// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, MediaTek Inc.
 * Copyright (c) 2021-2022, Intel Corporation.
 *
 * Authors:
 *  Amir Hanania <amir.hanania@intel.com>
 *  Haijun Liu <haijun.liu@mediatek.com>
 *  Eliot Lee <eliot.lee@intel.com>
 *  Moises Veleta <moises.veleta@intel.com>
 *  Ricardo Martinez <ricardo.martinez@linux.intel.com>
 *
 * Contributors:
 *  Andy Shevchenko <andriy.shevchenko@linux.intel.com>
 *  Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
 *  Sreehari Kancharla <sreehari.kancharla@intel.com>
 */
19 #include <linux/atomic.h>
20 #include <linux/bitfield.h>
21 #include <linux/bitops.h>
22 #include <linux/device.h>
23 #include <linux/dma-direction.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/gfp.h>
26 #include <linux/err.h>
27 #include <linux/iopoll.h>
28 #include <linux/jiffies.h>
29 #include <linux/kernel.h>
30 #include <linux/kthread.h>
31 #include <linux/list.h>
32 #include <linux/minmax.h>
34 #include <linux/netdevice.h>
35 #include <linux/pm_runtime.h>
36 #include <linux/sched.h>
37 #include <linux/skbuff.h>
38 #include <linux/slab.h>
39 #include <linux/spinlock.h>
40 #include <linux/string.h>
41 #include <linux/types.h>
42 #include <linux/wait.h>
43 #include <linux/workqueue.h>
45 #include "t7xx_dpmaif.h"
46 #include "t7xx_hif_dpmaif.h"
47 #include "t7xx_hif_dpmaif_rx.h"
48 #include "t7xx_netdev.h"
51 #define DPMAIF_BAT_COUNT 8192
52 #define DPMAIF_FRG_COUNT 4814
53 #define DPMAIF_PIT_COUNT (DPMAIF_BAT_COUNT * 2)
55 #define DPMAIF_BAT_CNT_THRESHOLD 30
56 #define DPMAIF_PIT_CNT_THRESHOLD 60
57 #define DPMAIF_RX_PUSH_THRESHOLD_MASK GENMASK(2, 0)
58 #define DPMAIF_NOTIFY_RELEASE_COUNT 128
59 #define DPMAIF_POLL_PIT_TIME_US 20
60 #define DPMAIF_POLL_PIT_MAX_TIME_US 2000
61 #define DPMAIF_WQ_TIME_LIMIT_MS 2
62 #define DPMAIF_CS_RESULT_PASS 0
68 #define PKT_BUF_FRAG 1
70 static unsigned int t7xx_normal_pit_bid(const struct dpmaif_pit *pit_info)
74 value = FIELD_GET(PD_PIT_H_BID, le32_to_cpu(pit_info->pd.footer));
76 value += FIELD_GET(PD_PIT_BUFFER_ID, le32_to_cpu(pit_info->header));
80 static int t7xx_dpmaif_update_bat_wr_idx(struct dpmaif_ctrl *dpmaif_ctrl,
81 const unsigned int q_num, const unsigned int bat_cnt)
83 struct dpmaif_rx_queue *rxq = &dpmaif_ctrl->rxq[q_num];
84 struct dpmaif_bat_request *bat_req = rxq->bat_req;
85 unsigned int old_rl_idx, new_wr_idx, old_wr_idx;
87 if (!rxq->que_started) {
88 dev_err(dpmaif_ctrl->dev, "RX queue %d has not been started\n", rxq->index);
92 old_rl_idx = bat_req->bat_release_rd_idx;
93 old_wr_idx = bat_req->bat_wr_idx;
94 new_wr_idx = old_wr_idx + bat_cnt;
96 if (old_rl_idx > old_wr_idx && new_wr_idx >= old_rl_idx)
99 if (new_wr_idx >= bat_req->bat_size_cnt) {
100 new_wr_idx -= bat_req->bat_size_cnt;
101 if (new_wr_idx >= old_rl_idx)
105 bat_req->bat_wr_idx = new_wr_idx;
109 dev_err(dpmaif_ctrl->dev, "RX BAT flow check fail\n");
113 static bool t7xx_alloc_and_map_skb_info(const struct dpmaif_ctrl *dpmaif_ctrl,
114 const unsigned int size, struct dpmaif_bat_skb *cur_skb)
116 dma_addr_t data_bus_addr;
119 skb = __dev_alloc_skb(size, GFP_KERNEL);
123 data_bus_addr = dma_map_single(dpmaif_ctrl->dev, skb->data, size, DMA_FROM_DEVICE);
124 if (dma_mapping_error(dpmaif_ctrl->dev, data_bus_addr)) {
125 dev_err_ratelimited(dpmaif_ctrl->dev, "DMA mapping error\n");
126 dev_kfree_skb_any(skb);
131 cur_skb->data_bus_addr = data_bus_addr;
132 cur_skb->data_len = size;
137 static void t7xx_unmap_bat_skb(struct device *dev, struct dpmaif_bat_skb *bat_skb_base,
140 struct dpmaif_bat_skb *bat_skb = bat_skb_base + index;
143 dma_unmap_single(dev, bat_skb->data_bus_addr, bat_skb->data_len, DMA_FROM_DEVICE);
144 dev_kfree_skb(bat_skb->skb);
150 * t7xx_dpmaif_rx_buf_alloc() - Allocate buffers for the BAT ring.
151 * @dpmaif_ctrl: Pointer to DPMAIF context structure.
152 * @bat_req: Pointer to BAT request structure.
153 * @q_num: Queue number.
154 * @buf_cnt: Number of buffers to allocate.
155 * @initial: Indicates if the ring is being populated for the first time.
157 * Allocate skb and store the start address of the data buffer into the BAT ring.
158 * If this is not the initial call, notify the HW about the new entries.
162 * * -ERROR - Error code.
164 int t7xx_dpmaif_rx_buf_alloc(struct dpmaif_ctrl *dpmaif_ctrl,
165 const struct dpmaif_bat_request *bat_req,
166 const unsigned int q_num, const unsigned int buf_cnt,
169 unsigned int i, bat_cnt, bat_max_cnt, bat_start_idx;
172 if (!buf_cnt || buf_cnt > bat_req->bat_size_cnt)
175 /* Check BAT buffer space */
176 bat_max_cnt = bat_req->bat_size_cnt;
178 bat_cnt = t7xx_ring_buf_rd_wr_count(bat_max_cnt, bat_req->bat_release_rd_idx,
179 bat_req->bat_wr_idx, DPMAIF_WRITE);
180 if (buf_cnt > bat_cnt)
183 bat_start_idx = bat_req->bat_wr_idx;
185 for (i = 0; i < buf_cnt; i++) {
186 unsigned int cur_bat_idx = bat_start_idx + i;
187 struct dpmaif_bat_skb *cur_skb;
188 struct dpmaif_bat *cur_bat;
190 if (cur_bat_idx >= bat_max_cnt)
191 cur_bat_idx -= bat_max_cnt;
193 cur_skb = (struct dpmaif_bat_skb *)bat_req->bat_skb + cur_bat_idx;
195 !t7xx_alloc_and_map_skb_info(dpmaif_ctrl, bat_req->pkt_buf_sz, cur_skb))
198 cur_bat = (struct dpmaif_bat *)bat_req->bat_base + cur_bat_idx;
199 cur_bat->buffer_addr_ext = upper_32_bits(cur_skb->data_bus_addr);
200 cur_bat->p_buffer_addr = lower_32_bits(cur_skb->data_bus_addr);
206 ret = t7xx_dpmaif_update_bat_wr_idx(dpmaif_ctrl, q_num, i);
211 unsigned int hw_wr_idx;
213 ret = t7xx_dpmaif_dl_snd_hw_bat_cnt(&dpmaif_ctrl->hw_info, i);
217 hw_wr_idx = t7xx_dpmaif_dl_get_bat_wr_idx(&dpmaif_ctrl->hw_info,
219 if (hw_wr_idx != bat_req->bat_wr_idx) {
221 dev_err(dpmaif_ctrl->dev, "Write index mismatch in RX ring\n");
230 t7xx_unmap_bat_skb(dpmaif_ctrl->dev, bat_req->bat_skb, i);
235 static int t7xx_dpmaifq_release_pit_entry(struct dpmaif_rx_queue *rxq,
236 const unsigned int rel_entry_num)
238 struct dpmaif_hw_info *hw_info = &rxq->dpmaif_ctrl->hw_info;
239 unsigned int old_rel_idx, new_rel_idx, hw_wr_idx;
242 if (!rxq->que_started)
245 if (rel_entry_num >= rxq->pit_size_cnt) {
246 dev_err(rxq->dpmaif_ctrl->dev, "Invalid PIT release index\n");
250 old_rel_idx = rxq->pit_release_rd_idx;
251 new_rel_idx = old_rel_idx + rel_entry_num;
252 hw_wr_idx = rxq->pit_wr_idx;
253 if (hw_wr_idx < old_rel_idx && new_rel_idx >= rxq->pit_size_cnt)
254 new_rel_idx -= rxq->pit_size_cnt;
256 ret = t7xx_dpmaif_dlq_add_pit_remain_cnt(hw_info, rxq->index, rel_entry_num);
258 dev_err(rxq->dpmaif_ctrl->dev, "PIT release failure: %d\n", ret);
262 rxq->pit_release_rd_idx = new_rel_idx;
266 static void t7xx_dpmaif_set_bat_mask(struct dpmaif_bat_request *bat_req, unsigned int idx)
270 spin_lock_irqsave(&bat_req->mask_lock, flags);
271 set_bit(idx, bat_req->bat_bitmap);
272 spin_unlock_irqrestore(&bat_req->mask_lock, flags);
275 static int t7xx_frag_bat_cur_bid_check(struct dpmaif_rx_queue *rxq,
276 const unsigned int cur_bid)
278 struct dpmaif_bat_request *bat_frag = rxq->bat_frag;
279 struct dpmaif_bat_page *bat_page;
281 if (cur_bid >= DPMAIF_FRG_COUNT)
284 bat_page = bat_frag->bat_skb + cur_bid;
291 static void t7xx_unmap_bat_page(struct device *dev, struct dpmaif_bat_page *bat_page_base,
294 struct dpmaif_bat_page *bat_page = bat_page_base + index;
296 if (bat_page->page) {
297 dma_unmap_page(dev, bat_page->data_bus_addr, bat_page->data_len, DMA_FROM_DEVICE);
298 put_page(bat_page->page);
299 bat_page->page = NULL;
304 * t7xx_dpmaif_rx_frag_alloc() - Allocates buffers for the Fragment BAT ring.
305 * @dpmaif_ctrl: Pointer to DPMAIF context structure.
306 * @bat_req: Pointer to BAT request structure.
307 * @buf_cnt: Number of buffers to allocate.
308 * @initial: Indicates if the ring is being populated for the first time.
310 * Fragment BAT is used when the received packet does not fit in a normal BAT entry.
311 * This function allocates a page fragment and stores the start address of the page
312 * into the Fragment BAT ring.
313 * If this is not the initial call, notify the HW about the new entries.
317 * * -ERROR - Error code.
319 int t7xx_dpmaif_rx_frag_alloc(struct dpmaif_ctrl *dpmaif_ctrl, struct dpmaif_bat_request *bat_req,
320 const unsigned int buf_cnt, const bool initial)
322 unsigned int buf_space, cur_bat_idx = bat_req->bat_wr_idx;
323 struct dpmaif_bat_page *bat_skb = bat_req->bat_skb;
326 if (!buf_cnt || buf_cnt > bat_req->bat_size_cnt)
329 buf_space = t7xx_ring_buf_rd_wr_count(bat_req->bat_size_cnt,
330 bat_req->bat_release_rd_idx, bat_req->bat_wr_idx,
332 if (buf_cnt > buf_space) {
333 dev_err(dpmaif_ctrl->dev,
334 "Requested more buffers than the space available in RX frag ring\n");
338 for (i = 0; i < buf_cnt; i++) {
339 struct dpmaif_bat_page *cur_page = bat_skb + cur_bat_idx;
340 struct dpmaif_bat *cur_bat;
341 dma_addr_t data_base_addr;
343 if (!cur_page->page) {
344 unsigned long offset;
348 data = netdev_alloc_frag(bat_req->pkt_buf_sz);
352 page = virt_to_head_page(data);
353 offset = data - page_address(page);
355 data_base_addr = dma_map_page(dpmaif_ctrl->dev, page, offset,
356 bat_req->pkt_buf_sz, DMA_FROM_DEVICE);
357 if (dma_mapping_error(dpmaif_ctrl->dev, data_base_addr)) {
358 put_page(virt_to_head_page(data));
359 dev_err(dpmaif_ctrl->dev, "DMA mapping fail\n");
363 cur_page->page = page;
364 cur_page->data_bus_addr = data_base_addr;
365 cur_page->offset = offset;
366 cur_page->data_len = bat_req->pkt_buf_sz;
369 data_base_addr = cur_page->data_bus_addr;
370 cur_bat = (struct dpmaif_bat *)bat_req->bat_base + cur_bat_idx;
371 cur_bat->buffer_addr_ext = upper_32_bits(data_base_addr);
372 cur_bat->p_buffer_addr = lower_32_bits(data_base_addr);
373 cur_bat_idx = t7xx_ring_buf_get_next_wr_idx(bat_req->bat_size_cnt, cur_bat_idx);
376 bat_req->bat_wr_idx = cur_bat_idx;
379 t7xx_dpmaif_dl_snd_hw_frg_cnt(&dpmaif_ctrl->hw_info, i);
385 t7xx_unmap_bat_page(dpmaif_ctrl->dev, bat_req->bat_skb, i);
392 static int t7xx_dpmaif_set_frag_to_skb(const struct dpmaif_rx_queue *rxq,
393 const struct dpmaif_pit *pkt_info,
396 unsigned long long data_bus_addr, data_base_addr;
397 struct device *dev = rxq->dpmaif_ctrl->dev;
398 struct dpmaif_bat_page *page_info;
399 unsigned int data_len;
402 page_info = rxq->bat_frag->bat_skb;
403 page_info += t7xx_normal_pit_bid(pkt_info);
404 dma_unmap_page(dev, page_info->data_bus_addr, page_info->data_len, DMA_FROM_DEVICE);
406 if (!page_info->page)
409 data_bus_addr = le32_to_cpu(pkt_info->pd.data_addr_h);
410 data_bus_addr = (data_bus_addr << 32) + le32_to_cpu(pkt_info->pd.data_addr_l);
411 data_base_addr = page_info->data_bus_addr;
412 data_offset = data_bus_addr - data_base_addr;
413 data_offset += page_info->offset;
414 data_len = FIELD_GET(PD_PIT_DATA_LEN, le32_to_cpu(pkt_info->header));
415 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page_info->page,
416 data_offset, data_len, page_info->data_len);
418 page_info->page = NULL;
419 page_info->offset = 0;
420 page_info->data_len = 0;
424 static int t7xx_dpmaif_get_frag(struct dpmaif_rx_queue *rxq,
425 const struct dpmaif_pit *pkt_info,
426 const struct dpmaif_cur_rx_skb_info *skb_info)
428 unsigned int cur_bid = t7xx_normal_pit_bid(pkt_info);
431 ret = t7xx_frag_bat_cur_bid_check(rxq, cur_bid);
435 ret = t7xx_dpmaif_set_frag_to_skb(rxq, pkt_info, skb_info->cur_skb);
437 dev_err(rxq->dpmaif_ctrl->dev, "Failed to set frag data to skb: %d\n", ret);
441 t7xx_dpmaif_set_bat_mask(rxq->bat_frag, cur_bid);
445 static int t7xx_bat_cur_bid_check(struct dpmaif_rx_queue *rxq, const unsigned int cur_bid)
447 struct dpmaif_bat_skb *bat_skb = rxq->bat_req->bat_skb;
450 if (cur_bid >= DPMAIF_BAT_COUNT || !bat_skb->skb)
456 static int t7xx_dpmaif_read_pit_seq(const struct dpmaif_pit *pit)
458 return FIELD_GET(PD_PIT_PIT_SEQ, le32_to_cpu(pit->pd.footer));
461 static int t7xx_dpmaif_check_pit_seq(struct dpmaif_rx_queue *rxq,
462 const struct dpmaif_pit *pit)
464 unsigned int cur_pit_seq, expect_pit_seq = rxq->expect_pit_seq;
466 if (read_poll_timeout_atomic(t7xx_dpmaif_read_pit_seq, cur_pit_seq,
467 cur_pit_seq == expect_pit_seq, DPMAIF_POLL_PIT_TIME_US,
468 DPMAIF_POLL_PIT_MAX_TIME_US, false, pit))
471 rxq->expect_pit_seq++;
472 if (rxq->expect_pit_seq >= DPMAIF_DL_PIT_SEQ_VALUE)
473 rxq->expect_pit_seq = 0;
478 static unsigned int t7xx_dpmaif_avail_pkt_bat_cnt(struct dpmaif_bat_request *bat_req)
480 unsigned int zero_index;
483 spin_lock_irqsave(&bat_req->mask_lock, flags);
485 zero_index = find_next_zero_bit(bat_req->bat_bitmap, bat_req->bat_size_cnt,
486 bat_req->bat_release_rd_idx);
488 if (zero_index < bat_req->bat_size_cnt) {
489 spin_unlock_irqrestore(&bat_req->mask_lock, flags);
490 return zero_index - bat_req->bat_release_rd_idx;
493 /* limiting the search till bat_release_rd_idx */
494 zero_index = find_first_zero_bit(bat_req->bat_bitmap, bat_req->bat_release_rd_idx);
495 spin_unlock_irqrestore(&bat_req->mask_lock, flags);
496 return bat_req->bat_size_cnt - bat_req->bat_release_rd_idx + zero_index;
499 static int t7xx_dpmaif_release_bat_entry(const struct dpmaif_rx_queue *rxq,
500 const unsigned int rel_entry_num,
501 const enum bat_type buf_type)
503 struct dpmaif_hw_info *hw_info = &rxq->dpmaif_ctrl->hw_info;
504 unsigned int old_rel_idx, new_rel_idx, hw_rd_idx, i;
505 struct dpmaif_bat_request *bat;
508 if (!rxq->que_started || !rel_entry_num)
511 if (buf_type == BAT_TYPE_FRAG) {
513 hw_rd_idx = t7xx_dpmaif_dl_get_frg_rd_idx(hw_info, rxq->index);
516 hw_rd_idx = t7xx_dpmaif_dl_get_bat_rd_idx(hw_info, rxq->index);
519 if (rel_entry_num >= bat->bat_size_cnt)
522 old_rel_idx = bat->bat_release_rd_idx;
523 new_rel_idx = old_rel_idx + rel_entry_num;
525 /* Do not need to release if the queue is empty */
526 if (bat->bat_wr_idx == old_rel_idx)
529 if (hw_rd_idx >= old_rel_idx) {
530 if (new_rel_idx > hw_rd_idx)
534 if (new_rel_idx >= bat->bat_size_cnt) {
535 new_rel_idx -= bat->bat_size_cnt;
536 if (new_rel_idx > hw_rd_idx)
540 spin_lock_irqsave(&bat->mask_lock, flags);
541 for (i = 0; i < rel_entry_num; i++) {
542 unsigned int index = bat->bat_release_rd_idx + i;
544 if (index >= bat->bat_size_cnt)
545 index -= bat->bat_size_cnt;
547 clear_bit(index, bat->bat_bitmap);
549 spin_unlock_irqrestore(&bat->mask_lock, flags);
551 bat->bat_release_rd_idx = new_rel_idx;
552 return rel_entry_num;
555 static int t7xx_dpmaif_pit_release_and_add(struct dpmaif_rx_queue *rxq)
559 if (rxq->pit_remain_release_cnt < DPMAIF_PIT_CNT_THRESHOLD)
562 ret = t7xx_dpmaifq_release_pit_entry(rxq, rxq->pit_remain_release_cnt);
566 rxq->pit_remain_release_cnt = 0;
570 static int t7xx_dpmaif_bat_release_and_add(const struct dpmaif_rx_queue *rxq)
572 unsigned int bid_cnt;
575 bid_cnt = t7xx_dpmaif_avail_pkt_bat_cnt(rxq->bat_req);
576 if (bid_cnt < DPMAIF_BAT_CNT_THRESHOLD)
579 ret = t7xx_dpmaif_release_bat_entry(rxq, bid_cnt, BAT_TYPE_NORMAL);
581 dev_err(rxq->dpmaif_ctrl->dev, "Release PKT BAT failed: %d\n", ret);
585 ret = t7xx_dpmaif_rx_buf_alloc(rxq->dpmaif_ctrl, rxq->bat_req, rxq->index, bid_cnt, false);
587 dev_err(rxq->dpmaif_ctrl->dev, "Allocate new RX buffer failed: %d\n", ret);
592 static int t7xx_dpmaif_frag_bat_release_and_add(const struct dpmaif_rx_queue *rxq)
594 unsigned int bid_cnt;
597 bid_cnt = t7xx_dpmaif_avail_pkt_bat_cnt(rxq->bat_frag);
598 if (bid_cnt < DPMAIF_BAT_CNT_THRESHOLD)
601 ret = t7xx_dpmaif_release_bat_entry(rxq, bid_cnt, BAT_TYPE_FRAG);
603 dev_err(rxq->dpmaif_ctrl->dev, "Release BAT entry failed: %d\n", ret);
607 return t7xx_dpmaif_rx_frag_alloc(rxq->dpmaif_ctrl, rxq->bat_frag, bid_cnt, false);
610 static void t7xx_dpmaif_parse_msg_pit(const struct dpmaif_rx_queue *rxq,
611 const struct dpmaif_pit *msg_pit,
612 struct dpmaif_cur_rx_skb_info *skb_info)
614 int header = le32_to_cpu(msg_pit->header);
616 skb_info->cur_chn_idx = FIELD_GET(MSG_PIT_CHANNEL_ID, header);
617 skb_info->check_sum = FIELD_GET(MSG_PIT_CHECKSUM, header);
618 skb_info->pit_dp = FIELD_GET(MSG_PIT_DP, header);
619 skb_info->pkt_type = FIELD_GET(MSG_PIT_IP, le32_to_cpu(msg_pit->msg.params_3));
622 static int t7xx_dpmaif_set_data_to_skb(const struct dpmaif_rx_queue *rxq,
623 const struct dpmaif_pit *pkt_info,
624 struct dpmaif_cur_rx_skb_info *skb_info)
626 unsigned long long data_bus_addr, data_base_addr;
627 struct device *dev = rxq->dpmaif_ctrl->dev;
628 struct dpmaif_bat_skb *bat_skb;
629 unsigned int data_len;
633 bat_skb = rxq->bat_req->bat_skb;
634 bat_skb += t7xx_normal_pit_bid(pkt_info);
635 dma_unmap_single(dev, bat_skb->data_bus_addr, bat_skb->data_len, DMA_FROM_DEVICE);
637 data_bus_addr = le32_to_cpu(pkt_info->pd.data_addr_h);
638 data_bus_addr = (data_bus_addr << 32) + le32_to_cpu(pkt_info->pd.data_addr_l);
639 data_base_addr = bat_skb->data_bus_addr;
640 data_offset = data_bus_addr - data_base_addr;
641 data_len = FIELD_GET(PD_PIT_DATA_LEN, le32_to_cpu(pkt_info->header));
644 skb_reset_tail_pointer(skb);
645 skb_reserve(skb, data_offset);
647 if (skb->tail + data_len > skb->end) {
648 dev_err(dev, "No buffer space available\n");
652 skb_put(skb, data_len);
653 skb_info->cur_skb = skb;
658 static int t7xx_dpmaif_get_rx_pkt(struct dpmaif_rx_queue *rxq,
659 const struct dpmaif_pit *pkt_info,
660 struct dpmaif_cur_rx_skb_info *skb_info)
662 unsigned int cur_bid = t7xx_normal_pit_bid(pkt_info);
665 ret = t7xx_bat_cur_bid_check(rxq, cur_bid);
669 ret = t7xx_dpmaif_set_data_to_skb(rxq, pkt_info, skb_info);
671 dev_err(rxq->dpmaif_ctrl->dev, "RX set data to skb failed: %d\n", ret);
675 t7xx_dpmaif_set_bat_mask(rxq->bat_req, cur_bid);
679 static int t7xx_dpmaifq_rx_notify_hw(struct dpmaif_rx_queue *rxq)
681 struct dpmaif_ctrl *dpmaif_ctrl = rxq->dpmaif_ctrl;
684 queue_work(dpmaif_ctrl->bat_release_wq, &dpmaif_ctrl->bat_release_work);
686 ret = t7xx_dpmaif_pit_release_and_add(rxq);
688 dev_err(dpmaif_ctrl->dev, "RXQ%u update PIT failed: %d\n", rxq->index, ret);
693 static void t7xx_dpmaif_rx_skb(struct dpmaif_rx_queue *rxq,
694 struct dpmaif_cur_rx_skb_info *skb_info)
696 struct dpmaif_ctrl *dpmaif_ctrl = rxq->dpmaif_ctrl;
697 struct sk_buff *skb = skb_info->cur_skb;
698 struct t7xx_skb_cb *skb_cb;
701 skb_info->cur_skb = NULL;
703 if (skb_info->pit_dp) {
704 dev_kfree_skb_any(skb);
708 skb->ip_summed = skb_info->check_sum == DPMAIF_CS_RESULT_PASS ? CHECKSUM_UNNECESSARY :
710 netif_id = FIELD_GET(NETIF_MASK, skb_info->cur_chn_idx);
711 skb_cb = T7XX_SKB_CB(skb);
712 skb_cb->netif_idx = netif_id;
713 skb_cb->rx_pkt_type = skb_info->pkt_type;
714 dpmaif_ctrl->callbacks->recv_skb(dpmaif_ctrl->t7xx_dev->ccmni_ctlb, skb, &rxq->napi);
717 static int t7xx_dpmaif_rx_start(struct dpmaif_rx_queue *rxq, const unsigned int pit_cnt,
718 const unsigned int budget, int *once_more)
720 unsigned int cur_pit, pit_len, rx_cnt, recv_skb_cnt = 0;
721 struct device *dev = rxq->dpmaif_ctrl->dev;
722 struct dpmaif_cur_rx_skb_info *skb_info;
725 pit_len = rxq->pit_size_cnt;
726 skb_info = &rxq->rx_data_info;
727 cur_pit = rxq->pit_rd_idx;
729 for (rx_cnt = 0; rx_cnt < pit_cnt; rx_cnt++) {
730 struct dpmaif_pit *pkt_info;
733 if (!skb_info->msg_pit_received && recv_skb_cnt >= budget)
736 pkt_info = (struct dpmaif_pit *)rxq->pit_base + cur_pit;
737 if (t7xx_dpmaif_check_pit_seq(rxq, pkt_info)) {
738 dev_err_ratelimited(dev, "RXQ%u checks PIT SEQ fail\n", rxq->index);
743 val = FIELD_GET(PD_PIT_PACKET_TYPE, le32_to_cpu(pkt_info->header));
744 if (val == DES_PT_MSG) {
745 if (skb_info->msg_pit_received)
746 dev_err(dev, "RXQ%u received repeated PIT\n", rxq->index);
748 skb_info->msg_pit_received = true;
749 t7xx_dpmaif_parse_msg_pit(rxq, pkt_info, skb_info);
750 } else { /* DES_PT_PD */
751 val = FIELD_GET(PD_PIT_BUFFER_TYPE, le32_to_cpu(pkt_info->header));
752 if (val != PKT_BUF_FRAG)
753 ret = t7xx_dpmaif_get_rx_pkt(rxq, pkt_info, skb_info);
754 else if (!skb_info->cur_skb)
757 ret = t7xx_dpmaif_get_frag(rxq, pkt_info, skb_info);
760 skb_info->err_payload = 1;
761 dev_err_ratelimited(dev, "RXQ%u error payload\n", rxq->index);
764 val = FIELD_GET(PD_PIT_CONT, le32_to_cpu(pkt_info->header));
766 if (!skb_info->err_payload) {
767 t7xx_dpmaif_rx_skb(rxq, skb_info);
768 } else if (skb_info->cur_skb) {
769 dev_kfree_skb_any(skb_info->cur_skb);
770 skb_info->cur_skb = NULL;
773 memset(skb_info, 0, sizeof(*skb_info));
778 cur_pit = t7xx_ring_buf_get_next_wr_idx(pit_len, cur_pit);
779 rxq->pit_rd_idx = cur_pit;
780 rxq->pit_remain_release_cnt++;
782 if (rx_cnt > 0 && !(rx_cnt % DPMAIF_NOTIFY_RELEASE_COUNT)) {
783 ret = t7xx_dpmaifq_rx_notify_hw(rxq);
790 ret = t7xx_dpmaifq_rx_notify_hw(rxq);
798 static unsigned int t7xx_dpmaifq_poll_pit(struct dpmaif_rx_queue *rxq)
800 unsigned int hw_wr_idx, pit_cnt;
802 if (!rxq->que_started)
805 hw_wr_idx = t7xx_dpmaif_dl_dlq_pit_get_wr_idx(&rxq->dpmaif_ctrl->hw_info, rxq->index);
806 pit_cnt = t7xx_ring_buf_rd_wr_count(rxq->pit_size_cnt, rxq->pit_rd_idx, hw_wr_idx,
808 rxq->pit_wr_idx = hw_wr_idx;
812 static int t7xx_dpmaif_napi_rx_data_collect(struct dpmaif_ctrl *dpmaif_ctrl,
813 const unsigned int q_num,
814 const unsigned int budget, int *once_more)
816 struct dpmaif_rx_queue *rxq = &dpmaif_ctrl->rxq[q_num];
820 cnt = t7xx_dpmaifq_poll_pit(rxq);
824 ret = t7xx_dpmaif_rx_start(rxq, cnt, budget, once_more);
826 dev_err(dpmaif_ctrl->dev, "dlq%u rx ERR:%d\n", rxq->index, ret);
831 int t7xx_dpmaif_napi_rx_poll(struct napi_struct *napi, const int budget)
833 struct dpmaif_rx_queue *rxq = container_of(napi, struct dpmaif_rx_queue, napi);
834 struct t7xx_pci_dev *t7xx_dev = rxq->dpmaif_ctrl->t7xx_dev;
835 int ret, once_more = 0, work_done = 0;
837 atomic_set(&rxq->rx_processing, 1);
838 /* Ensure rx_processing is changed to 1 before actually begin RX flow */
841 if (!rxq->que_started) {
842 atomic_set(&rxq->rx_processing, 0);
843 pm_runtime_put_autosuspend(rxq->dpmaif_ctrl->dev);
844 dev_err(rxq->dpmaif_ctrl->dev, "Work RXQ: %d has not been started\n", rxq->index);
848 if (!rxq->sleep_lock_pending)
849 t7xx_pci_disable_sleep(t7xx_dev);
851 ret = try_wait_for_completion(&t7xx_dev->sleep_lock_acquire);
853 napi_complete_done(napi, work_done);
854 rxq->sleep_lock_pending = true;
859 rxq->sleep_lock_pending = false;
860 while (work_done < budget) {
861 int each_budget = budget - work_done;
862 int rx_cnt = t7xx_dpmaif_napi_rx_data_collect(rxq->dpmaif_ctrl, rxq->index,
863 each_budget, &once_more);
871 napi_gro_flush(napi, false);
873 t7xx_dpmaif_clr_ip_busy_sts(&rxq->dpmaif_ctrl->hw_info);
874 } else if (work_done < budget) {
875 napi_complete_done(napi, work_done);
876 t7xx_dpmaif_clr_ip_busy_sts(&rxq->dpmaif_ctrl->hw_info);
877 t7xx_dpmaif_dlq_unmask_rx_done(&rxq->dpmaif_ctrl->hw_info, rxq->index);
878 t7xx_pci_enable_sleep(rxq->dpmaif_ctrl->t7xx_dev);
879 pm_runtime_mark_last_busy(rxq->dpmaif_ctrl->dev);
880 pm_runtime_put_autosuspend(rxq->dpmaif_ctrl->dev);
881 atomic_set(&rxq->rx_processing, 0);
883 t7xx_dpmaif_clr_ip_busy_sts(&rxq->dpmaif_ctrl->hw_info);
889 void t7xx_dpmaif_irq_rx_done(struct dpmaif_ctrl *dpmaif_ctrl, const unsigned int que_mask)
891 struct dpmaif_rx_queue *rxq;
892 struct dpmaif_ctrl *ctrl;
895 qno = ffs(que_mask) - 1;
896 if (qno < 0 || qno > DPMAIF_RXQ_NUM - 1) {
897 dev_err(dpmaif_ctrl->dev, "Invalid RXQ number: %u\n", qno);
901 rxq = &dpmaif_ctrl->rxq[qno];
902 ctrl = rxq->dpmaif_ctrl;
903 /* We need to make sure that the modem has been resumed before
904 * calling napi. This can't be done inside the polling function
905 * as we could be blocked waiting for device to be resumed,
906 * which can't be done from softirq context the poll function
909 ret = pm_runtime_resume_and_get(ctrl->dev);
910 if (ret < 0 && ret != -EACCES) {
911 dev_err(ctrl->dev, "Failed to resume device: %d\n", ret);
914 napi_schedule(&rxq->napi);
917 static void t7xx_dpmaif_base_free(const struct dpmaif_ctrl *dpmaif_ctrl,
918 const struct dpmaif_bat_request *bat_req)
920 if (bat_req->bat_base)
921 dma_free_coherent(dpmaif_ctrl->dev,
922 bat_req->bat_size_cnt * sizeof(struct dpmaif_bat),
923 bat_req->bat_base, bat_req->bat_bus_addr);
927 * t7xx_dpmaif_bat_alloc() - Allocate the BAT ring buffer.
928 * @dpmaif_ctrl: Pointer to DPMAIF context structure.
929 * @bat_req: Pointer to BAT request structure.
930 * @buf_type: BAT ring type.
932 * This function allocates the BAT ring buffer shared with the HW device, also allocates
933 * a buffer used to store information about the BAT skbs for further release.
937 * * -ERROR - Error code.
939 int t7xx_dpmaif_bat_alloc(const struct dpmaif_ctrl *dpmaif_ctrl, struct dpmaif_bat_request *bat_req,
940 const enum bat_type buf_type)
944 if (buf_type == BAT_TYPE_FRAG) {
945 sw_buf_size = sizeof(struct dpmaif_bat_page);
946 bat_req->bat_size_cnt = DPMAIF_FRG_COUNT;
947 bat_req->pkt_buf_sz = DPMAIF_HW_FRG_PKTBUF;
949 sw_buf_size = sizeof(struct dpmaif_bat_skb);
950 bat_req->bat_size_cnt = DPMAIF_BAT_COUNT;
951 bat_req->pkt_buf_sz = DPMAIF_HW_BAT_PKTBUF;
954 bat_req->type = buf_type;
955 bat_req->bat_wr_idx = 0;
956 bat_req->bat_release_rd_idx = 0;
958 bat_req->bat_base = dma_alloc_coherent(dpmaif_ctrl->dev,
959 bat_req->bat_size_cnt * sizeof(struct dpmaif_bat),
960 &bat_req->bat_bus_addr, GFP_KERNEL | __GFP_ZERO);
961 if (!bat_req->bat_base)
964 /* For AP SW to record skb information */
965 bat_req->bat_skb = devm_kzalloc(dpmaif_ctrl->dev, bat_req->bat_size_cnt * sw_buf_size,
967 if (!bat_req->bat_skb)
968 goto err_free_dma_mem;
970 bat_req->bat_bitmap = bitmap_zalloc(bat_req->bat_size_cnt, GFP_KERNEL);
971 if (!bat_req->bat_bitmap)
972 goto err_free_dma_mem;
974 spin_lock_init(&bat_req->mask_lock);
975 atomic_set(&bat_req->refcnt, 0);
979 t7xx_dpmaif_base_free(dpmaif_ctrl, bat_req);
984 void t7xx_dpmaif_bat_free(const struct dpmaif_ctrl *dpmaif_ctrl, struct dpmaif_bat_request *bat_req)
986 if (!bat_req || !atomic_dec_and_test(&bat_req->refcnt))
989 bitmap_free(bat_req->bat_bitmap);
990 bat_req->bat_bitmap = NULL;
992 if (bat_req->bat_skb) {
995 for (i = 0; i < bat_req->bat_size_cnt; i++) {
996 if (bat_req->type == BAT_TYPE_FRAG)
997 t7xx_unmap_bat_page(dpmaif_ctrl->dev, bat_req->bat_skb, i);
999 t7xx_unmap_bat_skb(dpmaif_ctrl->dev, bat_req->bat_skb, i);
1003 t7xx_dpmaif_base_free(dpmaif_ctrl, bat_req);
1006 static int t7xx_dpmaif_rx_alloc(struct dpmaif_rx_queue *rxq)
1008 rxq->pit_size_cnt = DPMAIF_PIT_COUNT;
1009 rxq->pit_rd_idx = 0;
1010 rxq->pit_wr_idx = 0;
1011 rxq->pit_release_rd_idx = 0;
1012 rxq->expect_pit_seq = 0;
1013 rxq->pit_remain_release_cnt = 0;
1014 memset(&rxq->rx_data_info, 0, sizeof(rxq->rx_data_info));
1016 rxq->pit_base = dma_alloc_coherent(rxq->dpmaif_ctrl->dev,
1017 rxq->pit_size_cnt * sizeof(struct dpmaif_pit),
1018 &rxq->pit_bus_addr, GFP_KERNEL | __GFP_ZERO);
1022 rxq->bat_req = &rxq->dpmaif_ctrl->bat_req;
1023 atomic_inc(&rxq->bat_req->refcnt);
1025 rxq->bat_frag = &rxq->dpmaif_ctrl->bat_frag;
1026 atomic_inc(&rxq->bat_frag->refcnt);
1030 static void t7xx_dpmaif_rx_buf_free(const struct dpmaif_rx_queue *rxq)
1032 if (!rxq->dpmaif_ctrl)
1035 t7xx_dpmaif_bat_free(rxq->dpmaif_ctrl, rxq->bat_req);
1036 t7xx_dpmaif_bat_free(rxq->dpmaif_ctrl, rxq->bat_frag);
1039 dma_free_coherent(rxq->dpmaif_ctrl->dev,
1040 rxq->pit_size_cnt * sizeof(struct dpmaif_pit),
1041 rxq->pit_base, rxq->pit_bus_addr);
1044 int t7xx_dpmaif_rxq_init(struct dpmaif_rx_queue *queue)
1048 ret = t7xx_dpmaif_rx_alloc(queue);
1050 dev_err(queue->dpmaif_ctrl->dev, "Failed to allocate RX buffers: %d\n", ret);
/* Free all buffers owned by an RX queue. */
void t7xx_dpmaif_rxq_free(struct dpmaif_rx_queue *queue)
{
	t7xx_dpmaif_rx_buf_free(queue);
}
1060 static void t7xx_dpmaif_bat_release_work(struct work_struct *work)
1062 struct dpmaif_ctrl *dpmaif_ctrl = container_of(work, struct dpmaif_ctrl, bat_release_work);
1063 struct dpmaif_rx_queue *rxq;
1066 ret = pm_runtime_resume_and_get(dpmaif_ctrl->dev);
1067 if (ret < 0 && ret != -EACCES)
1070 t7xx_pci_disable_sleep(dpmaif_ctrl->t7xx_dev);
1072 /* ALL RXQ use one BAT table, so choose DPF_RX_QNO_DFT */
1073 rxq = &dpmaif_ctrl->rxq[DPF_RX_QNO_DFT];
1074 if (t7xx_pci_sleep_disable_complete(dpmaif_ctrl->t7xx_dev)) {
1075 t7xx_dpmaif_bat_release_and_add(rxq);
1076 t7xx_dpmaif_frag_bat_release_and_add(rxq);
1079 t7xx_pci_enable_sleep(dpmaif_ctrl->t7xx_dev);
1080 pm_runtime_mark_last_busy(dpmaif_ctrl->dev);
1081 pm_runtime_put_autosuspend(dpmaif_ctrl->dev);
1084 int t7xx_dpmaif_bat_rel_wq_alloc(struct dpmaif_ctrl *dpmaif_ctrl)
1086 dpmaif_ctrl->bat_release_wq = alloc_workqueue("dpmaif_bat_release_work_queue",
1088 if (!dpmaif_ctrl->bat_release_wq)
1091 INIT_WORK(&dpmaif_ctrl->bat_release_work, t7xx_dpmaif_bat_release_work);
1095 void t7xx_dpmaif_bat_wq_rel(struct dpmaif_ctrl *dpmaif_ctrl)
1097 flush_work(&dpmaif_ctrl->bat_release_work);
1099 if (dpmaif_ctrl->bat_release_wq) {
1100 destroy_workqueue(dpmaif_ctrl->bat_release_wq);
1101 dpmaif_ctrl->bat_release_wq = NULL;
1106 * t7xx_dpmaif_rx_stop() - Suspend RX flow.
1107 * @dpmaif_ctrl: Pointer to data path control struct dpmaif_ctrl.
1109 * Wait for all the RX work to finish executing and mark the RX queue as paused.
1111 void t7xx_dpmaif_rx_stop(struct dpmaif_ctrl *dpmaif_ctrl)
1115 for (i = 0; i < DPMAIF_RXQ_NUM; i++) {
1116 struct dpmaif_rx_queue *rxq = &dpmaif_ctrl->rxq[i];
1119 timeout = readx_poll_timeout_atomic(atomic_read, &rxq->rx_processing, value,
1120 !value, 0, DPMAIF_CHECK_INIT_TIMEOUT_US);
1122 dev_err(dpmaif_ctrl->dev, "Stop RX SW failed\n");
1124 /* Ensure RX processing has stopped before we set rxq->que_started to false */
1126 rxq->que_started = false;
1130 static void t7xx_dpmaif_stop_rxq(struct dpmaif_rx_queue *rxq)
1134 rxq->que_started = false;
1137 cnt = t7xx_ring_buf_rd_wr_count(rxq->pit_size_cnt, rxq->pit_rd_idx,
1138 rxq->pit_wr_idx, DPMAIF_READ);
1140 if (++j >= DPMAIF_MAX_CHECK_COUNT) {
1141 dev_err(rxq->dpmaif_ctrl->dev, "Stop RX SW failed, %d\n", cnt);
1146 memset(rxq->pit_base, 0, rxq->pit_size_cnt * sizeof(struct dpmaif_pit));
1147 memset(rxq->bat_req->bat_base, 0, rxq->bat_req->bat_size_cnt * sizeof(struct dpmaif_bat));
1148 bitmap_zero(rxq->bat_req->bat_bitmap, rxq->bat_req->bat_size_cnt);
1149 memset(&rxq->rx_data_info, 0, sizeof(rxq->rx_data_info));
1151 rxq->pit_rd_idx = 0;
1152 rxq->pit_wr_idx = 0;
1153 rxq->pit_release_rd_idx = 0;
1154 rxq->expect_pit_seq = 0;
1155 rxq->pit_remain_release_cnt = 0;
1156 rxq->bat_req->bat_release_rd_idx = 0;
1157 rxq->bat_req->bat_wr_idx = 0;
1158 rxq->bat_frag->bat_release_rd_idx = 0;
1159 rxq->bat_frag->bat_wr_idx = 0;
1162 void t7xx_dpmaif_rx_clear(struct dpmaif_ctrl *dpmaif_ctrl)
1166 for (i = 0; i < DPMAIF_RXQ_NUM; i++)
1167 t7xx_dpmaif_stop_rxq(&dpmaif_ctrl->rxq[i]);