// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, MediaTek Inc.
 * Copyright (c) 2021-2022, Intel Corporation.
 *
 * Authors:
 *  Amir Hanania <amir.hanania@intel.com>
 *  Haijun Liu <haijun.liu@mediatek.com>
 *  Eliot Lee <eliot.lee@intel.com>
 *  Moises Veleta <moises.veleta@intel.com>
 *  Ricardo Martinez <ricardo.martinez@linux.intel.com>
 *
 * Contributors:
 *  Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
 *  Sreehari Kancharla <sreehari.kancharla@intel.com>
 */

#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/minmax.h>
#include <linux/netdevice.h>
#include <linux/pm_runtime.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include "t7xx_dpmaif.h"
#include "t7xx_hif_dpmaif.h"
#include "t7xx_hif_dpmaif_tx.h"
#include "t7xx_pci.h"

#define DPMAIF_SKB_TX_BURST_CNT	5
#define DPMAIF_DRB_LIST_LEN	6144

/* DRB descriptor types */
#define DES_DTYP_PD		0
#define DES_DTYP_MSG		1

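/* Read the HW UL read index for the queue, store it as the new SW read index
 * and return how many DRBs the HW has consumed since the last update.
 */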
static unsigned int t7xx_dpmaif_update_drb_rd_idx(struct dpmaif_ctrl *dpmaif_ctrl,
						  unsigned int q_num)
{
	struct dpmaif_tx_queue *txq = &dpmaif_ctrl->txq[q_num];
	unsigned int old_sw_rd_idx, new_hw_rd_idx, drb_cnt;
	unsigned long flags;

	if (!txq->que_started)
		return 0;

	old_sw_rd_idx = txq->drb_rd_idx;
	new_hw_rd_idx = t7xx_dpmaif_ul_get_rd_idx(&dpmaif_ctrl->hw_info, q_num);
	if (new_hw_rd_idx >= DPMAIF_DRB_LIST_LEN) {
		dev_err(dpmaif_ctrl->dev, "Out of range read index: %u\n", new_hw_rd_idx);
		return 0;
	}

	if (old_sw_rd_idx <= new_hw_rd_idx)
		drb_cnt = new_hw_rd_idx - old_sw_rd_idx;
	else
		drb_cnt = txq->drb_size_cnt - old_sw_rd_idx + new_hw_rd_idx;

	spin_lock_irqsave(&txq->tx_lock, flags);
	txq->drb_rd_idx = new_hw_rd_idx;
	spin_unlock_irqrestore(&txq->tx_lock, flags);

	return drb_cnt;
}

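/* Walk the DRB ring from the release read index: unmap every payload buffer,
 * free the skb once its last (non-CONT) DRB is reached and replenish the TX
 * budget. Returns the number of DRBs released.
 */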
static unsigned int t7xx_dpmaif_release_tx_buffer(struct dpmaif_ctrl *dpmaif_ctrl,
						  unsigned int q_num, unsigned int release_cnt)
{
	struct dpmaif_tx_queue *txq = &dpmaif_ctrl->txq[q_num];
	struct dpmaif_callbacks *cb = dpmaif_ctrl->callbacks;
	struct dpmaif_drb_skb *cur_drb_skb, *drb_skb_base;
	struct dpmaif_drb *cur_drb, *drb_base;
	unsigned int drb_cnt, i, cur_idx;
	unsigned long flags;

	drb_skb_base = txq->drb_skb_base;
	drb_base = txq->drb_base;

	spin_lock_irqsave(&txq->tx_lock, flags);
	drb_cnt = txq->drb_size_cnt;
	cur_idx = txq->drb_release_rd_idx;
	spin_unlock_irqrestore(&txq->tx_lock, flags);

	for (i = 0; i < release_cnt; i++) {
		cur_drb = drb_base + cur_idx;
		if (FIELD_GET(DRB_HDR_DTYP, le32_to_cpu(cur_drb->header)) == DES_DTYP_PD) {
			cur_drb_skb = drb_skb_base + cur_idx;
			if (!cur_drb_skb->is_msg)
				dma_unmap_single(dpmaif_ctrl->dev, cur_drb_skb->bus_addr,
						 cur_drb_skb->data_len, DMA_TO_DEVICE);

			if (!FIELD_GET(DRB_HDR_CONT, le32_to_cpu(cur_drb->header))) {
				if (!cur_drb_skb->skb) {
					dev_err(dpmaif_ctrl->dev,
						"txq%u: DRB check fail, invalid skb\n", q_num);
					continue;
				}

				dev_kfree_skb_any(cur_drb_skb->skb);
			}

			cur_drb_skb->skb = NULL;
		}

		spin_lock_irqsave(&txq->tx_lock, flags);
		cur_idx = t7xx_ring_buf_get_next_wr_idx(drb_cnt, cur_idx);
		txq->drb_release_rd_idx = cur_idx;
		spin_unlock_irqrestore(&txq->tx_lock, flags);

		if (atomic_inc_return(&txq->tx_budget) > txq->drb_size_cnt / 8)
			cb->state_notify(dpmaif_ctrl->t7xx_dev, DMPAIF_TXQ_STATE_IRQ, txq->index);
	}

	if (FIELD_GET(DRB_HDR_CONT, le32_to_cpu(cur_drb->header)))
		dev_err(dpmaif_ctrl->dev, "txq%u: DRB not marked as the last one\n", q_num);

	return i;
}

static int t7xx_dpmaif_tx_release(struct dpmaif_ctrl *dpmaif_ctrl,
				  unsigned int q_num, unsigned int budget)
{
	struct dpmaif_tx_queue *txq = &dpmaif_ctrl->txq[q_num];
	unsigned int rel_cnt, real_rel_cnt;

	/* Update read index from HW */
	t7xx_dpmaif_update_drb_rd_idx(dpmaif_ctrl, q_num);

	rel_cnt = t7xx_ring_buf_rd_wr_count(txq->drb_size_cnt, txq->drb_release_rd_idx,
					    txq->drb_rd_idx, DPMAIF_READ);

	real_rel_cnt = min_not_zero(budget, rel_cnt);
	if (real_rel_cnt)
		real_rel_cnt = t7xx_dpmaif_release_tx_buffer(dpmaif_ctrl, q_num, real_rel_cnt);

	return real_rel_cnt < rel_cnt ? -EAGAIN : 0;
}

static bool t7xx_dpmaif_drb_ring_not_empty(struct dpmaif_tx_queue *txq)
{
	return !!t7xx_dpmaif_update_drb_rd_idx(txq->dpmaif_ctrl, txq->index);
}

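/* Work item that releases DRBs completed by the HW. It re-queues itself while
 * the ring still has pending DRBs, otherwise it clears the IP busy status and
 * unmasks the UL queue interrupt.
 */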
static void t7xx_dpmaif_tx_done(struct work_struct *work)
{
	struct dpmaif_tx_queue *txq = container_of(work, struct dpmaif_tx_queue, dpmaif_tx_work);
	struct dpmaif_ctrl *dpmaif_ctrl = txq->dpmaif_ctrl;
	struct dpmaif_hw_info *hw_info;
	int ret;

	ret = pm_runtime_resume_and_get(dpmaif_ctrl->dev);
	if (ret < 0 && ret != -EACCES)
		return;

	/* The device may be in low power state. Disable sleep if needed */
	t7xx_pci_disable_sleep(dpmaif_ctrl->t7xx_dev);
	if (t7xx_pci_sleep_disable_complete(dpmaif_ctrl->t7xx_dev)) {
		hw_info = &dpmaif_ctrl->hw_info;
		ret = t7xx_dpmaif_tx_release(dpmaif_ctrl, txq->index, txq->drb_size_cnt);
		if (ret == -EAGAIN ||
		    (t7xx_dpmaif_ul_clr_done(hw_info, txq->index) &&
		     t7xx_dpmaif_drb_ring_not_empty(txq))) {
			queue_work(dpmaif_ctrl->txq[txq->index].worker,
				   &dpmaif_ctrl->txq[txq->index].dpmaif_tx_work);
			/* Give the device time to enter the low power state */
			t7xx_dpmaif_clr_ip_busy_sts(hw_info);
		} else {
			t7xx_dpmaif_clr_ip_busy_sts(hw_info);
			t7xx_dpmaif_unmask_ulq_intr(hw_info, txq->index);
		}
	}

	t7xx_pci_enable_sleep(dpmaif_ctrl->t7xx_dev);
	pm_runtime_mark_last_busy(dpmaif_ctrl->dev);
	pm_runtime_put_autosuspend(dpmaif_ctrl->dev);
}

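/* Fill the message DRB that starts every skb transfer. It carries the total
 * packet length and the channel ID; CONT is set because at least one payload
 * DRB always follows.
 */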
static void t7xx_setup_msg_drb(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int q_num,
			       unsigned int cur_idx, unsigned int pkt_len, unsigned int count_l,
			       unsigned int channel_id)
{
	struct dpmaif_drb *drb_base = dpmaif_ctrl->txq[q_num].drb_base;
	struct dpmaif_drb *drb = drb_base + cur_idx;

	drb->header = cpu_to_le32(FIELD_PREP(DRB_HDR_DTYP, DES_DTYP_MSG) |
				  FIELD_PREP(DRB_HDR_CONT, 1) |
				  FIELD_PREP(DRB_HDR_DATA_LEN, pkt_len));

	drb->msg.msg_hdr = cpu_to_le32(FIELD_PREP(DRB_MSG_COUNT_L, count_l) |
				       FIELD_PREP(DRB_MSG_CHANNEL_ID, channel_id) |
				       FIELD_PREP(DRB_MSG_L4_CHK, 1));
}

static void t7xx_setup_payload_drb(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int q_num,
				   unsigned int cur_idx, dma_addr_t data_addr,
				   unsigned int pkt_size, bool last_one)
{
	struct dpmaif_drb *drb_base = dpmaif_ctrl->txq[q_num].drb_base;
	struct dpmaif_drb *drb = drb_base + cur_idx;
	u32 header;

	header = FIELD_PREP(DRB_HDR_DTYP, DES_DTYP_PD) | FIELD_PREP(DRB_HDR_DATA_LEN, pkt_size);
	if (!last_one)
		header |= FIELD_PREP(DRB_HDR_CONT, 1);

	drb->header = cpu_to_le32(header);
	drb->pd.data_addr_l = cpu_to_le32(lower_32_bits(data_addr));
	drb->pd.data_addr_h = cpu_to_le32(upper_32_bits(data_addr));
}

static void t7xx_record_drb_skb(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int q_num,
				unsigned int cur_idx, struct sk_buff *skb, bool is_msg,
				bool is_frag, bool is_last_one, dma_addr_t bus_addr,
				unsigned int data_len)
{
	struct dpmaif_drb_skb *drb_skb_base = dpmaif_ctrl->txq[q_num].drb_skb_base;
	struct dpmaif_drb_skb *drb_skb = drb_skb_base + cur_idx;

	drb_skb->skb = skb;
	drb_skb->bus_addr = bus_addr;
	drb_skb->data_len = data_len;
	drb_skb->index = cur_idx;
	drb_skb->is_msg = is_msg;
	drb_skb->is_frag = is_frag;
	drb_skb->is_last = is_last_one;
}

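/* Map one skb onto the DRB ring: one message DRB followed by a payload DRB for
 * the linear data and for each fragment. If a DMA mapping fails, the buffers
 * mapped so far are unmapped and the write index is rolled back.
 */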
static int t7xx_dpmaif_add_skb_to_ring(struct dpmaif_ctrl *dpmaif_ctrl, struct sk_buff *skb)
{
	struct dpmaif_callbacks *cb = dpmaif_ctrl->callbacks;
	unsigned int wr_cnt, send_cnt, payload_cnt;
	unsigned int cur_idx, drb_wr_idx_backup;
	struct skb_shared_info *shinfo;
	struct dpmaif_tx_queue *txq;
	struct t7xx_skb_cb *skb_cb;
	unsigned long flags;

	skb_cb = T7XX_SKB_CB(skb);
	txq = &dpmaif_ctrl->txq[skb_cb->txq_number];
	if (!txq->que_started || dpmaif_ctrl->state != DPMAIF_STATE_PWRON)
		return -ENODEV;

	atomic_set(&txq->tx_processing, 1);
	/* Ensure tx_processing is changed to 1 before actually beginning the TX flow */
	smp_mb();

	shinfo = skb_shinfo(skb);
	if (shinfo->frag_list)
		dev_warn_ratelimited(dpmaif_ctrl->dev, "frag_list not supported\n");

	payload_cnt = shinfo->nr_frags + 1;
	/* nr_frags: frag cnt, 1: skb->data, 1: msg DRB */
	send_cnt = payload_cnt + 1;

	spin_lock_irqsave(&txq->tx_lock, flags);
	cur_idx = txq->drb_wr_idx;
	drb_wr_idx_backup = cur_idx;
	txq->drb_wr_idx += send_cnt;
	if (txq->drb_wr_idx >= txq->drb_size_cnt)
		txq->drb_wr_idx -= txq->drb_size_cnt;
	t7xx_setup_msg_drb(dpmaif_ctrl, txq->index, cur_idx, skb->len, 0, skb_cb->netif_idx);
	t7xx_record_drb_skb(dpmaif_ctrl, txq->index, cur_idx, skb, true, 0, 0, 0, 0);
	spin_unlock_irqrestore(&txq->tx_lock, flags);

	for (wr_cnt = 0; wr_cnt < payload_cnt; wr_cnt++) {
		bool is_frag, is_last_one = wr_cnt == payload_cnt - 1;
		unsigned int data_len;
		dma_addr_t bus_addr;
		void *data_addr;

		if (!wr_cnt) {
			data_len = skb_headlen(skb);
			data_addr = skb->data;
			is_frag = false;
		} else {
			skb_frag_t *frag = shinfo->frags + wr_cnt - 1;

			data_len = skb_frag_size(frag);
			data_addr = skb_frag_address(frag);
			is_frag = true;
		}

		bus_addr = dma_map_single(dpmaif_ctrl->dev, data_addr, data_len, DMA_TO_DEVICE);
		if (dma_mapping_error(dpmaif_ctrl->dev, bus_addr))
			goto unmap_buffers;

		cur_idx = t7xx_ring_buf_get_next_wr_idx(txq->drb_size_cnt, cur_idx);

		spin_lock_irqsave(&txq->tx_lock, flags);
		t7xx_setup_payload_drb(dpmaif_ctrl, txq->index, cur_idx, bus_addr, data_len,
				       is_last_one);
		t7xx_record_drb_skb(dpmaif_ctrl, txq->index, cur_idx, skb, false, is_frag,
				    is_last_one, bus_addr, data_len);
		spin_unlock_irqrestore(&txq->tx_lock, flags);
	}

	if (atomic_sub_return(send_cnt, &txq->tx_budget) <= (MAX_SKB_FRAGS + 2))
		cb->state_notify(dpmaif_ctrl->t7xx_dev, DMPAIF_TXQ_STATE_FULL, txq->index);

	atomic_set(&txq->tx_processing, 0);

	return 0;

unmap_buffers:
	while (wr_cnt--) {
		struct dpmaif_drb_skb *drb_skb = txq->drb_skb_base;

		cur_idx = cur_idx ? cur_idx - 1 : txq->drb_size_cnt - 1;
		drb_skb += cur_idx;
		dma_unmap_single(dpmaif_ctrl->dev, drb_skb->bus_addr,
				 drb_skb->data_len, DMA_TO_DEVICE);
	}

	txq->drb_wr_idx = drb_wr_idx_backup;
	atomic_set(&txq->tx_processing, 0);

	return -ENOMEM;
}

static bool t7xx_tx_lists_are_all_empty(const struct dpmaif_ctrl *dpmaif_ctrl)
{
	int i;

	for (i = 0; i < DPMAIF_TXQ_NUM; i++) {
		if (!skb_queue_empty(&dpmaif_ctrl->txq[i].tx_skb_head))
			return false;
	}

	return true;
}

/* Currently, only the default TX queue is used */
static struct dpmaif_tx_queue *t7xx_select_tx_queue(struct dpmaif_ctrl *dpmaif_ctrl)
{
	struct dpmaif_tx_queue *txq;

	txq = &dpmaif_ctrl->txq[DPMAIF_TX_DEFAULT_QUEUE];
	if (!txq->que_started)
		return NULL;

	return txq;
}

static unsigned int t7xx_txq_drb_wr_available(struct dpmaif_tx_queue *txq)
{
	return t7xx_ring_buf_rd_wr_count(txq->drb_size_cnt, txq->drb_release_rd_idx,
					 txq->drb_wr_idx, DPMAIF_WRITE);
}

static unsigned int t7xx_skb_drb_cnt(struct sk_buff *skb)
{
	/* Normal DRBs (frags data + skb linear data) + msg DRB */
	return skb_shinfo(skb)->nr_frags + 2;
}

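/* Move up to DPMAIF_SKB_TX_BURST_CNT skbs from the software queue into the DRB
 * ring. Returns the number of DRBs written if any skb was sent, otherwise zero
 * or a negative error code from t7xx_dpmaif_add_skb_to_ring().
 */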
static int t7xx_txq_burst_send_skb(struct dpmaif_tx_queue *txq)
{
	unsigned int drb_remain_cnt, i;
	unsigned int send_drb_cnt;
	int drb_cnt = 0;
	int ret = 0;

	drb_remain_cnt = t7xx_txq_drb_wr_available(txq);

	for (i = 0; i < DPMAIF_SKB_TX_BURST_CNT; i++) {
		struct sk_buff *skb;

		skb = skb_peek(&txq->tx_skb_head);
		if (!skb)
			break;

		send_drb_cnt = t7xx_skb_drb_cnt(skb);
		if (drb_remain_cnt < send_drb_cnt) {
			drb_remain_cnt = t7xx_txq_drb_wr_available(txq);
			continue;
		}

		drb_remain_cnt -= send_drb_cnt;

		ret = t7xx_dpmaif_add_skb_to_ring(txq->dpmaif_ctrl, skb);
		if (ret < 0) {
			dev_err(txq->dpmaif_ctrl->dev,
				"Failed to add skb to device's ring: %d\n", ret);
			break;
		}

		drb_cnt += send_drb_cnt;
		skb_unlink(skb, &txq->tx_skb_head);
	}

	if (drb_cnt > 0)
		return drb_cnt;

	return ret;
}

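/* Drain the software TX queues into the DRB ring and tell the HW about the new
 * DRBs, looping until the queues are empty, the push thread is asked to stop
 * or the interface leaves the powered-on state.
 */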
static void t7xx_do_tx_hw_push(struct dpmaif_ctrl *dpmaif_ctrl)
{
	bool wait_disable_sleep = true;

	do {
		struct dpmaif_tx_queue *txq;
		int drb_send_cnt;

		txq = t7xx_select_tx_queue(dpmaif_ctrl);
		if (!txq)
			return;

		drb_send_cnt = t7xx_txq_burst_send_skb(txq);
		if (drb_send_cnt <= 0) {
			usleep_range(10, 20);
			cond_resched();
			continue;
		}

		/* Wait for the PCIe resource to unlock */
		if (wait_disable_sleep) {
			if (!t7xx_pci_sleep_disable_complete(dpmaif_ctrl->t7xx_dev))
				return;

			wait_disable_sleep = false;
		}

		t7xx_dpmaif_ul_update_hw_drb_cnt(&dpmaif_ctrl->hw_info, txq->index,
						 drb_send_cnt * DPMAIF_UL_DRB_SIZE_WORD);

		cond_resched();
	} while (!t7xx_tx_lists_are_all_empty(dpmaif_ctrl) && !kthread_should_stop() &&
		 (dpmaif_ctrl->state == DPMAIF_STATE_PWRON));
}

static int t7xx_dpmaif_tx_hw_push_thread(void *arg)
{
	struct dpmaif_ctrl *dpmaif_ctrl = arg;
	int ret;

	while (!kthread_should_stop()) {
		if (t7xx_tx_lists_are_all_empty(dpmaif_ctrl) ||
		    dpmaif_ctrl->state != DPMAIF_STATE_PWRON) {
			if (wait_event_interruptible(dpmaif_ctrl->tx_wq,
						     (!t7xx_tx_lists_are_all_empty(dpmaif_ctrl) &&
						     dpmaif_ctrl->state == DPMAIF_STATE_PWRON) ||
						     kthread_should_stop()))
				continue;

			if (kthread_should_stop())
				break;
		}

		ret = pm_runtime_resume_and_get(dpmaif_ctrl->dev);
		if (ret < 0 && ret != -EACCES)
			return ret;

		t7xx_pci_disable_sleep(dpmaif_ctrl->t7xx_dev);
		t7xx_do_tx_hw_push(dpmaif_ctrl);
		t7xx_pci_enable_sleep(dpmaif_ctrl->t7xx_dev);
		pm_runtime_mark_last_busy(dpmaif_ctrl->dev);
		pm_runtime_put_autosuspend(dpmaif_ctrl->dev);
	}

	return 0;
}

int t7xx_dpmaif_tx_thread_init(struct dpmaif_ctrl *dpmaif_ctrl)
{
	init_waitqueue_head(&dpmaif_ctrl->tx_wq);
	dpmaif_ctrl->tx_thread = kthread_run(t7xx_dpmaif_tx_hw_push_thread,
					     dpmaif_ctrl, "dpmaif_tx_hw_push");
	return PTR_ERR_OR_ZERO(dpmaif_ctrl->tx_thread);
}

void t7xx_dpmaif_tx_thread_rel(struct dpmaif_ctrl *dpmaif_ctrl)
{
	if (dpmaif_ctrl->tx_thread)
		kthread_stop(dpmaif_ctrl->tx_thread);
}

/**
 * t7xx_dpmaif_tx_send_skb() - Add skb to the transmit queue.
 * @dpmaif_ctrl: Pointer to struct dpmaif_ctrl.
 * @txq_number: Queue number to xmit on.
 * @skb: Pointer to the skb to transmit.
 *
 * Add the skb to the queue of skbs to be transmitted.
 * Wake up the thread that pushes skbs from the queue to the HW.
 *
 * Return:
 * * 0		- Success.
 * * -EBUSY	- Tx budget exhausted.
 *		  In normal circumstances t7xx_dpmaif_add_skb_to_ring() must report the txq full
 *		  state to prevent this error condition.
 */
int t7xx_dpmaif_tx_send_skb(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int txq_number,
			    struct sk_buff *skb)
{
	struct dpmaif_tx_queue *txq = &dpmaif_ctrl->txq[txq_number];
	struct dpmaif_callbacks *cb = dpmaif_ctrl->callbacks;
	struct t7xx_skb_cb *skb_cb;

	if (atomic_read(&txq->tx_budget) <= t7xx_skb_drb_cnt(skb)) {
		cb->state_notify(dpmaif_ctrl->t7xx_dev, DMPAIF_TXQ_STATE_FULL, txq_number);
		return -EBUSY;
	}

	skb_cb = T7XX_SKB_CB(skb);
	skb_cb->txq_number = txq_number;
	skb_queue_tail(&txq->tx_skb_head, skb);
	wake_up(&dpmaif_ctrl->tx_wq);

	return 0;
}

void t7xx_dpmaif_irq_tx_done(struct dpmaif_ctrl *dpmaif_ctrl, unsigned int que_mask)
{
	int i;

	for (i = 0; i < DPMAIF_TXQ_NUM; i++) {
		if (que_mask & BIT(i))
			queue_work(dpmaif_ctrl->txq[i].worker, &dpmaif_ctrl->txq[i].dpmaif_tx_work);
	}
}

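/* DRB descriptors are shared with the HW and therefore live in coherent DMA
 * memory; the drb_skb records are CPU-only bookkeeping for the queued skbs.
 */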
static int t7xx_dpmaif_tx_drb_buf_init(struct dpmaif_tx_queue *txq)
{
	size_t brb_skb_size, brb_pd_size;

	brb_pd_size = DPMAIF_DRB_LIST_LEN * sizeof(struct dpmaif_drb);
	brb_skb_size = DPMAIF_DRB_LIST_LEN * sizeof(struct dpmaif_drb_skb);

	txq->drb_size_cnt = DPMAIF_DRB_LIST_LEN;

	/* For HW && AP SW */
	txq->drb_base = dma_alloc_coherent(txq->dpmaif_ctrl->dev, brb_pd_size,
					   &txq->drb_bus_addr, GFP_KERNEL | __GFP_ZERO);
	if (!txq->drb_base)
		return -ENOMEM;

	/* For AP SW to record the skb information */
	txq->drb_skb_base = devm_kzalloc(txq->dpmaif_ctrl->dev, brb_skb_size, GFP_KERNEL);
	if (!txq->drb_skb_base) {
		dma_free_coherent(txq->dpmaif_ctrl->dev, brb_pd_size,
				  txq->drb_base, txq->drb_bus_addr);
		return -ENOMEM;
	}

	return 0;
}

static void t7xx_dpmaif_tx_free_drb_skb(struct dpmaif_tx_queue *txq)
{
	struct dpmaif_drb_skb *drb_skb, *drb_skb_base = txq->drb_skb_base;
	unsigned int i;

	if (!drb_skb_base)
		return;

	for (i = 0; i < txq->drb_size_cnt; i++) {
		drb_skb = drb_skb_base + i;
		if (!drb_skb->skb)
			continue;

		if (!drb_skb->is_msg)
			dma_unmap_single(txq->dpmaif_ctrl->dev, drb_skb->bus_addr,
					 drb_skb->data_len, DMA_TO_DEVICE);

		if (drb_skb->is_last) {
			dev_kfree_skb(drb_skb->skb);
			drb_skb->skb = NULL;
		}
	}
}

static void t7xx_dpmaif_tx_drb_buf_rel(struct dpmaif_tx_queue *txq)
{
	if (txq->drb_base)
		dma_free_coherent(txq->dpmaif_ctrl->dev,
				  txq->drb_size_cnt * sizeof(struct dpmaif_drb),
				  txq->drb_base, txq->drb_bus_addr);

	t7xx_dpmaif_tx_free_drb_skb(txq);
}

/**
 * t7xx_dpmaif_txq_init() - Initialize TX queue.
 * @txq: Pointer to struct dpmaif_tx_queue.
 *
 * Initialize the TX queue data structure and allocate memory for it to use.
 *
 * Return:
 * * 0		- Success.
 * * -ERROR	- Error code from failing sub-initializations.
 */
int t7xx_dpmaif_txq_init(struct dpmaif_tx_queue *txq)
{
	int ret;

	skb_queue_head_init(&txq->tx_skb_head);
	init_waitqueue_head(&txq->req_wq);
	atomic_set(&txq->tx_budget, DPMAIF_DRB_LIST_LEN);

	ret = t7xx_dpmaif_tx_drb_buf_init(txq);
	if (ret) {
		dev_err(txq->dpmaif_ctrl->dev, "Failed to initialize DRB buffers: %d\n", ret);
		return ret;
	}

	txq->worker = alloc_ordered_workqueue("md_dpmaif_tx%d_worker",
					      WQ_MEM_RECLAIM | (txq->index ? 0 : WQ_HIGHPRI),
					      txq->index);
	if (!txq->worker)
		return -ENOMEM;

	INIT_WORK(&txq->dpmaif_tx_work, t7xx_dpmaif_tx_done);
	spin_lock_init(&txq->tx_lock);

	return 0;
}

void t7xx_dpmaif_txq_free(struct dpmaif_tx_queue *txq)
{
	if (txq->worker)
		destroy_workqueue(txq->worker);

	skb_queue_purge(&txq->tx_skb_head);
	t7xx_dpmaif_tx_drb_buf_rel(txq);
}

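/* Stop all TX queues and poll until any in-flight t7xx_dpmaif_add_skb_to_ring()
 * call has finished, giving up after DPMAIF_MAX_CHECK_COUNT iterations.
 */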
void t7xx_dpmaif_tx_stop(struct dpmaif_ctrl *dpmaif_ctrl)
{
	int i;

	for (i = 0; i < DPMAIF_TXQ_NUM; i++) {
		struct dpmaif_tx_queue *txq;
		int count = 0;

		txq = &dpmaif_ctrl->txq[i];
		txq->que_started = false;
		/* Make sure TXQ is disabled */
		smp_mb();

		/* Wait for active Tx to be done */
		while (atomic_read(&txq->tx_processing)) {
			if (++count >= DPMAIF_MAX_CHECK_COUNT) {
				dev_err(dpmaif_ctrl->dev, "TX queue stop failed\n");
				break;
			}
		}
	}
}

static void t7xx_dpmaif_txq_flush_rel(struct dpmaif_tx_queue *txq)
{
	txq->que_started = false;

	cancel_work_sync(&txq->dpmaif_tx_work);
	flush_work(&txq->dpmaif_tx_work);
	t7xx_dpmaif_tx_free_drb_skb(txq);

	txq->drb_rd_idx = 0;
	txq->drb_wr_idx = 0;
	txq->drb_release_rd_idx = 0;
}

void t7xx_dpmaif_tx_clear(struct dpmaif_ctrl *dpmaif_ctrl)
{
	int i;

	for (i = 0; i < DPMAIF_TXQ_NUM; i++)
		t7xx_dpmaif_txq_flush_rel(&dpmaif_ctrl->txq[i]);
}