// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021, MediaTek Inc.
 * Copyright (c) 2021-2022, Intel Corporation.
 *
 * Authors:
 *  Amir Hanania <amir.hanania@intel.com>
 *  Haijun Liu <haijun.liu@mediatek.com>
 *  Moises Veleta <moises.veleta@intel.com>
 *  Ricardo Martinez <ricardo.martinez@linux.intel.com>
 *  Sreehari Kancharla <sreehari.kancharla@intel.com>
 *
 * Contributors:
 *  Andy Shevchenko <andriy.shevchenko@linux.intel.com>
 *  Chiranjeevi Rapolu <chiranjeevi.rapolu@intel.com>
 *  Eliot Lee <eliot.lee@intel.com>
 */
#include <linux/bits.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/dma-direction.h>
#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/iopoll.h>
#include <linux/irqreturn.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/workqueue.h>

#include "t7xx_cldma.h"
#include "t7xx_hif_cldma.h"
#include "t7xx_mhccif.h"
#include "t7xx_modem_ops.h"
#include "t7xx_pci.h"
#include "t7xx_pcie_mac.h"
#include "t7xx_port_proxy.h"
#include "t7xx_reg.h"
#include "t7xx_state_monitor.h"
#define MAX_TX_BUDGET			16
#define MAX_RX_BUDGET			16

#define CHECK_Q_STOP_TIMEOUT_US		1000000
#define CHECK_Q_STOP_STEP_US		10000

#define CLDMA_JUMBO_BUFF_SZ		(63 * 1024 + sizeof(struct ccci_header))
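/* Sizing note (illustrative, not from the original source): the jumbo RX
 * buffer is 63 KiB of payload plus the CCCI header that precedes each packet:
 *
 *	63 * 1024 = 64512 bytes
 *	+ sizeof(struct ccci_header)
 *
 * which keeps the whole buffer describable by the 16-bit GPD length fields
 * (data_buff_len and rx_data_allow_len are __le16 in this file).
 */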
static void md_cd_queue_struct_reset(struct cldma_queue *queue, struct cldma_ctrl *md_ctrl,
				     enum mtk_txrx tx_rx, unsigned int index)
{
	queue->dir = tx_rx;
	queue->index = index;
	queue->md_ctrl = md_ctrl;
	queue->tr_ring = NULL;
	queue->tr_done = NULL;
	queue->tx_next = NULL;
}

static void md_cd_queue_struct_init(struct cldma_queue *queue, struct cldma_ctrl *md_ctrl,
				    enum mtk_txrx tx_rx, unsigned int index)
{
	md_cd_queue_struct_reset(queue, md_ctrl, tx_rx, index);
	init_waitqueue_head(&queue->req_wq);
	spin_lock_init(&queue->ring_lock);
}
static void t7xx_cldma_gpd_set_data_ptr(struct cldma_gpd *gpd, dma_addr_t data_ptr)
{
	gpd->data_buff_bd_ptr_h = cpu_to_le32(upper_32_bits(data_ptr));
	gpd->data_buff_bd_ptr_l = cpu_to_le32(lower_32_bits(data_ptr));
}

static void t7xx_cldma_gpd_set_next_ptr(struct cldma_gpd *gpd, dma_addr_t next_ptr)
{
	gpd->next_gpd_ptr_h = cpu_to_le32(upper_32_bits(next_ptr));
	gpd->next_gpd_ptr_l = cpu_to_le32(lower_32_bits(next_ptr));
}
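/* Illustrative sketch (not driver code): upper_32_bits()/lower_32_bits()
 * split a 64-bit DMA address into the two 32-bit halves the GPD layout
 * expects, e.g.:
 *
 *	dma_addr_t addr = 0x0000000123456789ULL;
 *	u32 hi = upper_32_bits(addr);	// 0x00000001
 *	u32 lo = lower_32_bits(addr);	// 0x23456789
 *
 * Both halves are stored little-endian (cpu_to_le32) because the GPD is
 * parsed by the device, not by the CPU.
 */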
static int t7xx_cldma_alloc_and_map_skb(struct cldma_ctrl *md_ctrl, struct cldma_request *req,
					size_t size, gfp_t gfp_mask)
{
	req->skb = __dev_alloc_skb(size, gfp_mask);
	if (!req->skb)
		return -ENOMEM;

	req->mapped_buff = dma_map_single(md_ctrl->dev, req->skb->data, size, DMA_FROM_DEVICE);
	if (dma_mapping_error(md_ctrl->dev, req->mapped_buff)) {
		dev_kfree_skb_any(req->skb);
		req->skb = NULL;
		req->mapped_buff = 0;
		dev_err(md_ctrl->dev, "DMA mapping failed\n");
		return -ENOMEM;
	}

	return 0;
}
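/* Usage sketch (illustrative only): callers pass the ring's pkt_size and a
 * context-appropriate gfp_mask, then publish the mapping to the hardware
 * through the GPD:
 *
 *	if (!t7xx_cldma_alloc_and_map_skb(md_ctrl, req, ring->pkt_size, GFP_KERNEL))
 *		t7xx_cldma_gpd_set_data_ptr(req->gpd, req->mapped_buff);
 *
 * GFP_ATOMIC is used instead when called under a spinlock, as in
 * t7xx_cldma_clear_rxq() below.
 */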
static int t7xx_cldma_gpd_rx_from_q(struct cldma_queue *queue, int budget, bool *over_budget)
{
	struct cldma_ctrl *md_ctrl = queue->md_ctrl;
	unsigned int hwo_polling_count = 0;
	struct t7xx_cldma_hw *hw_info;
	bool rx_not_done = true;
	unsigned long flags;
	int count = 0;

	hw_info = &md_ctrl->hw_info;

	do {
		struct cldma_request *req;
		struct cldma_gpd *gpd;
		struct sk_buff *skb;
		int ret;

		req = queue->tr_done;
		if (!req)
			return -ENODATA;

		gpd = req->gpd;
		if ((gpd->flags & GPD_FLAGS_HWO) || !req->skb) {
			dma_addr_t gpd_addr;

			if (!pci_device_is_present(to_pci_dev(md_ctrl->dev))) {
				dev_err(md_ctrl->dev, "PCIe Link disconnected\n");
				return -ENODEV;
			}

			gpd_addr = ioread64_lo_hi(hw_info->ap_pdn_base +
						  REG_CLDMA_DL_CURRENT_ADDRL_0 +
						  queue->index * sizeof(u64));
			if (req->gpd_addr == gpd_addr || hwo_polling_count++ >= 100)
				return 0;

			udelay(1);
			continue;
		}

		hwo_polling_count = 0;
		skb = req->skb;

		if (req->mapped_buff) {
			dma_unmap_single(md_ctrl->dev, req->mapped_buff,
					 queue->tr_ring->pkt_size, DMA_FROM_DEVICE);
			req->mapped_buff = 0;
		}

		skb->len = 0;
		skb_reset_tail_pointer(skb);
		skb_put(skb, le16_to_cpu(gpd->data_buff_len));

		ret = md_ctrl->recv_skb(queue, skb);
		/* Break processing, will try again later */
		if (ret < 0)
			return ret;

		req->skb = NULL;
		t7xx_cldma_gpd_set_data_ptr(gpd, 0);

		spin_lock_irqsave(&queue->ring_lock, flags);
		queue->tr_done = list_next_entry_circular(req, &queue->tr_ring->gpd_ring, entry);
		spin_unlock_irqrestore(&queue->ring_lock, flags);
		req = queue->rx_refill;

		ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, queue->tr_ring->pkt_size, GFP_KERNEL);
		if (ret)
			return ret;

		gpd = req->gpd;
		t7xx_cldma_gpd_set_data_ptr(gpd, req->mapped_buff);
		gpd->data_buff_len = 0;
		gpd->flags = GPD_FLAGS_IOC | GPD_FLAGS_HWO;

		spin_lock_irqsave(&queue->ring_lock, flags);
		queue->rx_refill = list_next_entry_circular(req, &queue->tr_ring->gpd_ring, entry);
		spin_unlock_irqrestore(&queue->ring_lock, flags);

		rx_not_done = ++count < budget || !need_resched();
	} while (rx_not_done);

	*over_budget = true;
	return 0;
}
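/* The loop above stops either when the device still owns the GPD (HWO set
 * and the polling limit is reached) or when the budget is spent and the
 * scheduler wants the CPU back. The exit condition, shown for clarity:
 *
 *	rx_not_done = (++count < budget) || !need_resched();
 *
 * so a queue may legitimately exceed 'budget' as long as nothing else is
 * waiting to run on this CPU.
 */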
static int t7xx_cldma_gpd_rx_collect(struct cldma_queue *queue, int budget)
{
	struct cldma_ctrl *md_ctrl = queue->md_ctrl;
	struct t7xx_cldma_hw *hw_info;
	unsigned int pending_rx_int;
	bool over_budget = false;
	unsigned long flags;
	int ret;

	hw_info = &md_ctrl->hw_info;

	do {
		ret = t7xx_cldma_gpd_rx_from_q(queue, budget, &over_budget);
		if (ret == -ENODATA)
			return 0;
		else if (ret)
			return ret;

		pending_rx_int = 0;

		spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
		if (md_ctrl->rxq_active & BIT(queue->index)) {
			if (!t7xx_cldma_hw_queue_status(hw_info, queue->index, MTK_RX))
				t7xx_cldma_hw_resume_queue(hw_info, queue->index, MTK_RX);

			pending_rx_int = t7xx_cldma_hw_int_status(hw_info, BIT(queue->index),
								  MTK_RX);
			if (pending_rx_int) {
				t7xx_cldma_hw_rx_done(hw_info, pending_rx_int);

				if (over_budget) {
					spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
					return -EAGAIN;
				}
			}
		}
		spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
	} while (pending_rx_int);

	return 0;
}
static void t7xx_cldma_rx_done(struct work_struct *work)
{
	struct cldma_queue *queue = container_of(work, struct cldma_queue, cldma_work);
	struct cldma_ctrl *md_ctrl = queue->md_ctrl;
	int value;

	value = t7xx_cldma_gpd_rx_collect(queue, queue->budget);
	if (value && md_ctrl->rxq_active & BIT(queue->index)) {
		queue_work(queue->worker, &queue->cldma_work);
		return;
	}

	t7xx_cldma_clear_ip_busy(&md_ctrl->hw_info);
	t7xx_cldma_hw_irq_en_txrx(&md_ctrl->hw_info, queue->index, MTK_RX);
	t7xx_cldma_hw_irq_en_eq(&md_ctrl->hw_info, queue->index, MTK_RX);
	pm_runtime_mark_last_busy(md_ctrl->dev);
	pm_runtime_put_autosuspend(md_ctrl->dev);
}
static int t7xx_cldma_gpd_tx_collect(struct cldma_queue *queue)
{
	struct cldma_ctrl *md_ctrl = queue->md_ctrl;
	unsigned int dma_len, count = 0;
	struct cldma_request *req;
	struct cldma_gpd *gpd;
	struct sk_buff *skb;
	unsigned long flags;
	dma_addr_t dma_free;

	while (!kthread_should_stop()) {
		spin_lock_irqsave(&queue->ring_lock, flags);
		req = queue->tr_done;
		if (!req) {
			spin_unlock_irqrestore(&queue->ring_lock, flags);
			break;
		}
		gpd = req->gpd;
		if ((gpd->flags & GPD_FLAGS_HWO) || !req->skb) {
			spin_unlock_irqrestore(&queue->ring_lock, flags);
			break;
		}
		queue->budget++;
		dma_free = req->mapped_buff;
		dma_len = le16_to_cpu(gpd->data_buff_len);
		skb = req->skb;
		req->skb = NULL;
		queue->tr_done = list_next_entry_circular(req, &queue->tr_ring->gpd_ring, entry);
		spin_unlock_irqrestore(&queue->ring_lock, flags);

		count++;
		dma_unmap_single(md_ctrl->dev, dma_free, dma_len, DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
	}

	if (count)
		wake_up_nr(&queue->req_wq, count);

	return count;
}
static void t7xx_cldma_txq_empty_hndl(struct cldma_queue *queue)
{
	struct cldma_ctrl *md_ctrl = queue->md_ctrl;
	struct cldma_request *req;
	dma_addr_t ul_curr_addr;
	unsigned long flags;
	bool pending_gpd;

	if (!(md_ctrl->txq_active & BIT(queue->index)))
		return;

	spin_lock_irqsave(&queue->ring_lock, flags);
	req = list_prev_entry_circular(queue->tx_next, &queue->tr_ring->gpd_ring, entry);
	spin_unlock_irqrestore(&queue->ring_lock, flags);

	pending_gpd = (req->gpd->flags & GPD_FLAGS_HWO) && req->skb;

	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
	if (pending_gpd) {
		struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;

		/* Check current processing TGPD, 64-bit address is in a table by Q index */
		ul_curr_addr = ioread64_lo_hi(hw_info->ap_pdn_base + REG_CLDMA_UL_CURRENT_ADDRL_0 +
					      queue->index * sizeof(u64));
		if (req->gpd_addr != ul_curr_addr) {
			spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
			dev_err(md_ctrl->dev, "CLDMA%d queue %d is not empty\n",
				md_ctrl->hif_id, queue->index);
			return;
		}

		t7xx_cldma_hw_resume_queue(hw_info, queue->index, MTK_TX);
	}
	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
}
static void t7xx_cldma_tx_done(struct work_struct *work)
{
	struct cldma_queue *queue = container_of(work, struct cldma_queue, cldma_work);
	struct cldma_ctrl *md_ctrl = queue->md_ctrl;
	struct t7xx_cldma_hw *hw_info;
	unsigned int l2_tx_int;
	unsigned long flags;

	hw_info = &md_ctrl->hw_info;
	t7xx_cldma_gpd_tx_collect(queue);
	l2_tx_int = t7xx_cldma_hw_int_status(hw_info, BIT(queue->index) | EQ_STA_BIT(queue->index),
					     MTK_TX);
	if (l2_tx_int & EQ_STA_BIT(queue->index)) {
		t7xx_cldma_hw_tx_done(hw_info, EQ_STA_BIT(queue->index));
		t7xx_cldma_txq_empty_hndl(queue);
	}

	if (l2_tx_int & BIT(queue->index)) {
		t7xx_cldma_hw_tx_done(hw_info, BIT(queue->index));
		queue_work(queue->worker, &queue->cldma_work);
		return;
	}

	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
	if (md_ctrl->txq_active & BIT(queue->index)) {
		t7xx_cldma_clear_ip_busy(hw_info);
		t7xx_cldma_hw_irq_en_eq(hw_info, queue->index, MTK_TX);
		t7xx_cldma_hw_irq_en_txrx(hw_info, queue->index, MTK_TX);
	}
	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);

	pm_runtime_mark_last_busy(md_ctrl->dev);
	pm_runtime_put_autosuspend(md_ctrl->dev);
}
static void t7xx_cldma_ring_free(struct cldma_ctrl *md_ctrl,
				 struct cldma_ring *ring, enum dma_data_direction tx_rx)
{
	struct cldma_request *req_cur, *req_next;

	list_for_each_entry_safe(req_cur, req_next, &ring->gpd_ring, entry) {
		if (req_cur->mapped_buff && req_cur->skb) {
			dma_unmap_single(md_ctrl->dev, req_cur->mapped_buff,
					 ring->pkt_size, tx_rx);
			req_cur->mapped_buff = 0;
		}

		dev_kfree_skb_any(req_cur->skb);

		if (req_cur->gpd)
			dma_pool_free(md_ctrl->gpd_dmapool, req_cur->gpd, req_cur->gpd_addr);

		list_del(&req_cur->entry);
		kfree(req_cur);
	}
}
static struct cldma_request *t7xx_alloc_rx_request(struct cldma_ctrl *md_ctrl, size_t pkt_size)
{
	struct cldma_request *req;
	int val;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return NULL;

	req->gpd = dma_pool_zalloc(md_ctrl->gpd_dmapool, GFP_KERNEL, &req->gpd_addr);
	if (!req->gpd)
		goto err_free_req;

	val = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, pkt_size, GFP_KERNEL);
	if (val)
		goto err_free_pool;

	return req;

err_free_pool:
	dma_pool_free(md_ctrl->gpd_dmapool, req->gpd, req->gpd_addr);

err_free_req:
	kfree(req);

	return NULL;
}
static int t7xx_cldma_rx_ring_init(struct cldma_ctrl *md_ctrl, struct cldma_ring *ring)
{
	struct cldma_request *req;
	struct cldma_gpd *gpd;
	int i;

	INIT_LIST_HEAD(&ring->gpd_ring);
	ring->length = MAX_RX_BUDGET;

	for (i = 0; i < ring->length; i++) {
		req = t7xx_alloc_rx_request(md_ctrl, ring->pkt_size);
		if (!req) {
			t7xx_cldma_ring_free(md_ctrl, ring, DMA_FROM_DEVICE);
			return -ENOMEM;
		}

		gpd = req->gpd;
		t7xx_cldma_gpd_set_data_ptr(gpd, req->mapped_buff);
		gpd->rx_data_allow_len = cpu_to_le16(ring->pkt_size);
		gpd->flags = GPD_FLAGS_IOC | GPD_FLAGS_HWO;
		INIT_LIST_HEAD(&req->entry);
		list_add_tail(&req->entry, &ring->gpd_ring);
	}

	/* Link previous GPD to next GPD, circular */
	list_for_each_entry(req, &ring->gpd_ring, entry) {
		t7xx_cldma_gpd_set_next_ptr(gpd, req->gpd_addr);
		gpd = req->gpd;
	}

	return 0;
}
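/* Note on the linking pass above: 'gpd' still points at the last GPD when
 * list_for_each_entry() restarts at the head, so the first iteration links
 * last -> first and each later one links N -> N+1, which is what makes the
 * ring circular. Sketch of the result for a 4-entry ring:
 *
 *	GPD0 -> GPD1 -> GPD2 -> GPD3 --+
 *	  ^----------------------------+
 */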
static struct cldma_request *t7xx_alloc_tx_request(struct cldma_ctrl *md_ctrl)
{
	struct cldma_request *req;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return NULL;

	req->gpd = dma_pool_zalloc(md_ctrl->gpd_dmapool, GFP_KERNEL, &req->gpd_addr);
	if (!req->gpd) {
		kfree(req);
		return NULL;
	}

	return req;
}

static int t7xx_cldma_tx_ring_init(struct cldma_ctrl *md_ctrl, struct cldma_ring *ring)
{
	struct cldma_request *req;
	struct cldma_gpd *gpd;
	int i;

	INIT_LIST_HEAD(&ring->gpd_ring);
	ring->length = MAX_TX_BUDGET;

	for (i = 0; i < ring->length; i++) {
		req = t7xx_alloc_tx_request(md_ctrl);
		if (!req) {
			t7xx_cldma_ring_free(md_ctrl, ring, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		gpd = req->gpd;
		gpd->flags = GPD_FLAGS_IOC;
		INIT_LIST_HEAD(&req->entry);
		list_add_tail(&req->entry, &ring->gpd_ring);
	}

	/* Link previous GPD to next GPD, circular */
	list_for_each_entry(req, &ring->gpd_ring, entry) {
		t7xx_cldma_gpd_set_next_ptr(gpd, req->gpd_addr);
		gpd = req->gpd;
	}

	return 0;
}
/**
 * t7xx_cldma_q_reset() - Reset CLDMA request pointers to their initial values.
 * @queue: Pointer to the queue structure.
 *
 * Called with ring_lock (unless called during initialization phase)
 */
static void t7xx_cldma_q_reset(struct cldma_queue *queue)
{
	struct cldma_request *req;

	req = list_first_entry(&queue->tr_ring->gpd_ring, struct cldma_request, entry);
	queue->tr_done = req;
	queue->budget = queue->tr_ring->length;

	if (queue->dir == MTK_TX)
		queue->tx_next = req;
	else
		queue->rx_refill = req;
}
static void t7xx_cldma_rxq_init(struct cldma_queue *queue)
{
	struct cldma_ctrl *md_ctrl = queue->md_ctrl;

	queue->dir = MTK_RX;
	queue->tr_ring = &md_ctrl->rx_ring[queue->index];
	t7xx_cldma_q_reset(queue);
}

static void t7xx_cldma_txq_init(struct cldma_queue *queue)
{
	struct cldma_ctrl *md_ctrl = queue->md_ctrl;

	queue->dir = MTK_TX;
	queue->tr_ring = &md_ctrl->tx_ring[queue->index];
	t7xx_cldma_q_reset(queue);
}

static void t7xx_cldma_enable_irq(struct cldma_ctrl *md_ctrl)
{
	t7xx_pcie_mac_set_int(md_ctrl->t7xx_dev, md_ctrl->hw_info.phy_interrupt_id);
}

static void t7xx_cldma_disable_irq(struct cldma_ctrl *md_ctrl)
{
	t7xx_pcie_mac_clear_int(md_ctrl->t7xx_dev, md_ctrl->hw_info.phy_interrupt_id);
}
static void t7xx_cldma_irq_work_cb(struct cldma_ctrl *md_ctrl)
{
	unsigned long l2_tx_int_msk, l2_rx_int_msk, l2_tx_int, l2_rx_int, val;
	struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
	int i;

	/* L2 raw interrupt status */
	l2_tx_int = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2TISAR0);
	l2_rx_int = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2RISAR0);
	l2_tx_int_msk = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L2TIMR0);
	l2_rx_int_msk = ioread32(hw_info->ap_ao_base + REG_CLDMA_L2RIMR0);
	l2_tx_int &= ~l2_tx_int_msk;
	l2_rx_int &= ~l2_rx_int_msk;

	if (l2_tx_int) {
		if (l2_tx_int & (TQ_ERR_INT_BITMASK | TQ_ACTIVE_START_ERR_INT_BITMASK)) {
			/* Read and clear L3 TX interrupt status */
			val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3TISAR0);
			iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3TISAR0);
			val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3TISAR1);
			iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3TISAR1);
		}

		t7xx_cldma_hw_tx_done(hw_info, l2_tx_int);
		if (l2_tx_int & (TXRX_STATUS_BITMASK | EMPTY_STATUS_BITMASK)) {
			for_each_set_bit(i, &l2_tx_int, L2_INT_BIT_COUNT) {
				if (i < CLDMA_TXQ_NUM) {
					pm_runtime_get(md_ctrl->dev);
					t7xx_cldma_hw_irq_dis_eq(hw_info, i, MTK_TX);
					t7xx_cldma_hw_irq_dis_txrx(hw_info, i, MTK_TX);
					queue_work(md_ctrl->txq[i].worker,
						   &md_ctrl->txq[i].cldma_work);
				} else {
					t7xx_cldma_txq_empty_hndl(&md_ctrl->txq[i - CLDMA_TXQ_NUM]);
				}
			}
		}
	}

	if (l2_rx_int) {
		if (l2_rx_int & (RQ_ERR_INT_BITMASK | RQ_ACTIVE_START_ERR_INT_BITMASK)) {
			/* Read and clear L3 RX interrupt status */
			val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3RISAR0);
			iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3RISAR0);
			val = ioread32(hw_info->ap_pdn_base + REG_CLDMA_L3RISAR1);
			iowrite32(val, hw_info->ap_pdn_base + REG_CLDMA_L3RISAR1);
		}

		t7xx_cldma_hw_rx_done(hw_info, l2_rx_int);
		if (l2_rx_int & (TXRX_STATUS_BITMASK | EMPTY_STATUS_BITMASK)) {
			l2_rx_int |= l2_rx_int >> CLDMA_RXQ_NUM;
			for_each_set_bit(i, &l2_rx_int, CLDMA_RXQ_NUM) {
				pm_runtime_get(md_ctrl->dev);
				t7xx_cldma_hw_irq_dis_eq(hw_info, i, MTK_RX);
				t7xx_cldma_hw_irq_dis_txrx(hw_info, i, MTK_RX);
				queue_work(md_ctrl->rxq[i].worker, &md_ctrl->rxq[i].cldma_work);
			}
		}
	}
}
static bool t7xx_cldma_qs_are_active(struct cldma_ctrl *md_ctrl)
{
	struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
	unsigned int tx_active;
	unsigned int rx_active;

	if (!pci_device_is_present(to_pci_dev(md_ctrl->dev)))
		return false;

	tx_active = t7xx_cldma_hw_queue_status(hw_info, CLDMA_ALL_Q, MTK_TX);
	rx_active = t7xx_cldma_hw_queue_status(hw_info, CLDMA_ALL_Q, MTK_RX);

	return tx_active || rx_active;
}
/**
 * t7xx_cldma_stop() - Stop CLDMA.
 * @md_ctrl: CLDMA context structure.
 *
 * Stop TX and RX queues. Disable L1 and L2 interrupts.
 * Clear status registers.
 *
 * Return:
 * * 0		- Success.
 * * -ERROR	- Error code from polling cldma_queues_active.
 */
int t7xx_cldma_stop(struct cldma_ctrl *md_ctrl)
{
	struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
	bool active;
	int i, ret;

	md_ctrl->rxq_active = 0;
	t7xx_cldma_hw_stop_all_qs(hw_info, MTK_RX);
	md_ctrl->txq_active = 0;
	t7xx_cldma_hw_stop_all_qs(hw_info, MTK_TX);
	md_ctrl->txq_started = 0;
	t7xx_cldma_disable_irq(md_ctrl);
	t7xx_cldma_hw_stop(hw_info, MTK_RX);
	t7xx_cldma_hw_stop(hw_info, MTK_TX);
	t7xx_cldma_hw_tx_done(hw_info, CLDMA_L2TISAR0_ALL_INT_MASK);
	t7xx_cldma_hw_rx_done(hw_info, CLDMA_L2RISAR0_ALL_INT_MASK);

	if (md_ctrl->is_late_init) {
		for (i = 0; i < CLDMA_TXQ_NUM; i++)
			flush_work(&md_ctrl->txq[i].cldma_work);

		for (i = 0; i < CLDMA_RXQ_NUM; i++)
			flush_work(&md_ctrl->rxq[i].cldma_work);
	}

	ret = read_poll_timeout(t7xx_cldma_qs_are_active, active, !active, CHECK_Q_STOP_STEP_US,
				CHECK_Q_STOP_TIMEOUT_US, true, md_ctrl);
	if (ret)
		dev_err(md_ctrl->dev, "Could not stop CLDMA%d queues", md_ctrl->hif_id);

	return ret;
}
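/* read_poll_timeout() above expands to roughly the following, shown only to
 * clarify the polling semantics (the real macro also handles timekeeping):
 *
 *	do {
 *		usleep_range(...);		// CHECK_Q_STOP_STEP_US, slept
 *						// before the first read ('true')
 *		active = t7xx_cldma_qs_are_active(md_ctrl);
 *	} while (active && !timed_out);		// up to CHECK_Q_STOP_TIMEOUT_US
 *
 * returning 0 once the queues go idle, or -ETIMEDOUT otherwise.
 */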
static void t7xx_cldma_late_release(struct cldma_ctrl *md_ctrl)
{
	int i;

	if (!md_ctrl->is_late_init)
		return;

	for (i = 0; i < CLDMA_TXQ_NUM; i++)
		t7xx_cldma_ring_free(md_ctrl, &md_ctrl->tx_ring[i], DMA_TO_DEVICE);

	for (i = 0; i < CLDMA_RXQ_NUM; i++)
		t7xx_cldma_ring_free(md_ctrl, &md_ctrl->rx_ring[i], DMA_FROM_DEVICE);

	dma_pool_destroy(md_ctrl->gpd_dmapool);
	md_ctrl->gpd_dmapool = NULL;
	md_ctrl->is_late_init = false;
}
void t7xx_cldma_reset(struct cldma_ctrl *md_ctrl)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
	md_ctrl->txq_active = 0;
	md_ctrl->rxq_active = 0;
	t7xx_cldma_disable_irq(md_ctrl);
	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);

	for (i = 0; i < CLDMA_TXQ_NUM; i++) {
		cancel_work_sync(&md_ctrl->txq[i].cldma_work);

		spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
		md_cd_queue_struct_reset(&md_ctrl->txq[i], md_ctrl, MTK_TX, i);
		spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
	}

	for (i = 0; i < CLDMA_RXQ_NUM; i++) {
		cancel_work_sync(&md_ctrl->rxq[i].cldma_work);

		spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
		md_cd_queue_struct_reset(&md_ctrl->rxq[i], md_ctrl, MTK_RX, i);
		spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
	}

	t7xx_cldma_late_release(md_ctrl);
}
/**
 * t7xx_cldma_start() - Start CLDMA.
 * @md_ctrl: CLDMA context structure.
 *
 * Set TX/RX start address.
 * Start all RX queues and enable L2 interrupt.
 */
void t7xx_cldma_start(struct cldma_ctrl *md_ctrl)
{
	unsigned long flags;

	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
	if (md_ctrl->is_late_init) {
		struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
		int i;

		t7xx_cldma_enable_irq(md_ctrl);

		for (i = 0; i < CLDMA_TXQ_NUM; i++) {
			if (md_ctrl->txq[i].tr_done)
				t7xx_cldma_hw_set_start_addr(hw_info, i,
							     md_ctrl->txq[i].tr_done->gpd_addr,
							     MTK_TX);
		}

		for (i = 0; i < CLDMA_RXQ_NUM; i++) {
			if (md_ctrl->rxq[i].tr_done)
				t7xx_cldma_hw_set_start_addr(hw_info, i,
							     md_ctrl->rxq[i].tr_done->gpd_addr,
							     MTK_RX);
		}

		/* Enable L2 interrupt */
		t7xx_cldma_hw_start_queue(hw_info, CLDMA_ALL_Q, MTK_RX);
		t7xx_cldma_hw_start(hw_info);
		md_ctrl->txq_started = 0;
		md_ctrl->txq_active |= TXRX_STATUS_BITMASK;
		md_ctrl->rxq_active |= TXRX_STATUS_BITMASK;
	}
	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
}
static void t7xx_cldma_clear_txq(struct cldma_ctrl *md_ctrl, int qnum)
{
	struct cldma_queue *txq = &md_ctrl->txq[qnum];
	struct cldma_request *req;
	struct cldma_gpd *gpd;
	unsigned long flags;

	spin_lock_irqsave(&txq->ring_lock, flags);
	t7xx_cldma_q_reset(txq);
	list_for_each_entry(req, &txq->tr_ring->gpd_ring, entry) {
		gpd = req->gpd;
		gpd->flags &= ~GPD_FLAGS_HWO;
		t7xx_cldma_gpd_set_data_ptr(gpd, 0);
		gpd->data_buff_len = 0;
		dev_kfree_skb_any(req->skb);
		req->skb = NULL;
	}
	spin_unlock_irqrestore(&txq->ring_lock, flags);
}
static int t7xx_cldma_clear_rxq(struct cldma_ctrl *md_ctrl, int qnum)
{
	struct cldma_queue *rxq = &md_ctrl->rxq[qnum];
	struct cldma_request *req;
	struct cldma_gpd *gpd;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&rxq->ring_lock, flags);
	t7xx_cldma_q_reset(rxq);
	list_for_each_entry(req, &rxq->tr_ring->gpd_ring, entry) {
		gpd = req->gpd;
		gpd->flags = GPD_FLAGS_IOC | GPD_FLAGS_HWO;
		gpd->data_buff_len = 0;

		if (req->skb) {
			req->skb->len = 0;
			skb_reset_tail_pointer(req->skb);
		}
	}

	list_for_each_entry(req, &rxq->tr_ring->gpd_ring, entry) {
		if (req->skb)
			continue;

		ret = t7xx_cldma_alloc_and_map_skb(md_ctrl, req, rxq->tr_ring->pkt_size, GFP_ATOMIC);
		if (ret)
			break;

		t7xx_cldma_gpd_set_data_ptr(req->gpd, req->mapped_buff);
	}
	spin_unlock_irqrestore(&rxq->ring_lock, flags);

	return ret;
}
void t7xx_cldma_clear_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx)
{
	int i;

	if (tx_rx == MTK_TX) {
		for (i = 0; i < CLDMA_TXQ_NUM; i++)
			t7xx_cldma_clear_txq(md_ctrl, i);
	} else {
		for (i = 0; i < CLDMA_RXQ_NUM; i++)
			t7xx_cldma_clear_rxq(md_ctrl, i);
	}
}

void t7xx_cldma_stop_all_qs(struct cldma_ctrl *md_ctrl, enum mtk_txrx tx_rx)
{
	struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
	unsigned long flags;

	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
	t7xx_cldma_hw_irq_dis_eq(hw_info, CLDMA_ALL_Q, tx_rx);
	t7xx_cldma_hw_irq_dis_txrx(hw_info, CLDMA_ALL_Q, tx_rx);
	if (tx_rx == MTK_RX)
		md_ctrl->rxq_active &= ~TXRX_STATUS_BITMASK;
	else
		md_ctrl->txq_active &= ~TXRX_STATUS_BITMASK;
	t7xx_cldma_hw_stop_all_qs(hw_info, tx_rx);
	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
}
static int t7xx_cldma_gpd_handle_tx_request(struct cldma_queue *queue, struct cldma_request *tx_req,
					    struct sk_buff *skb)
{
	struct cldma_ctrl *md_ctrl = queue->md_ctrl;
	struct cldma_gpd *gpd = tx_req->gpd;
	unsigned long flags;

	/* Update GPD */
	tx_req->mapped_buff = dma_map_single(md_ctrl->dev, skb->data, skb->len, DMA_TO_DEVICE);

	if (dma_mapping_error(md_ctrl->dev, tx_req->mapped_buff)) {
		dev_err(md_ctrl->dev, "DMA mapping failed\n");
		return -ENOMEM;
	}

	t7xx_cldma_gpd_set_data_ptr(gpd, tx_req->mapped_buff);
	gpd->data_buff_len = cpu_to_le16(skb->len);

	/* This lock must cover TGPD setting, as even without a resume operation,
	 * CLDMA can send next HWO=1 if last TGPD just finished.
	 */
	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
	if (md_ctrl->txq_active & BIT(queue->index))
		gpd->flags |= GPD_FLAGS_HWO;

	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);

	tx_req->skb = skb;

	return 0;
}
/* Called with cldma_lock */
static void t7xx_cldma_hw_start_send(struct cldma_ctrl *md_ctrl, int qno,
				     struct cldma_request *prev_req)
{
	struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;

	/* Check whether the device was powered off (CLDMA start address is not set) */
	if (!t7xx_cldma_tx_addr_is_set(hw_info, qno)) {
		t7xx_cldma_hw_init(hw_info);
		t7xx_cldma_hw_set_start_addr(hw_info, qno, prev_req->gpd_addr, MTK_TX);
		md_ctrl->txq_started &= ~BIT(qno);
	}

	if (!t7xx_cldma_hw_queue_status(hw_info, qno, MTK_TX)) {
		if (md_ctrl->txq_started & BIT(qno))
			t7xx_cldma_hw_resume_queue(hw_info, qno, MTK_TX);
		else
			t7xx_cldma_hw_start_queue(hw_info, qno, MTK_TX);

		md_ctrl->txq_started |= BIT(qno);
	}
}
/**
 * t7xx_cldma_set_recv_skb() - Set the callback to handle RX packets.
 * @md_ctrl: CLDMA context structure.
 * @recv_skb: Receiving skb callback.
 */
void t7xx_cldma_set_recv_skb(struct cldma_ctrl *md_ctrl,
			     int (*recv_skb)(struct cldma_queue *queue, struct sk_buff *skb))
{
	md_ctrl->recv_skb = recv_skb;
}
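/* Illustrative callback sketch (hypothetical consumer, not from this file):
 * a recv_skb implementation takes ownership of the skb and returns 0, or a
 * negative errno to make the RX path stop and retry later (see the "Break
 * processing, will try again later" comment in t7xx_cldma_gpd_rx_from_q()):
 *
 *	static int my_recv_skb(struct cldma_queue *queue, struct sk_buff *skb)
 *	{
 *		if (!my_can_queue(queue))	// hypothetical backpressure check
 *			return -EBUSY;		// RX worker will re-poll
 *		my_deliver(skb);		// consumer now owns the skb
 *		return 0;
 *	}
 */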
/**
 * t7xx_cldma_send_skb() - Send control data to modem.
 * @md_ctrl: CLDMA context structure.
 * @qno: Queue number.
 * @skb: Socket buffer.
 *
 * Return:
 * * 0		- Success.
 * * -ENOMEM	- Allocation failure.
 * * -EINVAL	- Invalid queue request.
 * * -EIO	- Queue is not active.
 * * -ETIMEDOUT	- Timeout waiting for the device to wake up.
 */
int t7xx_cldma_send_skb(struct cldma_ctrl *md_ctrl, int qno, struct sk_buff *skb)
{
	struct cldma_request *tx_req;
	struct cldma_queue *queue;
	unsigned long flags;
	int ret = 0;

	if (qno >= CLDMA_TXQ_NUM)
		return -EINVAL;

	ret = pm_runtime_resume_and_get(md_ctrl->dev);
	if (ret < 0 && ret != -EACCES)
		return ret;

	t7xx_pci_disable_sleep(md_ctrl->t7xx_dev);
	queue = &md_ctrl->txq[qno];

	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
	if (!(md_ctrl->txq_active & BIT(qno))) {
		ret = -EIO;
		spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
		goto allow_sleep;
	}
	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);

	do {
		spin_lock_irqsave(&queue->ring_lock, flags);
		tx_req = queue->tx_next;
		if (queue->budget > 0 && !tx_req->skb) {
			struct list_head *gpd_ring = &queue->tr_ring->gpd_ring;

			queue->budget--;
			t7xx_cldma_gpd_handle_tx_request(queue, tx_req, skb);
			queue->tx_next = list_next_entry_circular(tx_req, gpd_ring, entry);
			spin_unlock_irqrestore(&queue->ring_lock, flags);

			if (!t7xx_pci_sleep_disable_complete(md_ctrl->t7xx_dev)) {
				ret = -ETIMEDOUT;
				break;
			}

			/* Protect the access to the modem for queues operations (resume/start)
			 * which access shared locations by all the queues.
			 * cldma_lock is independent of ring_lock which is per queue.
			 */
			spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
			t7xx_cldma_hw_start_send(md_ctrl, qno, tx_req);
			spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);

			break;
		}
		spin_unlock_irqrestore(&queue->ring_lock, flags);

		if (!t7xx_pci_sleep_disable_complete(md_ctrl->t7xx_dev)) {
			ret = -ETIMEDOUT;
			break;
		}

		if (!t7xx_cldma_hw_queue_status(&md_ctrl->hw_info, qno, MTK_TX)) {
			spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
			t7xx_cldma_hw_resume_queue(&md_ctrl->hw_info, qno, MTK_TX);
			spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
		}

		ret = wait_event_interruptible_exclusive(queue->req_wq, queue->budget > 0);
	} while (!ret);

allow_sleep:
	t7xx_pci_enable_sleep(md_ctrl->t7xx_dev);
	pm_runtime_mark_last_busy(md_ctrl->dev);
	pm_runtime_put_autosuspend(md_ctrl->dev);
	return ret;
}
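/* Usage sketch (illustrative only): callers such as the port layer hand over
 * one skb at a time and may sleep waiting for ring budget:
 *
 *	ret = t7xx_cldma_send_skb(md_ctrl, 0, skb);
 *	if (ret)
 *		dev_kfree_skb_any(skb);	// caller keeps ownership on failure
 *
 * On success the skb is owned by the TX ring and is unmapped and freed from
 * t7xx_cldma_gpd_tx_collect() once the hardware has consumed the TGPD.
 */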
static int t7xx_cldma_late_init(struct cldma_ctrl *md_ctrl)
{
	char dma_pool_name[32];
	int i, j, ret;

	if (md_ctrl->is_late_init) {
		dev_err(md_ctrl->dev, "CLDMA late init was already done\n");
		return -EALREADY;
	}

	snprintf(dma_pool_name, sizeof(dma_pool_name), "cldma_req_hif%d", md_ctrl->hif_id);

	md_ctrl->gpd_dmapool = dma_pool_create(dma_pool_name, md_ctrl->dev,
					       sizeof(struct cldma_gpd), GPD_DMAPOOL_ALIGN, 0);
	if (!md_ctrl->gpd_dmapool) {
		dev_err(md_ctrl->dev, "DMA pool alloc fail\n");
		return -ENOMEM;
	}

	for (i = 0; i < CLDMA_TXQ_NUM; i++) {
		ret = t7xx_cldma_tx_ring_init(md_ctrl, &md_ctrl->tx_ring[i]);
		if (ret) {
			dev_err(md_ctrl->dev, "control TX ring init fail\n");
			goto err_free_tx_ring;
		}

		md_ctrl->tx_ring[i].pkt_size = CLDMA_MTU;
	}

	for (j = 0; j < CLDMA_RXQ_NUM; j++) {
		md_ctrl->rx_ring[j].pkt_size = CLDMA_MTU;

		if (j == CLDMA_RXQ_NUM - 1)
			md_ctrl->rx_ring[j].pkt_size = CLDMA_JUMBO_BUFF_SZ;

		ret = t7xx_cldma_rx_ring_init(md_ctrl, &md_ctrl->rx_ring[j]);
		if (ret) {
			dev_err(md_ctrl->dev, "Control RX ring init fail\n");
			goto err_free_rx_ring;
		}
	}

	for (i = 0; i < CLDMA_TXQ_NUM; i++)
		t7xx_cldma_txq_init(&md_ctrl->txq[i]);

	for (j = 0; j < CLDMA_RXQ_NUM; j++)
		t7xx_cldma_rxq_init(&md_ctrl->rxq[j]);

	md_ctrl->is_late_init = true;
	return 0;

err_free_rx_ring:
	while (j--)
		t7xx_cldma_ring_free(md_ctrl, &md_ctrl->rx_ring[j], DMA_FROM_DEVICE);

err_free_tx_ring:
	while (i--)
		t7xx_cldma_ring_free(md_ctrl, &md_ctrl->tx_ring[i], DMA_TO_DEVICE);

	return ret;
}
static void __iomem *t7xx_pcie_addr_transfer(void __iomem *addr, u32 addr_trs1, u32 phy_addr)
{
	return addr + phy_addr - addr_trs1;
}
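/* Worked example of the translation above (made-up numbers): with the
 * virtual window 'addr' mapped at the translated base addr_trs1, a device
 * physical register phy_addr is reached at the same offset into the window:
 *
 *	addr_trs1 = 0x10000000, phy_addr = 0x1021e000
 *	=> returns addr + 0x21e000, i.e. offset 0x21e000 into the window
 */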
static void t7xx_hw_info_init(struct cldma_ctrl *md_ctrl)
{
	struct t7xx_addr_base *pbase = &md_ctrl->t7xx_dev->base_addr;
	struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
	u32 phy_ao_base, phy_pd_base;

	hw_info->hw_mode = MODE_BIT_64;

	if (md_ctrl->hif_id == CLDMA_ID_MD) {
		phy_ao_base = CLDMA1_AO_BASE;
		phy_pd_base = CLDMA1_PD_BASE;
		hw_info->phy_interrupt_id = CLDMA1_INT;
	} else {
		phy_ao_base = CLDMA0_AO_BASE;
		phy_pd_base = CLDMA0_PD_BASE;
		hw_info->phy_interrupt_id = CLDMA0_INT;
	}

	hw_info->ap_ao_base = t7xx_pcie_addr_transfer(pbase->pcie_ext_reg_base,
						      pbase->pcie_dev_reg_trsl_addr, phy_ao_base);
	hw_info->ap_pdn_base = t7xx_pcie_addr_transfer(pbase->pcie_ext_reg_base,
						       pbase->pcie_dev_reg_trsl_addr, phy_pd_base);
}

static int t7xx_cldma_default_recv_skb(struct cldma_queue *queue, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
	return 0;
}
int t7xx_cldma_alloc(enum cldma_id hif_id, struct t7xx_pci_dev *t7xx_dev)
{
	struct device *dev = &t7xx_dev->pdev->dev;
	struct cldma_ctrl *md_ctrl;

	md_ctrl = devm_kzalloc(dev, sizeof(*md_ctrl), GFP_KERNEL);
	if (!md_ctrl)
		return -ENOMEM;

	md_ctrl->t7xx_dev = t7xx_dev;
	md_ctrl->dev = dev;
	md_ctrl->hif_id = hif_id;
	md_ctrl->recv_skb = t7xx_cldma_default_recv_skb;
	t7xx_hw_info_init(md_ctrl);
	t7xx_dev->md->md_ctrl[hif_id] = md_ctrl;
	return 0;
}
static void t7xx_cldma_resume_early(struct t7xx_pci_dev *t7xx_dev, void *entity_param)
{
	struct cldma_ctrl *md_ctrl = entity_param;
	struct t7xx_cldma_hw *hw_info;
	unsigned long flags;
	int qno_t;

	hw_info = &md_ctrl->hw_info;

	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
	t7xx_cldma_hw_restore(hw_info);
	for (qno_t = 0; qno_t < CLDMA_TXQ_NUM; qno_t++) {
		t7xx_cldma_hw_set_start_addr(hw_info, qno_t, md_ctrl->txq[qno_t].tx_next->gpd_addr,
					     MTK_TX);
		t7xx_cldma_hw_set_start_addr(hw_info, qno_t, md_ctrl->rxq[qno_t].tr_done->gpd_addr,
					     MTK_RX);
	}
	t7xx_cldma_enable_irq(md_ctrl);
	t7xx_cldma_hw_start_queue(hw_info, CLDMA_ALL_Q, MTK_RX);
	md_ctrl->rxq_active |= TXRX_STATUS_BITMASK;
	t7xx_cldma_hw_irq_en_eq(hw_info, CLDMA_ALL_Q, MTK_RX);
	t7xx_cldma_hw_irq_en_txrx(hw_info, CLDMA_ALL_Q, MTK_RX);
	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
}

static int t7xx_cldma_resume(struct t7xx_pci_dev *t7xx_dev, void *entity_param)
{
	struct cldma_ctrl *md_ctrl = entity_param;
	unsigned long flags;

	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
	md_ctrl->txq_active |= TXRX_STATUS_BITMASK;
	t7xx_cldma_hw_irq_en_txrx(&md_ctrl->hw_info, CLDMA_ALL_Q, MTK_TX);
	t7xx_cldma_hw_irq_en_eq(&md_ctrl->hw_info, CLDMA_ALL_Q, MTK_TX);
	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);

	if (md_ctrl->hif_id == CLDMA_ID_MD)
		t7xx_mhccif_mask_clr(t7xx_dev, D2H_SW_INT_MASK);

	return 0;
}
static void t7xx_cldma_suspend_late(struct t7xx_pci_dev *t7xx_dev, void *entity_param)
{
	struct cldma_ctrl *md_ctrl = entity_param;
	struct t7xx_cldma_hw *hw_info;
	unsigned long flags;

	hw_info = &md_ctrl->hw_info;

	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
	t7xx_cldma_hw_irq_dis_eq(hw_info, CLDMA_ALL_Q, MTK_RX);
	t7xx_cldma_hw_irq_dis_txrx(hw_info, CLDMA_ALL_Q, MTK_RX);
	md_ctrl->rxq_active &= ~TXRX_STATUS_BITMASK;
	t7xx_cldma_hw_stop_all_qs(hw_info, MTK_RX);
	t7xx_cldma_clear_ip_busy(hw_info);
	t7xx_cldma_disable_irq(md_ctrl);
	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
}

static int t7xx_cldma_suspend(struct t7xx_pci_dev *t7xx_dev, void *entity_param)
{
	struct cldma_ctrl *md_ctrl = entity_param;
	struct t7xx_cldma_hw *hw_info;
	unsigned long flags;

	if (md_ctrl->hif_id == CLDMA_ID_MD)
		t7xx_mhccif_mask_set(t7xx_dev, D2H_SW_INT_MASK);

	hw_info = &md_ctrl->hw_info;

	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
	t7xx_cldma_hw_irq_dis_eq(hw_info, CLDMA_ALL_Q, MTK_TX);
	t7xx_cldma_hw_irq_dis_txrx(hw_info, CLDMA_ALL_Q, MTK_TX);
	md_ctrl->txq_active &= ~TXRX_STATUS_BITMASK;
	t7xx_cldma_hw_stop_all_qs(hw_info, MTK_TX);
	md_ctrl->txq_started = 0;
	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);

	return 0;
}
static int t7xx_cldma_pm_init(struct cldma_ctrl *md_ctrl)
{
	md_ctrl->pm_entity = kzalloc(sizeof(*md_ctrl->pm_entity), GFP_KERNEL);
	if (!md_ctrl->pm_entity)
		return -ENOMEM;

	md_ctrl->pm_entity->entity_param = md_ctrl;

	if (md_ctrl->hif_id == CLDMA_ID_MD)
		md_ctrl->pm_entity->id = PM_ENTITY_ID_CTRL1;
	else
		md_ctrl->pm_entity->id = PM_ENTITY_ID_CTRL2;

	md_ctrl->pm_entity->suspend = t7xx_cldma_suspend;
	md_ctrl->pm_entity->suspend_late = t7xx_cldma_suspend_late;
	md_ctrl->pm_entity->resume = t7xx_cldma_resume;
	md_ctrl->pm_entity->resume_early = t7xx_cldma_resume_early;

	return t7xx_pci_pm_entity_register(md_ctrl->t7xx_dev, md_ctrl->pm_entity);
}

static int t7xx_cldma_pm_uninit(struct cldma_ctrl *md_ctrl)
{
	if (!md_ctrl->pm_entity)
		return -EINVAL;

	t7xx_pci_pm_entity_unregister(md_ctrl->t7xx_dev, md_ctrl->pm_entity);
	kfree(md_ctrl->pm_entity);
	md_ctrl->pm_entity = NULL;

	return 0;
}
void t7xx_cldma_hif_hw_init(struct cldma_ctrl *md_ctrl)
{
	struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
	unsigned long flags;

	spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
	t7xx_cldma_hw_stop(hw_info, MTK_TX);
	t7xx_cldma_hw_stop(hw_info, MTK_RX);
	t7xx_cldma_hw_rx_done(hw_info, EMPTY_STATUS_BITMASK | TXRX_STATUS_BITMASK);
	t7xx_cldma_hw_tx_done(hw_info, EMPTY_STATUS_BITMASK | TXRX_STATUS_BITMASK);
	t7xx_cldma_hw_init(hw_info);
	spin_unlock_irqrestore(&md_ctrl->cldma_lock, flags);
}

static irqreturn_t t7xx_cldma_isr_handler(int irq, void *data)
{
	struct cldma_ctrl *md_ctrl = data;
	u32 interrupt;

	interrupt = md_ctrl->hw_info.phy_interrupt_id;
	t7xx_pcie_mac_clear_int(md_ctrl->t7xx_dev, interrupt);
	t7xx_cldma_irq_work_cb(md_ctrl);
	t7xx_pcie_mac_clear_int_status(md_ctrl->t7xx_dev, interrupt);
	t7xx_pcie_mac_set_int(md_ctrl->t7xx_dev, interrupt);
	return IRQ_HANDLED;
}
static void t7xx_cldma_destroy_wqs(struct cldma_ctrl *md_ctrl)
{
	int i;

	for (i = 0; i < CLDMA_TXQ_NUM; i++) {
		if (md_ctrl->txq[i].worker) {
			destroy_workqueue(md_ctrl->txq[i].worker);
			md_ctrl->txq[i].worker = NULL;
		}
	}

	for (i = 0; i < CLDMA_RXQ_NUM; i++) {
		if (md_ctrl->rxq[i].worker) {
			destroy_workqueue(md_ctrl->rxq[i].worker);
			md_ctrl->rxq[i].worker = NULL;
		}
	}
}
/**
 * t7xx_cldma_init() - Initialize CLDMA.
 * @md_ctrl: CLDMA context structure.
 *
 * Allocate and initialize device power management entity.
 * Initialize HIF TX/RX queue structure.
 * Register CLDMA callback ISR with PCIe driver.
 *
 * Return:
 * * 0		- Success.
 * * -ERROR	- Error code from failure sub-initializations.
 */
int t7xx_cldma_init(struct cldma_ctrl *md_ctrl)
{
	struct t7xx_cldma_hw *hw_info = &md_ctrl->hw_info;
	int ret, i;

	md_ctrl->txq_active = 0;
	md_ctrl->rxq_active = 0;
	md_ctrl->is_late_init = false;

	ret = t7xx_cldma_pm_init(md_ctrl);
	if (ret)
		return ret;

	spin_lock_init(&md_ctrl->cldma_lock);

	for (i = 0; i < CLDMA_TXQ_NUM; i++) {
		md_cd_queue_struct_init(&md_ctrl->txq[i], md_ctrl, MTK_TX, i);
		md_ctrl->txq[i].worker =
			alloc_ordered_workqueue("md_hif%d_tx%d_worker",
						WQ_MEM_RECLAIM | (i ? 0 : WQ_HIGHPRI),
						md_ctrl->hif_id, i);
		if (!md_ctrl->txq[i].worker)
			goto err_workqueue;

		INIT_WORK(&md_ctrl->txq[i].cldma_work, t7xx_cldma_tx_done);
	}

	for (i = 0; i < CLDMA_RXQ_NUM; i++) {
		md_cd_queue_struct_init(&md_ctrl->rxq[i], md_ctrl, MTK_RX, i);
		INIT_WORK(&md_ctrl->rxq[i].cldma_work, t7xx_cldma_rx_done);

		md_ctrl->rxq[i].worker =
			alloc_ordered_workqueue("md_hif%d_rx%d_worker",
						WQ_MEM_RECLAIM,
						md_ctrl->hif_id, i);
		if (!md_ctrl->rxq[i].worker)
			goto err_workqueue;
	}

	t7xx_pcie_mac_clear_int(md_ctrl->t7xx_dev, hw_info->phy_interrupt_id);
	md_ctrl->t7xx_dev->intr_handler[hw_info->phy_interrupt_id] = t7xx_cldma_isr_handler;
	md_ctrl->t7xx_dev->intr_thread[hw_info->phy_interrupt_id] = NULL;
	md_ctrl->t7xx_dev->callback_param[hw_info->phy_interrupt_id] = md_ctrl;
	t7xx_pcie_mac_clear_int_status(md_ctrl->t7xx_dev, hw_info->phy_interrupt_id);
	return 0;

err_workqueue:
	t7xx_cldma_destroy_wqs(md_ctrl);
	t7xx_cldma_pm_uninit(md_ctrl);
	return -ENOMEM;
}
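/* Typical bring-up order for this interface as used by the modem layer
 * (sketch only, error handling omitted; exact call sites live outside this
 * file):
 *
 *	t7xx_cldma_alloc(CLDMA_ID_MD, t7xx_dev);	// allocate context
 *	t7xx_cldma_init(md_ctrl);			// PM entity, queues, ISR
 *	t7xx_cldma_switch_cfg(md_ctrl);			// late init: rings, DMA pool
 *	t7xx_cldma_hif_hw_init(md_ctrl);		// quiesce and init HW
 *	t7xx_cldma_start(md_ctrl);			// program addresses, go
 */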
void t7xx_cldma_switch_cfg(struct cldma_ctrl *md_ctrl)
{
	t7xx_cldma_late_release(md_ctrl);
	t7xx_cldma_late_init(md_ctrl);
}

void t7xx_cldma_exit(struct cldma_ctrl *md_ctrl)
{
	t7xx_cldma_stop(md_ctrl);
	t7xx_cldma_late_release(md_ctrl);
	t7xx_cldma_destroy_wqs(md_ctrl);
	t7xx_cldma_pm_uninit(md_ctrl);
}