#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/of_dma.h>
#include <linux/of_irq.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/of_address.h>
#include <linux/pm_runtime.h>
#include "dmaengine.h"
#define DESC_TYPE		27
#define DESC_TYPE_HOST		0x10
#define DESC_TYPE_TEARD		0x13

#define TD_DESC_IS_RX		(1 << 16)
#define TD_DESC_DMA_NUM		10

#define DESC_LENGTH_BITS_NUM	21

#define DESC_TYPE_USB		(5 << 26)
#define DESC_PD_COMPLETE	(1 << 31)
/* DMA engine */
#define DMA_TDFDQ		4
#define DMA_TXGCR(x)		(0x800 + (x) * 0x20)
#define DMA_RXGCR(x)		(0x808 + (x) * 0x20)
#define RXHPCRA0		4

#define GCR_CHAN_ENABLE		(1 << 31)
#define GCR_TEARDOWN		(1 << 30)
#define GCR_STARV_RETRY		(1 << 24)
#define GCR_DESC_TYPE_HOST	(1 << 14)
/* DMA scheduler */
#define DMA_SCHED_CTRL		0
#define DMA_SCHED_CTRL_EN	(1 << 31)
#define DMA_SCHED_WORD(x)	((x) * 4 + 0x800)

#define SCHED_ENTRY0_CHAN(x)	((x) << 0)
#define SCHED_ENTRY0_IS_RX	(1 << 7)

#define SCHED_ENTRY1_CHAN(x)	((x) << 8)
#define SCHED_ENTRY1_IS_RX	(1 << 15)

#define SCHED_ENTRY2_CHAN(x)	((x) << 16)
#define SCHED_ENTRY2_IS_RX	(1 << 23)

#define SCHED_ENTRY3_CHAN(x)	((x) << 24)
#define SCHED_ENTRY3_IS_RX	(1 << 31)
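
/*
 * Per the macros above, each 32-bit scheduler word packs four schedule
 * entries, one per byte: the channel number in the low seven bits of the
 * byte and an "is RX" flag in its top bit. init_sched() below fills one
 * word per pair of channels.
 */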
/* Queue manager */
/* 4 KiB of memory for descriptors, 2 for each endpoint */
#define ALLOC_DECS_NUM		128
#define DESCS_AREAS		1
#define TOTAL_DESCS_NUM		(ALLOC_DECS_NUM * DESCS_AREAS)
#define QMGR_SCRATCH_SIZE	(TOTAL_DESCS_NUM * 4)

#define QMGR_LRAM0_BASE		0x80
#define QMGR_LRAM_SIZE		0x84
#define QMGR_LRAM1_BASE		0x88
#define QMGR_MEMBASE(x)		(0x1000 + (x) * 0x10)
#define QMGR_MEMCTRL(x)		(0x1004 + (x) * 0x10)
#define QMGR_MEMCTRL_IDX_SH	16
#define QMGR_MEMCTRL_DESC_SH	8

#define QMGR_PEND(x)		(0x90 + (x) * 4)

#define QMGR_PENDING_SLOT_Q(x)	((x) / 32)
#define QMGR_PENDING_BIT_Q(x)	((x) % 32)

#define QMGR_QUEUE_A(n)		(0x2000 + (n) * 0x10)
#define QMGR_QUEUE_B(n)		(0x2004 + (n) * 0x10)
#define QMGR_QUEUE_C(n)		(0x2008 + (n) * 0x10)
#define QMGR_QUEUE_D(n)		(0x200c + (n) * 0x10)
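
/*
 * Each hardware queue exposes four 32-bit registers, 0x10 bytes apart.
 * This driver only touches register D: writing a descriptor address
 * (with the descriptor size encoded in the low bits) pushes onto the
 * queue, reading pops the head descriptor.
 */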
/* Packet Descriptor */
#define PD2_ZERO_LENGTH		(1 << 19)
struct cppi41_channel {
	struct dma_chan chan;
	struct dma_async_tx_descriptor txd;
	struct cppi41_dd *cdd;
	struct cppi41_desc *desc;
	dma_addr_t desc_phys;
	void __iomem *gcr_reg;
	int is_tx;
	u32 residue;

	unsigned int q_num;
	unsigned int q_comp_num;
	unsigned int port_num;

	unsigned td_queued:1;
	unsigned td_seen:1;
	unsigned td_desc_seen:1;
	unsigned td_retry;

	struct list_head node;		/* Node for pending list */
};
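
/*
 * Hardware descriptor: eight 32-bit words, per the pd0..pd7 accesses
 * below. init_descs() asserts that the struct is a power-of-two size of
 * at least 32 bytes, hence the explicit alignment.
 */
struct cppi41_desc {
	u32 pd0;
	u32 pd1;
	u32 pd2;
	u32 pd3;
	u32 pd4;
	u32 pd5;
	u32 pd6;
	u32 pd7;
} __aligned(32);

/* A channel's queue pair: one queue to submit work, one for completions. */
struct chan_queues {
	u16 submit;
	u16 complete;
};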
struct cppi41_dd {
	struct dma_device ddev;

	void *qmgr_scratch;
	dma_addr_t scratch_phys;

	struct cppi41_desc *cd;
	dma_addr_t descs_phys;
	u32 first_td_desc;
	struct cppi41_channel *chan_busy[ALLOC_DECS_NUM];

	void __iomem *ctrl_mem;
	void __iomem *sched_mem;
	void __iomem *qmgr_mem;
	unsigned int irq;
	const struct chan_queues *queues_rx;
	const struct chan_queues *queues_tx;
	struct chan_queues td_queue;
	u16 first_completion_queue;
	u16 qmgr_num_pend;
	u32 n_chans;

	struct list_head pending;	/* Pending queued transfers */
	spinlock_t lock;		/* Lock for pending list */

	/* context for suspend/resume */
	unsigned int dma_tdfdq;

	bool is_suspended;
};
static const struct chan_queues am335x_usb_queues_tx[] = {
	/* USB0 endpoints */
	[ 0] = { .submit = 32, .complete =  93},
	[ 1] = { .submit = 34, .complete =  94},
	[ 2] = { .submit = 36, .complete =  95},
	[ 3] = { .submit = 38, .complete =  96},
	[ 4] = { .submit = 40, .complete =  97},
	[ 5] = { .submit = 42, .complete =  98},
	[ 6] = { .submit = 44, .complete =  99},
	[ 7] = { .submit = 46, .complete = 100},
	[ 8] = { .submit = 48, .complete = 101},
	[ 9] = { .submit = 50, .complete = 102},
	[10] = { .submit = 52, .complete = 103},
	[11] = { .submit = 54, .complete = 104},
	[12] = { .submit = 56, .complete = 105},
	[13] = { .submit = 58, .complete = 106},
	[14] = { .submit = 60, .complete = 107},

	/* USB1 endpoints */
	[15] = { .submit = 62, .complete = 125},
	[16] = { .submit = 64, .complete = 126},
	[17] = { .submit = 66, .complete = 127},
	[18] = { .submit = 68, .complete = 128},
	[19] = { .submit = 70, .complete = 129},
	[20] = { .submit = 72, .complete = 130},
	[21] = { .submit = 74, .complete = 131},
	[22] = { .submit = 76, .complete = 132},
	[23] = { .submit = 78, .complete = 133},
	[24] = { .submit = 80, .complete = 134},
	[25] = { .submit = 82, .complete = 135},
	[26] = { .submit = 84, .complete = 136},
	[27] = { .submit = 86, .complete = 137},
	[28] = { .submit = 88, .complete = 138},
	[29] = { .submit = 90, .complete = 139},
};
static const struct chan_queues am335x_usb_queues_rx[] = {
	/* USB0 endpoints */
	[ 0] = { .submit =  1, .complete = 109},
	[ 1] = { .submit =  2, .complete = 110},
	[ 2] = { .submit =  3, .complete = 111},
	[ 3] = { .submit =  4, .complete = 112},
	[ 4] = { .submit =  5, .complete = 113},
	[ 5] = { .submit =  6, .complete = 114},
	[ 6] = { .submit =  7, .complete = 115},
	[ 7] = { .submit =  8, .complete = 116},
	[ 8] = { .submit =  9, .complete = 117},
	[ 9] = { .submit = 10, .complete = 118},
	[10] = { .submit = 11, .complete = 119},
	[11] = { .submit = 12, .complete = 120},
	[12] = { .submit = 13, .complete = 121},
	[13] = { .submit = 14, .complete = 122},
	[14] = { .submit = 15, .complete = 123},

	/* USB1 endpoints */
	[15] = { .submit = 16, .complete = 141},
	[16] = { .submit = 17, .complete = 142},
	[17] = { .submit = 18, .complete = 143},
	[18] = { .submit = 19, .complete = 144},
	[19] = { .submit = 20, .complete = 145},
	[20] = { .submit = 21, .complete = 146},
	[21] = { .submit = 22, .complete = 147},
	[22] = { .submit = 23, .complete = 148},
	[23] = { .submit = 24, .complete = 149},
	[24] = { .submit = 25, .complete = 150},
	[25] = { .submit = 26, .complete = 151},
	[26] = { .submit = 27, .complete = 152},
	[27] = { .submit = 28, .complete = 153},
	[28] = { .submit = 29, .complete = 154},
	[29] = { .submit = 30, .complete = 155},
};
static const struct chan_queues da8xx_usb_queues_tx[] = {
	[0] = { .submit = 16, .complete = 24},
	[1] = { .submit = 18, .complete = 24},
	[2] = { .submit = 20, .complete = 24},
	[3] = { .submit = 22, .complete = 24},
};

static const struct chan_queues da8xx_usb_queues_rx[] = {
	[0] = { .submit = 1, .complete = 26},
	[1] = { .submit = 3, .complete = 26},
	[2] = { .submit = 5, .complete = 26},
	[3] = { .submit = 7, .complete = 26},
};
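
/*
 * Unlike am335x, where every endpoint gets its own completion queue, the
 * da8xx tables above funnel all TX completions into queue 24 and all RX
 * completions into queue 26, which is why the da8xx glue info below sets
 * first_completion_queue to 24.
 */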
struct cppi_glue_infos {
	const struct chan_queues *queues_rx;
	const struct chan_queues *queues_tx;
	struct chan_queues td_queue;
	u16 first_completion_queue;
	u16 qmgr_num_pend;
};
static struct cppi41_channel *to_cpp41_chan(struct dma_chan *c)
{
	return container_of(c, struct cppi41_channel, chan);
}
static struct cppi41_channel *desc_to_chan(struct cppi41_dd *cdd, u32 desc)
{
	struct cppi41_channel *c;
	u32 descs_size;
	u32 desc_num;

	descs_size = sizeof(struct cppi41_desc) * ALLOC_DECS_NUM;

	if (!((desc >= cdd->descs_phys) &&
			(desc < (cdd->descs_phys + descs_size)))) {
		return NULL;
	}

	desc_num = (desc - cdd->descs_phys) / sizeof(struct cppi41_desc);
	BUG_ON(desc_num >= ALLOC_DECS_NUM);
	c = cdd->chan_busy[desc_num];
	cdd->chan_busy[desc_num] = NULL;

	/* Usecount for chan_busy[], paired with push_desc_queue() */
	pm_runtime_put(cdd->ddev.dev);

	return c;
}
static void cppi_writel(u32 val, void __iomem *mem)
{
	__raw_writel(val, mem);
}

static u32 cppi_readl(void __iomem *mem)
{
	return __raw_readl(mem);
}
static u32 pd_trans_len(u32 val)
{
	return val & ((1 << (DESC_LENGTH_BITS_NUM + 1)) - 1);
}
static u32 cppi41_pop_desc(struct cppi41_dd *cdd, unsigned queue_num)
{
	u32 desc;

	desc = cppi_readl(cdd->qmgr_mem + QMGR_QUEUE_D(queue_num));
	desc &= ~0x1f;	/* strip the descriptor size from the low bits */
	return desc;
}
static irqreturn_t cppi41_irq(int irq, void *data)
{
	struct cppi41_dd *cdd = data;
	u16 first_completion_queue = cdd->first_completion_queue;
	u16 qmgr_num_pend = cdd->qmgr_num_pend;
	struct cppi41_channel *c;
	int i;

	for (i = QMGR_PENDING_SLOT_Q(first_completion_queue); i < qmgr_num_pend;
	     i++) {
		u32 val;
		u32 q_num;

		val = cppi_readl(cdd->qmgr_mem + QMGR_PEND(i));
		if (i == QMGR_PENDING_SLOT_Q(first_completion_queue) && val) {
			u32 mask;

			/* set the bit for the first completion queue, then
			 * all bits below it, then invert: only the bits for
			 * the completion queues remain set */
			mask = 1 << QMGR_PENDING_BIT_Q(first_completion_queue);
			mask--;
			mask = ~mask;
			val &= mask;
		}

		while (val) {
			u32 desc, len;

			/* This should never trigger, see the comments in
			 * push_desc_queue(). */
			WARN_ON(cdd->is_suspended);

			q_num = __fls(val);
			val &= ~(1 << q_num);
			q_num += 32 * i;
			desc = cppi41_pop_desc(cdd, q_num);
			c = desc_to_chan(cdd, desc);
			if (WARN_ON(!c)) {
				pr_err("%s() q %d desc %08x\n", __func__,
				       q_num, desc);
				continue;
			}

			if (c->desc->pd2 & PD2_ZERO_LENGTH)
				len = 0;
			else
				len = pd_trans_len(c->desc->pd0);

			c->residue = pd_trans_len(c->desc->pd6) - len;
			dma_cookie_complete(&c->txd);
			dmaengine_desc_get_callback_invoke(&c->txd, NULL);
		}
	}
	return IRQ_HANDLED;
}
static dma_cookie_t cppi41_tx_submit(struct dma_async_tx_descriptor *tx)
{
	return dma_cookie_assign(tx);
}
static int cppi41_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct cppi41_channel *c = to_cpp41_chan(chan);
	struct cppi41_dd *cdd = c->cdd;
	int error;

	error = pm_runtime_get_sync(cdd->ddev.dev);
	if (error < 0) {
		dev_err(cdd->ddev.dev, "%s pm runtime get: %i\n",
			__func__, error);
		pm_runtime_put_noidle(cdd->ddev.dev);
		return error;
	}

	dma_cookie_init(chan);
	dma_async_tx_descriptor_init(&c->txd, chan);
	c->txd.tx_submit = cppi41_tx_submit;

	if (!c->is_tx)
		cppi_writel(c->q_num, c->gcr_reg + RXHPCRA0);

	pm_runtime_mark_last_busy(cdd->ddev.dev);
	pm_runtime_put_autosuspend(cdd->ddev.dev);

	return 0;
}
static void cppi41_dma_free_chan_resources(struct dma_chan *chan)
{
	struct cppi41_channel *c = to_cpp41_chan(chan);
	struct cppi41_dd *cdd = c->cdd;
	int error;

	error = pm_runtime_get_sync(cdd->ddev.dev);
	if (error < 0) {
		pm_runtime_put_noidle(cdd->ddev.dev);
		return;
	}

	WARN_ON(!list_empty(&cdd->pending));

	pm_runtime_mark_last_busy(cdd->ddev.dev);
	pm_runtime_put_autosuspend(cdd->ddev.dev);
}
static enum dma_status cppi41_dma_tx_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct cppi41_channel *c = to_cpp41_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	dma_set_residue(txstate, c->residue);

	return ret;
}
static void push_desc_queue(struct cppi41_channel *c)
{
	struct cppi41_dd *cdd = c->cdd;
	u32 desc_num;
	u32 desc_phys;
	u32 reg;

	c->residue = 0;

	reg = GCR_CHAN_ENABLE;
	if (!c->is_tx) {
		reg |= GCR_STARV_RETRY;
		reg |= GCR_DESC_TYPE_HOST;
		reg |= c->q_comp_num;
	}

	cppi_writel(reg, c->gcr_reg);

	/*
	 * We don't use writel() but __raw_writel() so we have to make sure
	 * that the DMA descriptor in coherent memory made it to the main
	 * memory before starting the dma engine.
	 */
	__iowmb();

	/*
	 * DMA transfers can take at least 200ms to complete with USB mass
	 * storage connected. To prevent autosuspend timeouts, we must use
	 * pm_runtime_get/put() when chan_busy[] is modified. This will get
	 * cleared in desc_to_chan() or cppi41_stop_chan() depending on the
	 * outcome of the transfer.
	 */
	pm_runtime_get(cdd->ddev.dev);

	desc_phys = lower_32_bits(c->desc_phys);
	desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
	WARN_ON(cdd->chan_busy[desc_num]);
	cdd->chan_busy[desc_num] = c;

	/* push the descriptor address, its size encoded in the low bits */
	reg = (sizeof(struct cppi41_desc) - 24) / 4;
	reg |= desc_phys;
	cppi_writel(reg, cdd->qmgr_mem + QMGR_QUEUE_D(c->q_num));
}
/*
 * Caller must hold cdd->lock to prevent push_desc_queue()
 * getting called out of order. We have both cppi41_dma_issue_pending()
 * and cppi41_runtime_resume() call this function.
 */
static void cppi41_run_queue(struct cppi41_dd *cdd)
{
	struct cppi41_channel *c, *_c;

	list_for_each_entry_safe(c, _c, &cdd->pending, node) {
		push_desc_queue(c);
		list_del(&c->node);
	}
}
static void cppi41_dma_issue_pending(struct dma_chan *chan)
{
	struct cppi41_channel *c = to_cpp41_chan(chan);
	struct cppi41_dd *cdd = c->cdd;
	unsigned long flags;
	int error;

	error = pm_runtime_get(cdd->ddev.dev);
	if ((error != -EINPROGRESS) && error < 0) {
		pm_runtime_put_noidle(cdd->ddev.dev);
		dev_err(cdd->ddev.dev, "Failed to pm_runtime_get: %i\n",
			error);
		return;
	}

	spin_lock_irqsave(&cdd->lock, flags);
	list_add_tail(&c->node, &cdd->pending);
	if (!cdd->is_suspended)
		cppi41_run_queue(cdd);
	spin_unlock_irqrestore(&cdd->lock, flags);

	pm_runtime_mark_last_busy(cdd->ddev.dev);
	pm_runtime_put_autosuspend(cdd->ddev.dev);
}
static u32 get_host_pd0(u32 length)
{
	return (DESC_TYPE_HOST << DESC_TYPE) | length;
}

static u32 get_host_pd1(struct cppi41_channel *c)
{
	return 0;
}

static u32 get_host_pd2(struct cppi41_channel *c)
{
	return DESC_TYPE_USB | c->q_comp_num;
}

/* PD3 = packet size */
static u32 get_host_pd3(u32 length)
{
	return length;
}

/* PD6 = buffer size, plus the completion flag */
static u32 get_host_pd6(u32 length)
{
	return DESC_PD_COMPLETE | length;
}

static u32 get_host_pd4_or_7(u32 addr)
{
	return addr;
}

static u32 get_host_pd5(void)
{
	return 0;
}
static struct dma_async_tx_descriptor *cppi41_dma_prep_slave_sg(
	struct dma_chan *chan, struct scatterlist *sgl, unsigned sg_len,
	enum dma_transfer_direction dir, unsigned long tx_flags, void *context)
{
	struct cppi41_channel *c = to_cpp41_chan(chan);
	struct dma_async_tx_descriptor *txd = NULL;
	struct cppi41_dd *cdd = c->cdd;
	struct cppi41_desc *d;
	struct scatterlist *sg;
	unsigned int i;
	int error;

	error = pm_runtime_get(cdd->ddev.dev);
	if (error < 0) {
		pm_runtime_put_noidle(cdd->ddev.dev);
		return NULL;
	}

	if (cdd->is_suspended)
		goto err_out_not_ready;

	d = c->desc;
	for_each_sg(sgl, sg, sg_len, i) {
		u32 addr;
		u32 len;

		/* We need to use more than one desc once musb supports sg */
		addr = lower_32_bits(sg_dma_address(sg));
		len = sg_dma_len(sg);

		d->pd0 = get_host_pd0(len);
		d->pd1 = get_host_pd1(c);
		d->pd2 = get_host_pd2(c);
		d->pd3 = get_host_pd3(len);
		d->pd4 = get_host_pd4_or_7(addr);
		d->pd5 = get_host_pd5();
		d->pd6 = get_host_pd6(len);
		d->pd7 = get_host_pd4_or_7(addr);

		d++;
	}

	txd = &c->txd;

err_out_not_ready:
	pm_runtime_mark_last_busy(cdd->ddev.dev);
	pm_runtime_put_autosuspend(cdd->ddev.dev);

	return txd;
}
static void cppi41_compute_td_desc(struct cppi41_desc *d)
{
	d->pd0 = DESC_TYPE_TEARD << DESC_TYPE;
}
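
/*
 * A teardown descriptor only needs its type in pd0 when queued; the
 * hardware hands it back with the DMA number, port number and direction
 * filled in, which the WARN_ONs in cppi41_tear_down_chan() below verify.
 */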
static int cppi41_tear_down_chan(struct cppi41_channel *c)
{
	struct dmaengine_result abort_result;
	struct cppi41_dd *cdd = c->cdd;
	struct cppi41_desc *td;
	u32 reg;
	u32 desc_phys;
	u32 td_desc_phys;

	td = cdd->cd;
	td += cdd->first_td_desc;

	td_desc_phys = cdd->descs_phys;
	td_desc_phys += cdd->first_td_desc * sizeof(struct cppi41_desc);

	if (!c->td_queued) {
		cppi41_compute_td_desc(td);
		__iowmb();

		reg = (sizeof(struct cppi41_desc) - 24) / 4;
		reg |= td_desc_phys;
		cppi_writel(reg, cdd->qmgr_mem +
				QMGR_QUEUE_D(cdd->td_queue.submit));

		reg = GCR_CHAN_ENABLE;
		if (!c->is_tx) {
			reg |= GCR_STARV_RETRY;
			reg |= GCR_DESC_TYPE_HOST;
			reg |= cdd->td_queue.complete;
		}
		reg |= GCR_TEARDOWN;
		cppi_writel(reg, c->gcr_reg);
		c->td_queued = 1;
		c->td_retry = 500;
	}

	if (!c->td_seen || !c->td_desc_seen) {

		desc_phys = cppi41_pop_desc(cdd, cdd->td_queue.complete);
		if (!desc_phys && c->is_tx)
			desc_phys = cppi41_pop_desc(cdd, c->q_comp_num);

		if (desc_phys == c->desc_phys) {
			c->td_desc_seen = 1;

		} else if (desc_phys == td_desc_phys) {
			u32 pd0;

			pd0 = td->pd0;
			WARN_ON((pd0 >> DESC_TYPE) != DESC_TYPE_TEARD);
			WARN_ON(!c->is_tx && !(pd0 & TD_DESC_IS_RX));
			WARN_ON((pd0 & 0x1f) != c->port_num);
			c->td_seen = 1;
		} else if (desc_phys) {
			WARN_ON_ONCE(1);
		}
	}
	c->td_retry--;
	/*
	 * If the TX descriptor / channel is in use, the caller needs to poke
	 * the TD bit multiple times. After that, the hardware releases the
	 * transfer descriptor followed by the TD descriptor. Waiting seems
	 * not to make any difference.
	 * RX seems to be thrown out right away. However, once the TearDown
	 * descriptor gets through we are done. If we have seen the transfer
	 * descriptor before the TD, we fetch it from enqueue: it has to be
	 * there waiting for us.
	 */
	if (!c->td_seen && c->td_retry) {
		udelay(1);
		return -EAGAIN;
	}
	WARN_ON(!c->td_retry);

	if (!c->td_desc_seen) {
		desc_phys = cppi41_pop_desc(cdd, c->q_num);
		if (!desc_phys)
			desc_phys = cppi41_pop_desc(cdd, c->q_comp_num);
		WARN_ON(!desc_phys);
	}

	c->td_queued = 0;
	c->td_seen = 0;
	c->td_desc_seen = 0;
	cppi_writel(0, c->gcr_reg);

	/* Invoke the callback to do the necessary clean-up */
	abort_result.result = DMA_TRANS_ABORTED;
	dma_cookie_complete(&c->txd);
	dmaengine_desc_get_callback_invoke(&c->txd, &abort_result);

	return 0;
}
static int cppi41_stop_chan(struct dma_chan *chan)
{
	struct cppi41_channel *c = to_cpp41_chan(chan);
	struct cppi41_dd *cdd = c->cdd;
	u32 desc_num;
	u32 desc_phys;
	int ret;

	desc_phys = lower_32_bits(c->desc_phys);
	desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
	if (!cdd->chan_busy[desc_num]) {
		struct cppi41_channel *cc, *_ct;

		/*
		 * channels might still be in the pending list if
		 * cppi41_dma_issue_pending() is called after
		 * cppi41_runtime_suspend() is called
		 */
		list_for_each_entry_safe(cc, _ct, &cdd->pending, node) {
			if (cc != c)
				continue;
			list_del(&cc->node);
			break;
		}
		return 0;
	}

	ret = cppi41_tear_down_chan(c);
	if (ret)
		return ret;

	WARN_ON(!cdd->chan_busy[desc_num]);
	cdd->chan_busy[desc_num] = NULL;

	/* Usecount for chan_busy[], paired with push_desc_queue() */
	pm_runtime_put(cdd->ddev.dev);

	return 0;
}
static int cppi41_add_chans(struct device *dev, struct cppi41_dd *cdd)
{
	struct cppi41_channel *cchan, *chans;
	int i;
	u32 n_chans = cdd->n_chans;

	/*
	 * A channel can only be used for one direction, so register twice
	 * as many DMA channels: each USB endpoint is either RX or TX.
	 */
	n_chans *= 2;

	chans = devm_kcalloc(dev, n_chans, sizeof(*chans), GFP_KERNEL);
	if (!chans)
		return -ENOMEM;

	for (i = 0; i < n_chans; i++) {
		cchan = &chans[i];

		cchan->cdd = cdd;
		if (i & 1) {
			cchan->gcr_reg = cdd->ctrl_mem + DMA_TXGCR(i >> 1);
			cchan->is_tx = 1;
		} else {
			cchan->gcr_reg = cdd->ctrl_mem + DMA_RXGCR(i >> 1);
			cchan->is_tx = 0;
		}
		cchan->port_num = i >> 1;
		cchan->desc = &cdd->cd[i];
		cchan->desc_phys = cdd->descs_phys;
		cchan->desc_phys += i * sizeof(struct cppi41_desc);
		cchan->chan.device = &cdd->ddev;
		list_add_tail(&cchan->chan.device_node, &cdd->ddev.channels);
	}
	cdd->first_td_desc = n_chans;

	return 0;
}
static void purge_descs(struct device *dev, struct cppi41_dd *cdd)
{
	unsigned int mem_decs;
	int i;

	mem_decs = ALLOC_DECS_NUM * sizeof(struct cppi41_desc);

	for (i = 0; i < DESCS_AREAS; i++) {

		cppi_writel(0, cdd->qmgr_mem + QMGR_MEMBASE(i));
		cppi_writel(0, cdd->qmgr_mem + QMGR_MEMCTRL(i));

		dma_free_coherent(dev, mem_decs, cdd->cd,
				cdd->descs_phys);
	}
}
static void disable_sched(struct cppi41_dd *cdd)
{
	cppi_writel(0, cdd->sched_mem + DMA_SCHED_CTRL);
}
static void deinit_cppi41(struct device *dev, struct cppi41_dd *cdd)
{
	disable_sched(cdd);

	purge_descs(dev, cdd);

	cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM0_BASE);
	cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE);
	dma_free_coherent(dev, QMGR_SCRATCH_SIZE, cdd->qmgr_scratch,
			cdd->scratch_phys);
}
static int init_descs(struct device *dev, struct cppi41_dd *cdd)
{
	unsigned int desc_size;
	unsigned int mem_decs;
	int i;
	u32 reg;
	u32 idx;

	BUILD_BUG_ON(sizeof(struct cppi41_desc) &
			(sizeof(struct cppi41_desc) - 1));
	BUILD_BUG_ON(sizeof(struct cppi41_desc) < 32);
	BUILD_BUG_ON(ALLOC_DECS_NUM < 32);

	desc_size = sizeof(struct cppi41_desc);
	mem_decs = ALLOC_DECS_NUM * desc_size;

	idx = 0;
	for (i = 0; i < DESCS_AREAS; i++) {

		reg = idx << QMGR_MEMCTRL_IDX_SH;
		reg |= (ilog2(desc_size) - 5) << QMGR_MEMCTRL_DESC_SH;
		reg |= ilog2(ALLOC_DECS_NUM) - 5;

		BUILD_BUG_ON(DESCS_AREAS != 1);
		cdd->cd = dma_alloc_coherent(dev, mem_decs,
				&cdd->descs_phys, GFP_KERNEL);
		if (!cdd->cd)
			return -ENOMEM;

		cppi_writel(cdd->descs_phys, cdd->qmgr_mem + QMGR_MEMBASE(i));
		cppi_writel(reg, cdd->qmgr_mem + QMGR_MEMCTRL(i));

		idx += ALLOC_DECS_NUM;
	}
	return 0;
}
static void init_sched(struct cppi41_dd *cdd)
{
	unsigned ch;
	unsigned word;
	u32 reg;

	word = 0;
	cppi_writel(0, cdd->sched_mem + DMA_SCHED_CTRL);
	for (ch = 0; ch < cdd->n_chans; ch += 2) {

		reg = SCHED_ENTRY0_CHAN(ch);
		reg |= SCHED_ENTRY1_CHAN(ch) | SCHED_ENTRY1_IS_RX;

		reg |= SCHED_ENTRY2_CHAN(ch + 1);
		reg |= SCHED_ENTRY3_CHAN(ch + 1) | SCHED_ENTRY3_IS_RX;
		cppi_writel(reg, cdd->sched_mem + DMA_SCHED_WORD(word));
		word++;
	}
	reg = cdd->n_chans * 2 - 1;
	reg |= DMA_SCHED_CTRL_EN;
	cppi_writel(reg, cdd->sched_mem + DMA_SCHED_CTRL);
}
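
/*
 * With ch = 0, for example, the first schedule word above becomes
 * TX0, RX0, TX1, RX1, so the scheduler round-robins every channel in
 * both directions. The last-entry count written to DMA_SCHED_CTRL is
 * n_chans * 2 - 1 because each channel contributes a TX and an RX entry.
 */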
static int init_cppi41(struct device *dev, struct cppi41_dd *cdd)
{
	int ret;

	BUILD_BUG_ON(QMGR_SCRATCH_SIZE > ((1 << 14) - 1));
	cdd->qmgr_scratch = dma_alloc_coherent(dev, QMGR_SCRATCH_SIZE,
			&cdd->scratch_phys, GFP_KERNEL);
	if (!cdd->qmgr_scratch)
		return -ENOMEM;

	cppi_writel(cdd->scratch_phys, cdd->qmgr_mem + QMGR_LRAM0_BASE);
	cppi_writel(TOTAL_DESCS_NUM, cdd->qmgr_mem + QMGR_LRAM_SIZE);
	cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE);

	ret = init_descs(dev, cdd);
	if (ret)
		goto err_td;

	cppi_writel(cdd->td_queue.submit, cdd->ctrl_mem + DMA_TDFDQ);
	init_sched(cdd);

	return 0;
err_td:
	deinit_cppi41(dev, cdd);
	return ret;
}
static struct platform_driver cpp41_dma_driver;

/*
 * The param format is:
 * X Y
 * X: Port
 * Y: 0 = RX else TX
 */
#define INFO_PORT	0
#define INFO_IS_TX	1

static bool cpp41_dma_filter_fn(struct dma_chan *chan, void *param)
{
	struct cppi41_channel *cchan;
	struct cppi41_dd *cdd;
	const struct chan_queues *queues;
	u32 *num = param;

	if (chan->device->dev->driver != &cpp41_dma_driver.driver)
		return false;

	cchan = to_cpp41_chan(chan);

	if (cchan->port_num != num[INFO_PORT])
		return false;

	if (cchan->is_tx && !num[INFO_IS_TX])
		return false;
	cdd = cchan->cdd;
	if (cchan->is_tx)
		queues = cdd->queues_tx;
	else
		queues = cdd->queues_rx;

	BUILD_BUG_ON(ARRAY_SIZE(am335x_usb_queues_rx) !=
		     ARRAY_SIZE(am335x_usb_queues_tx));
	if (WARN_ON(cchan->port_num >= ARRAY_SIZE(am335x_usb_queues_rx)))
		return false;

	cchan->q_num = queues[cchan->port_num].submit;
	cchan->q_comp_num = queues[cchan->port_num].complete;
	return true;
}
static struct of_dma_filter_info cpp41_dma_info = {
	.filter_fn = cpp41_dma_filter_fn,
};
static struct dma_chan *cppi41_dma_xlate(struct of_phandle_args *dma_spec,
		struct of_dma *ofdma)
{
	int count = dma_spec->args_count;
	struct of_dma_filter_info *info = ofdma->of_dma_data;

	if (!info || !info->filter_fn)
		return NULL;

	if (count != 2)
		return NULL;

	return dma_request_channel(info->dma_cap, info->filter_fn,
			&dma_spec->args[0]);
}
static const struct cppi_glue_infos am335x_usb_infos = {
	.queues_rx = am335x_usb_queues_rx,
	.queues_tx = am335x_usb_queues_tx,
	.td_queue = { .submit = 31, .complete = 0 },
	.first_completion_queue = 93,
	.qmgr_num_pend = 5,
};

static const struct cppi_glue_infos da8xx_usb_infos = {
	.queues_rx = da8xx_usb_queues_rx,
	.queues_tx = da8xx_usb_queues_tx,
	.td_queue = { .submit = 31, .complete = 0 },
	.first_completion_queue = 24,
	.qmgr_num_pend = 2,
};
static const struct of_device_id cppi41_dma_ids[] = {
	{ .compatible = "ti,am3359-cppi41", .data = &am335x_usb_infos},
	{ .compatible = "ti,da830-cppi41", .data = &da8xx_usb_infos},
	{},
};
MODULE_DEVICE_TABLE(of, cppi41_dma_ids);
static const struct cppi_glue_infos *get_glue_info(struct device *dev)
{
	const struct of_device_id *of_id;

	of_id = of_match_node(cppi41_dma_ids, dev->of_node);
	return of_id ? of_id->data : NULL;
}
#define CPPI41_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
				BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
static int cppi41_dma_probe(struct platform_device *pdev)
{
	struct cppi41_dd *cdd;
	struct device *dev = &pdev->dev;
	const struct cppi_glue_infos *glue_info;
	struct resource *mem;
	int index;
	int irq;
	int ret;

	glue_info = get_glue_info(dev);
	if (!glue_info)
		return -EINVAL;

	cdd = devm_kzalloc(&pdev->dev, sizeof(*cdd), GFP_KERNEL);
	if (!cdd)
		return -ENOMEM;

	dma_cap_set(DMA_SLAVE, cdd->ddev.cap_mask);
	cdd->ddev.device_alloc_chan_resources = cppi41_dma_alloc_chan_resources;
	cdd->ddev.device_free_chan_resources = cppi41_dma_free_chan_resources;
	cdd->ddev.device_tx_status = cppi41_dma_tx_status;
	cdd->ddev.device_issue_pending = cppi41_dma_issue_pending;
	cdd->ddev.device_prep_slave_sg = cppi41_dma_prep_slave_sg;
	cdd->ddev.device_terminate_all = cppi41_stop_chan;
	cdd->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	cdd->ddev.src_addr_widths = CPPI41_DMA_BUSWIDTHS;
	cdd->ddev.dst_addr_widths = CPPI41_DMA_BUSWIDTHS;
	cdd->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	cdd->ddev.dev = dev;
	INIT_LIST_HEAD(&cdd->ddev.channels);
	cpp41_dma_info.dma_cap = cdd->ddev.cap_mask;

	index = of_property_match_string(dev->of_node,
					 "reg-names", "controller");
	if (index < 0)
		return index;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, index);
	cdd->ctrl_mem = devm_ioremap_resource(dev, mem);
	if (IS_ERR(cdd->ctrl_mem))
		return PTR_ERR(cdd->ctrl_mem);

	mem = platform_get_resource(pdev, IORESOURCE_MEM, index + 1);
	cdd->sched_mem = devm_ioremap_resource(dev, mem);
	if (IS_ERR(cdd->sched_mem))
		return PTR_ERR(cdd->sched_mem);

	mem = platform_get_resource(pdev, IORESOURCE_MEM, index + 2);
	cdd->qmgr_mem = devm_ioremap_resource(dev, mem);
	if (IS_ERR(cdd->qmgr_mem))
		return PTR_ERR(cdd->qmgr_mem);

	spin_lock_init(&cdd->lock);
	INIT_LIST_HEAD(&cdd->pending);

	platform_set_drvdata(pdev, cdd);

	pm_runtime_enable(dev);
	pm_runtime_set_autosuspend_delay(dev, 100);
	pm_runtime_use_autosuspend(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0)
		goto err_get_sync;

	cdd->queues_rx = glue_info->queues_rx;
	cdd->queues_tx = glue_info->queues_tx;
	cdd->td_queue = glue_info->td_queue;
	cdd->qmgr_num_pend = glue_info->qmgr_num_pend;
	cdd->first_completion_queue = glue_info->first_completion_queue;

	ret = of_property_read_u32(dev->of_node,
				   "#dma-channels", &cdd->n_chans);
	if (ret)
		goto err_get_n_chans;

	ret = init_cppi41(dev, cdd);
	if (ret)
		goto err_init_cppi;

	ret = cppi41_add_chans(dev, cdd);
	if (ret)
		goto err_chans;

	irq = irq_of_parse_and_map(dev->of_node, 0);
	if (!irq) {
		ret = -EINVAL;
		goto err_chans;
	}

	ret = devm_request_irq(&pdev->dev, irq, cppi41_irq, IRQF_SHARED,
			dev_name(dev), cdd);
	if (ret)
		goto err_chans;
	cdd->irq = irq;

	ret = dma_async_device_register(&cdd->ddev);
	if (ret)
		goto err_chans;

	ret = of_dma_controller_register(dev->of_node,
					 cppi41_dma_xlate, &cpp41_dma_info);
	if (ret)
		goto err_of;

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	return 0;
err_of:
	dma_async_device_unregister(&cdd->ddev);
err_chans:
	deinit_cppi41(dev, cdd);
err_init_cppi:
	pm_runtime_dont_use_autosuspend(dev);
err_get_n_chans:
err_get_sync:
	pm_runtime_put_sync(dev);
	pm_runtime_disable(dev);
	return ret;
}
static int cppi41_dma_remove(struct platform_device *pdev)
{
	struct cppi41_dd *cdd = platform_get_drvdata(pdev);
	int error;

	error = pm_runtime_get_sync(&pdev->dev);
	if (error < 0)
		dev_err(&pdev->dev, "%s could not pm_runtime_get: %i\n",
			__func__, error);

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&cdd->ddev);

	devm_free_irq(&pdev->dev, cdd->irq, cdd);
	deinit_cppi41(&pdev->dev, cdd);
	pm_runtime_dont_use_autosuspend(&pdev->dev);
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return 0;
}
static int __maybe_unused cppi41_suspend(struct device *dev)
{
	struct cppi41_dd *cdd = dev_get_drvdata(dev);

	cdd->dma_tdfdq = cppi_readl(cdd->ctrl_mem + DMA_TDFDQ);
	disable_sched(cdd);

	return 0;
}

static int __maybe_unused cppi41_resume(struct device *dev)
{
	struct cppi41_dd *cdd = dev_get_drvdata(dev);
	struct cppi41_channel *c;
	int i;

	for (i = 0; i < DESCS_AREAS; i++)
		cppi_writel(cdd->descs_phys, cdd->qmgr_mem + QMGR_MEMBASE(i));

	list_for_each_entry(c, &cdd->ddev.channels, chan.device_node)
		if (!c->is_tx)
			cppi_writel(c->q_num, c->gcr_reg + RXHPCRA0);

	init_sched(cdd);

	cppi_writel(cdd->dma_tdfdq, cdd->ctrl_mem + DMA_TDFDQ);
	cppi_writel(cdd->scratch_phys, cdd->qmgr_mem + QMGR_LRAM0_BASE);
	cppi_writel(QMGR_SCRATCH_SIZE, cdd->qmgr_mem + QMGR_LRAM_SIZE);
	cppi_writel(0, cdd->qmgr_mem + QMGR_LRAM1_BASE);

	return 0;
}
static int __maybe_unused cppi41_runtime_suspend(struct device *dev)
{
	struct cppi41_dd *cdd = dev_get_drvdata(dev);
	unsigned long flags;

	spin_lock_irqsave(&cdd->lock, flags);
	cdd->is_suspended = true;
	WARN_ON(!list_empty(&cdd->pending));
	spin_unlock_irqrestore(&cdd->lock, flags);

	return 0;
}

static int __maybe_unused cppi41_runtime_resume(struct device *dev)
{
	struct cppi41_dd *cdd = dev_get_drvdata(dev);
	unsigned long flags;

	spin_lock_irqsave(&cdd->lock, flags);
	cdd->is_suspended = false;
	cppi41_run_queue(cdd);
	spin_unlock_irqrestore(&cdd->lock, flags);

	return 0;
}
static const struct dev_pm_ops cppi41_pm_ops = {
	SET_LATE_SYSTEM_SLEEP_PM_OPS(cppi41_suspend, cppi41_resume)
	SET_RUNTIME_PM_OPS(cppi41_runtime_suspend,
			   cppi41_runtime_resume,
			   NULL)
};
static struct platform_driver cpp41_dma_driver = {
	.probe = cppi41_dma_probe,
	.remove = cppi41_dma_remove,
	.driver = {
		.name = "cppi41-dma-engine",
		.pm = &cppi41_pm_ops,
		.of_match_table = of_match_ptr(cppi41_dma_ids),
	},
};
module_platform_driver(cpp41_dma_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sebastian Andrzej Siewior <bigeasy@linutronix.de>");