// SPDX-License-Identifier: GPL-2.0
/*
 * Core driver for the Synopsys DesignWare DMA Controller
 *
 * Copyright (C) 2007-2008 Atmel Corporation
 * Copyright (C) 2010-2011 ST Microelectronics
 * Copyright (C) 2013 Intel Corporation
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>

#include "../dmaengine.h"
#include "internal.h"

/*
 * This supports the Synopsys "DesignWare AHB Central DMA Controller",
 * (DW_ahb_dmac) which is used with various AMBA 2.0 systems (not all
 * of which use ARM any more). See the "Databook" from Synopsys for
 * information beyond what licensees probably provide.
 *
 * The driver has been tested with the Atmel AT32AP7000, which does not
 * support descriptor writeback.
 */

/* The set of bus widths supported by the DMA controller */
#define DW_DMA_BUSWIDTHS			  \
	BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED)	| \
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE)		| \
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES)		| \
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)

/*----------------------------------------------------------------------*/

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static struct dw_desc *dwc_first_active(struct dw_dma_chan *dwc)
{
	return to_dw_desc(dwc->active_list.next);
}

static dma_cookie_t dwc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dw_desc		*desc = txd_to_dw_desc(tx);
	struct dw_dma_chan	*dwc = to_dw_dma_chan(tx->chan);
	dma_cookie_t		cookie;
	unsigned long		flags;

	spin_lock_irqsave(&dwc->lock, flags);
	cookie = dma_cookie_assign(tx);

	/*
	 * REVISIT: We should attempt to chain as many descriptors as
	 * possible, perhaps even appending to those already submitted
	 * for DMA. But this is hard to do in a race-free manner.
	 */

	list_add_tail(&desc->desc_node, &dwc->queue);
	spin_unlock_irqrestore(&dwc->lock, flags);
	dev_vdbg(chan2dev(tx->chan), "%s: queued %u\n",
		 __func__, desc->txd.cookie);

	return cookie;
}

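/*
 * For context (illustrative, not part of this driver): a dmaengine client
 * reaches dwc_tx_submit() through the generic submission helpers. A minimal
 * sketch, assuming "chan" is a channel of this controller and "desc" came
 * from one of the prep callbacks below:
 *
 *	dma_cookie_t cookie = dmaengine_submit(desc);	// calls dwc_tx_submit()
 *
 *	dma_async_issue_pending(chan);			// kicks dwc_issue_pending()
 */
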
static struct dw_desc *dwc_desc_get(struct dw_dma_chan *dwc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	struct dw_desc *desc;
	dma_addr_t phys;

	desc = dma_pool_zalloc(dw->desc_pool, GFP_ATOMIC, &phys);
	if (!desc)
		return NULL;

	dwc->descs_allocated++;
	INIT_LIST_HEAD(&desc->tx_list);
	dma_async_tx_descriptor_init(&desc->txd, &dwc->chan);
	desc->txd.tx_submit = dwc_tx_submit;
	desc->txd.flags = DMA_CTRL_ACK;
	desc->txd.phys = phys;

	return desc;
}

static void dwc_desc_put(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	struct dw_desc *child, *_next;

	if (unlikely(!desc))
		return;

	list_for_each_entry_safe(child, _next, &desc->tx_list, desc_node) {
		list_del(&child->desc_node);
		dma_pool_free(dw->desc_pool, child, child->txd.phys);
		dwc->descs_allocated--;
	}

	dma_pool_free(dw->desc_pool, desc, desc->txd.phys);
	dwc->descs_allocated--;
}

static void dwc_initialize(struct dw_dma_chan *dwc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);

	dw->initialize_chan(dwc);

	/* Enable interrupts */
	channel_set_bit(dw, MASK.XFER, dwc->mask);
	channel_set_bit(dw, MASK.ERROR, dwc->mask);
}

/*----------------------------------------------------------------------*/

static inline void dwc_dump_chan_regs(struct dw_dma_chan *dwc)
{
	dev_err(chan2dev(&dwc->chan),
		"  SAR: 0x%x DAR: 0x%x LLP: 0x%x CTL: 0x%x:%08x\n",
		channel_readl(dwc, SAR),
		channel_readl(dwc, DAR),
		channel_readl(dwc, LLP),
		channel_readl(dwc, CTL_HI),
		channel_readl(dwc, CTL_LO));
}

static inline void dwc_chan_disable(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	channel_clear_bit(dw, CH_EN, dwc->mask);
	while (dma_readl(dw, CH_EN) & dwc->mask)
		cpu_relax();
}

/*----------------------------------------------------------------------*/

/* Perform single block transfer */
static inline void dwc_do_single_block(struct dw_dma_chan *dwc,
				       struct dw_desc *desc)
{
	struct dw_dma	*dw = to_dw_dma(dwc->chan.device);
	u32		ctllo;

	/*
	 * Software emulation of LLP mode relies on interrupts to continue
	 * a multi-block transfer.
	 */
	ctllo = lli_read(desc, ctllo) | DWC_CTLL_INT_EN;

	channel_writel(dwc, SAR, lli_read(desc, sar));
	channel_writel(dwc, DAR, lli_read(desc, dar));
	channel_writel(dwc, CTL_LO, ctllo);
	channel_writel(dwc, CTL_HI, lli_read(desc, ctlhi));
	channel_set_bit(dw, CH_EN, dwc->mask);

	/* Move pointer to next descriptor */
	dwc->tx_node_active = dwc->tx_node_active->next;
}

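/*
 * Note on "soft LLP" (grounded in the code below, not in the Databook):
 * controllers synthesized without hardware linked-list support (dwc->nollp)
 * transfer one block at a time. dwc_dostart() submits the first block via
 * dwc_do_single_block(); each XFER interrupt then lands in
 * dwc_scan_descriptors(), which advances tx_node_active and submits the
 * next block until the descriptor chain is exhausted.
 */
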
/* Called with dwc->lock held and bh disabled */
static void dwc_dostart(struct dw_dma_chan *dwc, struct dw_desc *first)
{
	struct dw_dma	*dw = to_dw_dma(dwc->chan.device);
	u8		lms = DWC_LLP_LMS(dwc->dws.m_master);
	unsigned long	was_soft_llp;

	/* ASSERT: channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"%s: BUG: Attempted to start non-idle channel\n",
			__func__);
		dwc_dump_chan_regs(dwc);

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	if (dwc->nollp) {
		was_soft_llp = test_and_set_bit(DW_DMA_IS_SOFT_LLP,
						&dwc->flags);
		if (was_soft_llp) {
			dev_err(chan2dev(&dwc->chan),
				"BUG: Attempted to start new LLP transfer inside ongoing one\n");
			return;
		}

		dwc_initialize(dwc);

		first->residue = first->total_len;
		dwc->tx_node_active = &first->tx_list;

		/* Submit first block */
		dwc_do_single_block(dwc, first);

		return;
	}

	dwc_initialize(dwc);

	channel_writel(dwc, LLP, first->txd.phys | lms);
	channel_writel(dwc, CTL_LO, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	channel_writel(dwc, CTL_HI, 0);
	channel_set_bit(dw, CH_EN, dwc->mask);
}

static void dwc_dostart_first_queued(struct dw_dma_chan *dwc)
{
	struct dw_desc *desc;

	if (list_empty(&dwc->queue))
		return;

	list_move(dwc->queue.next, &dwc->active_list);
	desc = dwc_first_active(dwc);
	dev_vdbg(chan2dev(&dwc->chan), "%s: started %u\n", __func__, desc->txd.cookie);
	dwc_dostart(dwc, desc);
}

/*----------------------------------------------------------------------*/

static void
dwc_descriptor_complete(struct dw_dma_chan *dwc, struct dw_desc *desc,
		bool callback_required)
{
	struct dma_async_tx_descriptor	*txd = &desc->txd;
	struct dw_desc			*child;
	unsigned long			flags;
	struct dmaengine_desc_callback	cb;

	dev_vdbg(chan2dev(&dwc->chan), "descriptor %u complete\n", txd->cookie);

	spin_lock_irqsave(&dwc->lock, flags);
	dma_cookie_complete(txd);
	if (callback_required)
		dmaengine_desc_get_callback(txd, &cb);
	else
		memset(&cb, 0, sizeof(cb));

	/* async_tx_ack */
	list_for_each_entry(child, &desc->tx_list, desc_node)
		async_tx_ack(&child->txd);
	async_tx_ack(&desc->txd);
	dwc_desc_put(dwc, desc);
	spin_unlock_irqrestore(&dwc->lock, flags);

	dmaengine_desc_callback_invoke(&cb, NULL);
}

static void dwc_complete_all(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *desc, *_desc;
	LIST_HEAD(list);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_err(chan2dev(&dwc->chan),
			"BUG: XFER bit set, but channel not idle!\n");

		/* Try to continue after resetting the channel... */
		dwc_chan_disable(dw, dwc);
	}

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	list_splice_init(&dwc->active_list, &list);
	dwc_dostart_first_queued(dwc);

	spin_unlock_irqrestore(&dwc->lock, flags);

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		dwc_descriptor_complete(dwc, desc, true);
}

/* Returns how many bytes were already received from source */
static inline u32 dwc_get_sent(struct dw_dma_chan *dwc)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	u32 ctlhi = channel_readl(dwc, CTL_HI);
	u32 ctllo = channel_readl(dwc, CTL_LO);

	/* CTL_LO[6:4] is SRC_TR_WIDTH, needed to convert blocks to bytes */
	return dw->block2bytes(dwc, ctlhi, ctllo >> 4 & 7);
}

static void dwc_scan_descriptors(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	dma_addr_t llp;
	struct dw_desc *desc, *_desc;
	struct dw_desc *child;
	u32 status_xfer;
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	llp = channel_readl(dwc, LLP);
	status_xfer = dma_readl(dw, RAW.XFER);

	if (status_xfer & dwc->mask) {
		/* Everything we've submitted is done */
		dma_writel(dw, CLEAR.XFER, dwc->mask);

		if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
			struct list_head *head, *active = dwc->tx_node_active;

			/*
			 * We are inside the first active descriptor.
			 * Otherwise something is really wrong.
			 */
			desc = dwc_first_active(dwc);

			head = &desc->tx_list;
			if (active != head) {
				/* Update residue to reflect last sent descriptor */
				if (active == head->next)
					desc->residue -= desc->len;
				else
					desc->residue -= to_dw_desc(active->prev)->len;

				child = to_dw_desc(active);

				/* Submit next block */
				dwc_do_single_block(dwc, child);

				spin_unlock_irqrestore(&dwc->lock, flags);
				return;
			}

			/* We are done here */
			clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);
		}

		spin_unlock_irqrestore(&dwc->lock, flags);

		dwc_complete_all(dw, dwc);
		return;
	}

	if (list_empty(&dwc->active_list)) {
		spin_unlock_irqrestore(&dwc->lock, flags);
		return;
	}

	if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags)) {
		dev_vdbg(chan2dev(&dwc->chan), "%s: soft LLP mode\n", __func__);
		spin_unlock_irqrestore(&dwc->lock, flags);
		return;
	}

	dev_vdbg(chan2dev(&dwc->chan), "%s: llp=%pad\n", __func__, &llp);

	list_for_each_entry_safe(desc, _desc, &dwc->active_list, desc_node) {
		/* Initial residue value */
		desc->residue = desc->total_len;

		/* Check first descriptor's addr */
		if (desc->txd.phys == DWC_LLP_LOC(llp)) {
			spin_unlock_irqrestore(&dwc->lock, flags);
			return;
		}

		/* Check first descriptor's llp */
		if (lli_read(desc, llp) == llp) {
			/* This one is currently in progress */
			desc->residue -= dwc_get_sent(dwc);
			spin_unlock_irqrestore(&dwc->lock, flags);
			return;
		}

		desc->residue -= desc->len;
		list_for_each_entry(child, &desc->tx_list, desc_node) {
			if (lli_read(child, llp) == llp) {
				/* Currently in progress */
				desc->residue -= dwc_get_sent(dwc);
				spin_unlock_irqrestore(&dwc->lock, flags);
				return;
			}
			desc->residue -= child->len;
		}

		/*
		 * No descriptors so far seem to be in progress, i.e.
		 * this one must be done.
		 */
		spin_unlock_irqrestore(&dwc->lock, flags);
		dwc_descriptor_complete(dwc, desc, true);
		spin_lock_irqsave(&dwc->lock, flags);
	}

	dev_err(chan2dev(&dwc->chan),
		"BUG: All descriptors done, but channel not idle!\n");

	/* Try to continue after resetting the channel... */
	dwc_chan_disable(dw, dwc);

	dwc_dostart_first_queued(dwc);
	spin_unlock_irqrestore(&dwc->lock, flags);
}

static inline void dwc_dump_lli(struct dw_dma_chan *dwc, struct dw_desc *desc)
{
	dev_crit(chan2dev(&dwc->chan), "  desc: s0x%x d0x%x l0x%x c0x%x:%x\n",
		 lli_read(desc, sar),
		 lli_read(desc, dar),
		 lli_read(desc, llp),
		 lli_read(desc, ctlhi),
		 lli_read(desc, ctllo));
}

static void dwc_handle_error(struct dw_dma *dw, struct dw_dma_chan *dwc)
{
	struct dw_desc *bad_desc;
	struct dw_desc *child;
	unsigned long flags;

	dwc_scan_descriptors(dw, dwc);

	spin_lock_irqsave(&dwc->lock, flags);

	/*
	 * The descriptor currently at the head of the active list is
	 * borked. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	bad_desc = dwc_first_active(dwc);
	list_del_init(&bad_desc->desc_node);
	list_move(dwc->queue.next, dwc->active_list.prev);

	/* Clear the error flag and try to restart the controller */
	dma_writel(dw, CLEAR.ERROR, dwc->mask);
	if (!list_empty(&dwc->active_list))
		dwc_dostart(dwc, dwc_first_active(dwc));

	/*
	 * WARN may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_WARN(chan2dev(&dwc->chan), "Bad descriptor submitted for DMA!\n"
				       "  cookie: %d\n", bad_desc->txd.cookie);
	dwc_dump_lli(dwc, bad_desc);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		dwc_dump_lli(dwc, child);

	spin_unlock_irqrestore(&dwc->lock, flags);

	/* Pretend the descriptor completed successfully */
	dwc_descriptor_complete(dwc, bad_desc, true);
}

static void dw_dma_tasklet(unsigned long data)
{
	struct dw_dma *dw = (struct dw_dma *)data;
	struct dw_dma_chan *dwc;
	u32 status_xfer;
	u32 status_err;
	unsigned int i;

	status_xfer = dma_readl(dw, RAW.XFER);
	status_err = dma_readl(dw, RAW.ERROR);

	dev_vdbg(dw->dma.dev, "%s: status_err=%x\n", __func__, status_err);

	for (i = 0; i < dw->dma.chancnt; i++) {
		dwc = &dw->chan[i];
		if (test_bit(DW_DMA_IS_CYCLIC, &dwc->flags))
			dev_vdbg(dw->dma.dev, "Cyclic xfer is not implemented\n");
		else if (status_err & (1 << i))
			dwc_handle_error(dw, dwc);
		else if (status_xfer & (1 << i))
			dwc_scan_descriptors(dw, dwc);
	}

	/* Re-enable interrupts */
	channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
}

static irqreturn_t dw_dma_interrupt(int irq, void *dev_id)
{
	struct dw_dma *dw = dev_id;
	u32 status;

	/* Check if we have any interrupt from the DMAC which is not in use */
	if (!dw->in_use)
		return IRQ_NONE;

	status = dma_readl(dw, STATUS_INT);
	dev_vdbg(dw->dma.dev, "%s: status=0x%x\n", __func__, status);

	/* Check if we have any interrupt from the DMAC */
	if (!status)
		return IRQ_NONE;

	/*
	 * Just disable the interrupts. We'll turn them back on in the
	 * softirq handler.
	 */
	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	status = dma_readl(dw, STATUS_INT);
	if (status) {
		dev_err(dw->dma.dev,
			"BUG: Unexpected interrupts pending: 0x%x\n",
			status);

		/* Try to recover */
		channel_clear_bit(dw, MASK.XFER, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.BLOCK, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.SRC_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.DST_TRAN, (1 << 8) - 1);
		channel_clear_bit(dw, MASK.ERROR, (1 << 8) - 1);
	}

	tasklet_schedule(&dw->tasklet);

	return IRQ_HANDLED;
}

/*----------------------------------------------------------------------*/

static struct dma_async_tx_descriptor *
dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dw_desc		*desc;
	struct dw_desc		*first;
	struct dw_desc		*prev;
	size_t			xfer_count;
	size_t			offset;
	u8			m_master = dwc->dws.m_master;
	unsigned int		src_width;
	unsigned int		dst_width;
	unsigned int		data_width = dw->pdata->data_width[m_master];
	u32			ctllo, ctlhi;
	u8			lms = DWC_LLP_LMS(m_master);

	dev_vdbg(chan2dev(chan),
			"%s: d%pad s%pad l0x%zx f0x%lx\n", __func__,
			&dest, &src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
		return NULL;
	}

	dwc->direction = DMA_MEM_TO_MEM;

	src_width = dst_width = __ffs(data_width | src | dest | len);

	ctllo = dw->prepare_ctllo(dwc)
			| DWC_CTLL_DST_WIDTH(dst_width)
			| DWC_CTLL_SRC_WIDTH(src_width)
			| DWC_CTLL_DST_INC
			| DWC_CTLL_SRC_INC
			| DWC_CTLL_FC_M2M;
	prev = first = NULL;

	for (offset = 0; offset < len; offset += xfer_count) {
		desc = dwc_desc_get(dwc);
		if (!desc)
			goto err_desc_get;

		ctlhi = dw->bytes2block(dwc, len - offset, src_width, &xfer_count);

		lli_write(desc, sar, src + offset);
		lli_write(desc, dar, dest + offset);
		lli_write(desc, ctllo, ctllo);
		lli_write(desc, ctlhi, ctlhi);
		desc->len = xfer_count;

		if (!first) {
			first = desc;
		} else {
			lli_write(prev, llp, desc->txd.phys | lms);
			list_add_tail(&desc->desc_node, &first->tx_list);
		}
		prev = desc;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		lli_set(prev, ctllo, DWC_CTLL_INT_EN);

	prev->lli.llp = 0;
	lli_clear(prev, ctllo, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	first->txd.flags = flags;
	first->total_len = len;

	return &first->txd;

err_desc_get:
	dwc_desc_put(dwc, first);
	return NULL;
}

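/*
 * Illustrative client-side use of the memcpy path above (not part of this
 * driver); assumes "chan" was granted with the DMA_MEMCPY capability and
 * "src"/"dst" are DMA-mapped addresses:
 *
 *	struct dma_async_tx_descriptor *txd;
 *
 *	txd = dmaengine_prep_dma_memcpy(chan, dst, src, len,
 *					DMA_PREP_INTERRUPT);
 *	if (txd) {
 *		dmaengine_submit(txd);
 *		dma_async_issue_pending(chan);
 *	}
 */
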
static struct dma_async_tx_descriptor *
dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct dw_dma_chan	*dwc = to_dw_dma_chan(chan);
	struct dw_dma		*dw = to_dw_dma(chan->device);
	struct dma_slave_config	*sconfig = &dwc->dma_sconfig;
	struct dw_desc		*prev;
	struct dw_desc		*first;
	u32			ctllo, ctlhi;
	u8			m_master = dwc->dws.m_master;
	u8			lms = DWC_LLP_LMS(m_master);
	dma_addr_t		reg;
	unsigned int		reg_width;
	unsigned int		mem_width;
	unsigned int		data_width = dw->pdata->data_width[m_master];
	unsigned int		i;
	struct scatterlist	*sg;
	size_t			total_len = 0;

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	if (unlikely(!is_slave_direction(direction) || !sg_len))
		return NULL;

	dwc->direction = direction;

	prev = first = NULL;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		reg_width = __ffs(sconfig->dst_addr_width);
		reg = sconfig->dst_addr;
		ctllo = dw->prepare_ctllo(dwc)
				| DWC_CTLL_DST_WIDTH(reg_width)
				| DWC_CTLL_DST_FIX
				| DWC_CTLL_SRC_INC;

		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_M2P) :
			DWC_CTLL_FC(DW_DMA_FC_D_M2P);

		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc	*desc;
			u32		len, mem;
			size_t		dlen;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);

			mem_width = __ffs(data_width | mem | len);

slave_sg_todev_fill_desc:
			desc = dwc_desc_get(dwc);
			if (!desc)
				goto err_desc_get;

			ctlhi = dw->bytes2block(dwc, len, mem_width, &dlen);

			lli_write(desc, sar, mem);
			lli_write(desc, dar, reg);
			lli_write(desc, ctlhi, ctlhi);
			lli_write(desc, ctllo, ctllo | DWC_CTLL_SRC_WIDTH(mem_width));
			desc->len = dlen;

			if (!first) {
				first = desc;
			} else {
				lli_write(prev, llp, desc->txd.phys | lms);
				list_add_tail(&desc->desc_node, &first->tx_list);
			}
			prev = desc;

			mem += dlen;
			len -= dlen;
			total_len += dlen;

			if (len)
				goto slave_sg_todev_fill_desc;
		}
		break;
	case DMA_DEV_TO_MEM:
		reg_width = __ffs(sconfig->src_addr_width);
		reg = sconfig->src_addr;
		ctllo = dw->prepare_ctllo(dwc)
				| DWC_CTLL_SRC_WIDTH(reg_width)
				| DWC_CTLL_DST_INC
				| DWC_CTLL_SRC_FIX;

		ctllo |= sconfig->device_fc ? DWC_CTLL_FC(DW_DMA_FC_P_P2M) :
			DWC_CTLL_FC(DW_DMA_FC_D_P2M);

		for_each_sg(sgl, sg, sg_len, i) {
			struct dw_desc	*desc;
			u32		len, mem;
			size_t		dlen;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);

slave_sg_fromdev_fill_desc:
			desc = dwc_desc_get(dwc);
			if (!desc)
				goto err_desc_get;

			ctlhi = dw->bytes2block(dwc, len, reg_width, &dlen);

			lli_write(desc, sar, reg);
			lli_write(desc, dar, mem);
			lli_write(desc, ctlhi, ctlhi);
			mem_width = __ffs(data_width | mem | dlen);
			lli_write(desc, ctllo, ctllo | DWC_CTLL_DST_WIDTH(mem_width));
			desc->len = dlen;

			if (!first) {
				first = desc;
			} else {
				lli_write(prev, llp, desc->txd.phys | lms);
				list_add_tail(&desc->desc_node, &first->tx_list);
			}
			prev = desc;

			mem += dlen;
			len -= dlen;
			total_len += dlen;

			if (len)
				goto slave_sg_fromdev_fill_desc;
		}
		break;
	default:
		return NULL;
	}

	if (flags & DMA_PREP_INTERRUPT)
		/* Trigger interrupt after last block */
		lli_set(prev, ctllo, DWC_CTLL_INT_EN);

	prev->lli.llp = 0;
	lli_clear(prev, ctllo, DWC_CTLL_LLP_D_EN | DWC_CTLL_LLP_S_EN);
	first->total_len = total_len;

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan),
		"not enough descriptors available. Direction %d\n", direction);
	dwc_desc_put(dwc, first);
	return NULL;
}

bool dw_dma_filter(struct dma_chan *chan, void *param)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma_slave *dws = param;

	if (dws->dma_dev != chan->device->dev)
		return false;

	/* Permit channels in accordance with the channels mask */
	if (dws->channels && !(dws->channels & dwc->mask))
		return false;

	/* We have to copy data since dws can be temporary storage */
	memcpy(&dwc->dws, dws, sizeof(struct dw_dma_slave));

	return true;
}
EXPORT_SYMBOL_GPL(dw_dma_filter);

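/*
 * A minimal sketch of how a platform might use dw_dma_filter() (illustrative;
 * "pdev" and the request line IDs are assumptions, not taken from this file):
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dw_dma_slave slave = {
 *		.dma_dev = &pdev->dev,	// device of this DMA controller
 *		.src_id = 0,		// peripheral request line for reads
 *		.dst_id = 1,		// peripheral request line for writes
 *	};
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, dw_dma_filter, &slave);
 */
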
static int dwc_config(struct dma_chan *chan, struct dma_slave_config *sconfig)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);

	memcpy(&dwc->dma_sconfig, sconfig, sizeof(*sconfig));

	dw->encode_maxburst(dwc, &dwc->dma_sconfig.src_maxburst);
	dw->encode_maxburst(dwc, &dwc->dma_sconfig.dst_maxburst);

	return 0;
}

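/*
 * Illustrative (not part of this driver): the dma_slave_config that lands
 * in dwc_config() is filled by the client; the FIFO address and burst value
 * below are assumed examples, and the maxburst values are encoded for the
 * hardware by dw->encode_maxburst():
 *
 *	struct dma_slave_config cfg = {
 *		.dst_addr = fifo_phys_addr,	// hypothetical device FIFO
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 8,
 *	};
 *
 *	dmaengine_slave_config(chan, &cfg);
 */
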
static void dwc_chan_pause(struct dw_dma_chan *dwc, bool drain)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);
	unsigned int count = 20;	/* timeout iterations */

	dw->suspend_chan(dwc, drain);

	while (!(channel_readl(dwc, CFG_LO) & DWC_CFGL_FIFO_EMPTY) && count--)
		udelay(2);

	set_bit(DW_DMA_IS_PAUSED, &dwc->flags);
}

static int dwc_pause(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	dwc_chan_pause(dwc, false);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}

static inline void dwc_chan_resume(struct dw_dma_chan *dwc, bool drain)
{
	struct dw_dma *dw = to_dw_dma(dwc->chan.device);

	dw->resume_chan(dwc, drain);

	clear_bit(DW_DMA_IS_PAUSED, &dwc->flags);
}

static int dwc_resume(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);

	if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags))
		dwc_chan_resume(dwc, false);

	spin_unlock_irqrestore(&dwc->lock, flags);

	return 0;
}

static int dwc_terminate_all(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);
	struct dw_desc *desc, *_desc;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&dwc->lock, flags);

	clear_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags);

	dwc_chan_pause(dwc, true);

	dwc_chan_disable(dw, dwc);

	dwc_chan_resume(dwc, true);

	/* active_list entries will end up before queued entries */
	list_splice_init(&dwc->queue, &list);
	list_splice_init(&dwc->active_list, &list);

	spin_unlock_irqrestore(&dwc->lock, flags);

	/* Flush all pending and queued descriptors */
	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		dwc_descriptor_complete(dwc, desc, false);

	return 0;
}

static struct dw_desc *dwc_find_desc(struct dw_dma_chan *dwc, dma_cookie_t c)
{
	struct dw_desc *desc;

	list_for_each_entry(desc, &dwc->active_list, desc_node)
		if (desc->txd.cookie == c)
			return desc;

	return NULL;
}

static u32 dwc_get_residue(struct dw_dma_chan *dwc, dma_cookie_t cookie)
{
	struct dw_desc *desc;
	unsigned long flags;
	u32 residue;

	spin_lock_irqsave(&dwc->lock, flags);

	desc = dwc_find_desc(dwc, cookie);
	if (desc) {
		if (desc == dwc_first_active(dwc)) {
			residue = desc->residue;
			if (test_bit(DW_DMA_IS_SOFT_LLP, &dwc->flags) && residue)
				residue -= dwc_get_sent(dwc);
		} else {
			residue = desc->total_len;
		}
	} else {
		residue = 0;
	}

	spin_unlock_irqrestore(&dwc->lock, flags);
	return residue;
}

static enum dma_status
dwc_tx_status(struct dma_chan *chan,
	      dma_cookie_t cookie,
	      struct dma_tx_state *txstate)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	enum dma_status ret;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	dwc_scan_descriptors(to_dw_dma(chan->device), dwc);

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	dma_set_residue(txstate, dwc_get_residue(dwc, cookie));

	if (test_bit(DW_DMA_IS_PAUSED, &dwc->flags) && ret == DMA_IN_PROGRESS)
		return DMA_PAUSED;

	return ret;
}

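/*
 * Illustrative (not part of this driver): a client polling for completion
 * ends up in dwc_tx_status() above; "cookie" is the value returned by
 * dmaengine_submit():
 *
 *	struct dma_tx_state state;
 *	enum dma_status status;
 *
 *	status = dmaengine_tx_status(chan, cookie, &state);
 *	// state.residue holds the bytes still left to transfer
 */
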
static void dwc_issue_pending(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&dwc->lock, flags);
	if (list_empty(&dwc->active_list))
		dwc_dostart_first_queued(dwc);
	spin_unlock_irqrestore(&dwc->lock, flags);
}

/*----------------------------------------------------------------------*/

void do_dw_dma_off(struct dw_dma *dw)
{
	dma_writel(dw, CFG, 0);

	channel_clear_bit(dw, MASK.XFER, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.BLOCK, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.SRC_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.DST_TRAN, dw->all_chan_mask);
	channel_clear_bit(dw, MASK.ERROR, dw->all_chan_mask);

	while (dma_readl(dw, CFG) & DW_CFG_DMA_EN)
		cpu_relax();
}

void do_dw_dma_on(struct dw_dma *dw)
{
	dma_writel(dw, CFG, DW_CFG_DMA_EN);
}

static int dwc_alloc_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	/* ASSERT: channel is idle */
	if (dma_readl(dw, CH_EN) & dwc->mask) {
		dev_dbg(chan2dev(chan), "DMA channel not idle?\n");
		return -EIO;
	}

	dma_cookie_init(chan);

	/*
	 * NOTE: some controllers may have additional features that we
	 * need to initialize here, like "scatter-gather" (which
	 * doesn't mean what you think it means), and status writeback.
	 */

	/*
	 * We need controller-specific data to set up slave transfers.
	 */
	if (chan->private && !dw_dma_filter(chan, chan->private)) {
		dev_warn(chan2dev(chan), "Wrong controller-specific data\n");
		return -EINVAL;
	}

	/* Enable controller here if needed */
	if (!dw->in_use)
		do_dw_dma_on(dw);
	dw->in_use |= dwc->mask;

	return 0;
}

static void dwc_free_chan_resources(struct dma_chan *chan)
{
	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
	struct dw_dma *dw = to_dw_dma(chan->device);
	unsigned long flags;

	dev_dbg(chan2dev(chan), "%s: descs allocated=%u\n", __func__,
			dwc->descs_allocated);

	/* ASSERT: channel is idle */
	BUG_ON(!list_empty(&dwc->active_list));
	BUG_ON(!list_empty(&dwc->queue));
	BUG_ON(dma_readl(to_dw_dma(chan->device), CH_EN) & dwc->mask);

	spin_lock_irqsave(&dwc->lock, flags);

	/* Clear custom channel configuration */
	memset(&dwc->dws, 0, sizeof(struct dw_dma_slave));

	/* Disable interrupts */
	channel_clear_bit(dw, MASK.XFER, dwc->mask);
	channel_clear_bit(dw, MASK.BLOCK, dwc->mask);
	channel_clear_bit(dw, MASK.ERROR, dwc->mask);

	spin_unlock_irqrestore(&dwc->lock, flags);

	/* Disable controller in case it was the last user */
	dw->in_use &= ~dwc->mask;
	if (!dw->in_use)
		do_dw_dma_off(dw);

	dev_vdbg(chan2dev(chan), "%s: done\n", __func__);
}

int do_dma_probe(struct dw_dma_chip *chip)
{
	struct dw_dma *dw = chip->dw;
	struct dw_dma_platform_data *pdata;
	bool			autocfg = false;
	unsigned int		dw_params;
	unsigned int		i;
	int			err;

	dw->pdata = devm_kzalloc(chip->dev, sizeof(*dw->pdata), GFP_KERNEL);
	if (!dw->pdata)
		return -ENOMEM;

	dw->regs = chip->regs;

	pm_runtime_get_sync(chip->dev);

	if (!chip->pdata) {
		dw_params = dma_readl(dw, DW_PARAMS);
		dev_dbg(chip->dev, "DW_PARAMS: 0x%08x\n", dw_params);

		autocfg = dw_params >> DW_PARAMS_EN & 1;
		if (!autocfg) {
			err = -EINVAL;
			goto err_pdata;
		}

		/* Reassign the platform data pointer */
		pdata = dw->pdata;

		/* Get hardware configuration parameters */
		pdata->nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 7) + 1;
		pdata->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1;
		for (i = 0; i < pdata->nr_masters; i++) {
			pdata->data_width[i] =
				4 << (dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3);
		}
		pdata->block_size = dma_readl(dw, MAX_BLK_SIZE);

		/* Fill platform data with the default values */
		pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING;
		pdata->chan_priority = CHAN_PRIORITY_ASCENDING;
	} else if (chip->pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) {
		err = -EINVAL;
		goto err_pdata;
	} else {
		memcpy(dw->pdata, chip->pdata, sizeof(*dw->pdata));

		/* Reassign the platform data pointer */
		pdata = dw->pdata;
	}

	dw->chan = devm_kcalloc(chip->dev, pdata->nr_channels, sizeof(*dw->chan),
				GFP_KERNEL);
	if (!dw->chan) {
		err = -ENOMEM;
		goto err_pdata;
	}

	/* Calculate all channel mask before DMA setup */
	dw->all_chan_mask = (1 << pdata->nr_channels) - 1;

	/* Force dma off, just in case */
	dw->disable(dw);

	/* Device and instance ID for IRQ and DMA pool */
	dw->set_device_name(dw, chip->id);

	/* Create a pool of consistent memory blocks for hardware descriptors */
	dw->desc_pool = dmam_pool_create(dw->name, chip->dev,
					 sizeof(struct dw_desc), 4, 0);
	if (!dw->desc_pool) {
		dev_err(chip->dev, "No memory for descriptors dma pool\n");
		err = -ENOMEM;
		goto err_pdata;
	}

	tasklet_init(&dw->tasklet, dw_dma_tasklet, (unsigned long)dw);

	err = request_irq(chip->irq, dw_dma_interrupt, IRQF_SHARED,
			  dw->name, dw);
	if (err)
		goto err_pdata;

	INIT_LIST_HEAD(&dw->dma.channels);
	for (i = 0; i < pdata->nr_channels; i++) {
		struct dw_dma_chan	*dwc = &dw->chan[i];

		dwc->chan.device = &dw->dma;
		dma_cookie_init(&dwc->chan);
		if (pdata->chan_allocation_order == CHAN_ALLOCATION_ASCENDING)
			list_add_tail(&dwc->chan.device_node,
					&dw->dma.channels);
		else
			list_add(&dwc->chan.device_node, &dw->dma.channels);

		/* 7 is highest priority & 0 is lowest. */
		if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING)
			dwc->priority = pdata->nr_channels - i - 1;
		else
			dwc->priority = i;

		dwc->ch_regs = &__dw_regs(dw)->CHAN[i];
		spin_lock_init(&dwc->lock);
		dwc->mask = 1 << i;

		INIT_LIST_HEAD(&dwc->active_list);
		INIT_LIST_HEAD(&dwc->queue);

		channel_clear_bit(dw, CH_EN, dwc->mask);

		dwc->direction = DMA_TRANS_NONE;

		/* Hardware configuration */
		if (autocfg) {
			unsigned int r = DW_DMA_MAX_NR_CHANNELS - i - 1;
			void __iomem *addr = &__dw_regs(dw)->DWC_PARAMS[r];
			unsigned int dwc_params = readl(addr);

			dev_dbg(chip->dev, "DWC_PARAMS[%d]: 0x%08x\n", i,
					   dwc_params);

			/*
			 * Decode maximum block size for given channel. The
			 * stored 4 bit value represents blocks from 0x00 for 3
			 * up to 0x0a for 4095, i.e. (4 << value) - 1.
			 */
			dwc->block_size =
				(4 << ((pdata->block_size >> 4 * i) & 0xf)) - 1;
			dwc->nollp =
				(dwc_params >> DWC_PARAMS_MBLK_EN & 0x1) == 0;
		} else {
			dwc->block_size = pdata->block_size;
			dwc->nollp = !pdata->multi_block[i];
		}
	}

	/* Clear all interrupts on all channels. */
	dma_writel(dw, CLEAR.XFER, dw->all_chan_mask);
	dma_writel(dw, CLEAR.BLOCK, dw->all_chan_mask);
	dma_writel(dw, CLEAR.SRC_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask);
	dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask);

	/* Set capabilities */
	dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask);
	dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);

	dw->dma.dev = chip->dev;
	dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources;
	dw->dma.device_free_chan_resources = dwc_free_chan_resources;

	dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;
	dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;

	dw->dma.device_config = dwc_config;
	dw->dma.device_pause = dwc_pause;
	dw->dma.device_resume = dwc_resume;
	dw->dma.device_terminate_all = dwc_terminate_all;

	dw->dma.device_tx_status = dwc_tx_status;
	dw->dma.device_issue_pending = dwc_issue_pending;

	/* DMA capabilities */
	dw->dma.src_addr_widths = DW_DMA_BUSWIDTHS;
	dw->dma.dst_addr_widths = DW_DMA_BUSWIDTHS;
	dw->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV) |
			     BIT(DMA_MEM_TO_MEM);
	dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	err = dma_async_device_register(&dw->dma);
	if (err)
		goto err_dma_register;

	dev_info(chip->dev, "DesignWare DMA Controller, %d channels\n",
		 pdata->nr_channels);

	pm_runtime_put_sync_suspend(chip->dev);

	return 0;

err_dma_register:
	free_irq(chip->irq, dw);
err_pdata:
	pm_runtime_put_sync_suspend(chip->dev);
	return err;
}

int do_dma_remove(struct dw_dma_chip *chip)
{
	struct dw_dma *dw = chip->dw;
	struct dw_dma_chan *dwc, *_dwc;

	pm_runtime_get_sync(chip->dev);

	do_dw_dma_off(dw);
	dma_async_device_unregister(&dw->dma);

	free_irq(chip->irq, dw);
	tasklet_kill(&dw->tasklet);

	list_for_each_entry_safe(dwc, _dwc, &dw->dma.channels,
			chan.device_node) {
		list_del(&dwc->chan.device_node);
		channel_clear_bit(dw, CH_EN, dwc->mask);
	}

	pm_runtime_put_sync_suspend(chip->dev);
	return 0;
}

int do_dw_dma_disable(struct dw_dma_chip *chip)
{
	struct dw_dma *dw = chip->dw;

	dw->disable(dw);
	return 0;
}
EXPORT_SYMBOL_GPL(do_dw_dma_disable);

int do_dw_dma_enable(struct dw_dma_chip *chip)
{
	struct dw_dma *dw = chip->dw;

	dw->enable(dw);
	return 0;
}
EXPORT_SYMBOL_GPL(do_dw_dma_enable);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller core driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_AUTHOR("Viresh Kumar <vireshk@kernel.org>");