1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Driver for the Atmel AHB DMA Controller (aka HDMA or DMAC on AT91 systems)
5 * Copyright (C) 2008 Atmel Corporation
7 * This supports the Atmel AHB DMA Controller found in several Atmel SoCs.
8 * The only Atmel DMA Controller that is not covered by this driver is the one
9 * found on AT91SAM9263.
12 #include <dt-bindings/dma/at91.h>
13 #include <linux/clk.h>
14 #include <linux/dmaengine.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/dmapool.h>
17 #include <linux/interrupt.h>
18 #include <linux/module.h>
19 #include <linux/platform_device.h>
20 #include <linux/slab.h>
22 #include <linux/of_device.h>
23 #include <linux/of_dma.h>
25 #include "at_hdmac_regs.h"
26 #include "dmaengine.h"
32 * at_hdmac : Name of the Atmel AHB DMA Controller
33 * at_dma_ / atdma : Atmel DMA controller entity related
34 * atc_ / atchan : Atmel DMA Channel entity related
37 #define ATC_DEFAULT_CFG (ATC_FIFOCFG_HALFFIFO)
38 #define ATC_DEFAULT_CTRLB (ATC_SIF(AT_DMA_MEM_IF) \
39 |ATC_DIF(AT_DMA_MEM_IF))
40 #define ATC_DMA_BUSWIDTHS\
41 (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
42 BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\
43 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\
44 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
46 #define ATC_MAX_DSCR_TRIALS 10
49 * Initial number of descriptors to allocate for each channel. This could
50 * be increased during DMA usage.
52 static unsigned int init_nr_desc_per_channel = 64;
53 module_param(init_nr_desc_per_channel, uint, 0644);
54 MODULE_PARM_DESC(init_nr_desc_per_channel,
55 "initial descriptors per channel (default: 64)");
59 static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx);
60 static void atc_issue_pending(struct dma_chan *chan);
63 /*----------------------------------------------------------------------*/
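/*
 * The width returned below is the log2 of the transfer size in bytes
 * (0 = byte, 1 = half-word, 2 = word), i.e. the encoding expected by
 * the ATC_SRC_WIDTH()/ATC_DST_WIDTH() fields of CTRLA.
 */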
65 static inline unsigned int atc_get_xfer_width(dma_addr_t src, dma_addr_t dst,
70 if (!((src | dst | len) & 3))
72 else if (!((src | dst | len) & 1))
80 static struct at_desc *atc_first_active(struct at_dma_chan *atchan)
82 return list_first_entry(&atchan->active_list,
83 struct at_desc, desc_node);
86 static struct at_desc *atc_first_queued(struct at_dma_chan *atchan)
88 return list_first_entry(&atchan->queue,
89 struct at_desc, desc_node);
93 * atc_alloc_descriptor - allocate and return an initialized descriptor
94 * @chan: the channel to allocate descriptors for
95 * @gfp_flags: GFP allocation flags
97 * Note: The ack-bit is positioned in the descriptor flag at creation time
98 * to make initial allocation more convenient. This bit will be cleared
99 * and control will be given to the client at usage time (during
100 * preparation functions).
102 static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan,
105 struct at_desc *desc = NULL;
106 struct at_dma *atdma = to_at_dma(chan->device);
109 desc = dma_pool_zalloc(atdma->dma_desc_pool, gfp_flags, &phys);
111 INIT_LIST_HEAD(&desc->tx_list);
112 dma_async_tx_descriptor_init(&desc->txd, chan);
113 /* txd.flags will be overwritten in prep functions */
114 desc->txd.flags = DMA_CTRL_ACK;
115 desc->txd.tx_submit = atc_tx_submit;
116 desc->txd.phys = phys;
123 * atc_desc_get - get an unused descriptor from free_list
124 * @atchan: channel we want a new descriptor for
126 static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
128 struct at_desc *desc, *_desc;
129 struct at_desc *ret = NULL;
133 spin_lock_irqsave(&atchan->lock, flags);
134 list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
136 if (async_tx_test_ack(&desc->txd)) {
137 list_del(&desc->desc_node);
141 dev_dbg(chan2dev(&atchan->chan_common),
142 "desc %p not ACKed\n", desc);
144 spin_unlock_irqrestore(&atchan->lock, flags);
145 dev_vdbg(chan2dev(&atchan->chan_common),
146 "scanned %u descriptors on freelist\n", i);
148 /* no more descriptors available in the initial pool: create one more */
150 ret = atc_alloc_descriptor(&atchan->chan_common, GFP_NOWAIT);
156 * atc_desc_put - move a descriptor, including any children, to the free list
157 * @atchan: channel we work on
158 * @desc: descriptor, at the head of a chain, to move to free list
160 static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
163 struct at_desc *child;
166 spin_lock_irqsave(&atchan->lock, flags);
167 list_for_each_entry(child, &desc->tx_list, desc_node)
168 dev_vdbg(chan2dev(&atchan->chan_common),
169 "moving child desc %p to freelist\n",
171 list_splice_init(&desc->tx_list, &atchan->free_list);
172 dev_vdbg(chan2dev(&atchan->chan_common),
173 "moving desc %p to freelist\n", desc);
174 list_add(&desc->desc_node, &atchan->free_list);
175 spin_unlock_irqrestore(&atchan->lock, flags);
180 * atc_desc_chain - build a chain by adding a descriptor
181 * @first: address of first descriptor of the chain
182 * @prev: address of previous descriptor of the chain
183 * @desc: descriptor to queue
185 * Called from prep_* functions
187 static void atc_desc_chain(struct at_desc **first, struct at_desc **prev,
188 struct at_desc *desc)
193 /* inform the HW lli about chaining */
194 (*prev)->lli.dscr = desc->txd.phys;
195 /* insert the link descriptor to the LD ring */
196 list_add_tail(&desc->desc_node,
203 * atc_dostart - starts the DMA engine for real
204 * @atchan: the channel we want to start
205 * @first: first descriptor in the list we want to begin with
207 * Called with atchan->lock held and bh disabled
209 static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
211 struct at_dma *atdma = to_at_dma(atchan->chan_common.device);
213 /* ASSERT: channel is idle */
214 if (atc_chan_is_enabled(atchan)) {
215 dev_err(chan2dev(&atchan->chan_common),
216 "BUG: Attempted to start non-idle channel\n");
217 dev_err(chan2dev(&atchan->chan_common),
218 " channel: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
219 channel_readl(atchan, SADDR),
220 channel_readl(atchan, DADDR),
221 channel_readl(atchan, CTRLA),
222 channel_readl(atchan, CTRLB),
223 channel_readl(atchan, DSCR));
225 /* The tasklet will hopefully advance the queue... */
229 vdbg_dump_regs(atchan);
231 channel_writel(atchan, SADDR, 0);
232 channel_writel(atchan, DADDR, 0);
233 channel_writel(atchan, CTRLA, 0);
234 channel_writel(atchan, CTRLB, 0);
235 channel_writel(atchan, DSCR, first->txd.phys);
236 channel_writel(atchan, SPIP, ATC_SPIP_HOLE(first->src_hole) |
237 ATC_SPIP_BOUNDARY(first->boundary));
238 channel_writel(atchan, DPIP, ATC_DPIP_HOLE(first->dst_hole) |
239 ATC_DPIP_BOUNDARY(first->boundary));
240 /* Don't allow CPU to reorder channel enable. */
242 dma_writel(atdma, CHER, atchan->mask);
244 vdbg_dump_regs(atchan);
248 * atc_get_desc_by_cookie - get the descriptor of a cookie
249 * @atchan: the DMA channel
250 * @cookie: the cookie to get the descriptor for
252 static struct at_desc *atc_get_desc_by_cookie(struct at_dma_chan *atchan,
255 struct at_desc *desc, *_desc;
257 list_for_each_entry_safe(desc, _desc, &atchan->queue, desc_node) {
258 if (desc->txd.cookie == cookie)
262 list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) {
263 if (desc->txd.cookie == cookie)
271 * atc_calc_bytes_left - calculates the number of bytes left according to the
272 * value read from CTRLA.
274 * @current_len: the number of bytes left before reading CTRLA
275 * @ctrla: the value of CTRLA
277 static inline int atc_calc_bytes_left(int current_len, u32 ctrla)
279 u32 btsize = (ctrla & ATC_BTSIZE_MAX);
280 u32 src_width = ATC_REG_TO_SRC_WIDTH(ctrla);
283 * According to the datasheet, when reading the Control A Register
284 * (ctrla), the Buffer Transfer Size (btsize) bitfield refers to the
285 * number of transfers completed on the Source Interface.
286 * So btsize is always a number of source width transfers.
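 *
 * For example (illustrative): if src_width is 2 (32-bit transfers) and
 * btsize reads back as 10, then 10 << 2 = 40 bytes have already been
 * read from the source, so 40 is subtracted from current_len.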
288 return current_len - (btsize << src_width);
292 * atc_get_bytes_left - get the number of remaining bytes for a cookie
294 * @cookie: transaction identifier to check status of
296 static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
298 struct at_dma_chan *atchan = to_at_dma_chan(chan);
299 struct at_desc *desc_first = atc_first_active(atchan);
300 struct at_desc *desc;
306 * If the cookie doesn't match the currently running transfer then
307 * we can return the total length of the associated DMA transfer,
308 * because it is still queued.
310 desc = atc_get_desc_by_cookie(atchan, cookie);
313 else if (desc != desc_first)
314 return desc->total_len;
316 /* cookie matches to the currently running transfer */
317 ret = desc_first->total_len;
319 if (desc_first->lli.dscr) {
320 /* hardware linked list transfer */
323 * Calculate the residue by removing the length of the child
324 * descriptors already transferred from the total length.
325 * To get the current child descriptor we can use the value of
326 * the channel's DSCR register and compare it against the value
327 * of the hardware linked list structure of each child
330 * The CTRLA register provides us with the amount of data
331 * already read from the source for the current child
332 * descriptor. So we can compute a more accurate residue by also
333 * removing the number of bytes corresponding to this amount of
336 * However, the DSCR and CTRLA registers cannot both be read
337 * atomically. Hence a race condition may occur: the first read
338 * register may refer to one child descriptor whereas the second
339 * read may refer to a later child descriptor in the list
340 * because of the DMA transfer progression in between the two
343 * One solution could have been to pause the DMA transfer, read
344 * the DSCR and CTRLA then resume the DMA transfer. Nonetheless,
345 * this approach presents some drawbacks:
346 * - If the DMA transfer is paused, RX overruns or TX underruns
347 * are more likely to occur depending on the system latency.
348 * Taking the USART driver as an example, it uses a cyclic DMA
349 * transfer to read data from the Receive Holding Register
350 * (RHR) to avoid RX overruns since the RHR is not protected
351 * by any FIFO on most Atmel SoCs. So pausing the DMA transfer
352 * to compute the residue would break the USART driver design.
353 * - The atc_pause() function masks interrupts but we'd rather
354 * avoid doing so for system latency reasons.
356 * Then we'd rather use another solution: the DSCR is read a
357 * first time, the CTRLA is read in turn, next the DSCR is read
358 * a second time. If the two consecutive read values of the DSCR
359 * are the same then we assume both refer to the very same
360 * child descriptor as well as the CTRLA value read in between
361 * does. For cyclic transfers, the assumption is that a full loop
363 * If the two DSCR values are different, we read again the CTRLA
364 * then the DSCR until two consecutive read values from DSCR are
365 * equal or until the maximum number of trials is reached.
366 * This algorithm is very unlikely not to find a stable value for
370 dscr = channel_readl(atchan, DSCR);
371 rmb(); /* ensure DSCR is read before CTRLA */
372 ctrla = channel_readl(atchan, CTRLA);
373 for (i = 0; i < ATC_MAX_DSCR_TRIALS; ++i) {
376 rmb(); /* ensure DSCR is read after CTRLA */
377 new_dscr = channel_readl(atchan, DSCR);
380 * If the DSCR register value has not changed inside the
381 * DMA controller since the previous read, we assume
382 * that both the dscr and ctrla values refer to the
383 * very same descriptor.
385 if (likely(new_dscr == dscr))
389 * DSCR has changed inside the DMA controller, so the
390 * previously read value of CTRLA may refer to an already
391 * processed descriptor hence could be outdated.
392 * We need to update ctrla to match the current
396 rmb(); /* ensure DSCR is read before CTRLA */
397 ctrla = channel_readl(atchan, CTRLA);
399 if (unlikely(i == ATC_MAX_DSCR_TRIALS))
402 /* for the first descriptor we can be more accurate */
403 if (desc_first->lli.dscr == dscr)
404 return atc_calc_bytes_left(ret, ctrla);
406 ret -= desc_first->len;
407 list_for_each_entry(desc, &desc_first->tx_list, desc_node) {
408 if (desc->lli.dscr == dscr)
415 * For the current descriptor in the chain we can calculate
416 * the remaining bytes using the channel's register.
418 ret = atc_calc_bytes_left(ret, ctrla);
420 /* single transfer */
421 ctrla = channel_readl(atchan, CTRLA);
422 ret = atc_calc_bytes_left(ret, ctrla);
429 * atc_chain_complete - finish work for one transaction chain
430 * @atchan: channel we work on
431 * @desc: descriptor at the head of the chain we want to complete
434 atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
436 struct dma_async_tx_descriptor *txd = &desc->txd;
437 struct at_dma *atdma = to_at_dma(atchan->chan_common.device);
440 dev_vdbg(chan2dev(&atchan->chan_common),
441 "descriptor %u complete\n", txd->cookie);
443 spin_lock_irqsave(&atchan->lock, flags);
445 /* mark the descriptor as complete for non-cyclic cases only */
446 if (!atc_chan_is_cyclic(atchan))
447 dma_cookie_complete(txd);
449 spin_unlock_irqrestore(&atchan->lock, flags);
451 dma_descriptor_unmap(txd);
452 /* for cyclic transfers,
453 * no need to replay callback function while stopping */
454 if (!atc_chan_is_cyclic(atchan))
455 dmaengine_desc_get_callback_invoke(txd, NULL);
457 dma_run_dependencies(txd);
459 spin_lock_irqsave(&atchan->lock, flags);
460 /* move children to free_list */
461 list_splice_init(&desc->tx_list, &atchan->free_list);
462 /* add myself to free_list */
463 list_add(&desc->desc_node, &atchan->free_list);
464 spin_unlock_irqrestore(&atchan->lock, flags);
466 /* If the transfer was a memset, free our temporary buffer */
467 if (desc->memset_buffer) {
468 dma_pool_free(atdma->memset_pool, desc->memset_vaddr,
470 desc->memset_buffer = false;
475 * atc_advance_work - at the end of a transaction, move forward
476 * @atchan: channel where the transaction ended
478 static void atc_advance_work(struct at_dma_chan *atchan)
480 struct at_desc *desc;
483 dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");
485 spin_lock_irqsave(&atchan->lock, flags);
486 if (atc_chan_is_enabled(atchan) || list_empty(&atchan->active_list))
487 return spin_unlock_irqrestore(&atchan->lock, flags);
489 desc = atc_first_active(atchan);
490 /* Remove the transfer node from the active list. */
491 list_del_init(&desc->desc_node);
492 spin_unlock_irqrestore(&atchan->lock, flags);
493 atc_chain_complete(atchan, desc);
496 spin_lock_irqsave(&atchan->lock, flags);
497 if (!list_empty(&atchan->active_list)) {
498 desc = atc_first_queued(atchan);
499 list_move_tail(&desc->desc_node, &atchan->active_list);
500 atc_dostart(atchan, desc);
502 spin_unlock_irqrestore(&atchan->lock, flags);
507 * atc_handle_error - handle errors reported by DMA controller
508 * @atchan: channel where error occurs
510 static void atc_handle_error(struct at_dma_chan *atchan)
512 struct at_desc *bad_desc;
513 struct at_desc *desc;
514 struct at_desc *child;
517 spin_lock_irqsave(&atchan->lock, flags);
519 * The descriptor currently at the head of the active list is
520 * broken. Since we don't have any way to report errors, we'll
521 * just have to scream loudly and try to carry on.
523 bad_desc = atc_first_active(atchan);
524 list_del_init(&bad_desc->desc_node);
526 /* Try to restart the controller */
527 if (!list_empty(&atchan->active_list)) {
528 desc = atc_first_queued(atchan);
529 list_move_tail(&desc->desc_node, &atchan->active_list);
530 atc_dostart(atchan, desc);
534 * KERN_CRIT may seem harsh, but since this only happens
535 * when someone submits a bad physical address in a
536 * descriptor, we should consider ourselves lucky that the
537 * controller flagged an error instead of scribbling over
538 * random memory locations.
540 dev_crit(chan2dev(&atchan->chan_common),
541 "Bad descriptor submitted for DMA!\n");
542 dev_crit(chan2dev(&atchan->chan_common),
543 " cookie: %d\n", bad_desc->txd.cookie);
544 atc_dump_lli(atchan, &bad_desc->lli);
545 list_for_each_entry(child, &bad_desc->tx_list, desc_node)
546 atc_dump_lli(atchan, &child->lli);
548 spin_unlock_irqrestore(&atchan->lock, flags);
550 /* Pretend the descriptor completed successfully */
551 atc_chain_complete(atchan, bad_desc);
555 * atc_handle_cyclic - at the end of a period, run callback function
556 * @atchan: channel used for cyclic operations
558 static void atc_handle_cyclic(struct at_dma_chan *atchan)
560 struct at_desc *first = atc_first_active(atchan);
561 struct dma_async_tx_descriptor *txd = &first->txd;
563 dev_vdbg(chan2dev(&atchan->chan_common),
564 "new cyclic period llp 0x%08x\n",
565 channel_readl(atchan, DSCR));
567 dmaengine_desc_get_callback_invoke(txd, NULL);
570 /*-- IRQ & Tasklet ---------------------------------------------------*/
572 static void atc_tasklet(struct tasklet_struct *t)
574 struct at_dma_chan *atchan = from_tasklet(atchan, t, tasklet);
576 if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
577 return atc_handle_error(atchan);
579 if (atc_chan_is_cyclic(atchan))
580 return atc_handle_cyclic(atchan);
582 atc_advance_work(atchan);
585 static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
587 struct at_dma *atdma = (struct at_dma *)dev_id;
588 struct at_dma_chan *atchan;
590 u32 status, pending, imr;
594 imr = dma_readl(atdma, EBCIMR);
595 status = dma_readl(atdma, EBCISR);
596 pending = status & imr;
601 dev_vdbg(atdma->dma_common.dev,
602 "interrupt: status = 0x%08x, 0x%08x, 0x%08x\n",
603 status, imr, pending);
605 for (i = 0; i < atdma->dma_common.chancnt; i++) {
606 atchan = &atdma->chan[i];
607 if (pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i))) {
608 if (pending & AT_DMA_ERR(i)) {
609 /* Disable channel on AHB error */
610 dma_writel(atdma, CHDR,
611 AT_DMA_RES(i) | atchan->mask);
612 /* Give information to tasklet */
613 set_bit(ATC_IS_ERROR, &atchan->status);
615 tasklet_schedule(&atchan->tasklet);
626 /*-- DMA Engine API --------------------------------------------------*/
629 * atc_tx_submit - set the prepared descriptor(s) to be executed by the engine
630 * @tx: descriptor at the head of the transaction chain
632 * Queue chain if DMA engine is working already
634 * Cookie increment and adding to active_list or queue must be atomic
636 static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
638 struct at_desc *desc = txd_to_at_desc(tx);
639 struct at_dma_chan *atchan = to_at_dma_chan(tx->chan);
643 spin_lock_irqsave(&atchan->lock, flags);
644 cookie = dma_cookie_assign(tx);
646 list_add_tail(&desc->desc_node, &atchan->queue);
647 spin_unlock_irqrestore(&atchan->lock, flags);
649 dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
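/*
 * Illustrative sketch (not part of this driver): a typical dmaengine
 * consumer pairs this submit path with issue_pending roughly as
 * follows, with chan/sgl/sg_len being whatever the client set up:
 *
 *	txd = dmaengine_prep_slave_sg(chan, sgl, sg_len,
 *				      DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(txd);		-> atc_tx_submit()
 *	dma_async_issue_pending(chan);		-> atc_issue_pending()
 */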
655 * atc_prep_dma_interleaved - prepare a memory-to-memory interleaved operation
656 * @chan: the channel to prepare operation on
657 * @xt: Interleaved transfer template
658 * @flags: tx descriptor status flags
660 static struct dma_async_tx_descriptor *
661 atc_prep_dma_interleaved(struct dma_chan *chan,
662 struct dma_interleaved_template *xt,
665 struct at_dma_chan *atchan = to_at_dma_chan(chan);
666 struct data_chunk *first;
667 struct at_desc *desc = NULL;
675 if (unlikely(!xt || xt->numf != 1 || !xt->frame_size))
680 dev_info(chan2dev(chan),
681 "%s: src=%pad, dest=%pad, numf=%d, frame_size=%d, flags=0x%lx\n",
682 __func__, &xt->src_start, &xt->dst_start, xt->numf,
683 xt->frame_size, flags);
686 * The controller can only "skip" X bytes every Y bytes, so we
687 * need to make sure we are given a template that fits that
688 * description, i.e. a template with chunks that always have the
689 * same size, with the same ICGs.
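 *
 * For example (illustrative), a template with numf == 1 whose chunks
 * all share the same ->size and the same source/destination ICG
 * describes a 2D copy of identical rows separated by a constant gap,
 * which is what the SPIP/DPIP picture-in-picture registers can encode.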
691 for (i = 0; i < xt->frame_size; i++) {
692 struct data_chunk *chunk = xt->sgl + i;
694 if ((chunk->size != xt->sgl->size) ||
695 (dmaengine_get_dst_icg(xt, chunk) != dmaengine_get_dst_icg(xt, first)) ||
696 (dmaengine_get_src_icg(xt, chunk) != dmaengine_get_src_icg(xt, first))) {
697 dev_err(chan2dev(chan),
698 "%s: the controller can transfer only identical chunks\n",
706 dwidth = atc_get_xfer_width(xt->src_start,
709 xfer_count = len >> dwidth;
710 if (xfer_count > ATC_BTSIZE_MAX) {
711 dev_err(chan2dev(chan), "%s: buffer is too big\n", __func__);
715 ctrla = ATC_SRC_WIDTH(dwidth) |
716 ATC_DST_WIDTH(dwidth);
718 ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN
719 | ATC_SRC_ADDR_MODE_INCR
720 | ATC_DST_ADDR_MODE_INCR
725 /* create the transfer */
726 desc = atc_desc_get(atchan);
728 dev_err(chan2dev(chan),
729 "%s: couldn't allocate our descriptor\n", __func__);
733 desc->lli.saddr = xt->src_start;
734 desc->lli.daddr = xt->dst_start;
735 desc->lli.ctrla = ctrla | xfer_count;
736 desc->lli.ctrlb = ctrlb;
738 desc->boundary = first->size >> dwidth;
739 desc->dst_hole = (dmaengine_get_dst_icg(xt, first) >> dwidth) + 1;
740 desc->src_hole = (dmaengine_get_src_icg(xt, first) >> dwidth) + 1;
742 desc->txd.cookie = -EBUSY;
743 desc->total_len = desc->len = len;
745 /* set end-of-link on the last link descriptor of the list */
748 desc->txd.flags = flags; /* client is in control of this ack */
754 * atc_prep_dma_memcpy - prepare a memcpy operation
755 * @chan: the channel to prepare operation on
756 * @dest: destination DMA address for the operation
757 * @src: source DMA address for the operation
758 * @len: operation length
759 * @flags: tx descriptor status flags
761 static struct dma_async_tx_descriptor *
762 atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
763 size_t len, unsigned long flags)
765 struct at_dma_chan *atchan = to_at_dma_chan(chan);
766 struct at_desc *desc = NULL;
767 struct at_desc *first = NULL;
768 struct at_desc *prev = NULL;
771 unsigned int src_width;
772 unsigned int dst_width;
776 dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d%pad s%pad l0x%zx f0x%lx\n",
777 &dest, &src, len, flags);
779 if (unlikely(!len)) {
780 dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
784 ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN
785 | ATC_SRC_ADDR_MODE_INCR
786 | ATC_DST_ADDR_MODE_INCR
790 * We can be a lot more clever here, but this should take care
791 * of the most common optimization.
793 src_width = dst_width = atc_get_xfer_width(src, dest, len);
795 ctrla = ATC_SRC_WIDTH(src_width) |
796 ATC_DST_WIDTH(dst_width);
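/*
 * Each hardware descriptor can move at most ATC_BTSIZE_MAX transfers
 * of src_width bytes, so longer copies are split below into a chained
 * list of descriptors.
 */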
798 for (offset = 0; offset < len; offset += xfer_count << src_width) {
799 xfer_count = min_t(size_t, (len - offset) >> src_width,
802 desc = atc_desc_get(atchan);
806 desc->lli.saddr = src + offset;
807 desc->lli.daddr = dest + offset;
808 desc->lli.ctrla = ctrla | xfer_count;
809 desc->lli.ctrlb = ctrlb;
811 desc->txd.cookie = 0;
812 desc->len = xfer_count << src_width;
814 atc_desc_chain(&first, &prev, desc);
817 /* First descriptor of the chain embeds additional information */
818 first->txd.cookie = -EBUSY;
819 first->total_len = len;
821 /* set end-of-link on the last link descriptor of the list */
824 first->txd.flags = flags; /* client is in control of this ack */
829 atc_desc_put(atchan, first);
833 static struct at_desc *atc_create_memset_desc(struct dma_chan *chan,
838 struct at_dma_chan *atchan = to_at_dma_chan(chan);
839 struct at_desc *desc;
842 u32 ctrla = ATC_SRC_WIDTH(2) | ATC_DST_WIDTH(2);
843 u32 ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN |
844 ATC_SRC_ADDR_MODE_FIXED |
845 ATC_DST_ADDR_MODE_INCR |
848 xfer_count = len >> 2;
849 if (xfer_count > ATC_BTSIZE_MAX) {
850 dev_err(chan2dev(chan), "%s: buffer is too big\n",
855 desc = atc_desc_get(atchan);
857 dev_err(chan2dev(chan), "%s: can't get a descriptor\n",
862 desc->lli.saddr = psrc;
863 desc->lli.daddr = pdst;
864 desc->lli.ctrla = ctrla | xfer_count;
865 desc->lli.ctrlb = ctrlb;
867 desc->txd.cookie = 0;
874 * atc_prep_dma_memset - prepare a memset operation
875 * @chan: the channel to prepare operation on
876 * @dest: destination DMA address for the operation
877 * @value: value to set memory buffer to
878 * @len: operation length
879 * @flags: tx descriptor status flags
881 static struct dma_async_tx_descriptor *
882 atc_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
883 size_t len, unsigned long flags)
885 struct at_dma *atdma = to_at_dma(chan->device);
886 struct at_desc *desc;
890 dev_vdbg(chan2dev(chan), "%s: d%pad v0x%x l0x%zx f0x%lx\n", __func__,
891 &dest, value, len, flags);
893 if (unlikely(!len)) {
894 dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
898 if (!is_dma_fill_aligned(chan->device, dest, 0, len)) {
899 dev_dbg(chan2dev(chan), "%s: buffer is not aligned\n",
904 vaddr = dma_pool_alloc(atdma->memset_pool, GFP_NOWAIT, &paddr);
906 dev_err(chan2dev(chan), "%s: couldn't allocate buffer\n",
910 *(u32 *)vaddr = value;
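/*
 * The controller has no native memset: a single 32-bit word from the
 * coherent memset pool holds the fill value and is used as a
 * fixed-address source for a memory-to-memory transfer.
 */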
912 desc = atc_create_memset_desc(chan, paddr, dest, len);
914 dev_err(chan2dev(chan), "%s: couldn't get a descriptor\n",
916 goto err_free_buffer;
919 desc->memset_paddr = paddr;
920 desc->memset_vaddr = vaddr;
921 desc->memset_buffer = true;
923 desc->txd.cookie = -EBUSY;
924 desc->total_len = len;
926 /* set end-of-link on the descriptor */
929 desc->txd.flags = flags;
934 dma_pool_free(atdma->memset_pool, vaddr, paddr);
938 static struct dma_async_tx_descriptor *
939 atc_prep_dma_memset_sg(struct dma_chan *chan,
940 struct scatterlist *sgl,
941 unsigned int sg_len, int value,
944 struct at_dma_chan *atchan = to_at_dma_chan(chan);
945 struct at_dma *atdma = to_at_dma(chan->device);
946 struct at_desc *desc = NULL, *first = NULL, *prev = NULL;
947 struct scatterlist *sg;
950 size_t total_len = 0;
953 dev_vdbg(chan2dev(chan), "%s: v0x%x l0x%zx f0x%lx\n", __func__,
954 value, sg_len, flags);
956 if (unlikely(!sgl || !sg_len)) {
957 dev_dbg(chan2dev(chan), "%s: scatterlist is empty!\n",
962 vaddr = dma_pool_alloc(atdma->memset_pool, GFP_NOWAIT, &paddr);
964 dev_err(chan2dev(chan), "%s: couldn't allocate buffer\n",
968 *(u32 *)vaddr = value;
970 for_each_sg(sgl, sg, sg_len, i) {
971 dma_addr_t dest = sg_dma_address(sg);
972 size_t len = sg_dma_len(sg);
974 dev_vdbg(chan2dev(chan), "%s: d%pad, l0x%zx\n",
975 __func__, &dest, len);
977 if (!is_dma_fill_aligned(chan->device, dest, 0, len)) {
978 dev_err(chan2dev(chan), "%s: buffer is not aligned\n",
983 desc = atc_create_memset_desc(chan, paddr, dest, len);
987 atc_desc_chain(&first, &prev, desc);
993 * Only set the buffer pointers on the last descriptor to
994 * avoid freeing it while the transfer is still ongoing
996 desc->memset_paddr = paddr;
997 desc->memset_vaddr = vaddr;
998 desc->memset_buffer = true;
1000 first->txd.cookie = -EBUSY;
1001 first->total_len = total_len;
1003 /* set end-of-link on the descriptor */
1006 first->txd.flags = flags;
1011 atc_desc_put(atchan, first);
1016 * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
1017 * @chan: DMA channel
1018 * @sgl: scatterlist to transfer to/from
1019 * @sg_len: number of entries in @sgl
1020 * @direction: DMA direction
1021 * @flags: tx descriptor status flags
1022 * @context: transaction context (ignored)
1024 static struct dma_async_tx_descriptor *
1025 atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
1026 unsigned int sg_len, enum dma_transfer_direction direction,
1027 unsigned long flags, void *context)
1029 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1030 struct at_dma_slave *atslave = chan->private;
1031 struct dma_slave_config *sconfig = &atchan->dma_sconfig;
1032 struct at_desc *first = NULL;
1033 struct at_desc *prev = NULL;
1037 unsigned int reg_width;
1038 unsigned int mem_width;
1040 struct scatterlist *sg;
1041 size_t total_len = 0;
1043 dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n",
1045 direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
1048 if (unlikely(!atslave || !sg_len)) {
1049 dev_dbg(chan2dev(chan), "prep_slave_sg: sg length is zero!\n");
1053 ctrla = ATC_SCSIZE(sconfig->src_maxburst)
1054 | ATC_DCSIZE(sconfig->dst_maxburst);
1057 switch (direction) {
1058 case DMA_MEM_TO_DEV:
1059 reg_width = convert_buswidth(sconfig->dst_addr_width);
1060 ctrla |= ATC_DST_WIDTH(reg_width);
1061 ctrlb |= ATC_DST_ADDR_MODE_FIXED
1062 | ATC_SRC_ADDR_MODE_INCR
1064 | ATC_SIF(atchan->mem_if) | ATC_DIF(atchan->per_if);
1065 reg = sconfig->dst_addr;
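/*
 * Memory-to-device: the source walks through each sg entry (INCR)
 * while the destination stays fixed on the peripheral register taken
 * from sconfig->dst_addr.
 */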
1066 for_each_sg(sgl, sg, sg_len, i) {
1067 struct at_desc *desc;
1071 desc = atc_desc_get(atchan);
1075 mem = sg_dma_address(sg);
1076 len = sg_dma_len(sg);
1077 if (unlikely(!len)) {
1078 dev_dbg(chan2dev(chan),
1079 "prep_slave_sg: sg(%d) data length is zero\n", i);
1083 if (unlikely(mem & 3 || len & 3))
1086 desc->lli.saddr = mem;
1087 desc->lli.daddr = reg;
1088 desc->lli.ctrla = ctrla
1089 | ATC_SRC_WIDTH(mem_width)
1091 desc->lli.ctrlb = ctrlb;
1094 atc_desc_chain(&first, &prev, desc);
1098 case DMA_DEV_TO_MEM:
1099 reg_width = convert_buswidth(sconfig->src_addr_width);
1100 ctrla |= ATC_SRC_WIDTH(reg_width);
1101 ctrlb |= ATC_DST_ADDR_MODE_INCR
1102 | ATC_SRC_ADDR_MODE_FIXED
1104 | ATC_SIF(atchan->per_if) | ATC_DIF(atchan->mem_if);
1106 reg = sconfig->src_addr;
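/*
 * Device-to-memory: the mirror case, the source stays fixed on the
 * peripheral register (sconfig->src_addr) while the destination walks
 * through each sg entry.
 */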
1107 for_each_sg(sgl, sg, sg_len, i) {
1108 struct at_desc *desc;
1112 desc = atc_desc_get(atchan);
1116 mem = sg_dma_address(sg);
1117 len = sg_dma_len(sg);
1118 if (unlikely(!len)) {
1119 dev_dbg(chan2dev(chan),
1120 "prep_slave_sg: sg(%d) data length is zero\n", i);
1124 if (unlikely(mem & 3 || len & 3))
1127 desc->lli.saddr = reg;
1128 desc->lli.daddr = mem;
1129 desc->lli.ctrla = ctrla
1130 | ATC_DST_WIDTH(mem_width)
1132 desc->lli.ctrlb = ctrlb;
1135 atc_desc_chain(&first, &prev, desc);
1143 /* set end-of-link on the last link descriptor of the list */
1146 /* First descriptor of the chain embeds additional information */
1147 first->txd.cookie = -EBUSY;
1148 first->total_len = total_len;
1150 /* the first link descriptor of the list is responsible for the flags */
1151 first->txd.flags = flags; /* client is in control of this ack */
1156 dev_err(chan2dev(chan), "not enough descriptors available\n");
1158 atc_desc_put(atchan, first);
1163 * atc_dma_cyclic_check_values
1164 * Check for too big/unaligned periods and unaligned DMA buffer
1167 atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
1170 if (period_len > (ATC_BTSIZE_MAX << reg_width))
1172 if (unlikely(period_len & ((1 << reg_width) - 1)))
1174 if (unlikely(buf_addr & ((1 << reg_width) - 1)))
1184 * atc_dma_cyclic_fill_desc - Fill one period descriptor
1187 atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc,
1188 unsigned int period_index, dma_addr_t buf_addr,
1189 unsigned int reg_width, size_t period_len,
1190 enum dma_transfer_direction direction)
1192 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1193 struct dma_slave_config *sconfig = &atchan->dma_sconfig;
1196 /* prepare common CTRLA value */
1197 ctrla = ATC_SCSIZE(sconfig->src_maxburst)
1198 | ATC_DCSIZE(sconfig->dst_maxburst)
1199 | ATC_DST_WIDTH(reg_width)
1200 | ATC_SRC_WIDTH(reg_width)
1201 | period_len >> reg_width;
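/*
 * BTSIZE is period_len >> reg_width, so each descriptor in the ring
 * covers exactly one period of the cyclic buffer.
 */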
1203 switch (direction) {
1204 case DMA_MEM_TO_DEV:
1205 desc->lli.saddr = buf_addr + (period_len * period_index);
1206 desc->lli.daddr = sconfig->dst_addr;
1207 desc->lli.ctrla = ctrla;
1208 desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED
1209 | ATC_SRC_ADDR_MODE_INCR
1211 | ATC_SIF(atchan->mem_if)
1212 | ATC_DIF(atchan->per_if);
1213 desc->len = period_len;
1216 case DMA_DEV_TO_MEM:
1217 desc->lli.saddr = sconfig->src_addr;
1218 desc->lli.daddr = buf_addr + (period_len * period_index);
1219 desc->lli.ctrla = ctrla;
1220 desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR
1221 | ATC_SRC_ADDR_MODE_FIXED
1223 | ATC_SIF(atchan->per_if)
1224 | ATC_DIF(atchan->mem_if);
1225 desc->len = period_len;
1236 * atc_prep_dma_cyclic - prepare the cyclic DMA transfer
1237 * @chan: the DMA channel to prepare
1238 * @buf_addr: physical DMA address where the buffer starts
1239 * @buf_len: total number of bytes for the entire buffer
1240 * @period_len: number of bytes for each period
1241 * @direction: transfer direction, to or from device
1242 * @flags: tx descriptor status flags
1244 static struct dma_async_tx_descriptor *
1245 atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
1246 size_t period_len, enum dma_transfer_direction direction,
1247 unsigned long flags)
1249 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1250 struct at_dma_slave *atslave = chan->private;
1251 struct dma_slave_config *sconfig = &atchan->dma_sconfig;
1252 struct at_desc *first = NULL;
1253 struct at_desc *prev = NULL;
1254 unsigned long was_cyclic;
1255 unsigned int reg_width;
1256 unsigned int periods = buf_len / period_len;
1259 dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@%pad - %d (%d/%d)\n",
1260 direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
1262 periods, buf_len, period_len);
1264 if (unlikely(!atslave || !buf_len || !period_len)) {
1265 dev_dbg(chan2dev(chan), "prep_dma_cyclic: length is zero!\n");
1269 was_cyclic = test_and_set_bit(ATC_IS_CYCLIC, &atchan->status);
1271 dev_dbg(chan2dev(chan), "prep_dma_cyclic: channel in use!\n");
1275 if (unlikely(!is_slave_direction(direction)))
1278 if (direction == DMA_MEM_TO_DEV)
1279 reg_width = convert_buswidth(sconfig->dst_addr_width);
1281 reg_width = convert_buswidth(sconfig->src_addr_width);
1283 /* Check for too big/unaligned periods and unaligned DMA buffer */
1284 if (atc_dma_cyclic_check_values(reg_width, buf_addr, period_len))
1287 /* build cyclic linked list */
1288 for (i = 0; i < periods; i++) {
1289 struct at_desc *desc;
1291 desc = atc_desc_get(atchan);
1295 if (atc_dma_cyclic_fill_desc(chan, desc, i, buf_addr,
1296 reg_width, period_len, direction))
1299 atc_desc_chain(&first, &prev, desc);
1302 /* let's make a cyclic list */
1303 prev->lli.dscr = first->txd.phys;
1305 /* First descriptor of the chain embeds additional information */
1306 first->txd.cookie = -EBUSY;
1307 first->total_len = buf_len;
1312 dev_err(chan2dev(chan), "not enough descriptors available\n");
1313 atc_desc_put(atchan, first);
1315 clear_bit(ATC_IS_CYCLIC, &atchan->status);
1319 static int atc_config(struct dma_chan *chan,
1320 struct dma_slave_config *sconfig)
1322 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1324 dev_vdbg(chan2dev(chan), "%s\n", __func__);
1326 /* Check if chan is configured for slave transfers */
1330 memcpy(&atchan->dma_sconfig, sconfig, sizeof(*sconfig));
1332 convert_burst(&atchan->dma_sconfig.src_maxburst);
1333 convert_burst(&atchan->dma_sconfig.dst_maxburst);
1338 static int atc_pause(struct dma_chan *chan)
1340 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1341 struct at_dma *atdma = to_at_dma(chan->device);
1342 int chan_id = atchan->chan_common.chan_id;
1343 unsigned long flags;
1345 dev_vdbg(chan2dev(chan), "%s\n", __func__);
1347 spin_lock_irqsave(&atchan->lock, flags);
1349 dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
1350 set_bit(ATC_IS_PAUSED, &atchan->status);
1352 spin_unlock_irqrestore(&atchan->lock, flags);
1357 static int atc_resume(struct dma_chan *chan)
1359 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1360 struct at_dma *atdma = to_at_dma(chan->device);
1361 int chan_id = atchan->chan_common.chan_id;
1362 unsigned long flags;
1364 dev_vdbg(chan2dev(chan), "%s\n", __func__);
1366 if (!atc_chan_is_paused(atchan))
1369 spin_lock_irqsave(&atchan->lock, flags);
1371 dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
1372 clear_bit(ATC_IS_PAUSED, &atchan->status);
1374 spin_unlock_irqrestore(&atchan->lock, flags);
1379 static int atc_terminate_all(struct dma_chan *chan)
1381 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1382 struct at_dma *atdma = to_at_dma(chan->device);
1383 int chan_id = atchan->chan_common.chan_id;
1384 unsigned long flags;
1386 dev_vdbg(chan2dev(chan), "%s\n", __func__);
1389 * This is only called when something went wrong elsewhere, so
1390 * we don't really care about the data. Just disable the
1391 * channel. We still have to poll the channel enable bit due
1392 * to AHB/HSB limitations.
1394 spin_lock_irqsave(&atchan->lock, flags);
1396 /* disabling channel: must also remove suspend state */
1397 dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);
1399 /* confirm that this channel is disabled */
1400 while (dma_readl(atdma, CHSR) & atchan->mask)
1403 /* active_list entries will end up before queued entries */
1404 list_splice_tail_init(&atchan->queue, &atchan->free_list);
1405 list_splice_tail_init(&atchan->active_list, &atchan->free_list);
1407 clear_bit(ATC_IS_PAUSED, &atchan->status);
1408 /* if channel dedicated to cyclic operations, free it */
1409 clear_bit(ATC_IS_CYCLIC, &atchan->status);
1411 spin_unlock_irqrestore(&atchan->lock, flags);
1417 * atc_tx_status - poll for transaction completion
1418 * @chan: DMA channel
1419 * @cookie: transaction identifier to check status of
1420 * @txstate: if not %NULL updated with transaction state
1422 * If @txstate is passed in, upon return it reflects the driver
1423 * internal state and can be used with dma_async_is_complete() to check
1424 * the status of multiple cookies without re-checking hardware state.
1426 static enum dma_status
1427 atc_tx_status(struct dma_chan *chan,
1428 dma_cookie_t cookie,
1429 struct dma_tx_state *txstate)
1431 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1432 unsigned long flags;
1433 enum dma_status ret;
1436 ret = dma_cookie_status(chan, cookie, txstate);
1437 if (ret == DMA_COMPLETE)
1440 * There's no point calculating the residue if there's
1441 * no txstate to store the value.
1446 spin_lock_irqsave(&atchan->lock, flags);
1448 /* Get number of bytes left in the active transactions */
1449 bytes = atc_get_bytes_left(chan, cookie);
1451 spin_unlock_irqrestore(&atchan->lock, flags);
1453 if (unlikely(bytes < 0)) {
1454 dev_vdbg(chan2dev(chan), "get residual bytes error\n");
1457 dma_set_residue(txstate, bytes);
1460 dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d residue = %d\n",
1461 ret, cookie, bytes);
1467 * atc_issue_pending - takes the first transaction descriptor in the pending
1468 * queue and starts the transfer.
1469 * @chan: target DMA channel
1471 static void atc_issue_pending(struct dma_chan *chan)
1473 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1474 struct at_desc *desc;
1475 unsigned long flags;
1477 dev_vdbg(chan2dev(chan), "issue_pending\n");
1479 spin_lock_irqsave(&atchan->lock, flags);
1480 if (atc_chan_is_enabled(atchan) || list_empty(&atchan->queue))
1481 return spin_unlock_irqrestore(&atchan->lock, flags);
1483 desc = atc_first_queued(atchan);
1484 list_move_tail(&desc->desc_node, &atchan->active_list);
1485 atc_dostart(atchan, desc);
1486 spin_unlock_irqrestore(&atchan->lock, flags);
1490 * atc_alloc_chan_resources - allocate resources for DMA channel
1491 * @chan: allocate descriptor resources for this channel
1493 * return - the number of allocated descriptors
1495 static int atc_alloc_chan_resources(struct dma_chan *chan)
1497 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1498 struct at_dma *atdma = to_at_dma(chan->device);
1499 struct at_desc *desc;
1500 struct at_dma_slave *atslave;
1504 dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");
1506 /* ASSERT: channel is idle */
1507 if (atc_chan_is_enabled(atchan)) {
1508 dev_dbg(chan2dev(chan), "DMA channel not idle ?\n");
1512 if (!list_empty(&atchan->free_list)) {
1513 dev_dbg(chan2dev(chan), "can't allocate channel resources (channel not freed from a previous use)\n");
1517 cfg = ATC_DEFAULT_CFG;
1519 atslave = chan->private;
1522 * We need controller-specific data to set up slave
1525 BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev);
1527 /* if a cfg configuration is specified, take it instead of the default */
1532 /* Allocate initial pool of descriptors */
1533 for (i = 0; i < init_nr_desc_per_channel; i++) {
1534 desc = atc_alloc_descriptor(chan, GFP_KERNEL);
1536 dev_err(atdma->dma_common.dev,
1537 "Only %d initial descriptors\n", i);
1540 list_add_tail(&desc->desc_node, &atchan->free_list);
1543 dma_cookie_init(chan);
1545 /* channel parameters */
1546 channel_writel(atchan, CFG, cfg);
1548 dev_dbg(chan2dev(chan),
1549 "alloc_chan_resources: allocated %d descriptors\n", i);
1555 * atc_free_chan_resources - free all channel resources
1556 * @chan: DMA channel
1558 static void atc_free_chan_resources(struct dma_chan *chan)
1560 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1561 struct at_dma *atdma = to_at_dma(chan->device);
1562 struct at_desc *desc, *_desc;
1565 /* ASSERT: channel is idle */
1566 BUG_ON(!list_empty(&atchan->active_list));
1567 BUG_ON(!list_empty(&atchan->queue));
1568 BUG_ON(atc_chan_is_enabled(atchan));
1570 list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
1571 dev_vdbg(chan2dev(chan), " freeing descriptor %p\n", desc);
1572 list_del(&desc->desc_node);
1573 /* free link descriptor */
1574 dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys);
1576 list_splice_init(&atchan->free_list, &list);
1580 * Free atslave allocated in at_dma_xlate()
1582 kfree(chan->private);
1583 chan->private = NULL;
1585 dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
1589 static bool at_dma_filter(struct dma_chan *chan, void *slave)
1591 struct at_dma_slave *atslave = slave;
1593 if (atslave->dma_dev == chan->device->dev) {
1594 chan->private = atslave;
1601 static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
1602 struct of_dma *of_dma)
1604 struct dma_chan *chan;
1605 struct at_dma_chan *atchan;
1606 struct at_dma_slave *atslave;
1607 dma_cap_mask_t mask;
1608 unsigned int per_id;
1609 struct platform_device *dmac_pdev;
1611 if (dma_spec->args_count != 2)
1614 dmac_pdev = of_find_device_by_node(dma_spec->np);
1619 dma_cap_set(DMA_SLAVE, mask);
1621 atslave = kmalloc(sizeof(*atslave), GFP_KERNEL);
1623 put_device(&dmac_pdev->dev);
1627 atslave->cfg = ATC_DST_H2SEL_HW | ATC_SRC_H2SEL_HW;
1629 * We can fill both SRC_PER and DST_PER; one of these fields will be
1630 * ignored depending on DMA transfer direction.
1632 per_id = dma_spec->args[1] & AT91_DMA_CFG_PER_ID_MASK;
1633 atslave->cfg |= ATC_DST_PER_MSB(per_id) | ATC_DST_PER(per_id)
1634 | ATC_SRC_PER_MSB(per_id) | ATC_SRC_PER(per_id);
1636 * We have to translate the value we get from the device tree since
1637 * the half FIFO configuration value had to be 0 to keep backward
1640 switch (dma_spec->args[1] & AT91_DMA_CFG_FIFOCFG_MASK) {
1641 case AT91_DMA_CFG_FIFOCFG_ALAP:
1642 atslave->cfg |= ATC_FIFOCFG_LARGESTBURST;
1644 case AT91_DMA_CFG_FIFOCFG_ASAP:
1645 atslave->cfg |= ATC_FIFOCFG_ENOUGHSPACE;
1647 case AT91_DMA_CFG_FIFOCFG_HALF:
1649 atslave->cfg |= ATC_FIFOCFG_HALFFIFO;
1651 atslave->dma_dev = &dmac_pdev->dev;
1653 chan = dma_request_channel(mask, at_dma_filter, atslave);
1655 put_device(&dmac_pdev->dev);
1660 atchan = to_at_dma_chan(chan);
1661 atchan->per_if = dma_spec->args[0] & 0xff;
1662 atchan->mem_if = (dma_spec->args[0] >> 16) & 0xff;
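/*
 * Illustrative consumer node (assumption; see the atmel-dma DT binding
 * for the authoritative cell layout): the two cells after the phandle
 * are the memory/peripheral interface word decoded above and the
 * AT91_DMA_CFG_* word, e.g. something along the lines of
 *
 *	dmas = <&dma0 2 AT91_DMA_CFG_PER_ID(3)>;
 *	dma-names = "tx";
 */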
1667 static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
1668 struct of_dma *of_dma)
1674 /*-- Module Management -----------------------------------------------*/
1676 /* cap_mask is a multi-u32 bitfield, fill it with proper C code. */
1677 static struct at_dma_platform_data at91sam9rl_config = {
1680 static struct at_dma_platform_data at91sam9g45_config = {
1684 #if defined(CONFIG_OF)
1685 static const struct of_device_id atmel_dma_dt_ids[] = {
1687 .compatible = "atmel,at91sam9rl-dma",
1688 .data = &at91sam9rl_config,
1690 .compatible = "atmel,at91sam9g45-dma",
1691 .data = &at91sam9g45_config,
1697 MODULE_DEVICE_TABLE(of, atmel_dma_dt_ids);
1700 static const struct platform_device_id atdma_devtypes[] = {
1702 .name = "at91sam9rl_dma",
1703 .driver_data = (unsigned long) &at91sam9rl_config,
1705 .name = "at91sam9g45_dma",
1706 .driver_data = (unsigned long) &at91sam9g45_config,
1712 static inline const struct at_dma_platform_data * __init at_dma_get_driver_data(
1713 struct platform_device *pdev)
1715 if (pdev->dev.of_node) {
1716 const struct of_device_id *match;
1717 match = of_match_node(atmel_dma_dt_ids, pdev->dev.of_node);
1722 return (struct at_dma_platform_data *)
1723 platform_get_device_id(pdev)->driver_data;
1727 * at_dma_off - disable DMA controller
1728 * @atdma: the Atmel HDMAC device
1730 static void at_dma_off(struct at_dma *atdma)
1732 dma_writel(atdma, EN, 0);
1734 /* disable all interrupts */
1735 dma_writel(atdma, EBCIDR, -1L);
1737 /* confirm that all channels are disabled */
1738 while (dma_readl(atdma, CHSR) & atdma->all_chan_mask)
1742 static int __init at_dma_probe(struct platform_device *pdev)
1744 struct resource *io;
1745 struct at_dma *atdma;
1750 const struct at_dma_platform_data *plat_dat;
1752 /* setup platform data for each SoC */
1753 dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask);
1754 dma_cap_set(DMA_INTERLEAVE, at91sam9g45_config.cap_mask);
1755 dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
1756 dma_cap_set(DMA_MEMSET, at91sam9g45_config.cap_mask);
1757 dma_cap_set(DMA_MEMSET_SG, at91sam9g45_config.cap_mask);
1758 dma_cap_set(DMA_PRIVATE, at91sam9g45_config.cap_mask);
1759 dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);
1761 /* get DMA parameters from controller type */
1762 plat_dat = at_dma_get_driver_data(pdev);
1766 io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1770 irq = platform_get_irq(pdev, 0);
1774 size = sizeof(struct at_dma);
1775 size += plat_dat->nr_channels * sizeof(struct at_dma_chan);
1776 atdma = kzalloc(size, GFP_KERNEL);
1780 /* discover transaction capabilities */
1781 atdma->dma_common.cap_mask = plat_dat->cap_mask;
1782 atdma->all_chan_mask = (1 << plat_dat->nr_channels) - 1;
1784 size = resource_size(io);
1785 if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
1790 atdma->regs = ioremap(io->start, size);
1796 atdma->clk = clk_get(&pdev->dev, "dma_clk");
1797 if (IS_ERR(atdma->clk)) {
1798 err = PTR_ERR(atdma->clk);
1801 err = clk_prepare_enable(atdma->clk);
1803 goto err_clk_prepare;
1805 /* force dma off, just in case */
1808 err = request_irq(irq, at_dma_interrupt, 0, "at_hdmac", atdma);
1812 platform_set_drvdata(pdev, atdma);
1814 /* create a pool of consistent memory blocks for hardware descriptors */
1815 atdma->dma_desc_pool = dma_pool_create("at_hdmac_desc_pool",
1816 &pdev->dev, sizeof(struct at_desc),
1817 4 /* word alignment */, 0);
1818 if (!atdma->dma_desc_pool) {
1819 dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
1821 goto err_desc_pool_create;
1824 /* create a pool of consistent memory blocks for memset blocks */
1825 atdma->memset_pool = dma_pool_create("at_hdmac_memset_pool",
1826 &pdev->dev, sizeof(int), 4, 0);
1827 if (!atdma->memset_pool) {
1828 dev_err(&pdev->dev, "No memory for memset dma pool\n");
1830 goto err_memset_pool_create;
1833 /* clear any pending interrupt */
1834 while (dma_readl(atdma, EBCISR))
1837 /* initialize channels related values */
1838 INIT_LIST_HEAD(&atdma->dma_common.channels);
1839 for (i = 0; i < plat_dat->nr_channels; i++) {
1840 struct at_dma_chan *atchan = &atdma->chan[i];
1842 atchan->mem_if = AT_DMA_MEM_IF;
1843 atchan->per_if = AT_DMA_PER_IF;
1844 atchan->chan_common.device = &atdma->dma_common;
1845 dma_cookie_init(&atchan->chan_common);
1846 list_add_tail(&atchan->chan_common.device_node,
1847 &atdma->dma_common.channels);
1849 atchan->ch_regs = atdma->regs + ch_regs(i);
1850 spin_lock_init(&atchan->lock);
1851 atchan->mask = 1 << i;
1853 INIT_LIST_HEAD(&atchan->active_list);
1854 INIT_LIST_HEAD(&atchan->queue);
1855 INIT_LIST_HEAD(&atchan->free_list);
1857 tasklet_setup(&atchan->tasklet, atc_tasklet);
1858 atc_enable_chan_irq(atdma, i);
1861 /* set base routines */
1862 atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources;
1863 atdma->dma_common.device_free_chan_resources = atc_free_chan_resources;
1864 atdma->dma_common.device_tx_status = atc_tx_status;
1865 atdma->dma_common.device_issue_pending = atc_issue_pending;
1866 atdma->dma_common.dev = &pdev->dev;
1868 /* set prep routines based on capability */
1869 if (dma_has_cap(DMA_INTERLEAVE, atdma->dma_common.cap_mask))
1870 atdma->dma_common.device_prep_interleaved_dma = atc_prep_dma_interleaved;
1872 if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
1873 atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;
1875 if (dma_has_cap(DMA_MEMSET, atdma->dma_common.cap_mask)) {
1876 atdma->dma_common.device_prep_dma_memset = atc_prep_dma_memset;
1877 atdma->dma_common.device_prep_dma_memset_sg = atc_prep_dma_memset_sg;
1878 atdma->dma_common.fill_align = DMAENGINE_ALIGN_4_BYTES;
1881 if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
1882 atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
1883 /* controller can do slave DMA: can trigger cyclic transfers */
1884 dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask);
1885 atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
1886 atdma->dma_common.device_config = atc_config;
1887 atdma->dma_common.device_pause = atc_pause;
1888 atdma->dma_common.device_resume = atc_resume;
1889 atdma->dma_common.device_terminate_all = atc_terminate_all;
1890 atdma->dma_common.src_addr_widths = ATC_DMA_BUSWIDTHS;
1891 atdma->dma_common.dst_addr_widths = ATC_DMA_BUSWIDTHS;
1892 atdma->dma_common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
1893 atdma->dma_common.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
1896 dma_writel(atdma, EN, AT_DMA_ENABLE);
1898 dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s%s), %d channels\n",
1899 dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
1900 dma_has_cap(DMA_MEMSET, atdma->dma_common.cap_mask) ? "set " : "",
1901 dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask) ? "slave " : "",
1902 plat_dat->nr_channels);
1904 err = dma_async_device_register(&atdma->dma_common);
1906 dev_err(&pdev->dev, "Unable to register: %d.\n", err);
1907 goto err_dma_async_device_register;
1911 * Do not return an error if the dmac node is not present in order to
1912 * not break the existing way of requesting a channel with
1913 * dma_request_channel().
1915 if (pdev->dev.of_node) {
1916 err = of_dma_controller_register(pdev->dev.of_node,
1917 at_dma_xlate, atdma);
1919 dev_err(&pdev->dev, "could not register of_dma_controller\n");
1920 goto err_of_dma_controller_register;
1926 err_of_dma_controller_register:
1927 dma_async_device_unregister(&atdma->dma_common);
1928 err_dma_async_device_register:
1929 dma_pool_destroy(atdma->memset_pool);
1930 err_memset_pool_create:
1931 dma_pool_destroy(atdma->dma_desc_pool);
1932 err_desc_pool_create:
1933 free_irq(platform_get_irq(pdev, 0), atdma);
1935 clk_disable_unprepare(atdma->clk);
1937 clk_put(atdma->clk);
1939 iounmap(atdma->regs);
1942 release_mem_region(io->start, size);
1948 static int at_dma_remove(struct platform_device *pdev)
1950 struct at_dma *atdma = platform_get_drvdata(pdev);
1951 struct dma_chan *chan, *_chan;
1952 struct resource *io;
1955 if (pdev->dev.of_node)
1956 of_dma_controller_free(pdev->dev.of_node);
1957 dma_async_device_unregister(&atdma->dma_common);
1959 dma_pool_destroy(atdma->memset_pool);
1960 dma_pool_destroy(atdma->dma_desc_pool);
1961 free_irq(platform_get_irq(pdev, 0), atdma);
1963 list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
1965 struct at_dma_chan *atchan = to_at_dma_chan(chan);
1967 /* Disable interrupts */
1968 atc_disable_chan_irq(atdma, chan->chan_id);
1970 tasklet_kill(&atchan->tasklet);
1971 list_del(&chan->device_node);
1974 clk_disable_unprepare(atdma->clk);
1975 clk_put(atdma->clk);
1977 iounmap(atdma->regs);
1980 io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1981 release_mem_region(io->start, resource_size(io));
1988 static void at_dma_shutdown(struct platform_device *pdev)
1990 struct at_dma *atdma = platform_get_drvdata(pdev);
1992 at_dma_off(platform_get_drvdata(pdev));
1993 clk_disable_unprepare(atdma->clk);
1996 static int at_dma_prepare(struct device *dev)
1998 struct at_dma *atdma = dev_get_drvdata(dev);
1999 struct dma_chan *chan, *_chan;
2001 list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
2003 struct at_dma_chan *atchan = to_at_dma_chan(chan);
2004 /* wait for transaction completion (except in cyclic case) */
2005 if (atc_chan_is_enabled(atchan) && !atc_chan_is_cyclic(atchan))
2011 static void atc_suspend_cyclic(struct at_dma_chan *atchan)
2013 struct dma_chan *chan = &atchan->chan_common;
2015 /* The channel should have been paused by the user;
2016 * do it anyway even if it was not done already */
2017 if (!atc_chan_is_paused(atchan)) {
2018 dev_warn(chan2dev(chan),
2019 "cyclic channel not paused, should be done by channel user\n");
2023 /* now preserve additional data for cyclic operations */
2024 /* next descriptor address in the cyclic list */
2025 atchan->save_dscr = channel_readl(atchan, DSCR);
2027 vdbg_dump_regs(atchan);
2030 static int at_dma_suspend_noirq(struct device *dev)
2032 struct at_dma *atdma = dev_get_drvdata(dev);
2033 struct dma_chan *chan, *_chan;
2036 list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
2038 struct at_dma_chan *atchan = to_at_dma_chan(chan);
2040 if (atc_chan_is_cyclic(atchan))
2041 atc_suspend_cyclic(atchan);
2042 atchan->save_cfg = channel_readl(atchan, CFG);
2044 atdma->save_imr = dma_readl(atdma, EBCIMR);
2046 /* disable DMA controller */
2048 clk_disable_unprepare(atdma->clk);
2052 static void atc_resume_cyclic(struct at_dma_chan *atchan)
2054 struct at_dma *atdma = to_at_dma(atchan->chan_common.device);
2056 /* restore channel status for cyclic descriptors list:
2057 * next descriptor in the cyclic list at the time of suspend */
2058 channel_writel(atchan, SADDR, 0);
2059 channel_writel(atchan, DADDR, 0);
2060 channel_writel(atchan, CTRLA, 0);
2061 channel_writel(atchan, CTRLB, 0);
2062 channel_writel(atchan, DSCR, atchan->save_dscr);
2063 dma_writel(atdma, CHER, atchan->mask);
2065 /* the channel pause status should be removed by the channel user;
2066 * we cannot take the initiative to do it here */
2068 vdbg_dump_regs(atchan);
2071 static int at_dma_resume_noirq(struct device *dev)
2073 struct at_dma *atdma = dev_get_drvdata(dev);
2074 struct dma_chan *chan, *_chan;
2076 /* bring back DMA controller */
2077 clk_prepare_enable(atdma->clk);
2078 dma_writel(atdma, EN, AT_DMA_ENABLE);
2080 /* clear any pending interrupt */
2081 while (dma_readl(atdma, EBCISR))
2084 /* restore saved data */
2085 dma_writel(atdma, EBCIER, atdma->save_imr);
2086 list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
2088 struct at_dma_chan *atchan = to_at_dma_chan(chan);
2090 channel_writel(atchan, CFG, atchan->save_cfg);
2091 if (atc_chan_is_cyclic(atchan))
2092 atc_resume_cyclic(atchan);
2097 static const struct dev_pm_ops at_dma_dev_pm_ops = {
2098 .prepare = at_dma_prepare,
2099 .suspend_noirq = at_dma_suspend_noirq,
2100 .resume_noirq = at_dma_resume_noirq,
2103 static struct platform_driver at_dma_driver = {
2104 .remove = at_dma_remove,
2105 .shutdown = at_dma_shutdown,
2106 .id_table = atdma_devtypes,
2109 .pm = &at_dma_dev_pm_ops,
2110 .of_match_table = of_match_ptr(atmel_dma_dt_ids),
2114 static int __init at_dma_init(void)
2116 return platform_driver_probe(&at_dma_driver, at_dma_probe);
2118 subsys_initcall(at_dma_init);
2120 static void __exit at_dma_exit(void)
2122 platform_driver_unregister(&at_dma_driver);
2124 module_exit(at_dma_exit);
2126 MODULE_DESCRIPTION("Atmel AHB DMA Controller driver");
2127 MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>");
2128 MODULE_LICENSE("GPL");
2129 MODULE_ALIAS("platform:at_hdmac");