/*
 * Driver for the Atmel AHB DMA Controller (aka HDMA or DMAC on AT91 systems)
 *
 * Copyright (C) 2008 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 *
 * This supports the Atmel AHB DMA Controller found in several Atmel SoCs.
 * The only Atmel DMA Controller that is not covered by this driver is the one
 * found on AT91SAM9263.
 */
#include <dt-bindings/dma/at91.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>

#include "at_hdmac_regs.h"
#include "dmaengine.h"
/*
 * Glossary
 * --------
 *
 * at_hdmac		: Name of the Atmel AHB DMA Controller
 * at_dma_ / atdma	: Atmel DMA controller entity related
 * atc_    / atchan	: Atmel DMA Channel entity related
 */
#define	ATC_DEFAULT_CFG		(ATC_FIFOCFG_HALFFIFO)
#define	ATC_DEFAULT_CTRLB	(ATC_SIF(AT_DMA_MEM_IF) \
				|ATC_DIF(AT_DMA_MEM_IF))
#define ATC_DMA_BUSWIDTHS\
	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))

#define ATC_MAX_DSCR_TRIALS	10
/*
 * Initial number of descriptors to allocate for each channel. This could
 * be increased during DMA usage.
 */
static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
		 "initial descriptors per channel (default: 64)");
/* prototypes */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx);
static void atc_issue_pending(struct dma_chan *chan);


/*----------------------------------------------------------------------*/
static inline unsigned int atc_get_xfer_width(dma_addr_t src, dma_addr_t dst,
					      size_t len)
{
	unsigned int width;

	if (!((src | dst | len) & 3))
		width = 2;
	else if (!((src | dst | len) & 1))
		width = 1;
	else
		width = 0;

	return width;
}
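/*
 * Worked example (hypothetical addresses): src = 0x20001000,
 * dst = 0x20102000 and len = 0x400 all have their two low bits clear, so
 * the function above returns 2 and the transfer uses 32-bit (word)
 * accesses; if any of them were only 2-byte aligned it would return 1
 * (half-word), and an odd address or length forces 0 (byte accesses).
 */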
static struct at_desc *atc_first_active(struct at_dma_chan *atchan)
{
	return list_first_entry(&atchan->active_list,
				struct at_desc, desc_node);
}

static struct at_desc *atc_first_queued(struct at_dma_chan *atchan)
{
	return list_first_entry(&atchan->queue,
				struct at_desc, desc_node);
}
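/*
 * Note: both helpers above simply return the head of a list; callers are
 * expected to hold atchan->lock and to have checked that the list is not
 * empty, as is done throughout this driver.
 */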
/**
 * atc_alloc_descriptor - allocate and return an initialized descriptor
 * @chan: the channel to allocate descriptors for
 * @gfp_flags: GFP allocation flags
 *
 * Note: The ack-bit is positioned in the descriptor flag at creation time
 * to make initial allocation more convenient. This bit will be cleared
 * and control will be given to client at usage time (during
 * preparation functions).
 */
static struct at_desc *atc_alloc_descriptor(struct dma_chan *chan,
					    gfp_t gfp_flags)
{
	struct at_desc *desc = NULL;
	struct at_dma *atdma = to_at_dma(chan->device);
	dma_addr_t phys;

	desc = dma_pool_alloc(atdma->dma_desc_pool, gfp_flags, &phys);
	if (desc) {
		memset(desc, 0, sizeof(struct at_desc));
		INIT_LIST_HEAD(&desc->tx_list);
		dma_async_tx_descriptor_init(&desc->txd, chan);
		/* txd.flags will be overwritten in prep functions */
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.tx_submit = atc_tx_submit;
		desc->txd.phys = phys;
	}

	return desc;
}
/**
 * atc_desc_get - get an unused descriptor from free_list
 * @atchan: channel we want a new descriptor for
 */
static struct at_desc *atc_desc_get(struct at_dma_chan *atchan)
{
	struct at_desc *desc, *_desc;
	struct at_desc *ret = NULL;
	unsigned long flags;
	unsigned int i = 0;

	spin_lock_irqsave(&atchan->lock, flags);
	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
		i++;
		if (async_tx_test_ack(&desc->txd)) {
			list_del(&desc->desc_node);
			ret = desc;
			break;
		}
		dev_dbg(chan2dev(&atchan->chan_common),
			"desc %p not ACKed\n", desc);
	}
	spin_unlock_irqrestore(&atchan->lock, flags);
	dev_vdbg(chan2dev(&atchan->chan_common),
		 "scanned %u descriptors on freelist\n", i);

	/* no more descriptor available in initial pool: create one more */
	if (!ret) {
		ret = atc_alloc_descriptor(&atchan->chan_common, GFP_ATOMIC);
		if (ret) {
			spin_lock_irqsave(&atchan->lock, flags);
			atchan->descs_allocated++;
			spin_unlock_irqrestore(&atchan->lock, flags);
		} else {
			dev_err(chan2dev(&atchan->chan_common),
				"not enough descriptors available\n");
		}
	}

	return ret;
}
/**
 * atc_desc_put - move a descriptor, including any children, to the free list
 * @atchan: channel we work on
 * @desc: descriptor, at the head of a chain, to move to free list
 */
static void atc_desc_put(struct at_dma_chan *atchan, struct at_desc *desc)
{
	if (desc) {
		struct at_desc *child;
		unsigned long flags;

		spin_lock_irqsave(&atchan->lock, flags);
		list_for_each_entry(child, &desc->tx_list, desc_node)
			dev_vdbg(chan2dev(&atchan->chan_common),
				 "moving child desc %p to freelist\n",
				 child);
		list_splice_init(&desc->tx_list, &atchan->free_list);
		dev_vdbg(chan2dev(&atchan->chan_common),
			 "moving desc %p to freelist\n", desc);
		list_add(&desc->desc_node, &atchan->free_list);
		spin_unlock_irqrestore(&atchan->lock, flags);
	}
}
/**
 * atc_desc_chain - build chain adding a descriptor
 * @first: address of first descriptor of the chain
 * @prev: address of previous descriptor of the chain
 * @desc: descriptor to queue
 *
 * Called from prep_* functions
 */
static void atc_desc_chain(struct at_desc **first, struct at_desc **prev,
			   struct at_desc *desc)
{
	if (!(*first)) {
		*first = desc;
	} else {
		/* inform the HW lli about chaining */
		(*prev)->lli.dscr = desc->txd.phys;
		/* insert the link descriptor to the LD ring */
		list_add_tail(&desc->desc_node,
			      &(*first)->tx_list);
	}
	*prev = desc;
}
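/*
 * The prep_* functions below all follow the same pattern around
 * atc_desc_chain(): start with first = prev = NULL and feed every newly
 * allocated descriptor into the chain. A minimal sketch (error handling
 * omitted, loop condition hypothetical, using the names defined in this
 * file):
 *
 *	struct at_desc *first = NULL, *prev = NULL, *desc;
 *
 *	while (work_left) {
 *		desc = atc_desc_get(atchan);
 *		... fill desc->lli ...
 *		atc_desc_chain(&first, &prev, desc);
 *	}
 *	set_desc_eol(desc);	(terminate the hardware list)
 *	return &first->txd;
 */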
/**
 * atc_dostart - starts the DMA engine for real
 * @atchan: the channel we want to start
 * @first: first descriptor in the list we want to begin with
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_dostart(struct at_dma_chan *atchan, struct at_desc *first)
{
	struct at_dma *atdma = to_at_dma(atchan->chan_common.device);

	/* ASSERT:  channel is idle */
	if (atc_chan_is_enabled(atchan)) {
		dev_err(chan2dev(&atchan->chan_common),
			"BUG: Attempted to start non-idle channel\n");
		dev_err(chan2dev(&atchan->chan_common),
			"  channel: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
			channel_readl(atchan, SADDR),
			channel_readl(atchan, DADDR),
			channel_readl(atchan, CTRLA),
			channel_readl(atchan, CTRLB),
			channel_readl(atchan, DSCR));

		/* The tasklet will hopefully advance the queue... */
		return;
	}

	vdbg_dump_regs(atchan);

	channel_writel(atchan, SADDR, 0);
	channel_writel(atchan, DADDR, 0);
	channel_writel(atchan, CTRLA, 0);
	channel_writel(atchan, CTRLB, 0);
	channel_writel(atchan, DSCR, first->txd.phys);
	channel_writel(atchan, SPIP, ATC_SPIP_HOLE(first->src_hole) |
		       ATC_SPIP_BOUNDARY(first->boundary));
	channel_writel(atchan, DPIP, ATC_DPIP_HOLE(first->dst_hole) |
		       ATC_DPIP_BOUNDARY(first->boundary));
	dma_writel(atdma, CHER, atchan->mask);

	vdbg_dump_regs(atchan);
}
/**
 * atc_get_desc_by_cookie - get the descriptor of a cookie
 * @atchan: the DMA channel
 * @cookie: the cookie to get the descriptor for
 */
static struct at_desc *atc_get_desc_by_cookie(struct at_dma_chan *atchan,
					      dma_cookie_t cookie)
{
	struct at_desc *desc, *_desc;

	list_for_each_entry_safe(desc, _desc, &atchan->queue, desc_node) {
		if (desc->txd.cookie == cookie)
			return desc;
	}

	list_for_each_entry_safe(desc, _desc, &atchan->active_list, desc_node) {
		if (desc->txd.cookie == cookie)
			return desc;
	}

	return NULL;
}
/**
 * atc_calc_bytes_left - calculates the number of bytes left according to the
 * value read from CTRLA.
 *
 * @current_len: the number of bytes left before reading CTRLA
 * @ctrla: the value of CTRLA
 */
static inline int atc_calc_bytes_left(int current_len, u32 ctrla)
{
	u32 btsize = (ctrla & ATC_BTSIZE_MAX);
	u32 src_width = ATC_REG_TO_SRC_WIDTH(ctrla);

	/*
	 * According to the datasheet, when reading the Control A Register
	 * (ctrla), the Buffer Transfer Size (btsize) bitfield refers to the
	 * number of transfers completed on the Source Interface.
	 * So btsize is always a number of source width transfers.
	 */
	return current_len - (btsize << src_width);
}
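/*
 * Worked example (hypothetical values): with a 32-bit source width
 * (src_width == 2) and CTRLA reporting btsize == 0x10, the controller has
 * completed 0x10 << 2 == 64 bytes, so a descriptor with 256 bytes left
 * before the read has 256 - 64 == 192 bytes left after it.
 */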
/**
 * atc_get_bytes_left - get the number of bytes residue for a cookie
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 */
static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_desc *desc_first = atc_first_active(atchan);
	struct at_desc *desc;
	int ret;
	u32 ctrla, dscr, trials;

	/*
	 * If the cookie doesn't match to the currently running transfer then
	 * we can return the total length of the associated DMA transfer,
	 * because it is still queued.
	 */
	desc = atc_get_desc_by_cookie(atchan, cookie);
	if (desc == NULL)
		return -EINVAL;
	else if (desc != desc_first)
		return desc->total_len;

	/* cookie matches to the currently running transfer */
	ret = desc_first->total_len;

	if (desc_first->lli.dscr) {
		/* hardware linked list transfer */

		/*
		 * Calculate the residue by removing the length of the child
		 * descriptors already transferred from the total length.
		 * To get the current child descriptor we can use the value of
		 * the channel's DSCR register and compare it against the value
		 * of the hardware linked list structure of each child
		 * descriptor.
		 *
		 * The CTRLA register provides us with the amount of data
		 * already read from the source for the current child
		 * descriptor. So we can compute a more accurate residue by
		 * also removing the number of bytes corresponding to this
		 * amount of data.
		 *
		 * However, the DSCR and CTRLA registers cannot both be read
		 * atomically. Hence a race condition may occur: the first read
		 * may refer to one child descriptor whereas the second read
		 * may refer to a later child descriptor in the list because
		 * the DMA transfer progresses in between the two reads.
		 *
		 * One solution could have been to pause the DMA transfer, read
		 * the DSCR and CTRLA then resume the DMA transfer. Nonetheless,
		 * this approach presents some drawbacks:
		 * - If the DMA transfer is paused, RX overruns or TX underruns
		 *   are more likely to occur depending on the system latency.
		 *   Taking the USART driver as an example, it uses a cyclic DMA
		 *   transfer to read data from the Receive Holding Register
		 *   (RHR) to avoid RX overruns since the RHR is not protected
		 *   by any FIFO on most Atmel SoCs. So pausing the DMA transfer
		 *   to compute the residue would break the USART driver design.
		 * - The atc_pause() function masks interrupts, which we'd
		 *   rather avoid for system latency reasons.
		 *
		 * So we'd rather use another solution: the DSCR is read a
		 * first time, the CTRLA is read in turn, next the DSCR is read
		 * a second time. If the two consecutive read values of the
		 * DSCR are the same then we assume both refer to the very same
		 * child descriptor, and that the CTRLA value read in between
		 * does as well. For cyclic transfers, the assumption is that a
		 * full loop is "not so fast".
		 * If the two DSCR values are different, we read the CTRLA then
		 * the DSCR again, until two consecutive read values from DSCR
		 * are equal or until the maximum number of trials is reached.
		 * It is very unlikely that this algorithm fails to find a
		 * stable value for DSCR.
		 */

		dscr = channel_readl(atchan, DSCR);
		rmb(); /* ensure DSCR is read before CTRLA */
		ctrla = channel_readl(atchan, CTRLA);
		for (trials = 0; trials < ATC_MAX_DSCR_TRIALS; ++trials) {
			u32 new_dscr;

			rmb(); /* ensure DSCR is read after CTRLA */
			new_dscr = channel_readl(atchan, DSCR);

			/*
			 * If the DSCR register value has not changed inside the
			 * DMA controller since the previous read, we assume
			 * that both the dscr and ctrla values refer to the
			 * very same descriptor.
			 */
			if (likely(new_dscr == dscr))
				break;

			/*
			 * DSCR has changed inside the DMA controller, so the
			 * previously read value of CTRLA may refer to an
			 * already processed descriptor hence could be outdated.
			 * We need to update ctrla to match the current
			 * descriptor.
			 */
			dscr = new_dscr;
			rmb(); /* ensure DSCR is read before CTRLA */
			ctrla = channel_readl(atchan, CTRLA);
		}
		if (unlikely(trials >= ATC_MAX_DSCR_TRIALS))
			return -ETIMEDOUT;

		/* for the first descriptor we can be more accurate */
		if (desc_first->lli.dscr == dscr)
			return atc_calc_bytes_left(ret, ctrla);

		ret -= desc_first->len;
		list_for_each_entry(desc, &desc_first->tx_list, desc_node) {
			if (desc->lli.dscr == dscr)
				break;

			ret -= desc->len;
		}

		/*
		 * For the current descriptor in the chain we can calculate
		 * the remaining bytes using the channel's register.
		 */
		ret = atc_calc_bytes_left(ret, ctrla);
	} else {
		/* single transfer */
		ctrla = channel_readl(atchan, CTRLA);
		ret = atc_calc_bytes_left(ret, ctrla);
	}

	return ret;
}
/**
 * atc_chain_complete - finish work for one transaction chain
 * @atchan: channel we work on
 * @desc: descriptor at the head of the chain we want to complete
 *
 * Called with atchan->lock held and bh disabled */
static void
atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
{
	struct dma_async_tx_descriptor *txd = &desc->txd;
	struct at_dma *atdma = to_at_dma(atchan->chan_common.device);

	dev_vdbg(chan2dev(&atchan->chan_common),
		 "descriptor %u complete\n", txd->cookie);

	/* mark the descriptor as complete for non cyclic cases only */
	if (!atc_chan_is_cyclic(atchan))
		dma_cookie_complete(txd);

	/* If the transfer was a memset, free our temporary buffer */
	if (desc->memset_buffer) {
		dma_pool_free(atdma->memset_pool, desc->memset_vaddr,
			      desc->memset_paddr);
		desc->memset_buffer = false;
	}

	/* move children to free_list */
	list_splice_init(&desc->tx_list, &atchan->free_list);
	/* move myself to free_list */
	list_move(&desc->desc_node, &atchan->free_list);

	dma_descriptor_unmap(txd);
	/* for cyclic transfers,
	 * no need to replay callback function while stopping */
	if (!atc_chan_is_cyclic(atchan)) {
		/*
		 * The API requires that no submissions are done from a
		 * callback, so we don't need to drop the lock here
		 */
		dmaengine_desc_get_callback_invoke(txd, NULL);
	}

	dma_run_dependencies(txd);
}
/**
 * atc_complete_all - finish work for all transactions
 * @atchan: channel to complete transactions for
 *
 * Submit queued descriptors, if any.
 *
 * Assume channel is idle while calling this function
 * Called with atchan->lock held and bh disabled
 */
static void atc_complete_all(struct at_dma_chan *atchan)
{
	struct at_desc *desc, *_desc;
	LIST_HEAD(list);

	dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");

	/*
	 * Submit queued descriptors ASAP, i.e. before we go through
	 * the completed ones.
	 */
	if (!list_empty(&atchan->queue))
		atc_dostart(atchan, atc_first_queued(atchan));
	/* empty active_list now it is completed */
	list_splice_init(&atchan->active_list, &list);
	/* empty queue list by moving descriptors (if any) to active_list */
	list_splice_init(&atchan->queue, &atchan->active_list);

	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		atc_chain_complete(atchan, desc);
}
/**
 * atc_advance_work - at the end of a transaction, move forward
 * @atchan: channel where the transaction ended
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_advance_work(struct at_dma_chan *atchan)
{
	dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");

	if (atc_chan_is_enabled(atchan))
		return;

	if (list_empty(&atchan->active_list) ||
	    list_is_singular(&atchan->active_list)) {
		atc_complete_all(atchan);
	} else {
		atc_chain_complete(atchan, atc_first_active(atchan));
		/* advance work */
		atc_dostart(atchan, atc_first_active(atchan));
	}
}
/**
 * atc_handle_error - handle errors reported by DMA controller
 * @atchan: channel where error occurs
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_handle_error(struct at_dma_chan *atchan)
{
	struct at_desc *bad_desc;
	struct at_desc *child;

	/*
	 * The descriptor currently at the head of the active list is
	 * broken. Since we don't have any way to report errors, we'll
	 * just have to scream loudly and try to carry on.
	 */
	bad_desc = atc_first_active(atchan);
	list_del_init(&bad_desc->desc_node);

	/* As we are stopped, take this opportunity to push queued descriptors
	 * in active_list */
	list_splice_init(&atchan->queue, atchan->active_list.prev);

	/* Try to restart the controller */
	if (!list_empty(&atchan->active_list))
		atc_dostart(atchan, atc_first_active(atchan));

	/*
	 * KERN_CRITICAL may seem harsh, but since this only happens
	 * when someone submits a bad physical address in a
	 * descriptor, we should consider ourselves lucky that the
	 * controller flagged an error instead of scribbling over
	 * random memory locations.
	 */
	dev_crit(chan2dev(&atchan->chan_common),
		 "Bad descriptor submitted for DMA!\n");
	dev_crit(chan2dev(&atchan->chan_common),
		 "  cookie: %d\n", bad_desc->txd.cookie);
	atc_dump_lli(atchan, &bad_desc->lli);
	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
		atc_dump_lli(atchan, &child->lli);

	/* Pretend the descriptor completed successfully */
	atc_chain_complete(atchan, bad_desc);
}
/**
 * atc_handle_cyclic - at the end of a period, run callback function
 * @atchan: channel used for cyclic operations
 *
 * Called with atchan->lock held and bh disabled
 */
static void atc_handle_cyclic(struct at_dma_chan *atchan)
{
	struct at_desc *first = atc_first_active(atchan);
	struct dma_async_tx_descriptor *txd = &first->txd;

	dev_vdbg(chan2dev(&atchan->chan_common),
		 "new cyclic period llp 0x%08x\n",
		 channel_readl(atchan, DSCR));

	dmaengine_desc_get_callback_invoke(txd, NULL);
}
/*--  IRQ & Tasklet  ---------------------------------------------------*/

static void atc_tasklet(unsigned long data)
{
	struct at_dma_chan *atchan = (struct at_dma_chan *)data;
	unsigned long flags;

	spin_lock_irqsave(&atchan->lock, flags);
	if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
		atc_handle_error(atchan);
	else if (atc_chan_is_cyclic(atchan))
		atc_handle_cyclic(atchan);
	else
		atc_advance_work(atchan);

	spin_unlock_irqrestore(&atchan->lock, flags);
}
static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
{
	struct at_dma *atdma = (struct at_dma *)dev_id;
	struct at_dma_chan *atchan;
	int i;
	u32 status, pending, imr;
	int ret = IRQ_NONE;

	do {
		imr = dma_readl(atdma, EBCIMR);
		status = dma_readl(atdma, EBCISR);
		pending = status & imr;

		if (!pending)
			break;

		dev_vdbg(atdma->dma_common.dev,
			 "interrupt: status = 0x%08x, 0x%08x, 0x%08x\n",
			 status, imr, pending);

		for (i = 0; i < atdma->dma_common.chancnt; i++) {
			atchan = &atdma->chan[i];
			if (pending & (AT_DMA_BTC(i) | AT_DMA_ERR(i))) {
				if (pending & AT_DMA_ERR(i)) {
					/* Disable channel on AHB error */
					dma_writel(atdma, CHDR,
						AT_DMA_RES(i) | atchan->mask);
					/* Give information to tasklet */
					set_bit(ATC_IS_ERROR, &atchan->status);
				}
				tasklet_schedule(&atchan->tasklet);
				ret = IRQ_HANDLED;
			}
		}

	} while (pending);

	return ret;
}
/*--  DMA Engine API  --------------------------------------------------*/

/**
 * atc_tx_submit - set the prepared descriptor(s) to be executed by the engine
 * @tx: descriptor at the head of the transaction chain
 *
 * Queue chain if DMA engine is working already
 *
 * Cookie increment and adding to active_list or queue must be atomic
 */
static dma_cookie_t atc_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct at_desc *desc = txd_to_at_desc(tx);
	struct at_dma_chan *atchan = to_at_dma_chan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&atchan->lock, flags);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&atchan->active_list)) {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: started %u\n",
			 desc->txd.cookie);
		atc_dostart(atchan, desc);
		list_add_tail(&desc->desc_node, &atchan->active_list);
	} else {
		dev_vdbg(chan2dev(tx->chan), "tx_submit: queued %u\n",
			 desc->txd.cookie);
		list_add_tail(&desc->desc_node, &atchan->queue);
	}

	spin_unlock_irqrestore(&atchan->lock, flags);

	return cookie;
}
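/*
 * For reference, a client never calls atc_tx_submit() directly: it reaches
 * this function through the generic dmaengine API. A minimal sketch of the
 * client side (chan, sgl and sg_len are assumed to be set up already):
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = dmaengine_prep_slave_sg(chan, sgl, sg_len,
 *				     DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(tx);		(ends up in atc_tx_submit())
 *	dma_async_issue_pending(chan);		(kicks atc_issue_pending())
 */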
/**
 * atc_prep_dma_interleaved - prepare memory to memory interleaved operation
 * @chan: the channel to prepare operation on
 * @xt: Interleaved transfer template
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_interleaved(struct dma_chan *chan,
			 struct dma_interleaved_template *xt,
			 unsigned long flags)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct data_chunk *first;
	struct at_desc *desc = NULL;
	size_t xfer_count;
	unsigned int dwidth;
	u32 ctrla;
	u32 ctrlb;
	size_t len = 0;
	int i;

	if (unlikely(!xt || xt->numf != 1 || !xt->frame_size))
		return NULL;

	first = xt->sgl;

	dev_info(chan2dev(chan),
		 "%s: src=%pad, dest=%pad, numf=%d, frame_size=%d, flags=0x%lx\n",
		__func__, &xt->src_start, &xt->dst_start, xt->numf,
		xt->frame_size, flags);

	/*
	 * The controller can only "skip" X bytes every Y bytes, so we
	 * need to make sure we are given a template that fits that
	 * description, i.e. a template with chunks that always have the
	 * same size, with the same ICGs.
	 */
	for (i = 0; i < xt->frame_size; i++) {
		struct data_chunk *chunk = xt->sgl + i;

		if ((chunk->size != xt->sgl->size) ||
		    (dmaengine_get_dst_icg(xt, chunk) != dmaengine_get_dst_icg(xt, first)) ||
		    (dmaengine_get_src_icg(xt, chunk) != dmaengine_get_src_icg(xt, first))) {
			dev_err(chan2dev(chan),
				"%s: the controller can transfer only identical chunks\n",
				__func__);
			return NULL;
		}

		len += chunk->size;
	}

	dwidth = atc_get_xfer_width(xt->src_start,
				    xt->dst_start, len);

	xfer_count = len >> dwidth;
	if (xfer_count > ATC_BTSIZE_MAX) {
		dev_err(chan2dev(chan), "%s: buffer is too big\n", __func__);
		return NULL;
	}

	ctrla = ATC_SRC_WIDTH(dwidth) |
		ATC_DST_WIDTH(dwidth);

	ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN
		| ATC_SRC_ADDR_MODE_INCR
		| ATC_DST_ADDR_MODE_INCR
		| ATC_SRC_PIP
		| ATC_DST_PIP
		| ATC_FC_MEM2MEM;

	/* create the transfer */
	desc = atc_desc_get(atchan);
	if (!desc) {
		dev_err(chan2dev(chan),
			"%s: couldn't allocate our descriptor\n", __func__);
		return NULL;
	}

	desc->lli.saddr = xt->src_start;
	desc->lli.daddr = xt->dst_start;
	desc->lli.ctrla = ctrla | xfer_count;
	desc->lli.ctrlb = ctrlb;

	desc->boundary = first->size >> dwidth;
	desc->dst_hole = (dmaengine_get_dst_icg(xt, first) >> dwidth) + 1;
	desc->src_hole = (dmaengine_get_src_icg(xt, first) >> dwidth) + 1;

	desc->txd.cookie = -EBUSY;
	desc->total_len = desc->len = len;

	/* set end-of-link to the last link descriptor of list*/
	set_desc_eol(desc);

	desc->txd.flags = flags; /* client is in control of this ack */

	return &desc->txd;
}
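/*
 * Illustration (hypothetical template): with numf == 1, frame_size == 2 and
 * two chunks of identical size and ICGs, the controller copies each chunk
 * and then skips the inter-chunk gap on both sides using the SPIP/DPIP
 * picture-in-picture registers programmed in atc_dostart(). Templates with
 * heterogeneous chunk sizes or ICGs are rejected above.
 */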
/**
 * atc_prep_dma_memcpy - prepare a memcpy operation
 * @chan: the channel to prepare operation on
 * @dest: operation virtual destination address
 * @src: operation virtual source address
 * @len: operation length
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_desc *desc = NULL;
	struct at_desc *first = NULL;
	struct at_desc *prev = NULL;
	size_t xfer_count;
	size_t offset;
	unsigned int src_width;
	unsigned int dst_width;
	u32 ctrla;
	u32 ctrlb;

	dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d%pad s%pad l0x%zx f0x%lx\n",
			&dest, &src, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
		return NULL;
	}

	ctrlb =   ATC_DEFAULT_CTRLB | ATC_IEN
		| ATC_SRC_ADDR_MODE_INCR
		| ATC_DST_ADDR_MODE_INCR
		| ATC_FC_MEM2MEM;

	/*
	 * We can be a lot more clever here, but this should take care
	 * of the most common optimization.
	 */
	src_width = dst_width = atc_get_xfer_width(src, dest, len);

	ctrla = ATC_SRC_WIDTH(src_width) |
		ATC_DST_WIDTH(dst_width);

	for (offset = 0; offset < len; offset += xfer_count << src_width) {
		xfer_count = min_t(size_t, (len - offset) >> src_width,
				ATC_BTSIZE_MAX);

		desc = atc_desc_get(atchan);
		if (!desc)
			goto err_desc_get;

		desc->lli.saddr = src + offset;
		desc->lli.daddr = dest + offset;
		desc->lli.ctrla = ctrla | xfer_count;
		desc->lli.ctrlb = ctrlb;

		desc->txd.cookie = 0;
		desc->len = xfer_count << src_width;

		atc_desc_chain(&first, &prev, desc);
	}

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->total_len = len;

	/* set end-of-link to the last link descriptor of list*/
	set_desc_eol(desc);

	first->txd.flags = flags; /* client is in control of this ack */

	return &first->txd;

err_desc_get:
	atc_desc_put(atchan, first);
	return NULL;
}
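/*
 * Chunking example (hypothetical sizes, assuming ATC_BTSIZE_MAX is 0xffff
 * transfers): a 1 MiB word-aligned copy (src_width == 2) needs
 * 0x100000 >> 2 == 0x40000 transfers and is therefore split by the loop
 * above into ceil(0x40000 / 0xffff) == 5 chained descriptors.
 */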
static struct at_desc *atc_create_memset_desc(struct dma_chan *chan,
					      dma_addr_t psrc,
					      dma_addr_t pdst,
					      size_t len)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_desc *desc;
	size_t xfer_count;

	u32 ctrla = ATC_SRC_WIDTH(2) | ATC_DST_WIDTH(2);
	u32 ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN |
		    ATC_SRC_ADDR_MODE_FIXED |
		    ATC_DST_ADDR_MODE_INCR |
		    ATC_FC_MEM2MEM;

	xfer_count = len >> 2;
	if (xfer_count > ATC_BTSIZE_MAX) {
		dev_err(chan2dev(chan), "%s: buffer is too big\n",
			__func__);
		return NULL;
	}

	desc = atc_desc_get(atchan);
	if (!desc) {
		dev_err(chan2dev(chan), "%s: can't get a descriptor\n",
			__func__);
		return NULL;
	}

	desc->lli.saddr = psrc;
	desc->lli.daddr = pdst;
	desc->lli.ctrla = ctrla | xfer_count;
	desc->lli.ctrlb = ctrlb;

	desc->txd.cookie = 0;
	desc->len = len;

	return desc;
}
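/*
 * Note on the memset descriptors built above: the value to write is first
 * stored in a small DMA pool buffer by the callers below, and the source
 * address mode is FIXED, so the controller re-reads that single 32-bit
 * word while the destination address increments, effectively filling the
 * destination buffer with the value.
 */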
/**
 * atc_prep_dma_memset - prepare a memset operation
 * @chan: the channel to prepare operation on
 * @dest: operation virtual destination address
 * @value: value to set memory buffer to
 * @len: operation length
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
		    size_t len, unsigned long flags)
{
	struct at_dma *atdma = to_at_dma(chan->device);
	struct at_desc *desc;
	void *vaddr;
	dma_addr_t paddr;

	dev_vdbg(chan2dev(chan), "%s: d%pad v0x%x l0x%zx f0x%lx\n", __func__,
		&dest, value, len, flags);

	if (unlikely(!len)) {
		dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
		return NULL;
	}

	if (!is_dma_fill_aligned(chan->device, dest, 0, len)) {
		dev_dbg(chan2dev(chan), "%s: buffer is not aligned\n",
			__func__);
		return NULL;
	}

	vaddr = dma_pool_alloc(atdma->memset_pool, GFP_ATOMIC, &paddr);
	if (!vaddr) {
		dev_err(chan2dev(chan), "%s: couldn't allocate buffer\n",
			__func__);
		return NULL;
	}
	*(u32 *)vaddr = value;

	desc = atc_create_memset_desc(chan, paddr, dest, len);
	if (!desc) {
		dev_err(chan2dev(chan), "%s: couldn't get a descriptor\n",
			__func__);
		goto err_free_buffer;
	}

	desc->memset_paddr = paddr;
	desc->memset_vaddr = vaddr;
	desc->memset_buffer = true;

	desc->txd.cookie = -EBUSY;
	desc->total_len = len;

	/* set end-of-link on the descriptor */
	set_desc_eol(desc);

	desc->txd.flags = flags;

	return &desc->txd;

err_free_buffer:
	dma_pool_free(atdma->memset_pool, vaddr, paddr);
	return NULL;
}
static struct dma_async_tx_descriptor *
atc_prep_dma_memset_sg(struct dma_chan *chan,
		       struct scatterlist *sgl,
		       unsigned int sg_len, int value,
		       unsigned long flags)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_dma *atdma = to_at_dma(chan->device);
	struct at_desc *desc = NULL, *first = NULL, *prev = NULL;
	struct scatterlist *sg;
	void *vaddr;
	dma_addr_t paddr;
	size_t total_len = 0;
	int i;

	dev_vdbg(chan2dev(chan), "%s: v0x%x l0x%x f0x%lx\n", __func__,
		 value, sg_len, flags);

	if (unlikely(!sgl || !sg_len)) {
		dev_dbg(chan2dev(chan), "%s: scatterlist is empty!\n",
			__func__);
		return NULL;
	}

	vaddr = dma_pool_alloc(atdma->memset_pool, GFP_ATOMIC, &paddr);
	if (!vaddr) {
		dev_err(chan2dev(chan), "%s: couldn't allocate buffer\n",
			__func__);
		return NULL;
	}
	*(u32 *)vaddr = value;

	for_each_sg(sgl, sg, sg_len, i) {
		dma_addr_t dest = sg_dma_address(sg);
		size_t len = sg_dma_len(sg);

		dev_vdbg(chan2dev(chan), "%s: d%pad, l0x%zx\n",
			 __func__, &dest, len);

		if (!is_dma_fill_aligned(chan->device, dest, 0, len)) {
			dev_err(chan2dev(chan), "%s: buffer is not aligned\n",
				__func__);
			goto err_put_desc;
		}

		desc = atc_create_memset_desc(chan, paddr, dest, len);
		if (!desc)
			goto err_put_desc;

		atc_desc_chain(&first, &prev, desc);

		total_len += len;
	}

	/*
	 * Only set the buffer pointers on the last descriptor to
	 * avoid free'ing while we have our transfer still going
	 */
	desc->memset_paddr = paddr;
	desc->memset_vaddr = vaddr;
	desc->memset_buffer = true;

	first->txd.cookie = -EBUSY;
	first->total_len = total_len;

	/* set end-of-link on the descriptor */
	set_desc_eol(desc);

	first->txd.flags = flags;

	return &first->txd;

err_put_desc:
	atc_desc_put(atchan, first);
	return NULL;
}
/**
 * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @chan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @scatterlist
 * @direction: DMA direction
 * @flags: tx descriptor status flags
 * @context: transaction context (ignored)
 */
static struct dma_async_tx_descriptor *
atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags, void *context)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_dma_slave *atslave = chan->private;
	struct dma_slave_config *sconfig = &atchan->dma_sconfig;
	struct at_desc *first = NULL;
	struct at_desc *prev = NULL;
	u32 ctrla;
	u32 ctrlb;
	dma_addr_t reg;
	unsigned int reg_width;
	unsigned int mem_width;
	unsigned int i;
	struct scatterlist *sg;
	size_t total_len = 0;

	dev_vdbg(chan2dev(chan), "prep_slave_sg (%d): %s f0x%lx\n",
			sg_len,
			direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
			flags);

	if (unlikely(!atslave || !sg_len)) {
		dev_dbg(chan2dev(chan), "prep_slave_sg: sg length is zero!\n");
		return NULL;
	}

	ctrla = ATC_SCSIZE(sconfig->src_maxburst)
	      | ATC_DCSIZE(sconfig->dst_maxburst);
	ctrlb = ATC_IEN;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		reg_width = convert_buswidth(sconfig->dst_addr_width);
		ctrla |=  ATC_DST_WIDTH(reg_width);
		ctrlb |=  ATC_DST_ADDR_MODE_FIXED
			| ATC_SRC_ADDR_MODE_INCR
			| ATC_FC_MEM2PER
			| ATC_SIF(atchan->mem_if) | ATC_DIF(atchan->per_if);
		reg = sconfig->dst_addr;
		for_each_sg(sgl, sg, sg_len, i) {
			struct at_desc *desc;
			u32 len;
			u32 mem;

			desc = atc_desc_get(atchan);
			if (!desc)
				goto err_desc_get;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);
			if (unlikely(!len)) {
				dev_dbg(chan2dev(chan),
					"prep_slave_sg: sg(%d) data length is zero\n", i);
				goto err;
			}
			/* fall back to byte-wide memory accesses if unaligned */
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.saddr = mem;
			desc->lli.daddr = reg;
			desc->lli.ctrla = ctrla
					| ATC_SRC_WIDTH(mem_width)
					| len >> mem_width;
			desc->lli.ctrlb = ctrlb;
			desc->len = len;

			atc_desc_chain(&first, &prev, desc);
			total_len += len;
		}
		break;
	case DMA_DEV_TO_MEM:
		reg_width = convert_buswidth(sconfig->src_addr_width);
		ctrla |=  ATC_SRC_WIDTH(reg_width);
		ctrlb |=  ATC_DST_ADDR_MODE_INCR
			| ATC_SRC_ADDR_MODE_FIXED
			| ATC_FC_PER2MEM
			| ATC_SIF(atchan->per_if) | ATC_DIF(atchan->mem_if);

		reg = sconfig->src_addr;
		for_each_sg(sgl, sg, sg_len, i) {
			struct at_desc *desc;
			u32 len;
			u32 mem;

			desc = atc_desc_get(atchan);
			if (!desc)
				goto err_desc_get;

			mem = sg_dma_address(sg);
			len = sg_dma_len(sg);
			if (unlikely(!len)) {
				dev_dbg(chan2dev(chan),
					"prep_slave_sg: sg(%d) data length is zero\n", i);
				goto err;
			}
			/* fall back to byte-wide memory accesses if unaligned */
			mem_width = 2;
			if (unlikely(mem & 3 || len & 3))
				mem_width = 0;

			desc->lli.saddr = reg;
			desc->lli.daddr = mem;
			desc->lli.ctrla = ctrla
					| ATC_DST_WIDTH(mem_width)
					| len >> mem_width;
			desc->lli.ctrlb = ctrlb;
			desc->len = len;

			atc_desc_chain(&first, &prev, desc);
			total_len += len;
		}
		break;
	default:
		return NULL;
	}

	/* set end-of-link to the last link descriptor of list*/
	set_desc_eol(prev);

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->total_len = total_len;

	/* first link descriptor of list is responsible of flags */
	first->txd.flags = flags; /* client is in control of this ack */

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan), "not enough descriptors available\n");
err:
	atc_desc_put(atchan, first);
	return NULL;
}
/**
 * atc_prep_dma_sg - prepare memory to memory scatter-gather operation
 * @chan: the channel to prepare operation on
 * @dst_sg: destination scatterlist
 * @dst_nents: number of destination scatterlist entries
 * @src_sg: source scatterlist
 * @src_nents: number of source scatterlist entries
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_sg(struct dma_chan *chan,
		struct scatterlist *dst_sg, unsigned int dst_nents,
		struct scatterlist *src_sg, unsigned int src_nents,
		unsigned long flags)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_desc *desc = NULL;
	struct at_desc *first = NULL;
	struct at_desc *prev = NULL;
	unsigned int src_width;
	unsigned int dst_width;
	size_t xfer_count;
	u32 ctrla;
	u32 ctrlb;
	size_t dst_len = 0, src_len = 0;
	dma_addr_t dst = 0, src = 0;
	size_t len = 0, total_len = 0;

	if (unlikely(dst_nents == 0 || src_nents == 0))
		return NULL;

	if (unlikely(dst_sg == NULL || src_sg == NULL))
		return NULL;

	ctrlb =   ATC_DEFAULT_CTRLB | ATC_IEN
		| ATC_SRC_ADDR_MODE_INCR
		| ATC_DST_ADDR_MODE_INCR
		| ATC_FC_MEM2MEM;

	/*
	 * loop until there is either no more source or no more destination
	 * scatterlist entry
	 */
	while (true) {

		/* prepare the next transfer */
		if (dst_len == 0) {

			/* no more destination scatterlist entries */
			if (!dst_sg || !dst_nents)
				break;

			dst = sg_dma_address(dst_sg);
			dst_len = sg_dma_len(dst_sg);

			dst_sg = sg_next(dst_sg);
			dst_nents--;
		}

		if (src_len == 0) {

			/* no more source scatterlist entries */
			if (!src_sg || !src_nents)
				break;

			src = sg_dma_address(src_sg);
			src_len = sg_dma_len(src_sg);

			src_sg = sg_next(src_sg);
			src_nents--;
		}

		len = min_t(size_t, src_len, dst_len);
		if (len == 0)
			continue;

		/* take care for the alignment */
		src_width = dst_width = atc_get_xfer_width(src, dst, len);

		ctrla = ATC_SRC_WIDTH(src_width) |
			ATC_DST_WIDTH(dst_width);

		/*
		 * The number of transfers to set up refers to the source
		 * width, which depends on the alignment.
		 */
		xfer_count = len >> src_width;
		if (xfer_count > ATC_BTSIZE_MAX) {
			xfer_count = ATC_BTSIZE_MAX;
			len = ATC_BTSIZE_MAX << src_width;
		}

		/* create the transfer */
		desc = atc_desc_get(atchan);
		if (!desc)
			goto err_desc_get;

		desc->lli.saddr = src;
		desc->lli.daddr = dst;
		desc->lli.ctrla = ctrla | xfer_count;
		desc->lli.ctrlb = ctrlb;

		desc->txd.cookie = 0;
		desc->len = len;

		atc_desc_chain(&first, &prev, desc);

		/* update the lengths and addresses for the next loop cycle */
		dst_len -= len;
		src_len -= len;
		dst += len;
		src += len;

		total_len += len;
	}

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->total_len = total_len;

	/* set end-of-link to the last link descriptor of list*/
	set_desc_eol(desc);

	first->txd.flags = flags; /* client is in control of this ack */

	return &first->txd;

err_desc_get:
	atc_desc_put(atchan, first);
	return NULL;
}
/**
 * atc_dma_cyclic_check_values
 * Check for too big/unaligned periods and unaligned DMA buffer
 */
static int
atc_dma_cyclic_check_values(unsigned int reg_width, dma_addr_t buf_addr,
		size_t period_len)
{
	if (period_len > (ATC_BTSIZE_MAX << reg_width))
		goto err_out;
	if (unlikely(period_len & ((1 << reg_width) - 1)))
		goto err_out;
	if (unlikely(buf_addr & ((1 << reg_width) - 1)))
		goto err_out;

	return 0;

err_out:
	return -EINVAL;
}
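/*
 * Example of the limits checked above (hypothetical values): with a 32-bit
 * register width (reg_width == 2), a period may not exceed
 * ATC_BTSIZE_MAX << 2 bytes, and both the period length and the buffer
 * address must be multiples of 4.
 */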
/**
 * atc_dma_cyclic_fill_desc - Fill one period descriptor
 */
static int
atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc,
		unsigned int period_index, dma_addr_t buf_addr,
		unsigned int reg_width, size_t period_len,
		enum dma_transfer_direction direction)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct dma_slave_config *sconfig = &atchan->dma_sconfig;
	u32 ctrla;

	/* prepare common CTRLA value */
	ctrla =	  ATC_SCSIZE(sconfig->src_maxburst)
		| ATC_DCSIZE(sconfig->dst_maxburst)
		| ATC_DST_WIDTH(reg_width)
		| ATC_SRC_WIDTH(reg_width)
		| period_len >> reg_width;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		desc->lli.saddr = buf_addr + (period_len * period_index);
		desc->lli.daddr = sconfig->dst_addr;
		desc->lli.ctrla = ctrla;
		desc->lli.ctrlb = ATC_DST_ADDR_MODE_FIXED
				| ATC_SRC_ADDR_MODE_INCR
				| ATC_FC_MEM2PER
				| ATC_SIF(atchan->mem_if)
				| ATC_DIF(atchan->per_if);
		desc->len = period_len;
		break;

	case DMA_DEV_TO_MEM:
		desc->lli.saddr = sconfig->src_addr;
		desc->lli.daddr = buf_addr + (period_len * period_index);
		desc->lli.ctrla = ctrla;
		desc->lli.ctrlb = ATC_DST_ADDR_MODE_INCR
				| ATC_SRC_ADDR_MODE_FIXED
				| ATC_FC_PER2MEM
				| ATC_SIF(atchan->per_if)
				| ATC_DIF(atchan->mem_if);
		desc->len = period_len;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
/**
 * atc_prep_dma_cyclic - prepare the cyclic DMA transfer
 * @chan: the DMA channel to prepare
 * @buf_addr: physical DMA address where the buffer starts
 * @buf_len: total number of bytes for the entire buffer
 * @period_len: number of bytes for each period
 * @direction: transfer direction, to or from device
 * @flags: tx descriptor status flags
 */
static struct dma_async_tx_descriptor *
atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_dma_slave *atslave = chan->private;
	struct dma_slave_config *sconfig = &atchan->dma_sconfig;
	struct at_desc *first = NULL;
	struct at_desc *prev = NULL;
	unsigned long was_cyclic;
	unsigned int reg_width;
	unsigned int periods = buf_len / period_len;
	unsigned int i;

	dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@%pad - %d (%d/%d)\n",
			direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
			&buf_addr,
			periods, buf_len, period_len);

	if (unlikely(!atslave || !buf_len || !period_len)) {
		dev_dbg(chan2dev(chan), "prep_dma_cyclic: length is zero!\n");
		return NULL;
	}

	was_cyclic = test_and_set_bit(ATC_IS_CYCLIC, &atchan->status);
	if (was_cyclic) {
		dev_dbg(chan2dev(chan), "prep_dma_cyclic: channel in use!\n");
		return NULL;
	}

	if (unlikely(!is_slave_direction(direction)))
		goto err_out;

	if (sconfig->direction == DMA_MEM_TO_DEV)
		reg_width = convert_buswidth(sconfig->dst_addr_width);
	else
		reg_width = convert_buswidth(sconfig->src_addr_width);

	/* Check for too big/unaligned periods and unaligned DMA buffer */
	if (atc_dma_cyclic_check_values(reg_width, buf_addr, period_len))
		goto err_out;

	/* build cyclic linked list */
	for (i = 0; i < periods; i++) {
		struct at_desc *desc;

		desc = atc_desc_get(atchan);
		if (!desc)
			goto err_desc_get;

		if (atc_dma_cyclic_fill_desc(chan, desc, i, buf_addr,
					     reg_width, period_len, direction))
			goto err_desc_get;

		atc_desc_chain(&first, &prev, desc);
	}

	/* let's make a cyclic list */
	prev->lli.dscr = first->txd.phys;

	/* First descriptor of the chain embeds additional information */
	first->txd.cookie = -EBUSY;
	first->total_len = buf_len;

	return &first->txd;

err_desc_get:
	dev_err(chan2dev(chan), "not enough descriptors available\n");
	atc_desc_put(atchan, first);
err_out:
	clear_bit(ATC_IS_CYCLIC, &atchan->status);
	return NULL;
}
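/*
 * Client-side sketch for cyclic transfers (illustrative only; the callback
 * name is hypothetical): after dmaengine_slave_config(), a client typically
 * does
 *
 *	desc = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
 *					 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	desc->callback = my_period_callback;	(invoked once per period)
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 *
 * and later stops the transfer with dmaengine_terminate_all(chan), which
 * also clears the ATC_IS_CYCLIC flag set above.
 */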
static int atc_config(struct dma_chan *chan,
		      struct dma_slave_config *sconfig)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	/* Check if chan is configured for slave transfers */
	if (!chan->private)
		return -EINVAL;

	memcpy(&atchan->dma_sconfig, sconfig, sizeof(*sconfig));

	convert_burst(&atchan->dma_sconfig.src_maxburst);
	convert_burst(&atchan->dma_sconfig.dst_maxburst);

	return 0;
}
static int atc_pause(struct dma_chan *chan)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_dma *atdma = to_at_dma(chan->device);
	int chan_id = atchan->chan_common.chan_id;
	unsigned long flags;

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	spin_lock_irqsave(&atchan->lock, flags);

	dma_writel(atdma, CHER, AT_DMA_SUSP(chan_id));
	set_bit(ATC_IS_PAUSED, &atchan->status);

	spin_unlock_irqrestore(&atchan->lock, flags);

	return 0;
}
static int atc_resume(struct dma_chan *chan)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_dma *atdma = to_at_dma(chan->device);
	int chan_id = atchan->chan_common.chan_id;
	unsigned long flags;

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	if (!atc_chan_is_paused(atchan))
		return 0;

	spin_lock_irqsave(&atchan->lock, flags);

	dma_writel(atdma, CHDR, AT_DMA_RES(chan_id));
	clear_bit(ATC_IS_PAUSED, &atchan->status);

	spin_unlock_irqrestore(&atchan->lock, flags);

	return 0;
}
static int atc_terminate_all(struct dma_chan *chan)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_dma *atdma = to_at_dma(chan->device);
	int chan_id = atchan->chan_common.chan_id;
	struct at_desc *desc, *_desc;
	unsigned long flags;

	LIST_HEAD(list);

	dev_vdbg(chan2dev(chan), "%s\n", __func__);

	/*
	 * This is only called when something went wrong elsewhere, so
	 * we don't really care about the data. Just disable the
	 * channel. We still have to poll the channel enable bit due
	 * to AHB/HSB limitations.
	 */
	spin_lock_irqsave(&atchan->lock, flags);

	/* disabling channel: must also remove suspend state */
	dma_writel(atdma, CHDR, AT_DMA_RES(chan_id) | atchan->mask);

	/* confirm that this channel is disabled */
	while (dma_readl(atdma, CHSR) & atchan->mask)
		cpu_relax();

	/* active_list entries will end up before queued entries */
	list_splice_init(&atchan->queue, &list);
	list_splice_init(&atchan->active_list, &list);

	/* Flush all pending and queued descriptors */
	list_for_each_entry_safe(desc, _desc, &list, desc_node)
		atc_chain_complete(atchan, desc);

	clear_bit(ATC_IS_PAUSED, &atchan->status);
	/* if channel dedicated to cyclic operations, free it */
	clear_bit(ATC_IS_CYCLIC, &atchan->status);

	spin_unlock_irqrestore(&atchan->lock, flags);

	return 0;
}
/**
 * atc_tx_status - poll for transaction completion
 * @chan: DMA channel
 * @cookie: transaction identifier to check status of
 * @txstate: if not %NULL updated with transaction state
 *
 * If @txstate is passed in, upon return it reflects the driver
 * internal state and can be used with dma_async_is_complete() to check
 * the status of multiple cookies without re-checking hardware state.
 */
static enum dma_status
atc_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	unsigned long flags;
	enum dma_status ret;
	int bytes = 0;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;
	/*
	 * There's no point calculating the residue if there's
	 * no txstate to store the value.
	 */
	if (!txstate)
		return DMA_ERROR;

	spin_lock_irqsave(&atchan->lock, flags);

	/*  Get number of bytes left in the active transactions */
	bytes = atc_get_bytes_left(chan, cookie);

	spin_unlock_irqrestore(&atchan->lock, flags);

	if (unlikely(bytes < 0)) {
		dev_vdbg(chan2dev(chan), "get residual bytes error\n");
		return DMA_ERROR;
	} else {
		dma_set_residue(txstate, bytes);
	}

	dev_vdbg(chan2dev(chan), "tx_status %d: cookie = %d residue = %d\n",
		 ret, cookie, bytes);

	return ret;
}
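/*
 * A client polls the residue through the generic API; a minimal sketch
 * (illustrative, error handling omitted):
 *
 *	struct dma_tx_state state;
 *	enum dma_status status;
 *
 *	status = dmaengine_tx_status(chan, cookie, &state);
 *	if (status != DMA_COMPLETE)
 *		pr_debug("still %u bytes left\n", state.residue);
 */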
/**
 * atc_issue_pending - try to finish work
 * @chan: target DMA channel
 */
static void atc_issue_pending(struct dma_chan *chan)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	unsigned long flags;

	dev_vdbg(chan2dev(chan), "issue_pending\n");

	/* Not needed for cyclic transfers */
	if (atc_chan_is_cyclic(atchan))
		return;

	spin_lock_irqsave(&atchan->lock, flags);
	atc_advance_work(atchan);
	spin_unlock_irqrestore(&atchan->lock, flags);
}
/**
 * atc_alloc_chan_resources - allocate resources for DMA channel
 * @chan: allocate descriptor resources for this channel
 *
 * Return: the number of allocated descriptors
 */
static int atc_alloc_chan_resources(struct dma_chan *chan)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_dma *atdma = to_at_dma(chan->device);
	struct at_desc *desc;
	struct at_dma_slave *atslave;
	unsigned long flags;
	int i;
	u32 cfg;
	LIST_HEAD(tmp_list);

	dev_vdbg(chan2dev(chan), "alloc_chan_resources\n");

	/* ASSERT:  channel is idle */
	if (atc_chan_is_enabled(atchan)) {
		dev_dbg(chan2dev(chan), "DMA channel not idle ?\n");
		return -EIO;
	}

	cfg = ATC_DEFAULT_CFG;

	atslave = chan->private;
	if (atslave) {
		/*
		 * We need controller-specific data to set up slave
		 * transfers.
		 */
		BUG_ON(!atslave->dma_dev || atslave->dma_dev != atdma->dma_common.dev);

		/* if cfg configuration specified take it instead of default */
		if (atslave->cfg)
			cfg = atslave->cfg;
	}

	/* have we already been set up?
	 * reconfigure channel but no need to reallocate descriptors */
	if (!list_empty(&atchan->free_list))
		return atchan->descs_allocated;

	/* Allocate initial pool of descriptors */
	for (i = 0; i < init_nr_desc_per_channel; i++) {
		desc = atc_alloc_descriptor(chan, GFP_KERNEL);
		if (!desc) {
			dev_err(atdma->dma_common.dev,
				"Only %d initial descriptors\n", i);
			break;
		}
		list_add_tail(&desc->desc_node, &tmp_list);
	}

	spin_lock_irqsave(&atchan->lock, flags);
	atchan->descs_allocated = i;
	list_splice(&tmp_list, &atchan->free_list);
	dma_cookie_init(chan);
	spin_unlock_irqrestore(&atchan->lock, flags);

	/* channel parameters */
	channel_writel(atchan, CFG, cfg);

	dev_dbg(chan2dev(chan),
		"alloc_chan_resources: allocated %d descriptors\n",
		atchan->descs_allocated);

	return atchan->descs_allocated;
}
/**
 * atc_free_chan_resources - free all channel resources
 * @chan: DMA channel
 */
static void atc_free_chan_resources(struct dma_chan *chan)
{
	struct at_dma_chan *atchan = to_at_dma_chan(chan);
	struct at_dma *atdma = to_at_dma(chan->device);
	struct at_desc *desc, *_desc;
	LIST_HEAD(list);

	dev_dbg(chan2dev(chan), "free_chan_resources: (descs allocated=%u)\n",
		atchan->descs_allocated);

	/* ASSERT:  channel is idle */
	BUG_ON(!list_empty(&atchan->active_list));
	BUG_ON(!list_empty(&atchan->queue));
	BUG_ON(atc_chan_is_enabled(atchan));

	list_for_each_entry_safe(desc, _desc, &atchan->free_list, desc_node) {
		dev_vdbg(chan2dev(chan), "  freeing descriptor %p\n", desc);
		list_del(&desc->desc_node);
		/* free link descriptor */
		dma_pool_free(atdma->dma_desc_pool, desc, desc->txd.phys);
	}
	list_splice_init(&atchan->free_list, &list);
	atchan->descs_allocated = 0;
	atchan->status = 0;

	/*
	 * Free atslave allocated in at_dma_xlate()
	 */
	kfree(chan->private);
	chan->private = NULL;

	dev_vdbg(chan2dev(chan), "free_chan_resources: done\n");
}
#ifdef CONFIG_OF
static bool at_dma_filter(struct dma_chan *chan, void *slave)
{
	struct at_dma_slave *atslave = slave;

	if (atslave->dma_dev == chan->device->dev) {
		chan->private = atslave;
		return true;
	} else {
		return false;
	}
}
static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
				     struct of_dma *of_dma)
{
	struct dma_chan *chan;
	struct at_dma_chan *atchan;
	struct at_dma_slave *atslave;
	dma_cap_mask_t mask;
	unsigned int per_id;
	struct platform_device *dmac_pdev;

	if (dma_spec->args_count != 2)
		return NULL;

	dmac_pdev = of_find_device_by_node(dma_spec->np);
	if (!dmac_pdev)
		return NULL;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	atslave = kzalloc(sizeof(*atslave), GFP_KERNEL);
	if (!atslave)
		return NULL;

	atslave->cfg = ATC_DST_H2SEL_HW | ATC_SRC_H2SEL_HW;
	/*
	 * We can fill both SRC_PER and DST_PER, one of these fields will be
	 * ignored depending on DMA transfer direction.
	 */
	per_id = dma_spec->args[1] & AT91_DMA_CFG_PER_ID_MASK;
	atslave->cfg |= ATC_DST_PER_MSB(per_id) | ATC_DST_PER(per_id)
		     | ATC_SRC_PER_MSB(per_id) | ATC_SRC_PER(per_id);
	/*
	 * We have to translate the value we get from the device tree since
	 * the half FIFO configuration value had to be 0 to keep backward
	 * compatibility.
	 */
	switch (dma_spec->args[1] & AT91_DMA_CFG_FIFOCFG_MASK) {
	case AT91_DMA_CFG_FIFOCFG_ALAP:
		atslave->cfg |= ATC_FIFOCFG_LARGESTBURST;
		break;
	case AT91_DMA_CFG_FIFOCFG_ASAP:
		atslave->cfg |= ATC_FIFOCFG_ENOUGHSPACE;
		break;
	case AT91_DMA_CFG_FIFOCFG_HALF:
	default:
		atslave->cfg |= ATC_FIFOCFG_HALFFIFO;
	}
	atslave->dma_dev = &dmac_pdev->dev;

	chan = dma_request_channel(mask, at_dma_filter, atslave);
	if (!chan)
		return NULL;

	atchan = to_at_dma_chan(chan);
	atchan->per_if = dma_spec->args[0] & 0xff;
	atchan->mem_if = (dma_spec->args[0] >> 16) & 0xff;

	return chan;
}
#else
static struct dma_chan *at_dma_xlate(struct of_phandle_args *dma_spec,
				     struct of_dma *of_dma)
{
	return NULL;
}
#endif
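/*
 * Hypothetical device tree usage (illustrative, not from a real board): a
 * client references one channel with two cells, the first selecting the
 * memory/peripheral AHB interfaces and the second carrying the peripheral
 * ID and FIFO configuration parsed by at_dma_xlate() above:
 *
 *	uart1: serial@f8020000 {
 *		...
 *		dmas = <&dma0 2 AT91_DMA_CFG_PER_ID(10)>;
 *		dma-names = "tx";
 *	};
 */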
/*--  Module Management  -----------------------------------------------*/

/* cap_mask is a multi-u32 bitfield, so it is filled at probe time with
 * proper C code (see at_dma_probe()). */
static struct at_dma_platform_data at91sam9rl_config = {
	.nr_channels = 2,
};
static struct at_dma_platform_data at91sam9g45_config = {
	.nr_channels = 8,
};

#if defined(CONFIG_OF)
static const struct of_device_id atmel_dma_dt_ids[] = {
	{
		.compatible = "atmel,at91sam9rl-dma",
		.data = &at91sam9rl_config,
	}, {
		.compatible = "atmel,at91sam9g45-dma",
		.data = &at91sam9g45_config,
	}, {
		/* sentinel */
	}
};

MODULE_DEVICE_TABLE(of, atmel_dma_dt_ids);
#endif

static const struct platform_device_id atdma_devtypes[] = {
	{
		.name = "at91sam9rl_dma",
		.driver_data = (unsigned long) &at91sam9rl_config,
	}, {
		.name = "at91sam9g45_dma",
		.driver_data = (unsigned long) &at91sam9g45_config,
	}, {
		/* sentinel */
	}
};

static inline const struct at_dma_platform_data * __init at_dma_get_driver_data(
						struct platform_device *pdev)
{
	if (pdev->dev.of_node) {
		const struct of_device_id *match;
		match = of_match_node(atmel_dma_dt_ids, pdev->dev.of_node);
		if (match == NULL)
			return NULL;
		return match->data;
	}
	return (struct at_dma_platform_data *)
			platform_get_device_id(pdev)->driver_data;
}
/**
 * at_dma_off - disable DMA controller
 * @atdma: the Atmel HDMAC device
 */
static void at_dma_off(struct at_dma *atdma)
{
	dma_writel(atdma, EN, 0);

	/* disable all interrupts */
	dma_writel(atdma, EBCIDR, -1L);

	/* confirm that all channels are disabled */
	while (dma_readl(atdma, CHSR) & atdma->all_chan_mask)
		cpu_relax();
}
static int __init at_dma_probe(struct platform_device *pdev)
{
	struct resource *io;
	struct at_dma *atdma;
	size_t size;
	int irq;
	int err;
	int i;
	const struct at_dma_platform_data *plat_dat;

	/* setup platform data for each SoC */
	dma_cap_set(DMA_MEMCPY, at91sam9rl_config.cap_mask);
	dma_cap_set(DMA_SG, at91sam9rl_config.cap_mask);
	dma_cap_set(DMA_INTERLEAVE, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_MEMSET, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_MEMSET_SG, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_PRIVATE, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask);
	dma_cap_set(DMA_SG, at91sam9g45_config.cap_mask);

	/* get DMA parameters from controller type */
	plat_dat = at_dma_get_driver_data(pdev);
	if (!plat_dat)
		return -ENODEV;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!io)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	size = sizeof(struct at_dma);
	size += plat_dat->nr_channels * sizeof(struct at_dma_chan);
	atdma = kzalloc(size, GFP_KERNEL);
	if (!atdma)
		return -ENOMEM;

	/* discover transaction capabilities */
	atdma->dma_common.cap_mask = plat_dat->cap_mask;
	atdma->all_chan_mask = (1 << plat_dat->nr_channels) - 1;

	size = resource_size(io);
	if (!request_mem_region(io->start, size, pdev->dev.driver->name)) {
		err = -EBUSY;
		goto err_kfree;
	}

	atdma->regs = ioremap(io->start, size);
	if (!atdma->regs) {
		err = -ENOMEM;
		goto err_release_r;
	}

	atdma->clk = clk_get(&pdev->dev, "dma_clk");
	if (IS_ERR(atdma->clk)) {
		err = PTR_ERR(atdma->clk);
		goto err_clk;
	}
	err = clk_prepare_enable(atdma->clk);
	if (err)
		goto err_clk_prepare;

	/* force dma off, just in case */
	at_dma_off(atdma);

	err = request_irq(irq, at_dma_interrupt, 0, "at_hdmac", atdma);
	if (err)
		goto err_irq;

	platform_set_drvdata(pdev, atdma);

	/* create a pool of consistent memory blocks for hardware descriptors */
	atdma->dma_desc_pool = dma_pool_create("at_hdmac_desc_pool",
			&pdev->dev, sizeof(struct at_desc),
			4 /* word alignment */, 0);
	if (!atdma->dma_desc_pool) {
		dev_err(&pdev->dev, "No memory for descriptors dma pool\n");
		err = -ENOMEM;
		goto err_desc_pool_create;
	}

	/* create a pool of consistent memory blocks for memset blocks */
	atdma->memset_pool = dma_pool_create("at_hdmac_memset_pool",
			&pdev->dev, sizeof(int), 4, 0);
	if (!atdma->memset_pool) {
		dev_err(&pdev->dev, "No memory for memset dma pool\n");
		err = -ENOMEM;
		goto err_memset_pool_create;
	}

	/* clear any pending interrupt */
	while (dma_readl(atdma, EBCISR))
		cpu_relax();

	/* initialize channels related values */
	INIT_LIST_HEAD(&atdma->dma_common.channels);
	for (i = 0; i < plat_dat->nr_channels; i++) {
		struct at_dma_chan *atchan = &atdma->chan[i];

		atchan->mem_if = AT_DMA_MEM_IF;
		atchan->per_if = AT_DMA_PER_IF;
		atchan->chan_common.device = &atdma->dma_common;
		dma_cookie_init(&atchan->chan_common);
		list_add_tail(&atchan->chan_common.device_node,
				&atdma->dma_common.channels);

		atchan->ch_regs = atdma->regs + ch_regs(i);
		spin_lock_init(&atchan->lock);
		atchan->mask = 1 << i;

		INIT_LIST_HEAD(&atchan->active_list);
		INIT_LIST_HEAD(&atchan->queue);
		INIT_LIST_HEAD(&atchan->free_list);

		tasklet_init(&atchan->tasklet, atc_tasklet,
				(unsigned long)atchan);
		atc_enable_chan_irq(atdma, i);
	}

	/* set base routines */
	atdma->dma_common.device_alloc_chan_resources = atc_alloc_chan_resources;
	atdma->dma_common.device_free_chan_resources = atc_free_chan_resources;
	atdma->dma_common.device_tx_status = atc_tx_status;
	atdma->dma_common.device_issue_pending = atc_issue_pending;
	atdma->dma_common.dev = &pdev->dev;

	/* set prep routines based on capability */
	if (dma_has_cap(DMA_INTERLEAVE, atdma->dma_common.cap_mask))
		atdma->dma_common.device_prep_interleaved_dma = atc_prep_dma_interleaved;

	if (dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask))
		atdma->dma_common.device_prep_dma_memcpy = atc_prep_dma_memcpy;

	if (dma_has_cap(DMA_MEMSET, atdma->dma_common.cap_mask)) {
		atdma->dma_common.device_prep_dma_memset = atc_prep_dma_memset;
		atdma->dma_common.device_prep_dma_memset_sg = atc_prep_dma_memset_sg;
		atdma->dma_common.fill_align = DMAENGINE_ALIGN_4_BYTES;
	}

	if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
		atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
		/* controller can do slave DMA: can trigger cyclic transfers */
		dma_cap_set(DMA_CYCLIC, atdma->dma_common.cap_mask);
		atdma->dma_common.device_prep_dma_cyclic = atc_prep_dma_cyclic;
		atdma->dma_common.device_config = atc_config;
		atdma->dma_common.device_pause = atc_pause;
		atdma->dma_common.device_resume = atc_resume;
		atdma->dma_common.device_terminate_all = atc_terminate_all;
		atdma->dma_common.src_addr_widths = ATC_DMA_BUSWIDTHS;
		atdma->dma_common.dst_addr_widths = ATC_DMA_BUSWIDTHS;
		atdma->dma_common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
		atdma->dma_common.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	}

	if (dma_has_cap(DMA_SG, atdma->dma_common.cap_mask))
		atdma->dma_common.device_prep_dma_sg = atc_prep_dma_sg;

	dma_writel(atdma, EN, AT_DMA_ENABLE);

	dev_info(&pdev->dev, "Atmel AHB DMA Controller ( %s%s%s%s), %d channels\n",
	  dma_has_cap(DMA_MEMCPY, atdma->dma_common.cap_mask) ? "cpy " : "",
	  dma_has_cap(DMA_MEMSET, atdma->dma_common.cap_mask) ? "set " : "",
	  dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)  ? "slave " : "",
	  dma_has_cap(DMA_SG, atdma->dma_common.cap_mask)  ? "sg-cpy " : "",
	  plat_dat->nr_channels);

	dma_async_device_register(&atdma->dma_common);

	/*
	 * Do not return an error if the dmac node is not present in order to
	 * not break the existing way of requesting channel with
	 * dma_request_channel().
	 */
	if (pdev->dev.of_node) {
		err = of_dma_controller_register(pdev->dev.of_node,
						 at_dma_xlate, atdma);
		if (err) {
			dev_err(&pdev->dev, "could not register of_dma_controller\n");
			goto err_of_dma_controller_register;
		}
	}

	return 0;

err_of_dma_controller_register:
	dma_async_device_unregister(&atdma->dma_common);
	dma_pool_destroy(atdma->memset_pool);
err_memset_pool_create:
	dma_pool_destroy(atdma->dma_desc_pool);
err_desc_pool_create:
	free_irq(platform_get_irq(pdev, 0), atdma);
err_irq:
	clk_disable_unprepare(atdma->clk);
err_clk_prepare:
	clk_put(atdma->clk);
err_clk:
	iounmap(atdma->regs);
	atdma->regs = NULL;
err_release_r:
	release_mem_region(io->start, size);
err_kfree:
	kfree(atdma);
	return err;
}
static int at_dma_remove(struct platform_device *pdev)
{
	struct at_dma *atdma = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;
	struct resource *io;

	at_dma_off(atdma);
	if (pdev->dev.of_node)
		of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&atdma->dma_common);

	dma_pool_destroy(atdma->memset_pool);
	dma_pool_destroy(atdma->dma_desc_pool);
	free_irq(platform_get_irq(pdev, 0), atdma);

	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);

		/* Disable interrupts */
		atc_disable_chan_irq(atdma, chan->chan_id);

		tasklet_kill(&atchan->tasklet);
		list_del(&chan->device_node);
	}

	clk_disable_unprepare(atdma->clk);
	clk_put(atdma->clk);

	iounmap(atdma->regs);
	atdma->regs = NULL;

	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(io->start, resource_size(io));

	kfree(atdma);

	return 0;
}
static void at_dma_shutdown(struct platform_device *pdev)
{
	struct at_dma *atdma = platform_get_drvdata(pdev);

	at_dma_off(platform_get_drvdata(pdev));
	clk_disable_unprepare(atdma->clk);
}
static int at_dma_prepare(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;

	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);
		/* wait for transaction completion (except in cyclic case) */
		if (atc_chan_is_enabled(atchan) && !atc_chan_is_cyclic(atchan))
			return -EAGAIN;
	}
	return 0;
}
static void atc_suspend_cyclic(struct at_dma_chan *atchan)
{
	struct dma_chan *chan = &atchan->chan_common;

	/* Channel should be paused by user
	 * do it anyway even if it is not done already */
	if (!atc_chan_is_paused(atchan)) {
		dev_warn(chan2dev(chan),
		"cyclic channel not paused, should be done by channel user\n");
		atc_pause(chan);
	}

	/* now preserve additional data for cyclic operations */
	/* next descriptor address in the cyclic list */
	atchan->save_dscr = channel_readl(atchan, DSCR);

	vdbg_dump_regs(atchan);
}
static int at_dma_suspend_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;

	/* preserve data */
	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);

		if (atc_chan_is_cyclic(atchan))
			atc_suspend_cyclic(atchan);
		atchan->save_cfg = channel_readl(atchan, CFG);
	}
	atdma->save_imr = dma_readl(atdma, EBCIMR);

	/* disable DMA controller */
	at_dma_off(atdma);
	clk_disable_unprepare(atdma->clk);
	return 0;
}
static void atc_resume_cyclic(struct at_dma_chan *atchan)
{
	struct at_dma *atdma = to_at_dma(atchan->chan_common.device);

	/* restore channel status for cyclic descriptors list:
	 * next descriptor in the cyclic list at the time of suspend */
	channel_writel(atchan, SADDR, 0);
	channel_writel(atchan, DADDR, 0);
	channel_writel(atchan, CTRLA, 0);
	channel_writel(atchan, CTRLB, 0);
	channel_writel(atchan, DSCR, atchan->save_dscr);
	dma_writel(atdma, CHER, atchan->mask);

	/* channel pause status should be removed by channel user
	 * We cannot take the initiative to do it here */

	vdbg_dump_regs(atchan);
}
static int at_dma_resume_noirq(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct at_dma *atdma = platform_get_drvdata(pdev);
	struct dma_chan *chan, *_chan;

	/* bring back DMA controller */
	clk_prepare_enable(atdma->clk);
	dma_writel(atdma, EN, AT_DMA_ENABLE);

	/* clear any pending interrupt */
	while (dma_readl(atdma, EBCISR))
		cpu_relax();

	/* restore saved data */
	dma_writel(atdma, EBCIER, atdma->save_imr);
	list_for_each_entry_safe(chan, _chan, &atdma->dma_common.channels,
			device_node) {
		struct at_dma_chan *atchan = to_at_dma_chan(chan);

		channel_writel(atchan, CFG, atchan->save_cfg);
		if (atc_chan_is_cyclic(atchan))
			atc_resume_cyclic(atchan);
	}
	return 0;
}
static const struct dev_pm_ops at_dma_dev_pm_ops = {
	.prepare = at_dma_prepare,
	.suspend_noirq = at_dma_suspend_noirq,
	.resume_noirq = at_dma_resume_noirq,
};
static struct platform_driver at_dma_driver = {
	.remove		= at_dma_remove,
	.shutdown	= at_dma_shutdown,
	.id_table	= atdma_devtypes,
	.driver = {
		.name	= "at_hdmac",
		.pm	= &at_dma_dev_pm_ops,
		.of_match_table	= of_match_ptr(atmel_dma_dt_ids),
	},
};
static int __init at_dma_init(void)
{
	return platform_driver_probe(&at_dma_driver, at_dma_probe);
}
subsys_initcall(at_dma_init);

static void __exit at_dma_exit(void)
{
	platform_driver_unregister(&at_dma_driver);
}
module_exit(at_dma_exit);
MODULE_DESCRIPTION("Atmel AHB DMA Controller driver");
MODULE_AUTHOR("Nicolas Ferre <nicolas.ferre@atmel.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:at_hdmac");