// SPDX-License-Identifier: GPL-2.0-only
/*
 * DMA driver for Nvidia's Tegra20 APB DMA controller.
 *
 * Copyright (c) 2012-2013, NVIDIA CORPORATION. All rights reserved.
 */

#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/slab.h>

#include "dmaengine.h"

#define CREATE_TRACE_POINTS
#include <trace/events/tegra_apb_dma.h>

#define TEGRA_APBDMA_GENERAL			0x0
#define TEGRA_APBDMA_GENERAL_ENABLE		BIT(31)

#define TEGRA_APBDMA_CONTROL			0x010
#define TEGRA_APBDMA_IRQ_MASK			0x01c
#define TEGRA_APBDMA_IRQ_MASK_SET		0x020

/* CSR register */
#define TEGRA_APBDMA_CHAN_CSR			0x00
#define TEGRA_APBDMA_CSR_ENB			BIT(31)
#define TEGRA_APBDMA_CSR_IE_EOC			BIT(30)
#define TEGRA_APBDMA_CSR_HOLD			BIT(29)
#define TEGRA_APBDMA_CSR_DIR			BIT(28)
#define TEGRA_APBDMA_CSR_ONCE			BIT(27)
#define TEGRA_APBDMA_CSR_FLOW			BIT(21)
#define TEGRA_APBDMA_CSR_REQ_SEL_SHIFT		16
#define TEGRA_APBDMA_CSR_REQ_SEL_MASK		0x1F
#define TEGRA_APBDMA_CSR_WCOUNT_MASK		0xFFFC

/* STATUS register */
#define TEGRA_APBDMA_CHAN_STATUS		0x004
#define TEGRA_APBDMA_STATUS_BUSY		BIT(31)
#define TEGRA_APBDMA_STATUS_ISE_EOC		BIT(30)
#define TEGRA_APBDMA_STATUS_HALT		BIT(29)
#define TEGRA_APBDMA_STATUS_PING_PONG		BIT(28)
#define TEGRA_APBDMA_STATUS_COUNT_SHIFT		2
#define TEGRA_APBDMA_STATUS_COUNT_MASK		0xFFFC

#define TEGRA_APBDMA_CHAN_CSRE			0x00C
#define TEGRA_APBDMA_CHAN_CSRE_PAUSE		BIT(31)

/* AHB memory address */
#define TEGRA_APBDMA_CHAN_AHBPTR		0x010

/* AHB sequence register */
#define TEGRA_APBDMA_CHAN_AHBSEQ		0x14
#define TEGRA_APBDMA_AHBSEQ_INTR_ENB		BIT(31)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_8		(0 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_16	(1 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32	(2 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_64	(3 << 28)
#define TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_128	(4 << 28)
#define TEGRA_APBDMA_AHBSEQ_DATA_SWAP		BIT(27)
#define TEGRA_APBDMA_AHBSEQ_BURST_1		(4 << 24)
#define TEGRA_APBDMA_AHBSEQ_BURST_4		(5 << 24)
#define TEGRA_APBDMA_AHBSEQ_BURST_8		(6 << 24)
#define TEGRA_APBDMA_AHBSEQ_DBL_BUF		BIT(19)
#define TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT		16
#define TEGRA_APBDMA_AHBSEQ_WRAP_NONE		0

/* APB address */
#define TEGRA_APBDMA_CHAN_APBPTR		0x018

/* APB sequence register */
#define TEGRA_APBDMA_CHAN_APBSEQ		0x01c
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_8		(0 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_16	(1 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32	(2 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64	(3 << 28)
#define TEGRA_APBDMA_APBSEQ_BUS_WIDTH_128	(4 << 28)
#define TEGRA_APBDMA_APBSEQ_DATA_SWAP		BIT(27)
#define TEGRA_APBDMA_APBSEQ_WRAP_WORD_1		(1 << 16)

/* Tegra148 specific registers */
#define TEGRA_APBDMA_CHAN_WCOUNT		0x20

#define TEGRA_APBDMA_CHAN_WORD_TRANSFER		0x24

/*
 * If any burst is in flight while the DMA is paused, this is the time
 * (in microseconds) needed for the in-flight burst to complete and for
 * the DMA status register to be updated.
 */
#define TEGRA_APBDMA_BURST_COMPLETE_TIME	20

/* Channel base address offset from APBDMA base address */
#define TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET	0x1000

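/*
 * REQ_SEL is a 5-bit field, so any value above TEGRA_APBDMA_CSR_REQ_SEL_MASK
 * can never be a real requester ID; one past the mask is therefore a safe
 * out-of-band marker for "no slave ID configured".
 */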
#define TEGRA_APBDMA_SLAVE_ID_INVALID	(TEGRA_APBDMA_CSR_REQ_SEL_MASK + 1)

/*
 * tegra_dma_chip_data Tegra chip specific DMA data
 * @nr_channels: Number of channels available in the controller.
 * @channel_reg_size: Channel register size/stride.
 * @max_dma_count: Maximum DMA transfer count supported by DMA controller.
 * @support_channel_pause: Support channel wise pause of dma.
 * @support_separate_wcount_reg: Support separate word count register.
 */
struct tegra_dma_chip_data {
	int nr_channels;
	int channel_reg_size;
	int max_dma_count;
	bool support_channel_pause;
	bool support_separate_wcount_reg;
};

/* DMA channel registers */
struct tegra_dma_channel_regs {
	unsigned long csr;
	unsigned long ahb_ptr;
	unsigned long apb_ptr;
	unsigned long ahb_seq;
	unsigned long apb_seq;
	unsigned long wcount;
};

/*
 * tegra_dma_sg_req: DMA request details to configure hardware. This
 * contains the details for one transfer to configure DMA hw.
 * A client's request for a data transfer may be broken into multiple
 * sub-transfers, according to the requester's details and hardware support.
 * Each sub-transfer is added to the list of transfers and points to the
 * Tegra DMA descriptor that manages the transfer details.
 */
struct tegra_dma_sg_req {
	struct tegra_dma_channel_regs ch_regs;
	unsigned int req_len;
	bool configured;
	bool last_sg;
	struct list_head node;
	struct tegra_dma_desc *dma_desc;
	unsigned int words_xferred;
};

/*
 * tegra_dma_desc: Tegra DMA descriptor that manages the client requests.
 * This descriptor keeps track of transfer status, callbacks, request
 * counts etc.
 */
struct tegra_dma_desc {
	struct dma_async_tx_descriptor txd;
	unsigned int bytes_requested;
	unsigned int bytes_transferred;
	enum dma_status dma_status;
	struct list_head node;
	struct list_head tx_list;
	struct list_head cb_node;
	int cb_count;
};

struct tegra_dma_channel;

typedef void (*dma_isr_handler)(struct tegra_dma_channel *tdc,
				bool to_terminate);

/* tegra_dma_channel: Channel specific information */
struct tegra_dma_channel {
	struct dma_chan dma_chan;
	char name[12];
	bool config_init;
	int id;
	int irq;
	void __iomem *chan_addr;
	spinlock_t lock;
	bool busy;
	struct tegra_dma *tdma;
	bool cyclic;

	/* Different lists for managing the requests */
	struct list_head free_sg_req;
	struct list_head pending_sg_req;
	struct list_head free_dma_desc;
	struct list_head cb_desc;

	/* ISR handler and tasklet for bottom half of isr handling */
	dma_isr_handler isr_handler;
	struct tasklet_struct tasklet;

	/* Channel-slave specific configuration */
	unsigned int slave_id;
	struct dma_slave_config dma_sconfig;
	struct tegra_dma_channel_regs channel_reg;
};

/* tegra_dma: Tegra DMA specific information */
struct tegra_dma {
	struct dma_device dma_dev;
	struct device *dev;
	struct clk *dma_clk;
	struct reset_control *rst;
	spinlock_t global_lock;
	void __iomem *base_addr;
	const struct tegra_dma_chip_data *chip_data;

	/*
	 * Counter for managing global pausing of the DMA controller.
	 * Only applicable for devices that don't support individual
	 * channel pausing.
	 */
	u32 global_pause_count;

	/* Some registers need to be cached before suspend */
	u32 reg_gen;

	/* Last member of the structure */
	struct tegra_dma_channel channels[0];
};

static inline void tdma_write(struct tegra_dma *tdma, u32 reg, u32 val)
{
	writel(val, tdma->base_addr + reg);
}

static inline u32 tdma_read(struct tegra_dma *tdma, u32 reg)
{
	return readl(tdma->base_addr + reg);
}

static inline void tdc_write(struct tegra_dma_channel *tdc,
			     u32 reg, u32 val)
{
	writel(val, tdc->chan_addr + reg);
}

static inline u32 tdc_read(struct tegra_dma_channel *tdc, u32 reg)
{
	return readl(tdc->chan_addr + reg);
}

static inline struct tegra_dma_channel *to_tegra_dma_chan(struct dma_chan *dc)
{
	return container_of(dc, struct tegra_dma_channel, dma_chan);
}

static inline struct tegra_dma_desc *txd_to_tegra_dma_desc(
		struct dma_async_tx_descriptor *td)
{
	return container_of(td, struct tegra_dma_desc, txd);
}

static inline struct device *tdc2dev(struct tegra_dma_channel *tdc)
{
	return &tdc->dma_chan.dev->device;
}

static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *tx);
static int tegra_dma_runtime_suspend(struct device *dev);
static int tegra_dma_runtime_resume(struct device *dev);

/* Get DMA desc from free list, if not there then allocate it. */
static struct tegra_dma_desc *tegra_dma_desc_get(
		struct tegra_dma_channel *tdc)
{
	struct tegra_dma_desc *dma_desc;
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);

	/* Do not allocate if desc are waiting for ack */
	list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
		if (async_tx_test_ack(&dma_desc->txd) && !dma_desc->cb_count) {
			list_del(&dma_desc->node);
			spin_unlock_irqrestore(&tdc->lock, flags);
			dma_desc->txd.flags = 0;
			return dma_desc;
		}
	}

	spin_unlock_irqrestore(&tdc->lock, flags);

	/* Allocate DMA desc */
	dma_desc = kzalloc(sizeof(*dma_desc), GFP_NOWAIT);
	if (!dma_desc)
		return NULL;

	dma_async_tx_descriptor_init(&dma_desc->txd, &tdc->dma_chan);
	dma_desc->txd.tx_submit = tegra_dma_tx_submit;
	dma_desc->txd.flags = 0;

	return dma_desc;
}

static void tegra_dma_desc_put(struct tegra_dma_channel *tdc,
		struct tegra_dma_desc *dma_desc)
{
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);
	if (!list_empty(&dma_desc->tx_list))
		list_splice_init(&dma_desc->tx_list, &tdc->free_sg_req);
	list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
	spin_unlock_irqrestore(&tdc->lock, flags);
}

static struct tegra_dma_sg_req *tegra_dma_sg_req_get(
		struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *sg_req = NULL;
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);
	if (!list_empty(&tdc->free_sg_req)) {
		sg_req = list_first_entry(&tdc->free_sg_req,
					  typeof(*sg_req), node);
		list_del(&sg_req->node);
		spin_unlock_irqrestore(&tdc->lock, flags);
		return sg_req;
	}
	spin_unlock_irqrestore(&tdc->lock, flags);

	sg_req = kzalloc(sizeof(struct tegra_dma_sg_req), GFP_NOWAIT);

	return sg_req;
}

static int tegra_dma_slave_config(struct dma_chan *dc,
		struct dma_slave_config *sconfig)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);

	if (!list_empty(&tdc->pending_sg_req)) {
		dev_err(tdc2dev(tdc), "Configuration not allowed\n");
		return -EBUSY;
	}

	memcpy(&tdc->dma_sconfig, sconfig, sizeof(*sconfig));
	if (tdc->slave_id == TEGRA_APBDMA_SLAVE_ID_INVALID &&
	    sconfig->device_fc) {
		if (sconfig->slave_id > TEGRA_APBDMA_CSR_REQ_SEL_MASK)
			return -EINVAL;
		tdc->slave_id = sconfig->slave_id;
	}
	tdc->config_init = true;

	return 0;
}

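/*
 * Note: a slave ID passed via dma_slave_config only takes effect when the
 * client requested flow control (device_fc) and the channel was not already
 * bound to a requester through tegra_dma_of_xlate(); otherwise the existing
 * slave_id is kept.
 */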
static void tegra_dma_global_pause(struct tegra_dma_channel *tdc,
		bool wait_for_burst_complete)
{
	struct tegra_dma *tdma = tdc->tdma;

	spin_lock(&tdma->global_lock);

	if (tdc->tdma->global_pause_count == 0) {
		tdma_write(tdma, TEGRA_APBDMA_GENERAL, 0);
		if (wait_for_burst_complete)
			udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
	}

	tdc->tdma->global_pause_count++;

	spin_unlock(&tdma->global_lock);
}

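/*
 * The global pause is reference-counted: the GENERAL enable bit is cleared
 * only on the first pause and restored only when the last paused channel
 * resumes, so overlapping pauses from different channels nest correctly.
 */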
static void tegra_dma_global_resume(struct tegra_dma_channel *tdc)
{
	struct tegra_dma *tdma = tdc->tdma;

	spin_lock(&tdma->global_lock);

	if (WARN_ON(tdc->tdma->global_pause_count == 0))
		goto out;

	if (--tdc->tdma->global_pause_count == 0)
		tdma_write(tdma, TEGRA_APBDMA_GENERAL,
			   TEGRA_APBDMA_GENERAL_ENABLE);

out:
	spin_unlock(&tdma->global_lock);
}

static void tegra_dma_pause(struct tegra_dma_channel *tdc,
		bool wait_for_burst_complete)
{
	struct tegra_dma *tdma = tdc->tdma;

	if (tdma->chip_data->support_channel_pause) {
		tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE,
			  TEGRA_APBDMA_CHAN_CSRE_PAUSE);
		if (wait_for_burst_complete)
			udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
	} else {
		tegra_dma_global_pause(tdc, wait_for_burst_complete);
	}
}

static void tegra_dma_resume(struct tegra_dma_channel *tdc)
{
	struct tegra_dma *tdma = tdc->tdma;

	if (tdma->chip_data->support_channel_pause)
		tdc_write(tdc, TEGRA_APBDMA_CHAN_CSRE, 0);
	else
		tegra_dma_global_resume(tdc);
}

static void tegra_dma_stop(struct tegra_dma_channel *tdc)
{
	u32 csr;
	u32 status;

	/* Disable interrupts */
	csr = tdc_read(tdc, TEGRA_APBDMA_CHAN_CSR);
	csr &= ~TEGRA_APBDMA_CSR_IE_EOC;
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr);

	/* Disable DMA */
	csr &= ~TEGRA_APBDMA_CSR_ENB;
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, csr);

	/* Clear interrupt status if it is there */
	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		dev_dbg(tdc2dev(tdc), "%s():clearing interrupt\n", __func__);
		tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
	}
	tdc->busy = false;
}

static void tegra_dma_start(struct tegra_dma_channel *tdc,
		struct tegra_dma_sg_req *sg_req)
{
	struct tegra_dma_channel_regs *ch_regs = &sg_req->ch_regs;

	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR, ch_regs->csr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBSEQ, ch_regs->apb_seq);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_regs->apb_ptr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_regs->ahb_seq);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_regs->ahb_ptr);
	if (tdc->tdma->chip_data->support_separate_wcount_reg)
		tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT, ch_regs->wcount);

	/* Start DMA */
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
		  ch_regs->csr | TEGRA_APBDMA_CSR_ENB);
}

static void tegra_dma_configure_for_next(struct tegra_dma_channel *tdc,
		struct tegra_dma_sg_req *nsg_req)
{
	unsigned long status;

	/*
	 * The DMA controller reloads the new configuration for the next
	 * transfer after the last burst of the current transfer completes.
	 * If there is no IEC status then this makes sure that the last
	 * burst has not completed. The last burst may be in flight and
	 * can still complete, but because the DMA is paused it will
	 * neither generate an interrupt nor reload the new configuration.
	 * If IEC status is already set then the interrupt handler needs
	 * to load the new configuration.
	 */
	tegra_dma_pause(tdc, false);
	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);

	/*
	 * If interrupt is pending then do nothing as the ISR will handle
	 * the programming of the new request.
	 */
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		dev_err(tdc2dev(tdc),
			"Skipping new configuration as interrupt is pending\n");
		tegra_dma_resume(tdc);
		return;
	}

	/* Safe to program new configuration */
	tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, nsg_req->ch_regs.apb_ptr);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, nsg_req->ch_regs.ahb_ptr);
	if (tdc->tdma->chip_data->support_separate_wcount_reg)
		tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT,
			  nsg_req->ch_regs.wcount);
	tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
		  nsg_req->ch_regs.csr | TEGRA_APBDMA_CSR_ENB);
	nsg_req->configured = true;
	nsg_req->words_xferred = 0;

	tegra_dma_resume(tdc);
}

static void tdc_start_head_req(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *sg_req;

	if (list_empty(&tdc->pending_sg_req))
		return;

	sg_req = list_first_entry(&tdc->pending_sg_req,
				  typeof(*sg_req), node);
	tegra_dma_start(tdc, sg_req);
	sg_req->configured = true;
	sg_req->words_xferred = 0;
	tdc->busy = true;
}

static void tdc_configure_next_head_desc(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *hsgreq;
	struct tegra_dma_sg_req *hnsgreq;

	if (list_empty(&tdc->pending_sg_req))
		return;

	hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
	if (!list_is_last(&hsgreq->node, &tdc->pending_sg_req)) {
		hnsgreq = list_first_entry(&hsgreq->node,
					   typeof(*hnsgreq), node);
		tegra_dma_configure_for_next(tdc, hnsgreq);
	}
}

static inline int get_current_xferred_count(struct tegra_dma_channel *tdc,
		struct tegra_dma_sg_req *sg_req, unsigned long status)
{
	return sg_req->req_len - (status & TEGRA_APBDMA_STATUS_COUNT_MASK) - 4;
}

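/*
 * The count field read back here uses the same (len - 4) & 0xFFFC encoding
 * that tegra_dma_prep_wcount() programs into CSR/WCOUNT, hence the extra
 * "- 4" when converting the remaining count back into bytes transferred.
 */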
static void tegra_dma_abort_all(struct tegra_dma_channel *tdc)
{
	struct tegra_dma_sg_req *sgreq;
	struct tegra_dma_desc *dma_desc;

	while (!list_empty(&tdc->pending_sg_req)) {
		sgreq = list_first_entry(&tdc->pending_sg_req,
					 typeof(*sgreq), node);
		list_move_tail(&sgreq->node, &tdc->free_sg_req);
		if (sgreq->last_sg) {
			dma_desc = sgreq->dma_desc;
			dma_desc->dma_status = DMA_ERROR;
			list_add_tail(&dma_desc->node, &tdc->free_dma_desc);

			/* Add in cb list if it is not there. */
			if (!dma_desc->cb_count)
				list_add_tail(&dma_desc->cb_node,
					      &tdc->cb_desc);
			dma_desc->cb_count++;
		}
	}
	tdc->isr_handler = NULL;
}

static bool handle_continuous_head_request(struct tegra_dma_channel *tdc,
		struct tegra_dma_sg_req *last_sg_req, bool to_terminate)
{
	struct tegra_dma_sg_req *hsgreq = NULL;

	if (list_empty(&tdc->pending_sg_req)) {
		dev_err(tdc2dev(tdc), "DMA is running without req\n");
		tegra_dma_stop(tdc);
		return false;
	}

	/*
	 * Check that the head req on the list is in flight.
	 * If it is not in flight then abort the transfer, as the
	 * looping of transfers cannot continue.
	 */
	hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
	if (!hsgreq->configured) {
		tegra_dma_stop(tdc);
		dev_err(tdc2dev(tdc), "Error in DMA transfer, aborting DMA\n");
		tegra_dma_abort_all(tdc);
		return false;
	}

	/* Configure next request */
	if (!to_terminate)
		tdc_configure_next_head_desc(tdc);

	return true;
}

static void handle_once_dma_done(struct tegra_dma_channel *tdc,
		bool to_terminate)
{
	struct tegra_dma_sg_req *sgreq;
	struct tegra_dma_desc *dma_desc;

	tdc->busy = false;
	sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
	dma_desc = sgreq->dma_desc;
	dma_desc->bytes_transferred += sgreq->req_len;

	list_del(&sgreq->node);
	if (sgreq->last_sg) {
		dma_desc->dma_status = DMA_COMPLETE;
		dma_cookie_complete(&dma_desc->txd);
		if (!dma_desc->cb_count)
			list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
		dma_desc->cb_count++;
		list_add_tail(&dma_desc->node, &tdc->free_dma_desc);
	}
	list_add_tail(&sgreq->node, &tdc->free_sg_req);

	/* Do not start DMA if it is going to be terminated */
	if (to_terminate || list_empty(&tdc->pending_sg_req))
		return;

	tdc_start_head_req(tdc);
}

static void handle_cont_sngl_cycle_dma_done(struct tegra_dma_channel *tdc,
		bool to_terminate)
{
	struct tegra_dma_sg_req *sgreq;
	struct tegra_dma_desc *dma_desc;
	bool st;

	sgreq = list_first_entry(&tdc->pending_sg_req, typeof(*sgreq), node);
	dma_desc = sgreq->dma_desc;
	/* if we dma for long enough the transfer count will wrap */
	dma_desc->bytes_transferred =
		(dma_desc->bytes_transferred + sgreq->req_len) %
		dma_desc->bytes_requested;

	/* Callback needs to be called */
	if (!dma_desc->cb_count)
		list_add_tail(&dma_desc->cb_node, &tdc->cb_desc);
	dma_desc->cb_count++;

	sgreq->words_xferred = 0;

	/* If not last req then put at end of pending list */
	if (!list_is_last(&sgreq->node, &tdc->pending_sg_req)) {
		list_move_tail(&sgreq->node, &tdc->pending_sg_req);
		sgreq->configured = false;
		st = handle_continuous_head_request(tdc, sgreq, to_terminate);
		if (!st)
			dma_desc->dma_status = DMA_ERROR;
	}
}

static void tegra_dma_tasklet(unsigned long data)
{
	struct tegra_dma_channel *tdc = (struct tegra_dma_channel *)data;
	struct dmaengine_desc_callback cb;
	struct tegra_dma_desc *dma_desc;
	unsigned long flags;
	int cb_count;

	spin_lock_irqsave(&tdc->lock, flags);
	while (!list_empty(&tdc->cb_desc)) {
		dma_desc = list_first_entry(&tdc->cb_desc,
					    typeof(*dma_desc), cb_node);
		list_del(&dma_desc->cb_node);
		dmaengine_desc_get_callback(&dma_desc->txd, &cb);
		cb_count = dma_desc->cb_count;
		dma_desc->cb_count = 0;
		trace_tegra_dma_complete_cb(&tdc->dma_chan, cb_count,
					    cb.callback);
		spin_unlock_irqrestore(&tdc->lock, flags);
		while (cb_count--)
			dmaengine_desc_callback_invoke(&cb, NULL);
		spin_lock_irqsave(&tdc->lock, flags);
	}
	spin_unlock_irqrestore(&tdc->lock, flags);
}

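/*
 * Callbacks are invoked with the channel lock dropped so that a callback
 * may safely prepare and submit new descriptors on the same channel;
 * cb_count records how many completions accumulated while the tasklet was
 * still pending, and the callback is invoked once per completion.
 */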
static irqreturn_t tegra_dma_isr(int irq, void *dev_id)
{
	struct tegra_dma_channel *tdc = dev_id;
	unsigned long status;
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);

	trace_tegra_dma_isr(&tdc->dma_chan, irq);
	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		tdc_write(tdc, TEGRA_APBDMA_CHAN_STATUS, status);
		tdc->isr_handler(tdc, false);
		tasklet_schedule(&tdc->tasklet);
		spin_unlock_irqrestore(&tdc->lock, flags);
		return IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&tdc->lock, flags);
	dev_info(tdc2dev(tdc),
		 "Interrupt already served status 0x%08lx\n", status);
	return IRQ_NONE;
}

static dma_cookie_t tegra_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct tegra_dma_desc *dma_desc = txd_to_tegra_dma_desc(txd);
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(txd->chan);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&tdc->lock, flags);
	dma_desc->dma_status = DMA_IN_PROGRESS;
	cookie = dma_cookie_assign(&dma_desc->txd);
	list_splice_tail_init(&dma_desc->tx_list, &tdc->pending_sg_req);
	spin_unlock_irqrestore(&tdc->lock, flags);

	return cookie;
}

static void tegra_dma_issue_pending(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	unsigned long flags;

	spin_lock_irqsave(&tdc->lock, flags);
	if (list_empty(&tdc->pending_sg_req)) {
		dev_err(tdc2dev(tdc), "No DMA request\n");
		goto end;
	}
	if (!tdc->busy) {
		tdc_start_head_req(tdc);

		/* Continuous single mode: Configure next req */
		if (tdc->cyclic) {
			/*
			 * Wait one burst time so the DMA can be
			 * configured for the next transfer.
			 */
			udelay(TEGRA_APBDMA_BURST_COMPLETE_TIME);
			tdc_configure_next_head_desc(tdc);
		}
	}
end:
	spin_unlock_irqrestore(&tdc->lock, flags);
}

static int tegra_dma_terminate_all(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_sg_req *sgreq;
	struct tegra_dma_desc *dma_desc;
	unsigned long flags;
	unsigned long status;
	unsigned long wcount;
	bool was_busy;

	spin_lock_irqsave(&tdc->lock, flags);

	if (!tdc->busy)
		goto skip_dma_stop;

	/* Pause DMA before checking the queue status */
	tegra_dma_pause(tdc, true);

	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	if (status & TEGRA_APBDMA_STATUS_ISE_EOC) {
		dev_dbg(tdc2dev(tdc), "%s():handling isr\n", __func__);
		tdc->isr_handler(tdc, true);
		status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);
	}
	if (tdc->tdma->chip_data->support_separate_wcount_reg)
		wcount = tdc_read(tdc, TEGRA_APBDMA_CHAN_WORD_TRANSFER);
	else
		wcount = status;

	was_busy = tdc->busy;
	tegra_dma_stop(tdc);

	if (!list_empty(&tdc->pending_sg_req) && was_busy) {
		sgreq = list_first_entry(&tdc->pending_sg_req,
					 typeof(*sgreq), node);
		sgreq->dma_desc->bytes_transferred +=
				get_current_xferred_count(tdc, sgreq, wcount);
	}
	tegra_dma_resume(tdc);

skip_dma_stop:
	tegra_dma_abort_all(tdc);

	while (!list_empty(&tdc->cb_desc)) {
		dma_desc = list_first_entry(&tdc->cb_desc,
					    typeof(*dma_desc), cb_node);
		list_del(&dma_desc->cb_node);
		dma_desc->cb_count = 0;
	}
	spin_unlock_irqrestore(&tdc->lock, flags);

	return 0;
}

static unsigned int tegra_dma_sg_bytes_xferred(struct tegra_dma_channel *tdc,
					       struct tegra_dma_sg_req *sg_req)
{
	unsigned long status, wcount = 0;

	if (!list_is_first(&sg_req->node, &tdc->pending_sg_req))
		return 0;

	if (tdc->tdma->chip_data->support_separate_wcount_reg)
		wcount = tdc_read(tdc, TEGRA_APBDMA_CHAN_WORD_TRANSFER);

	status = tdc_read(tdc, TEGRA_APBDMA_CHAN_STATUS);

	if (!tdc->tdma->chip_data->support_separate_wcount_reg)
		wcount = status;

	if (status & TEGRA_APBDMA_STATUS_ISE_EOC)
		return sg_req->req_len;

	wcount = get_current_xferred_count(tdc, sg_req, wcount);

	if (!wcount) {
		/*
		 * If wcount wasn't ever polled for this SG before, then
		 * simply assume that transfer hasn't started yet.
		 *
		 * Otherwise it's the end of the transfer.
		 *
		 * The alternative would be to poll the status register
		 * until the EOC bit is set or wcount goes up. That is
		 * needed because the EOC bit is set only after the last
		 * burst completes, and the counter is then less than the
		 * actual transfer size by 4 bytes. The counter value wraps
		 * around in cyclic mode before EOC is set(!), so we can't
		 * easily distinguish the start of a transfer from its end.
		 */
		if (sg_req->words_xferred)
			wcount = sg_req->req_len - 4;
	} else if (wcount < sg_req->words_xferred) {
		/*
		 * This case will never happen for a non-cyclic transfer.
		 *
		 * For a cyclic transfer, although it is possible for the
		 * next transfer to have already started (resetting the word
		 * count), this case should still not happen because we should
		 * have detected that the EOC bit is set and hence the
		 * transfer was completed.
		 */
		WARN_ON_ONCE(1);

		wcount = sg_req->req_len - 4;
	} else {
		sg_req->words_xferred = wcount;
	}

	return wcount;
}

static enum dma_status tegra_dma_tx_status(struct dma_chan *dc,
		dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_desc *dma_desc;
	struct tegra_dma_sg_req *sg_req;
	enum dma_status ret;
	unsigned long flags;
	unsigned int residual;
	unsigned int bytes = 0;

	ret = dma_cookie_status(dc, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	spin_lock_irqsave(&tdc->lock, flags);

	/* Check on wait_ack desc status */
	list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
		if (dma_desc->txd.cookie == cookie) {
			ret = dma_desc->dma_status;
			goto found;
		}
	}

	/* Check in pending list */
	list_for_each_entry(sg_req, &tdc->pending_sg_req, node) {
		dma_desc = sg_req->dma_desc;
		if (dma_desc->txd.cookie == cookie) {
			bytes = tegra_dma_sg_bytes_xferred(tdc, sg_req);
			ret = dma_desc->dma_status;
			goto found;
		}
	}

	dev_dbg(tdc2dev(tdc), "cookie %d not found\n", cookie);
	dma_desc = NULL;

found:
	if (dma_desc && txstate) {
		residual = dma_desc->bytes_requested -
			   ((dma_desc->bytes_transferred + bytes) %
			    dma_desc->bytes_requested);
		dma_set_residue(txstate, residual);
	}

	trace_tegra_dma_tx_status(&tdc->dma_chan, cookie, txstate);
	spin_unlock_irqrestore(&tdc->lock, flags);

	return ret;
}

static inline int get_bus_width(struct tegra_dma_channel *tdc,
		enum dma_slave_buswidth slave_bw)
{
	switch (slave_bw) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_8;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_16;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32;
	case DMA_SLAVE_BUSWIDTH_8_BYTES:
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_64;
	default:
		dev_warn(tdc2dev(tdc),
			 "slave bw is not supported, using 32bits\n");
		return TEGRA_APBDMA_APBSEQ_BUS_WIDTH_32;
	}
}

static inline int get_burst_size(struct tegra_dma_channel *tdc,
		u32 burst_size, enum dma_slave_buswidth slave_bw, int len)
{
	int burst_byte;
	int burst_ahb_width;

	/*
	 * The burst_size from the client is in units of the bus width;
	 * convert it into the AHB memory width, which is 4 bytes.
	 */
	burst_byte = burst_size * slave_bw;
	burst_ahb_width = burst_byte / 4;

	/* If burst size is 0 then calculate the burst size based on length */
	if (!burst_ahb_width) {
		if (len & 0xF)
			return TEGRA_APBDMA_AHBSEQ_BURST_1;
		else if ((len >> 4) & 0x1)
			return TEGRA_APBDMA_AHBSEQ_BURST_4;
		else
			return TEGRA_APBDMA_AHBSEQ_BURST_8;
	}
	if (burst_ahb_width < 4)
		return TEGRA_APBDMA_AHBSEQ_BURST_1;
	else if (burst_ahb_width < 8)
		return TEGRA_APBDMA_AHBSEQ_BURST_4;
	else
		return TEGRA_APBDMA_AHBSEQ_BURST_8;
}

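/*
 * Worked example: a client burst of 8 words on a 4-byte bus is 32 bytes,
 * i.e. 8 AHB words, which selects TEGRA_APBDMA_AHBSEQ_BURST_8; a 16-byte
 * burst (4 AHB words) selects TEGRA_APBDMA_AHBSEQ_BURST_4.
 */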
static int get_transfer_param(struct tegra_dma_channel *tdc,
		enum dma_transfer_direction direction, unsigned long *apb_addr,
		unsigned long *apb_seq, unsigned long *csr,
		unsigned int *burst_size, enum dma_slave_buswidth *slave_bw)
{
	switch (direction) {
	case DMA_MEM_TO_DEV:
		*apb_addr = tdc->dma_sconfig.dst_addr;
		*apb_seq = get_bus_width(tdc, tdc->dma_sconfig.dst_addr_width);
		*burst_size = tdc->dma_sconfig.dst_maxburst;
		*slave_bw = tdc->dma_sconfig.dst_addr_width;
		*csr = TEGRA_APBDMA_CSR_DIR;
		return 0;

	case DMA_DEV_TO_MEM:
		*apb_addr = tdc->dma_sconfig.src_addr;
		*apb_seq = get_bus_width(tdc, tdc->dma_sconfig.src_addr_width);
		*burst_size = tdc->dma_sconfig.src_maxburst;
		*slave_bw = tdc->dma_sconfig.src_addr_width;
		*csr = 0;
		return 0;

	default:
		dev_err(tdc2dev(tdc), "DMA direction is not supported\n");
		return -EINVAL;
	}
	return -EINVAL;
}

static void tegra_dma_prep_wcount(struct tegra_dma_channel *tdc,
		struct tegra_dma_channel_regs *ch_regs, u32 len)
{
	u32 len_field = (len - 4) & 0xFFFC;

	if (tdc->tdma->chip_data->support_separate_wcount_reg)
		ch_regs->wcount = len_field;
	else
		ch_regs->csr |= len_field;
}

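/*
 * Worked example: len = 64 gives (64 - 4) & 0xFFFC = 60 (0x3C), the
 * word-count encoding the hardware expects; chips with a separate WCOUNT
 * register take it there, older chips pack it into the CSR count field.
 */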
static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
	struct dma_chan *dc, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_desc *dma_desc;
	unsigned int i;
	struct scatterlist *sg;
	unsigned long csr, ahb_seq, apb_ptr, apb_seq;
	struct list_head req_list;
	struct tegra_dma_sg_req *sg_req = NULL;
	u32 burst_size;
	enum dma_slave_buswidth slave_bw;

	if (!tdc->config_init) {
		dev_err(tdc2dev(tdc), "DMA channel is not configured\n");
		return NULL;
	}
	if (sg_len < 1) {
		dev_err(tdc2dev(tdc), "Invalid segment length %d\n", sg_len);
		return NULL;
	}

	if (get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
			       &burst_size, &slave_bw) < 0)
		return NULL;

	INIT_LIST_HEAD(&req_list);

	ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE <<
					TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;

	csr |= TEGRA_APBDMA_CSR_ONCE;

	if (tdc->slave_id != TEGRA_APBDMA_SLAVE_ID_INVALID) {
		csr |= TEGRA_APBDMA_CSR_FLOW;
		csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
	}

	if (flags & DMA_PREP_INTERRUPT) {
		csr |= TEGRA_APBDMA_CSR_IE_EOC;
	} else {
		WARN_ON_ONCE(1);
		return NULL;
	}

	apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;

	dma_desc = tegra_dma_desc_get(tdc);
	if (!dma_desc) {
		dev_err(tdc2dev(tdc), "DMA descriptors not available\n");
		return NULL;
	}
	INIT_LIST_HEAD(&dma_desc->tx_list);
	INIT_LIST_HEAD(&dma_desc->cb_node);
	dma_desc->cb_count = 0;
	dma_desc->bytes_requested = 0;
	dma_desc->bytes_transferred = 0;
	dma_desc->dma_status = DMA_IN_PROGRESS;

	/* Make transfer requests */
	for_each_sg(sgl, sg, sg_len, i) {
		u32 len, mem;

		mem = sg_dma_address(sg);
		len = sg_dma_len(sg);

		if ((len & 3) || (mem & 3) ||
		    (len > tdc->tdma->chip_data->max_dma_count)) {
			dev_err(tdc2dev(tdc),
				"DMA length/memory address is not supported\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}

		sg_req = tegra_dma_sg_req_get(tdc);
		if (!sg_req) {
			dev_err(tdc2dev(tdc), "DMA sg-req not available\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}

		ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
		dma_desc->bytes_requested += len;

		sg_req->ch_regs.apb_ptr = apb_ptr;
		sg_req->ch_regs.ahb_ptr = mem;
		sg_req->ch_regs.csr = csr;
		tegra_dma_prep_wcount(tdc, &sg_req->ch_regs, len);
		sg_req->ch_regs.apb_seq = apb_seq;
		sg_req->ch_regs.ahb_seq = ahb_seq;
		sg_req->configured = false;
		sg_req->last_sg = false;
		sg_req->dma_desc = dma_desc;
		sg_req->req_len = len;

		list_add_tail(&sg_req->node, &dma_desc->tx_list);
	}
	sg_req->last_sg = true;
	if (flags & DMA_CTRL_ACK)
		dma_desc->txd.flags = DMA_CTRL_ACK;

	/*
	 * Make sure that this mode does not conflict with the currently
	 * configured mode.
	 */
	if (!tdc->isr_handler) {
		tdc->isr_handler = handle_once_dma_done;
		tdc->cyclic = false;
	} else {
		if (tdc->cyclic) {
			dev_err(tdc2dev(tdc), "DMA configured in cyclic mode\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}
	}

	return &dma_desc->txd;
}

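/*
 * Illustrative client usage (a sketch of the standard dmaengine flow, not
 * part of this driver; "rx" and fifo_phys are example names):
 *
 *	struct dma_chan *chan = dma_request_chan(dev, "rx");
 *	struct dma_slave_config cfg = {
 *		.src_addr = fifo_phys,
 *		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst = 8,
 *		.device_fc = true,
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 *	desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_DEV_TO_MEM,
 *				       DMA_PREP_INTERRUPT);
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 *
 * Note that DMA_PREP_INTERRUPT is mandatory here, as the prep routine
 * above refuses descriptors without it.
 */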
static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
	struct dma_chan *dc, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma_desc *dma_desc = NULL;
	struct tegra_dma_sg_req *sg_req = NULL;
	unsigned long csr, ahb_seq, apb_ptr, apb_seq;
	int len;
	size_t remain_len;
	dma_addr_t mem = buf_addr;
	u32 burst_size;
	enum dma_slave_buswidth slave_bw;

	if (!buf_len || !period_len) {
		dev_err(tdc2dev(tdc), "Invalid buffer/period len\n");
		return NULL;
	}

	if (!tdc->config_init) {
		dev_err(tdc2dev(tdc), "DMA slave is not configured\n");
		return NULL;
	}

	/*
	 * We can take in more requests as long as the DMA has not been
	 * started. The driver will loop over all requests.
	 * Once the DMA is started, new requests can be queued only after
	 * terminating the DMA.
	 */
	if (tdc->busy) {
		dev_err(tdc2dev(tdc), "Request not allowed when DMA running\n");
		return NULL;
	}

	/*
	 * We only support cyclic transfers when buf_len is a multiple of
	 * period_len.
	 */
	if (buf_len % period_len) {
		dev_err(tdc2dev(tdc), "buf_len is not multiple of period_len\n");
		return NULL;
	}

	len = period_len;
	if ((len & 3) || (buf_addr & 3) ||
	    (len > tdc->tdma->chip_data->max_dma_count)) {
		dev_err(tdc2dev(tdc), "Req len/mem address is not correct\n");
		return NULL;
	}

	if (get_transfer_param(tdc, direction, &apb_ptr, &apb_seq, &csr,
			       &burst_size, &slave_bw) < 0)
		return NULL;

	ahb_seq = TEGRA_APBDMA_AHBSEQ_INTR_ENB;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_WRAP_NONE <<
					TEGRA_APBDMA_AHBSEQ_WRAP_SHIFT;
	ahb_seq |= TEGRA_APBDMA_AHBSEQ_BUS_WIDTH_32;

	if (tdc->slave_id != TEGRA_APBDMA_SLAVE_ID_INVALID) {
		csr |= TEGRA_APBDMA_CSR_FLOW;
		csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
	}

	if (flags & DMA_PREP_INTERRUPT) {
		csr |= TEGRA_APBDMA_CSR_IE_EOC;
	} else {
		WARN_ON_ONCE(1);
		return NULL;
	}

	apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;

	dma_desc = tegra_dma_desc_get(tdc);
	if (!dma_desc) {
		dev_err(tdc2dev(tdc), "not enough descriptors available\n");
		return NULL;
	}

	INIT_LIST_HEAD(&dma_desc->tx_list);
	INIT_LIST_HEAD(&dma_desc->cb_node);
	dma_desc->cb_count = 0;

	dma_desc->bytes_transferred = 0;
	dma_desc->bytes_requested = buf_len;
	remain_len = buf_len;

	/* Split transfer equal to period size */
	while (remain_len) {
		sg_req = tegra_dma_sg_req_get(tdc);
		if (!sg_req) {
			dev_err(tdc2dev(tdc), "DMA sg-req not available\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}

		ahb_seq |= get_burst_size(tdc, burst_size, slave_bw, len);
		sg_req->ch_regs.apb_ptr = apb_ptr;
		sg_req->ch_regs.ahb_ptr = mem;
		sg_req->ch_regs.csr = csr;
		tegra_dma_prep_wcount(tdc, &sg_req->ch_regs, len);
		sg_req->ch_regs.apb_seq = apb_seq;
		sg_req->ch_regs.ahb_seq = ahb_seq;
		sg_req->configured = false;
		sg_req->last_sg = false;
		sg_req->dma_desc = dma_desc;
		sg_req->req_len = len;

		list_add_tail(&sg_req->node, &dma_desc->tx_list);
		remain_len -= len;
		mem += len;
	}
	sg_req->last_sg = true;
	if (flags & DMA_CTRL_ACK)
		dma_desc->txd.flags = DMA_CTRL_ACK;

	/*
	 * Make sure that this mode does not conflict with the currently
	 * configured mode.
	 */
	if (!tdc->isr_handler) {
		tdc->isr_handler = handle_cont_sngl_cycle_dma_done;
		tdc->cyclic = true;
	} else {
		if (!tdc->cyclic) {
			dev_err(tdc2dev(tdc), "DMA configuration conflict\n");
			tegra_dma_desc_put(tdc, dma_desc);
			return NULL;
		}
	}

	return &dma_desc->txd;
}

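/*
 * Illustrative cyclic usage (sketch; names are examples only): a client such
 * as an I2S driver would call
 *
 *	desc = dmaengine_prep_dma_cyclic(chan, buf_phys, buf_len, period_len,
 *					 DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *
 * and then receive one completion callback per period_len bytes until the
 * channel is terminated via dmaengine_terminate_all().
 */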
static int tegra_dma_alloc_chan_resources(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma *tdma = tdc->tdma;
	int ret;

	dma_cookie_init(&tdc->dma_chan);
	tdc->config_init = false;

	ret = pm_runtime_get_sync(tdma->dev);
	if (ret < 0)
		return ret;

	return 0;
}

static void tegra_dma_free_chan_resources(struct dma_chan *dc)
{
	struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
	struct tegra_dma *tdma = tdc->tdma;
	struct tegra_dma_desc *dma_desc;
	struct tegra_dma_sg_req *sg_req;
	struct list_head dma_desc_list;
	struct list_head sg_req_list;
	unsigned long flags;

	INIT_LIST_HEAD(&dma_desc_list);
	INIT_LIST_HEAD(&sg_req_list);

	dev_dbg(tdc2dev(tdc), "Freeing channel %d\n", tdc->id);

	tegra_dma_terminate_all(dc);

	spin_lock_irqsave(&tdc->lock, flags);
	list_splice_init(&tdc->pending_sg_req, &sg_req_list);
	list_splice_init(&tdc->free_sg_req, &sg_req_list);
	list_splice_init(&tdc->free_dma_desc, &dma_desc_list);
	INIT_LIST_HEAD(&tdc->cb_desc);
	tdc->config_init = false;
	tdc->isr_handler = NULL;
	spin_unlock_irqrestore(&tdc->lock, flags);

	while (!list_empty(&dma_desc_list)) {
		dma_desc = list_first_entry(&dma_desc_list,
					    typeof(*dma_desc), node);
		list_del(&dma_desc->node);
		kfree(dma_desc);
	}

	while (!list_empty(&sg_req_list)) {
		sg_req = list_first_entry(&sg_req_list, typeof(*sg_req), node);
		list_del(&sg_req->node);
		kfree(sg_req);
	}
	pm_runtime_put(tdma->dev);

	tdc->slave_id = TEGRA_APBDMA_SLAVE_ID_INVALID;
}

static struct dma_chan *tegra_dma_of_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct tegra_dma *tdma = ofdma->of_dma_data;
	struct dma_chan *chan;
	struct tegra_dma_channel *tdc;

	if (dma_spec->args[0] > TEGRA_APBDMA_CSR_REQ_SEL_MASK) {
		dev_err(tdma->dev, "Invalid slave id: %d\n", dma_spec->args[0]);
		return NULL;
	}

	chan = dma_get_any_slave_channel(&tdma->dma_dev);
	if (!chan)
		return NULL;

	tdc = to_tegra_dma_chan(chan);
	tdc->slave_id = dma_spec->args[0];

	return chan;
}

/* Tegra20 specific DMA controller information */
static const struct tegra_dma_chip_data tegra20_dma_chip_data = {
	.nr_channels		= 16,
	.channel_reg_size	= 0x20,
	.max_dma_count		= 1024UL * 64,
	.support_channel_pause	= false,
	.support_separate_wcount_reg = false,
};

/* Tegra30 specific DMA controller information */
static const struct tegra_dma_chip_data tegra30_dma_chip_data = {
	.nr_channels		= 32,
	.channel_reg_size	= 0x20,
	.max_dma_count		= 1024UL * 64,
	.support_channel_pause	= false,
	.support_separate_wcount_reg = false,
};

/* Tegra114 specific DMA controller information */
static const struct tegra_dma_chip_data tegra114_dma_chip_data = {
	.nr_channels		= 32,
	.channel_reg_size	= 0x20,
	.max_dma_count		= 1024UL * 64,
	.support_channel_pause	= true,
	.support_separate_wcount_reg = false,
};

/* Tegra148 specific DMA controller information */
static const struct tegra_dma_chip_data tegra148_dma_chip_data = {
	.nr_channels		= 32,
	.channel_reg_size	= 0x40,
	.max_dma_count		= 1024UL * 64,
	.support_channel_pause	= true,
	.support_separate_wcount_reg = true,
};

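/*
 * Note the progression across chips: Tegra114 adds per-channel pause
 * (CSRE), and Tegra148 additionally moves the transfer count out of CSR
 * into the separate WCOUNT register (hence the larger 0x40 channel stride).
 */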
static int tegra_dma_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct tegra_dma *tdma;
	int ret;
	int i;
	const struct tegra_dma_chip_data *cdata;

	cdata = of_device_get_match_data(&pdev->dev);
	if (!cdata) {
		dev_err(&pdev->dev, "Error: No device match data found\n");
		return -ENODEV;
	}

	tdma = devm_kzalloc(&pdev->dev,
			    struct_size(tdma, channels, cdata->nr_channels),
			    GFP_KERNEL);
	if (!tdma)
		return -ENOMEM;

	tdma->dev = &pdev->dev;
	tdma->chip_data = cdata;
	platform_set_drvdata(pdev, tdma);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	tdma->base_addr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(tdma->base_addr))
		return PTR_ERR(tdma->base_addr);

	tdma->dma_clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(tdma->dma_clk)) {
		dev_err(&pdev->dev, "Error: Missing controller clock\n");
		return PTR_ERR(tdma->dma_clk);
	}

	tdma->rst = devm_reset_control_get(&pdev->dev, "dma");
	if (IS_ERR(tdma->rst)) {
		dev_err(&pdev->dev, "Error: Missing reset\n");
		return PTR_ERR(tdma->rst);
	}

	spin_lock_init(&tdma->global_lock);

	pm_runtime_enable(&pdev->dev);
	if (!pm_runtime_enabled(&pdev->dev))
		ret = tegra_dma_runtime_resume(&pdev->dev);
	else
		ret = pm_runtime_get_sync(&pdev->dev);

	if (ret < 0) {
		pm_runtime_disable(&pdev->dev);
		return ret;
	}

	/* Reset DMA controller */
	reset_control_assert(tdma->rst);
	udelay(2);
	reset_control_deassert(tdma->rst);

	/* Enable global DMA registers */
	tdma_write(tdma, TEGRA_APBDMA_GENERAL, TEGRA_APBDMA_GENERAL_ENABLE);
	tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0);
	tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFFul);

	pm_runtime_put(&pdev->dev);

	INIT_LIST_HEAD(&tdma->dma_dev.channels);
	for (i = 0; i < cdata->nr_channels; i++) {
		struct tegra_dma_channel *tdc = &tdma->channels[i];

		tdc->chan_addr = tdma->base_addr +
				 TEGRA_APBDMA_CHANNEL_BASE_ADD_OFFSET +
				 (i * cdata->channel_reg_size);

		res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
		if (!res) {
			ret = -EINVAL;
			dev_err(&pdev->dev, "No irq resource for chan %d\n", i);
			goto err_irq;
		}
		tdc->irq = res->start;
		snprintf(tdc->name, sizeof(tdc->name), "apbdma.%d", i);
		ret = request_irq(tdc->irq, tegra_dma_isr, 0, tdc->name, tdc);
		if (ret) {
			dev_err(&pdev->dev,
				"request_irq failed with err %d channel %d\n",
				ret, i);
			goto err_irq;
		}

		tdc->dma_chan.device = &tdma->dma_dev;
		dma_cookie_init(&tdc->dma_chan);
		list_add_tail(&tdc->dma_chan.device_node,
			      &tdma->dma_dev.channels);
		tdc->tdma = tdma;
		tdc->id = i;
		tdc->slave_id = TEGRA_APBDMA_SLAVE_ID_INVALID;

		tasklet_init(&tdc->tasklet, tegra_dma_tasklet,
			     (unsigned long)tdc);
		spin_lock_init(&tdc->lock);

		INIT_LIST_HEAD(&tdc->pending_sg_req);
		INIT_LIST_HEAD(&tdc->free_sg_req);
		INIT_LIST_HEAD(&tdc->free_dma_desc);
		INIT_LIST_HEAD(&tdc->cb_desc);
	}

	dma_cap_set(DMA_SLAVE, tdma->dma_dev.cap_mask);
	dma_cap_set(DMA_PRIVATE, tdma->dma_dev.cap_mask);
	dma_cap_set(DMA_CYCLIC, tdma->dma_dev.cap_mask);

	tdma->global_pause_count = 0;
	tdma->dma_dev.dev = &pdev->dev;
	tdma->dma_dev.device_alloc_chan_resources =
					tegra_dma_alloc_chan_resources;
	tdma->dma_dev.device_free_chan_resources =
					tegra_dma_free_chan_resources;
	tdma->dma_dev.device_prep_slave_sg = tegra_dma_prep_slave_sg;
	tdma->dma_dev.device_prep_dma_cyclic = tegra_dma_prep_dma_cyclic;
	tdma->dma_dev.src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
					BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
					BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
					BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
	tdma->dma_dev.dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
					BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
					BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |
					BIT(DMA_SLAVE_BUSWIDTH_8_BYTES);
	tdma->dma_dev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	tdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	tdma->dma_dev.device_config = tegra_dma_slave_config;
	tdma->dma_dev.device_terminate_all = tegra_dma_terminate_all;
	tdma->dma_dev.device_tx_status = tegra_dma_tx_status;
	tdma->dma_dev.device_issue_pending = tegra_dma_issue_pending;

	ret = dma_async_device_register(&tdma->dma_dev);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"Tegra20 APB DMA driver registration failed %d\n", ret);
		goto err_irq;
	}

	ret = of_dma_controller_register(pdev->dev.of_node,
					 tegra_dma_of_xlate, tdma);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"Tegra20 APB DMA OF registration failed %d\n", ret);
		goto err_unregister_dma_dev;
	}

	dev_info(&pdev->dev, "Tegra20 APB DMA driver registered %d channels\n",
		 cdata->nr_channels);

	return 0;

err_unregister_dma_dev:
	dma_async_device_unregister(&tdma->dma_dev);
err_irq:
	while (--i >= 0) {
		struct tegra_dma_channel *tdc = &tdma->channels[i];

		free_irq(tdc->irq, tdc);
		tasklet_kill(&tdc->tasklet);
	}

	pm_runtime_disable(&pdev->dev);
	if (!pm_runtime_status_suspended(&pdev->dev))
		tegra_dma_runtime_suspend(&pdev->dev);

	return ret;
}

static int tegra_dma_remove(struct platform_device *pdev)
{
	struct tegra_dma *tdma = platform_get_drvdata(pdev);
	int i;
	struct tegra_dma_channel *tdc;

	dma_async_device_unregister(&tdma->dma_dev);

	for (i = 0; i < tdma->chip_data->nr_channels; ++i) {
		tdc = &tdma->channels[i];
		free_irq(tdc->irq, tdc);
		tasklet_kill(&tdc->tasklet);
	}

	pm_runtime_disable(&pdev->dev);
	if (!pm_runtime_status_suspended(&pdev->dev))
		tegra_dma_runtime_suspend(&pdev->dev);

	return 0;
}

static int tegra_dma_runtime_suspend(struct device *dev)
{
	struct tegra_dma *tdma = dev_get_drvdata(dev);
	int i;

	tdma->reg_gen = tdma_read(tdma, TEGRA_APBDMA_GENERAL);
	for (i = 0; i < tdma->chip_data->nr_channels; i++) {
		struct tegra_dma_channel *tdc = &tdma->channels[i];
		struct tegra_dma_channel_regs *ch_reg = &tdc->channel_reg;

		/* Only save the state of DMA channels that are in use */
		if (!tdc->config_init)
			continue;

		ch_reg->csr = tdc_read(tdc, TEGRA_APBDMA_CHAN_CSR);
		ch_reg->ahb_ptr = tdc_read(tdc, TEGRA_APBDMA_CHAN_AHBPTR);
		ch_reg->apb_ptr = tdc_read(tdc, TEGRA_APBDMA_CHAN_APBPTR);
		ch_reg->ahb_seq = tdc_read(tdc, TEGRA_APBDMA_CHAN_AHBSEQ);
		ch_reg->apb_seq = tdc_read(tdc, TEGRA_APBDMA_CHAN_APBSEQ);
		if (tdma->chip_data->support_separate_wcount_reg)
			ch_reg->wcount = tdc_read(tdc,
						  TEGRA_APBDMA_CHAN_WCOUNT);
	}

	clk_disable_unprepare(tdma->dma_clk);

	return 0;
}

static int tegra_dma_runtime_resume(struct device *dev)
{
	struct tegra_dma *tdma = dev_get_drvdata(dev);
	int i, ret;

	ret = clk_prepare_enable(tdma->dma_clk);
	if (ret < 0) {
		dev_err(dev, "clk_enable failed: %d\n", ret);
		return ret;
	}

	tdma_write(tdma, TEGRA_APBDMA_GENERAL, tdma->reg_gen);
	tdma_write(tdma, TEGRA_APBDMA_CONTROL, 0);
	tdma_write(tdma, TEGRA_APBDMA_IRQ_MASK_SET, 0xFFFFFFFFul);

	for (i = 0; i < tdma->chip_data->nr_channels; i++) {
		struct tegra_dma_channel *tdc = &tdma->channels[i];
		struct tegra_dma_channel_regs *ch_reg = &tdc->channel_reg;

		/* Only restore the state of DMA channels that are in use */
		if (!tdc->config_init)
			continue;

		if (tdma->chip_data->support_separate_wcount_reg)
			tdc_write(tdc, TEGRA_APBDMA_CHAN_WCOUNT,
				  ch_reg->wcount);
		tdc_write(tdc, TEGRA_APBDMA_CHAN_APBSEQ, ch_reg->apb_seq);
		tdc_write(tdc, TEGRA_APBDMA_CHAN_APBPTR, ch_reg->apb_ptr);
		tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBSEQ, ch_reg->ahb_seq);
		tdc_write(tdc, TEGRA_APBDMA_CHAN_AHBPTR, ch_reg->ahb_ptr);
		tdc_write(tdc, TEGRA_APBDMA_CHAN_CSR,
			  (ch_reg->csr & ~TEGRA_APBDMA_CSR_ENB));
	}

	return 0;
}

static const struct dev_pm_ops tegra_dma_dev_pm_ops = {
	SET_RUNTIME_PM_OPS(tegra_dma_runtime_suspend, tegra_dma_runtime_resume,
			   NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};

static const struct of_device_id tegra_dma_of_match[] = {
	{
		.compatible = "nvidia,tegra148-apbdma",
		.data = &tegra148_dma_chip_data,
	}, {
		.compatible = "nvidia,tegra114-apbdma",
		.data = &tegra114_dma_chip_data,
	}, {
		.compatible = "nvidia,tegra30-apbdma",
		.data = &tegra30_dma_chip_data,
	}, {
		.compatible = "nvidia,tegra20-apbdma",
		.data = &tegra20_dma_chip_data,
	}, {
	},
};
MODULE_DEVICE_TABLE(of, tegra_dma_of_match);

static struct platform_driver tegra_dmac_driver = {
	.driver = {
		.name	= "tegra-apbdma",
		.pm	= &tegra_dma_dev_pm_ops,
		.of_match_table = tegra_dma_of_match,
	},
	.probe		= tegra_dma_probe,
	.remove		= tegra_dma_remove,
};

module_platform_driver(tegra_dmac_driver);

MODULE_ALIAS("platform:tegra20-apbdma");
MODULE_DESCRIPTION("NVIDIA Tegra APB DMA Controller driver");
MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
MODULE_LICENSE("GPL v2");