// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates.
 * Synopsys DesignWare eDMA core driver
 *
 * Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
 */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/pm_runtime.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/dma/edma.h>
#include <linux/dma-mapping.h>

#include "dw-edma-core.h"
#include "dw-edma-v0-core.h"
#include "../dmaengine.h"
#include "../virt-dma.h"

static inline
struct device *dchan2dev(struct dma_chan *dchan)
{
	return &dchan->dev->device;
}

static inline
struct device *chan2dev(struct dw_edma_chan *chan)
{
	return &chan->vc.chan.dev->device;
}

static inline
struct dw_edma_desc *vd2dw_edma_desc(struct virt_dma_desc *vd)
{
	return container_of(vd, struct dw_edma_desc, vd);
}

static struct dw_edma_burst *dw_edma_alloc_burst(struct dw_edma_chunk *chunk)
{
	struct dw_edma_burst *burst;

	burst = kzalloc(sizeof(*burst), GFP_NOWAIT);
	if (unlikely(!burst))
		return NULL;

	INIT_LIST_HEAD(&burst->list);
	if (chunk->burst) {
		/* Create and add new element into the linked list */
		chunk->bursts_alloc++;
		list_add_tail(&burst->list, &chunk->burst->list);
	} else {
		/* List head */
		chunk->bursts_alloc = 0;
		chunk->burst = burst;
	}

	return burst;
}

static struct dw_edma_chunk *dw_edma_alloc_chunk(struct dw_edma_desc *desc)
{
	struct dw_edma_chan *chan = desc->chan;
	struct dw_edma *dw = chan->chip->dw;
	struct dw_edma_chunk *chunk;

	chunk = kzalloc(sizeof(*chunk), GFP_NOWAIT);
	if (unlikely(!chunk))
		return NULL;

	INIT_LIST_HEAD(&chunk->list);
	chunk->chan = chan;
	/* Toggling change bit (CB) in each chunk, this is a mechanism to
	 * inform the eDMA HW block that this is a new linked list ready
	 * to be consumed.
	 *  - Odd chunks originate CB equal to 0
	 *  - Even chunks originate CB equal to 1
	 */
	chunk->cb = !(desc->chunks_alloc % 2);
	if (chan->dir == EDMA_DIR_WRITE) {
		chunk->ll_region.paddr = dw->ll_region_wr[chan->id].paddr;
		chunk->ll_region.vaddr = dw->ll_region_wr[chan->id].vaddr;
	} else {
		chunk->ll_region.paddr = dw->ll_region_rd[chan->id].paddr;
		chunk->ll_region.vaddr = dw->ll_region_rd[chan->id].vaddr;
	}

	if (desc->chunk) {
		/* Create and add new element into the linked list */
		if (!dw_edma_alloc_burst(chunk)) {
			kfree(chunk);
			return NULL;
		}
		desc->chunks_alloc++;
		list_add_tail(&chunk->list, &desc->chunk->list);
	} else {
		/* List head */
		chunk->burst = NULL;
		desc->chunks_alloc = 0;
		desc->chunk = chunk;
	}

	return chunk;
}

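/*
 * Illustrative note (added for clarity, not from the original sources):
 * because chunk->cb is derived from desc->chunks_alloc above, the change
 * bit simply alternates as chunks are appended, e.g.
 *
 *	chunks_alloc == 0  ->  cb = !(0 % 2) = 1
 *	chunks_alloc == 1  ->  cb = !(1 % 2) = 0
 *	chunks_alloc == 2  ->  cb = !(2 % 2) = 1
 *
 * so two consecutively programmed linked lists always carry opposite CB
 * values, which is how the hardware recognizes that a fresh list is ready.
 */
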
static struct dw_edma_desc *dw_edma_alloc_desc(struct dw_edma_chan *chan)
{
	struct dw_edma_desc *desc;

	desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
	if (unlikely(!desc))
		return NULL;

	desc->chan = chan;
	if (!dw_edma_alloc_chunk(desc)) {
		kfree(desc);
		return NULL;
	}

	return desc;
}

static void dw_edma_free_burst(struct dw_edma_chunk *chunk)
{
	struct dw_edma_burst *child, *_next;

	/* Remove all the list elements */
	list_for_each_entry_safe(child, _next, &chunk->burst->list, list) {
		list_del(&child->list);
		kfree(child);
		chunk->bursts_alloc--;
	}

	/* Remove the list head */
	kfree(child);
	chunk->burst = NULL;
}

static void dw_edma_free_chunk(struct dw_edma_desc *desc)
{
	struct dw_edma_chunk *child, *_next;

	if (!desc->chunk)
		return;

	/* Remove all the list elements */
	list_for_each_entry_safe(child, _next, &desc->chunk->list, list) {
		dw_edma_free_burst(child);
		list_del(&child->list);
		kfree(child);
		desc->chunks_alloc--;
	}

	/* Remove the list head */
	kfree(child);
	desc->chunk = NULL;
}

static void dw_edma_free_desc(struct dw_edma_desc *desc)
{
	dw_edma_free_chunk(desc);
	kfree(desc);
}

static void vchan_free_desc(struct virt_dma_desc *vdesc)
{
	dw_edma_free_desc(vd2dw_edma_desc(vdesc));
}

static void dw_edma_start_transfer(struct dw_edma_chan *chan)
{
	struct dw_edma_chunk *child;
	struct dw_edma_desc *desc;
	struct virt_dma_desc *vd;

	vd = vchan_next_desc(&chan->vc);
	if (!vd)
		return;

	desc = vd2dw_edma_desc(vd);
	if (!desc)
		return;

	child = list_first_entry_or_null(&desc->chunk->list,
					 struct dw_edma_chunk, list);
	if (!child)
		return;

	dw_edma_v0_core_start(child, !desc->xfer_sz);
	desc->xfer_sz += child->ll_region.sz;
	dw_edma_free_burst(child);
	list_del(&child->list);
	kfree(child);
	desc->chunks_alloc--;
}

static int dw_edma_device_config(struct dma_chan *dchan,
				 struct dma_slave_config *config)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);

	memcpy(&chan->config, config, sizeof(*config));
	chan->configured = true;

	return 0;
}

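/*
 * Usage sketch (illustrative only, not part of this driver): a dmaengine
 * client is expected to pass its bus addresses through the standard
 * struct dma_slave_config before preparing transfers, roughly:
 *
 *	struct dma_slave_config cfg = { 0 };
 *
 *	cfg.direction = DMA_MEM_TO_DEV;
 *	cfg.dst_addr = remote_buf_addr;
 *	dmaengine_slave_config(dchan, &cfg);
 *
 * where remote_buf_addr is a hypothetical bus address owned by the caller.
 * The whole structure is copied into chan->config above; src_addr and
 * dst_addr are later used as the default addresses by
 * dw_edma_device_transfer().
 */
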
static int dw_edma_device_pause(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	int err = 0;

	if (!chan->configured)
		err = -EPERM;
	else if (chan->status != EDMA_ST_BUSY)
		err = -EPERM;
	else if (chan->request != EDMA_REQ_NONE)
		err = -EPERM;
	else
		chan->request = EDMA_REQ_PAUSE;

	return err;
}

static int dw_edma_device_resume(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	int err = 0;

	if (!chan->configured) {
		err = -EPERM;
	} else if (chan->status != EDMA_ST_PAUSE) {
		err = -EPERM;
	} else if (chan->request != EDMA_REQ_NONE) {
		err = -EPERM;
	} else {
		chan->status = EDMA_ST_BUSY;
		dw_edma_start_transfer(chan);
	}

	return err;
}

static int dw_edma_device_terminate_all(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	int err = 0;

	if (!chan->configured) {
		/* Do nothing */
	} else if (chan->status == EDMA_ST_PAUSE) {
		chan->status = EDMA_ST_IDLE;
		chan->configured = false;
	} else if (chan->status == EDMA_ST_IDLE) {
		chan->configured = false;
	} else if (dw_edma_v0_core_ch_status(chan) == DMA_COMPLETE) {
		/*
		 * The channel is in a false BUSY state, it probably missed
		 * or lost an interrupt
		 */
		chan->status = EDMA_ST_IDLE;
		chan->configured = false;
	} else if (chan->request > EDMA_REQ_PAUSE) {
		err = -EPERM;
	} else {
		chan->request = EDMA_REQ_STOP;
	}

	return err;
}

static void dw_edma_device_issue_pending(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);
	if (chan->configured && chan->request == EDMA_REQ_NONE &&
	    chan->status == EDMA_ST_IDLE && vchan_issue_pending(&chan->vc)) {
		chan->status = EDMA_ST_BUSY;
		dw_edma_start_transfer(chan);
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}

static enum dma_status
dw_edma_device_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
			 struct dma_tx_state *txstate)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	struct dw_edma_desc *desc;
	struct virt_dma_desc *vd;
	unsigned long flags;
	enum dma_status ret;
	u32 residue = 0;

	ret = dma_cookie_status(dchan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	if (ret == DMA_IN_PROGRESS && chan->status == EDMA_ST_PAUSE)
		ret = DMA_PAUSED;

	if (!txstate)
		return ret;

	spin_lock_irqsave(&chan->vc.lock, flags);
	vd = vchan_find_desc(&chan->vc, cookie);
	if (vd) {
		desc = vd2dw_edma_desc(vd);
		if (desc)
			residue = desc->alloc_sz - desc->xfer_sz;
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);

	dma_set_residue(txstate, residue);

	return ret;
}

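/*
 * Worked example (illustrative, assuming the descriptor layout used above):
 * for a descriptor with alloc_sz = 1 MiB split into, say, four 256 KiB
 * chunks, xfer_sz grows by ll_region.sz every time dw_edma_start_transfer()
 * hands a chunk to the hardware, so the residue reported here steps down
 * 1 MiB -> 768 KiB -> 512 KiB -> 256 KiB -> 0 as the chunks are issued.
 */
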
static struct dma_async_tx_descriptor *
dw_edma_device_transfer(struct dw_edma_transfer *xfer)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(xfer->dchan);
	enum dma_transfer_direction dir = xfer->direction;
	phys_addr_t src_addr, dst_addr;
	struct scatterlist *sg = NULL;
	struct dw_edma_chunk *chunk;
	struct dw_edma_burst *burst;
	struct dw_edma_desc *desc;
	u32 cnt = 0;
	int i;

	if (!chan->configured)
		return NULL;

	switch (chan->config.direction) {
	case DMA_DEV_TO_MEM: /* local DMA */
		if (dir == DMA_DEV_TO_MEM && chan->dir == EDMA_DIR_READ)
			break;
		return NULL;
	case DMA_MEM_TO_DEV: /* local DMA */
		if (dir == DMA_MEM_TO_DEV && chan->dir == EDMA_DIR_WRITE)
			break;
		return NULL;
	default: /* remote DMA */
		if (dir == DMA_MEM_TO_DEV && chan->dir == EDMA_DIR_READ)
			break;
		if (dir == DMA_DEV_TO_MEM && chan->dir == EDMA_DIR_WRITE)
			break;
		return NULL;
	}

	if (xfer->type == EDMA_XFER_CYCLIC) {
		if (!xfer->xfer.cyclic.len || !xfer->xfer.cyclic.cnt)
			return NULL;
	} else if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
		if (xfer->xfer.sg.len < 1)
			return NULL;
	} else if (xfer->type == EDMA_XFER_INTERLEAVED) {
		if (!xfer->xfer.il->numf)
			return NULL;
		if (xfer->xfer.il->numf > 0 && xfer->xfer.il->frame_size > 0)
			return NULL;
	} else {
		return NULL;
	}

	desc = dw_edma_alloc_desc(chan);
	if (unlikely(!desc))
		goto err_alloc;

	chunk = dw_edma_alloc_chunk(desc);
	if (unlikely(!chunk))
		goto err_alloc;

	if (xfer->type == EDMA_XFER_INTERLEAVED) {
		src_addr = xfer->xfer.il->src_start;
		dst_addr = xfer->xfer.il->dst_start;
	} else {
		src_addr = chan->config.src_addr;
		dst_addr = chan->config.dst_addr;
	}

	if (xfer->type == EDMA_XFER_CYCLIC) {
		cnt = xfer->xfer.cyclic.cnt;
	} else if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
		cnt = xfer->xfer.sg.len;
		sg = xfer->xfer.sg.sgl;
	} else if (xfer->type == EDMA_XFER_INTERLEAVED) {
		if (xfer->xfer.il->numf > 0)
			cnt = xfer->xfer.il->numf;
		else
			cnt = xfer->xfer.il->frame_size;
	}

	for (i = 0; i < cnt; i++) {
		if (xfer->type == EDMA_XFER_SCATTER_GATHER && !sg)
			break;

		if (chunk->bursts_alloc == chan->ll_max) {
			chunk = dw_edma_alloc_chunk(desc);
			if (unlikely(!chunk))
				goto err_alloc;
		}

		burst = dw_edma_alloc_burst(chunk);
		if (unlikely(!burst))
			goto err_alloc;

		if (xfer->type == EDMA_XFER_CYCLIC)
			burst->sz = xfer->xfer.cyclic.len;
		else if (xfer->type == EDMA_XFER_SCATTER_GATHER)
			burst->sz = sg_dma_len(sg);
		else if (xfer->type == EDMA_XFER_INTERLEAVED)
			burst->sz = xfer->xfer.il->sgl[i].size;

		chunk->ll_region.sz += burst->sz;
		desc->alloc_sz += burst->sz;

		if (chan->dir == EDMA_DIR_WRITE) {
			burst->sar = src_addr;
			if (xfer->type == EDMA_XFER_CYCLIC) {
				burst->dar = xfer->xfer.cyclic.paddr;
			} else if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
				src_addr += sg_dma_len(sg);
				burst->dar = sg_dma_address(sg);
				/* Unlike the typical assumption by other
				 * drivers/IPs, the peripheral memory isn't
				 * a FIFO memory; in this case it's linear
				 * memory, which is why the source and
				 * destination addresses are increased by
				 * the same amount (the data length)
				 */
			}
		} else {
			burst->dar = dst_addr;
			if (xfer->type == EDMA_XFER_CYCLIC) {
				burst->sar = xfer->xfer.cyclic.paddr;
			} else if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
				dst_addr += sg_dma_len(sg);
				burst->sar = sg_dma_address(sg);
				/* Unlike the typical assumption by other
				 * drivers/IPs, the peripheral memory isn't
				 * a FIFO memory; in this case it's linear
				 * memory, which is why the source and
				 * destination addresses are increased by
				 * the same amount (the data length)
				 */
			}
		}

		if (xfer->type == EDMA_XFER_SCATTER_GATHER) {
			sg = sg_next(sg);
		} else if (xfer->type == EDMA_XFER_INTERLEAVED &&
			   xfer->xfer.il->frame_size > 0) {
			struct dma_interleaved_template *il = xfer->xfer.il;
			struct data_chunk *dc = &il->sgl[i];

			if (il->src_sgl) {
				src_addr += burst->sz;
				src_addr += dmaengine_get_src_icg(il, dc);
			}

			if (il->dst_sgl) {
				dst_addr += burst->sz;
				dst_addr += dmaengine_get_dst_icg(il, dc);
			}
		}
	}

	return vchan_tx_prep(&chan->vc, &desc->vd, xfer->flags);

err_alloc:
	if (desc)
		dw_edma_free_desc(desc);

	return NULL;
}

static struct dma_async_tx_descriptor *
dw_edma_device_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
			     unsigned int len,
			     enum dma_transfer_direction direction,
			     unsigned long flags, void *context)
{
	struct dw_edma_transfer xfer;

	xfer.dchan = dchan;
	xfer.direction = direction;
	xfer.xfer.sg.sgl = sgl;
	xfer.xfer.sg.len = len;
	xfer.flags = flags;
	xfer.type = EDMA_XFER_SCATTER_GATHER;

	return dw_edma_device_transfer(&xfer);
}

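/*
 * Client-side sketch (illustrative, not part of this driver): the callback
 * above is reached through the generic dmaengine helpers, typically:
 *
 *	struct dma_async_tx_descriptor *txd;
 *	dma_cookie_t cookie;
 *
 *	txd = dmaengine_prep_slave_sg(dchan, sgl, nents,
 *				      DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	if (txd) {
 *		cookie = dmaengine_submit(txd);
 *		dma_async_issue_pending(dchan);
 *	}
 *
 * where dchan, sgl and nents are the caller's channel and mapped
 * scatterlist; completion is then tracked via dw_edma_device_tx_status().
 */
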
static struct dma_async_tx_descriptor *
dw_edma_device_prep_dma_cyclic(struct dma_chan *dchan, dma_addr_t paddr,
			       size_t len, size_t count,
			       enum dma_transfer_direction direction,
			       unsigned long flags)
{
	struct dw_edma_transfer xfer;

	xfer.dchan = dchan;
	xfer.direction = direction;
	xfer.xfer.cyclic.paddr = paddr;
	xfer.xfer.cyclic.len = len;
	xfer.xfer.cyclic.cnt = count;
	xfer.flags = flags;
	xfer.type = EDMA_XFER_CYCLIC;

	return dw_edma_device_transfer(&xfer);
}

static struct dma_async_tx_descriptor *
dw_edma_device_prep_interleaved_dma(struct dma_chan *dchan,
				    struct dma_interleaved_template *ilt,
				    unsigned long flags)
{
	struct dw_edma_transfer xfer;

	xfer.dchan = dchan;
	xfer.direction = ilt->dir;
	xfer.xfer.il = ilt;
	xfer.flags = flags;
	xfer.type = EDMA_XFER_INTERLEAVED;

	return dw_edma_device_transfer(&xfer);
}

static void dw_edma_done_interrupt(struct dw_edma_chan *chan)
{
	struct dw_edma_desc *desc;
	struct virt_dma_desc *vd;
	unsigned long flags;

	dw_edma_v0_core_clear_done_int(chan);

	spin_lock_irqsave(&chan->vc.lock, flags);
	vd = vchan_next_desc(&chan->vc);
	if (vd) {
		switch (chan->request) {
		case EDMA_REQ_NONE:
			desc = vd2dw_edma_desc(vd);
			if (desc->chunks_alloc) {
				chan->status = EDMA_ST_BUSY;
				dw_edma_start_transfer(chan);
			} else {
				vchan_cookie_complete(vd);
				chan->status = EDMA_ST_IDLE;
			}
			break;

		case EDMA_REQ_STOP:
			vchan_cookie_complete(vd);
			chan->request = EDMA_REQ_NONE;
			chan->status = EDMA_ST_IDLE;
			break;

		case EDMA_REQ_PAUSE:
			chan->request = EDMA_REQ_NONE;
			chan->status = EDMA_ST_PAUSE;
			break;

		default:
			break;
		}
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}

static void dw_edma_abort_interrupt(struct dw_edma_chan *chan)
{
	struct virt_dma_desc *vd;
	unsigned long flags;

	dw_edma_v0_core_clear_abort_int(chan);

	spin_lock_irqsave(&chan->vc.lock, flags);
	vd = vchan_next_desc(&chan->vc);
	if (vd)
		vchan_cookie_complete(vd);
	spin_unlock_irqrestore(&chan->vc.lock, flags);
	chan->request = EDMA_REQ_NONE;
	chan->status = EDMA_ST_IDLE;
}

static irqreturn_t dw_edma_interrupt(int irq, void *data, bool write)
{
	struct dw_edma_irq *dw_irq = data;
	struct dw_edma *dw = dw_irq->dw;
	unsigned long total, pos, val;
	unsigned long off;
	u32 mask;

	if (write) {
		total = dw->wr_ch_cnt;
		off = 0;
		mask = dw_irq->wr_mask;
	} else {
		total = dw->rd_ch_cnt;
		off = dw->wr_ch_cnt;
		mask = dw_irq->rd_mask;
	}

	val = dw_edma_v0_core_status_done_int(dw, write ?
					      EDMA_DIR_WRITE : EDMA_DIR_READ);
	val &= mask;
	for_each_set_bit(pos, &val, total) {
		struct dw_edma_chan *chan = &dw->chan[pos + off];

		dw_edma_done_interrupt(chan);
	}

	val = dw_edma_v0_core_status_abort_int(dw, write ?
					       EDMA_DIR_WRITE : EDMA_DIR_READ);
	val &= mask;
	for_each_set_bit(pos, &val, total) {
		struct dw_edma_chan *chan = &dw->chan[pos + off];

		dw_edma_abort_interrupt(chan);
	}

	return IRQ_HANDLED;
}

static inline irqreturn_t dw_edma_interrupt_write(int irq, void *data)
{
	return dw_edma_interrupt(irq, data, true);
}

static inline irqreturn_t dw_edma_interrupt_read(int irq, void *data)
{
	return dw_edma_interrupt(irq, data, false);
}

static irqreturn_t dw_edma_interrupt_common(int irq, void *data)
{
	dw_edma_interrupt(irq, data, true);
	dw_edma_interrupt(irq, data, false);

	return IRQ_HANDLED;
}

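/*
 * Illustration of the mapping used above (added for clarity): with
 * total = 4 write channels and a masked done status of val = 0b0101,
 * for_each_set_bit() visits pos = 0 and pos = 2, so dw->chan[0] and
 * dw->chan[2] are serviced; for a read interrupt the same positions are
 * offset by off = dw->wr_ch_cnt into the shared channel array.
 */
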
static int dw_edma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);

	if (chan->status != EDMA_ST_IDLE)
		return -EBUSY;

	pm_runtime_get(chan->chip->dev);

	return 0;
}

static void dw_edma_free_chan_resources(struct dma_chan *dchan)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(5000);
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);
	int ret;

	while (time_before(jiffies, timeout)) {
		ret = dw_edma_device_terminate_all(dchan);
		if (!ret)
			break;

		if (time_after_eq(jiffies, timeout))
			return;

		cpu_relax();
	}

	pm_runtime_put(chan->chip->dev);
}

static int dw_edma_channel_setup(struct dw_edma_chip *chip, bool write,
				 u32 wr_alloc, u32 rd_alloc)
{
	struct dw_edma_region *dt_region;
	struct device *dev = chip->dev;
	struct dw_edma *dw = chip->dw;
	struct dw_edma_chan *chan;
	struct dw_edma_irq *irq;
	struct dma_device *dma;
	u32 alloc, off_alloc;
	u32 i, j, cnt;
	int err = 0;
	u32 pos;

	if (write) {
		i = 0;
		cnt = dw->wr_ch_cnt;
		dma = &dw->wr_edma;
		alloc = wr_alloc;
		off_alloc = 0;
	} else {
		i = dw->wr_ch_cnt;
		cnt = dw->rd_ch_cnt;
		dma = &dw->rd_edma;
		alloc = rd_alloc;
		off_alloc = wr_alloc;
	}

	INIT_LIST_HEAD(&dma->channels);
	for (j = 0; (alloc || dw->nr_irqs == 1) && j < cnt; j++, i++) {
		chan = &dw->chan[i];

		dt_region = devm_kzalloc(dev, sizeof(*dt_region), GFP_KERNEL);
		if (!dt_region)
			return -ENOMEM;

		chan->vc.chan.private = dt_region;

		chan->chip = chip;
		chan->id = j;
		chan->dir = write ? EDMA_DIR_WRITE : EDMA_DIR_READ;
		chan->configured = false;
		chan->request = EDMA_REQ_NONE;
		chan->status = EDMA_ST_IDLE;

		if (write)
			chan->ll_max = (dw->ll_region_wr[j].sz / EDMA_LL_SZ);
		else
			chan->ll_max = (dw->ll_region_rd[j].sz / EDMA_LL_SZ);

		dev_vdbg(dev, "L. List:\tChannel %s[%u] max_cnt=%u\n",
			 write ? "write" : "read", j, chan->ll_max);

		if (dw->nr_irqs == 1)
			pos = 0;
		else
			pos = off_alloc + (j % alloc);

		irq = &dw->irq[pos];

		if (write)
			irq->wr_mask |= BIT(j);
		else
			irq->rd_mask |= BIT(j);

		irq->dw = dw;
		memcpy(&chan->msi, &irq->msi, sizeof(chan->msi));

		dev_vdbg(dev, "MSI:\t\tChannel %s[%u] addr=0x%.8x%.8x, data=0x%.8x\n",
			 write ? "write" : "read", j,
			 chan->msi.address_hi, chan->msi.address_lo,
			 chan->msi.data);

		chan->vc.desc_free = vchan_free_desc;
		vchan_init(&chan->vc, dma);

		if (write) {
			dt_region->paddr = dw->dt_region_wr[j].paddr;
			dt_region->vaddr = dw->dt_region_wr[j].vaddr;
			dt_region->sz = dw->dt_region_wr[j].sz;
		} else {
			dt_region->paddr = dw->dt_region_rd[j].paddr;
			dt_region->vaddr = dw->dt_region_rd[j].vaddr;
			dt_region->sz = dw->dt_region_rd[j].sz;
		}

		dw_edma_v0_core_device_config(chan);
	}

	/* Set DMA channel capabilities */
	dma_cap_zero(dma->cap_mask);
	dma_cap_set(DMA_SLAVE, dma->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma->cap_mask);
	dma_cap_set(DMA_PRIVATE, dma->cap_mask);
	dma_cap_set(DMA_INTERLEAVE, dma->cap_mask);
	dma->directions = BIT(write ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV);
	dma->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dma->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dma->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

	/* Set DMA channel callbacks */
	dma->dev = chip->dev;
	dma->device_alloc_chan_resources = dw_edma_alloc_chan_resources;
	dma->device_free_chan_resources = dw_edma_free_chan_resources;
	dma->device_config = dw_edma_device_config;
	dma->device_pause = dw_edma_device_pause;
	dma->device_resume = dw_edma_device_resume;
	dma->device_terminate_all = dw_edma_device_terminate_all;
	dma->device_issue_pending = dw_edma_device_issue_pending;
	dma->device_tx_status = dw_edma_device_tx_status;
	dma->device_prep_slave_sg = dw_edma_device_prep_slave_sg;
	dma->device_prep_dma_cyclic = dw_edma_device_prep_dma_cyclic;
	dma->device_prep_interleaved_dma = dw_edma_device_prep_interleaved_dma;

	dma_set_max_seg_size(dma->dev, U32_MAX);

	/* Register DMA device */
	err = dma_async_device_register(dma);

	return err;
}

static inline void dw_edma_dec_irq_alloc(int *nr_irqs, u32 *alloc, u16 cnt)
{
	if (*nr_irqs && *alloc < cnt) {
		(*alloc)++;
		(*nr_irqs)--;
	}
}

static inline void dw_edma_add_irq_mask(u32 *mask, u32 alloc, u16 cnt)
{
	while (*mask * alloc < cnt)
		(*mask)++;
}

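/*
 * Worked example (illustrative): with dw->nr_irqs = 4, wr_ch_cnt = 8 and
 * rd_ch_cnt = 8, dw_edma_irq_request() below alternates
 * dw_edma_dec_irq_alloc() calls until the vectors run out, giving
 * wr_alloc = 2 and rd_alloc = 2.  dw_edma_add_irq_mask() then raises each
 * mask until mask * alloc >= cnt, i.e. wr_mask = rd_mask = 4, so every
 * interrupt vector ends up covering four channels of its direction.
 */
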
static int dw_edma_irq_request(struct dw_edma_chip *chip,
			       u32 *wr_alloc, u32 *rd_alloc)
{
	struct device *dev = chip->dev;
	struct dw_edma *dw = chip->dw;
	u32 wr_mask = 1;
	u32 rd_mask = 1;
	int i, err = 0;
	u32 ch_cnt;
	int irq;

	ch_cnt = dw->wr_ch_cnt + dw->rd_ch_cnt;

	if (dw->nr_irqs < 1)
		return -EINVAL;

	if (dw->nr_irqs == 1) {
		/* Common IRQ shared among all channels */
		irq = dw->ops->irq_vector(dev, 0);
		err = request_irq(irq, dw_edma_interrupt_common,
				  IRQF_SHARED, dw->name, &dw->irq[0]);
		if (err) {
			dw->nr_irqs = 0;
			return err;
		}

		if (irq_get_msi_desc(irq))
			get_cached_msi_msg(irq, &dw->irq[0].msi);
	} else {
		/* Distribute IRQs equally among all channels */
		int tmp = dw->nr_irqs;

		while (tmp && (*wr_alloc + *rd_alloc) < ch_cnt) {
			dw_edma_dec_irq_alloc(&tmp, wr_alloc, dw->wr_ch_cnt);
			dw_edma_dec_irq_alloc(&tmp, rd_alloc, dw->rd_ch_cnt);
		}

		dw_edma_add_irq_mask(&wr_mask, *wr_alloc, dw->wr_ch_cnt);
		dw_edma_add_irq_mask(&rd_mask, *rd_alloc, dw->rd_ch_cnt);

		for (i = 0; i < (*wr_alloc + *rd_alloc); i++) {
			irq = dw->ops->irq_vector(dev, i);
			err = request_irq(irq,
					  i < *wr_alloc ?
						dw_edma_interrupt_write :
						dw_edma_interrupt_read,
					  IRQF_SHARED, dw->name,
					  &dw->irq[i]);
			if (err) {
				dw->nr_irqs = i;
				return err;
			}

			if (irq_get_msi_desc(irq))
				get_cached_msi_msg(irq, &dw->irq[i].msi);
		}

		dw->nr_irqs = i;
	}

	return err;
}

int dw_edma_probe(struct dw_edma_chip *chip)
{
	struct device *dev = chip->dev;
	struct dw_edma *dw = chip->dw;
	u32 wr_alloc = 0;
	u32 rd_alloc = 0;
	int i, err;

	if (!dw || !dw->irq || !dw->ops || !dw->ops->irq_vector)
		return -EINVAL;

	raw_spin_lock_init(&dw->lock);

	dw->wr_ch_cnt = min_t(u16, dw->wr_ch_cnt,
			      dw_edma_v0_core_ch_count(dw, EDMA_DIR_WRITE));
	dw->wr_ch_cnt = min_t(u16, dw->wr_ch_cnt, EDMA_MAX_WR_CH);

	dw->rd_ch_cnt = min_t(u16, dw->rd_ch_cnt,
			      dw_edma_v0_core_ch_count(dw, EDMA_DIR_READ));
	dw->rd_ch_cnt = min_t(u16, dw->rd_ch_cnt, EDMA_MAX_RD_CH);

	if (!dw->wr_ch_cnt && !dw->rd_ch_cnt)
		return -EINVAL;

	dev_vdbg(dev, "Channels:\twrite=%d, read=%d\n",
		 dw->wr_ch_cnt, dw->rd_ch_cnt);

	/* Allocate channels */
	dw->chan = devm_kcalloc(dev, dw->wr_ch_cnt + dw->rd_ch_cnt,
				sizeof(*dw->chan), GFP_KERNEL);
	if (!dw->chan)
		return -ENOMEM;

	snprintf(dw->name, sizeof(dw->name), "dw-edma-core:%d", chip->id);

	/* Disable eDMA, only to establish the ideal initial conditions */
	dw_edma_v0_core_off(dw);

	/* Request IRQs */
	err = dw_edma_irq_request(chip, &wr_alloc, &rd_alloc);
	if (err)
		return err;

	/* Setup write channels */
	err = dw_edma_channel_setup(chip, true, wr_alloc, rd_alloc);
	if (err)
		goto err_irq_free;

	/* Setup read channels */
	err = dw_edma_channel_setup(chip, false, wr_alloc, rd_alloc);
	if (err)
		goto err_irq_free;

	/* Power management */
	pm_runtime_enable(dev);

	/* Turn debugfs on */
	dw_edma_v0_core_debugfs_on(chip);

	return 0;

err_irq_free:
	for (i = (dw->nr_irqs - 1); i >= 0; i--)
		free_irq(dw->ops->irq_vector(dev, i), &dw->irq[i]);

	return err;
}
EXPORT_SYMBOL_GPL(dw_edma_probe);

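/*
 * Probe usage sketch (illustrative and heavily simplified): a bus glue
 * driver (e.g. the PCIe back-end) is expected to fill in a struct
 * dw_edma_chip and the struct dw_edma it points to before calling
 * dw_edma_probe().  Only fields referenced in this file are shown; the
 * real layouts live in dw-edma-core.h and include/linux/dma/edma.h:
 *
 *	chip->dev = &pdev->dev;
 *	chip->id = pdev->devfn;
 *	chip->dw = dw;
 *	dw->nr_irqs = nr_vectors;
 *	dw->ops = &glue_ops;
 *	dw->wr_ch_cnt = wr_ch;
 *	dw->rd_ch_cnt = rd_ch;
 *
 *	err = dw_edma_probe(chip);
 *
 * where pdev, nr_vectors, glue_ops (which must provide ->irq_vector()),
 * wr_ch and rd_ch are the glue driver's own data, and the ll/dt regions
 * and dw->irq array must be set up as well.  dw_edma_remove() below undoes
 * the registration on driver removal.
 */
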
int dw_edma_remove(struct dw_edma_chip *chip)
{
	struct dw_edma_chan *chan, *_chan;
	struct device *dev = chip->dev;
	struct dw_edma *dw = chip->dw;
	int i;

	/* Disable eDMA */
	dw_edma_v0_core_off(dw);

	/* Free irqs */
	for (i = (dw->nr_irqs - 1); i >= 0; i--)
		free_irq(dw->ops->irq_vector(dev, i), &dw->irq[i]);

	/* Power management */
	pm_runtime_disable(dev);

	/* Deregister eDMA device */
	dma_async_device_unregister(&dw->wr_edma);
	list_for_each_entry_safe(chan, _chan, &dw->wr_edma.channels,
				 vc.chan.device_node) {
		tasklet_kill(&chan->vc.task);
		list_del(&chan->vc.chan.device_node);
	}

	dma_async_device_unregister(&dw->rd_edma);
	list_for_each_entry_safe(chan, _chan, &dw->rd_edma.channels,
				 vc.chan.device_node) {
		tasklet_kill(&chan->vc.task);
		list_del(&chan->vc.chan.device_node);
	}

	/* Turn debugfs off */
	dw_edma_v0_core_debugfs_off(chip);

	return 0;
}
EXPORT_SYMBOL_GPL(dw_edma_remove);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare eDMA controller core driver");
MODULE_AUTHOR("Gustavo Pimentel <gustavo.pimentel@synopsys.com>");