// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * SiFive FU540 Platform DMA driver
 * Copyright (C) 2019 SiFive
 *
 * Based partially on:
 * - drivers/dma/fsl-edma.c
 * - drivers/dma/dw-edma/
 * - drivers/dma/pxa-dma.c
 *
 * See the following sources for further documentation:
 * - Chapter 12 "Platform DMA Engine (PDMA)" of
 *   SiFive FU540-C000 v1.0
 *   https://static.dev.sifive.com/FU540-C000-v1.0.pdf
 */
#include <linux/module.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/mod_devicetable.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/slab.h>

#include "sf-pdma.h"
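/*
 * The PDMA transfer size, address, and residue registers are 64 bits
 * wide. On configurations without a native readq()/writeq() (e.g. 32-bit
 * builds), fall back to two 32-bit accesses per register; note that the
 * two halves are not atomic with respect to the hardware.
 */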
#ifndef readq
static inline unsigned long long readq(void __iomem *addr)
{
        return readl(addr) | (((unsigned long long)readl(addr + 4)) << 32LL);
}
#endif

#ifndef writeq
static inline void writeq(unsigned long long v, void __iomem *addr)
{
        writel(lower_32_bits(v), addr);
        writel(upper_32_bits(v), addr + 4);
}
#endif
static inline struct sf_pdma_chan *to_sf_pdma_chan(struct dma_chan *dchan)
{
        return container_of(dchan, struct sf_pdma_chan, vchan.chan);
}

static inline struct sf_pdma_desc *to_sf_pdma_desc(struct virt_dma_desc *vd)
{
        return container_of(vd, struct sf_pdma_desc, vdesc);
}
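/*
 * Descriptors are allocated with GFP_NOWAIT because dmaengine prep
 * callbacks such as sf_pdma_prep_dma_memcpy() may be invoked from atomic
 * context, where a sleeping allocation is not allowed.
 */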
static struct sf_pdma_desc *sf_pdma_alloc_desc(struct sf_pdma_chan *chan)
{
        struct sf_pdma_desc *desc;

        desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
        if (!desc)
                return NULL;

        desc->chan = chan;

        return desc;
}
static void sf_pdma_fill_desc(struct sf_pdma_desc *desc,
                              u64 dst, u64 src, u64 size)
{
        desc->xfer_type = PDMA_FULL_SPEED;
        desc->xfer_size = size;
        desc->dst_addr = dst;
        desc->src_addr = src;
}
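/*
 * A channel is claimed for exclusive use by writing PDMA_CLAIM_MASK to
 * its control register (see sf_pdma_alloc_chan_resources()) and released
 * by clearing the control register below; the precise bit layout is
 * given by the PDMA_* masks in sf-pdma.h.
 */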
static void sf_pdma_disclaim_chan(struct sf_pdma_chan *chan)
{
        struct pdma_regs *regs = &chan->regs;

        writel(PDMA_CLEAR_CTRL, regs->ctrl);
}
static struct dma_async_tx_descriptor *
sf_pdma_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dest, dma_addr_t src,
                        size_t len, unsigned long flags)
{
        struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
        struct sf_pdma_desc *desc;
        unsigned long iflags;

        if (chan && (!len || !dest || !src)) {
                dev_err(chan->pdma->dma_dev.dev,
                        "Please check dma len, dest, src!\n");
                return NULL;
        }

        desc = sf_pdma_alloc_desc(chan);
        if (!desc)
                return NULL;

        desc->dirn = DMA_MEM_TO_MEM;
        desc->async_tx = vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);

        spin_lock_irqsave(&chan->vchan.lock, iflags);
        sf_pdma_fill_desc(desc, dest, src, len);
        spin_unlock_irqrestore(&chan->vchan.lock, iflags);

        return desc->async_tx;
}
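/*
 * Illustrative only, not part of this driver: a dmaengine client would
 * exercise the memcpy path above roughly as follows (error handling
 * omitted, channel acquisition depends on the client):
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
 *				       DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 *
 * The prepared descriptor does not reach the hardware until
 * sf_pdma_issue_pending() runs.
 */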
static int sf_pdma_slave_config(struct dma_chan *dchan,
                                struct dma_slave_config *cfg)
{
        struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);

        memcpy(&chan->cfg, cfg, sizeof(*cfg));

        return 0;
}
static int sf_pdma_alloc_chan_resources(struct dma_chan *dchan)
{
        struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
        struct pdma_regs *regs = &chan->regs;

        dma_cookie_init(dchan);
        writel(PDMA_CLAIM_MASK, regs->ctrl);

        return 0;
}
static void sf_pdma_disable_request(struct sf_pdma_chan *chan)
{
        struct pdma_regs *regs = &chan->regs;

        writel(readl(regs->ctrl) & ~PDMA_RUN_MASK, regs->ctrl);
}
static void sf_pdma_free_chan_resources(struct dma_chan *dchan)
{
        struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
        unsigned long flags;
        LIST_HEAD(head);

        spin_lock_irqsave(&chan->vchan.lock, flags);
        sf_pdma_disable_request(chan);
        kfree(chan->desc);
        chan->desc = NULL;
        vchan_get_all_descriptors(&chan->vchan, &head);
        sf_pdma_disclaim_chan(chan);
        spin_unlock_irqrestore(&chan->vchan.lock, flags);
        vchan_dma_desc_free_list(&chan->vchan, &head);
}
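/*
 * Residue lookup handles three cases: an unknown or already-completed
 * cookie reports zero, the transfer currently on the hardware reads the
 * channel's residue register, and any other still-queued descriptor has
 * not started, so its full xfer_size is outstanding.
 */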
static size_t sf_pdma_desc_residue(struct sf_pdma_chan *chan,
                                   dma_cookie_t cookie)
{
        struct virt_dma_desc *vd = NULL;
        struct pdma_regs *regs = &chan->regs;
        unsigned long flags;
        u64 residue = 0;
        struct sf_pdma_desc *desc;
        struct dma_async_tx_descriptor *tx = NULL;

        spin_lock_irqsave(&chan->vchan.lock, flags);

        list_for_each_entry(vd, &chan->vchan.desc_submitted, node)
                if (vd->tx.cookie == cookie)
                        tx = &vd->tx;

        if (!tx || cookie == tx->chan->completed_cookie)
                goto out;

        if (cookie == tx->cookie) {
                residue = readq(regs->residue);
        } else {
                vd = vchan_find_desc(&chan->vchan, cookie);
                if (!vd)
                        goto out;
                desc = to_sf_pdma_desc(vd);
                residue = desc->xfer_size;
        }
out:
        spin_unlock_irqrestore(&chan->vchan.lock, flags);
        return residue;
}
static enum dma_status
sf_pdma_tx_status(struct dma_chan *dchan,
                  dma_cookie_t cookie,
                  struct dma_tx_state *txstate)
{
        struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
        enum dma_status status;

        status = dma_cookie_status(dchan, cookie, txstate);

        if (txstate && status != DMA_ERROR)
                dma_set_residue(txstate, sf_pdma_desc_residue(chan, cookie));

        return status;
}
static int sf_pdma_terminate_all(struct dma_chan *dchan)
{
        struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
        unsigned long flags;
        LIST_HEAD(head);

        spin_lock_irqsave(&chan->vchan.lock, flags);
        sf_pdma_disable_request(chan);
        kfree(chan->desc);
        chan->desc = NULL;
        chan->xfer_err = false;
        vchan_get_all_descriptors(&chan->vchan, &head);
        spin_unlock_irqrestore(&chan->vchan.lock, flags);
        vchan_dma_desc_free_list(&chan->vchan, &head);

        return 0;
}
static void sf_pdma_enable_request(struct sf_pdma_chan *chan)
{
        struct pdma_regs *regs = &chan->regs;
        u32 v;

        v = PDMA_CLAIM_MASK |
            PDMA_ENABLE_DONE_INT_MASK |
            PDMA_ENABLE_ERR_INT_MASK |
            PDMA_RUN_MASK;

        writel(v, regs->ctrl);
}
static struct sf_pdma_desc *sf_pdma_get_first_pending_desc(struct sf_pdma_chan *chan)
{
        struct virt_dma_chan *vchan = &chan->vchan;
        struct virt_dma_desc *vdesc;

        if (list_empty(&vchan->desc_issued))
                return NULL;

        vdesc = list_first_entry(&vchan->desc_issued, struct virt_dma_desc, node);

        return container_of(vdesc, struct sf_pdma_desc, vdesc);
}
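/*
 * Program one descriptor into the channel registers and start it. The
 * control-register write in sf_pdma_enable_request() must come last:
 * it sets the run bit that starts the transfer described by the
 * type/size/address registers written here.
 */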
static void sf_pdma_xfer_desc(struct sf_pdma_chan *chan)
{
        struct sf_pdma_desc *desc = chan->desc;
        struct pdma_regs *regs = &chan->regs;

        if (!desc) {
                dev_err(chan->pdma->dma_dev.dev, "NULL desc.\n");
                return;
        }

        writel(desc->xfer_type, regs->xfer_type);
        writeq(desc->xfer_size, regs->xfer_size);
        writeq(desc->dst_addr, regs->dst_addr);
        writeq(desc->src_addr, regs->src_addr);

        chan->status = DMA_IN_PROGRESS;
        sf_pdma_enable_request(chan);
}
static void sf_pdma_issue_pending(struct dma_chan *dchan)
{
        struct sf_pdma_chan *chan = to_sf_pdma_chan(dchan);
        unsigned long flags;

        spin_lock_irqsave(&chan->vchan.lock, flags);

        if (!chan->desc && vchan_issue_pending(&chan->vchan)) {
                /* vchan_issue_pending() filled the issued list, so the lookup cannot fail */
                chan->desc = sf_pdma_get_first_pending_desc(chan);
                sf_pdma_xfer_desc(chan);
        }

        spin_unlock_irqrestore(&chan->vchan.lock, flags);
}
static void sf_pdma_free_desc(struct virt_dma_desc *vdesc)
{
        struct sf_pdma_desc *desc;

        desc = to_sf_pdma_desc(vdesc);
        desc->in_use = false;
}
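/*
 * Done bottom half: clears any recorded error state from a successful
 * retry, completes the finished descriptor's cookie, and starts the next
 * issued descriptor if one is pending.
 */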
static void sf_pdma_donebh_tasklet(struct tasklet_struct *t)
{
        struct sf_pdma_chan *chan = from_tasklet(chan, t, done_tasklet);
        unsigned long flags;

        spin_lock_irqsave(&chan->lock, flags);
        if (chan->xfer_err) {
                chan->retries = MAX_RETRY;
                chan->status = DMA_COMPLETE;
                chan->xfer_err = false;
        }
        spin_unlock_irqrestore(&chan->lock, flags);

        spin_lock_irqsave(&chan->vchan.lock, flags);
        list_del(&chan->desc->vdesc.node);
        vchan_cookie_complete(&chan->desc->vdesc);

        chan->desc = sf_pdma_get_first_pending_desc(chan);
        if (chan->desc)
                sf_pdma_xfer_desc(chan);

        spin_unlock_irqrestore(&chan->vchan.lock, flags);
}
static void sf_pdma_errbh_tasklet(struct tasklet_struct *t)
{
        struct sf_pdma_chan *chan = from_tasklet(chan, t, err_tasklet);
        struct sf_pdma_desc *desc = chan->desc;
        unsigned long flags;

        spin_lock_irqsave(&chan->lock, flags);
        if (chan->retries <= 0) {
                /* fail to recover */
                spin_unlock_irqrestore(&chan->lock, flags);
                dmaengine_desc_get_callback_invoke(desc->async_tx, NULL);
        } else {
                /* retry */
                chan->retries--;
                chan->xfer_err = true;
                chan->status = DMA_ERROR;

                sf_pdma_enable_request(chan);
                spin_unlock_irqrestore(&chan->lock, flags);
        }
}
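/*
 * DONE interrupt handler. The hardware can raise DONE with a non-zero
 * residue; in that case the remaining bytes are resubmitted directly from
 * the ISR by advancing both addresses past the completed portion, and
 * only a zero residue hands completion off to the done tasklet.
 */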
static irqreturn_t sf_pdma_done_isr(int irq, void *dev_id)
{
        struct sf_pdma_chan *chan = dev_id;
        struct pdma_regs *regs = &chan->regs;
        unsigned long flags;
        u64 residue;

        spin_lock_irqsave(&chan->vchan.lock, flags);
        writel((readl(regs->ctrl)) & ~PDMA_DONE_STATUS_MASK, regs->ctrl);
        residue = readq(regs->residue);

        if (!residue) {
                tasklet_hi_schedule(&chan->done_tasklet);
        } else {
                /* submit next transaction if possible */
                struct sf_pdma_desc *desc = chan->desc;

                desc->src_addr += desc->xfer_size - residue;
                desc->dst_addr += desc->xfer_size - residue;
                desc->xfer_size = residue;

                sf_pdma_xfer_desc(chan);
        }

        spin_unlock_irqrestore(&chan->vchan.lock, flags);

        return IRQ_HANDLED;
}
static irqreturn_t sf_pdma_err_isr(int irq, void *dev_id)
{
        struct sf_pdma_chan *chan = dev_id;
        struct pdma_regs *regs = &chan->regs;
        unsigned long flags;

        spin_lock_irqsave(&chan->lock, flags);
        writel((readl(regs->ctrl)) & ~PDMA_ERR_STATUS_MASK, regs->ctrl);
        spin_unlock_irqrestore(&chan->lock, flags);

        tasklet_schedule(&chan->err_tasklet);

        return IRQ_HANDLED;
}
/**
 * sf_pdma_irq_init() - Init PDMA IRQ handlers
 * @pdev: pointer to the platform_device
 * @pdma: pointer to the PDMA engine; the caller should check for NULL
 *
 * Initialize the DONE and ERROR interrupt handlers for each channel. The
 * caller must make sure the pointers passed in are non-NULL. This function
 * should be called only once, during device probe.
 *
 * Context: Any context.
 *
 * Return:
 * * 0       - all IRQ handlers initialized
 * * -EINVAL - failed to request an IRQ
 */
static int sf_pdma_irq_init(struct platform_device *pdev, struct sf_pdma *pdma)
{
        int irq, r, i;
        struct sf_pdma_chan *chan;

        for (i = 0; i < pdma->n_chans; i++) {
                chan = &pdma->chans[i];

                irq = platform_get_irq(pdev, i * 2);
                if (irq < 0) {
                        dev_err(&pdev->dev, "ch(%d) Can't get done irq.\n", i);
                        return -EINVAL;
                }

                r = devm_request_irq(&pdev->dev, irq, sf_pdma_done_isr, 0,
                                     dev_name(&pdev->dev), (void *)chan);
                if (r) {
                        dev_err(&pdev->dev, "Fail to attach done ISR: %d\n", r);
                        return -EINVAL;
                }

                chan->txirq = irq;

                irq = platform_get_irq(pdev, (i * 2) + 1);
                if (irq < 0) {
                        dev_err(&pdev->dev, "ch(%d) Can't get err irq.\n", i);
                        return -EINVAL;
                }

                r = devm_request_irq(&pdev->dev, irq, sf_pdma_err_isr, 0,
                                     dev_name(&pdev->dev), (void *)chan);
                if (r) {
                        dev_err(&pdev->dev, "Fail to attach err ISR: %d\n", r);
                        return -EINVAL;
                }

                chan->errirq = irq;
        }

        return 0;
}
/**
 * sf_pdma_setup_chans() - Init settings of each channel
 * @pdma: pointer to the PDMA engine; the caller should check for NULL
 *
 * Initialize all data structures and register bases. The caller must make
 * sure the pointer passed in is non-NULL. This function should be called
 * only once, during device probe.
 *
 * Context: Any context.
 *
 * Return: none
 */
static void sf_pdma_setup_chans(struct sf_pdma *pdma)
{
        int i;
        struct sf_pdma_chan *chan;

        INIT_LIST_HEAD(&pdma->dma_dev.channels);

        for (i = 0; i < pdma->n_chans; i++) {
                chan = &pdma->chans[i];

                chan->regs.ctrl =
                        SF_PDMA_REG_BASE(i) + PDMA_CTRL;
                chan->regs.xfer_type =
                        SF_PDMA_REG_BASE(i) + PDMA_XFER_TYPE;
                chan->regs.xfer_size =
                        SF_PDMA_REG_BASE(i) + PDMA_XFER_SIZE;
                chan->regs.dst_addr =
                        SF_PDMA_REG_BASE(i) + PDMA_DST_ADDR;
                chan->regs.src_addr =
                        SF_PDMA_REG_BASE(i) + PDMA_SRC_ADDR;
                chan->regs.act_type =
                        SF_PDMA_REG_BASE(i) + PDMA_ACT_TYPE;
                chan->regs.residue =
                        SF_PDMA_REG_BASE(i) + PDMA_REMAINING_BYTE;
                chan->regs.cur_dst_addr =
                        SF_PDMA_REG_BASE(i) + PDMA_CUR_DST_ADDR;
                chan->regs.cur_src_addr =
                        SF_PDMA_REG_BASE(i) + PDMA_CUR_SRC_ADDR;

                chan->pdma = pdma;
                chan->pm_state = RUNNING;
                chan->xfer_err = false;
                spin_lock_init(&chan->lock);

                chan->vchan.desc_free = sf_pdma_free_desc;
                vchan_init(&chan->vchan, &pdma->dma_dev);

                writel(PDMA_CLEAR_CTRL, chan->regs.ctrl);

                tasklet_setup(&chan->done_tasklet, sf_pdma_donebh_tasklet);
                tasklet_setup(&chan->err_tasklet, sf_pdma_errbh_tasklet);
        }
}
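/*
 * Note on the capability setup in probe below: copy_align is an
 * enum dmaengine_alignment value, so copy_align = 2 advertises 4-byte
 * (1 << 2) alignment for memcpy transfers.
 */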
static int sf_pdma_probe(struct platform_device *pdev)
{
        struct sf_pdma *pdma;
        struct sf_pdma_chan *chan;
        struct resource *res;
        int len, chans;
        int ret = 0;
        const enum dma_slave_buswidth widths =
                DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
                DMA_SLAVE_BUSWIDTH_4_BYTES | DMA_SLAVE_BUSWIDTH_8_BYTES |
                DMA_SLAVE_BUSWIDTH_16_BYTES | DMA_SLAVE_BUSWIDTH_32_BYTES |
                DMA_SLAVE_BUSWIDTH_64_BYTES;

        chans = PDMA_NR_CH;
        len = sizeof(*pdma) + sizeof(*chan) * chans;
        pdma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
        if (!pdma)
                return -ENOMEM;

        pdma->n_chans = chans;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        pdma->membase = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(pdma->membase))
                return PTR_ERR(pdma->membase);

        ret = sf_pdma_irq_init(pdev, pdma);
        if (ret)
                return ret;

        sf_pdma_setup_chans(pdma);

        pdma->dma_dev.dev = &pdev->dev;

        /* Setup capability */
        dma_cap_set(DMA_MEMCPY, pdma->dma_dev.cap_mask);
        pdma->dma_dev.copy_align = 2;
        pdma->dma_dev.src_addr_widths = widths;
        pdma->dma_dev.dst_addr_widths = widths;
        pdma->dma_dev.directions = BIT(DMA_MEM_TO_MEM);
        pdma->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
        pdma->dma_dev.descriptor_reuse = true;

        /* Setup DMA APIs */
        pdma->dma_dev.device_alloc_chan_resources =
                sf_pdma_alloc_chan_resources;
        pdma->dma_dev.device_free_chan_resources =
                sf_pdma_free_chan_resources;
        pdma->dma_dev.device_tx_status = sf_pdma_tx_status;
        pdma->dma_dev.device_prep_dma_memcpy = sf_pdma_prep_dma_memcpy;
        pdma->dma_dev.device_config = sf_pdma_slave_config;
        pdma->dma_dev.device_terminate_all = sf_pdma_terminate_all;
        pdma->dma_dev.device_issue_pending = sf_pdma_issue_pending;

        platform_set_drvdata(pdev, pdma);

        ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
        if (ret)
                dev_warn(&pdev->dev,
                         "Failed to set DMA mask. Fall back to default.\n");

        ret = dma_async_device_register(&pdma->dma_dev);
        if (ret) {
                dev_err(&pdev->dev,
                        "Can't register SiFive Platform DMA. (%d)\n", ret);
                return ret;
        }

        return 0;
}
static int sf_pdma_remove(struct platform_device *pdev)
{
        struct sf_pdma *pdma = platform_get_drvdata(pdev);
        struct sf_pdma_chan *ch;
        int i;

        for (i = 0; i < PDMA_NR_CH; i++) {
                ch = &pdma->chans[i];

                devm_free_irq(&pdev->dev, ch->txirq, ch);
                devm_free_irq(&pdev->dev, ch->errirq, ch);
                list_del(&ch->vchan.chan.device_node);
                tasklet_kill(&ch->vchan.task);
                tasklet_kill(&ch->done_tasklet);
                tasklet_kill(&ch->err_tasklet);
        }

        dma_async_device_unregister(&pdma->dma_dev);

        return 0;
}
static const struct of_device_id sf_pdma_dt_ids[] = {
        { .compatible = "sifive,fu540-c000-pdma" },
        {},
};
MODULE_DEVICE_TABLE(of, sf_pdma_dt_ids);
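/*
 * For reference only: an illustrative device tree node for this driver,
 * modeled on the FU540-C000 memory map (the authoritative values live in
 * the SoC dtsi and the sifive,fu540-c000-pdma binding, not here):
 *
 *	dma@3000000 {
 *		compatible = "sifive,fu540-c000-pdma";
 *		reg = <0x0 0x3000000 0x0 0x8000>;
 *		interrupts = <23 24 25 26 27 28 29 30>;
 *		#dma-cells = <1>;
 *	};
 *
 * Each channel uses two consecutive interrupt lines (done, then error),
 * matching the i * 2 / i * 2 + 1 indexing in sf_pdma_irq_init().
 */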
static struct platform_driver sf_pdma_driver = {
        .probe          = sf_pdma_probe,
        .remove         = sf_pdma_remove,
        .driver         = {
                .name           = "sf-pdma",
                .of_match_table = of_match_ptr(sf_pdma_dt_ids),
        },
};
static int __init sf_pdma_init(void)
{
        return platform_driver_register(&sf_pdma_driver);
}

static void __exit sf_pdma_exit(void)
{
        platform_driver_unregister(&sf_pdma_driver);
}

/* do early subsys initcall as we need DMA for other subsystems */
subsys_initcall(sf_pdma_init);
module_exit(sf_pdma_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("SiFive Platform DMA driver");
MODULE_AUTHOR("Green Wan <green.wan@sifive.com>");