// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * DMA driver for Xilinx DMA/Bridge Subsystem
 *
 * Copyright (C) 2017-2020 Xilinx, Inc. All rights reserved.
 * Copyright (C) 2022, Advanced Micro Devices, Inc.
 */

/*
 * The DMA/Bridge Subsystem for PCI Express allows for the movement of data
 * between Host memory and the DMA subsystem. It does this by operating on
 * 'descriptors' that contain information about the source, destination and
 * amount of data to transfer. These direct memory transfers can be in both
 * the Host to Card (H2C) and Card to Host (C2H) directions. The DMA can be
 * configured to have a single AXI4 Master interface shared by all channels
 * or one AXI4-Stream interface for each channel enabled. Memory transfers are
 * specified on a per-channel basis in descriptor linked lists, which the DMA
 * fetches from host memory and processes. Events such as descriptor completion
 * and errors are signaled using interrupts. The core also provides up to 16
 * user interrupt wires that generate interrupts to the host.
 */

#include <linux/mod_devicetable.h>
#include <linux/bitfield.h>
#include <linux/dmapool.h>
#include <linux/regmap.h>
#include <linux/dmaengine.h>
#include <linux/dma/amd_xdma.h>
#include <linux/platform_device.h>
#include <linux/platform_data/amd_xdma.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>

#include "../virt-dma.h"
#include "xdma-regs.h"
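
/*
 * A minimal consumer-side sketch (illustrative, not part of this driver):
 * a client owning a DMA-mapped buffer can drive one H2C channel through the
 * generic dmaengine slave API roughly as below. The "h2c" slave name and the
 * device-side offset are hypothetical values that would come from the
 * platform data filter map.
 */
static int example_h2c_xfer(struct device *dev, dma_addr_t buf, size_t len)
{
	struct dma_slave_config cfg = {
		.direction = DMA_MEM_TO_DEV,
		.dst_addr = 0,	/* hypothetical device-side offset */
	};
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_cookie_t cookie;
	int ret;

	chan = dma_request_chan(dev, "h2c");	/* hypothetical slave name */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		goto out;

	tx = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
					 DMA_PREP_INTERRUPT);
	if (!tx) {
		ret = -ENOMEM;
		goto out;
	}

	cookie = dmaengine_submit(tx);
	dma_async_issue_pending(chan);
	ret = dma_sync_wait(chan, cookie) == DMA_COMPLETE ? 0 : -EIO;
out:
	dma_release_channel(chan);
	return ret;
}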

/* mmio regmap config for all XDMA registers */
static const struct regmap_config xdma_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
	.max_register = XDMA_REG_SPACE_LEN,
};

/**
 * struct xdma_desc_block - Descriptor block
 * @virt_addr: Virtual address of block start
 * @dma_addr: DMA address of block start
 */
struct xdma_desc_block {
	void		*virt_addr;
	dma_addr_t	dma_addr;
};

/**
 * struct xdma_chan - Driver specific DMA channel structure
 * @vchan: Virtual channel
 * @xdev_hdl: Pointer to DMA device structure
 * @base: Offset of channel registers
 * @desc_pool: Descriptor pool
 * @busy: Busy flag of the channel
 * @dir: Transferring direction of the channel
 * @cfg: Transferring config of the channel
 * @irq: IRQ assigned to the channel
 */
struct xdma_chan {
	struct virt_dma_chan		vchan;
	void				*xdev_hdl;
	u32				base;
	struct dma_pool			*desc_pool;
	bool				busy;
	enum dma_transfer_direction	dir;
	struct dma_slave_config		cfg;
	u32				irq;
};

/**
 * struct xdma_desc - DMA desc structure
 * @vdesc: Virtual DMA descriptor
 * @chan: DMA channel pointer
 * @dir: Transferring direction of the request
 * @dev_addr: Physical address on DMA device side
 * @desc_blocks: Hardware descriptor blocks
 * @dblk_num: Number of hardware descriptor blocks
 * @desc_num: Number of hardware descriptors
 * @completed_desc_num: Completed hardware descriptors
 * @cyclic: Cyclic transfer vs. scatter-gather
 * @periods: Number of periods in the cyclic transfer
 * @period_size: Size of a period in bytes in cyclic transfers
 */
struct xdma_desc {
	struct virt_dma_desc		vdesc;
	struct xdma_chan		*chan;
	enum dma_transfer_direction	dir;
	u64				dev_addr;
	struct xdma_desc_block		*desc_blocks;
	u32				dblk_num;
	u32				desc_num;
	u32				completed_desc_num;
	bool				cyclic;
	u32				periods;
	u32				period_size;
};

#define XDMA_DEV_STATUS_REG_DMA		BIT(0)
#define XDMA_DEV_STATUS_INIT_MSIX	BIT(1)

/**
 * struct xdma_device - DMA device structure
 * @pdev: Platform device pointer
 * @dma_dev: DMA device structure
 * @rmap: MMIO regmap for DMA registers
 * @h2c_chans: Host to Card channels
 * @c2h_chans: Card to Host channels
 * @h2c_chan_num: Number of H2C channels
 * @c2h_chan_num: Number of C2H channels
 * @irq_start: Start IRQ assigned to device
 * @irq_num: Number of IRQs assigned to device
 * @status: Initialization status
 */
struct xdma_device {
	struct platform_device	*pdev;
	struct dma_device	dma_dev;
	struct regmap		*rmap;
	struct xdma_chan	*h2c_chans;
	struct xdma_chan	*c2h_chans;
	u32			h2c_chan_num;
	u32			c2h_chan_num;
	u32			irq_start;
	u32			irq_num;
	u32			status;
};

#define xdma_err(xdev, fmt, args...)					\
	dev_err(&(xdev)->pdev->dev, fmt, ##args)
#define XDMA_CHAN_NUM(_xd) ({						\
	typeof(_xd) (xd) = (_xd);					\
	((xd)->h2c_chan_num + (xd)->c2h_chan_num); })

/* Get the last desc in a desc block */
static inline void *xdma_blk_last_desc(struct xdma_desc_block *block)
{
	return block->virt_addr + (XDMA_DESC_ADJACENT - 1) * XDMA_DESC_SIZE;
}

/**
 * xdma_link_sg_desc_blocks - Link SG descriptor blocks for DMA transfer
 * @sw_desc: Tx descriptor pointer
 */
static void xdma_link_sg_desc_blocks(struct xdma_desc *sw_desc)
{
	struct xdma_desc_block *block;
	u32 last_blk_desc, desc_control;
	struct xdma_hw_desc *desc;
	int i;

	desc_control = XDMA_DESC_CONTROL(XDMA_DESC_ADJACENT, 0);
	for (i = 1; i < sw_desc->dblk_num; i++) {
		block = &sw_desc->desc_blocks[i - 1];
		desc = xdma_blk_last_desc(block);

		if (!(i & XDMA_DESC_BLOCK_MASK)) {
			desc->control = cpu_to_le32(XDMA_DESC_CONTROL_LAST);
			continue;
		}
		desc->control = cpu_to_le32(desc_control);
		desc->next_desc = cpu_to_le64(block[1].dma_addr);
	}

	/* update the last block */
	last_blk_desc = (sw_desc->desc_num - 1) & XDMA_DESC_ADJACENT_MASK;
	if (((sw_desc->dblk_num - 1) & XDMA_DESC_BLOCK_MASK) > 0) {
		block = &sw_desc->desc_blocks[sw_desc->dblk_num - 2];
		desc = xdma_blk_last_desc(block);
		desc_control = XDMA_DESC_CONTROL(last_blk_desc + 1, 0);
		desc->control = cpu_to_le32(desc_control);
	}

	block = &sw_desc->desc_blocks[sw_desc->dblk_num - 1];
	desc = block->virt_addr + last_blk_desc * XDMA_DESC_SIZE;
	desc->control = cpu_to_le32(XDMA_DESC_CONTROL_LAST);
}
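
/*
 * Resulting chain layout (sketch): within a block the engine fetches up to
 * XDMA_DESC_ADJACENT descriptors in one burst, and the last descriptor of
 * each block points at the start of the next block. Every block that ends a
 * set of XDMA_DESC_BLOCK_NUM blocks, as well as the final in-use descriptor,
 * carries XDMA_DESC_CONTROL_LAST, so the engine stops there and interrupts
 * the host; the ISR then restarts the transfer from the next block.
 *
 *   block[0]: d0 -> d1 -> ... -> dN -.    block[1]: d0 -> ... -> dN(LAST)
 *                                    '--> next_desc = block[1].dma_addr
 */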

/**
 * xdma_link_cyclic_desc_blocks - Link cyclic descriptor blocks for DMA transfer
 * @sw_desc: Tx descriptor pointer
 */
static void xdma_link_cyclic_desc_blocks(struct xdma_desc *sw_desc)
{
	struct xdma_desc_block *block;
	struct xdma_hw_desc *desc;
	int i;

	block = sw_desc->desc_blocks;
	for (i = 0; i < sw_desc->desc_num - 1; i++) {
		desc = block->virt_addr + i * XDMA_DESC_SIZE;
		desc->next_desc = cpu_to_le64(block->dma_addr + ((i + 1) * XDMA_DESC_SIZE));
	}

	/* Point the last descriptor back at the first one to close the ring */
	desc = block->virt_addr + i * XDMA_DESC_SIZE;
	desc->next_desc = cpu_to_le64(block->dma_addr);
}

static inline struct xdma_chan *to_xdma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct xdma_chan, vchan.chan);
}

static inline struct xdma_desc *to_xdma_desc(struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct xdma_desc, vdesc);
}

/**
 * xdma_channel_init - Initialize DMA channel registers
 * @chan: DMA channel pointer
 */
static int xdma_channel_init(struct xdma_chan *chan)
{
	struct xdma_device *xdev = chan->xdev_hdl;
	int ret;

	ret = regmap_write(xdev->rmap, chan->base + XDMA_CHAN_CONTROL_W1C,
			   CHAN_CTRL_NON_INCR_ADDR);
	if (ret)
		return ret;

	ret = regmap_write(xdev->rmap, chan->base + XDMA_CHAN_INTR_ENABLE,
			   CHAN_IM_ALL);
	if (ret)
		return ret;

	return 0;
}

/**
 * xdma_free_desc - Free descriptor
 * @vdesc: Virtual DMA descriptor
 */
static void xdma_free_desc(struct virt_dma_desc *vdesc)
{
	struct xdma_desc *sw_desc;
	int i;

	sw_desc = to_xdma_desc(vdesc);
	for (i = 0; i < sw_desc->dblk_num; i++) {
		if (!sw_desc->desc_blocks[i].virt_addr)
			break;
		dma_pool_free(sw_desc->chan->desc_pool,
			      sw_desc->desc_blocks[i].virt_addr,
			      sw_desc->desc_blocks[i].dma_addr);
	}
	kfree(sw_desc->desc_blocks);
	kfree(sw_desc);
}

/**
 * xdma_alloc_desc - Allocate descriptor
 * @chan: DMA channel pointer
 * @desc_num: Number of hardware descriptors
 * @cyclic: Whether this is a cyclic transfer
 */
static struct xdma_desc *
xdma_alloc_desc(struct xdma_chan *chan, u32 desc_num, bool cyclic)
{
	struct xdma_desc *sw_desc;
	struct xdma_hw_desc *desc;
	dma_addr_t dma_addr;
	u32 dblk_num, control;
	void *addr;
	int i, j;

	sw_desc = kzalloc(sizeof(*sw_desc), GFP_NOWAIT);
	if (!sw_desc)
		return NULL;

	sw_desc->chan = chan;
	sw_desc->desc_num = desc_num;
	sw_desc->cyclic = cyclic;
	dblk_num = DIV_ROUND_UP(desc_num, XDMA_DESC_ADJACENT);
	sw_desc->desc_blocks = kcalloc(dblk_num, sizeof(*sw_desc->desc_blocks),
				       GFP_NOWAIT);
	if (!sw_desc->desc_blocks)
		goto failed;

	if (cyclic)
		control = XDMA_DESC_CONTROL_CYCLIC;
	else
		control = XDMA_DESC_CONTROL(1, 0);

	sw_desc->dblk_num = dblk_num;
	for (i = 0; i < sw_desc->dblk_num; i++) {
		addr = dma_pool_alloc(chan->desc_pool, GFP_NOWAIT, &dma_addr);
		if (!addr)
			goto failed;

		sw_desc->desc_blocks[i].virt_addr = addr;
		sw_desc->desc_blocks[i].dma_addr = dma_addr;
		for (j = 0, desc = addr; j < XDMA_DESC_ADJACENT; j++)
			desc[j].control = cpu_to_le32(control);
	}

	if (cyclic)
		xdma_link_cyclic_desc_blocks(sw_desc);
	else
		xdma_link_sg_desc_blocks(sw_desc);

	return sw_desc;

failed:
	xdma_free_desc(&sw_desc->vdesc);
	return NULL;
}

/**
 * xdma_xfer_start - Start DMA transfer
 * @xchan: DMA channel pointer
 */
static int xdma_xfer_start(struct xdma_chan *xchan)
{
	struct virt_dma_desc *vd = vchan_next_desc(&xchan->vchan);
	struct xdma_device *xdev = xchan->xdev_hdl;
	struct xdma_desc_block *block;
	u32 val, completed_blocks;
	struct xdma_desc *desc;
	int ret;

	/*
	 * Return if no descriptor has been submitted or the channel is busy.
	 * The vchan lock must be held by the caller.
	 */
	if (!vd || xchan->busy)
		return -EINVAL;

	/* clear run stop bit to get ready for transfer */
	ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL_W1C,
			   CHAN_CTRL_RUN_STOP);
	if (ret)
		return ret;

	desc = to_xdma_desc(vd);
	if (desc->dir != xchan->dir) {
		xdma_err(xdev, "incorrect request direction");
		return -EINVAL;
	}

	/* set DMA engine to the first descriptor block */
	completed_blocks = desc->completed_desc_num / XDMA_DESC_ADJACENT;
	block = &desc->desc_blocks[completed_blocks];
	val = lower_32_bits(block->dma_addr);
	ret = regmap_write(xdev->rmap, xchan->base + XDMA_SGDMA_DESC_LO, val);
	if (ret)
		return ret;

	val = upper_32_bits(block->dma_addr);
	ret = regmap_write(xdev->rmap, xchan->base + XDMA_SGDMA_DESC_HI, val);
	if (ret)
		return ret;

	if (completed_blocks + 1 == desc->dblk_num)
		val = (desc->desc_num - 1) & XDMA_DESC_ADJACENT_MASK;
	else
		val = XDMA_DESC_ADJACENT - 1;
	ret = regmap_write(xdev->rmap, xchan->base + XDMA_SGDMA_DESC_ADJ, val);
	if (ret)
		return ret;

	/* kick off DMA transfer */
	ret = regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_CONTROL,
			   CHAN_CTRL_START);
	if (ret)
		return ret;

	xchan->busy = true;

	return 0;
}
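
/*
 * Worked example for the adjacency programming above, assuming
 * XDMA_DESC_ADJACENT is 32: a 70-descriptor request spans blocks of
 * 32 + 32 + 6 descriptors. When (re)starting from the first or second block,
 * XDMA_SGDMA_DESC_ADJ is written with 31 (a full block beyond the first
 * fetch); when restarting at the last block it is written with
 * (70 - 1) & 31 = 5.
 */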

/**
 * xdma_alloc_channels - Detect and allocate DMA channels
 * @xdev: DMA device pointer
 * @dir: Channel direction
 */
static int xdma_alloc_channels(struct xdma_device *xdev,
			       enum dma_transfer_direction dir)
{
	struct xdma_platdata *pdata = dev_get_platdata(&xdev->pdev->dev);
	struct xdma_chan **chans, *xchan;
	u32 base, identifier, target;
	u32 *chan_num;
	int i, j, ret;

	if (dir == DMA_MEM_TO_DEV) {
		base = XDMA_CHAN_H2C_OFFSET;
		target = XDMA_CHAN_H2C_TARGET;
		chans = &xdev->h2c_chans;
		chan_num = &xdev->h2c_chan_num;
	} else if (dir == DMA_DEV_TO_MEM) {
		base = XDMA_CHAN_C2H_OFFSET;
		target = XDMA_CHAN_C2H_TARGET;
		chans = &xdev->c2h_chans;
		chan_num = &xdev->c2h_chan_num;
	} else {
		xdma_err(xdev, "invalid direction specified");
		return -EINVAL;
	}

	/* detect number of available DMA channels */
	for (i = 0, *chan_num = 0; i < pdata->max_dma_channels; i++) {
		ret = regmap_read(xdev->rmap, base + i * XDMA_CHAN_STRIDE,
				  &identifier);
		if (ret)
			return ret;

		/* check if it is an available DMA channel */
		if (XDMA_CHAN_CHECK_TARGET(identifier, target))
			(*chan_num)++;
	}

	if (!*chan_num) {
		xdma_err(xdev, "failed to detect any available channel");
		return -EINVAL;
	}

	*chans = devm_kcalloc(&xdev->pdev->dev, *chan_num, sizeof(**chans),
			      GFP_KERNEL);
	if (!*chans)
		return -ENOMEM;

	for (i = 0, j = 0; i < pdata->max_dma_channels; i++) {
		ret = regmap_read(xdev->rmap, base + i * XDMA_CHAN_STRIDE,
				  &identifier);
		if (ret)
			return ret;

		if (!XDMA_CHAN_CHECK_TARGET(identifier, target))
			continue;

		if (j == *chan_num) {
			xdma_err(xdev, "invalid channel number");
			return -EIO;
		}

		/* init channel structure and hardware */
		xchan = &(*chans)[j];
		xchan->xdev_hdl = xdev;
		xchan->base = base + i * XDMA_CHAN_STRIDE;
		xchan->dir = dir;

		ret = xdma_channel_init(xchan);
		if (ret)
			return ret;
		xchan->vchan.desc_free = xdma_free_desc;
		vchan_init(&xchan->vchan, &xdev->dma_dev);

		j++;
	}

	dev_info(&xdev->pdev->dev, "configured %d %s channels", j,
		 (dir == DMA_MEM_TO_DEV) ? "H2C" : "C2H");

	return 0;
}

/**
 * xdma_issue_pending - Issue pending transactions
 * @chan: DMA channel pointer
 */
static void xdma_issue_pending(struct dma_chan *chan)
{
	struct xdma_chan *xdma_chan = to_xdma_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&xdma_chan->vchan.lock, flags);
	if (vchan_issue_pending(&xdma_chan->vchan))
		xdma_xfer_start(xdma_chan);
	spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);
}

/**
 * xdma_prep_device_sg - prepare a descriptor for a DMA transaction
 * @chan: DMA channel pointer
 * @sgl: Transfer scatter gather list
 * @sg_len: Length of scatter gather list
 * @dir: Transfer direction
 * @flags: Transfer ack flags
 * @context: APP words of the descriptor
 */
static struct dma_async_tx_descriptor *
xdma_prep_device_sg(struct dma_chan *chan, struct scatterlist *sgl,
		    unsigned int sg_len, enum dma_transfer_direction dir,
		    unsigned long flags, void *context)
{
	struct xdma_chan *xdma_chan = to_xdma_chan(chan);
	struct dma_async_tx_descriptor *tx_desc;
	u32 desc_num = 0, i, len, rest;
	struct xdma_desc_block *dblk;
	struct xdma_hw_desc *desc;
	struct xdma_desc *sw_desc;
	u64 dev_addr, *src, *dst;
	struct scatterlist *sg;
	u64 addr;

	for_each_sg(sgl, sg, sg_len, i)
		desc_num += DIV_ROUND_UP(sg_dma_len(sg), XDMA_DESC_BLEN_MAX);

	sw_desc = xdma_alloc_desc(xdma_chan, desc_num, false);
	if (!sw_desc)
		return NULL;
	sw_desc->dir = dir;

	if (dir == DMA_MEM_TO_DEV) {
		dev_addr = xdma_chan->cfg.dst_addr;
		src = &addr;
		dst = &dev_addr;
	} else {
		dev_addr = xdma_chan->cfg.src_addr;
		src = &dev_addr;
		dst = &addr;
	}

	dblk = sw_desc->desc_blocks;
	desc = dblk->virt_addr;
	desc_num = 1;
	for_each_sg(sgl, sg, sg_len, i) {
		addr = sg_dma_address(sg);
		rest = sg_dma_len(sg);

		do {
			len = min_t(u32, rest, XDMA_DESC_BLEN_MAX);
			/* set hardware descriptor */
			desc->bytes = cpu_to_le32(len);
			desc->src_addr = cpu_to_le64(*src);
			desc->dst_addr = cpu_to_le64(*dst);

			if (!(desc_num & XDMA_DESC_ADJACENT_MASK)) {
				dblk++;
				desc = dblk->virt_addr;
			} else {
				desc++;
			}

			desc_num++;
			dev_addr += len;
			addr += len;
			rest -= len;
		} while (rest);
	}

	tx_desc = vchan_tx_prep(&xdma_chan->vchan, &sw_desc->vdesc, flags);
	if (!tx_desc)
		goto failed;

	return tx_desc;

failed:
	xdma_free_desc(&sw_desc->vdesc);
	return NULL;
}
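
/*
 * Consumer-side sketch (illustrative): with a scatterlist already mapped via
 * dma_map_sg(), the prep callback above is reached through the generic
 * dmaengine helper. The function name here is hypothetical.
 */
static struct dma_async_tx_descriptor *
example_prep_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int nents)
{
	/* nents must be the mapped entry count returned by dma_map_sg() */
	return dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT);
}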

/**
 * xdma_prep_dma_cyclic - prepare for cyclic DMA transactions
 * @chan: DMA channel pointer
 * @address: Device DMA address to access
 * @size: Total length to transfer
 * @period_size: Period size to use for each transfer
 * @dir: Transfer direction
 * @flags: Transfer ack flags
 */
static struct dma_async_tx_descriptor *
xdma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t address,
		     size_t size, size_t period_size,
		     enum dma_transfer_direction dir,
		     unsigned long flags)
{
	struct xdma_chan *xdma_chan = to_xdma_chan(chan);
	struct xdma_device *xdev = xdma_chan->xdev_hdl;
	unsigned int periods = size / period_size;
	struct dma_async_tx_descriptor *tx_desc;
	struct xdma_desc_block *dblk;
	struct xdma_hw_desc *desc;
	struct xdma_desc *sw_desc;
	unsigned int i;

	/*
	 * Simplify the whole logic by preventing an abnormally high number of
	 * periods and period size.
	 */
	if (period_size > XDMA_DESC_BLEN_MAX) {
		xdma_err(xdev, "period size limited to %lu bytes\n", XDMA_DESC_BLEN_MAX);
		return NULL;
	}

	if (periods > XDMA_DESC_ADJACENT) {
		xdma_err(xdev, "number of periods limited to %u\n", XDMA_DESC_ADJACENT);
		return NULL;
	}

	sw_desc = xdma_alloc_desc(xdma_chan, periods, true);
	if (!sw_desc)
		return NULL;

	sw_desc->periods = periods;
	sw_desc->period_size = period_size;
	sw_desc->dir = dir;

	dblk = sw_desc->desc_blocks;
	desc = dblk->virt_addr;

	/* fill hardware descriptor */
	for (i = 0; i < periods; i++) {
		desc->bytes = cpu_to_le32(period_size);
		if (dir == DMA_MEM_TO_DEV) {
			desc->src_addr = cpu_to_le64(address + i * period_size);
			desc->dst_addr = cpu_to_le64(xdma_chan->cfg.dst_addr);
		} else {
			desc->src_addr = cpu_to_le64(xdma_chan->cfg.src_addr);
			desc->dst_addr = cpu_to_le64(address + i * period_size);
		}

		desc++;
	}

	tx_desc = vchan_tx_prep(&xdma_chan->vchan, &sw_desc->vdesc, flags);
	if (!tx_desc)
		goto failed;

	return tx_desc;

failed:
	xdma_free_desc(&sw_desc->vdesc);
	return NULL;
}
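
/*
 * Consumer-side sketch (illustrative): setting up a cyclic ring of equally
 * sized periods over one mapped buffer, e.g. for continuous capture from the
 * card. The function and callback names are hypothetical.
 */
static int example_start_cyclic(struct dma_chan *chan, dma_addr_t buf,
				size_t period_size, unsigned int periods,
				dma_async_tx_callback cb, void *cb_arg)
{
	struct dma_async_tx_descriptor *tx;

	tx = dmaengine_prep_dma_cyclic(chan, buf, periods * period_size,
				       period_size, DMA_DEV_TO_MEM,
				       DMA_PREP_INTERRUPT);
	if (!tx)
		return -ENOMEM;

	tx->callback = cb;		/* invoked once per completed period */
	tx->callback_param = cb_arg;
	dmaengine_submit(tx);
	dma_async_issue_pending(chan);

	return 0;
}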

/**
 * xdma_device_config - Configure the DMA channel
 * @chan: DMA channel pointer
 * @cfg: Channel configuration
 */
static int xdma_device_config(struct dma_chan *chan,
			      struct dma_slave_config *cfg)
{
	struct xdma_chan *xdma_chan = to_xdma_chan(chan);

	memcpy(&xdma_chan->cfg, cfg, sizeof(*cfg));

	return 0;
}

/**
 * xdma_free_chan_resources - Free channel resources
 * @chan: DMA channel pointer
 */
static void xdma_free_chan_resources(struct dma_chan *chan)
{
	struct xdma_chan *xdma_chan = to_xdma_chan(chan);

	vchan_free_chan_resources(&xdma_chan->vchan);
	dma_pool_destroy(xdma_chan->desc_pool);
	xdma_chan->desc_pool = NULL;
}

/**
 * xdma_alloc_chan_resources - Allocate channel resources
 * @chan: DMA channel pointer
 */
static int xdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct xdma_chan *xdma_chan = to_xdma_chan(chan);
	struct xdma_device *xdev = xdma_chan->xdev_hdl;
	struct device *dev = xdev->dma_dev.dev;

	while (dev && !dev_is_pci(dev))
		dev = dev->parent;
	if (!dev) {
		xdma_err(xdev, "unable to find pci device");
		return -EINVAL;
	}

	xdma_chan->desc_pool = dma_pool_create(dma_chan_name(chan), dev,
					       XDMA_DESC_BLOCK_SIZE,
					       XDMA_DESC_BLOCK_ALIGN, 0);
	if (!xdma_chan->desc_pool) {
		xdma_err(xdev, "unable to allocate descriptor pool");
		return -ENOMEM;
	}

	return 0;
}

static enum dma_status xdma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
				      struct dma_tx_state *state)
{
	struct xdma_chan *xdma_chan = to_xdma_chan(chan);
	struct xdma_desc *desc = NULL;
	struct virt_dma_desc *vd;
	enum dma_status ret;
	unsigned long flags;
	unsigned int period_idx;
	u32 residue = 0;

	ret = dma_cookie_status(chan, cookie, state);
	if (ret == DMA_COMPLETE)
		return ret;

	spin_lock_irqsave(&xdma_chan->vchan.lock, flags);

	vd = vchan_find_desc(&xdma_chan->vchan, cookie);
	if (vd)
		desc = to_xdma_desc(vd);
	if (!desc || !desc->cyclic) {
		spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);
		return ret;
	}

	period_idx = desc->completed_desc_num % desc->periods;
	residue = (desc->periods - period_idx) * desc->period_size;

	spin_unlock_irqrestore(&xdma_chan->vchan.lock, flags);

	dma_set_residue(state, residue);

	return ret;
}
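
/*
 * Worked example for the residue math above: with periods = 4 and
 * period_size = 4096, a completed_desc_num of 5 gives period_idx = 1, so the
 * reported residue is (4 - 1) * 4096 = 12288 bytes, i.e. the distance left
 * before the ring wraps back to the start of the buffer.
 */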

/**
 * xdma_channel_isr - XDMA channel interrupt handler
 * @irq: IRQ number
 * @dev_id: Pointer to the DMA channel structure
 */
static irqreturn_t xdma_channel_isr(int irq, void *dev_id)
{
	struct xdma_chan *xchan = dev_id;
	u32 complete_desc_num = 0;
	struct xdma_device *xdev;
	struct virt_dma_desc *vd;
	struct xdma_desc *desc;
	int ret;
	u32 st;

	spin_lock(&xchan->vchan.lock);

	/* get submitted request */
	vd = vchan_next_desc(&xchan->vchan);
	if (!vd)
		goto out;

	desc = to_xdma_desc(vd);
	xdev = xchan->xdev_hdl;

	ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_COMPLETED_DESC,
			  &complete_desc_num);
	if (ret)
		goto out;

	if (desc->cyclic) {
		desc->completed_desc_num = complete_desc_num;
		ret = regmap_read(xdev->rmap, xchan->base + XDMA_CHAN_STATUS,
				  &st);
		if (ret)
			goto out;
		regmap_write(xdev->rmap, xchan->base + XDMA_CHAN_STATUS, st);
		vchan_cyclic_callback(vd);
		goto out;
	}

	xchan->busy = false;
	desc->completed_desc_num += complete_desc_num;

	/* if all data blocks are transferred, remove and complete the request */
	if (desc->completed_desc_num == desc->desc_num) {
		list_del(&vd->node);
		vchan_cookie_complete(vd);
		goto out;
	}

	if (desc->completed_desc_num > desc->desc_num ||
	    complete_desc_num != XDMA_DESC_BLOCK_NUM * XDMA_DESC_ADJACENT)
		goto out;

	/* transfer the rest of data (SG only) */
	xdma_xfer_start(xchan);

out:
	spin_unlock(&xchan->vchan.lock);
	return IRQ_HANDLED;
}

/**
 * xdma_irq_fini - Uninitialize IRQ
 * @xdev: DMA device pointer
 */
static void xdma_irq_fini(struct xdma_device *xdev)
{
	int i;

	/* disable interrupt */
	regmap_write(xdev->rmap, XDMA_IRQ_CHAN_INT_EN_W1C, ~0);

	/* free irq handler */
	for (i = 0; i < xdev->h2c_chan_num; i++)
		free_irq(xdev->h2c_chans[i].irq, &xdev->h2c_chans[i]);

	for (i = 0; i < xdev->c2h_chan_num; i++)
		free_irq(xdev->c2h_chans[i].irq, &xdev->c2h_chans[i]);
}

/**
 * xdma_set_vector_reg - configure hardware IRQ registers
 * @xdev: DMA device pointer
 * @vec_tbl_start: Start of IRQ registers
 * @irq_start: Start of IRQs
 * @irq_num: Number of IRQs
 */
static int xdma_set_vector_reg(struct xdma_device *xdev, u32 vec_tbl_start,
			       u32 irq_start, u32 irq_num)
{
	u32 shift, i, val = 0;
	int ret;

	/* Each IRQ register is 32 bit and contains 4 IRQs */
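	/*
	 * For example, assuming XDMA_IRQ_VEC_SHIFT is 8, packing six vectors
	 * starting at vector 0 produces two register writes:
	 *
	 *   reg[0] = (3 << 24) | (2 << 16) | (1 << 8) | 0   - vectors 0-3
	 *   reg[1] =                         (5 << 8) | 4   - vectors 4-5
	 */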
	while (irq_num > 0) {
		for (i = 0; i < 4; i++) {
			shift = XDMA_IRQ_VEC_SHIFT * i;
			val |= irq_start << shift;
			irq_start++;
			irq_num--;
			if (!irq_num)
				break;
		}

		/* write IRQ register */
		ret = regmap_write(xdev->rmap, vec_tbl_start, val);
		if (ret)
			return ret;
		vec_tbl_start += sizeof(u32);
		val = 0;
	}

	return 0;
}

/**
 * xdma_irq_init - initialize IRQs
 * @xdev: DMA device pointer
 */
static int xdma_irq_init(struct xdma_device *xdev)
{
	u32 irq = xdev->irq_start;
	u32 user_irq_start;
	int i, j, ret;

	/* return failure if there are not enough IRQs */
	if (xdev->irq_num < XDMA_CHAN_NUM(xdev)) {
		xdma_err(xdev, "not enough irq");
		return -EINVAL;
	}

	/* setup H2C interrupt handler */
	for (i = 0; i < xdev->h2c_chan_num; i++) {
		ret = request_irq(irq, xdma_channel_isr, 0,
				  "xdma-h2c-channel", &xdev->h2c_chans[i]);
		if (ret) {
			xdma_err(xdev, "H2C channel%d request irq%d failed: %d",
				 i, irq, ret);
			goto failed_init_h2c;
		}
		xdev->h2c_chans[i].irq = irq;
		irq++;
	}

	/* setup C2H interrupt handler */
	for (j = 0; j < xdev->c2h_chan_num; j++) {
		ret = request_irq(irq, xdma_channel_isr, 0,
				  "xdma-c2h-channel", &xdev->c2h_chans[j]);
		if (ret) {
			xdma_err(xdev, "C2H channel%d request irq%d failed: %d",
				 j, irq, ret);
			goto failed_init_c2h;
		}
		xdev->c2h_chans[j].irq = irq;
		irq++;
	}

	/* config hardware IRQ registers */
	ret = xdma_set_vector_reg(xdev, XDMA_IRQ_CHAN_VEC_NUM, 0,
				  XDMA_CHAN_NUM(xdev));
	if (ret) {
		xdma_err(xdev, "failed to set channel vectors: %d", ret);
		goto failed_init_c2h;
	}

	/* config user IRQ registers if needed */
	user_irq_start = XDMA_CHAN_NUM(xdev);
	if (xdev->irq_num > user_irq_start) {
		ret = xdma_set_vector_reg(xdev, XDMA_IRQ_USER_VEC_NUM,
					  user_irq_start,
					  xdev->irq_num - user_irq_start);
		if (ret) {
			xdma_err(xdev, "failed to set user vectors: %d", ret);
			goto failed_init_c2h;
		}
	}

	/* enable interrupt */
	ret = regmap_write(xdev->rmap, XDMA_IRQ_CHAN_INT_EN_W1S, ~0);
	if (ret)
		goto failed_init_c2h;

	return 0;

failed_init_c2h:
	while (j--)
		free_irq(xdev->c2h_chans[j].irq, &xdev->c2h_chans[j]);
failed_init_h2c:
	while (i--)
		free_irq(xdev->h2c_chans[i].irq, &xdev->h2c_chans[i]);

	return ret;
}

static bool xdma_filter_fn(struct dma_chan *chan, void *param)
{
	struct xdma_chan *xdma_chan = to_xdma_chan(chan);
	struct xdma_chan_info *chan_info = param;

	return chan_info->dir == xdma_chan->dir;
}

/**
 * xdma_disable_user_irq - Disable user interrupt
 * @pdev: Pointer to the platform_device structure
 * @irq_num: System IRQ number
 */
void xdma_disable_user_irq(struct platform_device *pdev, u32 irq_num)
{
	struct xdma_device *xdev = platform_get_drvdata(pdev);
	u32 index;

	index = irq_num - xdev->irq_start;
	if (index < XDMA_CHAN_NUM(xdev) || index >= xdev->irq_num) {
		xdma_err(xdev, "invalid user irq number");
		return;
	}
	index -= XDMA_CHAN_NUM(xdev);

	regmap_write(xdev->rmap, XDMA_IRQ_USER_INT_EN_W1C, 1 << index);
}
EXPORT_SYMBOL(xdma_disable_user_irq);

/**
 * xdma_enable_user_irq - Enable user logic interrupt
 * @pdev: Pointer to the platform_device structure
 * @irq_num: System IRQ number
 */
int xdma_enable_user_irq(struct platform_device *pdev, u32 irq_num)
{
	struct xdma_device *xdev = platform_get_drvdata(pdev);
	u32 index;
	int ret;

	index = irq_num - xdev->irq_start;
	if (index < XDMA_CHAN_NUM(xdev) || index >= xdev->irq_num) {
		xdma_err(xdev, "invalid user irq number");
		return -EINVAL;
	}
	index -= XDMA_CHAN_NUM(xdev);

	ret = regmap_write(xdev->rmap, XDMA_IRQ_USER_INT_EN_W1S, 1 << index);

	return ret;
}
EXPORT_SYMBOL(xdma_enable_user_irq);

/**
 * xdma_get_user_irq - Get system IRQ number
 * @pdev: Pointer to the platform_device structure
 * @user_irq_index: User logic IRQ wire index
 *
 * Return: The system IRQ number allocated for the given wire index.
 */
int xdma_get_user_irq(struct platform_device *pdev, u32 user_irq_index)
{
	struct xdma_device *xdev = platform_get_drvdata(pdev);

	if (XDMA_CHAN_NUM(xdev) + user_irq_index >= xdev->irq_num) {
		xdma_err(xdev, "invalid user irq index");
		return -EINVAL;
	}

	return xdev->irq_start + XDMA_CHAN_NUM(xdev) + user_irq_index;
}
EXPORT_SYMBOL(xdma_get_user_irq);
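
/*
 * Consumer-side sketch (illustrative): a client driver sharing this platform
 * device can wire up user IRQ wire 0 as below. The function, handler and
 * name are hypothetical; request_irq() is declared in <linux/interrupt.h>.
 */
static int example_setup_user_irq(struct platform_device *pdev,
				  irq_handler_t handler, void *arg)
{
	int irq = xdma_get_user_irq(pdev, 0);
	int ret;

	if (irq < 0)
		return irq;

	ret = request_irq(irq, handler, 0, "xdma-user-0", arg);
	if (ret)
		return ret;

	return xdma_enable_user_irq(pdev, irq);
}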

/**
 * xdma_remove - Driver remove function
 * @pdev: Pointer to the platform_device structure
 */
static void xdma_remove(struct platform_device *pdev)
{
	struct xdma_device *xdev = platform_get_drvdata(pdev);

	if (xdev->status & XDMA_DEV_STATUS_INIT_MSIX)
		xdma_irq_fini(xdev);

	if (xdev->status & XDMA_DEV_STATUS_REG_DMA)
		dma_async_device_unregister(&xdev->dma_dev);
}

/**
 * xdma_probe - Driver probe function
 * @pdev: Pointer to the platform_device structure
 */
static int xdma_probe(struct platform_device *pdev)
{
	struct xdma_platdata *pdata = dev_get_platdata(&pdev->dev);
	struct xdma_device *xdev;
	void __iomem *reg_base;
	struct resource *res;
	int ret = -ENODEV;

	if (pdata->max_dma_channels > XDMA_MAX_CHANNELS) {
		dev_err(&pdev->dev, "invalid max dma channels %d",
			pdata->max_dma_channels);
		return -EINVAL;
	}

	xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
	if (!xdev)
		return -ENOMEM;

	platform_set_drvdata(pdev, xdev);
	xdev->pdev = pdev;

	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		xdma_err(xdev, "failed to get irq resource");
		goto failed;
	}
	xdev->irq_start = res->start;
	xdev->irq_num = resource_size(res);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		xdma_err(xdev, "failed to get io resource");
		goto failed;
	}

	reg_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(reg_base)) {
		xdma_err(xdev, "ioremap failed");
		goto failed;
	}

	xdev->rmap = devm_regmap_init_mmio(&pdev->dev, reg_base,
					   &xdma_regmap_config);
	if (IS_ERR(xdev->rmap)) {
		ret = PTR_ERR(xdev->rmap);
		xdma_err(xdev, "config regmap failed: %d", ret);
		goto failed;
	}
	INIT_LIST_HEAD(&xdev->dma_dev.channels);

	ret = xdma_alloc_channels(xdev, DMA_MEM_TO_DEV);
	if (ret) {
		xdma_err(xdev, "config H2C channels failed: %d", ret);
		goto failed;
	}

	ret = xdma_alloc_channels(xdev, DMA_DEV_TO_MEM);
	if (ret) {
		xdma_err(xdev, "config C2H channels failed: %d", ret);
		goto failed;
	}

	dma_cap_set(DMA_SLAVE, xdev->dma_dev.cap_mask);
	dma_cap_set(DMA_PRIVATE, xdev->dma_dev.cap_mask);
	dma_cap_set(DMA_CYCLIC, xdev->dma_dev.cap_mask);

	xdev->dma_dev.dev = &pdev->dev;
	xdev->dma_dev.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
	xdev->dma_dev.device_free_chan_resources = xdma_free_chan_resources;
	xdev->dma_dev.device_alloc_chan_resources = xdma_alloc_chan_resources;
	xdev->dma_dev.device_tx_status = xdma_tx_status;
	xdev->dma_dev.device_prep_slave_sg = xdma_prep_device_sg;
	xdev->dma_dev.device_config = xdma_device_config;
	xdev->dma_dev.device_issue_pending = xdma_issue_pending;
	xdev->dma_dev.filter.map = pdata->device_map;
	xdev->dma_dev.filter.mapcnt = pdata->device_map_cnt;
	xdev->dma_dev.filter.fn = xdma_filter_fn;
	xdev->dma_dev.device_prep_dma_cyclic = xdma_prep_dma_cyclic;

	ret = dma_async_device_register(&xdev->dma_dev);
	if (ret) {
		xdma_err(xdev, "failed to register Xilinx XDMA: %d", ret);
		goto failed;
	}
	xdev->status |= XDMA_DEV_STATUS_REG_DMA;

	ret = xdma_irq_init(xdev);
	if (ret) {
		xdma_err(xdev, "failed to init msix: %d", ret);
		goto failed;
	}
	xdev->status |= XDMA_DEV_STATUS_INIT_MSIX;

	return 0;

failed:
	xdma_remove(pdev);

	return ret;
}

static const struct platform_device_id xdma_id_table[] = {
	{ "xdma", 0 },
	{ },
};

static struct platform_driver xdma_driver = {
	.driver		= {
		.name = "xdma",
	},
	.id_table	= xdma_id_table,
	.probe		= xdma_probe,
	.remove_new	= xdma_remove,
};

module_platform_driver(xdma_driver);

MODULE_DESCRIPTION("AMD XDMA driver");
MODULE_AUTHOR("XRT Team <runtimeca39d@amd.com>");
MODULE_LICENSE("GPL");