// SPDX-License-Identifier: GPL-2.0
/*
 * Xilinx ZynqMP DPDMA Engine driver
 *
 * Copyright (C) 2015 - 2020 Xilinx, Inc.
 *
 * Author: Hyun Woo Kwon <hyun.kwon@xilinx.com>
 */

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/clk.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

#include <dt-bindings/dma/xlnx-zynqmp-dpdma.h>

#include "../dmaengine.h"
#include "../virt-dma.h"

/* DPDMA registers */
#define XILINX_DPDMA_ERR_CTRL				0x000
#define XILINX_DPDMA_ISR				0x004
#define XILINX_DPDMA_IMR				0x008
#define XILINX_DPDMA_IEN				0x00c
#define XILINX_DPDMA_IDS				0x010
#define XILINX_DPDMA_INTR_DESC_DONE(n)			BIT((n) + 0)
#define XILINX_DPDMA_INTR_DESC_DONE_MASK		GENMASK(5, 0)
#define XILINX_DPDMA_INTR_NO_OSTAND(n)			BIT((n) + 6)
#define XILINX_DPDMA_INTR_NO_OSTAND_MASK		GENMASK(11, 6)
#define XILINX_DPDMA_INTR_AXI_ERR(n)			BIT((n) + 12)
#define XILINX_DPDMA_INTR_AXI_ERR_MASK			GENMASK(17, 12)
#define XILINX_DPDMA_INTR_DESC_ERR(n)			BIT((n) + 18)
#define XILINX_DPDMA_INTR_DESC_ERR_MASK			GENMASK(23, 18)
#define XILINX_DPDMA_INTR_WR_CMD_FIFO_FULL		BIT(24)
#define XILINX_DPDMA_INTR_WR_DATA_FIFO_FULL		BIT(25)
#define XILINX_DPDMA_INTR_AXI_4K_CROSS			BIT(26)
#define XILINX_DPDMA_INTR_VSYNC				BIT(27)
#define XILINX_DPDMA_INTR_CHAN_ERR_MASK			0x00041000
#define XILINX_DPDMA_INTR_CHAN_ERR			0x00fff000
#define XILINX_DPDMA_INTR_GLOBAL_ERR			0x07000000
#define XILINX_DPDMA_INTR_ERR_ALL			0x07fff000
#define XILINX_DPDMA_INTR_CHAN_MASK			0x00041041
#define XILINX_DPDMA_INTR_GLOBAL_MASK			0x0f000000
#define XILINX_DPDMA_INTR_ALL				0x0fffffff
#define XILINX_DPDMA_EISR				0x014
#define XILINX_DPDMA_EIMR				0x018
#define XILINX_DPDMA_EIEN				0x01c
#define XILINX_DPDMA_EIDS				0x020
#define XILINX_DPDMA_EINTR_INV_APB			BIT(0)
#define XILINX_DPDMA_EINTR_RD_AXI_ERR(n)		BIT((n) + 1)
#define XILINX_DPDMA_EINTR_RD_AXI_ERR_MASK		GENMASK(6, 1)
#define XILINX_DPDMA_EINTR_PRE_ERR(n)			BIT((n) + 7)
#define XILINX_DPDMA_EINTR_PRE_ERR_MASK			GENMASK(12, 7)
#define XILINX_DPDMA_EINTR_CRC_ERR(n)			BIT((n) + 13)
#define XILINX_DPDMA_EINTR_CRC_ERR_MASK			GENMASK(18, 13)
#define XILINX_DPDMA_EINTR_WR_AXI_ERR(n)		BIT((n) + 19)
#define XILINX_DPDMA_EINTR_WR_AXI_ERR_MASK		GENMASK(24, 19)
#define XILINX_DPDMA_EINTR_DESC_DONE_ERR(n)		BIT((n) + 25)
#define XILINX_DPDMA_EINTR_DESC_DONE_ERR_MASK		GENMASK(30, 25)
#define XILINX_DPDMA_EINTR_RD_CMD_FIFO_FULL		BIT(31)
#define XILINX_DPDMA_EINTR_CHAN_ERR_MASK		0x02082082
#define XILINX_DPDMA_EINTR_CHAN_ERR			0x7ffffffe
#define XILINX_DPDMA_EINTR_GLOBAL_ERR			0x80000001
#define XILINX_DPDMA_EINTR_ALL				0xffffffff
#define XILINX_DPDMA_CNTL				0x100
#define XILINX_DPDMA_GBL				0x104
#define XILINX_DPDMA_GBL_TRIG_MASK(n)			((n) << 0)
#define XILINX_DPDMA_GBL_RETRIG_MASK(n)			((n) << 6)
#define XILINX_DPDMA_ALC0_CNTL				0x108
#define XILINX_DPDMA_ALC0_STATUS			0x10c
#define XILINX_DPDMA_ALC0_MAX				0x110
#define XILINX_DPDMA_ALC0_MIN				0x114
#define XILINX_DPDMA_ALC0_ACC				0x118
#define XILINX_DPDMA_ALC0_ACC_TRAN			0x11c
#define XILINX_DPDMA_ALC1_CNTL				0x120
#define XILINX_DPDMA_ALC1_STATUS			0x124
#define XILINX_DPDMA_ALC1_MAX				0x128
#define XILINX_DPDMA_ALC1_MIN				0x12c
#define XILINX_DPDMA_ALC1_ACC				0x130
#define XILINX_DPDMA_ALC1_ACC_TRAN			0x134

/* Channel register */
#define XILINX_DPDMA_CH_BASE				0x200
#define XILINX_DPDMA_CH_OFFSET				0x100
#define XILINX_DPDMA_CH_DESC_START_ADDRE		0x000
#define XILINX_DPDMA_CH_DESC_START_ADDRE_MASK		GENMASK(15, 0)
#define XILINX_DPDMA_CH_DESC_START_ADDR			0x004
#define XILINX_DPDMA_CH_DESC_NEXT_ADDRE			0x008
#define XILINX_DPDMA_CH_DESC_NEXT_ADDR			0x00c
#define XILINX_DPDMA_CH_PYLD_CUR_ADDRE			0x010
#define XILINX_DPDMA_CH_PYLD_CUR_ADDR			0x014
#define XILINX_DPDMA_CH_CNTL				0x018
#define XILINX_DPDMA_CH_CNTL_ENABLE			BIT(0)
#define XILINX_DPDMA_CH_CNTL_PAUSE			BIT(1)
#define XILINX_DPDMA_CH_CNTL_QOS_DSCR_WR_MASK		GENMASK(5, 2)
#define XILINX_DPDMA_CH_CNTL_QOS_DSCR_RD_MASK		GENMASK(9, 6)
#define XILINX_DPDMA_CH_CNTL_QOS_DATA_RD_MASK		GENMASK(13, 10)
#define XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS		11
#define XILINX_DPDMA_CH_STATUS				0x01c
#define XILINX_DPDMA_CH_STATUS_OTRAN_CNT_MASK		GENMASK(24, 21)
#define XILINX_DPDMA_CH_VDO				0x020
#define XILINX_DPDMA_CH_PYLD_SZ				0x024
#define XILINX_DPDMA_CH_DESC_ID				0x028
#define XILINX_DPDMA_CH_DESC_ID_MASK			GENMASK(15, 0)

/* DPDMA descriptor fields */
#define XILINX_DPDMA_DESC_CONTROL_PREAMBLE		0xa5
#define XILINX_DPDMA_DESC_CONTROL_COMPLETE_INTR		BIT(8)
#define XILINX_DPDMA_DESC_CONTROL_DESC_UPDATE		BIT(9)
#define XILINX_DPDMA_DESC_CONTROL_IGNORE_DONE		BIT(10)
#define XILINX_DPDMA_DESC_CONTROL_FRAG_MODE		BIT(18)
#define XILINX_DPDMA_DESC_CONTROL_LAST			BIT(19)
#define XILINX_DPDMA_DESC_CONTROL_ENABLE_CRC		BIT(20)
#define XILINX_DPDMA_DESC_CONTROL_LAST_OF_FRAME		BIT(21)
#define XILINX_DPDMA_DESC_ID_MASK			GENMASK(15, 0)
#define XILINX_DPDMA_DESC_HSIZE_STRIDE_HSIZE_MASK	GENMASK(17, 0)
#define XILINX_DPDMA_DESC_HSIZE_STRIDE_STRIDE_MASK	GENMASK(31, 18)
#define XILINX_DPDMA_DESC_ADDR_EXT_NEXT_ADDR_MASK	GENMASK(15, 0)
#define XILINX_DPDMA_DESC_ADDR_EXT_SRC_ADDR_MASK	GENMASK(31, 16)

#define XILINX_DPDMA_ALIGN_BYTES			256
#define XILINX_DPDMA_LINESIZE_ALIGN_BITS		128

#define XILINX_DPDMA_NUM_CHAN				6

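/*
 * Note: the per-channel interrupt bits above are laid out in six-bit groups,
 * one bit per channel, so a channel's full interrupt mask is obtained by
 * shifting the channel-0 mask by the channel ID. For example, for channel 1,
 * XILINX_DPDMA_INTR_CHAN_MASK << 1 selects DESC_DONE(1), NO_OSTAND(1),
 * AXI_ERR(1) and DESC_ERR(1), i.e. bits 1, 7, 13 and 19.
 */
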
struct xilinx_dpdma_chan;

/**
 * struct xilinx_dpdma_hw_desc - DPDMA hardware descriptor
 * @control: control configuration field
 * @desc_id: descriptor ID
 * @xfer_size: transfer size
 * @hsize_stride: horizontal size and stride
 * @timestamp_lsb: LSB of time stamp
 * @timestamp_msb: MSB of time stamp
 * @addr_ext: upper 16 bits of the 48-bit addresses (next_desc and src_addr)
 * @next_desc: next descriptor 32 bit address
 * @src_addr: payload source address (1st page, 32 LSB)
 * @addr_ext_23: payload source address (2nd and 3rd pages, 16 LSBs)
 * @addr_ext_45: payload source address (4th and 5th pages, 16 LSBs)
 * @src_addr2: payload source address (2nd page, 32 LSB)
 * @src_addr3: payload source address (3rd page, 32 LSB)
 * @src_addr4: payload source address (4th page, 32 LSB)
 * @src_addr5: payload source address (5th page, 32 LSB)
 * @crc: descriptor CRC
 */
struct xilinx_dpdma_hw_desc {
	u32 control;
	u32 desc_id;
	u32 xfer_size;
	u32 hsize_stride;
	u32 timestamp_lsb;
	u32 timestamp_msb;
	u32 addr_ext;
	u32 next_desc;
	u32 src_addr;
	u32 addr_ext_23;
	u32 addr_ext_45;
	u32 src_addr2;
	u32 src_addr3;
	u32 src_addr4;
	u32 src_addr5;
	u32 crc;
} __aligned(XILINX_DPDMA_ALIGN_BYTES);

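/*
 * The descriptor payload is 16 32-bit words (64 bytes); __aligned() pads the
 * structure to XILINX_DPDMA_ALIGN_BYTES so that descriptors allocated from
 * the DMA pool satisfy the engine's 256-byte alignment requirement, the same
 * alignment enforced on frame buffers in
 * xilinx_dpdma_chan_prep_interleaved_dma().
 */
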
/**
 * struct xilinx_dpdma_sw_desc - DPDMA software descriptor
 * @hw: DPDMA hardware descriptor
 * @node: list node for software descriptors
 * @dma_addr: DMA address of the software descriptor
 */
struct xilinx_dpdma_sw_desc {
	struct xilinx_dpdma_hw_desc hw;
	struct list_head node;
	dma_addr_t dma_addr;
};

/**
 * struct xilinx_dpdma_tx_desc - DPDMA transaction descriptor
 * @vdesc: virtual DMA descriptor
 * @chan: DPDMA channel
 * @descriptors: list of software descriptors
 * @error: an error has been detected with this descriptor
 */
struct xilinx_dpdma_tx_desc {
	struct virt_dma_desc vdesc;
	struct xilinx_dpdma_chan *chan;
	struct list_head descriptors;
	bool error;
};

#define to_dpdma_tx_desc(_desc) \
	container_of(_desc, struct xilinx_dpdma_tx_desc, vdesc)

/**
 * struct xilinx_dpdma_chan - DPDMA channel
 * @vchan: virtual DMA channel
 * @reg: register base address
 * @id: channel ID
 * @wait_to_stop: queue to wait for outstanding transactions before stopping
 * @running: true if the channel is running
 * @first_frame: flag for the first frame of stream
 * @video_group: flag if multi-channel operation is needed for video channels
 * @lock: lock to access struct xilinx_dpdma_chan
 * @desc_pool: descriptor allocation pool
 * @err_task: error IRQ bottom half handler
 * @desc: references to descriptors being processed
 * @desc.pending: descriptor scheduled to the hardware, pending execution
 * @desc.active: descriptor being executed by the hardware
 * @xdev: DPDMA device
 */
struct xilinx_dpdma_chan {
	struct virt_dma_chan vchan;
	void __iomem *reg;
	unsigned int id;

	wait_queue_head_t wait_to_stop;
	bool running;
	bool first_frame;
	bool video_group;

	spinlock_t lock; /* lock to access struct xilinx_dpdma_chan */
	struct dma_pool *desc_pool;
	struct tasklet_struct err_task;

	struct {
		struct xilinx_dpdma_tx_desc *pending;
		struct xilinx_dpdma_tx_desc *active;
	} desc;

	struct xilinx_dpdma_device *xdev;
};

#define to_xilinx_chan(_chan) \
	container_of(_chan, struct xilinx_dpdma_chan, vchan.chan)

/**
 * struct xilinx_dpdma_device - DPDMA device
 * @common: generic dma device structure
 * @reg: register base address
 * @dev: generic device structure
 * @irq: the interrupt number
 * @axi_clk: axi clock
 * @chan: DPDMA channels
 * @ext_addr: flag for 64 bit system (48 bit addressing)
 */
struct xilinx_dpdma_device {
	struct dma_device common;
	void __iomem *reg;
	struct device *dev;
	int irq;

	struct clk *axi_clk;
	struct xilinx_dpdma_chan *chan[XILINX_DPDMA_NUM_CHAN];

	bool ext_addr;
};

/* -----------------------------------------------------------------------------
 * DebugFS
 */

#ifdef CONFIG_DEBUG_FS

#define XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE	32
#define XILINX_DPDMA_DEBUGFS_UINT16_MAX_STR	"65535"

/* Match xilinx_dpdma_testcases vs dpdma_debugfs_reqs[] entry */
enum xilinx_dpdma_testcases {
	DPDMA_TC_INTR_DONE,
	DPDMA_TC_NONE
};

struct xilinx_dpdma_debugfs {
	enum xilinx_dpdma_testcases testcase;
	u16 xilinx_dpdma_irq_done_count;
	unsigned int chan_id;
};

static struct xilinx_dpdma_debugfs dpdma_debugfs;

struct xilinx_dpdma_debugfs_request {
	const char *name;
	enum xilinx_dpdma_testcases tc;
	ssize_t (*read)(char *buf);
	int (*write)(char *args);
};

static void xilinx_dpdma_debugfs_desc_done_irq(struct xilinx_dpdma_chan *chan)
{
	if (chan->id == dpdma_debugfs.chan_id)
		dpdma_debugfs.xilinx_dpdma_irq_done_count++;
}

static ssize_t xilinx_dpdma_debugfs_desc_done_irq_read(char *buf)
{
	size_t out_str_len;

	dpdma_debugfs.testcase = DPDMA_TC_NONE;

	out_str_len = strlen(XILINX_DPDMA_DEBUGFS_UINT16_MAX_STR);
	out_str_len = min_t(size_t, XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE,
			    out_str_len);
	snprintf(buf, out_str_len, "%d",
		 dpdma_debugfs.xilinx_dpdma_irq_done_count);

	return out_str_len;
}

static int xilinx_dpdma_debugfs_desc_done_irq_write(char *args)
{
	char *arg;
	int ret;
	u32 id;

	arg = strsep(&args, " ");
	if (!arg || strncasecmp(arg, "start", 5))
		return -EINVAL;

	arg = strsep(&args, " ");
	if (!arg)
		return -EINVAL;

	ret = kstrtou32(arg, 0, &id);
	if (ret < 0)
		return ret;

	if (id < ZYNQMP_DPDMA_VIDEO0 || id > ZYNQMP_DPDMA_AUDIO1)
		return -EINVAL;

	dpdma_debugfs.testcase = DPDMA_TC_INTR_DONE;
	dpdma_debugfs.xilinx_dpdma_irq_done_count = 0;
	dpdma_debugfs.chan_id = id;

	return 0;
}

/* Match xilinx_dpdma_testcases vs dpdma_debugfs_reqs[] entry */
static struct xilinx_dpdma_debugfs_request dpdma_debugfs_reqs[] = {
	{
		.name = "DESCRIPTOR_DONE_INTR",
		.tc = DPDMA_TC_INTR_DONE,
		.read = xilinx_dpdma_debugfs_desc_done_irq_read,
		.write = xilinx_dpdma_debugfs_desc_done_irq_write,
	},
};

static ssize_t xilinx_dpdma_debugfs_read(struct file *f, char __user *buf,
					 size_t size, loff_t *pos)
{
	enum xilinx_dpdma_testcases testcase;
	char *kern_buff;
	int ret = 0;

	if (*pos != 0 || size <= 0)
		return -EINVAL;

	kern_buff = kzalloc(XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE, GFP_KERNEL);
	if (!kern_buff) {
		dpdma_debugfs.testcase = DPDMA_TC_NONE;
		return -ENOMEM;
	}

	testcase = READ_ONCE(dpdma_debugfs.testcase);
	if (testcase != DPDMA_TC_NONE) {
		ret = dpdma_debugfs_reqs[testcase].read(kern_buff);
		if (ret < 0)
			goto done;
	} else {
		strlcpy(kern_buff, "No testcase executed",
			XILINX_DPDMA_DEBUGFS_READ_MAX_SIZE);
	}

	size = min(size, strlen(kern_buff));
	if (copy_to_user(buf, kern_buff, size))
		ret = -EFAULT;

done:
	kfree(kern_buff);
	if (ret < 0)
		return ret;

	*pos = size + 1;

	return size;
}

static ssize_t xilinx_dpdma_debugfs_write(struct file *f,
					  const char __user *buf, size_t size,
					  loff_t *pos)
{
	char *kern_buff, *kern_buff_start;
	char *testcase;
	unsigned int i;
	int ret;

	if (*pos != 0 || size <= 0)
		return -EINVAL;

	/* Supporting a single instance of the test as of now. */
	if (dpdma_debugfs.testcase != DPDMA_TC_NONE)
		return -EBUSY;

	kern_buff = kzalloc(size, GFP_KERNEL);
	if (!kern_buff)
		return -ENOMEM;
	kern_buff_start = kern_buff;

	ret = strncpy_from_user(kern_buff, buf, size);
	if (ret < 0)
		goto done;

	/* Read the testcase name from the user request. */
	testcase = strsep(&kern_buff, " ");

	for (i = 0; i < ARRAY_SIZE(dpdma_debugfs_reqs); i++) {
		if (!strcasecmp(testcase, dpdma_debugfs_reqs[i].name))
			break;
	}

	if (i == ARRAY_SIZE(dpdma_debugfs_reqs)) {
		ret = -EINVAL;
		goto done;
	}

	ret = dpdma_debugfs_reqs[i].write(kern_buff);
	if (ret < 0)
		goto done;

	ret = size;

done:
	kfree(kern_buff_start);
	return ret;
}

static const struct file_operations fops_xilinx_dpdma_dbgfs = {
	.owner = THIS_MODULE,
	.read = xilinx_dpdma_debugfs_read,
	.write = xilinx_dpdma_debugfs_write,
};

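/*
 * Illustrative usage (not part of the driver): the "testcase" file is created
 * under the dmaengine debugfs directory for this device, so with a
 * hypothetical device named fd4c0000.dma-controller, the DESCRIPTOR_DONE_INTR
 * test for channel 0 could be driven from user space (as root) with:
 *
 *	echo "DESCRIPTOR_DONE_INTR start 0" > \
 *		/sys/kernel/debug/dmaengine/fd4c0000.dma-controller/testcase
 *	cat /sys/kernel/debug/dmaengine/fd4c0000.dma-controller/testcase
 *
 * Reading the file returns the number of DONE interrupts counted on the
 * selected channel and resets the testcase to DPDMA_TC_NONE.
 */
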
static void xilinx_dpdma_debugfs_init(struct xilinx_dpdma_device *xdev)
{
	struct dentry *dent;

	dpdma_debugfs.testcase = DPDMA_TC_NONE;

	dent = debugfs_create_file("testcase", 0444, xdev->common.dbg_dev_root,
				   NULL, &fops_xilinx_dpdma_dbgfs);
	if (IS_ERR(dent))
		dev_err(xdev->dev, "Failed to create debugfs testcase file\n");
}
#else
static void xilinx_dpdma_debugfs_init(struct xilinx_dpdma_device *xdev)
{
}

static void xilinx_dpdma_debugfs_desc_done_irq(struct xilinx_dpdma_chan *chan)
{
}
#endif /* CONFIG_DEBUG_FS */

/* -----------------------------------------------------------------------------
 * I/O Accessors
 */

static inline u32 dpdma_read(void __iomem *base, u32 offset)
{
	return ioread32(base + offset);
}

static inline void dpdma_write(void __iomem *base, u32 offset, u32 val)
{
	iowrite32(val, base + offset);
}

static inline void dpdma_clr(void __iomem *base, u32 offset, u32 clr)
{
	dpdma_write(base, offset, dpdma_read(base, offset) & ~clr);
}

static inline void dpdma_set(void __iomem *base, u32 offset, u32 set)
{
	dpdma_write(base, offset, dpdma_read(base, offset) | set);
}

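/*
 * Note that dpdma_clr() and dpdma_set() are non-atomic read-modify-write
 * helpers; callers are expected to serialize access to a given register,
 * typically through the channel lock, rather than relying on the accessors
 * themselves.
 */
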
/* -----------------------------------------------------------------------------
 * Descriptor Operations
 */

/**
 * xilinx_dpdma_sw_desc_set_dma_addrs - Set DMA addresses in the descriptor
 * @xdev: DPDMA device
 * @sw_desc: The software descriptor in which to set DMA addresses
 * @prev: The previous descriptor
 * @dma_addr: array of dma addresses
 * @num_src_addr: number of addresses in @dma_addr
 *
 * Set all the DMA addresses in the hardware descriptor corresponding to
 * @sw_desc from @dma_addr. If a previous descriptor is specified in @prev, its
 * next descriptor DMA address is set to the DMA address of @sw_desc. @prev may
 * be identical to @sw_desc for cyclic transfers.
 */
static void xilinx_dpdma_sw_desc_set_dma_addrs(struct xilinx_dpdma_device *xdev,
					       struct xilinx_dpdma_sw_desc *sw_desc,
					       struct xilinx_dpdma_sw_desc *prev,
					       dma_addr_t dma_addr[],
					       unsigned int num_src_addr)
{
	struct xilinx_dpdma_hw_desc *hw_desc = &sw_desc->hw;
	unsigned int i;

	hw_desc->src_addr = lower_32_bits(dma_addr[0]);
	if (xdev->ext_addr)
		hw_desc->addr_ext |=
			FIELD_PREP(XILINX_DPDMA_DESC_ADDR_EXT_SRC_ADDR_MASK,
				   upper_32_bits(dma_addr[0]));

	for (i = 1; i < num_src_addr; i++) {
		u32 *addr = &hw_desc->src_addr2;

		addr[i - 1] = lower_32_bits(dma_addr[i]);

		if (xdev->ext_addr) {
			u32 *addr_ext = &hw_desc->addr_ext_23;
			u32 addr_msb;

			addr_msb = upper_32_bits(dma_addr[i]) & GENMASK(15, 0);
			addr_msb <<= 16 * ((i - 1) % 2);
			addr_ext[(i - 1) / 2] |= addr_msb;
		}
	}

	if (!prev)
		return;

	prev->hw.next_desc = lower_32_bits(sw_desc->dma_addr);
	if (xdev->ext_addr)
		prev->hw.addr_ext |=
			FIELD_PREP(XILINX_DPDMA_DESC_ADDR_EXT_NEXT_ADDR_MASK,
				   upper_32_bits(sw_desc->dma_addr));
}

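/*
 * Worked example (illustrative): with xdev->ext_addr set and a single 48-bit
 * source address dma_addr[0] = 0x123456789abc, lower_32_bits() stores
 * 0x56789abc in src_addr, and FIELD_PREP() packs the upper 16 bits (0x1234)
 * into bits 31:16 of addr_ext, as selected by
 * XILINX_DPDMA_DESC_ADDR_EXT_SRC_ADDR_MASK.
 */
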
/**
 * xilinx_dpdma_chan_alloc_sw_desc - Allocate a software descriptor
 * @chan: DPDMA channel
 *
 * Allocate a software descriptor from the channel's descriptor pool.
 *
 * Return: a software descriptor or NULL.
 */
static struct xilinx_dpdma_sw_desc *
xilinx_dpdma_chan_alloc_sw_desc(struct xilinx_dpdma_chan *chan)
{
	struct xilinx_dpdma_sw_desc *sw_desc;
	dma_addr_t dma_addr;

	sw_desc = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &dma_addr);
	if (!sw_desc)
		return NULL;

	sw_desc->dma_addr = dma_addr;

	return sw_desc;
}

/**
 * xilinx_dpdma_chan_free_sw_desc - Free a software descriptor
 * @chan: DPDMA channel
 * @sw_desc: software descriptor to free
 *
 * Free a software descriptor from the channel's descriptor pool.
 */
static void
xilinx_dpdma_chan_free_sw_desc(struct xilinx_dpdma_chan *chan,
			       struct xilinx_dpdma_sw_desc *sw_desc)
{
	dma_pool_free(chan->desc_pool, sw_desc, sw_desc->dma_addr);
}

/**
 * xilinx_dpdma_chan_dump_tx_desc - Dump a tx descriptor
 * @chan: DPDMA channel
 * @tx_desc: tx descriptor to dump
 *
 * Dump the contents of a tx descriptor.
 */
static void xilinx_dpdma_chan_dump_tx_desc(struct xilinx_dpdma_chan *chan,
					   struct xilinx_dpdma_tx_desc *tx_desc)
{
	struct xilinx_dpdma_sw_desc *sw_desc;
	struct device *dev = chan->xdev->dev;
	unsigned int i = 0;

	dev_dbg(dev, "------- TX descriptor dump start -------\n");
	dev_dbg(dev, "------- channel ID = %d -------\n", chan->id);

	list_for_each_entry(sw_desc, &tx_desc->descriptors, node) {
		struct xilinx_dpdma_hw_desc *hw_desc = &sw_desc->hw;

		dev_dbg(dev, "------- HW descriptor %d -------\n", i++);
		dev_dbg(dev, "descriptor DMA addr: %pad\n", &sw_desc->dma_addr);
		dev_dbg(dev, "control: 0x%08x\n", hw_desc->control);
		dev_dbg(dev, "desc_id: 0x%08x\n", hw_desc->desc_id);
		dev_dbg(dev, "xfer_size: 0x%08x\n", hw_desc->xfer_size);
		dev_dbg(dev, "hsize_stride: 0x%08x\n", hw_desc->hsize_stride);
		dev_dbg(dev, "timestamp_lsb: 0x%08x\n", hw_desc->timestamp_lsb);
		dev_dbg(dev, "timestamp_msb: 0x%08x\n", hw_desc->timestamp_msb);
		dev_dbg(dev, "addr_ext: 0x%08x\n", hw_desc->addr_ext);
		dev_dbg(dev, "next_desc: 0x%08x\n", hw_desc->next_desc);
		dev_dbg(dev, "src_addr: 0x%08x\n", hw_desc->src_addr);
		dev_dbg(dev, "addr_ext_23: 0x%08x\n", hw_desc->addr_ext_23);
		dev_dbg(dev, "addr_ext_45: 0x%08x\n", hw_desc->addr_ext_45);
		dev_dbg(dev, "src_addr2: 0x%08x\n", hw_desc->src_addr2);
		dev_dbg(dev, "src_addr3: 0x%08x\n", hw_desc->src_addr3);
		dev_dbg(dev, "src_addr4: 0x%08x\n", hw_desc->src_addr4);
		dev_dbg(dev, "src_addr5: 0x%08x\n", hw_desc->src_addr5);
		dev_dbg(dev, "crc: 0x%08x\n", hw_desc->crc);
	}

	dev_dbg(dev, "------- TX descriptor dump end -------\n");
}

/**
 * xilinx_dpdma_chan_alloc_tx_desc - Allocate a transaction descriptor
 * @chan: DPDMA channel
 *
 * Allocate a tx descriptor.
 *
 * Return: a tx descriptor or NULL.
 */
static struct xilinx_dpdma_tx_desc *
xilinx_dpdma_chan_alloc_tx_desc(struct xilinx_dpdma_chan *chan)
{
	struct xilinx_dpdma_tx_desc *tx_desc;

	tx_desc = kzalloc(sizeof(*tx_desc), GFP_NOWAIT);
	if (!tx_desc)
		return NULL;

	INIT_LIST_HEAD(&tx_desc->descriptors);
	tx_desc->chan = chan;
	tx_desc->error = false;

	return tx_desc;
}

/**
 * xilinx_dpdma_chan_free_tx_desc - Free a virtual DMA descriptor
 * @vdesc: virtual DMA descriptor
 *
 * Free the virtual DMA descriptor @vdesc including its software descriptors.
 */
static void xilinx_dpdma_chan_free_tx_desc(struct virt_dma_desc *vdesc)
{
	struct xilinx_dpdma_sw_desc *sw_desc, *next;
	struct xilinx_dpdma_tx_desc *desc;

	if (!vdesc)
		return;

	desc = to_dpdma_tx_desc(vdesc);

	list_for_each_entry_safe(sw_desc, next, &desc->descriptors, node) {
		list_del(&sw_desc->node);
		xilinx_dpdma_chan_free_sw_desc(desc->chan, sw_desc);
	}

	kfree(desc);
}

/**
 * xilinx_dpdma_chan_prep_interleaved_dma - Prepare an interleaved dma
 *					    descriptor
 * @chan: DPDMA channel
 * @xt: dma interleaved template
 *
 * Prepare a tx descriptor, including the internal software/hardware
 * descriptors, for the given interleaved template @xt.
 *
 * Return: A DPDMA TX descriptor on success, or NULL.
 */
static struct xilinx_dpdma_tx_desc *
xilinx_dpdma_chan_prep_interleaved_dma(struct xilinx_dpdma_chan *chan,
				       struct dma_interleaved_template *xt)
{
	struct xilinx_dpdma_tx_desc *tx_desc;
	struct xilinx_dpdma_sw_desc *sw_desc;
	struct xilinx_dpdma_hw_desc *hw_desc;
	size_t hsize = xt->sgl[0].size;
	size_t stride = hsize + xt->sgl[0].icg;

	if (!IS_ALIGNED(xt->src_start, XILINX_DPDMA_ALIGN_BYTES)) {
		dev_err(chan->xdev->dev, "buffer should be aligned at %d B\n",
			XILINX_DPDMA_ALIGN_BYTES);
		return NULL;
	}

	tx_desc = xilinx_dpdma_chan_alloc_tx_desc(chan);
	if (!tx_desc)
		return NULL;

	sw_desc = xilinx_dpdma_chan_alloc_sw_desc(chan);
	if (!sw_desc) {
		xilinx_dpdma_chan_free_tx_desc(&tx_desc->vdesc);
		return NULL;
	}

	xilinx_dpdma_sw_desc_set_dma_addrs(chan->xdev, sw_desc, sw_desc,
					   &xt->src_start, 1);

	hw_desc = &sw_desc->hw;
	hsize = ALIGN(hsize, XILINX_DPDMA_LINESIZE_ALIGN_BITS / 8);
	hw_desc->xfer_size = hsize * xt->numf;
	hw_desc->hsize_stride =
		FIELD_PREP(XILINX_DPDMA_DESC_HSIZE_STRIDE_HSIZE_MASK, hsize) |
		FIELD_PREP(XILINX_DPDMA_DESC_HSIZE_STRIDE_STRIDE_MASK,
			   stride / 16);
	hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_PREAMBLE;
	hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_COMPLETE_INTR;
	hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_IGNORE_DONE;
	hw_desc->control |= XILINX_DPDMA_DESC_CONTROL_LAST_OF_FRAME;

	list_add_tail(&sw_desc->node, &tx_desc->descriptors);

	return tx_desc;
}

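/*
 * Worked example (illustrative): for a 1920x1080 XRGB8888 frame with no
 * inter-line gap, xt->sgl[0].size = 1920 * 4 = 7680 bytes and icg = 0, so
 * hsize = stride = 7680. hsize is already a multiple of
 * XILINX_DPDMA_LINESIZE_ALIGN_BITS / 8 = 16 bytes, xfer_size becomes
 * 7680 * 1080 bytes, and the stride is stored in 16-byte units as
 * 7680 / 16 = 480.
 */
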
/* -----------------------------------------------------------------------------
 * DPDMA Channel Operations
 */

/**
 * xilinx_dpdma_chan_enable - Enable the channel
 * @chan: DPDMA channel
 *
 * Enable the channel and its interrupts. Set the QoS values for video class.
 */
static void xilinx_dpdma_chan_enable(struct xilinx_dpdma_chan *chan)
{
	u32 reg;

	reg = (XILINX_DPDMA_INTR_CHAN_MASK << chan->id)
	    | XILINX_DPDMA_INTR_GLOBAL_MASK;
	dpdma_write(chan->xdev->reg, XILINX_DPDMA_IEN, reg);
	reg = (XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id)
	    | XILINX_DPDMA_EINTR_GLOBAL_ERR;
	dpdma_write(chan->xdev->reg, XILINX_DPDMA_EIEN, reg);

	reg = XILINX_DPDMA_CH_CNTL_ENABLE
	    | FIELD_PREP(XILINX_DPDMA_CH_CNTL_QOS_DSCR_WR_MASK,
			 XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS)
	    | FIELD_PREP(XILINX_DPDMA_CH_CNTL_QOS_DSCR_RD_MASK,
			 XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS)
	    | FIELD_PREP(XILINX_DPDMA_CH_CNTL_QOS_DATA_RD_MASK,
			 XILINX_DPDMA_CH_CNTL_QOS_VID_CLASS);
	dpdma_set(chan->reg, XILINX_DPDMA_CH_CNTL, reg);
}

/**
 * xilinx_dpdma_chan_disable - Disable the channel
 * @chan: DPDMA channel
 *
 * Disable the channel and its interrupts.
 */
static void xilinx_dpdma_chan_disable(struct xilinx_dpdma_chan *chan)
{
	u32 reg;

	reg = XILINX_DPDMA_INTR_CHAN_MASK << chan->id;
	dpdma_write(chan->xdev->reg, XILINX_DPDMA_IDS, reg);
	reg = XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id;
	dpdma_write(chan->xdev->reg, XILINX_DPDMA_EIDS, reg);

	dpdma_clr(chan->reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_ENABLE);
}

/**
 * xilinx_dpdma_chan_pause - Pause the channel
 * @chan: DPDMA channel
 *
 * Pause the channel.
 */
static void xilinx_dpdma_chan_pause(struct xilinx_dpdma_chan *chan)
{
	dpdma_set(chan->reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_PAUSE);
}

/**
 * xilinx_dpdma_chan_unpause - Unpause the channel
 * @chan: DPDMA channel
 *
 * Unpause the channel.
 */
static void xilinx_dpdma_chan_unpause(struct xilinx_dpdma_chan *chan)
{
	dpdma_clr(chan->reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_PAUSE);
}

static u32 xilinx_dpdma_chan_video_group_ready(struct xilinx_dpdma_chan *chan)
{
	struct xilinx_dpdma_device *xdev = chan->xdev;
	u32 channels = 0;
	unsigned int i;

	for (i = ZYNQMP_DPDMA_VIDEO0; i <= ZYNQMP_DPDMA_VIDEO2; i++) {
		if (xdev->chan[i]->video_group && !xdev->chan[i]->running)
			return 0;

		if (xdev->chan[i]->video_group)
			channels |= BIT(i);
	}

	return channels;
}

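/*
 * For example, if only VIDEO0 and VIDEO1 are configured as a video group and
 * both are running, this returns BIT(0) | BIT(1) = 0x3, which is then used as
 * a common trigger mask so that both channels start on the same frame.
 */
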
/**
 * xilinx_dpdma_chan_queue_transfer - Queue the next transfer
 * @chan: DPDMA channel
 *
 * Queue the next descriptor, if any, to the hardware. If the channel is
 * stopped, start it first. Otherwise retrigger it with the next descriptor.
 */
static void xilinx_dpdma_chan_queue_transfer(struct xilinx_dpdma_chan *chan)
{
	struct xilinx_dpdma_device *xdev = chan->xdev;
	struct xilinx_dpdma_sw_desc *sw_desc;
	struct xilinx_dpdma_tx_desc *desc;
	struct virt_dma_desc *vdesc;
	u32 reg, channels;
	bool first_frame;

	lockdep_assert_held(&chan->lock);

	if (chan->desc.pending)
		return;

	if (!chan->running) {
		xilinx_dpdma_chan_unpause(chan);
		xilinx_dpdma_chan_enable(chan);
		chan->first_frame = true;
		chan->running = true;
	}

	vdesc = vchan_next_desc(&chan->vchan);
	if (!vdesc)
		return;

	desc = to_dpdma_tx_desc(vdesc);
	chan->desc.pending = desc;
	list_del(&desc->vdesc.node);

	/*
	 * Assign the cookie to descriptors in this transaction. Only 16 bit
	 * will be used, but it should be enough.
	 */
	list_for_each_entry(sw_desc, &desc->descriptors, node)
		sw_desc->hw.desc_id = desc->vdesc.tx.cookie
				    & XILINX_DPDMA_CH_DESC_ID_MASK;

	sw_desc = list_first_entry(&desc->descriptors,
				   struct xilinx_dpdma_sw_desc, node);
	dpdma_write(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDR,
		    lower_32_bits(sw_desc->dma_addr));
	if (xdev->ext_addr)
		dpdma_write(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDRE,
			    FIELD_PREP(XILINX_DPDMA_CH_DESC_START_ADDRE_MASK,
				       upper_32_bits(sw_desc->dma_addr)));

	first_frame = chan->first_frame;
	chan->first_frame = false;

	if (chan->video_group) {
		channels = xilinx_dpdma_chan_video_group_ready(chan);
		/*
		 * Trigger the transfer only when all channels in the group
		 * are ready.
		 */
		if (!channels)
			return;
	} else {
		channels = BIT(chan->id);
	}

	if (first_frame)
		reg = XILINX_DPDMA_GBL_TRIG_MASK(channels);
	else
		reg = XILINX_DPDMA_GBL_RETRIG_MASK(channels);

	dpdma_write(xdev->reg, XILINX_DPDMA_GBL, reg);
}

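/*
 * Illustrative timeline: the first descriptor of a stream is started with
 * XILINX_DPDMA_GBL_TRIG_MASK() while the channel is idle; every subsequent
 * descriptor is handed over with XILINX_DPDMA_GBL_RETRIG_MASK(), and the
 * hardware switches to it at the next VSYNC, at which point
 * xilinx_dpdma_chan_vsync_irq() promotes it from desc.pending to desc.active.
 */
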
/**
 * xilinx_dpdma_chan_ostand - Number of outstanding transactions
 * @chan: DPDMA channel
 *
 * Read and return the number of outstanding transactions from register.
 *
 * Return: Number of outstanding transactions from the status register.
 */
static u32 xilinx_dpdma_chan_ostand(struct xilinx_dpdma_chan *chan)
{
	return FIELD_GET(XILINX_DPDMA_CH_STATUS_OTRAN_CNT_MASK,
			 dpdma_read(chan->reg, XILINX_DPDMA_CH_STATUS));
}

/**
 * xilinx_dpdma_chan_notify_no_ostand - Notify no outstanding transaction event
 * @chan: DPDMA channel
 *
 * Notify waiters of the 'no outstanding transaction' event, so that waiters
 * can stop the channel safely. This function is supposed to be called when the
 * 'no outstanding' interrupt is generated. The 'no outstanding' interrupt is
 * disabled and should be re-enabled when this event is handled. If the channel
 * status register still shows some number of outstanding transactions, the
 * interrupt remains enabled.
 *
 * Return: 0 on success. On failure, -EWOULDBLOCK if there's still outstanding
 * transactions.
 */
static int xilinx_dpdma_chan_notify_no_ostand(struct xilinx_dpdma_chan *chan)
{
	u32 cnt;

	cnt = xilinx_dpdma_chan_ostand(chan);
	if (cnt) {
		dev_dbg(chan->xdev->dev, "%d outstanding transactions\n", cnt);
		return -EWOULDBLOCK;
	}

	/* Disable the 'no outstanding' interrupt. */
	dpdma_write(chan->xdev->reg, XILINX_DPDMA_IDS,
		    XILINX_DPDMA_INTR_NO_OSTAND(chan->id));
	wake_up(&chan->wait_to_stop);

	return 0;
}

/**
 * xilinx_dpdma_chan_wait_no_ostand - Wait for the no outstanding irq
 * @chan: DPDMA channel
 *
 * Wait for the 'no outstanding transaction' interrupt. This function can
 * sleep for up to 50 ms.
 *
 * Return: 0 on success. On failure, -ETIMEDOUT for time out, or the error
 * code from wait_event_interruptible_timeout().
 */
static int xilinx_dpdma_chan_wait_no_ostand(struct xilinx_dpdma_chan *chan)
{
	int ret;

	/* Wait for the 'no outstanding transaction' interrupt for up to 50 ms. */
	ret = wait_event_interruptible_timeout(chan->wait_to_stop,
					       !xilinx_dpdma_chan_ostand(chan),
					       msecs_to_jiffies(50));
	if (ret > 0) {
		dpdma_write(chan->xdev->reg, XILINX_DPDMA_IEN,
			    XILINX_DPDMA_INTR_NO_OSTAND(chan->id));
		return 0;
	}

	dev_err(chan->xdev->dev, "not ready to stop: %d trans\n",
		xilinx_dpdma_chan_ostand(chan));

	if (ret == 0)
		return -ETIMEDOUT;

	return ret;
}

/**
 * xilinx_dpdma_chan_poll_no_ostand - Poll the outstanding transaction status
 * @chan: DPDMA channel
 *
 * Poll the outstanding transaction status, and return when there's no
 * outstanding transaction. This function can be used in interrupt context or
 * where atomicity is required. The calling thread may wait for more than
 * 50 ms.
 *
 * Return: 0 on success, or -ETIMEDOUT.
 */
static int xilinx_dpdma_chan_poll_no_ostand(struct xilinx_dpdma_chan *chan)
{
	u32 cnt, loop = 50000;

	/* Poll at least for 50ms (20 fps). */
	do {
		cnt = xilinx_dpdma_chan_ostand(chan);
		udelay(1);
	} while (loop-- > 0 && cnt);

	if (loop) {
		dpdma_write(chan->xdev->reg, XILINX_DPDMA_IEN,
			    XILINX_DPDMA_INTR_NO_OSTAND(chan->id));
		return 0;
	}

	dev_err(chan->xdev->dev, "not ready to stop: %d trans\n",
		xilinx_dpdma_chan_ostand(chan));

	return -ETIMEDOUT;
}

/**
 * xilinx_dpdma_chan_stop - Stop the channel
 * @chan: DPDMA channel
 *
 * Stop a previously paused channel by first waiting for completion of all
 * outstanding transactions and then disabling the channel.
 *
 * Return: 0 on success, or -ETIMEDOUT if the channel failed to stop.
 */
static int xilinx_dpdma_chan_stop(struct xilinx_dpdma_chan *chan)
{
	unsigned long flags;
	int ret;

	ret = xilinx_dpdma_chan_wait_no_ostand(chan);
	if (ret)
		return ret;

	spin_lock_irqsave(&chan->lock, flags);
	xilinx_dpdma_chan_disable(chan);
	chan->running = false;
	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}

/**
 * xilinx_dpdma_chan_done_irq - Handle hardware descriptor completion
 * @chan: DPDMA channel
 *
 * Handle completion of the currently active descriptor (@chan->desc.active).
 * As we currently support cyclic transfers only, this just invokes the cyclic
 * callback. The descriptor will be completed at the VSYNC interrupt when a
 * new descriptor replaces it.
 */
static void xilinx_dpdma_chan_done_irq(struct xilinx_dpdma_chan *chan)
{
	struct xilinx_dpdma_tx_desc *active;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	xilinx_dpdma_debugfs_desc_done_irq(chan);

	active = chan->desc.active;
	if (active)
		vchan_cyclic_callback(&active->vdesc);
	else
		dev_warn(chan->xdev->dev,
			 "DONE IRQ with no active descriptor!\n");

	spin_unlock_irqrestore(&chan->lock, flags);
}

/**
 * xilinx_dpdma_chan_vsync_irq - Handle hardware descriptor scheduling
 * @chan: DPDMA channel
 *
 * At VSYNC the active descriptor may have been replaced by the pending
 * descriptor. Detect this through the DESC_ID and perform appropriate
 * bookkeeping.
 */
static void xilinx_dpdma_chan_vsync_irq(struct xilinx_dpdma_chan *chan)
{
	struct xilinx_dpdma_tx_desc *pending;
	struct xilinx_dpdma_sw_desc *sw_desc;
	unsigned long flags;
	u32 desc_id;

	spin_lock_irqsave(&chan->lock, flags);

	pending = chan->desc.pending;
	if (!chan->running || !pending)
		goto out;

	desc_id = dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_ID)
		& XILINX_DPDMA_CH_DESC_ID_MASK;

	/* If the retrigger raced with vsync, retry at the next frame. */
	sw_desc = list_first_entry(&pending->descriptors,
				   struct xilinx_dpdma_sw_desc, node);
	if (sw_desc->hw.desc_id != desc_id)
		goto out;

	/*
	 * Complete the active descriptor, if any, promote the pending
	 * descriptor to active, and queue the next transfer, if any.
	 */
	spin_lock(&chan->vchan.lock);
	if (chan->desc.active)
		vchan_cookie_complete(&chan->desc.active->vdesc);
	chan->desc.active = pending;
	chan->desc.pending = NULL;

	xilinx_dpdma_chan_queue_transfer(chan);
	spin_unlock(&chan->vchan.lock);

out:
	spin_unlock_irqrestore(&chan->lock, flags);
}

/**
 * xilinx_dpdma_chan_err - Detect any channel error
 * @chan: DPDMA channel
 * @isr: masked Interrupt Status Register
 * @eisr: Error Interrupt Status Register
 *
 * Return: true if any channel error occurs, or false otherwise.
 */
static bool
xilinx_dpdma_chan_err(struct xilinx_dpdma_chan *chan, u32 isr, u32 eisr)
{
	if (!chan)
		return false;

	if (chan->running &&
	    ((isr & (XILINX_DPDMA_INTR_CHAN_ERR_MASK << chan->id)) ||
	     (eisr & (XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id))))
		return true;

	return false;
}

/**
 * xilinx_dpdma_chan_handle_err - DPDMA channel error handling
 * @chan: DPDMA channel
 *
 * This function is called when any channel error or any global error occurs.
 * It disables the channel that was paused by the error, and determines
 * whether the currently active descriptor can be rescheduled, depending on
 * the descriptor status.
 */
static void xilinx_dpdma_chan_handle_err(struct xilinx_dpdma_chan *chan)
{
	struct xilinx_dpdma_device *xdev = chan->xdev;
	struct xilinx_dpdma_tx_desc *active;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	dev_dbg(xdev->dev, "cur desc addr = 0x%04x%08x\n",
		dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDRE),
		dpdma_read(chan->reg, XILINX_DPDMA_CH_DESC_START_ADDR));
	dev_dbg(xdev->dev, "cur payload addr = 0x%04x%08x\n",
		dpdma_read(chan->reg, XILINX_DPDMA_CH_PYLD_CUR_ADDRE),
		dpdma_read(chan->reg, XILINX_DPDMA_CH_PYLD_CUR_ADDR));

	xilinx_dpdma_chan_disable(chan);
	chan->running = false;

	if (!chan->desc.active)
		goto out_unlock;

	active = chan->desc.active;
	chan->desc.active = NULL;

	xilinx_dpdma_chan_dump_tx_desc(chan, active);

	if (active->error)
		dev_dbg(xdev->dev, "repeated error on desc\n");

	/* Reschedule if there's no new descriptor. */
	if (!chan->desc.pending &&
	    list_empty(&chan->vchan.desc_issued)) {
		active->error = true;
		list_add_tail(&active->vdesc.node,
			      &chan->vchan.desc_issued);
	} else {
		xilinx_dpdma_chan_free_tx_desc(&active->vdesc);
	}

out_unlock:
	spin_unlock_irqrestore(&chan->lock, flags);
}

/* -----------------------------------------------------------------------------
 * DMA Engine Operations
 */

static struct dma_async_tx_descriptor *
xilinx_dpdma_prep_interleaved_dma(struct dma_chan *dchan,
				  struct dma_interleaved_template *xt,
				  unsigned long flags)
{
	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dpdma_tx_desc *desc;

	if (xt->dir != DMA_MEM_TO_DEV)
		return NULL;

	if (!xt->numf || !xt->sgl[0].size)
		return NULL;

	if (!(flags & DMA_PREP_REPEAT) || !(flags & DMA_PREP_LOAD_EOT))
		return NULL;

	desc = xilinx_dpdma_chan_prep_interleaved_dma(chan, xt);
	if (!desc)
		return NULL;

	vchan_tx_prep(&chan->vchan, &desc->vdesc, flags | DMA_CTRL_ACK);

	return &desc->vdesc.tx;
}

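/*
 * Illustrative client usage (not part of this driver; all names and values
 * are hypothetical): a display driver would typically submit a repeated
 * interleaved transfer along these lines:
 *
 *	struct dma_interleaved_template *xt;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	xt = kzalloc(struct_size(xt, sgl, 1), GFP_KERNEL);
 *	xt->dir = DMA_MEM_TO_DEV;
 *	xt->src_start = paddr;
 *	xt->src_sgl = true;
 *	xt->dst_sgl = false;
 *	xt->frame_size = 1;
 *	xt->numf = height;
 *	xt->sgl[0].size = width * 4;
 *	xt->sgl[0].icg = stride - width * 4;
 *
 *	tx = dmaengine_prep_interleaved_dma(chan, xt,
 *			DMA_CTRL_ACK | DMA_PREP_REPEAT | DMA_PREP_LOAD_EOT);
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */
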
/**
 * xilinx_dpdma_alloc_chan_resources - Allocate resources for the channel
 * @dchan: DMA channel
 *
 * Allocate a descriptor pool for the channel.
 *
 * Return: 0 on success, or -ENOMEM if failed to allocate a pool.
 */
static int xilinx_dpdma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
	size_t align = __alignof__(struct xilinx_dpdma_sw_desc);

	chan->desc_pool = dma_pool_create(dev_name(chan->xdev->dev),
					  chan->xdev->dev,
					  sizeof(struct xilinx_dpdma_sw_desc),
					  align, 0);
	if (!chan->desc_pool) {
		dev_err(chan->xdev->dev,
			"failed to allocate a descriptor pool\n");
		return -ENOMEM;
	}

	return 0;
}

/**
 * xilinx_dpdma_free_chan_resources - Free all resources for the channel
 * @dchan: DMA channel
 *
 * Free resources associated with the virtual DMA channel, and destroy the
 * descriptor pool.
 */
static void xilinx_dpdma_free_chan_resources(struct dma_chan *dchan)
{
	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);

	vchan_free_chan_resources(&chan->vchan);

	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
}

static void xilinx_dpdma_issue_pending(struct dma_chan *dchan)
{
	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	spin_lock(&chan->vchan.lock);
	if (vchan_issue_pending(&chan->vchan))
		xilinx_dpdma_chan_queue_transfer(chan);
	spin_unlock(&chan->vchan.lock);
	spin_unlock_irqrestore(&chan->lock, flags);
}

static int xilinx_dpdma_config(struct dma_chan *dchan,
			       struct dma_slave_config *config)
{
	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
	unsigned long flags;

	/*
	 * The destination address doesn't need to be specified as the DPDMA is
	 * hardwired to the destination (the DP controller). The transfer
	 * width, burst size and port window size are thus meaningless, they're
	 * fixed both on the DPDMA side and on the DP controller side.
	 */

	spin_lock_irqsave(&chan->lock, flags);

	/*
	 * Abuse the slave_id to indicate that the channel is part of a video
	 * group.
	 */
	if (chan->id <= ZYNQMP_DPDMA_VIDEO2)
		chan->video_group = config->slave_id != 0;

	spin_unlock_irqrestore(&chan->lock, flags);

	return 0;
}

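/*
 * Illustrative client usage (hypothetical): a client marks a video channel as
 * part of the video group through the slave_id hack described above:
 *
 *	struct dma_slave_config config = { .slave_id = 1 };
 *
 *	dmaengine_slave_config(chan, &config);
 */
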
static int xilinx_dpdma_pause(struct dma_chan *dchan)
{
	xilinx_dpdma_chan_pause(to_xilinx_chan(dchan));

	return 0;
}

static int xilinx_dpdma_resume(struct dma_chan *dchan)
{
	xilinx_dpdma_chan_unpause(to_xilinx_chan(dchan));

	return 0;
}

/**
 * xilinx_dpdma_terminate_all - Terminate the channel and descriptors
 * @dchan: DMA channel
 *
 * Pause the channel without waiting for ongoing transfers to complete.
 * Waiting for completion is performed by xilinx_dpdma_synchronize() that will
 * disable the channel to complete the stop.
 *
 * All the descriptors associated with the channel that are guaranteed not to
 * be touched by the hardware are freed. The pending and active descriptors
 * are not touched, and will be freed either upon completion, or by
 * xilinx_dpdma_synchronize().
 *
 * Return: 0 always.
 */
static int xilinx_dpdma_terminate_all(struct dma_chan *dchan)
{
	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dpdma_device *xdev = chan->xdev;
	LIST_HEAD(descriptors);
	unsigned long flags;
	unsigned int i;

	/* Pause the channel (including the whole video group if applicable). */
	if (chan->video_group) {
		for (i = ZYNQMP_DPDMA_VIDEO0; i <= ZYNQMP_DPDMA_VIDEO2; i++) {
			if (xdev->chan[i]->video_group &&
			    xdev->chan[i]->running) {
				xilinx_dpdma_chan_pause(xdev->chan[i]);
				xdev->chan[i]->video_group = false;
			}
		}
	} else {
		xilinx_dpdma_chan_pause(chan);
	}

	/* Gather all the descriptors we can free and free them. */
	spin_lock_irqsave(&chan->vchan.lock, flags);
	vchan_get_all_descriptors(&chan->vchan, &descriptors);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	vchan_dma_desc_free_list(&chan->vchan, &descriptors);

	return 0;
}

/**
 * xilinx_dpdma_synchronize - Synchronize callback execution
 * @dchan: DMA channel
 *
 * Synchronizing callback execution ensures that all previously issued
 * transfers have completed and all associated callbacks have been called and
 * have returned.
 *
 * This function waits for the DMA channel to stop. It assumes it has been
 * paused by a previous call to dmaengine_terminate_async(), and that no new
 * pending descriptors have been issued with dma_async_issue_pending(). The
 * behaviour is undefined otherwise.
 */
static void xilinx_dpdma_synchronize(struct dma_chan *dchan)
{
	struct xilinx_dpdma_chan *chan = to_xilinx_chan(dchan);
	unsigned long flags;

	xilinx_dpdma_chan_stop(chan);

	spin_lock_irqsave(&chan->vchan.lock, flags);
	if (chan->desc.pending) {
		vchan_terminate_vdesc(&chan->desc.pending->vdesc);
		chan->desc.pending = NULL;
	}
	if (chan->desc.active) {
		vchan_terminate_vdesc(&chan->desc.active->vdesc);
		chan->desc.active = NULL;
	}
	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	vchan_synchronize(&chan->vchan);
}

/* -----------------------------------------------------------------------------
 * Interrupt and Tasklet Handling
 */

/**
 * xilinx_dpdma_err - Detect any global error
 * @isr: Interrupt Status Register
 * @eisr: Error Interrupt Status Register
 *
 * Return: True if any global error occurs, or false otherwise.
 */
static bool xilinx_dpdma_err(u32 isr, u32 eisr)
{
	if (isr & XILINX_DPDMA_INTR_GLOBAL_ERR ||
	    eisr & XILINX_DPDMA_EINTR_GLOBAL_ERR)
		return true;

	return false;
}

/**
 * xilinx_dpdma_handle_err_irq - Handle DPDMA error interrupt
 * @xdev: DPDMA device
 * @isr: masked Interrupt Status Register
 * @eisr: Error Interrupt Status Register
 *
 * Handle any error, based on @isr and @eisr. This function disables the
 * corresponding error interrupts, which should be re-enabled once handling is
 * done.
 */
static void xilinx_dpdma_handle_err_irq(struct xilinx_dpdma_device *xdev,
					u32 isr, u32 eisr)
{
	bool err = xilinx_dpdma_err(isr, eisr);
	unsigned int i;

	dev_dbg_ratelimited(xdev->dev,
			    "error irq: isr = 0x%08x, eisr = 0x%08x\n",
			    isr, eisr);

	/* Disable channel error interrupts until errors are handled. */
	dpdma_write(xdev->reg, XILINX_DPDMA_IDS,
		    isr & ~XILINX_DPDMA_INTR_GLOBAL_ERR);
	dpdma_write(xdev->reg, XILINX_DPDMA_EIDS,
		    eisr & ~XILINX_DPDMA_EINTR_GLOBAL_ERR);

	for (i = 0; i < ARRAY_SIZE(xdev->chan); i++)
		if (err || xilinx_dpdma_chan_err(xdev->chan[i], isr, eisr))
			tasklet_schedule(&xdev->chan[i]->err_task);
}

/**
 * xilinx_dpdma_enable_irq - Enable interrupts
 * @xdev: DPDMA device
 *
 * Enable interrupts.
 */
static void xilinx_dpdma_enable_irq(struct xilinx_dpdma_device *xdev)
{
	dpdma_write(xdev->reg, XILINX_DPDMA_IEN, XILINX_DPDMA_INTR_ALL);
	dpdma_write(xdev->reg, XILINX_DPDMA_EIEN, XILINX_DPDMA_EINTR_ALL);
}

/**
 * xilinx_dpdma_disable_irq - Disable interrupts
 * @xdev: DPDMA device
 *
 * Disable interrupts.
 */
static void xilinx_dpdma_disable_irq(struct xilinx_dpdma_device *xdev)
{
	dpdma_write(xdev->reg, XILINX_DPDMA_IDS, XILINX_DPDMA_INTR_ALL);
	dpdma_write(xdev->reg, XILINX_DPDMA_EIDS, XILINX_DPDMA_EINTR_ALL);
}

/**
 * xilinx_dpdma_chan_err_task - Per channel tasklet for error handling
 * @t: pointer to the tasklet associated with this handler
 *
 * Per channel error handling tasklet. This function waits for the outstanding
 * transactions to complete and triggers error handling. After error handling,
 * it re-enables the channel error interrupts, and restarts the channel if
 * needed.
 */
static void xilinx_dpdma_chan_err_task(struct tasklet_struct *t)
{
	struct xilinx_dpdma_chan *chan = from_tasklet(chan, t, err_task);
	struct xilinx_dpdma_device *xdev = chan->xdev;
	unsigned long flags;

	/* Proceed with error handling even when polling fails. */
	xilinx_dpdma_chan_poll_no_ostand(chan);

	xilinx_dpdma_chan_handle_err(chan);

	dpdma_write(xdev->reg, XILINX_DPDMA_IEN,
		    XILINX_DPDMA_INTR_CHAN_ERR_MASK << chan->id);
	dpdma_write(xdev->reg, XILINX_DPDMA_EIEN,
		    XILINX_DPDMA_EINTR_CHAN_ERR_MASK << chan->id);

	spin_lock_irqsave(&chan->lock, flags);
	spin_lock(&chan->vchan.lock);
	xilinx_dpdma_chan_queue_transfer(chan);
	spin_unlock(&chan->vchan.lock);
	spin_unlock_irqrestore(&chan->lock, flags);
}

static irqreturn_t xilinx_dpdma_irq_handler(int irq, void *data)
{
	struct xilinx_dpdma_device *xdev = data;
	unsigned long mask;
	unsigned int i;
	u32 status;
	u32 error;

	status = dpdma_read(xdev->reg, XILINX_DPDMA_ISR);
	error = dpdma_read(xdev->reg, XILINX_DPDMA_EISR);
	if (!status && !error)
		return IRQ_NONE;

	dpdma_write(xdev->reg, XILINX_DPDMA_ISR, status);
	dpdma_write(xdev->reg, XILINX_DPDMA_EISR, error);

	if (status & XILINX_DPDMA_INTR_VSYNC) {
		/*
		 * There's a single VSYNC interrupt that needs to be processed
		 * by each running channel to update the active descriptor.
		 */
		for (i = 0; i < ARRAY_SIZE(xdev->chan); i++) {
			struct xilinx_dpdma_chan *chan = xdev->chan[i];

			if (chan)
				xilinx_dpdma_chan_vsync_irq(chan);
		}
	}

	mask = FIELD_GET(XILINX_DPDMA_INTR_DESC_DONE_MASK, status);
	if (mask) {
		for_each_set_bit(i, &mask, ARRAY_SIZE(xdev->chan))
			xilinx_dpdma_chan_done_irq(xdev->chan[i]);
	}

	mask = FIELD_GET(XILINX_DPDMA_INTR_NO_OSTAND_MASK, status);
	if (mask) {
		for_each_set_bit(i, &mask, ARRAY_SIZE(xdev->chan))
			xilinx_dpdma_chan_notify_no_ostand(xdev->chan[i]);
	}

	mask = status & XILINX_DPDMA_INTR_ERR_ALL;
	if (mask || error)
		xilinx_dpdma_handle_err_irq(xdev, mask, error);

	return IRQ_HANDLED;
}

/* -----------------------------------------------------------------------------
 * Initialization & Cleanup
 */

static int xilinx_dpdma_chan_init(struct xilinx_dpdma_device *xdev,
				  unsigned int chan_id)
{
	struct xilinx_dpdma_chan *chan;

	chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;

	chan->id = chan_id;
	chan->reg = xdev->reg + XILINX_DPDMA_CH_BASE
		  + XILINX_DPDMA_CH_OFFSET * chan->id;
	chan->running = false;
	chan->xdev = xdev;

	spin_lock_init(&chan->lock);
	init_waitqueue_head(&chan->wait_to_stop);

	tasklet_setup(&chan->err_task, xilinx_dpdma_chan_err_task);

	chan->vchan.desc_free = xilinx_dpdma_chan_free_tx_desc;
	vchan_init(&chan->vchan, &xdev->common);

	xdev->chan[chan->id] = chan;

	return 0;
}

static void xilinx_dpdma_chan_remove(struct xilinx_dpdma_chan *chan)
{
	if (!chan)
		return;

	tasklet_kill(&chan->err_task);
	list_del(&chan->vchan.chan.device_node);
}

static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
					    struct of_dma *ofdma)
{
	struct xilinx_dpdma_device *xdev = ofdma->of_dma_data;
	u32 chan_id = dma_spec->args[0];

	if (chan_id >= ARRAY_SIZE(xdev->chan))
		return NULL;

	if (!xdev->chan[chan_id])
		return NULL;

	return dma_get_slave_channel(&xdev->chan[chan_id]->vchan.chan);
}

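/*
 * Illustrative device tree usage (hypothetical consumer node): a client
 * selects a channel with a single phandle argument, using the constants from
 * dt-bindings/dma/xlnx-zynqmp-dpdma.h:
 *
 *	dmas = <&zynqmp_dpdma ZYNQMP_DPDMA_VIDEO0>,
 *	       <&zynqmp_dpdma ZYNQMP_DPDMA_GRAPHICS>;
 *	dma-names = "vid0", "gfx0";
 */
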
static void dpdma_hw_init(struct xilinx_dpdma_device *xdev)
{
	unsigned int i;
	void __iomem *reg;

	/* Disable all interrupts. */
	xilinx_dpdma_disable_irq(xdev);

	/* Stop all channels. */
	for (i = 0; i < ARRAY_SIZE(xdev->chan); i++) {
		reg = xdev->reg + XILINX_DPDMA_CH_BASE
				+ XILINX_DPDMA_CH_OFFSET * i;
		dpdma_clr(reg, XILINX_DPDMA_CH_CNTL, XILINX_DPDMA_CH_CNTL_ENABLE);
	}

	/* Clear the interrupt status registers. */
	dpdma_write(xdev->reg, XILINX_DPDMA_ISR, XILINX_DPDMA_INTR_ALL);
	dpdma_write(xdev->reg, XILINX_DPDMA_EISR, XILINX_DPDMA_EINTR_ALL);
}

static int xilinx_dpdma_probe(struct platform_device *pdev)
{
	struct xilinx_dpdma_device *xdev;
	struct dma_device *ddev;
	unsigned int i;
	int ret;

	xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
	if (!xdev)
		return -ENOMEM;

	xdev->dev = &pdev->dev;
	xdev->ext_addr = sizeof(dma_addr_t) > 4;

	INIT_LIST_HEAD(&xdev->common.channels);

	platform_set_drvdata(pdev, xdev);

	xdev->axi_clk = devm_clk_get(xdev->dev, "axi_clk");
	if (IS_ERR(xdev->axi_clk))
		return PTR_ERR(xdev->axi_clk);

	xdev->reg = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(xdev->reg))
		return PTR_ERR(xdev->reg);

	dpdma_hw_init(xdev);

	xdev->irq = platform_get_irq(pdev, 0);
	if (xdev->irq < 0) {
		dev_err(xdev->dev, "failed to get platform irq\n");
		return xdev->irq;
	}

	ret = request_irq(xdev->irq, xilinx_dpdma_irq_handler, IRQF_SHARED,
			  dev_name(xdev->dev), xdev);
	if (ret) {
		dev_err(xdev->dev, "failed to request IRQ\n");
		return ret;
	}

	ddev = &xdev->common;
	ddev->dev = &pdev->dev;

	dma_cap_set(DMA_SLAVE, ddev->cap_mask);
	dma_cap_set(DMA_PRIVATE, ddev->cap_mask);
	dma_cap_set(DMA_INTERLEAVE, ddev->cap_mask);
	dma_cap_set(DMA_REPEAT, ddev->cap_mask);
	dma_cap_set(DMA_LOAD_EOT, ddev->cap_mask);
	ddev->copy_align = fls(XILINX_DPDMA_ALIGN_BYTES - 1);

	ddev->device_alloc_chan_resources = xilinx_dpdma_alloc_chan_resources;
	ddev->device_free_chan_resources = xilinx_dpdma_free_chan_resources;
	ddev->device_prep_interleaved_dma = xilinx_dpdma_prep_interleaved_dma;
	/* TODO: Can we achieve better granularity ? */
	ddev->device_tx_status = dma_cookie_status;
	ddev->device_issue_pending = xilinx_dpdma_issue_pending;
	ddev->device_config = xilinx_dpdma_config;
	ddev->device_pause = xilinx_dpdma_pause;
	ddev->device_resume = xilinx_dpdma_resume;
	ddev->device_terminate_all = xilinx_dpdma_terminate_all;
	ddev->device_synchronize = xilinx_dpdma_synchronize;
	ddev->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED);
	ddev->directions = BIT(DMA_MEM_TO_DEV);
	ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

	for (i = 0; i < ARRAY_SIZE(xdev->chan); ++i) {
		ret = xilinx_dpdma_chan_init(xdev, i);
		if (ret < 0) {
			dev_err(xdev->dev, "failed to initialize channel %u\n",
				i);
			goto error;
		}
	}

	ret = clk_prepare_enable(xdev->axi_clk);
	if (ret) {
		dev_err(xdev->dev, "failed to enable the axi clock\n");
		goto error;
	}

	ret = dma_async_device_register(ddev);
	if (ret) {
		dev_err(xdev->dev, "failed to register the dma device\n");
		goto error_dma_async;
	}

	ret = of_dma_controller_register(xdev->dev->of_node,
					 of_dma_xilinx_xlate, ddev);
	if (ret) {
		dev_err(xdev->dev, "failed to register DMA to DT DMA helper\n");
		goto error_of_dma;
	}

	xilinx_dpdma_enable_irq(xdev);

	xilinx_dpdma_debugfs_init(xdev);

	dev_info(&pdev->dev, "Xilinx DPDMA engine is probed\n");

	return 0;

error_of_dma:
	dma_async_device_unregister(ddev);
error_dma_async:
	clk_disable_unprepare(xdev->axi_clk);
error:
	for (i = 0; i < ARRAY_SIZE(xdev->chan); i++)
		xilinx_dpdma_chan_remove(xdev->chan[i]);

	free_irq(xdev->irq, xdev);

	return ret;
}

static int xilinx_dpdma_remove(struct platform_device *pdev)
{
	struct xilinx_dpdma_device *xdev = platform_get_drvdata(pdev);
	unsigned int i;

	/* Start by disabling the IRQ to avoid races during cleanup. */
	free_irq(xdev->irq, xdev);

	xilinx_dpdma_disable_irq(xdev);
	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&xdev->common);
	clk_disable_unprepare(xdev->axi_clk);

	for (i = 0; i < ARRAY_SIZE(xdev->chan); i++)
		xilinx_dpdma_chan_remove(xdev->chan[i]);

	return 0;
}

static const struct of_device_id xilinx_dpdma_of_match[] = {
	{ .compatible = "xlnx,zynqmp-dpdma", },
	{ /* end of table */ },
};
MODULE_DEVICE_TABLE(of, xilinx_dpdma_of_match);

static struct platform_driver xilinx_dpdma_driver = {
	.probe			= xilinx_dpdma_probe,
	.remove			= xilinx_dpdma_remove,
	.driver			= {
		.name		= "xilinx-zynqmp-dpdma",
		.of_match_table	= xilinx_dpdma_of_match,
	},
};

module_platform_driver(xilinx_dpdma_driver);

MODULE_AUTHOR("Xilinx, Inc.");
MODULE_DESCRIPTION("Xilinx ZynqMP DPDMA driver");
MODULE_LICENSE("GPL v2");