// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * DMA driver for Xilinx Video DMA Engine
 *
 * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved.
 *
 * Based on the Freescale DMA driver.
 *
 * Description:
 * The AXI Video Direct Memory Access (AXI VDMA) core is a soft Xilinx IP
 * core that provides high-bandwidth direct memory access between memory
 * and AXI4-Stream type video target peripherals. The core provides efficient
 * two dimensional DMA operations with independent asynchronous read (S2MM)
 * and write (MM2S) channel operation. It can be configured to have either
 * one channel or two channels. If configured as two channels, one is to
 * transmit to the video device (MM2S) and another is to receive from the
 * video device (S2MM). Initialization, status, interrupt and management
 * registers are accessed through an AXI4-Lite slave interface.
 *
 * The AXI Direct Memory Access (AXI DMA) core is a soft Xilinx IP core that
 * provides high-bandwidth one dimensional direct memory access between memory
 * and AXI4-Stream target peripherals. It supports one receive and one
 * transmit channel, both of them optional at synthesis time.
 *
 * The AXI CDMA is a soft IP that provides high-bandwidth Direct Memory
 * Access (DMA) between a memory-mapped source address and a memory-mapped
 * destination address.
 *
 * The AXI Multichannel Direct Memory Access (AXI MCDMA) core is a soft
 * Xilinx IP that provides high-bandwidth direct memory access between
 * memory and AXI4-Stream target peripherals. It provides a scatter-gather
 * (SG) interface with independent configuration support for multiple
 * channels.
 */
#include <linux/bitops.h>
#include <linux/dmapool.h>
#include <linux/dma/xilinx_dma.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include "../dmaengine.h"
54 /* Register/Descriptor Offsets */
55 #define XILINX_DMA_MM2S_CTRL_OFFSET 0x0000
56 #define XILINX_DMA_S2MM_CTRL_OFFSET 0x0030
57 #define XILINX_VDMA_MM2S_DESC_OFFSET 0x0050
58 #define XILINX_VDMA_S2MM_DESC_OFFSET 0x00a0
60 /* Control Registers */
61 #define XILINX_DMA_REG_DMACR 0x0000
62 #define XILINX_DMA_DMACR_DELAY_MAX 0xff
63 #define XILINX_DMA_DMACR_DELAY_SHIFT 24
64 #define XILINX_DMA_DMACR_FRAME_COUNT_MAX 0xff
65 #define XILINX_DMA_DMACR_FRAME_COUNT_SHIFT 16
66 #define XILINX_DMA_DMACR_ERR_IRQ BIT(14)
67 #define XILINX_DMA_DMACR_DLY_CNT_IRQ BIT(13)
68 #define XILINX_DMA_DMACR_FRM_CNT_IRQ BIT(12)
69 #define XILINX_DMA_DMACR_MASTER_SHIFT 8
70 #define XILINX_DMA_DMACR_FSYNCSRC_SHIFT 5
71 #define XILINX_DMA_DMACR_FRAMECNT_EN BIT(4)
72 #define XILINX_DMA_DMACR_GENLOCK_EN BIT(3)
73 #define XILINX_DMA_DMACR_RESET BIT(2)
74 #define XILINX_DMA_DMACR_CIRC_EN BIT(1)
75 #define XILINX_DMA_DMACR_RUNSTOP BIT(0)
76 #define XILINX_DMA_DMACR_FSYNCSRC_MASK GENMASK(6, 5)
77 #define XILINX_DMA_DMACR_DELAY_MASK GENMASK(31, 24)
78 #define XILINX_DMA_DMACR_FRAME_COUNT_MASK GENMASK(23, 16)
79 #define XILINX_DMA_DMACR_MASTER_MASK GENMASK(11, 8)
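/*
 * DMACR layout summary (from the fields above): delay timeout in bits
 * [31:24], frame count / interrupt threshold in [23:16], the error, delay
 * and frame-count interrupt enables in bits 14..12, and run/stop in bit 0.
 */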
81 #define XILINX_DMA_REG_DMASR 0x0004
82 #define XILINX_DMA_DMASR_EOL_LATE_ERR BIT(15)
83 #define XILINX_DMA_DMASR_ERR_IRQ BIT(14)
84 #define XILINX_DMA_DMASR_DLY_CNT_IRQ BIT(13)
85 #define XILINX_DMA_DMASR_FRM_CNT_IRQ BIT(12)
86 #define XILINX_DMA_DMASR_SOF_LATE_ERR BIT(11)
87 #define XILINX_DMA_DMASR_SG_DEC_ERR BIT(10)
88 #define XILINX_DMA_DMASR_SG_SLV_ERR BIT(9)
89 #define XILINX_DMA_DMASR_EOF_EARLY_ERR BIT(8)
90 #define XILINX_DMA_DMASR_SOF_EARLY_ERR BIT(7)
91 #define XILINX_DMA_DMASR_DMA_DEC_ERR BIT(6)
92 #define XILINX_DMA_DMASR_DMA_SLAVE_ERR BIT(5)
93 #define XILINX_DMA_DMASR_DMA_INT_ERR BIT(4)
94 #define XILINX_DMA_DMASR_SG_MASK BIT(3)
95 #define XILINX_DMA_DMASR_IDLE BIT(1)
96 #define XILINX_DMA_DMASR_HALTED BIT(0)
97 #define XILINX_DMA_DMASR_DELAY_MASK GENMASK(31, 24)
98 #define XILINX_DMA_DMASR_FRAME_COUNT_MASK GENMASK(23, 16)
100 #define XILINX_DMA_REG_CURDESC 0x0008
101 #define XILINX_DMA_REG_TAILDESC 0x0010
102 #define XILINX_DMA_REG_REG_INDEX 0x0014
103 #define XILINX_DMA_REG_FRMSTORE 0x0018
104 #define XILINX_DMA_REG_THRESHOLD 0x001c
105 #define XILINX_DMA_REG_FRMPTR_STS 0x0024
106 #define XILINX_DMA_REG_PARK_PTR 0x0028
107 #define XILINX_DMA_PARK_PTR_WR_REF_SHIFT 8
108 #define XILINX_DMA_PARK_PTR_WR_REF_MASK GENMASK(12, 8)
109 #define XILINX_DMA_PARK_PTR_RD_REF_SHIFT 0
110 #define XILINX_DMA_PARK_PTR_RD_REF_MASK GENMASK(4, 0)
111 #define XILINX_DMA_REG_VDMA_VERSION 0x002c
113 /* Register Direct Mode Registers */
114 #define XILINX_DMA_REG_VSIZE 0x0000
115 #define XILINX_DMA_REG_HSIZE 0x0004
117 #define XILINX_DMA_REG_FRMDLY_STRIDE 0x0008
118 #define XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT 24
119 #define XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT 0
121 #define XILINX_VDMA_REG_START_ADDRESS(n) (0x000c + 4 * (n))
122 #define XILINX_VDMA_REG_START_ADDRESS_64(n) (0x000c + 8 * (n))
124 #define XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP 0x00ec
125 #define XILINX_VDMA_ENABLE_VERTICAL_FLIP BIT(0)
127 /* HW specific definitions */
128 #define XILINX_MCDMA_MAX_CHANS_PER_DEVICE 0x20
129 #define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x2
130 #define XILINX_CDMA_MAX_CHANS_PER_DEVICE 0x1
132 #define XILINX_DMA_DMAXR_ALL_IRQ_MASK \
133 (XILINX_DMA_DMASR_FRM_CNT_IRQ | \
134 XILINX_DMA_DMASR_DLY_CNT_IRQ | \
135 XILINX_DMA_DMASR_ERR_IRQ)
137 #define XILINX_DMA_DMASR_ALL_ERR_MASK \
138 (XILINX_DMA_DMASR_EOL_LATE_ERR | \
139 XILINX_DMA_DMASR_SOF_LATE_ERR | \
140 XILINX_DMA_DMASR_SG_DEC_ERR | \
141 XILINX_DMA_DMASR_SG_SLV_ERR | \
142 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
143 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
144 XILINX_DMA_DMASR_DMA_DEC_ERR | \
145 XILINX_DMA_DMASR_DMA_SLAVE_ERR | \
146 XILINX_DMA_DMASR_DMA_INT_ERR)
/*
 * Recoverable errors are DMA Internal error, SOF Early, EOF Early
 * and SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC
 * is enabled in the h/w system.
 */
#define XILINX_DMA_DMASR_ERR_RECOVER_MASK	\
		(XILINX_DMA_DMASR_SOF_LATE_ERR | \
		 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_DMA_INT_ERR)
159 /* Axi VDMA Flush on Fsync bits */
160 #define XILINX_DMA_FLUSH_S2MM 3
161 #define XILINX_DMA_FLUSH_MM2S 2
162 #define XILINX_DMA_FLUSH_BOTH 1
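/*
 * These values are typically taken from the xlnx,flush-fsync devicetree
 * property and select which channel(s) flush on frame sync.
 */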
164 /* Delay loop counter to prevent hardware failure */
165 #define XILINX_DMA_LOOP_COUNT 1000000
167 /* AXI DMA Specific Registers/Offsets */
168 #define XILINX_DMA_REG_SRCDSTADDR 0x18
169 #define XILINX_DMA_REG_BTT 0x28
171 /* AXI DMA Specific Masks/Bit fields */
172 #define XILINX_DMA_MAX_TRANS_LEN_MIN 8
173 #define XILINX_DMA_MAX_TRANS_LEN_MAX 23
174 #define XILINX_DMA_V2_MAX_TRANS_LEN_MAX 26
175 #define XILINX_DMA_CR_COALESCE_MAX GENMASK(23, 16)
176 #define XILINX_DMA_CR_CYCLIC_BD_EN_MASK BIT(4)
177 #define XILINX_DMA_CR_COALESCE_SHIFT 16
178 #define XILINX_DMA_BD_SOP BIT(27)
179 #define XILINX_DMA_BD_EOP BIT(26)
180 #define XILINX_DMA_COALESCE_MAX 255
181 #define XILINX_DMA_NUM_DESCS 255
182 #define XILINX_DMA_NUM_APP_WORDS 5
184 /* AXI CDMA Specific Registers/Offsets */
185 #define XILINX_CDMA_REG_SRCADDR 0x18
186 #define XILINX_CDMA_REG_DSTADDR 0x20
188 /* AXI CDMA Specific Masks */
189 #define XILINX_CDMA_CR_SGMODE BIT(3)
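/*
 * Build a dma_addr_t from a pair of 32-bit descriptor fields: token pasting
 * appends "_msb" to the argument name, so xilinx_prep_dma_addr_t(src_addr)
 * combines src_addr_msb and src_addr into one 64-bit address.
 */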
191 #define xilinx_prep_dma_addr_t(addr) \
192 ((dma_addr_t)((u64)addr##_##msb << 32 | (addr)))
194 /* AXI MCDMA Specific Registers/Offsets */
195 #define XILINX_MCDMA_MM2S_CTRL_OFFSET 0x0000
196 #define XILINX_MCDMA_S2MM_CTRL_OFFSET 0x0500
197 #define XILINX_MCDMA_CHEN_OFFSET 0x0008
198 #define XILINX_MCDMA_CH_ERR_OFFSET 0x0010
199 #define XILINX_MCDMA_RXINT_SER_OFFSET 0x0020
200 #define XILINX_MCDMA_TXINT_SER_OFFSET 0x0028
201 #define XILINX_MCDMA_CHAN_CR_OFFSET(x) (0x40 + (x) * 0x40)
202 #define XILINX_MCDMA_CHAN_SR_OFFSET(x) (0x44 + (x) * 0x40)
203 #define XILINX_MCDMA_CHAN_CDESC_OFFSET(x) (0x48 + (x) * 0x40)
204 #define XILINX_MCDMA_CHAN_TDESC_OFFSET(x) (0x50 + (x) * 0x40)
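/*
 * Each MCDMA channel has its own bank of control/status/descriptor
 * registers, spaced 0x40 apart and indexed by the channel's TDEST value.
 */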
206 /* AXI MCDMA Specific Masks/Shifts */
207 #define XILINX_MCDMA_COALESCE_SHIFT 16
208 #define XILINX_MCDMA_COALESCE_MAX 24
209 #define XILINX_MCDMA_IRQ_ALL_MASK GENMASK(7, 5)
210 #define XILINX_MCDMA_COALESCE_MASK GENMASK(23, 16)
211 #define XILINX_MCDMA_CR_RUNSTOP_MASK BIT(0)
212 #define XILINX_MCDMA_IRQ_IOC_MASK BIT(5)
213 #define XILINX_MCDMA_IRQ_DELAY_MASK BIT(6)
214 #define XILINX_MCDMA_IRQ_ERR_MASK BIT(7)
215 #define XILINX_MCDMA_BD_EOP BIT(30)
216 #define XILINX_MCDMA_BD_SOP BIT(31)
/**
 * struct xilinx_vdma_desc_hw - Hardware Descriptor
 * @next_desc: Next Descriptor Pointer @0x00
 * @pad1: Reserved @0x04
 * @buf_addr: Buffer address @0x08
 * @buf_addr_msb: MSB of Buffer address @0x0C
 * @vsize: Vertical Size @0x10
 * @hsize: Horizontal Size @0x14
 * @stride: Number of bytes between the first
 *	    pixels of each horizontal line @0x18
 */
struct xilinx_vdma_desc_hw {
	u32 next_desc;
	u32 pad1;
	u32 buf_addr;
	u32 buf_addr_msb;
	u32 vsize;
	u32 hsize;
	u32 stride;
} __aligned(64);
/**
 * struct xilinx_axidma_desc_hw - Hardware Descriptor for AXI DMA
 * @next_desc: Next Descriptor Pointer @0x00
 * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
 * @buf_addr: Buffer address @0x08
 * @buf_addr_msb: MSB of Buffer address @0x0C
 * @reserved1: Reserved @0x10
 * @reserved2: Reserved @0x14
 * @control: Control field @0x18
 * @status: Status field @0x1C
 * @app: APP Fields @0x20 - 0x30
 */
struct xilinx_axidma_desc_hw {
	u32 next_desc;
	u32 next_desc_msb;
	u32 buf_addr;
	u32 buf_addr_msb;
	u32 reserved1;
	u32 reserved2;
	u32 control;
	u32 status;
	u32 app[XILINX_DMA_NUM_APP_WORDS];
} __aligned(64);
/**
 * struct xilinx_aximcdma_desc_hw - Hardware Descriptor for AXI MCDMA
 * @next_desc: Next Descriptor Pointer @0x00
 * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
 * @buf_addr: Buffer address @0x08
 * @buf_addr_msb: MSB of Buffer address @0x0C
 * @rsvd: Reserved field @0x10
 * @control: Control Information field @0x14
 * @status: Status field @0x18
 * @sideband_status: Status of sideband signals @0x1C
 * @app: APP Fields @0x20 - 0x30
 */
struct xilinx_aximcdma_desc_hw {
	u32 next_desc;
	u32 next_desc_msb;
	u32 buf_addr;
	u32 buf_addr_msb;
	u32 rsvd;
	u32 control;
	u32 status;
	u32 sideband_status;
	u32 app[XILINX_DMA_NUM_APP_WORDS];
} __aligned(64);
/**
 * struct xilinx_cdma_desc_hw - Hardware Descriptor
 * @next_desc: Next Descriptor Pointer @0x00
 * @next_desc_msb: Next Descriptor Pointer MSB @0x04
 * @src_addr: Source address @0x08
 * @src_addr_msb: Source address MSB @0x0C
 * @dest_addr: Destination address @0x10
 * @dest_addr_msb: Destination address MSB @0x14
 * @control: Control field @0x18
 * @status: Status field @0x1C
 */
struct xilinx_cdma_desc_hw {
	u32 next_desc;
	u32 next_desc_msb;
	u32 src_addr;
	u32 src_addr_msb;
	u32 dest_addr;
	u32 dest_addr_msb;
	u32 control;
	u32 status;
} __aligned(64);
/**
 * struct xilinx_vdma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_vdma_tx_segment {
	struct xilinx_vdma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);
/**
 * struct xilinx_axidma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_axidma_tx_segment {
	struct xilinx_axidma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);
/**
 * struct xilinx_aximcdma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_aximcdma_tx_segment {
	struct xilinx_aximcdma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);
/**
 * struct xilinx_cdma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_cdma_tx_segment {
	struct xilinx_cdma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);
/**
 * struct xilinx_dma_tx_descriptor - Per Transaction structure
 * @async_tx: Async transaction descriptor
 * @segments: TX segments list
 * @node: Node in the channel descriptors list
 * @cyclic: Check for cyclic transfers.
 * @err: Whether the descriptor has an error.
 * @residue: Residue of the completed descriptor
 */
struct xilinx_dma_tx_descriptor {
	struct dma_async_tx_descriptor async_tx;
	struct list_head segments;
	struct list_head node;
	bool cyclic;
	bool err;
	u32 residue;
};
/**
 * struct xilinx_dma_chan - Driver specific DMA channel structure
 * @xdev: Driver specific device structure
 * @ctrl_offset: Control registers offset
 * @desc_offset: TX descriptor registers offset
 * @lock: Descriptor operation lock
 * @pending_list: Descriptors waiting
 * @active_list: Descriptors ready to submit
 * @done_list: Complete descriptors
 * @free_seg_list: Free descriptors
 * @common: DMA common channel
 * @desc_pool: Descriptors pool
 * @dev: The dma device
 * @irq: Channel IRQ
 * @id: Channel id
 * @direction: Transfer direction
 * @num_frms: Number of frames
 * @has_sg: Support scatter transfers
 * @cyclic: Check for cyclic transfers.
 * @genlock: Support genlock mode
 * @err: Channel has errors
 * @idle: Check for channel idle
 * @terminating: Check for channel being synchronized by user
 * @tasklet: Cleanup work after irq
 * @config: Device configuration info
 * @flush_on_fsync: Flush on Frame sync
 * @desc_pendingcount: Descriptor pending count
 * @ext_addr: Indicates 64 bit addressing is supported by dma channel
 * @desc_submitcount: Descriptor h/w submitted count
 * @seg_v: Statically allocated segments base
 * @seg_mv: Statically allocated segments base for MCDMA
 * @seg_p: Physical allocated segments base
 * @cyclic_seg_v: Statically allocated segment base for cyclic transfers
 * @cyclic_seg_p: Physical allocated segments base for cyclic dma
 * @start_transfer: Differentiate b/w DMA IP's transfer
 * @stop_transfer: Differentiate b/w DMA IP's quiesce
 * @tdest: TDEST value for mcdma
 * @has_vflip: S2MM vertical flip
 */
struct xilinx_dma_chan {
	struct xilinx_dma_device *xdev;
	u32 ctrl_offset;
	u32 desc_offset;
	spinlock_t lock;
	struct list_head pending_list;
	struct list_head active_list;
	struct list_head done_list;
	struct list_head free_seg_list;
	struct dma_chan common;
	struct dma_pool *desc_pool;
	struct device *dev;
	int irq;
	int id;
	enum dma_transfer_direction direction;
	int num_frms;
	bool has_sg;
	bool cyclic;
	bool genlock;
	bool err;
	bool idle;
	bool terminating;
	struct tasklet_struct tasklet;
	struct xilinx_vdma_config config;
	bool flush_on_fsync;
	u32 desc_pendingcount;
	bool ext_addr;
	u32 desc_submitcount;
	struct xilinx_axidma_tx_segment *seg_v;
	struct xilinx_aximcdma_tx_segment *seg_mv;
	dma_addr_t seg_p;
	struct xilinx_axidma_tx_segment *cyclic_seg_v;
	dma_addr_t cyclic_seg_p;
	void (*start_transfer)(struct xilinx_dma_chan *chan);
	int (*stop_transfer)(struct xilinx_dma_chan *chan);
	u16 tdest;
	bool has_vflip;
};
/**
 * enum xdma_ip_type - DMA IP type.
 *
 * @XDMA_TYPE_AXIDMA: Axi dma ip.
 * @XDMA_TYPE_CDMA: Axi cdma ip.
 * @XDMA_TYPE_VDMA: Axi vdma ip.
 * @XDMA_TYPE_AXIMCDMA: Axi MCDMA ip.
 */
enum xdma_ip_type {
	XDMA_TYPE_AXIDMA = 0,
	XDMA_TYPE_CDMA,
	XDMA_TYPE_VDMA,
	XDMA_TYPE_AXIMCDMA,
};

struct xilinx_dma_config {
	enum xdma_ip_type dmatype;
	int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk,
			struct clk **tx_clk, struct clk **txs_clk,
			struct clk **rx_clk, struct clk **rxs_clk);
	irqreturn_t (*irq_handler)(int irq, void *data);
	const int max_channels;
};
/**
 * struct xilinx_dma_device - DMA device structure
 * @regs: I/O mapped base address
 * @dev: Device Structure
 * @common: DMA device structure
 * @chan: Driver specific DMA channel
 * @flush_on_fsync: Flush on frame sync
 * @ext_addr: Indicates 64 bit addressing is supported by dma device
 * @pdev: Platform device structure pointer
 * @dma_config: DMA config structure
 * @axi_clk: DMA Axi4-lite interface clock
 * @tx_clk: DMA mm2s clock
 * @txs_clk: DMA mm2s stream clock
 * @rx_clk: DMA s2mm clock
 * @rxs_clk: DMA s2mm stream clock
 * @s2mm_chan_id: DMA s2mm channel identifier
 * @mm2s_chan_id: DMA mm2s channel identifier
 * @max_buffer_len: Max buffer length
 */
struct xilinx_dma_device {
	void __iomem *regs;
	struct device *dev;
	struct dma_device common;
	struct xilinx_dma_chan *chan[XILINX_MCDMA_MAX_CHANS_PER_DEVICE];
	u32 flush_on_fsync;
	bool ext_addr;
	struct platform_device *pdev;
	const struct xilinx_dma_config *dma_config;
	struct clk *axi_clk;
	struct clk *tx_clk;
	struct clk *txs_clk;
	struct clk *rx_clk;
	struct clk *rxs_clk;
	u32 s2mm_chan_id;
	u32 mm2s_chan_id;
	u32 max_buffer_len;
};
517 #define to_xilinx_chan(chan) \
518 container_of(chan, struct xilinx_dma_chan, common)
519 #define to_dma_tx_descriptor(tx) \
520 container_of(tx, struct xilinx_dma_tx_descriptor, async_tx)
521 #define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \
522 readl_poll_timeout_atomic(chan->xdev->regs + chan->ctrl_offset + reg, \
523 val, cond, delay_us, timeout_us)
/* IO accessors */

static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg)
{
	return ioread32(chan->xdev->regs + reg);
}

static inline void dma_write(struct xilinx_dma_chan *chan, u32 reg, u32 value)
{
	iowrite32(value, chan->xdev->regs + reg);
}

static inline void vdma_desc_write(struct xilinx_dma_chan *chan, u32 reg,
				   u32 value)
{
	dma_write(chan, chan->desc_offset + reg, value);
}

static inline u32 dma_ctrl_read(struct xilinx_dma_chan *chan, u32 reg)
{
	return dma_read(chan, chan->ctrl_offset + reg);
}

static inline void dma_ctrl_write(struct xilinx_dma_chan *chan, u32 reg,
				  u32 value)
{
	dma_write(chan, chan->ctrl_offset + reg, value);
}

static inline void dma_ctrl_clr(struct xilinx_dma_chan *chan, u32 reg,
				u32 clr)
{
	dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) & ~clr);
}

static inline void dma_ctrl_set(struct xilinx_dma_chan *chan, u32 reg,
				u32 set)
{
	dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) | set);
}
/**
 * vdma_desc_write_64 - 64-bit descriptor write
 * @chan: Driver specific VDMA channel
 * @reg: Register to write
 * @value_lsb: lower address of the descriptor.
 * @value_msb: upper address of the descriptor.
 *
 * Since the VDMA driver writes to a register offset that is not 64-bit
 * aligned (e.g. 0x5c), the value is written as two separate 32-bit writes
 * instead of a single 64-bit register write.
 */
static inline void vdma_desc_write_64(struct xilinx_dma_chan *chan, u32 reg,
				      u32 value_lsb, u32 value_msb)
{
	/* Write the lsb 32 bits */
	writel(value_lsb, chan->xdev->regs + chan->desc_offset + reg);

	/* Write the msb 32 bits */
	writel(value_msb, chan->xdev->regs + chan->desc_offset + reg + 4);
}
static inline void dma_writeq(struct xilinx_dma_chan *chan, u32 reg, u64 value)
{
	lo_hi_writeq(value, chan->xdev->regs + chan->ctrl_offset + reg);
}

static inline void xilinx_write(struct xilinx_dma_chan *chan, u32 reg,
				dma_addr_t addr)
{
	if (chan->ext_addr)
		dma_writeq(chan, reg, addr);
	else
		dma_ctrl_write(chan, reg, addr);
}
static inline void xilinx_axidma_buf(struct xilinx_dma_chan *chan,
				     struct xilinx_axidma_desc_hw *hw,
				     dma_addr_t buf_addr, size_t sg_used,
				     size_t period_len)
{
	if (chan->ext_addr) {
		hw->buf_addr = lower_32_bits(buf_addr + sg_used + period_len);
		hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used +
						 period_len);
	} else {
		hw->buf_addr = buf_addr + sg_used + period_len;
	}
}

static inline void xilinx_aximcdma_buf(struct xilinx_dma_chan *chan,
				       struct xilinx_aximcdma_desc_hw *hw,
				       dma_addr_t buf_addr, size_t sg_used)
{
	if (chan->ext_addr) {
		hw->buf_addr = lower_32_bits(buf_addr + sg_used);
		hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used);
	} else {
		hw->buf_addr = buf_addr + sg_used;
	}
}
/* -----------------------------------------------------------------------------
 * Descriptors and segments alloc and free
 */
631 * xilinx_vdma_alloc_tx_segment - Allocate transaction segment
632 * @chan: Driver specific DMA channel
634 * Return: The allocated segment on success and NULL on failure.
636 static struct xilinx_vdma_tx_segment *
637 xilinx_vdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
639 struct xilinx_vdma_tx_segment *segment;
642 segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
646 segment->phys = phys;
652 * xilinx_cdma_alloc_tx_segment - Allocate transaction segment
653 * @chan: Driver specific DMA channel
655 * Return: The allocated segment on success and NULL on failure.
657 static struct xilinx_cdma_tx_segment *
658 xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
660 struct xilinx_cdma_tx_segment *segment;
663 segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
667 segment->phys = phys;
673 * xilinx_axidma_alloc_tx_segment - Allocate transaction segment
674 * @chan: Driver specific DMA channel
676 * Return: The allocated segment on success and NULL on failure.
678 static struct xilinx_axidma_tx_segment *
679 xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
681 struct xilinx_axidma_tx_segment *segment = NULL;
684 spin_lock_irqsave(&chan->lock, flags);
685 if (!list_empty(&chan->free_seg_list)) {
686 segment = list_first_entry(&chan->free_seg_list,
687 struct xilinx_axidma_tx_segment,
689 list_del(&segment->node);
691 spin_unlock_irqrestore(&chan->lock, flags);
694 dev_dbg(chan->dev, "Could not find free tx segment\n");
700 * xilinx_aximcdma_alloc_tx_segment - Allocate transaction segment
701 * @chan: Driver specific DMA channel
703 * Return: The allocated segment on success and NULL on failure.
705 static struct xilinx_aximcdma_tx_segment *
706 xilinx_aximcdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
708 struct xilinx_aximcdma_tx_segment *segment = NULL;
711 spin_lock_irqsave(&chan->lock, flags);
712 if (!list_empty(&chan->free_seg_list)) {
713 segment = list_first_entry(&chan->free_seg_list,
714 struct xilinx_aximcdma_tx_segment,
716 list_del(&segment->node);
718 spin_unlock_irqrestore(&chan->lock, flags);
static void xilinx_dma_clean_hw_desc(struct xilinx_axidma_desc_hw *hw)
{
	u32 next_desc = hw->next_desc;
	u32 next_desc_msb = hw->next_desc_msb;

	memset(hw, 0, sizeof(struct xilinx_axidma_desc_hw));

	hw->next_desc = next_desc;
	hw->next_desc_msb = next_desc_msb;
}

static void xilinx_mcdma_clean_hw_desc(struct xilinx_aximcdma_desc_hw *hw)
{
	u32 next_desc = hw->next_desc;
	u32 next_desc_msb = hw->next_desc_msb;

	memset(hw, 0, sizeof(struct xilinx_aximcdma_desc_hw));

	hw->next_desc = next_desc;
	hw->next_desc_msb = next_desc_msb;
}
/**
 * xilinx_dma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan,
				       struct xilinx_axidma_tx_segment *segment)
{
	xilinx_dma_clean_hw_desc(&segment->hw);

	list_add_tail(&segment->node, &chan->free_seg_list);
}
/**
 * xilinx_mcdma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
static void xilinx_mcdma_free_tx_segment(struct xilinx_dma_chan *chan,
					 struct xilinx_aximcdma_tx_segment *segment)
{
	xilinx_mcdma_clean_hw_desc(&segment->hw);

	list_add_tail(&segment->node, &chan->free_seg_list);
}
/**
 * xilinx_cdma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
static void xilinx_cdma_free_tx_segment(struct xilinx_dma_chan *chan,
					struct xilinx_cdma_tx_segment *segment)
{
	dma_pool_free(chan->desc_pool, segment, segment->phys);
}
/**
 * xilinx_vdma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
static void xilinx_vdma_free_tx_segment(struct xilinx_dma_chan *chan,
					struct xilinx_vdma_tx_segment *segment)
{
	dma_pool_free(chan->desc_pool, segment, segment->phys);
}
795 * xilinx_dma_alloc_tx_descriptor - Allocate transaction descriptor
796 * @chan: Driver specific DMA channel
798 * Return: The allocated descriptor on success and NULL on failure.
800 static struct xilinx_dma_tx_descriptor *
801 xilinx_dma_alloc_tx_descriptor(struct xilinx_dma_chan *chan)
803 struct xilinx_dma_tx_descriptor *desc;
805 desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
809 INIT_LIST_HEAD(&desc->segments);
/**
 * xilinx_dma_free_tx_descriptor - Free transaction descriptor
 * @chan: Driver specific DMA channel
 * @desc: DMA transaction descriptor
 */
static void
xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan,
			      struct xilinx_dma_tx_descriptor *desc)
{
	struct xilinx_vdma_tx_segment *segment, *next;
	struct xilinx_cdma_tx_segment *cdma_segment, *cdma_next;
	struct xilinx_axidma_tx_segment *axidma_segment, *axidma_next;
	struct xilinx_aximcdma_tx_segment *aximcdma_segment, *aximcdma_next;

	if (!desc)
		return;

	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		list_for_each_entry_safe(segment, next, &desc->segments, node) {
			list_del(&segment->node);
			xilinx_vdma_free_tx_segment(chan, segment);
		}
	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		list_for_each_entry_safe(cdma_segment, cdma_next,
					 &desc->segments, node) {
			list_del(&cdma_segment->node);
			xilinx_cdma_free_tx_segment(chan, cdma_segment);
		}
	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		list_for_each_entry_safe(axidma_segment, axidma_next,
					 &desc->segments, node) {
			list_del(&axidma_segment->node);
			xilinx_dma_free_tx_segment(chan, axidma_segment);
		}
	} else {
		list_for_each_entry_safe(aximcdma_segment, aximcdma_next,
					 &desc->segments, node) {
			list_del(&aximcdma_segment->node);
			xilinx_mcdma_free_tx_segment(chan, aximcdma_segment);
		}
	}

	kfree(desc);
}

/* Required functions */
862 * xilinx_dma_free_desc_list - Free descriptors list
863 * @chan: Driver specific DMA channel
864 * @list: List to parse and delete the descriptor
866 static void xilinx_dma_free_desc_list(struct xilinx_dma_chan *chan,
867 struct list_head *list)
869 struct xilinx_dma_tx_descriptor *desc, *next;
871 list_for_each_entry_safe(desc, next, list, node) {
872 list_del(&desc->node);
873 xilinx_dma_free_tx_descriptor(chan, desc);
878 * xilinx_dma_free_descriptors - Free channel descriptors
879 * @chan: Driver specific DMA channel
881 static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan)
885 spin_lock_irqsave(&chan->lock, flags);
887 xilinx_dma_free_desc_list(chan, &chan->pending_list);
888 xilinx_dma_free_desc_list(chan, &chan->done_list);
889 xilinx_dma_free_desc_list(chan, &chan->active_list);
891 spin_unlock_irqrestore(&chan->lock, flags);
895 * xilinx_dma_free_chan_resources - Free channel resources
896 * @dchan: DMA channel
898 static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
900 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
903 dev_dbg(chan->dev, "Free all channel resources.\n");
905 xilinx_dma_free_descriptors(chan);
907 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
908 spin_lock_irqsave(&chan->lock, flags);
909 INIT_LIST_HEAD(&chan->free_seg_list);
910 spin_unlock_irqrestore(&chan->lock, flags);
912 /* Free memory that is allocated for BD */
913 dma_free_coherent(chan->dev, sizeof(*chan->seg_v) *
914 XILINX_DMA_NUM_DESCS, chan->seg_v,
917 /* Free Memory that is allocated for cyclic DMA Mode */
918 dma_free_coherent(chan->dev, sizeof(*chan->cyclic_seg_v),
919 chan->cyclic_seg_v, chan->cyclic_seg_p);
922 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
923 spin_lock_irqsave(&chan->lock, flags);
924 INIT_LIST_HEAD(&chan->free_seg_list);
925 spin_unlock_irqrestore(&chan->lock, flags);
927 /* Free memory that is allocated for BD */
928 dma_free_coherent(chan->dev, sizeof(*chan->seg_mv) *
929 XILINX_DMA_NUM_DESCS, chan->seg_mv,
933 if (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA &&
934 chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIMCDMA) {
935 dma_pool_destroy(chan->desc_pool);
936 chan->desc_pool = NULL;
942 * xilinx_dma_get_residue - Compute residue for a given descriptor
943 * @chan: Driver specific dma channel
944 * @desc: dma transaction descriptor
946 * Return: The number of residue bytes for the descriptor.
948 static u32 xilinx_dma_get_residue(struct xilinx_dma_chan *chan,
949 struct xilinx_dma_tx_descriptor *desc)
951 struct xilinx_cdma_tx_segment *cdma_seg;
952 struct xilinx_axidma_tx_segment *axidma_seg;
953 struct xilinx_aximcdma_tx_segment *aximcdma_seg;
954 struct xilinx_cdma_desc_hw *cdma_hw;
955 struct xilinx_axidma_desc_hw *axidma_hw;
956 struct xilinx_aximcdma_desc_hw *aximcdma_hw;
957 struct list_head *entry;
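	/*
	 * For each segment, the low bits of 'control' hold the programmed
	 * transfer length and the low bits of 'status' hold the bytes
	 * actually transferred, so their masked difference is the number of
	 * bytes still outstanding.
	 */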
960 list_for_each(entry, &desc->segments) {
961 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
962 cdma_seg = list_entry(entry,
963 struct xilinx_cdma_tx_segment,
965 cdma_hw = &cdma_seg->hw;
966 residue += (cdma_hw->control - cdma_hw->status) &
967 chan->xdev->max_buffer_len;
968 } else if (chan->xdev->dma_config->dmatype ==
970 axidma_seg = list_entry(entry,
971 struct xilinx_axidma_tx_segment,
973 axidma_hw = &axidma_seg->hw;
974 residue += (axidma_hw->control - axidma_hw->status) &
975 chan->xdev->max_buffer_len;
979 struct xilinx_aximcdma_tx_segment,
981 aximcdma_hw = &aximcdma_seg->hw;
983 (aximcdma_hw->control - aximcdma_hw->status) &
984 chan->xdev->max_buffer_len;
992 * xilinx_dma_chan_handle_cyclic - Cyclic dma callback
993 * @chan: Driver specific dma channel
994 * @desc: dma transaction descriptor
995 * @flags: flags for spin lock
997 static void xilinx_dma_chan_handle_cyclic(struct xilinx_dma_chan *chan,
998 struct xilinx_dma_tx_descriptor *desc,
999 unsigned long *flags)
1001 struct dmaengine_desc_callback cb;
1003 dmaengine_desc_get_callback(&desc->async_tx, &cb);
1004 if (dmaengine_desc_callback_valid(&cb)) {
1005 spin_unlock_irqrestore(&chan->lock, *flags);
1006 dmaengine_desc_callback_invoke(&cb, NULL);
1007 spin_lock_irqsave(&chan->lock, *flags);
1012 * xilinx_dma_chan_desc_cleanup - Clean channel descriptors
1013 * @chan: Driver specific DMA channel
1015 static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
1017 struct xilinx_dma_tx_descriptor *desc, *next;
1018 unsigned long flags;
1020 spin_lock_irqsave(&chan->lock, flags);
1022 list_for_each_entry_safe(desc, next, &chan->done_list, node) {
1023 struct dmaengine_result result;
1026 xilinx_dma_chan_handle_cyclic(chan, desc, &flags);
1030 /* Remove from the list of running transactions */
1031 list_del(&desc->node);
1033 if (unlikely(desc->err)) {
1034 if (chan->direction == DMA_DEV_TO_MEM)
1035 result.result = DMA_TRANS_READ_FAILED;
1037 result.result = DMA_TRANS_WRITE_FAILED;
1039 result.result = DMA_TRANS_NOERROR;
1042 result.residue = desc->residue;
1044 /* Run the link descriptor callback function */
1045 spin_unlock_irqrestore(&chan->lock, flags);
1046 dmaengine_desc_get_callback_invoke(&desc->async_tx, &result);
1047 spin_lock_irqsave(&chan->lock, flags);
1049 /* Run any dependencies, then free the descriptor */
1050 dma_run_dependencies(&desc->async_tx);
1051 xilinx_dma_free_tx_descriptor(chan, desc);
1054 * While we ran a callback the user called a terminate function,
1055 * which takes care of cleaning up any remaining descriptors
1057 if (chan->terminating)
1061 spin_unlock_irqrestore(&chan->lock, flags);
1065 * xilinx_dma_do_tasklet - Schedule completion tasklet
1066 * @t: Pointer to the Xilinx DMA channel structure
1068 static void xilinx_dma_do_tasklet(struct tasklet_struct *t)
1070 struct xilinx_dma_chan *chan = from_tasklet(chan, t, tasklet);
1072 xilinx_dma_chan_desc_cleanup(chan);
1076 * xilinx_dma_alloc_chan_resources - Allocate channel resources
1077 * @dchan: DMA channel
1079 * Return: '0' on success and failure value on error
1081 static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
1083 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1086 /* Has this channel already been allocated? */
1087 if (chan->desc_pool)
1091 * We need the descriptor to be aligned to 64bytes
1092 * for meeting Xilinx VDMA specification requirement.
1094 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
1095 /* Allocate the buffer descriptors. */
1096 chan->seg_v = dma_alloc_coherent(chan->dev,
1097 sizeof(*chan->seg_v) * XILINX_DMA_NUM_DESCS,
1098 &chan->seg_p, GFP_KERNEL);
1101 "unable to allocate channel %d descriptors\n",
		/*
		 * For cyclic DMA mode we need to program the tail descriptor
		 * register with a value which is not a part of the BD chain,
		 * so allocate a descriptor segment during channel allocation
		 * just for programming the tail descriptor.
		 */
1111 chan->cyclic_seg_v = dma_alloc_coherent(chan->dev,
1112 sizeof(*chan->cyclic_seg_v),
1113 &chan->cyclic_seg_p,
1115 if (!chan->cyclic_seg_v) {
1117 "unable to allocate desc segment for cyclic DMA\n");
1118 dma_free_coherent(chan->dev, sizeof(*chan->seg_v) *
1119 XILINX_DMA_NUM_DESCS, chan->seg_v,
1123 chan->cyclic_seg_v->phys = chan->cyclic_seg_p;
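		/*
		 * Pre-link the statically allocated BDs into a ring: each
		 * descriptor's next_desc points at the following one and the
		 * last wraps back to the first ((i + 1) % XILINX_DMA_NUM_DESCS).
		 */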
1125 for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) {
1126 chan->seg_v[i].hw.next_desc =
1127 lower_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
1128 ((i + 1) % XILINX_DMA_NUM_DESCS));
1129 chan->seg_v[i].hw.next_desc_msb =
1130 upper_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
1131 ((i + 1) % XILINX_DMA_NUM_DESCS));
1132 chan->seg_v[i].phys = chan->seg_p +
1133 sizeof(*chan->seg_v) * i;
1134 list_add_tail(&chan->seg_v[i].node,
1135 &chan->free_seg_list);
1137 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
1138 /* Allocate the buffer descriptors. */
1139 chan->seg_mv = dma_alloc_coherent(chan->dev,
1140 sizeof(*chan->seg_mv) *
1141 XILINX_DMA_NUM_DESCS,
1142 &chan->seg_p, GFP_KERNEL);
1143 if (!chan->seg_mv) {
1145 "unable to allocate channel %d descriptors\n",
1149 for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) {
1150 chan->seg_mv[i].hw.next_desc =
1151 lower_32_bits(chan->seg_p + sizeof(*chan->seg_mv) *
1152 ((i + 1) % XILINX_DMA_NUM_DESCS));
1153 chan->seg_mv[i].hw.next_desc_msb =
1154 upper_32_bits(chan->seg_p + sizeof(*chan->seg_mv) *
1155 ((i + 1) % XILINX_DMA_NUM_DESCS));
1156 chan->seg_mv[i].phys = chan->seg_p +
1157 sizeof(*chan->seg_mv) * i;
1158 list_add_tail(&chan->seg_mv[i].node,
1159 &chan->free_seg_list);
1161 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
1162 chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool",
1164 sizeof(struct xilinx_cdma_tx_segment),
1165 __alignof__(struct xilinx_cdma_tx_segment),
1168 chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool",
1170 sizeof(struct xilinx_vdma_tx_segment),
1171 __alignof__(struct xilinx_vdma_tx_segment),
1175 if (!chan->desc_pool &&
1176 ((chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA) &&
1177 chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIMCDMA)) {
1179 "unable to allocate channel %d descriptor pool\n",
1184 dma_cookie_init(dchan);
1186 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		/*
		 * For AXI DMA, resetting one channel will reset the other
		 * channel as well, so enable the interrupts here.
		 */
1190 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
1191 XILINX_DMA_DMAXR_ALL_IRQ_MASK);
1194 if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
1195 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
1196 XILINX_CDMA_CR_SGMODE);
1202 * xilinx_dma_calc_copysize - Calculate the amount of data to copy
1203 * @chan: Driver specific DMA channel
1204 * @size: Total data that needs to be copied
1205 * @done: Amount of data that has been already copied
1207 * Return: Amount of data that has to be copied
static int xilinx_dma_calc_copysize(struct xilinx_dma_chan *chan,
				    int size, int done)
{
	size_t copy;

	copy = min_t(size_t, size - done,
		     chan->xdev->max_buffer_len);

	if ((copy + done < size) &&
	    chan->xdev->common.copy_align) {
		/*
		 * If this is not the last descriptor, make sure
		 * the next one will be properly aligned
		 */
		copy = rounddown(copy,
				 (1 << chan->xdev->common.copy_align));
	}
	return copy;
}
1230 * xilinx_dma_tx_status - Get DMA transaction status
1231 * @dchan: DMA channel
1232 * @cookie: Transaction identifier
1233 * @txstate: Transaction state
1235 * Return: DMA transaction status
1237 static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
1238 dma_cookie_t cookie,
1239 struct dma_tx_state *txstate)
1241 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1242 struct xilinx_dma_tx_descriptor *desc;
1243 enum dma_status ret;
1244 unsigned long flags;
1247 ret = dma_cookie_status(dchan, cookie, txstate);
1248 if (ret == DMA_COMPLETE || !txstate)
1251 spin_lock_irqsave(&chan->lock, flags);
1252 if (!list_empty(&chan->active_list)) {
1253 desc = list_last_entry(&chan->active_list,
1254 struct xilinx_dma_tx_descriptor, node);
1256 * VDMA and simple mode do not support residue reporting, so the
1257 * residue field will always be 0.
1259 if (chan->has_sg && chan->xdev->dma_config->dmatype != XDMA_TYPE_VDMA)
1260 residue = xilinx_dma_get_residue(chan, desc);
1262 spin_unlock_irqrestore(&chan->lock, flags);
1264 dma_set_residue(txstate, residue);
1270 * xilinx_dma_stop_transfer - Halt DMA channel
1271 * @chan: Driver specific DMA channel
1273 * Return: '0' on success and failure value on error
1275 static int xilinx_dma_stop_transfer(struct xilinx_dma_chan *chan)
1279 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);
1281 /* Wait for the hardware to halt */
1282 return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
1283 val & XILINX_DMA_DMASR_HALTED, 0,
1284 XILINX_DMA_LOOP_COUNT);
1288 * xilinx_cdma_stop_transfer - Wait for the current transfer to complete
1289 * @chan: Driver specific DMA channel
1291 * Return: '0' on success and failure value on error
1293 static int xilinx_cdma_stop_transfer(struct xilinx_dma_chan *chan)
1297 return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
1298 val & XILINX_DMA_DMASR_IDLE, 0,
1299 XILINX_DMA_LOOP_COUNT);
1303 * xilinx_dma_start - Start DMA channel
1304 * @chan: Driver specific DMA channel
1306 static void xilinx_dma_start(struct xilinx_dma_chan *chan)
1311 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);
1313 /* Wait for the hardware to start */
1314 err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
1315 !(val & XILINX_DMA_DMASR_HALTED), 0,
1316 XILINX_DMA_LOOP_COUNT);
1319 dev_err(chan->dev, "Cannot start channel %p: %x\n",
1320 chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
1327 * xilinx_vdma_start_transfer - Starts VDMA transfer
1328 * @chan: Driver specific channel struct pointer
1330 static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
1332 struct xilinx_vdma_config *config = &chan->config;
1333 struct xilinx_dma_tx_descriptor *desc;
1335 struct xilinx_vdma_tx_segment *segment, *last = NULL;
1338 /* This function was invoked with lock held */
1345 if (list_empty(&chan->pending_list))
1348 desc = list_first_entry(&chan->pending_list,
1349 struct xilinx_dma_tx_descriptor, node);
1351 /* Configure the hardware using info in the config structure */
1352 if (chan->has_vflip) {
1353 reg = dma_read(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP);
1354 reg &= ~XILINX_VDMA_ENABLE_VERTICAL_FLIP;
1355 reg |= config->vflip_en;
1356 dma_write(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP,
1360 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
1362 if (config->frm_cnt_en)
1363 reg |= XILINX_DMA_DMACR_FRAMECNT_EN;
1365 reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN;
1367 /* If not parking, enable circular mode */
1369 reg &= ~XILINX_DMA_DMACR_CIRC_EN;
1371 reg |= XILINX_DMA_DMACR_CIRC_EN;
1373 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
1375 j = chan->desc_submitcount;
1376 reg = dma_read(chan, XILINX_DMA_REG_PARK_PTR);
1377 if (chan->direction == DMA_MEM_TO_DEV) {
1378 reg &= ~XILINX_DMA_PARK_PTR_RD_REF_MASK;
1379 reg |= j << XILINX_DMA_PARK_PTR_RD_REF_SHIFT;
1381 reg &= ~XILINX_DMA_PARK_PTR_WR_REF_MASK;
1382 reg |= j << XILINX_DMA_PARK_PTR_WR_REF_SHIFT;
1384 dma_write(chan, XILINX_DMA_REG_PARK_PTR, reg);
1386 /* Start the hardware */
1387 xilinx_dma_start(chan);
1392 /* Start the transfer */
1393 if (chan->desc_submitcount < chan->num_frms)
1394 i = chan->desc_submitcount;
1396 list_for_each_entry(segment, &desc->segments, node) {
1398 vdma_desc_write_64(chan,
1399 XILINX_VDMA_REG_START_ADDRESS_64(i++),
1400 segment->hw.buf_addr,
1401 segment->hw.buf_addr_msb);
1403 vdma_desc_write(chan,
1404 XILINX_VDMA_REG_START_ADDRESS(i++),
1405 segment->hw.buf_addr);
	/* HW expects these parameters to be the same for one transaction */
1414 vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize);
1415 vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
1417 vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
1419 chan->desc_submitcount++;
1420 chan->desc_pendingcount--;
1421 list_move_tail(&desc->node, &chan->active_list);
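	/* Wrap the frame store index once all frame buffers have been used. */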
1422 if (chan->desc_submitcount == chan->num_frms)
1423 chan->desc_submitcount = 0;
1429 * xilinx_cdma_start_transfer - Starts cdma transfer
1430 * @chan: Driver specific channel struct pointer
1432 static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
1434 struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
1435 struct xilinx_cdma_tx_segment *tail_segment;
1436 u32 ctrl_reg = dma_read(chan, XILINX_DMA_REG_DMACR);
1444 if (list_empty(&chan->pending_list))
1447 head_desc = list_first_entry(&chan->pending_list,
1448 struct xilinx_dma_tx_descriptor, node);
1449 tail_desc = list_last_entry(&chan->pending_list,
1450 struct xilinx_dma_tx_descriptor, node);
1451 tail_segment = list_last_entry(&tail_desc->segments,
1452 struct xilinx_cdma_tx_segment, node);
1454 if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
1455 ctrl_reg &= ~XILINX_DMA_CR_COALESCE_MAX;
1456 ctrl_reg |= chan->desc_pendingcount <<
1457 XILINX_DMA_CR_COALESCE_SHIFT;
1458 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, ctrl_reg);
1462 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
1463 XILINX_CDMA_CR_SGMODE);
1465 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
1466 XILINX_CDMA_CR_SGMODE);
1468 xilinx_write(chan, XILINX_DMA_REG_CURDESC,
1469 head_desc->async_tx.phys);
1471 /* Update tail ptr register which will start the transfer */
1472 xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1473 tail_segment->phys);
1475 /* In simple mode */
1476 struct xilinx_cdma_tx_segment *segment;
1477 struct xilinx_cdma_desc_hw *hw;
1479 segment = list_first_entry(&head_desc->segments,
1480 struct xilinx_cdma_tx_segment,
1485 xilinx_write(chan, XILINX_CDMA_REG_SRCADDR,
1486 xilinx_prep_dma_addr_t(hw->src_addr));
1487 xilinx_write(chan, XILINX_CDMA_REG_DSTADDR,
1488 xilinx_prep_dma_addr_t(hw->dest_addr));
1490 /* Start the transfer */
1491 dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
1492 hw->control & chan->xdev->max_buffer_len);
1495 list_splice_tail_init(&chan->pending_list, &chan->active_list);
1496 chan->desc_pendingcount = 0;
1501 * xilinx_dma_start_transfer - Starts DMA transfer
1502 * @chan: Driver specific channel struct pointer
1504 static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
1506 struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
1507 struct xilinx_axidma_tx_segment *tail_segment;
1513 if (list_empty(&chan->pending_list))
1519 head_desc = list_first_entry(&chan->pending_list,
1520 struct xilinx_dma_tx_descriptor, node);
1521 tail_desc = list_last_entry(&chan->pending_list,
1522 struct xilinx_dma_tx_descriptor, node);
1523 tail_segment = list_last_entry(&tail_desc->segments,
1524 struct xilinx_axidma_tx_segment, node);
1526 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
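	/*
	 * Program the interrupt coalescing threshold so that, when possible,
	 * a single completion interrupt covers all currently pending
	 * descriptors.
	 */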
1528 if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
1529 reg &= ~XILINX_DMA_CR_COALESCE_MAX;
1530 reg |= chan->desc_pendingcount <<
1531 XILINX_DMA_CR_COALESCE_SHIFT;
1532 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
1536 xilinx_write(chan, XILINX_DMA_REG_CURDESC,
1537 head_desc->async_tx.phys);
1539 xilinx_dma_start(chan);
1544 /* Start the transfer */
1547 xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1548 chan->cyclic_seg_v->phys);
1550 xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1551 tail_segment->phys);
1553 struct xilinx_axidma_tx_segment *segment;
1554 struct xilinx_axidma_desc_hw *hw;
1556 segment = list_first_entry(&head_desc->segments,
1557 struct xilinx_axidma_tx_segment,
1561 xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR,
1562 xilinx_prep_dma_addr_t(hw->buf_addr));
1564 /* Start the transfer */
1565 dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
1566 hw->control & chan->xdev->max_buffer_len);
1569 list_splice_tail_init(&chan->pending_list, &chan->active_list);
1570 chan->desc_pendingcount = 0;
1575 * xilinx_mcdma_start_transfer - Starts MCDMA transfer
1576 * @chan: Driver specific channel struct pointer
1578 static void xilinx_mcdma_start_transfer(struct xilinx_dma_chan *chan)
1580 struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
1581 struct xilinx_aximcdma_tx_segment *tail_segment;
	/*
	 * The lock has already been taken by the calling functions, so we
	 * don't need to take it here again.
	 */
1595 if (list_empty(&chan->pending_list))
1598 head_desc = list_first_entry(&chan->pending_list,
1599 struct xilinx_dma_tx_descriptor, node);
1600 tail_desc = list_last_entry(&chan->pending_list,
1601 struct xilinx_dma_tx_descriptor, node);
1602 tail_segment = list_last_entry(&tail_desc->segments,
1603 struct xilinx_aximcdma_tx_segment, node);
1605 reg = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest));
1607 if (chan->desc_pendingcount <= XILINX_MCDMA_COALESCE_MAX) {
1608 reg &= ~XILINX_MCDMA_COALESCE_MASK;
1609 reg |= chan->desc_pendingcount <<
1610 XILINX_MCDMA_COALESCE_SHIFT;
1613 reg |= XILINX_MCDMA_IRQ_ALL_MASK;
1614 dma_ctrl_write(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest), reg);
1616 /* Program current descriptor */
1617 xilinx_write(chan, XILINX_MCDMA_CHAN_CDESC_OFFSET(chan->tdest),
1618 head_desc->async_tx.phys);
1620 /* Program channel enable register */
1621 reg = dma_ctrl_read(chan, XILINX_MCDMA_CHEN_OFFSET);
1622 reg |= BIT(chan->tdest);
1623 dma_ctrl_write(chan, XILINX_MCDMA_CHEN_OFFSET, reg);
1625 /* Start the fetch of BDs for the channel */
1626 reg = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest));
1627 reg |= XILINX_MCDMA_CR_RUNSTOP_MASK;
1628 dma_ctrl_write(chan, XILINX_MCDMA_CHAN_CR_OFFSET(chan->tdest), reg);
1630 xilinx_dma_start(chan);
1635 /* Start the transfer */
1636 xilinx_write(chan, XILINX_MCDMA_CHAN_TDESC_OFFSET(chan->tdest),
1637 tail_segment->phys);
1639 list_splice_tail_init(&chan->pending_list, &chan->active_list);
1640 chan->desc_pendingcount = 0;
1645 * xilinx_dma_issue_pending - Issue pending transactions
1646 * @dchan: DMA channel
1648 static void xilinx_dma_issue_pending(struct dma_chan *dchan)
1650 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1651 unsigned long flags;
1653 spin_lock_irqsave(&chan->lock, flags);
1654 chan->start_transfer(chan);
1655 spin_unlock_irqrestore(&chan->lock, flags);
1659 * xilinx_dma_device_config - Configure the DMA channel
1660 * @dchan: DMA channel
1661 * @config: channel configuration
1663 static int xilinx_dma_device_config(struct dma_chan *dchan,
1664 struct dma_slave_config *config)
1670 * xilinx_dma_complete_descriptor - Mark the active descriptor as complete
1671 * @chan : xilinx DMA channel
1675 static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan)
1677 struct xilinx_dma_tx_descriptor *desc, *next;
1679 /* This function was invoked with lock held */
1680 if (list_empty(&chan->active_list))
1683 list_for_each_entry_safe(desc, next, &chan->active_list, node) {
1684 if (chan->has_sg && chan->xdev->dma_config->dmatype !=
1686 desc->residue = xilinx_dma_get_residue(chan, desc);
1689 desc->err = chan->err;
1691 list_del(&desc->node);
1693 dma_cookie_complete(&desc->async_tx);
1694 list_add_tail(&desc->node, &chan->done_list);
1699 * xilinx_dma_reset - Reset DMA channel
1700 * @chan: Driver specific DMA channel
1702 * Return: '0' on success and failure value on error
1704 static int xilinx_dma_reset(struct xilinx_dma_chan *chan)
1709 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RESET);
1711 /* Wait for the hardware to finish reset */
1712 err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMACR, tmp,
1713 !(tmp & XILINX_DMA_DMACR_RESET), 0,
1714 XILINX_DMA_LOOP_COUNT);
1717 dev_err(chan->dev, "reset timeout, cr %x, sr %x\n",
1718 dma_ctrl_read(chan, XILINX_DMA_REG_DMACR),
1719 dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
1725 chan->desc_pendingcount = 0;
1726 chan->desc_submitcount = 0;
1732 * xilinx_dma_chan_reset - Reset DMA channel and enable interrupts
1733 * @chan: Driver specific DMA channel
1735 * Return: '0' on success and failure value on error
1737 static int xilinx_dma_chan_reset(struct xilinx_dma_chan *chan)
1742 err = xilinx_dma_reset(chan);
1746 /* Enable interrupts */
1747 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
1748 XILINX_DMA_DMAXR_ALL_IRQ_MASK);
1754 * xilinx_mcdma_irq_handler - MCDMA Interrupt handler
1756 * @data: Pointer to the Xilinx MCDMA channel structure
1758 * Return: IRQ_HANDLED/IRQ_NONE
1760 static irqreturn_t xilinx_mcdma_irq_handler(int irq, void *data)
1762 struct xilinx_dma_chan *chan = data;
1763 u32 status, ser_offset, chan_sermask, chan_offset = 0, chan_id;
1765 if (chan->direction == DMA_DEV_TO_MEM)
1766 ser_offset = XILINX_MCDMA_RXINT_SER_OFFSET;
1768 ser_offset = XILINX_MCDMA_TXINT_SER_OFFSET;
	/* Read the channel id raising the interrupt */
1771 chan_sermask = dma_ctrl_read(chan, ser_offset);
1772 chan_id = ffs(chan_sermask);
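	/*
	 * S2MM channels occupy the upper half of xdev->chan[], so offset the
	 * index by max_channels / 2 for DEV_TO_MEM interrupts before looking
	 * up the channel that raised this interrupt.
	 */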
1777 if (chan->direction == DMA_DEV_TO_MEM)
1778 chan_offset = chan->xdev->dma_config->max_channels / 2;
1780 chan_offset = chan_offset + (chan_id - 1);
1781 chan = chan->xdev->chan[chan_offset];
1782 /* Read the status and ack the interrupts. */
1783 status = dma_ctrl_read(chan, XILINX_MCDMA_CHAN_SR_OFFSET(chan->tdest));
1784 if (!(status & XILINX_MCDMA_IRQ_ALL_MASK))
1787 dma_ctrl_write(chan, XILINX_MCDMA_CHAN_SR_OFFSET(chan->tdest),
1788 status & XILINX_MCDMA_IRQ_ALL_MASK);
1790 if (status & XILINX_MCDMA_IRQ_ERR_MASK) {
1791 dev_err(chan->dev, "Channel %p has errors %x cdr %x tdr %x\n",
1793 dma_ctrl_read(chan, XILINX_MCDMA_CH_ERR_OFFSET),
1794 dma_ctrl_read(chan, XILINX_MCDMA_CHAN_CDESC_OFFSET
1796 dma_ctrl_read(chan, XILINX_MCDMA_CHAN_TDESC_OFFSET
	if (status & XILINX_MCDMA_IRQ_DELAY_MASK) {
		/*
		 * Device takes too long to do the transfer when user requires
		 * responsiveness.
		 */
		dev_dbg(chan->dev, "Inter-packet latency too long\n");
	}
1809 if (status & XILINX_MCDMA_IRQ_IOC_MASK) {
1810 spin_lock(&chan->lock);
1811 xilinx_dma_complete_descriptor(chan);
1813 chan->start_transfer(chan);
1814 spin_unlock(&chan->lock);
1817 tasklet_schedule(&chan->tasklet);
1822 * xilinx_dma_irq_handler - DMA Interrupt handler
1824 * @data: Pointer to the Xilinx DMA channel structure
1826 * Return: IRQ_HANDLED/IRQ_NONE
1828 static irqreturn_t xilinx_dma_irq_handler(int irq, void *data)
1830 struct xilinx_dma_chan *chan = data;
1833 /* Read the status and ack the interrupts. */
1834 status = dma_ctrl_read(chan, XILINX_DMA_REG_DMASR);
1835 if (!(status & XILINX_DMA_DMAXR_ALL_IRQ_MASK))
1838 dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
1839 status & XILINX_DMA_DMAXR_ALL_IRQ_MASK);
	if (status & XILINX_DMA_DMASR_ERR_IRQ) {
		/*
		 * An error occurred. If C_FLUSH_ON_FSYNC is enabled and the
		 * error is recoverable, ignore it. Otherwise flag the error.
		 *
		 * Only recoverable errors can be cleared in the DMASR register,
		 * so make sure not to set other error bits to 1.
		 */
1849 u32 errors = status & XILINX_DMA_DMASR_ALL_ERR_MASK;
1851 dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
1852 errors & XILINX_DMA_DMASR_ERR_RECOVER_MASK);
1854 if (!chan->flush_on_fsync ||
1855 (errors & ~XILINX_DMA_DMASR_ERR_RECOVER_MASK)) {
1857 "Channel %p has errors %x, cdr %x tdr %x\n",
1859 dma_ctrl_read(chan, XILINX_DMA_REG_CURDESC),
1860 dma_ctrl_read(chan, XILINX_DMA_REG_TAILDESC));
	if (status & XILINX_DMA_DMASR_DLY_CNT_IRQ) {
		/*
		 * Device takes too long to do the transfer when user requires
		 * responsiveness.
		 */
		dev_dbg(chan->dev, "Inter-packet latency too long\n");
	}
1873 if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) {
1874 spin_lock(&chan->lock);
1875 xilinx_dma_complete_descriptor(chan);
1877 chan->start_transfer(chan);
1878 spin_unlock(&chan->lock);
1881 tasklet_schedule(&chan->tasklet);
1886 * append_desc_queue - Queuing descriptor
1887 * @chan: Driver specific dma channel
1888 * @desc: dma transaction descriptor
1890 static void append_desc_queue(struct xilinx_dma_chan *chan,
1891 struct xilinx_dma_tx_descriptor *desc)
1893 struct xilinx_vdma_tx_segment *tail_segment;
1894 struct xilinx_dma_tx_descriptor *tail_desc;
1895 struct xilinx_axidma_tx_segment *axidma_tail_segment;
1896 struct xilinx_aximcdma_tx_segment *aximcdma_tail_segment;
1897 struct xilinx_cdma_tx_segment *cdma_tail_segment;
1899 if (list_empty(&chan->pending_list))
	/*
	 * Add the hardware descriptor to the chain of hardware descriptors
	 * that already exists in memory.
	 */
1906 tail_desc = list_last_entry(&chan->pending_list,
1907 struct xilinx_dma_tx_descriptor, node);
1908 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
1909 tail_segment = list_last_entry(&tail_desc->segments,
1910 struct xilinx_vdma_tx_segment,
1912 tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1913 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
1914 cdma_tail_segment = list_last_entry(&tail_desc->segments,
1915 struct xilinx_cdma_tx_segment,
1917 cdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1918 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
1919 axidma_tail_segment = list_last_entry(&tail_desc->segments,
1920 struct xilinx_axidma_tx_segment,
1922 axidma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1924 aximcdma_tail_segment =
1925 list_last_entry(&tail_desc->segments,
1926 struct xilinx_aximcdma_tx_segment,
1928 aximcdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
	/*
	 * Add the software descriptor and all children to the list
	 * of pending transactions
	 */
1936 list_add_tail(&desc->node, &chan->pending_list);
1937 chan->desc_pendingcount++;
1939 if (chan->has_sg && (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA)
1940 && unlikely(chan->desc_pendingcount > chan->num_frms)) {
1941 dev_dbg(chan->dev, "desc pendingcount is too high\n");
1942 chan->desc_pendingcount = chan->num_frms;
1947 * xilinx_dma_tx_submit - Submit DMA transaction
1948 * @tx: Async transaction descriptor
1950 * Return: cookie value on success and failure value on error
1952 static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
1954 struct xilinx_dma_tx_descriptor *desc = to_dma_tx_descriptor(tx);
1955 struct xilinx_dma_chan *chan = to_xilinx_chan(tx->chan);
1956 dma_cookie_t cookie;
1957 unsigned long flags;
1961 xilinx_dma_free_tx_descriptor(chan, desc);
		/*
		 * If reset fails, need to hard reset the system.
		 * Channel is no longer functional
		 */
1970 err = xilinx_dma_chan_reset(chan);
1975 spin_lock_irqsave(&chan->lock, flags);
1977 cookie = dma_cookie_assign(tx);
1979 /* Put this transaction onto the tail of the pending queue */
1980 append_desc_queue(chan, desc);
	if (desc->cyclic)
		chan->cyclic = true;

	chan->terminating = false;
1987 spin_unlock_irqrestore(&chan->lock, flags);
1993 * xilinx_vdma_dma_prep_interleaved - prepare a descriptor for a
1994 * DMA_SLAVE transaction
1995 * @dchan: DMA channel
1996 * @xt: Interleaved template pointer
1997 * @flags: transfer ack flags
1999 * Return: Async transaction descriptor on success and NULL on failure
2001 static struct dma_async_tx_descriptor *
2002 xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
2003 struct dma_interleaved_template *xt,
2004 unsigned long flags)
2006 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2007 struct xilinx_dma_tx_descriptor *desc;
2008 struct xilinx_vdma_tx_segment *segment;
2009 struct xilinx_vdma_desc_hw *hw;
2011 if (!is_slave_direction(xt->dir))
2014 if (!xt->numf || !xt->sgl[0].size)
2017 if (xt->frame_size != 1)
2020 /* Allocate a transaction descriptor. */
2021 desc = xilinx_dma_alloc_tx_descriptor(chan);
2025 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
2026 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
2027 async_tx_ack(&desc->async_tx);
2029 /* Allocate the link descriptor from DMA pool */
2030 segment = xilinx_vdma_alloc_tx_segment(chan);
2034 /* Fill in the hardware descriptor */
2036 hw->vsize = xt->numf;
2037 hw->hsize = xt->sgl[0].size;
2038 hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) <<
2039 XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT;
2040 hw->stride |= chan->config.frm_dly <<
2041 XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT;
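	/*
	 * Program the buffer address from the template: the destination for
	 * S2MM (device to memory) transfers, the source for MM2S.
	 */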
2043 if (xt->dir != DMA_MEM_TO_DEV) {
2044 if (chan->ext_addr) {
2045 hw->buf_addr = lower_32_bits(xt->dst_start);
2046 hw->buf_addr_msb = upper_32_bits(xt->dst_start);
2048 hw->buf_addr = xt->dst_start;
2051 if (chan->ext_addr) {
2052 hw->buf_addr = lower_32_bits(xt->src_start);
2053 hw->buf_addr_msb = upper_32_bits(xt->src_start);
2055 hw->buf_addr = xt->src_start;
2059 /* Insert the segment into the descriptor segments list. */
2060 list_add_tail(&segment->node, &desc->segments);
2062 /* Link the last hardware descriptor with the first. */
2063 segment = list_first_entry(&desc->segments,
2064 struct xilinx_vdma_tx_segment, node);
2065 desc->async_tx.phys = segment->phys;
2067 return &desc->async_tx;
2070 xilinx_dma_free_tx_descriptor(chan, desc);
2075 * xilinx_cdma_prep_memcpy - prepare descriptors for a memcpy transaction
2076 * @dchan: DMA channel
2077 * @dma_dst: destination address
2078 * @dma_src: source address
2079 * @len: transfer length
2080 * @flags: transfer ack flags
2082 * Return: Async transaction descriptor on success and NULL on failure
2084 static struct dma_async_tx_descriptor *
2085 xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
2086 dma_addr_t dma_src, size_t len, unsigned long flags)
2088 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2089 struct xilinx_dma_tx_descriptor *desc;
2090 struct xilinx_cdma_tx_segment *segment;
2091 struct xilinx_cdma_desc_hw *hw;
	if (!len || len > chan->xdev->max_buffer_len)
		return NULL;

	desc = xilinx_dma_alloc_tx_descriptor(chan);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = xilinx_dma_tx_submit;

	/* Allocate the link descriptor from DMA pool */
	segment = xilinx_cdma_alloc_tx_segment(chan);
	if (!segment)
		goto error;

	hw = &segment->hw;
	hw->control = len;
	hw->src_addr = dma_src;
	hw->dest_addr = dma_dst;
2112 if (chan->ext_addr) {
2113 hw->src_addr_msb = upper_32_bits(dma_src);
		hw->dest_addr_msb = upper_32_bits(dma_dst);
	}
2117 /* Insert the segment into the descriptor segments list. */
2118 list_add_tail(&segment->node, &desc->segments);
2120 desc->async_tx.phys = segment->phys;
2121 hw->next_desc = segment->phys;
	return &desc->async_tx;

error:
	xilinx_dma_free_tx_descriptor(chan, desc);
	return NULL;
}
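/*
 * Editor's sketch (assumption, not part of the original file): a CDMA memcpy
 * consumer typically reaches the callback above through the generic helpers.
 * "dst", "src" and "size" are hypothetical, already DMA-mapped values.
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_chan_by_mask(&mask);
 *	desc = dmaengine_prep_dma_memcpy(chan, dst, src, size,
 *					 DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	cookie = dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 *	dma_sync_wait(chan, cookie);		// simple blocking wait
 *	dma_release_channel(chan);
 */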
/**
 * xilinx_cdma_prep_memcpy_sg - prepare descriptors for a memcpy_sg transaction
 * @dchan: DMA channel
 * @dst_sg: Destination scatter list
 * @dst_sg_len: Number of entries in destination scatter list
 * @src_sg: Source scatter list
 * @src_sg_len: Number of entries in source scatter list
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
2141 static struct dma_async_tx_descriptor *xilinx_cdma_prep_memcpy_sg(
2142 struct dma_chan *dchan, struct scatterlist *dst_sg,
2143 unsigned int dst_sg_len, struct scatterlist *src_sg,
	unsigned int src_sg_len, unsigned long flags)
{
2146 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2147 struct xilinx_dma_tx_descriptor *desc;
2148 struct xilinx_cdma_tx_segment *segment, *prev = NULL;
2149 struct xilinx_cdma_desc_hw *hw;
2150 size_t len, dst_avail, src_avail;
2151 dma_addr_t dma_dst, dma_src;
	if (unlikely(dst_sg_len == 0 || src_sg_len == 0))
		return NULL;

	if (unlikely(!dst_sg || !src_sg))
		return NULL;

	desc = xilinx_dma_alloc_tx_descriptor(chan);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = xilinx_dma_tx_submit;
2166 dst_avail = sg_dma_len(dst_sg);
2167 src_avail = sg_dma_len(src_sg);
	/*
	 * loop until there is either no more source or no more destination
	 * scatterlist entry
	 */
	while (true) {
		len = min_t(size_t, src_avail, dst_avail);
		len = min_t(size_t, len, chan->xdev->max_buffer_len);
		if (len == 0)
			break;

		/* Allocate the link descriptor from DMA pool */
		segment = xilinx_cdma_alloc_tx_segment(chan);
		if (!segment)
			goto error;

		dma_dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) -
			dst_avail;
		dma_src = sg_dma_address(src_sg) + sg_dma_len(src_sg) -
			src_avail;
		hw = &segment->hw;
		hw->control = len;
		hw->src_addr = dma_src;
		hw->dest_addr = dma_dst;
2191 if (chan->ext_addr) {
2192 hw->src_addr_msb = upper_32_bits(dma_src);
			hw->dest_addr_msb = upper_32_bits(dma_dst);
		}

		if (prev) {
			prev->hw.next_desc = segment->phys;
			if (chan->ext_addr)
				prev->hw.next_desc_msb =
					upper_32_bits(segment->phys);
		}

		prev = segment;
		dst_avail -= len;
		src_avail -= len;
		list_add_tail(&segment->node, &desc->segments);

		/* Fetch the next dst scatterlist entry */
		if (dst_avail == 0) {
			if (dst_sg_len == 0)
				break;
			dst_sg = sg_next(dst_sg);
			if (dst_sg == NULL)
				break;
			dst_sg_len--;
			dst_avail = sg_dma_len(dst_sg);
		}
		/* Fetch the next src scatterlist entry */
		if (src_avail == 0) {
			if (src_sg_len == 0)
				break;
			src_sg = sg_next(src_sg);
			if (src_sg == NULL)
				break;
			src_sg_len--;
			src_avail = sg_dma_len(src_sg);
		}
	}
	if (list_empty(&desc->segments)) {
		dev_err(chan->xdev->dev,
			"%s: Zero-size SG transfer requested\n", __func__);
		goto error;
	}
2237 /* Link the last hardware descriptor with the first. */
2238 segment = list_first_entry(&desc->segments,
2239 struct xilinx_cdma_tx_segment, node);
2240 desc->async_tx.phys = segment->phys;
	prev->hw.next_desc = segment->phys;

	return &desc->async_tx;

error:
	xilinx_dma_free_tx_descriptor(chan, desc);
	return NULL;
}
/**
 * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @dchan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: transfer ack flags
 * @context: APP words of the descriptor
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
2261 static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
2262 struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2267 struct xilinx_dma_tx_descriptor *desc;
2268 struct xilinx_axidma_tx_segment *segment = NULL;
2269 u32 *app_w = (u32 *)context;
2270 struct scatterlist *sg;
	if (!is_slave_direction(direction))
		return NULL;

	/* Allocate a transaction descriptor. */
	desc = xilinx_dma_alloc_tx_descriptor(chan);
	if (!desc)
		return NULL;
2283 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
2284 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
2286 /* Build transactions using information in the scatter gather list */
	for_each_sg(sgl, sg, sg_len, i) {
		sg_used = 0;

2290 /* Loop until the entire scatterlist entry is used */
2291 while (sg_used < sg_dma_len(sg)) {
2292 struct xilinx_axidma_desc_hw *hw;
			/* Get a free segment */
			segment = xilinx_axidma_alloc_tx_segment(chan);
			if (!segment)
				goto error;

			/*
			 * Calculate the maximum number of bytes to transfer,
			 * making sure it is less than the hw limit
			 */
			copy = xilinx_dma_calc_copysize(chan, sg_dma_len(sg),
							sg_used);
			hw = &segment->hw;

			/* Fill in the descriptor */
			xilinx_axidma_buf(chan, hw, sg_dma_address(sg),
					  sg_used, copy);
			hw->control = copy;

			if (chan->direction == DMA_MEM_TO_DEV) {
				if (app_w)
					memcpy(hw->app, app_w, sizeof(u32) *
					       XILINX_DMA_NUM_APP_WORDS);
			}

			sg_used += copy;

			/*
			 * Insert the segment into the descriptor segments
			 * list.
			 */
			list_add_tail(&segment->node, &desc->segments);
		}
	}

	segment = list_first_entry(&desc->segments,
				   struct xilinx_axidma_tx_segment, node);
2331 desc->async_tx.phys = segment->phys;
2333 /* For the last DMA_MEM_TO_DEV transfer, set EOP */
2334 if (chan->direction == DMA_MEM_TO_DEV) {
2335 segment->hw.control |= XILINX_DMA_BD_SOP;
		segment = list_last_entry(&desc->segments,
					  struct xilinx_axidma_tx_segment,
					  node);
		segment->hw.control |= XILINX_DMA_BD_EOP;
	}

	return &desc->async_tx;

error:
	xilinx_dma_free_tx_descriptor(chan, desc);
	return NULL;
}
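/*
 * Editor's sketch of the client side (assumption, not in the original):
 * mapping a single buffer and handing it to the callback above through the
 * standard wrapper. "dev", "buf" and "len" are hypothetical.
 *
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, buf, len);
 *	if (!dma_map_sg(dev, &sg, 1, DMA_TO_DEVICE))
 *		return -ENOMEM;
 *	desc = dmaengine_prep_slave_sg(chan, &sg, 1, DMA_MEM_TO_DEV,
 *				       DMA_PREP_INTERRUPT);
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */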
/**
 * xilinx_dma_prep_dma_cyclic - prepare descriptors for a cyclic DMA transaction
 * @dchan: DMA channel
 * @buf_addr: Physical address of the buffer
 * @buf_len: Total length of the cyclic buffers
 * @period_len: length of individual cyclic buffer
 * @direction: DMA direction
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
2360 static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
2361 struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len,
2362 size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
2365 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2366 struct xilinx_dma_tx_descriptor *desc;
2367 struct xilinx_axidma_tx_segment *segment, *head_segment, *prev = NULL;
2368 size_t copy, sg_used;
	unsigned int num_periods;
	int i;
	u32 reg;

	if (!period_len)
		return NULL;

	num_periods = buf_len / period_len;
	if (!num_periods)
		return NULL;

	if (!is_slave_direction(direction))
		return NULL;

	/* Allocate a transaction descriptor. */
	desc = xilinx_dma_alloc_tx_descriptor(chan);
	if (!desc)
		return NULL;
2389 chan->direction = direction;
2390 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
2391 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
	for (i = 0; i < num_periods; ++i) {
		sg_used = 0;

2396 while (sg_used < period_len) {
2397 struct xilinx_axidma_desc_hw *hw;
2399 /* Get a free segment */
2400 segment = xilinx_axidma_alloc_tx_segment(chan);
2405 * Calculate the maximum number of bytes to transfer,
2406 * making sure it is less than the hw limit
			copy = xilinx_dma_calc_copysize(chan, period_len,
							sg_used);
			hw = &segment->hw;
			xilinx_axidma_buf(chan, hw, buf_addr, sg_used,
					  period_len * i);
			hw->control = copy;

			if (prev)
				prev->hw.next_desc = segment->phys;

			prev = segment;
			sg_used += copy;

			/*
			 * Insert the segment into the descriptor segments
			 * list.
			 */
			list_add_tail(&segment->node, &desc->segments);
		}
	}

	head_segment = list_first_entry(&desc->segments,
2430 struct xilinx_axidma_tx_segment, node);
2431 desc->async_tx.phys = head_segment->phys;
2433 desc->cyclic = true;
2434 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
2435 reg |= XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
2436 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
	segment = list_last_entry(&desc->segments,
				  struct xilinx_axidma_tx_segment,
				  node);
	segment->hw.next_desc = (u32) head_segment->phys;

	/* For the last DMA_MEM_TO_DEV transfer, set EOP */
	if (direction == DMA_MEM_TO_DEV) {
		head_segment->hw.control |= XILINX_DMA_BD_SOP;
		segment->hw.control |= XILINX_DMA_BD_EOP;
	}

	return &desc->async_tx;

error:
	xilinx_dma_free_tx_descriptor(chan, desc);
	return NULL;
}
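/*
 * Editor's sketch (assumption): a cyclic client, e.g. a ring buffer split
 * into periods with a callback per period. "ring_dma", "ring_len", "period"
 * and "period_done_cb" are hypothetical.
 *
 *	desc = dmaengine_prep_dma_cyclic(chan, ring_dma, ring_len, period,
 *					 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	desc->callback = period_done_cb;
 *	desc->callback_param = my_ctx;
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 *	...
 *	dmaengine_terminate_sync(chan);		// stop the ring when done
 */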
/**
 * xilinx_mcdma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @dchan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: transfer ack flags
 * @context: APP words of the descriptor
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
2467 static struct dma_async_tx_descriptor *
2468 xilinx_mcdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
2469 unsigned int sg_len,
2470 enum dma_transfer_direction direction,
			   unsigned long flags, void *context)
{
2473 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2474 struct xilinx_dma_tx_descriptor *desc;
2475 struct xilinx_aximcdma_tx_segment *segment = NULL;
2476 u32 *app_w = (u32 *)context;
2477 struct scatterlist *sg;
2482 if (!is_slave_direction(direction))
2485 /* Allocate a transaction descriptor. */
2486 desc = xilinx_dma_alloc_tx_descriptor(chan);
2490 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
2491 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
2493 /* Build transactions using information in the scatter gather list */
2494 for_each_sg(sgl, sg, sg_len, i) {
2497 /* Loop until the entire scatterlist entry is used */
2498 while (sg_used < sg_dma_len(sg)) {
2499 struct xilinx_aximcdma_desc_hw *hw;
2501 /* Get a free segment */
2502 segment = xilinx_aximcdma_alloc_tx_segment(chan);
2507 * Calculate the maximum number of bytes to transfer,
2508 * making sure it is less than the hw limit
2510 copy = min_t(size_t, sg_dma_len(sg) - sg_used,
2511 chan->xdev->max_buffer_len);
2514 /* Fill in the descriptor */
2515 xilinx_aximcdma_buf(chan, hw, sg_dma_address(sg),
2519 if (chan->direction == DMA_MEM_TO_DEV && app_w) {
2520 memcpy(hw->app, app_w, sizeof(u32) *
2521 XILINX_DMA_NUM_APP_WORDS);
2526 * Insert the segment into the descriptor segments
2529 list_add_tail(&segment->node, &desc->segments);
2533 segment = list_first_entry(&desc->segments,
2534 struct xilinx_aximcdma_tx_segment, node);
2535 desc->async_tx.phys = segment->phys;
2537 /* For the last DMA_MEM_TO_DEV transfer, set EOP */
2538 if (chan->direction == DMA_MEM_TO_DEV) {
2539 segment->hw.control |= XILINX_MCDMA_BD_SOP;
2540 segment = list_last_entry(&desc->segments,
2541 struct xilinx_aximcdma_tx_segment,
		segment->hw.control |= XILINX_MCDMA_BD_EOP;
	}

	return &desc->async_tx;

error:
	xilinx_dma_free_tx_descriptor(chan, desc);
	return NULL;
}
/**
 * xilinx_dma_terminate_all - Halt the channel and free descriptors
 * @dchan: Driver specific DMA Channel pointer
 *
 * Return: '0' always.
 */
static int xilinx_dma_terminate_all(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	u32 reg;
	int err;

	if (!chan->cyclic) {
		err = chan->stop_transfer(chan);
		if (err) {
			dev_err(chan->dev, "Cannot stop channel %p: %x\n",
				chan, dma_ctrl_read(chan,
				XILINX_DMA_REG_DMASR));
			chan->err = true;
		}
	}

	xilinx_dma_chan_reset(chan);
	/* Remove and free all of the descriptors in the lists */
	chan->terminating = true;
	xilinx_dma_free_descriptors(chan);
	chan->idle = true;

	if (chan->cyclic) {
		reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
		reg &= ~XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
		dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
		chan->cyclic = false;
	}

	if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
		dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
			     XILINX_CDMA_CR_SGMODE);

	return 0;
}
static void xilinx_dma_synchronize(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);

	tasklet_kill(&chan->tasklet);
}
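/*
 * Editor's note (assumption, not from the original source): the two callbacks
 * above form the standard teardown pair for a client:
 *
 *	dmaengine_terminate_async(chan);	// -> xilinx_dma_terminate_all()
 *	dmaengine_synchronize(chan);		// -> xilinx_dma_synchronize()
 *
 * or simply dmaengine_terminate_sync(chan), which combines both, before any
 * memory still referenced by completion callbacks is freed.
 */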
/**
 * xilinx_vdma_channel_set_config - Configure VDMA channel
 * Run-time configuration for AXI VDMA, supports:
 * . halt the channel
 * . configure interrupt coalescing and inter-packet delay threshold
 * . start/stop parking
 * . enable genlock
 *
 * @dchan: DMA channel
 * @cfg: VDMA device configuration pointer
 *
 * Return: '0' on success and failure value on error
 */
2616 int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
				   struct xilinx_vdma_config *cfg)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	u32 dmacr;

	if (cfg->reset)
		return xilinx_dma_chan_reset(chan);

	dmacr = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
2627 chan->config.frm_dly = cfg->frm_dly;
2628 chan->config.park = cfg->park;
2630 /* genlock settings */
2631 chan->config.gen_lock = cfg->gen_lock;
2632 chan->config.master = cfg->master;
2634 dmacr &= ~XILINX_DMA_DMACR_GENLOCK_EN;
2635 if (cfg->gen_lock && chan->genlock) {
2636 dmacr |= XILINX_DMA_DMACR_GENLOCK_EN;
2637 dmacr &= ~XILINX_DMA_DMACR_MASTER_MASK;
		dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT;
	}
2641 chan->config.frm_cnt_en = cfg->frm_cnt_en;
2642 chan->config.vflip_en = cfg->vflip_en;
	if (cfg->park)
		chan->config.park_frm = cfg->park_frm;
	else
		chan->config.park_frm = -1;
2649 chan->config.coalesc = cfg->coalesc;
2650 chan->config.delay = cfg->delay;
2652 if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) {
2653 dmacr &= ~XILINX_DMA_DMACR_FRAME_COUNT_MASK;
2654 dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT;
2655 chan->config.coalesc = cfg->coalesc;
2658 if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) {
2659 dmacr &= ~XILINX_DMA_DMACR_DELAY_MASK;
2660 dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT;
2661 chan->config.delay = cfg->delay;
2664 /* FSync Source selection */
2665 dmacr &= ~XILINX_DMA_DMACR_FSYNCSRC_MASK;
2666 dmacr |= cfg->ext_fsync << XILINX_DMA_DMACR_FSYNCSRC_SHIFT;
	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, dmacr);

	return 0;
}
EXPORT_SYMBOL(xilinx_vdma_channel_set_config);
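/*
 * Editor's sketch (assumption): how a video client might use the exported
 * helper above; the field values are hypothetical and "chan" is a VDMA
 * channel obtained through the dmaengine API.
 *
 *	struct xilinx_vdma_config cfg = { };
 *
 *	cfg.frm_dly = 0;
 *	cfg.park = 1;			// park on a single frame store
 *	cfg.park_frm = 0;
 *	cfg.coalesc = 1;		// interrupt after every frame
 *	cfg.reset = 0;
 *	err = xilinx_vdma_channel_set_config(chan, &cfg);
 */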
/* -----------------------------------------------------------------------------
 * Probe and remove
 */

/**
 * xilinx_dma_chan_remove - Per Channel remove function
2680 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan)
{
2684 /* Disable all interrupts */
2685 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
		     XILINX_DMA_DMAXR_ALL_IRQ_MASK);

	if (chan->irq > 0)
		free_irq(chan->irq, chan);

	tasklet_kill(&chan->tasklet);

	list_del(&chan->common.device_node);
}
2696 static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
2697 struct clk **tx_clk, struct clk **rx_clk,
2698 struct clk **sg_clk, struct clk **tmp_clk)
2704 *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
2705 if (IS_ERR(*axi_clk))
2706 return dev_err_probe(&pdev->dev, PTR_ERR(*axi_clk), "failed to get axi_aclk\n");
2708 *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
2709 if (IS_ERR(*tx_clk))
2712 *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
2713 if (IS_ERR(*rx_clk))
2716 *sg_clk = devm_clk_get(&pdev->dev, "m_axi_sg_aclk");
2717 if (IS_ERR(*sg_clk))
2720 err = clk_prepare_enable(*axi_clk);
2722 dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
2726 err = clk_prepare_enable(*tx_clk);
2728 dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
2729 goto err_disable_axiclk;
2732 err = clk_prepare_enable(*rx_clk);
2734 dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
2735 goto err_disable_txclk;
2738 err = clk_prepare_enable(*sg_clk);
2740 dev_err(&pdev->dev, "failed to enable sg_clk (%d)\n", err);
2741 goto err_disable_rxclk;
2747 clk_disable_unprepare(*rx_clk);
2749 clk_disable_unprepare(*tx_clk);
2751 clk_disable_unprepare(*axi_clk);
2756 static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
2757 struct clk **dev_clk, struct clk **tmp_clk,
2758 struct clk **tmp1_clk, struct clk **tmp2_clk)
2766 *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
2767 if (IS_ERR(*axi_clk))
2768 return dev_err_probe(&pdev->dev, PTR_ERR(*axi_clk), "failed to get axi_aclk\n");
2770 *dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk");
2771 if (IS_ERR(*dev_clk))
2772 return dev_err_probe(&pdev->dev, PTR_ERR(*dev_clk), "failed to get dev_clk\n");
2774 err = clk_prepare_enable(*axi_clk);
2776 dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
2780 err = clk_prepare_enable(*dev_clk);
2782 dev_err(&pdev->dev, "failed to enable dev_clk (%d)\n", err);
2783 goto err_disable_axiclk;
2789 clk_disable_unprepare(*axi_clk);
2794 static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
2795 struct clk **tx_clk, struct clk **txs_clk,
2796 struct clk **rx_clk, struct clk **rxs_clk)
2800 *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
2801 if (IS_ERR(*axi_clk))
2802 return dev_err_probe(&pdev->dev, PTR_ERR(*axi_clk), "failed to get axi_aclk\n");
2804 *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
2805 if (IS_ERR(*tx_clk))
2808 *txs_clk = devm_clk_get(&pdev->dev, "m_axis_mm2s_aclk");
2809 if (IS_ERR(*txs_clk))
2812 *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
2813 if (IS_ERR(*rx_clk))
2816 *rxs_clk = devm_clk_get(&pdev->dev, "s_axis_s2mm_aclk");
2817 if (IS_ERR(*rxs_clk))
2820 err = clk_prepare_enable(*axi_clk);
2822 dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n",
2827 err = clk_prepare_enable(*tx_clk);
2829 dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
2830 goto err_disable_axiclk;
2833 err = clk_prepare_enable(*txs_clk);
2835 dev_err(&pdev->dev, "failed to enable txs_clk (%d)\n", err);
2836 goto err_disable_txclk;
2839 err = clk_prepare_enable(*rx_clk);
2841 dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
2842 goto err_disable_txsclk;
2845 err = clk_prepare_enable(*rxs_clk);
2847 dev_err(&pdev->dev, "failed to enable rxs_clk (%d)\n", err);
2848 goto err_disable_rxclk;
2854 clk_disable_unprepare(*rx_clk);
2856 clk_disable_unprepare(*txs_clk);
2858 clk_disable_unprepare(*tx_clk);
2860 clk_disable_unprepare(*axi_clk);
2865 static void xdma_disable_allclks(struct xilinx_dma_device *xdev)
2867 clk_disable_unprepare(xdev->rxs_clk);
2868 clk_disable_unprepare(xdev->rx_clk);
2869 clk_disable_unprepare(xdev->txs_clk);
2870 clk_disable_unprepare(xdev->tx_clk);
2871 clk_disable_unprepare(xdev->axi_clk);
/**
 * xilinx_dma_chan_probe - Per Channel Probing
 * It gets channel features from the device tree entry and
 * initializes special channel handling routines
 *
 * @xdev: Driver specific device structure
 * @node: Device node
 *
 * Return: '0' on success and failure value on error
 */
2884 static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
				 struct device_node *node)
{
	struct xilinx_dma_chan *chan;
	bool has_dre = false;
	u32 value, width;
	int err;

	/* Allocate and initialize the channel structure */
	chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;

	chan->dev = xdev->dev;
	chan->xdev = xdev;
	chan->desc_pendingcount = 0x0;
	chan->ext_addr = xdev->ext_addr;
	/* This variable ensures that descriptors are not
	 * submitted when the DMA engine is in progress. It is added to avoid
	 * polling for a bit in the status register to know the DMA state in
	 * the driver hot path.
	 */
	chan->idle = true;

2908 spin_lock_init(&chan->lock);
2909 INIT_LIST_HEAD(&chan->pending_list);
2910 INIT_LIST_HEAD(&chan->done_list);
2911 INIT_LIST_HEAD(&chan->active_list);
2912 INIT_LIST_HEAD(&chan->free_seg_list);
2914 /* Retrieve the channel properties from the device tree */
2915 has_dre = of_property_read_bool(node, "xlnx,include-dre");
2917 chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode");
	err = of_property_read_u32(node, "xlnx,datawidth", &value);
	if (err) {
		dev_err(xdev->dev, "missing xlnx,datawidth property\n");
		return err;
	}
	width = value >> 3; /* Convert bits to bytes */

	/* If data width is greater than 8 bytes, DRE is not in hw */
	if (width > 8)
		has_dre = false;

	if (!has_dre)
		xdev->common.copy_align = (enum dmaengine_alignment)fls(width - 1);
2933 if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") ||
2934 of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") ||
2935 of_device_is_compatible(node, "xlnx,axi-cdma-channel")) {
2936 chan->direction = DMA_MEM_TO_DEV;
2937 chan->id = xdev->mm2s_chan_id++;
2938 chan->tdest = chan->id;
2940 chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
2941 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
2942 chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET;
2943 chan->config.park = 1;
2945 if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
2946 xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S)
				chan->flush_on_fsync = true;
		}
2949 } else if (of_device_is_compatible(node,
2950 "xlnx,axi-vdma-s2mm-channel") ||
2951 of_device_is_compatible(node,
2952 "xlnx,axi-dma-s2mm-channel")) {
2953 chan->direction = DMA_DEV_TO_MEM;
2954 chan->id = xdev->s2mm_chan_id++;
2955 chan->tdest = chan->id - xdev->dma_config->max_channels / 2;
2956 chan->has_vflip = of_property_read_bool(node,
2957 "xlnx,enable-vert-flip");
2958 if (chan->has_vflip) {
2959 chan->config.vflip_en = dma_read(chan,
2960 XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP) &
				XILINX_VDMA_ENABLE_VERTICAL_FLIP;
		}
2964 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA)
2965 chan->ctrl_offset = XILINX_MCDMA_S2MM_CTRL_OFFSET;
		else
			chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
2969 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
2970 chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;
2971 chan->config.park = 1;
2973 if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
2974 xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM)
				chan->flush_on_fsync = true;
		}
	} else {
		dev_err(xdev->dev, "Invalid channel compatible node\n");
		return -EINVAL;
	}
2982 /* Request the interrupt */
	chan->irq = of_irq_get(node, chan->tdest);
	if (chan->irq < 0)
		return dev_err_probe(xdev->dev, chan->irq, "failed to get irq\n");
	err = request_irq(chan->irq, xdev->dma_config->irq_handler,
			  IRQF_SHARED, "xilinx-dma-controller", chan);
	if (err) {
		dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq);
		return err;
	}
2993 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
2994 chan->start_transfer = xilinx_dma_start_transfer;
2995 chan->stop_transfer = xilinx_dma_stop_transfer;
2996 } else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
2997 chan->start_transfer = xilinx_mcdma_start_transfer;
2998 chan->stop_transfer = xilinx_dma_stop_transfer;
2999 } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
3000 chan->start_transfer = xilinx_cdma_start_transfer;
3001 chan->stop_transfer = xilinx_cdma_stop_transfer;
	} else {
		chan->start_transfer = xilinx_vdma_start_transfer;
		chan->stop_transfer = xilinx_dma_stop_transfer;
	}
3007 /* check if SG is enabled (only for AXIDMA, AXIMCDMA, and CDMA) */
3008 if (xdev->dma_config->dmatype != XDMA_TYPE_VDMA) {
3009 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA ||
3010 dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
3011 XILINX_DMA_DMASR_SG_MASK)
3012 chan->has_sg = true;
3013 dev_dbg(chan->dev, "ch %d: SG %s\n", chan->id,
3014 chan->has_sg ? "enabled" : "disabled");
3017 /* Initialize the tasklet */
3018 tasklet_setup(&chan->tasklet, xilinx_dma_do_tasklet);
3021 * Initialize the DMA channel and add it to the DMA engine channels
3024 chan->common.device = &xdev->common;
3026 list_add_tail(&chan->common.device_node, &xdev->common.channels);
3027 xdev->chan[chan->id] = chan;
3029 /* Reset the channel */
	err = xilinx_dma_chan_reset(chan);
	if (err < 0) {
		dev_err(xdev->dev, "Reset channel failed\n");
		return err;
	}

	return 0;
}
/**
 * xilinx_dma_child_probe - Per child node probe
 * It gets the number of dma-channels per child node from
 * the device tree and initializes all the channels.
 *
 * @xdev: Driver specific device structure
 * @node: Device node
 *
 * Return: '0' on success and failure value on error
 */
3049 static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
				  struct device_node *node)
{
	int ret, i;
	u32 nr_channels = 1;

	ret = of_property_read_u32(node, "dma-channels", &nr_channels);
	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA && ret < 0)
		dev_warn(xdev->dev, "missing dma-channels property\n");

	for (i = 0; i < nr_channels; i++) {
		ret = xilinx_dma_chan_probe(xdev, node);
		if (ret)
			return ret;
	}

	return 0;
}
3069 * of_dma_xilinx_xlate - Translation function
3070 * @dma_spec: Pointer to DMA specifier as found in the device tree
3071 * @ofdma: Pointer to DMA controller data
3073 * Return: DMA channel pointer on success and NULL on error
3075 static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
					    struct of_dma *ofdma)
{
	struct xilinx_dma_device *xdev = ofdma->of_dma_data;
	int chan_id = dma_spec->args[0];

	if (chan_id >= xdev->dma_config->max_channels || !xdev->chan[chan_id])
		return NULL;

	return dma_get_slave_channel(&xdev->chan[chan_id]->common);
}
3087 static const struct xilinx_dma_config axidma_config = {
3088 .dmatype = XDMA_TYPE_AXIDMA,
3089 .clk_init = axidma_clk_init,
3090 .irq_handler = xilinx_dma_irq_handler,
	.max_channels = XILINX_DMA_MAX_CHANS_PER_DEVICE,
};
3094 static const struct xilinx_dma_config aximcdma_config = {
3095 .dmatype = XDMA_TYPE_AXIMCDMA,
3096 .clk_init = axidma_clk_init,
3097 .irq_handler = xilinx_mcdma_irq_handler,
	.max_channels = XILINX_MCDMA_MAX_CHANS_PER_DEVICE,
};
3100 static const struct xilinx_dma_config axicdma_config = {
3101 .dmatype = XDMA_TYPE_CDMA,
3102 .clk_init = axicdma_clk_init,
3103 .irq_handler = xilinx_dma_irq_handler,
	.max_channels = XILINX_CDMA_MAX_CHANS_PER_DEVICE,
};
3107 static const struct xilinx_dma_config axivdma_config = {
3108 .dmatype = XDMA_TYPE_VDMA,
3109 .clk_init = axivdma_clk_init,
3110 .irq_handler = xilinx_dma_irq_handler,
	.max_channels = XILINX_DMA_MAX_CHANS_PER_DEVICE,
};
3114 static const struct of_device_id xilinx_dma_of_ids[] = {
3115 { .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config },
3116 { .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config },
3117 { .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config },
	{ .compatible = "xlnx,axi-mcdma-1.00.a", .data = &aximcdma_config },
	{}
};
3121 MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids);
3124 * xilinx_dma_probe - Driver probe function
3125 * @pdev: Pointer to the platform_device structure
3127 * Return: '0' on success and failure value on error
static int xilinx_dma_probe(struct platform_device *pdev)
{
	int (*clk_init)(struct platform_device *, struct clk **, struct clk **,
			struct clk **, struct clk **, struct clk **)
					= axivdma_clk_init;
3134 struct device_node *node = pdev->dev.of_node;
3135 struct xilinx_dma_device *xdev;
3136 struct device_node *child, *np = pdev->dev.of_node;
	u32 num_frames, addr_width, len_width;
	int i, err;

	/* Allocate and initialize the DMA engine structure */
	xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
	if (!xdev)
		return -ENOMEM;

	xdev->dev = &pdev->dev;
	if (np) {
		const struct of_device_id *match;

		match = of_match_node(xilinx_dma_of_ids, np);
		if (match && match->data) {
			xdev->dma_config = match->data;
			clk_init = xdev->dma_config->clk_init;
		}
	}

	err = clk_init(pdev, &xdev->axi_clk, &xdev->tx_clk, &xdev->txs_clk,
		       &xdev->rx_clk, &xdev->rxs_clk);
	if (err)
		return err;

	/* Request and map I/O memory */
3162 xdev->regs = devm_platform_ioremap_resource(pdev, 0);
3163 if (IS_ERR(xdev->regs))
3164 return PTR_ERR(xdev->regs);
3166 /* Retrieve the DMA engine properties from the device tree */
3167 xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0);
3168 xdev->s2mm_chan_id = xdev->dma_config->max_channels / 2;
3170 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA ||
3171 xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
		if (!of_property_read_u32(node, "xlnx,sg-length-width",
					  &len_width)) {
			if (len_width < XILINX_DMA_MAX_TRANS_LEN_MIN ||
			    len_width > XILINX_DMA_V2_MAX_TRANS_LEN_MAX) {
				dev_warn(xdev->dev,
					 "invalid xlnx,sg-length-width property value. Using default width\n");
			} else {
				if (len_width > XILINX_DMA_MAX_TRANS_LEN_MAX)
					dev_warn(xdev->dev, "Please ensure that IP supports buffer length > 23 bits\n");
				xdev->max_buffer_len =
					GENMASK(len_width - 1, 0);
			}
		}
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		err = of_property_read_u32(node, "xlnx,num-fstores",
					   &num_frames);
		if (err < 0) {
			dev_err(xdev->dev,
				"missing xlnx,num-fstores property\n");
			return err;
		}

		err = of_property_read_u32(node, "xlnx,flush-fsync",
					   &xdev->flush_on_fsync);
		if (err < 0)
			dev_warn(xdev->dev,
				 "missing xlnx,flush-fsync property\n");
	}

	err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width);
	if (err < 0)
		dev_warn(xdev->dev, "missing xlnx,addrwidth property\n");

	if (addr_width > 32)
		xdev->ext_addr = true;
	else
		xdev->ext_addr = false;
3212 /* Set the dma mask bits */
3213 dma_set_mask_and_coherent(xdev->dev, DMA_BIT_MASK(addr_width));
3215 /* Initialize the DMA engine */
3216 xdev->common.dev = &pdev->dev;
3218 INIT_LIST_HEAD(&xdev->common.channels);
3219 if (!(xdev->dma_config->dmatype == XDMA_TYPE_CDMA)) {
3220 dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
		dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
	}
3224 xdev->common.device_alloc_chan_resources =
3225 xilinx_dma_alloc_chan_resources;
3226 xdev->common.device_free_chan_resources =
3227 xilinx_dma_free_chan_resources;
3228 xdev->common.device_terminate_all = xilinx_dma_terminate_all;
3229 xdev->common.device_synchronize = xilinx_dma_synchronize;
3230 xdev->common.device_tx_status = xilinx_dma_tx_status;
3231 xdev->common.device_issue_pending = xilinx_dma_issue_pending;
3232 xdev->common.device_config = xilinx_dma_device_config;
3233 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
3234 dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask);
3235 xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
3236 xdev->common.device_prep_dma_cyclic =
3237 xilinx_dma_prep_dma_cyclic;
3238 /* Residue calculation is supported by only AXI DMA and CDMA */
3239 xdev->common.residue_granularity =
3240 DMA_RESIDUE_GRANULARITY_SEGMENT;
3241 } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
3242 dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
3243 dma_cap_set(DMA_MEMCPY_SG, xdev->common.cap_mask);
3244 xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
3245 xdev->common.device_prep_dma_memcpy_sg = xilinx_cdma_prep_memcpy_sg;
3246 /* Residue calculation is supported by only AXI DMA and CDMA */
3247 xdev->common.residue_granularity =
3248 DMA_RESIDUE_GRANULARITY_SEGMENT;
3249 } else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
		xdev->common.device_prep_slave_sg = xilinx_mcdma_prep_slave_sg;
	} else {
		xdev->common.device_prep_interleaved_dma =
			xilinx_vdma_dma_prep_interleaved;
	}
3256 platform_set_drvdata(pdev, xdev);
3258 /* Initialize the channels */
	for_each_child_of_node(node, child) {
		err = xilinx_dma_child_probe(xdev, child);
		if (err < 0)
			goto disable_clks;
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		for (i = 0; i < xdev->dma_config->max_channels; i++)
			if (xdev->chan[i])
				xdev->chan[i]->num_frms = num_frames;
	}
3271 /* Register the DMA engine with the core */
	err = dma_async_device_register(&xdev->common);
	if (err) {
		dev_err(xdev->dev, "failed to register the dma device\n");
		goto error;
	}

	err = of_dma_controller_register(node, of_dma_xilinx_xlate,
					 xdev);
	if (err < 0) {
		dev_err(&pdev->dev, "Unable to register DMA to DT\n");
		dma_async_device_unregister(&xdev->common);
		goto error;
	}
3286 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
3287 dev_info(&pdev->dev, "Xilinx AXI DMA Engine Driver Probed!!\n");
3288 else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA)
3289 dev_info(&pdev->dev, "Xilinx AXI CDMA Engine Driver Probed!!\n");
3290 else if (xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA)
3291 dev_info(&pdev->dev, "Xilinx AXI MCDMA Engine Driver Probed!!\n");
	else
		dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");

	return 0;

disable_clks:
	xdma_disable_allclks(xdev);
error:
	for (i = 0; i < xdev->dma_config->max_channels; i++)
		if (xdev->chan[i])
			xilinx_dma_chan_remove(xdev->chan[i]);

	return err;
}
/**
 * xilinx_dma_remove - Driver remove function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: Always '0'
 */
static int xilinx_dma_remove(struct platform_device *pdev)
{
	struct xilinx_dma_device *xdev = platform_get_drvdata(pdev);
	int i;

	of_dma_controller_free(pdev->dev.of_node);

	dma_async_device_unregister(&xdev->common);

	for (i = 0; i < xdev->dma_config->max_channels; i++)
		if (xdev->chan[i])
			xilinx_dma_chan_remove(xdev->chan[i]);

	xdma_disable_allclks(xdev);

	return 0;
}

static struct platform_driver xilinx_vdma_driver = {
	.driver = {
		.name = "xilinx-vdma",
		.of_match_table = xilinx_dma_of_ids,
	},
	.probe = xilinx_dma_probe,
	.remove = xilinx_dma_remove,
};

module_platform_driver(xilinx_vdma_driver);
3342 MODULE_AUTHOR("Xilinx, Inc.");
3343 MODULE_DESCRIPTION("Xilinx VDMA driver");
3344 MODULE_LICENSE("GPL v2");