/*
 * DMA driver for Xilinx Video DMA Engine
 *
 * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved.
 *
 * Based on the Freescale DMA driver.
 *
 * The AXI Video Direct Memory Access (AXI VDMA) core is a soft Xilinx IP
 * core that provides high-bandwidth direct memory access between memory
 * and AXI4-Stream type video target peripherals. The core provides efficient
 * two dimensional DMA operations with independent asynchronous read (S2MM)
 * and write (MM2S) channel operation. It can be configured to have either
 * one channel or two channels. If configured as two channels, one is to
 * transmit to the video device (MM2S) and another is to receive from the
 * video device (S2MM). Initialization, status, interrupt and management
 * registers are accessed through an AXI4-Lite slave interface.
 *
 * The AXI Direct Memory Access (AXI DMA) core is a soft Xilinx IP core that
 * provides high-bandwidth one dimensional direct memory access between memory
 * and AXI4-Stream target peripherals. It supports one receive and one
 * transmit channel, both of them optional at synthesis time.
 *
 * The AXI CDMA is a soft IP that provides high-bandwidth Direct Memory
 * Access (DMA) between a memory-mapped source address and a memory-mapped
 * destination address.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/bitops.h>
#include <linux/dmapool.h>
#include <linux/dma/xilinx_dma.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/of_irq.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include "../dmaengine.h"
/* Register/Descriptor Offsets */
#define XILINX_DMA_MM2S_CTRL_OFFSET		0x0000
#define XILINX_DMA_S2MM_CTRL_OFFSET		0x0030
#define XILINX_VDMA_MM2S_DESC_OFFSET		0x0050
#define XILINX_VDMA_S2MM_DESC_OFFSET		0x00a0
/* Control Registers */
#define XILINX_DMA_REG_DMACR			0x0000
#define XILINX_DMA_DMACR_DELAY_MAX		0xff
#define XILINX_DMA_DMACR_DELAY_SHIFT		24
#define XILINX_DMA_DMACR_FRAME_COUNT_MAX	0xff
#define XILINX_DMA_DMACR_FRAME_COUNT_SHIFT	16
#define XILINX_DMA_DMACR_ERR_IRQ		BIT(14)
#define XILINX_DMA_DMACR_DLY_CNT_IRQ		BIT(13)
#define XILINX_DMA_DMACR_FRM_CNT_IRQ		BIT(12)
#define XILINX_DMA_DMACR_MASTER_SHIFT		8
#define XILINX_DMA_DMACR_FSYNCSRC_SHIFT		5
#define XILINX_DMA_DMACR_FRAMECNT_EN		BIT(4)
#define XILINX_DMA_DMACR_GENLOCK_EN		BIT(3)
#define XILINX_DMA_DMACR_RESET			BIT(2)
#define XILINX_DMA_DMACR_CIRC_EN		BIT(1)
#define XILINX_DMA_DMACR_RUNSTOP		BIT(0)
#define XILINX_DMA_DMACR_FSYNCSRC_MASK		GENMASK(6, 5)
#define XILINX_DMA_DMACR_DELAY_MASK		GENMASK(31, 24)
#define XILINX_DMA_DMACR_FRAME_COUNT_MASK	GENMASK(23, 16)
#define XILINX_DMA_DMACR_MASTER_MASK		GENMASK(11, 8)
#define XILINX_DMA_REG_DMASR			0x0004
#define XILINX_DMA_DMASR_EOL_LATE_ERR		BIT(15)
#define XILINX_DMA_DMASR_ERR_IRQ		BIT(14)
#define XILINX_DMA_DMASR_DLY_CNT_IRQ		BIT(13)
#define XILINX_DMA_DMASR_FRM_CNT_IRQ		BIT(12)
#define XILINX_DMA_DMASR_SOF_LATE_ERR		BIT(11)
#define XILINX_DMA_DMASR_SG_DEC_ERR		BIT(10)
#define XILINX_DMA_DMASR_SG_SLV_ERR		BIT(9)
#define XILINX_DMA_DMASR_EOF_EARLY_ERR		BIT(8)
#define XILINX_DMA_DMASR_SOF_EARLY_ERR		BIT(7)
#define XILINX_DMA_DMASR_DMA_DEC_ERR		BIT(6)
#define XILINX_DMA_DMASR_DMA_SLAVE_ERR		BIT(5)
#define XILINX_DMA_DMASR_DMA_INT_ERR		BIT(4)
#define XILINX_DMA_DMASR_IDLE			BIT(1)
#define XILINX_DMA_DMASR_HALTED			BIT(0)
#define XILINX_DMA_DMASR_DELAY_MASK		GENMASK(31, 24)
#define XILINX_DMA_DMASR_FRAME_COUNT_MASK	GENMASK(23, 16)

#define XILINX_DMA_REG_CURDESC			0x0008
#define XILINX_DMA_REG_TAILDESC			0x0010
#define XILINX_DMA_REG_REG_INDEX		0x0014
#define XILINX_DMA_REG_FRMSTORE			0x0018
#define XILINX_DMA_REG_THRESHOLD		0x001c
#define XILINX_DMA_REG_FRMPTR_STS		0x0024
#define XILINX_DMA_REG_PARK_PTR			0x0028
#define XILINX_DMA_PARK_PTR_WR_REF_SHIFT	8
#define XILINX_DMA_PARK_PTR_WR_REF_MASK		GENMASK(12, 8)
#define XILINX_DMA_PARK_PTR_RD_REF_SHIFT	0
#define XILINX_DMA_PARK_PTR_RD_REF_MASK		GENMASK(4, 0)
#define XILINX_DMA_REG_VDMA_VERSION		0x002c
/* Register Direct Mode Registers */
#define XILINX_DMA_REG_VSIZE			0x0000
#define XILINX_DMA_REG_HSIZE			0x0004

#define XILINX_DMA_REG_FRMDLY_STRIDE		0x0008
#define XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT	24
#define XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT	0

#define XILINX_VDMA_REG_START_ADDRESS(n)	(0x000c + 4 * (n))
#define XILINX_VDMA_REG_START_ADDRESS_64(n)	(0x000c + 8 * (n))

#define XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP	0x00ec
#define XILINX_VDMA_ENABLE_VERTICAL_FLIP	BIT(0)
/* HW specific definitions */
#define XILINX_DMA_MAX_CHANS_PER_DEVICE	0x20

#define XILINX_DMA_DMAXR_ALL_IRQ_MASK	\
		(XILINX_DMA_DMASR_FRM_CNT_IRQ | \
		 XILINX_DMA_DMASR_DLY_CNT_IRQ | \
		 XILINX_DMA_DMASR_ERR_IRQ)

#define XILINX_DMA_DMASR_ALL_ERR_MASK	\
		(XILINX_DMA_DMASR_EOL_LATE_ERR | \
		 XILINX_DMA_DMASR_SOF_LATE_ERR | \
		 XILINX_DMA_DMASR_SG_DEC_ERR | \
		 XILINX_DMA_DMASR_SG_SLV_ERR | \
		 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_DMA_DEC_ERR | \
		 XILINX_DMA_DMASR_DMA_SLAVE_ERR | \
		 XILINX_DMA_DMASR_DMA_INT_ERR)

/*
 * Recoverable errors are DMA Internal error, SOF Early, EOF Early
 * and SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC
 * is enabled in the h/w system.
 */
#define XILINX_DMA_DMASR_ERR_RECOVER_MASK	\
		(XILINX_DMA_DMASR_SOF_LATE_ERR | \
		 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
		 XILINX_DMA_DMASR_DMA_INT_ERR)

/* Axi VDMA Flush on Fsync bits */
#define XILINX_DMA_FLUSH_S2MM		3
#define XILINX_DMA_FLUSH_MM2S		2
#define XILINX_DMA_FLUSH_BOTH		1

/* Delay loop counter to prevent hardware failure */
#define XILINX_DMA_LOOP_COUNT		1000000
/* AXI DMA Specific Registers/Offsets */
#define XILINX_DMA_REG_SRCDSTADDR	0x18
#define XILINX_DMA_REG_BTT		0x28

/* AXI DMA Specific Masks/Bit fields */
#define XILINX_DMA_MAX_TRANS_LEN	GENMASK(22, 0)
#define XILINX_DMA_CR_COALESCE_MAX	GENMASK(23, 16)
#define XILINX_DMA_CR_CYCLIC_BD_EN_MASK	BIT(4)
#define XILINX_DMA_CR_COALESCE_SHIFT	16
#define XILINX_DMA_BD_SOP		BIT(27)
#define XILINX_DMA_BD_EOP		BIT(26)
#define XILINX_DMA_COALESCE_MAX		255
#define XILINX_DMA_NUM_DESCS		255
#define XILINX_DMA_NUM_APP_WORDS	5

/* Multi-Channel DMA Descriptor offsets */
#define XILINX_DMA_MCRX_CDESC(x)	(0x40 + (x - 1) * 0x20)
#define XILINX_DMA_MCRX_TDESC(x)	(0x48 + (x - 1) * 0x20)

/* Multi-Channel DMA Masks/Shifts */
#define XILINX_DMA_BD_HSIZE_MASK	GENMASK(15, 0)
#define XILINX_DMA_BD_STRIDE_MASK	GENMASK(15, 0)
#define XILINX_DMA_BD_VSIZE_MASK	GENMASK(31, 19)
#define XILINX_DMA_BD_TDEST_MASK	GENMASK(4, 0)
#define XILINX_DMA_BD_STRIDE_SHIFT	0
#define XILINX_DMA_BD_VSIZE_SHIFT	19

/* AXI CDMA Specific Registers/Offsets */
#define XILINX_CDMA_REG_SRCADDR		0x18
#define XILINX_CDMA_REG_DSTADDR		0x20

/* AXI CDMA Specific Masks */
#define XILINX_CDMA_CR_SGMODE		BIT(3)
/**
 * struct xilinx_vdma_desc_hw - Hardware Descriptor
 * @next_desc: Next Descriptor Pointer @0x00
 * @pad1: Reserved @0x04
 * @buf_addr: Buffer address @0x08
 * @buf_addr_msb: MSB of Buffer address @0x0C
 * @vsize: Vertical Size @0x10
 * @hsize: Horizontal Size @0x14
 * @stride: Number of bytes between the first
 *	    pixels of each horizontal line @0x18
 */
struct xilinx_vdma_desc_hw {
	u32 next_desc;
	u32 pad1;
	u32 buf_addr;
	u32 buf_addr_msb;
	u32 vsize;
	u32 hsize;
	u32 stride;
} __aligned(64);
/**
 * struct xilinx_axidma_desc_hw - Hardware Descriptor for AXI DMA
 * @next_desc: Next Descriptor Pointer @0x00
 * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
 * @buf_addr: Buffer address @0x08
 * @buf_addr_msb: MSB of Buffer address @0x0C
 * @mcdma_control: Control field for mcdma @0x10
 * @vsize_stride: Vsize and Stride field for mcdma @0x14
 * @control: Control field @0x18
 * @status: Status field @0x1C
 * @app: APP Fields @0x20 - 0x30
 */
struct xilinx_axidma_desc_hw {
	u32 next_desc;
	u32 next_desc_msb;
	u32 buf_addr;
	u32 buf_addr_msb;
	u32 mcdma_control;
	u32 vsize_stride;
	u32 control;
	u32 status;
	u32 app[XILINX_DMA_NUM_APP_WORDS];
} __aligned(64);
/**
 * struct xilinx_cdma_desc_hw - Hardware Descriptor
 * @next_desc: Next Descriptor Pointer @0x00
 * @next_desc_msb: Next Descriptor Pointer MSB @0x04
 * @src_addr: Source address @0x08
 * @src_addr_msb: Source address MSB @0x0C
 * @dest_addr: Destination address @0x10
 * @dest_addr_msb: Destination address MSB @0x14
 * @control: Control field @0x18
 * @status: Status field @0x1C
 */
struct xilinx_cdma_desc_hw {
	u32 next_desc;
	u32 next_desc_msb;
	u32 src_addr;
	u32 src_addr_msb;
	u32 dest_addr;
	u32 dest_addr_msb;
	u32 control;
	u32 status;
} __aligned(64);
/**
 * struct xilinx_vdma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_vdma_tx_segment {
	struct xilinx_vdma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);
/**
 * struct xilinx_axidma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_axidma_tx_segment {
	struct xilinx_axidma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);
/**
 * struct xilinx_cdma_tx_segment - Descriptor segment
 * @hw: Hardware descriptor
 * @node: Node in the descriptor segments list
 * @phys: Physical address of segment
 */
struct xilinx_cdma_tx_segment {
	struct xilinx_cdma_desc_hw hw;
	struct list_head node;
	dma_addr_t phys;
} __aligned(64);
/**
 * struct xilinx_dma_tx_descriptor - Per Transaction structure
 * @async_tx: Async transaction descriptor
 * @segments: TX segments list
 * @node: Node in the channel descriptors list
 * @cyclic: Check for cyclic transfers.
 */
struct xilinx_dma_tx_descriptor {
	struct dma_async_tx_descriptor async_tx;
	struct list_head segments;
	struct list_head node;
	bool cyclic;
};
/**
 * struct xilinx_dma_chan - Driver specific DMA channel structure
 * @xdev: Driver specific device structure
 * @ctrl_offset: Control registers offset
 * @desc_offset: TX descriptor registers offset
 * @lock: Descriptor operation lock
 * @pending_list: Descriptors waiting
 * @active_list: Descriptors ready to submit
 * @done_list: Complete descriptors
 * @free_seg_list: Free descriptors
 * @common: DMA common channel
 * @desc_pool: Descriptors pool
 * @dev: The dma device
 * @direction: Transfer direction
 * @num_frms: Number of frames
 * @has_sg: Support scatter transfers
 * @cyclic: Check for cyclic transfers.
 * @genlock: Support genlock mode
 * @err: Channel has errors
 * @idle: Check for channel idle
 * @terminating: Check for channel being synchronized by user
 * @tasklet: Cleanup work after irq
 * @config: Device configuration info
 * @flush_on_fsync: Flush on Frame sync
 * @desc_pendingcount: Descriptor pending count
 * @ext_addr: Indicates 64 bit addressing is supported by dma channel
 * @desc_submitcount: Descriptor h/w submitted count
 * @residue: Residue for AXI DMA
 * @seg_v: Statically allocated segments base
 * @seg_p: Physical allocated segments base
 * @cyclic_seg_v: Statically allocated segment base for cyclic transfers
 * @cyclic_seg_p: Physical allocated segments base for cyclic dma
 * @start_transfer: Differentiate b/w DMA IP's transfer
 * @stop_transfer: Differentiate b/w DMA IP's quiesce
 * @tdest: TDEST value for mcdma
 * @has_vflip: S2MM vertical flip
 */
struct xilinx_dma_chan {
	struct xilinx_dma_device *xdev;
	struct list_head pending_list;
	struct list_head active_list;
	struct list_head done_list;
	struct list_head free_seg_list;
	struct dma_chan common;
	struct dma_pool *desc_pool;
	enum dma_transfer_direction direction;
	struct tasklet_struct tasklet;
	struct xilinx_vdma_config config;
	u32 desc_pendingcount;
	u32 desc_submitcount;
	struct xilinx_axidma_tx_segment *seg_v;
	struct xilinx_axidma_tx_segment *cyclic_seg_v;
	dma_addr_t cyclic_seg_p;
	void (*start_transfer)(struct xilinx_dma_chan *chan);
	int (*stop_transfer)(struct xilinx_dma_chan *chan);
/**
 * enum xdma_ip_type - DMA IP type.
 *
 * @XDMA_TYPE_AXIDMA: Axi dma ip.
 * @XDMA_TYPE_CDMA: Axi cdma ip.
 * @XDMA_TYPE_VDMA: Axi vdma ip.
 */
enum xdma_ip_type {
	XDMA_TYPE_AXIDMA = 0,
	XDMA_TYPE_CDMA,
	XDMA_TYPE_VDMA,
};

struct xilinx_dma_config {
	enum xdma_ip_type dmatype;
	int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk,
			struct clk **tx_clk, struct clk **txs_clk,
			struct clk **rx_clk, struct clk **rxs_clk);
};
/**
 * struct xilinx_dma_device - DMA device structure
 * @regs: I/O mapped base address
 * @dev: Device Structure
 * @common: DMA device structure
 * @chan: Driver specific DMA channel
 * @has_sg: Specifies whether Scatter-Gather is present or not
 * @mcdma: Specifies whether Multi-Channel is present or not
 * @flush_on_fsync: Flush on frame sync
 * @ext_addr: Indicates 64 bit addressing is supported by dma device
 * @pdev: Platform device structure pointer
 * @dma_config: DMA config structure
 * @axi_clk: DMA Axi4-lite interface clock
 * @tx_clk: DMA mm2s clock
 * @txs_clk: DMA mm2s stream clock
 * @rx_clk: DMA s2mm clock
 * @rxs_clk: DMA s2mm stream clock
 * @nr_channels: Number of channels DMA device supports
 * @chan_id: DMA channel identifier
 */
struct xilinx_dma_device {
	struct dma_device common;
	struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE];
	struct platform_device *pdev;
	const struct xilinx_dma_config *dma_config;
#define to_xilinx_chan(chan) \
	container_of(chan, struct xilinx_dma_chan, common)
#define to_dma_tx_descriptor(tx) \
	container_of(tx, struct xilinx_dma_tx_descriptor, async_tx)
#define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \
	readl_poll_timeout_atomic(chan->xdev->regs + chan->ctrl_offset + reg, \
				  val, cond, delay_us, timeout_us)
static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg)
{
	return ioread32(chan->xdev->regs + reg);
}

static inline void dma_write(struct xilinx_dma_chan *chan, u32 reg, u32 value)
{
	iowrite32(value, chan->xdev->regs + reg);
}

static inline void vdma_desc_write(struct xilinx_dma_chan *chan, u32 reg,
				   u32 value)
{
	dma_write(chan, chan->desc_offset + reg, value);
}

static inline u32 dma_ctrl_read(struct xilinx_dma_chan *chan, u32 reg)
{
	return dma_read(chan, chan->ctrl_offset + reg);
}

static inline void dma_ctrl_write(struct xilinx_dma_chan *chan, u32 reg,
				  u32 value)
{
	dma_write(chan, chan->ctrl_offset + reg, value);
}

static inline void dma_ctrl_clr(struct xilinx_dma_chan *chan, u32 reg,
				u32 clr)
{
	dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) & ~clr);
}

static inline void dma_ctrl_set(struct xilinx_dma_chan *chan, u32 reg,
				u32 set)
{
	dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) | set);
}
/**
 * vdma_desc_write_64 - 64-bit descriptor write
 * @chan: Driver specific VDMA channel
 * @reg: Register to write
 * @value_lsb: lower address of the descriptor.
 * @value_msb: upper address of the descriptor.
 *
 * Since the VDMA driver writes to a register offset that is not a multiple
 * of 64 bits (e.g. 0x5c), the value is written as two separate 32-bit
 * writes instead of a single 64-bit register write.
 */
static inline void vdma_desc_write_64(struct xilinx_dma_chan *chan, u32 reg,
				      u32 value_lsb, u32 value_msb)
{
	/* Write the lsb 32 bits */
	writel(value_lsb, chan->xdev->regs + chan->desc_offset + reg);

	/* Write the msb 32 bits */
	writel(value_msb, chan->xdev->regs + chan->desc_offset + reg + 4);
}
static inline void dma_writeq(struct xilinx_dma_chan *chan, u32 reg, u64 value)
{
	lo_hi_writeq(value, chan->xdev->regs + chan->ctrl_offset + reg);
}

static inline void xilinx_write(struct xilinx_dma_chan *chan, u32 reg,
				dma_addr_t addr)
{
	if (chan->ext_addr)
		dma_writeq(chan, reg, addr);
	else
		dma_ctrl_write(chan, reg, addr);
}

static inline void xilinx_axidma_buf(struct xilinx_dma_chan *chan,
				     struct xilinx_axidma_desc_hw *hw,
				     dma_addr_t buf_addr, size_t sg_used,
				     unsigned int period_len)
{
	if (chan->ext_addr) {
		hw->buf_addr = lower_32_bits(buf_addr + sg_used + period_len);
		hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used +
						 period_len);
	} else {
		hw->buf_addr = buf_addr + sg_used + period_len;
	}
}
/* -----------------------------------------------------------------------------
 * Descriptors and segments alloc and free
 */

/**
 * xilinx_vdma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_vdma_tx_segment *
xilinx_vdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
	struct xilinx_vdma_tx_segment *segment;
	dma_addr_t phys;

	segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
	if (!segment)
		return NULL;

	segment->phys = phys;

	return segment;
}
/**
 * xilinx_cdma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_cdma_tx_segment *
xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
	struct xilinx_cdma_tx_segment *segment;
	dma_addr_t phys;

	segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
	if (!segment)
		return NULL;

	segment->phys = phys;

	return segment;
}
/**
 * xilinx_axidma_alloc_tx_segment - Allocate transaction segment
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated segment on success and NULL on failure.
 */
static struct xilinx_axidma_tx_segment *
xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
{
	struct xilinx_axidma_tx_segment *segment = NULL;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	if (!list_empty(&chan->free_seg_list)) {
		segment = list_first_entry(&chan->free_seg_list,
					   struct xilinx_axidma_tx_segment,
					   node);
		list_del(&segment->node);
	}
	spin_unlock_irqrestore(&chan->lock, flags);

	return segment;
}
static void xilinx_dma_clean_hw_desc(struct xilinx_axidma_desc_hw *hw)
{
	u32 next_desc = hw->next_desc;
	u32 next_desc_msb = hw->next_desc_msb;

	memset(hw, 0, sizeof(struct xilinx_axidma_desc_hw));

	hw->next_desc = next_desc;
	hw->next_desc_msb = next_desc_msb;
}
/**
 * xilinx_dma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan,
				       struct xilinx_axidma_tx_segment *segment)
{
	xilinx_dma_clean_hw_desc(&segment->hw);

	list_add_tail(&segment->node, &chan->free_seg_list);
}
/**
 * xilinx_cdma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
static void xilinx_cdma_free_tx_segment(struct xilinx_dma_chan *chan,
					struct xilinx_cdma_tx_segment *segment)
{
	dma_pool_free(chan->desc_pool, segment, segment->phys);
}
/**
 * xilinx_vdma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
 */
static void xilinx_vdma_free_tx_segment(struct xilinx_dma_chan *chan,
					struct xilinx_vdma_tx_segment *segment)
{
	dma_pool_free(chan->desc_pool, segment, segment->phys);
}
/**
 * xilinx_dma_alloc_tx_descriptor - Allocate transaction descriptor
 * @chan: Driver specific DMA channel
 *
 * Return: The allocated descriptor on success and NULL on failure.
 */
static struct xilinx_dma_tx_descriptor *
xilinx_dma_alloc_tx_descriptor(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *desc;

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return NULL;

	INIT_LIST_HEAD(&desc->segments);

	return desc;
}
/**
 * xilinx_dma_free_tx_descriptor - Free transaction descriptor
 * @chan: Driver specific DMA channel
 * @desc: DMA transaction descriptor
 */
static void
xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan,
			      struct xilinx_dma_tx_descriptor *desc)
{
	struct xilinx_vdma_tx_segment *segment, *next;
	struct xilinx_cdma_tx_segment *cdma_segment, *cdma_next;
	struct xilinx_axidma_tx_segment *axidma_segment, *axidma_next;

	if (!desc)
		return;

	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		list_for_each_entry_safe(segment, next, &desc->segments, node) {
			list_del(&segment->node);
			xilinx_vdma_free_tx_segment(chan, segment);
		}
	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		list_for_each_entry_safe(cdma_segment, cdma_next,
					 &desc->segments, node) {
			list_del(&cdma_segment->node);
			xilinx_cdma_free_tx_segment(chan, cdma_segment);
		}
	} else {
		list_for_each_entry_safe(axidma_segment, axidma_next,
					 &desc->segments, node) {
			list_del(&axidma_segment->node);
			xilinx_dma_free_tx_segment(chan, axidma_segment);
		}
	}

	kfree(desc);
}
/* Required functions */

/**
 * xilinx_dma_free_desc_list - Free descriptors list
 * @chan: Driver specific DMA channel
 * @list: List to parse and delete the descriptor
 */
static void xilinx_dma_free_desc_list(struct xilinx_dma_chan *chan,
				      struct list_head *list)
{
	struct xilinx_dma_tx_descriptor *desc, *next;

	list_for_each_entry_safe(desc, next, list, node) {
		list_del(&desc->node);
		xilinx_dma_free_tx_descriptor(chan, desc);
	}
}
/**
 * xilinx_dma_free_descriptors - Free channel descriptors
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan)
{
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	xilinx_dma_free_desc_list(chan, &chan->pending_list);
	xilinx_dma_free_desc_list(chan, &chan->done_list);
	xilinx_dma_free_desc_list(chan, &chan->active_list);

	spin_unlock_irqrestore(&chan->lock, flags);
}
/**
 * xilinx_dma_free_chan_resources - Free channel resources
 * @dchan: DMA channel
 */
static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	unsigned long flags;

	dev_dbg(chan->dev, "Free all channel resources.\n");

	xilinx_dma_free_descriptors(chan);

	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		spin_lock_irqsave(&chan->lock, flags);
		INIT_LIST_HEAD(&chan->free_seg_list);
		spin_unlock_irqrestore(&chan->lock, flags);

		/* Free memory that is allocated for BD */
		dma_free_coherent(chan->dev, sizeof(*chan->seg_v) *
				  XILINX_DMA_NUM_DESCS, chan->seg_v,
				  chan->seg_p);

		/* Free memory that is allocated for cyclic DMA mode */
		dma_free_coherent(chan->dev, sizeof(*chan->cyclic_seg_v),
				  chan->cyclic_seg_v, chan->cyclic_seg_p);
	}

	if (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA) {
		dma_pool_destroy(chan->desc_pool);
		chan->desc_pool = NULL;
	}
}
/**
 * xilinx_dma_chan_handle_cyclic - Cyclic dma callback
 * @chan: Driver specific dma channel
 * @desc: dma transaction descriptor
 * @flags: flags for spin lock
 */
static void xilinx_dma_chan_handle_cyclic(struct xilinx_dma_chan *chan,
					  struct xilinx_dma_tx_descriptor *desc,
					  unsigned long *flags)
{
	dma_async_tx_callback callback;
	void *callback_param;

	callback = desc->async_tx.callback;
	callback_param = desc->async_tx.callback_param;
	if (callback) {
		spin_unlock_irqrestore(&chan->lock, *flags);
		callback(callback_param);
		spin_lock_irqsave(&chan->lock, *flags);
	}
}
/**
 * xilinx_dma_chan_desc_cleanup - Clean channel descriptors
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *desc, *next;
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);

	list_for_each_entry_safe(desc, next, &chan->done_list, node) {
		struct dmaengine_desc_callback cb;

		if (desc->cyclic) {
			xilinx_dma_chan_handle_cyclic(chan, desc, &flags);
			break;
		}

		/* Remove from the list of running transactions */
		list_del(&desc->node);

		/* Run the link descriptor callback function */
		dmaengine_desc_get_callback(&desc->async_tx, &cb);
		if (dmaengine_desc_callback_valid(&cb)) {
			spin_unlock_irqrestore(&chan->lock, flags);
			dmaengine_desc_callback_invoke(&cb, NULL);
			spin_lock_irqsave(&chan->lock, flags);
		}

		/* Run any dependencies, then free the descriptor */
		dma_run_dependencies(&desc->async_tx);
		xilinx_dma_free_tx_descriptor(chan, desc);

		/*
		 * While we ran a callback the user called a terminate function,
		 * which takes care of cleaning up any remaining descriptors
		 */
		if (chan->terminating)
			break;
	}

	spin_unlock_irqrestore(&chan->lock, flags);
}
/**
 * xilinx_dma_do_tasklet - Schedule completion tasklet
 * @data: Pointer to the Xilinx DMA channel structure
 */
static void xilinx_dma_do_tasklet(unsigned long data)
{
	struct xilinx_dma_chan *chan = (struct xilinx_dma_chan *)data;

	xilinx_dma_chan_desc_cleanup(chan);
}
/**
 * xilinx_dma_alloc_chan_resources - Allocate channel resources
 * @dchan: DMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	int i;
	/* Has this channel already been allocated? */
	if (chan->desc_pool)
		return 0;

	/*
	 * Descriptors must be aligned to 64 bytes to meet the Xilinx VDMA
	 * specification requirement.
	 */
	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		/* Allocate the buffer descriptors. */
		chan->seg_v = dma_zalloc_coherent(chan->dev,
						  sizeof(*chan->seg_v) *
						  XILINX_DMA_NUM_DESCS,
						  &chan->seg_p, GFP_KERNEL);
		if (!chan->seg_v) {
			dev_err(chan->dev,
				"unable to allocate channel %d descriptors\n",
				chan->id);
			return -ENOMEM;
		}

		for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) {
			chan->seg_v[i].hw.next_desc =
			lower_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
				((i + 1) % XILINX_DMA_NUM_DESCS));
			chan->seg_v[i].hw.next_desc_msb =
			upper_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
				((i + 1) % XILINX_DMA_NUM_DESCS));
			chan->seg_v[i].phys = chan->seg_p +
				sizeof(*chan->seg_v) * i;
			list_add_tail(&chan->seg_v[i].node,
				      &chan->free_seg_list);
		}
	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool",
				   chan->dev,
				   sizeof(struct xilinx_cdma_tx_segment),
				   __alignof__(struct xilinx_cdma_tx_segment),
				   0);
	} else {
		chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool",
				     chan->dev,
				     sizeof(struct xilinx_vdma_tx_segment),
				     __alignof__(struct xilinx_vdma_tx_segment),
				     0);
	}

	if (!chan->desc_pool &&
	    (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA)) {
		dev_err(chan->dev,
			"unable to allocate channel %d descriptor pool\n",
			chan->id);
		return -ENOMEM;
	}

	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		/*
		 * For cyclic DMA mode we need to program the tail descriptor
		 * register with a value that is not part of the BD chain,
		 * so allocate a descriptor segment during channel allocation
		 * just for programming the tail descriptor.
		 */
		chan->cyclic_seg_v = dma_zalloc_coherent(chan->dev,
					sizeof(*chan->cyclic_seg_v),
					&chan->cyclic_seg_p, GFP_KERNEL);
		if (!chan->cyclic_seg_v) {
			dev_err(chan->dev,
				"unable to allocate desc segment for cyclic DMA\n");
			return -ENOMEM;
		}
		chan->cyclic_seg_v->phys = chan->cyclic_seg_p;
	}

	dma_cookie_init(dchan);

	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		/*
		 * For AXI DMA, resetting one channel also resets the other
		 * channel, so enable the interrupts here.
		 */
		dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
			     XILINX_DMA_DMAXR_ALL_IRQ_MASK);
	}

	if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
		dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
			     XILINX_CDMA_CR_SGMODE);

	return 0;
}
/**
 * xilinx_dma_tx_status - Get DMA transaction status
 * @dchan: DMA channel
 * @cookie: Transaction identifier
 * @txstate: Transaction state
 *
 * Return: DMA transaction status
 */
static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
					    dma_cookie_t cookie,
					    struct dma_tx_state *txstate)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_axidma_tx_segment *segment;
	struct xilinx_axidma_desc_hw *hw;
	enum dma_status ret;
	unsigned long flags;
	u32 residue = 0;

	ret = dma_cookie_status(dchan, cookie, txstate);
	if (ret == DMA_COMPLETE || !txstate)
		return ret;

	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		spin_lock_irqsave(&chan->lock, flags);

		desc = list_last_entry(&chan->active_list,
				       struct xilinx_dma_tx_descriptor, node);
		if (chan->has_sg) {
			list_for_each_entry(segment, &desc->segments, node) {
				hw = &segment->hw;
				residue += (hw->control - hw->status) &
					   XILINX_DMA_MAX_TRANS_LEN;
			}
		}
		spin_unlock_irqrestore(&chan->lock, flags);

		chan->residue = residue;
		dma_set_residue(txstate, chan->residue);
	}

	return ret;
}
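
/*
 * Illustrative sketch (not part of the original driver): how a client
 * driver might query the status and residue reported by
 * xilinx_dma_tx_status() through the standard dmaengine API. The "chan"
 * and "cookie" values are assumed to come from the client's own
 * channel-request and submit path.
 */
#if 0
static void example_check_status(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct dma_tx_state state;
	enum dma_status status;

	status = dmaengine_tx_status(chan, cookie, &state);
	if (status == DMA_COMPLETE)
		pr_info("transfer complete\n");
	else
		pr_info("transfer in flight, residue: %u bytes\n",
			state.residue);
}
#endif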
/**
 * xilinx_dma_stop_transfer - Halt DMA channel
 * @chan: Driver specific DMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_stop_transfer(struct xilinx_dma_chan *chan)
{
	u32 val;

	dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);

	/* Wait for the hardware to halt */
	return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
				       val & XILINX_DMA_DMASR_HALTED, 0,
				       XILINX_DMA_LOOP_COUNT);
}
/**
 * xilinx_cdma_stop_transfer - Wait for the current transfer to complete
 * @chan: Driver specific DMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_cdma_stop_transfer(struct xilinx_dma_chan *chan)
{
	u32 val;

	return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
				       val & XILINX_DMA_DMASR_IDLE, 0,
				       XILINX_DMA_LOOP_COUNT);
}
/**
 * xilinx_dma_start - Start DMA channel
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_start(struct xilinx_dma_chan *chan)
{
	int err;
	u32 val;

	dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);

	/* Wait for the hardware to start */
	err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
				      !(val & XILINX_DMA_DMASR_HALTED), 0,
				      XILINX_DMA_LOOP_COUNT);

	if (err) {
		dev_err(chan->dev, "Cannot start channel %p: %x\n",
			chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));

		chan->err = true;
	}
}
/**
 * xilinx_vdma_start_transfer - Starts VDMA transfer
 * @chan: Driver specific channel struct pointer
 */
static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
{
	struct xilinx_vdma_config *config = &chan->config;
	struct xilinx_dma_tx_descriptor *desc, *tail_desc;
	u32 reg, j;
	struct xilinx_vdma_tx_segment *tail_segment;

	/* This function was invoked with lock held */

	if (list_empty(&chan->pending_list))
		return;

	desc = list_first_entry(&chan->pending_list,
				struct xilinx_dma_tx_descriptor, node);
	tail_desc = list_last_entry(&chan->pending_list,
				    struct xilinx_dma_tx_descriptor, node);

	tail_segment = list_last_entry(&tail_desc->segments,
				       struct xilinx_vdma_tx_segment, node);

	/*
	 * If hardware is idle, then all descriptors on the running lists are
	 * done, start new transfers
	 */
	if (chan->has_sg)
		dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
			       desc->async_tx.phys);

	/* Configure the hardware using info in the config structure */
	if (chan->has_vflip) {
		reg = dma_read(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP);
		reg &= ~XILINX_VDMA_ENABLE_VERTICAL_FLIP;
		reg |= config->vflip_en;
		dma_write(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP,
			  reg);
	}

	reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);

	if (config->frm_cnt_en)
		reg |= XILINX_DMA_DMACR_FRAMECNT_EN;
	else
		reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN;

	/*
	 * With SG, start with circular mode, so that BDs can be fetched.
	 * In direct register mode, if not parking, enable circular mode
	 */
	if (chan->has_sg || !config->park)
		reg |= XILINX_DMA_DMACR_CIRC_EN;

	if (config->park)
		reg &= ~XILINX_DMA_DMACR_CIRC_EN;

	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);

	j = chan->desc_submitcount;
	reg = dma_read(chan, XILINX_DMA_REG_PARK_PTR);
	if (chan->direction == DMA_MEM_TO_DEV) {
		reg &= ~XILINX_DMA_PARK_PTR_RD_REF_MASK;
		reg |= j << XILINX_DMA_PARK_PTR_RD_REF_SHIFT;
	} else {
		reg &= ~XILINX_DMA_PARK_PTR_WR_REF_MASK;
		reg |= j << XILINX_DMA_PARK_PTR_WR_REF_SHIFT;
	}
	dma_write(chan, XILINX_DMA_REG_PARK_PTR, reg);

	/* Start the hardware */
	xilinx_dma_start(chan);

	/* Start the transfer */
	if (chan->has_sg) {
		dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
			       tail_segment->phys);
		list_splice_tail_init(&chan->pending_list, &chan->active_list);
		chan->desc_pendingcount = 0;
	} else {
		struct xilinx_vdma_tx_segment *segment, *last = NULL;
		int i = 0;

		if (chan->desc_submitcount < chan->num_frms)
			i = chan->desc_submitcount;

		list_for_each_entry(segment, &desc->segments, node) {
			if (chan->ext_addr)
				vdma_desc_write_64(chan,
					XILINX_VDMA_REG_START_ADDRESS_64(i++),
					segment->hw.buf_addr,
					segment->hw.buf_addr_msb);
			else
				vdma_desc_write(chan,
					XILINX_VDMA_REG_START_ADDRESS(i++),
					segment->hw.buf_addr);

			last = segment;
		}

		/* HW expects these parameters to be the same for one transaction */
		vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize);
		vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
				last->hw.stride);
		vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);

		chan->desc_submitcount++;
		chan->desc_pendingcount--;
		list_del(&desc->node);
		list_add_tail(&desc->node, &chan->active_list);
		if (chan->desc_submitcount == chan->num_frms)
			chan->desc_submitcount = 0;
	}
}
/**
 * xilinx_cdma_start_transfer - Starts cdma transfer
 * @chan: Driver specific channel struct pointer
 */
static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
	struct xilinx_cdma_tx_segment *tail_segment;
	u32 ctrl_reg = dma_read(chan, XILINX_DMA_REG_DMACR);

	if (list_empty(&chan->pending_list))
		return;

	head_desc = list_first_entry(&chan->pending_list,
				     struct xilinx_dma_tx_descriptor, node);
	tail_desc = list_last_entry(&chan->pending_list,
				    struct xilinx_dma_tx_descriptor, node);
	tail_segment = list_last_entry(&tail_desc->segments,
				       struct xilinx_cdma_tx_segment, node);

	if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
		ctrl_reg &= ~XILINX_DMA_CR_COALESCE_MAX;
		ctrl_reg |= chan->desc_pendingcount <<
			    XILINX_DMA_CR_COALESCE_SHIFT;
		dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, ctrl_reg);
	}

	if (chan->has_sg) {
		dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
			     XILINX_CDMA_CR_SGMODE);

		dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
			     XILINX_CDMA_CR_SGMODE);

		xilinx_write(chan, XILINX_DMA_REG_CURDESC,
			     head_desc->async_tx.phys);

		/* Update tail ptr register which will start the transfer */
		xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
			     tail_segment->phys);
	} else {
		/* In simple mode */
		struct xilinx_cdma_tx_segment *segment;
		struct xilinx_cdma_desc_hw *hw;

		segment = list_first_entry(&head_desc->segments,
					   struct xilinx_cdma_tx_segment,
					   node);

		hw = &segment->hw;

		xilinx_write(chan, XILINX_CDMA_REG_SRCADDR, hw->src_addr);
		xilinx_write(chan, XILINX_CDMA_REG_DSTADDR, hw->dest_addr);

		/* Start the transfer */
		dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
			       hw->control & XILINX_DMA_MAX_TRANS_LEN);
	}

	list_splice_tail_init(&chan->pending_list, &chan->active_list);
	chan->desc_pendingcount = 0;
}
/**
 * xilinx_dma_start_transfer - Starts DMA transfer
 * @chan: Driver specific channel struct pointer
 */
static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
	struct xilinx_axidma_tx_segment *tail_segment;
	u32 reg;

	if (list_empty(&chan->pending_list))
		return;

	head_desc = list_first_entry(&chan->pending_list,
				     struct xilinx_dma_tx_descriptor, node);
	tail_desc = list_last_entry(&chan->pending_list,
				    struct xilinx_dma_tx_descriptor, node);
	tail_segment = list_last_entry(&tail_desc->segments,
				       struct xilinx_axidma_tx_segment, node);

	reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);

	if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
		reg &= ~XILINX_DMA_CR_COALESCE_MAX;
		reg |= chan->desc_pendingcount <<
		       XILINX_DMA_CR_COALESCE_SHIFT;
		dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
	}

	if (chan->has_sg && !chan->xdev->mcdma)
		xilinx_write(chan, XILINX_DMA_REG_CURDESC,
			     head_desc->async_tx.phys);

	if (chan->has_sg && chan->xdev->mcdma) {
		if (chan->direction == DMA_MEM_TO_DEV) {
			dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
				       head_desc->async_tx.phys);
		} else {
			if (!chan->tdest) {
				dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
					       head_desc->async_tx.phys);
			} else {
				dma_ctrl_write(chan,
					XILINX_DMA_MCRX_CDESC(chan->tdest),
					head_desc->async_tx.phys);
			}
		}
	}

	xilinx_dma_start(chan);

	/* Start the transfer */
	if (chan->has_sg && !chan->xdev->mcdma) {
		if (chan->cyclic)
			xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
				     chan->cyclic_seg_v->phys);
		else
			xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
				     tail_segment->phys);
	} else if (chan->has_sg && chan->xdev->mcdma) {
		if (chan->direction == DMA_MEM_TO_DEV) {
			dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
				       tail_segment->phys);
		} else {
			if (!chan->tdest) {
				dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
					       tail_segment->phys);
			} else {
				dma_ctrl_write(chan,
					XILINX_DMA_MCRX_TDESC(chan->tdest),
					tail_segment->phys);
			}
		}
	} else {
		struct xilinx_axidma_tx_segment *segment;
		struct xilinx_axidma_desc_hw *hw;

		segment = list_first_entry(&head_desc->segments,
					   struct xilinx_axidma_tx_segment,
					   node);
		hw = &segment->hw;

		xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR, hw->buf_addr);

		/* Start the transfer */
		dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
			       hw->control & XILINX_DMA_MAX_TRANS_LEN);
	}

	list_splice_tail_init(&chan->pending_list, &chan->active_list);
	chan->desc_pendingcount = 0;
}
/**
 * xilinx_dma_issue_pending - Issue pending transactions
 * @dchan: DMA channel
 */
static void xilinx_dma_issue_pending(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->lock, flags);
	chan->start_transfer(chan);
	spin_unlock_irqrestore(&chan->lock, flags);
}
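
/*
 * Illustrative sketch (not part of the original driver): the usual
 * client-side sequence that ends in xilinx_dma_issue_pending() via
 * dma_async_issue_pending(). The channel name "axidma0" and the buffer
 * parameters are assumptions for the example.
 */
#if 0
static int example_submit_single(struct device *dev, dma_addr_t buf,
				 size_t len)
{
	struct dma_async_tx_descriptor *txd;
	struct dma_chan *chan;
	dma_cookie_t cookie;

	chan = dma_request_chan(dev, "axidma0");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	txd = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
					  DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txd) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	cookie = dmaengine_submit(txd);	/* ends up in xilinx_dma_tx_submit() */
	dma_async_issue_pending(chan);	/* kicks chan->start_transfer() */
	return dma_submit_error(cookie);
}
#endif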
/**
 * xilinx_dma_complete_descriptor - Mark the active descriptor as complete
 * @chan: xilinx DMA channel
 */
static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan)
{
	struct xilinx_dma_tx_descriptor *desc, *next;

	/* This function was invoked with lock held */
	if (list_empty(&chan->active_list))
		return;

	list_for_each_entry_safe(desc, next, &chan->active_list, node) {
		list_del(&desc->node);
		if (!desc->cyclic)
			dma_cookie_complete(&desc->async_tx);
		list_add_tail(&desc->node, &chan->done_list);
	}
}
/**
 * xilinx_dma_reset - Reset DMA channel
 * @chan: Driver specific DMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_reset(struct xilinx_dma_chan *chan)
{
	int err;
	u32 tmp;

	dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RESET);

	/* Wait for the hardware to finish reset */
	err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMACR, tmp,
				      !(tmp & XILINX_DMA_DMACR_RESET), 0,
				      XILINX_DMA_LOOP_COUNT);

	if (err) {
		dev_err(chan->dev, "reset timeout, cr %x, sr %x\n",
			dma_ctrl_read(chan, XILINX_DMA_REG_DMACR),
			dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
		return -ETIMEDOUT;
	}

	chan->err = false;
	chan->desc_pendingcount = 0;
	chan->desc_submitcount = 0;

	return err;
}
/**
 * xilinx_dma_chan_reset - Reset DMA channel and enable interrupts
 * @chan: Driver specific DMA channel
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_chan_reset(struct xilinx_dma_chan *chan)
{
	int err;

	err = xilinx_dma_reset(chan);
	if (err)
		return err;

	/* Enable interrupts */
	dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
		     XILINX_DMA_DMAXR_ALL_IRQ_MASK);

	return err;
}
/**
 * xilinx_dma_irq_handler - DMA Interrupt handler
 * @irq: IRQ number
 * @data: Pointer to the Xilinx DMA channel structure
 *
 * Return: IRQ_HANDLED/IRQ_NONE
 */
static irqreturn_t xilinx_dma_irq_handler(int irq, void *data)
{
	struct xilinx_dma_chan *chan = data;
	u32 status;

	/* Read the status and ack the interrupts. */
	status = dma_ctrl_read(chan, XILINX_DMA_REG_DMASR);
	if (!(status & XILINX_DMA_DMAXR_ALL_IRQ_MASK))
		return IRQ_NONE;

	dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
		       status & XILINX_DMA_DMAXR_ALL_IRQ_MASK);

	if (status & XILINX_DMA_DMASR_ERR_IRQ) {
		/*
		 * An error occurred. If C_FLUSH_ON_FSYNC is enabled and the
		 * error is recoverable, ignore it. Otherwise flag the error.
		 *
		 * Only recoverable errors can be cleared in the DMASR register,
		 * make sure not to write 1 to the other error bits.
		 */
		u32 errors = status & XILINX_DMA_DMASR_ALL_ERR_MASK;

		dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
			       errors & XILINX_DMA_DMASR_ERR_RECOVER_MASK);

		if (!chan->flush_on_fsync ||
		    (errors & ~XILINX_DMA_DMASR_ERR_RECOVER_MASK)) {
			dev_err(chan->dev,
				"Channel %p has errors %x, cdr %x tdr %x\n",
				chan, errors,
				dma_ctrl_read(chan, XILINX_DMA_REG_CURDESC),
				dma_ctrl_read(chan, XILINX_DMA_REG_TAILDESC));
			chan->err = true;
		}
	}

	if (status & XILINX_DMA_DMASR_DLY_CNT_IRQ) {
		/*
		 * Device takes too long to do the transfer when user requires
		 * responsiveness.
		 */
		dev_dbg(chan->dev, "Inter-packet latency too long\n");
	}

	if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) {
		spin_lock(&chan->lock);
		xilinx_dma_complete_descriptor(chan);
		chan->idle = true;
		chan->start_transfer(chan);
		spin_unlock(&chan->lock);
	}

	tasklet_schedule(&chan->tasklet);
	return IRQ_HANDLED;
}
/**
 * append_desc_queue - Queuing descriptor
 * @chan: Driver specific dma channel
 * @desc: dma transaction descriptor
 */
static void append_desc_queue(struct xilinx_dma_chan *chan,
			      struct xilinx_dma_tx_descriptor *desc)
{
	struct xilinx_vdma_tx_segment *tail_segment;
	struct xilinx_dma_tx_descriptor *tail_desc;
	struct xilinx_axidma_tx_segment *axidma_tail_segment;
	struct xilinx_cdma_tx_segment *cdma_tail_segment;

	if (list_empty(&chan->pending_list))
		goto append;

	/*
	 * Add the hardware descriptor to the chain of hardware descriptors
	 * that already exists in memory.
	 */
	tail_desc = list_last_entry(&chan->pending_list,
				    struct xilinx_dma_tx_descriptor, node);
	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		tail_segment = list_last_entry(&tail_desc->segments,
					       struct xilinx_vdma_tx_segment,
					       node);
		tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		cdma_tail_segment = list_last_entry(&tail_desc->segments,
						struct xilinx_cdma_tx_segment,
						node);
		cdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
	} else {
		axidma_tail_segment = list_last_entry(&tail_desc->segments,
					       struct xilinx_axidma_tx_segment,
					       node);
		axidma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
	}

	/*
	 * Add the software descriptor and all children to the list
	 * of pending transactions
	 */
append:
	list_add_tail(&desc->node, &chan->pending_list);
	chan->desc_pendingcount++;

	if (chan->has_sg && (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA)
	    && unlikely(chan->desc_pendingcount > chan->num_frms)) {
		dev_dbg(chan->dev, "desc pendingcount is too high\n");
		chan->desc_pendingcount = chan->num_frms;
	}
}
/**
 * xilinx_dma_tx_submit - Submit DMA transaction
 * @tx: Async transaction descriptor
 *
 * Return: cookie value on success and failure value on error
 */
static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct xilinx_dma_tx_descriptor *desc = to_dma_tx_descriptor(tx);
	struct xilinx_dma_chan *chan = to_xilinx_chan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;
	int err;

	if (chan->cyclic) {
		xilinx_dma_free_tx_descriptor(chan, desc);
		return -EBUSY;
	}

	if (chan->err) {
		/*
		 * If reset fails, we need to hard reset the system; the
		 * channel is no longer functional.
		 */
		err = xilinx_dma_chan_reset(chan);
		if (err < 0)
			return err;
	}

	spin_lock_irqsave(&chan->lock, flags);

	cookie = dma_cookie_assign(tx);

	/* Put this transaction onto the tail of the pending queue */
	append_desc_queue(chan, desc);

	if (desc->cyclic)
		chan->cyclic = true;

	chan->terminating = false;

	spin_unlock_irqrestore(&chan->lock, flags);

	return cookie;
}
/**
 * xilinx_vdma_dma_prep_interleaved - prepare a descriptor for a
 *	DMA_SLAVE transaction
 * @dchan: DMA channel
 * @xt: Interleaved template pointer
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *
xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
				 struct dma_interleaved_template *xt,
				 unsigned long flags)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_vdma_tx_segment *segment;
	struct xilinx_vdma_desc_hw *hw;

	if (!is_slave_direction(xt->dir))
		return NULL;

	if (!xt->numf || !xt->sgl[0].size)
		return NULL;

	if (xt->frame_size != 1)
		return NULL;

	/* Allocate a transaction descriptor. */
	desc = xilinx_dma_alloc_tx_descriptor(chan);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = xilinx_dma_tx_submit;
	async_tx_ack(&desc->async_tx);

	/* Allocate the link descriptor from DMA pool */
	segment = xilinx_vdma_alloc_tx_segment(chan);
	if (!segment)
		goto error;

	/* Fill in the hardware descriptor */
	hw = &segment->hw;
	hw->vsize = xt->numf;
	hw->hsize = xt->sgl[0].size;
	hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) <<
			XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT;
	hw->stride |= chan->config.frm_dly <<
			XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT;

	if (xt->dir != DMA_MEM_TO_DEV) {
		if (chan->ext_addr) {
			hw->buf_addr = lower_32_bits(xt->dst_start);
			hw->buf_addr_msb = upper_32_bits(xt->dst_start);
		} else {
			hw->buf_addr = xt->dst_start;
		}
	} else {
		if (chan->ext_addr) {
			hw->buf_addr = lower_32_bits(xt->src_start);
			hw->buf_addr_msb = upper_32_bits(xt->src_start);
		} else {
			hw->buf_addr = xt->src_start;
		}
	}

	/* Insert the segment into the descriptor segments list. */
	list_add_tail(&segment->node, &desc->segments);

	/* Link the last hardware descriptor with the first. */
	segment = list_first_entry(&desc->segments,
				   struct xilinx_vdma_tx_segment, node);
	desc->async_tx.phys = segment->phys;

	return &desc->async_tx;

error:
	xilinx_dma_free_tx_descriptor(chan, desc);
	return NULL;
}
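
/*
 * Illustrative sketch (not part of the original driver): building a
 * dma_interleaved_template for one video frame and handing it to the VDMA
 * prep routine above via dmaengine_prep_interleaved_dma(). The frame
 * geometry (1280x720, 16 bits per pixel) is an assumption for the example;
 * note the template ends in a flexible array, so it must be allocated with
 * room for one data_chunk.
 */
#if 0
static struct dma_async_tx_descriptor *
example_prep_vdma_frame(struct dma_chan *chan, dma_addr_t frame_buf)
{
	struct dma_async_tx_descriptor *txd;
	struct dma_interleaved_template *xt;

	xt = kzalloc(sizeof(*xt) + sizeof(struct data_chunk), GFP_KERNEL);
	if (!xt)
		return NULL;

	xt->dir = DMA_DEV_TO_MEM;	/* S2MM: capture into memory */
	xt->dst_start = frame_buf;
	xt->numf = 720;			/* vsize: lines per frame */
	xt->frame_size = 1;		/* this driver requires exactly 1 */
	xt->sgl[0].size = 1280 * 2;	/* hsize: bytes per line */
	xt->sgl[0].icg = 0;		/* stride = size + icg */

	txd = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
	kfree(xt);			/* template is consumed during prep */
	return txd;
}
#endif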
/**
 * xilinx_cdma_prep_memcpy - prepare descriptors for a memcpy transaction
 * @dchan: DMA channel
 * @dma_dst: destination address
 * @dma_src: source address
 * @len: transfer length
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *
xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
			dma_addr_t dma_src, size_t len, unsigned long flags)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_cdma_tx_segment *segment;
	struct xilinx_cdma_desc_hw *hw;

	if (!len || len > XILINX_DMA_MAX_TRANS_LEN)
		return NULL;

	desc = xilinx_dma_alloc_tx_descriptor(chan);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = xilinx_dma_tx_submit;

	/* Allocate the link descriptor from DMA pool */
	segment = xilinx_cdma_alloc_tx_segment(chan);
	if (!segment)
		goto error;

	hw = &segment->hw;
	hw->control = len;
	hw->src_addr = dma_src;
	hw->dest_addr = dma_dst;
	if (chan->ext_addr) {
		hw->src_addr_msb = upper_32_bits(dma_src);
		hw->dest_addr_msb = upper_32_bits(dma_dst);
	}

	/* Insert the segment into the descriptor segments list. */
	list_add_tail(&segment->node, &desc->segments);

	desc->async_tx.phys = segment->phys;
	hw->next_desc = segment->phys;

	return &desc->async_tx;

error:
	xilinx_dma_free_tx_descriptor(chan, desc);
	return NULL;
}
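
/*
 * Illustrative sketch (not part of the original driver): a CDMA
 * memory-to-memory copy issued through the generic
 * dmaengine_prep_dma_memcpy() wrapper, which lands in
 * xilinx_cdma_prep_memcpy() above. Note the length must not exceed
 * XILINX_DMA_MAX_TRANS_LEN or the prep call returns NULL.
 */
#if 0
static int example_cdma_copy(struct dma_chan *chan, dma_addr_t dst,
			     dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *txd;

	if (!len || len > XILINX_DMA_MAX_TRANS_LEN)
		return -EINVAL;

	txd = dmaengine_prep_dma_memcpy(chan, dst, src, len,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txd)
		return -ENOMEM;

	dmaengine_submit(txd);
	dma_async_issue_pending(chan);
	return 0;
}
#endif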
/**
 * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
 * @dchan: DMA channel
 * @sgl: scatterlist to transfer to/from
 * @sg_len: number of entries in @sgl
 * @direction: DMA direction
 * @flags: transfer ack flags
 * @context: APP words of the descriptor
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
	struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_axidma_tx_segment *segment = NULL;
	u32 *app_w = (u32 *)context;
	struct scatterlist *sg;
	size_t copy;
	size_t sg_used;
	unsigned int i;

	if (!is_slave_direction(direction))
		return NULL;

	/* Allocate a transaction descriptor. */
	desc = xilinx_dma_alloc_tx_descriptor(chan);
	if (!desc)
		return NULL;

	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = xilinx_dma_tx_submit;

	/* Build transactions using information in the scatter gather list */
	for_each_sg(sgl, sg, sg_len, i) {
		sg_used = 0;

		/* Loop until the entire scatterlist entry is used */
		while (sg_used < sg_dma_len(sg)) {
			struct xilinx_axidma_desc_hw *hw;

			/* Get a free segment */
			segment = xilinx_axidma_alloc_tx_segment(chan);
			if (!segment)
				goto error;

			/*
			 * Calculate the maximum number of bytes to transfer,
			 * making sure it is less than the hw limit
			 */
			copy = min_t(size_t, sg_dma_len(sg) - sg_used,
				     XILINX_DMA_MAX_TRANS_LEN);
			hw = &segment->hw;

			/* Fill in the descriptor */
			xilinx_axidma_buf(chan, hw, sg_dma_address(sg),
					  sg_used, 0);

			hw->control = copy;

			if (chan->direction == DMA_MEM_TO_DEV) {
				if (app_w)
					memcpy(hw->app, app_w, sizeof(u32) *
					       XILINX_DMA_NUM_APP_WORDS);
			}

			sg_used += copy;

			/*
			 * Insert the segment into the descriptor segments
			 * list.
			 */
			list_add_tail(&segment->node, &desc->segments);
		}
	}

	segment = list_first_entry(&desc->segments,
				   struct xilinx_axidma_tx_segment, node);
	desc->async_tx.phys = segment->phys;

	/* For the last DMA_MEM_TO_DEV transfer, set EOP */
	if (chan->direction == DMA_MEM_TO_DEV) {
		segment->hw.control |= XILINX_DMA_BD_SOP;
		segment = list_last_entry(&desc->segments,
					  struct xilinx_axidma_tx_segment,
					  node);
		segment->hw.control |= XILINX_DMA_BD_EOP;
	}

	return &desc->async_tx;

error:
	xilinx_dma_free_tx_descriptor(chan, desc);
	return NULL;
}
/**
 * xilinx_dma_prep_dma_cyclic - prepare descriptors for a cyclic DMA transaction
 * @dchan: DMA channel
 * @buf_addr: Physical address of the buffer
 * @buf_len: Total length of the cyclic buffers
 * @period_len: length of individual cyclic buffer
 * @direction: DMA direction
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
	struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_axidma_tx_segment *segment, *head_segment, *prev = NULL;
	size_t copy, sg_used;
	unsigned int num_periods;
	int i;
	u32 reg;

	if (!period_len)
		return NULL;

	num_periods = buf_len / period_len;

	if (!num_periods)
		return NULL;

	if (!is_slave_direction(direction))
		return NULL;

	/* Allocate a transaction descriptor. */
	desc = xilinx_dma_alloc_tx_descriptor(chan);
	if (!desc)
		return NULL;

	chan->direction = direction;
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = xilinx_dma_tx_submit;

	for (i = 0; i < num_periods; ++i) {
		sg_used = 0;

		while (sg_used < period_len) {
			struct xilinx_axidma_desc_hw *hw;

			/* Get a free segment */
			segment = xilinx_axidma_alloc_tx_segment(chan);
			if (!segment)
				goto error;

			/*
			 * Calculate the maximum number of bytes to transfer,
			 * making sure it is less than the hw limit
			 */
			copy = min_t(size_t, period_len - sg_used,
				     XILINX_DMA_MAX_TRANS_LEN);
			hw = &segment->hw;
			xilinx_axidma_buf(chan, hw, buf_addr, sg_used,
					  period_len * i);
			hw->control = copy;

			if (prev)
				prev->hw.next_desc = segment->phys;

			prev = segment;
			sg_used += copy;

			/*
			 * Insert the segment into the descriptor segments
			 * list.
			 */
			list_add_tail(&segment->node, &desc->segments);
		}
	}

	head_segment = list_first_entry(&desc->segments,
					struct xilinx_axidma_tx_segment, node);
	desc->async_tx.phys = head_segment->phys;

	desc->cyclic = true;
	reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
	reg |= XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
	dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);

	segment = list_last_entry(&desc->segments,
				  struct xilinx_axidma_tx_segment,
				  node);
	segment->hw.next_desc = (u32) head_segment->phys;

	/* For the last DMA_MEM_TO_DEV transfer, set EOP */
	if (direction == DMA_MEM_TO_DEV) {
		head_segment->hw.control |= XILINX_DMA_BD_SOP;
		segment->hw.control |= XILINX_DMA_BD_EOP;
	}

	return &desc->async_tx;

error:
	xilinx_dma_free_tx_descriptor(chan, desc);
	return NULL;
}
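
/*
 * Illustrative sketch (not part of the original driver): setting up a
 * cyclic (ring) transfer, e.g. for audio, through
 * dmaengine_prep_dma_cyclic(). buf_len must be an integer multiple of
 * period_len; the callback fires once per completed period.
 */
#if 0
static int example_start_cyclic(struct dma_chan *chan, dma_addr_t buf,
				size_t buf_len, size_t period_len,
				dma_async_tx_callback cb, void *cb_arg)
{
	struct dma_async_tx_descriptor *txd;

	txd = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
					DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!txd)
		return -ENOMEM;

	txd->callback = cb;
	txd->callback_param = cb_arg;
	dmaengine_submit(txd);
	dma_async_issue_pending(chan);
	return 0;
}
#endif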
/**
 * xilinx_dma_prep_interleaved - prepare a descriptor for a
 *	DMA_SLAVE transaction
 * @dchan: DMA channel
 * @xt: Interleaved template pointer
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *
xilinx_dma_prep_interleaved(struct dma_chan *dchan,
			    struct dma_interleaved_template *xt,
			    unsigned long flags)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	struct xilinx_dma_tx_descriptor *desc;
	struct xilinx_axidma_tx_segment *segment;
	struct xilinx_axidma_desc_hw *hw;

	if (!is_slave_direction(xt->dir))
		return NULL;

	if (!xt->numf || !xt->sgl[0].size)
		return NULL;

	if (xt->frame_size != 1)
		return NULL;

	/* Allocate a transaction descriptor. */
	desc = xilinx_dma_alloc_tx_descriptor(chan);
	if (!desc)
		return NULL;

	chan->direction = xt->dir;
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
	desc->async_tx.tx_submit = xilinx_dma_tx_submit;

	/* Get a free segment */
	segment = xilinx_axidma_alloc_tx_segment(chan);
	if (!segment)
		goto error;

	hw = &segment->hw;

	/* Fill in the descriptor */
	if (xt->dir != DMA_MEM_TO_DEV)
		hw->buf_addr = xt->dst_start;
	else
		hw->buf_addr = xt->src_start;

	hw->mcdma_control = chan->tdest & XILINX_DMA_BD_TDEST_MASK;
	hw->vsize_stride = (xt->numf << XILINX_DMA_BD_VSIZE_SHIFT) &
			   XILINX_DMA_BD_VSIZE_MASK;
	hw->vsize_stride |= (xt->sgl[0].icg + xt->sgl[0].size) &
			    XILINX_DMA_BD_STRIDE_MASK;
	hw->control = xt->sgl[0].size & XILINX_DMA_BD_HSIZE_MASK;

	/*
	 * Insert the segment into the descriptor segments
	 * list.
	 */
	list_add_tail(&segment->node, &desc->segments);

	segment = list_first_entry(&desc->segments,
				   struct xilinx_axidma_tx_segment, node);
	desc->async_tx.phys = segment->phys;

	/* For the last DMA_MEM_TO_DEV transfer, set EOP */
	if (xt->dir == DMA_MEM_TO_DEV) {
		segment->hw.control |= XILINX_DMA_BD_SOP;
		segment = list_last_entry(&desc->segments,
					  struct xilinx_axidma_tx_segment,
					  node);
		segment->hw.control |= XILINX_DMA_BD_EOP;
	}

	return &desc->async_tx;

error:
	xilinx_dma_free_tx_descriptor(chan, desc);
	return NULL;
}
/**
 * xilinx_dma_terminate_all - Halt the channel and free descriptors
 * @dchan: Driver specific DMA Channel pointer
 *
 * Return: '0' always.
 */
static int xilinx_dma_terminate_all(struct dma_chan *dchan)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	u32 reg;
	int err;

	if (chan->cyclic)
		xilinx_dma_chan_reset(chan);

	err = chan->stop_transfer(chan);
	if (err) {
		dev_err(chan->dev, "Cannot stop channel %p: %x\n",
			chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
		chan->err = true;
	}

	/* Remove and free all of the descriptors in the lists */
	chan->terminating = true;
	xilinx_dma_free_descriptors(chan);

	if (chan->cyclic) {
		reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
		reg &= ~XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
		dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
		chan->cyclic = false;
	}

	if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
		dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
			     XILINX_CDMA_CR_SGMODE);

	return 0;
}
/**
 * xilinx_vdma_channel_set_config - Configure VDMA channel
 * Run-time configuration for Axi VDMA, supports:
 * . halt the channel
 * . configure interrupt coalescing and inter-packet delay threshold
 * . start/stop parking
 * . enable genlock
 *
 * @dchan: DMA channel
 * @cfg: VDMA device configuration pointer
 *
 * Return: '0' on success and failure value on error
 */
int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
				   struct xilinx_vdma_config *cfg)
{
	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
	u32 dmacr;

	if (cfg->reset)
		return xilinx_dma_chan_reset(chan);
2122 dmacr = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
2124 chan->config.frm_dly = cfg->frm_dly;
2125 chan->config.park = cfg->park;
2127 /* genlock settings */
2128 chan->config.gen_lock = cfg->gen_lock;
2129 chan->config.master = cfg->master;
2131 dmacr &= ~XILINX_DMA_DMACR_GENLOCK_EN;
2132 if (cfg->gen_lock && chan->genlock) {
2133 dmacr |= XILINX_DMA_DMACR_GENLOCK_EN;
2134 dmacr &= ~XILINX_DMA_DMACR_MASTER_MASK;
2135 dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT;
2138 chan->config.frm_cnt_en = cfg->frm_cnt_en;
2139 chan->config.vflip_en = cfg->vflip_en;
2142 chan->config.park_frm = cfg->park_frm;
2144 chan->config.park_frm = -1;
2146 chan->config.coalesc = cfg->coalesc;
2147 chan->config.delay = cfg->delay;
2149 if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) {
2150 dmacr &= ~XILINX_DMA_DMACR_FRAME_COUNT_MASK;
2151 dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT;
2152 chan->config.coalesc = cfg->coalesc;
2155 if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) {
2156 dmacr &= ~XILINX_DMA_DMACR_DELAY_MASK;
2157 dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT;
2158 chan->config.delay = cfg->delay;
2161 /* FSync Source selection */
2162 dmacr &= ~XILINX_DMA_DMACR_FSYNCSRC_MASK;
2163 dmacr |= cfg->ext_fsync << XILINX_DMA_DMACR_FSYNCSRC_SHIFT;
2165 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, dmacr);
2169 EXPORT_SYMBOL(xilinx_vdma_channel_set_config);
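/*
 * Illustrative run-time configuration of a VDMA channel through the
 * exported helper above; a sketch only, with hypothetical values and a
 * hypothetical "chan". struct xilinx_vdma_config is declared in
 * <linux/dma/xilinx_dma.h>:
 *
 *	struct xilinx_vdma_config cfg = { };
 *	int ret;
 *
 *	cfg.gen_lock = 1;	// run as genlock slave if hw supports it
 *	cfg.master = 0;		// genlock master to follow
 *	cfg.coalesc = 4;	// interrupt after every 4 frames
 *	cfg.delay = 0;		// no inter-packet delay interrupt
 *	cfg.park = 0;		// circular mode, no parking
 *	ret = xilinx_vdma_channel_set_config(chan, &cfg);
 */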
/* -----------------------------------------------------------------------------
 * Probe and remove
 */

/**
 * xilinx_dma_chan_remove - Per Channel remove function
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan)
{
	/* Disable all interrupts */
	dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
		     XILINX_DMA_DMAXR_ALL_IRQ_MASK);

	if (chan->irq > 0)
		free_irq(chan->irq, chan);

	tasklet_kill(&chan->tasklet);
	list_del(&chan->common.device_node);
}
static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
			   struct clk **tx_clk, struct clk **rx_clk,
			   struct clk **sg_clk, struct clk **tmp_clk)
{
	int err;

	*tmp_clk = NULL;

	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
	if (IS_ERR(*axi_clk)) {
		err = PTR_ERR(*axi_clk);
		dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err);
		return err;
	}

	*tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
	if (IS_ERR(*tx_clk))
		*tx_clk = NULL;
	*rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
	if (IS_ERR(*rx_clk))
		*rx_clk = NULL;
	*sg_clk = devm_clk_get(&pdev->dev, "m_axi_sg_aclk");
	if (IS_ERR(*sg_clk))
		*sg_clk = NULL;

	err = clk_prepare_enable(*axi_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
		return err;
	}
	err = clk_prepare_enable(*tx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
		goto err_disable_axiclk;
	}
	err = clk_prepare_enable(*rx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
		goto err_disable_txclk;
	}
	err = clk_prepare_enable(*sg_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable sg_clk (%d)\n", err);
		goto err_disable_rxclk;
	}

	return 0;

err_disable_rxclk:
	clk_disable_unprepare(*rx_clk);
err_disable_txclk:
	clk_disable_unprepare(*tx_clk);
err_disable_axiclk:
	clk_disable_unprepare(*axi_clk);

	return err;
}
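/*
 * The names requested above map to "clock-names" in the device tree.
 * An illustrative AXI DMA fragment ("&clkc 15" is hypothetical); only
 * s_axi_lite_aclk is mandatory, the rest fall back to NULL above:
 *
 *	clocks = <&clkc 15>, <&clkc 15>, <&clkc 15>, <&clkc 15>;
 *	clock-names = "s_axi_lite_aclk", "m_axi_mm2s_aclk",
 *		      "m_axi_s2mm_aclk", "m_axi_sg_aclk";
 */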
static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
			    struct clk **dev_clk, struct clk **tmp_clk,
			    struct clk **tmp1_clk, struct clk **tmp2_clk)
{
	int err;

	*tmp_clk = NULL;
	*tmp1_clk = NULL;
	*tmp2_clk = NULL;

	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
	if (IS_ERR(*axi_clk)) {
		err = PTR_ERR(*axi_clk);
		dev_err(&pdev->dev, "failed to get axi_clk (%d)\n", err);
		return err;
	}
	*dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk");
	if (IS_ERR(*dev_clk)) {
		err = PTR_ERR(*dev_clk);
		dev_err(&pdev->dev, "failed to get dev_clk (%d)\n", err);
		return err;
	}

	err = clk_prepare_enable(*axi_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
		return err;
	}
	err = clk_prepare_enable(*dev_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable dev_clk (%d)\n", err);
		goto err_disable_axiclk;
	}

	return 0;

err_disable_axiclk:
	clk_disable_unprepare(*axi_clk);

	return err;
}
static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
			    struct clk **tx_clk, struct clk **txs_clk,
			    struct clk **rx_clk, struct clk **rxs_clk)
{
	int err;

	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
	if (IS_ERR(*axi_clk)) {
		err = PTR_ERR(*axi_clk);
		dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err);
		return err;
	}

	*tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
	if (IS_ERR(*tx_clk))
		*tx_clk = NULL;
	*txs_clk = devm_clk_get(&pdev->dev, "m_axis_mm2s_aclk");
	if (IS_ERR(*txs_clk))
		*txs_clk = NULL;
	*rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
	if (IS_ERR(*rx_clk))
		*rx_clk = NULL;
	*rxs_clk = devm_clk_get(&pdev->dev, "s_axis_s2mm_aclk");
	if (IS_ERR(*rxs_clk))
		*rxs_clk = NULL;

	err = clk_prepare_enable(*axi_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
		return err;
	}
	err = clk_prepare_enable(*tx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
		goto err_disable_axiclk;
	}
	err = clk_prepare_enable(*txs_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable txs_clk (%d)\n", err);
		goto err_disable_txclk;
	}
	err = clk_prepare_enable(*rx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
		goto err_disable_txsclk;
	}
	err = clk_prepare_enable(*rxs_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable rxs_clk (%d)\n", err);
		goto err_disable_rxclk;
	}

	return 0;

err_disable_rxclk:
	clk_disable_unprepare(*rx_clk);
err_disable_txsclk:
	clk_disable_unprepare(*txs_clk);
err_disable_txclk:
	clk_disable_unprepare(*tx_clk);
err_disable_axiclk:
	clk_disable_unprepare(*axi_clk);

	return err;
}
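/*
 * As above, an illustrative "clock-names" list matching the VDMA
 * lookups; only s_axi_lite_aclk is mandatory:
 *
 *	clock-names = "s_axi_lite_aclk", "m_axi_mm2s_aclk",
 *		      "m_axis_mm2s_aclk", "m_axi_s2mm_aclk",
 *		      "s_axis_s2mm_aclk";
 */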
static void xdma_disable_allclks(struct xilinx_dma_device *xdev)
{
	clk_disable_unprepare(xdev->rxs_clk);
	clk_disable_unprepare(xdev->rx_clk);
	clk_disable_unprepare(xdev->txs_clk);
	clk_disable_unprepare(xdev->tx_clk);
	clk_disable_unprepare(xdev->axi_clk);
}
/**
 * xilinx_dma_chan_probe - Per Channel Probing
 * It gets channel features from the device tree entry and
 * initializes special channel handling routines
 *
 * @xdev: Driver specific device structure
 * @node: Device node
 * @chan_id: DMA Channel id
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
				 struct device_node *node, int chan_id)
{
	struct xilinx_dma_chan *chan;
	bool has_dre = false;
	u32 value, width;
	int err;

	/* Allocate and initialize the channel structure */
	chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;

	chan->dev = xdev->dev;
	chan->xdev = xdev;
	chan->has_sg = xdev->has_sg;
	chan->desc_pendingcount = 0x0;
	chan->ext_addr = xdev->ext_addr;
	/* This variable ensures that descriptors are not
	 * submitted while the DMA engine is in progress. It is
	 * used to avoid polling for a bit in the status register to
	 * know the DMA state in the driver hot path.
	 */
	chan->idle = true;

	spin_lock_init(&chan->lock);
	INIT_LIST_HEAD(&chan->pending_list);
	INIT_LIST_HEAD(&chan->done_list);
	INIT_LIST_HEAD(&chan->active_list);
	INIT_LIST_HEAD(&chan->free_seg_list);

	/* Retrieve the channel properties from the device tree */
	has_dre = of_property_read_bool(node, "xlnx,include-dre");

	chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode");

	err = of_property_read_u32(node, "xlnx,datawidth", &value);
	if (err) {
		dev_err(xdev->dev, "missing xlnx,datawidth property\n");
		return err;
	}
	width = value >> 3; /* Convert bits to bytes */

	/* If data width is greater than 8 bytes, DRE is not in hw */
	if (width > 8)
		has_dre = false;

	if (!has_dre)
		xdev->common.copy_align = (enum dmaengine_alignment)fls(width - 1);

	if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") ||
	    of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") ||
	    of_device_is_compatible(node, "xlnx,axi-cdma-channel")) {
		chan->direction = DMA_MEM_TO_DEV;
		chan->id = chan_id;
		chan->tdest = chan_id;

		chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
		if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
			chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET;
			chan->config.park = 1;

			if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
			    xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S)
				chan->flush_on_fsync = true;
		}
	} else if (of_device_is_compatible(node,
					   "xlnx,axi-vdma-s2mm-channel") ||
		   of_device_is_compatible(node,
					   "xlnx,axi-dma-s2mm-channel")) {
		chan->direction = DMA_DEV_TO_MEM;
		chan->id = chan_id;
		chan->tdest = chan_id - xdev->nr_channels;
		chan->has_vflip = of_property_read_bool(node,
					"xlnx,enable-vert-flip");
		if (chan->has_vflip) {
			chan->config.vflip_en = dma_read(chan,
				XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP) &
				XILINX_VDMA_ENABLE_VERTICAL_FLIP;
		}

		chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
		if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
			chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;
			chan->config.park = 1;

			if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
			    xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM)
				chan->flush_on_fsync = true;
		}
	} else {
		dev_err(xdev->dev, "Invalid channel compatible node\n");
		return -EINVAL;
	}

	/* Request the interrupt */
	chan->irq = irq_of_parse_and_map(node, 0);
	err = request_irq(chan->irq, xilinx_dma_irq_handler, IRQF_SHARED,
			  "xilinx-dma-controller", chan);
	if (err) {
		dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq);
		return err;
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		chan->start_transfer = xilinx_dma_start_transfer;
		chan->stop_transfer = xilinx_dma_stop_transfer;
	} else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		chan->start_transfer = xilinx_cdma_start_transfer;
		chan->stop_transfer = xilinx_cdma_stop_transfer;
	} else {
		chan->start_transfer = xilinx_vdma_start_transfer;
		chan->stop_transfer = xilinx_dma_stop_transfer;
	}

	/* Initialize the tasklet */
	tasklet_init(&chan->tasklet, xilinx_dma_do_tasklet,
		     (unsigned long)chan);

	/*
	 * Initialize the DMA channel and add it to the DMA engine channels
	 * list.
	 */
	chan->common.device = &xdev->common;

	list_add_tail(&chan->common.device_node, &xdev->common.channels);
	xdev->chan[chan->id] = chan;

	/* Reset the channel */
	err = xilinx_dma_chan_reset(chan);
	if (err < 0) {
		dev_err(xdev->dev, "Reset channel failed\n");
		return err;
	}

	return 0;
}
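/*
 * An illustrative channel child node consumed by this probe (all values
 * hypothetical; the compatible string selects the transfer direction):
 *
 *	dma-channel@40400000 {
 *		compatible = "xlnx,axi-dma-mm2s-channel";
 *		interrupts = <0 59 4>;
 *		xlnx,datawidth = <0x40>;
 *		xlnx,include-dre;
 *	};
 */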
/**
 * xilinx_dma_child_probe - Per child node probe
 * It gets the number of dma-channels per child node from
 * the device tree and initializes all the channels.
 *
 * @xdev: Driver specific device structure
 * @node: Device node
 *
 * Return: '0' always.
 */
static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
				  struct device_node *node)
{
	int ret, i;
	u32 nr_channels = 1;

	ret = of_property_read_u32(node, "dma-channels", &nr_channels);
	if ((ret < 0) && xdev->mcdma)
		dev_warn(xdev->dev, "missing dma-channels property\n");

	for (i = 0; i < nr_channels; i++)
		xilinx_dma_chan_probe(xdev, node, xdev->chan_id++);

	xdev->nr_channels += nr_channels;

	return 0;
}
/**
 * of_dma_xilinx_xlate - Translation function
 * @dma_spec: Pointer to DMA specifier as found in the device tree
 * @ofdma: Pointer to DMA controller data
 *
 * Return: DMA channel pointer on success and NULL on error
 */
static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
					    struct of_dma *ofdma)
{
	struct xilinx_dma_device *xdev = ofdma->of_dma_data;
	int chan_id = dma_spec->args[0];

	if (chan_id >= xdev->nr_channels || !xdev->chan[chan_id])
		return NULL;

	return dma_get_slave_channel(&xdev->chan[chan_id]->common);
}
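/*
 * The translation above consumes a single specifier cell: the index of
 * the channel in xdev->chan[]. An illustrative consumer node (labels
 * and names hypothetical):
 *
 *	dmas = <&axi_dma 0>, <&axi_dma 1>;
 *	dma-names = "tx", "rx";
 *
 * after which a client driver obtains a channel with
 * dma_request_chan(dev, "tx").
 */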
static const struct xilinx_dma_config axidma_config = {
	.dmatype = XDMA_TYPE_AXIDMA,
	.clk_init = axidma_clk_init,
};

static const struct xilinx_dma_config axicdma_config = {
	.dmatype = XDMA_TYPE_CDMA,
	.clk_init = axicdma_clk_init,
};

static const struct xilinx_dma_config axivdma_config = {
	.dmatype = XDMA_TYPE_VDMA,
	.clk_init = axivdma_clk_init,
};

static const struct of_device_id xilinx_dma_of_ids[] = {
	{ .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config },
	{ .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config },
	{ .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config },
	{}
};
MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids);
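/*
 * An illustrative controller node matched by the table above (address,
 * clocks and property values are hypothetical):
 *
 *	axi_dma: dma-controller@40400000 {
 *		compatible = "xlnx,axi-dma-1.00.a";
 *		reg = <0x40400000 0x10000>;
 *		#dma-cells = <1>;
 *		clocks = <&clkc 15>;
 *		clock-names = "s_axi_lite_aclk";
 *		xlnx,addrwidth = <0x20>;
 *	};
 */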
/**
 * xilinx_dma_probe - Driver probe function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_probe(struct platform_device *pdev)
{
	int (*clk_init)(struct platform_device *, struct clk **, struct clk **,
			struct clk **, struct clk **, struct clk **)
					= axivdma_clk_init;
	struct device_node *node = pdev->dev.of_node;
	struct xilinx_dma_device *xdev;
	struct device_node *child, *np = pdev->dev.of_node;
	struct resource *io;
	u32 num_frames, addr_width;
	int i, err;

	/* Allocate and initialize the DMA engine structure */
	xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
	if (!xdev)
		return -ENOMEM;

	xdev->dev = &pdev->dev;
	if (np) {
		const struct of_device_id *match;

		match = of_match_node(xilinx_dma_of_ids, np);
		if (match && match->data) {
			xdev->dma_config = match->data;
			clk_init = xdev->dma_config->clk_init;
		}
	}

	err = clk_init(pdev, &xdev->axi_clk, &xdev->tx_clk, &xdev->txs_clk,
		       &xdev->rx_clk, &xdev->rxs_clk);
	if (err)
		return err;

	/* Request and map I/O memory */
	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	xdev->regs = devm_ioremap_resource(&pdev->dev, io);
	if (IS_ERR(xdev->regs))
		return PTR_ERR(xdev->regs);

	/* Retrieve the DMA engine properties from the device tree */
	xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg");
	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
		xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma");

	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		err = of_property_read_u32(node, "xlnx,num-fstores",
					   &num_frames);
		if (err < 0) {
			dev_err(xdev->dev,
				"missing xlnx,num-fstores property\n");
			return err;
		}

		err = of_property_read_u32(node, "xlnx,flush-fsync",
					   &xdev->flush_on_fsync);
		if (err < 0)
			dev_warn(xdev->dev,
				 "missing xlnx,flush-fsync property\n");
	}

	err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width);
	if (err < 0)
		dev_warn(xdev->dev, "missing xlnx,addrwidth property\n");

	if (addr_width > 32)
		xdev->ext_addr = true;
	else
		xdev->ext_addr = false;

	/* Set the dma mask bits */
	dma_set_mask(xdev->dev, DMA_BIT_MASK(addr_width));

	/* Initialize the DMA engine */
	xdev->common.dev = &pdev->dev;

	INIT_LIST_HEAD(&xdev->common.channels);
	if (xdev->dma_config->dmatype != XDMA_TYPE_CDMA) {
		dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
		dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
	}

	xdev->common.device_alloc_chan_resources =
				xilinx_dma_alloc_chan_resources;
	xdev->common.device_free_chan_resources =
				xilinx_dma_free_chan_resources;
	xdev->common.device_terminate_all = xilinx_dma_terminate_all;
	xdev->common.device_tx_status = xilinx_dma_tx_status;
	xdev->common.device_issue_pending = xilinx_dma_issue_pending;
	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask);
		xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
		xdev->common.device_prep_dma_cyclic =
					  xilinx_dma_prep_dma_cyclic;
		xdev->common.device_prep_interleaved_dma =
					xilinx_dma_prep_interleaved;
		/* Residue calculation is supported only by AXI DMA */
		xdev->common.residue_granularity =
					  DMA_RESIDUE_GRANULARITY_SEGMENT;
	} else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
		xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
	} else {
		xdev->common.device_prep_interleaved_dma =
				xilinx_vdma_dma_prep_interleaved;
	}

	platform_set_drvdata(pdev, xdev);

	/* Initialize the channels */
	for_each_child_of_node(node, child) {
		err = xilinx_dma_child_probe(xdev, child);
		if (err < 0)
			goto disable_clks;
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		for (i = 0; i < xdev->nr_channels; i++)
			if (xdev->chan[i])
				xdev->chan[i]->num_frms = num_frames;
	}

	/* Register the DMA engine with the core */
	err = dma_async_device_register(&xdev->common);
	if (err) {
		dev_err(xdev->dev, "failed to register the dma device\n");
		goto error;
	}

	err = of_dma_controller_register(node, of_dma_xilinx_xlate,
					 xdev);
	if (err < 0) {
		dev_err(&pdev->dev, "Unable to register DMA to DT\n");
		dma_async_device_unregister(&xdev->common);
		goto error;
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
		dev_info(&pdev->dev, "Xilinx AXI DMA Engine Driver Probed!!\n");
	else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA)
		dev_info(&pdev->dev, "Xilinx AXI CDMA Engine Driver Probed!!\n");
	else
		dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");

	return 0;

disable_clks:
	xdma_disable_allclks(xdev);
error:
	for (i = 0; i < xdev->nr_channels; i++)
		if (xdev->chan[i])
			xilinx_dma_chan_remove(xdev->chan[i]);

	return err;
}
/**
 * xilinx_dma_remove - Driver remove function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: Always '0'
 */
static int xilinx_dma_remove(struct platform_device *pdev)
{
	struct xilinx_dma_device *xdev = platform_get_drvdata(pdev);
	int i;

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&xdev->common);

	for (i = 0; i < xdev->nr_channels; i++)
		if (xdev->chan[i])
			xilinx_dma_chan_remove(xdev->chan[i]);

	xdma_disable_allclks(xdev);

	return 0;
}
static struct platform_driver xilinx_vdma_driver = {
	.driver = {
		.name = "xilinx-vdma",
		.of_match_table = xilinx_dma_of_ids,
	},
	.probe = xilinx_dma_probe,
	.remove = xilinx_dma_remove,
};

module_platform_driver(xilinx_vdma_driver);

MODULE_AUTHOR("Xilinx, Inc.");
MODULE_DESCRIPTION("Xilinx VDMA driver");
MODULE_LICENSE("GPL v2");