2 * DMA driver for Xilinx Video DMA Engine
4 * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved.
6 * Based on the Freescale DMA driver.
9 * The AXI Video Direct Memory Access (AXI VDMA) core is a soft Xilinx IP
10 * core that provides high-bandwidth direct memory access between memory
11 * and AXI4-Stream type video target peripherals. The core provides efficient
12 * two dimensional DMA operations with independent asynchronous read (S2MM)
13 * and write (MM2S) channel operation. It can be configured to have either
14 * one channel or two channels. If configured as two channels, one is to
15 * transmit to the video device (MM2S) and another is to receive from the
16 * video device (S2MM). Initialization, status, interrupt and management
17 * registers are accessed through an AXI4-Lite slave interface.
19 * The AXI Direct Memory Access (AXI DMA) core is a soft Xilinx IP core that
20 * provides high-bandwidth one dimensional direct memory access between memory
21 * and AXI4-Stream target peripherals. It supports one receive and one
22 * transmit channel, both of them optional at synthesis time.
24 * The AXI CDMA is a soft IP that provides high-bandwidth Direct Memory
25 * Access (DMA) between a memory-mapped source address and a memory-mapped
26 * destination address.
28 * This program is free software: you can redistribute it and/or modify
29 * it under the terms of the GNU General Public License as published by
30 * the Free Software Foundation, either version 2 of the License, or
31 * (at your option) any later version.
34 #include <linux/bitops.h>
35 #include <linux/dmapool.h>
36 #include <linux/dma/xilinx_dma.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
40 #include <linux/iopoll.h>
41 #include <linux/module.h>
42 #include <linux/of_address.h>
43 #include <linux/of_dma.h>
44 #include <linux/of_platform.h>
45 #include <linux/of_irq.h>
46 #include <linux/slab.h>
47 #include <linux/clk.h>
48 #include <linux/io-64-nonatomic-lo-hi.h>
50 #include "../dmaengine.h"
52 /* Register/Descriptor Offsets */
53 #define XILINX_DMA_MM2S_CTRL_OFFSET 0x0000
54 #define XILINX_DMA_S2MM_CTRL_OFFSET 0x0030
55 #define XILINX_VDMA_MM2S_DESC_OFFSET 0x0050
56 #define XILINX_VDMA_S2MM_DESC_OFFSET 0x00a0
58 /* Control Registers */
59 #define XILINX_DMA_REG_DMACR 0x0000
60 #define XILINX_DMA_DMACR_DELAY_MAX 0xff
61 #define XILINX_DMA_DMACR_DELAY_SHIFT 24
62 #define XILINX_DMA_DMACR_FRAME_COUNT_MAX 0xff
63 #define XILINX_DMA_DMACR_FRAME_COUNT_SHIFT 16
64 #define XILINX_DMA_DMACR_ERR_IRQ BIT(14)
65 #define XILINX_DMA_DMACR_DLY_CNT_IRQ BIT(13)
66 #define XILINX_DMA_DMACR_FRM_CNT_IRQ BIT(12)
67 #define XILINX_DMA_DMACR_MASTER_SHIFT 8
68 #define XILINX_DMA_DMACR_FSYNCSRC_SHIFT 5
69 #define XILINX_DMA_DMACR_FRAMECNT_EN BIT(4)
70 #define XILINX_DMA_DMACR_GENLOCK_EN BIT(3)
71 #define XILINX_DMA_DMACR_RESET BIT(2)
72 #define XILINX_DMA_DMACR_CIRC_EN BIT(1)
73 #define XILINX_DMA_DMACR_RUNSTOP BIT(0)
74 #define XILINX_DMA_DMACR_FSYNCSRC_MASK GENMASK(6, 5)
75 #define XILINX_DMA_DMACR_DELAY_MASK GENMASK(31, 24)
76 #define XILINX_DMA_DMACR_FRAME_COUNT_MASK GENMASK(23, 16)
77 #define XILINX_DMA_DMACR_MASTER_MASK GENMASK(11, 8)
79 #define XILINX_DMA_REG_DMASR 0x0004
80 #define XILINX_DMA_DMASR_EOL_LATE_ERR BIT(15)
81 #define XILINX_DMA_DMASR_ERR_IRQ BIT(14)
82 #define XILINX_DMA_DMASR_DLY_CNT_IRQ BIT(13)
83 #define XILINX_DMA_DMASR_FRM_CNT_IRQ BIT(12)
84 #define XILINX_DMA_DMASR_SOF_LATE_ERR BIT(11)
85 #define XILINX_DMA_DMASR_SG_DEC_ERR BIT(10)
86 #define XILINX_DMA_DMASR_SG_SLV_ERR BIT(9)
87 #define XILINX_DMA_DMASR_EOF_EARLY_ERR BIT(8)
88 #define XILINX_DMA_DMASR_SOF_EARLY_ERR BIT(7)
89 #define XILINX_DMA_DMASR_DMA_DEC_ERR BIT(6)
90 #define XILINX_DMA_DMASR_DMA_SLAVE_ERR BIT(5)
91 #define XILINX_DMA_DMASR_DMA_INT_ERR BIT(4)
92 #define XILINX_DMA_DMASR_IDLE BIT(1)
93 #define XILINX_DMA_DMASR_HALTED BIT(0)
94 #define XILINX_DMA_DMASR_DELAY_MASK GENMASK(31, 24)
95 #define XILINX_DMA_DMASR_FRAME_COUNT_MASK GENMASK(23, 16)
97 #define XILINX_DMA_REG_CURDESC 0x0008
98 #define XILINX_DMA_REG_TAILDESC 0x0010
99 #define XILINX_DMA_REG_REG_INDEX 0x0014
100 #define XILINX_DMA_REG_FRMSTORE 0x0018
101 #define XILINX_DMA_REG_THRESHOLD 0x001c
102 #define XILINX_DMA_REG_FRMPTR_STS 0x0024
103 #define XILINX_DMA_REG_PARK_PTR 0x0028
104 #define XILINX_DMA_PARK_PTR_WR_REF_SHIFT 8
105 #define XILINX_DMA_PARK_PTR_WR_REF_MASK GENMASK(12, 8)
106 #define XILINX_DMA_PARK_PTR_RD_REF_SHIFT 0
107 #define XILINX_DMA_PARK_PTR_RD_REF_MASK GENMASK(4, 0)
108 #define XILINX_DMA_REG_VDMA_VERSION 0x002c
110 /* Register Direct Mode Registers */
111 #define XILINX_DMA_REG_VSIZE 0x0000
112 #define XILINX_DMA_REG_HSIZE 0x0004
114 #define XILINX_DMA_REG_FRMDLY_STRIDE 0x0008
115 #define XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT 24
116 #define XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT 0
118 #define XILINX_VDMA_REG_START_ADDRESS(n) (0x000c + 4 * (n))
119 #define XILINX_VDMA_REG_START_ADDRESS_64(n) (0x000c + 8 * (n))
121 #define XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP 0x00ec
122 #define XILINX_VDMA_ENABLE_VERTICAL_FLIP BIT(0)
124 /* HW specific definitions */
125 #define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x20
127 #define XILINX_DMA_DMAXR_ALL_IRQ_MASK \
128 (XILINX_DMA_DMASR_FRM_CNT_IRQ | \
129 XILINX_DMA_DMASR_DLY_CNT_IRQ | \
130 XILINX_DMA_DMASR_ERR_IRQ)
132 #define XILINX_DMA_DMASR_ALL_ERR_MASK \
133 (XILINX_DMA_DMASR_EOL_LATE_ERR | \
134 XILINX_DMA_DMASR_SOF_LATE_ERR | \
135 XILINX_DMA_DMASR_SG_DEC_ERR | \
136 XILINX_DMA_DMASR_SG_SLV_ERR | \
137 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
138 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
139 XILINX_DMA_DMASR_DMA_DEC_ERR | \
140 XILINX_DMA_DMASR_DMA_SLAVE_ERR | \
141 XILINX_DMA_DMASR_DMA_INT_ERR)
144 * Recoverable errors are DMA Internal error, SOF Early, EOF Early
145 * and SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC
146 * is enabled in the h/w system.
148 #define XILINX_DMA_DMASR_ERR_RECOVER_MASK \
149 (XILINX_DMA_DMASR_SOF_LATE_ERR | \
150 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
151 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
152 XILINX_DMA_DMASR_DMA_INT_ERR)
154 /* Axi VDMA Flush on Fsync bits */
155 #define XILINX_DMA_FLUSH_S2MM 3
156 #define XILINX_DMA_FLUSH_MM2S 2
157 #define XILINX_DMA_FLUSH_BOTH 1
159 /* Delay loop counter to prevent hardware failure */
160 #define XILINX_DMA_LOOP_COUNT 1000000
162 /* AXI DMA Specific Registers/Offsets */
163 #define XILINX_DMA_REG_SRCDSTADDR 0x18
164 #define XILINX_DMA_REG_BTT 0x28
166 /* AXI DMA Specific Masks/Bit fields */
167 #define XILINX_DMA_MAX_TRANS_LEN_MIN 8
168 #define XILINX_DMA_MAX_TRANS_LEN_MAX 23
169 #define XILINX_DMA_V2_MAX_TRANS_LEN_MAX 26
170 #define XILINX_DMA_CR_COALESCE_MAX GENMASK(23, 16)
171 #define XILINX_DMA_CR_CYCLIC_BD_EN_MASK BIT(4)
172 #define XILINX_DMA_CR_COALESCE_SHIFT 16
173 #define XILINX_DMA_BD_SOP BIT(27)
174 #define XILINX_DMA_BD_EOP BIT(26)
175 #define XILINX_DMA_COALESCE_MAX 255
176 #define XILINX_DMA_NUM_DESCS 255
177 #define XILINX_DMA_NUM_APP_WORDS 5
179 /* Multi-Channel DMA Descriptor offsets */
180 #define XILINX_DMA_MCRX_CDESC(x) (0x40 + (x-1) * 0x20)
181 #define XILINX_DMA_MCRX_TDESC(x) (0x48 + (x-1) * 0x20)
183 /* Multi-Channel DMA Masks/Shifts */
184 #define XILINX_DMA_BD_HSIZE_MASK GENMASK(15, 0)
185 #define XILINX_DMA_BD_STRIDE_MASK GENMASK(15, 0)
186 #define XILINX_DMA_BD_VSIZE_MASK GENMASK(31, 19)
187 #define XILINX_DMA_BD_TDEST_MASK GENMASK(4, 0)
188 #define XILINX_DMA_BD_STRIDE_SHIFT 0
189 #define XILINX_DMA_BD_VSIZE_SHIFT 19
191 /* AXI CDMA Specific Registers/Offsets */
192 #define XILINX_CDMA_REG_SRCADDR 0x18
193 #define XILINX_CDMA_REG_DSTADDR 0x20
195 /* AXI CDMA Specific Masks */
196 #define XILINX_CDMA_CR_SGMODE BIT(3)
199 * struct xilinx_vdma_desc_hw - Hardware Descriptor
200 * @next_desc: Next Descriptor Pointer @0x00
201 * @pad1: Reserved @0x04
202 * @buf_addr: Buffer address @0x08
203 * @buf_addr_msb: MSB of Buffer address @0x0C
204 * @vsize: Vertical Size @0x10
205 * @hsize: Horizontal Size @0x14
206 * @stride: Number of bytes between the first
207 * pixels of each horizontal line @0x18
209 struct xilinx_vdma_desc_hw {
220 * struct xilinx_axidma_desc_hw - Hardware Descriptor for AXI DMA
221 * @next_desc: Next Descriptor Pointer @0x00
222 * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
223 * @buf_addr: Buffer address @0x08
224 * @buf_addr_msb: MSB of Buffer address @0x0C
225 * @mcdma_control: Control field for mcdma @0x10
226 * @vsize_stride: Vsize and Stride field for mcdma @0x14
227 * @control: Control field @0x18
228 * @status: Status field @0x1C
229 * @app: APP Fields @0x20 - 0x30
231 struct xilinx_axidma_desc_hw {
240 u32 app[XILINX_DMA_NUM_APP_WORDS];
244 * struct xilinx_cdma_desc_hw - Hardware Descriptor
245 * @next_desc: Next Descriptor Pointer @0x00
246 * @next_desc_msb: Next Descriptor Pointer MSB @0x04
247 * @src_addr: Source address @0x08
248 * @src_addr_msb: Source address MSB @0x0C
249 * @dest_addr: Destination address @0x10
250 * @dest_addr_msb: Destination address MSB @0x14
251 * @control: Control field @0x18
252 * @status: Status field @0x1C
254 struct xilinx_cdma_desc_hw {
266 * struct xilinx_vdma_tx_segment - Descriptor segment
267 * @hw: Hardware descriptor
268 * @node: Node in the descriptor segments list
269 * @phys: Physical address of segment
271 struct xilinx_vdma_tx_segment {
272 struct xilinx_vdma_desc_hw hw;
273 struct list_head node;
278 * struct xilinx_axidma_tx_segment - Descriptor segment
279 * @hw: Hardware descriptor
280 * @node: Node in the descriptor segments list
281 * @phys: Physical address of segment
283 struct xilinx_axidma_tx_segment {
284 struct xilinx_axidma_desc_hw hw;
285 struct list_head node;
290 * struct xilinx_cdma_tx_segment - Descriptor segment
291 * @hw: Hardware descriptor
292 * @node: Node in the descriptor segments list
293 * @phys: Physical address of segment
295 struct xilinx_cdma_tx_segment {
296 struct xilinx_cdma_desc_hw hw;
297 struct list_head node;
302 * struct xilinx_dma_tx_descriptor - Per Transaction structure
303 * @async_tx: Async transaction descriptor
304 * @segments: TX segments list
305 * @node: Node in the channel descriptors list
306 * @cyclic: Check for cyclic transfers.
308 struct xilinx_dma_tx_descriptor {
309 struct dma_async_tx_descriptor async_tx;
310 struct list_head segments;
311 struct list_head node;
316 * struct xilinx_dma_chan - Driver specific DMA channel structure
317 * @xdev: Driver specific device structure
318 * @ctrl_offset: Control registers offset
319 * @desc_offset: TX descriptor registers offset
320 * @lock: Descriptor operation lock
321 * @pending_list: Descriptors waiting
322 * @active_list: Descriptors ready to submit
323 * @done_list: Complete descriptors
324 * @free_seg_list: Free descriptors
325 * @common: DMA common channel
326 * @desc_pool: Descriptors pool
327 * @dev: The dma device
330 * @direction: Transfer direction
331 * @num_frms: Number of frames
332 * @has_sg: Support scatter transfers
333 * @cyclic: Check for cyclic transfers.
334 * @genlock: Support genlock mode
335 * @err: Channel has errors
336 * @idle: Check for channel idle
337 * @terminating: Check for channel being synchronized by user
338 * @tasklet: Cleanup work after irq
339 * @config: Device configuration info
340 * @flush_on_fsync: Flush on Frame sync
341 * @desc_pendingcount: Descriptor pending count
342 * @ext_addr: Indicates 64 bit addressing is supported by dma channel
343 * @desc_submitcount: Descriptor h/w submitted count
344 * @residue: Residue for AXI DMA
345 * @seg_v: Statically allocated segments base
346 * @seg_p: Physical allocated segments base
347 * @cyclic_seg_v: Statically allocated segment base for cyclic transfers
348 * @cyclic_seg_p: Physical allocated segments base for cyclic dma
349 * @start_transfer: Per-IP callback that starts a transfer on the channel
350 * @stop_transfer: Per-IP callback that quiesces the channel
351 * @tdest: TDEST value for mcdma
352 * @has_vflip: S2MM vertical flip
354 struct xilinx_dma_chan {
355 struct xilinx_dma_device *xdev;
359 struct list_head pending_list;
360 struct list_head active_list;
361 struct list_head done_list;
362 struct list_head free_seg_list;
363 struct dma_chan common;
364 struct dma_pool *desc_pool;
368 enum dma_transfer_direction direction;
376 struct tasklet_struct tasklet;
377 struct xilinx_vdma_config config;
379 u32 desc_pendingcount;
381 u32 desc_submitcount;
383 struct xilinx_axidma_tx_segment *seg_v;
385 struct xilinx_axidma_tx_segment *cyclic_seg_v;
386 dma_addr_t cyclic_seg_p;
387 void (*start_transfer)(struct xilinx_dma_chan *chan);
388 int (*stop_transfer)(struct xilinx_dma_chan *chan);
394 * enum xdma_ip_type - DMA IP type.
396 * @XDMA_TYPE_AXIDMA: Axi dma ip.
397 * @XDMA_TYPE_CDMA: Axi cdma ip.
398 * @XDMA_TYPE_VDMA: Axi vdma ip.
402 XDMA_TYPE_AXIDMA = 0,
407 struct xilinx_dma_config {
408 enum xdma_ip_type dmatype;
409 int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk,
410 struct clk **tx_clk, struct clk **txs_clk,
411 struct clk **rx_clk, struct clk **rxs_clk);
415 * struct xilinx_dma_device - DMA device structure
416 * @regs: I/O mapped base address
417 * @dev: Device Structure
418 * @common: DMA device structure
419 * @chan: Driver specific DMA channel
420 * @has_sg: Specifies whether Scatter-Gather is present or not
421 * @mcdma: Specifies whether Multi-Channel is present or not
422 * @flush_on_fsync: Flush on frame sync
423 * @ext_addr: Indicates 64 bit addressing is supported by dma device
424 * @pdev: Platform device structure pointer
425 * @dma_config: DMA config structure
426 * @axi_clk: DMA Axi4-lite interface clock
427 * @tx_clk: DMA mm2s clock
428 * @txs_clk: DMA mm2s stream clock
429 * @rx_clk: DMA s2mm clock
430 * @rxs_clk: DMA s2mm stream clock
431 * @nr_channels: Number of channels DMA device supports
432 * @chan_id: DMA channel identifier
433 * @max_buffer_len: Max buffer length
435 struct xilinx_dma_device {
438 struct dma_device common;
439 struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE];
444 struct platform_device *pdev;
445 const struct xilinx_dma_config *dma_config;
457 #define to_xilinx_chan(chan) \
458 container_of(chan, struct xilinx_dma_chan, common)
459 #define to_dma_tx_descriptor(tx) \
460 container_of(tx, struct xilinx_dma_tx_descriptor, async_tx)
461 #define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \
462 readl_poll_timeout_atomic(chan->xdev->regs + chan->ctrl_offset + reg, \
463 val, cond, delay_us, timeout_us)
466 static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg)
468 return ioread32(chan->xdev->regs + reg);
471 static inline void dma_write(struct xilinx_dma_chan *chan, u32 reg, u32 value)
473 iowrite32(value, chan->xdev->regs + reg);
476 static inline void vdma_desc_write(struct xilinx_dma_chan *chan, u32 reg,
479 dma_write(chan, chan->desc_offset + reg, value);
482 static inline u32 dma_ctrl_read(struct xilinx_dma_chan *chan, u32 reg)
484 return dma_read(chan, chan->ctrl_offset + reg);
487 static inline void dma_ctrl_write(struct xilinx_dma_chan *chan, u32 reg,
490 dma_write(chan, chan->ctrl_offset + reg, value);
493 static inline void dma_ctrl_clr(struct xilinx_dma_chan *chan, u32 reg,
496 dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) & ~clr);
499 static inline void dma_ctrl_set(struct xilinx_dma_chan *chan, u32 reg,
502 dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) | set);
506 * vdma_desc_write_64 - 64-bit descriptor write
507 * @chan: Driver specific VDMA channel
508 * @reg: Register to write
509 * @value_lsb: lower address of the descriptor.
510 * @value_msb: upper address of the descriptor.
512 * Since the vdma driver writes to register offsets that are not 64-bit
513 * aligned (e.g. 0x5c), the value is written as two separate 32-bit writes
514 * instead of a single 64-bit register write.
516 static inline void vdma_desc_write_64(struct xilinx_dma_chan *chan, u32 reg,
517 u32 value_lsb, u32 value_msb)
519 /* Write the lsb 32 bits*/
520 writel(value_lsb, chan->xdev->regs + chan->desc_offset + reg);
522 /* Write the msb 32 bits */
523 writel(value_msb, chan->xdev->regs + chan->desc_offset + reg + 4);
526 static inline void dma_writeq(struct xilinx_dma_chan *chan, u32 reg, u64 value)
528 lo_hi_writeq(value, chan->xdev->regs + chan->ctrl_offset + reg);
531 static inline void xilinx_write(struct xilinx_dma_chan *chan, u32 reg,
535 dma_writeq(chan, reg, addr);
537 dma_ctrl_write(chan, reg, addr);
540 static inline void xilinx_axidma_buf(struct xilinx_dma_chan *chan,
541 struct xilinx_axidma_desc_hw *hw,
542 dma_addr_t buf_addr, size_t sg_used,
545 if (chan->ext_addr) {
546 hw->buf_addr = lower_32_bits(buf_addr + sg_used + period_len);
547 hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used +
550 hw->buf_addr = buf_addr + sg_used + period_len;
554 /* -----------------------------------------------------------------------------
555 * Descriptors and segments alloc and free
559 * xilinx_vdma_alloc_tx_segment - Allocate transaction segment
560 * @chan: Driver specific DMA channel
562 * Return: The allocated segment on success and NULL on failure.
564 static struct xilinx_vdma_tx_segment *
565 xilinx_vdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
567 struct xilinx_vdma_tx_segment *segment;
570 segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
574 segment->phys = phys;
580 * xilinx_cdma_alloc_tx_segment - Allocate transaction segment
581 * @chan: Driver specific DMA channel
583 * Return: The allocated segment on success and NULL on failure.
585 static struct xilinx_cdma_tx_segment *
586 xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
588 struct xilinx_cdma_tx_segment *segment;
591 segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
595 segment->phys = phys;
601 * xilinx_axidma_alloc_tx_segment - Allocate transaction segment
602 * @chan: Driver specific DMA channel
604 * Return: The allocated segment on success and NULL on failure.
606 static struct xilinx_axidma_tx_segment *
607 xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
609 struct xilinx_axidma_tx_segment *segment = NULL;
612 spin_lock_irqsave(&chan->lock, flags);
613 if (!list_empty(&chan->free_seg_list)) {
614 segment = list_first_entry(&chan->free_seg_list,
615 struct xilinx_axidma_tx_segment,
617 list_del(&segment->node);
619 spin_unlock_irqrestore(&chan->lock, flags);
624 static void xilinx_dma_clean_hw_desc(struct xilinx_axidma_desc_hw *hw)
626 u32 next_desc = hw->next_desc;
627 u32 next_desc_msb = hw->next_desc_msb;
629 memset(hw, 0, sizeof(struct xilinx_axidma_desc_hw));
631 hw->next_desc = next_desc;
632 hw->next_desc_msb = next_desc_msb;
636 * xilinx_dma_free_tx_segment - Free transaction segment
637 * @chan: Driver specific DMA channel
638 * @segment: DMA transaction segment
640 static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan,
641 struct xilinx_axidma_tx_segment *segment)
643 xilinx_dma_clean_hw_desc(&segment->hw);
645 list_add_tail(&segment->node, &chan->free_seg_list);
649 * xilinx_cdma_free_tx_segment - Free transaction segment
650 * @chan: Driver specific DMA channel
651 * @segment: DMA transaction segment
653 static void xilinx_cdma_free_tx_segment(struct xilinx_dma_chan *chan,
654 struct xilinx_cdma_tx_segment *segment)
656 dma_pool_free(chan->desc_pool, segment, segment->phys);
660 * xilinx_vdma_free_tx_segment - Free transaction segment
661 * @chan: Driver specific DMA channel
662 * @segment: DMA transaction segment
664 static void xilinx_vdma_free_tx_segment(struct xilinx_dma_chan *chan,
665 struct xilinx_vdma_tx_segment *segment)
667 dma_pool_free(chan->desc_pool, segment, segment->phys);
671 * xilinx_dma_alloc_tx_descriptor - Allocate transaction descriptor
672 * @chan: Driver specific DMA channel
674 * Return: The allocated descriptor on success and NULL on failure.
676 static struct xilinx_dma_tx_descriptor *
677 xilinx_dma_alloc_tx_descriptor(struct xilinx_dma_chan *chan)
679 struct xilinx_dma_tx_descriptor *desc;
681 desc = kzalloc(sizeof(*desc), GFP_KERNEL);
685 INIT_LIST_HEAD(&desc->segments);
691 * xilinx_dma_free_tx_descriptor - Free transaction descriptor
692 * @chan: Driver specific DMA channel
693 * @desc: DMA transaction descriptor
696 xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan,
697 struct xilinx_dma_tx_descriptor *desc)
699 struct xilinx_vdma_tx_segment *segment, *next;
700 struct xilinx_cdma_tx_segment *cdma_segment, *cdma_next;
701 struct xilinx_axidma_tx_segment *axidma_segment, *axidma_next;
706 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
707 list_for_each_entry_safe(segment, next, &desc->segments, node) {
708 list_del(&segment->node);
709 xilinx_vdma_free_tx_segment(chan, segment);
711 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
712 list_for_each_entry_safe(cdma_segment, cdma_next,
713 &desc->segments, node) {
714 list_del(&cdma_segment->node);
715 xilinx_cdma_free_tx_segment(chan, cdma_segment);
718 list_for_each_entry_safe(axidma_segment, axidma_next,
719 &desc->segments, node) {
720 list_del(&axidma_segment->node);
721 xilinx_dma_free_tx_segment(chan, axidma_segment);
728 /* Required functions */
731 * xilinx_dma_free_desc_list - Free descriptors list
732 * @chan: Driver specific DMA channel
733 * @list: List to parse and delete the descriptor
735 static void xilinx_dma_free_desc_list(struct xilinx_dma_chan *chan,
736 struct list_head *list)
738 struct xilinx_dma_tx_descriptor *desc, *next;
740 list_for_each_entry_safe(desc, next, list, node) {
741 list_del(&desc->node);
742 xilinx_dma_free_tx_descriptor(chan, desc);
747 * xilinx_dma_free_descriptors - Free channel descriptors
748 * @chan: Driver specific DMA channel
750 static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan)
754 spin_lock_irqsave(&chan->lock, flags);
756 xilinx_dma_free_desc_list(chan, &chan->pending_list);
757 xilinx_dma_free_desc_list(chan, &chan->done_list);
758 xilinx_dma_free_desc_list(chan, &chan->active_list);
760 spin_unlock_irqrestore(&chan->lock, flags);
764 * xilinx_dma_free_chan_resources - Free channel resources
765 * @dchan: DMA channel
767 static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
769 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
772 dev_dbg(chan->dev, "Free all channel resources.\n");
774 xilinx_dma_free_descriptors(chan);
776 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
777 spin_lock_irqsave(&chan->lock, flags);
778 INIT_LIST_HEAD(&chan->free_seg_list);
779 spin_unlock_irqrestore(&chan->lock, flags);
781 /* Free memory that is allocated for BD */
782 dma_free_coherent(chan->dev, sizeof(*chan->seg_v) *
783 XILINX_DMA_NUM_DESCS, chan->seg_v,
786 /* Free memory that is allocated for cyclic DMA mode */
787 dma_free_coherent(chan->dev, sizeof(*chan->cyclic_seg_v),
788 chan->cyclic_seg_v, chan->cyclic_seg_p);
791 if (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA) {
792 dma_pool_destroy(chan->desc_pool);
793 chan->desc_pool = NULL;
798 * xilinx_dma_chan_handle_cyclic - Cyclic dma callback
799 * @chan: Driver specific dma channel
800 * @desc: dma transaction descriptor
801 * @flags: flags for spin lock
803 static void xilinx_dma_chan_handle_cyclic(struct xilinx_dma_chan *chan,
804 struct xilinx_dma_tx_descriptor *desc,
805 unsigned long *flags)
807 dma_async_tx_callback callback;
808 void *callback_param;
810 callback = desc->async_tx.callback;
811 callback_param = desc->async_tx.callback_param;
813 spin_unlock_irqrestore(&chan->lock, *flags);
814 callback(callback_param);
815 spin_lock_irqsave(&chan->lock, *flags);
820 * xilinx_dma_chan_desc_cleanup - Clean channel descriptors
821 * @chan: Driver specific DMA channel
823 static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
825 struct xilinx_dma_tx_descriptor *desc, *next;
828 spin_lock_irqsave(&chan->lock, flags);
830 list_for_each_entry_safe(desc, next, &chan->done_list, node) {
831 struct dmaengine_desc_callback cb;
834 xilinx_dma_chan_handle_cyclic(chan, desc, &flags);
838 /* Remove from the list of running transactions */
839 list_del(&desc->node);
841 /* Run the link descriptor callback function */
842 dmaengine_desc_get_callback(&desc->async_tx, &cb);
843 if (dmaengine_desc_callback_valid(&cb)) {
844 spin_unlock_irqrestore(&chan->lock, flags);
845 dmaengine_desc_callback_invoke(&cb, NULL);
846 spin_lock_irqsave(&chan->lock, flags);
849 /* Run any dependencies, then free the descriptor */
850 dma_run_dependencies(&desc->async_tx);
851 xilinx_dma_free_tx_descriptor(chan, desc);
854 * While we ran a callback the user called a terminate function,
855 * which takes care of cleaning up any remaining descriptors
857 if (chan->terminating)
861 spin_unlock_irqrestore(&chan->lock, flags);
865 * xilinx_dma_do_tasklet - Schedule completion tasklet
866 * @data: Pointer to the Xilinx DMA channel structure
868 static void xilinx_dma_do_tasklet(unsigned long data)
870 struct xilinx_dma_chan *chan = (struct xilinx_dma_chan *)data;
872 xilinx_dma_chan_desc_cleanup(chan);
876 * xilinx_dma_alloc_chan_resources - Allocate channel resources
877 * @dchan: DMA channel
879 * Return: '0' on success and failure value on error
881 static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
883 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
886 /* Has this channel already been allocated? */
891 * The descriptor must be aligned to 64 bytes to meet the
892 * Xilinx VDMA specification requirement.
894 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
895 /* Allocate the buffer descriptors. */
896 chan->seg_v = dma_zalloc_coherent(chan->dev,
897 sizeof(*chan->seg_v) *
898 XILINX_DMA_NUM_DESCS,
899 &chan->seg_p, GFP_KERNEL);
902 "unable to allocate channel %d descriptors\n",
907 for (i = 0; i < XILINX_DMA_NUM_DESCS; i++) {
908 chan->seg_v[i].hw.next_desc =
909 lower_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
910 ((i + 1) % XILINX_DMA_NUM_DESCS));
911 chan->seg_v[i].hw.next_desc_msb =
912 upper_32_bits(chan->seg_p + sizeof(*chan->seg_v) *
913 ((i + 1) % XILINX_DMA_NUM_DESCS));
914 chan->seg_v[i].phys = chan->seg_p +
915 sizeof(*chan->seg_v) * i;
916 list_add_tail(&chan->seg_v[i].node,
917 &chan->free_seg_list);
919 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
920 chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool",
922 sizeof(struct xilinx_cdma_tx_segment),
923 __alignof__(struct xilinx_cdma_tx_segment),
926 chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool",
928 sizeof(struct xilinx_vdma_tx_segment),
929 __alignof__(struct xilinx_vdma_tx_segment),
933 if (!chan->desc_pool &&
934 (chan->xdev->dma_config->dmatype != XDMA_TYPE_AXIDMA)) {
936 "unable to allocate channel %d descriptor pool\n",
941 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
943 * For cyclic DMA mode the tail descriptor register must be programmed
944 * with a value that is not part of the BD chain, so allocate a
945 * descriptor segment at channel allocation time for programming the
946 * tail descriptor.
948 chan->cyclic_seg_v = dma_zalloc_coherent(chan->dev,
949 sizeof(*chan->cyclic_seg_v),
950 &chan->cyclic_seg_p, GFP_KERNEL);
951 if (!chan->cyclic_seg_v) {
953 "unable to allocate desc segment for cyclic DMA\n");
956 chan->cyclic_seg_v->phys = chan->cyclic_seg_p;
959 dma_cookie_init(dchan);
961 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
962 /* For AXI DMA, resetting one channel resets the other
963 * channel as well, so enable the interrupts here.
965 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
966 XILINX_DMA_DMAXR_ALL_IRQ_MASK);
969 if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
970 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
971 XILINX_CDMA_CR_SGMODE);
977 * xilinx_dma_calc_copysize - Calculate the amount of data to copy
978 * @chan: Driver specific DMA channel
979 * @size: Total data that needs to be copied
980 * @done: Amount of data that has been already copied
982 * Return: Amount of data that has to be copied
984 static int xilinx_dma_calc_copysize(struct xilinx_dma_chan *chan,
989 copy = min_t(size_t, size - done,
990 chan->xdev->max_buffer_len);
996 * xilinx_dma_tx_status - Get DMA transaction status
997 * @dchan: DMA channel
998 * @cookie: Transaction identifier
999 * @txstate: Transaction state
1001 * Return: DMA transaction status
1003 static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
1004 dma_cookie_t cookie,
1005 struct dma_tx_state *txstate)
1007 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1008 struct xilinx_dma_tx_descriptor *desc;
1009 struct xilinx_axidma_tx_segment *segment;
1010 struct xilinx_axidma_desc_hw *hw;
1011 enum dma_status ret;
1012 unsigned long flags;
1015 ret = dma_cookie_status(dchan, cookie, txstate);
1016 if (ret == DMA_COMPLETE || !txstate)
1019 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
1020 spin_lock_irqsave(&chan->lock, flags);
1022 desc = list_last_entry(&chan->active_list,
1023 struct xilinx_dma_tx_descriptor, node);
1025 list_for_each_entry(segment, &desc->segments, node) {
1027 residue += (hw->control - hw->status) &
1028 chan->xdev->max_buffer_len;
1031 spin_unlock_irqrestore(&chan->lock, flags);
1033 chan->residue = residue;
1034 dma_set_residue(txstate, chan->residue);
1041 * xilinx_dma_stop_transfer - Halt DMA channel
1042 * @chan: Driver specific DMA channel
1044 * Return: '0' on success and failure value on error
1046 static int xilinx_dma_stop_transfer(struct xilinx_dma_chan *chan)
1050 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);
1052 /* Wait for the hardware to halt */
1053 return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
1054 val & XILINX_DMA_DMASR_HALTED, 0,
1055 XILINX_DMA_LOOP_COUNT);
1059 * xilinx_cdma_stop_transfer - Wait for the current transfer to complete
1060 * @chan: Driver specific DMA channel
1062 * Return: '0' on success and failure value on error
1064 static int xilinx_cdma_stop_transfer(struct xilinx_dma_chan *chan)
1068 return xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
1069 val & XILINX_DMA_DMASR_IDLE, 0,
1070 XILINX_DMA_LOOP_COUNT);
1074 * xilinx_dma_start - Start DMA channel
1075 * @chan: Driver specific DMA channel
1077 static void xilinx_dma_start(struct xilinx_dma_chan *chan)
1082 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);
1084 /* Wait for the hardware to start */
1085 err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
1086 !(val & XILINX_DMA_DMASR_HALTED), 0,
1087 XILINX_DMA_LOOP_COUNT);
1090 dev_err(chan->dev, "Cannot start channel %p: %x\n",
1091 chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
1098 * xilinx_vdma_start_transfer - Starts VDMA transfer
1099 * @chan: Driver specific channel struct pointer
1101 static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
1103 struct xilinx_vdma_config *config = &chan->config;
1104 struct xilinx_dma_tx_descriptor *desc, *tail_desc;
1106 struct xilinx_vdma_tx_segment *tail_segment;
1108 /* This function was invoked with lock held */
1115 if (list_empty(&chan->pending_list))
1118 desc = list_first_entry(&chan->pending_list,
1119 struct xilinx_dma_tx_descriptor, node);
1120 tail_desc = list_last_entry(&chan->pending_list,
1121 struct xilinx_dma_tx_descriptor, node);
1123 tail_segment = list_last_entry(&tail_desc->segments,
1124 struct xilinx_vdma_tx_segment, node);
1127 * If hardware is idle, then all descriptors on the running lists are
1128 * done, so start new transfers
1131 dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
1132 desc->async_tx.phys);
1134 /* Configure the hardware using info in the config structure */
1135 if (chan->has_vflip) {
1136 reg = dma_read(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP);
1137 reg &= ~XILINX_VDMA_ENABLE_VERTICAL_FLIP;
1138 reg |= config->vflip_en;
1139 dma_write(chan, XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP,
1143 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
1145 if (config->frm_cnt_en)
1146 reg |= XILINX_DMA_DMACR_FRAMECNT_EN;
1148 reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN;
1151 * With SG, start with circular mode, so that BDs can be fetched.
1152 * In direct register mode, if not parking, enable circular mode
1154 if (chan->has_sg || !config->park)
1155 reg |= XILINX_DMA_DMACR_CIRC_EN;
1158 reg &= ~XILINX_DMA_DMACR_CIRC_EN;
1160 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
1162 j = chan->desc_submitcount;
1163 reg = dma_read(chan, XILINX_DMA_REG_PARK_PTR);
1164 if (chan->direction == DMA_MEM_TO_DEV) {
1165 reg &= ~XILINX_DMA_PARK_PTR_RD_REF_MASK;
1166 reg |= j << XILINX_DMA_PARK_PTR_RD_REF_SHIFT;
1168 reg &= ~XILINX_DMA_PARK_PTR_WR_REF_MASK;
1169 reg |= j << XILINX_DMA_PARK_PTR_WR_REF_SHIFT;
1171 dma_write(chan, XILINX_DMA_REG_PARK_PTR, reg);
1173 /* Start the hardware */
1174 xilinx_dma_start(chan);
1179 /* Start the transfer */
1181 dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
1182 tail_segment->phys);
1183 list_splice_tail_init(&chan->pending_list, &chan->active_list);
1184 chan->desc_pendingcount = 0;
1186 struct xilinx_vdma_tx_segment *segment, *last = NULL;
1189 if (chan->desc_submitcount < chan->num_frms)
1190 i = chan->desc_submitcount;
1192 list_for_each_entry(segment, &desc->segments, node) {
1194 vdma_desc_write_64(chan,
1195 XILINX_VDMA_REG_START_ADDRESS_64(i++),
1196 segment->hw.buf_addr,
1197 segment->hw.buf_addr_msb);
1199 vdma_desc_write(chan,
1200 XILINX_VDMA_REG_START_ADDRESS(i++),
1201 segment->hw.buf_addr);
1209 /* HW expects these parameters to be the same for one transaction */
1210 vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize);
1211 vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
1213 vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
1215 chan->desc_submitcount++;
1216 chan->desc_pendingcount--;
1217 list_del(&desc->node);
1218 list_add_tail(&desc->node, &chan->active_list);
1219 if (chan->desc_submitcount == chan->num_frms)
1220 chan->desc_submitcount = 0;
1227 * xilinx_cdma_start_transfer - Starts cdma transfer
1228 * @chan: Driver specific channel struct pointer
1230 static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
1232 struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
1233 struct xilinx_cdma_tx_segment *tail_segment;
1234 u32 ctrl_reg = dma_read(chan, XILINX_DMA_REG_DMACR);
1242 if (list_empty(&chan->pending_list))
1245 head_desc = list_first_entry(&chan->pending_list,
1246 struct xilinx_dma_tx_descriptor, node);
1247 tail_desc = list_last_entry(&chan->pending_list,
1248 struct xilinx_dma_tx_descriptor, node);
1249 tail_segment = list_last_entry(&tail_desc->segments,
1250 struct xilinx_cdma_tx_segment, node);
1252 if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
1253 ctrl_reg &= ~XILINX_DMA_CR_COALESCE_MAX;
1254 ctrl_reg |= chan->desc_pendingcount <<
1255 XILINX_DMA_CR_COALESCE_SHIFT;
1256 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, ctrl_reg);
1260 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
1261 XILINX_CDMA_CR_SGMODE);
1263 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
1264 XILINX_CDMA_CR_SGMODE);
1266 xilinx_write(chan, XILINX_DMA_REG_CURDESC,
1267 head_desc->async_tx.phys);
1269 /* Update tail ptr register which will start the transfer */
1270 xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1271 tail_segment->phys);
1273 /* In simple mode */
1274 struct xilinx_cdma_tx_segment *segment;
1275 struct xilinx_cdma_desc_hw *hw;
1277 segment = list_first_entry(&head_desc->segments,
1278 struct xilinx_cdma_tx_segment,
1283 xilinx_write(chan, XILINX_CDMA_REG_SRCADDR, hw->src_addr);
1284 xilinx_write(chan, XILINX_CDMA_REG_DSTADDR, hw->dest_addr);
1286 /* Start the transfer */
1287 dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
1288 hw->control & chan->xdev->max_buffer_len);
1291 list_splice_tail_init(&chan->pending_list, &chan->active_list);
1292 chan->desc_pendingcount = 0;
1297 * xilinx_dma_start_transfer - Starts DMA transfer
1298 * @chan: Driver specific channel struct pointer
1300 static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
1302 struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
1303 struct xilinx_axidma_tx_segment *tail_segment;
1309 if (list_empty(&chan->pending_list))
1315 head_desc = list_first_entry(&chan->pending_list,
1316 struct xilinx_dma_tx_descriptor, node);
1317 tail_desc = list_last_entry(&chan->pending_list,
1318 struct xilinx_dma_tx_descriptor, node);
1319 tail_segment = list_last_entry(&tail_desc->segments,
1320 struct xilinx_axidma_tx_segment, node);
1322 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
1324 if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
1325 reg &= ~XILINX_DMA_CR_COALESCE_MAX;
1326 reg |= chan->desc_pendingcount <<
1327 XILINX_DMA_CR_COALESCE_SHIFT;
1328 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
1331 if (chan->has_sg && !chan->xdev->mcdma)
1332 xilinx_write(chan, XILINX_DMA_REG_CURDESC,
1333 head_desc->async_tx.phys);
1335 if (chan->has_sg && chan->xdev->mcdma) {
1336 if (chan->direction == DMA_MEM_TO_DEV) {
1337 dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
1338 head_desc->async_tx.phys);
1341 dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
1342 head_desc->async_tx.phys);
1344 dma_ctrl_write(chan,
1345 XILINX_DMA_MCRX_CDESC(chan->tdest),
1346 head_desc->async_tx.phys);
1351 xilinx_dma_start(chan);
1356 /* Start the transfer */
1357 if (chan->has_sg && !chan->xdev->mcdma) {
1359 xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1360 chan->cyclic_seg_v->phys);
1362 xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1363 tail_segment->phys);
1364 } else if (chan->has_sg && chan->xdev->mcdma) {
1365 if (chan->direction == DMA_MEM_TO_DEV) {
1366 dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
1367 tail_segment->phys);
1370 dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
1371 tail_segment->phys);
1373 dma_ctrl_write(chan,
1374 XILINX_DMA_MCRX_TDESC(chan->tdest),
1375 tail_segment->phys);
1379 struct xilinx_axidma_tx_segment *segment;
1380 struct xilinx_axidma_desc_hw *hw;
1382 segment = list_first_entry(&head_desc->segments,
1383 struct xilinx_axidma_tx_segment,
1387 xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR, hw->buf_addr);
1389 /* Start the transfer */
1390 dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
1391 hw->control & chan->xdev->max_buffer_len);
1394 list_splice_tail_init(&chan->pending_list, &chan->active_list);
1395 chan->desc_pendingcount = 0;
1400 * xilinx_dma_issue_pending - Issue pending transactions
1401 * @dchan: DMA channel
1403 static void xilinx_dma_issue_pending(struct dma_chan *dchan)
1405 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1406 unsigned long flags;
1408 spin_lock_irqsave(&chan->lock, flags);
1409 chan->start_transfer(chan);
1410 spin_unlock_irqrestore(&chan->lock, flags);
1414 * xilinx_dma_complete_descriptor - Mark the active descriptor as complete
1415 * @chan : xilinx DMA channel
1419 static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan)
1421 struct xilinx_dma_tx_descriptor *desc, *next;
1423 /* This function was invoked with lock held */
1424 if (list_empty(&chan->active_list))
1427 list_for_each_entry_safe(desc, next, &chan->active_list, node) {
1428 list_del(&desc->node);
1430 dma_cookie_complete(&desc->async_tx);
1431 list_add_tail(&desc->node, &chan->done_list);
1436 * xilinx_dma_reset - Reset DMA channel
1437 * @chan: Driver specific DMA channel
1439 * Return: '0' on success and failure value on error
1441 static int xilinx_dma_reset(struct xilinx_dma_chan *chan)
1446 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RESET);
1448 /* Wait for the hardware to finish reset */
1449 err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMACR, tmp,
1450 !(tmp & XILINX_DMA_DMACR_RESET), 0,
1451 XILINX_DMA_LOOP_COUNT);
1454 dev_err(chan->dev, "reset timeout, cr %x, sr %x\n",
1455 dma_ctrl_read(chan, XILINX_DMA_REG_DMACR),
1456 dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
1462 chan->desc_pendingcount = 0;
1463 chan->desc_submitcount = 0;
1469 * xilinx_dma_chan_reset - Reset DMA channel and enable interrupts
1470 * @chan: Driver specific DMA channel
1472 * Return: '0' on success and failure value on error
1474 static int xilinx_dma_chan_reset(struct xilinx_dma_chan *chan)
1479 err = xilinx_dma_reset(chan);
1483 /* Enable interrupts */
1484 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
1485 XILINX_DMA_DMAXR_ALL_IRQ_MASK);
1491 * xilinx_dma_irq_handler - DMA Interrupt handler
1493 * @data: Pointer to the Xilinx DMA channel structure
1495 * Return: IRQ_HANDLED/IRQ_NONE
1497 static irqreturn_t xilinx_dma_irq_handler(int irq, void *data)
1499 struct xilinx_dma_chan *chan = data;
1502 /* Read the status and ack the interrupts. */
1503 status = dma_ctrl_read(chan, XILINX_DMA_REG_DMASR);
1504 if (!(status & XILINX_DMA_DMAXR_ALL_IRQ_MASK))
1507 dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
1508 status & XILINX_DMA_DMAXR_ALL_IRQ_MASK);
1510 if (status & XILINX_DMA_DMASR_ERR_IRQ) {
1512 * An error occurred. If C_FLUSH_ON_FSYNC is enabled and the
1513 * error is recoverable, ignore it. Otherwise flag the error.
1515 * Only recoverable errors can be cleared in the DMASR register, so
1516 * make sure not to write 1 to any other error bits.
1518 u32 errors = status & XILINX_DMA_DMASR_ALL_ERR_MASK;
1520 dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
1521 errors & XILINX_DMA_DMASR_ERR_RECOVER_MASK);
1523 if (!chan->flush_on_fsync ||
1524 (errors & ~XILINX_DMA_DMASR_ERR_RECOVER_MASK)) {
1526 "Channel %p has errors %x, cdr %x tdr %x\n",
1528 dma_ctrl_read(chan, XILINX_DMA_REG_CURDESC),
1529 dma_ctrl_read(chan, XILINX_DMA_REG_TAILDESC));
1534 if (status & XILINX_DMA_DMASR_DLY_CNT_IRQ) {
1536 * Device takes too long to do the transfer when user requires responsiveness.
1539 dev_dbg(chan->dev, "Inter-packet latency too long\n");
1542 if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) {
1543 spin_lock(&chan->lock);
1544 xilinx_dma_complete_descriptor(chan);
1546 chan->start_transfer(chan);
1547 spin_unlock(&chan->lock);
1550 tasklet_schedule(&chan->tasklet);
1555 * append_desc_queue - Append a descriptor to the pending list
1556 * @chan: Driver specific dma channel
1557 * @desc: dma transaction descriptor
1559 static void append_desc_queue(struct xilinx_dma_chan *chan,
1560 struct xilinx_dma_tx_descriptor *desc)
1562 struct xilinx_vdma_tx_segment *tail_segment;
1563 struct xilinx_dma_tx_descriptor *tail_desc;
1564 struct xilinx_axidma_tx_segment *axidma_tail_segment;
1565 struct xilinx_cdma_tx_segment *cdma_tail_segment;
1567 if (list_empty(&chan->pending_list))
1571 * Add the hardware descriptor to the chain of hardware descriptors
1572 * that already exists in memory.
1574 tail_desc = list_last_entry(&chan->pending_list,
1575 struct xilinx_dma_tx_descriptor, node);
1576 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
1577 tail_segment = list_last_entry(&tail_desc->segments,
1578 struct xilinx_vdma_tx_segment,
1580 tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1581 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
1582 cdma_tail_segment = list_last_entry(&tail_desc->segments,
1583 struct xilinx_cdma_tx_segment,
1585 cdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1587 axidma_tail_segment = list_last_entry(&tail_desc->segments,
1588 struct xilinx_axidma_tx_segment,
1590 axidma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1594 * Add the software descriptor and all children to the list
1595 * of pending transactions
1598 list_add_tail(&desc->node, &chan->pending_list);
1599 chan->desc_pendingcount++;
1601 if (chan->has_sg && (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA)
1602 && unlikely(chan->desc_pendingcount > chan->num_frms)) {
1603 dev_dbg(chan->dev, "desc pendingcount is too high\n");
1604 chan->desc_pendingcount = chan->num_frms;
1609 * xilinx_dma_tx_submit - Submit DMA transaction
1610 * @tx: Async transaction descriptor
1612 * Return: cookie value on success and failure value on error
1614 static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
1616 struct xilinx_dma_tx_descriptor *desc = to_dma_tx_descriptor(tx);
1617 struct xilinx_dma_chan *chan = to_xilinx_chan(tx->chan);
1618 dma_cookie_t cookie;
1619 unsigned long flags;
1623 xilinx_dma_free_tx_descriptor(chan, desc);
1629 * If reset fails, need to hard reset the system.
1630 * Channel is no longer functional
1632 err = xilinx_dma_chan_reset(chan);
1637 spin_lock_irqsave(&chan->lock, flags);
1639 cookie = dma_cookie_assign(tx);
1641 /* Put this transaction onto the tail of the pending queue */
1642 append_desc_queue(chan, desc);
1645 chan->cyclic = true;
1647 chan->terminating = false;
1649 spin_unlock_irqrestore(&chan->lock, flags);
1655 * xilinx_vdma_dma_prep_interleaved - prepare a descriptor for a
1656 * DMA_SLAVE transaction
1657 * @dchan: DMA channel
1658 * @xt: Interleaved template pointer
1659 * @flags: transfer ack flags
1661 * Return: Async transaction descriptor on success and NULL on failure
1663 static struct dma_async_tx_descriptor *
1664 xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
1665 struct dma_interleaved_template *xt,
1666 unsigned long flags)
1668 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1669 struct xilinx_dma_tx_descriptor *desc;
1670 struct xilinx_vdma_tx_segment *segment;
1671 struct xilinx_vdma_desc_hw *hw;
1673 if (!is_slave_direction(xt->dir))
1676 if (!xt->numf || !xt->sgl[0].size)
1679 if (xt->frame_size != 1)
1682 /* Allocate a transaction descriptor. */
1683 desc = xilinx_dma_alloc_tx_descriptor(chan);
1687 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
1688 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
1689 async_tx_ack(&desc->async_tx);
1691 /* Allocate the link descriptor from DMA pool */
1692 segment = xilinx_vdma_alloc_tx_segment(chan);
1696 /* Fill in the hardware descriptor */
1698 hw->vsize = xt->numf;
1699 hw->hsize = xt->sgl[0].size;
1700 hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) <<
1701 XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT;
1702 hw->stride |= chan->config.frm_dly <<
1703 XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT;
1705 if (xt->dir != DMA_MEM_TO_DEV) {
1706 if (chan->ext_addr) {
1707 hw->buf_addr = lower_32_bits(xt->dst_start);
1708 hw->buf_addr_msb = upper_32_bits(xt->dst_start);
1710 hw->buf_addr = xt->dst_start;
1713 if (chan->ext_addr) {
1714 hw->buf_addr = lower_32_bits(xt->src_start);
1715 hw->buf_addr_msb = upper_32_bits(xt->src_start);
1717 hw->buf_addr = xt->src_start;
1721 /* Insert the segment into the descriptor segments list. */
1722 list_add_tail(&segment->node, &desc->segments);
1724 /* Link the last hardware descriptor with the first. */
1725 segment = list_first_entry(&desc->segments,
1726 struct xilinx_vdma_tx_segment, node);
1727 desc->async_tx.phys = segment->phys;
1729 return &desc->async_tx;
1732 xilinx_dma_free_tx_descriptor(chan, desc);
1737 * xilinx_cdma_prep_memcpy - prepare descriptors for a memcpy transaction
1738 * @dchan: DMA channel
1739 * @dma_dst: destination address
1740 * @dma_src: source address
1741 * @len: transfer length
1742 * @flags: transfer ack flags
1744 * Return: Async transaction descriptor on success and NULL on failure
1746 static struct dma_async_tx_descriptor *
1747 xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
1748 dma_addr_t dma_src, size_t len, unsigned long flags)
1750 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1751 struct xilinx_dma_tx_descriptor *desc;
1752 struct xilinx_cdma_tx_segment *segment;
1753 struct xilinx_cdma_desc_hw *hw;
1755 if (!len || len > chan->xdev->max_buffer_len)
1758 desc = xilinx_dma_alloc_tx_descriptor(chan);
1762 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
1763 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
1765 /* Allocate the link descriptor from DMA pool */
1766 segment = xilinx_cdma_alloc_tx_segment(chan);
1772 hw->src_addr = dma_src;
1773 hw->dest_addr = dma_dst;
1774 if (chan->ext_addr) {
1775 hw->src_addr_msb = upper_32_bits(dma_src);
1776 hw->dest_addr_msb = upper_32_bits(dma_dst);
1779 /* Insert the segment into the descriptor segments list. */
1780 list_add_tail(&segment->node, &desc->segments);
1782 desc->async_tx.phys = segment->phys;
1783 hw->next_desc = segment->phys;
1785 return &desc->async_tx;
1788 xilinx_dma_free_tx_descriptor(chan, desc);
1793 * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
1794 * @dchan: DMA channel
1795 * @sgl: scatterlist to transfer to/from
1796 * @sg_len: number of entries in @scatterlist
1797 * @direction: DMA direction
1798 * @flags: transfer ack flags
1799 * @context: APP words of the descriptor
1801 * Return: Async transaction descriptor on success and NULL on failure
1803 static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
1804 struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
1805 enum dma_transfer_direction direction, unsigned long flags,
1808 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1809 struct xilinx_dma_tx_descriptor *desc;
1810 struct xilinx_axidma_tx_segment *segment = NULL;
1811 u32 *app_w = (u32 *)context;
1812 struct scatterlist *sg;
1817 if (!is_slave_direction(direction))
1820 /* Allocate a transaction descriptor. */
1821 desc = xilinx_dma_alloc_tx_descriptor(chan);
1825 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
1826 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
1828 /* Build transactions using information in the scatter gather list */
1829 for_each_sg(sgl, sg, sg_len, i) {
1832 /* Loop until the entire scatterlist entry is used */
1833 while (sg_used < sg_dma_len(sg)) {
1834 struct xilinx_axidma_desc_hw *hw;
1836 /* Get a free segment */
1837 segment = xilinx_axidma_alloc_tx_segment(chan);
1842 * Calculate the maximum number of bytes to transfer,
1843 * making sure it is less than the hw limit
1845 copy = xilinx_dma_calc_copysize(chan, sg_dma_len(sg),
1849 /* Fill in the descriptor */
1850 xilinx_axidma_buf(chan, hw, sg_dma_address(sg),
1855 if (chan->direction == DMA_MEM_TO_DEV) {
1857 memcpy(hw->app, app_w, sizeof(u32) *
1858 XILINX_DMA_NUM_APP_WORDS);
1864 * Insert the segment into the descriptor segments
1867 list_add_tail(&segment->node, &desc->segments);
1871 segment = list_first_entry(&desc->segments,
1872 struct xilinx_axidma_tx_segment, node);
1873 desc->async_tx.phys = segment->phys;
1875 /* For DMA_MEM_TO_DEV, set SOP on the first and EOP on the last segment */
1876 if (chan->direction == DMA_MEM_TO_DEV) {
1877 segment->hw.control |= XILINX_DMA_BD_SOP;
1878 segment = list_last_entry(&desc->segments,
1879 struct xilinx_axidma_tx_segment,
1881 segment->hw.control |= XILINX_DMA_BD_EOP;
1884 return &desc->async_tx;
1887 xilinx_dma_free_tx_descriptor(chan, desc);
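/*
 * Illustrative consumer-side sketch (not part of this driver): a minimal
 * example of how a client might hand one buffer to the AXI DMA MM2S channel
 * through the generic dmaengine slave API that the prep_slave_sg callback
 * above implements. The function and buffer names are hypothetical; only the
 * standard dmaengine calls are assumed to exist.
 */
static int __maybe_unused example_axidma_send_buf(struct dma_chan *chan,
						  void *buf, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	struct scatterlist sg;
	dma_cookie_t cookie;

	sg_init_one(&sg, buf, len);
	if (!dma_map_sg(chan->device->dev, &sg, 1, DMA_TO_DEVICE))
		return -ENOMEM;

	/* Build the descriptor chain via the channel's prep_slave_sg callback */
	tx = dmaengine_prep_slave_sg(chan, &sg, 1, DMA_MEM_TO_DEV,
				     DMA_PREP_INTERRUPT);
	if (!tx) {
		dma_unmap_sg(chan->device->dev, &sg, 1, DMA_TO_DEVICE);
		return -EIO;
	}

	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie)) {
		dma_unmap_sg(chan->device->dev, &sg, 1, DMA_TO_DEVICE);
		return -EIO;
	}

	/* Kick issue_pending to actually start the transfer */
	dma_async_issue_pending(chan);
	return 0;
}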
1892 * xilinx_dma_prep_dma_cyclic - prepare descriptors for a cyclic DMA transaction
1893 * @dchan: DMA channel
1894 * @buf_addr: Physical address of the buffer
1895 * @buf_len: Total length of the cyclic buffers
1896 * @period_len: length of individual cyclic buffer
1897 * @direction: DMA direction
1898 * @flags: transfer ack flags
1900 * Return: Async transaction descriptor on success and NULL on failure
1902 static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
1903 struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len,
1904 size_t period_len, enum dma_transfer_direction direction,
1905 unsigned long flags)
1907 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1908 struct xilinx_dma_tx_descriptor *desc;
1909 struct xilinx_axidma_tx_segment *segment, *head_segment, *prev = NULL;
1910 size_t copy, sg_used;
1911 unsigned int num_periods;
1918 num_periods = buf_len / period_len;
1923 if (!is_slave_direction(direction))
1926 /* Allocate a transaction descriptor. */
1927 desc = xilinx_dma_alloc_tx_descriptor(chan);
1931 chan->direction = direction;
1932 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
1933 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
1935 for (i = 0; i < num_periods; ++i) {
1938 while (sg_used < period_len) {
1939 struct xilinx_axidma_desc_hw *hw;
1941 /* Get a free segment */
1942 segment = xilinx_axidma_alloc_tx_segment(chan);
1947 * Calculate the maximum number of bytes to transfer,
1948 * making sure it is less than the hw limit
1950 copy = xilinx_dma_calc_copysize(chan, period_len,
1953 xilinx_axidma_buf(chan, hw, buf_addr, sg_used,
1958 prev->hw.next_desc = segment->phys;
1964 * Insert the segment into the descriptor segments
1967 list_add_tail(&segment->node, &desc->segments);
1971 head_segment = list_first_entry(&desc->segments,
1972 struct xilinx_axidma_tx_segment, node);
1973 desc->async_tx.phys = head_segment->phys;
1975 desc->cyclic = true;
1976 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
1977 reg |= XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
1978 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
1980 segment = list_last_entry(&desc->segments,
1981 struct xilinx_axidma_tx_segment,
1983 segment->hw.next_desc = (u32) head_segment->phys;
1985 /* For DMA_MEM_TO_DEV, set SOP on the first and EOP on the last segment */
1986 if (direction == DMA_MEM_TO_DEV) {
1987 head_segment->hw.control |= XILINX_DMA_BD_SOP;
1988 segment->hw.control |= XILINX_DMA_BD_EOP;
1991 return &desc->async_tx;
1994 xilinx_dma_free_tx_descriptor(chan, desc);
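/*
 * Illustrative consumer-side sketch (not part of this driver): driving the
 * cyclic mode prepared above from a client, e.g. for a continuous capture
 * ring buffer. The completion callback is invoked from the driver's cleanup
 * tasklet. All names below are hypothetical except the dmaengine API calls.
 */
static void __maybe_unused example_period_done(void *param)
{
	/* One period of the ring buffer has been transferred */
}

static int __maybe_unused example_axidma_start_cyclic(struct dma_chan *chan,
						       dma_addr_t buf,
						       size_t buf_len,
						       size_t period_len)
{
	struct dma_async_tx_descriptor *tx;

	/* buf_len must be an integer multiple of period_len */
	tx = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
				       DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!tx)
		return -EIO;

	tx->callback = example_period_done;
	tx->callback_param = NULL;

	dmaengine_submit(tx);
	dma_async_issue_pending(chan);
	return 0;
}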
1999 * xilinx_dma_prep_interleaved - prepare a descriptor for a
2000 * DMA_SLAVE transaction
2001 * @dchan: DMA channel
2002 * @xt: Interleaved template pointer
2003 * @flags: transfer ack flags
2005 * Return: Async transaction descriptor on success and NULL on failure
2007 static struct dma_async_tx_descriptor *
2008 xilinx_dma_prep_interleaved(struct dma_chan *dchan,
2009 struct dma_interleaved_template *xt,
2010 unsigned long flags)
2012 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2013 struct xilinx_dma_tx_descriptor *desc;
2014 struct xilinx_axidma_tx_segment *segment;
2015 struct xilinx_axidma_desc_hw *hw;
2017 if (!is_slave_direction(xt->dir))
2020 if (!xt->numf || !xt->sgl[0].size)
2023 if (xt->frame_size != 1)
2026 /* Allocate a transaction descriptor. */
2027 desc = xilinx_dma_alloc_tx_descriptor(chan);
2031 chan->direction = xt->dir;
2032 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
2033 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
2035 /* Get a free segment */
2036 segment = xilinx_axidma_alloc_tx_segment(chan);
2042 /* Fill in the descriptor */
2043 if (xt->dir != DMA_MEM_TO_DEV)
2044 hw->buf_addr = xt->dst_start;
2046 hw->buf_addr = xt->src_start;
2048 hw->mcdma_control = chan->tdest & XILINX_DMA_BD_TDEST_MASK;
2049 hw->vsize_stride = (xt->numf << XILINX_DMA_BD_VSIZE_SHIFT) &
2050 XILINX_DMA_BD_VSIZE_MASK;
2051 hw->vsize_stride |= (xt->sgl[0].icg + xt->sgl[0].size) &
2052 XILINX_DMA_BD_STRIDE_MASK;
2053 hw->control = xt->sgl[0].size & XILINX_DMA_BD_HSIZE_MASK;
2056 * Insert the segment into the descriptor segments
2059 list_add_tail(&segment->node, &desc->segments);
2062 segment = list_first_entry(&desc->segments,
2063 struct xilinx_axidma_tx_segment, node);
2064 desc->async_tx.phys = segment->phys;
2066 /* For DMA_MEM_TO_DEV, set SOP on the first and EOP on the last segment */
2067 if (xt->dir == DMA_MEM_TO_DEV) {
2068 segment->hw.control |= XILINX_DMA_BD_SOP;
2069 segment = list_last_entry(&desc->segments,
2070 struct xilinx_axidma_tx_segment,
2072 segment->hw.control |= XILINX_DMA_BD_EOP;
2075 return &desc->async_tx;
2078 xilinx_dma_free_tx_descriptor(chan, desc);
2083 * xilinx_dma_terminate_all - Halt the channel and free descriptors
2084 * @dchan: Driver specific DMA Channel pointer
2086 * Return: '0' always.
2088 static int xilinx_dma_terminate_all(struct dma_chan *dchan)
2090 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2095 xilinx_dma_chan_reset(chan);
2097 err = chan->stop_transfer(chan);
2099 dev_err(chan->dev, "Cannot stop channel %p: %x\n",
2100 chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
2104 /* Remove and free all of the descriptors in the lists */
2105 chan->terminating = true;
2106 xilinx_dma_free_descriptors(chan);
2110 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
2111 reg &= ~XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
2112 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
2113 chan->cyclic = false;
2116 if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
2117 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
2118 XILINX_CDMA_CR_SGMODE);
2124 * xilinx_vdma_channel_set_config - Configure VDMA channel
2125 * Run-time configuration for Axi VDMA, supports:
2126 * . halt the channel
2127 * . configure interrupt coalescing and inter-packet delay threshold
2128 * . start/stop parking
2131 * @dchan: DMA channel
2132 * @cfg: VDMA device configuration pointer
2134 * Return: '0' on success and failure value on error
2136 int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
2137 struct xilinx_vdma_config *cfg)
2139 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2143 return xilinx_dma_chan_reset(chan);
2145 dmacr = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
2147 chan->config.frm_dly = cfg->frm_dly;
2148 chan->config.park = cfg->park;
2150 /* genlock settings */
2151 chan->config.gen_lock = cfg->gen_lock;
2152 chan->config.master = cfg->master;
2154 dmacr &= ~XILINX_DMA_DMACR_GENLOCK_EN;
2155 if (cfg->gen_lock && chan->genlock) {
2156 dmacr |= XILINX_DMA_DMACR_GENLOCK_EN;
2157 dmacr &= ~XILINX_DMA_DMACR_MASTER_MASK;
2158 dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT;
2161 chan->config.frm_cnt_en = cfg->frm_cnt_en;
2162 chan->config.vflip_en = cfg->vflip_en;
2165 chan->config.park_frm = cfg->park_frm;
2167 chan->config.park_frm = -1;
2169 chan->config.coalesc = cfg->coalesc;
2170 chan->config.delay = cfg->delay;
2172 if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) {
2173 dmacr &= ~XILINX_DMA_DMACR_FRAME_COUNT_MASK;
2174 dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT;
2175 chan->config.coalesc = cfg->coalesc;
2178 if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) {
2179 dmacr &= ~XILINX_DMA_DMACR_DELAY_MASK;
2180 dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT;
2181 chan->config.delay = cfg->delay;
2184 /* FSync Source selection */
2185 dmacr &= ~XILINX_DMA_DMACR_FSYNCSRC_MASK;
2186 dmacr |= cfg->ext_fsync << XILINX_DMA_DMACR_FSYNCSRC_SHIFT;
2188 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, dmacr);
2192 EXPORT_SYMBOL(xilinx_vdma_channel_set_config);
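
/*
 * Illustrative sketch (not part of this driver): a video client that owns a
 * VDMA channel can tune it at run time through the exported helper above.
 * The "chan" pointer and the chosen values are hypothetical examples.
 *
 *	#include <linux/dma/xilinx_dma.h>
 *
 *	struct xilinx_vdma_config cfg = {
 *		.frm_cnt_en = 1,	// raise an interrupt every .coalesc frames
 *		.coalesc = 4,
 *		.park = 0,		// keep cycling through frame buffers
 *	};
 *	int err = xilinx_vdma_channel_set_config(chan, &cfg);
 */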

/* -----------------------------------------------------------------------------
 * Probe and remove
 */

/**
 * xilinx_dma_chan_remove - Per Channel remove function
 * @chan: Driver specific DMA channel
 */
static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan)
{
	/* Disable all interrupts */
	dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
		     XILINX_DMA_DMAXR_ALL_IRQ_MASK);

	if (chan->irq > 0)
		free_irq(chan->irq, chan);

	tasklet_kill(&chan->tasklet);

	list_del(&chan->common.device_node);
}

static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
			   struct clk **tx_clk, struct clk **rx_clk,
			   struct clk **sg_clk, struct clk **tmp_clk)
{
	int err;

	*tmp_clk = NULL;

	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
	if (IS_ERR(*axi_clk)) {
		err = PTR_ERR(*axi_clk);
		dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err);
		return err;
	}

	*tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
	if (IS_ERR(*tx_clk))
		*tx_clk = NULL;

	*rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
	if (IS_ERR(*rx_clk))
		*rx_clk = NULL;

	*sg_clk = devm_clk_get(&pdev->dev, "m_axi_sg_aclk");
	if (IS_ERR(*sg_clk))
		*sg_clk = NULL;

	err = clk_prepare_enable(*axi_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
		return err;
	}

	err = clk_prepare_enable(*tx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
		goto err_disable_axiclk;
	}

	err = clk_prepare_enable(*rx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
		goto err_disable_txclk;
	}

	err = clk_prepare_enable(*sg_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable sg_clk (%d)\n", err);
		goto err_disable_rxclk;
	}

	return 0;

err_disable_rxclk:
	clk_disable_unprepare(*rx_clk);
err_disable_txclk:
	clk_disable_unprepare(*tx_clk);
err_disable_axiclk:
	clk_disable_unprepare(*axi_clk);

	return err;
}

static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
			    struct clk **dev_clk, struct clk **tmp_clk,
			    struct clk **tmp1_clk, struct clk **tmp2_clk)
{
	int err;

	*tmp_clk = NULL;
	*tmp1_clk = NULL;
	*tmp2_clk = NULL;

	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
	if (IS_ERR(*axi_clk)) {
		err = PTR_ERR(*axi_clk);
		dev_err(&pdev->dev, "failed to get axi_clk (%d)\n", err);
		return err;
	}

	*dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk");
	if (IS_ERR(*dev_clk)) {
		err = PTR_ERR(*dev_clk);
		dev_err(&pdev->dev, "failed to get dev_clk (%d)\n", err);
		return err;
	}

	err = clk_prepare_enable(*axi_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
		return err;
	}

	err = clk_prepare_enable(*dev_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable dev_clk (%d)\n", err);
		goto err_disable_axiclk;
	}

	return 0;

err_disable_axiclk:
	clk_disable_unprepare(*axi_clk);

	return err;
}

static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
			    struct clk **tx_clk, struct clk **txs_clk,
			    struct clk **rx_clk, struct clk **rxs_clk)
{
	int err;

	*axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
	if (IS_ERR(*axi_clk)) {
		err = PTR_ERR(*axi_clk);
		dev_err(&pdev->dev, "failed to get axi_aclk (%d)\n", err);
		return err;
	}

	*tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
	if (IS_ERR(*tx_clk))
		*tx_clk = NULL;

	*txs_clk = devm_clk_get(&pdev->dev, "m_axis_mm2s_aclk");
	if (IS_ERR(*txs_clk))
		*txs_clk = NULL;

	*rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
	if (IS_ERR(*rx_clk))
		*rx_clk = NULL;

	*rxs_clk = devm_clk_get(&pdev->dev, "s_axis_s2mm_aclk");
	if (IS_ERR(*rxs_clk))
		*rxs_clk = NULL;

	err = clk_prepare_enable(*axi_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable axi_clk (%d)\n", err);
		return err;
	}

	err = clk_prepare_enable(*tx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
		goto err_disable_axiclk;
	}

	err = clk_prepare_enable(*txs_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable txs_clk (%d)\n", err);
		goto err_disable_txclk;
	}

	err = clk_prepare_enable(*rx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
		goto err_disable_txsclk;
	}

	err = clk_prepare_enable(*rxs_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable rxs_clk (%d)\n", err);
		goto err_disable_rxclk;
	}

	return 0;

err_disable_rxclk:
	clk_disable_unprepare(*rx_clk);
err_disable_txsclk:
	clk_disable_unprepare(*txs_clk);
err_disable_txclk:
	clk_disable_unprepare(*tx_clk);
err_disable_axiclk:
	clk_disable_unprepare(*axi_clk);

	return err;
}

static void xdma_disable_allclks(struct xilinx_dma_device *xdev)
{
	clk_disable_unprepare(xdev->rxs_clk);
	clk_disable_unprepare(xdev->rx_clk);
	clk_disable_unprepare(xdev->txs_clk);
	clk_disable_unprepare(xdev->tx_clk);
	clk_disable_unprepare(xdev->axi_clk);
}

/**
 * xilinx_dma_chan_probe - Per Channel Probing
 * It gets channel features from the device tree entry and
 * initializes special channel handling routines
 *
 * @xdev: Driver specific device structure
 * @node: Device node
 * @chan_id: DMA Channel id
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
				 struct device_node *node, int chan_id)
{
	struct xilinx_dma_chan *chan;
	bool has_dre = false;
	u32 value, width;
	int err;

	/* Allocate and initialize the channel structure */
	chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;

	chan->dev = xdev->dev;
	chan->xdev = xdev;
	chan->has_sg = xdev->has_sg;
	chan->desc_pendingcount = 0x0;
	chan->ext_addr = xdev->ext_addr;
	/*
	 * This variable ensures that descriptors are not submitted while
	 * the DMA engine is in progress; it avoids polling a bit in the
	 * status register to know the DMA state in the driver hot path.
	 */
	chan->idle = true;

	spin_lock_init(&chan->lock);
	INIT_LIST_HEAD(&chan->pending_list);
	INIT_LIST_HEAD(&chan->done_list);
	INIT_LIST_HEAD(&chan->active_list);
	INIT_LIST_HEAD(&chan->free_seg_list);

	/* Retrieve the channel properties from the device tree */
	has_dre = of_property_read_bool(node, "xlnx,include-dre");

	chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode");

	err = of_property_read_u32(node, "xlnx,datawidth", &value);
	if (err) {
		dev_err(xdev->dev, "missing xlnx,datawidth property\n");
		return err;
	}
	width = value >> 3; /* Convert bits to bytes */

	/* If data width is greater than 8 bytes, DRE is not in hw */
	if (width > 8)
		has_dre = false;

	if (!has_dre)
		xdev->common.copy_align = (enum dmaengine_alignment)fls(width - 1);

	if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") ||
	    of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") ||
	    of_device_is_compatible(node, "xlnx,axi-cdma-channel")) {
		chan->direction = DMA_MEM_TO_DEV;
		chan->id = chan_id;
		chan->tdest = chan_id;

		chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
		if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
			chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET;
			chan->config.park = 1;

			if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
			    xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S)
				chan->flush_on_fsync = true;
		}
	} else if (of_device_is_compatible(node,
					   "xlnx,axi-vdma-s2mm-channel") ||
		   of_device_is_compatible(node,
					   "xlnx,axi-dma-s2mm-channel")) {
		chan->direction = DMA_DEV_TO_MEM;
		chan->id = chan_id;
		chan->tdest = chan_id - xdev->nr_channels;
		chan->has_vflip = of_property_read_bool(node,
					"xlnx,enable-vert-flip");
		if (chan->has_vflip) {
			chan->config.vflip_en = dma_read(chan,
				XILINX_VDMA_REG_ENABLE_VERTICAL_FLIP) &
				XILINX_VDMA_ENABLE_VERTICAL_FLIP;
		}

		chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
		if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
			chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;
			chan->config.park = 1;

			if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
			    xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM)
				chan->flush_on_fsync = true;
		}
	} else {
		dev_err(xdev->dev, "Invalid channel compatible node\n");
		return -EINVAL;
	}

	/* Request the interrupt */
	chan->irq = irq_of_parse_and_map(node, 0);
	err = request_irq(chan->irq, xilinx_dma_irq_handler, IRQF_SHARED,
			  "xilinx-dma-controller", chan);
	if (err) {
		dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq);
		return err;
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		chan->start_transfer = xilinx_dma_start_transfer;
		chan->stop_transfer = xilinx_dma_stop_transfer;
	} else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		chan->start_transfer = xilinx_cdma_start_transfer;
		chan->stop_transfer = xilinx_cdma_stop_transfer;
	} else {
		chan->start_transfer = xilinx_vdma_start_transfer;
		chan->stop_transfer = xilinx_dma_stop_transfer;
	}

	/* Initialize the tasklet */
	tasklet_init(&chan->tasklet, xilinx_dma_do_tasklet,
		     (unsigned long)chan);

	/*
	 * Initialize the DMA channel and add it to the DMA engine channels
	 * list.
	 */
	chan->common.device = &xdev->common;

	list_add_tail(&chan->common.device_node, &xdev->common.channels);
	xdev->chan[chan->id] = chan;

	/* Reset the channel */
	err = xilinx_dma_chan_reset(chan);
	if (err < 0) {
		dev_err(xdev->dev, "Reset channel failed\n");
		return err;
	}

	return 0;
}

/**
 * xilinx_dma_child_probe - Per child node probe
 * It gets the number of dma-channels per child node from
 * the device tree and initializes all the channels.
 *
 * @xdev: Driver specific device structure
 * @node: Device node
 *
 * Return: '0' always.
 */
static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
				  struct device_node *node)
{
	int ret, i;
	u32 nr_channels = 1;

	ret = of_property_read_u32(node, "dma-channels", &nr_channels);
	if ((ret < 0) && xdev->mcdma)
		dev_warn(xdev->dev, "missing dma-channels property\n");

	for (i = 0; i < nr_channels; i++)
		xilinx_dma_chan_probe(xdev, node, xdev->chan_id++);

	xdev->nr_channels += nr_channels;

	return 0;
}

/**
 * of_dma_xilinx_xlate - Translation function
 * @dma_spec: Pointer to DMA specifier as found in the device tree
 * @ofdma: Pointer to DMA controller data
 *
 * Return: DMA channel pointer on success and NULL on error
 */
static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
					    struct of_dma *ofdma)
{
	struct xilinx_dma_device *xdev = ofdma->of_dma_data;
	int chan_id = dma_spec->args[0];

	if (chan_id >= xdev->nr_channels || !xdev->chan[chan_id])
		return NULL;

	return dma_get_slave_channel(&xdev->chan[chan_id]->common);
}
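
/*
 * Illustrative client-side sketch (not part of this driver): with a device
 * tree reference such as 'dmas = <&axi_dma_0 0>; dma-names = "tx";' in the
 * client node, the cell value 0 is the chan_id decoded by
 * of_dma_xilinx_xlate() above. The "pdev" pointer and node names are
 * hypothetical.
 *
 *	struct dma_chan *chan;
 *
 *	chan = dma_request_chan(&pdev->dev, "tx");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 */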

static const struct xilinx_dma_config axidma_config = {
	.dmatype = XDMA_TYPE_AXIDMA,
	.clk_init = axidma_clk_init,
};

static const struct xilinx_dma_config axicdma_config = {
	.dmatype = XDMA_TYPE_CDMA,
	.clk_init = axicdma_clk_init,
};

static const struct xilinx_dma_config axivdma_config = {
	.dmatype = XDMA_TYPE_VDMA,
	.clk_init = axivdma_clk_init,
};

static const struct of_device_id xilinx_dma_of_ids[] = {
	{ .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config },
	{ .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config },
	{ .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config },
	{}
};
MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids);

/**
 * xilinx_dma_probe - Driver probe function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: '0' on success and failure value on error
 */
static int xilinx_dma_probe(struct platform_device *pdev)
{
	int (*clk_init)(struct platform_device *, struct clk **, struct clk **,
			struct clk **, struct clk **, struct clk **)
					= axivdma_clk_init;
	struct device_node *node = pdev->dev.of_node;
	struct xilinx_dma_device *xdev;
	struct device_node *child, *np = pdev->dev.of_node;
	struct resource *io;
	u32 num_frames, addr_width, len_width;
	int i, err;

	/* Allocate and initialize the DMA engine structure */
	xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL);
	if (!xdev)
		return -ENOMEM;

	xdev->dev = &pdev->dev;
	if (np) {
		const struct of_device_id *match;

		match = of_match_node(xilinx_dma_of_ids, np);
		if (match && match->data) {
			xdev->dma_config = match->data;
			clk_init = xdev->dma_config->clk_init;
		}
	}

	err = clk_init(pdev, &xdev->axi_clk, &xdev->tx_clk, &xdev->txs_clk,
		       &xdev->rx_clk, &xdev->rxs_clk);
	if (err)
		return err;

	/* Request and map I/O memory */
	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	xdev->regs = devm_ioremap_resource(&pdev->dev, io);
	if (IS_ERR(xdev->regs)) {
		err = PTR_ERR(xdev->regs);
		goto error;
	}

	/* Retrieve the DMA engine properties from the device tree */
	xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg");
	xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0);

	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma");
		if (!of_property_read_u32(node, "xlnx,sg-length-width",
					  &len_width)) {
			if (len_width < XILINX_DMA_MAX_TRANS_LEN_MIN ||
			    len_width > XILINX_DMA_V2_MAX_TRANS_LEN_MAX) {
				dev_warn(xdev->dev,
					 "invalid xlnx,sg-length-width property value. Using default width\n");
			} else {
				if (len_width > XILINX_DMA_MAX_TRANS_LEN_MAX)
					dev_warn(xdev->dev, "Please ensure that IP supports buffer length > 23 bits\n");
				xdev->max_buffer_len =
					GENMASK(len_width - 1, 0);
			}
		}
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		err = of_property_read_u32(node, "xlnx,num-fstores",
					   &num_frames);
		if (err < 0) {
			dev_err(xdev->dev,
				"missing xlnx,num-fstores property\n");
			goto error;
		}

		err = of_property_read_u32(node, "xlnx,flush-fsync",
					   &xdev->flush_on_fsync);
		if (err < 0)
			dev_warn(xdev->dev,
				 "missing xlnx,flush-fsync property\n");
	}

	err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width);
	if (err < 0)
		dev_warn(xdev->dev, "missing xlnx,addrwidth property\n");

	if (addr_width > 32)
		xdev->ext_addr = true;
	else
		xdev->ext_addr = false;

	/* Set the dma mask bits */
	err = dma_set_mask_and_coherent(xdev->dev, DMA_BIT_MASK(addr_width));
	if (err < 0) {
		dev_err(xdev->dev, "DMA mask error %d\n", err);
		goto error;
	}

	/* Initialize the DMA engine */
	xdev->common.dev = &pdev->dev;

	INIT_LIST_HEAD(&xdev->common.channels);
	if (xdev->dma_config->dmatype != XDMA_TYPE_CDMA) {
		dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
		dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
	}

	xdev->common.device_alloc_chan_resources =
				xilinx_dma_alloc_chan_resources;
	xdev->common.device_free_chan_resources =
				xilinx_dma_free_chan_resources;
	xdev->common.device_terminate_all = xilinx_dma_terminate_all;
	xdev->common.device_tx_status = xilinx_dma_tx_status;
	xdev->common.device_issue_pending = xilinx_dma_issue_pending;
	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
		dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask);
		xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
		xdev->common.device_prep_dma_cyclic =
					xilinx_dma_prep_dma_cyclic;
		xdev->common.device_prep_interleaved_dma =
					xilinx_dma_prep_interleaved;
		/* Residue calculation is supported by only AXI DMA */
		xdev->common.residue_granularity =
					DMA_RESIDUE_GRANULARITY_SEGMENT;
	} else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
		dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
		xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
	} else {
		xdev->common.device_prep_interleaved_dma =
				xilinx_vdma_dma_prep_interleaved;
	}

	platform_set_drvdata(pdev, xdev);

	/* Initialize the channels */
	for_each_child_of_node(node, child) {
		err = xilinx_dma_child_probe(xdev, child);
		if (err < 0)
			goto error;
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
		for (i = 0; i < xdev->nr_channels; i++)
			if (xdev->chan[i])
				xdev->chan[i]->num_frms = num_frames;
	}

	/* Register the DMA engine with the core */
	err = dma_async_device_register(&xdev->common);
	if (err) {
		dev_err(xdev->dev, "failed to register the dma device\n");
		goto error;
	}

	err = of_dma_controller_register(node, of_dma_xilinx_xlate,
					 xdev);
	if (err < 0) {
		dev_err(&pdev->dev, "Unable to register DMA to DT\n");
		dma_async_device_unregister(&xdev->common);
		goto error;
	}

	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
		dev_info(&pdev->dev, "Xilinx AXI DMA Engine Driver Probed!!\n");
	else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA)
		dev_info(&pdev->dev, "Xilinx AXI CDMA Engine Driver Probed!!\n");
	else
		dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n");

	return 0;

error:
	for (i = 0; i < xdev->nr_channels; i++)
		if (xdev->chan[i])
			xilinx_dma_chan_remove(xdev->chan[i]);

	xdma_disable_allclks(xdev);

	return err;
}

/**
 * xilinx_dma_remove - Driver remove function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: Always '0'
 */
static int xilinx_dma_remove(struct platform_device *pdev)
{
	struct xilinx_dma_device *xdev = platform_get_drvdata(pdev);
	int i;

	of_dma_controller_free(pdev->dev.of_node);

	dma_async_device_unregister(&xdev->common);

	for (i = 0; i < xdev->nr_channels; i++)
		if (xdev->chan[i])
			xilinx_dma_chan_remove(xdev->chan[i]);

	xdma_disable_allclks(xdev);

	return 0;
}

static struct platform_driver xilinx_vdma_driver = {
	.driver = {
		.name = "xilinx-vdma",
		.of_match_table = xilinx_dma_of_ids,
	},
	.probe = xilinx_dma_probe,
	.remove = xilinx_dma_remove,
};

module_platform_driver(xilinx_vdma_driver);

MODULE_AUTHOR("Xilinx, Inc.");
MODULE_DESCRIPTION("Xilinx VDMA driver");
MODULE_LICENSE("GPL v2");