/*
 * Driver for the Atmel Extensible DMA Controller (aka XDMAC on AT91 systems)
 *
 * Copyright (C) 2014 Atmel Corporation
 *
 * Author: Ludovic Desroches <ludovic.desroches@atmel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <asm/barrier.h>
#include <dt-bindings/dma/at91.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of_dma.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

#include "dmaengine.h"

/* Global registers */
#define AT_XDMAC_GTYPE		0x00	/* Global Type Register */
#define		AT_XDMAC_NB_CH(i)	(((i) & 0x1F) + 1)		/* Number of Channels Minus One */
#define		AT_XDMAC_FIFO_SZ(i)	(((i) >> 5) & 0x7FF)		/* Number of Bytes */
#define		AT_XDMAC_NB_REQ(i)	((((i) >> 16) & 0x3F) + 1)	/* Number of Peripheral Requests Minus One */
#define AT_XDMAC_GCFG		0x04	/* Global Configuration Register */
#define AT_XDMAC_GWAC		0x08	/* Global Weighted Arbiter Configuration Register */
#define AT_XDMAC_GIE		0x0C	/* Global Interrupt Enable Register */
#define AT_XDMAC_GID		0x10	/* Global Interrupt Disable Register */
#define AT_XDMAC_GIM		0x14	/* Global Interrupt Mask Register */
#define AT_XDMAC_GIS		0x18	/* Global Interrupt Status Register */
#define AT_XDMAC_GE		0x1C	/* Global Channel Enable Register */
#define AT_XDMAC_GD		0x20	/* Global Channel Disable Register */
#define AT_XDMAC_GS		0x24	/* Global Channel Status Register */
#define AT_XDMAC_GRS		0x28	/* Global Channel Read Suspend Register */
#define AT_XDMAC_GWS		0x2C	/* Global Channel Write Suspend Register */
#define AT_XDMAC_GRWS		0x30	/* Global Channel Read Write Suspend Register */
#define AT_XDMAC_GRWR		0x34	/* Global Channel Read Write Resume Register */
#define AT_XDMAC_GSWR		0x38	/* Global Channel Software Request Register */
#define AT_XDMAC_GSWS		0x3C	/* Global Channel Software Request Status Register */
#define AT_XDMAC_GSWF		0x40	/* Global Channel Software Flush Request Register */
#define AT_XDMAC_VERSION	0xFFC	/* XDMAC Version Register */

/* Channel relative registers offsets */
#define AT_XDMAC_CIE		0x00	/* Channel Interrupt Enable Register */
#define		AT_XDMAC_CIE_BIE	BIT(0)	/* End of Block Interrupt Enable Bit */
#define		AT_XDMAC_CIE_LIE	BIT(1)	/* End of Linked List Interrupt Enable Bit */
#define		AT_XDMAC_CIE_DIE	BIT(2)	/* End of Disable Interrupt Enable Bit */
#define		AT_XDMAC_CIE_FIE	BIT(3)	/* End of Flush Interrupt Enable Bit */
#define		AT_XDMAC_CIE_RBEIE	BIT(4)	/* Read Bus Error Interrupt Enable Bit */
#define		AT_XDMAC_CIE_WBEIE	BIT(5)	/* Write Bus Error Interrupt Enable Bit */
#define		AT_XDMAC_CIE_ROIE	BIT(6)	/* Request Overflow Interrupt Enable Bit */
#define AT_XDMAC_CID		0x04	/* Channel Interrupt Disable Register */
#define		AT_XDMAC_CID_BID	BIT(0)	/* End of Block Interrupt Disable Bit */
#define		AT_XDMAC_CID_LID	BIT(1)	/* End of Linked List Interrupt Disable Bit */
#define		AT_XDMAC_CID_DID	BIT(2)	/* End of Disable Interrupt Disable Bit */
#define		AT_XDMAC_CID_FID	BIT(3)	/* End of Flush Interrupt Disable Bit */
#define		AT_XDMAC_CID_RBEID	BIT(4)	/* Read Bus Error Interrupt Disable Bit */
#define		AT_XDMAC_CID_WBEID	BIT(5)	/* Write Bus Error Interrupt Disable Bit */
#define		AT_XDMAC_CID_ROID	BIT(6)	/* Request Overflow Interrupt Disable Bit */
#define AT_XDMAC_CIM		0x08	/* Channel Interrupt Mask Register */
#define		AT_XDMAC_CIM_BIM	BIT(0)	/* End of Block Interrupt Mask Bit */
#define		AT_XDMAC_CIM_LIM	BIT(1)	/* End of Linked List Interrupt Mask Bit */
#define		AT_XDMAC_CIM_DIM	BIT(2)	/* End of Disable Interrupt Mask Bit */
#define		AT_XDMAC_CIM_FIM	BIT(3)	/* End of Flush Interrupt Mask Bit */
#define		AT_XDMAC_CIM_RBEIM	BIT(4)	/* Read Bus Error Interrupt Mask Bit */
#define		AT_XDMAC_CIM_WBEIM	BIT(5)	/* Write Bus Error Interrupt Mask Bit */
#define		AT_XDMAC_CIM_ROIM	BIT(6)	/* Request Overflow Interrupt Mask Bit */
#define AT_XDMAC_CIS		0x0C	/* Channel Interrupt Status Register */
#define		AT_XDMAC_CIS_BIS	BIT(0)	/* End of Block Interrupt Status Bit */
#define		AT_XDMAC_CIS_LIS	BIT(1)	/* End of Linked List Interrupt Status Bit */
#define		AT_XDMAC_CIS_DIS	BIT(2)	/* End of Disable Interrupt Status Bit */
#define		AT_XDMAC_CIS_FIS	BIT(3)	/* End of Flush Interrupt Status Bit */
#define		AT_XDMAC_CIS_RBEIS	BIT(4)	/* Read Bus Error Interrupt Status Bit */
#define		AT_XDMAC_CIS_WBEIS	BIT(5)	/* Write Bus Error Interrupt Status Bit */
#define		AT_XDMAC_CIS_ROIS	BIT(6)	/* Request Overflow Interrupt Status Bit */
#define AT_XDMAC_CSA		0x10	/* Channel Source Address Register */
#define AT_XDMAC_CDA		0x14	/* Channel Destination Address Register */
#define AT_XDMAC_CNDA		0x18	/* Channel Next Descriptor Address Register */
#define		AT_XDMAC_CNDA_NDAIF(i)	((i) & 0x1)		/* Channel x Next Descriptor Interface */
#define		AT_XDMAC_CNDA_NDA(i)	((i) & 0xfffffffc)	/* Channel x Next Descriptor Address */
#define AT_XDMAC_CNDC		0x1C	/* Channel Next Descriptor Control Register */
#define		AT_XDMAC_CNDC_NDE		(0x1 << 0)	/* Channel x Next Descriptor Enable */
#define		AT_XDMAC_CNDC_NDSUP		(0x1 << 1)	/* Channel x Next Descriptor Source Update */
#define		AT_XDMAC_CNDC_NDDUP		(0x1 << 2)	/* Channel x Next Descriptor Destination Update */
#define		AT_XDMAC_CNDC_NDVIEW_MASK	GENMASK(28, 27)
#define		AT_XDMAC_CNDC_NDVIEW_NDV0	(0x0 << 3)	/* Channel x Next Descriptor View 0 */
#define		AT_XDMAC_CNDC_NDVIEW_NDV1	(0x1 << 3)	/* Channel x Next Descriptor View 1 */
#define		AT_XDMAC_CNDC_NDVIEW_NDV2	(0x2 << 3)	/* Channel x Next Descriptor View 2 */
#define		AT_XDMAC_CNDC_NDVIEW_NDV3	(0x3 << 3)	/* Channel x Next Descriptor View 3 */
#define AT_XDMAC_CUBC		0x20	/* Channel Microblock Control Register */
#define AT_XDMAC_CBC		0x24	/* Channel Block Control Register */
#define AT_XDMAC_CC		0x28	/* Channel Configuration Register */
#define		AT_XDMAC_CC_TYPE	(0x1 << 0)	/* Channel Transfer Type */
#define			AT_XDMAC_CC_TYPE_MEM_TRAN	(0x0 << 0)	/* Memory to Memory Transfer */
#define			AT_XDMAC_CC_TYPE_PER_TRAN	(0x1 << 0)	/* Peripheral to Memory or Memory to Peripheral Transfer */
#define		AT_XDMAC_CC_MBSIZE_MASK		(0x3 << 1)
#define			AT_XDMAC_CC_MBSIZE_SINGLE	(0x0 << 1)
#define			AT_XDMAC_CC_MBSIZE_FOUR		(0x1 << 1)
#define			AT_XDMAC_CC_MBSIZE_EIGHT	(0x2 << 1)
#define			AT_XDMAC_CC_MBSIZE_SIXTEEN	(0x3 << 1)
#define		AT_XDMAC_CC_DSYNC	(0x1 << 4)	/* Channel Synchronization */
#define			AT_XDMAC_CC_DSYNC_PER2MEM	(0x0 << 4)
#define			AT_XDMAC_CC_DSYNC_MEM2PER	(0x1 << 4)
#define		AT_XDMAC_CC_PROT	(0x1 << 5)	/* Channel Protection */
#define			AT_XDMAC_CC_PROT_SEC		(0x0 << 5)
#define			AT_XDMAC_CC_PROT_UNSEC		(0x1 << 5)
#define		AT_XDMAC_CC_SWREQ	(0x1 << 6)	/* Channel Software Request Trigger */
#define			AT_XDMAC_CC_SWREQ_HWR_CONNECTED	(0x0 << 6)
#define			AT_XDMAC_CC_SWREQ_SWR_CONNECTED	(0x1 << 6)
#define		AT_XDMAC_CC_MEMSET	(0x1 << 7)	/* Channel Fill Block of memory */
#define			AT_XDMAC_CC_MEMSET_NORMAL_MODE	(0x0 << 7)
#define			AT_XDMAC_CC_MEMSET_HW_MODE	(0x1 << 7)
#define		AT_XDMAC_CC_CSIZE(i)	((0x7 & (i)) << 8)	/* Channel Chunk Size */
#define		AT_XDMAC_CC_DWIDTH_OFFSET	11
#define		AT_XDMAC_CC_DWIDTH_MASK	(0x3 << AT_XDMAC_CC_DWIDTH_OFFSET)
#define		AT_XDMAC_CC_DWIDTH(i)	((0x3 & (i)) << AT_XDMAC_CC_DWIDTH_OFFSET)	/* Channel Data Width */
#define			AT_XDMAC_CC_DWIDTH_BYTE		0x0
#define			AT_XDMAC_CC_DWIDTH_HALFWORD	0x1
#define			AT_XDMAC_CC_DWIDTH_WORD		0x2
#define			AT_XDMAC_CC_DWIDTH_DWORD	0x3
#define		AT_XDMAC_CC_SIF(i)	((0x1 & (i)) << 13)	/* Channel Source Interface Identifier */
#define		AT_XDMAC_CC_DIF(i)	((0x1 & (i)) << 14)	/* Channel Destination Interface Identifier */
#define		AT_XDMAC_CC_SAM_MASK	(0x3 << 16)	/* Channel Source Addressing Mode */
#define			AT_XDMAC_CC_SAM_FIXED_AM	(0x0 << 16)
#define			AT_XDMAC_CC_SAM_INCREMENTED_AM	(0x1 << 16)
#define			AT_XDMAC_CC_SAM_UBS_AM		(0x2 << 16)
#define			AT_XDMAC_CC_SAM_UBS_DS_AM	(0x3 << 16)
#define		AT_XDMAC_CC_DAM_MASK	(0x3 << 18)	/* Channel Destination Addressing Mode */
#define			AT_XDMAC_CC_DAM_FIXED_AM	(0x0 << 18)
#define			AT_XDMAC_CC_DAM_INCREMENTED_AM	(0x1 << 18)
#define			AT_XDMAC_CC_DAM_UBS_AM		(0x2 << 18)
#define			AT_XDMAC_CC_DAM_UBS_DS_AM	(0x3 << 18)
#define		AT_XDMAC_CC_INITD	(0x1 << 21)	/* Channel Initialization Terminated (read only) */
#define			AT_XDMAC_CC_INITD_TERMINATED	(0x0 << 21)
#define			AT_XDMAC_CC_INITD_IN_PROGRESS	(0x1 << 21)
#define		AT_XDMAC_CC_RDIP	(0x1 << 22)	/* Read in Progress (read only) */
#define			AT_XDMAC_CC_RDIP_DONE		(0x0 << 22)
#define			AT_XDMAC_CC_RDIP_IN_PROGRESS	(0x1 << 22)
#define		AT_XDMAC_CC_WRIP	(0x1 << 23)	/* Write in Progress (read only) */
#define			AT_XDMAC_CC_WRIP_DONE		(0x0 << 23)
#define			AT_XDMAC_CC_WRIP_IN_PROGRESS	(0x1 << 23)
#define		AT_XDMAC_CC_PERID(i)	((0x7f & (i)) << 24)	/* Channel Peripheral Identifier */
#define AT_XDMAC_CDS_MSP	0x2C	/* Channel Data Stride Memory Set Pattern */
#define AT_XDMAC_CSUS		0x30	/* Channel Source Microblock Stride */
#define AT_XDMAC_CDUS		0x34	/* Channel Destination Microblock Stride */

#define AT_XDMAC_CHAN_REG_BASE	0x50	/* Channel registers base address */

/* Microblock control members */
#define AT_XDMAC_MBR_UBC_UBLEN_MAX	0xFFFFFFUL	/* Maximum Microblock Length */
#define AT_XDMAC_MBR_UBC_NDE		(0x1 << 24)	/* Next Descriptor Enable */
#define AT_XDMAC_MBR_UBC_NSEN		(0x1 << 25)	/* Next Descriptor Source Update */
#define AT_XDMAC_MBR_UBC_NDEN		(0x1 << 26)	/* Next Descriptor Destination Update */
#define AT_XDMAC_MBR_UBC_NDV0		(0x0 << 27)	/* Next Descriptor View 0 */
#define AT_XDMAC_MBR_UBC_NDV1		(0x1 << 27)	/* Next Descriptor View 1 */
#define AT_XDMAC_MBR_UBC_NDV2		(0x2 << 27)	/* Next Descriptor View 2 */
#define AT_XDMAC_MBR_UBC_NDV3		(0x3 << 27)	/* Next Descriptor View 3 */

#define AT_XDMAC_MAX_CHAN	0x20
#define AT_XDMAC_MAX_CSIZE	16	/* 16 data */
#define AT_XDMAC_MAX_DWIDTH	8	/* 64 bits */
#define AT_XDMAC_RESIDUE_MAX_RETRIES	5

#define AT_XDMAC_DMA_BUSWIDTHS\
	(BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |\
	BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))

enum atc_status {
	AT_XDMAC_CHAN_IS_CYCLIC = 0,
	AT_XDMAC_CHAN_IS_PAUSED,
};

/* ----- Channels ----- */
struct at_xdmac_chan {
	struct dma_chan			chan;
	void __iomem			*ch_regs;
	u32				mask;		/* Channel Mask */
	u32				cfg;		/* Channel Configuration Register */
	u8				perid;		/* Peripheral ID */
	u8				perif;		/* Peripheral Interface */
	u8				memif;		/* Memory Interface */
	u32				save_cc;
	u32				save_cim;
	u32				save_cnda;
	u32				save_cndc;
	u32				irq_status;
	unsigned long			status;
	struct tasklet_struct		tasklet;
	struct dma_slave_config		sconfig;

	spinlock_t			lock;

	struct list_head		xfers_list;
	struct list_head		free_descs_list;
};

/* ----- Controller ----- */
struct at_xdmac {
	struct dma_device	dma;
	void __iomem		*regs;
	int			irq;
	struct clk		*clk;
	u32			save_gim;
	struct dma_pool		*at_xdmac_desc_pool;
	struct at_xdmac_chan	chan[0];
};

/* ----- Descriptors ----- */

/* Linked List Descriptor */
struct at_xdmac_lld {
	u32	mbr_nda;	/* Next Descriptor Member */
	u32	mbr_ubc;	/* Microblock Control Member */
	u32	mbr_sa;		/* Source Address Member */
	u32	mbr_da;		/* Destination Address Member */
	u32	mbr_cfg;	/* Configuration Register */
	u32	mbr_bc;		/* Block Control Register */
	u32	mbr_ds;		/* Data Stride Register */
	u32	mbr_sus;	/* Source Microblock Stride Register */
	u32	mbr_dus;	/* Destination Microblock Stride Register */
};

/* 64-bit alignment needed to update CNDA and CUBC registers in an atomic way. */
struct at_xdmac_desc {
	struct at_xdmac_lld		lld;
	enum dma_transfer_direction	direction;
	struct dma_async_tx_descriptor	tx_dma_desc;
	struct list_head		desc_node;
	/* Following members are only used by the first descriptor */
	bool				active_xfer;
	unsigned int			xfer_size;
	struct list_head		descs_list;
	struct list_head		xfer_node;
} __aligned(sizeof(u64));

static inline void __iomem *at_xdmac_chan_reg_base(struct at_xdmac *atxdmac, unsigned int chan_nb)
{
	return atxdmac->regs + (AT_XDMAC_CHAN_REG_BASE + chan_nb * 0x40);
}

#define at_xdmac_read(atxdmac, reg) readl_relaxed((atxdmac)->regs + (reg))
#define at_xdmac_write(atxdmac, reg, value) \
	writel_relaxed((value), (atxdmac)->regs + (reg))

#define at_xdmac_chan_read(atchan, reg) readl_relaxed((atchan)->ch_regs + (reg))
#define at_xdmac_chan_write(atchan, reg, value) writel_relaxed((value), (atchan)->ch_regs + (reg))

static inline struct at_xdmac_chan *to_at_xdmac_chan(struct dma_chan *dchan)
{
	return container_of(dchan, struct at_xdmac_chan, chan);
}

static struct device *chan2dev(struct dma_chan *chan)
{
	return &chan->dev->device;
}

static inline struct at_xdmac *to_at_xdmac(struct dma_device *ddev)
{
	return container_of(ddev, struct at_xdmac, dma);
}

static inline struct at_xdmac_desc *txd_to_at_desc(struct dma_async_tx_descriptor *txd)
{
	return container_of(txd, struct at_xdmac_desc, tx_dma_desc);
}

static inline int at_xdmac_chan_is_cyclic(struct at_xdmac_chan *atchan)
{
	return test_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
}

static inline int at_xdmac_chan_is_paused(struct at_xdmac_chan *atchan)
{
	return test_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
}

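/*
 * The CSIZE field encodes the chunk size as a power of two: for example, a
 * maxburst of 16 data gives ffs(16) - 1 = 4, so AT_XDMAC_CC_CSIZE(4) selects
 * 16-data chunks.
 */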
static inline int at_xdmac_csize(u32 maxburst)
{
	int csize;

	csize = ffs(maxburst) - 1;

	return csize;
}

static inline u8 at_xdmac_get_dwidth(u32 cfg)
{
	return (cfg & AT_XDMAC_CC_DWIDTH_MASK) >> AT_XDMAC_CC_DWIDTH_OFFSET;
}

static unsigned int init_nr_desc_per_channel = 64;
module_param(init_nr_desc_per_channel, uint, 0644);
MODULE_PARM_DESC(init_nr_desc_per_channel,
		 "initial descriptors per channel (default: 64)");

static bool at_xdmac_chan_is_enabled(struct at_xdmac_chan *atchan)
{
	struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);

	return at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask;
}

static void at_xdmac_off(struct at_xdmac *atxdmac)
{
	at_xdmac_write(atxdmac, AT_XDMAC_GD, -1L);

	/* Wait until all channels are disabled. */
	while (at_xdmac_read(atxdmac, AT_XDMAC_GS))
		cpu_relax();

	at_xdmac_write(atxdmac, AT_XDMAC_GID, -1L);
}

/* Call with lock held. */
static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan,
				struct at_xdmac_desc *first)
{
	struct at_xdmac	*atxdmac = to_at_xdmac(atchan->chan.device);
	u32		reg;

	dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, first);

	/* Set transfer as active to not try to start it again. */
	first->active_xfer = true;

	/* Tell xdmac where to get the first descriptor. */
	reg = AT_XDMAC_CNDA_NDA(first->tx_dma_desc.phys)
	      | AT_XDMAC_CNDA_NDAIF(atchan->memif);
	at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, reg);

	/*
	 * When doing non-cyclic transfers we need to use the next
	 * descriptor view 2 since some fields of the configuration register
	 * depend on transfer size and src/dest addresses.
	 */
	if (at_xdmac_chan_is_cyclic(atchan))
		reg = AT_XDMAC_CNDC_NDVIEW_NDV1;
	else if ((first->lld.mbr_ubc &
		  AT_XDMAC_CNDC_NDVIEW_MASK) == AT_XDMAC_MBR_UBC_NDV3)
		reg = AT_XDMAC_CNDC_NDVIEW_NDV3;
	else
		reg = AT_XDMAC_CNDC_NDVIEW_NDV2;
	/*
	 * Even if the register will be updated from the configuration in the
	 * descriptor when using view 2 or higher, the PROT bit won't be set
	 * properly. This bit can be modified only by using the channel
	 * configuration register.
	 */
	at_xdmac_chan_write(atchan, AT_XDMAC_CC, first->lld.mbr_cfg);

	reg |= AT_XDMAC_CNDC_NDDUP
	       | AT_XDMAC_CNDC_NDSUP
	       | AT_XDMAC_CNDC_NDE;
	at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, reg);

	dev_vdbg(chan2dev(&atchan->chan),
		 "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
		 __func__, at_xdmac_chan_read(atchan, AT_XDMAC_CC),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));

	at_xdmac_chan_write(atchan, AT_XDMAC_CID, 0xffffffff);
	reg = AT_XDMAC_CIE_RBEIE | AT_XDMAC_CIE_WBEIE | AT_XDMAC_CIE_ROIE;
	/*
	 * There is no end of list when doing cyclic dma, we need to get
	 * an interrupt after each period.
	 */
	if (at_xdmac_chan_is_cyclic(atchan))
		at_xdmac_chan_write(atchan, AT_XDMAC_CIE,
				    reg | AT_XDMAC_CIE_BIE);
	else
		at_xdmac_chan_write(atchan, AT_XDMAC_CIE,
				    reg | AT_XDMAC_CIE_LIE);
	at_xdmac_write(atxdmac, AT_XDMAC_GIE, atchan->mask);
	dev_vdbg(chan2dev(&atchan->chan),
		 "%s: enable channel (0x%08x)\n", __func__, atchan->mask);
	wmb();
	at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);

	dev_vdbg(chan2dev(&atchan->chan),
		 "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
		 __func__, at_xdmac_chan_read(atchan, AT_XDMAC_CC),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
		 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
}

static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct at_xdmac_desc	*desc = txd_to_at_desc(tx);
	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(tx->chan);
	dma_cookie_t		cookie;
	unsigned long		irqflags;

	spin_lock_irqsave(&atchan->lock, irqflags);
	cookie = dma_cookie_assign(tx);

	list_add_tail(&desc->xfer_node, &atchan->xfers_list);
	spin_unlock_irqrestore(&atchan->lock, irqflags);

	dev_vdbg(chan2dev(tx->chan), "%s: atchan 0x%p, add desc 0x%p to xfers_list\n",
		 __func__, atchan, desc);

	return cookie;
}

static struct at_xdmac_desc *at_xdmac_alloc_desc(struct dma_chan *chan,
						 gfp_t gfp_flags)
{
	struct at_xdmac_desc	*desc;
	struct at_xdmac		*atxdmac = to_at_xdmac(chan->device);
	dma_addr_t		phys;

	desc = dma_pool_zalloc(atxdmac->at_xdmac_desc_pool, gfp_flags, &phys);
	if (desc) {
		INIT_LIST_HEAD(&desc->descs_list);
		dma_async_tx_descriptor_init(&desc->tx_dma_desc, chan);
		desc->tx_dma_desc.tx_submit = at_xdmac_tx_submit;
		desc->tx_dma_desc.phys = phys;
	}

	return desc;
}

static void at_xdmac_init_used_desc(struct at_xdmac_desc *desc)
{
	memset(&desc->lld, 0, sizeof(desc->lld));
	INIT_LIST_HEAD(&desc->descs_list);
	desc->direction = DMA_TRANS_NONE;
	desc->xfer_size = 0;
	desc->active_xfer = false;
}

/* Call must be protected by lock. */
static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan)
{
	struct at_xdmac_desc *desc;

	if (list_empty(&atchan->free_descs_list)) {
		desc = at_xdmac_alloc_desc(&atchan->chan, GFP_NOWAIT);
	} else {
		desc = list_first_entry(&atchan->free_descs_list,
					struct at_xdmac_desc, desc_node);
		list_del(&desc->desc_node);
		at_xdmac_init_used_desc(desc);
	}

	return desc;
}

static void at_xdmac_queue_desc(struct dma_chan *chan,
				struct at_xdmac_desc *prev,
				struct at_xdmac_desc *desc)
{
	if (!prev || !desc)
		return;

	prev->lld.mbr_nda = desc->tx_dma_desc.phys;
	prev->lld.mbr_ubc |= AT_XDMAC_MBR_UBC_NDE;

	dev_dbg(chan2dev(chan), "%s: chain lld: prev=0x%p, mbr_nda=%pad\n",
		__func__, prev, &prev->lld.mbr_nda);
}

static inline void at_xdmac_increment_block_count(struct dma_chan *chan,
						  struct at_xdmac_desc *desc)
{
	if (!desc)
		return;

	desc->lld.mbr_bc++;

	dev_dbg(chan2dev(chan),
		"%s: incrementing the block count of the desc 0x%p\n",
		__func__, desc);
}

static struct dma_chan *at_xdmac_xlate(struct of_phandle_args *dma_spec,
				       struct of_dma *of_dma)
{
	struct at_xdmac		*atxdmac = of_dma->of_dma_data;
	struct at_xdmac_chan	*atchan;
	struct dma_chan		*chan;
	struct device		*dev = atxdmac->dma.dev;

	if (dma_spec->args_count != 1) {
		dev_err(dev, "dma phandle args: bad number of args\n");
		return NULL;
	}

	chan = dma_get_any_slave_channel(&atxdmac->dma);
	if (!chan) {
		dev_err(dev, "can't get a dma channel\n");
		return NULL;
	}

	atchan = to_at_xdmac_chan(chan);
	atchan->memif = AT91_XDMAC_DT_GET_MEM_IF(dma_spec->args[0]);
	atchan->perif = AT91_XDMAC_DT_GET_PER_IF(dma_spec->args[0]);
	atchan->perid = AT91_XDMAC_DT_GET_PERID(dma_spec->args[0]);
	dev_dbg(dev, "chan dt cfg: memif=%u perif=%u perid=%u\n",
		atchan->memif, atchan->perif, atchan->perid);

	return chan;
}

static int at_xdmac_compute_chan_conf(struct dma_chan *chan,
				      enum dma_transfer_direction direction)
{
	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
	int			csize, dwidth;

	if (direction == DMA_DEV_TO_MEM) {
		atchan->cfg =
			AT91_XDMAC_DT_PERID(atchan->perid)
			| AT_XDMAC_CC_DAM_INCREMENTED_AM
			| AT_XDMAC_CC_SAM_FIXED_AM
			| AT_XDMAC_CC_DIF(atchan->memif)
			| AT_XDMAC_CC_SIF(atchan->perif)
			| AT_XDMAC_CC_SWREQ_HWR_CONNECTED
			| AT_XDMAC_CC_DSYNC_PER2MEM
			| AT_XDMAC_CC_MBSIZE_SIXTEEN
			| AT_XDMAC_CC_TYPE_PER_TRAN;
		csize = at_xdmac_csize(atchan->sconfig.src_maxburst);
		if (csize < 0) {
			dev_err(chan2dev(chan), "invalid src maxburst value\n");
			return -EINVAL;
		}
		atchan->cfg |= AT_XDMAC_CC_CSIZE(csize);
		dwidth = ffs(atchan->sconfig.src_addr_width) - 1;
		if (dwidth < 0) {
			dev_err(chan2dev(chan), "invalid src addr width value\n");
			return -EINVAL;
		}
		atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth);
	} else if (direction == DMA_MEM_TO_DEV) {
		atchan->cfg =
			AT91_XDMAC_DT_PERID(atchan->perid)
			| AT_XDMAC_CC_DAM_FIXED_AM
			| AT_XDMAC_CC_SAM_INCREMENTED_AM
			| AT_XDMAC_CC_DIF(atchan->perif)
			| AT_XDMAC_CC_SIF(atchan->memif)
			| AT_XDMAC_CC_SWREQ_HWR_CONNECTED
			| AT_XDMAC_CC_DSYNC_MEM2PER
			| AT_XDMAC_CC_MBSIZE_SIXTEEN
			| AT_XDMAC_CC_TYPE_PER_TRAN;
		csize = at_xdmac_csize(atchan->sconfig.dst_maxburst);
		if (csize < 0) {
			dev_err(chan2dev(chan), "invalid dst maxburst value\n");
			return -EINVAL;
		}
		atchan->cfg |= AT_XDMAC_CC_CSIZE(csize);
		dwidth = ffs(atchan->sconfig.dst_addr_width) - 1;
		if (dwidth < 0) {
			dev_err(chan2dev(chan), "invalid dst addr width value\n");
			return -EINVAL;
		}
		atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth);
	}

	dev_dbg(chan2dev(chan), "%s: cfg=0x%08x\n", __func__, atchan->cfg);

	return 0;
}

/*
 * Only check that maxburst and addr width values are supported by the
 * controller but not that the configuration is good to perform the
 * transfer since we don't know the direction at this stage.
 */
static int at_xdmac_check_slave_config(struct dma_slave_config *sconfig)
{
	if ((sconfig->src_maxburst > AT_XDMAC_MAX_CSIZE)
	    || (sconfig->dst_maxburst > AT_XDMAC_MAX_CSIZE))
		return -EINVAL;

	if ((sconfig->src_addr_width > AT_XDMAC_MAX_DWIDTH)
	    || (sconfig->dst_addr_width > AT_XDMAC_MAX_DWIDTH))
		return -EINVAL;

	return 0;
}

static int at_xdmac_set_slave_config(struct dma_chan *chan,
				     struct dma_slave_config *sconfig)
{
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);

	if (at_xdmac_check_slave_config(sconfig)) {
		dev_err(chan2dev(chan), "invalid slave configuration\n");
		return -EINVAL;
	}

	memcpy(&atchan->sconfig, sconfig, sizeof(atchan->sconfig));

	return 0;
}

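/*
 * Clients provide the peripheral side of the configuration through
 * dmaengine_slave_config() before preparing a slave transfer. A sketch with
 * hypothetical values for a UART RX channel (rx_fifo_phys is a placeholder):
 *
 *	struct dma_slave_config config = {
 *		.direction = DMA_DEV_TO_MEM,
 *		.src_addr = rx_fifo_phys,
 *		.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
 *		.src_maxburst = 1,
 *	};
 *	dmaengine_slave_config(chan, &config);
 */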
static struct dma_async_tx_descriptor *
at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		       unsigned int sg_len, enum dma_transfer_direction direction,
		       unsigned long flags, void *context)
{
	struct at_xdmac_chan		*atchan = to_at_xdmac_chan(chan);
	struct at_xdmac_desc		*first = NULL, *prev = NULL;
	struct scatterlist		*sg;
	int				i;
	unsigned int			xfer_size = 0;
	unsigned long			irqflags;
	struct dma_async_tx_descriptor	*ret = NULL;

	if (!sgl)
		return NULL;

	if (!is_slave_direction(direction)) {
		dev_err(chan2dev(chan), "invalid DMA direction\n");
		return NULL;
	}

	dev_dbg(chan2dev(chan), "%s: sg_len=%d, dir=%s, flags=0x%lx\n",
		__func__, sg_len,
		direction == DMA_MEM_TO_DEV ? "to device" : "from device",
		flags);

	/* Protect dma_sconfig field that can be modified by set_slave_conf. */
	spin_lock_irqsave(&atchan->lock, irqflags);

	if (at_xdmac_compute_chan_conf(chan, direction))
		goto spin_unlock;
	/* Prepare descriptors. */
	for_each_sg(sgl, sg, sg_len, i) {
		struct at_xdmac_desc	*desc = NULL;
		u32			len, mem, dwidth, fixed_dwidth;

		len = sg_dma_len(sg);
		mem = sg_dma_address(sg);
		if (unlikely(!len)) {
			dev_err(chan2dev(chan), "sg data length is zero\n");
			goto spin_unlock;
		}
		dev_dbg(chan2dev(chan), "%s: * sg%d len=%u, mem=0x%08x\n",
			__func__, i, len, mem);

		desc = at_xdmac_get_desc(atchan);
		if (!desc) {
			dev_err(chan2dev(chan), "can't get descriptor\n");
			if (first)
				list_splice_init(&first->descs_list, &atchan->free_descs_list);
			goto spin_unlock;
		}
		/* Linked list descriptor setup. */
		if (direction == DMA_DEV_TO_MEM) {
			desc->lld.mbr_sa = atchan->sconfig.src_addr;
			desc->lld.mbr_da = mem;
		} else {
			desc->lld.mbr_sa = mem;
			desc->lld.mbr_da = atchan->sconfig.dst_addr;
		}
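		/*
		 * If the sg element length is not a multiple of the
		 * configured data width, fall back to byte-wide transfers so
		 * that the microblock length (len >> fixed_dwidth) below is
		 * exact.
		 */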
		dwidth = at_xdmac_get_dwidth(atchan->cfg);
		fixed_dwidth = IS_ALIGNED(len, 1 << dwidth)
			       ? dwidth
			       : AT_XDMAC_CC_DWIDTH_BYTE;
		desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2	/* next descriptor view */
			| AT_XDMAC_MBR_UBC_NDEN			/* next descriptor dst parameter update */
			| AT_XDMAC_MBR_UBC_NSEN			/* next descriptor src parameter update */
			| (len >> fixed_dwidth);		/* microblock length */
		desc->lld.mbr_cfg = (atchan->cfg & ~AT_XDMAC_CC_DWIDTH_MASK) |
				    AT_XDMAC_CC_DWIDTH(fixed_dwidth);
		dev_dbg(chan2dev(chan),
			"%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
			__func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);
		/* Chain lld. */
		if (prev)
			at_xdmac_queue_desc(chan, prev, desc);

		prev = desc;
		if (!first)
			first = desc;

		dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
			__func__, desc, first);
		list_add_tail(&desc->desc_node, &first->descs_list);
		xfer_size += len;
	}

	first->tx_dma_desc.flags = flags;
	first->xfer_size = xfer_size;
	first->direction = direction;
	ret = &first->tx_dma_desc;

spin_unlock:
	spin_unlock_irqrestore(&atchan->lock, irqflags);
	return ret;
}

static struct dma_async_tx_descriptor *
at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
			 size_t buf_len, size_t period_len,
			 enum dma_transfer_direction direction,
			 unsigned long flags)
{
	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
	struct at_xdmac_desc	*first = NULL, *prev = NULL;
	unsigned int		periods = buf_len / period_len;
	int			i;
	unsigned long		irqflags;

	dev_dbg(chan2dev(chan), "%s: buf_addr=%pad, buf_len=%zd, period_len=%zd, dir=%s, flags=0x%lx\n",
		__func__, &buf_addr, buf_len, period_len,
		direction == DMA_MEM_TO_DEV ? "mem2per" : "per2mem", flags);

	if (!is_slave_direction(direction)) {
		dev_err(chan2dev(chan), "invalid DMA direction\n");
		return NULL;
	}

	if (test_and_set_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status)) {
		dev_err(chan2dev(chan), "channel currently used\n");
		return NULL;
	}

	if (at_xdmac_compute_chan_conf(chan, direction))
		return NULL;
	for (i = 0; i < periods; i++) {
		struct at_xdmac_desc	*desc = NULL;

		spin_lock_irqsave(&atchan->lock, irqflags);
		desc = at_xdmac_get_desc(atchan);
		if (!desc) {
			dev_err(chan2dev(chan), "can't get descriptor\n");
			if (first)
				list_splice_init(&first->descs_list, &atchan->free_descs_list);
			spin_unlock_irqrestore(&atchan->lock, irqflags);
			return NULL;
		}
		spin_unlock_irqrestore(&atchan->lock, irqflags);
		dev_dbg(chan2dev(chan),
			"%s: desc=0x%p, tx_dma_desc.phys=%pad\n",
			__func__, desc, &desc->tx_dma_desc.phys);

		if (direction == DMA_DEV_TO_MEM) {
			desc->lld.mbr_sa = atchan->sconfig.src_addr;
			desc->lld.mbr_da = buf_addr + i * period_len;
		} else {
			desc->lld.mbr_sa = buf_addr + i * period_len;
			desc->lld.mbr_da = atchan->sconfig.dst_addr;
		}
		desc->lld.mbr_cfg = atchan->cfg;
		desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1
			| AT_XDMAC_MBR_UBC_NDEN
			| AT_XDMAC_MBR_UBC_NSEN
			| period_len >> at_xdmac_get_dwidth(desc->lld.mbr_cfg);

		dev_dbg(chan2dev(chan),
			"%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
			__func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);

		/* Chain lld. */
		if (prev)
			at_xdmac_queue_desc(chan, prev, desc);

		prev = desc;
		if (!first)
			first = desc;

		dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
			__func__, desc, first);
		list_add_tail(&desc->desc_node, &first->descs_list);
	}
	at_xdmac_queue_desc(chan, prev, first);
	first->tx_dma_desc.flags = flags;
	first->xfer_size = buf_len;
	first->direction = direction;

	return &first->tx_dma_desc;
}

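/*
 * For example, an address (or an OR-combination of addresses and length) of
 * 0x1006 has bit 0 clear but bit 1 set, so the widest usable data width is a
 * halfword.
 */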
static inline u32 at_xdmac_align_width(struct dma_chan *chan, dma_addr_t addr)
{
	u32 width;

	/*
	 * Check address alignment to select the greater data width we
	 * can use.
	 *
	 * Some XDMAC implementations don't provide dword transfer, in
	 * this case selecting dword has the same behavior as
	 * selecting word transfers.
	 */
	if (!(addr & 7)) {
		width = AT_XDMAC_CC_DWIDTH_DWORD;
		dev_dbg(chan2dev(chan), "%s: dwidth: double word\n", __func__);
	} else if (!(addr & 3)) {
		width = AT_XDMAC_CC_DWIDTH_WORD;
		dev_dbg(chan2dev(chan), "%s: dwidth: word\n", __func__);
	} else if (!(addr & 1)) {
		width = AT_XDMAC_CC_DWIDTH_HALFWORD;
		dev_dbg(chan2dev(chan), "%s: dwidth: half word\n", __func__);
	} else {
		width = AT_XDMAC_CC_DWIDTH_BYTE;
		dev_dbg(chan2dev(chan), "%s: dwidth: byte\n", __func__);
	}

	return width;
}

static struct at_xdmac_desc *
at_xdmac_interleaved_queue_desc(struct dma_chan *chan,
				struct at_xdmac_chan *atchan,
				struct at_xdmac_desc *prev,
				dma_addr_t src, dma_addr_t dst,
				struct dma_interleaved_template *xt,
				struct data_chunk *chunk)
{
	struct at_xdmac_desc	*desc;
	u32			dwidth;
	unsigned long		flags;
	size_t			ublen;
	/*
	 * WARNING: The channel configuration is set here since there is no
	 * dmaengine_slave_config call in this case. Moreover we don't know the
	 * direction, which means we can't dynamically set the source and dest
	 * interface so we have to use the same one. Only interface 0 allows EBI
	 * access. Hopefully we can access DDR through both ports (at least on
	 * SAMA5D4x), so we can use the same interface for source and dest,
	 * that solves the fact we don't know the direction.
	 * ERRATA: Even if useless for memory transfers, the PERID must not
	 * match the one of another channel. If not, it could lead to spurious
	 * flag status.
	 */
	u32			chan_cc = AT_XDMAC_CC_PERID(0x3f)
					| AT_XDMAC_CC_DIF(0)
					| AT_XDMAC_CC_SIF(0)
					| AT_XDMAC_CC_MBSIZE_SIXTEEN
					| AT_XDMAC_CC_TYPE_MEM_TRAN;

	dwidth = at_xdmac_align_width(chan, src | dst | chunk->size);
	if (chunk->size >= (AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)) {
		dev_dbg(chan2dev(chan),
			"%s: chunk too big (%zu, max size %lu)...\n",
			__func__, chunk->size,
			AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth);
		return NULL;
	}
	if (prev)
		dev_dbg(chan2dev(chan),
			"Adding items at the end of desc 0x%p\n", prev);

	if (xt->src_inc) {
		if (xt->src_sgl)
			chan_cc |= AT_XDMAC_CC_SAM_UBS_AM;
		else
			chan_cc |= AT_XDMAC_CC_SAM_INCREMENTED_AM;
	}

	if (xt->dst_inc) {
		if (xt->dst_sgl)
			chan_cc |= AT_XDMAC_CC_DAM_UBS_AM;
		else
			chan_cc |= AT_XDMAC_CC_DAM_INCREMENTED_AM;
	}
	spin_lock_irqsave(&atchan->lock, flags);
	desc = at_xdmac_get_desc(atchan);
	spin_unlock_irqrestore(&atchan->lock, flags);
	if (!desc) {
		dev_err(chan2dev(chan), "can't get descriptor\n");
		return NULL;
	}

	chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);

	ublen = chunk->size >> dwidth;

	desc->lld.mbr_sa = src;
	desc->lld.mbr_da = dst;
	desc->lld.mbr_sus = dmaengine_get_src_icg(xt, chunk);
	desc->lld.mbr_dus = dmaengine_get_dst_icg(xt, chunk);

	desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV3
		| AT_XDMAC_MBR_UBC_NDEN
		| AT_XDMAC_MBR_UBC_NSEN
		| ublen;
	desc->lld.mbr_cfg = chan_cc;

	dev_dbg(chan2dev(chan),
		"%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
		__func__, &desc->lld.mbr_sa, &desc->lld.mbr_da,
		desc->lld.mbr_ubc, desc->lld.mbr_cfg);

	/* Chain lld. */
	if (prev)
		at_xdmac_queue_desc(chan, prev, desc);

	return desc;
}

static struct dma_async_tx_descriptor *
at_xdmac_prep_interleaved(struct dma_chan *chan,
			  struct dma_interleaved_template *xt,
			  unsigned long flags)
{
	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
	struct at_xdmac_desc	*prev = NULL, *first = NULL;
	dma_addr_t		dst_addr, src_addr;
	size_t			src_skip = 0, dst_skip = 0, len = 0;
	struct data_chunk	*chunk;
	int			i;

	if (!xt || !xt->numf || (xt->dir != DMA_MEM_TO_MEM))
		return NULL;

	/*
	 * TODO: Handle the case where we have to repeat a chain of
	 * descriptors...
	 */
	if ((xt->numf > 1) && (xt->frame_size > 1))
		return NULL;
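	/*
	 * From here on, either numf > 1 with a single chunk (one microblock
	 * repeated numf times through the block count) or numf == 1 with a
	 * frame of frame_size chunks chained through the linked list.
	 */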
	dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, numf=%zu, frame_size=%zu, flags=0x%lx\n",
		__func__, &xt->src_start, &xt->dst_start, xt->numf,
		xt->frame_size, flags);

	src_addr = xt->src_start;
	dst_addr = xt->dst_start;

	if (xt->numf > 1) {
		first = at_xdmac_interleaved_queue_desc(chan, atchan,
							NULL,
							src_addr, dst_addr,
							xt, xt->sgl);

		/* Length of the block is (BLEN+1) microblocks. */
		for (i = 0; i < xt->numf - 1; i++)
			at_xdmac_increment_block_count(chan, first);

		dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
			__func__, first, first);
		list_add_tail(&first->desc_node, &first->descs_list);
	} else {
		for (i = 0; i < xt->frame_size; i++) {
			size_t src_icg = 0, dst_icg = 0;
			struct at_xdmac_desc *desc;

			chunk = xt->sgl + i;

			dst_icg = dmaengine_get_dst_icg(xt, chunk);
			src_icg = dmaengine_get_src_icg(xt, chunk);

			src_skip = chunk->size + src_icg;
			dst_skip = chunk->size + dst_icg;

			dev_dbg(chan2dev(chan),
				"%s: chunk size=%zu, src icg=%zu, dst icg=%zu\n",
				__func__, chunk->size, src_icg, dst_icg);

			desc = at_xdmac_interleaved_queue_desc(chan, atchan,
							       prev,
							       src_addr, dst_addr,
							       xt, chunk);
			if (!desc) {
				list_splice_init(&first->descs_list,
						 &atchan->free_descs_list);
				return NULL;
			}

			if (!first)
				first = desc;

			dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
				__func__, desc, first);
			list_add_tail(&desc->desc_node, &first->descs_list);

			if (xt->src_sgl)
				src_addr += src_skip;

			if (xt->dst_sgl)
				dst_addr += dst_skip;

			len += chunk->size;
			prev = desc;
		}
	}
	first->tx_dma_desc.cookie = -EBUSY;
	first->tx_dma_desc.flags = flags;
	first->xfer_size = len;

	return &first->tx_dma_desc;
}

static struct dma_async_tx_descriptor *
at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
			 size_t len, unsigned long flags)
{
	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
	struct at_xdmac_desc	*first = NULL, *prev = NULL;
	size_t			remaining_size = len, xfer_size = 0, ublen;
	dma_addr_t		src_addr = src, dst_addr = dest;
	u32			dwidth;
	/*
	 * WARNING: We don't know the direction, which means we can't
	 * dynamically set the source and dest interface so we have to use the
	 * same one. Only interface 0 allows EBI access. Hopefully we can
	 * access DDR through both ports (at least on SAMA5D4x), so we can use
	 * the same interface for source and dest, that solves the fact we
	 * don't know the direction.
	 * ERRATA: Even if useless for memory transfers, the PERID must not
	 * match the one of another channel. If not, it could lead to spurious
	 * flag status.
	 */
	u32			chan_cc = AT_XDMAC_CC_PERID(0x3f)
					| AT_XDMAC_CC_DAM_INCREMENTED_AM
					| AT_XDMAC_CC_SAM_INCREMENTED_AM
					| AT_XDMAC_CC_DIF(0)
					| AT_XDMAC_CC_SIF(0)
					| AT_XDMAC_CC_MBSIZE_SIXTEEN
					| AT_XDMAC_CC_TYPE_MEM_TRAN;
	unsigned long		irqflags;

	dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, len=%zd, flags=0x%lx\n",
		__func__, &src, &dest, len, flags);

	if (unlikely(!len))
		return NULL;

	dwidth = at_xdmac_align_width(chan, src_addr | dst_addr);
	/* Prepare descriptors. */
	while (remaining_size) {
		struct at_xdmac_desc	*desc = NULL;

		dev_dbg(chan2dev(chan), "%s: remaining_size=%zu\n", __func__, remaining_size);

		spin_lock_irqsave(&atchan->lock, irqflags);
		desc = at_xdmac_get_desc(atchan);
		spin_unlock_irqrestore(&atchan->lock, irqflags);
		if (!desc) {
			dev_err(chan2dev(chan), "can't get descriptor\n");
			if (first)
				list_splice_init(&first->descs_list, &atchan->free_descs_list);
			return NULL;
		}

		/* Update src and dest addresses. */
		src_addr += xfer_size;
		dst_addr += xfer_size;

		if (remaining_size >= AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)
			xfer_size = AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth;
		else
			xfer_size = remaining_size;

		dev_dbg(chan2dev(chan), "%s: xfer_size=%zu\n", __func__, xfer_size);

		/* Check remaining length and change data width if needed. */
		dwidth = at_xdmac_align_width(chan,
					      src_addr | dst_addr | xfer_size);
		chan_cc &= ~AT_XDMAC_CC_DWIDTH_MASK;
		chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);

		ublen = xfer_size >> dwidth;
		remaining_size -= xfer_size;

		desc->lld.mbr_sa = src_addr;
		desc->lld.mbr_da = dst_addr;
		desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2
			| AT_XDMAC_MBR_UBC_NDEN
			| AT_XDMAC_MBR_UBC_NSEN
			| ublen;
		desc->lld.mbr_cfg = chan_cc;

		dev_dbg(chan2dev(chan),
			"%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
			__func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc, desc->lld.mbr_cfg);

		/* Chain lld. */
		if (prev)
			at_xdmac_queue_desc(chan, prev, desc);

		prev = desc;
		if (!first)
			first = desc;

		dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
			__func__, desc, first);
		list_add_tail(&desc->desc_node, &first->descs_list);
	}
	first->tx_dma_desc.flags = flags;
	first->xfer_size = len;

	return &first->tx_dma_desc;
}

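/*
 * In hardware memset mode (AT_XDMAC_CC_MEMSET_HW_MODE) the channel does not
 * read data from memory: it replicates the pattern written in the Channel
 * Data Stride Memory Set Pattern register (mbr_ds in the descriptor) over
 * the destination microblock.
 */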
static struct at_xdmac_desc *at_xdmac_memset_create_desc(struct dma_chan *chan,
							 struct at_xdmac_chan *atchan,
							 dma_addr_t dst_addr,
							 size_t len,
							 int value)
{
	struct at_xdmac_desc	*desc;
	unsigned long		flags;
	size_t			ublen;
	u32			dwidth;
	/*
	 * WARNING: The channel configuration is set here since there is no
	 * dmaengine_slave_config call in this case. Moreover we don't know the
	 * direction, which means we can't dynamically set the source and dest
	 * interface so we have to use the same one. Only interface 0 allows EBI
	 * access. Hopefully we can access DDR through both ports (at least on
	 * SAMA5D4x), so we can use the same interface for source and dest,
	 * that solves the fact we don't know the direction.
	 * ERRATA: Even if useless for memory transfers, the PERID must not
	 * match the one of another channel. If not, it could lead to spurious
	 * flag status.
	 */
	u32			chan_cc = AT_XDMAC_CC_PERID(0x3f)
					| AT_XDMAC_CC_DAM_UBS_AM
					| AT_XDMAC_CC_SAM_INCREMENTED_AM
					| AT_XDMAC_CC_DIF(0)
					| AT_XDMAC_CC_SIF(0)
					| AT_XDMAC_CC_MBSIZE_SIXTEEN
					| AT_XDMAC_CC_MEMSET_HW_MODE
					| AT_XDMAC_CC_TYPE_MEM_TRAN;

	dwidth = at_xdmac_align_width(chan, dst_addr);

	if (len >= (AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)) {
		dev_err(chan2dev(chan),
			"%s: Transfer too large, aborting...\n",
			__func__);
		return NULL;
	}

	spin_lock_irqsave(&atchan->lock, flags);
	desc = at_xdmac_get_desc(atchan);
	spin_unlock_irqrestore(&atchan->lock, flags);
	if (!desc) {
		dev_err(chan2dev(chan), "can't get descriptor\n");
		return NULL;
	}

	chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);

	ublen = len >> dwidth;

	desc->lld.mbr_da = dst_addr;
	desc->lld.mbr_ds = value;
	desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV3
		| AT_XDMAC_MBR_UBC_NDEN
		| AT_XDMAC_MBR_UBC_NSEN
		| ublen;
	desc->lld.mbr_cfg = chan_cc;

	dev_dbg(chan2dev(chan),
		"%s: lld: mbr_da=%pad, mbr_ds=0x%08x, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
		__func__, &desc->lld.mbr_da, desc->lld.mbr_ds, desc->lld.mbr_ubc,
		desc->lld.mbr_cfg);

	return desc;
}

static struct dma_async_tx_descriptor *
at_xdmac_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
			 size_t len, unsigned long flags)
{
	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
	struct at_xdmac_desc	*desc;

	dev_dbg(chan2dev(chan), "%s: dest=%pad, len=%zu, pattern=0x%x, flags=0x%lx\n",
		__func__, &dest, len, value, flags);

	if (!len)
		return NULL;

	desc = at_xdmac_memset_create_desc(chan, atchan, dest, len, value);
	list_add_tail(&desc->desc_node, &desc->descs_list);

	desc->tx_dma_desc.cookie = -EBUSY;
	desc->tx_dma_desc.flags = flags;
	desc->xfer_size = len;

	return &desc->tx_dma_desc;
}

static struct dma_async_tx_descriptor *
at_xdmac_prep_dma_memset_sg(struct dma_chan *chan, struct scatterlist *sgl,
			    unsigned int sg_len, int value,
			    unsigned long flags)
{
	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
	struct at_xdmac_desc	*desc, *pdesc = NULL,
				*ppdesc = NULL, *first = NULL;
	struct scatterlist	*sg, *psg = NULL, *ppsg = NULL;
	size_t			stride = 0, pstride = 0, len = 0;
	int			i;

	if (!sgl)
		return NULL;

	dev_dbg(chan2dev(chan), "%s: sg_len=%d, value=0x%x, flags=0x%lx\n",
		__func__, sg_len, value, flags);

	/* Prepare descriptors. */
	for_each_sg(sgl, sg, sg_len, i) {
		dev_dbg(chan2dev(chan), "%s: dest=%pad, len=%d, pattern=0x%x, flags=0x%lx\n",
			__func__, &sg_dma_address(sg), sg_dma_len(sg),
			value, flags);
		desc = at_xdmac_memset_create_desc(chan, atchan,
						   sg_dma_address(sg),
						   sg_dma_len(sg),
						   value);
		if (!desc && first)
			list_splice_init(&first->descs_list,
					 &atchan->free_descs_list);

		if (!first)
			first = desc;

		/* Update our strides */
		pstride = stride;
		if (psg)
			stride = sg_dma_address(sg) -
				(sg_dma_address(psg) + sg_dma_len(psg));
		/*
		 * The scatterlist API gives us only the address and
		 * length of each element.
		 *
		 * Unfortunately, we don't have the stride, which we
		 * will need to compute.
		 *
		 * That makes us end up in a situation like this one:
		 *    len    stride    len    stride    len
		 * +-------+        +-------+        +-------+
		 * |  N-2  |        |  N-1  |        |   N   |
		 * +-------+        +-------+        +-------+
		 *
		 * We need all these three elements (N-2, N-1 and N)
		 * to actually take the decision on whether we need to
		 * queue N-1 or reuse N-2.
		 *
		 * We will only consider N if it is the last element.
		 */
		if (ppdesc && pdesc) {
			if ((stride == pstride) &&
			    (sg_dma_len(ppsg) == sg_dma_len(psg))) {
				dev_dbg(chan2dev(chan),
					"%s: desc 0x%p can be merged with desc 0x%p\n",
					__func__, pdesc, ppdesc);

				/*
				 * Increment the block count of the
				 * N-2 descriptor
				 */
				at_xdmac_increment_block_count(chan, ppdesc);
				ppdesc->lld.mbr_dus = stride;

				/*
				 * Put back the N-1 descriptor in the
				 * free descriptor list
				 */
				list_add_tail(&pdesc->desc_node,
					      &atchan->free_descs_list);

				/*
				 * Make our N-1 descriptor pointer
				 * point to the N-2 since they were
				 * actually merged.
				 */
				pdesc = ppdesc;

			/*
			 * Rule out the case where we don't have
			 * pstride computed yet (our second sg
			 * element)
			 *
			 * We also want to catch the case where there
			 * would be a negative stride,
			 */
			} else if (pstride ||
				   sg_dma_address(sg) < sg_dma_address(psg)) {
				/*
				 * Queue the N-1 descriptor after the
				 * N-2 one.
				 */
				at_xdmac_queue_desc(chan, ppdesc, pdesc);

				/*
				 * Add the N-1 descriptor to the list
				 * of the descriptors used for this
				 * transfer
				 */
				list_add_tail(&desc->desc_node,
					      &first->descs_list);
				dev_dbg(chan2dev(chan),
					"%s: add desc 0x%p to descs_list 0x%p\n",
					__func__, desc, first);
			}
		}
		/*
		 * If we are the last element, just see if we have the
		 * same size as the previous element.
		 *
		 * If so, we can merge it with the previous descriptor
		 * since we don't care about the stride anymore.
		 */
		if ((i == (sg_len - 1)) &&
		    sg_dma_len(psg) == sg_dma_len(sg)) {
			dev_dbg(chan2dev(chan),
				"%s: desc 0x%p can be merged with desc 0x%p\n",
				__func__, desc, pdesc);

			/*
			 * Increment the block count of the N-1
			 * descriptor
			 */
			at_xdmac_increment_block_count(chan, pdesc);
			pdesc->lld.mbr_dus = stride;

			/*
			 * Put back the N descriptor in the free
			 * descriptor list
			 */
			list_add_tail(&desc->desc_node,
				      &atchan->free_descs_list);
		}

		/* Update our descriptors */
		ppdesc = pdesc;
		pdesc = desc;

		/* Update our scatter pointers */
		ppsg = psg;
		psg = sg;

		len += sg_dma_len(sg);
	}
	first->tx_dma_desc.cookie = -EBUSY;
	first->tx_dma_desc.flags = flags;
	first->xfer_size = len;

	return &first->tx_dma_desc;
}

static enum dma_status
at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
		   struct dma_tx_state *txstate)
{
	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
	struct at_xdmac_desc	*desc, *_desc, *iter;
	struct list_head	*descs_list;
	enum dma_status		ret;
	int			residue, retry;
	u32			cur_nda, check_nda, cur_ubc, mask, value;
	u8			dwidth = 0;
	unsigned long		flags;
	bool			initd;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	if (!txstate)
		return ret;

	spin_lock_irqsave(&atchan->lock, flags);

	desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);

	/*
	 * If the transfer has not been started yet, there is no need to
	 * compute the residue: it is simply the transfer length.
	 */
	if (!desc->active_xfer) {
		dma_set_residue(txstate, desc->xfer_size);
		goto spin_unlock;
	}
	residue = desc->xfer_size;
	/*
	 * Flush FIFO: only relevant when the transfer is source peripheral
	 * synchronized. Flush is needed before reading CUBC because data in
	 * the FIFO are not reported by CUBC. Reporting a residue of the
	 * transfer length while we have data in FIFO can cause issues.
	 * Usecase: atmel USART has a timeout which means I have received
	 * characters but there is no more character received for a while. On
	 * timeout, it requests the residue. If the data are in the DMA FIFO,
	 * we will return a residue of the transfer length. It means no data
	 * received. If an application is waiting for these data, it will hang
	 * since we won't have another USART timeout without receiving new
	 * data.
	 */
	mask = AT_XDMAC_CC_TYPE | AT_XDMAC_CC_DSYNC;
	value = AT_XDMAC_CC_TYPE_PER_TRAN | AT_XDMAC_CC_DSYNC_PER2MEM;
	if ((desc->lld.mbr_cfg & mask) == value) {
		at_xdmac_write(atxdmac, AT_XDMAC_GSWF, atchan->mask);
		while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS))
			cpu_relax();
	}
	/*
	 * The easiest way to compute the residue should be to pause the DMA
	 * but doing this can lead to missing some data as some devices don't
	 * have a FIFO.
	 * We need to read several registers because:
	 * - DMA is running therefore a descriptor change is possible while
	 *   reading these registers
	 * - When the block transfer is done, the value of the CUBC register
	 *   is set to its initial value until the fetch of the next
	 *   descriptor. This value will corrupt the residue calculation so we
	 *   have to skip it.
	 *
	 * INITD --------                    ------------
	 *              |____________________|
	 *       _______________________  _______________
	 * NDA       @desc2             \/   @desc3
	 *       _______________________/\_______________
	 *       __________  ___________  _______________
	 * CUBC       0    \/ MAX desc1 \/  MAX desc2
	 *       __________/\___________/\_______________
	 *
	 * Since descriptors are aligned on 64 bits, we can assume that
	 * the update of NDA and CUBC is atomic.
	 * Memory barriers are used to ensure the read order of the registers.
	 * A max number of retries is set because, although unlikely, the
	 * loop could otherwise never end.
	 */
	for (retry = 0; retry < AT_XDMAC_RESIDUE_MAX_RETRIES; retry++) {
		check_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
		rmb();
		cur_ubc = at_xdmac_chan_read(atchan, AT_XDMAC_CUBC);
		rmb();
		initd = !!(at_xdmac_chan_read(atchan, AT_XDMAC_CC) & AT_XDMAC_CC_INITD);
		rmb();
		cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
		rmb();

		if ((check_nda == cur_nda) && initd)
			break;
	}

	if (unlikely(retry >= AT_XDMAC_RESIDUE_MAX_RETRIES)) {
		ret = DMA_ERROR;
		goto spin_unlock;
	}
	/*
	 * Flush FIFO: only relevant when the transfer is source peripheral
	 * synchronized. Another flush is needed here because CUBC is updated
	 * when the controller sends the data write command. It can lead to
	 * report data that are not written in the memory or the device. The
	 * FIFO flush ensures that data are really written.
	 */
	if ((desc->lld.mbr_cfg & mask) == value) {
		at_xdmac_write(atxdmac, AT_XDMAC_GSWF, atchan->mask);
		while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS))
			cpu_relax();
	}
	/*
	 * Remove size of all microblocks already transferred and the current
	 * one. Then add the remaining size to transfer of the current
	 * microblock.
	 */
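	/*
	 * Worked example (hypothetical numbers): three 1024-byte microblocks
	 * with a word (4-byte) data width, the second one in flight with
	 * CUBC = 100 words left:
	 * residue = 3072 - 1024 - 1024 + (100 << 2) = 1424 bytes.
	 */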
	descs_list = &desc->descs_list;
	list_for_each_entry_safe(iter, _desc, descs_list, desc_node) {
		dwidth = at_xdmac_get_dwidth(iter->lld.mbr_cfg);
		residue -= (iter->lld.mbr_ubc & 0xffffff) << dwidth;
		if ((iter->lld.mbr_nda & 0xfffffffc) == cur_nda) {
			desc = iter;
			break;
		}
	}
	residue += cur_ubc << dwidth;

	dma_set_residue(txstate, residue);

	dev_dbg(chan2dev(chan),
		"%s: desc=0x%p, tx_dma_desc.phys=%pad, tx_status=%d, cookie=%d, residue=%d\n",
		__func__, desc, &desc->tx_dma_desc.phys, ret, cookie, residue);

spin_unlock:
	spin_unlock_irqrestore(&atchan->lock, flags);
	return ret;
}

/* Call must be protected by lock. */
static void at_xdmac_remove_xfer(struct at_xdmac_chan *atchan,
				 struct at_xdmac_desc *desc)
{
	dev_dbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);

	/*
	 * Remove the transfer from the transfer list then move the transfer
	 * descriptors into the free descriptors list.
	 */
	list_del(&desc->xfer_node);
	list_splice_init(&desc->descs_list, &atchan->free_descs_list);
}

static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
{
	struct at_xdmac_desc	*desc;
	unsigned long		flags;

	spin_lock_irqsave(&atchan->lock, flags);

	/*
	 * If the channel is enabled, do nothing, advance_work will be
	 * triggered after the interrupt.
	 */
	if (!at_xdmac_chan_is_enabled(atchan) && !list_empty(&atchan->xfers_list)) {
		desc = list_first_entry(&atchan->xfers_list,
					struct at_xdmac_desc,
					xfer_node);
		dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
		if (!desc->active_xfer)
			at_xdmac_start_xfer(atchan, desc);
	}

	spin_unlock_irqrestore(&atchan->lock, flags);
}

static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
{
	struct at_xdmac_desc		*desc;
	struct dma_async_tx_descriptor	*txd;

	desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);
	txd = &desc->tx_dma_desc;

	if (txd->flags & DMA_PREP_INTERRUPT)
		dmaengine_desc_get_callback_invoke(txd, NULL);
}

static void at_xdmac_tasklet(unsigned long data)
{
	struct at_xdmac_chan	*atchan = (struct at_xdmac_chan *)data;
	struct at_xdmac_desc	*desc;
	u32			error_mask;

	dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n",
		__func__, atchan->irq_status);

	error_mask = AT_XDMAC_CIS_RBEIS
		     | AT_XDMAC_CIS_WBEIS
		     | AT_XDMAC_CIS_ROIS;

	if (at_xdmac_chan_is_cyclic(atchan)) {
		at_xdmac_handle_cyclic(atchan);
	} else if ((atchan->irq_status & AT_XDMAC_CIS_LIS)
		   || (atchan->irq_status & error_mask)) {
		struct dma_async_tx_descriptor  *txd;

		if (atchan->irq_status & AT_XDMAC_CIS_RBEIS)
			dev_err(chan2dev(&atchan->chan), "read bus error!!!");
		if (atchan->irq_status & AT_XDMAC_CIS_WBEIS)
			dev_err(chan2dev(&atchan->chan), "write bus error!!!");
		if (atchan->irq_status & AT_XDMAC_CIS_ROIS)
			dev_err(chan2dev(&atchan->chan), "request overflow error!!!");

		spin_lock_bh(&atchan->lock);
		desc = list_first_entry(&atchan->xfers_list,
					struct at_xdmac_desc,
					xfer_node);
		dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
		if (!desc->active_xfer) {
			dev_err(chan2dev(&atchan->chan), "Xfer not active: exiting");
			spin_unlock_bh(&atchan->lock);
			return;
		}

		txd = &desc->tx_dma_desc;

		at_xdmac_remove_xfer(atchan, desc);
		spin_unlock_bh(&atchan->lock);

		if (!at_xdmac_chan_is_cyclic(atchan)) {
			dma_cookie_complete(txd);
			if (txd->flags & DMA_PREP_INTERRUPT)
				dmaengine_desc_get_callback_invoke(txd, NULL);
		}

		dma_run_dependencies(txd);

		at_xdmac_advance_work(atchan);
	}
}

static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
{
	struct at_xdmac		*atxdmac = (struct at_xdmac *)dev_id;
	struct at_xdmac_chan	*atchan;
	u32			imr, status, pending;
	u32			chan_imr, chan_status;
	int			i, ret = IRQ_NONE;

	do {
		imr = at_xdmac_read(atxdmac, AT_XDMAC_GIM);
		status = at_xdmac_read(atxdmac, AT_XDMAC_GIS);
		pending = status & imr;

		dev_vdbg(atxdmac->dma.dev,
			 "%s: status=0x%08x, imr=0x%08x, pending=0x%08x\n",
			 __func__, status, imr, pending);

		if (!pending)
			break;

		/* We have to find which channel has generated the interrupt. */
		for (i = 0; i < atxdmac->dma.chancnt; i++) {
			if (!((1 << i) & pending))
				continue;

			atchan = &atxdmac->chan[i];
			chan_imr = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
			chan_status = at_xdmac_chan_read(atchan, AT_XDMAC_CIS);
			atchan->irq_status = chan_status & chan_imr;
			dev_vdbg(atxdmac->dma.dev,
				 "%s: chan%d: imr=0x%x, status=0x%x\n",
				 __func__, i, chan_imr, chan_status);
			dev_vdbg(chan2dev(&atchan->chan),
				 "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
				 __func__,
				 at_xdmac_chan_read(atchan, AT_XDMAC_CC),
				 at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
				 at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
				 at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
				 at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
				 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));

			if (atchan->irq_status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS))
				at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);

			tasklet_schedule(&atchan->tasklet);
			ret = IRQ_HANDLED;
		}

	} while (pending);

	return ret;
}

static void at_xdmac_issue_pending(struct dma_chan *chan)
{
	struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);

	dev_dbg(chan2dev(&atchan->chan), "%s\n", __func__);

	if (!at_xdmac_chan_is_cyclic(atchan))
		at_xdmac_advance_work(atchan);

	return;
}

static int at_xdmac_device_config(struct dma_chan *chan,
				  struct dma_slave_config *config)
{
	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
	int			ret;
	unsigned long		flags;

	dev_dbg(chan2dev(chan), "%s\n", __func__);

	spin_lock_irqsave(&atchan->lock, flags);
	ret = at_xdmac_set_slave_config(chan, config);
	spin_unlock_irqrestore(&atchan->lock, flags);

	return ret;
}

static int at_xdmac_device_pause(struct dma_chan *chan)
{
	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
	unsigned long		flags;

	dev_dbg(chan2dev(chan), "%s\n", __func__);

	if (test_and_set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status))
		return 0;

	spin_lock_irqsave(&atchan->lock, flags);
	at_xdmac_write(atxdmac, AT_XDMAC_GRWS, atchan->mask);
	while (at_xdmac_chan_read(atchan, AT_XDMAC_CC)
	       & (AT_XDMAC_CC_WRIP | AT_XDMAC_CC_RDIP))
		cpu_relax();
	spin_unlock_irqrestore(&atchan->lock, flags);

	return 0;
}

static int at_xdmac_device_resume(struct dma_chan *chan)
{
	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
	unsigned long		flags;

	dev_dbg(chan2dev(chan), "%s\n", __func__);

	spin_lock_irqsave(&atchan->lock, flags);
	if (!at_xdmac_chan_is_paused(atchan)) {
		spin_unlock_irqrestore(&atchan->lock, flags);
		return 0;
	}

	at_xdmac_write(atxdmac, AT_XDMAC_GRWR, atchan->mask);
	clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
	spin_unlock_irqrestore(&atchan->lock, flags);

	return 0;
}

static int at_xdmac_device_terminate_all(struct dma_chan *chan)
{
	struct at_xdmac_desc	*desc, *_desc;
	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
	struct at_xdmac		*atxdmac = to_at_xdmac(atchan->chan.device);
	unsigned long		flags;

	dev_dbg(chan2dev(chan), "%s\n", __func__);

	spin_lock_irqsave(&atchan->lock, flags);
	at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
	while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
		cpu_relax();

	/* Cancel all pending transfers. */
	list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node)
		at_xdmac_remove_xfer(atchan, desc);

	clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
	clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
	spin_unlock_irqrestore(&atchan->lock, flags);

	return 0;
}

static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
{
	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
	struct at_xdmac_desc	*desc;
	int			i;
	unsigned long		flags;

	spin_lock_irqsave(&atchan->lock, flags);

	if (at_xdmac_chan_is_enabled(atchan)) {
		dev_err(chan2dev(chan),
			"can't allocate channel resources (channel enabled)\n");
		i = -EIO;
		goto spin_unlock;
	}

	if (!list_empty(&atchan->free_descs_list)) {
		dev_err(chan2dev(chan),
			"can't allocate channel resources (channel not free from a previous use)\n");
		i = -EIO;
		goto spin_unlock;
	}

	for (i = 0; i < init_nr_desc_per_channel; i++) {
		desc = at_xdmac_alloc_desc(chan, GFP_ATOMIC);
		if (!desc) {
			if (i == 0) {
				dev_warn(chan2dev(chan),
					 "can't allocate any descriptors\n");
				i = -EIO;
				goto spin_unlock;
			}
			dev_warn(chan2dev(chan),
				 "only %d descriptors have been allocated\n", i);
			break;
		}
		list_add_tail(&desc->desc_node, &atchan->free_descs_list);
	}

	dma_cookie_init(chan);

	dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);

spin_unlock:
	spin_unlock_irqrestore(&atchan->lock, flags);
	return i;
}

static void at_xdmac_free_chan_resources(struct dma_chan *chan)
{
	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
	struct at_xdmac		*atxdmac = to_at_xdmac(chan->device);
	struct at_xdmac_desc	*desc, *_desc;

	list_for_each_entry_safe(desc, _desc, &atchan->free_descs_list, desc_node) {
		dev_dbg(chan2dev(chan), "%s: freeing descriptor %p\n", __func__, desc);
		list_del(&desc->desc_node);
		dma_pool_free(atxdmac->at_xdmac_desc_pool, desc, desc->tx_dma_desc.phys);
	}

	return;
}

#ifdef CONFIG_PM
static int atmel_xdmac_prepare(struct device *dev)
{
	struct platform_device	*pdev = to_platform_device(dev);
	struct at_xdmac		*atxdmac = platform_get_drvdata(pdev);
	struct dma_chan		*chan, *_chan;

	list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
		struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);

		/* Wait for transfer completion, except in cyclic case. */
		if (at_xdmac_chan_is_enabled(atchan) && !at_xdmac_chan_is_cyclic(atchan))
			return -EAGAIN;
	}
	return 0;
}
#else
#	define atmel_xdmac_prepare NULL
#endif

#ifdef CONFIG_PM_SLEEP
static int atmel_xdmac_suspend(struct device *dev)
{
	struct platform_device	*pdev = to_platform_device(dev);
	struct at_xdmac		*atxdmac = platform_get_drvdata(pdev);
	struct dma_chan		*chan, *_chan;

	list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
		struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);

		atchan->save_cc = at_xdmac_chan_read(atchan, AT_XDMAC_CC);
		if (at_xdmac_chan_is_cyclic(atchan)) {
			if (!at_xdmac_chan_is_paused(atchan))
				at_xdmac_device_pause(chan);
			atchan->save_cim = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
			atchan->save_cnda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA);
			atchan->save_cndc = at_xdmac_chan_read(atchan, AT_XDMAC_CNDC);
		}
	}
	atxdmac->save_gim = at_xdmac_read(atxdmac, AT_XDMAC_GIM);

	at_xdmac_off(atxdmac);
	clk_disable_unprepare(atxdmac->clk);
	return 0;
}
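/*
 * Re-enable the clock, clear any stale channel interrupts, then restore
 * the saved configuration and restart the channels that were running a
 * cyclic transfer when the system went to sleep.
 */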
static int atmel_xdmac_resume(struct device *dev)
{
	struct platform_device	*pdev = to_platform_device(dev);
	struct at_xdmac		*atxdmac = platform_get_drvdata(pdev);
	struct at_xdmac_chan	*atchan;
	struct dma_chan		*chan, *_chan;
	int			i;
	int			ret;

	ret = clk_prepare_enable(atxdmac->clk);
	if (ret)
		return ret;

	/* Clear pending interrupts. */
	for (i = 0; i < atxdmac->dma.chancnt; i++) {
		atchan = &atxdmac->chan[i];
		while (at_xdmac_chan_read(atchan, AT_XDMAC_CIS))
			cpu_relax();
	}

	at_xdmac_write(atxdmac, AT_XDMAC_GIE, atxdmac->save_gim);
	list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
		atchan = to_at_xdmac_chan(chan);
		at_xdmac_chan_write(atchan, AT_XDMAC_CC, atchan->save_cc);
		if (at_xdmac_chan_is_cyclic(atchan)) {
			if (at_xdmac_chan_is_paused(atchan))
				at_xdmac_device_resume(chan);
			at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, atchan->save_cnda);
			at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, atchan->save_cndc);
			at_xdmac_chan_write(atchan, AT_XDMAC_CIE, atchan->save_cim);
			/* Order the channel programming before the enable. */
			wmb();
			at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);
		}
	}
	return 0;
}
#endif /* CONFIG_PM_SLEEP */
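/*
 * Probe: map the registers, discover the number of channels from the GTYPE
 * register, set up the dmaengine device and register the OF translation
 * hook so clients can request channels from the device tree.
 */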
static int at_xdmac_probe(struct platform_device *pdev)
{
	struct resource	*res;
	struct at_xdmac	*atxdmac;
	int		irq, size, nr_channels, i, ret;
	void __iomem	*base;
	u32		reg;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);
	/*
	 * Read number of xdmac channels, read helper function can't be used
	 * since atxdmac is not yet allocated and we need to know the number
	 * of channels to do the allocation.
	 */
	reg = readl_relaxed(base + AT_XDMAC_GTYPE);
	nr_channels = AT_XDMAC_NB_CH(reg);
	if (nr_channels > AT_XDMAC_MAX_CHAN) {
		dev_err(&pdev->dev, "invalid number of channels (%u)\n",
			nr_channels);
		return -EINVAL;
	}

	size = sizeof(*atxdmac);
	size += nr_channels * sizeof(struct at_xdmac_chan);
	atxdmac = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
	if (!atxdmac) {
		dev_err(&pdev->dev, "can't allocate at_xdmac structure\n");
		return -ENOMEM;
	}

	atxdmac->regs = base;
	atxdmac->irq = irq;

	atxdmac->clk = devm_clk_get(&pdev->dev, "dma_clk");
	if (IS_ERR(atxdmac->clk)) {
		dev_err(&pdev->dev, "can't get dma_clk\n");
		return PTR_ERR(atxdmac->clk);
	}

	/* Do not use dev res to prevent races with tasklet */
	ret = request_irq(atxdmac->irq, at_xdmac_interrupt, 0, "at_xdmac", atxdmac);
	if (ret) {
		dev_err(&pdev->dev, "can't request irq\n");
		return ret;
	}

	ret = clk_prepare_enable(atxdmac->clk);
	if (ret) {
		dev_err(&pdev->dev, "can't prepare or enable clock\n");
		goto err_free_irq;
	}

	atxdmac->at_xdmac_desc_pool =
		dmam_pool_create(dev_name(&pdev->dev), &pdev->dev,
				sizeof(struct at_xdmac_desc), 4, 0);
	if (!atxdmac->at_xdmac_desc_pool) {
		dev_err(&pdev->dev, "no memory for descriptors dma pool\n");
		ret = -ENOMEM;
		goto err_clk_disable;
	}
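	/*
	 * The descriptor pool created above enforces 4-byte alignment,
	 * keeping the hardware linked-list descriptors word-aligned as the
	 * controller expects.
	 */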
	dma_cap_set(DMA_CYCLIC, atxdmac->dma.cap_mask);
	dma_cap_set(DMA_INTERLEAVE, atxdmac->dma.cap_mask);
	dma_cap_set(DMA_MEMCPY, atxdmac->dma.cap_mask);
	dma_cap_set(DMA_MEMSET, atxdmac->dma.cap_mask);
	dma_cap_set(DMA_MEMSET_SG, atxdmac->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, atxdmac->dma.cap_mask);
	/*
	 * Without DMA_PRIVATE the driver is not able to allocate more than
	 * one channel, second allocation fails in private_candidate.
	 */
	dma_cap_set(DMA_PRIVATE, atxdmac->dma.cap_mask);
	atxdmac->dma.dev = &pdev->dev;
	atxdmac->dma.device_alloc_chan_resources = at_xdmac_alloc_chan_resources;
	atxdmac->dma.device_free_chan_resources = at_xdmac_free_chan_resources;
	atxdmac->dma.device_tx_status = at_xdmac_tx_status;
	atxdmac->dma.device_issue_pending = at_xdmac_issue_pending;
	atxdmac->dma.device_prep_dma_cyclic = at_xdmac_prep_dma_cyclic;
	atxdmac->dma.device_prep_interleaved_dma = at_xdmac_prep_interleaved;
	atxdmac->dma.device_prep_dma_memcpy = at_xdmac_prep_dma_memcpy;
	atxdmac->dma.device_prep_dma_memset = at_xdmac_prep_dma_memset;
	atxdmac->dma.device_prep_dma_memset_sg = at_xdmac_prep_dma_memset_sg;
	atxdmac->dma.device_prep_slave_sg = at_xdmac_prep_slave_sg;
	atxdmac->dma.device_config = at_xdmac_device_config;
	atxdmac->dma.device_pause = at_xdmac_device_pause;
	atxdmac->dma.device_resume = at_xdmac_device_resume;
	atxdmac->dma.device_terminate_all = at_xdmac_device_terminate_all;
	atxdmac->dma.src_addr_widths = AT_XDMAC_DMA_BUSWIDTHS;
	atxdmac->dma.dst_addr_widths = AT_XDMAC_DMA_BUSWIDTHS;
	atxdmac->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	atxdmac->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	/* Disable all chans and interrupts. */
	at_xdmac_off(atxdmac);

	/* Init channels. */
	INIT_LIST_HEAD(&atxdmac->dma.channels);
	for (i = 0; i < nr_channels; i++) {
		struct at_xdmac_chan *atchan = &atxdmac->chan[i];

		atchan->chan.device = &atxdmac->dma;
		list_add_tail(&atchan->chan.device_node,
			      &atxdmac->dma.channels);

		atchan->ch_regs = at_xdmac_chan_reg_base(atxdmac, i);
		atchan->mask = 1 << i;

		spin_lock_init(&atchan->lock);
		INIT_LIST_HEAD(&atchan->xfers_list);
		INIT_LIST_HEAD(&atchan->free_descs_list);
		tasklet_init(&atchan->tasklet, at_xdmac_tasklet,
			     (unsigned long)atchan);

		/* Clear pending interrupts. */
		while (at_xdmac_chan_read(atchan, AT_XDMAC_CIS))
			cpu_relax();
	}
	platform_set_drvdata(pdev, atxdmac);

	ret = dma_async_device_register(&atxdmac->dma);
	if (ret) {
		dev_err(&pdev->dev, "fail to register DMA engine device\n");
		goto err_clk_disable;
	}

	ret = of_dma_controller_register(pdev->dev.of_node,
					 at_xdmac_xlate, atxdmac);
	if (ret) {
		dev_err(&pdev->dev, "could not register of dma controller\n");
		goto err_dma_unregister;
	}

	dev_info(&pdev->dev, "%d channels, mapped at 0x%p\n",
		 nr_channels, atxdmac->regs);

	return 0;

err_dma_unregister:
	dma_async_device_unregister(&atxdmac->dma);
err_clk_disable:
	clk_disable_unprepare(atxdmac->clk);
err_free_irq:
	free_irq(atxdmac->irq, atxdmac);
	return ret;
}
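/* Undo probe: quiesce the controller, then unregister and free resources. */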
static int at_xdmac_remove(struct platform_device *pdev)
{
	struct at_xdmac	*atxdmac = (struct at_xdmac *)platform_get_drvdata(pdev);
	int		i;

	at_xdmac_off(atxdmac);
	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&atxdmac->dma);
	clk_disable_unprepare(atxdmac->clk);

	free_irq(atxdmac->irq, atxdmac);

	for (i = 0; i < atxdmac->dma.chancnt; i++) {
		struct at_xdmac_chan *atchan = &atxdmac->chan[i];

		tasklet_kill(&atchan->tasklet);
		at_xdmac_free_chan_resources(&atchan->chan);
	}

	return 0;
}
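/*
 * Late system sleep ops are used so that client (slave) drivers suspend
 * first and have a chance to stop their own transfers before the
 * controller state is saved.
 */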
static const struct dev_pm_ops atmel_xdmac_dev_pm_ops = {
	.prepare	= atmel_xdmac_prepare,
	SET_LATE_SYSTEM_SLEEP_PM_OPS(atmel_xdmac_suspend, atmel_xdmac_resume)
};
static const struct of_device_id atmel_xdmac_dt_ids[] = {
	{
		.compatible = "atmel,sama5d4-dma",
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(of, atmel_xdmac_dt_ids);
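/*
 * Device-tree usage sketch (values are illustrative, following the
 * atmel,sama5d4-dma binding and the AT91_XDMAC_DT_* macros from
 * <dt-bindings/dma/at91.h>):
 *
 *	dma1: dma-controller@f0004000 {
 *		compatible = "atmel,sama5d4-dma";
 *		reg = <0xf0004000 0x200>;
 *		interrupts = <50 4 0>;
 *		clocks = <&dma1_clk>;
 *		clock-names = "dma_clk";
 *	};
 *
 * A client node then requests a channel with something like:
 *
 *	dmas = <&dma1 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
 *		       | AT91_XDMAC_DT_PERID(12))>;
 *	dma-names = "tx";
 */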
static struct platform_driver at_xdmac_driver = {
	.probe		= at_xdmac_probe,
	.remove		= at_xdmac_remove,
	.driver = {
		.name		= "at_xdmac",
		.of_match_table	= of_match_ptr(atmel_xdmac_dt_ids),
		.pm		= &atmel_xdmac_dev_pm_ops,
	}
};
static int __init at_xdmac_init(void)
{
	return platform_driver_probe(&at_xdmac_driver, at_xdmac_probe);
}
subsys_initcall(at_xdmac_init);

MODULE_DESCRIPTION("Atmel Extended DMA Controller driver");
MODULE_AUTHOR("Ludovic Desroches <ludovic.desroches@atmel.com>");
MODULE_LICENSE("GPL");