// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for STM32 DMA controller
 *
 * Inspired by dma-jz4740.c and tegra20-apb-dma.c
 *
 * Copyright (C) M'boumba Cedric Madianga 2015
 * Author: M'boumba Cedric Madianga <cedric.madianga@gmail.com>
 *         Pierre-Yves Mordret <pierre-yves.mordret@st.com>
 */
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/iopoll.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include "virt-dma.h"
#define STM32_DMA_LISR			0x0000 /* DMA Low Int Status Reg */
#define STM32_DMA_HISR			0x0004 /* DMA High Int Status Reg */
#define STM32_DMA_LIFCR			0x0008 /* DMA Low Int Flag Clear Reg */
#define STM32_DMA_HIFCR			0x000c /* DMA High Int Flag Clear Reg */
#define STM32_DMA_TCI			BIT(5) /* Transfer Complete Interrupt */
#define STM32_DMA_HTI			BIT(4) /* Half Transfer Interrupt */
#define STM32_DMA_TEI			BIT(3) /* Transfer Error Interrupt */
#define STM32_DMA_DMEI			BIT(2) /* Direct Mode Error Interrupt */
#define STM32_DMA_FEI			BIT(0) /* FIFO Error Interrupt */
#define STM32_DMA_MASKI			(STM32_DMA_TCI \
					 | STM32_DMA_TEI \
					 | STM32_DMA_DMEI \
					 | STM32_DMA_FEI)
/* DMA Stream x Configuration Register */
#define STM32_DMA_SCR(x)		(0x0010 + 0x18 * (x)) /* x = 0..7 */
#define STM32_DMA_SCR_REQ(n)		((n & 0x7) << 25)
#define STM32_DMA_SCR_MBURST_MASK	GENMASK(24, 23)
#define STM32_DMA_SCR_MBURST(n)		((n & 0x3) << 23)
#define STM32_DMA_SCR_PBURST_MASK	GENMASK(22, 21)
#define STM32_DMA_SCR_PBURST(n)		((n & 0x3) << 21)
#define STM32_DMA_SCR_PL_MASK		GENMASK(17, 16)
#define STM32_DMA_SCR_PL(n)		((n & 0x3) << 16)
#define STM32_DMA_SCR_MSIZE_MASK	GENMASK(14, 13)
#define STM32_DMA_SCR_MSIZE(n)		((n & 0x3) << 13)
#define STM32_DMA_SCR_PSIZE_MASK	GENMASK(12, 11)
#define STM32_DMA_SCR_PSIZE(n)		((n & 0x3) << 11)
#define STM32_DMA_SCR_PSIZE_GET(n)	((n & STM32_DMA_SCR_PSIZE_MASK) >> 11)
#define STM32_DMA_SCR_DIR_MASK		GENMASK(7, 6)
#define STM32_DMA_SCR_DIR(n)		((n & 0x3) << 6)
#define STM32_DMA_SCR_CT		BIT(19) /* Target in double buffer */
#define STM32_DMA_SCR_DBM		BIT(18) /* Double Buffer Mode */
#define STM32_DMA_SCR_PINCOS		BIT(15) /* Peripheral inc offset size */
#define STM32_DMA_SCR_MINC		BIT(10) /* Memory increment mode */
#define STM32_DMA_SCR_PINC		BIT(9) /* Peripheral increment mode */
#define STM32_DMA_SCR_CIRC		BIT(8) /* Circular mode */
#define STM32_DMA_SCR_PFCTRL		BIT(5) /* Peripheral Flow Controller */
#define STM32_DMA_SCR_TCIE		BIT(4) /* Transfer Complete Int Enable */
#define STM32_DMA_SCR_TEIE		BIT(2) /* Transfer Error Int Enable */
#define STM32_DMA_SCR_DMEIE		BIT(1) /* Direct Mode Err Int Enable */
#define STM32_DMA_SCR_EN		BIT(0) /* Stream Enable */
#define STM32_DMA_SCR_CFG_MASK		(STM32_DMA_SCR_PINC \
					| STM32_DMA_SCR_MINC \
					| STM32_DMA_SCR_PINCOS \
					| STM32_DMA_SCR_PL_MASK)
#define STM32_DMA_SCR_IRQ_MASK		(STM32_DMA_SCR_TCIE \
					| STM32_DMA_SCR_TEIE \
					| STM32_DMA_SCR_DMEIE)
/* DMA Stream x number of data register */
#define STM32_DMA_SNDTR(x)		(0x0014 + 0x18 * (x))

/* DMA stream peripheral address register */
#define STM32_DMA_SPAR(x)		(0x0018 + 0x18 * (x))

/* DMA stream x memory 0 address register */
#define STM32_DMA_SM0AR(x)		(0x001c + 0x18 * (x))

/* DMA stream x memory 1 address register */
#define STM32_DMA_SM1AR(x)		(0x0020 + 0x18 * (x))

/* DMA stream x FIFO control register */
#define STM32_DMA_SFCR(x)		(0x0024 + 0x18 * (x))
#define STM32_DMA_SFCR_FTH_MASK		GENMASK(1, 0)
#define STM32_DMA_SFCR_FTH(n)		(n & STM32_DMA_SFCR_FTH_MASK)
#define STM32_DMA_SFCR_FEIE		BIT(7) /* FIFO error interrupt enable */
#define STM32_DMA_SFCR_DMDIS		BIT(2) /* Direct mode disable */
#define STM32_DMA_SFCR_MASK		(STM32_DMA_SFCR_FEIE \
					| STM32_DMA_SFCR_DMDIS)
/* DMA direction */
#define STM32_DMA_DEV_TO_MEM		0x00
#define STM32_DMA_MEM_TO_DEV		0x01
#define STM32_DMA_MEM_TO_MEM		0x02

/* DMA priority level */
#define STM32_DMA_PRIORITY_LOW		0x00
#define STM32_DMA_PRIORITY_MEDIUM	0x01
#define STM32_DMA_PRIORITY_HIGH		0x02
#define STM32_DMA_PRIORITY_VERY_HIGH	0x03

/* DMA FIFO threshold selection */
#define STM32_DMA_FIFO_THRESHOLD_1QUARTERFULL		0x00
#define STM32_DMA_FIFO_THRESHOLD_HALFFULL		0x01
#define STM32_DMA_FIFO_THRESHOLD_3QUARTERSFULL		0x02
#define STM32_DMA_FIFO_THRESHOLD_FULL			0x03
#define STM32_DMA_FIFO_THRESHOLD_NONE			0x04
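
/*
 * A threshold of n drains the FIFO once (n + 1) * 4 bytes of the 16-byte
 * FIFO are filled (4, 8, 12 or 16 bytes); THRESHOLD_NONE bypasses the FIFO
 * entirely (direct mode). See stm32_dma_is_burst_possible() below.
 */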
#define STM32_DMA_MAX_DATA_ITEMS	0xffff
/*
 * Valid transfer starts from @0 to @0xFFFE leading to unaligned scatter
 * gather at boundary. Thus it's safer to round down this value on FIFO
 * size (16 Bytes)
 */
#define STM32_DMA_ALIGNED_MAX_DATA_ITEMS	\
	ALIGN_DOWN(STM32_DMA_MAX_DATA_ITEMS, 16)
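/* i.e. 0xffff rounded down to a multiple of the 16-byte FIFO: 0xfff0 items */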
#define STM32_DMA_MAX_CHANNELS		0x08
#define STM32_DMA_MAX_REQUEST_ID	0x08
#define STM32_DMA_MAX_DATA_PARAM	0x03
#define STM32_DMA_FIFO_SIZE		16	/* FIFO is 16 bytes */
#define STM32_DMA_MIN_BURST		4
#define STM32_DMA_MAX_BURST		16

/* DMA Features */
#define STM32_DMA_THRESHOLD_FTR_MASK	GENMASK(1, 0)
#define STM32_DMA_THRESHOLD_FTR_GET(n)	((n) & STM32_DMA_THRESHOLD_FTR_MASK)
#define STM32_DMA_DIRECT_MODE_MASK	BIT(2)
#define STM32_DMA_DIRECT_MODE_GET(n)	(((n) & STM32_DMA_DIRECT_MODE_MASK) >> 2)
enum stm32_dma_width {
	STM32_DMA_BYTE,
	STM32_DMA_HALF_WORD,
	STM32_DMA_WORD,
};

enum stm32_dma_burst_size {
	STM32_DMA_BURST_SINGLE,
	STM32_DMA_BURST_INCR4,
	STM32_DMA_BURST_INCR8,
	STM32_DMA_BURST_INCR16,
};
/**
 * struct stm32_dma_cfg - STM32 DMA custom configuration
 * @channel_id: channel ID
 * @request_line: DMA request
 * @stream_config: 32bit mask specifying the DMA channel configuration
 * @features: 32bit mask specifying the DMA Feature list
 */
struct stm32_dma_cfg {
	u32 channel_id;
	u32 request_line;
	u32 stream_config;
	u32 features;
};

struct stm32_dma_chan_reg {
	u32 dma_lisr;
	u32 dma_hisr;
	u32 dma_lifcr;
	u32 dma_hifcr;
	u32 dma_scr;
	u32 dma_sndtr;
	u32 dma_spar;
	u32 dma_sm0ar;
	u32 dma_sm1ar;
	u32 dma_sfcr;
};

struct stm32_dma_sg_req {
	u32 len;
	struct stm32_dma_chan_reg chan_reg;
};

struct stm32_dma_desc {
	struct virt_dma_desc vdesc;
	bool cyclic;
	u32 num_sgs;
	struct stm32_dma_sg_req sg_req[];
};

struct stm32_dma_chan {
	struct virt_dma_chan vchan;
	bool config_init;
	bool busy;
	u32 id;
	u32 irq;
	struct stm32_dma_desc *desc;
	u32 next_sg;
	struct dma_slave_config dma_sconfig;
	struct stm32_dma_chan_reg chan_reg;
	u32 threshold;
	u32 mem_burst;
	u32 mem_width;
};

struct stm32_dma_device {
	struct dma_device ddev;
	void __iomem *base;
	struct clk *clk;
	bool mem2mem;
	struct stm32_dma_chan chan[STM32_DMA_MAX_CHANNELS];
};
static struct stm32_dma_device *stm32_dma_get_dev(struct stm32_dma_chan *chan)
{
	return container_of(chan->vchan.chan.device, struct stm32_dma_device,
			    ddev);
}

static struct stm32_dma_chan *to_stm32_dma_chan(struct dma_chan *c)
{
	return container_of(c, struct stm32_dma_chan, vchan.chan);
}

static struct stm32_dma_desc *to_stm32_dma_desc(struct virt_dma_desc *vdesc)
{
	return container_of(vdesc, struct stm32_dma_desc, vdesc);
}

static struct device *chan2dev(struct stm32_dma_chan *chan)
{
	return &chan->vchan.chan.dev->device;
}

static u32 stm32_dma_read(struct stm32_dma_device *dmadev, u32 reg)
{
	return readl_relaxed(dmadev->base + reg);
}

static void stm32_dma_write(struct stm32_dma_device *dmadev, u32 reg, u32 val)
{
	writel_relaxed(val, dmadev->base + reg);
}
static int stm32_dma_get_width(struct stm32_dma_chan *chan,
			       enum dma_slave_buswidth width)
{
	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		return STM32_DMA_BYTE;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		return STM32_DMA_HALF_WORD;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		return STM32_DMA_WORD;
	default:
		dev_err(chan2dev(chan), "Dma bus width not supported\n");
		return -EINVAL;
	}
}
static enum dma_slave_buswidth stm32_dma_get_max_width(u32 buf_len,
						       u32 threshold)
{
	enum dma_slave_buswidth max_width;

	if (threshold == STM32_DMA_FIFO_THRESHOLD_FULL)
		max_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	else
		max_width = DMA_SLAVE_BUSWIDTH_2_BYTES;

	while ((buf_len < max_width || buf_len % max_width) &&
	       max_width > DMA_SLAVE_BUSWIDTH_1_BYTE)
		max_width = max_width >> 1;

	return max_width;
}
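
/*
 * Example: for buf_len = 6 at the FULL threshold, the loop above halves the
 * initial 4-byte width once (6 % 4 != 0) and stops at 2 bytes (6 % 2 == 0),
 * so DMA_SLAVE_BUSWIDTH_2_BYTES is returned.
 */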
static bool stm32_dma_fifo_threshold_is_allowed(u32 burst, u32 threshold,
						enum dma_slave_buswidth width)
{
	u32 remaining;

	if (threshold == STM32_DMA_FIFO_THRESHOLD_NONE)
		return false;

	if (width != DMA_SLAVE_BUSWIDTH_UNDEFINED) {
		if (burst != 0) {
			/*
			 * If number of beats fit in several whole bursts
			 * this configuration is allowed.
			 */
			remaining = ((STM32_DMA_FIFO_SIZE / width) *
				     (threshold + 1) / 4) % burst;

			if (remaining == 0)
				return true;
		} else {
			return true;
		}
	}

	return false;
}
static bool stm32_dma_is_burst_possible(u32 buf_len, u32 threshold)
{
	/* If FIFO direct mode, burst is not possible */
	if (threshold == STM32_DMA_FIFO_THRESHOLD_NONE)
		return false;

	/*
	 * Buffer or period length has to be aligned on FIFO depth.
	 * Otherwise bytes may be stuck within FIFO at buffer or period
	 * length boundary.
	 */
	return ((buf_len % ((threshold + 1) * 4)) == 0);
}
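
/*
 * E.g. at the half-full threshold (1) the FIFO drains in (1 + 1) * 4 = 8 byte
 * chunks, so bursts are only allowed when buf_len is a multiple of 8.
 */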
static u32 stm32_dma_get_best_burst(u32 buf_len, u32 max_burst, u32 threshold,
				    enum dma_slave_buswidth width)
{
	u32 best_burst = max_burst;

	if (best_burst == 1 || !stm32_dma_is_burst_possible(buf_len, threshold))
		return 0;

	while ((buf_len < best_burst * width && best_burst > 1) ||
	       !stm32_dma_fifo_threshold_is_allowed(best_burst, threshold,
						    width)) {
		if (best_burst > STM32_DMA_MIN_BURST)
			best_burst = best_burst >> 1;
		else
			best_burst = 0;
	}

	return best_burst;
}
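
/*
 * Worked example: buf_len = 32, max_burst = 16, FULL threshold, 4-byte width.
 * The FIFO holds (16 / 4) * (3 + 1) / 4 = 4 beats at that threshold, so 16-
 * and 8-beat bursts leave a remainder and are halved until a 4-beat burst
 * divides the FIFO fill level evenly; 4 is returned.
 */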
static int stm32_dma_get_burst(struct stm32_dma_chan *chan, u32 maxburst)
{
	switch (maxburst) {
	case 0:
	case 1:
		return STM32_DMA_BURST_SINGLE;
	case 4:
		return STM32_DMA_BURST_INCR4;
	case 8:
		return STM32_DMA_BURST_INCR8;
	case 16:
		return STM32_DMA_BURST_INCR16;
	default:
		dev_err(chan2dev(chan), "Dma burst size not supported\n");
		return -EINVAL;
	}
}
static void stm32_dma_set_fifo_config(struct stm32_dma_chan *chan,
				      u32 src_burst, u32 dst_burst)
{
	chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_MASK;
	chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_DMEIE;

	if (!src_burst && !dst_burst) {
		/* Using direct mode */
		chan->chan_reg.dma_scr |= STM32_DMA_SCR_DMEIE;
	} else {
		/* Using FIFO mode */
		chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_MASK;
	}
}
static int stm32_dma_slave_config(struct dma_chan *c,
				  struct dma_slave_config *config)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);

	memcpy(&chan->dma_sconfig, config, sizeof(*config));

	chan->config_init = true;

	return 0;
}
static u32 stm32_dma_irq_status(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	u32 flags, dma_isr;

	/*
	 * Read "flags" from DMA_xISR register corresponding to the selected
	 * DMA channel at the correct bit offset inside that register.
	 *
	 * If (ch % 4) is 2 or 3, left shift the mask by 16 bits.
	 * If (ch % 4) is 1 or 3, additionally left shift the mask by 6 bits.
	 */
	if (chan->id & 4)
		dma_isr = stm32_dma_read(dmadev, STM32_DMA_HISR);
	else
		dma_isr = stm32_dma_read(dmadev, STM32_DMA_LISR);

	flags = dma_isr >> (((chan->id & 2) << 3) | ((chan->id & 1) * 6));

	return flags & STM32_DMA_MASKI;
}
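
/*
 * Worked example: channel 6 is reported in the high register (6 & 4), at bit
 * offset ((6 & 2) << 3) | ((6 & 1) * 6) = 16, so its transfer-complete flag
 * (BIT(5)) is read from HISR bit 21.
 */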
static void stm32_dma_irq_clear(struct stm32_dma_chan *chan, u32 flags)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	u32 dma_ifcr;

	/*
	 * Write "flags" to the DMA_xIFCR register corresponding to the selected
	 * DMA channel at the correct bit offset inside that register.
	 *
	 * If (ch % 4) is 2 or 3, left shift the mask by 16 bits.
	 * If (ch % 4) is 1 or 3, additionally left shift the mask by 6 bits.
	 */
	flags &= STM32_DMA_MASKI;
	dma_ifcr = flags << (((chan->id & 2) << 3) | ((chan->id & 1) * 6));

	if (chan->id & 4)
		stm32_dma_write(dmadev, STM32_DMA_HIFCR, dma_ifcr);
	else
		stm32_dma_write(dmadev, STM32_DMA_LIFCR, dma_ifcr);
}
static int stm32_dma_disable_chan(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	u32 dma_scr, id, reg;

	id = chan->id;
	reg = STM32_DMA_SCR(id);
	dma_scr = stm32_dma_read(dmadev, reg);

	if (dma_scr & STM32_DMA_SCR_EN) {
		dma_scr &= ~STM32_DMA_SCR_EN;
		stm32_dma_write(dmadev, reg, dma_scr);

		return readl_relaxed_poll_timeout_atomic(dmadev->base + reg,
					dma_scr, !(dma_scr & STM32_DMA_SCR_EN),
					10, 1000000);
	}

	return 0;
}
static void stm32_dma_stop(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	u32 dma_scr, dma_sfcr, status;
	int ret;

	/* Disable interrupts */
	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
	dma_scr &= ~STM32_DMA_SCR_IRQ_MASK;
	stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), dma_scr);
	dma_sfcr = stm32_dma_read(dmadev, STM32_DMA_SFCR(chan->id));
	dma_sfcr &= ~STM32_DMA_SFCR_FEIE;
	stm32_dma_write(dmadev, STM32_DMA_SFCR(chan->id), dma_sfcr);

	/* Disable DMA */
	ret = stm32_dma_disable_chan(chan);
	if (ret < 0)
		return;

	/* Clear interrupt status if it is there */
	status = stm32_dma_irq_status(chan);
	if (status) {
		dev_dbg(chan2dev(chan), "%s(): clearing interrupt: 0x%08x\n",
			__func__, status);
		stm32_dma_irq_clear(chan, status);
	}

	chan->busy = false;
}
static int stm32_dma_terminate_all(struct dma_chan *c)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&chan->vchan.lock, flags);

	if (chan->desc) {
		vchan_terminate_vdesc(&chan->desc->vdesc);
		if (chan->busy)
			stm32_dma_stop(chan);
		chan->desc = NULL;
	}

	vchan_get_all_descriptors(&chan->vchan, &head);
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
	vchan_dma_desc_free_list(&chan->vchan, &head);

	return 0;
}
static void stm32_dma_synchronize(struct dma_chan *c)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);

	vchan_synchronize(&chan->vchan);
}
static void stm32_dma_dump_reg(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	u32 scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
	u32 ndtr = stm32_dma_read(dmadev, STM32_DMA_SNDTR(chan->id));
	u32 spar = stm32_dma_read(dmadev, STM32_DMA_SPAR(chan->id));
	u32 sm0ar = stm32_dma_read(dmadev, STM32_DMA_SM0AR(chan->id));
	u32 sm1ar = stm32_dma_read(dmadev, STM32_DMA_SM1AR(chan->id));
	u32 sfcr = stm32_dma_read(dmadev, STM32_DMA_SFCR(chan->id));

	dev_dbg(chan2dev(chan), "SCR:   0x%08x\n", scr);
	dev_dbg(chan2dev(chan), "NDTR:  0x%08x\n", ndtr);
	dev_dbg(chan2dev(chan), "SPAR:  0x%08x\n", spar);
	dev_dbg(chan2dev(chan), "SM0AR: 0x%08x\n", sm0ar);
	dev_dbg(chan2dev(chan), "SM1AR: 0x%08x\n", sm1ar);
	dev_dbg(chan2dev(chan), "SFCR:  0x%08x\n", sfcr);
}
static void stm32_dma_configure_next_sg(struct stm32_dma_chan *chan);

static void stm32_dma_start_transfer(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	struct virt_dma_desc *vdesc;
	struct stm32_dma_sg_req *sg_req;
	struct stm32_dma_chan_reg *reg;
	u32 status;
	int ret;

	ret = stm32_dma_disable_chan(chan);
	if (ret < 0)
		return;

	if (!chan->desc) {
		vdesc = vchan_next_desc(&chan->vchan);
		if (!vdesc)
			return;

		list_del(&vdesc->node);

		chan->desc = to_stm32_dma_desc(vdesc);
		chan->next_sg = 0;
	}

	if (chan->next_sg == chan->desc->num_sgs)
		chan->next_sg = 0;

	sg_req = &chan->desc->sg_req[chan->next_sg];
	reg = &sg_req->chan_reg;

	reg->dma_scr &= ~STM32_DMA_SCR_EN;
	stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), reg->dma_scr);
	stm32_dma_write(dmadev, STM32_DMA_SPAR(chan->id), reg->dma_spar);
	stm32_dma_write(dmadev, STM32_DMA_SM0AR(chan->id), reg->dma_sm0ar);
	stm32_dma_write(dmadev, STM32_DMA_SFCR(chan->id), reg->dma_sfcr);
	stm32_dma_write(dmadev, STM32_DMA_SM1AR(chan->id), reg->dma_sm1ar);
	stm32_dma_write(dmadev, STM32_DMA_SNDTR(chan->id), reg->dma_sndtr);

	chan->next_sg++;

	/* Clear interrupt status if it is there */
	status = stm32_dma_irq_status(chan);
	if (status)
		stm32_dma_irq_clear(chan, status);

	if (chan->desc->cyclic)
		stm32_dma_configure_next_sg(chan);

	stm32_dma_dump_reg(chan);

	/* Start DMA */
	reg->dma_scr |= STM32_DMA_SCR_EN;
	stm32_dma_write(dmadev, STM32_DMA_SCR(chan->id), reg->dma_scr);

	chan->busy = true;

	dev_dbg(chan2dev(chan), "vchan %pK: started\n", &chan->vchan);
}
static void stm32_dma_configure_next_sg(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	struct stm32_dma_sg_req *sg_req;
	u32 dma_scr, dma_sm0ar, dma_sm1ar, id;

	id = chan->id;
	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));

	if (dma_scr & STM32_DMA_SCR_DBM) {
		if (chan->next_sg == chan->desc->num_sgs)
			chan->next_sg = 0;

		sg_req = &chan->desc->sg_req[chan->next_sg];

		if (dma_scr & STM32_DMA_SCR_CT) {
			dma_sm0ar = sg_req->chan_reg.dma_sm0ar;
			stm32_dma_write(dmadev, STM32_DMA_SM0AR(id), dma_sm0ar);
			dev_dbg(chan2dev(chan), "CT=1 <=> SM0AR: 0x%08x\n",
				stm32_dma_read(dmadev, STM32_DMA_SM0AR(id)));
		} else {
			dma_sm1ar = sg_req->chan_reg.dma_sm1ar;
			stm32_dma_write(dmadev, STM32_DMA_SM1AR(id), dma_sm1ar);
			dev_dbg(chan2dev(chan), "CT=0 <=> SM1AR: 0x%08x\n",
				stm32_dma_read(dmadev, STM32_DMA_SM1AR(id)));
		}
	}
}
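
/*
 * In double buffer mode the stream ping-pongs between SM0AR and SM1AR: while
 * the hardware drains the target selected by the CT bit, the function above
 * rewrites the address of the other target one period ahead, so reprogramming
 * never races with the buffer currently in flight.
 */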
static void stm32_dma_handle_chan_done(struct stm32_dma_chan *chan)
{
	if (chan->desc) {
		if (chan->desc->cyclic) {
			vchan_cyclic_callback(&chan->desc->vdesc);
			chan->next_sg++;
			stm32_dma_configure_next_sg(chan);
		} else {
			chan->busy = false;
			if (chan->next_sg == chan->desc->num_sgs) {
				vchan_cookie_complete(&chan->desc->vdesc);
				chan->desc = NULL;
			}
			stm32_dma_start_transfer(chan);
		}
	}
}
static irqreturn_t stm32_dma_chan_irq(int irq, void *devid)
{
	struct stm32_dma_chan *chan = devid;
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	u32 status, scr, sfcr;

	spin_lock(&chan->vchan.lock);

	status = stm32_dma_irq_status(chan);
	scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
	sfcr = stm32_dma_read(dmadev, STM32_DMA_SFCR(chan->id));

	if (status & STM32_DMA_TCI) {
		stm32_dma_irq_clear(chan, STM32_DMA_TCI);
		if (scr & STM32_DMA_SCR_TCIE)
			stm32_dma_handle_chan_done(chan);
		status &= ~STM32_DMA_TCI;
	}
	if (status & STM32_DMA_HTI) {
		stm32_dma_irq_clear(chan, STM32_DMA_HTI);
		status &= ~STM32_DMA_HTI;
	}
	if (status & STM32_DMA_FEI) {
		stm32_dma_irq_clear(chan, STM32_DMA_FEI);
		status &= ~STM32_DMA_FEI;
		if (sfcr & STM32_DMA_SFCR_FEIE) {
			if (!(scr & STM32_DMA_SCR_EN))
				dev_err(chan2dev(chan), "FIFO Error\n");
			else
				dev_dbg(chan2dev(chan), "FIFO over/underrun\n");
		}
	}
	if (status & STM32_DMA_DMEI) {
		stm32_dma_irq_clear(chan, STM32_DMA_DMEI);
		status &= ~STM32_DMA_DMEI;
		if (scr & STM32_DMA_SCR_DMEIE)
			dev_dbg(chan2dev(chan), "Direct mode overrun\n");
	}

	if (status) {
		stm32_dma_irq_clear(chan, status);
		dev_err(chan2dev(chan), "DMA error: status=0x%08x\n", status);
		if (!(scr & STM32_DMA_SCR_EN))
			dev_err(chan2dev(chan), "chan disabled by HW\n");
	}

	spin_unlock(&chan->vchan.lock);

	return IRQ_HANDLED;
}
static void stm32_dma_issue_pending(struct dma_chan *c)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	unsigned long flags;

	spin_lock_irqsave(&chan->vchan.lock, flags);
	if (vchan_issue_pending(&chan->vchan) && !chan->desc && !chan->busy) {
		dev_dbg(chan2dev(chan), "vchan %pK: issued\n", &chan->vchan);
		stm32_dma_start_transfer(chan);
	}
	spin_unlock_irqrestore(&chan->vchan.lock, flags);
}
static int stm32_dma_set_xfer_param(struct stm32_dma_chan *chan,
				    enum dma_transfer_direction direction,
				    enum dma_slave_buswidth *buswidth,
				    u32 buf_len)
{
	enum dma_slave_buswidth src_addr_width, dst_addr_width;
	int src_bus_width, dst_bus_width;
	int src_burst_size, dst_burst_size;
	u32 src_maxburst, dst_maxburst, src_best_burst, dst_best_burst;
	u32 dma_scr, fifoth;

	src_addr_width = chan->dma_sconfig.src_addr_width;
	dst_addr_width = chan->dma_sconfig.dst_addr_width;
	src_maxburst = chan->dma_sconfig.src_maxburst;
	dst_maxburst = chan->dma_sconfig.dst_maxburst;
	fifoth = chan->threshold;

	switch (direction) {
	case DMA_MEM_TO_DEV:
		/* Set device data size */
		dst_bus_width = stm32_dma_get_width(chan, dst_addr_width);
		if (dst_bus_width < 0)
			return dst_bus_width;

		/* Set device burst size */
		dst_best_burst = stm32_dma_get_best_burst(buf_len,
							  dst_maxburst,
							  fifoth,
							  dst_addr_width);

		dst_burst_size = stm32_dma_get_burst(chan, dst_best_burst);
		if (dst_burst_size < 0)
			return dst_burst_size;

		/* Set memory data size */
		src_addr_width = stm32_dma_get_max_width(buf_len, fifoth);
		chan->mem_width = src_addr_width;
		src_bus_width = stm32_dma_get_width(chan, src_addr_width);
		if (src_bus_width < 0)
			return src_bus_width;

		/* Set memory burst size */
		src_maxburst = STM32_DMA_MAX_BURST;
		src_best_burst = stm32_dma_get_best_burst(buf_len,
							  src_maxburst,
							  fifoth,
							  src_addr_width);
		src_burst_size = stm32_dma_get_burst(chan, src_best_burst);
		if (src_burst_size < 0)
			return src_burst_size;

		dma_scr = STM32_DMA_SCR_DIR(STM32_DMA_MEM_TO_DEV) |
			STM32_DMA_SCR_PSIZE(dst_bus_width) |
			STM32_DMA_SCR_MSIZE(src_bus_width) |
			STM32_DMA_SCR_PBURST(dst_burst_size) |
			STM32_DMA_SCR_MBURST(src_burst_size);

		/* Set FIFO threshold */
		chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_FTH_MASK;
		if (fifoth != STM32_DMA_FIFO_THRESHOLD_NONE)
			chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_FTH(fifoth);

		/* Set peripheral address */
		chan->chan_reg.dma_spar = chan->dma_sconfig.dst_addr;
		*buswidth = dst_addr_width;
		break;

	case DMA_DEV_TO_MEM:
		/* Set device data size */
		src_bus_width = stm32_dma_get_width(chan, src_addr_width);
		if (src_bus_width < 0)
			return src_bus_width;

		/* Set device burst size */
		src_best_burst = stm32_dma_get_best_burst(buf_len,
							  src_maxburst,
							  fifoth,
							  src_addr_width);
		chan->mem_burst = src_best_burst;
		src_burst_size = stm32_dma_get_burst(chan, src_best_burst);
		if (src_burst_size < 0)
			return src_burst_size;

		/* Set memory data size */
		dst_addr_width = stm32_dma_get_max_width(buf_len, fifoth);
		chan->mem_width = dst_addr_width;
		dst_bus_width = stm32_dma_get_width(chan, dst_addr_width);
		if (dst_bus_width < 0)
			return dst_bus_width;

		/* Set memory burst size */
		dst_maxburst = STM32_DMA_MAX_BURST;
		dst_best_burst = stm32_dma_get_best_burst(buf_len,
							  dst_maxburst,
							  fifoth,
							  dst_addr_width);
		chan->mem_burst = dst_best_burst;
		dst_burst_size = stm32_dma_get_burst(chan, dst_best_burst);
		if (dst_burst_size < 0)
			return dst_burst_size;

		dma_scr = STM32_DMA_SCR_DIR(STM32_DMA_DEV_TO_MEM) |
			STM32_DMA_SCR_PSIZE(src_bus_width) |
			STM32_DMA_SCR_MSIZE(dst_bus_width) |
			STM32_DMA_SCR_PBURST(src_burst_size) |
			STM32_DMA_SCR_MBURST(dst_burst_size);

		/* Set FIFO threshold */
		chan->chan_reg.dma_sfcr &= ~STM32_DMA_SFCR_FTH_MASK;
		if (fifoth != STM32_DMA_FIFO_THRESHOLD_NONE)
			chan->chan_reg.dma_sfcr |= STM32_DMA_SFCR_FTH(fifoth);

		/* Set peripheral address */
		chan->chan_reg.dma_spar = chan->dma_sconfig.src_addr;
		*buswidth = chan->dma_sconfig.src_addr_width;
		break;

	default:
		dev_err(chan2dev(chan), "Dma direction is not supported\n");
		return -EINVAL;
	}

	stm32_dma_set_fifo_config(chan, src_best_burst, dst_best_burst);

	/* Set DMA control register */
	chan->chan_reg.dma_scr &= ~(STM32_DMA_SCR_DIR_MASK |
			STM32_DMA_SCR_PSIZE_MASK | STM32_DMA_SCR_MSIZE_MASK |
			STM32_DMA_SCR_PBURST_MASK | STM32_DMA_SCR_MBURST_MASK);
	chan->chan_reg.dma_scr |= dma_scr;

	return 0;
}
static void stm32_dma_clear_reg(struct stm32_dma_chan_reg *regs)
{
	memset(regs, 0, sizeof(struct stm32_dma_chan_reg));
}
static struct dma_async_tx_descriptor *stm32_dma_prep_slave_sg(
	struct dma_chan *c, struct scatterlist *sgl,
	u32 sg_len, enum dma_transfer_direction direction,
	unsigned long flags, void *context)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	struct stm32_dma_desc *desc;
	struct scatterlist *sg;
	enum dma_slave_buswidth buswidth;
	u32 nb_data_items;
	int i, ret;

	if (!chan->config_init) {
		dev_err(chan2dev(chan), "dma channel is not configured\n");
		return NULL;
	}

	if (sg_len < 1) {
		dev_err(chan2dev(chan), "Invalid segment length %d\n", sg_len);
		return NULL;
	}

	desc = kzalloc(struct_size(desc, sg_req, sg_len), GFP_NOWAIT);
	if (!desc)
		return NULL;

	/* Set peripheral flow controller */
	if (chan->dma_sconfig.device_fc)
		chan->chan_reg.dma_scr |= STM32_DMA_SCR_PFCTRL;
	else
		chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_PFCTRL;

	for_each_sg(sgl, sg, sg_len, i) {
		ret = stm32_dma_set_xfer_param(chan, direction, &buswidth,
					       sg_dma_len(sg));
		if (ret < 0)
			goto err;

		desc->sg_req[i].len = sg_dma_len(sg);

		nb_data_items = desc->sg_req[i].len / buswidth;
		if (nb_data_items > STM32_DMA_ALIGNED_MAX_DATA_ITEMS) {
			dev_err(chan2dev(chan), "nb items not supported\n");
			goto err;
		}

		stm32_dma_clear_reg(&desc->sg_req[i].chan_reg);
		desc->sg_req[i].chan_reg.dma_scr = chan->chan_reg.dma_scr;
		desc->sg_req[i].chan_reg.dma_sfcr = chan->chan_reg.dma_sfcr;
		desc->sg_req[i].chan_reg.dma_spar = chan->chan_reg.dma_spar;
		desc->sg_req[i].chan_reg.dma_sm0ar = sg_dma_address(sg);
		desc->sg_req[i].chan_reg.dma_sm1ar = sg_dma_address(sg);
		desc->sg_req[i].chan_reg.dma_sndtr = nb_data_items;
	}

	desc->num_sgs = sg_len;
	desc->cyclic = false;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);

err:
	kfree(desc);
	return NULL;
}
static struct dma_async_tx_descriptor *stm32_dma_prep_dma_cyclic(
	struct dma_chan *c, dma_addr_t buf_addr, size_t buf_len,
	size_t period_len, enum dma_transfer_direction direction,
	unsigned long flags)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	struct stm32_dma_desc *desc;
	enum dma_slave_buswidth buswidth;
	u32 num_periods, nb_data_items;
	int i, ret;

	if (!buf_len || !period_len) {
		dev_err(chan2dev(chan), "Invalid buffer/period len\n");
		return NULL;
	}

	if (!chan->config_init) {
		dev_err(chan2dev(chan), "dma channel is not configured\n");
		return NULL;
	}

	if (buf_len % period_len) {
		dev_err(chan2dev(chan), "buf_len not multiple of period_len\n");
		return NULL;
	}

	/*
	 * More requests may be queued as long as the DMA has not started;
	 * the driver will loop over all of them. Once the DMA has started,
	 * new requests can be queued only after terminating the DMA.
	 */
	if (chan->busy) {
		dev_err(chan2dev(chan), "Request not allowed when dma busy\n");
		return NULL;
	}

	ret = stm32_dma_set_xfer_param(chan, direction, &buswidth, period_len);
	if (ret < 0)
		return NULL;

	nb_data_items = period_len / buswidth;
	if (nb_data_items > STM32_DMA_ALIGNED_MAX_DATA_ITEMS) {
		dev_err(chan2dev(chan), "number of items not supported\n");
		return NULL;
	}

	/* Enable Circular mode or double buffer mode */
	if (buf_len == period_len)
		chan->chan_reg.dma_scr |= STM32_DMA_SCR_CIRC;
	else
		chan->chan_reg.dma_scr |= STM32_DMA_SCR_DBM;

	/* Clear periph ctrl if client set it */
	chan->chan_reg.dma_scr &= ~STM32_DMA_SCR_PFCTRL;

	num_periods = buf_len / period_len;

	desc = kzalloc(struct_size(desc, sg_req, num_periods), GFP_NOWAIT);
	if (!desc)
		return NULL;

	for (i = 0; i < num_periods; i++) {
		desc->sg_req[i].len = period_len;

		stm32_dma_clear_reg(&desc->sg_req[i].chan_reg);
		desc->sg_req[i].chan_reg.dma_scr = chan->chan_reg.dma_scr;
		desc->sg_req[i].chan_reg.dma_sfcr = chan->chan_reg.dma_sfcr;
		desc->sg_req[i].chan_reg.dma_spar = chan->chan_reg.dma_spar;
		desc->sg_req[i].chan_reg.dma_sm0ar = buf_addr;
		desc->sg_req[i].chan_reg.dma_sm1ar = buf_addr;
		desc->sg_req[i].chan_reg.dma_sndtr = nb_data_items;
		buf_addr += period_len;
	}

	desc->num_sgs = num_periods;
	desc->cyclic = true;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}
static struct dma_async_tx_descriptor *stm32_dma_prep_dma_memcpy(
	struct dma_chan *c, dma_addr_t dest,
	dma_addr_t src, size_t len, unsigned long flags)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	enum dma_slave_buswidth max_width;
	struct stm32_dma_desc *desc;
	size_t xfer_count, offset;
	u32 num_sgs, best_burst, dma_burst, threshold;
	int i;

	num_sgs = DIV_ROUND_UP(len, STM32_DMA_ALIGNED_MAX_DATA_ITEMS);
	desc = kzalloc(struct_size(desc, sg_req, num_sgs), GFP_NOWAIT);
	if (!desc)
		return NULL;

	threshold = chan->threshold;

	for (offset = 0, i = 0; offset < len; offset += xfer_count, i++) {
		xfer_count = min_t(size_t, len - offset,
				   STM32_DMA_ALIGNED_MAX_DATA_ITEMS);

		/* Compute best burst size */
		max_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
		best_burst = stm32_dma_get_best_burst(len, STM32_DMA_MAX_BURST,
						      threshold, max_width);
		dma_burst = stm32_dma_get_burst(chan, best_burst);

		stm32_dma_clear_reg(&desc->sg_req[i].chan_reg);
		desc->sg_req[i].chan_reg.dma_scr =
			STM32_DMA_SCR_DIR(STM32_DMA_MEM_TO_MEM) |
			STM32_DMA_SCR_PBURST(dma_burst) |
			STM32_DMA_SCR_MBURST(dma_burst) |
			STM32_DMA_SCR_MINC |
			STM32_DMA_SCR_PINC |
			STM32_DMA_SCR_TCIE |
			STM32_DMA_SCR_TEIE;
		desc->sg_req[i].chan_reg.dma_sfcr |= STM32_DMA_SFCR_MASK;
		desc->sg_req[i].chan_reg.dma_sfcr |=
			STM32_DMA_SFCR_FTH(threshold);
		desc->sg_req[i].chan_reg.dma_spar = src + offset;
		desc->sg_req[i].chan_reg.dma_sm0ar = dest + offset;
		desc->sg_req[i].chan_reg.dma_sndtr = xfer_count;
		desc->sg_req[i].len = xfer_count;
	}

	desc->num_sgs = num_sgs;
	desc->cyclic = false;

	return vchan_tx_prep(&chan->vchan, &desc->vdesc, flags);
}
static u32 stm32_dma_get_remaining_bytes(struct stm32_dma_chan *chan)
{
	u32 dma_scr, width, ndtr;
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);

	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(chan->id));
	width = STM32_DMA_SCR_PSIZE_GET(dma_scr);
	ndtr = stm32_dma_read(dmadev, STM32_DMA_SNDTR(chan->id));

	return ndtr << width;
}
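
/*
 * SxNDTR counts data items rather than bytes, and the PSIZE encoding (0, 1
 * or 2) is the log2 of the item size in bytes, hence the "ndtr << width"
 * conversion above.
 */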
/**
 * stm32_dma_is_current_sg - check that expected sg_req is currently transferred
 * @chan: dma channel
 *
 * This function, called while IRQs are disabled, checks that the hardware has
 * not switched to the next transfer in double buffer mode. The test is done by
 * comparing the next_sg memory address with the hardware related register
 * (based on the CT bit value).
 *
 * Returns true if the expected current transfer is still running or double
 * buffer mode is not activated.
 */
static bool stm32_dma_is_current_sg(struct stm32_dma_chan *chan)
{
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	struct stm32_dma_sg_req *sg_req;
	u32 dma_scr, dma_smar, id;

	id = chan->id;
	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));

	if (!(dma_scr & STM32_DMA_SCR_DBM))
		return true;

	sg_req = &chan->desc->sg_req[chan->next_sg];

	if (dma_scr & STM32_DMA_SCR_CT) {
		dma_smar = stm32_dma_read(dmadev, STM32_DMA_SM0AR(id));
		return (dma_smar == sg_req->chan_reg.dma_sm0ar);
	}

	dma_smar = stm32_dma_read(dmadev, STM32_DMA_SM1AR(id));

	return (dma_smar == sg_req->chan_reg.dma_sm1ar);
}
static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan,
				     struct stm32_dma_desc *desc,
				     u32 next_sg)
{
	u32 modulo, burst_size;
	u32 residue;
	u32 n_sg = next_sg;
	struct stm32_dma_sg_req *sg_req = &chan->desc->sg_req[chan->next_sg];
	int i;

	/*
	 * Calculating the residue means combining two pieces of descriptor
	 * information:
	 * - the sg_req currently transferred
	 * - the hardware remaining position in this sg (NDTR bits field).
	 *
	 * A race condition may occur if DMA is running in cyclic or double
	 * buffer mode, since the DMA registers are automatically reloaded at
	 * the end of a period transfer. The hardware may have switched to the
	 * next transfer (CT bit updated) just before the position (SxNDTR reg)
	 * is read.
	 * In this case the SxNDTR reg could (or not) correspond to the new
	 * transfer position, and not the expected one.
	 * The strategy implemented in the stm32 driver is to:
	 *  - read the SxNDTR register
	 *  - crosscheck that the hardware is still in the current transfer.
	 * In case of a switch, we can assume that the DMA is at the beginning
	 * of the next transfer, so we approximate the residue accordingly, by
	 * pointing at the beginning of the next transfer.
	 *
	 * This race condition doesn't apply to non-cyclic mode, as double
	 * buffer is not used. In that situation the registers are updated by
	 * the software.
	 */

	residue = stm32_dma_get_remaining_bytes(chan);

	if (!stm32_dma_is_current_sg(chan)) {
		n_sg++;
		if (n_sg == chan->desc->num_sgs)
			n_sg = 0;
		residue = sg_req->len;
	}

	/*
	 * In cyclic mode, for the last period, residue = remaining bytes
	 * from NDTR;
	 * else, for all other periods in cyclic mode, and in sg mode,
	 * residue = remaining bytes from NDTR + remaining
	 * periods/sg to be transferred.
	 */
	if (!chan->desc->cyclic || n_sg != 0)
		for (i = n_sg; i < desc->num_sgs; i++)
			residue += desc->sg_req[i].len;

	if (!chan->mem_burst)
		return residue;

	burst_size = chan->mem_burst * chan->mem_width;
	modulo = residue % burst_size;
	if (modulo)
		residue = residue - modulo + burst_size;

	return residue;
}
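
/*
 * Example of the final rounding: with an 8-beat memory burst of 4-byte beats,
 * burst_size = 32; a raw residue of 100 bytes (100 % 32 = 4) is reported as
 * 100 - 4 + 32 = 128, i.e. rounded up to the next burst boundary.
 */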
static enum dma_status stm32_dma_tx_status(struct dma_chan *c,
					   dma_cookie_t cookie,
					   struct dma_tx_state *state)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	unsigned long flags;
	u32 residue = 0;

	status = dma_cookie_status(c, cookie, state);
	if (status == DMA_COMPLETE || !state)
		return status;

	spin_lock_irqsave(&chan->vchan.lock, flags);
	vdesc = vchan_find_desc(&chan->vchan, cookie);
	if (chan->desc && cookie == chan->desc->vdesc.tx.cookie)
		residue = stm32_dma_desc_residue(chan, chan->desc,
						 chan->next_sg);
	else if (vdesc)
		residue = stm32_dma_desc_residue(chan,
						 to_stm32_dma_desc(vdesc), 0);
	dma_set_residue(state, residue);

	spin_unlock_irqrestore(&chan->vchan.lock, flags);

	return status;
}
static int stm32_dma_alloc_chan_resources(struct dma_chan *c)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	int ret;

	chan->config_init = false;

	ret = pm_runtime_resume_and_get(dmadev->ddev.dev);
	if (ret < 0)
		return ret;

	ret = stm32_dma_disable_chan(chan);
	if (ret < 0)
		pm_runtime_put(dmadev->ddev.dev);

	return ret;
}
static void stm32_dma_free_chan_resources(struct dma_chan *c)
{
	struct stm32_dma_chan *chan = to_stm32_dma_chan(c);
	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
	unsigned long flags;

	dev_dbg(chan2dev(chan), "Freeing channel %d\n", chan->id);

	if (chan->busy) {
		spin_lock_irqsave(&chan->vchan.lock, flags);
		stm32_dma_stop(chan);
		chan->desc = NULL;
		spin_unlock_irqrestore(&chan->vchan.lock, flags);
	}

	pm_runtime_put(dmadev->ddev.dev);

	vchan_free_chan_resources(to_virt_chan(c));
}
static void stm32_dma_desc_free(struct virt_dma_desc *vdesc)
{
	kfree(container_of(vdesc, struct stm32_dma_desc, vdesc));
}

static void stm32_dma_set_config(struct stm32_dma_chan *chan,
				 struct stm32_dma_cfg *cfg)
{
	stm32_dma_clear_reg(&chan->chan_reg);

	chan->chan_reg.dma_scr = cfg->stream_config & STM32_DMA_SCR_CFG_MASK;
	chan->chan_reg.dma_scr |= STM32_DMA_SCR_REQ(cfg->request_line);

	/* Enable Interrupts */
	chan->chan_reg.dma_scr |= STM32_DMA_SCR_TEIE | STM32_DMA_SCR_TCIE;

	chan->threshold = STM32_DMA_THRESHOLD_FTR_GET(cfg->features);
	if (STM32_DMA_DIRECT_MODE_GET(cfg->features))
		chan->threshold = STM32_DMA_FIFO_THRESHOLD_NONE;
}
static struct dma_chan *stm32_dma_of_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct stm32_dma_device *dmadev = ofdma->of_dma_data;
	struct device *dev = dmadev->ddev.dev;
	struct stm32_dma_cfg cfg;
	struct stm32_dma_chan *chan;
	struct dma_chan *c;

	if (dma_spec->args_count < 4) {
		dev_err(dev, "Bad number of cells\n");
		return NULL;
	}

	cfg.channel_id = dma_spec->args[0];
	cfg.request_line = dma_spec->args[1];
	cfg.stream_config = dma_spec->args[2];
	cfg.features = dma_spec->args[3];

	if (cfg.channel_id >= STM32_DMA_MAX_CHANNELS ||
	    cfg.request_line >= STM32_DMA_MAX_REQUEST_ID) {
		dev_err(dev, "Bad channel and/or request id\n");
		return NULL;
	}

	chan = &dmadev->chan[cfg.channel_id];

	c = dma_get_slave_channel(&chan->vchan.chan);
	if (!c) {
		dev_err(dev, "No more channels available\n");
		return NULL;
	}

	stm32_dma_set_config(chan, &cfg);

	return c;
}
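
/*
 * A client node passes the four cells decoded above, e.g. (illustrative
 * values only, not taken from a real board file):
 *
 *	dmas = <&dma1 5 4 0x10400 0x3>;
 *
 * i.e. channel 5, request line 4, a stream_config mask applied to SCR, and a
 * features mask selecting the FIFO threshold and/or direct mode.
 */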
static const struct of_device_id stm32_dma_of_match[] = {
	{ .compatible = "st,stm32-dma", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, stm32_dma_of_match);
static int stm32_dma_probe(struct platform_device *pdev)
{
	struct stm32_dma_chan *chan;
	struct stm32_dma_device *dmadev;
	struct dma_device *dd;
	const struct of_device_id *match;
	struct resource *res;
	struct reset_control *rst;
	int i, ret;

	match = of_match_device(stm32_dma_of_match, &pdev->dev);
	if (!match) {
		dev_err(&pdev->dev, "Error: No device match found\n");
		return -ENODEV;
	}

	dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL);
	if (!dmadev)
		return -ENOMEM;

	dd = &dmadev->ddev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	dmadev->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(dmadev->base))
		return PTR_ERR(dmadev->base);

	dmadev->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(dmadev->clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(dmadev->clk), "Can't get clock\n");

	ret = clk_prepare_enable(dmadev->clk);
	if (ret < 0) {
		dev_err(&pdev->dev, "clk_prep_enable error: %d\n", ret);
		return ret;
	}

	dmadev->mem2mem = of_property_read_bool(pdev->dev.of_node,
						"st,mem2mem");

	rst = devm_reset_control_get(&pdev->dev, NULL);
	if (IS_ERR(rst)) {
		ret = PTR_ERR(rst);
		if (ret == -EPROBE_DEFER)
			goto clk_free;
	} else {
		reset_control_assert(rst);
		udelay(2);
		reset_control_deassert(rst);
	}

	dma_set_max_seg_size(&pdev->dev, STM32_DMA_ALIGNED_MAX_DATA_ITEMS);

	dma_cap_set(DMA_SLAVE, dd->cap_mask);
	dma_cap_set(DMA_PRIVATE, dd->cap_mask);
	dma_cap_set(DMA_CYCLIC, dd->cap_mask);
	dd->device_alloc_chan_resources = stm32_dma_alloc_chan_resources;
	dd->device_free_chan_resources = stm32_dma_free_chan_resources;
	dd->device_tx_status = stm32_dma_tx_status;
	dd->device_issue_pending = stm32_dma_issue_pending;
	dd->device_prep_slave_sg = stm32_dma_prep_slave_sg;
	dd->device_prep_dma_cyclic = stm32_dma_prep_dma_cyclic;
	dd->device_config = stm32_dma_slave_config;
	dd->device_terminate_all = stm32_dma_terminate_all;
	dd->device_synchronize = stm32_dma_synchronize;
	dd->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
		BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
		BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dd->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |
		BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |
		BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	dd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	dd->copy_align = DMAENGINE_ALIGN_32_BYTES;
	dd->max_burst = STM32_DMA_MAX_BURST;
	dd->descriptor_reuse = true;
	dd->dev = &pdev->dev;
	INIT_LIST_HEAD(&dd->channels);

	if (dmadev->mem2mem) {
		dma_cap_set(DMA_MEMCPY, dd->cap_mask);
		dd->device_prep_dma_memcpy = stm32_dma_prep_dma_memcpy;
		dd->directions |= BIT(DMA_MEM_TO_MEM);
	}

	for (i = 0; i < STM32_DMA_MAX_CHANNELS; i++) {
		chan = &dmadev->chan[i];
		chan->id = i;
		chan->vchan.desc_free = stm32_dma_desc_free;
		vchan_init(&chan->vchan, dd);
	}

	ret = dma_async_device_register(dd);
	if (ret)
		goto clk_free;

	for (i = 0; i < STM32_DMA_MAX_CHANNELS; i++) {
		chan = &dmadev->chan[i];
		ret = platform_get_irq(pdev, i);
		if (ret < 0)
			goto err_unregister;
		chan->irq = ret;

		ret = devm_request_irq(&pdev->dev, chan->irq,
				       stm32_dma_chan_irq, 0,
				       dev_name(chan2dev(chan)), chan);
		if (ret) {
			dev_err(&pdev->dev,
				"request_irq failed with err %d channel %d\n",
				ret, i);
			goto err_unregister;
		}
	}

	ret = of_dma_controller_register(pdev->dev.of_node,
					 stm32_dma_of_xlate, dmadev);
	if (ret < 0) {
		dev_err(&pdev->dev,
			"STM32 DMA OF registration failed %d\n", ret);
		goto err_unregister;
	}

	platform_set_drvdata(pdev, dmadev);

	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_put(&pdev->dev);

	dev_info(&pdev->dev, "STM32 DMA driver registered\n");

	return 0;

err_unregister:
	dma_async_device_unregister(dd);
clk_free:
	clk_disable_unprepare(dmadev->clk);

	return ret;
}
#ifdef CONFIG_PM
static int stm32_dma_runtime_suspend(struct device *dev)
{
	struct stm32_dma_device *dmadev = dev_get_drvdata(dev);

	clk_disable_unprepare(dmadev->clk);

	return 0;
}

static int stm32_dma_runtime_resume(struct device *dev)
{
	struct stm32_dma_device *dmadev = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(dmadev->clk);
	if (ret) {
		dev_err(dev, "failed to prepare_enable clock\n");
		return ret;
	}

	return 0;
}
#endif
#ifdef CONFIG_PM_SLEEP
static int stm32_dma_suspend(struct device *dev)
{
	struct stm32_dma_device *dmadev = dev_get_drvdata(dev);
	int id, ret, scr;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;

	for (id = 0; id < STM32_DMA_MAX_CHANNELS; id++) {
		scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));
		if (scr & STM32_DMA_SCR_EN) {
			dev_warn(dev, "Suspend is prevented by Chan %i\n", id);
			return -EBUSY;
		}
	}

	pm_runtime_put_sync(dev);

	pm_runtime_force_suspend(dev);

	return 0;
}

static int stm32_dma_resume(struct device *dev)
{
	return pm_runtime_force_resume(dev);
}
#endif
static const struct dev_pm_ops stm32_dma_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(stm32_dma_suspend, stm32_dma_resume)
	SET_RUNTIME_PM_OPS(stm32_dma_runtime_suspend,
			   stm32_dma_runtime_resume, NULL)
};

static struct platform_driver stm32_dma_driver = {
	.driver = {
		.name = "stm32-dma",
		.of_match_table = stm32_dma_of_match,
		.pm = &stm32_dma_pm_ops,
	},
	.probe = stm32_dma_probe,
};

static int __init stm32_dma_init(void)
{
	return platform_driver_register(&stm32_dma_driver);
}
subsys_initcall(stm32_dma_init);