/*
 * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * QCOM BAM DMA engine driver
 *
 * QCOM BAM DMA blocks are distributed amongst a number of the on-chip
 * peripherals on the MSM 8x74. The configuration of each channel depends on
 * how it is hard-wired to its specific peripheral. The peripheral device tree
 * entries specify the configuration of each channel.
 *
 * The DMA controller requires the use of external memory for storage of the
 * hardware descriptors for each channel. The descriptor FIFO is accessed as a
 * circular buffer and operations are managed according to the offset within
 * the FIFO. After a pipe/channel reset, all of the pipe registers and internal
 * state are back to defaults.
 *
 * During DMA operations, we write descriptors to the FIFO, being careful to
 * handle wrapping, and then write the last FIFO offset to that channel's
 * P_EVNT_REG register to kick off the transaction. The P_SW_OFSTS register
 * indicates the current FIFO offset that is being processed, so there is some
 * indication of where the hardware is currently working.
 */
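
/*
 * For orientation only: a minimal, hypothetical sketch of how a peripheral
 * driver attached to one of these BAM pipes would drive it through the
 * generic dmaengine slave API. The device pointer, channel name, scatterlist
 * and burst size below are illustrative assumptions, not part of this driver.
 *
 *	struct dma_chan *chan = dma_request_chan(dev, "rx");
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_DEV_TO_MEM,
 *		.src_maxburst = 16,
 *	};
 *	struct dma_async_tx_descriptor *txd;
 *
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	txd = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_DEV_TO_MEM,
 *				      DMA_PREP_INTERRUPT);
 *	if (!txd)
 *		return -ENOMEM;
 *
 *	dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 */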
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_dma.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/pm_runtime.h>

#include "../dmaengine.h"
#include "../virt-dma.h"
struct bam_desc_hw {
	__le32 addr;		/* Buffer physical address */
	__le16 size;		/* Buffer size in bytes */
	__le16 flags;
};
#define BAM_DMA_AUTOSUSPEND_DELAY 100

#define DESC_FLAG_INT BIT(15)
#define DESC_FLAG_EOT BIT(14)
#define DESC_FLAG_EOB BIT(13)
#define DESC_FLAG_NWD BIT(12)
#define DESC_FLAG_CMD BIT(11)
struct bam_async_desc {
	struct virt_dma_desc vd;
	u32 num_desc;
	u32 xfer_len;
	/* transaction flags, EOT|EOB|NWD */
	u16 flags;
	struct bam_desc_hw *curr_desc;
	enum dma_transfer_direction dir;
	size_t length;
	struct bam_desc_hw desc[0];
};
	BAM_IRQ_SRCS_UNMASKED,
	BAM_P_EVNT_DEST_ADDR,
	BAM_P_DATA_FIFO_ADDR,
	BAM_P_DESC_FIFO_ADDR,
	BAM_P_EVNT_GEN_TRSHLD,
struct reg_offset_data {
	u32 base_offset;
	unsigned int pipe_mult, evnt_mult, ee_mult;
};
static const struct reg_offset_data bam_v1_3_reg_info[] = {
	[BAM_CTRL] = { 0x0F80, 0x00, 0x00, 0x00 },
	[BAM_REVISION] = { 0x0F84, 0x00, 0x00, 0x00 },
	[BAM_NUM_PIPES] = { 0x0FBC, 0x00, 0x00, 0x00 },
	[BAM_DESC_CNT_TRSHLD] = { 0x0F88, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS] = { 0x0F8C, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS_MSK] = { 0x0F90, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS_UNMASKED] = { 0x0FB0, 0x00, 0x00, 0x00 },
	[BAM_IRQ_STTS] = { 0x0F94, 0x00, 0x00, 0x00 },
	[BAM_IRQ_CLR] = { 0x0F98, 0x00, 0x00, 0x00 },
	[BAM_IRQ_EN] = { 0x0F9C, 0x00, 0x00, 0x00 },
	[BAM_CNFG_BITS] = { 0x0FFC, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS_EE] = { 0x1800, 0x00, 0x00, 0x80 },
	[BAM_IRQ_SRCS_MSK_EE] = { 0x1804, 0x00, 0x00, 0x80 },
	[BAM_P_CTRL] = { 0x0000, 0x80, 0x00, 0x00 },
	[BAM_P_RST] = { 0x0004, 0x80, 0x00, 0x00 },
	[BAM_P_HALT] = { 0x0008, 0x80, 0x00, 0x00 },
	[BAM_P_IRQ_STTS] = { 0x0010, 0x80, 0x00, 0x00 },
	[BAM_P_IRQ_CLR] = { 0x0014, 0x80, 0x00, 0x00 },
	[BAM_P_IRQ_EN] = { 0x0018, 0x80, 0x00, 0x00 },
	[BAM_P_EVNT_DEST_ADDR] = { 0x102C, 0x00, 0x40, 0x00 },
	[BAM_P_EVNT_REG] = { 0x1018, 0x00, 0x40, 0x00 },
	[BAM_P_SW_OFSTS] = { 0x1000, 0x00, 0x40, 0x00 },
	[BAM_P_DATA_FIFO_ADDR] = { 0x1024, 0x00, 0x40, 0x00 },
	[BAM_P_DESC_FIFO_ADDR] = { 0x101C, 0x00, 0x40, 0x00 },
	[BAM_P_EVNT_GEN_TRSHLD] = { 0x1028, 0x00, 0x40, 0x00 },
	[BAM_P_FIFO_SIZES] = { 0x1020, 0x00, 0x40, 0x00 },
};
static const struct reg_offset_data bam_v1_4_reg_info[] = {
	[BAM_CTRL] = { 0x0000, 0x00, 0x00, 0x00 },
	[BAM_REVISION] = { 0x0004, 0x00, 0x00, 0x00 },
	[BAM_NUM_PIPES] = { 0x003C, 0x00, 0x00, 0x00 },
	[BAM_DESC_CNT_TRSHLD] = { 0x0008, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS] = { 0x000C, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS_MSK] = { 0x0010, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS_UNMASKED] = { 0x0030, 0x00, 0x00, 0x00 },
	[BAM_IRQ_STTS] = { 0x0014, 0x00, 0x00, 0x00 },
	[BAM_IRQ_CLR] = { 0x0018, 0x00, 0x00, 0x00 },
	[BAM_IRQ_EN] = { 0x001C, 0x00, 0x00, 0x00 },
	[BAM_CNFG_BITS] = { 0x007C, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS_EE] = { 0x0800, 0x00, 0x00, 0x80 },
	[BAM_IRQ_SRCS_MSK_EE] = { 0x0804, 0x00, 0x00, 0x80 },
	[BAM_P_CTRL] = { 0x1000, 0x1000, 0x00, 0x00 },
	[BAM_P_RST] = { 0x1004, 0x1000, 0x00, 0x00 },
	[BAM_P_HALT] = { 0x1008, 0x1000, 0x00, 0x00 },
	[BAM_P_IRQ_STTS] = { 0x1010, 0x1000, 0x00, 0x00 },
	[BAM_P_IRQ_CLR] = { 0x1014, 0x1000, 0x00, 0x00 },
	[BAM_P_IRQ_EN] = { 0x1018, 0x1000, 0x00, 0x00 },
	[BAM_P_EVNT_DEST_ADDR] = { 0x182C, 0x00, 0x1000, 0x00 },
	[BAM_P_EVNT_REG] = { 0x1818, 0x00, 0x1000, 0x00 },
	[BAM_P_SW_OFSTS] = { 0x1800, 0x00, 0x1000, 0x00 },
	[BAM_P_DATA_FIFO_ADDR] = { 0x1824, 0x00, 0x1000, 0x00 },
	[BAM_P_DESC_FIFO_ADDR] = { 0x181C, 0x00, 0x1000, 0x00 },
	[BAM_P_EVNT_GEN_TRSHLD] = { 0x1828, 0x00, 0x1000, 0x00 },
	[BAM_P_FIFO_SIZES] = { 0x1820, 0x00, 0x1000, 0x00 },
};
static const struct reg_offset_data bam_v1_7_reg_info[] = {
	[BAM_CTRL] = { 0x00000, 0x00, 0x00, 0x00 },
	[BAM_REVISION] = { 0x01000, 0x00, 0x00, 0x00 },
	[BAM_NUM_PIPES] = { 0x01008, 0x00, 0x00, 0x00 },
	[BAM_DESC_CNT_TRSHLD] = { 0x00008, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS] = { 0x03010, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS_MSK] = { 0x03014, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS_UNMASKED] = { 0x03018, 0x00, 0x00, 0x00 },
	[BAM_IRQ_STTS] = { 0x00014, 0x00, 0x00, 0x00 },
	[BAM_IRQ_CLR] = { 0x00018, 0x00, 0x00, 0x00 },
	[BAM_IRQ_EN] = { 0x0001C, 0x00, 0x00, 0x00 },
	[BAM_CNFG_BITS] = { 0x0007C, 0x00, 0x00, 0x00 },
	[BAM_IRQ_SRCS_EE] = { 0x03000, 0x00, 0x00, 0x1000 },
	[BAM_IRQ_SRCS_MSK_EE] = { 0x03004, 0x00, 0x00, 0x1000 },
	[BAM_P_CTRL] = { 0x13000, 0x1000, 0x00, 0x00 },
	[BAM_P_RST] = { 0x13004, 0x1000, 0x00, 0x00 },
	[BAM_P_HALT] = { 0x13008, 0x1000, 0x00, 0x00 },
	[BAM_P_IRQ_STTS] = { 0x13010, 0x1000, 0x00, 0x00 },
	[BAM_P_IRQ_CLR] = { 0x13014, 0x1000, 0x00, 0x00 },
	[BAM_P_IRQ_EN] = { 0x13018, 0x1000, 0x00, 0x00 },
	[BAM_P_EVNT_DEST_ADDR] = { 0x1382C, 0x00, 0x1000, 0x00 },
	[BAM_P_EVNT_REG] = { 0x13818, 0x00, 0x1000, 0x00 },
	[BAM_P_SW_OFSTS] = { 0x13800, 0x00, 0x1000, 0x00 },
	[BAM_P_DATA_FIFO_ADDR] = { 0x13824, 0x00, 0x1000, 0x00 },
	[BAM_P_DESC_FIFO_ADDR] = { 0x1381C, 0x00, 0x1000, 0x00 },
	[BAM_P_EVNT_GEN_TRSHLD] = { 0x13828, 0x00, 0x1000, 0x00 },
	[BAM_P_FIFO_SIZES] = { 0x13820, 0x00, 0x1000, 0x00 },
};
/* BAM_CTRL */
#define BAM_SW_RST BIT(0)
#define BAM_EN BIT(1)
#define BAM_EN_ACCUM BIT(4)
#define BAM_TESTBUS_SEL_SHIFT 5
#define BAM_TESTBUS_SEL_MASK 0x3F
#define BAM_DESC_CACHE_SEL_SHIFT 13
#define BAM_DESC_CACHE_SEL_MASK 0x3
#define BAM_CACHED_DESC_STORE BIT(15)
#define IBC_DISABLE BIT(16)

/* BAM_REVISION */
#define REVISION_SHIFT 0
#define REVISION_MASK 0xFF
#define NUM_EES_SHIFT 8
#define NUM_EES_MASK 0xF
#define CE_BUFFER_SIZE BIT(13)
#define AXI_ACTIVE BIT(14)
#define USE_VMIDMT BIT(15)
#define SECURED BIT(16)
#define BAM_HAS_NO_BYPASS BIT(17)
#define HIGH_FREQUENCY_BAM BIT(18)
#define INACTIV_TMRS_EXST BIT(19)
#define NUM_INACTIV_TMRS BIT(20)
#define DESC_CACHE_DEPTH_SHIFT 21
#define DESC_CACHE_DEPTH_1 (0 << DESC_CACHE_DEPTH_SHIFT)
#define DESC_CACHE_DEPTH_2 (1 << DESC_CACHE_DEPTH_SHIFT)
#define DESC_CACHE_DEPTH_3 (2 << DESC_CACHE_DEPTH_SHIFT)
#define DESC_CACHE_DEPTH_4 (3 << DESC_CACHE_DEPTH_SHIFT)
#define CMD_DESC_EN BIT(23)
#define INACTIV_TMR_BASE_SHIFT 24
#define INACTIV_TMR_BASE_MASK 0xFF

/* BAM_NUM_PIPES */
#define BAM_NUM_PIPES_SHIFT 0
#define BAM_NUM_PIPES_MASK 0xFF
#define PERIPH_NON_PIPE_GRP_SHIFT 16
#define PERIPH_NON_PIP_GRP_MASK 0xFF
#define BAM_NON_PIPE_GRP_SHIFT 24
#define BAM_NON_PIPE_GRP_MASK 0xFF

/* BAM_CNFG_BITS */
#define BAM_PIPE_CNFG BIT(2)
#define BAM_FULL_PIPE BIT(11)
#define BAM_NO_EXT_P_RST BIT(12)
#define BAM_IBC_DISABLE BIT(13)
#define BAM_SB_CLK_REQ BIT(14)
#define BAM_PSM_CSW_REQ BIT(15)
#define BAM_PSM_P_RES BIT(16)
#define BAM_AU_P_RES BIT(17)
#define BAM_SI_P_RES BIT(18)
#define BAM_WB_P_RES BIT(19)
#define BAM_WB_BLK_CSW BIT(20)
#define BAM_WB_CSW_ACK_IDL BIT(21)
#define BAM_WB_RETR_SVPNT BIT(22)
#define BAM_WB_DSC_AVL_P_RST BIT(23)
#define BAM_REG_P_EN BIT(24)
#define BAM_PSM_P_HD_DATA BIT(25)
#define BAM_AU_ACCUMED BIT(26)
#define BAM_CMD_ENABLE BIT(27)
#define BAM_CNFG_BITS_DEFAULT	(BAM_PIPE_CNFG |	\
				 BAM_NO_EXT_P_RST |	\
				 BAM_IBC_DISABLE |	\
				 BAM_SB_CLK_REQ |	\
				 BAM_PSM_CSW_REQ |	\
				 BAM_PSM_P_RES |	\
				 BAM_AU_P_RES |		\
				 BAM_SI_P_RES |		\
				 BAM_WB_P_RES |		\
				 BAM_WB_BLK_CSW |	\
				 BAM_WB_CSW_ACK_IDL |	\
				 BAM_WB_RETR_SVPNT |	\
				 BAM_WB_DSC_AVL_P_RST |	\
				 BAM_REG_P_EN |		\
				 BAM_PSM_P_HD_DATA |	\
				 BAM_AU_ACCUMED |	\
				 BAM_CMD_ENABLE)
/* P_CTRL */
#define P_EN BIT(1)
#define P_DIRECTION BIT(3)
#define P_SYS_STRM BIT(4)
#define P_SYS_MODE BIT(5)
#define P_AUTO_EOB BIT(6)
#define P_AUTO_EOB_SEL_SHIFT 7
#define P_AUTO_EOB_SEL_512 (0 << P_AUTO_EOB_SEL_SHIFT)
#define P_AUTO_EOB_SEL_256 (1 << P_AUTO_EOB_SEL_SHIFT)
#define P_AUTO_EOB_SEL_128 (2 << P_AUTO_EOB_SEL_SHIFT)
#define P_AUTO_EOB_SEL_64 (3 << P_AUTO_EOB_SEL_SHIFT)
#define P_PREFETCH_LIMIT_SHIFT 9
#define P_PREFETCH_LIMIT_32 (0 << P_PREFETCH_LIMIT_SHIFT)
#define P_PREFETCH_LIMIT_16 (1 << P_PREFETCH_LIMIT_SHIFT)
#define P_PREFETCH_LIMIT_4 (2 << P_PREFETCH_LIMIT_SHIFT)
#define P_WRITE_NWD BIT(11)
#define P_LOCK_GROUP_SHIFT 16
#define P_LOCK_GROUP_MASK 0x1F
/* BAM_DESC_CNT_TRSHLD */
#define CNT_TRSHLD 0xffff
#define DEFAULT_CNT_THRSHLD 0x4

/* BAM_IRQ_SRCS */
#define BAM_IRQ BIT(31)
#define P_IRQ 0x7fffffff

/* BAM_IRQ_SRCS_MSK */
#define BAM_IRQ_MSK BAM_IRQ
#define P_IRQ_MSK P_IRQ

/* BAM_IRQ_STTS */
#define BAM_TIMER_IRQ BIT(4)
#define BAM_EMPTY_IRQ BIT(3)
#define BAM_ERROR_IRQ BIT(2)
#define BAM_HRESP_ERR_IRQ BIT(1)

/* BAM_IRQ_CLR */
#define BAM_TIMER_CLR BIT(4)
#define BAM_EMPTY_CLR BIT(3)
#define BAM_ERROR_CLR BIT(2)
#define BAM_HRESP_ERR_CLR BIT(1)

/* BAM_IRQ_EN */
#define BAM_TIMER_EN BIT(4)
#define BAM_EMPTY_EN BIT(3)
#define BAM_ERROR_EN BIT(2)
#define BAM_HRESP_ERR_EN BIT(1)

/* BAM_P_IRQ_EN */
#define P_PRCSD_DESC_EN BIT(0)
#define P_TIMER_EN BIT(1)
#define P_WAKE_EN BIT(2)
#define P_OUT_OF_DESC_EN BIT(3)
#define P_ERR_EN BIT(4)
#define P_TRNSFR_END_EN BIT(5)
#define P_DEFAULT_IRQS_EN (P_PRCSD_DESC_EN | P_ERR_EN | P_TRNSFR_END_EN)

/* BAM_P_SW_OFSTS */
#define P_SW_OFSTS_MASK 0xffff

#define BAM_DESC_FIFO_SIZE SZ_32K
#define MAX_DESCRIPTORS (BAM_DESC_FIFO_SIZE / sizeof(struct bam_desc_hw) - 1)
#define BAM_FIFO_SIZE (SZ_32K - 8)
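
/*
 * Sizing note (reasoning inferred, not stated in the original source): each
 * struct bam_desc_hw above is 8 bytes, so the 32 KiB FIFO holds 4096 slots.
 * MAX_DESCRIPTORS keeps one slot in reserve (4095 usable) and BAM_FIFO_SIZE
 * programs the hardware with the matching SZ_32K - 8 value, presumably so a
 * completely full ring can still be distinguished from an empty one.
 */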
struct bam_chan {
	struct virt_dma_chan vc;

	struct bam_device *bdev;

	/* configuration from device tree */
	u32 id;

	struct bam_async_desc *curr_txd;	/* current running dma */

	/* runtime configuration */
	struct dma_slave_config slave;

	/* fifo storage */
	struct bam_desc_hw *fifo_virt;
	dma_addr_t fifo_phys;

	/* fifo markers */
	unsigned short head;		/* start of active descriptor entries */
	unsigned short tail;		/* end of active descriptor entries */

	unsigned int initialized;	/* is the channel hw initialized? */
	unsigned int paused;		/* is the channel paused? */
	unsigned int reconfigure;	/* new slave config? */

	struct list_head node;
};
static inline struct bam_chan *to_bam_chan(struct dma_chan *common)
{
	return container_of(common, struct bam_chan, vc.chan);
}
struct bam_device {
	void __iomem *regs;
	struct device *dev;
	struct dma_device common;
	struct device_dma_parameters dma_parms;
	struct bam_chan *channels;
	u32 num_channels;
	u32 num_ees;
	/* execution environment ID, from DT */
	u32 ee;
	bool controlled_remotely;
	const struct reg_offset_data *layout;
	struct clk *bamclk;
	int irq;
	/* dma start transaction tasklet */
	struct tasklet_struct task;
};
/**
 * bam_addr - returns BAM register address
 * @bdev: bam device
 * @pipe: pipe instance (ignored when register doesn't have multiple instances)
 * @reg: register enum
 */
static inline void __iomem *bam_addr(struct bam_device *bdev, u32 pipe,
		enum bam_reg reg)
{
	const struct reg_offset_data r = bdev->layout[reg];

	return bdev->regs + r.base_offset +
		r.pipe_mult * pipe +
		r.evnt_mult * pipe +
		r.ee_mult * bdev->ee;
}
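
/*
 * Worked example, added for illustration (not in the original source), using
 * the bam_v1_4_reg_info layout above: BAM_P_IRQ_STTS for pipe 3 resolves to
 * regs + 0x1010 + 0x1000 * 3 = regs + 0x4010, while an EE-indexed register
 * such as BAM_IRQ_SRCS_EE with ee = 1 resolves to
 * regs + 0x0800 + 0x80 * 1 = regs + 0x0880.
 */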
 * bam_reset_channel - Reset individual BAM DMA channel
 * @bchan: bam channel
 * This function resets a specific BAM channel
static void bam_reset_channel(struct bam_chan *bchan)
	struct bam_device *bdev = bchan->bdev;

	lockdep_assert_held(&bchan->vc.lock);

	writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_RST));
	writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_RST));

	/* don't allow cpu to reorder BAM register accesses done after this */

	/* make sure hw is initialized when channel is used the first time */
	bchan->initialized = 0;

 * bam_chan_init_hw - Initialize channel hardware
 * @bchan: bam channel
 * This function resets and initializes the BAM channel
static void bam_chan_init_hw(struct bam_chan *bchan,
		enum dma_transfer_direction dir)
	struct bam_device *bdev = bchan->bdev;
	u32 val;

	/* Reset the channel to clear internal state of the FIFO */
	bam_reset_channel(bchan);

	/*
	 * write out 8 byte aligned address. We have enough space for this
	 * because we allocated 1 more descriptor (8 bytes) than we can use
	 */
	writel_relaxed(ALIGN(bchan->fifo_phys, sizeof(struct bam_desc_hw)),
			bam_addr(bdev, bchan->id, BAM_P_DESC_FIFO_ADDR));
	writel_relaxed(BAM_FIFO_SIZE,
			bam_addr(bdev, bchan->id, BAM_P_FIFO_SIZES));

	/* enable the per pipe interrupts, enable EOT, ERR, and INT irqs */
	writel_relaxed(P_DEFAULT_IRQS_EN,
			bam_addr(bdev, bchan->id, BAM_P_IRQ_EN));

	/* unmask the specific pipe and EE combo */
	val = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));
	val |= BIT(bchan->id);
	writel_relaxed(val, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));

	/* don't allow cpu to reorder the channel enable done below */

	/* set fixed direction and mode, then enable channel */
	val = P_EN | P_SYS_MODE;
	if (dir == DMA_DEV_TO_MEM)
		val |= P_DIRECTION;

	writel_relaxed(val, bam_addr(bdev, bchan->id, BAM_P_CTRL));

	bchan->initialized = 1;

	/* init FIFO pointers */
 * bam_alloc_chan - Allocate channel resources for DMA channel.
 * @chan: specified channel
 * This function allocates the FIFO descriptor memory
static int bam_alloc_chan(struct dma_chan *chan)
	struct bam_chan *bchan = to_bam_chan(chan);
	struct bam_device *bdev = bchan->bdev;

	if (bchan->fifo_virt)
		return 0;

	/* allocate FIFO descriptor space, but only if necessary */
	bchan->fifo_virt = dma_alloc_wc(bdev->dev, BAM_DESC_FIFO_SIZE,
					&bchan->fifo_phys, GFP_KERNEL);

	if (!bchan->fifo_virt) {
		dev_err(bdev->dev, "Failed to allocate desc fifo\n");

 * bam_free_chan - Frees dma resources associated with specific channel
 * @chan: specified channel
 * Free the allocated fifo descriptor memory and channel resources
static void bam_free_chan(struct dma_chan *chan)
	struct bam_chan *bchan = to_bam_chan(chan);
	struct bam_device *bdev = bchan->bdev;

	ret = pm_runtime_get_sync(bdev->dev);

	vchan_free_chan_resources(to_virt_chan(chan));

	if (bchan->curr_txd) {
		dev_err(bchan->bdev->dev, "Cannot free busy channel\n");

	spin_lock_irqsave(&bchan->vc.lock, flags);
	bam_reset_channel(bchan);
	spin_unlock_irqrestore(&bchan->vc.lock, flags);

	dma_free_wc(bdev->dev, BAM_DESC_FIFO_SIZE, bchan->fifo_virt,
		    bchan->fifo_phys);
	bchan->fifo_virt = NULL;

	/* mask irq for pipe/channel */
	val = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));
	val &= ~BIT(bchan->id);
	writel_relaxed(val, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));

	writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_IRQ_EN));

	pm_runtime_mark_last_busy(bdev->dev);
	pm_runtime_put_autosuspend(bdev->dev);
 * bam_slave_config - set slave configuration for channel
 * @cfg: slave configuration
 * Sets slave configuration for channel
static int bam_slave_config(struct dma_chan *chan,
			    struct dma_slave_config *cfg)
	struct bam_chan *bchan = to_bam_chan(chan);

	spin_lock_irqsave(&bchan->vc.lock, flag);
	memcpy(&bchan->slave, cfg, sizeof(*cfg));
	bchan->reconfigure = 1;
	spin_unlock_irqrestore(&bchan->vc.lock, flag);

 * bam_prep_slave_sg - Prep slave sg transaction
 * @sgl: scatter gather list
 * @sg_len: length of sg
 * @direction: DMA transfer direction
 * @context: transfer context (unused)
static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
	struct scatterlist *sgl, unsigned int sg_len,
	enum dma_transfer_direction direction, unsigned long flags,
	void *context)
	struct bam_chan *bchan = to_bam_chan(chan);
	struct bam_device *bdev = bchan->bdev;
	struct bam_async_desc *async_desc;
	struct scatterlist *sg;
	unsigned int i;
	struct bam_desc_hw *desc;
	unsigned int num_alloc = 0;

	if (!is_slave_direction(direction)) {
		dev_err(bdev->dev, "invalid dma direction\n");

	/* calculate number of required entries */
	for_each_sg(sgl, sg, sg_len, i)
		num_alloc += DIV_ROUND_UP(sg_dma_len(sg), BAM_FIFO_SIZE);

	/* allocate enough room to accommodate the number of entries */
	async_desc = kzalloc(sizeof(*async_desc) +
			(num_alloc * sizeof(struct bam_desc_hw)), GFP_NOWAIT);

	if (flags & DMA_PREP_FENCE)
		async_desc->flags |= DESC_FLAG_NWD;

	if (flags & DMA_PREP_INTERRUPT)
		async_desc->flags |= DESC_FLAG_EOT;
	else
		async_desc->flags |= DESC_FLAG_INT;

	async_desc->num_desc = num_alloc;
	async_desc->curr_desc = async_desc->desc;
	async_desc->dir = direction;

	/* fill in temporary descriptors */
	desc = async_desc->desc;
	for_each_sg(sgl, sg, sg_len, i) {
		unsigned int remainder = sg_dma_len(sg);
		unsigned int curr_offset = 0;

		do {
			if (flags & DMA_PREP_CMD)
				desc->flags |= cpu_to_le16(DESC_FLAG_CMD);

			desc->addr = cpu_to_le32(sg_dma_address(sg) +
						 curr_offset);

			if (remainder > BAM_FIFO_SIZE) {
				desc->size = cpu_to_le16(BAM_FIFO_SIZE);
				remainder -= BAM_FIFO_SIZE;
				curr_offset += BAM_FIFO_SIZE;
			} else {
				desc->size = cpu_to_le16(remainder);
				remainder = 0;
			}

			async_desc->length += desc->size;
			desc++;
		} while (remainder > 0);

	return vchan_tx_prep(&bchan->vc, &async_desc->vd, flags);
 * bam_dma_terminate_all - terminate all transactions on a channel
 * @chan: dma channel
 * Dequeues and frees all transactions.
 * No callbacks are done.
static int bam_dma_terminate_all(struct dma_chan *chan)
	struct bam_chan *bchan = to_bam_chan(chan);

	/* remove all transactions, including active transaction */
	spin_lock_irqsave(&bchan->vc.lock, flag);
	/*
	 * If we have transactions queued, then some might be committed to the
	 * hardware in the desc fifo. The only way to reset the desc fifo is
	 * to do a hardware reset (either by pipe or the entire block).
	 * bam_chan_init_hw() will trigger a pipe reset, and also reinit the
	 * pipe. If the pipe is left disabled (default state after pipe reset)
	 * and is accessed by a connected hardware engine, a fatal error in
	 * the BAM will occur. There is a small window where this could happen
	 * with bam_chan_init_hw(), but it is assumed that the caller has
	 * stopped activity on any attached hardware engine. Make sure to do
	 * this first so that the BAM hardware doesn't cause memory corruption
	 * by accessing freed resources.
	 */
	if (bchan->curr_txd) {
		bam_chan_init_hw(bchan, bchan->curr_txd->dir);
		list_add(&bchan->curr_txd->vd.node, &bchan->vc.desc_issued);
		bchan->curr_txd = NULL;
	}

	vchan_get_all_descriptors(&bchan->vc, &head);
	spin_unlock_irqrestore(&bchan->vc.lock, flag);

	vchan_dma_desc_free_list(&bchan->vc, &head);
 * bam_pause - Pause DMA channel
static int bam_pause(struct dma_chan *chan)
	struct bam_chan *bchan = to_bam_chan(chan);
	struct bam_device *bdev = bchan->bdev;

	ret = pm_runtime_get_sync(bdev->dev);

	spin_lock_irqsave(&bchan->vc.lock, flag);
	writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_HALT));
	bchan->paused = 1;
	spin_unlock_irqrestore(&bchan->vc.lock, flag);
	pm_runtime_mark_last_busy(bdev->dev);
	pm_runtime_put_autosuspend(bdev->dev);

 * bam_resume - Resume DMA channel operations
static int bam_resume(struct dma_chan *chan)
	struct bam_chan *bchan = to_bam_chan(chan);
	struct bam_device *bdev = bchan->bdev;

	ret = pm_runtime_get_sync(bdev->dev);

	spin_lock_irqsave(&bchan->vc.lock, flag);
	writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_HALT));
	bchan->paused = 0;
	spin_unlock_irqrestore(&bchan->vc.lock, flag);
	pm_runtime_mark_last_busy(bdev->dev);
	pm_runtime_put_autosuspend(bdev->dev);
 * process_channel_irqs - processes the channel interrupts
 * @bdev: bam controller
 * This function processes the channel interrupts
static u32 process_channel_irqs(struct bam_device *bdev)
	u32 i, srcs, pipe_stts;
	struct bam_async_desc *async_desc;

	srcs = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_EE));

	/* return early if no pipe/channel interrupts are present */

	for (i = 0; i < bdev->num_channels; i++) {
		struct bam_chan *bchan = &bdev->channels[i];

		if (!(srcs & BIT(i)))
			continue;

		pipe_stts = readl_relaxed(bam_addr(bdev, i, BAM_P_IRQ_STTS));

		writel_relaxed(pipe_stts, bam_addr(bdev, i, BAM_P_IRQ_CLR));

		spin_lock_irqsave(&bchan->vc.lock, flags);
		async_desc = bchan->curr_txd;

		async_desc->num_desc -= async_desc->xfer_len;
		async_desc->curr_desc += async_desc->xfer_len;
		bchan->curr_txd = NULL;

		bchan->head += async_desc->xfer_len;
		bchan->head %= MAX_DESCRIPTORS;

		/*
		 * if complete, process cookie. Otherwise
		 * push back to front of desc_issued so that
		 * it gets restarted by the tasklet
		 */
		if (!async_desc->num_desc)
			vchan_cookie_complete(&async_desc->vd);
		else
			list_add(&async_desc->vd.node,
				 &bchan->vc.desc_issued);

		spin_unlock_irqrestore(&bchan->vc.lock, flags);
 * bam_dma_irq - irq handler for bam controller
 * @irq: IRQ of interrupt
 * @data: callback data
 * IRQ handler for the bam controller
static irqreturn_t bam_dma_irq(int irq, void *data)
	struct bam_device *bdev = data;
	u32 clr_mask = 0, srcs = 0;

	srcs |= process_channel_irqs(bdev);

	/* kick off tasklet to start next dma transfer */
		tasklet_schedule(&bdev->task);

	ret = pm_runtime_get_sync(bdev->dev);

	if (srcs & BAM_IRQ) {
		clr_mask = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_STTS));

		 * don't allow reorder of the various accesses to the BAM

	writel_relaxed(clr_mask, bam_addr(bdev, 0, BAM_IRQ_CLR));

	pm_runtime_mark_last_busy(bdev->dev);
	pm_runtime_put_autosuspend(bdev->dev);
 * bam_tx_status - returns status of transaction
 * @cookie: transaction cookie
 * @txstate: DMA transaction state
 * Return status of dma transaction
static enum dma_status bam_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
		struct dma_tx_state *txstate)
	struct bam_chan *bchan = to_bam_chan(chan);
	struct virt_dma_desc *vd;

	ret = dma_cookie_status(chan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	if (!txstate)
		return bchan->paused ? DMA_PAUSED : ret;

	spin_lock_irqsave(&bchan->vc.lock, flags);
	vd = vchan_find_desc(&bchan->vc, cookie);
	if (vd)
		residue = container_of(vd, struct bam_async_desc, vd)->length;
	else if (bchan->curr_txd && bchan->curr_txd->vd.tx.cookie == cookie)
		for (i = 0; i < bchan->curr_txd->num_desc; i++)
			residue += bchan->curr_txd->curr_desc[i].size;

	spin_unlock_irqrestore(&bchan->vc.lock, flags);

	dma_set_residue(txstate, residue);

	if (ret == DMA_IN_PROGRESS && bchan->paused)
		ret = DMA_PAUSED;
 * bam_apply_new_config
 * @bchan: bam dma channel
 * @dir: DMA direction
static void bam_apply_new_config(struct bam_chan *bchan,
		enum dma_transfer_direction dir)
	struct bam_device *bdev = bchan->bdev;
	u32 maxburst;

	if (dir == DMA_DEV_TO_MEM)
		maxburst = bchan->slave.src_maxburst;
	else
		maxburst = bchan->slave.dst_maxburst;

	writel_relaxed(maxburst, bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD));

	bchan->reconfigure = 0;
 * bam_start_dma - start next transaction
 * @bchan: bam dma channel
static void bam_start_dma(struct bam_chan *bchan)
	struct virt_dma_desc *vd = vchan_next_desc(&bchan->vc);
	struct bam_device *bdev = bchan->bdev;
	struct bam_async_desc *async_desc;
	struct bam_desc_hw *desc;
	struct bam_desc_hw *fifo = PTR_ALIGN(bchan->fifo_virt,
					sizeof(struct bam_desc_hw));

	lockdep_assert_held(&bchan->vc.lock);

	async_desc = container_of(vd, struct bam_async_desc, vd);
	bchan->curr_txd = async_desc;

	ret = pm_runtime_get_sync(bdev->dev);

	/* on first use, initialize the channel hardware */
	if (!bchan->initialized)
		bam_chan_init_hw(bchan, async_desc->dir);

	/* apply new slave config changes, if necessary */
	if (bchan->reconfigure)
		bam_apply_new_config(bchan, async_desc->dir);

	desc = bchan->curr_txd->curr_desc;

	if (async_desc->num_desc > MAX_DESCRIPTORS)
		async_desc->xfer_len = MAX_DESCRIPTORS;
	else
		async_desc->xfer_len = async_desc->num_desc;

	/* set any special flags on the last descriptor */
	if (async_desc->num_desc == async_desc->xfer_len)
		desc[async_desc->xfer_len - 1].flags |=
					cpu_to_le16(async_desc->flags);
	else
		desc[async_desc->xfer_len - 1].flags |=
					cpu_to_le16(DESC_FLAG_INT);

	if (bchan->tail + async_desc->xfer_len > MAX_DESCRIPTORS) {
		u32 partial = MAX_DESCRIPTORS - bchan->tail;

		memcpy(&fifo[bchan->tail], desc,
		       partial * sizeof(struct bam_desc_hw));
		memcpy(fifo, &desc[partial], (async_desc->xfer_len - partial) *
		       sizeof(struct bam_desc_hw));
	} else {
		memcpy(&fifo[bchan->tail], desc,
		       async_desc->xfer_len * sizeof(struct bam_desc_hw));
	}

	bchan->tail += async_desc->xfer_len;
	bchan->tail %= MAX_DESCRIPTORS;
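	/*
	 * Worked example of the wrap handling above (illustrative numbers
	 * only, not from the original source): with MAX_DESCRIPTORS = 4095,
	 * tail = 4090 and xfer_len = 10, partial = 5, so the first five
	 * descriptors land in FIFO slots 4090..4094, the remaining five wrap
	 * around to slots 0..4, and tail becomes (4090 + 10) % 4095 = 5.
	 */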
	/* ensure descriptor writes and dma start not reordered */
	writel_relaxed(bchan->tail * sizeof(struct bam_desc_hw),
			bam_addr(bdev, bchan->id, BAM_P_EVNT_REG));

	pm_runtime_mark_last_busy(bdev->dev);
	pm_runtime_put_autosuspend(bdev->dev);
 * dma_tasklet - DMA IRQ tasklet
 * @data: tasklet argument (bam controller structure)
 * Sets up next DMA operation and then processes all completed transactions
static void dma_tasklet(unsigned long data)
	struct bam_device *bdev = (struct bam_device *)data;
	struct bam_chan *bchan;
	unsigned long flags;

	/* go through the channels and kick off transactions */
	for (i = 0; i < bdev->num_channels; i++) {
		bchan = &bdev->channels[i];
		spin_lock_irqsave(&bchan->vc.lock, flags);

		if (!list_empty(&bchan->vc.desc_issued) && !bchan->curr_txd)
			bam_start_dma(bchan);
		spin_unlock_irqrestore(&bchan->vc.lock, flags);
 * bam_issue_pending - starts pending transactions
 * @chan: dma channel
 * Calls tasklet directly which in turn starts any pending transactions
static void bam_issue_pending(struct dma_chan *chan)
	struct bam_chan *bchan = to_bam_chan(chan);
	unsigned long flags;

	spin_lock_irqsave(&bchan->vc.lock, flags);

	/* if work pending and idle, start a transaction */
	if (vchan_issue_pending(&bchan->vc) && !bchan->curr_txd)
		bam_start_dma(bchan);

	spin_unlock_irqrestore(&bchan->vc.lock, flags);
 * bam_dma_free_desc - free descriptor memory
 * @vd: virtual descriptor
static void bam_dma_free_desc(struct virt_dma_desc *vd)
	struct bam_async_desc *async_desc = container_of(vd,
			struct bam_async_desc, vd);

	kfree(async_desc);
static struct dma_chan *bam_dma_xlate(struct of_phandle_args *dma_spec,
		struct of_dma *of)
	struct bam_device *bdev = container_of(of->of_dma_data,
					struct bam_device, common);
	unsigned int request;

	if (dma_spec->args_count != 1)
		return NULL;

	request = dma_spec->args[0];
	if (request >= bdev->num_channels)
		return NULL;

	return dma_get_slave_channel(&(bdev->channels[request].vc.chan));
 * Initialization helper for global bam registers
static int bam_init(struct bam_device *bdev)
	u32 val;

	/* read revision and configuration information */
	if (!bdev->num_ees) {
		val = readl_relaxed(bam_addr(bdev, 0, BAM_REVISION));
		bdev->num_ees = (val >> NUM_EES_SHIFT) & NUM_EES_MASK;
	}

	/* check that configured EE is within range */
	if (bdev->ee >= bdev->num_ees)
		return -EINVAL;

	if (!bdev->num_channels) {
		val = readl_relaxed(bam_addr(bdev, 0, BAM_NUM_PIPES));
		bdev->num_channels = val & BAM_NUM_PIPES_MASK;
	}

	if (bdev->controlled_remotely)
		return 0;

	/* after reset all pipes are disabled and idle */
	val = readl_relaxed(bam_addr(bdev, 0, BAM_CTRL));
	val |= BAM_SW_RST;
	writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));
	val &= ~BAM_SW_RST;
	writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));

	/* make sure previous stores are visible before enabling BAM */
	wmb();

	/* enable the BAM */
	val |= BAM_EN;
	writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));

	/* set descriptor threshold, start with 4 bytes */
	writel_relaxed(DEFAULT_CNT_THRSHLD,
			bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD));

	/* Enable default set of h/w workarounds, i.e. all except BAM_FULL_PIPE */
	writel_relaxed(BAM_CNFG_BITS_DEFAULT, bam_addr(bdev, 0, BAM_CNFG_BITS));

	/* enable irqs for errors */
	writel_relaxed(BAM_ERROR_EN | BAM_HRESP_ERR_EN,
			bam_addr(bdev, 0, BAM_IRQ_EN));

	/* unmask global bam interrupt */
	writel_relaxed(BAM_IRQ_MSK, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));
static void bam_channel_init(struct bam_device *bdev, struct bam_chan *bchan,
		u32 index)
	bchan->id = index;
	bchan->bdev = bdev;

	vchan_init(&bchan->vc, &bdev->common);
	bchan->vc.desc_free = bam_dma_free_desc;

static const struct of_device_id bam_of_match[] = {
	{ .compatible = "qcom,bam-v1.3.0", .data = &bam_v1_3_reg_info },
	{ .compatible = "qcom,bam-v1.4.0", .data = &bam_v1_4_reg_info },
	{ .compatible = "qcom,bam-v1.7.0", .data = &bam_v1_7_reg_info },
	{}
};

MODULE_DEVICE_TABLE(of, bam_of_match);
static int bam_dma_probe(struct platform_device *pdev)
	struct bam_device *bdev;
	const struct of_device_id *match;
	struct resource *iores;

	bdev = devm_kzalloc(&pdev->dev, sizeof(*bdev), GFP_KERNEL);

	bdev->dev = &pdev->dev;

	match = of_match_node(bam_of_match, pdev->dev.of_node);
		dev_err(&pdev->dev, "Unsupported BAM module\n");

	bdev->layout = match->data;

	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	bdev->regs = devm_ioremap_resource(&pdev->dev, iores);
	if (IS_ERR(bdev->regs))
		return PTR_ERR(bdev->regs);

	bdev->irq = platform_get_irq(pdev, 0);

	ret = of_property_read_u32(pdev->dev.of_node, "qcom,ee", &bdev->ee);
		dev_err(bdev->dev, "Execution environment unspecified\n");

	bdev->controlled_remotely = of_property_read_bool(pdev->dev.of_node,
						"qcom,controlled-remotely");

	if (bdev->controlled_remotely) {
		ret = of_property_read_u32(pdev->dev.of_node, "num-channels",
					   &bdev->num_channels);
			dev_err(bdev->dev, "num-channels unspecified in dt\n");

		ret = of_property_read_u32(pdev->dev.of_node, "qcom,num-ees",
					   &bdev->num_ees);
			dev_err(bdev->dev, "num-ees unspecified in dt\n");

	bdev->bamclk = devm_clk_get(bdev->dev, "bam_clk");
	if (IS_ERR(bdev->bamclk))
		return PTR_ERR(bdev->bamclk);

	ret = clk_prepare_enable(bdev->bamclk);
		dev_err(bdev->dev, "failed to prepare/enable clock\n");

	ret = bam_init(bdev);
	if (ret)
		goto err_disable_clk;

	tasklet_init(&bdev->task, dma_tasklet, (unsigned long)bdev);

	bdev->channels = devm_kcalloc(bdev->dev, bdev->num_channels,
				sizeof(*bdev->channels), GFP_KERNEL);

	if (!bdev->channels) {
		goto err_tasklet_kill;
	}

	/* allocate and initialize channels */
	INIT_LIST_HEAD(&bdev->common.channels);

	for (i = 0; i < bdev->num_channels; i++)
		bam_channel_init(bdev, &bdev->channels[i], i);

	ret = devm_request_irq(bdev->dev, bdev->irq, bam_dma_irq,
			IRQF_TRIGGER_HIGH, "bam_dma", bdev);
	if (ret)
		goto err_bam_channel_exit;

	/* set max dma segment size */
	bdev->common.dev = bdev->dev;
	bdev->common.dev->dma_parms = &bdev->dma_parms;
	ret = dma_set_max_seg_size(bdev->common.dev, BAM_FIFO_SIZE);
		dev_err(bdev->dev, "cannot set maximum segment size\n");
		goto err_bam_channel_exit;
	platform_set_drvdata(pdev, bdev);

	/* set capabilities */
	dma_cap_zero(bdev->common.cap_mask);
	dma_cap_set(DMA_SLAVE, bdev->common.cap_mask);

	/* initialize dmaengine apis */
	bdev->common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	bdev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
	bdev->common.src_addr_widths = DMA_SLAVE_BUSWIDTH_4_BYTES;
	bdev->common.dst_addr_widths = DMA_SLAVE_BUSWIDTH_4_BYTES;
	bdev->common.device_alloc_chan_resources = bam_alloc_chan;
	bdev->common.device_free_chan_resources = bam_free_chan;
	bdev->common.device_prep_slave_sg = bam_prep_slave_sg;
	bdev->common.device_config = bam_slave_config;
	bdev->common.device_pause = bam_pause;
	bdev->common.device_resume = bam_resume;
	bdev->common.device_terminate_all = bam_dma_terminate_all;
	bdev->common.device_issue_pending = bam_issue_pending;
	bdev->common.device_tx_status = bam_tx_status;
	bdev->common.dev = bdev->dev;

	ret = dma_async_device_register(&bdev->common);
		dev_err(bdev->dev, "failed to register dma async device\n");
		goto err_bam_channel_exit;

	ret = of_dma_controller_register(pdev->dev.of_node, bam_dma_xlate,
					 &bdev->common);
	if (ret)
		goto err_unregister_dma;

	pm_runtime_irq_safe(&pdev->dev);
	pm_runtime_set_autosuspend_delay(&pdev->dev, BAM_DMA_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_mark_last_busy(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

err_unregister_dma:
	dma_async_device_unregister(&bdev->common);
err_bam_channel_exit:
	for (i = 0; i < bdev->num_channels; i++)
		tasklet_kill(&bdev->channels[i].vc.task);
err_tasklet_kill:
	tasklet_kill(&bdev->task);
err_disable_clk:
	clk_disable_unprepare(bdev->bamclk);
static int bam_dma_remove(struct platform_device *pdev)
	struct bam_device *bdev = platform_get_drvdata(pdev);

	pm_runtime_force_suspend(&pdev->dev);

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&bdev->common);

	/* mask all interrupts for this execution environment */
	writel_relaxed(0, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));

	devm_free_irq(bdev->dev, bdev->irq, bdev);

	for (i = 0; i < bdev->num_channels; i++) {
		bam_dma_terminate_all(&bdev->channels[i].vc.chan);
		tasklet_kill(&bdev->channels[i].vc.task);

		if (!bdev->channels[i].fifo_virt)
			continue;

		dma_free_wc(bdev->dev, BAM_DESC_FIFO_SIZE,
			    bdev->channels[i].fifo_virt,
			    bdev->channels[i].fifo_phys);
	}

	tasklet_kill(&bdev->task);

	clk_disable_unprepare(bdev->bamclk);
static int __maybe_unused bam_dma_runtime_suspend(struct device *dev)
	struct bam_device *bdev = dev_get_drvdata(dev);

	clk_disable(bdev->bamclk);

static int __maybe_unused bam_dma_runtime_resume(struct device *dev)
	struct bam_device *bdev = dev_get_drvdata(dev);

	ret = clk_enable(bdev->bamclk);
		dev_err(dev, "clk_enable failed: %d\n", ret);

static int __maybe_unused bam_dma_suspend(struct device *dev)
	struct bam_device *bdev = dev_get_drvdata(dev);

	pm_runtime_force_suspend(dev);

	clk_unprepare(bdev->bamclk);

static int __maybe_unused bam_dma_resume(struct device *dev)
	struct bam_device *bdev = dev_get_drvdata(dev);

	ret = clk_prepare(bdev->bamclk);

	pm_runtime_force_resume(dev);

static const struct dev_pm_ops bam_dma_pm_ops = {
	SET_LATE_SYSTEM_SLEEP_PM_OPS(bam_dma_suspend, bam_dma_resume)
	SET_RUNTIME_PM_OPS(bam_dma_runtime_suspend, bam_dma_runtime_resume,
static struct platform_driver bam_dma_driver = {
	.probe = bam_dma_probe,
	.remove = bam_dma_remove,
	.driver = {
		.name = "bam-dma-engine",
		.pm = &bam_dma_pm_ops,
		.of_match_table = bam_of_match,
	},
};

module_platform_driver(bam_dma_driver);

MODULE_AUTHOR("Andy Gross <agross@codeaurora.org>");
MODULE_DESCRIPTION("QCOM BAM DMA engine driver");
MODULE_LICENSE("GPL v2");