1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
4  */
5 /*
6  * QCOM BAM DMA engine driver
7  *
8  * QCOM BAM DMA blocks are distributed amongst a number of the on-chip
9  * peripherals on the MSM 8x74.  The configuration of the channels are dependent
10  * on the way they are hard wired to that specific peripheral.  The peripheral
11  * device tree entries specify the configuration of each channel.
12  *
13  * The DMA controller requires the use of external memory for storage of the
14  * hardware descriptors for each channel.  The descriptor FIFO is accessed as a
15  * circular buffer and operations are managed according to the offset within the
16  * FIFO.  After pipe/channel reset, all of the pipe registers and internal state
17  * are back to defaults.
18  *
19  * During DMA operations, we write descriptors to the FIFO, being careful to
20  * handle wrapping and then write the last FIFO offset to that channel's
21  * P_EVNT_REG register to kick off the transaction.  The P_SW_OFSTS register
22  * indicates the current FIFO offset that is being processed, so there is some
23  * indication of where the hardware is currently working.
24  */
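
/*
 * Editorial note (not part of the original driver): peripherals consume a
 * BAM pipe through the generic dmaengine slave API.  A minimal sketch,
 * assuming a hypothetical client that owns an "rx" channel in its device
 * tree node:
 *
 *	struct dma_chan *chan = dma_request_chan(dev, "rx");
 *	struct dma_slave_config cfg = { .src_maxburst = 16 };
 *	struct dma_async_tx_descriptor *txd;
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	txd = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_DEV_TO_MEM,
 *				      DMA_PREP_INTERRUPT);
 *	txd->callback = rx_done;	/* hypothetical completion handler */
 *	dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 *
 * bam_prep_slave_sg() below builds the hardware descriptors for such a
 * request, and bam_start_dma() later copies them into the channel's
 * descriptor FIFO and writes P_EVNT_REG to kick off the transfer.
 */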
25
26 #include <linux/kernel.h>
27 #include <linux/io.h>
28 #include <linux/init.h>
29 #include <linux/slab.h>
30 #include <linux/module.h>
31 #include <linux/interrupt.h>
32 #include <linux/dma-mapping.h>
33 #include <linux/scatterlist.h>
34 #include <linux/device.h>
35 #include <linux/platform_device.h>
36 #include <linux/of.h>
37 #include <linux/of_address.h>
38 #include <linux/of_irq.h>
39 #include <linux/of_dma.h>
40 #include <linux/circ_buf.h>
41 #include <linux/clk.h>
42 #include <linux/dmaengine.h>
43 #include <linux/pm_runtime.h>
44
45 #include "../dmaengine.h"
46 #include "../virt-dma.h"
47
48 struct bam_desc_hw {
49         __le32 addr;            /* Buffer physical address */
50         __le16 size;            /* Buffer size in bytes */
51         __le16 flags;
52 };
53
54 #define BAM_DMA_AUTOSUSPEND_DELAY 100
55
56 #define DESC_FLAG_INT BIT(15)
57 #define DESC_FLAG_EOT BIT(14)
58 #define DESC_FLAG_EOB BIT(13)
59 #define DESC_FLAG_NWD BIT(12)
60 #define DESC_FLAG_CMD BIT(11)
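
/*
 * Editorial note: INT requests an interrupt when the descriptor is
 * processed, EOT marks End Of Transfer, EOB End Of Block, NWD is
 * "notify when done" (set from DMA_PREP_FENCE in bam_prep_slave_sg())
 * and CMD marks a command descriptor (set from DMA_PREP_CMD).
 */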
61
62 struct bam_async_desc {
63         struct virt_dma_desc vd;
64
65         u32 num_desc;
66         u32 xfer_len;
67
68         /* transaction flags, EOT|EOB|NWD */
69         u16 flags;
70
71         struct bam_desc_hw *curr_desc;
72
73         /* list node for the desc in the bam_chan list of descriptors */
74         struct list_head desc_node;
75         enum dma_transfer_direction dir;
76         size_t length;
77         struct bam_desc_hw desc[];
78 };
79
80 enum bam_reg {
81         BAM_CTRL,
82         BAM_REVISION,
83         BAM_NUM_PIPES,
84         BAM_DESC_CNT_TRSHLD,
85         BAM_IRQ_SRCS,
86         BAM_IRQ_SRCS_MSK,
87         BAM_IRQ_SRCS_UNMASKED,
88         BAM_IRQ_STTS,
89         BAM_IRQ_CLR,
90         BAM_IRQ_EN,
91         BAM_CNFG_BITS,
92         BAM_IRQ_SRCS_EE,
93         BAM_IRQ_SRCS_MSK_EE,
94         BAM_P_CTRL,
95         BAM_P_RST,
96         BAM_P_HALT,
97         BAM_P_IRQ_STTS,
98         BAM_P_IRQ_CLR,
99         BAM_P_IRQ_EN,
100         BAM_P_EVNT_DEST_ADDR,
101         BAM_P_EVNT_REG,
102         BAM_P_SW_OFSTS,
103         BAM_P_DATA_FIFO_ADDR,
104         BAM_P_DESC_FIFO_ADDR,
105         BAM_P_EVNT_GEN_TRSHLD,
106         BAM_P_FIFO_SIZES,
107 };
108
109 struct reg_offset_data {
110         u32 base_offset;
111         unsigned int pipe_mult, evnt_mult, ee_mult;
112 };
113
114 static const struct reg_offset_data bam_v1_3_reg_info[] = {
115         [BAM_CTRL]              = { 0x0F80, 0x00, 0x00, 0x00 },
116         [BAM_REVISION]          = { 0x0F84, 0x00, 0x00, 0x00 },
117         [BAM_NUM_PIPES]         = { 0x0FBC, 0x00, 0x00, 0x00 },
118         [BAM_DESC_CNT_TRSHLD]   = { 0x0F88, 0x00, 0x00, 0x00 },
119         [BAM_IRQ_SRCS]          = { 0x0F8C, 0x00, 0x00, 0x00 },
120         [BAM_IRQ_SRCS_MSK]      = { 0x0F90, 0x00, 0x00, 0x00 },
121         [BAM_IRQ_SRCS_UNMASKED] = { 0x0FB0, 0x00, 0x00, 0x00 },
122         [BAM_IRQ_STTS]          = { 0x0F94, 0x00, 0x00, 0x00 },
123         [BAM_IRQ_CLR]           = { 0x0F98, 0x00, 0x00, 0x00 },
124         [BAM_IRQ_EN]            = { 0x0F9C, 0x00, 0x00, 0x00 },
125         [BAM_CNFG_BITS]         = { 0x0FFC, 0x00, 0x00, 0x00 },
126         [BAM_IRQ_SRCS_EE]       = { 0x1800, 0x00, 0x00, 0x80 },
127         [BAM_IRQ_SRCS_MSK_EE]   = { 0x1804, 0x00, 0x00, 0x80 },
128         [BAM_P_CTRL]            = { 0x0000, 0x80, 0x00, 0x00 },
129         [BAM_P_RST]             = { 0x0004, 0x80, 0x00, 0x00 },
130         [BAM_P_HALT]            = { 0x0008, 0x80, 0x00, 0x00 },
131         [BAM_P_IRQ_STTS]        = { 0x0010, 0x80, 0x00, 0x00 },
132         [BAM_P_IRQ_CLR]         = { 0x0014, 0x80, 0x00, 0x00 },
133         [BAM_P_IRQ_EN]          = { 0x0018, 0x80, 0x00, 0x00 },
134         [BAM_P_EVNT_DEST_ADDR]  = { 0x102C, 0x00, 0x40, 0x00 },
135         [BAM_P_EVNT_REG]        = { 0x1018, 0x00, 0x40, 0x00 },
136         [BAM_P_SW_OFSTS]        = { 0x1000, 0x00, 0x40, 0x00 },
137         [BAM_P_DATA_FIFO_ADDR]  = { 0x1024, 0x00, 0x40, 0x00 },
138         [BAM_P_DESC_FIFO_ADDR]  = { 0x101C, 0x00, 0x40, 0x00 },
139         [BAM_P_EVNT_GEN_TRSHLD] = { 0x1028, 0x00, 0x40, 0x00 },
140         [BAM_P_FIFO_SIZES]      = { 0x1020, 0x00, 0x40, 0x00 },
141 };
142
143 static const struct reg_offset_data bam_v1_4_reg_info[] = {
144         [BAM_CTRL]              = { 0x0000, 0x00, 0x00, 0x00 },
145         [BAM_REVISION]          = { 0x0004, 0x00, 0x00, 0x00 },
146         [BAM_NUM_PIPES]         = { 0x003C, 0x00, 0x00, 0x00 },
147         [BAM_DESC_CNT_TRSHLD]   = { 0x0008, 0x00, 0x00, 0x00 },
148         [BAM_IRQ_SRCS]          = { 0x000C, 0x00, 0x00, 0x00 },
149         [BAM_IRQ_SRCS_MSK]      = { 0x0010, 0x00, 0x00, 0x00 },
150         [BAM_IRQ_SRCS_UNMASKED] = { 0x0030, 0x00, 0x00, 0x00 },
151         [BAM_IRQ_STTS]          = { 0x0014, 0x00, 0x00, 0x00 },
152         [BAM_IRQ_CLR]           = { 0x0018, 0x00, 0x00, 0x00 },
153         [BAM_IRQ_EN]            = { 0x001C, 0x00, 0x00, 0x00 },
154         [BAM_CNFG_BITS]         = { 0x007C, 0x00, 0x00, 0x00 },
155         [BAM_IRQ_SRCS_EE]       = { 0x0800, 0x00, 0x00, 0x80 },
156         [BAM_IRQ_SRCS_MSK_EE]   = { 0x0804, 0x00, 0x00, 0x80 },
157         [BAM_P_CTRL]            = { 0x1000, 0x1000, 0x00, 0x00 },
158         [BAM_P_RST]             = { 0x1004, 0x1000, 0x00, 0x00 },
159         [BAM_P_HALT]            = { 0x1008, 0x1000, 0x00, 0x00 },
160         [BAM_P_IRQ_STTS]        = { 0x1010, 0x1000, 0x00, 0x00 },
161         [BAM_P_IRQ_CLR]         = { 0x1014, 0x1000, 0x00, 0x00 },
162         [BAM_P_IRQ_EN]          = { 0x1018, 0x1000, 0x00, 0x00 },
163         [BAM_P_EVNT_DEST_ADDR]  = { 0x182C, 0x00, 0x1000, 0x00 },
164         [BAM_P_EVNT_REG]        = { 0x1818, 0x00, 0x1000, 0x00 },
165         [BAM_P_SW_OFSTS]        = { 0x1800, 0x00, 0x1000, 0x00 },
166         [BAM_P_DATA_FIFO_ADDR]  = { 0x1824, 0x00, 0x1000, 0x00 },
167         [BAM_P_DESC_FIFO_ADDR]  = { 0x181C, 0x00, 0x1000, 0x00 },
168         [BAM_P_EVNT_GEN_TRSHLD] = { 0x1828, 0x00, 0x1000, 0x00 },
169         [BAM_P_FIFO_SIZES]      = { 0x1820, 0x00, 0x1000, 0x00 },
170 };
171
172 static const struct reg_offset_data bam_v1_7_reg_info[] = {
173         [BAM_CTRL]              = { 0x00000, 0x00, 0x00, 0x00 },
174         [BAM_REVISION]          = { 0x01000, 0x00, 0x00, 0x00 },
175         [BAM_NUM_PIPES]         = { 0x01008, 0x00, 0x00, 0x00 },
176         [BAM_DESC_CNT_TRSHLD]   = { 0x00008, 0x00, 0x00, 0x00 },
177         [BAM_IRQ_SRCS]          = { 0x03010, 0x00, 0x00, 0x00 },
178         [BAM_IRQ_SRCS_MSK]      = { 0x03014, 0x00, 0x00, 0x00 },
179         [BAM_IRQ_SRCS_UNMASKED] = { 0x03018, 0x00, 0x00, 0x00 },
180         [BAM_IRQ_STTS]          = { 0x00014, 0x00, 0x00, 0x00 },
181         [BAM_IRQ_CLR]           = { 0x00018, 0x00, 0x00, 0x00 },
182         [BAM_IRQ_EN]            = { 0x0001C, 0x00, 0x00, 0x00 },
183         [BAM_CNFG_BITS]         = { 0x0007C, 0x00, 0x00, 0x00 },
184         [BAM_IRQ_SRCS_EE]       = { 0x03000, 0x00, 0x00, 0x1000 },
185         [BAM_IRQ_SRCS_MSK_EE]   = { 0x03004, 0x00, 0x00, 0x1000 },
186         [BAM_P_CTRL]            = { 0x13000, 0x1000, 0x00, 0x00 },
187         [BAM_P_RST]             = { 0x13004, 0x1000, 0x00, 0x00 },
188         [BAM_P_HALT]            = { 0x13008, 0x1000, 0x00, 0x00 },
189         [BAM_P_IRQ_STTS]        = { 0x13010, 0x1000, 0x00, 0x00 },
190         [BAM_P_IRQ_CLR]         = { 0x13014, 0x1000, 0x00, 0x00 },
191         [BAM_P_IRQ_EN]          = { 0x13018, 0x1000, 0x00, 0x00 },
192         [BAM_P_EVNT_DEST_ADDR]  = { 0x1382C, 0x00, 0x1000, 0x00 },
193         [BAM_P_EVNT_REG]        = { 0x13818, 0x00, 0x1000, 0x00 },
194         [BAM_P_SW_OFSTS]        = { 0x13800, 0x00, 0x1000, 0x00 },
195         [BAM_P_DATA_FIFO_ADDR]  = { 0x13824, 0x00, 0x1000, 0x00 },
196         [BAM_P_DESC_FIFO_ADDR]  = { 0x1381C, 0x00, 0x1000, 0x00 },
197         [BAM_P_EVNT_GEN_TRSHLD] = { 0x13828, 0x00, 0x1000, 0x00 },
198         [BAM_P_FIFO_SIZES]      = { 0x13820, 0x00, 0x1000, 0x00 },
199 };
200
201 /* BAM CTRL */
202 #define BAM_SW_RST                      BIT(0)
203 #define BAM_EN                          BIT(1)
204 #define BAM_EN_ACCUM                    BIT(4)
205 #define BAM_TESTBUS_SEL_SHIFT           5
206 #define BAM_TESTBUS_SEL_MASK            0x3F
207 #define BAM_DESC_CACHE_SEL_SHIFT        13
208 #define BAM_DESC_CACHE_SEL_MASK         0x3
209 #define BAM_CACHED_DESC_STORE           BIT(15)
210 #define IBC_DISABLE                     BIT(16)
211
212 /* BAM REVISION */
213 #define REVISION_SHIFT          0
214 #define REVISION_MASK           0xFF
215 #define NUM_EES_SHIFT           8
216 #define NUM_EES_MASK            0xF
217 #define CE_BUFFER_SIZE          BIT(13)
218 #define AXI_ACTIVE              BIT(14)
219 #define USE_VMIDMT              BIT(15)
220 #define SECURED                 BIT(16)
221 #define BAM_HAS_NO_BYPASS       BIT(17)
222 #define HIGH_FREQUENCY_BAM      BIT(18)
223 #define INACTIV_TMRS_EXST       BIT(19)
224 #define NUM_INACTIV_TMRS        BIT(20)
225 #define DESC_CACHE_DEPTH_SHIFT  21
226 #define DESC_CACHE_DEPTH_1      (0 << DESC_CACHE_DEPTH_SHIFT)
227 #define DESC_CACHE_DEPTH_2      (1 << DESC_CACHE_DEPTH_SHIFT)
228 #define DESC_CACHE_DEPTH_3      (2 << DESC_CACHE_DEPTH_SHIFT)
229 #define DESC_CACHE_DEPTH_4      (3 << DESC_CACHE_DEPTH_SHIFT)
230 #define CMD_DESC_EN             BIT(23)
231 #define INACTIV_TMR_BASE_SHIFT  24
232 #define INACTIV_TMR_BASE_MASK   0xFF
233
234 /* BAM NUM PIPES */
235 #define BAM_NUM_PIPES_SHIFT             0
236 #define BAM_NUM_PIPES_MASK              0xFF
237 #define PERIPH_NON_PIPE_GRP_SHIFT       16
238 #define PERIPH_NON_PIPE_GRP_MASK        0xFF
239 #define BAM_NON_PIPE_GRP_SHIFT          24
240 #define BAM_NON_PIPE_GRP_MASK           0xFF
241
242 /* BAM CNFG BITS */
243 #define BAM_PIPE_CNFG           BIT(2)
244 #define BAM_FULL_PIPE           BIT(11)
245 #define BAM_NO_EXT_P_RST        BIT(12)
246 #define BAM_IBC_DISABLE         BIT(13)
247 #define BAM_SB_CLK_REQ          BIT(14)
248 #define BAM_PSM_CSW_REQ         BIT(15)
249 #define BAM_PSM_P_RES           BIT(16)
250 #define BAM_AU_P_RES            BIT(17)
251 #define BAM_SI_P_RES            BIT(18)
252 #define BAM_WB_P_RES            BIT(19)
253 #define BAM_WB_BLK_CSW          BIT(20)
254 #define BAM_WB_CSW_ACK_IDL      BIT(21)
255 #define BAM_WB_RETR_SVPNT       BIT(22)
256 #define BAM_WB_DSC_AVL_P_RST    BIT(23)
257 #define BAM_REG_P_EN            BIT(24)
258 #define BAM_PSM_P_HD_DATA       BIT(25)
259 #define BAM_AU_ACCUMED          BIT(26)
260 #define BAM_CMD_ENABLE          BIT(27)
261
262 #define BAM_CNFG_BITS_DEFAULT   (BAM_PIPE_CNFG |        \
263                                  BAM_NO_EXT_P_RST |     \
264                                  BAM_IBC_DISABLE |      \
265                                  BAM_SB_CLK_REQ |       \
266                                  BAM_PSM_CSW_REQ |      \
267                                  BAM_PSM_P_RES |        \
268                                  BAM_AU_P_RES |         \
269                                  BAM_SI_P_RES |         \
270                                  BAM_WB_P_RES |         \
271                                  BAM_WB_BLK_CSW |       \
272                                  BAM_WB_CSW_ACK_IDL |   \
273                                  BAM_WB_RETR_SVPNT |    \
274                                  BAM_WB_DSC_AVL_P_RST | \
275                                  BAM_REG_P_EN |         \
276                                  BAM_PSM_P_HD_DATA |    \
277                                  BAM_AU_ACCUMED |       \
278                                  BAM_CMD_ENABLE)
279
280 /* PIPE CTRL */
281 #define P_EN                    BIT(1)
282 #define P_DIRECTION             BIT(3)
283 #define P_SYS_STRM              BIT(4)
284 #define P_SYS_MODE              BIT(5)
285 #define P_AUTO_EOB              BIT(6)
286 #define P_AUTO_EOB_SEL_SHIFT    7
287 #define P_AUTO_EOB_SEL_512      (0 << P_AUTO_EOB_SEL_SHIFT)
288 #define P_AUTO_EOB_SEL_256      (1 << P_AUTO_EOB_SEL_SHIFT)
289 #define P_AUTO_EOB_SEL_128      (2 << P_AUTO_EOB_SEL_SHIFT)
290 #define P_AUTO_EOB_SEL_64       (3 << P_AUTO_EOB_SEL_SHIFT)
291 #define P_PREFETCH_LIMIT_SHIFT  9
292 #define P_PREFETCH_LIMIT_32     (0 << P_PREFETCH_LIMIT_SHIFT)
293 #define P_PREFETCH_LIMIT_16     (1 << P_PREFETCH_LIMIT_SHIFT)
294 #define P_PREFETCH_LIMIT_4      (2 << P_PREFETCH_LIMIT_SHIFT)
295 #define P_WRITE_NWD             BIT(11)
296 #define P_LOCK_GROUP_SHIFT      16
297 #define P_LOCK_GROUP_MASK       0x1F
298
299 /* BAM_DESC_CNT_TRSHLD */
300 #define CNT_TRSHLD              0xffff
301 #define DEFAULT_CNT_THRSHLD     0x4
302
303 /* BAM_IRQ_SRCS */
304 #define BAM_IRQ                 BIT(31)
305 #define P_IRQ                   0x7fffffff
306
307 /* BAM_IRQ_SRCS_MSK */
308 #define BAM_IRQ_MSK             BAM_IRQ
309 #define P_IRQ_MSK               P_IRQ
310
311 /* BAM_IRQ_STTS */
312 #define BAM_TIMER_IRQ           BIT(4)
313 #define BAM_EMPTY_IRQ           BIT(3)
314 #define BAM_ERROR_IRQ           BIT(2)
315 #define BAM_HRESP_ERR_IRQ       BIT(1)
316
317 /* BAM_IRQ_CLR */
318 #define BAM_TIMER_CLR           BIT(4)
319 #define BAM_EMPTY_CLR           BIT(3)
320 #define BAM_ERROR_CLR           BIT(2)
321 #define BAM_HRESP_ERR_CLR       BIT(1)
322
323 /* BAM_IRQ_EN */
324 #define BAM_TIMER_EN            BIT(4)
325 #define BAM_EMPTY_EN            BIT(3)
326 #define BAM_ERROR_EN            BIT(2)
327 #define BAM_HRESP_ERR_EN        BIT(1)
328
329 /* BAM_P_IRQ_EN */
330 #define P_PRCSD_DESC_EN         BIT(0)
331 #define P_TIMER_EN              BIT(1)
332 #define P_WAKE_EN               BIT(2)
333 #define P_OUT_OF_DESC_EN        BIT(3)
334 #define P_ERR_EN                BIT(4)
335 #define P_TRNSFR_END_EN         BIT(5)
336 #define P_DEFAULT_IRQS_EN       (P_PRCSD_DESC_EN | P_ERR_EN | P_TRNSFR_END_EN)
337
338 /* BAM_P_SW_OFSTS */
339 #define P_SW_OFSTS_MASK         0xffff
340
341 #define BAM_DESC_FIFO_SIZE      SZ_32K
342 #define MAX_DESCRIPTORS (BAM_DESC_FIFO_SIZE / sizeof(struct bam_desc_hw) - 1)
343 #define BAM_FIFO_SIZE   (SZ_32K - 8)
344 #define IS_BUSY(chan)   (CIRC_SPACE((chan)->tail, (chan)->head,\
345                          MAX_DESCRIPTORS + 1) == 0)
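
/*
 * Editorial note on the sizing above: struct bam_desc_hw is 8 bytes, so
 * the 32 KiB FIFO allocation holds 32768 / 8 = 4096 descriptor slots.
 * The hardware FIFO is programmed as BAM_FIFO_SIZE = 32768 - 8 = 32760
 * bytes (4095 descriptors); the spare 8 bytes let bam_chan_init_hw()
 * round the FIFO base up to an 8-byte boundary within the allocation,
 * and MAX_DESCRIPTORS (4095) matches that usable depth.
 */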
346
347 struct bam_chan {
348         struct virt_dma_chan vc;
349
350         struct bam_device *bdev;
351
352         /* configuration from device tree */
353         u32 id;
354
355         /* runtime configuration */
356         struct dma_slave_config slave;
357
358         /* fifo storage */
359         struct bam_desc_hw *fifo_virt;
360         dma_addr_t fifo_phys;
361
362         /* fifo markers */
363         unsigned short head;            /* start of active descriptor entries */
364         unsigned short tail;            /* end of active descriptor entries */
365
366         unsigned int initialized;       /* is the channel hw initialized? */
367         unsigned int paused;            /* is the channel paused? */
368         unsigned int reconfigure;       /* new slave config? */
369         /* list of descriptors currently processed */
370         struct list_head desc_list;
371
372         struct list_head node;
373 };
374
375 static inline struct bam_chan *to_bam_chan(struct dma_chan *common)
376 {
377         return container_of(common, struct bam_chan, vc.chan);
378 }
379
380 struct bam_device {
381         void __iomem *regs;
382         struct device *dev;
383         struct dma_device common;
384         struct device_dma_parameters dma_parms;
385         struct bam_chan *channels;
386         u32 num_channels;
387         u32 num_ees;
388
389         /* execution environment ID, from DT */
390         u32 ee;
391         bool controlled_remotely;
392
393         const struct reg_offset_data *layout;
394
395         struct clk *bamclk;
396         int irq;
397
398         /* dma start transaction tasklet */
399         struct tasklet_struct task;
400 };
401
402 /**
403  * bam_addr - returns BAM register address
404  * @bdev: bam device
405  * @pipe: pipe instance (ignored when register doesn't have multiple instances)
406  * @reg:  register enum
407  */
408 static inline void __iomem *bam_addr(struct bam_device *bdev, u32 pipe,
409                 enum bam_reg reg)
410 {
411         const struct reg_offset_data r = bdev->layout[reg];
412
413         return bdev->regs + r.base_offset +
414                 r.pipe_mult * pipe +
415                 r.evnt_mult * pipe +
416                 r.ee_mult * bdev->ee;
417 }
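
/*
 * Editorial example, using the bam_v1_4_reg_info table above: for pipe 3
 * on a v1.4 BAM with ee = 1,
 *
 *	BAM_P_CTRL      -> regs + 0x1000 + 0x1000 * 3 = regs + 0x4000
 *	BAM_P_EVNT_REG  -> regs + 0x1818 + 0x1000 * 3 = regs + 0x4818
 *	BAM_IRQ_SRCS_EE -> regs + 0x0800 + 0x0080 * 1 = regs + 0x0880
 *
 * i.e. per-pipe registers step by pipe_mult/evnt_mult for each pipe and
 * the per-EE registers step by ee_mult for each execution environment.
 */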
418
419 /**
420  * bam_reset_channel - Reset individual BAM DMA channel
421  * @bchan: bam channel
422  *
423  * This function resets a specific BAM channel
424  */
425 static void bam_reset_channel(struct bam_chan *bchan)
426 {
427         struct bam_device *bdev = bchan->bdev;
428
429         lockdep_assert_held(&bchan->vc.lock);
430
431         /* reset channel */
432         writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_RST));
433         writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_RST));
434
435         /* don't allow cpu to reorder BAM register accesses done after this */
436         wmb();
437
438         /* make sure hw is initialized when channel is used the first time  */
439         bchan->initialized = 0;
440 }
441
442 /**
443  * bam_chan_init_hw - Initialize channel hardware
444  * @bchan: bam channel
445  * @dir: DMA transfer direction
446  *
447  * This function resets and initializes the BAM channel
448  */
449 static void bam_chan_init_hw(struct bam_chan *bchan,
450         enum dma_transfer_direction dir)
451 {
452         struct bam_device *bdev = bchan->bdev;
453         u32 val;
454
455         /* Reset the channel to clear internal state of the FIFO */
456         bam_reset_channel(bchan);
457
458         /*
459          * write out 8 byte aligned address.  We have enough space for this
460          * because we allocated 1 more descriptor (8 bytes) than we can use
461          */
462         writel_relaxed(ALIGN(bchan->fifo_phys, sizeof(struct bam_desc_hw)),
463                         bam_addr(bdev, bchan->id, BAM_P_DESC_FIFO_ADDR));
464         writel_relaxed(BAM_FIFO_SIZE,
465                         bam_addr(bdev, bchan->id, BAM_P_FIFO_SIZES));
466
467         /* enable the per pipe irqs: processed desc, error and transfer end */
468         writel_relaxed(P_DEFAULT_IRQS_EN,
469                         bam_addr(bdev, bchan->id, BAM_P_IRQ_EN));
470
471         /* unmask the specific pipe and EE combo */
472         val = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));
473         val |= BIT(bchan->id);
474         writel_relaxed(val, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));
475
476         /* don't allow cpu to reorder the channel enable done below */
477         wmb();
478
479         /* set fixed direction and mode, then enable channel */
480         val = P_EN | P_SYS_MODE;
481         if (dir == DMA_DEV_TO_MEM)
482                 val |= P_DIRECTION;
483
484         writel_relaxed(val, bam_addr(bdev, bchan->id, BAM_P_CTRL));
485
486         bchan->initialized = 1;
487
488         /* init FIFO pointers */
489         bchan->head = 0;
490         bchan->tail = 0;
491 }
492
493 /**
494  * bam_alloc_chan - Allocate channel resources for DMA channel.
495  * @chan: specified channel
496  *
497  * This function allocates the FIFO descriptor memory
498  */
499 static int bam_alloc_chan(struct dma_chan *chan)
500 {
501         struct bam_chan *bchan = to_bam_chan(chan);
502         struct bam_device *bdev = bchan->bdev;
503
504         if (bchan->fifo_virt)
505                 return 0;
506
507         /* allocate FIFO descriptor space, but only if necessary */
508         bchan->fifo_virt = dma_alloc_wc(bdev->dev, BAM_DESC_FIFO_SIZE,
509                                         &bchan->fifo_phys, GFP_KERNEL);
510
511         if (!bchan->fifo_virt) {
512                 dev_err(bdev->dev, "Failed to allocate desc fifo\n");
513                 return -ENOMEM;
514         }
515
516         return 0;
517 }
518
519 static int bam_pm_runtime_get_sync(struct device *dev)
520 {
521         if (pm_runtime_enabled(dev))
522                 return pm_runtime_get_sync(dev);
523
524         return 0;
525 }
526
527 /**
528  * bam_free_chan - Frees dma resources associated with specific channel
529  * @chan: specified channel
530  *
531  * Free the allocated fifo descriptor memory and channel resources
532  *
533  */
534 static void bam_free_chan(struct dma_chan *chan)
535 {
536         struct bam_chan *bchan = to_bam_chan(chan);
537         struct bam_device *bdev = bchan->bdev;
538         u32 val;
539         unsigned long flags;
540         int ret;
541
542         ret = bam_pm_runtime_get_sync(bdev->dev);
543         if (ret < 0)
544                 return;
545
546         vchan_free_chan_resources(to_virt_chan(chan));
547
548         if (!list_empty(&bchan->desc_list)) {
549                 dev_err(bchan->bdev->dev, "Cannot free busy channel\n");
550                 goto err;
551         }
552
553         spin_lock_irqsave(&bchan->vc.lock, flags);
554         bam_reset_channel(bchan);
555         spin_unlock_irqrestore(&bchan->vc.lock, flags);
556
557         dma_free_wc(bdev->dev, BAM_DESC_FIFO_SIZE, bchan->fifo_virt,
558                     bchan->fifo_phys);
559         bchan->fifo_virt = NULL;
560
561         /* mask irq for pipe/channel */
562         val = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));
563         val &= ~BIT(bchan->id);
564         writel_relaxed(val, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));
565
566         /* disable irq */
567         writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_IRQ_EN));
568
569 err:
570         pm_runtime_mark_last_busy(bdev->dev);
571         pm_runtime_put_autosuspend(bdev->dev);
572 }
573
574 /**
575  * bam_slave_config - set slave configuration for channel
576  * @chan: dma channel
577  * @cfg: slave configuration
578  *
579  * Sets slave configuration for channel
580  *
581  */
582 static int bam_slave_config(struct dma_chan *chan,
583                             struct dma_slave_config *cfg)
584 {
585         struct bam_chan *bchan = to_bam_chan(chan);
586         unsigned long flag;
587
588         spin_lock_irqsave(&bchan->vc.lock, flag);
589         memcpy(&bchan->slave, cfg, sizeof(*cfg));
590         bchan->reconfigure = 1;
591         spin_unlock_irqrestore(&bchan->vc.lock, flag);
592
593         return 0;
594 }
595
596 /**
597  * bam_prep_slave_sg - Prep slave sg transaction
598  *
599  * @chan: dma channel
600  * @sgl: scatter gather list
601  * @sg_len: length of sg
602  * @direction: DMA transfer direction
603  * @flags: DMA flags
604  * @context: transfer context (unused)
605  */
606 static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
607         struct scatterlist *sgl, unsigned int sg_len,
608         enum dma_transfer_direction direction, unsigned long flags,
609         void *context)
610 {
611         struct bam_chan *bchan = to_bam_chan(chan);
612         struct bam_device *bdev = bchan->bdev;
613         struct bam_async_desc *async_desc;
614         struct scatterlist *sg;
615         u32 i;
616         struct bam_desc_hw *desc;
617         unsigned int num_alloc = 0;
618
619
620         if (!is_slave_direction(direction)) {
621                 dev_err(bdev->dev, "invalid dma direction\n");
622                 return NULL;
623         }
624
625         /* calculate number of required entries */
626         for_each_sg(sgl, sg, sg_len, i)
627                 num_alloc += DIV_ROUND_UP(sg_dma_len(sg), BAM_FIFO_SIZE);
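
	/*
	 * Editorial example: a single 100000-byte sg entry needs
	 * DIV_ROUND_UP(100000, 32760) = 4 hardware descriptors
	 * (32760 + 32760 + 32760 + 1720 bytes), because one descriptor
	 * can move at most BAM_FIFO_SIZE bytes.
	 */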
628
629         /* allocate enough room to accommodate the number of entries */
630         async_desc = kzalloc(struct_size(async_desc, desc, num_alloc),
631                              GFP_NOWAIT);
632
633         if (!async_desc)
634                 goto err_out;
635
636         if (flags & DMA_PREP_FENCE)
637                 async_desc->flags |= DESC_FLAG_NWD;
638
639         if (flags & DMA_PREP_INTERRUPT)
640                 async_desc->flags |= DESC_FLAG_EOT;
641
642         async_desc->num_desc = num_alloc;
643         async_desc->curr_desc = async_desc->desc;
644         async_desc->dir = direction;
645
646         /* fill in temporary descriptors */
647         desc = async_desc->desc;
648         for_each_sg(sgl, sg, sg_len, i) {
649                 unsigned int remainder = sg_dma_len(sg);
650                 unsigned int curr_offset = 0;
651
652                 do {
653                         if (flags & DMA_PREP_CMD)
654                                 desc->flags |= cpu_to_le16(DESC_FLAG_CMD);
655
656                         desc->addr = cpu_to_le32(sg_dma_address(sg) +
657                                                  curr_offset);
658
659                         if (remainder > BAM_FIFO_SIZE) {
660                                 desc->size = cpu_to_le16(BAM_FIFO_SIZE);
661                                 remainder -= BAM_FIFO_SIZE;
662                                 curr_offset += BAM_FIFO_SIZE;
663                         } else {
664                                 desc->size = cpu_to_le16(remainder);
665                                 remainder = 0;
666                         }
667
668                         async_desc->length += le16_to_cpu(desc->size);
669                         desc++;
670                 } while (remainder > 0);
671         }
672
673         return vchan_tx_prep(&bchan->vc, &async_desc->vd, flags);
674
675 err_out:
676         kfree(async_desc);
677         return NULL;
678 }
679
680 /**
681  * bam_dma_terminate_all - terminate all transactions on a channel
682  * @chan: bam dma channel
683  *
684  * Dequeues and frees all transactions
685  * No callbacks are done
686  *
687  */
688 static int bam_dma_terminate_all(struct dma_chan *chan)
689 {
690         struct bam_chan *bchan = to_bam_chan(chan);
691         struct bam_async_desc *async_desc, *tmp;
692         unsigned long flag;
693         LIST_HEAD(head);
694
695         /* remove all transactions, including active transaction */
696         spin_lock_irqsave(&bchan->vc.lock, flag);
697         /*
698          * If we have transactions queued, then some might be committed to the
699          * hardware in the desc fifo.  The only way to reset the desc fifo is
700          * to do a hardware reset (either by pipe or the entire block).
701          * bam_chan_init_hw() will trigger a pipe reset, and also reinit the
702          * pipe.  If the pipe is left disabled (default state after pipe reset)
703          * and is accessed by a connected hardware engine, a fatal error in
704          * the BAM will occur.  There is a small window where this could happen
705          * with bam_chan_init_hw(), but it is assumed that the caller has
706          * stopped activity on any attached hardware engine.  Make sure to do
707          * this first so that the BAM hardware doesn't cause memory corruption
708          * by accessing freed resources.
709          */
710         if (!list_empty(&bchan->desc_list)) {
711                 async_desc = list_first_entry(&bchan->desc_list,
712                                               struct bam_async_desc, desc_node);
713                 bam_chan_init_hw(bchan, async_desc->dir);
714         }
715
716         list_for_each_entry_safe(async_desc, tmp,
717                                  &bchan->desc_list, desc_node) {
718                 list_add(&async_desc->vd.node, &bchan->vc.desc_issued);
719                 list_del(&async_desc->desc_node);
720         }
721
722         vchan_get_all_descriptors(&bchan->vc, &head);
723         spin_unlock_irqrestore(&bchan->vc.lock, flag);
724
725         vchan_dma_desc_free_list(&bchan->vc, &head);
726
727         return 0;
728 }
729
730 /**
731  * bam_pause - Pause DMA channel
732  * @chan: dma channel
733  *
734  */
735 static int bam_pause(struct dma_chan *chan)
736 {
737         struct bam_chan *bchan = to_bam_chan(chan);
738         struct bam_device *bdev = bchan->bdev;
739         unsigned long flag;
740         int ret;
741
742         ret = bam_pm_runtime_get_sync(bdev->dev);
743         if (ret < 0)
744                 return ret;
745
746         spin_lock_irqsave(&bchan->vc.lock, flag);
747         writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_HALT));
748         bchan->paused = 1;
749         spin_unlock_irqrestore(&bchan->vc.lock, flag);
750         pm_runtime_mark_last_busy(bdev->dev);
751         pm_runtime_put_autosuspend(bdev->dev);
752
753         return 0;
754 }
755
756 /**
757  * bam_resume - Resume DMA channel operations
758  * @chan: dma channel
759  *
760  */
761 static int bam_resume(struct dma_chan *chan)
762 {
763         struct bam_chan *bchan = to_bam_chan(chan);
764         struct bam_device *bdev = bchan->bdev;
765         unsigned long flag;
766         int ret;
767
768         ret = bam_pm_runtime_get_sync(bdev->dev);
769         if (ret < 0)
770                 return ret;
771
772         spin_lock_irqsave(&bchan->vc.lock, flag);
773         writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_HALT));
774         bchan->paused = 0;
775         spin_unlock_irqrestore(&bchan->vc.lock, flag);
776         pm_runtime_mark_last_busy(bdev->dev);
777         pm_runtime_put_autosuspend(bdev->dev);
778
779         return 0;
780 }
781
782 /**
783  * process_channel_irqs - processes the channel interrupts
784  * @bdev: bam controller
785  *
786  * This function processes the channel interrupts
787  *
788  */
789 static u32 process_channel_irqs(struct bam_device *bdev)
790 {
791         u32 i, srcs, pipe_stts, offset, avail;
792         unsigned long flags;
793         struct bam_async_desc *async_desc, *tmp;
794
795         srcs = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_EE));
796
797         /* return early if no pipe/channel interrupts are present */
798         if (!(srcs & P_IRQ))
799                 return srcs;
800
801         for (i = 0; i < bdev->num_channels; i++) {
802                 struct bam_chan *bchan = &bdev->channels[i];
803
804                 if (!(srcs & BIT(i)))
805                         continue;
806
807                 /* clear pipe irq */
808                 pipe_stts = readl_relaxed(bam_addr(bdev, i, BAM_P_IRQ_STTS));
809
810                 writel_relaxed(pipe_stts, bam_addr(bdev, i, BAM_P_IRQ_CLR));
811
812                 spin_lock_irqsave(&bchan->vc.lock, flags);
813
814                 offset = readl_relaxed(bam_addr(bdev, i, BAM_P_SW_OFSTS)) &
815                                        P_SW_OFSTS_MASK;
816                 offset /= sizeof(struct bam_desc_hw);
817
818                 /* Number of processed descriptors available to reap */
819                 avail = CIRC_CNT(offset, bchan->head, MAX_DESCRIPTORS + 1);
820
821                 if (offset < bchan->head)
822                         avail--;
823
824                 list_for_each_entry_safe(async_desc, tmp,
825                                          &bchan->desc_list, desc_node) {
826                         /* Not all of this transaction's descriptors are done yet */
827                         if (avail < async_desc->xfer_len)
828                                 break;
829
830                         /* manage FIFO */
831                         bchan->head += async_desc->xfer_len;
832                         bchan->head %= MAX_DESCRIPTORS;
833
834                         async_desc->num_desc -= async_desc->xfer_len;
835                         async_desc->curr_desc += async_desc->xfer_len;
836                         avail -= async_desc->xfer_len;
837
838                         /*
839                          * if complete, process cookie. Otherwise
840                          * push back to front of desc_issued so that
841                          * it gets restarted by the tasklet
842                          */
843                         if (!async_desc->num_desc) {
844                                 vchan_cookie_complete(&async_desc->vd);
845                         } else {
846                                 list_add(&async_desc->vd.node,
847                                          &bchan->vc.desc_issued);
848                         }
849                         list_del(&async_desc->desc_node);
850                 }
851
852                 spin_unlock_irqrestore(&bchan->vc.lock, flags);
853         }
854
855         return srcs;
856 }
857
858 /**
859  * bam_dma_irq - irq handler for bam controller
860  * @irq: IRQ of interrupt
861  * @data: callback data
862  *
863  * IRQ handler for the bam controller
864  */
865 static irqreturn_t bam_dma_irq(int irq, void *data)
866 {
867         struct bam_device *bdev = data;
868         u32 clr_mask = 0, srcs = 0;
869         int ret;
870
871         srcs |= process_channel_irqs(bdev);
872
873         /* kick off tasklet to start next dma transfer */
874         if (srcs & P_IRQ)
875                 tasklet_schedule(&bdev->task);
876
877         ret = bam_pm_runtime_get_sync(bdev->dev);
878         if (ret < 0)
879                 return IRQ_NONE;
880
881         if (srcs & BAM_IRQ) {
882                 clr_mask = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_STTS));
883
884                 /*
885                  * don't allow reorder of the various accesses to the BAM
886                  * registers
887                  */
888                 mb();
889
890                 writel_relaxed(clr_mask, bam_addr(bdev, 0, BAM_IRQ_CLR));
891         }
892
893         pm_runtime_mark_last_busy(bdev->dev);
894         pm_runtime_put_autosuspend(bdev->dev);
895
896         return IRQ_HANDLED;
897 }
898
899 /**
900  * bam_tx_status - returns status of transaction
901  * @chan: dma channel
902  * @cookie: transaction cookie
903  * @txstate: DMA transaction state
904  *
905  * Return status of dma transaction
906  */
907 static enum dma_status bam_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
908                 struct dma_tx_state *txstate)
909 {
910         struct bam_chan *bchan = to_bam_chan(chan);
911         struct bam_async_desc *async_desc;
912         struct virt_dma_desc *vd;
913         int ret;
914         size_t residue = 0;
915         unsigned int i;
916         unsigned long flags;
917
918         ret = dma_cookie_status(chan, cookie, txstate);
919         if (ret == DMA_COMPLETE)
920                 return ret;
921
922         if (!txstate)
923                 return bchan->paused ? DMA_PAUSED : ret;
924
925         spin_lock_irqsave(&bchan->vc.lock, flags);
926         vd = vchan_find_desc(&bchan->vc, cookie);
927         if (vd) {
928                 residue = container_of(vd, struct bam_async_desc, vd)->length;
929         } else {
930                 list_for_each_entry(async_desc, &bchan->desc_list, desc_node) {
931                         if (async_desc->vd.tx.cookie != cookie)
932                                 continue;
933
934                         for (i = 0; i < async_desc->num_desc; i++)
935                                 residue += le16_to_cpu(
936                                                 async_desc->curr_desc[i].size);
937                 }
938         }
939
940         spin_unlock_irqrestore(&bchan->vc.lock, flags);
941
942         dma_set_residue(txstate, residue);
943
944         if (ret == DMA_IN_PROGRESS && bchan->paused)
945                 ret = DMA_PAUSED;
946
947         return ret;
948 }
949
950 /**
951  * bam_apply_new_config - apply any pending slave configuration to the channel
952  * @bchan: bam dma channel
953  * @dir: DMA direction
954  */
955 static void bam_apply_new_config(struct bam_chan *bchan,
956         enum dma_transfer_direction dir)
957 {
958         struct bam_device *bdev = bchan->bdev;
959         u32 maxburst;
960
961         if (!bdev->controlled_remotely) {
962                 if (dir == DMA_DEV_TO_MEM)
963                         maxburst = bchan->slave.src_maxburst;
964                 else
965                         maxburst = bchan->slave.dst_maxburst;
966
967                 writel_relaxed(maxburst,
968                                bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD));
969         }
970
971         bchan->reconfigure = 0;
972 }
973
974 /**
975  * bam_start_dma - start next transaction
976  * @bchan: bam dma channel
977  */
978 static void bam_start_dma(struct bam_chan *bchan)
979 {
980         struct virt_dma_desc *vd = vchan_next_desc(&bchan->vc);
981         struct bam_device *bdev = bchan->bdev;
982         struct bam_async_desc *async_desc = NULL;
983         struct bam_desc_hw *desc;
984         struct bam_desc_hw *fifo = PTR_ALIGN(bchan->fifo_virt,
985                                         sizeof(struct bam_desc_hw));
986         int ret;
987         unsigned int avail;
988         struct dmaengine_desc_callback cb;
989
990         lockdep_assert_held(&bchan->vc.lock);
991
992         if (!vd)
993                 return;
994
995         ret = bam_pm_runtime_get_sync(bdev->dev);
996         if (ret < 0)
997                 return;
998
999         while (vd && !IS_BUSY(bchan)) {
1000                 list_del(&vd->node);
1001
1002                 async_desc = container_of(vd, struct bam_async_desc, vd);
1003
1004                 /* on first use, initialize the channel hardware */
1005                 if (!bchan->initialized)
1006                         bam_chan_init_hw(bchan, async_desc->dir);
1007
1008                 /* apply new slave config changes, if necessary */
1009                 if (bchan->reconfigure)
1010                         bam_apply_new_config(bchan, async_desc->dir);
1011
1012                 desc = async_desc->curr_desc;
1013                 avail = CIRC_SPACE(bchan->tail, bchan->head,
1014                                    MAX_DESCRIPTORS + 1);
1015
1016                 if (async_desc->num_desc > avail)
1017                         async_desc->xfer_len = avail;
1018                 else
1019                         async_desc->xfer_len = async_desc->num_desc;
1020
1021                 /* set any special flags on the last descriptor */
1022                 if (async_desc->num_desc == async_desc->xfer_len)
1023                         desc[async_desc->xfer_len - 1].flags |=
1024                                                 cpu_to_le16(async_desc->flags);
1025
1026                 vd = vchan_next_desc(&bchan->vc);
1027
1028                 dmaengine_desc_get_callback(&async_desc->vd.tx, &cb);
1029
1030                 /*
1031                  * An interrupt is generated at this desc if:
1032                  *  - the FIFO is full.
1033                  *  - there are no more descriptors to add.
1034                  *  - a callback completion was requested for this desc;
1035                  *    in that case BAM delivers the completion callback for
1036                  *    this desc and continues processing the next one.
1037                  */
1038                 if (((avail <= async_desc->xfer_len) || !vd ||
1039                      dmaengine_desc_callback_valid(&cb)) &&
1040                     !(async_desc->flags & DESC_FLAG_EOT))
1041                         desc[async_desc->xfer_len - 1].flags |=
1042                                 cpu_to_le16(DESC_FLAG_INT);
1043
1044                 if (bchan->tail + async_desc->xfer_len > MAX_DESCRIPTORS) {
1045                         u32 partial = MAX_DESCRIPTORS - bchan->tail;
1046
1047                         memcpy(&fifo[bchan->tail], desc,
1048                                partial * sizeof(struct bam_desc_hw));
1049                         memcpy(fifo, &desc[partial],
1050                                (async_desc->xfer_len - partial) *
1051                                 sizeof(struct bam_desc_hw));
1052                 } else {
1053                         memcpy(&fifo[bchan->tail], desc,
1054                                async_desc->xfer_len *
1055                                sizeof(struct bam_desc_hw));
1056                 }
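
		/*
		 * Editorial example of the wrap handling above: with
		 * MAX_DESCRIPTORS = 4095, tail = 4090 and xfer_len = 10,
		 * five descriptors land in fifo[4090..4094], the other
		 * five in fifo[0..4], and the tail update below gives
		 * (4090 + 10) % 4095 = 5.
		 */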
1057
1058                 bchan->tail += async_desc->xfer_len;
1059                 bchan->tail %= MAX_DESCRIPTORS;
1060                 list_add_tail(&async_desc->desc_node, &bchan->desc_list);
1061         }
1062
1063         /* ensure descriptor writes and dma start not reordered */
1064         wmb();
1065         writel_relaxed(bchan->tail * sizeof(struct bam_desc_hw),
1066                         bam_addr(bdev, bchan->id, BAM_P_EVNT_REG));
1067
1068         pm_runtime_mark_last_busy(bdev->dev);
1069         pm_runtime_put_autosuspend(bdev->dev);
1070 }
1071
1072 /**
1073  * dma_tasklet - DMA IRQ tasklet
1074  * @data: tasklet argument (bam controller structure)
1075  *
1076  * Starts the next queued DMA transaction on each channel that is not busy
1077  */
1078 static void dma_tasklet(unsigned long data)
1079 {
1080         struct bam_device *bdev = (struct bam_device *)data;
1081         struct bam_chan *bchan;
1082         unsigned long flags;
1083         unsigned int i;
1084
1085         /* go through the channels and kick off transactions */
1086         for (i = 0; i < bdev->num_channels; i++) {
1087                 bchan = &bdev->channels[i];
1088                 spin_lock_irqsave(&bchan->vc.lock, flags);
1089
1090                 if (!list_empty(&bchan->vc.desc_issued) && !IS_BUSY(bchan))
1091                         bam_start_dma(bchan);
1092                 spin_unlock_irqrestore(&bchan->vc.lock, flags);
1093         }
1094
1095 }
1096
1097 /**
1098  * bam_issue_pending - starts pending transactions
1099  * @chan: dma channel
1100  *
1101  * Starts any pending transactions on the channel if it is not already busy
1102  */
1103 static void bam_issue_pending(struct dma_chan *chan)
1104 {
1105         struct bam_chan *bchan = to_bam_chan(chan);
1106         unsigned long flags;
1107
1108         spin_lock_irqsave(&bchan->vc.lock, flags);
1109
1110         /* if work pending and idle, start a transaction */
1111         if (vchan_issue_pending(&bchan->vc) && !IS_BUSY(bchan))
1112                 bam_start_dma(bchan);
1113
1114         spin_unlock_irqrestore(&bchan->vc.lock, flags);
1115 }
1116
1117 /**
1118  * bam_dma_free_desc - free descriptor memory
1119  * @vd: virtual descriptor
1120  *
1121  */
1122 static void bam_dma_free_desc(struct virt_dma_desc *vd)
1123 {
1124         struct bam_async_desc *async_desc = container_of(vd,
1125                         struct bam_async_desc, vd);
1126
1127         kfree(async_desc);
1128 }
1129
1130 static struct dma_chan *bam_dma_xlate(struct of_phandle_args *dma_spec,
1131                 struct of_dma *of)
1132 {
1133         struct bam_device *bdev = container_of(of->of_dma_data,
1134                                         struct bam_device, common);
1135         unsigned int request;
1136
1137         if (dma_spec->args_count != 1)
1138                 return NULL;
1139
1140         request = dma_spec->args[0];
1141         if (request >= bdev->num_channels)
1142                 return NULL;
1143
1144         return dma_get_slave_channel(&(bdev->channels[request].vc.chan));
1145 }
1146
1147 /**
1148  * bam_init
1149  * @bdev: bam device
1150  *
1151  * Initialization helper for global bam registers
1152  */
1153 static int bam_init(struct bam_device *bdev)
1154 {
1155         u32 val;
1156
1157         /* read revision and configuration information */
1158         if (!bdev->num_ees) {
1159                 val = readl_relaxed(bam_addr(bdev, 0, BAM_REVISION));
1160                 bdev->num_ees = (val >> NUM_EES_SHIFT) & NUM_EES_MASK;
1161         }
1162
1163         /* check that configured EE is within range */
1164         if (bdev->ee >= bdev->num_ees)
1165                 return -EINVAL;
1166
1167         if (!bdev->num_channels) {
1168                 val = readl_relaxed(bam_addr(bdev, 0, BAM_NUM_PIPES));
1169                 bdev->num_channels = val & BAM_NUM_PIPES_MASK;
1170         }
1171
1172         if (bdev->controlled_remotely)
1173                 return 0;
1174
1175         /* s/w reset bam */
1176         /* after reset all pipes are disabled and idle */
1177         val = readl_relaxed(bam_addr(bdev, 0, BAM_CTRL));
1178         val |= BAM_SW_RST;
1179         writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));
1180         val &= ~BAM_SW_RST;
1181         writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));
1182
1183         /* make sure previous stores are visible before enabling BAM */
1184         wmb();
1185
1186         /* enable bam */
1187         val |= BAM_EN;
1188         writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));
1189
1190         /* set descriptor threshold, start with 4 bytes */
1191         writel_relaxed(DEFAULT_CNT_THRSHLD,
1192                         bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD));
1193
1194         /* Enable default set of h/w workarounds, ie all except BAM_FULL_PIPE */
1195         writel_relaxed(BAM_CNFG_BITS_DEFAULT, bam_addr(bdev, 0, BAM_CNFG_BITS));
1196
1197         /* enable irqs for errors */
1198         writel_relaxed(BAM_ERROR_EN | BAM_HRESP_ERR_EN,
1199                         bam_addr(bdev, 0, BAM_IRQ_EN));
1200
1201         /* unmask global bam interrupt */
1202         writel_relaxed(BAM_IRQ_MSK, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));
1203
1204         return 0;
1205 }
1206
1207 static void bam_channel_init(struct bam_device *bdev, struct bam_chan *bchan,
1208         u32 index)
1209 {
1210         bchan->id = index;
1211         bchan->bdev = bdev;
1212
1213         vchan_init(&bchan->vc, &bdev->common);
1214         bchan->vc.desc_free = bam_dma_free_desc;
1215         INIT_LIST_HEAD(&bchan->desc_list);
1216 }
1217
1218 static const struct of_device_id bam_of_match[] = {
1219         { .compatible = "qcom,bam-v1.3.0", .data = &bam_v1_3_reg_info },
1220         { .compatible = "qcom,bam-v1.4.0", .data = &bam_v1_4_reg_info },
1221         { .compatible = "qcom,bam-v1.7.0", .data = &bam_v1_7_reg_info },
1222         {}
1223 };
1224
1225 MODULE_DEVICE_TABLE(of, bam_of_match);
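
/*
 * Editorial example of a matching device tree fragment (property names
 * follow what bam_dma_probe() and bam_dma_xlate() parse; the label,
 * address, interrupt and clock specifiers are made up):
 *
 *	blsp2_dma: dma-controller@f9944000 {
 *		compatible = "qcom,bam-v1.4.0";
 *		reg = <0xf9944000 0x19000>;
 *		interrupts = <GIC_SPI 239 IRQ_TYPE_LEVEL_HIGH>;
 *		clocks = <&gcc GCC_BLSP2_AHB_CLK>;
 *		clock-names = "bam_clk";
 *		#dma-cells = <1>;
 *		qcom,ee = <0>;
 *	};
 *
 * A client node then selects a pipe with the single #dma-cells argument
 * that bam_dma_xlate() consumes:
 *
 *	dmas = <&blsp2_dma 2>, <&blsp2_dma 3>;
 *	dma-names = "tx", "rx";
 */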
1226
1227 static int bam_dma_probe(struct platform_device *pdev)
1228 {
1229         struct bam_device *bdev;
1230         const struct of_device_id *match;
1231         struct resource *iores;
1232         int ret, i;
1233
1234         bdev = devm_kzalloc(&pdev->dev, sizeof(*bdev), GFP_KERNEL);
1235         if (!bdev)
1236                 return -ENOMEM;
1237
1238         bdev->dev = &pdev->dev;
1239
1240         match = of_match_node(bam_of_match, pdev->dev.of_node);
1241         if (!match) {
1242                 dev_err(&pdev->dev, "Unsupported BAM module\n");
1243                 return -ENODEV;
1244         }
1245
1246         bdev->layout = match->data;
1247
1248         iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1249         bdev->regs = devm_ioremap_resource(&pdev->dev, iores);
1250         if (IS_ERR(bdev->regs))
1251                 return PTR_ERR(bdev->regs);
1252
1253         bdev->irq = platform_get_irq(pdev, 0);
1254         if (bdev->irq < 0)
1255                 return bdev->irq;
1256
1257         ret = of_property_read_u32(pdev->dev.of_node, "qcom,ee", &bdev->ee);
1258         if (ret) {
1259                 dev_err(bdev->dev, "Execution environment unspecified\n");
1260                 return ret;
1261         }
1262
1263         bdev->controlled_remotely = of_property_read_bool(pdev->dev.of_node,
1264                                                 "qcom,controlled-remotely");
1265
1266         if (bdev->controlled_remotely) {
1267                 ret = of_property_read_u32(pdev->dev.of_node, "num-channels",
1268                                            &bdev->num_channels);
1269                 if (ret)
1270                         dev_err(bdev->dev, "num-channels unspecified in dt\n");
1271
1272                 ret = of_property_read_u32(pdev->dev.of_node, "qcom,num-ees",
1273                                            &bdev->num_ees);
1274                 if (ret)
1275                         dev_err(bdev->dev, "num-ees unspecified in dt\n");
1276         }
1277
1278         bdev->bamclk = devm_clk_get(bdev->dev, "bam_clk");
1279         if (IS_ERR(bdev->bamclk)) {
1280                 if (!bdev->controlled_remotely)
1281                         return PTR_ERR(bdev->bamclk);
1282
1283                 bdev->bamclk = NULL;
1284         }
1285
1286         ret = clk_prepare_enable(bdev->bamclk);
1287         if (ret) {
1288                 dev_err(bdev->dev, "failed to prepare/enable clock\n");
1289                 return ret;
1290         }
1291
1292         ret = bam_init(bdev);
1293         if (ret)
1294                 goto err_disable_clk;
1295
1296         tasklet_init(&bdev->task, dma_tasklet, (unsigned long)bdev);
1297
1298         bdev->channels = devm_kcalloc(bdev->dev, bdev->num_channels,
1299                                 sizeof(*bdev->channels), GFP_KERNEL);
1300
1301         if (!bdev->channels) {
1302                 ret = -ENOMEM;
1303                 goto err_tasklet_kill;
1304         }
1305
1306         /* allocate and initialize channels */
1307         INIT_LIST_HEAD(&bdev->common.channels);
1308
1309         for (i = 0; i < bdev->num_channels; i++)
1310                 bam_channel_init(bdev, &bdev->channels[i], i);
1311
1312         ret = devm_request_irq(bdev->dev, bdev->irq, bam_dma_irq,
1313                         IRQF_TRIGGER_HIGH, "bam_dma", bdev);
1314         if (ret)
1315                 goto err_bam_channel_exit;
1316
1317         /* set max dma segment size */
1318         bdev->common.dev = bdev->dev;
1319         bdev->common.dev->dma_parms = &bdev->dma_parms;
1320         ret = dma_set_max_seg_size(bdev->common.dev, BAM_FIFO_SIZE);
1321         if (ret) {
1322                 dev_err(bdev->dev, "cannot set maximum segment size\n");
1323                 goto err_bam_channel_exit;
1324         }
1325
1326         platform_set_drvdata(pdev, bdev);
1327
1328         /* set capabilities */
1329         dma_cap_zero(bdev->common.cap_mask);
1330         dma_cap_set(DMA_SLAVE, bdev->common.cap_mask);
1331
1332         /* initialize dmaengine apis */
1333         bdev->common.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
1334         bdev->common.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
1335         bdev->common.src_addr_widths = DMA_SLAVE_BUSWIDTH_4_BYTES;
1336         bdev->common.dst_addr_widths = DMA_SLAVE_BUSWIDTH_4_BYTES;
1337         bdev->common.device_alloc_chan_resources = bam_alloc_chan;
1338         bdev->common.device_free_chan_resources = bam_free_chan;
1339         bdev->common.device_prep_slave_sg = bam_prep_slave_sg;
1340         bdev->common.device_config = bam_slave_config;
1341         bdev->common.device_pause = bam_pause;
1342         bdev->common.device_resume = bam_resume;
1343         bdev->common.device_terminate_all = bam_dma_terminate_all;
1344         bdev->common.device_issue_pending = bam_issue_pending;
1345         bdev->common.device_tx_status = bam_tx_status;
1346         bdev->common.dev = bdev->dev;
1347
1348         ret = dma_async_device_register(&bdev->common);
1349         if (ret) {
1350                 dev_err(bdev->dev, "failed to register dma async device\n");
1351                 goto err_bam_channel_exit;
1352         }
1353
1354         ret = of_dma_controller_register(pdev->dev.of_node, bam_dma_xlate,
1355                                         &bdev->common);
1356         if (ret)
1357                 goto err_unregister_dma;
1358
1359         if (bdev->controlled_remotely) {
1360                 pm_runtime_disable(&pdev->dev);
1361                 return 0;
1362         }
1363
1364         pm_runtime_irq_safe(&pdev->dev);
1365         pm_runtime_set_autosuspend_delay(&pdev->dev, BAM_DMA_AUTOSUSPEND_DELAY);
1366         pm_runtime_use_autosuspend(&pdev->dev);
1367         pm_runtime_mark_last_busy(&pdev->dev);
1368         pm_runtime_set_active(&pdev->dev);
1369         pm_runtime_enable(&pdev->dev);
1370
1371         return 0;
1372
1373 err_unregister_dma:
1374         dma_async_device_unregister(&bdev->common);
1375 err_bam_channel_exit:
1376         for (i = 0; i < bdev->num_channels; i++)
1377                 tasklet_kill(&bdev->channels[i].vc.task);
1378 err_tasklet_kill:
1379         tasklet_kill(&bdev->task);
1380 err_disable_clk:
1381         clk_disable_unprepare(bdev->bamclk);
1382
1383         return ret;
1384 }
1385
1386 static int bam_dma_remove(struct platform_device *pdev)
1387 {
1388         struct bam_device *bdev = platform_get_drvdata(pdev);
1389         u32 i;
1390
1391         pm_runtime_force_suspend(&pdev->dev);
1392
1393         of_dma_controller_free(pdev->dev.of_node);
1394         dma_async_device_unregister(&bdev->common);
1395
1396         /* mask all interrupts for this execution environment */
1397         writel_relaxed(0, bam_addr(bdev, 0,  BAM_IRQ_SRCS_MSK_EE));
1398
1399         devm_free_irq(bdev->dev, bdev->irq, bdev);
1400
1401         for (i = 0; i < bdev->num_channels; i++) {
1402                 bam_dma_terminate_all(&bdev->channels[i].vc.chan);
1403                 tasklet_kill(&bdev->channels[i].vc.task);
1404
1405                 if (!bdev->channels[i].fifo_virt)
1406                         continue;
1407
1408                 dma_free_wc(bdev->dev, BAM_DESC_FIFO_SIZE,
1409                             bdev->channels[i].fifo_virt,
1410                             bdev->channels[i].fifo_phys);
1411         }
1412
1413         tasklet_kill(&bdev->task);
1414
1415         clk_disable_unprepare(bdev->bamclk);
1416
1417         return 0;
1418 }
1419
1420 static int __maybe_unused bam_dma_runtime_suspend(struct device *dev)
1421 {
1422         struct bam_device *bdev = dev_get_drvdata(dev);
1423
1424         clk_disable(bdev->bamclk);
1425
1426         return 0;
1427 }
1428
1429 static int __maybe_unused bam_dma_runtime_resume(struct device *dev)
1430 {
1431         struct bam_device *bdev = dev_get_drvdata(dev);
1432         int ret;
1433
1434         ret = clk_enable(bdev->bamclk);
1435         if (ret < 0) {
1436                 dev_err(dev, "clk_enable failed: %d\n", ret);
1437                 return ret;
1438         }
1439
1440         return 0;
1441 }
1442
1443 static int __maybe_unused bam_dma_suspend(struct device *dev)
1444 {
1445         struct bam_device *bdev = dev_get_drvdata(dev);
1446
1447         if (!bdev->controlled_remotely)
1448                 pm_runtime_force_suspend(dev);
1449
1450         clk_unprepare(bdev->bamclk);
1451
1452         return 0;
1453 }
1454
1455 static int __maybe_unused bam_dma_resume(struct device *dev)
1456 {
1457         struct bam_device *bdev = dev_get_drvdata(dev);
1458         int ret;
1459
1460         ret = clk_prepare(bdev->bamclk);
1461         if (ret)
1462                 return ret;
1463
1464         if (!bdev->controlled_remotely)
1465                 pm_runtime_force_resume(dev);
1466
1467         return 0;
1468 }
1469
1470 static const struct dev_pm_ops bam_dma_pm_ops = {
1471         SET_LATE_SYSTEM_SLEEP_PM_OPS(bam_dma_suspend, bam_dma_resume)
1472         SET_RUNTIME_PM_OPS(bam_dma_runtime_suspend, bam_dma_runtime_resume,
1473                                 NULL)
1474 };
1475
1476 static struct platform_driver bam_dma_driver = {
1477         .probe = bam_dma_probe,
1478         .remove = bam_dma_remove,
1479         .driver = {
1480                 .name = "bam-dma-engine",
1481                 .pm = &bam_dma_pm_ops,
1482                 .of_match_table = bam_of_match,
1483         },
1484 };
1485
1486 module_platform_driver(bam_dma_driver);
1487
1488 MODULE_AUTHOR("Andy Gross <agross@codeaurora.org>");
1489 MODULE_DESCRIPTION("QCOM BAM DMA engine driver");
1490 MODULE_LICENSE("GPL v2");