1 /*
2  * TI EDMA DMA engine driver
3  *
4  * Copyright 2012 Texas Instruments
5  *
6  * This program is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License as
8  * published by the Free Software Foundation version 2.
9  *
10  * This program is distributed "as is" WITHOUT ANY WARRANTY of any
11  * kind, whether express or implied; without even the implied warranty
12  * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  */
15
16 #include <linux/dmaengine.h>
17 #include <linux/dma-mapping.h>
18 #include <linux/edma.h>
19 #include <linux/err.h>
20 #include <linux/init.h>
21 #include <linux/interrupt.h>
22 #include <linux/list.h>
23 #include <linux/module.h>
24 #include <linux/platform_device.h>
25 #include <linux/slab.h>
26 #include <linux/spinlock.h>
27 #include <linux/of.h>
28 #include <linux/of_dma.h>
29 #include <linux/of_irq.h>
30 #include <linux/of_address.h>
31 #include <linux/of_device.h>
32 #include <linux/pm_runtime.h>
33
34 #include <linux/platform_data/edma.h>
35
36 #include "dmaengine.h"
37 #include "virt-dma.h"
38
39 /* Offsets matching "struct edmacc_param" */
40 #define PARM_OPT                0x00
41 #define PARM_SRC                0x04
42 #define PARM_A_B_CNT            0x08
43 #define PARM_DST                0x0c
44 #define PARM_SRC_DST_BIDX       0x10
45 #define PARM_LINK_BCNTRLD       0x14
46 #define PARM_SRC_DST_CIDX       0x18
47 #define PARM_CCNT               0x1c
48
49 #define PARM_SIZE               0x20
50
51 /* Offsets for EDMA CC global channel registers and their shadows */
52 #define SH_ER                   0x00    /* 64 bits */
53 #define SH_ECR                  0x08    /* 64 bits */
54 #define SH_ESR                  0x10    /* 64 bits */
55 #define SH_CER                  0x18    /* 64 bits */
56 #define SH_EER                  0x20    /* 64 bits */
57 #define SH_EECR                 0x28    /* 64 bits */
58 #define SH_EESR                 0x30    /* 64 bits */
59 #define SH_SER                  0x38    /* 64 bits */
60 #define SH_SECR                 0x40    /* 64 bits */
61 #define SH_IER                  0x50    /* 64 bits */
62 #define SH_IECR                 0x58    /* 64 bits */
63 #define SH_IESR                 0x60    /* 64 bits */
64 #define SH_IPR                  0x68    /* 64 bits */
65 #define SH_ICR                  0x70    /* 64 bits */
66 #define SH_IEVAL                0x78
67 #define SH_QER                  0x80
68 #define SH_QEER                 0x84
69 #define SH_QEECR                0x88
70 #define SH_QEESR                0x8c
71 #define SH_QSER                 0x90
72 #define SH_QSECR                0x94
73 #define SH_SIZE                 0x200
74
75 /* Offsets for EDMA CC global registers */
76 #define EDMA_REV                0x0000
77 #define EDMA_CCCFG              0x0004
78 #define EDMA_QCHMAP             0x0200  /* 8 registers */
79 #define EDMA_DMAQNUM            0x0240  /* 8 registers (4 on OMAP-L1xx) */
80 #define EDMA_QDMAQNUM           0x0260
81 #define EDMA_QUETCMAP           0x0280
82 #define EDMA_QUEPRI             0x0284
83 #define EDMA_EMR                0x0300  /* 64 bits */
84 #define EDMA_EMCR               0x0308  /* 64 bits */
85 #define EDMA_QEMR               0x0310
86 #define EDMA_QEMCR              0x0314
87 #define EDMA_CCERR              0x0318
88 #define EDMA_CCERRCLR           0x031c
89 #define EDMA_EEVAL              0x0320
#define EDMA_DRAE               0x0340  /* 4 x 64 bits */
91 #define EDMA_QRAE               0x0380  /* 4 registers */
92 #define EDMA_QUEEVTENTRY        0x0400  /* 2 x 16 registers */
93 #define EDMA_QSTAT              0x0600  /* 2 registers */
94 #define EDMA_QWMTHRA            0x0620
95 #define EDMA_QWMTHRB            0x0624
96 #define EDMA_CCSTAT             0x0640
97
98 #define EDMA_M                  0x1000  /* global channel registers */
99 #define EDMA_ECR                0x1008
100 #define EDMA_ECRH               0x100C
101 #define EDMA_SHADOW0            0x2000  /* 4 shadow regions */
102 #define EDMA_PARM               0x4000  /* PaRAM entries */
103
104 #define PARM_OFFSET(param_no)   (EDMA_PARM + ((param_no) << 5))
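/* e.g. PARM_OFFSET(1) = 0x4000 + (1 << 5) = 0x4020: slots are PARM_SIZE bytes apart */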
105
106 #define EDMA_DCHMAP             0x0100  /* 64 registers */
107
108 /* CCCFG register */
#define GET_NUM_DMACH(x)        ((x) & 0x7) /* bits 0-2 */
#define GET_NUM_QDMACH(x)       (((x) & 0x70) >> 4) /* bits 4-6 */
#define GET_NUM_PAENTRY(x)      (((x) & 0x7000) >> 12) /* bits 12-14 */
#define GET_NUM_EVQUE(x)        (((x) & 0x70000) >> 16) /* bits 16-18 */
#define GET_NUM_REGN(x)         (((x) & 0x300000) >> 20) /* bits 20-21 */
114 #define CHMAP_EXIST             BIT(24)
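
/*
 * Note: the CCCFG fields above are encoded sizes, not raw counts; the probe
 * path (edma_setup_from_hw() later in this file) expands them. Worked
 * example with a hypothetical CCCFG value of 0x01233045:
 *   GET_NUM_DMACH()   -> 5: 2^(5 + 1) = 64 DMA channels
 *   GET_NUM_QDMACH()  -> 4: 4 * 2 = 8 QDMA channels
 *   GET_NUM_PAENTRY() -> 3: 2^(3 + 4) = 128 PaRAM slots
 *   GET_NUM_EVQUE()   -> 3: 3 + 1 = 4 event queues
 *   GET_NUM_REGN()    -> 2: 2^2 = 4 shadow regions
 *   CHMAP_EXIST set   -> the DCHMAP channel mapping registers exist
 */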
115
116 /* CCSTAT register */
117 #define EDMA_CCSTAT_ACTV        BIT(4)
118
/*
 * Max of 20 segments per channel to conserve PaRAM slots
 * Also note that MAX_NR_SG should be at least the number of periods
 * that are required for ASoC, otherwise DMA prep calls will
 * fail. Today davinci-pcm is the only user of this driver and
 * requires at least 17 slots, so we set the default to 20.
 */
126 #define MAX_NR_SG               20
127 #define EDMA_MAX_SLOTS          MAX_NR_SG
128 #define EDMA_DESCRIPTORS        16
129
130 #define EDMA_CHANNEL_ANY                -1      /* for edma_alloc_channel() */
131 #define EDMA_SLOT_ANY                   -1      /* for edma_alloc_slot() */
132 #define EDMA_CONT_PARAMS_ANY             1001
133 #define EDMA_CONT_PARAMS_FIXED_EXACT     1002
134 #define EDMA_CONT_PARAMS_FIXED_NOT_EXACT 1003
135
136 /* PaRAM slots are laid out like this */
137 struct edmacc_param {
138         u32 opt;
139         u32 src;
140         u32 a_b_cnt;
141         u32 dst;
142         u32 src_dst_bidx;
143         u32 link_bcntrld;
144         u32 src_dst_cidx;
145         u32 ccnt;
146 } __packed;
147
148 /* fields in edmacc_param.opt */
149 #define SAM             BIT(0)
150 #define DAM             BIT(1)
151 #define SYNCDIM         BIT(2)
152 #define STATIC          BIT(3)
153 #define EDMA_FWID       (0x07 << 8)
154 #define TCCMODE         BIT(11)
155 #define EDMA_TCC(t)     ((t) << 12)
156 #define TCINTEN         BIT(20)
157 #define ITCINTEN        BIT(21)
158 #define TCCHEN          BIT(22)
159 #define ITCCHEN         BIT(23)
160
161 struct edma_pset {
162         u32                             len;
163         dma_addr_t                      addr;
164         struct edmacc_param             param;
165 };
166
167 struct edma_desc {
168         struct virt_dma_desc            vdesc;
169         struct list_head                node;
170         enum dma_transfer_direction     direction;
171         int                             cyclic;
172         int                             absync;
173         int                             pset_nr;
174         struct edma_chan                *echan;
175         int                             processed;
176
        /*
         * The following 4 elements are used for residue accounting.
         *
         * - processed_stat: the number of SG elements we have traversed
         * so far to cover accounting. This is updated to processed during
         * edma_callback and is always <= processed, because processed
         * refers to the number of transfers pending (programmed to the EDMA
         * controller), whereas processed_stat tracks the number of transfers
         * accounted for so far.
         *
         * - residue: the number of bytes we have left to transfer for this desc
         *
         * - residue_stat: the residue in bytes of data we have covered
         * so far for accounting. This is updated to residue during
         * callbacks to keep it current.
         *
         * - sg_len: tracks the length of the current intermediate transfer;
         * this is required to update the residue during intermediate transfer
         * completion callback.
         */
197         int                             processed_stat;
198         u32                             sg_len;
199         u32                             residue;
200         u32                             residue_stat;
201
        struct edma_pset                pset[];
203 };
204
205 struct edma_cc;
206
207 struct edma_tc {
208         struct device_node              *node;
209         u16                             id;
210 };
211
212 struct edma_chan {
213         struct virt_dma_chan            vchan;
214         struct list_head                node;
215         struct edma_desc                *edesc;
216         struct edma_cc                  *ecc;
217         struct edma_tc                  *tc;
218         int                             ch_num;
219         bool                            alloced;
220         bool                            hw_triggered;
221         int                             slot[EDMA_MAX_SLOTS];
222         int                             missed;
223         struct dma_slave_config         cfg;
224 };
225
226 struct edma_cc {
227         struct device                   *dev;
228         struct edma_soc_info            *info;
229         void __iomem                    *base;
230         int                             id;
231         bool                            legacy_mode;
232
233         /* eDMA3 resource information */
234         unsigned                        num_channels;
235         unsigned                        num_qchannels;
236         unsigned                        num_region;
237         unsigned                        num_slots;
238         unsigned                        num_tc;
239         bool                            chmap_exist;
240         enum dma_event_q                default_queue;
241
242         unsigned int                    ccint;
243         unsigned int                    ccerrint;
244
        /*
         * The slot_inuse bit for each PaRAM slot is clear unless the slot is
         * in use by Linux or allocated for use by the DSP.
         */
249         unsigned long *slot_inuse;
250
251         struct dma_device               dma_slave;
252         struct dma_device               *dma_memcpy;
253         struct edma_chan                *slave_chans;
254         struct edma_tc                  *tc_list;
255         int                             dummy_slot;
256 };
257
258 /* dummy param set used to (re)initialize parameter RAM slots */
259 static const struct edmacc_param dummy_paramset = {
260         .link_bcntrld = 0xffff,
261         .ccnt = 1,
262 };
263
264 #define EDMA_BINDING_LEGACY     0
265 #define EDMA_BINDING_TPCC       1
266 static const u32 edma_binding_type[] = {
267         [EDMA_BINDING_LEGACY] = EDMA_BINDING_LEGACY,
268         [EDMA_BINDING_TPCC] = EDMA_BINDING_TPCC,
269 };
270
271 static const struct of_device_id edma_of_ids[] = {
272         {
273                 .compatible = "ti,edma3",
274                 .data = &edma_binding_type[EDMA_BINDING_LEGACY],
275         },
276         {
277                 .compatible = "ti,edma3-tpcc",
278                 .data = &edma_binding_type[EDMA_BINDING_TPCC],
279         },
280         {}
281 };
282 MODULE_DEVICE_TABLE(of, edma_of_ids);
283
284 static const struct of_device_id edma_tptc_of_ids[] = {
285         { .compatible = "ti,edma3-tptc", },
286         {}
287 };
288 MODULE_DEVICE_TABLE(of, edma_tptc_of_ids);
289
290 static inline unsigned int edma_read(struct edma_cc *ecc, int offset)
291 {
292         return (unsigned int)__raw_readl(ecc->base + offset);
293 }
294
295 static inline void edma_write(struct edma_cc *ecc, int offset, int val)
296 {
297         __raw_writel(val, ecc->base + offset);
298 }
299
300 static inline void edma_modify(struct edma_cc *ecc, int offset, unsigned and,
301                                unsigned or)
302 {
303         unsigned val = edma_read(ecc, offset);
304
305         val &= and;
306         val |= or;
307         edma_write(ecc, offset, val);
308 }
309
310 static inline void edma_and(struct edma_cc *ecc, int offset, unsigned and)
311 {
312         unsigned val = edma_read(ecc, offset);
313
314         val &= and;
315         edma_write(ecc, offset, val);
316 }
317
318 static inline void edma_or(struct edma_cc *ecc, int offset, unsigned or)
319 {
320         unsigned val = edma_read(ecc, offset);
321
322         val |= or;
323         edma_write(ecc, offset, val);
324 }
325
326 static inline unsigned int edma_read_array(struct edma_cc *ecc, int offset,
327                                            int i)
328 {
329         return edma_read(ecc, offset + (i << 2));
330 }
331
332 static inline void edma_write_array(struct edma_cc *ecc, int offset, int i,
333                                     unsigned val)
334 {
335         edma_write(ecc, offset + (i << 2), val);
336 }
337
338 static inline void edma_modify_array(struct edma_cc *ecc, int offset, int i,
339                                      unsigned and, unsigned or)
340 {
341         edma_modify(ecc, offset + (i << 2), and, or);
342 }
343
344 static inline void edma_or_array(struct edma_cc *ecc, int offset, int i,
345                                  unsigned or)
346 {
347         edma_or(ecc, offset + (i << 2), or);
348 }
349
350 static inline void edma_or_array2(struct edma_cc *ecc, int offset, int i, int j,
351                                   unsigned or)
352 {
353         edma_or(ecc, offset + ((i * 2 + j) << 2), or);
354 }
355
356 static inline void edma_write_array2(struct edma_cc *ecc, int offset, int i,
357                                      int j, unsigned val)
358 {
359         edma_write(ecc, offset + ((i * 2 + j) << 2), val);
360 }
361
362 static inline unsigned int edma_shadow0_read(struct edma_cc *ecc, int offset)
363 {
364         return edma_read(ecc, EDMA_SHADOW0 + offset);
365 }
366
367 static inline unsigned int edma_shadow0_read_array(struct edma_cc *ecc,
368                                                    int offset, int i)
369 {
370         return edma_read(ecc, EDMA_SHADOW0 + offset + (i << 2));
371 }
372
373 static inline void edma_shadow0_write(struct edma_cc *ecc, int offset,
374                                       unsigned val)
375 {
376         edma_write(ecc, EDMA_SHADOW0 + offset, val);
377 }
378
379 static inline void edma_shadow0_write_array(struct edma_cc *ecc, int offset,
380                                             int i, unsigned val)
381 {
382         edma_write(ecc, EDMA_SHADOW0 + offset + (i << 2), val);
383 }
384
385 static inline unsigned int edma_param_read(struct edma_cc *ecc, int offset,
386                                            int param_no)
387 {
388         return edma_read(ecc, EDMA_PARM + offset + (param_no << 5));
389 }
390
391 static inline void edma_param_write(struct edma_cc *ecc, int offset,
392                                     int param_no, unsigned val)
393 {
394         edma_write(ecc, EDMA_PARM + offset + (param_no << 5), val);
395 }
396
397 static inline void edma_param_modify(struct edma_cc *ecc, int offset,
398                                      int param_no, unsigned and, unsigned or)
399 {
400         edma_modify(ecc, EDMA_PARM + offset + (param_no << 5), and, or);
401 }
402
403 static inline void edma_param_and(struct edma_cc *ecc, int offset, int param_no,
404                                   unsigned and)
405 {
406         edma_and(ecc, EDMA_PARM + offset + (param_no << 5), and);
407 }
408
409 static inline void edma_param_or(struct edma_cc *ecc, int offset, int param_no,
410                                  unsigned or)
411 {
412         edma_or(ecc, EDMA_PARM + offset + (param_no << 5), or);
413 }
414
415 static inline void edma_set_bits(int offset, int len, unsigned long *p)
416 {
417         for (; len > 0; len--)
418                 set_bit(offset + (len - 1), p);
419 }
420
421 static void edma_assign_priority_to_queue(struct edma_cc *ecc, int queue_no,
422                                           int priority)
423 {
424         int bit = queue_no * 4;
425
426         edma_modify(ecc, EDMA_QUEPRI, ~(0x7 << bit), ((priority & 0x7) << bit));
427 }
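
/*
 * e.g. (illustrative): queue_no = 2 gives bit = 8, so the 3-bit priority
 * above lands in QUEPRI bits 10:8.
 */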
428
429 static void edma_set_chmap(struct edma_chan *echan, int slot)
430 {
431         struct edma_cc *ecc = echan->ecc;
432         int channel = EDMA_CHAN_SLOT(echan->ch_num);
433
434         if (ecc->chmap_exist) {
435                 slot = EDMA_CHAN_SLOT(slot);
436                 edma_write_array(ecc, EDMA_DCHMAP, channel, (slot << 5));
437         }
438 }
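
/*
 * DCHMAP stores the PaRAM entry number in bits 13:5, hence the (slot << 5)
 * above. E.g. (illustrative) mapping a channel to slot 64 writes
 * 64 << 5 = 0x800 into that channel's DCHMAP register.
 */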
439
440 static void edma_setup_interrupt(struct edma_chan *echan, bool enable)
441 {
442         struct edma_cc *ecc = echan->ecc;
443         int channel = EDMA_CHAN_SLOT(echan->ch_num);
444
445         if (enable) {
446                 edma_shadow0_write_array(ecc, SH_ICR, channel >> 5,
447                                          BIT(channel & 0x1f));
448                 edma_shadow0_write_array(ecc, SH_IESR, channel >> 5,
449                                          BIT(channel & 0x1f));
450         } else {
451                 edma_shadow0_write_array(ecc, SH_IECR, channel >> 5,
452                                          BIT(channel & 0x1f));
453         }
454 }
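
/*
 * A note on the "channel >> 5" / "BIT(channel & 0x1f)" pattern used above
 * and throughout: the 64-bit channel registers (ER, ESR, IER, ...) are
 * accessed as two 32-bit banks, so channel N lives in bank N / 32 at bit
 * N % 32. E.g. (illustrative) channel 37 -> bank 1, mask BIT(5).
 */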
455
/*
 * PaRAM slot management functions
 */
459 static void edma_write_slot(struct edma_cc *ecc, unsigned slot,
460                             const struct edmacc_param *param)
461 {
462         slot = EDMA_CHAN_SLOT(slot);
463         if (slot >= ecc->num_slots)
464                 return;
465         memcpy_toio(ecc->base + PARM_OFFSET(slot), param, PARM_SIZE);
466 }
467
468 static int edma_read_slot(struct edma_cc *ecc, unsigned slot,
469                            struct edmacc_param *param)
470 {
471         slot = EDMA_CHAN_SLOT(slot);
472         if (slot >= ecc->num_slots)
473                 return -EINVAL;
474         memcpy_fromio(param, ecc->base + PARM_OFFSET(slot), PARM_SIZE);
475
476         return 0;
477 }
478
479 /**
480  * edma_alloc_slot - allocate DMA parameter RAM
481  * @ecc: pointer to edma_cc struct
482  * @slot: specific slot to allocate; negative for "any unused slot"
483  *
484  * This allocates a parameter RAM slot, initializing it to hold a
485  * dummy transfer.  Slots allocated using this routine have not been
486  * mapped to a hardware DMA channel, and will normally be used by
487  * linking to them from a slot associated with a DMA channel.
488  *
489  * Normal use is to pass EDMA_SLOT_ANY as the @slot, but specific
490  * slots may be allocated on behalf of DSP firmware.
491  *
492  * Returns the number of the slot, else negative errno.
493  */
494 static int edma_alloc_slot(struct edma_cc *ecc, int slot)
495 {
496         if (slot >= 0) {
497                 slot = EDMA_CHAN_SLOT(slot);
                /* Requesting an entry PaRAM slot for a HW-triggered channel */
499                 if (ecc->chmap_exist && slot < ecc->num_channels)
500                         slot = EDMA_SLOT_ANY;
501         }
502
503         if (slot < 0) {
504                 if (ecc->chmap_exist)
505                         slot = 0;
506                 else
507                         slot = ecc->num_channels;
508                 for (;;) {
509                         slot = find_next_zero_bit(ecc->slot_inuse,
510                                                   ecc->num_slots,
511                                                   slot);
512                         if (slot == ecc->num_slots)
513                                 return -ENOMEM;
514                         if (!test_and_set_bit(slot, ecc->slot_inuse))
515                                 break;
516                 }
517         } else if (slot >= ecc->num_slots) {
518                 return -EINVAL;
519         } else if (test_and_set_bit(slot, ecc->slot_inuse)) {
520                 return -EBUSY;
521         }
522
523         edma_write_slot(ecc, slot, &dummy_paramset);
524
525         return EDMA_CTLR_CHAN(ecc->id, slot);
526 }
527
528 static void edma_free_slot(struct edma_cc *ecc, unsigned slot)
529 {
530         slot = EDMA_CHAN_SLOT(slot);
531         if (slot >= ecc->num_slots)
532                 return;
533
534         edma_write_slot(ecc, slot, &dummy_paramset);
535         clear_bit(slot, ecc->slot_inuse);
536 }
537
538 /**
539  * edma_link - link one parameter RAM slot to another
540  * @ecc: pointer to edma_cc struct
541  * @from: parameter RAM slot originating the link
542  * @to: parameter RAM slot which is the link target
543  *
544  * The originating slot should not be part of any active DMA transfer.
545  */
546 static void edma_link(struct edma_cc *ecc, unsigned from, unsigned to)
547 {
548         if (unlikely(EDMA_CTLR(from) != EDMA_CTLR(to)))
549                 dev_warn(ecc->dev, "Ignoring eDMA instance for linking\n");
550
551         from = EDMA_CHAN_SLOT(from);
552         to = EDMA_CHAN_SLOT(to);
553         if (from >= ecc->num_slots || to >= ecc->num_slots)
554                 return;
555
556         edma_param_modify(ecc, PARM_LINK_BCNTRLD, from, 0xffff0000,
557                           PARM_OFFSET(to));
558 }
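
/*
 * The low 16 bits of LINK_BCNTRLD hold the byte address of the linked
 * PaRAM set. E.g. (illustrative) linking to slot 8 writes
 * PARM_OFFSET(8) = 0x4100 into the LINK field, leaving BCNTRLD intact.
 */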
559
560 /**
561  * edma_get_position - returns the current transfer point
562  * @ecc: pointer to edma_cc struct
563  * @slot: parameter RAM slot being examined
564  * @dst:  true selects the dest position, false the source
565  *
566  * Returns the position of the current active slot
567  */
568 static dma_addr_t edma_get_position(struct edma_cc *ecc, unsigned slot,
569                                     bool dst)
570 {
571         u32 offs;
572
573         slot = EDMA_CHAN_SLOT(slot);
574         offs = PARM_OFFSET(slot);
575         offs += dst ? PARM_DST : PARM_SRC;
576
577         return edma_read(ecc, offs);
578 }
579
580 /*
581  * Channels with event associations will be triggered by their hardware
582  * events, and channels without such associations will be triggered by
583  * software.  (At this writing there is no interface for using software
584  * triggers except with channels that don't support hardware triggers.)
585  */
586 static void edma_start(struct edma_chan *echan)
587 {
588         struct edma_cc *ecc = echan->ecc;
589         int channel = EDMA_CHAN_SLOT(echan->ch_num);
590         int j = (channel >> 5);
591         unsigned int mask = BIT(channel & 0x1f);
592
593         if (!echan->hw_triggered) {
594                 /* EDMA channels without event association */
595                 dev_dbg(ecc->dev, "ESR%d %08x\n", j,
596                         edma_shadow0_read_array(ecc, SH_ESR, j));
597                 edma_shadow0_write_array(ecc, SH_ESR, j, mask);
598         } else {
599                 /* EDMA channel with event association */
600                 dev_dbg(ecc->dev, "ER%d %08x\n", j,
601                         edma_shadow0_read_array(ecc, SH_ER, j));
602                 /* Clear any pending event or error */
603                 edma_write_array(ecc, EDMA_ECR, j, mask);
604                 edma_write_array(ecc, EDMA_EMCR, j, mask);
605                 /* Clear any SER */
606                 edma_shadow0_write_array(ecc, SH_SECR, j, mask);
607                 edma_shadow0_write_array(ecc, SH_EESR, j, mask);
608                 dev_dbg(ecc->dev, "EER%d %08x\n", j,
609                         edma_shadow0_read_array(ecc, SH_EER, j));
610         }
611 }
612
613 static void edma_stop(struct edma_chan *echan)
614 {
615         struct edma_cc *ecc = echan->ecc;
616         int channel = EDMA_CHAN_SLOT(echan->ch_num);
617         int j = (channel >> 5);
618         unsigned int mask = BIT(channel & 0x1f);
619
620         edma_shadow0_write_array(ecc, SH_EECR, j, mask);
621         edma_shadow0_write_array(ecc, SH_ECR, j, mask);
622         edma_shadow0_write_array(ecc, SH_SECR, j, mask);
623         edma_write_array(ecc, EDMA_EMCR, j, mask);
624
625         /* clear possibly pending completion interrupt */
626         edma_shadow0_write_array(ecc, SH_ICR, j, mask);
627
628         dev_dbg(ecc->dev, "EER%d %08x\n", j,
629                 edma_shadow0_read_array(ecc, SH_EER, j));
630
631         /* REVISIT:  consider guarding against inappropriate event
632          * chaining by overwriting with dummy_paramset.
633          */
634 }
635
636 /*
637  * Temporarily disable EDMA hardware events on the specified channel,
638  * preventing them from triggering new transfers
639  */
640 static void edma_pause(struct edma_chan *echan)
641 {
642         int channel = EDMA_CHAN_SLOT(echan->ch_num);
643         unsigned int mask = BIT(channel & 0x1f);
644
645         edma_shadow0_write_array(echan->ecc, SH_EECR, channel >> 5, mask);
646 }
647
648 /* Re-enable EDMA hardware events on the specified channel.  */
649 static void edma_resume(struct edma_chan *echan)
650 {
651         int channel = EDMA_CHAN_SLOT(echan->ch_num);
652         unsigned int mask = BIT(channel & 0x1f);
653
654         edma_shadow0_write_array(echan->ecc, SH_EESR, channel >> 5, mask);
655 }
656
657 static void edma_trigger_channel(struct edma_chan *echan)
658 {
659         struct edma_cc *ecc = echan->ecc;
660         int channel = EDMA_CHAN_SLOT(echan->ch_num);
661         unsigned int mask = BIT(channel & 0x1f);
662
663         edma_shadow0_write_array(ecc, SH_ESR, (channel >> 5), mask);
664
665         dev_dbg(ecc->dev, "ESR%d %08x\n", (channel >> 5),
666                 edma_shadow0_read_array(ecc, SH_ESR, (channel >> 5)));
667 }
668
669 static void edma_clean_channel(struct edma_chan *echan)
670 {
671         struct edma_cc *ecc = echan->ecc;
672         int channel = EDMA_CHAN_SLOT(echan->ch_num);
673         int j = (channel >> 5);
674         unsigned int mask = BIT(channel & 0x1f);
675
676         dev_dbg(ecc->dev, "EMR%d %08x\n", j, edma_read_array(ecc, EDMA_EMR, j));
677         edma_shadow0_write_array(ecc, SH_ECR, j, mask);
678         /* Clear the corresponding EMR bits */
679         edma_write_array(ecc, EDMA_EMCR, j, mask);
680         /* Clear any SER */
681         edma_shadow0_write_array(ecc, SH_SECR, j, mask);
682         edma_write(ecc, EDMA_CCERRCLR, BIT(16) | BIT(1) | BIT(0));
683 }
684
685 /* Move channel to a specific event queue */
686 static void edma_assign_channel_eventq(struct edma_chan *echan,
687                                        enum dma_event_q eventq_no)
688 {
689         struct edma_cc *ecc = echan->ecc;
690         int channel = EDMA_CHAN_SLOT(echan->ch_num);
691         int bit = (channel & 0x7) * 4;
692
693         /* default to low priority queue */
694         if (eventq_no == EVENTQ_DEFAULT)
695                 eventq_no = ecc->default_queue;
696         if (eventq_no >= ecc->num_tc)
697                 return;
698
699         eventq_no &= 7;
700         edma_modify_array(ecc, EDMA_DMAQNUM, (channel >> 3), ~(0x7 << bit),
701                           eventq_no << bit);
702 }
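
/*
 * e.g. (illustrative): channel 10 maps to DMAQNUM[10 >> 3] = DMAQNUM[1],
 * with the queue number written at bits 10:8 ((10 & 0x7) * 4 = 8).
 */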
703
704 static int edma_alloc_channel(struct edma_chan *echan,
705                               enum dma_event_q eventq_no)
706 {
707         struct edma_cc *ecc = echan->ecc;
708         int channel = EDMA_CHAN_SLOT(echan->ch_num);
709
710         /* ensure access through shadow region 0 */
711         edma_or_array2(ecc, EDMA_DRAE, 0, channel >> 5, BIT(channel & 0x1f));
712
713         /* ensure no events are pending */
714         edma_stop(echan);
715
716         edma_setup_interrupt(echan, true);
717
718         edma_assign_channel_eventq(echan, eventq_no);
719
720         return 0;
721 }
722
723 static void edma_free_channel(struct edma_chan *echan)
724 {
725         /* ensure no events are pending */
726         edma_stop(echan);
727         /* REVISIT should probably take out of shadow region 0 */
728         edma_setup_interrupt(echan, false);
729 }
730
731 static inline struct edma_cc *to_edma_cc(struct dma_device *d)
732 {
733         return container_of(d, struct edma_cc, dma_slave);
734 }
735
736 static inline struct edma_chan *to_edma_chan(struct dma_chan *c)
737 {
738         return container_of(c, struct edma_chan, vchan.chan);
739 }
740
741 static inline struct edma_desc *to_edma_desc(struct dma_async_tx_descriptor *tx)
742 {
743         return container_of(tx, struct edma_desc, vdesc.tx);
744 }
745
746 static void edma_desc_free(struct virt_dma_desc *vdesc)
747 {
748         kfree(container_of(vdesc, struct edma_desc, vdesc));
749 }
750
751 /* Dispatch a queued descriptor to the controller (caller holds lock) */
752 static void edma_execute(struct edma_chan *echan)
753 {
754         struct edma_cc *ecc = echan->ecc;
755         struct virt_dma_desc *vdesc;
756         struct edma_desc *edesc;
757         struct device *dev = echan->vchan.chan.device->dev;
758         int i, j, left, nslots;
759
760         if (!echan->edesc) {
761                 /* Setup is needed for the first transfer */
762                 vdesc = vchan_next_desc(&echan->vchan);
763                 if (!vdesc)
764                         return;
765                 list_del(&vdesc->node);
766                 echan->edesc = to_edma_desc(&vdesc->tx);
767         }
768
769         edesc = echan->edesc;
770
        /* Find out how many PaRAM sets are left to process */
772         left = edesc->pset_nr - edesc->processed;
773         nslots = min(MAX_NR_SG, left);
774         edesc->sg_len = 0;
775
776         /* Write descriptor PaRAM set(s) */
777         for (i = 0; i < nslots; i++) {
778                 j = i + edesc->processed;
779                 edma_write_slot(ecc, echan->slot[i], &edesc->pset[j].param);
780                 edesc->sg_len += edesc->pset[j].len;
781                 dev_vdbg(dev,
782                          "\n pset[%d]:\n"
783                          "  chnum\t%d\n"
784                          "  slot\t%d\n"
785                          "  opt\t%08x\n"
786                          "  src\t%08x\n"
787                          "  dst\t%08x\n"
788                          "  abcnt\t%08x\n"
789                          "  ccnt\t%08x\n"
790                          "  bidx\t%08x\n"
791                          "  cidx\t%08x\n"
792                          "  lkrld\t%08x\n",
793                          j, echan->ch_num, echan->slot[i],
794                          edesc->pset[j].param.opt,
795                          edesc->pset[j].param.src,
796                          edesc->pset[j].param.dst,
797                          edesc->pset[j].param.a_b_cnt,
798                          edesc->pset[j].param.ccnt,
799                          edesc->pset[j].param.src_dst_bidx,
800                          edesc->pset[j].param.src_dst_cidx,
801                          edesc->pset[j].param.link_bcntrld);
802                 /* Link to the previous slot if not the last set */
803                 if (i != (nslots - 1))
804                         edma_link(ecc, echan->slot[i], echan->slot[i + 1]);
805         }
806
807         edesc->processed += nslots;
808
        /*
         * If this is the last set in a set of SG-list transactions then
         * set up a link to the dummy slot; this results in all future
         * events being absorbed, and that's OK because we're done.
         */
814         if (edesc->processed == edesc->pset_nr) {
815                 if (edesc->cyclic)
816                         edma_link(ecc, echan->slot[nslots - 1], echan->slot[1]);
817                 else
818                         edma_link(ecc, echan->slot[nslots - 1],
819                                   echan->ecc->dummy_slot);
820         }
821
822         if (echan->missed) {
823                 /*
824                  * This happens due to setup times between intermediate
825                  * transfers in long SG lists which have to be broken up into
826                  * transfers of MAX_NR_SG
827                  */
828                 dev_dbg(dev, "missed event on channel %d\n", echan->ch_num);
829                 edma_clean_channel(echan);
830                 edma_stop(echan);
831                 edma_start(echan);
832                 edma_trigger_channel(echan);
833                 echan->missed = 0;
834         } else if (edesc->processed <= MAX_NR_SG) {
835                 dev_dbg(dev, "first transfer starting on channel %d\n",
836                         echan->ch_num);
837                 edma_start(echan);
838         } else {
839                 dev_dbg(dev, "chan: %d: completed %d elements, resuming\n",
840                         echan->ch_num, edesc->processed);
841                 edma_resume(echan);
842         }
843 }
844
845 static int edma_terminate_all(struct dma_chan *chan)
846 {
847         struct edma_chan *echan = to_edma_chan(chan);
848         unsigned long flags;
849         LIST_HEAD(head);
850
851         spin_lock_irqsave(&echan->vchan.lock, flags);
852
        /*
         * Stop DMA activity: we assume the callback will not be called
         * after edma_terminate_all() returns (even if it does, it will
         * see that echan->edesc is NULL and exit.)
         */
858         if (echan->edesc) {
859                 edma_stop(echan);
860                 /* Move the cyclic channel back to default queue */
861                 if (!echan->tc && echan->edesc->cyclic)
862                         edma_assign_channel_eventq(echan, EVENTQ_DEFAULT);
863                 /*
864                  * free the running request descriptor
865                  * since it is not in any of the vdesc lists
866                  */
867                 edma_desc_free(&echan->edesc->vdesc);
868                 echan->edesc = NULL;
869         }
870
871         vchan_get_all_descriptors(&echan->vchan, &head);
872         spin_unlock_irqrestore(&echan->vchan.lock, flags);
873         vchan_dma_desc_free_list(&echan->vchan, &head);
874
875         return 0;
876 }
877
878 static void edma_synchronize(struct dma_chan *chan)
879 {
880         struct edma_chan *echan = to_edma_chan(chan);
881
882         vchan_synchronize(&echan->vchan);
883 }
884
885 static int edma_slave_config(struct dma_chan *chan,
886         struct dma_slave_config *cfg)
887 {
888         struct edma_chan *echan = to_edma_chan(chan);
889
890         if (cfg->src_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES ||
891             cfg->dst_addr_width == DMA_SLAVE_BUSWIDTH_8_BYTES)
892                 return -EINVAL;
893
894         memcpy(&echan->cfg, cfg, sizeof(echan->cfg));
895
896         return 0;
897 }
898
899 static int edma_dma_pause(struct dma_chan *chan)
900 {
901         struct edma_chan *echan = to_edma_chan(chan);
902
903         if (!echan->edesc)
904                 return -EINVAL;
905
906         edma_pause(echan);
907         return 0;
908 }
909
910 static int edma_dma_resume(struct dma_chan *chan)
911 {
912         struct edma_chan *echan = to_edma_chan(chan);
913
914         edma_resume(echan);
915         return 0;
916 }
917
/*
 * A PaRAM set configuration abstraction used by other modes
 * @chan: Channel whose PaRAM set we're configuring
 * @pset: PaRAM set to initialize and set up
 * @src_addr: Source address of the DMA
 * @dst_addr: Destination address of the DMA
 * @burst: Number of acnt-sized units to transfer per event
 * @acnt: Width of one array element, typically the device FIFO width
 * @dma_length: Total length of the DMA transfer
 * @direction: Direction of the transfer
 */
929 static int edma_config_pset(struct dma_chan *chan, struct edma_pset *epset,
930                             dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst,
931                             unsigned int acnt, unsigned int dma_length,
932                             enum dma_transfer_direction direction)
933 {
934         struct edma_chan *echan = to_edma_chan(chan);
935         struct device *dev = chan->device->dev;
936         struct edmacc_param *param = &epset->param;
937         int bcnt, ccnt, cidx;
938         int src_bidx, dst_bidx, src_cidx, dst_cidx;
939         int absync;
940
941         /* src/dst_maxburst == 0 is the same case as src/dst_maxburst == 1 */
942         if (!burst)
943                 burst = 1;
944         /*
945          * If the maxburst is equal to the fifo width, use
946          * A-synced transfers. This allows for large contiguous
947          * buffer transfers using only one PaRAM set.
948          */
949         if (burst == 1) {
                /*
                 * For the A-sync case, bcnt and ccnt are the remainder
                 * and quotient respectively of the division of
                 * (dma_length / acnt) by (SZ_64K - 1). This is so
                 * that in case bcnt overflows, we have ccnt to use.
                 * Note: bcntrld is used in A-sync transfers only, and it
                 * only applies for sg_dma_len(sg) >= SZ_64K.
                 * In this case, the approach taken is: bcnt for the
                 * first frame will be the remainder below. Then for
                 * every successive frame, bcnt will be SZ_64K - 1. This
                 * is assured because bcntrld is set to 0xffff at the end
                 * of this function.
                 */
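                /*
                 * Worked example (illustrative): acnt = 4 and
                 * dma_length = 1 MiB give 262144 elements;
                 * 262144 = 4 * 65535 + 4, so bcnt = 4 for the first
                 * frame, ccnt = 4 + 1 = 5, and bcntrld reloads
                 * bcnt = 65535 for each of the remaining frames.
                 */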
962                 absync = false;
963                 ccnt = dma_length / acnt / (SZ_64K - 1);
964                 bcnt = dma_length / acnt - ccnt * (SZ_64K - 1);
965                 /*
966                  * If bcnt is non-zero, we have a remainder and hence an
967                  * extra frame to transfer, so increment ccnt.
968                  */
969                 if (bcnt)
970                         ccnt++;
971                 else
972                         bcnt = SZ_64K - 1;
973                 cidx = acnt;
974         } else {
975                 /*
976                  * If maxburst is greater than the fifo address_width,
977                  * use AB-synced transfers where A count is the fifo
978                  * address_width and B count is the maxburst. In this
979                  * case, we are limited to transfers of C count frames
980                  * of (address_width * maxburst) where C count is limited
981                  * to SZ_64K-1. This places an upper bound on the length
982                  * of an SG segment that can be handled.
983                  */
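                /*
                 * Worked example (illustrative): acnt = 4 (32-bit FIFO)
                 * and burst = 8 give bcnt = 8; dma_length = 4096 then
                 * yields ccnt = 4096 / (4 * 8) = 128 frames of 32 bytes.
                 */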
984                 absync = true;
985                 bcnt = burst;
986                 ccnt = dma_length / (acnt * bcnt);
987                 if (ccnt > (SZ_64K - 1)) {
988                         dev_err(dev, "Exceeded max SG segment size\n");
989                         return -EINVAL;
990                 }
991                 cidx = acnt * bcnt;
992         }
993
994         epset->len = dma_length;
995
996         if (direction == DMA_MEM_TO_DEV) {
997                 src_bidx = acnt;
998                 src_cidx = cidx;
999                 dst_bidx = 0;
1000                 dst_cidx = 0;
1001                 epset->addr = src_addr;
1002         } else if (direction == DMA_DEV_TO_MEM)  {
1003                 src_bidx = 0;
1004                 src_cidx = 0;
1005                 dst_bidx = acnt;
1006                 dst_cidx = cidx;
1007                 epset->addr = dst_addr;
1008         } else if (direction == DMA_MEM_TO_MEM)  {
1009                 src_bidx = acnt;
1010                 src_cidx = cidx;
1011                 dst_bidx = acnt;
1012                 dst_cidx = cidx;
1013         } else {
1014                 dev_err(dev, "%s: direction not implemented yet\n", __func__);
1015                 return -EINVAL;
1016         }
1017
1018         param->opt = EDMA_TCC(EDMA_CHAN_SLOT(echan->ch_num));
1019         /* Configure A or AB synchronized transfers */
1020         if (absync)
1021                 param->opt |= SYNCDIM;
1022
1023         param->src = src_addr;
1024         param->dst = dst_addr;
1025
1026         param->src_dst_bidx = (dst_bidx << 16) | src_bidx;
1027         param->src_dst_cidx = (dst_cidx << 16) | src_cidx;
1028
1029         param->a_b_cnt = bcnt << 16 | acnt;
1030         param->ccnt = ccnt;
        /*
         * B count reload (bcntrld) is only required for the A-sync case,
         * and there the reload value needed is always SZ_64K - 1. The
         * link field is initialized to "no link" here and is populated
         * later by edma_execute().
         */
1037         param->link_bcntrld = 0xffffffff;
1038         return absync;
1039 }
1040
1041 static struct dma_async_tx_descriptor *edma_prep_slave_sg(
1042         struct dma_chan *chan, struct scatterlist *sgl,
1043         unsigned int sg_len, enum dma_transfer_direction direction,
1044         unsigned long tx_flags, void *context)
1045 {
1046         struct edma_chan *echan = to_edma_chan(chan);
1047         struct device *dev = chan->device->dev;
1048         struct edma_desc *edesc;
1049         dma_addr_t src_addr = 0, dst_addr = 0;
1050         enum dma_slave_buswidth dev_width;
1051         u32 burst;
1052         struct scatterlist *sg;
1053         int i, nslots, ret;
1054
1055         if (unlikely(!echan || !sgl || !sg_len))
1056                 return NULL;
1057
1058         if (direction == DMA_DEV_TO_MEM) {
1059                 src_addr = echan->cfg.src_addr;
1060                 dev_width = echan->cfg.src_addr_width;
1061                 burst = echan->cfg.src_maxburst;
1062         } else if (direction == DMA_MEM_TO_DEV) {
1063                 dst_addr = echan->cfg.dst_addr;
1064                 dev_width = echan->cfg.dst_addr_width;
1065                 burst = echan->cfg.dst_maxburst;
1066         } else {
1067                 dev_err(dev, "%s: bad direction: %d\n", __func__, direction);
1068                 return NULL;
1069         }
1070
1071         if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
1072                 dev_err(dev, "%s: Undefined slave buswidth\n", __func__);
1073                 return NULL;
1074         }
1075
1076         edesc = kzalloc(sizeof(*edesc) + sg_len * sizeof(edesc->pset[0]),
1077                         GFP_ATOMIC);
1078         if (!edesc)
1079                 return NULL;
1080
1081         edesc->pset_nr = sg_len;
1082         edesc->residue = 0;
1083         edesc->direction = direction;
1084         edesc->echan = echan;
1085
1086         /* Allocate a PaRAM slot, if needed */
1087         nslots = min_t(unsigned, MAX_NR_SG, sg_len);
1088
1089         for (i = 0; i < nslots; i++) {
1090                 if (echan->slot[i] < 0) {
1091                         echan->slot[i] =
1092                                 edma_alloc_slot(echan->ecc, EDMA_SLOT_ANY);
1093                         if (echan->slot[i] < 0) {
1094                                 kfree(edesc);
1095                                 dev_err(dev, "%s: Failed to allocate slot\n",
1096                                         __func__);
1097                                 return NULL;
1098                         }
1099                 }
1100         }
1101
1102         /* Configure PaRAM sets for each SG */
1103         for_each_sg(sgl, sg, sg_len, i) {
1104                 /* Get address for each SG */
1105                 if (direction == DMA_DEV_TO_MEM)
1106                         dst_addr = sg_dma_address(sg);
1107                 else
1108                         src_addr = sg_dma_address(sg);
1109
1110                 ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
1111                                        dst_addr, burst, dev_width,
1112                                        sg_dma_len(sg), direction);
1113                 if (ret < 0) {
1114                         kfree(edesc);
1115                         return NULL;
1116                 }
1117
1118                 edesc->absync = ret;
1119                 edesc->residue += sg_dma_len(sg);
1120
1121                 if (i == sg_len - 1)
1122                         /* Enable completion interrupt */
1123                         edesc->pset[i].param.opt |= TCINTEN;
                else if (!((i + 1) % MAX_NR_SG))
                        /*
                         * Enable early completion interrupt for the
                         * intermediate set. In this case the driver will be
                         * notified when the PaRAM set is submitted to TC.
                         * This allows more time to set up the next set of
                         * slots.
                         */
                        edesc->pset[i].param.opt |= (TCINTEN | TCCMODE);
1132         }
1133         edesc->residue_stat = edesc->residue;
1134
1135         return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
1136 }
1137
1138 static struct dma_async_tx_descriptor *edma_prep_dma_memcpy(
1139         struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
1140         size_t len, unsigned long tx_flags)
1141 {
1142         int ret, nslots;
1143         struct edma_desc *edesc;
1144         struct device *dev = chan->device->dev;
1145         struct edma_chan *echan = to_edma_chan(chan);
1146         unsigned int width, pset_len, array_size;
1147
1148         if (unlikely(!echan || !len))
1149                 return NULL;
1150
1151         /* Align the array size (acnt block) with the transfer properties */
1152         switch (__ffs((src | dest | len))) {
1153         case 0:
1154                 array_size = SZ_32K - 1;
1155                 break;
1156         case 1:
1157                 array_size = SZ_32K - 2;
1158                 break;
1159         default:
1160                 array_size = SZ_32K - 4;
1161                 break;
1162         }
1163
1164         if (len < SZ_64K) {
                /*
                 * Transfer sizes of less than 64K can be handled with one
                 * PaRAM slot and with one burst.
                 * ACNT = length
                 */
1170                 width = len;
1171                 pset_len = len;
1172                 nslots = 1;
1173         } else {
                /*
                 * Transfer sizes bigger than 64K are handled with a maximum
                 * of two PaRAM slots.
                 * slot1: (full_length / 32767) bursts of 32767 bytes.
                 *        ACNT = 32767, length1: (full_length / 32767) * 32767
                 * slot2: the remaining amount of data after slot1.
                 *        ACNT = full_length - length1, length2 = ACNT
                 *
                 * When full_length is a multiple of 32767, one slot can be
                 * used to complete the transfer.
                 */
1185                 width = array_size;
1186                 pset_len = rounddown(len, width);
1187                 /* One slot is enough for lengths multiple of (SZ_32K -1) */
                /* One slot is enough for lengths that are a multiple of the array size */
1189                         nslots = 1;
1190                 else
1191                         nslots = 2;
1192         }
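
        /*
         * Worked example (illustrative): with src, dest and len 4-byte
         * aligned and len = 70000, array_size = 32764 and
         * pset_len = rounddown(70000, 32764) = 65528, so slot 1 moves
         * 2 * 32764 bytes and slot 2 moves the remaining 4472 bytes.
         */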
1193
1194         edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]),
1195                         GFP_ATOMIC);
1196         if (!edesc)
1197                 return NULL;
1198
1199         edesc->pset_nr = nslots;
1200         edesc->residue = edesc->residue_stat = len;
1201         edesc->direction = DMA_MEM_TO_MEM;
1202         edesc->echan = echan;
1203
1204         ret = edma_config_pset(chan, &edesc->pset[0], src, dest, 1,
1205                                width, pset_len, DMA_MEM_TO_MEM);
1206         if (ret < 0) {
1207                 kfree(edesc);
1208                 return NULL;
1209         }
1210
1211         edesc->absync = ret;
1212
1213         edesc->pset[0].param.opt |= ITCCHEN;
1214         if (nslots == 1) {
1215                 /* Enable transfer complete interrupt */
1216                 edesc->pset[0].param.opt |= TCINTEN;
1217         } else {
1218                 /* Enable transfer complete chaining for the first slot */
1219                 edesc->pset[0].param.opt |= TCCHEN;
1220
1221                 if (echan->slot[1] < 0) {
1222                         echan->slot[1] = edma_alloc_slot(echan->ecc,
1223                                                          EDMA_SLOT_ANY);
1224                         if (echan->slot[1] < 0) {
1225                                 kfree(edesc);
1226                                 dev_err(dev, "%s: Failed to allocate slot\n",
1227                                         __func__);
1228                                 return NULL;
1229                         }
1230                 }
1231                 dest += pset_len;
1232                 src += pset_len;
1233                 pset_len = width = len % array_size;
1234
1235                 ret = edma_config_pset(chan, &edesc->pset[1], src, dest, 1,
1236                                        width, pset_len, DMA_MEM_TO_MEM);
1237                 if (ret < 0) {
1238                         kfree(edesc);
1239                         return NULL;
1240                 }
1241
1242                 edesc->pset[1].param.opt |= ITCCHEN;
1243                 edesc->pset[1].param.opt |= TCINTEN;
1244         }
1245
1246         return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
1247 }
1248
1249 static struct dma_async_tx_descriptor *edma_prep_dma_cyclic(
1250         struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
1251         size_t period_len, enum dma_transfer_direction direction,
1252         unsigned long tx_flags)
1253 {
1254         struct edma_chan *echan = to_edma_chan(chan);
1255         struct device *dev = chan->device->dev;
1256         struct edma_desc *edesc;
1257         dma_addr_t src_addr, dst_addr;
1258         enum dma_slave_buswidth dev_width;
1259         bool use_intermediate = false;
1260         u32 burst;
1261         int i, ret, nslots;
1262
1263         if (unlikely(!echan || !buf_len || !period_len))
1264                 return NULL;
1265
1266         if (direction == DMA_DEV_TO_MEM) {
1267                 src_addr = echan->cfg.src_addr;
1268                 dst_addr = buf_addr;
1269                 dev_width = echan->cfg.src_addr_width;
1270                 burst = echan->cfg.src_maxburst;
1271         } else if (direction == DMA_MEM_TO_DEV) {
1272                 src_addr = buf_addr;
1273                 dst_addr = echan->cfg.dst_addr;
1274                 dev_width = echan->cfg.dst_addr_width;
1275                 burst = echan->cfg.dst_maxburst;
1276         } else {
1277                 dev_err(dev, "%s: bad direction: %d\n", __func__, direction);
1278                 return NULL;
1279         }
1280
1281         if (dev_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) {
1282                 dev_err(dev, "%s: Undefined slave buswidth\n", __func__);
1283                 return NULL;
1284         }
1285
        if (unlikely(buf_len % period_len)) {
                dev_err(dev, "Buffer length should be a multiple of the period length\n");
                return NULL;
        }
1290
1291         nslots = (buf_len / period_len) + 1;
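        /*
         * e.g. (illustrative): buf_len = 48000 and period_len = 12000 give
         * nslots = 4 + 1 = 5; the extra slot receives a copy of the first
         * PaRAM set below and is used to close the loop when the slots are
         * linked in edma_execute().
         */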
1292
1293         /*
1294          * Cyclic DMA users such as audio cannot tolerate delays introduced
1295          * by cases where the number of periods is more than the maximum
1296          * number of SGs the EDMA driver can handle at a time. For DMA types
1297          * such as Slave SGs, such delays are tolerable and synchronized,
1298          * but the synchronization is difficult to achieve with Cyclic and
1299          * cannot be guaranteed, so we error out early.
1300          */
1301         if (nslots > MAX_NR_SG) {
1302                 /*
1303                  * If the burst and period sizes are the same, we can put
1304                  * the full buffer into a single period and activate
1305                  * intermediate interrupts. This will produce interrupts
1306                  * after each burst, which is also after each desired period.
1307                  */
1308                 if (burst == period_len) {
1309                         period_len = buf_len;
1310                         nslots = 2;
1311                         use_intermediate = true;
1312                 } else {
1313                         return NULL;
1314                 }
1315         }
1316
1317         edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]),
1318                         GFP_ATOMIC);
1319         if (!edesc)
1320                 return NULL;
1321
1322         edesc->cyclic = 1;
1323         edesc->pset_nr = nslots;
1324         edesc->residue = edesc->residue_stat = buf_len;
1325         edesc->direction = direction;
1326         edesc->echan = echan;
1327
1328         dev_dbg(dev, "%s: channel=%d nslots=%d period_len=%zu buf_len=%zu\n",
1329                 __func__, echan->ch_num, nslots, period_len, buf_len);
1330
1331         for (i = 0; i < nslots; i++) {
1332                 /* Allocate a PaRAM slot, if needed */
1333                 if (echan->slot[i] < 0) {
1334                         echan->slot[i] =
1335                                 edma_alloc_slot(echan->ecc, EDMA_SLOT_ANY);
1336                         if (echan->slot[i] < 0) {
1337                                 kfree(edesc);
1338                                 dev_err(dev, "%s: Failed to allocate slot\n",
1339                                         __func__);
1340                                 return NULL;
1341                         }
1342                 }
1343
1344                 if (i == nslots - 1) {
1345                         memcpy(&edesc->pset[i], &edesc->pset[0],
1346                                sizeof(edesc->pset[0]));
1347                         break;
1348                 }
1349
1350                 ret = edma_config_pset(chan, &edesc->pset[i], src_addr,
1351                                        dst_addr, burst, dev_width, period_len,
1352                                        direction);
1353                 if (ret < 0) {
1354                         kfree(edesc);
1355                         return NULL;
1356                 }
1357
1358                 if (direction == DMA_DEV_TO_MEM)
1359                         dst_addr += period_len;
1360                 else
1361                         src_addr += period_len;
1362
1363                 dev_vdbg(dev, "%s: Configure period %d of buf:\n", __func__, i);
1364                 dev_vdbg(dev,
1365                         "\n pset[%d]:\n"
1366                         "  chnum\t%d\n"
1367                         "  slot\t%d\n"
1368                         "  opt\t%08x\n"
1369                         "  src\t%08x\n"
1370                         "  dst\t%08x\n"
1371                         "  abcnt\t%08x\n"
1372                         "  ccnt\t%08x\n"
1373                         "  bidx\t%08x\n"
1374                         "  cidx\t%08x\n"
1375                         "  lkrld\t%08x\n",
1376                         i, echan->ch_num, echan->slot[i],
1377                         edesc->pset[i].param.opt,
1378                         edesc->pset[i].param.src,
1379                         edesc->pset[i].param.dst,
1380                         edesc->pset[i].param.a_b_cnt,
1381                         edesc->pset[i].param.ccnt,
1382                         edesc->pset[i].param.src_dst_bidx,
1383                         edesc->pset[i].param.src_dst_cidx,
1384                         edesc->pset[i].param.link_bcntrld);
1385
1386                 edesc->absync = ret;
1387
                /* Enable the period interrupt only if it is requested */
1391                 if (tx_flags & DMA_PREP_INTERRUPT) {
1392                         edesc->pset[i].param.opt |= TCINTEN;
1393
1394                         /* Also enable intermediate interrupts if necessary */
1395                         if (use_intermediate)
1396                                 edesc->pset[i].param.opt |= ITCINTEN;
1397                 }
1398         }
1399
1400         /* Place the cyclic channel to highest priority queue */
1401         if (!echan->tc)
1402                 edma_assign_channel_eventq(echan, EVENTQ_0);
1403
1404         return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags);
1405 }
1406
1407 static void edma_completion_handler(struct edma_chan *echan)
1408 {
1409         struct device *dev = echan->vchan.chan.device->dev;
1410         struct edma_desc *edesc;
1411
1412         spin_lock(&echan->vchan.lock);
1413         edesc = echan->edesc;
1414         if (edesc) {
1415                 if (edesc->cyclic) {
1416                         vchan_cyclic_callback(&edesc->vdesc);
1417                         spin_unlock(&echan->vchan.lock);
1418                         return;
1419                 } else if (edesc->processed == edesc->pset_nr) {
1420                         edesc->residue = 0;
1421                         edma_stop(echan);
1422                         vchan_cookie_complete(&edesc->vdesc);
1423                         echan->edesc = NULL;
1424
1425                         dev_dbg(dev, "Transfer completed on channel %d\n",
1426                                 echan->ch_num);
1427                 } else {
1428                         dev_dbg(dev, "Sub-transfer completed on channel %d\n",
1429                                 echan->ch_num);
1430
1431                         edma_pause(echan);
1432
1433                         /* Update statistics for tx_status */
1434                         edesc->residue -= edesc->sg_len;
1435                         edesc->residue_stat = edesc->residue;
1436                         edesc->processed_stat = edesc->processed;
1437                 }
1438                 edma_execute(echan);
1439         }
1440
1441         spin_unlock(&echan->vchan.lock);
1442 }
1443
1444 /* eDMA interrupt handler */
1445 static irqreturn_t dma_irq_handler(int irq, void *data)
1446 {
1447         struct edma_cc *ecc = data;
1448         int ctlr;
1449         u32 sh_ier;
1450         u32 sh_ipr;
1451         u32 bank;
1452
1453         ctlr = ecc->id;
1454         if (ctlr < 0)
1455                 return IRQ_NONE;
1456
1457         dev_vdbg(ecc->dev, "dma_irq_handler\n");
1458
1459         sh_ipr = edma_shadow0_read_array(ecc, SH_IPR, 0);
1460         if (!sh_ipr) {
1461                 sh_ipr = edma_shadow0_read_array(ecc, SH_IPR, 1);
1462                 if (!sh_ipr)
1463                         return IRQ_NONE;
1464                 sh_ier = edma_shadow0_read_array(ecc, SH_IER, 1);
1465                 bank = 1;
1466         } else {
1467                 sh_ier = edma_shadow0_read_array(ecc, SH_IER, 0);
1468                 bank = 0;
1469         }
1470
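        /*
         * Up to 64 channels are reported through two 32-bit IPR/IER
         * banks; walk every pending and enabled bit and reconstruct
         * the channel number as (bank * 32 + slot).
         */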
1471         do {
1472                 u32 slot;
1473                 u32 channel;
1474
1475                 slot = __ffs(sh_ipr);
1476                 sh_ipr &= ~(BIT(slot));
1477
1478                 if (sh_ier & BIT(slot)) {
1479                         channel = (bank << 5) | slot;
1480                         /* Clear the corresponding IPR bits */
1481                         edma_shadow0_write_array(ecc, SH_ICR, bank, BIT(slot));
1482                         edma_completion_handler(&ecc->slave_chans[channel]);
1483                 }
1484         } while (sh_ipr);
1485
1486         edma_shadow0_write(ecc, SH_IEVAL, 1);
1487         return IRQ_HANDLED;
1488 }
1489
1490 static void edma_error_handler(struct edma_chan *echan)
1491 {
1492         struct edma_cc *ecc = echan->ecc;
1493         struct device *dev = echan->vchan.chan.device->dev;
1494         struct edmacc_param p;
1495         int err;
1496
1497         if (!echan->edesc)
1498                 return;
1499
1500         spin_lock(&echan->vchan.lock);
1501
1502         err = edma_read_slot(ecc, echan->slot[0], &p);
1503
1504         /*
1505          * Issue the transfer later, based on the missed flag; this is
1506          * guaranteed to happen because either:
1507          * (1) we finished transmitting an intermediate slot and
1508          *     edma_execute is coming up, or
1509          * (2) we finished the current transfer and issue_pending will
1510          *     call edma_execute.
1511          *
1512          * Important note: issuing can be dangerous here and
1513          * lead to some nasty recursion when we are in a NULL
1514          * slot. So we avoid doing so and set the missed flag.
1515          */
1516         if (err || (p.a_b_cnt == 0 && p.ccnt == 0)) {
1517                 dev_dbg(dev, "Error on null slot, setting miss\n");
1518                 echan->missed = 1;
1519         } else {
1520                 /*
1521                  * The slot is already programmed but the event got
1522          * missed, so it's safe to issue it here.
1523                  */
1524                 dev_dbg(dev, "Missed event, TRIGGERING\n");
1525                 edma_clean_channel(echan);
1526                 edma_stop(echan);
1527                 edma_start(echan);
1528                 edma_trigger_channel(echan);
1529         }
1530         spin_unlock(&echan->vchan.lock);
1531 }
1532
1533 static inline bool edma_error_pending(struct edma_cc *ecc)
1534 {
1535         if (edma_read_array(ecc, EDMA_EMR, 0) ||
1536             edma_read_array(ecc, EDMA_EMR, 1) ||
1537             edma_read(ecc, EDMA_QEMR) || edma_read(ecc, EDMA_CCERR))
1538                 return true;
1539
1540         return false;
1541 }
1542
1543 /* eDMA error interrupt handler */
1544 static irqreturn_t dma_ccerr_handler(int irq, void *data)
1545 {
1546         struct edma_cc *ecc = data;
1547         int i, j;
1548         int ctlr;
1549         unsigned int cnt = 0;
1550         unsigned int val;
1551
1552         ctlr = ecc->id;
1553         if (ctlr < 0)
1554                 return IRQ_NONE;
1555
1556         dev_vdbg(ecc->dev, "dma_ccerr_handler\n");
1557
1558         if (!edma_error_pending(ecc)) {
1559                 /*
1560                  * The registers indicate no pending error event but the irq
1561                  * handler has been called.
1562                  * Ask eDMA to re-evaluate the error registers.
1563                  */
1564                 dev_err(ecc->dev, "%s: Error interrupt without error event!\n",
1565                         __func__);
1566                 edma_write(ecc, EDMA_EEVAL, 1);
1567                 return IRQ_NONE;
1568         }
1569
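        /*
         * Service the missed-event (EMR), QDMA event and CC error
         * status until all of them read back clean, bailing out after
         * roughly ten passes so a stuck error source cannot keep us in
         * interrupt context indefinitely.
         */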
1570         while (1) {
1571                 /* Event missed register(s) */
1572                 for (j = 0; j < 2; j++) {
1573                         unsigned long emr;
1574
1575                         val = edma_read_array(ecc, EDMA_EMR, j);
1576                         if (!val)
1577                                 continue;
1578
1579                         dev_dbg(ecc->dev, "EMR%d 0x%08x\n", j, val);
1580                         emr = val;
1581                         for (i = find_next_bit(&emr, 32, 0); i < 32;
1582                              i = find_next_bit(&emr, 32, i + 1)) {
1583                                 int k = (j << 5) + i;
1584
1585                                 /* Clear the corresponding EMR bits */
1586                                 edma_write_array(ecc, EDMA_EMCR, j, BIT(i));
1587                                 /* Clear any SER */
1588                                 edma_shadow0_write_array(ecc, SH_SECR, j,
1589                                                          BIT(i));
1590                                 edma_error_handler(&ecc->slave_chans[k]);
1591                         }
1592                 }
1593
1594                 val = edma_read(ecc, EDMA_QEMR);
1595                 if (val) {
1596                         dev_dbg(ecc->dev, "QEMR 0x%02x\n", val);
1597                         /* Not reported, just clear the interrupt reason. */
1598                         edma_write(ecc, EDMA_QEMCR, val);
1599                         edma_shadow0_write(ecc, SH_QSECR, val);
1600                 }
1601
1602                 val = edma_read(ecc, EDMA_CCERR);
1603                 if (val) {
1604                         dev_warn(ecc->dev, "CCERR 0x%08x\n", val);
1605                         /* Not reported, just clear the interrupt reason. */
1606                         edma_write(ecc, EDMA_CCERRCLR, val);
1607                 }
1608
1609                 if (!edma_error_pending(ecc))
1610                         break;
1611                 cnt++;
1612                 if (cnt > 10)
1613                         break;
1614         }
1615         edma_write(ecc, EDMA_EEVAL, 1);
1616         return IRQ_HANDLED;
1617 }
1618
1619 /* Alloc channel resources */
1620 static int edma_alloc_chan_resources(struct dma_chan *chan)
1621 {
1622         struct edma_chan *echan = to_edma_chan(chan);
1623         struct edma_cc *ecc = echan->ecc;
1624         struct device *dev = ecc->dev;
1625         enum dma_event_q eventq_no = EVENTQ_DEFAULT;
1626         int ret;
1627
1628         if (echan->tc) {
1629                 eventq_no = echan->tc->id;
1630         } else if (ecc->tc_list) {
1631                 /* memcpy channel */
1632                 echan->tc = &ecc->tc_list[ecc->info->default_queue];
1633                 eventq_no = echan->tc->id;
1634         }
1635
1636         ret = edma_alloc_channel(echan, eventq_no);
1637         if (ret)
1638                 return ret;
1639
1640         echan->slot[0] = edma_alloc_slot(ecc, echan->ch_num);
1641         if (echan->slot[0] < 0) {
1642                 dev_err(dev, "Entry slot allocation failed for channel %u\n",
1643                         EDMA_CHAN_SLOT(echan->ch_num));
1644                 ret = echan->slot[0];
1645                 goto err_slot;
1646         }
1647
1648         /* Set up channel -> slot mapping for the entry slot */
1649         edma_set_chmap(echan, echan->slot[0]);
1650         echan->alloced = true;
1651
1652         dev_dbg(dev, "Got eDMA channel %d for virt channel %d (%s trigger)\n",
1653                 EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id,
1654                 echan->hw_triggered ? "HW" : "SW");
1655
1656         return 0;
1657
1658 err_slot:
1659         edma_free_channel(echan);
1660         return ret;
1661 }
1662
1663 /* Free channel resources */
1664 static void edma_free_chan_resources(struct dma_chan *chan)
1665 {
1666         struct edma_chan *echan = to_edma_chan(chan);
1667         struct device *dev = echan->ecc->dev;
1668         int i;
1669
1670         /* Terminate transfers */
1671         edma_stop(echan);
1672
1673         vchan_free_chan_resources(&echan->vchan);
1674
1675         /* Free EDMA PaRAM slots */
1676         for (i = 0; i < EDMA_MAX_SLOTS; i++) {
1677                 if (echan->slot[i] >= 0) {
1678                         edma_free_slot(echan->ecc, echan->slot[i]);
1679                         echan->slot[i] = -1;
1680                 }
1681         }
1682
1683         /* Set entry slot to the dummy slot */
1684         edma_set_chmap(echan, echan->ecc->dummy_slot);
1685
1686         /* Free EDMA channel */
1687         if (echan->alloced) {
1688                 edma_free_channel(echan);
1689                 echan->alloced = false;
1690         }
1691
1692         echan->tc = NULL;
1693         echan->hw_triggered = false;
1694
1695         dev_dbg(dev, "Free eDMA channel %d for virt channel %d\n",
1696                 EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id);
1697 }
1698
1699 /* Send pending descriptor to hardware */
1700 static void edma_issue_pending(struct dma_chan *chan)
1701 {
1702         struct edma_chan *echan = to_edma_chan(chan);
1703         unsigned long flags;
1704
1705         spin_lock_irqsave(&echan->vchan.lock, flags);
1706         if (vchan_issue_pending(&echan->vchan) && !echan->edesc)
1707                 edma_execute(echan);
1708         spin_unlock_irqrestore(&echan->vchan.lock, flags);
1709 }
1710
1711 /*
1712  * This limit exists to avoid a possible infinite loop when waiting for proof
1713  * that a particular transfer is completed. This limit can be hit if there
1714  * are large bursts to/from slow devices or the CPU is never able to catch
1715  * the DMA hardware idle. On an AM335x transferring 48 bytes from the UART
1716  * RX-FIFO, as many as 55 loops have been seen.
1717  */
1718 #define EDMA_MAX_TR_WAIT_LOOPS 1000
1719
1720 static u32 edma_residue(struct edma_desc *edesc)
1721 {
1722         bool dst = edesc->direction == DMA_DEV_TO_MEM;
1723         int loop_count = EDMA_MAX_TR_WAIT_LOOPS;
1724         struct edma_chan *echan = edesc->echan;
1725         struct edma_pset *pset = edesc->pset;
1726         dma_addr_t done, pos;
1727         int i;
1728
1729         /*
1730          * We always read the dst/src position from the first PaRAM
1731          * slot (echan->slot[0]). That's the one which is active now.
1732          */
1733         pos = edma_get_position(echan->ecc, echan->slot[0], dst);
1734
1735         /*
1736          * "pos" may represent a transfer request that is still being
1737          * processed by the EDMA CC or EDMA TC. We busy-wait until one
1738          * of the following occurs:
1739          *   1. the DMA hardware is idle
1740          *   2. a new transfer request is set up
1741          *   3. we hit the loop limit
1742          */
1743         while (edma_read(echan->ecc, EDMA_CCSTAT) & EDMA_CCSTAT_ACTV) {
1744                 /* check if a new transfer request is set up */
1745                 if (edma_get_position(echan->ecc,
1746                                       echan->slot[0], dst) != pos) {
1747                         break;
1748                 }
1749
1750                 if (!--loop_count) {
1751                         dev_dbg_ratelimited(echan->vchan.chan.device->dev,
1752                                 "%s: timeout waiting for PaRAM update\n",
1753                                 __func__);
1754                         break;
1755                 }
1756
1757                 cpu_relax();
1758         }
1759
1760         /*
1761          * Cyclic is simple. Just subtract pset[0].addr from pos.
1762          *
1763          * We never update edesc->residue in the cyclic case, so we
1764          * can report the remaining room to the end of the circular
1765          * buffer.
1766          */
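        /*
         * Illustrative numbers: for a 4096 byte circular buffer with
         * "pos" 1024 bytes past pset->addr, done = 1024 and the
         * reported residue_stat is residue - 1024 = 3072.
         */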
1767         if (edesc->cyclic) {
1768                 done = pos - pset->addr;
1769                 edesc->residue_stat = edesc->residue - done;
1770                 return edesc->residue_stat;
1771         }
1772
1773         /*
1774          * For SG operation we catch up with the last processed
1775          * status.
1776          */
1777         pset += edesc->processed_stat;
1778
1779         for (i = edesc->processed_stat; i < edesc->processed; i++, pset++) {
1780                 /*
1781                  * If we are inside this pset address range, we know
1782                  * this is the active one. Get the current delta and
1783                  * stop walking the psets.
1784                  */
1785                 if (pos >= pset->addr && pos < pset->addr + pset->len)
1786                         return edesc->residue_stat - (pos - pset->addr);
1787
1788                 /* Otherwise mark it done and update residue_stat. */
1789                 edesc->processed_stat++;
1790                 edesc->residue_stat -= pset->len;
1791         }
1792         return edesc->residue_stat;
1793 }
1794
1795 /* Check request completion status */
1796 static enum dma_status edma_tx_status(struct dma_chan *chan,
1797                                       dma_cookie_t cookie,
1798                                       struct dma_tx_state *txstate)
1799 {
1800         struct edma_chan *echan = to_edma_chan(chan);
1801         struct virt_dma_desc *vdesc;
1802         enum dma_status ret;
1803         unsigned long flags;
1804
1805         ret = dma_cookie_status(chan, cookie, txstate);
1806         if (ret == DMA_COMPLETE || !txstate)
1807                 return ret;
1808
1809         spin_lock_irqsave(&echan->vchan.lock, flags);
1810         if (echan->edesc && echan->edesc->vdesc.tx.cookie == cookie)
1811                 txstate->residue = edma_residue(echan->edesc);
1812         else if ((vdesc = vchan_find_desc(&echan->vchan, cookie)))
1813                 txstate->residue = to_edma_desc(&vdesc->tx)->residue;
1814         spin_unlock_irqrestore(&echan->vchan.lock, flags);
1815
1816         return ret;
1817 }
1818
1819 static bool edma_is_memcpy_channel(int ch_num, s32 *memcpy_channels)
1820 {
1821         if (!memcpy_channels)
1822                 return false;
1823         while (*memcpy_channels != -1) {
1824                 if (*memcpy_channels == ch_num)
1825                         return true;
1826                 memcpy_channels++;
1827         }
1828         return false;
1829 }
1830
1831 #define EDMA_DMA_BUSWIDTHS      (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
1832                                  BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
1833                                  BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
1834                                  BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
1835
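/*
 * Set up one dmaengine device for slave and cyclic channels and, when a
 * list of memcpy channels is available (or legacy mode forces memcpy onto
 * the slave device), a second device dedicated to memcpy.  Each channel
 * is then bound to the appropriate device via vchan_init().
 */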
1836 static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode)
1837 {
1838         struct dma_device *s_ddev = &ecc->dma_slave;
1839         struct dma_device *m_ddev = NULL;
1840         s32 *memcpy_channels = ecc->info->memcpy_channels;
1841         int i, j;
1842
1843         dma_cap_zero(s_ddev->cap_mask);
1844         dma_cap_set(DMA_SLAVE, s_ddev->cap_mask);
1845         dma_cap_set(DMA_CYCLIC, s_ddev->cap_mask);
1846         if (ecc->legacy_mode && !memcpy_channels) {
1847                 dev_warn(ecc->dev,
1848                          "Legacy memcpy is enabled, things might not work\n");
1849
1850                 dma_cap_set(DMA_MEMCPY, s_ddev->cap_mask);
1851                 s_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy;
1852                 s_ddev->directions = BIT(DMA_MEM_TO_MEM);
1853         }
1854
1855         s_ddev->device_prep_slave_sg = edma_prep_slave_sg;
1856         s_ddev->device_prep_dma_cyclic = edma_prep_dma_cyclic;
1857         s_ddev->device_alloc_chan_resources = edma_alloc_chan_resources;
1858         s_ddev->device_free_chan_resources = edma_free_chan_resources;
1859         s_ddev->device_issue_pending = edma_issue_pending;
1860         s_ddev->device_tx_status = edma_tx_status;
1861         s_ddev->device_config = edma_slave_config;
1862         s_ddev->device_pause = edma_dma_pause;
1863         s_ddev->device_resume = edma_dma_resume;
1864         s_ddev->device_terminate_all = edma_terminate_all;
1865         s_ddev->device_synchronize = edma_synchronize;
1866
1867         s_ddev->src_addr_widths = EDMA_DMA_BUSWIDTHS;
1868         s_ddev->dst_addr_widths = EDMA_DMA_BUSWIDTHS;
1869         s_ddev->directions |= (BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV));
1870         s_ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
1871
1872         s_ddev->dev = ecc->dev;
1873         INIT_LIST_HEAD(&s_ddev->channels);
1874
1875         if (memcpy_channels) {
1876                 m_ddev = devm_kzalloc(ecc->dev, sizeof(*m_ddev), GFP_KERNEL);
1877                 ecc->dma_memcpy = m_ddev;
1878
1879                 dma_cap_zero(m_ddev->cap_mask);
1880                 dma_cap_set(DMA_MEMCPY, m_ddev->cap_mask);
1881
1882                 m_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy;
1883                 m_ddev->device_alloc_chan_resources = edma_alloc_chan_resources;
1884                 m_ddev->device_free_chan_resources = edma_free_chan_resources;
1885                 m_ddev->device_issue_pending = edma_issue_pending;
1886                 m_ddev->device_tx_status = edma_tx_status;
1887                 m_ddev->device_config = edma_slave_config;
1888                 m_ddev->device_pause = edma_dma_pause;
1889                 m_ddev->device_resume = edma_dma_resume;
1890                 m_ddev->device_terminate_all = edma_terminate_all;
1891                 m_ddev->device_synchronize = edma_synchronize;
1892
1893                 m_ddev->src_addr_widths = EDMA_DMA_BUSWIDTHS;
1894                 m_ddev->dst_addr_widths = EDMA_DMA_BUSWIDTHS;
1895                 m_ddev->directions = BIT(DMA_MEM_TO_MEM);
1896                 m_ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
1897
1898                 m_ddev->dev = ecc->dev;
1899                 INIT_LIST_HEAD(&m_ddev->channels);
1900         } else if (!ecc->legacy_mode) {
1901                 dev_info(ecc->dev, "memcpy is disabled\n");
1902         }
1903
1904         for (i = 0; i < ecc->num_channels; i++) {
1905                 struct edma_chan *echan = &ecc->slave_chans[i];
1906                 echan->ch_num = EDMA_CTLR_CHAN(ecc->id, i);
1907                 echan->ecc = ecc;
1908                 echan->vchan.desc_free = edma_desc_free;
1909
1910                 if (m_ddev && edma_is_memcpy_channel(i, memcpy_channels))
1911                         vchan_init(&echan->vchan, m_ddev);
1912                 else
1913                         vchan_init(&echan->vchan, s_ddev);
1914
1915                 INIT_LIST_HEAD(&echan->node);
1916                 for (j = 0; j < EDMA_MAX_SLOTS; j++)
1917                         echan->slot[j] = -1;
1918         }
1919 }
1920
1921 static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata,
1922                               struct edma_cc *ecc)
1923 {
1924         int i;
1925         u32 value, cccfg;
1926         s8 (*queue_priority_map)[2];
1927
1928         /* Decode the eDMA3 configuration from CCCFG register */
1929         cccfg = edma_read(ecc, EDMA_CCCFG);
1930
1931         value = GET_NUM_REGN(cccfg);
1932         ecc->num_region = BIT(value);
1933
1934         value = GET_NUM_DMACH(cccfg);
1935         ecc->num_channels = BIT(value + 1);
1936
1937         value = GET_NUM_QDMACH(cccfg);
1938         ecc->num_qchannels = value * 2;
1939
1940         value = GET_NUM_PAENTRY(cccfg);
1941         ecc->num_slots = BIT(value + 4);
1942
1943         value = GET_NUM_EVQUE(cccfg);
1944         ecc->num_tc = value + 1;
1945
1946         ecc->chmap_exist = (cccfg & CHMAP_EXIST) ? true : false;
1947
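        /*
         * Illustrative decode (hypothetical CCCFG values): GET_NUM_DMACH()
         * returning 5 yields num_channels = 2^(5+1) = 64, GET_NUM_PAENTRY()
         * returning 4 yields num_slots = 2^(4+4) = 256, and GET_NUM_EVQUE()
         * returning 2 yields num_tc = 3.
         */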
1948         dev_dbg(dev, "eDMA3 CC HW configuration (cccfg: 0x%08x):\n", cccfg);
1949         dev_dbg(dev, "num_region: %u\n", ecc->num_region);
1950         dev_dbg(dev, "num_channels: %u\n", ecc->num_channels);
1951         dev_dbg(dev, "num_qchannels: %u\n", ecc->num_qchannels);
1952         dev_dbg(dev, "num_slots: %u\n", ecc->num_slots);
1953         dev_dbg(dev, "num_tc: %u\n", ecc->num_tc);
1954         dev_dbg(dev, "chmap_exist: %s\n", ecc->chmap_exist ? "yes" : "no");
1955
1956         /* Nothing needs to be done if a queue priority mapping is provided */
1957         if (pdata->queue_priority_mapping)
1958                 return 0;
1959
1960         /*
1961          * Configure TC/queue priority as follows:
1962          * Q0 - priority 0
1963          * Q1 - priority 1
1964          * Q2 - priority 2
1965          * ...
1966          * Priority 0 is the highest and 7 the lowest, so Q0 ends up as
1967          * the highest priority queue and the last queue gets the lowest
1968          * priority.
1969          */
1970         queue_priority_map = devm_kcalloc(dev, ecc->num_tc + 1, sizeof(s8),
1971                                           GFP_KERNEL);
1972         if (!queue_priority_map)
1973                 return -ENOMEM;
1974
1975         for (i = 0; i < ecc->num_tc; i++) {
1976                 queue_priority_map[i][0] = i;
1977                 queue_priority_map[i][1] = i;
1978         }
1979         queue_priority_map[i][0] = -1;
1980         queue_priority_map[i][1] = -1;
1981
1982         pdata->queue_priority_mapping = queue_priority_map;
1983         /* Default queue has the lowest priority */
1984         pdata->default_queue = i - 1;
1985
1986         return 0;
1987 }
1988
1989 #if IS_ENABLED(CONFIG_OF)
1990 static int edma_xbar_event_map(struct device *dev, struct edma_soc_info *pdata,
1991                                size_t sz)
1992 {
1993         const char pname[] = "ti,edma-xbar-event-map";
1994         struct resource res;
1995         void __iomem *xbar;
1996         s16 (*xbar_chans)[2];
1997         size_t nelm = sz / sizeof(s16);
1998         u32 shift, offset, mux;
1999         int ret, i;
2000
2001         xbar_chans = devm_kcalloc(dev, nelm + 2, sizeof(s16), GFP_KERNEL);
2002         if (!xbar_chans)
2003                 return -ENOMEM;
2004
2005         ret = of_address_to_resource(dev->of_node, 1, &res);
2006         if (ret)
2007                 return -ENOMEM;
2008
2009         xbar = devm_ioremap(dev, res.start, resource_size(&res));
2010         if (!xbar)
2011                 return -ENOMEM;
2012
2013         ret = of_property_read_u16_array(dev->of_node, pname, (u16 *)xbar_chans,
2014                                          nelm);
2015         if (ret)
2016                 return -EIO;
2017
2018         /* Invalidate last entry for the other user of this mess */
2019         nelm >>= 1;
2020         xbar_chans[nelm][0] = -1;
2021         xbar_chans[nelm][1] = -1;
2022
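        /*
         * Each 32-bit crossbar register packs four 8-bit mux fields.
         * The low two bits of xbar_chans[i][1] select the byte lane
         * (shift = lane * 8), the remaining bits give the register
         * offset, and xbar_chans[i][0] is the value muxed in.
         */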
2023         for (i = 0; i < nelm; i++) {
2024                 shift = (xbar_chans[i][1] & 0x03) << 3;
2025                 offset = xbar_chans[i][1] & 0xfffffffc;
2026                 mux = readl(xbar + offset);
2027                 mux &= ~(0xff << shift);
2028                 mux |= xbar_chans[i][0] << shift;
2029                 writel(mux, (xbar + offset));
2030         }
2031
2032         pdata->xbar_chans = (const s16 (*)[2]) xbar_chans;
2033         return 0;
2034 }
2035
2036 static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev,
2037                                                      bool legacy_mode)
2038 {
2039         struct edma_soc_info *info;
2040         struct property *prop;
2041         int sz, ret;
2042
2043         info = devm_kzalloc(dev, sizeof(struct edma_soc_info), GFP_KERNEL);
2044         if (!info)
2045                 return ERR_PTR(-ENOMEM);
2046
2047         if (legacy_mode) {
2048                 prop = of_find_property(dev->of_node, "ti,edma-xbar-event-map",
2049                                         &sz);
2050                 if (prop) {
2051                         ret = edma_xbar_event_map(dev, info, sz);
2052                         if (ret)
2053                                 return ERR_PTR(ret);
2054                 }
2055                 return info;
2056         }
2057
2058         /* Get the list of channels allocated to be used for memcpy */
2059         prop = of_find_property(dev->of_node, "ti,edma-memcpy-channels", &sz);
2060         if (prop) {
2061                 const char pname[] = "ti,edma-memcpy-channels";
2062                 size_t nelm = sz / sizeof(s32);
2063                 s32 *memcpy_ch;
2064
2065                 memcpy_ch = devm_kcalloc(dev, nelm + 1, sizeof(s32),
2066                                          GFP_KERNEL);
2067                 if (!memcpy_ch)
2068                         return ERR_PTR(-ENOMEM);
2069
2070                 ret = of_property_read_u32_array(dev->of_node, pname,
2071                                                  (u32 *)memcpy_ch, nelm);
2072                 if (ret)
2073                         return ERR_PTR(ret);
2074
2075                 memcpy_ch[nelm] = -1;
2076                 info->memcpy_channels = memcpy_ch;
2077         }
2078
2079         prop = of_find_property(dev->of_node, "ti,edma-reserved-slot-ranges",
2080                                 &sz);
2081         if (prop) {
2082                 const char pname[] = "ti,edma-reserved-slot-ranges";
2083                 u32 (*tmp)[2];
2084                 s16 (*rsv_slots)[2];
2085                 size_t nelm = sz / sizeof(*tmp);
2086                 struct edma_rsv_info *rsv_info;
2087                 int i;
2088
2089                 if (!nelm)
2090                         return info;
2091
2092                 tmp = kcalloc(nelm, sizeof(*tmp), GFP_KERNEL);
2093                 if (!tmp)
2094                         return ERR_PTR(-ENOMEM);
2095
2096                 rsv_info = devm_kzalloc(dev, sizeof(*rsv_info), GFP_KERNEL);
2097                 if (!rsv_info) {
2098                         kfree(tmp);
2099                         return ERR_PTR(-ENOMEM);
2100                 }
2101
2102                 rsv_slots = devm_kcalloc(dev, nelm + 1, sizeof(*rsv_slots),
2103                                          GFP_KERNEL);
2104                 if (!rsv_slots) {
2105                         kfree(tmp);
2106                         return ERR_PTR(-ENOMEM);
2107                 }
2108
2109                 ret = of_property_read_u32_array(dev->of_node, pname,
2110                                                  (u32 *)tmp, nelm * 2);
2111                 if (ret) {
2112                         kfree(tmp);
2113                         return ERR_PTR(ret);
2114                 }
2115
2116                 for (i = 0; i < nelm; i++) {
2117                         rsv_slots[i][0] = tmp[i][0];
2118                         rsv_slots[i][1] = tmp[i][1];
2119                 }
2120                 rsv_slots[nelm][0] = -1;
2121                 rsv_slots[nelm][1] = -1;
2122
2123                 info->rsv = rsv_info;
2124                 info->rsv->rsv_slots = (const s16 (*)[2])rsv_slots;
2125
2126                 kfree(tmp);
2127         }
2128
2129         return info;
2130 }
2131
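/*
 * Translate a DT dma-spec into a channel: args[0] is the eDMA channel
 * number and, on the non-legacy (TPCC) binding, an optional args[1]
 * selects the transfer controller.  Illustrative client node (the
 * numbers are hypothetical):
 *
 *	dmas = <&edma 12 0>;
 *	dma-names = "rx";
 */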
2132 static struct dma_chan *of_edma_xlate(struct of_phandle_args *dma_spec,
2133                                       struct of_dma *ofdma)
2134 {
2135         struct edma_cc *ecc = ofdma->of_dma_data;
2136         struct dma_chan *chan = NULL;
2137         struct edma_chan *echan;
2138         int i;
2139
2140         if (!ecc || dma_spec->args_count < 1)
2141                 return NULL;
2142
2143         for (i = 0; i < ecc->num_channels; i++) {
2144                 echan = &ecc->slave_chans[i];
2145                 if (echan->ch_num == dma_spec->args[0]) {
2146                         chan = &echan->vchan.chan;
2147                         break;
2148                 }
2149         }
2150
2151         if (!chan)
2152                 return NULL;
2153
2154         if (echan->ecc->legacy_mode && dma_spec->args_count == 1)
2155                 goto out;
2156
2157         if (!echan->ecc->legacy_mode && dma_spec->args_count == 2 &&
2158             dma_spec->args[1] < echan->ecc->num_tc) {
2159                 echan->tc = &echan->ecc->tc_list[dma_spec->args[1]];
2160                 goto out;
2161         }
2162
2163         return NULL;
2164 out:
2165         /* The channel is going to be used as HW synchronized */
2166         echan->hw_triggered = true;
2167         return dma_get_slave_channel(chan);
2168 }
2169 #else
2170 static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev,
2171                                                      bool legacy_mode)
2172 {
2173         return ERR_PTR(-EINVAL);
2174 }
2175
2176 static struct dma_chan *of_edma_xlate(struct of_phandle_args *dma_spec,
2177                                       struct of_dma *ofdma)
2178 {
2179         return NULL;
2180 }
2181 #endif
2182
2183 static int edma_probe(struct platform_device *pdev)
2184 {
2185         struct edma_soc_info    *info = pdev->dev.platform_data;
2186         s8                      (*queue_priority_mapping)[2];
2187         int                     i, off, ln;
2188         const s16               (*rsv_slots)[2];
2189         const s16               (*xbar_chans)[2];
2190         int                     irq;
2191         char                    *irq_name;
2192         struct resource         *mem;
2193         struct device_node      *node = pdev->dev.of_node;
2194         struct device           *dev = &pdev->dev;
2195         struct edma_cc          *ecc;
2196         bool                    legacy_mode = true;
2197         int ret;
2198
2199         if (node) {
2200                 const struct of_device_id *match;
2201
2202                 match = of_match_node(edma_of_ids, node);
2203                 if (match && (*(u32 *)match->data) == EDMA_BINDING_TPCC)
2204                         legacy_mode = false;
2205
2206                 info = edma_setup_info_from_dt(dev, legacy_mode);
2207                 if (IS_ERR(info)) {
2208                         dev_err(dev, "failed to get DT data\n");
2209                         return PTR_ERR(info);
2210                 }
2211         }
2212
2213         if (!info)
2214                 return -ENODEV;
2215
2216         pm_runtime_enable(dev);
2217         ret = pm_runtime_get_sync(dev);
2218         if (ret < 0) {
2219                 dev_err(dev, "pm_runtime_get_sync() failed\n");
2220                 return ret;
2221         }
2222
2223         ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
2224         if (ret)
2225                 return ret;
2226
2227         ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL);
2228         if (!ecc)
2229                 return -ENOMEM;
2230
2231         ecc->dev = dev;
2232         ecc->id = pdev->id;
2233         ecc->legacy_mode = legacy_mode;
2234         /* When booting with DT the pdev->id is -1 */
2235         if (ecc->id < 0)
2236                 ecc->id = 0;
2237
2238         mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "edma3_cc");
2239         if (!mem) {
2240                 dev_dbg(dev, "mem resource not found, using index 0\n");
2241                 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2242                 if (!mem) {
2243                         dev_err(dev, "no mem resource?\n");
2244                         return -ENODEV;
2245                 }
2246         }
2247         ecc->base = devm_ioremap_resource(dev, mem);
2248         if (IS_ERR(ecc->base))
2249                 return PTR_ERR(ecc->base);
2250
2251         platform_set_drvdata(pdev, ecc);
2252
2253         /* Get eDMA3 configuration from IP */
2254         ret = edma_setup_from_hw(dev, info, ecc);
2255         if (ret)
2256                 return ret;
2257
2258         /* Allocate memory based on the information we got from the IP */
2259         ecc->slave_chans = devm_kcalloc(dev, ecc->num_channels,
2260                                         sizeof(*ecc->slave_chans), GFP_KERNEL);
2261         if (!ecc->slave_chans)
2262                 return -ENOMEM;
2263
2264         ecc->slot_inuse = devm_kcalloc(dev, BITS_TO_LONGS(ecc->num_slots),
2265                                        sizeof(unsigned long), GFP_KERNEL);
2266         if (!ecc->slot_inuse)
2267                 return -ENOMEM;
2268
2269         ecc->default_queue = info->default_queue;
2270
2271         if (info->rsv) {
2272                 /* Set the reserved slots in inuse list */
2273                 rsv_slots = info->rsv->rsv_slots;
2274                 if (rsv_slots) {
2275                         for (i = 0; rsv_slots[i][0] != -1; i++) {
2276                                 off = rsv_slots[i][0];
2277                                 ln = rsv_slots[i][1];
2278                                 edma_set_bits(off, ln, ecc->slot_inuse);
2279                         }
2280                 }
2281         }
2282
2283         for (i = 0; i < ecc->num_slots; i++) {
2284                 /* Reset only unused - not reserved - PaRAM slots */
2285                 if (!test_bit(i, ecc->slot_inuse))
2286                         edma_write_slot(ecc, i, &dummy_paramset);
2287         }
2288
2289         /* Clear the xbar mapped channels in unused list */
2290         xbar_chans = info->xbar_chans;
2291         if (xbar_chans) {
2292                 for (i = 0; xbar_chans[i][1] != -1; i++) {
2293                         off = xbar_chans[i][1];
2294                 }
2295         }
2296
2297         irq = platform_get_irq_byname(pdev, "edma3_ccint");
2298         if (irq < 0 && node)
2299                 irq = irq_of_parse_and_map(node, 0);
2300
2301         if (irq >= 0) {
2302                 irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccint",
2303                                           dev_name(dev));
2304                 ret = devm_request_irq(dev, irq, dma_irq_handler, 0, irq_name,
2305                                        ecc);
2306                 if (ret) {
2307                         dev_err(dev, "CCINT (%d) failed --> %d\n", irq, ret);
2308                         return ret;
2309                 }
2310                 ecc->ccint = irq;
2311         }
2312
2313         irq = platform_get_irq_byname(pdev, "edma3_ccerrint");
2314         if (irq < 0 && node)
2315                 irq = irq_of_parse_and_map(node, 2);
2316
2317         if (irq >= 0) {
2318                 irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccerrint",
2319                                           dev_name(dev));
2320                 ret = devm_request_irq(dev, irq, dma_ccerr_handler, 0, irq_name,
2321                                        ecc);
2322                 if (ret) {
2323                         dev_err(dev, "CCERRINT (%d) failed --> %d\n", irq, ret);
2324                         return ret;
2325                 }
2326                 ecc->ccerrint = irq;
2327         }
2328
2329         ecc->dummy_slot = edma_alloc_slot(ecc, EDMA_SLOT_ANY);
2330         if (ecc->dummy_slot < 0) {
2331                 dev_err(dev, "Can't allocate PaRAM dummy slot\n");
2332                 return ecc->dummy_slot;
2333         }
2334
2335         queue_priority_mapping = info->queue_priority_mapping;
2336
2337         if (!ecc->legacy_mode) {
2338                 int lowest_priority = 0;
2339                 struct of_phandle_args tc_args;
2340
2341                 ecc->tc_list = devm_kcalloc(dev, ecc->num_tc,
2342                                             sizeof(*ecc->tc_list), GFP_KERNEL);
2343                 if (!ecc->tc_list) {
2344                         ret = -ENOMEM;
2345                         goto err_reg1;
2346                 }
2347
2348                 for (i = 0;; i++) {
2349                         ret = of_parse_phandle_with_fixed_args(node, "ti,tptcs",
2350                                                                1, i, &tc_args);
2351                         if (ret || i == ecc->num_tc)
2352                                 break;
2353
2354                         ecc->tc_list[i].node = tc_args.np;
2355                         ecc->tc_list[i].id = i;
2356                         queue_priority_mapping[i][1] = tc_args.args[0];
2357                         if (queue_priority_mapping[i][1] > lowest_priority) {
2358                                 lowest_priority = queue_priority_mapping[i][1];
2359                                 info->default_queue = i;
2360                         }
2361                 }
2362         }
2363
2364         /* Event queue priority mapping */
2365         for (i = 0; queue_priority_mapping[i][0] != -1; i++)
2366                 edma_assign_priority_to_queue(ecc, queue_priority_mapping[i][0],
2367                                               queue_priority_mapping[i][1]);
2368
2369         for (i = 0; i < ecc->num_region; i++) {
2370                 edma_write_array2(ecc, EDMA_DRAE, i, 0, 0x0);
2371                 edma_write_array2(ecc, EDMA_DRAE, i, 1, 0x0);
2372                 edma_write_array(ecc, EDMA_QRAE, i, 0x0);
2373         }
2374         ecc->info = info;
2375
2376         /* Init the dma device and channels */
2377         edma_dma_init(ecc, legacy_mode);
2378
2379         for (i = 0; i < ecc->num_channels; i++) {
2380                 /* Assign all channels to the default queue */
2381                 edma_assign_channel_eventq(&ecc->slave_chans[i],
2382                                            info->default_queue);
2383                 /* Set entry slot to the dummy slot */
2384                 edma_set_chmap(&ecc->slave_chans[i], ecc->dummy_slot);
2385         }
2386
2387         ecc->dma_slave.filter.map = info->slave_map;
2388         ecc->dma_slave.filter.mapcnt = info->slavecnt;
2389         ecc->dma_slave.filter.fn = edma_filter_fn;
2390
2391         ret = dma_async_device_register(&ecc->dma_slave);
2392         if (ret) {
2393                 dev_err(dev, "slave ddev registration failed (%d)\n", ret);
2394                 goto err_reg1;
2395         }
2396
2397         if (ecc->dma_memcpy) {
2398                 ret = dma_async_device_register(ecc->dma_memcpy);
2399                 if (ret) {
2400                         dev_err(dev, "memcpy ddev registration failed (%d)\n",
2401                                 ret);
2402                         dma_async_device_unregister(&ecc->dma_slave);
2403                         goto err_reg1;
2404                 }
2405         }
2406
2407         if (node)
2408                 of_dma_controller_register(node, of_edma_xlate, ecc);
2409
2410         dev_info(dev, "TI EDMA DMA engine driver\n");
2411
2412         return 0;
2413
2414 err_reg1:
2415         edma_free_slot(ecc, ecc->dummy_slot);
2416         return ret;
2417 }
2418
2419 static void edma_cleanup_vchan(struct dma_device *dmadev)
2420 {
2421         struct edma_chan *echan, *_echan;
2422
2423         list_for_each_entry_safe(echan, _echan,
2424                         &dmadev->channels, vchan.chan.device_node) {
2425                 list_del(&echan->vchan.chan.device_node);
2426                 tasklet_kill(&echan->vchan.task);
2427         }
2428 }
2429
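/*
 * Free the interrupts before killing the vchan tasklets so that no new
 * completion or error work can be scheduled while the channels are
 * being torn down.
 */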
2430 static int edma_remove(struct platform_device *pdev)
2431 {
2432         struct device *dev = &pdev->dev;
2433         struct edma_cc *ecc = dev_get_drvdata(dev);
2434
2435         devm_free_irq(dev, ecc->ccint, ecc);
2436         devm_free_irq(dev, ecc->ccerrint, ecc);
2437
2438         edma_cleanup_vchan(&ecc->dma_slave);
2439
2440         if (dev->of_node)
2441                 of_dma_controller_free(dev->of_node);
2442         dma_async_device_unregister(&ecc->dma_slave);
2443         if (ecc->dma_memcpy)
2444                 dma_async_device_unregister(ecc->dma_memcpy);
2445         edma_free_slot(ecc, ecc->dummy_slot);
2446
2447         return 0;
2448 }
2449
2450 #ifdef CONFIG_PM_SLEEP
2451 static int edma_pm_suspend(struct device *dev)
2452 {
2453         struct edma_cc *ecc = dev_get_drvdata(dev);
2454         struct edma_chan *echan = ecc->slave_chans;
2455         int i;
2456
2457         for (i = 0; i < ecc->num_channels; i++) {
2458                 if (echan[i].alloced)
2459                         edma_setup_interrupt(&echan[i], false);
2460         }
2461
2462         return 0;
2463 }
2464
2465 static int edma_pm_resume(struct device *dev)
2466 {
2467         struct edma_cc *ecc = dev_get_drvdata(dev);
2468         struct edma_chan *echan = ecc->slave_chans;
2469         int i;
2470         s8 (*queue_priority_mapping)[2];
2471
2472         /* Re-initialize the dummy slot to the dummy PaRAM set */
2473         edma_write_slot(ecc, ecc->dummy_slot, &dummy_paramset);
2474
2475         queue_priority_mapping = ecc->info->queue_priority_mapping;
2476
2477         /* Event queue priority mapping */
2478         for (i = 0; queue_priority_mapping[i][0] != -1; i++)
2479                 edma_assign_priority_to_queue(ecc, queue_priority_mapping[i][0],
2480                                               queue_priority_mapping[i][1]);
2481
2482         for (i = 0; i < ecc->num_channels; i++) {
2483                 if (echan[i].alloced) {
2484                         /* ensure access through shadow region 0 */
2485                         edma_or_array2(ecc, EDMA_DRAE, 0, i >> 5,
2486                                        BIT(i & 0x1f));
2487
2488                         edma_setup_interrupt(&echan[i], true);
2489
2490                         /* Set up channel -> slot mapping for the entry slot */
2491                         edma_set_chmap(&echan[i], echan[i].slot[0]);
2492                 }
2493         }
2494
2495         return 0;
2496 }
2497 #endif
2498
2499 static const struct dev_pm_ops edma_pm_ops = {
2500         SET_LATE_SYSTEM_SLEEP_PM_OPS(edma_pm_suspend, edma_pm_resume)
2501 };
2502
2503 static struct platform_driver edma_driver = {
2504         .probe          = edma_probe,
2505         .remove         = edma_remove,
2506         .driver = {
2507                 .name   = "edma",
2508                 .pm     = &edma_pm_ops,
2509                 .of_match_table = edma_of_ids,
2510         },
2511 };
2512
2513 static int edma_tptc_probe(struct platform_device *pdev)
2514 {
2515         pm_runtime_enable(&pdev->dev);
2516         return pm_runtime_get_sync(&pdev->dev);
2517 }
2518
2519 static struct platform_driver edma_tptc_driver = {
2520         .probe          = edma_tptc_probe,
2521         .driver = {
2522                 .name   = "edma3-tptc",
2523                 .of_match_table = edma_tptc_of_ids,
2524         },
2525 };
2526
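/*
 * Legacy filter for requesting a specific hardware channel by number.
 * Illustrative usage through the standard dmaengine API (the channel
 * number is hypothetical):
 *
 *	dma_cap_mask_t mask;
 *	unsigned int ch = 12;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, edma_filter_fn, &ch);
 */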
2527 bool edma_filter_fn(struct dma_chan *chan, void *param)
2528 {
2529         bool match = false;
2530
2531         if (chan->device->dev->driver == &edma_driver.driver) {
2532                 struct edma_chan *echan = to_edma_chan(chan);
2533                 unsigned ch_req = *(unsigned *)param;
2534                 if (ch_req == echan->ch_num) {
2535                         /* The channel is going to be used as HW synchronized */
2536                         echan->hw_triggered = true;
2537                         match = true;
2538                 }
2539         }
2540         return match;
2541 }
2542 EXPORT_SYMBOL(edma_filter_fn);
2543
2544 static int edma_init(void)
2545 {
2546         int ret;
2547
2548         ret = platform_driver_register(&edma_tptc_driver);
2549         if (ret)
2550                 return ret;
2551
2552         return platform_driver_register(&edma_driver);
2553 }
2554 subsys_initcall(edma_init);
2555
2556 static void __exit edma_exit(void)
2557 {
2558         platform_driver_unregister(&edma_driver);
2559         platform_driver_unregister(&edma_tptc_driver);
2560 }
2561 module_exit(edma_exit);
2562
2563 MODULE_AUTHOR("Matt Porter <matt.porter@linaro.org>");
2564 MODULE_DESCRIPTION("TI EDMA DMA engine driver");
2565 MODULE_LICENSE("GPL v2");