GNU Linux-libre 6.1.24-gnu
drivers/dma/ti/k3-udma.c
// SPDX-License-Identifier: GPL-2.0
/*
 *  Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
 *  Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/sys_soc.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/soc/ti/k3-ringacc.h>
#include <linux/soc/ti/ti_sci_protocol.h>
#include <linux/soc/ti/ti_sci_inta_msi.h>
#include <linux/dma/k3-event-router.h>
#include <linux/dma/ti-cppi5.h>

#include "../virt-dma.h"
#include "k3-udma.h"
#include "k3-psil-priv.h"

struct udma_static_tr {
        u8 elsize; /* RPSTR0 */
        u16 elcnt; /* RPSTR0 */
        u16 bstcnt; /* RPSTR1 */
};

#define K3_UDMA_MAX_RFLOWS              1024
#define K3_UDMA_DEFAULT_RING_SIZE       16

/* How SRC/DST tag should be updated by UDMA in the descriptor's Word 3 */
#define UDMA_RFLOW_SRCTAG_NONE          0
#define UDMA_RFLOW_SRCTAG_CFG_TAG       1
#define UDMA_RFLOW_SRCTAG_FLOW_ID       2
#define UDMA_RFLOW_SRCTAG_SRC_TAG       4

#define UDMA_RFLOW_DSTTAG_NONE          0
#define UDMA_RFLOW_DSTTAG_CFG_TAG       1
#define UDMA_RFLOW_DSTTAG_FLOW_ID       2
#define UDMA_RFLOW_DSTTAG_DST_TAG_LO    4
#define UDMA_RFLOW_DSTTAG_DST_TAG_HI    5

struct udma_chan;

enum k3_dma_type {
        DMA_TYPE_UDMA = 0,
        DMA_TYPE_BCDMA,
        DMA_TYPE_PKTDMA,
};

enum udma_mmr {
        MMR_GCFG = 0,
        MMR_BCHANRT,
        MMR_RCHANRT,
        MMR_TCHANRT,
        MMR_LAST,
};

static const char * const mmr_names[] = {
        [MMR_GCFG] = "gcfg",
        [MMR_BCHANRT] = "bchanrt",
        [MMR_RCHANRT] = "rchanrt",
        [MMR_TCHANRT] = "tchanrt",
};

struct udma_tchan {
        void __iomem *reg_rt;

        int id;
        struct k3_ring *t_ring; /* Transmit ring */
        struct k3_ring *tc_ring; /* Transmit Completion ring */
        int tflow_id; /* applicable only for PKTDMA */

};

#define udma_bchan udma_tchan

struct udma_rflow {
        int id;
        struct k3_ring *fd_ring; /* Free Descriptor ring */
        struct k3_ring *r_ring; /* Receive ring */
};

struct udma_rchan {
        void __iomem *reg_rt;

        int id;
};

struct udma_oes_offsets {
        /* K3 UDMA Output Event Offset */
        u32 udma_rchan;

        /* BCDMA Output Event Offsets */
        u32 bcdma_bchan_data;
        u32 bcdma_bchan_ring;
        u32 bcdma_tchan_data;
        u32 bcdma_tchan_ring;
        u32 bcdma_rchan_data;
        u32 bcdma_rchan_ring;

        /* PKTDMA Output Event Offsets */
        u32 pktdma_tchan_flow;
        u32 pktdma_rchan_flow;
};

#define UDMA_FLAG_PDMA_ACC32            BIT(0)
#define UDMA_FLAG_PDMA_BURST            BIT(1)
#define UDMA_FLAG_TDTYPE                BIT(2)
#define UDMA_FLAG_BURST_SIZE            BIT(3)
#define UDMA_FLAGS_J7_CLASS             (UDMA_FLAG_PDMA_ACC32 | \
                                         UDMA_FLAG_PDMA_BURST | \
                                         UDMA_FLAG_TDTYPE | \
                                         UDMA_FLAG_BURST_SIZE)

struct udma_match_data {
        enum k3_dma_type type;
        u32 psil_base;
        bool enable_memcpy_support;
        u32 flags;
        u32 statictr_z_mask;
        u8 burst_size[3];
};

struct udma_soc_data {
        struct udma_oes_offsets oes;
        u32 bcdma_trigger_event_offset;
};

struct udma_hwdesc {
        size_t cppi5_desc_size;
        void *cppi5_desc_vaddr;
        dma_addr_t cppi5_desc_paddr;

        /* TR descriptor internal pointers */
        void *tr_req_base;
        struct cppi5_tr_resp_t *tr_resp_base;
};

struct udma_rx_flush {
        struct udma_hwdesc hwdescs[2];

        size_t buffer_size;
        void *buffer_vaddr;
        dma_addr_t buffer_paddr;
};

struct udma_tpl {
        u8 levels;
        u32 start_idx[3];
};

struct udma_dev {
        struct dma_device ddev;
        struct device *dev;
        void __iomem *mmrs[MMR_LAST];
        const struct udma_match_data *match_data;
        const struct udma_soc_data *soc_data;

        struct udma_tpl bchan_tpl;
        struct udma_tpl tchan_tpl;
        struct udma_tpl rchan_tpl;

        size_t desc_align; /* alignment to use for descriptors */

        struct udma_tisci_rm tisci_rm;

        struct k3_ringacc *ringacc;

        struct work_struct purge_work;
        struct list_head desc_to_purge;
        spinlock_t lock;

        struct udma_rx_flush rx_flush;

        int bchan_cnt;
        int tchan_cnt;
        int echan_cnt;
        int rchan_cnt;
        int rflow_cnt;
        int tflow_cnt;
        unsigned long *bchan_map;
        unsigned long *tchan_map;
        unsigned long *rchan_map;
        unsigned long *rflow_gp_map;
        unsigned long *rflow_gp_map_allocated;
        unsigned long *rflow_in_use;
        unsigned long *tflow_map;

        struct udma_bchan *bchans;
        struct udma_tchan *tchans;
        struct udma_rchan *rchans;
        struct udma_rflow *rflows;

        struct udma_chan *channels;
        u32 psil_base;
        u32 atype;
        u32 asel;
};

struct udma_desc {
        struct virt_dma_desc vd;

        bool terminated;

        enum dma_transfer_direction dir;

        struct udma_static_tr static_tr;
        u32 residue;

        unsigned int sglen;
        unsigned int desc_idx; /* Only used for cyclic in packet mode */
        unsigned int tr_idx;

        u32 metadata_size;
        void *metadata; /* pointer to provided metadata buffer (EPIB, PSdata) */

        unsigned int hwdesc_count;
        struct udma_hwdesc hwdesc[];
};

enum udma_chan_state {
        UDMA_CHAN_IS_IDLE = 0, /* not active, no teardown is in progress */
        UDMA_CHAN_IS_ACTIVE, /* Normal operation */
        UDMA_CHAN_IS_TERMINATING, /* channel is being terminated */
};

struct udma_tx_drain {
        struct delayed_work work;
        ktime_t tstamp;
        u32 residue;
};

struct udma_chan_config {
        bool pkt_mode; /* TR or packet */
        bool needs_epib; /* EPIB is needed for the communication or not */
        u32 psd_size; /* size of Protocol Specific Data */
        u32 metadata_size; /* (needs_epib ? 16:0) + psd_size */
        u32 hdesc_size; /* Size of a packet descriptor in packet mode */
        bool notdpkt; /* Suppress sending TDC packet */
        int remote_thread_id;
        u32 atype;
        u32 asel;
        u32 src_thread;
        u32 dst_thread;
        enum psil_endpoint_type ep_type;
        bool enable_acc32;
        bool enable_burst;
        enum udma_tp_level channel_tpl; /* Channel Throughput Level */

        u32 tr_trigger_type;
        unsigned long tx_flags;

        /* PKTDMA mapped channel */
        int mapped_channel_id;
        /* PKTDMA default tflow or rflow for mapped channel */
        int default_flow_id;

        enum dma_transfer_direction dir;
};

struct udma_chan {
        struct virt_dma_chan vc;
        struct dma_slave_config cfg;
        struct udma_dev *ud;
        struct device *dma_dev;
        struct udma_desc *desc;
        struct udma_desc *terminated_desc;
        struct udma_static_tr static_tr;
        char *name;

        struct udma_bchan *bchan;
        struct udma_tchan *tchan;
        struct udma_rchan *rchan;
        struct udma_rflow *rflow;

        bool psil_paired;

        int irq_num_ring;
        int irq_num_udma;

        bool cyclic;
        bool paused;

        enum udma_chan_state state;
        struct completion teardown_completed;

        struct udma_tx_drain tx_drain;

        /* Channel configuration parameters */
        struct udma_chan_config config;

        /* dmapool for packet mode descriptors */
        bool use_dma_pool;
        struct dma_pool *hdesc_pool;

        u32 id;
};

static inline struct udma_dev *to_udma_dev(struct dma_device *d)
{
        return container_of(d, struct udma_dev, ddev);
}

static inline struct udma_chan *to_udma_chan(struct dma_chan *c)
{
        return container_of(c, struct udma_chan, vc.chan);
}

static inline struct udma_desc *to_udma_desc(struct dma_async_tx_descriptor *t)
{
        return container_of(t, struct udma_desc, vd.tx);
}

/* Generic register access functions */
static inline u32 udma_read(void __iomem *base, int reg)
{
        return readl(base + reg);
}

static inline void udma_write(void __iomem *base, int reg, u32 val)
{
        writel(val, base + reg);
}

static inline void udma_update_bits(void __iomem *base, int reg,
                                    u32 mask, u32 val)
{
        u32 tmp, orig;

        orig = readl(base + reg);
        tmp = orig & ~mask;
        tmp |= (val & mask);

        if (tmp != orig)
                writel(tmp, base + reg);
}

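/*
 * Illustrative sketch (editorial note, not part of the driver):
 * udma_update_bits() is a read-modify-write helper that skips the
 * register write when the masked field already holds the requested
 * value. Setting only the PAUSE bit of the CTL register while leaving
 * the rest untouched would look like:
 *
 *      udma_update_bits(uc->tchan->reg_rt, UDMA_CHAN_RT_CTL_REG,
 *                       UDMA_CHAN_RT_CTL_PAUSE, UDMA_CHAN_RT_CTL_PAUSE);
 */
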
/* TCHANRT */
static inline u32 udma_tchanrt_read(struct udma_chan *uc, int reg)
{
        if (!uc->tchan)
                return 0;
        return udma_read(uc->tchan->reg_rt, reg);
}

static inline void udma_tchanrt_write(struct udma_chan *uc, int reg, u32 val)
{
        if (!uc->tchan)
                return;
        udma_write(uc->tchan->reg_rt, reg, val);
}

static inline void udma_tchanrt_update_bits(struct udma_chan *uc, int reg,
                                            u32 mask, u32 val)
{
        if (!uc->tchan)
                return;
        udma_update_bits(uc->tchan->reg_rt, reg, mask, val);
}

/* RCHANRT */
static inline u32 udma_rchanrt_read(struct udma_chan *uc, int reg)
{
        if (!uc->rchan)
                return 0;
        return udma_read(uc->rchan->reg_rt, reg);
}

static inline void udma_rchanrt_write(struct udma_chan *uc, int reg, u32 val)
{
        if (!uc->rchan)
                return;
        udma_write(uc->rchan->reg_rt, reg, val);
}

static inline void udma_rchanrt_update_bits(struct udma_chan *uc, int reg,
                                            u32 mask, u32 val)
{
        if (!uc->rchan)
                return;
        udma_update_bits(uc->rchan->reg_rt, reg, mask, val);
}

static int navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread)
{
        struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;

        dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
        return tisci_rm->tisci_psil_ops->pair(tisci_rm->tisci,
                                              tisci_rm->tisci_navss_dev_id,
                                              src_thread, dst_thread);
}

static int navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
                             u32 dst_thread)
{
        struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;

        dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
        return tisci_rm->tisci_psil_ops->unpair(tisci_rm->tisci,
                                                tisci_rm->tisci_navss_dev_id,
                                                src_thread, dst_thread);
}

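/*
 * Illustrative sketch (editorial note, not part of the driver): PSI-L
 * pairing goes through TI-SCI and links a source thread to a
 * destination thread; the destination is marked by OR-ing in
 * K3_PSIL_DST_THREAD_ID_OFFSET before the call. A channel would
 * typically be paired with the threads kept in its configuration:
 *
 *      ret = navss_psil_pair(ud, uc->config.src_thread,
 *                            uc->config.dst_thread);
 *      if (!ret)
 *              uc->psil_paired = true;
 */
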
static void k3_configure_chan_coherency(struct dma_chan *chan, u32 asel)
{
        struct device *chan_dev = &chan->dev->device;

        if (asel == 0) {
                /* No special handling for the channel */
                chan->dev->chan_dma_dev = false;

                chan_dev->dma_coherent = false;
                chan_dev->dma_parms = NULL;
        } else if (asel == 14 || asel == 15) {
                chan->dev->chan_dma_dev = true;

                chan_dev->dma_coherent = true;
                dma_coerce_mask_and_coherent(chan_dev, DMA_BIT_MASK(48));
                chan_dev->dma_parms = chan_dev->parent->dma_parms;
        } else {
                dev_warn(chan->device->dev, "Invalid ASEL value: %u\n", asel);

                chan_dev->dma_coherent = false;
                chan_dev->dma_parms = NULL;
        }
}

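/*
 * Illustrative sketch (editorial note, not part of the driver): only
 * ASEL values 14 and 15 route the channel as DMA coherent; everything
 * else is treated as non-coherent. Ring setup later in this file pairs
 * the call with the ring configuration, roughly:
 *
 *      k3_configure_chan_coherency(&uc->vc.chan, ud->asel);
 *      ring_cfg.asel = ud->asel;
 *      ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan);
 */
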
static u8 udma_get_chan_tpl_index(struct udma_tpl *tpl_map, int chan_id)
{
        int i;

        for (i = 0; i < tpl_map->levels; i++) {
                if (chan_id >= tpl_map->start_idx[i])
                        return i;
        }

        return 0;
}

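/*
 * Illustrative example (editorial note, not part of the driver): the
 * lookup returns the first level whose start index the channel id
 * reaches; the higher-throughput channels occupy the lower ids, so a
 * larger level index means higher throughput. With a hypothetical map
 * of { .levels = 2, .start_idx = { 4, 0 } }:
 *
 *      udma_get_chan_tpl_index(&tpl, 5);       returns 0 (normal)
 *      udma_get_chan_tpl_index(&tpl, 2);       returns 1 (high TPL)
 */
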
static void udma_reset_uchan(struct udma_chan *uc)
{
        memset(&uc->config, 0, sizeof(uc->config));
        uc->config.remote_thread_id = -1;
        uc->config.mapped_channel_id = -1;
        uc->config.default_flow_id = -1;
        uc->state = UDMA_CHAN_IS_IDLE;
}

static void udma_dump_chan_stdata(struct udma_chan *uc)
{
        struct device *dev = uc->ud->dev;
        u32 offset;
        int i;

        if (uc->config.dir == DMA_MEM_TO_DEV || uc->config.dir == DMA_MEM_TO_MEM) {
                dev_dbg(dev, "TCHAN State data:\n");
                for (i = 0; i < 32; i++) {
                        offset = UDMA_CHAN_RT_STDATA_REG + i * 4;
                        dev_dbg(dev, "TRT_STDATA[%02d]: 0x%08x\n", i,
                                udma_tchanrt_read(uc, offset));
                }
        }

        if (uc->config.dir == DMA_DEV_TO_MEM || uc->config.dir == DMA_MEM_TO_MEM) {
                dev_dbg(dev, "RCHAN State data:\n");
                for (i = 0; i < 32; i++) {
                        offset = UDMA_CHAN_RT_STDATA_REG + i * 4;
                        dev_dbg(dev, "RRT_STDATA[%02d]: 0x%08x\n", i,
                                udma_rchanrt_read(uc, offset));
                }
        }
}

static inline dma_addr_t udma_curr_cppi5_desc_paddr(struct udma_desc *d,
                                                    int idx)
{
        return d->hwdesc[idx].cppi5_desc_paddr;
}

static inline void *udma_curr_cppi5_desc_vaddr(struct udma_desc *d, int idx)
{
        return d->hwdesc[idx].cppi5_desc_vaddr;
}

static struct udma_desc *udma_udma_desc_from_paddr(struct udma_chan *uc,
                                                   dma_addr_t paddr)
{
        struct udma_desc *d = uc->terminated_desc;

        if (d) {
                dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
                                                                   d->desc_idx);

                if (desc_paddr != paddr)
                        d = NULL;
        }

        if (!d) {
                d = uc->desc;
                if (d) {
                        dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
                                                                d->desc_idx);

                        if (desc_paddr != paddr)
                                d = NULL;
                }
        }

        return d;
}

static void udma_free_hwdesc(struct udma_chan *uc, struct udma_desc *d)
{
        if (uc->use_dma_pool) {
                int i;

                for (i = 0; i < d->hwdesc_count; i++) {
                        if (!d->hwdesc[i].cppi5_desc_vaddr)
                                continue;

                        dma_pool_free(uc->hdesc_pool,
                                      d->hwdesc[i].cppi5_desc_vaddr,
                                      d->hwdesc[i].cppi5_desc_paddr);

                        d->hwdesc[i].cppi5_desc_vaddr = NULL;
                }
        } else if (d->hwdesc[0].cppi5_desc_vaddr) {
                dma_free_coherent(uc->dma_dev, d->hwdesc[0].cppi5_desc_size,
                                  d->hwdesc[0].cppi5_desc_vaddr,
                                  d->hwdesc[0].cppi5_desc_paddr);

                d->hwdesc[0].cppi5_desc_vaddr = NULL;
        }
}

static void udma_purge_desc_work(struct work_struct *work)
{
        struct udma_dev *ud = container_of(work, typeof(*ud), purge_work);
        struct virt_dma_desc *vd, *_vd;
        unsigned long flags;
        LIST_HEAD(head);

        spin_lock_irqsave(&ud->lock, flags);
        list_splice_tail_init(&ud->desc_to_purge, &head);
        spin_unlock_irqrestore(&ud->lock, flags);

        list_for_each_entry_safe(vd, _vd, &head, node) {
                struct udma_chan *uc = to_udma_chan(vd->tx.chan);
                struct udma_desc *d = to_udma_desc(&vd->tx);

                udma_free_hwdesc(uc, d);
                list_del(&vd->node);
                kfree(d);
        }

        /* If more to purge, schedule the work again */
        if (!list_empty(&ud->desc_to_purge))
                schedule_work(&ud->purge_work);
}

static void udma_desc_free(struct virt_dma_desc *vd)
{
        struct udma_dev *ud = to_udma_dev(vd->tx.chan->device);
        struct udma_chan *uc = to_udma_chan(vd->tx.chan);
        struct udma_desc *d = to_udma_desc(&vd->tx);
        unsigned long flags;

        if (uc->terminated_desc == d)
                uc->terminated_desc = NULL;

        if (uc->use_dma_pool) {
                udma_free_hwdesc(uc, d);
                kfree(d);
                return;
        }

        spin_lock_irqsave(&ud->lock, flags);
        list_add_tail(&vd->node, &ud->desc_to_purge);
        spin_unlock_irqrestore(&ud->lock, flags);

        schedule_work(&ud->purge_work);
}

static bool udma_is_chan_running(struct udma_chan *uc)
{
        u32 trt_ctl = 0;
        u32 rrt_ctl = 0;

        if (uc->tchan)
                trt_ctl = udma_tchanrt_read(uc, UDMA_CHAN_RT_CTL_REG);
        if (uc->rchan)
                rrt_ctl = udma_rchanrt_read(uc, UDMA_CHAN_RT_CTL_REG);

        if (trt_ctl & UDMA_CHAN_RT_CTL_EN || rrt_ctl & UDMA_CHAN_RT_CTL_EN)
                return true;

        return false;
}

static bool udma_is_chan_paused(struct udma_chan *uc)
{
        u32 val, pause_mask;

        switch (uc->config.dir) {
        case DMA_DEV_TO_MEM:
                val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PEER_RT_EN_REG);
                pause_mask = UDMA_PEER_RT_EN_PAUSE;
                break;
        case DMA_MEM_TO_DEV:
                val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_RT_EN_REG);
                pause_mask = UDMA_PEER_RT_EN_PAUSE;
                break;
        case DMA_MEM_TO_MEM:
                val = udma_tchanrt_read(uc, UDMA_CHAN_RT_CTL_REG);
                pause_mask = UDMA_CHAN_RT_CTL_PAUSE;
                break;
        default:
                return false;
        }

        if (val & pause_mask)
                return true;

        return false;
}

static inline dma_addr_t udma_get_rx_flush_hwdesc_paddr(struct udma_chan *uc)
{
        return uc->ud->rx_flush.hwdescs[uc->config.pkt_mode].cppi5_desc_paddr;
}

static int udma_push_to_ring(struct udma_chan *uc, int idx)
{
        struct udma_desc *d = uc->desc;
        struct k3_ring *ring = NULL;
        dma_addr_t paddr;

        switch (uc->config.dir) {
        case DMA_DEV_TO_MEM:
                ring = uc->rflow->fd_ring;
                break;
        case DMA_MEM_TO_DEV:
        case DMA_MEM_TO_MEM:
                ring = uc->tchan->t_ring;
                break;
        default:
                return -EINVAL;
        }

        /* RX flush packet: idx == -1 is only passed in case of DEV_TO_MEM */
        if (idx == -1) {
                paddr = udma_get_rx_flush_hwdesc_paddr(uc);
        } else {
                paddr = udma_curr_cppi5_desc_paddr(d, idx);

                wmb(); /* Ensure that writes are not moved over this point */
        }

        return k3_ringacc_ring_push(ring, &paddr);
}

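/*
 * Illustrative sketch (editorial note, not part of the driver): which
 * ring gets the descriptor depends only on the direction. DEV_TO_MEM
 * feeds free descriptors to the rflow's fd_ring, while TX and memcpy
 * push to the tchan's t_ring:
 *
 *      udma_push_to_ring(uc, 0);       pushes hwdesc[0] of uc->desc
 *      udma_push_to_ring(uc, -1);      pushes the shared RX flush
 *                                      descriptor (DEV_TO_MEM teardown)
 */
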
static bool udma_desc_is_rx_flush(struct udma_chan *uc, dma_addr_t addr)
{
        if (uc->config.dir != DMA_DEV_TO_MEM)
                return false;

        if (addr == udma_get_rx_flush_hwdesc_paddr(uc))
                return true;

        return false;
}

static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
{
        struct k3_ring *ring = NULL;
        int ret;

        switch (uc->config.dir) {
        case DMA_DEV_TO_MEM:
                ring = uc->rflow->r_ring;
                break;
        case DMA_MEM_TO_DEV:
        case DMA_MEM_TO_MEM:
                ring = uc->tchan->tc_ring;
                break;
        default:
                return -ENOENT;
        }

        ret = k3_ringacc_ring_pop(ring, addr);
        if (ret)
                return ret;

        rmb(); /* Ensure that reads are not moved before this point */

        /* Teardown completion */
        if (cppi5_desc_is_tdcm(*addr))
                return 0;

        /* Check for flush descriptor */
        if (udma_desc_is_rx_flush(uc, *addr))
                return -ENOENT;

        return 0;
}

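/*
 * Illustrative sketch (editorial note, not part of the driver): a
 * popped address has to be classified before use, which is how the
 * ring IRQ handler below consumes it:
 *
 *      dma_addr_t paddr;
 *
 *      if (udma_pop_from_ring(uc, &paddr) || !paddr)
 *              return;                         nothing usable popped
 *      if (cppi5_desc_is_tdcm(paddr))
 *              ...                             teardown completion
 *      else
 *              ...                             completed descriptor
 */
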
static void udma_reset_rings(struct udma_chan *uc)
{
        struct k3_ring *ring1 = NULL;
        struct k3_ring *ring2 = NULL;

        switch (uc->config.dir) {
        case DMA_DEV_TO_MEM:
                if (uc->rchan) {
                        ring1 = uc->rflow->fd_ring;
                        ring2 = uc->rflow->r_ring;
                }
                break;
        case DMA_MEM_TO_DEV:
        case DMA_MEM_TO_MEM:
                if (uc->tchan) {
                        ring1 = uc->tchan->t_ring;
                        ring2 = uc->tchan->tc_ring;
                }
                break;
        default:
                break;
        }

        if (ring1)
                k3_ringacc_ring_reset_dma(ring1,
                                          k3_ringacc_ring_get_occ(ring1));
        if (ring2)
                k3_ringacc_ring_reset(ring2);

        /* make sure we are not leaking memory due to a stalled descriptor */
        if (uc->terminated_desc) {
                udma_desc_free(&uc->terminated_desc->vd);
                uc->terminated_desc = NULL;
        }
}

static void udma_decrement_byte_counters(struct udma_chan *uc, u32 val)
{
        if (uc->desc->dir == DMA_DEV_TO_MEM) {
                udma_rchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);
                udma_rchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);
                if (uc->config.ep_type != PSIL_EP_NATIVE)
                        udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
        } else {
                udma_tchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);
                udma_tchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);
                if (!uc->bchan && uc->config.ep_type != PSIL_EP_NATIVE)
                        udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
        }
}

static void udma_reset_counters(struct udma_chan *uc)
{
        u32 val;

        if (uc->tchan) {
                val = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
                udma_tchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);

                val = udma_tchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG);
                udma_tchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);

                val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PCNT_REG);
                udma_tchanrt_write(uc, UDMA_CHAN_RT_PCNT_REG, val);

                if (!uc->bchan) {
                        val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
                        udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
                }
        }

        if (uc->rchan) {
                val = udma_rchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
                udma_rchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);

                val = udma_rchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG);
                udma_rchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);

                val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PCNT_REG);
                udma_rchanrt_write(uc, UDMA_CHAN_RT_PCNT_REG, val);

                val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
                udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
        }
}

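/*
 * Illustrative note (editorial, not part of the driver): the RT
 * counters decrement by the value written to them. Writing back the
 * value just read therefore zeroes a counter, and writing a completed
 * descriptor's residue subtracts exactly that transfer's bytes:
 *
 *      val = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
 *      udma_tchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);     now 0
 */
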
static int udma_reset_chan(struct udma_chan *uc, bool hard)
{
        switch (uc->config.dir) {
        case DMA_DEV_TO_MEM:
                udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
                udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
                break;
        case DMA_MEM_TO_DEV:
                udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
                udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
                break;
        case DMA_MEM_TO_MEM:
                udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
                udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
                break;
        default:
                return -EINVAL;
        }

        /* Reset all counters */
        udma_reset_counters(uc);

        /* Hard reset: re-initialize the channel to fully reset it */
        if (hard) {
                struct udma_chan_config ucc_backup;
                int ret;

                memcpy(&ucc_backup, &uc->config, sizeof(uc->config));
                uc->ud->ddev.device_free_chan_resources(&uc->vc.chan);

                /* restore the channel configuration */
                memcpy(&uc->config, &ucc_backup, sizeof(uc->config));
                ret = uc->ud->ddev.device_alloc_chan_resources(&uc->vc.chan);
                if (ret)
                        return ret;

                /*
                 * Setting forced teardown after forced reset helps
                 * recover the rchan.
                 */
                if (uc->config.dir == DMA_DEV_TO_MEM)
                        udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
                                           UDMA_CHAN_RT_CTL_EN |
                                           UDMA_CHAN_RT_CTL_TDOWN |
                                           UDMA_CHAN_RT_CTL_FTDOWN);
        }
        uc->state = UDMA_CHAN_IS_IDLE;

        return 0;
}

static void udma_start_desc(struct udma_chan *uc)
{
        struct udma_chan_config *ucc = &uc->config;

        if (uc->ud->match_data->type == DMA_TYPE_UDMA && ucc->pkt_mode &&
            (uc->cyclic || ucc->dir == DMA_DEV_TO_MEM)) {
                int i;

                /*
                 * UDMA only: push all descriptors to the ring for packet
                 * mode cyclic or RX. PKTDMA supports pre-linked
                 * descriptors and cyclic is not supported there.
                 */
                for (i = 0; i < uc->desc->sglen; i++)
                        udma_push_to_ring(uc, i);
        } else {
                udma_push_to_ring(uc, 0);
        }
}

static bool udma_chan_needs_reconfiguration(struct udma_chan *uc)
{
        /* Only PDMAs have staticTR */
        if (uc->config.ep_type == PSIL_EP_NATIVE)
                return false;

        /* Check if the staticTR configuration has changed for TX */
        if (memcmp(&uc->static_tr, &uc->desc->static_tr, sizeof(uc->static_tr)))
                return true;

        return false;
}

static int udma_start(struct udma_chan *uc)
{
        struct virt_dma_desc *vd = vchan_next_desc(&uc->vc);

        if (!vd) {
                uc->desc = NULL;
                return -ENOENT;
        }

        list_del(&vd->node);

        uc->desc = to_udma_desc(&vd->tx);

        /* Channel is already running and does not need reconfiguration */
        if (udma_is_chan_running(uc) && !udma_chan_needs_reconfiguration(uc)) {
                udma_start_desc(uc);
                goto out;
        }

        /* Make sure that we clear the teardown bit, if it is set */
        udma_reset_chan(uc, false);

        /* Push descriptors before we start the channel */
        udma_start_desc(uc);

        switch (uc->desc->dir) {
        case DMA_DEV_TO_MEM:
                /* Config remote TR */
                if (uc->config.ep_type == PSIL_EP_PDMA_XY) {
                        u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) |
                                  PDMA_STATIC_TR_X(uc->desc->static_tr.elsize);
                        const struct udma_match_data *match_data =
                                                        uc->ud->match_data;

                        if (uc->config.enable_acc32)
                                val |= PDMA_STATIC_TR_XY_ACC32;
                        if (uc->config.enable_burst)
                                val |= PDMA_STATIC_TR_XY_BURST;

                        udma_rchanrt_write(uc,
                                           UDMA_CHAN_RT_PEER_STATIC_TR_XY_REG,
                                           val);

                        udma_rchanrt_write(uc,
                                UDMA_CHAN_RT_PEER_STATIC_TR_Z_REG,
                                PDMA_STATIC_TR_Z(uc->desc->static_tr.bstcnt,
                                                 match_data->statictr_z_mask));

                        /* save the current staticTR configuration */
                        memcpy(&uc->static_tr, &uc->desc->static_tr,
                               sizeof(uc->static_tr));
                }

                udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
                                   UDMA_CHAN_RT_CTL_EN);

                /* Enable remote */
                udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
                                   UDMA_PEER_RT_EN_ENABLE);

                break;
        case DMA_MEM_TO_DEV:
                /* Config remote TR */
                if (uc->config.ep_type == PSIL_EP_PDMA_XY) {
                        u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) |
                                  PDMA_STATIC_TR_X(uc->desc->static_tr.elsize);

                        if (uc->config.enable_acc32)
                                val |= PDMA_STATIC_TR_XY_ACC32;
                        if (uc->config.enable_burst)
                                val |= PDMA_STATIC_TR_XY_BURST;

                        udma_tchanrt_write(uc,
                                           UDMA_CHAN_RT_PEER_STATIC_TR_XY_REG,
                                           val);

                        /* save the current staticTR configuration */
                        memcpy(&uc->static_tr, &uc->desc->static_tr,
                               sizeof(uc->static_tr));
                }

                /* Enable remote */
                udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
                                   UDMA_PEER_RT_EN_ENABLE);

                udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
                                   UDMA_CHAN_RT_CTL_EN);

                break;
        case DMA_MEM_TO_MEM:
                udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
                                   UDMA_CHAN_RT_CTL_EN);
                udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
                                   UDMA_CHAN_RT_CTL_EN);

                break;
        default:
                return -EINVAL;
        }

        uc->state = UDMA_CHAN_IS_ACTIVE;
out:

        return 0;
}

static int udma_stop(struct udma_chan *uc)
{
        enum udma_chan_state old_state = uc->state;

        uc->state = UDMA_CHAN_IS_TERMINATING;
        reinit_completion(&uc->teardown_completed);

        switch (uc->config.dir) {
        case DMA_DEV_TO_MEM:
                if (!uc->cyclic && !uc->desc)
                        udma_push_to_ring(uc, -1);

                udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
                                   UDMA_PEER_RT_EN_ENABLE |
                                   UDMA_PEER_RT_EN_TEARDOWN);
                break;
        case DMA_MEM_TO_DEV:
                udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
                                   UDMA_PEER_RT_EN_ENABLE |
                                   UDMA_PEER_RT_EN_FLUSH);
                udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
                                   UDMA_CHAN_RT_CTL_EN |
                                   UDMA_CHAN_RT_CTL_TDOWN);
                break;
        case DMA_MEM_TO_MEM:
                udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
                                   UDMA_CHAN_RT_CTL_EN |
                                   UDMA_CHAN_RT_CTL_TDOWN);
                break;
        default:
                uc->state = old_state;
                complete_all(&uc->teardown_completed);
                return -EINVAL;
        }

        return 0;
}

static void udma_cyclic_packet_elapsed(struct udma_chan *uc)
{
        struct udma_desc *d = uc->desc;
        struct cppi5_host_desc_t *h_desc;

        h_desc = d->hwdesc[d->desc_idx].cppi5_desc_vaddr;
        cppi5_hdesc_reset_to_original(h_desc);
        udma_push_to_ring(uc, d->desc_idx);
        d->desc_idx = (d->desc_idx + 1) % d->sglen;
}

static inline void udma_fetch_epib(struct udma_chan *uc, struct udma_desc *d)
{
        struct cppi5_host_desc_t *h_desc = d->hwdesc[0].cppi5_desc_vaddr;

        memcpy(d->metadata, h_desc->epib, d->metadata_size);
}

static bool udma_is_desc_really_done(struct udma_chan *uc, struct udma_desc *d)
{
        u32 peer_bcnt, bcnt;

        /*
         * Only TX towards PDMA is affected.
         * If DMA_PREP_INTERRUPT is not set by consumer then skip the transfer
         * completion calculation, consumer must ensure that there is no stale
         * data in DMA fabric in this case.
         */
        if (uc->config.ep_type == PSIL_EP_NATIVE ||
            uc->config.dir != DMA_MEM_TO_DEV || !(uc->config.tx_flags & DMA_PREP_INTERRUPT))
                return true;

        peer_bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
        bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);

        /* Transfer is incomplete, store current residue and time stamp */
        if (peer_bcnt < bcnt) {
                uc->tx_drain.residue = bcnt - peer_bcnt;
                uc->tx_drain.tstamp = ktime_get();
                return false;
        }

        return true;
}

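/*
 * Illustrative example (editorial note, not part of the driver): for
 * TX towards a PDMA peer the channel byte counter (BCNT) can run ahead
 * of the peer's (PEER_BCNT) while data still drains through the DMA
 * fabric; the residue is simply the difference:
 *
 *      bcnt = 4096, peer_bcnt = 3072
 *              -> tx_drain.residue = 1024 bytes still in flight, so
 *                 completion is deferred to the tx_drain delayed work.
 */
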
static void udma_check_tx_completion(struct work_struct *work)
{
        struct udma_chan *uc = container_of(work, typeof(*uc),
                                            tx_drain.work.work);
        bool desc_done = true;
        u32 residue_diff;
        ktime_t time_diff;
        unsigned long delay;

        while (1) {
                if (uc->desc) {
                        /* Get previous residue and time stamp */
                        residue_diff = uc->tx_drain.residue;
                        time_diff = uc->tx_drain.tstamp;
                        /*
                         * Get current residue and time stamp or see if
                         * transfer is complete
                         */
                        desc_done = udma_is_desc_really_done(uc, uc->desc);
                }

                if (!desc_done) {
                        /*
                         * Find the time delta and residue delta w.r.t
                         * previous poll
                         */
                        time_diff = ktime_sub(uc->tx_drain.tstamp,
                                              time_diff) + 1;
                        residue_diff -= uc->tx_drain.residue;
                        if (residue_diff) {
                                /*
                                 * Try to guess when we should check
                                 * next time by calculating rate at
                                 * which data is being drained at the
                                 * peer device
                                 */
                                delay = (time_diff / residue_diff) *
                                        uc->tx_drain.residue;
                        } else {
                                /* No progress, check again in 1 second */
                                schedule_delayed_work(&uc->tx_drain.work, HZ);
                                break;
                        }

                        usleep_range(ktime_to_us(delay),
                                     ktime_to_us(delay) + 10);
                        continue;
                }

                if (uc->desc) {
                        struct udma_desc *d = uc->desc;

                        udma_decrement_byte_counters(uc, d->residue);
                        udma_start(uc);
                        vchan_cookie_complete(&d->vd);
                        break;
                }

                break;
        }
}

static irqreturn_t udma_ring_irq_handler(int irq, void *data)
{
        struct udma_chan *uc = data;
        struct udma_desc *d;
        dma_addr_t paddr = 0;

        if (udma_pop_from_ring(uc, &paddr) || !paddr)
                return IRQ_HANDLED;

        spin_lock(&uc->vc.lock);

        /* Teardown completion message */
        if (cppi5_desc_is_tdcm(paddr)) {
                complete_all(&uc->teardown_completed);

                if (uc->terminated_desc) {
                        udma_desc_free(&uc->terminated_desc->vd);
                        uc->terminated_desc = NULL;
                }

                if (!uc->desc)
                        udma_start(uc);

                goto out;
        }

        d = udma_udma_desc_from_paddr(uc, paddr);

        if (d) {
                dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
                                                                   d->desc_idx);
                if (desc_paddr != paddr) {
                        dev_err(uc->ud->dev, "not matching descriptors!\n");
                        goto out;
                }

                if (d == uc->desc) {
                        /* active descriptor */
                        if (uc->cyclic) {
                                udma_cyclic_packet_elapsed(uc);
                                vchan_cyclic_callback(&d->vd);
                        } else {
                                if (udma_is_desc_really_done(uc, d)) {
                                        udma_decrement_byte_counters(uc, d->residue);
                                        udma_start(uc);
                                        vchan_cookie_complete(&d->vd);
                                } else {
                                        schedule_delayed_work(&uc->tx_drain.work,
                                                              0);
                                }
                        }
                } else {
                        /*
                         * terminated descriptor, mark the descriptor as
                         * completed to update the channel's cookie marker
                         */
                        dma_cookie_complete(&d->vd.tx);
                }
        }
out:
        spin_unlock(&uc->vc.lock);

        return IRQ_HANDLED;
}

static irqreturn_t udma_udma_irq_handler(int irq, void *data)
{
        struct udma_chan *uc = data;
        struct udma_desc *d;

        spin_lock(&uc->vc.lock);
        d = uc->desc;
        if (d) {
                d->tr_idx = (d->tr_idx + 1) % d->sglen;

                if (uc->cyclic) {
                        vchan_cyclic_callback(&d->vd);
                } else {
                        /* TODO: figure out the real amount of data */
                        udma_decrement_byte_counters(uc, d->residue);
                        udma_start(uc);
                        vchan_cookie_complete(&d->vd);
                }
        }

        spin_unlock(&uc->vc.lock);

        return IRQ_HANDLED;
}

/**
 * __udma_alloc_gp_rflow_range - alloc range of GP RX flows
 * @ud: UDMA device
 * @from: Start the search from this flow id number
 * @cnt: Number of consecutive flow ids to allocate
 *
 * Allocate a range of RX flow ids for future use; those flows can be
 * requested only by explicit flow id number. If @from is set to -1 it will
 * try to find the first free range. If @from is a positive value it will
 * force allocation only of the specified range of flows.
 *
 * Returns -ENOMEM if a free range can't be found.
 * -EEXIST if the requested range is busy.
 * -EINVAL if wrong input values are passed.
 * Returns the flow id on success.
 */
static int __udma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
{
        int start, tmp_from;
        DECLARE_BITMAP(tmp, K3_UDMA_MAX_RFLOWS);

        tmp_from = from;
        if (tmp_from < 0)
                tmp_from = ud->rchan_cnt;
        /* default flows can't be allocated and are accessible only by id */
        if (tmp_from < ud->rchan_cnt)
                return -EINVAL;

        if (tmp_from + cnt > ud->rflow_cnt)
                return -EINVAL;

        bitmap_or(tmp, ud->rflow_gp_map, ud->rflow_gp_map_allocated,
                  ud->rflow_cnt);

        start = bitmap_find_next_zero_area(tmp,
                                           ud->rflow_cnt,
                                           tmp_from, cnt, 0);
        if (start >= ud->rflow_cnt)
                return -ENOMEM;

        if (from >= 0 && start != from)
                return -EEXIST;

        bitmap_set(ud->rflow_gp_map_allocated, start, cnt);
        return start;
}

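/*
 * Illustrative sketch (editorial note, not part of the driver): GP
 * rflow ranges are reserved ahead of time and later requested by
 * explicit flow id. A hypothetical user needing four consecutive flows
 * anywhere in the GP range:
 *
 *      int start = __udma_alloc_gp_rflow_range(ud, -1, 4);
 *
 *      if (start >= 0) {
 *              ...use flows start..start + 3 by explicit id...
 *              __udma_free_gp_rflow_range(ud, start, 4);
 *      }
 */
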
static int __udma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
{
        if (from < ud->rchan_cnt)
                return -EINVAL;
        if (from + cnt > ud->rflow_cnt)
                return -EINVAL;

        bitmap_clear(ud->rflow_gp_map_allocated, from, cnt);
        return 0;
}

static struct udma_rflow *__udma_get_rflow(struct udma_dev *ud, int id)
{
        /*
         * An attempt to request an rflow by ID can be made for any rflow if
         * it is not in use, with the assumption that the caller knows what
         * it's doing. TI-SCI FW will perform an additional permission check
         * anyway, so it's safe.
         */

        if (id < 0 || id >= ud->rflow_cnt)
                return ERR_PTR(-ENOENT);

        if (test_bit(id, ud->rflow_in_use))
                return ERR_PTR(-ENOENT);

        if (ud->rflow_gp_map) {
                /* GP rflow has to be allocated first */
                if (!test_bit(id, ud->rflow_gp_map) &&
                    !test_bit(id, ud->rflow_gp_map_allocated))
                        return ERR_PTR(-EINVAL);
        }

        dev_dbg(ud->dev, "get rflow%d\n", id);
        set_bit(id, ud->rflow_in_use);
        return &ud->rflows[id];
}

static void __udma_put_rflow(struct udma_dev *ud, struct udma_rflow *rflow)
{
        if (!test_bit(rflow->id, ud->rflow_in_use)) {
                dev_err(ud->dev, "attempt to put unused rflow%d\n", rflow->id);
                return;
        }

        dev_dbg(ud->dev, "put rflow%d\n", rflow->id);
        clear_bit(rflow->id, ud->rflow_in_use);
}

#define UDMA_RESERVE_RESOURCE(res)                                      \
static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud,     \
                                               enum udma_tp_level tpl,  \
                                               int id)                  \
{                                                                       \
        if (id >= 0) {                                                  \
                if (test_bit(id, ud->res##_map)) {                      \
                        dev_err(ud->dev, #res "%d is in use\n", id);    \
                        return ERR_PTR(-ENOENT);                        \
                }                                                       \
        } else {                                                        \
                int start;                                              \
                                                                        \
                if (tpl >= ud->res##_tpl.levels)                        \
                        tpl = ud->res##_tpl.levels - 1;                 \
                                                                        \
                start = ud->res##_tpl.start_idx[tpl];                   \
                                                                        \
                id = find_next_zero_bit(ud->res##_map, ud->res##_cnt,   \
                                        start);                         \
                if (id == ud->res##_cnt) {                              \
                        return ERR_PTR(-ENOENT);                        \
                }                                                       \
        }                                                               \
                                                                        \
        set_bit(id, ud->res##_map);                                     \
        return &ud->res##s[id];                                         \
}

UDMA_RESERVE_RESOURCE(bchan);
UDMA_RESERVE_RESOURCE(tchan);
UDMA_RESERVE_RESOURCE(rchan);

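/*
 * Illustrative sketch (editorial note, not part of the driver): each
 * UDMA_RESERVE_RESOURCE() line above expands to one reservation
 * helper. For "tchan" the generated function is equivalent to:
 *
 *      static struct udma_tchan *
 *      __udma_reserve_tchan(struct udma_dev *ud, enum udma_tp_level tpl,
 *                           int id)
 *
 * which grabs the specific channel when id >= 0, or searches
 * ud->tchan_map starting from the requested TPL's start index when
 * id is -1.
 */
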
static int bcdma_get_bchan(struct udma_chan *uc)
{
        struct udma_dev *ud = uc->ud;
        enum udma_tp_level tpl;
        int ret;

        if (uc->bchan) {
                dev_dbg(ud->dev, "chan%d: already have bchan%d allocated\n",
                        uc->id, uc->bchan->id);
                return 0;
        }

        /*
         * Use normal channels for peripherals, and the highest TPL channel
         * for mem2mem
         */
        if (uc->config.tr_trigger_type)
                tpl = 0;
        else
                tpl = ud->bchan_tpl.levels - 1;

        uc->bchan = __udma_reserve_bchan(ud, tpl, -1);
        if (IS_ERR(uc->bchan)) {
                ret = PTR_ERR(uc->bchan);
                uc->bchan = NULL;
                return ret;
        }

        uc->tchan = uc->bchan;

        return 0;
}

static int udma_get_tchan(struct udma_chan *uc)
{
        struct udma_dev *ud = uc->ud;
        int ret;

        if (uc->tchan) {
                dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n",
                        uc->id, uc->tchan->id);
                return 0;
        }

        /*
         * mapped_channel_id is -1 for UDMA, BCDMA and PKTDMA unmapped channels.
         * For PKTDMA mapped channels it is configured to a channel which must
         * be used to service the peripheral.
         */
        uc->tchan = __udma_reserve_tchan(ud, uc->config.channel_tpl,
                                         uc->config.mapped_channel_id);
        if (IS_ERR(uc->tchan)) {
                ret = PTR_ERR(uc->tchan);
                uc->tchan = NULL;
                return ret;
        }

        if (ud->tflow_cnt) {
                int tflow_id;

                /* Only PKTDMA has support for tx flows */
1425                 if (uc->config.default_flow_id >= 0)
1426                         tflow_id = uc->config.default_flow_id;
1427                 else
1428                         tflow_id = uc->tchan->id;
1429
1430                 if (test_bit(tflow_id, ud->tflow_map)) {
1431                         dev_err(ud->dev, "tflow%d is in use\n", tflow_id);
1432                         clear_bit(uc->tchan->id, ud->tchan_map);
1433                         uc->tchan = NULL;
1434                         return -ENOENT;
1435                 }
1436
1437                 uc->tchan->tflow_id = tflow_id;
1438                 set_bit(tflow_id, ud->tflow_map);
1439         } else {
1440                 uc->tchan->tflow_id = -1;
1441         }
1442
1443         return 0;
1444 }
1445
1446 static int udma_get_rchan(struct udma_chan *uc)
1447 {
1448         struct udma_dev *ud = uc->ud;
1449         int ret;
1450
1451         if (uc->rchan) {
1452                 dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n",
1453                         uc->id, uc->rchan->id);
1454                 return 0;
1455         }
1456
1457         /*
1458          * mapped_channel_id is -1 for UDMA, BCDMA and PKTDMA unmapped channels.
1459          * For PKTDMA mapped channels it is configured to a channel which must
1460          * be used to service the peripheral.
1461          */
1462         uc->rchan = __udma_reserve_rchan(ud, uc->config.channel_tpl,
1463                                          uc->config.mapped_channel_id);
1464         if (IS_ERR(uc->rchan)) {
1465                 ret = PTR_ERR(uc->rchan);
1466                 uc->rchan = NULL;
1467                 return ret;
1468         }
1469
1470         return 0;
1471 }
1472
1473 static int udma_get_chan_pair(struct udma_chan *uc)
1474 {
1475         struct udma_dev *ud = uc->ud;
1476         int chan_id, end;
1477
1478         if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) {
1479                 dev_info(ud->dev, "chan%d: already have %d pair allocated\n",
1480                          uc->id, uc->tchan->id);
1481                 return 0;
1482         }
1483
1484         if (uc->tchan) {
1485                 dev_err(ud->dev, "chan%d: already have tchan%d allocated\n",
1486                         uc->id, uc->tchan->id);
1487                 return -EBUSY;
1488         } else if (uc->rchan) {
1489                 dev_err(ud->dev, "chan%d: already have rchan%d allocated\n",
1490                         uc->id, uc->rchan->id);
1491                 return -EBUSY;
1492         }
1493
1494         /* Can be optimized, but let's have it like this for now */
1495         end = min(ud->tchan_cnt, ud->rchan_cnt);
1496         /*
1497          * Try to use the highest TPL channel pair for MEM_TO_MEM channels
1498          * Note: in UDMAP the channel TPL is symmetric between tchan and rchan
1499          */
1500         chan_id = ud->tchan_tpl.start_idx[ud->tchan_tpl.levels - 1];
1501         for (; chan_id < end; chan_id++) {
1502                 if (!test_bit(chan_id, ud->tchan_map) &&
1503                     !test_bit(chan_id, ud->rchan_map))
1504                         break;
1505         }
1506
1507         if (chan_id == end)
1508                 return -ENOENT;
1509
1510         set_bit(chan_id, ud->tchan_map);
1511         set_bit(chan_id, ud->rchan_map);
1512         uc->tchan = &ud->tchans[chan_id];
1513         uc->rchan = &ud->rchans[chan_id];
1514
1515         /* UDMA does not use tx flows */
1516         uc->tchan->tflow_id = -1;
1517
1518         return 0;
1519 }
1520
1521 static int udma_get_rflow(struct udma_chan *uc, int flow_id)
1522 {
1523         struct udma_dev *ud = uc->ud;
1524         int ret;
1525
1526         if (!uc->rchan) {
1527                 dev_err(ud->dev, "chan%d: does not have rchan??\n", uc->id);
1528                 return -EINVAL;
1529         }
1530
1531         if (uc->rflow) {
1532                 dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n",
1533                         uc->id, uc->rflow->id);
1534                 return 0;
1535         }
1536
1537         uc->rflow = __udma_get_rflow(ud, flow_id);
1538         if (IS_ERR(uc->rflow)) {
1539                 ret = PTR_ERR(uc->rflow);
1540                 uc->rflow = NULL;
1541                 return ret;
1542         }
1543
1544         return 0;
1545 }
1546
1547 static void bcdma_put_bchan(struct udma_chan *uc)
1548 {
1549         struct udma_dev *ud = uc->ud;
1550
1551         if (uc->bchan) {
1552                 dev_dbg(ud->dev, "chan%d: put bchan%d\n", uc->id,
1553                         uc->bchan->id);
1554                 clear_bit(uc->bchan->id, ud->bchan_map);
1555                 uc->bchan = NULL;
1556                 uc->tchan = NULL;
1557         }
1558 }
1559
1560 static void udma_put_rchan(struct udma_chan *uc)
1561 {
1562         struct udma_dev *ud = uc->ud;
1563
1564         if (uc->rchan) {
1565                 dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id,
1566                         uc->rchan->id);
1567                 clear_bit(uc->rchan->id, ud->rchan_map);
1568                 uc->rchan = NULL;
1569         }
1570 }
1571
1572 static void udma_put_tchan(struct udma_chan *uc)
1573 {
1574         struct udma_dev *ud = uc->ud;
1575
1576         if (uc->tchan) {
1577                 dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id,
1578                         uc->tchan->id);
1579                 clear_bit(uc->tchan->id, ud->tchan_map);
1580
1581                 if (uc->tchan->tflow_id >= 0)
1582                         clear_bit(uc->tchan->tflow_id, ud->tflow_map);
1583
1584                 uc->tchan = NULL;
1585         }
1586 }
1587
1588 static void udma_put_rflow(struct udma_chan *uc)
1589 {
1590         struct udma_dev *ud = uc->ud;
1591
1592         if (uc->rflow) {
1593                 dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id,
1594                         uc->rflow->id);
1595                 __udma_put_rflow(ud, uc->rflow);
1596                 uc->rflow = NULL;
1597         }
1598 }
1599
1600 static void bcdma_free_bchan_resources(struct udma_chan *uc)
1601 {
1602         if (!uc->bchan)
1603                 return;
1604
1605         k3_ringacc_ring_free(uc->bchan->tc_ring);
1606         k3_ringacc_ring_free(uc->bchan->t_ring);
1607         uc->bchan->tc_ring = NULL;
1608         uc->bchan->t_ring = NULL;
1609         k3_configure_chan_coherency(&uc->vc.chan, 0);
1610
1611         bcdma_put_bchan(uc);
1612 }
1613
1614 static int bcdma_alloc_bchan_resources(struct udma_chan *uc)
1615 {
1616         struct k3_ring_cfg ring_cfg;
1617         struct udma_dev *ud = uc->ud;
1618         int ret;
1619
1620         ret = bcdma_get_bchan(uc);
1621         if (ret)
1622                 return ret;
1623
1624         ret = k3_ringacc_request_rings_pair(ud->ringacc, uc->bchan->id, -1,
1625                                             &uc->bchan->t_ring,
1626                                             &uc->bchan->tc_ring);
1627         if (ret) {
1628                 ret = -EBUSY;
1629                 goto err_ring;
1630         }
1631
1632         memset(&ring_cfg, 0, sizeof(ring_cfg));
1633         ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1634         ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
1635         ring_cfg.mode = K3_RINGACC_RING_MODE_RING;
1636
1637         k3_configure_chan_coherency(&uc->vc.chan, ud->asel);
1638         ring_cfg.asel = ud->asel;
1639         ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan);
1640
1641         ret = k3_ringacc_ring_cfg(uc->bchan->t_ring, &ring_cfg);
1642         if (ret)
1643                 goto err_ringcfg;
1644
1645         return 0;
1646
1647 err_ringcfg:
1648         k3_ringacc_ring_free(uc->bchan->tc_ring);
1649         uc->bchan->tc_ring = NULL;
1650         k3_ringacc_ring_free(uc->bchan->t_ring);
1651         uc->bchan->t_ring = NULL;
1652         k3_configure_chan_coherency(&uc->vc.chan, 0);
1653 err_ring:
1654         bcdma_put_bchan(uc);
1655
1656         return ret;
1657 }
1658
1659 static void udma_free_tx_resources(struct udma_chan *uc)
1660 {
1661         if (!uc->tchan)
1662                 return;
1663
1664         k3_ringacc_ring_free(uc->tchan->t_ring);
1665         k3_ringacc_ring_free(uc->tchan->tc_ring);
1666         uc->tchan->t_ring = NULL;
1667         uc->tchan->tc_ring = NULL;
1668
1669         udma_put_tchan(uc);
1670 }
1671
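/*
 * A TX channel needs a tchan plus a t_ring/tc_ring pair. On PKTDMA the
 * ring index follows the tx flow id; otherwise the tchan rings are placed
 * after the bchan ring range (ud->bchan_cnt is 0 on plain UDMA). UDMA uses
 * message mode rings, while BCDMA/PKTDMA use ring mode with the channel's
 * asel/coherency settings applied.
 */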
1672 static int udma_alloc_tx_resources(struct udma_chan *uc)
1673 {
1674         struct k3_ring_cfg ring_cfg;
1675         struct udma_dev *ud = uc->ud;
1676         struct udma_tchan *tchan;
1677         int ring_idx, ret;
1678
1679         ret = udma_get_tchan(uc);
1680         if (ret)
1681                 return ret;
1682
1683         tchan = uc->tchan;
1684         if (tchan->tflow_id >= 0)
1685                 ring_idx = tchan->tflow_id;
1686         else
1687                 ring_idx = ud->bchan_cnt + tchan->id;
1688
1689         ret = k3_ringacc_request_rings_pair(ud->ringacc, ring_idx, -1,
1690                                             &tchan->t_ring,
1691                                             &tchan->tc_ring);
1692         if (ret) {
1693                 ret = -EBUSY;
1694                 goto err_ring;
1695         }
1696
1697         memset(&ring_cfg, 0, sizeof(ring_cfg));
1698         ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1699         ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
1700         if (ud->match_data->type == DMA_TYPE_UDMA) {
1701                 ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
1702         } else {
1703                 ring_cfg.mode = K3_RINGACC_RING_MODE_RING;
1704
1705                 k3_configure_chan_coherency(&uc->vc.chan, uc->config.asel);
1706                 ring_cfg.asel = uc->config.asel;
1707                 ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan);
1708         }
1709
1710         ret = k3_ringacc_ring_cfg(tchan->t_ring, &ring_cfg);
1711         ret |= k3_ringacc_ring_cfg(tchan->tc_ring, &ring_cfg);
1712
1713         if (ret)
1714                 goto err_ringcfg;
1715
1716         return 0;
1717
1718 err_ringcfg:
1719         k3_ringacc_ring_free(uc->tchan->tc_ring);
1720         uc->tchan->tc_ring = NULL;
1721         k3_ringacc_ring_free(uc->tchan->t_ring);
1722         uc->tchan->t_ring = NULL;
1723 err_ring:
1724         udma_put_tchan(uc);
1725
1726         return ret;
1727 }
1728
1729 static void udma_free_rx_resources(struct udma_chan *uc)
1730 {
1731         if (!uc->rchan)
1732                 return;
1733
1734         if (uc->rflow) {
1735                 struct udma_rflow *rflow = uc->rflow;
1736
1737                 k3_ringacc_ring_free(rflow->fd_ring);
1738                 k3_ringacc_ring_free(rflow->r_ring);
1739                 rflow->fd_ring = NULL;
1740                 rflow->r_ring = NULL;
1741
1742                 udma_put_rflow(uc);
1743         }
1744
1745         udma_put_rchan(uc);
1746 }
1747
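/*
 * An RX channel needs an rchan and, except for MEM_TO_MEM, an rflow with a
 * free descriptor ring and a receive ring. The fd_ring index mirrors the
 * global ring layout: PKTDMA (tflow_cnt != 0) indexes rings by flow id,
 * while on the other types the rflow rings follow the bchan, tchan and
 * echan ring ranges.
 */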
1748 static int udma_alloc_rx_resources(struct udma_chan *uc)
1749 {
1750         struct udma_dev *ud = uc->ud;
1751         struct k3_ring_cfg ring_cfg;
1752         struct udma_rflow *rflow;
1753         int fd_ring_id;
1754         int ret;
1755
1756         ret = udma_get_rchan(uc);
1757         if (ret)
1758                 return ret;
1759
1760         /* For MEM_TO_MEM we don't need rflow or rings */
1761         if (uc->config.dir == DMA_MEM_TO_MEM)
1762                 return 0;
1763
1764         if (uc->config.default_flow_id >= 0)
1765                 ret = udma_get_rflow(uc, uc->config.default_flow_id);
1766         else
1767                 ret = udma_get_rflow(uc, uc->rchan->id);
1768
1769         if (ret) {
1770                 ret = -EBUSY;
1771                 goto err_rflow;
1772         }
1773
1774         rflow = uc->rflow;
1775         if (ud->tflow_cnt)
1776                 fd_ring_id = ud->tflow_cnt + rflow->id;
1777         else
1778                 fd_ring_id = ud->bchan_cnt + ud->tchan_cnt + ud->echan_cnt +
1779                              uc->rchan->id;
1780
1781         ret = k3_ringacc_request_rings_pair(ud->ringacc, fd_ring_id, -1,
1782                                             &rflow->fd_ring, &rflow->r_ring);
1783         if (ret) {
1784                 ret = -EBUSY;
1785                 goto err_ring;
1786         }
1787
1788         memset(&ring_cfg, 0, sizeof(ring_cfg));
1789
1790         ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
1791         if (ud->match_data->type == DMA_TYPE_UDMA) {
1792                 if (uc->config.pkt_mode)
1793                         ring_cfg.size = SG_MAX_SEGMENTS;
1794                 else
1795                         ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1796
1797                 ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
1798         } else {
1799                 ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1800                 ring_cfg.mode = K3_RINGACC_RING_MODE_RING;
1801
1802                 k3_configure_chan_coherency(&uc->vc.chan, uc->config.asel);
1803                 ring_cfg.asel = uc->config.asel;
1804                 ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan);
1805         }
1806
1807         ret = k3_ringacc_ring_cfg(rflow->fd_ring, &ring_cfg);
1808
1809         ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1810         ret |= k3_ringacc_ring_cfg(rflow->r_ring, &ring_cfg);
1811
1812         if (ret)
1813                 goto err_ringcfg;
1814
1815         return 0;
1816
1817 err_ringcfg:
1818         k3_ringacc_ring_free(rflow->r_ring);
1819         rflow->r_ring = NULL;
1820         k3_ringacc_ring_free(rflow->fd_ring);
1821         rflow->fd_ring = NULL;
1822 err_ring:
1823         udma_put_rflow(uc);
1824 err_rflow:
1825         udma_put_rchan(uc);
1826
1827         return ret;
1828 }
1829
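/*
 * valid_params bitmasks for the TISCI channel/flow configuration messages:
 * the resource-management firmware only applies the fields whose VALID bit
 * is set and leaves the rest of the channel state untouched.
 */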
1830 #define TISCI_BCDMA_BCHAN_VALID_PARAMS (                        \
1831         TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |       \
1832         TI_SCI_MSG_VALUE_RM_UDMAP_CH_EXTENDED_CH_TYPE_VALID)
1833
1834 #define TISCI_BCDMA_TCHAN_VALID_PARAMS (                        \
1835         TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |       \
1836         TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID)
1837
1838 #define TISCI_BCDMA_RCHAN_VALID_PARAMS (                        \
1839         TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID)
1840
1841 #define TISCI_UDMA_TCHAN_VALID_PARAMS (                         \
1842         TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |       \
1843         TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID |      \
1844         TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID |    \
1845         TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |          \
1846         TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID |      \
1847         TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |         \
1848         TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |            \
1849         TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)
1850
1851 #define TISCI_UDMA_RCHAN_VALID_PARAMS (                         \
1852         TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |       \
1853         TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |         \
1854         TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |            \
1855         TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |          \
1856         TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID |    \
1857         TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID |     \
1858         TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |    \
1859         TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID |      \
1860         TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)
1861
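/*
 * TISCI setup for a MEM_TO_MEM channel: both ends of the tchan/rchan pair
 * are configured as third-party block-copy (BCOPY_PBRR) channels that
 * fetch only the bare CPPI5 descriptor header, and both complete to the
 * tchan's tc_ring so a single ring interrupt signals the memcpy.
 */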
1862 static int udma_tisci_m2m_channel_config(struct udma_chan *uc)
1863 {
1864         struct udma_dev *ud = uc->ud;
1865         struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1866         const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
1867         struct udma_tchan *tchan = uc->tchan;
1868         struct udma_rchan *rchan = uc->rchan;
1869         u8 burst_size = 0;
1870         int ret;
1871         u8 tpl;
1872
1873         /* Non-synchronized - mem-to-mem type of transfer */
1874         int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
1875         struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
1876         struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
1877
1878         if (ud->match_data->flags & UDMA_FLAG_BURST_SIZE) {
1879                 tpl = udma_get_chan_tpl_index(&ud->tchan_tpl, tchan->id);
1880
1881                 burst_size = ud->match_data->burst_size[tpl];
1882         }
1883
1884         req_tx.valid_params = TISCI_UDMA_TCHAN_VALID_PARAMS;
1885         req_tx.nav_id = tisci_rm->tisci_dev_id;
1886         req_tx.index = tchan->id;
1887         req_tx.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
1888         req_tx.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
1889         req_tx.txcq_qnum = tc_ring;
1890         req_tx.tx_atype = ud->atype;
1891         if (burst_size) {
1892                 req_tx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID;
1893                 req_tx.tx_burst_size = burst_size;
1894         }
1895
1896         ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
1897         if (ret) {
1898                 dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
1899                 return ret;
1900         }
1901
1902         req_rx.valid_params = TISCI_UDMA_RCHAN_VALID_PARAMS;
1903         req_rx.nav_id = tisci_rm->tisci_dev_id;
1904         req_rx.index = rchan->id;
1905         req_rx.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
1906         req_rx.rxcq_qnum = tc_ring;
1907         req_rx.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
1908         req_rx.rx_atype = ud->atype;
1909         if (burst_size) {
1910                 req_rx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID;
1911                 req_rx.rx_burst_size = burst_size;
1912         }
1913
1914         ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
1915         if (ret)
1916                 dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret);
1917
1918         return ret;
1919 }
1920
1921 static int bcdma_tisci_m2m_channel_config(struct udma_chan *uc)
1922 {
1923         struct udma_dev *ud = uc->ud;
1924         struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1925         const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
1926         struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
1927         struct udma_bchan *bchan = uc->bchan;
1928         u8 burst_size = 0;
1929         int ret;
1930         u8 tpl;
1931
1932         if (ud->match_data->flags & UDMA_FLAG_BURST_SIZE) {
1933                 tpl = udma_get_chan_tpl_index(&ud->bchan_tpl, bchan->id);
1934
1935                 burst_size = ud->match_data->burst_size[tpl];
1936         }
1937
1938         req_tx.valid_params = TISCI_BCDMA_BCHAN_VALID_PARAMS;
1939         req_tx.nav_id = tisci_rm->tisci_dev_id;
1940         req_tx.extended_ch_type = TI_SCI_RM_BCDMA_EXTENDED_CH_TYPE_BCHAN;
1941         req_tx.index = bchan->id;
1942         if (burst_size) {
1943                 req_tx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID;
1944                 req_tx.tx_burst_size = burst_size;
1945         }
1946
1947         ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
1948         if (ret)
1949                 dev_err(ud->dev, "bchan%d cfg failed %d\n", bchan->id, ret);
1950
1951         return ret;
1952 }
1953
1954 static int udma_tisci_tx_channel_config(struct udma_chan *uc)
1955 {
1956         struct udma_dev *ud = uc->ud;
1957         struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1958         const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
1959         struct udma_tchan *tchan = uc->tchan;
1960         int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
1961         struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
1962         u32 mode, fetch_size;
1963         int ret;
1964
1965         if (uc->config.pkt_mode) {
1966                 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
1967                 fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
1968                                                    uc->config.psd_size, 0);
1969         } else {
1970                 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
1971                 fetch_size = sizeof(struct cppi5_desc_hdr_t);
1972         }
1973
1974         req_tx.valid_params = TISCI_UDMA_TCHAN_VALID_PARAMS;
1975         req_tx.nav_id = tisci_rm->tisci_dev_id;
1976         req_tx.index = tchan->id;
1977         req_tx.tx_chan_type = mode;
1978         req_tx.tx_supr_tdpkt = uc->config.notdpkt;
1979         req_tx.tx_fetch_size = fetch_size >> 2;
1980         req_tx.txcq_qnum = tc_ring;
1981         req_tx.tx_atype = uc->config.atype;
1982         if (uc->config.ep_type == PSIL_EP_PDMA_XY &&
1983             ud->match_data->flags & UDMA_FLAG_TDTYPE) {
1984                 /* wait for peer to complete the teardown for PDMAs */
1985                 req_tx.valid_params |=
1986                                 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_TDTYPE_VALID;
1987                 req_tx.tx_tdtype = 1;
1988         }
1989
1990         ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
1991         if (ret)
1992                 dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
1993
1994         return ret;
1995 }
1996
1997 static int bcdma_tisci_tx_channel_config(struct udma_chan *uc)
1998 {
1999         struct udma_dev *ud = uc->ud;
2000         struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
2001         const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
2002         struct udma_tchan *tchan = uc->tchan;
2003         struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
2004         int ret;
2005
2006         req_tx.valid_params = TISCI_BCDMA_TCHAN_VALID_PARAMS;
2007         req_tx.nav_id = tisci_rm->tisci_dev_id;
2008         req_tx.index = tchan->id;
2009         req_tx.tx_supr_tdpkt = uc->config.notdpkt;
2010         if (ud->match_data->flags & UDMA_FLAG_TDTYPE) {
2011                 /* wait for peer to complete the teardown for PDMAs */
2012                 req_tx.valid_params |=
2013                                 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_TDTYPE_VALID;
2014                 req_tx.tx_tdtype = 1;
2015         }
2016
2017         ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
2018         if (ret)
2019                 dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
2020
2021         return ret;
2022 }
2023
2024 #define pktdma_tisci_tx_channel_config bcdma_tisci_tx_channel_config
2025
2026 static int udma_tisci_rx_channel_config(struct udma_chan *uc)
2027 {
2028         struct udma_dev *ud = uc->ud;
2029         struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
2030         const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
2031         struct udma_rchan *rchan = uc->rchan;
2032         int fd_ring = k3_ringacc_get_ring_id(uc->rflow->fd_ring);
2033         int rx_ring = k3_ringacc_get_ring_id(uc->rflow->r_ring);
2034         struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
2035         struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
2036         u32 mode, fetch_size;
2037         int ret;
2038
2039         if (uc->config.pkt_mode) {
2040                 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
2041                 fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
2042                                                    uc->config.psd_size, 0);
2043         } else {
2044                 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
2045                 fetch_size = sizeof(struct cppi5_desc_hdr_t);
2046         }
2047
2048         req_rx.valid_params = TISCI_UDMA_RCHAN_VALID_PARAMS;
2049         req_rx.nav_id = tisci_rm->tisci_dev_id;
2050         req_rx.index = rchan->id;
2051         req_rx.rx_fetch_size = fetch_size >> 2;
2052         req_rx.rxcq_qnum = rx_ring;
2053         req_rx.rx_chan_type = mode;
2054         req_rx.rx_atype = uc->config.atype;
2055
2056         ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
2057         if (ret) {
2058                 dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret);
2059                 return ret;
2060         }
2061
2062         flow_req.valid_params =
2063                 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
2064                 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
2065                 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
2066                 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
2067                 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
2068                 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
2069                 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
2070                 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
2071                 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
2072                 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
2073                 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
2074                 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
2075                 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
2076
2077         flow_req.nav_id = tisci_rm->tisci_dev_id;
2078         flow_req.flow_index = rchan->id;
2079
2080         if (uc->config.needs_epib)
2081                 flow_req.rx_einfo_present = 1;
2082         else
2083                 flow_req.rx_einfo_present = 0;
2084         if (uc->config.psd_size)
2085                 flow_req.rx_psinfo_present = 1;
2086         else
2087                 flow_req.rx_psinfo_present = 0;
2088         flow_req.rx_error_handling = 1;
2089         flow_req.rx_dest_qnum = rx_ring;
2090         flow_req.rx_src_tag_hi_sel = UDMA_RFLOW_SRCTAG_NONE;
2091         flow_req.rx_src_tag_lo_sel = UDMA_RFLOW_SRCTAG_SRC_TAG;
2092         flow_req.rx_dest_tag_hi_sel = UDMA_RFLOW_DSTTAG_DST_TAG_HI;
2093         flow_req.rx_dest_tag_lo_sel = UDMA_RFLOW_DSTTAG_DST_TAG_LO;
2094         flow_req.rx_fdq0_sz0_qnum = fd_ring;
2095         flow_req.rx_fdq1_qnum = fd_ring;
2096         flow_req.rx_fdq2_qnum = fd_ring;
2097         flow_req.rx_fdq3_qnum = fd_ring;
2098
2099         ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req);
2100
2101         if (ret)
2102                 dev_err(ud->dev, "flow%d config failed: %d\n", rchan->id, ret);
2103
2104         return ret;
2105 }
2106
2107 static int bcdma_tisci_rx_channel_config(struct udma_chan *uc)
2108 {
2109         struct udma_dev *ud = uc->ud;
2110         struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
2111         const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
2112         struct udma_rchan *rchan = uc->rchan;
2113         struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
2114         int ret;
2115
2116         req_rx.valid_params = TISCI_BCDMA_RCHAN_VALID_PARAMS;
2117         req_rx.nav_id = tisci_rm->tisci_dev_id;
2118         req_rx.index = rchan->id;
2119
2120         ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
2121         if (ret)
2122                 dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret);
2123
2124         return ret;
2125 }
2126
2127 static int pktdma_tisci_rx_channel_config(struct udma_chan *uc)
2128 {
2129         struct udma_dev *ud = uc->ud;
2130         struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
2131         const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
2132         struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
2133         struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
2134         int ret;
2135
2136         req_rx.valid_params = TISCI_BCDMA_RCHAN_VALID_PARAMS;
2137         req_rx.nav_id = tisci_rm->tisci_dev_id;
2138         req_rx.index = uc->rchan->id;
2139
2140         ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
2141         if (ret) {
2142                 dev_err(ud->dev, "rchan%d cfg failed %d\n", uc->rchan->id, ret);
2143                 return ret;
2144         }
2145
2146         flow_req.valid_params =
2147                 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
2148                 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
2149                 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID;
2150
2151         flow_req.nav_id = tisci_rm->tisci_dev_id;
2152         flow_req.flow_index = uc->rflow->id;
2153
2154         if (uc->config.needs_epib)
2155                 flow_req.rx_einfo_present = 1;
2156         else
2157                 flow_req.rx_einfo_present = 0;
2158         if (uc->config.psd_size)
2159                 flow_req.rx_psinfo_present = 1;
2160         else
2161                 flow_req.rx_psinfo_present = 0;
2162         flow_req.rx_error_handling = 1;
2163
2164         ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req);
2165
2166         if (ret)
2167                 dev_err(ud->dev, "flow%d config failed: %d\n", uc->rflow->id,
2168                         ret);
2169
2170         return ret;
2171 }
2172
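/*
 * dmaengine device_alloc_chan_resources callback for UDMA. The sequence
 * is: allocate channels and rings for the requested direction, configure
 * them through TISCI, pair the PSI-L source and destination threads, then
 * request the ring interrupt (plus the UDMA TR event interrupt for slave
 * TR mode channels). Failures unwind in reverse order.
 */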
2173 static int udma_alloc_chan_resources(struct dma_chan *chan)
2174 {
2175         struct udma_chan *uc = to_udma_chan(chan);
2176         struct udma_dev *ud = to_udma_dev(chan->device);
2177         const struct udma_soc_data *soc_data = ud->soc_data;
2178         struct k3_ring *irq_ring;
2179         u32 irq_udma_idx;
2180         int ret;
2181
2182         uc->dma_dev = ud->dev;
2183
2184         if (uc->config.pkt_mode || uc->config.dir == DMA_MEM_TO_MEM) {
2185                 uc->use_dma_pool = true;
2186                 /* in case of MEM_TO_MEM we have a maximum of two TRs */
2187                 if (uc->config.dir == DMA_MEM_TO_MEM) {
2188                         uc->config.hdesc_size = cppi5_trdesc_calc_size(
2189                                         sizeof(struct cppi5_tr_type15_t), 2);
2190                         uc->config.pkt_mode = false;
2191                 }
2192         }
2193
2194         if (uc->use_dma_pool) {
2195                 uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev,
2196                                                  uc->config.hdesc_size,
2197                                                  ud->desc_align,
2198                                                  0);
2199                 if (!uc->hdesc_pool) {
2200                         dev_err(ud->ddev.dev,
2201                                 "Descriptor pool allocation failed\n");
2202                         uc->use_dma_pool = false;
2203                         ret = -ENOMEM;
2204                         goto err_cleanup;
2205                 }
2206         }
2207
2208         /*
2209          * Make sure that the completion is in a known state:
2210          * No teardown, the channel is idle
2211          */
2212         reinit_completion(&uc->teardown_completed);
2213         complete_all(&uc->teardown_completed);
2214         uc->state = UDMA_CHAN_IS_IDLE;
2215
2216         switch (uc->config.dir) {
2217         case DMA_MEM_TO_MEM:
2218                 /* Non-synchronized - mem-to-mem type of transfer */
2219                 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__,
2220                         uc->id);
2221
2222                 ret = udma_get_chan_pair(uc);
2223                 if (ret)
2224                         goto err_cleanup;
2225
2226                 ret = udma_alloc_tx_resources(uc);
2227                 if (ret) {
2228                         udma_put_rchan(uc);
2229                         goto err_cleanup;
2230                 }
2231
2232                 ret = udma_alloc_rx_resources(uc);
2233                 if (ret) {
2234                         udma_free_tx_resources(uc);
2235                         goto err_cleanup;
2236                 }
2237
2238                 uc->config.src_thread = ud->psil_base + uc->tchan->id;
2239                 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
2240                                         K3_PSIL_DST_THREAD_ID_OFFSET;
2241
2242                 irq_ring = uc->tchan->tc_ring;
2243                 irq_udma_idx = uc->tchan->id;
2244
2245                 ret = udma_tisci_m2m_channel_config(uc);
2246                 break;
2247         case DMA_MEM_TO_DEV:
2248                 /* Synchronized slave transfer - mem to dev (TX) */
2249                 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
2250                         uc->id);
2251
2252                 ret = udma_alloc_tx_resources(uc);
2253                 if (ret)
2254                         goto err_cleanup;
2255
2256                 uc->config.src_thread = ud->psil_base + uc->tchan->id;
2257                 uc->config.dst_thread = uc->config.remote_thread_id;
2258                 uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
2259
2260                 irq_ring = uc->tchan->tc_ring;
2261                 irq_udma_idx = uc->tchan->id;
2262
2263                 ret = udma_tisci_tx_channel_config(uc);
2264                 break;
2265         case DMA_DEV_TO_MEM:
2266                 /* Synchronized slave transfer - dev to mem (RX) */
2267                 dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
2268                         uc->id);
2269
2270                 ret = udma_alloc_rx_resources(uc);
2271                 if (ret)
2272                         goto err_cleanup;
2273
2274                 uc->config.src_thread = uc->config.remote_thread_id;
2275                 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
2276                                         K3_PSIL_DST_THREAD_ID_OFFSET;
2277
2278                 irq_ring = uc->rflow->r_ring;
2279                 irq_udma_idx = soc_data->oes.udma_rchan + uc->rchan->id;
2280
2281                 ret = udma_tisci_rx_channel_config(uc);
2282                 break;
2283         default:
2284                 /* Cannot happen */
2285                 dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
2286                         __func__, uc->id, uc->config.dir);
2287                 ret = -EINVAL;
2288                 goto err_cleanup;
2289
2290         }
2291
2292         /* check if the channel configuration was successful */
2293         if (ret)
2294                 goto err_res_free;
2295
2296         if (udma_is_chan_running(uc)) {
2297                 dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
2298                 udma_reset_chan(uc, false);
2299                 if (udma_is_chan_running(uc)) {
2300                         dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
2301                         ret = -EBUSY;
2302                         goto err_res_free;
2303                 }
2304         }
2305
2306         /* PSI-L pairing */
2307         ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
2308         if (ret) {
2309                 dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
2310                         uc->config.src_thread, uc->config.dst_thread);
2311                 goto err_res_free;
2312         }
2313
2314         uc->psil_paired = true;
2315
2316         uc->irq_num_ring = k3_ringacc_get_ring_irq_num(irq_ring);
2317         if (uc->irq_num_ring <= 0) {
2318                 dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
2319                         k3_ringacc_get_ring_id(irq_ring));
2320                 ret = -EINVAL;
2321                 goto err_psi_free;
2322         }
2323
2324         ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler,
2325                           IRQF_TRIGGER_HIGH, uc->name, uc);
2326         if (ret) {
2327                 dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
2328                 goto err_irq_free;
2329         }
2330
2331         /* Event from UDMA (TR events) only needed for slave TR mode channels */
2332         if (is_slave_direction(uc->config.dir) && !uc->config.pkt_mode) {
2333                 uc->irq_num_udma = msi_get_virq(ud->dev, irq_udma_idx);
2334                 if (uc->irq_num_udma <= 0) {
2335                         dev_err(ud->dev, "Failed to get udma irq (index: %u)\n",
2336                                 irq_udma_idx);
2337                         free_irq(uc->irq_num_ring, uc);
2338                         ret = -EINVAL;
2339                         goto err_irq_free;
2340                 }
2341
2342                 ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0,
2343                                   uc->name, uc);
2344                 if (ret) {
2345                         dev_err(ud->dev, "chan%d: UDMA irq request failed\n",
2346                                 uc->id);
2347                         free_irq(uc->irq_num_ring, uc);
2348                         goto err_irq_free;
2349                 }
2350         } else {
2351                 uc->irq_num_udma = 0;
2352         }
2353
2354         udma_reset_rings(uc);
2355
2356         return 0;
2357
2358 err_irq_free:
2359         uc->irq_num_ring = 0;
2360         uc->irq_num_udma = 0;
2361 err_psi_free:
2362         navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread);
2363         uc->psil_paired = false;
2364 err_res_free:
2365         udma_free_tx_resources(uc);
2366         udma_free_rx_resources(uc);
2367 err_cleanup:
2368         udma_reset_uchan(uc);
2369
2370         if (uc->use_dma_pool) {
2371                 dma_pool_destroy(uc->hdesc_pool);
2372                 uc->use_dma_pool = false;
2373         }
2374
2375         return ret;
2376 }
2377
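/*
 * BCDMA counterpart of udma_alloc_chan_resources(). MEM_TO_MEM uses a
 * bchan (block-copy channel) and needs no PSI-L pairing; the slave
 * directions use a tchan/rchan and pair PSI-L threads as on UDMA. BCDMA
 * channels always run in TR mode.
 */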
2378 static int bcdma_alloc_chan_resources(struct dma_chan *chan)
2379 {
2380         struct udma_chan *uc = to_udma_chan(chan);
2381         struct udma_dev *ud = to_udma_dev(chan->device);
2382         const struct udma_oes_offsets *oes = &ud->soc_data->oes;
2383         u32 irq_udma_idx, irq_ring_idx;
2384         int ret;
2385
2386         /* Only TR mode is supported */
2387         uc->config.pkt_mode = false;
2388
2389         /*
2390          * Make sure that the completion is in a known state:
2391          * No teardown, the channel is idle
2392          */
2393         reinit_completion(&uc->teardown_completed);
2394         complete_all(&uc->teardown_completed);
2395         uc->state = UDMA_CHAN_IS_IDLE;
2396
2397         switch (uc->config.dir) {
2398         case DMA_MEM_TO_MEM:
2399                 /* Non-synchronized - mem-to-mem type of transfer */
2400                 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__,
2401                         uc->id);
2402
2403                 ret = bcdma_alloc_bchan_resources(uc);
2404                 if (ret)
2405                         return ret;
2406
2407                 irq_ring_idx = uc->bchan->id + oes->bcdma_bchan_ring;
2408                 irq_udma_idx = uc->bchan->id + oes->bcdma_bchan_data;
2409
2410                 ret = bcdma_tisci_m2m_channel_config(uc);
2411                 break;
2412         case DMA_MEM_TO_DEV:
2413                 /* Synchronized slave transfer - mem to dev (TX) */
2414                 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
2415                         uc->id);
2416
2417                 ret = udma_alloc_tx_resources(uc);
2418                 if (ret) {
2419                         uc->config.remote_thread_id = -1;
2420                         return ret;
2421                 }
2422
2423                 uc->config.src_thread = ud->psil_base + uc->tchan->id;
2424                 uc->config.dst_thread = uc->config.remote_thread_id;
2425                 uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
2426
2427                 irq_ring_idx = uc->tchan->id + oes->bcdma_tchan_ring;
2428                 irq_udma_idx = uc->tchan->id + oes->bcdma_tchan_data;
2429
2430                 ret = bcdma_tisci_tx_channel_config(uc);
2431                 break;
2432         case DMA_DEV_TO_MEM:
2433                 /* Synchronized slave transfer - dev to mem (RX) */
2434                 dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
2435                         uc->id);
2436
2437                 ret = udma_alloc_rx_resources(uc);
2438                 if (ret) {
2439                         uc->config.remote_thread_id = -1;
2440                         return ret;
2441                 }
2442
2443                 uc->config.src_thread = uc->config.remote_thread_id;
2444                 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
2445                                         K3_PSIL_DST_THREAD_ID_OFFSET;
2446
2447                 irq_ring_idx = uc->rchan->id + oes->bcdma_rchan_ring;
2448                 irq_udma_idx = uc->rchan->id + oes->bcdma_rchan_data;
2449
2450                 ret = bcdma_tisci_rx_channel_config(uc);
2451                 break;
2452         default:
2453                 /* Cannot happen */
2454                 dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
2455                         __func__, uc->id, uc->config.dir);
2456                 return -EINVAL;
2457         }
2458
2459         /* check if the channel configuration was successful */
2460         if (ret)
2461                 goto err_res_free;
2462
2463         if (udma_is_chan_running(uc)) {
2464                 dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
2465                 udma_reset_chan(uc, false);
2466                 if (udma_is_chan_running(uc)) {
2467                         dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
2468                         ret = -EBUSY;
2469                         goto err_res_free;
2470                 }
2471         }
2472
2473         uc->dma_dev = dmaengine_get_dma_device(chan);
2474         if (uc->config.dir == DMA_MEM_TO_MEM && !uc->config.tr_trigger_type) {
2475                 uc->config.hdesc_size = cppi5_trdesc_calc_size(
2476                                         sizeof(struct cppi5_tr_type15_t), 2);
2477
2478                 uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev,
2479                                                  uc->config.hdesc_size,
2480                                                  ud->desc_align,
2481                                                  0);
2482                 if (!uc->hdesc_pool) {
2483                         dev_err(ud->ddev.dev,
2484                                 "Descriptor pool allocation failed\n");
2485                         uc->use_dma_pool = false;
2486                         ret = -ENOMEM;
2487                         goto err_res_free;
2488                 }
2489
2490                 uc->use_dma_pool = true;
2491         } else if (uc->config.dir != DMA_MEM_TO_MEM) {
2492                 /* PSI-L pairing */
2493                 ret = navss_psil_pair(ud, uc->config.src_thread,
2494                                       uc->config.dst_thread);
2495                 if (ret) {
2496                         dev_err(ud->dev,
2497                                 "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
2498                                 uc->config.src_thread, uc->config.dst_thread);
2499                         goto err_res_free;
2500                 }
2501
2502                 uc->psil_paired = true;
2503         }
2504
2505         uc->irq_num_ring = msi_get_virq(ud->dev, irq_ring_idx);
2506         if (uc->irq_num_ring <= 0) {
2507                 dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
2508                         irq_ring_idx);
2509                 ret = -EINVAL;
2510                 goto err_psi_free;
2511         }
2512
2513         ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler,
2514                           IRQF_TRIGGER_HIGH, uc->name, uc);
2515         if (ret) {
2516                 dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
2517                 goto err_irq_free;
2518         }
2519
2520         /* Event from BCDMA (TR events) only needed for slave channels */
2521         if (is_slave_direction(uc->config.dir)) {
2522                 uc->irq_num_udma = msi_get_virq(ud->dev, irq_udma_idx);
2523                 if (uc->irq_num_udma <= 0) {
2524                         dev_err(ud->dev, "Failed to get bcdma irq (index: %u)\n",
2525                                 irq_udma_idx);
2526                         free_irq(uc->irq_num_ring, uc);
2527                         ret = -EINVAL;
2528                         goto err_irq_free;
2529                 }
2530
2531                 ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0,
2532                                   uc->name, uc);
2533                 if (ret) {
2534                         dev_err(ud->dev, "chan%d: BCDMA irq request failed\n",
2535                                 uc->id);
2536                         free_irq(uc->irq_num_ring, uc);
2537                         goto err_irq_free;
2538                 }
2539         } else {
2540                 uc->irq_num_udma = 0;
2541         }
2542
2543         udma_reset_rings(uc);
2544
2545         INIT_DELAYED_WORK(&uc->tx_drain.work,
2546                           udma_check_tx_completion);
2547         return 0;
2548
2549 err_irq_free:
2550         uc->irq_num_ring = 0;
2551         uc->irq_num_udma = 0;
2552 err_psi_free:
2553         if (uc->psil_paired)
2554                 navss_psil_unpair(ud, uc->config.src_thread,
2555                                   uc->config.dst_thread);
2556         uc->psil_paired = false;
2557 err_res_free:
2558         bcdma_free_bchan_resources(uc);
2559         udma_free_tx_resources(uc);
2560         udma_free_rx_resources(uc);
2561
2562         udma_reset_uchan(uc);
2563
2564         if (uc->use_dma_pool) {
2565                 dma_pool_destroy(uc->hdesc_pool);
2566                 uc->use_dma_pool = false;
2567         }
2568
2569         return ret;
2570 }
2571
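/*
 * Each bchan owns two consecutive global triggers, so the event-router
 * output for this channel is trigger_event_offset + bchan_id * 2 +
 * (tr_trigger_type - 1). Illustrative example (offset value hypothetical):
 * offset 0x4c00, bchan 3, trigger type 2 selects event 0x4c00 + 7.
 */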
2572 static int bcdma_router_config(struct dma_chan *chan)
2573 {
2574         struct k3_event_route_data *router_data = chan->route_data;
2575         struct udma_chan *uc = to_udma_chan(chan);
2576         u32 trigger_event;
2577
2578         if (!uc->bchan)
2579                 return -EINVAL;
2580
2581         if (uc->config.tr_trigger_type != 1 && uc->config.tr_trigger_type != 2)
2582                 return -EINVAL;
2583
2584         trigger_event = uc->ud->soc_data->bcdma_trigger_event_offset;
2585         trigger_event += (uc->bchan->id * 2) + uc->config.tr_trigger_type - 1;
2586
2587         return router_data->set_event(router_data->priv, trigger_event);
2588 }
2589
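/*
 * PKTDMA counterpart: only the slave directions exist (no MEM_TO_MEM), and
 * completions are signalled per tflow/rflow, so just the ring interrupt is
 * requested and irq_num_udma stays 0.
 */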
2590 static int pktdma_alloc_chan_resources(struct dma_chan *chan)
2591 {
2592         struct udma_chan *uc = to_udma_chan(chan);
2593         struct udma_dev *ud = to_udma_dev(chan->device);
2594         const struct udma_oes_offsets *oes = &ud->soc_data->oes;
2595         u32 irq_ring_idx;
2596         int ret;
2597
2598         /*
2599          * Make sure that the completion is in a known state:
2600          * No teardown, the channel is idle
2601          */
2602         reinit_completion(&uc->teardown_completed);
2603         complete_all(&uc->teardown_completed);
2604         uc->state = UDMA_CHAN_IS_IDLE;
2605
2606         switch (uc->config.dir) {
2607         case DMA_MEM_TO_DEV:
2608                 /* Synchronized slave transfer - mem to dev (TX) */
2609                 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
2610                         uc->id);
2611
2612                 ret = udma_alloc_tx_resources(uc);
2613                 if (ret) {
2614                         uc->config.remote_thread_id = -1;
2615                         return ret;
2616                 }
2617
2618                 uc->config.src_thread = ud->psil_base + uc->tchan->id;
2619                 uc->config.dst_thread = uc->config.remote_thread_id;
2620                 uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
2621
2622                 irq_ring_idx = uc->tchan->tflow_id + oes->pktdma_tchan_flow;
2623
2624                 ret = pktdma_tisci_tx_channel_config(uc);
2625                 break;
2626         case DMA_DEV_TO_MEM:
2627                 /* Synchronized slave transfer - dev to mem (RX) */
2628                 dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
2629                         uc->id);
2630
2631                 ret = udma_alloc_rx_resources(uc);
2632                 if (ret) {
2633                         uc->config.remote_thread_id = -1;
2634                         return ret;
2635                 }
2636
2637                 uc->config.src_thread = uc->config.remote_thread_id;
2638                 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
2639                                         K3_PSIL_DST_THREAD_ID_OFFSET;
2640
2641                 irq_ring_idx = uc->rflow->id + oes->pktdma_rchan_flow;
2642
2643                 ret = pktdma_tisci_rx_channel_config(uc);
2644                 break;
2645         default:
2646                 /* Cannot happen */
2647                 dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
2648                         __func__, uc->id, uc->config.dir);
2649                 return -EINVAL;
2650         }
2651
2652         /* check if the channel configuration was successful */
2653         if (ret)
2654                 goto err_res_free;
2655
2656         if (udma_is_chan_running(uc)) {
2657                 dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
2658                 udma_reset_chan(uc, false);
2659                 if (udma_is_chan_running(uc)) {
2660                         dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
2661                         ret = -EBUSY;
2662                         goto err_res_free;
2663                 }
2664         }
2665
2666         uc->dma_dev = dmaengine_get_dma_device(chan);
2667         uc->hdesc_pool = dma_pool_create(uc->name, uc->dma_dev,
2668                                          uc->config.hdesc_size, ud->desc_align,
2669                                          0);
2670         if (!uc->hdesc_pool) {
2671                 dev_err(ud->ddev.dev,
2672                         "Descriptor pool allocation failed\n");
2673                 uc->use_dma_pool = false;
2674                 ret = -ENOMEM;
2675                 goto err_res_free;
2676         }
2677
2678         uc->use_dma_pool = true;
2679
2680         /* PSI-L pairing */
2681         ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
2682         if (ret) {
2683                 dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
2684                         uc->config.src_thread, uc->config.dst_thread);
2685                 goto err_res_free;
2686         }
2687
2688         uc->psil_paired = true;
2689
2690         uc->irq_num_ring = msi_get_virq(ud->dev, irq_ring_idx);
2691         if (uc->irq_num_ring <= 0) {
2692                 dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
2693                         irq_ring_idx);
2694                 ret = -EINVAL;
2695                 goto err_psi_free;
2696         }
2697
2698         ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler,
2699                           IRQF_TRIGGER_HIGH, uc->name, uc);
2700         if (ret) {
2701                 dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
2702                 goto err_irq_free;
2703         }
2704
2705         uc->irq_num_udma = 0;
2706
2707         udma_reset_rings(uc);
2708
2709         INIT_DELAYED_WORK(&uc->tx_drain.work,
2710                           udma_check_tx_completion);
2711
2712         if (uc->tchan)
2713                 dev_dbg(ud->dev,
2714                         "chan%d: tchan%d, tflow%d, Remote thread: 0x%04x\n",
2715                         uc->id, uc->tchan->id, uc->tchan->tflow_id,
2716                         uc->config.remote_thread_id);
2717         else if (uc->rchan)
2718                 dev_dbg(ud->dev,
2719                         "chan%d: rchan%d, rflow%d, Remote thread: 0x%04x\n",
2720                         uc->id, uc->rchan->id, uc->rflow->id,
2721                         uc->config.remote_thread_id);
2722         return 0;
2723
2724 err_irq_free:
2725         uc->irq_num_ring = 0;
2726 err_psi_free:
2727         navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread);
2728         uc->psil_paired = false;
2729 err_res_free:
2730         udma_free_tx_resources(uc);
2731         udma_free_rx_resources(uc);
2732
2733         udma_reset_uchan(uc);
2734
2735         dma_pool_destroy(uc->hdesc_pool);
2736         uc->use_dma_pool = false;
2737
2738         return ret;
2739 }
2740
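/*
 * dmaengine device_config callback: the slave parameters are latched here
 * and applied when a transfer is prepared. A client would typically set
 * them up as in this sketch (fifo_phys is a hypothetical device FIFO
 * address):
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = fifo_phys,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst = 8,
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 */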
2741 static int udma_slave_config(struct dma_chan *chan,
2742                              struct dma_slave_config *cfg)
2743 {
2744         struct udma_chan *uc = to_udma_chan(chan);
2745
2746         memcpy(&uc->cfg, cfg, sizeof(uc->cfg));
2747
2748         return 0;
2749 }
2750
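/*
 * Allocate a single CPPI5 TR descriptor holding @tr_count records of
 * @tr_size bytes. Resulting memory layout:
 *
 *	+-------------------------+ <- cppi5_desc_vaddr
 *	| CPPI5 descriptor header |
 *	+-------------------------+ <- + tr_size (tr_req_base)
 *	| tr_count TR records     |
 *	+-------------------------+ <- + tr_size * (tr_count + 1)
 *	| tr_count TR responses   |    (tr_resp_base)
 *	+-------------------------+
 */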
2751 static struct udma_desc *udma_alloc_tr_desc(struct udma_chan *uc,
2752                                             size_t tr_size, int tr_count,
2753                                             enum dma_transfer_direction dir)
2754 {
2755         struct udma_hwdesc *hwdesc;
2756         struct cppi5_desc_hdr_t *tr_desc;
2757         struct udma_desc *d;
2758         u32 reload_count = 0;
2759         u32 ring_id;
2760
2761         switch (tr_size) {
2762         case 16:
2763         case 32:
2764         case 64:
2765         case 128:
2766                 break;
2767         default:
2768                 dev_err(uc->ud->dev, "Unsupported TR size of %zu\n", tr_size);
2769                 return NULL;
2770         }
2771
2772         /* We have only one descriptor containing multiple TRs */
2773         d = kzalloc(sizeof(*d) + sizeof(d->hwdesc[0]), GFP_NOWAIT);
2774         if (!d)
2775                 return NULL;
2776
2777         d->sglen = tr_count;
2778
2779         d->hwdesc_count = 1;
2780         hwdesc = &d->hwdesc[0];
2781
2782         /* Allocate memory for DMA ring descriptor */
2783         if (uc->use_dma_pool) {
2784                 hwdesc->cppi5_desc_size = uc->config.hdesc_size;
2785                 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
2786                                                 GFP_NOWAIT,
2787                                                 &hwdesc->cppi5_desc_paddr);
2788         } else {
2789                 hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size,
2790                                                                  tr_count);
2791                 hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size,
2792                                                 uc->ud->desc_align);
2793                 hwdesc->cppi5_desc_vaddr = dma_alloc_coherent(uc->ud->dev,
2794                                                 hwdesc->cppi5_desc_size,
2795                                                 &hwdesc->cppi5_desc_paddr,
2796                                                 GFP_NOWAIT);
2797         }
2798
2799         if (!hwdesc->cppi5_desc_vaddr) {
2800                 kfree(d);
2801                 return NULL;
2802         }
2803
2804         /* Start of the TR req records */
2805         hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size;
2806         /* Start address of the TR response array */
2807         hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size * tr_count;
2808
2809         tr_desc = hwdesc->cppi5_desc_vaddr;
2810
2811         if (uc->cyclic)
2812                 reload_count = CPPI5_INFO0_TRDESC_RLDCNT_INFINITE;
2813
2814         if (dir == DMA_DEV_TO_MEM)
2815                 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
2816         else
2817                 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
2818
2819         cppi5_trdesc_init(tr_desc, tr_count, tr_size, 0, reload_count);
2820         cppi5_desc_set_pktids(tr_desc, uc->id,
2821                               CPPI5_INFO1_DESC_FLOWID_DEFAULT);
2822         cppi5_desc_set_retpolicy(tr_desc, 0, ring_id);
2823
2824         return d;
2825 }
2826
2827 /**
2828  * udma_get_tr_counters - calculate TR counters for a given length
2829  * @len: Length of the transfer
2830  * @align_to: Preferred alignment
2831  * @tr0_cnt0: First TR icnt0
2832  * @tr0_cnt1: First TR icnt1
2833  * @tr1_cnt0: Second (if used) TR icnt0
2834  *
2835  * For len < SZ_64K only one TR is enough; tr1_cnt0 is not updated
2836  * For len >= SZ_64K two TRs are used in a simple way:
2837  * First TR: SZ_64K-alignment blocks (tr0_cnt0, tr0_cnt1)
2838  * Second TR: the remaining length (tr1_cnt0)
2839  *
2840  * Returns the number of TRs the length needs (1 or 2), or
2841  * -EINVAL if the length cannot be supported
2842  */
2843 static int udma_get_tr_counters(size_t len, unsigned long align_to,
2844                                 u16 *tr0_cnt0, u16 *tr0_cnt1, u16 *tr1_cnt0)
2845 {
2846         if (len < SZ_64K) {
2847                 *tr0_cnt0 = len;
2848                 *tr0_cnt1 = 1;
2849
2850                 return 1;
2851         }
2852
2853         if (align_to > 3)
2854                 align_to = 3;
2855
2856 realign:
2857         *tr0_cnt0 = SZ_64K - BIT(align_to);
2858         if (len / *tr0_cnt0 >= SZ_64K) {
2859                 if (align_to) {
2860                         align_to--;
2861                         goto realign;
2862                 }
2863                 return -EINVAL;
2864         }
2865
2866         *tr0_cnt1 = len / *tr0_cnt0;
2867         *tr1_cnt0 = len % *tr0_cnt0;
2868
2869         return 2;
2870 }
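/*
 * Worked example (numbers illustrative): len = 200000, align_to = 3 gives
 * tr0_cnt0 = SZ_64K - 8 = 65528, tr0_cnt1 = 200000 / 65528 = 3 and
 * tr1_cnt0 = 200000 % 65528 = 3416, so the transfer is split into a
 * 3 x 65528 byte TR plus a 3416 byte TR and 2 is returned.
 */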
2871
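/*
 * Build the TR descriptor for a slave SG transfer: one type-1 TR per SG
 * entry, or two when the entry is 64KiB or larger (icnt0 is a 16-bit
 * count). Event generation is suppressed on every TR; the last one also
 * carries CPPI5_TR_CSF_EOP to mark the end of the transfer.
 */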
2872 static struct udma_desc *
2873 udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl,
2874                       unsigned int sglen, enum dma_transfer_direction dir,
2875                       unsigned long tx_flags, void *context)
2876 {
2877         struct scatterlist *sgent;
2878         struct udma_desc *d;
2879         struct cppi5_tr_type1_t *tr_req = NULL;
2880         u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
2881         unsigned int i;
2882         size_t tr_size;
2883         int num_tr = 0;
2884         int tr_idx = 0;
2885         u64 asel;
2886
2887         /* estimate the number of TRs we will need */
2888         for_each_sg(sgl, sgent, sglen, i) {
2889                 if (sg_dma_len(sgent) < SZ_64K)
2890                         num_tr++;
2891                 else
2892                         num_tr += 2;
2893         }
2894
2895         /* Now allocate and setup the descriptor. */
2896         tr_size = sizeof(struct cppi5_tr_type1_t);
2897         d = udma_alloc_tr_desc(uc, tr_size, num_tr, dir);
2898         if (!d)
2899                 return NULL;
2900
2901         d->sglen = sglen;
2902
2903         if (uc->ud->match_data->type == DMA_TYPE_UDMA)
2904                 asel = 0;
2905         else
2906                 asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT;
2907
2908         tr_req = d->hwdesc[0].tr_req_base;
2909         for_each_sg(sgl, sgent, sglen, i) {
2910                 dma_addr_t sg_addr = sg_dma_address(sgent);
2911
2912                 num_tr = udma_get_tr_counters(sg_dma_len(sgent), __ffs(sg_addr),
2913                                               &tr0_cnt0, &tr0_cnt1, &tr1_cnt0);
2914                 if (num_tr < 0) {
2915                         dev_err(uc->ud->dev, "size %u is not supported\n",
2916                                 sg_dma_len(sgent));
2917                         udma_free_hwdesc(uc, d);
2918                         kfree(d);
2919                         return NULL;
2920                 }
2921
2922                 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false,
2923                               false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2924                 cppi5_tr_csf_set(&tr_req[tr_idx].flags, CPPI5_TR_CSF_SUPR_EVT);
2925
2926                 sg_addr |= asel;
2927                 tr_req[tr_idx].addr = sg_addr;
2928                 tr_req[tr_idx].icnt0 = tr0_cnt0;
2929                 tr_req[tr_idx].icnt1 = tr0_cnt1;
2930                 tr_req[tr_idx].dim1 = tr0_cnt0;
2931                 tr_idx++;
2932
2933                 if (num_tr == 2) {
2934                         cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1,
2935                                       false, false,
2936                                       CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2937                         cppi5_tr_csf_set(&tr_req[tr_idx].flags,
2938                                          CPPI5_TR_CSF_SUPR_EVT);
2939
2940                         tr_req[tr_idx].addr = sg_addr + tr0_cnt1 * tr0_cnt0;
2941                         tr_req[tr_idx].icnt0 = tr1_cnt0;
2942                         tr_req[tr_idx].icnt1 = 1;
2943                         tr_req[tr_idx].dim1 = tr1_cnt0;
2944                         tr_idx++;
2945                 }
2946
2947                 d->residue += sg_dma_len(sgent);
2948         }
2949
2950         cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags,
2951                          CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP);
2952
2953         return d;
2954 }
2955
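/*
 * Triggered-TR variant used for BCDMA: each trigger moves trigger_size
 * bytes (dev_width * burst, or dev_width * port_window), so every SG entry
 * must be a whole multiple of trigger_size and the TR counters are
 * computed in units of triggers rather than bytes.
 */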
2956 static struct udma_desc *
2957 udma_prep_slave_sg_triggered_tr(struct udma_chan *uc, struct scatterlist *sgl,
2958                                 unsigned int sglen,
2959                                 enum dma_transfer_direction dir,
2960                                 unsigned long tx_flags, void *context)
2961 {
2962         struct scatterlist *sgent;
2963         struct cppi5_tr_type15_t *tr_req = NULL;
2964         enum dma_slave_buswidth dev_width;
2965         u16 tr_cnt0, tr_cnt1;
2966         dma_addr_t dev_addr;
2967         struct udma_desc *d;
2968         unsigned int i;
2969         size_t tr_size, sg_len;
2970         int num_tr = 0;
2971         int tr_idx = 0;
2972         u32 burst, trigger_size, port_window;
2973         u64 asel;
2974
2975         if (dir == DMA_DEV_TO_MEM) {
2976                 dev_addr = uc->cfg.src_addr;
2977                 dev_width = uc->cfg.src_addr_width;
2978                 burst = uc->cfg.src_maxburst;
2979                 port_window = uc->cfg.src_port_window_size;
2980         } else if (dir == DMA_MEM_TO_DEV) {
2981                 dev_addr = uc->cfg.dst_addr;
2982                 dev_width = uc->cfg.dst_addr_width;
2983                 burst = uc->cfg.dst_maxburst;
2984                 port_window = uc->cfg.dst_port_window_size;
2985         } else {
2986                 dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
2987                 return NULL;
2988         }
2989
2990         if (!burst)
2991                 burst = 1;
2992
2993         if (port_window) {
2994                 if (port_window != burst) {
2995                         dev_err(uc->ud->dev,
2996                                 "The burst must be equal to port_window\n");
2997                         return NULL;
2998                 }
2999
3000                 tr_cnt0 = dev_width * port_window;
3001                 tr_cnt1 = 1;
3002         } else {
3003                 tr_cnt0 = dev_width;
3004                 tr_cnt1 = burst;
3005         }
3006         trigger_size = tr_cnt0 * tr_cnt1;
3007
3008         /* estimate the number of TRs we will need */
3009         for_each_sg(sgl, sgent, sglen, i) {
3010                 sg_len = sg_dma_len(sgent);
3011
3012                 if (sg_len % trigger_size) {
3013                         dev_err(uc->ud->dev,
3014                                 "SG entry size %zu is not aligned to trigger size %u\n", sg_len,
3015                                 trigger_size);
3016                         return NULL;
3017                 }
3018
3019                 if (sg_len / trigger_size < SZ_64K)
3020                         num_tr++;
3021                 else
3022                         num_tr += 2;
3023         }
3024
3025         /* Now allocate and set up the descriptor. */
3026         tr_size = sizeof(struct cppi5_tr_type15_t);
3027         d = udma_alloc_tr_desc(uc, tr_size, num_tr, dir);
3028         if (!d)
3029                 return NULL;
3030
3031         d->sglen = sglen;
3032
3033         if (uc->ud->match_data->type == DMA_TYPE_UDMA) {
3034                 asel = 0;
3035         } else {
3036                 asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT;
3037                 dev_addr |= asel;
3038         }
3039
3040         tr_req = d->hwdesc[0].tr_req_base;
3041         for_each_sg(sgl, sgent, sglen, i) {
3042                 u16 tr0_cnt2, tr0_cnt3, tr1_cnt2;
3043                 dma_addr_t sg_addr = sg_dma_address(sgent);
3044
3045                 sg_len = sg_dma_len(sgent);
3046                 num_tr = udma_get_tr_counters(sg_len / trigger_size, 0,
3047                                               &tr0_cnt2, &tr0_cnt3, &tr1_cnt2);
3048                 if (num_tr < 0) {
3049                         dev_err(uc->ud->dev, "size %zu is not supported\n",
3050                                 sg_len);
3051                         udma_free_hwdesc(uc, d);
3052                         kfree(d);
3053                         return NULL;
3054                 }
3055
3056                 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE15, false,
3057                               true, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3058                 cppi5_tr_csf_set(&tr_req[tr_idx].flags, CPPI5_TR_CSF_SUPR_EVT);
3059                 cppi5_tr_set_trigger(&tr_req[tr_idx].flags,
3060                                      uc->config.tr_trigger_type,
3061                                      CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC, 0, 0);
3062
3063                 sg_addr |= asel;
3064                 if (dir == DMA_DEV_TO_MEM) {
3065                         tr_req[tr_idx].addr = dev_addr;
3066                         tr_req[tr_idx].icnt0 = tr_cnt0;
3067                         tr_req[tr_idx].icnt1 = tr_cnt1;
3068                         tr_req[tr_idx].icnt2 = tr0_cnt2;
3069                         tr_req[tr_idx].icnt3 = tr0_cnt3;
3070                         tr_req[tr_idx].dim1 = (-1) * tr_cnt0;
3071
3072                         tr_req[tr_idx].daddr = sg_addr;
3073                         tr_req[tr_idx].dicnt0 = tr_cnt0;
3074                         tr_req[tr_idx].dicnt1 = tr_cnt1;
3075                         tr_req[tr_idx].dicnt2 = tr0_cnt2;
3076                         tr_req[tr_idx].dicnt3 = tr0_cnt3;
3077                         tr_req[tr_idx].ddim1 = tr_cnt0;
3078                         tr_req[tr_idx].ddim2 = trigger_size;
3079                         tr_req[tr_idx].ddim3 = trigger_size * tr0_cnt2;
3080                 } else {
3081                         tr_req[tr_idx].addr = sg_addr;
3082                         tr_req[tr_idx].icnt0 = tr_cnt0;
3083                         tr_req[tr_idx].icnt1 = tr_cnt1;
3084                         tr_req[tr_idx].icnt2 = tr0_cnt2;
3085                         tr_req[tr_idx].icnt3 = tr0_cnt3;
3086                         tr_req[tr_idx].dim1 = tr_cnt0;
3087                         tr_req[tr_idx].dim2 = trigger_size;
3088                         tr_req[tr_idx].dim3 = trigger_size * tr0_cnt2;
3089
3090                         tr_req[tr_idx].daddr = dev_addr;
3091                         tr_req[tr_idx].dicnt0 = tr_cnt0;
3092                         tr_req[tr_idx].dicnt1 = tr_cnt1;
3093                         tr_req[tr_idx].dicnt2 = tr0_cnt2;
3094                         tr_req[tr_idx].dicnt3 = tr0_cnt3;
3095                         tr_req[tr_idx].ddim1 = (-1) * tr_cnt0;
3096                 }
3097
3098                 tr_idx++;
3099
3100                 if (num_tr == 2) {
3101                         cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE15,
3102                                       false, true,
3103                                       CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3104                         cppi5_tr_csf_set(&tr_req[tr_idx].flags,
3105                                          CPPI5_TR_CSF_SUPR_EVT);
3106                         cppi5_tr_set_trigger(&tr_req[tr_idx].flags,
3107                                              uc->config.tr_trigger_type,
3108                                              CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC,
3109                                              0, 0);
3110
3111                         sg_addr += trigger_size * tr0_cnt2 * tr0_cnt3;
3112                         if (dir == DMA_DEV_TO_MEM) {
3113                                 tr_req[tr_idx].addr = dev_addr;
3114                                 tr_req[tr_idx].icnt0 = tr_cnt0;
3115                                 tr_req[tr_idx].icnt1 = tr_cnt1;
3116                                 tr_req[tr_idx].icnt2 = tr1_cnt2;
3117                                 tr_req[tr_idx].icnt3 = 1;
3118                                 tr_req[tr_idx].dim1 = (-1) * tr_cnt0;
3119
3120                                 tr_req[tr_idx].daddr = sg_addr;
3121                                 tr_req[tr_idx].dicnt0 = tr_cnt0;
3122                                 tr_req[tr_idx].dicnt1 = tr_cnt1;
3123                                 tr_req[tr_idx].dicnt2 = tr1_cnt2;
3124                                 tr_req[tr_idx].dicnt3 = 1;
3125                                 tr_req[tr_idx].ddim1 = tr_cnt0;
3126                                 tr_req[tr_idx].ddim2 = trigger_size;
3127                         } else {
3128                                 tr_req[tr_idx].addr = sg_addr;
3129                                 tr_req[tr_idx].icnt0 = tr_cnt0;
3130                                 tr_req[tr_idx].icnt1 = tr_cnt1;
3131                                 tr_req[tr_idx].icnt2 = tr1_cnt2;
3132                                 tr_req[tr_idx].icnt3 = 1;
3133                                 tr_req[tr_idx].dim1 = tr_cnt0;
3134                                 tr_req[tr_idx].dim2 = trigger_size;
3135
3136                                 tr_req[tr_idx].daddr = dev_addr;
3137                                 tr_req[tr_idx].dicnt0 = tr_cnt0;
3138                                 tr_req[tr_idx].dicnt1 = tr_cnt1;
3139                                 tr_req[tr_idx].dicnt2 = tr1_cnt2;
3140                                 tr_req[tr_idx].dicnt3 = 1;
3141                                 tr_req[tr_idx].ddim1 = (-1) * tr_cnt0;
3142                         }
3143                         tr_idx++;
3144                 }
3145
3146                 d->residue += sg_len;
3147         }
3148
3149         cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags,
3150                          CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP);
3151
3152         return d;
3153 }
3154
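/*
 * Set up the static TR parameters used to configure a remote PDMA
 * peripheral: elsize encodes the bus width, elcnt is the number of
 * elements per burst and bstcnt the number of bursts after which PDMA has
 * to close the packet (0 keeps the packet open, used for cyclic TR mode).
 */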
3155 static int udma_configure_statictr(struct udma_chan *uc, struct udma_desc *d,
3156                                    enum dma_slave_buswidth dev_width,
3157                                    u16 elcnt)
3158 {
3159         if (uc->config.ep_type != PSIL_EP_PDMA_XY)
3160                 return 0;
3161
3162         /* Bus width translates to the element size (ES) */
3163         switch (dev_width) {
3164         case DMA_SLAVE_BUSWIDTH_1_BYTE:
3165                 d->static_tr.elsize = 0;
3166                 break;
3167         case DMA_SLAVE_BUSWIDTH_2_BYTES:
3168                 d->static_tr.elsize = 1;
3169                 break;
3170         case DMA_SLAVE_BUSWIDTH_3_BYTES:
3171                 d->static_tr.elsize = 2;
3172                 break;
3173         case DMA_SLAVE_BUSWIDTH_4_BYTES:
3174                 d->static_tr.elsize = 3;
3175                 break;
3176         case DMA_SLAVE_BUSWIDTH_8_BYTES:
3177                 d->static_tr.elsize = 4;
3178                 break;
3179         default: /* not reached */
3180                 return -EINVAL;
3181         }
3182
3183         d->static_tr.elcnt = elcnt;
3184
3185         /*
3186          * PDMA must close the packet when the channel is in packet mode.
3187          * For TR mode, when the channel is not cyclic, we also need PDMA to
3188          * close the packet, otherwise the transfer will stall because PDMA
3189          * holds on to the data it has received from the peripheral.
3190          */
3191         if (uc->config.pkt_mode || !uc->cyclic) {
3192                 unsigned int div = dev_width * elcnt;
3193
3194                 if (uc->cyclic)
3195                         d->static_tr.bstcnt = d->residue / d->sglen / div;
3196                 else
3197                         d->static_tr.bstcnt = d->residue / div;
3198
3199                 if (uc->config.dir == DMA_DEV_TO_MEM &&
3200                     d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask)
3201                         return -EINVAL;
3202         } else {
3203                 d->static_tr.bstcnt = 0;
3204         }
3205
3206         return 0;
3207 }
3208
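/*
 * Packet mode slave SG: build a CPPI5 host descriptor chain with one
 * hardware descriptor per SG entry. The first descriptor carries the
 * packet header (packet length, flow/packet ID, return ring), the others
 * are linked to it as host buffer descriptors. The total packet length
 * must stay below 4M.
 */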
3209 static struct udma_desc *
3210 udma_prep_slave_sg_pkt(struct udma_chan *uc, struct scatterlist *sgl,
3211                        unsigned int sglen, enum dma_transfer_direction dir,
3212                        unsigned long tx_flags, void *context)
3213 {
3214         struct scatterlist *sgent;
3215         struct cppi5_host_desc_t *h_desc = NULL;
3216         struct udma_desc *d;
3217         u32 ring_id;
3218         unsigned int i;
3219         u64 asel;
3220
3221         d = kzalloc(struct_size(d, hwdesc, sglen), GFP_NOWAIT);
3222         if (!d)
3223                 return NULL;
3224
3225         d->sglen = sglen;
3226         d->hwdesc_count = sglen;
3227
3228         if (dir == DMA_DEV_TO_MEM)
3229                 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
3230         else
3231                 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
3232
3233         if (uc->ud->match_data->type == DMA_TYPE_UDMA)
3234                 asel = 0;
3235         else
3236                 asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT;
3237
3238         for_each_sg(sgl, sgent, sglen, i) {
3239                 struct udma_hwdesc *hwdesc = &d->hwdesc[i];
3240                 dma_addr_t sg_addr = sg_dma_address(sgent);
3241                 struct cppi5_host_desc_t *desc;
3242                 size_t sg_len = sg_dma_len(sgent);
3243
3244                 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
3245                                                 GFP_NOWAIT,
3246                                                 &hwdesc->cppi5_desc_paddr);
3247                 if (!hwdesc->cppi5_desc_vaddr) {
3248                         dev_err(uc->ud->dev,
3249                                 "descriptor%d allocation failed\n", i);
3250
3251                         udma_free_hwdesc(uc, d);
3252                         kfree(d);
3253                         return NULL;
3254                 }
3255
3256                 d->residue += sg_len;
3257                 hwdesc->cppi5_desc_size = uc->config.hdesc_size;
3258                 desc = hwdesc->cppi5_desc_vaddr;
3259
3260                 if (i == 0) {
3261                         cppi5_hdesc_init(desc, 0, 0);
3262                         /* Flow and Packet ID */
3263                         cppi5_desc_set_pktids(&desc->hdr, uc->id,
3264                                               CPPI5_INFO1_DESC_FLOWID_DEFAULT);
3265                         cppi5_desc_set_retpolicy(&desc->hdr, 0, ring_id);
3266                 } else {
3267                         cppi5_hdesc_reset_hbdesc(desc);
3268                         cppi5_desc_set_retpolicy(&desc->hdr, 0, 0xffff);
3269                 }
3270
3271                 /* attach the sg buffer to the descriptor */
3272                 sg_addr |= asel;
3273                 cppi5_hdesc_attach_buf(desc, sg_addr, sg_len, sg_addr, sg_len);
3274
3275                 /* Link to the previous descriptor as a host buffer descriptor */
3276                 if (h_desc)
3277                         cppi5_hdesc_link_hbdesc(h_desc,
3278                                                 hwdesc->cppi5_desc_paddr | asel);
3279
3280                 if (uc->ud->match_data->type == DMA_TYPE_PKTDMA ||
3281                     dir == DMA_MEM_TO_DEV)
3282                         h_desc = desc;
3283         }
3284
3285         if (d->residue >= SZ_4M) {
3286                 dev_err(uc->ud->dev,
3287                         "%s: Transfer size %u is over the supported 4M range\n",
3288                         __func__, d->residue);
3289                 udma_free_hwdesc(uc, d);
3290                 kfree(d);
3291                 return NULL;
3292         }
3293
3294         h_desc = d->hwdesc[0].cppi5_desc_vaddr;
3295         cppi5_hdesc_set_pktlen(h_desc, d->residue);
3296
3297         return d;
3298 }
3299
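/*
 * Client metadata handling (dmaengine metadata_ops): the metadata lives in
 * the EPIB and the protocol specific data area of the first host
 * descriptor, so it is only available for packet mode channels with a
 * non-zero metadata size.
 */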
3300 static int udma_attach_metadata(struct dma_async_tx_descriptor *desc,
3301                                 void *data, size_t len)
3302 {
3303         struct udma_desc *d = to_udma_desc(desc);
3304         struct udma_chan *uc = to_udma_chan(desc->chan);
3305         struct cppi5_host_desc_t *h_desc;
3306         u32 psd_size = len;
3307         u32 flags = 0;
3308
3309         if (!uc->config.pkt_mode || !uc->config.metadata_size)
3310                 return -ENOTSUPP;
3311
3312         if (!data || len > uc->config.metadata_size)
3313                 return -EINVAL;
3314
3315         if (uc->config.needs_epib && len < CPPI5_INFO0_HDESC_EPIB_SIZE)
3316                 return -EINVAL;
3317
3318         h_desc = d->hwdesc[0].cppi5_desc_vaddr;
3319         if (d->dir == DMA_MEM_TO_DEV)
3320                 memcpy(h_desc->epib, data, len);
3321
3322         if (uc->config.needs_epib)
3323                 psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE;
3324
3325         d->metadata = data;
3326         d->metadata_size = len;
3327         if (uc->config.needs_epib)
3328                 flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT;
3329
3330         cppi5_hdesc_update_flags(h_desc, flags);
3331         cppi5_hdesc_update_psdata_size(h_desc, psd_size);
3332
3333         return 0;
3334 }
3335
3336 static void *udma_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
3337                                    size_t *payload_len, size_t *max_len)
3338 {
3339         struct udma_desc *d = to_udma_desc(desc);
3340         struct udma_chan *uc = to_udma_chan(desc->chan);
3341         struct cppi5_host_desc_t *h_desc;
3342
3343         if (!uc->config.pkt_mode || !uc->config.metadata_size)
3344                 return ERR_PTR(-ENOTSUPP);
3345
3346         h_desc = d->hwdesc[0].cppi5_desc_vaddr;
3347
3348         *max_len = uc->config.metadata_size;
3349
3350         *payload_len = cppi5_hdesc_epib_present(&h_desc->hdr) ?
3351                        CPPI5_INFO0_HDESC_EPIB_SIZE : 0;
3352         *payload_len += cppi5_hdesc_get_psdata_size(h_desc);
3353
3354         return h_desc->epib;
3355 }
3356
3357 static int udma_set_metadata_len(struct dma_async_tx_descriptor *desc,
3358                                  size_t payload_len)
3359 {
3360         struct udma_desc *d = to_udma_desc(desc);
3361         struct udma_chan *uc = to_udma_chan(desc->chan);
3362         struct cppi5_host_desc_t *h_desc;
3363         u32 psd_size = payload_len;
3364         u32 flags = 0;
3365
3366         if (!uc->config.pkt_mode || !uc->config.metadata_size)
3367                 return -ENOTSUPP;
3368
3369         if (payload_len > uc->config.metadata_size)
3370                 return -EINVAL;
3371
3372         if (uc->config.needs_epib && payload_len < CPPI5_INFO0_HDESC_EPIB_SIZE)
3373                 return -EINVAL;
3374
3375         h_desc = d->hwdesc[0].cppi5_desc_vaddr;
3376
3377         if (uc->config.needs_epib) {
3378                 psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE;
3379                 flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT;
3380         }
3381
3382         cppi5_hdesc_update_flags(h_desc, flags);
3383         cppi5_hdesc_update_psdata_size(h_desc, psd_size);
3384
3385         return 0;
3386 }
3387
3388 static struct dma_descriptor_metadata_ops metadata_ops = {
3389         .attach = udma_attach_metadata,
3390         .get_ptr = udma_get_metadata_ptr,
3391         .set_len = udma_set_metadata_len,
3392 };
3393
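/*
 * device_prep_slave_sg callback: pick packet mode, plain slave TR or
 * triggered TR setup based on the channel configuration, then apply the
 * static TR parameters for remote PDMA endpoints.
 */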
3394 static struct dma_async_tx_descriptor *
3395 udma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
3396                    unsigned int sglen, enum dma_transfer_direction dir,
3397                    unsigned long tx_flags, void *context)
3398 {
3399         struct udma_chan *uc = to_udma_chan(chan);
3400         enum dma_slave_buswidth dev_width;
3401         struct udma_desc *d;
3402         u32 burst;
3403
3404         if (dir != uc->config.dir &&
3405             (uc->config.dir == DMA_MEM_TO_MEM && !uc->config.tr_trigger_type)) {
3406                 dev_err(chan->device->dev,
3407                         "%s: chan%d is for %s, not supporting %s\n",
3408                         __func__, uc->id,
3409                         dmaengine_get_direction_text(uc->config.dir),
3410                         dmaengine_get_direction_text(dir));
3411                 return NULL;
3412         }
3413
3414         if (dir == DMA_DEV_TO_MEM) {
3415                 dev_width = uc->cfg.src_addr_width;
3416                 burst = uc->cfg.src_maxburst;
3417         } else if (dir == DMA_MEM_TO_DEV) {
3418                 dev_width = uc->cfg.dst_addr_width;
3419                 burst = uc->cfg.dst_maxburst;
3420         } else {
3421                 dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
3422                 return NULL;
3423         }
3424
3425         if (!burst)
3426                 burst = 1;
3427
3428         uc->config.tx_flags = tx_flags;
3429
3430         if (uc->config.pkt_mode)
3431                 d = udma_prep_slave_sg_pkt(uc, sgl, sglen, dir, tx_flags,
3432                                            context);
3433         else if (is_slave_direction(uc->config.dir))
3434                 d = udma_prep_slave_sg_tr(uc, sgl, sglen, dir, tx_flags,
3435                                           context);
3436         else
3437                 d = udma_prep_slave_sg_triggered_tr(uc, sgl, sglen, dir,
3438                                                     tx_flags, context);
3439
3440         if (!d)
3441                 return NULL;
3442
3443         d->dir = dir;
3444         d->desc_idx = 0;
3445         d->tr_idx = 0;
3446
3447         /* static TR for remote PDMA */
3448         if (udma_configure_statictr(uc, d, dev_width, burst)) {
3449                 dev_err(uc->ud->dev,
3450                         "%s: StaticTR Z is limited to a maximum of 4095 (%u)\n",
3451                         __func__, d->static_tr.bstcnt);
3452
3453                 udma_free_hwdesc(uc, d);
3454                 kfree(d);
3455                 return NULL;
3456         }
3457
3458         if (uc->config.metadata_size)
3459                 d->vd.tx.metadata_ops = &metadata_ops;
3460
3461         return vchan_tx_prep(&uc->vc, &d->vd, tx_flags);
3462 }
3463
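/*
 * Cyclic TR mode: one or two TYPE1 TRs per period (two when the period
 * cannot be covered by a single icnt0 * icnt1 block). The event on the
 * period-closing TR is suppressed when DMA_PREP_INTERRUPT is not set.
 */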
3464 static struct udma_desc *
3465 udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
3466                         size_t buf_len, size_t period_len,
3467                         enum dma_transfer_direction dir, unsigned long flags)
3468 {
3469         struct udma_desc *d;
3470         size_t tr_size, period_addr;
3471         struct cppi5_tr_type1_t *tr_req;
3472         unsigned int periods = buf_len / period_len;
3473         u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
3474         unsigned int i;
3475         int num_tr;
3476
3477         num_tr = udma_get_tr_counters(period_len, __ffs(buf_addr), &tr0_cnt0,
3478                                       &tr0_cnt1, &tr1_cnt0);
3479         if (num_tr < 0) {
3480                 dev_err(uc->ud->dev, "size %zu is not supported\n",
3481                         period_len);
3482                 return NULL;
3483         }
3484
3485         /* Now allocate and set up the descriptor. */
3486         tr_size = sizeof(struct cppi5_tr_type1_t);
3487         d = udma_alloc_tr_desc(uc, tr_size, periods * num_tr, dir);
3488         if (!d)
3489                 return NULL;
3490
3491         tr_req = d->hwdesc[0].tr_req_base;
3492         if (uc->ud->match_data->type == DMA_TYPE_UDMA)
3493                 period_addr = buf_addr;
3494         else
3495                 period_addr = buf_addr |
3496                         ((u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT);
3497
3498         for (i = 0; i < periods; i++) {
3499                 int tr_idx = i * num_tr;
3500
3501                 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false,
3502                               false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3503
3504                 tr_req[tr_idx].addr = period_addr;
3505                 tr_req[tr_idx].icnt0 = tr0_cnt0;
3506                 tr_req[tr_idx].icnt1 = tr0_cnt1;
3507                 tr_req[tr_idx].dim1 = tr0_cnt0;
3508
3509                 if (num_tr == 2) {
3510                         cppi5_tr_csf_set(&tr_req[tr_idx].flags,
3511                                          CPPI5_TR_CSF_SUPR_EVT);
3512                         tr_idx++;
3513
3514                         cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1,
3515                                       false, false,
3516                                       CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3517
3518                         tr_req[tr_idx].addr = period_addr + tr0_cnt1 * tr0_cnt0;
3519                         tr_req[tr_idx].icnt0 = tr1_cnt0;
3520                         tr_req[tr_idx].icnt1 = 1;
3521                         tr_req[tr_idx].dim1 = tr1_cnt0;
3522                 }
3523
3524                 if (!(flags & DMA_PREP_INTERRUPT))
3525                         cppi5_tr_csf_set(&tr_req[tr_idx].flags,
3526                                          CPPI5_TR_CSF_SUPR_EVT);
3527
3528                 period_addr += period_len;
3529         }
3530
3531         return d;
3532 }
3533
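/*
 * Cyclic packet mode: one host descriptor per period, each attached to
 * its slice of the buffer. The period count is bounded by the default
 * ring size and each period must stay below the 4M packet length limit.
 */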
3534 static struct udma_desc *
3535 udma_prep_dma_cyclic_pkt(struct udma_chan *uc, dma_addr_t buf_addr,
3536                          size_t buf_len, size_t period_len,
3537                          enum dma_transfer_direction dir, unsigned long flags)
3538 {
3539         struct udma_desc *d;
3540         u32 ring_id;
3541         int i;
3542         int periods = buf_len / period_len;
3543
3544         if (periods > (K3_UDMA_DEFAULT_RING_SIZE - 1))
3545                 return NULL;
3546
3547         if (period_len >= SZ_4M)
3548                 return NULL;
3549
3550         d = kzalloc(struct_size(d, hwdesc, periods), GFP_NOWAIT);
3551         if (!d)
3552                 return NULL;
3553
3554         d->hwdesc_count = periods;
3555
3556         /* TODO: re-check this... */
3557         if (dir == DMA_DEV_TO_MEM)
3558                 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
3559         else
3560                 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
3561
3562         if (uc->ud->match_data->type != DMA_TYPE_UDMA)
3563                 buf_addr |= (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT;
3564
3565         for (i = 0; i < periods; i++) {
3566                 struct udma_hwdesc *hwdesc = &d->hwdesc[i];
3567                 dma_addr_t period_addr = buf_addr + (period_len * i);
3568                 struct cppi5_host_desc_t *h_desc;
3569
3570                 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
3571                                                 GFP_NOWAIT,
3572                                                 &hwdesc->cppi5_desc_paddr);
3573                 if (!hwdesc->cppi5_desc_vaddr) {
3574                         dev_err(uc->ud->dev,
3575                                 "descriptor%d allocation failed\n", i);
3576
3577                         udma_free_hwdesc(uc, d);
3578                         kfree(d);
3579                         return NULL;
3580                 }
3581
3582                 hwdesc->cppi5_desc_size = uc->config.hdesc_size;
3583                 h_desc = hwdesc->cppi5_desc_vaddr;
3584
3585                 cppi5_hdesc_init(h_desc, 0, 0);
3586                 cppi5_hdesc_set_pktlen(h_desc, period_len);
3587
3588                 /* Flow and Packet ID */
3589                 cppi5_desc_set_pktids(&h_desc->hdr, uc->id,
3590                                       CPPI5_INFO1_DESC_FLOWID_DEFAULT);
3591                 cppi5_desc_set_retpolicy(&h_desc->hdr, 0, ring_id);
3592
3593                 /* attach each period to a new descriptor */
3594                 cppi5_hdesc_attach_buf(h_desc,
3595                                        period_addr, period_len,
3596                                        period_addr, period_len);
3597         }
3598
3599         return d;
3600 }
3601
3602 static struct dma_async_tx_descriptor *
3603 udma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
3604                      size_t period_len, enum dma_transfer_direction dir,
3605                      unsigned long flags)
3606 {
3607         struct udma_chan *uc = to_udma_chan(chan);
3608         enum dma_slave_buswidth dev_width;
3609         struct udma_desc *d;
3610         u32 burst;
3611
3612         if (dir != uc->config.dir) {
3613                 dev_err(chan->device->dev,
3614                         "%s: chan%d is for %s, not supporting %s\n",
3615                         __func__, uc->id,
3616                         dmaengine_get_direction_text(uc->config.dir),
3617                         dmaengine_get_direction_text(dir));
3618                 return NULL;
3619         }
3620
3621         uc->cyclic = true;
3622
3623         if (dir == DMA_DEV_TO_MEM) {
3624                 dev_width = uc->cfg.src_addr_width;
3625                 burst = uc->cfg.src_maxburst;
3626         } else if (dir == DMA_MEM_TO_DEV) {
3627                 dev_width = uc->cfg.dst_addr_width;
3628                 burst = uc->cfg.dst_maxburst;
3629         } else {
3630                 dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
3631                 return NULL;
3632         }
3633
3634         if (!burst)
3635                 burst = 1;
3636
3637         if (uc->config.pkt_mode)
3638                 d = udma_prep_dma_cyclic_pkt(uc, buf_addr, buf_len, period_len,
3639                                              dir, flags);
3640         else
3641                 d = udma_prep_dma_cyclic_tr(uc, buf_addr, buf_len, period_len,
3642                                             dir, flags);
3643
3644         if (!d)
3645                 return NULL;
3646
3647         d->sglen = buf_len / period_len;
3648
3649         d->dir = dir;
3650         d->residue = buf_len;
3651
3652         /* static TR for remote PDMA */
3653         if (udma_configure_statictr(uc, d, dev_width, burst)) {
3654                 dev_err(uc->ud->dev,
3655                         "%s: StaticTR Z is limited to a maximum of 4095 (%u)\n",
3656                         __func__, d->static_tr.bstcnt);
3657
3658                 udma_free_hwdesc(uc, d);
3659                 kfree(d);
3660                 return NULL;
3661         }
3662
3663         if (uc->config.metadata_size)
3664                 d->vd.tx.metadata_ops = &metadata_ops;
3665
3666         return vchan_tx_prep(&uc->vc, &d->vd, flags);
3667 }
3668
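/*
 * memcpy uses a pair of TYPE15 TRs: as used here, udma_get_tr_counters()
 * splits @len into a tr0_cnt0 * tr0_cnt1 bulk block plus, when needed, a
 * tr1_cnt0 byte remainder that is covered by the second TR.
 */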
3669 static struct dma_async_tx_descriptor *
3670 udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
3671                      size_t len, unsigned long tx_flags)
3672 {
3673         struct udma_chan *uc = to_udma_chan(chan);
3674         struct udma_desc *d;
3675         struct cppi5_tr_type15_t *tr_req;
3676         int num_tr;
3677         size_t tr_size = sizeof(struct cppi5_tr_type15_t);
3678         u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
3679
3680         if (uc->config.dir != DMA_MEM_TO_MEM) {
3681                 dev_err(chan->device->dev,
3682                         "%s: chan%d is for %s, not supporting %s\n",
3683                         __func__, uc->id,
3684                         dmaengine_get_direction_text(uc->config.dir),
3685                         dmaengine_get_direction_text(DMA_MEM_TO_MEM));
3686                 return NULL;
3687         }
3688
3689         num_tr = udma_get_tr_counters(len, __ffs(src | dest), &tr0_cnt0,
3690                                       &tr0_cnt1, &tr1_cnt0);
3691         if (num_tr < 0) {
3692                 dev_err(uc->ud->dev, "size %zu is not supported\n",
3693                         len);
3694                 return NULL;
3695         }
3696
3697         d = udma_alloc_tr_desc(uc, tr_size, num_tr, DMA_MEM_TO_MEM);
3698         if (!d)
3699                 return NULL;
3700
3701         d->dir = DMA_MEM_TO_MEM;
3702         d->desc_idx = 0;
3703         d->tr_idx = 0;
3704         d->residue = len;
3705
3706         if (uc->ud->match_data->type != DMA_TYPE_UDMA) {
3707                 src |= (u64)uc->ud->asel << K3_ADDRESS_ASEL_SHIFT;
3708                 dest |= (u64)uc->ud->asel << K3_ADDRESS_ASEL_SHIFT;
3709         }
3710
3711         tr_req = d->hwdesc[0].tr_req_base;
3712
3713         cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true,
3714                       CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3715         cppi5_tr_csf_set(&tr_req[0].flags, CPPI5_TR_CSF_SUPR_EVT);
3716
3717         tr_req[0].addr = src;
3718         tr_req[0].icnt0 = tr0_cnt0;
3719         tr_req[0].icnt1 = tr0_cnt1;
3720         tr_req[0].icnt2 = 1;
3721         tr_req[0].icnt3 = 1;
3722         tr_req[0].dim1 = tr0_cnt0;
3723
3724         tr_req[0].daddr = dest;
3725         tr_req[0].dicnt0 = tr0_cnt0;
3726         tr_req[0].dicnt1 = tr0_cnt1;
3727         tr_req[0].dicnt2 = 1;
3728         tr_req[0].dicnt3 = 1;
3729         tr_req[0].ddim1 = tr0_cnt0;
3730
3731         if (num_tr == 2) {
3732                 cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true,
3733                               CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3734                 cppi5_tr_csf_set(&tr_req[1].flags, CPPI5_TR_CSF_SUPR_EVT);
3735
3736                 tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0;
3737                 tr_req[1].icnt0 = tr1_cnt0;
3738                 tr_req[1].icnt1 = 1;
3739                 tr_req[1].icnt2 = 1;
3740                 tr_req[1].icnt3 = 1;
3741
3742                 tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0;
3743                 tr_req[1].dicnt0 = tr1_cnt0;
3744                 tr_req[1].dicnt1 = 1;
3745                 tr_req[1].dicnt2 = 1;
3746                 tr_req[1].dicnt3 = 1;
3747         }
3748
3749         cppi5_tr_csf_set(&tr_req[num_tr - 1].flags,
3750                          CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP);
3751
3752         if (uc->config.metadata_size)
3753                 d->vd.tx.metadata_ops = &metadata_ops;
3754
3755         return vchan_tx_prep(&uc->vc, &d->vd, tx_flags);
3756 }
3757
3758 static void udma_issue_pending(struct dma_chan *chan)
3759 {
3760         struct udma_chan *uc = to_udma_chan(chan);
3761         unsigned long flags;
3762
3763         spin_lock_irqsave(&uc->vc.lock, flags);
3764
3765         /* If we have something pending and no active descriptor, then */
3766         if (vchan_issue_pending(&uc->vc) && !uc->desc) {
3767                 /*
3768                  * start a descriptor, unless the channel is marked as
3769                  * terminating while still running (teardown has not
3770                  * completed yet).
3771                  */
3772                 if (!(uc->state == UDMA_CHAN_IS_TERMINATING &&
3773                       udma_is_chan_running(uc)))
3774                         udma_start(uc);
3775         }
3776
3777         spin_unlock_irqrestore(&uc->vc.lock, flags);
3778 }
3779
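/*
 * Report the status of a cookie. For the in-flight descriptor the residue
 * is estimated from the channel realtime byte counters; for non-native
 * (PDMA) endpoints the difference between the local and the peer byte
 * count is reported as in-flight bytes.
 */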
3780 static enum dma_status udma_tx_status(struct dma_chan *chan,
3781                                       dma_cookie_t cookie,
3782                                       struct dma_tx_state *txstate)
3783 {
3784         struct udma_chan *uc = to_udma_chan(chan);
3785         enum dma_status ret;
3786         unsigned long flags;
3787
3788         spin_lock_irqsave(&uc->vc.lock, flags);
3789
3790         ret = dma_cookie_status(chan, cookie, txstate);
3791
3792         if (!udma_is_chan_running(uc))
3793                 ret = DMA_COMPLETE;
3794
3795         if (ret == DMA_IN_PROGRESS && udma_is_chan_paused(uc))
3796                 ret = DMA_PAUSED;
3797
3798         if (ret == DMA_COMPLETE || !txstate)
3799                 goto out;
3800
3801         if (uc->desc && uc->desc->vd.tx.cookie == cookie) {
3802                 u32 peer_bcnt = 0;
3803                 u32 bcnt = 0;
3804                 u32 residue = uc->desc->residue;
3805                 u32 delay = 0;
3806
3807                 if (uc->desc->dir == DMA_MEM_TO_DEV) {
3808                         bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG);
3809
3810                         if (uc->config.ep_type != PSIL_EP_NATIVE) {
3811                                 peer_bcnt = udma_tchanrt_read(uc,
3812                                                 UDMA_CHAN_RT_PEER_BCNT_REG);
3813
3814                                 if (bcnt > peer_bcnt)
3815                                         delay = bcnt - peer_bcnt;
3816                         }
3817                 } else if (uc->desc->dir == DMA_DEV_TO_MEM) {
3818                         bcnt = udma_rchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
3819
3820                         if (uc->config.ep_type != PSIL_EP_NATIVE) {
3821                                 peer_bcnt = udma_rchanrt_read(uc,
3822                                                 UDMA_CHAN_RT_PEER_BCNT_REG);
3823
3824                                 if (peer_bcnt > bcnt)
3825                                         delay = peer_bcnt - bcnt;
3826                         }
3827                 } else {
3828                         bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
3829                 }
3830
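                /*
                 * The RT byte counters keep running across iterations, so
                 * compare them modulo the descriptor size: a non-zero
                 * multiple of the full size means the current transfer has
                 * completed.
                 */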
3831                 if (bcnt && !(bcnt % uc->desc->residue))
3832                         residue = 0;
3833                 else
3834                         residue -= bcnt % uc->desc->residue;
3835
3836                 if (!residue && (uc->config.dir == DMA_DEV_TO_MEM || !delay)) {
3837                         ret = DMA_COMPLETE;
3838                         delay = 0;
3839                 }
3840
3841                 dma_set_residue(txstate, residue);
3842                 dma_set_in_flight_bytes(txstate, delay);
3843
3844         } else {
3845                 ret = DMA_COMPLETE;
3846         }
3847
3848 out:
3849         spin_unlock_irqrestore(&uc->vc.lock, flags);
3850         return ret;
3851 }
3852
3853 static int udma_pause(struct dma_chan *chan)
3854 {
3855         struct udma_chan *uc = to_udma_chan(chan);
3856
3857         /* pause the channel */
3858         switch (uc->config.dir) {
3859         case DMA_DEV_TO_MEM:
3860                 udma_rchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
3861                                          UDMA_PEER_RT_EN_PAUSE,
3862                                          UDMA_PEER_RT_EN_PAUSE);
3863                 break;
3864         case DMA_MEM_TO_DEV:
3865                 udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
3866                                          UDMA_PEER_RT_EN_PAUSE,
3867                                          UDMA_PEER_RT_EN_PAUSE);
3868                 break;
3869         case DMA_MEM_TO_MEM:
3870                 udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_CTL_REG,
3871                                          UDMA_CHAN_RT_CTL_PAUSE,
3872                                          UDMA_CHAN_RT_CTL_PAUSE);
3873                 break;
3874         default:
3875                 return -EINVAL;
3876         }
3877
3878         return 0;
3879 }
3880
3881 static int udma_resume(struct dma_chan *chan)
3882 {
3883         struct udma_chan *uc = to_udma_chan(chan);
3884
3885         /* resume the channel */
3886         switch (uc->config.dir) {
3887         case DMA_DEV_TO_MEM:
3888                 udma_rchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
3889                                          UDMA_PEER_RT_EN_PAUSE, 0);
3890
3891                 break;
3892         case DMA_MEM_TO_DEV:
3893                 udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
3894                                          UDMA_PEER_RT_EN_PAUSE, 0);
3895                 break;
3896         case DMA_MEM_TO_MEM:
3897                 udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_CTL_REG,
3898                                          UDMA_CHAN_RT_CTL_PAUSE, 0);
3899                 break;
3900         default:
3901                 return -EINVAL;
3902         }
3903
3904         return 0;
3905 }
3906
3907 static int udma_terminate_all(struct dma_chan *chan)
3908 {
3909         struct udma_chan *uc = to_udma_chan(chan);
3910         unsigned long flags;
3911         LIST_HEAD(head);
3912
3913         spin_lock_irqsave(&uc->vc.lock, flags);
3914
3915         if (udma_is_chan_running(uc))
3916                 udma_stop(uc);
3917
3918         if (uc->desc) {
3919                 uc->terminated_desc = uc->desc;
3920                 uc->desc = NULL;
3921                 uc->terminated_desc->terminated = true;
3922                 cancel_delayed_work(&uc->tx_drain.work);
3923         }
3924
3925         uc->paused = false;
3926
3927         vchan_get_all_descriptors(&uc->vc, &head);
3928         spin_unlock_irqrestore(&uc->vc.lock, flags);
3929         vchan_dma_desc_free_list(&uc->vc, &head);
3930
3931         return 0;
3932 }
3933
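/*
 * Flush the vchan work, then wait for a terminating channel to finish its
 * hardware teardown (1s timeout, with a forced reset as fallback) before
 * resetting the rings.
 */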
3934 static void udma_synchronize(struct dma_chan *chan)
3935 {
3936         struct udma_chan *uc = to_udma_chan(chan);
3937         unsigned long timeout = msecs_to_jiffies(1000);
3938
3939         vchan_synchronize(&uc->vc);
3940
3941         if (uc->state == UDMA_CHAN_IS_TERMINATING) {
3942                 timeout = wait_for_completion_timeout(&uc->teardown_completed,
3943                                                       timeout);
3944                 if (!timeout) {
3945                         dev_warn(uc->ud->dev, "chan%d teardown timeout!\n",
3946                                  uc->id);
3947                         udma_dump_chan_stdata(uc);
3948                         udma_reset_chan(uc, true);
3949                 }
3950         }
3951
3952         udma_reset_chan(uc, false);
3953         if (udma_is_chan_running(uc))
3954                 dev_warn(uc->ud->dev, "chan%d refused to stop!\n", uc->id);
3955
3956         cancel_delayed_work_sync(&uc->tx_drain.work);
3957         udma_reset_rings(uc);
3958 }
3959
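/*
 * Runs right before a descriptor's callback is invoked: fetch the EPIB
 * into the descriptor metadata when needed and, for host descriptors,
 * derive the dmaengine result from the residue vs. the packet length.
 */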
3960 static void udma_desc_pre_callback(struct virt_dma_chan *vc,
3961                                    struct virt_dma_desc *vd,
3962                                    struct dmaengine_result *result)
3963 {
3964         struct udma_chan *uc = to_udma_chan(&vc->chan);
3965         struct udma_desc *d;
3966
3967         if (!vd)
3968                 return;
3969
3970         d = to_udma_desc(&vd->tx);
3971
3972         if (d->metadata_size)
3973                 udma_fetch_epib(uc, d);
3974
3975         /* Provide residue information for the client */
3976         if (result) {
3977                 void *desc_vaddr = udma_curr_cppi5_desc_vaddr(d, d->desc_idx);
3978
3979                 if (cppi5_desc_get_type(desc_vaddr) ==
3980                     CPPI5_INFO0_DESC_TYPE_VAL_HOST) {
3981                         result->residue = d->residue -
3982                                           cppi5_hdesc_get_pktlen(desc_vaddr);
3983                         if (result->residue)
3984                                 result->result = DMA_TRANS_ABORTED;
3985                         else
3986                                 result->result = DMA_TRANS_NOERROR;
3987                 } else {
3988                         result->residue = 0;
3989                         result->result = DMA_TRANS_NOERROR;
3990                 }
3991         }
3992 }
3993
3994 /*
3995  * This tasklet handles completed DMA descriptors by invoking their
3996  * callbacks (the pending cyclic callback first) and freeing them.
3997  */
3998 static void udma_vchan_complete(struct tasklet_struct *t)
3999 {
4000         struct virt_dma_chan *vc = from_tasklet(vc, t, task);
4001         struct virt_dma_desc *vd, *_vd;
4002         struct dmaengine_desc_callback cb;
4003         LIST_HEAD(head);
4004
4005         spin_lock_irq(&vc->lock);
4006         list_splice_tail_init(&vc->desc_completed, &head);
4007         vd = vc->cyclic;
4008         if (vd) {
4009                 vc->cyclic = NULL;
4010                 dmaengine_desc_get_callback(&vd->tx, &cb);
4011         } else {
4012                 memset(&cb, 0, sizeof(cb));
4013         }
4014         spin_unlock_irq(&vc->lock);
4015
4016         udma_desc_pre_callback(vc, vd, NULL);
4017         dmaengine_desc_callback_invoke(&cb, NULL);
4018
4019         list_for_each_entry_safe(vd, _vd, &head, node) {
4020                 struct dmaengine_result result;
4021
4022                 dmaengine_desc_get_callback(&vd->tx, &cb);
4023
4024                 list_del(&vd->node);
4025
4026                 udma_desc_pre_callback(vc, vd, &result);
4027                 dmaengine_desc_callback_invoke(&cb, &result);
4028
4029                 vchan_vdesc_fini(vd);
4030         }
4031 }
4032
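/*
 * Undo everything done during channel setup: terminate outstanding work,
 * free the ring and UDMA IRQs, release the PSI-L pairing and hand back
 * the bchan/tchan/rchan resources and the hdesc pool if one was used.
 */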
4033 static void udma_free_chan_resources(struct dma_chan *chan)
4034 {
4035         struct udma_chan *uc = to_udma_chan(chan);
4036         struct udma_dev *ud = to_udma_dev(chan->device);
4037
4038         udma_terminate_all(chan);
4039         if (uc->terminated_desc) {
4040                 udma_reset_chan(uc, false);
4041                 udma_reset_rings(uc);
4042         }
4043
4044         cancel_delayed_work_sync(&uc->tx_drain.work);
4045
4046         if (uc->irq_num_ring > 0) {
4047                 free_irq(uc->irq_num_ring, uc);
4048
4049                 uc->irq_num_ring = 0;
4050         }
4051         if (uc->irq_num_udma > 0) {
4052                 free_irq(uc->irq_num_udma, uc);
4053
4054                 uc->irq_num_udma = 0;
4055         }
4056
4057         /* Release PSI-L pairing */
4058         if (uc->psil_paired) {
4059                 navss_psil_unpair(ud, uc->config.src_thread,
4060                                   uc->config.dst_thread);
4061                 uc->psil_paired = false;
4062         }
4063
4064         vchan_free_chan_resources(&uc->vc);
4065         tasklet_kill(&uc->vc.task);
4066
4067         bcdma_free_bchan_resources(uc);
4068         udma_free_tx_resources(uc);
4069         udma_free_rx_resources(uc);
4070         udma_reset_uchan(uc);
4071
4072         if (uc->use_dma_pool) {
4073                 dma_pool_destroy(uc->hdesc_pool);
4074                 uc->use_dma_pool = false;
4075         }
4076 }
4077
4078 static struct platform_driver udma_driver;
4079 static struct platform_driver bcdma_driver;
4080 static struct platform_driver pktdma_driver;
4081
4082 struct udma_filter_param {
4083         int remote_thread_id;
4084         u32 atype;
4085         u32 asel;
4086         u32 tr_trigger_type;
4087 };
4088
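/*
 * Filter callback used by udma_of_xlate(): validate the translated
 * dma-spec parameters, look up the PSI-L endpoint configuration of the
 * remote thread and derive the channel configuration (direction, packet
 * vs. TR mode, metadata sizes) from it.
 */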
4089 static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
4090 {
4091         struct udma_chan_config *ucc;
4092         struct psil_endpoint_config *ep_config;
4093         struct udma_filter_param *filter_param;
4094         struct udma_chan *uc;
4095         struct udma_dev *ud;
4096
4097         if (chan->device->dev->driver != &udma_driver.driver &&
4098             chan->device->dev->driver != &bcdma_driver.driver &&
4099             chan->device->dev->driver != &pktdma_driver.driver)
4100                 return false;
4101
4102         uc = to_udma_chan(chan);
4103         ucc = &uc->config;
4104         ud = uc->ud;
4105         filter_param = param;
4106
4107         if (filter_param->atype > 2) {
4108                 dev_err(ud->dev, "Invalid channel atype: %u\n",
4109                         filter_param->atype);
4110                 return false;
4111         }
4112
4113         if (filter_param->asel > 15) {
4114                 dev_err(ud->dev, "Invalid channel asel: %u\n",
4115                         filter_param->asel);
4116                 return false;
4117         }
4118
4119         ucc->remote_thread_id = filter_param->remote_thread_id;
4120         ucc->atype = filter_param->atype;
4121         ucc->asel = filter_param->asel;
4122         ucc->tr_trigger_type = filter_param->tr_trigger_type;
4123
4124         if (ucc->tr_trigger_type) {
4125                 ucc->dir = DMA_MEM_TO_MEM;
4126                 goto triggered_bchan;
4127         } else if (ucc->remote_thread_id & K3_PSIL_DST_THREAD_ID_OFFSET) {
4128                 ucc->dir = DMA_MEM_TO_DEV;
4129         } else {
4130                 ucc->dir = DMA_DEV_TO_MEM;
4131         }
4132
4133         ep_config = psil_get_ep_config(ucc->remote_thread_id);
4134         if (IS_ERR(ep_config)) {
4135                 dev_err(ud->dev, "No configuration for psi-l thread 0x%04x\n",
4136                         ucc->remote_thread_id);
4137                 ucc->dir = DMA_MEM_TO_MEM;
4138                 ucc->remote_thread_id = -1;
4139                 ucc->atype = 0;
4140                 ucc->asel = 0;
4141                 return false;
4142         }
4143
4144         if (ud->match_data->type == DMA_TYPE_BCDMA &&
4145             ep_config->pkt_mode) {
4146                 dev_err(ud->dev,
4147                         "Only TR mode is supported (psi-l thread 0x%04x)\n",
4148                         ucc->remote_thread_id);
4149                 ucc->dir = DMA_MEM_TO_MEM;
4150                 ucc->remote_thread_id = -1;
4151                 ucc->atype = 0;
4152                 ucc->asel = 0;
4153                 return false;
4154         }
4155
4156         ucc->pkt_mode = ep_config->pkt_mode;
4157         ucc->channel_tpl = ep_config->channel_tpl;
4158         ucc->notdpkt = ep_config->notdpkt;
4159         ucc->ep_type = ep_config->ep_type;
4160
4161         if (ud->match_data->type == DMA_TYPE_PKTDMA &&
4162             ep_config->mapped_channel_id >= 0) {
4163                 ucc->mapped_channel_id = ep_config->mapped_channel_id;
4164                 ucc->default_flow_id = ep_config->default_flow_id;
4165         } else {
4166                 ucc->mapped_channel_id = -1;
4167                 ucc->default_flow_id = -1;
4168         }
4169
4170         if (ucc->ep_type != PSIL_EP_NATIVE) {
4171                 const struct udma_match_data *match_data = ud->match_data;
4172
4173                 if (match_data->flags & UDMA_FLAG_PDMA_ACC32)
4174                         ucc->enable_acc32 = ep_config->pdma_acc32;
4175                 if (match_data->flags & UDMA_FLAG_PDMA_BURST)
4176                         ucc->enable_burst = ep_config->pdma_burst;
4177         }
4178
4179         ucc->needs_epib = ep_config->needs_epib;
4180         ucc->psd_size = ep_config->psd_size;
4181         ucc->metadata_size =
4182                         (ucc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_SIZE : 0) +
4183                         ucc->psd_size;
4184
4185         if (ucc->pkt_mode)
4186                 ucc->hdesc_size = ALIGN(sizeof(struct cppi5_host_desc_t) +
4187                                  ucc->metadata_size, ud->desc_align);
4188
4189         dev_dbg(ud->dev, "chan%d: Remote thread: 0x%04x (%s)\n", uc->id,
4190                 ucc->remote_thread_id, dmaengine_get_direction_text(ucc->dir));
4191
4192         return true;
4193
4194 triggered_bchan:
4195         dev_dbg(ud->dev, "chan%d: triggered channel (type: %u)\n", uc->id,
4196                 ucc->tr_trigger_type);
4197
4198         return true;
4200 }
4201
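/*
 * #dma-cells translation: UDMA takes "thread-id [atype]", PKTDMA takes
 * "thread-id [asel]" and BCDMA takes "trigger-type thread-id asel".
 */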
4202 static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
4203                                       struct of_dma *ofdma)
4204 {
4205         struct udma_dev *ud = ofdma->of_dma_data;
4206         dma_cap_mask_t mask = ud->ddev.cap_mask;
4207         struct udma_filter_param filter_param;
4208         struct dma_chan *chan;
4209
4210         if (ud->match_data->type == DMA_TYPE_BCDMA) {
4211                 if (dma_spec->args_count != 3)
4212                         return NULL;
4213
4214                 filter_param.tr_trigger_type = dma_spec->args[0];
4215                 filter_param.remote_thread_id = dma_spec->args[1];
4216                 filter_param.asel = dma_spec->args[2];
4217                 filter_param.atype = 0;
4218         } else {
4219                 if (dma_spec->args_count != 1 && dma_spec->args_count != 2)
4220                         return NULL;
4221
4222                 filter_param.remote_thread_id = dma_spec->args[0];
4223                 filter_param.tr_trigger_type = 0;
4224                 if (dma_spec->args_count == 2) {
4225                         if (ud->match_data->type == DMA_TYPE_UDMA) {
4226                                 filter_param.atype = dma_spec->args[1];
4227                                 filter_param.asel = 0;
4228                         } else {
4229                                 filter_param.atype = 0;
4230                                 filter_param.asel = dma_spec->args[1];
4231                         }
4232                 } else {
4233                         filter_param.atype = 0;
4234                         filter_param.asel = 0;
4235                 }
4236         }
4237
4238         chan = __dma_request_channel(&mask, udma_dma_filter_fn, &filter_param,
4239                                      ofdma->of_node);
4240         if (!chan) {
4241                 dev_err(ud->dev, "failed to get a channel in %s\n", __func__);
4242                 return ERR_PTR(-EINVAL);
4243         }
4244
4245         return chan;
4246 }
4247
4248 static struct udma_match_data am654_main_data = {
4249         .type = DMA_TYPE_UDMA,
4250         .psil_base = 0x1000,
4251         .enable_memcpy_support = true,
4252         .statictr_z_mask = GENMASK(11, 0),
4253         .burst_size = {
4254                 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
4255                 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* H Channels */
4256                 0, /* No UH Channels */
4257         },
4258 };
4259
4260 static struct udma_match_data am654_mcu_data = {
4261         .type = DMA_TYPE_UDMA,
4262         .psil_base = 0x6000,
4263         .enable_memcpy_support = false,
4264         .statictr_z_mask = GENMASK(11, 0),
4265         .burst_size = {
4266                 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
4267                 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* H Channels */
4268                 0, /* No UH Channels */
4269         },
4270 };
4271
4272 static struct udma_match_data j721e_main_data = {
4273         .type = DMA_TYPE_UDMA,
4274         .psil_base = 0x1000,
4275         .enable_memcpy_support = true,
4276         .flags = UDMA_FLAGS_J7_CLASS,
4277         .statictr_z_mask = GENMASK(23, 0),
4278         .burst_size = {
4279                 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
4280                 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES, /* H Channels */
4281                 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES, /* UH Channels */
4282         },
4283 };
4284
4285 static struct udma_match_data j721e_mcu_data = {
4286         .type = DMA_TYPE_UDMA,
4287         .psil_base = 0x6000,
4288         .enable_memcpy_support = false, /* MEM_TO_MEM is slow via MCU UDMA */
4289         .flags = UDMA_FLAGS_J7_CLASS,
4290         .statictr_z_mask = GENMASK(23, 0),
4291         .burst_size = {
4292                 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
4293                 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_128_BYTES, /* H Channels */
4294                 0, /* No UH Channels */
4295         },
4296 };
4297
4298 static struct udma_match_data am64_bcdma_data = {
4299         .type = DMA_TYPE_BCDMA,
4300         .psil_base = 0x2000, /* for tchan and rchan, not applicable to bchan */
4301         .enable_memcpy_support = true, /* Supported via bchan */
4302         .flags = UDMA_FLAGS_J7_CLASS,
4303         .statictr_z_mask = GENMASK(23, 0),
4304         .burst_size = {
4305                 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
4306                 0, /* No H Channels */
4307                 0, /* No UH Channels */
4308         },
4309 };
4310
4311 static struct udma_match_data am64_pktdma_data = {
4312         .type = DMA_TYPE_PKTDMA,
4313         .psil_base = 0x1000,
4314         .enable_memcpy_support = false, /* PKTDMA does not support MEM_TO_MEM */
4315         .flags = UDMA_FLAGS_J7_CLASS,
4316         .statictr_z_mask = GENMASK(23, 0),
4317         .burst_size = {
4318                 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
4319                 0, /* No H Channels */
4320                 0, /* No UH Channels */
4321         },
4322 };
4323
4324 static const struct of_device_id udma_of_match[] = {
4325         {
4326                 .compatible = "ti,am654-navss-main-udmap",
4327                 .data = &am654_main_data,
4328         },
4329         {
4330                 .compatible = "ti,am654-navss-mcu-udmap",
4331                 .data = &am654_mcu_data,
4332         }, {
4333                 .compatible = "ti,j721e-navss-main-udmap",
4334                 .data = &j721e_main_data,
4335         }, {
4336                 .compatible = "ti,j721e-navss-mcu-udmap",
4337                 .data = &j721e_mcu_data,
4338         },
4339         { /* Sentinel */ },
4340 };
4341
4342 static const struct of_device_id bcdma_of_match[] = {
4343         {
4344                 .compatible = "ti,am64-dmss-bcdma",
4345                 .data = &am64_bcdma_data,
4346         },
4347         { /* Sentinel */ },
4348 };
4349
4350 static const struct of_device_id pktdma_of_match[] = {
4351         {
4352                 .compatible = "ti,am64-dmss-pktdma",
4353                 .data = &am64_pktdma_data,
4354         },
4355         { /* Sentinel */ },
4356 };
4357
4358 static struct udma_soc_data am654_soc_data = {
4359         .oes = {
4360                 .udma_rchan = 0x200,
4361         },
4362 };
4363
4364 static struct udma_soc_data j721e_soc_data = {
4365         .oes = {
4366                 .udma_rchan = 0x400,
4367         },
4368 };
4369
4370 static struct udma_soc_data j7200_soc_data = {
4371         .oes = {
4372                 .udma_rchan = 0x80,
4373         },
4374 };
4375
4376 static struct udma_soc_data am64_soc_data = {
4377         .oes = {
4378                 .bcdma_bchan_data = 0x2200,
4379                 .bcdma_bchan_ring = 0x2400,
4380                 .bcdma_tchan_data = 0x2800,
4381                 .bcdma_tchan_ring = 0x2a00,
4382                 .bcdma_rchan_data = 0x2e00,
4383                 .bcdma_rchan_ring = 0x3000,
4384                 .pktdma_tchan_flow = 0x1200,
4385                 .pktdma_rchan_flow = 0x1600,
4386         },
4387         .bcdma_trigger_event_offset = 0xc400,
4388 };
4389
4390 static const struct soc_device_attribute k3_soc_devices[] = {
4391         { .family = "AM65X", .data = &am654_soc_data },
4392         { .family = "J721E", .data = &j721e_soc_data },
4393         { .family = "J7200", .data = &j7200_soc_data },
4394         { .family = "AM64X", .data = &am64_soc_data },
4395         { .family = "J721S2", .data = &j721e_soc_data},
4396         { .family = "AM62X", .data = &am64_soc_data },
4397         { /* sentinel */ }
4398 };
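/*
 * Sketch of how the OES (Output Event Steering) offsets above are
 * consumed: the global event index for a resource is the per-SoC
 * offset plus the resource id, e.g. for a UDMA rchan:
 *
 *	irq_udma_idx = ud->soc_data->oes.udma_rchan + uc->rchan->id;
 *
 * which is then used as the hardware irq within the INTA MSI domain.
 */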
4399
4400 static int udma_get_mmrs(struct platform_device *pdev, struct udma_dev *ud)
4401 {
4402         u32 cap2, cap3, cap4;
4403         int i;
4404
4405         ud->mmrs[MMR_GCFG] = devm_platform_ioremap_resource_byname(pdev, mmr_names[MMR_GCFG]);
4406         if (IS_ERR(ud->mmrs[MMR_GCFG]))
4407                 return PTR_ERR(ud->mmrs[MMR_GCFG]);
4408
4409         cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28);
4410         cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
4411
4412         switch (ud->match_data->type) {
4413         case DMA_TYPE_UDMA:
4414                 ud->rflow_cnt = UDMA_CAP3_RFLOW_CNT(cap3);
4415                 ud->tchan_cnt = UDMA_CAP2_TCHAN_CNT(cap2);
4416                 ud->echan_cnt = UDMA_CAP2_ECHAN_CNT(cap2);
4417                 ud->rchan_cnt = UDMA_CAP2_RCHAN_CNT(cap2);
4418                 break;
4419         case DMA_TYPE_BCDMA:
4420                 ud->bchan_cnt = BCDMA_CAP2_BCHAN_CNT(cap2);
4421                 ud->tchan_cnt = BCDMA_CAP2_TCHAN_CNT(cap2);
4422                 ud->rchan_cnt = BCDMA_CAP2_RCHAN_CNT(cap2);
4423                 ud->rflow_cnt = ud->rchan_cnt;
4424                 break;
4425         case DMA_TYPE_PKTDMA:
4426                 cap4 = udma_read(ud->mmrs[MMR_GCFG], 0x30);
4427                 ud->tchan_cnt = UDMA_CAP2_TCHAN_CNT(cap2);
4428                 ud->rchan_cnt = UDMA_CAP2_RCHAN_CNT(cap2);
4429                 ud->rflow_cnt = UDMA_CAP3_RFLOW_CNT(cap3);
4430                 ud->tflow_cnt = PKTDMA_CAP4_TFLOW_CNT(cap4);
4431                 break;
4432         default:
4433                 return -EINVAL;
4434         }
4435
4436         for (i = 1; i < MMR_LAST; i++) {
4437                 if (i == MMR_BCHANRT && ud->bchan_cnt == 0)
4438                         continue;
4439                 if (i == MMR_TCHANRT && ud->tchan_cnt == 0)
4440                         continue;
4441                 if (i == MMR_RCHANRT && ud->rchan_cnt == 0)
4442                         continue;
4443
4444                 ud->mmrs[i] = devm_platform_ioremap_resource_byname(pdev, mmr_names[i]);
4445                 if (IS_ERR(ud->mmrs[i]))
4446                         return PTR_ERR(ud->mmrs[i]);
4447         }
4448
4449         return 0;
4450 }
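/*
 * Note on the reads above: the GCFG CAP2/CAP3/CAP4 registers (offsets
 * 0x28/0x2c/0x30) pack the per-type resource counts as bitfields,
 * extracted by the *_CAP*_CNT() macros. The loop then maps only the
 * channel runtime register regions that are actually populated, which
 * is why a zero bchan/tchan/rchan count skips the corresponding
 * "bchanrt"/"tchanrt"/"rchanrt" resource.
 */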
4451
4452 static void udma_mark_resource_ranges(struct udma_dev *ud, unsigned long *map,
4453                                       struct ti_sci_resource_desc *rm_desc,
4454                                       char *name)
4455 {
4456         bitmap_clear(map, rm_desc->start, rm_desc->num);
4457         bitmap_clear(map, rm_desc->start_sec, rm_desc->num_sec);
4458         dev_dbg(ud->dev, "ti_sci resource range for %s: %d:%d | %d:%d\n", name,
4459                 rm_desc->start, rm_desc->num, rm_desc->start_sec,
4460                 rm_desc->num_sec);
4461 }
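/*
 * Worked example: a TISCI descriptor of {start = 4, num = 2} clears
 * bits 4 and 5 in @map. Callers first bitmap_fill() the map, so after
 * marking, cleared bits are the resources Linux may allocate while set
 * bits remain reserved for other hosts.
 */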
4462
4463 static const char * const range_names[] = {
4464         [RM_RANGE_BCHAN] = "ti,sci-rm-range-bchan",
4465         [RM_RANGE_TCHAN] = "ti,sci-rm-range-tchan",
4466         [RM_RANGE_RCHAN] = "ti,sci-rm-range-rchan",
4467         [RM_RANGE_RFLOW] = "ti,sci-rm-range-rflow",
4468         [RM_RANGE_TFLOW] = "ti,sci-rm-range-tflow",
4469 };
4470
4471 static int udma_setup_resources(struct udma_dev *ud)
4472 {
4473         int ret, i, j;
4474         struct device *dev = ud->dev;
4475         struct ti_sci_resource *rm_res, irq_res;
4476         struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
4477         u32 cap3;
4478
4479         /* Set up the throughput level start indexes */
4480         cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
4481         if (of_device_is_compatible(dev->of_node,
4482                                     "ti,am654-navss-main-udmap")) {
4483                 ud->tchan_tpl.levels = 2;
4484                 ud->tchan_tpl.start_idx[0] = 8;
4485         } else if (of_device_is_compatible(dev->of_node,
4486                                            "ti,am654-navss-mcu-udmap")) {
4487                 ud->tchan_tpl.levels = 2;
4488                 ud->tchan_tpl.start_idx[0] = 2;
4489         } else if (UDMA_CAP3_UCHAN_CNT(cap3)) {
4490                 ud->tchan_tpl.levels = 3;
4491                 ud->tchan_tpl.start_idx[1] = UDMA_CAP3_UCHAN_CNT(cap3);
4492                 ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
4493         } else if (UDMA_CAP3_HCHAN_CNT(cap3)) {
4494                 ud->tchan_tpl.levels = 2;
4495                 ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
4496         } else {
4497                 ud->tchan_tpl.levels = 1;
4498         }
4499
4500         ud->rchan_tpl.levels = ud->tchan_tpl.levels;
4501         ud->rchan_tpl.start_idx[0] = ud->tchan_tpl.start_idx[0];
4502         ud->rchan_tpl.start_idx[1] = ud->tchan_tpl.start_idx[1];
4503
4504         ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
4505                                            sizeof(unsigned long), GFP_KERNEL);
4506         ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
4507                                   GFP_KERNEL);
4508         ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
4509                                            sizeof(unsigned long), GFP_KERNEL);
4510         ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
4511                                   GFP_KERNEL);
4512         ud->rflow_gp_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt),
4513                                               sizeof(unsigned long),
4514                                               GFP_KERNEL);
4515         ud->rflow_gp_map_allocated = devm_kcalloc(dev,
4516                                                   BITS_TO_LONGS(ud->rflow_cnt),
4517                                                   sizeof(unsigned long),
4518                                                   GFP_KERNEL);
4519         ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
4520                                         sizeof(unsigned long),
4521                                         GFP_KERNEL);
4522         ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
4523                                   GFP_KERNEL);
4524
4525         if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_gp_map ||
4526             !ud->rflow_gp_map_allocated || !ud->tchans || !ud->rchans ||
4527             !ud->rflows || !ud->rflow_in_use)
4528                 return -ENOMEM;
4529
4530         /*
4531          * RX flows with the same IDs as RX channels are reserved to be used
4532          * as default flows if the remote HW can't generate flow_ids. Those
4533          * RX flows can only be requested explicitly, by ID.
4534          */
4535         bitmap_set(ud->rflow_gp_map_allocated, 0, ud->rchan_cnt);
4536
4537         /* by default no GP rflows are assigned to Linux */
4538         bitmap_set(ud->rflow_gp_map, 0, ud->rflow_cnt);
4539
4540         /* Get resource ranges from tisci */
4541         for (i = 0; i < RM_RANGE_LAST; i++) {
4542                 if (i == RM_RANGE_BCHAN || i == RM_RANGE_TFLOW)
4543                         continue;
4544
4545                 tisci_rm->rm_ranges[i] =
4546                         devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
4547                                                     tisci_rm->tisci_dev_id,
4548                                                     (char *)range_names[i]);
4549         }
4550
4551         /* tchan ranges */
4552         rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
4553         if (IS_ERR(rm_res)) {
4554                 bitmap_zero(ud->tchan_map, ud->tchan_cnt);
4555                 irq_res.sets = 1;
4556         } else {
4557                 bitmap_fill(ud->tchan_map, ud->tchan_cnt);
4558                 for (i = 0; i < rm_res->sets; i++)
4559                         udma_mark_resource_ranges(ud, ud->tchan_map,
4560                                                   &rm_res->desc[i], "tchan");
4561                 irq_res.sets = rm_res->sets;
4562         }
4563
4564         /* rchan and matching default flow ranges */
4565         rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
4566         if (IS_ERR(rm_res)) {
4567                 bitmap_zero(ud->rchan_map, ud->rchan_cnt);
4568                 irq_res.sets++;
4569         } else {
4570                 bitmap_fill(ud->rchan_map, ud->rchan_cnt);
4571                 for (i = 0; i < rm_res->sets; i++)
4572                         udma_mark_resource_ranges(ud, ud->rchan_map,
4573                                                   &rm_res->desc[i], "rchan");
4574                 irq_res.sets += rm_res->sets;
4575         }
4576
4577         irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
4578         if (!irq_res.desc)
4579                 return -ENOMEM;
4580         rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
4581         if (IS_ERR(rm_res)) {
4582                 irq_res.desc[0].start = 0;
4583                 irq_res.desc[0].num = ud->tchan_cnt;
4584                 i = 1;
4585         } else {
4586                 for (i = 0; i < rm_res->sets; i++) {
4587                         irq_res.desc[i].start = rm_res->desc[i].start;
4588                         irq_res.desc[i].num = rm_res->desc[i].num;
4589                         irq_res.desc[i].start_sec = rm_res->desc[i].start_sec;
4590                         irq_res.desc[i].num_sec = rm_res->desc[i].num_sec;
4591                 }
4592         }
4593         rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
4594         if (IS_ERR(rm_res)) {
4595                 irq_res.desc[i].start = 0;
4596                 irq_res.desc[i].num = ud->rchan_cnt;
4597         } else {
4598                 for (j = 0; j < rm_res->sets; j++, i++) {
4599                         if (rm_res->desc[j].num) {
4600                                 irq_res.desc[i].start = rm_res->desc[j].start +
4601                                                 ud->soc_data->oes.udma_rchan;
4602                                 irq_res.desc[i].num = rm_res->desc[j].num;
4603                         }
4604                         if (rm_res->desc[j].num_sec) {
4605                                 irq_res.desc[i].start_sec = rm_res->desc[j].start_sec +
4606                                                 ud->soc_data->oes.udma_rchan;
4607                                 irq_res.desc[i].num_sec = rm_res->desc[j].num_sec;
4608                         }
4609                 }
4610         }
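        /*
         * In the OES space the tchan events start at offset 0 while the
         * rchan events are displaced by oes.udma_rchan, which is why only
         * the rchan descriptors above add the per-SoC offset.
         */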
4611         ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
4612         kfree(irq_res.desc);
4613         if (ret) {
4614                 dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
4615                 return ret;
4616         }
4617
4618         /* GP rflow ranges */
4619         rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
4620         if (IS_ERR(rm_res)) {
4621                 /* all GP flows are assigned exclusively to Linux */
4622                 bitmap_clear(ud->rflow_gp_map, ud->rchan_cnt,
4623                              ud->rflow_cnt - ud->rchan_cnt);
4624         } else {
4625                 for (i = 0; i < rm_res->sets; i++)
4626                         udma_mark_resource_ranges(ud, ud->rflow_gp_map,
4627                                                   &rm_res->desc[i], "gp-rflow");
4628         }
4629
4630         return 0;
4631 }
4632
4633 static int bcdma_setup_resources(struct udma_dev *ud)
4634 {
4635         int ret, i, j;
4636         struct device *dev = ud->dev;
4637         struct ti_sci_resource *rm_res, irq_res;
4638         struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
4639         const struct udma_oes_offsets *oes = &ud->soc_data->oes;
4640         u32 cap;
4641
4642         /* Set up the throughput level start indexes */
4643         cap = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
4644         if (BCDMA_CAP3_UBCHAN_CNT(cap)) {
4645                 ud->bchan_tpl.levels = 3;
4646                 ud->bchan_tpl.start_idx[1] = BCDMA_CAP3_UBCHAN_CNT(cap);
4647                 ud->bchan_tpl.start_idx[0] = BCDMA_CAP3_HBCHAN_CNT(cap);
4648         } else if (BCDMA_CAP3_HBCHAN_CNT(cap)) {
4649                 ud->bchan_tpl.levels = 2;
4650                 ud->bchan_tpl.start_idx[0] = BCDMA_CAP3_HBCHAN_CNT(cap);
4651         } else {
4652                 ud->bchan_tpl.levels = 1;
4653         }
4654
4655         cap = udma_read(ud->mmrs[MMR_GCFG], 0x30);
4656         if (BCDMA_CAP4_URCHAN_CNT(cap)) {
4657                 ud->rchan_tpl.levels = 3;
4658                 ud->rchan_tpl.start_idx[1] = BCDMA_CAP4_URCHAN_CNT(cap);
4659                 ud->rchan_tpl.start_idx[0] = BCDMA_CAP4_HRCHAN_CNT(cap);
4660         } else if (BCDMA_CAP4_HRCHAN_CNT(cap)) {
4661                 ud->rchan_tpl.levels = 2;
4662                 ud->rchan_tpl.start_idx[0] = BCDMA_CAP4_HRCHAN_CNT(cap);
4663         } else {
4664                 ud->rchan_tpl.levels = 1;
4665         }
4666
4667         if (BCDMA_CAP4_UTCHAN_CNT(cap)) {
4668                 ud->tchan_tpl.levels = 3;
4669                 ud->tchan_tpl.start_idx[1] = BCDMA_CAP4_UTCHAN_CNT(cap);
4670                 ud->tchan_tpl.start_idx[0] = BCDMA_CAP4_HTCHAN_CNT(cap);
4671         } else if (BCDMA_CAP4_HTCHAN_CNT(cap)) {
4672                 ud->tchan_tpl.levels = 2;
4673                 ud->tchan_tpl.start_idx[0] = BCDMA_CAP4_HTCHAN_CNT(cap);
4674         } else {
4675                 ud->tchan_tpl.levels = 1;
4676         }
4677
4678         ud->bchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->bchan_cnt),
4679                                            sizeof(unsigned long), GFP_KERNEL);
4680         ud->bchans = devm_kcalloc(dev, ud->bchan_cnt, sizeof(*ud->bchans),
4681                                   GFP_KERNEL);
4682         ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
4683                                            sizeof(unsigned long), GFP_KERNEL);
4684         ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
4685                                   GFP_KERNEL);
4686         ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
4687                                            sizeof(unsigned long), GFP_KERNEL);
4688         ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
4689                                   GFP_KERNEL);
4690         /* BCDMA does not really have flows, but the driver expects them */
4691         ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rchan_cnt),
4692                                         sizeof(unsigned long),
4693                                         GFP_KERNEL);
4694         ud->rflows = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rflows),
4695                                   GFP_KERNEL);
4696
4697         if (!ud->bchan_map || !ud->tchan_map || !ud->rchan_map ||
4698             !ud->rflow_in_use || !ud->bchans || !ud->tchans || !ud->rchans ||
4699             !ud->rflows)
4700                 return -ENOMEM;
4701
4702         /* Get resource ranges from tisci */
4703         for (i = 0; i < RM_RANGE_LAST; i++) {
4704                 if (i == RM_RANGE_RFLOW || i == RM_RANGE_TFLOW)
4705                         continue;
4706                 if (i == RM_RANGE_BCHAN && ud->bchan_cnt == 0)
4707                         continue;
4708                 if (i == RM_RANGE_TCHAN && ud->tchan_cnt == 0)
4709                         continue;
4710                 if (i == RM_RANGE_RCHAN && ud->rchan_cnt == 0)
4711                         continue;
4712
4713                 tisci_rm->rm_ranges[i] =
4714                         devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
4715                                                     tisci_rm->tisci_dev_id,
4716                                                     (char *)range_names[i]);
4717         }
4718
4719         irq_res.sets = 0;
4720
4721         /* bchan ranges */
4722         if (ud->bchan_cnt) {
4723                 rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN];
4724                 if (IS_ERR(rm_res)) {
4725                         bitmap_zero(ud->bchan_map, ud->bchan_cnt);
4726                         irq_res.sets++;
4727                 } else {
4728                         bitmap_fill(ud->bchan_map, ud->bchan_cnt);
4729                         for (i = 0; i < rm_res->sets; i++)
4730                                 udma_mark_resource_ranges(ud, ud->bchan_map,
4731                                                           &rm_res->desc[i],
4732                                                           "bchan");
4733                         irq_res.sets += rm_res->sets;
4734                 }
4735         }
4736
4737         /* tchan ranges */
4738         if (ud->tchan_cnt) {
4739                 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
4740                 if (IS_ERR(rm_res)) {
4741                         bitmap_zero(ud->tchan_map, ud->tchan_cnt);
4742                         irq_res.sets += 2;
4743                 } else {
4744                         bitmap_fill(ud->tchan_map, ud->tchan_cnt);
4745                         for (i = 0; i < rm_res->sets; i++)
4746                                 udma_mark_resource_ranges(ud, ud->tchan_map,
4747                                                           &rm_res->desc[i],
4748                                                           "tchan");
4749                         irq_res.sets += rm_res->sets * 2;
4750                 }
4751         }
4752
4753         /* rchan ranges */
4754         if (ud->rchan_cnt) {
4755                 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
4756                 if (IS_ERR(rm_res)) {
4757                         bitmap_zero(ud->rchan_map, ud->rchan_cnt);
4758                         irq_res.sets += 2;
4759                 } else {
4760                         bitmap_fill(ud->rchan_map, ud->rchan_cnt);
4761                         for (i = 0; i < rm_res->sets; i++)
4762                                 udma_mark_resource_ranges(ud, ud->rchan_map,
4763                                                           &rm_res->desc[i],
4764                                                           "rchan");
4765                         irq_res.sets += rm_res->sets * 2;
4766                 }
4767         }
4768
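        /*
         * Sizing note: each bchan range contributes one interrupt set (its
         * ring completion event), while every tchan/rchan range contributes
         * two (the data/trigger event plus the ring event) - hence the
         * "* 2" when accumulating irq_res.sets above.
         */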
4769         irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
4770         if (!irq_res.desc)
4771                 return -ENOMEM;
4772         if (ud->bchan_cnt) {
4773                 rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN];
4774                 if (IS_ERR(rm_res)) {
4775                         irq_res.desc[0].start = oes->bcdma_bchan_ring;
4776                         irq_res.desc[0].num = ud->bchan_cnt;
4777                         i = 1;
4778                 } else {
4779                         for (i = 0; i < rm_res->sets; i++) {
4780                                 irq_res.desc[i].start = rm_res->desc[i].start +
4781                                                         oes->bcdma_bchan_ring;
4782                                 irq_res.desc[i].num = rm_res->desc[i].num;
4783                         }
4784                 }
4785         }
4786         if (ud->tchan_cnt) {
4787                 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
4788                 if (IS_ERR(rm_res)) {
4789                         irq_res.desc[i].start = oes->bcdma_tchan_data;
4790                         irq_res.desc[i].num = ud->tchan_cnt;
4791                         irq_res.desc[i + 1].start = oes->bcdma_tchan_ring;
4792                         irq_res.desc[i + 1].num = ud->tchan_cnt;
4793                         i += 2;
4794                 } else {
4795                         for (j = 0; j < rm_res->sets; j++, i += 2) {
4796                                 irq_res.desc[i].start = rm_res->desc[j].start +
4797                                                         oes->bcdma_tchan_data;
4798                                 irq_res.desc[i].num = rm_res->desc[j].num;
4799
4800                                 irq_res.desc[i + 1].start = rm_res->desc[j].start +
4801                                                         oes->bcdma_tchan_ring;
4802                                 irq_res.desc[i + 1].num = rm_res->desc[j].num;
4803                         }
4804                 }
4805         }
4806         if (ud->rchan_cnt) {
4807                 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
4808                 if (IS_ERR(rm_res)) {
4809                         irq_res.desc[i].start = oes->bcdma_rchan_data;
4810                         irq_res.desc[i].num = ud->rchan_cnt;
4811                         irq_res.desc[i + 1].start = oes->bcdma_rchan_ring;
4812                         irq_res.desc[i + 1].num = ud->rchan_cnt;
4813                         i += 2;
4814                 } else {
4815                         for (j = 0; j < rm_res->sets; j++, i += 2) {
4816                                 irq_res.desc[i].start = rm_res->desc[j].start +
4817                                                         oes->bcdma_rchan_data;
4818                                 irq_res.desc[i].num = rm_res->desc[j].num;
4819
4820                                 irq_res.desc[i + 1].start = rm_res->desc[j].start +
4821                                                         oes->bcdma_rchan_ring;
4822                                 irq_res.desc[i + 1].num = rm_res->desc[j].num;
4823                         }
4824                 }
4825         }
4826
4827         ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
4828         kfree(irq_res.desc);
4829         if (ret) {
4830                 dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
4831                 return ret;
4832         }
4833
4834         return 0;
4835 }
4836
4837 static int pktdma_setup_resources(struct udma_dev *ud)
4838 {
4839         int ret, i, j;
4840         struct device *dev = ud->dev;
4841         struct ti_sci_resource *rm_res, irq_res;
4842         struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
4843         const struct udma_oes_offsets *oes = &ud->soc_data->oes;
4844         u32 cap3;
4845
4846         /* Set up the throughput level start indexes */
4847         cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
4848         if (UDMA_CAP3_UCHAN_CNT(cap3)) {
4849                 ud->tchan_tpl.levels = 3;
4850                 ud->tchan_tpl.start_idx[1] = UDMA_CAP3_UCHAN_CNT(cap3);
4851                 ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
4852         } else if (UDMA_CAP3_HCHAN_CNT(cap3)) {
4853                 ud->tchan_tpl.levels = 2;
4854                 ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
4855         } else {
4856                 ud->tchan_tpl.levels = 1;
4857         }
4858
4859         ud->rchan_tpl.levels = ud->tchan_tpl.levels;
4860         ud->rchan_tpl.start_idx[0] = ud->tchan_tpl.start_idx[0];
4861         ud->rchan_tpl.start_idx[1] = ud->tchan_tpl.start_idx[1];
4862
4863         ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
4864                                            sizeof(unsigned long), GFP_KERNEL);
4865         ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
4866                                   GFP_KERNEL);
4867         ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
4868                                            sizeof(unsigned long), GFP_KERNEL);
4869         ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
4870                                   GFP_KERNEL);
4871         ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
4872                                         sizeof(unsigned long),
4873                                         GFP_KERNEL);
4874         ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
4875                                   GFP_KERNEL);
4876         ud->tflow_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tflow_cnt),
4877                                            sizeof(unsigned long), GFP_KERNEL);
4878
4879         if (!ud->tchan_map || !ud->rchan_map || !ud->tflow_map || !ud->tchans ||
4880             !ud->rchans || !ud->rflows || !ud->rflow_in_use)
4881                 return -ENOMEM;
4882
4883         /* Get resource ranges from tisci */
4884         for (i = 0; i < RM_RANGE_LAST; i++) {
4885                 if (i == RM_RANGE_BCHAN)
4886                         continue;
4887
4888                 tisci_rm->rm_ranges[i] =
4889                         devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
4890                                                     tisci_rm->tisci_dev_id,
4891                                                     (char *)range_names[i]);
4892         }
4893
4894         /* tchan ranges */
4895         rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
4896         if (IS_ERR(rm_res)) {
4897                 bitmap_zero(ud->tchan_map, ud->tchan_cnt);
4898         } else {
4899                 bitmap_fill(ud->tchan_map, ud->tchan_cnt);
4900                 for (i = 0; i < rm_res->sets; i++)
4901                         udma_mark_resource_ranges(ud, ud->tchan_map,
4902                                                   &rm_res->desc[i], "tchan");
4903         }
4904
4905         /* rchan ranges */
4906         rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
4907         if (IS_ERR(rm_res)) {
4908                 bitmap_zero(ud->rchan_map, ud->rchan_cnt);
4909         } else {
4910                 bitmap_fill(ud->rchan_map, ud->rchan_cnt);
4911                 for (i = 0; i < rm_res->sets; i++)
4912                         udma_mark_resource_ranges(ud, ud->rchan_map,
4913                                                   &rm_res->desc[i], "rchan");
4914         }
4915
4916         /* rflow ranges */
4917         rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
4918         if (IS_ERR(rm_res)) {
4919                 /* all rflows are assigned exclusively to Linux */
4920                 bitmap_zero(ud->rflow_in_use, ud->rflow_cnt);
4921                 irq_res.sets = 1;
4922         } else {
4923                 bitmap_fill(ud->rflow_in_use, ud->rflow_cnt);
4924                 for (i = 0; i < rm_res->sets; i++)
4925                         udma_mark_resource_ranges(ud, ud->rflow_in_use,
4926                                                   &rm_res->desc[i], "rflow");
4927                 irq_res.sets = rm_res->sets;
4928         }
4929
4930         /* tflow ranges */
4931         rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW];
4932         if (IS_ERR(rm_res)) {
4933                 /* all tflows are assigned exclusively to Linux */
4934                 bitmap_zero(ud->tflow_map, ud->tflow_cnt);
4935                 irq_res.sets++;
4936         } else {
4937                 bitmap_fill(ud->tflow_map, ud->tflow_cnt);
4938                 for (i = 0; i < rm_res->sets; i++)
4939                         udma_mark_resource_ranges(ud, ud->tflow_map,
4940                                                   &rm_res->desc[i], "tflow");
4941                 irq_res.sets += rm_res->sets;
4942         }
4943
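        /*
         * Unlike UDMA/BCDMA, PKTDMA signals completion per flow rather than
         * per channel, so the irq descriptors below are derived from the
         * tflow/rflow ranges combined with the pktdma_*_flow OES offsets.
         */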
4944         irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
4945         if (!irq_res.desc)
4946                 return -ENOMEM;
4947         rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW];
4948         if (IS_ERR(rm_res)) {
4949                 irq_res.desc[0].start = oes->pktdma_tchan_flow;
4950                 irq_res.desc[0].num = ud->tflow_cnt;
4951                 i = 1;
4952         } else {
4953                 for (i = 0; i < rm_res->sets; i++) {
4954                         irq_res.desc[i].start = rm_res->desc[i].start +
4955                                                 oes->pktdma_tchan_flow;
4956                         irq_res.desc[i].num = rm_res->desc[i].num;
4957                 }
4958         }
4959         rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
4960         if (IS_ERR(rm_res)) {
4961                 irq_res.desc[i].start = oes->pktdma_rchan_flow;
4962                 irq_res.desc[i].num = ud->rflow_cnt;
4963         } else {
4964                 for (j = 0; j < rm_res->sets; j++, i++) {
4965                         irq_res.desc[i].start = rm_res->desc[j].start +
4966                                                 oes->pktdma_rchan_flow;
4967                         irq_res.desc[i].num = rm_res->desc[j].num;
4968                 }
4969         }
4970         ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
4971         kfree(irq_res.desc);
4972         if (ret) {
4973                 dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
4974                 return ret;
4975         }
4976
4977         return 0;
4978 }
4979
4980 static int setup_resources(struct udma_dev *ud)
4981 {
4982         struct device *dev = ud->dev;
4983         int ch_count, ret;
4984
4985         switch (ud->match_data->type) {
4986         case DMA_TYPE_UDMA:
4987                 ret = udma_setup_resources(ud);
4988                 break;
4989         case DMA_TYPE_BCDMA:
4990                 ret = bcdma_setup_resources(ud);
4991                 break;
4992         case DMA_TYPE_PKTDMA:
4993                 ret = pktdma_setup_resources(ud);
4994                 break;
4995         default:
4996                 return -EINVAL;
4997         }
4998
4999         if (ret)
5000                 return ret;
5001
5002         ch_count  = ud->bchan_cnt + ud->tchan_cnt + ud->rchan_cnt;
5003         if (ud->bchan_cnt)
5004                 ch_count -= bitmap_weight(ud->bchan_map, ud->bchan_cnt);
5005         ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt);
5006         ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt);
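        /*
         * Set bits in the *_map bitmaps mark channels reserved for other
         * hosts, so e.g. tchan_cnt = 32 with 24 bits set leaves 8 tchans
         * usable by this host.
         */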
5007         if (!ch_count)
5008                 return -ENODEV;
5009
5010         ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels),
5011                                     GFP_KERNEL);
5012         if (!ud->channels)
5013                 return -ENOMEM;
5014
5015         switch (ud->match_data->type) {
5016         case DMA_TYPE_UDMA:
5017                 dev_info(dev,
5018                          "Channels: %d (tchan: %u, rchan: %u, gp-rflow: %u)\n",
5019                          ch_count,
5020                          ud->tchan_cnt - bitmap_weight(ud->tchan_map,
5021                                                        ud->tchan_cnt),
5022                          ud->rchan_cnt - bitmap_weight(ud->rchan_map,
5023                                                        ud->rchan_cnt),
5024                          ud->rflow_cnt - bitmap_weight(ud->rflow_gp_map,
5025                                                        ud->rflow_cnt));
5026                 break;
5027         case DMA_TYPE_BCDMA:
5028                 dev_info(dev,
5029                          "Channels: %d (bchan: %u, tchan: %u, rchan: %u)\n",
5030                          ch_count,
5031                          ud->bchan_cnt - bitmap_weight(ud->bchan_map,
5032                                                        ud->bchan_cnt),
5033                          ud->tchan_cnt - bitmap_weight(ud->tchan_map,
5034                                                        ud->tchan_cnt),
5035                          ud->rchan_cnt - bitmap_weight(ud->rchan_map,
5036                                                        ud->rchan_cnt));
5037                 break;
5038         case DMA_TYPE_PKTDMA:
5039                 dev_info(dev,
5040                          "Channels: %d (tchan: %u, rchan: %u)\n",
5041                          ch_count,
5042                          ud->tchan_cnt - bitmap_weight(ud->tchan_map,
5043                                                        ud->tchan_cnt),
5044                          ud->rchan_cnt - bitmap_weight(ud->rchan_map,
5045                                                        ud->rchan_cnt));
5046                 break;
5047         default:
5048                 break;
5049         }
5050
5051         return ch_count;
5052 }
5053
5054 static int udma_setup_rx_flush(struct udma_dev *ud)
5055 {
5056         struct udma_rx_flush *rx_flush = &ud->rx_flush;
5057         struct cppi5_desc_hdr_t *tr_desc;
5058         struct cppi5_tr_type1_t *tr_req;
5059         struct cppi5_host_desc_t *desc;
5060         struct device *dev = ud->dev;
5061         struct udma_hwdesc *hwdesc;
5062         size_t tr_size;
5063
5064         /* Allocate 1K buffer for discarded data on RX channel teardown */
5065         rx_flush->buffer_size = SZ_1K;
5066         rx_flush->buffer_vaddr = devm_kzalloc(dev, rx_flush->buffer_size,
5067                                               GFP_KERNEL);
5068         if (!rx_flush->buffer_vaddr)
5069                 return -ENOMEM;
5070
5071         rx_flush->buffer_paddr = dma_map_single(dev, rx_flush->buffer_vaddr,
5072                                                 rx_flush->buffer_size,
5073                                                 DMA_TO_DEVICE);
5074         if (dma_mapping_error(dev, rx_flush->buffer_paddr))
5075                 return -ENOMEM;
5076
5077         /* Set up descriptor to be used for TR mode */
5078         hwdesc = &rx_flush->hwdescs[0];
5079         tr_size = sizeof(struct cppi5_tr_type1_t);
5080         hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size, 1);
5081         hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size,
5082                                         ud->desc_align);
5083
5084         hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size,
5085                                                 GFP_KERNEL);
5086         if (!hwdesc->cppi5_desc_vaddr)
5087                 return -ENOMEM;
5088
5089         hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr,
5090                                                   hwdesc->cppi5_desc_size,
5091                                                   DMA_TO_DEVICE);
5092         if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr))
5093                 return -ENOMEM;
5094
5095         /* Start of the TR req records */
5096         hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size;
5097         /* Start address of the TR response array */
5098         hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size;
5099
5100         tr_desc = hwdesc->cppi5_desc_vaddr;
5101         cppi5_trdesc_init(tr_desc, 1, tr_size, 0, 0);
5102         cppi5_desc_set_pktids(tr_desc, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT);
5103         cppi5_desc_set_retpolicy(tr_desc, 0, 0);
5104
5105         tr_req = hwdesc->tr_req_base;
5106         cppi5_tr_init(&tr_req->flags, CPPI5_TR_TYPE1, false, false,
5107                       CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
5108         cppi5_tr_csf_set(&tr_req->flags, CPPI5_TR_CSF_SUPR_EVT);
5109
5110         tr_req->addr = rx_flush->buffer_paddr;
5111         tr_req->icnt0 = rx_flush->buffer_size;
5112         tr_req->icnt1 = 1;
5113
5114         dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr,
5115                                    hwdesc->cppi5_desc_size, DMA_TO_DEVICE);
5116
5117         /* Set up descriptor to be used for packet mode */
5118         hwdesc = &rx_flush->hwdescs[1];
5119         hwdesc->cppi5_desc_size = ALIGN(sizeof(struct cppi5_host_desc_t) +
5120                                         CPPI5_INFO0_HDESC_EPIB_SIZE +
5121                                         CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE,
5122                                         ud->desc_align);
5123
5124         hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size,
5125                                                 GFP_KERNEL);
5126         if (!hwdesc->cppi5_desc_vaddr)
5127                 return -ENOMEM;
5128
5129         hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr,
5130                                                   hwdesc->cppi5_desc_size,
5131                                                   DMA_TO_DEVICE);
5132         if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr))
5133                 return -ENOMEM;
5134
5135         desc = hwdesc->cppi5_desc_vaddr;
5136         cppi5_hdesc_init(desc, 0, 0);
5137         cppi5_desc_set_pktids(&desc->hdr, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT);
5138         cppi5_desc_set_retpolicy(&desc->hdr, 0, 0);
5139
5140         cppi5_hdesc_attach_buf(desc,
5141                                rx_flush->buffer_paddr, rx_flush->buffer_size,
5142                                rx_flush->buffer_paddr, rx_flush->buffer_size);
5143
5144         dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr,
5145                                    hwdesc->cppi5_desc_size, DMA_TO_DEVICE);
5146         return 0;
5147 }
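/*
 * To summarize the setup above: both pre-built descriptors (TR mode in
 * hwdescs[0], packet mode in hwdescs[1]) point at the same 1K scratch
 * buffer; during RX channel teardown one of them is fed to the free
 * descriptor ring so any in-flight data has somewhere harmless to land.
 */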
5148
5149 #ifdef CONFIG_DEBUG_FS
5150 static void udma_dbg_summary_show_chan(struct seq_file *s,
5151                                        struct dma_chan *chan)
5152 {
5153         struct udma_chan *uc = to_udma_chan(chan);
5154         struct udma_chan_config *ucc = &uc->config;
5155
5156         seq_printf(s, " %-13s| %s", dma_chan_name(chan),
5157                    chan->dbg_client_name ?: "in-use");
5158         if (ucc->tr_trigger_type)
5159                 seq_puts(s, " (triggered, ");
5160         else
5161                 seq_printf(s, " (%s, ",
5162                            dmaengine_get_direction_text(uc->config.dir));
5163
5164         switch (uc->config.dir) {
5165         case DMA_MEM_TO_MEM:
5166                 if (uc->ud->match_data->type == DMA_TYPE_BCDMA) {
5167                         seq_printf(s, "bchan%d)\n", uc->bchan->id);
5168                         return;
5169                 }
5170
5171                 seq_printf(s, "chan%d pair [0x%04x -> 0x%04x], ", uc->tchan->id,
5172                            ucc->src_thread, ucc->dst_thread);
5173                 break;
5174         case DMA_DEV_TO_MEM:
5175                 seq_printf(s, "rchan%d [0x%04x -> 0x%04x], ", uc->rchan->id,
5176                            ucc->src_thread, ucc->dst_thread);
5177                 if (uc->ud->match_data->type == DMA_TYPE_PKTDMA)
5178                         seq_printf(s, "rflow%d, ", uc->rflow->id);
5179                 break;
5180         case DMA_MEM_TO_DEV:
5181                 seq_printf(s, "tchan%d [0x%04x -> 0x%04x], ", uc->tchan->id,
5182                            ucc->src_thread, ucc->dst_thread);
5183                 if (uc->ud->match_data->type == DMA_TYPE_PKTDMA)
5184                         seq_printf(s, "tflow%d, ", uc->tchan->tflow_id);
5185                 break;
5186         default:
5187                 seq_printf(s, ")\n");
5188                 return;
5189         }
5190
5191         if (ucc->ep_type == PSIL_EP_NATIVE) {
5192                 seq_printf(s, "PSI-L Native");
5193                 if (ucc->metadata_size) {
5194                         seq_printf(s, "[%s", ucc->needs_epib ? " EPIB" : "");
5195                         if (ucc->psd_size)
5196                                 seq_printf(s, " PSDsize:%u", ucc->psd_size);
5197                         seq_printf(s, " ]");
5198                 }
5199         } else {
5200                 seq_printf(s, "PDMA");
5201                 if (ucc->enable_acc32 || ucc->enable_burst)
5202                         seq_printf(s, "[%s%s ]",
5203                                    ucc->enable_acc32 ? " ACC32" : "",
5204                                    ucc->enable_burst ? " BURST" : "");
5205         }
5206
5207         seq_printf(s, ", %s)\n", ucc->pkt_mode ? "Packet mode" : "TR mode");
5208 }
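/*
 * Example summary line as emitted above (illustrative values):
 *
 *  dma0chan0    | in-use (MEM_TO_DEV, tchan0 [0x1003 -> 0xc400], PSI-L Native, TR mode)
 */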
5209
5210 static void udma_dbg_summary_show(struct seq_file *s,
5211                                   struct dma_device *dma_dev)
5212 {
5213         struct dma_chan *chan;
5214
5215         list_for_each_entry(chan, &dma_dev->channels, device_node) {
5216                 if (chan->client_count)
5217                         udma_dbg_summary_show_chan(s, chan);
5218         }
5219 }
5220 #endif /* CONFIG_DEBUG_FS */
5221
5222 static enum dmaengine_alignment udma_get_copy_align(struct udma_dev *ud)
5223 {
5224         const struct udma_match_data *match_data = ud->match_data;
5225         u8 tpl;
5226
5227         if (!match_data->enable_memcpy_support)
5228                 return DMAENGINE_ALIGN_8_BYTES;
5229
5230         /* Get the highest TPL level the device supports for memcpy */
5231         if (ud->bchan_cnt)
5232                 tpl = udma_get_chan_tpl_index(&ud->bchan_tpl, 0);
5233         else if (ud->tchan_cnt)
5234                 tpl = udma_get_chan_tpl_index(&ud->tchan_tpl, 0);
5235         else
5236                 return DMAENGINE_ALIGN_8_BYTES;
5237
5238         switch (match_data->burst_size[tpl]) {
5239         case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES:
5240                 return DMAENGINE_ALIGN_256_BYTES;
5241         case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_128_BYTES:
5242                 return DMAENGINE_ALIGN_128_BYTES;
5243         case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES:
5244         fallthrough;
5245         default:
5246                 return DMAENGINE_ALIGN_64_BYTES;
5247         }
5248 }
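/*
 * Example: a device whose highest-TPL memcpy channels burst 128 bytes
 * reports DMAENGINE_ALIGN_128_BYTES, so dmaengine clients keep memcpy
 * buffers aligned to the full burst.
 */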
5249
5250 #define TI_UDMAC_BUSWIDTHS      (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
5251                                  BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
5252                                  BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
5253                                  BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
5254                                  BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
5255
5256 static int udma_probe(struct platform_device *pdev)
5257 {
5258         struct device_node *navss_node = pdev->dev.parent->of_node;
5259         const struct soc_device_attribute *soc;
5260         struct device *dev = &pdev->dev;
5261         struct udma_dev *ud;
5262         const struct of_device_id *match;
5263         int i, ret;
5264         int ch_count;
5265
5266         ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(48));
5267         if (ret)
5268                 dev_err(dev, "failed to set dma mask stuff\n");
5269
5270         ud = devm_kzalloc(dev, sizeof(*ud), GFP_KERNEL);
5271         if (!ud)
5272                 return -ENOMEM;
5273
5274         match = of_match_node(udma_of_match, dev->of_node);
5275         if (!match)
5276                 match = of_match_node(bcdma_of_match, dev->of_node);
5277         if (!match) {
5278                 match = of_match_node(pktdma_of_match, dev->of_node);
5279                 if (!match) {
5280                         dev_err(dev, "No compatible match found\n");
5281                         return -ENODEV;
5282                 }
5283         }
5284         ud->match_data = match->data;
5285
5286         soc = soc_device_match(k3_soc_devices);
5287         if (!soc) {
5288                 dev_err(dev, "No compatible SoC found\n");
5289                 return -ENODEV;
5290         }
5291         ud->soc_data = soc->data;
5292
5293         ret = udma_get_mmrs(pdev, ud);
5294         if (ret)
5295                 return ret;
5296
5297         ud->tisci_rm.tisci = ti_sci_get_by_phandle(dev->of_node, "ti,sci");
5298         if (IS_ERR(ud->tisci_rm.tisci))
5299                 return PTR_ERR(ud->tisci_rm.tisci);
5300
5301         ret = of_property_read_u32(dev->of_node, "ti,sci-dev-id",
5302                                    &ud->tisci_rm.tisci_dev_id);
5303         if (ret) {
5304                 dev_err(dev, "ti,sci-dev-id read failure %d\n", ret);
5305                 return ret;
5306         }
5307         pdev->id = ud->tisci_rm.tisci_dev_id;
5308
5309         ret = of_property_read_u32(navss_node, "ti,sci-dev-id",
5310                                    &ud->tisci_rm.tisci_navss_dev_id);
5311         if (ret) {
5312                 dev_err(dev, "NAVSS ti,sci-dev-id read failure %d\n", ret);
5313                 return ret;
5314         }
5315
5316         if (ud->match_data->type == DMA_TYPE_UDMA) {
5317                 ret = of_property_read_u32(dev->of_node, "ti,udma-atype",
5318                                            &ud->atype);
5319                 if (!ret && ud->atype > 2) {
5320                         dev_err(dev, "Invalid atype: %u\n", ud->atype);
5321                         return -EINVAL;
5322                 }
5323         } else {
5324                 ret = of_property_read_u32(dev->of_node, "ti,asel",
5325                                            &ud->asel);
5326                 if (!ret && ud->asel > 15) {
5327                         dev_err(dev, "Invalid asel: %u\n", ud->asel);
5328                         return -EINVAL;
5329                 }
5330         }
5331
5332         ud->tisci_rm.tisci_udmap_ops = &ud->tisci_rm.tisci->ops.rm_udmap_ops;
5333         ud->tisci_rm.tisci_psil_ops = &ud->tisci_rm.tisci->ops.rm_psil_ops;
5334
5335         if (ud->match_data->type == DMA_TYPE_UDMA) {
5336                 ud->ringacc = of_k3_ringacc_get_by_phandle(dev->of_node, "ti,ringacc");
5337         } else {
5338                 struct k3_ringacc_init_data ring_init_data;
5339
5340                 ring_init_data.tisci = ud->tisci_rm.tisci;
5341                 ring_init_data.tisci_dev_id = ud->tisci_rm.tisci_dev_id;
5342                 if (ud->match_data->type == DMA_TYPE_BCDMA) {
5343                         ring_init_data.num_rings = ud->bchan_cnt +
5344                                                    ud->tchan_cnt +
5345                                                    ud->rchan_cnt;
5346                 } else {
5347                         ring_init_data.num_rings = ud->rflow_cnt +
5348                                                    ud->tflow_cnt;
5349                 }
5350
5351                 ud->ringacc = k3_ringacc_dmarings_init(pdev, &ring_init_data);
5352         }
5353
5354         if (IS_ERR(ud->ringacc))
5355                 return PTR_ERR(ud->ringacc);
5356
5357         dev->msi.domain = of_msi_get_domain(dev, dev->of_node,
5358                                             DOMAIN_BUS_TI_SCI_INTA_MSI);
5359         if (!dev->msi.domain) {
5360                 dev_err(dev, "Failed to get MSI domain\n");
5361                 return -EPROBE_DEFER;
5362         }
5363
5364         dma_cap_set(DMA_SLAVE, ud->ddev.cap_mask);
5365         /* cyclic operation is not supported via PKTDMA */
5366         if (ud->match_data->type != DMA_TYPE_PKTDMA) {
5367                 dma_cap_set(DMA_CYCLIC, ud->ddev.cap_mask);
5368                 ud->ddev.device_prep_dma_cyclic = udma_prep_dma_cyclic;
5369         }
5370
5371         ud->ddev.device_config = udma_slave_config;
5372         ud->ddev.device_prep_slave_sg = udma_prep_slave_sg;
5373         ud->ddev.device_issue_pending = udma_issue_pending;
5374         ud->ddev.device_tx_status = udma_tx_status;
5375         ud->ddev.device_pause = udma_pause;
5376         ud->ddev.device_resume = udma_resume;
5377         ud->ddev.device_terminate_all = udma_terminate_all;
5378         ud->ddev.device_synchronize = udma_synchronize;
5379 #ifdef CONFIG_DEBUG_FS
5380         ud->ddev.dbg_summary_show = udma_dbg_summary_show;
5381 #endif
5382
5383         switch (ud->match_data->type) {
5384         case DMA_TYPE_UDMA:
5385                 ud->ddev.device_alloc_chan_resources =
5386                                         udma_alloc_chan_resources;
5387                 break;
5388         case DMA_TYPE_BCDMA:
5389                 ud->ddev.device_alloc_chan_resources =
5390                                         bcdma_alloc_chan_resources;
5391                 ud->ddev.device_router_config = bcdma_router_config;
5392                 break;
5393         case DMA_TYPE_PKTDMA:
5394                 ud->ddev.device_alloc_chan_resources =
5395                                         pktdma_alloc_chan_resources;
5396                 break;
5397         default:
5398                 return -EINVAL;
5399         }
5400         ud->ddev.device_free_chan_resources = udma_free_chan_resources;
5401
5402         ud->ddev.src_addr_widths = TI_UDMAC_BUSWIDTHS;
5403         ud->ddev.dst_addr_widths = TI_UDMAC_BUSWIDTHS;
5404         ud->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
5405         ud->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
5406         ud->ddev.desc_metadata_modes = DESC_METADATA_CLIENT |
5407                                        DESC_METADATA_ENGINE;
5408         if (ud->match_data->enable_memcpy_support &&
5409             !(ud->match_data->type == DMA_TYPE_BCDMA && ud->bchan_cnt == 0)) {
5410                 dma_cap_set(DMA_MEMCPY, ud->ddev.cap_mask);
5411                 ud->ddev.device_prep_dma_memcpy = udma_prep_dma_memcpy;
5412                 ud->ddev.directions |= BIT(DMA_MEM_TO_MEM);
5413         }
5414
5415         ud->ddev.dev = dev;
5416         ud->dev = dev;
5417         ud->psil_base = ud->match_data->psil_base;
5418
5419         INIT_LIST_HEAD(&ud->ddev.channels);
5420         INIT_LIST_HEAD(&ud->desc_to_purge);
5421
5422         ch_count = setup_resources(ud);
5423         if (ch_count <= 0)
5424                 return ch_count;
5425
5426         spin_lock_init(&ud->lock);
5427         INIT_WORK(&ud->purge_work, udma_purge_desc_work);
5428
5429         ud->desc_align = 64;
5430         if (ud->desc_align < dma_get_cache_alignment())
5431                 ud->desc_align = dma_get_cache_alignment();
5432
5433         ret = udma_setup_rx_flush(ud);
5434         if (ret)
5435                 return ret;
5436
5437         for (i = 0; i < ud->bchan_cnt; i++) {
5438                 struct udma_bchan *bchan = &ud->bchans[i];
5439
5440                 bchan->id = i;
5441                 bchan->reg_rt = ud->mmrs[MMR_BCHANRT] + i * 0x1000;
5442         }
5443
5444         for (i = 0; i < ud->tchan_cnt; i++) {
5445                 struct udma_tchan *tchan = &ud->tchans[i];
5446
5447                 tchan->id = i;
5448                 tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + i * 0x1000;
5449         }
5450
5451         for (i = 0; i < ud->rchan_cnt; i++) {
5452                 struct udma_rchan *rchan = &ud->rchans[i];
5453
5454                 rchan->id = i;
5455                 rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + i * 0x1000;
5456         }
5457
5458         for (i = 0; i < ud->rflow_cnt; i++) {
5459                 struct udma_rflow *rflow = &ud->rflows[i];
5460
5461                 rflow->id = i;
5462         }
5463
5464         for (i = 0; i < ch_count; i++) {
5465                 struct udma_chan *uc = &ud->channels[i];
5466
5467                 uc->ud = ud;
5468                 uc->vc.desc_free = udma_desc_free;
5469                 uc->id = i;
5470                 uc->bchan = NULL;
5471                 uc->tchan = NULL;
5472                 uc->rchan = NULL;
5473                 uc->config.remote_thread_id = -1;
5474                 uc->config.mapped_channel_id = -1;
5475                 uc->config.default_flow_id = -1;
5476                 uc->config.dir = DMA_MEM_TO_MEM;
5477                 uc->name = devm_kasprintf(dev, GFP_KERNEL, "%s chan%d",
5478                                           dev_name(dev), i);
5479
5480                 vchan_init(&uc->vc, &ud->ddev);
5481                 /* Use custom vchan completion handling */
5482                 tasklet_setup(&uc->vc.task, udma_vchan_complete);
5483                 init_completion(&uc->teardown_completed);
5484                 INIT_DELAYED_WORK(&uc->tx_drain.work, udma_check_tx_completion);
5485         }
5486
5487         /* Configure copy_align to match the maximum burst size the device supports */
5488         ud->ddev.copy_align = udma_get_copy_align(ud);
5489
5490         ret = dma_async_device_register(&ud->ddev);
5491         if (ret) {
5492                 dev_err(dev, "failed to register slave DMA engine: %d\n", ret);
5493                 return ret;
5494         }
5495
5496         platform_set_drvdata(pdev, ud);
5497
5498         ret = of_dma_controller_register(dev->of_node, udma_of_xlate, ud);
5499         if (ret) {
5500                 dev_err(dev, "failed to register of_dma controller\n");
5501                 dma_async_device_unregister(&ud->ddev);
5502         }
5503
5504         return ret;
5505 }
5506
5507 static struct platform_driver udma_driver = {
5508         .driver = {
5509                 .name   = "ti-udma",
5510                 .of_match_table = udma_of_match,
5511                 .suppress_bind_attrs = true,
5512         },
5513         .probe          = udma_probe,
5514 };
5515 builtin_platform_driver(udma_driver);
5516
5517 static struct platform_driver bcdma_driver = {
5518         .driver = {
5519                 .name   = "ti-bcdma",
5520                 .of_match_table = bcdma_of_match,
5521                 .suppress_bind_attrs = true,
5522         },
5523         .probe          = udma_probe,
5524 };
5525 builtin_platform_driver(bcdma_driver);
5526
5527 static struct platform_driver pktdma_driver = {
5528         .driver = {
5529                 .name   = "ti-pktdma",
5530                 .of_match_table = pktdma_of_match,
5531                 .suppress_bind_attrs = true,
5532         },
5533         .probe          = udma_probe,
5534 };
5535 builtin_platform_driver(pktdma_driver);
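/*
 * All three platform drivers above share udma_probe(); the per-IP
 * differences (UDMA vs BCDMA vs PKTDMA) are carried entirely by the
 * matched udma_match_data and dispatched on match_data->type.
 */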
5536
5537 /* Private interfaces to UDMA */
5538 #include "k3-udma-private.c"