// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
 * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
 */
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/sys_soc.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/soc/ti/k3-ringacc.h>
#include <linux/soc/ti/ti_sci_protocol.h>
#include <linux/soc/ti/ti_sci_inta_msi.h>
#include <linux/dma/k3-event-router.h>
#include <linux/dma/ti-cppi5.h>

#include "../virt-dma.h"
#include "k3-udma.h"
#include "k3-psil-priv.h"
struct udma_static_tr {
	u8 elsize; /* RPSTR0 */
	u16 elcnt; /* RPSTR0 */
	u16 bstcnt; /* RPSTR1 */
};
#define K3_UDMA_MAX_RFLOWS		1024
#define K3_UDMA_DEFAULT_RING_SIZE	16

/* How SRC/DST tag should be updated by UDMA in the descriptor's Word 3 */
#define UDMA_RFLOW_SRCTAG_NONE		0
#define UDMA_RFLOW_SRCTAG_CFG_TAG	1
#define UDMA_RFLOW_SRCTAG_FLOW_ID	2
#define UDMA_RFLOW_SRCTAG_SRC_TAG	4

#define UDMA_RFLOW_DSTTAG_NONE		0
#define UDMA_RFLOW_DSTTAG_CFG_TAG	1
#define UDMA_RFLOW_DSTTAG_FLOW_ID	2
#define UDMA_RFLOW_DSTTAG_DST_TAG_LO	4
#define UDMA_RFLOW_DSTTAG_DST_TAG_HI	5
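
/*
 * These selector values are used below when configuring an rflow via TI-SCI
 * (e.g. flow_req.rx_src_tag_lo_sel / rx_dest_tag_hi_sel in
 * udma_tisci_rx_channel_config()); they choose what UDMA places into the
 * SRC/DST tag fields of the completed descriptor.
 */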
static const char * const mmr_names[] = {
	[MMR_BCHANRT] = "bchanrt",
	[MMR_RCHANRT] = "rchanrt",
	[MMR_TCHANRT] = "tchanrt",
};
struct udma_tchan {
	void __iomem *reg_rt;

	int id;
	struct k3_ring *t_ring; /* Transmit ring */
	struct k3_ring *tc_ring; /* Transmit Completion ring */
	int tflow_id; /* applicable only for PKTDMA */
};

#define udma_bchan udma_tchan
struct udma_rflow {
	int id;
	struct k3_ring *fd_ring; /* Free Descriptor ring */
	struct k3_ring *r_ring; /* Receive ring */
};

struct udma_rchan {
	void __iomem *reg_rt;

	int id;
};
struct udma_oes_offsets {
	/* K3 UDMA Output Event Offset */
	u32 udma_rchan;

	/* BCDMA Output Event Offsets */
	u32 bcdma_bchan_data;
	u32 bcdma_bchan_ring;
	u32 bcdma_tchan_data;
	u32 bcdma_tchan_ring;
	u32 bcdma_rchan_data;
	u32 bcdma_rchan_ring;

	/* PKTDMA Output Event Offsets */
	u32 pktdma_tchan_flow;
	u32 pktdma_rchan_flow;
};
#define UDMA_FLAG_PDMA_ACC32		BIT(0)
#define UDMA_FLAG_PDMA_BURST		BIT(1)
#define UDMA_FLAG_TDTYPE		BIT(2)
#define UDMA_FLAG_BURST_SIZE		BIT(3)
#define UDMA_FLAGS_J7_CLASS		(UDMA_FLAG_PDMA_ACC32 | \
					 UDMA_FLAG_PDMA_BURST | \
					 UDMA_FLAG_TDTYPE | \
					 UDMA_FLAG_BURST_SIZE)
struct udma_match_data {
	enum k3_dma_type type;
	bool enable_memcpy_support;
	u32 flags;
	u32 statictr_z_mask;
	u8 burst_size[3];
};
struct udma_soc_data {
	struct udma_oes_offsets oes;
	u32 bcdma_trigger_event_offset;
};
struct udma_hwdesc {
	size_t cppi5_desc_size;
	void *cppi5_desc_vaddr;
	dma_addr_t cppi5_desc_paddr;

	/* TR descriptor internal pointers */
	void *tr_req_base;
	struct cppi5_tr_resp_t *tr_resp_base;
};
struct udma_rx_flush {
	struct udma_hwdesc hwdescs[2];

	size_t buffer_size;
	void *buffer_vaddr;
	dma_addr_t buffer_paddr;
};
struct udma_dev {
	struct dma_device ddev;
	struct device *dev;
	void __iomem *mmrs[MMR_LAST];
	const struct udma_match_data *match_data;
	const struct udma_soc_data *soc_data;

	struct udma_tpl bchan_tpl;
	struct udma_tpl tchan_tpl;
	struct udma_tpl rchan_tpl;

	size_t desc_align; /* alignment to use for descriptors */

	struct udma_tisci_rm tisci_rm;

	struct k3_ringacc *ringacc;

	struct work_struct purge_work;
	struct list_head desc_to_purge;
	spinlock_t lock;

	struct udma_rx_flush rx_flush;

	int bchan_cnt;
	int tchan_cnt;
	int echan_cnt;
	int rchan_cnt;
	int rflow_cnt;
	int tflow_cnt;
	unsigned long *bchan_map;
	unsigned long *tchan_map;
	unsigned long *rchan_map;
	unsigned long *rflow_gp_map;
	unsigned long *rflow_gp_map_allocated;
	unsigned long *rflow_in_use;
	unsigned long *tflow_map;

	struct udma_bchan *bchans;
	struct udma_tchan *tchans;
	struct udma_rchan *rchans;
	struct udma_rflow *rflows;

	struct udma_chan *channels;
	u32 psil_base;
	u32 atype;
	u32 asel;
};
struct udma_desc {
	struct virt_dma_desc vd;

	enum dma_transfer_direction dir;

	struct udma_static_tr static_tr;
	u32 residue;

	unsigned int sglen;
	unsigned int desc_idx; /* Only used for cyclic in packet mode */
	unsigned int tr_idx;

	u32 metadata_size;
	void *metadata; /* pointer to provided metadata buffer (EPIB, PSdata) */

	unsigned int hwdesc_count;
	struct udma_hwdesc hwdesc[];
};
enum udma_chan_state {
	UDMA_CHAN_IS_IDLE = 0, /* not active, no teardown is in progress */
	UDMA_CHAN_IS_ACTIVE, /* Normal operation */
	UDMA_CHAN_IS_TERMINATING, /* channel is being terminated */
};
struct udma_tx_drain {
	struct delayed_work work;
	ktime_t tstamp;
	u32 residue;
};
struct udma_chan_config {
	bool pkt_mode; /* TR or packet */
	bool needs_epib; /* EPIB is needed for the communication or not */
	u32 psd_size; /* size of Protocol Specific Data */
	u32 metadata_size; /* (needs_epib ? 16:0) + psd_size */
	u32 hdesc_size; /* Size of a packet descriptor in packet mode */
	bool notdpkt; /* Suppress sending TDC packet */
	int remote_thread_id;
	u32 atype;
	u32 asel;
	u32 src_thread;
	u32 dst_thread;
	enum psil_endpoint_type ep_type;
	bool enable_acc32;
	bool enable_burst;
	enum udma_tp_level channel_tpl; /* Channel Throughput Level */

	u32 tr_trigger_type;
	unsigned long tx_flags;

	/* PKTDMA mapped channel */
	int mapped_channel_id;
	/* PKTDMA default tflow or rflow for mapped channel */
	int default_flow_id;

	enum dma_transfer_direction dir;
};
struct udma_chan {
	struct virt_dma_chan vc;
	struct dma_slave_config cfg;
	struct udma_dev *ud;
	struct device *dma_dev;
	struct udma_desc *desc;
	struct udma_desc *terminated_desc;
	struct udma_static_tr static_tr;
	char *name;

	struct udma_bchan *bchan;
	struct udma_tchan *tchan;
	struct udma_rchan *rchan;
	struct udma_rflow *rflow;

	bool psil_paired;

	int irq_num_ring;
	int irq_num_udma;

	bool cyclic;

	enum udma_chan_state state;
	struct completion teardown_completed;

	struct udma_tx_drain tx_drain;

	/* Channel configuration parameters */
	struct udma_chan_config config;

	/* dmapool for packet mode descriptors */
	bool use_dma_pool;
	struct dma_pool *hdesc_pool;

	u32 id;
};
static inline struct udma_dev *to_udma_dev(struct dma_device *d)
{
	return container_of(d, struct udma_dev, ddev);
}

static inline struct udma_chan *to_udma_chan(struct dma_chan *c)
{
	return container_of(c, struct udma_chan, vc.chan);
}

static inline struct udma_desc *to_udma_desc(struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct udma_desc, vd.tx);
}
/* Generic register access functions */
static inline u32 udma_read(void __iomem *base, int reg)
{
	return readl(base + reg);
}

static inline void udma_write(void __iomem *base, int reg, u32 val)
{
	writel(val, base + reg);
}

static inline void udma_update_bits(void __iomem *base, int reg,
				    u32 mask, u32 val)
{
	u32 tmp, orig;

	orig = readl(base + reg);
	tmp = orig & ~mask;
	tmp |= (val & mask);

	if (tmp != orig)
		writel(tmp, base + reg);
}
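
/*
 * A minimal usage sketch of the read-modify-write helper above (illustrative
 * only): set the PAUSE bit of a channel's realtime control register without
 * touching the other bits. The register is written back only if the value
 * actually changed.
 *
 *	udma_update_bits(uc->tchan->reg_rt, UDMA_CHAN_RT_CTL_REG,
 *			 UDMA_CHAN_RT_CTL_PAUSE, UDMA_CHAN_RT_CTL_PAUSE);
 */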
static inline u32 udma_tchanrt_read(struct udma_chan *uc, int reg)
{
	if (!uc->tchan)
		return 0;
	return udma_read(uc->tchan->reg_rt, reg);
}

static inline void udma_tchanrt_write(struct udma_chan *uc, int reg, u32 val)
{
	if (!uc->tchan)
		return;
	udma_write(uc->tchan->reg_rt, reg, val);
}

static inline void udma_tchanrt_update_bits(struct udma_chan *uc, int reg,
					    u32 mask, u32 val)
{
	if (!uc->tchan)
		return;
	udma_update_bits(uc->tchan->reg_rt, reg, mask, val);
}

static inline u32 udma_rchanrt_read(struct udma_chan *uc, int reg)
{
	if (!uc->rchan)
		return 0;
	return udma_read(uc->rchan->reg_rt, reg);
}

static inline void udma_rchanrt_write(struct udma_chan *uc, int reg, u32 val)
{
	if (!uc->rchan)
		return;
	udma_write(uc->rchan->reg_rt, reg, val);
}

static inline void udma_rchanrt_update_bits(struct udma_chan *uc, int reg,
					    u32 mask, u32 val)
{
	if (!uc->rchan)
		return;
	udma_update_bits(uc->rchan->reg_rt, reg, mask, val);
}
static int navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread)
{
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;

	dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
	return tisci_rm->tisci_psil_ops->pair(tisci_rm->tisci,
					      tisci_rm->tisci_navss_dev_id,
					      src_thread, dst_thread);
}

static int navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
			     u32 dst_thread)
{
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;

	dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
	return tisci_rm->tisci_psil_ops->unpair(tisci_rm->tisci,
						tisci_rm->tisci_navss_dev_id,
						src_thread, dst_thread);
}
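
/*
 * PSI-L threads are paired and unpaired through TI-SCI firmware: the caller
 * passes a source (producer) and destination (consumer) thread, and the
 * destination is marked by OR-ing in K3_PSIL_DST_THREAD_ID_OFFSET, as done
 * above and again in the *_alloc_chan_resources() callbacks below.
 */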
static void k3_configure_chan_coherency(struct dma_chan *chan, u32 asel)
{
	struct device *chan_dev = &chan->dev->device;

	if (asel == 0) {
		/* No special handling for the channel */
		chan->dev->chan_dma_dev = false;

		chan_dev->dma_coherent = false;
		chan_dev->dma_parms = NULL;
	} else if (asel == 14 || asel == 15) {
		chan->dev->chan_dma_dev = true;

		chan_dev->dma_coherent = true;
		dma_coerce_mask_and_coherent(chan_dev, DMA_BIT_MASK(48));
		chan_dev->dma_parms = chan_dev->parent->dma_parms;
	} else {
		dev_warn(chan->device->dev, "Invalid ASEL value: %u\n", asel);

		chan_dev->dma_coherent = false;
		chan_dev->dma_parms = NULL;
	}
}
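
/*
 * Per the logic above: asel == 0 leaves the channel on the default
 * (non-coherent) path, while asel 14 or 15 routes it through a coherent
 * path and widens the DMA mask to 48 bits; any other value is rejected
 * with a warning and treated as non-coherent.
 */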
static u8 udma_get_chan_tpl_index(struct udma_tpl *tpl_map, int chan_id)
{
	int i;

	for (i = 0; i < tpl_map->levels; i++) {
		if (chan_id >= tpl_map->start_idx[i])
			return i;
	}

	return 0;
}
static void udma_reset_uchan(struct udma_chan *uc)
{
	memset(&uc->config, 0, sizeof(uc->config));
	uc->config.remote_thread_id = -1;
	uc->config.mapped_channel_id = -1;
	uc->config.default_flow_id = -1;
	uc->state = UDMA_CHAN_IS_IDLE;
}
static void udma_dump_chan_stdata(struct udma_chan *uc)
{
	struct device *dev = uc->ud->dev;
	u32 offset;
	int i;

	if (uc->config.dir == DMA_MEM_TO_DEV || uc->config.dir == DMA_MEM_TO_MEM) {
		dev_dbg(dev, "TCHAN State data:\n");
		for (i = 0; i < 32; i++) {
			offset = UDMA_CHAN_RT_STDATA_REG + i * 4;
			dev_dbg(dev, "TRT_STDATA[%02d]: 0x%08x\n", i,
				udma_tchanrt_read(uc, offset));
		}
	}

	if (uc->config.dir == DMA_DEV_TO_MEM || uc->config.dir == DMA_MEM_TO_MEM) {
		dev_dbg(dev, "RCHAN State data:\n");
		for (i = 0; i < 32; i++) {
			offset = UDMA_CHAN_RT_STDATA_REG + i * 4;
			dev_dbg(dev, "RRT_STDATA[%02d]: 0x%08x\n", i,
				udma_rchanrt_read(uc, offset));
		}
	}
}
static inline dma_addr_t udma_curr_cppi5_desc_paddr(struct udma_desc *d,
						    int idx)
{
	return d->hwdesc[idx].cppi5_desc_paddr;
}

static inline void *udma_curr_cppi5_desc_vaddr(struct udma_desc *d, int idx)
{
	return d->hwdesc[idx].cppi5_desc_vaddr;
}
static struct udma_desc *udma_udma_desc_from_paddr(struct udma_chan *uc,
						   dma_addr_t paddr)
{
	struct udma_desc *d = uc->terminated_desc;

	if (d) {
		dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
								   d->desc_idx);

		if (desc_paddr != paddr)
			d = NULL;
	}

	if (!d) {
		d = uc->desc;
		if (d) {
			dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
								d->desc_idx);

			if (desc_paddr != paddr)
				d = NULL;
		}
	}

	return d;
}
static void udma_free_hwdesc(struct udma_chan *uc, struct udma_desc *d)
{
	if (uc->use_dma_pool) {
		int i;

		for (i = 0; i < d->hwdesc_count; i++) {
			if (!d->hwdesc[i].cppi5_desc_vaddr)
				continue;

			dma_pool_free(uc->hdesc_pool,
				      d->hwdesc[i].cppi5_desc_vaddr,
				      d->hwdesc[i].cppi5_desc_paddr);

			d->hwdesc[i].cppi5_desc_vaddr = NULL;
		}
	} else if (d->hwdesc[0].cppi5_desc_vaddr) {
		dma_free_coherent(uc->dma_dev, d->hwdesc[0].cppi5_desc_size,
				  d->hwdesc[0].cppi5_desc_vaddr,
				  d->hwdesc[0].cppi5_desc_paddr);

		d->hwdesc[0].cppi5_desc_vaddr = NULL;
	}
}
static void udma_purge_desc_work(struct work_struct *work)
{
	struct udma_dev *ud = container_of(work, typeof(*ud), purge_work);
	struct virt_dma_desc *vd, *_vd;
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&ud->lock, flags);
	list_splice_tail_init(&ud->desc_to_purge, &head);
	spin_unlock_irqrestore(&ud->lock, flags);

	list_for_each_entry_safe(vd, _vd, &head, node) {
		struct udma_chan *uc = to_udma_chan(vd->tx.chan);
		struct udma_desc *d = to_udma_desc(&vd->tx);

		udma_free_hwdesc(uc, d);
		list_del(&vd->node);
		kfree(d);
	}

	/* If more to purge, schedule the work again */
	if (!list_empty(&ud->desc_to_purge))
		schedule_work(&ud->purge_work);
}
static void udma_desc_free(struct virt_dma_desc *vd)
{
	struct udma_dev *ud = to_udma_dev(vd->tx.chan->device);
	struct udma_chan *uc = to_udma_chan(vd->tx.chan);
	struct udma_desc *d = to_udma_desc(&vd->tx);
	unsigned long flags;

	if (uc->terminated_desc == d)
		uc->terminated_desc = NULL;

	if (uc->use_dma_pool) {
		udma_free_hwdesc(uc, d);
		kfree(d);
		return;
	}

	spin_lock_irqsave(&ud->lock, flags);
	list_add_tail(&vd->node, &ud->desc_to_purge);
	spin_unlock_irqrestore(&ud->lock, flags);

	schedule_work(&ud->purge_work);
}
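
/*
 * Freeing is deferred to the purge work above because udma_desc_free() can
 * be called from atomic context (virt-dma callbacks), while
 * dma_free_coherent() may not be called there; dma_pool_free() is safe in
 * atomic context, so pool-backed descriptors are freed inline instead.
 */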
static bool udma_is_chan_running(struct udma_chan *uc)
{
	u32 trt_ctl = 0;
	u32 rrt_ctl = 0;

	if (uc->tchan)
		trt_ctl = udma_tchanrt_read(uc, UDMA_CHAN_RT_CTL_REG);
	if (uc->rchan)
		rrt_ctl = udma_rchanrt_read(uc, UDMA_CHAN_RT_CTL_REG);

	if (trt_ctl & UDMA_CHAN_RT_CTL_EN || rrt_ctl & UDMA_CHAN_RT_CTL_EN)
		return true;

	return false;
}
static bool udma_is_chan_paused(struct udma_chan *uc)
{
	u32 val, pause_mask;

	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PEER_RT_EN_REG);
		pause_mask = UDMA_PEER_RT_EN_PAUSE;
		break;
	case DMA_MEM_TO_DEV:
		val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_RT_EN_REG);
		pause_mask = UDMA_PEER_RT_EN_PAUSE;
		break;
	case DMA_MEM_TO_MEM:
		val = udma_tchanrt_read(uc, UDMA_CHAN_RT_CTL_REG);
		pause_mask = UDMA_CHAN_RT_CTL_PAUSE;
		break;
	default:
		return false;
	}

	if (val & pause_mask)
		return true;

	return false;
}
static inline dma_addr_t udma_get_rx_flush_hwdesc_paddr(struct udma_chan *uc)
{
	return uc->ud->rx_flush.hwdescs[uc->config.pkt_mode].cppi5_desc_paddr;
}
static int udma_push_to_ring(struct udma_chan *uc, int idx)
{
	struct udma_desc *d = uc->desc;
	struct k3_ring *ring = NULL;
	dma_addr_t paddr;

	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		ring = uc->rflow->fd_ring;
		break;
	case DMA_MEM_TO_DEV:
	case DMA_MEM_TO_MEM:
		ring = uc->tchan->t_ring;
		break;
	default:
		return -EINVAL;
	}

	/* RX flush packet: idx == -1 is only passed in case of DEV_TO_MEM */
	if (idx == -1) {
		paddr = udma_get_rx_flush_hwdesc_paddr(uc);
	} else {
		paddr = udma_curr_cppi5_desc_paddr(d, idx);

		wmb(); /* Ensure that writes are not moved over this point */
	}

	return k3_ringacc_ring_push(ring, &paddr);
}
static bool udma_desc_is_rx_flush(struct udma_chan *uc, dma_addr_t addr)
{
	if (uc->config.dir != DMA_DEV_TO_MEM)
		return false;

	if (addr == udma_get_rx_flush_hwdesc_paddr(uc))
		return true;

	return false;
}
static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
{
	struct k3_ring *ring = NULL;
	int ret;

	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		ring = uc->rflow->r_ring;
		break;
	case DMA_MEM_TO_DEV:
	case DMA_MEM_TO_MEM:
		ring = uc->tchan->tc_ring;
		break;
	default:
		return -ENOENT;
	}

	ret = k3_ringacc_ring_pop(ring, addr);
	if (ret)
		return ret;

	rmb(); /* Ensure that reads are not moved before this point */

	/* Teardown completion */
	if (cppi5_desc_is_tdcm(*addr))
		return 0;

	/* Check for flush descriptor */
	if (udma_desc_is_rx_flush(uc, *addr))
		return -ENOENT;

	return 0;
}
static void udma_reset_rings(struct udma_chan *uc)
{
	struct k3_ring *ring1 = NULL;
	struct k3_ring *ring2 = NULL;

	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		if (uc->rchan) {
			ring1 = uc->rflow->fd_ring;
			ring2 = uc->rflow->r_ring;
		}
		break;
	case DMA_MEM_TO_DEV:
	case DMA_MEM_TO_MEM:
		if (uc->tchan) {
			ring1 = uc->tchan->t_ring;
			ring2 = uc->tchan->tc_ring;
		}
		break;
	default:
		break;
	}

	if (ring1)
		k3_ringacc_ring_reset_dma(ring1,
					  k3_ringacc_ring_get_occ(ring1));
	if (ring2)
		k3_ringacc_ring_reset(ring2);

	/* make sure we are not leaking memory by stalled descriptor */
	if (uc->terminated_desc) {
		udma_desc_free(&uc->terminated_desc->vd);
		uc->terminated_desc = NULL;
	}
}
static void udma_decrement_byte_counters(struct udma_chan *uc, u32 val)
{
	if (uc->desc->dir == DMA_DEV_TO_MEM) {
		udma_rchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);
		udma_rchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);
		if (uc->config.ep_type != PSIL_EP_NATIVE)
			udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
	} else {
		udma_tchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);
		udma_tchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);
		if (!uc->bchan && uc->config.ep_type != PSIL_EP_NATIVE)
			udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
	}
}
static void udma_reset_counters(struct udma_chan *uc)
{
	u32 val;

	if (uc->tchan) {
		val = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
		udma_tchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);

		val = udma_tchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG);
		udma_tchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);

		val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PCNT_REG);
		udma_tchanrt_write(uc, UDMA_CHAN_RT_PCNT_REG, val);

		if (!uc->bchan) {
			val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
			udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
		}
	}

	if (uc->rchan) {
		val = udma_rchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
		udma_rchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);

		val = udma_rchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG);
		udma_rchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);

		val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PCNT_REG);
		udma_rchanrt_write(uc, UDMA_CHAN_RT_PCNT_REG, val);

		val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
		udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
	}
}
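
/*
 * Note: the channel realtime byte/packet counter registers decrement by the
 * value written to them (which is also what udma_decrement_byte_counters()
 * relies on), so reading a counter and writing the same value back, as done
 * above, zeroes it without racing against in-flight updates.
 */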
static int udma_reset_chan(struct udma_chan *uc, bool hard)
{
	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
		udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
		break;
	case DMA_MEM_TO_DEV:
		udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
		udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
		break;
	case DMA_MEM_TO_MEM:
		udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
		udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
		break;
	default:
		return -EINVAL;
	}

	/* Reset all counters */
	udma_reset_counters(uc);

	/* Hard reset: re-initialize the channel to reset */
	if (hard) {
		struct udma_chan_config ucc_backup;
		int ret;

		memcpy(&ucc_backup, &uc->config, sizeof(uc->config));
		uc->ud->ddev.device_free_chan_resources(&uc->vc.chan);

		/* restore the channel configuration */
		memcpy(&uc->config, &ucc_backup, sizeof(uc->config));
		ret = uc->ud->ddev.device_alloc_chan_resources(&uc->vc.chan);
		if (ret)
			return ret;

		/*
		 * Setting forced teardown after forced reset helps recovering
		 * the channel.
		 */
		if (uc->config.dir == DMA_DEV_TO_MEM)
			udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
					   UDMA_CHAN_RT_CTL_EN |
					   UDMA_CHAN_RT_CTL_TDOWN |
					   UDMA_CHAN_RT_CTL_FTDOWN);
	}
	uc->state = UDMA_CHAN_IS_IDLE;

	return 0;
}
static void udma_start_desc(struct udma_chan *uc)
{
	struct udma_chan_config *ucc = &uc->config;

	if (uc->ud->match_data->type == DMA_TYPE_UDMA && ucc->pkt_mode &&
	    (uc->cyclic || ucc->dir == DMA_DEV_TO_MEM)) {
		int i;

		/*
		 * UDMA only: Push all descriptors to ring for packet mode
		 * cyclic or RX.
		 * PKTDMA supports pre-linked descriptors and cyclic is not
		 * supported there.
		 */
		for (i = 0; i < uc->desc->sglen; i++)
			udma_push_to_ring(uc, i);
	} else {
		udma_push_to_ring(uc, 0);
	}
}
static bool udma_chan_needs_reconfiguration(struct udma_chan *uc)
{
	/* Only PDMAs have staticTR */
	if (uc->config.ep_type == PSIL_EP_NATIVE)
		return false;

	/* Check if the staticTR configuration has changed for TX */
	if (memcmp(&uc->static_tr, &uc->desc->static_tr, sizeof(uc->static_tr)))
		return true;

	return false;
}
static int udma_start(struct udma_chan *uc)
{
	struct virt_dma_desc *vd = vchan_next_desc(&uc->vc);

	if (!vd) {
		uc->desc = NULL;
		return -ENOENT;
	}

	list_del(&vd->node);

	uc->desc = to_udma_desc(&vd->tx);

	/* Channel is already running and does not need reconfiguration */
	if (udma_is_chan_running(uc) && !udma_chan_needs_reconfiguration(uc)) {
		udma_start_desc(uc);
		goto out;
	}

	/* Make sure that we clear the teardown bit, if it is set */
	udma_reset_chan(uc, false);

	/* Push descriptors before we start the channel */
	udma_start_desc(uc);

	switch (uc->desc->dir) {
	case DMA_DEV_TO_MEM:
		/* Config remote TR */
		if (uc->config.ep_type == PSIL_EP_PDMA_XY) {
			u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) |
				  PDMA_STATIC_TR_X(uc->desc->static_tr.elsize);
			const struct udma_match_data *match_data =
							uc->ud->match_data;

			if (uc->config.enable_acc32)
				val |= PDMA_STATIC_TR_XY_ACC32;
			if (uc->config.enable_burst)
				val |= PDMA_STATIC_TR_XY_BURST;

			udma_rchanrt_write(uc,
					   UDMA_CHAN_RT_PEER_STATIC_TR_XY_REG,
					   val);

			udma_rchanrt_write(uc,
				UDMA_CHAN_RT_PEER_STATIC_TR_Z_REG,
				PDMA_STATIC_TR_Z(uc->desc->static_tr.bstcnt,
						 match_data->statictr_z_mask));

			/* save the current staticTR configuration */
			memcpy(&uc->static_tr, &uc->desc->static_tr,
			       sizeof(uc->static_tr));
		}

		udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);

		/* Enable remote */
		udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
				   UDMA_PEER_RT_EN_ENABLE);

		break;
	case DMA_MEM_TO_DEV:
		/* Config remote TR */
		if (uc->config.ep_type == PSIL_EP_PDMA_XY) {
			u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) |
				  PDMA_STATIC_TR_X(uc->desc->static_tr.elsize);

			if (uc->config.enable_acc32)
				val |= PDMA_STATIC_TR_XY_ACC32;
			if (uc->config.enable_burst)
				val |= PDMA_STATIC_TR_XY_BURST;

			udma_tchanrt_write(uc,
					   UDMA_CHAN_RT_PEER_STATIC_TR_XY_REG,
					   val);

			/* save the current staticTR configuration */
			memcpy(&uc->static_tr, &uc->desc->static_tr,
			       sizeof(uc->static_tr));
		}

		/* Enable remote */
		udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
				   UDMA_PEER_RT_EN_ENABLE);

		udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);

		break;
	case DMA_MEM_TO_MEM:
		udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);
		udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN);

		break;
	default:
		return -EINVAL;
	}

	uc->state = UDMA_CHAN_IS_ACTIVE;
out:

	return 0;
}
static int udma_stop(struct udma_chan *uc)
{
	enum udma_chan_state old_state = uc->state;

	uc->state = UDMA_CHAN_IS_TERMINATING;
	reinit_completion(&uc->teardown_completed);

	switch (uc->config.dir) {
	case DMA_DEV_TO_MEM:
		if (!uc->cyclic && !uc->desc)
			udma_push_to_ring(uc, -1);

		udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
				   UDMA_PEER_RT_EN_ENABLE |
				   UDMA_PEER_RT_EN_TEARDOWN);
		break;
	case DMA_MEM_TO_DEV:
		udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
				   UDMA_PEER_RT_EN_ENABLE |
				   UDMA_PEER_RT_EN_FLUSH);
		udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN |
				   UDMA_CHAN_RT_CTL_TDOWN);
		break;
	case DMA_MEM_TO_MEM:
		udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
				   UDMA_CHAN_RT_CTL_EN |
				   UDMA_CHAN_RT_CTL_TDOWN);
		break;
	default:
		uc->state = old_state;
		complete_all(&uc->teardown_completed);
		return -EINVAL;
	}

	return 0;
}
static void udma_cyclic_packet_elapsed(struct udma_chan *uc)
{
	struct udma_desc *d = uc->desc;
	struct cppi5_host_desc_t *h_desc;

	h_desc = d->hwdesc[d->desc_idx].cppi5_desc_vaddr;
	cppi5_hdesc_reset_to_original(h_desc);
	udma_push_to_ring(uc, d->desc_idx);
	d->desc_idx = (d->desc_idx + 1) % d->sglen;
}
static inline void udma_fetch_epib(struct udma_chan *uc, struct udma_desc *d)
{
	struct cppi5_host_desc_t *h_desc = d->hwdesc[0].cppi5_desc_vaddr;

	memcpy(d->metadata, h_desc->epib, d->metadata_size);
}
static bool udma_is_desc_really_done(struct udma_chan *uc, struct udma_desc *d)
{
	u32 peer_bcnt, bcnt;

	/*
	 * Only TX towards PDMA is affected.
	 * If DMA_PREP_INTERRUPT is not set by consumer then skip the transfer
	 * completion calculation, consumer must ensure that there is no stale
	 * data in DMA fabric in this case.
	 */
	if (uc->config.ep_type == PSIL_EP_NATIVE ||
	    uc->config.dir != DMA_MEM_TO_DEV || !(uc->config.tx_flags & DMA_PREP_INTERRUPT))
		return true;

	peer_bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
	bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);

	/* Transfer is incomplete, store current residue and time stamp */
	if (peer_bcnt < bcnt) {
		uc->tx_drain.residue = bcnt - peer_bcnt;
		uc->tx_drain.tstamp = ktime_get();
		return false;
	}

	return true;
}
static void udma_check_tx_completion(struct work_struct *work)
{
	struct udma_chan *uc = container_of(work, typeof(*uc),
					    tx_drain.work.work);
	bool desc_done = true;
	u32 residue_diff;
	ktime_t time_diff;
	unsigned long delay;

	while (1) {
		if (uc->desc) {
			/* Get previous residue and time stamp */
			residue_diff = uc->tx_drain.residue;
			time_diff = uc->tx_drain.tstamp;
			/*
			 * Get current residue and time stamp or see if
			 * transfer is complete
			 */
			desc_done = udma_is_desc_really_done(uc, uc->desc);
		}

		if (!desc_done) {
			/*
			 * Find the time delta and residue delta w.r.t
			 * previous poll
			 */
			time_diff = ktime_sub(uc->tx_drain.tstamp,
					      time_diff) + 1;
			residue_diff -= uc->tx_drain.residue;
			if (residue_diff) {
				/*
				 * Try to guess when we should check
				 * next time by calculating rate at
				 * which data is being drained at the
				 * peer device
				 */
				delay = (time_diff / residue_diff) *
					uc->tx_drain.residue;
			} else {
				/* No progress, check again in 1 second */
				schedule_delayed_work(&uc->tx_drain.work, HZ);
				break;
			}

			usleep_range(ktime_to_us(delay),
				     ktime_to_us(delay) + 10);
			continue;
		}

		if (uc->desc) {
			struct udma_desc *d = uc->desc;

			udma_decrement_byte_counters(uc, d->residue);
			udma_start(uc);
			vchan_cookie_complete(&d->vd);
			break;
		}

		break;
	}
}
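
/*
 * Worked example for the drain-rate estimate above (illustrative numbers
 * only): if 1000 bytes drained in 1 ms between two polls and 500 bytes
 * remain, the next check is scheduled in roughly (1 ms / 1000) * 500 =
 * 0.5 ms.
 */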
static irqreturn_t udma_ring_irq_handler(int irq, void *data)
{
	struct udma_chan *uc = data;
	struct udma_desc *d;
	dma_addr_t paddr = 0;

	if (udma_pop_from_ring(uc, &paddr) || !paddr)
		return IRQ_HANDLED;

	spin_lock(&uc->vc.lock);

	/* Teardown completion message */
	if (cppi5_desc_is_tdcm(paddr)) {
		complete_all(&uc->teardown_completed);

		if (uc->terminated_desc) {
			udma_desc_free(&uc->terminated_desc->vd);
			uc->terminated_desc = NULL;
		}

		if (!uc->desc)
			udma_start(uc);

		goto out;
	}

	d = udma_udma_desc_from_paddr(uc, paddr);

	if (d) {
		dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
								   d->desc_idx);
		if (desc_paddr != paddr) {
			dev_err(uc->ud->dev, "not matching descriptors!\n");
			goto out;
		}

		if (d == uc->desc) {
			/* active descriptor */
			if (uc->cyclic) {
				udma_cyclic_packet_elapsed(uc);
				vchan_cyclic_callback(&d->vd);
			} else {
				if (udma_is_desc_really_done(uc, d)) {
					udma_decrement_byte_counters(uc, d->residue);
					udma_start(uc);
					vchan_cookie_complete(&d->vd);
				} else {
					schedule_delayed_work(&uc->tx_drain.work,
							      0);
				}
			}
		} else {
			/*
			 * terminated descriptor, mark the descriptor as
			 * completed to update the channel's cookie marker
			 */
			dma_cookie_complete(&d->vd.tx);
		}
	}
out:
	spin_unlock(&uc->vc.lock);

	return IRQ_HANDLED;
}
static irqreturn_t udma_udma_irq_handler(int irq, void *data)
{
	struct udma_chan *uc = data;
	struct udma_desc *d;

	spin_lock(&uc->vc.lock);
	d = uc->desc;
	if (d) {
		d->tr_idx = (d->tr_idx + 1) % d->sglen;

		if (uc->cyclic) {
			vchan_cyclic_callback(&d->vd);
		} else {
			/* TODO: figure out the real amount of data */
			udma_decrement_byte_counters(uc, d->residue);
			udma_start(uc);
			vchan_cookie_complete(&d->vd);
		}
	}

	spin_unlock(&uc->vc.lock);

	return IRQ_HANDLED;
}
/**
 * __udma_alloc_gp_rflow_range - alloc range of GP RX flows
 * @ud: UDMA device
 * @from: Start the search from this flow id number
 * @cnt: Number of consecutive flow ids to allocate
 *
 * Allocate range of RX flow ids for future use, those flows can be requested
 * only using explicit flow id number. If @from is set to -1 it will try to
 * find the first free range. If @from is a positive value it will force
 * allocation only of the specified range of flows.
 *
 * Returns -ENOMEM if can't find free range.
 * -EEXIST if requested range is busy.
 * -EINVAL if wrong input values passed.
 * Returns flow id on success.
 */
static int __udma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
{
	int start, tmp_from;
	DECLARE_BITMAP(tmp, K3_UDMA_MAX_RFLOWS);

	tmp_from = from;
	if (tmp_from < 0)
		tmp_from = ud->rchan_cnt;
	/* default flows can't be allocated and accessible only by id */
	if (tmp_from < ud->rchan_cnt)
		return -EINVAL;

	if (tmp_from + cnt > ud->rflow_cnt)
		return -EINVAL;

	bitmap_or(tmp, ud->rflow_gp_map, ud->rflow_gp_map_allocated,
		  ud->rflow_cnt);

	start = bitmap_find_next_zero_area(tmp,
					   ud->rflow_cnt,
					   tmp_from, cnt, 0);
	if (start >= ud->rflow_cnt)
		return -ENOMEM;

	if (from >= 0 && start != from)
		return -EEXIST;

	bitmap_set(ud->rflow_gp_map_allocated, start, cnt);
	return start;
}
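
/*
 * A minimal usage sketch of the pair of helpers around this point
 * (illustrative, hypothetical values): reserve four consecutive GP rflows
 * wherever a free range is found, then release them again:
 *
 *	int start = __udma_alloc_gp_rflow_range(ud, -1, 4);
 *
 *	if (start >= 0)
 *		__udma_free_gp_rflow_range(ud, start, 4);
 */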
static int __udma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
{
	if (from < ud->rchan_cnt)
		return -EINVAL;
	if (from + cnt > ud->rflow_cnt)
		return -EINVAL;

	bitmap_clear(ud->rflow_gp_map_allocated, from, cnt);
	return 0;
}
static struct udma_rflow *__udma_get_rflow(struct udma_dev *ud, int id)
{
	/*
	 * Attempt to request rflow by ID can be made for any rflow
	 * if not in use, with the assumption that the caller knows what it is
	 * doing. TI-SCI FW will perform an additional permission check
	 * anyway, so it's safe.
	 */
	if (id < 0 || id >= ud->rflow_cnt)
		return ERR_PTR(-ENOENT);

	if (test_bit(id, ud->rflow_in_use))
		return ERR_PTR(-ENOENT);

	if (ud->rflow_gp_map) {
		/* GP rflow has to be allocated first */
		if (!test_bit(id, ud->rflow_gp_map) &&
		    !test_bit(id, ud->rflow_gp_map_allocated))
			return ERR_PTR(-EINVAL);
	}

	dev_dbg(ud->dev, "get rflow%d\n", id);
	set_bit(id, ud->rflow_in_use);
	return &ud->rflows[id];
}
static void __udma_put_rflow(struct udma_dev *ud, struct udma_rflow *rflow)
{
	if (!test_bit(rflow->id, ud->rflow_in_use)) {
		dev_err(ud->dev, "attempt to put unused rflow%d\n", rflow->id);
		return;
	}

	dev_dbg(ud->dev, "put rflow%d\n", rflow->id);
	clear_bit(rflow->id, ud->rflow_in_use);
}
#define UDMA_RESERVE_RESOURCE(res)					\
static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud,	\
					       enum udma_tp_level tpl,	\
					       int id)			\
{									\
	if (id >= 0) {							\
		if (test_bit(id, ud->res##_map)) {			\
			dev_err(ud->dev, "res##%d is in use\n", id);	\
			return ERR_PTR(-ENOENT);			\
		}							\
	} else {							\
		int start;						\
									\
		if (tpl >= ud->res##_tpl.levels)			\
			tpl = ud->res##_tpl.levels - 1;			\
									\
		start = ud->res##_tpl.start_idx[tpl];			\
									\
		id = find_next_zero_bit(ud->res##_map, ud->res##_cnt,	\
					start);				\
		if (id == ud->res##_cnt) {				\
			return ERR_PTR(-ENOENT);			\
		}							\
	}								\
									\
	set_bit(id, ud->res##_map);					\
	return &ud->res##s[id];						\
}

UDMA_RESERVE_RESOURCE(bchan);
UDMA_RESERVE_RESOURCE(tchan);
UDMA_RESERVE_RESOURCE(rchan);
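
/*
 * The macro above expands to __udma_reserve_bchan(), __udma_reserve_tchan()
 * and __udma_reserve_rchan(). Passing id >= 0 claims that exact channel (or
 * fails with -ENOENT if it is busy); id == -1 takes the first free channel
 * at or above the start index of the requested throughput level.
 */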
static int bcdma_get_bchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	enum udma_tp_level tpl;
	int ret;

	if (uc->bchan) {
		dev_dbg(ud->dev, "chan%d: already have bchan%d allocated\n",
			uc->id, uc->bchan->id);
		return 0;
	}

	/*
	 * Use normal channels for peripherals, and highest TPL channel for
	 * memcpy
	 */
	if (uc->config.tr_trigger_type)
		tpl = 0;
	else
		tpl = ud->bchan_tpl.levels - 1;

	uc->bchan = __udma_reserve_bchan(ud, tpl, -1);
	if (IS_ERR(uc->bchan)) {
		ret = PTR_ERR(uc->bchan);
		uc->bchan = NULL;
		return ret;
	}

	uc->tchan = uc->bchan;

	return 0;
}
static int udma_get_tchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int ret;

	if (uc->tchan) {
		dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n",
			uc->id, uc->tchan->id);
		return 0;
	}

	/*
	 * mapped_channel_id is -1 for UDMA, BCDMA and PKTDMA unmapped channels.
	 * For PKTDMA mapped channels it is configured to a channel which must
	 * be used to service the peripheral.
	 */
	uc->tchan = __udma_reserve_tchan(ud, uc->config.channel_tpl,
					 uc->config.mapped_channel_id);
	if (IS_ERR(uc->tchan)) {
		ret = PTR_ERR(uc->tchan);
		uc->tchan = NULL;
		return ret;
	}

	if (ud->tflow_cnt) {
		int tflow_id;

		/* Only PKTDMA has support for tx flows */
		if (uc->config.default_flow_id >= 0)
			tflow_id = uc->config.default_flow_id;
		else
			tflow_id = uc->tchan->id;

		if (test_bit(tflow_id, ud->tflow_map)) {
			dev_err(ud->dev, "tflow%d is in use\n", tflow_id);
			clear_bit(uc->tchan->id, ud->tchan_map);
			uc->tchan = NULL;
			return -ENOENT;
		}

		uc->tchan->tflow_id = tflow_id;
		set_bit(tflow_id, ud->tflow_map);
	} else {
		uc->tchan->tflow_id = -1;
	}

	return 0;
}
static int udma_get_rchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int ret;

	if (uc->rchan) {
		dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n",
			uc->id, uc->rchan->id);
		return 0;
	}

	/*
	 * mapped_channel_id is -1 for UDMA, BCDMA and PKTDMA unmapped channels.
	 * For PKTDMA mapped channels it is configured to a channel which must
	 * be used to service the peripheral.
	 */
	uc->rchan = __udma_reserve_rchan(ud, uc->config.channel_tpl,
					 uc->config.mapped_channel_id);
	if (IS_ERR(uc->rchan)) {
		ret = PTR_ERR(uc->rchan);
		uc->rchan = NULL;
		return ret;
	}

	return 0;
}
static int udma_get_chan_pair(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	int chan_id, end;

	if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) {
		dev_info(ud->dev, "chan%d: already have %d pair allocated\n",
			 uc->id, uc->tchan->id);
		return 0;
	}

	if (uc->tchan) {
		dev_err(ud->dev, "chan%d: already have tchan%d allocated\n",
			uc->id, uc->tchan->id);
		return -EBUSY;
	} else if (uc->rchan) {
		dev_err(ud->dev, "chan%d: already have rchan%d allocated\n",
			uc->id, uc->rchan->id);
		return -EBUSY;
	}

	/* Can be optimized, but let's have it like this for now */
	end = min(ud->tchan_cnt, ud->rchan_cnt);
	/*
	 * Try to use the highest TPL channel pair for MEM_TO_MEM channels
	 * Note: in UDMAP the channel TPL is symmetric between tchan and rchan
	 */
	chan_id = ud->tchan_tpl.start_idx[ud->tchan_tpl.levels - 1];
	for (; chan_id < end; chan_id++) {
		if (!test_bit(chan_id, ud->tchan_map) &&
		    !test_bit(chan_id, ud->rchan_map))
			break;
	}

	if (chan_id == end)
		return -ENOENT;

	set_bit(chan_id, ud->tchan_map);
	set_bit(chan_id, ud->rchan_map);
	uc->tchan = &ud->tchans[chan_id];
	uc->rchan = &ud->rchans[chan_id];

	/* UDMA does not use tx flows */
	uc->tchan->tflow_id = -1;

	return 0;
}
static int udma_get_rflow(struct udma_chan *uc, int flow_id)
{
	struct udma_dev *ud = uc->ud;
	int ret;

	if (!uc->rchan) {
		dev_err(ud->dev, "chan%d: does not have rchan??\n", uc->id);
		return -EINVAL;
	}

	if (uc->rflow) {
		dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n",
			uc->id, uc->rflow->id);
		return 0;
	}

	uc->rflow = __udma_get_rflow(ud, flow_id);
	if (IS_ERR(uc->rflow)) {
		ret = PTR_ERR(uc->rflow);
		uc->rflow = NULL;
		return ret;
	}

	return 0;
}
static void bcdma_put_bchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->bchan) {
		dev_dbg(ud->dev, "chan%d: put bchan%d\n", uc->id,
			uc->bchan->id);
		clear_bit(uc->bchan->id, ud->bchan_map);
		uc->bchan = NULL;
		uc->tchan = NULL;
	}
}
static void udma_put_rchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rchan) {
		dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id,
			uc->rchan->id);
		clear_bit(uc->rchan->id, ud->rchan_map);
		uc->rchan = NULL;
	}
}
static void udma_put_tchan(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->tchan) {
		dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id,
			uc->tchan->id);
		clear_bit(uc->tchan->id, ud->tchan_map);

		if (uc->tchan->tflow_id >= 0)
			clear_bit(uc->tchan->tflow_id, ud->tflow_map);

		uc->tchan = NULL;
	}
}
static void udma_put_rflow(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;

	if (uc->rflow) {
		dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id,
			uc->rflow->id);
		__udma_put_rflow(ud, uc->rflow);
		uc->rflow = NULL;
	}
}
static void bcdma_free_bchan_resources(struct udma_chan *uc)
{
	if (!uc->bchan)
		return;

	k3_ringacc_ring_free(uc->bchan->tc_ring);
	k3_ringacc_ring_free(uc->bchan->t_ring);
	uc->bchan->tc_ring = NULL;
	uc->bchan->t_ring = NULL;
	k3_configure_chan_coherency(&uc->vc.chan, 0);

	bcdma_put_bchan(uc);
}
static int bcdma_alloc_bchan_resources(struct udma_chan *uc)
{
	struct k3_ring_cfg ring_cfg;
	struct udma_dev *ud = uc->ud;
	int ret;

	ret = bcdma_get_bchan(uc);
	if (ret)
		return ret;

	ret = k3_ringacc_request_rings_pair(ud->ringacc, uc->bchan->id, -1,
					    &uc->bchan->t_ring,
					    &uc->bchan->tc_ring);
	if (ret) {
		ret = -EBUSY;
		goto err_ring;
	}

	memset(&ring_cfg, 0, sizeof(ring_cfg));
	ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
	ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
	ring_cfg.mode = K3_RINGACC_RING_MODE_RING;

	k3_configure_chan_coherency(&uc->vc.chan, ud->asel);
	ring_cfg.asel = ud->asel;
	ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan);

	ret = k3_ringacc_ring_cfg(uc->bchan->t_ring, &ring_cfg);
	if (ret)
		goto err_ringcfg;

	return 0;

err_ringcfg:
	k3_ringacc_ring_free(uc->bchan->tc_ring);
	uc->bchan->tc_ring = NULL;
	k3_ringacc_ring_free(uc->bchan->t_ring);
	uc->bchan->t_ring = NULL;
	k3_configure_chan_coherency(&uc->vc.chan, 0);
err_ring:
	bcdma_put_bchan(uc);

	return ret;
}
static void udma_free_tx_resources(struct udma_chan *uc)
{
	if (!uc->tchan)
		return;

	k3_ringacc_ring_free(uc->tchan->t_ring);
	k3_ringacc_ring_free(uc->tchan->tc_ring);
	uc->tchan->t_ring = NULL;
	uc->tchan->tc_ring = NULL;

	udma_put_tchan(uc);
}
static int udma_alloc_tx_resources(struct udma_chan *uc)
{
	struct k3_ring_cfg ring_cfg;
	struct udma_dev *ud = uc->ud;
	struct udma_tchan *tchan;
	int ring_idx, ret;

	ret = udma_get_tchan(uc);
	if (ret)
		return ret;

	tchan = uc->tchan;
	if (tchan->tflow_id >= 0)
		ring_idx = tchan->tflow_id;
	else
		ring_idx = ud->bchan_cnt + tchan->id;

	ret = k3_ringacc_request_rings_pair(ud->ringacc, ring_idx, -1,
					    &tchan->t_ring,
					    &tchan->tc_ring);
	if (ret) {
		ret = -EBUSY;
		goto err_ring;
	}

	memset(&ring_cfg, 0, sizeof(ring_cfg));
	ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
	ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
	if (ud->match_data->type == DMA_TYPE_UDMA) {
		ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
	} else {
		ring_cfg.mode = K3_RINGACC_RING_MODE_RING;

		k3_configure_chan_coherency(&uc->vc.chan, uc->config.asel);
		ring_cfg.asel = uc->config.asel;
		ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan);
	}

	ret = k3_ringacc_ring_cfg(tchan->t_ring, &ring_cfg);
	ret |= k3_ringacc_ring_cfg(tchan->tc_ring, &ring_cfg);

	if (ret)
		goto err_ringcfg;

	return 0;

err_ringcfg:
	k3_ringacc_ring_free(uc->tchan->tc_ring);
	uc->tchan->tc_ring = NULL;
	k3_ringacc_ring_free(uc->tchan->t_ring);
	uc->tchan->t_ring = NULL;
err_ring:
	udma_put_tchan(uc);

	return ret;
}
static void udma_free_rx_resources(struct udma_chan *uc)
{
	if (!uc->rchan)
		return;

	if (uc->rflow) {
		struct udma_rflow *rflow = uc->rflow;

		k3_ringacc_ring_free(rflow->fd_ring);
		k3_ringacc_ring_free(rflow->r_ring);
		rflow->fd_ring = NULL;
		rflow->r_ring = NULL;

		udma_put_rflow(uc);
	}

	udma_put_rchan(uc);
}
static int udma_alloc_rx_resources(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	struct k3_ring_cfg ring_cfg;
	struct udma_rflow *rflow;
	int fd_ring_id;
	int ret;

	ret = udma_get_rchan(uc);
	if (ret)
		return ret;

	/* For MEM_TO_MEM we don't need rflow or rings */
	if (uc->config.dir == DMA_MEM_TO_MEM)
		return 0;

	if (uc->config.default_flow_id >= 0)
		ret = udma_get_rflow(uc, uc->config.default_flow_id);
	else
		ret = udma_get_rflow(uc, uc->rchan->id);

	if (ret) {
		ret = -EBUSY;
		goto err_rflow;
	}

	rflow = uc->rflow;
	if (ud->tflow_cnt)
		fd_ring_id = ud->tflow_cnt + rflow->id;
	else
		fd_ring_id = ud->bchan_cnt + ud->tchan_cnt + ud->echan_cnt +
			     uc->rchan->id;

	ret = k3_ringacc_request_rings_pair(ud->ringacc, fd_ring_id, -1,
					    &rflow->fd_ring, &rflow->r_ring);
	if (ret) {
		ret = -EBUSY;
		goto err_ring;
	}

	memset(&ring_cfg, 0, sizeof(ring_cfg));

	ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
	if (ud->match_data->type == DMA_TYPE_UDMA) {
		if (uc->config.pkt_mode)
			ring_cfg.size = SG_MAX_SEGMENTS;
		else
			ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;

		ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
	} else {
		ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
		ring_cfg.mode = K3_RINGACC_RING_MODE_RING;

		k3_configure_chan_coherency(&uc->vc.chan, uc->config.asel);
		ring_cfg.asel = uc->config.asel;
		ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan);
	}

	ret = k3_ringacc_ring_cfg(rflow->fd_ring, &ring_cfg);

	ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
	ret |= k3_ringacc_ring_cfg(rflow->r_ring, &ring_cfg);

	if (ret)
		goto err_ringcfg;

	return 0;

err_ringcfg:
	k3_ringacc_ring_free(rflow->r_ring);
	rflow->r_ring = NULL;
	k3_ringacc_ring_free(rflow->fd_ring);
	rflow->fd_ring = NULL;
err_ring:
	udma_put_rflow(uc);
err_rflow:
	udma_put_rchan(uc);

	return ret;
}
#define TISCI_BCDMA_BCHAN_VALID_PARAMS (			\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_EXTENDED_CH_TYPE_VALID)

#define TISCI_BCDMA_TCHAN_VALID_PARAMS (			\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID)

#define TISCI_BCDMA_RCHAN_VALID_PARAMS (			\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID)

#define TISCI_UDMA_TCHAN_VALID_PARAMS (				\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |		\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |		\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |		\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)

#define TISCI_UDMA_RCHAN_VALID_PARAMS (				\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID |		\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID |		\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID |		\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID |	\
	TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)
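
/*
 * The valid_params masks above tell the TI-SCI resource-management firmware
 * which fields of a ti_sci_msg_rm_udmap_*_ch_cfg request are meaningful;
 * fields without their VALID bit set are ignored by the firmware.
 */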
static int udma_tisci_m2m_channel_config(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
	struct udma_tchan *tchan = uc->tchan;
	struct udma_rchan *rchan = uc->rchan;
	u8 burst_size = 0;
	int ret;
	u8 tpl;

	/* Non synchronized - mem to mem type of transfer */
	int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
	struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
	struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };

	if (ud->match_data->flags & UDMA_FLAG_BURST_SIZE) {
		tpl = udma_get_chan_tpl_index(&ud->tchan_tpl, tchan->id);

		burst_size = ud->match_data->burst_size[tpl];
	}

	req_tx.valid_params = TISCI_UDMA_TCHAN_VALID_PARAMS;
	req_tx.nav_id = tisci_rm->tisci_dev_id;
	req_tx.index = tchan->id;
	req_tx.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
	req_tx.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
	req_tx.txcq_qnum = tc_ring;
	req_tx.tx_atype = ud->atype;
	if (burst_size) {
		req_tx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID;
		req_tx.tx_burst_size = burst_size;
	}

	ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
	if (ret) {
		dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
		return ret;
	}

	req_rx.valid_params = TISCI_UDMA_RCHAN_VALID_PARAMS;
	req_rx.nav_id = tisci_rm->tisci_dev_id;
	req_rx.index = rchan->id;
	req_rx.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
	req_rx.rxcq_qnum = tc_ring;
	req_rx.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
	req_rx.rx_atype = ud->atype;
	if (burst_size) {
		req_rx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID;
		req_rx.rx_burst_size = burst_size;
	}

	ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
	if (ret)
		dev_err(ud->dev, "rchan%d alloc failed %d\n", rchan->id, ret);

	return ret;
}
static int bcdma_tisci_m2m_channel_config(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
	struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
	struct udma_bchan *bchan = uc->bchan;
	u8 burst_size = 0;
	int ret;
	u8 tpl;

	if (ud->match_data->flags & UDMA_FLAG_BURST_SIZE) {
		tpl = udma_get_chan_tpl_index(&ud->bchan_tpl, bchan->id);

		burst_size = ud->match_data->burst_size[tpl];
	}

	req_tx.valid_params = TISCI_BCDMA_BCHAN_VALID_PARAMS;
	req_tx.nav_id = tisci_rm->tisci_dev_id;
	req_tx.extended_ch_type = TI_SCI_RM_BCDMA_EXTENDED_CH_TYPE_BCHAN;
	req_tx.index = bchan->id;
	if (burst_size) {
		req_tx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID;
		req_tx.tx_burst_size = burst_size;
	}

	ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
	if (ret)
		dev_err(ud->dev, "bchan%d cfg failed %d\n", bchan->id, ret);

	return ret;
}
static int udma_tisci_tx_channel_config(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
	struct udma_tchan *tchan = uc->tchan;
	int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
	struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
	u32 mode, fetch_size;
	int ret;

	if (uc->config.pkt_mode) {
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
		fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
						   uc->config.psd_size, 0);
	} else {
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
		fetch_size = sizeof(struct cppi5_desc_hdr_t);
	}

	req_tx.valid_params = TISCI_UDMA_TCHAN_VALID_PARAMS;
	req_tx.nav_id = tisci_rm->tisci_dev_id;
	req_tx.index = tchan->id;
	req_tx.tx_chan_type = mode;
	req_tx.tx_supr_tdpkt = uc->config.notdpkt;
	req_tx.tx_fetch_size = fetch_size >> 2;
	req_tx.txcq_qnum = tc_ring;
	req_tx.tx_atype = uc->config.atype;
	if (uc->config.ep_type == PSIL_EP_PDMA_XY &&
	    ud->match_data->flags & UDMA_FLAG_TDTYPE) {
		/* wait for peer to complete the teardown for PDMAs */
		req_tx.valid_params |=
				TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_TDTYPE_VALID;
		req_tx.tx_tdtype = 1;
	}

	ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
	if (ret)
		dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);

	return ret;
}
static int bcdma_tisci_tx_channel_config(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
	struct udma_tchan *tchan = uc->tchan;
	struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
	int ret;

	req_tx.valid_params = TISCI_BCDMA_TCHAN_VALID_PARAMS;
	req_tx.nav_id = tisci_rm->tisci_dev_id;
	req_tx.index = tchan->id;
	req_tx.tx_supr_tdpkt = uc->config.notdpkt;
	if (ud->match_data->flags & UDMA_FLAG_TDTYPE) {
		/* wait for peer to complete the teardown for PDMAs */
		req_tx.valid_params |=
				TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_TDTYPE_VALID;
		req_tx.tx_tdtype = 1;
	}

	ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
	if (ret)
		dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);

	return ret;
}
#define pktdma_tisci_tx_channel_config bcdma_tisci_tx_channel_config
static int udma_tisci_rx_channel_config(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
	struct udma_rchan *rchan = uc->rchan;
	int fd_ring = k3_ringacc_get_ring_id(uc->rflow->fd_ring);
	int rx_ring = k3_ringacc_get_ring_id(uc->rflow->r_ring);
	struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
	struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
	u32 mode, fetch_size;
	int ret;

	if (uc->config.pkt_mode) {
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
		fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
						   uc->config.psd_size, 0);
	} else {
		mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
		fetch_size = sizeof(struct cppi5_desc_hdr_t);
	}

	req_rx.valid_params = TISCI_UDMA_RCHAN_VALID_PARAMS;
	req_rx.nav_id = tisci_rm->tisci_dev_id;
	req_rx.index = rchan->id;
	req_rx.rx_fetch_size = fetch_size >> 2;
	req_rx.rxcq_qnum = rx_ring;
	req_rx.rx_chan_type = mode;
	req_rx.rx_atype = uc->config.atype;

	ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
	if (ret) {
		dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret);
		return ret;
	}

	flow_req.valid_params =
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;

	flow_req.nav_id = tisci_rm->tisci_dev_id;
	flow_req.flow_index = rchan->id;

	if (uc->config.needs_epib)
		flow_req.rx_einfo_present = 1;
	else
		flow_req.rx_einfo_present = 0;
	if (uc->config.psd_size)
		flow_req.rx_psinfo_present = 1;
	else
		flow_req.rx_psinfo_present = 0;
	flow_req.rx_error_handling = 1;
	flow_req.rx_dest_qnum = rx_ring;
	flow_req.rx_src_tag_hi_sel = UDMA_RFLOW_SRCTAG_NONE;
	flow_req.rx_src_tag_lo_sel = UDMA_RFLOW_SRCTAG_SRC_TAG;
	flow_req.rx_dest_tag_hi_sel = UDMA_RFLOW_DSTTAG_DST_TAG_HI;
	flow_req.rx_dest_tag_lo_sel = UDMA_RFLOW_DSTTAG_DST_TAG_LO;
	flow_req.rx_fdq0_sz0_qnum = fd_ring;
	flow_req.rx_fdq1_qnum = fd_ring;
	flow_req.rx_fdq2_qnum = fd_ring;
	flow_req.rx_fdq3_qnum = fd_ring;

	ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req);
	if (ret)
		dev_err(ud->dev, "flow%d config failed: %d\n", rchan->id, ret);

	return ret;
}
static int bcdma_tisci_rx_channel_config(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
	struct udma_rchan *rchan = uc->rchan;
	struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
	int ret;

	req_rx.valid_params = TISCI_BCDMA_RCHAN_VALID_PARAMS;
	req_rx.nav_id = tisci_rm->tisci_dev_id;
	req_rx.index = rchan->id;

	ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
	if (ret)
		dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret);

	return ret;
}
static int pktdma_tisci_rx_channel_config(struct udma_chan *uc)
{
	struct udma_dev *ud = uc->ud;
	struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
	const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
	struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
	struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
	int ret;

	req_rx.valid_params = TISCI_BCDMA_RCHAN_VALID_PARAMS;
	req_rx.nav_id = tisci_rm->tisci_dev_id;
	req_rx.index = uc->rchan->id;

	ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
	if (ret) {
		dev_err(ud->dev, "rchan%d cfg failed %d\n", uc->rchan->id, ret);
		return ret;
	}

	flow_req.valid_params =
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
		TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID;

	flow_req.nav_id = tisci_rm->tisci_dev_id;
	flow_req.flow_index = uc->rflow->id;

	if (uc->config.needs_epib)
		flow_req.rx_einfo_present = 1;
	else
		flow_req.rx_einfo_present = 0;
	if (uc->config.psd_size)
		flow_req.rx_psinfo_present = 1;
	else
		flow_req.rx_psinfo_present = 0;
	flow_req.rx_error_handling = 1;

	ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req);
	if (ret)
		dev_err(ud->dev, "flow%d config failed: %d\n", uc->rflow->id,
			ret);

	return ret;
}
static int udma_alloc_chan_resources(struct dma_chan *chan)
{
	struct udma_chan *uc = to_udma_chan(chan);
	struct udma_dev *ud = to_udma_dev(chan->device);
	const struct udma_soc_data *soc_data = ud->soc_data;
	struct k3_ring *irq_ring;
	u32 irq_udma_idx;
	int ret;

	uc->dma_dev = ud->dev;

	if (uc->config.pkt_mode || uc->config.dir == DMA_MEM_TO_MEM) {
		uc->use_dma_pool = true;
		/* in case of MEM_TO_MEM we have maximum of two TRs */
		if (uc->config.dir == DMA_MEM_TO_MEM) {
			uc->config.hdesc_size = cppi5_trdesc_calc_size(
					sizeof(struct cppi5_tr_type15_t), 2);
			uc->config.pkt_mode = false;
		}
	}

	if (uc->use_dma_pool) {
		uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev,
						 uc->config.hdesc_size,
						 ud->desc_align, 0);
		if (!uc->hdesc_pool) {
			dev_err(ud->ddev.dev,
				"Descriptor pool allocation failed\n");
			uc->use_dma_pool = false;
			ret = -ENOMEM;
			goto err_cleanup;
		}
	}

	/*
	 * Make sure that the completion is in a known state:
	 * No teardown, the channel is idle
	 */
	reinit_completion(&uc->teardown_completed);
	complete_all(&uc->teardown_completed);
	uc->state = UDMA_CHAN_IS_IDLE;

	switch (uc->config.dir) {
	case DMA_MEM_TO_MEM:
		/* Non synchronized - mem to mem type of transfer */
		dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__,
			uc->id);

		ret = udma_get_chan_pair(uc);
		if (ret)
			goto err_cleanup;

		ret = udma_alloc_tx_resources(uc);
		if (ret) {
			udma_put_rchan(uc);
			goto err_cleanup;
		}

		ret = udma_alloc_rx_resources(uc);
		if (ret) {
			udma_free_tx_resources(uc);
			goto err_cleanup;
		}

		uc->config.src_thread = ud->psil_base + uc->tchan->id;
		uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
					K3_PSIL_DST_THREAD_ID_OFFSET;

		irq_ring = uc->tchan->tc_ring;
		irq_udma_idx = uc->tchan->id;

		ret = udma_tisci_m2m_channel_config(uc);
		break;
	case DMA_MEM_TO_DEV:
		/* Slave transfer synchronized - mem to dev (TX) transfer */
		dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
			uc->id);

		ret = udma_alloc_tx_resources(uc);
		if (ret)
			goto err_cleanup;

		uc->config.src_thread = ud->psil_base + uc->tchan->id;
		uc->config.dst_thread = uc->config.remote_thread_id;
		uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;

		irq_ring = uc->tchan->tc_ring;
		irq_udma_idx = uc->tchan->id;

		ret = udma_tisci_tx_channel_config(uc);
		break;
	case DMA_DEV_TO_MEM:
		/* Slave transfer synchronized - dev to mem (RX) transfer */
		dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
			uc->id);

		ret = udma_alloc_rx_resources(uc);
		if (ret)
			goto err_cleanup;

		uc->config.src_thread = uc->config.remote_thread_id;
		uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
					K3_PSIL_DST_THREAD_ID_OFFSET;

		irq_ring = uc->rflow->r_ring;
		irq_udma_idx = soc_data->oes.udma_rchan + uc->rchan->id;

		ret = udma_tisci_rx_channel_config(uc);
		break;
	default:
		/* Can not happen */
		dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
			__func__, uc->id, uc->config.dir);
		ret = -EINVAL;
		goto err_cleanup;
	}

	/* check if the channel configuration was successful */
	if (ret)
		goto err_res_free;

	if (udma_is_chan_running(uc)) {
		dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
		udma_reset_chan(uc, false);
		if (udma_is_chan_running(uc)) {
			dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
			ret = -EBUSY;
			goto err_res_free;
		}
	}

	/* PSI-L pairing */
	ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
	if (ret) {
		dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
			uc->config.src_thread, uc->config.dst_thread);
		goto err_res_free;
	}

	uc->psil_paired = true;

	uc->irq_num_ring = k3_ringacc_get_ring_irq_num(irq_ring);
	if (uc->irq_num_ring <= 0) {
		dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
			k3_ringacc_get_ring_id(irq_ring));
		ret = -EINVAL;
		goto err_psi_free;
	}

	ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler,
			  IRQF_TRIGGER_HIGH, uc->name, uc);
	if (ret) {
		dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
		goto err_irq_free;
	}

	/* Event from UDMA (TR events) only needed for slave TR mode channels */
	if (is_slave_direction(uc->config.dir) && !uc->config.pkt_mode) {
		uc->irq_num_udma = msi_get_virq(ud->dev, irq_udma_idx);
		if (uc->irq_num_udma <= 0) {
			dev_err(ud->dev, "Failed to get udma irq (index: %u)\n",
				irq_udma_idx);
			free_irq(uc->irq_num_ring, uc);
			ret = -EINVAL;
			goto err_irq_free;
		}

		ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0,
				  uc->name, uc);
		if (ret) {
			dev_err(ud->dev, "chan%d: UDMA irq request failed\n",
				uc->id);
			free_irq(uc->irq_num_ring, uc);
			goto err_irq_free;
		}
	} else {
		uc->irq_num_udma = 0;
	}

	udma_reset_rings(uc);

	return 0;

err_irq_free:
	uc->irq_num_ring = 0;
	uc->irq_num_udma = 0;
err_psi_free:
	navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread);
	uc->psil_paired = false;
err_res_free:
	udma_free_tx_resources(uc);
	udma_free_rx_resources(uc);
err_cleanup:
	udma_reset_uchan(uc);

	if (uc->use_dma_pool) {
		dma_pool_destroy(uc->hdesc_pool);
		uc->use_dma_pool = false;
	}

	return ret;
}
static int bcdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct udma_chan *uc = to_udma_chan(chan);
	struct udma_dev *ud = to_udma_dev(chan->device);
	const struct udma_oes_offsets *oes = &ud->soc_data->oes;
	u32 irq_udma_idx, irq_ring_idx;
	int ret;

	/* Only TR mode is supported */
	uc->config.pkt_mode = false;

	/*
	 * Make sure that the completion is in a known state:
	 * No teardown, the channel is idle
	 */
	reinit_completion(&uc->teardown_completed);
	complete_all(&uc->teardown_completed);
	uc->state = UDMA_CHAN_IS_IDLE;

	switch (uc->config.dir) {
	case DMA_MEM_TO_MEM:
		/* Non synchronized - mem to mem type of transfer */
		dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__,
			uc->id);

		ret = bcdma_alloc_bchan_resources(uc);
		if (ret)
			return ret;

		irq_ring_idx = uc->bchan->id + oes->bcdma_bchan_ring;
		irq_udma_idx = uc->bchan->id + oes->bcdma_bchan_data;

		ret = bcdma_tisci_m2m_channel_config(uc);
		break;
	case DMA_MEM_TO_DEV:
		/* Slave transfer synchronized - mem to dev (TX) transfer */
		dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
			uc->id);

		ret = udma_alloc_tx_resources(uc);
		if (ret) {
			uc->config.remote_thread_id = -1;
			return ret;
		}

		uc->config.src_thread = ud->psil_base + uc->tchan->id;
		uc->config.dst_thread = uc->config.remote_thread_id;
		uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;

		irq_ring_idx = uc->tchan->id + oes->bcdma_tchan_ring;
		irq_udma_idx = uc->tchan->id + oes->bcdma_tchan_data;

		ret = bcdma_tisci_tx_channel_config(uc);
		break;
	case DMA_DEV_TO_MEM:
		/* Slave transfer synchronized - dev to mem (RX) transfer */
		dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
			uc->id);

		ret = udma_alloc_rx_resources(uc);
		if (ret) {
			uc->config.remote_thread_id = -1;
			return ret;
		}

		uc->config.src_thread = uc->config.remote_thread_id;
		uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
					K3_PSIL_DST_THREAD_ID_OFFSET;

		irq_ring_idx = uc->rchan->id + oes->bcdma_rchan_ring;
		irq_udma_idx = uc->rchan->id + oes->bcdma_rchan_data;

		ret = bcdma_tisci_rx_channel_config(uc);
		break;
	default:
		/* Can not happen */
		dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
			__func__, uc->id, uc->config.dir);
		return -EINVAL;
	}

	/* check if the channel configuration was successful */
	if (ret)
		goto err_res_free;

	if (udma_is_chan_running(uc)) {
		dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
		udma_reset_chan(uc, false);
		if (udma_is_chan_running(uc)) {
			dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
			ret = -EBUSY;
			goto err_res_free;
		}
	}

	uc->dma_dev = dmaengine_get_dma_device(chan);
	if (uc->config.dir == DMA_MEM_TO_MEM && !uc->config.tr_trigger_type) {
		uc->config.hdesc_size = cppi5_trdesc_calc_size(
					sizeof(struct cppi5_tr_type15_t), 2);

		uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev,
						 uc->config.hdesc_size,
						 ud->desc_align, 0);
		if (!uc->hdesc_pool) {
			dev_err(ud->ddev.dev,
				"Descriptor pool allocation failed\n");
			uc->use_dma_pool = false;
			ret = -ENOMEM;
			goto err_res_free;
		}

		uc->use_dma_pool = true;
	} else if (uc->config.dir != DMA_MEM_TO_MEM) {
		/* PSI-L pairing */
		ret = navss_psil_pair(ud, uc->config.src_thread,
				      uc->config.dst_thread);
		if (ret) {
			dev_err(ud->dev,
				"PSI-L pairing failed: 0x%04x -> 0x%04x\n",
				uc->config.src_thread, uc->config.dst_thread);
			goto err_res_free;
		}

		uc->psil_paired = true;
	}

	uc->irq_num_ring = msi_get_virq(ud->dev, irq_ring_idx);
	if (uc->irq_num_ring <= 0) {
		dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
			irq_ring_idx);
		ret = -EINVAL;
		goto err_psi_free;
	}

	ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler,
			  IRQF_TRIGGER_HIGH, uc->name, uc);
	if (ret) {
		dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
		goto err_irq_free;
	}

	/* Event from BCDMA (TR events) only needed for slave channels */
	if (is_slave_direction(uc->config.dir)) {
		uc->irq_num_udma = msi_get_virq(ud->dev, irq_udma_idx);
		if (uc->irq_num_udma <= 0) {
			dev_err(ud->dev, "Failed to get bcdma irq (index: %u)\n",
				irq_udma_idx);
			free_irq(uc->irq_num_ring, uc);
			ret = -EINVAL;
			goto err_irq_free;
		}

		ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0,
				  uc->name, uc);
		if (ret) {
			dev_err(ud->dev, "chan%d: BCDMA irq request failed\n",
				uc->id);
			free_irq(uc->irq_num_ring, uc);
			goto err_irq_free;
		}
	} else {
		uc->irq_num_udma = 0;
	}

	udma_reset_rings(uc);

	INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work,
				  udma_check_tx_completion);
	return 0;

err_irq_free:
	uc->irq_num_ring = 0;
	uc->irq_num_udma = 0;
err_psi_free:
	if (uc->psil_paired)
		navss_psil_unpair(ud, uc->config.src_thread,
				  uc->config.dst_thread);
	uc->psil_paired = false;
err_res_free:
	bcdma_free_bchan_resources(uc);
	udma_free_tx_resources(uc);
	udma_free_rx_resources(uc);

	udma_reset_uchan(uc);

	if (uc->use_dma_pool) {
		dma_pool_destroy(uc->hdesc_pool);
		uc->use_dma_pool = false;
	}

	return ret;
}
2572 static int bcdma_router_config(struct dma_chan *chan)
2574 struct k3_event_route_data *router_data = chan->route_data;
2575 struct udma_chan *uc = to_udma_chan(chan);
2581 if (uc->config.tr_trigger_type != 1 && uc->config.tr_trigger_type != 2)
2584 trigger_event = uc->ud->soc_data->bcdma_trigger_event_offset;
2585 trigger_event += (uc->bchan->id * 2) + uc->config.tr_trigger_type - 1;
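/*
 * Each bchan thus owns two consecutive global trigger events above the
 * SoC specific offset: (id * 2) for trigger type 1 and (id * 2 + 1)
 * for trigger type 2.
 */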
2587 return router_data->set_event(router_data->priv, trigger_event);
2590 static int pktdma_alloc_chan_resources(struct dma_chan *chan)
2592 struct udma_chan *uc = to_udma_chan(chan);
2593 struct udma_dev *ud = to_udma_dev(chan->device);
2594 const struct udma_oes_offsets *oes = &ud->soc_data->oes;
2599 * Make sure that the completion is in a known state:
2600 * No teardown, the channel is idle
2602 reinit_completion(&uc->teardown_completed);
2603 complete_all(&uc->teardown_completed);
2604 uc->state = UDMA_CHAN_IS_IDLE;
2606 switch (uc->config.dir) {
2607 case DMA_MEM_TO_DEV:
2608 /* Slave transfer synchronized - mem to dev (TX) transfer */
2609 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
2612 ret = udma_alloc_tx_resources(uc);
2614 uc->config.remote_thread_id = -1;
2618 uc->config.src_thread = ud->psil_base + uc->tchan->id;
2619 uc->config.dst_thread = uc->config.remote_thread_id;
2620 uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
2622 irq_ring_idx = uc->tchan->tflow_id + oes->pktdma_tchan_flow;
2624 ret = pktdma_tisci_tx_channel_config(uc);
2626 case DMA_DEV_TO_MEM:
2627 /* Slave transfer synchronized - dev to mem (RX) transfer */
2628 dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
2631 ret = udma_alloc_rx_resources(uc);
2633 uc->config.remote_thread_id = -1;
2637 uc->config.src_thread = uc->config.remote_thread_id;
2638 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
2639 K3_PSIL_DST_THREAD_ID_OFFSET;
2641 irq_ring_idx = uc->rflow->id + oes->pktdma_rchan_flow;
2643 ret = pktdma_tisci_rx_channel_config(uc);
2646 /* Cannot happen */
2647 dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
2648 __func__, uc->id, uc->config.dir);
2652 /* check if the channel configuration was successful */
2656 if (udma_is_chan_running(uc)) {
2657 dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
2658 udma_reset_chan(uc, false);
2659 if (udma_is_chan_running(uc)) {
2660 dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
2666 uc->dma_dev = dmaengine_get_dma_device(chan);
2667 uc->hdesc_pool = dma_pool_create(uc->name, uc->dma_dev,
2668 uc->config.hdesc_size, ud->desc_align,
2670 if (!uc->hdesc_pool) {
2671 dev_err(ud->ddev.dev,
2672 "Descriptor pool allocation failed\n");
2673 uc->use_dma_pool = false;
2678 uc->use_dma_pool = true;
2681 ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
2683 dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
2684 uc->config.src_thread, uc->config.dst_thread);
2688 uc->psil_paired = true;
2690 uc->irq_num_ring = msi_get_virq(ud->dev, irq_ring_idx);
2691 if (uc->irq_num_ring <= 0) {
2692 dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
2698 ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler,
2699 IRQF_TRIGGER_HIGH, uc->name, uc);
2701 dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
2705 uc->irq_num_udma = 0;
2707 udma_reset_rings(uc);
2709 INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work,
2710 udma_check_tx_completion);
2714 "chan%d: tchan%d, tflow%d, Remote thread: 0x%04x\n",
2715 uc->id, uc->tchan->id, uc->tchan->tflow_id,
2716 uc->config.remote_thread_id);
2719 "chan%d: rchan%d, rflow%d, Remote thread: 0x%04x\n",
2720 uc->id, uc->rchan->id, uc->rflow->id,
2721 uc->config.remote_thread_id);
2725 uc->irq_num_ring = 0;
2727 navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread);
2728 uc->psil_paired = false;
2730 udma_free_tx_resources(uc);
2731 udma_free_rx_resources(uc);
2733 udma_reset_uchan(uc);
2735 dma_pool_destroy(uc->hdesc_pool);
2736 uc->use_dma_pool = false;
2741 static int udma_slave_config(struct dma_chan *chan,
2742 struct dma_slave_config *cfg)
2744 struct udma_chan *uc = to_udma_chan(chan);
2746 memcpy(&uc->cfg, cfg, sizeof(uc->cfg));
2751 static struct udma_desc *udma_alloc_tr_desc(struct udma_chan *uc,
2752 size_t tr_size, int tr_count,
2753 enum dma_transfer_direction dir)
2755 struct udma_hwdesc *hwdesc;
2756 struct cppi5_desc_hdr_t *tr_desc;
2757 struct udma_desc *d;
2758 u32 reload_count = 0;
2768 dev_err(uc->ud->dev, "Unsupported TR size of %zu\n", tr_size);
2772 /* We have only one descriptor containing multiple TRs */
2773 d = kzalloc(sizeof(*d) + sizeof(d->hwdesc[0]), GFP_NOWAIT);
2777 d->sglen = tr_count;
2779 d->hwdesc_count = 1;
2780 hwdesc = &d->hwdesc[0];
2782 /* Allocate memory for DMA ring descriptor */
2783 if (uc->use_dma_pool) {
2784 hwdesc->cppi5_desc_size = uc->config.hdesc_size;
2785 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
2787 &hwdesc->cppi5_desc_paddr);
2789 hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size,
2791 hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size,
2792 uc->ud->desc_align);
2793 hwdesc->cppi5_desc_vaddr = dma_alloc_coherent(uc->ud->dev,
2794 hwdesc->cppi5_desc_size,
2795 &hwdesc->cppi5_desc_paddr,
2799 if (!hwdesc->cppi5_desc_vaddr) {
2804 /* Start of the TR req records */
2805 hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size;
2806 /* Start address of the TR response array */
2807 hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size * tr_count;
2809 tr_desc = hwdesc->cppi5_desc_vaddr;
2812 reload_count = CPPI5_INFO0_TRDESC_RLDCNT_INFINITE;
2814 if (dir == DMA_DEV_TO_MEM)
2815 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
2817 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
2819 cppi5_trdesc_init(tr_desc, tr_count, tr_size, 0, reload_count);
2820 cppi5_desc_set_pktids(tr_desc, uc->id,
2821 CPPI5_INFO1_DESC_FLOWID_DEFAULT);
2822 cppi5_desc_set_retpolicy(tr_desc, 0, ring_id);
2828 * udma_get_tr_counters - calculate TR counters for a given length
2829 * @len: Length of the transfer
2830 * @align_to: Preferred alignment
2831 * @tr0_cnt0: First TR icnt0
2832 * @tr0_cnt1: First TR icnt1
2833 * @tr1_cnt0: Second (if used) TR icnt0
2835 * For len < SZ_64K only one TR is enough and tr1_cnt0 is not updated
2836 * For len >= SZ_64K two TRs are used in a simple way:
2837 * First TR: SZ_64K-alignment blocks (tr0_cnt0, tr0_cnt1)
2838 * Second TR: the remaining length (tr1_cnt0)
2840 * Returns the number of TRs the length needs (1 or 2)
2841 * -EINVAL if the length cannot be supported
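 *
 * Example: len = 200000 with align_to = 0 gives tr0_cnt0 = 65535,
 * tr0_cnt1 = 3 and tr1_cnt0 = 3395 (3 * 65535 + 3395 = 200000), so
 * two TRs are used.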
2843 static int udma_get_tr_counters(size_t len, unsigned long align_to,
2844 u16 *tr0_cnt0, u16 *tr0_cnt1, u16 *tr1_cnt0)
2857 *tr0_cnt0 = SZ_64K - BIT(align_to);
2858 if (len / *tr0_cnt0 >= SZ_64K) {
2866 *tr0_cnt1 = len / *tr0_cnt0;
2867 *tr1_cnt0 = len % *tr0_cnt0;
2872 static struct udma_desc *
2873 udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl,
2874 unsigned int sglen, enum dma_transfer_direction dir,
2875 unsigned long tx_flags, void *context)
2877 struct scatterlist *sgent;
2878 struct udma_desc *d;
2879 struct cppi5_tr_type1_t *tr_req = NULL;
2880 u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
2887 /* estimate the number of TRs we will need */
2888 for_each_sg(sgl, sgent, sglen, i) {
2889 if (sg_dma_len(sgent) < SZ_64K)
2895 /* Now allocate and setup the descriptor. */
2896 tr_size = sizeof(struct cppi5_tr_type1_t);
2897 d = udma_alloc_tr_desc(uc, tr_size, num_tr, dir);
2903 if (uc->ud->match_data->type == DMA_TYPE_UDMA)
2906 asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT;
2908 tr_req = d->hwdesc[0].tr_req_base;
2909 for_each_sg(sgl, sgent, sglen, i) {
2910 dma_addr_t sg_addr = sg_dma_address(sgent);
2912 num_tr = udma_get_tr_counters(sg_dma_len(sgent), __ffs(sg_addr),
2913 &tr0_cnt0, &tr0_cnt1, &tr1_cnt0);
2915 dev_err(uc->ud->dev, "size %u is not supported\n",
2917 udma_free_hwdesc(uc, d);
2922 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false,
2923 false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2924 cppi5_tr_csf_set(&tr_req[tr_idx].flags, CPPI5_TR_CSF_SUPR_EVT);
2927 tr_req[tr_idx].addr = sg_addr;
2928 tr_req[tr_idx].icnt0 = tr0_cnt0;
2929 tr_req[tr_idx].icnt1 = tr0_cnt1;
2930 tr_req[tr_idx].dim1 = tr0_cnt0;
2934 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1,
2936 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2937 cppi5_tr_csf_set(&tr_req[tr_idx].flags,
2938 CPPI5_TR_CSF_SUPR_EVT);
2940 tr_req[tr_idx].addr = sg_addr + tr0_cnt1 * tr0_cnt0;
2941 tr_req[tr_idx].icnt0 = tr1_cnt0;
2942 tr_req[tr_idx].icnt1 = 1;
2943 tr_req[tr_idx].dim1 = tr1_cnt0;
2947 d->residue += sg_dma_len(sgent);
2950 cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags,
2951 CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP);
2956 static struct udma_desc *
2957 udma_prep_slave_sg_triggered_tr(struct udma_chan *uc, struct scatterlist *sgl,
2959 enum dma_transfer_direction dir,
2960 unsigned long tx_flags, void *context)
2962 struct scatterlist *sgent;
2963 struct cppi5_tr_type15_t *tr_req = NULL;
2964 enum dma_slave_buswidth dev_width;
2965 u16 tr_cnt0, tr_cnt1;
2966 dma_addr_t dev_addr;
2967 struct udma_desc *d;
2969 size_t tr_size, sg_len;
2972 u32 burst, trigger_size, port_window;
2975 if (dir == DMA_DEV_TO_MEM) {
2976 dev_addr = uc->cfg.src_addr;
2977 dev_width = uc->cfg.src_addr_width;
2978 burst = uc->cfg.src_maxburst;
2979 port_window = uc->cfg.src_port_window_size;
2980 } else if (dir == DMA_MEM_TO_DEV) {
2981 dev_addr = uc->cfg.dst_addr;
2982 dev_width = uc->cfg.dst_addr_width;
2983 burst = uc->cfg.dst_maxburst;
2984 port_window = uc->cfg.dst_port_window_size;
2986 dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
2994 if (port_window != burst) {
2995 dev_err(uc->ud->dev,
2996 "The burst must be equal to port_window\n");
3000 tr_cnt0 = dev_width * port_window;
3003 tr_cnt0 = dev_width;
3006 trigger_size = tr_cnt0 * tr_cnt1;
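/*
 * Each incoming trigger moves one icnt0 x icnt1 block, i.e.
 * trigger_size bytes, so every SG entry must be a whole multiple of
 * trigger_size (checked below).
 */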
3008 /* estimate the number of TRs we will need */
3009 for_each_sg(sgl, sgent, sglen, i) {
3010 sg_len = sg_dma_len(sgent);
3012 if (sg_len % trigger_size) {
3013 dev_err(uc->ud->dev,
3014 "SG entry is not aligned (%zu for %u)\n", sg_len,
3019 if (sg_len / trigger_size < SZ_64K)
3025 /* Now allocate and setup the descriptor. */
3026 tr_size = sizeof(struct cppi5_tr_type15_t);
3027 d = udma_alloc_tr_desc(uc, tr_size, num_tr, dir);
3033 if (uc->ud->match_data->type == DMA_TYPE_UDMA) {
3036 asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT;
3040 tr_req = d->hwdesc[0].tr_req_base;
3041 for_each_sg(sgl, sgent, sglen, i) {
3042 u16 tr0_cnt2, tr0_cnt3, tr1_cnt2;
3043 dma_addr_t sg_addr = sg_dma_address(sgent);
3045 sg_len = sg_dma_len(sgent);
3046 num_tr = udma_get_tr_counters(sg_len / trigger_size, 0,
3047 &tr0_cnt2, &tr0_cnt3, &tr1_cnt2);
3049 dev_err(uc->ud->dev, "size %zu is not supported\n",
3051 udma_free_hwdesc(uc, d);
3056 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE15, false,
3057 true, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3058 cppi5_tr_csf_set(&tr_req[tr_idx].flags, CPPI5_TR_CSF_SUPR_EVT);
3059 cppi5_tr_set_trigger(&tr_req[tr_idx].flags,
3060 uc->config.tr_trigger_type,
3061 CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC, 0, 0);
3064 if (dir == DMA_DEV_TO_MEM) {
3065 tr_req[tr_idx].addr = dev_addr;
3066 tr_req[tr_idx].icnt0 = tr_cnt0;
3067 tr_req[tr_idx].icnt1 = tr_cnt1;
3068 tr_req[tr_idx].icnt2 = tr0_cnt2;
3069 tr_req[tr_idx].icnt3 = tr0_cnt3;
3070 tr_req[tr_idx].dim1 = (-1) * tr_cnt0;
3072 tr_req[tr_idx].daddr = sg_addr;
3073 tr_req[tr_idx].dicnt0 = tr_cnt0;
3074 tr_req[tr_idx].dicnt1 = tr_cnt1;
3075 tr_req[tr_idx].dicnt2 = tr0_cnt2;
3076 tr_req[tr_idx].dicnt3 = tr0_cnt3;
3077 tr_req[tr_idx].ddim1 = tr_cnt0;
3078 tr_req[tr_idx].ddim2 = trigger_size;
3079 tr_req[tr_idx].ddim3 = trigger_size * tr0_cnt2;
3081 tr_req[tr_idx].addr = sg_addr;
3082 tr_req[tr_idx].icnt0 = tr_cnt0;
3083 tr_req[tr_idx].icnt1 = tr_cnt1;
3084 tr_req[tr_idx].icnt2 = tr0_cnt2;
3085 tr_req[tr_idx].icnt3 = tr0_cnt3;
3086 tr_req[tr_idx].dim1 = tr_cnt0;
3087 tr_req[tr_idx].dim2 = trigger_size;
3088 tr_req[tr_idx].dim3 = trigger_size * tr0_cnt2;
3090 tr_req[tr_idx].daddr = dev_addr;
3091 tr_req[tr_idx].dicnt0 = tr_cnt0;
3092 tr_req[tr_idx].dicnt1 = tr_cnt1;
3093 tr_req[tr_idx].dicnt2 = tr0_cnt2;
3094 tr_req[tr_idx].dicnt3 = tr0_cnt3;
3095 tr_req[tr_idx].ddim1 = (-1) * tr_cnt0;
3101 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE15,
3103 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3104 cppi5_tr_csf_set(&tr_req[tr_idx].flags,
3105 CPPI5_TR_CSF_SUPR_EVT);
3106 cppi5_tr_set_trigger(&tr_req[tr_idx].flags,
3107 uc->config.tr_trigger_type,
3108 CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC,
3111 sg_addr += trigger_size * tr0_cnt2 * tr0_cnt3;
3112 if (dir == DMA_DEV_TO_MEM) {
3113 tr_req[tr_idx].addr = dev_addr;
3114 tr_req[tr_idx].icnt0 = tr_cnt0;
3115 tr_req[tr_idx].icnt1 = tr_cnt1;
3116 tr_req[tr_idx].icnt2 = tr1_cnt2;
3117 tr_req[tr_idx].icnt3 = 1;
3118 tr_req[tr_idx].dim1 = (-1) * tr_cnt0;
3120 tr_req[tr_idx].daddr = sg_addr;
3121 tr_req[tr_idx].dicnt0 = tr_cnt0;
3122 tr_req[tr_idx].dicnt1 = tr_cnt1;
3123 tr_req[tr_idx].dicnt2 = tr1_cnt2;
3124 tr_req[tr_idx].dicnt3 = 1;
3125 tr_req[tr_idx].ddim1 = tr_cnt0;
3126 tr_req[tr_idx].ddim2 = trigger_size;
3128 tr_req[tr_idx].addr = sg_addr;
3129 tr_req[tr_idx].icnt0 = tr_cnt0;
3130 tr_req[tr_idx].icnt1 = tr_cnt1;
3131 tr_req[tr_idx].icnt2 = tr1_cnt2;
3132 tr_req[tr_idx].icnt3 = 1;
3133 tr_req[tr_idx].dim1 = tr_cnt0;
3134 tr_req[tr_idx].dim2 = trigger_size;
3136 tr_req[tr_idx].daddr = dev_addr;
3137 tr_req[tr_idx].dicnt0 = tr_cnt0;
3138 tr_req[tr_idx].dicnt1 = tr_cnt1;
3139 tr_req[tr_idx].dicnt2 = tr1_cnt2;
3140 tr_req[tr_idx].dicnt3 = 1;
3141 tr_req[tr_idx].ddim1 = (-1) * tr_cnt0;
3146 d->residue += sg_len;
3149 cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags,
3150 CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP);
3155 static int udma_configure_statictr(struct udma_chan *uc, struct udma_desc *d,
3156 enum dma_slave_buswidth dev_width,
3159 if (uc->config.ep_type != PSIL_EP_PDMA_XY)
3162 /* Bus width translates to the element size (ES) */
3163 switch (dev_width) {
3164 case DMA_SLAVE_BUSWIDTH_1_BYTE:
3165 d->static_tr.elsize = 0;
3167 case DMA_SLAVE_BUSWIDTH_2_BYTES:
3168 d->static_tr.elsize = 1;
3170 case DMA_SLAVE_BUSWIDTH_3_BYTES:
3171 d->static_tr.elsize = 2;
3173 case DMA_SLAVE_BUSWIDTH_4_BYTES:
3174 d->static_tr.elsize = 3;
3176 case DMA_SLAVE_BUSWIDTH_8_BYTES:
3177 d->static_tr.elsize = 4;
3179 default: /* not reached */
3183 d->static_tr.elcnt = elcnt;
3186 * PDMA must close the packet when the channel is in packet mode.
3187 * For TR mode, when the channel is not cyclic, we also need PDMA to
3188 * close the packet, otherwise the transfer will stall because PDMA
3189 * holds on to the data it has received from the peripheral.
3191 if (uc->config.pkt_mode || !uc->cyclic) {
3192 unsigned int div = dev_width * elcnt;
3195 d->static_tr.bstcnt = d->residue / d->sglen / div;
3197 d->static_tr.bstcnt = d->residue / div;
3199 if (uc->config.dir == DMA_DEV_TO_MEM &&
3200 d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask)
3203 d->static_tr.bstcnt = 0;
3209 static struct udma_desc *
3210 udma_prep_slave_sg_pkt(struct udma_chan *uc, struct scatterlist *sgl,
3211 unsigned int sglen, enum dma_transfer_direction dir,
3212 unsigned long tx_flags, void *context)
3214 struct scatterlist *sgent;
3215 struct cppi5_host_desc_t *h_desc = NULL;
3216 struct udma_desc *d;
3221 d = kzalloc(struct_size(d, hwdesc, sglen), GFP_NOWAIT);
3226 d->hwdesc_count = sglen;
3228 if (dir == DMA_DEV_TO_MEM)
3229 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
3231 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
3233 if (uc->ud->match_data->type == DMA_TYPE_UDMA)
3236 asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT;
3238 for_each_sg(sgl, sgent, sglen, i) {
3239 struct udma_hwdesc *hwdesc = &d->hwdesc[i];
3240 dma_addr_t sg_addr = sg_dma_address(sgent);
3241 struct cppi5_host_desc_t *desc;
3242 size_t sg_len = sg_dma_len(sgent);
3244 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
3246 &hwdesc->cppi5_desc_paddr);
3247 if (!hwdesc->cppi5_desc_vaddr) {
3248 dev_err(uc->ud->dev,
3249 "descriptor%d allocation failed\n", i);
3251 udma_free_hwdesc(uc, d);
3256 d->residue += sg_len;
3257 hwdesc->cppi5_desc_size = uc->config.hdesc_size;
3258 desc = hwdesc->cppi5_desc_vaddr;
3261 cppi5_hdesc_init(desc, 0, 0);
3262 /* Flow and Packet ID */
3263 cppi5_desc_set_pktids(&desc->hdr, uc->id,
3264 CPPI5_INFO1_DESC_FLOWID_DEFAULT);
3265 cppi5_desc_set_retpolicy(&desc->hdr, 0, ring_id);
3267 cppi5_hdesc_reset_hbdesc(desc);
3268 cppi5_desc_set_retpolicy(&desc->hdr, 0, 0xffff);
3271 /* attach the sg buffer to the descriptor */
3273 cppi5_hdesc_attach_buf(desc, sg_addr, sg_len, sg_addr, sg_len);
3275 /* Attach link as host buffer descriptor */
3277 cppi5_hdesc_link_hbdesc(h_desc,
3278 hwdesc->cppi5_desc_paddr | asel);
3280 if (uc->ud->match_data->type == DMA_TYPE_PKTDMA ||
3281 dir == DMA_MEM_TO_DEV)
3285 if (d->residue >= SZ_4M) {
3286 dev_err(uc->ud->dev,
3287 "%s: Transfer size %u is over the supported 4M range\n",
3288 __func__, d->residue);
3289 udma_free_hwdesc(uc, d);
3294 h_desc = d->hwdesc[0].cppi5_desc_vaddr;
3295 cppi5_hdesc_set_pktlen(h_desc, d->residue);
3300 static int udma_attach_metadata(struct dma_async_tx_descriptor *desc,
3301 void *data, size_t len)
3303 struct udma_desc *d = to_udma_desc(desc);
3304 struct udma_chan *uc = to_udma_chan(desc->chan);
3305 struct cppi5_host_desc_t *h_desc;
3309 if (!uc->config.pkt_mode || !uc->config.metadata_size)
3312 if (!data || len > uc->config.metadata_size)
3315 if (uc->config.needs_epib && len < CPPI5_INFO0_HDESC_EPIB_SIZE)
3318 h_desc = d->hwdesc[0].cppi5_desc_vaddr;
3319 if (d->dir == DMA_MEM_TO_DEV)
3320 memcpy(h_desc->epib, data, len);
3322 if (uc->config.needs_epib)
3323 psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE;
3326 d->metadata_size = len;
3327 if (uc->config.needs_epib)
3328 flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT;
3330 cppi5_hdesc_update_flags(h_desc, flags);
3331 cppi5_hdesc_update_psdata_size(h_desc, psd_size);
3336 static void *udma_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
3337 size_t *payload_len, size_t *max_len)
3339 struct udma_desc *d = to_udma_desc(desc);
3340 struct udma_chan *uc = to_udma_chan(desc->chan);
3341 struct cppi5_host_desc_t *h_desc;
3343 if (!uc->config.pkt_mode || !uc->config.metadata_size)
3344 return ERR_PTR(-ENOTSUPP);
3346 h_desc = d->hwdesc[0].cppi5_desc_vaddr;
3348 *max_len = uc->config.metadata_size;
3350 *payload_len = cppi5_hdesc_epib_present(&h_desc->hdr) ?
3351 CPPI5_INFO0_HDESC_EPIB_SIZE : 0;
3352 *payload_len += cppi5_hdesc_get_psdata_size(h_desc);
3354 return h_desc->epib;
3357 static int udma_set_metadata_len(struct dma_async_tx_descriptor *desc,
3360 struct udma_desc *d = to_udma_desc(desc);
3361 struct udma_chan *uc = to_udma_chan(desc->chan);
3362 struct cppi5_host_desc_t *h_desc;
3363 u32 psd_size = payload_len;
3366 if (!uc->config.pkt_mode || !uc->config.metadata_size)
3369 if (payload_len > uc->config.metadata_size)
3372 if (uc->config.needs_epib && payload_len < CPPI5_INFO0_HDESC_EPIB_SIZE)
3375 h_desc = d->hwdesc[0].cppi5_desc_vaddr;
3377 if (uc->config.needs_epib) {
3378 psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE;
3379 flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT;
3382 cppi5_hdesc_update_flags(h_desc, flags);
3383 cppi5_hdesc_update_psdata_size(h_desc, psd_size);
3388 static struct dma_descriptor_metadata_ops metadata_ops = {
3389 .attach = udma_attach_metadata,
3390 .get_ptr = udma_get_metadata_ptr,
3391 .set_len = udma_set_metadata_len,
3394 static struct dma_async_tx_descriptor *
3395 udma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
3396 unsigned int sglen, enum dma_transfer_direction dir,
3397 unsigned long tx_flags, void *context)
3399 struct udma_chan *uc = to_udma_chan(chan);
3400 enum dma_slave_buswidth dev_width;
3401 struct udma_desc *d;
3404 if (dir != uc->config.dir &&
3405 (uc->config.dir == DMA_MEM_TO_MEM && !uc->config.tr_trigger_type)) {
3406 dev_err(chan->device->dev,
3407 "%s: chan%d is for %s, not supporting %s\n",
3409 dmaengine_get_direction_text(uc->config.dir),
3410 dmaengine_get_direction_text(dir));
3414 if (dir == DMA_DEV_TO_MEM) {
3415 dev_width = uc->cfg.src_addr_width;
3416 burst = uc->cfg.src_maxburst;
3417 } else if (dir == DMA_MEM_TO_DEV) {
3418 dev_width = uc->cfg.dst_addr_width;
3419 burst = uc->cfg.dst_maxburst;
3421 dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
3428 uc->config.tx_flags = tx_flags;
3430 if (uc->config.pkt_mode)
3431 d = udma_prep_slave_sg_pkt(uc, sgl, sglen, dir, tx_flags,
3433 else if (is_slave_direction(uc->config.dir))
3434 d = udma_prep_slave_sg_tr(uc, sgl, sglen, dir, tx_flags,
3437 d = udma_prep_slave_sg_triggered_tr(uc, sgl, sglen, dir,
3447 /* static TR for remote PDMA */
3448 if (udma_configure_statictr(uc, d, dev_width, burst)) {
3449 dev_err(uc->ud->dev,
3450 "%s: StaticTR Z is limited to maximum 4095 (%u)\n",
3451 __func__, d->static_tr.bstcnt);
3453 udma_free_hwdesc(uc, d);
3458 if (uc->config.metadata_size)
3459 d->vd.tx.metadata_ops = &metadata_ops;
3461 return vchan_tx_prep(&uc->vc, &d->vd, tx_flags);
3464 static struct udma_desc *
3465 udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
3466 size_t buf_len, size_t period_len,
3467 enum dma_transfer_direction dir, unsigned long flags)
3469 struct udma_desc *d;
3470 size_t tr_size, period_addr;
3471 struct cppi5_tr_type1_t *tr_req;
3472 unsigned int periods = buf_len / period_len;
3473 u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
3477 num_tr = udma_get_tr_counters(period_len, __ffs(buf_addr), &tr0_cnt0,
3478 &tr0_cnt1, &tr1_cnt0);
3480 dev_err(uc->ud->dev, "size %zu is not supported\n",
3485 /* Now allocate and setup the descriptor. */
3486 tr_size = sizeof(struct cppi5_tr_type1_t);
3487 d = udma_alloc_tr_desc(uc, tr_size, periods * num_tr, dir);
3491 tr_req = d->hwdesc[0].tr_req_base;
3492 if (uc->ud->match_data->type == DMA_TYPE_UDMA)
3493 period_addr = buf_addr;
3495 period_addr = buf_addr |
3496 ((u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT);
3498 for (i = 0; i < periods; i++) {
3499 int tr_idx = i * num_tr;
3501 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false,
3502 false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3504 tr_req[tr_idx].addr = period_addr;
3505 tr_req[tr_idx].icnt0 = tr0_cnt0;
3506 tr_req[tr_idx].icnt1 = tr0_cnt1;
3507 tr_req[tr_idx].dim1 = tr0_cnt0;
3510 cppi5_tr_csf_set(&tr_req[tr_idx].flags,
3511 CPPI5_TR_CSF_SUPR_EVT);
3514 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1,
3516 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3518 tr_req[tr_idx].addr = period_addr + tr0_cnt1 * tr0_cnt0;
3519 tr_req[tr_idx].icnt0 = tr1_cnt0;
3520 tr_req[tr_idx].icnt1 = 1;
3521 tr_req[tr_idx].dim1 = tr1_cnt0;
3524 if (!(flags & DMA_PREP_INTERRUPT))
3525 cppi5_tr_csf_set(&tr_req[tr_idx].flags,
3526 CPPI5_TR_CSF_SUPR_EVT);
3528 period_addr += period_len;
3534 static struct udma_desc *
3535 udma_prep_dma_cyclic_pkt(struct udma_chan *uc, dma_addr_t buf_addr,
3536 size_t buf_len, size_t period_len,
3537 enum dma_transfer_direction dir, unsigned long flags)
3539 struct udma_desc *d;
3542 int periods = buf_len / period_len;
3544 if (periods > (K3_UDMA_DEFAULT_RING_SIZE - 1))
3547 if (period_len >= SZ_4M)
3550 d = kzalloc(struct_size(d, hwdesc, periods), GFP_NOWAIT);
3554 d->hwdesc_count = periods;
3556 /* TODO: re-check this... */
3557 if (dir == DMA_DEV_TO_MEM)
3558 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
3560 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
3562 if (uc->ud->match_data->type != DMA_TYPE_UDMA)
3563 buf_addr |= (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT;
3565 for (i = 0; i < periods; i++) {
3566 struct udma_hwdesc *hwdesc = &d->hwdesc[i];
3567 dma_addr_t period_addr = buf_addr + (period_len * i);
3568 struct cppi5_host_desc_t *h_desc;
3570 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
3572 &hwdesc->cppi5_desc_paddr);
3573 if (!hwdesc->cppi5_desc_vaddr) {
3574 dev_err(uc->ud->dev,
3575 "descriptor%d allocation failed\n", i);
3577 udma_free_hwdesc(uc, d);
3582 hwdesc->cppi5_desc_size = uc->config.hdesc_size;
3583 h_desc = hwdesc->cppi5_desc_vaddr;
3585 cppi5_hdesc_init(h_desc, 0, 0);
3586 cppi5_hdesc_set_pktlen(h_desc, period_len);
3588 /* Flow and Packet ID */
3589 cppi5_desc_set_pktids(&h_desc->hdr, uc->id,
3590 CPPI5_INFO1_DESC_FLOWID_DEFAULT);
3591 cppi5_desc_set_retpolicy(&h_desc->hdr, 0, ring_id);
3593 /* attach each period to a new descriptor */
3594 cppi5_hdesc_attach_buf(h_desc,
3595 period_addr, period_len,
3596 period_addr, period_len);
3602 static struct dma_async_tx_descriptor *
3603 udma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
3604 size_t period_len, enum dma_transfer_direction dir,
3605 unsigned long flags)
3607 struct udma_chan *uc = to_udma_chan(chan);
3608 enum dma_slave_buswidth dev_width;
3609 struct udma_desc *d;
3612 if (dir != uc->config.dir) {
3613 dev_err(chan->device->dev,
3614 "%s: chan%d is for %s, not supporting %s\n",
3616 dmaengine_get_direction_text(uc->config.dir),
3617 dmaengine_get_direction_text(dir));
3623 if (dir == DMA_DEV_TO_MEM) {
3624 dev_width = uc->cfg.src_addr_width;
3625 burst = uc->cfg.src_maxburst;
3626 } else if (dir == DMA_MEM_TO_DEV) {
3627 dev_width = uc->cfg.dst_addr_width;
3628 burst = uc->cfg.dst_maxburst;
3630 dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
3637 if (uc->config.pkt_mode)
3638 d = udma_prep_dma_cyclic_pkt(uc, buf_addr, buf_len, period_len,
3641 d = udma_prep_dma_cyclic_tr(uc, buf_addr, buf_len, period_len,
3647 d->sglen = buf_len / period_len;
3650 d->residue = buf_len;
3652 /* static TR for remote PDMA */
3653 if (udma_configure_statictr(uc, d, dev_width, burst)) {
3654 dev_err(uc->ud->dev,
3655 "%s: StaticTR Z is limited to maximum 4095 (%u)\n",
3656 __func__, d->static_tr.bstcnt);
3658 udma_free_hwdesc(uc, d);
3663 if (uc->config.metadata_size)
3664 d->vd.tx.metadata_ops = &metadata_ops;
3666 return vchan_tx_prep(&uc->vc, &d->vd, flags);
3669 static struct dma_async_tx_descriptor *
3670 udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
3671 size_t len, unsigned long tx_flags)
3673 struct udma_chan *uc = to_udma_chan(chan);
3674 struct udma_desc *d;
3675 struct cppi5_tr_type15_t *tr_req;
3677 size_t tr_size = sizeof(struct cppi5_tr_type15_t);
3678 u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
3680 if (uc->config.dir != DMA_MEM_TO_MEM) {
3681 dev_err(chan->device->dev,
3682 "%s: chan%d is for %s, not supporting %s\n",
3684 dmaengine_get_direction_text(uc->config.dir),
3685 dmaengine_get_direction_text(DMA_MEM_TO_MEM));
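/*
 * Split the copy into at most two TRs; __ffs(src | dest) gives the
 * common alignment (as a power-of-two exponent) of the two buffers,
 * which udma_get_tr_counters() uses to keep the first TR's block size
 * aligned.
 */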
3689 num_tr = udma_get_tr_counters(len, __ffs(src | dest), &tr0_cnt0,
3690 &tr0_cnt1, &tr1_cnt0);
3692 dev_err(uc->ud->dev, "size %zu is not supported\n",
3697 d = udma_alloc_tr_desc(uc, tr_size, num_tr, DMA_MEM_TO_MEM);
3701 d->dir = DMA_MEM_TO_MEM;
3706 if (uc->ud->match_data->type != DMA_TYPE_UDMA) {
3707 src |= (u64)uc->ud->asel << K3_ADDRESS_ASEL_SHIFT;
3708 dest |= (u64)uc->ud->asel << K3_ADDRESS_ASEL_SHIFT;
3711 tr_req = d->hwdesc[0].tr_req_base;
3713 cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true,
3714 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3715 cppi5_tr_csf_set(&tr_req[0].flags, CPPI5_TR_CSF_SUPR_EVT);
3717 tr_req[0].addr = src;
3718 tr_req[0].icnt0 = tr0_cnt0;
3719 tr_req[0].icnt1 = tr0_cnt1;
3720 tr_req[0].icnt2 = 1;
3721 tr_req[0].icnt3 = 1;
3722 tr_req[0].dim1 = tr0_cnt0;
3724 tr_req[0].daddr = dest;
3725 tr_req[0].dicnt0 = tr0_cnt0;
3726 tr_req[0].dicnt1 = tr0_cnt1;
3727 tr_req[0].dicnt2 = 1;
3728 tr_req[0].dicnt3 = 1;
3729 tr_req[0].ddim1 = tr0_cnt0;
3732 cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true,
3733 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3734 cppi5_tr_csf_set(&tr_req[1].flags, CPPI5_TR_CSF_SUPR_EVT);
3736 tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0;
3737 tr_req[1].icnt0 = tr1_cnt0;
3738 tr_req[1].icnt1 = 1;
3739 tr_req[1].icnt2 = 1;
3740 tr_req[1].icnt3 = 1;
3742 tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0;
3743 tr_req[1].dicnt0 = tr1_cnt0;
3744 tr_req[1].dicnt1 = 1;
3745 tr_req[1].dicnt2 = 1;
3746 tr_req[1].dicnt3 = 1;
3749 cppi5_tr_csf_set(&tr_req[num_tr - 1].flags,
3750 CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP);
3752 if (uc->config.metadata_size)
3753 d->vd.tx.metadata_ops = &metadata_ops;
3755 return vchan_tx_prep(&uc->vc, &d->vd, tx_flags);
3758 static void udma_issue_pending(struct dma_chan *chan)
3760 struct udma_chan *uc = to_udma_chan(chan);
3761 unsigned long flags;
3763 spin_lock_irqsave(&uc->vc.lock, flags);
3765 /* If we have something pending and no active descriptor, then */
3766 if (vchan_issue_pending(&uc->vc) && !uc->desc) {
3768 * start a descriptor if the channel is NOT [marked as
3769 * terminating _and_ it is still running (teardown has not
3772 if (!(uc->state == UDMA_CHAN_IS_TERMINATING &&
3773 udma_is_chan_running(uc)))
3777 spin_unlock_irqrestore(&uc->vc.lock, flags);
3780 static enum dma_status udma_tx_status(struct dma_chan *chan,
3781 dma_cookie_t cookie,
3782 struct dma_tx_state *txstate)
3784 struct udma_chan *uc = to_udma_chan(chan);
3785 enum dma_status ret;
3786 unsigned long flags;
3788 spin_lock_irqsave(&uc->vc.lock, flags);
3790 ret = dma_cookie_status(chan, cookie, txstate);
3792 if (!udma_is_chan_running(uc))
3795 if (ret == DMA_IN_PROGRESS && udma_is_chan_paused(uc))
3798 if (ret == DMA_COMPLETE || !txstate)
3801 if (uc->desc && uc->desc->vd.tx.cookie == cookie) {
3804 u32 residue = uc->desc->residue;
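/*
 * bcnt reflects the bytes the channel has moved for this descriptor;
 * for non-native (PDMA) endpoints the peer byte count is read as well
 * so that bytes still in flight can be reported as delay.
 */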
3807 if (uc->desc->dir == DMA_MEM_TO_DEV) {
3808 bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG);
3810 if (uc->config.ep_type != PSIL_EP_NATIVE) {
3811 peer_bcnt = udma_tchanrt_read(uc,
3812 UDMA_CHAN_RT_PEER_BCNT_REG);
3814 if (bcnt > peer_bcnt)
3815 delay = bcnt - peer_bcnt;
3817 } else if (uc->desc->dir == DMA_DEV_TO_MEM) {
3818 bcnt = udma_rchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
3820 if (uc->config.ep_type != PSIL_EP_NATIVE) {
3821 peer_bcnt = udma_rchanrt_read(uc,
3822 UDMA_CHAN_RT_PEER_BCNT_REG);
3824 if (peer_bcnt > bcnt)
3825 delay = peer_bcnt - bcnt;
3828 bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
3831 if (bcnt && !(bcnt % uc->desc->residue))
3834 residue -= bcnt % uc->desc->residue;
3836 if (!residue && (uc->config.dir == DMA_DEV_TO_MEM || !delay)) {
3841 dma_set_residue(txstate, residue);
3842 dma_set_in_flight_bytes(txstate, delay);
3849 spin_unlock_irqrestore(&uc->vc.lock, flags);
3853 static int udma_pause(struct dma_chan *chan)
3855 struct udma_chan *uc = to_udma_chan(chan);
3857 /* pause the channel */
3858 switch (uc->config.dir) {
3859 case DMA_DEV_TO_MEM:
3860 udma_rchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
3861 UDMA_PEER_RT_EN_PAUSE,
3862 UDMA_PEER_RT_EN_PAUSE);
3864 case DMA_MEM_TO_DEV:
3865 udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
3866 UDMA_PEER_RT_EN_PAUSE,
3867 UDMA_PEER_RT_EN_PAUSE);
3869 case DMA_MEM_TO_MEM:
3870 udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_CTL_REG,
3871 UDMA_CHAN_RT_CTL_PAUSE,
3872 UDMA_CHAN_RT_CTL_PAUSE);
3881 static int udma_resume(struct dma_chan *chan)
3883 struct udma_chan *uc = to_udma_chan(chan);
3885 /* resume the channel */
3886 switch (uc->config.dir) {
3887 case DMA_DEV_TO_MEM:
3888 udma_rchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
3889 UDMA_PEER_RT_EN_PAUSE, 0);
3892 case DMA_MEM_TO_DEV:
3893 udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
3894 UDMA_PEER_RT_EN_PAUSE, 0);
3896 case DMA_MEM_TO_MEM:
3897 udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_CTL_REG,
3898 UDMA_CHAN_RT_CTL_PAUSE, 0);
3907 static int udma_terminate_all(struct dma_chan *chan)
3909 struct udma_chan *uc = to_udma_chan(chan);
3910 unsigned long flags;
3913 spin_lock_irqsave(&uc->vc.lock, flags);
3915 if (udma_is_chan_running(uc))
3919 uc->terminated_desc = uc->desc;
3921 uc->terminated_desc->terminated = true;
3922 cancel_delayed_work(&uc->tx_drain.work);
3927 vchan_get_all_descriptors(&uc->vc, &head);
3928 spin_unlock_irqrestore(&uc->vc.lock, flags);
3929 vchan_dma_desc_free_list(&uc->vc, &head);
3934 static void udma_synchronize(struct dma_chan *chan)
3936 struct udma_chan *uc = to_udma_chan(chan);
3937 unsigned long timeout = msecs_to_jiffies(1000);
3939 vchan_synchronize(&uc->vc);
3941 if (uc->state == UDMA_CHAN_IS_TERMINATING) {
3942 timeout = wait_for_completion_timeout(&uc->teardown_completed,
3945 dev_warn(uc->ud->dev, "chan%d teardown timeout!\n",
3947 udma_dump_chan_stdata(uc);
3948 udma_reset_chan(uc, true);
3952 udma_reset_chan(uc, false);
3953 if (udma_is_chan_running(uc))
3954 dev_warn(uc->ud->dev, "chan%d refused to stop!\n", uc->id);
3956 cancel_delayed_work_sync(&uc->tx_drain.work);
3957 udma_reset_rings(uc);
3960 static void udma_desc_pre_callback(struct virt_dma_chan *vc,
3961 struct virt_dma_desc *vd,
3962 struct dmaengine_result *result)
3964 struct udma_chan *uc = to_udma_chan(&vc->chan);
3965 struct udma_desc *d;
3970 d = to_udma_desc(&vd->tx);
3972 if (d->metadata_size)
3973 udma_fetch_epib(uc, d);
3975 /* Provide residue information for the client */
3977 void *desc_vaddr = udma_curr_cppi5_desc_vaddr(d, d->desc_idx);
3979 if (cppi5_desc_get_type(desc_vaddr) ==
3980 CPPI5_INFO0_DESC_TYPE_VAL_HOST) {
3981 result->residue = d->residue -
3982 cppi5_hdesc_get_pktlen(desc_vaddr);
3983 if (result->residue)
3984 result->result = DMA_TRANS_ABORTED;
3986 result->result = DMA_TRANS_NOERROR;
3988 result->residue = 0;
3989 result->result = DMA_TRANS_NOERROR;
3995 * This tasklet handles the completion of a DMA descriptor by
3996 * calling its callback and freeing it.
3998 static void udma_vchan_complete(struct tasklet_struct *t)
4000 struct virt_dma_chan *vc = from_tasklet(vc, t, task);
4001 struct virt_dma_desc *vd, *_vd;
4002 struct dmaengine_desc_callback cb;
4005 spin_lock_irq(&vc->lock);
4006 list_splice_tail_init(&vc->desc_completed, &head);
4010 dmaengine_desc_get_callback(&vd->tx, &cb);
4012 memset(&cb, 0, sizeof(cb));
4014 spin_unlock_irq(&vc->lock);
4016 udma_desc_pre_callback(vc, vd, NULL);
4017 dmaengine_desc_callback_invoke(&cb, NULL);
4019 list_for_each_entry_safe(vd, _vd, &head, node) {
4020 struct dmaengine_result result;
4022 dmaengine_desc_get_callback(&vd->tx, &cb);
4024 list_del(&vd->node);
4026 udma_desc_pre_callback(vc, vd, &result);
4027 dmaengine_desc_callback_invoke(&cb, &result);
4029 vchan_vdesc_fini(vd);
4033 static void udma_free_chan_resources(struct dma_chan *chan)
4035 struct udma_chan *uc = to_udma_chan(chan);
4036 struct udma_dev *ud = to_udma_dev(chan->device);
4038 udma_terminate_all(chan);
4039 if (uc->terminated_desc) {
4040 udma_reset_chan(uc, false);
4041 udma_reset_rings(uc);
4044 cancel_delayed_work_sync(&uc->tx_drain.work);
4046 if (uc->irq_num_ring > 0) {
4047 free_irq(uc->irq_num_ring, uc);
4049 uc->irq_num_ring = 0;
4051 if (uc->irq_num_udma > 0) {
4052 free_irq(uc->irq_num_udma, uc);
4054 uc->irq_num_udma = 0;
4057 /* Release PSI-L pairing */
4058 if (uc->psil_paired) {
4059 navss_psil_unpair(ud, uc->config.src_thread,
4060 uc->config.dst_thread);
4061 uc->psil_paired = false;
4064 vchan_free_chan_resources(&uc->vc);
4065 tasklet_kill(&uc->vc.task);
4067 bcdma_free_bchan_resources(uc);
4068 udma_free_tx_resources(uc);
4069 udma_free_rx_resources(uc);
4070 udma_reset_uchan(uc);
4072 if (uc->use_dma_pool) {
4073 dma_pool_destroy(uc->hdesc_pool);
4074 uc->use_dma_pool = false;
4078 static struct platform_driver udma_driver;
4079 static struct platform_driver bcdma_driver;
4080 static struct platform_driver pktdma_driver;
4082 struct udma_filter_param {
4083 int remote_thread_id;
4086 u32 tr_trigger_type;
4089 static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
4091 struct udma_chan_config *ucc;
4092 struct psil_endpoint_config *ep_config;
4093 struct udma_filter_param *filter_param;
4094 struct udma_chan *uc;
4095 struct udma_dev *ud;
4097 if (chan->device->dev->driver != &udma_driver.driver &&
4098 chan->device->dev->driver != &bcdma_driver.driver &&
4099 chan->device->dev->driver != &pktdma_driver.driver)
4102 uc = to_udma_chan(chan);
4105 filter_param = param;
4107 if (filter_param->atype > 2) {
4108 dev_err(ud->dev, "Invalid channel atype: %u\n",
4109 filter_param->atype);
4113 if (filter_param->asel > 15) {
4114 dev_err(ud->dev, "Invalid channel asel: %u\n",
4115 filter_param->asel);
4119 ucc->remote_thread_id = filter_param->remote_thread_id;
4120 ucc->atype = filter_param->atype;
4121 ucc->asel = filter_param->asel;
4122 ucc->tr_trigger_type = filter_param->tr_trigger_type;
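/*
 * A non-zero trigger type selects a triggered bchan (handled as
 * MEM_TO_MEM); otherwise the direction follows from the PSI-L thread
 * ID: the destination offset bit set means the remote thread is a
 * destination (MEM_TO_DEV), clear means it is a source (DEV_TO_MEM).
 */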
4124 if (ucc->tr_trigger_type) {
4125 ucc->dir = DMA_MEM_TO_MEM;
4126 goto triggered_bchan;
4127 } else if (ucc->remote_thread_id & K3_PSIL_DST_THREAD_ID_OFFSET) {
4128 ucc->dir = DMA_MEM_TO_DEV;
4130 ucc->dir = DMA_DEV_TO_MEM;
4133 ep_config = psil_get_ep_config(ucc->remote_thread_id);
4134 if (IS_ERR(ep_config)) {
4135 dev_err(ud->dev, "No configuration for PSI-L thread 0x%04x\n",
4136 ucc->remote_thread_id);
4137 ucc->dir = DMA_MEM_TO_MEM;
4138 ucc->remote_thread_id = -1;
4144 if (ud->match_data->type == DMA_TYPE_BCDMA &&
4145 ep_config->pkt_mode) {
4147 "Only TR mode is supported (PSI-L thread 0x%04x)\n",
4148 ucc->remote_thread_id);
4149 ucc->dir = DMA_MEM_TO_MEM;
4150 ucc->remote_thread_id = -1;
4156 ucc->pkt_mode = ep_config->pkt_mode;
4157 ucc->channel_tpl = ep_config->channel_tpl;
4158 ucc->notdpkt = ep_config->notdpkt;
4159 ucc->ep_type = ep_config->ep_type;
4161 if (ud->match_data->type == DMA_TYPE_PKTDMA &&
4162 ep_config->mapped_channel_id >= 0) {
4163 ucc->mapped_channel_id = ep_config->mapped_channel_id;
4164 ucc->default_flow_id = ep_config->default_flow_id;
4166 ucc->mapped_channel_id = -1;
4167 ucc->default_flow_id = -1;
4170 if (ucc->ep_type != PSIL_EP_NATIVE) {
4171 const struct udma_match_data *match_data = ud->match_data;
4173 if (match_data->flags & UDMA_FLAG_PDMA_ACC32)
4174 ucc->enable_acc32 = ep_config->pdma_acc32;
4175 if (match_data->flags & UDMA_FLAG_PDMA_BURST)
4176 ucc->enable_burst = ep_config->pdma_burst;
4179 ucc->needs_epib = ep_config->needs_epib;
4180 ucc->psd_size = ep_config->psd_size;
4181 ucc->metadata_size =
4182 (ucc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_SIZE : 0) +
4186 ucc->hdesc_size = ALIGN(sizeof(struct cppi5_host_desc_t) +
4187 ucc->metadata_size, ud->desc_align);
4189 dev_dbg(ud->dev, "chan%d: Remote thread: 0x%04x (%s)\n", uc->id,
4190 ucc->remote_thread_id, dmaengine_get_direction_text(ucc->dir));
4195 dev_dbg(ud->dev, "chan%d: triggered channel (type: %u)\n", uc->id,
4196 ucc->tr_trigger_type);
4202 static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
4203 struct of_dma *ofdma)
4205 struct udma_dev *ud = ofdma->of_dma_data;
4206 dma_cap_mask_t mask = ud->ddev.cap_mask;
4207 struct udma_filter_param filter_param;
4208 struct dma_chan *chan;
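/*
 * dma-cells layout, as parsed below: BCDMA takes three cells (trigger
 * type, PSI-L thread ID, ASEL); UDMA and PKTDMA take one or two cells
 * (PSI-L thread ID, then ATYPE for UDMA or ASEL for the others).
 */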
4210 if (ud->match_data->type == DMA_TYPE_BCDMA) {
4211 if (dma_spec->args_count != 3)
4214 filter_param.tr_trigger_type = dma_spec->args[0];
4215 filter_param.remote_thread_id = dma_spec->args[1];
4216 filter_param.asel = dma_spec->args[2];
4217 filter_param.atype = 0;
4219 if (dma_spec->args_count != 1 && dma_spec->args_count != 2)
4222 filter_param.remote_thread_id = dma_spec->args[0];
4223 filter_param.tr_trigger_type = 0;
4224 if (dma_spec->args_count == 2) {
4225 if (ud->match_data->type == DMA_TYPE_UDMA) {
4226 filter_param.atype = dma_spec->args[1];
4227 filter_param.asel = 0;
4229 filter_param.atype = 0;
4230 filter_param.asel = dma_spec->args[1];
4233 filter_param.atype = 0;
4234 filter_param.asel = 0;
4238 chan = __dma_request_channel(&mask, udma_dma_filter_fn, &filter_param,
4241 dev_err(ud->dev, "failed to get channel in %s\n", __func__);
4242 return ERR_PTR(-EINVAL);
4248 static struct udma_match_data am654_main_data = {
4249 .type = DMA_TYPE_UDMA,
4250 .psil_base = 0x1000,
4251 .enable_memcpy_support = true,
4252 .statictr_z_mask = GENMASK(11, 0),
4254 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
4255 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* H Channels */
4256 0, /* No UH Channels */
4260 static struct udma_match_data am654_mcu_data = {
4261 .type = DMA_TYPE_UDMA,
4262 .psil_base = 0x6000,
4263 .enable_memcpy_support = false,
4264 .statictr_z_mask = GENMASK(11, 0),
4266 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
4267 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* H Channels */
4268 0, /* No UH Channels */
4272 static struct udma_match_data j721e_main_data = {
4273 .type = DMA_TYPE_UDMA,
4274 .psil_base = 0x1000,
4275 .enable_memcpy_support = true,
4276 .flags = UDMA_FLAGS_J7_CLASS,
4277 .statictr_z_mask = GENMASK(23, 0),
4279 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
4280 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES, /* H Channels */
4281 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES, /* UH Channels */
4285 static struct udma_match_data j721e_mcu_data = {
4286 .type = DMA_TYPE_UDMA,
4287 .psil_base = 0x6000,
4288 .enable_memcpy_support = false, /* MEM_TO_MEM is slow via MCU UDMA */
4289 .flags = UDMA_FLAGS_J7_CLASS,
4290 .statictr_z_mask = GENMASK(23, 0),
4292 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
4293 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_128_BYTES, /* H Channels */
4294 0, /* No UH Channels */
4298 static struct udma_match_data am64_bcdma_data = {
4299 .type = DMA_TYPE_BCDMA,
4300 .psil_base = 0x2000, /* for tchan and rchan, not applicable to bchan */
4301 .enable_memcpy_support = true, /* Supported via bchan */
4302 .flags = UDMA_FLAGS_J7_CLASS,
4303 .statictr_z_mask = GENMASK(23, 0),
4305 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
4306 0, /* No H Channels */
4307 0, /* No UH Channels */
4311 static struct udma_match_data am64_pktdma_data = {
4312 .type = DMA_TYPE_PKTDMA,
4313 .psil_base = 0x1000,
4314 .enable_memcpy_support = false, /* PKTDMA does not support MEM_TO_MEM */
4315 .flags = UDMA_FLAGS_J7_CLASS,
4316 .statictr_z_mask = GENMASK(23, 0),
4318 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
4319 0, /* No H Channels */
4320 0, /* No UH Channels */
4324 static const struct of_device_id udma_of_match[] = {
4326 .compatible = "ti,am654-navss-main-udmap",
4327 .data = &am654_main_data,
4330 .compatible = "ti,am654-navss-mcu-udmap",
4331 .data = &am654_mcu_data,
4333 .compatible = "ti,j721e-navss-main-udmap",
4334 .data = &j721e_main_data,
4336 .compatible = "ti,j721e-navss-mcu-udmap",
4337 .data = &j721e_mcu_data,
4342 static const struct of_device_id bcdma_of_match[] = {
4344 .compatible = "ti,am64-dmss-bcdma",
4345 .data = &am64_bcdma_data,
4350 static const struct of_device_id pktdma_of_match[] = {
4352 .compatible = "ti,am64-dmss-pktdma",
4353 .data = &am64_pktdma_data,
4358 static struct udma_soc_data am654_soc_data = {
4360 .udma_rchan = 0x200,
4364 static struct udma_soc_data j721e_soc_data = {
4366 .udma_rchan = 0x400,
4370 static struct udma_soc_data j7200_soc_data = {
4376 static struct udma_soc_data am64_soc_data = {
4378 .bcdma_bchan_data = 0x2200,
4379 .bcdma_bchan_ring = 0x2400,
4380 .bcdma_tchan_data = 0x2800,
4381 .bcdma_tchan_ring = 0x2a00,
4382 .bcdma_rchan_data = 0x2e00,
4383 .bcdma_rchan_ring = 0x3000,
4384 .pktdma_tchan_flow = 0x1200,
4385 .pktdma_rchan_flow = 0x1600,
4387 .bcdma_trigger_event_offset = 0xc400,
4390 static const struct soc_device_attribute k3_soc_devices[] = {
4391 { .family = "AM65X", .data = &am654_soc_data },
4392 { .family = "J721E", .data = &j721e_soc_data },
4393 { .family = "J7200", .data = &j7200_soc_data },
4394 { .family = "AM64X", .data = &am64_soc_data },
4395 { .family = "J721S2", .data = &j721e_soc_data},
4396 { .family = "AM62X", .data = &am64_soc_data },
4400 static int udma_get_mmrs(struct platform_device *pdev, struct udma_dev *ud)
4402 u32 cap2, cap3, cap4;
4405 ud->mmrs[MMR_GCFG] = devm_platform_ioremap_resource_byname(pdev, mmr_names[MMR_GCFG]);
4406 if (IS_ERR(ud->mmrs[MMR_GCFG]))
4407 return PTR_ERR(ud->mmrs[MMR_GCFG]);
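/*
 * The GCFG capability registers (CAP2 at 0x28, CAP3 at 0x2c and, for
 * PKTDMA, CAP4 at 0x30) advertise the channel and flow counts used to
 * size the driver's resource maps.
 */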
4409 cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28);
4410 cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
4412 switch (ud->match_data->type) {
4414 ud->rflow_cnt = UDMA_CAP3_RFLOW_CNT(cap3);
4415 ud->tchan_cnt = UDMA_CAP2_TCHAN_CNT(cap2);
4416 ud->echan_cnt = UDMA_CAP2_ECHAN_CNT(cap2);
4417 ud->rchan_cnt = UDMA_CAP2_RCHAN_CNT(cap2);
4419 case DMA_TYPE_BCDMA:
4420 ud->bchan_cnt = BCDMA_CAP2_BCHAN_CNT(cap2);
4421 ud->tchan_cnt = BCDMA_CAP2_TCHAN_CNT(cap2);
4422 ud->rchan_cnt = BCDMA_CAP2_RCHAN_CNT(cap2);
4423 ud->rflow_cnt = ud->rchan_cnt;
4425 case DMA_TYPE_PKTDMA:
4426 cap4 = udma_read(ud->mmrs[MMR_GCFG], 0x30);
4427 ud->tchan_cnt = UDMA_CAP2_TCHAN_CNT(cap2);
4428 ud->rchan_cnt = UDMA_CAP2_RCHAN_CNT(cap2);
4429 ud->rflow_cnt = UDMA_CAP3_RFLOW_CNT(cap3);
4430 ud->tflow_cnt = PKTDMA_CAP4_TFLOW_CNT(cap4);
4436 for (i = 1; i < MMR_LAST; i++) {
4437 if (i == MMR_BCHANRT && ud->bchan_cnt == 0)
4439 if (i == MMR_TCHANRT && ud->tchan_cnt == 0)
4441 if (i == MMR_RCHANRT && ud->rchan_cnt == 0)
4444 ud->mmrs[i] = devm_platform_ioremap_resource_byname(pdev, mmr_names[i]);
4445 if (IS_ERR(ud->mmrs[i]))
4446 return PTR_ERR(ud->mmrs[i]);
4452 static void udma_mark_resource_ranges(struct udma_dev *ud, unsigned long *map,
4453 struct ti_sci_resource_desc *rm_desc,
4456 bitmap_clear(map, rm_desc->start, rm_desc->num);
4457 bitmap_clear(map, rm_desc->start_sec, rm_desc->num_sec);
4458 dev_dbg(ud->dev, "ti_sci resource range for %s: %d:%d | %d:%d\n", name,
4459 rm_desc->start, rm_desc->num, rm_desc->start_sec,
4463 static const char * const range_names[] = {
4464 [RM_RANGE_BCHAN] = "ti,sci-rm-range-bchan",
4465 [RM_RANGE_TCHAN] = "ti,sci-rm-range-tchan",
4466 [RM_RANGE_RCHAN] = "ti,sci-rm-range-rchan",
4467 [RM_RANGE_RFLOW] = "ti,sci-rm-range-rflow",
4468 [RM_RANGE_TFLOW] = "ti,sci-rm-range-tflow",
4471 static int udma_setup_resources(struct udma_dev *ud)
4474 struct device *dev = ud->dev;
4475 struct ti_sci_resource *rm_res, irq_res;
4476 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
4479 /* Set up the throughput level start indexes */
4480 cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
4481 if (of_device_is_compatible(dev->of_node,
4482 "ti,am654-navss-main-udmap")) {
4483 ud->tchan_tpl.levels = 2;
4484 ud->tchan_tpl.start_idx[0] = 8;
4485 } else if (of_device_is_compatible(dev->of_node,
4486 "ti,am654-navss-mcu-udmap")) {
4487 ud->tchan_tpl.levels = 2;
4488 ud->tchan_tpl.start_idx[0] = 2;
4489 } else if (UDMA_CAP3_UCHAN_CNT(cap3)) {
4490 ud->tchan_tpl.levels = 3;
4491 ud->tchan_tpl.start_idx[1] = UDMA_CAP3_UCHAN_CNT(cap3);
4492 ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
4493 } else if (UDMA_CAP3_HCHAN_CNT(cap3)) {
4494 ud->tchan_tpl.levels = 2;
4495 ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
4497 ud->tchan_tpl.levels = 1;
4500 ud->rchan_tpl.levels = ud->tchan_tpl.levels;
4501 ud->rchan_tpl.start_idx[0] = ud->tchan_tpl.start_idx[0];
4502 ud->rchan_tpl.start_idx[1] = ud->tchan_tpl.start_idx[1];
4504 ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
4505 sizeof(unsigned long), GFP_KERNEL);
4506 ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
4508 ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
4509 sizeof(unsigned long), GFP_KERNEL);
4510 ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
4512 ud->rflow_gp_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt),
4513 sizeof(unsigned long),
4515 ud->rflow_gp_map_allocated = devm_kcalloc(dev,
4516 BITS_TO_LONGS(ud->rflow_cnt),
4517 sizeof(unsigned long),
4519 ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
4520 sizeof(unsigned long),
4522 ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
4525 if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_gp_map ||
4526 !ud->rflow_gp_map_allocated || !ud->tchans || !ud->rchans ||
4527 !ud->rflows || !ud->rflow_in_use)
4531 * RX flows with the same IDs as RX channels are reserved to be used
4532 * as default flows if the remote HW can't generate flow IDs. Those
4533 * RX flows can only be requested explicitly by ID.
4535 bitmap_set(ud->rflow_gp_map_allocated, 0, ud->rchan_cnt);
4537 /* by default no GP rflows are assigned to Linux */
4538 bitmap_set(ud->rflow_gp_map, 0, ud->rflow_cnt);
4540 /* Get resource ranges from tisci */
4541 for (i = 0; i < RM_RANGE_LAST; i++) {
4542 if (i == RM_RANGE_BCHAN || i == RM_RANGE_TFLOW)
4545 tisci_rm->rm_ranges[i] =
4546 devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
4547 tisci_rm->tisci_dev_id,
4548 (char *)range_names[i]);
4552 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
4553 if (IS_ERR(rm_res)) {
4554 bitmap_zero(ud->tchan_map, ud->tchan_cnt);
4557 bitmap_fill(ud->tchan_map, ud->tchan_cnt);
4558 for (i = 0; i < rm_res->sets; i++)
4559 udma_mark_resource_ranges(ud, ud->tchan_map,
4560 &rm_res->desc[i], "tchan");
4561 irq_res.sets = rm_res->sets;
4564 /* rchan and matching default flow ranges */
4565 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
4566 if (IS_ERR(rm_res)) {
4567 bitmap_zero(ud->rchan_map, ud->rchan_cnt);
4570 bitmap_fill(ud->rchan_map, ud->rchan_cnt);
4571 for (i = 0; i < rm_res->sets; i++)
4572 udma_mark_resource_ranges(ud, ud->rchan_map,
4573 &rm_res->desc[i], "rchan");
4574 irq_res.sets += rm_res->sets;
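/*
 * One MSI range is allocated per tchan range and per rchan range; the
 * rchan event indexes are offset by oes.udma_rchan below so they do
 * not collide with the tchan events.
 */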
4577 irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
4580 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
4581 if (IS_ERR(rm_res)) {
4582 irq_res.desc[0].start = 0;
4583 irq_res.desc[0].num = ud->tchan_cnt;
4586 for (i = 0; i < rm_res->sets; i++) {
4587 irq_res.desc[i].start = rm_res->desc[i].start;
4588 irq_res.desc[i].num = rm_res->desc[i].num;
4589 irq_res.desc[i].start_sec = rm_res->desc[i].start_sec;
4590 irq_res.desc[i].num_sec = rm_res->desc[i].num_sec;
4593 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
4594 if (IS_ERR(rm_res)) {
4595 irq_res.desc[i].start = 0;
4596 irq_res.desc[i].num = ud->rchan_cnt;
4598 for (j = 0; j < rm_res->sets; j++, i++) {
4599 if (rm_res->desc[j].num) {
4600 irq_res.desc[i].start = rm_res->desc[j].start +
4601 ud->soc_data->oes.udma_rchan;
4602 irq_res.desc[i].num = rm_res->desc[j].num;
4604 if (rm_res->desc[j].num_sec) {
4605 irq_res.desc[i].start_sec = rm_res->desc[j].start_sec +
4606 ud->soc_data->oes.udma_rchan;
4607 irq_res.desc[i].num_sec = rm_res->desc[j].num_sec;
4611 ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
4612 kfree(irq_res.desc);
4614 dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
4618 /* GP rflow ranges */
4619 rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
4620 if (IS_ERR(rm_res)) {
4621 /* all gp flows are assigned exclusively to Linux */
4622 bitmap_clear(ud->rflow_gp_map, ud->rchan_cnt,
4623 ud->rflow_cnt - ud->rchan_cnt);
4625 for (i = 0; i < rm_res->sets; i++)
4626 udma_mark_resource_ranges(ud, ud->rflow_gp_map,
4627 &rm_res->desc[i], "gp-rflow");
4633 static int bcdma_setup_resources(struct udma_dev *ud)
4636 struct device *dev = ud->dev;
4637 struct ti_sci_resource *rm_res, irq_res;
4638 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
4639 const struct udma_oes_offsets *oes = &ud->soc_data->oes;
4642 /* Set up the throughput level start indexes */
4643 cap = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
4644 if (BCDMA_CAP3_UBCHAN_CNT(cap)) {
4645 ud->bchan_tpl.levels = 3;
4646 ud->bchan_tpl.start_idx[1] = BCDMA_CAP3_UBCHAN_CNT(cap);
4647 ud->bchan_tpl.start_idx[0] = BCDMA_CAP3_HBCHAN_CNT(cap);
4648 } else if (BCDMA_CAP3_HBCHAN_CNT(cap)) {
4649 ud->bchan_tpl.levels = 2;
4650 ud->bchan_tpl.start_idx[0] = BCDMA_CAP3_HBCHAN_CNT(cap);
4652 ud->bchan_tpl.levels = 1;
4655 cap = udma_read(ud->mmrs[MMR_GCFG], 0x30);
4656 if (BCDMA_CAP4_URCHAN_CNT(cap)) {
4657 ud->rchan_tpl.levels = 3;
4658 ud->rchan_tpl.start_idx[1] = BCDMA_CAP4_URCHAN_CNT(cap);
4659 ud->rchan_tpl.start_idx[0] = BCDMA_CAP4_HRCHAN_CNT(cap);
4660 } else if (BCDMA_CAP4_HRCHAN_CNT(cap)) {
4661 ud->rchan_tpl.levels = 2;
4662 ud->rchan_tpl.start_idx[0] = BCDMA_CAP4_HRCHAN_CNT(cap);
4664 ud->rchan_tpl.levels = 1;
4667 if (BCDMA_CAP4_UTCHAN_CNT(cap)) {
4668 ud->tchan_tpl.levels = 3;
4669 ud->tchan_tpl.start_idx[1] = BCDMA_CAP4_UTCHAN_CNT(cap);
4670 ud->tchan_tpl.start_idx[0] = BCDMA_CAP4_HTCHAN_CNT(cap);
4671 } else if (BCDMA_CAP4_HTCHAN_CNT(cap)) {
4672 ud->tchan_tpl.levels = 2;
4673 ud->tchan_tpl.start_idx[0] = BCDMA_CAP4_HTCHAN_CNT(cap);
4675 ud->tchan_tpl.levels = 1;
4678 ud->bchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->bchan_cnt),
4679 sizeof(unsigned long), GFP_KERNEL);
4680 ud->bchans = devm_kcalloc(dev, ud->bchan_cnt, sizeof(*ud->bchans),
4682 ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
4683 sizeof(unsigned long), GFP_KERNEL);
4684 ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
4686 ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
4687 sizeof(unsigned long), GFP_KERNEL);
4688 ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
4690 /* BCDMA does not really have flows, but the driver expects them */
4691 ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rchan_cnt),
4692 sizeof(unsigned long),
4694 ud->rflows = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rflows),
4697 if (!ud->bchan_map || !ud->tchan_map || !ud->rchan_map ||
4698 !ud->rflow_in_use || !ud->bchans || !ud->tchans || !ud->rchans ||
4702 /* Get resource ranges from tisci */
4703 for (i = 0; i < RM_RANGE_LAST; i++) {
4704 if (i == RM_RANGE_RFLOW || i == RM_RANGE_TFLOW)
4706 if (i == RM_RANGE_BCHAN && ud->bchan_cnt == 0)
4708 if (i == RM_RANGE_TCHAN && ud->tchan_cnt == 0)
4710 if (i == RM_RANGE_RCHAN && ud->rchan_cnt == 0)
4713 tisci_rm->rm_ranges[i] =
4714 devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
4715 tisci_rm->tisci_dev_id,
4716 (char *)range_names[i]);
4722 if (ud->bchan_cnt) {
4723 rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN];
4724 if (IS_ERR(rm_res)) {
4725 bitmap_zero(ud->bchan_map, ud->bchan_cnt);
4728 bitmap_fill(ud->bchan_map, ud->bchan_cnt);
4729 for (i = 0; i < rm_res->sets; i++)
4730 udma_mark_resource_ranges(ud, ud->bchan_map,
4733 irq_res.sets += rm_res->sets;
4738 if (ud->tchan_cnt) {
4739 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
4740 if (IS_ERR(rm_res)) {
4741 bitmap_zero(ud->tchan_map, ud->tchan_cnt);
4744 bitmap_fill(ud->tchan_map, ud->tchan_cnt);
4745 for (i = 0; i < rm_res->sets; i++)
4746 udma_mark_resource_ranges(ud, ud->tchan_map,
4749 irq_res.sets += rm_res->sets * 2;
4754 if (ud->rchan_cnt) {
4755 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
4756 if (IS_ERR(rm_res)) {
4757 bitmap_zero(ud->rchan_map, ud->rchan_cnt);
4760 bitmap_fill(ud->rchan_map, ud->rchan_cnt);
4761 for (i = 0; i < rm_res->sets; i++)
4762 udma_mark_resource_ranges(ud, ud->rchan_map,
4765 irq_res.sets += rm_res->sets * 2;
4769 irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
4772 if (ud->bchan_cnt) {
4773 rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN];
4774 if (IS_ERR(rm_res)) {
4775 irq_res.desc[0].start = oes->bcdma_bchan_ring;
4776 irq_res.desc[0].num = ud->bchan_cnt;
4779 for (i = 0; i < rm_res->sets; i++) {
4780 irq_res.desc[i].start = rm_res->desc[i].start +
4781 oes->bcdma_bchan_ring;
4782 irq_res.desc[i].num = rm_res->desc[i].num;
4786 if (ud->tchan_cnt) {
4787 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
4788 if (IS_ERR(rm_res)) {
4789 irq_res.desc[i].start = oes->bcdma_tchan_data;
4790 irq_res.desc[i].num = ud->tchan_cnt;
4791 irq_res.desc[i + 1].start = oes->bcdma_tchan_ring;
4792 irq_res.desc[i + 1].num = ud->tchan_cnt;
4795 for (j = 0; j < rm_res->sets; j++, i += 2) {
4796 irq_res.desc[i].start = rm_res->desc[j].start +
4797 oes->bcdma_tchan_data;
4798 irq_res.desc[i].num = rm_res->desc[j].num;
4800 irq_res.desc[i + 1].start = rm_res->desc[j].start +
4801 oes->bcdma_tchan_ring;
4802 irq_res.desc[i + 1].num = rm_res->desc[j].num;
4806 if (ud->rchan_cnt) {
4807 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
4808 if (IS_ERR(rm_res)) {
4809 irq_res.desc[i].start = oes->bcdma_rchan_data;
4810 irq_res.desc[i].num = ud->rchan_cnt;
4811 irq_res.desc[i + 1].start = oes->bcdma_rchan_ring;
4812 irq_res.desc[i + 1].num = ud->rchan_cnt;
4815 for (j = 0; j < rm_res->sets; j++, i += 2) {
4816 irq_res.desc[i].start = rm_res->desc[j].start +
4817 oes->bcdma_rchan_data;
4818 irq_res.desc[i].num = rm_res->desc[j].num;
4820 irq_res.desc[i + 1].start = rm_res->desc[j].start +
4821 oes->bcdma_rchan_ring;
4822 irq_res.desc[i + 1].num = rm_res->desc[j].num;
4827 ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
4828 kfree(irq_res.desc);
4830 dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
4837 static int pktdma_setup_resources(struct udma_dev *ud)
4840 struct device *dev = ud->dev;
4841 struct ti_sci_resource *rm_res, irq_res;
4842 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
4843 const struct udma_oes_offsets *oes = &ud->soc_data->oes;

	/* Set up the throughput level start indexes */
	cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
	if (UDMA_CAP3_UCHAN_CNT(cap3)) {
		ud->tchan_tpl.levels = 3;
		ud->tchan_tpl.start_idx[1] = UDMA_CAP3_UCHAN_CNT(cap3);
		ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
	} else if (UDMA_CAP3_HCHAN_CNT(cap3)) {
		ud->tchan_tpl.levels = 2;
		ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
	} else {
		ud->tchan_tpl.levels = 1;
	}

	ud->rchan_tpl.levels = ud->tchan_tpl.levels;
	ud->rchan_tpl.start_idx[0] = ud->tchan_tpl.start_idx[0];
	ud->rchan_tpl.start_idx[1] = ud->tchan_tpl.start_idx[1];

	ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
				  GFP_KERNEL);
	ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
					   sizeof(unsigned long), GFP_KERNEL);
	ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
				  GFP_KERNEL);
	ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
					sizeof(unsigned long),
					GFP_KERNEL);
	ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
				  GFP_KERNEL);
	ud->tflow_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tflow_cnt),
					   sizeof(unsigned long), GFP_KERNEL);

	if (!ud->tchan_map || !ud->rchan_map || !ud->tflow_map || !ud->tchans ||
	    !ud->rchans || !ud->rflows || !ud->rflow_in_use)
		return -ENOMEM;

	/* Get resource ranges from tisci */
	for (i = 0; i < RM_RANGE_LAST; i++) {
		if (i == RM_RANGE_BCHAN)
			continue;

		tisci_rm->rm_ranges[i] =
			devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
						    tisci_rm->tisci_dev_id,
						    (char *)range_names[i]);
	}

	/* tchan ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->tchan_map, ud->tchan_cnt);
	} else {
		bitmap_fill(ud->tchan_map, ud->tchan_cnt);
		for (i = 0; i < rm_res->sets; i++)
			udma_mark_resource_ranges(ud, ud->tchan_map,
						  &rm_res->desc[i], "tchan");
	}

	/* rchan ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
	if (IS_ERR(rm_res)) {
		bitmap_zero(ud->rchan_map, ud->rchan_cnt);
	} else {
		bitmap_fill(ud->rchan_map, ud->rchan_cnt);
		for (i = 0; i < rm_res->sets; i++)
			udma_mark_resource_ranges(ud, ud->rchan_map,
						  &rm_res->desc[i], "rchan");
	}

	/* rflow ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
	if (IS_ERR(rm_res)) {
		/* all rflows are assigned exclusively to Linux */
		bitmap_zero(ud->rflow_in_use, ud->rflow_cnt);
		irq_res.sets = 1;
	} else {
		bitmap_fill(ud->rflow_in_use, ud->rflow_cnt);
		for (i = 0; i < rm_res->sets; i++)
			udma_mark_resource_ranges(ud, ud->rflow_in_use,
						  &rm_res->desc[i], "rflow");
		irq_res.sets = rm_res->sets;
	}

	/* tflow ranges */
	rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW];
	if (IS_ERR(rm_res)) {
		/* all tflows are assigned exclusively to Linux */
		bitmap_zero(ud->tflow_map, ud->tflow_cnt);
		irq_res.sets++;
	} else {
		bitmap_fill(ud->tflow_map, ud->tflow_cnt);
		for (i = 0; i < rm_res->sets; i++)
			udma_mark_resource_ranges(ud, ud->tflow_map,
						  &rm_res->desc[i], "tflow");
		irq_res.sets += rm_res->sets;
	}
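
	/*
	 * PKTDMA signals completion per flow rather than per channel, so
	 * one event range is reserved per tflow and per rflow range.
	 */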
	irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
	if (!irq_res.desc)
		return -ENOMEM;
	rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW];
	if (IS_ERR(rm_res)) {
		irq_res.desc[0].start = oes->pktdma_tchan_flow;
		irq_res.desc[0].num = ud->tflow_cnt;
		i = 1;
	} else {
		for (i = 0; i < rm_res->sets; i++) {
			irq_res.desc[i].start = rm_res->desc[i].start +
						oes->pktdma_tchan_flow;
			irq_res.desc[i].num = rm_res->desc[i].num;
		}
	}
	rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
	if (IS_ERR(rm_res)) {
		irq_res.desc[i].start = oes->pktdma_rchan_flow;
		irq_res.desc[i].num = ud->rflow_cnt;
	} else {
		for (j = 0; j < rm_res->sets; j++, i++) {
			irq_res.desc[i].start = rm_res->desc[j].start +
						oes->pktdma_rchan_flow;
			irq_res.desc[i].num = rm_res->desc[j].num;
		}
	}
	ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
	kfree(irq_res.desc);
	if (ret) {
		dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
		return ret;
	}

	return 0;
}
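
/*
 * setup_resources - dispatch to the DMA-type specific resource setup and
 * return the number of channels usable by Linux (or a negative errno).
 */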
static int setup_resources(struct udma_dev *ud)
{
	struct device *dev = ud->dev;
	int ch_count, ret;

	switch (ud->match_data->type) {
	case DMA_TYPE_UDMA:
		ret = udma_setup_resources(ud);
		break;
	case DMA_TYPE_BCDMA:
		ret = bcdma_setup_resources(ud);
		break;
	case DMA_TYPE_PKTDMA:
		ret = pktdma_setup_resources(ud);
		break;
	default:
		return -EINVAL;
	}

	if (ret)
		return ret;

	ch_count = ud->bchan_cnt + ud->tchan_cnt + ud->rchan_cnt;
	if (ud->bchan_cnt)
		ch_count -= bitmap_weight(ud->bchan_map, ud->bchan_cnt);
	ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt);
	ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt);
	if (!ch_count)
		return -ENODEV;

	ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels),
				    GFP_KERNEL);
	if (!ud->channels)
		return -ENOMEM;

	switch (ud->match_data->type) {
	case DMA_TYPE_UDMA:
		dev_info(dev,
			 "Channels: %d (tchan: %u, rchan: %u, gp-rflow: %u)\n",
			 ch_count,
			 ud->tchan_cnt - bitmap_weight(ud->tchan_map,
						       ud->tchan_cnt),
			 ud->rchan_cnt - bitmap_weight(ud->rchan_map,
						       ud->rchan_cnt),
			 ud->rflow_cnt - bitmap_weight(ud->rflow_gp_map,
						       ud->rflow_cnt));
		break;
	case DMA_TYPE_BCDMA:
		dev_info(dev,
			 "Channels: %d (bchan: %u, tchan: %u, rchan: %u)\n",
			 ch_count,
			 ud->bchan_cnt - bitmap_weight(ud->bchan_map,
						       ud->bchan_cnt),
			 ud->tchan_cnt - bitmap_weight(ud->tchan_map,
						       ud->tchan_cnt),
			 ud->rchan_cnt - bitmap_weight(ud->rchan_map,
						       ud->rchan_cnt));
		break;
	case DMA_TYPE_PKTDMA:
		dev_info(dev,
			 "Channels: %d (tchan: %u, rchan: %u)\n",
			 ch_count,
			 ud->tchan_cnt - bitmap_weight(ud->tchan_map,
						       ud->tchan_cnt),
			 ud->rchan_cnt - bitmap_weight(ud->rchan_map,
						       ud->rchan_cnt));
		break;
	default:
		break;
	}

	return ch_count;
}
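
/*
 * udma_setup_rx_flush - prepare the descriptors used to drain an RX
 * channel on early termination. Stale data is received into a small
 * scratch buffer through one of two preformatted descriptors:
 * hwdescs[0] for TR mode channels, hwdescs[1] for packet mode channels.
 */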
static int udma_setup_rx_flush(struct udma_dev *ud)
{
	struct udma_rx_flush *rx_flush = &ud->rx_flush;
	struct cppi5_desc_hdr_t *tr_desc;
	struct cppi5_tr_type1_t *tr_req;
	struct cppi5_host_desc_t *desc;
	struct device *dev = ud->dev;
	struct udma_hwdesc *hwdesc;
	size_t tr_size;

	/* Allocate 1K buffer for discarded data on RX channel teardown */
	rx_flush->buffer_size = SZ_1K;
	rx_flush->buffer_vaddr = devm_kzalloc(dev, rx_flush->buffer_size,
					      GFP_KERNEL);
	if (!rx_flush->buffer_vaddr)
		return -ENOMEM;

	rx_flush->buffer_paddr = dma_map_single(dev, rx_flush->buffer_vaddr,
						rx_flush->buffer_size,
						DMA_TO_DEVICE);
	if (dma_mapping_error(dev, rx_flush->buffer_paddr))
		return -ENOMEM;

	/* Set up descriptor to be used for TR mode */
	hwdesc = &rx_flush->hwdescs[0];
	tr_size = sizeof(struct cppi5_tr_type1_t);
	hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size, 1);
	hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size,
					ud->desc_align);

	hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size,
						GFP_KERNEL);
	if (!hwdesc->cppi5_desc_vaddr)
		return -ENOMEM;

	hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr,
						  hwdesc->cppi5_desc_size,
						  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr))
		return -ENOMEM;

	/* Start of the TR req records */
	hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size;
	/* Start address of the TR response array */
	hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size;

	tr_desc = hwdesc->cppi5_desc_vaddr;
	cppi5_trdesc_init(tr_desc, 1, tr_size, 0, 0);
	cppi5_desc_set_pktids(tr_desc, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT);
	cppi5_desc_set_retpolicy(tr_desc, 0, 0);

	tr_req = hwdesc->tr_req_base;
	cppi5_tr_init(&tr_req->flags, CPPI5_TR_TYPE1, false, false,
		      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
	cppi5_tr_csf_set(&tr_req->flags, CPPI5_TR_CSF_SUPR_EVT);

	tr_req->addr = rx_flush->buffer_paddr;
	tr_req->icnt0 = rx_flush->buffer_size;
	tr_req->icnt1 = 1;

	dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr,
				   hwdesc->cppi5_desc_size, DMA_TO_DEVICE);

	/* Set up descriptor to be used for packet mode */
	hwdesc = &rx_flush->hwdescs[1];
	hwdesc->cppi5_desc_size = ALIGN(sizeof(struct cppi5_host_desc_t) +
					CPPI5_INFO0_HDESC_EPIB_SIZE +
					CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE,
					ud->desc_align);

	hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size,
						GFP_KERNEL);
	if (!hwdesc->cppi5_desc_vaddr)
		return -ENOMEM;

	hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr,
						  hwdesc->cppi5_desc_size,
						  DMA_TO_DEVICE);
	if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr))
		return -ENOMEM;

	desc = hwdesc->cppi5_desc_vaddr;
	cppi5_hdesc_init(desc, 0, 0);
	cppi5_desc_set_pktids(&desc->hdr, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT);
	cppi5_desc_set_retpolicy(&desc->hdr, 0, 0);

	cppi5_hdesc_attach_buf(desc,
			       rx_flush->buffer_paddr, rx_flush->buffer_size,
			       rx_flush->buffer_paddr, rx_flush->buffer_size);

	dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr,
				   hwdesc->cppi5_desc_size, DMA_TO_DEVICE);
	return 0;
}
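
/*
 * debugfs summary: one line per in-use channel showing the client name,
 * direction, channel/flow IDs, PSI-L thread pair and endpoint type.
 */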
#ifdef CONFIG_DEBUG_FS
static void udma_dbg_summary_show_chan(struct seq_file *s,
				       struct dma_chan *chan)
{
	struct udma_chan *uc = to_udma_chan(chan);
	struct udma_chan_config *ucc = &uc->config;

	seq_printf(s, " %-13s| %s", dma_chan_name(chan),
		   chan->dbg_client_name ?: "in-use");
	if (ucc->tr_trigger_type)
		seq_puts(s, " (triggered, ");
	else
		seq_printf(s, " (%s, ",
			   dmaengine_get_direction_text(uc->config.dir));

	switch (uc->config.dir) {
	case DMA_MEM_TO_MEM:
		if (uc->ud->match_data->type == DMA_TYPE_BCDMA) {
			seq_printf(s, "bchan%d)\n", uc->bchan->id);
			return;
		}

		seq_printf(s, "chan%d pair [0x%04x -> 0x%04x], ", uc->tchan->id,
			   ucc->src_thread, ucc->dst_thread);
		break;
	case DMA_DEV_TO_MEM:
		seq_printf(s, "rchan%d [0x%04x -> 0x%04x], ", uc->rchan->id,
			   ucc->src_thread, ucc->dst_thread);
		if (uc->ud->match_data->type == DMA_TYPE_PKTDMA)
			seq_printf(s, "rflow%d, ", uc->rflow->id);
		break;
	case DMA_MEM_TO_DEV:
		seq_printf(s, "tchan%d [0x%04x -> 0x%04x], ", uc->tchan->id,
			   ucc->src_thread, ucc->dst_thread);
		if (uc->ud->match_data->type == DMA_TYPE_PKTDMA)
			seq_printf(s, "tflow%d, ", uc->tchan->tflow_id);
		break;
	default:
		seq_printf(s, ")\n");
		return;
	}

	if (ucc->ep_type == PSIL_EP_NATIVE) {
		seq_printf(s, "PSI-L Native");
		if (ucc->metadata_size) {
			seq_printf(s, "[%s", ucc->needs_epib ? " EPIB" : "");
			if (ucc->psd_size)
				seq_printf(s, " PSDsize:%u", ucc->psd_size);
			seq_printf(s, " ]");
		}
	} else {
		seq_printf(s, "PDMA");
		if (ucc->enable_acc32 || ucc->enable_burst)
			seq_printf(s, "[%s%s ]",
				   ucc->enable_acc32 ? " ACC32" : "",
				   ucc->enable_burst ? " BURST" : "");
	}

	seq_printf(s, ", %s)\n", ucc->pkt_mode ? "Packet mode" : "TR mode");
}

static void udma_dbg_summary_show(struct seq_file *s,
				  struct dma_device *dma_dev)
{
	struct dma_chan *chan;

	list_for_each_entry(chan, &dma_dev->channels, device_node) {
		if (chan->client_count)
			udma_dbg_summary_show_chan(s, chan);
	}
}
#endif /* CONFIG_DEBUG_FS */
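
/*
 * The advertised memcpy alignment follows the burst size of the highest
 * throughput level usable for MEM_TO_MEM transfers; without memcpy
 * support a conservative 8 byte alignment is reported.
 */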
static enum dmaengine_alignment udma_get_copy_align(struct udma_dev *ud)
{
	const struct udma_match_data *match_data = ud->match_data;
	u8 tpl;

	if (!match_data->enable_memcpy_support)
		return DMAENGINE_ALIGN_8_BYTES;

	/* Get the highest TPL level the device supports for memcpy */
	if (ud->bchan_cnt)
		tpl = udma_get_chan_tpl_index(&ud->bchan_tpl, 0);
	else if (ud->tchan_cnt)
		tpl = udma_get_chan_tpl_index(&ud->tchan_tpl, 0);
	else
		return DMAENGINE_ALIGN_8_BYTES;

	switch (match_data->burst_size[tpl]) {
	case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES:
		return DMAENGINE_ALIGN_256_BYTES;
	case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_128_BYTES:
		return DMAENGINE_ALIGN_128_BYTES;
	case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES:
	default:
		return DMAENGINE_ALIGN_64_BYTES;
	}
}

#define TI_UDMAC_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
				 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
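
/*
 * udma_probe - common probe for the UDMA, BCDMA and PKTDMA drivers:
 * resolve the match and SoC data, acquire the TISCI and ring accelerator
 * handles, set up the dmaengine device and initialize every channel.
 */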
static int udma_probe(struct platform_device *pdev)
{
	struct device_node *navss_node = pdev->dev.parent->of_node;
	const struct soc_device_attribute *soc;
	struct device *dev = &pdev->dev;
	struct udma_dev *ud;
	const struct of_device_id *match;
	int i, ret;
	int ch_count;

	ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(48));
	if (ret)
		dev_err(dev, "failed to set dma mask\n");

	ud = devm_kzalloc(dev, sizeof(*ud), GFP_KERNEL);
	if (!ud)
		return -ENOMEM;

	match = of_match_node(udma_of_match, dev->of_node);
	if (!match)
		match = of_match_node(bcdma_of_match, dev->of_node);
	if (!match) {
		match = of_match_node(pktdma_of_match, dev->of_node);
		if (!match) {
			dev_err(dev, "No compatible match found\n");
			return -ENODEV;
		}
	}
	ud->match_data = match->data;

	soc = soc_device_match(k3_soc_devices);
	if (!soc) {
		dev_err(dev, "No compatible SoC found\n");
		return -ENODEV;
	}
	ud->soc_data = soc->data;

	ret = udma_get_mmrs(pdev, ud);
	if (ret)
		return ret;

	ud->tisci_rm.tisci = ti_sci_get_by_phandle(dev->of_node, "ti,sci");
	if (IS_ERR(ud->tisci_rm.tisci))
		return PTR_ERR(ud->tisci_rm.tisci);

	ret = of_property_read_u32(dev->of_node, "ti,sci-dev-id",
				   &ud->tisci_rm.tisci_dev_id);
	if (ret) {
		dev_err(dev, "ti,sci-dev-id read failure %d\n", ret);
		return ret;
	}
	pdev->id = ud->tisci_rm.tisci_dev_id;

	ret = of_property_read_u32(navss_node, "ti,sci-dev-id",
				   &ud->tisci_rm.tisci_navss_dev_id);
	if (ret) {
		dev_err(dev, "NAVSS ti,sci-dev-id read failure %d\n", ret);
		return ret;
	}

	if (ud->match_data->type == DMA_TYPE_UDMA) {
		ret = of_property_read_u32(dev->of_node, "ti,udma-atype",
					   &ud->atype);
		if (!ret && ud->atype > 2) {
			dev_err(dev, "Invalid atype: %u\n", ud->atype);
			return -EINVAL;
		}
	} else {
		ret = of_property_read_u32(dev->of_node, "ti,asel",
					   &ud->asel);
		if (!ret && ud->asel > 15) {
			dev_err(dev, "Invalid asel: %u\n", ud->asel);
			return -EINVAL;
		}
	}

	ud->tisci_rm.tisci_udmap_ops = &ud->tisci_rm.tisci->ops.rm_udmap_ops;
	ud->tisci_rm.tisci_psil_ops = &ud->tisci_rm.tisci->ops.rm_psil_ops;

	if (ud->match_data->type == DMA_TYPE_UDMA) {
		ud->ringacc = of_k3_ringacc_get_by_phandle(dev->of_node, "ti,ringacc");
	} else {
		struct k3_ringacc_init_data ring_init_data;

		ring_init_data.tisci = ud->tisci_rm.tisci;
		ring_init_data.tisci_dev_id = ud->tisci_rm.tisci_dev_id;
		if (ud->match_data->type == DMA_TYPE_BCDMA) {
			ring_init_data.num_rings = ud->bchan_cnt +
						   ud->tchan_cnt +
						   ud->rchan_cnt;
		} else {
			ring_init_data.num_rings = ud->rflow_cnt +
						   ud->tflow_cnt;
		}

		ud->ringacc = k3_ringacc_dmarings_init(pdev, &ring_init_data);
	}

	if (IS_ERR(ud->ringacc))
		return PTR_ERR(ud->ringacc);

	dev->msi.domain = of_msi_get_domain(dev, dev->of_node,
					    DOMAIN_BUS_TI_SCI_INTA_MSI);
	if (!dev->msi.domain) {
		dev_err(dev, "Failed to get MSI domain\n");
		return -EPROBE_DEFER;
	}

	dma_cap_set(DMA_SLAVE, ud->ddev.cap_mask);
	/* cyclic operation is not supported via PKTDMA */
	if (ud->match_data->type != DMA_TYPE_PKTDMA) {
		dma_cap_set(DMA_CYCLIC, ud->ddev.cap_mask);
		ud->ddev.device_prep_dma_cyclic = udma_prep_dma_cyclic;
	}

	ud->ddev.device_config = udma_slave_config;
	ud->ddev.device_prep_slave_sg = udma_prep_slave_sg;
	ud->ddev.device_issue_pending = udma_issue_pending;
	ud->ddev.device_tx_status = udma_tx_status;
	ud->ddev.device_pause = udma_pause;
	ud->ddev.device_resume = udma_resume;
	ud->ddev.device_terminate_all = udma_terminate_all;
	ud->ddev.device_synchronize = udma_synchronize;
#ifdef CONFIG_DEBUG_FS
	ud->ddev.dbg_summary_show = udma_dbg_summary_show;
#endif

	switch (ud->match_data->type) {
	case DMA_TYPE_UDMA:
		ud->ddev.device_alloc_chan_resources =
					udma_alloc_chan_resources;
		break;
	case DMA_TYPE_BCDMA:
		ud->ddev.device_alloc_chan_resources =
					bcdma_alloc_chan_resources;
		ud->ddev.device_router_config = bcdma_router_config;
		break;
	case DMA_TYPE_PKTDMA:
		ud->ddev.device_alloc_chan_resources =
					pktdma_alloc_chan_resources;
		break;
	default:
		return -EINVAL;
	}
	ud->ddev.device_free_chan_resources = udma_free_chan_resources;

	ud->ddev.src_addr_widths = TI_UDMAC_BUSWIDTHS;
	ud->ddev.dst_addr_widths = TI_UDMAC_BUSWIDTHS;
	ud->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	ud->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	ud->ddev.desc_metadata_modes = DESC_METADATA_CLIENT |
				       DESC_METADATA_ENGINE;
	if (ud->match_data->enable_memcpy_support &&
	    !(ud->match_data->type == DMA_TYPE_BCDMA && ud->bchan_cnt == 0)) {
		dma_cap_set(DMA_MEMCPY, ud->ddev.cap_mask);
		ud->ddev.device_prep_dma_memcpy = udma_prep_dma_memcpy;
		ud->ddev.directions |= BIT(DMA_MEM_TO_MEM);
	}

	ud->ddev.dev = dev;
	ud->dev = dev;
	ud->psil_base = ud->match_data->psil_base;

	INIT_LIST_HEAD(&ud->ddev.channels);
	INIT_LIST_HEAD(&ud->desc_to_purge);

	ch_count = setup_resources(ud);
	if (ch_count <= 0)
		return ch_count;

	spin_lock_init(&ud->lock);
	INIT_WORK(&ud->purge_work, udma_purge_desc_work);

	ud->desc_align = 64;
	if (ud->desc_align < dma_get_cache_alignment())
		ud->desc_align = dma_get_cache_alignment();

	ret = udma_setup_rx_flush(ud);
	if (ret)
		return ret;

	for (i = 0; i < ud->bchan_cnt; i++) {
		struct udma_bchan *bchan = &ud->bchans[i];

		bchan->id = i;
		bchan->reg_rt = ud->mmrs[MMR_BCHANRT] + i * 0x1000;
	}

	for (i = 0; i < ud->tchan_cnt; i++) {
		struct udma_tchan *tchan = &ud->tchans[i];

		tchan->id = i;
		tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + i * 0x1000;
	}

	for (i = 0; i < ud->rchan_cnt; i++) {
		struct udma_rchan *rchan = &ud->rchans[i];

		rchan->id = i;
		rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + i * 0x1000;
	}

	for (i = 0; i < ud->rflow_cnt; i++) {
		struct udma_rflow *rflow = &ud->rflows[i];

		rflow->id = i;
	}

	for (i = 0; i < ch_count; i++) {
		struct udma_chan *uc = &ud->channels[i];

		uc->ud = ud;
		uc->vc.desc_free = udma_desc_free;
		uc->id = i;
		uc->bchan = NULL;
		uc->tchan = NULL;
		uc->rchan = NULL;
		uc->config.remote_thread_id = -1;
		uc->config.mapped_channel_id = -1;
		uc->config.default_flow_id = -1;
		uc->config.dir = DMA_MEM_TO_MEM;
		uc->name = devm_kasprintf(dev, GFP_KERNEL, "%s chan%d",
					  dev_name(dev), i);

		vchan_init(&uc->vc, &ud->ddev);
		/* Use custom vchan completion handling */
		tasklet_setup(&uc->vc.task, udma_vchan_complete);
		init_completion(&uc->teardown_completed);
		INIT_DELAYED_WORK(&uc->tx_drain.work, udma_check_tx_completion);
	}

	/* Configure the copy_align to the maximum burst size the device supports */
	ud->ddev.copy_align = udma_get_copy_align(ud);

	ret = dma_async_device_register(&ud->ddev);
	if (ret) {
		dev_err(dev, "failed to register slave DMA engine: %d\n", ret);
		return ret;
	}

	platform_set_drvdata(pdev, ud);

	ret = of_dma_controller_register(dev->of_node, udma_of_xlate, ud);
	if (ret) {
		dev_err(dev, "failed to register of_dma controller\n");
		dma_async_device_unregister(&ud->ddev);
	}

	return ret;
}
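
/*
 * Three platform drivers share udma_probe(); the match table determines
 * which DMA type is instantiated. All are builtin-only and cannot be
 * unbound through sysfs (suppress_bind_attrs).
 */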
static struct platform_driver udma_driver = {
	.driver = {
		.name	= "ti-udma",
		.of_match_table = udma_of_match,
		.suppress_bind_attrs = true,
	},
	.probe		= udma_probe,
};
builtin_platform_driver(udma_driver);

static struct platform_driver bcdma_driver = {
	.driver = {
		.name	= "ti-bcdma",
		.of_match_table = bcdma_of_match,
		.suppress_bind_attrs = true,
	},
	.probe		= udma_probe,
};
builtin_platform_driver(bcdma_driver);

static struct platform_driver pktdma_driver = {
	.driver = {
		.name	= "ti-pktdma",
		.of_match_table = pktdma_of_match,
		.suppress_bind_attrs = true,
	},
	.probe		= udma_probe,
};
builtin_platform_driver(pktdma_driver);

/* Private interfaces to UDMA */
#include "k3-udma-private.c"