1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
4 * Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
7 #include <linux/kernel.h>
8 #include <linux/delay.h>
9 #include <linux/dmaengine.h>
10 #include <linux/dma-mapping.h>
11 #include <linux/dmapool.h>
12 #include <linux/err.h>
13 #include <linux/init.h>
14 #include <linux/interrupt.h>
15 #include <linux/list.h>
16 #include <linux/platform_device.h>
17 #include <linux/slab.h>
18 #include <linux/spinlock.h>
19 #include <linux/sys_soc.h>
21 #include <linux/of_dma.h>
22 #include <linux/of_device.h>
23 #include <linux/of_irq.h>
24 #include <linux/workqueue.h>
25 #include <linux/completion.h>
26 #include <linux/soc/ti/k3-ringacc.h>
27 #include <linux/soc/ti/ti_sci_protocol.h>
28 #include <linux/soc/ti/ti_sci_inta_msi.h>
29 #include <linux/dma/k3-event-router.h>
30 #include <linux/dma/ti-cppi5.h>
32 #include "../virt-dma.h"
34 #include "k3-psil-priv.h"
36 struct udma_static_tr {
37 u8 elsize; /* RPSTR0 */
38 u16 elcnt; /* RPSTR0 */
39 u16 bstcnt; /* RPSTR1 */
42 #define K3_UDMA_MAX_RFLOWS 1024
43 #define K3_UDMA_DEFAULT_RING_SIZE 16
45 /* How SRC/DST tag should be updated by UDMA in the descriptor's Word 3 */
46 #define UDMA_RFLOW_SRCTAG_NONE 0
47 #define UDMA_RFLOW_SRCTAG_CFG_TAG 1
48 #define UDMA_RFLOW_SRCTAG_FLOW_ID 2
49 #define UDMA_RFLOW_SRCTAG_SRC_TAG 4
51 #define UDMA_RFLOW_DSTTAG_NONE 0
52 #define UDMA_RFLOW_DSTTAG_CFG_TAG 1
53 #define UDMA_RFLOW_DSTTAG_FLOW_ID 2
54 #define UDMA_RFLOW_DSTTAG_DST_TAG_LO 4
55 #define UDMA_RFLOW_DSTTAG_DST_TAG_HI 5
73 static const char * const mmr_names[] = {
75 [MMR_BCHANRT] = "bchanrt",
76 [MMR_RCHANRT] = "rchanrt",
77 [MMR_TCHANRT] = "tchanrt",
84 struct k3_ring *t_ring; /* Transmit ring */
85 struct k3_ring *tc_ring; /* Transmit Completion ring */
86 int tflow_id; /* applicable only for PKTDMA */
90 #define udma_bchan udma_tchan
94 struct k3_ring *fd_ring; /* Free Descriptor ring */
95 struct k3_ring *r_ring; /* Receive ring */
104 struct udma_oes_offsets {
105 /* K3 UDMA Output Event Offset */
108 /* BCDMA Output Event Offsets */
109 u32 bcdma_bchan_data;
110 u32 bcdma_bchan_ring;
111 u32 bcdma_tchan_data;
112 u32 bcdma_tchan_ring;
113 u32 bcdma_rchan_data;
114 u32 bcdma_rchan_ring;
116 /* PKTDMA Output Event Offsets */
117 u32 pktdma_tchan_flow;
118 u32 pktdma_rchan_flow;
121 #define UDMA_FLAG_PDMA_ACC32 BIT(0)
122 #define UDMA_FLAG_PDMA_BURST BIT(1)
123 #define UDMA_FLAG_TDTYPE BIT(2)
124 #define UDMA_FLAG_BURST_SIZE BIT(3)
125 #define UDMA_FLAGS_J7_CLASS (UDMA_FLAG_PDMA_ACC32 | \
126 UDMA_FLAG_PDMA_BURST | \
128 UDMA_FLAG_BURST_SIZE)
130 struct udma_match_data {
131 enum k3_dma_type type;
133 bool enable_memcpy_support;
139 struct udma_soc_data {
140 struct udma_oes_offsets oes;
141 u32 bcdma_trigger_event_offset;
145 size_t cppi5_desc_size;
146 void *cppi5_desc_vaddr;
147 dma_addr_t cppi5_desc_paddr;
149 /* TR descriptor internal pointers */
151 struct cppi5_tr_resp_t *tr_resp_base;
154 struct udma_rx_flush {
155 struct udma_hwdesc hwdescs[2];
159 dma_addr_t buffer_paddr;
168 struct dma_device ddev;
170 void __iomem *mmrs[MMR_LAST];
171 const struct udma_match_data *match_data;
172 const struct udma_soc_data *soc_data;
174 struct udma_tpl bchan_tpl;
175 struct udma_tpl tchan_tpl;
176 struct udma_tpl rchan_tpl;
178 size_t desc_align; /* alignment to use for descriptors */
180 struct udma_tisci_rm tisci_rm;
182 struct k3_ringacc *ringacc;
184 struct work_struct purge_work;
185 struct list_head desc_to_purge;
188 struct udma_rx_flush rx_flush;
196 unsigned long *bchan_map;
197 unsigned long *tchan_map;
198 unsigned long *rchan_map;
199 unsigned long *rflow_gp_map;
200 unsigned long *rflow_gp_map_allocated;
201 unsigned long *rflow_in_use;
202 unsigned long *tflow_map;
204 struct udma_bchan *bchans;
205 struct udma_tchan *tchans;
206 struct udma_rchan *rchans;
207 struct udma_rflow *rflows;
209 struct udma_chan *channels;
216 struct virt_dma_desc vd;
220 enum dma_transfer_direction dir;
222 struct udma_static_tr static_tr;
226 unsigned int desc_idx; /* Only used for cyclic in packet mode */
230 void *metadata; /* pointer to provided metadata buffer (EPIP, PSdata) */
232 unsigned int hwdesc_count;
233 struct udma_hwdesc hwdesc[];
236 enum udma_chan_state {
237 UDMA_CHAN_IS_IDLE = 0, /* not active, no teardown is in progress */
238 UDMA_CHAN_IS_ACTIVE, /* Normal operation */
239 UDMA_CHAN_IS_TERMINATING, /* channel is being terminated */
242 struct udma_tx_drain {
243 struct delayed_work work;
248 struct udma_chan_config {
249 bool pkt_mode; /* TR or packet */
250 bool needs_epib; /* EPIB is needed for the communication or not */
251 u32 psd_size; /* size of Protocol Specific Data */
252 u32 metadata_size; /* (needs_epib ? 16:0) + psd_size */
253 u32 hdesc_size; /* Size of a packet descriptor in packet mode */
254 bool notdpkt; /* Suppress sending TDC packet */
255 int remote_thread_id;
260 enum psil_endpoint_type ep_type;
263 enum udma_tp_level channel_tpl; /* Channel Throughput Level */
267 /* PKTDMA mapped channel */
268 int mapped_channel_id;
269 /* PKTDMA default tflow or rflow for mapped channel */
272 enum dma_transfer_direction dir;
276 struct virt_dma_chan vc;
277 struct dma_slave_config cfg;
279 struct device *dma_dev;
280 struct udma_desc *desc;
281 struct udma_desc *terminated_desc;
282 struct udma_static_tr static_tr;
285 struct udma_bchan *bchan;
286 struct udma_tchan *tchan;
287 struct udma_rchan *rchan;
288 struct udma_rflow *rflow;
298 enum udma_chan_state state;
299 struct completion teardown_completed;
301 struct udma_tx_drain tx_drain;
303 u32 bcnt; /* number of bytes completed since the start of the channel */
305 /* Channel configuration parameters */
306 struct udma_chan_config config;
308 /* dmapool for packet mode descriptors */
310 struct dma_pool *hdesc_pool;
315 static inline struct udma_dev *to_udma_dev(struct dma_device *d)
317 return container_of(d, struct udma_dev, ddev);
320 static inline struct udma_chan *to_udma_chan(struct dma_chan *c)
322 return container_of(c, struct udma_chan, vc.chan);
325 static inline struct udma_desc *to_udma_desc(struct dma_async_tx_descriptor *t)
327 return container_of(t, struct udma_desc, vd.tx);
330 /* Generic register access functions */
331 static inline u32 udma_read(void __iomem *base, int reg)
333 return readl(base + reg);
336 static inline void udma_write(void __iomem *base, int reg, u32 val)
338 writel(val, base + reg);
341 static inline void udma_update_bits(void __iomem *base, int reg,
346 orig = readl(base + reg);
351 writel(tmp, base + reg);
355 static inline u32 udma_tchanrt_read(struct udma_chan *uc, int reg)
359 return udma_read(uc->tchan->reg_rt, reg);
362 static inline void udma_tchanrt_write(struct udma_chan *uc, int reg, u32 val)
366 udma_write(uc->tchan->reg_rt, reg, val);
369 static inline void udma_tchanrt_update_bits(struct udma_chan *uc, int reg,
374 udma_update_bits(uc->tchan->reg_rt, reg, mask, val);
378 static inline u32 udma_rchanrt_read(struct udma_chan *uc, int reg)
382 return udma_read(uc->rchan->reg_rt, reg);
385 static inline void udma_rchanrt_write(struct udma_chan *uc, int reg, u32 val)
389 udma_write(uc->rchan->reg_rt, reg, val);
392 static inline void udma_rchanrt_update_bits(struct udma_chan *uc, int reg,
397 udma_update_bits(uc->rchan->reg_rt, reg, mask, val);
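/*
 * Illustrative sketch (not part of the driver source): the *_update_bits()
 * wrappers above do a read-modify-write on a channel realtime register.
 * Pausing and later resuming a TX channel could look like:
 *
 *	udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_CTL_REG,
 *				 UDMA_CHAN_RT_CTL_PAUSE,
 *				 UDMA_CHAN_RT_CTL_PAUSE);
 *	...
 *	udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_CTL_REG,
 *				 UDMA_CHAN_RT_CTL_PAUSE, 0);
 */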
400 static int navss_psil_pair(struct udma_dev *ud, u32 src_thread, u32 dst_thread)
402 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
404 dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
405 return tisci_rm->tisci_psil_ops->pair(tisci_rm->tisci,
406 tisci_rm->tisci_navss_dev_id,
407 src_thread, dst_thread);
410 static int navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
413 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
415 dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
416 return tisci_rm->tisci_psil_ops->unpair(tisci_rm->tisci,
417 tisci_rm->tisci_navss_dev_id,
418 src_thread, dst_thread);
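/*
 * Usage sketch (mirroring the channel allocation paths later in this file):
 * a PSI-L pairing always runs from a source thread to a destination thread,
 * where the destination side is marked by K3_PSIL_DST_THREAD_ID_OFFSET.
 *
 *	src_thread = ud->psil_base + uc->tchan->id;
 *	dst_thread = uc->config.remote_thread_id |
 *		     K3_PSIL_DST_THREAD_ID_OFFSET;
 *	ret = navss_psil_pair(ud, src_thread, dst_thread);
 */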
421 static void k3_configure_chan_coherency(struct dma_chan *chan, u32 asel)
423 struct device *chan_dev = &chan->dev->device;
426 /* No special handling for the channel */
427 chan->dev->chan_dma_dev = false;
429 chan_dev->dma_coherent = false;
430 chan_dev->dma_parms = NULL;
431 } else if (asel == 14 || asel == 15) {
432 chan->dev->chan_dma_dev = true;
434 chan_dev->dma_coherent = true;
435 dma_coerce_mask_and_coherent(chan_dev, DMA_BIT_MASK(48));
436 chan_dev->dma_parms = chan_dev->parent->dma_parms;
438 dev_warn(chan->device->dev, "Invalid ASEL value: %u\n", asel);
440 chan_dev->dma_coherent = false;
441 chan_dev->dma_parms = NULL;
445 static u8 udma_get_chan_tpl_index(struct udma_tpl *tpl_map, int chan_id)
449 for (i = 0; i < tpl_map->levels; i++) {
450 if (chan_id >= tpl_map->start_idx[i])
457 static void udma_reset_uchan(struct udma_chan *uc)
459 memset(&uc->config, 0, sizeof(uc->config));
460 uc->config.remote_thread_id = -1;
461 uc->config.mapped_channel_id = -1;
462 uc->config.default_flow_id = -1;
463 uc->state = UDMA_CHAN_IS_IDLE;
466 static void udma_dump_chan_stdata(struct udma_chan *uc)
468 struct device *dev = uc->ud->dev;
472 if (uc->config.dir == DMA_MEM_TO_DEV || uc->config.dir == DMA_MEM_TO_MEM) {
473 dev_dbg(dev, "TCHAN State data:\n");
474 for (i = 0; i < 32; i++) {
475 offset = UDMA_CHAN_RT_STDATA_REG + i * 4;
476 dev_dbg(dev, "TRT_STDATA[%02d]: 0x%08x\n", i,
477 udma_tchanrt_read(uc, offset));
481 if (uc->config.dir == DMA_DEV_TO_MEM || uc->config.dir == DMA_MEM_TO_MEM) {
482 dev_dbg(dev, "RCHAN State data:\n");
483 for (i = 0; i < 32; i++) {
484 offset = UDMA_CHAN_RT_STDATA_REG + i * 4;
485 dev_dbg(dev, "RRT_STDATA[%02d]: 0x%08x\n", i,
486 udma_rchanrt_read(uc, offset));
491 static inline dma_addr_t udma_curr_cppi5_desc_paddr(struct udma_desc *d,
494 return d->hwdesc[idx].cppi5_desc_paddr;
497 static inline void *udma_curr_cppi5_desc_vaddr(struct udma_desc *d, int idx)
499 return d->hwdesc[idx].cppi5_desc_vaddr;
502 static struct udma_desc *udma_udma_desc_from_paddr(struct udma_chan *uc,
505 struct udma_desc *d = uc->terminated_desc;
508 dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
511 if (desc_paddr != paddr)
518 dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
521 if (desc_paddr != paddr)
529 static void udma_free_hwdesc(struct udma_chan *uc, struct udma_desc *d)
531 if (uc->use_dma_pool) {
534 for (i = 0; i < d->hwdesc_count; i++) {
535 if (!d->hwdesc[i].cppi5_desc_vaddr)
538 dma_pool_free(uc->hdesc_pool,
539 d->hwdesc[i].cppi5_desc_vaddr,
540 d->hwdesc[i].cppi5_desc_paddr);
542 d->hwdesc[i].cppi5_desc_vaddr = NULL;
544 } else if (d->hwdesc[0].cppi5_desc_vaddr) {
545 dma_free_coherent(uc->dma_dev, d->hwdesc[0].cppi5_desc_size,
546 d->hwdesc[0].cppi5_desc_vaddr,
547 d->hwdesc[0].cppi5_desc_paddr);
549 d->hwdesc[0].cppi5_desc_vaddr = NULL;
553 static void udma_purge_desc_work(struct work_struct *work)
555 struct udma_dev *ud = container_of(work, typeof(*ud), purge_work);
556 struct virt_dma_desc *vd, *_vd;
560 spin_lock_irqsave(&ud->lock, flags);
561 list_splice_tail_init(&ud->desc_to_purge, &head);
562 spin_unlock_irqrestore(&ud->lock, flags);
564 list_for_each_entry_safe(vd, _vd, &head, node) {
565 struct udma_chan *uc = to_udma_chan(vd->tx.chan);
566 struct udma_desc *d = to_udma_desc(&vd->tx);
568 udma_free_hwdesc(uc, d);
573 /* If more to purge, schedule the work again */
574 if (!list_empty(&ud->desc_to_purge))
575 schedule_work(&ud->purge_work);
578 static void udma_desc_free(struct virt_dma_desc *vd)
580 struct udma_dev *ud = to_udma_dev(vd->tx.chan->device);
581 struct udma_chan *uc = to_udma_chan(vd->tx.chan);
582 struct udma_desc *d = to_udma_desc(&vd->tx);
585 if (uc->terminated_desc == d)
586 uc->terminated_desc = NULL;
588 if (uc->use_dma_pool) {
589 udma_free_hwdesc(uc, d);
594 spin_lock_irqsave(&ud->lock, flags);
595 list_add_tail(&vd->node, &ud->desc_to_purge);
596 spin_unlock_irqrestore(&ud->lock, flags);
598 schedule_work(&ud->purge_work);
601 static bool udma_is_chan_running(struct udma_chan *uc)
607 trt_ctl = udma_tchanrt_read(uc, UDMA_CHAN_RT_CTL_REG);
609 rrt_ctl = udma_rchanrt_read(uc, UDMA_CHAN_RT_CTL_REG);
611 if (trt_ctl & UDMA_CHAN_RT_CTL_EN || rrt_ctl & UDMA_CHAN_RT_CTL_EN)
617 static bool udma_is_chan_paused(struct udma_chan *uc)
621 switch (uc->config.dir) {
623 val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PEER_RT_EN_REG);
624 pause_mask = UDMA_PEER_RT_EN_PAUSE;
627 val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_RT_EN_REG);
628 pause_mask = UDMA_PEER_RT_EN_PAUSE;
631 val = udma_tchanrt_read(uc, UDMA_CHAN_RT_CTL_REG);
632 pause_mask = UDMA_CHAN_RT_CTL_PAUSE;
638 if (val & pause_mask)
644 static inline dma_addr_t udma_get_rx_flush_hwdesc_paddr(struct udma_chan *uc)
646 return uc->ud->rx_flush.hwdescs[uc->config.pkt_mode].cppi5_desc_paddr;
649 static int udma_push_to_ring(struct udma_chan *uc, int idx)
651 struct udma_desc *d = uc->desc;
652 struct k3_ring *ring = NULL;
655 switch (uc->config.dir) {
657 ring = uc->rflow->fd_ring;
661 ring = uc->tchan->t_ring;
667 /* RX flush packet: idx == -1 is only passed in case of DEV_TO_MEM */
669 paddr = udma_get_rx_flush_hwdesc_paddr(uc);
671 paddr = udma_curr_cppi5_desc_paddr(d, idx);
673 wmb(); /* Ensure that writes are not moved over this point */
676 return k3_ringacc_ring_push(ring, &paddr);
679 static bool udma_desc_is_rx_flush(struct udma_chan *uc, dma_addr_t addr)
681 if (uc->config.dir != DMA_DEV_TO_MEM)
684 if (addr == udma_get_rx_flush_hwdesc_paddr(uc))
690 static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
692 struct k3_ring *ring = NULL;
695 switch (uc->config.dir) {
697 ring = uc->rflow->r_ring;
701 ring = uc->tchan->tc_ring;
707 ret = k3_ringacc_ring_pop(ring, addr);
711 rmb(); /* Ensure that reads are not moved before this point */
713 /* Teardown completion */
714 if (cppi5_desc_is_tdcm(*addr))
717 /* Check for flush descriptor */
718 if (udma_desc_is_rx_flush(uc, *addr))
724 static void udma_reset_rings(struct udma_chan *uc)
726 struct k3_ring *ring1 = NULL;
727 struct k3_ring *ring2 = NULL;
729 switch (uc->config.dir) {
732 ring1 = uc->rflow->fd_ring;
733 ring2 = uc->rflow->r_ring;
739 ring1 = uc->tchan->t_ring;
740 ring2 = uc->tchan->tc_ring;
748 k3_ringacc_ring_reset_dma(ring1,
749 k3_ringacc_ring_get_occ(ring1));
751 k3_ringacc_ring_reset(ring2);
753 /* make sure we are not leaking memory due to a stalled descriptor */
754 if (uc->terminated_desc) {
755 udma_desc_free(&uc->terminated_desc->vd);
756 uc->terminated_desc = NULL;
760 static void udma_reset_counters(struct udma_chan *uc)
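/*
 * Note (assumption, based on the write-back pattern below): the realtime
 * byte/packet counters decrement by the value written to them, so reading
 * a counter and writing the same value back effectively clears it.
 */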
765 val = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
766 udma_tchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);
768 val = udma_tchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG);
769 udma_tchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);
771 val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PCNT_REG);
772 udma_tchanrt_write(uc, UDMA_CHAN_RT_PCNT_REG, val);
775 val = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
776 udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
781 val = udma_rchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
782 udma_rchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);
784 val = udma_rchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG);
785 udma_rchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);
787 val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PCNT_REG);
788 udma_rchanrt_write(uc, UDMA_CHAN_RT_PCNT_REG, val);
790 val = udma_rchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
791 udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
797 static int udma_reset_chan(struct udma_chan *uc, bool hard)
799 switch (uc->config.dir) {
801 udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
802 udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
805 udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
806 udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG, 0);
809 udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
810 udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG, 0);
816 /* Reset all counters */
817 udma_reset_counters(uc);
819 /* Hard reset: re-initialize the channel by freeing and re-allocating its resources */
821 struct udma_chan_config ucc_backup;
824 memcpy(&ucc_backup, &uc->config, sizeof(uc->config));
825 uc->ud->ddev.device_free_chan_resources(&uc->vc.chan);
827 /* restore the channel configuration */
828 memcpy(&uc->config, &ucc_backup, sizeof(uc->config));
829 ret = uc->ud->ddev.device_alloc_chan_resources(&uc->vc.chan);
834 * Setting forced teardown after forced reset helps recovering
837 if (uc->config.dir == DMA_DEV_TO_MEM)
838 udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
839 UDMA_CHAN_RT_CTL_EN |
840 UDMA_CHAN_RT_CTL_TDOWN |
841 UDMA_CHAN_RT_CTL_FTDOWN);
843 uc->state = UDMA_CHAN_IS_IDLE;
848 static void udma_start_desc(struct udma_chan *uc)
850 struct udma_chan_config *ucc = &uc->config;
852 if (uc->ud->match_data->type == DMA_TYPE_UDMA && ucc->pkt_mode &&
853 (uc->cyclic || ucc->dir == DMA_DEV_TO_MEM)) {
857 * UDMA only: Push all descriptors to ring for packet mode
859 * PKTDMA supports pre-linked descriptor and cyclic is not
862 for (i = 0; i < uc->desc->sglen; i++)
863 udma_push_to_ring(uc, i);
865 udma_push_to_ring(uc, 0);
869 static bool udma_chan_needs_reconfiguration(struct udma_chan *uc)
871 /* Only PDMAs have staticTR */
872 if (uc->config.ep_type == PSIL_EP_NATIVE)
875 /* Check if the staticTR configuration has changed for TX */
876 if (memcmp(&uc->static_tr, &uc->desc->static_tr, sizeof(uc->static_tr)))
882 static int udma_start(struct udma_chan *uc)
884 struct virt_dma_desc *vd = vchan_next_desc(&uc->vc);
893 uc->desc = to_udma_desc(&vd->tx);
895 /* Channel is already running and does not need reconfiguration */
896 if (udma_is_chan_running(uc) && !udma_chan_needs_reconfiguration(uc)) {
901 /* Make sure that we clear the teardown bit, if it is set */
902 udma_reset_chan(uc, false);
904 /* Push descriptors before we start the channel */
907 switch (uc->desc->dir) {
909 /* Config remote TR */
910 if (uc->config.ep_type == PSIL_EP_PDMA_XY) {
911 u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) |
912 PDMA_STATIC_TR_X(uc->desc->static_tr.elsize);
913 const struct udma_match_data *match_data =
916 if (uc->config.enable_acc32)
917 val |= PDMA_STATIC_TR_XY_ACC32;
918 if (uc->config.enable_burst)
919 val |= PDMA_STATIC_TR_XY_BURST;
921 udma_rchanrt_write(uc,
922 UDMA_CHAN_RT_PEER_STATIC_TR_XY_REG,
925 udma_rchanrt_write(uc,
926 UDMA_CHAN_RT_PEER_STATIC_TR_Z_REG,
927 PDMA_STATIC_TR_Z(uc->desc->static_tr.bstcnt,
928 match_data->statictr_z_mask));
930 /* save the current staticTR configuration */
931 memcpy(&uc->static_tr, &uc->desc->static_tr,
932 sizeof(uc->static_tr));
935 udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
936 UDMA_CHAN_RT_CTL_EN);
939 udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
940 UDMA_PEER_RT_EN_ENABLE);
944 /* Config remote TR */
945 if (uc->config.ep_type == PSIL_EP_PDMA_XY) {
946 u32 val = PDMA_STATIC_TR_Y(uc->desc->static_tr.elcnt) |
947 PDMA_STATIC_TR_X(uc->desc->static_tr.elsize);
949 if (uc->config.enable_acc32)
950 val |= PDMA_STATIC_TR_XY_ACC32;
951 if (uc->config.enable_burst)
952 val |= PDMA_STATIC_TR_XY_BURST;
954 udma_tchanrt_write(uc,
955 UDMA_CHAN_RT_PEER_STATIC_TR_XY_REG,
958 /* save the current staticTR configuration */
959 memcpy(&uc->static_tr, &uc->desc->static_tr,
960 sizeof(uc->static_tr));
964 udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
965 UDMA_PEER_RT_EN_ENABLE);
967 udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
968 UDMA_CHAN_RT_CTL_EN);
972 udma_rchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
973 UDMA_CHAN_RT_CTL_EN);
974 udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
975 UDMA_CHAN_RT_CTL_EN);
982 uc->state = UDMA_CHAN_IS_ACTIVE;
988 static int udma_stop(struct udma_chan *uc)
990 enum udma_chan_state old_state = uc->state;
992 uc->state = UDMA_CHAN_IS_TERMINATING;
993 reinit_completion(&uc->teardown_completed);
995 switch (uc->config.dir) {
997 if (!uc->cyclic && !uc->desc)
998 udma_push_to_ring(uc, -1);
1000 udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
1001 UDMA_PEER_RT_EN_ENABLE |
1002 UDMA_PEER_RT_EN_TEARDOWN);
1004 case DMA_MEM_TO_DEV:
1005 udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
1006 UDMA_PEER_RT_EN_ENABLE |
1007 UDMA_PEER_RT_EN_FLUSH);
1008 udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
1009 UDMA_CHAN_RT_CTL_EN |
1010 UDMA_CHAN_RT_CTL_TDOWN);
1012 case DMA_MEM_TO_MEM:
1013 udma_tchanrt_write(uc, UDMA_CHAN_RT_CTL_REG,
1014 UDMA_CHAN_RT_CTL_EN |
1015 UDMA_CHAN_RT_CTL_TDOWN);
1018 uc->state = old_state;
1019 complete_all(&uc->teardown_completed);
1026 static void udma_cyclic_packet_elapsed(struct udma_chan *uc)
1028 struct udma_desc *d = uc->desc;
1029 struct cppi5_host_desc_t *h_desc;
1031 h_desc = d->hwdesc[d->desc_idx].cppi5_desc_vaddr;
1032 cppi5_hdesc_reset_to_original(h_desc);
1033 udma_push_to_ring(uc, d->desc_idx);
1034 d->desc_idx = (d->desc_idx + 1) % d->sglen;
1037 static inline void udma_fetch_epib(struct udma_chan *uc, struct udma_desc *d)
1039 struct cppi5_host_desc_t *h_desc = d->hwdesc[0].cppi5_desc_vaddr;
1041 memcpy(d->metadata, h_desc->epib, d->metadata_size);
1044 static bool udma_is_desc_really_done(struct udma_chan *uc, struct udma_desc *d)
1046 u32 peer_bcnt, bcnt;
1048 /* Only TX towards PDMA is affected */
1049 if (uc->config.ep_type == PSIL_EP_NATIVE ||
1050 uc->config.dir != DMA_MEM_TO_DEV)
1053 peer_bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_PEER_BCNT_REG);
1054 bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
1056 /* Transfer is incomplete, store current residue and time stamp */
1057 if (peer_bcnt < bcnt) {
1058 uc->tx_drain.residue = bcnt - peer_bcnt;
1059 uc->tx_drain.tstamp = ktime_get();
1066 static void udma_check_tx_completion(struct work_struct *work)
1068 struct udma_chan *uc = container_of(work, typeof(*uc),
1069 tx_drain.work.work);
1070 bool desc_done = true;
1073 unsigned long delay;
1077 /* Get previous residue and time stamp */
1078 residue_diff = uc->tx_drain.residue;
1079 time_diff = uc->tx_drain.tstamp;
1081 * Get current residue and time stamp or see if
1082 * transfer is complete
1084 desc_done = udma_is_desc_really_done(uc, uc->desc);
1089 * Find the time delta and residue delta w.r.t
1092 time_diff = ktime_sub(uc->tx_drain.tstamp,
1094 residue_diff -= uc->tx_drain.residue;
1097 * Try to guess when we should check
1098 * next time by calculating rate at
1099 * which data is being drained at the
1102 delay = (time_diff / residue_diff) *
1103 uc->tx_drain.residue;
1105 /* No progress, check again in 1 second */
1106 schedule_delayed_work(&uc->tx_drain.work, HZ);
1110 usleep_range(ktime_to_us(delay),
1111 ktime_to_us(delay) + 10);
1116 struct udma_desc *d = uc->desc;
1118 uc->bcnt += d->residue;
1120 vchan_cookie_complete(&d->vd);
1128 static irqreturn_t udma_ring_irq_handler(int irq, void *data)
1130 struct udma_chan *uc = data;
1131 struct udma_desc *d;
1132 dma_addr_t paddr = 0;
1134 if (udma_pop_from_ring(uc, &paddr) || !paddr)
1137 spin_lock(&uc->vc.lock);
1139 /* Teardown completion message */
1140 if (cppi5_desc_is_tdcm(paddr)) {
1141 complete_all(&uc->teardown_completed);
1143 if (uc->terminated_desc) {
1144 udma_desc_free(&uc->terminated_desc->vd);
1145 uc->terminated_desc = NULL;
1154 d = udma_udma_desc_from_paddr(uc, paddr);
1157 dma_addr_t desc_paddr = udma_curr_cppi5_desc_paddr(d,
1159 if (desc_paddr != paddr) {
1160 dev_err(uc->ud->dev, "not matching descriptors!\n");
1164 if (d == uc->desc) {
1165 /* active descriptor */
1167 udma_cyclic_packet_elapsed(uc);
1168 vchan_cyclic_callback(&d->vd);
1170 if (udma_is_desc_really_done(uc, d)) {
1171 uc->bcnt += d->residue;
1173 vchan_cookie_complete(&d->vd);
1175 schedule_delayed_work(&uc->tx_drain.work,
1181 * terminated descriptor, mark the descriptor as
1182 * completed to update the channel's cookie marker
1184 dma_cookie_complete(&d->vd.tx);
1188 spin_unlock(&uc->vc.lock);
1193 static irqreturn_t udma_udma_irq_handler(int irq, void *data)
1195 struct udma_chan *uc = data;
1196 struct udma_desc *d;
1198 spin_lock(&uc->vc.lock);
1201 d->tr_idx = (d->tr_idx + 1) % d->sglen;
1204 vchan_cyclic_callback(&d->vd);
1206 /* TODO: figure out the real amount of data */
1207 uc->bcnt += d->residue;
1209 vchan_cookie_complete(&d->vd);
1213 spin_unlock(&uc->vc.lock);
1219 * __udma_alloc_gp_rflow_range - alloc range of GP RX flows
1221 * @from: Start the search from this flow id number
1222 * @cnt: Number of consecutive flow ids to allocate
1224 * Allocate a range of RX flow ids for future use; those flows can be requested
1225 * only by explicit flow id number. If @from is set to -1 it will try to find the
1226 * first free range. If @from is a positive value it will force allocation only
1227 * of the specified range of flows.
1229 * Returns -ENOMEM if a free range can't be found,
1230 * -EEXIST if the requested range is busy,
1231 * -EINVAL if wrong input values are passed.
1232 * Returns the flow id on success.
1234 static int __udma_alloc_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
1236 int start, tmp_from;
1237 DECLARE_BITMAP(tmp, K3_UDMA_MAX_RFLOWS);
1241 tmp_from = ud->rchan_cnt;
1242 /* default flows can't be allocated and are accessible only by id */
1243 if (tmp_from < ud->rchan_cnt)
1246 if (tmp_from + cnt > ud->rflow_cnt)
1249 bitmap_or(tmp, ud->rflow_gp_map, ud->rflow_gp_map_allocated,
1252 start = bitmap_find_next_zero_area(tmp,
1255 if (start >= ud->rflow_cnt)
1258 if (from >= 0 && start != from)
1261 bitmap_set(ud->rflow_gp_map_allocated, start, cnt);
1265 static int __udma_free_gp_rflow_range(struct udma_dev *ud, int from, int cnt)
1267 if (from < ud->rchan_cnt)
1269 if (from + cnt > ud->rflow_cnt)
1272 bitmap_clear(ud->rflow_gp_map_allocated, from, cnt);
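/*
 * Usage sketch (hypothetical caller): reserve two consecutive GP RX flows
 * anywhere in the GP range, then release them again:
 *
 *	int start = __udma_alloc_gp_rflow_range(ud, -1, 2);
 *
 *	if (start >= 0)
 *		__udma_free_gp_rflow_range(ud, start, 2);
 */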
1276 static struct udma_rflow *__udma_get_rflow(struct udma_dev *ud, int id)
1279 * Attempt to request rflow by ID can be made for any rflow
1280 * if not in use, with the assumption that the caller knows what it's doing.
1281 * TI-SCI FW will perform an additional permission check anyway, it's
1285 if (id < 0 || id >= ud->rflow_cnt)
1286 return ERR_PTR(-ENOENT);
1288 if (test_bit(id, ud->rflow_in_use))
1289 return ERR_PTR(-ENOENT);
1291 if (ud->rflow_gp_map) {
1292 /* GP rflow has to be allocated first */
1293 if (!test_bit(id, ud->rflow_gp_map) &&
1294 !test_bit(id, ud->rflow_gp_map_allocated))
1295 return ERR_PTR(-EINVAL);
1298 dev_dbg(ud->dev, "get rflow%d\n", id);
1299 set_bit(id, ud->rflow_in_use);
1300 return &ud->rflows[id];
1303 static void __udma_put_rflow(struct udma_dev *ud, struct udma_rflow *rflow)
1305 if (!test_bit(rflow->id, ud->rflow_in_use)) {
1306 dev_err(ud->dev, "attempt to put unused rflow%d\n", rflow->id);
1310 dev_dbg(ud->dev, "put rflow%d\n", rflow->id);
1311 clear_bit(rflow->id, ud->rflow_in_use);
1314 #define UDMA_RESERVE_RESOURCE(res) \
1315 static struct udma_##res *__udma_reserve_##res(struct udma_dev *ud, \
1316 enum udma_tp_level tpl, \
1320 if (test_bit(id, ud->res##_map)) { \
1321 dev_err(ud->dev, #res "%d is in use\n", id); \
1322 return ERR_PTR(-ENOENT); \
1327 if (tpl >= ud->res##_tpl.levels) \
1328 tpl = ud->res##_tpl.levels - 1; \
1330 start = ud->res##_tpl.start_idx[tpl]; \
1332 id = find_next_zero_bit(ud->res##_map, ud->res##_cnt, \
1334 if (id == ud->res##_cnt) { \
1335 return ERR_PTR(-ENOENT); \
1339 set_bit(id, ud->res##_map); \
1340 return &ud->res##s[id]; \
1343 UDMA_RESERVE_RESOURCE(bchan);
1344 UDMA_RESERVE_RESOURCE(tchan);
1345 UDMA_RESERVE_RESOURCE(rchan);
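/*
 * For reference, a simplified sketch of what UDMA_RESERVE_RESOURCE(tchan)
 * expands to (the full body follows the macro above):
 *
 *	static struct udma_tchan *__udma_reserve_tchan(struct udma_dev *ud,
 *						       enum udma_tp_level tpl,
 *						       int id)
 *	{
 *		...
 *		set_bit(id, ud->tchan_map);
 *		return &ud->tchans[id];
 *	}
 */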
1347 static int bcdma_get_bchan(struct udma_chan *uc)
1349 struct udma_dev *ud = uc->ud;
1350 enum udma_tp_level tpl;
1354 dev_dbg(ud->dev, "chan%d: already have bchan%d allocated\n",
1355 uc->id, uc->bchan->id);
1360 * Use normal channels for peripherals, and highest TPL channel for
1363 if (uc->config.tr_trigger_type)
1366 tpl = ud->bchan_tpl.levels - 1;
1368 uc->bchan = __udma_reserve_bchan(ud, tpl, -1);
1369 if (IS_ERR(uc->bchan)) {
1370 ret = PTR_ERR(uc->bchan);
1375 uc->tchan = uc->bchan;
1380 static int udma_get_tchan(struct udma_chan *uc)
1382 struct udma_dev *ud = uc->ud;
1386 dev_dbg(ud->dev, "chan%d: already have tchan%d allocated\n",
1387 uc->id, uc->tchan->id);
1392 * mapped_channel_id is -1 for UDMA, BCDMA and PKTDMA unmapped channels.
1393 * For PKTDMA mapped channels it is configured to a channel which must
1394 * be used to service the peripheral.
1396 uc->tchan = __udma_reserve_tchan(ud, uc->config.channel_tpl,
1397 uc->config.mapped_channel_id);
1398 if (IS_ERR(uc->tchan)) {
1399 ret = PTR_ERR(uc->tchan);
1404 if (ud->tflow_cnt) {
1407 /* Only PKTDMA has support for tx flows */
1408 if (uc->config.default_flow_id >= 0)
1409 tflow_id = uc->config.default_flow_id;
1411 tflow_id = uc->tchan->id;
1413 if (test_bit(tflow_id, ud->tflow_map)) {
1414 dev_err(ud->dev, "tflow%d is in use\n", tflow_id);
1415 clear_bit(uc->tchan->id, ud->tchan_map);
1420 uc->tchan->tflow_id = tflow_id;
1421 set_bit(tflow_id, ud->tflow_map);
1423 uc->tchan->tflow_id = -1;
1429 static int udma_get_rchan(struct udma_chan *uc)
1431 struct udma_dev *ud = uc->ud;
1435 dev_dbg(ud->dev, "chan%d: already have rchan%d allocated\n",
1436 uc->id, uc->rchan->id);
1441 * mapped_channel_id is -1 for UDMA, BCDMA and PKTDMA unmapped channels.
1442 * For PKTDMA mapped channels it is configured to a channel which must
1443 * be used to service the peripheral.
1445 uc->rchan = __udma_reserve_rchan(ud, uc->config.channel_tpl,
1446 uc->config.mapped_channel_id);
1447 if (IS_ERR(uc->rchan)) {
1448 ret = PTR_ERR(uc->rchan);
1456 static int udma_get_chan_pair(struct udma_chan *uc)
1458 struct udma_dev *ud = uc->ud;
1461 if ((uc->tchan && uc->rchan) && uc->tchan->id == uc->rchan->id) {
1462 dev_info(ud->dev, "chan%d: already have %d pair allocated\n",
1463 uc->id, uc->tchan->id);
1468 dev_err(ud->dev, "chan%d: already have tchan%d allocated\n",
1469 uc->id, uc->tchan->id);
1471 } else if (uc->rchan) {
1472 dev_err(ud->dev, "chan%d: already have rchan%d allocated\n",
1473 uc->id, uc->rchan->id);
1477 /* Can be optimized, but let's have it like this for now */
1478 end = min(ud->tchan_cnt, ud->rchan_cnt);
1480 * Try to use the highest TPL channel pair for MEM_TO_MEM channels
1481 * Note: in UDMAP the channel TPL is symmetric between tchan and rchan
1483 chan_id = ud->tchan_tpl.start_idx[ud->tchan_tpl.levels - 1];
1484 for (; chan_id < end; chan_id++) {
1485 if (!test_bit(chan_id, ud->tchan_map) &&
1486 !test_bit(chan_id, ud->rchan_map))
1493 set_bit(chan_id, ud->tchan_map);
1494 set_bit(chan_id, ud->rchan_map);
1495 uc->tchan = &ud->tchans[chan_id];
1496 uc->rchan = &ud->rchans[chan_id];
1498 /* UDMA does not use tx flows */
1499 uc->tchan->tflow_id = -1;
1504 static int udma_get_rflow(struct udma_chan *uc, int flow_id)
1506 struct udma_dev *ud = uc->ud;
1510 dev_err(ud->dev, "chan%d: does not have rchan??\n", uc->id);
1515 dev_dbg(ud->dev, "chan%d: already have rflow%d allocated\n",
1516 uc->id, uc->rflow->id);
1520 uc->rflow = __udma_get_rflow(ud, flow_id);
1521 if (IS_ERR(uc->rflow)) {
1522 ret = PTR_ERR(uc->rflow);
1530 static void bcdma_put_bchan(struct udma_chan *uc)
1532 struct udma_dev *ud = uc->ud;
1535 dev_dbg(ud->dev, "chan%d: put bchan%d\n", uc->id,
1537 clear_bit(uc->bchan->id, ud->bchan_map);
1543 static void udma_put_rchan(struct udma_chan *uc)
1545 struct udma_dev *ud = uc->ud;
1548 dev_dbg(ud->dev, "chan%d: put rchan%d\n", uc->id,
1550 clear_bit(uc->rchan->id, ud->rchan_map);
1555 static void udma_put_tchan(struct udma_chan *uc)
1557 struct udma_dev *ud = uc->ud;
1560 dev_dbg(ud->dev, "chan%d: put tchan%d\n", uc->id,
1562 clear_bit(uc->tchan->id, ud->tchan_map);
1564 if (uc->tchan->tflow_id >= 0)
1565 clear_bit(uc->tchan->tflow_id, ud->tflow_map);
1571 static void udma_put_rflow(struct udma_chan *uc)
1573 struct udma_dev *ud = uc->ud;
1576 dev_dbg(ud->dev, "chan%d: put rflow%d\n", uc->id,
1578 __udma_put_rflow(ud, uc->rflow);
1583 static void bcdma_free_bchan_resources(struct udma_chan *uc)
1588 k3_ringacc_ring_free(uc->bchan->tc_ring);
1589 k3_ringacc_ring_free(uc->bchan->t_ring);
1590 uc->bchan->tc_ring = NULL;
1591 uc->bchan->t_ring = NULL;
1592 k3_configure_chan_coherency(&uc->vc.chan, 0);
1594 bcdma_put_bchan(uc);
1597 static int bcdma_alloc_bchan_resources(struct udma_chan *uc)
1599 struct k3_ring_cfg ring_cfg;
1600 struct udma_dev *ud = uc->ud;
1603 ret = bcdma_get_bchan(uc);
1607 ret = k3_ringacc_request_rings_pair(ud->ringacc, uc->bchan->id, -1,
1609 &uc->bchan->tc_ring);
1615 memset(&ring_cfg, 0, sizeof(ring_cfg));
1616 ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1617 ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
1618 ring_cfg.mode = K3_RINGACC_RING_MODE_RING;
1620 k3_configure_chan_coherency(&uc->vc.chan, ud->asel);
1621 ring_cfg.asel = ud->asel;
1622 ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan);
1624 ret = k3_ringacc_ring_cfg(uc->bchan->t_ring, &ring_cfg);
1631 k3_ringacc_ring_free(uc->bchan->tc_ring);
1632 uc->bchan->tc_ring = NULL;
1633 k3_ringacc_ring_free(uc->bchan->t_ring);
1634 uc->bchan->t_ring = NULL;
1635 k3_configure_chan_coherency(&uc->vc.chan, 0);
1637 bcdma_put_bchan(uc);
1642 static void udma_free_tx_resources(struct udma_chan *uc)
1647 k3_ringacc_ring_free(uc->tchan->t_ring);
1648 k3_ringacc_ring_free(uc->tchan->tc_ring);
1649 uc->tchan->t_ring = NULL;
1650 uc->tchan->tc_ring = NULL;
1655 static int udma_alloc_tx_resources(struct udma_chan *uc)
1657 struct k3_ring_cfg ring_cfg;
1658 struct udma_dev *ud = uc->ud;
1659 struct udma_tchan *tchan;
1662 ret = udma_get_tchan(uc);
1667 if (tchan->tflow_id >= 0)
1668 ring_idx = tchan->tflow_id;
1670 ring_idx = ud->bchan_cnt + tchan->id;
1672 ret = k3_ringacc_request_rings_pair(ud->ringacc, ring_idx, -1,
1680 memset(&ring_cfg, 0, sizeof(ring_cfg));
1681 ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1682 ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
1683 if (ud->match_data->type == DMA_TYPE_UDMA) {
1684 ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
1686 ring_cfg.mode = K3_RINGACC_RING_MODE_RING;
1688 k3_configure_chan_coherency(&uc->vc.chan, uc->config.asel);
1689 ring_cfg.asel = uc->config.asel;
1690 ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan);
1693 ret = k3_ringacc_ring_cfg(tchan->t_ring, &ring_cfg);
1694 ret |= k3_ringacc_ring_cfg(tchan->tc_ring, &ring_cfg);
1702 k3_ringacc_ring_free(uc->tchan->tc_ring);
1703 uc->tchan->tc_ring = NULL;
1704 k3_ringacc_ring_free(uc->tchan->t_ring);
1705 uc->tchan->t_ring = NULL;
1712 static void udma_free_rx_resources(struct udma_chan *uc)
1718 struct udma_rflow *rflow = uc->rflow;
1720 k3_ringacc_ring_free(rflow->fd_ring);
1721 k3_ringacc_ring_free(rflow->r_ring);
1722 rflow->fd_ring = NULL;
1723 rflow->r_ring = NULL;
1731 static int udma_alloc_rx_resources(struct udma_chan *uc)
1733 struct udma_dev *ud = uc->ud;
1734 struct k3_ring_cfg ring_cfg;
1735 struct udma_rflow *rflow;
1739 ret = udma_get_rchan(uc);
1743 /* For MEM_TO_MEM we don't need rflow or rings */
1744 if (uc->config.dir == DMA_MEM_TO_MEM)
1747 if (uc->config.default_flow_id >= 0)
1748 ret = udma_get_rflow(uc, uc->config.default_flow_id);
1750 ret = udma_get_rflow(uc, uc->rchan->id);
1759 fd_ring_id = ud->tflow_cnt + rflow->id;
1761 fd_ring_id = ud->bchan_cnt + ud->tchan_cnt + ud->echan_cnt +
1764 ret = k3_ringacc_request_rings_pair(ud->ringacc, fd_ring_id, -1,
1765 &rflow->fd_ring, &rflow->r_ring);
1771 memset(&ring_cfg, 0, sizeof(ring_cfg));
1773 ring_cfg.elm_size = K3_RINGACC_RING_ELSIZE_8;
1774 if (ud->match_data->type == DMA_TYPE_UDMA) {
1775 if (uc->config.pkt_mode)
1776 ring_cfg.size = SG_MAX_SEGMENTS;
1778 ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1780 ring_cfg.mode = K3_RINGACC_RING_MODE_MESSAGE;
1782 ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1783 ring_cfg.mode = K3_RINGACC_RING_MODE_RING;
1785 k3_configure_chan_coherency(&uc->vc.chan, uc->config.asel);
1786 ring_cfg.asel = uc->config.asel;
1787 ring_cfg.dma_dev = dmaengine_get_dma_device(&uc->vc.chan);
1790 ret = k3_ringacc_ring_cfg(rflow->fd_ring, &ring_cfg);
1792 ring_cfg.size = K3_UDMA_DEFAULT_RING_SIZE;
1793 ret |= k3_ringacc_ring_cfg(rflow->r_ring, &ring_cfg);
1801 k3_ringacc_ring_free(rflow->r_ring);
1802 rflow->r_ring = NULL;
1803 k3_ringacc_ring_free(rflow->fd_ring);
1804 rflow->fd_ring = NULL;
1813 #define TISCI_BCDMA_BCHAN_VALID_PARAMS ( \
1814 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
1815 TI_SCI_MSG_VALUE_RM_UDMAP_CH_EXTENDED_CH_TYPE_VALID)
1817 #define TISCI_BCDMA_TCHAN_VALID_PARAMS ( \
1818 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
1819 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID)
1821 #define TISCI_BCDMA_RCHAN_VALID_PARAMS ( \
1822 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID)
1824 #define TISCI_UDMA_TCHAN_VALID_PARAMS ( \
1825 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
1826 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_EINFO_VALID | \
1827 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_FILT_PSWORDS_VALID | \
1828 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \
1829 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_SUPR_TDPKT_VALID | \
1830 TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \
1831 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | \
1832 TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)
1834 #define TISCI_UDMA_RCHAN_VALID_PARAMS ( \
1835 TI_SCI_MSG_VALUE_RM_UDMAP_CH_PAUSE_ON_ERR_VALID | \
1836 TI_SCI_MSG_VALUE_RM_UDMAP_CH_FETCH_SIZE_VALID | \
1837 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CQ_QNUM_VALID | \
1838 TI_SCI_MSG_VALUE_RM_UDMAP_CH_CHAN_TYPE_VALID | \
1839 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_SHORT_VALID | \
1840 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_IGNORE_LONG_VALID | \
1841 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_START_VALID | \
1842 TI_SCI_MSG_VALUE_RM_UDMAP_CH_RX_FLOWID_CNT_VALID | \
1843 TI_SCI_MSG_VALUE_RM_UDMAP_CH_ATYPE_VALID)
1845 static int udma_tisci_m2m_channel_config(struct udma_chan *uc)
1847 struct udma_dev *ud = uc->ud;
1848 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1849 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
1850 struct udma_tchan *tchan = uc->tchan;
1851 struct udma_rchan *rchan = uc->rchan;
1856 /* Non synchronized - mem to mem type of transfer */
1857 int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
1858 struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
1859 struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
1861 if (ud->match_data->flags & UDMA_FLAG_BURST_SIZE) {
1862 tpl = udma_get_chan_tpl_index(&ud->tchan_tpl, tchan->id);
1864 burst_size = ud->match_data->burst_size[tpl];
1867 req_tx.valid_params = TISCI_UDMA_TCHAN_VALID_PARAMS;
1868 req_tx.nav_id = tisci_rm->tisci_dev_id;
1869 req_tx.index = tchan->id;
1870 req_tx.tx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
1871 req_tx.tx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
1872 req_tx.txcq_qnum = tc_ring;
1873 req_tx.tx_atype = ud->atype;
1875 req_tx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID;
1876 req_tx.tx_burst_size = burst_size;
1879 ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
1881 dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
1885 req_rx.valid_params = TISCI_UDMA_RCHAN_VALID_PARAMS;
1886 req_rx.nav_id = tisci_rm->tisci_dev_id;
1887 req_rx.index = rchan->id;
1888 req_rx.rx_fetch_size = sizeof(struct cppi5_desc_hdr_t) >> 2;
1889 req_rx.rxcq_qnum = tc_ring;
1890 req_rx.rx_chan_type = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_BCOPY_PBRR;
1891 req_rx.rx_atype = ud->atype;
1893 req_rx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID;
1894 req_rx.rx_burst_size = burst_size;
1897 ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
1899 dev_err(ud->dev, "rchan%d alloc failed %d\n", rchan->id, ret);
1904 static int bcdma_tisci_m2m_channel_config(struct udma_chan *uc)
1906 struct udma_dev *ud = uc->ud;
1907 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1908 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
1909 struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
1910 struct udma_bchan *bchan = uc->bchan;
1915 if (ud->match_data->flags & UDMA_FLAG_BURST_SIZE) {
1916 tpl = udma_get_chan_tpl_index(&ud->bchan_tpl, bchan->id);
1918 burst_size = ud->match_data->burst_size[tpl];
1921 req_tx.valid_params = TISCI_BCDMA_BCHAN_VALID_PARAMS;
1922 req_tx.nav_id = tisci_rm->tisci_dev_id;
1923 req_tx.extended_ch_type = TI_SCI_RM_BCDMA_EXTENDED_CH_TYPE_BCHAN;
1924 req_tx.index = bchan->id;
1926 req_tx.valid_params |= TI_SCI_MSG_VALUE_RM_UDMAP_CH_BURST_SIZE_VALID;
1927 req_tx.tx_burst_size = burst_size;
1930 ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
1932 dev_err(ud->dev, "bchan%d cfg failed %d\n", bchan->id, ret);
1937 static int udma_tisci_tx_channel_config(struct udma_chan *uc)
1939 struct udma_dev *ud = uc->ud;
1940 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1941 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
1942 struct udma_tchan *tchan = uc->tchan;
1943 int tc_ring = k3_ringacc_get_ring_id(tchan->tc_ring);
1944 struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
1945 u32 mode, fetch_size;
1948 if (uc->config.pkt_mode) {
1949 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
1950 fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
1951 uc->config.psd_size, 0);
1953 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
1954 fetch_size = sizeof(struct cppi5_desc_hdr_t);
1957 req_tx.valid_params = TISCI_UDMA_TCHAN_VALID_PARAMS;
1958 req_tx.nav_id = tisci_rm->tisci_dev_id;
1959 req_tx.index = tchan->id;
1960 req_tx.tx_chan_type = mode;
1961 req_tx.tx_supr_tdpkt = uc->config.notdpkt;
1962 req_tx.tx_fetch_size = fetch_size >> 2;
1963 req_tx.txcq_qnum = tc_ring;
1964 req_tx.tx_atype = uc->config.atype;
1965 if (uc->config.ep_type == PSIL_EP_PDMA_XY &&
1966 ud->match_data->flags & UDMA_FLAG_TDTYPE) {
1967 /* wait for peer to complete the teardown for PDMAs */
1968 req_tx.valid_params |=
1969 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_TDTYPE_VALID;
1970 req_tx.tx_tdtype = 1;
1973 ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
1975 dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
1980 static int bcdma_tisci_tx_channel_config(struct udma_chan *uc)
1982 struct udma_dev *ud = uc->ud;
1983 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
1984 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
1985 struct udma_tchan *tchan = uc->tchan;
1986 struct ti_sci_msg_rm_udmap_tx_ch_cfg req_tx = { 0 };
1989 req_tx.valid_params = TISCI_BCDMA_TCHAN_VALID_PARAMS;
1990 req_tx.nav_id = tisci_rm->tisci_dev_id;
1991 req_tx.index = tchan->id;
1992 req_tx.tx_supr_tdpkt = uc->config.notdpkt;
1993 if (ud->match_data->flags & UDMA_FLAG_TDTYPE) {
1994 /* wait for peer to complete the teardown for PDMAs */
1995 req_tx.valid_params |=
1996 TI_SCI_MSG_VALUE_RM_UDMAP_CH_TX_TDTYPE_VALID;
1997 req_tx.tx_tdtype = 1;
2000 ret = tisci_ops->tx_ch_cfg(tisci_rm->tisci, &req_tx);
2002 dev_err(ud->dev, "tchan%d cfg failed %d\n", tchan->id, ret);
2007 #define pktdma_tisci_tx_channel_config bcdma_tisci_tx_channel_config
2009 static int udma_tisci_rx_channel_config(struct udma_chan *uc)
2011 struct udma_dev *ud = uc->ud;
2012 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
2013 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
2014 struct udma_rchan *rchan = uc->rchan;
2015 int fd_ring = k3_ringacc_get_ring_id(uc->rflow->fd_ring);
2016 int rx_ring = k3_ringacc_get_ring_id(uc->rflow->r_ring);
2017 struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
2018 struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
2019 u32 mode, fetch_size;
2022 if (uc->config.pkt_mode) {
2023 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_PKT_PBRR;
2024 fetch_size = cppi5_hdesc_calc_size(uc->config.needs_epib,
2025 uc->config.psd_size, 0);
2027 mode = TI_SCI_RM_UDMAP_CHAN_TYPE_3RDP_PBRR;
2028 fetch_size = sizeof(struct cppi5_desc_hdr_t);
2031 req_rx.valid_params = TISCI_UDMA_RCHAN_VALID_PARAMS;
2032 req_rx.nav_id = tisci_rm->tisci_dev_id;
2033 req_rx.index = rchan->id;
2034 req_rx.rx_fetch_size = fetch_size >> 2;
2035 req_rx.rxcq_qnum = rx_ring;
2036 req_rx.rx_chan_type = mode;
2037 req_rx.rx_atype = uc->config.atype;
2039 ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
2041 dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret);
2045 flow_req.valid_params =
2046 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
2047 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
2048 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID |
2049 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DESC_TYPE_VALID |
2050 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_QNUM_VALID |
2051 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_HI_SEL_VALID |
2052 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_SRC_TAG_LO_SEL_VALID |
2053 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_HI_SEL_VALID |
2054 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_DEST_TAG_LO_SEL_VALID |
2055 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ0_SZ0_QNUM_VALID |
2056 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ1_QNUM_VALID |
2057 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ2_QNUM_VALID |
2058 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_FDQ3_QNUM_VALID;
2060 flow_req.nav_id = tisci_rm->tisci_dev_id;
2061 flow_req.flow_index = rchan->id;
2063 if (uc->config.needs_epib)
2064 flow_req.rx_einfo_present = 1;
2066 flow_req.rx_einfo_present = 0;
2067 if (uc->config.psd_size)
2068 flow_req.rx_psinfo_present = 1;
2070 flow_req.rx_psinfo_present = 0;
2071 flow_req.rx_error_handling = 1;
2072 flow_req.rx_dest_qnum = rx_ring;
2073 flow_req.rx_src_tag_hi_sel = UDMA_RFLOW_SRCTAG_NONE;
2074 flow_req.rx_src_tag_lo_sel = UDMA_RFLOW_SRCTAG_SRC_TAG;
2075 flow_req.rx_dest_tag_hi_sel = UDMA_RFLOW_DSTTAG_DST_TAG_HI;
2076 flow_req.rx_dest_tag_lo_sel = UDMA_RFLOW_DSTTAG_DST_TAG_LO;
2077 flow_req.rx_fdq0_sz0_qnum = fd_ring;
2078 flow_req.rx_fdq1_qnum = fd_ring;
2079 flow_req.rx_fdq2_qnum = fd_ring;
2080 flow_req.rx_fdq3_qnum = fd_ring;
2082 ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req);
2085 dev_err(ud->dev, "flow%d config failed: %d\n", rchan->id, ret);
2090 static int bcdma_tisci_rx_channel_config(struct udma_chan *uc)
2092 struct udma_dev *ud = uc->ud;
2093 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
2094 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
2095 struct udma_rchan *rchan = uc->rchan;
2096 struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
2099 req_rx.valid_params = TISCI_BCDMA_RCHAN_VALID_PARAMS;
2100 req_rx.nav_id = tisci_rm->tisci_dev_id;
2101 req_rx.index = rchan->id;
2103 ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
2105 dev_err(ud->dev, "rchan%d cfg failed %d\n", rchan->id, ret);
2110 static int pktdma_tisci_rx_channel_config(struct udma_chan *uc)
2112 struct udma_dev *ud = uc->ud;
2113 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
2114 const struct ti_sci_rm_udmap_ops *tisci_ops = tisci_rm->tisci_udmap_ops;
2115 struct ti_sci_msg_rm_udmap_rx_ch_cfg req_rx = { 0 };
2116 struct ti_sci_msg_rm_udmap_flow_cfg flow_req = { 0 };
2119 req_rx.valid_params = TISCI_BCDMA_RCHAN_VALID_PARAMS;
2120 req_rx.nav_id = tisci_rm->tisci_dev_id;
2121 req_rx.index = uc->rchan->id;
2123 ret = tisci_ops->rx_ch_cfg(tisci_rm->tisci, &req_rx);
2125 dev_err(ud->dev, "rchan%d cfg failed %d\n", uc->rchan->id, ret);
2129 flow_req.valid_params =
2130 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_EINFO_PRESENT_VALID |
2131 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_PSINFO_PRESENT_VALID |
2132 TI_SCI_MSG_VALUE_RM_UDMAP_FLOW_ERROR_HANDLING_VALID;
2134 flow_req.nav_id = tisci_rm->tisci_dev_id;
2135 flow_req.flow_index = uc->rflow->id;
2137 if (uc->config.needs_epib)
2138 flow_req.rx_einfo_present = 1;
2140 flow_req.rx_einfo_present = 0;
2141 if (uc->config.psd_size)
2142 flow_req.rx_psinfo_present = 1;
2144 flow_req.rx_psinfo_present = 0;
2145 flow_req.rx_error_handling = 1;
2147 ret = tisci_ops->rx_flow_cfg(tisci_rm->tisci, &flow_req);
2150 dev_err(ud->dev, "flow%d config failed: %d\n", uc->rflow->id,
2156 static int udma_alloc_chan_resources(struct dma_chan *chan)
2158 struct udma_chan *uc = to_udma_chan(chan);
2159 struct udma_dev *ud = to_udma_dev(chan->device);
2160 const struct udma_soc_data *soc_data = ud->soc_data;
2161 struct k3_ring *irq_ring;
2165 uc->dma_dev = ud->dev;
2167 if (uc->config.pkt_mode || uc->config.dir == DMA_MEM_TO_MEM) {
2168 uc->use_dma_pool = true;
2169 /* in case of MEM_TO_MEM we have a maximum of two TRs */
2170 if (uc->config.dir == DMA_MEM_TO_MEM) {
2171 uc->config.hdesc_size = cppi5_trdesc_calc_size(
2172 sizeof(struct cppi5_tr_type15_t), 2);
2173 uc->config.pkt_mode = false;
2177 if (uc->use_dma_pool) {
2178 uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev,
2179 uc->config.hdesc_size,
2182 if (!uc->hdesc_pool) {
2183 dev_err(ud->ddev.dev,
2184 "Descriptor pool allocation failed\n");
2185 uc->use_dma_pool = false;
2192 * Make sure that the completion is in a known state:
2193 * No teardown, the channel is idle
2195 reinit_completion(&uc->teardown_completed);
2196 complete_all(&uc->teardown_completed);
2197 uc->state = UDMA_CHAN_IS_IDLE;
2199 switch (uc->config.dir) {
2200 case DMA_MEM_TO_MEM:
2201 /* Non synchronized - mem to mem type of transfer */
2202 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__,
2205 ret = udma_get_chan_pair(uc);
2209 ret = udma_alloc_tx_resources(uc);
2215 ret = udma_alloc_rx_resources(uc);
2217 udma_free_tx_resources(uc);
2221 uc->config.src_thread = ud->psil_base + uc->tchan->id;
2222 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
2223 K3_PSIL_DST_THREAD_ID_OFFSET;
2225 irq_ring = uc->tchan->tc_ring;
2226 irq_udma_idx = uc->tchan->id;
2228 ret = udma_tisci_m2m_channel_config(uc);
2230 case DMA_MEM_TO_DEV:
2231 /* Slave transfer synchronized - mem to dev (TX) transfer */
2232 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
2235 ret = udma_alloc_tx_resources(uc);
2239 uc->config.src_thread = ud->psil_base + uc->tchan->id;
2240 uc->config.dst_thread = uc->config.remote_thread_id;
2241 uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
2243 irq_ring = uc->tchan->tc_ring;
2244 irq_udma_idx = uc->tchan->id;
2246 ret = udma_tisci_tx_channel_config(uc);
2248 case DMA_DEV_TO_MEM:
2249 /* Slave transfer synchronized - dev to mem (RX) transfer */
2250 dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
2253 ret = udma_alloc_rx_resources(uc);
2257 uc->config.src_thread = uc->config.remote_thread_id;
2258 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
2259 K3_PSIL_DST_THREAD_ID_OFFSET;
2261 irq_ring = uc->rflow->r_ring;
2262 irq_udma_idx = soc_data->oes.udma_rchan + uc->rchan->id;
2264 ret = udma_tisci_rx_channel_config(uc);
2267 /* Can not happen */
2268 dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
2269 __func__, uc->id, uc->config.dir);
2275 /* check if the channel configuration was successful */
2279 if (udma_is_chan_running(uc)) {
2280 dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
2281 udma_reset_chan(uc, false);
2282 if (udma_is_chan_running(uc)) {
2283 dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
2290 ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
2292 dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
2293 uc->config.src_thread, uc->config.dst_thread);
2297 uc->psil_paired = true;
2299 uc->irq_num_ring = k3_ringacc_get_ring_irq_num(irq_ring);
2300 if (uc->irq_num_ring <= 0) {
2301 dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
2302 k3_ringacc_get_ring_id(irq_ring));
2307 ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler,
2308 IRQF_TRIGGER_HIGH, uc->name, uc);
2310 dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
2314 /* Event from UDMA (TR events) only needed for slave TR mode channels */
2315 if (is_slave_direction(uc->config.dir) && !uc->config.pkt_mode) {
2316 uc->irq_num_udma = msi_get_virq(ud->dev, irq_udma_idx);
2317 if (uc->irq_num_udma <= 0) {
2318 dev_err(ud->dev, "Failed to get udma irq (index: %u)\n",
2320 free_irq(uc->irq_num_ring, uc);
2325 ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0,
2328 dev_err(ud->dev, "chan%d: UDMA irq request failed\n",
2330 free_irq(uc->irq_num_ring, uc);
2334 uc->irq_num_udma = 0;
2337 udma_reset_rings(uc);
2342 uc->irq_num_ring = 0;
2343 uc->irq_num_udma = 0;
2345 navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread);
2346 uc->psil_paired = false;
2348 udma_free_tx_resources(uc);
2349 udma_free_rx_resources(uc);
2351 udma_reset_uchan(uc);
2353 if (uc->use_dma_pool) {
2354 dma_pool_destroy(uc->hdesc_pool);
2355 uc->use_dma_pool = false;
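/*
 * Consumer-side sketch (not part of this driver): clients reach the
 * allocation path above through the generic dmaengine API; the "rx" channel
 * name and the addresses below are hypothetical.
 *
 *	struct dma_chan *chan = dma_request_chan(dev, "rx");
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_DEV_TO_MEM,
 *		.src_addr = fifo_paddr,
 *		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *	};
 *	struct dma_async_tx_descriptor *desc;
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	desc = dmaengine_prep_slave_single(chan, buf_paddr, len,
 *					   DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */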
2361 static int bcdma_alloc_chan_resources(struct dma_chan *chan)
2363 struct udma_chan *uc = to_udma_chan(chan);
2364 struct udma_dev *ud = to_udma_dev(chan->device);
2365 const struct udma_oes_offsets *oes = &ud->soc_data->oes;
2366 u32 irq_udma_idx, irq_ring_idx;
2369 /* Only TR mode is supported */
2370 uc->config.pkt_mode = false;
2373 * Make sure that the completion is in a known state:
2374 * No teardown, the channel is idle
2376 reinit_completion(&uc->teardown_completed);
2377 complete_all(&uc->teardown_completed);
2378 uc->state = UDMA_CHAN_IS_IDLE;
2380 switch (uc->config.dir) {
2381 case DMA_MEM_TO_MEM:
2382 /* Non synchronized - mem to mem type of transfer */
2383 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-MEM\n", __func__,
2386 ret = bcdma_alloc_bchan_resources(uc);
2390 irq_ring_idx = uc->bchan->id + oes->bcdma_bchan_ring;
2391 irq_udma_idx = uc->bchan->id + oes->bcdma_bchan_data;
2393 ret = bcdma_tisci_m2m_channel_config(uc);
2395 case DMA_MEM_TO_DEV:
2396 /* Slave transfer synchronized - mem to dev (TX) transfer */
2397 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
2400 ret = udma_alloc_tx_resources(uc);
2402 uc->config.remote_thread_id = -1;
2406 uc->config.src_thread = ud->psil_base + uc->tchan->id;
2407 uc->config.dst_thread = uc->config.remote_thread_id;
2408 uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
2410 irq_ring_idx = uc->tchan->id + oes->bcdma_tchan_ring;
2411 irq_udma_idx = uc->tchan->id + oes->bcdma_tchan_data;
2413 ret = bcdma_tisci_tx_channel_config(uc);
2415 case DMA_DEV_TO_MEM:
2416 /* Slave transfer synchronized - dev to mem (RX) transfer */
2417 dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
2420 ret = udma_alloc_rx_resources(uc);
2422 uc->config.remote_thread_id = -1;
2426 uc->config.src_thread = uc->config.remote_thread_id;
2427 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
2428 K3_PSIL_DST_THREAD_ID_OFFSET;
2430 irq_ring_idx = uc->rchan->id + oes->bcdma_rchan_ring;
2431 irq_udma_idx = uc->rchan->id + oes->bcdma_rchan_data;
2433 ret = bcdma_tisci_rx_channel_config(uc);
2436 /* Can not happen */
2437 dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
2438 __func__, uc->id, uc->config.dir);
2442 /* check if the channel configuration was successful */
2446 if (udma_is_chan_running(uc)) {
2447 dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
2448 udma_reset_chan(uc, false);
2449 if (udma_is_chan_running(uc)) {
2450 dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
2456 uc->dma_dev = dmaengine_get_dma_device(chan);
2457 if (uc->config.dir == DMA_MEM_TO_MEM && !uc->config.tr_trigger_type) {
2458 uc->config.hdesc_size = cppi5_trdesc_calc_size(
2459 sizeof(struct cppi5_tr_type15_t), 2);
2461 uc->hdesc_pool = dma_pool_create(uc->name, ud->ddev.dev,
2462 uc->config.hdesc_size,
2465 if (!uc->hdesc_pool) {
2466 dev_err(ud->ddev.dev,
2467 "Descriptor pool allocation failed\n");
2468 uc->use_dma_pool = false;
2473 uc->use_dma_pool = true;
2474 } else if (uc->config.dir != DMA_MEM_TO_MEM) {
2476 ret = navss_psil_pair(ud, uc->config.src_thread,
2477 uc->config.dst_thread);
2480 "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
2481 uc->config.src_thread, uc->config.dst_thread);
2485 uc->psil_paired = true;
2488 uc->irq_num_ring = msi_get_virq(ud->dev, irq_ring_idx);
2489 if (uc->irq_num_ring <= 0) {
2490 dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
2496 ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler,
2497 IRQF_TRIGGER_HIGH, uc->name, uc);
2499 dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
2503 /* Event from BCDMA (TR events) only needed for slave channels */
2504 if (is_slave_direction(uc->config.dir)) {
2505 uc->irq_num_udma = msi_get_virq(ud->dev, irq_udma_idx);
2506 if (uc->irq_num_udma <= 0) {
2507 dev_err(ud->dev, "Failed to get bcdma irq (index: %u)\n",
2509 free_irq(uc->irq_num_ring, uc);
2514 ret = request_irq(uc->irq_num_udma, udma_udma_irq_handler, 0,
2517 dev_err(ud->dev, "chan%d: BCDMA irq request failed\n",
2519 free_irq(uc->irq_num_ring, uc);
2523 uc->irq_num_udma = 0;
2526 udma_reset_rings(uc);
2528 INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work,
2529 udma_check_tx_completion);
2533 uc->irq_num_ring = 0;
2534 uc->irq_num_udma = 0;
2536 if (uc->psil_paired)
2537 navss_psil_unpair(ud, uc->config.src_thread,
2538 uc->config.dst_thread);
2539 uc->psil_paired = false;
2541 bcdma_free_bchan_resources(uc);
2542 udma_free_tx_resources(uc);
2543 udma_free_rx_resources(uc);
2545 udma_reset_uchan(uc);
2547 if (uc->use_dma_pool) {
2548 dma_pool_destroy(uc->hdesc_pool);
2549 uc->use_dma_pool = false;
2555 static int bcdma_router_config(struct dma_chan *chan)
2557 struct k3_event_route_data *router_data = chan->route_data;
2558 struct udma_chan *uc = to_udma_chan(chan);
2564 if (uc->config.tr_trigger_type != 1 && uc->config.tr_trigger_type != 2)
2567 trigger_event = uc->ud->soc_data->bcdma_trigger_event_offset;
2568 trigger_event += (uc->bchan->id * 2) + uc->config.tr_trigger_type - 1;
2570 return router_data->set_event(router_data->priv, trigger_event);
2573 static int pktdma_alloc_chan_resources(struct dma_chan *chan)
2575 struct udma_chan *uc = to_udma_chan(chan);
2576 struct udma_dev *ud = to_udma_dev(chan->device);
2577 const struct udma_oes_offsets *oes = &ud->soc_data->oes;
2582 * Make sure that the completion is in a known state:
2583 * No teardown, the channel is idle
2585 reinit_completion(&uc->teardown_completed);
2586 complete_all(&uc->teardown_completed);
2587 uc->state = UDMA_CHAN_IS_IDLE;
2589 switch (uc->config.dir) {
2590 case DMA_MEM_TO_DEV:
2591 /* Slave transfer synchronized - mem to dev (TX) transfer */
2592 dev_dbg(uc->ud->dev, "%s: chan%d as MEM-to-DEV\n", __func__,
2595 ret = udma_alloc_tx_resources(uc);
2597 uc->config.remote_thread_id = -1;
2601 uc->config.src_thread = ud->psil_base + uc->tchan->id;
2602 uc->config.dst_thread = uc->config.remote_thread_id;
2603 uc->config.dst_thread |= K3_PSIL_DST_THREAD_ID_OFFSET;
2605 irq_ring_idx = uc->tchan->tflow_id + oes->pktdma_tchan_flow;
2607 ret = pktdma_tisci_tx_channel_config(uc);
2609 case DMA_DEV_TO_MEM:
2610 /* Slave transfer synchronized - dev to mem (RX) transfer */
2611 dev_dbg(uc->ud->dev, "%s: chan%d as DEV-to-MEM\n", __func__,
2614 ret = udma_alloc_rx_resources(uc);
2616 uc->config.remote_thread_id = -1;
2620 uc->config.src_thread = uc->config.remote_thread_id;
2621 uc->config.dst_thread = (ud->psil_base + uc->rchan->id) |
2622 K3_PSIL_DST_THREAD_ID_OFFSET;
2624 irq_ring_idx = uc->rflow->id + oes->pktdma_rchan_flow;
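/*
 * Example (a sketch using the AM64 value shown later,
 * pktdma_rchan_flow = 0x1600): rflow 5 maps to MSI ring index
 * 0x1600 + 5 = 0x1605.
 */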
2626 ret = pktdma_tisci_rx_channel_config(uc);
2629 /* Cannot happen */
2630 dev_err(uc->ud->dev, "%s: chan%d invalid direction (%u)\n",
2631 __func__, uc->id, uc->config.dir);
2635 /* check if the channel configuration was successful */
2639 if (udma_is_chan_running(uc)) {
2640 dev_warn(ud->dev, "chan%d: is running!\n", uc->id);
2641 udma_reset_chan(uc, false);
2642 if (udma_is_chan_running(uc)) {
2643 dev_err(ud->dev, "chan%d: won't stop!\n", uc->id);
2649 uc->dma_dev = dmaengine_get_dma_device(chan);
2650 uc->hdesc_pool = dma_pool_create(uc->name, uc->dma_dev,
2651 uc->config.hdesc_size, ud->desc_align,
2653 if (!uc->hdesc_pool) {
2654 dev_err(ud->ddev.dev,
2655 "Descriptor pool allocation failed\n");
2656 uc->use_dma_pool = false;
2661 uc->use_dma_pool = true;
2664 ret = navss_psil_pair(ud, uc->config.src_thread, uc->config.dst_thread);
2666 dev_err(ud->dev, "PSI-L pairing failed: 0x%04x -> 0x%04x\n",
2667 uc->config.src_thread, uc->config.dst_thread);
2671 uc->psil_paired = true;
2673 uc->irq_num_ring = msi_get_virq(ud->dev, irq_ring_idx);
2674 if (uc->irq_num_ring <= 0) {
2675 dev_err(ud->dev, "Failed to get ring irq (index: %u)\n",
2681 ret = request_irq(uc->irq_num_ring, udma_ring_irq_handler,
2682 IRQF_TRIGGER_HIGH, uc->name, uc);
2684 dev_err(ud->dev, "chan%d: ring irq request failed\n", uc->id);
2688 uc->irq_num_udma = 0;
2690 udma_reset_rings(uc);
2692 INIT_DELAYED_WORK_ONSTACK(&uc->tx_drain.work,
2693 udma_check_tx_completion);
2697 "chan%d: tchan%d, tflow%d, Remote thread: 0x%04x\n",
2698 uc->id, uc->tchan->id, uc->tchan->tflow_id,
2699 uc->config.remote_thread_id);
2702 "chan%d: rchan%d, rflow%d, Remote thread: 0x%04x\n",
2703 uc->id, uc->rchan->id, uc->rflow->id,
2704 uc->config.remote_thread_id);
2708 uc->irq_num_ring = 0;
2710 navss_psil_unpair(ud, uc->config.src_thread, uc->config.dst_thread);
2711 uc->psil_paired = false;
2713 udma_free_tx_resources(uc);
2714 udma_free_rx_resources(uc);
2716 udma_reset_uchan(uc);
2718 dma_pool_destroy(uc->hdesc_pool);
2719 uc->use_dma_pool = false;
2724 static int udma_slave_config(struct dma_chan *chan,
2725 struct dma_slave_config *cfg)
2727 struct udma_chan *uc = to_udma_chan(chan);
2729 memcpy(&uc->cfg, cfg, sizeof(uc->cfg));
2734 static struct udma_desc *udma_alloc_tr_desc(struct udma_chan *uc,
2735 size_t tr_size, int tr_count,
2736 enum dma_transfer_direction dir)
2738 struct udma_hwdesc *hwdesc;
2739 struct cppi5_desc_hdr_t *tr_desc;
2740 struct udma_desc *d;
2741 u32 reload_count = 0;
2751 dev_err(uc->ud->dev, "Unsupported TR size of %zu\n", tr_size);
2755 /* We have only one descriptor containing multiple TRs */
2756 d = kzalloc(sizeof(*d) + sizeof(d->hwdesc[0]), GFP_NOWAIT);
2760 d->sglen = tr_count;
2762 d->hwdesc_count = 1;
2763 hwdesc = &d->hwdesc[0];
2765 /* Allocate memory for DMA ring descriptor */
2766 if (uc->use_dma_pool) {
2767 hwdesc->cppi5_desc_size = uc->config.hdesc_size;
2768 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
2770 &hwdesc->cppi5_desc_paddr);
2772 hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size,
2774 hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size,
2775 uc->ud->desc_align);
2776 hwdesc->cppi5_desc_vaddr = dma_alloc_coherent(uc->ud->dev,
2777 hwdesc->cppi5_desc_size,
2778 &hwdesc->cppi5_desc_paddr,
2782 if (!hwdesc->cppi5_desc_vaddr) {
2787 /* Start of the TR req records */
2788 hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size;
2789 /* Start address of the TR response array */
2790 hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size * tr_count;
2792 tr_desc = hwdesc->cppi5_desc_vaddr;
2795 reload_count = CPPI5_INFO0_TRDESC_RLDCNT_INFINITE;
2797 if (dir == DMA_DEV_TO_MEM)
2798 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
2800 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
2802 cppi5_trdesc_init(tr_desc, tr_count, tr_size, 0, reload_count);
2803 cppi5_desc_set_pktids(tr_desc, uc->id,
2804 CPPI5_INFO1_DESC_FLOWID_DEFAULT);
2805 cppi5_desc_set_retpolicy(tr_desc, 0, ring_id);
2811 * udma_get_tr_counters - calculate TR counters for a given length
2812 * @len: Length of the transfer
2813 * @align_to: Preferred alignment
2814 * @tr0_cnt0: First TR icnt0
2815 * @tr0_cnt1: First TR icnt1
2816 * @tr1_cnt0: Second (if used) TR icnt0
2818 * For len < SZ_64K only one TR is enough, tr1_cnt0 is not updated
2819 * For len >= SZ_64K two TRs are used in a simple way:
2820 * First TR: SZ_64K-alignment blocks (tr0_cnt0, tr0_cnt1)
2821 * Second TR: the remaining length (tr1_cnt0)
2823 * Returns the number of TRs the length needs (1 or 2)
2824 * -EINVAL if the length cannot be supported
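*
* Example (sketch): len = 200000 bytes with a 4-byte aligned address
* (align_to = 2) gives tr0_cnt0 = SZ_64K - 4 = 65532,
* tr0_cnt1 = 200000 / 65532 = 3 and tr1_cnt0 = 200000 % 65532 = 3404,
* so two TRs are used.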
2826 static int udma_get_tr_counters(size_t len, unsigned long align_to,
2827 u16 *tr0_cnt0, u16 *tr0_cnt1, u16 *tr1_cnt0)
2840 *tr0_cnt0 = SZ_64K - BIT(align_to);
2841 if (len / *tr0_cnt0 >= SZ_64K) {
2849 *tr0_cnt1 = len / *tr0_cnt0;
2850 *tr1_cnt0 = len % *tr0_cnt0;
2855 static struct udma_desc *
2856 udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl,
2857 unsigned int sglen, enum dma_transfer_direction dir,
2858 unsigned long tx_flags, void *context)
2860 struct scatterlist *sgent;
2861 struct udma_desc *d;
2862 struct cppi5_tr_type1_t *tr_req = NULL;
2863 u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
2870 /* estimate the number of TRs we will need */
2871 for_each_sg(sgl, sgent, sglen, i) {
2872 if (sg_dma_len(sgent) < SZ_64K)
2878 /* Now allocate and set up the descriptor. */
2879 tr_size = sizeof(struct cppi5_tr_type1_t);
2880 d = udma_alloc_tr_desc(uc, tr_size, num_tr, dir);
2886 if (uc->ud->match_data->type == DMA_TYPE_UDMA)
2889 asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT;
2891 tr_req = d->hwdesc[0].tr_req_base;
2892 for_each_sg(sgl, sgent, sglen, i) {
2893 dma_addr_t sg_addr = sg_dma_address(sgent);
2895 num_tr = udma_get_tr_counters(sg_dma_len(sgent), __ffs(sg_addr),
2896 &tr0_cnt0, &tr0_cnt1, &tr1_cnt0);
2898 dev_err(uc->ud->dev, "size %u is not supported\n",
2900 udma_free_hwdesc(uc, d);
2905 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false,
2906 false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2907 cppi5_tr_csf_set(&tr_req[tr_idx].flags, CPPI5_TR_CSF_SUPR_EVT);
2910 tr_req[tr_idx].addr = sg_addr;
2911 tr_req[tr_idx].icnt0 = tr0_cnt0;
2912 tr_req[tr_idx].icnt1 = tr0_cnt1;
2913 tr_req[tr_idx].dim1 = tr0_cnt0;
2917 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1,
2919 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
2920 cppi5_tr_csf_set(&tr_req[tr_idx].flags,
2921 CPPI5_TR_CSF_SUPR_EVT);
2923 tr_req[tr_idx].addr = sg_addr + tr0_cnt1 * tr0_cnt0;
2924 tr_req[tr_idx].icnt0 = tr1_cnt0;
2925 tr_req[tr_idx].icnt1 = 1;
2926 tr_req[tr_idx].dim1 = tr1_cnt0;
2930 d->residue += sg_dma_len(sgent);
2933 cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags,
2934 CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP);
2939 static struct udma_desc *
2940 udma_prep_slave_sg_triggered_tr(struct udma_chan *uc, struct scatterlist *sgl,
2942 enum dma_transfer_direction dir,
2943 unsigned long tx_flags, void *context)
2945 struct scatterlist *sgent;
2946 struct cppi5_tr_type15_t *tr_req = NULL;
2947 enum dma_slave_buswidth dev_width;
2948 u16 tr_cnt0, tr_cnt1;
2949 dma_addr_t dev_addr;
2950 struct udma_desc *d;
2952 size_t tr_size, sg_len;
2955 u32 burst, trigger_size, port_window;
2958 if (dir == DMA_DEV_TO_MEM) {
2959 dev_addr = uc->cfg.src_addr;
2960 dev_width = uc->cfg.src_addr_width;
2961 burst = uc->cfg.src_maxburst;
2962 port_window = uc->cfg.src_port_window_size;
2963 } else if (dir == DMA_MEM_TO_DEV) {
2964 dev_addr = uc->cfg.dst_addr;
2965 dev_width = uc->cfg.dst_addr_width;
2966 burst = uc->cfg.dst_maxburst;
2967 port_window = uc->cfg.dst_port_window_size;
2969 dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
2977 if (port_window != burst) {
2978 dev_err(uc->ud->dev,
2979 "The burst must be equal to port_window\n");
2983 tr_cnt0 = dev_width * port_window;
2986 tr_cnt0 = dev_width;
2989 trigger_size = tr_cnt0 * tr_cnt1;
2991 /* estimate the number of TRs we will need */
2992 for_each_sg(sgl, sgent, sglen, i) {
2993 sg_len = sg_dma_len(sgent);
2995 if (sg_len % trigger_size) {
2996 dev_err(uc->ud->dev,
2997 "Not aligned SG entry (%zu for %u)\n", sg_len,
3002 if (sg_len / trigger_size < SZ_64K)
3008 /* Now allocate and set up the descriptor. */
3009 tr_size = sizeof(struct cppi5_tr_type15_t);
3010 d = udma_alloc_tr_desc(uc, tr_size, num_tr, dir);
3016 if (uc->ud->match_data->type == DMA_TYPE_UDMA) {
3019 asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT;
3023 tr_req = d->hwdesc[0].tr_req_base;
3024 for_each_sg(sgl, sgent, sglen, i) {
3025 u16 tr0_cnt2, tr0_cnt3, tr1_cnt2;
3026 dma_addr_t sg_addr = sg_dma_address(sgent);
3028 sg_len = sg_dma_len(sgent);
3029 num_tr = udma_get_tr_counters(sg_len / trigger_size, 0,
3030 &tr0_cnt2, &tr0_cnt3, &tr1_cnt2);
3032 dev_err(uc->ud->dev, "size %zu is not supported\n",
3034 udma_free_hwdesc(uc, d);
3039 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE15, false,
3040 true, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3041 cppi5_tr_csf_set(&tr_req[tr_idx].flags, CPPI5_TR_CSF_SUPR_EVT);
3042 cppi5_tr_set_trigger(&tr_req[tr_idx].flags,
3043 uc->config.tr_trigger_type,
3044 CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC, 0, 0);
3047 if (dir == DMA_DEV_TO_MEM) {
3048 tr_req[tr_idx].addr = dev_addr;
3049 tr_req[tr_idx].icnt0 = tr_cnt0;
3050 tr_req[tr_idx].icnt1 = tr_cnt1;
3051 tr_req[tr_idx].icnt2 = tr0_cnt2;
3052 tr_req[tr_idx].icnt3 = tr0_cnt3;
3053 tr_req[tr_idx].dim1 = (-1) * tr_cnt0;
3055 tr_req[tr_idx].daddr = sg_addr;
3056 tr_req[tr_idx].dicnt0 = tr_cnt0;
3057 tr_req[tr_idx].dicnt1 = tr_cnt1;
3058 tr_req[tr_idx].dicnt2 = tr0_cnt2;
3059 tr_req[tr_idx].dicnt3 = tr0_cnt3;
3060 tr_req[tr_idx].ddim1 = tr_cnt0;
3061 tr_req[tr_idx].ddim2 = trigger_size;
3062 tr_req[tr_idx].ddim3 = trigger_size * tr0_cnt2;
3064 tr_req[tr_idx].addr = sg_addr;
3065 tr_req[tr_idx].icnt0 = tr_cnt0;
3066 tr_req[tr_idx].icnt1 = tr_cnt1;
3067 tr_req[tr_idx].icnt2 = tr0_cnt2;
3068 tr_req[tr_idx].icnt3 = tr0_cnt3;
3069 tr_req[tr_idx].dim1 = tr_cnt0;
3070 tr_req[tr_idx].dim2 = trigger_size;
3071 tr_req[tr_idx].dim3 = trigger_size * tr0_cnt2;
3073 tr_req[tr_idx].daddr = dev_addr;
3074 tr_req[tr_idx].dicnt0 = tr_cnt0;
3075 tr_req[tr_idx].dicnt1 = tr_cnt1;
3076 tr_req[tr_idx].dicnt2 = tr0_cnt2;
3077 tr_req[tr_idx].dicnt3 = tr0_cnt3;
3078 tr_req[tr_idx].ddim1 = (-1) * tr_cnt0;
3084 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE15,
3086 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3087 cppi5_tr_csf_set(&tr_req[tr_idx].flags,
3088 CPPI5_TR_CSF_SUPR_EVT);
3089 cppi5_tr_set_trigger(&tr_req[tr_idx].flags,
3090 uc->config.tr_trigger_type,
3091 CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC,
3094 sg_addr += trigger_size * tr0_cnt2 * tr0_cnt3;
3095 if (dir == DMA_DEV_TO_MEM) {
3096 tr_req[tr_idx].addr = dev_addr;
3097 tr_req[tr_idx].icnt0 = tr_cnt0;
3098 tr_req[tr_idx].icnt1 = tr_cnt1;
3099 tr_req[tr_idx].icnt2 = tr1_cnt2;
3100 tr_req[tr_idx].icnt3 = 1;
3101 tr_req[tr_idx].dim1 = (-1) * tr_cnt0;
3103 tr_req[tr_idx].daddr = sg_addr;
3104 tr_req[tr_idx].dicnt0 = tr_cnt0;
3105 tr_req[tr_idx].dicnt1 = tr_cnt1;
3106 tr_req[tr_idx].dicnt2 = tr1_cnt2;
3107 tr_req[tr_idx].dicnt3 = 1;
3108 tr_req[tr_idx].ddim1 = tr_cnt0;
3109 tr_req[tr_idx].ddim2 = trigger_size;
3111 tr_req[tr_idx].addr = sg_addr;
3112 tr_req[tr_idx].icnt0 = tr_cnt0;
3113 tr_req[tr_idx].icnt1 = tr_cnt1;
3114 tr_req[tr_idx].icnt2 = tr1_cnt2;
3115 tr_req[tr_idx].icnt3 = 1;
3116 tr_req[tr_idx].dim1 = tr_cnt0;
3117 tr_req[tr_idx].dim2 = trigger_size;
3119 tr_req[tr_idx].daddr = dev_addr;
3120 tr_req[tr_idx].dicnt0 = tr_cnt0;
3121 tr_req[tr_idx].dicnt1 = tr_cnt1;
3122 tr_req[tr_idx].dicnt2 = tr1_cnt2;
3123 tr_req[tr_idx].dicnt3 = 1;
3124 tr_req[tr_idx].ddim1 = (-1) * tr_cnt0;
3129 d->residue += sg_len;
3132 cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags,
3133 CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP);
3138 static int udma_configure_statictr(struct udma_chan *uc, struct udma_desc *d,
3139 enum dma_slave_buswidth dev_width,
3142 if (uc->config.ep_type != PSIL_EP_PDMA_XY)
3145 /* Bus width translates to the element size (ES) */
3146 switch (dev_width) {
3147 case DMA_SLAVE_BUSWIDTH_1_BYTE:
3148 d->static_tr.elsize = 0;
3150 case DMA_SLAVE_BUSWIDTH_2_BYTES:
3151 d->static_tr.elsize = 1;
3153 case DMA_SLAVE_BUSWIDTH_3_BYTES:
3154 d->static_tr.elsize = 2;
3156 case DMA_SLAVE_BUSWIDTH_4_BYTES:
3157 d->static_tr.elsize = 3;
3159 case DMA_SLAVE_BUSWIDTH_8_BYTES:
3160 d->static_tr.elsize = 4;
3162 default: /* not reached */
3166 d->static_tr.elcnt = elcnt;
3169 * PDMA must close the packet when the channel is in packet mode.
3170 * In TR mode, when the channel is not cyclic, we also need PDMA to close
3171 * the packet, otherwise the transfer will stall because PDMA holds on to
3172 * the data it has received from the peripheral.
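*
* Example (sketch): with a 4-byte bus width and elcnt = 8, div = 32;
* a non-cyclic transfer with residue = 4096 bytes then programs
* bstcnt = 4096 / 32 = 128.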
3174 if (uc->config.pkt_mode || !uc->cyclic) {
3175 unsigned int div = dev_width * elcnt;
3178 d->static_tr.bstcnt = d->residue / d->sglen / div;
3180 d->static_tr.bstcnt = d->residue / div;
3182 if (uc->config.dir == DMA_DEV_TO_MEM &&
3183 d->static_tr.bstcnt > uc->ud->match_data->statictr_z_mask)
3186 d->static_tr.bstcnt = 0;
3192 static struct udma_desc *
3193 udma_prep_slave_sg_pkt(struct udma_chan *uc, struct scatterlist *sgl,
3194 unsigned int sglen, enum dma_transfer_direction dir,
3195 unsigned long tx_flags, void *context)
3197 struct scatterlist *sgent;
3198 struct cppi5_host_desc_t *h_desc = NULL;
3199 struct udma_desc *d;
3204 d = kzalloc(struct_size(d, hwdesc, sglen), GFP_NOWAIT);
3209 d->hwdesc_count = sglen;
3211 if (dir == DMA_DEV_TO_MEM)
3212 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
3214 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
3216 if (uc->ud->match_data->type == DMA_TYPE_UDMA)
3219 asel = (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT;
3221 for_each_sg(sgl, sgent, sglen, i) {
3222 struct udma_hwdesc *hwdesc = &d->hwdesc[i];
3223 dma_addr_t sg_addr = sg_dma_address(sgent);
3224 struct cppi5_host_desc_t *desc;
3225 size_t sg_len = sg_dma_len(sgent);
3227 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
3229 &hwdesc->cppi5_desc_paddr);
3230 if (!hwdesc->cppi5_desc_vaddr) {
3231 dev_err(uc->ud->dev,
3232 "descriptor%d allocation failed\n", i);
3234 udma_free_hwdesc(uc, d);
3239 d->residue += sg_len;
3240 hwdesc->cppi5_desc_size = uc->config.hdesc_size;
3241 desc = hwdesc->cppi5_desc_vaddr;
3244 cppi5_hdesc_init(desc, 0, 0);
3245 /* Flow and Packet ID */
3246 cppi5_desc_set_pktids(&desc->hdr, uc->id,
3247 CPPI5_INFO1_DESC_FLOWID_DEFAULT);
3248 cppi5_desc_set_retpolicy(&desc->hdr, 0, ring_id);
3250 cppi5_hdesc_reset_hbdesc(desc);
3251 cppi5_desc_set_retpolicy(&desc->hdr, 0, 0xffff);
3254 /* attach the sg buffer to the descriptor */
3256 cppi5_hdesc_attach_buf(desc, sg_addr, sg_len, sg_addr, sg_len);
3258 /* Attach link as host buffer descriptor */
3260 cppi5_hdesc_link_hbdesc(h_desc,
3261 hwdesc->cppi5_desc_paddr | asel);
3263 if (uc->ud->match_data->type == DMA_TYPE_PKTDMA ||
3264 dir == DMA_MEM_TO_DEV)
3268 if (d->residue >= SZ_4M) {
3269 dev_err(uc->ud->dev,
3270 "%s: Transfer size %u is over the supported 4M range\n",
3271 __func__, d->residue);
3272 udma_free_hwdesc(uc, d);
3277 h_desc = d->hwdesc[0].cppi5_desc_vaddr;
3278 cppi5_hdesc_set_pktlen(h_desc, d->residue);
3283 static int udma_attach_metadata(struct dma_async_tx_descriptor *desc,
3284 void *data, size_t len)
3286 struct udma_desc *d = to_udma_desc(desc);
3287 struct udma_chan *uc = to_udma_chan(desc->chan);
3288 struct cppi5_host_desc_t *h_desc;
3292 if (!uc->config.pkt_mode || !uc->config.metadata_size)
3295 if (!data || len > uc->config.metadata_size)
3298 if (uc->config.needs_epib && len < CPPI5_INFO0_HDESC_EPIB_SIZE)
3301 h_desc = d->hwdesc[0].cppi5_desc_vaddr;
3302 if (d->dir == DMA_MEM_TO_DEV)
3303 memcpy(h_desc->epib, data, len);
3305 if (uc->config.needs_epib)
3306 psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE;
3309 d->metadata_size = len;
3310 if (uc->config.needs_epib)
3311 flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT;
3313 cppi5_hdesc_update_flags(h_desc, flags);
3314 cppi5_hdesc_update_psdata_size(h_desc, psd_size);
3319 static void *udma_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
3320 size_t *payload_len, size_t *max_len)
3322 struct udma_desc *d = to_udma_desc(desc);
3323 struct udma_chan *uc = to_udma_chan(desc->chan);
3324 struct cppi5_host_desc_t *h_desc;
3326 if (!uc->config.pkt_mode || !uc->config.metadata_size)
3327 return ERR_PTR(-ENOTSUPP);
3329 h_desc = d->hwdesc[0].cppi5_desc_vaddr;
3331 *max_len = uc->config.metadata_size;
3333 *payload_len = cppi5_hdesc_epib_present(&h_desc->hdr) ?
3334 CPPI5_INFO0_HDESC_EPIB_SIZE : 0;
3335 *payload_len += cppi5_hdesc_get_psdata_size(h_desc);
3337 return h_desc->epib;
3340 static int udma_set_metadata_len(struct dma_async_tx_descriptor *desc,
3343 struct udma_desc *d = to_udma_desc(desc);
3344 struct udma_chan *uc = to_udma_chan(desc->chan);
3345 struct cppi5_host_desc_t *h_desc;
3346 u32 psd_size = payload_len;
3349 if (!uc->config.pkt_mode || !uc->config.metadata_size)
3352 if (payload_len > uc->config.metadata_size)
3355 if (uc->config.needs_epib && payload_len < CPPI5_INFO0_HDESC_EPIB_SIZE)
3358 h_desc = d->hwdesc[0].cppi5_desc_vaddr;
3360 if (uc->config.needs_epib) {
3361 psd_size -= CPPI5_INFO0_HDESC_EPIB_SIZE;
3362 flags |= CPPI5_INFO0_HDESC_EPIB_PRESENT;
3365 cppi5_hdesc_update_flags(h_desc, flags);
3366 cppi5_hdesc_update_psdata_size(h_desc, psd_size);
3371 static struct dma_descriptor_metadata_ops metadata_ops = {
3372 .attach = udma_attach_metadata,
3373 .get_ptr = udma_get_metadata_ptr,
3374 .set_len = udma_set_metadata_len,
3377 static struct dma_async_tx_descriptor *
3378 udma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
3379 unsigned int sglen, enum dma_transfer_direction dir,
3380 unsigned long tx_flags, void *context)
3382 struct udma_chan *uc = to_udma_chan(chan);
3383 enum dma_slave_buswidth dev_width;
3384 struct udma_desc *d;
3387 if (dir != uc->config.dir &&
3388 (uc->config.dir == DMA_MEM_TO_MEM && !uc->config.tr_trigger_type)) {
3389 dev_err(chan->device->dev,
3390 "%s: chan%d is for %s, not supporting %s\n",
3392 dmaengine_get_direction_text(uc->config.dir),
3393 dmaengine_get_direction_text(dir));
3397 if (dir == DMA_DEV_TO_MEM) {
3398 dev_width = uc->cfg.src_addr_width;
3399 burst = uc->cfg.src_maxburst;
3400 } else if (dir == DMA_MEM_TO_DEV) {
3401 dev_width = uc->cfg.dst_addr_width;
3402 burst = uc->cfg.dst_maxburst;
3404 dev_err(chan->device->dev, "%s: bad direction?\n", __func__);
3411 if (uc->config.pkt_mode)
3412 d = udma_prep_slave_sg_pkt(uc, sgl, sglen, dir, tx_flags,
3414 else if (is_slave_direction(uc->config.dir))
3415 d = udma_prep_slave_sg_tr(uc, sgl, sglen, dir, tx_flags,
3418 d = udma_prep_slave_sg_triggered_tr(uc, sgl, sglen, dir,
3428 /* static TR for remote PDMA */
3429 if (udma_configure_statictr(uc, d, dev_width, burst)) {
3430 dev_err(uc->ud->dev,
3431 "%s: StaticTR Z is limited to maximum 4095 (%u)\n",
3432 __func__, d->static_tr.bstcnt);
3434 udma_free_hwdesc(uc, d);
3439 if (uc->config.metadata_size)
3440 d->vd.tx.metadata_ops = &metadata_ops;
3442 return vchan_tx_prep(&uc->vc, &d->vd, tx_flags);
3445 static struct udma_desc *
3446 udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
3447 size_t buf_len, size_t period_len,
3448 enum dma_transfer_direction dir, unsigned long flags)
3450 struct udma_desc *d;
3451 size_t tr_size, period_addr;
3452 struct cppi5_tr_type1_t *tr_req;
3453 unsigned int periods = buf_len / period_len;
3454 u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
3458 num_tr = udma_get_tr_counters(period_len, __ffs(buf_addr), &tr0_cnt0,
3459 &tr0_cnt1, &tr1_cnt0);
3461 dev_err(uc->ud->dev, "size %zu is not supported\n",
3466 /* Now allocate and set up the descriptor. */
3467 tr_size = sizeof(struct cppi5_tr_type1_t);
3468 d = udma_alloc_tr_desc(uc, tr_size, periods * num_tr, dir);
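/*
 * Example (sketch): a 64 KiB cyclic buffer split into 8 KiB periods
 * gives periods = 8 and num_tr = 1 (period_len < SZ_64K), so the
 * single descriptor carries 8 type1 TRs, one per period.
 */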
3472 tr_req = d->hwdesc[0].tr_req_base;
3473 if (uc->ud->match_data->type == DMA_TYPE_UDMA)
3474 period_addr = buf_addr;
3476 period_addr = buf_addr |
3477 ((u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT);
3479 for (i = 0; i < periods; i++) {
3480 int tr_idx = i * num_tr;
3482 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false,
3483 false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3485 tr_req[tr_idx].addr = period_addr;
3486 tr_req[tr_idx].icnt0 = tr0_cnt0;
3487 tr_req[tr_idx].icnt1 = tr0_cnt1;
3488 tr_req[tr_idx].dim1 = tr0_cnt0;
3491 cppi5_tr_csf_set(&tr_req[tr_idx].flags,
3492 CPPI5_TR_CSF_SUPR_EVT);
3495 cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1,
3497 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3499 tr_req[tr_idx].addr = period_addr + tr0_cnt1 * tr0_cnt0;
3500 tr_req[tr_idx].icnt0 = tr1_cnt0;
3501 tr_req[tr_idx].icnt1 = 1;
3502 tr_req[tr_idx].dim1 = tr1_cnt0;
3505 if (!(flags & DMA_PREP_INTERRUPT))
3506 cppi5_tr_csf_set(&tr_req[tr_idx].flags,
3507 CPPI5_TR_CSF_SUPR_EVT);
3509 period_addr += period_len;
3515 static struct udma_desc *
3516 udma_prep_dma_cyclic_pkt(struct udma_chan *uc, dma_addr_t buf_addr,
3517 size_t buf_len, size_t period_len,
3518 enum dma_transfer_direction dir, unsigned long flags)
3520 struct udma_desc *d;
3523 int periods = buf_len / period_len;
3525 if (periods > (K3_UDMA_DEFAULT_RING_SIZE - 1))
3528 if (period_len >= SZ_4M)
3531 d = kzalloc(struct_size(d, hwdesc, periods), GFP_NOWAIT);
3535 d->hwdesc_count = periods;
3537 /* TODO: re-check this... */
3538 if (dir == DMA_DEV_TO_MEM)
3539 ring_id = k3_ringacc_get_ring_id(uc->rflow->r_ring);
3541 ring_id = k3_ringacc_get_ring_id(uc->tchan->tc_ring);
3543 if (uc->ud->match_data->type != DMA_TYPE_UDMA)
3544 buf_addr |= (u64)uc->config.asel << K3_ADDRESS_ASEL_SHIFT;
3546 for (i = 0; i < periods; i++) {
3547 struct udma_hwdesc *hwdesc = &d->hwdesc[i];
3548 dma_addr_t period_addr = buf_addr + (period_len * i);
3549 struct cppi5_host_desc_t *h_desc;
3551 hwdesc->cppi5_desc_vaddr = dma_pool_zalloc(uc->hdesc_pool,
3553 &hwdesc->cppi5_desc_paddr);
3554 if (!hwdesc->cppi5_desc_vaddr) {
3555 dev_err(uc->ud->dev,
3556 "descriptor%d allocation failed\n", i);
3558 udma_free_hwdesc(uc, d);
3563 hwdesc->cppi5_desc_size = uc->config.hdesc_size;
3564 h_desc = hwdesc->cppi5_desc_vaddr;
3566 cppi5_hdesc_init(h_desc, 0, 0);
3567 cppi5_hdesc_set_pktlen(h_desc, period_len);
3569 /* Flow and Packet ID */
3570 cppi5_desc_set_pktids(&h_desc->hdr, uc->id,
3571 CPPI5_INFO1_DESC_FLOWID_DEFAULT);
3572 cppi5_desc_set_retpolicy(&h_desc->hdr, 0, ring_id);
3574 /* attach each period to a new descriptor */
3575 cppi5_hdesc_attach_buf(h_desc,
3576 period_addr, period_len,
3577 period_addr, period_len);
3583 static struct dma_async_tx_descriptor *
3584 udma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
3585 size_t period_len, enum dma_transfer_direction dir,
3586 unsigned long flags)
3588 struct udma_chan *uc = to_udma_chan(chan);
3589 enum dma_slave_buswidth dev_width;
3590 struct udma_desc *d;
3593 if (dir != uc->config.dir) {
3594 dev_err(chan->device->dev,
3595 "%s: chan%d is for %s, not supporting %s\n",
3597 dmaengine_get_direction_text(uc->config.dir),
3598 dmaengine_get_direction_text(dir));
3604 if (dir == DMA_DEV_TO_MEM) {
3605 dev_width = uc->cfg.src_addr_width;
3606 burst = uc->cfg.src_maxburst;
3607 } else if (dir == DMA_MEM_TO_DEV) {
3608 dev_width = uc->cfg.dst_addr_width;
3609 burst = uc->cfg.dst_maxburst;
3611 dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
3618 if (uc->config.pkt_mode)
3619 d = udma_prep_dma_cyclic_pkt(uc, buf_addr, buf_len, period_len,
3622 d = udma_prep_dma_cyclic_tr(uc, buf_addr, buf_len, period_len,
3628 d->sglen = buf_len / period_len;
3631 d->residue = buf_len;
3633 /* static TR for remote PDMA */
3634 if (udma_configure_statictr(uc, d, dev_width, burst)) {
3635 dev_err(uc->ud->dev,
3636 "%s: StaticTR Z is limited to maximum 4095 (%u)\n",
3637 __func__, d->static_tr.bstcnt);
3639 udma_free_hwdesc(uc, d);
3644 if (uc->config.metadata_size)
3645 d->vd.tx.metadata_ops = &metadata_ops;
3647 return vchan_tx_prep(&uc->vc, &d->vd, flags);
3650 static struct dma_async_tx_descriptor *
3651 udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
3652 size_t len, unsigned long tx_flags)
3654 struct udma_chan *uc = to_udma_chan(chan);
3655 struct udma_desc *d;
3656 struct cppi5_tr_type15_t *tr_req;
3658 size_t tr_size = sizeof(struct cppi5_tr_type15_t);
3659 u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
3661 if (uc->config.dir != DMA_MEM_TO_MEM) {
3662 dev_err(chan->device->dev,
3663 "%s: chan%d is for %s, not supporting %s\n",
3665 dmaengine_get_direction_text(uc->config.dir),
3666 dmaengine_get_direction_text(DMA_MEM_TO_MEM));
3670 num_tr = udma_get_tr_counters(len, __ffs(src | dest), &tr0_cnt0,
3671 &tr0_cnt1, &tr1_cnt0);
3673 dev_err(uc->ud->dev, "size %zu is not supported\n",
3678 d = udma_alloc_tr_desc(uc, tr_size, num_tr, DMA_MEM_TO_MEM);
3682 d->dir = DMA_MEM_TO_MEM;
3687 if (uc->ud->match_data->type != DMA_TYPE_UDMA) {
3688 src |= (u64)uc->ud->asel << K3_ADDRESS_ASEL_SHIFT;
3689 dest |= (u64)uc->ud->asel << K3_ADDRESS_ASEL_SHIFT;
3692 tr_req = d->hwdesc[0].tr_req_base;
3694 cppi5_tr_init(&tr_req[0].flags, CPPI5_TR_TYPE15, false, true,
3695 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3696 cppi5_tr_csf_set(&tr_req[0].flags, CPPI5_TR_CSF_SUPR_EVT);
3698 tr_req[0].addr = src;
3699 tr_req[0].icnt0 = tr0_cnt0;
3700 tr_req[0].icnt1 = tr0_cnt1;
3701 tr_req[0].icnt2 = 1;
3702 tr_req[0].icnt3 = 1;
3703 tr_req[0].dim1 = tr0_cnt0;
3705 tr_req[0].daddr = dest;
3706 tr_req[0].dicnt0 = tr0_cnt0;
3707 tr_req[0].dicnt1 = tr0_cnt1;
3708 tr_req[0].dicnt2 = 1;
3709 tr_req[0].dicnt3 = 1;
3710 tr_req[0].ddim1 = tr0_cnt0;
3713 cppi5_tr_init(&tr_req[1].flags, CPPI5_TR_TYPE15, false, true,
3714 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
3715 cppi5_tr_csf_set(&tr_req[1].flags, CPPI5_TR_CSF_SUPR_EVT);
3717 tr_req[1].addr = src + tr0_cnt1 * tr0_cnt0;
3718 tr_req[1].icnt0 = tr1_cnt0;
3719 tr_req[1].icnt1 = 1;
3720 tr_req[1].icnt2 = 1;
3721 tr_req[1].icnt3 = 1;
3723 tr_req[1].daddr = dest + tr0_cnt1 * tr0_cnt0;
3724 tr_req[1].dicnt0 = tr1_cnt0;
3725 tr_req[1].dicnt1 = 1;
3726 tr_req[1].dicnt2 = 1;
3727 tr_req[1].dicnt3 = 1;
3730 cppi5_tr_csf_set(&tr_req[num_tr - 1].flags,
3731 CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP);
3733 if (uc->config.metadata_size)
3734 d->vd.tx.metadata_ops = &metadata_ops;
3736 return vchan_tx_prep(&uc->vc, &d->vd, tx_flags);
3739 static void udma_issue_pending(struct dma_chan *chan)
3741 struct udma_chan *uc = to_udma_chan(chan);
3742 unsigned long flags;
3744 spin_lock_irqsave(&uc->vc.lock, flags);
3746 /* If we have something pending and no active descriptor, then */
3747 if (vchan_issue_pending(&uc->vc) && !uc->desc) {
3749 * start a descriptor if the channel is NOT [marked as
3750 * terminating _and_ it is still running (teardown has not
3753 if (!(uc->state == UDMA_CHAN_IS_TERMINATING &&
3754 udma_is_chan_running(uc)))
3758 spin_unlock_irqrestore(&uc->vc.lock, flags);
3761 static enum dma_status udma_tx_status(struct dma_chan *chan,
3762 dma_cookie_t cookie,
3763 struct dma_tx_state *txstate)
3765 struct udma_chan *uc = to_udma_chan(chan);
3766 enum dma_status ret;
3767 unsigned long flags;
3769 spin_lock_irqsave(&uc->vc.lock, flags);
3771 ret = dma_cookie_status(chan, cookie, txstate);
3773 if (!udma_is_chan_running(uc))
3776 if (ret == DMA_IN_PROGRESS && udma_is_chan_paused(uc))
3779 if (ret == DMA_COMPLETE || !txstate)
3782 if (uc->desc && uc->desc->vd.tx.cookie == cookie) {
3785 u32 residue = uc->desc->residue;
3788 if (uc->desc->dir == DMA_MEM_TO_DEV) {
3789 bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_SBCNT_REG);
3791 if (uc->config.ep_type != PSIL_EP_NATIVE) {
3792 peer_bcnt = udma_tchanrt_read(uc,
3793 UDMA_CHAN_RT_PEER_BCNT_REG);
3795 if (bcnt > peer_bcnt)
3796 delay = bcnt - peer_bcnt;
3798 } else if (uc->desc->dir == DMA_DEV_TO_MEM) {
3799 bcnt = udma_rchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
3801 if (uc->config.ep_type != PSIL_EP_NATIVE) {
3802 peer_bcnt = udma_rchanrt_read(uc,
3803 UDMA_CHAN_RT_PEER_BCNT_REG);
3805 if (peer_bcnt > bcnt)
3806 delay = peer_bcnt - bcnt;
3809 bcnt = udma_tchanrt_read(uc, UDMA_CHAN_RT_BCNT_REG);
3813 if (bcnt && !(bcnt % uc->desc->residue))
3816 residue -= bcnt % uc->desc->residue;
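/*
 * Example (sketch): a descriptor with residue 4096 and bcnt = 10240
 * bytes progressed reports residue = 4096 - (10240 % 4096) = 2048.
 */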
3818 if (!residue && (uc->config.dir == DMA_DEV_TO_MEM || !delay)) {
3823 dma_set_residue(txstate, residue);
3824 dma_set_in_flight_bytes(txstate, delay);
3831 spin_unlock_irqrestore(&uc->vc.lock, flags);
3835 static int udma_pause(struct dma_chan *chan)
3837 struct udma_chan *uc = to_udma_chan(chan);
3839 /* pause the channel */
3840 switch (uc->config.dir) {
3841 case DMA_DEV_TO_MEM:
3842 udma_rchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
3843 UDMA_PEER_RT_EN_PAUSE,
3844 UDMA_PEER_RT_EN_PAUSE);
3846 case DMA_MEM_TO_DEV:
3847 udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
3848 UDMA_PEER_RT_EN_PAUSE,
3849 UDMA_PEER_RT_EN_PAUSE);
3851 case DMA_MEM_TO_MEM:
3852 udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_CTL_REG,
3853 UDMA_CHAN_RT_CTL_PAUSE,
3854 UDMA_CHAN_RT_CTL_PAUSE);
3863 static int udma_resume(struct dma_chan *chan)
3865 struct udma_chan *uc = to_udma_chan(chan);
3867 /* resume the channel */
3868 switch (uc->config.dir) {
3869 case DMA_DEV_TO_MEM:
3870 udma_rchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
3871 UDMA_PEER_RT_EN_PAUSE, 0);
3874 case DMA_MEM_TO_DEV:
3875 udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_PEER_RT_EN_REG,
3876 UDMA_PEER_RT_EN_PAUSE, 0);
3878 case DMA_MEM_TO_MEM:
3879 udma_tchanrt_update_bits(uc, UDMA_CHAN_RT_CTL_REG,
3880 UDMA_CHAN_RT_CTL_PAUSE, 0);
3889 static int udma_terminate_all(struct dma_chan *chan)
3891 struct udma_chan *uc = to_udma_chan(chan);
3892 unsigned long flags;
3895 spin_lock_irqsave(&uc->vc.lock, flags);
3897 if (udma_is_chan_running(uc))
3901 uc->terminated_desc = uc->desc;
3903 uc->terminated_desc->terminated = true;
3904 cancel_delayed_work(&uc->tx_drain.work);
3909 vchan_get_all_descriptors(&uc->vc, &head);
3910 spin_unlock_irqrestore(&uc->vc.lock, flags);
3911 vchan_dma_desc_free_list(&uc->vc, &head);
3916 static void udma_synchronize(struct dma_chan *chan)
3918 struct udma_chan *uc = to_udma_chan(chan);
3919 unsigned long timeout = msecs_to_jiffies(1000);
3921 vchan_synchronize(&uc->vc);
3923 if (uc->state == UDMA_CHAN_IS_TERMINATING) {
3924 timeout = wait_for_completion_timeout(&uc->teardown_completed,
3927 dev_warn(uc->ud->dev, "chan%d teardown timeout!\n",
3929 udma_dump_chan_stdata(uc);
3930 udma_reset_chan(uc, true);
3934 udma_reset_chan(uc, false);
3935 if (udma_is_chan_running(uc))
3936 dev_warn(uc->ud->dev, "chan%d refused to stop!\n", uc->id);
3938 cancel_delayed_work_sync(&uc->tx_drain.work);
3939 udma_reset_rings(uc);
3942 static void udma_desc_pre_callback(struct virt_dma_chan *vc,
3943 struct virt_dma_desc *vd,
3944 struct dmaengine_result *result)
3946 struct udma_chan *uc = to_udma_chan(&vc->chan);
3947 struct udma_desc *d;
3952 d = to_udma_desc(&vd->tx);
3954 if (d->metadata_size)
3955 udma_fetch_epib(uc, d);
3957 /* Provide residue information for the client */
3959 void *desc_vaddr = udma_curr_cppi5_desc_vaddr(d, d->desc_idx);
3961 if (cppi5_desc_get_type(desc_vaddr) ==
3962 CPPI5_INFO0_DESC_TYPE_VAL_HOST) {
3963 result->residue = d->residue -
3964 cppi5_hdesc_get_pktlen(desc_vaddr);
3965 if (result->residue)
3966 result->result = DMA_TRANS_ABORTED;
3968 result->result = DMA_TRANS_NOERROR;
3970 result->residue = 0;
3971 result->result = DMA_TRANS_NOERROR;
3977 * This tasklet handles the completion of a DMA descriptor by
3978 * calling its callback and freeing it.
3980 static void udma_vchan_complete(struct tasklet_struct *t)
3982 struct virt_dma_chan *vc = from_tasklet(vc, t, task);
3983 struct virt_dma_desc *vd, *_vd;
3984 struct dmaengine_desc_callback cb;
3987 spin_lock_irq(&vc->lock);
3988 list_splice_tail_init(&vc->desc_completed, &head);
3992 dmaengine_desc_get_callback(&vd->tx, &cb);
3994 memset(&cb, 0, sizeof(cb));
3996 spin_unlock_irq(&vc->lock);
3998 udma_desc_pre_callback(vc, vd, NULL);
3999 dmaengine_desc_callback_invoke(&cb, NULL);
4001 list_for_each_entry_safe(vd, _vd, &head, node) {
4002 struct dmaengine_result result;
4004 dmaengine_desc_get_callback(&vd->tx, &cb);
4006 list_del(&vd->node);
4008 udma_desc_pre_callback(vc, vd, &result);
4009 dmaengine_desc_callback_invoke(&cb, &result);
4011 vchan_vdesc_fini(vd);
4015 static void udma_free_chan_resources(struct dma_chan *chan)
4017 struct udma_chan *uc = to_udma_chan(chan);
4018 struct udma_dev *ud = to_udma_dev(chan->device);
4020 udma_terminate_all(chan);
4021 if (uc->terminated_desc) {
4022 udma_reset_chan(uc, false);
4023 udma_reset_rings(uc);
4026 cancel_delayed_work_sync(&uc->tx_drain.work);
4028 if (uc->irq_num_ring > 0) {
4029 free_irq(uc->irq_num_ring, uc);
4031 uc->irq_num_ring = 0;
4033 if (uc->irq_num_udma > 0) {
4034 free_irq(uc->irq_num_udma, uc);
4036 uc->irq_num_udma = 0;
4039 /* Release PSI-L pairing */
4040 if (uc->psil_paired) {
4041 navss_psil_unpair(ud, uc->config.src_thread,
4042 uc->config.dst_thread);
4043 uc->psil_paired = false;
4046 vchan_free_chan_resources(&uc->vc);
4047 tasklet_kill(&uc->vc.task);
4049 bcdma_free_bchan_resources(uc);
4050 udma_free_tx_resources(uc);
4051 udma_free_rx_resources(uc);
4052 udma_reset_uchan(uc);
4054 if (uc->use_dma_pool) {
4055 dma_pool_destroy(uc->hdesc_pool);
4056 uc->use_dma_pool = false;
4060 static struct platform_driver udma_driver;
4061 static struct platform_driver bcdma_driver;
4062 static struct platform_driver pktdma_driver;
4064 struct udma_filter_param {
4065 int remote_thread_id;
4068 u32 tr_trigger_type;
4071 static bool udma_dma_filter_fn(struct dma_chan *chan, void *param)
4073 struct udma_chan_config *ucc;
4074 struct psil_endpoint_config *ep_config;
4075 struct udma_filter_param *filter_param;
4076 struct udma_chan *uc;
4077 struct udma_dev *ud;
4079 if (chan->device->dev->driver != &udma_driver.driver &&
4080 chan->device->dev->driver != &bcdma_driver.driver &&
4081 chan->device->dev->driver != &pktdma_driver.driver)
4084 uc = to_udma_chan(chan);
4087 filter_param = param;
4089 if (filter_param->atype > 2) {
4090 dev_err(ud->dev, "Invalid channel atype: %u\n",
4091 filter_param->atype);
4095 if (filter_param->asel > 15) {
4096 dev_err(ud->dev, "Invalid channel asel: %u\n",
4097 filter_param->asel);
4101 ucc->remote_thread_id = filter_param->remote_thread_id;
4102 ucc->atype = filter_param->atype;
4103 ucc->asel = filter_param->asel;
4104 ucc->tr_trigger_type = filter_param->tr_trigger_type;
4106 if (ucc->tr_trigger_type) {
4107 ucc->dir = DMA_MEM_TO_MEM;
4108 goto triggered_bchan;
4109 } else if (ucc->remote_thread_id & K3_PSIL_DST_THREAD_ID_OFFSET) {
4110 ucc->dir = DMA_MEM_TO_DEV;
4112 ucc->dir = DMA_DEV_TO_MEM;
4115 ep_config = psil_get_ep_config(ucc->remote_thread_id);
4116 if (IS_ERR(ep_config)) {
4117 dev_err(ud->dev, "No configuration for psi-l thread 0x%04x\n",
4118 ucc->remote_thread_id);
4119 ucc->dir = DMA_MEM_TO_MEM;
4120 ucc->remote_thread_id = -1;
4126 if (ud->match_data->type == DMA_TYPE_BCDMA &&
4127 ep_config->pkt_mode) {
4129 "Only TR mode is supported (psi-l thread 0x%04x)\n",
4130 ucc->remote_thread_id);
4131 ucc->dir = DMA_MEM_TO_MEM;
4132 ucc->remote_thread_id = -1;
4138 ucc->pkt_mode = ep_config->pkt_mode;
4139 ucc->channel_tpl = ep_config->channel_tpl;
4140 ucc->notdpkt = ep_config->notdpkt;
4141 ucc->ep_type = ep_config->ep_type;
4143 if (ud->match_data->type == DMA_TYPE_PKTDMA &&
4144 ep_config->mapped_channel_id >= 0) {
4145 ucc->mapped_channel_id = ep_config->mapped_channel_id;
4146 ucc->default_flow_id = ep_config->default_flow_id;
4148 ucc->mapped_channel_id = -1;
4149 ucc->default_flow_id = -1;
4152 if (ucc->ep_type != PSIL_EP_NATIVE) {
4153 const struct udma_match_data *match_data = ud->match_data;
4155 if (match_data->flags & UDMA_FLAG_PDMA_ACC32)
4156 ucc->enable_acc32 = ep_config->pdma_acc32;
4157 if (match_data->flags & UDMA_FLAG_PDMA_BURST)
4158 ucc->enable_burst = ep_config->pdma_burst;
4161 ucc->needs_epib = ep_config->needs_epib;
4162 ucc->psd_size = ep_config->psd_size;
4163 ucc->metadata_size =
4164 (ucc->needs_epib ? CPPI5_INFO0_HDESC_EPIB_SIZE : 0) +
4168 ucc->hdesc_size = ALIGN(sizeof(struct cppi5_host_desc_t) +
4169 ucc->metadata_size, ud->desc_align);
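/*
 * Example (sketch): an endpoint needing EPIB (16 bytes) with 16 bytes
 * of protocol specific data has metadata_size = 32, so hdesc_size is
 * sizeof(struct cppi5_host_desc_t) + 32, rounded up to desc_align.
 */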
4171 dev_dbg(ud->dev, "chan%d: Remote thread: 0x%04x (%s)\n", uc->id,
4172 ucc->remote_thread_id, dmaengine_get_direction_text(ucc->dir));
4177 dev_dbg(ud->dev, "chan%d: triggered channel (type: %u)\n", uc->id,
4178 ucc->tr_trigger_type);
4184 static struct dma_chan *udma_of_xlate(struct of_phandle_args *dma_spec,
4185 struct of_dma *ofdma)
4187 struct udma_dev *ud = ofdma->of_dma_data;
4188 dma_cap_mask_t mask = ud->ddev.cap_mask;
4189 struct udma_filter_param filter_param;
4190 struct dma_chan *chan;
4192 if (ud->match_data->type == DMA_TYPE_BCDMA) {
4193 if (dma_spec->args_count != 3)
4196 filter_param.tr_trigger_type = dma_spec->args[0];
4197 filter_param.remote_thread_id = dma_spec->args[1];
4198 filter_param.asel = dma_spec->args[2];
4199 filter_param.atype = 0;
4201 if (dma_spec->args_count != 1 && dma_spec->args_count != 2)
4204 filter_param.remote_thread_id = dma_spec->args[0];
4205 filter_param.tr_trigger_type = 0;
4206 if (dma_spec->args_count == 2) {
4207 if (ud->match_data->type == DMA_TYPE_UDMA) {
4208 filter_param.atype = dma_spec->args[1];
4209 filter_param.asel = 0;
4211 filter_param.atype = 0;
4212 filter_param.asel = dma_spec->args[1];
4215 filter_param.atype = 0;
4216 filter_param.asel = 0;
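/*
 * Example (sketch, hypothetical values): a BCDMA triggered channel is
 * requested with three cells, e.g. <&dma 1 0x4400 15> for trigger
 * type 1, PSI-L thread 0x4400 and ASEL 15, while UDMA/PKTDMA clients
 * pass the thread ID and optionally atype or asel as a second cell.
 */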
4220 chan = __dma_request_channel(&mask, udma_dma_filter_fn, &filter_param,
4223 dev_err(ud->dev, "failed to get channel in %s\n", __func__);
4224 return ERR_PTR(-EINVAL);
4230 static struct udma_match_data am654_main_data = {
4231 .type = DMA_TYPE_UDMA,
4232 .psil_base = 0x1000,
4233 .enable_memcpy_support = true,
4234 .statictr_z_mask = GENMASK(11, 0),
4236 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
4237 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* H Channels */
4238 0, /* No UH Channels */
4242 static struct udma_match_data am654_mcu_data = {
4243 .type = DMA_TYPE_UDMA,
4244 .psil_base = 0x6000,
4245 .enable_memcpy_support = false,
4246 .statictr_z_mask = GENMASK(11, 0),
4248 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
4249 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* H Channels */
4250 0, /* No UH Channels */
4254 static struct udma_match_data j721e_main_data = {
4255 .type = DMA_TYPE_UDMA,
4256 .psil_base = 0x1000,
4257 .enable_memcpy_support = true,
4258 .flags = UDMA_FLAGS_J7_CLASS,
4259 .statictr_z_mask = GENMASK(23, 0),
4261 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
4262 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES, /* H Channels */
4263 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES, /* UH Channels */
4267 static struct udma_match_data j721e_mcu_data = {
4268 .type = DMA_TYPE_UDMA,
4269 .psil_base = 0x6000,
4270 .enable_memcpy_support = false, /* MEM_TO_MEM is slow via MCU UDMA */
4271 .flags = UDMA_FLAGS_J7_CLASS,
4272 .statictr_z_mask = GENMASK(23, 0),
4274 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
4275 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_128_BYTES, /* H Channels */
4276 0, /* No UH Channels */
4280 static struct udma_match_data am64_bcdma_data = {
4281 .type = DMA_TYPE_BCDMA,
4282 .psil_base = 0x2000, /* for tchan and rchan, not applicable to bchan */
4283 .enable_memcpy_support = true, /* Supported via bchan */
4284 .flags = UDMA_FLAGS_J7_CLASS,
4285 .statictr_z_mask = GENMASK(23, 0),
4287 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
4288 0, /* No H Channels */
4289 0, /* No UH Channels */
4293 static struct udma_match_data am64_pktdma_data = {
4294 .type = DMA_TYPE_PKTDMA,
4295 .psil_base = 0x1000,
4296 .enable_memcpy_support = false, /* PKTDMA does not support MEM_TO_MEM */
4297 .flags = UDMA_FLAGS_J7_CLASS,
4298 .statictr_z_mask = GENMASK(23, 0),
4300 TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES, /* Normal Channels */
4301 0, /* No H Channels */
4302 0, /* No UH Channels */
4306 static const struct of_device_id udma_of_match[] = {
4308 .compatible = "ti,am654-navss-main-udmap",
4309 .data = &am654_main_data,
4312 .compatible = "ti,am654-navss-mcu-udmap",
4313 .data = &am654_mcu_data,
4315 .compatible = "ti,j721e-navss-main-udmap",
4316 .data = &j721e_main_data,
4318 .compatible = "ti,j721e-navss-mcu-udmap",
4319 .data = &j721e_mcu_data,
4324 static const struct of_device_id bcdma_of_match[] = {
4326 .compatible = "ti,am64-dmss-bcdma",
4327 .data = &am64_bcdma_data,
4332 static const struct of_device_id pktdma_of_match[] = {
4334 .compatible = "ti,am64-dmss-pktdma",
4335 .data = &am64_pktdma_data,
4340 static struct udma_soc_data am654_soc_data = {
4342 .udma_rchan = 0x200,
4346 static struct udma_soc_data j721e_soc_data = {
4348 .udma_rchan = 0x400,
4352 static struct udma_soc_data j7200_soc_data = {
4358 static struct udma_soc_data am64_soc_data = {
4360 .bcdma_bchan_data = 0x2200,
4361 .bcdma_bchan_ring = 0x2400,
4362 .bcdma_tchan_data = 0x2800,
4363 .bcdma_tchan_ring = 0x2a00,
4364 .bcdma_rchan_data = 0x2e00,
4365 .bcdma_rchan_ring = 0x3000,
4366 .pktdma_tchan_flow = 0x1200,
4367 .pktdma_rchan_flow = 0x1600,
4369 .bcdma_trigger_event_offset = 0xc400,
4372 static const struct soc_device_attribute k3_soc_devices[] = {
4373 { .family = "AM65X", .data = &am654_soc_data },
4374 { .family = "J721E", .data = &j721e_soc_data },
4375 { .family = "J7200", .data = &j7200_soc_data },
4376 { .family = "AM64X", .data = &am64_soc_data },
4377 { .family = "J721S2", .data = &j721e_soc_data},
4378 { .family = "AM62X", .data = &am64_soc_data },
4382 static int udma_get_mmrs(struct platform_device *pdev, struct udma_dev *ud)
4384 u32 cap2, cap3, cap4;
4387 ud->mmrs[MMR_GCFG] = devm_platform_ioremap_resource_byname(pdev, mmr_names[MMR_GCFG]);
4388 if (IS_ERR(ud->mmrs[MMR_GCFG]))
4389 return PTR_ERR(ud->mmrs[MMR_GCFG]);
4391 cap2 = udma_read(ud->mmrs[MMR_GCFG], 0x28);
4392 cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
4394 switch (ud->match_data->type) {
4396 ud->rflow_cnt = UDMA_CAP3_RFLOW_CNT(cap3);
4397 ud->tchan_cnt = UDMA_CAP2_TCHAN_CNT(cap2);
4398 ud->echan_cnt = UDMA_CAP2_ECHAN_CNT(cap2);
4399 ud->rchan_cnt = UDMA_CAP2_RCHAN_CNT(cap2);
4401 case DMA_TYPE_BCDMA:
4402 ud->bchan_cnt = BCDMA_CAP2_BCHAN_CNT(cap2);
4403 ud->tchan_cnt = BCDMA_CAP2_TCHAN_CNT(cap2);
4404 ud->rchan_cnt = BCDMA_CAP2_RCHAN_CNT(cap2);
4405 ud->rflow_cnt = ud->rchan_cnt;
4407 case DMA_TYPE_PKTDMA:
4408 cap4 = udma_read(ud->mmrs[MMR_GCFG], 0x30);
4409 ud->tchan_cnt = UDMA_CAP2_TCHAN_CNT(cap2);
4410 ud->rchan_cnt = UDMA_CAP2_RCHAN_CNT(cap2);
4411 ud->rflow_cnt = UDMA_CAP3_RFLOW_CNT(cap3);
4412 ud->tflow_cnt = PKTDMA_CAP4_TFLOW_CNT(cap4);
4418 for (i = 1; i < MMR_LAST; i++) {
4419 if (i == MMR_BCHANRT && ud->bchan_cnt == 0)
4421 if (i == MMR_TCHANRT && ud->tchan_cnt == 0)
4423 if (i == MMR_RCHANRT && ud->rchan_cnt == 0)
4426 ud->mmrs[i] = devm_platform_ioremap_resource_byname(pdev, mmr_names[i]);
4427 if (IS_ERR(ud->mmrs[i]))
4428 return PTR_ERR(ud->mmrs[i]);
4434 static void udma_mark_resource_ranges(struct udma_dev *ud, unsigned long *map,
4435 struct ti_sci_resource_desc *rm_desc,
4438 bitmap_clear(map, rm_desc->start, rm_desc->num);
4439 bitmap_clear(map, rm_desc->start_sec, rm_desc->num_sec);
4440 dev_dbg(ud->dev, "ti_sci resource range for %s: %d:%d | %d:%d\n", name,
4441 rm_desc->start, rm_desc->num, rm_desc->start_sec,
4445 static const char * const range_names[] = {
4446 [RM_RANGE_BCHAN] = "ti,sci-rm-range-bchan",
4447 [RM_RANGE_TCHAN] = "ti,sci-rm-range-tchan",
4448 [RM_RANGE_RCHAN] = "ti,sci-rm-range-rchan",
4449 [RM_RANGE_RFLOW] = "ti,sci-rm-range-rflow",
4450 [RM_RANGE_TFLOW] = "ti,sci-rm-range-tflow",
4453 static int udma_setup_resources(struct udma_dev *ud)
4456 struct device *dev = ud->dev;
4457 struct ti_sci_resource *rm_res, irq_res;
4458 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
4461 /* Set up the throughput level start indexes */
4462 cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
4463 if (of_device_is_compatible(dev->of_node,
4464 "ti,am654-navss-main-udmap")) {
4465 ud->tchan_tpl.levels = 2;
4466 ud->tchan_tpl.start_idx[0] = 8;
4467 } else if (of_device_is_compatible(dev->of_node,
4468 "ti,am654-navss-mcu-udmap")) {
4469 ud->tchan_tpl.levels = 2;
4470 ud->tchan_tpl.start_idx[0] = 2;
4471 } else if (UDMA_CAP3_UCHAN_CNT(cap3)) {
4472 ud->tchan_tpl.levels = 3;
4473 ud->tchan_tpl.start_idx[1] = UDMA_CAP3_UCHAN_CNT(cap3);
4474 ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
4475 } else if (UDMA_CAP3_HCHAN_CNT(cap3)) {
4476 ud->tchan_tpl.levels = 2;
4477 ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
4479 ud->tchan_tpl.levels = 1;
4482 ud->rchan_tpl.levels = ud->tchan_tpl.levels;
4483 ud->rchan_tpl.start_idx[0] = ud->tchan_tpl.start_idx[0];
4484 ud->rchan_tpl.start_idx[1] = ud->tchan_tpl.start_idx[1];
4486 ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
4487 sizeof(unsigned long), GFP_KERNEL);
4488 ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
4490 ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
4491 sizeof(unsigned long), GFP_KERNEL);
4492 ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
4494 ud->rflow_gp_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rflow_cnt),
4495 sizeof(unsigned long),
4497 ud->rflow_gp_map_allocated = devm_kcalloc(dev,
4498 BITS_TO_LONGS(ud->rflow_cnt),
4499 sizeof(unsigned long),
4501 ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
4502 sizeof(unsigned long),
4504 ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
4507 if (!ud->tchan_map || !ud->rchan_map || !ud->rflow_gp_map ||
4508 !ud->rflow_gp_map_allocated || !ud->tchans || !ud->rchans ||
4509 !ud->rflows || !ud->rflow_in_use)
4513 * RX flows with the same IDs as RX channels are reserved to be used
4514 * as default flows if the remote HW can't generate flow_ids. Those
4515 * RX flows can only be requested explicitly, by ID.
4517 bitmap_set(ud->rflow_gp_map_allocated, 0, ud->rchan_cnt);
4519 /* by default no GP rflows are assigned to Linux */
4520 bitmap_set(ud->rflow_gp_map, 0, ud->rflow_cnt);
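/*
 * Example (sketch): with 32 rchans and 96 rflows, flows 0-31 stay
 * reserved as per-channel default flows, and GP flows 32-95 remain
 * marked unavailable until a TISCI rflow range clears them for Linux
 * below.
 */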
4522 /* Get resource ranges from tisci */
4523 for (i = 0; i < RM_RANGE_LAST; i++) {
4524 if (i == RM_RANGE_BCHAN || i == RM_RANGE_TFLOW)
4527 tisci_rm->rm_ranges[i] =
4528 devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
4529 tisci_rm->tisci_dev_id,
4530 (char *)range_names[i]);
4534 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
4535 if (IS_ERR(rm_res)) {
4536 bitmap_zero(ud->tchan_map, ud->tchan_cnt);
4539 bitmap_fill(ud->tchan_map, ud->tchan_cnt);
4540 for (i = 0; i < rm_res->sets; i++)
4541 udma_mark_resource_ranges(ud, ud->tchan_map,
4542 &rm_res->desc[i], "tchan");
4543 irq_res.sets = rm_res->sets;
4546 /* rchan and matching default flow ranges */
4547 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
4548 if (IS_ERR(rm_res)) {
4549 bitmap_zero(ud->rchan_map, ud->rchan_cnt);
4552 bitmap_fill(ud->rchan_map, ud->rchan_cnt);
4553 for (i = 0; i < rm_res->sets; i++)
4554 udma_mark_resource_ranges(ud, ud->rchan_map,
4555 &rm_res->desc[i], "rchan");
4556 irq_res.sets += rm_res->sets;
4559 irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
4562 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
4563 if (IS_ERR(rm_res)) {
4564 irq_res.desc[0].start = 0;
4565 irq_res.desc[0].num = ud->tchan_cnt;
4568 for (i = 0; i < rm_res->sets; i++) {
4569 irq_res.desc[i].start = rm_res->desc[i].start;
4570 irq_res.desc[i].num = rm_res->desc[i].num;
4571 irq_res.desc[i].start_sec = rm_res->desc[i].start_sec;
4572 irq_res.desc[i].num_sec = rm_res->desc[i].num_sec;
4575 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
4576 if (IS_ERR(rm_res)) {
4577 irq_res.desc[i].start = 0;
4578 irq_res.desc[i].num = ud->rchan_cnt;
4580 for (j = 0; j < rm_res->sets; j++, i++) {
4581 if (rm_res->desc[j].num) {
4582 irq_res.desc[i].start = rm_res->desc[j].start +
4583 ud->soc_data->oes.udma_rchan;
4584 irq_res.desc[i].num = rm_res->desc[j].num;
4586 if (rm_res->desc[j].num_sec) {
4587 irq_res.desc[i].start_sec = rm_res->desc[j].start_sec +
4588 ud->soc_data->oes.udma_rchan;
4589 irq_res.desc[i].num_sec = rm_res->desc[j].num_sec;
4593 ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
4594 kfree(irq_res.desc);
4596 dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
4600 /* GP rflow ranges */
4601 rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
4602 if (IS_ERR(rm_res)) {
4603 /* all gp flows are assigned exclusively to Linux */
4604 bitmap_clear(ud->rflow_gp_map, ud->rchan_cnt,
4605 ud->rflow_cnt - ud->rchan_cnt);
4607 for (i = 0; i < rm_res->sets; i++)
4608 udma_mark_resource_ranges(ud, ud->rflow_gp_map,
4609 &rm_res->desc[i], "gp-rflow");
4615 static int bcdma_setup_resources(struct udma_dev *ud)
4618 struct device *dev = ud->dev;
4619 struct ti_sci_resource *rm_res, irq_res;
4620 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
4621 const struct udma_oes_offsets *oes = &ud->soc_data->oes;
4624 /* Set up the throughput level start indexes */
4625 cap = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
4626 if (BCDMA_CAP3_UBCHAN_CNT(cap)) {
4627 ud->bchan_tpl.levels = 3;
4628 ud->bchan_tpl.start_idx[1] = BCDMA_CAP3_UBCHAN_CNT(cap);
4629 ud->bchan_tpl.start_idx[0] = BCDMA_CAP3_HBCHAN_CNT(cap);
4630 } else if (BCDMA_CAP3_HBCHAN_CNT(cap)) {
4631 ud->bchan_tpl.levels = 2;
4632 ud->bchan_tpl.start_idx[0] = BCDMA_CAP3_HBCHAN_CNT(cap);
4634 ud->bchan_tpl.levels = 1;
4637 cap = udma_read(ud->mmrs[MMR_GCFG], 0x30);
4638 if (BCDMA_CAP4_URCHAN_CNT(cap)) {
4639 ud->rchan_tpl.levels = 3;
4640 ud->rchan_tpl.start_idx[1] = BCDMA_CAP4_URCHAN_CNT(cap);
4641 ud->rchan_tpl.start_idx[0] = BCDMA_CAP4_HRCHAN_CNT(cap);
4642 } else if (BCDMA_CAP4_HRCHAN_CNT(cap)) {
4643 ud->rchan_tpl.levels = 2;
4644 ud->rchan_tpl.start_idx[0] = BCDMA_CAP4_HRCHAN_CNT(cap);
4646 ud->rchan_tpl.levels = 1;
4649 if (BCDMA_CAP4_UTCHAN_CNT(cap)) {
4650 ud->tchan_tpl.levels = 3;
4651 ud->tchan_tpl.start_idx[1] = BCDMA_CAP4_UTCHAN_CNT(cap);
4652 ud->tchan_tpl.start_idx[0] = BCDMA_CAP4_HTCHAN_CNT(cap);
4653 } else if (BCDMA_CAP4_HTCHAN_CNT(cap)) {
4654 ud->tchan_tpl.levels = 2;
4655 ud->tchan_tpl.start_idx[0] = BCDMA_CAP4_HTCHAN_CNT(cap);
4657 ud->tchan_tpl.levels = 1;
4660 ud->bchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->bchan_cnt),
4661 sizeof(unsigned long), GFP_KERNEL);
4662 ud->bchans = devm_kcalloc(dev, ud->bchan_cnt, sizeof(*ud->bchans),
4664 ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
4665 sizeof(unsigned long), GFP_KERNEL);
4666 ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
4668 ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
4669 sizeof(unsigned long), GFP_KERNEL);
4670 ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
4672 /* BCDMA does not really have flows, but the driver expects them */
4673 ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rchan_cnt),
4674 sizeof(unsigned long),
4676 ud->rflows = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rflows),
4679 if (!ud->bchan_map || !ud->tchan_map || !ud->rchan_map ||
4680 !ud->rflow_in_use || !ud->bchans || !ud->tchans || !ud->rchans ||
4684 /* Get resource ranges from tisci */
4685 for (i = 0; i < RM_RANGE_LAST; i++) {
4686 if (i == RM_RANGE_RFLOW || i == RM_RANGE_TFLOW)
4688 if (i == RM_RANGE_BCHAN && ud->bchan_cnt == 0)
4690 if (i == RM_RANGE_TCHAN && ud->tchan_cnt == 0)
4692 if (i == RM_RANGE_RCHAN && ud->rchan_cnt == 0)
4695 tisci_rm->rm_ranges[i] =
4696 devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
4697 tisci_rm->tisci_dev_id,
4698 (char *)range_names[i]);
4704 if (ud->bchan_cnt) {
4705 rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN];
4706 if (IS_ERR(rm_res)) {
4707 bitmap_zero(ud->bchan_map, ud->bchan_cnt);
4710 bitmap_fill(ud->bchan_map, ud->bchan_cnt);
4711 for (i = 0; i < rm_res->sets; i++)
4712 udma_mark_resource_ranges(ud, ud->bchan_map,
4715 irq_res.sets += rm_res->sets;
4720 if (ud->tchan_cnt) {
4721 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
4722 if (IS_ERR(rm_res)) {
4723 bitmap_zero(ud->tchan_map, ud->tchan_cnt);
4726 bitmap_fill(ud->tchan_map, ud->tchan_cnt);
4727 for (i = 0; i < rm_res->sets; i++)
4728 udma_mark_resource_ranges(ud, ud->tchan_map,
4731 irq_res.sets += rm_res->sets * 2;
4736 if (ud->rchan_cnt) {
4737 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
4738 if (IS_ERR(rm_res)) {
4739 bitmap_zero(ud->rchan_map, ud->rchan_cnt);
4742 bitmap_fill(ud->rchan_map, ud->rchan_cnt);
4743 for (i = 0; i < rm_res->sets; i++)
4744 udma_mark_resource_ranges(ud, ud->rchan_map,
4747 irq_res.sets += rm_res->sets * 2;
4751 irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
4754 if (ud->bchan_cnt) {
4755 rm_res = tisci_rm->rm_ranges[RM_RANGE_BCHAN];
4756 if (IS_ERR(rm_res)) {
4757 irq_res.desc[0].start = oes->bcdma_bchan_ring;
4758 irq_res.desc[0].num = ud->bchan_cnt;
4761 for (i = 0; i < rm_res->sets; i++) {
4762 irq_res.desc[i].start = rm_res->desc[i].start +
4763 oes->bcdma_bchan_ring;
4764 irq_res.desc[i].num = rm_res->desc[i].num;
4768 if (ud->tchan_cnt) {
4769 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
4770 if (IS_ERR(rm_res)) {
4771 irq_res.desc[i].start = oes->bcdma_tchan_data;
4772 irq_res.desc[i].num = ud->tchan_cnt;
4773 irq_res.desc[i + 1].start = oes->bcdma_tchan_ring;
4774 irq_res.desc[i + 1].num = ud->tchan_cnt;
4777 for (j = 0; j < rm_res->sets; j++, i += 2) {
4778 irq_res.desc[i].start = rm_res->desc[j].start +
4779 oes->bcdma_tchan_data;
4780 irq_res.desc[i].num = rm_res->desc[j].num;
4782 irq_res.desc[i + 1].start = rm_res->desc[j].start +
4783 oes->bcdma_tchan_ring;
4784 irq_res.desc[i + 1].num = rm_res->desc[j].num;
4788 if (ud->rchan_cnt) {
4789 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
4790 if (IS_ERR(rm_res)) {
4791 irq_res.desc[i].start = oes->bcdma_rchan_data;
4792 irq_res.desc[i].num = ud->rchan_cnt;
4793 irq_res.desc[i + 1].start = oes->bcdma_rchan_ring;
4794 irq_res.desc[i + 1].num = ud->rchan_cnt;
4797 for (j = 0; j < rm_res->sets; j++, i += 2) {
4798 irq_res.desc[i].start = rm_res->desc[j].start +
4799 oes->bcdma_rchan_data;
4800 irq_res.desc[i].num = rm_res->desc[j].num;
4802 irq_res.desc[i + 1].start = rm_res->desc[j].start +
4803 oes->bcdma_rchan_ring;
4804 irq_res.desc[i + 1].num = rm_res->desc[j].num;
4809 ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
4810 kfree(irq_res.desc);
4812 dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
4819 static int pktdma_setup_resources(struct udma_dev *ud)
4822 struct device *dev = ud->dev;
4823 struct ti_sci_resource *rm_res, irq_res;
4824 struct udma_tisci_rm *tisci_rm = &ud->tisci_rm;
4825 const struct udma_oes_offsets *oes = &ud->soc_data->oes;
4828 /* Set up the throughput level start indexes */
4829 cap3 = udma_read(ud->mmrs[MMR_GCFG], 0x2c);
4830 if (UDMA_CAP3_UCHAN_CNT(cap3)) {
4831 ud->tchan_tpl.levels = 3;
4832 ud->tchan_tpl.start_idx[1] = UDMA_CAP3_UCHAN_CNT(cap3);
4833 ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
4834 } else if (UDMA_CAP3_HCHAN_CNT(cap3)) {
4835 ud->tchan_tpl.levels = 2;
4836 ud->tchan_tpl.start_idx[0] = UDMA_CAP3_HCHAN_CNT(cap3);
4838 ud->tchan_tpl.levels = 1;
4841 ud->rchan_tpl.levels = ud->tchan_tpl.levels;
4842 ud->rchan_tpl.start_idx[0] = ud->tchan_tpl.start_idx[0];
4843 ud->rchan_tpl.start_idx[1] = ud->tchan_tpl.start_idx[1];
4845 ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
4846 sizeof(unsigned long), GFP_KERNEL);
4847 ud->tchans = devm_kcalloc(dev, ud->tchan_cnt, sizeof(*ud->tchans),
4849 ud->rchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->rchan_cnt),
4850 sizeof(unsigned long), GFP_KERNEL);
4851 ud->rchans = devm_kcalloc(dev, ud->rchan_cnt, sizeof(*ud->rchans),
4853 ud->rflow_in_use = devm_kcalloc(dev, BITS_TO_LONGS(ud->rflow_cnt),
4854 sizeof(unsigned long),
4855 GFP_KERNEL);
4856 ud->rflows = devm_kcalloc(dev, ud->rflow_cnt, sizeof(*ud->rflows),
4857 GFP_KERNEL);
4858 ud->tflow_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tflow_cnt),
4859 sizeof(unsigned long), GFP_KERNEL);
4861 if (!ud->tchan_map || !ud->rchan_map || !ud->tflow_map || !ud->tchans ||
4862 !ud->rchans || !ud->rflows || !ud->rflow_in_use)
4863 return -ENOMEM;
4865 /* Get resource ranges from tisci */
4866 for (i = 0; i < RM_RANGE_LAST; i++) {
4867 if (i == RM_RANGE_BCHAN)
4868 continue;
4870 tisci_rm->rm_ranges[i] =
4871 devm_ti_sci_get_of_resource(tisci_rm->tisci, dev,
4872 tisci_rm->tisci_dev_id,
4873 (char *)range_names[i]);
4877 rm_res = tisci_rm->rm_ranges[RM_RANGE_TCHAN];
4878 if (IS_ERR(rm_res)) {
4879 bitmap_zero(ud->tchan_map, ud->tchan_cnt);
4880 } else {
4881 bitmap_fill(ud->tchan_map, ud->tchan_cnt);
4882 for (i = 0; i < rm_res->sets; i++)
4883 udma_mark_resource_ranges(ud, ud->tchan_map,
4884 &rm_res->desc[i], "tchan");
4888 rm_res = tisci_rm->rm_ranges[RM_RANGE_RCHAN];
4889 if (IS_ERR(rm_res)) {
4890 bitmap_zero(ud->rchan_map, ud->rchan_cnt);
4891 } else {
4892 bitmap_fill(ud->rchan_map, ud->rchan_cnt);
4893 for (i = 0; i < rm_res->sets; i++)
4894 udma_mark_resource_ranges(ud, ud->rchan_map,
4895 &rm_res->desc[i], "rchan");
4899 rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
4900 if (IS_ERR(rm_res)) {
4901 /* all rflows are assigned exclusively to Linux */
4902 bitmap_zero(ud->rflow_in_use, ud->rflow_cnt);
4903 irq_res.sets = 1;
4904 } else {
4905 bitmap_fill(ud->rflow_in_use, ud->rflow_cnt);
4906 for (i = 0; i < rm_res->sets; i++)
4907 udma_mark_resource_ranges(ud, ud->rflow_in_use,
4908 &rm_res->desc[i], "rflow");
4909 irq_res.sets = rm_res->sets;
4913 rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW];
4914 if (IS_ERR(rm_res)) {
4915 /* all tflows are assigned exclusively to Linux */
4916 bitmap_zero(ud->tflow_map, ud->tflow_cnt);
4917 irq_res.sets++;
4918 } else {
4919 bitmap_fill(ud->tflow_map, ud->tflow_cnt);
4920 for (i = 0; i < rm_res->sets; i++)
4921 udma_mark_resource_ranges(ud, ud->tflow_map,
4922 &rm_res->desc[i], "tflow");
4923 irq_res.sets += rm_res->sets;
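/*
 * Note on the bitmaps: when TISCI provides explicit ranges, each map is
 * first filled (everything marked unavailable) and
 * udma_mark_resource_ranges() then clears the ranges granted to this host,
 * so a set bit means "owned by another host/core". Without ranges the map
 * is zeroed and every resource is usable by Linux.
 */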
4926 irq_res.desc = kcalloc(irq_res.sets, sizeof(*irq_res.desc), GFP_KERNEL);
4927 if (!irq_res.desc)
4928 return -ENOMEM;
4929 rm_res = tisci_rm->rm_ranges[RM_RANGE_TFLOW];
4930 if (IS_ERR(rm_res)) {
4931 irq_res.desc[0].start = oes->pktdma_tchan_flow;
4932 irq_res.desc[0].num = ud->tflow_cnt;
4933 i = 1;
4934 } else {
4935 for (i = 0; i < rm_res->sets; i++) {
4936 irq_res.desc[i].start = rm_res->desc[i].start +
4937 oes->pktdma_tchan_flow;
4938 irq_res.desc[i].num = rm_res->desc[i].num;
4941 rm_res = tisci_rm->rm_ranges[RM_RANGE_RFLOW];
4942 if (IS_ERR(rm_res)) {
4943 irq_res.desc[i].start = oes->pktdma_rchan_flow;
4944 irq_res.desc[i].num = ud->rflow_cnt;
4945 } else {
4946 for (j = 0; j < rm_res->sets; j++, i++) {
4947 irq_res.desc[i].start = rm_res->desc[j].start +
4948 oes->pktdma_rchan_flow;
4949 irq_res.desc[i].num = rm_res->desc[j].num;
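/*
 * Note: PKTDMA signals completion per flow rather than per channel, so the
 * MSI ranges above are built from the tflow/rflow ranges shifted by the
 * pktdma_tchan_flow/pktdma_rchan_flow output event offsets.
 */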
4952 ret = ti_sci_inta_msi_domain_alloc_irqs(ud->dev, &irq_res);
4953 kfree(irq_res.desc);
4954 if (ret) {
4955 dev_err(ud->dev, "Failed to allocate MSI interrupts\n");
4956 return ret;
4957 }
4962 static int setup_resources(struct udma_dev *ud)
4964 struct device *dev = ud->dev;
4965 int ch_count, ret;
4967 switch (ud->match_data->type) {
4968 case DMA_TYPE_UDMA:
4969 ret = udma_setup_resources(ud);
4970 break;
4971 case DMA_TYPE_BCDMA:
4972 ret = bcdma_setup_resources(ud);
4973 break;
4974 case DMA_TYPE_PKTDMA:
4975 ret = pktdma_setup_resources(ud);
4976 break;
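/*
 * The usable channel count is the total number of channels minus those
 * whose bits are still set in the maps (i.e. reserved for other hosts);
 * that is what the bitmap_weight() subtractions below compute.
 */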
4984 ch_count = ud->bchan_cnt + ud->tchan_cnt + ud->rchan_cnt;
4985 if (ud->bchan_cnt)
4986 ch_count -= bitmap_weight(ud->bchan_map, ud->bchan_cnt);
4987 ch_count -= bitmap_weight(ud->tchan_map, ud->tchan_cnt);
4988 ch_count -= bitmap_weight(ud->rchan_map, ud->rchan_cnt);
4989 if (!ch_count)
4990 return -ENODEV;
4992 ud->channels = devm_kcalloc(dev, ch_count, sizeof(*ud->channels),
4993 GFP_KERNEL);
4994 if (!ud->channels)
4995 return -ENOMEM;
4997 switch (ud->match_data->type) {
4998 case DMA_TYPE_UDMA:
4999 dev_info(dev,
5000 "Channels: %d (tchan: %u, rchan: %u, gp-rflow: %u)\n",
5001 ch_count,
5002 ud->tchan_cnt - bitmap_weight(ud->tchan_map,
5003 ud->tchan_cnt),
5004 ud->rchan_cnt - bitmap_weight(ud->rchan_map,
5005 ud->rchan_cnt),
5006 ud->rflow_cnt - bitmap_weight(ud->rflow_gp_map,
5007 ud->rflow_cnt));
5008 break;
5009 case DMA_TYPE_BCDMA:
5010 dev_info(dev,
5011 "Channels: %d (bchan: %u, tchan: %u, rchan: %u)\n",
5012 ch_count,
5013 ud->bchan_cnt - bitmap_weight(ud->bchan_map,
5014 ud->bchan_cnt),
5015 ud->tchan_cnt - bitmap_weight(ud->tchan_map,
5016 ud->tchan_cnt),
5017 ud->rchan_cnt - bitmap_weight(ud->rchan_map,
5018 ud->rchan_cnt));
5019 break;
5020 case DMA_TYPE_PKTDMA:
5021 dev_info(dev,
5022 "Channels: %d (tchan: %u, rchan: %u)\n",
5023 ch_count,
5024 ud->tchan_cnt - bitmap_weight(ud->tchan_map,
5025 ud->tchan_cnt),
5026 ud->rchan_cnt - bitmap_weight(ud->rchan_map,
5027 ud->rchan_cnt));
5028 break;
5036 static int udma_setup_rx_flush(struct udma_dev *ud)
5038 struct udma_rx_flush *rx_flush = &ud->rx_flush;
5039 struct cppi5_desc_hdr_t *tr_desc;
5040 struct cppi5_tr_type1_t *tr_req;
5041 struct cppi5_host_desc_t *desc;
5042 struct device *dev = ud->dev;
5043 struct udma_hwdesc *hwdesc;
5044 size_t tr_size;
5046 /* Allocate 1K buffer for discarded data on RX channel teardown */
5047 rx_flush->buffer_size = SZ_1K;
5048 rx_flush->buffer_vaddr = devm_kzalloc(dev, rx_flush->buffer_size,
5049 GFP_KERNEL);
5050 if (!rx_flush->buffer_vaddr)
5051 return -ENOMEM;
5053 rx_flush->buffer_paddr = dma_map_single(dev, rx_flush->buffer_vaddr,
5054 rx_flush->buffer_size,
5055 DMA_TO_DEVICE);
5056 if (dma_mapping_error(dev, rx_flush->buffer_paddr))
5057 return -ENOMEM;
5059 /* Set up descriptor to be used for TR mode */
5060 hwdesc = &rx_flush->hwdescs[0];
5061 tr_size = sizeof(struct cppi5_tr_type1_t);
5062 hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size, 1);
5063 hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size,
5064 ud->desc_align);
5066 hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size,
5067 GFP_KERNEL);
5068 if (!hwdesc->cppi5_desc_vaddr)
5069 return -ENOMEM;
5071 hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr,
5072 hwdesc->cppi5_desc_size,
5073 DMA_TO_DEVICE);
5074 if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr))
5075 return -ENOMEM;
5077 /* Start of the TR req records */
5078 hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size;
5079 /* Start address of the TR response array */
5080 hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size;
5082 tr_desc = hwdesc->cppi5_desc_vaddr;
5083 cppi5_trdesc_init(tr_desc, 1, tr_size, 0, 0);
5084 cppi5_desc_set_pktids(tr_desc, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT);
5085 cppi5_desc_set_retpolicy(tr_desc, 0, 0);
5087 tr_req = hwdesc->tr_req_base;
5088 cppi5_tr_init(&tr_req->flags, CPPI5_TR_TYPE1, false, false,
5089 CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
5090 cppi5_tr_csf_set(&tr_req->flags, CPPI5_TR_CSF_SUPR_EVT);
5092 tr_req->addr = rx_flush->buffer_paddr;
5093 tr_req->icnt0 = rx_flush->buffer_size;
5094 tr_req->icnt1 = 1;
5096 dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr,
5097 hwdesc->cppi5_desc_size, DMA_TO_DEVICE);
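/*
 * The TR-mode flush descriptor above is a single type 1 TR that simply
 * lands whatever is left in the channel into the scratch buffer, with
 * event generation suppressed; it only needs to be synced to memory once
 * here since the hardware reads it directly during RX teardown.
 */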
5099 /* Set up descriptor to be used for packet mode */
5100 hwdesc = &rx_flush->hwdescs[1];
5101 hwdesc->cppi5_desc_size = ALIGN(sizeof(struct cppi5_host_desc_t) +
5102 CPPI5_INFO0_HDESC_EPIB_SIZE +
5103 CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE,
5104 ud->desc_align);
5106 hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size,
5107 GFP_KERNEL);
5108 if (!hwdesc->cppi5_desc_vaddr)
5109 return -ENOMEM;
5111 hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr,
5112 hwdesc->cppi5_desc_size,
5113 DMA_TO_DEVICE);
5114 if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr))
5115 return -ENOMEM;
5117 desc = hwdesc->cppi5_desc_vaddr;
5118 cppi5_hdesc_init(desc, 0, 0);
5119 cppi5_desc_set_pktids(&desc->hdr, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT);
5120 cppi5_desc_set_retpolicy(&desc->hdr, 0, 0);
5122 cppi5_hdesc_attach_buf(desc,
5123 rx_flush->buffer_paddr, rx_flush->buffer_size,
5124 rx_flush->buffer_paddr, rx_flush->buffer_size);
5126 dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr,
5127 hwdesc->cppi5_desc_size, DMA_TO_DEVICE);
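/*
 * Packet-mode channels consume host descriptors rather than TR
 * descriptors, so a second flavour pointing at the same scratch buffer is
 * prepared here; the teardown path presumably picks hwdescs[0] or
 * hwdescs[1] depending on the channel's mode.
 */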
5131 #ifdef CONFIG_DEBUG_FS
5132 static void udma_dbg_summary_show_chan(struct seq_file *s,
5133 struct dma_chan *chan)
5135 struct udma_chan *uc = to_udma_chan(chan);
5136 struct udma_chan_config *ucc = &uc->config;
5138 seq_printf(s, " %-13s| %s", dma_chan_name(chan),
5139 chan->dbg_client_name ?: "in-use");
5140 if (ucc->tr_trigger_type)
5141 seq_puts(s, " (triggered, ");
5142 else
5143 seq_printf(s, " (%s, ",
5144 dmaengine_get_direction_text(uc->config.dir));
5146 switch (uc->config.dir) {
5147 case DMA_MEM_TO_MEM:
5148 if (uc->ud->match_data->type == DMA_TYPE_BCDMA) {
5149 seq_printf(s, "bchan%d)\n", uc->bchan->id);
5150 return;
5151 }
5153 seq_printf(s, "chan%d pair [0x%04x -> 0x%04x], ", uc->tchan->id,
5154 ucc->src_thread, ucc->dst_thread);
5155 break;
5156 case DMA_DEV_TO_MEM:
5157 seq_printf(s, "rchan%d [0x%04x -> 0x%04x], ", uc->rchan->id,
5158 ucc->src_thread, ucc->dst_thread);
5159 if (uc->ud->match_data->type == DMA_TYPE_PKTDMA)
5160 seq_printf(s, "rflow%d, ", uc->rflow->id);
5161 break;
5162 case DMA_MEM_TO_DEV:
5163 seq_printf(s, "tchan%d [0x%04x -> 0x%04x], ", uc->tchan->id,
5164 ucc->src_thread, ucc->dst_thread);
5165 if (uc->ud->match_data->type == DMA_TYPE_PKTDMA)
5166 seq_printf(s, "tflow%d, ", uc->tchan->tflow_id);
5167 break;
5168 default:
5169 seq_printf(s, ")\n");
5170 return;
5171 }
5173 if (ucc->ep_type == PSIL_EP_NATIVE) {
5174 seq_printf(s, "PSI-L Native");
5175 if (ucc->metadata_size) {
5176 seq_printf(s, "[%s", ucc->needs_epib ? " EPIB" : "");
5177 if (ucc->psd_size)
5178 seq_printf(s, " PSDsize:%u", ucc->psd_size);
5179 seq_printf(s, " ]");
5180 }
5181 } else {
5182 seq_printf(s, "PDMA");
5183 if (ucc->enable_acc32 || ucc->enable_burst)
5184 seq_printf(s, "[%s%s ]",
5185 ucc->enable_acc32 ? " ACC32" : "",
5186 ucc->enable_burst ? " BURST" : "");
5189 seq_printf(s, ", %s)\n", ucc->pkt_mode ? "Packet mode" : "TR mode");
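/*
 * Illustrative example (made-up channel ids and PSI-L thread ids) of one
 * summary line produced above for a PKTDMA RX channel:
 *
 *   dma0chan3     | in-use (DEV_TO_MEM, rchan6 [0x4a00 -> 0xc406], rflow6, PSI-L Native, Packet mode)
 */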
5192 static void udma_dbg_summary_show(struct seq_file *s,
5193 struct dma_device *dma_dev)
5195 struct dma_chan *chan;
5197 list_for_each_entry(chan, &dma_dev->channels, device_node) {
5198 if (chan->client_count)
5199 udma_dbg_summary_show_chan(s, chan);
5202 #endif /* CONFIG_DEBUG_FS */
5204 static enum dmaengine_alignment udma_get_copy_align(struct udma_dev *ud)
5206 const struct udma_match_data *match_data = ud->match_data;
5209 if (!match_data->enable_memcpy_support)
5210 return DMAENGINE_ALIGN_8_BYTES;
5212 /* Get the highest TPL level the device supports for memcpy */
5213 if (ud->bchan_cnt)
5214 tpl = udma_get_chan_tpl_index(&ud->bchan_tpl, 0);
5215 else if (ud->tchan_cnt)
5216 tpl = udma_get_chan_tpl_index(&ud->tchan_tpl, 0);
5217 else
5218 return DMAENGINE_ALIGN_8_BYTES;
5220 switch (match_data->burst_size[tpl]) {
5221 case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_256_BYTES:
5222 return DMAENGINE_ALIGN_256_BYTES;
5223 case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_128_BYTES:
5224 return DMAENGINE_ALIGN_128_BYTES;
5225 case TI_SCI_RM_UDMAP_CHAN_BURST_SIZE_64_BYTES:
5226 default:
5228 return DMAENGINE_ALIGN_64_BYTES;
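/*
 * The alignment returned above is derived from the burst size configured
 * for the best available channel tier, so memcpy clients are told to align
 * buffers and lengths to what the hardware will actually burst.
 */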
5232 #define TI_UDMAC_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
5233 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
5234 BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
5235 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
5236 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
5238 static int udma_probe(struct platform_device *pdev)
5240 struct device_node *navss_node = pdev->dev.parent->of_node;
5241 const struct soc_device_attribute *soc;
5242 struct device *dev = &pdev->dev;
5243 struct udma_dev *ud;
5244 const struct of_device_id *match;
5245 int i, ret;
5246 int ch_count;
5248 ret = dma_coerce_mask_and_coherent(dev, DMA_BIT_MASK(48));
5249 if (ret)
5250 dev_err(dev, "failed to set dma mask stuff\n");
5252 ud = devm_kzalloc(dev, sizeof(*ud), GFP_KERNEL);
5253 if (!ud)
5254 return -ENOMEM;
5256 match = of_match_node(udma_of_match, dev->of_node);
5257 if (!match)
5258 match = of_match_node(bcdma_of_match, dev->of_node);
5259 if (!match) {
5260 match = of_match_node(pktdma_of_match, dev->of_node);
5261 if (!match) {
5262 dev_err(dev, "No compatible match found\n");
5263 return -ENODEV;
5264 }
5265 }
5266 ud->match_data = match->data;
5268 soc = soc_device_match(k3_soc_devices);
5269 if (!soc) {
5270 dev_err(dev, "No compatible SoC found\n");
5271 return -ENODEV;
5272 }
5273 ud->soc_data = soc->data;
5275 ret = udma_get_mmrs(pdev, ud);
5276 if (ret)
5277 return ret;
5279 ud->tisci_rm.tisci = ti_sci_get_by_phandle(dev->of_node, "ti,sci");
5280 if (IS_ERR(ud->tisci_rm.tisci))
5281 return PTR_ERR(ud->tisci_rm.tisci);
5283 ret = of_property_read_u32(dev->of_node, "ti,sci-dev-id",
5284 &ud->tisci_rm.tisci_dev_id);
5285 if (ret) {
5286 dev_err(dev, "ti,sci-dev-id read failure %d\n", ret);
5287 return ret;
5288 }
5289 pdev->id = ud->tisci_rm.tisci_dev_id;
5291 ret = of_property_read_u32(navss_node, "ti,sci-dev-id",
5292 &ud->tisci_rm.tisci_navss_dev_id);
5293 if (ret) {
5294 dev_err(dev, "NAVSS ti,sci-dev-id read failure %d\n", ret);
5295 return ret;
5296 }
5298 if (ud->match_data->type == DMA_TYPE_UDMA) {
5299 ret = of_property_read_u32(dev->of_node, "ti,udma-atype",
5300 &ud->atype);
5301 if (!ret && ud->atype > 2) {
5302 dev_err(dev, "Invalid atype: %u\n", ud->atype);
5303 return -EINVAL;
5304 }
5305 } else {
5306 ret = of_property_read_u32(dev->of_node, "ti,asel",
5307 &ud->asel);
5308 if (!ret && ud->asel > 15) {
5309 dev_err(dev, "Invalid asel: %u\n", ud->asel);
5310 return -EINVAL;
5311 }
5312 }
5314 ud->tisci_rm.tisci_udmap_ops = &ud->tisci_rm.tisci->ops.rm_udmap_ops;
5315 ud->tisci_rm.tisci_psil_ops = &ud->tisci_rm.tisci->ops.rm_psil_ops;
5317 if (ud->match_data->type == DMA_TYPE_UDMA) {
5318 ud->ringacc = of_k3_ringacc_get_by_phandle(dev->of_node, "ti,ringacc");
5319 } else {
5320 struct k3_ringacc_init_data ring_init_data;
5322 ring_init_data.tisci = ud->tisci_rm.tisci;
5323 ring_init_data.tisci_dev_id = ud->tisci_rm.tisci_dev_id;
5324 if (ud->match_data->type == DMA_TYPE_BCDMA) {
5325 ring_init_data.num_rings = ud->bchan_cnt +
5326 ud->tchan_cnt +
5327 ud->rchan_cnt;
5328 } else {
5329 ring_init_data.num_rings = ud->rflow_cnt +
5330 ud->tflow_cnt;
5331 }
5333 ud->ringacc = k3_ringacc_dmarings_init(pdev, &ring_init_data);
5336 if (IS_ERR(ud->ringacc))
5337 return PTR_ERR(ud->ringacc);
5339 dev->msi.domain = of_msi_get_domain(dev, dev->of_node,
5340 DOMAIN_BUS_TI_SCI_INTA_MSI);
5341 if (!dev->msi.domain) {
5342 dev_err(dev, "Failed to get MSI domain\n");
5343 return -EPROBE_DEFER;
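/*
 * The INTA MSI domain is provided by the TI SCI INTA irqchip/MSI layer; if
 * that has not probed yet the lookup fails, so defer rather than fail hard.
 */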
5346 dma_cap_set(DMA_SLAVE, ud->ddev.cap_mask);
5347 /* cyclic operation is not supported via PKTDMA */
5348 if (ud->match_data->type != DMA_TYPE_PKTDMA) {
5349 dma_cap_set(DMA_CYCLIC, ud->ddev.cap_mask);
5350 ud->ddev.device_prep_dma_cyclic = udma_prep_dma_cyclic;
5353 ud->ddev.device_config = udma_slave_config;
5354 ud->ddev.device_prep_slave_sg = udma_prep_slave_sg;
5355 ud->ddev.device_issue_pending = udma_issue_pending;
5356 ud->ddev.device_tx_status = udma_tx_status;
5357 ud->ddev.device_pause = udma_pause;
5358 ud->ddev.device_resume = udma_resume;
5359 ud->ddev.device_terminate_all = udma_terminate_all;
5360 ud->ddev.device_synchronize = udma_synchronize;
5361 #ifdef CONFIG_DEBUG_FS
5362 ud->ddev.dbg_summary_show = udma_dbg_summary_show;
5365 switch (ud->match_data->type) {
5366 case DMA_TYPE_UDMA:
5367 ud->ddev.device_alloc_chan_resources =
5368 udma_alloc_chan_resources;
5369 break;
5370 case DMA_TYPE_BCDMA:
5371 ud->ddev.device_alloc_chan_resources =
5372 bcdma_alloc_chan_resources;
5373 ud->ddev.device_router_config = bcdma_router_config;
5374 break;
5375 case DMA_TYPE_PKTDMA:
5376 ud->ddev.device_alloc_chan_resources =
5377 pktdma_alloc_chan_resources;
5378 break;
5379 default:
5380 return -EINVAL;
5381 }
5382 ud->ddev.device_free_chan_resources = udma_free_chan_resources;
5384 ud->ddev.src_addr_widths = TI_UDMAC_BUSWIDTHS;
5385 ud->ddev.dst_addr_widths = TI_UDMAC_BUSWIDTHS;
5386 ud->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
5387 ud->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
5388 ud->ddev.desc_metadata_modes = DESC_METADATA_CLIENT |
5389 DESC_METADATA_ENGINE;
5390 if (ud->match_data->enable_memcpy_support &&
5391 !(ud->match_data->type == DMA_TYPE_BCDMA && ud->bchan_cnt == 0)) {
5392 dma_cap_set(DMA_MEMCPY, ud->ddev.cap_mask);
5393 ud->ddev.device_prep_dma_memcpy = udma_prep_dma_memcpy;
5394 ud->ddev.directions |= BIT(DMA_MEM_TO_MEM);
5399 ud->psil_base = ud->match_data->psil_base;
5401 INIT_LIST_HEAD(&ud->ddev.channels);
5402 INIT_LIST_HEAD(&ud->desc_to_purge);
5404 ch_count = setup_resources(ud);
5405 if (ch_count <= 0)
5406 return ch_count;
5408 spin_lock_init(&ud->lock);
5409 INIT_WORK(&ud->purge_work, udma_purge_desc_work);
5411 ud->desc_align = 64;
5412 if (ud->desc_align < dma_get_cache_alignment())
5413 ud->desc_align = dma_get_cache_alignment();
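/*
 * Descriptors are kept at least cache-line aligned so each one can be
 * mapped/synced for DMA independently without interfering with its
 * neighbours.
 */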
5415 ret = udma_setup_rx_flush(ud);
5416 if (ret)
5417 return ret;
5419 for (i = 0; i < ud->bchan_cnt; i++) {
5420 struct udma_bchan *bchan = &ud->bchans[i];
5422 bchan->id = i;
5423 bchan->reg_rt = ud->mmrs[MMR_BCHANRT] + i * 0x1000;
5426 for (i = 0; i < ud->tchan_cnt; i++) {
5427 struct udma_tchan *tchan = &ud->tchans[i];
5429 tchan->id = i;
5430 tchan->reg_rt = ud->mmrs[MMR_TCHANRT] + i * 0x1000;
5433 for (i = 0; i < ud->rchan_cnt; i++) {
5434 struct udma_rchan *rchan = &ud->rchans[i];
5436 rchan->id = i;
5437 rchan->reg_rt = ud->mmrs[MMR_RCHANRT] + i * 0x1000;
5440 for (i = 0; i < ud->rflow_cnt; i++) {
5441 struct udma_rflow *rflow = &ud->rflows[i];
5443 rflow->id = i;
5446 for (i = 0; i < ch_count; i++) {
5447 struct udma_chan *uc = &ud->channels[i];
5450 uc->vc.desc_free = udma_desc_free;
5455 uc->config.remote_thread_id = -1;
5456 uc->config.mapped_channel_id = -1;
5457 uc->config.default_flow_id = -1;
5458 uc->config.dir = DMA_MEM_TO_MEM;
5459 uc->name = devm_kasprintf(dev, GFP_KERNEL, "%s chan%d",
5460 dev_name(dev), i);
5462 vchan_init(&uc->vc, &ud->ddev);
5463 /* Use custom vchan completion handling */
5464 tasklet_setup(&uc->vc.task, udma_vchan_complete);
5465 init_completion(&uc->teardown_completed);
5466 INIT_DELAYED_WORK(&uc->tx_drain.work, udma_check_tx_completion);
5469 /* Configure the copy_align to the maximum burst size the device supports */
5470 ud->ddev.copy_align = udma_get_copy_align(ud);
5472 ret = dma_async_device_register(&ud->ddev);
5473 if (ret) {
5474 dev_err(dev, "failed to register slave DMA engine: %d\n", ret);
5475 return ret;
5476 }
5478 platform_set_drvdata(pdev, ud);
5480 ret = of_dma_controller_register(dev->of_node, udma_of_xlate, ud);
5481 if (ret) {
5482 dev_err(dev, "failed to register of_dma controller\n");
5483 dma_async_device_unregister(&ud->ddev);
5484 return ret;
5485 }
5489 static struct platform_driver udma_driver = {
5490 .driver = {
5491 .name = "ti-udma",
5492 .of_match_table = udma_of_match,
5493 .suppress_bind_attrs = true,
5495 .probe = udma_probe,
5497 builtin_platform_driver(udma_driver);
5499 static struct platform_driver bcdma_driver = {
5500 .driver = {
5501 .name = "ti-bcdma",
5502 .of_match_table = bcdma_of_match,
5503 .suppress_bind_attrs = true,
5505 .probe = udma_probe,
5507 builtin_platform_driver(bcdma_driver);
5509 static struct platform_driver pktdma_driver = {
5510 .driver = {
5511 .name = "ti-pktdma",
5512 .of_match_table = pktdma_of_match,
5513 .suppress_bind_attrs = true,
5515 .probe = udma_probe,
5517 builtin_platform_driver(pktdma_driver);
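/*
 * All three platform drivers funnel into udma_probe(); behaviour diverges
 * only through the per-compatible udma_match_data (DMA_TYPE_UDMA, BCDMA or
 * PKTDMA) selected at the top of probe.
 */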
5519 /* Private interfaces to UDMA */
5520 #include "k3-udma-private.c"