/*
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Copyright (C) 2010 Samsung Electronics Co. Ltd.
 *	Jaswinder Singh <jassi.brar@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/amba/bus.h>
#include <linux/scatterlist.h>
#include <linux/of_dma.h>
#include <linux/err.h>
#include <linux/pm_runtime.h>
#include <linux/bug.h>

#include "dmaengine.h"

#define PL330_MAX_CHAN		8
#define PL330_MAX_IRQS		32
#define PL330_MAX_PERI		32
#define PL330_MAX_BURST		16

#define PL330_QUIRK_BROKEN_NO_FLUSHP	BIT(0)
enum pl330_cachectrl {
	CCTRL0,		/* Noncacheable and nonbufferable */
	CCTRL1,		/* Bufferable only */
	CCTRL2,		/* Cacheable, but do not allocate */
	CCTRL3,		/* Cacheable and bufferable, but do not allocate */
	INVALID1,	/* AWCACHE = 0x1000 */
	INVALID2,
	CCTRL6,		/* Cacheable write-through, allocate on writes only */
	CCTRL7,		/* Cacheable write-back, allocate on writes only */
};
/* Register and Bit field Definitions */
#define DS_ST_STOP	0x0
#define DS_ST_EXEC	0x1
#define DS_ST_CMISS	0x2
#define DS_ST_UPDTPC	0x3
#define DS_ST_ATBRR	0x5
#define DS_ST_QBUSY	0x6
#define DS_ST_KILL	0x8
#define DS_ST_CMPLT	0x9
#define DS_ST_FLTCMP	0xe
#define DS_ST_FAULT	0xf

#define INTSTATUS	0x28

#define FTC(n)		(_FTC + (n)*0x4)

#define CS(n)		(_CS + (n)*0x8)
#define CS_CNS		(1 << 21)

#define CPC(n)		(_CPC + (n)*0x8)

#define SA(n)		(_SA + (n)*0x20)

#define DA(n)		(_DA + (n)*0x20)

#define CC(n)		(_CC + (n)*0x20)

#define CC_SRCINC	(1 << 0)
#define CC_DSTINC	(1 << 14)
#define CC_SRCPRI	(1 << 8)
#define CC_DSTPRI	(1 << 22)
#define CC_SRCNS	(1 << 9)
#define CC_DSTNS	(1 << 23)
#define CC_SRCIA	(1 << 10)
#define CC_DSTIA	(1 << 24)
#define CC_SRCBRSTLEN_SHFT	4
#define CC_DSTBRSTLEN_SHFT	18
#define CC_SRCBRSTSIZE_SHFT	1
#define CC_DSTBRSTSIZE_SHFT	15
#define CC_SRCCCTRL_SHFT	11
#define CC_SRCCCTRL_MASK	0x7
#define CC_DSTCCTRL_SHFT	25
#define CC_DRCCCTRL_MASK	0x7
#define CC_SWAP_SHFT	28

#define LC0(n)		(_LC0 + (n)*0x20)

#define LC1(n)		(_LC1 + (n)*0x20)

#define DBGSTATUS	0xd00
#define DBG_BUSY	(1 << 0)

#define DBGINST0	0xd08
#define DBGINST1	0xd0c

#define PERIPH_ID	0xfe0
#define PERIPH_REV_SHIFT	20
#define PERIPH_REV_MASK		0xf
#define PERIPH_REV_R0P0		0
#define PERIPH_REV_R1P0		1
#define PERIPH_REV_R1P1		2

#define CR0_PERIPH_REQ_SET	(1 << 0)
#define CR0_BOOT_EN_SET		(1 << 1)
#define CR0_BOOT_MAN_NS		(1 << 2)
#define CR0_NUM_CHANS_SHIFT	4
#define CR0_NUM_CHANS_MASK	0x7
#define CR0_NUM_PERIPH_SHIFT	12
#define CR0_NUM_PERIPH_MASK	0x1f
#define CR0_NUM_EVENTS_SHIFT	17
#define CR0_NUM_EVENTS_MASK	0x1f

#define CR1_ICACHE_LEN_SHIFT	0
#define CR1_ICACHE_LEN_MASK	0x7
#define CR1_NUM_ICACHELINES_SHIFT	4
#define CR1_NUM_ICACHELINES_MASK	0xf

#define CRD_DATA_WIDTH_SHIFT	0
#define CRD_DATA_WIDTH_MASK	0x7
#define CRD_WR_CAP_SHIFT	4
#define CRD_WR_CAP_MASK		0x7
#define CRD_WR_Q_DEP_SHIFT	8
#define CRD_WR_Q_DEP_MASK	0xf
#define CRD_RD_CAP_SHIFT	12
#define CRD_RD_CAP_MASK		0x7
#define CRD_RD_Q_DEP_SHIFT	16
#define CRD_RD_Q_DEP_MASK	0xf
#define CRD_DATA_BUFF_SHIFT	20
#define CRD_DATA_BUFF_MASK	0x3ff

#define DESIGNER	0x41

#define INTEG_CFG	0x0
#define PERIPH_ID_VAL	((PART << 0) | (DESIGNER << 12))

#define PL330_STATE_STOPPED		(1 << 0)
#define PL330_STATE_EXECUTING		(1 << 1)
#define PL330_STATE_WFE			(1 << 2)
#define PL330_STATE_FAULTING		(1 << 3)
#define PL330_STATE_COMPLETING		(1 << 4)
#define PL330_STATE_WFP			(1 << 5)
#define PL330_STATE_KILLING		(1 << 6)
#define PL330_STATE_FAULT_COMPLETING	(1 << 7)
#define PL330_STATE_CACHEMISS		(1 << 8)
#define PL330_STATE_UPDTPC		(1 << 9)
#define PL330_STATE_ATBARRIER		(1 << 10)
#define PL330_STATE_QUEUEBUSY		(1 << 11)
#define PL330_STATE_INVALID		(1 << 15)

#define PL330_STABLE_STATES (PL330_STATE_STOPPED | PL330_STATE_EXECUTING \
			| PL330_STATE_WFE | PL330_STATE_FAULTING)

#define CMD_DMAADDH	0x54
#define CMD_DMAEND	0x00
#define CMD_DMAFLUSHP	0x35
#define CMD_DMAGO	0xa0
#define CMD_DMALD	0x04
#define CMD_DMALDP	0x25
#define CMD_DMALP	0x20
#define CMD_DMALPEND	0x28
#define CMD_DMAKILL	0x01
#define CMD_DMAMOV	0xbc
#define CMD_DMANOP	0x18
#define CMD_DMARMB	0x12
#define CMD_DMASEV	0x34
#define CMD_DMAST	0x08
#define CMD_DMASTP	0x29
#define CMD_DMASTZ	0x0c
#define CMD_DMAWFE	0x36
#define CMD_DMAWFP	0x30
#define CMD_DMAWMB	0x13

#define SZ_DMAFLUSHP	2
#define SZ_DMALPEND	2

#define BRST_LEN(ccr)	((((ccr) >> CC_SRCBRSTLEN_SHFT) & 0xf) + 1)
#define BRST_SIZE(ccr)	(1 << (((ccr) >> CC_SRCBRSTSIZE_SHFT) & 0x7))

#define BYTE_TO_BURST(b, ccr)	((b) / BRST_SIZE(ccr) / BRST_LEN(ccr))
#define BURST_TO_BYTE(c, ccr)	((c) * BRST_SIZE(ccr) * BRST_LEN(ccr))
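
/*
 * Worked example (illustrative, not part of the driver): for a CCR whose
 * source fields encode brst_size = 2 (1 << 2 == 4 bytes per beat) and
 * brst_len = 16 beats, the helpers above evaluate to:
 *
 *	BRST_SIZE(ccr)           == 4
 *	BRST_LEN(ccr)            == 16
 *	BYTE_TO_BURST(4096, ccr) == 4096 / 4 / 16 == 64 bursts
 *	BURST_TO_BYTE(64, ccr)   == 64 * 4 * 16   == 4096 bytes
 */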
/*
 * With 256 bytes, we can do more than 2.5MB and 5MB xfers per req
 * at 1byte/burst for P<->M and M<->M respectively.
 * For a typical scenario, at 1word/burst, 10MB and 20MB xfers per req
 * should be enough for P<->M and M<->M respectively.
 */
#define MCODE_BUFF_PER_REQ	256
/* Use this _only_ to wait on transient states */
#define UNTIL(t, s)	while (!(_state(t) & (s))) cpu_relax();
#ifdef PL330_DEBUG_MCGEN
static unsigned cmd_line;
#define PL330_DBGCMD_DUMP(off, x...)	do { \
						printk("%x:", cmd_line); \
						printk(x); \
						cmd_line += off; \
					} while (0)
#define PL330_DBGMC_START(addr)		(cmd_line = addr)
#else
#define PL330_DBGCMD_DUMP(off, x...)	do {} while (0)
#define PL330_DBGMC_START(addr)		do {} while (0)
#endif
/* The number of default descriptors */
#define NR_DEFAULT_DESC	16

/* Delay for runtime PM autosuspend, ms */
#define PL330_AUTOSUSPEND_DELAY 20

/* Populated by the PL330 core driver for DMA API driver's info */
struct pl330_config {
#define DMAC_MODE_NS	(1 << 0)
	unsigned int data_bus_width:10; /* In number of bits */
	unsigned int data_buf_dep:11;
	unsigned int num_chan:4;
	unsigned int num_peri:6;
	unsigned int num_events:6;

/*
 * Request Configuration.
 * The PL330 core does not modify this and uses the last
 * working configuration if the request doesn't provide any.
 *
 * The Client may want to provide this info only for the
 * first request and a request with new settings.
 */
struct pl330_reqcfg {
	/* Address Incrementing */

	/*
	 * For now, the SRC & DST protection levels
	 * and burst size/length are assumed same.
	 */
	unsigned brst_size:3; /* in power of 2 */

	enum pl330_cachectrl dcctl;
	enum pl330_cachectrl scctl;
	enum pl330_byteswap swap;
	struct pl330_config *pcfg;

/*
 * One cycle of DMAC operation.
 * There may be more than one xfer in a request.
 */
/* The xfer callbacks are made with one of these arguments. */
enum pl330_op_err {
	/* All xfers in the request completed successfully. */
	PL330_ERR_NONE,
	/* If req aborted due to global error. */
	PL330_ERR_ABORT,
	/* If req failed due to problem with Channel. */
	PL330_ERR_FAIL,
};
struct dma_pl330_desc;

	struct dma_pl330_desc *desc;

/* ToBeDone for tasklet */

struct pl330_thread {

	/* If the channel is not yet acquired by any client */

	struct pl330_dmac *dmac;
	/* Only two at a time */
	struct _pl330_req req[2];
	/* Index of the last enqueued request */

	/* Index of the last submitted request or -1 if the DMA is stopped */

enum pl330_dmac_state {

	/* In the DMAC pool */

	/*
	 * Allocated to some channel during prep_xxx
	 * Also may be sitting on the work_list.
	 */

	/*
	 * Sitting on the work_list and already submitted
	 * to the PL330 core. Not more than two descriptors
	 * of a channel can be BUSY at any time.
	 */

	/*
	 * Sitting on the channel work_list but xfer done
	 */

struct dma_pl330_chan {
	/* Schedule desc completion */
	struct tasklet_struct task;

	/* DMA-Engine Channel */
	struct dma_chan chan;

	/* List of submitted descriptors */
	struct list_head submitted_list;
	/* List of issued descriptors */
	struct list_head work_list;
	/* List of completed descriptors */
	struct list_head completed_list;

	/* Pointer to the DMAC that manages this channel,
	 * NULL if the channel is available to be acquired.
	 * As the parent, this DMAC also provides descriptors
	 */
	struct pl330_dmac *dmac;

	/* To protect channel manipulation */

	/*
	 * Hardware channel thread of PL330 DMAC. NULL if the channel is
	 */
	struct pl330_thread *thread;
	/* For D-to-M and M-to-D channels */
	int burst_sz; /* the peripheral fifo width */
	int burst_len; /* the number of bursts */
	phys_addr_t fifo_addr;
	/* DMA-mapped view of the FIFO; may differ if an IOMMU is present */
	dma_addr_t fifo_dma;
	enum dma_data_direction dir;
	/* for cyclic capability */

	/* for runtime pm tracking */

	/* DMA-Engine Device */
	struct dma_device ddma;

	/* Holds info about sg limitations */
	struct device_dma_parameters dma_parms;

	/* Pool of descriptors available for the DMAC's channels */
	struct list_head desc_pool;
	/* To protect desc_pool manipulation */
	spinlock_t pool_lock;

	/* Size of MicroCode buffers for each channel. */

	/* ioremap'ed address of PL330 registers. */

	/* Populated by the PL330 core driver during pl330_add */
	struct pl330_config pcfg;

	/* Maximum possible events/irqs */

	/* BUS address of MicroCode buffer */
	dma_addr_t mcode_bus;
	/* CPU address of MicroCode buffer */

	/* List of all Channel threads */
	struct pl330_thread *channels;
	/* Pointer to the MANAGER thread */
	struct pl330_thread *manager;
	/* To handle bad news in interrupt */
	struct tasklet_struct tasks;
	struct _pl330_tbd dmac_tbd;
	/* State of DMAC operation */
	enum pl330_dmac_state state;
	/* Holds list of reqs with due callbacks */
	struct list_head req_done;

	/* Peripheral channels connected to this DMAC */
	unsigned int num_peripherals;
	struct dma_pl330_chan *peripherals; /* keep at end */
static struct pl330_of_quirks {
	char *quirk;
	int id;
} of_quirks[] = {
	{
		.quirk = "arm,pl330-broken-no-flushp",
		.id = PL330_QUIRK_BROKEN_NO_FLUSHP,
	}
};
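
/*
 * Example device tree fragment (illustrative only; the unit address and
 * node name are made up): boards whose DMAC must not execute DMAFLUSHP
 * tag the controller node with the quirk property matched above:
 *
 *	dma-controller@12680000 {
 *		compatible = "arm,pl330", "arm,primecell";
 *		...
 *		arm,pl330-broken-no-flushp;
 *	};
 */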
struct dma_pl330_desc {
	/* To attach to a queue as child */
	struct list_head node;

	/* Descriptor for the DMA Engine API */
	struct dma_async_tx_descriptor txd;

	/* Xfer for PL330 core */
	struct pl330_xfer px;

	struct pl330_reqcfg rqcfg;

	enum desc_status status;

	/* The channel which currently holds this desc */
	struct dma_pl330_chan *pchan;

	enum dma_transfer_direction rqtype;
	/* Index of peripheral for the xfer. */

	/* Hook to attach to DMAC's list of reqs with due callback */
	struct list_head rqd;

	struct dma_pl330_desc *desc;
static inline bool _queue_full(struct pl330_thread *thrd)
{
	return thrd->req[0].desc != NULL && thrd->req[1].desc != NULL;
}

static inline bool is_manager(struct pl330_thread *thrd)
{
	return thrd->dmac->manager == thrd;
}

/* If manager of the thread is in Non-Secure mode */
static inline bool _manager_ns(struct pl330_thread *thrd)
{
	return (thrd->dmac->pcfg.mode & DMAC_MODE_NS) ? true : false;
}

static inline u32 get_revision(u32 periph_id)
{
	return (periph_id >> PERIPH_REV_SHIFT) & PERIPH_REV_MASK;
}
static inline u32 _emit_END(unsigned dry_run, u8 buf[])

	PL330_DBGCMD_DUMP(SZ_DMAEND, "\tDMAEND\n");

static inline u32 _emit_FLUSHP(unsigned dry_run, u8 buf[], u8 peri)

	buf[0] = CMD_DMAFLUSHP;

	PL330_DBGCMD_DUMP(SZ_DMAFLUSHP, "\tDMAFLUSHP %u\n", peri >> 3);

static inline u32 _emit_LD(unsigned dry_run, u8 buf[], enum pl330_cond cond)

	if (cond == SINGLE)
		buf[0] |= (0 << 1) | (1 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (1 << 0);

	PL330_DBGCMD_DUMP(SZ_DMALD, "\tDMALD%c\n",
		cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'));

static inline u32 _emit_LDP(unsigned dry_run, u8 buf[],
		enum pl330_cond cond, u8 peri)

	PL330_DBGCMD_DUMP(SZ_DMALDP, "\tDMALDP%c %u\n",
		cond == SINGLE ? 'S' : 'B', peri >> 3);

static inline u32 _emit_LP(unsigned dry_run, u8 buf[],
		unsigned loop, u8 cnt)

	cnt--;	/* DMAC increments by 1 internally */

	PL330_DBGCMD_DUMP(SZ_DMALP, "\tDMALP_%c %u\n", loop ? '1' : '0', cnt);

	enum pl330_cond cond;

static inline u32 _emit_LPEND(unsigned dry_run, u8 buf[],
		const struct _arg_LPEND *arg)
{
	enum pl330_cond cond = arg->cond;
	bool forever = arg->forever;
	unsigned loop = arg->loop;
	u8 bjump = arg->bjump;

	buf[0] = CMD_DMALPEND;

	if (cond == SINGLE)
		buf[0] |= (0 << 1) | (1 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (1 << 0);

	PL330_DBGCMD_DUMP(SZ_DMALPEND, "\tDMALP%s%c_%c bjmpto_%x\n",
			forever ? "FE" : "END",
			cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'),

static inline u32 _emit_KILL(unsigned dry_run, u8 buf[])

	buf[0] = CMD_DMAKILL;

static inline u32 _emit_MOV(unsigned dry_run, u8 buf[],
		enum dmamov_dst dst, u32 val)

	PL330_DBGCMD_DUMP(SZ_DMAMOV, "\tDMAMOV %s 0x%x\n",
		dst == SAR ? "SAR" : (dst == DAR ? "DAR" : "CCR"), val);

static inline u32 _emit_RMB(unsigned dry_run, u8 buf[])

	PL330_DBGCMD_DUMP(SZ_DMARMB, "\tDMARMB\n");

static inline u32 _emit_SEV(unsigned dry_run, u8 buf[], u8 ev)

	PL330_DBGCMD_DUMP(SZ_DMASEV, "\tDMASEV %u\n", ev >> 3);

static inline u32 _emit_ST(unsigned dry_run, u8 buf[], enum pl330_cond cond)

	if (cond == SINGLE)
		buf[0] |= (0 << 1) | (1 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (1 << 0);

	PL330_DBGCMD_DUMP(SZ_DMAST, "\tDMAST%c\n",
		cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'));

static inline u32 _emit_STP(unsigned dry_run, u8 buf[],
		enum pl330_cond cond, u8 peri)

	PL330_DBGCMD_DUMP(SZ_DMASTP, "\tDMASTP%c %u\n",
		cond == SINGLE ? 'S' : 'B', peri >> 3);

static inline u32 _emit_WFP(unsigned dry_run, u8 buf[],
		enum pl330_cond cond, u8 peri)

	if (cond == SINGLE)
		buf[0] |= (0 << 1) | (0 << 0);
	else if (cond == BURST)
		buf[0] |= (1 << 1) | (0 << 0);
	else
		buf[0] |= (0 << 1) | (1 << 0);

	PL330_DBGCMD_DUMP(SZ_DMAWFP, "\tDMAWFP%c %u\n",
		cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'P'), peri >> 3);

static inline u32 _emit_WMB(unsigned dry_run, u8 buf[])

	PL330_DBGCMD_DUMP(SZ_DMAWMB, "\tDMAWMB\n");
static inline u32 _emit_GO(unsigned dry_run, u8 buf[],
		const struct _arg_GO *arg)
{
	u32 addr = arg->addr;
	unsigned ns = arg->ns;

#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
/* Returns Time-Out */
static bool _until_dmac_idle(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->base;
	unsigned long loops = msecs_to_loops(5);

		/* Until Manager is Idle */
		if (!(readl(regs + DBGSTATUS) & DBG_BUSY))

static inline void _execute_DBGINSN(struct pl330_thread *thrd,
		u8 insn[], bool as_manager)
{
	void __iomem *regs = thrd->dmac->base;

	val = (insn[0] << 16) | (insn[1] << 24);
		val |= (thrd->id << 8); /* Channel Number */

	writel(val, regs + DBGINST0);

	val = le32_to_cpu(*((__le32 *)&insn[2]));
	writel(val, regs + DBGINST1);

	/* If timed out due to halted state-machine */
	if (_until_dmac_idle(thrd)) {
		dev_err(thrd->dmac->ddma.dev, "DMAC halted!\n");

	writel(0, regs + DBGCMD);

static inline u32 _state(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->base;

	if (is_manager(thrd))
		val = readl(regs + DS) & 0xf;
	else
		val = readl(regs + CS(thrd->id)) & 0xf;

		return PL330_STATE_STOPPED;
		return PL330_STATE_EXECUTING;
		return PL330_STATE_CACHEMISS;
		return PL330_STATE_UPDTPC;
		return PL330_STATE_WFE;
		return PL330_STATE_FAULTING;

		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		return PL330_STATE_ATBARRIER;

		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		return PL330_STATE_QUEUEBUSY;

		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		return PL330_STATE_WFP;

		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		return PL330_STATE_KILLING;

		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		return PL330_STATE_COMPLETING;

		if (is_manager(thrd))
			return PL330_STATE_INVALID;
		return PL330_STATE_FAULT_COMPLETING;

		return PL330_STATE_INVALID;
static void _stop(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->base;
	u8 insn[6] = {0, 0, 0, 0, 0, 0};
	u32 inten = readl(regs + INTEN);

	if (_state(thrd) == PL330_STATE_FAULT_COMPLETING)
		UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);

	/* Return if nothing needs to be done */
	if (_state(thrd) == PL330_STATE_COMPLETING
		  || _state(thrd) == PL330_STATE_KILLING
		  || _state(thrd) == PL330_STATE_STOPPED)

	_execute_DBGINSN(thrd, insn, is_manager(thrd));

	/* clear the event */
	if (inten & (1 << thrd->ev))
		writel(1 << thrd->ev, regs + INTCLR);
	/* Stop generating interrupts for SEV */
	writel(inten & ~(1 << thrd->ev), regs + INTEN);

/* Start doing req 'idx' of thread 'thrd' */
static bool _trigger(struct pl330_thread *thrd)
{
	void __iomem *regs = thrd->dmac->base;
	struct _pl330_req *req;
	struct dma_pl330_desc *desc;
	u8 insn[6] = {0, 0, 0, 0, 0, 0};

	/* Return if already ACTIVE */
	if (_state(thrd) != PL330_STATE_STOPPED)

	idx = 1 - thrd->lstenq;
	if (thrd->req[idx].desc != NULL) {
		req = &thrd->req[idx];
	} else {
		idx = thrd->lstenq;
		if (thrd->req[idx].desc != NULL)
			req = &thrd->req[idx];

	/* Return if no request */

	/* Return if req is running */
	if (idx == thrd->req_running)

	ns = desc->rqcfg.nonsecure ? 1 : 0;

	/* See 'Abort Sources' point-4 at Page 2-25 */
	if (_manager_ns(thrd) && !ns)
		dev_info(thrd->dmac->ddma.dev, "%s:%d Recipe for ABORT!\n",
			__func__, __LINE__);

	go.addr = req->mc_bus;
	_emit_GO(0, insn, &go);

	/* Set to generate interrupts for SEV */
	writel(readl(regs + INTEN) | (1 << thrd->ev), regs + INTEN);

	/* Only manager can execute GO */
	_execute_DBGINSN(thrd, insn, true);

	thrd->req_running = idx;
static bool _start(struct pl330_thread *thrd)
{
	switch (_state(thrd)) {
	case PL330_STATE_FAULT_COMPLETING:
		UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);

		if (_state(thrd) == PL330_STATE_KILLING)
			UNTIL(thrd, PL330_STATE_STOPPED)

	case PL330_STATE_FAULTING:

	case PL330_STATE_KILLING:
	case PL330_STATE_COMPLETING:
		UNTIL(thrd, PL330_STATE_STOPPED)

	case PL330_STATE_STOPPED:
		return _trigger(thrd);

	case PL330_STATE_WFP:
	case PL330_STATE_QUEUEBUSY:
	case PL330_STATE_ATBARRIER:
	case PL330_STATE_UPDTPC:
	case PL330_STATE_CACHEMISS:
	case PL330_STATE_EXECUTING:

	case PL330_STATE_WFE: /* For RESUME, nothing yet */
static inline int _ldst_memtomem(unsigned dry_run, u8 buf[],
		const struct _xfer_spec *pxs, int cyc)
{
	struct pl330_config *pcfg = pxs->desc->rqcfg.pcfg;

	/* check lock-up free version */
	if (get_revision(pcfg->periph_id) >= PERIPH_REV_R1P0) {
			off += _emit_LD(dry_run, &buf[off], ALWAYS);
			off += _emit_ST(dry_run, &buf[off], ALWAYS);
	} else {
			off += _emit_LD(dry_run, &buf[off], ALWAYS);
			off += _emit_RMB(dry_run, &buf[off]);
			off += _emit_ST(dry_run, &buf[off], ALWAYS);
			off += _emit_WMB(dry_run, &buf[off]);
static u32 _emit_load(unsigned int dry_run, u8 buf[],
	enum pl330_cond cond, enum dma_transfer_direction direction,
	u8 peri)
{
	int off = 0;

	switch (direction) {
	case DMA_MEM_TO_MEM:
	case DMA_MEM_TO_DEV:
		off += _emit_LD(dry_run, &buf[off], cond);
		break;

	case DMA_DEV_TO_MEM:
		if (cond == ALWAYS) {
			off += _emit_LDP(dry_run, &buf[off], SINGLE,
				peri);
			off += _emit_LDP(dry_run, &buf[off], BURST,
				peri);
		} else {
			off += _emit_LDP(dry_run, &buf[off], cond,
				peri);
		}
		break;

	default:
		/* this code should be unreachable */

static inline u32 _emit_store(unsigned int dry_run, u8 buf[],
	enum pl330_cond cond, enum dma_transfer_direction direction,
	u8 peri)
{
	int off = 0;

	switch (direction) {
	case DMA_MEM_TO_MEM:
	case DMA_DEV_TO_MEM:
		off += _emit_ST(dry_run, &buf[off], cond);
		break;

	case DMA_MEM_TO_DEV:
		if (cond == ALWAYS) {
			off += _emit_STP(dry_run, &buf[off], SINGLE,
				peri);
			off += _emit_STP(dry_run, &buf[off], BURST,
				peri);
		} else {
			off += _emit_STP(dry_run, &buf[off], cond,
				peri);
		}
		break;

	default:
		/* this code should be unreachable */
static inline int _ldst_peripheral(struct pl330_dmac *pl330,
				   unsigned dry_run, u8 buf[],
				   const struct _xfer_spec *pxs, int cyc,
				   enum pl330_cond cond)
{
	if (pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP)
		cond = BURST;

	/*
	 * do FLUSHP at beginning to clear any stale dma requests before the
	 * first WFP.
	 */
	if (!(pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP))
		off += _emit_FLUSHP(dry_run, &buf[off], pxs->desc->peri);

		off += _emit_WFP(dry_run, &buf[off], cond, pxs->desc->peri);
		off += _emit_load(dry_run, &buf[off], cond, pxs->desc->rqtype,
				  pxs->desc->peri);
		off += _emit_store(dry_run, &buf[off], cond, pxs->desc->rqtype,
				   pxs->desc->peri);
static int _bursts(struct pl330_dmac *pl330, unsigned dry_run, u8 buf[],
		   const struct _xfer_spec *pxs, int cyc)
{
	enum pl330_cond cond = BRST_LEN(pxs->ccr) > 1 ? BURST : SINGLE;

	switch (pxs->desc->rqtype) {
	case DMA_MEM_TO_DEV:
	case DMA_DEV_TO_MEM:
		off += _ldst_peripheral(pl330, dry_run, &buf[off], pxs, cyc,
					cond);
		break;

	case DMA_MEM_TO_MEM:
		off += _ldst_memtomem(dry_run, &buf[off], pxs, cyc);
		break;

	default:
		/* this code should be unreachable */

/*
 * transfer dregs with single transfers to peripheral, or a reduced size burst
 * for mem-to-mem.
 */
static int _dregs(struct pl330_dmac *pl330, unsigned int dry_run, u8 buf[],
		  const struct _xfer_spec *pxs, int transfer_length)
{
	if (transfer_length == 0)

	switch (pxs->desc->rqtype) {
	case DMA_MEM_TO_DEV:
	case DMA_DEV_TO_MEM:
		off += _ldst_peripheral(pl330, dry_run, &buf[off], pxs,
					transfer_length, SINGLE);
		break;

	case DMA_MEM_TO_MEM:
		dregs_ccr = pxs->ccr;
		dregs_ccr &= ~((0xf << CC_SRCBRSTLEN_SHFT) |
			(0xf << CC_DSTBRSTLEN_SHFT));
		dregs_ccr |= (((transfer_length - 1) & 0xf) <<
			CC_SRCBRSTLEN_SHFT);
		dregs_ccr |= (((transfer_length - 1) & 0xf) <<
			CC_DSTBRSTLEN_SHFT);
		off += _emit_MOV(dry_run, &buf[off], CCR, dregs_ccr);
		off += _ldst_memtomem(dry_run, &buf[off], pxs, 1);
		break;

	default:
		/* this code should be unreachable */
/* Returns bytes consumed and updates bursts */
static inline int _loop(struct pl330_dmac *pl330, unsigned dry_run, u8 buf[],
		unsigned long *bursts, const struct _xfer_spec *pxs)
{
	int cyc, cycmax, szlp, szlpend, szbrst, off;
	unsigned lcnt0, lcnt1, ljmp0, ljmp1;
	struct _arg_LPEND lpend;

		return _bursts(pl330, dry_run, buf, pxs, 1);

	/* Max iterations possible in DMALP is 256 */
	if (*bursts >= 256*256) {
		cyc = *bursts / lcnt1 / lcnt0;
	} else if (*bursts > 256) {
		lcnt0 = *bursts / lcnt1;

	szlp = _emit_LP(1, buf, 0, 0);
	szbrst = _bursts(pl330, 1, buf, pxs, 1);

	lpend.cond = ALWAYS;
	lpend.forever = false;
	szlpend = _emit_LPEND(1, buf, &lpend);

	/*
	 * Max bursts that we can unroll due to limit on the
	 * size of backward jump that can be encoded in DMALPEND
	 * which is 8-bits and hence 255
	 */
	cycmax = (255 - (szlp + szlpend)) / szbrst;

	cyc = (cycmax < cyc) ? cycmax : cyc;

		off += _emit_LP(dry_run, &buf[off], 0, lcnt0);
		off += _emit_LP(dry_run, &buf[off], 1, lcnt1);

	off += _bursts(pl330, dry_run, &buf[off], pxs, cyc);

	lpend.cond = ALWAYS;
	lpend.forever = false;
	lpend.bjump = off - ljmp1;
	off += _emit_LPEND(dry_run, &buf[off], &lpend);

	lpend.cond = ALWAYS;
	lpend.forever = false;
	lpend.bjump = off - ljmp0;
	off += _emit_LPEND(dry_run, &buf[off], &lpend);

	*bursts = lcnt1 * cyc;
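
/*
 * Shape of the microcode emitted above when both loop counters are in
 * use (an illustrative sketch, not literal driver output):
 *
 *	DMALP_0 lcnt0
 *	DMALP_1 lcnt1
 *		<cyc unrolled load/store bursts>
 *	DMALPEND_1 bjmpto_<start of bursts>
 *	DMALPEND_0 bjmpto_<inner DMALP>
 *
 * The backward jump in DMALPEND is an 8-bit field, which is why the
 * unroll factor is capped at (255 - loop overhead) / bytes-per-burst.
 */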
static inline int _setup_loops(struct pl330_dmac *pl330,
			       unsigned dry_run, u8 buf[],
			       const struct _xfer_spec *pxs)
{
	struct pl330_xfer *x = &pxs->desc->px;
	unsigned long c, bursts = BYTE_TO_BURST(x->bytes, ccr);
	int num_dregs = (x->bytes - BURST_TO_BYTE(bursts, ccr)) /

		off += _loop(pl330, dry_run, &buf[off], &c, pxs);
	off += _dregs(pl330, dry_run, &buf[off], pxs, num_dregs);
static inline int _setup_xfer(struct pl330_dmac *pl330,
			      unsigned dry_run, u8 buf[],
			      const struct _xfer_spec *pxs)
{
	struct pl330_xfer *x = &pxs->desc->px;

	/* DMAMOV SAR, x->src_addr */
	off += _emit_MOV(dry_run, &buf[off], SAR, x->src_addr);
	/* DMAMOV DAR, x->dst_addr */
	off += _emit_MOV(dry_run, &buf[off], DAR, x->dst_addr);

	off += _setup_loops(pl330, dry_run, &buf[off], pxs);

/*
 * A req is a sequence of one or more xfer units.
 * Returns the number of bytes taken to setup the MC for the req.
 */
static int _setup_req(struct pl330_dmac *pl330, unsigned dry_run,
		      struct pl330_thread *thrd, unsigned index,
		      struct _xfer_spec *pxs)
{
	struct _pl330_req *req = &thrd->req[index];
	u8 *buf = req->mc_cpu;

	PL330_DBGMC_START(req->mc_bus);

	/* DMAMOV CCR, ccr */
	off += _emit_MOV(dry_run, &buf[off], CCR, pxs->ccr);

	off += _setup_xfer(pl330, dry_run, &buf[off], pxs);

	/* DMASEV peripheral/event */
	off += _emit_SEV(dry_run, &buf[off], thrd->ev);

	off += _emit_END(dry_run, &buf[off]);
static inline u32 _prepare_ccr(const struct pl330_reqcfg *rqc)
{
	/* We set same protection levels for Src and DST for now */
	if (rqc->privileged)
		ccr |= CC_SRCPRI | CC_DSTPRI;
	if (rqc->nonsecure)
		ccr |= CC_SRCNS | CC_DSTNS;
	if (rqc->insnaccess)
		ccr |= CC_SRCIA | CC_DSTIA;

	ccr |= (((rqc->brst_len - 1) & 0xf) << CC_SRCBRSTLEN_SHFT);
	ccr |= (((rqc->brst_len - 1) & 0xf) << CC_DSTBRSTLEN_SHFT);

	ccr |= (rqc->brst_size << CC_SRCBRSTSIZE_SHFT);
	ccr |= (rqc->brst_size << CC_DSTBRSTSIZE_SHFT);

	ccr |= (rqc->scctl << CC_SRCCCTRL_SHFT);
	ccr |= (rqc->dcctl << CC_DSTCCTRL_SHFT);

	ccr |= (rqc->swap << CC_SWAP_SHFT);
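
/*
 * Worked example (illustrative): a nonsecure request with brst_len = 16
 * and brst_size = 2 (4-byte beats) ends up with
 *
 *	ccr = CC_SRCNS | CC_DSTNS
 *		| (15 << CC_SRCBRSTLEN_SHFT) | (15 << CC_DSTBRSTLEN_SHFT)
 *		| (2 << CC_SRCBRSTSIZE_SHFT) | (2 << CC_DSTBRSTSIZE_SHFT);
 *
 * i.e. the burst-length fields hold "beats - 1" while the burst-size
 * fields hold log2(bytes per beat).
 */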
/*
 * Submit a list of xfers after which the client wants notification.
 * Client is not notified after each xfer unit, just once after all
 * xfer units are done or some error occurs.
 */
static int pl330_submit_req(struct pl330_thread *thrd,
	struct dma_pl330_desc *desc)
{
	struct pl330_dmac *pl330 = thrd->dmac;
	struct _xfer_spec xs;
	unsigned long flags;

	switch (desc->rqtype) {
	case DMA_MEM_TO_DEV:
	case DMA_DEV_TO_MEM:
	case DMA_MEM_TO_MEM:

	if (pl330->state == DYING
		|| pl330->dmac_tbd.reset_chan & (1 << thrd->id)) {
		dev_info(thrd->dmac->ddma.dev, "%s:%d\n",
			__func__, __LINE__);

	/* If request for non-existing peripheral */
	if (desc->rqtype != DMA_MEM_TO_MEM &&
	    desc->peri >= pl330->pcfg.num_peri) {
		dev_info(thrd->dmac->ddma.dev,
				"%s:%d Invalid peripheral(%u)!\n",
				__func__, __LINE__, desc->peri);

	spin_lock_irqsave(&pl330->lock, flags);

	if (_queue_full(thrd)) {

	/* Prefer Secure Channel */
	if (!_manager_ns(thrd))
		desc->rqcfg.nonsecure = 0;
	else
		desc->rqcfg.nonsecure = 1;

	ccr = _prepare_ccr(&desc->rqcfg);

	idx = thrd->req[0].desc == NULL ? 0 : 1;

	/* First dry run to check if req is acceptable */
	ret = _setup_req(pl330, 1, thrd, idx, &xs);

	if (ret > pl330->mcbufsz / 2) {
		dev_info(pl330->ddma.dev, "%s:%d Try increasing mcbufsz (%i/%i)\n",
				__func__, __LINE__, ret, pl330->mcbufsz / 2);

	/* Hook the request */
	thrd->req[idx].desc = desc;
	_setup_req(pl330, 0, thrd, idx, &xs);

	spin_unlock_irqrestore(&pl330->lock, flags);
static void dma_pl330_rqcb(struct dma_pl330_desc *desc, enum pl330_op_err err)
{
	struct dma_pl330_chan *pch;
	unsigned long flags;

	/* If desc aborted */

	spin_lock_irqsave(&pch->lock, flags);

	desc->status = DONE;

	spin_unlock_irqrestore(&pch->lock, flags);

	tasklet_schedule(&pch->task);

static void pl330_dotask(unsigned long data)
{
	struct pl330_dmac *pl330 = (struct pl330_dmac *) data;
	unsigned long flags;

	spin_lock_irqsave(&pl330->lock, flags);

	/* The DMAC itself gone nuts */
	if (pl330->dmac_tbd.reset_dmac) {
		pl330->state = DYING;
		/* Reset the manager too */
		pl330->dmac_tbd.reset_mngr = true;
		/* Clear the reset flag */
		pl330->dmac_tbd.reset_dmac = false;
	}

	if (pl330->dmac_tbd.reset_mngr) {
		_stop(pl330->manager);
		/* Reset all channels */
		pl330->dmac_tbd.reset_chan = (1 << pl330->pcfg.num_chan) - 1;
		/* Clear the reset flag */
		pl330->dmac_tbd.reset_mngr = false;
	}

	for (i = 0; i < pl330->pcfg.num_chan; i++) {

		if (pl330->dmac_tbd.reset_chan & (1 << i)) {
			struct pl330_thread *thrd = &pl330->channels[i];
			void __iomem *regs = pl330->base;
			enum pl330_op_err err;

			if (readl(regs + FSC) & (1 << thrd->id))
				err = PL330_ERR_FAIL;
			else
				err = PL330_ERR_ABORT;

			spin_unlock_irqrestore(&pl330->lock, flags);
			dma_pl330_rqcb(thrd->req[1 - thrd->lstenq].desc, err);
			dma_pl330_rqcb(thrd->req[thrd->lstenq].desc, err);
			spin_lock_irqsave(&pl330->lock, flags);

			thrd->req[0].desc = NULL;
			thrd->req[1].desc = NULL;
			thrd->req_running = -1;

			/* Clear the reset flag */
			pl330->dmac_tbd.reset_chan &= ~(1 << i);
		}
	}

	spin_unlock_irqrestore(&pl330->lock, flags);
/* Returns 1 if state was updated, 0 otherwise */
static int pl330_update(struct pl330_dmac *pl330)
{
	struct dma_pl330_desc *descdone;
	unsigned long flags;
	int id, ev, ret = 0;

	spin_lock_irqsave(&pl330->lock, flags);

	val = readl(regs + FSM) & 0x1;
	if (val)
		pl330->dmac_tbd.reset_mngr = true;
	else
		pl330->dmac_tbd.reset_mngr = false;

	val = readl(regs + FSC) & ((1 << pl330->pcfg.num_chan) - 1);
	pl330->dmac_tbd.reset_chan |= val;

	while (i < pl330->pcfg.num_chan) {
		if (val & (1 << i)) {
			dev_info(pl330->ddma.dev,
				"Reset Channel-%d\t CS-%x FTC-%x\n",
					i, readl(regs + CS(i)),
					readl(regs + FTC(i)));
			_stop(&pl330->channels[i]);

	/* Check which event happened, i.e. which thread notified us */
	val = readl(regs + ES);
	if (pl330->pcfg.num_events < 32
			&& val & ~((1 << pl330->pcfg.num_events) - 1)) {
		pl330->dmac_tbd.reset_dmac = true;
		dev_err(pl330->ddma.dev, "%s:%d Unexpected!\n", __func__,

	for (ev = 0; ev < pl330->pcfg.num_events; ev++) {
		if (val & (1 << ev)) { /* Event occurred */
			struct pl330_thread *thrd;
			u32 inten = readl(regs + INTEN);

			/* Clear the event */
			if (inten & (1 << ev))
				writel(1 << ev, regs + INTCLR);

			id = pl330->events[ev];

			thrd = &pl330->channels[id];

			active = thrd->req_running;
			if (active == -1) /* Aborted */

			/* Detach the req */
			descdone = thrd->req[active].desc;
			thrd->req[active].desc = NULL;

			thrd->req_running = -1;

			/* Get going again ASAP */

			/* For now, just make a list of callbacks to be done */
			list_add_tail(&descdone->rqd, &pl330->req_done);

	/* Now that we are in no hurry, do the callbacks */
	while (!list_empty(&pl330->req_done)) {
		descdone = list_first_entry(&pl330->req_done,
					    struct dma_pl330_desc, rqd);
		list_del(&descdone->rqd);
		spin_unlock_irqrestore(&pl330->lock, flags);
		dma_pl330_rqcb(descdone, PL330_ERR_NONE);
		spin_lock_irqsave(&pl330->lock, flags);
	}

	spin_unlock_irqrestore(&pl330->lock, flags);

	if (pl330->dmac_tbd.reset_dmac
			|| pl330->dmac_tbd.reset_mngr
			|| pl330->dmac_tbd.reset_chan) {
		tasklet_schedule(&pl330->tasks);
/* Reserve an event */
static inline int _alloc_event(struct pl330_thread *thrd)
{
	struct pl330_dmac *pl330 = thrd->dmac;

	for (ev = 0; ev < pl330->pcfg.num_events; ev++)
		if (pl330->events[ev] == -1) {
			pl330->events[ev] = thrd->id;

static bool _chan_ns(const struct pl330_dmac *pl330, int i)
{
	return pl330->pcfg.irq_ns & (1 << i);
}

/* Upon success, returns IdentityToken for the
 * allocated channel, NULL otherwise.
 */
static struct pl330_thread *pl330_request_channel(struct pl330_dmac *pl330)
{
	struct pl330_thread *thrd = NULL;

	if (pl330->state == DYING)

	chans = pl330->pcfg.num_chan;

	for (i = 0; i < chans; i++) {
		thrd = &pl330->channels[i];
		if ((thrd->free) && (!_manager_ns(thrd) ||
					_chan_ns(pl330, i))) {
			thrd->ev = _alloc_event(thrd);
			if (thrd->ev >= 0) {
				thrd->req[0].desc = NULL;
				thrd->req[1].desc = NULL;
				thrd->req_running = -1;

/* Release an event */
static inline void _free_event(struct pl330_thread *thrd, int ev)
{
	struct pl330_dmac *pl330 = thrd->dmac;

	/* If the event is valid and was held by the thread */
	if (ev >= 0 && ev < pl330->pcfg.num_events
			&& pl330->events[ev] == thrd->id)
		pl330->events[ev] = -1;
}

static void pl330_release_channel(struct pl330_thread *thrd)
{
	if (!thrd || thrd->free)

	dma_pl330_rqcb(thrd->req[1 - thrd->lstenq].desc, PL330_ERR_ABORT);
	dma_pl330_rqcb(thrd->req[thrd->lstenq].desc, PL330_ERR_ABORT);

	_free_event(thrd, thrd->ev);
/* Initialize the structure for PL330 configuration, that can be used
 * by the client driver to make best use of the DMAC
 */
static void read_dmac_config(struct pl330_dmac *pl330)
{
	void __iomem *regs = pl330->base;

	val = readl(regs + CRD) >> CRD_DATA_WIDTH_SHIFT;
	val &= CRD_DATA_WIDTH_MASK;
	pl330->pcfg.data_bus_width = 8 * (1 << val);

	val = readl(regs + CRD) >> CRD_DATA_BUFF_SHIFT;
	val &= CRD_DATA_BUFF_MASK;
	pl330->pcfg.data_buf_dep = val + 1;

	val = readl(regs + CR0) >> CR0_NUM_CHANS_SHIFT;
	val &= CR0_NUM_CHANS_MASK;
	pl330->pcfg.num_chan = val;

	val = readl(regs + CR0);
	if (val & CR0_PERIPH_REQ_SET) {
		val = (val >> CR0_NUM_PERIPH_SHIFT) & CR0_NUM_PERIPH_MASK;
		pl330->pcfg.num_peri = val;
		pl330->pcfg.peri_ns = readl(regs + CR4);
	} else {
		pl330->pcfg.num_peri = 0;
	}

	val = readl(regs + CR0);
	if (val & CR0_BOOT_MAN_NS)
		pl330->pcfg.mode |= DMAC_MODE_NS;
	else
		pl330->pcfg.mode &= ~DMAC_MODE_NS;

	val = readl(regs + CR0) >> CR0_NUM_EVENTS_SHIFT;
	val &= CR0_NUM_EVENTS_MASK;
	pl330->pcfg.num_events = val;

	pl330->pcfg.irq_ns = readl(regs + CR3);
static inline void _reset_thread(struct pl330_thread *thrd)
{
	struct pl330_dmac *pl330 = thrd->dmac;

	thrd->req[0].mc_cpu = pl330->mcode_cpu
				+ (thrd->id * pl330->mcbufsz);
	thrd->req[0].mc_bus = pl330->mcode_bus
				+ (thrd->id * pl330->mcbufsz);
	thrd->req[0].desc = NULL;

	thrd->req[1].mc_cpu = thrd->req[0].mc_cpu
				+ pl330->mcbufsz / 2;
	thrd->req[1].mc_bus = thrd->req[0].mc_bus
				+ pl330->mcbufsz / 2;
	thrd->req[1].desc = NULL;

	thrd->req_running = -1;
static int dmac_alloc_threads(struct pl330_dmac *pl330)
{
	int chans = pl330->pcfg.num_chan;
	struct pl330_thread *thrd;

	/* Allocate 1 Manager and 'chans' Channel threads */
	pl330->channels = kcalloc(1 + chans, sizeof(*thrd),
					GFP_KERNEL);
	if (!pl330->channels)

	/* Init Channel threads */
	for (i = 0; i < chans; i++) {
		thrd = &pl330->channels[i];
		_reset_thread(thrd);

	/* MANAGER is indexed at the end */
	thrd = &pl330->channels[chans];
	pl330->manager = thrd;
static int dmac_alloc_resources(struct pl330_dmac *pl330)
{
	int chans = pl330->pcfg.num_chan;

	/*
	 * Alloc MicroCode buffer for 'chans' Channel threads.
	 * A channel's buffer offset is (Channel_Id * MCODE_BUFF_PERCHAN)
	 */
	pl330->mcode_cpu = dma_alloc_attrs(pl330->ddma.dev,
				chans * pl330->mcbufsz,
				&pl330->mcode_bus, GFP_KERNEL,
				DMA_ATTR_PRIVILEGED);
	if (!pl330->mcode_cpu) {
		dev_err(pl330->ddma.dev, "%s:%d Can't allocate memory!\n",
			__func__, __LINE__);

	ret = dmac_alloc_threads(pl330);
	if (ret) {
		dev_err(pl330->ddma.dev, "%s:%d Unable to create channels for DMAC!\n",
			__func__, __LINE__);
		dma_free_coherent(pl330->ddma.dev,
				chans * pl330->mcbufsz,
				pl330->mcode_cpu, pl330->mcode_bus);
static int pl330_add(struct pl330_dmac *pl330)
{
	/* Check if we can handle this DMAC */
	if ((pl330->pcfg.periph_id & 0xfffff) != PERIPH_ID_VAL) {
		dev_err(pl330->ddma.dev, "PERIPH_ID 0x%x !\n",
			pl330->pcfg.periph_id);

	/* Read the configuration of the DMAC */
	read_dmac_config(pl330);

	if (pl330->pcfg.num_events == 0) {
		dev_err(pl330->ddma.dev, "%s:%d Can't work without events!\n",
			__func__, __LINE__);

	spin_lock_init(&pl330->lock);

	INIT_LIST_HEAD(&pl330->req_done);

	/* Use default MC buffer size if not provided */
	if (!pl330->mcbufsz)
		pl330->mcbufsz = MCODE_BUFF_PER_REQ * 2;

	/* Mark all events as free */
	for (i = 0; i < pl330->pcfg.num_events; i++)
		pl330->events[i] = -1;

	/* Allocate resources needed by the DMAC */
	ret = dmac_alloc_resources(pl330);
		dev_err(pl330->ddma.dev, "Unable to create channels for DMAC\n");

	tasklet_init(&pl330->tasks, pl330_dotask, (unsigned long) pl330);

	pl330->state = INIT;

static int dmac_free_threads(struct pl330_dmac *pl330)
{
	struct pl330_thread *thrd;

	/* Release Channel threads */
	for (i = 0; i < pl330->pcfg.num_chan; i++) {
		thrd = &pl330->channels[i];
		pl330_release_channel(thrd);

	kfree(pl330->channels);

static void pl330_del(struct pl330_dmac *pl330)
{
	pl330->state = UNINIT;

	tasklet_kill(&pl330->tasks);

	/* Free DMAC resources */
	dmac_free_threads(pl330);

	dma_free_coherent(pl330->ddma.dev,
		pl330->pcfg.num_chan * pl330->mcbufsz, pl330->mcode_cpu,
/* forward declaration */
static struct amba_driver pl330_driver;

static inline struct dma_pl330_chan *
to_pchan(struct dma_chan *ch)

	return container_of(ch, struct dma_pl330_chan, chan);

static inline struct dma_pl330_desc *
to_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct dma_pl330_desc, txd);
}

static inline void fill_queue(struct dma_pl330_chan *pch)
{
	struct dma_pl330_desc *desc;

	list_for_each_entry(desc, &pch->work_list, node) {

		/* If already submitted */
		if (desc->status == BUSY)

		ret = pl330_submit_req(pch->thread, desc);
		if (!ret) {
			desc->status = BUSY;
		} else if (ret == -EAGAIN) {
			/* QFull or DMAC Dying */
		} else {
			/* Unacceptable request */
			desc->status = DONE;
			dev_err(pch->dmac->ddma.dev, "%s:%d Bad Desc(%d)\n",
				__func__, __LINE__, desc->txd.cookie);
			tasklet_schedule(&pch->task);

static void pl330_tasklet(unsigned long data)
{
	struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
	struct dma_pl330_desc *desc, *_dt;
	unsigned long flags;
	bool power_down = false;

	spin_lock_irqsave(&pch->lock, flags);

	/* Pick up ripe tomatoes */
	list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
		if (desc->status == DONE) {
			dma_cookie_complete(&desc->txd);
			list_move_tail(&desc->node, &pch->completed_list);
		}

	/* Try to submit a req imm. next to the last completed cookie */

	if (list_empty(&pch->work_list)) {
		spin_lock(&pch->thread->dmac->lock);
		spin_unlock(&pch->thread->dmac->lock);
		pch->active = false;
	} else {
		/* Make sure the PL330 Channel thread is active */
		spin_lock(&pch->thread->dmac->lock);
		_start(pch->thread);
		spin_unlock(&pch->thread->dmac->lock);
	}

	while (!list_empty(&pch->completed_list)) {
		struct dmaengine_desc_callback cb;

		desc = list_first_entry(&pch->completed_list,
					struct dma_pl330_desc, node);

		dmaengine_desc_get_callback(&desc->txd, &cb);

			desc->status = PREP;
			list_move_tail(&desc->node, &pch->work_list);
				spin_lock(&pch->thread->dmac->lock);
				_start(pch->thread);
				spin_unlock(&pch->thread->dmac->lock);

			desc->status = FREE;
			list_move_tail(&desc->node, &pch->dmac->desc_pool);

		dma_descriptor_unmap(&desc->txd);

		if (dmaengine_desc_callback_valid(&cb)) {
			spin_unlock_irqrestore(&pch->lock, flags);
			dmaengine_desc_callback_invoke(&cb, NULL);
			spin_lock_irqsave(&pch->lock, flags);
		}
	}
	spin_unlock_irqrestore(&pch->lock, flags);

	/* If work list empty, power down */
		pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
		pm_runtime_put_autosuspend(pch->dmac->ddma.dev);

static struct dma_chan *of_dma_pl330_xlate(struct of_phandle_args *dma_spec,
						struct of_dma *ofdma)
{
	int count = dma_spec->args_count;
	struct pl330_dmac *pl330 = ofdma->of_dma_data;
	unsigned int chan_id;

	chan_id = dma_spec->args[0];
	if (chan_id >= pl330->num_peripherals)

	return dma_get_slave_channel(&pl330->peripherals[chan_id].chan);
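
/*
 * Example client usage (illustrative; node names and the phandle are made
 * up): with the single-cell specifier decoded above, a peripheral requests
 * channel 5 of this DMAC and of_dma_pl330_xlate() hands back
 * peripherals[5].chan:
 *
 *	serial@... {
 *		dmas = <&pdma0 5>, <&pdma0 4>;
 *		dma-names = "tx", "rx";
 *	};
 */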
static int pl330_alloc_chan_resources(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct pl330_dmac *pl330 = pch->dmac;
	unsigned long flags;

	spin_lock_irqsave(&pl330->lock, flags);

	dma_cookie_init(chan);
	pch->cyclic = false;

	pch->thread = pl330_request_channel(pl330);
		spin_unlock_irqrestore(&pl330->lock, flags);

	tasklet_init(&pch->task, pl330_tasklet, (unsigned long) pch);

	spin_unlock_irqrestore(&pl330->lock, flags);
/*
 * We need the data direction between the DMAC (the dma-mapping "device") and
 * the FIFO (the dmaengine "dev"), from the FIFO's point of view. Confusing!
 */
static enum dma_data_direction
pl330_dma_slave_map_dir(enum dma_transfer_direction dir)
{
	switch (dir) {
	case DMA_MEM_TO_DEV:
		return DMA_FROM_DEVICE;
	case DMA_DEV_TO_MEM:
		return DMA_TO_DEVICE;
	case DMA_DEV_TO_DEV:
		return DMA_BIDIRECTIONAL;
static void pl330_unprep_slave_fifo(struct dma_pl330_chan *pch)
{
	if (pch->dir != DMA_NONE)
		dma_unmap_resource(pch->chan.device->dev, pch->fifo_dma,
				   1 << pch->burst_sz, pch->dir, 0);
	pch->dir = DMA_NONE;
}

static bool pl330_prep_slave_fifo(struct dma_pl330_chan *pch,
				  enum dma_transfer_direction dir)
{
	struct device *dev = pch->chan.device->dev;
	enum dma_data_direction dma_dir = pl330_dma_slave_map_dir(dir);

	/* Already mapped for this config? */
	if (pch->dir == dma_dir)

	pl330_unprep_slave_fifo(pch);
	pch->fifo_dma = dma_map_resource(dev, pch->fifo_addr,
					 1 << pch->burst_sz, dma_dir, 0);
	if (dma_mapping_error(dev, pch->fifo_dma))
static int fixup_burst_len(int max_burst_len, int quirks)
{
	if (quirks & PL330_QUIRK_BROKEN_NO_FLUSHP)
		return 1;
	else if (max_burst_len > PL330_MAX_BURST)
		return PL330_MAX_BURST;
	else if (max_burst_len < 1)
		return 1;
	else
		return max_burst_len;
}
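
/*
 * Examples (illustrative): fixup_burst_len(32, 0) returns PL330_MAX_BURST
 * (16) and fixup_burst_len(0, 0) returns 1, while any requested length
 * collapses to 1 when PL330_QUIRK_BROKEN_NO_FLUSHP is set, since such
 * DMACs must stick to single transfers towards peripherals.
 */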
static int pl330_config(struct dma_chan *chan,
			struct dma_slave_config *slave_config)
{
	struct dma_pl330_chan *pch = to_pchan(chan);

	pl330_unprep_slave_fifo(pch);
	if (slave_config->direction == DMA_MEM_TO_DEV) {
		if (slave_config->dst_addr)
			pch->fifo_addr = slave_config->dst_addr;
		if (slave_config->dst_addr_width)
			pch->burst_sz = __ffs(slave_config->dst_addr_width);
		pch->burst_len = fixup_burst_len(slave_config->dst_maxburst,
						 pch->dmac->quirks);
	} else if (slave_config->direction == DMA_DEV_TO_MEM) {
		if (slave_config->src_addr)
			pch->fifo_addr = slave_config->src_addr;
		if (slave_config->src_addr_width)
			pch->burst_sz = __ffs(slave_config->src_addr_width);
		pch->burst_len = fixup_burst_len(slave_config->src_maxburst,
						 pch->dmac->quirks);
static int pl330_terminate_all(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct dma_pl330_desc *desc;
	unsigned long flags;
	struct pl330_dmac *pl330 = pch->dmac;
	bool power_down = false;

	pm_runtime_get_sync(pl330->ddma.dev);
	spin_lock_irqsave(&pch->lock, flags);

	spin_lock(&pl330->lock);
	pch->thread->req[0].desc = NULL;
	pch->thread->req[1].desc = NULL;
	pch->thread->req_running = -1;
	spin_unlock(&pl330->lock);

	power_down = pch->active;
	pch->active = false;

	/* Mark all desc done */
	list_for_each_entry(desc, &pch->submitted_list, node) {
		desc->status = FREE;
		dma_cookie_complete(&desc->txd);
	}

	list_for_each_entry(desc, &pch->work_list, node) {
		desc->status = FREE;
		dma_cookie_complete(&desc->txd);
	}

	list_splice_tail_init(&pch->submitted_list, &pl330->desc_pool);
	list_splice_tail_init(&pch->work_list, &pl330->desc_pool);
	list_splice_tail_init(&pch->completed_list, &pl330->desc_pool);
	spin_unlock_irqrestore(&pch->lock, flags);
	pm_runtime_mark_last_busy(pl330->ddma.dev);
	if (power_down)
		pm_runtime_put_autosuspend(pl330->ddma.dev);
	pm_runtime_put_autosuspend(pl330->ddma.dev);
/*
 * We don't support DMA_RESUME command because of hardware
 * limitations, so after pausing the channel we cannot restore
 * it to active state. We have to terminate channel and setup
 * DMA transfer again. This pause feature was implemented to
 * allow safely reading residue before channel termination.
 */
static int pl330_pause(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct pl330_dmac *pl330 = pch->dmac;
	unsigned long flags;

	pm_runtime_get_sync(pl330->ddma.dev);
	spin_lock_irqsave(&pch->lock, flags);

	spin_lock(&pl330->lock);
	spin_unlock(&pl330->lock);

	spin_unlock_irqrestore(&pch->lock, flags);
	pm_runtime_mark_last_busy(pl330->ddma.dev);
	pm_runtime_put_autosuspend(pl330->ddma.dev);

static void pl330_free_chan_resources(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct pl330_dmac *pl330 = pch->dmac;
	unsigned long flags;

	tasklet_kill(&pch->task);

	pm_runtime_get_sync(pch->dmac->ddma.dev);
	spin_lock_irqsave(&pl330->lock, flags);

	pl330_release_channel(pch->thread);

	list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);

	spin_unlock_irqrestore(&pl330->lock, flags);
	pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
	pm_runtime_put_autosuspend(pch->dmac->ddma.dev);
	pl330_unprep_slave_fifo(pch);
static int pl330_get_current_xferred_count(struct dma_pl330_chan *pch,
					   struct dma_pl330_desc *desc)
{
	struct pl330_thread *thrd = pch->thread;
	struct pl330_dmac *pl330 = pch->dmac;
	void __iomem *regs = thrd->dmac->base;

	pm_runtime_get_sync(pl330->ddma.dev);

	if (desc->rqcfg.src_inc) {
		val = readl(regs + SA(thrd->id));
		addr = desc->px.src_addr;
	} else {
		val = readl(regs + DA(thrd->id));
		addr = desc->px.dst_addr;
	}
	pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
	pm_runtime_put_autosuspend(pl330->ddma.dev);

	/* If DMAMOV hasn't finished yet, SAR/DAR can be zero */
static enum dma_status
pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
		struct dma_tx_state *txstate)
{
	enum dma_status ret;
	unsigned long flags;
	struct dma_pl330_desc *desc, *running = NULL, *last_enq = NULL;
	struct dma_pl330_chan *pch = to_pchan(chan);
	unsigned int transferred, residual = 0;

	ret = dma_cookie_status(chan, cookie, txstate);

	if (ret == DMA_COMPLETE)

	spin_lock_irqsave(&pch->lock, flags);
	spin_lock(&pch->thread->dmac->lock);

	if (pch->thread->req_running != -1)
		running = pch->thread->req[pch->thread->req_running].desc;

	last_enq = pch->thread->req[pch->thread->lstenq].desc;

	/* Check in pending list */
	list_for_each_entry(desc, &pch->work_list, node) {
		if (desc->status == DONE)
			transferred = desc->bytes_requested;
		else if (running && desc == running)
			transferred =
				pl330_get_current_xferred_count(pch, desc);
		else if (desc->status == BUSY)
			/*
			 * Busy but not running means either just enqueued,
			 * or finished and not yet marked done
			 */
			if (desc == last_enq)
				transferred = 0;
			else
				transferred = desc->bytes_requested;

		residual += desc->bytes_requested - transferred;
		if (desc->txd.cookie == cookie) {
			switch (desc->status) {
				ret = DMA_IN_PROGRESS;

	spin_unlock(&pch->thread->dmac->lock);
	spin_unlock_irqrestore(&pch->lock, flags);

	dma_set_residue(txstate, residual);

static void pl330_issue_pending(struct dma_chan *chan)
{
	struct dma_pl330_chan *pch = to_pchan(chan);
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);
	if (list_empty(&pch->work_list)) {
		/*
		 * Warn on nothing pending. Empty submitted_list may
		 * break our pm_runtime usage counter as it is
		 * updated on work_list emptiness status.
		 */
		WARN_ON(list_empty(&pch->submitted_list));
		pm_runtime_get_sync(pch->dmac->ddma.dev);
	}
	list_splice_tail_init(&pch->submitted_list, &pch->work_list);
	spin_unlock_irqrestore(&pch->lock, flags);

	pl330_tasklet((unsigned long)pch);
/*
 * We returned the last one of the circular list of descriptor(s)
 * from prep_xxx, so the argument to submit corresponds to the last
 * descriptor of the list.
 */
static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct dma_pl330_desc *desc, *last = to_desc(tx);
	struct dma_pl330_chan *pch = to_pchan(tx->chan);
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&pch->lock, flags);

	/* Assign cookies to all nodes */
	while (!list_empty(&last->node)) {
		desc = list_entry(last->node.next, struct dma_pl330_desc, node);

		desc->txd.callback = last->txd.callback;
		desc->txd.callback_param = last->txd.callback_param;

		dma_cookie_assign(&desc->txd);

		list_move_tail(&desc->node, &pch->submitted_list);
	}

	cookie = dma_cookie_assign(&last->txd);
	list_add_tail(&last->node, &pch->submitted_list);
	spin_unlock_irqrestore(&pch->lock, flags);

static inline void _init_desc(struct dma_pl330_desc *desc)
{
	desc->rqcfg.swap = SWAP_NO;
	desc->rqcfg.scctl = CCTRL0;
	desc->rqcfg.dcctl = CCTRL0;
	desc->txd.tx_submit = pl330_tx_submit;

	INIT_LIST_HEAD(&desc->node);
}

/* Returns the number of descriptors added to the DMAC pool */
static int add_desc(struct list_head *pool, spinlock_t *lock,
		    gfp_t flg, int count)
{
	struct dma_pl330_desc *desc;
	unsigned long flags;

	desc = kcalloc(count, sizeof(*desc), flg);

	spin_lock_irqsave(lock, flags);

	for (i = 0; i < count; i++) {
		_init_desc(&desc[i]);
		list_add_tail(&desc[i].node, pool);
	}

	spin_unlock_irqrestore(lock, flags);

static struct dma_pl330_desc *pluck_desc(struct list_head *pool,
					 spinlock_t *lock)
{
	struct dma_pl330_desc *desc = NULL;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);

	if (!list_empty(pool)) {
		desc = list_entry(pool->next,
				  struct dma_pl330_desc, node);

		list_del_init(&desc->node);

		desc->status = PREP;
		desc->txd.callback = NULL;
	}

	spin_unlock_irqrestore(lock, flags);

static struct dma_pl330_desc *pl330_get_desc(struct dma_pl330_chan *pch)
{
	struct pl330_dmac *pl330 = pch->dmac;
	u8 *peri_id = pch->chan.private;
	struct dma_pl330_desc *desc;

	/* Pluck one desc from the pool of DMAC */
	desc = pluck_desc(&pl330->desc_pool, &pl330->pool_lock);

	/* If the DMAC pool is empty, alloc new */
		DEFINE_SPINLOCK(lock);

		if (!add_desc(&pool, &lock, GFP_ATOMIC, 1))

		desc = pluck_desc(&pool, &lock);
		WARN_ON(!desc || !list_empty(&pool));

	/* Initialize the descriptor */
	desc->txd.cookie = 0;
	async_tx_ack(&desc->txd);

	desc->peri = peri_id ? pch->chan.chan_id : 0;
	desc->rqcfg.pcfg = &pch->dmac->pcfg;

	dma_async_tx_descriptor_init(&desc->txd, &pch->chan);

static inline void fill_px(struct pl330_xfer *px,
			   dma_addr_t dst, dma_addr_t src, size_t len)
{
	px->bytes = len;
	px->dst_addr = dst;
	px->src_addr = src;
}

static struct dma_pl330_desc *
__pl330_prep_dma_memcpy(struct dma_pl330_chan *pch, dma_addr_t dst,
			dma_addr_t src, size_t len)
{
	struct dma_pl330_desc *desc = pl330_get_desc(pch);

	if (!desc) {
		dev_err(pch->dmac->ddma.dev, "%s:%d Unable to fetch desc\n",
			__func__, __LINE__);
	/*
	 * Ideally we should look out for reqs bigger than
	 * those that can be programmed with 256 bytes of
	 * MC buffer, but considering a req size is seldom
	 * going to be word-unaligned and more than 200MB,
	 * we take it easy.
	 * Also, should the limit be reached we'd rather
	 * have the platform increase MC buffer size than
	 * complicate this API driver.
	 */
	fill_px(&desc->px, dst, src, len);

/* Call after fixing burst size */
static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
{
	struct dma_pl330_chan *pch = desc->pchan;
	struct pl330_dmac *pl330 = pch->dmac;

	burst_len = pl330->pcfg.data_bus_width / 8;
	burst_len *= pl330->pcfg.data_buf_dep / pl330->pcfg.num_chan;
	burst_len >>= desc->rqcfg.brst_size;

	/* src/dst_burst_len can't be more than 16 */
	if (burst_len > PL330_MAX_BURST)
		burst_len = PL330_MAX_BURST;
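
/*
 * Worked example (illustrative): with a 64-bit bus (data_bus_width = 64),
 * a 16-line MFIFO shared by 8 channels (data_buf_dep = 16, num_chan = 8)
 * and brst_size = 2 (4-byte beats):
 *
 *	burst_len = (64 / 8) * (16 / 8) >> 2 = 4 beats per burst
 */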
static struct dma_async_tx_descriptor *pl330_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t len,
		size_t period_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct dma_pl330_desc *desc = NULL, *first = NULL;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct pl330_dmac *pl330 = pch->dmac;

	if (len % period_len != 0)

	if (!is_slave_direction(direction)) {
		dev_err(pch->dmac->ddma.dev, "%s:%d Invalid dma direction\n",
		__func__, __LINE__);

	if (!pl330_prep_slave_fifo(pch, direction))

	for (i = 0; i < len / period_len; i++) {
		desc = pl330_get_desc(pch);
		if (!desc) {
			unsigned long iflags;

			dev_err(pch->dmac->ddma.dev, "%s:%d Unable to fetch desc\n",
				__func__, __LINE__);

			spin_lock_irqsave(&pl330->pool_lock, iflags);

			while (!list_empty(&first->node)) {
				desc = list_entry(first->node.next,
						struct dma_pl330_desc, node);
				list_move_tail(&desc->node, &pl330->desc_pool);
			}

			list_move_tail(&first->node, &pl330->desc_pool);

			spin_unlock_irqrestore(&pl330->pool_lock, iflags);

		switch (direction) {
		case DMA_MEM_TO_DEV:
			desc->rqcfg.src_inc = 1;
			desc->rqcfg.dst_inc = 0;
			dst = pch->fifo_dma;
			break;
		case DMA_DEV_TO_MEM:
			desc->rqcfg.src_inc = 0;
			desc->rqcfg.dst_inc = 1;
			src = pch->fifo_dma;

		desc->rqtype = direction;
		desc->rqcfg.brst_size = pch->burst_sz;
		desc->rqcfg.brst_len = pch->burst_len;
		desc->bytes_requested = period_len;
		fill_px(&desc->px, dst, src, period_len);

		list_add_tail(&desc->node, &first->node);

		dma_addr += period_len;

	desc->txd.flags = flags;
static struct dma_async_tx_descriptor *
pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
		      dma_addr_t src, size_t len, unsigned long flags)
{
	struct dma_pl330_desc *desc;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct pl330_dmac *pl330;

	if (unlikely(!pch || !len))

	desc = __pl330_prep_dma_memcpy(pch, dst, src, len);

	desc->rqcfg.src_inc = 1;
	desc->rqcfg.dst_inc = 1;
	desc->rqtype = DMA_MEM_TO_MEM;

	/* Select max possible burst size */
	burst = pl330->pcfg.data_bus_width / 8;

	/*
	 * Make sure we use a burst size that aligns with all the memcpy
	 * parameters because our DMA programming algorithm doesn't cope with
	 * transfers which straddle an entry in the DMA device's MFIFO.
	 */
	while ((src | dst | len) & (burst - 1))
		burst /= 2;

	desc->rqcfg.brst_size = 0;
	while (burst != (1 << desc->rqcfg.brst_size))
		desc->rqcfg.brst_size++;

	desc->rqcfg.brst_len = get_burst_len(desc, len);
	/*
	 * If burst size is smaller than bus width then make sure we only
	 * transfer one at a time to avoid a burst straddling an MFIFO entry.
	 */
	if (burst * 8 < pl330->pcfg.data_bus_width)
		desc->rqcfg.brst_len = 1;
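
	/*
	 * Worked example (illustrative): for dst = 0x1004, src = 0x2002 and
	 * len = 6, (src | dst | len) has bit 1 set, so the loop above halves
	 * burst down to 2 bytes and brst_size becomes 1; a fully
	 * 8-byte-aligned copy on a 64-bit bus keeps the full bus-width burst.
	 */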
2782 desc->bytes_requested = len;
2784 desc->txd.flags = flags;
2789 static void __pl330_giveback_desc(struct pl330_dmac *pl330,
2790 struct dma_pl330_desc *first)
2792 unsigned long flags;
2793 struct dma_pl330_desc *desc;
2798 spin_lock_irqsave(&pl330->pool_lock, flags);
2800 while (!list_empty(&first->node)) {
2801 desc = list_entry(first->node.next,
2802 struct dma_pl330_desc, node);
2803 list_move_tail(&desc->node, &pl330->desc_pool);
2806 list_move_tail(&first->node, &pl330->desc_pool);
2808 spin_unlock_irqrestore(&pl330->pool_lock, flags);
static struct dma_async_tx_descriptor *
pl330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flg, void *context)
{
	struct dma_pl330_desc *first, *desc = NULL;
	struct dma_pl330_chan *pch = to_pchan(chan);
	struct scatterlist *sg;
	int i;

	if (unlikely(!pch || !sgl || !sg_len))
		return NULL;

	if (!pl330_prep_slave_fifo(pch, direction))
		return NULL;

	first = NULL;

	for_each_sg(sgl, sg, sg_len, i) {

		desc = pl330_get_desc(pch);
		if (!desc) {
			struct pl330_dmac *pl330 = pch->dmac;

			dev_err(pch->dmac->ddma.dev,
				"%s:%d Unable to fetch desc\n",
				__func__, __LINE__);
			__pl330_giveback_desc(pl330, first);

			return NULL;
		}

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->node);

		if (direction == DMA_MEM_TO_DEV) {
			desc->rqcfg.src_inc = 1;
			desc->rqcfg.dst_inc = 0;
			fill_px(&desc->px, pch->fifo_dma, sg_dma_address(sg),
				sg_dma_len(sg));
		} else {
			desc->rqcfg.src_inc = 0;
			desc->rqcfg.dst_inc = 1;
			fill_px(&desc->px, sg_dma_address(sg), pch->fifo_dma,
				sg_dma_len(sg));
		}

		desc->rqcfg.brst_size = pch->burst_sz;
		desc->rqcfg.brst_len = pch->burst_len;
		desc->rqtype = direction;
		desc->bytes_requested = sg_dma_len(sg);
	}

	/* Return the last desc in the chain */
	desc->txd.flags = flg;
	return &desc->txd;
}
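
/*
 * Example (sketch only, not part of this driver): a peripheral driver
 * reaches this callback through the generic dmaengine API, e.g.:
 *
 *	nents = dma_map_sg(dev, sgl, sg_len, DMA_TO_DEVICE);
 *	desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
 *				       DMA_PREP_INTERRUPT);
 *	if (desc) {
 *		dmaengine_submit(desc);
 *		dma_async_issue_pending(chan);
 *	}
 *
 * dev, sgl, sg_len and chan are placeholders for the client's device,
 * mapped scatterlist and requested channel.
 */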
static irqreturn_t pl330_irq_handler(int irq, void *data)
{
	return pl330_update(data) ? IRQ_HANDLED : IRQ_NONE;
}
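
/*
 * Slave bus widths advertised to the dmaengine core via the
 * src_addr_widths/dst_addr_widths fields set in probe below.
 */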
#define PL330_DMA_BUSWIDTHS \
	BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \
	BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
	BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
	BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)
/*
 * Runtime PM callbacks are provided by the amba/bus.c driver.
 *
 * It is assumed here that IRQ-safe runtime PM is chosen in probe and that
 * the amba bus driver will only disable/enable the clock in its runtime PM
 * callbacks.
 */
static int __maybe_unused pl330_suspend(struct device *dev)
{
	struct amba_device *pcdev = to_amba_device(dev);

	pm_runtime_disable(dev);

	if (!pm_runtime_status_suspended(dev)) {
		/* amba did not disable the clock */
		amba_pclk_disable(pcdev);
	}
	amba_pclk_unprepare(pcdev);

	return 0;
}
static int __maybe_unused pl330_resume(struct device *dev)
{
	struct amba_device *pcdev = to_amba_device(dev);
	int ret;

	ret = amba_pclk_prepare(pcdev);
	if (ret)
		return ret;

	if (!pm_runtime_status_suspended(dev))
		ret = amba_pclk_enable(pcdev);

	pm_runtime_enable(dev);

	return ret;
}

static SIMPLE_DEV_PM_OPS(pl330_pm, pl330_suspend, pl330_resume);
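
/*
 * Probe sequence, roughly: set the DMA mask, allocate the DMAC and map
 * its registers, request the IRQs, read the hardware configuration via
 * pl330_add(), build the descriptor pool and channel list, then register
 * with the dmaengine core (and with the DT DMA helpers, when available).
 */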
static int
pl330_probe(struct amba_device *adev, const struct amba_id *id)
{
	struct pl330_config *pcfg;
	struct pl330_dmac *pl330;
	struct dma_pl330_chan *pch, *_p;
	struct dma_device *pd;
	struct resource *res;
	int i, ret, irq;
	int num_chan;
	struct device_node *np = adev->dev.of_node;

	ret = dma_set_mask_and_coherent(&adev->dev, DMA_BIT_MASK(32));
	if (ret)
		return ret;

	/* Allocate a new DMAC and its Channels */
	pl330 = devm_kzalloc(&adev->dev, sizeof(*pl330), GFP_KERNEL);
	if (!pl330)
		return -ENOMEM;

	pd = &pl330->ddma;
	pd->dev = &adev->dev;
	/* get quirks */
	for (i = 0; i < ARRAY_SIZE(of_quirks); i++)
		if (of_property_read_bool(np, of_quirks[i].quirk))
			pl330->quirks |= of_quirks[i].id;

	res = &adev->res;
	pl330->base = devm_ioremap_resource(&adev->dev, res);
	if (IS_ERR(pl330->base))
		return PTR_ERR(pl330->base);

	amba_set_drvdata(adev, pl330);

	for (i = 0; i < AMBA_NR_IRQS; i++) {
		irq = adev->irq[i];
		if (irq) {
			ret = devm_request_irq(&adev->dev, irq,
					       pl330_irq_handler, 0,
					       dev_name(&adev->dev), pl330);
			if (ret)
				return ret;
		} else {
			break;
		}
	}
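
	/*
	 * Read the DMAC's hardware configuration (number of channels,
	 * peripheral request interfaces and events) and bring up the core
	 * driver state via pl330_add().
	 */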
	pcfg = &pl330->pcfg;

	pcfg->periph_id = adev->periphid;
	ret = pl330_add(pl330);
	if (ret)
		return ret;

	INIT_LIST_HEAD(&pl330->desc_pool);
	spin_lock_init(&pl330->pool_lock);

	/* Create a descriptor pool of default size */
	if (!add_desc(&pl330->desc_pool, &pl330->pool_lock,
		      GFP_KERNEL, NR_DEFAULT_DESC))
		dev_warn(&adev->dev, "unable to allocate desc\n");
	INIT_LIST_HEAD(&pd->channels);

	/* Initialize channel parameters */
	num_chan = max_t(int, pcfg->num_peri, pcfg->num_chan);

	pl330->num_peripherals = num_chan;

	pl330->peripherals = kcalloc(num_chan, sizeof(*pch), GFP_KERNEL);
	if (!pl330->peripherals) {
		ret = -ENOMEM;
		goto probe_err2;
	}

	for (i = 0; i < num_chan; i++) {
		pch = &pl330->peripherals[i];

		pch->chan.private = adev->dev.of_node;
		INIT_LIST_HEAD(&pch->submitted_list);
		INIT_LIST_HEAD(&pch->work_list);
		INIT_LIST_HEAD(&pch->completed_list);
		spin_lock_init(&pch->lock);
		pch->thread = NULL;
		pch->chan.device = pd;
		pch->dmac = pl330;
		pch->dir = DMA_NONE;

		/* Add the channel to the DMAC list */
		list_add_tail(&pch->chan.device_node, &pd->channels);
	}
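
	/*
	 * Memcpy is always possible; slave, cyclic and private channels
	 * are only advertised when the DMAC actually has peripheral
	 * request interfaces.
	 */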
	dma_cap_set(DMA_MEMCPY, pd->cap_mask);
	if (pcfg->num_peri) {
		dma_cap_set(DMA_SLAVE, pd->cap_mask);
		dma_cap_set(DMA_CYCLIC, pd->cap_mask);
		dma_cap_set(DMA_PRIVATE, pd->cap_mask);
	}

	pd->device_alloc_chan_resources = pl330_alloc_chan_resources;
	pd->device_free_chan_resources = pl330_free_chan_resources;
	pd->device_prep_dma_memcpy = pl330_prep_dma_memcpy;
	pd->device_prep_dma_cyclic = pl330_prep_dma_cyclic;
	pd->device_tx_status = pl330_tx_status;
	pd->device_prep_slave_sg = pl330_prep_slave_sg;
	pd->device_config = pl330_config;
	pd->device_pause = pl330_pause;
	pd->device_terminate_all = pl330_terminate_all;
	pd->device_issue_pending = pl330_issue_pending;
	pd->src_addr_widths = PL330_DMA_BUSWIDTHS;
	pd->dst_addr_widths = PL330_DMA_BUSWIDTHS;
	pd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
	pd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
	pd->max_burst = ((pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP) ?
			 1 : PL330_MAX_BURST);
	ret = dma_async_device_register(pd);
	if (ret) {
		dev_err(&adev->dev, "unable to register DMAC\n");
		goto probe_err3;
	}

	if (adev->dev.of_node) {
		ret = of_dma_controller_register(adev->dev.of_node,
						 of_dma_pl330_xlate, pl330);
		if (ret) {
			dev_err(&adev->dev,
				"unable to register DMA to the generic DT DMA helpers\n");
		}
	}

	adev->dev.dma_parms = &pl330->dma_parms;
	/*
	 * This is the limit for transfers with a buswidth of 1; larger
	 * buswidths will have larger limits.
	 */
	ret = dma_set_max_seg_size(&adev->dev, 1900800);
	if (ret)
		dev_err(&adev->dev, "unable to set the seg size\n");

	dev_info(&adev->dev,
		"Loaded driver for PL330 DMAC-%x\n", adev->periphid);
	dev_info(&adev->dev,
		"\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
		pcfg->data_buf_dep, pcfg->data_bus_width / 8, pcfg->num_chan,
		pcfg->num_peri, pcfg->num_events);
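
	/*
	 * IRQ-safe runtime PM with autosuspend: the amba bus driver's
	 * runtime PM callbacks gate the clock, and it is released once
	 * the controller has been idle for PL330_AUTOSUSPEND_DELAY
	 * milliseconds.
	 */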
	pm_runtime_irq_safe(&adev->dev);
	pm_runtime_use_autosuspend(&adev->dev);
	pm_runtime_set_autosuspend_delay(&adev->dev, PL330_AUTOSUSPEND_DELAY);
	pm_runtime_mark_last_busy(&adev->dev);
	pm_runtime_put_autosuspend(&adev->dev);

	return 0;
probe_err3:
	/* Idle the DMAC */
	list_for_each_entry_safe(pch, _p, &pl330->ddma.channels,
			chan.device_node) {

		/* Remove the channel */
		list_del(&pch->chan.device_node);

		/* Flush the channel */
		if (pch->thread) {
			pl330_terminate_all(&pch->chan);
			pl330_free_chan_resources(&pch->chan);
		}
	}
probe_err2:
	pl330_del(pl330);

	return ret;
}
static int pl330_remove(struct amba_device *adev)
{
	struct pl330_dmac *pl330 = amba_get_drvdata(adev);
	struct dma_pl330_chan *pch, *_p;
	int i, irq;

	pm_runtime_get_noresume(pl330->ddma.dev);

	if (adev->dev.of_node)
		of_dma_controller_free(adev->dev.of_node);

	for (i = 0; i < AMBA_NR_IRQS; i++) {
		irq = adev->irq[i];
		if (irq)
			devm_free_irq(&adev->dev, irq, pl330);
	}

	dma_async_device_unregister(&pl330->ddma);

	/* Idle the DMAC */
	list_for_each_entry_safe(pch, _p, &pl330->ddma.channels,
			chan.device_node) {

		/* Remove the channel */
		list_del(&pch->chan.device_node);

		/* Flush the channel */
		if (pch->thread) {
			pl330_terminate_all(&pch->chan);
			pl330_free_chan_resources(&pch->chan);
		}
	}

	pl330_del(pl330);

	return 0;
}
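
/*
 * Match on the PL330 PrimeCell ID. Example (sketch only): a typical
 * device-tree node that binds against this driver; the unit address and
 * interrupt specifier are platform-specific placeholders:
 *
 *	pdma0: dma-controller@12680000 {
 *		compatible = "arm,pl330", "arm,primecell";
 *		reg = <0x12680000 0x1000>;
 *		interrupts = <GIC_SPI 35 IRQ_TYPE_LEVEL_HIGH>;
 *		#dma-cells = <1>;
 *	};
 */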
static const struct amba_id pl330_ids[] = {
	{
		.id	= 0x00041330,
		.mask	= 0x000fffff,
	},
	{ 0, 0 },
};

MODULE_DEVICE_TABLE(amba, pl330_ids);

static struct amba_driver pl330_driver = {
	.drv = {
		.owner = THIS_MODULE,
		.name = "dma-pl330",
		.pm = &pl330_pm,
	},
	.id_table = pl330_ids,
	.probe = pl330_probe,
	.remove = pl330_remove,
};

module_amba_driver(pl330_driver);

MODULE_AUTHOR("Jaswinder Singh <jassisinghbrar@gmail.com>");
MODULE_DESCRIPTION("API Driver for PL330 DMAC");
MODULE_LICENSE("GPL");