// SPDX-License-Identifier: GPL-2.0
// Copyright 2014-2015 Freescale

/*
 * Driver for NXP Layerscape Queue Direct Memory Access Controller
 *
 * Author:
 *  Wen He <wen.he_1@nxp.com>
 *  Jiaheng Fan <jiaheng.fan@nxp.com>
 */
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/of_dma.h>
#include <linux/dma-mapping.h>

#include "fsldma.h"
#include "virt-dma.h"
/* Register related definition */
#define FSL_QDMA_DMR 0x0
#define FSL_QDMA_DSR 0x4
#define FSL_QDMA_DEIER 0xe00
#define FSL_QDMA_DEDR 0xe04
#define FSL_QDMA_DECFDW0R 0xe10
#define FSL_QDMA_DECFDW1R 0xe14
#define FSL_QDMA_DECFDW2R 0xe18
#define FSL_QDMA_DECFDW3R 0xe1c
#define FSL_QDMA_DECFQIDR 0xe30
#define FSL_QDMA_DECBR 0xe34

#define FSL_QDMA_BCQMR(x) (0xc0 + 0x100 * (x))
#define FSL_QDMA_BCQSR(x) (0xc4 + 0x100 * (x))
#define FSL_QDMA_BCQEDPA_SADDR(x) (0xc8 + 0x100 * (x))
#define FSL_QDMA_BCQDPA_SADDR(x) (0xcc + 0x100 * (x))
#define FSL_QDMA_BCQEEPA_SADDR(x) (0xd0 + 0x100 * (x))
#define FSL_QDMA_BCQEPA_SADDR(x) (0xd4 + 0x100 * (x))
#define FSL_QDMA_BCQIER(x) (0xe0 + 0x100 * (x))
#define FSL_QDMA_BCQIDR(x) (0xe4 + 0x100 * (x))

#define FSL_QDMA_SQDPAR 0x80c
#define FSL_QDMA_SQEPAR 0x814
#define FSL_QDMA_BSQMR 0x800
#define FSL_QDMA_BSQSR 0x804
#define FSL_QDMA_BSQICR 0x828
#define FSL_QDMA_CQMR 0xa00
#define FSL_QDMA_CQDSCR1 0xa08
#define FSL_QDMA_CQDSCR2 0xa0c
#define FSL_QDMA_CQIER 0xa10
#define FSL_QDMA_CQEDR 0xa14
#define FSL_QDMA_SQCCMR 0xa20
/* Register bit and genmask definitions */
#define FSL_QDMA_CQIDR_SQT BIT(15)
#define QDMA_CCDF_FORMAT BIT(29)
#define QDMA_CCDF_SER BIT(30)
#define QDMA_SG_FIN BIT(30)
#define QDMA_SG_LEN_MASK GENMASK(29, 0)
#define QDMA_CCDF_MASK GENMASK(28, 20)

#define FSL_QDMA_DEDR_CLEAR GENMASK(31, 0)
#define FSL_QDMA_BCQIDR_CLEAR GENMASK(31, 0)
#define FSL_QDMA_DEIER_CLEAR GENMASK(31, 0)

#define FSL_QDMA_BCQIER_CQTIE BIT(15)
#define FSL_QDMA_BCQIER_CQPEIE BIT(23)
#define FSL_QDMA_BSQICR_ICEN BIT(31)

#define FSL_QDMA_BSQICR_ICST(x) ((x) << 16)
#define FSL_QDMA_CQIER_MEIE BIT(31)
#define FSL_QDMA_CQIER_TEIE BIT(0)
#define FSL_QDMA_SQCCMR_ENTER_WM BIT(21)

#define FSL_QDMA_BCQMR_EN BIT(31)
#define FSL_QDMA_BCQMR_EI BIT(30)
#define FSL_QDMA_BCQMR_CD_THLD(x) ((x) << 20)
#define FSL_QDMA_BCQMR_CQ_SIZE(x) ((x) << 16)

#define FSL_QDMA_BCQSR_QF BIT(16)
#define FSL_QDMA_BCQSR_XOFF BIT(0)

#define FSL_QDMA_BSQMR_EN BIT(31)
#define FSL_QDMA_BSQMR_DI BIT(30)
#define FSL_QDMA_BSQMR_CQ_SIZE(x) ((x) << 16)

#define FSL_QDMA_BSQSR_QE BIT(17)

#define FSL_QDMA_DMR_DQD BIT(30)
#define FSL_QDMA_DSR_DB BIT(31)
/* Size related definition */
#define FSL_QDMA_QUEUE_MAX 8
#define FSL_QDMA_COMMAND_BUFFER_SIZE 64
#define FSL_QDMA_DESCRIPTOR_BUFFER_SIZE 32
#define FSL_QDMA_CIRCULAR_DESC_SIZE_MIN 64
#define FSL_QDMA_CIRCULAR_DESC_SIZE_MAX 16384
#define FSL_QDMA_QUEUE_NUM_MAX 8
/* Field definition for CMD */
#define FSL_QDMA_CMD_RWTTYPE 0x4
#define FSL_QDMA_CMD_LWC 0x2
#define FSL_QDMA_CMD_RWTTYPE_OFFSET 28
#define FSL_QDMA_CMD_NS_OFFSET 27
#define FSL_QDMA_CMD_DQOS_OFFSET 24
#define FSL_QDMA_CMD_WTHROTL_OFFSET 20
#define FSL_QDMA_CMD_DSEN_OFFSET 19
#define FSL_QDMA_CMD_LWC_OFFSET 16
#define FSL_QDMA_CMD_PF BIT(17)

/* Field definition for Descriptor status */
#define QDMA_CCDF_STATUS_RTE BIT(5)
#define QDMA_CCDF_STATUS_WTE BIT(4)
#define QDMA_CCDF_STATUS_CDE BIT(2)
#define QDMA_CCDF_STATUS_SDE BIT(1)
#define QDMA_CCDF_STATUS_DDE BIT(0)
#define QDMA_CCDF_STATUS_MASK (QDMA_CCDF_STATUS_RTE | \
			       QDMA_CCDF_STATUS_WTE | \
			       QDMA_CCDF_STATUS_CDE | \
			       QDMA_CCDF_STATUS_SDE | \
			       QDMA_CCDF_STATUS_DDE)
/* Field definition for Descriptor offset */
#define QDMA_CCDF_OFFSET 20
#define QDMA_SDDF_CMD(x) (((u64)(x)) << 32)

/* Field definition for safe loop count */
#define FSL_QDMA_HALT_COUNT 1500
#define FSL_QDMA_MAX_SIZE 16385
#define FSL_QDMA_COMP_TIMEOUT 1000
#define FSL_COMMAND_QUEUE_OVERFLLOW 10

#define FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma_engine, x) \
	(((fsl_qdma_engine)->block_offset) * (x))
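
/*
 * Illustrative sketch (editorial addition, not used by the driver): how a
 * per-queue register address is formed from a block's base. The helper name
 * is hypothetical; "block_offset" is the per-block stride applied by
 * FSL_QDMA_BLOCK_BASE_OFFSET() above.
 */
static inline void __iomem *
fsl_qdma_ex_bcqmr_addr(void __iomem *block_base, int block_offset,
		       int block, int queue)
{
	/* base of the selected DMA block, plus this queue's mode register */
	return block_base + block_offset * block + FSL_QDMA_BCQMR(queue);
}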
/*
 * struct fsl_qdma_format - This is the struct describing the compound
 *			    descriptor format used by the qDMA.
 * @status:		    Command status and enqueue status notification.
 * @cfg:		    Frame offset and frame format.
 * @addr_lo:		    Holds the lower 32 bits of the compound
 *			    descriptor's 40-bit memory address.
 * @addr_hi:		    Same as above member, but holds the upper 8 bits
 *			    of the 40-bit memory address.
 * @__reserved1:	    Reserved field.
 * @cfg8b_w1:		    Compound descriptor command queue origin produced
 *			    by qDMA and dynamic debug field.
 * @data:		    Pointer to the 40-bit memory address; describes
 *			    DMA source and DMA destination information.
 */
struct fsl_qdma_format {
	__le32 status;
	__le32 cfg;
	union {
		struct {
			__le32 addr_lo;
			u8 addr_hi;
			u8 __reserved1[2];
			u8 cfg8b_w1;
		} __packed;
		__le64 data;
	};
} __packed;
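
/*
 * Example (editorial addition): the union above lets the 40-bit descriptor
 * address be read either through the packed fields or as one __le64 word.
 * This sketch reassembles it from addr_lo/addr_hi, matching what
 * qdma_ccdf_addr_get64() below does via the "data" view.
 */
static inline u64 fsl_qdma_ex_addr40(const struct fsl_qdma_format *desc)
{
	/* bits 0-31 come from addr_lo, bits 32-39 from addr_hi */
	return ((u64)desc->addr_hi << 32) | le32_to_cpu(desc->addr_lo);
}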
/* qDMA status notification pre information */
struct fsl_pre_status {
	u64 addr;
	u8 queue;
};

static DEFINE_PER_CPU(struct fsl_pre_status, pre);
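
/*
 * Note (editorial): the per-cpu "pre" record caches the queue number and
 * completion address of the most recently processed status entry.
 * fsl_qdma_queue_transfer_complete() below compares each new status entry
 * against it, so a notification the hardware reports twice is retired from
 * the status ring without completing a descriptor a second time.
 */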
struct fsl_qdma_chan {
	struct virt_dma_chan vchan;
	struct virt_dma_desc vdesc;
	enum dma_status status;
	struct fsl_qdma_engine *qdma;
	struct fsl_qdma_queue *queue;
};
struct fsl_qdma_queue {
	struct fsl_qdma_format *virt_head;
	struct fsl_qdma_format *virt_tail;
	struct list_head comp_used;
	struct list_head comp_free;
	struct dma_pool *comp_pool;
	struct dma_pool *desc_pool;
	spinlock_t queue_lock;
	dma_addr_t bus_addr;
	u32 n_cq;
	u32 id;
	struct fsl_qdma_format *cq;
	void __iomem *block_base;
};
struct fsl_qdma_comp {
	dma_addr_t bus_addr;
	dma_addr_t desc_bus_addr;
	struct fsl_qdma_format *virt_addr;
	struct fsl_qdma_format *desc_virt_addr;
	struct fsl_qdma_chan *qchan;
	struct virt_dma_desc vdesc;
	struct list_head list;
};
struct fsl_qdma_engine {
	struct dma_device dma_dev;
	void __iomem *ctrl_base;
	void __iomem *status_base;
	void __iomem *block_base;
	u32 n_chans;
	u32 n_queues;
	struct mutex fsl_qdma_mutex;
	int error_irq;
	int *queue_irq;
	u32 feature;
	struct fsl_qdma_queue *queue;
	struct fsl_qdma_queue **status;
	struct fsl_qdma_chan *chans;
	int block_number;
	int block_offset;
	int irq_base;
	int desc_allocated;
};
static inline u64
qdma_ccdf_addr_get64(const struct fsl_qdma_format *ccdf)
{
	return le64_to_cpu(ccdf->data) & (U64_MAX >> 24);
}

static inline void
qdma_desc_addr_set64(struct fsl_qdma_format *ccdf, u64 addr)
{
	ccdf->addr_hi = upper_32_bits(addr);
	ccdf->addr_lo = cpu_to_le32(lower_32_bits(addr));
}

static inline u8
qdma_ccdf_get_queue(const struct fsl_qdma_format *ccdf)
{
	return ccdf->cfg8b_w1 & U8_MAX;
}

static inline int
qdma_ccdf_get_offset(const struct fsl_qdma_format *ccdf)
{
	return (le32_to_cpu(ccdf->cfg) & QDMA_CCDF_MASK) >> QDMA_CCDF_OFFSET;
}

static inline void
qdma_ccdf_set_format(struct fsl_qdma_format *ccdf, int offset)
{
	ccdf->cfg = cpu_to_le32(QDMA_CCDF_FORMAT |
				(offset << QDMA_CCDF_OFFSET));
}

static inline int
qdma_ccdf_get_status(const struct fsl_qdma_format *ccdf)
{
	return (le32_to_cpu(ccdf->status) & QDMA_CCDF_STATUS_MASK);
}

static inline void
qdma_ccdf_set_ser(struct fsl_qdma_format *ccdf, int status)
{
	ccdf->status = cpu_to_le32(QDMA_CCDF_SER | status);
}

static inline void qdma_csgf_set_len(struct fsl_qdma_format *csgf, int len)
{
	csgf->cfg = cpu_to_le32(len & QDMA_SG_LEN_MASK);
}

static inline void qdma_csgf_set_f(struct fsl_qdma_format *csgf, int len)
{
	csgf->cfg = cpu_to_le32(QDMA_SG_FIN | (len & QDMA_SG_LEN_MASK));
}
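
/*
 * Illustrative sketch (editorial addition, not used by the driver): filling
 * one compound S/G table entry with the helpers above. The helper name is
 * hypothetical; "last" selects the final-entry variant, which also sets the
 * QDMA_SG_FIN bit.
 */
static inline void
qdma_ex_fill_sg_entry(struct fsl_qdma_format *csgf, dma_addr_t buf,
		      u32 len, bool last)
{
	qdma_desc_addr_set64(csgf, buf);	/* 40-bit buffer address */
	if (last)
		qdma_csgf_set_f(csgf, len);	/* length + final flag */
	else
		qdma_csgf_set_len(csgf, len);	/* length only */
}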
static u32 qdma_readl(struct fsl_qdma_engine *qdma, void __iomem *addr)
{
	return FSL_DMA_IN(qdma, addr, 32);
}

static void qdma_writel(struct fsl_qdma_engine *qdma, u32 val,
			void __iomem *addr)
{
	FSL_DMA_OUT(qdma, addr, val, 32);
}

static struct fsl_qdma_chan *to_fsl_qdma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct fsl_qdma_chan, vchan.chan);
}

static struct fsl_qdma_comp *to_fsl_qdma_comp(struct virt_dma_desc *vd)
{
	return container_of(vd, struct fsl_qdma_comp, vdesc);
}
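
/*
 * Note (editorial): FSL_DMA_IN/FSL_DMA_OUT come from fsldma.h and select
 * big- or little-endian MMIO accessors based on the engine's "feature"
 * flag, which probe below derives from the "big-endian" device-tree
 * property. All register access in this driver funnels through these two
 * wrappers.
 */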
static void fsl_qdma_free_chan_resources(struct dma_chan *chan)
{
	struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
	struct fsl_qdma_engine *fsl_qdma = fsl_chan->qdma;
	struct fsl_qdma_comp *comp_temp, *_comp_temp;
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);

	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);

	if (!fsl_queue->comp_pool && !fsl_queue->desc_pool)
		return;

	list_for_each_entry_safe(comp_temp, _comp_temp,
				 &fsl_queue->comp_used, list) {
		dma_pool_free(fsl_queue->comp_pool,
			      comp_temp->virt_addr,
			      comp_temp->bus_addr);
		dma_pool_free(fsl_queue->desc_pool,
			      comp_temp->desc_virt_addr,
			      comp_temp->desc_bus_addr);
		list_del(&comp_temp->list);
		kfree(comp_temp);
	}

	list_for_each_entry_safe(comp_temp, _comp_temp,
				 &fsl_queue->comp_free, list) {
		dma_pool_free(fsl_queue->comp_pool,
			      comp_temp->virt_addr,
			      comp_temp->bus_addr);
		dma_pool_free(fsl_queue->desc_pool,
			      comp_temp->desc_virt_addr,
			      comp_temp->desc_bus_addr);
		list_del(&comp_temp->list);
		kfree(comp_temp);
	}

	dma_pool_destroy(fsl_queue->comp_pool);
	dma_pool_destroy(fsl_queue->desc_pool);

	fsl_qdma->desc_allocated--;
	fsl_queue->comp_pool = NULL;
	fsl_queue->desc_pool = NULL;
}
static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
				      dma_addr_t dst, dma_addr_t src, u32 len)
{
	u32 cmd;
	struct fsl_qdma_format *sdf, *ddf;
	struct fsl_qdma_format *ccdf, *csgf_desc, *csgf_src, *csgf_dest;

	ccdf = fsl_comp->virt_addr;
	csgf_desc = fsl_comp->virt_addr + 1;
	csgf_src = fsl_comp->virt_addr + 2;
	csgf_dest = fsl_comp->virt_addr + 3;
	sdf = fsl_comp->desc_virt_addr;
	ddf = fsl_comp->desc_virt_addr + 1;

	memset(fsl_comp->virt_addr, 0, FSL_QDMA_COMMAND_BUFFER_SIZE);
	memset(fsl_comp->desc_virt_addr, 0, FSL_QDMA_DESCRIPTOR_BUFFER_SIZE);
	/* Head Command Descriptor (Frame Descriptor) */
	qdma_desc_addr_set64(ccdf, fsl_comp->bus_addr + 16);
	qdma_ccdf_set_format(ccdf, qdma_ccdf_get_offset(ccdf));
	qdma_ccdf_set_ser(ccdf, qdma_ccdf_get_status(ccdf));
	/* Status notification is enqueued to status queue. */
	/* Compound Command Descriptor (Frame List Table) */
	qdma_desc_addr_set64(csgf_desc, fsl_comp->desc_bus_addr);
	/* It must be 32 as Compound S/G Descriptor */
	qdma_csgf_set_len(csgf_desc, 32);
	qdma_desc_addr_set64(csgf_src, src);
	qdma_csgf_set_len(csgf_src, len);
	qdma_desc_addr_set64(csgf_dest, dst);
	qdma_csgf_set_len(csgf_dest, len);
	/* This entry is the last entry. */
	qdma_csgf_set_f(csgf_dest, len);
	/* Descriptor Buffer */
	cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE <<
			  FSL_QDMA_CMD_RWTTYPE_OFFSET) |
			  FSL_QDMA_CMD_PF;
	sdf->data = QDMA_SDDF_CMD(cmd);

	cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE <<
			  FSL_QDMA_CMD_RWTTYPE_OFFSET);
	cmd |= cpu_to_le32(FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET);
	ddf->data = QDMA_SDDF_CMD(cmd);
}
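
/*
 * Layout produced above (editorial summary): the 64-byte command buffer
 * holds four 16-byte fsl_qdma_format entries:
 *
 *	[0] CCDF  header; its address field points at entry [1]
 *	          (fsl_comp->bus_addr + 16) and SER requests a status
 *	          notification on completion
 *	[1] CSGF  table entry pointing at the 32-byte SD/DD buffer
 *	[2] CSGF  source buffer entry
 *	[3] CSGF  destination buffer entry, final bit set
 *
 * The separate 32-byte descriptor buffer carries the source (SD) and
 * destination (DD) command words built from FSL_QDMA_CMD_*.
 */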
/*
 * Pre-request full command descriptor for enqueue.
 */
static int fsl_qdma_pre_request_enqueue_desc(struct fsl_qdma_queue *queue)
{
	int i;
	struct fsl_qdma_comp *comp_temp, *_comp_temp;

	for (i = 0; i < queue->n_cq + FSL_COMMAND_QUEUE_OVERFLLOW; i++) {
		comp_temp = kzalloc(sizeof(*comp_temp), GFP_KERNEL);
		if (!comp_temp)
			goto err_alloc;

		comp_temp->virt_addr =
			dma_pool_alloc(queue->comp_pool, GFP_KERNEL,
				       &comp_temp->bus_addr);
		if (!comp_temp->virt_addr)
			goto err_dma_alloc;

		comp_temp->desc_virt_addr =
			dma_pool_alloc(queue->desc_pool, GFP_KERNEL,
				       &comp_temp->desc_bus_addr);
		if (!comp_temp->desc_virt_addr)
			goto err_desc_dma_alloc;

		list_add_tail(&comp_temp->list, &queue->comp_free);
	}

	return 0;

err_desc_dma_alloc:
	dma_pool_free(queue->comp_pool, comp_temp->virt_addr,
		      comp_temp->bus_addr);

err_dma_alloc:
	kfree(comp_temp);

err_alloc:
	list_for_each_entry_safe(comp_temp, _comp_temp,
				 &queue->comp_free, list) {
		if (comp_temp->virt_addr)
			dma_pool_free(queue->comp_pool,
				      comp_temp->virt_addr,
				      comp_temp->bus_addr);
		if (comp_temp->desc_virt_addr)
			dma_pool_free(queue->desc_pool,
				      comp_temp->desc_virt_addr,
				      comp_temp->desc_bus_addr);

		list_del(&comp_temp->list);
		kfree(comp_temp);
	}

	return -ENOMEM;
}
/*
 * Request a command descriptor for enqueue.
 */
static struct fsl_qdma_comp
*fsl_qdma_request_enqueue_desc(struct fsl_qdma_chan *fsl_chan)
{
	unsigned long flags;
	struct fsl_qdma_comp *comp_temp;
	int timeout = FSL_QDMA_COMP_TIMEOUT;
	struct fsl_qdma_queue *queue = fsl_chan->queue;

	while (timeout--) {
		spin_lock_irqsave(&queue->queue_lock, flags);
		if (!list_empty(&queue->comp_free)) {
			comp_temp = list_first_entry(&queue->comp_free,
						     struct fsl_qdma_comp,
						     list);
			list_del(&comp_temp->list);

			spin_unlock_irqrestore(&queue->queue_lock, flags);
			comp_temp->qchan = fsl_chan;
			return comp_temp;
		}

		spin_unlock_irqrestore(&queue->queue_lock, flags);
		udelay(1);
	}

	return NULL;
}
static struct fsl_qdma_queue
*fsl_qdma_alloc_queue_resources(struct platform_device *pdev,
				struct fsl_qdma_engine *fsl_qdma)
{
	int ret, len, i, j;
	int queue_num, block_number;
	unsigned int queue_size[FSL_QDMA_QUEUE_MAX];
	struct fsl_qdma_queue *queue_head, *queue_temp;

	queue_num = fsl_qdma->n_queues;
	block_number = fsl_qdma->block_number;

	if (queue_num > FSL_QDMA_QUEUE_MAX)
		queue_num = FSL_QDMA_QUEUE_MAX;
	len = sizeof(*queue_head) * queue_num * block_number;
	queue_head = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
	if (!queue_head)
		return NULL;

	ret = device_property_read_u32_array(&pdev->dev, "queue-sizes",
					     queue_size, queue_num);
	if (ret) {
		dev_err(&pdev->dev, "Can't get queue-sizes.\n");
		return NULL;
	}

	for (j = 0; j < block_number; j++) {
		for (i = 0; i < queue_num; i++) {
			if (queue_size[i] > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX ||
			    queue_size[i] < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
				dev_err(&pdev->dev,
					"Get wrong queue-sizes.\n");
				return NULL;
			}
			queue_temp = queue_head + i + (j * queue_num);

			queue_temp->cq =
			dmam_alloc_coherent(&pdev->dev,
					    sizeof(struct fsl_qdma_format) *
					    queue_size[i],
					    &queue_temp->bus_addr,
					    GFP_KERNEL);
			if (!queue_temp->cq)
				return NULL;
			queue_temp->block_base = fsl_qdma->block_base +
				FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
			queue_temp->n_cq = queue_size[i];
			queue_temp->id = i;
			queue_temp->virt_head = queue_temp->cq;
			queue_temp->virt_tail = queue_temp->cq;
			/*
			 * List for queue command buffer
			 */
			INIT_LIST_HEAD(&queue_temp->comp_used);
			spin_lock_init(&queue_temp->queue_lock);
		}
	}

	return queue_head;
}
static struct fsl_qdma_queue
*fsl_qdma_prep_status_queue(struct platform_device *pdev)
{
	int ret;
	unsigned int status_size;
	struct fsl_qdma_queue *status_head;
	struct device_node *np = pdev->dev.of_node;

	ret = of_property_read_u32(np, "status-sizes", &status_size);
	if (ret) {
		dev_err(&pdev->dev, "Can't get status-sizes.\n");
		return NULL;
	}
	if (status_size > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX ||
	    status_size < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {
		dev_err(&pdev->dev, "Get wrong status_size.\n");
		return NULL;
	}
	status_head = devm_kzalloc(&pdev->dev,
				   sizeof(*status_head), GFP_KERNEL);
	if (!status_head)
		return NULL;

	/*
	 * Buffer for queue command
	 */
	status_head->cq = dmam_alloc_coherent(&pdev->dev,
					      sizeof(struct fsl_qdma_format) *
					      status_size,
					      &status_head->bus_addr,
					      GFP_KERNEL);
	if (!status_head->cq) {
		devm_kfree(&pdev->dev, status_head);
		return NULL;
	}

	status_head->n_cq = status_size;
	status_head->virt_head = status_head->cq;
	status_head->virt_tail = status_head->cq;
	status_head->comp_pool = NULL;

	return status_head;
}
static int fsl_qdma_halt(struct fsl_qdma_engine *fsl_qdma)
{
	u32 reg;
	int i, j, count = FSL_QDMA_HALT_COUNT;
	void __iomem *block, *ctrl = fsl_qdma->ctrl_base;

	/* Disable the command queue and wait for idle state. */
	reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
	reg |= FSL_QDMA_DMR_DQD;
	qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
	for (j = 0; j < fsl_qdma->block_number; j++) {
		block = fsl_qdma->block_base +
			FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
		for (i = 0; i < FSL_QDMA_QUEUE_NUM_MAX; i++)
			qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQMR(i));
	}
	while (1) {
		reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DSR);
		if (!(reg & FSL_QDMA_DSR_DB))
			break;

		if (count-- < 0)
			return -EBUSY;
		udelay(100);
	}

	for (j = 0; j < fsl_qdma->block_number; j++) {
		block = fsl_qdma->block_base +
			FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);

		/* Disable status queue. */
		qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BSQMR);

		/*
		 * Clear the command queue interrupt detect register for
		 * all queues.
		 */
		qdma_writel(fsl_qdma, FSL_QDMA_BCQIDR_CLEAR,
			    block + FSL_QDMA_BCQIDR(0));
	}

	return 0;
}
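
/*
 * Note (editorial): with FSL_QDMA_HALT_COUNT = 1500 polls and a 100 us
 * delay per poll, the halt path above waits up to roughly 150 ms for the
 * DSR busy bit to clear before giving up with -EBUSY.
 */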
static int
fsl_qdma_queue_transfer_complete(struct fsl_qdma_engine *fsl_qdma,
				 void __iomem *block,
				 int id)
{
	bool duplicate;
	u32 reg, i, count;
	u8 completion_status;
	struct fsl_qdma_queue *temp_queue;
	struct fsl_qdma_format *status_addr;
	struct fsl_qdma_comp *fsl_comp = NULL;
	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;
	struct fsl_qdma_queue *fsl_status = fsl_qdma->status[id];

	count = FSL_QDMA_MAX_SIZE;

	while (count--) {
		duplicate = 0;
		reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQSR);
		if (reg & FSL_QDMA_BSQSR_QE)
			return 0;

		status_addr = fsl_status->virt_head;

		if (qdma_ccdf_get_queue(status_addr) ==
		    __this_cpu_read(pre.queue) &&
		    qdma_ccdf_addr_get64(status_addr) ==
		    __this_cpu_read(pre.addr))
			duplicate = 1;
		i = qdma_ccdf_get_queue(status_addr) +
			id * fsl_qdma->n_queues;
		__this_cpu_write(pre.addr, qdma_ccdf_addr_get64(status_addr));
		__this_cpu_write(pre.queue, qdma_ccdf_get_queue(status_addr));
		temp_queue = fsl_queue + i;

		spin_lock(&temp_queue->queue_lock);
		if (list_empty(&temp_queue->comp_used)) {
			if (!duplicate) {
				spin_unlock(&temp_queue->queue_lock);
				return -EAGAIN;
			}
		} else {
			fsl_comp = list_first_entry(&temp_queue->comp_used,
						    struct fsl_qdma_comp, list);
			if (fsl_comp->bus_addr + 16 !=
			    __this_cpu_read(pre.addr)) {
				if (!duplicate) {
					spin_unlock(&temp_queue->queue_lock);
					return -EAGAIN;
				}
			}
		}

		if (duplicate) {
			reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
			reg |= FSL_QDMA_BSQMR_DI;
			qdma_desc_addr_set64(status_addr, 0x0);
			fsl_status->virt_head++;
			if (fsl_status->virt_head == fsl_status->cq
						     + fsl_status->n_cq)
				fsl_status->virt_head = fsl_status->cq;
			qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
			spin_unlock(&temp_queue->queue_lock);
			continue;
		}
		list_del(&fsl_comp->list);

		completion_status = qdma_ccdf_get_status(status_addr);

		reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
		reg |= FSL_QDMA_BSQMR_DI;
		qdma_desc_addr_set64(status_addr, 0x0);
		fsl_status->virt_head++;
		if (fsl_status->virt_head == fsl_status->cq + fsl_status->n_cq)
			fsl_status->virt_head = fsl_status->cq;
		qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
		spin_unlock(&temp_queue->queue_lock);

		/*
		 * The completion_status is evaluated here
		 * (outside of the spin lock).
		 */
		if (completion_status) {
			/* A completion error occurred! */
			if (completion_status & QDMA_CCDF_STATUS_WTE) {
				/* Write transaction error */
				fsl_comp->vdesc.tx_result.result =
					DMA_TRANS_WRITE_FAILED;
			} else if (completion_status & QDMA_CCDF_STATUS_RTE) {
				/* Read transaction error */
				fsl_comp->vdesc.tx_result.result =
					DMA_TRANS_READ_FAILED;
			} else {
				/*
				 * Command/source/destination
				 * description error
				 */
				fsl_comp->vdesc.tx_result.result =
					DMA_TRANS_ABORTED;
				dev_err(fsl_qdma->dma_dev.dev,
					"DMA status descriptor error %x\n",
					completion_status);
			}
		}

		spin_lock(&fsl_comp->qchan->vchan.lock);
		vchan_cookie_complete(&fsl_comp->vdesc);
		fsl_comp->qchan->status = DMA_COMPLETE;
		spin_unlock(&fsl_comp->qchan->vchan.lock);
	}

	return 0;
}
static irqreturn_t fsl_qdma_error_handler(int irq, void *dev_id)
{
	unsigned int intr;
	struct fsl_qdma_engine *fsl_qdma = dev_id;
	void __iomem *status = fsl_qdma->status_base;
	unsigned int decfdw0r;
	unsigned int decfdw1r;
	unsigned int decfdw2r;
	unsigned int decfdw3r;

	intr = qdma_readl(fsl_qdma, status + FSL_QDMA_DEDR);

	if (intr) {
		decfdw0r = qdma_readl(fsl_qdma, status + FSL_QDMA_DECFDW0R);
		decfdw1r = qdma_readl(fsl_qdma, status + FSL_QDMA_DECFDW1R);
		decfdw2r = qdma_readl(fsl_qdma, status + FSL_QDMA_DECFDW2R);
		decfdw3r = qdma_readl(fsl_qdma, status + FSL_QDMA_DECFDW3R);
		dev_err(fsl_qdma->dma_dev.dev,
			"DMA transaction error! (%x: %x-%x-%x-%x)\n",
			intr, decfdw0r, decfdw1r, decfdw2r, decfdw3r);
	}

	qdma_writel(fsl_qdma, FSL_QDMA_DEDR_CLEAR, status + FSL_QDMA_DEDR);
	return IRQ_HANDLED;
}
static irqreturn_t fsl_qdma_queue_handler(int irq, void *dev_id)
{
	int id;
	unsigned int intr, reg;
	struct fsl_qdma_engine *fsl_qdma = dev_id;
	void __iomem *block, *ctrl = fsl_qdma->ctrl_base;

	id = irq - fsl_qdma->irq_base;
	if (id < 0 || id > fsl_qdma->block_number) {
		dev_err(fsl_qdma->dma_dev.dev,
			"irq %d is wrong, irq_base is %d\n",
			irq, fsl_qdma->irq_base);
	}

	block = fsl_qdma->block_base +
		FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, id);

	intr = qdma_readl(fsl_qdma, block + FSL_QDMA_BCQIDR(0));

	if ((intr & FSL_QDMA_CQIDR_SQT) != 0)
		intr = fsl_qdma_queue_transfer_complete(fsl_qdma, block, id);

	if (intr != 0) {
		reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
		reg |= FSL_QDMA_DMR_DQD;
		qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);
		qdma_writel(fsl_qdma, 0, block + FSL_QDMA_BCQIER(0));
		dev_err(fsl_qdma->dma_dev.dev, "QDMA: status err!\n");
	}

	/* Clear all detected events and interrupts. */
	qdma_writel(fsl_qdma, FSL_QDMA_BCQIDR_CLEAR,
		    block + FSL_QDMA_BCQIDR(0));

	return IRQ_HANDLED;
}
static int
fsl_qdma_irq_init(struct platform_device *pdev,
		  struct fsl_qdma_engine *fsl_qdma)
{
	int i;
	int cpu;
	int ret;
	char irq_name[20];

	fsl_qdma->error_irq =
		platform_get_irq_byname(pdev, "qdma-error");
	if (fsl_qdma->error_irq < 0)
		return fsl_qdma->error_irq;

	ret = devm_request_irq(&pdev->dev, fsl_qdma->error_irq,
			       fsl_qdma_error_handler, 0,
			       "qDMA error", fsl_qdma);
	if (ret) {
		dev_err(&pdev->dev, "Can't register qDMA controller IRQ.\n");
		return ret;
	}

	for (i = 0; i < fsl_qdma->block_number; i++) {
		sprintf(irq_name, "qdma-queue%d", i);
		fsl_qdma->queue_irq[i] =
			platform_get_irq_byname(pdev, irq_name);

		if (fsl_qdma->queue_irq[i] < 0)
			return fsl_qdma->queue_irq[i];

		ret = devm_request_irq(&pdev->dev,
				       fsl_qdma->queue_irq[i],
				       fsl_qdma_queue_handler,
				       0,
				       "qDMA queue",
				       fsl_qdma);
		if (ret) {
			dev_err(&pdev->dev,
				"Can't register qDMA queue IRQ.\n");
			return ret;
		}

		cpu = i % num_online_cpus();
		ret = irq_set_affinity_hint(fsl_qdma->queue_irq[i],
					    get_cpu_mask(cpu));
		if (ret) {
			dev_err(&pdev->dev,
				"Can't set cpu %d affinity to IRQ %d.\n",
				cpu,
				fsl_qdma->queue_irq[i]);
			return ret;
		}
	}

	return 0;
}

static void fsl_qdma_irq_exit(struct platform_device *pdev,
			      struct fsl_qdma_engine *fsl_qdma)
{
	int i;

	devm_free_irq(&pdev->dev, fsl_qdma->error_irq, fsl_qdma);
	for (i = 0; i < fsl_qdma->block_number; i++)
		devm_free_irq(&pdev->dev, fsl_qdma->queue_irq[i], fsl_qdma);
}
static int fsl_qdma_reg_init(struct fsl_qdma_engine *fsl_qdma)
{
	u32 reg;
	int i, j, ret;
	struct fsl_qdma_queue *temp;
	void __iomem *status = fsl_qdma->status_base;
	void __iomem *block, *ctrl = fsl_qdma->ctrl_base;
	struct fsl_qdma_queue *fsl_queue = fsl_qdma->queue;

	/* Try to halt the qDMA engine first. */
	ret = fsl_qdma_halt(fsl_qdma);
	if (ret) {
		dev_err(fsl_qdma->dma_dev.dev, "DMA halt failed!\n");
		return ret;
	}

	for (i = 0; i < fsl_qdma->block_number; i++) {
		/*
		 * Clear the command queue interrupt detect register for
		 * all queues.
		 */
		block = fsl_qdma->block_base +
			FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, i);
		qdma_writel(fsl_qdma, FSL_QDMA_BCQIDR_CLEAR,
			    block + FSL_QDMA_BCQIDR(0));
	}

	for (j = 0; j < fsl_qdma->block_number; j++) {
		block = fsl_qdma->block_base +
			FSL_QDMA_BLOCK_BASE_OFFSET(fsl_qdma, j);
		for (i = 0; i < fsl_qdma->n_queues; i++) {
			temp = fsl_queue + i + (j * fsl_qdma->n_queues);
			/*
			 * Initialize Command Queue registers to point to
			 * the first command descriptor in memory:
			 * Dequeue Pointer Address Registers
			 * Enqueue Pointer Address Registers
			 */
			qdma_writel(fsl_qdma, temp->bus_addr,
				    block + FSL_QDMA_BCQDPA_SADDR(i));
			qdma_writel(fsl_qdma, temp->bus_addr,
				    block + FSL_QDMA_BCQEPA_SADDR(i));

			/* Initialize the queue mode. */
			reg = FSL_QDMA_BCQMR_EN;
			reg |= FSL_QDMA_BCQMR_CD_THLD(ilog2(temp->n_cq) - 4);
			reg |= FSL_QDMA_BCQMR_CQ_SIZE(ilog2(temp->n_cq) - 6);
			qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BCQMR(i));
		}

		/*
		 * Workaround for erratum ERR010812: XOFF must be enabled
		 * to avoid enqueue rejections, so set SQCCMR ENTER_WM
		 * to 0x20.
		 */
		qdma_writel(fsl_qdma, FSL_QDMA_SQCCMR_ENTER_WM,
			    block + FSL_QDMA_SQCCMR);

		/*
		 * Initialize status queue registers to point to the first
		 * command descriptor in memory:
		 * Dequeue Pointer Address Registers
		 * Enqueue Pointer Address Registers
		 */
		qdma_writel(fsl_qdma, fsl_qdma->status[j]->bus_addr,
			    block + FSL_QDMA_SQEPAR);
		qdma_writel(fsl_qdma, fsl_qdma->status[j]->bus_addr,
			    block + FSL_QDMA_SQDPAR);
		/* Initialize status queue interrupt. */
		qdma_writel(fsl_qdma, FSL_QDMA_BCQIER_CQTIE,
			    block + FSL_QDMA_BCQIER(0));
		qdma_writel(fsl_qdma, FSL_QDMA_BSQICR_ICEN |
			    FSL_QDMA_BSQICR_ICST(5) | 0x8000,
			    block + FSL_QDMA_BSQICR);
		qdma_writel(fsl_qdma, FSL_QDMA_CQIER_MEIE |
			    FSL_QDMA_CQIER_TEIE,
			    block + FSL_QDMA_CQIER);

		/* Initialize the status queue mode. */
		reg = FSL_QDMA_BSQMR_EN;
		reg |= FSL_QDMA_BSQMR_CQ_SIZE(ilog2(fsl_qdma->status[j]->n_cq)
					      - 6);

		qdma_writel(fsl_qdma, reg, block + FSL_QDMA_BSQMR);
		reg = qdma_readl(fsl_qdma, block + FSL_QDMA_BSQMR);
	}

	/* Initialize controller interrupt register. */
	qdma_writel(fsl_qdma, FSL_QDMA_DEDR_CLEAR, status + FSL_QDMA_DEDR);
	qdma_writel(fsl_qdma, FSL_QDMA_DEIER_CLEAR, status + FSL_QDMA_DEIER);

	reg = qdma_readl(fsl_qdma, ctrl + FSL_QDMA_DMR);
	reg &= ~FSL_QDMA_DMR_DQD;
	qdma_writel(fsl_qdma, reg, ctrl + FSL_QDMA_DMR);

	return 0;
}
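
/*
 * Worked example (editorial): the CQ_SIZE fields above encode a ring size
 * n as ilog2(n) - 6, so the legal sizes 64..16384 descriptors map to
 * encodings 0..8. E.g. a 64-entry command queue writes 0 into CQ_SIZE,
 * and its CD_THLD field gets ilog2(64) - 4 = 2.
 */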
static struct dma_async_tx_descriptor *
fsl_qdma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst,
		     dma_addr_t src, size_t len, unsigned long flags)
{
	struct fsl_qdma_comp *fsl_comp;
	struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);

	fsl_comp = fsl_qdma_request_enqueue_desc(fsl_chan);
	if (!fsl_comp)
		return NULL;

	fsl_qdma_comp_fill_memcpy(fsl_comp, dst, src, len);

	return vchan_tx_prep(&fsl_chan->vchan, &fsl_comp->vdesc, flags);
}
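
/*
 * Client-side sketch (editorial addition): a dmaengine consumer would
 * exercise the memcpy path above roughly as follows; error handling and
 * completion waiting are omitted.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *	struct dma_async_tx_descriptor *tx;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_chan_by_mask(&mask);
 *	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
 *				       DMA_PREP_INTERRUPT);
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */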
static void fsl_qdma_enqueue_desc(struct fsl_qdma_chan *fsl_chan)
{
	u32 reg;
	struct virt_dma_desc *vdesc;
	struct fsl_qdma_comp *fsl_comp;
	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;
	void __iomem *block = fsl_queue->block_base;

	reg = qdma_readl(fsl_chan->qdma, block + FSL_QDMA_BCQSR(fsl_queue->id));
	if (reg & (FSL_QDMA_BCQSR_QF | FSL_QDMA_BCQSR_XOFF))
		return;
	vdesc = vchan_next_desc(&fsl_chan->vchan);
	if (!vdesc)
		return;
	list_del(&vdesc->node);
	fsl_comp = to_fsl_qdma_comp(vdesc);

	memcpy(fsl_queue->virt_head++,
	       fsl_comp->virt_addr, sizeof(struct fsl_qdma_format));
	if (fsl_queue->virt_head == fsl_queue->cq + fsl_queue->n_cq)
		fsl_queue->virt_head = fsl_queue->cq;

	list_add_tail(&fsl_comp->list, &fsl_queue->comp_used);
	barrier();
	reg = qdma_readl(fsl_chan->qdma, block + FSL_QDMA_BCQMR(fsl_queue->id));
	reg |= FSL_QDMA_BCQMR_EI;
	qdma_writel(fsl_chan->qdma, reg, block + FSL_QDMA_BCQMR(fsl_queue->id));
	fsl_chan->status = DMA_IN_PROGRESS;
}
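
/*
 * Note (editorial): the enqueue path above first checks BCQSR and backs
 * off if the queue is full (QF) or flow-controlled (XOFF), then copies the
 * prepared compound descriptor into the ring slot at virt_head and sets
 * the EI bit in the queue's mode register, which appears to tell the block
 * that one more entry has been produced.
 */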
static void fsl_qdma_free_desc(struct virt_dma_desc *vdesc)
{
	unsigned long flags;
	struct fsl_qdma_comp *fsl_comp;
	struct fsl_qdma_queue *fsl_queue;

	fsl_comp = to_fsl_qdma_comp(vdesc);
	fsl_queue = fsl_comp->qchan->queue;

	spin_lock_irqsave(&fsl_queue->queue_lock, flags);
	list_add_tail(&fsl_comp->list, &fsl_queue->comp_free);
	spin_unlock_irqrestore(&fsl_queue->queue_lock, flags);
}
static void fsl_qdma_issue_pending(struct dma_chan *chan)
{
	unsigned long flags;
	struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;

	spin_lock_irqsave(&fsl_queue->queue_lock, flags);
	spin_lock(&fsl_chan->vchan.lock);
	if (vchan_issue_pending(&fsl_chan->vchan))
		fsl_qdma_enqueue_desc(fsl_chan);
	spin_unlock(&fsl_chan->vchan.lock);
	spin_unlock_irqrestore(&fsl_queue->queue_lock, flags);
}
static void fsl_qdma_synchronize(struct dma_chan *chan)
{
	struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);

	vchan_synchronize(&fsl_chan->vchan);
}
static int fsl_qdma_terminate_all(struct dma_chan *chan)
{
	LIST_HEAD(head);
	unsigned long flags;
	struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);

	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
	return 0;
}
static int fsl_qdma_alloc_chan_resources(struct dma_chan *chan)
{
	int ret;
	struct fsl_qdma_chan *fsl_chan = to_fsl_qdma_chan(chan);
	struct fsl_qdma_engine *fsl_qdma = fsl_chan->qdma;
	struct fsl_qdma_queue *fsl_queue = fsl_chan->queue;

	if (fsl_queue->comp_pool && fsl_queue->desc_pool)
		return fsl_qdma->desc_allocated;

	INIT_LIST_HEAD(&fsl_queue->comp_free);

	/*
	 * The dma pool for queue command buffer
	 */
	fsl_queue->comp_pool =
	dma_pool_create("comp_pool",
			chan->device->dev,
			FSL_QDMA_COMMAND_BUFFER_SIZE,
			64, 0);
	if (!fsl_queue->comp_pool)
		return -ENOMEM;

	/*
	 * The dma pool for Descriptor(SD/DD) buffer
	 */
	fsl_queue->desc_pool =
	dma_pool_create("desc_pool",
			chan->device->dev,
			FSL_QDMA_DESCRIPTOR_BUFFER_SIZE,
			32, 0);
	if (!fsl_queue->desc_pool)
		goto err_desc_pool;

	ret = fsl_qdma_pre_request_enqueue_desc(fsl_queue);
	if (ret) {
		dev_err(chan->device->dev,
			"failed to alloc dma buffer for S/G descriptor\n");
		goto err_mem;
	}

	fsl_qdma->desc_allocated++;
	return fsl_qdma->desc_allocated;

err_mem:
	dma_pool_destroy(fsl_queue->desc_pool);
err_desc_pool:
	dma_pool_destroy(fsl_queue->comp_pool);
	return -ENOMEM;
}
static int fsl_qdma_probe(struct platform_device *pdev)
{
	int ret, i;
	int blk_num, blk_off;
	u32 len, chans, queues;
	struct resource *res;
	struct fsl_qdma_chan *fsl_chan;
	struct fsl_qdma_engine *fsl_qdma;
	struct device_node *np = pdev->dev.of_node;

	ret = of_property_read_u32(np, "dma-channels", &chans);
	if (ret) {
		dev_err(&pdev->dev, "Can't get dma-channels.\n");
		return ret;
	}

	ret = of_property_read_u32(np, "block-offset", &blk_off);
	if (ret) {
		dev_err(&pdev->dev, "Can't get block-offset.\n");
		return ret;
	}

	ret = of_property_read_u32(np, "block-number", &blk_num);
	if (ret) {
		dev_err(&pdev->dev, "Can't get block-number.\n");
		return ret;
	}

	blk_num = min_t(int, blk_num, num_online_cpus());

	len = sizeof(*fsl_qdma);
	fsl_qdma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
	if (!fsl_qdma)
		return -ENOMEM;

	len = sizeof(*fsl_chan) * chans;
	fsl_qdma->chans = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
	if (!fsl_qdma->chans)
		return -ENOMEM;

	len = sizeof(struct fsl_qdma_queue *) * blk_num;
	fsl_qdma->status = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
	if (!fsl_qdma->status)
		return -ENOMEM;

	len = sizeof(int) * blk_num;
	fsl_qdma->queue_irq = devm_kzalloc(&pdev->dev, len, GFP_KERNEL);
	if (!fsl_qdma->queue_irq)
		return -ENOMEM;

	ret = of_property_read_u32(np, "fsl,dma-queues", &queues);
	if (ret) {
		dev_err(&pdev->dev, "Can't get queues.\n");
		return ret;
	}

	fsl_qdma->desc_allocated = 0;
	fsl_qdma->n_chans = chans;
	fsl_qdma->n_queues = queues;
	fsl_qdma->block_number = blk_num;
	fsl_qdma->block_offset = blk_off;

	mutex_init(&fsl_qdma->fsl_qdma_mutex);

	for (i = 0; i < fsl_qdma->block_number; i++) {
		fsl_qdma->status[i] = fsl_qdma_prep_status_queue(pdev);
		if (!fsl_qdma->status[i])
			return -ENOMEM;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	fsl_qdma->ctrl_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(fsl_qdma->ctrl_base))
		return PTR_ERR(fsl_qdma->ctrl_base);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	fsl_qdma->status_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(fsl_qdma->status_base))
		return PTR_ERR(fsl_qdma->status_base);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
	fsl_qdma->block_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(fsl_qdma->block_base))
		return PTR_ERR(fsl_qdma->block_base);

	fsl_qdma->queue = fsl_qdma_alloc_queue_resources(pdev, fsl_qdma);
	if (!fsl_qdma->queue)
		return -ENOMEM;

	fsl_qdma->irq_base = platform_get_irq_byname(pdev, "qdma-queue0");
	if (fsl_qdma->irq_base < 0)
		return fsl_qdma->irq_base;

	fsl_qdma->feature = of_property_read_bool(np, "big-endian");
	INIT_LIST_HEAD(&fsl_qdma->dma_dev.channels);

	for (i = 0; i < fsl_qdma->n_chans; i++) {
		struct fsl_qdma_chan *fsl_chan = &fsl_qdma->chans[i];

		fsl_chan->qdma = fsl_qdma;
		fsl_chan->queue = fsl_qdma->queue + i % (fsl_qdma->n_queues *
							 fsl_qdma->block_number);
		fsl_chan->vchan.desc_free = fsl_qdma_free_desc;
		vchan_init(&fsl_chan->vchan, &fsl_qdma->dma_dev);
	}

	dma_cap_set(DMA_MEMCPY, fsl_qdma->dma_dev.cap_mask);

	fsl_qdma->dma_dev.dev = &pdev->dev;
	fsl_qdma->dma_dev.device_free_chan_resources =
		fsl_qdma_free_chan_resources;
	fsl_qdma->dma_dev.device_alloc_chan_resources =
		fsl_qdma_alloc_chan_resources;
	fsl_qdma->dma_dev.device_tx_status = dma_cookie_status;
	fsl_qdma->dma_dev.device_prep_dma_memcpy = fsl_qdma_prep_memcpy;
	fsl_qdma->dma_dev.device_issue_pending = fsl_qdma_issue_pending;
	fsl_qdma->dma_dev.device_synchronize = fsl_qdma_synchronize;
	fsl_qdma->dma_dev.device_terminate_all = fsl_qdma_terminate_all;

	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(40));
	if (ret) {
		dev_err(&pdev->dev, "dma_set_mask failure.\n");
		return ret;
	}

	platform_set_drvdata(pdev, fsl_qdma);

	ret = fsl_qdma_reg_init(fsl_qdma);
	if (ret) {
		dev_err(&pdev->dev, "Can't initialize the qDMA engine.\n");
		return ret;
	}

	ret = fsl_qdma_irq_init(pdev, fsl_qdma);
	if (ret)
		return ret;

	ret = dma_async_device_register(&fsl_qdma->dma_dev);
	if (ret) {
		dev_err(&pdev->dev,
			"Can't register NXP Layerscape qDMA engine.\n");
		return ret;
	}

	return 0;
}
static void fsl_qdma_cleanup_vchan(struct dma_device *dmadev)
{
	struct fsl_qdma_chan *chan, *_chan;

	list_for_each_entry_safe(chan, _chan,
				 &dmadev->channels, vchan.chan.device_node) {
		list_del(&chan->vchan.chan.device_node);
		tasklet_kill(&chan->vchan.task);
	}
}
static int fsl_qdma_remove(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct fsl_qdma_engine *fsl_qdma = platform_get_drvdata(pdev);

	fsl_qdma_irq_exit(pdev, fsl_qdma);
	fsl_qdma_cleanup_vchan(&fsl_qdma->dma_dev);
	of_dma_controller_free(np);
	dma_async_device_unregister(&fsl_qdma->dma_dev);

	return 0;
}
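
/*
 * Device-tree sketch (editorial addition; addresses, sizes and interrupt
 * specifiers are placeholders, not taken from a real board). The property
 * names match what probe and the queue-allocation helpers above consume:
 *
 *	qdma: dma-controller@8380000 {
 *		compatible = "fsl,ls1021a-qdma";
 *		reg = <0x8380000 0x1000>,	// controller registers
 *		      <0x8390000 0x1000>,	// status registers
 *		      <0x83a0000 0x40000>;	// block registers
 *		interrupts = <...>, <...>;
 *		interrupt-names = "qdma-error", "qdma-queue0";
 *		dma-channels = <8>;
 *		block-number = <1>;
 *		block-offset = <0x10000>;
 *		fsl,dma-queues = <2>;
 *		status-sizes = <64>;
 *		queue-sizes = <64 64>;
 *	};
 */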
static const struct of_device_id fsl_qdma_dt_ids[] = {
	{ .compatible = "fsl,ls1021a-qdma", },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_qdma_dt_ids);

static struct platform_driver fsl_qdma_driver = {
	.driver		= {
		.name	= "fsl-qdma",
		.of_match_table = fsl_qdma_dt_ids,
	},
	.probe = fsl_qdma_probe,
	.remove = fsl_qdma_remove,
};

module_platform_driver(fsl_qdma_driver);

MODULE_ALIAS("platform:fsl-qdma");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("NXP Layerscape qDMA engine driver");