/*
 * drivers/dma/fsl_raid.c
 *
 * Freescale RAID Engine device driver
 *
 * Harninder Rai <harninder.rai@freescale.com>
 * Naveen Burmi <naveenburmi@freescale.com>
 *
 * Xuelin Shi <xuelin.shi@freescale.com>
 *
 * Copyright (c) 2010-2014 Freescale Semiconductor, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Freescale Semiconductor nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * ALTERNATIVELY, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") as published by the Free Software
 * Foundation, either version 2 of that License or (at your option) any
 * later version.
 *
 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Theory of operation:
 *
 * General capabilities:
 *	The RAID Engine (RE) block is capable of offloading XOR, memcpy and
 *	P/Q calculations required in RAID5 and RAID6 operations. The RE
 *	driver registers with Linux's ASYNC layer as a dma driver. The RE
 *	hardware maintains strict ordering of the requests through chained
 *	command queueing.
 *
 * Data flow:
 *	The software RAID layer of Linux (the MD layer) maintains RAID
 *	partitions, strips, stripes etc. It sends requests to the underlying
 *	ASYNC layer, which in turn passes them to the RE driver. The ASYNC
 *	layer decides which request goes to which job ring of the RE hardware.
 *	For every request processed by the RAID Engine the driver gets an
 *	interrupt, unless interrupt coalescing is enabled. The per job ring
 *	interrupt handler checks the status register for errors, clears the
 *	interrupt and leaves the post-interrupt processing to the per-channel
 *	tasklet.
 */

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/dmaengine.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/slab.h>

#include "dmaengine.h"
#include "fsl_raid.h"

#define FSL_RE_MAX_XOR_SRCS	16
#define FSL_RE_MAX_PQ_SRCS	16
#define FSL_RE_MIN_DESCS	256
#define FSL_RE_MAX_DESCS	(4 * FSL_RE_MIN_DESCS)
#define FSL_RE_FRAME_FORMAT	0x1
#define FSL_RE_MAX_DATA_LEN	(1024*1024)

#define to_fsl_re_dma_desc(tx) container_of(tx, struct fsl_re_desc, async_tx)
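
/*
 * Descriptor life cycle: prep callbacks take a descriptor from free_q and
 * fill in its compound frame, tx_submit() queues it on submit_q,
 * issue_pending() copies it into the hardware inbound ring and moves it to
 * active_q, the completion tasklet matches finished jobs from the outbound
 * ring and moves them to ack_q, and acknowledged descriptors are recycled
 * back onto free_q.
 */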

/* Add descriptors into per chan software queue - submit_q */
static dma_cookie_t fsl_re_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct fsl_re_desc *desc;
	struct fsl_re_chan *re_chan;
	dma_cookie_t cookie;
	unsigned long flags;

	desc = to_fsl_re_dma_desc(tx);
	re_chan = container_of(tx->chan, struct fsl_re_chan, chan);

	spin_lock_irqsave(&re_chan->desc_lock, flags);
	cookie = dma_cookie_assign(tx);
	list_add_tail(&desc->node, &re_chan->submit_q);
	spin_unlock_irqrestore(&re_chan->desc_lock, flags);

	return cookie;
}

/* Copy descriptors from per chan software queue into hardware job ring */
static void fsl_re_issue_pending(struct dma_chan *chan)
{
	struct fsl_re_chan *re_chan;
	int avail;
	struct fsl_re_desc *desc, *_desc;
	unsigned long flags;

	re_chan = container_of(chan, struct fsl_re_chan, chan);

	spin_lock_irqsave(&re_chan->desc_lock, flags);
	avail = FSL_RE_SLOT_AVAIL(
		in_be32(&re_chan->jrregs->inbring_slot_avail));

	list_for_each_entry_safe(desc, _desc, &re_chan->submit_q, node) {
		if (!avail)
			break;

		list_move_tail(&desc->node, &re_chan->active_q);
		memcpy(&re_chan->inb_ring_virt_addr[re_chan->inb_count],
		       &desc->hwdesc, sizeof(struct fsl_re_hw_desc));

		re_chan->inb_count = (re_chan->inb_count + 1) &
						FSL_RE_RING_SIZE_MASK;
		out_be32(&re_chan->jrregs->inbring_add_job, FSL_RE_ADD_JOB(1));
		avail--;
	}
	spin_unlock_irqrestore(&re_chan->desc_lock, flags);
}
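
/*
 * Complete one finished descriptor: mark its cookie complete, drop any
 * automatic dmaengine mappings and invoke the client callback. Called from
 * the per-channel tasklet.
 */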
static void fsl_re_desc_done(struct fsl_re_desc *desc)
{
	dma_cookie_complete(&desc->async_tx);
	dma_descriptor_unmap(&desc->async_tx);
	dmaengine_desc_get_callback_invoke(&desc->async_tx, NULL);
}
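
/*
 * Recycle descriptors whose transactions the client has acknowledged
 * (ack_q -> free_q), then push any newly submitted work to the hardware.
 */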
static void fsl_re_cleanup_descs(struct fsl_re_chan *re_chan)
{
	struct fsl_re_desc *desc, *_desc;
	unsigned long flags;

	spin_lock_irqsave(&re_chan->desc_lock, flags);
	list_for_each_entry_safe(desc, _desc, &re_chan->ack_q, node) {
		if (async_tx_test_ack(&desc->async_tx))
			list_move_tail(&desc->node, &re_chan->free_q);
	}
	spin_unlock_irqrestore(&re_chan->desc_lock, flags);

	fsl_re_issue_pending(&re_chan->chan);
}
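
/*
 * Completion tasklet: drain the outbound ring, match each completed hardware
 * descriptor against active_q by its bus address (lbea32/addr_low) and
 * complete the corresponding software descriptor.
 */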
static void fsl_re_dequeue(struct tasklet_struct *t)
{
	struct fsl_re_chan *re_chan = from_tasklet(re_chan, t, irqtask);
	struct fsl_re_desc *desc, *_desc;
	struct fsl_re_hw_desc *hwdesc;
	unsigned long flags;
	unsigned int count, oub_count;
	int found;

	fsl_re_cleanup_descs(re_chan);

	spin_lock_irqsave(&re_chan->desc_lock, flags);
	count = FSL_RE_SLOT_FULL(in_be32(&re_chan->jrregs->oubring_slot_full));
	while (count--) {
		found = 0;
		hwdesc = &re_chan->oub_ring_virt_addr[re_chan->oub_count];
		list_for_each_entry_safe(desc, _desc, &re_chan->active_q,
					 node) {
			/* compare the hw dma addr to find the completed */
			if (desc->hwdesc.lbea32 == hwdesc->lbea32 &&
			    desc->hwdesc.addr_low == hwdesc->addr_low) {
				found = 1;
				break;
			}
		}

		if (found) {
			fsl_re_desc_done(desc);
			list_move_tail(&desc->node, &re_chan->ack_q);
		} else {
			dev_err(re_chan->dev,
				"found hwdesc not in sw queue, discard it\n");
		}

		oub_count = (re_chan->oub_count + 1) & FSL_RE_RING_SIZE_MASK;
		re_chan->oub_count = oub_count;
		out_be32(&re_chan->jrregs->oubring_job_rmvd,
			 FSL_RE_RMVD_JOB(1));
	}
	spin_unlock_irqrestore(&re_chan->desc_lock, flags);
}

/* Per Job Ring interrupt handler */
static irqreturn_t fsl_re_isr(int irq, void *data)
{
	struct fsl_re_chan *re_chan;
	u32 irqstate, status;

	re_chan = dev_get_drvdata((struct device *)data);

	irqstate = in_be32(&re_chan->jrregs->jr_interrupt_status);
	if (!irqstate)
		return IRQ_NONE;

	/*
	 * There's no way in the upper layer (read: MD layer) to recover from
	 * error conditions except restarting everything. In the long term we
	 * need to do something more than just crashing.
	 */
	if (irqstate & FSL_RE_ERROR) {
		status = in_be32(&re_chan->jrregs->jr_status);
		dev_err(re_chan->dev, "chan error irqstate: %x, status: %x\n",
			irqstate, status);
	}

	/* Clear interrupt and let the tasklet handle completions */
	out_be32(&re_chan->jrregs->jr_interrupt_status, FSL_RE_CLR_INTR);

	tasklet_schedule(&re_chan->irqtask);

	return IRQ_HANDLED;
}

static enum dma_status fsl_re_tx_status(struct dma_chan *chan,
		dma_cookie_t cookie, struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}
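
/*
 * Fill one entry of a compound frame descriptor: the buffer length and the
 * "final frame" flag are packed into efrl32, and the 64-bit bus address is
 * split across addr_high/addr_low.
 */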
static void fill_cfd_frame(struct fsl_re_cmpnd_frame *cf, u8 index,
			   size_t length, dma_addr_t addr, bool final)
{
	u32 efrl = length & FSL_RE_CF_LENGTH_MASK;

	efrl |= final << FSL_RE_CF_FINAL_SHIFT;
	cf[index].efrl32 = efrl;
	cf[index].addr_high = upper_32_bits(addr);
	cf[index].addr_low = lower_32_bits(addr);
}
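
/*
 * A single dma_pool allocation backs both the compound frame table and the
 * CDB: the CF entries sit at the start of the buffer and the CDB follows at
 * offset FSL_RE_CF_DESC_SIZE, so only one bus address needs to be tracked.
 */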
static struct fsl_re_desc *fsl_re_init_desc(struct fsl_re_chan *re_chan,
					    struct fsl_re_desc *desc,
					    void *cf, dma_addr_t paddr)
{
	desc->re_chan = re_chan;
	desc->async_tx.tx_submit = fsl_re_tx_submit;
	dma_async_tx_descriptor_init(&desc->async_tx, &re_chan->chan);
	INIT_LIST_HEAD(&desc->node);

	desc->hwdesc.fmt32 = FSL_RE_FRAME_FORMAT << FSL_RE_HWDESC_FMT_SHIFT;
	desc->hwdesc.lbea32 = upper_32_bits(paddr);
	desc->hwdesc.addr_low = lower_32_bits(paddr);
	desc->cf_addr = cf;
	desc->cf_paddr = paddr;

	desc->cdb_addr = (void *)(cf + FSL_RE_CF_DESC_SIZE);
	desc->cdb_paddr = paddr + FSL_RE_CF_DESC_SIZE;

	return desc;
}
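
/*
 * Get a descriptor for a new request: reuse one from free_q when possible,
 * otherwise allocate a fresh one with GFP_NOWAIT, since prep callbacks may
 * run in atomic context.
 */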
static struct fsl_re_desc *fsl_re_chan_alloc_desc(struct fsl_re_chan *re_chan,
						  unsigned long flags)
{
	struct fsl_re_desc *desc = NULL;
	void *cf;
	dma_addr_t paddr;
	unsigned long lock_flag;

	fsl_re_cleanup_descs(re_chan);

	spin_lock_irqsave(&re_chan->desc_lock, lock_flag);
	if (!list_empty(&re_chan->free_q)) {
		/* take one desc from free_q */
		desc = list_first_entry(&re_chan->free_q,
					struct fsl_re_desc, node);
		list_del(&desc->node);
		desc->async_tx.flags = flags;
	}
	spin_unlock_irqrestore(&re_chan->desc_lock, lock_flag);
	if (!desc) {
		desc = kzalloc(sizeof(*desc), GFP_NOWAIT);
		if (!desc)
			return NULL;
		cf = dma_pool_alloc(re_chan->re_dev->cf_desc_pool, GFP_NOWAIT,
				    &paddr);
		if (!cf) {
			kfree(desc);
			return NULL;
		}
		desc = fsl_re_init_desc(re_chan, desc, cf, paddr);
		desc->async_tx.flags = flags;
		spin_lock_irqsave(&re_chan->desc_lock, lock_flag);
		re_chan->alloc_count++;
		spin_unlock_irqrestore(&re_chan->desc_lock, lock_flag);
	}

	return desc;
}
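
/*
 * Build a GenQ job: frame 0 of the compound frame descriptor carries the
 * CDB, frame 1 the destination buffer and the remaining frames the source
 * buffers. With a coefficient array (scf) this computes Q over GF(2^8);
 * with all coefficients forced to 1 it degenerates to a plain XOR (P).
 */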
static struct dma_async_tx_descriptor *fsl_re_prep_dma_genq(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		unsigned int src_cnt, const unsigned char *scf, size_t len,
		unsigned long flags)
{
	struct fsl_re_chan *re_chan;
	struct fsl_re_desc *desc;
	struct fsl_re_xor_cdb *xor;
	struct fsl_re_cmpnd_frame *cf;
	u32 cdb;
	unsigned int i, j;
	unsigned int save_src_cnt = src_cnt;
	int cont_q = 0;

	re_chan = container_of(chan, struct fsl_re_chan, chan);
	if (len > FSL_RE_MAX_DATA_LEN) {
		dev_err(re_chan->dev, "genq tx length %zu, max length %d\n",
			len, FSL_RE_MAX_DATA_LEN);
		return NULL;
	}

	desc = fsl_re_chan_alloc_desc(re_chan, flags);
	if (!desc)
		return NULL;

	/* A continuation adds the old Q as one extra source */
	if (scf && (flags & DMA_PREP_CONTINUE)) {
		cont_q = 1;
		src_cnt += 1;
	}

	/* Filling xor CDB */
	cdb = FSL_RE_XOR_OPCODE << FSL_RE_CDB_OPCODE_SHIFT;
	cdb |= (src_cnt - 1) << FSL_RE_CDB_NRCS_SHIFT;
	cdb |= FSL_RE_BLOCK_SIZE << FSL_RE_CDB_BLKSIZE_SHIFT;
	cdb |= FSL_RE_INTR_ON_ERROR << FSL_RE_CDB_ERROR_SHIFT;
	cdb |= FSL_RE_DATA_DEP << FSL_RE_CDB_DEPEND_SHIFT;
	xor = desc->cdb_addr;
	xor->cdb32 = cdb;

	if (scf) {
		/* compute q = src0*coef0^src1*coef1^..., * is GF(2^8) mult */
		for (i = 0; i < save_src_cnt; i++)
			xor->gfm[i] = scf[i];
		if (cont_q)
			xor->gfm[i++] = 1;
	} else {
		/* compute P, that is XOR all srcs */
		for (i = 0; i < src_cnt; i++)
			xor->gfm[i] = 1;
	}

	/* Filling frame 0 of compound frame descriptor with CDB */
	cf = desc->cf_addr;
	fill_cfd_frame(cf, 0, sizeof(*xor), desc->cdb_paddr, 0);

	/* Fill CFD's 1st frame with dest buffer */
	fill_cfd_frame(cf, 1, len, dest, 0);

	/* Fill CFD's rest of the frames with source buffers */
	for (i = 2, j = 0; j < save_src_cnt; i++, j++)
		fill_cfd_frame(cf, i, len, src[j], 0);

	/* On continuation, the old Q (in dest) is also an input */
	if (cont_q)
		fill_cfd_frame(cf, i++, len, dest, 0);

	/* Setting the final bit in the last source buffer frame in CFD */
	cf[i - 1].efrl32 |= 1 << FSL_RE_CF_FINAL_SHIFT;

	return &desc->async_tx;
}

/*
 * Prep function for P parity calculation. In RAID Engine terminology,
 * XOR calculation is called GenQ calculation, done through the GenQ command.
 */
static struct dma_async_tx_descriptor *fsl_re_prep_dma_xor(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
		unsigned int src_cnt, size_t len, unsigned long flags)
{
	/* NULL scf lets genq take all coefficients as 1 (plain XOR) */
	return fsl_re_prep_dma_genq(chan, dest, src, src_cnt, NULL, len, flags);
}

/*
 * Prep function for P/Q parity calculation. In RAID Engine terminology,
 * P/Q calculation is called GenQQ, done through the GenQQ command.
 */
static struct dma_async_tx_descriptor *fsl_re_prep_dma_pq(
		struct dma_chan *chan, dma_addr_t *dest, dma_addr_t *src,
		unsigned int src_cnt, const unsigned char *scf, size_t len,
		unsigned long flags)
{
	struct fsl_re_chan *re_chan;
	struct fsl_re_desc *desc;
	struct fsl_re_pq_cdb *pq;
	struct fsl_re_cmpnd_frame *cf;
	u32 cdb;
	u8 *p;
	int gfmq_len, i, j;
	unsigned int save_src_cnt = src_cnt;

	re_chan = container_of(chan, struct fsl_re_chan, chan);
	if (len > FSL_RE_MAX_DATA_LEN) {
		dev_err(re_chan->dev, "pq tx length is %zu, max length is %d\n",
			len, FSL_RE_MAX_DATA_LEN);
		return NULL;
	}

	/*
	 * RE requires at least 2 sources. If given only one source, we pass
	 * the second source same as the first one; with only one source,
	 * generating P is meaningless, so only Q is generated.
	 */
	if (src_cnt == 1) {
		struct dma_async_tx_descriptor *tx;
		dma_addr_t dma_src[2];
		unsigned char coef[2];

		dma_src[0] = *src;
		coef[0] = *scf;
		dma_src[1] = *src;
		coef[1] = 0;
		tx = fsl_re_prep_dma_genq(chan, dest[1], dma_src, 2, coef, len,
					  flags);
		if (tx)
			desc = to_fsl_re_dma_desc(tx);

		return tx;
	}

	/*
	 * During RAID6 array creation, Linux's MD layer gets P and Q
	 * calculated separately in two steps. But our RAID Engine has
	 * the capability to calculate both P and Q with a single command.
	 * Hence, to merge well with the MD layer, we need to provide a hook
	 * here and call fsl_re_prep_dma_genq() when only Q is requested.
	 */
	if (flags & DMA_PREP_PQ_DISABLE_P)
		return fsl_re_prep_dma_genq(chan, dest[1], src, src_cnt,
				scf, len, flags);

	/* A continuation needs three extra frames: old P, old Q and Q again */
	if (flags & DMA_PREP_CONTINUE)
		src_cnt += 3;

	desc = fsl_re_chan_alloc_desc(re_chan, flags);
	if (!desc)
		return NULL;

	/* Filling GenQQ CDB */
	cdb = FSL_RE_PQ_OPCODE << FSL_RE_CDB_OPCODE_SHIFT;
	cdb |= (src_cnt - 1) << FSL_RE_CDB_NRCS_SHIFT;
	cdb |= FSL_RE_BLOCK_SIZE << FSL_RE_CDB_BLKSIZE_SHIFT;
	cdb |= FSL_RE_BUFFER_OUTPUT << FSL_RE_CDB_BUFFER_SHIFT;
	cdb |= FSL_RE_DATA_DEP << FSL_RE_CDB_DEPEND_SHIFT;

	pq = desc->cdb_addr;
	pq->cdb32 = cdb;

	/* Init gfm_q1[]: all coefficients 1, so the first output is P */
	p = pq->gfm_q1;
	for (i = 0; i < src_cnt; i++)
		p[i] = 1;

	/* Align gfm[] to 32bit */
	gfmq_len = ALIGN(src_cnt, 4);

	/* Init gfm_q2[] with the caller-supplied coefficients for Q */
	p += gfmq_len;
	for (i = 0; i < src_cnt; i++)
		p[i] = scf[i];

	/* Filling frame 0 of compound frame descriptor with CDB */
	cf = desc->cf_addr;
	fill_cfd_frame(cf, 0, sizeof(struct fsl_re_pq_cdb), desc->cdb_paddr, 0);

	/* Fill CFD's 1st & 2nd frame with dest buffers */
	for (i = 1, j = 0; i < 3; i++, j++)
		fill_cfd_frame(cf, i, len, dest[j], 0);

	/* Fill CFD's rest of the frames with source buffers */
	for (i = 3, j = 0; j < save_src_cnt; i++, j++)
		fill_cfd_frame(cf, i, len, src[j], 0);

	/* PQ computation continuation */
	if (flags & DMA_PREP_CONTINUE) {
		if (src_cnt - save_src_cnt == 3) {
			p[save_src_cnt] = 0;
			p[save_src_cnt + 1] = 0;
			p[save_src_cnt + 2] = 1;
			fill_cfd_frame(cf, i++, len, dest[0], 0);
			fill_cfd_frame(cf, i++, len, dest[1], 0);
			fill_cfd_frame(cf, i++, len, dest[1], 0);
		} else {
			dev_err(re_chan->dev, "PQ tx continuation error!\n");
			return NULL;
		}
	}

	/* Setting the final bit in the last source buffer frame in CFD */
	cf[i - 1].efrl32 |= 1 << FSL_RE_CF_FINAL_SHIFT;

	return &desc->async_tx;
}

/*
 * Prep function for memcpy. In the RAID Engine, memcpy is done through the
 * MOVE command. The logic of this function will need to be modified once
 * multipage support is added in Linux's MD/ASYNC layer.
 */
static struct dma_async_tx_descriptor *fsl_re_prep_dma_memcpy(
		struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
		size_t len, unsigned long flags)
{
	struct fsl_re_chan *re_chan;
	struct fsl_re_desc *desc;
	size_t length;
	struct fsl_re_cmpnd_frame *cf;
	struct fsl_re_move_cdb *move;
	u32 cdb;

	re_chan = container_of(chan, struct fsl_re_chan, chan);

	if (len > FSL_RE_MAX_DATA_LEN) {
		dev_err(re_chan->dev, "cp tx length is %zu, max length is %d\n",
			len, FSL_RE_MAX_DATA_LEN);
		return NULL;
	}

	desc = fsl_re_chan_alloc_desc(re_chan, flags);
	if (!desc)
		return NULL;

	/* Filling move CDB */
	cdb = FSL_RE_MOVE_OPCODE << FSL_RE_CDB_OPCODE_SHIFT;
	cdb |= FSL_RE_BLOCK_SIZE << FSL_RE_CDB_BLKSIZE_SHIFT;
	cdb |= FSL_RE_INTR_ON_ERROR << FSL_RE_CDB_ERROR_SHIFT;
	cdb |= FSL_RE_DATA_DEP << FSL_RE_CDB_DEPEND_SHIFT;

	move = desc->cdb_addr;
	move->cdb32 = cdb;

	/* Filling frame 0 of CFD with move CDB */
	cf = desc->cf_addr;
	fill_cfd_frame(cf, 0, sizeof(*move), desc->cdb_paddr, 0);

	length = min_t(size_t, len, FSL_RE_MAX_DATA_LEN);

	/* Fill CFD's 1st frame with dest buffer */
	fill_cfd_frame(cf, 1, length, dest, 0);

	/* Fill CFD's 2nd frame with src buffer and mark it as the final frame */
	fill_cfd_frame(cf, 2, length, src, 1);

	return &desc->async_tx;
}
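
/*
 * Pre-allocate FSL_RE_MIN_DESCS descriptors per channel so that the prep
 * callbacks rarely have to fall back to GFP_NOWAIT allocations at runtime.
 */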
static int fsl_re_alloc_chan_resources(struct dma_chan *chan)
{
	struct fsl_re_chan *re_chan;
	struct fsl_re_desc *desc;
	void *cf;
	dma_addr_t paddr;
	int i;

	re_chan = container_of(chan, struct fsl_re_chan, chan);
	for (i = 0; i < FSL_RE_MIN_DESCS; i++) {
		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc)
			break;
		cf = dma_pool_alloc(re_chan->re_dev->cf_desc_pool, GFP_KERNEL, &paddr);
		if (!cf) {
			kfree(desc);
			break;
		}
		INIT_LIST_HEAD(&desc->node);
		fsl_re_init_desc(re_chan, desc, cf, paddr);
		list_add_tail(&desc->node, &re_chan->free_q);
		re_chan->alloc_count++;
	}
	return re_chan->alloc_count;
}

static void fsl_re_free_chan_resources(struct dma_chan *chan)
{
	struct fsl_re_chan *re_chan;
	struct fsl_re_desc *desc;

	re_chan = container_of(chan, struct fsl_re_chan, chan);
	while (re_chan->alloc_count--) {
		desc = list_first_entry(&re_chan->free_q,
					struct fsl_re_desc, node);
		list_del(&desc->node);
		dma_pool_free(re_chan->re_dev->cf_desc_pool, desc->cf_addr,
			      desc->cf_paddr);
		kfree(desc);
	}

	if (!list_empty(&re_chan->free_q))
		dev_err(re_chan->dev, "chan resource cannot be cleaned!\n");
}
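
/*
 * Set up one job ring: map its registers, hook up its interrupt and tasklet,
 * allocate the inbound/outbound hardware rings and enable the channel.
 */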
static int fsl_re_chan_probe(struct platform_device *ofdev,
			     struct device_node *np, u8 q, u32 off)
{
	struct device *dev, *chandev;
	struct fsl_re_drv_private *re_priv;
	struct fsl_re_chan *chan;
	struct dma_device *dma_dev;
	u32 ptr;
	u32 status;
	int ret = 0, rc;
	struct platform_device *chan_ofdev;

	dev = &ofdev->dev;
	re_priv = dev_get_drvdata(dev);
	dma_dev = &re_priv->dma_dev;

	chan = devm_kzalloc(dev, sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;

	/* create platform device for chan node */
	chan_ofdev = of_platform_device_create(np, NULL, dev);
	if (!chan_ofdev) {
		dev_err(dev, "Not able to create ofdev for jr %d\n", q);
		ret = -EINVAL;
		goto err_free;
	}

	/* read reg property from dts */
	rc = of_property_read_u32(np, "reg", &ptr);
	if (rc) {
		dev_err(dev, "Reg property not found in jr %d\n", q);
		ret = -ENODEV;
		goto err_free;
	}

	chan->jrregs = (struct fsl_re_chan_cfg *)((u8 *)re_priv->re_regs +
			off + ptr);

	/* read irq property from dts */
	chan->irq = irq_of_parse_and_map(np, 0);
	if (!chan->irq) {
		dev_err(dev, "No IRQ defined for JR %d\n", q);
		ret = -ENODEV;
		goto err_free;
	}

	snprintf(chan->name, sizeof(chan->name), "re_jr%02d", q);

	chandev = &chan_ofdev->dev;
	tasklet_setup(&chan->irqtask, fsl_re_dequeue);

	ret = request_irq(chan->irq, fsl_re_isr, 0, chan->name, chandev);
	if (ret) {
		dev_err(dev, "Unable to register interrupt for JR %d\n", q);
		ret = -EINVAL;
		goto err_free;
	}

	re_priv->re_jrs[q] = chan;
	chan->chan.device = dma_dev;
	chan->chan.private = chan;
	chan->dev = chandev;
	chan->re_dev = re_priv;

	spin_lock_init(&chan->desc_lock);
	INIT_LIST_HEAD(&chan->ack_q);
	INIT_LIST_HEAD(&chan->active_q);
	INIT_LIST_HEAD(&chan->submit_q);
	INIT_LIST_HEAD(&chan->free_q);

	chan->inb_ring_virt_addr = dma_pool_alloc(chan->re_dev->hw_desc_pool,
		GFP_KERNEL, &chan->inb_phys_addr);
	if (!chan->inb_ring_virt_addr) {
		dev_err(dev, "No dma memory for inb_ring_virt_addr\n");
		ret = -ENOMEM;
		goto err_free;
	}

	chan->oub_ring_virt_addr = dma_pool_alloc(chan->re_dev->hw_desc_pool,
		GFP_KERNEL, &chan->oub_phys_addr);
	if (!chan->oub_ring_virt_addr) {
		dev_err(dev, "No dma memory for oub_ring_virt_addr\n");
		ret = -ENOMEM;
		goto err_free_1;
	}

	/* Program the Inbound/Outbound ring base addresses and size */
	out_be32(&chan->jrregs->inbring_base_h,
		 chan->inb_phys_addr & FSL_RE_ADDR_BIT_MASK);
	out_be32(&chan->jrregs->oubring_base_h,
		 chan->oub_phys_addr & FSL_RE_ADDR_BIT_MASK);
	out_be32(&chan->jrregs->inbring_base_l,
		 chan->inb_phys_addr >> FSL_RE_ADDR_BIT_SHIFT);
	out_be32(&chan->jrregs->oubring_base_l,
		 chan->oub_phys_addr >> FSL_RE_ADDR_BIT_SHIFT);
	out_be32(&chan->jrregs->inbring_size,
		 FSL_RE_RING_SIZE << FSL_RE_RING_SIZE_SHIFT);
	out_be32(&chan->jrregs->oubring_size,
		 FSL_RE_RING_SIZE << FSL_RE_RING_SIZE_SHIFT);

	/* Read LIODN value from u-boot */
	status = in_be32(&chan->jrregs->jr_config_1) & FSL_RE_REG_LIODN_MASK;

	/* Program the CFG reg */
	out_be32(&chan->jrregs->jr_config_1,
		 FSL_RE_CFG1_CBSI | FSL_RE_CFG1_CBS0 | status);

	dev_set_drvdata(chandev, chan);

	/* Enable the channel */
	out_be32(&chan->jrregs->jr_command, FSL_RE_ENABLE);

	return 0;

err_free_1:
	dma_pool_free(chan->re_dev->hw_desc_pool, chan->inb_ring_virt_addr,
		      chan->inb_phys_addr);
err_free:
	return ret;
}
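
/*
 * Sketch of the device-tree layout the probe below walks; node names, unit
 * addresses and reg values are illustrative only, not taken from a real dts:
 *
 *	raideng: raideng@320000 {
 *		compatible = "fsl,raideng-v1.0";
 *		reg = <0x320000 0x10000>;
 *
 *		raideng_jq0: jq@1000 {
 *			compatible = "fsl,raideng-v1.0-job-queue";
 *			reg = <0x1000 0x1000>;
 *
 *			raideng_jr0: jr@0 {
 *				compatible = "fsl,raideng-v1.0-job-ring";
 *				reg = <0x0 0x400>;
 *				interrupts = <...>;
 *			};
 *		};
 *	};
 */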
/* Probe function for RAID Engine */
static int fsl_re_probe(struct platform_device *ofdev)
{
	struct fsl_re_drv_private *re_priv;
	struct device_node *np;
	struct device_node *child;
	u32 off;
	u8 ridx = 0;
	struct dma_device *dma_dev;
	struct resource *res;
	int rc;
	struct device *dev = &ofdev->dev;

	re_priv = devm_kzalloc(dev, sizeof(*re_priv), GFP_KERNEL);
	if (!re_priv)
		return -ENOMEM;

	res = platform_get_resource(ofdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	/* IOMAP the entire RAID Engine region */
	re_priv->re_regs = devm_ioremap(dev, res->start, resource_size(res));
	if (!re_priv->re_regs)
		return -EBUSY;

	/* Program the RE mode */
	out_be32(&re_priv->re_regs->global_config, FSL_RE_NON_DPAA_MODE);

	/* Program Galois Field polynomial */
	out_be32(&re_priv->re_regs->galois_field_config, FSL_RE_GFM_POLY);

	dev_info(dev, "version %x, mode %x, gfp %x\n",
		 in_be32(&re_priv->re_regs->re_version_id),
		 in_be32(&re_priv->re_regs->global_config),
		 in_be32(&re_priv->re_regs->galois_field_config));

	dma_dev = &re_priv->dma_dev;
	dma_dev->dev = dev;
	INIT_LIST_HEAD(&dma_dev->channels);
	dma_set_mask(dev, DMA_BIT_MASK(40));

	dma_dev->device_alloc_chan_resources = fsl_re_alloc_chan_resources;
	dma_dev->device_tx_status = fsl_re_tx_status;
	dma_dev->device_issue_pending = fsl_re_issue_pending;

	dma_dev->max_xor = FSL_RE_MAX_XOR_SRCS;
	dma_dev->device_prep_dma_xor = fsl_re_prep_dma_xor;
	dma_cap_set(DMA_XOR, dma_dev->cap_mask);

	dma_dev->max_pq = FSL_RE_MAX_PQ_SRCS;
	dma_dev->device_prep_dma_pq = fsl_re_prep_dma_pq;
	dma_cap_set(DMA_PQ, dma_dev->cap_mask);

	dma_dev->device_prep_dma_memcpy = fsl_re_prep_dma_memcpy;
	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);

	dma_dev->device_free_chan_resources = fsl_re_free_chan_resources;

	re_priv->total_chans = 0;

	re_priv->cf_desc_pool = dmam_pool_create("fsl_re_cf_desc_pool", dev,
					FSL_RE_CF_CDB_SIZE,
					FSL_RE_CF_CDB_ALIGN, 0);
	if (!re_priv->cf_desc_pool) {
		dev_err(dev, "No memory for fsl re_cf desc pool\n");
		return -ENOMEM;
	}

	re_priv->hw_desc_pool = dmam_pool_create("fsl_re_hw_desc_pool", dev,
			sizeof(struct fsl_re_hw_desc) * FSL_RE_RING_SIZE,
			FSL_RE_FRAME_ALIGN, 0);
	if (!re_priv->hw_desc_pool) {
		dev_err(dev, "No memory for fsl re_hw desc pool\n");
		return -ENOMEM;
	}

	dev_set_drvdata(dev, re_priv);

	/* Parse the device tree to find out the total number of JQs present */
	for_each_compatible_node(np, NULL, "fsl,raideng-v1.0-job-queue") {
		rc = of_property_read_u32(np, "reg", &off);
		if (rc) {
			dev_err(dev, "Reg property not found in JQ node\n");
			of_node_put(np);
			return -ENODEV;
		}
		/* Find out the Job Rings present under each JQ */
		for_each_child_of_node(np, child) {
			rc = of_device_is_compatible(child,
					     "fsl,raideng-v1.0-job-ring");
			if (rc) {
				fsl_re_chan_probe(ofdev, child, ridx++, off);
				re_priv->total_chans++;
			}
		}
	}

	dma_async_device_register(dma_dev);

	return 0;
}

static void fsl_re_remove_chan(struct fsl_re_chan *chan)
{
	tasklet_kill(&chan->irqtask);

	dma_pool_free(chan->re_dev->hw_desc_pool, chan->inb_ring_virt_addr,
		      chan->inb_phys_addr);

	dma_pool_free(chan->re_dev->hw_desc_pool, chan->oub_ring_virt_addr,
		      chan->oub_phys_addr);
}

static int fsl_re_remove(struct platform_device *ofdev)
{
	struct fsl_re_drv_private *re_priv;
	struct device *dev = &ofdev->dev;
	int i;

	re_priv = dev_get_drvdata(dev);

	/* Cleanup chan related memory areas */
	for (i = 0; i < re_priv->total_chans; i++)
		fsl_re_remove_chan(re_priv->re_jrs[i]);
	/* Unregister the driver */
	dma_async_device_unregister(&re_priv->dma_dev);
	return 0;
}

static const struct of_device_id fsl_re_ids[] = {
	{ .compatible = "fsl,raideng-v1.0", },
	{}
};
MODULE_DEVICE_TABLE(of, fsl_re_ids);

static struct platform_driver fsl_re_driver = {
	.driver = {
		.name = "fsl-raideng",
		.of_match_table = fsl_re_ids,
	},
	.probe = fsl_re_probe,
	.remove = fsl_re_remove,
};

module_platform_driver(fsl_re_driver);

MODULE_AUTHOR("Harninder Rai <harninder.rai@freescale.com>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Freescale RAID Engine Device Driver");