1 // SPDX-License-Identifier: GPL-2.0
/*
 * Support for Macronix external hardware ECC engine for NAND devices, also
 * called DPE for Data Processing Engine.
 *
 * Copyright © 2019 Macronix
 * Author: Miquel Raynal <miquel.raynal@bootlin.com>
 */
10 #include <linux/dma-mapping.h>
11 #include <linux/init.h>
12 #include <linux/interrupt.h>
14 #include <linux/iopoll.h>
15 #include <linux/kernel.h>
16 #include <linux/module.h>
17 #include <linux/mtd/mtd.h>
18 #include <linux/mtd/nand.h>
19 #include <linux/mtd/nand-ecc-mxic.h>
20 #include <linux/mutex.h>
21 #include <linux/of_device.h>
22 #include <linux/of_platform.h>
23 #include <linux/platform_device.h>
24 #include <linux/slab.h>
26 /* DPE Configuration */
27 #define DP_CONFIG 0x00
29 #define ECC_TYP(idx) (((idx) << 3) & GENMASK(6, 3))
30 /* DPE Interrupt Status */
31 #define INTRPT_STS 0x04
32 #define TRANS_CMPLT BIT(0)
33 #define SDMA_MAIN BIT(1)
34 #define SDMA_SPARE BIT(2)
35 #define ECC_ERR BIT(3)
36 #define TO_SPARE BIT(4)
37 #define TO_MAIN BIT(5)
38 /* DPE Interrupt Status Enable */
39 #define INTRPT_STS_EN 0x08
40 /* DPE Interrupt Signal Enable */
41 #define INTRPT_SIG_EN 0x0C
42 /* Host Controller Configuration */
43 #define HC_CONFIG 0x10
44 #define DEV2MEM 0 /* TRANS_TYP_DMA in the spec */
45 #define MEM2MEM BIT(4) /* TRANS_TYP_IO in the spec */
46 #define MAPPING BIT(5) /* TRANS_TYP_MAPPING in the spec */
47 #define ECC_PACKED 0 /* LAYOUT_TYP_INTEGRATED in the spec */
48 #define ECC_INTERLEAVED BIT(2) /* LAYOUT_TYP_DISTRIBUTED in the spec */
49 #define BURST_TYP_FIXED 0
50 #define BURST_TYP_INCREASING BIT(0)
51 /* Host Controller Slave Address */
52 #define HC_SLV_ADDR 0x14
54 #define CHUNK_SIZE 0x20
56 #define MAIN_SIZE 0x24
58 #define SPARE_SIZE 0x28
59 #define META_SZ(reg) ((reg) & GENMASK(7, 0))
60 #define PARITY_SZ(reg) (((reg) & GENMASK(15, 8)) >> 8)
61 #define RSV_SZ(reg) (((reg) & GENMASK(23, 16)) >> 16)
62 #define SPARE_SZ(reg) ((reg) >> 24)
64 #define CHUNK_CNT 0x30
66 #define SDMA_CTRL 0x40
68 #define READ_NAND BIT(1)
69 #define CONT_NAND BIT(29)
70 #define CONT_SYSM BIT(30) /* Continue System Memory? */
71 #define SDMA_STRT BIT(31)
72 /* SDMA Address of Main Data */
73 #define SDMA_MAIN_ADDR 0x44
74 /* SDMA Address of Spare Data */
75 #define SDMA_SPARE_ADDR 0x48
76 /* DPE Version Number */
78 #define DP_VER_OFFSET 16
80 /* Status bytes between each chunk of spare data */
83 #define MAX_CORR_ERR 0x28
84 #define UNCORR_ERR 0xFE
85 #define ERASED_CHUNK 0xFF
87 struct mxic_ecc_engine {
91 struct completion complete;
92 struct nand_ecc_engine external_engine;
93 struct nand_ecc_engine pipelined_engine;
99 unsigned int data_step_sz;
100 unsigned int oob_step_sz;
101 unsigned int parity_sz;
102 unsigned int meta_sz;
106 /* DMA boilerplate */
107 struct nand_ecc_req_tweak_ctx req_ctx;
109 struct scatterlist sg[2];
110 struct nand_page_io_req *req;
111 unsigned int pageoffs;
114 static struct mxic_ecc_engine *ext_ecc_eng_to_mxic(struct nand_ecc_engine *eng)
116 return container_of(eng, struct mxic_ecc_engine, external_engine);
119 static struct mxic_ecc_engine *pip_ecc_eng_to_mxic(struct nand_ecc_engine *eng)
121 return container_of(eng, struct mxic_ecc_engine, pipelined_engine);
124 static struct mxic_ecc_engine *nand_to_mxic(struct nand_device *nand)
126 struct nand_ecc_engine *eng = nand->ecc.engine;
128 if (eng->integration == NAND_ECC_ENGINE_INTEGRATION_EXTERNAL)
129 return ext_ecc_eng_to_mxic(eng);
131 return pip_ecc_eng_to_mxic(eng);
134 static int mxic_ecc_ooblayout_ecc(struct mtd_info *mtd, int section,
135 struct mtd_oob_region *oobregion)
137 struct nand_device *nand = mtd_to_nanddev(mtd);
138 struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);
140 if (section < 0 || section >= ctx->steps)
143 oobregion->offset = (section * ctx->oob_step_sz) + ctx->meta_sz;
144 oobregion->length = ctx->parity_sz;
149 static int mxic_ecc_ooblayout_free(struct mtd_info *mtd, int section,
150 struct mtd_oob_region *oobregion)
152 struct nand_device *nand = mtd_to_nanddev(mtd);
153 struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);
155 if (section < 0 || section >= ctx->steps)
159 oobregion->offset = 2;
160 oobregion->length = ctx->meta_sz - 2;
162 oobregion->offset = section * ctx->oob_step_sz;
163 oobregion->length = ctx->meta_sz;
169 static const struct mtd_ooblayout_ops mxic_ecc_ooblayout_ops = {
170 .ecc = mxic_ecc_ooblayout_ecc,
171 .free = mxic_ecc_ooblayout_free,
174 static void mxic_ecc_disable_engine(struct mxic_ecc_engine *mxic)
178 reg = readl(mxic->regs + DP_CONFIG);
180 writel(reg, mxic->regs + DP_CONFIG);
183 static void mxic_ecc_enable_engine(struct mxic_ecc_engine *mxic)
187 reg = readl(mxic->regs + DP_CONFIG);
189 writel(reg, mxic->regs + DP_CONFIG);
192 static void mxic_ecc_disable_int(struct mxic_ecc_engine *mxic)
194 writel(0, mxic->regs + INTRPT_SIG_EN);
197 static void mxic_ecc_enable_int(struct mxic_ecc_engine *mxic)
199 writel(TRANS_CMPLT, mxic->regs + INTRPT_SIG_EN);
202 static irqreturn_t mxic_ecc_isr(int irq, void *dev_id)
204 struct mxic_ecc_engine *mxic = dev_id;
207 sts = readl(mxic->regs + INTRPT_STS);
211 if (sts & TRANS_CMPLT)
212 complete(&mxic->complete);
214 writel(sts, mxic->regs + INTRPT_STS);
219 static int mxic_ecc_init_ctx(struct nand_device *nand, struct device *dev)
221 struct mxic_ecc_engine *mxic = nand_to_mxic(nand);
222 struct nand_ecc_props *conf = &nand->ecc.ctx.conf;
223 struct nand_ecc_props *reqs = &nand->ecc.requirements;
224 struct nand_ecc_props *user = &nand->ecc.user_conf;
225 struct mtd_info *mtd = nanddev_to_mtd(nand);
226 int step_size = 0, strength = 0, desired_correction = 0, steps, idx;
227 static const int possible_strength[] = {4, 8, 40, 48};
228 static const int spare_size[] = {32, 32, 96, 96};
229 struct mxic_ecc_ctx *ctx;
233 ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
237 nand->ecc.ctx.priv = ctx;
239 /* Only large page NAND chips may use BCH */
240 if (mtd->oobsize < 64) {
241 pr_err("BCH cannot be used with small page NAND chips\n");
245 mtd_set_ooblayout(mtd, &mxic_ecc_ooblayout_ops);
247 /* Enable all status bits */
248 writel(TRANS_CMPLT | SDMA_MAIN | SDMA_SPARE | ECC_ERR |
249 TO_SPARE | TO_MAIN, mxic->regs + INTRPT_STS_EN);
251 /* Configure the correction depending on the NAND device topology */
252 if (user->step_size && user->strength) {
253 step_size = user->step_size;
254 strength = user->strength;
255 } else if (reqs->step_size && reqs->strength) {
256 step_size = reqs->step_size;
257 strength = reqs->strength;
260 if (step_size && strength) {
261 steps = mtd->writesize / step_size;
262 desired_correction = steps * strength;
265 /* Step size is fixed to 1kiB, strength may vary (4 possible values) */
266 conf->step_size = SZ_1K;
267 steps = mtd->writesize / conf->step_size;
269 ctx->status = devm_kzalloc(dev, steps * sizeof(u8), GFP_KERNEL);
273 if (desired_correction) {
274 strength = desired_correction / steps;
276 for (idx = 0; idx < ARRAY_SIZE(possible_strength); idx++)
277 if (possible_strength[idx] >= strength)
280 idx = min_t(unsigned int, idx,
281 ARRAY_SIZE(possible_strength) - 1);
283 /* Missing data, maximize the correction */
284 idx = ARRAY_SIZE(possible_strength) - 1;
287 /* Tune the selected strength until it fits in the OOB area */
288 for (; idx >= 0; idx--) {
289 if (spare_size[idx] * steps <= mtd->oobsize)
293 /* This engine cannot be used with this NAND device */
297 /* Configure the engine for the desired strength */
298 writel(ECC_TYP(idx), mxic->regs + DP_CONFIG);
299 conf->strength = possible_strength[idx];
300 spare_reg = readl(mxic->regs + SPARE_SIZE);
303 ctx->data_step_sz = mtd->writesize / steps;
304 ctx->oob_step_sz = mtd->oobsize / steps;
305 ctx->parity_sz = PARITY_SZ(spare_reg);
306 ctx->meta_sz = META_SZ(spare_reg);
308 /* Ensure buffers will contain enough bytes to store the STAT_BYTES */
309 ctx->req_ctx.oob_buffer_size = nanddev_per_page_oobsize(nand) +
310 (ctx->steps * STAT_BYTES);
311 ret = nand_ecc_init_req_tweaking(&ctx->req_ctx, nand);
315 ctx->oobwithstat = kmalloc(mtd->oobsize + (ctx->steps * STAT_BYTES),
317 if (!ctx->oobwithstat) {
319 goto cleanup_req_tweak;
322 sg_init_table(ctx->sg, 2);
324 /* Configuration dump and sanity checks */
325 dev_err(dev, "DPE version number: %d\n",
326 readl(mxic->regs + DP_VER) >> DP_VER_OFFSET);
327 dev_err(dev, "Chunk size: %d\n", readl(mxic->regs + CHUNK_SIZE));
328 dev_err(dev, "Main size: %d\n", readl(mxic->regs + MAIN_SIZE));
329 dev_err(dev, "Spare size: %d\n", SPARE_SZ(spare_reg));
330 dev_err(dev, "Rsv size: %ld\n", RSV_SZ(spare_reg));
331 dev_err(dev, "Parity size: %d\n", ctx->parity_sz);
332 dev_err(dev, "Meta size: %d\n", ctx->meta_sz);
334 if ((ctx->meta_sz + ctx->parity_sz + RSV_SZ(spare_reg)) !=
335 SPARE_SZ(spare_reg)) {
336 dev_err(dev, "Wrong OOB configuration: %d + %d + %ld != %d\n",
337 ctx->meta_sz, ctx->parity_sz, RSV_SZ(spare_reg),
338 SPARE_SZ(spare_reg));
340 goto free_oobwithstat;
343 if (ctx->oob_step_sz != SPARE_SZ(spare_reg)) {
344 dev_err(dev, "Wrong OOB configuration: %d != %d\n",
345 ctx->oob_step_sz, SPARE_SZ(spare_reg));
347 goto free_oobwithstat;
353 kfree(ctx->oobwithstat);
355 nand_ecc_cleanup_req_tweaking(&ctx->req_ctx);
360 static int mxic_ecc_init_ctx_external(struct nand_device *nand)
362 struct mxic_ecc_engine *mxic = nand_to_mxic(nand);
363 struct device *dev = nand->ecc.engine->dev;
366 dev_info(dev, "Macronix ECC engine in external mode\n");
368 ret = mxic_ecc_init_ctx(nand, dev);
372 /* Trigger each step manually */
373 writel(1, mxic->regs + CHUNK_CNT);
374 writel(BURST_TYP_INCREASING | ECC_PACKED | MEM2MEM,
375 mxic->regs + HC_CONFIG);
380 static int mxic_ecc_init_ctx_pipelined(struct nand_device *nand)
382 struct mxic_ecc_engine *mxic = nand_to_mxic(nand);
383 struct mxic_ecc_ctx *ctx;
387 dev = nand_ecc_get_engine_dev(nand->ecc.engine->dev);
391 dev_info(dev, "Macronix ECC engine in pipelined/mapping mode\n");
393 ret = mxic_ecc_init_ctx(nand, dev);
397 ctx = nand_to_ecc_ctx(nand);
399 /* All steps should be handled in one go directly by the internal DMA */
400 writel(ctx->steps, mxic->regs + CHUNK_CNT);
403 * Interleaved ECC scheme cannot be used otherwise factory bad block
404 * markers would be lost. A packed layout is mandatory.
406 writel(BURST_TYP_INCREASING | ECC_PACKED | MAPPING,
407 mxic->regs + HC_CONFIG);
412 static void mxic_ecc_cleanup_ctx(struct nand_device *nand)
414 struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);
417 nand_ecc_cleanup_req_tweaking(&ctx->req_ctx);
418 kfree(ctx->oobwithstat);
422 static int mxic_ecc_data_xfer_wait_for_completion(struct mxic_ecc_engine *mxic)
428 reinit_completion(&mxic->complete);
429 mxic_ecc_enable_int(mxic);
430 ret = wait_for_completion_timeout(&mxic->complete,
431 msecs_to_jiffies(1000));
432 mxic_ecc_disable_int(mxic);
434 ret = readl_poll_timeout(mxic->regs + INTRPT_STS, val,
435 val & TRANS_CMPLT, 10, USEC_PER_SEC);
436 writel(val, mxic->regs + INTRPT_STS);
440 dev_err(mxic->dev, "Timeout on data xfer completion\n");
447 static int mxic_ecc_process_data(struct mxic_ecc_engine *mxic,
448 unsigned int direction)
450 unsigned int dir = (direction == NAND_PAGE_READ) ?
451 READ_NAND : WRITE_NAND;
454 mxic_ecc_enable_engine(mxic);
456 /* Trigger processing */
457 writel(SDMA_STRT | dir, mxic->regs + SDMA_CTRL);
459 /* Wait for completion */
460 ret = mxic_ecc_data_xfer_wait_for_completion(mxic);
462 mxic_ecc_disable_engine(mxic);
467 int mxic_ecc_process_data_pipelined(struct nand_ecc_engine *eng,
468 unsigned int direction, dma_addr_t dirmap)
470 struct mxic_ecc_engine *mxic = pip_ecc_eng_to_mxic(eng);
473 writel(dirmap, mxic->regs + HC_SLV_ADDR);
475 return mxic_ecc_process_data(mxic, direction);
477 EXPORT_SYMBOL_GPL(mxic_ecc_process_data_pipelined);
479 static void mxic_ecc_extract_status_bytes(struct mxic_ecc_ctx *ctx)
481 u8 *buf = ctx->oobwithstat;
485 /* Extract the ECC status */
486 for (step = 0; step < ctx->steps; step++) {
487 next_stat_pos = ctx->oob_step_sz +
488 ((STAT_BYTES + ctx->oob_step_sz) * step);
490 ctx->status[step] = buf[next_stat_pos];
494 static void mxic_ecc_reconstruct_oobbuf(struct mxic_ecc_ctx *ctx,
495 u8 *dst, const u8 *src)
499 /* Reconstruct the OOB buffer linearly (without the ECC status bytes) */
500 for (step = 0; step < ctx->steps; step++)
501 memcpy(dst + (step * ctx->oob_step_sz),
502 src + (step * (ctx->oob_step_sz + STAT_BYTES)),
506 static void mxic_ecc_add_room_in_oobbuf(struct mxic_ecc_ctx *ctx,
507 u8 *dst, const u8 *src)
511 /* Add some space in the OOB buffer for the status bytes */
512 for (step = 0; step < ctx->steps; step++)
513 memcpy(dst + (step * (ctx->oob_step_sz + STAT_BYTES)),
514 src + (step * ctx->oob_step_sz),
518 static int mxic_ecc_count_biterrs(struct mxic_ecc_engine *mxic,
519 struct nand_device *nand)
521 struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);
522 struct mtd_info *mtd = nanddev_to_mtd(nand);
523 struct device *dev = mxic->dev;
524 unsigned int max_bf = 0;
525 bool failure = false;
528 for (step = 0; step < ctx->steps; step++) {
529 u8 stat = ctx->status[step];
531 if (stat == NO_ERR) {
532 dev_dbg(dev, "ECC step %d: no error\n", step);
533 } else if (stat == ERASED_CHUNK) {
534 dev_dbg(dev, "ECC step %d: erased\n", step);
535 } else if (stat == UNCORR_ERR || stat > MAX_CORR_ERR) {
536 dev_dbg(dev, "ECC step %d: uncorrectable\n", step);
537 mtd->ecc_stats.failed++;
540 dev_dbg(dev, "ECC step %d: %d bits corrected\n",
542 max_bf = max_t(unsigned int, max_bf, stat);
543 mtd->ecc_stats.corrected += stat;
547 return failure ? -EBADMSG : max_bf;
550 /* External ECC engine helpers */
551 static int mxic_ecc_prepare_io_req_external(struct nand_device *nand,
552 struct nand_page_io_req *req)
554 struct mxic_ecc_engine *mxic = nand_to_mxic(nand);
555 struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);
556 struct mtd_info *mtd = nanddev_to_mtd(nand);
557 int offset, nents, step, ret;
559 if (req->mode == MTD_OPS_RAW)
562 nand_ecc_tweak_req(&ctx->req_ctx, req);
565 if (req->type == NAND_PAGE_READ)
568 mxic_ecc_add_room_in_oobbuf(ctx, ctx->oobwithstat,
569 ctx->req->oobbuf.out);
571 sg_set_buf(&ctx->sg[0], req->databuf.out, req->datalen);
572 sg_set_buf(&ctx->sg[1], ctx->oobwithstat,
573 req->ooblen + (ctx->steps * STAT_BYTES));
575 nents = dma_map_sg(mxic->dev, ctx->sg, 2, DMA_BIDIRECTIONAL);
579 mutex_lock(&mxic->lock);
581 for (step = 0; step < ctx->steps; step++) {
582 writel(sg_dma_address(&ctx->sg[0]) + (step * ctx->data_step_sz),
583 mxic->regs + SDMA_MAIN_ADDR);
584 writel(sg_dma_address(&ctx->sg[1]) + (step * (ctx->oob_step_sz + STAT_BYTES)),
585 mxic->regs + SDMA_SPARE_ADDR);
586 ret = mxic_ecc_process_data(mxic, ctx->req->type);
591 mutex_unlock(&mxic->lock);
593 dma_unmap_sg(mxic->dev, ctx->sg, 2, DMA_BIDIRECTIONAL);
598 /* Retrieve the calculated ECC bytes */
599 for (step = 0; step < ctx->steps; step++) {
600 offset = ctx->meta_sz + (step * ctx->oob_step_sz);
601 mtd_ooblayout_get_eccbytes(mtd,
602 (u8 *)ctx->req->oobbuf.out + offset,
603 ctx->oobwithstat + (step * STAT_BYTES),
604 step * ctx->parity_sz,
611 static int mxic_ecc_finish_io_req_external(struct nand_device *nand,
612 struct nand_page_io_req *req)
614 struct mxic_ecc_engine *mxic = nand_to_mxic(nand);
615 struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);
616 int nents, step, ret;
618 if (req->mode == MTD_OPS_RAW)
621 if (req->type == NAND_PAGE_WRITE) {
622 nand_ecc_restore_req(&ctx->req_ctx, req);
626 /* Copy the OOB buffer and add room for the ECC engine status bytes */
627 mxic_ecc_add_room_in_oobbuf(ctx, ctx->oobwithstat, ctx->req->oobbuf.in);
629 sg_set_buf(&ctx->sg[0], req->databuf.in, req->datalen);
630 sg_set_buf(&ctx->sg[1], ctx->oobwithstat,
631 req->ooblen + (ctx->steps * STAT_BYTES));
632 nents = dma_map_sg(mxic->dev, ctx->sg, 2, DMA_BIDIRECTIONAL);
636 mutex_lock(&mxic->lock);
638 for (step = 0; step < ctx->steps; step++) {
639 writel(sg_dma_address(&ctx->sg[0]) + (step * ctx->data_step_sz),
640 mxic->regs + SDMA_MAIN_ADDR);
641 writel(sg_dma_address(&ctx->sg[1]) + (step * (ctx->oob_step_sz + STAT_BYTES)),
642 mxic->regs + SDMA_SPARE_ADDR);
643 ret = mxic_ecc_process_data(mxic, ctx->req->type);
648 mutex_unlock(&mxic->lock);
650 dma_unmap_sg(mxic->dev, ctx->sg, 2, DMA_BIDIRECTIONAL);
653 nand_ecc_restore_req(&ctx->req_ctx, req);
657 /* Extract the status bytes and reconstruct the buffer */
658 mxic_ecc_extract_status_bytes(ctx);
659 mxic_ecc_reconstruct_oobbuf(ctx, ctx->req->oobbuf.in, ctx->oobwithstat);
661 nand_ecc_restore_req(&ctx->req_ctx, req);
663 return mxic_ecc_count_biterrs(mxic, nand);
666 /* Pipelined ECC engine helpers */
667 static int mxic_ecc_prepare_io_req_pipelined(struct nand_device *nand,
668 struct nand_page_io_req *req)
670 struct mxic_ecc_engine *mxic = nand_to_mxic(nand);
671 struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);
674 if (req->mode == MTD_OPS_RAW)
677 nand_ecc_tweak_req(&ctx->req_ctx, req);
680 /* Copy the OOB buffer and add room for the ECC engine status bytes */
681 mxic_ecc_add_room_in_oobbuf(ctx, ctx->oobwithstat, ctx->req->oobbuf.in);
683 sg_set_buf(&ctx->sg[0], req->databuf.in, req->datalen);
684 sg_set_buf(&ctx->sg[1], ctx->oobwithstat,
685 req->ooblen + (ctx->steps * STAT_BYTES));
687 nents = dma_map_sg(mxic->dev, ctx->sg, 2, DMA_BIDIRECTIONAL);
691 mutex_lock(&mxic->lock);
693 writel(sg_dma_address(&ctx->sg[0]), mxic->regs + SDMA_MAIN_ADDR);
694 writel(sg_dma_address(&ctx->sg[1]), mxic->regs + SDMA_SPARE_ADDR);
699 static int mxic_ecc_finish_io_req_pipelined(struct nand_device *nand,
700 struct nand_page_io_req *req)
702 struct mxic_ecc_engine *mxic = nand_to_mxic(nand);
703 struct mxic_ecc_ctx *ctx = nand_to_ecc_ctx(nand);
706 if (req->mode == MTD_OPS_RAW)
709 mutex_unlock(&mxic->lock);
711 dma_unmap_sg(mxic->dev, ctx->sg, 2, DMA_BIDIRECTIONAL);
713 if (req->type == NAND_PAGE_READ) {
714 mxic_ecc_extract_status_bytes(ctx);
715 mxic_ecc_reconstruct_oobbuf(ctx, ctx->req->oobbuf.in,
717 ret = mxic_ecc_count_biterrs(mxic, nand);
720 nand_ecc_restore_req(&ctx->req_ctx, req);
725 static struct nand_ecc_engine_ops mxic_ecc_engine_external_ops = {
726 .init_ctx = mxic_ecc_init_ctx_external,
727 .cleanup_ctx = mxic_ecc_cleanup_ctx,
728 .prepare_io_req = mxic_ecc_prepare_io_req_external,
729 .finish_io_req = mxic_ecc_finish_io_req_external,
732 static struct nand_ecc_engine_ops mxic_ecc_engine_pipelined_ops = {
733 .init_ctx = mxic_ecc_init_ctx_pipelined,
734 .cleanup_ctx = mxic_ecc_cleanup_ctx,
735 .prepare_io_req = mxic_ecc_prepare_io_req_pipelined,
736 .finish_io_req = mxic_ecc_finish_io_req_pipelined,
739 struct nand_ecc_engine_ops *mxic_ecc_get_pipelined_ops(void)
741 return &mxic_ecc_engine_pipelined_ops;
743 EXPORT_SYMBOL_GPL(mxic_ecc_get_pipelined_ops);
745 static struct platform_device *
746 mxic_ecc_get_pdev(struct platform_device *spi_pdev)
748 struct platform_device *eng_pdev;
749 struct device_node *np;
751 /* Retrieve the nand-ecc-engine phandle */
752 np = of_parse_phandle(spi_pdev->dev.of_node, "nand-ecc-engine", 0);
756 /* Jump to the engine's device node */
757 eng_pdev = of_find_device_by_node(np);
763 void mxic_ecc_put_pipelined_engine(struct nand_ecc_engine *eng)
765 struct mxic_ecc_engine *mxic = pip_ecc_eng_to_mxic(eng);
767 platform_device_put(to_platform_device(mxic->dev));
769 EXPORT_SYMBOL_GPL(mxic_ecc_put_pipelined_engine);
771 struct nand_ecc_engine *
772 mxic_ecc_get_pipelined_engine(struct platform_device *spi_pdev)
774 struct platform_device *eng_pdev;
775 struct mxic_ecc_engine *mxic;
777 eng_pdev = mxic_ecc_get_pdev(spi_pdev);
779 return ERR_PTR(-ENODEV);
781 mxic = platform_get_drvdata(eng_pdev);
783 platform_device_put(eng_pdev);
784 return ERR_PTR(-EPROBE_DEFER);
787 return &mxic->pipelined_engine;
789 EXPORT_SYMBOL_GPL(mxic_ecc_get_pipelined_engine);
792 * Only the external ECC engine is exported as the pipelined is SoC specific, so
793 * it is registered directly by the drivers that wrap it.
795 static int mxic_ecc_probe(struct platform_device *pdev)
797 struct device *dev = &pdev->dev;
798 struct mxic_ecc_engine *mxic;
801 mxic = devm_kzalloc(&pdev->dev, sizeof(*mxic), GFP_KERNEL);
805 mxic->dev = &pdev->dev;
808 * Both memory regions for the ECC engine itself and the AXI slave
809 * address are mandatory.
811 mxic->regs = devm_platform_ioremap_resource(pdev, 0);
812 if (IS_ERR(mxic->regs)) {
813 dev_err(&pdev->dev, "Missing memory region\n");
814 return PTR_ERR(mxic->regs);
817 mxic_ecc_disable_engine(mxic);
818 mxic_ecc_disable_int(mxic);
820 /* IRQ is optional yet much more efficient */
821 mxic->irq = platform_get_irq_byname_optional(pdev, "ecc-engine");
823 ret = devm_request_irq(&pdev->dev, mxic->irq, mxic_ecc_isr, 0,
828 dev_info(dev, "Invalid or missing IRQ, fallback to polling\n");
832 mutex_init(&mxic->lock);
835 * In external mode, the device is the ECC engine. In pipelined mode,
836 * the device is the host controller. The device is used to match the
837 * right ECC engine based on the DT properties.
839 mxic->external_engine.dev = &pdev->dev;
840 mxic->external_engine.integration = NAND_ECC_ENGINE_INTEGRATION_EXTERNAL;
841 mxic->external_engine.ops = &mxic_ecc_engine_external_ops;
843 nand_ecc_register_on_host_hw_engine(&mxic->external_engine);
845 platform_set_drvdata(pdev, mxic);
850 static int mxic_ecc_remove(struct platform_device *pdev)
852 struct mxic_ecc_engine *mxic = platform_get_drvdata(pdev);
854 nand_ecc_unregister_on_host_hw_engine(&mxic->external_engine);
859 static const struct of_device_id mxic_ecc_of_ids[] = {
861 .compatible = "mxicy,nand-ecc-engine-rev3",
865 MODULE_DEVICE_TABLE(of, mxic_ecc_of_ids);
867 static struct platform_driver mxic_ecc_driver = {
869 .name = "mxic-nand-ecc-engine",
870 .of_match_table = mxic_ecc_of_ids,
872 .probe = mxic_ecc_probe,
873 .remove = mxic_ecc_remove,
875 module_platform_driver(mxic_ecc_driver);
877 MODULE_LICENSE("GPL");
878 MODULE_AUTHOR("Miquel Raynal <miquel.raynal@bootlin.com>");
879 MODULE_DESCRIPTION("Macronix NAND hardware ECC controller");