GNU Linux-libre 5.10.215-gnu1
drivers/mtd/nand/raw/gpmi-nand/gpmi-nand.c
1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Freescale GPMI NAND Flash Driver
4  *
5  * Copyright (C) 2010-2015 Freescale Semiconductor, Inc.
6  * Copyright (C) 2008 Embedded Alley Solutions, Inc.
7  */
8 #include <linux/clk.h>
9 #include <linux/delay.h>
10 #include <linux/slab.h>
11 #include <linux/sched/task_stack.h>
12 #include <linux/interrupt.h>
13 #include <linux/module.h>
14 #include <linux/mtd/partitions.h>
15 #include <linux/of.h>
16 #include <linux/of_device.h>
17 #include <linux/pm_runtime.h>
18 #include <linux/dma/mxs-dma.h>
19 #include "gpmi-nand.h"
20 #include "gpmi-regs.h"
21 #include "bch-regs.h"
22
23 /* Resource names for the GPMI NAND driver. */
24 #define GPMI_NAND_GPMI_REGS_ADDR_RES_NAME  "gpmi-nand"
25 #define GPMI_NAND_BCH_REGS_ADDR_RES_NAME   "bch"
26 #define GPMI_NAND_BCH_INTERRUPT_RES_NAME   "bch"
27
28 /* Converts time to clock cycles */
29 #define TO_CYCLES(duration, period) DIV_ROUND_UP_ULL(duration, period)
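
/*
 * Worked example (illustrative numbers only): with a 100MHz GPMI clock the
 * period is 10000ps, so a 25000ps setup time needs
 * TO_CYCLES(25000, 10000) = 3 cycles (the division rounds up).
 */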
30
31 #define MXS_SET_ADDR            0x4
32 #define MXS_CLR_ADDR            0x8
33 /*
34  * Clear the bit and poll it cleared.  This is usually called with
35  * a reset address and mask being either SFTRST(bit 31) or CLKGATE
36  * (bit 30).
37  */
38 static int clear_poll_bit(void __iomem *addr, u32 mask)
39 {
40         int timeout = 0x400;
41
42         /* clear the bit */
43         writel(mask, addr + MXS_CLR_ADDR);
44
45         /*
46          * SFTRST needs 3 GPMI clocks to settle, the reference manual
47          * recommends to wait 1us.
48          */
49         udelay(1);
50
51         /* poll the bit becoming clear */
52         while ((readl(addr) & mask) && --timeout)
53                 /* nothing */;
54
55         return !timeout;
56 }
57
58 #define MODULE_CLKGATE          (1 << 30)
59 #define MODULE_SFTRST           (1 << 31)
60 /*
61  * The current mxs_reset_block() will do two things:
62  *  [1] enable the module.
63  *  [2] reset the module.
64  *
65  * In most cases, this is fine.
66  * But the MX23 has a hardware bug in the BCH block (see erratum #2847):
67  * if you try to soft reset the BCH block, it becomes unusable until
68  * the next hard reset. This happens in NAND boot mode: when the board
69  * boots from NAND, the boot ROM initializes the BCH block itself.
70  * So if the driver resets the BCH again, the BCH will not work anymore
71  * and you will see a DMA timeout. The bug has been fixed
72  * in later chips, such as the MX28.
73  *
74  * To avoid this bug, we add a new parameter `just_enable` to
75  * mxs_reset_block() and reimplement it here.
76  */
77 static int gpmi_reset_block(void __iomem *reset_addr, bool just_enable)
78 {
79         int ret;
80         int timeout = 0x400;
81
82         /* clear and poll SFTRST */
83         ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
84         if (unlikely(ret))
85                 goto error;
86
87         /* clear CLKGATE */
88         writel(MODULE_CLKGATE, reset_addr + MXS_CLR_ADDR);
89
90         if (!just_enable) {
91                 /* set SFTRST to reset the block */
92                 writel(MODULE_SFTRST, reset_addr + MXS_SET_ADDR);
93                 udelay(1);
94
95                 /* poll CLKGATE becoming set */
96                 while ((!(readl(reset_addr) & MODULE_CLKGATE)) && --timeout)
97                         /* nothing */;
98                 if (unlikely(!timeout))
99                         goto error;
100         }
101
102         /* clear and poll SFTRST */
103         ret = clear_poll_bit(reset_addr, MODULE_SFTRST);
104         if (unlikely(ret))
105                 goto error;
106
107         /* clear and poll CLKGATE */
108         ret = clear_poll_bit(reset_addr, MODULE_CLKGATE);
109         if (unlikely(ret))
110                 goto error;
111
112         return 0;
113
114 error:
115         pr_err("%s(%p): module reset timeout\n", __func__, reset_addr);
116         return -ETIMEDOUT;
117 }
118
119 static int __gpmi_enable_clk(struct gpmi_nand_data *this, bool v)
120 {
121         struct clk *clk;
122         int ret;
123         int i;
124
125         for (i = 0; i < GPMI_CLK_MAX; i++) {
126                 clk = this->resources.clock[i];
127                 if (!clk)
128                         break;
129
130                 if (v) {
131                         ret = clk_prepare_enable(clk);
132                         if (ret)
133                                 goto err_clk;
134                 } else {
135                         clk_disable_unprepare(clk);
136                 }
137         }
138         return 0;
139
140 err_clk:
141         for (; i > 0; i--)
142                 clk_disable_unprepare(this->resources.clock[i - 1]);
143         return ret;
144 }
145
146 static int gpmi_init(struct gpmi_nand_data *this)
147 {
148         struct resources *r = &this->resources;
149         int ret;
150
151         ret = pm_runtime_get_sync(this->dev);
152         if (ret < 0) {
153                 pm_runtime_put_noidle(this->dev);
154                 return ret;
155         }
156
157         ret = gpmi_reset_block(r->gpmi_regs, false);
158         if (ret)
159                 goto err_out;
160
161         /*
162          * Reset BCH here, too. We got failures otherwise :(
163          * See later BCH reset for explanation of MX23 and MX28 handling
164          */
165         ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MXS(this));
166         if (ret)
167                 goto err_out;
168
169         /* Choose NAND mode. */
170         writel(BM_GPMI_CTRL1_GPMI_MODE, r->gpmi_regs + HW_GPMI_CTRL1_CLR);
171
172         /* Set the IRQ polarity. */
173         writel(BM_GPMI_CTRL1_ATA_IRQRDY_POLARITY,
174                                 r->gpmi_regs + HW_GPMI_CTRL1_SET);
175
176         /* Disable Write-Protection. */
177         writel(BM_GPMI_CTRL1_DEV_RESET, r->gpmi_regs + HW_GPMI_CTRL1_SET);
178
179         /* Select BCH ECC. */
180         writel(BM_GPMI_CTRL1_BCH_MODE, r->gpmi_regs + HW_GPMI_CTRL1_SET);
181
182         /*
183          * Decouple the chip select from dma channel. We use dma0 for all
184          * the chips.
185          */
186         writel(BM_GPMI_CTRL1_DECOUPLE_CS, r->gpmi_regs + HW_GPMI_CTRL1_SET);
187
188 err_out:
189         pm_runtime_mark_last_busy(this->dev);
190         pm_runtime_put_autosuspend(this->dev);
191         return ret;
192 }
193
194 /* This function dumps the controller state; it is called only when a bug occurs. */
195 static void gpmi_dump_info(struct gpmi_nand_data *this)
196 {
197         struct resources *r = &this->resources;
198         struct bch_geometry *geo = &this->bch_geometry;
199         u32 reg;
200         int i;
201
202         dev_err(this->dev, "Show GPMI registers :\n");
203         for (i = 0; i <= HW_GPMI_DEBUG / 0x10 + 1; i++) {
204                 reg = readl(r->gpmi_regs + i * 0x10);
205                 dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
206         }
207
208         /* start to print out the BCH info */
209         dev_err(this->dev, "Show BCH registers :\n");
210         for (i = 0; i <= HW_BCH_VERSION / 0x10 + 1; i++) {
211                 reg = readl(r->bch_regs + i * 0x10);
212                 dev_err(this->dev, "offset 0x%.3x : 0x%.8x\n", i * 0x10, reg);
213         }
214         dev_err(this->dev, "BCH Geometry :\n"
215                 "GF length              : %u\n"
216                 "ECC Strength           : %u\n"
217                 "Page Size in Bytes     : %u\n"
218                 "Metadata Size in Bytes : %u\n"
219                 "ECC Chunk Size in Bytes: %u\n"
220                 "ECC Chunk Count        : %u\n"
221                 "Payload Size in Bytes  : %u\n"
222                 "Auxiliary Size in Bytes: %u\n"
223                 "Auxiliary Status Offset: %u\n"
224                 "Block Mark Byte Offset : %u\n"
225                 "Block Mark Bit Offset  : %u\n",
226                 geo->gf_len,
227                 geo->ecc_strength,
228                 geo->page_size,
229                 geo->metadata_size,
230                 geo->ecc_chunk_size,
231                 geo->ecc_chunk_count,
232                 geo->payload_size,
233                 geo->auxiliary_size,
234                 geo->auxiliary_status_offset,
235                 geo->block_mark_byte_offset,
236                 geo->block_mark_bit_offset);
237 }
238
239 static inline bool gpmi_check_ecc(struct gpmi_nand_data *this)
240 {
241         struct bch_geometry *geo = &this->bch_geometry;
242
243         /* Do the sanity check. */
244         if (GPMI_IS_MXS(this)) {
245                 /* The mx23/mx28 only support the GF13. */
246                 if (geo->gf_len == 14)
247                         return false;
248         }
249         return geo->ecc_strength <= this->devdata->bch_max_ecc_strength;
250 }
251
252 /*
253  * If we can get the ECC information from the NAND chip, we do not
254  * need to calculate it ourselves.
255  *
256  * We may have available oob space in this case.
257  */
258 static int set_geometry_by_ecc_info(struct gpmi_nand_data *this,
259                                     unsigned int ecc_strength,
260                                     unsigned int ecc_step)
261 {
262         struct bch_geometry *geo = &this->bch_geometry;
263         struct nand_chip *chip = &this->nand;
264         struct mtd_info *mtd = nand_to_mtd(chip);
265         unsigned int block_mark_bit_offset;
266
267         switch (ecc_step) {
268         case SZ_512:
269                 geo->gf_len = 13;
270                 break;
271         case SZ_1K:
272                 geo->gf_len = 14;
273                 break;
274         default:
275                 dev_err(this->dev,
276                         "unsupported nand chip. ecc bits : %d, ecc size : %d\n",
277                         nanddev_get_ecc_requirements(&chip->base)->strength,
278                         nanddev_get_ecc_requirements(&chip->base)->step_size);
279                 return -EINVAL;
280         }
281         geo->ecc_chunk_size = ecc_step;
282         geo->ecc_strength = round_up(ecc_strength, 2);
283         if (!gpmi_check_ecc(this))
284                 return -EINVAL;
285
286         /* Keep the C >= O */
287         if (geo->ecc_chunk_size < mtd->oobsize) {
288                 dev_err(this->dev,
289                         "unsupported nand chip. ecc size: %d, oob size : %d\n",
290                         ecc_step, mtd->oobsize);
291                 return -EINVAL;
292         }
293
294         /* The default value, see comment in the legacy_set_geometry(). */
295         geo->metadata_size = 10;
296
297         geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunk_size;
298
299         /*
300          * A NAND chip with a 2K page (512-byte data chunks) looks like this:
301          *
302          *    |                          P                            |
303          *    |<----------------------------------------------------->|
304          *    |                                                       |
305          *    |                                        (Block Mark)   |
306          *    |                      P'                      |      | |     |
307          *    |<-------------------------------------------->|  D   | |  O' |
308          *    |                                              |<---->| |<--->|
309          *    V                                              V      V V     V
310          *    +---+----------+-+----------+-+----------+-+----------+-+-----+
311          *    | M |   data   |E|   data   |E|   data   |E|   data   |E|     |
312          *    +---+----------+-+----------+-+----------+-+----------+-+-----+
313          *                                                   ^              ^
314          *                                                   |      O       |
315          *                                                   |<------------>|
316          *                                                   |              |
317          *
318          *      P : the page size for BCH module.
319          *      E : The ECC strength.
320          *      G : the length of Galois Field.
321  *      N : the chunk count per page.
322  *      M : the metadata size per page.
323          *      C : the ecc chunk size, aka the "data" above.
324          *      P': the nand chip's page size.
325          *      O : the nand chip's oob size.
326          *      O': the free oob.
327          *
328          *      The formula for P is :
329          *
330          *                  E * G * N
331          *             P = ------------ + P' + M
332          *                      8
333          *
334          * The position of block mark moves forward in the ECC-based view
335          * of page, and the delta is:
336          *
337          *                   E * G * (N - 1)
338          *             D = (---------------- + M)
339          *                          8
340          *
341          * Please see the comment in legacy_set_geometry().
342  * With the condition C >= O, we still get the same result.
343          * So the bit position of the physical block mark within the ECC-based
344          * view of the page is :
345          *             (P' - D) * 8
346          */
347         geo->page_size = mtd->writesize + geo->metadata_size +
348                 (geo->gf_len * geo->ecc_strength * geo->ecc_chunk_count) / 8;
349
350         geo->payload_size = mtd->writesize;
351
352         geo->auxiliary_status_offset = ALIGN(geo->metadata_size, 4);
353         geo->auxiliary_size = ALIGN(geo->metadata_size, 4)
354                                 + ALIGN(geo->ecc_chunk_count, 4);
355
356         if (!this->swap_block_mark)
357                 return 0;
358
359         /* For bit swap. */
360         block_mark_bit_offset = mtd->writesize * 8 -
361                 (geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
362                                 + geo->metadata_size * 8);
363
364         geo->block_mark_byte_offset = block_mark_bit_offset / 8;
365         geo->block_mark_bit_offset  = block_mark_bit_offset % 8;
366         return 0;
367 }
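
/*
 * Worked example for the page size formula above (a hypothetical 2K-page
 * NAND that reports 512-byte ECC steps and strength 8; not taken from any
 * real datasheet):
 *
 *     M = 10, G = 13, C = 512, N = 2048 / 512 = 4, E = round_up(8, 2) = 8
 *     P = P' + M + (E * G * N) / 8 = 2048 + 10 + (8 * 13 * 4) / 8 = 2110
 *
 * so the BCH view covers the whole 2K page plus 62 bytes of OOB; whatever
 * OOB is left (oobsize - 62 bytes) is reported as free by the OOB layout
 * callbacks further down.
 */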
368
369 /*
370  *  Calculate the ECC strength by hand:
371  *      E : The ECC strength.
372  *      G : the length of Galois Field.
373  *      N : the chunk count per page.
374  *      O : the oobsize of the NAND chip.
375  *      M : the metadata size per page.
376  *
377  *      The formula is :
378  *              E * G * N
379  *            ------------ <= (O - M)
380  *                  8
381  *
382  *      So, we get E by:
383  *                    (O - M) * 8
384  *              E <= -------------
385  *                       G * N
386  */
387 static inline int get_ecc_strength(struct gpmi_nand_data *this)
388 {
389         struct bch_geometry *geo = &this->bch_geometry;
390         struct mtd_info *mtd = nand_to_mtd(&this->nand);
391         int ecc_strength;
392
393         ecc_strength = ((mtd->oobsize - geo->metadata_size) * 8)
394                         / (geo->gf_len * geo->ecc_chunk_count);
395
396         /* Round down to the nearest even number. */
397         return round_down(ecc_strength, 2);
398 }
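
/*
 * Worked example for the formula above (hypothetical 2K page + 64 byte OOB
 * chip with 512-byte chunks, so O = 64, M = 10, G = 13, N = 4; values are
 * illustrative):
 *
 *     E <= ((64 - 10) * 8) / (13 * 4) = 432 / 52 = 8 (integer division)
 *
 * and round_down(8, 2) keeps E = 8, i.e. BCH8 for every chunk.
 */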
399
400 static int legacy_set_geometry(struct gpmi_nand_data *this)
401 {
402         struct bch_geometry *geo = &this->bch_geometry;
403         struct mtd_info *mtd = nand_to_mtd(&this->nand);
404         unsigned int metadata_size;
405         unsigned int status_size;
406         unsigned int block_mark_bit_offset;
407
408         /*
409          * The size of the metadata can be changed, though we set it to 10
410          * bytes now. But it can't be too large, because we have to save
411          * enough space for BCH.
412          */
413         geo->metadata_size = 10;
414
415         /* The default for the length of Galois Field. */
416         geo->gf_len = 13;
417
418         /* The default for chunk size. */
419         geo->ecc_chunk_size = 512;
420         while (geo->ecc_chunk_size < mtd->oobsize) {
421                 geo->ecc_chunk_size *= 2; /* keep C >= O */
422                 geo->gf_len = 14;
423         }
424
425         geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunk_size;
426
427         /* We use the same ECC strength for all chunks. */
428         geo->ecc_strength = get_ecc_strength(this);
429         if (!gpmi_check_ecc(this)) {
430                 dev_err(this->dev,
431                         "ecc strength: %d cannot be supported by the controller (%d)\n"
432                         "try to use minimum ecc strength that NAND chip required\n",
433                         geo->ecc_strength,
434                         this->devdata->bch_max_ecc_strength);
435                 return -EINVAL;
436         }
437
438         geo->page_size = mtd->writesize + geo->metadata_size +
439                 (geo->gf_len * geo->ecc_strength * geo->ecc_chunk_count) / 8;
440         geo->payload_size = mtd->writesize;
441
442         /*
443          * The auxiliary buffer contains the metadata and the ECC status. The
444          * metadata is padded to the nearest 32-bit boundary. The ECC status
445          * contains one byte for every ECC chunk, and is also padded to the
446          * nearest 32-bit boundary.
447          */
448         metadata_size = ALIGN(geo->metadata_size, 4);
449         status_size   = ALIGN(geo->ecc_chunk_count, 4);
450
451         geo->auxiliary_size = metadata_size + status_size;
452         geo->auxiliary_status_offset = metadata_size;
453
454         if (!this->swap_block_mark)
455                 return 0;
456
457         /*
458          * We need to compute the byte and bit offsets of
459          * the physical block mark within the ECC-based view of the page.
460          *
461          * NAND chip with 2K page shows below:
462          *                                             (Block Mark)
463          *                                                   |      |
464          *                                                   |  D   |
465          *                                                   |<---->|
466          *                                                   V      V
467          *    +---+----------+-+----------+-+----------+-+----------+-+
468          *    | M |   data   |E|   data   |E|   data   |E|   data   |E|
469          *    +---+----------+-+----------+-+----------+-+----------+-+
470          *
471          * The position of block mark moves forward in the ECC-based view
472          * of page, and the delta is:
473          *
474          *                   E * G * (N - 1)
475          *             D = (---------------- + M)
476          *                          8
477          *
478          * With the formula to compute the ECC strength, and the condition
479          *       : C >= O         (C is the ecc chunk size)
480          *
481  * It is easy to deduce the following result:
482          *
483          *         E * G       (O - M)      C - M         C - M
484          *      ----------- <= ------- <=  --------  <  ---------
485          *           8            N           N          (N - 1)
486          *
487          *  So, we get:
488          *
489          *                   E * G * (N - 1)
490          *             D = (---------------- + M) < C
491          *                          8
492          *
493          *  The above inequality means the position of block mark
494          *  within the ECC-based view of the page is still in the data chunk,
495          *  and it's NOT in the ECC bits of the chunk.
496          *
497          *  Use the following to compute the bit position of the
498          *  physical block mark within the ECC-based view of the page:
499          *          (page_size - D) * 8
500          *
501          *  --Huang Shijie
502          */
503         block_mark_bit_offset = mtd->writesize * 8 -
504                 (geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
505                                 + geo->metadata_size * 8);
506
507         geo->block_mark_byte_offset = block_mark_bit_offset / 8;
508         geo->block_mark_bit_offset  = block_mark_bit_offset % 8;
509         return 0;
510 }
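
/*
 * Worked example for the block mark delta above (the same hypothetical
 * 2K+64 chip as in the earlier examples: M = 10, G = 13, N = 4, E = 8,
 * C = 512):
 *
 *     D = (E * G * (N - 1)) / 8 + M = (8 * 13 * 3) / 8 + 10 = 49 bytes
 *
 * 49 < 512, so the block mark indeed lands inside a data chunk, and its
 * bit position is (P' - D) * 8 = (2048 - 49) * 8 = 15992, which the code
 * above splits into block_mark_byte_offset = 1999 and
 * block_mark_bit_offset = 0.
 */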
511
512 static int common_nfc_set_geometry(struct gpmi_nand_data *this)
513 {
514         struct nand_chip *chip = &this->nand;
515         const struct nand_ecc_props *requirements =
516                 nanddev_get_ecc_requirements(&chip->base);
517
518         if (chip->ecc.strength > 0 && chip->ecc.size > 0)
519                 return set_geometry_by_ecc_info(this, chip->ecc.strength,
520                                                 chip->ecc.size);
521
522         if ((of_property_read_bool(this->dev->of_node, "fsl,use-minimum-ecc"))
523                                 || legacy_set_geometry(this)) {
524                 if (!(requirements->strength > 0 && requirements->step_size > 0))
525                         return -EINVAL;
526
527                 return set_geometry_by_ecc_info(this,
528                                                 requirements->strength,
529                                                 requirements->step_size);
530         }
531
532         return 0;
533 }
534
535 /* Configures the geometry for BCH.  */
536 static int bch_set_geometry(struct gpmi_nand_data *this)
537 {
538         struct resources *r = &this->resources;
539         int ret;
540
541         ret = common_nfc_set_geometry(this);
542         if (ret)
543                 return ret;
544
545         ret = pm_runtime_get_sync(this->dev);
546         if (ret < 0) {
547                 pm_runtime_put_autosuspend(this->dev);
548                 return ret;
549         }
550
551         /*
552          * Due to erratum #2847 of the MX23, the BCH cannot be soft reset on this
553          * chip, otherwise it will lock up. So we skip resetting BCH on the MX23
554          * and MX28.
555          */
556         ret = gpmi_reset_block(r->bch_regs, GPMI_IS_MXS(this));
557         if (ret)
558                 goto err_out;
559
560         /* Set *all* chip selects to use layout 0. */
561         writel(0, r->bch_regs + HW_BCH_LAYOUTSELECT);
562
563         ret = 0;
564 err_out:
565         pm_runtime_mark_last_busy(this->dev);
566         pm_runtime_put_autosuspend(this->dev);
567
568         return ret;
569 }
570
571 /*
572  * <1> Firstly, we should know what the GPMI-clock means.
573  *     The GPMI-clock is the internal clock of the GPMI NAND controller.
574  *     If you set the GPMI NAND controller to 100MHz, the GPMI-clock's
575  *     period is 10ns. We refer to this period as the GPMI-clock-period.
576  *
577  * <2> Secondly, we should know the frequency on the NAND chip pins.
578  *     The frequency on the nand chip pins is derived from the GPMI-clock.
579  *     We can get it from the following equation:
580  *
581  *         F = G / (DS + DH)
582  *
583  *         F  : the frequency on the nand chip pins.
584  *         G  : the GPMI clock, such as 100MHz.
585  *         DS : GPMI_HW_GPMI_TIMING0:DATA_SETUP
586  *         DH : GPMI_HW_GPMI_TIMING0:DATA_HOLD
587  *
588  * <3> Thirdly, when the frequency on the NAND chip pins is above 33MHz,
589  *     the NAND EDO (Extended Data Out) timing can be applied.
590  *     The GPMI implements a feedback read strobe to sample the read data.
591  *     The feedback read strobe can be delayed to support the NAND EDO timing,
592  *     where the read strobe may deassert before the read data is valid, and
593  *     the read data remains valid for some time after the read strobe.
594  *
595  *     The following figure illustrates some aspects of a NAND Flash read:
596  *
597  *                   |<---tREA---->|
598  *                   |             |
599  *                   |         |   |
600  *                   |<--tRP-->|   |
601  *                   |         |   |
602  *                  __          ___|__________________________________
603  *     RDN            \________/   |
604  *                                 |
605  *                                 /---------\
606  *     Read Data    --------------<           >---------
607  *                                 \---------/
608  *                                |     |
609  *                                |<-D->|
610  *     FeedbackRDN  ________             ____________
611  *                          \___________/
612  *
613  *          D stands for delay, set in the HW_GPMI_CTRL1:RDN_DELAY.
614  *
615  *
616  * <4> Now, we begin to describe how to compute the right RDN_DELAY.
617  *
618  *  4.1) From the aspect of the nand chip pins:
619  *        Delay = (tREA + C - tRP)               {1}
620  *
621  *        tREA : the maximum read access time.
622  *        C    : a constant to adjust the delay; the default is 4000ps.
623  *        tRP  : the read pulse width, which is exactly:
624  *                   tRP = (GPMI-clock-period) * DATA_SETUP
625  *
626  *  4.2) From the aspect of the GPMI nand controller:
627  *         Delay = RDN_DELAY * 0.125 * RP        {2}
628  *
629  *         RP   : the DLL reference period.
630  *            if (GPMI-clock-period > DLL_THRESHOLD)
631  *                   RP = GPMI-clock-period / 2;
632  *            else
633  *                   RP = GPMI-clock-period;
634  *
635  *            Set HW_GPMI_CTRL1:HALF_PERIOD if the GPMI-clock-period
636  *            is greater than DLL_THRESHOLD. On other SoCs the DLL_THRESHOLD
637  *            is 16000ps, but on the mx6q we use 12000ps.
638  *
639  *  4.3) Since {1} equals {2}, we get:
640  *
641  *                     (tREA + 4000 - tRP) * 8
642  *         RDN_DELAY = -----------------------     {3}
643  *                           RP
644  */
645 static void gpmi_nfc_compute_timings(struct gpmi_nand_data *this,
646                                      const struct nand_sdr_timings *sdr)
647 {
648         struct gpmi_nfc_hardware_timing *hw = &this->hw;
649         struct resources *r = &this->resources;
650         unsigned int dll_threshold_ps = this->devdata->max_chain_delay;
651         unsigned int period_ps, reference_period_ps;
652         unsigned int data_setup_cycles, data_hold_cycles, addr_setup_cycles;
653         unsigned int tRP_ps;
654         bool use_half_period;
655         int sample_delay_ps, sample_delay_factor;
656         unsigned int busy_timeout_cycles;
657         u8 wrn_dly_sel;
658         u64 busy_timeout_ps;
659
660         if (sdr->tRC_min >= 30000) {
661                 /* ONFI non-EDO modes [0-3] */
662                 hw->clk_rate = 22000000;
663                 wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_4_TO_8NS;
664         } else if (sdr->tRC_min >= 25000) {
665                 /* ONFI EDO mode 4 */
666                 hw->clk_rate = 80000000;
667                 wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY;
668         } else {
669                 /* ONFI EDO mode 5 */
670                 hw->clk_rate = 100000000;
671                 wrn_dly_sel = BV_GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY;
672         }
673
674         hw->clk_rate = clk_round_rate(r->clock[0], hw->clk_rate);
675
676         /* SDR core timings are given in picoseconds */
677         period_ps = div_u64((u64)NSEC_PER_SEC * 1000, hw->clk_rate);
678
679         addr_setup_cycles = TO_CYCLES(sdr->tALS_min, period_ps);
680         data_setup_cycles = TO_CYCLES(sdr->tDS_min, period_ps);
681         data_hold_cycles = TO_CYCLES(sdr->tDH_min, period_ps);
682         busy_timeout_ps = max(sdr->tBERS_max, sdr->tPROG_max);
683         busy_timeout_cycles = TO_CYCLES(busy_timeout_ps, period_ps);
684
685         hw->timing0 = BF_GPMI_TIMING0_ADDRESS_SETUP(addr_setup_cycles) |
686                       BF_GPMI_TIMING0_DATA_HOLD(data_hold_cycles) |
687                       BF_GPMI_TIMING0_DATA_SETUP(data_setup_cycles);
688         hw->timing1 = BF_GPMI_TIMING1_BUSY_TIMEOUT(busy_timeout_cycles * 4096);
689
690         /*
691          * Derive NFC ideal delay from {3}:
692          *
693          *                     (tREA + 4000 - tRP) * 8
694          *         RDN_DELAY = -----------------------
695          *                                RP
696          */
697         if (period_ps > dll_threshold_ps) {
698                 use_half_period = true;
699                 reference_period_ps = period_ps / 2;
700         } else {
701                 use_half_period = false;
702                 reference_period_ps = period_ps;
703         }
704
705         tRP_ps = data_setup_cycles * period_ps;
706         sample_delay_ps = (sdr->tREA_max + 4000 - tRP_ps) * 8;
707         if (sample_delay_ps > 0)
708                 sample_delay_factor = sample_delay_ps / reference_period_ps;
709         else
710                 sample_delay_factor = 0;
711
712         hw->ctrl1n = BF_GPMI_CTRL1_WRN_DLY_SEL(wrn_dly_sel);
713         if (sample_delay_factor)
714                 hw->ctrl1n |= BF_GPMI_CTRL1_RDN_DELAY(sample_delay_factor) |
715                               BM_GPMI_CTRL1_DLL_ENABLE |
716                               (use_half_period ? BM_GPMI_CTRL1_HALF_PERIOD : 0);
717 }
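
/*
 * Worked example of the RDN_DELAY formula {3} applied by the function
 * above (illustrative numbers, not from any NAND datasheet): assume an EDO
 * mode with hw->clk_rate = 100MHz (period_ps = 10000), tDS_min = 10000ps
 * and tREA_max = 16000ps on an mx6q (dll_threshold_ps = 12000):
 *
 *     data_setup_cycles = DIV_ROUND_UP(10000, 10000) = 1, so tRP = 10000ps
 *     period_ps <= dll_threshold_ps, so RP = period_ps and HALF_PERIOD
 *     stays clear
 *     RDN_DELAY = (16000 + 4000 - 10000) * 8 / 10000 = 8
 */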
718
719 static int gpmi_nfc_apply_timings(struct gpmi_nand_data *this)
720 {
721         struct gpmi_nfc_hardware_timing *hw = &this->hw;
722         struct resources *r = &this->resources;
723         void __iomem *gpmi_regs = r->gpmi_regs;
724         unsigned int dll_wait_time_us;
725         int ret;
726
727         /* Clock dividers do NOT guarantee a clean clock signal on their outputs
728          * while the divide factor is being changed on i.MX6Q/UL/SX. On i.MX7/8,
729          * all clock dividers provide this guarantee.
730          */
731         if (GPMI_IS_MX6Q(this) || GPMI_IS_MX6SX(this))
732                 clk_disable_unprepare(r->clock[0]);
733
734         ret = clk_set_rate(r->clock[0], hw->clk_rate);
735         if (ret) {
736                 dev_err(this->dev, "cannot set clock rate to %lu Hz: %d\n", hw->clk_rate, ret);
737                 return ret;
738         }
739
740         if (GPMI_IS_MX6Q(this) || GPMI_IS_MX6SX(this)) {
741                 ret = clk_prepare_enable(r->clock[0]);
742                 if (ret)
743                         return ret;
744         }
745
746         writel(hw->timing0, gpmi_regs + HW_GPMI_TIMING0);
747         writel(hw->timing1, gpmi_regs + HW_GPMI_TIMING1);
748
749         /*
750          * Clear several CTRL1 fields, DLL must be disabled when setting
751          * RDN_DELAY or HALF_PERIOD.
752          */
753         writel(BM_GPMI_CTRL1_CLEAR_MASK, gpmi_regs + HW_GPMI_CTRL1_CLR);
754         writel(hw->ctrl1n, gpmi_regs + HW_GPMI_CTRL1_SET);
755
756         /* Wait 64 clock cycles before using the GPMI after enabling the DLL */
757         dll_wait_time_us = USEC_PER_SEC / hw->clk_rate * 64;
758         if (!dll_wait_time_us)
759                 dll_wait_time_us = 1;
760
761         /* Wait for the DLL to settle. */
762         udelay(dll_wait_time_us);
763
764         return 0;
765 }
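
/*
 * Note on the DLL wait in the function above: the computation is pure
 * integer math, so at e.g. hw->clk_rate = 100MHz, USEC_PER_SEC / clk_rate
 * is already 0 and the product stays 0; the clamp to 1us is what actually
 * guarantees that we wait at least the 64 GPMI cycles (640ns at 100MHz)
 * after enabling the DLL.
 */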
766
767 static int gpmi_setup_interface(struct nand_chip *chip, int chipnr,
768                                 const struct nand_interface_config *conf)
769 {
770         struct gpmi_nand_data *this = nand_get_controller_data(chip);
771         const struct nand_sdr_timings *sdr;
772
773         /* Retrieve required NAND timings */
774         sdr = nand_get_sdr_timings(conf);
775         if (IS_ERR(sdr))
776                 return PTR_ERR(sdr);
777
778         /* Only the MX6 GPMI controller can reach EDO timings */
779         if (sdr->tRC_min <= 25000 && !GPMI_IS_MX6(this))
780                 return -ENOTSUPP;
781
782         /* Stop here if this call was just a check */
783         if (chipnr < 0)
784                 return 0;
785
786         /* Do the actual derivation of the controller timings */
787         gpmi_nfc_compute_timings(this, sdr);
788
789         this->hw.must_apply_timings = true;
790
791         return 0;
792 }
793
794 /* Clears a BCH interrupt. */
795 static void gpmi_clear_bch(struct gpmi_nand_data *this)
796 {
797         struct resources *r = &this->resources;
798         writel(BM_BCH_CTRL_COMPLETE_IRQ, r->bch_regs + HW_BCH_CTRL_CLR);
799 }
800
801 static struct dma_chan *get_dma_chan(struct gpmi_nand_data *this)
802 {
803         /* We use the DMA channel 0 to access all the nand chips. */
804         return this->dma_chans[0];
805 }
806
807 /* This will be called after the DMA operation is finished. */
808 static void dma_irq_callback(void *param)
809 {
810         struct gpmi_nand_data *this = param;
811         struct completion *dma_c = &this->dma_done;
812
813         complete(dma_c);
814 }
815
816 static irqreturn_t bch_irq(int irq, void *cookie)
817 {
818         struct gpmi_nand_data *this = cookie;
819
820         gpmi_clear_bch(this);
821         complete(&this->bch_done);
822         return IRQ_HANDLED;
823 }
824
825 static int gpmi_raw_len_to_len(struct gpmi_nand_data *this, int raw_len)
826 {
827         /*
828          * raw_len is the length to read/write, including the BCH data, that
829          * we are passed in exec_op. Calculate the data length from it.
830          */
831         if (this->bch)
832                 return ALIGN_DOWN(raw_len, this->bch_geometry.ecc_chunk_size);
833         else
834                 return raw_len;
835 }
836
837 /* Can we use the upper layer's buffer directly for DMA? */
838 static bool prepare_data_dma(struct gpmi_nand_data *this, const void *buf,
839                              int raw_len, struct scatterlist *sgl,
840                              enum dma_data_direction dr)
841 {
842         int ret;
843         int len = gpmi_raw_len_to_len(this, raw_len);
844
845         /* first try to map the upper buffer directly */
846         if (virt_addr_valid(buf) && !object_is_on_stack(buf)) {
847                 sg_init_one(sgl, buf, len);
848                 ret = dma_map_sg(this->dev, sgl, 1, dr);
849                 if (ret == 0)
850                         goto map_fail;
851
852                 return true;
853         }
854
855 map_fail:
856         /* We have to use our own DMA buffer. */
857         sg_init_one(sgl, this->data_buffer_dma, len);
858
859         if (dr == DMA_TO_DEVICE && buf != this->data_buffer_dma)
860                 memcpy(this->data_buffer_dma, buf, len);
861
862         dma_map_sg(this->dev, sgl, 1, dr);
863
864         return false;
865 }
866
867 /* Add our own BBT descriptor. */
868 static uint8_t scan_ff_pattern[] = { 0xff };
869 static struct nand_bbt_descr gpmi_bbt_descr = {
870         .options        = 0,
871         .offs           = 0,
872         .len            = 1,
873         .pattern        = scan_ff_pattern
874 };
875
876 /*
877  * We may change the layout if we can get the ECC info from the datasheet,
878  * else we will use all the (page + OOB).
879  */
880 static int gpmi_ooblayout_ecc(struct mtd_info *mtd, int section,
881                               struct mtd_oob_region *oobregion)
882 {
883         struct nand_chip *chip = mtd_to_nand(mtd);
884         struct gpmi_nand_data *this = nand_get_controller_data(chip);
885         struct bch_geometry *geo = &this->bch_geometry;
886
887         if (section)
888                 return -ERANGE;
889
890         oobregion->offset = 0;
891         oobregion->length = geo->page_size - mtd->writesize;
892
893         return 0;
894 }
895
896 static int gpmi_ooblayout_free(struct mtd_info *mtd, int section,
897                                struct mtd_oob_region *oobregion)
898 {
899         struct nand_chip *chip = mtd_to_nand(mtd);
900         struct gpmi_nand_data *this = nand_get_controller_data(chip);
901         struct bch_geometry *geo = &this->bch_geometry;
902
903         if (section)
904                 return -ERANGE;
905
906         /* The available oob size we have. */
907         if (geo->page_size < mtd->writesize + mtd->oobsize) {
908                 oobregion->offset = geo->page_size - mtd->writesize;
909                 oobregion->length = mtd->oobsize - oobregion->offset;
910         }
911
912         return 0;
913 }
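
/*
 * Worked example for the two OOB layout callbacks above (the hypothetical
 * 2K+64 chip with geo->page_size = 2110 from the earlier examples): the
 * "ecc" region is the first 2110 - 2048 = 62 bytes of the OOB, consumed by
 * metadata and BCH parity, and the "free" region is the remaining 2 bytes
 * at offset 62.
 */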
914
915 static const char * const gpmi_clks_for_mx2x[] = {
916         "gpmi_io",
917 };
918
919 static const struct mtd_ooblayout_ops gpmi_ooblayout_ops = {
920         .ecc = gpmi_ooblayout_ecc,
921         .free = gpmi_ooblayout_free,
922 };
923
924 static const struct gpmi_devdata gpmi_devdata_imx23 = {
925         .type = IS_MX23,
926         .bch_max_ecc_strength = 20,
927         .max_chain_delay = 16000,
928         .clks = gpmi_clks_for_mx2x,
929         .clks_count = ARRAY_SIZE(gpmi_clks_for_mx2x),
930 };
931
932 static const struct gpmi_devdata gpmi_devdata_imx28 = {
933         .type = IS_MX28,
934         .bch_max_ecc_strength = 20,
935         .max_chain_delay = 16000,
936         .clks = gpmi_clks_for_mx2x,
937         .clks_count = ARRAY_SIZE(gpmi_clks_for_mx2x),
938 };
939
940 static const char * const gpmi_clks_for_mx6[] = {
941         "gpmi_io", "gpmi_apb", "gpmi_bch", "gpmi_bch_apb", "per1_bch",
942 };
943
944 static const struct gpmi_devdata gpmi_devdata_imx6q = {
945         .type = IS_MX6Q,
946         .bch_max_ecc_strength = 40,
947         .max_chain_delay = 12000,
948         .clks = gpmi_clks_for_mx6,
949         .clks_count = ARRAY_SIZE(gpmi_clks_for_mx6),
950 };
951
952 static const struct gpmi_devdata gpmi_devdata_imx6sx = {
953         .type = IS_MX6SX,
954         .bch_max_ecc_strength = 62,
955         .max_chain_delay = 12000,
956         .clks = gpmi_clks_for_mx6,
957         .clks_count = ARRAY_SIZE(gpmi_clks_for_mx6),
958 };
959
960 static const char * const gpmi_clks_for_mx7d[] = {
961         "gpmi_io", "gpmi_bch_apb",
962 };
963
964 static const struct gpmi_devdata gpmi_devdata_imx7d = {
965         .type = IS_MX7D,
966         .bch_max_ecc_strength = 62,
967         .max_chain_delay = 12000,
968         .clks = gpmi_clks_for_mx7d,
969         .clks_count = ARRAY_SIZE(gpmi_clks_for_mx7d),
970 };
971
972 static int acquire_register_block(struct gpmi_nand_data *this,
973                                   const char *res_name)
974 {
975         struct platform_device *pdev = this->pdev;
976         struct resources *res = &this->resources;
977         struct resource *r;
978         void __iomem *p;
979
980         r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res_name);
981         p = devm_ioremap_resource(&pdev->dev, r);
982         if (IS_ERR(p))
983                 return PTR_ERR(p);
984
985         if (!strcmp(res_name, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME))
986                 res->gpmi_regs = p;
987         else if (!strcmp(res_name, GPMI_NAND_BCH_REGS_ADDR_RES_NAME))
988                 res->bch_regs = p;
989         else
990                 dev_err(this->dev, "unknown resource name : %s\n", res_name);
991
992         return 0;
993 }
994
995 static int acquire_bch_irq(struct gpmi_nand_data *this, irq_handler_t irq_h)
996 {
997         struct platform_device *pdev = this->pdev;
998         const char *res_name = GPMI_NAND_BCH_INTERRUPT_RES_NAME;
999         struct resource *r;
1000         int err;
1001
1002         r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, res_name);
1003         if (!r) {
1004                 dev_err(this->dev, "Can't get resource for %s\n", res_name);
1005                 return -ENODEV;
1006         }
1007
1008         err = devm_request_irq(this->dev, r->start, irq_h, 0, res_name, this);
1009         if (err)
1010                 dev_err(this->dev, "error requesting BCH IRQ\n");
1011
1012         return err;
1013 }
1014
1015 static void release_dma_channels(struct gpmi_nand_data *this)
1016 {
1017         unsigned int i;
1018         for (i = 0; i < DMA_CHANS; i++)
1019                 if (this->dma_chans[i]) {
1020                         dma_release_channel(this->dma_chans[i]);
1021                         this->dma_chans[i] = NULL;
1022                 }
1023 }
1024
1025 static int acquire_dma_channels(struct gpmi_nand_data *this)
1026 {
1027         struct platform_device *pdev = this->pdev;
1028         struct dma_chan *dma_chan;
1029         int ret = 0;
1030
1031         /* request dma channel */
1032         dma_chan = dma_request_chan(&pdev->dev, "rx-tx");
1033         if (IS_ERR(dma_chan)) {
1034                 ret = dev_err_probe(this->dev, PTR_ERR(dma_chan),
1035                                     "DMA channel request failed\n");
1036                 release_dma_channels(this);
1037         } else {
1038                 this->dma_chans[0] = dma_chan;
1039         }
1040
1041         return ret;
1042 }
1043
1044 static int gpmi_get_clks(struct gpmi_nand_data *this)
1045 {
1046         struct resources *r = &this->resources;
1047         struct clk *clk;
1048         int err, i;
1049
1050         for (i = 0; i < this->devdata->clks_count; i++) {
1051                 clk = devm_clk_get(this->dev, this->devdata->clks[i]);
1052                 if (IS_ERR(clk)) {
1053                         err = PTR_ERR(clk);
1054                         goto err_clock;
1055                 }
1056
1057                 r->clock[i] = clk;
1058         }
1059
1060         return 0;
1061
1062 err_clock:
1063         dev_dbg(this->dev, "failed in finding the clocks.\n");
1064         return err;
1065 }
1066
1067 static int acquire_resources(struct gpmi_nand_data *this)
1068 {
1069         int ret;
1070
1071         ret = acquire_register_block(this, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME);
1072         if (ret)
1073                 goto exit_regs;
1074
1075         ret = acquire_register_block(this, GPMI_NAND_BCH_REGS_ADDR_RES_NAME);
1076         if (ret)
1077                 goto exit_regs;
1078
1079         ret = acquire_bch_irq(this, bch_irq);
1080         if (ret)
1081                 goto exit_regs;
1082
1083         ret = acquire_dma_channels(this);
1084         if (ret)
1085                 goto exit_regs;
1086
1087         ret = gpmi_get_clks(this);
1088         if (ret)
1089                 goto exit_clock;
1090         return 0;
1091
1092 exit_clock:
1093         release_dma_channels(this);
1094 exit_regs:
1095         return ret;
1096 }
1097
1098 static void release_resources(struct gpmi_nand_data *this)
1099 {
1100         release_dma_channels(this);
1101 }
1102
1103 static void gpmi_free_dma_buffer(struct gpmi_nand_data *this)
1104 {
1105         struct device *dev = this->dev;
1106         struct bch_geometry *geo = &this->bch_geometry;
1107
1108         if (this->auxiliary_virt && virt_addr_valid(this->auxiliary_virt))
1109                 dma_free_coherent(dev, geo->auxiliary_size,
1110                                         this->auxiliary_virt,
1111                                         this->auxiliary_phys);
1112         kfree(this->data_buffer_dma);
1113         kfree(this->raw_buffer);
1114
1115         this->data_buffer_dma   = NULL;
1116         this->raw_buffer        = NULL;
1117 }
1118
1119 /* Allocate the DMA buffers */
1120 static int gpmi_alloc_dma_buffer(struct gpmi_nand_data *this)
1121 {
1122         struct bch_geometry *geo = &this->bch_geometry;
1123         struct device *dev = this->dev;
1124         struct mtd_info *mtd = nand_to_mtd(&this->nand);
1125
1126         /*
1127          * Allocate a read/write data buffer.
1128          *     gpmi_alloc_dma_buffer() can be called twice.
1129          *     We allocate a PAGE_SIZE buffer if gpmi_alloc_dma_buffer()
1130          *     is called before the NAND identification, and we allocate a
1131          *     buffer of the real NAND page size when gpmi_alloc_dma_buffer()
1132          *     is called afterwards.
1133          */
1134         this->data_buffer_dma = kzalloc(mtd->writesize ?: PAGE_SIZE,
1135                                         GFP_DMA | GFP_KERNEL);
1136         if (this->data_buffer_dma == NULL)
1137                 goto error_alloc;
1138
1139         this->auxiliary_virt = dma_alloc_coherent(dev, geo->auxiliary_size,
1140                                         &this->auxiliary_phys, GFP_DMA);
1141         if (!this->auxiliary_virt)
1142                 goto error_alloc;
1143
1144         this->raw_buffer = kzalloc((mtd->writesize ?: PAGE_SIZE) + mtd->oobsize, GFP_KERNEL);
1145         if (!this->raw_buffer)
1146                 goto error_alloc;
1147
1148         return 0;
1149
1150 error_alloc:
1151         gpmi_free_dma_buffer(this);
1152         return -ENOMEM;
1153 }
1154
1155 /*
1156  * Handles block mark swapping.
1157  * It can be called either to swap the block mark in or to swap it back,
1158  * because the two operations are the same.
1159  */
1160 static void block_mark_swapping(struct gpmi_nand_data *this,
1161                                 void *payload, void *auxiliary)
1162 {
1163         struct bch_geometry *nfc_geo = &this->bch_geometry;
1164         unsigned char *p;
1165         unsigned char *a;
1166         unsigned int  bit;
1167         unsigned char mask;
1168         unsigned char from_data;
1169         unsigned char from_oob;
1170
1171         if (!this->swap_block_mark)
1172                 return;
1173
1174         /*
1175          * If control arrives here, we're swapping. Make some convenience
1176          * variables.
1177          */
1178         bit = nfc_geo->block_mark_bit_offset;
1179         p   = payload + nfc_geo->block_mark_byte_offset;
1180         a   = auxiliary;
1181
1182         /*
1183          * Get the byte from the data area that overlays the block mark. Since
1184          * the ECC engine applies its own view to the bits in the page, the
1185          * physical block mark won't (in general) appear on a byte boundary in
1186          * the data.
1187          */
1188         from_data = (p[0] >> bit) | (p[1] << (8 - bit));
1189
1190         /* Get the byte from the OOB. */
1191         from_oob = a[0];
1192
1193         /* Swap them. */
1194         a[0] = from_data;
1195
1196         mask = (0x1 << bit) - 1;
1197         p[0] = (p[0] & mask) | (from_oob << bit);
1198
1199         mask = ~0 << bit;
1200         p[1] = (p[1] & mask) | (from_oob >> (8 - bit));
1201 }
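
/*
 * Illustrative walk-through of the swap above: suppose
 * block_mark_bit_offset = 2 and block_mark_byte_offset = b. Then
 * from_data = (p[0] >> 2) | (p[1] << 6) reassembles the eight physical
 * block mark bits that straddle payload bytes b and b + 1; that byte is
 * written to auxiliary[0], while the original auxiliary[0] is scattered
 * back the other way: its low six bits go into bits 2..7 of p[0] and its
 * top two bits go into bits 0..1 of p[1].
 */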
1202
1203 static int gpmi_count_bitflips(struct nand_chip *chip, void *buf, int first,
1204                                int last, int meta)
1205 {
1206         struct gpmi_nand_data *this = nand_get_controller_data(chip);
1207         struct bch_geometry *nfc_geo = &this->bch_geometry;
1208         struct mtd_info *mtd = nand_to_mtd(chip);
1209         int i;
1210         unsigned char *status;
1211         unsigned int max_bitflips = 0;
1212
1213         /* Loop over status bytes, accumulating ECC status. */
1214         status = this->auxiliary_virt + ALIGN(meta, 4);
1215
1216         for (i = first; i < last; i++, status++) {
1217                 if ((*status == STATUS_GOOD) || (*status == STATUS_ERASED))
1218                         continue;
1219
1220                 if (*status == STATUS_UNCORRECTABLE) {
1221                         int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
1222                         u8 *eccbuf = this->raw_buffer;
1223                         int offset, bitoffset;
1224                         int eccbytes;
1225                         int flips;
1226
1227                         /* Read ECC bytes into our internal raw_buffer */
1228                         offset = nfc_geo->metadata_size * 8;
1229                         offset += ((8 * nfc_geo->ecc_chunk_size) + eccbits) * (i + 1);
1230                         offset -= eccbits;
1231                         bitoffset = offset % 8;
1232                         eccbytes = DIV_ROUND_UP(offset + eccbits, 8);
1233                         offset /= 8;
1234                         eccbytes -= offset;
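                        /*
                         * Worked example (hypothetical 2K geometry with
                         * M = 10, C = 512, G = 13, E = 8, so eccbits = 104):
                         * for chunk i = 0 this gives offset = 80 + 4200 -
                         * 104 = 4176 bits, bitoffset = 0, and we read
                         * eccbytes = 535 - 522 = 13 bytes starting at byte
                         * column 522, i.e. exactly the 104 parity bits
                         * that follow the first data chunk.
                         */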
1235                         nand_change_read_column_op(chip, offset, eccbuf,
1236                                                    eccbytes, false);
1237
1238                         /*
1239                          * ECC data are not byte aligned and we may have
1240                          * in-band data in the first and last byte of
1241                          * eccbuf. Set non-eccbits to one so that
1242                          * nand_check_erased_ecc_chunk() does not count them
1243                          * as bitflips.
1244                          */
1245                         if (bitoffset)
1246                                 eccbuf[0] |= GENMASK(bitoffset - 1, 0);
1247
1248                         bitoffset = (bitoffset + eccbits) % 8;
1249                         if (bitoffset)
1250                                 eccbuf[eccbytes - 1] |= GENMASK(7, bitoffset);
1251
1252                         /*
1253                          * The ECC hardware has an uncorrectable ECC status
1254                          * code in case we have bitflips in an erased page. As
1255                          * nothing was written into this subpage the ECC is
1256                          * obviously wrong and we can not trust it. We assume
1257                          * at this point that we are reading an erased page and
1258                          * try to correct the bitflips in buffer up to
1259                          * ecc_strength bitflips. If this is a page with random
1260                          * data, we exceed this number of bitflips and have a
1261                          * ECC failure. Otherwise we use the corrected buffer.
1262                          */
1263                         if (i == 0) {
1264                                 /* The first block includes metadata */
1265                                 flips = nand_check_erased_ecc_chunk(
1266                                                 buf + i * nfc_geo->ecc_chunk_size,
1267                                                 nfc_geo->ecc_chunk_size,
1268                                                 eccbuf, eccbytes,
1269                                                 this->auxiliary_virt,
1270                                                 nfc_geo->metadata_size,
1271                                                 nfc_geo->ecc_strength);
1272                         } else {
1273                                 flips = nand_check_erased_ecc_chunk(
1274                                                 buf + i * nfc_geo->ecc_chunk_size,
1275                                                 nfc_geo->ecc_chunk_size,
1276                                                 eccbuf, eccbytes,
1277                                                 NULL, 0,
1278                                                 nfc_geo->ecc_strength);
1279                         }
1280
1281                         if (flips > 0) {
1282                                 max_bitflips = max_t(unsigned int, max_bitflips,
1283                                                      flips);
1284                                 mtd->ecc_stats.corrected += flips;
1285                                 continue;
1286                         }
1287
1288                         mtd->ecc_stats.failed++;
1289                         continue;
1290                 }
1291
1292                 mtd->ecc_stats.corrected += *status;
1293                 max_bitflips = max_t(unsigned int, max_bitflips, *status);
1294         }
1295
1296         return max_bitflips;
1297 }
1298
1299 static void gpmi_bch_layout_std(struct gpmi_nand_data *this)
1300 {
1301         struct bch_geometry *geo = &this->bch_geometry;
1302         unsigned int ecc_strength = geo->ecc_strength >> 1;
1303         unsigned int gf_len = geo->gf_len;
1304         unsigned int block_size = geo->ecc_chunk_size;
1305
1306         this->bch_flashlayout0 =
1307                 BF_BCH_FLASH0LAYOUT0_NBLOCKS(geo->ecc_chunk_count - 1) |
1308                 BF_BCH_FLASH0LAYOUT0_META_SIZE(geo->metadata_size) |
1309                 BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this) |
1310                 BF_BCH_FLASH0LAYOUT0_GF(gf_len, this) |
1311                 BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block_size, this);
1312
1313         this->bch_flashlayout1 =
1314                 BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(geo->page_size) |
1315                 BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this) |
1316                 BF_BCH_FLASH0LAYOUT1_GF(gf_len, this) |
1317                 BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(block_size, this);
1318 }
1319
1320 static int gpmi_ecc_read_page(struct nand_chip *chip, uint8_t *buf,
1321                               int oob_required, int page)
1322 {
1323         struct gpmi_nand_data *this = nand_get_controller_data(chip);
1324         struct mtd_info *mtd = nand_to_mtd(chip);
1325         struct bch_geometry *geo = &this->bch_geometry;
1326         unsigned int max_bitflips;
1327         int ret;
1328
1329         gpmi_bch_layout_std(this);
1330         this->bch = true;
1331
1332         ret = nand_read_page_op(chip, page, 0, buf, geo->page_size);
1333         if (ret)
1334                 return ret;
1335
1336         max_bitflips = gpmi_count_bitflips(chip, buf, 0,
1337                                            geo->ecc_chunk_count,
1338                                            geo->auxiliary_status_offset);
1339
1340         /* handle the block mark swapping */
1341         block_mark_swapping(this, buf, this->auxiliary_virt);
1342
1343         if (oob_required) {
1344                 /*
1345                  * It's time to deliver the OOB bytes. See gpmi_ecc_read_oob()
1346                  * for details about our policy for delivering the OOB.
1347                  *
1348                  * We fill the caller's buffer with set bits, and then copy the
1349                  * block mark to the caller's buffer. Note that, if block mark
1350                  * swapping was necessary, it has already been done, so we can
1351                  * rely on the first byte of the auxiliary buffer to contain
1352                  * the block mark.
1353                  */
1354                 memset(chip->oob_poi, ~0, mtd->oobsize);
1355                 chip->oob_poi[0] = ((uint8_t *)this->auxiliary_virt)[0];
1356         }
1357
1358         return max_bitflips;
1359 }
1360
1361 /* Fake a virtual small page for the subpage read */
1362 static int gpmi_ecc_read_subpage(struct nand_chip *chip, uint32_t offs,
1363                                  uint32_t len, uint8_t *buf, int page)
1364 {
1365         struct gpmi_nand_data *this = nand_get_controller_data(chip);
1366         struct bch_geometry *geo = &this->bch_geometry;
1367         int size = chip->ecc.size; /* ECC chunk size */
1368         int meta, n, page_size;
1369         unsigned int max_bitflips;
1370         unsigned int ecc_strength;
1371         int first, last, marker_pos;
1372         int ecc_parity_size;
1373         int col = 0;
1374         int ret;
1375
1376         /* The size of ECC parity */
1377         ecc_parity_size = geo->gf_len * geo->ecc_strength / 8;
1378
1379         /* Align it with the chunk size */
1380         first = offs / size;
1381         last = (offs + len - 1) / size;
1382
1383         if (this->swap_block_mark) {
1384                 /*
1385                  * Find the chunk which contains the Block Marker.
1386                  * If this chunk is in the range of [first, last],
1387                  * we have to read out the whole page.
1388                  * Why? Because we swapped the data at the Block Marker
1389                  * position with the metadata, which is bound to chunk 0.
1390                  */
1391                 marker_pos = geo->block_mark_byte_offset / size;
1392                 if (last >= marker_pos && first <= marker_pos) {
1393                         dev_dbg(this->dev,
1394                                 "page:%d, first:%d, last:%d, marker at:%d\n",
1395                                 page, first, last, marker_pos);
1396                         return gpmi_ecc_read_page(chip, buf, 0, page);
1397                 }
1398         }
1399
1400         meta = geo->metadata_size;
1401         if (first) {
1402                 col = meta + (size + ecc_parity_size) * first;
1403                 meta = 0;
1404                 buf = buf + first * size;
1405         }
1406
1407         ecc_parity_size = geo->gf_len * geo->ecc_strength / 8;
1408
1409         n = last - first + 1;
1410         page_size = meta + (size + ecc_parity_size) * n;
1411         ecc_strength = geo->ecc_strength >> 1;
1412
1413         this->bch_flashlayout0 = BF_BCH_FLASH0LAYOUT0_NBLOCKS(n - 1) |
1414                 BF_BCH_FLASH0LAYOUT0_META_SIZE(meta) |
1415                 BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this) |
1416                 BF_BCH_FLASH0LAYOUT0_GF(geo->gf_len, this) |
1417                 BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(geo->ecc_chunk_size, this);
1418
1419         this->bch_flashlayout1 = BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size) |
1420                 BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this) |
1421                 BF_BCH_FLASH0LAYOUT1_GF(geo->gf_len, this) |
1422                 BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(geo->ecc_chunk_size, this);
1423
1424         this->bch = true;
1425
1426         ret = nand_read_page_op(chip, page, col, buf, page_size);
1427         if (ret)
1428                 return ret;
1429
1430         dev_dbg(this->dev, "page:%d(%d:%d)%d, chunk:(%d:%d), BCH PG size:%d\n",
1431                 page, offs, len, col, first, n, page_size);
1432
1433         max_bitflips = gpmi_count_bitflips(chip, buf, first, last, meta);
1434
1435         return max_bitflips;
1436 }
1437
1438 static int gpmi_ecc_write_page(struct nand_chip *chip, const uint8_t *buf,
1439                                int oob_required, int page)
1440 {
1441         struct mtd_info *mtd = nand_to_mtd(chip);
1442         struct gpmi_nand_data *this = nand_get_controller_data(chip);
1443         struct bch_geometry *nfc_geo = &this->bch_geometry;
1444         int ret;
1445
1446         dev_dbg(this->dev, "ecc write page.\n");
1447
1448         gpmi_bch_layout_std(this);
1449         this->bch = true;
1450
1451         memcpy(this->auxiliary_virt, chip->oob_poi, nfc_geo->auxiliary_size);
1452
1453         if (this->swap_block_mark) {
1454                 /*
1455                  * When doing bad block marker swapping we must always copy the
1456                  * input buffer as we can't modify the const buffer.
1457                  */
1458                 memcpy(this->data_buffer_dma, buf, mtd->writesize);
1459                 buf = this->data_buffer_dma;
1460                 block_mark_swapping(this, this->data_buffer_dma,
1461                                     this->auxiliary_virt);
1462         }
1463
1464         ret = nand_prog_page_op(chip, page, 0, buf, nfc_geo->page_size);
1465
1466         return ret;
1467 }
1468
1469 /*
1470  * There are several places in this driver where we have to handle the OOB and
1471  * block marks. This is the function where things are the most complicated, so
1472  * this is where we try to explain it all. All the other places refer back to
1473  * here.
1474  *
1475  * These are the rules, in order of decreasing importance:
1476  *
1477  * 1) Nothing the caller does can be allowed to imperil the block mark.
1478  *
1479  * 2) In read operations, the first byte of the OOB we return must reflect the
1480  *    true state of the block mark, no matter where that block mark appears in
1481  *    the physical page.
1482  *
1483  * 3) ECC-based read operations return an OOB full of set bits (since we never
1484  *    allow ECC-based writes to the OOB, it doesn't matter what ECC-based reads
1485  *    return).
1486  *
1487  * 4) "Raw" read operations return a direct view of the physical bytes in the
1488  *    page, using the conventional definition of which bytes are data and which
1489  *    are OOB. This gives the caller a way to see the actual, physical bytes
1490  *    in the page, without the distortions applied by our ECC engine.
1491  *
1492  *
1493  * What we do for this specific read operation depends on two questions:
1494  *
1495  * 1) Are we doing a "raw" read, or an ECC-based read?
1496  *
1497  * 2) Are we using block mark swapping or transcription?
1498  *
1499  * There are four cases, illustrated by the following Karnaugh map:
1500  *
1501  *                    |           Raw           |         ECC-based       |
1502  *       -------------+-------------------------+-------------------------+
1503  *                    | Read the conventional   |                         |
1504  *                    | OOB at the end of the   |                         |
1505  *       Swapping     | page and return it. It  |                         |
1506  *                    | contains exactly what   |                         |
1507  *                    | we want.                | Read the block mark and |
1508  *       -------------+-------------------------+ return it in a buffer   |
1509  *                    | Read the conventional   | full of set bits.       |
1510  *                    | OOB at the end of the   |                         |
1511  *                    | page and also the block |                         |
1512  *       Transcribing | mark in the metadata.   |                         |
1513  *                    | Copy the block mark     |                         |
1514  *                    | into the first byte of  |                         |
1515  *                    | the OOB.                |                         |
1516  *       -------------+-------------------------+-------------------------+
1517  *
1518  * Note that we break rule #4 in the Transcribing/Raw case because we're not
1519  * giving an accurate view of the actual, physical bytes in the page (we're
1520  * overwriting the block mark). That's OK because it's more important to follow
1521  * rule #2.
1522  *
1523  * It turns out that knowing whether we want an "ECC-based" or "raw" read is not
1524  * easy. When reading a page, for example, the NAND Flash MTD code calls our
1525  * ecc.read_page or ecc.read_page_raw function. Thus, the fact that MTD wants an
1526  * ECC-based or raw view of the page is implicit in which function it calls
1527  * (there is a similar pair of ECC-based/raw functions for writing).
1528  */
1529 static int gpmi_ecc_read_oob(struct nand_chip *chip, int page)
1530 {
1531         struct mtd_info *mtd = nand_to_mtd(chip);
1532         struct gpmi_nand_data *this = nand_get_controller_data(chip);
1533         int ret;
1534
1535         /* clear the OOB buffer */
1536         memset(chip->oob_poi, ~0, mtd->oobsize);
1537
1538         /* Read out the conventional OOB. */
1539         ret = nand_read_page_op(chip, page, mtd->writesize, chip->oob_poi,
1540                                 mtd->oobsize);
1541         if (ret)
1542                 return ret;
1543
1544         /*
1545          * Now, we want to make sure the block mark is correct. In the
1546          * non-transcribing case (!GPMI_IS_MX23()), we already have it.
1547          * Otherwise, we need to explicitly read it.
1548          */
1549         if (GPMI_IS_MX23(this)) {
1550                 /* Read the block mark into the first byte of the OOB buffer. */
1551                 ret = nand_read_page_op(chip, page, 0, chip->oob_poi, 1);
1552                 if (ret)
1553                         return ret;
1554         }
1555
1556         return 0;
1557 }
1558
1559 static int gpmi_ecc_write_oob(struct nand_chip *chip, int page)
1560 {
1561         struct mtd_info *mtd = nand_to_mtd(chip);
1562         struct mtd_oob_region of = { };
1563
1564         /* Do we have available oob area? */
1565         mtd_ooblayout_free(mtd, 0, &of);
1566         if (!of.length)
1567                 return -EPERM;
1568
1569         if (!nand_is_slc(chip))
1570                 return -EPERM;
1571
1572         return nand_prog_page_op(chip, page, mtd->writesize + of.offset,
1573                                  chip->oob_poi + of.offset, of.length);
1574 }
1575
1576 /*
1577  * This function reads a NAND page without involving the ECC engine (no HW
1578  * ECC correction).
1579  * The tricky part in the GPMI/BCH controller is that it stores ECC bits
1580  * inline (interleaved with payload DATA) and does not align data chunks on
1581  * byte boundaries.
1582  * We thus need to carefully move the payload data and ECC bits stored in the
1583  * page into the provided buffers, which is why we're using nand_extract_bits().
1584  *
1585  * See the inline comments in set_geometry_by_ecc_info() for a full description
1586  * of the layout used by the GPMI controller.
1587  */
1588 static int gpmi_ecc_read_page_raw(struct nand_chip *chip, uint8_t *buf,
1589                                   int oob_required, int page)
1590 {
1591         struct mtd_info *mtd = nand_to_mtd(chip);
1592         struct gpmi_nand_data *this = nand_get_controller_data(chip);
1593         struct bch_geometry *nfc_geo = &this->bch_geometry;
1594         int eccsize = nfc_geo->ecc_chunk_size;
1595         int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
1596         u8 *tmp_buf = this->raw_buffer;
1597         size_t src_bit_off;
1598         size_t oob_bit_off;
1599         size_t oob_byte_off;
1600         uint8_t *oob = chip->oob_poi;
1601         int step;
1602         int ret;
1603
1604         ret = nand_read_page_op(chip, page, 0, tmp_buf,
1605                                 mtd->writesize + mtd->oobsize);
1606         if (ret)
1607                 return ret;
1608
1609         /*
1610          * If required, swap the bad block marker and the data stored in the
1611          * metadata section, so that we don't wrongly consider a block as bad.
1612          *
1613          * See the layout description for a detailed explanation on why this
1614          * is needed.
1615          */
1616         if (this->swap_block_mark)
1617                 swap(tmp_buf[0], tmp_buf[mtd->writesize]);
1618
1619         /*
1620          * Copy the metadata section into the oob buffer (this section is
1621          * guaranteed to be aligned on a byte boundary).
1622          */
1623         if (oob_required)
1624                 memcpy(oob, tmp_buf, nfc_geo->metadata_size);
1625
1626         oob_bit_off = nfc_geo->metadata_size * 8;
1627         src_bit_off = oob_bit_off;
1628
1629         /* Extract interleaved payload data and ECC bits */
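        /*
         * Sketch of the raw layout being unpacked here, with illustrative
         * numbers only (10 bytes of metadata, 512-byte chunks, gf_len = 13,
         * ecc_strength = 14, i.e. 182 parity bits per chunk):
         *
         *   | meta (80 bits) | data0 (4096 bits) | parity0 (182 bits) | data1 ...
         *
         * The metadata and the first chunk are byte aligned, but once a parity
         * area that is not a whole number of bytes has been skipped, the
         * following chunks start at arbitrary bit offsets, hence the use of
         * nand_extract_bits().
         */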
1630         for (step = 0; step < nfc_geo->ecc_chunk_count; step++) {
1631                 if (buf)
1632                         nand_extract_bits(buf, step * eccsize * 8, tmp_buf,
1633                                           src_bit_off, eccsize * 8);
1634                 src_bit_off += eccsize * 8;
1635
1636                 /* Align the last ECC block to a byte boundary */
1637                 if (step == nfc_geo->ecc_chunk_count - 1 &&
1638                     (oob_bit_off + eccbits) % 8)
1639                         eccbits += 8 - ((oob_bit_off + eccbits) % 8);
1640
1641                 if (oob_required)
1642                         nand_extract_bits(oob, oob_bit_off, tmp_buf,
1643                                           src_bit_off, eccbits);
1644
1645                 src_bit_off += eccbits;
1646                 oob_bit_off += eccbits;
1647         }
1648
1649         if (oob_required) {
1650                 oob_byte_off = oob_bit_off / 8;
1651
1652                 if (oob_byte_off < mtd->oobsize)
1653                         memcpy(oob + oob_byte_off,
1654                                tmp_buf + mtd->writesize + oob_byte_off,
1655                                mtd->oobsize - oob_byte_off);
1656         }
1657
1658         return 0;
1659 }
1660
1661 /*
1662  * This function writes a NAND page without involving the ECC engine (no HW
1663  * ECC generation).
1664  * The tricky part in the GPMI/BCH controller is that it stores ECC bits
1665  * inline (interleaved with payload DATA) and does not align data chunks on
1666  * byte boundaries.
1667  * We thus need to take care to place the OOB area at the right spot in the
1668  * final page, which is why we're using nand_extract_bits().
1669  *
1670  * See the inline comments in set_geometry_by_ecc_info() for a full description
1671  * of the layout used by the GPMI controller.
1672  */
1673 static int gpmi_ecc_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
1674                                    int oob_required, int page)
1675 {
1676         struct mtd_info *mtd = nand_to_mtd(chip);
1677         struct gpmi_nand_data *this = nand_get_controller_data(chip);
1678         struct bch_geometry *nfc_geo = &this->bch_geometry;
1679         int eccsize = nfc_geo->ecc_chunk_size;
1680         int eccbits = nfc_geo->ecc_strength * nfc_geo->gf_len;
1681         u8 *tmp_buf = this->raw_buffer;
1682         uint8_t *oob = chip->oob_poi;
1683         size_t dst_bit_off;
1684         size_t oob_bit_off;
1685         size_t oob_byte_off;
1686         int step;
1687
1688         /*
1689          * Initialize all bits to 1 in case we don't have a buffer for the
1690          * payload or OOB data, so that any unspecified bits are left in
1691          * their erased (all ones) state.
1692          */
1693         if (!buf || !oob_required)
1694                 memset(tmp_buf, 0xff, mtd->writesize + mtd->oobsize);
1695
1696         /*
1697          * First copy the metadata section (stored in oob buffer) at the
1698          * beginning of the page, as imposed by the GPMI layout.
1699          */
1700         memcpy(tmp_buf, oob, nfc_geo->metadata_size);
1701         oob_bit_off = nfc_geo->metadata_size * 8;
1702         dst_bit_off = oob_bit_off;
1703
1704         /* Interleave payload data and ECC bits */
1705         for (step = 0; step < nfc_geo->ecc_chunk_count; step++) {
1706                 if (buf)
1707                         nand_extract_bits(tmp_buf, dst_bit_off, buf,
1708                                           step * eccsize * 8, eccsize * 8);
1709                 dst_bit_off += eccsize * 8;
1710
1711                 /* Align the last ECC block to a byte boundary */
1712                 if (step == nfc_geo->ecc_chunk_count - 1 &&
1713                     (oob_bit_off + eccbits) % 8)
1714                         eccbits += 8 - ((oob_bit_off + eccbits) % 8);
1715
1716                 if (oob_required)
1717                         nand_extract_bits(tmp_buf, dst_bit_off, oob,
1718                                           oob_bit_off, eccbits);
1719
1720                 dst_bit_off += eccbits;
1721                 oob_bit_off += eccbits;
1722         }
1723
1724         oob_byte_off = oob_bit_off / 8;
1725
1726         if (oob_required && oob_byte_off < mtd->oobsize)
1727                 memcpy(tmp_buf + mtd->writesize + oob_byte_off,
1728                        oob + oob_byte_off, mtd->oobsize - oob_byte_off);
1729
1730         /*
1731          * If required, swap the bad block marker and the first byte of the
1732          * metadata section, so that we don't modify the bad block marker.
1733          *
1734          * See the layout description for a detailed explanation on why this
1735          * is needed.
1736          */
1737         if (this->swap_block_mark)
1738                 swap(tmp_buf[0], tmp_buf[mtd->writesize]);
1739
1740         return nand_prog_page_op(chip, page, 0, tmp_buf,
1741                                  mtd->writesize + mtd->oobsize);
1742 }
1743
1744 static int gpmi_ecc_read_oob_raw(struct nand_chip *chip, int page)
1745 {
1746         return gpmi_ecc_read_page_raw(chip, NULL, 1, page);
1747 }
1748
1749 static int gpmi_ecc_write_oob_raw(struct nand_chip *chip, int page)
1750 {
1751         return gpmi_ecc_write_page_raw(chip, NULL, 1, page);
1752 }
1753
1754 static int gpmi_block_markbad(struct nand_chip *chip, loff_t ofs)
1755 {
1756         struct mtd_info *mtd = nand_to_mtd(chip);
1757         struct gpmi_nand_data *this = nand_get_controller_data(chip);
1758         int ret = 0;
1759         uint8_t *block_mark;
1760         int column, page, chipnr;
1761
1762         chipnr = (int)(ofs >> chip->chip_shift);
1763         nand_select_target(chip, chipnr);
1764
1765         column = !GPMI_IS_MX23(this) ? mtd->writesize : 0;
1766
1767         /* Write the block mark. */
1768         block_mark = this->data_buffer_dma;
1769         block_mark[0] = 0; /* bad block marker */
1770
1771         /* Shift to get page */
1772         page = (int)(ofs >> chip->page_shift);
1773
1774         ret = nand_prog_page_op(chip, page, column, block_mark, 1);
1775
1776         nand_deselect_target(chip);
1777
1778         return ret;
1779 }
1780
1781 static int nand_boot_set_geometry(struct gpmi_nand_data *this)
1782 {
1783         struct boot_rom_geometry *geometry = &this->rom_geometry;
1784
1785         /*
1786          * Set the boot block stride size.
1787          *
1788          * In principle, we should be reading this from the OTP bits, since
1789          * that's where the ROM is going to get it. In fact, we don't have any
1790          * way to read the OTP bits, so we go with the default and hope for the
1791          * best.
1792          */
1793         geometry->stride_size_in_pages = 64;
1794
1795         /*
1796          * Set the search area stride exponent.
1797          *
1798          * In principle, we should be reading this from the OTP bits, since
1799          * that's where the ROM is going to get it. In fact, we don't have any
1800          * way to read the OTP bits, so we go with the default and hope for the
1801          * best.
1802          */
1803         geometry->search_area_stride_exponent = 2;
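        /*
         * With these defaults the first search area spans 1 << 2 = 4 strides
         * of 64 pages each, so the fingerprint scan below checks pages 0, 64,
         * 128 and 192.
         */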
1804         return 0;
1805 }
1806
1807 static const char  *fingerprint = "STMP";
1808 static int mx23_check_transcription_stamp(struct gpmi_nand_data *this)
1809 {
1810         struct boot_rom_geometry *rom_geo = &this->rom_geometry;
1811         struct device *dev = this->dev;
1812         struct nand_chip *chip = &this->nand;
1813         unsigned int search_area_size_in_strides;
1814         unsigned int stride;
1815         unsigned int page;
1816         u8 *buffer = nand_get_data_buf(chip);
1817         int found_an_ncb_fingerprint = false;
1818         int ret;
1819
1820         /* Compute the number of strides in a search area. */
1821         search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent;
1822
1823         nand_select_target(chip, 0);
1824
1825         /*
1826          * Loop through the first search area, looking for the NCB fingerprint.
1827          */
1828         dev_dbg(dev, "Scanning for an NCB fingerprint...\n");
1829
1830         for (stride = 0; stride < search_area_size_in_strides; stride++) {
1831                 /* Compute the page addresses. */
1832                 page = stride * rom_geo->stride_size_in_pages;
1833
1834                 dev_dbg(dev, "Looking for a fingerprint in page 0x%x\n", page);
1835
1836                 /*
1837                  * Read the NCB fingerprint. The fingerprint is four bytes long
1838                  * and starts in the 12th byte of the page.
1839                  */
1840                 ret = nand_read_page_op(chip, page, 12, buffer,
1841                                         strlen(fingerprint));
1842                 if (ret)
1843                         continue;
1844
1845                 /* Look for the fingerprint. */
1846                 if (!memcmp(buffer, fingerprint, strlen(fingerprint))) {
1847                         found_an_ncb_fingerprint = true;
1848                         break;
1849                 }
1850
1851         }
1852
1853         nand_deselect_target(chip);
1854
1855         if (found_an_ncb_fingerprint)
1856                 dev_dbg(dev, "\tFound a fingerprint\n");
1857         else
1858                 dev_dbg(dev, "\tNo fingerprint found\n");
1859         return found_an_ncb_fingerprint;
1860 }
1861
1862 /* Writes a transcription stamp. */
1863 static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
1864 {
1865         struct device *dev = this->dev;
1866         struct boot_rom_geometry *rom_geo = &this->rom_geometry;
1867         struct nand_chip *chip = &this->nand;
1868         struct mtd_info *mtd = nand_to_mtd(chip);
1869         unsigned int block_size_in_pages;
1870         unsigned int search_area_size_in_strides;
1871         unsigned int search_area_size_in_pages;
1872         unsigned int search_area_size_in_blocks;
1873         unsigned int block;
1874         unsigned int stride;
1875         unsigned int page;
1876         u8 *buffer = nand_get_data_buf(chip);
1877         int status;
1878
1879         /* Compute the search area geometry. */
1880         block_size_in_pages = mtd->erasesize / mtd->writesize;
1881         search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent;
1882         search_area_size_in_pages = search_area_size_in_strides *
1883                                         rom_geo->stride_size_in_pages;
1884         search_area_size_in_blocks =
1885                   (search_area_size_in_pages + (block_size_in_pages - 1)) /
1886                                     block_size_in_pages;
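        /*
         * Worked example with assumed, illustrative values: 2 KiB pages and
         * 128 KiB blocks give 64 pages per block; 4 strides of 64 pages make
         * a 256-page search area, i.e. 4 blocks to erase and re-stamp below.
         */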
1887
1888         dev_dbg(dev, "Search Area Geometry :\n");
1889         dev_dbg(dev, "\tin Blocks : %u\n", search_area_size_in_blocks);
1890         dev_dbg(dev, "\tin Strides: %u\n", search_area_size_in_strides);
1891         dev_dbg(dev, "\tin Pages  : %u\n", search_area_size_in_pages);
1892
1893         nand_select_target(chip, 0);
1894
1895         /* Loop over blocks in the first search area, erasing them. */
1896         dev_dbg(dev, "Erasing the search area...\n");
1897
1898         for (block = 0; block < search_area_size_in_blocks; block++) {
1899                 /* Erase this block. */
1900                 dev_dbg(dev, "\tErasing block 0x%x\n", block);
1901                 status = nand_erase_op(chip, block);
1902                 if (status)
1903                         dev_err(dev, "[%s] Erase failed.\n", __func__);
1904         }
1905
1906         /* Write the NCB fingerprint into the page buffer. */
1907         memset(buffer, ~0, mtd->writesize);
1908         memcpy(buffer + 12, fingerprint, strlen(fingerprint));
1909
1910         /* Loop through the first search area, writing NCB fingerprints. */
1911         dev_dbg(dev, "Writing NCB fingerprints...\n");
1912         for (stride = 0; stride < search_area_size_in_strides; stride++) {
1913                 /* Compute the page addresses. */
1914                 page = stride * rom_geo->stride_size_in_pages;
1915
1916                 /* Write the first page of the current stride. */
1917                 dev_dbg(dev, "Writing an NCB fingerprint in page 0x%x\n", page);
1918
1919                 status = chip->ecc.write_page_raw(chip, buffer, 0, page);
1920                 if (status)
1921                         dev_err(dev, "[%s] Write failed.\n", __func__);
1922         }
1923
1924         nand_deselect_target(chip);
1925
1926         return 0;
1927 }
1928
1929 static int mx23_boot_init(struct gpmi_nand_data  *this)
1930 {
1931         struct device *dev = this->dev;
1932         struct nand_chip *chip = &this->nand;
1933         struct mtd_info *mtd = nand_to_mtd(chip);
1934         unsigned int block_count;
1935         unsigned int block;
1936         int     chipnr;
1937         int     page;
1938         loff_t  byte;
1939         uint8_t block_mark;
1940         int     ret = 0;
1941
1942         /*
1943          * If control arrives here, we can't use block mark swapping, which
1944          * means we're forced to use transcription. First, scan for the
1945          * transcription stamp. If we find it, then we don't have to do
1946          * anything -- the block marks are already transcribed.
1947          */
1948         if (mx23_check_transcription_stamp(this))
1949                 return 0;
1950
1951         /*
1952          * If control arrives here, we couldn't find a transcription stamp, so
1953          * we presume the block marks are in the conventional location.
1954          */
1955         dev_dbg(dev, "Transcribing bad block marks...\n");
1956
1957         /* Compute the number of blocks in the entire medium. */
1958         block_count = nanddev_eraseblocks_per_target(&chip->base);
1959
1960         /*
1961          * Loop over all the blocks in the medium, transcribing block marks as
1962          * we go.
1963          */
1964         for (block = 0; block < block_count; block++) {
1965                 /*
1966                  * Compute the chip, page and byte addresses for this block's
1967                  * conventional mark.
1968                  */
1969                 chipnr = block >> (chip->chip_shift - chip->phys_erase_shift);
1970                 page = block << (chip->phys_erase_shift - chip->page_shift);
1971                 byte = block <<  chip->phys_erase_shift;
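                /*
                 * Illustrative example (assumed geometry): with 128 KiB blocks
                 * (phys_erase_shift = 17) and 2 KiB pages (page_shift = 11),
                 * block N starts at page N << 6 and at byte offset N << 17
                 * within the chip.
                 */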
1972
1973                 /* Send the command to read the conventional block mark. */
1974                 nand_select_target(chip, chipnr);
1975                 ret = nand_read_page_op(chip, page, mtd->writesize, &block_mark,
1976                                         1);
1977                 nand_deselect_target(chip);
1978
1979                 if (ret)
1980                         continue;
1981
1982                 /*
1983                  * Check if the block is marked bad. If so, we need to mark it
1984                  * again, but this time the result will be a mark in the
1985                  * location where we transcribe block marks.
1986                  */
1987                 if (block_mark != 0xff) {
1988                         dev_dbg(dev, "Transcribing mark in block %u\n", block);
1989                         ret = chip->legacy.block_markbad(chip, byte);
1990                         if (ret)
1991                                 dev_err(dev,
1992                                         "Failed to mark block bad with ret %d\n",
1993                                         ret);
1994                 }
1995         }
1996
1997         /* Write the stamp that indicates we've transcribed the block marks. */
1998         mx23_write_transcription_stamp(this);
1999         return 0;
2000 }
2001
2002 static int nand_boot_init(struct gpmi_nand_data  *this)
2003 {
2004         nand_boot_set_geometry(this);
2005
2006         /* This is the ROM arch-specific initialization done before the BBT scanning. */
2007         if (GPMI_IS_MX23(this))
2008                 return mx23_boot_init(this);
2009         return 0;
2010 }
2011
2012 static int gpmi_set_geometry(struct gpmi_nand_data *this)
2013 {
2014         int ret;
2015
2016         /* Free the temporary DMA memory for reading ID. */
2017         gpmi_free_dma_buffer(this);
2018
2019         /* Set up the NFC geometry which is used by BCH. */
2020         ret = bch_set_geometry(this);
2021         if (ret) {
2022                 dev_err(this->dev, "Error setting BCH geometry : %d\n", ret);
2023                 return ret;
2024         }
2025
2026         /* Allocate the new DMA buffers according to the page size and OOB size */
2027         return gpmi_alloc_dma_buffer(this);
2028 }
2029
2030 static int gpmi_init_last(struct gpmi_nand_data *this)
2031 {
2032         struct nand_chip *chip = &this->nand;
2033         struct mtd_info *mtd = nand_to_mtd(chip);
2034         struct nand_ecc_ctrl *ecc = &chip->ecc;
2035         struct bch_geometry *bch_geo = &this->bch_geometry;
2036         int ret;
2037
2038         /* Set up the medium geometry */
2039         ret = gpmi_set_geometry(this);
2040         if (ret)
2041                 return ret;
2042
2043         /* Init the nand_ecc_ctrl{} */
2044         ecc->read_page  = gpmi_ecc_read_page;
2045         ecc->write_page = gpmi_ecc_write_page;
2046         ecc->read_oob   = gpmi_ecc_read_oob;
2047         ecc->write_oob  = gpmi_ecc_write_oob;
2048         ecc->read_page_raw = gpmi_ecc_read_page_raw;
2049         ecc->write_page_raw = gpmi_ecc_write_page_raw;
2050         ecc->read_oob_raw = gpmi_ecc_read_oob_raw;
2051         ecc->write_oob_raw = gpmi_ecc_write_oob_raw;
2052         ecc->engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;
2053         ecc->size       = bch_geo->ecc_chunk_size;
2054         ecc->strength   = bch_geo->ecc_strength;
2055         mtd_set_ooblayout(mtd, &gpmi_ooblayout_ops);
2056
2057         /*
2058          * We only enable the subpage read when:
2059          *  (1) the chip is imx6, and
2060          *  (2) the size of the ECC parity is byte aligned.
2061          */
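        /*
         * Illustrative check with assumed values: with gf_len = 13, a strength
         * of 8 yields 104 parity bits (13 bytes, byte aligned, so subpage reads
         * can be enabled), while a strength of 14 yields 182 bits and keeps
         * subpage reads disabled.
         */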
2062         if (GPMI_IS_MX6(this) &&
2063                 ((bch_geo->gf_len * bch_geo->ecc_strength) % 8) == 0) {
2064                 ecc->read_subpage = gpmi_ecc_read_subpage;
2065                 chip->options |= NAND_SUBPAGE_READ;
2066         }
2067
2068         return 0;
2069 }
2070
2071 static int gpmi_nand_attach_chip(struct nand_chip *chip)
2072 {
2073         struct gpmi_nand_data *this = nand_get_controller_data(chip);
2074         int ret;
2075
2076         if (chip->bbt_options & NAND_BBT_USE_FLASH) {
2077                 chip->bbt_options |= NAND_BBT_NO_OOB;
2078
2079                 if (of_property_read_bool(this->dev->of_node,
2080                                           "fsl,no-blockmark-swap"))
2081                         this->swap_block_mark = false;
2082         }
2083         dev_dbg(this->dev, "Blockmark swapping %sabled\n",
2084                 this->swap_block_mark ? "en" : "dis");
2085
2086         ret = gpmi_init_last(this);
2087         if (ret)
2088                 return ret;
2089
2090         chip->options |= NAND_SKIP_BBTSCAN;
2091
2092         return 0;
2093 }
2094
2095 static struct gpmi_transfer *get_next_transfer(struct gpmi_nand_data *this)
2096 {
2097         struct gpmi_transfer *transfer = &this->transfers[this->ntransfers];
2098
2099         this->ntransfers++;
2100
2101         if (this->ntransfers == GPMI_MAX_TRANSFERS)
2102                 return NULL;
2103
2104         return transfer;
2105 }
2106
2107 static struct dma_async_tx_descriptor *gpmi_chain_command(
2108         struct gpmi_nand_data *this, u8 cmd, const u8 *addr, int naddr)
2109 {
2110         struct dma_chan *channel = get_dma_chan(this);
2111         struct dma_async_tx_descriptor *desc;
2112         struct gpmi_transfer *transfer;
2113         int chip = this->nand.cur_cs;
2114         u32 pio[3];
2115
2116         /* [1] send out the PIO words */
2117         pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WRITE)
2118                 | BM_GPMI_CTRL0_WORD_LENGTH
2119                 | BF_GPMI_CTRL0_CS(chip, this)
2120                 | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
2121                 | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_CLE)
2122                 | BM_GPMI_CTRL0_ADDRESS_INCREMENT
2123                 | BF_GPMI_CTRL0_XFER_COUNT(naddr + 1);
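        /*
         * XFER_COUNT covers the command byte plus the naddr address bytes;
         * ADDRESS_INCREMENT lets the controller advance from the CLE address
         * space to ALE after the first byte, so a single descriptor can carry
         * the whole command/address sequence.
         */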
2124         pio[1] = 0;
2125         pio[2] = 0;
2126         desc = mxs_dmaengine_prep_pio(channel, pio, ARRAY_SIZE(pio),
2127                                       DMA_TRANS_NONE, 0);
2128         if (!desc)
2129                 return NULL;
2130
2131         transfer = get_next_transfer(this);
2132         if (!transfer)
2133                 return NULL;
2134
2135         transfer->cmdbuf[0] = cmd;
2136         if (naddr)
2137                 memcpy(&transfer->cmdbuf[1], addr, naddr);
2138
2139         sg_init_one(&transfer->sgl, transfer->cmdbuf, naddr + 1);
2140         dma_map_sg(this->dev, &transfer->sgl, 1, DMA_TO_DEVICE);
2141
2142         transfer->direction = DMA_TO_DEVICE;
2143
2144         desc = dmaengine_prep_slave_sg(channel, &transfer->sgl, 1, DMA_MEM_TO_DEV,
2145                                        MXS_DMA_CTRL_WAIT4END);
2146         return desc;
2147 }
2148
2149 static struct dma_async_tx_descriptor *gpmi_chain_wait_ready(
2150         struct gpmi_nand_data *this)
2151 {
2152         struct dma_chan *channel = get_dma_chan(this);
2153         u32 pio[2];
2154
2155         pio[0] =  BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WAIT_FOR_READY)
2156                 | BM_GPMI_CTRL0_WORD_LENGTH
2157                 | BF_GPMI_CTRL0_CS(this->nand.cur_cs, this)
2158                 | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
2159                 | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
2160                 | BF_GPMI_CTRL0_XFER_COUNT(0);
2161         pio[1] = 0;
2162
2163         return mxs_dmaengine_prep_pio(channel, pio, 2, DMA_TRANS_NONE,
2164                                 MXS_DMA_CTRL_WAIT4END | MXS_DMA_CTRL_WAIT4RDY);
2165 }
2166
2167 static struct dma_async_tx_descriptor *gpmi_chain_data_read(
2168         struct gpmi_nand_data *this, void *buf, int raw_len, bool *direct)
2169 {
2170         struct dma_async_tx_descriptor *desc;
2171         struct dma_chan *channel = get_dma_chan(this);
2172         struct gpmi_transfer *transfer;
2173         u32 pio[6] = {};
2174
2175         transfer = get_next_transfer(this);
2176         if (!transfer)
2177                 return NULL;
2178
2179         transfer->direction = DMA_FROM_DEVICE;
2180
2181         *direct = prepare_data_dma(this, buf, raw_len, &transfer->sgl,
2182                                    DMA_FROM_DEVICE);
2183
2184         pio[0] =  BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__READ)
2185                 | BM_GPMI_CTRL0_WORD_LENGTH
2186                 | BF_GPMI_CTRL0_CS(this->nand.cur_cs, this)
2187                 | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
2188                 | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
2189                 | BF_GPMI_CTRL0_XFER_COUNT(raw_len);
2190
2191         if (this->bch) {
2192                 pio[2] =  BM_GPMI_ECCCTRL_ENABLE_ECC
2193                         | BF_GPMI_ECCCTRL_ECC_CMD(BV_GPMI_ECCCTRL_ECC_CMD__BCH_DECODE)
2194                         | BF_GPMI_ECCCTRL_BUFFER_MASK(BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE
2195                                 | BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY);
2196                 pio[3] = raw_len;
2197                 pio[4] = transfer->sgl.dma_address;
2198                 pio[5] = this->auxiliary_phys;
2199         }
2200
2201         desc = mxs_dmaengine_prep_pio(channel, pio, ARRAY_SIZE(pio),
2202                                       DMA_TRANS_NONE, 0);
2203         if (!desc)
2204                 return NULL;
2205
2206         if (!this->bch)
2207                 desc = dmaengine_prep_slave_sg(channel, &transfer->sgl, 1,
2208                                              DMA_DEV_TO_MEM,
2209                                              MXS_DMA_CTRL_WAIT4END);
2210
2211         return desc;
2212 }
2213
2214 static struct dma_async_tx_descriptor *gpmi_chain_data_write(
2215         struct gpmi_nand_data *this, const void *buf, int raw_len)
2216 {
2217         struct dma_chan *channel = get_dma_chan(this);
2218         struct dma_async_tx_descriptor *desc;
2219         struct gpmi_transfer *transfer;
2220         u32 pio[6] = {};
2221
2222         transfer = get_next_transfer(this);
2223         if (!transfer)
2224                 return NULL;
2225
2226         transfer->direction = DMA_TO_DEVICE;
2227
2228         prepare_data_dma(this, buf, raw_len, &transfer->sgl, DMA_TO_DEVICE);
2229
2230         pio[0] = BF_GPMI_CTRL0_COMMAND_MODE(BV_GPMI_CTRL0_COMMAND_MODE__WRITE)
2231                 | BM_GPMI_CTRL0_WORD_LENGTH
2232                 | BF_GPMI_CTRL0_CS(this->nand.cur_cs, this)
2233                 | BF_GPMI_CTRL0_LOCK_CS(LOCK_CS_ENABLE, this)
2234                 | BF_GPMI_CTRL0_ADDRESS(BV_GPMI_CTRL0_ADDRESS__NAND_DATA)
2235                 | BF_GPMI_CTRL0_XFER_COUNT(raw_len);
2236
2237         if (this->bch) {
2238                 pio[2] = BM_GPMI_ECCCTRL_ENABLE_ECC
2239                         | BF_GPMI_ECCCTRL_ECC_CMD(BV_GPMI_ECCCTRL_ECC_CMD__BCH_ENCODE)
2240                         | BF_GPMI_ECCCTRL_BUFFER_MASK(BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_PAGE |
2241                                         BV_GPMI_ECCCTRL_BUFFER_MASK__BCH_AUXONLY);
2242                 pio[3] = raw_len;
2243                 pio[4] = transfer->sgl.dma_address;
2244                 pio[5] = this->auxiliary_phys;
2245         }
2246
2247         desc = mxs_dmaengine_prep_pio(channel, pio, ARRAY_SIZE(pio),
2248                                       DMA_TRANS_NONE,
2249                                       (this->bch ? MXS_DMA_CTRL_WAIT4END : 0));
2250         if (!desc)
2251                 return NULL;
2252
2253         if (!this->bch)
2254                 desc = dmaengine_prep_slave_sg(channel, &transfer->sgl, 1,
2255                                                DMA_MEM_TO_DEV,
2256                                                MXS_DMA_CTRL_WAIT4END);
2257
2258         return desc;
2259 }
2260
2261 static int gpmi_nfc_exec_op(struct nand_chip *chip,
2262                              const struct nand_operation *op,
2263                              bool check_only)
2264 {
2265         const struct nand_op_instr *instr;
2266         struct gpmi_nand_data *this = nand_get_controller_data(chip);
2267         struct dma_async_tx_descriptor *desc = NULL;
2268         int i, ret, buf_len = 0, nbufs = 0;
2269         u8 cmd = 0;
2270         void *buf_read = NULL;
2271         const void *buf_write = NULL;
2272         bool direct = false;
2273         struct completion *dma_completion, *bch_completion;
2274         unsigned long to;
2275
2276         if (check_only)
2277                 return 0;
2278
2279         this->ntransfers = 0;
2280         for (i = 0; i < GPMI_MAX_TRANSFERS; i++)
2281                 this->transfers[i].direction = DMA_NONE;
2282
2283         ret = pm_runtime_get_sync(this->dev);
2284         if (ret < 0) {
2285                 pm_runtime_put_noidle(this->dev);
2286                 return ret;
2287         }
2288
2289         /*
2290          * This driver currently supports only one NAND chip, and all dies share
2291          * the same configuration. So once the timings have been applied on the
2292          * controller side, they will not change anymore. When that assumption no
2293          * longer holds, the check on must_apply_timings will have to be dropped.
2294          */
2295         if (this->hw.must_apply_timings) {
2296                 this->hw.must_apply_timings = false;
2297                 ret = gpmi_nfc_apply_timings(this);
2298                 if (ret)
2299                         goto out_pm;
2300         }
2301
2302         dev_dbg(this->dev, "%s: %d instructions\n", __func__, op->ninstrs);
2303
2304         for (i = 0; i < op->ninstrs; i++) {
2305                 instr = &op->instrs[i];
2306
2307                 nand_op_trace("  ", instr);
2308
2309                 switch (instr->type) {
2310                 case NAND_OP_WAITRDY_INSTR:
2311                         desc = gpmi_chain_wait_ready(this);
2312                         break;
2313                 case NAND_OP_CMD_INSTR:
2314                         cmd = instr->ctx.cmd.opcode;
2315
2316                         /*
2317                          * When this command has an address cycle, chain it
2318                          * together with that address cycle.
2319                          */
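                        /*
                         * For example, a page read issues CMD 0x00 plus its
                         * address cycles as one chained transfer here, while
                         * the trailing CMD 0x30 (which has no address cycles)
                         * goes out on its own.
                         */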
2320                         if (i + 1 != op->ninstrs &&
2321                             op->instrs[i + 1].type == NAND_OP_ADDR_INSTR)
2322                                 continue;
2323
2324                         desc = gpmi_chain_command(this, cmd, NULL, 0);
2325
2326                         break;
2327                 case NAND_OP_ADDR_INSTR:
2328                         desc = gpmi_chain_command(this, cmd, instr->ctx.addr.addrs,
2329                                                   instr->ctx.addr.naddrs);
2330                         break;
2331                 case NAND_OP_DATA_OUT_INSTR:
2332                         buf_write = instr->ctx.data.buf.out;
2333                         buf_len = instr->ctx.data.len;
2334                         nbufs++;
2335
2336                         desc = gpmi_chain_data_write(this, buf_write, buf_len);
2337
2338                         break;
2339                 case NAND_OP_DATA_IN_INSTR:
2340                         if (!instr->ctx.data.len)
2341                                 break;
2342                         buf_read = instr->ctx.data.buf.in;
2343                         buf_len = instr->ctx.data.len;
2344                         nbufs++;
2345
2346                         desc = gpmi_chain_data_read(this, buf_read, buf_len,
2347                                                    &direct);
2348                         break;
2349                 }
2350
2351                 if (!desc) {
2352                         ret = -ENXIO;
2353                         goto unmap;
2354                 }
2355         }
2356
2357         dev_dbg(this->dev, "%s setup done\n", __func__);
2358
2359         if (nbufs > 1) {
2360                 dev_err(this->dev, "Multiple data instructions not supported\n");
2361                 ret = -EINVAL;
2362                 goto unmap;
2363         }
2364
2365         if (this->bch) {
2366                 writel(this->bch_flashlayout0,
2367                        this->resources.bch_regs + HW_BCH_FLASH0LAYOUT0);
2368                 writel(this->bch_flashlayout1,
2369                        this->resources.bch_regs + HW_BCH_FLASH0LAYOUT1);
2370         }
2371
2372         desc->callback = dma_irq_callback;
2373         desc->callback_param = this;
2374         dma_completion = &this->dma_done;
2375         bch_completion = NULL;
2376
2377         init_completion(dma_completion);
2378
2379         if (this->bch && buf_read) {
2380                 writel(BM_BCH_CTRL_COMPLETE_IRQ_EN,
2381                        this->resources.bch_regs + HW_BCH_CTRL_SET);
2382                 bch_completion = &this->bch_done;
2383                 init_completion(bch_completion);
2384         }
2385
2386         dmaengine_submit(desc);
2387         dma_async_issue_pending(get_dma_chan(this));
2388
2389         to = wait_for_completion_timeout(dma_completion, msecs_to_jiffies(1000));
2390         if (!to) {
2391                 dev_err(this->dev, "DMA timeout, last DMA\n");
2392                 gpmi_dump_info(this);
2393                 ret = -ETIMEDOUT;
2394                 goto unmap;
2395         }
2396
2397         if (this->bch && buf_read) {
2398                 to = wait_for_completion_timeout(bch_completion, msecs_to_jiffies(1000));
2399                 if (!to) {
2400                         dev_err(this->dev, "BCH timeout, last DMA\n");
2401                         gpmi_dump_info(this);
2402                         ret = -ETIMEDOUT;
2403                         goto unmap;
2404                 }
2405         }
2406
2407         writel(BM_BCH_CTRL_COMPLETE_IRQ_EN,
2408                this->resources.bch_regs + HW_BCH_CTRL_CLR);
2409         gpmi_clear_bch(this);
2410
2411         ret = 0;
2412
2413 unmap:
2414         for (i = 0; i < this->ntransfers; i++) {
2415                 struct gpmi_transfer *transfer = &this->transfers[i];
2416
2417                 if (transfer->direction != DMA_NONE)
2418                         dma_unmap_sg(this->dev, &transfer->sgl, 1,
2419                                      transfer->direction);
2420         }
2421
2422         if (!ret && buf_read && !direct)
2423                 memcpy(buf_read, this->data_buffer_dma,
2424                        gpmi_raw_len_to_len(this, buf_len));
2425
2426         this->bch = false;
2427
2428 out_pm:
2429         pm_runtime_mark_last_busy(this->dev);
2430         pm_runtime_put_autosuspend(this->dev);
2431
2432         return ret;
2433 }
2434
2435 static const struct nand_controller_ops gpmi_nand_controller_ops = {
2436         .attach_chip = gpmi_nand_attach_chip,
2437         .setup_interface = gpmi_setup_interface,
2438         .exec_op = gpmi_nfc_exec_op,
2439 };
2440
2441 static int gpmi_nand_init(struct gpmi_nand_data *this)
2442 {
2443         struct nand_chip *chip = &this->nand;
2444         struct mtd_info  *mtd = nand_to_mtd(chip);
2445         int ret;
2446
2447         /* init the MTD data structures */
2448         mtd->name               = "gpmi-nand";
2449         mtd->dev.parent         = this->dev;
2450
2451         /* Init the nand_chip{}; we don't support a 16-bit NAND Flash bus. */
2452         nand_set_controller_data(chip, this);
2453         nand_set_flash_node(chip, this->pdev->dev.of_node);
2454         chip->legacy.block_markbad = gpmi_block_markbad;
2455         chip->badblock_pattern  = &gpmi_bbt_descr;
2456         chip->options           |= NAND_NO_SUBPAGE_WRITE;
2457
2458         /* Set up swap_block_mark; it must be set before gpmi_set_geometry(). */
2459         this->swap_block_mark = !GPMI_IS_MX23(this);
2460
2461         /*
2462          * Allocate a temporary DMA buffer for reading ID in the
2463          * nand_scan_ident().
2464          */
2465         this->bch_geometry.payload_size = 1024;
2466         this->bch_geometry.auxiliary_size = 128;
2467         ret = gpmi_alloc_dma_buffer(this);
2468         if (ret)
2469                 return ret;
2470
2471         nand_controller_init(&this->base);
2472         this->base.ops = &gpmi_nand_controller_ops;
2473         chip->controller = &this->base;
2474
2475         ret = nand_scan(chip, GPMI_IS_MX6(this) ? 2 : 1);
2476         if (ret)
2477                 goto err_out;
2478
2479         ret = nand_boot_init(this);
2480         if (ret)
2481                 goto err_nand_cleanup;
2482         ret = nand_create_bbt(chip);
2483         if (ret)
2484                 goto err_nand_cleanup;
2485
2486         ret = mtd_device_register(mtd, NULL, 0);
2487         if (ret)
2488                 goto err_nand_cleanup;
2489         return 0;
2490
2491 err_nand_cleanup:
2492         nand_cleanup(chip);
2493 err_out:
2494         gpmi_free_dma_buffer(this);
2495         return ret;
2496 }
2497
2498 static const struct of_device_id gpmi_nand_id_table[] = {
2499         {
2500                 .compatible = "fsl,imx23-gpmi-nand",
2501                 .data = &gpmi_devdata_imx23,
2502         }, {
2503                 .compatible = "fsl,imx28-gpmi-nand",
2504                 .data = &gpmi_devdata_imx28,
2505         }, {
2506                 .compatible = "fsl,imx6q-gpmi-nand",
2507                 .data = &gpmi_devdata_imx6q,
2508         }, {
2509                 .compatible = "fsl,imx6sx-gpmi-nand",
2510                 .data = &gpmi_devdata_imx6sx,
2511         }, {
2512                 .compatible = "fsl,imx7d-gpmi-nand",
2513                 .data = &gpmi_devdata_imx7d,
2514         }, {}
2515 };
2516 MODULE_DEVICE_TABLE(of, gpmi_nand_id_table);
2517
2518 static int gpmi_nand_probe(struct platform_device *pdev)
2519 {
2520         struct gpmi_nand_data *this;
2521         const struct of_device_id *of_id;
2522         int ret;
2523
2524         this = devm_kzalloc(&pdev->dev, sizeof(*this), GFP_KERNEL);
2525         if (!this)
2526                 return -ENOMEM;
2527
2528         of_id = of_match_device(gpmi_nand_id_table, &pdev->dev);
2529         if (of_id) {
2530                 this->devdata = of_id->data;
2531         } else {
2532                 dev_err(&pdev->dev, "Failed to find the right device id.\n");
2533                 return -ENODEV;
2534         }
2535
2536         platform_set_drvdata(pdev, this);
2537         this->pdev  = pdev;
2538         this->dev   = &pdev->dev;
2539
2540         ret = acquire_resources(this);
2541         if (ret)
2542                 goto exit_acquire_resources;
2543
2544         ret = __gpmi_enable_clk(this, true);
2545         if (ret)
2546                 goto exit_acquire_resources;
2547
2548         pm_runtime_set_autosuspend_delay(&pdev->dev, 500);
2549         pm_runtime_use_autosuspend(&pdev->dev);
2550         pm_runtime_set_active(&pdev->dev);
2551         pm_runtime_enable(&pdev->dev);
2552         pm_runtime_get_sync(&pdev->dev);
2553
2554         ret = gpmi_init(this);
2555         if (ret)
2556                 goto exit_nfc_init;
2557
2558         ret = gpmi_nand_init(this);
2559         if (ret)
2560                 goto exit_nfc_init;
2561
2562         pm_runtime_mark_last_busy(&pdev->dev);
2563         pm_runtime_put_autosuspend(&pdev->dev);
2564
2565         dev_info(this->dev, "driver registered.\n");
2566
2567         return 0;
2568
2569 exit_nfc_init:
2570         pm_runtime_put(&pdev->dev);
2571         pm_runtime_disable(&pdev->dev);
2572         release_resources(this);
2573 exit_acquire_resources:
2574
2575         return ret;
2576 }
2577
2578 static int gpmi_nand_remove(struct platform_device *pdev)
2579 {
2580         struct gpmi_nand_data *this = platform_get_drvdata(pdev);
2581         struct nand_chip *chip = &this->nand;
2582         int ret;
2583
2584         pm_runtime_put_sync(&pdev->dev);
2585         pm_runtime_disable(&pdev->dev);
2586
2587         ret = mtd_device_unregister(nand_to_mtd(chip));
2588         WARN_ON(ret);
2589         nand_cleanup(chip);
2590         gpmi_free_dma_buffer(this);
2591         release_resources(this);
2592         return 0;
2593 }
2594
2595 #ifdef CONFIG_PM_SLEEP
2596 static int gpmi_pm_suspend(struct device *dev)
2597 {
2598         struct gpmi_nand_data *this = dev_get_drvdata(dev);
2599
2600         release_dma_channels(this);
2601         return 0;
2602 }
2603
2604 static int gpmi_pm_resume(struct device *dev)
2605 {
2606         struct gpmi_nand_data *this = dev_get_drvdata(dev);
2607         int ret;
2608
2609         ret = acquire_dma_channels(this);
2610         if (ret < 0)
2611                 return ret;
2612
2613         /* re-init the GPMI registers */
2614         ret = gpmi_init(this);
2615         if (ret) {
2616                 dev_err(this->dev, "Error setting GPMI : %d\n", ret);
2617                 return ret;
2618         }
2619
2620         /* Set flag to get timing setup restored for next exec_op */
2621         if (this->hw.clk_rate)
2622                 this->hw.must_apply_timings = true;
2623
2624         /* re-init the BCH registers */
2625         ret = bch_set_geometry(this);
2626         if (ret) {
2627                 dev_err(this->dev, "Error setting BCH : %d\n", ret);
2628                 return ret;
2629         }
2630
2631         return 0;
2632 }
2633 #endif /* CONFIG_PM_SLEEP */
2634
2635 static int __maybe_unused gpmi_runtime_suspend(struct device *dev)
2636 {
2637         struct gpmi_nand_data *this = dev_get_drvdata(dev);
2638
2639         return __gpmi_enable_clk(this, false);
2640 }
2641
2642 static int __maybe_unused gpmi_runtime_resume(struct device *dev)
2643 {
2644         struct gpmi_nand_data *this = dev_get_drvdata(dev);
2645
2646         return __gpmi_enable_clk(this, true);
2647 }
2648
2649 static const struct dev_pm_ops gpmi_pm_ops = {
2650         SET_SYSTEM_SLEEP_PM_OPS(gpmi_pm_suspend, gpmi_pm_resume)
2651         SET_RUNTIME_PM_OPS(gpmi_runtime_suspend, gpmi_runtime_resume, NULL)
2652 };
2653
2654 static struct platform_driver gpmi_nand_driver = {
2655         .driver = {
2656                 .name = "gpmi-nand",
2657                 .pm = &gpmi_pm_ops,
2658                 .of_match_table = gpmi_nand_id_table,
2659         },
2660         .probe   = gpmi_nand_probe,
2661         .remove  = gpmi_nand_remove,
2662 };
2663 module_platform_driver(gpmi_nand_driver);
2664
2665 MODULE_AUTHOR("Freescale Semiconductor, Inc.");
2666 MODULE_DESCRIPTION("i.MX GPMI NAND Flash Controller Driver");
2667 MODULE_LICENSE("GPL");