GNU Linux-libre 5.10.215-gnu1
[releases.git] / drivers / mtd / nand / raw / nand_base.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  Overview:
4  *   This is the generic MTD driver for NAND flash devices. It should be
5  *   capable of working with almost all NAND chips currently available.
6  *
7  *      Additional technical information is available on
8  *      http://www.linux-mtd.infradead.org/doc/nand.html
9  *
10  *  Copyright (C) 2000 Steven J. Hill (sjhill@realitydiluted.com)
11  *                2002-2006 Thomas Gleixner (tglx@linutronix.de)
12  *
13  *  Credits:
14  *      David Woodhouse for adding multichip support
15  *
16  *      Aleph One Ltd. and Toby Churchill Ltd. for supporting the
17  *      rework for 2K page size chips
18  *
19  *  TODO:
20  *      Enable cached programming for 2k page size chips
21  *      Check, if mtd->ecctype should be set to MTD_ECC_HW
22  *      if we have HW ECC support.
23  *      BBT table is not serialized, has to be fixed
24  */
25
26 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
27
28 #include <linux/module.h>
29 #include <linux/delay.h>
30 #include <linux/errno.h>
31 #include <linux/err.h>
32 #include <linux/sched.h>
33 #include <linux/slab.h>
34 #include <linux/mm.h>
35 #include <linux/types.h>
36 #include <linux/mtd/mtd.h>
37 #include <linux/mtd/nand.h>
38 #include <linux/mtd/nand_ecc.h>
39 #include <linux/mtd/nand_bch.h>
40 #include <linux/interrupt.h>
41 #include <linux/bitops.h>
42 #include <linux/io.h>
43 #include <linux/mtd/partitions.h>
44 #include <linux/of.h>
45 #include <linux/gpio/consumer.h>
46
47 #include "internals.h"
48
49 static int nand_pairing_dist3_get_info(struct mtd_info *mtd, int page,
50                                        struct mtd_pairing_info *info)
51 {
52         int lastpage = (mtd->erasesize / mtd->writesize) - 1;
53         int dist = 3;
54
55         if (page == lastpage)
56                 dist = 2;
57
58         if (!page || (page & 1)) {
59                 info->group = 0;
60                 info->pair = (page + 1) / 2;
61         } else {
62                 info->group = 1;
63                 info->pair = (page + 1 - dist) / 2;
64         }
65
66         return 0;
67 }
68
69 static int nand_pairing_dist3_get_wunit(struct mtd_info *mtd,
70                                         const struct mtd_pairing_info *info)
71 {
72         int lastpair = ((mtd->erasesize / mtd->writesize) - 1) / 2;
73         int page = info->pair * 2;
74         int dist = 3;
75
76         if (!info->group && !info->pair)
77                 return 0;
78
79         if (info->pair == lastpair && info->group)
80                 dist = 2;
81
82         if (!info->group)
83                 page--;
84         else if (info->pair)
85                 page += dist - 1;
86
87         if (page >= mtd->erasesize / mtd->writesize)
88                 return -EINVAL;
89
90         return page;
91 }
92
/*
 * Generic distance-3 pairing scheme (two page groups), wired to the helpers
 * above. Intended for chips/drivers that pair pages at distance 3.
 */
const struct mtd_pairing_scheme dist3_pairing_scheme = {
	.ngroups = 2,
	.get_info = nand_pairing_dist3_get_info,
	.get_wunit = nand_pairing_dist3_get_wunit,
};
98
99 static int check_offs_len(struct nand_chip *chip, loff_t ofs, uint64_t len)
100 {
101         int ret = 0;
102
103         /* Start address must align on block boundary */
104         if (ofs & ((1ULL << chip->phys_erase_shift) - 1)) {
105                 pr_debug("%s: unaligned address\n", __func__);
106                 ret = -EINVAL;
107         }
108
109         /* Length must align on block boundary */
110         if (len & ((1ULL << chip->phys_erase_shift) - 1)) {
111                 pr_debug("%s: length not block aligned\n", __func__);
112                 ret = -EINVAL;
113         }
114
115         return ret;
116 }
117
/**
 * nand_extract_bits - Copy unaligned bits from one buffer to another one
 * @dst: destination buffer
 * @dst_off: bit offset at which the writing starts
 * @src: source buffer
 * @src_off: bit offset at which the reading starts
 * @nbits: number of bits to copy from @src to @dst
 *
 * Copy bits from one memory region to another (overlap authorized).
 */
void nand_extract_bits(u8 *dst, unsigned int dst_off, const u8 *src,
		       unsigned int src_off, unsigned int nbits)
{
	unsigned int tmp, n;

	/* Normalize both cursors to a byte pointer plus a 0-7 bit offset */
	dst += dst_off / 8;
	dst_off %= 8;
	src += src_off / 8;
	src_off %= 8;

	while (nbits) {
		/*
		 * Per iteration, copy at most what fits in both the current
		 * source byte and the current destination byte.
		 */
		n = min3(8 - dst_off, 8 - src_off, nbits);

		/* Extract n bits from the source byte... */
		tmp = (*src >> src_off) & GENMASK(n - 1, 0);
		/* ...clear the landing area in the destination byte... */
		*dst &= ~GENMASK(n - 1 + dst_off, dst_off);
		/* ...and merge the extracted bits in */
		*dst |= tmp << dst_off;

		/* Advance the destination cursor, wrapping into the next byte */
		dst_off += n;
		if (dst_off >= 8) {
			dst++;
			dst_off -= 8;
		}

		/* Same for the source cursor */
		src_off += n;
		if (src_off >= 8) {
			src++;
			src_off -= 8;
		}

		nbits -= n;
	}
}
EXPORT_SYMBOL_GPL(nand_extract_bits);
161
162 /**
163  * nand_select_target() - Select a NAND target (A.K.A. die)
164  * @chip: NAND chip object
165  * @cs: the CS line to select. Note that this CS id is always from the chip
166  *      PoV, not the controller one
167  *
168  * Select a NAND target so that further operations executed on @chip go to the
169  * selected NAND target.
170  */
171 void nand_select_target(struct nand_chip *chip, unsigned int cs)
172 {
173         /*
174          * cs should always lie between 0 and nanddev_ntargets(), when that's
175          * not the case it's a bug and the caller should be fixed.
176          */
177         if (WARN_ON(cs > nanddev_ntargets(&chip->base)))
178                 return;
179
180         chip->cur_cs = cs;
181
182         if (chip->legacy.select_chip)
183                 chip->legacy.select_chip(chip, cs);
184 }
185 EXPORT_SYMBOL_GPL(nand_select_target);
186
187 /**
188  * nand_deselect_target() - Deselect the currently selected target
189  * @chip: NAND chip object
190  *
191  * Deselect the currently selected NAND target. The result of operations
192  * executed on @chip after the target has been deselected is undefined.
193  */
194 void nand_deselect_target(struct nand_chip *chip)
195 {
196         if (chip->legacy.select_chip)
197                 chip->legacy.select_chip(chip, -1);
198
199         chip->cur_cs = -1;
200 }
201 EXPORT_SYMBOL_GPL(nand_deselect_target);
202
/**
 * nand_release_device - [GENERIC] release chip
 * @chip: NAND chip object
 *
 * Release chip lock and wake up anyone waiting on the device.
 * Counterpart of nand_get_device(): drops the controller lock first,
 * then the chip lock (reverse of the acquisition order).
 */
static void nand_release_device(struct nand_chip *chip)
{
	/* Release the controller and the chip */
	mutex_unlock(&chip->controller->lock);
	mutex_unlock(&chip->lock);
}
215
216 /**
217  * nand_bbm_get_next_page - Get the next page for bad block markers
218  * @chip: NAND chip object
219  * @page: First page to start checking for bad block marker usage
220  *
221  * Returns an integer that corresponds to the page offset within a block, for
222  * a page that is used to store bad block markers. If no more pages are
223  * available, -EINVAL is returned.
224  */
225 int nand_bbm_get_next_page(struct nand_chip *chip, int page)
226 {
227         struct mtd_info *mtd = nand_to_mtd(chip);
228         int last_page = ((mtd->erasesize - mtd->writesize) >>
229                          chip->page_shift) & chip->pagemask;
230         unsigned int bbm_flags = NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE
231                 | NAND_BBM_LASTPAGE;
232
233         if (page == 0 && !(chip->options & bbm_flags))
234                 return 0;
235         if (page == 0 && chip->options & NAND_BBM_FIRSTPAGE)
236                 return 0;
237         if (page <= 1 && chip->options & NAND_BBM_SECONDPAGE)
238                 return 1;
239         if (page <= last_page && chip->options & NAND_BBM_LASTPAGE)
240                 return last_page;
241
242         return -EINVAL;
243 }
244
245 /**
246  * nand_block_bad - [DEFAULT] Read bad block marker from the chip
247  * @chip: NAND chip object
248  * @ofs: offset from device start
249  *
250  * Check, if the block is bad.
251  */
252 static int nand_block_bad(struct nand_chip *chip, loff_t ofs)
253 {
254         int first_page, page_offset;
255         int res;
256         u8 bad;
257
258         first_page = (int)(ofs >> chip->page_shift) & chip->pagemask;
259         page_offset = nand_bbm_get_next_page(chip, 0);
260
261         while (page_offset >= 0) {
262                 res = chip->ecc.read_oob(chip, first_page + page_offset);
263                 if (res < 0)
264                         return res;
265
266                 bad = chip->oob_poi[chip->badblockpos];
267
268                 if (likely(chip->badblockbits == 8))
269                         res = bad != 0xFF;
270                 else
271                         res = hweight8(bad) < chip->badblockbits;
272                 if (res)
273                         return res;
274
275                 page_offset = nand_bbm_get_next_page(chip, page_offset + 1);
276         }
277
278         return 0;
279 }
280
281 static int nand_isbad_bbm(struct nand_chip *chip, loff_t ofs)
282 {
283         if (chip->options & NAND_NO_BBM_QUIRK)
284                 return 0;
285
286         if (chip->legacy.block_bad)
287                 return chip->legacy.block_bad(chip, ofs);
288
289         return nand_block_bad(chip, ofs);
290 }
291
/**
 * nand_get_device - [GENERIC] Get chip for selected access
 * @chip: NAND chip structure
 *
 * Lock the device and its controller for exclusive access. If the chip is
 * suspended, block until it has been resumed before taking the locks, so
 * this function always succeeds (it has no return value).
 */
static void nand_get_device(struct nand_chip *chip)
{
	/* Wait until the device is resumed. */
	while (1) {
		mutex_lock(&chip->lock);
		if (!chip->suspended) {
			/* Lock order: chip lock first, then controller lock */
			mutex_lock(&chip->controller->lock);
			return;
		}
		mutex_unlock(&chip->lock);

		/*
		 * Sleep until resume; re-check under the lock on the next
		 * loop iteration since the chip may suspend again.
		 */
		wait_event(chip->resume_wq, !chip->suspended);
	}
}
314
315 /**
316  * nand_check_wp - [GENERIC] check if the chip is write protected
317  * @chip: NAND chip object
318  *
319  * Check, if the device is write protected. The function expects, that the
320  * device is already selected.
321  */
322 static int nand_check_wp(struct nand_chip *chip)
323 {
324         u8 status;
325         int ret;
326
327         /* Broken xD cards report WP despite being writable */
328         if (chip->options & NAND_BROKEN_XD)
329                 return 0;
330
331         /* Check the WP bit */
332         ret = nand_status_op(chip, &status);
333         if (ret)
334                 return ret;
335
336         return status & NAND_STATUS_WP ? 0 : 1;
337 }
338
339 /**
340  * nand_fill_oob - [INTERN] Transfer client buffer to oob
341  * @chip: NAND chip object
342  * @oob: oob data buffer
343  * @len: oob data write length
344  * @ops: oob ops structure
345  */
346 static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob, size_t len,
347                               struct mtd_oob_ops *ops)
348 {
349         struct mtd_info *mtd = nand_to_mtd(chip);
350         int ret;
351
352         /*
353          * Initialise to all 0xFF, to avoid the possibility of left over OOB
354          * data from a previous OOB read.
355          */
356         memset(chip->oob_poi, 0xff, mtd->oobsize);
357
358         switch (ops->mode) {
359
360         case MTD_OPS_PLACE_OOB:
361         case MTD_OPS_RAW:
362                 memcpy(chip->oob_poi + ops->ooboffs, oob, len);
363                 return oob + len;
364
365         case MTD_OPS_AUTO_OOB:
366                 ret = mtd_ooblayout_set_databytes(mtd, oob, chip->oob_poi,
367                                                   ops->ooboffs, len);
368                 BUG_ON(ret);
369                 return oob + len;
370
371         default:
372                 BUG();
373         }
374         return NULL;
375 }
376
/**
 * nand_do_write_oob - [MTD Interface] NAND write out-of-band
 * @chip: NAND chip object
 * @to: offset to write to
 * @ops: oob operation description structure
 *
 * NAND write out-of-band. Returns 0 on success, -EINVAL for an out-of-page
 * write, -EROFS when the chip is write protected, or the error from the
 * underlying reset/OOB-write operation.
 */
static int nand_do_write_oob(struct nand_chip *chip, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int chipnr, page, status, len, ret;

	pr_debug("%s: to = 0x%08x, len = %i\n",
			 __func__, (unsigned int)to, (int)ops->ooblen);

	/* Usable OOB bytes for this mode (raw/place vs. auto layout) */
	len = mtd_oobavail(mtd, ops);

	/* Do not allow write past end of page */
	if ((ops->ooboffs + ops->ooblen) > len) {
		pr_debug("%s: attempt to write past end of page\n",
				__func__);
		return -EINVAL;
	}

	/* Which die of a multi-die package holds the target offset */
	chipnr = (int)(to >> chip->chip_shift);

	/*
	 * Reset the chip. Some chips (like the Toshiba TC5832DC found in one
	 * of my DiskOnChip 2000 test units) will clear the whole data page too
	 * if we don't do this. I have no clue why, but I seem to have 'fixed'
	 * it in the doc2000 driver in August 1999.  dwmw2.
	 */
	ret = nand_reset(chip, chipnr);
	if (ret)
		return ret;

	nand_select_target(chip, chipnr);

	/* Shift to get page */
	page = (int)(to >> chip->page_shift);

	/* Check, if it is write protected */
	if (nand_check_wp(chip)) {
		nand_deselect_target(chip);
		return -EROFS;
	}

	/* Invalidate the page cache, if we write to the cached page */
	if (page == chip->pagecache.page)
		chip->pagecache.page = -1;

	/* Lay out the client OOB data into chip->oob_poi */
	nand_fill_oob(chip, ops->oobbuf, ops->ooblen, ops);

	/* Despite its name, 'status' is an errno-style return code here */
	if (ops->mode == MTD_OPS_RAW)
		status = chip->ecc.write_oob_raw(chip, page & chip->pagemask);
	else
		status = chip->ecc.write_oob(chip, page & chip->pagemask);

	nand_deselect_target(chip);

	if (status)
		return status;

	ops->oobretlen = ops->ooblen;

	return 0;
}
446
447 /**
448  * nand_default_block_markbad - [DEFAULT] mark a block bad via bad block marker
449  * @chip: NAND chip object
450  * @ofs: offset from device start
451  *
452  * This is the default implementation, which can be overridden by a hardware
453  * specific driver. It provides the details for writing a bad block marker to a
454  * block.
455  */
456 static int nand_default_block_markbad(struct nand_chip *chip, loff_t ofs)
457 {
458         struct mtd_info *mtd = nand_to_mtd(chip);
459         struct mtd_oob_ops ops;
460         uint8_t buf[2] = { 0, 0 };
461         int ret = 0, res, page_offset;
462
463         memset(&ops, 0, sizeof(ops));
464         ops.oobbuf = buf;
465         ops.ooboffs = chip->badblockpos;
466         if (chip->options & NAND_BUSWIDTH_16) {
467                 ops.ooboffs &= ~0x01;
468                 ops.len = ops.ooblen = 2;
469         } else {
470                 ops.len = ops.ooblen = 1;
471         }
472         ops.mode = MTD_OPS_PLACE_OOB;
473
474         page_offset = nand_bbm_get_next_page(chip, 0);
475
476         while (page_offset >= 0) {
477                 res = nand_do_write_oob(chip,
478                                         ofs + (page_offset * mtd->writesize),
479                                         &ops);
480
481                 if (!ret)
482                         ret = res;
483
484                 page_offset = nand_bbm_get_next_page(chip, page_offset + 1);
485         }
486
487         return ret;
488 }
489
490 /**
491  * nand_markbad_bbm - mark a block by updating the BBM
492  * @chip: NAND chip object
493  * @ofs: offset of the block to mark bad
494  */
495 int nand_markbad_bbm(struct nand_chip *chip, loff_t ofs)
496 {
497         if (chip->legacy.block_markbad)
498                 return chip->legacy.block_markbad(chip, ofs);
499
500         return nand_default_block_markbad(chip, ofs);
501 }
502
/**
 * nand_block_markbad_lowlevel - mark a block bad
 * @chip: NAND chip object
 * @ofs: offset from device start
 *
 * This function performs the generic NAND bad block marking steps (i.e., bad
 * block table(s) and/or marker(s)). We only allow the hardware driver to
 * specify how to write bad block markers to OOB (chip->legacy.block_markbad).
 *
 * We try operations in the following order:
 *
 *  (1) erase the affected block, to allow OOB marker to be written cleanly
 *  (2) write bad block marker to OOB area of affected block (unless flag
 *      NAND_BBT_NO_OOB_BBM is present)
 *  (3) update the BBT
 *
 * Note that we retain the first error encountered in (2) or (3), finish the
 * procedures, and dump the error in the end.
*/
static int nand_block_markbad_lowlevel(struct nand_chip *chip, loff_t ofs)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int res, ret = 0;

	if (!(chip->bbt_options & NAND_BBT_NO_OOB_BBM)) {
		struct erase_info einfo;

		/* Attempt erase before marking OOB */
		memset(&einfo, 0, sizeof(einfo));
		einfo.addr = ofs;
		einfo.len = 1ULL << chip->phys_erase_shift;
		/* Best-effort: the erase result is deliberately ignored */
		nand_erase_nand(chip, &einfo, 0);

		/* Write bad block marker to OOB */
		nand_get_device(chip);

		ret = nand_markbad_bbm(chip, ofs);
		nand_release_device(chip);
	}

	/* Mark block bad in BBT */
	if (chip->bbt) {
		res = nand_markbad_bbt(chip, ofs);
		/* Retain the first error only (see function comment) */
		if (!ret)
			ret = res;
	}

	/* Only bump the counter when every requested step succeeded */
	if (!ret)
		mtd->ecc_stats.badblocks++;

	return ret;
}
555
556 /**
557  * nand_block_isreserved - [GENERIC] Check if a block is marked reserved.
558  * @mtd: MTD device structure
559  * @ofs: offset from device start
560  *
561  * Check if the block is marked as reserved.
562  */
563 static int nand_block_isreserved(struct mtd_info *mtd, loff_t ofs)
564 {
565         struct nand_chip *chip = mtd_to_nand(mtd);
566
567         if (!chip->bbt)
568                 return 0;
569         /* Return info from the table */
570         return nand_isreserved_bbt(chip, ofs);
571 }
572
573 /**
574  * nand_block_checkbad - [GENERIC] Check if a block is marked bad
575  * @chip: NAND chip object
576  * @ofs: offset from device start
577  * @allowbbt: 1, if its allowed to access the bbt area
578  *
579  * Check, if the block is bad. Either by reading the bad block table or
580  * calling of the scan function.
581  */
582 static int nand_block_checkbad(struct nand_chip *chip, loff_t ofs, int allowbbt)
583 {
584         /* Return info from the table */
585         if (chip->bbt)
586                 return nand_isbad_bbt(chip, ofs, allowbbt);
587
588         return nand_isbad_bbm(chip, ofs);
589 }
590
591 /**
592  * nand_soft_waitrdy - Poll STATUS reg until RDY bit is set to 1
593  * @chip: NAND chip structure
594  * @timeout_ms: Timeout in ms
595  *
596  * Poll the STATUS register using ->exec_op() until the RDY bit becomes 1.
597  * If that does not happen whitin the specified timeout, -ETIMEDOUT is
598  * returned.
599  *
600  * This helper is intended to be used when the controller does not have access
601  * to the NAND R/B pin.
602  *
603  * Be aware that calling this helper from an ->exec_op() implementation means
604  * ->exec_op() must be re-entrant.
605  *
606  * Return 0 if the NAND chip is ready, a negative error otherwise.
607  */
608 int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms)
609 {
610         const struct nand_sdr_timings *timings;
611         u8 status = 0;
612         int ret;
613
614         if (!nand_has_exec_op(chip))
615                 return -ENOTSUPP;
616
617         /* Wait tWB before polling the STATUS reg. */
618         timings = nand_get_sdr_timings(nand_get_interface_config(chip));
619         ndelay(PSEC_TO_NSEC(timings->tWB_max));
620
621         ret = nand_status_op(chip, NULL);
622         if (ret)
623                 return ret;
624
625         /*
626          * +1 below is necessary because if we are now in the last fraction
627          * of jiffy and msecs_to_jiffies is 1 then we will wait only that
628          * small jiffy fraction - possibly leading to false timeout
629          */
630         timeout_ms = jiffies + msecs_to_jiffies(timeout_ms) + 1;
631         do {
632                 ret = nand_read_data_op(chip, &status, sizeof(status), true,
633                                         false);
634                 if (ret)
635                         break;
636
637                 if (status & NAND_STATUS_READY)
638                         break;
639
640                 /*
641                  * Typical lowest execution time for a tR on most NANDs is 10us,
642                  * use this as polling delay before doing something smarter (ie.
643                  * deriving a delay from the timeout value, timeout_ms/ratio).
644                  */
645                 udelay(10);
646         } while (time_before(jiffies, timeout_ms));
647
648         /*
649          * We have to exit READ_STATUS mode in order to read real data on the
650          * bus in case the WAITRDY instruction is preceding a DATA_IN
651          * instruction.
652          */
653         nand_exit_status_op(chip);
654
655         if (ret)
656                 return ret;
657
658         return status & NAND_STATUS_READY ? 0 : -ETIMEDOUT;
659 };
660 EXPORT_SYMBOL_GPL(nand_soft_waitrdy);
661
662 /**
663  * nand_gpio_waitrdy - Poll R/B GPIO pin until ready
664  * @chip: NAND chip structure
665  * @gpiod: GPIO descriptor of R/B pin
666  * @timeout_ms: Timeout in ms
667  *
668  * Poll the R/B GPIO pin until it becomes ready. If that does not happen
669  * whitin the specified timeout, -ETIMEDOUT is returned.
670  *
671  * This helper is intended to be used when the controller has access to the
672  * NAND R/B pin over GPIO.
673  *
674  * Return 0 if the R/B pin indicates chip is ready, a negative error otherwise.
675  */
676 int nand_gpio_waitrdy(struct nand_chip *chip, struct gpio_desc *gpiod,
677                       unsigned long timeout_ms)
678 {
679
680         /*
681          * Wait until R/B pin indicates chip is ready or timeout occurs.
682          * +1 below is necessary because if we are now in the last fraction
683          * of jiffy and msecs_to_jiffies is 1 then we will wait only that
684          * small jiffy fraction - possibly leading to false timeout.
685          */
686         timeout_ms = jiffies + msecs_to_jiffies(timeout_ms) + 1;
687         do {
688                 if (gpiod_get_value_cansleep(gpiod))
689                         return 0;
690
691                 cond_resched();
692         } while (time_before(jiffies, timeout_ms));
693
694         return gpiod_get_value_cansleep(gpiod) ? 0 : -ETIMEDOUT;
695 };
696 EXPORT_SYMBOL_GPL(nand_gpio_waitrdy);
697
698 /**
699  * panic_nand_wait - [GENERIC] wait until the command is done
700  * @chip: NAND chip structure
701  * @timeo: timeout
702  *
703  * Wait for command done. This is a helper function for nand_wait used when
704  * we are in interrupt context. May happen when in panic and trying to write
705  * an oops through mtdoops.
706  */
707 void panic_nand_wait(struct nand_chip *chip, unsigned long timeo)
708 {
709         int i;
710         for (i = 0; i < timeo; i++) {
711                 if (chip->legacy.dev_ready) {
712                         if (chip->legacy.dev_ready(chip))
713                                 break;
714                 } else {
715                         int ret;
716                         u8 status;
717
718                         ret = nand_read_data_op(chip, &status, sizeof(status),
719                                                 true, false);
720                         if (ret)
721                                 return;
722
723                         if (status & NAND_STATUS_READY)
724                                 break;
725                 }
726                 mdelay(1);
727         }
728 }
729
730 static bool nand_supports_get_features(struct nand_chip *chip, int addr)
731 {
732         return (chip->parameters.supports_set_get_features &&
733                 test_bit(addr, chip->parameters.get_feature_list));
734 }
735
736 static bool nand_supports_set_features(struct nand_chip *chip, int addr)
737 {
738         return (chip->parameters.supports_set_get_features &&
739                 test_bit(addr, chip->parameters.set_feature_list));
740 }
741
/**
 * nand_reset_interface - Reset data interface and timings
 * @chip: The NAND chip
 * @chipnr: Internal die id
 *
 * Reset the Data interface and timings to ONFI mode 0.
 *
 * Returns 0 for success or negative error code otherwise.
 */
static int nand_reset_interface(struct nand_chip *chip, int chipnr)
{
	const struct nand_controller_ops *ops = chip->controller->ops;
	int ret;

	/* Nothing to do when the controller cannot change timings */
	if (!nand_controller_can_setup_interface(chip))
		return 0;

	/*
	 * The ONFI specification says:
	 * "
	 * To transition from NV-DDR or NV-DDR2 to the SDR data
	 * interface, the host shall use the Reset (FFh) command
	 * using SDR timing mode 0. A device in any timing mode is
	 * required to recognize Reset (FFh) command issued in SDR
	 * timing mode 0.
	 * "
	 *
	 * Configure the data interface in SDR mode and set the
	 * timings to timing mode 0.
	 */

	/* Record the mode-0 config before asking the controller to apply it */
	chip->current_interface_config = nand_get_reset_interface_config();
	ret = ops->setup_interface(chip, chipnr,
				   chip->current_interface_config);
	if (ret)
		pr_err("Failed to configure data interface to SDR timing mode 0\n");

	return ret;
}
781
/**
 * nand_setup_interface - Setup the best data interface and timings
 * @chip: The NAND chip
 * @chipnr: Internal die id
 *
 * Configure what has been reported to be the best data interface and NAND
 * timings supported by the chip and the driver.
 *
 * Returns 0 for success or negative error code otherwise.
 */
static int nand_setup_interface(struct nand_chip *chip, int chipnr)
{
	const struct nand_controller_ops *ops = chip->controller->ops;
	u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = { };
	int ret;

	if (!nand_controller_can_setup_interface(chip))
		return 0;

	/*
	 * A nand_reset_interface() put both the NAND chip and the NAND
	 * controller in timings mode 0. If the default mode for this chip is
	 * also 0, no need to proceed to the change again. Plus, at probe time,
	 * nand_setup_interface() uses ->set/get_features() which would
	 * fail anyway as the parameter page is not available yet.
	 */
	if (!chip->best_interface_config)
		return 0;

	tmode_param[0] = chip->best_interface_config->timings.mode;

	/* Change the mode on the chip side (if supported by the NAND chip) */
	if (nand_supports_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE)) {
		nand_select_target(chip, chipnr);
		ret = nand_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
					tmode_param);
		nand_deselect_target(chip);
		if (ret)
			return ret;
	}

	/* Change the mode on the controller side */
	ret = ops->setup_interface(chip, chipnr, chip->best_interface_config);
	if (ret)
		return ret;

	/* Check the mode has been accepted by the chip, if supported */
	if (!nand_supports_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE))
		goto update_interface_config;

	/* Re-read the timing mode the chip actually applied */
	memset(tmode_param, 0, ONFI_SUBFEATURE_PARAM_LEN);
	nand_select_target(chip, chipnr);
	ret = nand_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
				tmode_param);
	nand_deselect_target(chip);
	if (ret)
		goto err_reset_chip;

	if (tmode_param[0] != chip->best_interface_config->timings.mode) {
		pr_warn("timing mode %d not acknowledged by the NAND chip\n",
			chip->best_interface_config->timings.mode);
		goto err_reset_chip;
	}

update_interface_config:
	chip->current_interface_config = chip->best_interface_config;

	return 0;

err_reset_chip:
	/*
	 * Fallback to mode 0 if the chip explicitly did not ack the chosen
	 * timing mode.
	 */
	nand_reset_interface(chip, chipnr);
	nand_select_target(chip, chipnr);
	nand_reset_op(chip);
	nand_deselect_target(chip);

	return ret;
}
863
/**
 * nand_choose_best_sdr_timings - Pick up the best SDR timings that both the
 *                                NAND controller and the NAND chip support
 * @chip: the NAND chip
 * @iface: the interface configuration (can eventually be updated)
 * @spec_timings: specific timings, when not fitting the ONFI specification
 *
 * If specific timings are provided, use them. Otherwise, retrieve supported
 * timing modes from ONFI information.
 */
int nand_choose_best_sdr_timings(struct nand_chip *chip,
				 struct nand_interface_config *iface,
				 struct nand_sdr_timings *spec_timings)
{
	const struct nand_controller_ops *ops = chip->controller->ops;
	int best_mode = 0, mode, ret;

	iface->type = NAND_SDR_IFACE;

	if (spec_timings) {
		iface->timings.sdr = *spec_timings;
		iface->timings.mode = onfi_find_closest_sdr_mode(spec_timings);

		/* Verify the controller supports the requested interface */
		ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY,
					   iface);
		if (!ret) {
			chip->best_interface_config = iface;
			return ret;
		}

		/* Fallback to slower modes */
		best_mode = iface->timings.mode;
	} else if (chip->parameters.onfi) {
		/* Highest async timing mode advertised in the ONFI params */
		best_mode = fls(chip->parameters.onfi->async_timing_mode) - 1;
	}

	/* Walk down from the best candidate until the controller accepts one */
	for (mode = best_mode; mode >= 0; mode--) {
		onfi_fill_interface_config(chip, iface, NAND_SDR_IFACE, mode);

		ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY,
					   iface);
		if (!ret)
			break;
	}

	/*
	 * NOTE(review): if the controller rejected every mode (loop exhausts
	 * with ret != 0), @iface still holds the mode-0 fill from the last
	 * iteration, is installed as best config, and 0 is returned - confirm
	 * this unconditional fallback is intentional.
	 */
	chip->best_interface_config = iface;

	return 0;
}
914
915 /**
916  * nand_choose_interface_config - find the best data interface and timings
917  * @chip: The NAND chip
918  *
919  * Find the best data interface and NAND timings supported by the chip
920  * and the driver. Eventually let the NAND manufacturer driver propose his own
921  * set of timings.
922  *
923  * After this function nand_chip->interface_config is initialized with the best
924  * timing mode available.
925  *
926  * Returns 0 for success or negative error code otherwise.
927  */
928 static int nand_choose_interface_config(struct nand_chip *chip)
929 {
930         struct nand_interface_config *iface;
931         int ret;
932
933         if (!nand_controller_can_setup_interface(chip))
934                 return 0;
935
936         iface = kzalloc(sizeof(*iface), GFP_KERNEL);
937         if (!iface)
938                 return -ENOMEM;
939
940         if (chip->ops.choose_interface_config)
941                 ret = chip->ops.choose_interface_config(chip, iface);
942         else
943                 ret = nand_choose_best_sdr_timings(chip, iface, NULL);
944
945         if (ret)
946                 kfree(iface);
947
948         return ret;
949 }
950
951 /**
952  * nand_fill_column_cycles - fill the column cycles of an address
953  * @chip: The NAND chip
954  * @addrs: Array of address cycles to fill
955  * @offset_in_page: The offset in the page
956  *
957  * Fills the first or the first two bytes of the @addrs field depending
958  * on the NAND bus width and the page size.
959  *
960  * Returns the number of cycles needed to encode the column, or a negative
961  * error code in case one of the arguments is invalid.
962  */
963 static int nand_fill_column_cycles(struct nand_chip *chip, u8 *addrs,
964                                    unsigned int offset_in_page)
965 {
966         struct mtd_info *mtd = nand_to_mtd(chip);
967
968         /* Make sure the offset is less than the actual page size. */
969         if (offset_in_page > mtd->writesize + mtd->oobsize)
970                 return -EINVAL;
971
972         /*
973          * On small page NANDs, there's a dedicated command to access the OOB
974          * area, and the column address is relative to the start of the OOB
975          * area, not the start of the page. Asjust the address accordingly.
976          */
977         if (mtd->writesize <= 512 && offset_in_page >= mtd->writesize)
978                 offset_in_page -= mtd->writesize;
979
980         /*
981          * The offset in page is expressed in bytes, if the NAND bus is 16-bit
982          * wide, then it must be divided by 2.
983          */
984         if (chip->options & NAND_BUSWIDTH_16) {
985                 if (WARN_ON(offset_in_page % 2))
986                         return -EINVAL;
987
988                 offset_in_page /= 2;
989         }
990
991         addrs[0] = offset_in_page;
992
993         /*
994          * Small page NANDs use 1 cycle for the columns, while large page NANDs
995          * need 2
996          */
997         if (mtd->writesize <= 512)
998                 return 1;
999
1000         addrs[1] = offset_in_page >> 8;
1001
1002         return 2;
1003 }
1004
/*
 * READ PAGE implementation for small page (<= 512 bytes) chips based on
 * ->exec_op(): one command cycle, 3 (or 4) address cycles, wait for R/B,
 * then the data read. The opcode is adjusted so the column pointer lands
 * in the right region of the page (first half, second half or OOB).
 */
static int nand_sp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
				     unsigned int offset_in_page, void *buf,
				     unsigned int len)
{
	const struct nand_sdr_timings *sdr =
		nand_get_sdr_timings(nand_get_interface_config(chip));
	struct mtd_info *mtd = nand_to_mtd(chip);
	u8 addrs[4];
	struct nand_op_instr instrs[] = {
		NAND_OP_CMD(NAND_CMD_READ0, 0),
		NAND_OP_ADDR(3, addrs, PSEC_TO_NSEC(sdr->tWB_max)),
		NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
				 PSEC_TO_NSEC(sdr->tRR_min)),
		NAND_OP_DATA_IN(len, buf, 0),
	};
	struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
	int ret;

	/* Drop the DATA_IN instruction if len is set to 0. */
	if (!len)
		op.ninstrs--;

	/*
	 * READOOB addresses the OOB area, READ1 the second 256-byte half of
	 * an 8-bit wide page; READ0 covers everything else.
	 */
	if (offset_in_page >= mtd->writesize)
		instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
	else if (offset_in_page >= 256 &&
		 !(chip->options & NAND_BUSWIDTH_16))
		instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;

	ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
	if (ret < 0)
		return ret;

	/* Small page: 1 column cycle (addrs[0]), then 2 or 3 row cycles. */
	addrs[1] = page;
	addrs[2] = page >> 8;

	if (chip->options & NAND_ROW_ADDR_3) {
		addrs[3] = page >> 16;
		instrs[1].ctx.addr.naddrs++;
	}

	return nand_exec_op(chip, &op);
}
1047
/*
 * READ PAGE implementation for large page (> 512 bytes) chips based on
 * ->exec_op(): READ0, 4 (or 5) address cycles, READSTART, wait for R/B,
 * then the data read.
 */
static int nand_lp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
				     unsigned int offset_in_page, void *buf,
				     unsigned int len)
{
	const struct nand_sdr_timings *sdr =
		nand_get_sdr_timings(nand_get_interface_config(chip));
	u8 addrs[5];
	struct nand_op_instr instrs[] = {
		NAND_OP_CMD(NAND_CMD_READ0, 0),
		NAND_OP_ADDR(4, addrs, 0),
		NAND_OP_CMD(NAND_CMD_READSTART, PSEC_TO_NSEC(sdr->tWB_max)),
		NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
				 PSEC_TO_NSEC(sdr->tRR_min)),
		NAND_OP_DATA_IN(len, buf, 0),
	};
	struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
	int ret;

	/* Drop the DATA_IN instruction if len is set to 0. */
	if (!len)
		op.ninstrs--;

	ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
	if (ret < 0)
		return ret;

	/* Large page: 2 column cycles (addrs[0-1]), then 2 or 3 row cycles. */
	addrs[2] = page;
	addrs[3] = page >> 8;

	if (chip->options & NAND_ROW_ADDR_3) {
		addrs[4] = page >> 16;
		instrs[1].ctx.addr.naddrs++;
	}

	return nand_exec_op(chip, &op);
}
1084
1085 /**
1086  * nand_read_page_op - Do a READ PAGE operation
1087  * @chip: The NAND chip
1088  * @page: page to read
1089  * @offset_in_page: offset within the page
1090  * @buf: buffer used to store the data
1091  * @len: length of the buffer
1092  *
1093  * This function issues a READ PAGE operation.
1094  * This function does not select/unselect the CS line.
1095  *
1096  * Returns 0 on success, a negative error code otherwise.
1097  */
1098 int nand_read_page_op(struct nand_chip *chip, unsigned int page,
1099                       unsigned int offset_in_page, void *buf, unsigned int len)
1100 {
1101         struct mtd_info *mtd = nand_to_mtd(chip);
1102
1103         if (len && !buf)
1104                 return -EINVAL;
1105
1106         if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1107                 return -EINVAL;
1108
1109         if (nand_has_exec_op(chip)) {
1110                 if (mtd->writesize > 512)
1111                         return nand_lp_exec_read_page_op(chip, page,
1112                                                          offset_in_page, buf,
1113                                                          len);
1114
1115                 return nand_sp_exec_read_page_op(chip, page, offset_in_page,
1116                                                  buf, len);
1117         }
1118
1119         chip->legacy.cmdfunc(chip, NAND_CMD_READ0, offset_in_page, page);
1120         if (len)
1121                 chip->legacy.read_buf(chip, buf, len);
1122
1123         return 0;
1124 }
1125 EXPORT_SYMBOL_GPL(nand_read_page_op);
1126
1127 /**
1128  * nand_read_param_page_op - Do a READ PARAMETER PAGE operation
1129  * @chip: The NAND chip
1130  * @page: parameter page to read
1131  * @buf: buffer used to store the data
1132  * @len: length of the buffer
1133  *
1134  * This function issues a READ PARAMETER PAGE operation.
1135  * This function does not select/unselect the CS line.
1136  *
1137  * Returns 0 on success, a negative error code otherwise.
1138  */
1139 int nand_read_param_page_op(struct nand_chip *chip, u8 page, void *buf,
1140                             unsigned int len)
1141 {
1142         unsigned int i;
1143         u8 *p = buf;
1144
1145         if (len && !buf)
1146                 return -EINVAL;
1147
1148         if (nand_has_exec_op(chip)) {
1149                 const struct nand_sdr_timings *sdr =
1150                         nand_get_sdr_timings(nand_get_interface_config(chip));
1151                 struct nand_op_instr instrs[] = {
1152                         NAND_OP_CMD(NAND_CMD_PARAM, 0),
1153                         NAND_OP_ADDR(1, &page, PSEC_TO_NSEC(sdr->tWB_max)),
1154                         NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
1155                                          PSEC_TO_NSEC(sdr->tRR_min)),
1156                         NAND_OP_8BIT_DATA_IN(len, buf, 0),
1157                 };
1158                 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1159
1160                 /* Drop the DATA_IN instruction if len is set to 0. */
1161                 if (!len)
1162                         op.ninstrs--;
1163
1164                 return nand_exec_op(chip, &op);
1165         }
1166
1167         chip->legacy.cmdfunc(chip, NAND_CMD_PARAM, page, -1);
1168         for (i = 0; i < len; i++)
1169                 p[i] = chip->legacy.read_byte(chip);
1170
1171         return 0;
1172 }
1173
1174 /**
1175  * nand_change_read_column_op - Do a CHANGE READ COLUMN operation
1176  * @chip: The NAND chip
1177  * @offset_in_page: offset within the page
1178  * @buf: buffer used to store the data
1179  * @len: length of the buffer
1180  * @force_8bit: force 8-bit bus access
1181  *
1182  * This function issues a CHANGE READ COLUMN operation.
1183  * This function does not select/unselect the CS line.
1184  *
1185  * Returns 0 on success, a negative error code otherwise.
1186  */
1187 int nand_change_read_column_op(struct nand_chip *chip,
1188                                unsigned int offset_in_page, void *buf,
1189                                unsigned int len, bool force_8bit)
1190 {
1191         struct mtd_info *mtd = nand_to_mtd(chip);
1192
1193         if (len && !buf)
1194                 return -EINVAL;
1195
1196         if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1197                 return -EINVAL;
1198
1199         /* Small page NANDs do not support column change. */
1200         if (mtd->writesize <= 512)
1201                 return -ENOTSUPP;
1202
1203         if (nand_has_exec_op(chip)) {
1204                 const struct nand_sdr_timings *sdr =
1205                         nand_get_sdr_timings(nand_get_interface_config(chip));
1206                 u8 addrs[2] = {};
1207                 struct nand_op_instr instrs[] = {
1208                         NAND_OP_CMD(NAND_CMD_RNDOUT, 0),
1209                         NAND_OP_ADDR(2, addrs, 0),
1210                         NAND_OP_CMD(NAND_CMD_RNDOUTSTART,
1211                                     PSEC_TO_NSEC(sdr->tCCS_min)),
1212                         NAND_OP_DATA_IN(len, buf, 0),
1213                 };
1214                 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1215                 int ret;
1216
1217                 ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
1218                 if (ret < 0)
1219                         return ret;
1220
1221                 /* Drop the DATA_IN instruction if len is set to 0. */
1222                 if (!len)
1223                         op.ninstrs--;
1224
1225                 instrs[3].ctx.data.force_8bit = force_8bit;
1226
1227                 return nand_exec_op(chip, &op);
1228         }
1229
1230         chip->legacy.cmdfunc(chip, NAND_CMD_RNDOUT, offset_in_page, -1);
1231         if (len)
1232                 chip->legacy.read_buf(chip, buf, len);
1233
1234         return 0;
1235 }
1236 EXPORT_SYMBOL_GPL(nand_change_read_column_op);
1237
1238 /**
1239  * nand_read_oob_op - Do a READ OOB operation
1240  * @chip: The NAND chip
1241  * @page: page to read
1242  * @offset_in_oob: offset within the OOB area
1243  * @buf: buffer used to store the data
1244  * @len: length of the buffer
1245  *
1246  * This function issues a READ OOB operation.
1247  * This function does not select/unselect the CS line.
1248  *
1249  * Returns 0 on success, a negative error code otherwise.
1250  */
1251 int nand_read_oob_op(struct nand_chip *chip, unsigned int page,
1252                      unsigned int offset_in_oob, void *buf, unsigned int len)
1253 {
1254         struct mtd_info *mtd = nand_to_mtd(chip);
1255
1256         if (len && !buf)
1257                 return -EINVAL;
1258
1259         if (offset_in_oob + len > mtd->oobsize)
1260                 return -EINVAL;
1261
1262         if (nand_has_exec_op(chip))
1263                 return nand_read_page_op(chip, page,
1264                                          mtd->writesize + offset_in_oob,
1265                                          buf, len);
1266
1267         chip->legacy.cmdfunc(chip, NAND_CMD_READOOB, offset_in_oob, page);
1268         if (len)
1269                 chip->legacy.read_buf(chip, buf, len);
1270
1271         return 0;
1272 }
1273 EXPORT_SYMBOL_GPL(nand_read_oob_op);
1274
/*
 * Low-level PROG PAGE helper based on ->exec_op().
 *
 * Builds the full instruction sequence — an optional read-mode command to
 * position the column pointer on small page devices, SEQIN, the address
 * cycles, an optional data-out phase, and, when @prog is true, PAGEPROG
 * followed by a R/B wait — then trims the unneeded instructions before
 * executing.
 *
 * Returns a negative error code on failure, 0 when @prog is false, and the
 * chip status byte (a non-negative value) when @prog is true.
 */
static int nand_exec_prog_page_op(struct nand_chip *chip, unsigned int page,
				  unsigned int offset_in_page, const void *buf,
				  unsigned int len, bool prog)
{
	const struct nand_sdr_timings *sdr =
		nand_get_sdr_timings(nand_get_interface_config(chip));
	struct mtd_info *mtd = nand_to_mtd(chip);
	u8 addrs[5] = {};
	struct nand_op_instr instrs[] = {
		/*
		 * The first instruction will be dropped if we're dealing
		 * with a large page NAND and adjusted if we're dealing
		 * with a small page NAND and the page offset is > 255.
		 */
		NAND_OP_CMD(NAND_CMD_READ0, 0),
		NAND_OP_CMD(NAND_CMD_SEQIN, 0),
		NAND_OP_ADDR(0, addrs, PSEC_TO_NSEC(sdr->tADL_min)),
		NAND_OP_DATA_OUT(len, buf, 0),
		NAND_OP_CMD(NAND_CMD_PAGEPROG, PSEC_TO_NSEC(sdr->tWB_max)),
		NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tPROG_max), 0),
	};
	struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
	/* Column cycles first; row cycles are appended below. */
	int naddrs = nand_fill_column_cycles(chip, addrs, offset_in_page);
	int ret;
	u8 status;

	if (naddrs < 0)
		return naddrs;

	addrs[naddrs++] = page;
	addrs[naddrs++] = page >> 8;
	if (chip->options & NAND_ROW_ADDR_3)
		addrs[naddrs++] = page >> 16;

	instrs[2].ctx.addr.naddrs = naddrs;

	/* Drop the last two instructions if we're not programming the page. */
	if (!prog) {
		op.ninstrs -= 2;
		/* Also drop the DATA_OUT instruction if empty. */
		if (!len)
			op.ninstrs--;
	}

	if (mtd->writesize <= 512) {
		/*
		 * Small pages need some more tweaking: we have to adjust the
		 * first instruction depending on the page offset we're trying
		 * to access.
		 */
		if (offset_in_page >= mtd->writesize)
			instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
		else if (offset_in_page >= 256 &&
			 !(chip->options & NAND_BUSWIDTH_16))
			instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;
	} else {
		/*
		 * Drop the first command if we're dealing with a large page
		 * NAND.
		 */
		op.instrs++;
		op.ninstrs--;
	}

	ret = nand_exec_op(chip, &op);
	if (!prog || ret)
		return ret;

	/* Full program requested: read back the chip status for the caller. */
	ret = nand_status_op(chip, &status);
	if (ret)
		return ret;

	return status;
}
1349
1350 /**
1351  * nand_prog_page_begin_op - starts a PROG PAGE operation
1352  * @chip: The NAND chip
1353  * @page: page to write
1354  * @offset_in_page: offset within the page
1355  * @buf: buffer containing the data to write to the page
1356  * @len: length of the buffer
1357  *
1358  * This function issues the first half of a PROG PAGE operation.
1359  * This function does not select/unselect the CS line.
1360  *
1361  * Returns 0 on success, a negative error code otherwise.
1362  */
1363 int nand_prog_page_begin_op(struct nand_chip *chip, unsigned int page,
1364                             unsigned int offset_in_page, const void *buf,
1365                             unsigned int len)
1366 {
1367         struct mtd_info *mtd = nand_to_mtd(chip);
1368
1369         if (len && !buf)
1370                 return -EINVAL;
1371
1372         if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1373                 return -EINVAL;
1374
1375         if (nand_has_exec_op(chip))
1376                 return nand_exec_prog_page_op(chip, page, offset_in_page, buf,
1377                                               len, false);
1378
1379         chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, offset_in_page, page);
1380
1381         if (buf)
1382                 chip->legacy.write_buf(chip, buf, len);
1383
1384         return 0;
1385 }
1386 EXPORT_SYMBOL_GPL(nand_prog_page_begin_op);
1387
1388 /**
1389  * nand_prog_page_end_op - ends a PROG PAGE operation
1390  * @chip: The NAND chip
1391  *
1392  * This function issues the second half of a PROG PAGE operation.
1393  * This function does not select/unselect the CS line.
1394  *
1395  * Returns 0 on success, a negative error code otherwise.
1396  */
1397 int nand_prog_page_end_op(struct nand_chip *chip)
1398 {
1399         int ret;
1400         u8 status;
1401
1402         if (nand_has_exec_op(chip)) {
1403                 const struct nand_sdr_timings *sdr =
1404                         nand_get_sdr_timings(nand_get_interface_config(chip));
1405                 struct nand_op_instr instrs[] = {
1406                         NAND_OP_CMD(NAND_CMD_PAGEPROG,
1407                                     PSEC_TO_NSEC(sdr->tWB_max)),
1408                         NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tPROG_max), 0),
1409                 };
1410                 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1411
1412                 ret = nand_exec_op(chip, &op);
1413                 if (ret)
1414                         return ret;
1415
1416                 ret = nand_status_op(chip, &status);
1417                 if (ret)
1418                         return ret;
1419         } else {
1420                 chip->legacy.cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1);
1421                 ret = chip->legacy.waitfunc(chip);
1422                 if (ret < 0)
1423                         return ret;
1424
1425                 status = ret;
1426         }
1427
1428         if (status & NAND_STATUS_FAIL)
1429                 return -EIO;
1430
1431         return 0;
1432 }
1433 EXPORT_SYMBOL_GPL(nand_prog_page_end_op);
1434
1435 /**
1436  * nand_prog_page_op - Do a full PROG PAGE operation
1437  * @chip: The NAND chip
1438  * @page: page to write
1439  * @offset_in_page: offset within the page
1440  * @buf: buffer containing the data to write to the page
1441  * @len: length of the buffer
1442  *
1443  * This function issues a full PROG PAGE operation.
1444  * This function does not select/unselect the CS line.
1445  *
1446  * Returns 0 on success, a negative error code otherwise.
1447  */
1448 int nand_prog_page_op(struct nand_chip *chip, unsigned int page,
1449                       unsigned int offset_in_page, const void *buf,
1450                       unsigned int len)
1451 {
1452         struct mtd_info *mtd = nand_to_mtd(chip);
1453         int status;
1454
1455         if (!len || !buf)
1456                 return -EINVAL;
1457
1458         if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1459                 return -EINVAL;
1460
1461         if (nand_has_exec_op(chip)) {
1462                 status = nand_exec_prog_page_op(chip, page, offset_in_page, buf,
1463                                                 len, true);
1464         } else {
1465                 chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, offset_in_page,
1466                                      page);
1467                 chip->legacy.write_buf(chip, buf, len);
1468                 chip->legacy.cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1);
1469                 status = chip->legacy.waitfunc(chip);
1470         }
1471
1472         if (status & NAND_STATUS_FAIL)
1473                 return -EIO;
1474
1475         return 0;
1476 }
1477 EXPORT_SYMBOL_GPL(nand_prog_page_op);
1478
1479 /**
1480  * nand_change_write_column_op - Do a CHANGE WRITE COLUMN operation
1481  * @chip: The NAND chip
1482  * @offset_in_page: offset within the page
1483  * @buf: buffer containing the data to send to the NAND
1484  * @len: length of the buffer
1485  * @force_8bit: force 8-bit bus access
1486  *
1487  * This function issues a CHANGE WRITE COLUMN operation.
1488  * This function does not select/unselect the CS line.
1489  *
1490  * Returns 0 on success, a negative error code otherwise.
1491  */
1492 int nand_change_write_column_op(struct nand_chip *chip,
1493                                 unsigned int offset_in_page,
1494                                 const void *buf, unsigned int len,
1495                                 bool force_8bit)
1496 {
1497         struct mtd_info *mtd = nand_to_mtd(chip);
1498
1499         if (len && !buf)
1500                 return -EINVAL;
1501
1502         if (offset_in_page + len > mtd->writesize + mtd->oobsize)
1503                 return -EINVAL;
1504
1505         /* Small page NANDs do not support column change. */
1506         if (mtd->writesize <= 512)
1507                 return -ENOTSUPP;
1508
1509         if (nand_has_exec_op(chip)) {
1510                 const struct nand_sdr_timings *sdr =
1511                         nand_get_sdr_timings(nand_get_interface_config(chip));
1512                 u8 addrs[2];
1513                 struct nand_op_instr instrs[] = {
1514                         NAND_OP_CMD(NAND_CMD_RNDIN, 0),
1515                         NAND_OP_ADDR(2, addrs, PSEC_TO_NSEC(sdr->tCCS_min)),
1516                         NAND_OP_DATA_OUT(len, buf, 0),
1517                 };
1518                 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1519                 int ret;
1520
1521                 ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
1522                 if (ret < 0)
1523                         return ret;
1524
1525                 instrs[2].ctx.data.force_8bit = force_8bit;
1526
1527                 /* Drop the DATA_OUT instruction if len is set to 0. */
1528                 if (!len)
1529                         op.ninstrs--;
1530
1531                 return nand_exec_op(chip, &op);
1532         }
1533
1534         chip->legacy.cmdfunc(chip, NAND_CMD_RNDIN, offset_in_page, -1);
1535         if (len)
1536                 chip->legacy.write_buf(chip, buf, len);
1537
1538         return 0;
1539 }
1540 EXPORT_SYMBOL_GPL(nand_change_write_column_op);
1541
1542 /**
1543  * nand_readid_op - Do a READID operation
1544  * @chip: The NAND chip
1545  * @addr: address cycle to pass after the READID command
1546  * @buf: buffer used to store the ID
1547  * @len: length of the buffer
1548  *
1549  * This function sends a READID command and reads back the ID returned by the
1550  * NAND.
1551  * This function does not select/unselect the CS line.
1552  *
1553  * Returns 0 on success, a negative error code otherwise.
1554  */
1555 int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
1556                    unsigned int len)
1557 {
1558         unsigned int i;
1559         u8 *id = buf;
1560
1561         if (len && !buf)
1562                 return -EINVAL;
1563
1564         if (nand_has_exec_op(chip)) {
1565                 const struct nand_sdr_timings *sdr =
1566                         nand_get_sdr_timings(nand_get_interface_config(chip));
1567                 struct nand_op_instr instrs[] = {
1568                         NAND_OP_CMD(NAND_CMD_READID, 0),
1569                         NAND_OP_ADDR(1, &addr, PSEC_TO_NSEC(sdr->tADL_min)),
1570                         NAND_OP_8BIT_DATA_IN(len, buf, 0),
1571                 };
1572                 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1573
1574                 /* Drop the DATA_IN instruction if len is set to 0. */
1575                 if (!len)
1576                         op.ninstrs--;
1577
1578                 return nand_exec_op(chip, &op);
1579         }
1580
1581         chip->legacy.cmdfunc(chip, NAND_CMD_READID, addr, -1);
1582
1583         for (i = 0; i < len; i++)
1584                 id[i] = chip->legacy.read_byte(chip);
1585
1586         return 0;
1587 }
1588 EXPORT_SYMBOL_GPL(nand_readid_op);
1589
1590 /**
1591  * nand_status_op - Do a STATUS operation
1592  * @chip: The NAND chip
1593  * @status: out variable to store the NAND status
1594  *
1595  * This function sends a STATUS command and reads back the status returned by
1596  * the NAND.
1597  * This function does not select/unselect the CS line.
1598  *
1599  * Returns 0 on success, a negative error code otherwise.
1600  */
1601 int nand_status_op(struct nand_chip *chip, u8 *status)
1602 {
1603         if (nand_has_exec_op(chip)) {
1604                 const struct nand_sdr_timings *sdr =
1605                         nand_get_sdr_timings(nand_get_interface_config(chip));
1606                 struct nand_op_instr instrs[] = {
1607                         NAND_OP_CMD(NAND_CMD_STATUS,
1608                                     PSEC_TO_NSEC(sdr->tADL_min)),
1609                         NAND_OP_8BIT_DATA_IN(1, status, 0),
1610                 };
1611                 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1612
1613                 if (!status)
1614                         op.ninstrs--;
1615
1616                 return nand_exec_op(chip, &op);
1617         }
1618
1619         chip->legacy.cmdfunc(chip, NAND_CMD_STATUS, -1, -1);
1620         if (status)
1621                 *status = chip->legacy.read_byte(chip);
1622
1623         return 0;
1624 }
1625 EXPORT_SYMBOL_GPL(nand_status_op);
1626
1627 /**
1628  * nand_exit_status_op - Exit a STATUS operation
1629  * @chip: The NAND chip
1630  *
1631  * This function sends a READ0 command to cancel the effect of the STATUS
1632  * command to avoid reading only the status until a new read command is sent.
1633  *
1634  * This function does not select/unselect the CS line.
1635  *
1636  * Returns 0 on success, a negative error code otherwise.
1637  */
1638 int nand_exit_status_op(struct nand_chip *chip)
1639 {
1640         if (nand_has_exec_op(chip)) {
1641                 struct nand_op_instr instrs[] = {
1642                         NAND_OP_CMD(NAND_CMD_READ0, 0),
1643                 };
1644                 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1645
1646                 return nand_exec_op(chip, &op);
1647         }
1648
1649         chip->legacy.cmdfunc(chip, NAND_CMD_READ0, -1, -1);
1650
1651         return 0;
1652 }
1653
1654 /**
1655  * nand_erase_op - Do an erase operation
1656  * @chip: The NAND chip
1657  * @eraseblock: block to erase
1658  *
1659  * This function sends an ERASE command and waits for the NAND to be ready
1660  * before returning.
1661  * This function does not select/unselect the CS line.
1662  *
1663  * Returns 0 on success, a negative error code otherwise.
1664  */
1665 int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock)
1666 {
1667         unsigned int page = eraseblock <<
1668                             (chip->phys_erase_shift - chip->page_shift);
1669         int ret;
1670         u8 status;
1671
1672         if (nand_has_exec_op(chip)) {
1673                 const struct nand_sdr_timings *sdr =
1674                         nand_get_sdr_timings(nand_get_interface_config(chip));
1675                 u8 addrs[3] = { page, page >> 8, page >> 16 };
1676                 struct nand_op_instr instrs[] = {
1677                         NAND_OP_CMD(NAND_CMD_ERASE1, 0),
1678                         NAND_OP_ADDR(2, addrs, 0),
1679                         NAND_OP_CMD(NAND_CMD_ERASE2,
1680                                     PSEC_TO_MSEC(sdr->tWB_max)),
1681                         NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tBERS_max), 0),
1682                 };
1683                 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1684
1685                 if (chip->options & NAND_ROW_ADDR_3)
1686                         instrs[1].ctx.addr.naddrs++;
1687
1688                 ret = nand_exec_op(chip, &op);
1689                 if (ret)
1690                         return ret;
1691
1692                 ret = nand_status_op(chip, &status);
1693                 if (ret)
1694                         return ret;
1695         } else {
1696                 chip->legacy.cmdfunc(chip, NAND_CMD_ERASE1, -1, page);
1697                 chip->legacy.cmdfunc(chip, NAND_CMD_ERASE2, -1, -1);
1698
1699                 ret = chip->legacy.waitfunc(chip);
1700                 if (ret < 0)
1701                         return ret;
1702
1703                 status = ret;
1704         }
1705
1706         if (status & NAND_STATUS_FAIL)
1707                 return -EIO;
1708
1709         return 0;
1710 }
1711 EXPORT_SYMBOL_GPL(nand_erase_op);
1712
1713 /**
1714  * nand_set_features_op - Do a SET FEATURES operation
1715  * @chip: The NAND chip
1716  * @feature: feature id
1717  * @data: 4 bytes of data
1718  *
1719  * This function sends a SET FEATURES command and waits for the NAND to be
1720  * ready before returning.
1721  * This function does not select/unselect the CS line.
1722  *
1723  * Returns 0 on success, a negative error code otherwise.
1724  */
1725 static int nand_set_features_op(struct nand_chip *chip, u8 feature,
1726                                 const void *data)
1727 {
1728         const u8 *params = data;
1729         int i, ret;
1730
1731         if (nand_has_exec_op(chip)) {
1732                 const struct nand_sdr_timings *sdr =
1733                         nand_get_sdr_timings(nand_get_interface_config(chip));
1734                 struct nand_op_instr instrs[] = {
1735                         NAND_OP_CMD(NAND_CMD_SET_FEATURES, 0),
1736                         NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tADL_min)),
1737                         NAND_OP_8BIT_DATA_OUT(ONFI_SUBFEATURE_PARAM_LEN, data,
1738                                               PSEC_TO_NSEC(sdr->tWB_max)),
1739                         NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tFEAT_max), 0),
1740                 };
1741                 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1742
1743                 return nand_exec_op(chip, &op);
1744         }
1745
1746         chip->legacy.cmdfunc(chip, NAND_CMD_SET_FEATURES, feature, -1);
1747         for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
1748                 chip->legacy.write_byte(chip, params[i]);
1749
1750         ret = chip->legacy.waitfunc(chip);
1751         if (ret < 0)
1752                 return ret;
1753
1754         if (ret & NAND_STATUS_FAIL)
1755                 return -EIO;
1756
1757         return 0;
1758 }
1759
1760 /**
1761  * nand_get_features_op - Do a GET FEATURES operation
1762  * @chip: The NAND chip
1763  * @feature: feature id
1764  * @data: 4 bytes of data
1765  *
1766  * This function sends a GET FEATURES command and waits for the NAND to be
1767  * ready before returning.
1768  * This function does not select/unselect the CS line.
1769  *
1770  * Returns 0 on success, a negative error code otherwise.
1771  */
static int nand_get_features_op(struct nand_chip *chip, u8 feature,
				void *data)
{
	u8 *params = data;
	int i;

	if (nand_has_exec_op(chip)) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(nand_get_interface_config(chip));
		/* CMD + feature address, wait ready, then read 4 bytes. */
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_GET_FEATURES, 0),
			NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tWB_max)),
			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tFEAT_max),
					 PSEC_TO_NSEC(sdr->tRR_min)),
			NAND_OP_8BIT_DATA_IN(ONFI_SUBFEATURE_PARAM_LEN,
					     data, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		return nand_exec_op(chip, &op);
	}

	/* Legacy path: issue the command and pull the parameter bytes. */
	chip->legacy.cmdfunc(chip, NAND_CMD_GET_FEATURES, feature, -1);
	for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
		params[i] = chip->legacy.read_byte(chip);

	return 0;
}
1800
1801 static int nand_wait_rdy_op(struct nand_chip *chip, unsigned int timeout_ms,
1802                             unsigned int delay_ns)
1803 {
1804         if (nand_has_exec_op(chip)) {
1805                 struct nand_op_instr instrs[] = {
1806                         NAND_OP_WAIT_RDY(PSEC_TO_MSEC(timeout_ms),
1807                                          PSEC_TO_NSEC(delay_ns)),
1808                 };
1809                 struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
1810
1811                 return nand_exec_op(chip, &op);
1812         }
1813
1814         /* Apply delay or wait for ready/busy pin */
1815         if (!chip->legacy.dev_ready)
1816                 udelay(chip->legacy.chip_delay);
1817         else
1818                 nand_wait_ready(chip);
1819
1820         return 0;
1821 }
1822
1823 /**
1824  * nand_reset_op - Do a reset operation
1825  * @chip: The NAND chip
1826  *
1827  * This function sends a RESET command and waits for the NAND to be ready
1828  * before returning.
1829  * This function does not select/unselect the CS line.
1830  *
1831  * Returns 0 on success, a negative error code otherwise.
1832  */
int nand_reset_op(struct nand_chip *chip)
{
	if (nand_has_exec_op(chip)) {
		const struct nand_sdr_timings *sdr =
			nand_get_sdr_timings(nand_get_interface_config(chip));
		/* Issue RESET, then wait up to tRST for the chip to recover. */
		struct nand_op_instr instrs[] = {
			NAND_OP_CMD(NAND_CMD_RESET, PSEC_TO_NSEC(sdr->tWB_max)),
			NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tRST_max), 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		return nand_exec_op(chip, &op);
	}

	/* Legacy path: cmdfunc() handles the wait internally. */
	chip->legacy.cmdfunc(chip, NAND_CMD_RESET, -1, -1);

	return 0;
}
1851 EXPORT_SYMBOL_GPL(nand_reset_op);
1852
1853 /**
1854  * nand_read_data_op - Read data from the NAND
1855  * @chip: The NAND chip
1856  * @buf: buffer used to store the data
1857  * @len: length of the buffer
1858  * @force_8bit: force 8-bit bus access
1859  * @check_only: do not actually run the command, only checks if the
1860  *              controller driver supports it
1861  *
1862  * This function does a raw data read on the bus. Usually used after launching
1863  * another NAND operation like nand_read_page_op().
1864  * This function does not select/unselect the CS line.
1865  *
1866  * Returns 0 on success, a negative error code otherwise.
1867  */
int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
		      bool force_8bit, bool check_only)
{
	if (!len || !buf)
		return -EINVAL;

	if (nand_has_exec_op(chip)) {
		struct nand_op_instr instrs[] = {
			NAND_OP_DATA_IN(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		/* Propagate the 8-bit constraint to the data instruction. */
		instrs[0].ctx.data.force_8bit = force_8bit;

		if (check_only)
			return nand_check_op(chip, &op);

		return nand_exec_op(chip, &op);
	}

	/* Legacy controllers have no way to refuse: report success. */
	if (check_only)
		return 0;

	if (force_8bit) {
		u8 *p = buf;
		unsigned int i;

		/* Byte-per-byte reads guarantee 8-bit bus accesses. */
		for (i = 0; i < len; i++)
			p[i] = chip->legacy.read_byte(chip);
	} else {
		chip->legacy.read_buf(chip, buf, len);
	}

	return 0;
}
1903 EXPORT_SYMBOL_GPL(nand_read_data_op);
1904
1905 /**
 * nand_write_data_op - Write data to the NAND
1907  * @chip: The NAND chip
1908  * @buf: buffer containing the data to send on the bus
1909  * @len: length of the buffer
1910  * @force_8bit: force 8-bit bus access
1911  *
1912  * This function does a raw data write on the bus. Usually used after launching
1913  * another NAND operation like nand_write_page_begin_op().
1914  * This function does not select/unselect the CS line.
1915  *
1916  * Returns 0 on success, a negative error code otherwise.
1917  */
int nand_write_data_op(struct nand_chip *chip, const void *buf,
		       unsigned int len, bool force_8bit)
{
	if (!len || !buf)
		return -EINVAL;

	if (nand_has_exec_op(chip)) {
		struct nand_op_instr instrs[] = {
			NAND_OP_DATA_OUT(len, buf, 0),
		};
		struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);

		/* Propagate the 8-bit constraint to the data instruction. */
		instrs[0].ctx.data.force_8bit = force_8bit;

		return nand_exec_op(chip, &op);
	}

	if (force_8bit) {
		const u8 *p = buf;
		unsigned int i;

		/* Byte-per-byte writes guarantee 8-bit bus accesses. */
		for (i = 0; i < len; i++)
			chip->legacy.write_byte(chip, p[i]);
	} else {
		chip->legacy.write_buf(chip, buf, len);
	}

	return 0;
}
1947 EXPORT_SYMBOL_GPL(nand_write_data_op);
1948
1949 /**
1950  * struct nand_op_parser_ctx - Context used by the parser
1951  * @instrs: array of all the instructions that must be addressed
1952  * @ninstrs: length of the @instrs array
1953  * @subop: Sub-operation to be passed to the NAND controller
1954  *
1955  * This structure is used by the core to split NAND operations into
1956  * sub-operations that can be handled by the NAND controller.
1957  */
struct nand_op_parser_ctx {
	const struct nand_op_instr *instrs;	/* full instruction array of the operation */
	unsigned int ninstrs;			/* number of entries in @instrs */
	struct nand_subop subop;		/* current window into @instrs */
};
1963
1964 /**
1965  * nand_op_parser_must_split_instr - Checks if an instruction must be split
1966  * @pat: the parser pattern element that matches @instr
1967  * @instr: pointer to the instruction to check
1968  * @start_offset: this is an in/out parameter. If @instr has already been
1969  *                split, then @start_offset is the offset from which to start
1970  *                (either an address cycle or an offset in the data buffer).
1971  *                Conversely, if the function returns true (ie. instr must be
1972  *                split), this parameter is updated to point to the first
1973  *                data/address cycle that has not been taken care of.
1974  *
1975  * Some NAND controllers are limited and cannot send X address cycles with a
1976  * unique operation, or cannot read/write more than Y bytes at the same time.
1977  * In this case, split the instruction that does not fit in a single
1978  * controller-operation into two or more chunks.
1979  *
1980  * Returns true if the instruction must be split, false otherwise.
1981  * The @start_offset parameter is also updated to the offset at which the next
1982  * bundle of instruction must start (if an address or a data instruction).
1983  */
1984 static bool
1985 nand_op_parser_must_split_instr(const struct nand_op_parser_pattern_elem *pat,
1986                                 const struct nand_op_instr *instr,
1987                                 unsigned int *start_offset)
1988 {
1989         switch (pat->type) {
1990         case NAND_OP_ADDR_INSTR:
1991                 if (!pat->ctx.addr.maxcycles)
1992                         break;
1993
1994                 if (instr->ctx.addr.naddrs - *start_offset >
1995                     pat->ctx.addr.maxcycles) {
1996                         *start_offset += pat->ctx.addr.maxcycles;
1997                         return true;
1998                 }
1999                 break;
2000
2001         case NAND_OP_DATA_IN_INSTR:
2002         case NAND_OP_DATA_OUT_INSTR:
2003                 if (!pat->ctx.data.maxlen)
2004                         break;
2005
2006                 if (instr->ctx.data.len - *start_offset >
2007                     pat->ctx.data.maxlen) {
2008                         *start_offset += pat->ctx.data.maxlen;
2009                         return true;
2010                 }
2011                 break;
2012
2013         default:
2014                 break;
2015         }
2016
2017         return false;
2018 }
2019
2020 /**
2021  * nand_op_parser_match_pat - Checks if a pattern matches the instructions
2022  *                            remaining in the parser context
2023  * @pat: the pattern to test
2024  * @ctx: the parser context structure to match with the pattern @pat
2025  *
2026  * Check if @pat matches the set or a sub-set of instructions remaining in @ctx.
 * Returns true if this is the case, false otherwise. When true is returned,
2028  * @ctx->subop is updated with the set of instructions to be passed to the
2029  * controller driver.
2030  */
static bool
nand_op_parser_match_pat(const struct nand_op_parser_pattern *pat,
			 struct nand_op_parser_ctx *ctx)
{
	/* Resume point inside the first instruction if it was split before. */
	unsigned int instr_offset = ctx->subop.first_instr_start_off;
	const struct nand_op_instr *end = ctx->instrs + ctx->ninstrs;
	const struct nand_op_instr *instr = ctx->subop.instrs;
	unsigned int i, ninstrs;

	for (i = 0, ninstrs = 0; i < pat->nelems && instr < end; i++) {
		/*
		 * The pattern instruction does not match the operation
		 * instruction. If the instruction is marked optional in the
		 * pattern definition, we skip the pattern element and continue
		 * to the next one. If the element is mandatory, there's no
		 * match and we can return false directly.
		 */
		if (instr->type != pat->elems[i].type) {
			if (!pat->elems[i].optional)
				return false;

			continue;
		}

		/*
		 * Now check the pattern element constraints. If the pattern is
		 * not able to handle the whole instruction in a single step,
		 * we have to split it.
		 * The last_instr_end_off value comes back updated to point to
		 * the position where we have to split the instruction (the
		 * start of the next subop chunk).
		 */
		if (nand_op_parser_must_split_instr(&pat->elems[i], instr,
						    &instr_offset)) {
			/* Count the partially-consumed instruction and stop. */
			ninstrs++;
			i++;
			break;
		}

		instr++;
		ninstrs++;
		instr_offset = 0;
	}

	/*
	 * This can happen if all instructions of a pattern are optional.
	 * Still, if there's not at least one instruction handled by this
	 * pattern, this is not a match, and we should try the next one (if
	 * any).
	 */
	if (!ninstrs)
		return false;

	/*
	 * We had a match on the pattern head, but the pattern may be longer
	 * than the instructions we're asked to execute. We need to make sure
	 * there's no mandatory elements in the pattern tail.
	 */
	for (; i < pat->nelems; i++) {
		if (!pat->elems[i].optional)
			return false;
	}

	/*
	 * We have a match: update the subop structure accordingly and return
	 * true.
	 */
	ctx->subop.ninstrs = ninstrs;
	ctx->subop.last_instr_end_off = instr_offset;

	return true;
}
2103
#if IS_ENABLED(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG)
/* Dump the whole operation, highlighting the current subop window. */
static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
{
	const struct nand_op_instr *instr;
	char *prefix = "      ";
	unsigned int i;

	pr_debug("executing subop (CS%d):\n", ctx->subop.cs);

	for (i = 0; i < ctx->ninstrs; i++) {
		instr = &ctx->instrs[i];

		/* Switch to the arrow prefix while inside the subop. */
		if (instr == &ctx->subop.instrs[0])
			prefix = "    ->";

		nand_op_trace(prefix, instr);

		if (instr == &ctx->subop.instrs[ctx->subop.ninstrs - 1])
			prefix = "      ";
	}
}
#else
static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
{
	/* NOP */
}
#endif
2131
2132 static int nand_op_parser_cmp_ctx(const struct nand_op_parser_ctx *a,
2133                                   const struct nand_op_parser_ctx *b)
2134 {
2135         if (a->subop.ninstrs < b->subop.ninstrs)
2136                 return -1;
2137         else if (a->subop.ninstrs > b->subop.ninstrs)
2138                 return 1;
2139
2140         if (a->subop.last_instr_end_off < b->subop.last_instr_end_off)
2141                 return -1;
2142         else if (a->subop.last_instr_end_off > b->subop.last_instr_end_off)
2143                 return 1;
2144
2145         return 0;
2146 }
2147
2148 /**
2149  * nand_op_parser_exec_op - exec_op parser
2150  * @chip: the NAND chip
2151  * @parser: patterns description provided by the controller driver
2152  * @op: the NAND operation to address
2153  * @check_only: when true, the function only checks if @op can be handled but
2154  *              does not execute the operation
2155  *
2156  * Helper function designed to ease integration of NAND controller drivers that
2157  * only support a limited set of instruction sequences. The supported sequences
2158  * are described in @parser, and the framework takes care of splitting @op into
2159  * multiple sub-operations (if required) and pass them back to the ->exec()
2160  * callback of the matching pattern if @check_only is set to false.
2161  *
2162  * NAND controller drivers should call this function from their own ->exec_op()
2163  * implementation.
2164  *
2165  * Returns 0 on success, a negative error code otherwise. A failure can be
2166  * caused by an unsupported operation (none of the supported patterns is able
2167  * to handle the requested operation), or an error returned by one of the
2168  * matching pattern->exec() hook.
2169  */
int nand_op_parser_exec_op(struct nand_chip *chip,
			   const struct nand_op_parser *parser,
			   const struct nand_operation *op, bool check_only)
{
	struct nand_op_parser_ctx ctx = {
		.subop.cs = op->cs,
		.subop.instrs = op->instrs,
		.instrs = op->instrs,
		.ninstrs = op->ninstrs,
	};
	unsigned int i;

	/* Consume the operation one sub-operation at a time. */
	while (ctx.subop.instrs < op->instrs + op->ninstrs) {
		const struct nand_op_parser_pattern *pattern;
		struct nand_op_parser_ctx best_ctx;
		int ret, best_pattern = -1;

		/*
		 * Try every pattern and keep the one consuming the most of
		 * the remaining operation (see nand_op_parser_cmp_ctx()).
		 */
		for (i = 0; i < parser->npatterns; i++) {
			struct nand_op_parser_ctx test_ctx = ctx;

			pattern = &parser->patterns[i];
			if (!nand_op_parser_match_pat(pattern, &test_ctx))
				continue;

			if (best_pattern >= 0 &&
			    nand_op_parser_cmp_ctx(&test_ctx, &best_ctx) <= 0)
				continue;

			best_pattern = i;
			best_ctx = test_ctx;
		}

		/* No pattern can handle the head of the operation: give up. */
		if (best_pattern < 0) {
			pr_debug("->exec_op() parser: pattern not found!\n");
			return -ENOTSUPP;
		}

		ctx = best_ctx;
		nand_op_parser_trace(&ctx);

		if (!check_only) {
			pattern = &parser->patterns[best_pattern];
			ret = pattern->exec(chip, &ctx.subop);
			if (ret)
				return ret;
		}

		/*
		 * Update the context structure by pointing to the start of the
		 * next subop. A split instruction (non-zero last_instr_end_off)
		 * must be revisited as the head of the next subop.
		 */
		ctx.subop.instrs = ctx.subop.instrs + ctx.subop.ninstrs;
		if (ctx.subop.last_instr_end_off)
			ctx.subop.instrs -= 1;

		ctx.subop.first_instr_start_off = ctx.subop.last_instr_end_off;
	}

	return 0;
}
2230 EXPORT_SYMBOL_GPL(nand_op_parser_exec_op);
2231
2232 static bool nand_instr_is_data(const struct nand_op_instr *instr)
2233 {
2234         return instr && (instr->type == NAND_OP_DATA_IN_INSTR ||
2235                          instr->type == NAND_OP_DATA_OUT_INSTR);
2236 }
2237
2238 static bool nand_subop_instr_is_valid(const struct nand_subop *subop,
2239                                       unsigned int instr_idx)
2240 {
2241         return subop && instr_idx < subop->ninstrs;
2242 }
2243
2244 static unsigned int nand_subop_get_start_off(const struct nand_subop *subop,
2245                                              unsigned int instr_idx)
2246 {
2247         if (instr_idx)
2248                 return 0;
2249
2250         return subop->first_instr_start_off;
2251 }
2252
2253 /**
2254  * nand_subop_get_addr_start_off - Get the start offset in an address array
2255  * @subop: The entire sub-operation
2256  * @instr_idx: Index of the instruction inside the sub-operation
2257  *
2258  * During driver development, one could be tempted to directly use the
2259  * ->addr.addrs field of address instructions. This is wrong as address
2260  * instructions might be split.
2261  *
2262  * Given an address instruction, returns the offset of the first cycle to issue.
2263  */
unsigned int nand_subop_get_addr_start_off(const struct nand_subop *subop,
					   unsigned int instr_idx)
{
	/* Only meaningful for a valid address instruction. */
	if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
		    subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR))
		return 0;

	return nand_subop_get_start_off(subop, instr_idx);
}
2273 EXPORT_SYMBOL_GPL(nand_subop_get_addr_start_off);
2274
2275 /**
2276  * nand_subop_get_num_addr_cyc - Get the remaining address cycles to assert
2277  * @subop: The entire sub-operation
2278  * @instr_idx: Index of the instruction inside the sub-operation
2279  *
2280  * During driver development, one could be tempted to directly use the
 * ->addr->naddrs field of an address instruction. This is wrong as instructions
2282  * might be split.
2283  *
2284  * Given an address instruction, returns the number of address cycle to issue.
2285  */
unsigned int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
					 unsigned int instr_idx)
{
	int start_off, end_off;

	/* Only meaningful for a valid address instruction. */
	if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
		    subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR))
		return 0;

	start_off = nand_subop_get_addr_start_off(subop, instr_idx);

	/*
	 * A non-zero last_instr_end_off on the last instruction means it was
	 * split: only the cycles up to that boundary belong to this subop.
	 */
	if (instr_idx == subop->ninstrs - 1 &&
	    subop->last_instr_end_off)
		end_off = subop->last_instr_end_off;
	else
		end_off = subop->instrs[instr_idx].ctx.addr.naddrs;

	return end_off - start_off;
}
2305 EXPORT_SYMBOL_GPL(nand_subop_get_num_addr_cyc);
2306
2307 /**
2308  * nand_subop_get_data_start_off - Get the start offset in a data array
2309  * @subop: The entire sub-operation
2310  * @instr_idx: Index of the instruction inside the sub-operation
2311  *
2312  * During driver development, one could be tempted to directly use the
2313  * ->data->buf.{in,out} field of data instructions. This is wrong as data
2314  * instructions might be split.
2315  *
2316  * Given a data instruction, returns the offset to start from.
2317  */
unsigned int nand_subop_get_data_start_off(const struct nand_subop *subop,
					   unsigned int instr_idx)
{
	/* Only meaningful for a valid data (in/out) instruction. */
	if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
		    !nand_instr_is_data(&subop->instrs[instr_idx])))
		return 0;

	return nand_subop_get_start_off(subop, instr_idx);
}
2327 EXPORT_SYMBOL_GPL(nand_subop_get_data_start_off);
2328
2329 /**
2330  * nand_subop_get_data_len - Get the number of bytes to retrieve
2331  * @subop: The entire sub-operation
2332  * @instr_idx: Index of the instruction inside the sub-operation
2333  *
2334  * During driver development, one could be tempted to directly use the
2335  * ->data->len field of a data instruction. This is wrong as data instructions
2336  * might be split.
2337  *
2338  * Returns the length of the chunk of data to send/receive.
2339  */
unsigned int nand_subop_get_data_len(const struct nand_subop *subop,
				     unsigned int instr_idx)
{
	int start_off = 0, end_off;

	/* Only meaningful for a valid data (in/out) instruction. */
	if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
		    !nand_instr_is_data(&subop->instrs[instr_idx])))
		return 0;

	start_off = nand_subop_get_data_start_off(subop, instr_idx);

	/*
	 * A non-zero last_instr_end_off on the last instruction means it was
	 * split: only the bytes up to that boundary belong to this subop.
	 */
	if (instr_idx == subop->ninstrs - 1 &&
	    subop->last_instr_end_off)
		end_off = subop->last_instr_end_off;
	else
		end_off = subop->instrs[instr_idx].ctx.data.len;

	return end_off - start_off;
}
2359 EXPORT_SYMBOL_GPL(nand_subop_get_data_len);
2360
2361 /**
2362  * nand_reset - Reset and initialize a NAND device
2363  * @chip: The NAND chip
2364  * @chipnr: Internal die id
2365  *
2366  * Save the timings data structure, then apply SDR timings mode 0 (see
2367  * nand_reset_interface for details), do the reset operation, and apply
2368  * back the previous timings.
2369  *
2370  * Returns 0 on success, a negative error code otherwise.
2371  */
int nand_reset(struct nand_chip *chip, int chipnr)
{
	int ret;

	/* Fall back to SDR timing mode 0 before issuing the RESET. */
	ret = nand_reset_interface(chip, chipnr);
	if (ret)
		return ret;

	/*
	 * The CS line has to be released before we can apply the new NAND
	 * interface settings, hence this weird nand_select_target()
	 * nand_deselect_target() dance.
	 */
	nand_select_target(chip, chipnr);
	ret = nand_reset_op(chip);
	nand_deselect_target(chip);
	if (ret)
		return ret;

	/* Re-apply the previously negotiated interface settings. */
	return nand_setup_interface(chip, chipnr);
}
2397 EXPORT_SYMBOL_GPL(nand_reset);
2398
2399 /**
2400  * nand_get_features - wrapper to perform a GET_FEATURE
2401  * @chip: NAND chip info structure
2402  * @addr: feature address
2403  * @subfeature_param: the subfeature parameters, a four bytes array
2404  *
2405  * Returns 0 for success, a negative error otherwise. Returns -ENOTSUPP if the
2406  * operation cannot be handled.
2407  */
2408 int nand_get_features(struct nand_chip *chip, int addr,
2409                       u8 *subfeature_param)
2410 {
2411         if (!nand_supports_get_features(chip, addr))
2412                 return -ENOTSUPP;
2413
2414         if (chip->legacy.get_features)
2415                 return chip->legacy.get_features(chip, addr, subfeature_param);
2416
2417         return nand_get_features_op(chip, addr, subfeature_param);
2418 }
2419
2420 /**
2421  * nand_set_features - wrapper to perform a SET_FEATURE
2422  * @chip: NAND chip info structure
2423  * @addr: feature address
2424  * @subfeature_param: the subfeature parameters, a four bytes array
2425  *
2426  * Returns 0 for success, a negative error otherwise. Returns -ENOTSUPP if the
2427  * operation cannot be handled.
2428  */
2429 int nand_set_features(struct nand_chip *chip, int addr,
2430                       u8 *subfeature_param)
2431 {
2432         if (!nand_supports_set_features(chip, addr))
2433                 return -ENOTSUPP;
2434
2435         if (chip->legacy.set_features)
2436                 return chip->legacy.set_features(chip, addr, subfeature_param);
2437
2438         return nand_set_features_op(chip, addr, subfeature_param);
2439 }
2440
2441 /**
2442  * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data
2443  * @buf: buffer to test
2444  * @len: buffer length
2445  * @bitflips_threshold: maximum number of bitflips
2446  *
2447  * Check if a buffer contains only 0xff, which means the underlying region
2448  * has been erased and is ready to be programmed.
2449  * The bitflips_threshold specify the maximum number of bitflips before
2450  * considering the region is not erased.
2451  * Note: The logic of this function has been extracted from the memweight
2452  * implementation, except that nand_check_erased_buf function exit before
2453  * testing the whole buffer if the number of bitflips exceed the
2454  * bitflips_threshold value.
2455  *
2456  * Returns a positive number of bitflips less than or equal to
2457  * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
2458  * threshold.
2459  */
static int nand_check_erased_buf(void *buf, int len, int bitflips_threshold)
{
	const unsigned char *bitmap = buf;
	int bitflips = 0;
	int weight;

	/* Head: byte-at-a-time until the pointer is long-aligned. */
	for (; len && ((uintptr_t)bitmap) % sizeof(long);
	     len--, bitmap++) {
		weight = hweight8(*bitmap);
		/* Every 0 bit in an erased region is a bitflip. */
		bitflips += BITS_PER_BYTE - weight;
		if (unlikely(bitflips > bitflips_threshold))
			return -EBADMSG;
	}

	/* Middle: one long word at a time, skipping all-ones words fast. */
	for (; len >= sizeof(long);
	     len -= sizeof(long), bitmap += sizeof(long)) {
		unsigned long d = *((unsigned long *)bitmap);
		if (d == ~0UL)
			continue;
		weight = hweight_long(d);
		bitflips += BITS_PER_LONG - weight;
		if (unlikely(bitflips > bitflips_threshold))
			return -EBADMSG;
	}

	/* Tail: remaining bytes after the last full long word. */
	for (; len > 0; len--, bitmap++) {
		weight = hweight8(*bitmap);
		bitflips += BITS_PER_BYTE - weight;
		if (unlikely(bitflips > bitflips_threshold))
			return -EBADMSG;
	}

	return bitflips;
}
2494
2495 /**
2496  * nand_check_erased_ecc_chunk - check if an ECC chunk contains (almost) only
2497  *                               0xff data
2498  * @data: data buffer to test
2499  * @datalen: data length
2500  * @ecc: ECC buffer
2501  * @ecclen: ECC length
2502  * @extraoob: extra OOB buffer
2503  * @extraooblen: extra OOB length
2504  * @bitflips_threshold: maximum number of bitflips
2505  *
2506  * Check if a data buffer and its associated ECC and OOB data contains only
2507  * 0xff pattern, which means the underlying region has been erased and is
2508  * ready to be programmed.
2509  * The bitflips_threshold specify the maximum number of bitflips before
2510  * considering the region as not erased.
2511  *
2512  * Note:
2513  * 1/ ECC algorithms are working on pre-defined block sizes which are usually
2514  *    different from the NAND page size. When fixing bitflips, ECC engines will
2515  *    report the number of errors per chunk, and the NAND core infrastructure
2516  *    expect you to return the maximum number of bitflips for the whole page.
2517  *    This is why you should always use this function on a single chunk and
2518  *    not on the whole page. After checking each chunk you should update your
2519  *    max_bitflips value accordingly.
2520  * 2/ When checking for bitflips in erased pages you should not only check
2521  *    the payload data but also their associated ECC data, because a user might
2522  *    have programmed almost all bits to 1 but a few. In this case, we
2523  *    shouldn't consider the chunk as erased, and checking ECC bytes prevent
2524  *    this case.
2525  * 3/ The extraoob argument is optional, and should be used if some of your OOB
2526  *    data are protected by the ECC engine.
2527  *    It could also be used if you support subpages and want to attach some
2528  *    extra OOB data to an ECC chunk.
2529  *
2530  * Returns a positive number of bitflips less than or equal to
2531  * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
2532  * threshold. In case of success, the passed buffers are filled with 0xff.
2533  */
int nand_check_erased_ecc_chunk(void *data, int datalen,
				void *ecc, int ecclen,
				void *extraoob, int extraooblen,
				int bitflips_threshold)
{
	/* Scan order matters for the threshold budget: data, ECC, extra OOB. */
	void *bufs[] = { data, ecc, extraoob };
	int lens[] = { datalen, ecclen, extraooblen };
	int flips[3];
	int total = 0;
	int i;

	/*
	 * Check each area in turn, shrinking the remaining bitflips budget
	 * as we go. Bail out with the error as soon as one area exceeds
	 * what is left of the threshold.
	 */
	for (i = 0; i < 3; i++) {
		flips[i] = nand_check_erased_buf(bufs[i], lens[i],
						 bitflips_threshold);
		if (flips[i] < 0)
			return flips[i];

		bitflips_threshold -= flips[i];
		total += flips[i];
	}

	/*
	 * The chunk counts as erased: wipe every area that contained
	 * bitflips back to the 0xff pattern, as documented above.
	 */
	for (i = 0; i < 3; i++) {
		if (flips[i])
			memset(bufs[i], 0xff, lens[i]);
	}

	return total;
}
EXPORT_SYMBOL(nand_check_erased_ecc_chunk);
2571
2572 /**
2573  * nand_read_page_raw_notsupp - dummy read raw page function
2574  * @chip: nand chip info structure
2575  * @buf: buffer to store read data
2576  * @oob_required: caller requires OOB data read to chip->oob_poi
2577  * @page: page number to read
2578  *
2579  * Returns -ENOTSUPP unconditionally.
2580  */
int nand_read_page_raw_notsupp(struct nand_chip *chip, u8 *buf,
			       int oob_required, int page)
{
	/* Stub for controllers that cannot do raw page accesses. */
	return -ENOTSUPP;
}
2586
2587 /**
2588  * nand_read_page_raw - [INTERN] read raw page data without ecc
2589  * @chip: nand chip info structure
2590  * @buf: buffer to store read data
2591  * @oob_required: caller requires OOB data read to chip->oob_poi
2592  * @page: page number to read
2593  *
2594  * Not for syndrome calculating ECC controllers, which use a special oob layout.
2595  */
2596 int nand_read_page_raw(struct nand_chip *chip, uint8_t *buf, int oob_required,
2597                        int page)
2598 {
2599         struct mtd_info *mtd = nand_to_mtd(chip);
2600         int ret;
2601
2602         ret = nand_read_page_op(chip, page, 0, buf, mtd->writesize);
2603         if (ret)
2604                 return ret;
2605
2606         if (oob_required) {
2607                 ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize,
2608                                         false, false);
2609                 if (ret)
2610                         return ret;
2611         }
2612
2613         return 0;
2614 }
2615 EXPORT_SYMBOL(nand_read_page_raw);
2616
2617 /**
2618  * nand_monolithic_read_page_raw - Monolithic page read in raw mode
2619  * @chip: NAND chip info structure
2620  * @buf: buffer to store read data
2621  * @oob_required: caller requires OOB data read to chip->oob_poi
2622  * @page: page number to read
2623  *
2624  * This is a raw page read, ie. without any error detection/correction.
2625  * Monolithic means we are requesting all the relevant data (main plus
2626  * eventually OOB) to be loaded in the NAND cache and sent over the
2627  * bus (from the NAND chip to the NAND controller) in a single
2628  * operation. This is an alternative to nand_read_page_raw(), which
2629  * first reads the main data, and if the OOB data is requested too,
2630  * then reads more data on the bus.
2631  */
int nand_monolithic_read_page_raw(struct nand_chip *chip, u8 *buf,
				  int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int size = mtd->writesize;
	u8 *read_buf = buf;
	int ret;

	if (oob_required) {
		size += mtd->oobsize;

		/*
		 * Main data and OOB must land in one contiguous buffer for a
		 * single-transfer read, so switch to the chip's internal
		 * buffer unless the caller already passed it in.
		 * NOTE(review): presumably chip->oob_poi points into
		 * chip->data_buf so the OOB part ends up in the right place
		 * without an explicit copy — confirm against nand_get_data_buf().
		 */
		if (buf != chip->data_buf)
			read_buf = nand_get_data_buf(chip);
	}

	/* One single bus transfer covering data (and OOB when requested). */
	ret = nand_read_page_op(chip, page, 0, read_buf, size);
	if (ret)
		return ret;

	/* Copy the main data back when the internal buffer was used. */
	if (buf != chip->data_buf)
		memcpy(buf, read_buf, mtd->writesize);

	return 0;
}
EXPORT_SYMBOL(nand_monolithic_read_page_raw);
2657
2658 /**
2659  * nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc
2660  * @chip: nand chip info structure
2661  * @buf: buffer to store read data
2662  * @oob_required: caller requires OOB data read to chip->oob_poi
2663  * @page: page number to read
2664  *
2665  * We need a special oob layout and handling even when OOB isn't used.
2666  */
static int nand_read_page_raw_syndrome(struct nand_chip *chip, uint8_t *buf,
				       int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	uint8_t *oob = chip->oob_poi;
	int steps, size, ret;

	/* Start the page read without transferring any data yet. */
	ret = nand_read_page_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	/*
	 * The syndrome layout interleaves data and OOB on the flash: each
	 * ECC step is "eccsize data | prepad | eccbytes ECC | postpad".
	 * Read in that on-flash order, steering data into @buf and the
	 * pad/ECC bytes into chip->oob_poi.
	 */
	for (steps = chip->ecc.steps; steps > 0; steps--) {
		ret = nand_read_data_op(chip, buf, eccsize, false, false);
		if (ret)
			return ret;

		buf += eccsize;

		if (chip->ecc.prepad) {
			ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
						false, false);
			if (ret)
				return ret;

			oob += chip->ecc.prepad;
		}

		ret = nand_read_data_op(chip, oob, eccbytes, false, false);
		if (ret)
			return ret;

		oob += eccbytes;

		if (chip->ecc.postpad) {
			ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
						false, false);
			if (ret)
				return ret;

			oob += chip->ecc.postpad;
		}
	}

	/* Read whatever OOB bytes remain past the last ECC chunk. */
	size = mtd->oobsize - (oob - chip->oob_poi);
	if (size) {
		ret = nand_read_data_op(chip, oob, size, false, false);
		if (ret)
			return ret;
	}

	return 0;
}
2721
2722 /**
2723  * nand_read_page_swecc - [REPLACEABLE] software ECC based page read function
2724  * @chip: nand chip info structure
2725  * @buf: buffer to store read data
2726  * @oob_required: caller requires OOB data read to chip->oob_poi
2727  * @page: page number to read
2728  */
2729 static int nand_read_page_swecc(struct nand_chip *chip, uint8_t *buf,
2730                                 int oob_required, int page)
2731 {
2732         struct mtd_info *mtd = nand_to_mtd(chip);
2733         int i, eccsize = chip->ecc.size, ret;
2734         int eccbytes = chip->ecc.bytes;
2735         int eccsteps = chip->ecc.steps;
2736         uint8_t *p = buf;
2737         uint8_t *ecc_calc = chip->ecc.calc_buf;
2738         uint8_t *ecc_code = chip->ecc.code_buf;
2739         unsigned int max_bitflips = 0;
2740
2741         chip->ecc.read_page_raw(chip, buf, 1, page);
2742
2743         for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
2744                 chip->ecc.calculate(chip, p, &ecc_calc[i]);
2745
2746         ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
2747                                          chip->ecc.total);
2748         if (ret)
2749                 return ret;
2750
2751         eccsteps = chip->ecc.steps;
2752         p = buf;
2753
2754         for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
2755                 int stat;
2756
2757                 stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]);
2758                 if (stat < 0) {
2759                         mtd->ecc_stats.failed++;
2760                 } else {
2761                         mtd->ecc_stats.corrected += stat;
2762                         max_bitflips = max_t(unsigned int, max_bitflips, stat);
2763                 }
2764         }
2765         return max_bitflips;
2766 }
2767
2768 /**
2769  * nand_read_subpage - [REPLACEABLE] ECC based sub-page read function
2770  * @chip: nand chip info structure
2771  * @data_offs: offset of requested data within the page
2772  * @readlen: data length
2773  * @bufpoi: buffer to store read data
2774  * @page: page number to read
2775  */
static int nand_read_subpage(struct nand_chip *chip, uint32_t data_offs,
			     uint32_t readlen, uint8_t *bufpoi, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int start_step, end_step, num_steps, ret;
	uint8_t *p;
	int data_col_addr, i, gaps = 0;
	int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
	int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
	int index, section = 0;
	unsigned int max_bitflips = 0;
	struct mtd_oob_region oobregion = { };

	/* Column address within the page aligned to ECC size (256bytes) */
	start_step = data_offs / chip->ecc.size;
	end_step = (data_offs + readlen - 1) / chip->ecc.size;
	num_steps = end_step - start_step + 1;
	index = start_step * chip->ecc.bytes;

	/* Data size aligned to ECC ecc.size */
	datafrag_len = num_steps * chip->ecc.size;
	eccfrag_len = num_steps * chip->ecc.bytes;

	data_col_addr = start_step * chip->ecc.size;
	/* If we read not a page aligned data */
	p = bufpoi + data_col_addr;
	ret = nand_read_page_op(chip, page, data_col_addr, p, datafrag_len);
	if (ret)
		return ret;

	/* Calculate ECC */
	for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size)
		chip->ecc.calculate(chip, p, &chip->ecc.calc_buf[i]);

	/*
	 * The performance is faster if we position offsets according to
	 * ecc.pos. Let's make sure that there are no gaps in ECC positions.
	 */
	ret = mtd_ooblayout_find_eccregion(mtd, index, &section, &oobregion);
	if (ret)
		return ret;

	/*
	 * If the ECC bytes of the requested steps are not contiguous in the
	 * OOB layout, fall back to reading the whole OOB area below.
	 */
	if (oobregion.length < eccfrag_len)
		gaps = 1;

	if (gaps) {
		/* Scattered ECC bytes: fetch the complete OOB area. */
		ret = nand_change_read_column_op(chip, mtd->writesize,
						 chip->oob_poi, mtd->oobsize,
						 false);
		if (ret)
			return ret;
	} else {
		/*
		 * Send the command to read the particular ECC bytes take care
		 * about buswidth alignment in read_buf.
		 */
		aligned_pos = oobregion.offset & ~(busw - 1);
		aligned_len = eccfrag_len;
		/* Pad by one byte on each unaligned end for 16-bit buses. */
		if (oobregion.offset & (busw - 1))
			aligned_len++;
		if ((oobregion.offset + (num_steps * chip->ecc.bytes)) &
		    (busw - 1))
			aligned_len++;

		ret = nand_change_read_column_op(chip,
						 mtd->writesize + aligned_pos,
						 &chip->oob_poi[aligned_pos],
						 aligned_len, false);
		if (ret)
			return ret;
	}

	/* Pull the stored ECC bytes for the requested steps out of the OOB. */
	ret = mtd_ooblayout_get_eccbytes(mtd, chip->ecc.code_buf,
					 chip->oob_poi, index, eccfrag_len);
	if (ret)
		return ret;

	/* Correct each requested chunk, tracking the worst bitflip count. */
	p = bufpoi + data_col_addr;
	for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) {
		int stat;

		stat = chip->ecc.correct(chip, p, &chip->ecc.code_buf[i],
					 &chip->ecc.calc_buf[i]);
		if (stat == -EBADMSG &&
		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
			/* check for empty pages with bitflips */
			stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
						&chip->ecc.code_buf[i],
						chip->ecc.bytes,
						NULL, 0,
						chip->ecc.strength);
		}

		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}
	return max_bitflips;
}
2878
2879 /**
2880  * nand_read_page_hwecc - [REPLACEABLE] hardware ECC based page read function
2881  * @chip: nand chip info structure
2882  * @buf: buffer to store read data
2883  * @oob_required: caller requires OOB data read to chip->oob_poi
2884  * @page: page number to read
2885  *
2886  * Not for syndrome calculating ECC controllers which need a special oob layout.
2887  */
static int nand_read_page_hwecc(struct nand_chip *chip, uint8_t *buf,
				int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size, ret;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *p = buf;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	uint8_t *ecc_code = chip->ecc.code_buf;
	unsigned int max_bitflips = 0;

	/* Start the page read without transferring any data yet. */
	ret = nand_read_page_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	/*
	 * For each ECC step: arm the hardware ECC engine, stream in one
	 * data chunk, then latch the engine's result for that chunk.
	 */
	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		chip->ecc.hwctl(chip, NAND_ECC_READ);

		ret = nand_read_data_op(chip, p, eccsize, false, false);
		if (ret)
			return ret;

		chip->ecc.calculate(chip, p, &ecc_calc[i]);
	}

	/* Read the whole OOB area, which holds the stored ECC bytes. */
	ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false,
				false);
	if (ret)
		return ret;

	ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	eccsteps = chip->ecc.steps;
	p = buf;

	/* Correct each chunk and accumulate the ECC statistics. */
	for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		int stat;

		stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]);
		if (stat == -EBADMSG &&
		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
			/* check for empty pages with bitflips */
			stat = nand_check_erased_ecc_chunk(p, eccsize,
						&ecc_code[i], eccbytes,
						NULL, 0,
						chip->ecc.strength);
		}

		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}
	return max_bitflips;
}
2949
2950 /**
2951  * nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read
2952  * @chip: nand chip info structure
2953  * @buf: buffer to store read data
2954  * @oob_required: caller requires OOB data read to chip->oob_poi
2955  * @page: page number to read
2956  *
2957  * The hw generator calculates the error syndrome automatically. Therefore we
2958  * need a special oob layout and handling.
2959  */
static int nand_read_page_syndrome(struct nand_chip *chip, uint8_t *buf,
				   int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret, i, eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	/* Full size of one interleaved OOB chunk: prepad + ECC + postpad. */
	int eccpadbytes = eccbytes + chip->ecc.prepad + chip->ecc.postpad;
	uint8_t *p = buf;
	uint8_t *oob = chip->oob_poi;
	unsigned int max_bitflips = 0;

	/* Start the page read without transferring any data yet. */
	ret = nand_read_page_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	/*
	 * Walk the interleaved on-flash layout chunk by chunk: arm the
	 * engine, read data, read prepad, switch the engine to syndrome
	 * reading, read the ECC bytes, correct, then read the postpad.
	 */
	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		int stat;

		chip->ecc.hwctl(chip, NAND_ECC_READ);

		ret = nand_read_data_op(chip, p, eccsize, false, false);
		if (ret)
			return ret;

		if (chip->ecc.prepad) {
			ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
						false, false);
			if (ret)
				return ret;

			oob += chip->ecc.prepad;
		}

		chip->ecc.hwctl(chip, NAND_ECC_READSYN);

		ret = nand_read_data_op(chip, oob, eccbytes, false, false);
		if (ret)
			return ret;

		/* The hw engine computed the syndrome; let it correct @p. */
		stat = chip->ecc.correct(chip, p, oob, NULL);

		oob += eccbytes;

		if (chip->ecc.postpad) {
			ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
						false, false);
			if (ret)
				return ret;

			oob += chip->ecc.postpad;
		}

		if (stat == -EBADMSG &&
		    (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
			/* check for empty pages with bitflips */
			stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
							   oob - eccpadbytes,
							   eccpadbytes,
							   NULL, 0,
							   chip->ecc.strength);
		}

		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}

	/* Calculate remaining oob bytes */
	i = mtd->oobsize - (oob - chip->oob_poi);
	if (i) {
		ret = nand_read_data_op(chip, oob, i, false, false);
		if (ret)
			return ret;
	}

	return max_bitflips;
}
3041
3042 /**
3043  * nand_transfer_oob - [INTERN] Transfer oob to client buffer
3044  * @chip: NAND chip object
3045  * @oob: oob destination address
3046  * @ops: oob ops structure
3047  * @len: size of oob to transfer
3048  */
3049 static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
3050                                   struct mtd_oob_ops *ops, size_t len)
3051 {
3052         struct mtd_info *mtd = nand_to_mtd(chip);
3053         int ret;
3054
3055         switch (ops->mode) {
3056
3057         case MTD_OPS_PLACE_OOB:
3058         case MTD_OPS_RAW:
3059                 memcpy(oob, chip->oob_poi + ops->ooboffs, len);
3060                 return oob + len;
3061
3062         case MTD_OPS_AUTO_OOB:
3063                 ret = mtd_ooblayout_get_databytes(mtd, oob, chip->oob_poi,
3064                                                   ops->ooboffs, len);
3065                 BUG_ON(ret);
3066                 return oob + len;
3067
3068         default:
3069                 BUG();
3070         }
3071         return NULL;
3072 }
3073
3074 /**
3075  * nand_setup_read_retry - [INTERN] Set the READ RETRY mode
3076  * @chip: NAND chip object
3077  * @retry_mode: the retry mode to use
3078  *
3079  * Some vendors supply a special command to shift the Vt threshold, to be used
3080  * when there are too many bitflips in a page (i.e., ECC error). After setting
3081  * a new threshold, the host should retry reading the page.
3082  */
static int nand_setup_read_retry(struct nand_chip *chip, int retry_mode)
{
	pr_debug("setting READ RETRY mode %d\n", retry_mode);

	/* Retry modes are numbered 0..read_retries-1; reject anything else. */
	if (retry_mode >= chip->read_retries)
		return -EINVAL;

	/* The vendor-specific code must provide the hook. */
	if (!chip->ops.setup_read_retry)
		return -EOPNOTSUPP;

	return chip->ops.setup_read_retry(chip, retry_mode);
}
3095
static void nand_wait_readrdy(struct nand_chip *chip)
{
	const struct nand_sdr_timings *sdr;

	/* Only chips flagged NAND_NEED_READRDY require this explicit wait. */
	if (!(chip->options & NAND_NEED_READRDY))
		return;

	/*
	 * Wait for the chip to become ready, bounded by its maximum page
	 * read time (tR_max). NOTE(review): assumes SDR timings are always
	 * available when NAND_NEED_READRDY is set — confirm that
	 * nand_get_sdr_timings() cannot return an error pointer here.
	 */
	sdr = nand_get_sdr_timings(nand_get_interface_config(chip));
	WARN_ON(nand_wait_rdy_op(chip, PSEC_TO_MSEC(sdr->tR_max), 0));
}
3106
3107 /**
3108  * nand_do_read_ops - [INTERN] Read data with ECC
3109  * @chip: NAND chip object
3110  * @from: offset to read from
3111  * @ops: oob ops structure
3112  *
3113  * Internal function. Called with chip held.
3114  */
static int nand_do_read_ops(struct nand_chip *chip, loff_t from,
			    struct mtd_oob_ops *ops)
{
	int chipnr, page, realpage, col, bytes, aligned, oob_required;
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret = 0;
	uint32_t readlen = ops->len;
	uint32_t oobreadlen = ops->ooblen;
	uint32_t max_oobsize = mtd_oobavail(mtd, ops);

	uint8_t *bufpoi, *oob, *buf;
	int use_bounce_buf;
	unsigned int max_bitflips = 0;
	int retry_mode = 0;
	bool ecc_fail = false;

	/* Select the die containing @from. */
	chipnr = (int)(from >> chip->chip_shift);
	nand_select_target(chip, chipnr);

	realpage = (int)(from >> chip->page_shift);
	page = realpage & chip->pagemask;

	/* Byte offset of @from within its page (0 for aligned reads). */
	col = (int)(from & (mtd->writesize - 1));

	buf = ops->datbuf;
	oob = ops->oobbuf;
	oob_required = oob ? 1 : 0;

	/* One iteration per page until @readlen is exhausted. */
	while (1) {
		/* Snapshot ECC stats to detect failures added by this page. */
		struct mtd_ecc_stats ecc_stats = mtd->ecc_stats;

		bytes = min(mtd->writesize - col, readlen);
		aligned = (bytes == mtd->writesize);

		/*
		 * Bounce through chip->data_buf for partial pages, or when
		 * DMA cannot use the caller's buffer directly.
		 */
		if (!aligned)
			use_bounce_buf = 1;
		else if (chip->options & NAND_USES_DMA)
			use_bounce_buf = !virt_addr_valid(buf) ||
					 !IS_ALIGNED((unsigned long)buf,
						     chip->buf_align);
		else
			use_bounce_buf = 0;

		/* Is the current page in the buffer? */
		if (realpage != chip->pagecache.page || oob) {
			bufpoi = use_bounce_buf ? chip->data_buf : buf;

			if (use_bounce_buf && aligned)
				pr_debug("%s: using read bounce buffer for buf@%p\n",
						 __func__, buf);

read_retry:
			/*
			 * Now read the page into the buffer.  Absent an error,
			 * the read methods return max bitflips per ecc step.
			 */
			if (unlikely(ops->mode == MTD_OPS_RAW))
				ret = chip->ecc.read_page_raw(chip, bufpoi,
							      oob_required,
							      page);
			else if (!aligned && NAND_HAS_SUBPAGE_READ(chip) &&
				 !oob)
				ret = chip->ecc.read_subpage(chip, col, bytes,
							     bufpoi, page);
			else
				ret = chip->ecc.read_page(chip, bufpoi,
							  oob_required, page);
			if (ret < 0) {
				if (use_bounce_buf)
					/* Invalidate page cache */
					chip->pagecache.page = -1;
				break;
			}

			/*
			 * Copy back the data in the initial buffer when reading
			 * partial pages or when a bounce buffer is required.
			 */
			if (use_bounce_buf) {
				/*
				 * Only cache complete, ECC-clean, non-raw
				 * pages read without OOB; anything else
				 * would make the cache contents unreliable.
				 */
				if (!NAND_HAS_SUBPAGE_READ(chip) && !oob &&
				    !(mtd->ecc_stats.failed - ecc_stats.failed) &&
				    (ops->mode != MTD_OPS_RAW)) {
					chip->pagecache.page = realpage;
					chip->pagecache.bitflips = ret;
				} else {
					/* Invalidate page cache */
					chip->pagecache.page = -1;
				}
				memcpy(buf, bufpoi + col, bytes);
			}

			if (unlikely(oob)) {
				int toread = min(oobreadlen, max_oobsize);

				if (toread) {
					oob = nand_transfer_oob(chip, oob, ops,
								toread);
					oobreadlen -= toread;
				}
			}

			nand_wait_readrdy(chip);

			/*
			 * On new ECC failures, step through the chip's read
			 * retry modes (re-reading each time) before giving
			 * up and reporting the page as failed.
			 */
			if (mtd->ecc_stats.failed - ecc_stats.failed) {
				if (retry_mode + 1 < chip->read_retries) {
					retry_mode++;
					ret = nand_setup_read_retry(chip,
							retry_mode);
					if (ret < 0)
						break;

					/* Reset ecc_stats; retry */
					mtd->ecc_stats = ecc_stats;
					goto read_retry;
				} else {
					/* No more retry modes; real failure */
					ecc_fail = true;
				}
			}

			buf += bytes;
			max_bitflips = max_t(unsigned int, max_bitflips, ret);
		} else {
			/* Page cache hit: serve the data without a flash read. */
			memcpy(buf, chip->data_buf + col, bytes);
			buf += bytes;
			max_bitflips = max_t(unsigned int, max_bitflips,
					     chip->pagecache.bitflips);
		}

		readlen -= bytes;

		/* Reset to retry mode 0 */
		if (retry_mode) {
			ret = nand_setup_read_retry(chip, 0);
			if (ret < 0)
				break;
			retry_mode = 0;
		}

		if (!readlen)
			break;

		/* For subsequent reads align to page boundary */
		col = 0;
		/* Increment page address */
		realpage++;

		page = realpage & chip->pagemask;
		/* Check, if we cross a chip boundary */
		if (!page) {
			chipnr++;
			nand_deselect_target(chip);
			nand_select_target(chip, chipnr);
		}
	}
	nand_deselect_target(chip);

	/* Report how much data/OOB was actually transferred. */
	ops->retlen = ops->len - (size_t) readlen;
	if (oob)
		ops->oobretlen = ops->ooblen - oobreadlen;

	if (ret < 0)
		return ret;

	if (ecc_fail)
		return -EBADMSG;

	return max_bitflips;
}
3284
3285 /**
3286  * nand_read_oob_std - [REPLACEABLE] the most common OOB data read function
3287  * @chip: nand chip info structure
3288  * @page: page number to read
3289  */
int nand_read_oob_std(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	/* Read the whole OOB area of @page into chip->oob_poi. */
	return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
}
EXPORT_SYMBOL(nand_read_oob_std);
3297
3298 /**
3299  * nand_read_oob_syndrome - [REPLACEABLE] OOB data read function for HW ECC
3300  *                          with syndromes
3301  * @chip: nand chip info structure
3302  * @page: page number to read
3303  */
3304 static int nand_read_oob_syndrome(struct nand_chip *chip, int page)
3305 {
3306         struct mtd_info *mtd = nand_to_mtd(chip);
3307         int length = mtd->oobsize;
3308         int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
3309         int eccsize = chip->ecc.size;
3310         uint8_t *bufpoi = chip->oob_poi;
3311         int i, toread, sndrnd = 0, pos, ret;
3312
3313         ret = nand_read_page_op(chip, page, chip->ecc.size, NULL, 0);
3314         if (ret)
3315                 return ret;
3316
3317         for (i = 0; i < chip->ecc.steps; i++) {
3318                 if (sndrnd) {
3319                         int ret;
3320
3321                         pos = eccsize + i * (eccsize + chunk);
3322                         if (mtd->writesize > 512)
3323                                 ret = nand_change_read_column_op(chip, pos,
3324                                                                  NULL, 0,
3325                                                                  false);
3326                         else
3327                                 ret = nand_read_page_op(chip, page, pos, NULL,
3328                                                         0);
3329
3330                         if (ret)
3331                                 return ret;
3332                 } else
3333                         sndrnd = 1;
3334                 toread = min_t(int, length, chunk);
3335
3336                 ret = nand_read_data_op(chip, bufpoi, toread, false, false);
3337                 if (ret)
3338                         return ret;
3339
3340                 bufpoi += toread;
3341                 length -= toread;
3342         }
3343         if (length > 0) {
3344                 ret = nand_read_data_op(chip, bufpoi, length, false, false);
3345                 if (ret)
3346                         return ret;
3347         }
3348
3349         return 0;
3350 }
3351
3352 /**
3353  * nand_write_oob_std - [REPLACEABLE] the most common OOB data write function
3354  * @chip: nand chip info structure
3355  * @page: page number to write
3356  */
3357 int nand_write_oob_std(struct nand_chip *chip, int page)
3358 {
3359         struct mtd_info *mtd = nand_to_mtd(chip);
3360
3361         return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
3362                                  mtd->oobsize);
3363 }
3364 EXPORT_SYMBOL(nand_write_oob_std);
3365
3366 /**
3367  * nand_write_oob_syndrome - [REPLACEABLE] OOB data write function for HW ECC
3368  *                           with syndrome - only for large page flash
3369  * @chip: nand chip info structure
3370  * @page: page number to write
3371  */
static int nand_write_oob_syndrome(struct nand_chip *chip, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
	int eccsize = chip->ecc.size, length = mtd->oobsize;
	int ret, i, len, pos, sndcmd = 0, steps = chip->ecc.steps;
	const uint8_t *bufpoi = chip->oob_poi;

	/*
	 * data-ecc-data-ecc ... ecc-oob
	 * or
	 * data-pad-ecc-pad-data-pad .... ecc-pad-oob
	 */
	if (!chip->ecc.prepad && !chip->ecc.postpad) {
		/*
		 * No padding: the whole OOB is contiguous after the last
		 * chunk, so write it in one go (steps = 0 skips the loop).
		 */
		pos = steps * (eccsize + chunk);
		steps = 0;
	} else
		pos = eccsize;

	ret = nand_prog_page_begin_op(chip, page, pos, NULL, 0);
	if (ret)
		return ret;

	for (i = 0; i < steps; i++) {
		/* sndcmd is 0 only on the first pass: no seek needed then */
		if (sndcmd) {
			if (mtd->writesize <= 512) {
				/*
				 * Small-page chips cannot change the write
				 * column: pad the skipped data area with
				 * 0xFF (a NAND no-op) instead, 4 bytes at a
				 * time.
				 */
				uint32_t fill = 0xFFFFFFFF;

				len = eccsize;
				while (len > 0) {
					int num = min_t(int, len, 4);

					ret = nand_write_data_op(chip, &fill,
								 num, false);
					if (ret)
						return ret;

					len -= num;
				}
			} else {
				/* Large page: seek past data chunk i */
				pos = eccsize + i * (eccsize + chunk);
				ret = nand_change_write_column_op(chip, pos,
								  NULL, 0,
								  false);
				if (ret)
					return ret;
			}
		} else
			sndcmd = 1;
		len = min_t(int, length, chunk);

		ret = nand_write_data_op(chip, bufpoi, len, false);
		if (ret)
			return ret;

		bufpoi += len;
		length -= len;
	}
	/* Write any trailing free OOB bytes after the last chunk */
	if (length > 0) {
		ret = nand_write_data_op(chip, bufpoi, length, false);
		if (ret)
			return ret;
	}

	return nand_prog_page_end_op(chip);
}
3438
3439 /**
3440  * nand_do_read_oob - [INTERN] NAND read out-of-band
3441  * @chip: NAND chip object
3442  * @from: offset to read from
3443  * @ops: oob operations description structure
3444  *
3445  * NAND read out-of-band data from the spare area.
3446  */
static int nand_do_read_oob(struct nand_chip *chip, loff_t from,
			    struct mtd_oob_ops *ops)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int max_bitflips = 0;
	int page, realpage, chipnr;
	struct mtd_ecc_stats stats;
	int readlen = ops->ooblen;
	int len;
	uint8_t *buf = ops->oobbuf;
	int ret = 0;

	pr_debug("%s: from = 0x%08Lx, len = %i\n",
			__func__, (unsigned long long)from, readlen);

	/* Snapshot ECC stats to detect new failures at the end */
	stats = mtd->ecc_stats;

	/* Bytes of OOB usable per page for this ops mode (raw vs auto) */
	len = mtd_oobavail(mtd, ops);

	chipnr = (int)(from >> chip->chip_shift);
	nand_select_target(chip, chipnr);

	/* Shift to get page */
	realpage = (int)(from >> chip->page_shift);
	page = realpage & chip->pagemask;

	while (1) {
		if (ops->mode == MTD_OPS_RAW)
			ret = chip->ecc.read_oob_raw(chip, page);
		else
			ret = chip->ecc.read_oob(chip, page);

		if (ret < 0)
			break;

		/* Last page may need fewer bytes than a full OOB area */
		len = min(len, readlen);
		buf = nand_transfer_oob(chip, buf, ops, len);

		nand_wait_readrdy(chip);

		/* read_oob returns the bitflip count on success */
		max_bitflips = max_t(unsigned int, max_bitflips, ret);

		readlen -= len;
		if (!readlen)
			break;

		/* Increment page address */
		realpage++;

		page = realpage & chip->pagemask;
		/* Check, if we cross a chip boundary */
		if (!page) {
			chipnr++;
			nand_deselect_target(chip);
			nand_select_target(chip, chipnr);
		}
	}
	nand_deselect_target(chip);

	ops->oobretlen = ops->ooblen - readlen;

	if (ret < 0)
		return ret;

	/* Any new uncorrectable ECC failure since we started? */
	if (mtd->ecc_stats.failed - stats.failed)
		return -EBADMSG;

	return max_bitflips;
}
3516
3517 /**
3518  * nand_read_oob - [MTD Interface] NAND read data and/or out-of-band
3519  * @mtd: MTD device structure
3520  * @from: offset to read from
3521  * @ops: oob operation description structure
3522  *
3523  * NAND read data and/or out-of-band data.
3524  */
3525 static int nand_read_oob(struct mtd_info *mtd, loff_t from,
3526                          struct mtd_oob_ops *ops)
3527 {
3528         struct nand_chip *chip = mtd_to_nand(mtd);
3529         int ret;
3530
3531         ops->retlen = 0;
3532
3533         if (ops->mode != MTD_OPS_PLACE_OOB &&
3534             ops->mode != MTD_OPS_AUTO_OOB &&
3535             ops->mode != MTD_OPS_RAW)
3536                 return -ENOTSUPP;
3537
3538         nand_get_device(chip);
3539
3540         if (!ops->datbuf)
3541                 ret = nand_do_read_oob(chip, from, ops);
3542         else
3543                 ret = nand_do_read_ops(chip, from, ops);
3544
3545         nand_release_device(chip);
3546         return ret;
3547 }
3548
3549 /**
3550  * nand_write_page_raw_notsupp - dummy raw page write function
3551  * @chip: nand chip info structure
3552  * @buf: data buffer
3553  * @oob_required: must write chip->oob_poi to OOB
3554  * @page: page number to write
3555  *
3556  * Returns -ENOTSUPP unconditionally.
3557  */
int nand_write_page_raw_notsupp(struct nand_chip *chip, const u8 *buf,
				int oob_required, int page)
{
	/* Placeholder for controllers without raw access; always fails */
	return -ENOTSUPP;
}
3563
3564 /**
3565  * nand_write_page_raw - [INTERN] raw page write function
3566  * @chip: nand chip info structure
3567  * @buf: data buffer
3568  * @oob_required: must write chip->oob_poi to OOB
3569  * @page: page number to write
3570  *
3571  * Not for syndrome calculating ECC controllers, which use a special oob layout.
3572  */
3573 int nand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
3574                         int oob_required, int page)
3575 {
3576         struct mtd_info *mtd = nand_to_mtd(chip);
3577         int ret;
3578
3579         ret = nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
3580         if (ret)
3581                 return ret;
3582
3583         if (oob_required) {
3584                 ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize,
3585                                          false);
3586                 if (ret)
3587                         return ret;
3588         }
3589
3590         return nand_prog_page_end_op(chip);
3591 }
3592 EXPORT_SYMBOL(nand_write_page_raw);
3593
3594 /**
3595  * nand_monolithic_write_page_raw - Monolithic page write in raw mode
3596  * @chip: NAND chip info structure
3597  * @buf: data buffer to write
3598  * @oob_required: must write chip->oob_poi to OOB
3599  * @page: page number to write
3600  *
3601  * This is a raw page write, ie. without any error detection/correction.
3602  * Monolithic means we are requesting all the relevant data (main plus
3603  * eventually OOB) to be sent over the bus and effectively programmed
3604  * into the NAND chip arrays in a single operation. This is an
3605  * alternative to nand_write_page_raw(), which first sends the main
3606  * data, then eventually send the OOB data by latching more data
3607  * cycles on the NAND bus, and finally sends the program command to
3608  * synchronyze the NAND chip cache.
3609  */
3610 int nand_monolithic_write_page_raw(struct nand_chip *chip, const u8 *buf,
3611                                    int oob_required, int page)
3612 {
3613         struct mtd_info *mtd = nand_to_mtd(chip);
3614         unsigned int size = mtd->writesize;
3615         u8 *write_buf = (u8 *)buf;
3616
3617         if (oob_required) {
3618                 size += mtd->oobsize;
3619
3620                 if (buf != chip->data_buf) {
3621                         write_buf = nand_get_data_buf(chip);
3622                         memcpy(write_buf, buf, mtd->writesize);
3623                 }
3624         }
3625
3626         return nand_prog_page_op(chip, page, 0, write_buf, size);
3627 }
3628 EXPORT_SYMBOL(nand_monolithic_write_page_raw);
3629
3630 /**
3631  * nand_write_page_raw_syndrome - [INTERN] raw page write function
3632  * @chip: nand chip info structure
3633  * @buf: data buffer
3634  * @oob_required: must write chip->oob_poi to OOB
3635  * @page: page number to write
3636  *
3637  * We need a special oob layout and handling even when ECC isn't checked.
3638  */
static int nand_write_page_raw_syndrome(struct nand_chip *chip,
					const uint8_t *buf, int oob_required,
					int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	uint8_t *oob = chip->oob_poi;
	int steps, size, ret;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	/*
	 * Interleave data and OOB chunks in the on-flash syndrome layout:
	 * data | prepad | ecc | postpad, repeated per ECC step.
	 */
	for (steps = chip->ecc.steps; steps > 0; steps--) {
		ret = nand_write_data_op(chip, buf, eccsize, false);
		if (ret)
			return ret;

		buf += eccsize;

		if (chip->ecc.prepad) {
			ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
						 false);
			if (ret)
				return ret;

			oob += chip->ecc.prepad;
		}

		/* Raw mode: ECC bytes come straight from oob_poi */
		ret = nand_write_data_op(chip, oob, eccbytes, false);
		if (ret)
			return ret;

		oob += eccbytes;

		if (chip->ecc.postpad) {
			ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
						 false);
			if (ret)
				return ret;

			oob += chip->ecc.postpad;
		}
	}

	/* Write the remaining free OOB bytes after the last chunk */
	size = mtd->oobsize - (oob - chip->oob_poi);
	if (size) {
		ret = nand_write_data_op(chip, oob, size, false);
		if (ret)
			return ret;
	}

	return nand_prog_page_end_op(chip);
}
3694 /**
3695  * nand_write_page_swecc - [REPLACEABLE] software ECC based page write function
3696  * @chip: nand chip info structure
3697  * @buf: data buffer
3698  * @oob_required: must write chip->oob_poi to OOB
3699  * @page: page number to write
3700  */
3701 static int nand_write_page_swecc(struct nand_chip *chip, const uint8_t *buf,
3702                                  int oob_required, int page)
3703 {
3704         struct mtd_info *mtd = nand_to_mtd(chip);
3705         int i, eccsize = chip->ecc.size, ret;
3706         int eccbytes = chip->ecc.bytes;
3707         int eccsteps = chip->ecc.steps;
3708         uint8_t *ecc_calc = chip->ecc.calc_buf;
3709         const uint8_t *p = buf;
3710
3711         /* Software ECC calculation */
3712         for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
3713                 chip->ecc.calculate(chip, p, &ecc_calc[i]);
3714
3715         ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
3716                                          chip->ecc.total);
3717         if (ret)
3718                 return ret;
3719
3720         return chip->ecc.write_page_raw(chip, buf, 1, page);
3721 }
3722
3723 /**
3724  * nand_write_page_hwecc - [REPLACEABLE] hardware ECC based page write function
3725  * @chip: nand chip info structure
3726  * @buf: data buffer
3727  * @oob_required: must write chip->oob_poi to OOB
3728  * @page: page number to write
3729  */
static int nand_write_page_hwecc(struct nand_chip *chip, const uint8_t *buf,
				 int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size, ret;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	const uint8_t *p = buf;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		/* Arm the HW ECC engine, then feed it one data chunk */
		chip->ecc.hwctl(chip, NAND_ECC_WRITE);

		ret = nand_write_data_op(chip, p, eccsize, false);
		if (ret)
			return ret;

		/* Collect the ECC the hardware computed for this chunk */
		chip->ecc.calculate(chip, p, &ecc_calc[i]);
	}

	/* Place the collected ECC bytes into the OOB layout */
	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
	if (ret)
		return ret;

	return nand_prog_page_end_op(chip);
}
3765
3766
3767 /**
3768  * nand_write_subpage_hwecc - [REPLACEABLE] hardware ECC based subpage write
3769  * @chip:       nand chip info structure
3770  * @offset:     column address of subpage within the page
3771  * @data_len:   data length
3772  * @buf:        data buffer
3773  * @oob_required: must write chip->oob_poi to OOB
3774  * @page: page number to write
3775  */
static int nand_write_subpage_hwecc(struct nand_chip *chip, uint32_t offset,
				    uint32_t data_len, const uint8_t *buf,
				    int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	uint8_t *oob_buf  = chip->oob_poi;
	uint8_t *ecc_calc = chip->ecc.calc_buf;
	int ecc_size      = chip->ecc.size;
	int ecc_bytes     = chip->ecc.bytes;
	int ecc_steps     = chip->ecc.steps;
	/* First and last ECC step actually covered by [offset, offset+len) */
	uint32_t start_step = offset / ecc_size;
	uint32_t end_step   = (offset + data_len - 1) / ecc_size;
	int oob_bytes       = mtd->oobsize / ecc_steps;
	int step, ret;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	for (step = 0; step < ecc_steps; step++) {
		/* configure controller for WRITE access */
		chip->ecc.hwctl(chip, NAND_ECC_WRITE);

		/* write data (untouched subpages already masked by 0xFF) */
		ret = nand_write_data_op(chip, buf, ecc_size, false);
		if (ret)
			return ret;

		/* mask ECC of un-touched subpages by padding 0xFF */
		if ((step < start_step) || (step > end_step))
			memset(ecc_calc, 0xff, ecc_bytes);
		else
			chip->ecc.calculate(chip, buf, ecc_calc);

		/* mask OOB of un-touched subpages by padding 0xFF */
		/* if oob_required, preserve OOB metadata of written subpage */
		if (!oob_required || (step < start_step) || (step > end_step))
			memset(oob_buf, 0xff, oob_bytes);

		buf += ecc_size;
		ecc_calc += ecc_bytes;
		oob_buf  += oob_bytes;
	}

	/* copy calculated ECC for whole page to chip->buffer->oob */
	/* this include masked-value(0xFF) for unwritten subpages */
	ecc_calc = chip->ecc.calc_buf;
	ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
					 chip->ecc.total);
	if (ret)
		return ret;

	/* write OOB buffer to NAND device */
	ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
	if (ret)
		return ret;

	return nand_prog_page_end_op(chip);
}
3835
3836
3837 /**
3838  * nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page write
3839  * @chip: nand chip info structure
3840  * @buf: data buffer
3841  * @oob_required: must write chip->oob_poi to OOB
3842  * @page: page number to write
3843  *
3844  * The hw generator calculates the error syndrome automatically. Therefore we
3845  * need a special oob layout and handling.
3846  */
static int nand_write_page_syndrome(struct nand_chip *chip, const uint8_t *buf,
				    int oob_required, int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int i, eccsize = chip->ecc.size;
	int eccbytes = chip->ecc.bytes;
	int eccsteps = chip->ecc.steps;
	const uint8_t *p = buf;
	uint8_t *oob = chip->oob_poi;
	int ret;

	ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
	if (ret)
		return ret;

	/*
	 * Per step: arm HW engine, write data, then write
	 * prepad | ecc | postpad in the syndrome layout.
	 */
	for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
		chip->ecc.hwctl(chip, NAND_ECC_WRITE);

		ret = nand_write_data_op(chip, p, eccsize, false);
		if (ret)
			return ret;

		if (chip->ecc.prepad) {
			ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
						 false);
			if (ret)
				return ret;

			oob += chip->ecc.prepad;
		}

		/* Fetch the syndrome the HW computed for this chunk */
		chip->ecc.calculate(chip, p, oob);

		ret = nand_write_data_op(chip, oob, eccbytes, false);
		if (ret)
			return ret;

		oob += eccbytes;

		if (chip->ecc.postpad) {
			ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
						 false);
			if (ret)
				return ret;

			oob += chip->ecc.postpad;
		}
	}

	/* Calculate remaining oob bytes */
	i = mtd->oobsize - (oob - chip->oob_poi);
	if (i) {
		ret = nand_write_data_op(chip, oob, i, false);
		if (ret)
			return ret;
	}

	return nand_prog_page_end_op(chip);
}
3906
3907 /**
3908  * nand_write_page - write one page
3909  * @chip: NAND chip descriptor
3910  * @offset: address offset within the page
3911  * @data_len: length of actual data to be written
3912  * @buf: the data to write
3913  * @oob_required: must write chip->oob_poi to OOB
3914  * @page: page number to write
3915  * @raw: use _raw version of write_page
3916  */
3917 static int nand_write_page(struct nand_chip *chip, uint32_t offset,
3918                            int data_len, const uint8_t *buf, int oob_required,
3919                            int page, int raw)
3920 {
3921         struct mtd_info *mtd = nand_to_mtd(chip);
3922         int status, subpage;
3923
3924         if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
3925                 chip->ecc.write_subpage)
3926                 subpage = offset || (data_len < mtd->writesize);
3927         else
3928                 subpage = 0;
3929
3930         if (unlikely(raw))
3931                 status = chip->ecc.write_page_raw(chip, buf, oob_required,
3932                                                   page);
3933         else if (subpage)
3934                 status = chip->ecc.write_subpage(chip, offset, data_len, buf,
3935                                                  oob_required, page);
3936         else
3937                 status = chip->ecc.write_page(chip, buf, oob_required, page);
3938
3939         if (status < 0)
3940                 return status;
3941
3942         return 0;
3943 }
3944
/* True if x is not aligned to the chip's subpage size (a power of two) */
#define NOTALIGNED(x)	((x & (chip->subpagesize - 1)) != 0)
3946
3947 /**
3948  * nand_do_write_ops - [INTERN] NAND write with ECC
3949  * @chip: NAND chip object
3950  * @to: offset to write to
3951  * @ops: oob operations description structure
3952  *
3953  * NAND write with ECC.
3954  */
static int nand_do_write_ops(struct nand_chip *chip, loff_t to,
			     struct mtd_oob_ops *ops)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	int chipnr, realpage, page, column;
	uint32_t writelen = ops->len;

	uint32_t oobwritelen = ops->ooblen;
	uint32_t oobmaxlen = mtd_oobavail(mtd, ops);

	uint8_t *oob = ops->oobbuf;
	uint8_t *buf = ops->datbuf;
	int ret;
	int oob_required = oob ? 1 : 0;

	ops->retlen = 0;
	if (!writelen)
		return 0;

	/* Reject writes, which are not page aligned */
	if (NOTALIGNED(to) || NOTALIGNED(ops->len)) {
		pr_notice("%s: attempt to write non page aligned data\n",
			   __func__);
		return -EINVAL;
	}

	/* Byte offset within the first page */
	column = to & (mtd->writesize - 1);

	chipnr = (int)(to >> chip->chip_shift);
	nand_select_target(chip, chipnr);

	/* Check, if it is write protected */
	if (nand_check_wp(chip)) {
		ret = -EIO;
		goto err_out;
	}

	realpage = (int)(to >> chip->page_shift);
	page = realpage & chip->pagemask;

	/* Invalidate the page cache, when we write to the cached page */
	if (to <= ((loff_t)chip->pagecache.page << chip->page_shift) &&
	    ((loff_t)chip->pagecache.page << chip->page_shift) < (to + ops->len))
		chip->pagecache.page = -1;

	/* Don't allow multipage oob writes with offset */
	if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) {
		ret = -EINVAL;
		goto err_out;
	}

	/* Main loop: program one page per iteration */
	while (1) {
		int bytes = mtd->writesize;
		uint8_t *wbuf = buf;
		int use_bounce_buf;
		int part_pagewr = (column || writelen < mtd->writesize);

		/*
		 * A bounce buffer is needed for partial-page writes (to pad
		 * with 0xff) and for DMA controllers when the caller's buffer
		 * is not DMA-able or not sufficiently aligned.
		 */
		if (part_pagewr)
			use_bounce_buf = 1;
		else if (chip->options & NAND_USES_DMA)
			use_bounce_buf = !virt_addr_valid(buf) ||
					 !IS_ALIGNED((unsigned long)buf,
						     chip->buf_align);
		else
			use_bounce_buf = 0;

		/*
		 * Copy the data from the initial buffer when doing partial page
		 * writes or when a bounce buffer is required.
		 */
		if (use_bounce_buf) {
			pr_debug("%s: using write bounce buffer for buf@%p\n",
					 __func__, buf);
			if (part_pagewr)
				bytes = min_t(int, bytes - column, writelen);
			wbuf = nand_get_data_buf(chip);
			memset(wbuf, 0xff, mtd->writesize);
			memcpy(&wbuf[column], buf, bytes);
		}

		if (unlikely(oob)) {
			size_t len = min(oobwritelen, oobmaxlen);
			oob = nand_fill_oob(chip, oob, len, ops);
			oobwritelen -= len;
		} else {
			/* We still need to erase leftover OOB data */
			memset(chip->oob_poi, 0xff, mtd->oobsize);
		}

		ret = nand_write_page(chip, column, bytes, wbuf,
				      oob_required, page,
				      (ops->mode == MTD_OPS_RAW));
		if (ret)
			break;

		writelen -= bytes;
		if (!writelen)
			break;

		/* Subsequent pages start at column 0 */
		column = 0;
		buf += bytes;
		realpage++;

		page = realpage & chip->pagemask;
		/* Check, if we cross a chip boundary */
		if (!page) {
			chipnr++;
			nand_deselect_target(chip);
			nand_select_target(chip, chipnr);
		}
	}

	ops->retlen = ops->len - writelen;
	if (unlikely(oob))
		ops->oobretlen = ops->ooblen;

err_out:
	nand_deselect_target(chip);
	return ret;
}
4075
4076 /**
4077  * panic_nand_write - [MTD Interface] NAND write with ECC
4078  * @mtd: MTD device structure
4079  * @to: offset to write to
4080  * @len: number of bytes to write
4081  * @retlen: pointer to variable to store the number of written bytes
4082  * @buf: the data to write
4083  *
4084  * NAND write with ECC. Used when performing writes in interrupt context, this
4085  * may for example be called by mtdoops when writing an oops while in panic.
4086  */
4087 static int panic_nand_write(struct mtd_info *mtd, loff_t to, size_t len,
4088                             size_t *retlen, const uint8_t *buf)
4089 {
4090         struct nand_chip *chip = mtd_to_nand(mtd);
4091         int chipnr = (int)(to >> chip->chip_shift);
4092         struct mtd_oob_ops ops;
4093         int ret;
4094
4095         nand_select_target(chip, chipnr);
4096
4097         /* Wait for the device to get ready */
4098         panic_nand_wait(chip, 400);
4099
4100         memset(&ops, 0, sizeof(ops));
4101         ops.len = len;
4102         ops.datbuf = (uint8_t *)buf;
4103         ops.mode = MTD_OPS_PLACE_OOB;
4104
4105         ret = nand_do_write_ops(chip, to, &ops);
4106
4107         *retlen = ops.retlen;
4108         return ret;
4109 }
4110
4111 /**
4112  * nand_write_oob - [MTD Interface] NAND write data and/or out-of-band
4113  * @mtd: MTD device structure
4114  * @to: offset to write to
4115  * @ops: oob operation description structure
4116  */
4117 static int nand_write_oob(struct mtd_info *mtd, loff_t to,
4118                           struct mtd_oob_ops *ops)
4119 {
4120         struct nand_chip *chip = mtd_to_nand(mtd);
4121         int ret = 0;
4122
4123         ops->retlen = 0;
4124
4125         nand_get_device(chip);
4126
4127         switch (ops->mode) {
4128         case MTD_OPS_PLACE_OOB:
4129         case MTD_OPS_AUTO_OOB:
4130         case MTD_OPS_RAW:
4131                 break;
4132
4133         default:
4134                 goto out;
4135         }
4136
4137         if (!ops->datbuf)
4138                 ret = nand_do_write_oob(chip, to, ops);
4139         else
4140                 ret = nand_do_write_ops(chip, to, ops);
4141
4142 out:
4143         nand_release_device(chip);
4144         return ret;
4145 }
4146
4147 /**
4148  * nand_erase - [MTD Interface] erase block(s)
4149  * @mtd: MTD device structure
4150  * @instr: erase instruction
4151  *
 * Erase one or more blocks.
4153  */
static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	/* MTD-facing wrapper: never allow erasing the BBT area */
	return nand_erase_nand(chip, instr, 0);
}
4158
/**
 * nand_erase_nand - [INTERN] erase block(s)
 * @chip: NAND chip object
 * @instr: erase instruction
 * @allowbbt: allow erasing the bbt area
 *
 * Erase one or more blocks. On failure, @instr->fail_addr is set to the
 * offset of the first block that could not be erased.
 */
int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr,
		    int allowbbt)
{
	int page, pages_per_block, ret, chipnr;
	loff_t len;

	pr_debug("%s: start = 0x%012llx, len = %llu\n",
			__func__, (unsigned long long)instr->addr,
			(unsigned long long)instr->len);

	/* addr/len must be block-aligned and within the device */
	if (check_offs_len(chip, instr->addr, instr->len))
		return -EINVAL;

	/* Grab the lock and see if the device is available */
	nand_get_device(chip);

	/* Shift to get first page */
	page = (int)(instr->addr >> chip->page_shift);
	chipnr = (int)(instr->addr >> chip->chip_shift);

	/* Calculate pages in each block */
	pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);

	/* Select the NAND device */
	nand_select_target(chip, chipnr);

	/* Check, if it is write protected */
	if (nand_check_wp(chip)) {
		pr_debug("%s: device is write protected!\n",
				__func__);
		ret = -EIO;
		goto erase_exit;
	}

	/* Loop through the pages */
	len = instr->len;

	while (len) {
		/* Check if we have a bad block, we do not erase bad blocks! */
		if (nand_block_checkbad(chip, ((loff_t) page) <<
					chip->page_shift, allowbbt)) {
			pr_warn("%s: attempt to erase a bad block at page 0x%08x\n",
				    __func__, page);
			ret = -EIO;
			goto erase_exit;
		}

		/*
		 * Invalidate the page cache, if we erase the block which
		 * contains the current cached page.
		 */
		if (page <= chip->pagecache.page && chip->pagecache.page <
		    (page + pages_per_block))
			chip->pagecache.page = -1;

		/* nand_erase_op() takes an eraseblock index local to the die */
		ret = nand_erase_op(chip, (page & chip->pagemask) >>
				    (chip->phys_erase_shift - chip->page_shift));
		if (ret) {
			pr_debug("%s: failed erase, page 0x%08x\n",
					__func__, page);
			instr->fail_addr =
				((loff_t)page << chip->page_shift);
			goto erase_exit;
		}

		/* Increment page address and decrement length */
		len -= (1ULL << chip->phys_erase_shift);
		page += pages_per_block;

		/* Check, if we cross a chip boundary */
		if (len && !(page & chip->pagemask)) {
			chipnr++;
			nand_deselect_target(chip);
			nand_select_target(chip, chipnr);
		}
	}

	ret = 0;
erase_exit:

	/* Deselect and wake up anyone waiting on the device */
	nand_deselect_target(chip);
	nand_release_device(chip);

	/* Return more or less happy */
	return ret;
}
4254
/**
 * nand_sync - [MTD Interface] sync
 * @mtd: MTD device structure
 *
 * Sync is actually a wait for chip ready function.
 */
static void nand_sync(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);

	pr_debug("%s: called\n", __func__);

	/*
	 * Acquiring the controller waits for any operation in flight to
	 * finish; there is nothing else to flush, so release it again
	 * immediately.
	 */
	nand_get_device(chip);
	nand_release_device(chip);
}
4272
4273 /**
4274  * nand_block_isbad - [MTD Interface] Check if block at offset is bad
4275  * @mtd: MTD device structure
4276  * @offs: offset relative to mtd start
4277  */
4278 static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
4279 {
4280         struct nand_chip *chip = mtd_to_nand(mtd);
4281         int chipnr = (int)(offs >> chip->chip_shift);
4282         int ret;
4283
4284         /* Select the NAND device */
4285         nand_get_device(chip);
4286
4287         nand_select_target(chip, chipnr);
4288
4289         ret = nand_block_checkbad(chip, offs, 0);
4290
4291         nand_deselect_target(chip);
4292         nand_release_device(chip);
4293
4294         return ret;
4295 }
4296
4297 /**
4298  * nand_block_markbad - [MTD Interface] Mark block at the given offset as bad
4299  * @mtd: MTD device structure
4300  * @ofs: offset relative to mtd start
4301  */
4302 static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
4303 {
4304         int ret;
4305
4306         ret = nand_block_isbad(mtd, ofs);
4307         if (ret) {
4308                 /* If it was bad already, return success and do nothing */
4309                 if (ret > 0)
4310                         return 0;
4311                 return ret;
4312         }
4313
4314         return nand_block_markbad_lowlevel(mtd_to_nand(mtd), ofs);
4315 }
4316
4317 /**
4318  * nand_suspend - [MTD Interface] Suspend the NAND flash
4319  * @mtd: MTD device structure
4320  *
4321  * Returns 0 for success or negative error code otherwise.
4322  */
4323 static int nand_suspend(struct mtd_info *mtd)
4324 {
4325         struct nand_chip *chip = mtd_to_nand(mtd);
4326         int ret = 0;
4327
4328         mutex_lock(&chip->lock);
4329         if (chip->ops.suspend)
4330                 ret = chip->ops.suspend(chip);
4331         if (!ret)
4332                 chip->suspended = 1;
4333         mutex_unlock(&chip->lock);
4334
4335         return ret;
4336 }
4337
4338 /**
4339  * nand_resume - [MTD Interface] Resume the NAND flash
4340  * @mtd: MTD device structure
4341  */
4342 static void nand_resume(struct mtd_info *mtd)
4343 {
4344         struct nand_chip *chip = mtd_to_nand(mtd);
4345
4346         mutex_lock(&chip->lock);
4347         if (chip->suspended) {
4348                 if (chip->ops.resume)
4349                         chip->ops.resume(chip);
4350                 chip->suspended = 0;
4351         } else {
4352                 pr_err("%s called for a chip which is not in suspended state\n",
4353                         __func__);
4354         }
4355         mutex_unlock(&chip->lock);
4356
4357         wake_up_all(&chip->resume_wq);
4358 }
4359
/**
 * nand_shutdown - [MTD Interface] Finish the current NAND operation and
 *                 prevent further operations
 * @mtd: MTD device structure
 */
static void nand_shutdown(struct mtd_info *mtd)
{
	/* Suspending waits for any running operation and blocks new ones */
	nand_suspend(mtd);
}
4369
4370 /**
4371  * nand_lock - [MTD Interface] Lock the NAND flash
4372  * @mtd: MTD device structure
4373  * @ofs: offset byte address
4374  * @len: number of bytes to lock (must be a multiple of block/page size)
4375  */
4376 static int nand_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
4377 {
4378         struct nand_chip *chip = mtd_to_nand(mtd);
4379
4380         if (!chip->ops.lock_area)
4381                 return -ENOTSUPP;
4382
4383         return chip->ops.lock_area(chip, ofs, len);
4384 }
4385
4386 /**
4387  * nand_unlock - [MTD Interface] Unlock the NAND flash
4388  * @mtd: MTD device structure
4389  * @ofs: offset byte address
4390  * @len: number of bytes to unlock (must be a multiple of block/page size)
4391  */
4392 static int nand_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
4393 {
4394         struct nand_chip *chip = mtd_to_nand(mtd);
4395
4396         if (!chip->ops.unlock_area)
4397                 return -ENOTSUPP;
4398
4399         return chip->ops.unlock_area(chip, ofs, len);
4400 }
4401
4402 /* Set default functions */
4403 static void nand_set_defaults(struct nand_chip *chip)
4404 {
4405         /* If no controller is provided, use the dummy, legacy one. */
4406         if (!chip->controller) {
4407                 chip->controller = &chip->legacy.dummy_controller;
4408                 nand_controller_init(chip->controller);
4409         }
4410
4411         nand_legacy_set_defaults(chip);
4412
4413         if (!chip->buf_align)
4414                 chip->buf_align = 1;
4415 }
4416
4417 /* Sanitize ONFI strings so we can safely print them */
4418 void sanitize_string(uint8_t *s, size_t len)
4419 {
4420         ssize_t i;
4421
4422         /* Null terminate */
4423         s[len - 1] = 0;
4424
4425         /* Remove non printable chars */
4426         for (i = 0; i < len - 1; i++) {
4427                 if (s[i] < ' ' || s[i] > 127)
4428                         s[i] = '?';
4429         }
4430
4431         /* Remove trailing spaces */
4432         strim(s);
4433 }
4434
/*
 * nand_id_has_period - Check if an ID string has a given wraparound period
 * @id_data: the ID string
 * @arrlen: the length of the @id_data array
 * @period: the period of repetition
 *
 * Check if an ID string is repeated within a given sequence of bytes at
 * specific repetition interval period (e.g., {0x20,0x01,0x7F,0x20} has a
 * period of 3). This is a helper function for nand_id_len(). Returns non-zero
 * if the repetition has a period of @period; otherwise, returns zero.
 */
static int nand_id_has_period(u8 *id_data, int arrlen, int period)
{
	int i;

	/* Every byte must match the one @period positions before it */
	for (i = period; i < arrlen; i++)
		if (id_data[i] != id_data[i - period])
			return 0;

	return 1;
}
4455
4456 /*
4457  * nand_id_len - Get the length of an ID string returned by CMD_READID
4458  * @id_data: the ID string
4459  * @arrlen: the length of the @id_data array
4460
4461  * Returns the length of the ID string, according to known wraparound/trailing
4462  * zero patterns. If no pattern exists, returns the length of the array.
4463  */
4464 static int nand_id_len(u8 *id_data, int arrlen)
4465 {
4466         int last_nonzero, period;
4467
4468         /* Find last non-zero byte */
4469         for (last_nonzero = arrlen - 1; last_nonzero >= 0; last_nonzero--)
4470                 if (id_data[last_nonzero])
4471                         break;
4472
4473         /* All zeros */
4474         if (last_nonzero < 0)
4475                 return 0;
4476
4477         /* Calculate wraparound period */
4478         for (period = 1; period < arrlen; period++)
4479                 if (nand_id_has_period(id_data, arrlen, period))
4480                         break;
4481
4482         /* There's a repeated pattern */
4483         if (period < arrlen)
4484                 return period;
4485
4486         /* There are trailing zeros */
4487         if (last_nonzero < arrlen - 1)
4488                 return last_nonzero + 1;
4489
4490         /* No pattern detected */
4491         return arrlen;
4492 }
4493
/* Extract the bits of per cell from the 3rd byte of the extended ID */
static int nand_get_bits_per_cell(u8 cellinfo)
{
	/* The encoded field stores (bits-per-cell - 1) */
	return ((cellinfo & NAND_CI_CELLTYPE_MSK) >> NAND_CI_CELLTYPE_SHIFT) + 1;
}
4503
/*
 * Many new NAND share similar device ID codes, which represent the size of the
 * chip. The rest of the parameters must be decoded according to generic or
 * manufacturer-specific "extended ID" decoding patterns.
 *
 * The 4th ID byte is consumed two bits at a time, least-significant bits
 * first: page size, OOB size, block size, then bus width.
 */
void nand_decode_ext_id(struct nand_chip *chip)
{
	struct nand_memory_organization *memorg;
	struct mtd_info *mtd = nand_to_mtd(chip);
	int extid;
	u8 *id_data = chip->id.data;

	memorg = nanddev_get_memorg(&chip->base);

	/* The 3rd id byte holds MLC / multichip data */
	memorg->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
	/* The 4th id byte is the important one */
	extid = id_data[3];

	/* Calc pagesize: 1KiB << bits[1:0] */
	memorg->pagesize = 1024 << (extid & 0x03);
	mtd->writesize = memorg->pagesize;
	extid >>= 2;
	/* Calc oobsize: 8 or 16 spare bytes per 512 bytes of data */
	memorg->oobsize = (8 << (extid & 0x01)) * (mtd->writesize >> 9);
	mtd->oobsize = memorg->oobsize;
	extid >>= 2;
	/* Calc blocksize. Blocksize is multiples of 64KiB */
	memorg->pages_per_eraseblock = ((64 * 1024) << (extid & 0x03)) /
				       memorg->pagesize;
	mtd->erasesize = (64 * 1024) << (extid & 0x03);
	extid >>= 2;
	/* Get buswidth information */
	if (extid & 0x1)
		chip->options |= NAND_BUSWIDTH_16;
}
EXPORT_SYMBOL_GPL(nand_decode_ext_id);
4541
4542 /*
4543  * Old devices have chip data hardcoded in the device ID table. nand_decode_id
4544  * decodes a matching ID table entry and assigns the MTD size parameters for
4545  * the chip.
4546  */
4547 static void nand_decode_id(struct nand_chip *chip, struct nand_flash_dev *type)
4548 {
4549         struct mtd_info *mtd = nand_to_mtd(chip);
4550         struct nand_memory_organization *memorg;
4551
4552         memorg = nanddev_get_memorg(&chip->base);
4553
4554         memorg->pages_per_eraseblock = type->erasesize / type->pagesize;
4555         mtd->erasesize = type->erasesize;
4556         memorg->pagesize = type->pagesize;
4557         mtd->writesize = memorg->pagesize;
4558         memorg->oobsize = memorg->pagesize / 32;
4559         mtd->oobsize = memorg->oobsize;
4560
4561         /* All legacy ID NAND are small-page, SLC */
4562         memorg->bits_per_cell = 1;
4563 }
4564
4565 /*
4566  * Set the bad block marker/indicator (BBM/BBI) patterns according to some
4567  * heuristic patterns using various detected parameters (e.g., manufacturer,
4568  * page size, cell-type information).
4569  */
4570 static void nand_decode_bbm_options(struct nand_chip *chip)
4571 {
4572         struct mtd_info *mtd = nand_to_mtd(chip);
4573
4574         /* Set the bad block position */
4575         if (mtd->writesize > 512 || (chip->options & NAND_BUSWIDTH_16))
4576                 chip->badblockpos = NAND_BBM_POS_LARGE;
4577         else
4578                 chip->badblockpos = NAND_BBM_POS_SMALL;
4579 }
4580
4581 static inline bool is_full_id_nand(struct nand_flash_dev *type)
4582 {
4583         return type->id_len;
4584 }
4585
4586 static bool find_full_id_nand(struct nand_chip *chip,
4587                               struct nand_flash_dev *type)
4588 {
4589         struct nand_device *base = &chip->base;
4590         struct nand_ecc_props requirements;
4591         struct mtd_info *mtd = nand_to_mtd(chip);
4592         struct nand_memory_organization *memorg;
4593         u8 *id_data = chip->id.data;
4594
4595         memorg = nanddev_get_memorg(&chip->base);
4596
4597         if (!strncmp(type->id, id_data, type->id_len)) {
4598                 memorg->pagesize = type->pagesize;
4599                 mtd->writesize = memorg->pagesize;
4600                 memorg->pages_per_eraseblock = type->erasesize /
4601                                                type->pagesize;
4602                 mtd->erasesize = type->erasesize;
4603                 memorg->oobsize = type->oobsize;
4604                 mtd->oobsize = memorg->oobsize;
4605
4606                 memorg->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
4607                 memorg->eraseblocks_per_lun =
4608                         DIV_ROUND_DOWN_ULL((u64)type->chipsize << 20,
4609                                            memorg->pagesize *
4610                                            memorg->pages_per_eraseblock);
4611                 chip->options |= type->options;
4612                 requirements.strength = NAND_ECC_STRENGTH(type);
4613                 requirements.step_size = NAND_ECC_STEP(type);
4614                 nanddev_set_ecc_requirements(base, &requirements);
4615
4616                 chip->parameters.model = kstrdup(type->name, GFP_KERNEL);
4617                 if (!chip->parameters.model)
4618                         return false;
4619
4620                 return true;
4621         }
4622         return false;
4623 }
4624
4625 /*
4626  * Manufacturer detection. Only used when the NAND is not ONFI or JEDEC
4627  * compliant and does not have a full-id or legacy-id entry in the nand_ids
4628  * table.
4629  */
4630 static void nand_manufacturer_detect(struct nand_chip *chip)
4631 {
4632         /*
4633          * Try manufacturer detection if available and use
4634          * nand_decode_ext_id() otherwise.
4635          */
4636         if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
4637             chip->manufacturer.desc->ops->detect) {
4638                 struct nand_memory_organization *memorg;
4639
4640                 memorg = nanddev_get_memorg(&chip->base);
4641
4642                 /* The 3rd id byte holds MLC / multichip data */
4643                 memorg->bits_per_cell = nand_get_bits_per_cell(chip->id.data[2]);
4644                 chip->manufacturer.desc->ops->detect(chip);
4645         } else {
4646                 nand_decode_ext_id(chip);
4647         }
4648 }
4649
4650 /*
4651  * Manufacturer initialization. This function is called for all NANDs including
4652  * ONFI and JEDEC compliant ones.
4653  * Manufacturer drivers should put all their specific initialization code in
4654  * their ->init() hook.
4655  */
4656 static int nand_manufacturer_init(struct nand_chip *chip)
4657 {
4658         if (!chip->manufacturer.desc || !chip->manufacturer.desc->ops ||
4659             !chip->manufacturer.desc->ops->init)
4660                 return 0;
4661
4662         return chip->manufacturer.desc->ops->init(chip);
4663 }
4664
4665 /*
4666  * Manufacturer cleanup. This function is called for all NANDs including
4667  * ONFI and JEDEC compliant ones.
4668  * Manufacturer drivers should put all their specific cleanup code in their
4669  * ->cleanup() hook.
4670  */
4671 static void nand_manufacturer_cleanup(struct nand_chip *chip)
4672 {
4673         /* Release manufacturer private data */
4674         if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
4675             chip->manufacturer.desc->ops->cleanup)
4676                 chip->manufacturer.desc->ops->cleanup(chip);
4677 }
4678
/* Printable manufacturer name, with a fallback for unknown vendors */
static const char *
nand_manufacturer_name(const struct nand_manufacturer_desc *manufacturer_desc)
{
	if (!manufacturer_desc)
		return "Unknown";

	return manufacturer_desc->name;
}
4684
/*
 * Get the flash and manufacturer id and lookup if the type is supported.
 *
 * Identification order: full-id table entries, then a plain device-id table
 * match, then ONFI, then JEDEC, then manufacturer-specific/extended-ID
 * decoding. Returns 0 on success or a negative error code.
 */
static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
{
	const struct nand_manufacturer_desc *manufacturer_desc;
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_memory_organization *memorg;
	int busw, ret;
	u8 *id_data = chip->id.data;
	u8 maf_id, dev_id;
	u64 targetsize;

	/*
	 * Let's start by initializing memorg fields that might be left
	 * unassigned by the ID-based detection logic.
	 */
	memorg = nanddev_get_memorg(&chip->base);
	memorg->planes_per_lun = 1;
	memorg->luns_per_target = 1;

	/*
	 * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
	 * after power-up.
	 */
	ret = nand_reset(chip, 0);
	if (ret)
		return ret;

	/* Select the device */
	nand_select_target(chip, 0);

	/* Send the command for reading device ID */
	ret = nand_readid_op(chip, 0, id_data, 2);
	if (ret)
		return ret;

	/* Read manufacturer and device IDs */
	maf_id = id_data[0];
	dev_id = id_data[1];

	/*
	 * Try again to make sure, as some systems the bus-hold or other
	 * interface concerns can cause random data which looks like a
	 * possibly credible NAND flash to appear. If the two results do
	 * not match, ignore the device completely.
	 */

	/* Read entire ID string */
	ret = nand_readid_op(chip, 0, id_data, sizeof(chip->id.data));
	if (ret)
		return ret;

	if (id_data[0] != maf_id || id_data[1] != dev_id) {
		pr_info("second ID read did not match %02x,%02x against %02x,%02x\n",
			maf_id, dev_id, id_data[0], id_data[1]);
		return -ENODEV;
	}

	chip->id.len = nand_id_len(id_data, ARRAY_SIZE(chip->id.data));

	/* Try to identify manufacturer */
	manufacturer_desc = nand_get_manufacturer_desc(maf_id);
	chip->manufacturer.desc = manufacturer_desc;

	if (!type)
		type = nand_flash_ids;

	/*
	 * Save the NAND_BUSWIDTH_16 flag before letting auto-detection logic
	 * override it.
	 * This is required to make sure initial NAND bus width set by the
	 * NAND controller driver is coherent with the real NAND bus width
	 * (extracted by auto-detection code).
	 */
	busw = chip->options & NAND_BUSWIDTH_16;

	/*
	 * The flag is only set (never cleared), reset it to its default value
	 * before starting auto-detection.
	 */
	chip->options &= ~NAND_BUSWIDTH_16;

	/* Full-id entries take precedence over plain device-id matches */
	for (; type->name != NULL; type++) {
		if (is_full_id_nand(type)) {
			if (find_full_id_nand(chip, type))
				goto ident_done;
		} else if (dev_id == type->dev_id) {
			break;
		}
	}

	if (!type->name || !type->pagesize) {
		/* Check if the chip is ONFI compliant */
		ret = nand_onfi_detect(chip);
		if (ret < 0)
			return ret;
		else if (ret)
			goto ident_done;

		/* Check if the chip is JEDEC compliant */
		ret = nand_jedec_detect(chip);
		if (ret < 0)
			return ret;
		else if (ret)
			goto ident_done;
	}

	if (!type->name)
		return -ENODEV;

	chip->parameters.model = kstrdup(type->name, GFP_KERNEL);
	if (!chip->parameters.model)
		return -ENOMEM;

	/* Entries without a pagesize need extended-ID/manufacturer decoding */
	if (!type->pagesize)
		nand_manufacturer_detect(chip);
	else
		nand_decode_id(chip, type);

	/* Get chip options */
	chip->options |= type->options;

	memorg->eraseblocks_per_lun =
			DIV_ROUND_DOWN_ULL((u64)type->chipsize << 20,
					   memorg->pagesize *
					   memorg->pages_per_eraseblock);

ident_done:
	if (!mtd->name)
		mtd->name = chip->parameters.model;

	if (chip->options & NAND_BUSWIDTH_AUTO) {
		WARN_ON(busw & NAND_BUSWIDTH_16);
		nand_set_defaults(chip);
	} else if (busw != (chip->options & NAND_BUSWIDTH_16)) {
		/*
		 * Check, if buswidth is correct. Hardware drivers should set
		 * chip correct!
		 */
		pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
			maf_id, dev_id);
		pr_info("%s %s\n", nand_manufacturer_name(manufacturer_desc),
			mtd->name);
		pr_warn("bus width %d instead of %d bits\n", busw ? 16 : 8,
			(chip->options & NAND_BUSWIDTH_16) ? 16 : 8);
		ret = -EINVAL;

		goto free_detect_allocation;
	}

	nand_decode_bbm_options(chip);

	/* Calculate the address shift from the page size */
	chip->page_shift = ffs(mtd->writesize) - 1;
	/* Convert chipsize to number of pages per chip -1 */
	targetsize = nanddev_target_size(&chip->base);
	chip->pagemask = (targetsize >> chip->page_shift) - 1;

	chip->bbt_erase_shift = chip->phys_erase_shift =
		ffs(mtd->erasesize) - 1;
	/* ffs() works on 32-bit values, so handle >4GiB targets separately */
	if (targetsize & 0xffffffff)
		chip->chip_shift = ffs((unsigned)targetsize) - 1;
	else {
		chip->chip_shift = ffs((unsigned)(targetsize >> 32));
		chip->chip_shift += 32 - 1;
	}

	if (chip->chip_shift - chip->page_shift > 16)
		chip->options |= NAND_ROW_ADDR_3;

	chip->badblockbits = 8;

	nand_legacy_adjust_cmdfunc(chip);

	pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
		maf_id, dev_id);
	pr_info("%s %s\n", nand_manufacturer_name(manufacturer_desc),
		chip->parameters.model);
	pr_info("%d MiB, %s, erase size: %d KiB, page size: %d, OOB size: %d\n",
		(int)(targetsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
		mtd->erasesize >> 10, mtd->writesize, mtd->oobsize);
	return 0;

free_detect_allocation:
	kfree(chip->parameters.model);

	return ret;
}
4874
/*
 * Parse the legacy "nand-ecc-mode" DT property and map it onto the modern
 * nand_ecc_engine_type enumeration. Returns NAND_ECC_ENGINE_TYPE_INVALID
 * when the property is absent or unrecognized.
 */
static enum nand_ecc_engine_type
of_get_rawnand_ecc_engine_type_legacy(struct device_node *np)
{
	/* Legacy mode values, kept only for DT backward compatibility */
	enum nand_ecc_legacy_mode {
		NAND_ECC_INVALID,
		NAND_ECC_NONE,
		NAND_ECC_SOFT,
		NAND_ECC_SOFT_BCH,
		NAND_ECC_HW,
		NAND_ECC_HW_SYNDROME,
		NAND_ECC_ON_DIE,
	};
	const char * const nand_ecc_legacy_modes[] = {
		[NAND_ECC_NONE]		= "none",
		[NAND_ECC_SOFT]		= "soft",
		[NAND_ECC_SOFT_BCH]	= "soft_bch",
		[NAND_ECC_HW]		= "hw",
		[NAND_ECC_HW_SYNDROME]	= "hw_syndrome",
		[NAND_ECC_ON_DIE]	= "on-die",
	};
	enum nand_ecc_legacy_mode eng_type;
	const char *pm;
	int err;

	err = of_property_read_string(np, "nand-ecc-mode", &pm);
	if (err)
		return NAND_ECC_ENGINE_TYPE_INVALID;

	/* Translate the matching legacy string into the new engine type */
	for (eng_type = NAND_ECC_NONE;
	     eng_type < ARRAY_SIZE(nand_ecc_legacy_modes); eng_type++) {
		if (!strcasecmp(pm, nand_ecc_legacy_modes[eng_type])) {
			switch (eng_type) {
			case NAND_ECC_NONE:
				return NAND_ECC_ENGINE_TYPE_NONE;
			case NAND_ECC_SOFT:
			case NAND_ECC_SOFT_BCH:
				return NAND_ECC_ENGINE_TYPE_SOFT;
			case NAND_ECC_HW:
			case NAND_ECC_HW_SYNDROME:
				return NAND_ECC_ENGINE_TYPE_ON_HOST;
			case NAND_ECC_ON_DIE:
				return NAND_ECC_ENGINE_TYPE_ON_DIE;
			default:
				break;
			}
		}
	}

	return NAND_ECC_ENGINE_TYPE_INVALID;
}
4925
4926 static enum nand_ecc_placement
4927 of_get_rawnand_ecc_placement_legacy(struct device_node *np)
4928 {
4929         const char *pm;
4930         int err;
4931
4932         err = of_property_read_string(np, "nand-ecc-mode", &pm);
4933         if (!err) {
4934                 if (!strcasecmp(pm, "hw_syndrome"))
4935                         return NAND_ECC_PLACEMENT_INTERLEAVED;
4936         }
4937
4938         return NAND_ECC_PLACEMENT_UNKNOWN;
4939 }
4940
4941 static enum nand_ecc_algo of_get_rawnand_ecc_algo_legacy(struct device_node *np)
4942 {
4943         const char *pm;
4944         int err;
4945
4946         err = of_property_read_string(np, "nand-ecc-mode", &pm);
4947         if (!err) {
4948                 if (!strcasecmp(pm, "soft"))
4949                         return NAND_ECC_ALGO_HAMMING;
4950                 else if (!strcasecmp(pm, "soft_bch"))
4951                         return NAND_ECC_ALGO_BCH;
4952         }
4953
4954         return NAND_ECC_ALGO_UNKNOWN;
4955 }
4956
4957 static void of_get_nand_ecc_legacy_user_config(struct nand_chip *chip)
4958 {
4959         struct device_node *dn = nand_get_flash_node(chip);
4960         struct nand_ecc_props *user_conf = &chip->base.ecc.user_conf;
4961
4962         if (user_conf->engine_type == NAND_ECC_ENGINE_TYPE_INVALID)
4963                 user_conf->engine_type = of_get_rawnand_ecc_engine_type_legacy(dn);
4964
4965         if (user_conf->algo == NAND_ECC_ALGO_UNKNOWN)
4966                 user_conf->algo = of_get_rawnand_ecc_algo_legacy(dn);
4967
4968         if (user_conf->placement == NAND_ECC_PLACEMENT_UNKNOWN)
4969                 user_conf->placement = of_get_rawnand_ecc_placement_legacy(dn);
4970 }
4971
4972 static int of_get_nand_bus_width(struct device_node *np)
4973 {
4974         u32 val;
4975
4976         if (of_property_read_u32(np, "nand-bus-width", &val))
4977                 return 8;
4978
4979         switch (val) {
4980         case 8:
4981         case 16:
4982                 return val;
4983         default:
4984                 return -EIO;
4985         }
4986 }
4987
/* True when the DT requests a flash-based bad block table */
static bool of_get_nand_on_flash_bbt(struct device_node *np)
{
	return of_property_read_bool(np, "nand-on-flash-bbt");
}
4992
/*
 * Parse the generic raw NAND device tree properties (bus width, boot
 * medium flag, on-flash BBT, ECC configuration) and apply them to the
 * chip. A missing flash node is not an error.
 */
static int rawnand_dt_init(struct nand_chip *chip)
{
	struct nand_device *nand = mtd_to_nanddev(nand_to_mtd(chip));
	struct device_node *dn = nand_get_flash_node(chip);

	if (!dn)
		return 0;

	if (of_get_nand_bus_width(dn) == 16)
		chip->options |= NAND_BUSWIDTH_16;

	if (of_property_read_bool(dn, "nand-is-boot-medium"))
		chip->options |= NAND_IS_BOOT_MEDIUM;

	if (of_get_nand_on_flash_bbt(dn))
		chip->bbt_options |= NAND_BBT_USE_FLASH;

	/* New-style properties first, then legacy "nand-ecc-mode" fallback */
	of_get_nand_ecc_user_config(nand);
	of_get_nand_ecc_legacy_user_config(chip);

	/*
	 * If neither the user nor the NAND controller have requested a specific
	 * ECC engine type, we will default to NAND_ECC_ENGINE_TYPE_ON_HOST.
	 */
	nand->ecc.defaults.engine_type = NAND_ECC_ENGINE_TYPE_ON_HOST;

	/*
	 * Use the user requested engine type, unless there is none, in this
	 * case default to the NAND controller choice, otherwise fallback to
	 * the raw NAND default one.
	 */
	if (nand->ecc.user_conf.engine_type != NAND_ECC_ENGINE_TYPE_INVALID)
		chip->ecc.engine_type = nand->ecc.user_conf.engine_type;
	if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_INVALID)
		chip->ecc.engine_type = nand->ecc.defaults.engine_type;

	chip->ecc.placement = nand->ecc.user_conf.placement;
	chip->ecc.algo = nand->ecc.user_conf.algo;
	chip->ecc.strength = nand->ecc.user_conf.strength;
	chip->ecc.size = nand->ecc.user_conf.step_size;

	return 0;
}
5036
5037 /**
5038  * nand_scan_ident - Scan for the NAND device
5039  * @chip: NAND chip object
5040  * @maxchips: number of chips to scan for
5041  * @table: alternative NAND ID table
5042  *
5043  * This is the first phase of the normal nand_scan() function. It reads the
5044  * flash ID and sets up MTD fields accordingly.
5045  *
5046  * This helper used to be called directly from controller drivers that needed
5047  * to tweak some ECC-related parameters before nand_scan_tail(). This separation
 * prevented dynamic allocations during this phase which was inconvenient and
 * has been banned for the benefit of the ->init_ecc()/cleanup_ecc() hooks.
5050  */
static int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips,
			   struct nand_flash_dev *table)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_memory_organization *memorg;
	int nand_maf_id, nand_dev_id;
	unsigned int i;
	int ret;

	memorg = nanddev_get_memorg(&chip->base);

	/* Assume all dies are deselected when we enter nand_scan_ident(). */
	chip->cur_cs = -1;

	mutex_init(&chip->lock);
	init_waitqueue_head(&chip->resume_wq);

	/* Enforce the right timings for reset/detection */
	chip->current_interface_config = nand_get_reset_interface_config();

	/* Parse DT properties: bus width, BBT options, ECC configuration */
	ret = rawnand_dt_init(chip);
	if (ret)
		return ret;

	/* Fall back to the parent device name if MTD has no name yet */
	if (!mtd->name && mtd->dev.parent)
		mtd->name = dev_name(mtd->dev.parent);

	/* Set the default functions */
	nand_set_defaults(chip);

	ret = nand_legacy_check_hooks(chip);
	if (ret)
		return ret;

	/* Start from the caller-provided upper bound; trimmed below once the
	 * actual number of identical dies has been probed. */
	memorg->ntargets = maxchips;

	/* Read the flash type */
	ret = nand_detect(chip, table);
	if (ret) {
		if (!(chip->options & NAND_SCAN_SILENT_NODEV))
			pr_warn("No NAND device found\n");
		nand_deselect_target(chip);
		return ret;
	}

	/* Remember the first die's manufacturer/device ID pair for comparison */
	nand_maf_id = chip->id.data[0];
	nand_dev_id = chip->id.data[1];

	nand_deselect_target(chip);

	/* Check for a chip array */
	for (i = 1; i < maxchips; i++) {
		u8 id[2];

		/* See comment in nand_get_flash_type for reset */
		ret = nand_reset(chip, i);
		if (ret)
			break;

		nand_select_target(chip, i);
		/* Send the command for reading device ID */
		ret = nand_readid_op(chip, 0, id, sizeof(id));
		if (ret)
			break;
		/* Read manufacturer and device IDs */
		if (nand_maf_id != id[0] || nand_dev_id != id[1]) {
			nand_deselect_target(chip);
			break;
		}
		nand_deselect_target(chip);
	}
	if (i > 1)
		pr_info("%d chips detected\n", i);

	/* Store the number of chips and calc total size for mtd */
	memorg->ntargets = i;
	mtd->size = i * nanddev_target_size(&chip->base);

	return 0;
}
5131
5132 static void nand_scan_ident_cleanup(struct nand_chip *chip)
5133 {
5134         kfree(chip->parameters.model);
5135         kfree(chip->parameters.onfi);
5136 }
5137
/*
 * Fill in the default page/OOB accessors for on-host (hardware) ECC,
 * depending on where the controller places the ECC bytes.
 */
static int nand_set_ecc_on_host_ops(struct nand_chip *chip)
{
	struct nand_ecc_ctrl *ecc = &chip->ecc;

	switch (ecc->placement) {
	case NAND_ECC_PLACEMENT_UNKNOWN:
	case NAND_ECC_PLACEMENT_OOB:
		/* Use standard hwecc read page function? */
		if (!ecc->read_page)
			ecc->read_page = nand_read_page_hwecc;
		if (!ecc->write_page)
			ecc->write_page = nand_write_page_hwecc;
		if (!ecc->read_page_raw)
			ecc->read_page_raw = nand_read_page_raw;
		if (!ecc->write_page_raw)
			ecc->write_page_raw = nand_write_page_raw;
		if (!ecc->read_oob)
			ecc->read_oob = nand_read_oob_std;
		if (!ecc->write_oob)
			ecc->write_oob = nand_write_oob_std;
		if (!ecc->read_subpage)
			ecc->read_subpage = nand_read_subpage;
		/* Subpage writes need the per-step hwctl/calculate hooks */
		if (!ecc->write_subpage && ecc->hwctl && ecc->calculate)
			ecc->write_subpage = nand_write_subpage_hwecc;
		fallthrough;

	case NAND_ECC_PLACEMENT_INTERLEAVED:
		/*
		 * The default hwecc page accessors rely on the hwctl,
		 * calculate and correct hooks: reject configurations that
		 * provide neither those hooks nor custom page accessors.
		 */
		if ((!ecc->calculate || !ecc->correct || !ecc->hwctl) &&
		    (!ecc->read_page ||
		     ecc->read_page == nand_read_page_hwecc ||
		     !ecc->write_page ||
		     ecc->write_page == nand_write_page_hwecc)) {
			WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
			return -EINVAL;
		}
		/* Use standard syndrome read/write page function? */
		if (!ecc->read_page)
			ecc->read_page = nand_read_page_syndrome;
		if (!ecc->write_page)
			ecc->write_page = nand_write_page_syndrome;
		if (!ecc->read_page_raw)
			ecc->read_page_raw = nand_read_page_raw_syndrome;
		if (!ecc->write_page_raw)
			ecc->write_page_raw = nand_write_page_raw_syndrome;
		if (!ecc->read_oob)
			ecc->read_oob = nand_read_oob_syndrome;
		if (!ecc->write_oob)
			ecc->write_oob = nand_write_oob_syndrome;
		break;

	default:
		pr_warn("Invalid NAND_ECC_PLACEMENT %d\n",
			ecc->placement);
		return -EINVAL;
	}

	return 0;
}
5196
/*
 * Fill in the accessors and parameters for software ECC (Hamming or BCH).
 * Returns 0 on success or -EINVAL on an unsupported/incomplete setup.
 */
static int nand_set_ecc_soft_ops(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_device *nanddev = mtd_to_nanddev(mtd);
	struct nand_ecc_ctrl *ecc = &chip->ecc;

	if (WARN_ON(ecc->engine_type != NAND_ECC_ENGINE_TYPE_SOFT))
		return -EINVAL;

	switch (ecc->algo) {
	case NAND_ECC_ALGO_HAMMING:
		ecc->calculate = nand_calculate_ecc;
		ecc->correct = nand_correct_data;
		ecc->read_page = nand_read_page_swecc;
		ecc->read_subpage = nand_read_subpage;
		ecc->write_page = nand_write_page_swecc;
		if (!ecc->read_page_raw)
			ecc->read_page_raw = nand_read_page_raw;
		if (!ecc->write_page_raw)
			ecc->write_page_raw = nand_write_page_raw;
		ecc->read_oob = nand_read_oob_std;
		ecc->write_oob = nand_write_oob_std;
		if (!ecc->size)
			ecc->size = 256;
		/* Hamming: 3 ECC bytes per step, 1 correctable bit */
		ecc->bytes = 3;
		ecc->strength = 1;

		if (IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC))
			ecc->options |= NAND_ECC_SOFT_HAMMING_SM_ORDER;

		return 0;
	case NAND_ECC_ALGO_BCH:
		if (!mtd_nand_has_bch()) {
			WARN(1, "CONFIG_MTD_NAND_ECC_SW_BCH not enabled\n");
			return -EINVAL;
		}
		ecc->calculate = nand_bch_calculate_ecc;
		ecc->correct = nand_bch_correct_data;
		ecc->read_page = nand_read_page_swecc;
		ecc->read_subpage = nand_read_subpage;
		ecc->write_page = nand_write_page_swecc;
		if (!ecc->read_page_raw)
			ecc->read_page_raw = nand_read_page_raw;
		if (!ecc->write_page_raw)
			ecc->write_page_raw = nand_write_page_raw;
		ecc->read_oob = nand_read_oob_std;
		ecc->write_oob = nand_write_oob_std;

		/*
		 * Board driver should supply ecc.size and ecc.strength
		 * values to select how many bits are correctable.
		 * Otherwise, default to 4 bits for large page devices.
		 */
		if (!ecc->size && (mtd->oobsize >= 64)) {
			ecc->size = 512;
			ecc->strength = 4;
		}

		/*
		 * if no ecc placement scheme was provided pickup the default
		 * large page one.
		 */
		if (!mtd->ooblayout) {
			/* handle large page devices only */
			if (mtd->oobsize < 64) {
				WARN(1, "OOB layout is required when using software BCH on small pages\n");
				return -EINVAL;
			}

			mtd_set_ooblayout(mtd, nand_get_large_page_ooblayout());

		}

		/*
		 * We can only maximize ECC config when the default layout is
		 * used, otherwise we don't know how many bytes can really be
		 * used.
		 */
		if (mtd->ooblayout == nand_get_large_page_ooblayout() &&
		    nanddev->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH) {
			int steps, bytes;

			/* Always prefer 1k blocks over 512bytes ones */
			ecc->size = 1024;
			steps = mtd->writesize / ecc->size;

			/* Reserve 2 bytes for the BBM */
			bytes = (mtd->oobsize - 2) / steps;
			ecc->strength = bytes * 8 / fls(8 * ecc->size);
		}

		/* See nand_bch_init() for details. */
		ecc->bytes = 0;
		ecc->priv = nand_bch_init(mtd);
		if (!ecc->priv) {
			WARN(1, "BCH ECC initialization failed!\n");
			return -EINVAL;
		}
		return 0;
	default:
		WARN(1, "Unsupported ECC algorithm!\n");
		return -EINVAL;
	}
}
5301
5302 /**
5303  * nand_check_ecc_caps - check the sanity of preset ECC settings
5304  * @chip: nand chip info structure
5305  * @caps: ECC caps info structure
5306  * @oobavail: OOB size that the ECC engine can use
5307  *
5308  * When ECC step size and strength are already set, check if they are supported
5309  * by the controller and the calculated ECC bytes fit within the chip's OOB.
5310  * On success, the calculated ECC bytes is set.
5311  */
5312 static int
5313 nand_check_ecc_caps(struct nand_chip *chip,
5314                     const struct nand_ecc_caps *caps, int oobavail)
5315 {
5316         struct mtd_info *mtd = nand_to_mtd(chip);
5317         const struct nand_ecc_step_info *stepinfo;
5318         int preset_step = chip->ecc.size;
5319         int preset_strength = chip->ecc.strength;
5320         int ecc_bytes, nsteps = mtd->writesize / preset_step;
5321         int i, j;
5322
5323         for (i = 0; i < caps->nstepinfos; i++) {
5324                 stepinfo = &caps->stepinfos[i];
5325
5326                 if (stepinfo->stepsize != preset_step)
5327                         continue;
5328
5329                 for (j = 0; j < stepinfo->nstrengths; j++) {
5330                         if (stepinfo->strengths[j] != preset_strength)
5331                                 continue;
5332
5333                         ecc_bytes = caps->calc_ecc_bytes(preset_step,
5334                                                          preset_strength);
5335                         if (WARN_ON_ONCE(ecc_bytes < 0))
5336                                 return ecc_bytes;
5337
5338                         if (ecc_bytes * nsteps > oobavail) {
5339                                 pr_err("ECC (step, strength) = (%d, %d) does not fit in OOB",
5340                                        preset_step, preset_strength);
5341                                 return -ENOSPC;
5342                         }
5343
5344                         chip->ecc.bytes = ecc_bytes;
5345
5346                         return 0;
5347                 }
5348         }
5349
5350         pr_err("ECC (step, strength) = (%d, %d) not supported on this controller",
5351                preset_step, preset_strength);
5352
5353         return -ENOTSUPP;
5354 }
5355
5356 /**
5357  * nand_match_ecc_req - meet the chip's requirement with least ECC bytes
5358  * @chip: nand chip info structure
5359  * @caps: ECC engine caps info structure
5360  * @oobavail: OOB size that the ECC engine can use
5361  *
5362  * If a chip's ECC requirement is provided, try to meet it with the least
5363  * number of ECC bytes (i.e. with the largest number of OOB-free bytes).
5364  * On success, the chosen ECC settings are set.
5365  */
static int
nand_match_ecc_req(struct nand_chip *chip,
		   const struct nand_ecc_caps *caps, int oobavail)
{
	const struct nand_ecc_props *requirements =
		nanddev_get_ecc_requirements(&chip->base);
	struct mtd_info *mtd = nand_to_mtd(chip);
	const struct nand_ecc_step_info *stepinfo;
	int req_step = requirements->step_size;
	int req_strength = requirements->strength;
	int req_corr, step_size, strength, nsteps, ecc_bytes, ecc_bytes_total;
	int best_step, best_strength, best_ecc_bytes;
	/* INT_MAX sentinel: best_* are only read once a candidate lowers it */
	int best_ecc_bytes_total = INT_MAX;
	int i, j;

	/* No information provided by the NAND chip */
	if (!req_step || !req_strength)
		return -ENOTSUPP;

	/* number of correctable bits the chip requires in a page */
	req_corr = mtd->writesize / req_step * req_strength;

	for (i = 0; i < caps->nstepinfos; i++) {
		stepinfo = &caps->stepinfos[i];
		step_size = stepinfo->stepsize;

		for (j = 0; j < stepinfo->nstrengths; j++) {
			strength = stepinfo->strengths[j];

			/*
			 * If both step size and strength are smaller than the
			 * chip's requirement, it is not easy to compare the
			 * resulted reliability.
			 */
			if (step_size < req_step && strength < req_strength)
				continue;

			/* The page must split into a whole number of steps */
			if (mtd->writesize % step_size)
				continue;

			nsteps = mtd->writesize / step_size;

			ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
			if (WARN_ON_ONCE(ecc_bytes < 0))
				continue;
			ecc_bytes_total = ecc_bytes * nsteps;

			/* Must fit in OOB and meet the per-page correction need */
			if (ecc_bytes_total > oobavail ||
			    strength * nsteps < req_corr)
				continue;

			/*
			 * We assume the best is to meet the chip's requirement
			 * with the least number of ECC bytes.
			 */
			if (ecc_bytes_total < best_ecc_bytes_total) {
				best_ecc_bytes_total = ecc_bytes_total;
				best_step = step_size;
				best_strength = strength;
				best_ecc_bytes = ecc_bytes;
			}
		}
	}

	if (best_ecc_bytes_total == INT_MAX)
		return -ENOTSUPP;

	chip->ecc.size = best_step;
	chip->ecc.strength = best_strength;
	chip->ecc.bytes = best_ecc_bytes;

	return 0;
}
5439
5440 /**
5441  * nand_maximize_ecc - choose the max ECC strength available
5442  * @chip: nand chip info structure
5443  * @caps: ECC engine caps info structure
5444  * @oobavail: OOB size that the ECC engine can use
5445  *
5446  * Choose the max ECC strength that is supported on the controller, and can fit
5447  * within the chip's OOB.  On success, the chosen ECC settings are set.
5448  */
static int
nand_maximize_ecc(struct nand_chip *chip,
		  const struct nand_ecc_caps *caps, int oobavail)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	const struct nand_ecc_step_info *stepinfo;
	int step_size, strength, nsteps, ecc_bytes, corr;
	/* best_corr == 0 means no candidate was found at all */
	int best_corr = 0;
	int best_step = 0;
	int best_strength, best_ecc_bytes;
	int i, j;

	for (i = 0; i < caps->nstepinfos; i++) {
		stepinfo = &caps->stepinfos[i];
		step_size = stepinfo->stepsize;

		/* If chip->ecc.size is already set, respect it */
		if (chip->ecc.size && step_size != chip->ecc.size)
			continue;

		for (j = 0; j < stepinfo->nstrengths; j++) {
			strength = stepinfo->strengths[j];

			/* The page must split into a whole number of steps */
			if (mtd->writesize % step_size)
				continue;

			nsteps = mtd->writesize / step_size;

			ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
			if (WARN_ON_ONCE(ecc_bytes < 0))
				continue;

			/* Candidate must fit in the available OOB bytes */
			if (ecc_bytes * nsteps > oobavail)
				continue;

			corr = strength * nsteps;

			/*
			 * If the number of correctable bits is the same,
			 * bigger step_size has more reliability.
			 */
			if (corr > best_corr ||
			    (corr == best_corr && step_size > best_step)) {
				best_corr = corr;
				best_step = step_size;
				best_strength = strength;
				best_ecc_bytes = ecc_bytes;
			}
		}
	}

	if (!best_corr)
		return -ENOTSUPP;

	chip->ecc.size = best_step;
	chip->ecc.strength = best_strength;
	chip->ecc.bytes = best_ecc_bytes;

	return 0;
}
5509
5510 /**
5511  * nand_ecc_choose_conf - Set the ECC strength and ECC step size
5512  * @chip: nand chip info structure
5513  * @caps: ECC engine caps info structure
5514  * @oobavail: OOB size that the ECC engine can use
5515  *
5516  * Choose the ECC configuration according to following logic.
5517  *
5518  * 1. If both ECC step size and ECC strength are already set (usually by DT)
5519  *    then check if it is supported by this controller.
5520  * 2. If the user provided the nand-ecc-maximize property, then select maximum
5521  *    ECC strength.
5522  * 3. Otherwise, try to match the ECC step size and ECC strength closest
5523  *    to the chip's requirement. If available OOB size can't fit the chip
5524  *    requirement then fallback to the maximum ECC step size and ECC strength.
5525  *
5526  * On success, the chosen ECC settings are set.
5527  */
5528 int nand_ecc_choose_conf(struct nand_chip *chip,
5529                          const struct nand_ecc_caps *caps, int oobavail)
5530 {
5531         struct mtd_info *mtd = nand_to_mtd(chip);
5532         struct nand_device *nanddev = mtd_to_nanddev(mtd);
5533
5534         if (WARN_ON(oobavail < 0 || oobavail > mtd->oobsize))
5535                 return -EINVAL;
5536
5537         if (chip->ecc.size && chip->ecc.strength)
5538                 return nand_check_ecc_caps(chip, caps, oobavail);
5539
5540         if (nanddev->ecc.user_conf.flags & NAND_ECC_MAXIMIZE_STRENGTH)
5541                 return nand_maximize_ecc(chip, caps, oobavail);
5542
5543         if (!nand_match_ecc_req(chip, caps, oobavail))
5544                 return 0;
5545
5546         return nand_maximize_ecc(chip, caps, oobavail);
5547 }
5548 EXPORT_SYMBOL_GPL(nand_ecc_choose_conf);
5549
5550 static int rawnand_erase(struct nand_device *nand, const struct nand_pos *pos)
5551 {
5552         struct nand_chip *chip = container_of(nand, struct nand_chip,
5553                                               base);
5554         unsigned int eb = nanddev_pos_to_row(nand, pos);
5555         int ret;
5556
5557         eb >>= nand->rowconv.eraseblock_addr_shift;
5558
5559         nand_select_target(chip, pos->target);
5560         ret = nand_erase_op(chip, eb);
5561         nand_deselect_target(chip);
5562
5563         return ret;
5564 }
5565
5566 static int rawnand_markbad(struct nand_device *nand,
5567                            const struct nand_pos *pos)
5568 {
5569         struct nand_chip *chip = container_of(nand, struct nand_chip,
5570                                               base);
5571
5572         return nand_markbad_bbm(chip, nanddev_pos_to_offs(nand, pos));
5573 }
5574
5575 static bool rawnand_isbad(struct nand_device *nand, const struct nand_pos *pos)
5576 {
5577         struct nand_chip *chip = container_of(nand, struct nand_chip,
5578                                               base);
5579         int ret;
5580
5581         nand_select_target(chip, pos->target);
5582         ret = nand_isbad_bbm(chip, nanddev_pos_to_offs(nand, pos));
5583         nand_deselect_target(chip);
5584
5585         return ret;
5586 }
5587
/* Generic NAND core operations implemented on top of the raw NAND layer. */
static const struct nand_ops rawnand_ops = {
	.erase = rawnand_erase,
	.markbad = rawnand_markbad,
	.isbad = rawnand_isbad,
};
5593
5594 /**
5595  * nand_scan_tail - Scan for the NAND device
5596  * @chip: NAND chip object
5597  *
5598  * This is the second phase of the normal nand_scan() function. It fills out
5599  * all the uninitialized function pointers with the defaults and scans for a
5600  * bad block table if appropriate.
5601  */
5602 static int nand_scan_tail(struct nand_chip *chip)
5603 {
5604         struct mtd_info *mtd = nand_to_mtd(chip);
5605         struct nand_ecc_ctrl *ecc = &chip->ecc;
5606         int ret, i;
5607
5608         /* New bad blocks should be marked in OOB, flash-based BBT, or both */
5609         if (WARN_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
5610                    !(chip->bbt_options & NAND_BBT_USE_FLASH))) {
5611                 return -EINVAL;
5612         }
5613
5614         chip->data_buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
5615         if (!chip->data_buf)
5616                 return -ENOMEM;
5617
5618         /*
5619          * FIXME: some NAND manufacturer drivers expect the first die to be
5620          * selected when manufacturer->init() is called. They should be fixed
5621          * to explictly select the relevant die when interacting with the NAND
5622          * chip.
5623          */
5624         nand_select_target(chip, 0);
5625         ret = nand_manufacturer_init(chip);
5626         nand_deselect_target(chip);
5627         if (ret)
5628                 goto err_free_buf;
5629
5630         /* Set the internal oob buffer location, just after the page data */
5631         chip->oob_poi = chip->data_buf + mtd->writesize;
5632
5633         /*
5634          * If no default placement scheme is given, select an appropriate one.
5635          */
5636         if (!mtd->ooblayout &&
5637             !(ecc->engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
5638               ecc->algo == NAND_ECC_ALGO_BCH)) {
5639                 switch (mtd->oobsize) {
5640                 case 8:
5641                 case 16:
5642                         mtd_set_ooblayout(mtd, nand_get_small_page_ooblayout());
5643                         break;
5644                 case 64:
5645                 case 128:
5646                         mtd_set_ooblayout(mtd,
5647                                           nand_get_large_page_hamming_ooblayout());
5648                         break;
5649                 default:
5650                         /*
5651                          * Expose the whole OOB area to users if ECC_NONE
5652                          * is passed. We could do that for all kind of
5653                          * ->oobsize, but we must keep the old large/small
5654                          * page with ECC layout when ->oobsize <= 128 for
5655                          * compatibility reasons.
5656                          */
5657                         if (ecc->engine_type == NAND_ECC_ENGINE_TYPE_NONE) {
5658                                 mtd_set_ooblayout(mtd,
5659                                                   nand_get_large_page_ooblayout());
5660                                 break;
5661                         }
5662
5663                         WARN(1, "No oob scheme defined for oobsize %d\n",
5664                                 mtd->oobsize);
5665                         ret = -EINVAL;
5666                         goto err_nand_manuf_cleanup;
5667                 }
5668         }
5669
5670         /*
5671          * Check ECC mode, default to software if 3byte/512byte hardware ECC is
5672          * selected and we have 256 byte pagesize fallback to software ECC
5673          */
5674
5675         switch (ecc->engine_type) {
5676         case NAND_ECC_ENGINE_TYPE_ON_HOST:
5677                 ret = nand_set_ecc_on_host_ops(chip);
5678                 if (ret)
5679                         goto err_nand_manuf_cleanup;
5680
5681                 if (mtd->writesize >= ecc->size) {
5682                         if (!ecc->strength) {
5683                                 WARN(1, "Driver must set ecc.strength when using hardware ECC\n");
5684                                 ret = -EINVAL;
5685                                 goto err_nand_manuf_cleanup;
5686                         }
5687                         break;
5688                 }
5689                 pr_warn("%d byte HW ECC not possible on %d byte page size, fallback to SW ECC\n",
5690                         ecc->size, mtd->writesize);
5691                 ecc->engine_type = NAND_ECC_ENGINE_TYPE_SOFT;
5692                 ecc->algo = NAND_ECC_ALGO_HAMMING;
5693                 fallthrough;
5694
5695         case NAND_ECC_ENGINE_TYPE_SOFT:
5696                 ret = nand_set_ecc_soft_ops(chip);
5697                 if (ret)
5698                         goto err_nand_manuf_cleanup;
5699                 break;
5700
5701         case NAND_ECC_ENGINE_TYPE_ON_DIE:
5702                 if (!ecc->read_page || !ecc->write_page) {
5703                         WARN(1, "No ECC functions supplied; on-die ECC not possible\n");
5704                         ret = -EINVAL;
5705                         goto err_nand_manuf_cleanup;
5706                 }
5707                 if (!ecc->read_oob)
5708                         ecc->read_oob = nand_read_oob_std;
5709                 if (!ecc->write_oob)
5710                         ecc->write_oob = nand_write_oob_std;
5711                 break;
5712
5713         case NAND_ECC_ENGINE_TYPE_NONE:
5714                 pr_warn("NAND_ECC_ENGINE_TYPE_NONE selected by board driver. This is not recommended!\n");
5715                 ecc->read_page = nand_read_page_raw;
5716                 ecc->write_page = nand_write_page_raw;
5717                 ecc->read_oob = nand_read_oob_std;
5718                 ecc->read_page_raw = nand_read_page_raw;
5719                 ecc->write_page_raw = nand_write_page_raw;
5720                 ecc->write_oob = nand_write_oob_std;
5721                 ecc->size = mtd->writesize;
5722                 ecc->bytes = 0;
5723                 ecc->strength = 0;
5724                 break;
5725
5726         default:
5727                 WARN(1, "Invalid NAND_ECC_MODE %d\n", ecc->engine_type);
5728                 ret = -EINVAL;
5729                 goto err_nand_manuf_cleanup;
5730         }
5731
5732         if (ecc->correct || ecc->calculate) {
5733                 ecc->calc_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
5734                 ecc->code_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
5735                 if (!ecc->calc_buf || !ecc->code_buf) {
5736                         ret = -ENOMEM;
5737                         goto err_nand_manuf_cleanup;
5738                 }
5739         }
5740
5741         /* For many systems, the standard OOB write also works for raw */
5742         if (!ecc->read_oob_raw)
5743                 ecc->read_oob_raw = ecc->read_oob;
5744         if (!ecc->write_oob_raw)
5745                 ecc->write_oob_raw = ecc->write_oob;
5746
5747         /* propagate ecc info to mtd_info */
5748         mtd->ecc_strength = ecc->strength;
5749         mtd->ecc_step_size = ecc->size;
5750
5751         /*
5752          * Set the number of read / write steps for one page depending on ECC
5753          * mode.
5754          */
5755         ecc->steps = mtd->writesize / ecc->size;
5756         if (ecc->steps * ecc->size != mtd->writesize) {
5757                 WARN(1, "Invalid ECC parameters\n");
5758                 ret = -EINVAL;
5759                 goto err_nand_manuf_cleanup;
5760         }
5761
5762         ecc->total = ecc->steps * ecc->bytes;
5763         chip->base.ecc.ctx.total = ecc->total;
5764
5765         if (ecc->total > mtd->oobsize) {
5766                 WARN(1, "Total number of ECC bytes exceeded oobsize\n");
5767                 ret = -EINVAL;
5768                 goto err_nand_manuf_cleanup;
5769         }
5770
5771         /*
5772          * The number of bytes available for a client to place data into
5773          * the out of band area.
5774          */
5775         ret = mtd_ooblayout_count_freebytes(mtd);
5776         if (ret < 0)
5777                 ret = 0;
5778
5779         mtd->oobavail = ret;
5780
5781         /* ECC sanity check: warn if it's too weak */
5782         if (!nand_ecc_is_strong_enough(&chip->base))
5783                 pr_warn("WARNING: %s: the ECC used on your system (%db/%dB) is too weak compared to the one required by the NAND chip (%db/%dB)\n",
5784                         mtd->name, chip->ecc.strength, chip->ecc.size,
5785                         nanddev_get_ecc_requirements(&chip->base)->strength,
5786                         nanddev_get_ecc_requirements(&chip->base)->step_size);
5787
5788         /* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
5789         if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) {
5790                 switch (ecc->steps) {
5791                 case 2:
5792                         mtd->subpage_sft = 1;
5793                         break;
5794                 case 4:
5795                 case 8:
5796                 case 16:
5797                         mtd->subpage_sft = 2;
5798                         break;
5799                 }
5800         }
5801         chip->subpagesize = mtd->writesize >> mtd->subpage_sft;
5802
5803         /* Invalidate the pagebuffer reference */
5804         chip->pagecache.page = -1;
5805
5806         /* Large page NAND with SOFT_ECC should support subpage reads */
5807         switch (ecc->engine_type) {
5808         case NAND_ECC_ENGINE_TYPE_SOFT:
5809                 if (chip->page_shift > 9)
5810                         chip->options |= NAND_SUBPAGE_READ;
5811                 break;
5812
5813         default:
5814                 break;
5815         }
5816
5817         ret = nanddev_init(&chip->base, &rawnand_ops, mtd->owner);
5818         if (ret)
5819                 goto err_nand_manuf_cleanup;
5820
5821         /* Adjust the MTD_CAP_ flags when NAND_ROM is set. */
5822         if (chip->options & NAND_ROM)
5823                 mtd->flags = MTD_CAP_ROM;
5824
5825         /* Fill in remaining MTD driver data */
5826         mtd->_erase = nand_erase;
5827         mtd->_point = NULL;
5828         mtd->_unpoint = NULL;
5829         mtd->_panic_write = panic_nand_write;
5830         mtd->_read_oob = nand_read_oob;
5831         mtd->_write_oob = nand_write_oob;
5832         mtd->_sync = nand_sync;
5833         mtd->_lock = nand_lock;
5834         mtd->_unlock = nand_unlock;
5835         mtd->_suspend = nand_suspend;
5836         mtd->_resume = nand_resume;
5837         mtd->_reboot = nand_shutdown;
5838         mtd->_block_isreserved = nand_block_isreserved;
5839         mtd->_block_isbad = nand_block_isbad;
5840         mtd->_block_markbad = nand_block_markbad;
5841         mtd->_max_bad_blocks = nanddev_mtd_max_bad_blocks;
5842
5843         /*
5844          * Initialize bitflip_threshold to its default prior scan_bbt() call.
5845          * scan_bbt() might invoke mtd_read(), thus bitflip_threshold must be
5846          * properly set.
5847          */
5848         if (!mtd->bitflip_threshold)
5849                 mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);
5850
5851         /* Find the fastest data interface for this chip */
5852         ret = nand_choose_interface_config(chip);
5853         if (ret)
5854                 goto err_nanddev_cleanup;
5855
5856         /* Enter fastest possible mode on all dies. */
5857         for (i = 0; i < nanddev_ntargets(&chip->base); i++) {
5858                 ret = nand_setup_interface(chip, i);
5859                 if (ret)
5860                         goto err_free_interface_config;
5861         }
5862
5863         /* Check, if we should skip the bad block table scan */
5864         if (chip->options & NAND_SKIP_BBTSCAN)
5865                 return 0;
5866
5867         /* Build bad block table */
5868         ret = nand_create_bbt(chip);
5869         if (ret)
5870                 goto err_free_interface_config;
5871
5872         return 0;
5873
5874 err_free_interface_config:
5875         kfree(chip->best_interface_config);
5876
5877 err_nanddev_cleanup:
5878         nanddev_cleanup(&chip->base);
5879
5880 err_nand_manuf_cleanup:
5881         nand_manufacturer_cleanup(chip);
5882
5883 err_free_buf:
5884         kfree(chip->data_buf);
5885         kfree(ecc->code_buf);
5886         kfree(ecc->calc_buf);
5887
5888         return ret;
5889 }
5890
5891 static int nand_attach(struct nand_chip *chip)
5892 {
5893         if (chip->controller->ops && chip->controller->ops->attach_chip)
5894                 return chip->controller->ops->attach_chip(chip);
5895
5896         return 0;
5897 }
5898
5899 static void nand_detach(struct nand_chip *chip)
5900 {
5901         if (chip->controller->ops && chip->controller->ops->detach_chip)
5902                 chip->controller->ops->detach_chip(chip);
5903 }
5904
5905 /**
5906  * nand_scan_with_ids - [NAND Interface] Scan for the NAND device
5907  * @chip: NAND chip object
5908  * @maxchips: number of chips to scan for.
5909  * @ids: optional flash IDs table
5910  *
5911  * This fills out all the uninitialized function pointers with the defaults.
5912  * The flash ID is read and the mtd/chip structures are filled with the
5913  * appropriate values.
5914  */
5915 int nand_scan_with_ids(struct nand_chip *chip, unsigned int maxchips,
5916                        struct nand_flash_dev *ids)
5917 {
5918         int ret;
5919
5920         if (!maxchips)
5921                 return -EINVAL;
5922
5923         ret = nand_scan_ident(chip, maxchips, ids);
5924         if (ret)
5925                 return ret;
5926
5927         ret = nand_attach(chip);
5928         if (ret)
5929                 goto cleanup_ident;
5930
5931         ret = nand_scan_tail(chip);
5932         if (ret)
5933                 goto detach_chip;
5934
5935         return 0;
5936
5937 detach_chip:
5938         nand_detach(chip);
5939 cleanup_ident:
5940         nand_scan_ident_cleanup(chip);
5941
5942         return ret;
5943 }
5944 EXPORT_SYMBOL(nand_scan_with_ids);
5945
5946 /**
5947  * nand_cleanup - [NAND Interface] Free resources held by the NAND device
5948  * @chip: NAND chip object
5949  */
5950 void nand_cleanup(struct nand_chip *chip)
5951 {
5952         if (chip->ecc.engine_type == NAND_ECC_ENGINE_TYPE_SOFT &&
5953             chip->ecc.algo == NAND_ECC_ALGO_BCH)
5954                 nand_bch_free((struct nand_bch_control *)chip->ecc.priv);
5955
5956         nanddev_cleanup(&chip->base);
5957
5958         /* Free bad block table memory */
5959         kfree(chip->bbt);
5960         kfree(chip->data_buf);
5961         kfree(chip->ecc.code_buf);
5962         kfree(chip->ecc.calc_buf);
5963
5964         /* Free bad block descriptor memory */
5965         if (chip->badblock_pattern && chip->badblock_pattern->options
5966                         & NAND_BBT_DYNAMICSTRUCT)
5967                 kfree(chip->badblock_pattern);
5968
5969         /* Free the data interface */
5970         kfree(chip->best_interface_config);
5971
5972         /* Free manufacturer priv data. */
5973         nand_manufacturer_cleanup(chip);
5974
5975         /* Free controller specific allocations after chip identification */
5976         nand_detach(chip);
5977
5978         /* Free identification phase allocations */
5979         nand_scan_ident_cleanup(chip);
5980 }
5981
5982 EXPORT_SYMBOL_GPL(nand_cleanup);
5983
5984 MODULE_LICENSE("GPL");
5985 MODULE_AUTHOR("Steven J. Hill <sjhill@realitydiluted.com>");
5986 MODULE_AUTHOR("Thomas Gleixner <tglx@linutronix.de>");
5987 MODULE_DESCRIPTION("Generic NAND flash driver code");