2 * Common Flash Interface support:
3 * AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
5 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
6 * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
7 * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com>
9 * 2_by_8 routines added by Simon Munton
11 * 4_by_16 work by Carolyn J. Smith
13 * XIP support hooks by Vitaly Wool (based on code for Intel flash
16 * 25/09/2008 Christopher Moore: TopBottom fixup for many Macronix with CFI V1.0
18 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
23 #include <linux/module.h>
24 #include <linux/types.h>
25 #include <linux/kernel.h>
26 #include <linux/sched.h>
28 #include <asm/byteorder.h>
30 #include <linux/errno.h>
31 #include <linux/slab.h>
32 #include <linux/delay.h>
33 #include <linux/interrupt.h>
34 #include <linux/reboot.h>
36 #include <linux/of_platform.h>
37 #include <linux/mtd/map.h>
38 #include <linux/mtd/mtd.h>
39 #include <linux/mtd/cfi.h>
40 #include <linux/mtd/xip.h>
42 #define AMD_BOOTLOC_BUG
43 #define FORCE_WORD_WRITE 0
47 #define SST49LF004B 0x0060
48 #define SST49LF040B 0x0050
49 #define SST49LF008A 0x005a
50 #define AT49BV6416 0x00d6
53 * Status Register bit description. Used by flash devices that don't
54 * support DQ polling (e.g. HyperFlash)
56 #define CFI_SR_DRB BIT(7)
57 #define CFI_SR_ESB BIT(5)
58 #define CFI_SR_PSB BIT(4)
59 #define CFI_SR_WBASB BIT(3)
60 #define CFI_SR_SLSB BIT(1)
63 CFI_QUIRK_DQ_TRUE_DATA = BIT(0),
66 static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
67 static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
69 static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
71 static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
72 static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
73 static void cfi_amdstd_sync (struct mtd_info *);
74 static int cfi_amdstd_suspend (struct mtd_info *);
75 static void cfi_amdstd_resume (struct mtd_info *);
76 static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *);
77 static int cfi_amdstd_get_fact_prot_info(struct mtd_info *, size_t,
78 size_t *, struct otp_info *);
79 static int cfi_amdstd_get_user_prot_info(struct mtd_info *, size_t,
80 size_t *, struct otp_info *);
81 static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
82 static int cfi_amdstd_read_fact_prot_reg(struct mtd_info *, loff_t, size_t,
84 static int cfi_amdstd_read_user_prot_reg(struct mtd_info *, loff_t, size_t,
86 static int cfi_amdstd_write_user_prot_reg(struct mtd_info *, loff_t, size_t,
87 size_t *, const u_char *);
88 static int cfi_amdstd_lock_user_prot_reg(struct mtd_info *, loff_t, size_t);
90 static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
91 size_t *retlen, const u_char *buf);
93 static void cfi_amdstd_destroy(struct mtd_info *);
95 struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
96 static struct mtd_info *cfi_amdstd_setup (struct mtd_info *);
98 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
99 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
100 #include "fwh_lock.h"
102 static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
103 static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
105 static int cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
106 static int cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
107 static int cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len);
/*
 * Driver glue registered with the generic CFI layer.  Probing is done
 * through cfi_cmdset_0002() (and its aliases), so .probe stays NULL and
 * only teardown is routed through this structure.
 */
static struct mtd_chip_driver cfi_amdstd_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_amdstd_destroy,
	.name		= "cfi_cmdset_0002",
	.module		= THIS_MODULE
117 * Use status register to poll for Erase/write completion when DQ is not
118 * supported. This is indicated by Bit[1:0] of SoftwareFeatures field in
119 * CFI Primary Vendor-Specific Extended Query table 1.5
121 static int cfi_use_status_reg(struct cfi_private *cfi)
123 struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
124 u8 poll_mask = CFI_POLL_STATUS_REG | CFI_POLL_DQ;
126 return extp && extp->MinorVersion >= '5' &&
127 (extp->SoftwareFeatures & poll_mask) == CFI_POLL_STATUS_REG;
130 static int cfi_check_err_status(struct map_info *map, struct flchip *chip,
133 struct cfi_private *cfi = map->fldrv_priv;
136 if (!cfi_use_status_reg(cfi))
139 cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi,
140 cfi->device_type, NULL);
141 status = map_read(map, adr);
143 /* The error bits are invalid while the chip's busy */
144 if (!map_word_bitsset(map, status, CMD(CFI_SR_DRB)))
147 if (map_word_bitsset(map, status, CMD(0x3a))) {
148 unsigned long chipstatus = MERGESTATUS(status);
150 if (chipstatus & CFI_SR_ESB)
151 pr_err("%s erase operation failed, status %lx\n",
152 map->name, chipstatus);
153 if (chipstatus & CFI_SR_PSB)
154 pr_err("%s program operation failed, status %lx\n",
155 map->name, chipstatus);
156 if (chipstatus & CFI_SR_WBASB)
157 pr_err("%s buffer program command aborted, status %lx\n",
158 map->name, chipstatus);
159 if (chipstatus & CFI_SR_SLSB)
160 pr_err("%s sector write protected, status %lx\n",
161 map->name, chipstatus);
163 /* Erase/Program status bits are set on the operation failure */
164 if (chipstatus & (CFI_SR_ESB | CFI_SR_PSB))
170 /* #define DEBUG_CFI_FEATURES */
173 #ifdef DEBUG_CFI_FEATURES
/*
 * Dump the AMD/Fujitsu extended query table fields in human-readable form.
 * Compiled only when DEBUG_CFI_FEATURES is defined.
 */
static void cfi_tell_features(struct cfi_pri_amdstd *extp)
	/* textual decodings for the small enum-like fields */
	const char* erase_suspend[3] = {
		"Not supported", "Read only", "Read/write"
	const char* top_bottom[6] = {
		"No WP", "8x8KiB sectors at top & bottom, no WP",
		"Bottom boot", "Top boot",
		"Uniform, Bottom WP", "Uniform, Top WP"
	printk(" Silicon revision: %d\n", extp->SiliconRevision >> 1);
	printk(" Address sensitive unlock: %s\n",
	       (extp->SiliconRevision & 1) ? "Not required" : "Required");
	/* bounds-checked table lookups; out-of-range values printed raw */
	if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
		printk(" Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]);
		printk(" Erase Suspend: Unknown value %d\n", extp->EraseSuspend);
	if (extp->BlkProt == 0)
		printk(" Block protection: Not supported\n");
		printk(" Block protection: %d sectors per group\n", extp->BlkProt);
	printk(" Temporary block unprotect: %s\n",
	       extp->TmpBlkUnprotect ? "Supported" : "Not supported");
	printk(" Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot);
	printk(" Number of simultaneous operations: %d\n", extp->SimultaneousOps);
	printk(" Burst mode: %s\n",
	       extp->BurstMode ? "Supported" : "Not supported");
	if (extp->PageMode == 0)
		printk(" Page mode: Not supported\n");
		printk(" Page mode: %d word page\n", extp->PageMode << 2);
	/* Vpp fields are BCD-like: high nibble volts, low nibble tenths */
	printk(" Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMin >> 4, extp->VppMin & 0xf);
	printk(" Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMax >> 4, extp->VppMax & 0xf);
	if (extp->TopBottom < ARRAY_SIZE(top_bottom))
		printk(" Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]);
		printk(" Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom);
223 #ifdef AMD_BOOTLOC_BUG
/* Wheee. Bring me the head of someone at AMD. */
/*
 * Pre-CFI-1.1 parts have an untrustworthy TopBottom (boot block location)
 * field; deduce it from the JEDEC device ID instead.  Registered for AMD,
 * AMIC and Macronix parts under AMD_BOOTLOC_BUG.
 */
static void fixup_amd_bootblock(struct mtd_info *mtd)
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	__u8 major = extp->MajorVersion;
	__u8 minor = extp->MinorVersion;
	/* ('1' << 8 | '1') == 0x3131, i.e. anything before version 1.1 */
	if (((major << 8) | minor) < 0x3131) {
		/* CFI version 1.0 => don't trust bootloc */
		pr_debug("%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n",
			 map->name, cfi->mfr, cfi->id);
		/* AFAICS all 29LV400 with a bottom boot block have a device ID
		 * of 0x22BA in 16-bit mode and 0xBA in 8-bit mode.
		 * These were badly detected as they have the 0x80 bit set
		 * so treat them as a special case.
		 */
		if (((cfi->id == 0xBA) || (cfi->id == 0x22BA)) &&
			/* Macronix added CFI to their 2nd generation
			 * MX29LV400C B/T but AFAICS no other 29LV400 (AMD,
			 * Fujitsu, Spansion, EON, ESI and older Macronix)
			 * has CFI.
			 * Therefore also check the manufacturer.
			 * This reduces the risk of false detection due to
			 * the 8-bit device ID.
			 */
			(cfi->mfr == CFI_MFR_MACRONIX)) {
			pr_debug("%s: Macronix MX29LV400C with bottom boot block"
				 " detected\n", map->name);
			extp->TopBottom = 2;	/* bottom boot */
		if (cfi->id & 0x80) {
			printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
			extp->TopBottom = 3;	/* top boot */
			extp->TopBottom = 2;	/* bottom boot */
		pr_debug("%s: AMD CFI PRI V%c.%c has no boot block field;"
			 " deduced %s from Device ID\n", map->name, major, minor,
			 extp->TopBottom == 2 ? "bottom" : "top");
274 #if !FORCE_WORD_WRITE
275 static void fixup_use_write_buffers(struct mtd_info *mtd)
277 struct map_info *map = mtd->priv;
278 struct cfi_private *cfi = map->fldrv_priv;
280 if (cfi->mfr == CFI_MFR_AMD && cfi->id == 0x2201)
283 if (cfi->cfiq->BufWriteTimeoutTyp) {
284 pr_debug("Using buffer write method\n");
285 mtd->_write = cfi_amdstd_write_buffers;
288 #endif /* !FORCE_WORD_WRITE */
290 /* Atmel chips don't use the same PRI format as AMD chips */
291 static void fixup_convert_atmel_pri(struct mtd_info *mtd)
293 struct map_info *map = mtd->priv;
294 struct cfi_private *cfi = map->fldrv_priv;
295 struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
296 struct cfi_pri_atmel atmel_pri;
298 memcpy(&atmel_pri, extp, sizeof(atmel_pri));
299 memset((char *)extp + 5, 0, sizeof(*extp) - 5);
301 if (atmel_pri.Features & 0x02)
302 extp->EraseSuspend = 2;
304 /* Some chips got it backwards... */
305 if (cfi->id == AT49BV6416) {
306 if (atmel_pri.BottomBoot)
311 if (atmel_pri.BottomBoot)
317 /* burst write mode not supported */
318 cfi->cfiq->BufWriteTimeoutTyp = 0;
319 cfi->cfiq->BufWriteTimeoutMax = 0;
322 static void fixup_use_secsi(struct mtd_info *mtd)
324 /* Setup for chips with a secsi area */
325 mtd->_read_user_prot_reg = cfi_amdstd_secsi_read;
326 mtd->_read_fact_prot_reg = cfi_amdstd_secsi_read;
329 static void fixup_use_erase_chip(struct mtd_info *mtd)
331 struct map_info *map = mtd->priv;
332 struct cfi_private *cfi = map->fldrv_priv;
333 if ((cfi->cfiq->NumEraseRegions == 1) &&
334 ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
335 mtd->_erase = cfi_amdstd_erase_chip;
341 * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors
344 static void fixup_use_atmel_lock(struct mtd_info *mtd)
346 mtd->_lock = cfi_atmel_lock;
347 mtd->_unlock = cfi_atmel_unlock;
348 mtd->flags |= MTD_POWERUP_LOCK;
351 static void fixup_old_sst_eraseregion(struct mtd_info *mtd)
353 struct map_info *map = mtd->priv;
354 struct cfi_private *cfi = map->fldrv_priv;
357 * These flashes report two separate eraseblock regions based on the
358 * sector_erase-size and block_erase-size, although they both operate on the
359 * same memory. This is not allowed according to CFI, so we just pick the
362 cfi->cfiq->NumEraseRegions = 1;
365 static void fixup_sst39vf(struct mtd_info *mtd)
367 struct map_info *map = mtd->priv;
368 struct cfi_private *cfi = map->fldrv_priv;
370 fixup_old_sst_eraseregion(mtd);
372 cfi->addr_unlock1 = 0x5555;
373 cfi->addr_unlock2 = 0x2AAA;
376 static void fixup_sst39vf_rev_b(struct mtd_info *mtd)
378 struct map_info *map = mtd->priv;
379 struct cfi_private *cfi = map->fldrv_priv;
381 fixup_old_sst_eraseregion(mtd);
383 cfi->addr_unlock1 = 0x555;
384 cfi->addr_unlock2 = 0x2AA;
386 cfi->sector_erase_cmd = CMD(0x50);
389 static void fixup_sst38vf640x_sectorsize(struct mtd_info *mtd)
391 struct map_info *map = mtd->priv;
392 struct cfi_private *cfi = map->fldrv_priv;
394 fixup_sst39vf_rev_b(mtd);
397 * CFI reports 1024 sectors (0x03ff+1) of 64KBytes (0x0100*256) where
398 * it should report a size of 8KBytes (0x0020*256).
400 cfi->cfiq->EraseRegionInfo[0] = 0x002003ff;
401 pr_warn("%s: Bad 38VF640x CFI data; adjusting sector size from 64 to 8KiB\n",
405 static void fixup_s29gl064n_sectors(struct mtd_info *mtd)
407 struct map_info *map = mtd->priv;
408 struct cfi_private *cfi = map->fldrv_priv;
410 if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) {
411 cfi->cfiq->EraseRegionInfo[0] |= 0x0040;
412 pr_warn("%s: Bad S29GL064N CFI data; adjust from 64 to 128 sectors\n",
417 static void fixup_s29gl032n_sectors(struct mtd_info *mtd)
419 struct map_info *map = mtd->priv;
420 struct cfi_private *cfi = map->fldrv_priv;
422 if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) {
423 cfi->cfiq->EraseRegionInfo[1] &= ~0x0040;
424 pr_warn("%s: Bad S29GL032N CFI data; adjust from 127 to 63 sectors\n",
429 static void fixup_s29ns512p_sectors(struct mtd_info *mtd)
431 struct map_info *map = mtd->priv;
432 struct cfi_private *cfi = map->fldrv_priv;
435 * S29NS512P flash uses more than 8bits to report number of sectors,
436 * which is not permitted by CFI.
438 cfi->cfiq->EraseRegionInfo[0] = 0x020001ff;
439 pr_warn("%s: Bad S29NS512P CFI data; adjust to 512 sectors\n",
443 static void fixup_quirks(struct mtd_info *mtd)
445 struct map_info *map = mtd->priv;
446 struct cfi_private *cfi = map->fldrv_priv;
448 if (cfi->mfr == CFI_MFR_AMD && cfi->id == 0x0c01)
449 cfi->quirks |= CFI_QUIRK_DQ_TRUE_DATA;
/* Used to fix CFI-Tables of chips without Extended Query Tables */
/* Each entry: { manufacturer id, device id, fixup handler } */
static struct cfi_fixup cfi_nopri_fixup_table[] = {
	{ CFI_MFR_SST, 0x234a, fixup_sst39vf }, /* SST39VF1602 */
	{ CFI_MFR_SST, 0x234b, fixup_sst39vf }, /* SST39VF1601 */
	{ CFI_MFR_SST, 0x235a, fixup_sst39vf }, /* SST39VF3202 */
	{ CFI_MFR_SST, 0x235b, fixup_sst39vf }, /* SST39VF3201 */
	{ CFI_MFR_SST, 0x235c, fixup_sst39vf_rev_b }, /* SST39VF3202B */
	{ CFI_MFR_SST, 0x235d, fixup_sst39vf_rev_b }, /* SST39VF3201B */
	{ CFI_MFR_SST, 0x236c, fixup_sst39vf_rev_b }, /* SST39VF6402B */
	{ CFI_MFR_SST, 0x236d, fixup_sst39vf_rev_b }, /* SST39VF6401B */
/* Fixups applied to chips probed in CFI mode (see cfi_cmdset_0002()) */
static struct cfi_fixup cfi_fixup_table[] = {
	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
#ifdef AMD_BOOTLOC_BUG
	{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock },
	{ CFI_MFR_AMIC, CFI_ID_ANY, fixup_amd_bootblock },
	{ CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock },
	/* AMD parts with a SecSi (security silicon) region */
	{ CFI_MFR_AMD, 0x0050, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x0053, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x0055, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x0056, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x005C, fixup_use_secsi },
	{ CFI_MFR_AMD, 0x005F, fixup_use_secsi },
	/* parts with broken sector counts in their CFI tables */
	{ CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors },
	{ CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors },
	{ CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors },
	{ CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors },
	{ CFI_MFR_AMD, 0x3f00, fixup_s29ns512p_sectors },
	{ CFI_MFR_SST, 0x536a, fixup_sst38vf640x_sectorsize }, /* SST38VF6402 */
	{ CFI_MFR_SST, 0x536b, fixup_sst38vf640x_sectorsize }, /* SST38VF6401 */
	{ CFI_MFR_SST, 0x536c, fixup_sst38vf640x_sectorsize }, /* SST38VF6404 */
	{ CFI_MFR_SST, 0x536d, fixup_sst38vf640x_sectorsize }, /* SST38VF6403 */
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_quirks },
/* Fixups applied to chips probed in JEDEC (non-CFI) mode */
static struct cfi_fixup jedec_fixup_table[] = {
	{ CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock },
	{ CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock },
	{ CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock },
/* Fixups applied regardless of probe mode (CFI or JEDEC) */
static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor ids and the JEDEC vendor IDs appear
	 * to be common.  It is like the devices id's are as
	 * well.  This table is to pick all cases where
	 * we know that is the case.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip },
	{ CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock },
/*
 * Normalise bogus extended-query version numbers reported by some vendors
 * before the version is validated in cfi_cmdset_0002().
 */
static void cfi_fixup_major_minor(struct cfi_private *cfi,
				  struct cfi_pri_amdstd *extp)
	if (cfi->mfr == CFI_MFR_SAMSUNG) {
		if ((extp->MajorVersion == '0' && extp->MinorVersion == '0') ||
		    (extp->MajorVersion == '3' && extp->MinorVersion == '3')) {
			/*
			 * Samsung K8P2815UQB and K8D6x16UxM chips
			 * report major=0 / minor=0.
			 * K8D3x16UxC chips report major=3 / minor=3.
			 */
			printk(KERN_NOTICE " Fixing Samsung's Amd/Fujitsu"
			       " Extended Query version to 1.%c\n",
			extp->MajorVersion = '1';
	/*
	 * SST 38VF640x chips report major=0xFF / minor=0xFF.
	 */
	if (cfi->mfr == CFI_MFR_SST && (cfi->id >> 4) == 0x0536) {
		extp->MajorVersion = '1';
		extp->MinorVersion = '0';
539 static int is_m29ew(struct cfi_private *cfi)
541 if (cfi->mfr == CFI_MFR_INTEL &&
542 ((cfi->device_type == CFI_DEVICETYPE_X8 && (cfi->id & 0xff) == 0x7e) ||
543 (cfi->device_type == CFI_DEVICETYPE_X16 && cfi->id == 0x227e)))
549 * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 20:
550 * Some revisions of the M29EW suffer from erase suspend hang ups. In
551 * particular, it can occur when the sequence
552 * Erase Confirm -> Suspend -> Program -> Resume
553 * causes a lockup due to internal timing issues. The consequence is that the
554 * erase cannot be resumed without inserting a dummy command after programming
555 * and prior to resuming. [...] The work-around is to issue a dummy write cycle
556 * that writes an F0 command code before the RESUME command.
558 static void cfi_fixup_m29ew_erase_suspend(struct map_info *map,
561 struct cfi_private *cfi = map->fldrv_priv;
562 /* before resume, insert a dummy 0xF0 cycle for Micron M29EW devices */
564 map_write(map, CMD(0xF0), adr);
568 * From TN-13-07: Patching the Linux Kernel and U-Boot for M29 Flash, page 22:
570 * Some revisions of the M29EW (for example, A1 and A2 step revisions)
571 * are affected by a problem that could cause a hang up when an ERASE SUSPEND
572 * command is issued after an ERASE RESUME operation without waiting for a
573 * minimum delay. The result is that once the ERASE seems to be completed
574 * (no bits are toggling), the contents of the Flash memory block on which
575 * the erase was ongoing could be inconsistent with the expected values
576 * (typically, the array value is stuck to the 0xC0, 0xC4, 0x80, or 0x84
577 * values), causing a consequent failure of the ERASE operation.
578 * The occurrence of this issue could be high, especially when file system
579 * operations on the Flash are intensive. As a result, it is recommended
580 * that a patch be applied. Intensive file system operations can cause many
581 * calls to the garbage routine to free Flash space (also by erasing physical
582 * Flash blocks) and as a result, many consecutive SUSPEND and RESUME
583 * commands can occur. The problem disappears when a delay is inserted after
584 * the RESUME command by using the udelay() function available in Linux.
585 * The DELAY value must be tuned based on the customer's platform.
586 * The maximum value that fixes the problem in all cases is 500us.
587 * But, in our experience, a delay of 30 µs to 50 µs is sufficient
589 * We have chosen 500µs because this latency is acceptable.
/*
 * M29EW delay-after-resume workaround (Micron TN-13-07): wait after the
 * erase RESUME command before the next SUSPEND may be issued.
 */
static void cfi_fixup_m29ew_delay_after_resume(struct cfi_private *cfi)
{
	/*
	 * Resolving the Delay After Resume Issue see Micron TN-13-07
	 * Worst case delay must be 500µs but 30-50µs should be ok as well
	 */
	if (is_m29ew(cfi))
		cfi_udelay(500);
}
/*
 * Entry point for the AMD/Fujitsu (0x0002) command set: allocate and
 * populate the mtd_info, read and validate the primary extended query
 * table (CFI mode), apply the fixup tables and per-chip timeouts, then
 * finish geometry setup in cfi_amdstd_setup().
 *
 * @primary selects the primary (P_ADR) or alternate (A_ADR) extended
 * query table address.  Returns the new mtd_info, or NULL on failure
 * (error paths are partly elided in this excerpt).
 */
struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
	struct cfi_private *cfi = map->fldrv_priv;
	struct device_node __maybe_unused *np = map->device_node;
	struct mtd_info *mtd;
	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	mtd->type = MTD_NORFLASH;
	/* Fill in the default mtd operations */
	mtd->_erase = cfi_amdstd_erase_varsize;
	mtd->_write = cfi_amdstd_write_words;
	mtd->_read = cfi_amdstd_read;
	mtd->_sync = cfi_amdstd_sync;
	mtd->_suspend = cfi_amdstd_suspend;
	mtd->_resume = cfi_amdstd_resume;
	mtd->_read_user_prot_reg = cfi_amdstd_read_user_prot_reg;
	mtd->_read_fact_prot_reg = cfi_amdstd_read_fact_prot_reg;
	mtd->_get_fact_prot_info = cfi_amdstd_get_fact_prot_info;
	mtd->_get_user_prot_info = cfi_amdstd_get_user_prot_info;
	mtd->_write_user_prot_reg = cfi_amdstd_write_user_prot_reg;
	mtd->_lock_user_prot_reg = cfi_amdstd_lock_user_prot_reg;
	mtd->flags = MTD_CAP_NORFLASH;
	mtd->name = map->name;
	/* buffer-write granularity: interleave * chip buffer size */
	mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	pr_debug("MTD %s(): write buffer size %d\n", __func__,
	mtd->_panic_write = cfi_amdstd_panic_write;
	mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;
	if (cfi->cfi_mode==CFI_MODE_CFI){
		unsigned char bootloc;
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_amdstd *extp;
		extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
			/*
			 * It's a real CFI chip, not one for which the probe
			 * routine faked a CFI structure.
			 */
			cfi_fixup_major_minor(cfi, extp);
			/*
			 * Valid primary extension versions are: 1.0, 1.1, 1.2, 1.3, 1.4, 1.5
			 * see: http://cs.ozerki.net/zap/pub/axim-x5/docs/cfi_r20.pdf, page 19
			 *      http://www.spansion.com/Support/AppNotes/cfi_100_20011201.pdf
			 *      http://www.spansion.com/Support/Datasheets/s29ws-p_00_a12_e.pdf
			 *      http://www.spansion.com/Support/Datasheets/S29GL_128S_01GS_00_02_e.pdf
			 */
			if (extp->MajorVersion != '1' ||
			    (extp->MajorVersion == '1' && (extp->MinorVersion < '0' || extp->MinorVersion > '5'))) {
				printk(KERN_ERR " Unknown Amd/Fujitsu Extended Query "
				       "version %c.%c (%#02x/%#02x).\n",
				       extp->MajorVersion, extp->MinorVersion,
				       extp->MajorVersion, extp->MinorVersion);
			printk(KERN_INFO " Amd/Fujitsu Extended Query version %c.%c.\n",
			       extp->MajorVersion, extp->MinorVersion);
			/* Install our own private info structure */
			cfi->cmdset_priv = extp;
			/* Apply cfi device specific fixups */
			cfi_fixup(mtd, cfi_fixup_table);
#ifdef DEBUG_CFI_FEATURES
			/* Tell the user about it in lots of lovely detail */
			cfi_tell_features(extp);
			/* optional PPB (persistent protection bit) locking,
			 * enabled via device tree when the chip supports it */
			if (np && of_property_read_bool(
				    np, "use-advanced-sector-protection")
			    && extp->BlkProtUnprot == 8) {
				printk(KERN_INFO " Advanced Sector Protection (PPB Locking) supported\n");
				mtd->_lock = cfi_ppb_lock;
				mtd->_unlock = cfi_ppb_unlock;
				mtd->_is_locked = cfi_ppb_is_locked;
			bootloc = extp->TopBottom;
			if ((bootloc < 2) || (bootloc > 5)) {
				printk(KERN_WARNING "%s: CFI contains unrecognised boot "
				       "bank location (%d). Assuming bottom.\n",
			/* top-boot parts list regions in reverse order;
			 * mirror the table so region 0 is at offset 0 */
			if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
				printk(KERN_WARNING "%s: Swapping erase regions for top-boot CFI table.\n", map->name);
				for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
					int j = (cfi->cfiq->NumEraseRegions-1)-i;
					swap(cfi->cfiq->EraseRegionInfo[i],
					     cfi->cfiq->EraseRegionInfo[j]);
		/* Set the default CFI lock/unlock addresses */
		cfi->addr_unlock1 = 0x555;
		cfi->addr_unlock2 = 0x2aa;
		cfi_fixup(mtd, cfi_nopri_fixup_table);
		if (!cfi->addr_unlock1 || !cfi->addr_unlock2) {
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);
	for (i=0; i< cfi->numchips; i++) {
		/* typical timeouts from the CFI table, in microseconds */
		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
		/*
		 * First calculate the timeout max according to timeout field
		 * of struct cfi_ident that probed from chip's CFI area, if
		 * available. Specify a minimum of 2000us, in case the CFI data
		 * is wrong or missing.
		 */
		if (cfi->cfiq->BufWriteTimeoutTyp &&
		    cfi->cfiq->BufWriteTimeoutMax)
			cfi->chips[i].buffer_write_time_max =
				1 << (cfi->cfiq->BufWriteTimeoutTyp +
				      cfi->cfiq->BufWriteTimeoutMax);
			cfi->chips[i].buffer_write_time_max = 0;
		cfi->chips[i].buffer_write_time_max =
			max(cfi->chips[i].buffer_write_time_max, 2000);
		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	map->fldrv = &cfi_amdstd_chipdrv;
	return cfi_amdstd_setup(mtd);
/* Command sets 0x0006 and 0x0701 share this implementation; expose them
 * as linker aliases of cfi_cmdset_0002(). */
struct mtd_info *cfi_cmdset_0006(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
struct mtd_info *cfi_cmdset_0701(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0002);
EXPORT_SYMBOL_GPL(cfi_cmdset_0006);
EXPORT_SYMBOL_GPL(cfi_cmdset_0701);
/*
 * Second-stage setup: compute total size and build the per-chip erase
 * region table from the CFI geometry, then register the reboot notifier.
 * Returns the finished mtd_info, or NULL on allocation/geometry failure
 * (cleanup labels are partly elided in this excerpt).
 */
static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	/* DevSize is log2 of one chip's size in bytes */
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
	unsigned long offset = 0;
	printk(KERN_NOTICE "number of %s chips: %d\n",
	       (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips);
	/* Select the correct geometry setup */
	mtd->size = devsize * cfi->numchips;
	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc_array(mtd->numeraseregions,
					  sizeof(struct mtd_erase_region_info),
	if (!mtd->eraseregions)
	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		/* EraseRegionInfo: high 16 bits size/256, low 16 bits count-1 */
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
		/* mtd->erasesize is the largest region's block size */
		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		/* replicate this region for every interleaved chip */
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		offset += (ersize * ernum);
	/* sanity check: regions must exactly tile one chip */
	if (offset != devsize) {
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
	__module_get(THIS_MODULE);
	register_reboot_notifier(&mtd->reboot_notifier);
	/* error path: release what was allocated above */
	kfree(mtd->eraseregions);
	kfree(cfi->cmdset_priv);
/*
 * Return true if the chip is ready and, when @expected is non-NULL, also
 * holds that value.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and is indicated by no toggle bits toggling.
 *
 * Errors are indicated by bits that keep toggling or by bits held at the
 * wrong value.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 */
static int __xipram chip_ready(struct map_info *map, struct flchip *chip,
			       unsigned long addr, map_word *expected)
	struct cfi_private *cfi = map->fldrv_priv;
	if (cfi_use_status_reg(cfi)) {
		map_word ready = CMD(CFI_SR_DRB);
		/*
		 * For chips that support status register, check device
		 * readiness via the Device Ready Bit (DRB).
		 */
		cfi_send_gen_cmd(0x70, cfi->addr_unlock1, chip->start, map, cfi,
				 cfi->device_type, NULL);
		t = map_read(map, addr);
		return map_word_andequal(map, t, ready, ready);
	/* DQ-polling path: two consecutive reads agree => no toggling */
	d = map_read(map, addr);
	t = map_read(map, addr);
	ret = map_word_equal(map, d, t);
	if (!ret || !expected)
	/* also verify the array data matches the caller's expectation */
	return map_word_equal(map, t, *expected);
863 static int __xipram chip_good(struct map_info *map, struct flchip *chip,
864 unsigned long addr, map_word *expected)
866 struct cfi_private *cfi = map->fldrv_priv;
867 map_word *datum = expected;
869 if (cfi->quirks & CFI_QUIRK_DQ_TRUE_DATA)
872 return chip_ready(map, chip, addr, datum);
/*
 * Claim the chip for an operation of type @mode at address @adr, waiting
 * (and possibly suspending an in-progress erase) until the chip can
 * service it.  Expects chip->mutex held on entry; the mutex is dropped and
 * re-acquired around sleeps.  Several case labels/returns of the original
 * switch are elided in this excerpt.
 */
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;
	timeo = jiffies + HZ;
	switch (chip->state) {
		/* busy: poll for readiness with a 1s timeout */
		if (chip_ready(map, chip, adr, NULL))
		if (time_after(jiffies, timeo)) {
			printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
		mutex_unlock(&chip->mutex);
		mutex_lock(&chip->mutex);
		/* Someone else might have been playing with it. */
		/* erase in progress: only suspend when the chip supports it
		 * and the requested mode is compatible */
		if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		      (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
		/* Do not allow suspend iff read/write to EB address */
		if ((adr & chip->in_progress_block_mask) ==
		    chip->in_progress_block_addr)
		/* It's harmless to issue the Erase-Suspend and Erase-Resume
		 * commands when the erase algorithm isn't in progress. */
		map_write(map, CMD(0xB0), chip->in_progress_block_addr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
			if (chip_ready(map, chip, adr, NULL))
			if (time_after(jiffies, timeo)) {
				/* Should have suspended the erase by now.
				 * Send an Erase-Resume command as either
				 * there was an error (so leave the erase
				 * routine to recover from it) or we trying to
				 * use the erase-in-progress sector. */
				put_chip(map, chip, adr);
				printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
			mutex_unlock(&chip->mutex);
			mutex_lock(&chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		chip->state = FL_READY;
	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (!cfip || !(cfip->EraseSuspend&2)))
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		/* The machine is rebooting */
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
		/* default: sleep on the chip's waitqueue and retry */
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		remove_wait_queue(&chip->wq, &wait);
		mutex_lock(&chip->mutex);
/*
 * Release the chip after an operation: if get_chip() suspended an erase,
 * apply the M29EW workarounds and resume it.  Some case labels and the
 * wake-up tail are elided in this excerpt.
 */
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
	struct cfi_private *cfi = map->fldrv_priv;
	switch(chip->oldstate) {
		/* suspended erase: dummy 0xF0 cycle, then resume command */
		cfi_fixup_m29ew_erase_suspend(map,
					      chip->in_progress_block_addr);
		map_write(map, cfi->sector_erase_cmd, chip->in_progress_block_addr);
		cfi_fixup_m29ew_delay_after_resume(cfi);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
1009 #ifdef CONFIG_MTD_XIP
1012 * No interrupt what so ever can be serviced while the flash isn't in array
1013 * mode. This is ensured by the xip_disable() and xip_enable() functions
1014 * enclosing any code path where the flash is known not to be in array mode.
1015 * And within a XIP disabled code path, only functions marked with __xipram
1016 * may be called and nothing else (it's a good thing to inspect generated
1017 * assembly to make sure inline functions were actually inlined and that gcc
1018 * didn't emit calls to its own support functions). Also configuring MTD CFI
1019 * support to a single buswidth and a single interleave is also recommended.
/*
 * Enter a non-array-mode critical section: fault in the mapping first so
 * no page fault can occur with interrupts off, then mask interrupts.
 */
static void xip_disable(struct map_info *map, struct flchip *chip,
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
/*
 * Leave the critical section: put the chip back in array (read) mode with
 * a 0xf0 reset if it isn't already readable.  The interrupt re-enable
 * tail is elided in this excerpt.
 */
static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
	struct cfi_private *cfi = map->fldrv_priv;
	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), adr);
		chip->state = FL_READY;
	(void) map_read(map, adr);
1045 * When a delay is required for the flash operation to complete, the
1046 * xip_udelay() function is polling for both the given timeout and pending
1047 * (but still masked) hardware interrupts. Whenever there is an interrupt
1048 * pending then the flash erase operation is suspended, array mode restored
1049 * and interrupts unmasked. Task scheduling might also happen at that
1050 * point. The CPU eventually returns from the interrupt or the call to
1051 * schedule() and the suspended flash operation is resumed for the remaining
1052 * of the delay period.
1054 * Warning: this function _will_ fool interrupt latency tracing tools.
/*
 * xip_udelay() - delay for @usec while the flash is busy, servicing any
 * pending (masked) interrupt by suspending the in-progress erase,
 * restoring array mode, and unmasking IRQs; the erase is resumed for the
 * remainder of the delay afterwards.  See the long comment above.
 * NOTE(review): excerpt drops several lines (do/while openers, break/
 * return statements, schedule() calls, local_irq_enable) — comments only.
 */
1057 static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
1058 unsigned long adr, int usec)
1060 struct cfi_private *cfi = map->fldrv_priv;
1061 struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
1062 map_word status, OK = CMD(0x80); /* DQ7 = data polling bit */
1063 unsigned long suspended, start = xip_currtime();
/* Only suspend when an IRQ is pending, the chip advertises erase
 * suspend (EraseSuspend & 2), and interleave rules allow it. */
1068 if (xip_irqpending() && extp &&
1069 ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) &&
1070 (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
1072 * Let's suspend the erase operation when supported.
1073 * Note that we currently don't try to suspend
1074 * interleaved chips if there is already another
1075 * operation suspended (imagine what happens
1076 * when one chip was already done with the current
1077 * operation while another chip suspended it, then
1078 * we resume the whole thing at once). Yes, it
1081 map_write(map, CMD(0xb0), adr); /* erase suspend command */
1082 usec -= xip_elapsed_since(start);
1083 suspended = xip_currtime();
1085 if (xip_elapsed_since(suspended) > 100000) {
1087 * The chip doesn't want to suspend
1088 * after waiting for 100 msecs.
1089 * This is a critical error but there
1090 * is not much we can do here.
1094 status = map_read(map, adr);
1095 } while (!map_word_andequal(map, status, OK, OK));
1097 /* Suspend succeeded */
1098 oldstate = chip->state;
1099 if (!map_word_bitsset(map, status, CMD(0x40)))
1101 chip->state = FL_XIP_WHILE_ERASING;
1102 chip->erase_suspended = 1;
1103 map_write(map, CMD(0xf0), adr); /* back to array mode */
1104 (void) map_read(map, adr);
1107 mutex_unlock(&chip->mutex);
1112 * We're back. However someone else might have
1113 * decided to go write to the chip if we are in
1114 * a suspended erase state. If so let's wait
1117 mutex_lock(&chip->mutex);
1118 while (chip->state != FL_XIP_WHILE_ERASING) {
1119 DECLARE_WAITQUEUE(wait, current);
1120 set_current_state(TASK_UNINTERRUPTIBLE);
1121 add_wait_queue(&chip->wq, &wait);
1122 mutex_unlock(&chip->mutex);
1124 remove_wait_queue(&chip->wq, &wait);
1125 mutex_lock(&chip->mutex);
1127 /* Disallow XIP again */
1128 local_irq_disable();
1130 /* Correct Erase Suspend Hangups for M29EW */
1131 cfi_fixup_m29ew_erase_suspend(map, adr);
1132 /* Resume the write or erase operation */
1133 map_write(map, cfi->sector_erase_cmd, adr);
1134 chip->state = oldstate;
1135 start = xip_currtime();
1136 } else if (usec >= 1000000/HZ) {
1138 * Try to save on CPU power when waiting delay
1139 * is at least a system timer tick period.
1140 * No need to be extremely accurate here.
/* Busy-poll DQ7 until ready or the requested delay has elapsed. */
1144 status = map_read(map, adr);
1145 } while (!map_word_andequal(map, status, OK, OK)
1146 && xip_elapsed_since(start) < usec);
1149 #define UDELAY(map, chip, adr, usec) xip_udelay(map, chip, adr, usec)
1152 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
1153 * the flash is actively programming or erasing since we have to poll for
1154 * the operation to complete anyway. We can't do that in a generic way with
1155 * a XIP setup so do it before the actual flash operation in this case
1156 * and stub it out from INVALIDATE_CACHE_UDELAY.
1158 #define XIP_INVAL_CACHED_RANGE(map, from, size) \
1159 INVALIDATE_CACHED_RANGE(map, from, size)
1161 #define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \
1162 UDELAY(map, chip, adr, usec)
1167 * Activating this XIP support changes the way the code works a bit. For
1168 * example the code to suspend the current process when concurrent access
1169 * happens is never executed because xip_udelay() will always return with the
1170 * same chip state as it was entered with. This is why there is no care for
1171 * the presence of add_wait_queue() or schedule() calls from within a couple
1172 * xip_disable()'d areas of code, like in do_erase_oneblock for example.
1173 * The queueing and scheduling are always happening within xip_udelay().
1175 * Similarly, get_chip() and put_chip() just happen to always be executed
1176 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where flash state
1177 * is in array mode, therefore never executing many cases therein and not
1178 * causing any problem with XIP.
1183 #define xip_disable(map, chip, adr)
1184 #define xip_enable(map, chip, adr)
1185 #define XIP_INVAL_CACHED_RANGE(x...)
1187 #define UDELAY(map, chip, adr, usec) \
1189 mutex_unlock(&chip->mutex); \
1191 mutex_lock(&chip->mutex); \
1194 #define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec) \
1196 mutex_unlock(&chip->mutex); \
1197 INVALIDATE_CACHED_RANGE(map, adr, len); \
1199 mutex_lock(&chip->mutex); \
/*
 * do_read_onechip() - read @len bytes at @adr from one chip into @buf.
 * Claims the chip (get_chip), forces array mode if necessary, copies the
 * data, then releases the chip.  Returns 0 or a get_chip() error.
 */
1204 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
1206 unsigned long cmd_addr;
1207 struct cfi_private *cfi = map->fldrv_priv;
1212 /* Ensure cmd read/writes are aligned. */
1213 cmd_addr = adr & ~(map_bankwidth(map)-1);
1215 mutex_lock(&chip->mutex);
1216 ret = get_chip(map, chip, cmd_addr, FL_READY);
1218 mutex_unlock(&chip->mutex);
/* Not in array mode yet: send the reset command to get there. */
1222 if (chip->state != FL_POINT && chip->state != FL_READY) {
1223 map_write(map, CMD(0xf0), cmd_addr);
1224 chip->state = FL_READY;
1227 map_copy_from(map, buf, adr, len);
1229 put_chip(map, chip, cmd_addr);
1231 mutex_unlock(&chip->mutex);
/*
 * cfi_amdstd_read() - MTD read entry point.  Splits the request at chip
 * boundaries and delegates each piece to do_read_onechip().
 * NOTE(review): loop header, retlen updates and advance-to-next-chip
 * lines are missing from this excerpt — comments only.
 */
1236 static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1238 struct map_info *map = mtd->priv;
1239 struct cfi_private *cfi = map->fldrv_priv;
1244 /* ofs: offset within the first chip that the first read should start */
1245 chipnum = (from >> cfi->chipshift);
1246 ofs = from - (chipnum << cfi->chipshift);
1249 unsigned long thislen;
1251 if (chipnum >= cfi->numchips)
/* Clamp this transfer so it does not cross the chip boundary. */
1254 if ((len + ofs -1) >> cfi->chipshift)
1255 thislen = (1<<cfi->chipshift) - ofs;
1259 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1273 typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
1274 loff_t adr, size_t len, u_char *buf, size_t grouplen);
/*
 * otp_enter() - send the unlock + 0x88 command sequence that switches the
 * chip into its SecSi/OTP region, and invalidate any cached shadow of it.
 */
1276 static inline void otp_enter(struct map_info *map, struct flchip *chip,
1277 loff_t adr, size_t len)
1279 struct cfi_private *cfi = map->fldrv_priv;
1281 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
1282 cfi->device_type, NULL);
1283 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
1284 cfi->device_type, NULL);
1285 cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi,
1286 cfi->device_type, NULL);
1288 INVALIDATE_CACHED_RANGE(map, chip->start + adr, len);
/*
 * otp_exit() - leave SecSi/OTP mode (unlock + 0x90, 0x00 exit sequence)
 * and invalidate the cached range so subsequent reads see array data.
 */
1291 static inline void otp_exit(struct map_info *map, struct flchip *chip,
1292 loff_t adr, size_t len)
1294 struct cfi_private *cfi = map->fldrv_priv;
1296 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
1297 cfi->device_type, NULL);
1298 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
1299 cfi->device_type, NULL);
1300 cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi,
1301 cfi->device_type, NULL);
1302 cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi,
1303 cfi->device_type, NULL);
1305 INVALIDATE_CACHED_RANGE(map, chip->start + adr, len);
/*
 * do_read_secsi_onechip() - read @len bytes of the SecSi (OTP) area of
 * one chip into @buf.  Sleeps on the chip wait queue until the chip is
 * FL_READY, then brackets the copy with otp_enter()/otp_exit().
 * NOTE(review): retry loop structure and wake_up are missing from this
 * excerpt — comments only.
 */
1308 static inline int do_read_secsi_onechip(struct map_info *map,
1309 struct flchip *chip, loff_t adr,
1310 size_t len, u_char *buf,
1313 DECLARE_WAITQUEUE(wait, current);
1316 mutex_lock(&chip->mutex);
1318 if (chip->state != FL_READY){
/* Chip busy: sleep until woken, then re-check (loop lines elided). */
1319 set_current_state(TASK_UNINTERRUPTIBLE);
1320 add_wait_queue(&chip->wq, &wait);
1322 mutex_unlock(&chip->mutex);
1325 remove_wait_queue(&chip->wq, &wait);
1332 chip->state = FL_READY;
1334 otp_enter(map, chip, adr, len);
1335 map_copy_from(map, buf, adr, len);
1336 otp_exit(map, chip, adr, len);
1339 mutex_unlock(&chip->mutex);
/*
 * cfi_amdstd_secsi_read() - read the SecSi area, 8 bytes per chip,
 * splitting the request across chips like cfi_amdstd_read().
 */
1344 static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1346 struct map_info *map = mtd->priv;
1347 struct cfi_private *cfi = map->fldrv_priv;
1352 /* ofs: offset within the first chip that the first read should start */
1353 /* 8 secsi bytes per chip */
1358 unsigned long thislen;
1360 if (chipnum >= cfi->numchips)
/* Clamp to the 8-byte (1<<3) SecSi window of the current chip. */
1363 if ((len + ofs -1) >> 3)
1364 thislen = (1<<3) - ofs;
1368 ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs,
1383 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1384 unsigned long adr, map_word datum,
/*
 * do_otp_write() - program @len bytes at OTP offset @adr.  Works one bus
 * word at a time: for a partial word, the old OTP contents are read back
 * first so untouched bytes are preserved, then the merged word is
 * programmed via do_write_oneword() in FL_OTP_WRITE mode.
 */
1387 static int do_otp_write(struct map_info *map, struct flchip *chip, loff_t adr,
1388 size_t len, u_char *buf, size_t grouplen)
1392 unsigned long bus_ofs = adr & ~(map_bankwidth(map)-1);
1393 int gap = adr - bus_ofs; /* byte offset of adr within the bus word */
1394 int n = min_t(int, len, map_bankwidth(map) - gap);
1395 map_word datum = map_word_ff(map);
1397 if (n != map_bankwidth(map)) {
1398 /* partial write of a word, load old contents */
1399 otp_enter(map, chip, bus_ofs, map_bankwidth(map));
1400 datum = map_read(map, bus_ofs);
1401 otp_exit(map, chip, bus_ofs, map_bankwidth(map));
1404 datum = map_word_load_partial(map, datum, buf, gap, n);
1405 ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
/*
 * do_otp_lock() - permanently lock the user OTP region by clearing bit 0
 * of the lock register.  Only whole-group locking is allowed, so @adr
 * must be 0 and @len must equal @grouplen.
 * NOTE(review): error returns, the lockreg bit manipulation line and
 * loop openers are missing from this excerpt — comments only.
 */
1417 static int do_otp_lock(struct map_info *map, struct flchip *chip, loff_t adr,
1418 size_t len, u_char *buf, size_t grouplen)
1420 struct cfi_private *cfi = map->fldrv_priv;
1422 unsigned long timeo;
1425 /* make sure area matches group boundaries */
1426 if ((adr != 0) || (len != grouplen))
1429 mutex_lock(&chip->mutex);
1430 ret = get_chip(map, chip, chip->start, FL_LOCKING);
1432 mutex_unlock(&chip->mutex);
1435 chip->state = FL_LOCKING;
1437 /* Enter lock register command */
1438 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
1439 cfi->device_type, NULL);
1440 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
1441 cfi->device_type, NULL);
1442 cfi_send_gen_cmd(0x40, cfi->addr_unlock1, chip->start, map, cfi,
1443 cfi->device_type, NULL);
1445 /* read lock register */
1446 lockreg = cfi_read_query(map, 0);
1448 /* set bit 0 to protect extended memory block */
1451 /* set bit 0 to protect extended memory block */
1452 /* write lock register */
1453 map_write(map, CMD(0xA0), chip->start);
1454 map_write(map, CMD(lockreg), chip->start);
1456 /* wait for chip to become ready */
1457 timeo = jiffies + msecs_to_jiffies(2);
1459 if (chip_ready(map, chip, adr, NULL))
1462 if (time_after(jiffies, timeo)) {
1463 pr_err("Waiting for chip to be ready timed out.\n");
1467 UDELAY(map, chip, 0, 1);
1470 /* exit protection commands */
1471 map_write(map, CMD(0x90), chip->start);
1472 map_write(map, CMD(0x00), chip->start);
1474 chip->state = FL_READY;
1475 put_chip(map, chip, chip->start);
1476 mutex_unlock(&chip->mutex);
/*
 * cfi_amdstd_otp_walk() - common iterator behind all OTP operations.
 * For each chip it discovers the factory and user OTP geometry (reading
 * the CFI query area and the lock register), then either fills in
 * otp_info records (when @action is NULL) or applies @action (read,
 * write or lock) to the chosen region.  @user_regs selects the user
 * region over the factory one.
 * NOTE(review): several lines are missing from this excerpt (base
 * computation, non-M29EW branch, otpinfo-vs-action selector, error
 * returns, loop tails) — comments only, code untouched.
 */
1481 static int cfi_amdstd_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
1482 size_t *retlen, u_char *buf,
1483 otp_op_t action, int user_regs)
1485 struct map_info *map = mtd->priv;
1486 struct cfi_private *cfi = map->fldrv_priv;
1487 int ofs_factor = cfi->interleave * cfi->device_type;
1490 struct flchip *chip;
1491 uint8_t otp, lockreg;
1494 size_t user_size, factory_size, otpsize;
1495 loff_t user_offset, factory_offset, otpoffset;
1496 int user_locked = 0, otplocked;
1500 for (chipnum = 0; chipnum < cfi->numchips; chipnum++) {
1501 chip = &cfi->chips[chipnum];
1505 /* Micron M29EW family */
1506 if (is_m29ew(cfi)) {
1509 /* check whether secsi area is factory locked
1511 mutex_lock(&chip->mutex);
1512 ret = get_chip(map, chip, base, FL_CFI_QUERY);
1514 mutex_unlock(&chip->mutex);
/* CFI query byte at offset 0x3 carries the factory-lock flag. */
1517 cfi_qry_mode_on(base, map, cfi);
1518 otp = cfi_read_query(map, base + 0x3 * ofs_factor);
1519 cfi_qry_mode_off(base, map, cfi);
1520 put_chip(map, chip, base);
1521 mutex_unlock(&chip->mutex);
1524 /* factory locked */
1526 factory_size = 0x100;
1528 /* customer lockable */
/* Read the lock register to learn whether the user region has
 * already been locked by the customer. */
1532 mutex_lock(&chip->mutex);
1533 ret = get_chip(map, chip, base, FL_LOCKING);
1535 mutex_unlock(&chip->mutex);
1539 /* Enter lock register command */
1540 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1,
1541 chip->start, map, cfi,
1542 cfi->device_type, NULL);
1543 cfi_send_gen_cmd(0x55, cfi->addr_unlock2,
1544 chip->start, map, cfi,
1545 cfi->device_type, NULL);
1546 cfi_send_gen_cmd(0x40, cfi->addr_unlock1,
1547 chip->start, map, cfi,
1548 cfi->device_type, NULL);
1549 /* read lock register */
1550 lockreg = cfi_read_query(map, 0);
1551 /* exit protection commands */
1552 map_write(map, CMD(0x90), chip->start);
1553 map_write(map, CMD(0x00), chip->start);
1554 put_chip(map, chip, chip->start);
1555 mutex_unlock(&chip->mutex);
/* Bit 0 cleared means the user region is locked. */
1557 user_locked = ((lockreg & 0x01) == 0x00);
/* Pick the factory or user region for the rest of the walk. */
1561 otpsize = user_regs ? user_size : factory_size;
1564 otpoffset = user_regs ? user_offset : factory_offset;
1565 otplocked = user_regs ? user_locked : 1;
1568 /* return otpinfo */
1569 struct otp_info *otpinfo;
1570 len -= sizeof(*otpinfo);
1573 otpinfo = (struct otp_info *)buf;
1574 otpinfo->start = from;
1575 otpinfo->length = otpsize;
1576 otpinfo->locked = otplocked;
1577 buf += sizeof(*otpinfo);
1578 *retlen += sizeof(*otpinfo);
1580 } else if ((from < otpsize) && (len > 0)) {
/* Apply the requested action to the overlap of [from, from+len)
 * with this chip's OTP region. */
1582 size = (len < otpsize - from) ? len : otpsize - from;
1583 ret = action(map, chip, otpoffset + from, size, buf,
/* Report factory OTP region geometry via the common OTP walker. */
1599 static int cfi_amdstd_get_fact_prot_info(struct mtd_info *mtd, size_t len,
1600 size_t *retlen, struct otp_info *buf)
1602 return cfi_amdstd_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
/* Report user OTP region geometry via the common OTP walker. */
1606 static int cfi_amdstd_get_user_prot_info(struct mtd_info *mtd, size_t len,
1607 size_t *retlen, struct otp_info *buf)
1609 return cfi_amdstd_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
/* Read from the factory OTP region (user_regs = 0). */
1613 static int cfi_amdstd_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
1614 size_t len, size_t *retlen,
1617 return cfi_amdstd_otp_walk(mtd, from, len, retlen,
1618 buf, do_read_secsi_onechip, 0);
/* Read from the user OTP region (user_regs = 1). */
1621 static int cfi_amdstd_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
1622 size_t len, size_t *retlen,
1625 return cfi_amdstd_otp_walk(mtd, from, len, retlen,
1626 buf, do_read_secsi_onechip, 1);
/* Program bytes in the user OTP region via do_otp_write. */
1629 static int cfi_amdstd_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
1630 size_t len, size_t *retlen,
1633 return cfi_amdstd_otp_walk(mtd, from, len, retlen, (u_char *)buf,
/* Permanently lock the user OTP region via do_otp_lock. */
1637 static int cfi_amdstd_lock_user_prot_reg(struct mtd_info *mtd, loff_t from,
1641 return cfi_amdstd_otp_walk(mtd, from, len, &retlen, NULL,
/*
 * do_write_oneword_once() - issue the unlock + 0xA0 program command for
 * one bus word and poll until it completes, times out, or fails.
 * Returns 0 on success; nonzero (elided return lines) on timeout or a
 * status-register error.  Called with chip->mutex held, XIP disabled.
 * NOTE(review): loop openers, break/return statements and closing
 * braces are missing from this excerpt — comments only.
 */
1645 static int __xipram do_write_oneword_once(struct map_info *map,
1646 struct flchip *chip,
1647 unsigned long adr, map_word datum,
1648 int mode, struct cfi_private *cfi)
1650 unsigned long timeo;
1652 * We use a 1ms + 1 jiffies generic timeout for writes (most devices
1653 * have a max write time of a few hundreds usec). However, we should
1654 * use the maximum timeout value given by the chip at probe time
1655 * instead. Unfortunately, struct flchip does have a field for
1656 * maximum timeout, only for typical which can be far too short
1657 * depending of the conditions. The ' + 1' is to avoid having a
1658 * timeout of 0 jiffies if HZ is smaller than 1000.
1660 unsigned long uWriteTimeout = (HZ / 1000) + 1;
/* Word-program command sequence: AA / 55 / A0, then the datum. */
1663 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1664 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
1665 cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
1666 map_write(map, datum, adr);
1669 INVALIDATE_CACHE_UDELAY(map, chip,
1670 adr, map_bankwidth(map),
1671 chip->word_write_time);
1673 /* See comment above for timeout value. */
1674 timeo = jiffies + uWriteTimeout;
1676 if (chip->state != mode) {
1677 /* Someone's suspended the write. Sleep */
1678 DECLARE_WAITQUEUE(wait, current);
1680 set_current_state(TASK_UNINTERRUPTIBLE);
1681 add_wait_queue(&chip->wq, &wait);
1682 mutex_unlock(&chip->mutex);
1684 remove_wait_queue(&chip->wq, &wait);
1685 timeo = jiffies + (HZ / 2); /* FIXME */
1686 mutex_lock(&chip->mutex);
1691 * We check "time_after" and "!chip_good" before checking
1692 * "chip_good" to avoid the failure due to scheduling.
1694 if (time_after(jiffies, timeo) &&
1695 !chip_good(map, chip, adr, &datum)) {
1696 xip_enable(map, chip, adr);
1697 printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
1698 xip_disable(map, chip, adr);
1703 if (chip_good(map, chip, adr, &datum)) {
1704 if (cfi_check_err_status(map, chip, adr))
1709 /* Latency issues. Drop the lock, wait a while and retry */
1710 UDELAY(map, chip, adr, 1);
/*
 * do_write_oneword_start() - claim the chip for a word write and, for
 * OTP writes, switch into SecSi mode.  On success returns with
 * chip->mutex held; on get_chip() failure the mutex is dropped.
 */
1716 static int __xipram do_write_oneword_start(struct map_info *map,
1717 struct flchip *chip,
1718 unsigned long adr, int mode)
1722 mutex_lock(&chip->mutex);
1724 ret = get_chip(map, chip, adr, mode);
1726 mutex_unlock(&chip->mutex);
1730 if (mode == FL_OTP_WRITE)
1731 otp_enter(map, chip, adr, map_bankwidth(map));
/*
 * do_write_oneword_done() - mirror of do_write_oneword_start(): leave
 * SecSi mode if this was an OTP write, mark the chip ready, release it,
 * and drop chip->mutex.
 */
1736 static void __xipram do_write_oneword_done(struct map_info *map,
1737 struct flchip *chip,
1738 unsigned long adr, int mode)
1740 if (mode == FL_OTP_WRITE)
1741 otp_exit(map, chip, adr, map_bankwidth(map));
1743 chip->state = FL_READY;
1745 put_chip(map, chip, adr);
1747 mutex_unlock(&chip->mutex);
/*
 * do_write_oneword_retry() - wrap do_write_oneword_once() with the NOP
 * short-circuit (skip writes whose target already holds the datum) and
 * up to MAX_RETRIES attempts, resetting the chip after each failure.
 * NOTE(review): retry-loop label/goto and return lines are missing from
 * this excerpt — comments only.
 */
1750 static int __xipram do_write_oneword_retry(struct map_info *map,
1751 struct flchip *chip,
1752 unsigned long adr, map_word datum,
1755 struct cfi_private *cfi = map->fldrv_priv;
1761 * Check for a NOP for the case when the datum to write is already
1762 * present - it saves time and works around buggy chips that corrupt
1763 * data at other locations when 0xff is written to a location that
1764 * already contains 0xff.
1766 oldd = map_read(map, adr);
1767 if (map_word_equal(map, oldd, datum)) {
1768 pr_debug("MTD %s(): NOP\n", __func__);
1772 XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1774 xip_disable(map, chip, adr);
1777 ret = do_write_oneword_once(map, chip, adr, datum, mode, cfi);
1779 /* reset on all failures. */
1780 map_write(map, CMD(0xF0), chip->start);
1781 /* FIXME - should have reset delay before continuing */
1783 if (++retry_cnt <= MAX_RETRIES) {
1788 xip_enable(map, chip, adr);
/*
 * do_write_oneword() - top-level single-word program: claim the chip,
 * run the write-with-retries, release the chip.  @mode is FL_WRITING or
 * FL_OTP_WRITE.
 */
1793 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1794 unsigned long adr, map_word datum,
1801 pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n", __func__, adr,
1804 ret = do_write_oneword_start(map, chip, adr, mode);
1808 ret = do_write_oneword_retry(map, chip, adr, datum, mode);
1810 do_write_oneword_done(map, chip, adr, mode);
/*
 * cfi_amdstd_write_words() - MTD write path using single-word programs.
 * Handles an unaligned head (read-modify-write of one bus word), then
 * full aligned words, then an unaligned tail, crossing chip boundaries
 * as needed.
 * NOTE(review): schedule() calls, goto retry targets, retlen/ofs updates
 * in the head/tail paths and error returns are missing from this
 * excerpt — comments only, code untouched.
 */
1816 static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
1817 size_t *retlen, const u_char *buf)
1819 struct map_info *map = mtd->priv;
1820 struct cfi_private *cfi = map->fldrv_priv;
1823 unsigned long ofs, chipstart;
1824 DECLARE_WAITQUEUE(wait, current);
1826 chipnum = to >> cfi->chipshift;
1827 ofs = to - (chipnum << cfi->chipshift);
1828 chipstart = cfi->chips[chipnum].start;
1830 /* If it's not bus-aligned, do the first byte write */
1831 if (ofs & (map_bankwidth(map)-1)) {
1832 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1833 int i = ofs - bus_ofs;
1838 mutex_lock(&cfi->chips[chipnum].mutex);
1840 if (cfi->chips[chipnum].state != FL_READY) {
/* Chip busy: sleep on its wait queue and retry (loop elided). */
1841 set_current_state(TASK_UNINTERRUPTIBLE);
1842 add_wait_queue(&cfi->chips[chipnum].wq, &wait);
1844 mutex_unlock(&cfi->chips[chipnum].mutex);
1847 remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
1851 /* Load 'tmp_buf' with old contents of flash */
1852 tmp_buf = map_read(map, bus_ofs+chipstart);
1854 mutex_unlock(&cfi->chips[chipnum].mutex);
1856 /* Number of bytes to copy from buffer */
1857 n = min_t(int, len, map_bankwidth(map)-i);
1859 tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);
1861 ret = do_write_oneword(map, &cfi->chips[chipnum],
1862 bus_ofs, tmp_buf, FL_WRITING);
/* Crossed into the next chip after the head write? */
1871 if (ofs >> cfi->chipshift) {
1874 if (chipnum == cfi->numchips)
1879 /* We are now aligned, write as much as possible */
1880 while(len >= map_bankwidth(map)) {
1883 datum = map_word_load(map, buf);
1885 ret = do_write_oneword(map, &cfi->chips[chipnum],
1886 ofs, datum, FL_WRITING);
1890 ofs += map_bankwidth(map);
1891 buf += map_bankwidth(map);
1892 (*retlen) += map_bankwidth(map);
1893 len -= map_bankwidth(map);
1895 if (ofs >> cfi->chipshift) {
1898 if (chipnum == cfi->numchips)
1900 chipstart = cfi->chips[chipnum].start;
1904 /* Write the trailing bytes if any */
1905 if (len & (map_bankwidth(map)-1)) {
1909 mutex_lock(&cfi->chips[chipnum].mutex);
1911 if (cfi->chips[chipnum].state != FL_READY) {
1912 set_current_state(TASK_UNINTERRUPTIBLE);
1913 add_wait_queue(&cfi->chips[chipnum].wq, &wait);
1915 mutex_unlock(&cfi->chips[chipnum].mutex);
1918 remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
/* Merge the tail bytes into the existing flash word. */
1922 tmp_buf = map_read(map, ofs + chipstart);
1924 mutex_unlock(&cfi->chips[chipnum].mutex);
1926 tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
1928 ret = do_write_oneword(map, &cfi->chips[chipnum],
1929 ofs, tmp_buf, FL_WRITING);
1939 #if !FORCE_WORD_WRITE
/*
 * do_write_buffer_wait() - poll for completion of a buffered program,
 * with the same suspend-aware sleep and timeout/chip_good ordering as
 * do_write_oneword_once().  Timeout comes from the CFI-reported maximum
 * buffer-write time.
 * NOTE(review): loop openers and return statements are missing from
 * this excerpt — comments only.
 */
1940 static int __xipram do_write_buffer_wait(struct map_info *map,
1941 struct flchip *chip, unsigned long adr,
1944 unsigned long timeo;
1945 unsigned long u_write_timeout;
1949 * Timeout is calculated according to CFI data, if available.
1950 * See more comments in cfi_cmdset_0002().
1952 u_write_timeout = usecs_to_jiffies(chip->buffer_write_time_max);
1953 timeo = jiffies + u_write_timeout;
1956 if (chip->state != FL_WRITING) {
1957 /* Someone's suspended the write. Sleep */
1958 DECLARE_WAITQUEUE(wait, current);
1960 set_current_state(TASK_UNINTERRUPTIBLE);
1961 add_wait_queue(&chip->wq, &wait);
1962 mutex_unlock(&chip->mutex);
1964 remove_wait_queue(&chip->wq, &wait);
1965 timeo = jiffies + (HZ / 2); /* FIXME */
1966 mutex_lock(&chip->mutex);
1971 * We check "time_after" and "!chip_good" before checking
1972 * "chip_good" to avoid the failure due to scheduling.
1974 if (time_after(jiffies, timeo) &&
1975 !chip_good(map, chip, adr, &datum)) {
1976 pr_err("MTD %s(): software timeout, address:0x%.8lx.\n",
1982 if (chip_good(map, chip, adr, &datum)) {
1983 if (cfi_check_err_status(map, chip, adr))
1988 /* Latency issues. Drop the lock, wait a while and retry */
1989 UDELAY(map, chip, adr, 1);
/*
 * do_write_buffer_reset() - issue the write-to-buffer-abort reset
 * sequence (AA / 55 / F0) after a buffered-program failure; the tail of
 * the sequence doubles as a plain reset, so it is safe on any failure.
 */
1995 static void __xipram do_write_buffer_reset(struct map_info *map,
1996 struct flchip *chip,
1997 struct cfi_private *cfi)
2000 * Recovery from write-buffer programming failures requires
2001 * the write-to-buffer-reset sequence. Since the last part
2002 * of the sequence also works as a normal reset, we can run
2003 * the same commands regardless of why we are here.
2005 * http://www.spansion.com/Support/Application%20Notes/MirrorBit_Write_Buffer_Prog_Page_Buffer_Read_AN.pdf
2007 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2008 cfi->device_type, NULL);
2009 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
2010 cfi->device_type, NULL);
2011 cfi_send_gen_cmd(0xF0, cfi->addr_unlock1, chip->start, map, cfi,
2012 cfi->device_type, NULL);
2014 /* FIXME - should have reset delay before continuing */
2018 * FIXME: interleaved mode not tested, and probably not supported!
/*
 * do_write_buffer() - program up to one write-buffer's worth of data at
 * @adr using the 0x25 (load) / 0x29 (confirm) buffered-program commands,
 * then wait for completion and reset the chip on failure.
 * NOTE(review): cmd_adr computation, the datum reload before the wait,
 * error-path gotos and the final return are missing from this excerpt —
 * comments only, code untouched.
 */
2020 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
2021 unsigned long adr, const u_char *buf,
2024 struct cfi_private *cfi = map->fldrv_priv;
2026 unsigned long cmd_adr;
2033 mutex_lock(&chip->mutex);
2034 ret = get_chip(map, chip, adr, FL_WRITING);
2036 mutex_unlock(&chip->mutex);
2040 datum = map_word_load(map, buf);
2042 pr_debug("MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
2043 __func__, adr, datum.x[0]);
2045 XIP_INVAL_CACHED_RANGE(map, adr, len);
2047 xip_disable(map, chip, cmd_adr);
2049 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2050 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
2052 /* Write Buffer Load */
2053 map_write(map, CMD(0x25), cmd_adr);
2055 chip->state = FL_WRITING_TO_BUFFER;
2057 /* Write length of data to come */
2058 words = len / map_bankwidth(map);
2059 map_write(map, CMD(words - 1), cmd_adr); /* chip expects count-1 */
/* Stream the payload into the write buffer, one bus word at a time. */
2062 while(z < words * map_bankwidth(map)) {
2063 datum = map_word_load(map, buf);
2064 map_write(map, datum, adr + z);
2066 z += map_bankwidth(map);
2067 buf += map_bankwidth(map);
2069 z -= map_bankwidth(map);
2073 /* Write Buffer Program Confirm: GO GO GO */
2074 map_write(map, CMD(0x29), cmd_adr);
2075 chip->state = FL_WRITING;
2077 INVALIDATE_CACHE_UDELAY(map, chip,
2078 adr, map_bankwidth(map),
2079 chip->word_write_time);
2081 ret = do_write_buffer_wait(map, chip, adr, datum);
2083 do_write_buffer_reset(map, chip, cfi);
2085 xip_enable(map, chip, adr);
2087 chip->state = FL_READY;
2089 put_chip(map, chip, adr);
2090 mutex_unlock(&chip->mutex);
/*
 * cfi_amdstd_write_buffers() - MTD write path using buffered programs.
 * Aligns the head with word writes, then issues buffer-sized programs
 * that never cross a write-buffer boundary, then word-writes the tail.
 * NOTE(review): retlen/ofs bookkeeping, size clamp against len, and
 * error returns are missing from this excerpt — comments only.
 */
2096 static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
2097 size_t *retlen, const u_char *buf)
2099 struct map_info *map = mtd->priv;
2100 struct cfi_private *cfi = map->fldrv_priv;
/* Write-buffer size in bytes, from the CFI query, scaled by interleave. */
2101 int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
2106 chipnum = to >> cfi->chipshift;
2107 ofs = to - (chipnum << cfi->chipshift);
2109 /* If it's not bus-aligned, do the first word write */
2110 if (ofs & (map_bankwidth(map)-1)) {
2111 size_t local_len = (-ofs)&(map_bankwidth(map)-1);
2112 if (local_len > len)
2114 ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
2115 local_len, retlen, buf);
2122 if (ofs >> cfi->chipshift) {
2125 if (chipnum == cfi->numchips)
2130 /* Write buffer is worth it only if more than one word to write... */
2131 while (len >= map_bankwidth(map) * 2) {
2132 /* We must not cross write block boundaries */
2133 int size = wbufsize - (ofs & (wbufsize-1));
2137 if (size % map_bankwidth(map))
2138 size -= size % map_bankwidth(map);
2140 ret = do_write_buffer(map, &cfi->chips[chipnum],
2150 if (ofs >> cfi->chipshift) {
2153 if (chipnum == cfi->numchips)
/* Sub-word tail: fall back to the word-write path. */
2159 size_t retlen_dregs = 0;
2161 ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
2162 len, &retlen_dregs, buf);
2164 *retlen += retlen_dregs;
2170 #endif /* !FORCE_WORD_WRITE */
2173 * Wait for the flash chip to become ready to write data
2175 * This is only called during the panic_write() path. When panic_write()
2176 * is called, the kernel is in the process of a panic, and will soon be
2177 * dead. Therefore we don't take any locks, and attempt to get access
2178 * to the chip as soon as possible.
/*
 * cfi_amdstd_panic_wait() - lockless wait for the chip to become idle
 * during panic_write(): repeatedly reset the chip and busy-poll for up
 * to ~1 jiffy per attempt.  No locks are taken — the kernel is dying.
 * NOTE(review): retries/i declarations, udelay inside the poll loop and
 * the return statements are missing from this excerpt — comments only.
 */
2180 static int cfi_amdstd_panic_wait(struct map_info *map, struct flchip *chip,
2183 struct cfi_private *cfi = map->fldrv_priv;
2188 * If the driver thinks the chip is idle, and no toggle bits
2189 * are changing, then the chip is actually idle for sure.
2191 if (chip->state == FL_READY && chip_ready(map, chip, adr, NULL))
2195 * Try several times to reset the chip and then wait for it
2196 * to become idle. The upper limit of a few milliseconds of
2197 * delay isn't a big problem: the kernel is dying anyway. It
2198 * is more important to save the messages.
2200 while (retries > 0) {
2201 const unsigned long timeo = (HZ / 1000) + 1;
2203 /* send the reset command */
2204 map_write(map, CMD(0xF0), chip->start);
2206 /* wait for the chip to become ready */
2207 for (i = 0; i < jiffies_to_usecs(timeo); i++) {
2208 if (chip_ready(map, chip, adr, NULL))
2217 /* the chip never became ready */
2222 * Write out one word of data to a single flash chip during a kernel panic
2224 * This is only called during the panic_write() path. When panic_write()
2225 * is called, the kernel is in the process of a panic, and will soon be
2226 * dead. Therefore we don't take any locks, and attempt to get access
2227 * to the chip as soon as possible.
2229 * The implementation of this routine is intentionally similar to
2230 * do_write_oneword(), in order to ease code maintenance.
/*
 * do_panic_write_oneword() - lockless single-word program for the panic
 * path; mirrors do_write_oneword() (NOP check, AA/55/A0 sequence,
 * busy-poll, reset-and-retry) but takes no mutexes and never sleeps.
 * NOTE(review): retry label/goto and return lines are missing from this
 * excerpt — comments only.
 */
2232 static int do_panic_write_oneword(struct map_info *map, struct flchip *chip,
2233 unsigned long adr, map_word datum)
2235 const unsigned long uWriteTimeout = (HZ / 1000) + 1;
2236 struct cfi_private *cfi = map->fldrv_priv;
2244 ret = cfi_amdstd_panic_wait(map, chip, adr);
2248 pr_debug("MTD %s(): PANIC WRITE 0x%.8lx(0x%.8lx)\n",
2249 __func__, adr, datum.x[0]);
2252 * Check for a NOP for the case when the datum to write is already
2253 * present - it saves time and works around buggy chips that corrupt
2254 * data at other locations when 0xff is written to a location that
2255 * already contains 0xff.
2257 oldd = map_read(map, adr);
2258 if (map_word_equal(map, oldd, datum)) {
2259 pr_debug("MTD %s(): NOP\n", __func__);
2266 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2267 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
2268 cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2269 map_write(map, datum, adr);
2271 for (i = 0; i < jiffies_to_usecs(uWriteTimeout); i++) {
2272 if (chip_ready(map, chip, adr, NULL))
2278 if (!chip_ready(map, chip, adr, &datum) ||
2279 cfi_check_err_status(map, chip, adr)) {
2280 /* reset on all failures. */
2281 map_write(map, CMD(0xF0), chip->start);
2282 /* FIXME - should have reset delay before continuing */
2284 if (++retry_cnt <= MAX_RETRIES)
2296 * Write out some data during a kernel panic
2298 * This is used by the mtdoops driver to save the dying messages from a
2299 * kernel which has panic'd.
2301 * This routine ignores all of the locking used throughout the rest of the
2302 * driver, in order to ensure that the data gets written out no matter what
2303 * state this driver (and the flash chip itself) was in when the kernel crashed.
2305 * The implementation of this routine is intentionally similar to
2306 * cfi_amdstd_write_words(), in order to ease code maintenance.
/*
 * cfi_amdstd_panic_write() - lockless write used by mtdoops to save the
 * dying kernel's messages; same head/body/tail structure as
 * cfi_amdstd_write_words() but with no locking or sleeping.
 * NOTE(review): retlen updates in head/tail and error returns are
 * missing from this excerpt — comments only, code untouched.
 */
2308 static int cfi_amdstd_panic_write(struct mtd_info *mtd, loff_t to, size_t len,
2309 size_t *retlen, const u_char *buf)
2311 struct map_info *map = mtd->priv;
2312 struct cfi_private *cfi = map->fldrv_priv;
2313 unsigned long ofs, chipstart;
2317 chipnum = to >> cfi->chipshift;
2318 ofs = to - (chipnum << cfi->chipshift);
2319 chipstart = cfi->chips[chipnum].start;
2321 /* If it's not bus aligned, do the first byte write */
2322 if (ofs & (map_bankwidth(map) - 1)) {
2323 unsigned long bus_ofs = ofs & ~(map_bankwidth(map) - 1);
2324 int i = ofs - bus_ofs;
2328 ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], bus_ofs);
2332 /* Load 'tmp_buf' with old contents of flash */
2333 tmp_buf = map_read(map, bus_ofs + chipstart);
2335 /* Number of bytes to copy from buffer */
2336 n = min_t(int, len, map_bankwidth(map) - i);
2338 tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);
2340 ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
2350 if (ofs >> cfi->chipshift) {
2353 if (chipnum == cfi->numchips)
2358 /* We are now aligned, write as much as possible */
2359 while (len >= map_bankwidth(map)) {
2362 datum = map_word_load(map, buf);
2364 ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
2369 ofs += map_bankwidth(map);
2370 buf += map_bankwidth(map);
2371 (*retlen) += map_bankwidth(map);
2372 len -= map_bankwidth(map);
2374 if (ofs >> cfi->chipshift) {
2377 if (chipnum == cfi->numchips)
2380 chipstart = cfi->chips[chipnum].start;
2384 /* Write the trailing bytes if any */
2385 if (len & (map_bankwidth(map) - 1)) {
2388 ret = cfi_amdstd_panic_wait(map, &cfi->chips[chipnum], ofs);
2392 tmp_buf = map_read(map, ofs + chipstart);
2394 tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);
2396 ret = do_panic_write_oneword(map, &cfi->chips[chipnum],
2409 * Handle devices with one erase region, that only implement
2410 * the chip erase command.
/*
 * do_erase_chip - issue the JEDEC full-chip erase sequence and wait for it
 * to finish.  Claims the chip (get_chip/FL_ERASING) under chip->mutex,
 * sends the six-cycle AA/55/80/AA/55/10 unlock + chip-erase command
 * sequence, then polls with chip_ready() under a 20 s software timeout,
 * sleeping on chip->wq whenever the erase has been suspended.  On failure
 * the chip is reset with 0xF0 and the whole operation is retried up to
 * MAX_RETRIES times.  Returns 0 on success or a negative error code.
 */
2412 static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
2414 struct cfi_private *cfi = map->fldrv_priv;
2415 unsigned long timeo = jiffies + HZ;
2416 unsigned long int adr;
2417 DECLARE_WAITQUEUE(wait, current);
/* An erased flash reads as all-ones; chip_ready() compares against this. */
2420 map_word datum = map_word_ff(map);
2422 adr = cfi->addr_unlock1;
2424 mutex_lock(&chip->mutex);
2425 ret = get_chip(map, chip, adr, FL_ERASING);
2427 mutex_unlock(&chip->mutex);
2431 pr_debug("MTD %s(): ERASE 0x%.8lx\n",
2432 __func__, chip->start);
2434 XIP_INVAL_CACHED_RANGE(map, adr, map->size);
2436 xip_disable(map, chip, adr);
/* Six-cycle JEDEC chip-erase command: unlock, 0x80, unlock again, 0x10. */
2439 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2440 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
2441 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2442 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2443 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
2444 cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2446 chip->state = FL_ERASING;
2447 chip->erase_suspended = 0;
2448 chip->in_progress_block_addr = adr;
/* Whole chip is in progress, so the mask covers the full device size. */
2449 chip->in_progress_block_mask = ~(map->size - 1);
2451 INVALIDATE_CACHE_UDELAY(map, chip,
2453 chip->erase_time*500);
/* Software timeout: 20 seconds from now. */
2455 timeo = jiffies + (HZ*20);
2458 if (chip->state != FL_ERASING) {
2459 /* Someone's suspended the erase. Sleep */
2460 set_current_state(TASK_UNINTERRUPTIBLE);
2461 add_wait_queue(&chip->wq, &wait);
2462 mutex_unlock(&chip->mutex);
2464 remove_wait_queue(&chip->wq, &wait);
2465 mutex_lock(&chip->mutex);
2468 if (chip->erase_suspended) {
2469 /* This erase was suspended and resumed.
2470 Adjust the timeout */
2471 timeo = jiffies + (HZ*20); /* FIXME */
2472 chip->erase_suspended = 0;
/* Done when the array reads back as all-ones (datum). */
2475 if (chip_ready(map, chip, adr, &datum)) {
2476 if (cfi_check_err_status(map, chip, adr))
2481 if (time_after(jiffies, timeo)) {
2482 printk(KERN_WARNING "MTD %s(): software timeout\n",
2488 /* Latency issues. Drop the lock, wait a while and retry */
2489 UDELAY(map, chip, adr, 1000000/HZ);
2491 /* Did we succeed? */
2493 /* reset on all failures. */
2494 map_write(map, CMD(0xF0), chip->start);
2495 /* FIXME - should have reset delay before continuing */
2497 if (++retry_cnt <= MAX_RETRIES) {
2503 chip->state = FL_READY;
2504 xip_enable(map, chip, adr);
2506 put_chip(map, chip, adr);
2507 mutex_unlock(&chip->mutex);
/*
 * do_erase_oneblock - erase a single sector/block at @adr of length @len.
 * Same structure as do_erase_chip(): claim the chip for FL_ERASING, send
 * the five-cycle AA/55/80/AA/55 unlock sequence followed by the sector
 * erase command written directly to the block address, then poll with
 * chip_ready() under a 20 s timeout with suspend/resume handling and
 * up to MAX_RETRIES retries after a 0xF0 reset.  Matches the
 * erasefunc prototype used by cfi_varsize_frob() (@thunk unused).
 */
2513 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
2515 struct cfi_private *cfi = map->fldrv_priv;
2516 unsigned long timeo = jiffies + HZ;
2517 DECLARE_WAITQUEUE(wait, current);
/* Erased flash reads back all-ones; used as the completion pattern. */
2520 map_word datum = map_word_ff(map);
2524 mutex_lock(&chip->mutex);
2525 ret = get_chip(map, chip, adr, FL_ERASING);
2527 mutex_unlock(&chip->mutex);
2531 pr_debug("MTD %s(): ERASE 0x%.8lx\n",
2534 XIP_INVAL_CACHED_RANGE(map, adr, len);
2536 xip_disable(map, chip, adr);
/* Unlock sequence, then the sector-erase opcode written at the block itself. */
2539 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2540 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
2541 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2542 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
2543 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
2544 map_write(map, cfi->sector_erase_cmd, adr);
2546 chip->state = FL_ERASING;
2547 chip->erase_suspended = 0;
2548 chip->in_progress_block_addr = adr;
/* Mask covers just this block (len assumed to be a power of two here). */
2549 chip->in_progress_block_mask = ~(len - 1);
2551 INVALIDATE_CACHE_UDELAY(map, chip,
2553 chip->erase_time*500);
/* Software timeout: 20 seconds from now. */
2555 timeo = jiffies + (HZ*20);
2558 if (chip->state != FL_ERASING) {
2559 /* Someone's suspended the erase. Sleep */
2560 set_current_state(TASK_UNINTERRUPTIBLE);
2561 add_wait_queue(&chip->wq, &wait);
2562 mutex_unlock(&chip->mutex);
2564 remove_wait_queue(&chip->wq, &wait);
2565 mutex_lock(&chip->mutex);
2568 if (chip->erase_suspended) {
2569 /* This erase was suspended and resumed.
2570 Adjust the timeout */
2571 timeo = jiffies + (HZ*20); /* FIXME */
2572 chip->erase_suspended = 0;
/* Done when the block reads back as all-ones. */
2575 if (chip_ready(map, chip, adr, &datum)) {
2576 if (cfi_check_err_status(map, chip, adr))
2581 if (time_after(jiffies, timeo)) {
2582 printk(KERN_WARNING "MTD %s(): software timeout\n",
2588 /* Latency issues. Drop the lock, wait a while and retry */
2589 UDELAY(map, chip, adr, 1000000/HZ);
2591 /* Did we succeed? */
2593 /* reset on all failures. */
2594 map_write(map, CMD(0xF0), chip->start);
2595 /* FIXME - should have reset delay before continuing */
2597 if (++retry_cnt <= MAX_RETRIES) {
2603 chip->state = FL_READY;
2604 xip_enable(map, chip, adr);
2606 put_chip(map, chip, adr);
2607 mutex_unlock(&chip->mutex);
/*
 * cfi_amdstd_erase_varsize - mtd->_erase entry point for variable-sized
 * erase regions; walks the requested range block-by-block via
 * cfi_varsize_frob(), calling do_erase_oneblock() for each block.
 */
2612 static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
2614 return cfi_varsize_frob(mtd, do_erase_oneblock, instr->addr,
/*
 * cfi_amdstd_erase_chip - mtd->_erase entry point for devices that only
 * implement the full chip-erase command.  Only a request covering the
 * entire device (addr == 0, len == mtd->size) is accepted; anything else
 * is rejected before reaching the hardware.  Only the first chip is
 * erased here (chips[0]).
 */
2619 static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
2621 struct map_info *map = mtd->priv;
2622 struct cfi_private *cfi = map->fldrv_priv;
/* Chip erase cannot do partial ranges: must start at 0 ... */
2624 if (instr->addr != 0)
/* ... and must span the whole device. */
2627 if (instr->len != mtd->size)
2630 return do_erase_chip(map, &cfi->chips[0]);
/*
 * do_atmel_lock - lock (softlock) one sector on Atmel chips.  Claims the
 * chip for FL_LOCKING, sends the five-cycle AA/55/80/AA/55 unlock
 * sequence, then writes the 0x40 sector-lock command at the sector
 * address (chip->start + adr).  Called per-block via cfi_varsize_frob()
 * from cfi_atmel_lock(); @thunk is unused.
 */
2633 static int do_atmel_lock(struct map_info *map, struct flchip *chip,
2634 unsigned long adr, int len, void *thunk)
2636 struct cfi_private *cfi = map->fldrv_priv;
2639 mutex_lock(&chip->mutex);
2640 ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
2643 chip->state = FL_LOCKING;
2645 pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len);
2647 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2648 cfi->device_type, NULL);
2649 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
2650 cfi->device_type, NULL);
2651 cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi,
2652 cfi->device_type, NULL);
2653 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2654 cfi->device_type, NULL);
2655 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
2656 cfi->device_type, NULL);
/* 0x40 = Atmel sector softlock command, written at the target sector. */
2657 map_write(map, CMD(0x40), chip->start + adr);
2659 chip->state = FL_READY;
2660 put_chip(map, chip, adr + chip->start);
2664 mutex_unlock(&chip->mutex);
/*
 * do_atmel_unlock - unlock (remove softlock from) one sector on Atmel
 * chips.  Claims the chip for FL_UNLOCKING, sends a single 0xAA unlock
 * cycle, then writes the 0x70 sector-unlock command.  Called per-block
 * via cfi_varsize_frob() from cfi_atmel_unlock(); @thunk is unused.
 */
2668 static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
2669 unsigned long adr, int len, void *thunk)
2671 struct cfi_private *cfi = map->fldrv_priv;
2674 mutex_lock(&chip->mutex);
2675 ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
2678 chip->state = FL_UNLOCKING;
/* NOTE(review): message says "LOCK" although this is the unlock path. */
2680 pr_debug("MTD %s(): LOCK 0x%08lx len %d\n", __func__, adr, len);
2682 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2683 cfi->device_type, NULL);
/* NOTE(review): written at 'adr' without chip->start, unlike do_atmel_lock's
 * 0x40 write — confirm this offset is intentional for multi-chip maps. */
2684 map_write(map, CMD(0x70), adr);
2686 chip->state = FL_READY;
2687 put_chip(map, chip, adr + chip->start);
2691 mutex_unlock(&chip->mutex);
/* mtd->_lock hook for Atmel parts: apply do_atmel_lock() to each block. */
2695 static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2697 return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL);
/* mtd->_unlock hook for Atmel parts: apply do_atmel_unlock() per block. */
2700 static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2702 return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
2706 * Advanced Sector Protection - PPB (Persistent Protection Bit) locking
2710 struct flchip *chip;
/* Thunk values passed to do_ppb_xxlock() to select the PPB operation:
 * lock one block, unlock (all blocks — hardware limitation), or query
 * the lock status of one block. */
2715 #define DO_XXLOCK_ONEBLOCK_LOCK ((void *)1)
2716 #define DO_XXLOCK_ONEBLOCK_UNLOCK ((void *)2)
2717 #define DO_XXLOCK_ONEBLOCK_GETLOCK ((void *)3)
/*
 * do_ppb_xxlock - perform one PPB (Persistent Protection Bit) operation
 * selected by @thunk: LOCK a single sector, UNLOCK (which on this command
 * set unlocks *all* sectors of the device), or GETLOCK to query a
 * sector's lock status.  Enters PPB mode with AA/55/C0, performs the
 * operation, waits up to 2 s for chip_ready(), then exits with 90/00.
 * For GETLOCK the return value is 0 = locked, 1 = unlocked.
 */
2719 static int __maybe_unused do_ppb_xxlock(struct map_info *map,
2720 struct flchip *chip,
2721 unsigned long adr, int len, void *thunk)
2723 struct cfi_private *cfi = map->fldrv_priv;
2724 unsigned long timeo;
2728 mutex_lock(&chip->mutex);
2729 ret = get_chip(map, chip, adr, FL_LOCKING);
2731 mutex_unlock(&chip->mutex);
2735 pr_debug("MTD %s(): XXLOCK 0x%08lx len %d\n", __func__, adr, len);
2737 cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
2738 cfi->device_type, NULL);
2739 cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
2740 cfi->device_type, NULL);
2741 /* PPB entry command */
2742 cfi_send_gen_cmd(0xC0, cfi->addr_unlock1, chip->start, map, cfi,
2743 cfi->device_type, NULL);
2745 if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
2746 chip->state = FL_LOCKING;
/* PPB program: A0 then 00 at the sector address locks that sector. */
2747 map_write(map, CMD(0xA0), adr);
2748 map_write(map, CMD(0x00), adr);
2749 } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
2751 * Unlocking of one specific sector is not supported, so we
2752 * have to unlock all sectors of this device instead
2754 chip->state = FL_UNLOCKING;
/* All-sector PPB erase: 80 then 30 at the chip base. */
2755 map_write(map, CMD(0x80), chip->start);
2756 map_write(map, CMD(0x30), chip->start);
2757 } else if (thunk == DO_XXLOCK_ONEBLOCK_GETLOCK) {
2758 chip->state = FL_JEDEC_QUERY;
2759 /* Return locked status: 0->locked, 1->unlocked */
2760 ret = !cfi_read_query(map, adr);
2765 * Wait for some time as unlocking of all sectors takes quite long
2767 timeo = jiffies + msecs_to_jiffies(2000); /* 2s max (un)locking */
2769 if (chip_ready(map, chip, adr, NULL))
2772 if (time_after(jiffies, timeo)) {
2773 printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
2778 UDELAY(map, chip, adr, 1);
2781 /* Exit BC commands */
2782 map_write(map, CMD(0x90), chip->start);
2783 map_write(map, CMD(0x00), chip->start);
2785 chip->state = FL_READY;
2786 put_chip(map, chip, adr);
2787 mutex_unlock(&chip->mutex);
/* mtd->_lock hook using PPB: lock each block in [ofs, ofs+len). */
2792 static int __maybe_unused cfi_ppb_lock(struct mtd_info *mtd, loff_t ofs,
2795 return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
2796 DO_XXLOCK_ONEBLOCK_LOCK);
/*
 * cfi_ppb_unlock - mtd->_unlock hook using PPB.  Because the hardware can
 * only erase *all* PPB bits at once, this works in three phases:
 *   1. walk every sector of every erase region, recording the lock state
 *      of sectors *outside* the requested range into a kcalloc'd table;
 *   2. issue the all-sector PPB unlock for the requested range;
 *   3. re-lock each sector that the table recorded as locked.
 */
2799 static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
2802 struct mtd_erase_region_info *regions = mtd->eraseregions;
2803 struct map_info *map = mtd->priv;
2804 struct cfi_private *cfi = map->fldrv_priv;
2805 struct ppb_lock *sect;
2816 * PPB unlocking always unlocks all sectors of the flash chip.
2817 * We need to re-lock all previously locked sectors. So lets
2818 * first check the locking status of all sectors and save
2819 * it for future use.
/* Upper bound on the table size: total sector count across regions. */
2822 for (i = 0; i < mtd->numeraseregions; i++)
2823 max_sectors += regions[i].numblocks;
2825 sect = kcalloc(max_sectors, sizeof(struct ppb_lock), GFP_KERNEL);
2830 * This code to walk all sectors is a slightly modified version
2831 * of the cfi_varsize_frob() code.
2841 int size = regions[i].erasesize;
2844 * Only test sectors that shall not be unlocked. The other
2845 * sectors shall be unlocked, so lets keep their locking
2846 * status at "unlocked" (locked=0) for the final re-locking.
2848 if ((offset < ofs) || (offset >= (ofs + len))) {
2849 sect[sectors].chip = &cfi->chips[chipnum];
2850 sect[sectors].adr = adr;
2851 sect[sectors].locked = do_ppb_xxlock(
2852 map, &cfi->chips[chipnum], adr, 0,
2853 DO_XXLOCK_ONEBLOCK_GETLOCK);
/* Advance to the next erase region once this one is exhausted. */
2860 if (offset == regions[i].offset + size * regions[i].numblocks)
/* Crossed a chip boundary: move to the next chip in the map. */
2863 if (adr >> cfi->chipshift) {
2864 if (offset >= (ofs + len))
2869 if (chipnum >= cfi->numchips)
2874 if (sectors >= max_sectors) {
2875 printk(KERN_ERR "Only %d sectors for PPB locking supported!\n",
2882 /* Now unlock the whole chip */
2883 ret = cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
2884 DO_XXLOCK_ONEBLOCK_UNLOCK);
2891 * PPB unlocking always unlocks all sectors of the flash chip.
2892 * We need to re-lock all previously locked sectors.
2894 for (i = 0; i < sectors; i++) {
2896 do_ppb_xxlock(map, sect[i].chip, sect[i].adr, 0,
2897 DO_XXLOCK_ONEBLOCK_LOCK);
/* mtd->_is_locked hook: returns 1 if any block in the range is locked
 * (do_ppb_xxlock/GETLOCK returns nonzero), else 0. */
2904 static int __maybe_unused cfi_ppb_is_locked(struct mtd_info *mtd, loff_t ofs,
2907 return cfi_varsize_frob(mtd, do_ppb_xxlock, ofs, len,
2908 DO_XXLOCK_ONEBLOCK_GETLOCK) ? 1 : 0;
/*
 * cfi_amdstd_sync - mtd->_sync hook: quiesce every chip.  For each chip,
 * if it is idle (e.g. FL_JEDEC_QUERY and the other idle states handled by
 * the switch), mark it FL_SYNCING so nothing else touches it; otherwise
 * sleep on chip->wq until the chip becomes free and retry.  Afterwards
 * every chip previously marked FL_SYNCING is restored to its old state.
 */
2911 static void cfi_amdstd_sync (struct mtd_info *mtd)
2913 struct map_info *map = mtd->priv;
2914 struct cfi_private *cfi = map->fldrv_priv;
2916 struct flchip *chip;
2918 DECLARE_WAITQUEUE(wait, current);
2920 for (i=0; !ret && i<cfi->numchips; i++) {
2921 chip = &cfi->chips[i];
2924 mutex_lock(&chip->mutex);
2926 switch(chip->state) {
2930 case FL_JEDEC_QUERY:
2931 chip->oldstate = chip->state;
2932 chip->state = FL_SYNCING;
2933 /* No need to wake_up() on this state change -
2934 * as the whole point is that nobody can do anything
2935 * with the chip now anyway.
2939 mutex_unlock(&chip->mutex);
2943 /* Not an idle state */
2944 set_current_state(TASK_UNINTERRUPTIBLE);
2945 add_wait_queue(&chip->wq, &wait);
2947 mutex_unlock(&chip->mutex);
2951 remove_wait_queue(&chip->wq, &wait);
2957 /* Unlock the chips again */
/* Walk back over the chips we claimed and restore their states. */
2959 for (i--; i >=0; i--) {
2960 chip = &cfi->chips[i];
2962 mutex_lock(&chip->mutex);
2964 if (chip->state == FL_SYNCING) {
2965 chip->state = chip->oldstate;
2968 mutex_unlock(&chip->mutex);
/*
 * cfi_amdstd_suspend - mtd->_suspend hook.  Try to move every chip into
 * FL_PM_SUSPENDED; a chip already suspended is accepted as-is.  If any
 * chip cannot be suspended (busy state hit the default case) the loop
 * stops and the second loop rolls back every chip already suspended,
 * restoring its old state.  Returns 0 on success, nonzero on failure.
 */
2973 static int cfi_amdstd_suspend(struct mtd_info *mtd)
2975 struct map_info *map = mtd->priv;
2976 struct cfi_private *cfi = map->fldrv_priv;
2978 struct flchip *chip;
2981 for (i=0; !ret && i<cfi->numchips; i++) {
2982 chip = &cfi->chips[i];
2984 mutex_lock(&chip->mutex);
2986 switch(chip->state) {
2990 case FL_JEDEC_QUERY:
2991 chip->oldstate = chip->state;
2992 chip->state = FL_PM_SUSPENDED;
2993 /* No need to wake_up() on this state change -
2994 * as the whole point is that nobody can do anything
2995 * with the chip now anyway.
/* Already suspended: nothing more to do for this chip. */
2998 case FL_PM_SUSPENDED:
3005 mutex_unlock(&chip->mutex);
3008 /* Unlock the chips again */
/* On failure, roll back the chips suspended so far. */
3011 for (i--; i >=0; i--) {
3012 chip = &cfi->chips[i];
3014 mutex_lock(&chip->mutex);
3016 if (chip->state == FL_PM_SUSPENDED) {
3017 chip->state = chip->oldstate;
3020 mutex_unlock(&chip->mutex);
/*
 * cfi_amdstd_resume - mtd->_resume hook.  For every chip that is in
 * FL_PM_SUSPENDED, mark it FL_READY and issue the 0xF0 reset command to
 * put the device back into read-array mode.  A chip found in any other
 * state is reported as an error (it should not happen after a suspend).
 */
3028 static void cfi_amdstd_resume(struct mtd_info *mtd)
3030 struct map_info *map = mtd->priv;
3031 struct cfi_private *cfi = map->fldrv_priv;
3033 struct flchip *chip;
3035 for (i=0; i<cfi->numchips; i++) {
3037 chip = &cfi->chips[i];
3039 mutex_lock(&chip->mutex);
3041 if (chip->state == FL_PM_SUSPENDED) {
3042 chip->state = FL_READY;
/* 0xF0 = reset/read-array command for the AMD command set. */
3043 map_write(map, CMD(0xF0), chip->start);
3047 printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");
3049 mutex_unlock(&chip->mutex);
3055 * Ensure that the flash device is put back into read array mode before
3056 * unloading the driver or rebooting. On some systems, rebooting while
3057 * the flash is in query/program/erase mode will prevent the CPU from
3058 * fetching the bootloader code, requiring a hard reset or power cycle.
/*
 * cfi_amdstd_reset - put every chip back into read-array mode (0xF0) and
 * mark it FL_SHUTDOWN so no further operations are started.  Used on
 * driver unload and from the reboot notifier (see the comment above).
 */
3060 static int cfi_amdstd_reset(struct mtd_info *mtd)
3062 struct map_info *map = mtd->priv;
3063 struct cfi_private *cfi = map->fldrv_priv;
3065 struct flchip *chip;
3067 for (i = 0; i < cfi->numchips; i++) {
3069 chip = &cfi->chips[i];
3071 mutex_lock(&chip->mutex);
3073 ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
/* 0xF0 = reset/read-array; leaves the flash readable for the bootloader. */
3075 map_write(map, CMD(0xF0), chip->start);
3076 chip->state = FL_SHUTDOWN;
3077 put_chip(map, chip, chip->start);
3080 mutex_unlock(&chip->mutex);
/*
 * cfi_amdstd_reboot - reboot-notifier callback: recover the mtd_info from
 * the embedded notifier_block and reset the flash to read-array mode so
 * the CPU can fetch bootloader code after reboot.
 */
3087 static int cfi_amdstd_reboot(struct notifier_block *nb, unsigned long val,
3090 struct mtd_info *mtd;
3092 mtd = container_of(nb, struct mtd_info, reboot_notifier);
3093 cfi_amdstd_reset(mtd);
/*
 * cfi_amdstd_destroy - driver teardown: reset the flash to read-array
 * mode, unregister the reboot notifier, and free the command-set private
 * data and the erase-region table allocated at probe time.
 */
3098 static void cfi_amdstd_destroy(struct mtd_info *mtd)
3100 struct map_info *map = mtd->priv;
3101 struct cfi_private *cfi = map->fldrv_priv;
3103 cfi_amdstd_reset(mtd);
3104 unregister_reboot_notifier(&mtd->reboot_notifier);
3105 kfree(cfi->cmdset_priv);
3108 kfree(mtd->eraseregions);
/* Module metadata and aliases for the 0x0006/0x0701 command-set variants. */
3111 MODULE_LICENSE("GPL");
3112 MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
3113 MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");
3114 MODULE_ALIAS("cfi_cmdset_0006");
3115 MODULE_ALIAS("cfi_cmdset_0701");