/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 * 10/10/2000	Nicolas Pitre <nico@fluxnic.net>
 *	- completely revamped method functions so they are aware and
 *	  independent of the flash geometry (buswidth, interleave, etc.)
 *	- scalability vs code size is completely set at compile-time
 *	  (see include/linux/mtd/cfi.h for selection)
 *	- optimized write buffer method
 * 02/05/2002	Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 *	- reworked lock/unlock/erase support for var size flash
 * 21/03/2007	Rodolfo Giometti <giometti@linux.it>
 *	- auto unlock sectors on resume for auto locking flash on power up
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/byteorder.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/bitmap.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>
/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */
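/* These two compile-time switches enable the fixup_intel_strataflash and
 * fixup_no_write_suspend workarounds registered in cfi_fixup_table below. */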
/* debugging, turns off buffer write mode if set to 1 */
#define FORCE_WORD_WRITE 0
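/* With FORCE_WORD_WRITE set to 1, fixup_use_write_buffers is compiled out
 * (see the "#if !FORCE_WORD_WRITE" entry in cfi_fixup_table below) and all
 * writes fall back to the one-word-at-a-time path. */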
/* Intel chips */
#define I82802AB	0x00ad
#define I82802AC	0x00ac
#define PF38F4476	0x881c
#define M28F00AP30	0x8963
/* STMicroelectronics chips */
#define M50LPW080	0x002F
#define M50FLW080A	0x0080
#define M50FLW080B	0x0081
/* Atmel chips */
#define AT49BV640D	0x02de
#define AT49BV640DT	0x02db
/* Sharp chips */
#define LH28F640BFHE_PTTL90	0x00b0
#define LH28F640BFHE_PBTL90	0x00b1
#define LH28F640BFHE_PTTL70A	0x00b2
#define LH28F640BFHE_PBTL70A	0x00b3
static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
				  uint64_t len);
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info(struct mtd_info *, size_t,
					   size_t *, struct otp_info *);
static int cfi_intelext_get_user_prot_info(struct mtd_info *, size_t,
					   size_t *, struct otp_info *);
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
			       size_t *retlen, void **virt, resource_size_t *phys);
static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len);

static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"
/*
 *  *********** SETUP AND PROBE BITS  ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_intelext_destroy,
	.name		= "cfi_cmdset_0001",
	.module		= THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */
#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
	int i;

	printk("  Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
	printk("  Feature/Command Support:  %4.4X\n", extp->FeatureSupport);
	printk("     - Chip Erase:         %s\n", extp->FeatureSupport&1?"supported":"unsupported");
	printk("     - Suspend Erase:      %s\n", extp->FeatureSupport&2?"supported":"unsupported");
	printk("     - Suspend Program:    %s\n", extp->FeatureSupport&4?"supported":"unsupported");
	printk("     - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
	printk("     - Queued Erase:       %s\n", extp->FeatureSupport&16?"supported":"unsupported");
	printk("     - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
	printk("     - Protection Bits:    %s\n", extp->FeatureSupport&64?"supported":"unsupported");
	printk("     - Page-mode read:     %s\n", extp->FeatureSupport&128?"supported":"unsupported");
	printk("     - Synchronous read:   %s\n", extp->FeatureSupport&256?"supported":"unsupported");
	printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
	printk("     - Extended Flash Array: %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
	for (i=11; i<32; i++) {
		if (extp->FeatureSupport & (1<<i))
			printk("     - Unknown Bit %X:      supported\n", i);
	}

	printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
	printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
	for (i=1; i<8; i++) {
		if (extp->SuspendCmdSupport & (1<<i))
			printk("     - Unknown Bit %X:               supported\n", i);
	}

	printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
	printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
	printk("     - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
	for (i=2; i<3; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk("     - Unknown Bit %X Active: yes\n",i);
	}
	printk("     - EFA Lock Bit:         %s\n", extp->BlkStatusRegMask&16?"yes":"no");
	printk("     - EFA Lock-Down Bit:    %s\n", extp->BlkStatusRegMask&32?"yes":"no");
	for (i=6; i<16; i++) {
		if (extp->BlkStatusRegMask & (1<<i))
			printk("     - Unknown Bit %X Active: yes\n",i);
	}

	printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
	       extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
	if (extp->VppOptimal)
		printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
		       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif
/* Atmel chips don't use the same PRI format as Intel chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	struct cfi_pri_atmel atmel_pri;
	uint32_t features = 0;

	/* Reverse byteswapping */
	extp->FeatureSupport = cpu_to_le32(extp->FeatureSupport);
	extp->BlkStatusRegMask = cpu_to_le16(extp->BlkStatusRegMask);
	extp->ProtRegAddr = cpu_to_le16(extp->ProtRegAddr);

	memcpy(&atmel_pri, extp, sizeof(atmel_pri));
	memset((char *)extp + 5, 0, sizeof(*extp) - 5);
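	/* The first 5 bytes (the "PRI" signature plus the major and minor
	 * version fields) are kept; everything after them is rebuilt from
	 * atmel_pri below. */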
	printk(KERN_ERR "atmel Features: %02x\n", atmel_pri.Features);

	if (atmel_pri.Features & 0x01) /* chip erase supported */
		features |= (1<<0);
	if (atmel_pri.Features & 0x02) /* erase suspend supported */
		features |= (1<<1);
	if (atmel_pri.Features & 0x04) /* program suspend supported */
		features |= (1<<2);
	if (atmel_pri.Features & 0x08) /* simultaneous operations supported */
		features |= (1<<9);
	if (atmel_pri.Features & 0x20) /* page mode read supported */
		features |= (1<<7);
	if (atmel_pri.Features & 0x40) /* queued erase supported */
		features |= (1<<4);
	if (atmel_pri.Features & 0x80) /* Protection bits supported */
		features |= (1<<6);

	extp->FeatureSupport = features;

	/* burst write mode not supported */
	cfi->cfiq->BufWriteTimeoutTyp = 0;
	cfi->cfiq->BufWriteTimeoutMax = 0;
}
static void fixup_at49bv640dx_lock(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
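	/* Bit 5 of FeatureSupport advertises instant individual block
	 * locking, which these parts evidently support but do not report;
	 * setting it also lets do_xxlock_oneblock() skip its delay. */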
	cfip->FeatureSupport |= (1 << 5);
	mtd->flags |= MTD_POWERUP_LOCK;
}
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel StrataFlash chips prior to FPO revision C have bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	printk(KERN_WARNING "cfi_cmdset_0001: Suspend erase on write disabled.\n");
	extp->SuspendCmdSupport &= ~1;
}
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

	if (cfip && (cfip->FeatureSupport&4)) {
		cfip->FeatureSupport &= ~4;
		printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
	}
}
#endif
static void fixup_st_m28w320ct(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	cfi->cfiq->BufWriteTimeoutTyp = 0;	/* Not supported */
	cfi->cfiq->BufWriteTimeoutMax = 0;	/* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/* Note this is done after the region info is endian swapped.
	 * The low 16 bits of EraseRegionInfo hold (number of blocks - 1),
	 * so this forces region 1 to 0x3e + 1 = 63 blocks. */
	cfi->cfiq->EraseRegionInfo[1] =
		(cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}
static int is_LH28F640BF(struct cfi_private *cfi)
{
	/* Sharp LH28F640BF Family */
	if (cfi->mfr == CFI_MFR_SHARP && (
	    cfi->id == LH28F640BFHE_PTTL90 || cfi->id == LH28F640BFHE_PBTL90 ||
	    cfi->id == LH28F640BFHE_PTTL70A || cfi->id == LH28F640BFHE_PBTL70A))
		return 1;
	return 0;
}

static void fixup_LH28F640BF(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	/* Reset the Partition Configuration Register on LH28F640BF
	 * to a single partition (PCR = 0x000): PCR is embedded into A0-A15. */
	if (is_LH28F640BF(cfi)) {
		printk(KERN_INFO "Reset Partition Config. Register: 1 Partition of 4 planes\n");
		map_write(map, CMD(0x60), 0);
		map_write(map, CMD(0x04), 0);

		/* We have set one single partition thus
		 * Simultaneous Operations are not allowed */
		printk(KERN_INFO "cfi_cmdset_0001: Simultaneous Operations disabled\n");
		extp->FeatureSupport &= ~512;
	}
}
static void fixup_use_point(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;

	if (!mtd->_point && map_is_linear(map)) {
		mtd->_point   = cfi_intelext_point;
		mtd->_unpoint = cfi_intelext_unpoint;
	}
}

static void fixup_use_write_buffers(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if (cfi->cfiq->BufWriteTimeoutTyp) {
		printk(KERN_INFO "Using buffer write method\n");
		mtd->_write = cfi_intelext_write_buffers;
		mtd->_writev = cfi_intelext_writev;
	}
}

/*
 * Some chips power-up with all sectors locked by default.
 */
static void fixup_unlock_powerup_lock(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

	if (cfip->FeatureSupport&32) {
		printk(KERN_INFO "Using auto-unlock on power-up/resume\n");
		mtd->flags |= MTD_POWERUP_LOCK;
	}
}
static struct cfi_fixup cfi_fixup_table[] = {
	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
	{ CFI_MFR_ATMEL, AT49BV640D, fixup_at49bv640dx_lock },
	{ CFI_MFR_ATMEL, AT49BV640DT, fixup_at49bv640dx_lock },
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend },
#endif
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
#endif
	{ CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct },
	{ CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb },
	{ CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock },
	{ CFI_MFR_SHARP, CFI_ID_ANY, fixup_unlock_powerup_lock },
	{ CFI_MFR_SHARP, CFI_ID_ANY, fixup_LH28F640BF },
	{ 0, 0, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
	{ CFI_MFR_INTEL, I82802AB, fixup_use_fwh_lock },
	{ CFI_MFR_INTEL, I82802AC, fixup_use_fwh_lock },
	{ CFI_MFR_ST, M50LPW080, fixup_use_fwh_lock },
	{ CFI_MFR_ST, M50FLW080A, fixup_use_fwh_lock },
	{ CFI_MFR_ST, M50FLW080B, fixup_use_fwh_lock },
	{ 0, 0, NULL }
};

static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor IDs and the JEDEC vendor IDs appear to be common.
	 * It seems likely that the device IDs are as well.  This table
	 * picks all the cases where we know that to be true.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point },
	{ 0, 0, NULL }
};
static void cfi_fixup_major_minor(struct cfi_private *cfi,
				  struct cfi_pri_intelext *extp)
{
	if (cfi->mfr == CFI_MFR_INTEL &&
	    cfi->id == PF38F4476 && extp->MinorVersion == '3')
		extp->MinorVersion = '1';
}

static int cfi_is_micron_28F00AP30(struct cfi_private *cfi, struct flchip *chip)
{
	/*
	 * Micron (formerly Numonyx) 1Gbit bottom-boot parts are buggy
	 * w.r.t. Erase Suspend for their small (0x8000-byte) erase blocks.
	 */
	if (cfi->mfr == CFI_MFR_INTEL && cfi->id == M28F00AP30)
		return 1;
	return 0;
}
static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp;
	unsigned int extra_size = 0;
	unsigned int extp_size = sizeof(*extp);

 again:
	extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
	if (!extp)
		return NULL;

	cfi_fixup_major_minor(cfi, extp);

	if (extp->MajorVersion != '1' ||
	    (extp->MinorVersion < '0' || extp->MinorVersion > '5')) {
		printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
		       "version %c.%c.\n", extp->MajorVersion,
		       extp->MinorVersion);
		kfree(extp);
		return NULL;
	}

	/* Do some byteswapping if necessary */
	extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
	extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
	extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

	if (extp->MinorVersion >= '0') {
		extra_size = 0;

		/* Protection Register info */
		if (extp->NumProtectionFields) {
			struct cfi_intelext_otpinfo *otp =
				(struct cfi_intelext_otpinfo *)&extp->extra[0];

			extra_size += (extp->NumProtectionFields - 1) *
				      sizeof(struct cfi_intelext_otpinfo);

			if (extp_size >= sizeof(*extp) + extra_size) {
				int i;

				/* Do some byteswapping if necessary */
				for (i = 0; i < extp->NumProtectionFields - 1; i++) {
					otp->ProtRegAddr = le32_to_cpu(otp->ProtRegAddr);
					otp->FactGroups = le16_to_cpu(otp->FactGroups);
					otp->UserGroups = le16_to_cpu(otp->UserGroups);
					otp++;
				}
			}
		}
	}

	if (extp->MinorVersion >= '1') {
		/* Burst Read info */
		extra_size += 2;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		extra_size += extp->extra[extra_size - 1];
	}

	if (extp->MinorVersion >= '3') {
		int nb_parts, i;

		/* Number of hardware-partitions */
		extra_size += 1;
		if (extp_size < sizeof(*extp) + extra_size)
			goto need_more;
		nb_parts = extp->extra[extra_size - 1];

		/* skip the sizeof(partregion) field in CFI 1.4 */
		if (extp->MinorVersion >= '4')
			extra_size += 2;

		for (i = 0; i < nb_parts; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
			extra_size += sizeof(*rinfo);
			if (extp_size < sizeof(*extp) + extra_size)
				goto need_more;
			rinfo->NumIdentPartitions = le16_to_cpu(rinfo->NumIdentPartitions);
			extra_size += (rinfo->NumBlockTypes - 1)
				      * sizeof(struct cfi_intelext_blockinfo);
		}

		if (extp->MinorVersion >= '4')
			extra_size += sizeof(struct cfi_intelext_programming_regioninfo);

		if (extp_size < sizeof(*extp) + extra_size) {
 need_more:
			extp_size = sizeof(*extp) + extra_size;
			if (extp_size > 4096) {
				printk(KERN_ERR
					"%s: cfi_pri_intelext is too fat\n",
					__func__);
				kfree(extp);
				return NULL;
			}
			goto again;
		}
	}

	return extp;
}
struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd)
		return NULL;
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->_erase   = cfi_intelext_erase_varsize;
	mtd->_read    = cfi_intelext_read;
	mtd->_write   = cfi_intelext_write_words;
	mtd->_sync    = cfi_intelext_sync;
	mtd->_lock    = cfi_intelext_lock;
	mtd->_unlock  = cfi_intelext_unlock;
	mtd->_is_locked = cfi_intelext_is_locked;
	mtd->_suspend = cfi_intelext_suspend;
	mtd->_resume  = cfi_intelext_resume;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;
	mtd->writesize = 1;
	mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
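	/* e.g. two interleaved chips with MaxBufWriteSize == 5 give a
	 * write buffer of 2 << 5 == 64 bytes. */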
	mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

	if (cfi->cfi_mode == CFI_MODE_CFI) {
		/*
		 * It's a real CFI chip, not one for which the probe
		 * routine faked a CFI structure.  So we read the feature
		 * table from it.
		 */
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_intelext *extp;

		extp = read_pri_intelext(map, adr);
		if (!extp) {
			kfree(mtd);
			return NULL;
		}

		/* Install our own private info structure */
		cfi->cmdset_priv = extp;

		cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
		/* Tell the user about it in lots of lovely detail */
		cfi_tell_features(extp);
#endif

		if (extp->SuspendCmdSupport & 1) {
			printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
		}
	}
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	for (i=0; i< cfi->numchips; i++) {
		if (cfi->cfiq->WordWriteTimeoutTyp)
			cfi->chips[i].word_write_time =
				1<<cfi->cfiq->WordWriteTimeoutTyp;
		else
			cfi->chips[i].word_write_time = 50000;

		if (cfi->cfiq->BufWriteTimeoutTyp)
			cfi->chips[i].buffer_write_time =
				1<<cfi->cfiq->BufWriteTimeoutTyp;
		/* No default; if it isn't specified, we won't use it */

		if (cfi->cfiq->BlockEraseTimeoutTyp)
			cfi->chips[i].erase_time =
				1000<<cfi->cfiq->BlockEraseTimeoutTyp;
		else
			cfi->chips[i].erase_time = 2000000;

		if (cfi->cfiq->WordWriteTimeoutTyp &&
		    cfi->cfiq->WordWriteTimeoutMax)
			cfi->chips[i].word_write_time_max =
				1<<(cfi->cfiq->WordWriteTimeoutTyp +
				    cfi->cfiq->WordWriteTimeoutMax);
		else
			cfi->chips[i].word_write_time_max = 50000 * 8;

		if (cfi->cfiq->BufWriteTimeoutTyp &&
		    cfi->cfiq->BufWriteTimeoutMax)
			cfi->chips[i].buffer_write_time_max =
				1<<(cfi->cfiq->BufWriteTimeoutTyp +
				    cfi->cfiq->BufWriteTimeoutMax);

		if (cfi->cfiq->BlockEraseTimeoutTyp &&
		    cfi->cfiq->BlockEraseTimeoutMax)
			cfi->chips[i].erase_time_max =
				1000<<(cfi->cfiq->BlockEraseTimeoutTyp +
				       cfi->cfiq->BlockEraseTimeoutMax);
		else
			cfi->chips[i].erase_time_max = 2000000 * 8;

		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	map->fldrv = &cfi_intelext_chipdrv;

	return cfi_intelext_setup(mtd);
}
struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
EXPORT_SYMBOL_GPL(cfi_cmdset_0200);
static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long offset = 0;
	int i, j;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

	//printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kcalloc(mtd->numeraseregions,
				    sizeof(struct mtd_erase_region_info),
				    GFP_KERNEL);
	if (!mtd->eraseregions)
		goto setup_err;

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
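		/* Each EraseRegionInfo word encodes one region: the high 16
		 * bits give the block size in units of 256 bytes, the low 16
		 * bits give (number of blocks - 1). */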
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL);
			if (!mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap)
				goto setup_err;
		}
		offset += (ersize * ernum);
	}

	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}

	for (i=0; i<mtd->numeraseregions; i++) {
		printk(KERN_DEBUG "erase region %d: offset=0x%llx,size=0x%x,blocks=%d\n",
		       i, (unsigned long long)mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}

#ifdef CONFIG_MTD_OTP
	mtd->_read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
	mtd->_read_user_prot_reg = cfi_intelext_read_user_prot_reg;
	mtd->_write_user_prot_reg = cfi_intelext_write_user_prot_reg;
	mtd->_lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
	mtd->_get_fact_prot_info = cfi_intelext_get_fact_prot_info;
	mtd->_get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

	/* This function has the potential to distort the reality
	   a bit and therefore should be called last. */
	if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
		goto setup_err;

	__module_get(THIS_MODULE);
	register_reboot_notifier(&mtd->reboot_notifier);
	return mtd;

 setup_err:
	if (mtd->eraseregions)
		for (i=0; i<cfi->cfiq->NumEraseRegions; i++)
			for (j=0; j<cfi->numchips; j++)
				kfree(mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap);
	kfree(mtd->eraseregions);
	kfree(mtd);
	kfree(cfi->cmdset_priv);
	return NULL;
}
static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
					struct cfi_private **pcfi)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = *pcfi;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;

	/*
	 * Probing of multi-partition flash chips.
	 *
	 * To support multiple partitions when available, we simply arrange
	 * for each of them to have their own flchip structure even if they
	 * are on the same physical chip.  This means completely recreating
	 * a new cfi_private structure right here which is a blatant code
	 * layering violation, but this is still the least intrusive
	 * arrangement at this point. This can be rearranged in the future
	 * if someone feels motivated enough.  --nico
	 */
	if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
	    && extp->FeatureSupport & (1 << 9)) {
		int offs = 0;
		struct cfi_private *newcfi;
		struct flchip *chip;
		struct flchip_shared *shared;
		int numregions, numparts, partshift, numvirtchips, i, j;

		/* Protection Register info */
		if (extp->NumProtectionFields)
			offs = (extp->NumProtectionFields - 1) *
			       sizeof(struct cfi_intelext_otpinfo);

		/* Burst Read info */
		offs += extp->extra[offs+1]+2;

		/* Number of partition regions */
		numregions = extp->extra[offs];
		offs += 1;

		/* skip the sizeof(partregion) field in CFI 1.4 */
		if (extp->MinorVersion >= '4')
			offs += 2;

		/* Number of hardware partitions */
		numparts = 0;
		for (i = 0; i < numregions; i++) {
			struct cfi_intelext_regioninfo *rinfo;
			rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
			numparts += rinfo->NumIdentPartitions;
			offs += sizeof(*rinfo)
				+ (rinfo->NumBlockTypes - 1) *
				  sizeof(struct cfi_intelext_blockinfo);
		}

		if (!numparts)
			numparts = 1;

		/* Programming Region info */
		if (extp->MinorVersion >= '4') {
			struct cfi_intelext_programming_regioninfo *prinfo;
			prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
			mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
			mtd->flags &= ~MTD_BIT_WRITEABLE;
			printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
			       map->name, mtd->writesize,
			       cfi->interleave * prinfo->ControlValid,
			       cfi->interleave * prinfo->ControlInvalid);
		}

		/*
		 * All functions below currently rely on all chips having
		 * the same geometry so we'll just assume that all hardware
		 * partitions are of the same size too.
		 */
		partshift = cfi->chipshift - __ffs(numparts);
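		/* Assuming numparts is a power of two, __ffs(numparts) is
		 * log2(numparts), so each virtual chip spans exactly
		 * 1 << partshift bytes. */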
		if ((1 << partshift) < mtd->erasesize) {
			printk(KERN_ERR
			       "%s: bad number of hw partitions (%d)\n",
			       __func__, numparts);
			return -EINVAL;
		}

		numvirtchips = cfi->numchips * numparts;
		newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
		if (!newcfi)
			return -ENOMEM;
		shared = kmalloc_array(cfi->numchips,
				       sizeof(struct flchip_shared),
				       GFP_KERNEL);
		if (!shared) {
			kfree(newcfi);
			return -ENOMEM;
		}
		memcpy(newcfi, cfi, sizeof(struct cfi_private));
		newcfi->numchips = numvirtchips;
		newcfi->chipshift = partshift;

		chip = &newcfi->chips[0];
		for (i = 0; i < cfi->numchips; i++) {
			shared[i].writing = shared[i].erasing = NULL;
			mutex_init(&shared[i].lock);
			for (j = 0; j < numparts; j++) {
				*chip = cfi->chips[i];
				chip->start += j << partshift;
				chip->priv = &shared[i];
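				/* All partitions carved out of physical chip
				 * i share one flchip_shared; this is what
				 * get_chip()/put_chip() arbitrate on. */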
				/* those should be reset too since
				   they create memory references. */
				init_waitqueue_head(&chip->wq);
				mutex_init(&chip->mutex);
				chip++;
			}
		}

		printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
				  "--> %d partitions of %d KiB\n",
				  map->name, cfi->numchips, cfi->interleave,
				  newcfi->numchips, 1<<(newcfi->chipshift-10));

		map->fldrv_priv = newcfi;
		*pcfi = newcfi;
	}

	return 0;
}
/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */
static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
	unsigned long timeo = jiffies + HZ;

	/* Prevent setting state FL_SYNCING for chip in suspended state. */
	if (mode == FL_SYNCING && chip->oldstate != FL_READY)
		goto sleep;

	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			status = map_read(map, adr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			/* At this point we're fine with write operations
			   in other partitions as they don't conflict. */
			if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
				break;

			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Someone else might have been playing with it. */
			return -EAGAIN;
		}
		/* fall through */
	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (!cfip ||
		    !(cfip->FeatureSupport & 2) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
			goto sleep;

		/* Do not allow suspend if the read/write hits the block being erased */
		if ((adr & chip->in_progress_block_mask) ==
		    chip->in_progress_block_addr)
			goto sleep;

		/* do not suspend small EBs, buggy Micron Chips */
		if (cfi_is_micron_28F00AP30(cfi, chip) &&
		    (chip->in_progress_block_mask == ~(0x8000-1)))
			goto sleep;

		/* Erase suspend */
		map_write(map, CMD(0xB0), chip->in_progress_block_addr);

		/* If the flash has finished erasing, then 'erase suspend'
		 * appears to make some (28F320) flash devices switch to
		 * 'read' mode.  Make sure that we switch to 'read status'
		 * mode so we get the right data. --rmk
		 */
		map_write(map, CMD(0x70), chip->in_progress_block_addr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			status = map_read(map, chip->in_progress_block_addr);
			if (map_word_andequal(map, status, status_OK, status_OK))
				break;

			if (time_after(jiffies, timeo)) {
				/* Urgh. Resume and pretend we weren't here.
				 * Make sure we're in 'read status' mode if it had finished */
				put_chip(map, chip, adr);
				printk(KERN_ERR "%s: Chip not ready after erase "
				       "suspended: status = 0x%lx\n", map->name, status.x[0]);
				return -EIO;
			}

			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_STATUS;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_SHUTDOWN:
		/* The machine is rebooting now, so no one can get the chip anymore */
		return -EIO;
	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;
		/* fall through */
	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		mutex_lock(&chip->mutex);
		return -EAGAIN;
	}
}
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	int ret;
	DECLARE_WAITQUEUE(wait, current);

 retry:
	if (chip->priv &&
	    (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE
	    || mode == FL_SHUTDOWN) && chip->state != FL_SYNCING) {
		/*
		 * OK. We have possibility for contention on the write/erase
		 * operations which are global to the real chip and not per
		 * partition.  So let's fight it over in the partition which
		 * currently has authority on the operation.
		 *
		 * The rules are as follows:
		 *
		 * - any write operation must own shared->writing.
		 *
		 * - any erase operation must own _both_ shared->writing and
		 *   shared->erasing.
		 *
		 * - contention arbitration is handled in the owner's context.
		 *
		 * The 'shared' struct can be read and/or written only when
		 * its lock is taken.
		 */
		struct flchip_shared *shared = chip->priv;
		struct flchip *contender;
		mutex_lock(&shared->lock);
		contender = shared->writing;
		if (contender && contender != chip) {
			/*
			 * The engine to perform desired operation on this
			 * partition is already in use by someone else.
			 * Let's fight over it in the context of the chip
			 * currently using it.  If it is possible to suspend,
			 * that other partition will do just that, otherwise
			 * it'll happily send us to sleep.  In any case, when
			 * get_chip returns success we're clear to go ahead.
			 */
			ret = mutex_trylock(&contender->mutex);
			mutex_unlock(&shared->lock);
			if (!ret)
				goto retry;
			mutex_unlock(&chip->mutex);
			ret = chip_ready(map, contender, contender->start, mode);
			mutex_lock(&chip->mutex);

			if (ret == -EAGAIN) {
				mutex_unlock(&contender->mutex);
				goto retry;
			}
			if (ret) {
				mutex_unlock(&contender->mutex);
				return ret;
			}
			mutex_lock(&shared->lock);

			/* We should not own chip if it is already
			 * in FL_SYNCING state. Put contender and retry. */
			if (chip->state == FL_SYNCING) {
				put_chip(map, contender, contender->start);
				mutex_unlock(&contender->mutex);
				goto retry;
			}
			mutex_unlock(&contender->mutex);
		}

		/* Check if we already have suspended erase
		 * on this chip. Sleep. */
		if (mode == FL_ERASING && shared->erasing
		    && shared->erasing->oldstate == FL_ERASING) {
			mutex_unlock(&shared->lock);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
			goto retry;
		}

		/* We now own it */
		shared->writing = chip;
		if (mode == FL_ERASING)
			shared->erasing = chip;
		mutex_unlock(&shared->lock);
	}

	ret = chip_ready(map, chip, adr, mode);
	if (ret == -EAGAIN)
		goto retry;

	return ret;
}
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->priv) {
		struct flchip_shared *shared = chip->priv;
		mutex_lock(&shared->lock);
		if (shared->writing == chip && chip->oldstate == FL_READY) {
			/* We own the ability to write, but we're done */
			shared->writing = shared->erasing;
			if (shared->writing && shared->writing != chip) {
				/* give back ownership to who we loaned it from */
				struct flchip *loaner = shared->writing;
				mutex_lock(&loaner->mutex);
				mutex_unlock(&shared->lock);
				mutex_unlock(&chip->mutex);
				put_chip(map, loaner, loaner->start);
				mutex_lock(&chip->mutex);
				mutex_unlock(&loaner->mutex);
				wake_up(&chip->wq);
				return;
			}
			shared->erasing = NULL;
			shared->writing = NULL;
		} else if (shared->erasing == chip && shared->writing != chip) {
			/*
			 * We own the ability to erase without the ability
			 * to write, which means the erase was suspended
			 * and some other partition is currently writing.
			 * Don't let the switch below mess things up since
			 * we don't have ownership to resume anything.
			 */
			mutex_unlock(&shared->lock);
			wake_up(&chip->wq);
			return;
		}
		mutex_unlock(&shared->lock);
	}

	switch(chip->oldstate) {
	case FL_ERASING:
		/* What if one interleaved chip has finished and the
		   other hasn't? The old code would leave the finished
		   one in READY mode. That's bad, and caused -EROFS
		   errors to be returned from do_erase_oneblock because
		   that's the only bit it checked for at the time.
		   As the state machine appears to explicitly allow
		   sending the 0x70 (Read Status) command to an erasing
		   chip and expecting it to be ignored, that's what we
		   do. */
		map_write(map, CMD(0xd0), chip->in_progress_block_addr);
		map_write(map, CMD(0x70), chip->in_progress_block_addr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
	case FL_JEDEC_QUERY:
		break;
	default:
		printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
	}
	wake_up(&chip->wq);
}
#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}
/*
 * When a delay is required for the flash operation to complete, the
 * xip_wait_for_operation() function is polling for both the given timeout
 * and pending (but still masked) hardware interrupts.  Whenever there is an
 * interrupt pending then the flash erase or write operation is suspended,
 * array mode restored and interrupts unmasked.  Task scheduling might also
 * happen at that point.  The CPU eventually returns from the interrupt or
 * the call to schedule() and the suspended flash operation is resumed for
 * the remainder of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static int __xipram xip_wait_for_operation(
		struct map_info *map, struct flchip *chip,
		unsigned long adr, unsigned int chip_op_time_max)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long usec, suspended, start, done;
	flstate_t oldstate, newstate;

	start = xip_currtime();
	usec = chip_op_time_max;
	if (usec == 0)
		usec = 500000;
	done = 0;

	do {
		if (xip_irqpending() && cfip &&
		    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
		     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase or write operation when
			 * supported.  Note that we currently don't try to
			 * suspend interleaved chips if there is already
			 * another operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			usec -= done;
			map_write(map, CMD(0xb0), adr);
			map_write(map, CMD(0x70), adr);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return -EIO;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (oldstate == FL_ERASING) {
				if (!map_word_bitsset(map, status, CMD(0x40)))
					break;
				newstate = FL_XIP_WHILE_ERASING;
				chip->erase_suspended = 1;
			} else {
				if (!map_word_bitsset(map, status, CMD(0x04)))
					break;
				newstate = FL_XIP_WHILE_WRITING;
				chip->write_suspended = 1;
			}
			chip->state = newstate;
			map_write(map, CMD(0xff), adr);
			(void) map_read(map, adr);
			xip_iprefetch();
			local_irq_enable();
			mutex_unlock(&chip->mutex);
			xip_iprefetch();
			cond_resched();

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			mutex_lock(&chip->mutex);
			while (chip->state != newstate) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				mutex_unlock(&chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				mutex_lock(&chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Resume the write or erase operation */
			map_write(map, CMD(0xd0), adr);
			map_write(map, CMD(0x70), adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
		done = xip_elapsed_since(start);
	} while (!map_word_andequal(map, status, OK, OK)
		 && done < usec);

	return (done >= usec) ? -ETIME : 0;
}
/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVAL_CACHE_AND_WAIT.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec, usec_max) \
	xip_wait_for_operation(map, chip, cmd_adr, usec_max)

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)
#define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation
static int inval_cache_and_wait_for_operation(
		struct map_info *map, struct flchip *chip,
		unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
		unsigned int chip_op_time, unsigned int chip_op_time_max)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK = CMD(0x80);
	int chip_state = chip->state;
	unsigned int timeo, sleep_time, reset_timeo;

	mutex_unlock(&chip->mutex);
	if (inval_len)
		INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
	mutex_lock(&chip->mutex);

	timeo = chip_op_time_max;
	if (!timeo)
		timeo = 500000;
	reset_timeo = timeo;
	sleep_time = chip_op_time / 2;

	for (;;) {
		if (chip->state != chip_state) {
			/* Someone's suspended the operation: sleep */
			DECLARE_WAITQUEUE(wait, current);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
			continue;
		}

		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		if (chip->erase_suspended && chip_state == FL_ERASING) {
			/* Erase suspend occurred while sleep: reset timeout */
			timeo = reset_timeo;
			chip->erase_suspended = 0;
		}
		if (chip->write_suspended && chip_state == FL_WRITING) {
			/* Write suspend occurred while sleep: reset timeout */
			timeo = reset_timeo;
			chip->write_suspended = 0;
		}
		if (!timeo) {
			map_write(map, CMD(0x70), cmd_adr);
			chip->state = FL_STATUS;
			return -ETIME;
		}

		/* OK Still waiting. Drop the lock, wait a while and retry. */
		mutex_unlock(&chip->mutex);
		if (sleep_time >= 1000000/HZ) {
			/*
			 * Half of the normal delay still remaining
			 * can be performed with a sleeping delay instead
			 * of busy waiting.
			 */
			msleep(sleep_time/1000);
			timeo -= sleep_time;
			sleep_time = 1000000/HZ;
		} else {
			udelay(1);
			cond_resched();
			timeo--;
		}
		mutex_lock(&chip->mutex);
	}

	/* Done and happy. */
	chip->state = FL_STATUS;
	return 0;
}

#endif

#define WAIT_TIMEOUT(map, chip, adr, udelay, udelay_max) \
	INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay, udelay_max);
static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	mutex_lock(&chip->mutex);

	ret = get_chip(map, chip, cmd_addr, FL_POINT);

	if (!ret) {
		if (chip->state != FL_POINT && chip->state != FL_READY)
			map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_POINT;
		chip->ref_point_counter++;
	}
	mutex_unlock(&chip->mutex);

	return ret;
}
static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len,
			      size_t *retlen, void **virt, resource_size_t *phys)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs, last_end = 0;
	int chipnum;
	int ret;

	if (!map->virt)
		return -EINVAL;

	/* Now lock the chip(s) to POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	*virt = map->virt + cfi->chips[chipnum].start + ofs;
	if (phys)
		*phys = map->phys + cfi->chips[chipnum].start + ofs;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		/* We cannot point across chips that are virtually disjoint */
		if (!last_end)
			last_end = cfi->chips[chipnum].start;
		else if (cfi->chips[chipnum].start != last_end)
			break;

		if ((len + ofs - 1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;

		ofs = 0;
		last_end += 1 << cfi->chipshift;
		chipnum++;
	}
	return 0;
}
static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum, err = 0;

	/* Now unlock the chip(s) POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len && !err) {
		unsigned long thislen;
		struct flchip *chip;

		chip = &cfi->chips[chipnum];
		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs - 1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		mutex_lock(&chip->mutex);
		if (chip->state == FL_POINT) {
			chip->ref_point_counter--;
			if (chip->ref_point_counter == 0)
				chip->state = FL_READY;
		} else {
			printk(KERN_ERR "%s: Error: unpoint called on non-pointed region\n", map->name);
			err = -EINVAL;
		}

		put_chip(map, chip, chip->start);
		mutex_unlock(&chip->mutex);

		len -= thislen;
		ofs = 0;
		chipnum++;
	}

	return err;
}
static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	mutex_unlock(&chip->mutex);
	return 0;
}
static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs - 1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}
static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
				     unsigned long adr, map_word datum, int mode)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, write_cmd;
	int ret;

	adr += chip->start;

	switch (mode) {
	case FL_WRITING:
		write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0x40) : CMD(0x41);
		break;
	case FL_OTP_WRITE:
		write_cmd = CMD(0xc0);
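		/* 0x40 is the standard word program command (0x41 on
		 * Performance command-set parts); 0xc0 programs the OTP
		 * protection registers instead of the main array. */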
		break;
	default:
		return -EINVAL;
	}

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, mode);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
	map_write(map, write_cmd, adr);
	map_write(map, datum, adr);
	chip->state = mode;

	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
				   adr, map_bankwidth(map),
				   chip->word_write_time,
				   chip->word_write_time_max);
	if (ret) {
		xip_enable(map, chip, adr);
		printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
		goto out;
	}

	/* check for errors */
	status = map_read(map, adr);
	if (map_word_bitsset(map, status, CMD(0x1a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* reset status */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);
		xip_enable(map, chip, adr);

		if (chipstatus & 0x02) {
			ret = -EROFS;
		} else if (chipstatus & 0x08) {
			printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
			ret = -EIO;
		} else {
			printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		}

		goto out;
	}

	xip_enable(map, chip, adr);
 out:	DISABLE_VPP(map);
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);
	return ret;
}
static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;
	int chipnum;
	unsigned long ofs;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int gap = ofs - bus_ofs;
		int n;
		map_word datum;

		n = min_t(int, len, map_bankwidth(map)-gap);
		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, gap, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       bus_ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		len -= n;
		ofs += n;
		buf += n;
		(*retlen) += n;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	while (len >= map_bankwidth(map)) {
		map_word datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	if (len & (map_bankwidth(map)-1)) {
		map_word datum;

		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}
static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const struct kvec **pvec,
				    unsigned long *pvec_seek, int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, write_cmd, datum;
	unsigned long cmd_adr;
	int ret, wbufsize, word_gap, words;
	const struct kvec *vec;
	unsigned long vec_seek;
	unsigned long initial_adr;
	int initial_len = len;

	wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	adr += chip->start;
	initial_adr = adr;
	cmd_adr = adr & ~(wbufsize-1);

	/* Sharp LH28F640BF chips need the first address for the
	 * Page Buffer Program command. See Table 5 of
	 * LH28F320BF, LH28F640BF, LH28F128BF Series (Appendix FUM00701) */
	if (is_LH28F640BF(cfi))
		cmd_adr = adr;

	/* Let's determine this according to the interleave only once */
	write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0xe8) : CMD(0xe9);
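	/* 0xe8 is the Write to Buffer command; Performance command-set
	 * parts use 0xe9 instead. */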
	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, cmd_adr, FL_WRITING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, initial_adr, initial_len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);

	/* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
	   [...], the device will not accept any more Write to Buffer commands".
	   So we must check here and reset those bits if they're set. Otherwise
	   we're just pissing in the wind */
	if (chip->state != FL_STATUS) {
		map_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
	}
	status = map_read(map, cmd_adr);
	if (map_word_bitsset(map, status, CMD(0x30))) {
		xip_enable(map, chip, cmd_adr);
		printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
		xip_disable(map, chip, cmd_adr);
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
	}

	chip->state = FL_WRITING_TO_BUFFER;
	map_write(map, write_cmd, cmd_adr);
	ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0, 0);
	if (ret) {
		/* Argh. Not ready for write to buffer */
		map_word Xstatus = map_read(map, cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
		status = map_read(map, cmd_adr);
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
		xip_enable(map, chip, cmd_adr);
		printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
		       map->name, Xstatus.x[0], status.x[0]);
		goto out;
	}

	/* Figure out the number of words to write */
	word_gap = (-adr & (map_bankwidth(map)-1));
	words = DIV_ROUND_UP(len - word_gap, map_bankwidth(map));
	if (!word_gap) {
		words--;
	} else {
		word_gap = map_bankwidth(map) - word_gap;
		adr -= word_gap;
		datum = map_word_ff(map);
	}
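		/* Pad bytes are loaded as 0xff: programming a NOR flash bit
		 * to 1 leaves the cell unchanged, so the padding cannot
		 * disturb neighbouring data. */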
	/* Write length of data to come */
	map_write(map, CMD(words), cmd_adr);

	/* Write data */
	vec = *pvec;
	vec_seek = *pvec_seek;
	do {
		int n = map_bankwidth(map) - word_gap;
		if (n > vec->iov_len - vec_seek)
			n = vec->iov_len - vec_seek;
		if (n > len)
			n = len;

		if (!word_gap && len < map_bankwidth(map))
			datum = map_word_ff(map);

		datum = map_word_load_partial(map, datum,
					      vec->iov_base + vec_seek,
					      word_gap, n);

		len -= n;
		word_gap += n;
		if (!len || word_gap == map_bankwidth(map)) {
			map_write(map, datum, adr);
			adr += map_bankwidth(map);
			word_gap = 0;
		}

		vec_seek += n;
		if (vec_seek == vec->iov_len) {
			vec++;
			vec_seek = 0;
		}
	} while (len);
	*pvec = vec;
	*pvec_seek = vec_seek;

	/* GO GO GO */
	map_write(map, CMD(0xd0), cmd_adr);
	chip->state = FL_WRITING;

	ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
				   initial_adr, initial_len,
				   chip->buffer_write_time,
				   chip->buffer_write_time_max);
	if (ret) {
		map_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
		xip_enable(map, chip, cmd_adr);
		printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
		goto out;
	}

	/* check for errors */
	status = map_read(map, cmd_adr);
	if (map_word_bitsset(map, status, CMD(0x1a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* reset status */
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
		xip_enable(map, chip, cmd_adr);

		if (chipstatus & 0x02) {
			ret = -EROFS;
		} else if (chipstatus & 0x08) {
			printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
			ret = -EIO;
		} else {
			printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		}

		goto out;
	}

	xip_enable(map, chip, cmd_adr);
 out:	DISABLE_VPP(map);
	put_chip(map, chip, cmd_adr);
	mutex_unlock(&chip->mutex);
	return ret;
}
static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
				unsigned long count, loff_t to, size_t *retlen)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret = 0;
	int chipnum;
	unsigned long ofs, vec_seek, i;
	size_t len = 0;

	for (i = 0; i < count; i++)
		len += vecs[i].iov_len;

	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);
	vec_seek = 0;

	do {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;
		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, &vecs, &vec_seek, size);
		if (ret)
			return ret;

		ofs += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}

		/* Be nice and reschedule with the chip in a usable state for other
		   processes. */
		cond_resched();

	} while (len);

	return 0;
}

static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
				       size_t len, size_t *retlen, const u_char *buf)
{
	struct kvec vec;

	vec.iov_base = (void *) buf;
	vec.iov_len = len;

	return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
}
static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
				      unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status;
	int retries = 3;
	int ret;

	adr += chip->start;

 retry:
	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	/* Clear the status register first */
	map_write(map, CMD(0x50), adr);

	/* Now erase */
	map_write(map, CMD(0x20), adr);
	map_write(map, CMD(0xD0), adr);
	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	chip->in_progress_block_addr = adr;
	chip->in_progress_block_mask = ~(len - 1);
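	/* chip_ready() compares (adr & in_progress_block_mask) against
	 * in_progress_block_addr so that it refuses to suspend this erase
	 * for an access that falls inside the block being erased. */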
	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
				   adr, len,
				   chip->erase_time,
				   chip->erase_time_max);
	if (ret) {
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
		xip_enable(map, chip, adr);
		printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
		goto out;
	}

	/* We've broken this before. It doesn't hurt to be safe */
	map_write(map, CMD(0x70), adr);
	chip->state = FL_STATUS;
	status = map_read(map, adr);

	/* check for errors */
	if (map_word_bitsset(map, status, CMD(0x3a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* Reset the error bits */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);
		xip_enable(map, chip, adr);

		if ((chipstatus & 0x30) == 0x30) {
			printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		} else if (chipstatus & 0x02) {
			/* Protection bit set */
			ret = -EROFS;
		} else if (chipstatus & 0x8) {
			/* Voltage */
			printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
			ret = -EIO;
		} else if (chipstatus & 0x20 && retries--) {
			printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
			DISABLE_VPP(map);
			put_chip(map, chip, adr);
			mutex_unlock(&chip->mutex);
			goto retry;
		} else {
			printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
			ret = -EIO;
		}

		goto out;
	}

	xip_enable(map, chip, adr);
 out:	DISABLE_VPP(map);
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);
	return ret;
}
static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
	return cfi_varsize_frob(mtd, do_erase_oneblock, instr->addr,
				instr->len, NULL);
}

static void cfi_intelext_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);
		ret = get_chip(map, chip, chip->start, FL_SYNCING);

		if (!ret) {
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		}
		mutex_unlock(&chip->mutex);
	}

	/* Unlock the chips again */

	for (i--; i >= 0; i--) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			chip->oldstate = FL_READY;
			wake_up(&chip->wq);
		}
		mutex_unlock(&chip->mutex);
	}
}
static int __xipram do_getlockstatus_oneblock(struct map_info *map,
					      struct flchip *chip,
					      unsigned long adr,
					      int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int status, ofs_factor = cfi->interleave * cfi->device_type;

	adr += chip->start;
	xip_disable(map, chip, adr+(2*ofs_factor));
	map_write(map, CMD(0x90), adr+(2*ofs_factor));
	chip->state = FL_JEDEC_QUERY;
	status = cfi_read_query(map, adr+(2*ofs_factor));
	xip_enable(map, chip, 0);
	return status;
}

#ifdef DEBUG_LOCK_BITS
static int __xipram do_printlockstatus_oneblock(struct map_info *map,
						struct flchip *chip,
						unsigned long adr,
						int len, void *thunk)
{
	printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
	       adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk));
	return 0;
}
#endif
#define DO_XXLOCK_ONEBLOCK_LOCK		((void *) 1)
#define DO_XXLOCK_ONEBLOCK_UNLOCK	((void *) 2)
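/* These thunk values are passed through cfi_varsize_frob() to tell
   do_xxlock_oneblock() which operation to perform on each block. */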
static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
				       unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	int mdelay;
	int ret;

	adr += chip->start;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_LOCKING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
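	/* Block lock/unlock is a two-cycle sequence: 0x60 (lock setup)
	   followed by 0x01 to set the lock bit or 0xD0 to clear it. */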
	map_write(map, CMD(0x60), adr);
	if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
		map_write(map, CMD(0x01), adr);
		chip->state = FL_LOCKING;
	} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
		map_write(map, CMD(0xD0), adr);
		chip->state = FL_UNLOCKING;
	} else
		BUG();
	/*
	 * If Instant Individual Block Locking is supported, there is no
	 * need to delay.
	 *
	 * Unlocking may take up to 1.4 seconds on some Intel flashes. So
	 * let's use a max of 1.5 seconds (1500ms) as timeout.
	 *
	 * See "Clear Block Lock-Bits Time" on page 40 in
	 * "3 Volt Intel StrataFlash Memory" 28F128J3,28F640J3,28F320J3 manual
	 * from February 2003
	 */
	mdelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1500 : 0;
	ret = WAIT_TIMEOUT(map, chip, adr, mdelay, mdelay * 1000);
	if (ret) {
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
		xip_enable(map, chip, adr);
		printk(KERN_ERR "%s: block unlock error: (status timeout)\n", map->name);
		goto out;
	}
	xip_enable(map, chip, adr);
 out:	DISABLE_VPP(map);
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);
	return ret;
}
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	int ret;

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08llx\n",
	       __func__, (unsigned long long)ofs, (unsigned long long)len);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, NULL);
#endif

	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
		ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
	       __func__, ret);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, NULL);
#endif

	return ret;
}
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	int ret;

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08llx\n",
	       __func__, (unsigned long long)ofs, (unsigned long long)len);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, NULL);
#endif

	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
		ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
	       __func__, ret);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, NULL);
#endif

	return ret;
}
static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
				  uint64_t len)
{
	return cfi_varsize_frob(mtd, do_getlockstatus_oneblock,
				ofs, len, NULL) ? 1 : 0;
}
#ifdef CONFIG_MTD_OTP

typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
			u_long data_offset, u_char *buf, u_int size,
			u_long prot_offset, u_int groupno, u_int groupsize);
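/* Read, write and lock all share this signature so that
   cfi_intelext_otp_walk() can drive any of them over the OTP regions:
   'data_offset' addresses the OTP data itself, 'prot_offset' the
   protection (lock) word, and 'groupno'/'groupsize' select the group
   within the region. */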
static int __xipram
do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}
	/* let's ensure we're not reading back cached data from array mode */
	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);

	xip_disable(map, chip, chip->start);
	if (chip->state != FL_JEDEC_QUERY) {
		map_write(map, CMD(0x90), chip->start);
		chip->state = FL_JEDEC_QUERY;
	}
	map_copy_from(map, buf, chip->start + offset, size);
	xip_enable(map, chip, chip->start);
	/* then ensure we don't keep OTP data in the cache */
	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);

	put_chip(map, chip, chip->start);
	mutex_unlock(&chip->mutex);

	return 0;
}
static int
do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
	     u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	int ret;

	while (size) {
		unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
		int gap = offset - bus_ofs;
		int n = min_t(int, size, map_bankwidth(map)-gap);
		map_word datum = map_word_ff(map);

		datum = map_word_load_partial(map, datum, buf, gap, n);
		ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
		if (ret)
			return ret;

		offset += n;
		buf += n;
		size -= n;
	}

	return 0;
}
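/* Locking an OTP group is itself a one-time program operation: the
   group's bit in the protection lock word is programmed from 1 to 0
   and can never be set again. */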
static int
do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word datum;

	/* make sure area matches group boundaries */
	if (size != grpsz)
		return -EXDEV;

	datum = map_word_ff(map);
	datum = map_word_clr(map, datum, CMD(1 << grpno));
	return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
}
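/* Walk all OTP regions of all real (non-interleave-virtual) chips and
   apply 'action' to the requested range.  With a NULL action the walk
   instead fills 'buf' with one struct otp_info record per group, which
   backs the *_get_*_prot_info() queries below. */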
static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
				 size_t *retlen, u_char *buf,
				 otp_op_t action, int user_regs)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	struct flchip *chip;
	struct cfi_intelext_otpinfo *otp;
	u_long devsize, reg_prot_offset, data_offset;
	u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
	u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
	int ret;

	*retlen = 0;
	/* Check that we actually have some OTP registers */
	if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
		return -ENODATA;
	/* we need real chips here not virtual ones */
	devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
	chip_step = devsize >> cfi->chipshift;
	chip_num = 0;
	/* Some chips have OTP located in the _top_ partition only.
	   For example: Intel 28F256L18T (T means top-parameter device) */
	if (cfi->mfr == CFI_MFR_INTEL) {
		switch (cfi->id) {
		case 0x880b:
		case 0x880c:
		case 0x880d:
			chip_num = chip_step - 1;
		}
	}
	for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
		chip = &cfi->chips[chip_num];
		otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];
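		/* The per-field OTP descriptors follow the fixed part of
		   the Intel extended query table in extp->extra[]. */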
		/* first OTP region */
		field = 0;
		reg_prot_offset = extp->ProtRegAddr;
		reg_fact_groups = 1;
		reg_fact_size = 1 << extp->FactProtRegSize;
		reg_user_groups = 1;
		reg_user_size = 1 << extp->UserProtRegSize;
		while (len > 0) {

			/* flash geometry fixup */
			data_offset = reg_prot_offset + 1;
			data_offset *= cfi->interleave * cfi->device_type;
			reg_prot_offset *= cfi->interleave * cfi->device_type;
			reg_fact_size *= cfi->interleave;
			reg_user_size *= cfi->interleave;
			if (user_regs) {
				groups = reg_user_groups;
				groupsize = reg_user_size;
				/* skip over factory reg area */
				groupno = reg_fact_groups;
				data_offset += reg_fact_groups * reg_fact_size;
			} else {
				groups = reg_fact_groups;
				groupsize = reg_fact_size;
				groupno = 0;
			}
			while (len > 0 && groups > 0) {
				if (!action) {
					/*
					 * Special case: if action is NULL
					 * we fill buf with otp_info records.
					 */
					struct otp_info *otpinfo;
					map_word lockword;
					len -= sizeof(struct otp_info);
					if (len <= 0)
						return -ENOSPC;
					ret = do_otp_read(map, chip,
							  reg_prot_offset,
							  (u_char *)&lockword,
							  map_bankwidth(map),
							  0, 0, 0);
					if (ret)
						return ret;
					otpinfo = (struct otp_info *)buf;
					otpinfo->start = from;
					otpinfo->length = groupsize;
					otpinfo->locked =
					   !map_word_bitsset(map, lockword,
							     CMD(1 << groupno));
					from += groupsize;
					buf += sizeof(*otpinfo);
					*retlen += sizeof(*otpinfo);
				} else if (from >= groupsize) {
					from -= groupsize;
					data_offset += groupsize;
				} else {
					int size = groupsize;
					data_offset += from;
					size -= from;
					from = 0;
					if (size > len)
						size = len;
					ret = action(map, chip, data_offset,
						     buf, size, reg_prot_offset,
						     groupno, groupsize);
					if (ret < 0)
						return ret;
					buf += size;
					len -= size;
					*retlen += size;
					data_offset += size;
				}
				groupno++;
				groups--;
			}
			/* next OTP region */
			if (++field == extp->NumProtectionFields)
				break;
			reg_prot_offset = otp->ProtRegAddr;
			reg_fact_groups = otp->FactGroups;
			reg_fact_size = 1 << otp->FactProtRegSize;
			reg_user_groups = otp->UserGroups;
			reg_user_size = 1 << otp->UserProtRegSize;
			otp++;
		}
	}

	return 0;
}
static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
					   size_t len, size_t *retlen,
					   u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_read, 0);
}
static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
					   size_t len, size_t *retlen,
					   u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_read, 1);
}
static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
					    size_t len, size_t *retlen,
					    u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_write, 1);
}
static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
					   loff_t from, size_t len)
{
	size_t retlen;

	return cfi_intelext_otp_walk(mtd, from, len, &retlen,
				     NULL, do_otp_lock, 1);
}
static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd, size_t len,
					   size_t *retlen, struct otp_info *buf)
{
	return cfi_intelext_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
				     NULL, 0);
}
static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd, size_t len,
					   size_t *retlen, struct otp_info *buf)
{
	return cfi_intelext_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
				     NULL, 1);
}

#endif
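/*
 * Power management: chips with the power-up locking feature
 * (FeatureSupport bit 5, surfaced as MTD_POWERUP_LOCK) come out of a
 * power cycle with every block locked, so the soft lock state is saved
 * across suspend and re-applied on resume.
 */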
static void cfi_intelext_save_locks(struct mtd_info *mtd)
{
	struct mtd_erase_region_info *region;
	int block, status, i;
	unsigned long adr;
	size_t len;
	for (i = 0; i < mtd->numeraseregions; i++) {
		region = &mtd->eraseregions[i];
		if (!region->lockmap)
			continue;

		for (block = 0; block < region->numblocks; block++) {
			len = region->erasesize;
			adr = region->offset + block * len;

			status = cfi_varsize_frob(mtd,
					do_getlockstatus_oneblock, adr, len, NULL);
			if (status)
				set_bit(block, region->lockmap);
			else
				clear_bit(block, region->lockmap);
		}
	}
}
static int cfi_intelext_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	int i;
	struct flchip *chip;
	int ret = 0;
	if ((mtd->flags & MTD_POWERUP_LOCK)
	    && extp && (extp->FeatureSupport & (1 << 5)))
		cfi_intelext_save_locks(mtd);
	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		switch (chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			if (chip->oldstate == FL_READY) {
				/* place the chip in a known state before suspend */
				map_write(map, CMD(0xFF), cfi->chips[i].start);
				chip->oldstate = chip->state;
				chip->state = FL_PM_SUSPENDED;
				/* No need to wake_up() on this state change -
				 * as the whole point is that nobody can do anything
				 * with the chip now anyway.
				 */
			} else {
				/* There seems to be an operation pending. We must wait for it. */
				printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
				ret = -EAGAIN;
			}
			break;
		default:
			/* Should we actually wait? Once upon a time these routines weren't
			   allowed to. Or should we return -EAGAIN, because the upper layers
			   ought to have already shut down anything which was using the device
			   anyway? The latter for now. */
			printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
			ret = -EAGAIN;
			break;
		case FL_PM_SUSPENDED:
			break;
		}
		mutex_unlock(&chip->mutex);
	}
	/* Unlock the chips again */

	if (ret) {
		for (i--; i >= 0; i--) {
			chip = &cfi->chips[i];

			mutex_lock(&chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				/* No need to force it into a known state here,
				   because we're returning failure, and it didn't
				   get power cycled */
				chip->state = chip->oldstate;
				chip->oldstate = FL_READY;
				wake_up(&chip->wq);
			}
			mutex_unlock(&chip->mutex);
		}
	}

	return ret;
}
static void cfi_intelext_restore_locks(struct mtd_info *mtd)
{
	struct mtd_erase_region_info *region;
	int block, i;
	unsigned long adr;
	size_t len;

	for (i = 0; i < mtd->numeraseregions; i++) {
		region = &mtd->eraseregions[i];
		if (!region->lockmap)
			continue;

		for_each_clear_bit(block, region->lockmap, region->numblocks) {
			len = region->erasesize;
			adr = region->offset + block * len;
			cfi_intelext_unlock(mtd, adr, len);
		}
	}
}
static void cfi_intelext_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	int i;
	struct flchip *chip;
	for (i=0; i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		/* Go to known state. Chip may have been power cycled */
		if (chip->state == FL_PM_SUSPENDED) {
			/* Refresh LH28F640BF Partition Config. Register */
			fixup_LH28F640BF(mtd);
			map_write(map, CMD(0xFF), cfi->chips[i].start);
			chip->oldstate = chip->state = FL_READY;
			wake_up(&chip->wq);
		}

		mutex_unlock(&chip->mutex);
	}
	if ((mtd->flags & MTD_POWERUP_LOCK)
	    && extp && (extp->FeatureSupport & (1 << 5)))
		cfi_intelext_restore_locks(mtd);
}
static int cfi_intelext_reset(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i, ret;
	for (i=0; i < cfi->numchips; i++) {
		struct flchip *chip = &cfi->chips[i];

		/* force the completion of any ongoing operation
		   and switch to array mode so any bootloader in
		   flash is accessible for soft reboot. */
		mutex_lock(&chip->mutex);
		ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
		if (!ret) {
			map_write(map, CMD(0xff), chip->start);
			chip->state = FL_SHUTDOWN;
			put_chip(map, chip, chip->start);
		}
		mutex_unlock(&chip->mutex);
	}

	return 0;
}
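/* Reboot notifier: force the flash back into array (read) mode on
   shutdown so that a bootloader stored in flash is executable after
   the soft reset. */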
static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
			       void *v)
{
	struct mtd_info *mtd;

	mtd = container_of(nb, struct mtd_info, reboot_notifier);
	cfi_intelext_reset(mtd);
	return NOTIFY_DONE;
}
static void cfi_intelext_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_erase_region_info *region;
	int i;
	cfi_intelext_reset(mtd);
	unregister_reboot_notifier(&mtd->reboot_notifier);
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi->chips[0].priv);
	kfree(cfi);
	for (i = 0; i < mtd->numeraseregions; i++) {
		region = &mtd->eraseregions[i];
		kfree(region->lockmap);
	}
	kfree(mtd->eraseregions);
}
MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
MODULE_ALIAS("cfi_cmdset_0003");
MODULE_ALIAS("cfi_cmdset_0200");