2 * Common Flash Interface support:
3 * Intel Extended Vendor Command Set (ID 0x0001)
5 * (C) 2000 Red Hat. GPL'd
8 * 10/10/2000 Nicolas Pitre <nico@fluxnic.net>
9 * - completely revamped method functions so they are aware and
10 * independent of the flash geometry (buswidth, interleave, etc.)
11 * - scalability vs code size is completely set at compile-time
12 * (see include/linux/mtd/cfi.h for selection)
13 * - optimized write buffer method
14 * 02/05/2002 Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
15 * - reworked lock/unlock/erase support for var size flash
16 * 21/03/2007 Rodolfo Giometti <giometti@linux.it>
17 * - auto unlock sectors on resume for auto locking flash on power up
20 #include <linux/module.h>
21 #include <linux/types.h>
22 #include <linux/kernel.h>
23 #include <linux/sched.h>
25 #include <asm/byteorder.h>
27 #include <linux/errno.h>
28 #include <linux/slab.h>
29 #include <linux/delay.h>
30 #include <linux/interrupt.h>
31 #include <linux/reboot.h>
32 #include <linux/bitmap.h>
33 #include <linux/mtd/xip.h>
34 #include <linux/mtd/map.h>
35 #include <linux/mtd/mtd.h>
36 #include <linux/mtd/cfi.h>
38 /* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
39 /* #define CMDSET0001_DISABLE_WRITE_SUSPEND */
41 // debugging, turns off buffer write mode if set to 1
42 #define FORCE_WORD_WRITE 0
45 #define I82802AB 0x00ad
46 #define I82802AC 0x00ac
47 #define PF38F4476 0x881c
48 #define M28F00AP30 0x8963
49 /* STMicroelectronics chips */
50 #define M50LPW080 0x002F
51 #define M50FLW080A 0x0080
52 #define M50FLW080B 0x0081
54 #define AT49BV640D 0x02de
55 #define AT49BV640DT 0x02db
57 #define LH28F640BFHE_PTTL90 0x00b0
58 #define LH28F640BFHE_PBTL90 0x00b1
59 #define LH28F640BFHE_PTTL70A 0x00b2
60 #define LH28F640BFHE_PBTL70A 0x00b3
62 static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
63 static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
64 static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
65 static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
66 static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
67 static void cfi_intelext_sync (struct mtd_info *);
68 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
69 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
70 static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
73 static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
74 static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
75 static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
76 static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
77 static int cfi_intelext_get_fact_prot_info(struct mtd_info *, size_t,
78 size_t *, struct otp_info *);
79 static int cfi_intelext_get_user_prot_info(struct mtd_info *, size_t,
80 size_t *, struct otp_info *);
82 static int cfi_intelext_suspend (struct mtd_info *);
83 static void cfi_intelext_resume (struct mtd_info *);
84 static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);
86 static void cfi_intelext_destroy(struct mtd_info *);
88 struct mtd_info *cfi_cmdset_0001(struct map_info *, int);
90 static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
91 static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);
93 static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
94 size_t *retlen, void **virt, resource_size_t *phys);
95 static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len);
97 static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
98 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
99 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
100 #include "fwh_lock.h"
105 * *********** SETUP AND PROBE BITS ***********
108 static struct mtd_chip_driver cfi_intelext_chipdrv = {
109 .probe = NULL, /* Not usable directly */
110 .destroy = cfi_intelext_destroy,
111 .name = "cfi_cmdset_0001",
112 .module = THIS_MODULE
115 /* #define DEBUG_LOCK_BITS */
116 /* #define DEBUG_CFI_FEATURES */
118 #ifdef DEBUG_CFI_FEATURES
119 static void cfi_tell_features(struct cfi_pri_intelext *extp)
122 printk(" Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
123 printk(" Feature/Command Support: %4.4X\n", extp->FeatureSupport);
124 printk(" - Chip Erase: %s\n", extp->FeatureSupport&1?"supported":"unsupported");
125 printk(" - Suspend Erase: %s\n", extp->FeatureSupport&2?"supported":"unsupported");
126 printk(" - Suspend Program: %s\n", extp->FeatureSupport&4?"supported":"unsupported");
127 printk(" - Legacy Lock/Unlock: %s\n", extp->FeatureSupport&8?"supported":"unsupported");
128 printk(" - Queued Erase: %s\n", extp->FeatureSupport&16?"supported":"unsupported");
129 printk(" - Instant block lock: %s\n", extp->FeatureSupport&32?"supported":"unsupported");
130 printk(" - Protection Bits: %s\n", extp->FeatureSupport&64?"supported":"unsupported");
131 printk(" - Page-mode read: %s\n", extp->FeatureSupport&128?"supported":"unsupported");
132 printk(" - Synchronous read: %s\n", extp->FeatureSupport&256?"supported":"unsupported");
133 printk(" - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
134 printk(" - Extended Flash Array: %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
135 for (i=11; i<32; i++) {
136 if (extp->FeatureSupport & (1<<i))
137 printk(" - Unknown Bit %X: supported\n", i);
140 printk(" Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
141 printk(" - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
142 for (i=1; i<8; i++) {
143 if (extp->SuspendCmdSupport & (1<<i))
144 printk(" - Unknown Bit %X: supported\n", i);
147 printk(" Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
148 printk(" - Lock Bit Active: %s\n", extp->BlkStatusRegMask&1?"yes":"no");
149 printk(" - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
150 for (i=2; i<3; i++) {
151 if (extp->BlkStatusRegMask & (1<<i))
152 printk(" - Unknown Bit %X Active: yes\n",i);
154 printk(" - EFA Lock Bit: %s\n", extp->BlkStatusRegMask&16?"yes":"no");
155 printk(" - EFA Lock-Down Bit: %s\n", extp->BlkStatusRegMask&32?"yes":"no");
156 for (i=6; i<16; i++) {
157 if (extp->BlkStatusRegMask & (1<<i))
158 printk(" - Unknown Bit %X Active: yes\n",i);
161 printk(" Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
162 extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
163 if (extp->VppOptimal)
164 printk(" Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
165 extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
169 /* Atmel chips don't use the same PRI format as Intel chips */
170 static void fixup_convert_atmel_pri(struct mtd_info *mtd)
172 struct map_info *map = mtd->priv;
173 struct cfi_private *cfi = map->fldrv_priv;
174 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
175 struct cfi_pri_atmel atmel_pri;
176 uint32_t features = 0;
178 /* Reverse byteswapping */
179 extp->FeatureSupport = cpu_to_le32(extp->FeatureSupport);
180 extp->BlkStatusRegMask = cpu_to_le16(extp->BlkStatusRegMask);
181 extp->ProtRegAddr = cpu_to_le16(extp->ProtRegAddr);
183 memcpy(&atmel_pri, extp, sizeof(atmel_pri));
184 memset((char *)extp + 5, 0, sizeof(*extp) - 5);
186 printk(KERN_ERR "atmel Features: %02x\n", atmel_pri.Features);
188 if (atmel_pri.Features & 0x01) /* chip erase supported */
190 if (atmel_pri.Features & 0x02) /* erase suspend supported */
192 if (atmel_pri.Features & 0x04) /* program suspend supported */
194 if (atmel_pri.Features & 0x08) /* simultaneous operations supported */
196 if (atmel_pri.Features & 0x20) /* page mode read supported */
198 if (atmel_pri.Features & 0x40) /* queued erase supported */
200 if (atmel_pri.Features & 0x80) /* Protection bits supported */
203 extp->FeatureSupport = features;
205 /* burst write mode not supported */
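/* Zeroing the typical buffer-write timeout also keeps fixup_use_write_buffers()
   from switching mtd->_write over to the buffered write path later on. */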
206 cfi->cfiq->BufWriteTimeoutTyp = 0;
207 cfi->cfiq->BufWriteTimeoutMax = 0;
210 static void fixup_at49bv640dx_lock(struct mtd_info *mtd)
212 struct map_info *map = mtd->priv;
213 struct cfi_private *cfi = map->fldrv_priv;
214 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
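/* Bit 5 of FeatureSupport is "instant individual block locking"; MTD_POWERUP_LOCK
   marks parts whose blocks power up locked (cf. fixup_unlock_powerup_lock() below). */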
216 cfip->FeatureSupport |= (1 << 5);
217 mtd->flags |= MTD_POWERUP_LOCK;
220 #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
221 /* Some Intel StrataFlash chips prior to FPO revision C have bugs in this area */
222 static void fixup_intel_strataflash(struct mtd_info *mtd)
224 struct map_info *map = mtd->priv;
225 struct cfi_private *cfi = map->fldrv_priv;
226 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
228 printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
229 "erase on write disabled.\n");
230 extp->SuspendCmdSupport &= ~1;
234 #ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
235 static void fixup_no_write_suspend(struct mtd_info *mtd)
237 struct map_info *map = mtd->priv;
238 struct cfi_private *cfi = map->fldrv_priv;
239 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
241 if (cfip && (cfip->FeatureSupport&4)) {
242 cfip->FeatureSupport &= ~4;
243 printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
248 static void fixup_st_m28w320ct(struct mtd_info *mtd)
250 struct map_info *map = mtd->priv;
251 struct cfi_private *cfi = map->fldrv_priv;
253 cfi->cfiq->BufWriteTimeoutTyp = 0; /* Not supported */
254 cfi->cfiq->BufWriteTimeoutMax = 0; /* Not supported */
257 static void fixup_st_m28w320cb(struct mtd_info *mtd)
259 struct map_info *map = mtd->priv;
260 struct cfi_private *cfi = map->fldrv_priv;
262 /* Note this is done after the region info is endian swapped */
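/* The low 16 bits of EraseRegionInfo hold the block count minus one, so forcing
   0x3e makes the second erase region report 63 blocks. */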
263 cfi->cfiq->EraseRegionInfo[1] =
264 (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
267 static int is_LH28F640BF(struct cfi_private *cfi)
269 /* Sharp LH28F640BF Family */
270 if (cfi->mfr == CFI_MFR_SHARP && (
271 cfi->id == LH28F640BFHE_PTTL90 || cfi->id == LH28F640BFHE_PBTL90 ||
272 cfi->id == LH28F640BFHE_PTTL70A || cfi->id == LH28F640BFHE_PBTL70A))
277 static void fixup_LH28F640BF(struct mtd_info *mtd)
279 struct map_info *map = mtd->priv;
280 struct cfi_private *cfi = map->fldrv_priv;
281 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
283 /* Reset the Partition Configuration Register on LH28F640BF
284 * to a single partition (PCR = 0x000): PCR is embedded into A0-A15. */
285 if (is_LH28F640BF(cfi)) {
286 printk(KERN_INFO "Reset Partition Config. Register: 1 Partition of 4 planes\n");
287 map_write(map, CMD(0x60), 0);
288 map_write(map, CMD(0x04), 0);
290 /* We have set a single partition, thus
291 * Simultaneous Operations are not allowed */
292 printk(KERN_INFO "cfi_cmdset_0001: Simultaneous Operations disabled\n");
293 extp->FeatureSupport &= ~512;
297 static void fixup_use_point(struct mtd_info *mtd)
299 struct map_info *map = mtd->priv;
300 if (!mtd->_point && map_is_linear(map)) {
301 mtd->_point = cfi_intelext_point;
302 mtd->_unpoint = cfi_intelext_unpoint;
306 static void fixup_use_write_buffers(struct mtd_info *mtd)
308 struct map_info *map = mtd->priv;
309 struct cfi_private *cfi = map->fldrv_priv;
310 if (cfi->cfiq->BufWriteTimeoutTyp) {
311 printk(KERN_INFO "Using buffer write method\n" );
312 mtd->_write = cfi_intelext_write_buffers;
313 mtd->_writev = cfi_intelext_writev;
318 * Some chips power up with all sectors locked by default.
320 static void fixup_unlock_powerup_lock(struct mtd_info *mtd)
322 struct map_info *map = mtd->priv;
323 struct cfi_private *cfi = map->fldrv_priv;
324 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
326 if (cfip->FeatureSupport&32) {
327 printk(KERN_INFO "Using auto-unlock on power-up/resume\n" );
328 mtd->flags |= MTD_POWERUP_LOCK;
332 static struct cfi_fixup cfi_fixup_table[] = {
333 { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
334 { CFI_MFR_ATMEL, AT49BV640D, fixup_at49bv640dx_lock },
335 { CFI_MFR_ATMEL, AT49BV640DT, fixup_at49bv640dx_lock },
336 #ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
337 { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash },
339 #ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
340 { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend },
342 #if !FORCE_WORD_WRITE
343 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
345 { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct },
346 { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb },
347 { CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock },
348 { CFI_MFR_SHARP, CFI_ID_ANY, fixup_unlock_powerup_lock },
349 { CFI_MFR_SHARP, CFI_ID_ANY, fixup_LH28F640BF },
353 static struct cfi_fixup jedec_fixup_table[] = {
354 { CFI_MFR_INTEL, I82802AB, fixup_use_fwh_lock },
355 { CFI_MFR_INTEL, I82802AC, fixup_use_fwh_lock },
356 { CFI_MFR_ST, M50LPW080, fixup_use_fwh_lock },
357 { CFI_MFR_ST, M50FLW080A, fixup_use_fwh_lock },
358 { CFI_MFR_ST, M50FLW080B, fixup_use_fwh_lock },
361 static struct cfi_fixup fixup_table[] = {
362 /* The CFI vendor IDs and the JEDEC vendor IDs appear
363 * to be common, and it looks like the device IDs are as
364 * well. This table picks up all cases where
365 * we know that is the case.
367 { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point },
371 static void cfi_fixup_major_minor(struct cfi_private *cfi,
372 struct cfi_pri_intelext *extp)
374 if (cfi->mfr == CFI_MFR_INTEL &&
375 cfi->id == PF38F4476 && extp->MinorVersion == '3')
376 extp->MinorVersion = '1';
379 static int cfi_is_micron_28F00AP30(struct cfi_private *cfi, struct flchip *chip)
382 * Micron (formerly Numonyx) 1Gbit bottom-boot chips are buggy w.r.t.
383 * Erase Suspend for their small Erase Blocks (0x8000)
385 if (cfi->mfr == CFI_MFR_INTEL && cfi->id == M28F00AP30)
390 static inline struct cfi_pri_intelext *
391 read_pri_intelext(struct map_info *map, __u16 adr)
393 struct cfi_private *cfi = map->fldrv_priv;
394 struct cfi_pri_intelext *extp;
395 unsigned int extra_size = 0;
396 unsigned int extp_size = sizeof(*extp);
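/*
 * Fetch the basic extended query table, then walk the version-dependent
 * extra fields (protection register, burst read, partition and programming
 * region info), growing extp_size and re-reading until the whole table
 * fits, and bailing out if it would exceed 4096 bytes.
 */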
399 extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
403 cfi_fixup_major_minor(cfi, extp);
405 if (extp->MajorVersion != '1' ||
406 (extp->MinorVersion < '0' || extp->MinorVersion > '5')) {
407 printk(KERN_ERR " Unknown Intel/Sharp Extended Query "
408 "version %c.%c.\n", extp->MajorVersion,
414 /* Do some byteswapping if necessary */
415 extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
416 extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
417 extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);
419 if (extp->MinorVersion >= '0') {
422 /* Protection Register info */
423 extra_size += (extp->NumProtectionFields - 1) *
424 sizeof(struct cfi_intelext_otpinfo);
427 if (extp->MinorVersion >= '1') {
428 /* Burst Read info */
430 if (extp_size < sizeof(*extp) + extra_size)
432 extra_size += extp->extra[extra_size - 1];
435 if (extp->MinorVersion >= '3') {
438 /* Number of hardware-partitions */
440 if (extp_size < sizeof(*extp) + extra_size)
442 nb_parts = extp->extra[extra_size - 1];
444 /* skip the sizeof(partregion) field in CFI 1.4 */
445 if (extp->MinorVersion >= '4')
448 for (i = 0; i < nb_parts; i++) {
449 struct cfi_intelext_regioninfo *rinfo;
450 rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
451 extra_size += sizeof(*rinfo);
452 if (extp_size < sizeof(*extp) + extra_size)
454 rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
455 extra_size += (rinfo->NumBlockTypes - 1)
456 * sizeof(struct cfi_intelext_blockinfo);
459 if (extp->MinorVersion >= '4')
460 extra_size += sizeof(struct cfi_intelext_programming_regioninfo);
462 if (extp_size < sizeof(*extp) + extra_size) {
464 extp_size = sizeof(*extp) + extra_size;
466 if (extp_size > 4096) {
468 "%s: cfi_pri_intelext is too fat\n",
479 struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
481 struct cfi_private *cfi = map->fldrv_priv;
482 struct mtd_info *mtd;
485 mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
489 mtd->type = MTD_NORFLASH;
491 /* Fill in the default mtd operations */
492 mtd->_erase = cfi_intelext_erase_varsize;
493 mtd->_read = cfi_intelext_read;
494 mtd->_write = cfi_intelext_write_words;
495 mtd->_sync = cfi_intelext_sync;
496 mtd->_lock = cfi_intelext_lock;
497 mtd->_unlock = cfi_intelext_unlock;
498 mtd->_is_locked = cfi_intelext_is_locked;
499 mtd->_suspend = cfi_intelext_suspend;
500 mtd->_resume = cfi_intelext_resume;
501 mtd->flags = MTD_CAP_NORFLASH;
502 mtd->name = map->name;
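/* MaxBufWriteSize is the log2 of the per-chip write buffer size in bytes;
   scale it by the interleave to get the map-wide write buffer size. */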
504 mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
506 mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;
508 if (cfi->cfi_mode == CFI_MODE_CFI) {
510 * It's a real CFI chip, not one for which the probe
511 * routine faked a CFI structure. So we read the feature
514 __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
515 struct cfi_pri_intelext *extp;
517 extp = read_pri_intelext(map, adr);
523 /* Install our own private info structure */
524 cfi->cmdset_priv = extp;
526 cfi_fixup(mtd, cfi_fixup_table);
528 #ifdef DEBUG_CFI_FEATURES
529 /* Tell the user about it in lots of lovely detail */
530 cfi_tell_features(extp);
533 if(extp->SuspendCmdSupport & 1) {
534 printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
537 else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
538 /* Apply jedec specific fixups */
539 cfi_fixup(mtd, jedec_fixup_table);
541 /* Apply generic fixups */
542 cfi_fixup(mtd, fixup_table);
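/* CFI encodes timeouts as log2 values: typical word/buffer program time is
   1<<Typ microseconds and typical block erase time is 1000<<Typ microseconds
   (the CFI field is log2 of the time in ms); worst-case times shift further
   by the Max field.  E.g. Typ=4, Max=2 gives a 16 us typical word write and
   a 64 us worst case.  Generous defaults (50 ms program, 2 s erase) are used
   when a field is zero. */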
544 for (i=0; i< cfi->numchips; i++) {
545 if (cfi->cfiq->WordWriteTimeoutTyp)
546 cfi->chips[i].word_write_time =
547 1<<cfi->cfiq->WordWriteTimeoutTyp;
549 cfi->chips[i].word_write_time = 50000;
551 if (cfi->cfiq->BufWriteTimeoutTyp)
552 cfi->chips[i].buffer_write_time =
553 1<<cfi->cfiq->BufWriteTimeoutTyp;
554 /* No default; if it isn't specified, we won't use it */
556 if (cfi->cfiq->BlockEraseTimeoutTyp)
557 cfi->chips[i].erase_time =
558 1000<<cfi->cfiq->BlockEraseTimeoutTyp;
560 cfi->chips[i].erase_time = 2000000;
562 if (cfi->cfiq->WordWriteTimeoutTyp &&
563 cfi->cfiq->WordWriteTimeoutMax)
564 cfi->chips[i].word_write_time_max =
565 1<<(cfi->cfiq->WordWriteTimeoutTyp +
566 cfi->cfiq->WordWriteTimeoutMax);
568 cfi->chips[i].word_write_time_max = 50000 * 8;
570 if (cfi->cfiq->BufWriteTimeoutTyp &&
571 cfi->cfiq->BufWriteTimeoutMax)
572 cfi->chips[i].buffer_write_time_max =
573 1<<(cfi->cfiq->BufWriteTimeoutTyp +
574 cfi->cfiq->BufWriteTimeoutMax);
576 if (cfi->cfiq->BlockEraseTimeoutTyp &&
577 cfi->cfiq->BlockEraseTimeoutMax)
578 cfi->chips[i].erase_time_max =
579 1000<<(cfi->cfiq->BlockEraseTimeoutTyp +
580 cfi->cfiq->BlockEraseTimeoutMax);
582 cfi->chips[i].erase_time_max = 2000000 * 8;
584 cfi->chips[i].ref_point_counter = 0;
585 init_waitqueue_head(&(cfi->chips[i].wq));
588 map->fldrv = &cfi_intelext_chipdrv;
590 return cfi_intelext_setup(mtd);
592 struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
593 struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
594 EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
595 EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
596 EXPORT_SYMBOL_GPL(cfi_cmdset_0200);
598 static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
600 struct map_info *map = mtd->priv;
601 struct cfi_private *cfi = map->fldrv_priv;
602 unsigned long offset = 0;
604 unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
606 //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);
608 mtd->size = devsize * cfi->numchips;
610 mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
611 mtd->eraseregions = kzalloc(sizeof(struct mtd_erase_region_info)
612 * mtd->numeraseregions, GFP_KERNEL);
613 if (!mtd->eraseregions)
616 for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
617 unsigned long ernum, ersize;
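/* Each EraseRegionInfo word packs the block count minus one into bits 0-15
   and the block size in units of 256 bytes into bits 16-31; e.g. 0x01000003
   describes 4 blocks of 64 KiB each. */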
618 ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
619 ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;
621 if (mtd->erasesize < ersize) {
622 mtd->erasesize = ersize;
624 for (j=0; j<cfi->numchips; j++) {
625 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
626 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
627 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
628 mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL);
629 if (!mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap)
632 offset += (ersize * ernum);
635 if (offset != devsize) {
637 printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
641 for (i=0; i<mtd->numeraseregions;i++){
642 printk(KERN_DEBUG "erase region %d: offset=0x%llx,size=0x%x,blocks=%d\n",
643 i,(unsigned long long)mtd->eraseregions[i].offset,
644 mtd->eraseregions[i].erasesize,
645 mtd->eraseregions[i].numblocks);
648 #ifdef CONFIG_MTD_OTP
649 mtd->_read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
650 mtd->_read_user_prot_reg = cfi_intelext_read_user_prot_reg;
651 mtd->_write_user_prot_reg = cfi_intelext_write_user_prot_reg;
652 mtd->_lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
653 mtd->_get_fact_prot_info = cfi_intelext_get_fact_prot_info;
654 mtd->_get_user_prot_info = cfi_intelext_get_user_prot_info;
657 /* This function has the potential to distort the reality
658 a bit and therefore should be called last. */
659 if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
662 __module_get(THIS_MODULE);
663 register_reboot_notifier(&mtd->reboot_notifier);
667 if (mtd->eraseregions)
668 for (i=0; i<cfi->cfiq->NumEraseRegions; i++)
669 for (j=0; j<cfi->numchips; j++)
670 kfree(mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap);
671 kfree(mtd->eraseregions);
673 kfree(cfi->cmdset_priv);
677 static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
678 struct cfi_private **pcfi)
680 struct map_info *map = mtd->priv;
681 struct cfi_private *cfi = *pcfi;
682 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
685 * Probing of multi-partition flash chips.
687 * To support multiple partitions when available, we simply arrange
688 * for each of them to have their own flchip structure even if they
689 * are on the same physical chip. This means completely recreating
690 * a new cfi_private structure right here which is a blatant code
691 * layering violation, but this is still the least intrusive
692 * arrangement at this point. This can be rearranged in the future
693 * if someone feels motivated enough. --nico
695 if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
696 && extp->FeatureSupport & (1 << 9)) {
697 struct cfi_private *newcfi;
699 struct flchip_shared *shared;
700 int offs, numregions, numparts, partshift, numvirtchips, i, j;
702 /* Protection Register info */
703 offs = (extp->NumProtectionFields - 1) *
704 sizeof(struct cfi_intelext_otpinfo);
706 /* Burst Read info */
707 offs += extp->extra[offs+1]+2;
709 /* Number of partition regions */
710 numregions = extp->extra[offs];
713 /* skip the sizeof(partregion) field in CFI 1.4 */
714 if (extp->MinorVersion >= '4')
717 /* Number of hardware partitions */
719 for (i = 0; i < numregions; i++) {
720 struct cfi_intelext_regioninfo *rinfo;
721 rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
722 numparts += rinfo->NumIdentPartitions;
723 offs += sizeof(*rinfo)
724 + (rinfo->NumBlockTypes - 1) *
725 sizeof(struct cfi_intelext_blockinfo);
731 /* Programming Region info */
732 if (extp->MinorVersion >= '4') {
733 struct cfi_intelext_programming_regioninfo *prinfo;
734 prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
735 mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
736 mtd->flags &= ~MTD_BIT_WRITEABLE;
737 printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
738 map->name, mtd->writesize,
739 cfi->interleave * prinfo->ControlValid,
740 cfi->interleave * prinfo->ControlInvalid);
744 * All functions below currently rely on all chips having
745 * the same geometry so we'll just assume that all hardware
746 * partitions are of the same size too.
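 *
 * numparts is in practice a power of two, so __ffs(numparts) acts as
 * log2(numparts) and each hardware partition spans 1 << partshift bytes.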
748 partshift = cfi->chipshift - __ffs(numparts);
750 if ((1 << partshift) < mtd->erasesize) {
752 "%s: bad number of hw partitions (%d)\n",
757 numvirtchips = cfi->numchips * numparts;
758 newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
761 shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
766 memcpy(newcfi, cfi, sizeof(struct cfi_private));
767 newcfi->numchips = numvirtchips;
768 newcfi->chipshift = partshift;
770 chip = &newcfi->chips[0];
771 for (i = 0; i < cfi->numchips; i++) {
772 shared[i].writing = shared[i].erasing = NULL;
773 mutex_init(&shared[i].lock);
774 for (j = 0; j < numparts; j++) {
775 *chip = cfi->chips[i];
776 chip->start += j << partshift;
777 chip->priv = &shared[i];
778 /* those should be reset too since
779 they create memory references. */
780 init_waitqueue_head(&chip->wq);
781 mutex_init(&chip->mutex);
786 printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
787 "--> %d partitions of %d KiB\n",
788 map->name, cfi->numchips, cfi->interleave,
789 newcfi->numchips, 1<<(newcfi->chipshift-10));
791 map->fldrv_priv = newcfi;
800 * *********** CHIP ACCESS FUNCTIONS ***********
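 *
 * chip_ready() waits until the chip (or hardware partition) can accept an
 * operation of type 'mode', suspending an in-progress erase when the
 * extended query table allows it.  get_chip() additionally arbitrates the
 * per-die 'shared' writing/erasing ownership used by partitioned chips,
 * and put_chip() resumes whatever was suspended and hands ownership back.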
802 static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
804 DECLARE_WAITQUEUE(wait, current);
805 struct cfi_private *cfi = map->fldrv_priv;
806 map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
807 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
808 unsigned long timeo = jiffies + HZ;
810 /* Prevent setting state FL_SYNCING for chip in suspended state. */
811 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
814 switch (chip->state) {
818 status = map_read(map, adr);
819 if (map_word_andequal(map, status, status_OK, status_OK))
822 /* At this point we're fine with write operations
823 in other partitions as they don't conflict. */
824 if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
827 mutex_unlock(&chip->mutex);
829 mutex_lock(&chip->mutex);
830 /* Someone else might have been playing with it. */
841 !(cfip->FeatureSupport & 2) ||
842 !(mode == FL_READY || mode == FL_POINT ||
843 (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
846 /* Do not allow suspend if the read/write targets the erase-block address being erased */
847 if ((adr & chip->in_progress_block_mask) ==
848 chip->in_progress_block_addr)
851 /* do not suspend small EBs, buggy Micron Chips */
852 if (cfi_is_micron_28F00AP30(cfi, chip) &&
853 (chip->in_progress_block_mask == ~(0x8000-1)))
857 map_write(map, CMD(0xB0), chip->in_progress_block_addr);
859 /* If the flash has finished erasing, then 'erase suspend'
860 * appears to make some (28F320) flash devices switch to
861 * 'read' mode. Make sure that we switch to 'read status'
862 * mode so we get the right data. --rmk
864 map_write(map, CMD(0x70), chip->in_progress_block_addr);
865 chip->oldstate = FL_ERASING;
866 chip->state = FL_ERASE_SUSPENDING;
867 chip->erase_suspended = 1;
869 status = map_read(map, chip->in_progress_block_addr);
870 if (map_word_andequal(map, status, status_OK, status_OK))
873 if (time_after(jiffies, timeo)) {
874 /* Urgh. Resume and pretend we weren't here.
875 * Make sure we're in 'read status' mode if it had finished */
876 put_chip(map, chip, adr);
877 printk(KERN_ERR "%s: Chip not ready after erase "
878 "suspended: status = 0x%lx\n", map->name, status.x[0]);
882 mutex_unlock(&chip->mutex);
884 mutex_lock(&chip->mutex);
885 /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
886 So we can just loop here. */
888 chip->state = FL_STATUS;
891 case FL_XIP_WHILE_ERASING:
892 if (mode != FL_READY && mode != FL_POINT &&
893 (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
895 chip->oldstate = chip->state;
896 chip->state = FL_READY;
900 /* The machine is rebooting now, so no one can get the chip anymore */
903 /* Only if there's no operation suspended... */
904 if (mode == FL_READY && chip->oldstate == FL_READY)
909 set_current_state(TASK_UNINTERRUPTIBLE);
910 add_wait_queue(&chip->wq, &wait);
911 mutex_unlock(&chip->mutex);
913 remove_wait_queue(&chip->wq, &wait);
914 mutex_lock(&chip->mutex);
919 static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
922 DECLARE_WAITQUEUE(wait, current);
926 (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE
927 || mode == FL_SHUTDOWN) && chip->state != FL_SYNCING) {
929 * OK. We have possibility for contention on the write/erase
930 * operations which are global to the real chip and not per
931 * partition. So let's fight it over in the partition which
932 * currently has authority on the operation.
934 * The rules are as follows:
936 * - any write operation must own shared->writing.
938 * - any erase operation must own _both_ shared->writing and
941 * - contention arbitration is handled in the owner's context.
943 * The 'shared' struct can be read and/or written only when
946 struct flchip_shared *shared = chip->priv;
947 struct flchip *contender;
948 mutex_lock(&shared->lock);
949 contender = shared->writing;
950 if (contender && contender != chip) {
952 * The engine to perform desired operation on this
953 * partition is already in use by someone else.
954 * Let's fight over it in the context of the chip
955 * currently using it. If it is possible to suspend,
956 * that other partition will do just that, otherwise
957 * it'll happily send us to sleep. In any case, when
958 * get_chip returns success we're clear to go ahead.
960 ret = mutex_trylock(&contender->mutex);
961 mutex_unlock(&shared->lock);
964 mutex_unlock(&chip->mutex);
965 ret = chip_ready(map, contender, contender->start, mode);
966 mutex_lock(&chip->mutex);
968 if (ret == -EAGAIN) {
969 mutex_unlock(&contender->mutex);
973 mutex_unlock(&contender->mutex);
976 mutex_lock(&shared->lock);
978 /* We should not own chip if it is already
979 * in FL_SYNCING state. Put contender and retry. */
980 if (chip->state == FL_SYNCING) {
981 put_chip(map, contender, contender->start);
982 mutex_unlock(&contender->mutex);
985 mutex_unlock(&contender->mutex);
988 /* Check if we already have suspended erase
989 * on this chip. Sleep. */
990 if (mode == FL_ERASING && shared->erasing
991 && shared->erasing->oldstate == FL_ERASING) {
992 mutex_unlock(&shared->lock);
993 set_current_state(TASK_UNINTERRUPTIBLE);
994 add_wait_queue(&chip->wq, &wait);
995 mutex_unlock(&chip->mutex);
997 remove_wait_queue(&chip->wq, &wait);
998 mutex_lock(&chip->mutex);
1003 shared->writing = chip;
1004 if (mode == FL_ERASING)
1005 shared->erasing = chip;
1006 mutex_unlock(&shared->lock);
1008 ret = chip_ready(map, chip, adr, mode);
1015 static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
1017 struct cfi_private *cfi = map->fldrv_priv;
1020 struct flchip_shared *shared = chip->priv;
1021 mutex_lock(&shared->lock);
1022 if (shared->writing == chip && chip->oldstate == FL_READY) {
1023 /* We own the ability to write, but we're done */
1024 shared->writing = shared->erasing;
1025 if (shared->writing && shared->writing != chip) {
1026 /* give back ownership to who we loaned it from */
1027 struct flchip *loaner = shared->writing;
1028 mutex_lock(&loaner->mutex);
1029 mutex_unlock(&shared->lock);
1030 mutex_unlock(&chip->mutex);
1031 put_chip(map, loaner, loaner->start);
1032 mutex_lock(&chip->mutex);
1033 mutex_unlock(&loaner->mutex);
1037 shared->erasing = NULL;
1038 shared->writing = NULL;
1039 } else if (shared->erasing == chip && shared->writing != chip) {
1041 * We own the ability to erase without the ability
1042 * to write, which means the erase was suspended
1043 * and some other partition is currently writing.
1044 * Don't let the switch below mess things up since
1045 * we don't have ownership to resume anything.
1047 mutex_unlock(&shared->lock);
1051 mutex_unlock(&shared->lock);
1054 switch(chip->oldstate) {
1056 /* What if one interleaved chip has finished and the
1057 other hasn't? The old code would leave the finished
1058 one in READY mode. That's bad, and caused -EROFS
1059 errors to be returned from do_erase_oneblock because
1060 that's the only bit it checked for at the time.
1061 As the state machine appears to explicitly allow
1062 sending the 0x70 (Read Status) command to an erasing
1063 chip and expecting it to be ignored, that's what we
1065 map_write(map, CMD(0xd0), chip->in_progress_block_addr);
1066 map_write(map, CMD(0x70), chip->in_progress_block_addr);
1067 chip->oldstate = FL_READY;
1068 chip->state = FL_ERASING;
1071 case FL_XIP_WHILE_ERASING:
1072 chip->state = chip->oldstate;
1073 chip->oldstate = FL_READY;
1078 case FL_JEDEC_QUERY:
1081 printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
1086 #ifdef CONFIG_MTD_XIP
1089 * No interrupt whatsoever can be serviced while the flash isn't in array
1090 * mode. This is ensured by the xip_disable() and xip_enable() functions
1091 * enclosing any code path where the flash is known not to be in array mode.
1092 * And within a XIP disabled code path, only functions marked with __xipram
1093 * may be called and nothing else (it's a good thing to inspect generated
1094 * assembly to make sure inline functions were actually inlined and that gcc
1095 * didn't emit calls to its own support functions). Configuring MTD CFI
1096 * support for a single buswidth and a single interleave is also recommended.
1099 static void xip_disable(struct map_info *map, struct flchip *chip,
1102 /* TODO: chips with no XIP use should ignore and return */
1103 (void) map_read(map, adr); /* ensure mmu mapping is up to date */
1104 local_irq_disable();
1107 static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
1110 struct cfi_private *cfi = map->fldrv_priv;
1111 if (chip->state != FL_POINT && chip->state != FL_READY) {
1112 map_write(map, CMD(0xff), adr);
1113 chip->state = FL_READY;
1115 (void) map_read(map, adr);
1121 * When a delay is required for the flash operation to complete, the
1122 * xip_wait_for_operation() function polls for both the given timeout
1123 * and pending (but still masked) hardware interrupts. Whenever an
1124 * interrupt is pending, the flash erase or write operation is suspended,
1125 * array mode restored and interrupts unmasked. Task scheduling might also
1126 * happen at that point. The CPU eventually returns from the interrupt or
1127 * the call to schedule() and the suspended flash operation is resumed for
1128 * the remainder of the delay period.
1130 * Warning: this function _will_ fool interrupt latency tracing tools.
1133 static int __xipram xip_wait_for_operation(
1134 struct map_info *map, struct flchip *chip,
1135 unsigned long adr, unsigned int chip_op_time_max)
1137 struct cfi_private *cfi = map->fldrv_priv;
1138 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
1139 map_word status, OK = CMD(0x80);
1140 unsigned long usec, suspended, start, done;
1141 flstate_t oldstate, newstate;
1143 start = xip_currtime();
1144 usec = chip_op_time_max;
1151 if (xip_irqpending() && cfip &&
1152 ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
1153 (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
1154 (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
1156 * Let's suspend the erase or write operation when
1157 * supported. Note that we currently don't try to
1158 * suspend interleaved chips if there is already
1159 * another operation suspended (imagine what happens
1160 * when one chip was already done with the current
1161 * operation while another chip suspended it, then
1162 * we resume the whole thing at once). Yes, it
1166 map_write(map, CMD(0xb0), adr);
1167 map_write(map, CMD(0x70), adr);
1168 suspended = xip_currtime();
1170 if (xip_elapsed_since(suspended) > 100000) {
1172 * The chip doesn't want to suspend
1173 * after waiting for 100 msecs.
1174 * This is a critical error but there
1175 * is not much we can do here.
1179 status = map_read(map, adr);
1180 } while (!map_word_andequal(map, status, OK, OK));
1182 /* Suspend succeeded */
1183 oldstate = chip->state;
1184 if (oldstate == FL_ERASING) {
1185 if (!map_word_bitsset(map, status, CMD(0x40)))
1187 newstate = FL_XIP_WHILE_ERASING;
1188 chip->erase_suspended = 1;
1190 if (!map_word_bitsset(map, status, CMD(0x04)))
1192 newstate = FL_XIP_WHILE_WRITING;
1193 chip->write_suspended = 1;
1195 chip->state = newstate;
1196 map_write(map, CMD(0xff), adr);
1197 (void) map_read(map, adr);
1200 mutex_unlock(&chip->mutex);
1205 * We're back. However someone else might have
1206 * decided to go write to the chip if we are in
1207 * a suspended erase state. If so let's wait
1210 mutex_lock(&chip->mutex);
1211 while (chip->state != newstate) {
1212 DECLARE_WAITQUEUE(wait, current);
1213 set_current_state(TASK_UNINTERRUPTIBLE);
1214 add_wait_queue(&chip->wq, &wait);
1215 mutex_unlock(&chip->mutex);
1217 remove_wait_queue(&chip->wq, &wait);
1218 mutex_lock(&chip->mutex);
1220 /* Disallow XIP again */
1221 local_irq_disable();
1223 /* Resume the write or erase operation */
1224 map_write(map, CMD(0xd0), adr);
1225 map_write(map, CMD(0x70), adr);
1226 chip->state = oldstate;
1227 start = xip_currtime();
1228 } else if (usec >= 1000000/HZ) {
1230 * Try to save on CPU power when waiting delay
1231 * is at least a system timer tick period.
1232 * No need to be extremely accurate here.
1236 status = map_read(map, adr);
1237 done = xip_elapsed_since(start);
1238 } while (!map_word_andequal(map, status, OK, OK)
1241 return (done >= usec) ? -ETIME : 0;
1245 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
1246 * the flash is actively programming or erasing since we have to poll for
1247 * the operation to complete anyway. We can't do that in a generic way with
1248 * a XIP setup so do it before the actual flash operation in this case
1249 * and stub it out from INVAL_CACHE_AND_WAIT.
1251 #define XIP_INVAL_CACHED_RANGE(map, from, size) \
1252 INVALIDATE_CACHED_RANGE(map, from, size)
1254 #define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec, usec_max) \
1255 xip_wait_for_operation(map, chip, cmd_adr, usec_max)
1259 #define xip_disable(map, chip, adr)
1260 #define xip_enable(map, chip, adr)
1261 #define XIP_INVAL_CACHED_RANGE(x...)
1262 #define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation
1264 static int inval_cache_and_wait_for_operation(
1265 struct map_info *map, struct flchip *chip,
1266 unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
1267 unsigned int chip_op_time, unsigned int chip_op_time_max)
1269 struct cfi_private *cfi = map->fldrv_priv;
1270 map_word status, status_OK = CMD(0x80);
1271 int chip_state = chip->state;
1272 unsigned int timeo, sleep_time, reset_timeo;
1274 mutex_unlock(&chip->mutex);
1276 INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
1277 mutex_lock(&chip->mutex);
1279 timeo = chip_op_time_max;
1282 reset_timeo = timeo;
1283 sleep_time = chip_op_time / 2;
1286 if (chip->state != chip_state) {
1287 /* Someone's suspended the operation: sleep */
1288 DECLARE_WAITQUEUE(wait, current);
1289 set_current_state(TASK_UNINTERRUPTIBLE);
1290 add_wait_queue(&chip->wq, &wait);
1291 mutex_unlock(&chip->mutex);
1293 remove_wait_queue(&chip->wq, &wait);
1294 mutex_lock(&chip->mutex);
1298 status = map_read(map, cmd_adr);
1299 if (map_word_andequal(map, status, status_OK, status_OK))
1302 if (chip->erase_suspended && chip_state == FL_ERASING) {
1303 /* Erase suspend occurred while we slept: reset the timeout */
1304 timeo = reset_timeo;
1305 chip->erase_suspended = 0;
1307 if (chip->write_suspended && chip_state == FL_WRITING) {
1308 /* Write suspend occurred while we slept: reset the timeout */
1309 timeo = reset_timeo;
1310 chip->write_suspended = 0;
1313 map_write(map, CMD(0x70), cmd_adr);
1314 chip->state = FL_STATUS;
1318 /* OK Still waiting. Drop the lock, wait a while and retry. */
1319 mutex_unlock(&chip->mutex);
1320 if (sleep_time >= 1000000/HZ) {
1322 * Half of the normal delay still remaining
1323 * can be performed with a sleeping delay instead
1326 msleep(sleep_time/1000);
1327 timeo -= sleep_time;
1328 sleep_time = 1000000/HZ;
1334 mutex_lock(&chip->mutex);
1337 /* Done and happy. */
1338 chip->state = FL_STATUS;
1344 #define WAIT_TIMEOUT(map, chip, adr, udelay, udelay_max) \
1345 INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay, udelay_max);
1348 static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
1350 unsigned long cmd_addr;
1351 struct cfi_private *cfi = map->fldrv_priv;
1356 /* Ensure cmd read/writes are aligned. */
1357 cmd_addr = adr & ~(map_bankwidth(map)-1);
1359 mutex_lock(&chip->mutex);
1361 ret = get_chip(map, chip, cmd_addr, FL_POINT);
1364 if (chip->state != FL_POINT && chip->state != FL_READY)
1365 map_write(map, CMD(0xff), cmd_addr);
1367 chip->state = FL_POINT;
1368 chip->ref_point_counter++;
1370 mutex_unlock(&chip->mutex);
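/*
 * mtd_point() support: hand back a direct pointer into the memory-mapped
 * flash.  The chip is parked in FL_POINT and ref_point_counter tracks
 * nested point/unpoint calls; cfi_intelext_unpoint() drops the state back
 * to FL_READY once the last reference goes away.
 */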
1375 static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len,
1376 size_t *retlen, void **virt, resource_size_t *phys)
1378 struct map_info *map = mtd->priv;
1379 struct cfi_private *cfi = map->fldrv_priv;
1380 unsigned long ofs, last_end = 0;
1387 /* Now lock the chip(s) to POINT state */
1389 /* ofs: offset within the first chip that the first read should start */
1390 chipnum = (from >> cfi->chipshift);
1391 ofs = from - (chipnum << cfi->chipshift);
1393 *virt = map->virt + cfi->chips[chipnum].start + ofs;
1395 *phys = map->phys + cfi->chips[chipnum].start + ofs;
1398 unsigned long thislen;
1400 if (chipnum >= cfi->numchips)
1403 /* We cannot point across chips that are virtually disjoint */
1405 last_end = cfi->chips[chipnum].start;
1406 else if (cfi->chips[chipnum].start != last_end)
1409 if ((len + ofs -1) >> cfi->chipshift)
1410 thislen = (1<<cfi->chipshift) - ofs;
1414 ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
1422 last_end += 1 << cfi->chipshift;
1428 static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
1430 struct map_info *map = mtd->priv;
1431 struct cfi_private *cfi = map->fldrv_priv;
1433 int chipnum, err = 0;
1435 /* Now unlock the chip(s) POINT state */
1437 /* ofs: offset within the first chip that the first read should start */
1438 chipnum = (from >> cfi->chipshift);
1439 ofs = from - (chipnum << cfi->chipshift);
1441 while (len && !err) {
1442 unsigned long thislen;
1443 struct flchip *chip;
1445 chip = &cfi->chips[chipnum];
1446 if (chipnum >= cfi->numchips)
1449 if ((len + ofs -1) >> cfi->chipshift)
1450 thislen = (1<<cfi->chipshift) - ofs;
1454 mutex_lock(&chip->mutex);
1455 if (chip->state == FL_POINT) {
1456 chip->ref_point_counter--;
1457 if(chip->ref_point_counter == 0)
1458 chip->state = FL_READY;
1460 printk(KERN_ERR "%s: Error: unpoint called on non pointed region\n", map->name);
1464 put_chip(map, chip, chip->start);
1465 mutex_unlock(&chip->mutex);
1475 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
1477 unsigned long cmd_addr;
1478 struct cfi_private *cfi = map->fldrv_priv;
1483 /* Ensure cmd read/writes are aligned. */
1484 cmd_addr = adr & ~(map_bankwidth(map)-1);
1486 mutex_lock(&chip->mutex);
1487 ret = get_chip(map, chip, cmd_addr, FL_READY);
1489 mutex_unlock(&chip->mutex);
1493 if (chip->state != FL_POINT && chip->state != FL_READY) {
1494 map_write(map, CMD(0xff), cmd_addr);
1496 chip->state = FL_READY;
1499 map_copy_from(map, buf, adr, len);
1501 put_chip(map, chip, cmd_addr);
1503 mutex_unlock(&chip->mutex);
1507 static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1509 struct map_info *map = mtd->priv;
1510 struct cfi_private *cfi = map->fldrv_priv;
1515 /* ofs: offset within the first chip that the first read should start */
1516 chipnum = (from >> cfi->chipshift);
1517 ofs = from - (chipnum << cfi->chipshift);
1520 unsigned long thislen;
1522 if (chipnum >= cfi->numchips)
1525 if ((len + ofs -1) >> cfi->chipshift)
1526 thislen = (1<<cfi->chipshift) - ofs;
1530 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1544 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1545 unsigned long adr, map_word datum, int mode)
1547 struct cfi_private *cfi = map->fldrv_priv;
1548 map_word status, write_cmd;
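/* FL_WRITING uses the word program command (0x40, or 0x41 on Performance-family
   parts); FL_OTP_WRITE uses the protection register program command 0xC0. */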
1555 write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0x40) : CMD(0x41);
1558 write_cmd = CMD(0xc0);
1564 mutex_lock(&chip->mutex);
1565 ret = get_chip(map, chip, adr, mode);
1567 mutex_unlock(&chip->mutex);
1571 XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1573 xip_disable(map, chip, adr);
1574 map_write(map, write_cmd, adr);
1575 map_write(map, datum, adr);
1578 ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1579 adr, map_bankwidth(map),
1580 chip->word_write_time,
1581 chip->word_write_time_max);
1583 xip_enable(map, chip, adr);
1584 printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
1588 /* check for errors */
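/* The 0x1a mask covers SR.4 (program error), SR.3 (VPP low) and SR.1 (block locked). */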
1589 status = map_read(map, adr);
1590 if (map_word_bitsset(map, status, CMD(0x1a))) {
1591 unsigned long chipstatus = MERGESTATUS(status);
1594 map_write(map, CMD(0x50), adr);
1595 map_write(map, CMD(0x70), adr);
1596 xip_enable(map, chip, adr);
1598 if (chipstatus & 0x02) {
1600 } else if (chipstatus & 0x08) {
1601 printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
1604 printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
1611 xip_enable(map, chip, adr);
1612 out: DISABLE_VPP(map);
1613 put_chip(map, chip, adr);
1614 mutex_unlock(&chip->mutex);
1619 static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
1621 struct map_info *map = mtd->priv;
1622 struct cfi_private *cfi = map->fldrv_priv;
1627 chipnum = to >> cfi->chipshift;
1628 ofs = to - (chipnum << cfi->chipshift);
1630 /* If it's not bus-aligned, do the first byte write */
1631 if (ofs & (map_bankwidth(map)-1)) {
1632 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1633 int gap = ofs - bus_ofs;
1637 n = min_t(int, len, map_bankwidth(map)-gap);
1638 datum = map_word_ff(map);
1639 datum = map_word_load_partial(map, datum, buf, gap, n);
1641 ret = do_write_oneword(map, &cfi->chips[chipnum],
1642 bus_ofs, datum, FL_WRITING);
1651 if (ofs >> cfi->chipshift) {
1654 if (chipnum == cfi->numchips)
1659 while(len >= map_bankwidth(map)) {
1660 map_word datum = map_word_load(map, buf);
1662 ret = do_write_oneword(map, &cfi->chips[chipnum],
1663 ofs, datum, FL_WRITING);
1667 ofs += map_bankwidth(map);
1668 buf += map_bankwidth(map);
1669 (*retlen) += map_bankwidth(map);
1670 len -= map_bankwidth(map);
1672 if (ofs >> cfi->chipshift) {
1675 if (chipnum == cfi->numchips)
1680 if (len & (map_bankwidth(map)-1)) {
1683 datum = map_word_ff(map);
1684 datum = map_word_load_partial(map, datum, buf, 0, len);
1686 ret = do_write_oneword(map, &cfi->chips[chipnum],
1687 ofs, datum, FL_WRITING);
1698 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1699 unsigned long adr, const struct kvec **pvec,
1700 unsigned long *pvec_seek, int len)
1702 struct cfi_private *cfi = map->fldrv_priv;
1703 map_word status, write_cmd, datum;
1704 unsigned long cmd_adr;
1705 int ret, wbufsize, word_gap, words;
1706 const struct kvec *vec;
1707 unsigned long vec_seek;
1708 unsigned long initial_adr;
1709 int initial_len = len;
1711 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1714 cmd_adr = adr & ~(wbufsize-1);
1716 /* Sharp LH28F640BF chips need the first address for the
1717 * Page Buffer Program command. See Table 5 of
1718 * LH28F320BF, LH28F640BF, LH28F128BF Series (Appendix FUM00701) */
1719 if (is_LH28F640BF(cfi))
1722 /* Let's determine this according to the interleave only once */
1723 write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0xe8) : CMD(0xe9);
1725 mutex_lock(&chip->mutex);
1726 ret = get_chip(map, chip, cmd_adr, FL_WRITING);
1728 mutex_unlock(&chip->mutex);
1732 XIP_INVAL_CACHED_RANGE(map, initial_adr, initial_len);
1734 xip_disable(map, chip, cmd_adr);
1736 /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
1737 [...], the device will not accept any more Write to Buffer commands".
1738 So we must check here and reset those bits if they're set. Otherwise
1739 we're just pissing in the wind */
1740 if (chip->state != FL_STATUS) {
1741 map_write(map, CMD(0x70), cmd_adr);
1742 chip->state = FL_STATUS;
1744 status = map_read(map, cmd_adr);
1745 if (map_word_bitsset(map, status, CMD(0x30))) {
1746 xip_enable(map, chip, cmd_adr);
1747 printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
1748 xip_disable(map, chip, cmd_adr);
1749 map_write(map, CMD(0x50), cmd_adr);
1750 map_write(map, CMD(0x70), cmd_adr);
1753 chip->state = FL_WRITING_TO_BUFFER;
1754 map_write(map, write_cmd, cmd_adr);
1755 ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0, 0);
1757 /* Argh. Not ready for write to buffer */
1758 map_word Xstatus = map_read(map, cmd_adr);
1759 map_write(map, CMD(0x70), cmd_adr);
1760 chip->state = FL_STATUS;
1761 status = map_read(map, cmd_adr);
1762 map_write(map, CMD(0x50), cmd_adr);
1763 map_write(map, CMD(0x70), cmd_adr);
1764 xip_enable(map, chip, cmd_adr);
1765 printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
1766 map->name, Xstatus.x[0], status.x[0]);
1770 /* Figure out the number of words to write */
1771 word_gap = (-adr & (map_bankwidth(map)-1));
1772 words = DIV_ROUND_UP(len - word_gap, map_bankwidth(map));
1776 word_gap = map_bankwidth(map) - word_gap;
1778 datum = map_word_ff(map);
1781 /* Write length of data to come */
1782 map_write(map, CMD(words), cmd_adr );
1786 vec_seek = *pvec_seek;
1788 int n = map_bankwidth(map) - word_gap;
1789 if (n > vec->iov_len - vec_seek)
1790 n = vec->iov_len - vec_seek;
1794 if (!word_gap && len < map_bankwidth(map))
1795 datum = map_word_ff(map);
1797 datum = map_word_load_partial(map, datum,
1798 vec->iov_base + vec_seek,
1803 if (!len || word_gap == map_bankwidth(map)) {
1804 map_write(map, datum, adr);
1805 adr += map_bankwidth(map);
1810 if (vec_seek == vec->iov_len) {
1816 *pvec_seek = vec_seek;
1819 map_write(map, CMD(0xd0), cmd_adr);
1820 chip->state = FL_WRITING;
1822 ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
1823 initial_adr, initial_len,
1824 chip->buffer_write_time,
1825 chip->buffer_write_time_max);
1827 map_write(map, CMD(0x70), cmd_adr);
1828 chip->state = FL_STATUS;
1829 xip_enable(map, chip, cmd_adr);
1830 printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
1834 /* check for errors */
1835 status = map_read(map, cmd_adr);
1836 if (map_word_bitsset(map, status, CMD(0x1a))) {
1837 unsigned long chipstatus = MERGESTATUS(status);
1840 map_write(map, CMD(0x50), cmd_adr);
1841 map_write(map, CMD(0x70), cmd_adr);
1842 xip_enable(map, chip, cmd_adr);
1844 if (chipstatus & 0x02) {
1846 } else if (chipstatus & 0x08) {
1847 printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
1850 printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
1857 xip_enable(map, chip, cmd_adr);
1858 out: DISABLE_VPP(map);
1859 put_chip(map, chip, cmd_adr);
1860 mutex_unlock(&chip->mutex);
1864 static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
1865 unsigned long count, loff_t to, size_t *retlen)
1867 struct map_info *map = mtd->priv;
1868 struct cfi_private *cfi = map->fldrv_priv;
1869 int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1872 unsigned long ofs, vec_seek, i;
1875 for (i = 0; i < count; i++)
1876 len += vecs[i].iov_len;
1881 chipnum = to >> cfi->chipshift;
1882 ofs = to - (chipnum << cfi->chipshift);
1886 /* We must not cross write block boundaries */
1887 int size = wbufsize - (ofs & (wbufsize-1));
1891 ret = do_write_buffer(map, &cfi->chips[chipnum],
1892 ofs, &vecs, &vec_seek, size);
1900 if (ofs >> cfi->chipshift) {
1903 if (chipnum == cfi->numchips)
1907 /* Be nice and reschedule with the chip in a usable state for other
1916 static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
1917 size_t len, size_t *retlen, const u_char *buf)
1921 vec.iov_base = (void *) buf;
1924 return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
1927 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1928 unsigned long adr, int len, void *thunk)
1930 struct cfi_private *cfi = map->fldrv_priv;
1938 mutex_lock(&chip->mutex);
1939 ret = get_chip(map, chip, adr, FL_ERASING);
1941 mutex_unlock(&chip->mutex);
1945 XIP_INVAL_CACHED_RANGE(map, adr, len);
1947 xip_disable(map, chip, adr);
1949 /* Clear the status register first */
1950 map_write(map, CMD(0x50), adr);
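/* Now erase: block erase setup (0x20) followed by confirm (0xD0) */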
1953 map_write(map, CMD(0x20), adr);
1954 map_write(map, CMD(0xD0), adr);
1955 chip->state = FL_ERASING;
1956 chip->erase_suspended = 0;
1957 chip->in_progress_block_addr = adr;
1958 chip->in_progress_block_mask = ~(len - 1);
1960 ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1963 chip->erase_time_max);
1965 map_write(map, CMD(0x70), adr);
1966 chip->state = FL_STATUS;
1967 xip_enable(map, chip, adr);
1968 printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
1972 /* We've broken this before. It doesn't hurt to be safe */
1973 map_write(map, CMD(0x70), adr);
1974 chip->state = FL_STATUS;
1975 status = map_read(map, adr);
1977 /* check for errors */
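/* The 0x3a mask covers SR.5 (erase error), SR.4 (program error), SR.3 (VPP low)
   and SR.1 (block locked); SR.4 and SR.5 set together signal a bad command sequence. */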
1978 if (map_word_bitsset(map, status, CMD(0x3a))) {
1979 unsigned long chipstatus = MERGESTATUS(status);
1981 /* Reset the error bits */
1982 map_write(map, CMD(0x50), adr);
1983 map_write(map, CMD(0x70), adr);
1984 xip_enable(map, chip, adr);
1986 if ((chipstatus & 0x30) == 0x30) {
1987 printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
1989 } else if (chipstatus & 0x02) {
1990 /* Protection bit set */
1992 } else if (chipstatus & 0x8) {
1994 printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
1996 } else if (chipstatus & 0x20 && retries--) {
1997 printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
1999 put_chip(map, chip, adr);
2000 mutex_unlock(&chip->mutex);
2003 printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
2010 xip_enable(map, chip, adr);
2011 out: DISABLE_VPP(map);
2012 put_chip(map, chip, adr);
2013 mutex_unlock(&chip->mutex);
2017 static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
2019 unsigned long ofs, len;
2025 ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
2029 instr->state = MTD_ERASE_DONE;
2030 mtd_erase_callback(instr);
2035 static void cfi_intelext_sync (struct mtd_info *mtd)
2037 struct map_info *map = mtd->priv;
2038 struct cfi_private *cfi = map->fldrv_priv;
2040 struct flchip *chip;
2043 for (i=0; !ret && i<cfi->numchips; i++) {
2044 chip = &cfi->chips[i];
2046 mutex_lock(&chip->mutex);
2047 ret = get_chip(map, chip, chip->start, FL_SYNCING);
2050 chip->oldstate = chip->state;
2051 chip->state = FL_SYNCING;
2052 /* No need to wake_up() on this state change -
2053 * as the whole point is that nobody can do anything
2054 * with the chip now anyway.
2057 mutex_unlock(&chip->mutex);
2060 /* Unlock the chips again */
2062 for (i--; i >=0; i--) {
2063 chip = &cfi->chips[i];
2065 mutex_lock(&chip->mutex);
2067 if (chip->state == FL_SYNCING) {
2068 chip->state = chip->oldstate;
2069 chip->oldstate = FL_READY;
2072 mutex_unlock(&chip->mutex);
2076 static int __xipram do_getlockstatus_oneblock(struct map_info *map,
2077 struct flchip *chip,
2079 int len, void *thunk)
2081 struct cfi_private *cfi = map->fldrv_priv;
2082 int status, ofs_factor = cfi->interleave * cfi->device_type;
2085 xip_disable(map, chip, adr+(2*ofs_factor));
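/* In Read Device Identifier mode (0x90) the query word at block base + 2 is the
   block lock status: bit 0 = lock bit, bit 1 = lock-down. */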
2086 map_write(map, CMD(0x90), adr+(2*ofs_factor));
2087 chip->state = FL_JEDEC_QUERY;
2088 status = cfi_read_query(map, adr+(2*ofs_factor));
2089 xip_enable(map, chip, 0);
2093 #ifdef DEBUG_LOCK_BITS
2094 static int __xipram do_printlockstatus_oneblock(struct map_info *map,
2095 struct flchip *chip,
2097 int len, void *thunk)
2099 printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
2100 adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk));
#define DO_XXLOCK_ONEBLOCK_LOCK		((void *) 1)
#define DO_XXLOCK_ONEBLOCK_UNLOCK	((void *) 2)

static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
				       unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	int mdelay;
	int ret;

	adr += chip->start;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_LOCKING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	map_write(map, CMD(0x60), adr);
	if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
		map_write(map, CMD(0x01), adr);
		chip->state = FL_LOCKING;
	} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
		map_write(map, CMD(0xD0), adr);
		chip->state = FL_UNLOCKING;
	} else
		BUG();

	/*
	 * If Instant Individual Block Locking supported then no need
	 * to delay.
	 *
	 * Unlocking may take up to 1.4 seconds on some Intel flashes. So
	 * lets use a max of 1.5 seconds (1500ms) as timeout.
	 *
	 * See "Clear Block Lock-Bits Time" on page 40 in
	 * "3 Volt Intel StrataFlash Memory" 28F128J3,28F640J3,28F320J3 manual
	 * from February 2003
	 */
	mdelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1500 : 0;

	ret = WAIT_TIMEOUT(map, chip, adr, mdelay, mdelay * 1000);
	if (ret) {
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
		xip_enable(map, chip, adr);
		printk(KERN_ERR "%s: block unlock error: (status timeout)\n", map->name);
		goto out;
	}

	xip_enable(map, chip, adr);
 out:	DISABLE_VPP(map);
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);
	return ret;
}
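
/*
 * MTD lock/unlock entry points, reached through mtd_lock()/mtd_unlock().
 * cfi_varsize_frob() splits the [ofs, ofs + len) range into erase blocks
 * and applies do_xxlock_oneblock() to each of them.  A caller unlocking
 * the first block before modifying it might do (sketch only, assuming
 * 'mtd' is an already-probed device):
 *
 *	if (!mtd_unlock(mtd, 0, mtd->erasesize))
 *		... erase/write the first block ...
 */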
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	int ret;

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
	       __func__, ofs, len);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock, ofs, len, NULL);
#endif
	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
			       ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status after, ret=%d\n", __func__, ret);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock, ofs, len, NULL);
#endif
	return ret;
}

static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	int ret;

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
	       __func__, ofs, len);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock, ofs, len, NULL);
#endif
	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
			       ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status after, ret=%d\n", __func__, ret);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock, ofs, len, NULL);
#endif
	return ret;
}

static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
				  uint64_t len)
{
	return cfi_varsize_frob(mtd, do_getlockstatus_oneblock,
				ofs, len, NULL) ? 1 : 0;
}
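
/*
 * One-time programmable (OTP) protection register support.  The helpers
 * below read, program and lock the factory/user protection registers
 * described by the Intel extended query table.
 */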
#ifdef CONFIG_MTD_OTP

typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
			u_long data_offset, u_char *buf, u_int size,
			u_long prot_offset, u_int groupno, u_int groupsize);

static int __xipram
do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	/* let's ensure we're not reading back cached data from array mode */
	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);

	xip_disable(map, chip, chip->start);
	if (chip->state != FL_JEDEC_QUERY) {
		map_write(map, CMD(0x90), chip->start);
		chip->state = FL_JEDEC_QUERY;
	}
	map_copy_from(map, buf, chip->start + offset, size);
	xip_enable(map, chip, chip->start);

	/* then ensure we don't keep OTP data in the cache */
	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);

	put_chip(map, chip, chip->start);
	mutex_unlock(&chip->mutex);
	return 0;
}

static int
do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
	     u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	int ret;

	while (size) {
		unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
		int gap = offset - bus_ofs;
		int n = min_t(int, size, map_bankwidth(map)-gap);
		map_word datum = map_word_ff(map);

		datum = map_word_load_partial(map, datum, buf, gap, n);
		ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
		if (ret)
			return ret;

		offset += n;
		buf += n;
		size -= n;
	}
	return 0;
}

static int
do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word datum;

	/* make sure area matches group boundaries */
	if (size != grpsz)
		return -EXDEV;

	datum = map_word_ff(map);
	datum = map_word_clr(map, datum, CMD(1 << grpno));
	return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
}
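
/*
 * Walk every OTP protection register region on every physical chip and
 * either report it (action == NULL fills 'buf' with otp_info records) or
 * apply 'action' (read/write/lock) to the requested byte range.
 */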
static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
				 size_t *retlen, u_char *buf,
				 otp_op_t action, int user_regs)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	struct flchip *chip;
	struct cfi_intelext_otpinfo *otp;
	u_long devsize, reg_prot_offset, data_offset;
	u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
	u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
	int ret;

	*retlen = 0;

	/* Check that we actually have some OTP registers */
	if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
		return -ENODATA;

	/* we need real chips here not virtual ones */
	devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
	chip_step = devsize >> cfi->chipshift;
	chip_num = 0;

	/* Some chips have OTP located in the _top_ partition only.
	   For example: Intel 28F256L18T (T means top-parameter device) */
	if (cfi->mfr == CFI_MFR_INTEL) {
		switch (cfi->id) {
		case 0x880b:
		case 0x880c:
		case 0x880d:
			chip_num = chip_step - 1;
		}
	}

	for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
		chip = &cfi->chips[chip_num];
		otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];

		/* first OTP region */
		field = 0;
		reg_prot_offset = extp->ProtRegAddr;
		reg_fact_groups = 1;
		reg_fact_size = 1 << extp->FactProtRegSize;
		reg_user_groups = 1;
		reg_user_size = 1 << extp->UserProtRegSize;

		while (len > 0) {

			/* flash geometry fixup */
			data_offset = reg_prot_offset + 1;
			data_offset *= cfi->interleave * cfi->device_type;
			reg_prot_offset *= cfi->interleave * cfi->device_type;
			reg_fact_size *= cfi->interleave;
			reg_user_size *= cfi->interleave;

			if (user_regs) {
				groups = reg_user_groups;
				groupsize = reg_user_size;
				/* skip over factory reg area */
				groupno = reg_fact_groups;
				data_offset += reg_fact_groups * reg_fact_size;
			} else {
				groups = reg_fact_groups;
				groupsize = reg_fact_size;
				groupno = 0;
			}

			while (len > 0 && groups > 0) {
				if (!action) {
					/*
					 * Special case: if action is NULL
					 * we fill buf with otp_info records.
					 */
					struct otp_info *otpinfo;
					map_word lockword;
					len -= sizeof(struct otp_info);
					if (len <= 0)
						return -ENOSPC;
					ret = do_otp_read(map, chip,
							  reg_prot_offset,
							  (u_char *)&lockword,
							  map_bankwidth(map),
							  0, 0, 0);
					if (ret)
						return ret;
					otpinfo = (struct otp_info *)buf;
					otpinfo->start = from;
					otpinfo->length = groupsize;
					otpinfo->locked =
					   !map_word_bitsset(map, lockword,
							     CMD(1 << groupno));
					from += groupsize;
					buf += sizeof(*otpinfo);
					*retlen += sizeof(*otpinfo);
				} else if (from >= groupsize) {
					from -= groupsize;
					data_offset += groupsize;
				} else {
					int size = groupsize;
					data_offset += from;
					size -= from;
					from = 0;
					if (size > len)
						size = len;
					ret = action(map, chip, data_offset,
						     buf, size, reg_prot_offset,
						     groupno, groupsize);
					if (ret)
						return ret;
					buf += size;
					len -= size;
					*retlen += size;
					data_offset += size;
				}
				groupno++;
				groups--;
			}

			/* next OTP region */
			if (++field == extp->NumProtectionFields)
				break;
			reg_prot_offset = otp->ProtRegAddr;
			reg_fact_groups = otp->FactGroups;
			reg_fact_size = 1 << otp->FactProtRegSize;
			reg_user_groups = otp->UserGroups;
			reg_user_size = 1 << otp->UserProtRegSize;
			otp++;
		}
	}

	return 0;
}
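
/* Thin wrappers that bind cfi_intelext_otp_walk() to the MTD OTP API
 * (factory vs. user register space; read, write, lock and info). */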
static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
					   size_t len, size_t *retlen, u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_read, 0);
}

static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
					   size_t len, size_t *retlen, u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_read, 1);
}

static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
					    size_t len, size_t *retlen, u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_write, 1);
}

static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
					   loff_t from, size_t len)
{
	size_t retlen;
	return cfi_intelext_otp_walk(mtd, from, len, &retlen,
				     NULL, do_otp_lock, 1);
}

static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd, size_t len,
					   size_t *retlen, struct otp_info *buf)
{
	return cfi_intelext_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
				     NULL, 0);
}

static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd, size_t len,
					   size_t *retlen, struct otp_info *buf)
{
	return cfi_intelext_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
				     NULL, 1);
}

#endif
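
/* Record the lock state of every erase block in the per-region lockmap so
 * it can be reapplied after a suspend/resume power cycle. */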
static void cfi_intelext_save_locks(struct mtd_info *mtd)
{
	struct mtd_erase_region_info *region;
	int block, status, i;
	unsigned long adr;
	size_t len;

	for (i = 0; i < mtd->numeraseregions; i++) {
		region = &mtd->eraseregions[i];
		if (!region->lockmap)
			continue;

		for (block = 0; block < region->numblocks; block++){
			len = region->erasesize;
			adr = region->offset + block * len;

			status = cfi_varsize_frob(mtd,
					do_getlockstatus_oneblock, adr, len, NULL);
			if (status)
				set_bit(block, region->lockmap);
			else
				clear_bit(block, region->lockmap);
		}
	}
}
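
/* Power-management suspend: park every idle chip in FL_PM_SUSPENDED, or
 * back out with -EAGAIN if any chip still has an operation in flight. */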
static int cfi_intelext_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	if ((mtd->flags & MTD_POWERUP_LOCK)
	    && extp && (extp->FeatureSupport & (1 << 5)))
		cfi_intelext_save_locks(mtd);

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		switch (chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			if (chip->oldstate == FL_READY) {
				/* place the chip in a known state before suspend */
				map_write(map, CMD(0xFF), cfi->chips[i].start);
				chip->oldstate = chip->state;
				chip->state = FL_PM_SUSPENDED;
				/* No need to wake_up() on this state change -
				 * as the whole point is that nobody can do anything
				 * with the chip now anyway. */
			} else {
				/* There seems to be an operation pending. We must wait for it. */
				printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
				ret = -EAGAIN;
			}
			break;
		default:
			/* Should we actually wait? Once upon a time these routines weren't
			   allowed to. Or should we return -EAGAIN, because the upper layers
			   ought to have already shut down anything which was using the device
			   anyway? The latter for now. */
			printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
			ret = -EAGAIN;
			break;
		case FL_PM_SUSPENDED:
			break;
		}
		mutex_unlock(&chip->mutex);
	}

	/* Unlock the chips again */
	if (ret) {
		for (i--; i >=0; i--) {
			chip = &cfi->chips[i];

			mutex_lock(&chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				/* No need to force it into a known state here,
				   because we're returning failure, and it didn't
				   get power cycled */
				chip->state = chip->oldstate;
				chip->oldstate = FL_READY;
				wake_up(&chip->wq);
			}
			mutex_unlock(&chip->mutex);
		}
	}

	return ret;
}
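
/* Parts with power-up locking come back with every block locked after a
 * power cycle; unlock again each block the lockmap recorded as unlocked. */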
static void cfi_intelext_restore_locks(struct mtd_info *mtd)
{
	struct mtd_erase_region_info *region;
	int block, i;
	unsigned long adr;
	size_t len;

	for (i = 0; i < mtd->numeraseregions; i++) {
		region = &mtd->eraseregions[i];
		if (!region->lockmap)
			continue;

		for_each_clear_bit(block, region->lockmap, region->numblocks) {
			len = region->erasesize;
			adr = region->offset + block * len;
			cfi_intelext_unlock(mtd, adr, len);
		}
	}
}
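
/* Power-management resume: put suspended chips back into array mode (they
 * may have been power cycled) and restore the saved block locks. */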
static void cfi_intelext_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	int i;
	struct flchip *chip;

	for (i=0; i<cfi->numchips; i++) {

		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		/* Go to known state. Chip may have been power cycled */
		if (chip->state == FL_PM_SUSPENDED) {
			/* Refresh LH28F640BF Partition Config. Register */
			fixup_LH28F640BF(mtd);
			map_write(map, CMD(0xFF), cfi->chips[i].start);
			chip->oldstate = chip->state = FL_READY;
			wake_up(&chip->wq);
		}

		mutex_unlock(&chip->mutex);
	}

	if ((mtd->flags & MTD_POWERUP_LOCK)
	    && extp && (extp->FeatureSupport & (1 << 5)))
		cfi_intelext_restore_locks(mtd);
}
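
/* Reboot path: force every chip back into array read mode so a bootloader
 * stored in flash remains readable after a soft reset. */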
static int cfi_intelext_reset(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i, ret;

	for (i=0; i < cfi->numchips; i++) {
		struct flchip *chip = &cfi->chips[i];

		/* force the completion of any ongoing operation
		   and switch to array mode so any bootloader in
		   flash is accessible for soft reboot. */
		mutex_lock(&chip->mutex);
		ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
		if (!ret) {
			map_write(map, CMD(0xff), chip->start);
			chip->state = FL_SHUTDOWN;
			put_chip(map, chip, chip->start);
		}
		mutex_unlock(&chip->mutex);
	}

	return 0;
}

static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
			       void *v)
{
	struct mtd_info *mtd;

	mtd = container_of(nb, struct mtd_info, reboot_notifier);
	cfi_intelext_reset(mtd);
	return NOTIFY_DONE;
}
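
/* Final teardown: drop the reboot notifier and free everything allocated
 * during probe/setup, including the per-region lockmaps. */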
static void cfi_intelext_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_erase_region_info *region;
	int i;

	cfi_intelext_reset(mtd);
	unregister_reboot_notifier(&mtd->reboot_notifier);
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi->chips[0].priv);
	kfree(cfi);

	for (i = 0; i < mtd->numeraseregions; i++) {
		region = &mtd->eraseregions[i];
		kfree(region->lockmap);
	}
	kfree(mtd->eraseregions);
}

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
MODULE_ALIAS("cfi_cmdset_0003");
MODULE_ALIAS("cfi_cmdset_0200");