/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 *
 * 10/10/2000   Nicolas Pitre <nico@fluxnic.net>
 *      - completely revamped method functions so they are aware and
 *        independent of the flash geometry (buswidth, interleave, etc.)
 *      - scalability vs code size is completely set at compile-time
 *        (see include/linux/mtd/cfi.h for selection)
 *      - optimized write buffer method
 * 02/05/2002   Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 *      - reworked lock/unlock/erase support for var size flash
 * 21/03/2007   Rodolfo Giometti <giometti@linux.it>
 *      - auto unlock sectors on resume for auto locking flash on power up
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/bitmap.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>

/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

// debugging, turns off buffer write mode if set to 1
#define FORCE_WORD_WRITE 0

/* Intel chips */
#define I82802AB        0x00ad
#define I82802AC        0x00ac
#define PF38F4476       0x881c
#define M28F00AP30      0x8963
/* STMicroelectronics chips */
#define M50LPW080       0x002F
#define M50FLW080A      0x0080
#define M50FLW080B      0x0081
/* Atmel chips */
#define AT49BV640D      0x02de
#define AT49BV640DT     0x02db
/* Sharp chips */
#define LH28F640BFHE_PTTL90     0x00b0
#define LH28F640BFHE_PBTL90     0x00b1
#define LH28F640BFHE_PTTL70A    0x00b2
#define LH28F640BFHE_PBTL70A    0x00b3

static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
                                  uint64_t len);
#ifdef CONFIG_MTD_OTP
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info(struct mtd_info *, size_t,
                                           size_t *, struct otp_info *);
static int cfi_intelext_get_user_prot_info(struct mtd_info *, size_t,
                                           size_t *, struct otp_info *);
#endif
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
                     size_t *retlen, void **virt, resource_size_t *phys);
static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len);

static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"



/*
 *  *********** SETUP AND PROBE BITS  ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
        .probe          = NULL, /* Not usable directly */
        .destroy        = cfi_intelext_destroy,
        .name           = "cfi_cmdset_0001",
        .module         = THIS_MODULE
};
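
/*
 * This driver is never probed directly (.probe above is NULL): a board map
 * driver registers a map_info and runs a generic CFI probe, which reads the
 * primary vendor command set ID from the CFI query table and dispatches to
 * cfi_cmdset_0001() for ID 0x0001 (or 0x0003/0x0200 via the aliases further
 * down).  A minimal sketch of that path -- 'my_map' and MY_FLASH_BASE are
 * hypothetical placeholders, not part of this file:
 *
 *      static struct map_info my_map = {
 *              .name      = "my-nor",
 *              .size      = 0x800000,
 *              .bankwidth = 2,
 *      };
 *
 *      my_map.virt = ioremap(MY_FLASH_BASE, my_map.size);
 *      simple_map_init(&my_map);
 *      mtd = do_map_probe("cfi_probe", &my_map);
 */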

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
        int i;
        printk("  Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
        printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
        printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
        printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
        printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
        printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
        printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
        printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
        printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
        printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
        printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
        printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
        printk("     - Extended Flash Array:    %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
        for (i=11; i<32; i++) {
                if (extp->FeatureSupport & (1<<i))
                        printk("     - Unknown Bit %X:      supported\n", i);
        }

        printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
        printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
        for (i=1; i<8; i++) {
                if (extp->SuspendCmdSupport & (1<<i))
                        printk("     - Unknown Bit %X:               supported\n", i);
        }

        printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
        printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
        printk("     - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
        for (i=2; i<3; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }
        printk("     - EFA Lock Bit:         %s\n", extp->BlkStatusRegMask&16?"yes":"no");
        printk("     - EFA Lock-Down Bit:    %s\n", extp->BlkStatusRegMask&32?"yes":"no");
        for (i=6; i<16; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }

        printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
               extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
        if (extp->VppOptimal)
                printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
                       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

/* Atmel chips don't use the same PRI format as Intel chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;
        struct cfi_pri_atmel atmel_pri;
        uint32_t features = 0;

        /* Reverse byteswapping */
        extp->FeatureSupport = cpu_to_le32(extp->FeatureSupport);
        extp->BlkStatusRegMask = cpu_to_le16(extp->BlkStatusRegMask);
        extp->ProtRegAddr = cpu_to_le16(extp->ProtRegAddr);

        memcpy(&atmel_pri, extp, sizeof(atmel_pri));
        memset((char *)extp + 5, 0, sizeof(*extp) - 5);

        printk(KERN_ERR "atmel Features: %02x\n", atmel_pri.Features);

        if (atmel_pri.Features & 0x01) /* chip erase supported */
                features |= (1<<0);
        if (atmel_pri.Features & 0x02) /* erase suspend supported */
                features |= (1<<1);
        if (atmel_pri.Features & 0x04) /* program suspend supported */
                features |= (1<<2);
        if (atmel_pri.Features & 0x08) /* simultaneous operations supported */
                features |= (1<<9);
        if (atmel_pri.Features & 0x20) /* page mode read supported */
                features |= (1<<7);
        if (atmel_pri.Features & 0x40) /* queued erase supported */
                features |= (1<<4);
        if (atmel_pri.Features & 0x80) /* Protection bits supported */
                features |= (1<<6);

        extp->FeatureSupport = features;

        /* burst write mode not supported */
        cfi->cfiq->BufWriteTimeoutTyp = 0;
        cfi->cfiq->BufWriteTimeoutMax = 0;
}

static void fixup_at49bv640dx_lock(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        cfip->FeatureSupport |= (1 << 5);
        mtd->flags |= MTD_POWERUP_LOCK;
}

#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel StrataFlash chips prior to FPO revision C have bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
                            "erase on write disabled.\n");
        extp->SuspendCmdSupport &= ~1;
}
#endif

#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        if (cfip && (cfip->FeatureSupport&4)) {
                cfip->FeatureSupport &= ~4;
                printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
        }
}
#endif

static void fixup_st_m28w320ct(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        cfi->cfiq->BufWriteTimeoutTyp = 0;      /* Not supported */
        cfi->cfiq->BufWriteTimeoutMax = 0;      /* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        /* Note this is done after the region info is endian swapped */
        cfi->cfiq->EraseRegionInfo[1] =
                (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}
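
/*
 * For reference when reading the fixup above: CFI packs each erase region
 * descriptor into one 32-bit word -- bits 31:16 give the block size in
 * 256-byte units, bits 15:0 give the block count minus one.  So forcing the
 * low half of EraseRegionInfo[1] to 0x3e declares 0x3e + 1 = 63 blocks while
 * leaving the size field alone.  The same decoding is done by
 * cfi_intelext_setup() below:
 *
 *      ersize = ((info >> 8) & ~0xff) * cfi->interleave;
 *      ernum  = (info & 0xffff) + 1;
 */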

static int is_LH28F640BF(struct cfi_private *cfi)
{
        /* Sharp LH28F640BF Family */
        if (cfi->mfr == CFI_MFR_SHARP && (
            cfi->id == LH28F640BFHE_PTTL90 || cfi->id == LH28F640BFHE_PBTL90 ||
            cfi->id == LH28F640BFHE_PTTL70A || cfi->id == LH28F640BFHE_PBTL70A))
                return 1;
        return 0;
}

static void fixup_LH28F640BF(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        /* Reset the Partition Configuration Register on LH28F640BF
         * to a single partition (PCR = 0x000): PCR is embedded into A0-A15. */
        if (is_LH28F640BF(cfi)) {
                printk(KERN_INFO "Reset Partition Config. Register: 1 Partition of 4 planes\n");
                map_write(map, CMD(0x60), 0);
                map_write(map, CMD(0x04), 0);

                /* We have set a single partition, thus
                 * Simultaneous Operations are not allowed */
                printk(KERN_INFO "cfi_cmdset_0001: Simultaneous Operations disabled\n");
                extp->FeatureSupport &= ~512;
        }
}

static void fixup_use_point(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        if (!mtd->_point && map_is_linear(map)) {
                mtd->_point   = cfi_intelext_point;
                mtd->_unpoint = cfi_intelext_unpoint;
        }
}

static void fixup_use_write_buffers(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        if (cfi->cfiq->BufWriteTimeoutTyp) {
                printk(KERN_INFO "Using buffer write method\n");
                mtd->_write = cfi_intelext_write_buffers;
                mtd->_writev = cfi_intelext_writev;
        }
}

/*
 * Some chips power-up with all sectors locked by default.
 */
static void fixup_unlock_powerup_lock(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        if (cfip->FeatureSupport&32) {
                printk(KERN_INFO "Using auto-unlock on power-up/resume\n");
                mtd->flags |= MTD_POWERUP_LOCK;
        }
}

static struct cfi_fixup cfi_fixup_table[] = {
        { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
        { CFI_MFR_ATMEL, AT49BV640D, fixup_at49bv640dx_lock },
        { CFI_MFR_ATMEL, AT49BV640DT, fixup_at49bv640dx_lock },
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend },
#endif
#if !FORCE_WORD_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
#endif
        { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct },
        { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb },
        { CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock },
        { CFI_MFR_SHARP, CFI_ID_ANY, fixup_unlock_powerup_lock },
        { CFI_MFR_SHARP, CFI_ID_ANY, fixup_LH28F640BF },
        { 0, 0, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
        { CFI_MFR_INTEL, I82802AB,   fixup_use_fwh_lock },
        { CFI_MFR_INTEL, I82802AC,   fixup_use_fwh_lock },
        { CFI_MFR_ST,    M50LPW080,  fixup_use_fwh_lock },
        { CFI_MFR_ST,    M50FLW080A, fixup_use_fwh_lock },
        { CFI_MFR_ST,    M50FLW080B, fixup_use_fwh_lock },
        { 0, 0, NULL }
};
static struct cfi_fixup fixup_table[] = {
        /* The CFI vendor IDs and the JEDEC vendor IDs appear
         * to be common, and it looks like the device IDs are
         * as well.  This table picks up all the cases where
         * we know that is the case.
         */
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point },
        { 0, 0, NULL }
};
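
/*
 * cfi_fixup() (in drivers/mtd/chips/cfi_util.c) walks one of the tables
 * above and calls every entry whose manufacturer and device IDs match the
 * probed chip, with CFI_MFR_ANY/CFI_ID_ANY acting as wildcards.  Roughly:
 *
 *      for (f = fixups; f->fixup; f++)
 *              if ((f->mfr == CFI_MFR_ANY || f->mfr == cfi->mfr) &&
 *                  (f->id  == CFI_ID_ANY  || f->id  == cfi->id))
 *                      f->fixup(mtd);
 */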

static void cfi_fixup_major_minor(struct cfi_private *cfi,
                                                struct cfi_pri_intelext *extp)
{
        if (cfi->mfr == CFI_MFR_INTEL &&
                        cfi->id == PF38F4476 && extp->MinorVersion == '3')
                extp->MinorVersion = '1';
}

static int cfi_is_micron_28F00AP30(struct cfi_private *cfi, struct flchip *chip)
{
        /*
         * Micron (formerly Numonyx) 1Gbit bottom-boot chips are buggy w.r.t.
         * erase suspend for their small erase blocks (0x8000)
         */
        if (cfi->mfr == CFI_MFR_INTEL && cfi->id == M28F00AP30)
                return 1;
        return 0;
}

static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp;
        unsigned int extra_size = 0;
        unsigned int extp_size = sizeof(*extp);

 again:
        extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
        if (!extp)
                return NULL;

        cfi_fixup_major_minor(cfi, extp);

        if (extp->MajorVersion != '1' ||
            (extp->MinorVersion < '0' || extp->MinorVersion > '5')) {
                printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
                       "version %c.%c.\n",  extp->MajorVersion,
                       extp->MinorVersion);
                kfree(extp);
                return NULL;
        }

        /* Do some byteswapping if necessary */
        extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
        extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
        extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

        if (extp->MinorVersion >= '0') {
                extra_size = 0;

                /* Protection Register info */
                if (extp->NumProtectionFields) {
                        struct cfi_intelext_otpinfo *otp =
                                (struct cfi_intelext_otpinfo *)&extp->extra[0];

                        extra_size += (extp->NumProtectionFields - 1) *
                                sizeof(struct cfi_intelext_otpinfo);

                        if (extp_size >= sizeof(*extp) + extra_size) {
                                int i;

                                /* Do some byteswapping if necessary */
                                for (i = 0; i < extp->NumProtectionFields - 1; i++) {
                                        otp->ProtRegAddr = le32_to_cpu(otp->ProtRegAddr);
                                        otp->FactGroups = le16_to_cpu(otp->FactGroups);
                                        otp->UserGroups = le16_to_cpu(otp->UserGroups);
                                        otp++;
                                }
                        }
                }
        }

        if (extp->MinorVersion >= '1') {
                /* Burst Read info */
                extra_size += 2;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                extra_size += extp->extra[extra_size - 1];
        }

        if (extp->MinorVersion >= '3') {
                int nb_parts, i;

                /* Number of hardware-partitions */
                extra_size += 1;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                nb_parts = extp->extra[extra_size - 1];

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        extra_size += 2;

                for (i = 0; i < nb_parts; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
                        extra_size += sizeof(*rinfo);
                        if (extp_size < sizeof(*extp) + extra_size)
                                goto need_more;
                        rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
                        extra_size += (rinfo->NumBlockTypes - 1)
                                      * sizeof(struct cfi_intelext_blockinfo);
                }

                if (extp->MinorVersion >= '4')
                        extra_size += sizeof(struct cfi_intelext_programming_regioninfo);

                if (extp_size < sizeof(*extp) + extra_size) {
                        need_more:
                        extp_size = sizeof(*extp) + extra_size;
                        kfree(extp);
                        if (extp_size > 4096) {
                                printk(KERN_ERR
                                        "%s: cfi_pri_intelext is too fat\n",
                                        __func__);
                                return NULL;
                        }
                        goto again;
                }
        }

        return extp;
}
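
/*
 * A worked example of the sizing loop above, assuming a version 1.3 table
 * with NumProtectionFields = 2 and one partition region describing two
 * block types: extra_size picks up sizeof(struct cfi_intelext_otpinfo) for
 * the second protection field, 2 bytes plus the advertised length of the
 * burst read info, 1 byte for the region count, then sizeof(regioninfo)
 * plus one extra sizeof(blockinfo) for the region.  If the initial
 * sizeof(*extp) read did not cover that total, the table is simply re-read
 * once with extp_size grown to match (bounded at 4096 bytes).
 */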

struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct mtd_info *mtd;
        int i;

        mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
        if (!mtd)
                return NULL;
        mtd->priv = map;
        mtd->type = MTD_NORFLASH;

        /* Fill in the default mtd operations */
        mtd->_erase   = cfi_intelext_erase_varsize;
        mtd->_read    = cfi_intelext_read;
        mtd->_write   = cfi_intelext_write_words;
        mtd->_sync    = cfi_intelext_sync;
        mtd->_lock    = cfi_intelext_lock;
        mtd->_unlock  = cfi_intelext_unlock;
        mtd->_is_locked = cfi_intelext_is_locked;
        mtd->_suspend = cfi_intelext_suspend;
        mtd->_resume  = cfi_intelext_resume;
        mtd->flags   = MTD_CAP_NORFLASH;
        mtd->name    = map->name;
        mtd->writesize = 1;
        mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;

        mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

        if (cfi->cfi_mode == CFI_MODE_CFI) {
                /*
                 * It's a real CFI chip, not one for which the probe
                 * routine faked a CFI structure. So we read the feature
                 * table from it.
                 */
                __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
                struct cfi_pri_intelext *extp;

                extp = read_pri_intelext(map, adr);
                if (!extp) {
                        kfree(mtd);
                        return NULL;
                }

                /* Install our own private info structure */
                cfi->cmdset_priv = extp;

                cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
                /* Tell the user about it in lots of lovely detail */
                cfi_tell_features(extp);
#endif

                if (extp->SuspendCmdSupport & 1) {
                        printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
                }
        }
        else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
                /* Apply jedec specific fixups */
                cfi_fixup(mtd, jedec_fixup_table);
        }
        /* Apply generic fixups */
        cfi_fixup(mtd, fixup_table);

        for (i=0; i< cfi->numchips; i++) {
                if (cfi->cfiq->WordWriteTimeoutTyp)
                        cfi->chips[i].word_write_time =
                                1<<cfi->cfiq->WordWriteTimeoutTyp;
                else
                        cfi->chips[i].word_write_time = 50000;

                if (cfi->cfiq->BufWriteTimeoutTyp)
                        cfi->chips[i].buffer_write_time =
                                1<<cfi->cfiq->BufWriteTimeoutTyp;
                /* No default; if it isn't specified, we won't use it */

                if (cfi->cfiq->BlockEraseTimeoutTyp)
                        cfi->chips[i].erase_time =
                                1000<<cfi->cfiq->BlockEraseTimeoutTyp;
                else
                        cfi->chips[i].erase_time = 2000000;

                if (cfi->cfiq->WordWriteTimeoutTyp &&
                    cfi->cfiq->WordWriteTimeoutMax)
                        cfi->chips[i].word_write_time_max =
                                1<<(cfi->cfiq->WordWriteTimeoutTyp +
                                    cfi->cfiq->WordWriteTimeoutMax);
                else
                        cfi->chips[i].word_write_time_max = 50000 * 8;

                if (cfi->cfiq->BufWriteTimeoutTyp &&
                    cfi->cfiq->BufWriteTimeoutMax)
                        cfi->chips[i].buffer_write_time_max =
                                1<<(cfi->cfiq->BufWriteTimeoutTyp +
                                    cfi->cfiq->BufWriteTimeoutMax);

                if (cfi->cfiq->BlockEraseTimeoutTyp &&
                    cfi->cfiq->BlockEraseTimeoutMax)
                        cfi->chips[i].erase_time_max =
                                1000<<(cfi->cfiq->BlockEraseTimeoutTyp +
                                       cfi->cfiq->BlockEraseTimeoutMax);
                else
                        cfi->chips[i].erase_time_max = 2000000 * 8;

                cfi->chips[i].ref_point_counter = 0;
                init_waitqueue_head(&(cfi->chips[i].wq));
        }

        map->fldrv = &cfi_intelext_chipdrv;

        return cfi_intelext_setup(mtd);
}
struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
EXPORT_SYMBOL_GPL(cfi_cmdset_0200);

static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long offset = 0;
        int i,j;
        unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

        //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

        mtd->size = devsize * cfi->numchips;

        mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
        mtd->eraseregions = kcalloc(mtd->numeraseregions,
                                    sizeof(struct mtd_erase_region_info),
                                    GFP_KERNEL);
        if (!mtd->eraseregions)
                goto setup_err;

        for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
                unsigned long ernum, ersize;
                ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
                ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

                if (mtd->erasesize < ersize) {
                        mtd->erasesize = ersize;
                }
                for (j=0; j<cfi->numchips; j++) {
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL);
                        if (!mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap)
                                goto setup_err;
                }
                offset += (ersize * ernum);
        }

        if (offset != devsize) {
                /* Argh */
                printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
                goto setup_err;
        }

        for (i=0; i<mtd->numeraseregions;i++){
                printk(KERN_DEBUG "erase region %d: offset=0x%llx,size=0x%x,blocks=%d\n",
                       i,(unsigned long long)mtd->eraseregions[i].offset,
                       mtd->eraseregions[i].erasesize,
                       mtd->eraseregions[i].numblocks);
        }

#ifdef CONFIG_MTD_OTP
        mtd->_read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
        mtd->_read_user_prot_reg = cfi_intelext_read_user_prot_reg;
        mtd->_write_user_prot_reg = cfi_intelext_write_user_prot_reg;
        mtd->_lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
        mtd->_get_fact_prot_info = cfi_intelext_get_fact_prot_info;
        mtd->_get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

        /* This function has the potential to distort the reality
           a bit and therefore should be called last. */
        if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
                goto setup_err;

        __module_get(THIS_MODULE);
        register_reboot_notifier(&mtd->reboot_notifier);
        return mtd;

 setup_err:
        if (mtd->eraseregions)
                for (i=0; i<cfi->cfiq->NumEraseRegions; i++)
                        for (j=0; j<cfi->numchips; j++)
                                kfree(mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap);
        kfree(mtd->eraseregions);
        kfree(mtd);
        kfree(cfi->cmdset_priv);
        return NULL;
}

static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
                                        struct cfi_private **pcfi)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = *pcfi;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        /*
         * Probing of multi-partition flash chips.
         *
         * To support multiple partitions when available, we simply arrange
         * for each of them to have their own flchip structure even if they
         * are on the same physical chip.  This means completely recreating
         * a new cfi_private structure right here which is a blatant code
         * layering violation, but this is still the least intrusive
         * arrangement at this point. This can be rearranged in the future
         * if someone feels motivated enough.  --nico
         */
        if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
            && extp->FeatureSupport & (1 << 9)) {
                int offs = 0;
                struct cfi_private *newcfi;
                struct flchip *chip;
                struct flchip_shared *shared;
                int numregions, numparts, partshift, numvirtchips, i, j;

                /* Protection Register info */
                if (extp->NumProtectionFields)
                        offs = (extp->NumProtectionFields - 1) *
                               sizeof(struct cfi_intelext_otpinfo);

                /* Burst Read info */
                offs += extp->extra[offs+1]+2;

                /* Number of partition regions */
                numregions = extp->extra[offs];
                offs += 1;

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        offs += 2;

                /* Number of hardware partitions */
                numparts = 0;
                for (i = 0; i < numregions; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
                        numparts += rinfo->NumIdentPartitions;
                        offs += sizeof(*rinfo)
                                + (rinfo->NumBlockTypes - 1) *
                                  sizeof(struct cfi_intelext_blockinfo);
                }

                if (!numparts)
                        numparts = 1;

                /* Programming Region info */
                if (extp->MinorVersion >= '4') {
                        struct cfi_intelext_programming_regioninfo *prinfo;
                        prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
                        mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
                        mtd->flags &= ~MTD_BIT_WRITEABLE;
                        printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
                               map->name, mtd->writesize,
                               cfi->interleave * prinfo->ControlValid,
                               cfi->interleave * prinfo->ControlInvalid);
                }

                /*
                 * All functions below currently rely on all chips having
                 * the same geometry so we'll just assume that all hardware
                 * partitions are of the same size too.
                 */
                partshift = cfi->chipshift - __ffs(numparts);

                if ((1 << partshift) < mtd->erasesize) {
                        printk(KERN_ERR
                                "%s: bad number of hw partitions (%d)\n",
                                __func__, numparts);
                        return -EINVAL;
                }

                numvirtchips = cfi->numchips * numparts;
                newcfi = kmalloc(struct_size(newcfi, chips, numvirtchips),
                                 GFP_KERNEL);
                if (!newcfi)
                        return -ENOMEM;
                shared = kmalloc_array(cfi->numchips,
                                       sizeof(struct flchip_shared),
                                       GFP_KERNEL);
                if (!shared) {
                        kfree(newcfi);
                        return -ENOMEM;
                }
                memcpy(newcfi, cfi, sizeof(struct cfi_private));
                newcfi->numchips = numvirtchips;
                newcfi->chipshift = partshift;

                chip = &newcfi->chips[0];
                for (i = 0; i < cfi->numchips; i++) {
                        shared[i].writing = shared[i].erasing = NULL;
                        mutex_init(&shared[i].lock);
                        for (j = 0; j < numparts; j++) {
                                *chip = cfi->chips[i];
                                chip->start += j << partshift;
                                chip->priv = &shared[i];
                                /* those should be reset too since
                                   they create memory references. */
                                init_waitqueue_head(&chip->wq);
                                mutex_init(&chip->mutex);
                                chip++;
                        }
                }

                printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
                                  "--> %d partitions of %d KiB\n",
                                  map->name, cfi->numchips, cfi->interleave,
                                  newcfi->numchips, 1<<(newcfi->chipshift-10));

                map->fldrv_priv = newcfi;
                *pcfi = newcfi;
                kfree(cfi);
        }

        return 0;
}
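
/*
 * A worked example of the partition arithmetic above, assuming one 64MiB
 * chip (chipshift = 26) that advertises 4 identical hardware partitions:
 * partshift = 26 - __ffs(4) = 24, so each virtual chip spans 1 << 24 =
 * 16MiB and numvirtchips = 1 * 4 = 4.  All four flchip structures point at
 * the same flchip_shared, which is what later lets get_chip()/put_chip()
 * serialize program/erase operations that are global to the physical die.
 */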

/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */
static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        DECLARE_WAITQUEUE(wait, current);
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
        unsigned long timeo = jiffies + HZ;

        /* Prevent setting state FL_SYNCING for chip in suspended state. */
        if (mode == FL_SYNCING && chip->oldstate != FL_READY)
                goto sleep;

        switch (chip->state) {

        case FL_STATUS:
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        /* At this point we're fine with write operations
                           in other partitions as they don't conflict. */
                        if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
                                break;

                        mutex_unlock(&chip->mutex);
                        cfi_udelay(1);
                        mutex_lock(&chip->mutex);
                        /* Someone else might have been playing with it. */
                        return -EAGAIN;
                }
                fallthrough;
        case FL_READY:
        case FL_CFI_QUERY:
        case FL_JEDEC_QUERY:
                return 0;

        case FL_ERASING:
                if (!cfip ||
                    !(cfip->FeatureSupport & 2) ||
                    !(mode == FL_READY || mode == FL_POINT ||
                     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
                        goto sleep;

                /* Do not allow suspend if the read/write targets the
                   block being erased */
                if ((adr & chip->in_progress_block_mask) ==
                    chip->in_progress_block_addr)
                        goto sleep;

                /* do not suspend erases of small EBs; buggy Micron chips */
                if (cfi_is_micron_28F00AP30(cfi, chip) &&
                    (chip->in_progress_block_mask == ~(0x8000-1)))
                        goto sleep;

                /* Erase suspend */
                map_write(map, CMD(0xB0), chip->in_progress_block_addr);

                /* If the flash has finished erasing, then 'erase suspend'
                 * appears to make some (28F320) flash devices switch to
                 * 'read' mode.  Make sure that we switch to 'read status'
                 * mode so we get the right data. --rmk
                 */
                map_write(map, CMD(0x70), chip->in_progress_block_addr);
                chip->oldstate = FL_ERASING;
                chip->state = FL_ERASE_SUSPENDING;
                chip->erase_suspended = 1;
                for (;;) {
                        status = map_read(map, chip->in_progress_block_addr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        if (time_after(jiffies, timeo)) {
                                /* Urgh. Resume and pretend we weren't here.
                                 * Make sure we're in 'read status' mode if it had finished */
                                put_chip(map, chip, adr);
                                printk(KERN_ERR "%s: Chip not ready after erase "
                                       "suspended: status = 0x%lx\n", map->name, status.x[0]);
                                return -EIO;
                        }

                        mutex_unlock(&chip->mutex);
                        cfi_udelay(1);
                        mutex_lock(&chip->mutex);
                        /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
                           So we can just loop here. */
                }
                chip->state = FL_STATUS;
                return 0;

        case FL_XIP_WHILE_ERASING:
                if (mode != FL_READY && mode != FL_POINT &&
                    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
                        goto sleep;
                chip->oldstate = chip->state;
                chip->state = FL_READY;
                return 0;

        case FL_SHUTDOWN:
                /* The machine is rebooting now, so no one can get the chip anymore */
                return -EIO;
        case FL_POINT:
                /* Only if there's no operation suspended... */
                if (mode == FL_READY && chip->oldstate == FL_READY)
                        return 0;
                fallthrough;
        default:
        sleep:
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);
                mutex_unlock(&chip->mutex);
                schedule();
                remove_wait_queue(&chip->wq, &wait);
                mutex_lock(&chip->mutex);
                return -EAGAIN;
        }
}
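
/*
 * For reference, the status register bits polled above follow the
 * Intel/Sharp command set (a summary, not exhaustive): SR.7 (0x80) means
 * the write state machine is ready, SR.6 (0x40) erase suspended, SR.2
 * (0x04) program suspended, and SR.0 (0x01) reports partition status on
 * multi-partition devices -- hence status_OK = CMD(0x80) and status_PWS =
 * CMD(0x01) at the top of chip_ready().
 */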

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        int ret;
        DECLARE_WAITQUEUE(wait, current);

 retry:
        if (chip->priv &&
            (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE
            || mode == FL_SHUTDOWN) && chip->state != FL_SYNCING) {
                /*
                 * OK. We have possibility for contention on the write/erase
                 * operations which are global to the real chip and not per
                 * partition.  So let's fight it over in the partition which
                 * currently has authority on the operation.
                 *
                 * The rules are as follows:
                 *
                 * - any write operation must own shared->writing.
                 *
                 * - any erase operation must own _both_ shared->writing and
                 *   shared->erasing.
                 *
                 * - contention arbitration is handled in the owner's context.
                 *
                 * The 'shared' struct can be read and/or written only when
                 * its lock is taken.
                 */
                struct flchip_shared *shared = chip->priv;
                struct flchip *contender;
                mutex_lock(&shared->lock);
                contender = shared->writing;
                if (contender && contender != chip) {
                        /*
                         * The engine to perform desired operation on this
                         * partition is already in use by someone else.
                         * Let's fight over it in the context of the chip
                         * currently using it.  If it is possible to suspend,
                         * that other partition will do just that, otherwise
                         * it'll happily send us to sleep.  In any case, when
                         * get_chip returns success we're clear to go ahead.
                         */
                        ret = mutex_trylock(&contender->mutex);
                        mutex_unlock(&shared->lock);
                        if (!ret)
                                goto retry;
                        mutex_unlock(&chip->mutex);
                        ret = chip_ready(map, contender, contender->start, mode);
                        mutex_lock(&chip->mutex);

                        if (ret == -EAGAIN) {
                                mutex_unlock(&contender->mutex);
                                goto retry;
                        }
                        if (ret) {
                                mutex_unlock(&contender->mutex);
                                return ret;
                        }
                        mutex_lock(&shared->lock);

                        /* We should not own chip if it is already
                         * in FL_SYNCING state. Put contender and retry. */
                        if (chip->state == FL_SYNCING) {
                                put_chip(map, contender, contender->start);
                                mutex_unlock(&contender->mutex);
                                goto retry;
                        }
                        mutex_unlock(&contender->mutex);
                }

                /* Check if we already have suspended erase
                 * on this chip. Sleep. */
                if (mode == FL_ERASING && shared->erasing
                    && shared->erasing->oldstate == FL_ERASING) {
                        mutex_unlock(&shared->lock);
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);
                        mutex_unlock(&chip->mutex);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);
                        mutex_lock(&chip->mutex);
                        goto retry;
                }

                /* We now own it */
                shared->writing = chip;
                if (mode == FL_ERASING)
                        shared->erasing = chip;
                mutex_unlock(&shared->lock);
        }
        ret = chip_ready(map, chip, adr, mode);
        if (ret == -EAGAIN)
                goto retry;

        return ret;
}
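
/*
 * A sketch of how the operation paths further below drive these helpers --
 * not a public API, just the internal calling convention:
 *
 *      mutex_lock(&chip->mutex);
 *      ret = get_chip(map, chip, adr, FL_WRITING);
 *      if (!ret) {
 *              ... issue command and poll status ...
 *              put_chip(map, chip, adr);
 *      }
 *      mutex_unlock(&chip->mutex);
 */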

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;

        if (chip->priv) {
                struct flchip_shared *shared = chip->priv;
                mutex_lock(&shared->lock);
                if (shared->writing == chip && chip->oldstate == FL_READY) {
                        /* We own the ability to write, but we're done */
                        shared->writing = shared->erasing;
                        if (shared->writing && shared->writing != chip) {
                                /* give back ownership to who we loaned it from */
                                struct flchip *loaner = shared->writing;
                                mutex_lock(&loaner->mutex);
                                mutex_unlock(&shared->lock);
                                mutex_unlock(&chip->mutex);
                                put_chip(map, loaner, loaner->start);
                                mutex_lock(&chip->mutex);
                                mutex_unlock(&loaner->mutex);
                                wake_up(&chip->wq);
                                return;
                        }
                        shared->erasing = NULL;
                        shared->writing = NULL;
                } else if (shared->erasing == chip && shared->writing != chip) {
                        /*
                         * We own the ability to erase without the ability
                         * to write, which means the erase was suspended
                         * and some other partition is currently writing.
                         * Don't let the switch below mess things up since
                         * we don't have ownership to resume anything.
                         */
                        mutex_unlock(&shared->lock);
                        wake_up(&chip->wq);
                        return;
                }
                mutex_unlock(&shared->lock);
        }

        switch(chip->oldstate) {
        case FL_ERASING:
                /* What if one interleaved chip has finished and the
                   other hasn't? The old code would leave the finished
                   one in READY mode. That's bad, and caused -EROFS
                   errors to be returned from do_erase_oneblock because
                   that's the only bit it checked for at the time.
                   As the state machine appears to explicitly allow
                   sending the 0x70 (Read Status) command to an erasing
                   chip and expecting it to be ignored, that's what we
                   do. */
                map_write(map, CMD(0xd0), chip->in_progress_block_addr);
                map_write(map, CMD(0x70), chip->in_progress_block_addr);
                chip->oldstate = FL_READY;
                chip->state = FL_ERASING;
                break;

        case FL_XIP_WHILE_ERASING:
                chip->state = chip->oldstate;
                chip->oldstate = FL_READY;
                break;

        case FL_READY:
        case FL_STATUS:
        case FL_JEDEC_QUERY:
                break;
        default:
                printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
        }
        wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
                        unsigned long adr)
{
        /* TODO: chips with no XIP use should ignore and return */
        (void) map_read(map, adr); /* ensure mmu mapping is up to date */
        local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
                                unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;
        if (chip->state != FL_POINT && chip->state != FL_READY) {
                map_write(map, CMD(0xff), adr);
                chip->state = FL_READY;
        }
        (void) map_read(map, adr);
        xip_iprefetch();
        local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_wait_for_operation() function is polling for both the given timeout
 * and pending (but still masked) hardware interrupts.  Whenever there is an
 * interrupt pending then the flash erase or write operation is suspended,
 * array mode restored and interrupts unmasked.  Task scheduling might also
 * happen at that point.  The CPU eventually returns from the interrupt or
 * the call to schedule() and the suspended flash operation is resumed for
 * the remainder of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

1156 static int __xipram xip_wait_for_operation(
1157                 struct map_info *map, struct flchip *chip,
1158                 unsigned long adr, unsigned int chip_op_time_max)
1159 {
1160         struct cfi_private *cfi = map->fldrv_priv;
1161         struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
1162         map_word status, OK = CMD(0x80);
1163         unsigned long usec, suspended, start, done;
1164         flstate_t oldstate, newstate;
1165
1166         start = xip_currtime();
1167         usec = chip_op_time_max;
1168         if (usec == 0)
1169                 usec = 500000;
1170         done = 0;
1171
1172         do {
1173                 cpu_relax();
1174                 if (xip_irqpending() && cfip &&
1175                     ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
1176                      (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
1177                     (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
1178                         /*
1179                          * Let's suspend the erase or write operation when
1180                          * supported.  Note that we currently don't try to
1181                          * suspend interleaved chips if there is already
1182                          * another operation suspended (imagine what happens
1183                          * when one chip was already done with the current
1184                          * operation while another chip suspended it, then
1185                          * we resume the whole thing at once).  Yes, it
1186                          * can happen!
1187                          */
1188                         usec -= done;
1189                         map_write(map, CMD(0xb0), adr);
1190                         map_write(map, CMD(0x70), adr);
1191                         suspended = xip_currtime();
1192                         do {
1193                                 if (xip_elapsed_since(suspended) > 100000) {
1194                                         /*
1195                                          * The chip doesn't want to suspend
1196                                          * after waiting for 100 msecs.
1197                                          * This is a critical error but there
1198                                          * is not much we can do here.
1199                                          */
1200                                         return -EIO;
1201                                 }
1202                                 status = map_read(map, adr);
1203                         } while (!map_word_andequal(map, status, OK, OK));
1204
1205                         /* Suspend succeeded */
1206                         oldstate = chip->state;
1207                         if (oldstate == FL_ERASING) {
1208                                 if (!map_word_bitsset(map, status, CMD(0x40)))
1209                                         break;
1210                                 newstate = FL_XIP_WHILE_ERASING;
1211                                 chip->erase_suspended = 1;
1212                         } else {
1213                                 if (!map_word_bitsset(map, status, CMD(0x04)))
1214                                         break;
1215                                 newstate = FL_XIP_WHILE_WRITING;
1216                                 chip->write_suspended = 1;
1217                         }
1218                         chip->state = newstate;
1219                         map_write(map, CMD(0xff), adr);
1220                         (void) map_read(map, adr);
1221                         xip_iprefetch();
1222                         local_irq_enable();
1223                         mutex_unlock(&chip->mutex);
1224                         xip_iprefetch();
1225                         cond_resched();
1226
1227                         /*
1228                          * We're back.  However someone else might have
1229                          * decided to go write to the chip if we are in
1230                          * a suspended erase state.  If so let's wait
1231                          * until it's done.
1232                          */
1233                         mutex_lock(&chip->mutex);
1234                         while (chip->state != newstate) {
1235                                 DECLARE_WAITQUEUE(wait, current);
1236                                 set_current_state(TASK_UNINTERRUPTIBLE);
1237                                 add_wait_queue(&chip->wq, &wait);
1238                                 mutex_unlock(&chip->mutex);
1239                                 schedule();
1240                                 remove_wait_queue(&chip->wq, &wait);
1241                                 mutex_lock(&chip->mutex);
1242                         }
1243                         /* Disallow XIP again */
1244                         local_irq_disable();
1245
1246                         /* Resume the write or erase operation */
1247                         map_write(map, CMD(0xd0), adr);
1248                         map_write(map, CMD(0x70), adr);
1249                         chip->state = oldstate;
1250                         start = xip_currtime();
1251                 } else if (usec >= 1000000/HZ) {
1252                         /*
1253                          * Try to save on CPU power when the waiting delay
1254                          * is at least one system timer tick period.
1255                          * No need to be extremely accurate here.
1256                          */
1257                         xip_cpu_idle();
1258                 }
1259                 status = map_read(map, adr);
1260                 done = xip_elapsed_since(start);
1261         } while (!map_word_andequal(map, status, OK, OK)
1262                  && done < usec);
1263
1264         return (done >= usec) ? -ETIME : 0;
1265 }
1266
1267 /*
1268  * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
1269  * the flash is actively programming or erasing since we have to poll for
1270  * the operation to complete anyway.  We can't do that in a generic way with
1271  * the operation to complete anyway.  We can't do that in a generic way with
1272  * an XIP setup, so do it before the actual flash operation in this case
1273  */
1274 #define XIP_INVAL_CACHED_RANGE(map, from, size)  \
1275         INVALIDATE_CACHED_RANGE(map, from, size)
1276
1277 #define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec, usec_max) \
1278         xip_wait_for_operation(map, chip, cmd_adr, usec_max)
1279
1280 #else
1281
1282 #define xip_disable(map, chip, adr)
1283 #define xip_enable(map, chip, adr)
1284 #define XIP_INVAL_CACHED_RANGE(x...)
1285 #define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation
1286
1287 static int inval_cache_and_wait_for_operation(
1288                 struct map_info *map, struct flchip *chip,
1289                 unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
1290                 unsigned int chip_op_time, unsigned int chip_op_time_max)
1291 {
1292         struct cfi_private *cfi = map->fldrv_priv;
1293         map_word status, status_OK = CMD(0x80);
1294         int chip_state = chip->state;
1295         unsigned int timeo, sleep_time, reset_timeo;
1296
1297         mutex_unlock(&chip->mutex);
1298         if (inval_len)
1299                 INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
1300         mutex_lock(&chip->mutex);
1301
1302         timeo = chip_op_time_max;
1303         if (!timeo)
1304                 timeo = 500000;
1305         reset_timeo = timeo;
1306         sleep_time = chip_op_time / 2;
1307
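        /* Polling strategy: operations shorter than a timer tick are
         * busy-waited in 1us steps; longer ones first sleep for half the
         * typical operation time, then in tick-sized chunks. */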
1308         for (;;) {
1309                 if (chip->state != chip_state) {
1310                         /* Someone's suspended the operation: sleep */
1311                         DECLARE_WAITQUEUE(wait, current);
1312                         set_current_state(TASK_UNINTERRUPTIBLE);
1313                         add_wait_queue(&chip->wq, &wait);
1314                         mutex_unlock(&chip->mutex);
1315                         schedule();
1316                         remove_wait_queue(&chip->wq, &wait);
1317                         mutex_lock(&chip->mutex);
1318                         continue;
1319                 }
1320
1321                 status = map_read(map, cmd_adr);
1322                 if (map_word_andequal(map, status, status_OK, status_OK))
1323                         break;
1324
1325                 if (chip->erase_suspended && chip_state == FL_ERASING)  {
1326                         /* Erase suspend occurred while sleeping: reset timeout */
1327                         timeo = reset_timeo;
1328                         chip->erase_suspended = 0;
1329                 }
1330                 if (chip->write_suspended && chip_state == FL_WRITING)  {
1331                         /* Write suspend occurred while sleeping: reset timeout */
1332                         timeo = reset_timeo;
1333                         chip->write_suspended = 0;
1334                 }
1335                 if (!timeo) {
1336                         map_write(map, CMD(0x70), cmd_adr);
1337                         chip->state = FL_STATUS;
1338                         return -ETIME;
1339                 }
1340
1341                 /* OK, still waiting. Drop the lock, wait a while and retry. */
1342                 mutex_unlock(&chip->mutex);
1343                 if (sleep_time >= 1000000/HZ) {
1344                         /*
1345                          * Half of the normal delay still remaining
1346                          * can be performed with a sleeping delay instead
1347                          * of busy waiting.
1348                          */
1349                         msleep(sleep_time/1000);
1350                         timeo -= sleep_time;
1351                         sleep_time = 1000000/HZ;
1352                 } else {
1353                         udelay(1);
1354                         cond_resched();
1355                         timeo--;
1356                 }
1357                 mutex_lock(&chip->mutex);
1358         }
1359
1360         /* Done and happy. */
1361         chip->state = FL_STATUS;
1362         return 0;
1363 }
1364
1365 #endif
1366
1367 #define WAIT_TIMEOUT(map, chip, adr, udelay, udelay_max) \
1368         INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay, udelay_max)
1369
1370
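/*
 * MTD "point" support: hand the caller a pointer straight into the
 * flash mapping so it can read without an intermediate copy.  The chip
 * must stay idle and in read-array mode for the window to remain valid,
 * so it is parked in FL_POINT and reference-counted until the matching
 * unpoint call.  A hypothetical caller sketch, via the MTD core:
 *
 *	size_t retlen;
 *	void *va;
 *
 *	if (!mtd_point(mtd, ofs, len, &retlen, &va, NULL)) {
 *		memcpy(dst, va, retlen);	// read straight from flash
 *		mtd_unpoint(mtd, ofs, retlen);
 *	}
 */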
1371 static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
1372 {
1373         unsigned long cmd_addr;
1374         struct cfi_private *cfi = map->fldrv_priv;
1375         int ret;
1376
1377         adr += chip->start;
1378
1379         /* Ensure cmd read/writes are aligned. */
1380         cmd_addr = adr & ~(map_bankwidth(map)-1);
1381
1382         mutex_lock(&chip->mutex);
1383
1384         ret = get_chip(map, chip, cmd_addr, FL_POINT);
1385
1386         if (!ret) {
1387                 if (chip->state != FL_POINT && chip->state != FL_READY)
1388                         map_write(map, CMD(0xff), cmd_addr);
1389
1390                 chip->state = FL_POINT;
1391                 chip->ref_point_counter++;
1392         }
1393         mutex_unlock(&chip->mutex);
1394
1395         return ret;
1396 }
1397
1398 static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len,
1399                 size_t *retlen, void **virt, resource_size_t *phys)
1400 {
1401         struct map_info *map = mtd->priv;
1402         struct cfi_private *cfi = map->fldrv_priv;
1403         unsigned long ofs, last_end = 0;
1404         int chipnum;
1405         int ret;
1406
1407         if (!map->virt)
1408                 return -EINVAL;
1409
1410         /* Now lock the chip(s) to POINT state */
1411
1412         /* ofs: offset within the first chip at which the first read should start */
1413         chipnum = (from >> cfi->chipshift);
1414         ofs = from - (chipnum << cfi->chipshift);
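        /* Illustrative example: with 16MiB chips (chipshift == 24),
         * from = 0x1800000 yields chipnum 1 and ofs 0x800000. */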
1415
1416         *virt = map->virt + cfi->chips[chipnum].start + ofs;
1417         if (phys)
1418                 *phys = map->phys + cfi->chips[chipnum].start + ofs;
1419
1420         while (len) {
1421                 unsigned long thislen;
1422
1423                 if (chipnum >= cfi->numchips)
1424                         break;
1425
1426                 /* We cannot point across chips that are virtually disjoint */
1427                 if (!last_end)
1428                         last_end = cfi->chips[chipnum].start;
1429                 else if (cfi->chips[chipnum].start != last_end)
1430                         break;
1431
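                /* Clamp this transfer at the chip boundary; e.g. with
                 * chipshift 24, ofs 0xff0000 and len 0x20000 the first
                 * chip only takes thislen = 0x10000 (illustrative). */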
1432                 if ((len + ofs - 1) >> cfi->chipshift)
1433                         thislen = (1<<cfi->chipshift) - ofs;
1434                 else
1435                         thislen = len;
1436
1437                 ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
1438                 if (ret)
1439                         break;
1440
1441                 *retlen += thislen;
1442                 len -= thislen;
1443
1444                 ofs = 0;
1445                 last_end += 1 << cfi->chipshift;
1446                 chipnum++;
1447         }
1448         return 0;
1449 }
1450
1451 static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
1452 {
1453         struct map_info *map = mtd->priv;
1454         struct cfi_private *cfi = map->fldrv_priv;
1455         unsigned long ofs;
1456         int chipnum, err = 0;
1457
1458         /* Now release the chip(s) from POINT state */
1459
1460         /* ofs: offset within the first chip at which the first read should start */
1461         chipnum = (from >> cfi->chipshift);
1462         ofs = from - (chipnum << cfi->chipshift);
1463
1464         while (len && !err) {
1465                 unsigned long thislen;
1466                 struct flchip *chip;
1467
1468                 chip = &cfi->chips[chipnum];
1469                 if (chipnum >= cfi->numchips)
1470                         break;
1471
1472                 if ((len + ofs - 1) >> cfi->chipshift)
1473                         thislen = (1<<cfi->chipshift) - ofs;
1474                 else
1475                         thislen = len;
1476
1477                 mutex_lock(&chip->mutex);
1478                 if (chip->state == FL_POINT) {
1479                         chip->ref_point_counter--;
1480                         if (chip->ref_point_counter == 0)
1481                                 chip->state = FL_READY;
1482                 } else {
1483                         printk(KERN_ERR "%s: Error: unpoint called on a non-pointed region\n", map->name);
1484                         err = -EINVAL;
1485                 }
1486
1487                 put_chip(map, chip, chip->start);
1488                 mutex_unlock(&chip->mutex);
1489
1490                 len -= thislen;
1491                 ofs = 0;
1492                 chipnum++;
1493         }
1494
1495         return err;
1496 }
1497
1498 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
1499 {
1500         unsigned long cmd_addr;
1501         struct cfi_private *cfi = map->fldrv_priv;
1502         int ret;
1503
1504         adr += chip->start;
1505
1506         /* Ensure cmd read/writes are aligned. */
1507         cmd_addr = adr & ~(map_bankwidth(map)-1);
1508
1509         mutex_lock(&chip->mutex);
1510         ret = get_chip(map, chip, cmd_addr, FL_READY);
1511         if (ret) {
1512                 mutex_unlock(&chip->mutex);
1513                 return ret;
1514         }
1515
1516         if (chip->state != FL_POINT && chip->state != FL_READY) {
1517                 map_write(map, CMD(0xff), cmd_addr);
1518
1519                 chip->state = FL_READY;
1520         }
1521
1522         map_copy_from(map, buf, adr, len);
1523
1524         put_chip(map, chip, cmd_addr);
1525
1526         mutex_unlock(&chip->mutex);
1527         return 0;
1528 }
1529
1530 static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1531 {
1532         struct map_info *map = mtd->priv;
1533         struct cfi_private *cfi = map->fldrv_priv;
1534         unsigned long ofs;
1535         int chipnum;
1536         int ret = 0;
1537
1538         /* ofs: offset within the first chip at which the first read should start */
1539         chipnum = (from >> cfi->chipshift);
1540         ofs = from - (chipnum << cfi->chipshift);
1541
1542         while (len) {
1543                 unsigned long thislen;
1544
1545                 if (chipnum >= cfi->numchips)
1546                         break;
1547
1548                 if ((len + ofs - 1) >> cfi->chipshift)
1549                         thislen = (1<<cfi->chipshift) - ofs;
1550                 else
1551                         thislen = len;
1552
1553                 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1554                 if (ret)
1555                         break;
1556
1557                 *retlen += thislen;
1558                 len -= thislen;
1559                 buf += thislen;
1560
1561                 ofs = 0;
1562                 chipnum++;
1563         }
1564         return ret;
1565 }
1566
1567 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1568                                      unsigned long adr, map_word datum, int mode)
1569 {
1570         struct cfi_private *cfi = map->fldrv_priv;
1571         map_word status, write_cmd;
1572         int ret;
1573
1574         adr += chip->start;
1575
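        /* Word program is 0x40 in the standard command set and 0x41 in the
         * Intel Performance command set; OTP registers are programmed with
         * the dedicated 0xc0 protection program command. */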
1576         switch (mode) {
1577         case FL_WRITING:
1578                 write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0x40) : CMD(0x41);
1579                 break;
1580         case FL_OTP_WRITE:
1581                 write_cmd = CMD(0xc0);
1582                 break;
1583         default:
1584                 return -EINVAL;
1585         }
1586
1587         mutex_lock(&chip->mutex);
1588         ret = get_chip(map, chip, adr, mode);
1589         if (ret) {
1590                 mutex_unlock(&chip->mutex);
1591                 return ret;
1592         }
1593
1594         XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1595         ENABLE_VPP(map);
1596         xip_disable(map, chip, adr);
1597         map_write(map, write_cmd, adr);
1598         map_write(map, datum, adr);
1599         chip->state = mode;
1600
1601         ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1602                                    adr, map_bankwidth(map),
1603                                    chip->word_write_time,
1604                                    chip->word_write_time_max);
1605         if (ret) {
1606                 xip_enable(map, chip, adr);
1607                 printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
1608                 goto out;
1609         }
1610
1611         /* check for errors: 0x1a masks SR.4 (program failure), SR.3 (VPP low) and SR.1 (block locked) */
1612         status = map_read(map, adr);
1613         if (map_word_bitsset(map, status, CMD(0x1a))) {
1614                 unsigned long chipstatus = MERGESTATUS(status);
1615
1616                 /* reset status */
1617                 map_write(map, CMD(0x50), adr);
1618                 map_write(map, CMD(0x70), adr);
1619                 xip_enable(map, chip, adr);
1620
1621                 if (chipstatus & 0x02) {
1622                         ret = -EROFS;
1623                 } else if (chipstatus & 0x08) {
1624                         printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
1625                         ret = -EIO;
1626                 } else {
1627                         printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
1628                         ret = -EINVAL;
1629                 }
1630
1631                 goto out;
1632         }
1633
1634         xip_enable(map, chip, adr);
1635  out:   DISABLE_VPP(map);
1636         put_chip(map, chip, adr);
1637         mutex_unlock(&chip->mutex);
1638         return ret;
1639 }
1640
1641
1642 static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
1643 {
1644         struct map_info *map = mtd->priv;
1645         struct cfi_private *cfi = map->fldrv_priv;
1646         int ret;
1647         int chipnum;
1648         unsigned long ofs;
1649
1650         chipnum = to >> cfi->chipshift;
1651         ofs = to - (chipnum << cfi->chipshift);
1652
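        /* The write proceeds in three stages: a 0xff-padded partial word
         * at the head, whole bus-width words in the middle, and a padded
         * partial word at the tail.  0xff padding is safe since programming
         * only clears bits, leaving neighbouring data untouched. */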
1653         /* If the start is not bus-aligned, do a padded partial word write first */
1654         if (ofs & (map_bankwidth(map)-1)) {
1655                 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1656                 int gap = ofs - bus_ofs;
1657                 int n;
1658                 map_word datum;
1659
1660                 n = min_t(int, len, map_bankwidth(map)-gap);
1661                 datum = map_word_ff(map);
1662                 datum = map_word_load_partial(map, datum, buf, gap, n);
1663
1664                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1665                                                bus_ofs, datum, FL_WRITING);
1666                 if (ret)
1667                         return ret;
1668
1669                 len -= n;
1670                 ofs += n;
1671                 buf += n;
1672                 (*retlen) += n;
1673
1674                 if (ofs >> cfi->chipshift) {
1675                         chipnum++;
1676                         ofs = 0;
1677                         if (chipnum == cfi->numchips)
1678                                 return 0;
1679                 }
1680         }
1681
1682         while(len >= map_bankwidth(map)) {
1683                 map_word datum = map_word_load(map, buf);
1684
1685                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1686                                        ofs, datum, FL_WRITING);
1687                 if (ret)
1688                         return ret;
1689
1690                 ofs += map_bankwidth(map);
1691                 buf += map_bankwidth(map);
1692                 (*retlen) += map_bankwidth(map);
1693                 len -= map_bankwidth(map);
1694
1695                 if (ofs >> cfi->chipshift) {
1696                         chipnum++;
1697                         ofs = 0;
1698                         if (chipnum == cfi->numchips)
1699                                 return 0;
1700                 }
1701         }
1702
1703         if (len & (map_bankwidth(map)-1)) {
1704                 map_word datum;
1705
1706                 datum = map_word_ff(map);
1707                 datum = map_word_load_partial(map, datum, buf, 0, len);
1708
1709                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1710                                        ofs, datum, FL_WRITING);
1711                 if (ret)
1712                         return ret;
1713
1714                 (*retlen) += len;
1715         }
1716
1717         return 0;
1718 }
1719
1720
1721 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1722                                     unsigned long adr, const struct kvec **pvec,
1723                                     unsigned long *pvec_seek, int len)
1724 {
1725         struct cfi_private *cfi = map->fldrv_priv;
1726         map_word status, write_cmd, datum;
1727         unsigned long cmd_adr;
1728         int ret, wbufsize, word_gap, words;
1729         const struct kvec *vec;
1730         unsigned long vec_seek;
1731         unsigned long initial_adr;
1732         int initial_len = len;
1733
1734         wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1735         adr += chip->start;
1736         initial_adr = adr;
1737         cmd_adr = adr & ~(wbufsize-1);
1738
1739         /* Sharp LH28F640BF chips need the first address for the
1740          * Page Buffer Program command. See Table 5 of
1741          * LH28F320BF, LH28F640BF, LH28F128BF Series (Appendix FUM00701) */
1742         if (is_LH28F640BF(cfi))
1743                 cmd_adr = adr;
1744
1745         /* Let's determine this according to the interleave only once */
1746         write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0xe8) : CMD(0xe9);
1747
1748         mutex_lock(&chip->mutex);
1749         ret = get_chip(map, chip, cmd_adr, FL_WRITING);
1750         if (ret) {
1751                 mutex_unlock(&chip->mutex);
1752                 return ret;
1753         }
1754
1755         XIP_INVAL_CACHED_RANGE(map, initial_adr, initial_len);
1756         ENABLE_VPP(map);
1757         xip_disable(map, chip, cmd_adr);
1758
1759         /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
1760            [...], the device will not accept any more Write to Buffer commands".
1761            So we must check here and reset those bits if they're set. Otherwise
1762            we're just pissing in the wind */
1763         if (chip->state != FL_STATUS) {
1764                 map_write(map, CMD(0x70), cmd_adr);
1765                 chip->state = FL_STATUS;
1766         }
1767         status = map_read(map, cmd_adr);
1768         if (map_word_bitsset(map, status, CMD(0x30))) {
1769                 xip_enable(map, chip, cmd_adr);
1770                 printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
1771                 xip_disable(map, chip, cmd_adr);
1772                 map_write(map, CMD(0x50), cmd_adr);
1773                 map_write(map, CMD(0x70), cmd_adr);
1774         }
1775
1776         chip->state = FL_WRITING_TO_BUFFER;
1777         map_write(map, write_cmd, cmd_adr);
1778         ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0, 0);
1779         if (ret) {
1780                 /* Argh. Not ready for write to buffer */
1781                 map_word Xstatus = map_read(map, cmd_adr);
1782                 map_write(map, CMD(0x70), cmd_adr);
1783                 chip->state = FL_STATUS;
1784                 status = map_read(map, cmd_adr);
1785                 map_write(map, CMD(0x50), cmd_adr);
1786                 map_write(map, CMD(0x70), cmd_adr);
1787                 xip_enable(map, chip, cmd_adr);
1788                 printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
1789                                 map->name, Xstatus.x[0], status.x[0]);
1790                 goto out;
1791         }
1792
1793         /* Figure out the number of words to write */
1794         word_gap = (-adr & (map_bankwidth(map)-1));
1795         words = DIV_ROUND_UP(len - word_gap, map_bankwidth(map));
1796         if (!word_gap) {
1797                 words--;
1798         } else {
1799                 word_gap = map_bankwidth(map) - word_gap;
1800                 adr -= word_gap;
1801                 datum = map_word_ff(map);
1802         }
1803
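        /* At this point word_gap is the number of 0xff pad bytes ahead of
         * the payload and adr is word-aligned; e.g. bankwidth 4 with adr
         * ending in ...2 gives word_gap 2 (illustrative).  The word count
         * written below is encoded as N-1. */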
1804         /* Write length of data to come */
1805         map_write(map, CMD(words), cmd_adr);
1806
1807         /* Write data */
1808         vec = *pvec;
1809         vec_seek = *pvec_seek;
1810         do {
1811                 int n = map_bankwidth(map) - word_gap;
1812                 if (n > vec->iov_len - vec_seek)
1813                         n = vec->iov_len - vec_seek;
1814                 if (n > len)
1815                         n = len;
1816
1817                 if (!word_gap && len < map_bankwidth(map))
1818                         datum = map_word_ff(map);
1819
1820                 datum = map_word_load_partial(map, datum,
1821                                               vec->iov_base + vec_seek,
1822                                               word_gap, n);
1823
1824                 len -= n;
1825                 word_gap += n;
1826                 if (!len || word_gap == map_bankwidth(map)) {
1827                         map_write(map, datum, adr);
1828                         adr += map_bankwidth(map);
1829                         word_gap = 0;
1830                 }
1831
1832                 vec_seek += n;
1833                 if (vec_seek == vec->iov_len) {
1834                         vec++;
1835                         vec_seek = 0;
1836                 }
1837         } while (len);
1838         *pvec = vec;
1839         *pvec_seek = vec_seek;
1840
1841         /* GO GO GO */
1842         map_write(map, CMD(0xd0), cmd_adr);
1843         chip->state = FL_WRITING;
1844
1845         ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
1846                                    initial_adr, initial_len,
1847                                    chip->buffer_write_time,
1848                                    chip->buffer_write_time_max);
1849         if (ret) {
1850                 map_write(map, CMD(0x70), cmd_adr);
1851                 chip->state = FL_STATUS;
1852                 xip_enable(map, chip, cmd_adr);
1853                 printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
1854                 goto out;
1855         }
1856
1857         /* check for errors: 0x1a masks SR.4 (program failure), SR.3 (VPP low) and SR.1 (block locked) */
1858         status = map_read(map, cmd_adr);
1859         if (map_word_bitsset(map, status, CMD(0x1a))) {
1860                 unsigned long chipstatus = MERGESTATUS(status);
1861
1862                 /* reset status */
1863                 map_write(map, CMD(0x50), cmd_adr);
1864                 map_write(map, CMD(0x70), cmd_adr);
1865                 xip_enable(map, chip, cmd_adr);
1866
1867                 if (chipstatus & 0x02) {
1868                         ret = -EROFS;
1869                 } else if (chipstatus & 0x08) {
1870                         printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
1871                         ret = -EIO;
1872                 } else {
1873                         printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
1874                         ret = -EINVAL;
1875                 }
1876
1877                 goto out;
1878         }
1879
1880         xip_enable(map, chip, cmd_adr);
1881  out:   DISABLE_VPP(map);
1882         put_chip(map, chip, cmd_adr);
1883         mutex_unlock(&chip->mutex);
1884         return ret;
1885 }
1886
1887 static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
1888                                 unsigned long count, loff_t to, size_t *retlen)
1889 {
1890         struct map_info *map = mtd->priv;
1891         struct cfi_private *cfi = map->fldrv_priv;
1892         int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1893         int ret;
1894         int chipnum;
1895         unsigned long ofs, vec_seek, i;
1896         size_t len = 0;
1897
1898         for (i = 0; i < count; i++)
1899                 len += vecs[i].iov_len;
1900
1901         if (!len)
1902                 return 0;
1903
1904         chipnum = to >> cfi->chipshift;
1905         ofs = to - (chipnum << cfi->chipshift);
1906         vec_seek = 0;
1907
1908         do {
1909                 /* We must not cross write block boundaries */
1910                 int size = wbufsize - (ofs & (wbufsize-1));
1911
1912                 if (size > len)
1913                         size = len;
1914                 ret = do_write_buffer(map, &cfi->chips[chipnum],
1915                                       ofs, &vecs, &vec_seek, size);
1916                 if (ret)
1917                         return ret;
1918
1919                 ofs += size;
1920                 (*retlen) += size;
1921                 len -= size;
1922
1923                 if (ofs >> cfi->chipshift) {
1924                         chipnum++;
1925                         ofs = 0;
1926                         if (chipnum == cfi->numchips)
1927                                 return 0;
1928                 }
1929
1930                 /* Be nice and reschedule with the chip in a usable state for other
1931                    processes. */
1932                 cond_resched();
1933
1934         } while (len);
1935
1936         return 0;
1937 }
1938
1939 static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
1940                                        size_t len, size_t *retlen, const u_char *buf)
1941 {
1942         struct kvec vec;
1943
1944         vec.iov_base = (void *) buf;
1945         vec.iov_len = len;
1946
1947         return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
1948 }
1949
1950 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1951                                       unsigned long adr, int len, void *thunk)
1952 {
1953         struct cfi_private *cfi = map->fldrv_priv;
1954         map_word status;
1955         int retries = 3;
1956         int ret;
1957
1958         adr += chip->start;
1959
1960  retry:
1961         mutex_lock(&chip->mutex);
1962         ret = get_chip(map, chip, adr, FL_ERASING);
1963         if (ret) {
1964                 mutex_unlock(&chip->mutex);
1965                 return ret;
1966         }
1967
1968         XIP_INVAL_CACHED_RANGE(map, adr, len);
1969         ENABLE_VPP(map);
1970         xip_disable(map, chip, adr);
1971
1972         /* Clear the status register first */
1973         map_write(map, CMD(0x50), adr);
1974
1975         /* Now erase: block erase setup (0x20) followed by erase confirm (0xD0) */
1976         map_write(map, CMD(0x20), adr);
1977         map_write(map, CMD(0xD0), adr);
1978         chip->state = FL_ERASING;
1979         chip->erase_suspended = 0;
1980         chip->in_progress_block_addr = adr;
1981         chip->in_progress_block_mask = ~(len - 1);
1982
1983         ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1984                                    adr, len,
1985                                    chip->erase_time,
1986                                    chip->erase_time_max);
1987         if (ret) {
1988                 map_write(map, CMD(0x70), adr);
1989                 chip->state = FL_STATUS;
1990                 xip_enable(map, chip, adr);
1991                 printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
1992                 goto out;
1993         }
1994
1995         /* We've broken this before. It doesn't hurt to be safe */
1996         map_write(map, CMD(0x70), adr);
1997         chip->state = FL_STATUS;
1998         status = map_read(map, adr);
1999
2000         /* check for errors: 0x3a masks SR.5 (erase failure), SR.4, SR.3 (VPP low) and SR.1 (locked) */
2001         if (map_word_bitsset(map, status, CMD(0x3a))) {
2002                 unsigned long chipstatus = MERGESTATUS(status);
2003
2004                 /* Reset the error bits */
2005                 map_write(map, CMD(0x50), adr);
2006                 map_write(map, CMD(0x70), adr);
2007                 xip_enable(map, chip, adr);
2008
2009                 if ((chipstatus & 0x30) == 0x30) {
2010                         printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
2011                         ret = -EINVAL;
2012                 } else if (chipstatus & 0x02) {
2013                         /* Protection bit set */
2014                         ret = -EROFS;
2015                 } else if (chipstatus & 0x8) {
2016                         /* Voltage */
2017                         printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
2018                         ret = -EIO;
2019                 } else if (chipstatus & 0x20 && retries--) {
2020                         printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
2021                         DISABLE_VPP(map);
2022                         put_chip(map, chip, adr);
2023                         mutex_unlock(&chip->mutex);
2024                         goto retry;
2025                 } else {
2026                         printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
2027                         ret = -EIO;
2028                 }
2029
2030                 goto out;
2031         }
2032
2033         xip_enable(map, chip, adr);
2034  out:   DISABLE_VPP(map);
2035         put_chip(map, chip, adr);
2036         mutex_unlock(&chip->mutex);
2037         return ret;
2038 }
2039
2040 static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
2041 {
2042         return cfi_varsize_frob(mtd, do_erase_oneblock, instr->addr,
2043                                 instr->len, NULL);
2044 }
2045
2046 static void cfi_intelext_sync (struct mtd_info *mtd)
2047 {
2048         struct map_info *map = mtd->priv;
2049         struct cfi_private *cfi = map->fldrv_priv;
2050         int i;
2051         struct flchip *chip;
2052         int ret = 0;
2053
2054         for (i=0; !ret && i<cfi->numchips; i++) {
2055                 chip = &cfi->chips[i];
2056
2057                 mutex_lock(&chip->mutex);
2058                 ret = get_chip(map, chip, chip->start, FL_SYNCING);
2059
2060                 if (!ret) {
2061                         chip->oldstate = chip->state;
2062                         chip->state = FL_SYNCING;
2063                         /* No need to wake_up() on this state change -
2064                          * as the whole point is that nobody can do anything
2065                          * with the chip now anyway.
2066                          */
2067                 }
2068                 mutex_unlock(&chip->mutex);
2069         }
2070
2071         /* Unlock the chips again */
2072
2073         for (i--; i >= 0; i--) {
2074                 chip = &cfi->chips[i];
2075
2076                 mutex_lock(&chip->mutex);
2077
2078                 if (chip->state == FL_SYNCING) {
2079                         chip->state = chip->oldstate;
2080                         chip->oldstate = FL_READY;
2081                         wake_up(&chip->wq);
2082                 }
2083                 mutex_unlock(&chip->mutex);
2084         }
2085 }
2086
2087 static int __xipram do_getlockstatus_oneblock(struct map_info *map,
2088                                                 struct flchip *chip,
2089                                                 unsigned long adr,
2090                                                 int len, void *thunk)
2091 {
2092         struct cfi_private *cfi = map->fldrv_priv;
2093         int status, ofs_factor = cfi->interleave * cfi->device_type;
2094
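        /* Read-identifier mode (0x90): the block lock status word sits at
         * word offset 2 from the block base, hence adr + 2*ofs_factor. */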
2095         adr += chip->start;
2096         xip_disable(map, chip, adr+(2*ofs_factor));
2097         map_write(map, CMD(0x90), adr+(2*ofs_factor));
2098         chip->state = FL_JEDEC_QUERY;
2099         status = cfi_read_query(map, adr+(2*ofs_factor));
2100         xip_enable(map, chip, 0);
2101         return status;
2102 }
2103
2104 #ifdef DEBUG_LOCK_BITS
2105 static int __xipram do_printlockstatus_oneblock(struct map_info *map,
2106                                                 struct flchip *chip,
2107                                                 unsigned long adr,
2108                                                 int len, void *thunk)
2109 {
2110         printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
2111                adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk));
2112         return 0;
2113 }
2114 #endif
2115
2116 #define DO_XXLOCK_ONEBLOCK_LOCK         ((void *) 1)
2117 #define DO_XXLOCK_ONEBLOCK_UNLOCK       ((void *) 2)
2118
2119 static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
2120                                        unsigned long adr, int len, void *thunk)
2121 {
2122         struct cfi_private *cfi = map->fldrv_priv;
2123         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2124         int mdelay;
2125         int ret;
2126
2127         adr += chip->start;
2128
2129         mutex_lock(&chip->mutex);
2130         ret = get_chip(map, chip, adr, FL_LOCKING);
2131         if (ret) {
2132                 mutex_unlock(&chip->mutex);
2133                 return ret;
2134         }
2135
2136         ENABLE_VPP(map);
2137         xip_disable(map, chip, adr);
2138
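        /* Block lock-bit command sequence: setup (0x60) followed by either
         * set lock bit (0x01) or clear lock bits (0xd0). */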
2139         map_write(map, CMD(0x60), adr);
2140         if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
2141                 map_write(map, CMD(0x01), adr);
2142                 chip->state = FL_LOCKING;
2143         } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
2144                 map_write(map, CMD(0xD0), adr);
2145                 chip->state = FL_UNLOCKING;
2146         } else
2147                 BUG();
2148
2149         /*
2150          * If Instant Individual Block Locking is supported then there is
2151          * no need to delay.
2152          */
2153         /*
2154          * Unlocking may take up to 1.4 seconds on some Intel flashes. So
2155          * let's use a max of 1.5 seconds (1500ms) as timeout.
2156          *
2157          * See "Clear Block Lock-Bits Time" on page 40 in
2158          * "3 Volt Intel StrataFlash Memory" 28F128J3,28F640J3,28F320J3 manual
2159          * from February 2003
2160          */
2161         mdelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1500 : 0;
2162
2163         ret = WAIT_TIMEOUT(map, chip, adr, mdelay, mdelay * 1000);
2164         if (ret) {
2165                 map_write(map, CMD(0x70), adr);
2166                 chip->state = FL_STATUS;
2167                 xip_enable(map, chip, adr);
2168                 printk(KERN_ERR "%s: block unlock error: (status timeout)\n", map->name);
2169                 goto out;
2170         }
2171
2172         xip_enable(map, chip, adr);
2173  out:   DISABLE_VPP(map);
2174         put_chip(map, chip, adr);
2175         mutex_unlock(&chip->mutex);
2176         return ret;
2177 }
2178
2179 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2180 {
2181         int ret;
2182
2183 #ifdef DEBUG_LOCK_BITS
2184         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08llX\n",
2185                __func__, ofs, len);
2186         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2187                 ofs, len, NULL);
2188 #endif
2189
2190         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2191                 ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
2192
2193 #ifdef DEBUG_LOCK_BITS
2194         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2195                __func__, ret);
2196         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2197                 ofs, len, NULL);
2198 #endif
2199
2200         return ret;
2201 }
2202
2203 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2204 {
2205         int ret;
2206
2207 #ifdef DEBUG_LOCK_BITS
2208         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08llX\n",
2209                __func__, ofs, len);
2210         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2211                 ofs, len, NULL);
2212 #endif
2213
2214         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2215                                         ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
2216
2217 #ifdef DEBUG_LOCK_BITS
2218         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2219                __func__, ret);
2220         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2221                 ofs, len, NULL);
2222 #endif
2223
2224         return ret;
2225 }
2226
2227 static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
2228                                   uint64_t len)
2229 {
2230         return cfi_varsize_frob(mtd, do_getlockstatus_oneblock,
2231                                 ofs, len, NULL) ? 1 : 0;
2232 }
2233
2234 #ifdef CONFIG_MTD_OTP
2235
2236 typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
2237                         u_long data_offset, u_char *buf, u_int size,
2238                         u_long prot_offset, u_int groupno, u_int groupsize);
2239
2240 static int __xipram
2241 do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
2242             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2243 {
2244         struct cfi_private *cfi = map->fldrv_priv;
2245         int ret;
2246
2247         mutex_lock(&chip->mutex);
2248         ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
2249         if (ret) {
2250                 mutex_unlock(&chip->mutex);
2251                 return ret;
2252         }
2253
2254         /* let's ensure we're not reading back cached data from array mode */
2255         INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2256
2257         xip_disable(map, chip, chip->start);
2258         if (chip->state != FL_JEDEC_QUERY) {
2259                 map_write(map, CMD(0x90), chip->start);
2260                 chip->state = FL_JEDEC_QUERY;
2261         }
2262         map_copy_from(map, buf, chip->start + offset, size);
2263         xip_enable(map, chip, chip->start);
2264
2265         /* then ensure we don't keep OTP data in the cache */
2266         INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2267
2268         put_chip(map, chip, chip->start);
2269         mutex_unlock(&chip->mutex);
2270         return 0;
2271 }
2272
2273 static int
2274 do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
2275              u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2276 {
2277         int ret;
2278
2279         while (size) {
2280                 unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
2281                 int gap = offset - bus_ofs;
2282                 int n = min_t(int, size, map_bankwidth(map)-gap);
2283                 map_word datum = map_word_ff(map);
2284
2285                 datum = map_word_load_partial(map, datum, buf, gap, n);
2286                 ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
2287                 if (ret)
2288                         return ret;
2289
2290                 offset += n;
2291                 buf += n;
2292                 size -= n;
2293         }
2294
2295         return 0;
2296 }
2297
2298 static int
2299 do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
2300             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2301 {
2302         struct cfi_private *cfi = map->fldrv_priv;
2303         map_word datum;
2304
2305         /* make sure area matches group boundaries */
2306         if (size != grpsz)
2307                 return -EXDEV;
2308
2309         datum = map_word_ff(map);
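        /* Locking is one-way: start from all-ones and clear only this
         * group's bit.  Programming can only clear bits, so the 0xff
         * background leaves every other group's lock bit untouched. */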
2310         datum = map_word_clr(map, datum, CMD(1 << grpno));
2311         return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
2312 }
2313
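/*
 * Walk every OTP protection region of each (real) chip, applying
 * "action" to the byte range starting at "from".  A NULL action is the
 * special case that fills "buf" with otp_info records describing the
 * groups instead.  Offsets from the extended query table are in device
 * words, so they are scaled by interleave and device type before being
 * used as map offsets; interleave also means the query describes
 * virtual chips, hence the chip_step arithmetic over real devices.
 */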
2314 static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
2315                                  size_t *retlen, u_char *buf,
2316                                  otp_op_t action, int user_regs)
2317 {
2318         struct map_info *map = mtd->priv;
2319         struct cfi_private *cfi = map->fldrv_priv;
2320         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2321         struct flchip *chip;
2322         struct cfi_intelext_otpinfo *otp;
2323         u_long devsize, reg_prot_offset, data_offset;
2324         u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
2325         u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
2326         int ret;
2327
2328         *retlen = 0;
2329
2330         /* Check that we actually have some OTP registers */
2331         if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
2332                 return -ENODATA;
2333
2334         /* we need real chips here not virtual ones */
2335         devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
2336         chip_step = devsize >> cfi->chipshift;
2337         chip_num = 0;
2338
2339         /* Some chips have OTP located in the _top_ partition only.
2340            For example: Intel 28F256L18T (T means top-parameter device) */
2341         if (cfi->mfr == CFI_MFR_INTEL) {
2342                 switch (cfi->id) {
2343                 case 0x880b:
2344                 case 0x880c:
2345                 case 0x880d:
2346                         chip_num = chip_step - 1;
2347                 }
2348         }
2349
2350         for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
2351                 chip = &cfi->chips[chip_num];
2352                 otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];
2353
2354                 /* first OTP region */
2355                 field = 0;
2356                 reg_prot_offset = extp->ProtRegAddr;
2357                 reg_fact_groups = 1;
2358                 reg_fact_size = 1 << extp->FactProtRegSize;
2359                 reg_user_groups = 1;
2360                 reg_user_size = 1 << extp->UserProtRegSize;
2361
2362                 while (len > 0) {
2363                         /* flash geometry fixup */
2364                         data_offset = reg_prot_offset + 1;
2365                         data_offset *= cfi->interleave * cfi->device_type;
2366                         reg_prot_offset *= cfi->interleave * cfi->device_type;
2367                         reg_fact_size *= cfi->interleave;
2368                         reg_user_size *= cfi->interleave;
2369
2370                         if (user_regs) {
2371                                 groups = reg_user_groups;
2372                                 groupsize = reg_user_size;
2373                                 /* skip over factory reg area */
2374                                 groupno = reg_fact_groups;
2375                                 data_offset += reg_fact_groups * reg_fact_size;
2376                         } else {
2377                                 groups = reg_fact_groups;
2378                                 groupsize = reg_fact_size;
2379                                 groupno = 0;
2380                         }
2381
2382                         while (len > 0 && groups > 0) {
2383                                 if (!action) {
2384                                         /*
2385                                          * Special case: if action is NULL
2386                                          * we fill buf with otp_info records.
2387                                          */
2388                                         struct otp_info *otpinfo;
2389                                         map_word lockword;
2390                                         if (len <= sizeof(struct otp_info))
2391                                                 return -ENOSPC;
2392                                         len -= sizeof(struct otp_info);
2393                                         ret = do_otp_read(map, chip,
2394                                                           reg_prot_offset,
2395                                                           (u_char *)&lockword,
2396                                                           map_bankwidth(map),
2397                                                           0, 0, 0);
2398                                         if (ret)
2399                                                 return ret;
2400                                         otpinfo = (struct otp_info *)buf;
2401                                         otpinfo->start = from;
2402                                         otpinfo->length = groupsize;
2403                                         otpinfo->locked =
2404                                            !map_word_bitsset(map, lockword,
2405                                                              CMD(1 << groupno));
2406                                         from += groupsize;
2407                                         buf += sizeof(*otpinfo);
2408                                         *retlen += sizeof(*otpinfo);
2409                                 } else if (from >= groupsize) {
2410                                         from -= groupsize;
2411                                         data_offset += groupsize;
2412                                 } else {
2413                                         int size = groupsize;
2414                                         data_offset += from;
2415                                         size -= from;
2416                                         from = 0;
2417                                         if (size > len)
2418                                                 size = len;
2419                                         ret = action(map, chip, data_offset,
2420                                                      buf, size, reg_prot_offset,
2421                                                      groupno, groupsize);
2422                                         if (ret < 0)
2423                                                 return ret;
2424                                         buf += size;
2425                                         len -= size;
2426                                         *retlen += size;
2427                                         data_offset += size;
2428                                 }
2429                                 groupno++;
2430                                 groups--;
2431                         }
2432
2433                         /* next OTP region */
2434                         if (++field == extp->NumProtectionFields)
2435                                 break;
2436                         reg_prot_offset = otp->ProtRegAddr;
2437                         reg_fact_groups = otp->FactGroups;
2438                         reg_fact_size = 1 << otp->FactProtRegSize;
2439                         reg_user_groups = otp->UserGroups;
2440                         reg_user_size = 1 << otp->UserProtRegSize;
2441                         otp++;
2442                 }
2443         }
2444
2445         return 0;
2446 }
2447
2448 static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
2449                                            size_t len, size_t *retlen,
2450                                             u_char *buf)
2451 {
2452         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2453                                      buf, do_otp_read, 0);
2454 }
2455
2456 static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
2457                                            size_t len, size_t *retlen,
2458                                             u_char *buf)
2459 {
2460         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2461                                      buf, do_otp_read, 1);
2462 }
2463
2464 static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
2465                                             size_t len, size_t *retlen,
2466                                              u_char *buf)
2467 {
2468         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2469                                      buf, do_otp_write, 1);
2470 }
2471
2472 static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
2473                                            loff_t from, size_t len)
2474 {
2475         size_t retlen;
2476         return cfi_intelext_otp_walk(mtd, from, len, &retlen,
2477                                      NULL, do_otp_lock, 1);
2478 }
2479
2480 static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd, size_t len,
2481                                            size_t *retlen, struct otp_info *buf)
2483 {
2484         return cfi_intelext_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
2485                                      NULL, 0);
2486 }
2487
2488 static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd, size_t len,
2489                                            size_t *retlen, struct otp_info *buf)
2490 {
2491         return cfi_intelext_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
2492                                      NULL, 1);
2493 }
2494
2495 #endif
2496
2497 static void cfi_intelext_save_locks(struct mtd_info *mtd)
2498 {
2499         struct mtd_erase_region_info *region;
2500         int block, status, i;
2501         unsigned long adr;
2502         size_t len;
2503
2504         for (i = 0; i < mtd->numeraseregions; i++) {
2505                 region = &mtd->eraseregions[i];
2506                 if (!region->lockmap)
2507                         continue;
2508
2509                 for (block = 0; block < region->numblocks; block++) {
2510                         len = region->erasesize;
2511                         adr = region->offset + block * len;
2512
2513                         status = cfi_varsize_frob(mtd,
2514                                         do_getlockstatus_oneblock, adr, len, NULL);
2515                         if (status)
2516                                 set_bit(block, region->lockmap);
2517                         else
2518                                 clear_bit(block, region->lockmap);
2519                 }
2520         }
2521 }
2522
2523 static int cfi_intelext_suspend(struct mtd_info *mtd)
2524 {
2525         struct map_info *map = mtd->priv;
2526         struct cfi_private *cfi = map->fldrv_priv;
2527         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2528         int i;
2529         struct flchip *chip;
2530         int ret = 0;
2531
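        /* On flash that powers up with its blocks locked (MTD_POWERUP_LOCK,
         * plus instant individual block locking in FeatureSupport bit 5),
         * snapshot the lock state now so resume can restore it. */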
2532         if ((mtd->flags & MTD_POWERUP_LOCK)
2533             && extp && (extp->FeatureSupport & (1 << 5)))
2534                 cfi_intelext_save_locks(mtd);
2535
2536         for (i=0; !ret && i<cfi->numchips; i++) {
2537                 chip = &cfi->chips[i];
2538
2539                 mutex_lock(&chip->mutex);
2540
2541                 switch (chip->state) {
2542                 case FL_READY:
2543                 case FL_STATUS:
2544                 case FL_CFI_QUERY:
2545                 case FL_JEDEC_QUERY:
2546                         if (chip->oldstate == FL_READY) {
2547                                 /* place the chip in a known state before suspend */
2548                                 map_write(map, CMD(0xFF), cfi->chips[i].start);
2549                                 chip->oldstate = chip->state;
2550                                 chip->state = FL_PM_SUSPENDED;
2551                                 /* No need to wake_up() on this state change -
2552                                  * as the whole point is that nobody can do anything
2553                                  * with the chip now anyway.
2554                                  */
2555                         } else {
2556                                 /* There seems to be an operation pending. We must wait for it. */
2557                                 printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
2558                                 ret = -EAGAIN;
2559                         }
2560                         break;
2561                 default:
2562                         /* Should we actually wait? Once upon a time these routines weren't
2563                            allowed to. Or should we return -EAGAIN, because the upper layers
2564                            ought to have already shut down anything which was using the device
2565                            anyway? The latter for now. */
2566                         printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
2567                         ret = -EAGAIN;
2568                 case FL_PM_SUSPENDED:
2569                         break;
2570                 }
2571                 mutex_unlock(&chip->mutex);
2572         }
2573
2574         /* Unlock the chips again */
2575
2576         if (ret) {
2577                 for (i--; i >= 0; i--) {
2578                         chip = &cfi->chips[i];
2579
2580                         mutex_lock(&chip->mutex);
2581
2582                         if (chip->state == FL_PM_SUSPENDED) {
2583                                 /* No need to force it into a known state here,
2584                                    because we're returning failure, and it didn't
2585                                    get power cycled */
2586                                 chip->state = chip->oldstate;
2587                                 chip->oldstate = FL_READY;
2588                                 wake_up(&chip->wq);
2589                         }
2590                         mutex_unlock(&chip->mutex);
2591                 }
2592         }
2593
2594         return ret;
2595 }

static void cfi_intelext_restore_locks(struct mtd_info *mtd)
{
        struct mtd_erase_region_info *region;
        int block, i;
        unsigned long adr;
        size_t len;

        for (i = 0; i < mtd->numeraseregions; i++) {
                region = &mtd->eraseregions[i];
                if (!region->lockmap)
                        continue;

                for_each_clear_bit(block, region->lockmap, region->numblocks) {
                        len = region->erasesize;
                        adr = region->offset + block * len;
                        cfi_intelext_unlock(mtd, adr, len);
                }
        }
}
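
/*
 * This is the inverse of cfi_intelext_save_locks(): a set bit in
 * region->lockmap means the block was locked when we suspended, a
 * clear bit means it was unlocked.  Power-up-locking parts come back
 * with every block locked, so only the clear bits need re-unlocking.
 * A sketch of querying the saved state ('was_locked' is hypothetical,
 * not part of the driver):
 *
 *	static bool was_locked(struct mtd_erase_region_info *r, int blk)
 *	{
 *		return test_bit(blk, r->lockmap);
 *	}
 */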

static void cfi_intelext_resume(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;
        int i;
        struct flchip *chip;

        for (i = 0; i < cfi->numchips; i++) {
                chip = &cfi->chips[i];

                mutex_lock(&chip->mutex);

                /* Go to a known state. The chip may have been power cycled. */
                if (chip->state == FL_PM_SUSPENDED) {
                        /* Refresh the LH28F640BF Partition Configuration Register. */
                        fixup_LH28F640BF(mtd);
                        map_write(map, CMD(0xFF), chip->start);
                        chip->oldstate = chip->state = FL_READY;
                        wake_up(&chip->wq);
                }

                mutex_unlock(&chip->mutex);
        }

        if ((mtd->flags & MTD_POWERUP_LOCK)
            && extp && (extp->FeatureSupport & (1 << 5)))
                cfi_intelext_restore_locks(mtd);
}
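
/*
 * On these parts 0xFF is the Read Array opcode; CMD() (a wrapper
 * around cfi_build_cmd()) replicates it across the bus so that every
 * interleaved device sees the command.  For example, with two x16
 * chips interleaved on a 32-bit bus, the map_word written above would
 * carry 0x00FF00FF (value shown for illustration only; the exact
 * layout depends on bus width, interleave and endianness).
 */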

static int cfi_intelext_reset(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        int i, ret;

        for (i = 0; i < cfi->numchips; i++) {
                struct flchip *chip = &cfi->chips[i];

                /* Force the completion of any ongoing operation and
                   switch to array mode so that any bootloader in flash
                   is accessible for a soft reboot. */
                mutex_lock(&chip->mutex);
                ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
                if (!ret) {
                        map_write(map, CMD(0xff), chip->start);
                        chip->state = FL_SHUTDOWN;
                        put_chip(map, chip, chip->start);
                }
                mutex_unlock(&chip->mutex);
        }

        return 0;
}
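
/*
 * get_chip()/put_chip() (defined earlier in this file) implement the
 * driver's claim/release protocol for a chip; FL_SHUTDOWN is the mode
 * used on the reboot path.  The surrounding idiom, as a sketch with a
 * hypothetical 'adr':
 *
 *	mutex_lock(&chip->mutex);
 *	ret = get_chip(map, chip, adr, FL_SHUTDOWN);
 *	if (!ret) {
 *		... issue commands at adr ...
 *		put_chip(map, chip, adr);
 *	}
 *	mutex_unlock(&chip->mutex);
 *
 * Note that cfi_intelext_reset() deliberately returns 0 either way:
 * shutdown is best effort.
 */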

static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
                               void *v)
{
        struct mtd_info *mtd;

        mtd = container_of(nb, struct mtd_info, reboot_notifier);
        cfi_intelext_reset(mtd);
        return NOTIFY_DONE;
}
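
/*
 * A sketch of how this notifier is wired up at probe time (the actual
 * registration happens earlier in this file, during setup), using the
 * standard reboot-notifier API from <linux/reboot.h>:
 *
 *	mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;
 *	register_reboot_notifier(&mtd->reboot_notifier);
 *
 * The matching unregister_reboot_notifier() call is made in
 * cfi_intelext_destroy() below.
 */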

static void cfi_intelext_destroy(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct mtd_erase_region_info *region;
        int i;

        cfi_intelext_reset(mtd);
        unregister_reboot_notifier(&mtd->reboot_notifier);
        kfree(cfi->cmdset_priv);
        kfree(cfi->cfiq);
        kfree(cfi->chips[0].priv);
        kfree(cfi);
        for (i = 0; i < mtd->numeraseregions; i++) {
                region = &mtd->eraseregions[i];
                kfree(region->lockmap);
        }
        kfree(mtd->eraseregions);
}
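
/*
 * Teardown order matters here: the chips are first put back into array
 * mode (so a bootloader remains readable), the reboot notifier is then
 * removed before its mtd_info goes away, and only afterwards is the
 * private data freed.  The per-region lockmaps allocated for the
 * power-up-lock handling above are released together with the erase
 * region array.
 */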

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
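
/*
 * The aliases below let this module autoload for the compatible CFI
 * command sets 0x0003 and 0x0200 (the CFI probe code request_module()s
 * a name derived from the command-set ID), both of which this driver
 * also handles.
 */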
MODULE_ALIAS("cfi_cmdset_0003");
MODULE_ALIAS("cfi_cmdset_0200");