/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 *
 * 10/10/2000   Nicolas Pitre <nico@fluxnic.net>
 *      - completely revamped method functions so they are aware and
 *        independent of the flash geometry (buswidth, interleave, etc.)
 *      - scalability vs code size is completely set at compile-time
 *        (see include/linux/mtd/cfi.h for selection)
 *      - optimized write buffer method
 * 02/05/2002   Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 *      - reworked lock/unlock/erase support for var size flash
 * 21/03/2007   Rodolfo Giometti <giometti@linux.it>
 *      - auto unlock sectors on resume for auto locking flash on power up
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/bitmap.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>

/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

/* Debugging: set to 1 to disable buffered writes and force word writes */
#define FORCE_WORD_WRITE 0
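
/*
 * With FORCE_WORD_WRITE set to 1, the fixup_use_write_buffers() table
 * entry further down is compiled out, so mtd->_write stays at
 * cfi_intelext_write_words() even on chips that advertise a buffered
 * write timeout in their CFI query data.
 */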

/* Intel chips */
#define I82802AB        0x00ad
#define I82802AC        0x00ac
#define PF38F4476       0x881c
#define M28F00AP30      0x8963
/* STMicroelectronics chips */
#define M50LPW080       0x002F
#define M50FLW080A      0x0080
#define M50FLW080B      0x0081
/* Atmel chips */
#define AT49BV640D      0x02de
#define AT49BV640DT     0x02db
/* Sharp chips */
#define LH28F640BFHE_PTTL90     0x00b0
#define LH28F640BFHE_PBTL90     0x00b1
#define LH28F640BFHE_PTTL70A    0x00b2
#define LH28F640BFHE_PBTL70A    0x00b3

static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
                                  uint64_t len);
#ifdef CONFIG_MTD_OTP
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info(struct mtd_info *, size_t,
                                           size_t *, struct otp_info *);
static int cfi_intelext_get_user_prot_info(struct mtd_info *, size_t,
                                           size_t *, struct otp_info *);
#endif
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
                     size_t *retlen, void **virt, resource_size_t *phys);
static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len);

static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"


/*
 *  *********** SETUP AND PROBE BITS  ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
        .probe          = NULL, /* Not usable directly */
        .destroy        = cfi_intelext_destroy,
        .name           = "cfi_cmdset_0001",
        .module         = THIS_MODULE
};
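
/*
 * There is no .probe hook here: this command-set driver is not entered
 * through the chip-driver probe path.  Roughly, the generic CFI/JEDEC
 * probe code identifies the primary command set from the query data and
 * then calls cfi_cmdset_0001() below, so this structure mainly provides
 * the destroy hook and module ownership.
 */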

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
        int i;
        printk("  Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
        printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
        printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
        printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
        printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
        printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
        printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
        printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
        printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
        printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
        printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
        printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
        printk("     - Extended Flash Array:    %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
        for (i=11; i<32; i++) {
                if (extp->FeatureSupport & (1<<i))
                        printk("     - Unknown Bit %X:      supported\n", i);
        }

        printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
        printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
        for (i=1; i<8; i++) {
                if (extp->SuspendCmdSupport & (1<<i))
                        printk("     - Unknown Bit %X:               supported\n", i);
        }

        printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
        printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
        printk("     - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
        for (i=2; i<3; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }
        printk("     - EFA Lock Bit:         %s\n", extp->BlkStatusRegMask&16?"yes":"no");
        printk("     - EFA Lock-Down Bit:    %s\n", extp->BlkStatusRegMask&32?"yes":"no");
        for (i=6; i<16; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }

        printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
               extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
        if (extp->VppOptimal)
                printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
                       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

/* Atmel chips don't use the same PRI format as Intel chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;
        struct cfi_pri_atmel atmel_pri;
        uint32_t features = 0;

        /* Reverse byteswapping */
        extp->FeatureSupport = cpu_to_le32(extp->FeatureSupport);
        extp->BlkStatusRegMask = cpu_to_le16(extp->BlkStatusRegMask);
        extp->ProtRegAddr = cpu_to_le16(extp->ProtRegAddr);

        memcpy(&atmel_pri, extp, sizeof(atmel_pri));
        memset((char *)extp + 5, 0, sizeof(*extp) - 5);

        printk(KERN_INFO "Atmel Features: %02x\n", atmel_pri.Features);

        if (atmel_pri.Features & 0x01) /* chip erase supported */
                features |= (1<<0);
        if (atmel_pri.Features & 0x02) /* erase suspend supported */
                features |= (1<<1);
        if (atmel_pri.Features & 0x04) /* program suspend supported */
                features |= (1<<2);
        if (atmel_pri.Features & 0x08) /* simultaneous operations supported */
                features |= (1<<9);
        if (atmel_pri.Features & 0x20) /* page mode read supported */
                features |= (1<<7);
        if (atmel_pri.Features & 0x40) /* queued erase supported */
                features |= (1<<4);
        if (atmel_pri.Features & 0x80) /* Protection bits supported */
                features |= (1<<6);

        extp->FeatureSupport = features;

        /* burst write mode not supported */
        cfi->cfiq->BufWriteTimeoutTyp = 0;
        cfi->cfiq->BufWriteTimeoutMax = 0;
}

static void fixup_at49bv640dx_lock(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        cfip->FeatureSupport |= (1 << 5);
        mtd->flags |= MTD_POWERUP_LOCK;
}

#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel StrataFlash chips prior to FPO revision C have bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        printk(KERN_WARNING "cfi_cmdset_0001: Suspend erase on write disabled.\n");
        extp->SuspendCmdSupport &= ~1;
}
#endif

#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        if (cfip && (cfip->FeatureSupport&4)) {
                cfip->FeatureSupport &= ~4;
                printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
        }
}
#endif

static void fixup_st_m28w320ct(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        cfi->cfiq->BufWriteTimeoutTyp = 0;      /* Not supported */
        cfi->cfiq->BufWriteTimeoutMax = 0;      /* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        /* Note this is done after the region info is endian swapped */
        cfi->cfiq->EraseRegionInfo[1] =
                (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}

static int is_LH28F640BF(struct cfi_private *cfi)
{
        /* Sharp LH28F640BF Family */
        if (cfi->mfr == CFI_MFR_SHARP && (
            cfi->id == LH28F640BFHE_PTTL90 || cfi->id == LH28F640BFHE_PBTL90 ||
            cfi->id == LH28F640BFHE_PTTL70A || cfi->id == LH28F640BFHE_PBTL70A))
                return 1;
        return 0;
}

static void fixup_LH28F640BF(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        /* Reset the Partition Configuration Register on LH28F640BF
         * to a single partition (PCR = 0x000): PCR is embedded into A0-A15. */
        if (is_LH28F640BF(cfi)) {
                printk(KERN_INFO "Reset Partition Config. Register: 1 Partition of 4 planes\n");
                map_write(map, CMD(0x60), 0);
                map_write(map, CMD(0x04), 0);

                /* We have set one single partition thus
                 * Simultaneous Operations are not allowed */
                printk(KERN_INFO "cfi_cmdset_0001: Simultaneous Operations disabled\n");
                extp->FeatureSupport &= ~512;
        }
}

static void fixup_use_point(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        if (!mtd->_point && map_is_linear(map)) {
                mtd->_point   = cfi_intelext_point;
                mtd->_unpoint = cfi_intelext_unpoint;
        }
}

static void fixup_use_write_buffers(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        if (cfi->cfiq->BufWriteTimeoutTyp) {
                printk(KERN_INFO "Using buffer write method\n");
                mtd->_write = cfi_intelext_write_buffers;
                mtd->_writev = cfi_intelext_writev;
        }
}

/*
 * Some chips power-up with all sectors locked by default.
 */
static void fixup_unlock_powerup_lock(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        if (cfip->FeatureSupport&32) {
                printk(KERN_INFO "Using auto-unlock on power-up/resume\n");
                mtd->flags |= MTD_POWERUP_LOCK;
        }
}

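/*
 * The fixup tables below are walked by cfi_fixup() (see cfi_util.c):
 * each entry is matched against the probed chip, with CFI_MFR_ANY and
 * CFI_ID_ANY acting as wildcards, and every matching entry's hook is
 * called in table order.  Roughly, as a sketch of the matching logic:
 *
 *      for (f = fixups; f->fixup; f++)
 *              if ((f->mfr == CFI_MFR_ANY || f->mfr == cfi->mfr) &&
 *                  (f->id == CFI_ID_ANY || f->id == cfi->id))
 *                      f->fixup(mtd);
 */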
static struct cfi_fixup cfi_fixup_table[] = {
        { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
        { CFI_MFR_ATMEL, AT49BV640D, fixup_at49bv640dx_lock },
        { CFI_MFR_ATMEL, AT49BV640DT, fixup_at49bv640dx_lock },
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend },
#endif
#if !FORCE_WORD_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
#endif
        { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct },
        { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb },
        { CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock },
        { CFI_MFR_SHARP, CFI_ID_ANY, fixup_unlock_powerup_lock },
        { CFI_MFR_SHARP, CFI_ID_ANY, fixup_LH28F640BF },
        { 0, 0, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
        { CFI_MFR_INTEL, I82802AB,   fixup_use_fwh_lock },
        { CFI_MFR_INTEL, I82802AC,   fixup_use_fwh_lock },
        { CFI_MFR_ST,    M50LPW080,  fixup_use_fwh_lock },
        { CFI_MFR_ST,    M50FLW080A, fixup_use_fwh_lock },
        { CFI_MFR_ST,    M50FLW080B, fixup_use_fwh_lock },
        { 0, 0, NULL }
};

static struct cfi_fixup fixup_table[] = {
        /* The CFI vendor IDs and the JEDEC vendor IDs appear to be
         * common, and it looks like the device IDs are as well.  This
         * table covers all cases where we know that to be so.
         */
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point },
        { 0, 0, NULL }
};

static void cfi_fixup_major_minor(struct cfi_private *cfi,
                                                struct cfi_pri_intelext *extp)
{
        if (cfi->mfr == CFI_MFR_INTEL &&
                        cfi->id == PF38F4476 && extp->MinorVersion == '3')
                extp->MinorVersion = '1';
}

static int cfi_is_micron_28F00AP30(struct cfi_private *cfi, struct flchip *chip)
{
        /*
         * Micron (formerly Numonyx) 1Gbit bottom-boot chips are buggy
         * w.r.t. erase suspend on their small (0x8000) erase blocks.
         */
        if (cfi->mfr == CFI_MFR_INTEL && cfi->id == M28F00AP30)
                return 1;
        return 0;
}

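/*
 * The extended query table has a variable-length tail, so it is read
 * in at most two passes below: an initial read of sizeof(*extp) bytes,
 * from which the sizes of the optional OTP, burst-read and partition
 * region records are computed, and, when those records spill past the
 * initial buffer, a second full-size read via the "goto again" path.
 */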
static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp;
        unsigned int extra_size = 0;
        unsigned int extp_size = sizeof(*extp);

 again:
        extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
        if (!extp)
                return NULL;

        cfi_fixup_major_minor(cfi, extp);

        if (extp->MajorVersion != '1' ||
            (extp->MinorVersion < '0' || extp->MinorVersion > '5')) {
                printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
                       "version %c.%c.\n",  extp->MajorVersion,
                       extp->MinorVersion);
                kfree(extp);
                return NULL;
        }

        /* Do some byteswapping if necessary */
        extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
        extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
        extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

        if (extp->MinorVersion >= '0') {
                extra_size = 0;

                /* Protection Register info */
                if (extp->NumProtectionFields) {
                        struct cfi_intelext_otpinfo *otp =
                                (struct cfi_intelext_otpinfo *)&extp->extra[0];

                        extra_size += (extp->NumProtectionFields - 1) *
                                sizeof(struct cfi_intelext_otpinfo);

                        if (extp_size >= sizeof(*extp) + extra_size) {
                                int i;

                                /* Do some byteswapping if necessary */
                                for (i = 0; i < extp->NumProtectionFields - 1; i++) {
                                        otp->ProtRegAddr = le32_to_cpu(otp->ProtRegAddr);
                                        otp->FactGroups = le16_to_cpu(otp->FactGroups);
                                        otp->UserGroups = le16_to_cpu(otp->UserGroups);
                                        otp++;
                                }
                        }
                }
        }

        if (extp->MinorVersion >= '1') {
                /* Burst Read info */
                extra_size += 2;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                extra_size += extp->extra[extra_size - 1];
        }

        if (extp->MinorVersion >= '3') {
                int nb_parts, i;

                /* Number of hardware-partitions */
                extra_size += 1;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                nb_parts = extp->extra[extra_size - 1];

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        extra_size += 2;

                for (i = 0; i < nb_parts; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
                        extra_size += sizeof(*rinfo);
                        if (extp_size < sizeof(*extp) + extra_size)
                                goto need_more;
                        rinfo->NumIdentPartitions = le16_to_cpu(rinfo->NumIdentPartitions);
                        extra_size += (rinfo->NumBlockTypes - 1)
                                      * sizeof(struct cfi_intelext_blockinfo);
                }

                if (extp->MinorVersion >= '4')
                        extra_size += sizeof(struct cfi_intelext_programming_regioninfo);

                if (extp_size < sizeof(*extp) + extra_size) {
                        need_more:
                        extp_size = sizeof(*extp) + extra_size;
                        kfree(extp);
                        if (extp_size > 4096) {
                                printk(KERN_ERR
                                        "%s: cfi_pri_intelext is too fat\n",
                                        __func__);
                                return NULL;
                        }
                        goto again;
                }
        }

        return extp;
}

struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct mtd_info *mtd;
        int i;

        mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
        if (!mtd)
                return NULL;
        mtd->priv = map;
        mtd->type = MTD_NORFLASH;

        /* Fill in the default mtd operations */
        mtd->_erase   = cfi_intelext_erase_varsize;
        mtd->_read    = cfi_intelext_read;
        mtd->_write   = cfi_intelext_write_words;
        mtd->_sync    = cfi_intelext_sync;
        mtd->_lock    = cfi_intelext_lock;
        mtd->_unlock  = cfi_intelext_unlock;
        mtd->_is_locked = cfi_intelext_is_locked;
        mtd->_suspend = cfi_intelext_suspend;
        mtd->_resume  = cfi_intelext_resume;
        mtd->flags   = MTD_CAP_NORFLASH;
        mtd->name    = map->name;
        mtd->writesize = 1;
        mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;

        mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

        if (cfi->cfi_mode == CFI_MODE_CFI) {
                /*
                 * It's a real CFI chip, not one for which the probe
                 * routine faked a CFI structure. So we read the feature
                 * table from it.
                 */
                __u16 adr = primary ? cfi->cfiq->P_ADR : cfi->cfiq->A_ADR;
                struct cfi_pri_intelext *extp;

                extp = read_pri_intelext(map, adr);
                if (!extp) {
                        kfree(mtd);
                        return NULL;
                }

                /* Install our own private info structure */
                cfi->cmdset_priv = extp;

                cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
                /* Tell the user about it in lots of lovely detail */
                cfi_tell_features(extp);
#endif

                if (extp->SuspendCmdSupport & 1) {
                        printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
                }
        } else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
                /* Apply jedec specific fixups */
                cfi_fixup(mtd, jedec_fixup_table);
        }
        /* Apply generic fixups */
        cfi_fixup(mtd, fixup_table);

        for (i = 0; i < cfi->numchips; i++) {
                if (cfi->cfiq->WordWriteTimeoutTyp)
                        cfi->chips[i].word_write_time =
                                1<<cfi->cfiq->WordWriteTimeoutTyp;
                else
                        cfi->chips[i].word_write_time = 50000;

                if (cfi->cfiq->BufWriteTimeoutTyp)
                        cfi->chips[i].buffer_write_time =
                                1<<cfi->cfiq->BufWriteTimeoutTyp;
                /* No default; if it isn't specified, we won't use it */

                if (cfi->cfiq->BlockEraseTimeoutTyp)
                        cfi->chips[i].erase_time =
                                1000<<cfi->cfiq->BlockEraseTimeoutTyp;
                else
                        cfi->chips[i].erase_time = 2000000;

                if (cfi->cfiq->WordWriteTimeoutTyp &&
                    cfi->cfiq->WordWriteTimeoutMax)
                        cfi->chips[i].word_write_time_max =
                                1<<(cfi->cfiq->WordWriteTimeoutTyp +
                                    cfi->cfiq->WordWriteTimeoutMax);
                else
                        cfi->chips[i].word_write_time_max = 50000 * 8;

                if (cfi->cfiq->BufWriteTimeoutTyp &&
                    cfi->cfiq->BufWriteTimeoutMax)
                        cfi->chips[i].buffer_write_time_max =
                                1<<(cfi->cfiq->BufWriteTimeoutTyp +
                                    cfi->cfiq->BufWriteTimeoutMax);

                if (cfi->cfiq->BlockEraseTimeoutTyp &&
                    cfi->cfiq->BlockEraseTimeoutMax)
                        cfi->chips[i].erase_time_max =
                                1000<<(cfi->cfiq->BlockEraseTimeoutTyp +
                                       cfi->cfiq->BlockEraseTimeoutMax);
                else
                        cfi->chips[i].erase_time_max = 2000000 * 8;

                cfi->chips[i].ref_point_counter = 0;
                init_waitqueue_head(&(cfi->chips[i].wq));
        }

        map->fldrv = &cfi_intelext_chipdrv;

        return cfi_intelext_setup(mtd);
}
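
/*
 * Command set IDs 0x0003 and 0x0200 are Intel-compatible variants that
 * need no separate handling, so their probe entry points are plain
 * aliases of cfi_cmdset_0001().
 */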
struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
EXPORT_SYMBOL_GPL(cfi_cmdset_0200);

static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long offset = 0;
        int i, j;
        unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

        //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

        mtd->size = devsize * cfi->numchips;

        mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
        mtd->eraseregions = kcalloc(mtd->numeraseregions,
                                    sizeof(struct mtd_erase_region_info),
                                    GFP_KERNEL);
        if (!mtd->eraseregions)
                goto setup_err;

        for (i = 0; i < cfi->cfiq->NumEraseRegions; i++) {
                unsigned long ernum, ersize;
                ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
                ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

                if (mtd->erasesize < ersize) {
                        mtd->erasesize = ersize;
                }
                for (j = 0; j < cfi->numchips; j++) {
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL);
                        if (!mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap)
                                goto setup_err;
                }
                offset += (ersize * ernum);
        }

        if (offset != devsize) {
                /* Argh */
                printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
                goto setup_err;
        }

        for (i = 0; i < mtd->numeraseregions; i++) {
                printk(KERN_DEBUG "erase region %d: offset=0x%llx,size=0x%x,blocks=%d\n",
                       i, (unsigned long long)mtd->eraseregions[i].offset,
                       mtd->eraseregions[i].erasesize,
                       mtd->eraseregions[i].numblocks);
        }

#ifdef CONFIG_MTD_OTP
        mtd->_read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
        mtd->_read_user_prot_reg = cfi_intelext_read_user_prot_reg;
        mtd->_write_user_prot_reg = cfi_intelext_write_user_prot_reg;
        mtd->_lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
        mtd->_get_fact_prot_info = cfi_intelext_get_fact_prot_info;
        mtd->_get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

        /* This function has the potential to distort the reality
           a bit and therefore should be called last. */
        if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
                goto setup_err;

        __module_get(THIS_MODULE);
        register_reboot_notifier(&mtd->reboot_notifier);
        return mtd;

 setup_err:
        if (mtd->eraseregions)
                for (i = 0; i < cfi->cfiq->NumEraseRegions; i++)
                        for (j = 0; j < cfi->numchips; j++)
                                kfree(mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap);
        kfree(mtd->eraseregions);
        kfree(mtd);
        kfree(cfi->cmdset_priv);
        return NULL;
}

static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
                                        struct cfi_private **pcfi)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = *pcfi;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        /*
         * Probing of multi-partition flash chips.
         *
         * To support multiple partitions when available, we simply arrange
         * for each of them to have their own flchip structure even if they
         * are on the same physical chip.  This means creating a whole new
         * cfi_private structure right here which is a blatant code
         * layering violation, but this is still the least intrusive
         * arrangement at this point. This can be rearranged in the future
         * if someone feels motivated enough.  --nico
         */
        if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
            && extp->FeatureSupport & (1 << 9)) {
                int offs = 0;
                struct cfi_private *newcfi;
                struct flchip *chip;
                struct flchip_shared *shared;
                int numregions, numparts, partshift, numvirtchips, i, j;

                /* Protection Register info */
                if (extp->NumProtectionFields)
                        offs = (extp->NumProtectionFields - 1) *
                               sizeof(struct cfi_intelext_otpinfo);

                /* Burst Read info */
                offs += extp->extra[offs+1]+2;

                /* Number of partition regions */
                numregions = extp->extra[offs];
                offs += 1;

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        offs += 2;

                /* Number of hardware partitions */
                numparts = 0;
                for (i = 0; i < numregions; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
                        numparts += rinfo->NumIdentPartitions;
                        offs += sizeof(*rinfo)
                                + (rinfo->NumBlockTypes - 1) *
                                  sizeof(struct cfi_intelext_blockinfo);
                }

                if (!numparts)
                        numparts = 1;

                /* Programming Region info */
                if (extp->MinorVersion >= '4') {
                        struct cfi_intelext_programming_regioninfo *prinfo;
                        prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
                        mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
                        mtd->flags &= ~MTD_BIT_WRITEABLE;
                        printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
                               map->name, mtd->writesize,
                               cfi->interleave * prinfo->ControlValid,
                               cfi->interleave * prinfo->ControlInvalid);
                }

                /*
                 * All functions below currently rely on all chips having
                 * the same geometry so we'll just assume that all hardware
                 * partitions are of the same size too.
                 */
                partshift = cfi->chipshift - __ffs(numparts);

                if ((1 << partshift) < mtd->erasesize) {
                        printk(KERN_ERR
                                "%s: bad number of hw partitions (%d)\n",
                                __func__, numparts);
                        return -EINVAL;
                }

                numvirtchips = cfi->numchips * numparts;
                newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
                if (!newcfi)
                        return -ENOMEM;
                shared = kmalloc_array(cfi->numchips,
                                       sizeof(struct flchip_shared),
                                       GFP_KERNEL);
                if (!shared) {
                        kfree(newcfi);
                        return -ENOMEM;
                }
                memcpy(newcfi, cfi, sizeof(struct cfi_private));
                newcfi->numchips = numvirtchips;
                newcfi->chipshift = partshift;

                chip = &newcfi->chips[0];
                for (i = 0; i < cfi->numchips; i++) {
                        shared[i].writing = shared[i].erasing = NULL;
                        mutex_init(&shared[i].lock);
                        for (j = 0; j < numparts; j++) {
                                *chip = cfi->chips[i];
                                chip->start += j << partshift;
                                chip->priv = &shared[i];
                                /* those should be reset too since
                                   they create memory references. */
                                init_waitqueue_head(&chip->wq);
                                mutex_init(&chip->mutex);
                                chip++;
                        }
                }

                printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
                                  "--> %d partitions of %d KiB\n",
                                  map->name, cfi->numchips, cfi->interleave,
                                  newcfi->numchips, 1<<(newcfi->chipshift-10));

                map->fldrv_priv = newcfi;
                *pcfi = newcfi;
                kfree(cfi);
        }

        return 0;
}
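
/*
 * Worked example of the split above, assuming a single 64Mbit chip
 * (cfi->chipshift = 23, i.e. 8 MiB) that advertises 4 identical
 * hardware partitions: partshift = 23 - __ffs(4) = 21, so the one
 * physical chip is presented as four 2 MiB virtual flchips, all
 * pointing at the same flchip_shared arbitration structure.
 */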

/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */
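/*
 * Opcode crib for the Intel command set used below (standard for
 * command set 0001): 0x70 = read status, 0xff = read array, 0xb0 =
 * program/erase suspend, 0xd0 = confirm or resume, 0x60 = setup for
 * lock and configuration operations.  Status bit 0x80 (SR.7) means
 * the write state machine is ready.
 */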
static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        DECLARE_WAITQUEUE(wait, current);
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
        unsigned long timeo = jiffies + HZ;

        /* Prevent setting state FL_SYNCING for chip in suspended state. */
        if (mode == FL_SYNCING && chip->oldstate != FL_READY)
                goto sleep;

        switch (chip->state) {

        case FL_STATUS:
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        /* At this point we're fine with write operations
                           in other partitions as they don't conflict. */
                        if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
                                break;

                        mutex_unlock(&chip->mutex);
                        cfi_udelay(1);
                        mutex_lock(&chip->mutex);
                        /* Someone else might have been playing with it. */
                        return -EAGAIN;
                }
                /* Fall through */
        case FL_READY:
        case FL_CFI_QUERY:
        case FL_JEDEC_QUERY:
                return 0;

        case FL_ERASING:
                if (!cfip ||
                    !(cfip->FeatureSupport & 2) ||
                    !(mode == FL_READY || mode == FL_POINT ||
                     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
                        goto sleep;

                /* Do not allow suspend if the read/write targets the
                   erase block currently being erased */
                if ((adr & chip->in_progress_block_mask) ==
                    chip->in_progress_block_addr)
                        goto sleep;

                /* Do not suspend small erase blocks: buggy Micron chips */
                if (cfi_is_micron_28F00AP30(cfi, chip) &&
                    (chip->in_progress_block_mask == ~(0x8000-1)))
                        goto sleep;

                /* Erase suspend */
                map_write(map, CMD(0xB0), chip->in_progress_block_addr);

                /* If the flash has finished erasing, then 'erase suspend'
                 * appears to make some (28F320) flash devices switch to
                 * 'read' mode.  Make sure that we switch to 'read status'
                 * mode so we get the right data. --rmk
                 */
                map_write(map, CMD(0x70), chip->in_progress_block_addr);
                chip->oldstate = FL_ERASING;
                chip->state = FL_ERASE_SUSPENDING;
                chip->erase_suspended = 1;
                for (;;) {
                        status = map_read(map, chip->in_progress_block_addr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        if (time_after(jiffies, timeo)) {
                                /* Urgh. Resume and pretend we weren't here.
                                 * Make sure we're in 'read status' mode if it had finished */
                                put_chip(map, chip, adr);
                                printk(KERN_ERR "%s: Chip not ready after erase "
                                       "suspended: status = 0x%lx\n", map->name, status.x[0]);
                                return -EIO;
                        }

                        mutex_unlock(&chip->mutex);
                        cfi_udelay(1);
                        mutex_lock(&chip->mutex);
                        /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
                           So we can just loop here. */
                }
                chip->state = FL_STATUS;
                return 0;

        case FL_XIP_WHILE_ERASING:
                if (mode != FL_READY && mode != FL_POINT &&
                    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
                        goto sleep;
                chip->oldstate = chip->state;
                chip->state = FL_READY;
                return 0;

        case FL_SHUTDOWN:
                /* The machine is rebooting now, so no one can get the chip anymore */
                return -EIO;
        case FL_POINT:
                /* Only if there's no operation suspended... */
                if (mode == FL_READY && chip->oldstate == FL_READY)
                        return 0;
                /* Fall through */
        default:
        sleep:
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);
                mutex_unlock(&chip->mutex);
                schedule();
                remove_wait_queue(&chip->wq, &wait);
                mutex_lock(&chip->mutex);
                return -EAGAIN;
        }
}
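
/*
 * chip_ready() returns 0 once the chip (or partition) is usable for the
 * requested mode, -EAGAIN when the caller must re-evaluate because
 * chip->mutex was dropped and the state may have changed underneath us,
 * and -EIO on fatal conditions (shutdown in progress, suspend timeout).
 */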

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        int ret;
        DECLARE_WAITQUEUE(wait, current);

 retry:
        if (chip->priv &&
            (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE
            || mode == FL_SHUTDOWN) && chip->state != FL_SYNCING) {
                /*
                 * OK. We have the possibility of contention on the
                 * write/erase operations which are global to the real
                 * chip and not per partition.  So let's fight it over
                 * in the partition which currently has authority on
                 * the operation.
                 *
                 * The rules are as follows:
                 *
                 * - any write operation must own shared->writing.
                 *
                 * - any erase operation must own _both_ shared->writing and
                 *   shared->erasing.
                 *
                 * - contention arbitration is handled in the owner's context.
                 *
                 * The 'shared' struct can be read and/or written only when
                 * its lock is taken.
                 */
                struct flchip_shared *shared = chip->priv;
                struct flchip *contender;
                mutex_lock(&shared->lock);
                contender = shared->writing;
                if (contender && contender != chip) {
                        /*
                         * The engine to perform desired operation on this
                         * partition is already in use by someone else.
                         * Let's fight over it in the context of the chip
                         * currently using it.  If it is possible to suspend,
                         * that other partition will do just that, otherwise
                         * it'll happily send us to sleep.  In any case, when
                         * get_chip returns success we're clear to go ahead.
                         */
                        ret = mutex_trylock(&contender->mutex);
                        mutex_unlock(&shared->lock);
                        if (!ret)
                                goto retry;
                        mutex_unlock(&chip->mutex);
                        ret = chip_ready(map, contender, contender->start, mode);
                        mutex_lock(&chip->mutex);

                        if (ret == -EAGAIN) {
                                mutex_unlock(&contender->mutex);
                                goto retry;
                        }
                        if (ret) {
                                mutex_unlock(&contender->mutex);
                                return ret;
                        }
                        mutex_lock(&shared->lock);

                        /* We should not own the chip if it is already
                         * in FL_SYNCING state. Put contender and retry. */
                        if (chip->state == FL_SYNCING) {
                                put_chip(map, contender, contender->start);
                                mutex_unlock(&contender->mutex);
                                goto retry;
                        }
                        mutex_unlock(&contender->mutex);
                }

                /* Check if we already have suspended erase
                 * on this chip. Sleep. */
                if (mode == FL_ERASING && shared->erasing
                    && shared->erasing->oldstate == FL_ERASING) {
                        mutex_unlock(&shared->lock);
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);
                        mutex_unlock(&chip->mutex);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);
                        mutex_lock(&chip->mutex);
                        goto retry;
                }

                /* We now own it */
                shared->writing = chip;
                if (mode == FL_ERASING)
                        shared->erasing = chip;
                mutex_unlock(&shared->lock);
        }
        ret = chip_ready(map, chip, adr, mode);
        if (ret == -EAGAIN)
                goto retry;

        return ret;
}
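
/*
 * Note: get_chip() is entered with chip->mutex held; it may temporarily
 * drop it (and take the contender's mutex) while arbitrating, but it
 * always returns with chip->mutex held again.  put_chip() undoes the
 * ownership taken here and wakes up any waiters.
 */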

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;

        if (chip->priv) {
                struct flchip_shared *shared = chip->priv;
                mutex_lock(&shared->lock);
                if (shared->writing == chip && chip->oldstate == FL_READY) {
                        /* We own the ability to write, but we're done */
                        shared->writing = shared->erasing;
                        if (shared->writing && shared->writing != chip) {
                                /* give ownership back to the chip we borrowed it from */
                                struct flchip *loaner = shared->writing;
                                mutex_lock(&loaner->mutex);
                                mutex_unlock(&shared->lock);
                                mutex_unlock(&chip->mutex);
                                put_chip(map, loaner, loaner->start);
                                mutex_lock(&chip->mutex);
                                mutex_unlock(&loaner->mutex);
                                wake_up(&chip->wq);
                                return;
                        }
                        shared->erasing = NULL;
                        shared->writing = NULL;
                } else if (shared->erasing == chip && shared->writing != chip) {
                        /*
                         * We own the ability to erase without the ability
                         * to write, which means the erase was suspended
                         * and some other partition is currently writing.
                         * Don't let the switch below mess things up since
                         * we don't have ownership to resume anything.
                         */
                        mutex_unlock(&shared->lock);
                        wake_up(&chip->wq);
                        return;
                }
                mutex_unlock(&shared->lock);
        }

        switch (chip->oldstate) {
        case FL_ERASING:
                /* What if one interleaved chip has finished and the
                   other hasn't? The old code would leave the finished
                   one in READY mode. That's bad, and caused -EROFS
                   errors to be returned from do_erase_oneblock because
                   that's the only bit it checked for at the time.
                   As the state machine appears to explicitly allow
                   sending the 0x70 (Read Status) command to an erasing
                   chip and expecting it to be ignored, that's what we
                   do. */
                map_write(map, CMD(0xd0), chip->in_progress_block_addr);
                map_write(map, CMD(0x70), chip->in_progress_block_addr);
                chip->oldstate = FL_READY;
                chip->state = FL_ERASING;
                break;

        case FL_XIP_WHILE_ERASING:
                chip->state = chip->oldstate;
                chip->oldstate = FL_READY;
                break;

        case FL_READY:
        case FL_STATUS:
        case FL_JEDEC_QUERY:
                break;
        default:
                printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
        }
        wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within an XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
                        unsigned long adr)
{
        /* TODO: chips with no XIP use should ignore and return */
        (void) map_read(map, adr); /* ensure mmu mapping is up to date */
        local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
                                unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;
        if (chip->state != FL_POINT && chip->state != FL_READY) {
                map_write(map, CMD(0xff), adr);
                chip->state = FL_READY;
        }
        (void) map_read(map, adr);
        xip_iprefetch();
        local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_wait_for_operation() function polls for both the given timeout
 * and for pending (but still masked) hardware interrupts.  Whenever
 * there is an interrupt pending then the flash erase or write operation
 * is suspended, array mode restored and interrupts unmasked.  Task
 * scheduling might also happen at that point.  The CPU eventually
 * returns from the interrupt or the call to schedule() and the
 * suspended flash operation is resumed for the remainder of the delay
 * period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static int __xipram xip_wait_for_operation(
                struct map_info *map, struct flchip *chip,
                unsigned long adr, unsigned int chip_op_time_max)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
        map_word status, OK = CMD(0x80);
        unsigned long usec, suspended, start, done;
        flstate_t oldstate, newstate;

        start = xip_currtime();
        usec = chip_op_time_max;
        if (usec == 0)
                usec = 500000;
        done = 0;

        do {
                cpu_relax();
                if (xip_irqpending() && cfip &&
                    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
                     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
                    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
                        /*
                         * Let's suspend the erase or write operation when
                         * supported.  Note that we currently don't try to
                         * suspend interleaved chips if there is already
                         * another operation suspended (imagine what happens
                         * when one chip was already done with the current
                         * operation while another chip suspended it, then
                         * we resume the whole thing at once).  Yes, it
                         * can happen!
                         */
                        usec -= done;
                        map_write(map, CMD(0xb0), adr);
                        map_write(map, CMD(0x70), adr);
                        suspended = xip_currtime();
                        do {
                                if (xip_elapsed_since(suspended) > 100000) {
                                        /*
                                         * The chip doesn't want to suspend
                                         * after waiting for 100 msecs.
                                         * This is a critical error but there
                                         * is not much we can do here.
                                         */
                                        return -EIO;
                                }
                                status = map_read(map, adr);
                        } while (!map_word_andequal(map, status, OK, OK));

                        /* Suspend succeeded */
                        oldstate = chip->state;
                        if (oldstate == FL_ERASING) {
                                if (!map_word_bitsset(map, status, CMD(0x40)))
                                        break;
                                newstate = FL_XIP_WHILE_ERASING;
                                chip->erase_suspended = 1;
                        } else {
                                if (!map_word_bitsset(map, status, CMD(0x04)))
                                        break;
                                newstate = FL_XIP_WHILE_WRITING;
                                chip->write_suspended = 1;
                        }
                        chip->state = newstate;
                        map_write(map, CMD(0xff), adr);
                        (void) map_read(map, adr);
                        xip_iprefetch();
                        local_irq_enable();
                        mutex_unlock(&chip->mutex);
                        xip_iprefetch();
                        cond_resched();

                        /*
                         * We're back.  However someone else might have
                         * decided to go write to the chip if we are in
                         * a suspended erase state.  If so let's wait
                         * until it's done.
                         */
                        mutex_lock(&chip->mutex);
                        while (chip->state != newstate) {
                                DECLARE_WAITQUEUE(wait, current);
                                set_current_state(TASK_UNINTERRUPTIBLE);
                                add_wait_queue(&chip->wq, &wait);
                                mutex_unlock(&chip->mutex);
                                schedule();
                                remove_wait_queue(&chip->wq, &wait);
                                mutex_lock(&chip->mutex);
                        }
                        /* Disallow XIP again */
                        local_irq_disable();

                        /* Resume the write or erase operation */
                        map_write(map, CMD(0xd0), adr);
                        map_write(map, CMD(0x70), adr);
                        chip->state = oldstate;
                        start = xip_currtime();
                } else if (usec >= 1000000/HZ) {
                        /*
                         * Try to save on CPU power when waiting delay
                         * is at least a system timer tick period.
                         * No need to be extremely accurate here.
                         */
                        xip_cpu_idle();
                }
                status = map_read(map, adr);
                done = xip_elapsed_since(start);
        } while (!map_word_andequal(map, status, OK, OK)
                 && done < usec);

        return (done >= usec) ? -ETIME : 0;
}

1266 /*
1267  * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
1268  * the flash is actively programming or erasing since we have to poll for
1269  * the operation to complete anyway.  We can't do that in a generic way with
1270  * a XIP setup so do it before the actual flash operation in this case
1271  * and stub it out from INVAL_CACHE_AND_WAIT.
1272  */
1273 #define XIP_INVAL_CACHED_RANGE(map, from, size)  \
1274         INVALIDATE_CACHED_RANGE(map, from, size)
1275
1276 #define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec, usec_max) \
1277         xip_wait_for_operation(map, chip, cmd_adr, usec_max)
1278
1279 #else
1280
1281 #define xip_disable(map, chip, adr)
1282 #define xip_enable(map, chip, adr)
1283 #define XIP_INVAL_CACHED_RANGE(x...)
1284 #define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation
1285
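/*
 * Non-XIP variant: invalidate the cache for the affected range up front
 * (this may sleep, so the chip mutex is dropped around it), then poll the
 * status register.  The timeout is reset whenever a suspended erase/write
 * is resumed, and the wait degrades from msleep() to a udelay(1) busy loop
 * once the expected remaining time drops below a timer tick.
 */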
static int inval_cache_and_wait_for_operation(
		struct map_info *map, struct flchip *chip,
		unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
		unsigned int chip_op_time, unsigned int chip_op_time_max)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, status_OK = CMD(0x80);
	int chip_state = chip->state;
	unsigned int timeo, sleep_time, reset_timeo;

	mutex_unlock(&chip->mutex);
	if (inval_len)
		INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
	mutex_lock(&chip->mutex);

	timeo = chip_op_time_max;
	if (!timeo)
		timeo = 500000;
	reset_timeo = timeo;
	sleep_time = chip_op_time / 2;

	for (;;) {
		if (chip->state != chip_state) {
			/* Someone's suspended the operation: sleep */
			DECLARE_WAITQUEUE(wait, current);
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
			continue;
		}

		status = map_read(map, cmd_adr);
		if (map_word_andequal(map, status, status_OK, status_OK))
			break;

		if (chip->erase_suspended && chip_state == FL_ERASING) {
			/* Erase suspend occurred while sleeping: reset timeout */
			timeo = reset_timeo;
			chip->erase_suspended = 0;
		}
		if (chip->write_suspended && chip_state == FL_WRITING) {
			/* Write suspend occurred while sleeping: reset timeout */
			timeo = reset_timeo;
			chip->write_suspended = 0;
		}
		if (!timeo) {
			map_write(map, CMD(0x70), cmd_adr);
			chip->state = FL_STATUS;
			return -ETIME;
		}

		/* OK, still waiting. Drop the lock, wait a while and retry. */
		mutex_unlock(&chip->mutex);
		if (sleep_time >= 1000000/HZ) {
			/*
			 * Half of the normal delay still remaining
			 * can be performed with a sleeping delay instead
			 * of busy waiting.
			 */
			msleep(sleep_time/1000);
			timeo -= sleep_time;
			sleep_time = 1000000/HZ;
		} else {
			udelay(1);
			cond_resched();
			timeo--;
		}
		mutex_lock(&chip->mutex);
	}

	/* Done and happy. */
	chip->state = FL_STATUS;
	return 0;
}

#endif

#define WAIT_TIMEOUT(map, chip, adr, udelay, udelay_max) \
	INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay, udelay_max);
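
/*
 * Convenience wrapper for waiting on a command when there is no cache
 * range to invalidate; e.g. the buffer-write setup below does
 * "ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0, 0);".
 */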
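
/*
 * mtd_point() support: put one chip into the ref-counted FL_POINT state
 * so its array contents can be mapped and read directly through map->virt.
 */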
static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	mutex_lock(&chip->mutex);

	ret = get_chip(map, chip, cmd_addr, FL_POINT);

	if (!ret) {
		if (chip->state != FL_POINT && chip->state != FL_READY)
			map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_POINT;
		chip->ref_point_counter++;
	}
	mutex_unlock(&chip->mutex);

	return ret;
}

static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len,
		size_t *retlen, void **virt, resource_size_t *phys)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs, last_end = 0;
	int chipnum;
	int ret = 0;

	if (!map->virt)
		return -EINVAL;

	/* Now lock the chip(s) to POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	*virt = map->virt + cfi->chips[chipnum].start + ofs;
	if (phys)
		*phys = map->phys + cfi->chips[chipnum].start + ofs;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		/* We cannot point across chips that are virtually disjoint */
		if (!last_end)
			last_end = cfi->chips[chipnum].start;
		else if (cfi->chips[chipnum].start != last_end)
			break;

		if ((len + ofs - 1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;

		ofs = 0;
		last_end += 1 << cfi->chipshift;
		chipnum++;
	}
	return 0;
}

static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum, err = 0;

	/* Now unlock the chip(s) POINT state */

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len && !err) {
		unsigned long thislen;
		struct flchip *chip;

		chip = &cfi->chips[chipnum];
		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs - 1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		mutex_lock(&chip->mutex);
		if (chip->state == FL_POINT) {
			chip->ref_point_counter--;
			if (chip->ref_point_counter == 0)
				chip->state = FL_READY;
		} else {
			printk(KERN_ERR "%s: Error: unpoint called on non-pointed region\n", map->name);
			err = -EINVAL;
		}

		put_chip(map, chip, chip->start);
		mutex_unlock(&chip->mutex);

		len -= thislen;
		ofs = 0;
		chipnum++;
	}

	return err;
}

static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xff), cmd_addr);

		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	mutex_unlock(&chip->mutex);
	return 0;
}

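/*
 * Read handler: split the request at chip boundaries.  For each chip,
 * "ofs" is the offset within that chip; for instance, if chipshift were
 * 24 (16MiB chips), a read at from == 0x1000004 would start in chip 1
 * at offset 4.
 */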
static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */
	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum << cfi->chipshift);

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs - 1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}

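/*
 * Program a single bus-width word.  Command 0x40 (or 0x41 on Performance
 * parts) selects word program, 0xc0 selects OTP register program; on
 * completion, SR.1 set means the block was locked (-EROFS) and SR.3 set
 * means a VPP fault (-EIO).
 */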
static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
				     unsigned long adr, map_word datum, int mode)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, write_cmd;
	int ret = 0;

	adr += chip->start;

	switch (mode) {
	case FL_WRITING:
		write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0x40) : CMD(0x41);
		break;
	case FL_OTP_WRITE:
		write_cmd = CMD(0xc0);
		break;
	default:
		return -EINVAL;
	}

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, mode);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
	map_write(map, write_cmd, adr);
	map_write(map, datum, adr);
	chip->state = mode;

	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
				   adr, map_bankwidth(map),
				   chip->word_write_time,
				   chip->word_write_time_max);
	if (ret) {
		xip_enable(map, chip, adr);
		printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
		goto out;
	}

	/* check for errors */
	status = map_read(map, adr);
	if (map_word_bitsset(map, status, CMD(0x1a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* reset status */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);
		xip_enable(map, chip, adr);

		if (chipstatus & 0x02) {
			ret = -EROFS;
		} else if (chipstatus & 0x08) {
			printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
			ret = -EIO;
		} else {
			printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		}

		goto out;
	}

	xip_enable(map, chip, adr);
 out:	DISABLE_VPP(map);
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);
	return ret;
}

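/*
 * Word-write handler.  Unaligned head and tail fragments are padded with
 * 0xff via map_word_load_partial(); since programming can only clear bits,
 * the 0xff filler leaves the neighbouring bytes unchanged.
 */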
static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int gap = ofs - bus_ofs;
		int n;
		map_word datum;

		n = min_t(int, len, map_bankwidth(map)-gap);
		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, gap, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       bus_ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		len -= n;
		ofs += n;
		buf += n;
		(*retlen) += n;

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	while (len >= map_bankwidth(map)) {
		map_word datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	if (len & (map_bankwidth(map)-1)) {
		map_word datum;

		datum = map_word_ff(map);
		datum = map_word_load_partial(map, datum, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum, FL_WRITING);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}

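/*
 * Fill and fire the chip's write buffer: issue the setup command (0xe8,
 * or 0xe9 on Performance parts), wait for buffer availability, write the
 * word count and the data, then confirm with 0xd0 and wait for completion.
 * "len" must not make the transfer cross a write-buffer boundary; the
 * caller (cfi_intelext_writev) guarantees that.
 */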
static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const struct kvec **pvec,
				    unsigned long *pvec_seek, int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, write_cmd, datum;
	unsigned long cmd_adr;
	int ret, wbufsize, word_gap, words;
	const struct kvec *vec;
	unsigned long vec_seek;
	unsigned long initial_adr;
	int initial_len = len;

	wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	adr += chip->start;
	initial_adr = adr;
	cmd_adr = adr & ~(wbufsize-1);

	/* Sharp LH28F640BF chips need the first address for the
	 * Page Buffer Program command. See Table 5 of
	 * LH28F320BF, LH28F640BF, LH28F128BF Series (Appendix FUM00701) */
	if (is_LH28F640BF(cfi))
		cmd_adr = adr;

	/* Let's determine this according to the interleave only once */
	write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0xe8) : CMD(0xe9);

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, cmd_adr, FL_WRITING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, initial_adr, initial_len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);

	/* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
	   [...], the device will not accept any more Write to Buffer commands".
	   So we must check here and reset those bits if they're set. Otherwise
	   we're just pissing in the wind */
	if (chip->state != FL_STATUS) {
		map_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
	}
	status = map_read(map, cmd_adr);
	if (map_word_bitsset(map, status, CMD(0x30))) {
		xip_enable(map, chip, cmd_adr);
		printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
		xip_disable(map, chip, cmd_adr);
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
	}

	chip->state = FL_WRITING_TO_BUFFER;
	map_write(map, write_cmd, cmd_adr);
	ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0, 0);
	if (ret) {
		/* Argh. Not ready for write to buffer */
		map_word Xstatus = map_read(map, cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
		status = map_read(map, cmd_adr);
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
		xip_enable(map, chip, cmd_adr);
		printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
				map->name, Xstatus.x[0], status.x[0]);
		goto out;
	}

	/* Figure out the number of words to write */
	word_gap = (-adr & (map_bankwidth(map)-1));
	words = DIV_ROUND_UP(len - word_gap, map_bankwidth(map));
	if (!word_gap) {
		words--;
	} else {
		word_gap = map_bankwidth(map) - word_gap;
		adr -= word_gap;
		datum = map_word_ff(map);
	}

	/* Write length of data to come */
	map_write(map, CMD(words), cmd_adr);

	/* Write data */
	vec = *pvec;
	vec_seek = *pvec_seek;
	do {
		int n = map_bankwidth(map) - word_gap;
		if (n > vec->iov_len - vec_seek)
			n = vec->iov_len - vec_seek;
		if (n > len)
			n = len;

		if (!word_gap && len < map_bankwidth(map))
			datum = map_word_ff(map);

		datum = map_word_load_partial(map, datum,
					      vec->iov_base + vec_seek,
					      word_gap, n);

		len -= n;
		word_gap += n;
		if (!len || word_gap == map_bankwidth(map)) {
			map_write(map, datum, adr);
			adr += map_bankwidth(map);
			word_gap = 0;
		}

		vec_seek += n;
		if (vec_seek == vec->iov_len) {
			vec++;
			vec_seek = 0;
		}
	} while (len);
	*pvec = vec;
	*pvec_seek = vec_seek;

	/* GO GO GO */
	map_write(map, CMD(0xd0), cmd_adr);
	chip->state = FL_WRITING;

	ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
				   initial_adr, initial_len,
				   chip->buffer_write_time,
				   chip->buffer_write_time_max);
	if (ret) {
		map_write(map, CMD(0x70), cmd_adr);
		chip->state = FL_STATUS;
		xip_enable(map, chip, cmd_adr);
		printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
		goto out;
	}

	/* check for errors */
	status = map_read(map, cmd_adr);
	if (map_word_bitsset(map, status, CMD(0x1a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* reset status */
		map_write(map, CMD(0x50), cmd_adr);
		map_write(map, CMD(0x70), cmd_adr);
		xip_enable(map, chip, cmd_adr);

		if (chipstatus & 0x02) {
			ret = -EROFS;
		} else if (chipstatus & 0x08) {
			printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
			ret = -EIO;
		} else {
			printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		}

		goto out;
	}

	xip_enable(map, chip, cmd_adr);
 out:	DISABLE_VPP(map);
	put_chip(map, chip, cmd_adr);
	mutex_unlock(&chip->mutex);
	return ret;
}

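/*
 * Gather-write handler: walk the kvec array and feed do_write_buffer()
 * chunks that never cross a write-buffer boundary, i.e. at most
 * wbufsize - (ofs & (wbufsize-1)) bytes per call.
 */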
static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
				unsigned long count, loff_t to, size_t *retlen)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret = 0;
	int chipnum;
	unsigned long ofs, vec_seek, i;
	size_t len = 0;

	for (i = 0; i < count; i++)
		len += vecs[i].iov_len;

	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);
	vec_seek = 0;

	do {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;
		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, &vecs, &vec_seek, size);
		if (ret)
			return ret;

		ofs += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}

		/* Be nice and reschedule with the chip in a usable state for other
		   processes. */
		cond_resched();

	} while (len);

	return 0;
}

static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
				       size_t len, size_t *retlen, const u_char *buf)
{
	struct kvec vec;

	vec.iov_base = (void *) buf;
	vec.iov_len = len;

	return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
}

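/*
 * Erase one block: clear the status register (0x50), then issue the
 * erase setup/confirm pair (0x20, 0xd0) and wait.  A failed erase
 * (SR.5 set) is retried up to three times before giving up.
 */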
static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
				      unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status;
	int retries = 3;
	int ret;

	adr += chip->start;

 retry:
	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	/* Clear the status register first */
	map_write(map, CMD(0x50), adr);

	/* Now erase */
	map_write(map, CMD(0x20), adr);
	map_write(map, CMD(0xD0), adr);
	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	chip->in_progress_block_addr = adr;
	chip->in_progress_block_mask = ~(len - 1);

	ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
				   adr, len,
				   chip->erase_time,
				   chip->erase_time_max);
	if (ret) {
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
		xip_enable(map, chip, adr);
		printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
		goto out;
	}

	/* We've broken this before. It doesn't hurt to be safe */
	map_write(map, CMD(0x70), adr);
	chip->state = FL_STATUS;
	status = map_read(map, adr);

	/* check for errors */
	if (map_word_bitsset(map, status, CMD(0x3a))) {
		unsigned long chipstatus = MERGESTATUS(status);

		/* Reset the error bits */
		map_write(map, CMD(0x50), adr);
		map_write(map, CMD(0x70), adr);
		xip_enable(map, chip, adr);

		if ((chipstatus & 0x30) == 0x30) {
			printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
			ret = -EINVAL;
		} else if (chipstatus & 0x02) {
			/* Protection bit set */
			ret = -EROFS;
		} else if (chipstatus & 0x08) {
			/* Voltage */
			printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
			ret = -EIO;
		} else if (chipstatus & 0x20 && retries--) {
			printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
			DISABLE_VPP(map);
			put_chip(map, chip, adr);
			mutex_unlock(&chip->mutex);
			goto retry;
		} else {
			printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
			ret = -EIO;
		}

		goto out;
	}

	xip_enable(map, chip, adr);
 out:	DISABLE_VPP(map);
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);
	return ret;
}

static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
	return cfi_varsize_frob(mtd, do_erase_oneblock, instr->addr,
				instr->len, NULL);
}

static void cfi_intelext_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i = 0; !ret && i < cfi->numchips; i++) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);
		ret = get_chip(map, chip, chip->start, FL_SYNCING);

		if (!ret) {
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		}
		mutex_unlock(&chip->mutex);
	}

	/* Unlock the chips again */

	for (i--; i >= 0; i--) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			chip->oldstate = FL_READY;
			wake_up(&chip->wq);
		}
		mutex_unlock(&chip->mutex);
	}
}

static int __xipram do_getlockstatus_oneblock(struct map_info *map,
						struct flchip *chip,
						unsigned long adr,
						int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int status, ofs_factor = cfi->interleave * cfi->device_type;

	adr += chip->start;
	xip_disable(map, chip, adr+(2*ofs_factor));
	map_write(map, CMD(0x90), adr+(2*ofs_factor));
	chip->state = FL_JEDEC_QUERY;
	status = cfi_read_query(map, adr+(2*ofs_factor));
	xip_enable(map, chip, 0);
	return status;
}

#ifdef DEBUG_LOCK_BITS
static int __xipram do_printlockstatus_oneblock(struct map_info *map,
						struct flchip *chip,
						unsigned long adr,
						int len, void *thunk)
{
	printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
	       adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk));
	return 0;
}
#endif

#define DO_XXLOCK_ONEBLOCK_LOCK		((void *) 1)
#define DO_XXLOCK_ONEBLOCK_UNLOCK	((void *) 2)

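/*
 * Set or clear the lock bit of one block: 0x60 selects lock-bit
 * modification, then 0x01 locks or 0xd0 unlocks, as selected by the
 * thunk argument defined above.
 */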
static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
				       unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	int mdelay;
	int ret;

	adr += chip->start;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_LOCKING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	map_write(map, CMD(0x60), adr);
	if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
		map_write(map, CMD(0x01), adr);
		chip->state = FL_LOCKING;
	} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
		map_write(map, CMD(0xD0), adr);
		chip->state = FL_UNLOCKING;
	} else
		BUG();

	/*
	 * If Instant Individual Block Locking is supported, there is no
	 * need to delay.
	 */
	/*
	 * Unlocking may take up to 1.4 seconds on some Intel flashes. So
	 * let's use a max of 1.5 seconds (1500ms) as timeout.
	 *
	 * See "Clear Block Lock-Bits Time" on page 40 in
	 * "3 Volt Intel StrataFlash Memory" 28F128J3,28F640J3,28F320J3 manual
	 * from February 2003
	 */
	mdelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1500 : 0;

	ret = WAIT_TIMEOUT(map, chip, adr, mdelay, mdelay * 1000);
	if (ret) {
		map_write(map, CMD(0x70), adr);
		chip->state = FL_STATUS;
		xip_enable(map, chip, adr);
		printk(KERN_ERR "%s: block unlock error: (status timeout)\n", map->name);
		goto out;
	}

	xip_enable(map, chip, adr);
 out:	DISABLE_VPP(map);
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);
	return ret;
}

static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	int ret;

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
	       __func__, ofs, len);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, NULL);
#endif

	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
		ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
	       __func__, ret);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, NULL);
#endif

	return ret;
}

static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	int ret;

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08X\n",
	       __func__, ofs, len);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, NULL);
#endif

	ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
					ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);

#ifdef DEBUG_LOCK_BITS
	printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
	       __func__, ret);
	cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
		ofs, len, NULL);
#endif

	return ret;
}

static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
				  uint64_t len)
{
	return cfi_varsize_frob(mtd, do_getlockstatus_oneblock,
				ofs, len, NULL) ? 1 : 0;
}

#ifdef CONFIG_MTD_OTP

typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
			u_long data_offset, u_char *buf, u_int size,
			u_long prot_offset, u_int groupno, u_int groupsize);

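/*
 * An otp_op_t callback performs one read/write/lock step on a protection
 * register group; cfi_intelext_otp_walk() below computes the offsets and
 * group geometry it is called with.
 */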
static int __xipram
do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	/* let's ensure we're not reading back cached data from array mode */
	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);

	xip_disable(map, chip, chip->start);
	if (chip->state != FL_JEDEC_QUERY) {
		map_write(map, CMD(0x90), chip->start);
		chip->state = FL_JEDEC_QUERY;
	}
	map_copy_from(map, buf, chip->start + offset, size);
	xip_enable(map, chip, chip->start);

	/* then ensure we don't keep OTP data in the cache */
	INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);

	put_chip(map, chip, chip->start);
	mutex_unlock(&chip->mutex);
	return 0;
}

static int
do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
	     u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	int ret;

	while (size) {
		unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
		int gap = offset - bus_ofs;
		int n = min_t(int, size, map_bankwidth(map)-gap);
		map_word datum = map_word_ff(map);

		datum = map_word_load_partial(map, datum, buf, gap, n);
		ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
		if (ret)
			return ret;

		offset += n;
		buf += n;
		size -= n;
	}

	return 0;
}

static int
do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
	    u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word datum;

	/* make sure area matches group boundaries */
	if (size != grpsz)
		return -EXDEV;

	datum = map_word_ff(map);
	datum = map_word_clr(map, datum, CMD(1 << grpno));
	return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
}

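/*
 * Walk the protection register regions described by the Intel extended
 * query.  With a NULL action the walker fills "buf" with otp_info records
 * (one per group); otherwise it applies the given otp_op_t to the
 * requested byte range, skipping over the factory area when user
 * registers were asked for.
 */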
static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
				 size_t *retlen, u_char *buf,
				 otp_op_t action, int user_regs)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	struct flchip *chip;
	struct cfi_intelext_otpinfo *otp;
	u_long devsize, reg_prot_offset, data_offset;
	u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
	u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
	int ret;

	*retlen = 0;

	/* Check that we actually have some OTP registers */
	if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
		return -ENODATA;

	/* we need real chips here, not virtual ones */
	devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
	chip_step = devsize >> cfi->chipshift;
	chip_num = 0;

	/* Some chips have OTP located in the _top_ partition only.
	   For example: Intel 28F256L18T (T means top-parameter device) */
	if (cfi->mfr == CFI_MFR_INTEL) {
		switch (cfi->id) {
		case 0x880b:
		case 0x880c:
		case 0x880d:
			chip_num = chip_step - 1;
		}
	}

	for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
		chip = &cfi->chips[chip_num];
		otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];

		/* first OTP region */
		field = 0;
		reg_prot_offset = extp->ProtRegAddr;
		reg_fact_groups = 1;
		reg_fact_size = 1 << extp->FactProtRegSize;
		reg_user_groups = 1;
		reg_user_size = 1 << extp->UserProtRegSize;

		while (len > 0) {
			/* flash geometry fixup */
			data_offset = reg_prot_offset + 1;
			data_offset *= cfi->interleave * cfi->device_type;
			reg_prot_offset *= cfi->interleave * cfi->device_type;
			reg_fact_size *= cfi->interleave;
			reg_user_size *= cfi->interleave;

			if (user_regs) {
				groups = reg_user_groups;
				groupsize = reg_user_size;
				/* skip over factory reg area */
				groupno = reg_fact_groups;
				data_offset += reg_fact_groups * reg_fact_size;
			} else {
				groups = reg_fact_groups;
				groupsize = reg_fact_size;
				groupno = 0;
			}

			while (len > 0 && groups > 0) {
				if (!action) {
					/*
					 * Special case: if action is NULL
					 * we fill buf with otp_info records.
					 */
					struct otp_info *otpinfo;
					map_word lockword;
					len -= sizeof(struct otp_info);
					if (len <= 0)
						return -ENOSPC;
					ret = do_otp_read(map, chip,
							  reg_prot_offset,
							  (u_char *)&lockword,
							  map_bankwidth(map),
							  0, 0, 0);
					if (ret)
						return ret;
					otpinfo = (struct otp_info *)buf;
					otpinfo->start = from;
					otpinfo->length = groupsize;
					otpinfo->locked =
					   !map_word_bitsset(map, lockword,
							     CMD(1 << groupno));
					from += groupsize;
					buf += sizeof(*otpinfo);
					*retlen += sizeof(*otpinfo);
				} else if (from >= groupsize) {
					from -= groupsize;
					data_offset += groupsize;
				} else {
					int size = groupsize;
					data_offset += from;
					size -= from;
					from = 0;
					if (size > len)
						size = len;
					ret = action(map, chip, data_offset,
						     buf, size, reg_prot_offset,
						     groupno, groupsize);
					if (ret < 0)
						return ret;
					buf += size;
					len -= size;
					*retlen += size;
					data_offset += size;
				}
				groupno++;
				groups--;
			}

			/* next OTP region */
			if (++field == extp->NumProtectionFields)
				break;
			reg_prot_offset = otp->ProtRegAddr;
			reg_fact_groups = otp->FactGroups;
			reg_fact_size = 1 << otp->FactProtRegSize;
			reg_user_groups = otp->UserGroups;
			reg_user_size = 1 << otp->UserProtRegSize;
			otp++;
		}
	}

	return 0;
}

static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
					   size_t len, size_t *retlen,
					   u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_read, 0);
}

static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
					   size_t len, size_t *retlen,
					   u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_read, 1);
}

static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
					    size_t len, size_t *retlen,
					    u_char *buf)
{
	return cfi_intelext_otp_walk(mtd, from, len, retlen,
				     buf, do_otp_write, 1);
}

static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
					   loff_t from, size_t len)
{
	size_t retlen;
	return cfi_intelext_otp_walk(mtd, from, len, &retlen,
				     NULL, do_otp_lock, 1);
}

static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd, size_t len,
					   size_t *retlen, struct otp_info *buf)
{
	return cfi_intelext_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
				     NULL, 0);
}

static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd, size_t len,
					   size_t *retlen, struct otp_info *buf)
{
	return cfi_intelext_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
				     NULL, 1);
}

#endif

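/*
 * Snapshot the per-block lock bits into each region's lockmap before
 * suspend.  Parts with the power-up-locking feature come back with all
 * blocks locked, so cfi_intelext_restore_locks() below re-unlocks the
 * blocks that were unlocked before.
 */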
2496 static void cfi_intelext_save_locks(struct mtd_info *mtd)
2497 {
2498         struct mtd_erase_region_info *region;
2499         int block, status, i;
2500         unsigned long adr;
2501         size_t len;
2502
2503         for (i = 0; i < mtd->numeraseregions; i++) {
2504                 region = &mtd->eraseregions[i];
2505                 if (!region->lockmap)
2506                         continue;
2507
2508                 for (block = 0; block < region->numblocks; block++){
2509                         len = region->erasesize;
2510                         adr = region->offset + block * len;
2511
2512                         status = cfi_varsize_frob(mtd,
2513                                         do_getlockstatus_oneblock, adr, len, NULL);
2514                         if (status)
2515                                 set_bit(block, region->lockmap);
2516                         else
2517                                 clear_bit(block, region->lockmap);
2518                 }
2519         }
2520 }
2521
2522 static int cfi_intelext_suspend(struct mtd_info *mtd)
2523 {
2524         struct map_info *map = mtd->priv;
2525         struct cfi_private *cfi = map->fldrv_priv;
2526         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2527         int i;
2528         struct flchip *chip;
2529         int ret = 0;
2530
2531         if ((mtd->flags & MTD_POWERUP_LOCK)
2532             && extp && (extp->FeatureSupport & (1 << 5)))
2533                 cfi_intelext_save_locks(mtd);
2534
2535         for (i=0; !ret && i<cfi->numchips; i++) {
2536                 chip = &cfi->chips[i];
2537
2538                 mutex_lock(&chip->mutex);
2539
2540                 switch (chip->state) {
2541                 case FL_READY:
2542                 case FL_STATUS:
2543                 case FL_CFI_QUERY:
2544                 case FL_JEDEC_QUERY:
2545                         if (chip->oldstate == FL_READY) {
2546                                 /* place the chip in a known state before suspend */
2547                                 map_write(map, CMD(0xFF), cfi->chips[i].start);
2548                                 chip->oldstate = chip->state;
2549                                 chip->state = FL_PM_SUSPENDED;
2550                                 /* No need to wake_up() on this state change -
2551                                  * as the whole point is that nobody can do anything
2552                                  * with the chip now anyway.
2553                                  */
2554                         } else {
2555                                 /* There seems to be an operation pending. We must wait for it. */
2556                                 printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
2557                                 ret = -EAGAIN;
2558                         }
2559                         break;
2560                 default:
2561                         /* Should we actually wait? Once upon a time these routines weren't
2562                            allowed to. Or should we return -EAGAIN, because the upper layers
2563                            ought to have already shut down anything which was using the device
2564                            anyway? The latter for now. */
2565                         printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
2566                         ret = -EAGAIN;
2567                 case FL_PM_SUSPENDED:
2568                         break;
2569                 }
2570                 mutex_unlock(&chip->mutex);
2571         }
2572
2573         /* Unlock the chips again */
2574
2575         if (ret) {
2576                 for (i--; i >=0; i--) {
2577                         chip = &cfi->chips[i];
2578
2579                         mutex_lock(&chip->mutex);
2580
2581                         if (chip->state == FL_PM_SUSPENDED) {
2582                                 /* No need to force it into a known state here,
2583                                    because we're returning failure, and it didn't
2584                                    get power cycled */
2585                                 chip->state = chip->oldstate;
2586                                 chip->oldstate = FL_READY;
2587                                 wake_up(&chip->wq);
2588                         }
2589                         mutex_unlock(&chip->mutex);
2590                 }
2591         }
2592
2593         return ret;
2594 }
2595
static void cfi_intelext_restore_locks(struct mtd_info *mtd)
{
	struct mtd_erase_region_info *region;
	int block, i;
	unsigned long adr;
	size_t len;

	for (i = 0; i < mtd->numeraseregions; i++) {
		region = &mtd->eraseregions[i];
		if (!region->lockmap)
			continue;

		for_each_clear_bit(block, region->lockmap, region->numblocks) {
			len = region->erasesize;
			adr = region->offset + block * len;
			cfi_intelext_unlock(mtd, adr, len);
		}
	}
}

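/*
 * PM resume handler: return every suspended chip to array (read) mode,
 * since the part may have been power cycled while asleep, wake up anyone
 * sleeping on the chip's wait queue, then restore the pre-suspend block
 * lock state on parts that lock their blocks at power up.
 */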
static void cfi_intelext_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	int i;
	struct flchip *chip;

	for (i = 0; i < cfi->numchips; i++) {

		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		/* Go to known state. Chip may have been power cycled */
		if (chip->state == FL_PM_SUSPENDED) {
			/* Refresh LH28F640BF Partition Config. Register */
			fixup_LH28F640BF(mtd);
			map_write(map, CMD(0xFF), cfi->chips[i].start);
			chip->oldstate = chip->state = FL_READY;
			wake_up(&chip->wq);
		}

		mutex_unlock(&chip->mutex);
	}

	if ((mtd->flags & MTD_POWERUP_LOCK)
	    && extp && (extp->FeatureSupport & (1 << 5)))
		cfi_intelext_restore_locks(mtd);
}

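/*
 * Put every chip back into array mode on the shutdown/reboot path, so
 * that a bootloader stored in the same flash remains readable after a
 * soft reboot.  get_chip() is used to wait out any operation still in
 * progress; chips that cannot be claimed are skipped, and the function
 * always reports success.
 */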
static int cfi_intelext_reset(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i, ret;

	for (i = 0; i < cfi->numchips; i++) {
		struct flchip *chip = &cfi->chips[i];

		/* force the completion of any ongoing operation
		   and switch to array mode so any bootloader in
		   flash is accessible for soft reboot. */
		mutex_lock(&chip->mutex);
		ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
		if (!ret) {
			map_write(map, CMD(0xff), chip->start);
			chip->state = FL_SHUTDOWN;
			put_chip(map, chip, chip->start);
		}
		mutex_unlock(&chip->mutex);
	}

	return 0;
}

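/*
 * Reboot notifier callback: invoked just before the system restarts, so
 * the chips are left in array mode for the firmware/bootloader.
 */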
static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
			       void *v)
{
	struct mtd_info *mtd;

	mtd = container_of(nb, struct mtd_info, reboot_notifier);
	cfi_intelext_reset(mtd);
	return NOTIFY_DONE;
}

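/*
 * Driver teardown: reset the chips to array mode, unhook the reboot
 * notifier and release everything allocated at setup time, including
 * the per-region lock bitmaps used across suspend/resume.
 */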
static void cfi_intelext_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_erase_region_info *region;
	int i;

	cfi_intelext_reset(mtd);
	unregister_reboot_notifier(&mtd->reboot_notifier);
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi->chips[0].priv);
	kfree(cfi);
	for (i = 0; i < mtd->numeraseregions; i++) {
		region = &mtd->eraseregions[i];
		kfree(region->lockmap);
	}
	kfree(mtd->eraseregions);
}

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
MODULE_ALIAS("cfi_cmdset_0003");
MODULE_ALIAS("cfi_cmdset_0200");