// SPDX-License-Identifier: GPL-2.0
/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat.
 *
 * 10/10/2000   Nicolas Pitre <nico@fluxnic.net>
 *      - completely revamped method functions so they are aware and
 *        independent of the flash geometry (buswidth, interleave, etc.)
 *      - scalability vs code size is completely set at compile-time
 *        (see include/linux/mtd/cfi.h for selection)
 *      - optimized write buffer method
 * 02/05/2002   Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 *      - reworked lock/unlock/erase support for var size flash
 * 21/03/2007   Rodolfo Giometti <giometti@linux.it>
 *      - auto unlock sectors on resume for auto locking flash on power up
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/bitmap.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>

/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

// debugging, turns off buffer write mode if set to 1
#define FORCE_WORD_WRITE 0

/* Intel chips */
#define I82802AB        0x00ad
#define I82802AC        0x00ac
#define PF38F4476       0x881c
#define M28F00AP30      0x8963
/* STMicroelectronics chips */
#define M50LPW080       0x002F
#define M50FLW080A      0x0080
#define M50FLW080B      0x0081
/* Atmel chips */
#define AT49BV640D      0x02de
#define AT49BV640DT     0x02db
/* Sharp chips */
#define LH28F640BFHE_PTTL90     0x00b0
#define LH28F640BFHE_PBTL90     0x00b1
#define LH28F640BFHE_PTTL70A    0x00b2
#define LH28F640BFHE_PBTL70A    0x00b3

static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
                                  uint64_t len);
#ifdef CONFIG_MTD_OTP
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg(struct mtd_info *, loff_t, size_t,
                                            size_t *, const u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info(struct mtd_info *, size_t,
                                           size_t *, struct otp_info *);
static int cfi_intelext_get_user_prot_info(struct mtd_info *, size_t,
                                           size_t *, struct otp_info *);
#endif
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
                     size_t *retlen, void **virt, resource_size_t *phys);
static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len);

static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
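/* fwh_lock.h uses the chip-access helpers declared just above and provides
 * fixup_use_fwh_lock() for the JEDEC fixup table below. */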
#include "fwh_lock.h"



/*
 *  *********** SETUP AND PROBE BITS  ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
        .probe          = NULL, /* Not usable directly */
        .destroy        = cfi_intelext_destroy,
        .name           = "cfi_cmdset_0001",
        .module         = THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
        int i;
        printk("  Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
        printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
        printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
        printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
        printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
        printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
        printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
        printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
        printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
        printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
        printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
        printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
        printk("     - Extended Flash Array:    %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
        for (i=11; i<32; i++) {
                if (extp->FeatureSupport & (1<<i))
                        printk("     - Unknown Bit %X:      supported\n", i);
        }

        printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
        printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
        for (i=1; i<8; i++) {
                if (extp->SuspendCmdSupport & (1<<i))
                        printk("     - Unknown Bit %X:               supported\n", i);
        }

        printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
        printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
        printk("     - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
        for (i=2; i<3; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }
        printk("     - EFA Lock Bit:         %s\n", extp->BlkStatusRegMask&16?"yes":"no");
        printk("     - EFA Lock-Down Bit:    %s\n", extp->BlkStatusRegMask&32?"yes":"no");
        for (i=6; i<16; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }

        printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
               extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
        if (extp->VppOptimal)
                printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
                       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

/* Atmel chips don't use the same PRI format as Intel chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;
        struct cfi_pri_atmel atmel_pri;
        uint32_t features = 0;

        /* Reverse the byteswapping done in read_pri_intelext() so the
           raw little-endian PRI bytes can be reinterpreted in Atmel's
           own layout below */
        extp->FeatureSupport = cpu_to_le32(extp->FeatureSupport);
        extp->BlkStatusRegMask = cpu_to_le16(extp->BlkStatusRegMask);
        extp->ProtRegAddr = cpu_to_le16(extp->ProtRegAddr);

        memcpy(&atmel_pri, extp, sizeof(atmel_pri));
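        /* Zero everything past the 5-byte header (the "PRI" signature
           plus the major/minor version bytes) before filling in the
           translated Intel-format fields */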
        memset((char *)extp + 5, 0, sizeof(*extp) - 5);

        printk(KERN_ERR "atmel Features: %02x\n", atmel_pri.Features);

        if (atmel_pri.Features & 0x01) /* chip erase supported */
                features |= (1<<0);
        if (atmel_pri.Features & 0x02) /* erase suspend supported */
                features |= (1<<1);
        if (atmel_pri.Features & 0x04) /* program suspend supported */
                features |= (1<<2);
        if (atmel_pri.Features & 0x08) /* simultaneous operations supported */
                features |= (1<<9);
        if (atmel_pri.Features & 0x20) /* page mode read supported */
                features |= (1<<7);
        if (atmel_pri.Features & 0x40) /* queued erase supported */
                features |= (1<<4);
        if (atmel_pri.Features & 0x80) /* Protection bits supported */
                features |= (1<<6);

        extp->FeatureSupport = features;

        /* burst write mode not supported */
        cfi->cfiq->BufWriteTimeoutTyp = 0;
        cfi->cfiq->BufWriteTimeoutMax = 0;
}

static void fixup_at49bv640dx_lock(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

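        /* Advertise "instant block lock" (FeatureSupport bit 5) and flag
           the device as power-up locked so its sectors are unlocked
           automatically on boot and resume */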
        cfip->FeatureSupport |= (1 << 5);
        mtd->flags |= MTD_POWERUP_LOCK;
}

#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel StrataFlash chips prior to FPO revision C have bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
                            "erase on write disabled.\n");
        extp->SuspendCmdSupport &= ~1;
}
#endif

#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        if (cfip && (cfip->FeatureSupport&4)) {
                cfip->FeatureSupport &= ~4;
                printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
        }
}
#endif

static void fixup_st_m28w320ct(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        cfi->cfiq->BufWriteTimeoutTyp = 0;      /* Not supported */
        cfi->cfiq->BufWriteTimeoutMax = 0;      /* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        /* Note this is done after the region info is endian swapped */
        cfi->cfiq->EraseRegionInfo[1] =
                (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
267 };
268
269 static int is_LH28F640BF(struct cfi_private *cfi)
270 {
271         /* Sharp LH28F640BF Family */
272         if (cfi->mfr == CFI_MFR_SHARP && (
273             cfi->id == LH28F640BFHE_PTTL90 || cfi->id == LH28F640BFHE_PBTL90 ||
274             cfi->id == LH28F640BFHE_PTTL70A || cfi->id == LH28F640BFHE_PBTL70A))
275                 return 1;
276         return 0;
277 }
278
279 static void fixup_LH28F640BF(struct mtd_info *mtd)
280 {
281         struct map_info *map = mtd->priv;
282         struct cfi_private *cfi = map->fldrv_priv;
283         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
284
285         /* Reset the Partition Configuration Register on LH28F640BF
286          * to a single partition (PCR = 0x000): PCR is embedded into A0-A15. */
287         if (is_LH28F640BF(cfi)) {
288                 printk(KERN_INFO "Reset Partition Config. Register: 1 Partition of 4 planes\n");
289                 map_write(map, CMD(0x60), 0);
290                 map_write(map, CMD(0x04), 0);
291
292                 /* We have set one single partition thus
293                  * Simultaneous Operations are not allowed */
294                 printk(KERN_INFO "cfi_cmdset_0001: Simultaneous Operations disabled\n");
295                 extp->FeatureSupport &= ~512;
296         }
297 }
298
299 static void fixup_use_point(struct mtd_info *mtd)
300 {
301         struct map_info *map = mtd->priv;
302         if (!mtd->_point && map_is_linear(map)) {
303                 mtd->_point   = cfi_intelext_point;
304                 mtd->_unpoint = cfi_intelext_unpoint;
305         }
306 }
307
308 static void fixup_use_write_buffers(struct mtd_info *mtd)
309 {
310         struct map_info *map = mtd->priv;
311         struct cfi_private *cfi = map->fldrv_priv;
312         if (cfi->cfiq->BufWriteTimeoutTyp) {
313                 printk(KERN_INFO "Using buffer write method\n" );
314                 mtd->_write = cfi_intelext_write_buffers;
315                 mtd->_writev = cfi_intelext_writev;
316         }
317 }
318
319 /*
320  * Some chips power-up with all sectors locked by default.
321  */
322 static void fixup_unlock_powerup_lock(struct mtd_info *mtd)
323 {
324         struct map_info *map = mtd->priv;
325         struct cfi_private *cfi = map->fldrv_priv;
326         struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
327
328         if (cfip->FeatureSupport&32) {
329                 printk(KERN_INFO "Using auto-unlock on power-up/resume\n" );
330                 mtd->flags |= MTD_POWERUP_LOCK;
331         }
332 }
333
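/*
 * Fixup entries are matched against the probed (manufacturer, device)
 * ID pair; CFI_MFR_ANY and CFI_ID_ANY act as wildcards, the list is
 * terminated by a NULL fixup, and every matching entry runs in table
 * order.
 */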
static struct cfi_fixup cfi_fixup_table[] = {
        { CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri },
        { CFI_MFR_ATMEL, AT49BV640D, fixup_at49bv640dx_lock },
        { CFI_MFR_ATMEL, AT49BV640DT, fixup_at49bv640dx_lock },
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend },
#endif
#if !FORCE_WORD_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers },
#endif
        { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct },
        { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb },
        { CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock },
        { CFI_MFR_SHARP, CFI_ID_ANY, fixup_unlock_powerup_lock },
        { CFI_MFR_SHARP, CFI_ID_ANY, fixup_LH28F640BF },
        { 0, 0, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
        { CFI_MFR_INTEL, I82802AB,   fixup_use_fwh_lock },
        { CFI_MFR_INTEL, I82802AC,   fixup_use_fwh_lock },
        { CFI_MFR_ST,    M50LPW080,  fixup_use_fwh_lock },
        { CFI_MFR_ST,    M50FLW080A, fixup_use_fwh_lock },
        { CFI_MFR_ST,    M50FLW080B, fixup_use_fwh_lock },
        { 0, 0, NULL }
};
static struct cfi_fixup fixup_table[] = {
        /* The CFI vendor IDs and the JEDEC vendor IDs appear to be
         * common, and it is likely that the device IDs are as well.
         * This table picks all cases where we know that to be true.
         */
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point },
        { 0, 0, NULL }
};

static void cfi_fixup_major_minor(struct cfi_private *cfi,
                                                struct cfi_pri_intelext *extp)
{
        if (cfi->mfr == CFI_MFR_INTEL &&
                        cfi->id == PF38F4476 && extp->MinorVersion == '3')
                extp->MinorVersion = '1';
}

static int cfi_is_micron_28F00AP30(struct cfi_private *cfi, struct flchip *chip)
{
        /*
         * Micron (formerly Numonyx) 1Gbit bottom-boot parts are buggy
         * w.r.t. Erase Suspend on their small (0x8000-byte) erase blocks
         */
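        /* Despite the Micron branding, these parts report the Intel
           manufacturer ID, hence the CFI_MFR_INTEL check below */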
        if (cfi->mfr == CFI_MFR_INTEL && cfi->id == M28F00AP30)
                return 1;
        return 0;
}

static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp;
        unsigned int extra_size = 0;
        unsigned int extp_size = sizeof(*extp);
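        /*
         * The extended query table carries a version-dependent, variable-size
         * tail.  Read the fixed part first, compute how much extra data this
         * version implies, and re-read with a larger buffer (capped at 4096
         * bytes) whenever the current one comes up short.
         */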

 again:
        extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
        if (!extp)
                return NULL;

        cfi_fixup_major_minor(cfi, extp);

        if (extp->MajorVersion != '1' ||
            (extp->MinorVersion < '0' || extp->MinorVersion > '5')) {
                printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
                       "version %c.%c.\n",  extp->MajorVersion,
                       extp->MinorVersion);
                kfree(extp);
                return NULL;
        }

        /* Do some byteswapping if necessary */
        extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
        extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
        extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

        if (extp->MinorVersion >= '0') {
                extra_size = 0;

                /* Protection Register info */
                if (extp->NumProtectionFields) {
                        struct cfi_intelext_otpinfo *otp =
                                (struct cfi_intelext_otpinfo *)&extp->extra[0];

                        extra_size += (extp->NumProtectionFields - 1) *
                                sizeof(struct cfi_intelext_otpinfo);

                        if (extp_size >= sizeof(*extp) + extra_size) {
                                int i;

                                /* Do some byteswapping if necessary */
                                for (i = 0; i < extp->NumProtectionFields - 1; i++) {
                                        otp->ProtRegAddr = le32_to_cpu(otp->ProtRegAddr);
                                        otp->FactGroups = le16_to_cpu(otp->FactGroups);
                                        otp->UserGroups = le16_to_cpu(otp->UserGroups);
                                        otp++;
                                }
                        }
                }
        }

        if (extp->MinorVersion >= '1') {
                /* Burst Read info */
                extra_size += 2;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                extra_size += extp->extra[extra_size - 1];
        }

        if (extp->MinorVersion >= '3') {
                int nb_parts, i;

                /* Number of hardware-partitions */
                extra_size += 1;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                nb_parts = extp->extra[extra_size - 1];

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        extra_size += 2;

                for (i = 0; i < nb_parts; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
                        extra_size += sizeof(*rinfo);
                        if (extp_size < sizeof(*extp) + extra_size)
                                goto need_more;
                        rinfo->NumIdentPartitions=le16_to_cpu(rinfo->NumIdentPartitions);
                        extra_size += (rinfo->NumBlockTypes - 1)
                                      * sizeof(struct cfi_intelext_blockinfo);
                }

                if (extp->MinorVersion >= '4')
                        extra_size += sizeof(struct cfi_intelext_programming_regioninfo);

                if (extp_size < sizeof(*extp) + extra_size) {
                        need_more:
                        extp_size = sizeof(*extp) + extra_size;
                        kfree(extp);
                        if (extp_size > 4096) {
                                printk(KERN_ERR
                                        "%s: cfi_pri_intelext is too fat\n",
                                        __func__);
                                return NULL;
                        }
                        goto again;
                }
        }

        return extp;
}

struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct mtd_info *mtd;
        int i;

        mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
        if (!mtd)
                return NULL;
        mtd->priv = map;
        mtd->type = MTD_NORFLASH;

        /* Fill in the default mtd operations */
        mtd->_erase   = cfi_intelext_erase_varsize;
        mtd->_read    = cfi_intelext_read;
        mtd->_write   = cfi_intelext_write_words;
        mtd->_sync    = cfi_intelext_sync;
        mtd->_lock    = cfi_intelext_lock;
        mtd->_unlock  = cfi_intelext_unlock;
        mtd->_is_locked = cfi_intelext_is_locked;
        mtd->_suspend = cfi_intelext_suspend;
        mtd->_resume  = cfi_intelext_resume;
        mtd->flags   = MTD_CAP_NORFLASH;
        mtd->name    = map->name;
        mtd->writesize = 1;
        mtd->writebufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
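        /* MaxBufWriteSize is log2 of the per-chip buffer size in bytes,
           e.g. two interleaved chips with 32-byte buffers (MaxBufWriteSize
           = 5) yield a 64-byte write buffer */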

        mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

        if (cfi->cfi_mode == CFI_MODE_CFI) {
                /*
                 * It's a real CFI chip, not one for which the probe
                 * routine faked a CFI structure. So we read the feature
                 * table from it.
                 */
                __u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
                struct cfi_pri_intelext *extp;

                extp = read_pri_intelext(map, adr);
                if (!extp) {
                        kfree(mtd);
                        return NULL;
                }

                /* Install our own private info structure */
                cfi->cmdset_priv = extp;

                cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
                /* Tell the user about it in lots of lovely detail */
                cfi_tell_features(extp);
#endif

                if(extp->SuspendCmdSupport & 1) {
                        printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
                }
        }
        else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
                /* Apply jedec specific fixups */
                cfi_fixup(mtd, jedec_fixup_table);
        }
        /* Apply generic fixups */
        cfi_fixup(mtd, fixup_table);

        for (i=0; i< cfi->numchips; i++) {
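                /* CFI timing fields are log2 encoded: word/buffer write
                   timeouts are 2^Typ microseconds, block erase timeouts
                   2^Typ milliseconds (converted to microseconds here);
                   the Max fields add a further 2^Max multiplier */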
                if (cfi->cfiq->WordWriteTimeoutTyp)
                        cfi->chips[i].word_write_time =
                                1<<cfi->cfiq->WordWriteTimeoutTyp;
                else
                        cfi->chips[i].word_write_time = 50000;

                if (cfi->cfiq->BufWriteTimeoutTyp)
                        cfi->chips[i].buffer_write_time =
                                1<<cfi->cfiq->BufWriteTimeoutTyp;
                /* No default; if it isn't specified, we won't use it */

                if (cfi->cfiq->BlockEraseTimeoutTyp)
                        cfi->chips[i].erase_time =
                                1000<<cfi->cfiq->BlockEraseTimeoutTyp;
                else
                        cfi->chips[i].erase_time = 2000000;

                if (cfi->cfiq->WordWriteTimeoutTyp &&
                    cfi->cfiq->WordWriteTimeoutMax)
                        cfi->chips[i].word_write_time_max =
                                1<<(cfi->cfiq->WordWriteTimeoutTyp +
                                    cfi->cfiq->WordWriteTimeoutMax);
                else
                        cfi->chips[i].word_write_time_max = 50000 * 8;

                if (cfi->cfiq->BufWriteTimeoutTyp &&
                    cfi->cfiq->BufWriteTimeoutMax)
                        cfi->chips[i].buffer_write_time_max =
                                1<<(cfi->cfiq->BufWriteTimeoutTyp +
                                    cfi->cfiq->BufWriteTimeoutMax);

                if (cfi->cfiq->BlockEraseTimeoutTyp &&
                    cfi->cfiq->BlockEraseTimeoutMax)
                        cfi->chips[i].erase_time_max =
                                1000<<(cfi->cfiq->BlockEraseTimeoutTyp +
                                       cfi->cfiq->BlockEraseTimeoutMax);
                else
                        cfi->chips[i].erase_time_max = 2000000 * 8;

                cfi->chips[i].ref_point_counter = 0;
                init_waitqueue_head(&(cfi->chips[i].wq));
        }

        map->fldrv = &cfi_intelext_chipdrv;

        return cfi_intelext_setup(mtd);
}
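/* Command sets 0x0003 and 0x0200 are handled identically to 0x0001,
   so alias their entry points to the handler above */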
struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
EXPORT_SYMBOL_GPL(cfi_cmdset_0200);

static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long offset = 0;
        int i,j;
        unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

        //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

        mtd->size = devsize * cfi->numchips;

        mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
        mtd->eraseregions = kcalloc(mtd->numeraseregions,
                                    sizeof(struct mtd_erase_region_info),
                                    GFP_KERNEL);
        if (!mtd->eraseregions)
                goto setup_err;

        for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
                unsigned long ernum, ersize;
                ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
                ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

                if (mtd->erasesize < ersize) {
                        mtd->erasesize = ersize;
                }
                for (j=0; j<cfi->numchips; j++) {
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
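                        /* lockmap holds one bit per erase block, rounded
                           up to whole bytes */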
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap = kmalloc(ernum / 8 + 1, GFP_KERNEL);
                        if (!mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap)
                                goto setup_err;
                }
                offset += (ersize * ernum);
        }

        if (offset != devsize) {
                /* Argh */
                printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
                goto setup_err;
        }

        for (i=0; i<mtd->numeraseregions;i++){
                printk(KERN_DEBUG "erase region %d: offset=0x%llx,size=0x%x,blocks=%d\n",
                       i,(unsigned long long)mtd->eraseregions[i].offset,
                       mtd->eraseregions[i].erasesize,
                       mtd->eraseregions[i].numblocks);
        }

#ifdef CONFIG_MTD_OTP
        mtd->_read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
        mtd->_read_user_prot_reg = cfi_intelext_read_user_prot_reg;
        mtd->_write_user_prot_reg = cfi_intelext_write_user_prot_reg;
        mtd->_lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
        mtd->_get_fact_prot_info = cfi_intelext_get_fact_prot_info;
        mtd->_get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

        /* This function has the potential to distort the reality
           a bit and therefore should be called last. */
        if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
                goto setup_err;

        __module_get(THIS_MODULE);
        register_reboot_notifier(&mtd->reboot_notifier);
        return mtd;

 setup_err:
        if (mtd->eraseregions)
                for (i=0; i<cfi->cfiq->NumEraseRegions; i++)
                        for (j=0; j<cfi->numchips; j++)
                                kfree(mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].lockmap);
        kfree(mtd->eraseregions);
        kfree(mtd);
        kfree(cfi->cmdset_priv);
        return NULL;
}

static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
                                        struct cfi_private **pcfi)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = *pcfi;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        /*
         * Probing of multi-partition flash chips.
         *
         * To support multiple partitions when available, we simply arrange
         * for each of them to have their own flchip structure even if they
         * are on the same physical chip.  This means completely recreating
         * a new cfi_private structure right here which is a blatant code
         * layering violation, but this is still the least intrusive
         * arrangement at this point. This can be rearranged in the future
         * if someone feels motivated enough.  --nico
         */
        if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
            && extp->FeatureSupport & (1 << 9)) {
                int offs = 0;
                struct cfi_private *newcfi;
                struct flchip *chip;
                struct flchip_shared *shared;
                int numregions, numparts, partshift, numvirtchips, i, j;

                /* Protection Register info */
                if (extp->NumProtectionFields)
                        offs = (extp->NumProtectionFields - 1) *
                               sizeof(struct cfi_intelext_otpinfo);

                /* Burst Read info */
                offs += extp->extra[offs+1]+2;

                /* Number of partition regions */
                numregions = extp->extra[offs];
                offs += 1;

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        offs += 2;

                /* Number of hardware partitions */
                numparts = 0;
                for (i = 0; i < numregions; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
                        numparts += rinfo->NumIdentPartitions;
                        offs += sizeof(*rinfo)
                                + (rinfo->NumBlockTypes - 1) *
                                  sizeof(struct cfi_intelext_blockinfo);
                }

                if (!numparts)
                        numparts = 1;

                /* Programming Region info */
                if (extp->MinorVersion >= '4') {
                        struct cfi_intelext_programming_regioninfo *prinfo;
                        prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
                        mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
                        mtd->flags &= ~MTD_BIT_WRITEABLE;
                        printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
                               map->name, mtd->writesize,
                               cfi->interleave * prinfo->ControlValid,
                               cfi->interleave * prinfo->ControlInvalid);
                }

                /*
                 * All functions below currently rely on all chips having
                 * the same geometry so we'll just assume that all hardware
                 * partitions are of the same size too.
                 */
                partshift = cfi->chipshift - __ffs(numparts);
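                /* For a power-of-two partition count, __ffs() equals log2():
                   e.g. an 8 MiB chip (chipshift 23) with 4 partitions gives
                   partshift 21, i.e. four 2 MiB virtual chips */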

                if ((1 << partshift) < mtd->erasesize) {
                        printk( KERN_ERR
                                "%s: bad number of hw partitions (%d)\n",
                                __func__, numparts);
                        return -EINVAL;
                }

                numvirtchips = cfi->numchips * numparts;
                newcfi = kmalloc(struct_size(newcfi, chips, numvirtchips),
                                 GFP_KERNEL);
                if (!newcfi)
                        return -ENOMEM;
                shared = kmalloc_array(cfi->numchips,
                                       sizeof(struct flchip_shared),
                                       GFP_KERNEL);
                if (!shared) {
                        kfree(newcfi);
                        return -ENOMEM;
                }
                memcpy(newcfi, cfi, sizeof(struct cfi_private));
                newcfi->numchips = numvirtchips;
                newcfi->chipshift = partshift;

                chip = &newcfi->chips[0];
                for (i = 0; i < cfi->numchips; i++) {
                        shared[i].writing = shared[i].erasing = NULL;
                        mutex_init(&shared[i].lock);
                        for (j = 0; j < numparts; j++) {
                                *chip = cfi->chips[i];
                                chip->start += j << partshift;
                                chip->priv = &shared[i];
                                /* those should be reset too since
                                   they create memory references. */
                                init_waitqueue_head(&chip->wq);
                                mutex_init(&chip->mutex);
                                chip++;
                        }
                }

                printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
                                  "--> %d partitions of %d KiB\n",
                                  map->name, cfi->numchips, cfi->interleave,
                                  newcfi->numchips, 1<<(newcfi->chipshift-10));

                map->fldrv_priv = newcfi;
                *pcfi = newcfi;
                kfree(cfi);
        }

        return 0;
}

/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */
static int chip_ready (struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        DECLARE_WAITQUEUE(wait, current);
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
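        /* status_OK tests SR bit 7 (write state machine ready); status_PWS
           tests SR bit 0, which multi-partition parts use to report
           per-partition write status */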
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
        unsigned long timeo = jiffies + HZ;

        /* Prevent setting state FL_SYNCING for chip in suspended state. */
        if (mode == FL_SYNCING && chip->oldstate != FL_READY)
                goto sleep;

        switch (chip->state) {

        case FL_STATUS:
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        /* At this point we're fine with write operations
                           in other partitions as they don't conflict. */
                        if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
                                break;

                        mutex_unlock(&chip->mutex);
                        cfi_udelay(1);
                        mutex_lock(&chip->mutex);
                        /* Someone else might have been playing with it. */
                        return -EAGAIN;
                }
                fallthrough;
        case FL_READY:
        case FL_CFI_QUERY:
        case FL_JEDEC_QUERY:
                return 0;

        case FL_ERASING:
                if (!cfip ||
                    !(cfip->FeatureSupport & 2) ||
                    !(mode == FL_READY || mode == FL_POINT ||
                     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
                        goto sleep;
                /* Do not allow suspend if the read/write targets the
                   erase block currently being erased */
                if ((adr & chip->in_progress_block_mask) ==
                    chip->in_progress_block_addr)
                        goto sleep;

                /* do not suspend small erase blocks on buggy Micron chips */
                if (cfi_is_micron_28F00AP30(cfi, chip) &&
                    (chip->in_progress_block_mask == ~(0x8000-1)))
                        goto sleep;

                /* Erase suspend */
                map_write(map, CMD(0xB0), chip->in_progress_block_addr);

                /* If the flash has finished erasing, then 'erase suspend'
                 * appears to make some (28F320) flash devices switch to
                 * 'read' mode.  Make sure that we switch to 'read status'
                 * mode so we get the right data. --rmk
                 */
                map_write(map, CMD(0x70), chip->in_progress_block_addr);
                chip->oldstate = FL_ERASING;
                chip->state = FL_ERASE_SUSPENDING;
                chip->erase_suspended = 1;
                for (;;) {
                        status = map_read(map, chip->in_progress_block_addr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        if (time_after(jiffies, timeo)) {
                                /* Urgh. Resume and pretend we weren't here.
                                 * Make sure we're in 'read status' mode if it had finished */
                                put_chip(map, chip, adr);
                                printk(KERN_ERR "%s: Chip not ready after erase "
                                       "suspended: status = 0x%lx\n", map->name, status.x[0]);
                                return -EIO;
                        }

                        mutex_unlock(&chip->mutex);
                        cfi_udelay(1);
                        mutex_lock(&chip->mutex);
                        /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
                           So we can just loop here. */
                }
                chip->state = FL_STATUS;
                return 0;

        case FL_XIP_WHILE_ERASING:
                if (mode != FL_READY && mode != FL_POINT &&
                    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
                        goto sleep;
                chip->oldstate = chip->state;
                chip->state = FL_READY;
                return 0;

        case FL_SHUTDOWN:
                /* The machine is rebooting now, so no one can access the chip anymore */
                return -EIO;
        case FL_POINT:
                /* Only if there's no operation suspended... */
                if (mode == FL_READY && chip->oldstate == FL_READY)
                        return 0;
                fallthrough;
        default:
        sleep:
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);
                mutex_unlock(&chip->mutex);
                schedule();
                remove_wait_queue(&chip->wq, &wait);
                mutex_lock(&chip->mutex);
                return -EAGAIN;
        }
}

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        int ret;
        DECLARE_WAITQUEUE(wait, current);

 retry:
        if (chip->priv &&
            (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE
            || mode == FL_SHUTDOWN) && chip->state != FL_SYNCING) {
                /*
                 * OK. We have possibility for contention on the write/erase
                 * operations which are global to the real chip and not per
                 * partition.  So let's fight it over in the partition which
                 * currently has authority on the operation.
                 *
                 * The rules are as follows:
                 *
                 * - any write operation must own shared->writing.
                 *
                 * - any erase operation must own _both_ shared->writing and
                 *   shared->erasing.
                 *
                 * - contention arbitration is handled in the owner's context.
                 *
                 * The 'shared' struct can be read and/or written only when
                 * its lock is taken.
                 */
                struct flchip_shared *shared = chip->priv;
                struct flchip *contender;
                mutex_lock(&shared->lock);
                contender = shared->writing;
                if (contender && contender != chip) {
                        /*
                         * The engine to perform desired operation on this
                         * partition is already in use by someone else.
                         * Let's fight over it in the context of the chip
                         * currently using it.  If it is possible to suspend,
                         * that other partition will do just that, otherwise
                         * it'll happily send us to sleep.  In any case, when
                         * get_chip returns success we're clear to go ahead.
                         */
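                        /* A trylock here avoids lock-order inversion between
                           shared->lock and the per-chip mutexes; on failure we
                           drop everything and retry from the top */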
                        ret = mutex_trylock(&contender->mutex);
                        mutex_unlock(&shared->lock);
                        if (!ret)
                                goto retry;
                        mutex_unlock(&chip->mutex);
                        ret = chip_ready(map, contender, contender->start, mode);
                        mutex_lock(&chip->mutex);

                        if (ret == -EAGAIN) {
                                mutex_unlock(&contender->mutex);
                                goto retry;
                        }
                        if (ret) {
                                mutex_unlock(&contender->mutex);
                                return ret;
                        }
                        mutex_lock(&shared->lock);

                        /* We should not own chip if it is already
                         * in FL_SYNCING state. Put contender and retry. */
                        if (chip->state == FL_SYNCING) {
                                put_chip(map, contender, contender->start);
                                mutex_unlock(&contender->mutex);
                                goto retry;
                        }
                        mutex_unlock(&contender->mutex);
                }

                /* Check if we already have suspended erase
                 * on this chip. Sleep. */
                if (mode == FL_ERASING && shared->erasing
                    && shared->erasing->oldstate == FL_ERASING) {
                        mutex_unlock(&shared->lock);
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);
                        mutex_unlock(&chip->mutex);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);
                        mutex_lock(&chip->mutex);
                        goto retry;
                }

                /* We now own it */
                shared->writing = chip;
                if (mode == FL_ERASING)
                        shared->erasing = chip;
                mutex_unlock(&shared->lock);
        }
        ret = chip_ready(map, chip, adr, mode);
        if (ret == -EAGAIN)
                goto retry;

        return ret;
}

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;

        if (chip->priv) {
                struct flchip_shared *shared = chip->priv;
                mutex_lock(&shared->lock);
                if (shared->writing == chip && chip->oldstate == FL_READY) {
                        /* We own the ability to write, but we're done */
                        shared->writing = shared->erasing;
                        if (shared->writing && shared->writing != chip) {
                                /* give back ownership to who we loaned it from */
                                struct flchip *loaner = shared->writing;
                                mutex_lock(&loaner->mutex);
                                mutex_unlock(&shared->lock);
                                mutex_unlock(&chip->mutex);
                                put_chip(map, loaner, loaner->start);
                                mutex_lock(&chip->mutex);
                                mutex_unlock(&loaner->mutex);
                                wake_up(&chip->wq);
                                return;
                        }
                        shared->erasing = NULL;
                        shared->writing = NULL;
                } else if (shared->erasing == chip && shared->writing != chip) {
                        /*
                         * We own the ability to erase without the ability
                         * to write, which means the erase was suspended
                         * and some other partition is currently writing.
                         * Don't let the switch below mess things up since
                         * we don't have ownership to resume anything.
                         */
                        mutex_unlock(&shared->lock);
                        wake_up(&chip->wq);
                        return;
                }
                mutex_unlock(&shared->lock);
        }

        switch(chip->oldstate) {
        case FL_ERASING:
                /* What if one interleaved chip has finished and the
                   other hasn't? The old code would leave the finished
                   one in READY mode. That's bad, and caused -EROFS
                   errors to be returned from do_erase_oneblock because
                   that's the only bit it checked for at the time.
                   As the state machine appears to explicitly allow
                   sending the 0x70 (Read Status) command to an erasing
                   chip and expecting it to be ignored, that's what we
                   do. */
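                /* 0xd0 resumes the suspended erase; 0x70 keeps the chip in
                   read-status mode */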
                map_write(map, CMD(0xd0), chip->in_progress_block_addr);
                map_write(map, CMD(0x70), chip->in_progress_block_addr);
                chip->oldstate = FL_READY;
                chip->state = FL_ERASING;
                break;

        case FL_XIP_WHILE_ERASING:
                chip->state = chip->oldstate;
                chip->oldstate = FL_READY;
                break;

        case FL_READY:
        case FL_STATUS:
        case FL_JEDEC_QUERY:
                break;
        default:
                printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
        }
        wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within an XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
                        unsigned long adr)
{
        /* TODO: chips with no XIP use should ignore and return */
        (void) map_read(map, adr); /* ensure mmu mapping is up to date */
        local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
                                unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;
        if (chip->state != FL_POINT && chip->state != FL_READY) {
                map_write(map, CMD(0xff), adr);
                chip->state = FL_READY;
        }
        (void) map_read(map, adr);
        xip_iprefetch();
        local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_wait_for_operation() function is polling for both the given timeout
 * and pending (but still masked) hardware interrupts.  Whenever there is an
 * interrupt pending then the flash erase or write operation is suspended,
 * array mode restored and interrupts unmasked.  Task scheduling might also
 * happen at that point.  The CPU eventually returns from the interrupt or
 * the call to schedule() and the suspended flash operation is resumed for
 * the remainder of the delay period.
1154  *
1155  * Warning: this function _will_ fool interrupt latency tracing tools.
1156  */
1157
1158 static int __xipram xip_wait_for_operation(
1159                 struct map_info *map, struct flchip *chip,
1160                 unsigned long adr, unsigned int chip_op_time_max)
1161 {
1162         struct cfi_private *cfi = map->fldrv_priv;
1163         struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
1164         map_word status, OK = CMD(0x80);
1165         unsigned long usec, suspended, start, done;
1166         flstate_t oldstate, newstate;
1167
1168         start = xip_currtime();
1169         usec = chip_op_time_max;
1170         if (usec == 0)
1171                 usec = 500000;
1172         done = 0;
1173
1174         do {
1175                 cpu_relax();
1176                 if (xip_irqpending() && cfip &&
1177                     ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
1178                      (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
1179                     (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
1180                         /*
1181                          * Let's suspend the erase or write operation when
1182                          * supported.  Note that we currently don't try to
1183                          * suspend interleaved chips if there is already
1184                          * another operation suspended (imagine what happens
1185                          * when one chip was already done with the current
1186                          * operation while another chip suspended it, then
1187                          * we resume the whole thing at once).  Yes, it
1188                          * can happen!
1189                          */
1190                         usec -= done;
1191                         map_write(map, CMD(0xb0), adr);
1192                         map_write(map, CMD(0x70), adr);
1193                         suspended = xip_currtime();
1194                         do {
1195                                 if (xip_elapsed_since(suspended) > 100000) {
1196                                         /*
1197                                          * The chip doesn't want to suspend
1198                                          * after waiting for 100 msecs.
1199                                          * This is a critical error but there
1200                                          * is not much we can do here.
1201                                          */
1202                                         return -EIO;
1203                                 }
1204                                 status = map_read(map, adr);
1205                         } while (!map_word_andequal(map, status, OK, OK));
1206
1207                         /* Suspend succeeded */
1208                         oldstate = chip->state;
1209                         if (oldstate == FL_ERASING) {
1210                                 if (!map_word_bitsset(map, status, CMD(0x40)))
1211                                         break;
1212                                 newstate = FL_XIP_WHILE_ERASING;
1213                                 chip->erase_suspended = 1;
1214                         } else {
1215                                 if (!map_word_bitsset(map, status, CMD(0x04)))
1216                                         break;
1217                                 newstate = FL_XIP_WHILE_WRITING;
1218                                 chip->write_suspended = 1;
1219                         }
1220                         chip->state = newstate;
1221                         map_write(map, CMD(0xff), adr);
1222                         (void) map_read(map, adr);
1223                         xip_iprefetch();
1224                         local_irq_enable();
1225                         mutex_unlock(&chip->mutex);
1226                         xip_iprefetch();
1227                         cond_resched();
1228
1229                         /*
1230                          * We're back.  However, someone else might have
1231                          * decided to go write to the chip if we are in
1232                          * a suspended erase state.  If so, let's wait
1233                          * until it's done.
1234                          */
1235                         mutex_lock(&chip->mutex);
1236                         while (chip->state != newstate) {
1237                                 DECLARE_WAITQUEUE(wait, current);
1238                                 set_current_state(TASK_UNINTERRUPTIBLE);
1239                                 add_wait_queue(&chip->wq, &wait);
1240                                 mutex_unlock(&chip->mutex);
1241                                 schedule();
1242                                 remove_wait_queue(&chip->wq, &wait);
1243                                 mutex_lock(&chip->mutex);
1244                         }
1245                         /* Disallow XIP again */
1246                         local_irq_disable();
1247
1248                         /* Resume the write or erase operation */
1249                         map_write(map, CMD(0xd0), adr);
1250                         map_write(map, CMD(0x70), adr);
1251                         chip->state = oldstate;
1252                         start = xip_currtime();
1253                 } else if (usec >= 1000000/HZ) {
1254                         /*
1255                          * Try to save CPU power when the waiting delay
1256                          * is at least one system timer tick period.
1257                          * No need to be extremely accurate here.
1258                          */
1259                         xip_cpu_idle();
1260                 }
1261                 status = map_read(map, adr);
1262                 done = xip_elapsed_since(start);
1263         } while (!map_word_andequal(map, status, OK, OK)
1264                  && done < usec);
1265
1266         return (done >= usec) ? -ETIME : 0;
1267 }
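
/*
 * Illustrative sketch (hypothetical helper, not called anywhere; the
 * name and the fixed poll bound are invented for this sketch): the bare
 * Intel suspend/resume command sequence that xip_wait_for_operation()
 * wraps in all the state tracking above.
 */
static int __maybe_unused example_suspend_resume(struct map_info *map,
						 unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, ready = CMD(0x80);
	int loops = 100000;

	map_write(map, CMD(0xb0), adr);		/* Program/Erase Suspend */
	map_write(map, CMD(0x70), adr);		/* Read Status Register */
	do {					/* wait for SR.7: WSM ready */
		if (!loops--)
			return -EIO;
		status = map_read(map, adr);
	} while (!map_word_andequal(map, status, ready, ready));

	/* the array may now be read after a 0xff (Read Array) command */

	map_write(map, CMD(0xd0), adr);		/* resume the operation */
	map_write(map, CMD(0x70), adr);		/* back to status mode */
	return 0;
}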
1268
1269 /*
1270  * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
1271  * the flash is actively programming or erasing since we have to poll for
1272  * the operation to complete anyway.  We can't do that in a generic way with
1273  * an XIP setup, so do it before the actual flash operation in this case
1274  * and stub it out from INVAL_CACHE_AND_WAIT.
1275  */
1276 #define XIP_INVAL_CACHED_RANGE(map, from, size)  \
1277         INVALIDATE_CACHED_RANGE(map, from, size)
1278
1279 #define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec, usec_max) \
1280         xip_wait_for_operation(map, chip, cmd_adr, usec_max)
1281
1282 #else
1283
1284 #define xip_disable(map, chip, adr)
1285 #define xip_enable(map, chip, adr)
1286 #define XIP_INVAL_CACHED_RANGE(x...)
1287 #define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation
1288
1289 static int inval_cache_and_wait_for_operation(
1290                 struct map_info *map, struct flchip *chip,
1291                 unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
1292                 unsigned int chip_op_time, unsigned int chip_op_time_max)
1293 {
1294         struct cfi_private *cfi = map->fldrv_priv;
1295         map_word status, status_OK = CMD(0x80);
1296         int chip_state = chip->state;
1297         unsigned int timeo, sleep_time, reset_timeo;
1298
1299         mutex_unlock(&chip->mutex);
1300         if (inval_len)
1301                 INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
1302         mutex_lock(&chip->mutex);
1303
1304         timeo = chip_op_time_max;
1305         if (!timeo)
1306                 timeo = 500000;
1307         reset_timeo = timeo;
1308         sleep_time = chip_op_time / 2;
1309
1310         for (;;) {
1311                 if (chip->state != chip_state) {
1312                         /* Someone's suspended the operation: sleep */
1313                         DECLARE_WAITQUEUE(wait, current);
1314                         set_current_state(TASK_UNINTERRUPTIBLE);
1315                         add_wait_queue(&chip->wq, &wait);
1316                         mutex_unlock(&chip->mutex);
1317                         schedule();
1318                         remove_wait_queue(&chip->wq, &wait);
1319                         mutex_lock(&chip->mutex);
1320                         continue;
1321                 }
1322
1323                 status = map_read(map, cmd_adr);
1324                 if (map_word_andequal(map, status, status_OK, status_OK))
1325                         break;
1326
1327                 if (chip->erase_suspended && chip_state == FL_ERASING)  {
1328                         /* Erase suspend occurred while sleeping: reset timeout */
1329                         timeo = reset_timeo;
1330                         chip->erase_suspended = 0;
1331                 }
1332                 if (chip->write_suspended && chip_state == FL_WRITING)  {
1333                         /* Write suspend occurred while sleeping: reset timeout */
1334                         timeo = reset_timeo;
1335                         chip->write_suspended = 0;
1336                 }
1337                 if (!timeo) {
1338                         map_write(map, CMD(0x70), cmd_adr);
1339                         chip->state = FL_STATUS;
1340                         return -ETIME;
1341                 }
1342
1343                 /* OK, still waiting. Drop the lock, wait a while and retry. */
1344                 mutex_unlock(&chip->mutex);
1345                 if (sleep_time >= 1000000/HZ) {
1346                         /*
1347                          * At least half of the nominal delay still
1348                          * remains, so it can be spent sleeping instead
1349                          * of busy-waiting.
1350                          */
1351                         msleep(sleep_time/1000);
1352                         timeo -= sleep_time;
1353                         sleep_time = 1000000/HZ;
1354                 } else {
1355                         udelay(1);
1356                         cond_resched();
1357                         timeo--;
1358                 }
1359                 mutex_lock(&chip->mutex);
1360         }
1361
1362         /* Done and happy. */
1363         chip->state = FL_STATUS;
1364         return 0;
1365 }
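
/*
 * Illustrative sketch (hypothetical helper, not called anywhere; the
 * name and return convention are invented for this sketch): the delay
 * policy of the wait loop above in isolation.  Delays of at least one
 * timer tick are slept off with msleep(); anything shorter is
 * busy-waited 1 us at a time, so a typical word write (tens of
 * microseconds) never pays a whole-tick penalty.  Returns the number
 * of microseconds consumed.
 */
static unsigned int __maybe_unused example_wait_step(unsigned int sleep_time)
{
	if (sleep_time >= 1000000/HZ) {
		msleep(sleep_time/1000);	/* sleeping delay */
		return sleep_time;
	}
	udelay(1);				/* sub-tick: busy wait */
	return 1;
}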
1366
1367 #endif
1368
1369 #define WAIT_TIMEOUT(map, chip, adr, udelay, udelay_max) \
1370         INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay, udelay_max);
1371
1372
1373 static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
1374 {
1375         unsigned long cmd_addr;
1376         struct cfi_private *cfi = map->fldrv_priv;
1377         int ret;
1378
1379         adr += chip->start;
1380
1381         /* Ensure cmd read/writes are aligned. */
1382         cmd_addr = adr & ~(map_bankwidth(map)-1);
1383
1384         mutex_lock(&chip->mutex);
1385
1386         ret = get_chip(map, chip, cmd_addr, FL_POINT);
1387
1388         if (!ret) {
1389                 if (chip->state != FL_POINT && chip->state != FL_READY)
1390                         map_write(map, CMD(0xff), cmd_addr);
1391
1392                 chip->state = FL_POINT;
1393                 chip->ref_point_counter++;
1394         }
1395         mutex_unlock(&chip->mutex);
1396
1397         return ret;
1398 }
1399
1400 static int cfi_intelext_point(struct mtd_info *mtd, loff_t from, size_t len,
1401                 size_t *retlen, void **virt, resource_size_t *phys)
1402 {
1403         struct map_info *map = mtd->priv;
1404         struct cfi_private *cfi = map->fldrv_priv;
1405         unsigned long ofs, last_end = 0;
1406         int chipnum;
1407         int ret;
1408
1409         if (!map->virt)
1410                 return -EINVAL;
1411
1412         /* Now lock the chip(s) to POINT state */
1413
1414         /* ofs: offset within the first chip at which the first read should start */
1415         chipnum = (from >> cfi->chipshift);
1416         ofs = from - (chipnum << cfi->chipshift);
1417
1418         *virt = map->virt + cfi->chips[chipnum].start + ofs;
1419         if (phys)
1420                 *phys = map->phys + cfi->chips[chipnum].start + ofs;
1421
1422         while (len) {
1423                 unsigned long thislen;
1424
1425                 if (chipnum >= cfi->numchips)
1426                         break;
1427
1428                 /* We cannot point across chips that are virtually disjoint */
1429                 if (!last_end)
1430                         last_end = cfi->chips[chipnum].start;
1431                 else if (cfi->chips[chipnum].start != last_end)
1432                         break;
1433
1434                 if ((len + ofs - 1) >> cfi->chipshift)
1435                         thislen = (1<<cfi->chipshift) - ofs;
1436                 else
1437                         thislen = len;
1438
1439                 ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
1440                 if (ret)
1441                         break;
1442
1443                 *retlen += thislen;
1444                 len -= thislen;
1445
1446                 ofs = 0;
1447                 last_end += 1 << cfi->chipshift;
1448                 chipnum++;
1449         }
1450         return 0;
1451 }
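
/*
 * Illustrative sketch (hypothetical caller, not part of the driver;
 * the helper name is invented for this sketch): how the point/unpoint
 * pair here is used through the generic MTD API.  Every successful
 * mtd_point() must be balanced by an mtd_unpoint() so that
 * ref_point_counter can fall back to zero and the chip can leave
 * FL_POINT state.
 */
static int __maybe_unused example_point_usage(struct mtd_info *mtd,
					      loff_t from, size_t len)
{
	size_t retlen = 0;
	void *virt;
	int ret;

	ret = mtd_point(mtd, from, len, &retlen, &virt, NULL);
	if (ret)
		return ret;

	/* flash contents may now be read directly at virt[0..retlen-1] */

	return mtd_unpoint(mtd, from, retlen);
}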
1452
1453 static int cfi_intelext_unpoint(struct mtd_info *mtd, loff_t from, size_t len)
1454 {
1455         struct map_info *map = mtd->priv;
1456         struct cfi_private *cfi = map->fldrv_priv;
1457         unsigned long ofs;
1458         int chipnum, err = 0;
1459
1460         /* Now release the chip(s) from POINT state */
1461
1462         /* ofs: offset within the first chip at which the first read should start */
1463         chipnum = (from >> cfi->chipshift);
1464         ofs = from - (chipnum <<  cfi->chipshift);
1465
1466         while (len && !err) {
1467                 unsigned long thislen;
1468                 struct flchip *chip;
1469
1470                 if (chipnum >= cfi->numchips)
1471                         break;
1472                 chip = &cfi->chips[chipnum];
1473
1474                 if ((len + ofs - 1) >> cfi->chipshift)
1475                         thislen = (1<<cfi->chipshift) - ofs;
1476                 else
1477                         thislen = len;
1478
1479                 mutex_lock(&chip->mutex);
1480                 if (chip->state == FL_POINT) {
1481                         chip->ref_point_counter--;
1482                         if (chip->ref_point_counter == 0)
1483                                 chip->state = FL_READY;
1484                 } else {
1485                         printk(KERN_ERR "%s: Error: unpoint called on non-pointed region\n", map->name);
1486                         err = -EINVAL;
1487                 }
1488
1489                 put_chip(map, chip, chip->start);
1490                 mutex_unlock(&chip->mutex);
1491
1492                 len -= thislen;
1493                 ofs = 0;
1494                 chipnum++;
1495         }
1496
1497         return err;
1498 }
1499
1500 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
1501 {
1502         unsigned long cmd_addr;
1503         struct cfi_private *cfi = map->fldrv_priv;
1504         int ret;
1505
1506         adr += chip->start;
1507
1508         /* Ensure cmd read/writes are aligned. */
1509         cmd_addr = adr & ~(map_bankwidth(map)-1);
1510
1511         mutex_lock(&chip->mutex);
1512         ret = get_chip(map, chip, cmd_addr, FL_READY);
1513         if (ret) {
1514                 mutex_unlock(&chip->mutex);
1515                 return ret;
1516         }
1517
1518         if (chip->state != FL_POINT && chip->state != FL_READY) {
1519                 map_write(map, CMD(0xff), cmd_addr);
1520
1521                 chip->state = FL_READY;
1522         }
1523
1524         map_copy_from(map, buf, adr, len);
1525
1526         put_chip(map, chip, cmd_addr);
1527
1528         mutex_unlock(&chip->mutex);
1529         return 0;
1530 }
1531
1532 static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1533 {
1534         struct map_info *map = mtd->priv;
1535         struct cfi_private *cfi = map->fldrv_priv;
1536         unsigned long ofs;
1537         int chipnum;
1538         int ret = 0;
1539
1540         /* ofs: offset within the first chip at which the first read should start */
1541         chipnum = (from >> cfi->chipshift);
1542         ofs = from - (chipnum <<  cfi->chipshift);
1543
1544         while (len) {
1545                 unsigned long thislen;
1546
1547                 if (chipnum >= cfi->numchips)
1548                         break;
1549
1550                 if ((len + ofs - 1) >> cfi->chipshift)
1551                         thislen = (1<<cfi->chipshift) - ofs;
1552                 else
1553                         thislen = len;
1554
1555                 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1556                 if (ret)
1557                         break;
1558
1559                 *retlen += thislen;
1560                 len -= thislen;
1561                 buf += thislen;
1562
1563                 ofs = 0;
1564                 chipnum++;
1565         }
1566         return ret;
1567 }
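
/*
 * A worked example of the chip-split arithmetic above (assuming two
 * 16MiB chips, i.e. chipshift == 24): reading 0x2000 bytes from
 * 0xfff000 starts in chip 0 at ofs == 0xfff000; the first pass clamps
 * thislen to 0x1000 (the distance to the chip boundary), then ofs
 * resets to 0 and chipnum becomes 1 for the remaining 0x1000 bytes.
 */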
1568
1569 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1570                                      unsigned long adr, map_word datum, int mode)
1571 {
1572         struct cfi_private *cfi = map->fldrv_priv;
1573         map_word status, write_cmd;
1574         int ret;
1575
1576         adr += chip->start;
1577
1578         switch (mode) {
1579         case FL_WRITING:
1580                 write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0x40) : CMD(0x41);
1581                 break;
1582         case FL_OTP_WRITE:
1583                 write_cmd = CMD(0xc0);
1584                 break;
1585         default:
1586                 return -EINVAL;
1587         }
1588
1589         mutex_lock(&chip->mutex);
1590         ret = get_chip(map, chip, adr, mode);
1591         if (ret) {
1592                 mutex_unlock(&chip->mutex);
1593                 return ret;
1594         }
1595
1596         XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1597         ENABLE_VPP(map);
1598         xip_disable(map, chip, adr);
1599         map_write(map, write_cmd, adr);
1600         map_write(map, datum, adr);
1601         chip->state = mode;
1602
1603         ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1604                                    adr, map_bankwidth(map),
1605                                    chip->word_write_time,
1606                                    chip->word_write_time_max);
1607         if (ret) {
1608                 xip_enable(map, chip, adr);
1609                 printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
1610                 goto out;
1611         }
1612
1613         /* check for errors */
1614         status = map_read(map, adr);
1615         if (map_word_bitsset(map, status, CMD(0x1a))) {
1616                 unsigned long chipstatus = MERGESTATUS(status);
1617
1618                 /* reset status */
1619                 map_write(map, CMD(0x50), adr);
1620                 map_write(map, CMD(0x70), adr);
1621                 xip_enable(map, chip, adr);
1622
1623                 if (chipstatus & 0x02) {
1624                         ret = -EROFS;
1625                 } else if (chipstatus & 0x08) {
1626                         printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
1627                         ret = -EIO;
1628                 } else {
1629                         printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
1630                         ret = -EINVAL;
1631                 }
1632
1633                 goto out;
1634         }
1635
1636         xip_enable(map, chip, adr);
1637  out:   DISABLE_VPP(map);
1638         put_chip(map, chip, adr);
1639         mutex_unlock(&chip->mutex);
1640         return ret;
1641 }
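
/*
 * Illustrative sketch (hypothetical helper, not called anywhere; the
 * name and poll bound are invented for this sketch): the bare
 * word-program sequence performed by do_write_oneword(), stripped of
 * the locking, XIP and Vpp handling.  The SR.7 poll and the
 * SR.1/SR.3/SR.4 (0x1a) error decode mirror the code above.
 */
static int __maybe_unused example_word_program(struct map_info *map,
					       unsigned long adr,
					       map_word datum)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, ready = CMD(0x80);
	int loops = 100000;

	map_write(map, CMD(0x40), adr);		/* Word Program setup */
	map_write(map, datum, adr);		/* the data itself */
	do {					/* poll SR.7: WSM ready */
		if (!loops--)
			return -ETIME;
		status = map_read(map, adr);
	} while (!map_word_andequal(map, status, ready, ready));

	if (map_word_bitsset(map, status, CMD(0x1a))) {
		map_write(map, CMD(0x50), adr);	/* Clear Status Register */
		return -EIO;		/* locked block, bad Vpp or error */
	}
	return 0;
}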
1642
1643
1644 static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to , size_t len, size_t *retlen, const u_char *buf)
1645 {
1646         struct map_info *map = mtd->priv;
1647         struct cfi_private *cfi = map->fldrv_priv;
1648         int ret;
1649         int chipnum;
1650         unsigned long ofs;
1651
1652         chipnum = to >> cfi->chipshift;
1653         ofs = to  - (chipnum << cfi->chipshift);
1654
1655         /* If it's not bus-aligned, do the leading partial-word write first */
1656         if (ofs & (map_bankwidth(map)-1)) {
1657                 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1658                 int gap = ofs - bus_ofs;
1659                 int n;
1660                 map_word datum;
1661
1662                 n = min_t(int, len, map_bankwidth(map)-gap);
1663                 datum = map_word_ff(map);
1664                 datum = map_word_load_partial(map, datum, buf, gap, n);
1665
1666                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1667                                                bus_ofs, datum, FL_WRITING);
1668                 if (ret)
1669                         return ret;
1670
1671                 len -= n;
1672                 ofs += n;
1673                 buf += n;
1674                 (*retlen) += n;
1675
1676                 if (ofs >> cfi->chipshift) {
1677                         chipnum++;
1678                         ofs = 0;
1679                         if (chipnum == cfi->numchips)
1680                                 return 0;
1681                 }
1682         }
1683
1684         while (len >= map_bankwidth(map)) {
1685                 map_word datum = map_word_load(map, buf);
1686
1687                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1688                                        ofs, datum, FL_WRITING);
1689                 if (ret)
1690                         return ret;
1691
1692                 ofs += map_bankwidth(map);
1693                 buf += map_bankwidth(map);
1694                 (*retlen) += map_bankwidth(map);
1695                 len -= map_bankwidth(map);
1696
1697                 if (ofs >> cfi->chipshift) {
1698                         chipnum++;
1699                         ofs = 0;
1700                         if (chipnum == cfi->numchips)
1701                                 return 0;
1702                 }
1703         }
1704
1705         if (len & (map_bankwidth(map)-1)) {
1706                 map_word datum;
1707
1708                 datum = map_word_ff(map);
1709                 datum = map_word_load_partial(map, datum, buf, 0, len);
1710
1711                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1712                                        ofs, datum, FL_WRITING);
1713                 if (ret)
1714                         return ret;
1715
1716                 (*retlen) += len;
1717         }
1718
1719         return 0;
1720 }
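
/*
 * A worked example of the alignment handling above (assuming a 2-byte
 * bus width): writing 6 bytes at offset 3 becomes a partial word at
 * bus offset 2 (gap 1, n 1), two full words at offsets 4 and 6, and a
 * trailing single byte at offset 8.  The partial words are padded
 * with map_word_ff() so the companion bytes stay 0xff, i.e. remain
 * unprogrammed.
 */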
1721
1722
1723 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1724                                     unsigned long adr, const struct kvec **pvec,
1725                                     unsigned long *pvec_seek, int len)
1726 {
1727         struct cfi_private *cfi = map->fldrv_priv;
1728         map_word status, write_cmd, datum;
1729         unsigned long cmd_adr;
1730         int ret, wbufsize, word_gap, words;
1731         const struct kvec *vec;
1732         unsigned long vec_seek;
1733         unsigned long initial_adr;
1734         int initial_len = len;
1735
1736         wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1737         adr += chip->start;
1738         initial_adr = adr;
1739         cmd_adr = adr & ~(wbufsize-1);
1740
1741         /* Sharp LH28F640BF chips need the first address for the
1742          * Page Buffer Program command. See Table 5 of
1743          * LH28F320BF, LH28F640BF, LH28F128BF Series (Appendix FUM00701) */
1744         if (is_LH28F640BF(cfi))
1745                 cmd_adr = adr;
1746
1747         /* Let's determine this according to the interleave only once */
1748         write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0xe8) : CMD(0xe9);
1749
1750         mutex_lock(&chip->mutex);
1751         ret = get_chip(map, chip, cmd_adr, FL_WRITING);
1752         if (ret) {
1753                 mutex_unlock(&chip->mutex);
1754                 return ret;
1755         }
1756
1757         XIP_INVAL_CACHED_RANGE(map, initial_adr, initial_len);
1758         ENABLE_VPP(map);
1759         xip_disable(map, chip, cmd_adr);
1760
1761         /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
1762            [...], the device will not accept any more Write to Buffer commands".
1763            So we must check here and reset those bits if they're set. Otherwise
1764            we're just pissing in the wind */
1765         if (chip->state != FL_STATUS) {
1766                 map_write(map, CMD(0x70), cmd_adr);
1767                 chip->state = FL_STATUS;
1768         }
1769         status = map_read(map, cmd_adr);
1770         if (map_word_bitsset(map, status, CMD(0x30))) {
1771                 xip_enable(map, chip, cmd_adr);
1772                 printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
1773                 xip_disable(map, chip, cmd_adr);
1774                 map_write(map, CMD(0x50), cmd_adr);
1775                 map_write(map, CMD(0x70), cmd_adr);
1776         }
1777
1778         chip->state = FL_WRITING_TO_BUFFER;
1779         map_write(map, write_cmd, cmd_adr);
1780         ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0, 0);
1781         if (ret) {
1782                 /* Argh. Not ready for write to buffer */
1783                 map_word Xstatus = map_read(map, cmd_adr);
1784                 map_write(map, CMD(0x70), cmd_adr);
1785                 chip->state = FL_STATUS;
1786                 status = map_read(map, cmd_adr);
1787                 map_write(map, CMD(0x50), cmd_adr);
1788                 map_write(map, CMD(0x70), cmd_adr);
1789                 xip_enable(map, chip, cmd_adr);
1790                 printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
1791                                 map->name, Xstatus.x[0], status.x[0]);
1792                 goto out;
1793         }
1794
1795         /* Figure out the number of words to write */
1796         word_gap = (-adr & (map_bankwidth(map)-1));
1797         words = DIV_ROUND_UP(len - word_gap, map_bankwidth(map));
1798         if (!word_gap) {
1799                 words--;
1800         } else {
1801                 word_gap = map_bankwidth(map) - word_gap;
1802                 adr -= word_gap;
1803                 datum = map_word_ff(map);
1804         }
1805
1806         /* Write the word count of the data to come (the device expects N-1) */
1807         map_write(map, CMD(words), cmd_adr);
1808
1809         /* Write data */
1810         vec = *pvec;
1811         vec_seek = *pvec_seek;
1812         do {
1813                 int n = map_bankwidth(map) - word_gap;
1814                 if (n > vec->iov_len - vec_seek)
1815                         n = vec->iov_len - vec_seek;
1816                 if (n > len)
1817                         n = len;
1818
1819                 if (!word_gap && len < map_bankwidth(map))
1820                         datum = map_word_ff(map);
1821
1822                 datum = map_word_load_partial(map, datum,
1823                                               vec->iov_base + vec_seek,
1824                                               word_gap, n);
1825
1826                 len -= n;
1827                 word_gap += n;
1828                 if (!len || word_gap == map_bankwidth(map)) {
1829                         map_write(map, datum, adr);
1830                         adr += map_bankwidth(map);
1831                         word_gap = 0;
1832                 }
1833
1834                 vec_seek += n;
1835                 if (vec_seek == vec->iov_len) {
1836                         vec++;
1837                         vec_seek = 0;
1838                 }
1839         } while (len);
1840         *pvec = vec;
1841         *pvec_seek = vec_seek;
1842
1843         /* GO GO GO */
1844         map_write(map, CMD(0xd0), cmd_adr);
1845         chip->state = FL_WRITING;
1846
1847         ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
1848                                    initial_adr, initial_len,
1849                                    chip->buffer_write_time,
1850                                    chip->buffer_write_time_max);
1851         if (ret) {
1852                 map_write(map, CMD(0x70), cmd_adr);
1853                 chip->state = FL_STATUS;
1854                 xip_enable(map, chip, cmd_adr);
1855                 printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
1856                 goto out;
1857         }
1858
1859         /* check for errors */
1860         status = map_read(map, cmd_adr);
1861         if (map_word_bitsset(map, status, CMD(0x1a))) {
1862                 unsigned long chipstatus = MERGESTATUS(status);
1863
1864                 /* reset status */
1865                 map_write(map, CMD(0x50), cmd_adr);
1866                 map_write(map, CMD(0x70), cmd_adr);
1867                 xip_enable(map, chip, cmd_adr);
1868
1869                 if (chipstatus & 0x02) {
1870                         ret = -EROFS;
1871                 } else if (chipstatus & 0x08) {
1872                         printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
1873                         ret = -EIO;
1874                 } else {
1875                         printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
1876                         ret = -EINVAL;
1877                 }
1878
1879                 goto out;
1880         }
1881
1882         xip_enable(map, chip, cmd_adr);
1883  out:   DISABLE_VPP(map);
1884         put_chip(map, chip, cmd_adr);
1885         mutex_unlock(&chip->mutex);
1886         return ret;
1887 }
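
/*
 * Illustrative sketch (hypothetical helper, not called anywhere; the
 * name and poll bounds are invented for this sketch): the bare
 * Write-to-Buffer sequence behind do_write_buffer(), for a single bus
 * word and without the locking/XIP/Vpp handling.  The word count
 * written after 0xe8 is encoded as N-1, matching the "words"
 * computation above.
 */
static int __maybe_unused example_buffer_write(struct map_info *map,
					       unsigned long cmd_adr,
					       map_word datum)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, ready = CMD(0x80);
	int loops = 100000;

	map_write(map, CMD(0xe8), cmd_adr);	/* Write to Buffer setup */
	do {				/* XSR.7: wait for a free buffer */
		if (!loops--)
			return -ETIME;
		status = map_read(map, cmd_adr);
	} while (!map_word_andequal(map, status, ready, ready));

	map_write(map, CMD(0), cmd_adr);	/* word count: 1 word, N-1 == 0 */
	map_write(map, datum, cmd_adr);		/* the buffer data */
	map_write(map, CMD(0xd0), cmd_adr);	/* Confirm: start programming */

	loops = 100000;
	do {					/* poll SR.7 for completion */
		if (!loops--)
			return -ETIME;
		status = map_read(map, cmd_adr);
	} while (!map_word_andequal(map, status, ready, ready));

	return map_word_bitsset(map, status, CMD(0x1a)) ? -EIO : 0;
}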
1888
1889 static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
1890                                 unsigned long count, loff_t to, size_t *retlen)
1891 {
1892         struct map_info *map = mtd->priv;
1893         struct cfi_private *cfi = map->fldrv_priv;
1894         int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1895         int ret;
1896         int chipnum;
1897         unsigned long ofs, vec_seek, i;
1898         size_t len = 0;
1899
1900         for (i = 0; i < count; i++)
1901                 len += vecs[i].iov_len;
1902
1903         if (!len)
1904                 return 0;
1905
1906         chipnum = to >> cfi->chipshift;
1907         ofs = to - (chipnum << cfi->chipshift);
1908         vec_seek = 0;
1909
1910         do {
1911                 /* We must not cross write block boundaries */
1912                 int size = wbufsize - (ofs & (wbufsize-1));
1913
1914                 if (size > len)
1915                         size = len;
1916                 ret = do_write_buffer(map, &cfi->chips[chipnum],
1917                                       ofs, &vecs, &vec_seek, size);
1918                 if (ret)
1919                         return ret;
1920
1921                 ofs += size;
1922                 (*retlen) += size;
1923                 len -= size;
1924
1925                 if (ofs >> cfi->chipshift) {
1926                         chipnum++;
1927                         ofs = 0;
1928                         if (chipnum == cfi->numchips)
1929                                 return 0;
1930                 }
1931
1932                 /* Be nice and reschedule with the chip in a usable state for other
1933                    processes. */
1934                 cond_resched();
1935
1936         } while (len);
1937
1938         return 0;
1939 }
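
/*
 * A worked example of the boundary splitting above (assuming a
 * 32-byte write buffer): a 100-byte write at to == 0x1c is issued as
 * chunks of 4, 32, 32 and 32 bytes, so no single do_write_buffer()
 * call ever crosses a wbufsize-aligned boundary.
 */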
1940
1941 static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
1942                                        size_t len, size_t *retlen, const u_char *buf)
1943 {
1944         struct kvec vec;
1945
1946         vec.iov_base = (void *) buf;
1947         vec.iov_len = len;
1948
1949         return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
1950 }
1951
1952 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1953                                       unsigned long adr, int len, void *thunk)
1954 {
1955         struct cfi_private *cfi = map->fldrv_priv;
1956         map_word status;
1957         int retries = 3;
1958         int ret;
1959
1960         adr += chip->start;
1961
1962  retry:
1963         mutex_lock(&chip->mutex);
1964         ret = get_chip(map, chip, adr, FL_ERASING);
1965         if (ret) {
1966                 mutex_unlock(&chip->mutex);
1967                 return ret;
1968         }
1969
1970         XIP_INVAL_CACHED_RANGE(map, adr, len);
1971         ENABLE_VPP(map);
1972         xip_disable(map, chip, adr);
1973
1974         /* Clear the status register first */
1975         map_write(map, CMD(0x50), adr);
1976
1977         /* Now erase */
1978         map_write(map, CMD(0x20), adr);
1979         map_write(map, CMD(0xD0), adr);
1980         chip->state = FL_ERASING;
1981         chip->erase_suspended = 0;
1982         chip->in_progress_block_addr = adr;
1983         chip->in_progress_block_mask = ~(len - 1);
1984
1985         ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1986                                    adr, len,
1987                                    chip->erase_time,
1988                                    chip->erase_time_max);
1989         if (ret) {
1990                 map_write(map, CMD(0x70), adr);
1991                 chip->state = FL_STATUS;
1992                 xip_enable(map, chip, adr);
1993                 printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
1994                 goto out;
1995         }
1996
1997         /* We've broken this before. It doesn't hurt to be safe */
1998         map_write(map, CMD(0x70), adr);
1999         chip->state = FL_STATUS;
2000         status = map_read(map, adr);
2001
2002         /* check for errors */
2003         if (map_word_bitsset(map, status, CMD(0x3a))) {
2004                 unsigned long chipstatus = MERGESTATUS(status);
2005
2006                 /* Reset the error bits */
2007                 map_write(map, CMD(0x50), adr);
2008                 map_write(map, CMD(0x70), adr);
2009                 xip_enable(map, chip, adr);
2010
2011                 if ((chipstatus & 0x30) == 0x30) {
2012                         printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
2013                         ret = -EINVAL;
2014                 } else if (chipstatus & 0x02) {
2015                         /* Protection bit set */
2016                         ret = -EROFS;
2017                 } else if (chipstatus & 0x08) {
2018                         /* Voltage */
2019                         printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
2020                         ret = -EIO;
2021                 } else if (chipstatus & 0x20 && retries--) {
2022                         printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
2023                         DISABLE_VPP(map);
2024                         put_chip(map, chip, adr);
2025                         mutex_unlock(&chip->mutex);
2026                         goto retry;
2027                 } else {
2028                         printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
2029                         ret = -EIO;
2030                 }
2031
2032                 goto out;
2033         }
2034
2035         xip_enable(map, chip, adr);
2036  out:   DISABLE_VPP(map);
2037         put_chip(map, chip, adr);
2038         mutex_unlock(&chip->mutex);
2039         return ret;
2040 }
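
/*
 * Illustrative sketch (hypothetical helper, not called anywhere; the
 * name and poll bound are invented for this sketch): the bare Block
 * Erase sequence behind do_erase_oneblock(), without the suspend,
 * retry and locking machinery.  The 0x3a mask covers
 * SR.5|SR.4|SR.3|SR.1 exactly as decoded above.
 */
static int __maybe_unused example_block_erase(struct map_info *map,
					      unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;
	map_word status, ready = CMD(0x80);
	int loops = 10000000;

	map_write(map, CMD(0x50), adr);		/* Clear Status Register */
	map_write(map, CMD(0x20), adr);		/* Block Erase setup */
	map_write(map, CMD(0xd0), adr);		/* Confirm */
	do {				/* erase may take whole seconds */
		if (!loops--)
			return -ETIME;
		status = map_read(map, adr);
	} while (!map_word_andequal(map, status, ready, ready));

	return map_word_bitsset(map, status, CMD(0x3a)) ? -EIO : 0;
}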
2041
2042 static int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
2043 {
2044         return cfi_varsize_frob(mtd, do_erase_oneblock, instr->addr,
2045                                 instr->len, NULL);
2046 }
2047
2048 static void cfi_intelext_sync (struct mtd_info *mtd)
2049 {
2050         struct map_info *map = mtd->priv;
2051         struct cfi_private *cfi = map->fldrv_priv;
2052         int i;
2053         struct flchip *chip;
2054         int ret = 0;
2055
2056         for (i = 0; !ret && i < cfi->numchips; i++) {
2057                 chip = &cfi->chips[i];
2058
2059                 mutex_lock(&chip->mutex);
2060                 ret = get_chip(map, chip, chip->start, FL_SYNCING);
2061
2062                 if (!ret) {
2063                         chip->oldstate = chip->state;
2064                         chip->state = FL_SYNCING;
2065                         /* No need to wake_up() on this state change -
2066                          * as the whole point is that nobody can do anything
2067                          * with the chip now anyway.
2068                          */
2069                 }
2070                 mutex_unlock(&chip->mutex);
2071         }
2072
2073         /* Unlock the chips again */
2074
2075         for (i--; i >= 0; i--) {
2076                 chip = &cfi->chips[i];
2077
2078                 mutex_lock(&chip->mutex);
2079
2080                 if (chip->state == FL_SYNCING) {
2081                         chip->state = chip->oldstate;
2082                         chip->oldstate = FL_READY;
2083                         wake_up(&chip->wq);
2084                 }
2085                 mutex_unlock(&chip->mutex);
2086         }
2087 }
2088
2089 static int __xipram do_getlockstatus_oneblock(struct map_info *map,
2090                                                 struct flchip *chip,
2091                                                 unsigned long adr,
2092                                                 int len, void *thunk)
2093 {
2094         struct cfi_private *cfi = map->fldrv_priv;
2095         int status, ofs_factor = cfi->interleave * cfi->device_type;
2096
2097         adr += chip->start;
2098         xip_disable(map, chip, adr+(2*ofs_factor));
2099         map_write(map, CMD(0x90), adr+(2*ofs_factor));
2100         chip->state = FL_JEDEC_QUERY;
2101         status = cfi_read_query(map, adr+(2*ofs_factor));
2102         xip_enable(map, chip, 0);
2103         return status;
2104 }
2105
2106 #ifdef DEBUG_LOCK_BITS
2107 static int __xipram do_printlockstatus_oneblock(struct map_info *map,
2108                                                 struct flchip *chip,
2109                                                 unsigned long adr,
2110                                                 int len, void *thunk)
2111 {
2112         printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
2113                adr, do_getlockstatus_oneblock(map, chip, adr, len, thunk));
2114         return 0;
2115 }
2116 #endif
2117
2118 #define DO_XXLOCK_ONEBLOCK_LOCK         ((void *) 1)
2119 #define DO_XXLOCK_ONEBLOCK_UNLOCK       ((void *) 2)
2120
2121 static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
2122                                        unsigned long adr, int len, void *thunk)
2123 {
2124         struct cfi_private *cfi = map->fldrv_priv;
2125         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2126         int mdelay;
2127         int ret;
2128
2129         adr += chip->start;
2130
2131         mutex_lock(&chip->mutex);
2132         ret = get_chip(map, chip, adr, FL_LOCKING);
2133         if (ret) {
2134                 mutex_unlock(&chip->mutex);
2135                 return ret;
2136         }
2137
2138         ENABLE_VPP(map);
2139         xip_disable(map, chip, adr);
2140
2141         map_write(map, CMD(0x60), adr);
2142         if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
2143                 map_write(map, CMD(0x01), adr);
2144                 chip->state = FL_LOCKING;
2145         } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
2146                 map_write(map, CMD(0xD0), adr);
2147                 chip->state = FL_UNLOCKING;
2148         } else
2149                 BUG();
2150
2151         /*
2152          * If Instant Individual Block Locking is supported then there is
2153          * no need to delay.
2154          */
2155         /*
2156          * Unlocking may take up to 1.4 seconds on some Intel flashes, so
2157          * let's use a max of 1.5 seconds (1500 ms) as the timeout.
2158          *
2159          * See "Clear Block Lock-Bits Time" on page 40 in
2160          * "3 Volt Intel StrataFlash Memory" 28F128J3, 28F640J3, 28F320J3
2161          * manual from February 2003.
2162          */
2163         mdelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1500 : 0;
2164
2165         ret = WAIT_TIMEOUT(map, chip, adr, mdelay, mdelay * 1000);
2166         if (ret) {
2167                 map_write(map, CMD(0x70), adr);
2168                 chip->state = FL_STATUS;
2169                 xip_enable(map, chip, adr);
2170                 printk(KERN_ERR "%s: block lock/unlock error: (status timeout)\n", map->name);
2171                 goto out;
2172         }
2173
2174         xip_enable(map, chip, adr);
2175  out:   DISABLE_VPP(map);
2176         put_chip(map, chip, adr);
2177         mutex_unlock(&chip->mutex);
2178         return ret;
2179 }
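
/*
 * Command summary for the helper above: both directions start with
 * 0x60 (Lock Setup); 0x01 confirms Set Block Lock-Bit and 0xd0
 * confirms Clear Block Lock-Bits.  On older J3-style parts the clear
 * operation rewrites the lock bits of every block, hence the 1.4 s
 * worst case quoted above, while parts advertising Instant Individual
 * Block Locking (FeatureSupport bit 5) complete with no delay.
 */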
2180
2181 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2182 {
2183         int ret;
2184
2185 #ifdef DEBUG_LOCK_BITS
2186         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08llx\n",
2187                __func__, ofs, (unsigned long long)len);
2188         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2189                 ofs, len, NULL);
2190 #endif
2191
2192         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2193                 ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
2194
2195 #ifdef DEBUG_LOCK_BITS
2196         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2197                __func__, ret);
2198         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2199                 ofs, len, NULL);
2200 #endif
2201
2202         return ret;
2203 }
2204
2205 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
2206 {
2207         int ret;
2208
2209 #ifdef DEBUG_LOCK_BITS
2210         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%08llx\n",
2211                __func__, ofs, (unsigned long long)len);
2212         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2213                 ofs, len, NULL);
2214 #endif
2215
2216         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
2217                                         ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
2218
2219 #ifdef DEBUG_LOCK_BITS
2220         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
2221                __func__, ret);
2222         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
2223                 ofs, len, NULL);
2224 #endif
2225
2226         return ret;
2227 }
2228
2229 static int cfi_intelext_is_locked(struct mtd_info *mtd, loff_t ofs,
2230                                   uint64_t len)
2231 {
2232         return cfi_varsize_frob(mtd, do_getlockstatus_oneblock,
2233                                 ofs, len, NULL) ? 1 : 0;
2234 }
2235
2236 #ifdef CONFIG_MTD_OTP
2237
2238 typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
2239                         u_long data_offset, u_char *buf, u_int size,
2240                         u_long prot_offset, u_int groupno, u_int groupsize);
2241
2242 static int __xipram
2243 do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
2244             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2245 {
2246         struct cfi_private *cfi = map->fldrv_priv;
2247         int ret;
2248
2249         mutex_lock(&chip->mutex);
2250         ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
2251         if (ret) {
2252                 mutex_unlock(&chip->mutex);
2253                 return ret;
2254         }
2255
2256         /* let's ensure we're not reading back cached data from array mode */
2257         INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2258
2259         xip_disable(map, chip, chip->start);
2260         if (chip->state != FL_JEDEC_QUERY) {
2261                 map_write(map, CMD(0x90), chip->start);
2262                 chip->state = FL_JEDEC_QUERY;
2263         }
2264         map_copy_from(map, buf, chip->start + offset, size);
2265         xip_enable(map, chip, chip->start);
2266
2267         /* then ensure we don't keep OTP data in the cache */
2268         INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
2269
2270         put_chip(map, chip, chip->start);
2271         mutex_unlock(&chip->mutex);
2272         return 0;
2273 }
2274
2275 static int
2276 do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
2277              u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2278 {
2279         int ret;
2280
2281         while (size) {
2282                 unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
2283                 int gap = offset - bus_ofs;
2284                 int n = min_t(int, size, map_bankwidth(map)-gap);
2285                 map_word datum = map_word_ff(map);
2286
2287                 datum = map_word_load_partial(map, datum, buf, gap, n);
2288                 ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
2289                 if (ret)
2290                         return ret;
2291
2292                 offset += n;
2293                 buf += n;
2294                 size -= n;
2295         }
2296
2297         return 0;
2298 }
2299
2300 static int
2301 do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
2302             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2303 {
2304         struct cfi_private *cfi = map->fldrv_priv;
2305         map_word datum;
2306
2307         /* make sure area matches group boundaries */
2308         if (size != grpsz)
2309                 return -EXDEV;
2310
2311         datum = map_word_ff(map);
2312         datum = map_word_clr(map, datum, CMD(1 << grpno));
2313         return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
2314 }
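
/*
 * A worked example for the helper above: locking protection group 2
 * programs the Protection Lock Register with all bits set except
 * bit 2 (map_word_clr() of CMD(1 << 2)).  Since OTP bits can only
 * ever go from 1 to 0, clearing a group's lock bit is irreversible.
 */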
2315
2316 static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
2317                                  size_t *retlen, u_char *buf,
2318                                  otp_op_t action, int user_regs)
2319 {
2320         struct map_info *map = mtd->priv;
2321         struct cfi_private *cfi = map->fldrv_priv;
2322         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2323         struct flchip *chip;
2324         struct cfi_intelext_otpinfo *otp;
2325         u_long devsize, reg_prot_offset, data_offset;
2326         u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
2327         u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
2328         int ret;
2329
2330         *retlen = 0;
2331
2332         /* Check that we actually have some OTP registers */
2333         if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
2334                 return -ENODATA;
2335
2336         /* we need real chips here, not virtual ones */
2337         devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
2338         chip_step = devsize >> cfi->chipshift;
2339         chip_num = 0;
2340
2341         /* Some chips have OTP located in the _top_ partition only.
2342            For example: Intel 28F256L18T (T means top-parameter device) */
2343         if (cfi->mfr == CFI_MFR_INTEL) {
2344                 switch (cfi->id) {
2345                 case 0x880b:
2346                 case 0x880c:
2347                 case 0x880d:
2348                         chip_num = chip_step - 1;
2349                 }
2350         }
2351
2352         for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
2353                 chip = &cfi->chips[chip_num];
2354                 otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];
2355
2356                 /* first OTP region */
2357                 field = 0;
2358                 reg_prot_offset = extp->ProtRegAddr;
2359                 reg_fact_groups = 1;
2360                 reg_fact_size = 1 << extp->FactProtRegSize;
2361                 reg_user_groups = 1;
2362                 reg_user_size = 1 << extp->UserProtRegSize;
2363
2364                 while (len > 0) {
2365                         /* flash geometry fixup */
2366                         data_offset = reg_prot_offset + 1;
2367                         data_offset *= cfi->interleave * cfi->device_type;
2368                         reg_prot_offset *= cfi->interleave * cfi->device_type;
2369                         reg_fact_size *= cfi->interleave;
2370                         reg_user_size *= cfi->interleave;
2371
2372                         if (user_regs) {
2373                                 groups = reg_user_groups;
2374                                 groupsize = reg_user_size;
2375                                 /* skip over factory reg area */
2376                                 groupno = reg_fact_groups;
2377                                 data_offset += reg_fact_groups * reg_fact_size;
2378                         } else {
2379                                 groups = reg_fact_groups;
2380                                 groupsize = reg_fact_size;
2381                                 groupno = 0;
2382                         }
2383
2384                         while (len > 0 && groups > 0) {
2385                                 if (!action) {
2386                                         /*
2387                                          * Special case: if action is NULL
2388                                          * we fill buf with otp_info records.
2389                                          */
2390                                         struct otp_info *otpinfo;
2391                                         map_word lockword;
2392                                         if (len <= sizeof(struct otp_info))
2393                                                 return -ENOSPC;
2394                                         len -= sizeof(struct otp_info);
2395                                         ret = do_otp_read(map, chip,
2396                                                           reg_prot_offset,
2397                                                           (u_char *)&lockword,
2398                                                           map_bankwidth(map),
2399                                                           0, 0,  0);
2400                                         if (ret)
2401                                                 return ret;
2402                                         otpinfo = (struct otp_info *)buf;
2403                                         otpinfo->start = from;
2404                                         otpinfo->length = groupsize;
2405                                         otpinfo->locked =
2406                                            !map_word_bitsset(map, lockword,
2407                                                              CMD(1 << groupno));
2408                                         from += groupsize;
2409                                         buf += sizeof(*otpinfo);
2410                                         *retlen += sizeof(*otpinfo);
2411                                 } else if (from >= groupsize) {
2412                                         from -= groupsize;
2413                                         data_offset += groupsize;
2414                                 } else {
2415                                         int size = groupsize;
2416                                         data_offset += from;
2417                                         size -= from;
2418                                         from = 0;
2419                                         if (size > len)
2420                                                 size = len;
2421                                         ret = action(map, chip, data_offset,
2422                                                      buf, size, reg_prot_offset,
2423                                                      groupno, groupsize);
2424                                         if (ret < 0)
2425                                                 return ret;
2426                                         buf += size;
2427                                         len -= size;
2428                                         *retlen += size;
2429                                         data_offset += size;
2430                                 }
2431                                 groupno++;
2432                                 groups--;
2433                         }
2434
2435                         /* next OTP region */
2436                         if (++field == extp->NumProtectionFields)
2437                                 break;
2438                         reg_prot_offset = otp->ProtRegAddr;
2439                         reg_fact_groups = otp->FactGroups;
2440                         reg_fact_size = 1 << otp->FactProtRegSize;
2441                         reg_user_groups = otp->UserGroups;
2442                         reg_user_size = 1 << otp->UserProtRegSize;
2443                         otp++;
2444                 }
2445         }
2446
2447         return 0;
2448 }
2449
2450 static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
2451                                            size_t len, size_t *retlen,
2452                                            u_char *buf)
2453 {
2454         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2455                                      buf, do_otp_read, 0);
2456 }
2457
2458 static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
2459                                            size_t len, size_t *retlen,
2460                                            u_char *buf)
2461 {
2462         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2463                                      buf, do_otp_read, 1);
2464 }
2465
2466 static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
2467                                             size_t len, size_t *retlen,
2468                                             const u_char *buf)
2469 {
2470         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2471                                      (u_char *)buf, do_otp_write, 1);
2472 }
2473
2474 static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
2475                                            loff_t from, size_t len)
2476 {
2477         size_t retlen;
2478         return cfi_intelext_otp_walk(mtd, from, len, &retlen,
2479                                      NULL, do_otp_lock, 1);
2480 }
2481
2482 static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd, size_t len,
2483                                            size_t *retlen, struct otp_info *buf)
2485 {
2486         return cfi_intelext_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
2487                                      NULL, 0);
2488 }
2489
2490 static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd, size_t len,
2491                                            size_t *retlen, struct otp_info *buf)
2492 {
2493         return cfi_intelext_otp_walk(mtd, 0, len, retlen, (u_char *)buf,
2494                                      NULL, 1);
2495 }
2496
2497 #endif
2498
2499 static void cfi_intelext_save_locks(struct mtd_info *mtd)
2500 {
2501         struct mtd_erase_region_info *region;
2502         int block, status, i;
2503         unsigned long adr;
2504         size_t len;
2505
2506         for (i = 0; i < mtd->numeraseregions; i++) {
2507                 region = &mtd->eraseregions[i];
2508                 if (!region->lockmap)
2509                         continue;
2510
2511                 for (block = 0; block < region->numblocks; block++) {
2512                         len = region->erasesize;
2513                         adr = region->offset + block * len;
2514
2515                         status = cfi_varsize_frob(mtd,
2516                                         do_getlockstatus_oneblock, adr, len, NULL);
2517                         if (status)
2518                                 set_bit(block, region->lockmap);
2519                         else
2520                                 clear_bit(block, region->lockmap);
2521                 }
2522         }
2523 }
2524
2525 static int cfi_intelext_suspend(struct mtd_info *mtd)
2526 {
2527         struct map_info *map = mtd->priv;
2528         struct cfi_private *cfi = map->fldrv_priv;
2529         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2530         int i;
2531         struct flchip *chip;
2532         int ret = 0;
2533
2534         if ((mtd->flags & MTD_POWERUP_LOCK)
2535             && extp && (extp->FeatureSupport & (1 << 5)))
2536                 cfi_intelext_save_locks(mtd);
2537
2538         for (i = 0; !ret && i < cfi->numchips; i++) {
2539                 chip = &cfi->chips[i];
2540
2541                 mutex_lock(&chip->mutex);
2542
2543                 switch (chip->state) {
2544                 case FL_READY:
2545                 case FL_STATUS:
2546                 case FL_CFI_QUERY:
2547                 case FL_JEDEC_QUERY:
2548                         if (chip->oldstate == FL_READY) {
2549                                 /* place the chip in a known state before suspend */
2550                                 map_write(map, CMD(0xFF), cfi->chips[i].start);
2551                                 chip->oldstate = chip->state;
2552                                 chip->state = FL_PM_SUSPENDED;
2553                                 /* No need to wake_up() on this state change -
2554                                  * as the whole point is that nobody can do anything
2555                                  * with the chip now anyway.
2556                                  */
2557                         } else {
2558                                 /* There seems to be an operation pending. We must wait for it to finish before we can suspend. */
2559                                 printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
2560                                 ret = -EAGAIN;
2561                         }
2562                         break;
2563                 default:
2564                         /* Should we actually wait? Once upon a time these routines weren't
2565                            allowed to. Or should we return -EAGAIN, because the upper layers
2566                            ought to have already shut down anything which was using the device
2567                            anyway? The latter for now. */
2568                         printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
2569                         ret = -EAGAIN;
2570                         break;
2571                 case FL_PM_SUSPENDED:
2572                         break;
2573                 }
2574                 mutex_unlock(&chip->mutex);
2575         }
2576
2577         /* Unlock the chips again */
2578
2579         if (ret) {
2580                 for (i--; i >= 0; i--) {
2581                         chip = &cfi->chips[i];
2582
2583                         mutex_lock(&chip->mutex);
2584
2585                         if (chip->state == FL_PM_SUSPENDED) {
2586                                 /* No need to force it into a known state here,
2587                                    because we're returning failure, and it didn't
2588                                    get power cycled */
2589                                 chip->state = chip->oldstate;
2590                                 chip->oldstate = FL_READY;
2591                                 wake_up(&chip->wq);
2592                         }
2593                         mutex_unlock(&chip->mutex);
2594                 }
2595         }
2596
2597         return ret;
2598 }

/*
 * Re-apply the locking state recorded before suspend: a clear lockmap
 * bit means the block was unlocked then, so unlock it again now that a
 * possible power cycle has re-locked every block.
 */
static void cfi_intelext_restore_locks(struct mtd_info *mtd)
{
	struct mtd_erase_region_info *region;
	int block, i;
	unsigned long adr;
	size_t len;

	for (i = 0; i < mtd->numeraseregions; i++) {
		region = &mtd->eraseregions[i];
		if (!region->lockmap)
			continue;

		for_each_clear_bit(block, region->lockmap, region->numblocks) {
			len = region->erasesize;
			adr = region->offset + block * len;
			cfi_intelext_unlock(mtd, adr, len);
		}
	}
}
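
/*
 * For orientation, a minimal sketch of the bookkeeping the save side
 * (cfi_intelext_save_locks(), earlier in this file) is assumed to
 * perform per erase region at suspend time; illustrative only:
 *
 *	for (block = 0; block < region->numblocks; block++) {
 *		adr = region->offset + block * region->erasesize;
 *		if (cfi_intelext_is_locked(mtd, adr, region->erasesize))
 *			set_bit(block, region->lockmap);
 *		else
 *			clear_bit(block, region->lockmap);
 *	}
 */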

static void cfi_intelext_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_intelext *extp = cfi->cmdset_priv;
	int i;
	struct flchip *chip;

	for (i = 0; i < cfi->numchips; i++) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		/* Go to known state. Chip may have been power cycled */
		if (chip->state == FL_PM_SUSPENDED) {
			/* Refresh LH28F640BF Partition Config. Register */
			fixup_LH28F640BF(mtd);
			map_write(map, CMD(0xFF), chip->start);
			chip->oldstate = chip->state = FL_READY;
			wake_up(&chip->wq);
		}

		mutex_unlock(&chip->mutex);
	}

	/* FeatureSupport bit 5: instant individual block locking */
	if ((mtd->flags & MTD_POWERUP_LOCK)
	    && extp && (extp->FeatureSupport & (1 << 5)))
		cfi_intelext_restore_locks(mtd);
}
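
/*
 * The two handlers above are wired up when the command set is
 * initialized. A minimal sketch, assuming the conventional mtd_info
 * hook names (shown for illustration, not quoted from the setup code):
 *
 *	mtd->_suspend = cfi_intelext_suspend;
 *	mtd->_resume  = cfi_intelext_resume;
 */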

static int cfi_intelext_reset(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i, ret;

	for (i = 0; i < cfi->numchips; i++) {
		struct flchip *chip = &cfi->chips[i];

		/* Force the completion of any ongoing operation and
		 * switch to array mode so any bootloader in flash is
		 * accessible for soft reboot.
		 */
		mutex_lock(&chip->mutex);
		ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
		if (!ret) {
			map_write(map, CMD(0xff), chip->start);
			chip->state = FL_SHUTDOWN;
			put_chip(map, chip, chip->start);
		}
		mutex_unlock(&chip->mutex);
	}

	return 0;
}

static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
			       void *v)
{
	struct mtd_info *mtd;

	mtd = container_of(nb, struct mtd_info, reboot_notifier);
	cfi_intelext_reset(mtd);
	return NOTIFY_DONE;
}
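
/*
 * The notifier itself is assumed to be registered on the kernel reboot
 * chain during setup, along the lines of this illustrative sketch:
 *
 *	mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;
 *	register_reboot_notifier(&mtd->reboot_notifier);
 *
 * cfi_intelext_destroy() below unregisters it again on teardown.
 */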

static void cfi_intelext_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_erase_region_info *region;
	int i;

	cfi_intelext_reset(mtd);
	unregister_reboot_notifier(&mtd->reboot_notifier);
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi->chips[0].priv);
	kfree(cfi);
	for (i = 0; i < mtd->numeraseregions; i++) {
		region = &mtd->eraseregions[i];
		kfree(region->lockmap);
	}
	kfree(mtd->eraseregions);
}

MODULE_LICENSE("GPL");
MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
MODULE_ALIAS("cfi_cmdset_0003");
MODULE_ALIAS("cfi_cmdset_0200");