/*
 * Common Flash Interface support:
 *   Intel Extended Vendor Command Set (ID 0x0001)
 *
 * (C) 2000 Red Hat. GPL'd
 *
 * $Id: cfi_cmdset_0001.c,v 1.186 2005/11/23 22:07:52 nico Exp $
 *
 *
 * 10/10/2000   Nicolas Pitre <nico@cam.org>
 *      - completely revamped method functions so they are aware and
 *        independent of the flash geometry (buswidth, interleave, etc.)
 *      - scalability vs code size is completely set at compile-time
 *        (see include/linux/mtd/cfi.h for selection)
 *      - optimized write buffer method
 * 02/05/2002   Christopher Hoover <ch@hpl.hp.com>/<ch@murgatroid.com>
 *      - reworked lock/unlock/erase support for var size flash
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/mtd/xip.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
#include <linux/mtd/cfi.h>

/* #define CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE */
/* #define CMDSET0001_DISABLE_WRITE_SUSPEND */

/* debugging, turns off buffer write mode if set to 1 */
#define FORCE_WORD_WRITE 0

#define MANUFACTURER_INTEL      0x0089
#define I82802AB        0x00ad
#define I82802AC        0x00ac
#define MANUFACTURER_ST         0x0020
#define M50LPW080       0x002F

static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_intelext_writev(struct mtd_info *, const struct kvec *, unsigned long, loff_t, size_t *);
static int cfi_intelext_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_intelext_sync (struct mtd_info *);
static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len);
static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len);
#ifdef CONFIG_MTD_OTP
static int cfi_intelext_read_fact_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_read_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_write_user_prot_reg (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_intelext_lock_user_prot_reg (struct mtd_info *, loff_t, size_t);
static int cfi_intelext_get_fact_prot_info (struct mtd_info *,
                                            struct otp_info *, size_t);
static int cfi_intelext_get_user_prot_info (struct mtd_info *,
                                            struct otp_info *, size_t);
#endif
static int cfi_intelext_suspend (struct mtd_info *);
static void cfi_intelext_resume (struct mtd_info *);
static int cfi_intelext_reboot (struct notifier_block *, unsigned long, void *);

static void cfi_intelext_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0001(struct map_info *, int);

static struct mtd_info *cfi_intelext_setup (struct mtd_info *);
static int cfi_intelext_partition_fixup(struct mtd_info *, struct cfi_private **);

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len,
                     size_t *retlen, u_char **mtdbuf);
static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from,
                        size_t len);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);
#include "fwh_lock.h"



/*
 *  *********** SETUP AND PROBE BITS  ***********
 */

static struct mtd_chip_driver cfi_intelext_chipdrv = {
        .probe          = NULL, /* Not usable directly */
        .destroy        = cfi_intelext_destroy,
        .name           = "cfi_cmdset_0001",
        .module         = THIS_MODULE
};

/* #define DEBUG_LOCK_BITS */
/* #define DEBUG_CFI_FEATURES */

#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_intelext *extp)
{
        int i;
        printk("  Extended Query version %c.%c\n", extp->MajorVersion, extp->MinorVersion);
        printk("  Feature/Command Support:      %4.4X\n", extp->FeatureSupport);
        printk("     - Chip Erase:              %s\n", extp->FeatureSupport&1?"supported":"unsupported");
        printk("     - Suspend Erase:           %s\n", extp->FeatureSupport&2?"supported":"unsupported");
        printk("     - Suspend Program:         %s\n", extp->FeatureSupport&4?"supported":"unsupported");
        printk("     - Legacy Lock/Unlock:      %s\n", extp->FeatureSupport&8?"supported":"unsupported");
        printk("     - Queued Erase:            %s\n", extp->FeatureSupport&16?"supported":"unsupported");
        printk("     - Instant block lock:      %s\n", extp->FeatureSupport&32?"supported":"unsupported");
        printk("     - Protection Bits:         %s\n", extp->FeatureSupport&64?"supported":"unsupported");
        printk("     - Page-mode read:          %s\n", extp->FeatureSupport&128?"supported":"unsupported");
        printk("     - Synchronous read:        %s\n", extp->FeatureSupport&256?"supported":"unsupported");
        printk("     - Simultaneous operations: %s\n", extp->FeatureSupport&512?"supported":"unsupported");
        printk("     - Extended Flash Array:    %s\n", extp->FeatureSupport&1024?"supported":"unsupported");
        for (i=11; i<32; i++) {
                if (extp->FeatureSupport & (1<<i))
                        printk("     - Unknown Bit %X:      supported\n", i);
        }

        printk("  Supported functions after Suspend: %2.2X\n", extp->SuspendCmdSupport);
        printk("     - Program after Erase Suspend: %s\n", extp->SuspendCmdSupport&1?"supported":"unsupported");
        for (i=1; i<8; i++) {
                if (extp->SuspendCmdSupport & (1<<i))
                        printk("     - Unknown Bit %X:               supported\n", i);
        }

        printk("  Block Status Register Mask: %4.4X\n", extp->BlkStatusRegMask);
        printk("     - Lock Bit Active:      %s\n", extp->BlkStatusRegMask&1?"yes":"no");
        printk("     - Lock-Down Bit Active: %s\n", extp->BlkStatusRegMask&2?"yes":"no");
        for (i=2; i<4; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }
        printk("     - EFA Lock Bit:         %s\n", extp->BlkStatusRegMask&16?"yes":"no");
        printk("     - EFA Lock-Down Bit:    %s\n", extp->BlkStatusRegMask&32?"yes":"no");
        for (i=6; i<16; i++) {
                if (extp->BlkStatusRegMask & (1<<i))
                        printk("     - Unknown Bit %X Active: yes\n",i);
        }

        printk("  Vcc Logic Supply Optimum Program/Erase Voltage: %d.%d V\n",
               extp->VccOptimal >> 4, extp->VccOptimal & 0xf);
        if (extp->VppOptimal)
                printk("  Vpp Programming Supply Optimum Program/Erase Voltage: %d.%d V\n",
                       extp->VppOptimal >> 4, extp->VppOptimal & 0xf);
}
#endif

#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
/* Some Intel Strata Flash prior to FPO revision C has bugs in this area */
static void fixup_intel_strataflash(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        printk(KERN_WARNING "cfi_cmdset_0001: Suspend "
                            "erase on write disabled.\n");
        extp->SuspendCmdSupport &= ~1;
}
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
static void fixup_no_write_suspend(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

        if (cfip && (cfip->FeatureSupport&4)) {
                cfip->FeatureSupport &= ~4;
                printk(KERN_WARNING "cfi_cmdset_0001: write suspend disabled\n");
        }
}
#endif

static void fixup_st_m28w320ct(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        cfi->cfiq->BufWriteTimeoutTyp = 0;      /* Not supported */
        cfi->cfiq->BufWriteTimeoutMax = 0;      /* Not supported */
}

static void fixup_st_m28w320cb(struct mtd_info *mtd, void* param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;

        /* Note this is done after the region info is endian swapped */
        cfi->cfiq->EraseRegionInfo[1] =
                (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
}

static void fixup_use_point(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
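        /* point()/unpoint() hand callers a direct pointer into the
           mapped flash, which is only safe when the map is linear in
           the kernel's address space. */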
        if (!mtd->point && map_is_linear(map)) {
                mtd->point   = cfi_intelext_point;
                mtd->unpoint = cfi_intelext_unpoint;
        }
}

static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        if (cfi->cfiq->BufWriteTimeoutTyp) {
                printk(KERN_INFO "Using buffer write method\n" );
                mtd->write = cfi_intelext_write_buffers;
                mtd->writev = cfi_intelext_writev;
        }
}

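/*
 * Fixup tables are walked by cfi_fixup(): each entry names a
 * (manufacturer, device id) pair, with CFI_MFR_ANY/CFI_ID_ANY acting as
 * wildcards, and every matching fixup function is called in order until
 * the NULL sentinel entry terminates the table.
 */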
static struct cfi_fixup cfi_fixup_table[] = {
#ifdef CMDSET0001_DISABLE_ERASE_SUSPEND_ON_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_intel_strataflash, NULL },
#endif
#ifdef CMDSET0001_DISABLE_WRITE_SUSPEND
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_no_write_suspend, NULL },
#endif
#if !FORCE_WORD_WRITE
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL },
#endif
        { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct, NULL },
        { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb, NULL },
        { 0, 0, NULL, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
        { MANUFACTURER_INTEL, I82802AB,   fixup_use_fwh_lock, NULL, },
        { MANUFACTURER_INTEL, I82802AC,   fixup_use_fwh_lock, NULL, },
        { MANUFACTURER_ST,    M50LPW080,  fixup_use_fwh_lock, NULL, },
        { 0, 0, NULL, NULL }
};
static struct cfi_fixup fixup_table[] = {
        /* The CFI vendor IDs and the JEDEC vendor IDs appear
         * to be common, and it is likely that the device IDs
         * are as well.  This table is to pick all cases where
         * we know that is the case.
         */
        { CFI_MFR_ANY, CFI_ID_ANY, fixup_use_point, NULL },
        { 0, 0, NULL, NULL }
};

static inline struct cfi_pri_intelext *
read_pri_intelext(struct map_info *map, __u16 adr)
{
        struct cfi_pri_intelext *extp;
        unsigned int extp_size = sizeof(*extp);

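        /*
         * From version 1.3 on the extended query table is variable
         * length: read the fixed-size part first, walk the variable
         * fields to compute the real size, and re-read the whole table
         * if the initial buffer turns out to be too short (the
         * need_more/again logic below).
         */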
 again:
        extp = (struct cfi_pri_intelext *)cfi_read_pri(map, adr, extp_size, "Intel/Sharp");
        if (!extp)
                return NULL;

        if (extp->MajorVersion != '1' ||
            (extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
                printk(KERN_ERR "  Unknown Intel/Sharp Extended Query "
                       "version %c.%c.\n",  extp->MajorVersion,
                       extp->MinorVersion);
                kfree(extp);
                return NULL;
        }

        /* Do some byteswapping if necessary */
        extp->FeatureSupport = le32_to_cpu(extp->FeatureSupport);
        extp->BlkStatusRegMask = le16_to_cpu(extp->BlkStatusRegMask);
        extp->ProtRegAddr = le16_to_cpu(extp->ProtRegAddr);

        if (extp->MajorVersion == '1' && extp->MinorVersion >= '3') {
                unsigned int extra_size = 0;
                int nb_parts, i;

                /* Protection Register info */
                extra_size += (extp->NumProtectionFields - 1) *
                              sizeof(struct cfi_intelext_otpinfo);

                /* Burst Read info */
                extra_size += 2;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                extra_size += extp->extra[extra_size-1];

                /* Number of hardware-partitions */
                extra_size += 1;
                if (extp_size < sizeof(*extp) + extra_size)
                        goto need_more;
                nb_parts = extp->extra[extra_size - 1];

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        extra_size += 2;

                for (i = 0; i < nb_parts; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[extra_size];
                        extra_size += sizeof(*rinfo);
                        if (extp_size < sizeof(*extp) + extra_size)
                                goto need_more;
                        rinfo->NumIdentPartitions = le16_to_cpu(rinfo->NumIdentPartitions);
                        extra_size += (rinfo->NumBlockTypes - 1)
                                      * sizeof(struct cfi_intelext_blockinfo);
                }

                if (extp->MinorVersion >= '4')
                        extra_size += sizeof(struct cfi_intelext_programming_regioninfo);

                if (extp_size < sizeof(*extp) + extra_size) {
                        need_more:
                        extp_size = sizeof(*extp) + extra_size;
                        kfree(extp);
                        if (extp_size > 4096) {
                                printk(KERN_ERR
                                        "%s: cfi_pri_intelext is too fat\n",
                                        __FUNCTION__);
                                return NULL;
                        }
                        goto again;
                }
        }

        return extp;
}

struct mtd_info *cfi_cmdset_0001(struct map_info *map, int primary)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct mtd_info *mtd;
        int i;

        mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
        if (!mtd) {
                printk(KERN_ERR "Failed to allocate memory for MTD device\n");
                return NULL;
        }
        mtd->priv = map;
        mtd->type = MTD_NORFLASH;

        /* Fill in the default mtd operations */
        mtd->erase   = cfi_intelext_erase_varsize;
        mtd->read    = cfi_intelext_read;
        mtd->write   = cfi_intelext_write_words;
        mtd->sync    = cfi_intelext_sync;
        mtd->lock    = cfi_intelext_lock;
        mtd->unlock  = cfi_intelext_unlock;
        mtd->suspend = cfi_intelext_suspend;
        mtd->resume  = cfi_intelext_resume;
        mtd->flags   = MTD_CAP_NORFLASH;
        mtd->name    = map->name;
        mtd->writesize = 1;

        mtd->reboot_notifier.notifier_call = cfi_intelext_reboot;

        if (cfi->cfi_mode == CFI_MODE_CFI) {
                /*
                 * It's a real CFI chip, not one for which the probe
                 * routine faked a CFI structure. So we read the feature
                 * table from it.
                 */
                __u16 adr = primary ? cfi->cfiq->P_ADR : cfi->cfiq->A_ADR;
                struct cfi_pri_intelext *extp;

                extp = read_pri_intelext(map, adr);
                if (!extp) {
                        kfree(mtd);
                        return NULL;
                }

                /* Install our own private info structure */
                cfi->cmdset_priv = extp;

                cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
                /* Tell the user about it in lots of lovely detail */
                cfi_tell_features(extp);
#endif

                if (extp->SuspendCmdSupport & 1) {
                        printk(KERN_NOTICE "cfi_cmdset_0001: Erase suspend on write enabled\n");
                }
        } else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
                /* Apply JEDEC-specific fixups */
                cfi_fixup(mtd, jedec_fixup_table);
        }
        /* Apply generic fixups */
        cfi_fixup(mtd, fixup_table);

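        /*
         * CFI encodes the typical timeouts as powers of two: word and
         * buffer write timeouts are 2^n microseconds and the block
         * erase timeout is 2^n milliseconds, hence the 1000<< below.
         * A zero field means the chip does not specify one, so fall
         * back to generous defaults: 50ms per word write, 2s per erase.
         */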
        for (i=0; i < cfi->numchips; i++) {
                if (cfi->cfiq->WordWriteTimeoutTyp)
                        cfi->chips[i].word_write_time =
                                1<<cfi->cfiq->WordWriteTimeoutTyp;
                else
                        cfi->chips[i].word_write_time = 50000;

                if (cfi->cfiq->BufWriteTimeoutTyp)
                        cfi->chips[i].buffer_write_time =
                                1<<cfi->cfiq->BufWriteTimeoutTyp;
                /* No default; if it isn't specified, we won't use it */

                if (cfi->cfiq->BlockEraseTimeoutTyp)
                        cfi->chips[i].erase_time =
                                1000<<cfi->cfiq->BlockEraseTimeoutTyp;
                else
                        cfi->chips[i].erase_time = 2000000;

                cfi->chips[i].ref_point_counter = 0;
                init_waitqueue_head(&(cfi->chips[i].wq));
        }

        map->fldrv = &cfi_intelext_chipdrv;

        return cfi_intelext_setup(mtd);
}
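
/* Command sets 0x0003 and 0x0200 are Intel-compatible variants, so they
   share this implementation via linker aliases. */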
struct mtd_info *cfi_cmdset_0003(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
struct mtd_info *cfi_cmdset_0200(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0001")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0001);
EXPORT_SYMBOL_GPL(cfi_cmdset_0003);
EXPORT_SYMBOL_GPL(cfi_cmdset_0200);

static struct mtd_info *cfi_intelext_setup(struct mtd_info *mtd)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long offset = 0;
        int i, j;
        unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;

        //printk(KERN_DEBUG "number of CFI chips: %d\n", cfi->numchips);

        mtd->size = devsize * cfi->numchips;

        mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
        mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
                        * mtd->numeraseregions, GFP_KERNEL);
        if (!mtd->eraseregions) {
                printk(KERN_ERR "Failed to allocate memory for MTD erase region info\n");
                goto setup_err;
        }

        for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
                unsigned long ernum, ersize;
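                /*
                 * Each EraseRegionInfo word packs two fields: bits 0-15
                 * hold (number of erase blocks - 1) and bits 16-31 hold
                 * the block size in units of 256 bytes, which is what
                 * the shift and mask below unpack.
                 */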
                ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
                ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

                if (mtd->erasesize < ersize) {
                        mtd->erasesize = ersize;
                }
                for (j=0; j<cfi->numchips; j++) {
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
                        mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
                }
                offset += (ersize * ernum);
        }

        if (offset != devsize) {
                /* Argh */
                printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
                goto setup_err;
        }

        for (i=0; i<mtd->numeraseregions; i++) {
                printk(KERN_DEBUG "erase region %d: offset=0x%x,size=0x%x,blocks=%d\n",
                       i,mtd->eraseregions[i].offset,
                       mtd->eraseregions[i].erasesize,
                       mtd->eraseregions[i].numblocks);
        }

#ifdef CONFIG_MTD_OTP
        mtd->read_fact_prot_reg = cfi_intelext_read_fact_prot_reg;
        mtd->read_user_prot_reg = cfi_intelext_read_user_prot_reg;
        mtd->write_user_prot_reg = cfi_intelext_write_user_prot_reg;
        mtd->lock_user_prot_reg = cfi_intelext_lock_user_prot_reg;
        mtd->get_fact_prot_info = cfi_intelext_get_fact_prot_info;
        mtd->get_user_prot_info = cfi_intelext_get_user_prot_info;
#endif

        /* This function has the potential to distort the reality
           a bit and therefore should be called last. */
        if (cfi_intelext_partition_fixup(mtd, &cfi) != 0)
                goto setup_err;

        __module_get(THIS_MODULE);
        register_reboot_notifier(&mtd->reboot_notifier);
        return mtd;

 setup_err:
        if (mtd) {
                kfree(mtd->eraseregions);
                kfree(mtd);
        }
        kfree(cfi->cmdset_priv);
        return NULL;
}

static int cfi_intelext_partition_fixup(struct mtd_info *mtd,
                                        struct cfi_private **pcfi)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = *pcfi;
        struct cfi_pri_intelext *extp = cfi->cmdset_priv;

        /*
         * Probing of multi-partition flash chips.
         *
         * To support multiple partitions when available, we simply arrange
         * for each of them to have their own flchip structure even if they
         * are on the same physical chip.  This means completely recreating
         * a new cfi_private structure right here which is a blatant code
         * layering violation, but this is still the least intrusive
         * arrangement at this point. This can be rearranged in the future
         * if someone feels motivated enough.  --nico
         */
        if (extp && extp->MajorVersion == '1' && extp->MinorVersion >= '3'
            && extp->FeatureSupport & (1 << 9)) {
                struct cfi_private *newcfi;
                struct flchip *chip;
                struct flchip_shared *shared;
                int offs, numregions, numparts, partshift, numvirtchips, i, j;

                /* Protection Register info */
                offs = (extp->NumProtectionFields - 1) *
                       sizeof(struct cfi_intelext_otpinfo);

                /* Burst Read info */
                offs += extp->extra[offs+1]+2;

                /* Number of partition regions */
                numregions = extp->extra[offs];
                offs += 1;

                /* skip the sizeof(partregion) field in CFI 1.4 */
                if (extp->MinorVersion >= '4')
                        offs += 2;

                /* Number of hardware partitions */
                numparts = 0;
                for (i = 0; i < numregions; i++) {
                        struct cfi_intelext_regioninfo *rinfo;
                        rinfo = (struct cfi_intelext_regioninfo *)&extp->extra[offs];
                        numparts += rinfo->NumIdentPartitions;
                        offs += sizeof(*rinfo)
                                + (rinfo->NumBlockTypes - 1) *
                                  sizeof(struct cfi_intelext_blockinfo);
                }

                /* Programming Region info */
                if (extp->MinorVersion >= '4') {
                        struct cfi_intelext_programming_regioninfo *prinfo;
                        prinfo = (struct cfi_intelext_programming_regioninfo *)&extp->extra[offs];
                        mtd->writesize = cfi->interleave << prinfo->ProgRegShift;
                        MTD_PROGREGION_CTRLMODE_VALID(mtd) = cfi->interleave * prinfo->ControlValid;
                        MTD_PROGREGION_CTRLMODE_INVALID(mtd) = cfi->interleave * prinfo->ControlInvalid;
                        mtd->flags &= ~MTD_BIT_WRITEABLE;
                        printk(KERN_DEBUG "%s: program region size/ctrl_valid/ctrl_inval = %d/%d/%d\n",
                               map->name, mtd->writesize,
                               MTD_PROGREGION_CTRLMODE_VALID(mtd),
                               MTD_PROGREGION_CTRLMODE_INVALID(mtd));
                }

                /*
                 * All functions below currently rely on all chips having
                 * the same geometry so we'll just assume that all hardware
                 * partitions are of the same size too.
                 */
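                /* The code assumes numparts is a power of two, so
                   __ffs() acts as log2() and partshift is the size
                   shift of one hardware partition. */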
                partshift = cfi->chipshift - __ffs(numparts);

                if ((1 << partshift) < mtd->erasesize) {
                        printk(KERN_ERR
                                "%s: bad number of hw partitions (%d)\n",
                                __FUNCTION__, numparts);
                        return -EINVAL;
                }

                numvirtchips = cfi->numchips * numparts;
                newcfi = kmalloc(sizeof(struct cfi_private) + numvirtchips * sizeof(struct flchip), GFP_KERNEL);
                if (!newcfi)
                        return -ENOMEM;
                shared = kmalloc(sizeof(struct flchip_shared) * cfi->numchips, GFP_KERNEL);
                if (!shared) {
                        kfree(newcfi);
                        return -ENOMEM;
                }
                memcpy(newcfi, cfi, sizeof(struct cfi_private));
                newcfi->numchips = numvirtchips;
                newcfi->chipshift = partshift;

                chip = &newcfi->chips[0];
                for (i = 0; i < cfi->numchips; i++) {
                        shared[i].writing = shared[i].erasing = NULL;
                        spin_lock_init(&shared[i].lock);
                        for (j = 0; j < numparts; j++) {
                                *chip = cfi->chips[i];
                                chip->start += j << partshift;
                                chip->priv = &shared[i];
                                /* those should be reset too since
                                   they create memory references. */
                                init_waitqueue_head(&chip->wq);
                                spin_lock_init(&chip->_spinlock);
                                chip->mutex = &chip->_spinlock;
                                chip++;
                        }
                }

                printk(KERN_DEBUG "%s: %d set(s) of %d interleaved chips "
                                  "--> %d partitions of %d KiB\n",
                                  map->name, cfi->numchips, cfi->interleave,
                                  newcfi->numchips, 1<<(newcfi->chipshift-10));

                map->fldrv_priv = newcfi;
                *pcfi = newcfi;
                kfree(cfi);
        }

        return 0;
}

/*
 *  *********** CHIP ACCESS FUNCTIONS ***********
 */

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
        DECLARE_WAITQUEUE(wait, current);
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK = CMD(0x80), status_PWS = CMD(0x01);
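        /*
         * CMD() replicates a one-byte command or status value across
         * every device in an interleaved bank.  0x80 is the status
         * register "WSM ready" bit; 0x01 appears to be the partition
         * write status bit used by multi-partition chips (see the
         * FL_STATUS polling below).
         */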
        unsigned long timeo;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;

 resettime:
        timeo = jiffies + HZ;
 retry:
        if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING || mode == FL_OTP_WRITE)) {
                /*
                 * OK. There is a possibility of contention on the
                 * write/erase operations which are global to the real
                 * chip and not per partition.  So let's fight it over
                 * in the partition which currently has authority on
                 * the operation.
                 *
                 * The rules are as follows:
                 *
                 * - any write operation must own shared->writing.
                 *
                 * - any erase operation must own _both_ shared->writing and
                 *   shared->erasing.
                 *
                 * - contention arbitration is handled in the owner's context.
                 *
                 * The 'shared' struct can be read and/or written only when
                 * its lock is taken.
                 */
                struct flchip_shared *shared = chip->priv;
                struct flchip *contender;
                spin_lock(&shared->lock);
                contender = shared->writing;
                if (contender && contender != chip) {
                        /*
                         * The engine to perform desired operation on this
                         * partition is already in use by someone else.
                         * Let's fight over it in the context of the chip
                         * currently using it.  If it is possible to suspend,
                         * that other partition will do just that, otherwise
                         * it'll happily send us to sleep.  In any case, when
                         * get_chip returns success we're clear to go ahead.
                         */
                        int ret = spin_trylock(contender->mutex);
                        spin_unlock(&shared->lock);
                        if (!ret)
                                goto retry;
                        spin_unlock(chip->mutex);
                        ret = get_chip(map, contender, contender->start, mode);
                        spin_lock(chip->mutex);
                        if (ret) {
                                spin_unlock(contender->mutex);
                                return ret;
                        }
                        timeo = jiffies + HZ;
                        spin_lock(&shared->lock);
                        spin_unlock(contender->mutex);
                }

                /* We now own it */
                shared->writing = chip;
                if (mode == FL_ERASING)
                        shared->erasing = chip;
                spin_unlock(&shared->lock);
        }

        switch (chip->state) {

        case FL_STATUS:
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        /* At this point we're fine with write operations
                           in other partitions as they don't conflict. */
                        if (chip->priv && map_word_andequal(map, status, status_PWS, status_PWS))
                                break;

                        if (time_after(jiffies, timeo)) {
                                printk(KERN_ERR "%s: Waiting for chip to be ready timed out. Status %lx\n",
                                       map->name, status.x[0]);
                                return -EIO;
                        }
                        spin_unlock(chip->mutex);
                        cfi_udelay(1);
                        spin_lock(chip->mutex);
                        /* Someone else might have been playing with it. */
                        goto retry;
                }

        case FL_READY:
        case FL_CFI_QUERY:
        case FL_JEDEC_QUERY:
                return 0;

        case FL_ERASING:
                if (!cfip ||
                    !(cfip->FeatureSupport & 2) ||
                    !(mode == FL_READY || mode == FL_POINT ||
                     (mode == FL_WRITING && (cfip->SuspendCmdSupport & 1))))
                        goto sleep;


                /* Erase suspend */
                map_write(map, CMD(0xB0), adr);

                /* If the flash has finished erasing, then 'erase suspend'
                 * appears to make some (28F320) flash devices switch to
                 * 'read' mode.  Make sure that we switch to 'read status'
                 * mode so we get the right data. --rmk
                 */
                map_write(map, CMD(0x70), adr);
                chip->oldstate = FL_ERASING;
                chip->state = FL_ERASE_SUSPENDING;
                chip->erase_suspended = 1;
                for (;;) {
                        status = map_read(map, adr);
                        if (map_word_andequal(map, status, status_OK, status_OK))
                                break;

                        if (time_after(jiffies, timeo)) {
                                /* Urgh. Resume and pretend we weren't here.  */
                                map_write(map, CMD(0xd0), adr);
                                /* Make sure we're in 'read status' mode if it had finished */
                                map_write(map, CMD(0x70), adr);
                                chip->state = FL_ERASING;
                                chip->oldstate = FL_READY;
                                printk(KERN_ERR "%s: Chip not ready after erase "
                                       "suspended: status = 0x%lx\n", map->name, status.x[0]);
                                return -EIO;
                        }

                        spin_unlock(chip->mutex);
                        cfi_udelay(1);
                        spin_lock(chip->mutex);
                        /* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
                           So we can just loop here. */
                }
                chip->state = FL_STATUS;
                return 0;

        case FL_XIP_WHILE_ERASING:
                if (mode != FL_READY && mode != FL_POINT &&
                    (mode != FL_WRITING || !cfip || !(cfip->SuspendCmdSupport&1)))
                        goto sleep;
                chip->oldstate = chip->state;
                chip->state = FL_READY;
                return 0;

        case FL_POINT:
                /* Only if there's no operation suspended... */
                if (mode == FL_READY && chip->oldstate == FL_READY)
                        return 0;

        default:
        sleep:
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);
                spin_unlock(chip->mutex);
                schedule();
                remove_wait_queue(&chip->wq, &wait);
                spin_lock(chip->mutex);
                goto resettime;
        }
}

static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;

        if (chip->priv) {
                struct flchip_shared *shared = chip->priv;
                spin_lock(&shared->lock);
                if (shared->writing == chip && chip->oldstate == FL_READY) {
                        /* We own the ability to write, but we're done */
                        shared->writing = shared->erasing;
                        if (shared->writing && shared->writing != chip) {
                                /* give back ownership to who we loaned it from */
                                struct flchip *loaner = shared->writing;
                                spin_lock(loaner->mutex);
                                spin_unlock(&shared->lock);
                                spin_unlock(chip->mutex);
                                put_chip(map, loaner, loaner->start);
                                spin_lock(chip->mutex);
                                spin_unlock(loaner->mutex);
                                wake_up(&chip->wq);
                                return;
                        }
                        shared->erasing = NULL;
                        shared->writing = NULL;
                } else if (shared->erasing == chip && shared->writing != chip) {
                        /*
                         * We own the ability to erase without the ability
                         * to write, which means the erase was suspended
                         * and some other partition is currently writing.
                         * Don't let the switch below mess things up since
                         * we don't have ownership to resume anything.
                         */
                        spin_unlock(&shared->lock);
                        wake_up(&chip->wq);
                        return;
                }
                spin_unlock(&shared->lock);
        }

        switch (chip->oldstate) {
        case FL_ERASING:
                chip->state = chip->oldstate;
                /* What if one interleaved chip has finished and the
                   other hasn't? The old code would leave the finished
                   one in READY mode. That's bad, and caused -EROFS
                   errors to be returned from do_erase_oneblock because
                   that's the only bit it checked for at the time.
                   As the state machine appears to explicitly allow
                   sending the 0x70 (Read Status) command to an erasing
                   chip and expecting it to be ignored, that's what we
                   do. */
                map_write(map, CMD(0xd0), adr);
                map_write(map, CMD(0x70), adr);
                chip->oldstate = FL_READY;
                chip->state = FL_ERASING;
                break;

        case FL_XIP_WHILE_ERASING:
                chip->state = chip->oldstate;
                chip->oldstate = FL_READY;
                break;

        case FL_READY:
        case FL_STATUS:
        case FL_JEDEC_QUERY:
                /* We should really make set_vpp() count, rather than doing this */
                DISABLE_VPP(map);
                break;
        default:
                printk(KERN_ERR "%s: put_chip() called with oldstate %d!!\n", map->name, chip->oldstate);
        }
        wake_up(&chip->wq);
}

#ifdef CONFIG_MTD_XIP

/*
 * No interrupt whatsoever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * Within an XIP-disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good idea to inspect the generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Configuring MTD CFI
 * support for a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
                        unsigned long adr)
{
        /* TODO: chips with no XIP use should ignore and return */
        (void) map_read(map, adr); /* ensure mmu mapping is up to date */
        local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
                                unsigned long adr)
{
        struct cfi_private *cfi = map->fldrv_priv;
        if (chip->state != FL_POINT && chip->state != FL_READY) {
                map_write(map, CMD(0xff), adr);
                chip->state = FL_READY;
        }
        (void) map_read(map, adr);
        xip_iprefetch();
        local_irq_enable();
}

/*
 * When a delay is required for the flash operation to complete, the
 * xip_wait_for_operation() function polls for both the given timeout
 * and pending (but still masked) hardware interrupts.  Whenever there is an
 * interrupt pending then the flash erase or write operation is suspended,
 * array mode restored and interrupts unmasked.  Task scheduling might also
 * happen at that point.  The CPU eventually returns from the interrupt or
 * the call to schedule() and the suspended flash operation is resumed for
 * the remainder of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static int __xipram xip_wait_for_operation(
                struct map_info *map, struct flchip *chip,
                unsigned long adr, unsigned int chip_op_time)
{
        struct cfi_private *cfi = map->fldrv_priv;
        struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
        map_word status, OK = CMD(0x80);
        unsigned long usec, suspended, start, done;
        flstate_t oldstate, newstate;

        start = xip_currtime();
        usec = chip_op_time * 8;
        if (usec == 0)
                usec = 500000;
        done = 0;

        do {
                cpu_relax();
                if (xip_irqpending() && cfip &&
                    ((chip->state == FL_ERASING && (cfip->FeatureSupport&2)) ||
                     (chip->state == FL_WRITING && (cfip->FeatureSupport&4))) &&
                    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
                        /*
                         * Let's suspend the erase or write operation when
                         * supported.  Note that we currently don't try to
                         * suspend interleaved chips if there is already
                         * another operation suspended (imagine what happens
                         * when one chip was already done with the current
                         * operation while another chip suspended it, then
                         * we resume the whole thing at once).  Yes, it
                         * can happen!
                         */
                        usec -= done;
                        map_write(map, CMD(0xb0), adr);
                        map_write(map, CMD(0x70), adr);
                        suspended = xip_currtime();
                        do {
                                if (xip_elapsed_since(suspended) > 100000) {
                                        /*
                                         * The chip doesn't want to suspend
                                         * after waiting for 100 msecs.
                                         * This is a critical error but there
                                         * is not much we can do here.
                                         */
                                        return -EIO;
                                }
                                status = map_read(map, adr);
                        } while (!map_word_andequal(map, status, OK, OK));

                        /* Suspend succeeded */
                        oldstate = chip->state;
                        if (oldstate == FL_ERASING) {
                                if (!map_word_bitsset(map, status, CMD(0x40)))
                                        break;
                                newstate = FL_XIP_WHILE_ERASING;
                                chip->erase_suspended = 1;
                        } else {
                                if (!map_word_bitsset(map, status, CMD(0x04)))
                                        break;
                                newstate = FL_XIP_WHILE_WRITING;
                                chip->write_suspended = 1;
                        }
                        chip->state = newstate;
                        map_write(map, CMD(0xff), adr);
                        (void) map_read(map, adr);
                        asm volatile (".rep 8; nop; .endr");
                        local_irq_enable();
                        spin_unlock(chip->mutex);
                        asm volatile (".rep 8; nop; .endr");
                        cond_resched();

                        /*
                         * We're back.  However someone else might have
                         * decided to go write to the chip if we are in
                         * a suspended erase state.  If so let's wait
                         * until it's done.
                         */
                        spin_lock(chip->mutex);
                        while (chip->state != newstate) {
                                DECLARE_WAITQUEUE(wait, current);
                                set_current_state(TASK_UNINTERRUPTIBLE);
                                add_wait_queue(&chip->wq, &wait);
                                spin_unlock(chip->mutex);
                                schedule();
                                remove_wait_queue(&chip->wq, &wait);
                                spin_lock(chip->mutex);
                        }
                        /* Disallow XIP again */
                        local_irq_disable();

                        /* Resume the write or erase operation */
                        map_write(map, CMD(0xd0), adr);
                        map_write(map, CMD(0x70), adr);
                        chip->state = oldstate;
                        start = xip_currtime();
                } else if (usec >= 1000000/HZ) {
                        /*
                         * Try to save on CPU power when waiting delay
                         * is at least a system timer tick period.
                         * No need to be extremely accurate here.
                         */
                        xip_cpu_idle();
                }
                status = map_read(map, adr);
                done = xip_elapsed_since(start);
        } while (!map_word_andequal(map, status, OK, OK)
                 && done < usec);

        return (done >= usec) ? -ETIME : 0;
}

/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVAL_CACHE_AND_WAIT.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
        INVALIDATE_CACHED_RANGE(map, from, size)

#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec) \
        xip_wait_for_operation(map, chip, cmd_adr, usec)

#else

#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)
#define INVAL_CACHE_AND_WAIT inval_cache_and_wait_for_operation

static int inval_cache_and_wait_for_operation(
                struct map_info *map, struct flchip *chip,
                unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
                unsigned int chip_op_time)
{
        struct cfi_private *cfi = map->fldrv_priv;
        map_word status, status_OK = CMD(0x80);
        int chip_state = chip->state;
        unsigned int timeo, sleep_time;

        spin_unlock(chip->mutex);
        if (inval_len)
                INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
        spin_lock(chip->mutex);

        /* set our timeout to 8 times the expected delay */
        timeo = chip_op_time * 8;
        if (!timeo)
                timeo = 500000;
        sleep_time = chip_op_time / 2;
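
        /* Poll in steps of half the typical operation time: sleep when
           at least a timer tick's worth of delay remains, otherwise
           busy-wait in 1us steps.  All figures are in microseconds. */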

        for (;;) {
                status = map_read(map, cmd_adr);
                if (map_word_andequal(map, status, status_OK, status_OK))
                        break;

                if (!timeo) {
                        map_write(map, CMD(0x70), cmd_adr);
                        chip->state = FL_STATUS;
                        return -ETIME;
                }

                /* OK. Still waiting. Drop the lock, wait a while and retry. */
                spin_unlock(chip->mutex);
                if (sleep_time >= 1000000/HZ) {
                        /*
                         * Half of the normal delay still remaining
                         * can be performed with a sleeping delay instead
                         * of busy waiting.
                         */
                        msleep(sleep_time/1000);
                        timeo -= sleep_time;
                        sleep_time = 1000000/HZ;
                } else {
                        udelay(1);
                        cond_resched();
                        timeo--;
                }
                spin_lock(chip->mutex);

                while (chip->state != chip_state) {
                        /* Someone's suspended the operation: sleep */
                        DECLARE_WAITQUEUE(wait, current);
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);
                        spin_unlock(chip->mutex);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);
                        spin_lock(chip->mutex);
                }
        }

        /* Done and happy. */
        chip->state = FL_STATUS;
        return 0;
}

#endif

#define WAIT_TIMEOUT(map, chip, adr, udelay) \
        INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay);


static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
{
        unsigned long cmd_addr;
        struct cfi_private *cfi = map->fldrv_priv;
        int ret = 0;

        adr += chip->start;

        /* Ensure cmd read/writes are aligned. */
        cmd_addr = adr & ~(map_bankwidth(map)-1);

        spin_lock(chip->mutex);

        ret = get_chip(map, chip, cmd_addr, FL_POINT);

        if (!ret) {
                if (chip->state != FL_POINT && chip->state != FL_READY)
                        map_write(map, CMD(0xff), cmd_addr);

                chip->state = FL_POINT;
                chip->ref_point_counter++;
        }
        spin_unlock(chip->mutex);

        return ret;
}

static int cfi_intelext_point (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char **mtdbuf)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long ofs;
        int chipnum;
        int ret = 0;

        if (!map->virt || (from + len > mtd->size))
                return -EINVAL;

        *mtdbuf = (void *)map->virt + from;
        *retlen = 0;

        /* Now lock the chip(s) to POINT state */

        /* ofs: offset within the first chip that the first read should start */
        chipnum = (from >> cfi->chipshift);
        ofs = from - (chipnum << cfi->chipshift);

        while (len) {
                unsigned long thislen;

                if (chipnum >= cfi->numchips)
                        break;

                if ((len + ofs -1) >> cfi->chipshift)
                        thislen = (1<<cfi->chipshift) - ofs;
                else
                        thislen = len;

                ret = do_point_onechip(map, &cfi->chips[chipnum], ofs, thislen);
                if (ret)
                        break;

                *retlen += thislen;
                len -= thislen;

                ofs = 0;
                chipnum++;
        }
        return 0;
}

static void cfi_intelext_unpoint (struct mtd_info *mtd, u_char *addr, loff_t from, size_t len)
{
        struct map_info *map = mtd->priv;
        struct cfi_private *cfi = map->fldrv_priv;
        unsigned long ofs;
        int chipnum;

        /* Now unlock the chip(s) POINT state */

        /* ofs: offset within the first chip that the first read should start */
        chipnum = (from >> cfi->chipshift);
        ofs = from - (chipnum << cfi->chipshift);

        while (len) {
                unsigned long thislen;
                struct flchip *chip;

                if (chipnum >= cfi->numchips)
                        break;
                chip = &cfi->chips[chipnum];

                if ((len + ofs -1) >> cfi->chipshift)
                        thislen = (1<<cfi->chipshift) - ofs;
                else
                        thislen = len;

                spin_lock(chip->mutex);
                if (chip->state == FL_POINT) {
                        chip->ref_point_counter--;
                        if (chip->ref_point_counter == 0)
                                chip->state = FL_READY;
                } else
                        printk(KERN_ERR "%s: Warning: unpoint called on non-pointed region\n", map->name); /* Should this give an error? */

                put_chip(map, chip, chip->start);
                spin_unlock(chip->mutex);

                len -= thislen;
                ofs = 0;
                chipnum++;
        }
}
1239
1240 static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
1241 {
1242         unsigned long cmd_addr;
1243         struct cfi_private *cfi = map->fldrv_priv;
1244         int ret;
1245
1246         adr += chip->start;
1247
1248         /* Ensure cmd read/writes are aligned. */
1249         cmd_addr = adr & ~(map_bankwidth(map)-1);
1250
1251         spin_lock(chip->mutex);
1252         ret = get_chip(map, chip, cmd_addr, FL_READY);
1253         if (ret) {
1254                 spin_unlock(chip->mutex);
1255                 return ret;
1256         }
1257
1258         if (chip->state != FL_POINT && chip->state != FL_READY) {
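                /* 0xFF is the Intel "read array" command: put the chip
                   back into normal read mode before copying data out. */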
1259                 map_write(map, CMD(0xff), cmd_addr);
1260
1261                 chip->state = FL_READY;
1262         }
1263
1264         map_copy_from(map, buf, adr, len);
1265
1266         put_chip(map, chip, cmd_addr);
1267
1268         spin_unlock(chip->mutex);
1269         return 0;
1270 }
1271
1272 static int cfi_intelext_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
1273 {
1274         struct map_info *map = mtd->priv;
1275         struct cfi_private *cfi = map->fldrv_priv;
1276         unsigned long ofs;
1277         int chipnum;
1278         int ret = 0;
1279
1280         /* ofs: offset within the first chip that the first read should start */
1281         chipnum = (from >> cfi->chipshift);
1282         ofs = from - (chipnum <<  cfi->chipshift);
1283
1284         *retlen = 0;
1285
1286         while (len) {
1287                 unsigned long thislen;
1288
1289                 if (chipnum >= cfi->numchips)
1290                         break;
1291
1292                 if ((len + ofs -1) >> cfi->chipshift)
1293                         thislen = (1<<cfi->chipshift) - ofs;
1294                 else
1295                         thislen = len;
1296
1297                 ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
1298                 if (ret)
1299                         break;
1300
1301                 *retlen += thislen;
1302                 len -= thislen;
1303                 buf += thislen;
1304
1305                 ofs = 0;
1306                 chipnum++;
1307         }
1308         return ret;
1309 }
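/*
 * Worked example of the chipnum/ofs split above, with assumed geometry:
 * if cfi->chipshift is 23 (8MiB per chip), a read at from = 0xA00000
 * gives chipnum = 1 and ofs = 0x200000; a length crossing the 8MiB
 * boundary is clipped to thislen and the loop resumes on the next chip
 * at ofs = 0.
 */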
1310
1311 static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip,
1312                                      unsigned long adr, map_word datum, int mode)
1313 {
1314         struct cfi_private *cfi = map->fldrv_priv;
1315         map_word status, write_cmd;
1316         int ret=0;
1317
1318         adr += chip->start;
1319
1320         switch (mode) {
1321         case FL_WRITING:
1322                 write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0x40) : CMD(0x41);
1323                 break;
1324         case FL_OTP_WRITE:
1325                 write_cmd = CMD(0xc0);
1326                 break;
1327         default:
1328                 return -EINVAL;
1329         }
1330
1331         spin_lock(chip->mutex);
1332         ret = get_chip(map, chip, adr, mode);
1333         if (ret) {
1334                 spin_unlock(chip->mutex);
1335                 return ret;
1336         }
1337
1338         XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
1339         ENABLE_VPP(map);
1340         xip_disable(map, chip, adr);
1341         map_write(map, write_cmd, adr);
1342         map_write(map, datum, adr);
1343         chip->state = mode;
1344
1345         ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1346                                    adr, map_bankwidth(map),
1347                                    chip->word_write_time);
1348         if (ret) {
1349                 xip_enable(map, chip, adr);
1350                 printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
1351                 goto out;
1352         }
1353
1354         /* check for errors: SR.4 = program error, SR.3 = VPP low, SR.1 = block locked */
1355         status = map_read(map, adr);
1356         if (map_word_bitsset(map, status, CMD(0x1a))) {
1357                 unsigned long chipstatus = MERGESTATUS(status);
1358
1359                 /* reset status */
1360                 map_write(map, CMD(0x50), adr);
1361                 map_write(map, CMD(0x70), adr);
1362                 xip_enable(map, chip, adr);
1363
1364                 if (chipstatus & 0x02) {
1365                         ret = -EROFS;
1366                 } else if (chipstatus & 0x08) {
1367                         printk(KERN_ERR "%s: word write error (bad VPP)\n", map->name);
1368                         ret = -EIO;
1369                 } else {
1370                         printk(KERN_ERR "%s: word write error (status 0x%lx)\n", map->name, chipstatus);
1371                         ret = -EINVAL;
1372                 }
1373
1374                 goto out;
1375         }
1376
1377         xip_enable(map, chip, adr);
1378  out:   put_chip(map, chip, adr);
1379         spin_unlock(chip->mutex);
1380         return ret;
1381 }
1382
1383
1384 static int cfi_intelext_write_words (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf)
1385 {
1386         struct map_info *map = mtd->priv;
1387         struct cfi_private *cfi = map->fldrv_priv;
1388         int ret = 0;
1389         int chipnum;
1390         unsigned long ofs;
1391
1392         *retlen = 0;
1393         if (!len)
1394                 return 0;
1395
1396         chipnum = to >> cfi->chipshift;
1397         ofs = to - (chipnum << cfi->chipshift);
1398
1399         /* If it's not bus-aligned, do a partial first word write */
1400         if (ofs & (map_bankwidth(map)-1)) {
1401                 unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
1402                 int gap = ofs - bus_ofs;
1403                 int n;
1404                 map_word datum;
1405
1406                 n = min_t(int, len, map_bankwidth(map)-gap);
1407                 datum = map_word_ff(map);
1408                 datum = map_word_load_partial(map, datum, buf, gap, n);
1409
1410                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1411                                                bus_ofs, datum, FL_WRITING);
1412                 if (ret)
1413                         return ret;
1414
1415                 len -= n;
1416                 ofs += n;
1417                 buf += n;
1418                 (*retlen) += n;
1419
1420                 if (ofs >> cfi->chipshift) {
1421                         chipnum ++;
1422                         ofs = 0;
1423                         if (chipnum == cfi->numchips)
1424                                 return 0;
1425                 }
1426         }
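        /*
         * Example of the head alignment above (illustrative values):
         * with map_bankwidth(map) == 4 and ofs == 6, bus_ofs == 4 and
         * gap == 2, so up to two bytes are merged into a word padded
         * with 0xFF.  The 0xFF padding leaves the neighbouring bytes
         * untouched, since programming can only clear bits to 0.
         */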
1427
1428         while(len >= map_bankwidth(map)) {
1429                 map_word datum = map_word_load(map, buf);
1430
1431                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1432                                        ofs, datum, FL_WRITING);
1433                 if (ret)
1434                         return ret;
1435
1436                 ofs += map_bankwidth(map);
1437                 buf += map_bankwidth(map);
1438                 (*retlen) += map_bankwidth(map);
1439                 len -= map_bankwidth(map);
1440
1441                 if (ofs >> cfi->chipshift) {
1442                         chipnum ++;
1443                         ofs = 0;
1444                         if (chipnum == cfi->numchips)
1445                                 return 0;
1446                 }
1447         }
1448
1449         if (len & (map_bankwidth(map)-1)) {
1450                 map_word datum;
1451
1452                 datum = map_word_ff(map);
1453                 datum = map_word_load_partial(map, datum, buf, 0, len);
1454
1455                 ret = do_write_oneword(map, &cfi->chips[chipnum],
1456                                        ofs, datum, FL_WRITING);
1457                 if (ret)
1458                         return ret;
1459
1460                 (*retlen) += len;
1461         }
1462
1463         return 0;
1464 }
1465
1466
1467 static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1468                                     unsigned long adr, const struct kvec **pvec,
1469                                     unsigned long *pvec_seek, int len)
1470 {
1471         struct cfi_private *cfi = map->fldrv_priv;
1472         map_word status, write_cmd, datum;
1473         unsigned long cmd_adr;
1474         int ret, wbufsize, word_gap, words;
1475         const struct kvec *vec;
1476         unsigned long vec_seek;
1477
1478         wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
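        /* MaxBufWriteSize above is the log2 of the per-chip write buffer
           size in bytes, so the usable buffer for this mapping is that
           size scaled by the interleave (e.g. 32-byte buffers
           interleaved x2 give wbufsize == 64). */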
1479         adr += chip->start;
1480         cmd_adr = adr & ~(wbufsize-1);
1481
1482         /* Let's determine this according to the interleave only once */
1483         write_cmd = (cfi->cfiq->P_ID != 0x0200) ? CMD(0xe8) : CMD(0xe9);
1484
1485         spin_lock(chip->mutex);
1486         ret = get_chip(map, chip, cmd_adr, FL_WRITING);
1487         if (ret) {
1488                 spin_unlock(chip->mutex);
1489                 return ret;
1490         }
1491
1492         XIP_INVAL_CACHED_RANGE(map, adr, len);
1493         ENABLE_VPP(map);
1494         xip_disable(map, chip, cmd_adr);
1495
1496         /* §4.8 of the 28FxxxJ3A datasheet says "Any time SR.4 and/or SR.5 is set
1497            [...], the device will not accept any more Write to Buffer commands".
1498            So we must check here and reset those bits if they're set. Otherwise
1499            we're just pissing in the wind */
1500         if (chip->state != FL_STATUS) {
1501                 map_write(map, CMD(0x70), cmd_adr);
1502                 chip->state = FL_STATUS;
1503         }
1504         status = map_read(map, cmd_adr);
1505         if (map_word_bitsset(map, status, CMD(0x30))) {
1506                 xip_enable(map, chip, cmd_adr);
1507                 printk(KERN_WARNING "SR.4 or SR.5 bits set in buffer write (status %lx). Clearing.\n", status.x[0]);
1508                 xip_disable(map, chip, cmd_adr);
1509                 map_write(map, CMD(0x50), cmd_adr);
1510                 map_write(map, CMD(0x70), cmd_adr);
1511         }
1512
1513         chip->state = FL_WRITING_TO_BUFFER;
1514         map_write(map, write_cmd, cmd_adr);
1515         ret = WAIT_TIMEOUT(map, chip, cmd_adr, 0);
1516         if (ret) {
1517                 /* Argh. Not ready for write to buffer */
1518                 map_word Xstatus = map_read(map, cmd_adr);
1519                 map_write(map, CMD(0x70), cmd_adr);
1520                 chip->state = FL_STATUS;
1521                 status = map_read(map, cmd_adr);
1522                 map_write(map, CMD(0x50), cmd_adr);
1523                 map_write(map, CMD(0x70), cmd_adr);
1524                 xip_enable(map, chip, cmd_adr);
1525                 printk(KERN_ERR "%s: Chip not ready for buffer write. Xstatus = %lx, status = %lx\n",
1526                                 map->name, Xstatus.x[0], status.x[0]);
1527                 goto out;
1528         }
1529
1530         /* Figure out the number of words to write */
1531         word_gap = (-adr & (map_bankwidth(map)-1));
1532         words = (len - word_gap + map_bankwidth(map) - 1) / map_bankwidth(map);
1533         if (!word_gap) {
1534                 words--;
1535         } else {
1536                 word_gap = map_bankwidth(map) - word_gap;
1537                 adr -= word_gap;
1538                 datum = map_word_ff(map);
1539         }
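        /*
         * Worked example (illustrative values): with map_bankwidth(map)
         * == 2, an odd adr and len == 3, word_gap starts as 1 and words
         * as 1; word_gap becomes 2 - 1 == 1 and adr is aligned down, so
         * the loop below emits two bus words and CMD(words) == 1, i.e.
         * the word count minus one that the chip expects.
         */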
1540
1541         /* Write the word count minus one, as the chip expects */
1542         map_write(map, CMD(words), cmd_adr);
1543
1544         /* Write data */
1545         vec = *pvec;
1546         vec_seek = *pvec_seek;
1547         do {
1548                 int n = map_bankwidth(map) - word_gap;
1549                 if (n > vec->iov_len - vec_seek)
1550                         n = vec->iov_len - vec_seek;
1551                 if (n > len)
1552                         n = len;
1553
1554                 if (!word_gap && len < map_bankwidth(map))
1555                         datum = map_word_ff(map);
1556
1557                 datum = map_word_load_partial(map, datum,
1558                                               vec->iov_base + vec_seek,
1559                                               word_gap, n);
1560
1561                 len -= n;
1562                 word_gap += n;
1563                 if (!len || word_gap == map_bankwidth(map)) {
1564                         map_write(map, datum, adr);
1565                         adr += map_bankwidth(map);
1566                         word_gap = 0;
1567                 }
1568
1569                 vec_seek += n;
1570                 if (vec_seek == vec->iov_len) {
1571                         vec++;
1572                         vec_seek = 0;
1573                 }
1574         } while (len);
1575         *pvec = vec;
1576         *pvec_seek = vec_seek;
1577
1578         /* GO GO GO */
1579         map_write(map, CMD(0xd0), cmd_adr);
1580         chip->state = FL_WRITING;
1581
1582         ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
1583                                    adr, len,
1584                                    chip->buffer_write_time);
1585         if (ret) {
1586                 map_write(map, CMD(0x70), cmd_adr);
1587                 chip->state = FL_STATUS;
1588                 xip_enable(map, chip, cmd_adr);
1589                 printk(KERN_ERR "%s: buffer write error (status timeout)\n", map->name);
1590                 goto out;
1591         }
1592
1593         /* check for errors: SR.4 = program error, SR.3 = VPP low, SR.1 = block locked */
1594         status = map_read(map, cmd_adr);
1595         if (map_word_bitsset(map, status, CMD(0x1a))) {
1596                 unsigned long chipstatus = MERGESTATUS(status);
1597
1598                 /* reset status */
1599                 map_write(map, CMD(0x50), cmd_adr);
1600                 map_write(map, CMD(0x70), cmd_adr);
1601                 xip_enable(map, chip, cmd_adr);
1602
1603                 if (chipstatus & 0x02) {
1604                         ret = -EROFS;
1605                 } else if (chipstatus & 0x08) {
1606                         printk(KERN_ERR "%s: buffer write error (bad VPP)\n", map->name);
1607                         ret = -EIO;
1608                 } else {
1609                         printk(KERN_ERR "%s: buffer write error (status 0x%lx)\n", map->name, chipstatus);
1610                         ret = -EINVAL;
1611                 }
1612
1613                 goto out;
1614         }
1615
1616         xip_enable(map, chip, cmd_adr);
1617  out:   put_chip(map, chip, cmd_adr);
1618         spin_unlock(chip->mutex);
1619         return ret;
1620 }
1621
1622 static int cfi_intelext_writev (struct mtd_info *mtd, const struct kvec *vecs,
1623                                 unsigned long count, loff_t to, size_t *retlen)
1624 {
1625         struct map_info *map = mtd->priv;
1626         struct cfi_private *cfi = map->fldrv_priv;
1627         int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
1628         int ret = 0;
1629         int chipnum;
1630         unsigned long ofs, vec_seek, i;
1631         size_t len = 0;
1632
1633         for (i = 0; i < count; i++)
1634                 len += vecs[i].iov_len;
1635
1636         *retlen = 0;
1637         if (!len)
1638                 return 0;
1639
1640         chipnum = to >> cfi->chipshift;
1641         ofs = to - (chipnum << cfi->chipshift);
1642         vec_seek = 0;
1643
1644         do {
1645                 /* We must not cross write block boundaries */
1646                 int size = wbufsize - (ofs & (wbufsize-1));
1647
1648                 if (size > len)
1649                         size = len;
1650                 ret = do_write_buffer(map, &cfi->chips[chipnum],
1651                                       ofs, &vecs, &vec_seek, size);
1652                 if (ret)
1653                         return ret;
1654
1655                 ofs += size;
1656                 (*retlen) += size;
1657                 len -= size;
1658
1659                 if (ofs >> cfi->chipshift) {
1660                         chipnum ++;
1661                         ofs = 0;
1662                         if (chipnum == cfi->numchips)
1663                                 return 0;
1664                 }
1665
1666                 /* Be nice and reschedule with the chip in a usable state for other
1667                    processes. */
1668                 cond_resched();
1669
1670         } while (len);
1671
1672         return 0;
1673 }
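/*
 * Illustrative sketch only (#if 0, hypothetical names): gathering two
 * separate buffers into one pass over the write buffer via writev().
 */
#if 0
static int example_gather_write(struct mtd_info *mtd, loff_t to,
                                void *hdr, size_t hdrlen,
                                void *body, size_t bodylen)
{
        struct kvec vecs[2];
        size_t retlen;

        vecs[0].iov_base = hdr;
        vecs[0].iov_len  = hdrlen;
        vecs[1].iov_base = body;
        vecs[1].iov_len  = bodylen;

        return mtd->writev(mtd, vecs, 2, to, &retlen);
}
#endif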
1674
1675 static int cfi_intelext_write_buffers (struct mtd_info *mtd, loff_t to,
1676                                        size_t len, size_t *retlen, const u_char *buf)
1677 {
1678         struct kvec vec;
1679
1680         vec.iov_base = (void *) buf;
1681         vec.iov_len = len;
1682
1683         return cfi_intelext_writev(mtd, &vec, 1, to, retlen);
1684 }
1685
1686 static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip,
1687                                       unsigned long adr, int len, void *thunk)
1688 {
1689         struct cfi_private *cfi = map->fldrv_priv;
1690         map_word status;
1691         int retries = 3;
1692         int ret;
1693
1694         adr += chip->start;
1695
1696  retry:
1697         spin_lock(chip->mutex);
1698         ret = get_chip(map, chip, adr, FL_ERASING);
1699         if (ret) {
1700                 spin_unlock(chip->mutex);
1701                 return ret;
1702         }
1703
1704         XIP_INVAL_CACHED_RANGE(map, adr, len);
1705         ENABLE_VPP(map);
1706         xip_disable(map, chip, adr);
1707
1708         /* Clear the status register first */
1709         map_write(map, CMD(0x50), adr);
1710
1711         /* Now erase */
1712         map_write(map, CMD(0x20), adr);
1713         map_write(map, CMD(0xD0), adr);
1714         chip->state = FL_ERASING;
1715         chip->erase_suspended = 0;
1716
1717         ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
1718                                    adr, len,
1719                                    chip->erase_time);
1720         if (ret) {
1721                 map_write(map, CMD(0x70), adr);
1722                 chip->state = FL_STATUS;
1723                 xip_enable(map, chip, adr);
1724                 printk(KERN_ERR "%s: block erase error: (status timeout)\n", map->name);
1725                 goto out;
1726         }
1727
1728         /* We've broken this before. It doesn't hurt to be safe */
1729         map_write(map, CMD(0x70), adr);
1730         chip->state = FL_STATUS;
1731         status = map_read(map, adr);
1732
1733         /* check for errors: SR.5 = erase error, SR.4 = program error (both set = bad command sequence), SR.3 = VPP low, SR.1 = block locked */
1734         if (map_word_bitsset(map, status, CMD(0x3a))) {
1735                 unsigned long chipstatus = MERGESTATUS(status);
1736
1737                 /* Reset the error bits */
1738                 map_write(map, CMD(0x50), adr);
1739                 map_write(map, CMD(0x70), adr);
1740                 xip_enable(map, chip, adr);
1741
1742                 if ((chipstatus & 0x30) == 0x30) {
1743                         printk(KERN_ERR "%s: block erase error: (bad command sequence, status 0x%lx)\n", map->name, chipstatus);
1744                         ret = -EINVAL;
1745                 } else if (chipstatus & 0x02) {
1746                         /* Protection bit set */
1747                         ret = -EROFS;
1748                 } else if (chipstatus & 0x8) {
1749                         /* Voltage */
1750                         printk(KERN_ERR "%s: block erase error: (bad VPP)\n", map->name);
1751                         ret = -EIO;
1752                 } else if (chipstatus & 0x20 && retries--) {
1753                         printk(KERN_DEBUG "block erase failed at 0x%08lx: status 0x%lx. Retrying...\n", adr, chipstatus);
1754                         put_chip(map, chip, adr);
1755                         spin_unlock(chip->mutex);
1756                         goto retry;
1757                 } else {
1758                         printk(KERN_ERR "%s: block erase failed at 0x%08lx (status 0x%lx)\n", map->name, adr, chipstatus);
1759                         ret = -EIO;
1760                 }
1761
1762                 goto out;
1763         }
1764
1765         xip_enable(map, chip, adr);
1766  out:   put_chip(map, chip, adr);
1767         spin_unlock(chip->mutex);
1768         return ret;
1769 }
1770
1771 int cfi_intelext_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
1772 {
1773         unsigned long ofs, len;
1774         int ret;
1775
1776         ofs = instr->addr;
1777         len = instr->len;
1778
1779         ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
1780         if (ret)
1781                 return ret;
1782
1783         instr->state = MTD_ERASE_DONE;
1784         mtd_erase_callback(instr);
1785
1786         return 0;
1787 }
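/*
 * Illustrative sketch only, assuming the classic MTD erase interface of
 * this era: erase is asynchronous and completion is signalled through
 * instr->callback, so a synchronous caller waits on a queue.  Wrapped in
 * #if 0; erase_done()/example_erase_sync() are hypothetical names.
 */
#if 0
static void erase_done(struct erase_info *instr)
{
        wake_up((wait_queue_head_t *)instr->priv);
}

static int example_erase_sync(struct mtd_info *mtd, loff_t ofs, size_t len)
{
        DECLARE_WAIT_QUEUE_HEAD(waitq);
        struct erase_info instr;
        int ret;

        memset(&instr, 0, sizeof(instr));
        instr.mtd = mtd;
        instr.addr = ofs;
        instr.len = len;
        instr.callback = erase_done;
        instr.priv = (u_long)&waitq;

        ret = mtd->erase(mtd, &instr);
        if (ret)
                return ret;
        wait_event(waitq, instr.state == MTD_ERASE_DONE ||
                          instr.state == MTD_ERASE_FAILED);
        return instr.state == MTD_ERASE_DONE ? 0 : -EIO;
}
#endif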
1788
1789 static void cfi_intelext_sync (struct mtd_info *mtd)
1790 {
1791         struct map_info *map = mtd->priv;
1792         struct cfi_private *cfi = map->fldrv_priv;
1793         int i;
1794         struct flchip *chip;
1795         int ret = 0;
1796
1797         for (i=0; !ret && i<cfi->numchips; i++) {
1798                 chip = &cfi->chips[i];
1799
1800                 spin_lock(chip->mutex);
1801                 ret = get_chip(map, chip, chip->start, FL_SYNCING);
1802
1803                 if (!ret) {
1804                         chip->oldstate = chip->state;
1805                         chip->state = FL_SYNCING;
1806                         /* No need to wake_up() on this state change -
1807                          * as the whole point is that nobody can do anything
1808                          * with the chip now anyway.
1809                          */
1810                 }
1811                 spin_unlock(chip->mutex);
1812         }
1813
1814         /* Unlock the chips again */
1815
1816         for (i--; i >=0; i--) {
1817                 chip = &cfi->chips[i];
1818
1819                 spin_lock(chip->mutex);
1820
1821                 if (chip->state == FL_SYNCING) {
1822                         chip->state = chip->oldstate;
1823                         chip->oldstate = FL_READY;
1824                         wake_up(&chip->wq);
1825                 }
1826                 spin_unlock(chip->mutex);
1827         }
1828 }
1829
1830 #ifdef DEBUG_LOCK_BITS
1831 static int __xipram do_printlockstatus_oneblock(struct map_info *map,
1832                                                 struct flchip *chip,
1833                                                 unsigned long adr,
1834                                                 int len, void *thunk)
1835 {
1836         struct cfi_private *cfi = map->fldrv_priv;
1837         int status, ofs_factor = cfi->interleave * cfi->device_type;
1838
1839         adr += chip->start;
1840         xip_disable(map, chip, adr+(2*ofs_factor));
1841         map_write(map, CMD(0x90), adr+(2*ofs_factor));
1842         chip->state = FL_JEDEC_QUERY;
1843         status = cfi_read_query(map, adr+(2*ofs_factor));
1844         xip_enable(map, chip, 0);
1845         printk(KERN_DEBUG "block status register for 0x%08lx is %x\n",
1846                adr, status);
1847         return 0;
1848 }
1849 #endif
1850
1851 #define DO_XXLOCK_ONEBLOCK_LOCK         ((void *) 1)
1852 #define DO_XXLOCK_ONEBLOCK_UNLOCK       ((void *) 2)
1853
1854 static int __xipram do_xxlock_oneblock(struct map_info *map, struct flchip *chip,
1855                                        unsigned long adr, int len, void *thunk)
1856 {
1857         struct cfi_private *cfi = map->fldrv_priv;
1858         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
1859         int udelay;
1860         int ret;
1861
1862         adr += chip->start;
1863
1864         spin_lock(chip->mutex);
1865         ret = get_chip(map, chip, adr, FL_LOCKING);
1866         if (ret) {
1867                 spin_unlock(chip->mutex);
1868                 return ret;
1869         }
1870
1871         ENABLE_VPP(map);
1872         xip_disable(map, chip, adr);
1873
1874         map_write(map, CMD(0x60), adr);
1875         if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
1876                 map_write(map, CMD(0x01), adr);
1877                 chip->state = FL_LOCKING;
1878         } else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
1879                 map_write(map, CMD(0xD0), adr);
1880                 chip->state = FL_UNLOCKING;
1881         } else
1882                 BUG();
1883
1884         /*
1885          * If Instant Individual Block Locking supported then no need
1886          * to delay.
1887          */
1888         udelay = (!extp || !(extp->FeatureSupport & (1 << 5))) ? 1000000/HZ : 0;
1889
1890         ret = WAIT_TIMEOUT(map, chip, adr, udelay);
1891         if (ret) {
1892                 map_write(map, CMD(0x70), adr);
1893                 chip->state = FL_STATUS;
1894                 xip_enable(map, chip, adr);
1895                 printk(KERN_ERR "%s: block lock/unlock error: (status timeout)\n", map->name);
1896                 goto out;
1897         }
1898
1899         xip_enable(map, chip, adr);
1900 out:    put_chip(map, chip, adr);
1901         spin_unlock(chip->mutex);
1902         return ret;
1903 }
1904
1905 static int cfi_intelext_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
1906 {
1907         int ret;
1908
1909 #ifdef DEBUG_LOCK_BITS
1910         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%zx\n",
1911                __FUNCTION__, ofs, len);
1912         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1913                 ofs, len, 0);
1914 #endif
1915
1916         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
1917                 ofs, len, DO_XXLOCK_ONEBLOCK_LOCK);
1918
1919 #ifdef DEBUG_LOCK_BITS
1920         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
1921                __FUNCTION__, ret);
1922         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1923                 ofs, len, 0);
1924 #endif
1925
1926         return ret;
1927 }
1928
1929 static int cfi_intelext_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
1930 {
1931         int ret;
1932
1933 #ifdef DEBUG_LOCK_BITS
1934         printk(KERN_DEBUG "%s: lock status before, ofs=0x%08llx, len=0x%zx\n",
1935                __FUNCTION__, ofs, len);
1936         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1937                 ofs, len, 0);
1938 #endif
1939
1940         ret = cfi_varsize_frob(mtd, do_xxlock_oneblock,
1941                                         ofs, len, DO_XXLOCK_ONEBLOCK_UNLOCK);
1942
1943 #ifdef DEBUG_LOCK_BITS
1944         printk(KERN_DEBUG "%s: lock status after, ret=%d\n",
1945                __FUNCTION__, ret);
1946         cfi_varsize_frob(mtd, do_printlockstatus_oneblock,
1947                 ofs, len, 0);
1948 #endif
1949
1950         return ret;
1951 }
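/*
 * Illustrative sketch only (#if 0, hypothetical name): many Intel chips
 * power up with every block locked, so the usual pattern is to unlock
 * the affected erase blocks before writing.  Note that ofs/len must be
 * aligned to erase-block boundaries for the varsize frob above.
 */
#if 0
static int example_unlock_and_write(struct mtd_info *mtd, loff_t ofs,
                                    size_t len, const u_char *buf)
{
        size_t retlen;
        int ret;

        ret = mtd->unlock(mtd, ofs, len);
        if (ret)
                return ret;
        return mtd->write(mtd, ofs, len, &retlen, buf);
}
#endif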
1952
1953 #ifdef CONFIG_MTD_OTP
1954
1955 typedef int (*otp_op_t)(struct map_info *map, struct flchip *chip,
1956                         u_long data_offset, u_char *buf, u_int size,
1957                         u_long prot_offset, u_int groupno, u_int groupsize);
1958
1959 static int __xipram
1960 do_otp_read(struct map_info *map, struct flchip *chip, u_long offset,
1961             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
1962 {
1963         struct cfi_private *cfi = map->fldrv_priv;
1964         int ret;
1965
1966         spin_lock(chip->mutex);
1967         ret = get_chip(map, chip, chip->start, FL_JEDEC_QUERY);
1968         if (ret) {
1969                 spin_unlock(chip->mutex);
1970                 return ret;
1971         }
1972
1973         /* let's ensure we're not reading back cached data from array mode */
1974         INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
1975
1976         xip_disable(map, chip, chip->start);
1977         if (chip->state != FL_JEDEC_QUERY) {
1978                 map_write(map, CMD(0x90), chip->start);
1979                 chip->state = FL_JEDEC_QUERY;
1980         }
1981         map_copy_from(map, buf, chip->start + offset, size);
1982         xip_enable(map, chip, chip->start);
1983
1984         /* then ensure we don't keep OTP data in the cache */
1985         INVALIDATE_CACHED_RANGE(map, chip->start + offset, size);
1986
1987         put_chip(map, chip, chip->start);
1988         spin_unlock(chip->mutex);
1989         return 0;
1990 }
1991
1992 static int
1993 do_otp_write(struct map_info *map, struct flchip *chip, u_long offset,
1994              u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
1995 {
1996         int ret;
1997
1998         while (size) {
1999                 unsigned long bus_ofs = offset & ~(map_bankwidth(map)-1);
2000                 int gap = offset - bus_ofs;
2001                 int n = min_t(int, size, map_bankwidth(map)-gap);
2002                 map_word datum = map_word_ff(map);
2003
2004                 datum = map_word_load_partial(map, datum, buf, gap, n);
2005                 ret = do_write_oneword(map, chip, bus_ofs, datum, FL_OTP_WRITE);
2006                 if (ret)
2007                         return ret;
2008
2009                 offset += n;
2010                 buf += n;
2011                 size -= n;
2012         }
2013
2014         return 0;
2015 }
2016
2017 static int
2018 do_otp_lock(struct map_info *map, struct flchip *chip, u_long offset,
2019             u_char *buf, u_int size, u_long prot, u_int grpno, u_int grpsz)
2020 {
2021         struct cfi_private *cfi = map->fldrv_priv;
2022         map_word datum;
2023
2024         /* make sure area matches group boundaries */
2025         if (size != grpsz)
2026                 return -EXDEV;
2027
2028         datum = map_word_ff(map);
2029         datum = map_word_clr(map, datum, CMD(1 << grpno));
2030         return do_write_oneword(map, chip, prot, datum, FL_OTP_WRITE);
2031 }
2032
2033 static int cfi_intelext_otp_walk(struct mtd_info *mtd, loff_t from, size_t len,
2034                                  size_t *retlen, u_char *buf,
2035                                  otp_op_t action, int user_regs)
2036 {
2037         struct map_info *map = mtd->priv;
2038         struct cfi_private *cfi = map->fldrv_priv;
2039         struct cfi_pri_intelext *extp = cfi->cmdset_priv;
2040         struct flchip *chip;
2041         struct cfi_intelext_otpinfo *otp;
2042         u_long devsize, reg_prot_offset, data_offset;
2043         u_int chip_num, chip_step, field, reg_fact_size, reg_user_size;
2044         u_int groups, groupno, groupsize, reg_fact_groups, reg_user_groups;
2045         int ret;
2046
2047         *retlen = 0;
2048
2049         /* Check that we actually have some OTP registers */
2050         if (!extp || !(extp->FeatureSupport & 64) || !extp->NumProtectionFields)
2051                 return -ENODATA;
2052
2053         /* we need real chips here not virtual ones */
2054         devsize = (1 << cfi->cfiq->DevSize) * cfi->interleave;
2055         chip_step = devsize >> cfi->chipshift;
2056         chip_num = 0;
2057
2058         /* Some chips have OTP located in the _top_ partition only.
2059            For example: Intel 28F256L18T (T means top-parameter device) */
2060         if (cfi->mfr == MANUFACTURER_INTEL) {
2061                 switch (cfi->id) {
2062                 case 0x880b:
2063                 case 0x880c:
2064                 case 0x880d:
2065                         chip_num = chip_step - 1;
2066                 }
2067         }
2068
2069         for ( ; chip_num < cfi->numchips; chip_num += chip_step) {
2070                 chip = &cfi->chips[chip_num];
2071                 otp = (struct cfi_intelext_otpinfo *)&extp->extra[0];
2072
2073                 /* first OTP region */
2074                 field = 0;
2075                 reg_prot_offset = extp->ProtRegAddr;
2076                 reg_fact_groups = 1;
2077                 reg_fact_size = 1 << extp->FactProtRegSize;
2078                 reg_user_groups = 1;
2079                 reg_user_size = 1 << extp->UserProtRegSize;
2080
2081                 while (len > 0) {
2082                         /* flash geometry fixup */
2083                         data_offset = reg_prot_offset + 1;
2084                         data_offset *= cfi->interleave * cfi->device_type;
2085                         reg_prot_offset *= cfi->interleave * cfi->device_type;
2086                         reg_fact_size *= cfi->interleave;
2087                         reg_user_size *= cfi->interleave;
2088
2089                         if (user_regs) {
2090                                 groups = reg_user_groups;
2091                                 groupsize = reg_user_size;
2092                                 /* skip over factory reg area */
2093                                 groupno = reg_fact_groups;
2094                                 data_offset += reg_fact_groups * reg_fact_size;
2095                         } else {
2096                                 groups = reg_fact_groups;
2097                                 groupsize = reg_fact_size;
2098                                 groupno = 0;
2099                         }
2100
2101                         while (len > 0 && groups > 0) {
2102                                 if (!action) {
2103                                         /*
2104                                          * Special case: if action is NULL
2105                                          * we fill buf with otp_info records.
2106                                          */
2107                                         struct otp_info *otpinfo;
2108                                         map_word lockword;
2109                                         if (len < sizeof(struct otp_info))
2110                                                 return -ENOSPC;
2111                                         len -= sizeof(struct otp_info);
2112                                         ret = do_otp_read(map, chip,
2113                                                           reg_prot_offset,
2114                                                           (u_char *)&lockword,
2115                                                           map_bankwidth(map),
2116                                                           0, 0,  0);
2117                                         if (ret)
2118                                                 return ret;
2119                                         otpinfo = (struct otp_info *)buf;
2120                                         otpinfo->start = from;
2121                                         otpinfo->length = groupsize;
2122                                         otpinfo->locked =
2123                                            !map_word_bitsset(map, lockword,
2124                                                              CMD(1 << groupno));
2125                                         from += groupsize;
2126                                         buf += sizeof(*otpinfo);
2127                                         *retlen += sizeof(*otpinfo);
2128                                 } else if (from >= groupsize) {
2129                                         from -= groupsize;
2130                                         data_offset += groupsize;
2131                                 } else {
2132                                         int size = groupsize;
2133                                         data_offset += from;
2134                                         size -= from;
2135                                         from = 0;
2136                                         if (size > len)
2137                                                 size = len;
2138                                         ret = action(map, chip, data_offset,
2139                                                      buf, size, reg_prot_offset,
2140                                                      groupno, groupsize);
2141                                         if (ret < 0)
2142                                                 return ret;
2143                                         buf += size;
2144                                         len -= size;
2145                                         *retlen += size;
2146                                         data_offset += size;
2147                                 }
2148                                 groupno++;
2149                                 groups--;
2150                         }
2151
2152                         /* next OTP region */
2153                         if (++field == extp->NumProtectionFields)
2154                                 break;
2155                         reg_prot_offset = otp->ProtRegAddr;
2156                         reg_fact_groups = otp->FactGroups;
2157                         reg_fact_size = 1 << otp->FactProtRegSize;
2158                         reg_user_groups = otp->UserGroups;
2159                         reg_user_size = 1 << otp->UserProtRegSize;
2160                         otp++;
2161                 }
2162         }
2163
2164         return 0;
2165 }
2166
2167 static int cfi_intelext_read_fact_prot_reg(struct mtd_info *mtd, loff_t from,
2168                                            size_t len, size_t *retlen,
2169                                             u_char *buf)
2170 {
2171         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2172                                      buf, do_otp_read, 0);
2173 }
2174
2175 static int cfi_intelext_read_user_prot_reg(struct mtd_info *mtd, loff_t from,
2176                                            size_t len, size_t *retlen,
2177                                             u_char *buf)
2178 {
2179         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2180                                      buf, do_otp_read, 1);
2181 }
2182
2183 static int cfi_intelext_write_user_prot_reg(struct mtd_info *mtd, loff_t from,
2184                                             size_t len, size_t *retlen,
2185                                              u_char *buf)
2186 {
2187         return cfi_intelext_otp_walk(mtd, from, len, retlen,
2188                                      buf, do_otp_write, 1);
2189 }
2190
2191 static int cfi_intelext_lock_user_prot_reg(struct mtd_info *mtd,
2192                                            loff_t from, size_t len)
2193 {
2194         size_t retlen;
2195         return cfi_intelext_otp_walk(mtd, from, len, &retlen,
2196                                      NULL, do_otp_lock, 1);
2197 }
2198
2199 static int cfi_intelext_get_fact_prot_info(struct mtd_info *mtd,
2200                                            struct otp_info *buf, size_t len)
2201 {
2202         size_t retlen;
2203         int ret;
2204
2205         ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 0);
2206         return ret ? : retlen;
2207 }
2208
2209 static int cfi_intelext_get_user_prot_info(struct mtd_info *mtd,
2210                                            struct otp_info *buf, size_t len)
2211 {
2212         size_t retlen;
2213         int ret;
2214
2215         ret = cfi_intelext_otp_walk(mtd, 0, len, &retlen, (u_char *)buf, NULL, 1);
2216         return ret ? : retlen;
2217 }
2218
2219 #endif
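/*
 * Illustrative sketch only (#if 0, hypothetical name): enumerating the
 * user OTP groups exposed above and reading the first one.  Note that
 * get_user_prot_info() returns the number of bytes of otp_info records
 * written, or a negative error (-ENODATA if the chip has no OTP area).
 */
#if 0
static int example_dump_otp(struct mtd_info *mtd)
{
        struct otp_info info[4];
        size_t retlen;
        u_char buf[16];
        int n;

        n = mtd->get_user_prot_info(mtd, info, sizeof(info));
        if (n <= 0)
                return n;

        return mtd->read_user_prot_reg(mtd, info[0].start,
                                       min_t(size_t, sizeof(buf),
                                             info[0].length),
                                       &retlen, buf);
}
#endif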
2220
2221 static int cfi_intelext_suspend(struct mtd_info *mtd)
2222 {
2223         struct map_info *map = mtd->priv;
2224         struct cfi_private *cfi = map->fldrv_priv;
2225         int i;
2226         struct flchip *chip;
2227         int ret = 0;
2228
2229         for (i=0; !ret && i<cfi->numchips; i++) {
2230                 chip = &cfi->chips[i];
2231
2232                 spin_lock(chip->mutex);
2233
2234                 switch (chip->state) {
2235                 case FL_READY:
2236                 case FL_STATUS:
2237                 case FL_CFI_QUERY:
2238                 case FL_JEDEC_QUERY:
2239                         if (chip->oldstate == FL_READY) {
2240                                 /* place the chip in a known state before suspend */
2241                                 map_write(map, CMD(0xFF), cfi->chips[i].start);
2242                                 chip->oldstate = chip->state;
2243                                 chip->state = FL_PM_SUSPENDED;
2244                                 /* No need to wake_up() on this state change -
2245                                  * as the whole point is that nobody can do anything
2246                                  * with the chip now anyway.
2247                                  */
2248                         } else {
2249                                 /* There seems to be an operation pending. We must wait for it. */
2250                                 printk(KERN_NOTICE "Flash device refused suspend due to pending operation (oldstate %d)\n", chip->oldstate);
2251                                 ret = -EAGAIN;
2252                         }
2253                         break;
2254                 default:
2255                         /* Should we actually wait? Once upon a time these routines weren't
2256                            allowed to. Or should we return -EAGAIN, because the upper layers
2257                            ought to have already shut down anything which was using the device
2258                            anyway? The latter for now. */
2259                         printk(KERN_NOTICE "Flash device refused suspend due to active operation (state %d)\n", chip->state);
2260                         ret = -EAGAIN;  /* fall through */
2261                 case FL_PM_SUSPENDED:
2262                         break;
2263                 }
2264                 spin_unlock(chip->mutex);
2265         }
2266
2267         /* Unlock the chips again */
2268
2269         if (ret) {
2270                 for (i--; i >=0; i--) {
2271                         chip = &cfi->chips[i];
2272
2273                         spin_lock(chip->mutex);
2274
2275                         if (chip->state == FL_PM_SUSPENDED) {
2276                                 /* No need to force it into a known state here,
2277                                    because we're returning failure, and it didn't
2278                                    get power cycled */
2279                                 chip->state = chip->oldstate;
2280                                 chip->oldstate = FL_READY;
2281                                 wake_up(&chip->wq);
2282                         }
2283                         spin_unlock(chip->mutex);
2284                 }
2285         }
2286
2287         return ret;
2288 }
2289
2290 static void cfi_intelext_resume(struct mtd_info *mtd)
2291 {
2292         struct map_info *map = mtd->priv;
2293         struct cfi_private *cfi = map->fldrv_priv;
2294         int i;
2295         struct flchip *chip;
2296
2297         for (i=0; i<cfi->numchips; i++) {
2298
2299                 chip = &cfi->chips[i];
2300
2301                 spin_lock(chip->mutex);
2302
2303                 /* Go to known state. Chip may have been power cycled */
2304                 if (chip->state == FL_PM_SUSPENDED) {
2305                         map_write(map, CMD(0xFF), cfi->chips[i].start);
2306                         chip->oldstate = chip->state = FL_READY;
2307                         wake_up(&chip->wq);
2308                 }
2309
2310                 spin_unlock(chip->mutex);
2311         }
2312 }
2313
2314 static int cfi_intelext_reset(struct mtd_info *mtd)
2315 {
2316         struct map_info *map = mtd->priv;
2317         struct cfi_private *cfi = map->fldrv_priv;
2318         int i, ret;
2319
2320         for (i=0; i < cfi->numchips; i++) {
2321                 struct flchip *chip = &cfi->chips[i];
2322
2323                 /* force the completion of any ongoing operation
2324                    and switch to array mode so any bootloader in
2325                    flash is accessible for soft reboot. */
2326                 spin_lock(chip->mutex);
2327                 ret = get_chip(map, chip, chip->start, FL_SYNCING);
2328                 if (!ret) {
2329                         map_write(map, CMD(0xff), chip->start);
2330                         chip->state = FL_READY;
2331                 }
2332                 spin_unlock(chip->mutex);
2333         }
2334
2335         return 0;
2336 }
2337
2338 static int cfi_intelext_reboot(struct notifier_block *nb, unsigned long val,
2339                                void *v)
2340 {
2341         struct mtd_info *mtd;
2342
2343         mtd = container_of(nb, struct mtd_info, reboot_notifier);
2344         cfi_intelext_reset(mtd);
2345         return NOTIFY_DONE;
2346 }
2347
2348 static void cfi_intelext_destroy(struct mtd_info *mtd)
2349 {
2350         struct map_info *map = mtd->priv;
2351         struct cfi_private *cfi = map->fldrv_priv;
2352         cfi_intelext_reset(mtd);
2353         unregister_reboot_notifier(&mtd->reboot_notifier);
2354         kfree(cfi->cmdset_priv);
2355         kfree(cfi->cfiq);
2356         kfree(cfi->chips[0].priv);
2357         kfree(cfi);
2358         kfree(mtd->eraseregions);
2359 }
2360
2361 MODULE_LICENSE("GPL");
2362 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org> et al.");
2363 MODULE_DESCRIPTION("MTD chip driver for Intel/Sharp flash chips");
2364 MODULE_ALIAS("cfi_cmdset_0003");
2365 MODULE_ALIAS("cfi_cmdset_0200");