/*
 * Common Flash Interface support:
 *   AMD & Fujitsu Standard Vendor Command Set (ID 0x0002)
 *
 * Copyright (C) 2000 Crossnet Co. <info@crossnet.co.jp>
 * Copyright (C) 2004 Arcom Control Systems Ltd <linux@arcom.com>
 * Copyright (C) 2005 MontaVista Software Inc. <source@mvista.com>
 *
 * 2_by_8 routines added by Simon Munton
 *
 * 4_by_16 work by Carolyn J. Smith
 *
 * XIP support hooks by Vitaly Wool (based on code for Intel flash
 * by Nicolas Pitre)
 *
 * 25/09/2008 Christopher Moore: TopBottom fixup for many Macronix with CFI V1.0
 *
 * Occasionally maintained by Thayne Harbaugh tharbaugh at lnxi dot com
 *
 * This code is GPL
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/io.h>
#include <asm/byteorder.h>

#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/mtd/map.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/cfi.h>
#include <linux/mtd/xip.h>
#define AMD_BOOTLOC_BUG
#define FORCE_WORD_WRITE 0

#define MAX_WORD_RETRIES 3

#define SST49LF004B		0x0060
#define SST49LF040B		0x0050
#define SST49LF008A		0x005a
#define AT49BV6416		0x00d6
static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_write_buffers(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
static int cfi_amdstd_erase_chip(struct mtd_info *, struct erase_info *);
static int cfi_amdstd_erase_varsize(struct mtd_info *, struct erase_info *);
static void cfi_amdstd_sync (struct mtd_info *);
static int cfi_amdstd_suspend (struct mtd_info *);
static void cfi_amdstd_resume (struct mtd_info *);
static int cfi_amdstd_reboot(struct notifier_block *, unsigned long, void *);
static int cfi_amdstd_secsi_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);

static void cfi_amdstd_destroy(struct mtd_info *);

struct mtd_info *cfi_cmdset_0002(struct map_info *, int);
static struct mtd_info *cfi_amdstd_setup (struct mtd_info *);

static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode);
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr);

#include "fwh_lock.h"

static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static struct mtd_chip_driver cfi_amdstd_chipdrv = {
	.probe		= NULL, /* Not usable directly */
	.destroy	= cfi_amdstd_destroy,
	.name		= "cfi_cmdset_0002",
	.module		= THIS_MODULE
};
/* #define DEBUG_CFI_FEATURES */


#ifdef DEBUG_CFI_FEATURES
static void cfi_tell_features(struct cfi_pri_amdstd *extp)
{
	const char* erase_suspend[3] = {
		"Not supported", "Read only", "Read/write"
	};
	const char* top_bottom[6] = {
		"No WP", "8x8KiB sectors at top & bottom, no WP",
		"Bottom boot", "Top boot",
		"Uniform, Bottom WP", "Uniform, Top WP"
	};

	printk("  Silicon revision: %d\n", extp->SiliconRevision >> 1);
	printk("  Address sensitive unlock: %s\n",
	       (extp->SiliconRevision & 1) ? "Not required" : "Required");

	if (extp->EraseSuspend < ARRAY_SIZE(erase_suspend))
		printk("  Erase Suspend: %s\n", erase_suspend[extp->EraseSuspend]);
	else
		printk("  Erase Suspend: Unknown value %d\n", extp->EraseSuspend);

	if (extp->BlkProt == 0)
		printk("  Block protection: Not supported\n");
	else
		printk("  Block protection: %d sectors per group\n", extp->BlkProt);

	printk("  Temporary block unprotect: %s\n",
	       extp->TmpBlkUnprotect ? "Supported" : "Not supported");
	printk("  Block protect/unprotect scheme: %d\n", extp->BlkProtUnprot);
	printk("  Number of simultaneous operations: %d\n", extp->SimultaneousOps);
	printk("  Burst mode: %s\n",
	       extp->BurstMode ? "Supported" : "Not supported");
	if (extp->PageMode == 0)
		printk("  Page mode: Not supported\n");
	else
		printk("  Page mode: %d word page\n", extp->PageMode << 2);

	printk("  Vpp Supply Minimum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMin >> 4, extp->VppMin & 0xf);
	printk("  Vpp Supply Maximum Program/Erase Voltage: %d.%d V\n",
	       extp->VppMax >> 4, extp->VppMax & 0xf);

	if (extp->TopBottom < ARRAY_SIZE(top_bottom))
		printk("  Top/Bottom Boot Block: %s\n", top_bottom[extp->TopBottom]);
	else
		printk("  Top/Bottom Boot Block: Unknown value %d\n", extp->TopBottom);
}
#endif
#ifdef AMD_BOOTLOC_BUG
/* Wheee. Bring me the head of someone at AMD. */
static void fixup_amd_bootblock(struct mtd_info *mtd, void* param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	__u8 major = extp->MajorVersion;
	__u8 minor = extp->MinorVersion;

	if (((major << 8) | minor) < 0x3131) {
		/* CFI version 1.0 => don't trust bootloc */

		DEBUG(MTD_DEBUG_LEVEL1,
			"%s: JEDEC Vendor ID is 0x%02X Device ID is 0x%02X\n",
			map->name, cfi->mfr, cfi->id);

		/* AFAICS all 29LV400 with a bottom boot block have a device ID
		 * of 0x22BA in 16-bit mode and 0xBA in 8-bit mode.
		 * These were badly detected as they have the 0x80 bit set
		 * so treat them as a special case.
		 */
		if (((cfi->id == 0xBA) || (cfi->id == 0x22BA)) &&

			/* Macronix added CFI to their 2nd generation
			 * MX29LV400C B/T but AFAICS no other 29LV400 (AMD,
			 * Fujitsu, Spansion, EON, ESI and older Macronix)
			 * has CFI.
			 *
			 * Therefore also check the manufacturer.
			 * This reduces the risk of false detection due to
			 * the 8-bit device ID.
			 */
			(cfi->mfr == CFI_MFR_MACRONIX)) {
			DEBUG(MTD_DEBUG_LEVEL1,
				"%s: Macronix MX29LV400C with bottom boot block"
				" detected\n", map->name);
			extp->TopBottom = 2;	/* bottom boot */
		} else
		if (cfi->id & 0x80) {
			printk(KERN_WARNING "%s: JEDEC Device ID is 0x%02X. Assuming broken CFI table.\n", map->name, cfi->id);
			extp->TopBottom = 3;	/* top boot */
		} else {
			extp->TopBottom = 2;	/* bottom boot */
		}

		DEBUG(MTD_DEBUG_LEVEL1,
			"%s: AMD CFI PRI V%c.%c has no boot block field;"
			" deduced %s from Device ID\n", map->name, major, minor,
			extp->TopBottom == 2 ? "bottom" : "top");
	}
}
#endif
static void fixup_use_write_buffers(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if (cfi->cfiq->BufWriteTimeoutTyp) {
		DEBUG(MTD_DEBUG_LEVEL1, "Using buffer write method\n");
		mtd->write = cfi_amdstd_write_buffers;
	}
}
/* Atmel chips don't use the same PRI format as AMD chips */
static void fixup_convert_atmel_pri(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	struct cfi_pri_atmel atmel_pri;

	memcpy(&atmel_pri, extp, sizeof(atmel_pri));
	memset((char *)extp + 5, 0, sizeof(*extp) - 5);

	if (atmel_pri.Features & 0x02)
		extp->EraseSuspend = 2;

	/* Some chips got it backwards... */
	if (cfi->id == AT49BV6416) {
		if (atmel_pri.BottomBoot)
			extp->TopBottom = 3;
		else
			extp->TopBottom = 2;
	} else {
		if (atmel_pri.BottomBoot)
			extp->TopBottom = 2;
		else
			extp->TopBottom = 3;
	}

	/* burst write mode not supported */
	cfi->cfiq->BufWriteTimeoutTyp = 0;
	cfi->cfiq->BufWriteTimeoutMax = 0;
}
static void fixup_use_secsi(struct mtd_info *mtd, void *param)
{
	/* Setup for chips with a secsi area */
	mtd->read_user_prot_reg = cfi_amdstd_secsi_read;
	mtd->read_fact_prot_reg = cfi_amdstd_secsi_read;
}
static void fixup_use_erase_chip(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	if ((cfi->cfiq->NumEraseRegions == 1) &&
	    ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0)) {
		mtd->erase = cfi_amdstd_erase_chip;
	}
}
/*
 * Some Atmel chips (e.g. the AT49BV6416) power-up with all sectors
 * locked by default.
 */
static void fixup_use_atmel_lock(struct mtd_info *mtd, void *param)
{
	mtd->lock = cfi_atmel_lock;
	mtd->unlock = cfi_atmel_unlock;
	mtd->flags |= MTD_POWERUP_LOCK;
}
static void fixup_old_sst_eraseregion(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	/*
	 * These flashes report two separate eraseblock regions based on the
	 * sector_erase-size and block_erase-size, although they both operate on the
	 * same memory. This is not allowed according to CFI, so we just pick the
	 * sector_erase-size.
	 */
	cfi->cfiq->NumEraseRegions = 1;
}
static void fixup_sst39vf(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	fixup_old_sst_eraseregion(mtd);

	cfi->addr_unlock1 = 0x5555;
	cfi->addr_unlock2 = 0x2AAA;
}
static void fixup_sst39vf_rev_b(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	fixup_old_sst_eraseregion(mtd);

	cfi->addr_unlock1 = 0x555;
	cfi->addr_unlock2 = 0x2AA;
}
static void fixup_s29gl064n_sectors(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) {
		cfi->cfiq->EraseRegionInfo[0] |= 0x0040;
		pr_warning("%s: Bad S29GL064N CFI data, adjust from 64 to 128 sectors\n", mtd->name);
	}
}
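/*
 * EraseRegionInfo encoding, for reference (it is used by the S29GL fixups
 * above and below): the low 16 bits hold (number of sectors - 1), so
 * 0x003f describes 64 sectors and OR-ing in 0x0040 yields 0x007f, i.e.
 * 128 sectors.  The upper 16 bits give the sector size in units of 256
 * bytes.
 */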
static void fixup_s29gl032n_sectors(struct mtd_info *mtd, void *param)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) {
		cfi->cfiq->EraseRegionInfo[1] &= ~0x0040;
		pr_warning("%s: Bad S29GL032N CFI data, adjust from 127 to 63 sectors\n", mtd->name);
	}
}
/* Used to fix CFI-Tables of chips without Extended Query Tables */
static struct cfi_fixup cfi_nopri_fixup_table[] = {
	{ CFI_MFR_SST, 0x234A, fixup_sst39vf, NULL, }, // SST39VF1602
	{ CFI_MFR_SST, 0x234B, fixup_sst39vf, NULL, }, // SST39VF1601
	{ CFI_MFR_SST, 0x235A, fixup_sst39vf, NULL, }, // SST39VF3202
	{ CFI_MFR_SST, 0x235B, fixup_sst39vf, NULL, }, // SST39VF3201
	{ CFI_MFR_SST, 0x235C, fixup_sst39vf_rev_b, NULL, }, // SST39VF3202B
	{ CFI_MFR_SST, 0x235D, fixup_sst39vf_rev_b, NULL, }, // SST39VF3201B
	{ CFI_MFR_SST, 0x236C, fixup_sst39vf_rev_b, NULL, }, // SST39VF6402B
	{ CFI_MFR_SST, 0x236D, fixup_sst39vf_rev_b, NULL, }, // SST39VF6401B
	{ 0, 0, NULL, NULL }
};

static struct cfi_fixup cfi_fixup_table[] = {
	{ CFI_MFR_ATMEL, CFI_ID_ANY, fixup_convert_atmel_pri, NULL },
#ifdef AMD_BOOTLOC_BUG
	{ CFI_MFR_AMD, CFI_ID_ANY, fixup_amd_bootblock, NULL },
	{ CFI_MFR_MACRONIX, CFI_ID_ANY, fixup_amd_bootblock, NULL },
#endif
	{ CFI_MFR_AMD, 0x0050, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0053, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0055, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0056, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x005C, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x005F, fixup_use_secsi, NULL, },
	{ CFI_MFR_AMD, 0x0c01, fixup_s29gl064n_sectors, NULL, },
	{ CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors, NULL, },
	{ CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors, NULL, },
	{ CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors, NULL, },
#if !FORCE_WORD_WRITE
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_write_buffers, NULL, },
#endif
	{ 0, 0, NULL, NULL }
};

static struct cfi_fixup jedec_fixup_table[] = {
	{ CFI_MFR_SST, SST49LF004B, fixup_use_fwh_lock, NULL, },
	{ CFI_MFR_SST, SST49LF040B, fixup_use_fwh_lock, NULL, },
	{ CFI_MFR_SST, SST49LF008A, fixup_use_fwh_lock, NULL, },
	{ 0, 0, NULL, NULL }
};

static struct cfi_fixup fixup_table[] = {
	/* The CFI vendor IDs and the JEDEC vendor IDs appear to be common.
	 * It looks like the device IDs are as well.  This table picks all
	 * the cases where we know that is so.
	 */
	{ CFI_MFR_ANY, CFI_ID_ANY, fixup_use_erase_chip, NULL },
	{ CFI_MFR_ATMEL, AT49BV6416, fixup_use_atmel_lock, NULL },
	{ 0, 0, NULL, NULL }
};
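/*
 * How these tables are consumed, as a minimal sketch (assuming the
 * cfi_fixup() helper declared in <linux/mtd/cfi.h>): it walks a table and
 * invokes every entry whose mfr/id pair matches the probed chip, with
 * CFI_MFR_ANY and CFI_ID_ANY acting as wildcards.  The entry below is a
 * hypothetical example, not a real chip ID:
 */
#if 0
static void fixup_my_board(struct mtd_info *mtd, void *param)
{
	/* e.g. override one bogus geometry field reported by the chip */
}

static struct cfi_fixup my_fixup_table[] = {
	{ CFI_MFR_AMD, 0x2222, fixup_my_board, NULL },	/* hypothetical ID */
	{ 0, 0, NULL, NULL }
};
#endif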
static void cfi_fixup_major_minor(struct cfi_private *cfi,
				  struct cfi_pri_amdstd *extp)
{
	if (cfi->mfr == CFI_MFR_SAMSUNG && cfi->id == 0x257e &&
	    extp->MajorVersion == '0')
		extp->MajorVersion = '1';
}
struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct mtd_info *mtd;
	int i;

	mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
	if (!mtd) {
		printk(KERN_WARNING "Failed to allocate memory for MTD device\n");
		return NULL;
	}
	mtd->priv = map;
	mtd->type = MTD_NORFLASH;

	/* Fill in the default mtd operations */
	mtd->erase   = cfi_amdstd_erase_varsize;
	mtd->write   = cfi_amdstd_write_words;
	mtd->read    = cfi_amdstd_read;
	mtd->sync    = cfi_amdstd_sync;
	mtd->suspend = cfi_amdstd_suspend;
	mtd->resume  = cfi_amdstd_resume;
	mtd->flags   = MTD_CAP_NORFLASH;
	mtd->name    = map->name;
	mtd->writesize = 1;

	mtd->reboot_notifier.notifier_call = cfi_amdstd_reboot;

	if (cfi->cfi_mode==CFI_MODE_CFI){
		unsigned char bootloc;
		__u16 adr = primary?cfi->cfiq->P_ADR:cfi->cfiq->A_ADR;
		struct cfi_pri_amdstd *extp;

		extp = (struct cfi_pri_amdstd*)cfi_read_pri(map, adr, sizeof(*extp), "Amd/Fujitsu");
		if (extp) {
			/*
			 * It's a real CFI chip, not one for which the probe
			 * routine faked a CFI structure.
			 */
			cfi_fixup_major_minor(cfi, extp);

			/*
			 * Valid primary extension versions are: 1.0, 1.1, 1.2, 1.3, 1.4
			 * see: http://cs.ozerki.net/zap/pub/axim-x5/docs/cfi_r20.pdf, page 19
			 *      http://www.spansion.com/Support/AppNotes/cfi_100_20011201.pdf
			 *      http://www.spansion.com/Support/Datasheets/s29ws-p_00_a12_e.pdf
			 */
			if (extp->MajorVersion != '1' ||
			    (extp->MajorVersion == '1' && (extp->MinorVersion < '0' || extp->MinorVersion > '4'))) {
				printk(KERN_ERR "  Unknown Amd/Fujitsu Extended Query "
				       "version %c.%c (%#02x/%#02x).\n",
				       extp->MajorVersion, extp->MinorVersion,
				       extp->MajorVersion, extp->MinorVersion);
				kfree(extp);
				kfree(mtd);
				return NULL;
			}

			printk(KERN_INFO "  Amd/Fujitsu Extended Query version %c.%c.\n",
			       extp->MajorVersion, extp->MinorVersion);

			/* Install our own private info structure */
			cfi->cmdset_priv = extp;

			/* Apply cfi device specific fixups */
			cfi_fixup(mtd, cfi_fixup_table);

#ifdef DEBUG_CFI_FEATURES
			/* Tell the user about it in lots of lovely detail */
			cfi_tell_features(extp);
#endif

			bootloc = extp->TopBottom;
			if ((bootloc < 2) || (bootloc > 5)) {
				printk(KERN_WARNING "%s: CFI contains unrecognised boot "
				       "bank location (%d). Assuming bottom.\n",
				       map->name, bootloc);
				bootloc = 2;
			}

			if (bootloc == 3 && cfi->cfiq->NumEraseRegions > 1) {
				printk(KERN_WARNING "%s: Swapping erase regions for top-boot CFI table.\n", map->name);

				for (i=0; i<cfi->cfiq->NumEraseRegions / 2; i++) {
					int j = (cfi->cfiq->NumEraseRegions-1)-i;
					__u32 swap;

					swap = cfi->cfiq->EraseRegionInfo[i];
					cfi->cfiq->EraseRegionInfo[i] = cfi->cfiq->EraseRegionInfo[j];
					cfi->cfiq->EraseRegionInfo[j] = swap;
				}
			}
			/* Set the default CFI lock/unlock addresses */
			cfi->addr_unlock1 = 0x555;
			cfi->addr_unlock2 = 0x2aa;
		}
		cfi_fixup(mtd, cfi_nopri_fixup_table);

		if (!cfi->addr_unlock1 || !cfi->addr_unlock2) {
			kfree(mtd);
			return NULL;
		}

	} /* CFI mode */
	else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
		/* Apply jedec specific fixups */
		cfi_fixup(mtd, jedec_fixup_table);
	}
	/* Apply generic fixups */
	cfi_fixup(mtd, fixup_table);

	for (i=0; i< cfi->numchips; i++) {
		cfi->chips[i].word_write_time = 1<<cfi->cfiq->WordWriteTimeoutTyp;
		cfi->chips[i].buffer_write_time = 1<<cfi->cfiq->BufWriteTimeoutTyp;
		cfi->chips[i].erase_time = 1<<cfi->cfiq->BlockEraseTimeoutTyp;
		cfi->chips[i].ref_point_counter = 0;
		init_waitqueue_head(&(cfi->chips[i].wq));
	}

	map->fldrv = &cfi_amdstd_chipdrv;

	return cfi_amdstd_setup(mtd);
}
struct mtd_info *cfi_cmdset_0006(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
struct mtd_info *cfi_cmdset_0701(struct map_info *map, int primary) __attribute__((alias("cfi_cmdset_0002")));
EXPORT_SYMBOL_GPL(cfi_cmdset_0002);
EXPORT_SYMBOL_GPL(cfi_cmdset_0006);
EXPORT_SYMBOL_GPL(cfi_cmdset_0701);
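/*
 * The 0x0006 and 0x0701 primary command-set IDs are handled entirely by
 * the aliases above: as far as this driver is concerned they behave the
 * same as the AMD/Fujitsu 0x0002 set.
 */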
static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long devsize = (1<<cfi->cfiq->DevSize) * cfi->interleave;
	unsigned long offset = 0;
	int i,j;

	printk(KERN_NOTICE "number of %s chips: %d\n",
	       (cfi->cfi_mode == CFI_MODE_CFI)?"CFI":"JEDEC",cfi->numchips);
	/* Select the correct geometry setup */
	mtd->size = devsize * cfi->numchips;

	mtd->numeraseregions = cfi->cfiq->NumEraseRegions * cfi->numchips;
	mtd->eraseregions = kmalloc(sizeof(struct mtd_erase_region_info)
				    * mtd->numeraseregions, GFP_KERNEL);
	if (!mtd->eraseregions) {
		printk(KERN_WARNING "Failed to allocate memory for MTD erase region info\n");
		goto setup_err;
	}

	for (i=0; i<cfi->cfiq->NumEraseRegions; i++) {
		unsigned long ernum, ersize;
		ersize = ((cfi->cfiq->EraseRegionInfo[i] >> 8) & ~0xff) * cfi->interleave;
		ernum = (cfi->cfiq->EraseRegionInfo[i] & 0xffff) + 1;

		if (mtd->erasesize < ersize) {
			mtd->erasesize = ersize;
		}
		for (j=0; j<cfi->numchips; j++) {
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].offset = (j*devsize)+offset;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].erasesize = ersize;
			mtd->eraseregions[(j*cfi->cfiq->NumEraseRegions)+i].numblocks = ernum;
		}
		offset += (ersize * ernum);
	}
	if (offset != devsize) {
		/* Argh */
		printk(KERN_WARNING "Sum of regions (%lx) != total size of set of interleaved chips (%lx)\n", offset, devsize);
		goto setup_err;
	}
#if 0
	/* debug */
	for (i=0; i<mtd->numeraseregions;i++){
		printk("%d: offset=0x%x,size=0x%x,blocks=%d\n",
		       i,mtd->eraseregions[i].offset,
		       mtd->eraseregions[i].erasesize,
		       mtd->eraseregions[i].numblocks);
	}
#endif

	__module_get(THIS_MODULE);
	register_reboot_notifier(&mtd->reboot_notifier);
	return mtd;

 setup_err:
	kfree(mtd->eraseregions);
	kfree(mtd);
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	return NULL;
}
/*
 * Return true if the chip is ready.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and is indicated by no toggle bits toggling.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 */
static int __xipram chip_ready(struct map_info *map, unsigned long addr)
{
	map_word d, t;

	d = map_read(map, addr);
	t = map_read(map, addr);

	return map_word_equal(map, d, t);
}
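/*
 * The double read above is the classic AMD "toggle bit" test: DQ6 toggles
 * on successive reads for as long as the embedded program/erase algorithm
 * is running, so two identical back-to-back reads mean the algorithm has
 * completed (or was never started).
 */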
/*
 * Return true if the chip is ready and has the correct value.
 *
 * Ready is one of: read mode, query mode, erase-suspend-read mode (in any
 * non-suspended sector) and it is indicated by no bits toggling.
 *
 * Errors are indicated by toggling bits, or by bits held at the wrong
 * value.
 *
 * Note that anything more complicated than checking if no bits are toggling
 * (including checking DQ5 for an error status) is tricky to get working
 * correctly and is therefore not done (particularly with interleaved chips
 * as each chip must be checked independently of the others).
 */
static int __xipram chip_good(struct map_info *map, unsigned long addr, map_word expected)
{
	map_word oldd, curd;

	oldd = map_read(map, addr);
	curd = map_read(map, addr);

	return	map_word_equal(map, oldd, curd) &&
		map_word_equal(map, curd, expected);
}
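/*
 * Locking contract for the helpers below: get_chip() must be entered with
 * chip->mutex held; it may drop and re-take the mutex while waiting, and
 * on success (0) it returns with the mutex held and the chip claimed for
 * the requested mode.  put_chip() releases the claim, resumes a suspended
 * erase if one was in progress, and wakes any waiters.
 */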
static int get_chip(struct map_info *map, struct flchip *chip, unsigned long adr, int mode)
{
	DECLARE_WAITQUEUE(wait, current);
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo;
	struct cfi_pri_amdstd *cfip = (struct cfi_pri_amdstd *)cfi->cmdset_priv;

 resettime:
	timeo = jiffies + HZ;
 retry:
	switch (chip->state) {

	case FL_STATUS:
		for (;;) {
			if (chip_ready(map, adr))
				break;

			if (time_after(jiffies, timeo)) {
				printk(KERN_ERR "Waiting for chip to be ready timed out.\n");
				return -EIO;
			}
			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Someone else might have been playing with it. */
			goto retry;
		}

	case FL_READY:
	case FL_CFI_QUERY:
	case FL_JEDEC_QUERY:
		return 0;

	case FL_ERASING:
		if (!cfip || !(cfip->EraseSuspend & (0x1|0x2)) ||
		    !(mode == FL_READY || mode == FL_POINT ||
		    (mode == FL_WRITING && (cfip->EraseSuspend & 0x2))))
			goto sleep;

		/* We could check to see if we're trying to access the sector
		 * that is currently being erased. However, no user will try
		 * anything like that so we just wait for the timeout. */

		/* Erase suspend */
		/* It's harmless to issue the Erase-Suspend and Erase-Resume
		 * commands when the erase algorithm isn't in progress. */
		map_write(map, CMD(0xB0), chip->in_progress_block_addr);
		chip->oldstate = FL_ERASING;
		chip->state = FL_ERASE_SUSPENDING;
		chip->erase_suspended = 1;
		for (;;) {
			if (chip_ready(map, adr))
				break;

			if (time_after(jiffies, timeo)) {
				/* Should have suspended the erase by now.
				 * Send an Erase-Resume command as either
				 * there was an error (so leave the erase
				 * routine to recover from it) or we trying to
				 * use the erase-in-progress sector. */
				map_write(map, CMD(0x30), chip->in_progress_block_addr);
				chip->state = FL_ERASING;
				chip->oldstate = FL_READY;
				printk(KERN_ERR "MTD %s(): chip not ready after erase suspend\n", __func__);
				return -EIO;
			}

			mutex_unlock(&chip->mutex);
			cfi_udelay(1);
			mutex_lock(&chip->mutex);
			/* Nobody will touch it while it's in state FL_ERASE_SUSPENDING.
			   So we can just loop here. */
		}
		chip->state = FL_READY;
		return 0;

	case FL_XIP_WHILE_ERASING:
		if (mode != FL_READY && mode != FL_POINT &&
		    (!cfip || !(cfip->EraseSuspend&2)))
			goto sleep;
		chip->oldstate = chip->state;
		chip->state = FL_READY;
		return 0;

	case FL_SHUTDOWN:
		/* The machine is rebooting */
		return -EIO;

	case FL_POINT:
		/* Only if there's no operation suspended... */
		if (mode == FL_READY && chip->oldstate == FL_READY)
			return 0;

	default:
	sleep:
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);
		mutex_unlock(&chip->mutex);
		schedule();
		remove_wait_queue(&chip->wq, &wait);
		mutex_lock(&chip->mutex);
		goto resettime;
	}
}
static void put_chip(struct map_info *map, struct flchip *chip, unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	switch(chip->oldstate) {
	case FL_ERASING:
		chip->state = chip->oldstate;
		map_write(map, CMD(0x30), chip->in_progress_block_addr);
		chip->oldstate = FL_READY;
		chip->state = FL_ERASING;
		break;

	case FL_XIP_WHILE_ERASING:
		chip->state = chip->oldstate;
		chip->oldstate = FL_READY;
		break;

	case FL_READY:
	case FL_STATUS:
		/* We should really make set_vpp() count, rather than doing this */
		DISABLE_VPP(map);
		break;
	default:
		printk(KERN_ERR "MTD: put_chip() called with oldstate %d!!\n", chip->oldstate);
	}
	wake_up(&chip->wq);
}
#ifdef CONFIG_MTD_XIP

/*
 * No interrupt what so ever can be serviced while the flash isn't in array
 * mode.  This is ensured by the xip_disable() and xip_enable() functions
 * enclosing any code path where the flash is known not to be in array mode.
 * And within a XIP disabled code path, only functions marked with __xipram
 * may be called and nothing else (it's a good thing to inspect generated
 * assembly to make sure inline functions were actually inlined and that gcc
 * didn't emit calls to its own support functions).  Configuring MTD CFI
 * support to a single buswidth and a single interleave is also recommended.
 */

static void xip_disable(struct map_info *map, struct flchip *chip,
			unsigned long adr)
{
	/* TODO: chips with no XIP use should ignore and return */
	(void) map_read(map, adr); /* ensure mmu mapping is up to date */
	local_irq_disable();
}

static void __xipram xip_enable(struct map_info *map, struct flchip *chip,
				unsigned long adr)
{
	struct cfi_private *cfi = map->fldrv_priv;

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), adr);
		chip->state = FL_READY;
	}
	(void) map_read(map, adr);
	xip_iprefetch();
	local_irq_enable();
}
/*
 * When a delay is required for the flash operation to complete, the
 * xip_udelay() function is polling for both the given timeout and pending
 * (but still masked) hardware interrupts.  Whenever there is an interrupt
 * pending then the flash erase operation is suspended, array mode restored
 * and interrupts unmasked.  Task scheduling might also happen at that
 * point.  The CPU eventually returns from the interrupt or the call to
 * schedule() and the suspended flash operation is resumed for the remainder
 * of the delay period.
 *
 * Warning: this function _will_ fool interrupt latency tracing tools.
 */

static void __xipram xip_udelay(struct map_info *map, struct flchip *chip,
				unsigned long adr, int usec)
{
	struct cfi_private *cfi = map->fldrv_priv;
	struct cfi_pri_amdstd *extp = cfi->cmdset_priv;
	map_word status, OK = CMD(0x80);
	unsigned long suspended, start = xip_currtime();
	flstate_t oldstate;

	do {
		cpu_relax();
		if (xip_irqpending() && extp &&
		    ((chip->state == FL_ERASING && (extp->EraseSuspend & 2))) &&
		    (cfi_interleave_is_1(cfi) || chip->oldstate == FL_READY)) {
			/*
			 * Let's suspend the erase operation when supported.
			 * Note that we currently don't try to suspend
			 * interleaved chips if there is already another
			 * operation suspended (imagine what happens
			 * when one chip was already done with the current
			 * operation while another chip suspended it, then
			 * we resume the whole thing at once).  Yes, it
			 * can happen!
			 */
			map_write(map, CMD(0xb0), adr);
			usec -= xip_elapsed_since(start);
			suspended = xip_currtime();
			do {
				if (xip_elapsed_since(suspended) > 100000) {
					/*
					 * The chip doesn't want to suspend
					 * after waiting for 100 msecs.
					 * This is a critical error but there
					 * is not much we can do here.
					 */
					return;
				}
				status = map_read(map, adr);
			} while (!map_word_andequal(map, status, OK, OK));

			/* Suspend succeeded */
			oldstate = chip->state;
			if (!map_word_bitsset(map, status, CMD(0x40)))
				break;
			chip->state = FL_XIP_WHILE_ERASING;
			chip->erase_suspended = 1;
			map_write(map, CMD(0xf0), adr);
			(void) map_read(map, adr);
			xip_iprefetch();
			local_irq_enable();
			mutex_unlock(&chip->mutex);
			xip_iprefetch();
			cond_resched();

			/*
			 * We're back.  However someone else might have
			 * decided to go write to the chip if we are in
			 * a suspended erase state.  If so let's wait
			 * until it's done.
			 */
			mutex_lock(&chip->mutex);
			while (chip->state != FL_XIP_WHILE_ERASING) {
				DECLARE_WAITQUEUE(wait, current);
				set_current_state(TASK_UNINTERRUPTIBLE);
				add_wait_queue(&chip->wq, &wait);
				mutex_unlock(&chip->mutex);
				schedule();
				remove_wait_queue(&chip->wq, &wait);
				mutex_lock(&chip->mutex);
			}
			/* Disallow XIP again */
			local_irq_disable();

			/* Resume the write or erase operation */
			map_write(map, CMD(0x30), adr);
			chip->state = oldstate;
			start = xip_currtime();
		} else if (usec >= 1000000/HZ) {
			/*
			 * Try to save on CPU power when waiting delay
			 * is at least a system timer tick period.
			 * No need to be extremely accurate here.
			 */
			xip_cpu_idle();
		}
		status = map_read(map, adr);
	} while (!map_word_andequal(map, status, OK, OK)
		 && xip_elapsed_since(start) < usec);
}

#define UDELAY(map, chip, adr, usec)  xip_udelay(map, chip, adr, usec)
/*
 * The INVALIDATE_CACHED_RANGE() macro is normally used in parallel while
 * the flash is actively programming or erasing since we have to poll for
 * the operation to complete anyway.  We can't do that in a generic way with
 * a XIP setup so do it before the actual flash operation in this case
 * and stub it out from INVALIDATE_CACHE_UDELAY.
 */
#define XIP_INVAL_CACHED_RANGE(map, from, size)  \
	INVALIDATE_CACHED_RANGE(map, from, size)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
	UDELAY(map, chip, adr, usec)

/*
 * Extra notes:
 *
 * Activating this XIP support changes the way the code works a bit.  For
 * example the code to suspend the current process when concurrent access
 * happens is never executed because xip_udelay() will always return with the
 * same chip state as it was entered with.  This is why there is no care for
 * the presence of add_wait_queue() or schedule() calls from within a couple
 * xip_disable()'d areas of code, like in do_erase_oneblock for example.
 * The queueing and scheduling are always happening within xip_udelay().
 *
 * Similarly, get_chip() and put_chip() just happen to always be executed
 * with chip->state set to FL_READY (or FL_XIP_WHILE_*) where flash state
 * is in array mode, therefore never executing many cases therein and not
 * causing any problem with XIP.
 */

#else
#define xip_disable(map, chip, adr)
#define xip_enable(map, chip, adr)
#define XIP_INVAL_CACHED_RANGE(x...)

#define UDELAY(map, chip, adr, usec)  \
do {  \
	mutex_unlock(&chip->mutex);  \
	cfi_udelay(usec);  \
	mutex_lock(&chip->mutex);  \
} while (0)

#define INVALIDATE_CACHE_UDELAY(map, chip, adr, len, usec)  \
do {  \
	mutex_unlock(&chip->mutex);  \
	INVALIDATE_CACHED_RANGE(map, adr, len);  \
	cfi_udelay(usec);  \
	mutex_lock(&chip->mutex);  \
} while (0)

#endif
static inline int do_read_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	unsigned long cmd_addr;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	adr += chip->start;

	/* Ensure cmd read/writes are aligned. */
	cmd_addr = adr & ~(map_bankwidth(map)-1);

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, cmd_addr, FL_READY);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	if (chip->state != FL_POINT && chip->state != FL_READY) {
		map_write(map, CMD(0xf0), cmd_addr);
		chip->state = FL_READY;
	}

	map_copy_from(map, buf, adr, len);

	put_chip(map, chip, cmd_addr);

	mutex_unlock(&chip->mutex);
	return 0;
}
static int cfi_amdstd_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */

	chipnum = (from >> cfi->chipshift);
	ofs = from - (chipnum <<  cfi->chipshift);

	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> cfi->chipshift)
			thislen = (1<<cfi->chipshift) - ofs;
		else
			thislen = len;

		ret = do_read_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}
static inline int do_read_secsi_onechip(struct map_info *map, struct flchip *chip, loff_t adr, size_t len, u_char *buf)
{
	DECLARE_WAITQUEUE(wait, current);
	unsigned long timeo = jiffies + HZ;
	struct cfi_private *cfi = map->fldrv_priv;

 retry:
	mutex_lock(&chip->mutex);

	if (chip->state != FL_READY){
#if 0
		printk(KERN_DEBUG "Waiting for chip to read, status = %d\n", chip->state);
#endif
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&chip->wq, &wait);

		mutex_unlock(&chip->mutex);

		schedule();
		remove_wait_queue(&chip->wq, &wait);
#if 0
		if(signal_pending(current))
			return -EINTR;
#endif
		timeo = jiffies + HZ;

		goto retry;
	}

	adr += chip->start;

	chip->state = FL_READY;

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x88, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	map_copy_from(map, buf, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x90, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x00, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	wake_up(&chip->wq);
	mutex_unlock(&chip->mutex);

	return 0;
}
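/*
 * Command sequence used above, per the AMD SecSi command definitions:
 * after the usual two unlock cycles, 0x88 enters the SecSi (Secured
 * Silicon) sector overlay so reads return the protection-register bytes;
 * the trailing 0x90/0x00 pair exits the overlay, returning the chip to
 * the normal array.
 */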
static int cfi_amdstd_secsi_read (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long ofs;
	int chipnum;
	int ret = 0;

	/* ofs: offset within the first chip that the first read should start */

	/* 8 secsi bytes per chip */
	chipnum = from >> 3;
	ofs = from & 7;

	*retlen = 0;

	while (len) {
		unsigned long thislen;

		if (chipnum >= cfi->numchips)
			break;

		if ((len + ofs -1) >> 3)
			thislen = (1<<3) - ofs;
		else
			thislen = len;

		ret = do_read_secsi_onechip(map, &cfi->chips[chipnum], ofs, thislen, buf);
		if (ret)
			break;

		*retlen += thislen;
		len -= thislen;
		buf += thislen;

		ofs = 0;
		chipnum++;
	}
	return ret;
}
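/*
 * For reference, the word-program flow implemented by do_write_oneword()
 * below follows the standard AMD embedded algorithm: two unlock cycles
 * (0xAA to unlock address 1, 0x55 to unlock address 2), the 0xA0 program
 * command, then the datum written to its target address, followed by
 * polling until the data reads back stable.
 */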
static int __xipram do_write_oneword(struct map_info *map, struct flchip *chip, unsigned long adr, map_word datum)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	/*
	 * We use a 1ms + 1 jiffies generic timeout for writes (most devices
	 * have a max write time of a few hundreds usec).  However, we should
	 * use the maximum timeout value given by the chip at probe time
	 * instead.  Unfortunately, struct flchip does not have a field for
	 * the maximum timeout, only for the typical one, which can be far
	 * too short depending on the conditions.  The ' + 1' is to avoid
	 * having a timeout of 0 jiffies if HZ is smaller than 1000.
	 */
	unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
	int ret = 0;
	map_word oldd;
	int retry_cnt = 0;

	adr += chip->start;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
	       __func__, adr, datum.x[0] );

	/*
	 * Check for a NOP for the case when the datum to write is already
	 * present - it saves time and works around buggy chips that corrupt
	 * data at other locations when 0xff is written to a location that
	 * already contains 0xff.
	 */
	oldd = map_read(map, adr);
	if (map_word_equal(map, oldd, datum)) {
		DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): NOP\n",
		       __func__);
		goto op_done;
	}

	XIP_INVAL_CACHED_RANGE(map, adr, map_bankwidth(map));
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);
 retry:
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, datum, adr);
	chip->state = FL_WRITING;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	/* See comment above for timeout value. */
	timeo = jiffies + uWriteTimeout;
	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			mutex_lock(&chip->mutex);
			continue;
		}

		if (time_after(jiffies, timeo) && !chip_ready(map, adr)){
			xip_enable(map, chip, adr);
			printk(KERN_WARNING "MTD %s(): software timeout\n", __func__);
			xip_disable(map, chip, adr);
			break;
		}

		if (chip_ready(map, adr))
			break;

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, datum)) {
		/* reset on all failures. */
		map_write( map, CMD(0xF0), chip->start );
		/* FIXME - should have reset delay before continuing */

		if (++retry_cnt <= MAX_WORD_RETRIES)
			goto retry;

		ret = -EIO;
	}
	xip_enable(map, chip, adr);
 op_done:
	chip->state = FL_READY;
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);

	return ret;
}
static int cfi_amdstd_write_words(struct mtd_info *mtd, loff_t to, size_t len,
				  size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;
	int chipnum;
	unsigned long ofs, chipstart;
	DECLARE_WAITQUEUE(wait, current);

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);
	chipstart = cfi->chips[chipnum].start;

	/* If it's not bus-aligned, do the first byte write */
	if (ofs & (map_bankwidth(map)-1)) {
		unsigned long bus_ofs = ofs & ~(map_bankwidth(map)-1);
		int i = ofs - bus_ofs;
		int n = 0;
		map_word tmp_buf;

 retry:
		mutex_lock(&cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
#if 0
			printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
#endif
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			mutex_unlock(&cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
#if 0
			if(signal_pending(current))
				return -EINTR;
#endif
			goto retry;
		}

		/* Load 'tmp_buf' with old contents of flash */
		tmp_buf = map_read(map, bus_ofs+chipstart);

		mutex_unlock(&cfi->chips[chipnum].mutex);

		/* Number of bytes to copy from buffer */
		n = min_t(int, len, map_bankwidth(map)-i);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, i, n);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       bus_ofs, tmp_buf);
		if (ret)
			return ret;

		ofs += n;
		buf += n;
		(*retlen) += n;
		len -= n;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* We are now aligned, write as much as possible */
	while(len >= map_bankwidth(map)) {
		map_word datum;

		datum = map_word_load(map, buf);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, datum);
		if (ret)
			return ret;

		ofs += map_bankwidth(map);
		buf += map_bankwidth(map);
		(*retlen) += map_bankwidth(map);
		len -= map_bankwidth(map);

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
			chipstart = cfi->chips[chipnum].start;
		}
	}

	/* Write the trailing bytes if any */
	if (len & (map_bankwidth(map)-1)) {
		map_word tmp_buf;

 retry1:
		mutex_lock(&cfi->chips[chipnum].mutex);

		if (cfi->chips[chipnum].state != FL_READY) {
#if 0
			printk(KERN_DEBUG "Waiting for chip to write, status = %d\n", cfi->chips[chipnum].state);
#endif
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&cfi->chips[chipnum].wq, &wait);

			mutex_unlock(&cfi->chips[chipnum].mutex);

			schedule();
			remove_wait_queue(&cfi->chips[chipnum].wq, &wait);
#if 0
			if(signal_pending(current))
				return -EINTR;
#endif
			goto retry1;
		}

		tmp_buf = map_read(map, ofs + chipstart);

		mutex_unlock(&cfi->chips[chipnum].mutex);

		tmp_buf = map_word_load_partial(map, tmp_buf, buf, 0, len);

		ret = do_write_oneword(map, &cfi->chips[chipnum],
				       ofs, tmp_buf);
		if (ret)
			return ret;

		(*retlen) += len;
	}

	return 0;
}
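/*
 * The buffered-write flow implemented below follows the AMD write-buffer
 * command set: the unlock cycles, 0x25 (Write to Buffer) at the sector
 * address, a word count of (words - 1), the data words themselves, and
 * finally 0x29 (Write Buffer Program Confirm) to start programming.
 */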
/*
 * FIXME: interleaved mode not tested, and probably not supported!
 */
static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
				    unsigned long adr, const u_char *buf,
				    int len)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	/* see comments in do_write_oneword() regarding uWriteTimeout. */
	unsigned long uWriteTimeout = ( HZ / 1000 ) + 1;
	int ret = -EIO;
	unsigned long cmd_adr;
	int z, words;
	map_word datum;

	adr += chip->start;
	cmd_adr = adr;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	datum = map_word_load(map, buf);

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): WRITE 0x%.8lx(0x%.8lx)\n",
	       __func__, adr, datum.x[0] );

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, cmd_adr);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	//cfi_send_gen_cmd(0xA0, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	/* Write Buffer Load */
	map_write(map, CMD(0x25), cmd_adr);

	chip->state = FL_WRITING_TO_BUFFER;

	/* Write length of data to come */
	words = len / map_bankwidth(map);
	map_write(map, CMD(words - 1), cmd_adr);
	/* Write data */
	z = 0;
	while(z < words * map_bankwidth(map)) {
		datum = map_word_load(map, buf);
		map_write(map, datum, adr + z);

		z += map_bankwidth(map);
		buf += map_bankwidth(map);
	}
	z -= map_bankwidth(map);

	adr += z;

	/* Write Buffer Program Confirm: GO GO GO */
	map_write(map, CMD(0x29), cmd_adr);
	chip->state = FL_WRITING;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map_bankwidth(map),
				chip->word_write_time);

	timeo = jiffies + uWriteTimeout;

	for (;;) {
		if (chip->state != FL_WRITING) {
			/* Someone's suspended the write. Sleep */
			DECLARE_WAITQUEUE(wait, current);

			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			timeo = jiffies + (HZ / 2); /* FIXME */
			mutex_lock(&chip->mutex);
			continue;
		}

		if (time_after(jiffies, timeo) && !chip_ready(map, adr))
			break;

		if (chip_ready(map, adr)) {
			xip_enable(map, chip, adr);
			goto op_done;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1);
	}

	/* reset on all failures. */
	map_write( map, CMD(0xF0), chip->start );
	xip_enable(map, chip, adr);
	/* FIXME - should have reset delay before continuing */

	printk(KERN_WARNING "MTD %s(): software timeout\n",
	       __func__ );

	ret = -EIO;
 op_done:
	chip->state = FL_READY;
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);

	return ret;
}
static int cfi_amdstd_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
				    size_t *retlen, const u_char *buf)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
	int ret = 0;
	int chipnum;
	unsigned long ofs;

	*retlen = 0;
	if (!len)
		return 0;

	chipnum = to >> cfi->chipshift;
	ofs = to - (chipnum << cfi->chipshift);

	/* If it's not bus-aligned, do the first word write */
	if (ofs & (map_bankwidth(map)-1)) {
		size_t local_len = (-ofs)&(map_bankwidth(map)-1);
		if (local_len > len)
			local_len = len;
		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
					     local_len, retlen, buf);
		if (ret)
			return ret;
		ofs += local_len;
		buf += local_len;
		len -= local_len;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	/* Write buffer is worth it only if more than one word to write... */
	while (len >= map_bankwidth(map) * 2) {
		/* We must not cross write block boundaries */
		int size = wbufsize - (ofs & (wbufsize-1));

		if (size > len)
			size = len;
		if (size % map_bankwidth(map))
			size -= size % map_bankwidth(map);

		ret = do_write_buffer(map, &cfi->chips[chipnum],
				      ofs, buf, size);
		if (ret)
			return ret;

		ofs += size;
		buf += size;
		(*retlen) += size;
		len -= size;

		if (ofs >> cfi->chipshift) {
			chipnum ++;
			ofs = 0;
			if (chipnum == cfi->numchips)
				return 0;
		}
	}

	if (len) {
		size_t retlen_dregs = 0;

		ret = cfi_amdstd_write_words(mtd, ofs + (chipnum<<cfi->chipshift),
					     len, &retlen_dregs, buf);

		*retlen += retlen_dregs;
		return ret;
	}

	return 0;
}
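/*
 * The erase paths below use the six-cycle AMD sequence: two unlock
 * cycles, 0x80 (erase setup), two more unlock cycles, then either 0x10
 * at the unlock address for a whole-chip erase or 0x30 at the sector
 * address for a single sector.
 */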
/*
 * Handle devices with one erase region, that only implement
 * the chip erase command.
 */
static int __xipram do_erase_chip(struct map_info *map, struct flchip *chip)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	unsigned long int adr;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr = cfi->addr_unlock1;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_WRITING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
	       __func__, chip->start );

	XIP_INVAL_CACHED_RANGE(map, adr, map->size);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x10, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);

	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	chip->in_progress_block_addr = adr;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, map->size,
				chip->erase_time*500);

	timeo = jiffies + (HZ*20);

	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
			chip->erase_suspended = 0;
		}

		if (chip_ready(map, adr))
			break;

		if (time_after(jiffies, timeo)) {
			printk(KERN_WARNING "MTD %s(): software timeout\n",
			       __func__ );
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1000000/HZ);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, map_word_ff(map))) {
		/* reset on all failures. */
		map_write( map, CMD(0xF0), chip->start );
		/* FIXME - should have reset delay before continuing */

		ret = -EIO;
	}

	chip->state = FL_READY;
	xip_enable(map, chip, adr);
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);

	return ret;
}
static int __xipram do_erase_oneblock(struct map_info *map, struct flchip *chip, unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	unsigned long timeo = jiffies + HZ;
	DECLARE_WAITQUEUE(wait, current);
	int ret = 0;

	adr += chip->start;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr, FL_ERASING);
	if (ret) {
		mutex_unlock(&chip->mutex);
		return ret;
	}

	DEBUG( MTD_DEBUG_LEVEL3, "MTD %s(): ERASE 0x%.8lx\n",
	       __func__, adr );

	XIP_INVAL_CACHED_RANGE(map, adr, len);
	ENABLE_VPP(map);
	xip_disable(map, chip, adr);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi, cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi, cfi->device_type, NULL);
	map_write(map, CMD(0x30), adr);

	chip->state = FL_ERASING;
	chip->erase_suspended = 0;
	chip->in_progress_block_addr = adr;

	INVALIDATE_CACHE_UDELAY(map, chip,
				adr, len,
				chip->erase_time*500);

	timeo = jiffies + (HZ*20);

	for (;;) {
		if (chip->state != FL_ERASING) {
			/* Someone's suspended the erase. Sleep */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);
			mutex_unlock(&chip->mutex);
			schedule();
			remove_wait_queue(&chip->wq, &wait);
			mutex_lock(&chip->mutex);
			continue;
		}
		if (chip->erase_suspended) {
			/* This erase was suspended and resumed.
			   Adjust the timeout */
			timeo = jiffies + (HZ*20); /* FIXME */
			chip->erase_suspended = 0;
		}

		if (chip_ready(map, adr)) {
			xip_enable(map, chip, adr);
			break;
		}

		if (time_after(jiffies, timeo)) {
			xip_enable(map, chip, adr);
			printk(KERN_WARNING "MTD %s(): software timeout\n",
			       __func__ );
			break;
		}

		/* Latency issues. Drop the lock, wait a while and retry */
		UDELAY(map, chip, adr, 1000000/HZ);
	}
	/* Did we succeed? */
	if (!chip_good(map, adr, map_word_ff(map))) {
		/* reset on all failures. */
		map_write( map, CMD(0xF0), chip->start );
		/* FIXME - should have reset delay before continuing */

		ret = -EIO;
	}

	chip->state = FL_READY;
	put_chip(map, chip, adr);
	mutex_unlock(&chip->mutex);
	return ret;
}
static int cfi_amdstd_erase_varsize(struct mtd_info *mtd, struct erase_info *instr)
{
	unsigned long ofs, len;
	int ret;

	ofs = instr->addr;
	len = instr->len;

	ret = cfi_varsize_frob(mtd, do_erase_oneblock, ofs, len, NULL);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}
static int cfi_amdstd_erase_chip(struct mtd_info *mtd, struct erase_info *instr)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int ret = 0;

	if (instr->addr != 0)
		return -EINVAL;

	if (instr->len != mtd->size)
		return -EINVAL;

	ret = do_erase_chip(map, &cfi->chips[0]);
	if (ret)
		return ret;

	instr->state = MTD_ERASE_DONE;
	mtd_erase_callback(instr);

	return 0;
}
static int do_atmel_lock(struct map_info *map, struct flchip *chip,
			 unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
	if (ret)
		goto out_unlock;
	chip->state = FL_LOCKING;

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): LOCK 0x%08lx len %d\n",
	      __func__, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x80, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	cfi_send_gen_cmd(0x55, cfi->addr_unlock2, chip->start, map, cfi,
			 cfi->device_type, NULL);
	map_write(map, CMD(0x40), chip->start + adr);

	chip->state = FL_READY;
	put_chip(map, chip, adr + chip->start);
	ret = 0;

out_unlock:
	mutex_unlock(&chip->mutex);
	return ret;
}
static int do_atmel_unlock(struct map_info *map, struct flchip *chip,
			   unsigned long adr, int len, void *thunk)
{
	struct cfi_private *cfi = map->fldrv_priv;
	int ret;

	mutex_lock(&chip->mutex);
	ret = get_chip(map, chip, adr + chip->start, FL_UNLOCKING);
	if (ret)
		goto out_unlock;
	chip->state = FL_UNLOCKING;

	DEBUG(MTD_DEBUG_LEVEL3, "MTD %s(): UNLOCK 0x%08lx len %d\n",
	      __func__, adr, len);

	cfi_send_gen_cmd(0xAA, cfi->addr_unlock1, chip->start, map, cfi,
			 cfi->device_type, NULL);
	map_write(map, CMD(0x70), adr);

	chip->state = FL_READY;
	put_chip(map, chip, adr + chip->start);
	ret = 0;

out_unlock:
	mutex_unlock(&chip->mutex);
	return ret;
}
static int cfi_atmel_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return cfi_varsize_frob(mtd, do_atmel_lock, ofs, len, NULL);
}

static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
	return cfi_varsize_frob(mtd, do_atmel_unlock, ofs, len, NULL);
}
static void cfi_amdstd_sync (struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;
	DECLARE_WAITQUEUE(wait, current);

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

	retry:
		mutex_lock(&chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_SYNCING;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		case FL_SYNCING:
			mutex_unlock(&chip->mutex);
			break;

		default:
			/* Not an idle state */
			set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(&chip->wq, &wait);

			mutex_unlock(&chip->mutex);

			schedule();

			remove_wait_queue(&chip->wq, &wait);

			goto retry;
		}
	}

	/* Unlock the chips again */

	for (i--; i >=0; i--) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		if (chip->state == FL_SYNCING) {
			chip->state = chip->oldstate;
			wake_up(&chip->wq);
		}
		mutex_unlock(&chip->mutex);
	}
}
static int cfi_amdstd_suspend(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;
	int ret = 0;

	for (i=0; !ret && i<cfi->numchips; i++) {
		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		switch(chip->state) {
		case FL_READY:
		case FL_STATUS:
		case FL_CFI_QUERY:
		case FL_JEDEC_QUERY:
			chip->oldstate = chip->state;
			chip->state = FL_PM_SUSPENDED;
			/* No need to wake_up() on this state change -
			 * as the whole point is that nobody can do anything
			 * with the chip now anyway.
			 */
		case FL_PM_SUSPENDED:
			break;

		default:
			ret = -EAGAIN;
			break;
		}
		mutex_unlock(&chip->mutex);
	}

	/* Unlock the chips again */

	if (ret) {
		for (i--; i >=0; i--) {
			chip = &cfi->chips[i];

			mutex_lock(&chip->mutex);

			if (chip->state == FL_PM_SUSPENDED) {
				chip->state = chip->oldstate;
				wake_up(&chip->wq);
			}
			mutex_unlock(&chip->mutex);
		}
	}

	return ret;
}
static void cfi_amdstd_resume(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i;
	struct flchip *chip;

	for (i=0; i<cfi->numchips; i++) {

		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		if (chip->state == FL_PM_SUSPENDED) {
			chip->state = FL_READY;
			map_write(map, CMD(0xF0), chip->start);
			wake_up(&chip->wq);
		}
		else
			printk(KERN_ERR "Argh. Chip not in PM_SUSPENDED state upon resume()\n");

		mutex_unlock(&chip->mutex);
	}
}
/*
 * Ensure that the flash device is put back into read array mode before
 * unloading the driver or rebooting.  On some systems, rebooting while
 * the flash is in query/program/erase mode will prevent the CPU from
 * fetching the bootloader code, requiring a hard reset or power cycle.
 */
static int cfi_amdstd_reset(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;
	int i, ret;
	struct flchip *chip;

	for (i = 0; i < cfi->numchips; i++) {

		chip = &cfi->chips[i];

		mutex_lock(&chip->mutex);

		ret = get_chip(map, chip, chip->start, FL_SHUTDOWN);
		if (!ret) {
			map_write(map, CMD(0xF0), chip->start);
			chip->state = FL_SHUTDOWN;
			put_chip(map, chip, chip->start);
		}

		mutex_unlock(&chip->mutex);
	}

	return 0;
}


static int cfi_amdstd_reboot(struct notifier_block *nb, unsigned long val,
			     void *v)
{
	struct mtd_info *mtd;

	mtd = container_of(nb, struct mtd_info, reboot_notifier);
	cfi_amdstd_reset(mtd);
	return NOTIFY_DONE;
}
static void cfi_amdstd_destroy(struct mtd_info *mtd)
{
	struct map_info *map = mtd->priv;
	struct cfi_private *cfi = map->fldrv_priv;

	cfi_amdstd_reset(mtd);
	unregister_reboot_notifier(&mtd->reboot_notifier);
	kfree(cfi->cmdset_priv);
	kfree(cfi->cfiq);
	kfree(cfi);
	kfree(mtd->eraseregions);
}
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Crossnet Co. <info@crossnet.co.jp> et al.");
MODULE_DESCRIPTION("MTD chip driver for AMD/Fujitsu flash chips");
MODULE_ALIAS("cfi_cmdset_0006");
MODULE_ALIAS("cfi_cmdset_0701");