2 * Character-device access to raw MTD devices.
6 #include <linux/device.h>
10 #include <linux/init.h>
11 #include <linux/kernel.h>
12 #include <linux/module.h>
13 #include <linux/slab.h>
14 #include <linux/sched.h>
15 #include <linux/smp_lock.h>
16 #include <linux/backing-dev.h>
18 #include <linux/mtd/mtd.h>
19 #include <linux/mtd/compatmac.h>
21 #include <asm/uaccess.h>
23 static struct class *mtd_class;
/*
 * MTD-core notifier hook: create the character device nodes for a newly
 * registered MTD device.  Two minors per device: even minor -> read-write
 * "mtd%d", odd minor -> read-only "mtd%dro" (mtd_open rejects FMODE_WRITE
 * on odd minors).
 * NOTE(review): body shown is elided in this extraction (braces/guards missing).
 */
25 static void mtd_notify_add(struct mtd_info* mtd)
30 device_create(mtd_class, NULL, MKDEV(MTD_CHAR_MAJOR, mtd->index*2),
31 NULL, "mtd%d", mtd->index);
33 device_create(mtd_class, NULL, MKDEV(MTD_CHAR_MAJOR, mtd->index*2+1),
34 NULL, "mtd%dro", mtd->index);
/*
 * MTD-core notifier hook: destroy both char nodes (RW and RO minors)
 * when an MTD device is unregistered.  Mirrors mtd_notify_add().
 */
37 static void mtd_notify_remove(struct mtd_info* mtd)
42 device_destroy(mtd_class, MKDEV(MTD_CHAR_MAJOR, mtd->index*2));
43 device_destroy(mtd_class, MKDEV(MTD_CHAR_MAJOR, mtd->index*2+1));
/* Registered with the MTD core in init_mtdchar() so that /dev nodes track
 * device add/remove events. */
46 static struct mtd_notifier notifier = {
47 .add = mtd_notify_add,
48 .remove = mtd_notify_remove,
52 * Data structure to hold the pointer to the mtd device as well
53 * as mode information for various use cases.
55 struct mtd_file_info {
/* Access mode for this open file: normal, OTP factory/user, or raw OOB;
 * switched via the OTPSELECT/MTDFILEMODE ioctls (see mtd_ioctl). */
57 enum mtd_file_modes mode;
/*
 * llseek handler: compute the absolute target offset and accept it if it
 * lies within the device, returning the new position.
 * NOTE(review): the switch on 'orig' (SEEK_SET/CUR/END) is elided in this
 * extraction; the line below is presumably the SEEK_CUR branch.
 */
60 static loff_t mtd_lseek (struct file *file, loff_t offset, int orig)
62 struct mtd_file_info *mfi = file->private_data;
63 struct mtd_info *mtd = mfi->mtd;
/* presumably SEEK_CUR: make the offset absolute before range-checking */
69 offset += file->f_pos;
/* Offsets in [0, mtd->size] inclusive are valid; store and return it. */
78 if (offset >= 0 && offset <= mtd->size)
79 return file->f_pos = offset;
/*
 * open() handler.  Minor-number layout: two minors per MTD device,
 * devnum = minor >> 1; odd minors are the read-only "mtd%dro" nodes
 * (see mtd_notify_add), so FMODE_WRITE on an odd minor is rejected.
 * Allocates a per-open mtd_file_info and stashes it in file->private_data.
 * NOTE(review): error-return lines and cleanup paths are elided in this
 * extraction.
 */
86 static int mtd_open(struct inode *inode, struct file *file)
88 int minor = iminor(inode);
89 int devnum = minor >> 1;
92 struct mtd_file_info *mfi;
94 DEBUG(MTD_DEBUG_LEVEL0, "MTD_open\n");
/* Reject device indices beyond the static MTD table. */
96 if (devnum >= MAX_MTD_DEVICES)
99 /* You can't open the RO devices RW */
100 if ((file->f_mode & FMODE_WRITE) && (minor & 1))
/* Take a reference on the device; dropped again in mtd_close
 * (put_mtd_device call elided from this view — TODO confirm). */
104 mtd = get_mtd_device(NULL, devnum);
111 if (mtd->type == MTD_ABSENT) {
/* Propagate the device's backing_dev_info so mmap/writeback policy
 * follows the MTD driver's settings. */
117 if (mtd->backing_dev_info)
118 file->f_mapping->backing_dev_info = mtd->backing_dev_info;
120 /* You can't open it RW if it's not a writeable device */
121 if ((file->f_mode & FMODE_WRITE) && !(mtd->flags & MTD_WRITEABLE)) {
/* Per-open state; starts zeroed, i.e. mode == MTD_MODE_NORMAL. */
127 mfi = kzalloc(sizeof(*mfi), GFP_KERNEL);
134 file->private_data = mfi;
141 /*====================================================================*/
/*
 * release() handler: sync the device if it was open for writing and the
 * driver provides a sync hook, then free the per-open state.
 * NOTE(review): the put_mtd_device()/kfree(mfi) lines appear elided in
 * this extraction — TODO confirm the reference taken in mtd_open is dropped.
 */
143 static int mtd_close(struct inode *inode, struct file *file)
145 struct mtd_file_info *mfi = file->private_data;
146 struct mtd_info *mtd = mfi->mtd;
148 DEBUG(MTD_DEBUG_LEVEL0, "MTD_close\n");
150 /* Only sync if opened RW */
151 if ((file->f_mode & FMODE_WRITE) && mtd->sync)
/* Detach per-open state before it is freed. */
155 file->private_data = NULL;
161 /* FIXME: This _really_ needs to die. In 2.5, we should lock the
162 userspace buffer down and use it directly with readv/writev.
164 #define MAX_KMALLOC_SIZE 0x20000
/*
 * read() handler.  Copies device data to userspace through a bounce
 * buffer, in chunks of at most MAX_KMALLOC_SIZE, dispatching on the
 * per-open mode (OTP factory, OTP user, raw OOB, or normal read).
 * NOTE(review): loop structure, error returns and kfree() are elided in
 * this extraction.
 */
166 static ssize_t mtd_read(struct file *file, char __user *buf, size_t count,loff_t *ppos)
168 struct mtd_file_info *mfi = file->private_data;
169 struct mtd_info *mtd = mfi->mtd;
171 size_t total_retlen=0;
176 DEBUG(MTD_DEBUG_LEVEL0,"MTD_read\n");
/* Clamp the request so it never reads past the end of the device. */
178 if (*ppos + count > mtd->size)
179 count = mtd->size - *ppos;
184 /* FIXME: Use kiovec in 2.5 to lock down the user's buffers
185 and pass them directly to the MTD functions */
/* Bounce buffer: cap the kmalloc at MAX_KMALLOC_SIZE and loop. */
187 if (count > MAX_KMALLOC_SIZE)
188 kbuf=kmalloc(MAX_KMALLOC_SIZE, GFP_KERNEL);
190 kbuf=kmalloc(count, GFP_KERNEL);
/* Per-iteration chunk length. */
197 if (count > MAX_KMALLOC_SIZE)
198 len = MAX_KMALLOC_SIZE;
/* Dispatch on the file mode selected via ioctl (see mtd_file_info). */
203 case MTD_MODE_OTP_FACTORY:
204 ret = mtd->read_fact_prot_reg(mtd, *ppos, len, &retlen, kbuf);
206 case MTD_MODE_OTP_USER:
207 ret = mtd->read_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
/* Raw mode: go through read_oob with MTD_OOB_RAW (no ECC correction). */
211 struct mtd_oob_ops ops;
213 ops.mode = MTD_OOB_RAW;
218 ret = mtd->read_oob(mtd, *ppos, &ops);
/* Default: plain ECC-corrected read. */
223 ret = mtd->read(mtd, *ppos, len, &retlen, kbuf);
225 /* Nand returns -EBADMSG on ecc errors, but it returns
226 * the data. For our userspace tools it is important
227 * to dump areas with ecc errors !
228 * For kernel internal usage it also might return -EUCLEAN
229 * to signal the caller that a bitflip has occurred and has
230 * been corrected by the ECC algorithm.
231 * Userspace software which accesses NAND this way
232 * must be aware of the fact that it deals with NAND
/* Treat corrected (-EUCLEAN) and uncorrectable (-EBADMSG) ECC results
 * as data-bearing: still copy the buffer out to userspace. */
234 if (!ret || (ret == -EUCLEAN) || (ret == -EBADMSG)) {
236 if (copy_to_user(buf, kbuf, retlen)) {
241 total_retlen += retlen;
/*
 * write() handler.  Mirror of mtd_read(): copies user data in through a
 * bounce buffer of at most MAX_KMALLOC_SIZE per chunk and dispatches on
 * the per-open mode (OTP factory is rejected, OTP user, raw OOB, normal).
 * NOTE(review): loop structure, error returns and kfree() are elided in
 * this extraction.
 */
259 static ssize_t mtd_write(struct file *file, const char __user *buf, size_t count,loff_t *ppos)
261 struct mtd_file_info *mfi = file->private_data;
262 struct mtd_info *mtd = mfi->mtd;
265 size_t total_retlen=0;
269 DEBUG(MTD_DEBUG_LEVEL0,"MTD_write\n");
/* Writing exactly at end-of-device is a no-op, not an error. */
271 if (*ppos == mtd->size)
/* Clamp the request to the device size. */
274 if (*ppos + count > mtd->size)
275 count = mtd->size - *ppos;
280 if (count > MAX_KMALLOC_SIZE)
281 kbuf=kmalloc(MAX_KMALLOC_SIZE, GFP_KERNEL);
283 kbuf=kmalloc(count, GFP_KERNEL);
/* Per-iteration chunk length. */
290 if (count > MAX_KMALLOC_SIZE)
291 len = MAX_KMALLOC_SIZE;
295 if (copy_from_user(kbuf, buf, len)) {
/* Factory OTP is read-only; its case falls through to an error
 * (return elided in this view). */
301 case MTD_MODE_OTP_FACTORY:
304 case MTD_MODE_OTP_USER:
305 if (!mtd->write_user_prot_reg) {
309 ret = mtd->write_user_prot_reg(mtd, *ppos, len, &retlen, kbuf);
/* Raw mode: write through write_oob with MTD_OOB_RAW. */
314 struct mtd_oob_ops ops;
316 ops.mode = MTD_OOB_RAW;
321 ret = mtd->write_oob(mtd, *ppos, &ops);
/* Default: plain write. */
327 ret = (*(mtd->write))(mtd, *ppos, len, &retlen, kbuf);
331 total_retlen += retlen;
345 /*======================================================================
347 IOCTL calls for getting device parameters.
349 ======================================================================*/
/*
 * Completion callback for MEMERASE: instr->priv holds the on-stack
 * wait_queue_head_t set up by the ioctl handler (see erase->priv
 * assignment in mtd_ioctl); wake the sleeping caller.
 */
350 static void mtdchar_erase_callback (struct erase_info *instr)
352 wake_up((wait_queue_head_t *)instr->priv);
355 #ifdef CONFIG_HAVE_MTD_OTP
/*
 * Switch the per-open file mode into one of the OTP access modes, after
 * verifying the underlying driver supports the corresponding operation.
 * Returns 0 on success (error paths elided in this extraction).
 */
356 static int otp_select_filemode(struct mtd_file_info *mfi, int mode)
358 struct mtd_info *mtd = mfi->mtd;
362 case MTD_OTP_FACTORY:
363 if (!mtd->read_fact_prot_reg)
366 mfi->mode = MTD_MODE_OTP_FACTORY;
/* NOTE(review): this appears to be the MTD_OTP_USER case (it sets
 * MTD_MODE_OTP_USER below), yet it tests read_fact_prot_reg rather
 * than read_user_prot_reg — looks like a copy/paste bug; the user
 * path elsewhere checks the *_user_prot_reg hooks (see mtd_write). */
369 if (!mtd->read_fact_prot_reg)
372 mfi->mode = MTD_MODE_OTP_USER;
382 # define otp_select_filemode(f,m) -EOPNOTSUPP
/*
 * ioctl() dispatcher for the mtd char device.  Validates the user pointer
 * according to the _IOC direction bits encoded in 'cmd', then handles the
 * MEM* / OTP* / ECC commands.  Several commands return driver errors
 * directly; OOB transfers are bounced through a kmalloc'd buffer capped
 * at 4096 bytes.
 * NOTE(review): switch scaffolding, case labels and many returns are
 * elided in this extraction; comments below annotate the visible lines.
 */
385 static int mtd_ioctl(struct inode *inode, struct file *file,
386 u_int cmd, u_long arg)
388 struct mtd_file_info *mfi = file->private_data;
389 struct mtd_info *mtd = mfi->mtd;
390 void __user *argp = (void __user *)arg;
393 struct mtd_info_user info;
395 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
/* Pre-validate the user buffer using the size/direction encoded in cmd. */
397 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
399 if (!access_ok(VERIFY_READ, argp, size))
403 if (!access_ok(VERIFY_WRITE, argp, size))
408 case MEMGETREGIONCOUNT:
409 if (copy_to_user(argp, &(mtd->numeraseregions), sizeof(int)))
413 case MEMGETREGIONINFO:
416 struct mtd_erase_region_info *kr;
417 struct region_info_user *ur = (struct region_info_user *) argp;
/* Caller passes the region index in-band inside the user struct. */
419 if (get_user(ur_idx, &(ur->regionindex)))
422 kr = &(mtd->eraseregions[ur_idx]);
424 if (put_user(kr->offset, &(ur->offset))
425 || put_user(kr->erasesize, &(ur->erasesize))
426 || put_user(kr->numblocks, &(ur->numblocks)))
/* MEMGETINFO: snapshot of the device geometry for userspace. */
433 info.type = mtd->type;
434 info.flags = mtd->flags;
435 info.size = mtd->size;
436 info.erasesize = mtd->erasesize;
437 info.writesize = mtd->writesize;
438 info.oobsize = mtd->oobsize;
439 /* The below fields are obsolete */
442 if (copy_to_user(argp, &info, sizeof(struct mtd_info_user)))
/* MEMERASE: synchronous erase — build an erase_info, submit it, and
 * sleep on an on-stack waitqueue until the driver's callback fires. */
448 struct erase_info *erase;
450 if(!(file->f_mode & FMODE_WRITE))
453 erase=kzalloc(sizeof(struct erase_info),GFP_KERNEL);
457 struct erase_info_user einfo;
459 wait_queue_head_t waitq;
460 DECLARE_WAITQUEUE(wait, current);
462 init_waitqueue_head(&waitq);
464 if (copy_from_user(&einfo, argp,
465 sizeof(struct erase_info_user))) {
469 erase->addr = einfo.start;
470 erase->len = einfo.length;
/* Wake-up plumbing: callback retrieves &waitq through erase->priv. */
472 erase->callback = mtdchar_erase_callback;
473 erase->priv = (unsigned long)&waitq;
476 FIXME: Allow INTERRUPTIBLE. Which means
477 not having the wait_queue head on the stack.
479 If the wq_head is on the stack, and we
480 leave because we got interrupted, then the
481 wq_head is no longer there when the
482 callback routine tries to wake us up.
484 ret = mtd->erase(mtd, erase);
/* Classic sleep pattern: set state before the queue add so a wake-up
 * between the two is not lost. */
486 set_current_state(TASK_UNINTERRUPTIBLE);
487 add_wait_queue(&waitq, &wait);
488 if (erase->state != MTD_ERASE_DONE &&
489 erase->state != MTD_ERASE_FAILED)
491 remove_wait_queue(&waitq, &wait);
492 set_current_state(TASK_RUNNING);
494 ret = (erase->state == MTD_ERASE_FAILED)?-EIO:0;
/* MEMWRITEOOB: copy user OOB data in and hand it to write_oob. */
503 struct mtd_oob_buf buf;
504 struct mtd_oob_ops ops;
505 struct mtd_oob_buf __user *user_buf = argp;
508 if(!(file->f_mode & FMODE_WRITE))
511 if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf)))
/* Cap the bounce buffer. */
514 if (buf.length > 4096)
520 ret = access_ok(VERIFY_READ, buf.ptr,
/* NOTE(review): missing minus sign — should be -EFAULT, as in the
 * matching MEMREADOOB check below; as written this returns a
 * positive errno value to userspace on a bad pointer. */
521 buf.length) ? 0 : EFAULT;
526 ops.ooblen = buf.length;
/* Split start into page-aligned base + offset within the OOB area. */
527 ops.ooboffs = buf.start & (mtd->oobsize - 1);
529 ops.mode = MTD_OOB_PLACE;
531 if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
534 ops.oobbuf = kmalloc(buf.length, GFP_KERNEL);
538 if (copy_from_user(ops.oobbuf, buf.ptr, buf.length)) {
543 buf.start &= ~(mtd->oobsize - 1);
544 ret = mtd->write_oob(mtd, buf.start, &ops);
546 if (ops.oobretlen > 0xFFFFFFFFU)
/* Report the actual byte count back through the user struct. */
548 retlen = ops.oobretlen;
549 if (copy_to_user(&user_buf->length, &retlen, sizeof(buf.length)))
/* MEMREADOOB: mirror of the write path above. */
559 struct mtd_oob_buf buf;
560 struct mtd_oob_ops ops;
562 if (copy_from_user(&buf, argp, sizeof(struct mtd_oob_buf)))
565 if (buf.length > 4096)
571 ret = access_ok(VERIFY_WRITE, buf.ptr,
572 buf.length) ? 0 : -EFAULT;
576 ops.ooblen = buf.length;
577 ops.ooboffs = buf.start & (mtd->oobsize - 1);
579 ops.mode = MTD_OOB_PLACE;
581 if (ops.ooboffs && ops.ooblen > (mtd->oobsize - ops.ooboffs))
584 ops.oobbuf = kmalloc(buf.length, GFP_KERNEL);
588 buf.start &= ~(mtd->oobsize - 1);
589 ret = mtd->read_oob(mtd, buf.start, &ops);
591 if (put_user(ops.oobretlen, (uint32_t __user *)argp))
593 else if (ops.oobretlen && copy_to_user(buf.ptr, ops.oobbuf,
/* MEMLOCK: block-lock a region via the driver hook. */
603 struct erase_info_user einfo;
605 if (copy_from_user(&einfo, argp, sizeof(einfo)))
611 ret = mtd->lock(mtd, einfo.start, einfo.length);
/* MEMUNLOCK: mirror of MEMLOCK. */
617 struct erase_info_user einfo;
619 if (copy_from_user(&einfo, argp, sizeof(einfo)))
625 ret = mtd->unlock(mtd, einfo.start, einfo.length);
629 /* Legacy interface */
/* MEMGETOOBSEL: convert the modern ecclayout to the old nand_oobinfo. */
632 struct nand_oobinfo oi;
636 if (mtd->ecclayout->eccbytes > ARRAY_SIZE(oi.eccpos))
639 oi.useecc = MTD_NANDECC_AUTOPLACE;
640 memcpy(&oi.eccpos, mtd->ecclayout->eccpos, sizeof(oi.eccpos));
641 memcpy(&oi.oobfree, mtd->ecclayout->oobfree,
643 oi.eccbytes = mtd->ecclayout->eccbytes;
645 if (copy_to_user(argp, &oi, sizeof(struct nand_oobinfo)))
/* MEMGETBADBLOCK: returns the driver's verdict directly (>0 = bad). */
654 if (copy_from_user(&offs, argp, sizeof(loff_t)))
656 if (!mtd->block_isbad)
659 return mtd->block_isbad(mtd, offs);
/* MEMSETBADBLOCK: mark a block bad via the driver hook. */
667 if (copy_from_user(&offs, argp, sizeof(loff_t)))
669 if (!mtd->block_markbad)
672 return mtd->block_markbad(mtd, offs);
676 #ifdef CONFIG_HAVE_MTD_OTP
/* OTPSELECT: reset to normal mode first, then try the requested one. */
680 if (copy_from_user(&mode, argp, sizeof(int)))
683 mfi->mode = MTD_MODE_NORMAL;
685 ret = otp_select_filemode(mfi, mode);
691 case OTPGETREGIONCOUNT:
692 case OTPGETREGIONINFO:
/* Shared path: fetch OTP region info into a 4 KiB scratch buffer,
 * then return either the count or the raw records. */
694 struct otp_info *buf = kmalloc(4096, GFP_KERNEL);
699 case MTD_MODE_OTP_FACTORY:
700 if (mtd->get_fact_prot_info)
701 ret = mtd->get_fact_prot_info(mtd, buf, 4096);
703 case MTD_MODE_OTP_USER:
704 if (mtd->get_user_prot_info)
705 ret = mtd->get_user_prot_info(mtd, buf, 4096);
711 if (cmd == OTPGETREGIONCOUNT) {
712 int nbr = ret / sizeof(struct otp_info);
713 ret = copy_to_user(argp, &nbr, sizeof(int));
715 ret = copy_to_user(argp, buf, ret);
/* OTPLOCK: only valid once the file is in OTP user mode. */
725 struct otp_info oinfo;
727 if (mfi->mode != MTD_MODE_OTP_USER)
729 if (copy_from_user(&oinfo, argp, sizeof(oinfo)))
731 if (!mtd->lock_user_prot_reg)
733 ret = mtd->lock_user_prot_reg(mtd, oinfo.start, oinfo.length);
/* ECCGETLAYOUT: raw copy-out of the driver's nand_ecclayout. */
743 if (copy_to_user(argp, mtd->ecclayout,
744 sizeof(struct nand_ecclayout)))
/* ECCGETSTATS: raw copy-out of the running ECC statistics. */
751 if (copy_to_user(argp, &mtd->ecc_stats,
752 sizeof(struct mtd_ecc_stats)))
/* MTDFILEMODE: switch the per-open access mode (arg is the mode). */
762 case MTD_MODE_OTP_FACTORY:
763 case MTD_MODE_OTP_USER:
764 ret = otp_select_filemode(mfi, arg);
/* Raw mode requires both OOB hooks. */
768 if (!mtd->read_oob || !mtd->write_oob)
772 case MTD_MODE_NORMAL:
789 * try to determine where a shared mapping can be made
790 * - only supported for NOMMU at the moment (the MMU case doesn't copy private
794 static unsigned long mtd_get_unmapped_area(struct file *file,
/* (remaining parameters — addr/len/pgoff/flags — elided in this view) */
800 struct mtd_file_info *mfi = file->private_data;
801 struct mtd_info *mtd = mfi->mtd;
/* Delegate to the driver only if it can hand out a direct mapping. */
803 if (mtd->get_unmapped_area) {
804 unsigned long offset;
807 return (unsigned long) -EINVAL;
/* Reject requests that start or extend beyond the device. */
809 if (len > mtd->size || pgoff >= (mtd->size >> PAGE_SHIFT))
810 return (unsigned long) -EINVAL;
812 offset = pgoff << PAGE_SHIFT;
813 if (offset > mtd->size - len)
814 return (unsigned long) -EINVAL;
816 return mtd->get_unmapped_area(mtd, len, offset, flags);
819 /* can't map directly */
820 return (unsigned long) -ENOSYS;
825 * set up a mapping for shared memory segments
/*
 * mmap() handler.  On RAM/ROM-type devices a real mapping is allowed
 * (success path elided in this view); for flash, only VM_SHARED mappings
 * are tolerated — private mappings get -ENOSYS.
 */
827 static int mtd_mmap(struct file *file, struct vm_area_struct *vma)
830 struct mtd_file_info *mfi = file->private_data;
831 struct mtd_info *mtd = mfi->mtd;
833 if (mtd->type == MTD_RAM || mtd->type == MTD_ROM)
837 return vma->vm_flags & VM_SHARED ? 0 : -ENOSYS;
/* File operations for /dev/mtdN.  Only some initializers are visible in
 * this extraction; llseek/read/write/ioctl/open/mmap entries are elided. */
841 static const struct file_operations mtd_fops = {
842 .owner = THIS_MODULE,
848 .release = mtd_close,
851 .get_unmapped_area = mtd_get_unmapped_area,
/*
 * Module init: claim the MTD char major, create the "mtd" device class
 * (unwinding the chrdev registration if that fails), then register the
 * add/remove notifier so existing and future devices get /dev nodes.
 */
855 static int __init init_mtdchar(void)
857 if (register_chrdev(MTD_CHAR_MAJOR, "mtd", &mtd_fops)) {
858 printk(KERN_NOTICE "Can't allocate major number %d for Memory Technology Devices.\n",
/* class_create failure path: undo the chrdev registration. */
863 mtd_class = class_create(THIS_MODULE, "mtd");
865 if (IS_ERR(mtd_class)) {
866 printk(KERN_ERR "Error creating mtd class.\n");
867 unregister_chrdev(MTD_CHAR_MAJOR, "mtd");
868 return PTR_ERR(mtd_class);
871 register_mtd_user(&notifier);
/* Module exit: tear down in reverse order of init_mtdchar. */
875 static void __exit cleanup_mtdchar(void)
877 unregister_mtd_user(&notifier);
878 class_destroy(mtd_class);
879 unregister_chrdev(MTD_CHAR_MAJOR, "mtd");
882 module_init(init_mtdchar);
883 module_exit(cleanup_mtdchar);
886 MODULE_LICENSE("GPL");
887 MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
888 MODULE_DESCRIPTION("Direct character-device access to MTD devices");
889 MODULE_ALIAS_CHARDEV_MAJOR(MTD_CHAR_MAJOR);