/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/backing-dev.h>
#include <linux/splice.h>
#include <linux/pfn.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/aio.h>

#include <asm/uaccess.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

#define DEVPORT_MINOR   4

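/*
 * Return the number of bytes from @start to the end of its page,
 * capped at @size.
 */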
static inline unsigned long size_inside_page(unsigned long start,
                                             unsigned long size)
{
        unsigned long sz;

        sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));

        return min(sz, size);
}

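/*
 * Fallbacks for architectures that do not define
 * ARCH_HAS_VALID_PHYS_ADDR_RANGE: accept physical ranges that fit below
 * __pa(high_memory), and accept any range for mmap.
 */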
#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(phys_addr_t addr, size_t count)
{
        return addr + count <= __pa(high_memory);
}

static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
        return 1;
}
#endif

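/*
 * range_is_allowed() gates raw /dev/mem access: with CONFIG_STRICT_DEVMEM
 * every page in the range must pass devmem_is_allowed(), otherwise the
 * whole range is permitted.
 */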
#ifdef CONFIG_STRICT_DEVMEM
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        u64 from = ((u64)pfn) << PAGE_SHIFT;
        u64 to = from + size;
        u64 cursor = from;

        while (cursor < to) {
                if (!devmem_is_allowed(pfn)) {
                        printk(KERN_INFO
                "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
                                current->comm, from, to);
                        return 0;
                }
                cursor += PAGE_SIZE;
                pfn++;
        }
        return 1;
}
#else
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        return 1;
}
#endif

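/*
 * Weak stub: architectures that create temporary mappings in
 * xlate_dev_mem_ptr() override this to tear them down again.
 */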
void __weak unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
}

/*
 * This function reads the *physical* memory. The f_pos points directly to the
 * memory location.
 */
static ssize_t read_mem(struct file *file, char __user *buf,
                        size_t count, loff_t *ppos)
{
        phys_addr_t p = *ppos;
        ssize_t read, sz;
        char *ptr;

        if (!valid_phys_addr_range(p, count))
                return -EFAULT;
        read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = size_inside_page(p, count);
                if (sz > 0) {
                        if (clear_user(buf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        count -= sz;
                        read += sz;
                }
        }
#endif

        while (count > 0) {
                unsigned long remaining;

                sz = size_inside_page(p, count);

                if (!range_is_allowed(p >> PAGE_SHIFT, count))
                        return -EPERM;

                /*
                 * On ia64 if a page has been mapped somewhere as uncached, then
                 * it must also be accessed uncached by the kernel or data
                 * corruption may occur.
                 */
                ptr = xlate_dev_mem_ptr(p);
                if (!ptr)
                        return -EFAULT;

                remaining = copy_to_user(buf, ptr, sz);
                unxlate_dev_mem_ptr(p, ptr);
                if (remaining)
                        return -EFAULT;

                buf += sz;
                p += sz;
                count -= sz;
                read += sz;
        }

        *ppos += read;
        return read;
}

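/*
 * This function writes to the *physical* memory. The f_pos points directly
 * to the memory location.
 */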
static ssize_t write_mem(struct file *file, const char __user *buf,
                         size_t count, loff_t *ppos)
{
        phys_addr_t p = *ppos;
        ssize_t written, sz;
        unsigned long copied;
        void *ptr;

        if (!valid_phys_addr_range(p, count))
                return -EFAULT;

        written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = size_inside_page(p, count);
                /* Hmm. Do something? */
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }
#endif

        while (count > 0) {
                sz = size_inside_page(p, count);

                if (!range_is_allowed(p >> PAGE_SHIFT, sz))
                        return -EPERM;

                /*
                 * On ia64 if a page has been mapped somewhere as uncached, then
                 * it must also be accessed uncached by the kernel or data
                 * corruption may occur.
                 */
                ptr = xlate_dev_mem_ptr(p);
                if (!ptr) {
                        if (written)
                                break;
                        return -EFAULT;
                }

                copied = copy_from_user(ptr, buf, sz);
                unxlate_dev_mem_ptr(p, ptr);
                if (copied) {
                        written += sz - copied;
                        if (written)
                                break;
                        return -EFAULT;
                }

                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }

        *ppos += written;
        return written;
}

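/*
 * Weak default: no architecture-specific restriction, the mapping is
 * allowed and vma_prot is left untouched.
 */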
int __weak phys_mem_access_prot_allowed(struct file *file,
        unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
{
        return 1;
}

#ifndef __HAVE_PHYS_MEM_ACCESS_PROT

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
#ifdef pgprot_noncached
static int uncached_access(struct file *file, phys_addr_t addr)
{
#if defined(CONFIG_IA64)
        /*
         * On ia64, we ignore O_DSYNC because we cannot tolerate memory
         * attribute aliases.
         */
        return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#elif defined(CONFIG_MIPS)
        {
                extern int __uncached_access(struct file *file,
                                             unsigned long addr);

                return __uncached_access(file, addr);
        }
#else
        /*
         * Accessing memory above the top of memory the kernel knows about,
         * or through a file pointer that was marked O_DSYNC, will be done
         * non-cached.
         */
        if (file->f_flags & O_DSYNC)
                return 1;
        return addr >= __pa(high_memory);
#endif
}
#endif

static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                                     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
        phys_addr_t offset = pfn << PAGE_SHIFT;

        if (uncached_access(file, offset))
                return pgprot_noncached(vma_prot);
#endif
        return vma_prot;
}
#endif

#ifndef CONFIG_MMU
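/*
 * Without an MMU, /dev/mem mappings are direct: the "mapped" address is
 * simply the physical address itself.
 */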
static unsigned long get_unmapped_area_mem(struct file *file,
                                           unsigned long addr,
                                           unsigned long len,
                                           unsigned long pgoff,
                                           unsigned long flags)
{
        if (!valid_mmap_phys_addr_range(pgoff, len))
                return (unsigned long) -EINVAL;
        return pgoff << PAGE_SHIFT;
}

/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
        return vma->vm_flags & VM_MAYSHARE;
}
#else
#define get_unmapped_area_mem   NULL

static inline int private_mapping_ok(struct vm_area_struct *vma)
{
        return 1;
}
#endif

static const struct vm_operations_struct mmap_mem_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
        .access = generic_access_phys
#endif
};

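/*
 * mmap() of /dev/mem: map the requested physical range straight into the
 * caller's address space with remap_pfn_range(), after the strict devmem
 * and architecture protection checks have passed.
 */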
static int mmap_mem(struct file *file, struct vm_area_struct *vma)
{
        size_t size = vma->vm_end - vma->vm_start;

        if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
                return -EINVAL;

        if (!private_mapping_ok(vma))
                return -ENOSYS;

        if (!range_is_allowed(vma->vm_pgoff, size))
                return -EPERM;

        if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
                                                &vma->vm_page_prot))
                return -EINVAL;

        vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
                                                 size,
                                                 vma->vm_page_prot);

        vma->vm_ops = &mmap_mem_ops;

        /* Remap-pfn-range will mark the range VM_IO */
        if (remap_pfn_range(vma,
                            vma->vm_start,
                            vma->vm_pgoff,
                            size,
                            vma->vm_page_prot)) {
                return -EAGAIN;
        }
        return 0;
}

#ifdef CONFIG_DEVKMEM
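/*
 * mmap() of /dev/kmem: convert the kernel-virtual file offset into a
 * physical page frame and reuse mmap_mem().
 */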
static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
{
        unsigned long pfn;

        /* Turn a kernel-virtual address into a physical page frame */
        pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;

        /*
         * RED-PEN: on some architectures there is more mapped memory than
         * available in mem_map which pfn_valid checks for. Perhaps should add a
         * new macro here.
         *
         * RED-PEN: vmalloc is not supported right now.
         */
        if (!pfn_valid(pfn))
                return -EIO;

        vma->vm_pgoff = pfn;
        return mmap_mem(file, vma);
}
#endif

#ifdef CONFIG_DEVKMEM
/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
static ssize_t read_kmem(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t low_count, read, sz;
        char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
        int err = 0;

        read = 0;
        if (p < (unsigned long) high_memory) {
                low_count = count;
                if (count > (unsigned long)high_memory - p)
                        low_count = (unsigned long)high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
                /* we don't have page 0 mapped on sparc and m68k.. */
                if (p < PAGE_SIZE && low_count > 0) {
                        sz = size_inside_page(p, low_count);
                        if (clear_user(buf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        read += sz;
                        low_count -= sz;
                        count -= sz;
                }
#endif
                while (low_count > 0) {
                        sz = size_inside_page(p, low_count);

                        /*
                         * On ia64 if a page has been mapped somewhere as
                         * uncached, then it must also be accessed uncached
                         * by the kernel or data corruption may occur
                         */
                        kbuf = xlate_dev_kmem_ptr((char *)p);

                        if (copy_to_user(buf, kbuf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        read += sz;
                        low_count -= sz;
                        count -= sz;
                }
        }

        if (count > 0) {
                kbuf = (char *)__get_free_page(GFP_KERNEL);
                if (!kbuf)
                        return -ENOMEM;
                while (count > 0) {
                        sz = size_inside_page(p, count);
                        if (!is_vmalloc_or_module_addr((void *)p)) {
                                err = -ENXIO;
                                break;
                        }
                        sz = vread(kbuf, (char *)p, sz);
                        if (!sz)
                                break;
                        if (copy_to_user(buf, kbuf, sz)) {
                                err = -EFAULT;
                                break;
                        }
                        count -= sz;
                        buf += sz;
                        read += sz;
                        p += sz;
                }
                free_page((unsigned long)kbuf);
        }
        *ppos = p;
        return read ? read : err;
}


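/*
 * Helper for write_kmem(): copy user data into the kernel's direct
 * (low memory) mapping, one page at a time.
 */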
static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
                                size_t count, loff_t *ppos)
{
        ssize_t written, sz;
        unsigned long copied;

        written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = size_inside_page(p, count);
                /* Hmm. Do something? */
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }
#endif

        while (count > 0) {
                char *ptr;

                sz = size_inside_page(p, count);

                /*
                 * On ia64 if a page has been mapped somewhere as uncached, then
                 * it must also be accessed uncached by the kernel or data
                 * corruption may occur.
                 */
                ptr = xlate_dev_kmem_ptr((char *)p);

                copied = copy_from_user(ptr, buf, sz);
                if (copied) {
                        written += sz - copied;
                        if (written)
                                break;
                        return -EFAULT;
                }
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }

        *ppos += written;
        return written;
}

/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t wrote = 0;
        ssize_t virtr = 0;
        char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
        int err = 0;

        if (p < (unsigned long) high_memory) {
                unsigned long to_write = min_t(unsigned long, count,
                                               (unsigned long)high_memory - p);
                wrote = do_write_kmem(p, buf, to_write, ppos);
                if (wrote != to_write)
                        return wrote;
                p += wrote;
                buf += wrote;
                count -= wrote;
        }

        if (count > 0) {
                kbuf = (char *)__get_free_page(GFP_KERNEL);
                if (!kbuf)
                        return wrote ? wrote : -ENOMEM;
                while (count > 0) {
                        unsigned long sz = size_inside_page(p, count);
                        unsigned long n;

                        if (!is_vmalloc_or_module_addr((void *)p)) {
                                err = -ENXIO;
                                break;
                        }
                        n = copy_from_user(kbuf, buf, sz);
                        if (n) {
                                err = -EFAULT;
                                break;
                        }
                        vwrite(kbuf, (char *)p, sz);
                        count -= sz;
                        buf += sz;
                        virtr += sz;
                        p += sz;
                }
                free_page((unsigned long)kbuf);
        }

        *ppos = p;
        return virtr + wrote ? : err;
}
#endif

#ifdef CONFIG_DEVPORT
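/*
 * /dev/port: byte-at-a-time access to the I/O port space with
 * inb()/outb().
 */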
static ssize_t read_port(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        char __user *tmp = buf;

        if (!access_ok(VERIFY_WRITE, buf, count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                if (__put_user(inb(i), tmp) < 0)
                        return -EFAULT;
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp-buf;
}

static ssize_t write_port(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        const char __user *tmp = buf;

        if (!access_ok(VERIFY_READ, buf, count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                char c;
                if (__get_user(c, tmp)) {
                        if (tmp > buf)
                                break;
                        return -EFAULT;
                }
                outb(c, i);
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp-buf;
}
#endif

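/*
 * /dev/null: reads return end-of-file, writes and splices are silently
 * discarded.
 */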
static ssize_t read_null(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        return 0;
}

static ssize_t write_null(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        return count;
}

static ssize_t aio_read_null(struct kiocb *iocb, const struct iovec *iov,
                             unsigned long nr_segs, loff_t pos)
{
        return 0;
}

static ssize_t aio_write_null(struct kiocb *iocb, const struct iovec *iov,
                              unsigned long nr_segs, loff_t pos)
{
        return iov_length(iov, nr_segs);
}

static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
                        struct splice_desc *sd)
{
        return sd->len;
}

static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
                                 loff_t *ppos, size_t len, unsigned int flags)
{
        return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}

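/*
 * /dev/zero read: fill the user buffer with zeroes, at most one page per
 * iteration so that pending signals are noticed promptly.
 */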
static ssize_t read_zero(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        size_t written;

        if (!count)
                return 0;

        if (!access_ok(VERIFY_WRITE, buf, count))
                return -EFAULT;

        written = 0;
        while (count) {
                unsigned long unwritten;
                size_t chunk = count;

                if (chunk > PAGE_SIZE)
                        chunk = PAGE_SIZE;      /* Just for latency reasons */
                unwritten = __clear_user(buf, chunk);
                written += chunk - unwritten;
                if (unwritten)
                        break;
                if (signal_pending(current))
                        return written ? written : -ERESTARTSYS;
                buf += chunk;
                count -= chunk;
                cond_resched();
        }
        return written ? written : -EFAULT;
}

static ssize_t aio_read_zero(struct kiocb *iocb, const struct iovec *iov,
                             unsigned long nr_segs, loff_t pos)
{
        size_t written = 0;
        unsigned long i;
        ssize_t ret;

        for (i = 0; i < nr_segs; i++) {
                ret = read_zero(iocb->ki_filp, iov[i].iov_base, iov[i].iov_len,
                                &pos);
                if (ret < 0)
                        break;
                written += ret;
        }

        return written ? written : -EFAULT;
}

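/*
 * mmap of /dev/zero: shared mappings are backed by shmem, private mappings
 * fall through to ordinary anonymous zero pages; without an MMU the
 * mapping is refused.
 */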
static int mmap_zero(struct file *file, struct vm_area_struct *vma)
{
#ifndef CONFIG_MMU
        return -ENOSYS;
#endif
        if (vma->vm_flags & VM_SHARED)
                return shmem_zero_setup(vma);
        return 0;
}

static ssize_t write_full(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
 * can fopen() both devices with "a" now.  This was previously impossible.
 * -- SRB.
 */
static loff_t null_lseek(struct file *file, loff_t offset, int orig)
{
        return file->f_pos = 0;
}

/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
{
        loff_t ret;

        mutex_lock(&file_inode(file)->i_mutex);
        switch (orig) {
        case SEEK_CUR:
                offset += file->f_pos;
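                /* fall through */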
        case SEEK_SET:
                /* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
                if (IS_ERR_VALUE((unsigned long long)offset)) {
                        ret = -EOVERFLOW;
                        break;
                }
                file->f_pos = offset;
                ret = file->f_pos;
                force_successful_syscall_return();
                break;
        default:
                ret = -EINVAL;
        }
        mutex_unlock(&file_inode(file)->i_mutex);
        return ret;
}

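/*
 * Opening /dev/port (and, via the open_mem/open_kmem aliases below,
 * /dev/mem and /dev/kmem) requires CAP_SYS_RAWIO.
 */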
static int open_port(struct inode *inode, struct file *filp)
{
        return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}

#define zero_lseek      null_lseek
#define full_lseek      null_lseek
#define write_zero      write_null
#define read_full       read_zero
#define aio_write_zero  aio_write_null
#define open_mem        open_port
#define open_kmem       open_mem

static const struct file_operations mem_fops = {
        .llseek         = memory_lseek,
        .read           = read_mem,
        .write          = write_mem,
        .mmap           = mmap_mem,
        .open           = open_mem,
        .get_unmapped_area = get_unmapped_area_mem,
};

#ifdef CONFIG_DEVKMEM
static const struct file_operations kmem_fops = {
        .llseek         = memory_lseek,
        .read           = read_kmem,
        .write          = write_kmem,
        .mmap           = mmap_kmem,
        .open           = open_kmem,
        .get_unmapped_area = get_unmapped_area_mem,
};
#endif

static const struct file_operations null_fops = {
        .llseek         = null_lseek,
        .read           = read_null,
        .write          = write_null,
        .aio_read       = aio_read_null,
        .aio_write      = aio_write_null,
        .splice_write   = splice_write_null,
};

#ifdef CONFIG_DEVPORT
static const struct file_operations port_fops = {
        .llseek         = memory_lseek,
        .read           = read_port,
        .write          = write_port,
        .open           = open_port,
};
#endif

static const struct file_operations zero_fops = {
        .llseek         = zero_lseek,
        .read           = read_zero,
        .write          = write_zero,
        .aio_read       = aio_read_zero,
        .aio_write      = aio_write_zero,
        .mmap           = mmap_zero,
};

/*
 * capabilities for /dev/zero
 * - permits private mappings, "copies" are taken of the source of zeros
 * - no writeback happens
 */
static struct backing_dev_info zero_bdi = {
        .name           = "char/mem",
        .capabilities   = BDI_CAP_MAP_COPY | BDI_CAP_NO_ACCT_AND_WRITEBACK,
};

static const struct file_operations full_fops = {
        .llseek         = full_lseek,
        .read           = read_full,
        .write          = write_full,
};

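/*
 * The array index is the device's minor number under MEM_MAJOR; entries
 * that are absent or compiled out have no fops and are rejected at open
 * time.
 */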
static const struct memdev {
        const char *name;
        umode_t mode;
        const struct file_operations *fops;
        struct backing_dev_info *dev_info;
} devlist[] = {
         [1] = { "mem", 0, &mem_fops, &directly_mappable_cdev_bdi },
#ifdef CONFIG_DEVKMEM
         [2] = { "kmem", 0, &kmem_fops, &directly_mappable_cdev_bdi },
#endif
         [3] = { "null", 0666, &null_fops, NULL },
#ifdef CONFIG_DEVPORT
         [4] = { "port", 0, &port_fops, NULL },
#endif
         [5] = { "zero", 0666, &zero_fops, &zero_bdi },
         [7] = { "full", 0666, &full_fops, NULL },
         [8] = { "random", 0666, &random_fops, NULL },
         [9] = { "urandom", 0666, &urandom_fops, NULL },
#ifdef CONFIG_PRINTK
        [11] = { "kmsg", 0644, &kmsg_fops, NULL },
#endif
};

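/*
 * open() for the whole MEM_MAJOR character major: look the minor up in
 * devlist and hand the file off to the per-device file_operations.
 */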
static int memory_open(struct inode *inode, struct file *filp)
{
        int minor;
        const struct memdev *dev;

        minor = iminor(inode);
        if (minor >= ARRAY_SIZE(devlist))
                return -ENXIO;

        dev = &devlist[minor];
        if (!dev->fops)
                return -ENXIO;

        filp->f_op = dev->fops;
        if (dev->dev_info)
                filp->f_mapping->backing_dev_info = dev->dev_info;

        /* Is /dev/mem or /dev/kmem ? */
        if (dev->dev_info == &directly_mappable_cdev_bdi)
                filp->f_mode |= FMODE_UNSIGNED_OFFSET;

        if (dev->fops->open)
                return dev->fops->open(inode, filp);

        return 0;
}

static const struct file_operations memory_fops = {
        .open = memory_open,
        .llseek = noop_llseek,
};

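/*
 * Supply the default device node permissions for each minor from devlist.
 */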
static char *mem_devnode(struct device *dev, umode_t *mode)
{
        if (mode && devlist[MINOR(dev->devt)].mode)
                *mode = devlist[MINOR(dev->devt)].mode;
        return NULL;
}

static struct class *mem_class;

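/*
 * Set up the zero_bdi backing_dev_info, register major MEM_MAJOR, create
 * the "mem" class and its device nodes, and finally initialise the tty
 * layer.
 */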
static int __init chr_dev_init(void)
{
        int minor;
        int err;

        err = bdi_init(&zero_bdi);
        if (err)
                return err;

        if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
                printk("unable to get major %d for memory devs\n", MEM_MAJOR);

        mem_class = class_create(THIS_MODULE, "mem");
        if (IS_ERR(mem_class))
                return PTR_ERR(mem_class);

        mem_class->devnode = mem_devnode;
        for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
                if (!devlist[minor].name)
                        continue;

                /*
                 * Create /dev/port?
                 */
                if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
                        continue;

                device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
                              NULL, devlist[minor].name);
        }

        return tty_init();
}

fs_initcall(chr_dev_init);