// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/splice.h>
#include <linux/pfn.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/uio.h>

#include <linux/uaccess.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

#define DEVPORT_MINOR   4

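/*
 * Return the number of bytes from @start to the end of the page that
 * contains it, capped at @size.  The read/write loops below use this to
 * split transfers into chunks that never cross a page boundary.
 */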
static inline unsigned long size_inside_page(unsigned long start,
                                             unsigned long size)
{
        unsigned long sz;

        sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));

        return min(sz, size);
}

#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(phys_addr_t addr, size_t count)
{
        return addr + count <= __pa(high_memory);
}

static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
        return 1;
}
#endif

#ifdef CONFIG_STRICT_DEVMEM
static inline int page_is_allowed(unsigned long pfn)
{
        return devmem_is_allowed(pfn);
}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        u64 from = ((u64)pfn) << PAGE_SHIFT;
        u64 to = from + size;
        u64 cursor = from;

        while (cursor < to) {
                if (!devmem_is_allowed(pfn))
                        return 0;
                cursor += PAGE_SIZE;
                pfn++;
        }
        return 1;
}
#else
static inline int page_is_allowed(unsigned long pfn)
{
        return 1;
}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
        return 1;
}
#endif

#ifndef unxlate_dev_mem_ptr
#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
void __weak unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
}
#endif

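/*
 * Yield the CPU if a reschedule is pending and tell the caller to stop
 * copying once a fatal signal is pending, so that large transfers stay
 * preemptible and killable.
 */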
static inline bool should_stop_iteration(void)
{
        if (need_resched())
                cond_resched();
        return fatal_signal_pending(current);
}

/*
 * This function reads the *physical* memory. The f_pos points directly to the
 * memory location.
 */
static ssize_t read_mem(struct file *file, char __user *buf,
                        size_t count, loff_t *ppos)
{
        phys_addr_t p = *ppos;
        ssize_t read, sz;
        void *ptr;
        char *bounce;
        int err;

        if (p != *ppos)
                return 0;

        if (!valid_phys_addr_range(p, count))
                return -EFAULT;
        read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = size_inside_page(p, count);
                if (sz > 0) {
                        if (clear_user(buf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        count -= sz;
                        read += sz;
                }
        }
#endif

        bounce = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!bounce)
                return -ENOMEM;

        while (count > 0) {
                unsigned long remaining;
                int allowed, probe;

                sz = size_inside_page(p, count);

                err = -EPERM;
                allowed = page_is_allowed(p >> PAGE_SHIFT);
                if (!allowed)
                        goto failed;

                err = -EFAULT;
                if (allowed == 2) {
                        /* Show zeros for restricted memory. */
                        remaining = clear_user(buf, sz);
                } else {
                        /*
                         * On ia64 if a page has been mapped somewhere as
                         * uncached, then it must also be accessed uncached
                         * by the kernel or data corruption may occur.
                         */
                        ptr = xlate_dev_mem_ptr(p);
                        if (!ptr)
                                goto failed;

                        probe = probe_kernel_read(bounce, ptr, sz);
                        unxlate_dev_mem_ptr(p, ptr);
                        if (probe)
                                goto failed;

                        remaining = copy_to_user(buf, bounce, sz);
                }

                if (remaining)
                        goto failed;

                buf += sz;
                p += sz;
                count -= sz;
                read += sz;
                if (should_stop_iteration())
                        break;
        }
        kfree(bounce);

        *ppos += read;
        return read;

failed:
        kfree(bounce);
        return err;
}

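/*
 * This function writes to the *physical* memory.  As with read_mem(),
 * the f_pos points directly to the memory location.
 */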
static ssize_t write_mem(struct file *file, const char __user *buf,
                         size_t count, loff_t *ppos)
{
        phys_addr_t p = *ppos;
        ssize_t written, sz;
        unsigned long copied;
        void *ptr;

        if (p != *ppos)
                return -EFBIG;

        if (!valid_phys_addr_range(p, count))
                return -EFAULT;

        written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = size_inside_page(p, count);
                /* Hmm. Do something? */
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }
#endif

        while (count > 0) {
                int allowed;

                sz = size_inside_page(p, count);

                allowed = page_is_allowed(p >> PAGE_SHIFT);
                if (!allowed)
                        return -EPERM;

                /* Skip actual writing when a page is marked as restricted. */
                if (allowed == 1) {
                        /*
                         * On ia64 if a page has been mapped somewhere as
                         * uncached, then it must also be accessed uncached
                         * by the kernel or data corruption may occur.
                         */
                        ptr = xlate_dev_mem_ptr(p);
                        if (!ptr) {
                                if (written)
                                        break;
                                return -EFAULT;
                        }

                        copied = copy_from_user(ptr, buf, sz);
                        unxlate_dev_mem_ptr(p, ptr);
                        if (copied) {
                                written += sz - copied;
                                if (written)
                                        break;
                                return -EFAULT;
                        }
                }

                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
                if (should_stop_iteration())
                        break;
        }

        *ppos += written;
        return written;
}

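/*
 * Weak default hook: architectures may override this to reject or adjust
 * the page protection used when mmapping physical memory.  Returning
 * non-zero allows the mapping to proceed.
 */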
int __weak phys_mem_access_prot_allowed(struct file *file,
        unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
{
        return 1;
}

#ifndef __HAVE_PHYS_MEM_ACCESS_PROT

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 *
 */
#ifdef pgprot_noncached
static int uncached_access(struct file *file, phys_addr_t addr)
{
#if defined(CONFIG_IA64)
        /*
         * On ia64, we ignore O_DSYNC because we cannot tolerate memory
         * attribute aliases.
         */
        return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#elif defined(CONFIG_MIPS)
        {
                extern int __uncached_access(struct file *file,
                                             unsigned long addr);

                return __uncached_access(file, addr);
        }
#else
        /*
         * Accessing memory above the top the kernel knows about or through a
         * file pointer that was marked O_DSYNC will be done non-cached.
         */
        if (file->f_flags & O_DSYNC)
                return 1;
        return addr >= __pa(high_memory);
#endif
}
#endif

static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
                                     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
        phys_addr_t offset = pfn << PAGE_SHIFT;

        if (uncached_access(file, offset))
                return pgprot_noncached(vma_prot);
#endif
        return vma_prot;
}
#endif

#ifndef CONFIG_MMU
static unsigned long get_unmapped_area_mem(struct file *file,
                                           unsigned long addr,
                                           unsigned long len,
                                           unsigned long pgoff,
                                           unsigned long flags)
{
        if (!valid_mmap_phys_addr_range(pgoff, len))
                return (unsigned long) -EINVAL;
        return pgoff << PAGE_SHIFT;
}

/* permit direct mmap, for read, write or exec */
static unsigned memory_mmap_capabilities(struct file *file)
{
        return NOMMU_MAP_DIRECT |
                NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC;
}

static unsigned zero_mmap_capabilities(struct file *file)
{
        return NOMMU_MAP_COPY;
}

/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
        return vma->vm_flags & VM_MAYSHARE;
}
#else

static inline int private_mapping_ok(struct vm_area_struct *vma)
{
        return 1;
}
#endif

static const struct vm_operations_struct mmap_mem_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
        .access = generic_access_phys
#endif
};

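/*
 * Map a physical address range (vm_pgoff is the page frame offset) into
 * the caller's address space, after the usual validity, STRICT_DEVMEM and
 * architecture protection checks.
 */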
static int mmap_mem(struct file *file, struct vm_area_struct *vma)
{
        size_t size = vma->vm_end - vma->vm_start;
        phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;

        /* It's illegal to wrap around the end of the physical address space. */
        if (offset + (phys_addr_t)size - 1 < offset)
                return -EINVAL;

        if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
                return -EINVAL;

        if (!private_mapping_ok(vma))
                return -ENOSYS;

        if (!range_is_allowed(vma->vm_pgoff, size))
                return -EPERM;

        if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
                                                &vma->vm_page_prot))
                return -EINVAL;

        vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
                                                 size,
                                                 vma->vm_page_prot);

        vma->vm_ops = &mmap_mem_ops;

        /* Remap-pfn-range will mark the range VM_IO */
        if (remap_pfn_range(vma,
                            vma->vm_start,
                            vma->vm_pgoff,
                            size,
                            vma->vm_page_prot)) {
                return -EAGAIN;
        }
        return 0;
}

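/*
 * mmap of /dev/kmem: translate the kernel-virtual offset into a page
 * frame and fall through to mmap_mem().
 */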
static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
{
        unsigned long pfn;

        /* Turn a kernel-virtual address into a physical page frame */
        pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;

        /*
         * RED-PEN: on some architectures there is more mapped memory than
         * available in mem_map which pfn_valid checks for. Perhaps should add a
         * new macro here.
         *
         * RED-PEN: vmalloc is not supported right now.
         */
        if (!pfn_valid(pfn))
                return -EIO;

        vma->vm_pgoff = pfn;
        return mmap_mem(file, vma);
}

/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
static ssize_t read_kmem(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t low_count, read, sz;
        char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
        int err = 0;

        read = 0;
        if (p < (unsigned long) high_memory) {
                low_count = count;
                if (count > (unsigned long)high_memory - p)
                        low_count = (unsigned long)high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
                /* we don't have page 0 mapped on sparc and m68k.. */
                if (p < PAGE_SIZE && low_count > 0) {
                        sz = size_inside_page(p, low_count);
                        if (clear_user(buf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        read += sz;
                        low_count -= sz;
                        count -= sz;
                }
#endif
                while (low_count > 0) {
                        sz = size_inside_page(p, low_count);

                        /*
                         * On ia64 if a page has been mapped somewhere as
                         * uncached, then it must also be accessed uncached
                         * by the kernel or data corruption may occur
                         */
                        kbuf = xlate_dev_kmem_ptr((void *)p);
                        if (!virt_addr_valid(kbuf))
                                return -ENXIO;

                        if (copy_to_user(buf, kbuf, sz))
                                return -EFAULT;
                        buf += sz;
                        p += sz;
                        read += sz;
                        low_count -= sz;
                        count -= sz;
                        if (should_stop_iteration()) {
                                count = 0;
                                break;
                        }
                }
        }

        if (count > 0) {
                kbuf = (char *)__get_free_page(GFP_KERNEL);
                if (!kbuf)
                        return -ENOMEM;
                while (count > 0) {
                        sz = size_inside_page(p, count);
                        if (!is_vmalloc_or_module_addr((void *)p)) {
                                err = -ENXIO;
                                break;
                        }
                        sz = vread(kbuf, (char *)p, sz);
                        if (!sz)
                                break;
                        if (copy_to_user(buf, kbuf, sz)) {
                                err = -EFAULT;
                                break;
                        }
                        count -= sz;
                        buf += sz;
                        read += sz;
                        p += sz;
                        if (should_stop_iteration())
                                break;
                }
                free_page((unsigned long)kbuf);
        }
        *ppos = p;
        return read ? read : err;
}


static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
                                size_t count, loff_t *ppos)
{
        ssize_t written, sz;
        unsigned long copied;

        written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
        /* we don't have page 0 mapped on sparc and m68k.. */
        if (p < PAGE_SIZE) {
                sz = size_inside_page(p, count);
                /* Hmm. Do something? */
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
        }
#endif

        while (count > 0) {
                void *ptr;

                sz = size_inside_page(p, count);

                /*
                 * On ia64 if a page has been mapped somewhere as uncached, then
                 * it must also be accessed uncached by the kernel or data
                 * corruption may occur.
                 */
                ptr = xlate_dev_kmem_ptr((void *)p);
                if (!virt_addr_valid(ptr))
                        return -ENXIO;

                copied = copy_from_user(ptr, buf, sz);
                if (copied) {
                        written += sz - copied;
                        if (written)
                                break;
                        return -EFAULT;
                }
                buf += sz;
                p += sz;
                count -= sz;
                written += sz;
                if (should_stop_iteration())
                        break;
        }

        *ppos += written;
        return written;
}

/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        unsigned long p = *ppos;
        ssize_t wrote = 0;
        ssize_t virtr = 0;
        char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
        int err = 0;

        if (p < (unsigned long) high_memory) {
                unsigned long to_write = min_t(unsigned long, count,
                                               (unsigned long)high_memory - p);
                wrote = do_write_kmem(p, buf, to_write, ppos);
                if (wrote != to_write)
                        return wrote;
                p += wrote;
                buf += wrote;
                count -= wrote;
        }

        if (count > 0) {
                kbuf = (char *)__get_free_page(GFP_KERNEL);
                if (!kbuf)
                        return wrote ? wrote : -ENOMEM;
                while (count > 0) {
                        unsigned long sz = size_inside_page(p, count);
                        unsigned long n;

                        if (!is_vmalloc_or_module_addr((void *)p)) {
                                err = -ENXIO;
                                break;
                        }
                        n = copy_from_user(kbuf, buf, sz);
                        if (n) {
                                err = -EFAULT;
                                break;
                        }
                        vwrite(kbuf, (char *)p, sz);
                        count -= sz;
                        buf += sz;
                        virtr += sz;
                        p += sz;
                        if (should_stop_iteration())
                                break;
                }
                free_page((unsigned long)kbuf);
        }

        *ppos = p;
        return virtr + wrote ? : err;
}

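/*
 * /dev/port: each byte read or written is an inb()/outb() on the I/O
 * port whose number is the current file offset.
 */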
static ssize_t read_port(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        char __user *tmp = buf;

        if (!access_ok(VERIFY_WRITE, buf, count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                if (__put_user(inb(i), tmp) < 0)
                        return -EFAULT;
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp-buf;
}

static ssize_t write_port(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        unsigned long i = *ppos;
        const char __user *tmp = buf;

        if (!access_ok(VERIFY_READ, buf, count))
                return -EFAULT;
        while (count-- > 0 && i < 65536) {
                char c;

                if (__get_user(c, tmp)) {
                        if (tmp > buf)
                                break;
                        return -EFAULT;
                }
                outb(c, i);
                i++;
                tmp++;
        }
        *ppos = i;
        return tmp-buf;
}

static ssize_t read_null(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
        return 0;
}

static ssize_t write_null(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        return count;
}

static ssize_t read_iter_null(struct kiocb *iocb, struct iov_iter *to)
{
        return 0;
}

static ssize_t write_iter_null(struct kiocb *iocb, struct iov_iter *from)
{
        size_t count = iov_iter_count(from);
        iov_iter_advance(from, count);
        return count;
}

static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
                        struct splice_desc *sd)
{
        return sd->len;
}

static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
                                 loff_t *ppos, size_t len, unsigned int flags)
{
        return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}

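/*
 * Fill the caller's buffers with zeroes, at most a page at a time so the
 * loop stays responsive to signals and rescheduling.
 */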
static ssize_t read_iter_zero(struct kiocb *iocb, struct iov_iter *iter)
{
        size_t written = 0;

        while (iov_iter_count(iter)) {
                size_t chunk = iov_iter_count(iter), n;

                if (chunk > PAGE_SIZE)
                        chunk = PAGE_SIZE;      /* Just for latency reasons */
                n = iov_iter_zero(chunk, iter);
                if (!n && iov_iter_count(iter))
                        return written ? written : -EFAULT;
                written += n;
                if (signal_pending(current))
                        return written ? written : -ERESTARTSYS;
                cond_resched();
        }
        return written;
}

static int mmap_zero(struct file *file, struct vm_area_struct *vma)
{
#ifndef CONFIG_MMU
        return -ENOSYS;
#endif
        if (vma->vm_flags & VM_SHARED)
                return shmem_zero_setup(vma);
        return 0;
}

static unsigned long get_unmapped_area_zero(struct file *file,
                                unsigned long addr, unsigned long len,
                                unsigned long pgoff, unsigned long flags)
{
#ifdef CONFIG_MMU
        if (flags & MAP_SHARED) {
                /*
                 * mmap_zero() will call shmem_zero_setup() to create a file,
                 * so use shmem's get_unmapped_area in case it can be huge;
                 * and pass NULL for file as in mmap.c's get_unmapped_area(),
                 * so as not to confuse shmem with our handle on "/dev/zero".
                 */
                return shmem_get_unmapped_area(NULL, addr, len, pgoff, flags);
        }

        /* Otherwise flags & MAP_PRIVATE: with no shmem object beneath it */
        return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
#else
        return -ENOSYS;
#endif
}

static ssize_t write_full(struct file *file, const char __user *buf,
                          size_t count, loff_t *ppos)
{
        return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
 * can fopen() both devices with "a" now.  This was previously impossible.
 * -- SRB.
 */
static loff_t null_lseek(struct file *file, loff_t offset, int orig)
{
        return file->f_pos = 0;
}

/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
{
        loff_t ret;

        inode_lock(file_inode(file));
        switch (orig) {
        case SEEK_CUR:
                offset += file->f_pos;
        case SEEK_SET:
                /* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
                if ((unsigned long long)offset >= -MAX_ERRNO) {
                        ret = -EOVERFLOW;
                        break;
                }
                file->f_pos = offset;
                ret = file->f_pos;
                force_successful_syscall_return();
                break;
        default:
                ret = -EINVAL;
        }
        inode_unlock(file_inode(file));
        return ret;
}

static int open_port(struct inode *inode, struct file *filp)
{
        return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}

#define zero_lseek      null_lseek
#define full_lseek      null_lseek
#define write_zero      write_null
#define write_iter_zero write_iter_null
#define open_mem        open_port
#define open_kmem       open_mem

static const struct file_operations __maybe_unused mem_fops = {
        .llseek         = memory_lseek,
        .read           = read_mem,
        .write          = write_mem,
        .mmap           = mmap_mem,
        .open           = open_mem,
#ifndef CONFIG_MMU
        .get_unmapped_area = get_unmapped_area_mem,
        .mmap_capabilities = memory_mmap_capabilities,
#endif
};

static const struct file_operations __maybe_unused kmem_fops = {
        .llseek         = memory_lseek,
        .read           = read_kmem,
        .write          = write_kmem,
        .mmap           = mmap_kmem,
        .open           = open_kmem,
#ifndef CONFIG_MMU
        .get_unmapped_area = get_unmapped_area_mem,
        .mmap_capabilities = memory_mmap_capabilities,
#endif
};

static const struct file_operations null_fops = {
        .llseek         = null_lseek,
        .read           = read_null,
        .write          = write_null,
        .read_iter      = read_iter_null,
        .write_iter     = write_iter_null,
        .splice_write   = splice_write_null,
};

static const struct file_operations __maybe_unused port_fops = {
        .llseek         = memory_lseek,
        .read           = read_port,
        .write          = write_port,
        .open           = open_port,
};

static const struct file_operations zero_fops = {
        .llseek         = zero_lseek,
        .write          = write_zero,
        .read_iter      = read_iter_zero,
        .write_iter     = write_iter_zero,
        .mmap           = mmap_zero,
        .get_unmapped_area = get_unmapped_area_zero,
#ifndef CONFIG_MMU
        .mmap_capabilities = zero_mmap_capabilities,
#endif
};

static const struct file_operations full_fops = {
        .llseek         = full_lseek,
        .read_iter      = read_iter_zero,
        .write          = write_full,
};

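/*
 * Table of the character devices handled here.  The array index is the
 * minor number under MEM_MAJOR, so e.g. minor 3 is /dev/null and minor 5
 * is /dev/zero; holes are minors that are not provided.
 */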
static const struct memdev {
        const char *name;
        umode_t mode;
        const struct file_operations *fops;
        fmode_t fmode;
} devlist[] = {
#ifdef CONFIG_DEVMEM
         [1] = { "mem", 0, &mem_fops, FMODE_UNSIGNED_OFFSET },
#endif
#ifdef CONFIG_DEVKMEM
         [2] = { "kmem", 0, &kmem_fops, FMODE_UNSIGNED_OFFSET },
#endif
         [3] = { "null", 0666, &null_fops, 0 },
#ifdef CONFIG_DEVPORT
         [4] = { "port", 0, &port_fops, 0 },
#endif
         [5] = { "zero", 0666, &zero_fops, 0 },
         [7] = { "full", 0666, &full_fops, 0 },
         [8] = { "random", 0666, &random_fops, FMODE_NOWAIT },
         [9] = { "urandom", 0666, &urandom_fops, FMODE_NOWAIT },
#ifdef CONFIG_PRINTK
        [11] = { "kmsg", 0644, &kmsg_fops, 0 },
#endif
};

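/*
 * open() handler for the MEM_MAJOR character device: look up the minor in
 * devlist[], install the per-device file_operations and chain to that
 * device's own open routine if it has one.
 */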
static int memory_open(struct inode *inode, struct file *filp)
{
        int minor;
        const struct memdev *dev;

        minor = iminor(inode);
        if (minor >= ARRAY_SIZE(devlist))
                return -ENXIO;

        dev = &devlist[minor];
        if (!dev->fops)
                return -ENXIO;

        filp->f_op = dev->fops;
        filp->f_mode |= dev->fmode;

        if (dev->fops->open)
                return dev->fops->open(inode, filp);

        return 0;
}

static const struct file_operations memory_fops = {
        .open = memory_open,
        .llseek = noop_llseek,
};

static char *mem_devnode(struct device *dev, umode_t *mode)
{
        if (mode && devlist[MINOR(dev->devt)].mode)
                *mode = devlist[MINOR(dev->devt)].mode;
        return NULL;
}

static struct class *mem_class;

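/*
 * Register the MEM_MAJOR character device, create the "mem" class and a
 * device node for every populated devlist[] entry (skipping /dev/port when
 * the architecture has no port I/O), then hand off to tty_init().
 */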
static int __init chr_dev_init(void)
{
        int minor;

        if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
                printk("unable to get major %d for memory devs\n", MEM_MAJOR);

        mem_class = class_create(THIS_MODULE, "mem");
        if (IS_ERR(mem_class))
                return PTR_ERR(mem_class);

        mem_class->devnode = mem_devnode;
        for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
                if (!devlist[minor].name)
                        continue;

                /*
                 * Create /dev/port?
                 */
                if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
                        continue;

                device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
                              NULL, devlist[minor].name);
        }

        return tty_init();
}

fs_initcall(chr_dev_init);