// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/drivers/char/mem.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Added devfs support.
 *    Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
 *  Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/mm.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/raw.h>
#include <linux/tty.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/backing-dev.h>
#include <linux/shmem_fs.h>
#include <linux/splice.h>
#include <linux/pfn.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/uio.h>

#include <linux/uaccess.h>

#ifdef CONFIG_IA64
# include <linux/efi.h>
#endif

#define DEVPORT_MINOR	4

static inline unsigned long size_inside_page(unsigned long start,
					     unsigned long size)
{
	unsigned long sz;

	sz = PAGE_SIZE - (start & (PAGE_SIZE - 1));

	return min(sz, size);
}

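/*
 * Worked example (added for illustration, not in the original source): with
 * PAGE_SIZE == 4096, size_inside_page(0x1ffd, 100) returns 3 -- only the
 * bytes up to the next page boundary -- while size_inside_page(0x2000, 100)
 * returns 100, since the whole request already fits inside one page.
 */
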
#ifndef ARCH_HAS_VALID_PHYS_ADDR_RANGE
static inline int valid_phys_addr_range(phys_addr_t addr, size_t count)
{
	return addr + count <= __pa(high_memory);
}

static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return 1;
}
#endif

#ifdef CONFIG_STRICT_DEVMEM
static inline int page_is_allowed(unsigned long pfn)
{
	return devmem_is_allowed(pfn);
}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	u64 from = ((u64)pfn) << PAGE_SHIFT;
	u64 to = from + size;
	u64 cursor = from;

	while (cursor < to) {
		if (!devmem_is_allowed(pfn))
			return 0;
		cursor += PAGE_SIZE;
		pfn++;
	}
	return 1;
}
#else
static inline int page_is_allowed(unsigned long pfn)
{
	return 1;
}
static inline int range_is_allowed(unsigned long pfn, unsigned long size)
{
	return 1;
}
#endif

#ifndef unxlate_dev_mem_ptr
#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
void __weak unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
}
#endif

static inline bool should_stop_iteration(void)
{
	if (need_resched())
		cond_resched();
	return fatal_signal_pending(current);
}

/*
 * This function reads the *physical* memory. The f_pos points directly to the
 * memory location.
 */
static ssize_t read_mem(struct file *file, char __user *buf,
			size_t count, loff_t *ppos)
{
	phys_addr_t p = *ppos;
	ssize_t read, sz;
	void *ptr;
	char *bounce;
	int err;

	if (p != *ppos)
		return 0;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;
	read = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = size_inside_page(p, count);
		if (sz > 0) {
			if (clear_user(buf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			count -= sz;
			read += sz;
		}
	}
#endif

	bounce = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bounce)
		return -ENOMEM;

	while (count > 0) {
		unsigned long remaining;
		int allowed, probe;

		sz = size_inside_page(p, count);

		err = -EPERM;
		allowed = page_is_allowed(p >> PAGE_SHIFT);
		if (!allowed)
			goto failed;

		err = -EFAULT;
		if (allowed == 2) {
			/* Show zeros for restricted memory. */
			remaining = clear_user(buf, sz);
		} else {
			/*
			 * On ia64 if a page has been mapped somewhere as
			 * uncached, then it must also be accessed uncached
			 * by the kernel or data corruption may occur.
			 */
			ptr = xlate_dev_mem_ptr(p);
			if (!ptr)
				goto failed;

			probe = probe_kernel_read(bounce, ptr, sz);
			unxlate_dev_mem_ptr(p, ptr);
			if (probe)
				goto failed;

			remaining = copy_to_user(buf, bounce, sz);
		}

		if (remaining)
			goto failed;

		buf += sz;
		p += sz;
		count -= sz;
		read += sz;
		if (should_stop_iteration())
			break;
	}
	kfree(bounce);

	*ppos += read;
	return read;

failed:
	kfree(bounce);
	return err;
}

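/*
 * Illustrative userspace sketch (an assumption for clarity, not part of this
 * driver): reading 16 bytes of physical memory through /dev/mem. With
 * CONFIG_STRICT_DEVMEM the range must be one devmem_is_allowed() permits,
 * e.g. the legacy PC BIOS area:
 *
 *	int fd = open("/dev/mem", O_RDONLY);
 *	unsigned char sig[16];
 *	if (fd >= 0 && pread(fd, sig, sizeof(sig), 0xF0000) == sizeof(sig))
 *		;	// sig now holds the bytes at physical 0xF0000
 */
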
static ssize_t write_mem(struct file *file, const char __user *buf,
			 size_t count, loff_t *ppos)
{
	phys_addr_t p = *ppos;
	ssize_t written, sz;
	unsigned long copied;
	void *ptr;

	if (p != *ppos)
		return -EFBIG;

	if (!valid_phys_addr_range(p, count))
		return -EFAULT;

	written = 0;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = size_inside_page(p, count);
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		int allowed;

		sz = size_inside_page(p, count);

		allowed = page_is_allowed(p >> PAGE_SHIFT);
		if (!allowed)
			return -EPERM;

		/* Skip actual writing when a page is marked as restricted. */
		if (allowed == 1) {
			/*
			 * On ia64 if a page has been mapped somewhere as
			 * uncached, then it must also be accessed uncached
			 * by the kernel or data corruption may occur.
			 */
			ptr = xlate_dev_mem_ptr(p);
			if (!ptr) {
				if (written)
					break;
				return -EFAULT;
			}

			copied = copy_from_user(ptr, buf, sz);
			unxlate_dev_mem_ptr(p, ptr);
			if (copied) {
				written += sz - copied;
				if (written)
					break;
				return -EFAULT;
			}
		}

		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
		if (should_stop_iteration())
			break;
	}

	*ppos += written;
	return written;
}

int __weak phys_mem_access_prot_allowed(struct file *file,
	unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
{
	return 1;
}

#ifndef __HAVE_PHYS_MEM_ACCESS_PROT

/*
 * Architectures vary in how they handle caching for addresses
 * outside of main memory.
 */
#ifdef pgprot_noncached
static int uncached_access(struct file *file, phys_addr_t addr)
{
#if defined(CONFIG_IA64)
	/*
	 * On ia64, we ignore O_DSYNC because we cannot tolerate memory
	 * attribute aliases.
	 */
	return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
#elif defined(CONFIG_MIPS)
	{
		extern int __uncached_access(struct file *file,
					     unsigned long addr);

		return __uncached_access(file, addr);
	}
#else
	/*
	 * Accessing memory above the top of memory the kernel knows about,
	 * or through a file pointer that was marked O_DSYNC, will be done
	 * non-cached.
	 */
	if (file->f_flags & O_DSYNC)
		return 1;
	return addr >= __pa(high_memory);
#endif
}
#endif

static pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot)
{
#ifdef pgprot_noncached
	phys_addr_t offset = pfn << PAGE_SHIFT;

	if (uncached_access(file, offset))
		return pgprot_noncached(vma_prot);
#endif
	return vma_prot;
}
#endif

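/*
 * For illustration (hypothetical userspace usage, not part of this file):
 * opening /dev/mem with O_DSYNC requests an uncached mapping on
 * architectures that honor it, which is what MMIO tools traditionally did:
 *
 *	int fd = open("/dev/mem", O_RDWR | O_DSYNC);
 *	void *io = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			fd, phys_base);
 *
 * "len" and "phys_base" are placeholders for a device's register window.
 */
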
#ifndef CONFIG_MMU
static unsigned long get_unmapped_area_mem(struct file *file,
					   unsigned long addr,
					   unsigned long len,
					   unsigned long pgoff,
					   unsigned long flags)
{
	if (!valid_mmap_phys_addr_range(pgoff, len))
		return (unsigned long) -EINVAL;
	return pgoff << PAGE_SHIFT;
}

/* permit direct mmap, for read, write or exec */
static unsigned memory_mmap_capabilities(struct file *file)
{
	return NOMMU_MAP_DIRECT |
		NOMMU_MAP_READ | NOMMU_MAP_WRITE | NOMMU_MAP_EXEC;
}

static unsigned zero_mmap_capabilities(struct file *file)
{
	return NOMMU_MAP_COPY;
}

/* can't do an in-place private mapping if there's no MMU */
static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_MAYSHARE;
}
#else

static inline int private_mapping_ok(struct vm_area_struct *vma)
{
	return 1;
}
#endif

static const struct vm_operations_struct mmap_mem_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
	.access = generic_access_phys
#endif
};

static int mmap_mem(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;
	phys_addr_t offset = (phys_addr_t)vma->vm_pgoff << PAGE_SHIFT;

	/* It's illegal to wrap around the end of the physical address space. */
	if (offset + (phys_addr_t)size - 1 < offset)
		return -EINVAL;

	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
		return -EINVAL;

	if (!private_mapping_ok(vma))
		return -ENOSYS;

	if (!range_is_allowed(vma->vm_pgoff, size))
		return -EPERM;

	if (!phys_mem_access_prot_allowed(file, vma->vm_pgoff, size,
						&vma->vm_page_prot))
		return -EINVAL;

	vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff,
						 size,
						 vma->vm_page_prot);

	vma->vm_ops = &mmap_mem_ops;

	/* Remap-pfn-range will mark the range VM_IO */
	if (remap_pfn_range(vma,
			    vma->vm_start,
			    vma->vm_pgoff,
			    size,
			    vma->vm_page_prot)) {
		return -EAGAIN;
	}
	return 0;
}

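/*
 * Illustrative note (added, not in the original source): the mmap() offset
 * is interpreted as a physical address, so vm_pgoff is already a PFN. E.g.
 * mapping one page at physical 0x80000000 (assuming the range is allowed):
 *
 *	mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0x80000000);
 *
 * arrives here with vma->vm_pgoff == 0x80000000 >> PAGE_SHIFT == 0x80000.
 */
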
static int mmap_kmem(struct file *file, struct vm_area_struct *vma)
{
	unsigned long pfn;

	/* Turn a kernel-virtual address into a physical page frame */
	pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;

	/*
	 * RED-PEN: on some architectures there is more mapped memory than
	 * available in mem_map which pfn_valid checks for. Perhaps should add a
	 * new macro here.
	 *
	 * RED-PEN: vmalloc is not supported right now.
	 */
	if (!pfn_valid(pfn))
		return -EIO;

	vma->vm_pgoff = pfn;
	return mmap_mem(file, vma);
}

/*
 * This function reads the *virtual* memory as seen by the kernel.
 */
static ssize_t read_kmem(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t low_count, read, sz;
	char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
	int err = 0;

	read = 0;
	if (p < (unsigned long) high_memory) {
		low_count = count;
		if (count > (unsigned long)high_memory - p)
			low_count = (unsigned long)high_memory - p;

#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
		/* we don't have page 0 mapped on sparc and m68k.. */
		if (p < PAGE_SIZE && low_count > 0) {
			sz = size_inside_page(p, low_count);
			if (clear_user(buf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			read += sz;
			low_count -= sz;
			count -= sz;
		}
#endif
		while (low_count > 0) {
			sz = size_inside_page(p, low_count);

			/*
			 * On ia64 if a page has been mapped somewhere as
			 * uncached, then it must also be accessed uncached
			 * by the kernel or data corruption may occur
			 */
			kbuf = xlate_dev_kmem_ptr((void *)p);
			if (!virt_addr_valid(kbuf))
				return -ENXIO;

			if (copy_to_user(buf, kbuf, sz))
				return -EFAULT;
			buf += sz;
			p += sz;
			read += sz;
			low_count -= sz;
			count -= sz;
			if (should_stop_iteration()) {
				count = 0;
				break;
			}
		}
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return -ENOMEM;
		while (count > 0) {
			sz = size_inside_page(p, count);
			if (!is_vmalloc_or_module_addr((void *)p)) {
				err = -ENXIO;
				break;
			}
			sz = vread(kbuf, (char *)p, sz);
			if (!sz)
				break;
			if (copy_to_user(buf, kbuf, sz)) {
				err = -EFAULT;
				break;
			}
			count -= sz;
			buf += sz;
			read += sz;
			p += sz;
			if (should_stop_iteration())
				break;
		}
		free_page((unsigned long)kbuf);
	}
	*ppos = p;
	return read ? read : err;
}

static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
			     size_t count, loff_t *ppos)
{
	ssize_t written, sz;
	unsigned long copied;

	written = 0;
#ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED
	/* we don't have page 0 mapped on sparc and m68k.. */
	if (p < PAGE_SIZE) {
		sz = size_inside_page(p, count);
		/* Hmm. Do something? */
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
	}
#endif

	while (count > 0) {
		void *ptr;

		sz = size_inside_page(p, count);

		/*
		 * On ia64 if a page has been mapped somewhere as uncached, then
		 * it must also be accessed uncached by the kernel or data
		 * corruption may occur.
		 */
		ptr = xlate_dev_kmem_ptr((void *)p);
		if (!virt_addr_valid(ptr))
			return -ENXIO;

		copied = copy_from_user(ptr, buf, sz);
		if (copied) {
			written += sz - copied;
			if (written)
				break;
			return -EFAULT;
		}
		buf += sz;
		p += sz;
		count -= sz;
		written += sz;
		if (should_stop_iteration())
			break;
	}

	*ppos += written;
	return written;
}

/*
 * This function writes to the *virtual* memory as seen by the kernel.
 */
static ssize_t write_kmem(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	unsigned long p = *ppos;
	ssize_t wrote = 0;
	ssize_t virtr = 0;
	char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
	int err = 0;

	if (p < (unsigned long) high_memory) {
		unsigned long to_write = min_t(unsigned long, count,
					       (unsigned long)high_memory - p);
		wrote = do_write_kmem(p, buf, to_write, ppos);
		if (wrote != to_write)
			return wrote;
		p += wrote;
		buf += wrote;
		count -= wrote;
	}

	if (count > 0) {
		kbuf = (char *)__get_free_page(GFP_KERNEL);
		if (!kbuf)
			return wrote ? wrote : -ENOMEM;
		while (count > 0) {
			unsigned long sz = size_inside_page(p, count);
			unsigned long n;

			if (!is_vmalloc_or_module_addr((void *)p)) {
				err = -ENXIO;
				break;
			}
			n = copy_from_user(kbuf, buf, sz);
			if (n) {
				err = -EFAULT;
				break;
			}
			vwrite(kbuf, (char *)p, sz);
			count -= sz;
			buf += sz;
			virtr += sz;
			p += sz;
			if (should_stop_iteration())
				break;
		}
		free_page((unsigned long)kbuf);
	}

	*ppos = p;
	return virtr + wrote ? : err;
}

static ssize_t read_port(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	char __user *tmp = buf;

	if (!access_ok(buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		if (__put_user(inb(i), tmp) < 0)
			return -EFAULT;
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp-buf;
}

static ssize_t write_port(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	unsigned long i = *ppos;
	const char __user *tmp = buf;

	if (!access_ok(buf, count))
		return -EFAULT;
	while (count-- > 0 && i < 65536) {
		char c;

		if (__get_user(c, tmp)) {
			if (tmp > buf)
				break;
			return -EFAULT;
		}
		outb(c, i);
		i++;
		tmp++;
	}
	*ppos = i;
	return tmp-buf;
}

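/*
 * Illustrative sketch (assumed usage, not part of this file): /dev/port maps
 * the file offset to an I/O port number, so reading one byte at offset 0x61
 * performs inb(0x61). Opening requires CAP_SYS_RAWIO (see open_port below):
 *
 *	int fd = open("/dev/port", O_RDONLY);
 *	unsigned char v;
 *	pread(fd, &v, 1, 0x61);
 */
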
static ssize_t read_null(struct file *file, char __user *buf,
			 size_t count, loff_t *ppos)
{
	return 0;
}

static ssize_t write_null(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	return count;
}

static ssize_t read_iter_null(struct kiocb *iocb, struct iov_iter *to)
{
	return 0;
}

static ssize_t write_iter_null(struct kiocb *iocb, struct iov_iter *from)
{
	size_t count = iov_iter_count(from);

	iov_iter_advance(from, count);
	return count;
}

static int pipe_to_null(struct pipe_inode_info *info, struct pipe_buffer *buf,
			struct splice_desc *sd)
{
	return sd->len;
}

static ssize_t splice_write_null(struct pipe_inode_info *pipe, struct file *out,
				 loff_t *ppos, size_t len, unsigned int flags)
{
	return splice_from_pipe(pipe, out, ppos, len, flags, pipe_to_null);
}

static ssize_t read_iter_zero(struct kiocb *iocb, struct iov_iter *iter)
{
	size_t written = 0;

	while (iov_iter_count(iter)) {
		size_t chunk = iov_iter_count(iter), n;

		if (chunk > PAGE_SIZE)
			chunk = PAGE_SIZE;	/* Just for latency reasons */
		n = iov_iter_zero(chunk, iter);
		if (!n && iov_iter_count(iter))
			return written ? written : -EFAULT;
		written += n;
		if (signal_pending(current))
			return written ? written : -ERESTARTSYS;
		cond_resched();
	}
	return written;
}

static int mmap_zero(struct file *file, struct vm_area_struct *vma)
{
#ifndef CONFIG_MMU
	return -ENOSYS;
#endif
	if (vma->vm_flags & VM_SHARED)
		return shmem_zero_setup(vma);
	vma_set_anonymous(vma);
	return 0;
}

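/*
 * For illustration (hypothetical userspace usage): a MAP_PRIVATE mapping of
 * /dev/zero is plain anonymous memory (the vma_set_anonymous() path above);
 * this is how allocators obtained zeroed pages before MAP_ANONYMOUS existed:
 *
 *	void *mem = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE, fd_zero, 0);
 *
 * "len" and "fd_zero" (an open fd on /dev/zero) are placeholders.
 */
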
static unsigned long get_unmapped_area_zero(struct file *file,
				unsigned long addr, unsigned long len,
				unsigned long pgoff, unsigned long flags)
{
#ifdef CONFIG_MMU
	if (flags & MAP_SHARED) {
		/*
		 * mmap_zero() will call shmem_zero_setup() to create a file,
		 * so use shmem's get_unmapped_area in case it can be huge;
		 * and pass NULL for file as in mmap.c's get_unmapped_area(),
		 * so as not to confuse shmem with our handle on "/dev/zero".
		 */
		return shmem_get_unmapped_area(NULL, addr, len, pgoff, flags);
	}

	/* Otherwise flags & MAP_PRIVATE: with no shmem object beneath it */
	return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
#else
	return -ENOSYS;
#endif
}

static ssize_t write_full(struct file *file, const char __user *buf,
			  size_t count, loff_t *ppos)
{
	return -ENOSPC;
}

/*
 * Special lseek() function for /dev/null and /dev/zero.  Most notably, you
 * can fopen() both devices with "a" now.  This was previously impossible.
 * -- SRB.
 */
static loff_t null_lseek(struct file *file, loff_t offset, int orig)
{
	return file->f_pos = 0;
}

/*
 * The memory devices use the full 32/64 bits of the offset, and so we cannot
 * check against negative addresses: they are ok. The return value is weird,
 * though, in that case (0).
 *
 * also note that seeking relative to the "end of file" isn't supported:
 * it has no meaning, so it returns -EINVAL.
 */
static loff_t memory_lseek(struct file *file, loff_t offset, int orig)
{
	loff_t ret;

	inode_lock(file_inode(file));
	switch (orig) {
	case SEEK_CUR:
		offset += file->f_pos;
		/* fall through */
	case SEEK_SET:
		/* to avoid userland mistaking f_pos=-9 as -EBADF=-9 */
		if ((unsigned long long)offset >= -MAX_ERRNO) {
			ret = -EOVERFLOW;
			break;
		}
		file->f_pos = offset;
		ret = file->f_pos;
		force_successful_syscall_return();
		break;
	default:
		ret = -EINVAL;
	}
	inode_unlock(file_inode(file));
	return ret;
}

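/*
 * Worked example (illustrative, an assumption about typical usage): a read
 * of physical address 0x80000000 via /dev/mem is done as
 *
 *	lseek(fd, 0x80000000, SEEK_SET);
 *	read(fd, buf, sizeof(buf));
 *
 * Offsets that look negative as an off_t are accepted here; only the top
 * MAX_ERRNO values are refused (-EOVERFLOW), so userland can never mistake
 * a huge f_pos for an errno.
 */
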
static int open_port(struct inode *inode, struct file *filp)
{
	return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}

#define zero_lseek	null_lseek
#define full_lseek	null_lseek
#define write_zero	write_null
#define write_iter_zero	write_iter_null
#define open_mem	open_port
#define open_kmem	open_mem

static const struct file_operations __maybe_unused mem_fops = {
	.llseek		= memory_lseek,
	.read		= read_mem,
	.write		= write_mem,
	.mmap		= mmap_mem,
	.open		= open_mem,
#ifndef CONFIG_MMU
	.get_unmapped_area = get_unmapped_area_mem,
	.mmap_capabilities = memory_mmap_capabilities,
#endif
};

static const struct file_operations __maybe_unused kmem_fops = {
	.llseek		= memory_lseek,
	.read		= read_kmem,
	.write		= write_kmem,
	.mmap		= mmap_kmem,
	.open		= open_kmem,
#ifndef CONFIG_MMU
	.get_unmapped_area = get_unmapped_area_mem,
	.mmap_capabilities = memory_mmap_capabilities,
#endif
};

static const struct file_operations null_fops = {
	.llseek		= null_lseek,
	.read		= read_null,
	.write		= write_null,
	.read_iter	= read_iter_null,
	.write_iter	= write_iter_null,
	.splice_write	= splice_write_null,
};

static const struct file_operations __maybe_unused port_fops = {
	.llseek		= memory_lseek,
	.read		= read_port,
	.write		= write_port,
	.open		= open_port,
};

static const struct file_operations zero_fops = {
	.llseek		= zero_lseek,
	.write		= write_zero,
	.read_iter	= read_iter_zero,
	.write_iter	= write_iter_zero,
	.mmap		= mmap_zero,
	.get_unmapped_area = get_unmapped_area_zero,
#ifndef CONFIG_MMU
	.mmap_capabilities = zero_mmap_capabilities,
#endif
};

static const struct file_operations full_fops = {
	.llseek		= full_lseek,
	.read_iter	= read_iter_zero,
	.write		= write_full,
};

static const struct memdev {
	const char *name;
	umode_t mode;
	const struct file_operations *fops;
	fmode_t fmode;
} devlist[] = {
#ifdef CONFIG_DEVMEM
	 [1] = { "mem", 0, &mem_fops, FMODE_UNSIGNED_OFFSET },
#endif
#ifdef CONFIG_DEVKMEM
	 [2] = { "kmem", 0, &kmem_fops, FMODE_UNSIGNED_OFFSET },
#endif
	 [3] = { "null", 0666, &null_fops, 0 },
#ifdef CONFIG_DEVPORT
	 [4] = { "port", 0, &port_fops, 0 },
#endif
	 [5] = { "zero", 0666, &zero_fops, 0 },
	 [7] = { "full", 0666, &full_fops, 0 },
	 [8] = { "random", 0666, &random_fops, FMODE_NOWAIT },
	 [9] = { "urandom", 0666, &urandom_fops, FMODE_NOWAIT },
#ifdef CONFIG_PRINTK
	[11] = { "kmsg", 0644, &kmsg_fops, 0 },
#endif
};

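/*
 * For reference (added note): the index into devlist[] is the minor number
 * under char major MEM_MAJOR (1), which is why static /dev entries are
 * created as, e.g.:
 *
 *	mknod /dev/null c 1 3
 *	mknod /dev/zero c 1 5
 *	mknod /dev/urandom c 1 9
 */
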
static int memory_open(struct inode *inode, struct file *filp)
{
	int minor;
	const struct memdev *dev;

	minor = iminor(inode);
	if (minor >= ARRAY_SIZE(devlist))
		return -ENXIO;

	dev = &devlist[minor];
	if (!dev->fops)
		return -ENXIO;

	filp->f_op = dev->fops;
	filp->f_mode |= dev->fmode;

	if (dev->fops->open)
		return dev->fops->open(inode, filp);

	return 0;
}

static const struct file_operations memory_fops = {
	.open = memory_open,
	.llseek = noop_llseek,
};

static char *mem_devnode(struct device *dev, umode_t *mode)
{
	if (mode && devlist[MINOR(dev->devt)].mode)
		*mode = devlist[MINOR(dev->devt)].mode;
	return NULL;
}

static struct class *mem_class;

static int __init chr_dev_init(void)
{
	int minor;

	if (register_chrdev(MEM_MAJOR, "mem", &memory_fops))
		printk("unable to get major %d for memory devs\n", MEM_MAJOR);

	mem_class = class_create(THIS_MODULE, "mem");
	if (IS_ERR(mem_class))
		return PTR_ERR(mem_class);

	mem_class->devnode = mem_devnode;
	for (minor = 1; minor < ARRAY_SIZE(devlist); minor++) {
		if (!devlist[minor].name)
			continue;

		/*
		 * Create /dev/port?
		 */
		if ((minor == DEVPORT_MINOR) && !arch_has_dev_port())
			continue;

		device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
			      NULL, devlist[minor].name);
	}

	return tty_init();
}

fs_initcall(chr_dev_init);