/*
 * arch/sh/mm/mmap.c
 *
 * Copyright (C) 2008 - 2009  Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <asm/page.h>
#include <asm/processor.h>
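
/*
 * shm_align_mask gives the alignment that shared mappings must honour
 * to avoid virtual cache aliases.  PAGE_SIZE - 1 is the safe default
 * for non-aliasing ("sane") caches; on parts whose D-cache can alias,
 * the cache setup code is expected to widen this mask to cover the
 * cache colour bits.
 */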
unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
EXPORT_SYMBOL(shm_align_mask);

#ifdef CONFIG_MMU
static const pgprot_t protection_map[16] = {
	[VM_NONE]					= PAGE_NONE,
	[VM_READ]					= PAGE_READONLY,
	[VM_WRITE]					= PAGE_COPY,
	[VM_WRITE | VM_READ]				= PAGE_COPY,
	[VM_EXEC]					= PAGE_EXECREAD,
	[VM_EXEC | VM_READ]				= PAGE_EXECREAD,
	[VM_EXEC | VM_WRITE]				= PAGE_COPY,
	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_COPY,
	[VM_SHARED]					= PAGE_NONE,
	[VM_SHARED | VM_READ]				= PAGE_READONLY,
	[VM_SHARED | VM_WRITE]				= PAGE_WRITEONLY,
	[VM_SHARED | VM_WRITE | VM_READ]		= PAGE_SHARED,
	[VM_SHARED | VM_EXEC]				= PAGE_EXECREAD,
	[VM_SHARED | VM_EXEC | VM_READ]			= PAGE_EXECREAD,
	[VM_SHARED | VM_EXEC | VM_WRITE]		= PAGE_RWX,
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= PAGE_RWX
};
DECLARE_VM_GET_PAGE_PROT
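
/*
 * DECLARE_VM_GET_PAGE_PROT above expands to the generic
 * vm_get_page_prot(), which indexes protection_map[] by the
 * VM_READ/VM_WRITE/VM_EXEC/VM_SHARED bits of vm_flags.
 */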

/*
 * To avoid cache aliases, we map the shared page with the same colour.
 */
static inline unsigned long COLOUR_ALIGN(unsigned long addr,
					 unsigned long pgoff)
{
	unsigned long base = (addr + shm_align_mask) & ~shm_align_mask;
	unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;

	return base + off;
}
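
/*
 * Worked example (illustrative values, not from this file): with a
 * 16KiB colour size (shm_align_mask == 0x3fff), PAGE_SHIFT == 12,
 * addr == 0x12345000 and pgoff == 3:
 *
 *	base = (0x12345000 + 0x3fff) & ~0x3fff	= 0x12348000
 *	off  = (3 << 12) & 0x3fff		= 0x3000
 *
 * giving 0x1234b000, whose colour bits match those of the file
 * offset, so the user mapping cannot alias the kernel's view of
 * the same page.
 */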

unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int do_colour_align;
	struct vm_unmapped_area_info info;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	do_colour_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_colour_align = 1;

	if (addr) {
		if (do_colour_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = TASK_SIZE;
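	/*
	 * Only the colour bits above the page offset participate in the
	 * alignment: PAGE_MASK & shm_align_mask strips the in-page bits
	 * from the mask.
	 */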
	info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	return vm_unmapped_area(&info);
}

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			       const unsigned long len, const unsigned long pgoff,
			       const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	int do_colour_align;
	struct vm_unmapped_area_info info;

	if (flags & MAP_FIXED) {
		/* We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & shm_align_mask))
			return -EINVAL;
		return addr;
	}

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	do_colour_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_colour_align = 1;

	/* requesting a specific address */
	if (addr) {
		if (do_colour_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}
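
	/*
	 * Search top-down, from just below mm->mmap_base towards
	 * PAGE_SIZE, for a free gap of the requested length and colour.
	 */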
	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = mm->mmap_base;
	info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
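	/*
	 * vm_unmapped_area() returns either a page-aligned address or a
	 * negative errno, so any set low-order bits signal failure.
	 */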
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}
#endif /* CONFIG_MMU */

/*
 * You really shouldn't be using read() or write() on /dev/mem.  This
 * might go away in the future.
 */
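/*
 * Restrict /dev/mem read()/write() to the physical range actually
 * backed by system RAM: from __MEMORY_START up to the physical
 * address of the top of lowmem.
 */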
int valid_phys_addr_range(phys_addr_t addr, size_t count)
{
	if (addr < __MEMORY_START)
		return 0;
	if (addr + count > __pa(high_memory))
		return 0;

	return 1;
}
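
/*
 * mmap() of /dev/mem is not restricted here: device and other
 * non-RAM physical ranges may legitimately be mapped.
 */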
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return 1;
}