// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/arm/mm/mmap.c
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shm.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/io.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <asm/cachetype.h>
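
/*
 * COLOUR_ALIGN() rounds "addr" up to the next SHMLBA boundary and then
 * adds the cache colour of the page offset, so that a given page of a
 * file is always mapped at the same colour.  As an illustration (values
 * assume the usual VIPT configuration): with 4 KiB pages and a 16 KiB
 * SHMLBA there are four colours, and a page at pgoff 1 always lands
 * 0x1000 bytes past a 16 KiB boundary.
 */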
#define COLOUR_ALIGN(addr,pgoff)		\
	((((addr)+SHMLBA-1)&~(SHMLBA-1)) +	\
	 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))

/* gap between mmap and stack */
#define MIN_GAP		(128*1024*1024UL)
#define MAX_GAP		((STACK_TOP)/6*5)
#define STACK_RND_MASK	(0x7ff >> (PAGE_SHIFT - 12))
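
/*
 * Note: STACK_RND_MASK matches the kernel's generic ELF stack
 * randomization mask (0x7ff pages with 4 KiB pages, i.e. up to ~8 MiB
 * of jitter), which mmap_base() must budget for below STACK_TOP.
 */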

static int mmap_is_legacy(struct rlimit *rlim_stack)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;

	if (rlim_stack->rlim_cur == RLIM_INFINITY)
		return 1;

	return sysctl_legacy_va_layout;
}

static unsigned long mmap_base(unsigned long rnd, struct rlimit *rlim_stack)
{
	unsigned long gap = rlim_stack->rlim_cur;
	unsigned long pad = stack_guard_gap;

	/* Account for stack randomization if necessary */
	if (current->flags & PF_RANDOMIZE)
		pad += (STACK_RND_MASK << PAGE_SHIFT);

	/* Values close to RLIM_INFINITY can overflow. */
	if (gap + pad > gap)
		gap += pad;

	if (gap < MIN_GAP)
		gap = MIN_GAP;
	else if (gap > MAX_GAP)
		gap = MAX_GAP;

	return PAGE_ALIGN(STACK_TOP - gap - rnd);
}

/*
 * We need to ensure that shared mappings are correctly aligned to
 * avoid aliasing issues with VIPT caches.  We need to ensure that
 * a specific page of an object is always mapped at a multiple of
 * SHMLBA bytes.
 *
 * We unconditionally provide this function for all cases, however
 * in the VIVT case, we optimise out the alignment rules.
 */
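/*
 * (Background: on an aliasing VIPT cache the virtual address bits
 * between PAGE_SHIFT and the SHMLBA boundary select the cache set, so
 * two mappings of the same page that differ in those bits would occupy
 * different cache sets and could go out of sync.)
 */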
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int do_align = 0;
	int aliasing = cache_is_vipt_aliasing();
	struct vm_unmapped_area_info info;

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.
	 */
	if (aliasing)
		do_align = filp || (flags & MAP_SHARED);

	/*
	 * We enforce the MAP_FIXED case.
	 */
	if (flags & MAP_FIXED) {
		if (aliasing && flags & MAP_SHARED &&
		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}

	if (len > TASK_SIZE)
		return -ENOMEM;

	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	return vm_unmapped_area(&info);
}

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			const unsigned long len, const unsigned long pgoff,
			const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	int do_align = 0;
	int aliasing = cache_is_vipt_aliasing();
	struct vm_unmapped_area_info info;

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.
	 */
	if (aliasing)
		do_align = filp || (flags & MAP_SHARED);

	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (aliasing && flags & MAP_SHARED &&
		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}

	/* requesting a specific address */
	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
				(!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = FIRST_USER_ADDRESS;
	info.high_limit = mm->mmap_base;
	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = mm->mmap_base;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

unsigned long arch_mmap_rnd(void)
{
	unsigned long rnd;

	rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);

	return rnd << PAGE_SHIFT;
}
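
/*
 * mmap_rnd_bits is set from CONFIG_ARCH_MMAP_RND_BITS (and is tunable
 * via /proc/sys/vm/mmap_rnd_bits); on ARM with 4 KiB pages this is
 * typically 8-16 bits, i.e. up to 1 MiB-256 MiB of mmap base jitter.
 */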

void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	if (mmap_is_legacy(rlim_stack)) {
		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor, rlim_stack);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}
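
/*
 * An unlimited stack rlimit forces the legacy bottom-up layout above,
 * since mmap_base() has no finite gap to reserve below STACK_TOP;
 * ADDR_COMPAT_LAYOUT and sysctl_legacy_va_layout request it explicitly.
 */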

/*
 * You really shouldn't be using read() or write() on /dev/mem.  This
 * might go away in the future.
 */
int valid_phys_addr_range(phys_addr_t addr, size_t size)
{
	if (addr < PHYS_OFFSET)
		return 0;
	if (addr + size > __pa(high_memory - 1) + 1)
		return 0;

	return 1;
}

/*
 * Do not allow /dev/mem mappings beyond the supported physical range.
 */
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return (pfn + (size >> PAGE_SHIFT)) <= (1 + (PHYS_MASK >> PAGE_SHIFT));
}
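
/*
 * PHYS_MASK bounds the physical address space the page tables can
 * express (32 bits on classic ARM, typically 40 bits with LPAE), so
 * the check above rejects pfn ranges no PTE could ever map.
 */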

#ifdef CONFIG_STRICT_DEVMEM

#include <linux/ioport.h>

/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain
 * address is valid. The argument is a physical page number.
 * We mimic x86 here by disallowing access to system RAM as well as
 * device-exclusive MMIO regions. This effectively disables read()/write()
 * on /dev/mem.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (iomem_is_exclusive(pfn << PAGE_SHIFT))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	return 0;
}

#endif