// SPDX-License-Identifier: GPL-2.0
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/syscalls.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/uaccess.h>
#include <linux/elf.h>

#include <asm/elf.h>
#include <asm/compat.h>
#include <asm/ia32.h>
#include <asm/syscalls.h>
#include <asm/mpx.h>
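
/*
 * Note: va_align (see asm/elf.h) is only configured by the CPU setup code
 * on the AMD parts affected by I$ aliasing; everywhere else va_align.flags
 * stays negative and the helpers below return 0, i.e. no extra alignment
 * or address coloring is applied.
 */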

/*
 * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
 */
static unsigned long get_align_mask(void)
{
        /* handle 32- and 64-bit case with a single conditional */
        if (va_align.flags < 0 || !(va_align.flags & (2 - mmap_is_ia32())))
                return 0;

        if (!(current->flags & PF_RANDOMIZE))
                return 0;

        return va_align.mask;
}

/*
 * To avoid aliasing in the I$ on AMD F15h, the bits defined by
 * va_align.bits, [12:upper_bit), are set to a random value instead of
 * zeroing them. This random value is computed once per boot. This form
 * of ASLR is known as "per-boot ASLR".
 *
 * To achieve this, the random value is added to the info.align_offset
 * value before calling vm_unmapped_area() or ORed directly into the
 * address.
 */
static unsigned long get_align_bits(void)
{
        return va_align.bits & get_align_mask();
}
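
/*
 * Illustration (hypothetical values): if va_align.mask covered bits
 * [12:15) and the per-boot random value in va_align.bits were 0x3000,
 * a file mapping would get info.align_mask = 0x7000 and an extra 0x3000
 * in info.align_offset, so vm_unmapped_area() returns addresses whose
 * aliasing bits match the per-boot value (modulo the file offset).
 * align_vdso_addr() below applies the same bits directly: round up to
 * the alignment boundary, then OR the bits in.
 */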

unsigned long align_vdso_addr(unsigned long addr)
{
        unsigned long align_mask = get_align_mask();
        addr = (addr + align_mask) & ~align_mask;
        return addr | get_align_bits();
}

static int __init control_va_addr_alignment(char *str)
{
        /* guard against enabling this on other CPU families */
        if (va_align.flags < 0)
                return 1;

        if (*str == 0)
                return 1;

        if (!strcmp(str, "32"))
                va_align.flags = ALIGN_VA_32;
        else if (!strcmp(str, "64"))
                va_align.flags = ALIGN_VA_64;
        else if (!strcmp(str, "off"))
                va_align.flags = 0;
        else if (!strcmp(str, "on"))
                va_align.flags = ALIGN_VA_32 | ALIGN_VA_64;
        else
                pr_warn("invalid option value: 'align_va_addr=%s'\n", str);

        return 1;
}
__setup("align_va_addr=", control_va_addr_alignment);
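
/*
 * The handler above is wired to the "align_va_addr=" kernel command-line
 * parameter: e.g. booting with "align_va_addr=off" disables the extra
 * alignment, "align_va_addr=32" restricts it to 32-bit tasks; see
 * Documentation/admin-guide/kernel-parameters.txt.
 */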

SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
                unsigned long, prot, unsigned long, flags,
                unsigned long, fd, unsigned long, off)
{
        long error;
        error = -EINVAL;
        if (off & ~PAGE_MASK)
                goto out;

        error = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
out:
        return error;
}
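
/*
 * Note: sys_mmap_pgoff() takes the offset in pages, which is why the byte
 * offset passed to mmap() must be page-aligned (checked above) and is
 * shifted right by PAGE_SHIFT before being passed on.
 */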

static void find_start_end(unsigned long addr, unsigned long flags,
                unsigned long *begin, unsigned long *end)
{
        if (!in_compat_syscall() && (flags & MAP_32BIT)) {
                /* This is usually needed to map code in the small model,
                   so it needs to be in the first 31 bits. Limit it to
                   that. This means we need to move the unmapped base
                   down for this case. This can give conflicts with the
                   heap, but we assume that glibc malloc knows how to
                   fall back to mmap. Give it 1GB of playground for
                   now. -AK */
                *begin = 0x40000000;
                *end = 0x80000000;
                if (current->flags & PF_RANDOMIZE) {
                        *begin = randomize_page(*begin, 0x02000000);
                }
                return;
        }

        *begin = get_mmap_base(1);
        if (in_compat_syscall())
                *end = task_size_32bit();
        else
                *end = task_size_64bit(addr > DEFAULT_MAP_WINDOW);
}
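
/*
 * For the MAP_32BIT case above, the 1GB window starting at 0x40000000 is
 * only shifted by up to 32MB (0x02000000) when randomization is enabled,
 * so most of the window remains usable for large mappings.
 */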

unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        struct vm_unmapped_area_info info;
        unsigned long begin, end;

        addr = mpx_unmapped_area_check(addr, len, flags);
        if (IS_ERR_VALUE(addr))
                return addr;

        if (flags & MAP_FIXED)
                return addr;

        find_start_end(addr, flags, &begin, &end);

        if (len > end)
                return -ENOMEM;

        if (addr) {
                addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
                if (end - len >= addr &&
                    (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }

        info.flags = 0;
        info.length = len;
        info.low_limit = begin;
        info.high_limit = end;
        info.align_mask = 0;
        info.align_offset = pgoff << PAGE_SHIFT;
        if (filp) {
                info.align_mask = get_align_mask();
                info.align_offset += get_align_bits();
        }
        return vm_unmapped_area(&info);
}
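
/*
 * The bottom-up search above is used for the legacy mmap layout and as a
 * fallback from the top-down path below. Note that the extra I$-aliasing
 * alignment is only applied to file-backed mappings (filp != NULL) in
 * both paths.
 */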

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                          const unsigned long len, const unsigned long pgoff,
                          const unsigned long flags)
{
        struct vm_area_struct *vma;
        struct mm_struct *mm = current->mm;
        unsigned long addr = addr0;
        struct vm_unmapped_area_info info;

        addr = mpx_unmapped_area_check(addr, len, flags);
        if (IS_ERR_VALUE(addr))
                return addr;

        /* requested length too big for entire address space */
        if (len > TASK_SIZE)
                return -ENOMEM;

        if (flags & MAP_FIXED)
                return addr;

        /* for MAP_32BIT mappings we force the legacy mmap base */
        if (!in_compat_syscall() && (flags & MAP_32BIT))
                goto bottomup;

        /* requesting a specific address */
        if (addr) {
                addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }

        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
        info.length = len;
        info.low_limit = PAGE_SIZE;
        info.high_limit = get_mmap_base(0);

        /*
         * If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area
         * in the full address space.
         *
         * !in_compat_syscall() check to avoid high addresses for x32.
         */
        if (addr > DEFAULT_MAP_WINDOW && !in_compat_syscall())
                info.high_limit += TASK_SIZE_MAX - DEFAULT_MAP_WINDOW;
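
        /*
         * Mappings above DEFAULT_MAP_WINDOW are therefore strictly opt-in
         * via the hint address, which keeps applications that assume
         * pointers fit in the 47-bit range working on machines with a
         * larger virtual address space.
         */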

        info.align_mask = 0;
        info.align_offset = pgoff << PAGE_SHIFT;
        if (filp) {
                info.align_mask = get_align_mask();
                info.align_offset += get_align_bits();
        }
        addr = vm_unmapped_area(&info);
        if (!(addr & ~PAGE_MASK))
                return addr;
        VM_BUG_ON(addr != -ENOMEM);

bottomup:
        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
}