// SPDX-License-Identifier: GPL-2.0-or-later

/*
 * Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp.
 *                    <benh@kernel.crashing.org>
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/memblock.h>
#include <linux/syscalls.h>
#include <linux/time_namespace.h>
#include <vdso/datapage.h>

#include <asm/syscall.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/firmware.h>
#include <asm/vdso.h>
#include <asm/vdso_datapage.h>
#include <asm/setup.h>

/* The alignment of the vDSO */
#define VDSO_ALIGNMENT	(1 << 16)

extern char vdso32_start, vdso32_end;
extern char vdso64_start, vdso64_end;

long sys_ni_syscall(void);

/*
 * The vdso data page (aka. systemcfg for old ppc64 fans) is here.
 * Once the early boot kernel code no longer needs to muck around
 * with it, it will become dynamically allocated.
 */
static union {
	struct vdso_arch_data	data;
	u8			page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_arch_data *vdso_data = &vdso_data_store.data;

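/* Page offsets within the vvar special mapping, in units of PAGE_SIZE. */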
enum vvar_pages {
	VVAR_DATA_PAGE_OFFSET,
	VVAR_TIMENS_PAGE_OFFSET,
	VVAR_NR_PAGES,
};

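/*
 * mremap() support: if userspace moves the vDSO text mapping, record its new
 * location in mm->context.vdso; resizing the mapping is rejected.
 */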
static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma,
		       unsigned long text_size)
{
	unsigned long new_size = new_vma->vm_end - new_vma->vm_start;

	if (new_size != text_size)
		return -EINVAL;

	current->mm->context.vdso = (void __user *)new_vma->vm_start;

	return 0;
}

static int vdso32_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma)
{
	return vdso_mremap(sm, new_vma, &vdso32_end - &vdso32_start);
}

static int vdso64_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma)
{
	return vdso_mremap(sm, new_vma, &vdso64_end - &vdso64_start);
}

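/* Special mappings describing the vvar data page(s) and the 32-bit/64-bit vDSO text. */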
static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
			     struct vm_area_struct *vma, struct vm_fault *vmf);

static struct vm_special_mapping vvar_spec __ro_after_init = {
	.name = "[vvar]",
	.fault = vvar_fault,
};

static struct vm_special_mapping vdso32_spec __ro_after_init = {
	.name = "[vdso]",
	.mremap = vdso32_mremap,
};

static struct vm_special_mapping vdso64_spec __ro_after_init = {
	.name = "[vdso]",
	.mremap = vdso64_mremap,
};

#ifdef CONFIG_TIME_NS
struct vdso_data *arch_get_vdso_data(void *vvar_page)
{
	return ((struct vdso_arch_data *)vvar_page)->data;
}

/*
 * The vvar mapping contains data for a specific time namespace, so when a task
 * changes namespace we must unmap its vvar data for the old namespace.
 * Subsequent faults will map in data for the new namespace.
 *
 * For more details see timens_setup_vdso_data().
 */
int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
{
	struct mm_struct *mm = task->mm;
	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *vma;

	mmap_read_lock(mm);
	for_each_vma(vmi, vma) {
		unsigned long size = vma->vm_end - vma->vm_start;

		if (vma_is_special_mapping(vma, &vvar_spec))
			zap_page_range(vma, vma->vm_start, size);
	}
	mmap_read_unlock(mm);

	return 0;
}

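/* Look up the vvar page of the faulting task's time namespace, if any. */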
static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
{
	if (likely(vma->vm_mm == current->mm))
		return current->nsproxy->time_ns->vvar_page;

	/*
	 * VM_PFNMAP | VM_IO protect the .fault() handler from being called
	 * through interfaces like /proc/$pid/mem or
	 * process_vm_{readv,writev}() as long as there's no .access()
	 * in special_mapping_vmops.
	 * For more details see check_vma_flags() and __access_remote_vm().
	 */
	WARN(1, "vvar_page accessed remotely");

	return NULL;
}
#else
static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
{
	return NULL;
}
#endif

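/*
 * Fault handler for the vvar mapping: hand out the global vDSO data page, or
 * the per-namespace page for tasks inside a time namespace.
 */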
static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
			     struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *timens_page = find_timens_vvar_page(vma);
	unsigned long pfn;

	switch (vmf->pgoff) {
	case VVAR_DATA_PAGE_OFFSET:
		if (timens_page)
			pfn = page_to_pfn(timens_page);
		else
			pfn = virt_to_pfn(vdso_data);
		break;
#ifdef CONFIG_TIME_NS
	case VVAR_TIMENS_PAGE_OFFSET:
		/*
		 * If a task belongs to a time namespace then a namespace
		 * specific VVAR is mapped with the VVAR_DATA_PAGE_OFFSET and
		 * the real VVAR page is mapped with the VVAR_TIMENS_PAGE_OFFSET
		 * offset.
		 * See also the comment near timens_setup_vdso_data().
		 */
		if (!timens_page)
			return VM_FAULT_SIGBUS;
		pfn = virt_to_pfn(vdso_data);
		break;
#endif /* CONFIG_TIME_NS */
	default:
		return VM_FAULT_SIGBUS;
	}

	return vmf_insert_pfn(vma, vmf->address, pfn);
}

/*
 * This is called from binfmt_elf; we create the special vma for the
 * vDSO and insert it into the mm struct tree.
 */
static int __arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	unsigned long vdso_size, vdso_base, mappings_size;
	struct vm_special_mapping *vdso_spec;
	unsigned long vvar_size = VVAR_NR_PAGES * PAGE_SIZE;
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (is_32bit_task()) {
		vdso_spec = &vdso32_spec;
		vdso_size = &vdso32_end - &vdso32_start;
	} else {
		vdso_spec = &vdso64_spec;
		vdso_size = &vdso64_end - &vdso64_start;
	}

	mappings_size = vdso_size + vvar_size;
	mappings_size += (VDSO_ALIGNMENT - 1) & PAGE_MASK;

	/*
	 * Pick a base address for the vDSO in process space.
	 * Add enough to the size so that the result can be aligned.
	 */
	vdso_base = get_unmapped_area(NULL, 0, mappings_size, 0, 0);
	if (IS_ERR_VALUE(vdso_base))
		return vdso_base;

	/* Add required alignment. */
	vdso_base = ALIGN(vdso_base, VDSO_ALIGNMENT);

	/*
	 * Put vDSO base into mm struct. We need to do this before calling
	 * install_special_mapping or the perf counter mmap tracking code
	 * will fail to recognise it as a vDSO.
	 */
	mm->context.vdso = (void __user *)vdso_base + vvar_size;

	vma = _install_special_mapping(mm, vdso_base, vvar_size,
				       VM_READ | VM_MAYREAD | VM_IO |
				       VM_DONTDUMP | VM_PFNMAP, &vvar_spec);
	if (IS_ERR(vma))
		return PTR_ERR(vma);

	/*
	 * Our vma flags don't have VM_WRITE, so by default the process isn't
	 * allowed to write those pages.
	 * gdb can break that with the ptrace interface, and thus trigger COW
	 * on those pages, but it's then your responsibility to never do that
	 * on the "data" page of the vDSO or you'll stop getting kernel
	 * updates and your nice userland gettimeofday will be totally dead.
	 * It's fine to use that for setting breakpoints in the vDSO code
	 * pages though.
	 */
	vma = _install_special_mapping(mm, vdso_base + vvar_size, vdso_size,
				       VM_READ | VM_EXEC | VM_MAYREAD |
				       VM_MAYWRITE | VM_MAYEXEC, vdso_spec);
	if (IS_ERR(vma))
		do_munmap(mm, vdso_base, vvar_size, NULL);

	return PTR_ERR_OR_ZERO(vma);
}

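/*
 * Entry point used by the ELF loader: serialise on the mmap lock, map the
 * vDSO, and leave mm->context.vdso cleared if the mapping could not be set up.
 */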
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	int rc;

	mm->context.vdso = NULL;

	if (mmap_write_lock_killable(mm))
		return -EINTR;

	rc = __arch_setup_additional_pages(bprm, uses_interp);
	if (rc)
		mm->context.vdso = NULL;

	mmap_write_unlock(mm);

	return rc;
}

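/*
 * Helper for vdso_fixup_features(): run the named fixup routine
 * (do_feature_fixups() or do_lwsync_fixups()) over one section of the
 * 32-bit or 64-bit vDSO image.
 */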
#define VDSO_DO_FIXUPS(type, value, bits, sec) do {					\
	void *__start = (void *)VDSO##bits##_SYMBOL(&vdso##bits##_start, sec##_start);	\
	void *__end = (void *)VDSO##bits##_SYMBOL(&vdso##bits##_start, sec##_end);	\
											\
	do_##type##_fixups((value), __start, __end);					\
} while (0)

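/* Patch the CPU, MMU, firmware and lwsync feature sections of the vDSO images. */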
static void __init vdso_fixup_features(void)
{
#ifdef CONFIG_PPC64
	VDSO_DO_FIXUPS(feature, cur_cpu_spec->cpu_features, 64, ftr_fixup);
	VDSO_DO_FIXUPS(feature, cur_cpu_spec->mmu_features, 64, mmu_ftr_fixup);
	VDSO_DO_FIXUPS(feature, powerpc_firmware_features, 64, fw_ftr_fixup);
	VDSO_DO_FIXUPS(lwsync, cur_cpu_spec->cpu_features, 64, lwsync_fixup);
#endif /* CONFIG_PPC64 */

#ifdef CONFIG_VDSO32
	VDSO_DO_FIXUPS(feature, cur_cpu_spec->cpu_features, 32, ftr_fixup);
	VDSO_DO_FIXUPS(feature, cur_cpu_spec->mmu_features, 32, mmu_ftr_fixup);
#ifdef CONFIG_PPC64
	VDSO_DO_FIXUPS(feature, powerpc_firmware_features, 32, fw_ftr_fixup);
#endif /* CONFIG_PPC64 */
	VDSO_DO_FIXUPS(lwsync, cur_cpu_spec->cpu_features, 32, lwsync_fixup);
#endif /* CONFIG_VDSO32 */
}

/*
 * Called from setup_arch to initialize the bitmap of available
 * syscalls in the systemcfg page.
 */
static void __init vdso_setup_syscall_map(void)
{
	unsigned int i;

	for (i = 0; i < NR_syscalls; i++) {
		if (sys_call_table[i] != (void *)&sys_ni_syscall)
			vdso_data->syscall_map[i >> 5] |= 0x80000000UL >> (i & 0x1f);
		if (IS_ENABLED(CONFIG_COMPAT) &&
		    compat_sys_call_table[i] != (void *)&sys_ni_syscall)
			vdso_data->compat_syscall_map[i >> 5] |= 0x80000000UL >> (i & 0x1f);
	}
}

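/* Initialise the SPRG_VDSO register used by the vDSO getcpu() fast path. */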
#ifdef CONFIG_PPC64
int vdso_getcpu_init(void)
{
	unsigned long cpu, node, val;

	/*
	 * SPRG_VDSO contains the CPU in the bottom 16 bits and the NUMA node
	 * in the next 16 bits. The VDSO uses this to implement getcpu().
	 */
	cpu = get_cpu();
	WARN_ON_ONCE(cpu > 0xffff);
	node = cpu_to_node(cpu);
	WARN_ON_ONCE(node > 0xffff);

	val = (cpu & 0xffff) | ((node & 0xffff) << 16);
	mtspr(SPRN_SPRG_VDSO_WRITE, val);
	get_paca()->sprg_vdso = val;
	put_cpu();

	return 0;
}
/* We need to call this before SMP init */
early_initcall(vdso_getcpu_init);
#endif /* CONFIG_PPC64 */

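/*
 * Build the NULL-terminated array of struct page pointers covering a vDSO
 * image, for use as the .pages member of its vm_special_mapping.
 */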
static struct page ** __init vdso_setup_pages(void *start, void *end)
{
	int i;
	struct page **pagelist;
	int pages = (end - start) >> PAGE_SHIFT;

	pagelist = kcalloc(pages + 1, sizeof(struct page *), GFP_KERNEL);
	if (!pagelist)
		panic("%s: Cannot allocate page list for VDSO", __func__);

	for (i = 0; i < pages; i++)
		pagelist[i] = virt_to_page(start + i * PAGE_SIZE);

	return pagelist;
}

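/* Boot-time initialisation of the vDSO data page and image page lists. */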
static int __init vdso_init(void)
{
#ifdef CONFIG_PPC64
	/*
	 * Fill up the "systemcfg" stuff for backward compatibility.
	 */
	strcpy((char *)vdso_data->eye_catcher, "SYSTEMCFG:PPC64");
	vdso_data->version.major = SYSTEMCFG_MAJOR;
	vdso_data->version.minor = SYSTEMCFG_MINOR;
	vdso_data->processor = mfspr(SPRN_PVR);
	/*
	 * Fake the old platform number for pSeries and add
	 * in the LPAR bit if necessary.
	 */
	vdso_data->platform = 0x100;
	if (firmware_has_feature(FW_FEATURE_LPAR))
		vdso_data->platform |= 1;
	vdso_data->physicalMemorySize = memblock_phys_mem_size();
	vdso_data->dcache_size = ppc64_caches.l1d.size;
	vdso_data->dcache_line_size = ppc64_caches.l1d.line_size;
	vdso_data->icache_size = ppc64_caches.l1i.size;
	vdso_data->icache_line_size = ppc64_caches.l1i.line_size;
	vdso_data->dcache_block_size = ppc64_caches.l1d.block_size;
	vdso_data->icache_block_size = ppc64_caches.l1i.block_size;
	vdso_data->dcache_log_block_size = ppc64_caches.l1d.log_block_size;
	vdso_data->icache_log_block_size = ppc64_caches.l1i.log_block_size;
#endif /* CONFIG_PPC64 */

	vdso_setup_syscall_map();

	vdso_fixup_features();

	if (IS_ENABLED(CONFIG_VDSO32))
		vdso32_spec.pages = vdso_setup_pages(&vdso32_start, &vdso32_end);

	if (IS_ENABLED(CONFIG_PPC64))
		vdso64_spec.pages = vdso_setup_pages(&vdso64_start, &vdso64_end);

	return 0;
}
arch_initcall(vdso_init);