// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2008
 * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/memblock.h>
#include <linux/compat.h>
#include <asm/asm-offsets.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/vdso.h>
#include <asm/facility.h>
#ifdef CONFIG_COMPAT_VDSO
extern char vdso32_start, vdso32_end;
static void *vdso32_kbase = &vdso32_start;
static unsigned int vdso32_pages;
static struct page **vdso32_pagelist;
#endif

extern char vdso64_start, vdso64_end;
static void *vdso64_kbase = &vdso64_start;
static unsigned int vdso64_pages;
static struct page **vdso64_pagelist;
/*
 * Should the kernel map a VDSO page into processes and pass its
 * address down to glibc upon exec()?
 */
unsigned int __read_mostly vdso_enabled = 1;
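/*
 * Fault handler for the vDSO special mapping: pick the page list of the
 * vDSO flavour mapped in this mm (64-bit or compat) and hand back the
 * page that backs the faulting offset.
 */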
static vm_fault_t vdso_fault(const struct vm_special_mapping *sm,
			     struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page **vdso_pagelist;
	unsigned long vdso_pages;

	vdso_pagelist = vdso64_pagelist;
	vdso_pages = vdso64_pages;
#ifdef CONFIG_COMPAT_VDSO
	if (vma->vm_mm->context.compat_mm) {
		vdso_pagelist = vdso32_pagelist;
		vdso_pages = vdso32_pages;
	}
#endif

	if (vmf->pgoff >= vdso_pages)
		return VM_FAULT_SIGBUS;

	vmf->page = vdso_pagelist[vmf->pgoff];
	get_page(vmf->page);
	return 0;
}
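/*
 * mremap() callback for the vDSO special mapping: check that the whole
 * vDSO is being moved and keep mm->context.vdso_base in sync with the
 * new location.
 */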
static int vdso_mremap(const struct vm_special_mapping *sm,
		       struct vm_area_struct *vma)
{
	unsigned long vdso_pages;

	vdso_pages = vdso64_pages;
#ifdef CONFIG_COMPAT_VDSO
	if (vma->vm_mm->context.compat_mm)
		vdso_pages = vdso32_pages;
#endif

	if ((vdso_pages << PAGE_SHIFT) != vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (WARN_ON_ONCE(current->mm != vma->vm_mm))
		return -EFAULT;

	current->mm->context.vdso_base = vma->vm_start;
	return 0;
}

static const struct vm_special_mapping vdso_mapping = {
	.name = "[vdso]",
	.fault = vdso_fault,
	.mremap = vdso_mremap,
};
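/*
 * Handle the "vdso=" kernel command line parameter.
 */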
static int __init vdso_setup(char *str)
{
	bool enabled;

	if (!kstrtobool(str, &enabled))
		vdso_enabled = enabled;
	return 1;
}
__setup("vdso=", vdso_setup);
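/*
 * The vDSO data page. The union pads struct vdso_data to a full page;
 * this page is mapped into user space as the last page of each vDSO.
 */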
static union {
	struct vdso_data	data;
	u8			page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = &vdso_data_store.data;
/*
 * Setup vdso data page.
 */
static void __init vdso_init_data(struct vdso_data *vd)
{
	vd->ectg_available = test_facility(31);
}

/*
 * Allocate/free per cpu vdso data.
 */
#define SEGMENT_ORDER	2

/*
 * The initial vdso_data structure for the boot CPU. Eventually
 * it is replaced with a properly allocated structure in vdso_init.
 * This is necessary because a valid S390_lowcore.vdso_per_cpu_data
 * pointer is required to be able to return from an interrupt or
 * program check. See the exit paths in entry.S.
 */
struct vdso_data boot_vdso_data __initdata;
void __init vdso_alloc_boot_cpu(struct lowcore *lowcore)
{
	lowcore->vdso_per_cpu_data = (unsigned long) &boot_vdso_data;
}
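/*
 * Allocate the per-CPU vDSO address space for one CPU: a segment table,
 * a page table and a data page, wired together and published through
 * lowcore->vdso_asce and lowcore->vdso_per_cpu_data.
 */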
int vdso_alloc_per_cpu(struct lowcore *lowcore)
{
	unsigned long segment_table, page_table, page_frame;
	struct vdso_per_cpu_data *vd;

	segment_table = __get_free_pages(GFP_KERNEL, SEGMENT_ORDER);
	page_table = get_zeroed_page(GFP_KERNEL);
	page_frame = get_zeroed_page(GFP_KERNEL);
	if (!segment_table || !page_table || !page_frame)
		goto out;
	arch_set_page_dat(virt_to_page(segment_table), SEGMENT_ORDER);
	arch_set_page_dat(virt_to_page(page_table), 0);

	/* Initialize per-cpu vdso data page */
	vd = (struct vdso_per_cpu_data *) page_frame;
	vd->cpu_nr = lowcore->cpu_nr;
	vd->node_id = cpu_to_node(vd->cpu_nr);

	/* Set up page table for the vdso address space */
	memset64((u64 *)segment_table, _SEGMENT_ENTRY_EMPTY, _CRST_ENTRIES);
	memset64((u64 *)page_table, _PAGE_INVALID, PTRS_PER_PTE);

	*(unsigned long *) segment_table = _SEGMENT_ENTRY + page_table;
	*(unsigned long *) page_table = _PAGE_PROTECT + page_frame;

	lowcore->vdso_asce = segment_table +
		_ASCE_TABLE_LENGTH + _ASCE_USER_BITS + _ASCE_TYPE_SEGMENT;
	lowcore->vdso_per_cpu_data = page_frame;
	return 0;

out:
	free_page(page_frame);
	free_page(page_table);
	free_pages(segment_table, SEGMENT_ORDER);
	return -ENOMEM;
}
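/*
 * Release the per-CPU vDSO tables and data page set up by
 * vdso_alloc_per_cpu(), walking them back from lowcore->vdso_asce.
 */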
void vdso_free_per_cpu(struct lowcore *lowcore)
{
	unsigned long segment_table, page_table, page_frame;

	segment_table = lowcore->vdso_asce & PAGE_MASK;
	page_table = *(unsigned long *) segment_table;
	page_frame = *(unsigned long *) page_table;

	free_page(page_frame);
	free_page(page_table);
	free_pages(segment_table, SEGMENT_ORDER);
}
/*
 * This is called from binfmt_elf: create the special vma for the
 * vDSO and insert it into the mm's vma tree.
 */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long vdso_pages;
	unsigned long vdso_base;
	int rc;

	if (!vdso_enabled)
		return 0;

	vdso_pages = vdso64_pages;
#ifdef CONFIG_COMPAT_VDSO
	mm->context.compat_mm = is_compat_task();
	if (mm->context.compat_mm)
		vdso_pages = vdso32_pages;
#endif
	/*
	 * vDSO has a problem and was disabled, just don't "enable" it for
	 * the process
	 */
	if (vdso_pages == 0)
		return 0;

	/*
	 * pick a base address for the vDSO in process space. We try to put
	 * it at vdso_base which is the "natural" base for it, but we might
	 * fail and end up putting it elsewhere.
	 */
	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;
	vdso_base = get_unmapped_area(NULL, 0, vdso_pages << PAGE_SHIFT, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		rc = vdso_base;
		goto out_up;
	}

	/*
	 * our vma flags don't have VM_WRITE so by default, the process
	 * isn't allowed to write those pages.
	 * gdb can break that with ptrace interface, and thus trigger COW
	 * on those pages but it's then your responsibility to never do that
	 * on the "data" page of the vDSO or you'll stop getting kernel
	 * updates and your nice userland gettimeofday will be totally dead.
	 * It's fine to use that for setting breakpoints in the vDSO code
	 * pages though.
	 */
	vma = _install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       &vdso_mapping);
	if (IS_ERR(vma)) {
		rc = PTR_ERR(vma);
		goto out_up;
	}

	current->mm->context.vdso_base = vdso_base;
	rc = 0;
out_up:
	up_write(&mm->mmap_sem);
	return rc;
}
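/*
 * Calculate the vDSO sizes, build the page lists that back the vDSO
 * mappings and allocate the per-CPU vDSO data for the boot CPU.
 */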
static int __init vdso_init(void)
{
	int i;

	vdso_init_data(vdso_data);
#ifdef CONFIG_COMPAT_VDSO
	/* Calculate the size of the 32 bit vDSO */
	vdso32_pages = ((&vdso32_end - &vdso32_start
			 + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;

	/* Make sure pages are in the correct state */
	vdso32_pagelist = kcalloc(vdso32_pages + 1, sizeof(struct page *),
				  GFP_KERNEL);
	BUG_ON(vdso32_pagelist == NULL);
	for (i = 0; i < vdso32_pages - 1; i++) {
		struct page *pg = virt_to_page(vdso32_kbase + i*PAGE_SIZE);
		get_page(pg);
		vdso32_pagelist[i] = pg;
	}
	vdso32_pagelist[vdso32_pages - 1] = virt_to_page(vdso_data);
	vdso32_pagelist[vdso32_pages] = NULL;
#endif

	/* Calculate the size of the 64 bit vDSO */
	vdso64_pages = ((&vdso64_end - &vdso64_start
			 + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;

	/* Make sure pages are in the correct state */
	vdso64_pagelist = kcalloc(vdso64_pages + 1, sizeof(struct page *),
				  GFP_KERNEL);
	BUG_ON(vdso64_pagelist == NULL);
	for (i = 0; i < vdso64_pages - 1; i++) {
		struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE);
		get_page(pg);
		vdso64_pagelist[i] = pg;
	}
	vdso64_pagelist[vdso64_pages - 1] = virt_to_page(vdso_data);
	vdso64_pagelist[vdso64_pages] = NULL;
	if (vdso_alloc_per_cpu(&S390_lowcore))
		BUG();

	get_page(virt_to_page(vdso_data));

	return 0;
}
early_initcall(vdso_init);