/*
 * Copyright 2007 Andi Kleen, SUSE Labs.
 * Subject to the GPL, v.2
 *
 * This contains most of the x86 vDSO kernel-side code.
 */
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <linux/cpu.h>
#include <asm/pvclock.h>
#include <asm/vgtod.h>
#include <asm/proto.h>
#include <asm/vdso.h>
#include <asm/vvar.h>
#include <asm/page.h>
#include <asm/hpet.h>
#include <asm/desc.h>
#include <asm/cpufeature.h>

#if defined(CONFIG_X86_64)
unsigned int __read_mostly vdso64_enabled = 1;
#endif
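
/*
 * Wire up a vdso image at boot: point its page array at the backing
 * pages and patch any instruction alternatives into the vdso text.
 * (Descriptive comment added for clarity.)
 */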
void __init init_vdso_image(const struct vdso_image *image)
{
        int i;
        int npages = (image->size) / PAGE_SIZE;

        BUG_ON(image->size % PAGE_SIZE != 0);
        for (i = 0; i < npages; i++)
                image->text_mapping.pages[i] =
                        virt_to_page(image->data + i*PAGE_SIZE);

        apply_alternatives((struct alt_instr *)(image->data + image->alt),
                           (struct alt_instr *)(image->data + image->alt +
                                                image->alt_len));
}

struct linux_binprm;

/*
 * Put the vdso above the (randomized) stack with another randomized
 * offset.  This way there is no hole in the middle of address space.
 * To save memory make sure it is still in the same PTE as the stack
 * top.  This doesn't give that many random bits.
 *
 * Note that this algorithm is imperfect: the distribution of the vdso
 * start address within a PMD is biased toward the end.
 *
 * Only used for the 64-bit and x32 vdsos.
 */
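
/*
 * Illustrative example (numbers chosen for this comment, not from the
 * original source): with 4 KiB pages and 2 MiB PMDs, a start 64 KiB
 * below a PMD boundary and len = 16 KiB yield candidate slots from
 * start up to (boundary - 16 KiB), i.e. 13 pages.  The top slot always
 * abuts the next PMD boundary while the bottom moves with stack
 * randomization, which is why the distribution is biased toward the
 * end of the PMD.
 */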
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
#ifdef CONFIG_X86_32
        return 0;
#else
        unsigned long addr, end;
        unsigned offset;

        /*
         * Round up the start address.  It can start out unaligned as a result
         * of stack start randomization.
         */
        start = PAGE_ALIGN(start);

        /* Round the lowest possible end address up to a PMD boundary. */
        end = (start + len + PMD_SIZE - 1) & PMD_MASK;
        if (end >= TASK_SIZE_MAX)
                end = TASK_SIZE_MAX;
        end -= len;

        if (end > start) {
                offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
                addr = start + (offset << PAGE_SHIFT);
        } else {
                addr = start;
        }

        /*
         * Forcibly align the final address in case we have a hardware
         * issue that requires alignment for performance reasons.
         */
        addr = align_vdso_addr(addr);

        return addr;
#endif
}
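
/*
 * Map the vdso text plus its vvar area into the current process.
 * image->sym_vvar_start is a negative offset, so the vvar pages sit
 * immediately below the vdso text:
 *
 *     addr                                      text_start
 *     | [vvar area: vvar/HPET/pvclock pages]    | [vdso text] |
 *
 * (Layout sketch added for clarity; the vdso image's linker script is
 * the authoritative source for these offsets.)
 */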
static int map_vdso(const struct vdso_image *image, bool calculate_addr)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long addr, text_start;
        int ret = 0;
        static struct page *no_pages[] = {NULL};
        static struct vm_special_mapping vvar_mapping = {
                .name = "[vvar]",
                .pages = no_pages,
        };
        struct pvclock_vsyscall_time_info *pvti;

        if (calculate_addr) {
                addr = vdso_addr(current->mm->start_stack,
                                 image->size - image->sym_vvar_start);
        } else {
                addr = 0;
        }
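
        /*
         * With addr == 0, get_unmapped_area() below is free to pick any
         * suitable address; a nonzero hint requests the randomized slot
         * computed above.
         */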
        down_write(&mm->mmap_sem);

        addr = get_unmapped_area(NULL, addr,
                                 image->size - image->sym_vvar_start, 0, 0);
        if (IS_ERR_VALUE(addr)) {
                ret = addr;
                goto up_fail;
        }

        text_start = addr - image->sym_vvar_start;
        current->mm->context.vdso = (void __user *)text_start;

        /*
         * MAYWRITE to allow gdb to COW and set breakpoints
         */
        vma = _install_special_mapping(mm,
                                       text_start,
                                       image->size,
                                       VM_READ|VM_EXEC|
                                       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
                                       &image->text_mapping);

        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto up_fail;
        }
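
        /*
         * The vvar VMA is installed with no backing pages of its own
         * (no_pages); the actual vvar, HPET and pvclock pages are
         * remapped into it below.
         */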
        vma = _install_special_mapping(mm,
                                       addr,
                                       -image->sym_vvar_start,
                                       VM_READ|VM_MAYREAD,
                                       &vvar_mapping);

        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto up_fail;
        }
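
        /* Expose the kernel's shared __vvar_page (timekeeping data) read-only. */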
        if (image->sym_vvar_page)
                ret = remap_pfn_range(vma,
                                      text_start + image->sym_vvar_page,
                                      __pa_symbol(&__vvar_page) >> PAGE_SHIFT,
                                      PAGE_SIZE,
                                      PAGE_READONLY);

        if (ret)
                goto up_fail;

#ifdef CONFIG_HPET_TIMER
        if (hpet_address && image->sym_hpet_page) {
                ret = io_remap_pfn_range(vma,
                        text_start + image->sym_hpet_page,
                        hpet_address >> PAGE_SHIFT,
                        PAGE_SIZE,
                        pgprot_noncached(PAGE_READONLY));

                if (ret)
                        goto up_fail;
        }
#endif
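
        /*
         * If a paravirtual clock is available, map vcpu 0's pvclock page
         * so the vdso can read its time information without a syscall.
         */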
        pvti = pvclock_pvti_cpu0_va();
        if (pvti && image->sym_pvclock_page) {
                ret = remap_pfn_range(vma,
                                      text_start + image->sym_pvclock_page,
                                      __pa(pvti) >> PAGE_SHIFT,
                                      PAGE_SIZE,
                                      PAGE_KERNEL_VVAR);

                if (ret)
                        goto up_fail;
        }

up_fail:
        if (ret)
                current->mm->context.vdso = NULL;

        up_write(&mm->mmap_sem);
        return ret;
}

#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
static int load_vdso32(void)
{
        if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
                return 0;

        return map_vdso(&vdso_image_32, false);
}
#endif

#ifdef CONFIG_X86_64
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
        if (!vdso64_enabled)
                return 0;

        return map_vdso(&vdso_image_64, true);
}

#ifdef CONFIG_COMPAT
int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
                                       int uses_interp)
{
#ifdef CONFIG_X86_X32_ABI
        if (test_thread_flag(TIF_X32)) {
                if (!vdso64_enabled)
                        return 0;

                return map_vdso(&vdso_image_x32, true);
        }
#endif
#ifdef CONFIG_IA32_EMULATION
        return load_vdso32();
#else
        return 0;
#endif
}
#endif
#else
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
        return load_vdso32();
}
#endif
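
/* "vdso=" boot parameter: "vdso=0" disables the 64-bit vdso, any nonzero value enables it. */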
#ifdef CONFIG_X86_64
static __init int vdso_setup(char *s)
{
        vdso64_enabled = simple_strtoul(s, NULL, 0);
        return 0;
}
__setup("vdso=", vdso_setup);
#endif

#ifdef CONFIG_X86_64
static void vgetcpu_cpu_init(void *arg)
{
        int cpu = smp_processor_id();
        struct desc_struct d = { };
        unsigned long node = 0;
#ifdef CONFIG_NUMA
        node = cpu_to_node(cpu);
#endif
        if (static_cpu_has(X86_FEATURE_RDTSCP))
                write_rdtscp_aux((node << 12) | cpu);

        /*
         * Store cpu number in limit so that it can be loaded
         * quickly in user space in vgetcpu. (12 bits for the CPU
         * and 8 bits for the node)
         */
        d.limit0 = cpu | ((node & 0xf) << 12);
        d.limit = node >> 4;
        d.type = 5;             /* RO data, expand down, accessed */
        d.dpl = 3;              /* Visible to user code */
        d.s = 1;                /* Not a system segment */
        d.p = 1;                /* Present */
        d.d = 1;                /* 32-bit */

        write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
}
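
/*
 * For reference, a sketch of how user space recovers these values.
 * This mirrors __getcpu() in asm/vgtod.h and is shown here purely for
 * illustration:
 *
 *      unsigned int p;
 *      asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
 *      cpu  = p & 0xfff;       // low 12 bits
 *      node = p >> 12;         // remaining bits
 *
 * RDTSCP-capable CPUs get the same encoding from the TSC_AUX MSR,
 * which is written by write_rdtscp_aux() above.
 */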
static int
vgetcpu_cpu_notifier(struct notifier_block *n, unsigned long action, void *arg)
{
        long cpu = (long)arg;

        if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
                smp_call_function_single(cpu, vgetcpu_cpu_init, NULL, 1);

        return NOTIFY_DONE;
}

static int __init init_vdso(void)
{
        init_vdso_image(&vdso_image_64);

#ifdef CONFIG_X86_X32_ABI
        init_vdso_image(&vdso_image_x32);
#endif

        cpu_notifier_register_begin();

        on_each_cpu(vgetcpu_cpu_init, NULL, 1);
        /* notifier priority > KVM */
        __hotcpu_notifier(vgetcpu_cpu_notifier, 30);

        cpu_notifier_register_done();

        return 0;
}
subsys_initcall(init_vdso);
#endif /* CONFIG_X86_64 */