GNU Linux-libre 4.14.324-gnu1
arch/s390/kernel/vdso.c
/*
 * vdso setup for s390
 *
 *  Copyright IBM Corp. 2008
 *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/bootmem.h>
#include <linux/compat.h>
#include <asm/asm-offsets.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/vdso.h>
#include <asm/facility.h>

#ifdef CONFIG_COMPAT
extern char vdso32_start, vdso32_end;
static void *vdso32_kbase = &vdso32_start;
static unsigned int vdso32_pages;
static struct page **vdso32_pagelist;
#endif

extern char vdso64_start, vdso64_end;
static void *vdso64_kbase = &vdso64_start;
static unsigned int vdso64_pages;
static struct page **vdso64_pagelist;

/*
 * Should the kernel map a VDSO page into processes and pass its
 * address down to glibc upon exec()?
 */
unsigned int __read_mostly vdso_enabled = 1;
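
/*
 * Illustrative userspace sketch (not part of this file): when
 * vdso_enabled is set, exec() advertises the mapping through the
 * AT_SYSINFO_EHDR entry of the auxiliary vector, which is how glibc
 * finds it.  Compiled as an ordinary user program; kept behind
 * #if 0 so this file still builds:
 */
#if 0
#include <stdio.h>
#include <sys/auxv.h>

int main(void)
{
	/* AT_SYSINFO_EHDR is the base address of the vDSO ELF image */
	unsigned long vdso = getauxval(AT_SYSINFO_EHDR);

	if (vdso)
		printf("[vdso] mapped at 0x%lx\n", vdso);
	else
		printf("no vDSO (kernel booted with vdso=off?)\n");
	return 0;
}
#endif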

static int vdso_fault(const struct vm_special_mapping *sm,
		      struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page **vdso_pagelist;
	unsigned long vdso_pages;

	vdso_pagelist = vdso64_pagelist;
	vdso_pages = vdso64_pages;
#ifdef CONFIG_COMPAT
	if (is_compat_task()) {
		vdso_pagelist = vdso32_pagelist;
		vdso_pages = vdso32_pages;
	}
#endif

	if (vmf->pgoff >= vdso_pages)
		return VM_FAULT_SIGBUS;

	vmf->page = vdso_pagelist[vmf->pgoff];
	get_page(vmf->page);
	return 0;
}
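
/*
 * Note: the vDSO pages are not inserted into the page tables up front;
 * each one is pulled in lazily through the .fault callback above, one
 * page per fault, with vmf->pgoff indexing into the pagelist built by
 * vdso_init() below (code pages first, the shared data page last).
 */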

static int vdso_mremap(const struct vm_special_mapping *sm,
		       struct vm_area_struct *vma)
{
	unsigned long vdso_pages;

	vdso_pages = vdso64_pages;
#ifdef CONFIG_COMPAT
	if (is_compat_task())
		vdso_pages = vdso32_pages;
#endif

	if ((vdso_pages << PAGE_SHIFT) != vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (WARN_ON_ONCE(current->mm != vma->vm_mm))
		return -EFAULT;

	current->mm->context.vdso_base = vma->vm_start;
	return 0;
}
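
/*
 * Illustrative userspace sketch (not part of this file):
 * checkpoint/restore tools such as CRIU relocate the vDSO with
 * mremap(), and the .mremap callback above keeps context.vdso_base in
 * step.  vdso_start, vdso_len and new_addr are placeholders the caller
 * would obtain by parsing /proc/self/maps:
 */
#if 0
#define _GNU_SOURCE		/* for mremap() */
#include <sys/mman.h>

static void *move_vdso(void *vdso_start, size_t vdso_len, void *new_addr)
{
	/* MREMAP_FIXED must be combined with MREMAP_MAYMOVE */
	return mremap(vdso_start, vdso_len, vdso_len,
		      MREMAP_MAYMOVE | MREMAP_FIXED, new_addr);
}
#endif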

static const struct vm_special_mapping vdso_mapping = {
	.name = "[vdso]",
	.fault = vdso_fault,
	.mremap = vdso_mremap,
};

static int __init vdso_setup(char *s)
{
	unsigned long val;
	int rc;

	rc = 0;
	if (strncmp(s, "on", 3) == 0)
		vdso_enabled = 1;
	else if (strncmp(s, "off", 4) == 0)
		vdso_enabled = 0;
	else {
		rc = kstrtoul(s, 0, &val);
		vdso_enabled = rc ? 0 : !!val;
	}
	return !rc;
}
__setup("vdso=", vdso_setup);
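
/*
 * Accepted forms on the kernel command line: "vdso=on", "vdso=off", or
 * any number parsed by kstrtoul(), e.g. "vdso=1"/"vdso=0".  The lengths
 * 3 and 4 passed to strncmp() include the terminating NUL, so only
 * exact matches are accepted.  __setup() handlers return 1 when the
 * argument was consumed, hence the negated error code.
 */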

/*
 * The vdso data page
 */
static union {
	struct vdso_data	data;
	u8			page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = &vdso_data_store.data;
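
/*
 * The union above pads the data to exactly one page.  This single
 * kernel page is mapped read-only as the last page of every process's
 * vDSO (see vdso_init() below), so values the kernel's timekeeping
 * code writes into vdso_data become visible to all userland readers
 * at once.
 */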

/*
 * Set up the vdso data page.
 */
static void __init vdso_init_data(struct vdso_data *vd)
{
	vd->ectg_available = test_facility(31);
}

/*
 * Allocate/free per-cpu vdso data.
 */
#define SEGMENT_ORDER	2
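
/*
 * Order 2 means four pages (16 KB), the size of a complete segment
 * table with 2048 eight-byte entries on s390.
 */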

int vdso_alloc_per_cpu(struct lowcore *lowcore)
{
	unsigned long segment_table, page_table, page_frame;
	struct vdso_per_cpu_data *vd;
	u32 *psal, *aste;
	int i;

	lowcore->vdso_per_cpu_data = __LC_PASTE;

	if (!vdso_enabled)
		return 0;

	segment_table = __get_free_pages(GFP_KERNEL, SEGMENT_ORDER);
	page_table = get_zeroed_page(GFP_KERNEL | GFP_DMA);
	page_frame = get_zeroed_page(GFP_KERNEL);
	if (!segment_table || !page_table || !page_frame)
		goto out;
	arch_set_page_dat(virt_to_page(segment_table), SEGMENT_ORDER);
	arch_set_page_dat(virt_to_page(page_table), 0);

	/* Initialize per-cpu vdso data page */
	vd = (struct vdso_per_cpu_data *) page_frame;
	vd->cpu_nr = lowcore->cpu_nr;
	vd->node_id = cpu_to_node(vd->cpu_nr);

	/* Set up access register mode page table */
	clear_table((unsigned long *) segment_table, _SEGMENT_ENTRY_EMPTY,
		    PAGE_SIZE << SEGMENT_ORDER);
	clear_table((unsigned long *) page_table, _PAGE_INVALID,
		    256*sizeof(unsigned long));

	*(unsigned long *) segment_table = _SEGMENT_ENTRY + page_table;
	*(unsigned long *) page_table = _PAGE_PROTECT + page_frame;

	psal = (u32 *) (page_table + 256*sizeof(unsigned long));
	aste = psal + 32;

	for (i = 4; i < 32; i += 4)
		psal[i] = 0x80000000;

	lowcore->paste[4] = (u32)(addr_t) psal;
	psal[0] = 0x02000000;
	psal[2] = (u32)(addr_t) aste;
	*(unsigned long *) (aste + 2) = segment_table +
		_ASCE_TABLE_LENGTH + _ASCE_USER_BITS + _ASCE_TYPE_SEGMENT;
	aste[4] = (u32)(addr_t) psal;
	lowcore->vdso_per_cpu_data = page_frame;

	return 0;

out:
	free_page(page_frame);
	free_page(page_table);
	free_pages(segment_table, SEGMENT_ORDER);
	return -ENOMEM;
}
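
/*
 * Rough picture of what vdso_alloc_per_cpu() builds (per CPU): the
 * psal/aste pair lives in the upper half of the page_table page, and
 * the ASTE's ASCE points at a one-entry segment table that maps the
 * single protected per-cpu data page:
 *
 *	lowcore->paste[4] --> psal --> aste --> segment_table
 *	                                             |
 *	                                        page_table --> page_frame
 *	                                                    (per-cpu data)
 */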

void vdso_free_per_cpu(struct lowcore *lowcore)
{
	unsigned long segment_table, page_table, page_frame;
	u32 *psal, *aste;

	if (!vdso_enabled)
		return;

	psal = (u32 *)(addr_t) lowcore->paste[4];
	aste = (u32 *)(addr_t) psal[2];
	segment_table = *(unsigned long *)(aste + 2) & PAGE_MASK;
	page_table = *(unsigned long *) segment_table;
	page_frame = *(unsigned long *) page_table;

	free_page(page_frame);
	free_page(page_table);
	free_pages(segment_table, SEGMENT_ORDER);
}

static void vdso_init_cr5(void)
{
	unsigned long cr5;

	if (!vdso_enabled)
		return;
	cr5 = offsetof(struct lowcore, paste);
	__ctl_load(cr5, 5, 5);
}
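
/*
 * Control register 5 is pointed at the paste array in the lowcore, so
 * access-register-mode translation in the vDSO code can reach the
 * per-cpu data page through the psal/aste chain built in
 * vdso_alloc_per_cpu().
 */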

/*
 * This is called from binfmt_elf. We create the special vma for the
 * vDSO and insert it into the mm struct tree.
 */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long vdso_pages;
	unsigned long vdso_base;
	int rc;

	if (!vdso_enabled)
		return 0;
	/*
	 * Only map the vdso for dynamically linked ELF binaries.
	 */
	if (!uses_interp)
		return 0;

	vdso_pages = vdso64_pages;
#ifdef CONFIG_COMPAT
	if (is_compat_task())
		vdso_pages = vdso32_pages;
#endif
	/*
	 * The vDSO had a problem and was disabled; just don't "enable"
	 * it for this process.
	 */
	if (vdso_pages == 0)
		return 0;

	/*
	 * Pick a base address for the vDSO in process space. We try to put
	 * it at vdso_base, which is the "natural" base for it, but we might
	 * fail and end up putting it elsewhere.
	 */
	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;
	vdso_base = get_unmapped_area(NULL, 0, vdso_pages << PAGE_SHIFT, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		rc = vdso_base;
		goto out_up;
	}

	/*
	 * Our vma flags don't include VM_WRITE, so by default the process
	 * isn't allowed to write to those pages.
	 * gdb can break that via the ptrace interface and thus trigger COW
	 * on those pages, but it is then your responsibility never to do
	 * that on the "data" page of the vDSO, or you'll stop getting
	 * kernel updates and your nice userland gettimeofday will be
	 * totally dead. It's fine to use that for setting breakpoints in
	 * the vDSO code pages though.
	 */
	vma = _install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
				       VM_READ|VM_EXEC|
				       VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				       &vdso_mapping);
	if (IS_ERR(vma)) {
		rc = PTR_ERR(vma);
		goto out_up;
	}

	current->mm->context.vdso_base = vdso_base;
	rc = 0;

out_up:
	up_write(&mm->mmap_sem);
	return rc;
}
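
/*
 * Illustrative userspace sketch (not part of this file): the mapping
 * installed above appears under the name "[vdso]" taken from
 * vdso_mapping, e.g. in /proc/self/maps:
 */
#if 0
#include <stdio.h>
#include <string.h>

/* Print the [vdso] line of this process's memory map */
int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/self/maps", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		if (strstr(line, "[vdso]"))
			fputs(line, stdout);
	fclose(f);
	return 0;
}
#endif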

static int __init vdso_init(void)
{
	int i;

	if (!vdso_enabled)
		return 0;
	vdso_init_data(vdso_data);
#ifdef CONFIG_COMPAT
	/* Calculate the size of the 32 bit vDSO */
	vdso32_pages = ((&vdso32_end - &vdso32_start
			 + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;

	/* Make sure pages are in the correct state */
	vdso32_pagelist = kzalloc(sizeof(struct page *) * (vdso32_pages + 1),
				  GFP_KERNEL);
	BUG_ON(vdso32_pagelist == NULL);
	for (i = 0; i < vdso32_pages - 1; i++) {
		struct page *pg = virt_to_page(vdso32_kbase + i*PAGE_SIZE);
		ClearPageReserved(pg);
		get_page(pg);
		vdso32_pagelist[i] = pg;
	}
	vdso32_pagelist[vdso32_pages - 1] = virt_to_page(vdso_data);
	vdso32_pagelist[vdso32_pages] = NULL;
#endif

	/* Calculate the size of the 64 bit vDSO */
	vdso64_pages = ((&vdso64_end - &vdso64_start
			 + PAGE_SIZE - 1) >> PAGE_SHIFT) + 1;

	/* Make sure pages are in the correct state */
	vdso64_pagelist = kzalloc(sizeof(struct page *) * (vdso64_pages + 1),
				  GFP_KERNEL);
	BUG_ON(vdso64_pagelist == NULL);
	for (i = 0; i < vdso64_pages - 1; i++) {
		struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE);
		ClearPageReserved(pg);
		get_page(pg);
		vdso64_pagelist[i] = pg;
	}
	vdso64_pagelist[vdso64_pages - 1] = virt_to_page(vdso_data);
	vdso64_pagelist[vdso64_pages] = NULL;
	if (vdso_alloc_per_cpu(&S390_lowcore))
		BUG();
	vdso_init_cr5();

	get_page(virt_to_page(vdso_data));

	return 0;
}
early_initcall(vdso_init);
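
/*
 * Illustrative userspace sketch (not part of this file): once the vDSO
 * is mapped, glibc typically dispatches clock_gettime()/gettimeofday()
 * to the __kernel_* entry points in the vDSO code pages, so a call like
 * this one usually completes without entering the kernel:
 */
#if 0
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_REALTIME, &ts);
	printf("%lld.%09ld\n", (long long) ts.tv_sec, ts.tv_nsec);
	return 0;
}
#endif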