GNU Linux-libre 5.10.217-gnu1
arch/powerpc/kernel/vdso.c
// SPDX-License-Identifier: GPL-2.0-or-later

/*
 *    Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp.
 *                       <benh@kernel.crashing.org>
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/memblock.h>

#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/firmware.h>
#include <asm/vdso.h>
#include <asm/vdso_datapage.h>
#include <asm/setup.h>

#undef DEBUG

#ifdef DEBUG
#define DBG(fmt...) printk(fmt)
#else
#define DBG(fmt...)
#endif

/* Max supported size for symbol names */
#define MAX_SYMNAME     64

/* The alignment of the vDSO */
#define VDSO_ALIGNMENT  (1 << 16)

static unsigned int vdso32_pages;
static void *vdso32_kbase;
static struct page **vdso32_pagelist;
unsigned long vdso32_sigtramp;
unsigned long vdso32_rt_sigtramp;

#ifdef CONFIG_VDSO32
extern char vdso32_start, vdso32_end;
#endif

#ifdef CONFIG_PPC64
extern char vdso64_start, vdso64_end;
static void *vdso64_kbase = &vdso64_start;
static unsigned int vdso64_pages;
static struct page **vdso64_pagelist;
unsigned long vdso64_rt_sigtramp;
#endif /* CONFIG_PPC64 */

static int vdso_ready;

/*
 * The vdso data page (aka. systemcfg for old ppc64 fans) is here.
 * Once the early boot kernel code no longer needs to muck around
 * with it, it will become dynamically allocated
 */
static union {
        struct vdso_data        data;
        u8                      page[PAGE_SIZE];
} vdso_data_store __page_aligned_data;
struct vdso_data *vdso_data = &vdso_data_store.data;
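/*
 * Note: the data page is mapped as the last page of each process's vDSO
 * mapping, right after the vDSO text pages (see the pagelist setup in
 * vdso_init() below).  Code in the vDSO locates it through the
 * __kernel_datapage_offset word patched by vdso_fixup_datapage(), roughly:
 *
 *     datapage = &__kernel_datapage_offset + __kernel_datapage_offset;
 *
 * (the lookup itself lives in the vDSO assembly sources, not in this file).
 */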

/* Format of the patch table */
struct vdso_patch_def
{
        unsigned long   ftr_mask, ftr_value;
        const char      *gen_name;
        const char      *fix_name;
};

/* Table of functions to patch based on the CPU type/revision
 *
 * Currently, we only change sync_dicache to do nothing on processors
 * with a coherent icache
 */
static struct vdso_patch_def vdso_patches[] = {
        {
                CPU_FTR_COHERENT_ICACHE, CPU_FTR_COHERENT_ICACHE,
                "__kernel_sync_dicache", "__kernel_sync_dicache_p5"
        },
};

/*
 * Some info carried around for each of the vDSO images while we parse
 * them at boot time.
 */
struct lib32_elfinfo
{
        Elf32_Ehdr      *hdr;           /* ptr to ELF */
        Elf32_Sym       *dynsym;        /* ptr to .dynsym section */
        unsigned long   dynsymsize;     /* size of .dynsym section */
        char            *dynstr;        /* ptr to .dynstr section */
        unsigned long   text;           /* offset of .text section in .so */
};

struct lib64_elfinfo
{
        Elf64_Ehdr      *hdr;
        Elf64_Sym       *dynsym;
        unsigned long   dynsymsize;
        char            *dynstr;
        unsigned long   text;
};


/*
 * This is called from binfmt_elf, we create the special vma for the
 * vDSO and insert it into the mm struct tree
 */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
        struct mm_struct *mm = current->mm;
        struct page **vdso_pagelist;
        unsigned long vdso_pages;
        unsigned long vdso_base;
        int rc;

        if (!vdso_ready)
                return 0;

#ifdef CONFIG_PPC64
        if (is_32bit_task()) {
                vdso_pagelist = vdso32_pagelist;
                vdso_pages = vdso32_pages;
                vdso_base = VDSO32_MBASE;
        } else {
                vdso_pagelist = vdso64_pagelist;
                vdso_pages = vdso64_pages;
                /*
                 * On 64bit we don't have a preferred map address. This
                 * allows get_unmapped_area to find an area near other mmaps
                 * and most likely share a SLB entry.
                 */
                vdso_base = 0;
        }
#else
        vdso_pagelist = vdso32_pagelist;
        vdso_pages = vdso32_pages;
        vdso_base = VDSO32_MBASE;
#endif

        current->mm->context.vdso_base = 0;

        /* vDSO has a problem and was disabled, just don't "enable" it for the
         * process
         */
        if (vdso_pages == 0)
                return 0;
        /* Add a page to the vdso size for the data page */
        vdso_pages++;

        /*
         * pick a base address for the vDSO in process space. We try to put it
         * at vdso_base which is the "natural" base for it, but we might fail
         * and end up putting it elsewhere.
         * Add enough to the size so that the result can be aligned.
         */
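        /*
         * The extra ((VDSO_ALIGNMENT - 1) & PAGE_MASK) bytes requested below
         * are the whole pages of alignment slack; ALIGN() then rounds the
         * returned base up to a VDSO_ALIGNMENT boundary inside that slack.
         */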
        if (mmap_write_lock_killable(mm))
                return -EINTR;
        vdso_base = get_unmapped_area(NULL, vdso_base,
                                      (vdso_pages << PAGE_SHIFT) +
                                      ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
                                      0, 0);
        if (IS_ERR_VALUE(vdso_base)) {
                rc = vdso_base;
                goto fail_mmapsem;
        }

        /* Add required alignment. */
        vdso_base = ALIGN(vdso_base, VDSO_ALIGNMENT);

        /*
         * Put vDSO base into mm struct. We need to do this before calling
         * install_special_mapping or the perf counter mmap tracking code
         * will fail to recognise it as a vDSO (since arch_vma_name fails).
         */
        current->mm->context.vdso_base = vdso_base;

        /*
         * Our VMA flags don't include VM_WRITE, so by default the process
         * isn't allowed to write to these pages.
         * gdb can override that through the ptrace interface and trigger COW
         * on them, but it is then its responsibility never to do so on the
         * vDSO "data" page: a private copy of that page stops receiving
         * kernel updates and the userland gettimeofday goes dead.  Using
         * ptrace to set breakpoints in the vDSO code pages is fine, though.
         */
        rc = install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
                                     VM_READ|VM_EXEC|
                                     VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
                                     vdso_pagelist);
        if (rc) {
                current->mm->context.vdso_base = 0;
                goto fail_mmapsem;
        }

        mmap_write_unlock(mm);
        return 0;

 fail_mmapsem:
        mmap_write_unlock(mm);
        return rc;
}

const char *arch_vma_name(struct vm_area_struct *vma)
{
        if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso_base)
                return "[vdso]";
        return NULL;
}



#ifdef CONFIG_VDSO32
static void * __init find_section32(Elf32_Ehdr *ehdr, const char *secname,
                                  unsigned long *size)
{
        Elf32_Shdr *sechdrs;
        unsigned int i;
        char *secnames;

        /* Grab section headers and strings so we can tell who is who */
        sechdrs = (void *)ehdr + ehdr->e_shoff;
        secnames = (void *)ehdr + sechdrs[ehdr->e_shstrndx].sh_offset;

        /* Find the section they want */
        for (i = 1; i < ehdr->e_shnum; i++) {
                if (strcmp(secnames+sechdrs[i].sh_name, secname) == 0) {
                        if (size)
                                *size = sechdrs[i].sh_size;
                        return (void *)ehdr + sechdrs[i].sh_offset;
                }
        }
        if (size)
                *size = 0;
        return NULL;
}

static Elf32_Sym * __init find_symbol32(struct lib32_elfinfo *lib,
                                        const char *symname)
{
        unsigned int i;
        char name[MAX_SYMNAME], *c;

        for (i = 0; i < (lib->dynsymsize / sizeof(Elf32_Sym)); i++) {
                if (lib->dynsym[i].st_name == 0)
                        continue;
                strlcpy(name, lib->dynstr + lib->dynsym[i].st_name,
                        MAX_SYMNAME);
                c = strchr(name, '@');
                if (c)
                        *c = 0;
                if (strcmp(symname, name) == 0)
                        return &lib->dynsym[i];
        }
        return NULL;
}

/* Note that we assume the section is .text and the symbol is relative to
 * the library base
 */
static unsigned long __init find_function32(struct lib32_elfinfo *lib,
                                            const char *symname)
{
        Elf32_Sym *sym = find_symbol32(lib, symname);

        if (sym == NULL) {
                printk(KERN_WARNING "vDSO32: function %s not found !\n",
                       symname);
                return 0;
        }
        return sym->st_value - VDSO32_LBASE;
}

static int __init vdso_do_func_patch32(struct lib32_elfinfo *v32,
                                       struct lib64_elfinfo *v64,
                                       const char *orig, const char *fix)
{
        Elf32_Sym *sym32_gen, *sym32_fix;

        sym32_gen = find_symbol32(v32, orig);
        if (sym32_gen == NULL) {
                printk(KERN_ERR "vDSO32: Can't find symbol %s !\n", orig);
                return -1;
        }
        if (fix == NULL) {
                sym32_gen->st_name = 0;
                return 0;
        }
        sym32_fix = find_symbol32(v32, fix);
        if (sym32_fix == NULL) {
                printk(KERN_ERR "vDSO32: Can't find symbol %s !\n", fix);
                return -1;
        }
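        /*
         * Redirect the generic symbol to the fixed variant by copying the
         * replacement's value, size and binding info over it; a lookup of
         * the generic name then resolves to the replacement code.
         */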
        sym32_gen->st_value = sym32_fix->st_value;
        sym32_gen->st_size = sym32_fix->st_size;
        sym32_gen->st_info = sym32_fix->st_info;
        sym32_gen->st_other = sym32_fix->st_other;
        sym32_gen->st_shndx = sym32_fix->st_shndx;

        return 0;
}
#else /* !CONFIG_VDSO32 */
static unsigned long __init find_function32(struct lib32_elfinfo *lib,
                                            const char *symname)
{
        return 0;
}

static int __init vdso_do_func_patch32(struct lib32_elfinfo *v32,
                                       struct lib64_elfinfo *v64,
                                       const char *orig, const char *fix)
{
        return 0;
}
#endif /* CONFIG_VDSO32 */


#ifdef CONFIG_PPC64

static void * __init find_section64(Elf64_Ehdr *ehdr, const char *secname,
                                  unsigned long *size)
{
        Elf64_Shdr *sechdrs;
        unsigned int i;
        char *secnames;

        /* Grab section headers and strings so we can tell who is who */
        sechdrs = (void *)ehdr + ehdr->e_shoff;
        secnames = (void *)ehdr + sechdrs[ehdr->e_shstrndx].sh_offset;

        /* Find the section they want */
        for (i = 1; i < ehdr->e_shnum; i++) {
                if (strcmp(secnames+sechdrs[i].sh_name, secname) == 0) {
                        if (size)
                                *size = sechdrs[i].sh_size;
                        return (void *)ehdr + sechdrs[i].sh_offset;
                }
        }
        if (size)
                *size = 0;
        return NULL;
}

static Elf64_Sym * __init find_symbol64(struct lib64_elfinfo *lib,
                                        const char *symname)
{
        unsigned int i;
        char name[MAX_SYMNAME], *c;

        for (i = 0; i < (lib->dynsymsize / sizeof(Elf64_Sym)); i++) {
                if (lib->dynsym[i].st_name == 0)
                        continue;
                strlcpy(name, lib->dynstr + lib->dynsym[i].st_name,
                        MAX_SYMNAME);
                c = strchr(name, '@');
                if (c)
                        *c = 0;
                if (strcmp(symname, name) == 0)
                        return &lib->dynsym[i];
        }
        return NULL;
}

/* Note that we assume the section is .text and the symbol is relative to
 * the library base
 */
static unsigned long __init find_function64(struct lib64_elfinfo *lib,
                                            const char *symname)
{
        Elf64_Sym *sym = find_symbol64(lib, symname);

        if (sym == NULL) {
                printk(KERN_WARNING "vDSO64: function %s not found !\n",
                       symname);
                return 0;
        }
        return sym->st_value - VDSO64_LBASE;
}

static int __init vdso_do_func_patch64(struct lib32_elfinfo *v32,
                                       struct lib64_elfinfo *v64,
                                       const char *orig, const char *fix)
{
        Elf64_Sym *sym64_gen, *sym64_fix;

        sym64_gen = find_symbol64(v64, orig);
        if (sym64_gen == NULL) {
                printk(KERN_ERR "vDSO64: Can't find symbol %s !\n", orig);
                return -1;
        }
        if (fix == NULL) {
                sym64_gen->st_name = 0;
                return 0;
        }
        sym64_fix = find_symbol64(v64, fix);
        if (sym64_fix == NULL) {
                printk(KERN_ERR "vDSO64: Can't find symbol %s !\n", fix);
                return -1;
        }
        sym64_gen->st_value = sym64_fix->st_value;
        sym64_gen->st_size = sym64_fix->st_size;
        sym64_gen->st_info = sym64_fix->st_info;
        sym64_gen->st_other = sym64_fix->st_other;
        sym64_gen->st_shndx = sym64_fix->st_shndx;

        return 0;
}

#endif /* CONFIG_PPC64 */


static __init int vdso_do_find_sections(struct lib32_elfinfo *v32,
                                        struct lib64_elfinfo *v64)
{
        void *sect;

        /*
         * Locate symbol tables & text section
         */

#ifdef CONFIG_VDSO32
        v32->dynsym = find_section32(v32->hdr, ".dynsym", &v32->dynsymsize);
        v32->dynstr = find_section32(v32->hdr, ".dynstr", NULL);
        if (v32->dynsym == NULL || v32->dynstr == NULL) {
                printk(KERN_ERR "vDSO32: required symbol section not found\n");
                return -1;
        }
        sect = find_section32(v32->hdr, ".text", NULL);
        if (sect == NULL) {
                printk(KERN_ERR "vDSO32: the .text section was not found\n");
                return -1;
        }
        v32->text = sect - vdso32_kbase;
#endif

#ifdef CONFIG_PPC64
        v64->dynsym = find_section64(v64->hdr, ".dynsym", &v64->dynsymsize);
        v64->dynstr = find_section64(v64->hdr, ".dynstr", NULL);
        if (v64->dynsym == NULL || v64->dynstr == NULL) {
                printk(KERN_ERR "vDSO64: required symbol section not found\n");
                return -1;
        }
        sect = find_section64(v64->hdr, ".text", NULL);
        if (sect == NULL) {
                printk(KERN_ERR "vDSO64: the .text section was not found\n");
                return -1;
        }
        v64->text = sect - vdso64_kbase;
#endif /* CONFIG_PPC64 */

        return 0;
}

static __init void vdso_setup_trampolines(struct lib32_elfinfo *v32,
                                          struct lib64_elfinfo *v64)
{
        /*
         * Find signal trampolines
         */

#ifdef CONFIG_PPC64
        vdso64_rt_sigtramp = find_function64(v64, "__kernel_start_sigtramp_rt64");
#endif
        vdso32_sigtramp    = find_function32(v32, "__kernel_sigtramp32");
        vdso32_rt_sigtramp = find_function32(v32, "__kernel_sigtramp_rt32");
}

static __init int vdso_fixup_datapage(struct lib32_elfinfo *v32,
                                       struct lib64_elfinfo *v64)
{
#ifdef CONFIG_VDSO32
        Elf32_Sym *sym32;
#endif
#ifdef CONFIG_PPC64
        Elf64_Sym *sym64;

        sym64 = find_symbol64(v64, "__kernel_datapage_offset");
        if (sym64 == NULL) {
                printk(KERN_ERR "vDSO64: Can't find symbol "
                       "__kernel_datapage_offset !\n");
                return -1;
        }
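        /*
         * Store, at __kernel_datapage_offset, the distance from that word to
         * the data page, which sits immediately after the vDSO text pages in
         * the process mapping.  The 32-bit image below gets the same
         * treatment.
         */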
        *((int *)(vdso64_kbase + sym64->st_value - VDSO64_LBASE)) =
                (vdso64_pages << PAGE_SHIFT) -
                (sym64->st_value - VDSO64_LBASE);
#endif /* CONFIG_PPC64 */

#ifdef CONFIG_VDSO32
        sym32 = find_symbol32(v32, "__kernel_datapage_offset");
        if (sym32 == NULL) {
                printk(KERN_ERR "vDSO32: Can't find symbol "
                       "__kernel_datapage_offset !\n");
                return -1;
        }
        *((int *)(vdso32_kbase + (sym32->st_value - VDSO32_LBASE))) =
                (vdso32_pages << PAGE_SHIFT) -
                (sym32->st_value - VDSO32_LBASE);
#endif

        return 0;
}


static __init int vdso_fixup_features(struct lib32_elfinfo *v32,
                                      struct lib64_elfinfo *v64)
{
        unsigned long size;
        void *start;

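        /*
         * Apply the same runtime feature patching to the vDSO images that the
         * kernel applies to its own text: each image carries its own
         * __ftr_fixup / __mmu_ftr_fixup / __fw_ftr_fixup / __lwsync_fixup
         * sections, which we locate here by name.
         */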
#ifdef CONFIG_PPC64
        start = find_section64(v64->hdr, "__ftr_fixup", &size);
        if (start)
                do_feature_fixups(cur_cpu_spec->cpu_features,
                                  start, start + size);

        start = find_section64(v64->hdr, "__mmu_ftr_fixup", &size);
        if (start)
                do_feature_fixups(cur_cpu_spec->mmu_features,
                                  start, start + size);

        start = find_section64(v64->hdr, "__fw_ftr_fixup", &size);
        if (start)
                do_feature_fixups(powerpc_firmware_features,
                                  start, start + size);

        start = find_section64(v64->hdr, "__lwsync_fixup", &size);
        if (start)
                do_lwsync_fixups(cur_cpu_spec->cpu_features,
                                 start, start + size);
#endif /* CONFIG_PPC64 */

#ifdef CONFIG_VDSO32
        start = find_section32(v32->hdr, "__ftr_fixup", &size);
        if (start)
                do_feature_fixups(cur_cpu_spec->cpu_features,
                                  start, start + size);

        start = find_section32(v32->hdr, "__mmu_ftr_fixup", &size);
        if (start)
                do_feature_fixups(cur_cpu_spec->mmu_features,
                                  start, start + size);

#ifdef CONFIG_PPC64
        start = find_section32(v32->hdr, "__fw_ftr_fixup", &size);
        if (start)
                do_feature_fixups(powerpc_firmware_features,
                                  start, start + size);
#endif /* CONFIG_PPC64 */

        start = find_section32(v32->hdr, "__lwsync_fixup", &size);
        if (start)
                do_lwsync_fixups(cur_cpu_spec->cpu_features,
                                 start, start + size);
#endif

        return 0;
}

static __init int vdso_fixup_alt_funcs(struct lib32_elfinfo *v32,
                                       struct lib64_elfinfo *v64)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(vdso_patches); i++) {
                struct vdso_patch_def *patch = &vdso_patches[i];
                int match = (cur_cpu_spec->cpu_features & patch->ftr_mask)
                        == patch->ftr_value;
                if (!match)
                        continue;

                DBG("replacing %s with %s...\n", patch->gen_name,
                    patch->fix_name ? patch->fix_name : "NONE");

                /*
                 * Patch the 32 bits and 64 bits symbols. Note that we do not
                 * patch the "." symbol on 64 bits.
                 * It would be easy to do, but doesn't seem to be necessary,
                 * patching the OPD symbol is enough.
                 */
                vdso_do_func_patch32(v32, v64, patch->gen_name,
                                     patch->fix_name);
#ifdef CONFIG_PPC64
                vdso_do_func_patch64(v32, v64, patch->gen_name,
                                     patch->fix_name);
#endif /* CONFIG_PPC64 */
        }

        return 0;
}


static __init int vdso_setup(void)
{
        struct lib32_elfinfo    v32;
        struct lib64_elfinfo    v64;

        v32.hdr = vdso32_kbase;
#ifdef CONFIG_PPC64
        v64.hdr = vdso64_kbase;
#endif
        if (vdso_do_find_sections(&v32, &v64))
                return -1;

        if (vdso_fixup_datapage(&v32, &v64))
                return -1;

        if (vdso_fixup_features(&v32, &v64))
                return -1;

        if (vdso_fixup_alt_funcs(&v32, &v64))
                return -1;

        vdso_setup_trampolines(&v32, &v64);

        return 0;
}

/*
 * Called from setup_arch to initialize the bitmap of available
 * syscalls in the systemcfg page
 */
static void __init vdso_setup_syscall_map(void)
{
        unsigned int i;
        extern unsigned long *sys_call_table;
#ifdef CONFIG_PPC64
        extern unsigned long *compat_sys_call_table;
#endif
        extern unsigned long sys_ni_syscall;

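        /*
         * One bit per syscall, most significant bit first within each 32-bit
         * word: syscall i is recorded in word i >> 5, bit (i & 0x1f) from the
         * top.  Userspace can query the map through the vDSO
         * (__kernel_get_syscall_map) to learn which syscalls are implemented
         * without entering the kernel.
         */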
        for (i = 0; i < NR_syscalls; i++) {
#ifdef CONFIG_PPC64
                if (sys_call_table[i] != sys_ni_syscall)
                        vdso_data->syscall_map_64[i >> 5] |=
                                0x80000000UL >> (i & 0x1f);
                if (IS_ENABLED(CONFIG_COMPAT) &&
                    compat_sys_call_table[i] != sys_ni_syscall)
                        vdso_data->syscall_map_32[i >> 5] |=
                                0x80000000UL >> (i & 0x1f);
#else /* CONFIG_PPC64 */
                if (sys_call_table[i] != sys_ni_syscall)
                        vdso_data->syscall_map_32[i >> 5] |=
                                0x80000000UL >> (i & 0x1f);
#endif /* CONFIG_PPC64 */
        }
}

#ifdef CONFIG_PPC64
int vdso_getcpu_init(void)
{
        unsigned long cpu, node, val;

        /*
         * SPRG_VDSO contains the CPU in the bottom 16 bits and the NUMA node
         * in the next 16 bits.  The VDSO uses this to implement getcpu().
         */
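        /*
         * The vDSO-side getcpu() reads this SPRG back from user mode and
         * unpacks it, roughly:
         *
         *     cpu  = val & 0xffff;
         *     node = (val >> 16) & 0xffff;
         */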
        cpu = get_cpu();
        WARN_ON_ONCE(cpu > 0xffff);

        node = cpu_to_node(cpu);
        WARN_ON_ONCE(node > 0xffff);

        val = (cpu & 0xffff) | ((node & 0xffff) << 16);
        mtspr(SPRN_SPRG_VDSO_WRITE, val);
        get_paca()->sprg_vdso = val;

        put_cpu();

        return 0;
}
/* We need to call this before SMP init */
early_initcall(vdso_getcpu_init);
#endif

static int __init vdso_init(void)
{
        int i;

#ifdef CONFIG_PPC64
        /*
         * Fill up the "systemcfg" stuff for backward compatibility
         */
        strcpy((char *)vdso_data->eye_catcher, "SYSTEMCFG:PPC64");
        vdso_data->version.major = SYSTEMCFG_MAJOR;
        vdso_data->version.minor = SYSTEMCFG_MINOR;
        vdso_data->processor = mfspr(SPRN_PVR);
        /*
         * Fake the old platform number for pSeries and add
         * in LPAR bit if necessary
         */
        vdso_data->platform = 0x100;
        if (firmware_has_feature(FW_FEATURE_LPAR))
                vdso_data->platform |= 1;
        vdso_data->physicalMemorySize = memblock_phys_mem_size();
        vdso_data->dcache_size = ppc64_caches.l1d.size;
        vdso_data->dcache_line_size = ppc64_caches.l1d.line_size;
        vdso_data->icache_size = ppc64_caches.l1i.size;
        vdso_data->icache_line_size = ppc64_caches.l1i.line_size;
        vdso_data->dcache_block_size = ppc64_caches.l1d.block_size;
        vdso_data->icache_block_size = ppc64_caches.l1i.block_size;
        vdso_data->dcache_log_block_size = ppc64_caches.l1d.log_block_size;
        vdso_data->icache_log_block_size = ppc64_caches.l1i.log_block_size;

        /*
         * Calculate the size of the 64 bits vDSO
         */
        vdso64_pages = (&vdso64_end - &vdso64_start) >> PAGE_SHIFT;
        DBG("vdso64_kbase: %p, 0x%x pages\n", vdso64_kbase, vdso64_pages);
#endif /* CONFIG_PPC64 */


#ifdef CONFIG_VDSO32
        vdso32_kbase = &vdso32_start;

        /*
         * Calculate the size of the 32 bits vDSO
         */
        vdso32_pages = (&vdso32_end - &vdso32_start) >> PAGE_SHIFT;
        DBG("vdso32_kbase: %p, 0x%x pages\n", vdso32_kbase, vdso32_pages);
#endif

        /*
         * Set up the syscall map in the vDSO
         */
        vdso_setup_syscall_map();

        /*
         * Initialize the vDSO images in memory, that is, do the necessary
         * fixups of vDSO symbols, locate trampolines, etc.
         */
        if (vdso_setup()) {
                printk(KERN_ERR "vDSO setup failure, not enabled !\n");
                vdso32_pages = 0;
#ifdef CONFIG_PPC64
                vdso64_pages = 0;
#endif
                return 0;
        }

#ifdef CONFIG_VDSO32
        /* Make sure pages are in the correct state */
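        /*
         * The "+ 2" leaves room for the shared data page appended after the
         * text pages and for the NULL entry that terminates the page list.
         */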
        vdso32_pagelist = kcalloc(vdso32_pages + 2, sizeof(struct page *),
                                  GFP_KERNEL);
        BUG_ON(vdso32_pagelist == NULL);
        for (i = 0; i < vdso32_pages; i++) {
                struct page *pg = virt_to_page(vdso32_kbase + i*PAGE_SIZE);
                get_page(pg);
                vdso32_pagelist[i] = pg;
        }
        vdso32_pagelist[i++] = virt_to_page(vdso_data);
        vdso32_pagelist[i] = NULL;
#endif

#ifdef CONFIG_PPC64
        vdso64_pagelist = kcalloc(vdso64_pages + 2, sizeof(struct page *),
                                  GFP_KERNEL);
        BUG_ON(vdso64_pagelist == NULL);
        for (i = 0; i < vdso64_pages; i++) {
                struct page *pg = virt_to_page(vdso64_kbase + i*PAGE_SIZE);
                get_page(pg);
                vdso64_pagelist[i] = pg;
        }
        vdso64_pagelist[i++] = virt_to_page(vdso_data);
        vdso64_pagelist[i] = NULL;
#endif /* CONFIG_PPC64 */

        get_page(virt_to_page(vdso_data));

        smp_wmb();
        vdso_ready = 1;

        return 0;
}
arch_initcall(vdso_init);