GNU Linux-libre 5.4.274-gnu1
arch/x86/mm/dump_pagetables.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Debug helper to dump the current kernel pagetables of the system
 * so that we can see what the various memory ranges are set to.
 *
 * (C) Copyright 2008 Intel Corporation
 *
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 */

#include <linux/debugfs.h>
#include <linux/kasan.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/highmem.h>
#include <linux/pci.h>

#include <asm/e820/types.h>
#include <asm/pgtable.h>

/*
 * The dumper groups pagetable entries of the same type into a single
 * line. To do that it keeps some state while walking and flushes it
 * whenever a "break" in the continuity is found.
 */
struct pg_state {
        int level;
        pgprot_t current_prot;
        pgprotval_t effective_prot;
        unsigned long start_address;
        unsigned long current_address;
        const struct addr_marker *marker;
        unsigned long lines;
        bool to_dmesg;
        bool check_wx;
        unsigned long wx_pages;
};

struct addr_marker {
        unsigned long start_address;
        const char *name;
        unsigned long max_lines;
};
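
/*
 * An addr_marker with max_lines == 0 prints every line of its region;
 * a non-zero value truncates the dump after that many lines and emits
 * a "... N entries skipped ..." note instead (the ESPfix area below
 * caps itself at 16 lines this way).
 */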

/* Address space marker hints */

#ifdef CONFIG_X86_64

enum address_markers_idx {
        USER_SPACE_NR = 0,
        KERNEL_SPACE_NR,
#ifdef CONFIG_MODIFY_LDT_SYSCALL
        LDT_NR,
#endif
        LOW_KERNEL_NR,
        VMALLOC_START_NR,
        VMEMMAP_START_NR,
#ifdef CONFIG_KASAN
        KASAN_SHADOW_START_NR,
        KASAN_SHADOW_END_NR,
#endif
        CPU_ENTRY_AREA_NR,
#ifdef CONFIG_X86_ESPFIX64
        ESPFIX_START_NR,
#endif
#ifdef CONFIG_EFI
        EFI_END_NR,
#endif
        HIGH_KERNEL_NR,
        MODULES_VADDR_NR,
        MODULES_END_NR,
        FIXADDR_START_NR,
        END_OF_SPACE_NR,
};

static struct addr_marker address_markers[] = {
        [USER_SPACE_NR]         = { 0,                  "User Space" },
        [KERNEL_SPACE_NR]       = { (1UL << 63),        "Kernel Space" },
        [LOW_KERNEL_NR]         = { 0UL,                "Low Kernel Mapping" },
        [VMALLOC_START_NR]      = { 0UL,                "vmalloc() Area" },
        [VMEMMAP_START_NR]      = { 0UL,                "Vmemmap" },
#ifdef CONFIG_KASAN
        /*
         * These fields get initialized with the (dynamic)
         * KASAN_SHADOW_{START,END} values in pt_dump_init().
         */
        [KASAN_SHADOW_START_NR] = { 0UL,                "KASAN shadow" },
        [KASAN_SHADOW_END_NR]   = { 0UL,                "KASAN shadow end" },
#endif
#ifdef CONFIG_MODIFY_LDT_SYSCALL
        [LDT_NR]                = { 0UL,                "LDT remap" },
#endif
        [CPU_ENTRY_AREA_NR]     = { CPU_ENTRY_AREA_BASE, "CPU entry Area" },
#ifdef CONFIG_X86_ESPFIX64
        [ESPFIX_START_NR]       = { ESPFIX_BASE_ADDR,   "ESPfix Area", 16 },
#endif
#ifdef CONFIG_EFI
        [EFI_END_NR]            = { EFI_VA_END,         "EFI Runtime Services" },
#endif
        [HIGH_KERNEL_NR]        = { __START_KERNEL_map, "High Kernel Mapping" },
        [MODULES_VADDR_NR]      = { MODULES_VADDR,      "Modules" },
        [MODULES_END_NR]        = { MODULES_END,        "End Modules" },
        [FIXADDR_START_NR]      = { FIXADDR_START,      "Fixmap Area" },
        [END_OF_SPACE_NR]       = { -1,                 NULL }
};

#define INIT_PGD        ((pgd_t *) &init_top_pgt)

#else /* CONFIG_X86_64 */

enum address_markers_idx {
        USER_SPACE_NR = 0,
        KERNEL_SPACE_NR,
        VMALLOC_START_NR,
        VMALLOC_END_NR,
#ifdef CONFIG_HIGHMEM
        PKMAP_BASE_NR,
#endif
#ifdef CONFIG_MODIFY_LDT_SYSCALL
        LDT_NR,
#endif
        CPU_ENTRY_AREA_NR,
        FIXADDR_START_NR,
        END_OF_SPACE_NR,
};

static struct addr_marker address_markers[] = {
        [USER_SPACE_NR]         = { 0,                  "User Space" },
        [KERNEL_SPACE_NR]       = { PAGE_OFFSET,        "Kernel Mapping" },
        [VMALLOC_START_NR]      = { 0UL,                "vmalloc() Area" },
        [VMALLOC_END_NR]        = { 0UL,                "vmalloc() End" },
#ifdef CONFIG_HIGHMEM
        [PKMAP_BASE_NR]         = { 0UL,                "Persistent kmap() Area" },
#endif
#ifdef CONFIG_MODIFY_LDT_SYSCALL
        [LDT_NR]                = { 0UL,                "LDT remap" },
#endif
        [CPU_ENTRY_AREA_NR]     = { 0UL,                "CPU entry area" },
        [FIXADDR_START_NR]      = { 0UL,                "Fixmap area" },
        [END_OF_SPACE_NR]       = { -1,                 NULL }
};

#define INIT_PGD        (swapper_pg_dir)

#endif /* !CONFIG_X86_64 */

/* Multipliers for offsets within the PTEs */
#define PTE_LEVEL_MULT (PAGE_SIZE)
#define PMD_LEVEL_MULT (PTRS_PER_PTE * PTE_LEVEL_MULT)
#define PUD_LEVEL_MULT (PTRS_PER_PMD * PMD_LEVEL_MULT)
#define P4D_LEVEL_MULT (PTRS_PER_PUD * PUD_LEVEL_MULT)
#define PGD_LEVEL_MULT (PTRS_PER_P4D * P4D_LEVEL_MULT)
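
/*
 * Concretely, with 4K pages and 512-entry tables these come out to 4K
 * (pte), 2M (pmd), 1G (pud) and 512G (p4d); PGD_LEVEL_MULT is 512G
 * with 4-level paging (PTRS_PER_P4D == 1) and 256T with 5-level paging.
 */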

#define pt_dump_seq_printf(m, to_dmesg, fmt, args...)           \
({                                                              \
        if (to_dmesg)                                           \
                printk(KERN_INFO fmt, ##args);                  \
        else                                                    \
                if (m)                                          \
                        seq_printf(m, fmt, ##args);             \
})

#define pt_dump_cont_printf(m, to_dmesg, fmt, args...)          \
({                                                              \
        if (to_dmesg)                                           \
                printk(KERN_CONT fmt, ##args);                  \
        else                                                    \
                if (m)                                          \
                        seq_printf(m, fmt, ##args);             \
})
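
/*
 * Two variants are needed because a dmesg line has to be opened with
 * KERN_INFO and continued with KERN_CONT; seq_printf() makes no such
 * distinction, so both variants fall back to it when not dumping to
 * dmesg.
 */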

/*
 * Print a readable form of a pgprot_t to the seq_file
 */
static void printk_prot(struct seq_file *m, pgprot_t prot, int level, bool dmsg)
{
        pgprotval_t pr = pgprot_val(prot);
        static const char * const level_name[] =
                { "cr3", "pgd", "p4d", "pud", "pmd", "pte" };

        if (!(pr & _PAGE_PRESENT)) {
                /* Not present */
                pt_dump_cont_printf(m, dmsg, "                              ");
        } else {
                if (pr & _PAGE_USER)
                        pt_dump_cont_printf(m, dmsg, "USR ");
                else
                        pt_dump_cont_printf(m, dmsg, "    ");
                if (pr & _PAGE_RW)
                        pt_dump_cont_printf(m, dmsg, "RW ");
                else
                        pt_dump_cont_printf(m, dmsg, "ro ");
                if (pr & _PAGE_PWT)
                        pt_dump_cont_printf(m, dmsg, "PWT ");
                else
                        pt_dump_cont_printf(m, dmsg, "    ");
                if (pr & _PAGE_PCD)
                        pt_dump_cont_printf(m, dmsg, "PCD ");
                else
                        pt_dump_cont_printf(m, dmsg, "    ");

                /* Bit 7 has a different meaning on level 3 vs 4 */
                if (level <= 4 && pr & _PAGE_PSE)
                        pt_dump_cont_printf(m, dmsg, "PSE ");
                else
                        pt_dump_cont_printf(m, dmsg, "    ");
                if ((level == 5 && pr & _PAGE_PAT) ||
                    ((level == 4 || level == 3) && pr & _PAGE_PAT_LARGE))
                        pt_dump_cont_printf(m, dmsg, "PAT ");
                else
                        pt_dump_cont_printf(m, dmsg, "    ");
                if (pr & _PAGE_GLOBAL)
                        pt_dump_cont_printf(m, dmsg, "GLB ");
                else
                        pt_dump_cont_printf(m, dmsg, "    ");
                if (pr & _PAGE_NX)
                        pt_dump_cont_printf(m, dmsg, "NX ");
                else
                        pt_dump_cont_printf(m, dmsg, "x  ");
        }
        pt_dump_cont_printf(m, dmsg, "%s\n", level_name[level]);
}
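
/*
 * A finished line of the dump thus looks roughly like this
 * (illustrative only; exact spacing and flags depend on the mapping):
 *
 *   0xffffffff81000000-0xffffffff81a00000        10M     ro         PSE     GLB x  pmd
 */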

/*
 * On 64-bit, sign-extend the 48-bit (57-bit with 5-level paging)
 * address to 64 bits
 */
static unsigned long normalize_addr(unsigned long u)
{
        int shift;

        if (!IS_ENABLED(CONFIG_X86_64))
                return u;

        shift = 64 - (__VIRTUAL_MASK_SHIFT + 1);
        return (signed long)(u << shift) >> shift;
}
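
/*
 * Worked example, assuming 4-level paging (__VIRTUAL_MASK_SHIFT == 47,
 * so shift == 16): the walk-index sum 0x0000800000000000 is shifted up
 * to 0x8000000000000000 and arithmetically back down to
 * 0xffff800000000000, the canonical start of the kernel half.
 */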

static void note_wx(struct pg_state *st)
{
        unsigned long npages;

        npages = (st->current_address - st->start_address) / PAGE_SIZE;

#ifdef CONFIG_PCI_BIOS
        /*
         * If PCI BIOS is enabled, the PCI BIOS area is forced to WX.
         * Inform about it, but avoid the warning.
         */
        if (pcibios_enabled && st->start_address >= PAGE_OFFSET + BIOS_BEGIN &&
            st->current_address <= PAGE_OFFSET + BIOS_END) {
                pr_warn_once("x86/mm: PCI BIOS W+X mapping %lu pages\n", npages);
                return;
        }
#endif
        /* Account the WX pages */
        st->wx_pages += npages;
        WARN_ONCE(__supported_pte_mask & _PAGE_NX,
                  "x86/mm: Found insecure W+X mapping at address %pS\n",
                  (void *)st->start_address);
}
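
/*
 * Note that the WARN_ONCE() above only fires when NX is actually
 * supported: without NX in __supported_pte_mask every writable page is
 * executable anyway, so the warning would be pure noise.
 */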

/*
 * This function gets called on a break in a continuous series
 * of PTE entries; the next one is different so we need to
 * print what we collected so far.
 */
static void note_page(struct seq_file *m, struct pg_state *st,
                      pgprot_t new_prot, pgprotval_t new_eff, int level)
{
        pgprotval_t prot, cur, eff;
        static const char units[] = "BKMGTPE";

        /*
         * If we have a "break" in the series, we need to flush the state
         * we have now. A "break" is a change in permissions, level, or
         * address space marker.
         */
        prot = pgprot_val(new_prot);
        cur = pgprot_val(st->current_prot);
        eff = st->effective_prot;

        if (!st->level) {
                /* First entry */
                st->current_prot = new_prot;
                st->effective_prot = new_eff;
                st->level = level;
                st->marker = address_markers;
                st->lines = 0;
                pt_dump_seq_printf(m, st->to_dmesg, "---[ %s ]---\n",
                                   st->marker->name);
        } else if (prot != cur || new_eff != eff || level != st->level ||
                   st->current_address >= st->marker[1].start_address) {
                const char *unit = units;
                unsigned long delta;
                int width = sizeof(unsigned long) * 2;

                if (st->check_wx && (eff & _PAGE_RW) && !(eff & _PAGE_NX))
                        note_wx(st);

                /*
                 * Now print the actual finished series
                 */
                if (!st->marker->max_lines ||
                    st->lines < st->marker->max_lines) {
                        pt_dump_seq_printf(m, st->to_dmesg,
                                           "0x%0*lx-0x%0*lx   ",
                                           width, st->start_address,
                                           width, st->current_address);

                        delta = st->current_address - st->start_address;
                        while (!(delta & 1023) && unit[1]) {
                                delta >>= 10;
                                unit++;
                        }
                        pt_dump_cont_printf(m, st->to_dmesg, "%9lu%c ",
                                            delta, *unit);
                        printk_prot(m, st->current_prot, st->level,
                                    st->to_dmesg);
                }
                st->lines++;

                /*
                 * We print markers for special areas of the address space,
                 * such as the start of the vmalloc area, to help with
                 * interpretation.
                 */
                if (st->current_address >= st->marker[1].start_address) {
                        if (st->marker->max_lines &&
                            st->lines > st->marker->max_lines) {
                                unsigned long nskip =
                                        st->lines - st->marker->max_lines;
                                pt_dump_seq_printf(m, st->to_dmesg,
                                                   "... %lu entr%s skipped ...\n",
                                                   nskip,
                                                   nskip == 1 ? "y" : "ies");
                        }
                        st->marker++;
                        st->lines = 0;
                        pt_dump_seq_printf(m, st->to_dmesg, "---[ %s ]---\n",
                                           st->marker->name);
                }

                st->start_address = st->current_address;
                st->current_prot = new_prot;
                st->effective_prot = new_eff;
                st->level = level;
        }
}

static inline pgprotval_t effective_prot(pgprotval_t prot1, pgprotval_t prot2)
{
        return (prot1 & prot2 & (_PAGE_USER | _PAGE_RW)) |
               ((prot1 | prot2) & _PAGE_NX);
}
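
/*
 * USER and RW are only effective when set at every level of the walk
 * (hence the AND), while NX at any single level is enough to make the
 * whole range non-executable (hence the OR).
 */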

static void walk_pte_level(struct seq_file *m, struct pg_state *st, pmd_t addr,
                           pgprotval_t eff_in, unsigned long P)
{
        int i;
        pte_t *pte;
        pgprotval_t prot, eff;

        for (i = 0; i < PTRS_PER_PTE; i++) {
                st->current_address = normalize_addr(P + i * PTE_LEVEL_MULT);
                pte = pte_offset_map(&addr, st->current_address);
                prot = pte_flags(*pte);
                eff = effective_prot(eff_in, prot);
                note_page(m, st, __pgprot(prot), eff, 5);
                pte_unmap(pte);
        }
}
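
/*
 * pte_offset_map()/pte_unmap() are used above rather than plain pointer
 * arithmetic so that pte pages residing in highmem (CONFIG_HIGHPTE on
 * 32-bit) are temporarily mapped while they are read.
 */
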
#ifdef CONFIG_KASAN

/*
 * This is an optimization for the KASAN=y case. Since all KASAN page
 * tables eventually point to the kasan_early_shadow_page, we can call
 * note_page() right away without walking through the lower-level page
 * tables. This saves dozens of seconds (minutes for a 5-level config)
 * when checking for W+X mappings or reading the kernel_page_tables
 * debugfs file.
 */
static inline bool kasan_page_table(struct seq_file *m, struct pg_state *st,
                                void *pt)
{
        if (__pa(pt) == __pa(kasan_early_shadow_pmd) ||
            (pgtable_l5_enabled() &&
                        __pa(pt) == __pa(kasan_early_shadow_p4d)) ||
            __pa(pt) == __pa(kasan_early_shadow_pud)) {
                pgprotval_t prot = pte_flags(kasan_early_shadow_pte[0]);
                note_page(m, st, __pgprot(prot), 0, 5);
                return true;
        }
        return false;
}
#else
static inline bool kasan_page_table(struct seq_file *m, struct pg_state *st,
                                void *pt)
{
        return false;
}
#endif

#if PTRS_PER_PMD > 1

static void walk_pmd_level(struct seq_file *m, struct pg_state *st, pud_t addr,
                           pgprotval_t eff_in, unsigned long P)
{
        int i;
        pmd_t *start, *pmd_start;
        pgprotval_t prot, eff;

        pmd_start = start = (pmd_t *)pud_page_vaddr(addr);
        for (i = 0; i < PTRS_PER_PMD; i++) {
                st->current_address = normalize_addr(P + i * PMD_LEVEL_MULT);
                if (!pmd_none(*start)) {
                        prot = pmd_flags(*start);
                        eff = effective_prot(eff_in, prot);
                        if (pmd_large(*start) || !pmd_present(*start)) {
                                note_page(m, st, __pgprot(prot), eff, 4);
                        } else if (!kasan_page_table(m, st, pmd_start)) {
                                walk_pte_level(m, st, *start, eff,
                                               P + i * PMD_LEVEL_MULT);
                        }
                } else
                        note_page(m, st, __pgprot(0), 0, 4);
                start++;
        }
}

#else
#define walk_pmd_level(m, s, a, e, p) walk_pte_level(m, s, __pmd(pud_val(a)), e, p)
#define pud_large(a) pmd_large(__pmd(pud_val(a)))
#define pud_none(a)  pmd_none(__pmd(pud_val(a)))
#endif
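
/*
 * With 2-level paging (32-bit non-PAE) the pmd level is folded away:
 * the fallback macros above re-interpret the pud as a pmd, hand it
 * straight to walk_pte_level(), and answer pud_large()/pud_none() via
 * the pmd helpers. The same folding trick is repeated below for the
 * pud level.
 */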

#if PTRS_PER_PUD > 1

static void walk_pud_level(struct seq_file *m, struct pg_state *st, p4d_t addr,
                           pgprotval_t eff_in, unsigned long P)
{
        int i;
        pud_t *start, *pud_start;
        pgprotval_t prot, eff;

        pud_start = start = (pud_t *)p4d_page_vaddr(addr);

        for (i = 0; i < PTRS_PER_PUD; i++) {
                st->current_address = normalize_addr(P + i * PUD_LEVEL_MULT);
                if (!pud_none(*start)) {
                        prot = pud_flags(*start);
                        eff = effective_prot(eff_in, prot);
                        if (pud_large(*start) || !pud_present(*start)) {
                                note_page(m, st, __pgprot(prot), eff, 3);
                        } else if (!kasan_page_table(m, st, pud_start)) {
                                walk_pmd_level(m, st, *start, eff,
                                               P + i * PUD_LEVEL_MULT);
                        }
                } else
                        note_page(m, st, __pgprot(0), 0, 3);

                start++;
        }
}

#else
#define walk_pud_level(m, s, a, e, p) walk_pmd_level(m, s, __pud(p4d_val(a)), e, p)
#define p4d_large(a) pud_large(__pud(p4d_val(a)))
#define p4d_none(a)  pud_none(__pud(p4d_val(a)))
#endif

static void walk_p4d_level(struct seq_file *m, struct pg_state *st, pgd_t addr,
                           pgprotval_t eff_in, unsigned long P)
{
        int i;
        p4d_t *start, *p4d_start;
        pgprotval_t prot, eff;

        if (PTRS_PER_P4D == 1)
                return walk_pud_level(m, st, __p4d(pgd_val(addr)), eff_in, P);

        p4d_start = start = (p4d_t *)pgd_page_vaddr(addr);

        for (i = 0; i < PTRS_PER_P4D; i++) {
                st->current_address = normalize_addr(P + i * P4D_LEVEL_MULT);
                if (!p4d_none(*start)) {
                        prot = p4d_flags(*start);
                        eff = effective_prot(eff_in, prot);
                        if (p4d_large(*start) || !p4d_present(*start)) {
                                note_page(m, st, __pgprot(prot), eff, 2);
                        } else if (!kasan_page_table(m, st, p4d_start)) {
                                walk_pud_level(m, st, *start, eff,
                                               P + i * P4D_LEVEL_MULT);
                        }
                } else
                        note_page(m, st, __pgprot(0), 0, 2);

                start++;
        }
}

#define pgd_large(a) (pgtable_l5_enabled() ? pgd_large(a) : p4d_large(__p4d(pgd_val(a))))
#define pgd_none(a)  (pgtable_l5_enabled() ? pgd_none(a) : p4d_none(__p4d(pgd_val(a))))
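
/*
 * These self-referencing macros rely on the preprocessor not
 * re-expanding a macro name inside its own expansion: with 5-level
 * paging the real pgd_large()/pgd_none() are called, while with
 * 4-level paging the pgd is folded and the p4d helpers answer instead.
 */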

static inline bool is_hypervisor_range(int idx)
{
#ifdef CONFIG_X86_64
        /*
         * A hole at the beginning of the kernel address space, reserved
         * for a hypervisor.
         */
        return  (idx >= pgd_index(GUARD_HOLE_BASE_ADDR)) &&
                (idx <  pgd_index(GUARD_HOLE_END_ADDR));
#else
        return false;
#endif
}
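
/*
 * Under Xen PV, for instance, the guard-hole PGD entries belong to the
 * hypervisor and must not be followed by the kernel, which is why the
 * walker skips that range entirely.
 */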

static void ptdump_walk_pgd_level_core(struct seq_file *m, pgd_t *pgd,
                                       bool checkwx, bool dmesg)
{
        pgd_t *start = INIT_PGD;
        pgprotval_t prot, eff;
        int i;
        struct pg_state st = {};

        if (pgd) {
                start = pgd;
                st.to_dmesg = dmesg;
        }

        st.check_wx = checkwx;
        if (checkwx)
                st.wx_pages = 0;

        for (i = 0; i < PTRS_PER_PGD; i++) {
                st.current_address = normalize_addr(i * PGD_LEVEL_MULT);
                if (!pgd_none(*start) && !is_hypervisor_range(i)) {
                        prot = pgd_flags(*start);
#ifdef CONFIG_X86_PAE
                        eff = _PAGE_USER | _PAGE_RW;
#else
                        eff = prot;
#endif
                        if (pgd_large(*start) || !pgd_present(*start)) {
                                note_page(m, &st, __pgprot(prot), eff, 1);
                        } else {
                                walk_p4d_level(m, &st, *start, eff,
                                               i * PGD_LEVEL_MULT);
                        }
                } else
                        note_page(m, &st, __pgprot(0), 0, 1);

                cond_resched();
                start++;
        }

        /* Flush out the last page */
        st.current_address = normalize_addr(PTRS_PER_PGD * PGD_LEVEL_MULT);
        note_page(m, &st, __pgprot(0), 0, 0);
        if (!checkwx)
                return;
        if (st.wx_pages)
                pr_info("x86/mm: Checked W+X mappings: FAILED, %lu W+X pages found.\n",
                        st.wx_pages);
        else
                pr_info("x86/mm: Checked W+X mappings: passed, no W+X pages found.\n");
}

void ptdump_walk_pgd_level(struct seq_file *m, pgd_t *pgd)
{
        ptdump_walk_pgd_level_core(m, pgd, false, true);
}

void ptdump_walk_pgd_level_debugfs(struct seq_file *m, pgd_t *pgd, bool user)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
        if (user && boot_cpu_has(X86_FEATURE_PTI))
                pgd = kernel_to_user_pgdp(pgd);
#endif
        ptdump_walk_pgd_level_core(m, pgd, false, false);
}
EXPORT_SYMBOL_GPL(ptdump_walk_pgd_level_debugfs);
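
/*
 * The debugfs interface (arch/x86/mm/debug_pagetables.c) is the main
 * consumer of this export: with PTI it can dump either the kernel copy
 * or, via the "user" flag above, the shadow user copy of the page
 * tables.
 */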

void ptdump_walk_user_pgd_level_checkwx(void)
{
#ifdef CONFIG_PAGE_TABLE_ISOLATION
        pgd_t *pgd = INIT_PGD;

        if (!(__supported_pte_mask & _PAGE_NX) ||
            !boot_cpu_has(X86_FEATURE_PTI))
                return;

        pr_info("x86/mm: Checking user space page tables\n");
        pgd = kernel_to_user_pgdp(pgd);
        ptdump_walk_pgd_level_core(NULL, pgd, true, false);
#endif
}

void ptdump_walk_pgd_level_checkwx(void)
{
        ptdump_walk_pgd_level_core(NULL, NULL, true, false);
}
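
/*
 * With CONFIG_DEBUG_WX enabled, debug_checkwx() invokes the check above
 * once at the end of boot, after the kernel has marked its text and
 * rodata read-only.
 */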

static int __init pt_dump_init(void)
{
        /*
         * Various markers are not compile-time constants, so assign them
         * here.
         */
#ifdef CONFIG_X86_64
        address_markers[LOW_KERNEL_NR].start_address = PAGE_OFFSET;
        address_markers[VMALLOC_START_NR].start_address = VMALLOC_START;
        address_markers[VMEMMAP_START_NR].start_address = VMEMMAP_START;
#ifdef CONFIG_MODIFY_LDT_SYSCALL
        address_markers[LDT_NR].start_address = LDT_BASE_ADDR;
#endif
#ifdef CONFIG_KASAN
        address_markers[KASAN_SHADOW_START_NR].start_address = KASAN_SHADOW_START;
        address_markers[KASAN_SHADOW_END_NR].start_address = KASAN_SHADOW_END;
#endif
#endif
#ifdef CONFIG_X86_32
        address_markers[VMALLOC_START_NR].start_address = VMALLOC_START;
        address_markers[VMALLOC_END_NR].start_address = VMALLOC_END;
# ifdef CONFIG_HIGHMEM
        address_markers[PKMAP_BASE_NR].start_address = PKMAP_BASE;
# endif
        address_markers[FIXADDR_START_NR].start_address = FIXADDR_START;
        address_markers[CPU_ENTRY_AREA_NR].start_address = CPU_ENTRY_AREA_BASE;
# ifdef CONFIG_MODIFY_LDT_SYSCALL
        address_markers[LDT_NR].start_address = LDT_BASE_ADDR;
# endif
#endif
        return 0;
}
__initcall(pt_dump_init);