/*
 * Copyright(c) 2017 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * This code is based in part on work published here:
 *
 *	https://github.com/IAIK/KAISER
 *
 * The original work was written by and signed off for the Linux
 * kernel by:
 *
 *   Signed-off-by: Richard Fellner <richard.fellner@student.tugraz.at>
 *   Signed-off-by: Moritz Lipp <moritz.lipp@iaik.tugraz.at>
 *   Signed-off-by: Daniel Gruss <daniel.gruss@iaik.tugraz.at>
 *   Signed-off-by: Michael Schwarz <michael.schwarz@iaik.tugraz.at>
 *
 * Major changes to the original code by: Dave Hansen <dave.hansen@intel.com>
 * Mostly rewritten by Thomas Gleixner <tglx@linutronix.de> and
 *		       Andy Lutomirski <luto@amacapital.net>
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/cpu.h>

#include <asm/cpufeature.h>
#include <asm/hypervisor.h>
#include <asm/vsyscall.h>
#include <asm/cmdline.h>
#include <asm/pti.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/sections.h>

#undef pr_fmt
#define pr_fmt(fmt)	"Kernel/User page tables isolation: " fmt
/* Backporting helper */
#ifndef __GFP_NOTRACK
#define __GFP_NOTRACK	0
#endif
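
/*
 * __GFP_NOTRACK went away upstream together with kmemcheck (around
 * v4.15); the fallback above defines it to 0 so the gfp masks below
 * reduce to plain GFP_KERNEL | __GFP_ZERO on kernels without the flag.
 */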
static void __init pti_print_if_insecure(const char *reason)
{
	if (boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		pr_info("%s\n", reason);
}
static void __init pti_print_if_secure(const char *reason)
{
	if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		pr_info("%s\n", reason);
}
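
/*
 * Net effect of the checks in pti_check_boottime_disable() below:
 *
 *   pti=off, nopti, mitigations=off - force PTI off, even on an
 *                                     affected CPU
 *   pti=on                          - force PTI on, even on an
 *                                     unaffected CPU
 *   pti=auto (default)              - enable only when the CPU has
 *                                     X86_BUG_CPU_MELTDOWN
 *
 * Xen PV guests never use PTI; the hypervisor applies its own
 * Meltdown mitigation (XPTI) there.
 */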
void __init pti_check_boottime_disable(void)
{
	char arg[5];
	int ret;

	if (hypervisor_is_type(X86_HYPER_XEN_PV)) {
		pti_print_if_insecure("disabled on XEN PV.");
		return;
	}

	ret = cmdline_find_option(boot_command_line, "pti", arg, sizeof(arg));
	if (ret > 0) {
		if (ret == 3 && !strncmp(arg, "off", 3)) {
			pti_print_if_insecure("disabled on command line.");
			return;
		}
		if (ret == 2 && !strncmp(arg, "on", 2)) {
			pti_print_if_secure("force enabled on command line.");
			goto enable;
		}
		if (ret == 4 && !strncmp(arg, "auto", 4))
			goto autosel;
	}

	if (cmdline_find_option_bool(boot_command_line, "nopti") ||
	    cpu_mitigations_off()) {
		pti_print_if_insecure("disabled on command line.");
		return;
	}

autosel:
	if (!boot_cpu_has_bug(X86_BUG_CPU_MELTDOWN))
		return;
enable:
	setup_force_cpu_cap(X86_FEATURE_PTI);
}
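
/*
 * __pti_set_user_pgd() is the slow path behind the pti_set_user_pgd()
 * wrapper (see asm/pgtable_64.h), which native_set_pgd() calls whenever
 * X86_FEATURE_PTI is enabled: each write to a userspace-relevant PGD
 * entry in the kernelmode tables is mirrored into the usermode tables
 * here.
 */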
pgd_t __pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd)
{
	/*
	 * Changes to the high (kernel) portion of the kernelmode page
	 * tables are not automatically propagated to the usermode tables.
	 *
	 * Users should keep in mind that, unlike the kernelmode tables,
	 * there is no vmalloc_fault equivalent for the usermode tables.
	 * Top-level entries added to init_mm's usermode pgd after boot
	 * will not be automatically propagated to other mms.
	 */
	if (!pgdp_maps_userspace(pgdp))
		return pgd;

	/*
	 * The user page tables get the full PGD, accessible from
	 * userspace:
	 */
	kernel_to_user_pgdp(pgdp)->pgd = pgd.pgd;

	/*
	 * If this is normal user memory, make it NX in the kernel
	 * pagetables so that, if we somehow screw up and return to
	 * usermode with the kernel CR3 loaded, we'll get a page fault
	 * instead of allowing user code to execute with the wrong CR3.
	 *
	 * As exceptions, we don't set NX if:
	 *  - _PAGE_USER is not set. This could be an executable
	 *    EFI runtime mapping or something similar, and the kernel
	 *    may execute from it
	 *  - we don't have NX support
	 *  - we're clearing the PGD (i.e. the new pgd is not present).
	 */
	if ((pgd.pgd & (_PAGE_USER|_PAGE_PRESENT)) == (_PAGE_USER|_PAGE_PRESENT) &&
	    (__supported_pte_mask & _PAGE_NX))
		pgd.pgd |= _PAGE_NX;

	/* return the copy of the PGD we want the kernel to use: */
	return pgd;
}
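
/*
 * Layout reminder: with PTI enabled, each PGD is an order-1 (8 KiB)
 * allocation; the kernelmode PGD is the first 4 KiB page and the
 * usermode PGD the second. kernel_to_user_pgdp() reaches the user half
 * by setting bit PAGE_SHIFT (bit 12) in the PGD pointer, and the entry
 * code toggles the same bit in CR3 to switch page-table trees.
 */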
/*
 * Walk the user copy of the page tables (optionally) trying to allocate
 * page table pages on the way down.
 *
 * Returns a pointer to a P4D on success, or NULL on failure.
 */
static __init p4d_t *pti_user_pagetable_walk_p4d(unsigned long address)
{
	pgd_t *pgd = kernel_to_user_pgdp(pgd_offset_k(address));
	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);

	if (address < PAGE_OFFSET) {
		WARN_ONCE(1, "attempt to walk user address\n");
		return NULL;
	}

	if (pgd_none(*pgd)) {
		unsigned long new_p4d_page = __get_free_page(gfp);
		if (WARN_ON_ONCE(!new_p4d_page))
			return NULL;

		set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(new_p4d_page)));
	}
	BUILD_BUG_ON(pgd_large(*pgd) != 0);

	return p4d_offset(pgd, address);
}
/*
 * Walk the user copy of the page tables (optionally) trying to allocate
 * page table pages on the way down.
 *
 * Returns a pointer to a PMD on success, or NULL on failure.
 */
static __init pmd_t *pti_user_pagetable_walk_pmd(unsigned long address)
{
	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
	p4d_t *p4d;
	pud_t *pud;

	p4d = pti_user_pagetable_walk_p4d(address);
	if (!p4d)
		return NULL;

	BUILD_BUG_ON(p4d_large(*p4d) != 0);
	if (p4d_none(*p4d)) {
		unsigned long new_pud_page = __get_free_page(gfp);
		if (WARN_ON_ONCE(!new_pud_page))
			return NULL;

		set_p4d(p4d, __p4d(_KERNPG_TABLE | __pa(new_pud_page)));
	}

	pud = pud_offset(p4d, address);
	/* The user page tables do not use large mappings: */
	if (pud_large(*pud)) {
		WARN_ON(1);
		return NULL;
	}
	if (pud_none(*pud)) {
		unsigned long new_pmd_page = __get_free_page(gfp);
		if (WARN_ON_ONCE(!new_pmd_page))
			return NULL;

		set_pud(pud, __pud(_KERNPG_TABLE | __pa(new_pmd_page)));
	}

	return pmd_offset(pud, address);
}
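
/*
 * The PTE-level walk below only exists for the vsyscall page: it is
 * the sole piece of kernel-supplied text that must stay visible to
 * userspace at a fixed legacy address, and mapping it with 4k
 * granularity keeps anything neighbouring it out of the user tables.
 */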
#ifdef CONFIG_X86_VSYSCALL_EMULATION
/*
 * Walk the shadow copy of the page tables (optionally) trying to allocate
 * page table pages on the way down. Does not support large pages.
 *
 * Note: this is only used when mapping *new* kernel data into the
 * user/shadow page tables. It is never used for userspace data.
 *
 * Returns a pointer to a PTE on success, or NULL on failure.
 */
static pte_t *pti_user_pagetable_walk_pte(unsigned long address)
{
	gfp_t gfp = (GFP_KERNEL | __GFP_NOTRACK | __GFP_ZERO);
	pmd_t *pmd;
	pte_t *pte;

	pmd = pti_user_pagetable_walk_pmd(address);
	if (!pmd)
		return NULL;

	/* We can't do anything sensible if we hit a large mapping. */
	if (pmd_large(*pmd)) {
		WARN_ON(1);
		return NULL;
	}

	if (pmd_none(*pmd)) {
		unsigned long new_pte_page = __get_free_page(gfp);
		if (WARN_ON_ONCE(!new_pte_page))
			return NULL;

		set_pmd(pmd, __pmd(_KERNPG_TABLE | __pa(new_pte_page)));
	}

	pte = pte_offset_kernel(pmd, address);
	if (pte_flags(*pte) & _PAGE_USER) {
		WARN_ONCE(1, "attempt to walk to user pte\n");
		return NULL;
	}
	return pte;
}
static void __init pti_setup_vsyscall(void)
{
	pte_t *pte, *target_pte;
	unsigned int level;

	pte = lookup_address(VSYSCALL_ADDR, &level);
	if (!pte || WARN_ON(level != PG_LEVEL_4K) || pte_none(*pte))
		return;

	target_pte = pti_user_pagetable_walk_pte(VSYSCALL_ADDR);
	if (WARN_ON(!target_pte))
		return;

	*target_pte = *pte;
	set_vsyscall_pgtable_user_bits(kernel_to_user_pgdp(swapper_pg_dir));
}
#else
static void __init pti_setup_vsyscall(void) { }
#endif
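
/*
 * The cloning below works at PMD granularity: only the 2 MiB-aligned
 * PMD entries are copied, so the kernelmode and usermode trees point
 * at the same last-level page tables for the cloned range, and later
 * PTE changes there become visible in both trees automatically.
 */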
static void __init
pti_clone_pmds(unsigned long start, unsigned long end, pmdval_t clear)
{
	unsigned long addr;

	/*
	 * Clone the populated PMDs which cover start to end. These PMD areas
	 * can have holes.
	 */
	for (addr = start; addr < end; addr += PMD_SIZE) {
		pmd_t *pmd, *target_pmd;
		pgd_t *pgd;
		p4d_t *p4d;
		pud_t *pud;

		/* Overflow check */
		if (addr < start)
			break;

		pgd = pgd_offset_k(addr);
		if (WARN_ON(pgd_none(*pgd)))
			return;
		p4d = p4d_offset(pgd, addr);
		if (WARN_ON(p4d_none(*p4d)))
			return;
		pud = pud_offset(p4d, addr);
		if (pud_none(*pud))
			continue;
		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd))
			continue;

		target_pmd = pti_user_pagetable_walk_pmd(addr);
		if (WARN_ON(!target_pmd))
			return;

		/*
		 * Copy the PMD. That is, the kernelmode and usermode
		 * tables will share the last-level page tables of this
		 * address range.
		 */
		*target_pmd = pmd_clear_flags(*pmd, clear);
	}
}
/*
 * Clone a single p4d (i.e. a top-level entry on 4-level systems and a
 * next-level entry on 5-level systems).
 */
static void __init pti_clone_p4d(unsigned long addr)
{
	p4d_t *kernel_p4d, *user_p4d;
	pgd_t *kernel_pgd;

	user_p4d = pti_user_pagetable_walk_p4d(addr);
	if (!user_p4d)
		return;

	kernel_pgd = pgd_offset_k(addr);
	kernel_p4d = p4d_offset(kernel_pgd, addr);
	*user_p4d = *kernel_p4d;
}
/*
 * Clone the CPU_ENTRY_AREA into the user space visible page table.
 */
static void __init pti_clone_user_shared(void)
{
	pti_clone_p4d(CPU_ENTRY_AREA_BASE);
}
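
/*
 * The cpu_entry_area cloned above carries the per-CPU data the entry
 * code needs while still on the usermode CR3: the GDT, the TSS with
 * its entry trampoline stack, and the exception stacks.
 */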
/*
 * Clone the ESPFIX P4D into the user space visible page table.
 */
static void __init pti_setup_espfix64(void)
{
#ifdef CONFIG_X86_ESPFIX64
	pti_clone_p4d(ESPFIX_BASE_ADDR);
#endif
}
/*
 * Clone the populated PMDs of the entry and irqentry text and force it RO.
 */
static void __init pti_clone_entry_text(void)
{
	pti_clone_pmds((unsigned long) __entry_text_start,
		       (unsigned long) __irqentry_text_end,
		       _PAGE_RW | _PAGE_GLOBAL);
}
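
/*
 * About the clear mask above: dropping _PAGE_RW leaves the usermode
 * copy of the entry text read-only, and dropping _PAGE_GLOBAL keeps
 * those translations from being marked global, so they do not linger
 * in the TLB across the switch to the usermode CR3.
 */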
/*
 * Initialize kernel page table isolation.
 */
void __init pti_init(void)
{
	if (!static_cpu_has(X86_FEATURE_PTI))
		return;

	pr_info("enabled\n");

	pti_clone_user_shared();
	pti_clone_entry_text();
	pti_setup_espfix64();
	pti_setup_vsyscall();
}