// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Memory Encryption Support
 *
 * Copyright (C) 2016 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 */

#define DISABLE_BRANCH_PROFILING

/*
 * Since we're dealing with identity mappings, physical and virtual
 * addresses are the same, so override these defines which are ultimately
 * used by the headers in misc.h.
 */
#define __pa(x)	((unsigned long)(x))
#define __va(x)	((void *)((unsigned long)(x)))

/*
 * Special hack: we have to be careful, because no indirections are
 * allowed here, and paravirt_ops is a kind of one. As it will only run in
 * baremetal anyway, we just keep it from happening. (This list needs to
 * be extended when new paravirt and debugging variants are added.)
 */
#undef CONFIG_PARAVIRT
#undef CONFIG_PARAVIRT_XXL
#undef CONFIG_PARAVIRT_SPINLOCKS

/*
 * This code runs before CPU feature bits are set. By default, the
 * pgtable_l5_enabled() function uses bit X86_FEATURE_LA57 to determine if
 * 5-level paging is active, so that won't work here. USE_EARLY_PGTABLE_L5
 * is provided to handle this situation and, instead, use a variable that
 * has been set by the early boot code.
 */
#define USE_EARLY_PGTABLE_L5

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/mem_encrypt.h>
#include <linux/cc_platform.h>

#include <asm/init.h>
#include <asm/setup.h>
#include <asm/sections.h>
#include <asm/cmdline.h>
#include <asm/coco.h>
#include <asm/sev.h>

#include "mm_internal.h"

#define PGD_FLAGS		_KERNPG_TABLE_NOENC
#define P4D_FLAGS		_KERNPG_TABLE_NOENC
#define PUD_FLAGS		_KERNPG_TABLE_NOENC
#define PMD_FLAGS		_KERNPG_TABLE_NOENC

#define PMD_FLAGS_LARGE		(__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL)

#define PMD_FLAGS_DEC		PMD_FLAGS_LARGE
#define PMD_FLAGS_DEC_WP	((PMD_FLAGS_DEC & ~_PAGE_LARGE_CACHE_MASK) | \
				 (_PAGE_PAT_LARGE | _PAGE_PWT))

#define PMD_FLAGS_ENC		(PMD_FLAGS_LARGE | _PAGE_ENC)

#define PTE_FLAGS		(__PAGE_KERNEL_EXEC & ~_PAGE_GLOBAL)

#define PTE_FLAGS_DEC		PTE_FLAGS
#define PTE_FLAGS_DEC_WP	((PTE_FLAGS_DEC & ~_PAGE_CACHE_MASK) | \
				 (_PAGE_PAT | _PAGE_PWT))

#define PTE_FLAGS_ENC		(PTE_FLAGS | _PAGE_ENC)
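
/*
 * Note on the flag variants above: the _ENC flavors set _PAGE_ENC so the
 * mapping is translated through the encryption bit, the _DEC flavors leave
 * it clear, and the _DEC_WP flavors additionally replace the cache
 * attribute bits with _PAGE_PAT/_PAGE_PWT so the decrypted, write-protected
 * alias used during in-place encryption is not mapped as ordinary
 * write-back cacheable memory.
 */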

struct sme_populate_pgd_data {
	void	*pgtable_area;
	pgd_t	*pgd;

	pmdval_t pmd_flags;
	pteval_t pte_flags;
	unsigned long paddr;

	unsigned long vaddr;
	unsigned long vaddr_end;
};

/*
 * This work area lives in the .init.scratch section, which lives outside of
 * the kernel proper. It is sized to hold the intermediate copy buffer and
 * more than enough pagetable pages.
 *
 * By using this section, the kernel can be encrypted in place and it
 * avoids any possibility of boot parameters or initramfs images being
 * placed such that the in-place encryption logic overwrites them. This
 * section is 2MB aligned to allow for simple pagetable setup using only
 * PMD entries (see vmlinux.lds.S).
 */
static char sme_workarea[2 * PMD_SIZE] __section(".init.scratch");

static char sme_cmdline_arg[] __initdata = "mem_encrypt";
static char sme_cmdline_on[] __initdata = "on";
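
/*
 * Clear the PGD entries covering ppd->vaddr through ppd->vaddr_end. This is
 * how the temporary decrypted aliases are torn down once the kernel is
 * running encrypted.
 */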
static void __head sme_clear_pgd(struct sme_populate_pgd_data *ppd)
{
	unsigned long pgd_start, pgd_end, pgd_size;
	pgd_t *pgd_p;

	pgd_start = ppd->vaddr & PGDIR_MASK;
	pgd_end = ppd->vaddr_end & PGDIR_MASK;

	pgd_size = (((pgd_end - pgd_start) / PGDIR_SIZE) + 1) * sizeof(pgd_t);

	pgd_p = ppd->pgd + pgd_index(ppd->vaddr);

	memset(pgd_p, 0, pgd_size);
}
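
/*
 * Walk the PGD/P4D/PUD levels for ppd->vaddr, allocating any missing
 * intermediate tables from the ppd->pgtable_area bump allocator, and
 * return the PUD entry for ppd->vaddr so the caller can install the
 * PMD- or PTE-level mapping.
 */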
static pud_t __head *sme_prepare_pgd(struct sme_populate_pgd_data *ppd)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = ppd->pgd + pgd_index(ppd->vaddr);
	if (pgd_none(*pgd)) {
		p4d = ppd->pgtable_area;
		memset(p4d, 0, sizeof(*p4d) * PTRS_PER_P4D);
		ppd->pgtable_area += sizeof(*p4d) * PTRS_PER_P4D;
		set_pgd(pgd, __pgd(PGD_FLAGS | __pa(p4d)));
	}

	p4d = p4d_offset(pgd, ppd->vaddr);
	if (p4d_none(*p4d)) {
		pud = ppd->pgtable_area;
		memset(pud, 0, sizeof(*pud) * PTRS_PER_PUD);
		ppd->pgtable_area += sizeof(*pud) * PTRS_PER_PUD;
		set_p4d(p4d, __p4d(P4D_FLAGS | __pa(pud)));
	}

	pud = pud_offset(p4d, ppd->vaddr);
	if (pud_none(*pud)) {
		pmd = ppd->pgtable_area;
		memset(pmd, 0, sizeof(*pmd) * PTRS_PER_PMD);
		ppd->pgtable_area += sizeof(*pmd) * PTRS_PER_PMD;
		set_pud(pud, __pud(PUD_FLAGS | __pa(pmd)));
	}

	if (pud_leaf(*pud))
		return NULL;

	return pud;
}
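
/* Install a single 2MB (PMD-level) mapping of ppd->paddr at ppd->vaddr. */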
static void __head sme_populate_pgd_large(struct sme_populate_pgd_data *ppd)
{
	pud_t *pud;
	pmd_t *pmd;

	pud = sme_prepare_pgd(ppd);
	if (!pud)
		return;

	pmd = pmd_offset(pud, ppd->vaddr);
	if (pmd_leaf(*pmd))
		return;

	set_pmd(pmd, __pmd(ppd->paddr | ppd->pmd_flags));
}
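
/* Install a single 4KB (PTE-level) mapping of ppd->paddr at ppd->vaddr. */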
static void __head sme_populate_pgd(struct sme_populate_pgd_data *ppd)
{
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pud = sme_prepare_pgd(ppd);
	if (!pud)
		return;

	pmd = pmd_offset(pud, ppd->vaddr);
	if (pmd_none(*pmd)) {
		pte = ppd->pgtable_area;
		memset(pte, 0, sizeof(*pte) * PTRS_PER_PTE);
		ppd->pgtable_area += sizeof(*pte) * PTRS_PER_PTE;
		set_pmd(pmd, __pmd(PMD_FLAGS | __pa(pte)));
	}

	if (pmd_leaf(*pmd))
		return;

	pte = pte_offset_map(pmd, ppd->vaddr);
	if (pte_none(*pte))
		set_pte(pte, __pte(ppd->paddr | ppd->pte_flags));
}

static void __head __sme_map_range_pmd(struct sme_populate_pgd_data *ppd)
{
	while (ppd->vaddr < ppd->vaddr_end) {
		sme_populate_pgd_large(ppd);

		ppd->vaddr += PMD_SIZE;
		ppd->paddr += PMD_SIZE;
	}
}

static void __head __sme_map_range_pte(struct sme_populate_pgd_data *ppd)
{
	while (ppd->vaddr < ppd->vaddr_end) {
		sme_populate_pgd(ppd);

		ppd->vaddr += PAGE_SIZE;
		ppd->paddr += PAGE_SIZE;
	}
}
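
/*
 * Map [vaddr, vaddr_end): any unaligned head and tail of the range is
 * mapped with 4KB PTEs, the 2MB-aligned middle with large PMD entries.
 */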
static void __head __sme_map_range(struct sme_populate_pgd_data *ppd,
				   pmdval_t pmd_flags, pteval_t pte_flags)
{
	unsigned long vaddr_end;

	ppd->pmd_flags = pmd_flags;
	ppd->pte_flags = pte_flags;

	/* Save original end value since we modify the struct value */
	vaddr_end = ppd->vaddr_end;

	/* If start is not 2MB aligned, create PTE entries */
	ppd->vaddr_end = ALIGN(ppd->vaddr, PMD_SIZE);
	__sme_map_range_pte(ppd);

	/* Create PMD entries */
	ppd->vaddr_end = vaddr_end & PMD_MASK;
	__sme_map_range_pmd(ppd);

	/* If end is not 2MB aligned, create PTE entries */
	ppd->vaddr_end = vaddr_end;
	__sme_map_range_pte(ppd);
}

static void __head sme_map_range_encrypted(struct sme_populate_pgd_data *ppd)
{
	__sme_map_range(ppd, PMD_FLAGS_ENC, PTE_FLAGS_ENC);
}

static void __head sme_map_range_decrypted(struct sme_populate_pgd_data *ppd)
{
	__sme_map_range(ppd, PMD_FLAGS_DEC, PTE_FLAGS_DEC);
}

static void __head sme_map_range_decrypted_wp(struct sme_populate_pgd_data *ppd)
{
	__sme_map_range(ppd, PMD_FLAGS_DEC_WP, PTE_FLAGS_DEC_WP);
}
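
/*
 * Return a conservative estimate, in bytes, of the pagetable pages needed
 * to map 'len' bytes using the scheme above.
 */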
static unsigned long __head sme_pgtable_calc(unsigned long len)
{
	unsigned long entries = 0, tables = 0;

	/*
	 * Perform a relatively simplistic calculation of the pagetable
	 * entries that are needed. Those mappings will be covered mostly
	 * by 2MB PMD entries so we can conservatively calculate the required
	 * number of P4D, PUD and PMD structures needed to perform the
	 * mappings. For mappings that are not 2MB aligned, PTE mappings
	 * would be needed for the start and end portion of the address range
	 * that fall outside of the 2MB alignment. This results in, at most,
	 * two extra pages to hold PTE entries for each range that is mapped.
	 * Incrementing the count for each covers the case where the addresses
	 * cross entries.
	 */

	/* PGDIR_SIZE is equal to P4D_SIZE on 4-level machine. */
	if (PTRS_PER_P4D > 1)
		entries += (DIV_ROUND_UP(len, PGDIR_SIZE) + 1) * sizeof(p4d_t) * PTRS_PER_P4D;
	entries += (DIV_ROUND_UP(len, P4D_SIZE) + 1) * sizeof(pud_t) * PTRS_PER_PUD;
	entries += (DIV_ROUND_UP(len, PUD_SIZE) + 1) * sizeof(pmd_t) * PTRS_PER_PMD;
	entries += 2 * sizeof(pte_t) * PTRS_PER_PTE;

	/*
	 * Now calculate the added pagetable structures needed to populate
	 * the new pagetables.
	 */
	if (PTRS_PER_P4D > 1)
		tables += DIV_ROUND_UP(entries, PGDIR_SIZE) * sizeof(p4d_t) * PTRS_PER_P4D;
	tables += DIV_ROUND_UP(entries, P4D_SIZE) * sizeof(pud_t) * PTRS_PER_PUD;
	tables += DIV_ROUND_UP(entries, PUD_SIZE) * sizeof(pmd_t) * PTRS_PER_PMD;

	return entries + tables;
}
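
/*
 * Encrypt the kernel (and initrd, if present) in place. Temporary
 * pagetables provide an encrypted identity mapping and a decrypted,
 * write-protected alias of the same physical memory; sme_encrypt_execute()
 * then copies the data from the decrypted alias back over itself through
 * the encrypted mapping, using the workarea as the intermediate buffer.
 */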
void __head sme_encrypt_kernel(struct boot_params *bp)
{
	unsigned long workarea_start, workarea_end, workarea_len;
	unsigned long execute_start, execute_end, execute_len;
	unsigned long kernel_start, kernel_end, kernel_len;
	unsigned long initrd_start, initrd_end, initrd_len;
	struct sme_populate_pgd_data ppd;
	unsigned long pgtable_area_len;
	unsigned long decrypted_base;

	/*
	 * This is early code, use an open coded check for SME instead of
	 * using cc_platform_has(). This eliminates worries about removing
	 * instrumentation or checking boot_cpu_data in the cc_platform_has()
	 * function.
	 */
	if (!sme_get_me_mask() ||
	    RIP_REL_REF(sev_status) & MSR_AMD64_SEV_ENABLED)
		return;

	/*
	 * Prepare for encrypting the kernel and initrd by building new
	 * pagetables with the necessary attributes needed to encrypt the
	 * kernel in place.
	 *
	 *   One range of virtual addresses will map the memory occupied
	 *   by the kernel and initrd as encrypted.
	 *
	 *   Another range of virtual addresses will map the memory occupied
	 *   by the kernel and initrd as decrypted and write-protected.
	 *
	 *     The use of write-protect attribute will prevent any of the
	 *     memory from being cached.
	 */

	kernel_start = (unsigned long)RIP_REL_REF(_text);
	kernel_end = ALIGN((unsigned long)RIP_REL_REF(_end), PMD_SIZE);
	kernel_len = kernel_end - kernel_start;

	initrd_start = 0;
	initrd_end = 0;
	initrd_len = 0;
#ifdef CONFIG_BLK_DEV_INITRD
	initrd_len = (unsigned long)bp->hdr.ramdisk_size |
		     ((unsigned long)bp->ext_ramdisk_size << 32);
	if (initrd_len) {
		initrd_start = (unsigned long)bp->hdr.ramdisk_image |
			       ((unsigned long)bp->ext_ramdisk_image << 32);
		initrd_end = PAGE_ALIGN(initrd_start + initrd_len);
		initrd_len = initrd_end - initrd_start;
	}
#endif

	/*
	 * Calculate required number of workarea bytes needed:
	 *   executable encryption area size:
	 *     stack page (PAGE_SIZE)
	 *     encryption routine page (PAGE_SIZE)
	 *     intermediate copy buffer (PMD_SIZE)
	 *   pagetable structures for the encryption of the kernel
	 *   pagetable structures for workarea (in case not currently mapped)
	 */
	execute_start = workarea_start = (unsigned long)RIP_REL_REF(sme_workarea);
	execute_end = execute_start + (PAGE_SIZE * 2) + PMD_SIZE;
	execute_len = execute_end - execute_start;

	/*
	 * One PGD for both encrypted and decrypted mappings and a set of
	 * PUDs and PMDs for each of the encrypted and decrypted mappings.
	 */
	pgtable_area_len = sizeof(pgd_t) * PTRS_PER_PGD;
	pgtable_area_len += sme_pgtable_calc(execute_end - kernel_start) * 2;
	if (initrd_len)
		pgtable_area_len += sme_pgtable_calc(initrd_len) * 2;

	/* PUDs and PMDs needed in the current pagetables for the workarea */
	pgtable_area_len += sme_pgtable_calc(execute_len + pgtable_area_len);

	/*
	 * The total workarea includes the executable encryption area and
	 * the pagetable area. The start of the workarea is already 2MB
	 * aligned, align the end of the workarea on a 2MB boundary so that
	 * we don't try to create/allocate PTE entries from the workarea
	 * before it is mapped.
	 */
	workarea_len = execute_len + pgtable_area_len;
	workarea_end = ALIGN(workarea_start + workarea_len, PMD_SIZE);

	/*
	 * Set the address to the start of where newly created pagetable
	 * structures (PGDs, PUDs and PMDs) will be allocated. New pagetable
	 * structures are created when the workarea is added to the current
	 * pagetables and when the new encrypted and decrypted kernel
	 * mappings are populated.
	 */
	ppd.pgtable_area = (void *)execute_end;

	/*
	 * Make sure the current pagetable structure has entries for
	 * addressing the workarea.
	 */
	ppd.pgd = (pgd_t *)native_read_cr3_pa();
	ppd.paddr = workarea_start;
	ppd.vaddr = workarea_start;
	ppd.vaddr_end = workarea_end;
	sme_map_range_decrypted(&ppd);

	/* Flush the TLB - no globals so cr3 is enough */
	native_write_cr3(__native_read_cr3());

	/*
	 * A new pagetable structure is being built to allow for the kernel
	 * and initrd to be encrypted. It starts with an empty PGD that will
	 * then be populated with new PUDs and PMDs as the encrypted and
	 * decrypted kernel mappings are created.
	 */
	ppd.pgd = ppd.pgtable_area;
	memset(ppd.pgd, 0, sizeof(pgd_t) * PTRS_PER_PGD);
	ppd.pgtable_area += sizeof(pgd_t) * PTRS_PER_PGD;

	/*
	 * A different PGD index/entry must be used to get different
	 * pagetable entries for the decrypted mapping. Choose the next
	 * PGD index and convert it to a virtual address to be used as
	 * the base of the mapping.
	 */
	decrypted_base = (pgd_index(workarea_end) + 1) & (PTRS_PER_PGD - 1);
	if (initrd_len) {
		unsigned long check_base;

		check_base = (pgd_index(initrd_end) + 1) & (PTRS_PER_PGD - 1);
		decrypted_base = max(decrypted_base, check_base);
	}
	decrypted_base <<= PGDIR_SHIFT;
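
	/*
	 * decrypted_base is now a PGDIR_SIZE-aligned offset (512GB with
	 * 4-level paging) that is added to the identity addresses below to
	 * form the decrypted alias addresses.
	 */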

	/* Add encrypted kernel (identity) mappings */
	ppd.paddr = kernel_start;
	ppd.vaddr = kernel_start;
	ppd.vaddr_end = kernel_end;
	sme_map_range_encrypted(&ppd);

	/* Add decrypted, write-protected kernel (non-identity) mappings */
	ppd.paddr = kernel_start;
	ppd.vaddr = kernel_start + decrypted_base;
	ppd.vaddr_end = kernel_end + decrypted_base;
	sme_map_range_decrypted_wp(&ppd);

	if (initrd_len) {
		/* Add encrypted initrd (identity) mappings */
		ppd.paddr = initrd_start;
		ppd.vaddr = initrd_start;
		ppd.vaddr_end = initrd_end;
		sme_map_range_encrypted(&ppd);
		/*
		 * Add decrypted, write-protected initrd (non-identity) mappings
		 */
		ppd.paddr = initrd_start;
		ppd.vaddr = initrd_start + decrypted_base;
		ppd.vaddr_end = initrd_end + decrypted_base;
		sme_map_range_decrypted_wp(&ppd);
	}

	/* Add decrypted workarea mappings to both kernel mappings */
	ppd.paddr = workarea_start;
	ppd.vaddr = workarea_start;
	ppd.vaddr_end = workarea_end;
	sme_map_range_decrypted(&ppd);

	ppd.paddr = workarea_start;
	ppd.vaddr = workarea_start + decrypted_base;
	ppd.vaddr_end = workarea_end + decrypted_base;
	sme_map_range_decrypted(&ppd);

	/* Perform the encryption */
	sme_encrypt_execute(kernel_start, kernel_start + decrypted_base,
			    kernel_len, workarea_start, (unsigned long)ppd.pgd);

	if (initrd_len)
		sme_encrypt_execute(initrd_start, initrd_start + decrypted_base,
				    initrd_len, workarea_start,
				    (unsigned long)ppd.pgd);

	/*
	 * At this point we are running encrypted. Remove the mappings for
	 * the decrypted areas - all that is needed for this is to remove
	 * the PGD entry/entries.
	 */
	ppd.vaddr = kernel_start + decrypted_base;
	ppd.vaddr_end = kernel_end + decrypted_base;
	sme_clear_pgd(&ppd);

	if (initrd_len) {
		ppd.vaddr = initrd_start + decrypted_base;
		ppd.vaddr_end = initrd_end + decrypted_base;
		sme_clear_pgd(&ppd);
	}

	ppd.vaddr = workarea_start + decrypted_base;
	ppd.vaddr_end = workarea_end + decrypted_base;
	sme_clear_pgd(&ppd);

	/* Flush the TLB - no globals so cr3 is enough */
	native_write_cr3(__native_read_cr3());
}
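
/*
 * Detect SME/SEV support from CPUID and the SEV/SYSCFG MSRs while still
 * running on the early identity mappings, honor the mem_encrypt= command
 * line option for bare-metal SME, and record the encryption mask for the
 * rest of the boot.
 */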
void __head sme_enable(struct boot_params *bp)
{
	const char *cmdline_ptr, *cmdline_arg, *cmdline_on;
	unsigned int eax, ebx, ecx, edx;
	unsigned long feature_mask;
	unsigned long me_mask;
	char buffer[16];
	bool snp;
	u64 msr;

	snp = snp_init(bp);

	/* Check for the SME/SEV support leaf */
	eax = 0x80000000;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);
	if (eax < 0x8000001f)
		return;

#define AMD_SME_BIT	BIT(0)
#define AMD_SEV_BIT	BIT(1)

	/*
	 * Check for the SME/SEV feature:
	 *   CPUID Fn8000_001F[EAX]
	 *   - Bit 0 - Secure Memory Encryption support
	 *   - Bit 1 - Secure Encrypted Virtualization support
	 *   CPUID Fn8000_001F[EBX]
	 *   - Bits 5:0 - Pagetable bit position used to indicate encryption
	 *     support
	 */
	eax = 0x8000001f;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);
	/* Check whether SEV or SME is supported */
	if (!(eax & (AMD_SEV_BIT | AMD_SME_BIT)))
		return;

	me_mask = 1UL << (ebx & 0x3f);
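
	/*
	 * me_mask now holds the C-bit as a pagetable mask; EBX[5:0] reports
	 * its position, commonly bit 47 on current processors.
	 */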

	/* Check the SEV MSR whether SEV or SME is enabled */
	RIP_REL_REF(sev_status) = msr = __rdmsr(MSR_AMD64_SEV);
	feature_mask = (msr & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT;

	/* The SEV-SNP CC blob should never be present unless SEV-SNP is enabled. */
	if (snp && !(msr & MSR_AMD64_SEV_SNP_ENABLED))
		snp_abort();

	/* Check if memory encryption is enabled */
	if (feature_mask == AMD_SME_BIT) {
		/*
		 * No SME if Hypervisor bit is set. This check is here to
		 * prevent a guest from trying to enable SME. For running as a
		 * KVM guest the MSR_AMD64_SYSCFG will be sufficient, but there
		 * might be other hypervisors which emulate that MSR as non-zero
		 * or even pass it through to the guest.
		 * A malicious hypervisor can still trick a guest into this
		 * path, but there is no way to protect against that.
		 */
		eax = 1;
		ecx = 0;
		native_cpuid(&eax, &ebx, &ecx, &edx);
		if (ecx & BIT(31))
			return;

		/* For SME, check the SYSCFG MSR */
		msr = __rdmsr(MSR_AMD64_SYSCFG);
		if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT))
			return;
	} else {
		/* SEV state cannot be controlled by a command line option */
		goto out;
	}

	/*
	 * Fixups have not been applied to phys_base yet and we're running
	 * identity mapped, so we must obtain the address to the SME command
	 * line argument data using rip-relative addressing.
	 */
	asm ("lea sme_cmdline_arg(%%rip), %0"
	     : "=r" (cmdline_arg)
	     : "p" (sme_cmdline_arg));
	asm ("lea sme_cmdline_on(%%rip), %0"
	     : "=r" (cmdline_on)
	     : "p" (sme_cmdline_on));

	cmdline_ptr = (const char *)((u64)bp->hdr.cmd_line_ptr |
				     ((u64)bp->ext_cmd_line_ptr << 32));

	if (cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer)) < 0 ||
	    strncmp(buffer, cmdline_on, sizeof(buffer)))
		return;

out:
	RIP_REL_REF(sme_me_mask) = me_mask;
	physical_mask &= ~me_mask;
	cc_vendor = CC_VENDOR_AMD;
	cc_set_mask(me_mask);
}