1 // SPDX-License-Identifier: GPL-2.0-only
3 * AMD Memory Encryption Support
5 * Copyright (C) 2019 SUSE
7 * Author: Joerg Roedel <jroedel@suse.de>
10 #define pr_fmt(fmt) "SEV: " fmt
12 #include <linux/sched/debug.h> /* For show_regs() */
13 #include <linux/percpu-defs.h>
14 #include <linux/cc_platform.h>
15 #include <linux/printk.h>
16 #include <linux/mm_types.h>
17 #include <linux/set_memory.h>
18 #include <linux/memblock.h>
19 #include <linux/kernel.h>
21 #include <linux/cpumask.h>
22 #include <linux/efi.h>
23 #include <linux/platform_device.h>
25 #include <linux/psp-sev.h>
26 #include <linux/dmi.h>
27 #include <uapi/linux/sev-guest.h>
30 #include <asm/cpu_entry_area.h>
31 #include <asm/stacktrace.h>
33 #include <asm/insn-eval.h>
34 #include <asm/fpu/xcr.h>
35 #include <asm/processor.h>
36 #include <asm/realmode.h>
37 #include <asm/setup.h>
38 #include <asm/traps.h>
43 #include <asm/cpuid.h>
44 #include <asm/cmdline.h>
46 #define DR7_RESET_VALUE 0x400
48 /* AP INIT values as documented in the APM2 section "Processor Initialization State" */
49 #define AP_INIT_CS_LIMIT 0xffff
50 #define AP_INIT_DS_LIMIT 0xffff
51 #define AP_INIT_LDTR_LIMIT 0xffff
52 #define AP_INIT_GDTR_LIMIT 0xffff
53 #define AP_INIT_IDTR_LIMIT 0xffff
54 #define AP_INIT_TR_LIMIT 0xffff
55 #define AP_INIT_RFLAGS_DEFAULT 0x2
56 #define AP_INIT_DR6_DEFAULT 0xffff0ff0
57 #define AP_INIT_GPAT_DEFAULT 0x0007040600070406ULL
58 #define AP_INIT_XCR0_DEFAULT 0x1
59 #define AP_INIT_X87_FTW_DEFAULT 0x5555
60 #define AP_INIT_X87_FCW_DEFAULT 0x0040
61 #define AP_INIT_CR0_DEFAULT 0x60000010
62 #define AP_INIT_MXCSR_DEFAULT 0x1f80
64 /* For early boot hypervisor communication in SEV-ES enabled guests */
65 static struct ghcb boot_ghcb_page __bss_decrypted __aligned(PAGE_SIZE);
68 * Needs to be in the .data section because we need it NULL before bss is cleared.
71 static struct ghcb *boot_ghcb __section(".data");
73 /* Bitmap of SEV features supported by the hypervisor */
74 static u64 sev_hv_features __ro_after_init;
76 /* #VC handler runtime per-CPU data */
77 struct sev_es_runtime_data {
78 struct ghcb ghcb_page;
81 * Reserve one page per CPU as backup storage for the unencrypted GHCB.
82 * It is needed when an NMI happens while the #VC handler uses the real
83 * GHCB, and the NMI handler itself is causing another #VC exception. In
84 * that case the GHCB content of the first handler needs to be backed up and restored.
87 struct ghcb backup_ghcb;
90 * Mark the per-cpu GHCBs as in-use to detect nested #VC exceptions.
91 * There is no need for it to be atomic, because nothing is written to
92 * the GHCB between the read and the write of ghcb_active. So it is safe
93 * to use it when a nested #VC exception happens before the write.
95 * This is necessary for example in the #VC->NMI->#VC case when the NMI
96 * happens while the first #VC handler uses the GHCB. When the NMI code
97 * raises a second #VC handler it might overwrite the contents of the
98 * GHCB written by the first handler. To avoid this the content of the
99 * GHCB is saved and restored when the GHCB is detected to be in use
103 bool backup_ghcb_active;
106 * Cached DR7 value - write it on DR7 writes and return it on reads.
107 * That value will never make it to the real hardware DR7 as debugging
108 * is currently unsupported in SEV-ES guests.
117 static DEFINE_PER_CPU(struct sev_es_runtime_data*, runtime_data);
118 DEFINE_STATIC_KEY_FALSE(sev_es_enable_key);
120 static DEFINE_PER_CPU(struct sev_es_save_area *, sev_vmsa);
127 static struct sev_config sev_cfg __read_mostly;
129 static __always_inline bool on_vc_stack(struct pt_regs *regs)
131 unsigned long sp = regs->sp;
133 /* User-mode RSP is not trusted */
137 /* SYSCALL gap still has user-mode RSP */
138 if (ip_within_syscall_gap(regs))
141 return ((sp >= __this_cpu_ist_bottom_va(VC)) && (sp < __this_cpu_ist_top_va(VC)));
145 * This function handles the case when an NMI is raised in the #VC
146 * exception handler entry code, before the #VC handler has switched off
147 * its IST stack. In this case, the IST entry for #VC must be adjusted,
148 * so that any nested #VC exception will not overwrite the stack
149 * contents of the interrupted #VC handler.
151 * The IST entry is adjusted unconditionally so that it can also be
152 * unconditionally adjusted back in __sev_es_ist_exit(). Otherwise a
153 * nested sev_es_ist_exit() call may adjust back the IST entry too early.
156 * The __sev_es_ist_enter() and __sev_es_ist_exit() functions always run
157 * on the NMI IST stack, as they are only called from NMI handling code
160 void noinstr __sev_es_ist_enter(struct pt_regs *regs)
162 unsigned long old_ist, new_ist;
164 /* Read old IST entry */
165 new_ist = old_ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);
168 * If NMI happened while on the #VC IST stack, set the new IST
169 * value below regs->sp, so that the interrupted stack frame is
170 * not overwritten by subsequent #VC exceptions.
172 if (on_vc_stack(regs))
176 * Reserve additional 8 bytes and store old IST value so this
177 * adjustment can be unrolled in __sev_es_ist_exit().
179 new_ist -= sizeof(old_ist);
180 *(unsigned long *)new_ist = old_ist;
182 /* Set new IST entry */
183 this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], new_ist);
186 void noinstr __sev_es_ist_exit(void)
191 ist = __this_cpu_read(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC]);
193 if (WARN_ON(ist == __this_cpu_ist_top_va(VC)))
196 /* Read back old IST entry and write it to the TSS */
197 this_cpu_write(cpu_tss_rw.x86_tss.ist[IST_INDEX_VC], *(unsigned long *)ist);
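/*
 * Illustrative walk-through of the adjust/unroll pair above: if the IST
 * entry was X when the NMI hit, __sev_es_ist_enter() picks new_ist = X - 8
 * (or regs->sp - 8 when the NMI interrupted code already on the #VC stack),
 * stores the old value X at that address and installs new_ist as the IST
 * entry. __sev_es_ist_exit() then simply reloads the saved value from
 * *(unsigned long *)ist and writes it back, which is why both adjustments
 * can be done unconditionally.
 */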
201 * Nothing shall interrupt this code path while holding the per-CPU
202 * GHCB. The backup GHCB is only for NMIs interrupting this path.
204 * Callers must disable local interrupts around it.
206 static noinstr struct ghcb *__sev_get_ghcb(struct ghcb_state *state)
208 struct sev_es_runtime_data *data;
211 WARN_ON(!irqs_disabled());
213 data = this_cpu_read(runtime_data);
214 ghcb = &data->ghcb_page;
216 if (unlikely(data->ghcb_active)) {
217 /* GHCB is already in use - save its contents */
219 if (unlikely(data->backup_ghcb_active)) {
221 * Backup-GHCB is also already in use. There is no way
222 * to continue here so just kill the machine. To make
223 * panic() work, mark GHCBs inactive so that messages
224 * can be printed out.
226 data->ghcb_active = false;
227 data->backup_ghcb_active = false;
229 instrumentation_begin();
230 panic("Unable to handle #VC exception! GHCB and Backup GHCB are already in use");
231 instrumentation_end();
234 /* Mark backup_ghcb active before writing to it */
235 data->backup_ghcb_active = true;
237 state->ghcb = &data->backup_ghcb;
239 /* Backup GHCB content */
240 *state->ghcb = *ghcb;
243 data->ghcb_active = true;
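/*
 * Typical caller pattern for the per-CPU GHCB (illustrative sketch, based on
 * the users later in this file such as get_jump_table_addr()):
 *
 *	local_irq_save(flags);
 *	ghcb = __sev_get_ghcb(&state);
 *	vc_ghcb_invalidate(ghcb);
 *	... fill GHCB fields and issue the VMGEXIT via sev_es_ghcb_hv_call() ...
 *	__sev_put_ghcb(&state);
 *	local_irq_restore(flags);
 */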
249 static inline u64 sev_es_rd_ghcb_msr(void)
251 return __rdmsr(MSR_AMD64_SEV_ES_GHCB);
254 static __always_inline void sev_es_wr_ghcb_msr(u64 val)
259 high = (u32)(val >> 32);
261 native_wrmsr(MSR_AMD64_SEV_ES_GHCB, low, high);
264 static int vc_fetch_insn_kernel(struct es_em_ctxt *ctxt,
265 unsigned char *buffer)
267 return copy_from_kernel_nofault(buffer, (unsigned char *)ctxt->regs->ip, MAX_INSN_SIZE);
270 static enum es_result __vc_decode_user_insn(struct es_em_ctxt *ctxt)
272 char buffer[MAX_INSN_SIZE];
275 insn_bytes = insn_fetch_from_user_inatomic(ctxt->regs, buffer);
276 if (insn_bytes == 0) {
277 /* Nothing could be copied */
278 ctxt->fi.vector = X86_TRAP_PF;
279 ctxt->fi.error_code = X86_PF_INSTR | X86_PF_USER;
280 ctxt->fi.cr2 = ctxt->regs->ip;
282 } else if (insn_bytes == -EINVAL) {
283 /* Effective RIP could not be calculated */
284 ctxt->fi.vector = X86_TRAP_GP;
285 ctxt->fi.error_code = 0;
290 if (!insn_decode_from_regs(&ctxt->insn, ctxt->regs, buffer, insn_bytes))
291 return ES_DECODE_FAILED;
293 if (ctxt->insn.immediate.got)
296 return ES_DECODE_FAILED;
299 static enum es_result __vc_decode_kern_insn(struct es_em_ctxt *ctxt)
301 char buffer[MAX_INSN_SIZE];
304 res = vc_fetch_insn_kernel(ctxt, buffer);
306 ctxt->fi.vector = X86_TRAP_PF;
307 ctxt->fi.error_code = X86_PF_INSTR;
308 ctxt->fi.cr2 = ctxt->regs->ip;
312 ret = insn_decode(&ctxt->insn, buffer, MAX_INSN_SIZE, INSN_MODE_64);
314 return ES_DECODE_FAILED;
319 static enum es_result vc_decode_insn(struct es_em_ctxt *ctxt)
321 if (user_mode(ctxt->regs))
322 return __vc_decode_user_insn(ctxt);
324 return __vc_decode_kern_insn(ctxt);
327 static enum es_result vc_write_mem(struct es_em_ctxt *ctxt,
328 char *dst, char *buf, size_t size)
330 unsigned long error_code = X86_PF_PROT | X86_PF_WRITE;
333 * This function uses __put_user() independent of whether kernel or user
334 * memory is accessed. This works fine because __put_user() does no
335 * sanity checks of the pointer being accessed. All that it does is
336 * to report when the access failed.
338 * Also, this function runs in atomic context, so __put_user() is not
339 * allowed to sleep. The page-fault handler detects that it is running
340 * in atomic context and will not try to take mmap_sem and handle the
341 * fault, so additional pagefault_enable()/disable() calls are not
344 * The access can't be done via copy_to_user() here because
345 * vc_write_mem() must not use string instructions to access unsafe
346 * memory. The reason is that MOVS is emulated by the #VC handler by
347 * splitting the move up into a read and a write and taking a nested #VC
348 * exception on whatever of them is the MMIO access. Using string
349 * instructions here would cause infinite nesting.
354 u8 __user *target = (u8 __user *)dst;
357 if (__put_user(d1, target))
363 u16 __user *target = (u16 __user *)dst;
366 if (__put_user(d2, target))
372 u32 __user *target = (u32 __user *)dst;
375 if (__put_user(d4, target))
381 u64 __user *target = (u64 __user *)dst;
384 if (__put_user(d8, target))
389 WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size);
390 return ES_UNSUPPORTED;
396 if (user_mode(ctxt->regs))
397 error_code |= X86_PF_USER;
399 ctxt->fi.vector = X86_TRAP_PF;
400 ctxt->fi.error_code = error_code;
401 ctxt->fi.cr2 = (unsigned long)dst;
406 static enum es_result vc_read_mem(struct es_em_ctxt *ctxt,
407 char *src, char *buf, size_t size)
409 unsigned long error_code = X86_PF_PROT;
412 * This function uses __get_user() independent of whether kernel or user
413 * memory is accessed. This works fine because __get_user() does no
414 * sanity checks of the pointer being accessed. All that it does is
415 * to report when the access failed.
417 * Also, this function runs in atomic context, so __get_user() is not
418 * allowed to sleep. The page-fault handler detects that it is running
419 * in atomic context and will not try to take mmap_sem and handle the
420 * fault, so additional pagefault_enable()/disable() calls are not
423 * The access can't be done via copy_from_user() here because
424 * vc_read_mem() must not use string instructions to access unsafe
425 * memory. The reason is that MOVS is emulated by the #VC handler by
426 * splitting the move up into a read and a write and taking a nested #VC
427 * exception on whatever of them is the MMIO access. Using string
428 * instructions here would cause infinite nesting.
433 u8 __user *s = (u8 __user *)src;
435 if (__get_user(d1, s))
442 u16 __user *s = (u16 __user *)src;
444 if (__get_user(d2, s))
451 u32 __user *s = (u32 __user *)src;
453 if (__get_user(d4, s))
460 u64 __user *s = (u64 __user *)src;
461 if (__get_user(d8, s))
467 WARN_ONCE(1, "%s: Invalid size: %zu\n", __func__, size);
468 return ES_UNSUPPORTED;
474 if (user_mode(ctxt->regs))
475 error_code |= X86_PF_USER;
477 ctxt->fi.vector = X86_TRAP_PF;
478 ctxt->fi.error_code = error_code;
479 ctxt->fi.cr2 = (unsigned long)src;
484 static enum es_result vc_slow_virt_to_phys(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
485 unsigned long vaddr, phys_addr_t *paddr)
487 unsigned long va = (unsigned long)vaddr;
493 pgd = __va(read_cr3_pa());
494 pgd = &pgd[pgd_index(va)];
495 pte = lookup_address_in_pgd(pgd, va, &level);
497 ctxt->fi.vector = X86_TRAP_PF;
498 ctxt->fi.cr2 = vaddr;
499 ctxt->fi.error_code = 0;
501 if (user_mode(ctxt->regs))
502 ctxt->fi.error_code |= X86_PF_USER;
507 if (WARN_ON_ONCE(pte_val(*pte) & _PAGE_ENC))
508 /* Emulated MMIO to/from encrypted memory not supported */
509 return ES_UNSUPPORTED;
511 pa = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
512 pa |= va & ~page_level_mask(level);
519 static enum es_result vc_ioio_check(struct es_em_ctxt *ctxt, u16 port, size_t size)
523 if (user_mode(ctxt->regs)) {
524 struct thread_struct *t = &current->thread;
525 struct io_bitmap *iobm = t->io_bitmap;
531 for (idx = port; idx < port + size; ++idx) {
532 if (test_bit(idx, iobm->bitmap))
540 ctxt->fi.vector = X86_TRAP_GP;
541 ctxt->fi.error_code = 0;
546 /* Include code shared with pre-decompression boot stage */
547 #include "sev-shared.c"
549 static noinstr void __sev_put_ghcb(struct ghcb_state *state)
551 struct sev_es_runtime_data *data;
554 WARN_ON(!irqs_disabled());
556 data = this_cpu_read(runtime_data);
557 ghcb = &data->ghcb_page;
560 /* Restore GHCB from Backup */
561 *ghcb = *state->ghcb;
562 data->backup_ghcb_active = false;
566 * Invalidate the GHCB so a VMGEXIT instruction issued
567 * from userspace won't appear to be valid.
569 vc_ghcb_invalidate(ghcb);
570 data->ghcb_active = false;
574 void noinstr __sev_es_nmi_complete(void)
576 struct ghcb_state state;
579 ghcb = __sev_get_ghcb(&state);
581 vc_ghcb_invalidate(ghcb);
582 ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_NMI_COMPLETE);
583 ghcb_set_sw_exit_info_1(ghcb, 0);
584 ghcb_set_sw_exit_info_2(ghcb, 0);
586 sev_es_wr_ghcb_msr(__pa_nodebug(ghcb));
589 __sev_put_ghcb(&state);
592 static u64 __init get_secrets_page(void)
594 u64 pa_data = boot_params.cc_blob_address;
595 struct cc_blob_sev_info info;
599 * The CC blob contains the address of the secrets page, check if the
605 map = early_memremap(pa_data, sizeof(info));
607 pr_err("Unable to locate SNP secrets page: failed to map the Confidential Computing blob.\n");
610 memcpy(&info, map, sizeof(info));
611 early_memunmap(map, sizeof(info));
613 /* smoke-test the secrets page passed */
614 if (!info.secrets_phys || info.secrets_len != PAGE_SIZE)
617 return info.secrets_phys;
620 static u64 __init get_snp_jump_table_addr(void)
622 struct snp_secrets_page_layout *layout;
626 pa = get_secrets_page();
630 mem = ioremap_encrypted(pa, PAGE_SIZE);
632 pr_err("Unable to locate AP jump table address: failed to map the SNP secrets page.\n");
636 layout = (__force struct snp_secrets_page_layout *)mem;
638 addr = layout->os_area.ap_jump_table_pa;
644 static u64 __init get_jump_table_addr(void)
646 struct ghcb_state state;
651 if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
652 return get_snp_jump_table_addr();
654 local_irq_save(flags);
656 ghcb = __sev_get_ghcb(&state);
658 vc_ghcb_invalidate(ghcb);
659 ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_JUMP_TABLE);
660 ghcb_set_sw_exit_info_1(ghcb, SVM_VMGEXIT_GET_AP_JUMP_TABLE);
661 ghcb_set_sw_exit_info_2(ghcb, 0);
663 sev_es_wr_ghcb_msr(__pa(ghcb));
666 if (ghcb_sw_exit_info_1_is_valid(ghcb) &&
667 ghcb_sw_exit_info_2_is_valid(ghcb))
668 ret = ghcb->save.sw_exit_info_2;
670 __sev_put_ghcb(&state);
672 local_irq_restore(flags);
677 static void pvalidate_pages(unsigned long vaddr, unsigned long npages, bool validate)
679 unsigned long vaddr_end;
682 vaddr = vaddr & PAGE_MASK;
683 vaddr_end = vaddr + (npages << PAGE_SHIFT);
685 while (vaddr < vaddr_end) {
686 rc = pvalidate(vaddr, RMP_PG_SIZE_4K, validate);
687 if (WARN(rc, "Failed to validate address 0x%lx ret %d", vaddr, rc))
688 sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PVALIDATE);
690 vaddr = vaddr + PAGE_SIZE;
694 static void __head early_set_pages_state(unsigned long paddr, unsigned long npages, enum psc_op op)
696 unsigned long paddr_end;
699 paddr = paddr & PAGE_MASK;
700 paddr_end = paddr + (npages << PAGE_SHIFT);
702 while (paddr < paddr_end) {
704 * Use the MSR protocol because this function can be called before
705 * the GHCB is established.
707 sev_es_wr_ghcb_msr(GHCB_MSR_PSC_REQ_GFN(paddr >> PAGE_SHIFT, op));
710 val = sev_es_rd_ghcb_msr();
712 if (WARN(GHCB_RESP_CODE(val) != GHCB_MSR_PSC_RESP,
713 "Wrong PSC response code: 0x%x\n",
714 (unsigned int)GHCB_RESP_CODE(val)))
717 if (WARN(GHCB_MSR_PSC_RESP_VAL(val),
718 "Failed to change page state to '%s' paddr 0x%lx error 0x%llx\n",
719 op == SNP_PAGE_STATE_PRIVATE ? "private" : "shared",
720 paddr, GHCB_MSR_PSC_RESP_VAL(val)))
723 paddr = paddr + PAGE_SIZE;
729 sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
732 void __head early_snp_set_memory_private(unsigned long vaddr, unsigned long paddr,
733 unsigned long npages)
736 * This can be invoked in early boot while running identity mapped, so
737 * use an open coded check for SNP instead of using cc_platform_has().
738 * This eliminates worries about jump tables or checking boot_cpu_data
739 * in the cc_platform_has() function.
741 if (!(RIP_REL_REF(sev_status) & MSR_AMD64_SEV_SNP_ENABLED))
745 * Ask the hypervisor to mark the memory pages as private in the RMP table.
748 early_set_pages_state(paddr, npages, SNP_PAGE_STATE_PRIVATE);
750 /* Validate the memory pages after they've been added in the RMP table. */
751 pvalidate_pages(vaddr, npages, true);
754 void __init early_snp_set_memory_shared(unsigned long vaddr, unsigned long paddr,
755 unsigned long npages)
758 * This can be invoked in early boot while running identity mapped, so
759 * use an open coded check for SNP instead of using cc_platform_has().
760 * This eliminates worries about jump tables or checking boot_cpu_data
761 * in the cc_platform_has() function.
763 if (!(RIP_REL_REF(sev_status) & MSR_AMD64_SEV_SNP_ENABLED))
766 /* Invalidate the memory pages before they are marked shared in the RMP table. */
767 pvalidate_pages(vaddr, npages, false);
769 /* Ask hypervisor to mark the memory pages shared in the RMP table. */
770 early_set_pages_state(paddr, npages, SNP_PAGE_STATE_SHARED);
773 static int vmgexit_psc(struct snp_psc_desc *desc)
775 int cur_entry, end_entry, ret = 0;
776 struct snp_psc_desc *data;
777 struct ghcb_state state;
778 struct es_em_ctxt ctxt;
783 * __sev_get_ghcb() needs to run with IRQs disabled because it is using a per-CPU GHCB.
786 local_irq_save(flags);
788 ghcb = __sev_get_ghcb(&state);
794 /* Copy the input desc into GHCB shared buffer */
795 data = (struct snp_psc_desc *)ghcb->shared_buffer;
796 memcpy(ghcb->shared_buffer, desc, min_t(int, GHCB_SHARED_BUF_SIZE, sizeof(*desc)));
799 * As per the GHCB specification, the hypervisor can resume the guest
800 * before processing all the entries. Check whether all the entries
801 * are processed. If not, then keep retrying. Note, the hypervisor
802 * will update the data memory directly to indicate the status, so
803 * reference the data->hdr everywhere.
805 * The strategy here is to wait for the hypervisor to change the page
806 * state in the RMP table before guest accesses the memory pages. If the
807 * page state change was not successful, then later memory access will result in a crash.
810 cur_entry = data->hdr.cur_entry;
811 end_entry = data->hdr.end_entry;
813 while (data->hdr.cur_entry <= data->hdr.end_entry) {
814 ghcb_set_sw_scratch(ghcb, (u64)__pa(data));
816 /* This will advance data->hdr.cur_entry as the hypervisor processes entries. */
817 ret = sev_es_ghcb_hv_call(ghcb, &ctxt, SVM_VMGEXIT_PSC, 0, 0);
820 * Page State Change VMGEXIT can pass error code through
823 if (WARN(ret || ghcb->save.sw_exit_info_2,
824 "SNP: PSC failed ret=%d exit_info_2=%llx\n",
825 ret, ghcb->save.sw_exit_info_2)) {
830 /* Verify that reserved bit is not set */
831 if (WARN(data->hdr.reserved, "Reserved bit is set in the PSC header\n")) {
837 * Sanity check that entry processing is not going backwards.
838 * This will happen only if the hypervisor is tricking us.
840 if (WARN(data->hdr.end_entry > end_entry || cur_entry > data->hdr.cur_entry,
841 "SNP: PSC processing going backward, end_entry %d (got %d) cur_entry %d (got %d)\n",
842 end_entry, data->hdr.end_entry, cur_entry, data->hdr.cur_entry)) {
849 __sev_put_ghcb(&state);
852 local_irq_restore(flags);
857 static void __set_pages_state(struct snp_psc_desc *data, unsigned long vaddr,
858 unsigned long vaddr_end, int op)
868 memset(data, 0, sizeof(*data));
871 while (vaddr < vaddr_end) {
872 if (is_vmalloc_addr((void *)vaddr))
873 pfn = vmalloc_to_pfn((void *)vaddr);
875 pfn = __pa(vaddr) >> PAGE_SHIFT;
882 * Current SNP implementation doesn't keep track of the RMP page
883 * size so use 4K for simplicity.
885 e->pagesize = RMP_PG_SIZE_4K;
887 vaddr = vaddr + PAGE_SIZE;
892 if (vmgexit_psc(data))
893 sev_es_terminate(SEV_TERM_SET_LINUX, GHCB_TERM_PSC);
896 static void set_pages_state(unsigned long vaddr, unsigned long npages, int op)
898 unsigned long vaddr_end, next_vaddr;
899 struct snp_psc_desc *desc;
901 desc = kmalloc(sizeof(*desc), GFP_KERNEL_ACCOUNT);
903 panic("SNP: failed to allocate memory for PSC descriptor\n");
905 vaddr = vaddr & PAGE_MASK;
906 vaddr_end = vaddr + (npages << PAGE_SHIFT);
908 while (vaddr < vaddr_end) {
909 /* Calculate the last vaddr that fits in one struct snp_psc_desc. */
910 next_vaddr = min_t(unsigned long, vaddr_end,
911 (VMGEXIT_PSC_MAX_ENTRY * PAGE_SIZE) + vaddr);
913 __set_pages_state(desc, vaddr, next_vaddr, op);
921 void snp_set_memory_shared(unsigned long vaddr, unsigned long npages)
923 if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
926 pvalidate_pages(vaddr, npages, false);
928 set_pages_state(vaddr, npages, SNP_PAGE_STATE_SHARED);
931 void snp_set_memory_private(unsigned long vaddr, unsigned long npages)
933 if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
936 set_pages_state(vaddr, npages, SNP_PAGE_STATE_PRIVATE);
938 pvalidate_pages(vaddr, npages, true);
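/*
 * Ordering summary for the two conversions above (illustrative):
 *
 *	shared  -> private:  set_pages_state(PRIVATE), then pvalidate_pages(true)
 *	private -> shared:   pvalidate_pages(false),   then set_pages_state(SHARED)
 *
 * i.e. a page is only PVALIDATEd while the RMP table marks it private.
 */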
941 static int snp_set_vmsa(void *va, bool vmsa)
946 * Running at VMPL0 allows the kernel to change the VMSA bit for a page
947 * using the RMPADJUST instruction. However, for the instruction to
948 * succeed it must target the permissions of a lesser privileged
949 * (higher numbered) VMPL level, so use VMPL1 (refer to the RMPADJUST
950 * instruction in the AMD64 APM Volume 3).
954 attrs |= RMPADJUST_VMSA_PAGE_BIT;
956 return rmpadjust((unsigned long)va, RMP_PG_SIZE_4K, attrs);
959 #define __ATTR_BASE (SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK)
960 #define INIT_CS_ATTRIBS (__ATTR_BASE | SVM_SELECTOR_READ_MASK | SVM_SELECTOR_CODE_MASK)
961 #define INIT_DS_ATTRIBS (__ATTR_BASE | SVM_SELECTOR_WRITE_MASK)
963 #define INIT_LDTR_ATTRIBS (SVM_SELECTOR_P_MASK | 2)
964 #define INIT_TR_ATTRIBS (SVM_SELECTOR_P_MASK | 3)
966 static void *snp_alloc_vmsa_page(void)
971 * Allocate VMSA page to work around the SNP erratum where the CPU will
972 * incorrectly signal an RMP violation #PF if a large page (2MB or 1GB)
973 * collides with the RMP entry of VMSA page. The recommended workaround
974 * is to not use a large page.
976 * Allocate an 8k page which is also 8k-aligned.
978 p = alloc_pages(GFP_KERNEL_ACCOUNT | __GFP_ZERO, 1);
984 /* Free the first 4k. This page may be 2M/1G aligned and cannot be used. */
987 return page_address(p + 1);
990 static void snp_cleanup_vmsa(struct sev_es_save_area *vmsa)
994 err = snp_set_vmsa(vmsa, false);
996 pr_err("clear VMSA page failed (%u), leaking page\n", err);
998 free_page((unsigned long)vmsa);
1001 static int wakeup_cpu_via_vmgexit(int apic_id, unsigned long start_ip)
1003 struct sev_es_save_area *cur_vmsa, *vmsa;
1004 struct ghcb_state state;
1005 unsigned long flags;
1012 * The hypervisor SNP feature support check has happened earlier, just check
1013 * the AP_CREATION one here.
1015 if (!(sev_hv_features & GHCB_HV_FT_SNP_AP_CREATION))
1019 * Verify the desired start IP against the known trampoline start IP
1020 * to catch any future new trampolines that may be introduced that
1021 * would require a new protected guest entry point.
1023 if (WARN_ONCE(start_ip != real_mode_header->trampoline_start,
1024 "Unsupported SNP start_ip: %lx\n", start_ip))
1027 /* Override start_ip with known protected guest start IP */
1028 start_ip = real_mode_header->sev_es_trampoline_start;
1030 /* Find the logical CPU for the APIC ID */
1031 for_each_present_cpu(cpu) {
1032 if (arch_match_cpu_phys_id(cpu, apic_id))
1035 if (cpu >= nr_cpu_ids)
1038 cur_vmsa = per_cpu(sev_vmsa, cpu);
1041 * A new VMSA is created each time because there is no guarantee that
1042 * the current VMSA is the kernel's or that the vCPU is not running. If
1043 * an attempt was made to use the current VMSA with a running vCPU, a
1044 * #VMEXIT of that vCPU would wipe out all of the settings being done
1047 vmsa = (struct sev_es_save_area *)snp_alloc_vmsa_page();
1051 /* CR4 should maintain the MCE value */
1052 cr4 = native_read_cr4() & X86_CR4_MCE;
1054 /* Set the CS value based on the start_ip converted to a SIPI vector */
1055 sipi_vector = (start_ip >> 12);
1056 vmsa->cs.base = sipi_vector << 12;
1057 vmsa->cs.limit = AP_INIT_CS_LIMIT;
1058 vmsa->cs.attrib = INIT_CS_ATTRIBS;
1059 vmsa->cs.selector = sipi_vector << 8;
1061 /* Set the RIP value based on start_ip */
1062 vmsa->rip = start_ip & 0xfff;
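/*
 * Example (illustrative): for start_ip = 0x9a000 the SIPI vector is 0x9a,
 * giving CS.base = 0x9a000, CS.selector = 0x9a00 and RIP = 0, so the AP
 * starts at 0x9a00:0x0000 == 0x9a000, exactly as it would after a real
 * INIT-SIPI sequence with vector 0x9a.
 */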
1064 /* Set AP INIT defaults as documented in the APM */
1065 vmsa->ds.limit = AP_INIT_DS_LIMIT;
1066 vmsa->ds.attrib = INIT_DS_ATTRIBS;
1067 vmsa->es = vmsa->ds;
1068 vmsa->fs = vmsa->ds;
1069 vmsa->gs = vmsa->ds;
1070 vmsa->ss = vmsa->ds;
1072 vmsa->gdtr.limit = AP_INIT_GDTR_LIMIT;
1073 vmsa->ldtr.limit = AP_INIT_LDTR_LIMIT;
1074 vmsa->ldtr.attrib = INIT_LDTR_ATTRIBS;
1075 vmsa->idtr.limit = AP_INIT_IDTR_LIMIT;
1076 vmsa->tr.limit = AP_INIT_TR_LIMIT;
1077 vmsa->tr.attrib = INIT_TR_ATTRIBS;
1080 vmsa->cr0 = AP_INIT_CR0_DEFAULT;
1081 vmsa->dr7 = DR7_RESET_VALUE;
1082 vmsa->dr6 = AP_INIT_DR6_DEFAULT;
1083 vmsa->rflags = AP_INIT_RFLAGS_DEFAULT;
1084 vmsa->g_pat = AP_INIT_GPAT_DEFAULT;
1085 vmsa->xcr0 = AP_INIT_XCR0_DEFAULT;
1086 vmsa->mxcsr = AP_INIT_MXCSR_DEFAULT;
1087 vmsa->x87_ftw = AP_INIT_X87_FTW_DEFAULT;
1088 vmsa->x87_fcw = AP_INIT_X87_FCW_DEFAULT;
1090 /* SVME must be set. */
1091 vmsa->efer = EFER_SVME;
1094 * Set the SNP-specific fields for this VMSA:
1096 * SEV_FEATURES (matches the SEV STATUS MSR right shifted 2 bits)
1099 vmsa->sev_features = sev_status >> 2;
1101 /* Switch the page over to a VMSA page now that it is initialized */
1102 ret = snp_set_vmsa(vmsa, true);
1104 pr_err("set VMSA page failed (%u)\n", ret);
1105 free_page((unsigned long)vmsa);
1110 /* Issue VMGEXIT AP Creation NAE event */
1111 local_irq_save(flags);
1113 ghcb = __sev_get_ghcb(&state);
1115 vc_ghcb_invalidate(ghcb);
1116 ghcb_set_rax(ghcb, vmsa->sev_features);
1117 ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_CREATION);
1118 ghcb_set_sw_exit_info_1(ghcb, ((u64)apic_id << 32) | SVM_VMGEXIT_AP_CREATE);
1119 ghcb_set_sw_exit_info_2(ghcb, __pa(vmsa));
1121 sev_es_wr_ghcb_msr(__pa(ghcb));
1124 if (!ghcb_sw_exit_info_1_is_valid(ghcb) ||
1125 lower_32_bits(ghcb->save.sw_exit_info_1)) {
1126 pr_err("SNP AP Creation error\n");
1130 __sev_put_ghcb(&state);
1132 local_irq_restore(flags);
1134 /* Perform cleanup if there was an error */
1136 snp_cleanup_vmsa(vmsa);
1140 /* Free up any previous VMSA page */
1142 snp_cleanup_vmsa(cur_vmsa);
1144 /* Record the current VMSA page */
1145 per_cpu(sev_vmsa, cpu) = vmsa;
1150 void snp_set_wakeup_secondary_cpu(void)
1152 if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
1156 * Always set this override if SNP is enabled. This makes it the
1157 * required method to start APs under SNP. If the hypervisor does
1158 * not support AP creation, then no APs will be started.
1160 apic->wakeup_secondary_cpu = wakeup_cpu_via_vmgexit;
1163 int __init sev_es_setup_ap_jump_table(struct real_mode_header *rmh)
1165 u16 startup_cs, startup_ip;
1166 phys_addr_t jump_table_pa;
1167 u64 jump_table_addr;
1168 u16 __iomem *jump_table;
1170 jump_table_addr = get_jump_table_addr();
1172 /* On UP guests there is no jump table so this is not a failure */
1173 if (!jump_table_addr)
1176 /* Check if AP Jump Table is page-aligned */
1177 if (jump_table_addr & ~PAGE_MASK)
1180 jump_table_pa = jump_table_addr & PAGE_MASK;
1182 startup_cs = (u16)(rmh->trampoline_start >> 4);
1183 startup_ip = (u16)(rmh->sev_es_trampoline_start -
1184 rmh->trampoline_start);
1186 jump_table = ioremap_encrypted(jump_table_pa, PAGE_SIZE);
1190 writew(startup_ip, &jump_table[0]);
1191 writew(startup_cs, &jump_table[1]);
1193 iounmap(jump_table);
1199 * This is needed by the OVMF UEFI firmware which will use whatever it finds in
1200 * the GHCB MSR as its GHCB to talk to the hypervisor. So make sure the per-cpu
1201 * runtime GHCBs used by the kernel are also mapped in the EFI page-table.
1203 int __init sev_es_efi_map_ghcbs(pgd_t *pgd)
1205 struct sev_es_runtime_data *data;
1206 unsigned long address, pflags;
1210 if (!cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
1213 pflags = _PAGE_NX | _PAGE_RW;
1215 for_each_possible_cpu(cpu) {
1216 data = per_cpu(runtime_data, cpu);
1218 address = __pa(&data->ghcb_page);
1219 pfn = address >> PAGE_SHIFT;
1221 if (kernel_map_pages_in_pgd(pgd, pfn, address, 1, pflags))
1228 static enum es_result vc_handle_msr(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
1230 struct pt_regs *regs = ctxt->regs;
1234 /* Is it a WRMSR? */
1235 exit_info_1 = (ctxt->insn.opcode.bytes[1] == 0x30) ? 1 : 0;
1237 ghcb_set_rcx(ghcb, regs->cx);
1239 ghcb_set_rax(ghcb, regs->ax);
1240 ghcb_set_rdx(ghcb, regs->dx);
1243 ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_MSR, exit_info_1, 0);
1245 if ((ret == ES_OK) && (!exit_info_1)) {
1246 regs->ax = ghcb->save.rax;
1247 regs->dx = ghcb->save.rdx;
1253 static void snp_register_per_cpu_ghcb(void)
1255 struct sev_es_runtime_data *data;
1258 data = this_cpu_read(runtime_data);
1259 ghcb = &data->ghcb_page;
1261 snp_register_ghcb_early(__pa(ghcb));
1264 void setup_ghcb(void)
1266 if (!cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
1270 * Check whether the runtime #VC exception handler is active. It uses
1271 * the per-CPU GHCB page which is set up by sev_es_init_vc_handling().
1273 * If SNP is active, register the per-CPU GHCB page so that the runtime
1274 * exception handler can use it.
1276 if (initial_vc_handler == (unsigned long)kernel_exc_vmm_communication) {
1277 if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
1278 snp_register_per_cpu_ghcb();
1284 * Make sure the hypervisor talks a supported protocol.
1285 * This gets called only in the BSP boot phase.
1287 if (!sev_es_negotiate_protocol())
1288 sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
1291 * Clear the boot_ghcb. The first exception comes in before the bss
1292 * section is cleared.
1294 memset(&boot_ghcb_page, 0, PAGE_SIZE);
1296 /* Alright - Make the boot-ghcb public */
1297 boot_ghcb = &boot_ghcb_page;
1299 /* SNP guest requires that GHCB GPA must be registered. */
1300 if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
1301 snp_register_ghcb_early(__pa(&boot_ghcb_page));
1304 #ifdef CONFIG_HOTPLUG_CPU
1305 static void sev_es_ap_hlt_loop(void)
1307 struct ghcb_state state;
1310 ghcb = __sev_get_ghcb(&state);
1313 vc_ghcb_invalidate(ghcb);
1314 ghcb_set_sw_exit_code(ghcb, SVM_VMGEXIT_AP_HLT_LOOP);
1315 ghcb_set_sw_exit_info_1(ghcb, 0);
1316 ghcb_set_sw_exit_info_2(ghcb, 0);
1318 sev_es_wr_ghcb_msr(__pa(ghcb));
1321 /* Wakeup signal? */
1322 if (ghcb_sw_exit_info_2_is_valid(ghcb) &&
1323 ghcb->save.sw_exit_info_2)
1327 __sev_put_ghcb(&state);
1331 * Play_dead handler when running under SEV-ES. This is needed because
1332 * the hypervisor can't deliver an SIPI request to restart the AP.
1333 * Instead the kernel has to issue a VMGEXIT to halt the VCPU until the
1334 * hypervisor wakes it up again.
1336 static void sev_es_play_dead(void)
1340 /* IRQs now disabled */
1342 sev_es_ap_hlt_loop();
1345 * If we get here, the VCPU was woken up again. Jump to CPU
1346 * startup code to get it back online.
1350 #else /* CONFIG_HOTPLUG_CPU */
1351 #define sev_es_play_dead native_play_dead
1352 #endif /* CONFIG_HOTPLUG_CPU */
1355 static void __init sev_es_setup_play_dead(void)
1357 smp_ops.play_dead = sev_es_play_dead;
1360 static inline void sev_es_setup_play_dead(void) { }
1363 static void __init alloc_runtime_data(int cpu)
1365 struct sev_es_runtime_data *data;
1367 data = memblock_alloc(sizeof(*data), PAGE_SIZE);
1369 panic("Can't allocate SEV-ES runtime data");
1371 per_cpu(runtime_data, cpu) = data;
1374 static void __init init_ghcb(int cpu)
1376 struct sev_es_runtime_data *data;
1379 data = per_cpu(runtime_data, cpu);
1381 err = early_set_memory_decrypted((unsigned long)&data->ghcb_page,
1382 sizeof(data->ghcb_page));
1384 panic("Can't map GHCBs unencrypted");
1386 memset(&data->ghcb_page, 0, sizeof(data->ghcb_page));
1388 data->ghcb_active = false;
1389 data->backup_ghcb_active = false;
1392 void __init sev_es_init_vc_handling(void)
1396 BUILD_BUG_ON(offsetof(struct sev_es_runtime_data, ghcb_page) % PAGE_SIZE);
1398 if (!cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
1401 if (!sev_es_check_cpu_features())
1402 panic("SEV-ES CPU Features missing");
1405 * SNP is supported in v2 of the GHCB spec which mandates support for HV features.
1408 if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP)) {
1409 sev_hv_features = get_hv_features();
1411 if (!(sev_hv_features & GHCB_HV_FT_SNP))
1412 sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
1415 /* Enable SEV-ES special handling */
1416 static_branch_enable(&sev_es_enable_key);
1418 /* Initialize per-cpu GHCB pages */
1419 for_each_possible_cpu(cpu) {
1420 alloc_runtime_data(cpu);
1424 sev_es_setup_play_dead();
1426 /* Secondary CPUs use the runtime #VC handler */
1427 initial_vc_handler = (unsigned long)kernel_exc_vmm_communication;
1430 static void __init vc_early_forward_exception(struct es_em_ctxt *ctxt)
1432 int trapnr = ctxt->fi.vector;
1434 if (trapnr == X86_TRAP_PF)
1435 native_write_cr2(ctxt->fi.cr2);
1437 ctxt->regs->orig_ax = ctxt->fi.error_code;
1438 do_early_exception(ctxt->regs, trapnr);
1441 static long *vc_insn_get_rm(struct es_em_ctxt *ctxt)
1446 reg_array = (long *)ctxt->regs;
1447 offset = insn_get_modrm_rm_off(&ctxt->insn, ctxt->regs);
1452 offset /= sizeof(long);
1454 return reg_array + offset;
1456 static enum es_result vc_do_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt,
1457 unsigned int bytes, bool read)
1459 u64 exit_code, exit_info_1, exit_info_2;
1460 unsigned long ghcb_pa = __pa(ghcb);
1465 ref = insn_get_addr_ref(&ctxt->insn, ctxt->regs);
1466 if (ref == (void __user *)-1L)
1467 return ES_UNSUPPORTED;
1469 exit_code = read ? SVM_VMGEXIT_MMIO_READ : SVM_VMGEXIT_MMIO_WRITE;
1471 res = vc_slow_virt_to_phys(ghcb, ctxt, (unsigned long)ref, &paddr);
1473 if (res == ES_EXCEPTION && !read)
1474 ctxt->fi.error_code |= X86_PF_WRITE;
1479 exit_info_1 = paddr;
1480 /* Can never be greater than 8 */
1481 exit_info_2 = bytes;
1483 ghcb_set_sw_scratch(ghcb, ghcb_pa + offsetof(struct ghcb, shared_buffer));
1485 return sev_es_ghcb_hv_call(ghcb, ctxt, exit_code, exit_info_1, exit_info_2);
1489 * The MOVS instruction has two memory operands, which raises the
1490 * problem that it is not known whether the access to the source or the
1491 * destination caused the #VC exception (and hence whether an MMIO read
1492 * or write operation needs to be emulated).
1494 * Instead of playing games with walking page-tables and trying to guess
1495 * whether the source or destination is an MMIO range, split the move
1496 * into two operations, a read and a write with only one memory operand.
1497 * This will cause a nested #VC exception on the MMIO address which can
1500 * This implementation has the benefit that it also supports MOVS where
1501 * source _and_ destination are MMIO regions.
1503 * It will slow MOVS on MMIO down a lot, but in SEV-ES guests it is a
1504 * rare operation. If it turns out to be a performance problem the split
1505 * operations can be moved to memcpy_fromio() and memcpy_toio().
1507 static enum es_result vc_handle_mmio_movs(struct es_em_ctxt *ctxt,
1510 unsigned long ds_base, es_base;
1511 unsigned char *src, *dst;
1512 unsigned char buffer[8];
1517 ds_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_DS);
1518 es_base = insn_get_seg_base(ctxt->regs, INAT_SEG_REG_ES);
1520 if (ds_base == -1L || es_base == -1L) {
1521 ctxt->fi.vector = X86_TRAP_GP;
1522 ctxt->fi.error_code = 0;
1523 return ES_EXCEPTION;
1526 src = ds_base + (unsigned char *)ctxt->regs->si;
1527 dst = es_base + (unsigned char *)ctxt->regs->di;
1529 ret = vc_read_mem(ctxt, src, buffer, bytes);
1533 ret = vc_write_mem(ctxt, dst, buffer, bytes);
1537 if (ctxt->regs->flags & X86_EFLAGS_DF)
1542 ctxt->regs->si += off;
1543 ctxt->regs->di += off;
1545 rep = insn_has_rep_prefix(&ctxt->insn);
1547 ctxt->regs->cx -= 1;
1549 if (!rep || ctxt->regs->cx == 0)
1555 static enum es_result vc_handle_mmio(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
1557 struct insn *insn = &ctxt->insn;
1558 unsigned int bytes = 0;
1559 enum mmio_type mmio;
1564 mmio = insn_decode_mmio(insn, &bytes);
1565 if (mmio == MMIO_DECODE_FAILED)
1566 return ES_DECODE_FAILED;
1568 if (mmio != MMIO_WRITE_IMM && mmio != MMIO_MOVS) {
1569 reg_data = insn_get_modrm_reg_ptr(insn, ctxt->regs);
1571 return ES_DECODE_FAILED;
1574 if (user_mode(ctxt->regs))
1575 return ES_UNSUPPORTED;
1579 memcpy(ghcb->shared_buffer, reg_data, bytes);
1580 ret = vc_do_mmio(ghcb, ctxt, bytes, false);
1582 case MMIO_WRITE_IMM:
1583 memcpy(ghcb->shared_buffer, insn->immediate1.bytes, bytes);
1584 ret = vc_do_mmio(ghcb, ctxt, bytes, false);
1587 ret = vc_do_mmio(ghcb, ctxt, bytes, true);
1591 /* Zero-extend for 32-bit operation */
1595 memcpy(reg_data, ghcb->shared_buffer, bytes);
1597 case MMIO_READ_ZERO_EXTEND:
1598 ret = vc_do_mmio(ghcb, ctxt, bytes, true);
1602 /* Zero extend based on operand size */
1603 memset(reg_data, 0, insn->opnd_bytes);
1604 memcpy(reg_data, ghcb->shared_buffer, bytes);
1606 case MMIO_READ_SIGN_EXTEND:
1607 ret = vc_do_mmio(ghcb, ctxt, bytes, true);
1612 u8 *val = (u8 *)ghcb->shared_buffer;
1614 sign_byte = (*val & 0x80) ? 0xff : 0x00;
1616 u16 *val = (u16 *)ghcb->shared_buffer;
1618 sign_byte = (*val & 0x8000) ? 0xff : 0x00;
1621 /* Sign extend based on operand size */
1622 memset(reg_data, sign_byte, insn->opnd_bytes);
1623 memcpy(reg_data, ghcb->shared_buffer, bytes);
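/*
 * Example (illustrative): a 2-byte MMIO read returning 0x8001 with a 4-byte
 * destination operand yields sign_byte = 0xff, so after the memset()/memcpy()
 * pair above the register ends up as 0xffff8001.
 */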
1626 ret = vc_handle_mmio_movs(ctxt, bytes);
1629 ret = ES_UNSUPPORTED;
1636 static enum es_result vc_handle_dr7_write(struct ghcb *ghcb,
1637 struct es_em_ctxt *ctxt)
1639 struct sev_es_runtime_data *data = this_cpu_read(runtime_data);
1640 long val, *reg = vc_insn_get_rm(ctxt);
1644 return ES_DECODE_FAILED;
1648 /* Upper 32 bits must be written as zeroes */
1650 ctxt->fi.vector = X86_TRAP_GP;
1651 ctxt->fi.error_code = 0;
1652 return ES_EXCEPTION;
1655 /* Clear out other reserved bits and set bit 10 */
1656 val = (val & 0xffff23ffL) | BIT(10);
1658 /* Early non-zero writes to DR7 are not supported */
1659 if (!data && (val & ~DR7_RESET_VALUE))
1660 return ES_UNSUPPORTED;
1662 /* Using a value of 0 for ExitInfo1 means RAX holds the value */
1663 ghcb_set_rax(ghcb, val);
1664 ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_WRITE_DR7, 0, 0);
1674 static enum es_result vc_handle_dr7_read(struct ghcb *ghcb,
1675 struct es_em_ctxt *ctxt)
1677 struct sev_es_runtime_data *data = this_cpu_read(runtime_data);
1678 long *reg = vc_insn_get_rm(ctxt);
1681 return ES_DECODE_FAILED;
1686 *reg = DR7_RESET_VALUE;
1691 static enum es_result vc_handle_wbinvd(struct ghcb *ghcb,
1692 struct es_em_ctxt *ctxt)
1694 return sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_WBINVD, 0, 0);
1697 static enum es_result vc_handle_rdpmc(struct ghcb *ghcb, struct es_em_ctxt *ctxt)
1701 ghcb_set_rcx(ghcb, ctxt->regs->cx);
1703 ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_RDPMC, 0, 0);
1707 if (!(ghcb_rax_is_valid(ghcb) && ghcb_rdx_is_valid(ghcb)))
1708 return ES_VMM_ERROR;
1710 ctxt->regs->ax = ghcb->save.rax;
1711 ctxt->regs->dx = ghcb->save.rdx;
1716 static enum es_result vc_handle_monitor(struct ghcb *ghcb,
1717 struct es_em_ctxt *ctxt)
1720 * Treat it as a NOP and do not leak a physical address to the hypervisor.
1726 static enum es_result vc_handle_mwait(struct ghcb *ghcb,
1727 struct es_em_ctxt *ctxt)
1729 /* Treat the same as MONITOR/MONITORX */
1733 static enum es_result vc_handle_vmmcall(struct ghcb *ghcb,
1734 struct es_em_ctxt *ctxt)
1738 ghcb_set_rax(ghcb, ctxt->regs->ax);
1739 ghcb_set_cpl(ghcb, user_mode(ctxt->regs) ? 3 : 0);
1741 if (x86_platform.hyper.sev_es_hcall_prepare)
1742 x86_platform.hyper.sev_es_hcall_prepare(ghcb, ctxt->regs);
1744 ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_VMMCALL, 0, 0);
1748 if (!ghcb_rax_is_valid(ghcb))
1749 return ES_VMM_ERROR;
1751 ctxt->regs->ax = ghcb->save.rax;
1754 * Call sev_es_hcall_finish() after regs->ax is already set.
1755 * This allows the hypervisor handler to overwrite it again if necessary.
1758 if (x86_platform.hyper.sev_es_hcall_finish &&
1759 !x86_platform.hyper.sev_es_hcall_finish(ghcb, ctxt->regs))
1760 return ES_VMM_ERROR;
1765 static enum es_result vc_handle_trap_ac(struct ghcb *ghcb,
1766 struct es_em_ctxt *ctxt)
1769 * Calling exc_alignment_check() directly does not work, because it
1770 * enables IRQs and the GHCB is active. Forward the exception and call
1771 * it later from vc_forward_exception().
1773 ctxt->fi.vector = X86_TRAP_AC;
1774 ctxt->fi.error_code = 0;
1775 return ES_EXCEPTION;
1778 static enum es_result vc_handle_exitcode(struct es_em_ctxt *ctxt,
1780 unsigned long exit_code)
1782 enum es_result result;
1784 switch (exit_code) {
1785 case SVM_EXIT_READ_DR7:
1786 result = vc_handle_dr7_read(ghcb, ctxt);
1788 case SVM_EXIT_WRITE_DR7:
1789 result = vc_handle_dr7_write(ghcb, ctxt);
1791 case SVM_EXIT_EXCP_BASE + X86_TRAP_AC:
1792 result = vc_handle_trap_ac(ghcb, ctxt);
1794 case SVM_EXIT_RDTSC:
1795 case SVM_EXIT_RDTSCP:
1796 result = vc_handle_rdtsc(ghcb, ctxt, exit_code);
1798 case SVM_EXIT_RDPMC:
1799 result = vc_handle_rdpmc(ghcb, ctxt);
1802 pr_err_ratelimited("#VC exception for INVD??? Seriously???\n");
1803 result = ES_UNSUPPORTED;
1805 case SVM_EXIT_CPUID:
1806 result = vc_handle_cpuid(ghcb, ctxt);
1809 result = vc_handle_ioio(ghcb, ctxt);
1812 result = vc_handle_msr(ghcb, ctxt);
1814 case SVM_EXIT_VMMCALL:
1815 result = vc_handle_vmmcall(ghcb, ctxt);
1817 case SVM_EXIT_WBINVD:
1818 result = vc_handle_wbinvd(ghcb, ctxt);
1820 case SVM_EXIT_MONITOR:
1821 result = vc_handle_monitor(ghcb, ctxt);
1823 case SVM_EXIT_MWAIT:
1824 result = vc_handle_mwait(ghcb, ctxt);
1827 result = vc_handle_mmio(ghcb, ctxt);
1831 * Unexpected #VC exception
1833 result = ES_UNSUPPORTED;
1839 static __always_inline void vc_forward_exception(struct es_em_ctxt *ctxt)
1841 long error_code = ctxt->fi.error_code;
1842 int trapnr = ctxt->fi.vector;
1844 ctxt->regs->orig_ax = ctxt->fi.error_code;
1848 exc_general_protection(ctxt->regs, error_code);
1851 exc_invalid_op(ctxt->regs);
1854 write_cr2(ctxt->fi.cr2);
1855 exc_page_fault(ctxt->regs, error_code);
1858 exc_alignment_check(ctxt->regs, error_code);
1861 pr_emerg("Unsupported exception in #VC instruction emulation - can't continue\n");
1866 static __always_inline bool is_vc2_stack(unsigned long sp)
1868 return (sp >= __this_cpu_ist_bottom_va(VC2) && sp < __this_cpu_ist_top_va(VC2));
1871 static __always_inline bool vc_from_invalid_context(struct pt_regs *regs)
1873 unsigned long sp, prev_sp;
1875 sp = (unsigned long)regs;
1879 * If the code was already executing on the VC2 stack when the #VC
1880 * happened, let it proceed to the normal handling routine. This way the
1881 * code executing on the VC2 stack can cause #VC exceptions to get handled.
1883 return is_vc2_stack(sp) && !is_vc2_stack(prev_sp);
1886 static bool vc_raw_handle_exception(struct pt_regs *regs, unsigned long error_code)
1888 struct ghcb_state state;
1889 struct es_em_ctxt ctxt;
1890 enum es_result result;
1894 ghcb = __sev_get_ghcb(&state);
1896 vc_ghcb_invalidate(ghcb);
1897 result = vc_init_em_ctxt(&ctxt, regs, error_code);
1899 if (result == ES_OK)
1900 result = vc_handle_exitcode(&ctxt, ghcb, error_code);
1902 __sev_put_ghcb(&state);
1904 /* Done - now check the result */
1907 vc_finish_insn(&ctxt);
1909 case ES_UNSUPPORTED:
1910 pr_err_ratelimited("Unsupported exit-code 0x%02lx in #VC exception (IP: 0x%lx)\n",
1911 error_code, regs->ip);
1915 pr_err_ratelimited("Failure in communication with VMM (exit-code 0x%02lx IP: 0x%lx)\n",
1916 error_code, regs->ip);
1919 case ES_DECODE_FAILED:
1920 pr_err_ratelimited("Failed to decode instruction (exit-code 0x%02lx IP: 0x%lx)\n",
1921 error_code, regs->ip);
1925 vc_forward_exception(&ctxt);
1931 pr_emerg("Unknown result in %s():%d\n", __func__, result);
1933 * Emulating the instruction which caused the #VC exception
1934 * failed - can't continue so print debug information
1942 static __always_inline bool vc_is_db(unsigned long error_code)
1944 return error_code == SVM_EXIT_EXCP_BASE + X86_TRAP_DB;
1948 * Runtime #VC exception handler when raised from kernel mode. Runs in NMI mode
1949 * and will panic when an error happens.
1951 DEFINE_IDTENTRY_VC_KERNEL(exc_vmm_communication)
1953 irqentry_state_t irq_state;
1956 * With the current implementation it is always possible to switch to a
1957 * safe stack because #VC exceptions only happen at known places, like
1958 * intercepted instructions or accesses to MMIO areas/IO ports. They can
1959 * also happen with code instrumentation when the hypervisor intercepts
1960 * #DB, but the critical paths are forbidden to be instrumented, so #DB
1961 * exceptions currently also only happen in safe places.
1963 * But keep this here in case the noinstr annotations are violated due
1966 if (unlikely(vc_from_invalid_context(regs))) {
1967 instrumentation_begin();
1968 panic("Can't handle #VC exception from unsupported context\n");
1969 instrumentation_end();
1973 * Handle #DB before calling into !noinstr code to avoid recursive #DB.
1975 if (vc_is_db(error_code)) {
1980 irq_state = irqentry_nmi_enter(regs);
1982 instrumentation_begin();
1984 if (!vc_raw_handle_exception(regs, error_code)) {
1985 /* Show some debug info */
1988 /* Ask hypervisor to sev_es_terminate */
1989 sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
1991 /* If that fails and we get here - just panic */
1992 panic("Returned from Terminate-Request to Hypervisor\n");
1995 instrumentation_end();
1996 irqentry_nmi_exit(regs, irq_state);
2000 * Runtime #VC exception handler when raised from user mode. Runs in IRQ mode
2001 * and will kill the current task with SIGBUS when an error happens.
2003 DEFINE_IDTENTRY_VC_USER(exc_vmm_communication)
2006 * Handle #DB before calling into !noinstr code to avoid recursive #DB.
2008 if (vc_is_db(error_code)) {
2009 noist_exc_debug(regs);
2013 irqentry_enter_from_user_mode(regs);
2014 instrumentation_begin();
2016 if (!vc_raw_handle_exception(regs, error_code)) {
2018 * Do not kill the machine if user-space triggered the
2019 * exception. Send SIGBUS instead and let user-space deal with it.
2022 force_sig_fault(SIGBUS, BUS_OBJERR, (void __user *)0);
2025 instrumentation_end();
2026 irqentry_exit_to_user_mode(regs);
2029 bool __init handle_vc_boot_ghcb(struct pt_regs *regs)
2031 unsigned long exit_code = regs->orig_ax;
2032 struct es_em_ctxt ctxt;
2033 enum es_result result;
2035 vc_ghcb_invalidate(boot_ghcb);
2037 result = vc_init_em_ctxt(&ctxt, regs, exit_code);
2038 if (result == ES_OK)
2039 result = vc_handle_exitcode(&ctxt, boot_ghcb, exit_code);
2041 /* Done - now check the result */
2044 vc_finish_insn(&ctxt);
2046 case ES_UNSUPPORTED:
2047 early_printk("PANIC: Unsupported exit-code 0x%02lx in early #VC exception (IP: 0x%lx)\n",
2048 exit_code, regs->ip);
2051 early_printk("PANIC: Failure in communication with VMM (exit-code 0x%02lx IP: 0x%lx)\n",
2052 exit_code, regs->ip);
2054 case ES_DECODE_FAILED:
2055 early_printk("PANIC: Failed to decode instruction (exit-code 0x%02lx IP: 0x%lx)\n",
2056 exit_code, regs->ip);
2059 vc_early_forward_exception(&ctxt);
2073 sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SEV_ES_GEN_REQ);
2077 * Initial set up of SNP relies on information provided by the
2078 * Confidential Computing blob, which can be passed to the kernel
2079 * in the following ways, depending on how it is booted:
2081 * - when booted via the boot/decompress kernel:
2084 * - when booted directly by firmware/bootloader (e.g. CONFIG_PVH):
2085 * - via a setup_data entry, as defined by the Linux Boot Protocol
2087 * Scan for the blob in that order.
2089 static __head struct cc_blob_sev_info *find_cc_blob(struct boot_params *bp)
2091 struct cc_blob_sev_info *cc_info;
2093 /* Boot kernel would have passed the CC blob via boot_params. */
2094 if (bp->cc_blob_address) {
2095 cc_info = (struct cc_blob_sev_info *)(unsigned long)bp->cc_blob_address;
2100 * If kernel was booted directly, without the use of the
2101 * boot/decompression kernel, the CC blob may have been passed via
2102 * setup_data instead.
2104 cc_info = find_cc_blob_setup_data(bp);
2109 if (cc_info->magic != CC_BLOB_SEV_HDR_MAGIC)
2115 bool __head snp_init(struct boot_params *bp)
2117 struct cc_blob_sev_info *cc_info;
2122 cc_info = find_cc_blob(bp);
2126 setup_cpuid_table(cc_info);
2129 * The CC blob will be used later to access the secrets page. Cache
2130 * it here like the boot kernel does.
2132 bp->cc_blob_address = (u32)(unsigned long)cc_info;
2137 void __head __noreturn snp_abort(void)
2139 sev_es_terminate(SEV_TERM_SET_GEN, GHCB_SNP_UNSUPPORTED);
2143 * SEV-SNP guests should only execute dmi_setup() if EFI_CONFIG_TABLES are
2144 * enabled, as the alternative (fallback) logic for DMI probing in the legacy
2145 * ROM region can cause a crash since this region is not pre-validated.
2147 void __init snp_dmi_setup(void)
2149 if (efi_enabled(EFI_CONFIG_TABLES))
2153 static void dump_cpuid_table(void)
2155 const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();
2158 pr_info("count=%d reserved=0x%x reserved2=0x%llx\n",
2159 cpuid_table->count, cpuid_table->__reserved1, cpuid_table->__reserved2);
2161 for (i = 0; i < SNP_CPUID_COUNT_MAX; i++) {
2162 const struct snp_cpuid_fn *fn = &cpuid_table->fn[i];
2164 pr_info("index=%3d fn=0x%08x subfn=0x%08x: eax=0x%08x ebx=0x%08x ecx=0x%08x edx=0x%08x xcr0_in=0x%016llx xss_in=0x%016llx reserved=0x%016llx\n",
2165 i, fn->eax_in, fn->ecx_in, fn->eax, fn->ebx, fn->ecx,
2166 fn->edx, fn->xcr0_in, fn->xss_in, fn->__reserved);
2171 * It is useful from an auditing/testing perspective to provide an easy way
2172 * for the guest owner to know that the CPUID table has been initialized as
2173 * expected, but that initialization happens too early in boot to print any
2174 * sort of indicator, and there's not really any other good place to do it, so do it here.
2177 static int __init report_cpuid_table(void)
2179 const struct snp_cpuid_table *cpuid_table = snp_cpuid_get_table();
2181 if (!cpuid_table->count)
2184 pr_info("Using SNP CPUID table, %d entries present.\n",
2185 cpuid_table->count);
2192 arch_initcall(report_cpuid_table);
2194 static int __init init_sev_config(char *str)
2198 while ((s = strsep(&str, ","))) {
2199 if (!strcmp(s, "debug")) {
2200 sev_cfg.debug = true;
2204 pr_info("SEV command-line option '%s' was not recognized\n", s);
2209 __setup("sev=", init_sev_config);
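/*
 * Example (illustrative): booting with "sev=debug" on the kernel command
 * line sets sev_cfg.debug and enables the additional SEV/SNP debug output
 * that is guarded by that flag.
 */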
2211 int snp_issue_guest_request(u64 exit_code, struct snp_req_data *input, struct snp_guest_request_ioctl *rio)
2213 struct ghcb_state state;
2214 struct es_em_ctxt ctxt;
2215 unsigned long flags;
2219 rio->exitinfo2 = SEV_RET_NO_FW_CALL;
2222 * __sev_get_ghcb() needs to run with IRQs disabled because it is using a per-CPU GHCB.
2225 local_irq_save(flags);
2227 ghcb = __sev_get_ghcb(&state);
2233 vc_ghcb_invalidate(ghcb);
2235 if (exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST) {
2236 ghcb_set_rax(ghcb, input->data_gpa);
2237 ghcb_set_rbx(ghcb, input->data_npages);
2240 ret = sev_es_ghcb_hv_call(ghcb, &ctxt, exit_code, input->req_gpa, input->resp_gpa);
2244 rio->exitinfo2 = ghcb->save.sw_exit_info_2;
2245 switch (rio->exitinfo2) {
2249 case SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_BUSY):
2253 case SNP_GUEST_VMM_ERR(SNP_GUEST_VMM_ERR_INVALID_LEN):
2254 /* The number of expected pages is returned in RBX */
2255 if (exit_code == SVM_VMGEXIT_EXT_GUEST_REQUEST) {
2256 input->data_npages = ghcb_get_rbx(ghcb);
2267 __sev_put_ghcb(&state);
2269 local_irq_restore(flags);
2273 EXPORT_SYMBOL_GPL(snp_issue_guest_request);
2275 static struct platform_device sev_guest_device = {
2276 .name = "sev-guest",
2280 static int __init snp_init_platform_device(void)
2282 struct sev_guest_platform_data data;
2285 if (!cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
2288 gpa = get_secrets_page();
2292 data.secrets_gpa = gpa;
2293 if (platform_device_add_data(&sev_guest_device, &data, sizeof(data)))
2296 if (platform_device_register(&sev_guest_device))
2299 pr_info("SNP guest platform device initialized.\n");
2302 device_initcall(snp_init_platform_device);