// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 */

#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/kernel.h>
#include <linux/highmem.h>
#include <linux/psp-sev.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/misc_cgroup.h>
#include <linux/processor.h>
#include <linux/trace_events.h>

#include <asm/trapnr.h>
#include <asm/fpu/xcr.h>

#ifndef CONFIG_KVM_AMD_SEV
/*
 * When this config is not defined, SEV feature is not supported and APIs in
 * this file are not used but this file still gets compiled into the KVM AMD
 * module.
 *
 * We will not have MISC_CG_RES_SEV and MISC_CG_RES_SEV_ES entries in the enum
 * misc_res_type {} defined in linux/misc_cgroup.h.
 *
 * Below macros allow compilation to succeed.
 */
#define MISC_CG_RES_SEV MISC_CG_RES_TYPES
#define MISC_CG_RES_SEV_ES MISC_CG_RES_TYPES
#endif

#ifdef CONFIG_KVM_AMD_SEV
/* enable/disable SEV support */
static bool sev_enabled = true;
module_param_named(sev, sev_enabled, bool, 0444);

/* enable/disable SEV-ES support */
static bool sev_es_enabled = true;
module_param_named(sev_es, sev_es_enabled, bool, 0444);
#else
#define sev_enabled false
#define sev_es_enabled false
#endif /* CONFIG_KVM_AMD_SEV */

static u8 sev_enc_bit;
static DECLARE_RWSEM(sev_deactivate_lock);
static DEFINE_MUTEX(sev_bitmap_lock);
unsigned int max_sev_asid;
static unsigned int min_sev_asid;
static unsigned long sev_me_mask;
static unsigned int nr_asids;
static unsigned long *sev_asid_bitmap;
static unsigned long *sev_reclaim_asid_bitmap;

struct enc_region {
	struct list_head list;
	unsigned long npages;
	struct page **pages;
	unsigned long uaddr;
	unsigned long size;
};

/* Called with the sev_bitmap_lock held, or on shutdown */
static int sev_flush_asids(unsigned int min_asid, unsigned int max_asid)
{
	unsigned int asid;
	int ret, error = 0;

	/* Check if there are any ASIDs to reclaim before performing a flush */
	asid = find_next_bit(sev_reclaim_asid_bitmap, nr_asids, min_asid);
	if (asid > max_asid)
		return -EBUSY;

	/*
	 * DEACTIVATE will clear the WBINVD indicator causing DF_FLUSH to fail,
	 * so it must be guarded.
	 */
	down_write(&sev_deactivate_lock);
	wbinvd_on_all_cpus();
	ret = sev_guest_df_flush(&error);
	up_write(&sev_deactivate_lock);

	if (ret)
		pr_err("SEV: DF_FLUSH failed, ret=%d, error=%#x\n", ret, error);

	return ret;
}

static inline bool is_mirroring_enc_context(struct kvm *kvm)
{
	return !!to_kvm_svm(kvm)->sev_info.enc_context_owner;
}

/* Must be called with the sev_bitmap_lock held */
static bool __sev_recycle_asids(unsigned int min_asid, unsigned int max_asid)
{
	if (sev_flush_asids(min_asid, max_asid))
		return false;

	/* The flush process will flush all reclaimable SEV and SEV-ES ASIDs */
	bitmap_xor(sev_asid_bitmap, sev_asid_bitmap, sev_reclaim_asid_bitmap,
		   nr_asids);
	bitmap_zero(sev_reclaim_asid_bitmap, nr_asids);

	return true;
}

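/*
 * SEV and SEV-ES ASIDs are tracked as separate resources in the misc cgroup
 * controller; each guest charges exactly one ASID against the cgroup that
 * was current when its ASID was allocated.
 */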
static int sev_misc_cg_try_charge(struct kvm_sev_info *sev)
{
	enum misc_res_type type = sev->es_active ? MISC_CG_RES_SEV_ES : MISC_CG_RES_SEV;
	return misc_cg_try_charge(type, sev->misc_cg, 1);
}

static void sev_misc_cg_uncharge(struct kvm_sev_info *sev)
{
	enum misc_res_type type = sev->es_active ? MISC_CG_RES_SEV_ES : MISC_CG_RES_SEV;
	misc_cg_uncharge(type, sev->misc_cg, 1);
}

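/*
 * Allocate an ASID for a new guest: charge the misc cgroup first, then scan
 * the allocation bitmap. If the range is exhausted, try once to recycle
 * ASIDs parked on sev_reclaim_asid_bitmap (which requires a firmware
 * DF_FLUSH) before giving up with -EBUSY.
 */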
static int sev_asid_new(struct kvm_sev_info *sev)
{
	/*
	 * SEV-enabled guests must use asid from min_sev_asid to max_sev_asid.
	 * SEV-ES-enabled guest can use from 1 to min_sev_asid - 1.
	 * Note: min ASID can end up larger than the max if basic SEV support is
	 * effectively disabled by disallowing use of ASIDs for SEV guests.
	 */
	unsigned int min_asid = sev->es_active ? 1 : min_sev_asid;
	unsigned int max_asid = sev->es_active ? min_sev_asid - 1 : max_sev_asid;
	unsigned int asid;
	bool retry = true;
	int ret;

	if (min_asid > max_asid)
		return -ENOTTY;

	WARN_ON(sev->misc_cg);
	sev->misc_cg = get_current_misc_cg();
	ret = sev_misc_cg_try_charge(sev);
	if (ret) {
		put_misc_cg(sev->misc_cg);
		sev->misc_cg = NULL;
		return ret;
	}

	mutex_lock(&sev_bitmap_lock);

again:
	asid = find_next_zero_bit(sev_asid_bitmap, max_asid + 1, min_asid);
	if (asid > max_asid) {
		if (retry && __sev_recycle_asids(min_asid, max_asid)) {
			retry = false;
			goto again;
		}
		mutex_unlock(&sev_bitmap_lock);
		ret = -EBUSY;
		goto e_uncharge;
	}

	__set_bit(asid, sev_asid_bitmap);

	mutex_unlock(&sev_bitmap_lock);

	return asid;

e_uncharge:
	sev_misc_cg_uncharge(sev);
	put_misc_cg(sev->misc_cg);
	sev->misc_cg = NULL;
	return ret;
}

static unsigned int sev_get_asid(struct kvm *kvm)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return sev->asid;
}

static void sev_asid_free(struct kvm_sev_info *sev)
{
	struct svm_cpu_data *sd;
	int cpu;

	mutex_lock(&sev_bitmap_lock);

	__set_bit(sev->asid, sev_reclaim_asid_bitmap);

	for_each_possible_cpu(cpu) {
		sd = per_cpu_ptr(&svm_data, cpu);
		sd->sev_vmcbs[sev->asid] = NULL;
	}

	mutex_unlock(&sev_bitmap_lock);

	sev_misc_cg_uncharge(sev);
	put_misc_cg(sev->misc_cg);
	sev->misc_cg = NULL;
}

static void sev_decommission(unsigned int handle)
{
	struct sev_data_decommission decommission;

	if (!handle)
		return;

	decommission.handle = handle;
	sev_guest_decommission(&decommission, NULL);
}

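/*
 * Tear down the firmware's view of a guest: DEACTIVATE detaches the handle
 * from its ASID and DECOMMISSION then releases the handle itself. The ASID
 * is not returned to the allocator here; sev_asid_free() parks it on the
 * reclaim bitmap until a DF_FLUSH makes it safe to reuse.
 */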
static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
{
	struct sev_data_deactivate deactivate;

	if (!handle)
		return;

	deactivate.handle = handle;

	/* Guard DEACTIVATE against WBINVD/DF_FLUSH used in ASID recycling */
	down_read(&sev_deactivate_lock);
	sev_guest_deactivate(&deactivate, NULL);
	up_read(&sev_deactivate_lock);

	sev_decommission(handle);
}

static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	int asid, ret;

	if (kvm->created_vcpus)
		return -EINVAL;

	ret = -EBUSY;
	if (unlikely(sev->active))
		return ret;

	sev->active = true;
	sev->es_active = argp->id == KVM_SEV_ES_INIT;
	asid = sev_asid_new(sev);
	if (asid < 0)
		goto e_no_asid;
	sev->asid = asid;

	ret = sev_platform_init(&argp->error);
	if (ret)
		goto e_free;

	INIT_LIST_HEAD(&sev->regions_list);
	INIT_LIST_HEAD(&sev->mirror_vms);

	kvm_set_apicv_inhibit(kvm, APICV_INHIBIT_REASON_SEV);

	return 0;

e_free:
	sev_asid_free(sev);
	sev->asid = 0;
e_no_asid:
	sev->es_active = false;
	sev->active = false;
	return ret;
}

static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error)
{
	unsigned int asid = sev_get_asid(kvm);
	struct sev_data_activate activate;
	int ret;

	/* activate ASID on the given handle */
	activate.handle = handle;
	activate.asid = asid;
	ret = sev_guest_activate(&activate, error);

	return ret;
}

static int __sev_issue_cmd(int fd, int id, void *data, int *error)
{
	struct fd f;
	int ret;

	f = fdget(fd);
	if (!f.file)
		return -EBADF;

	ret = sev_issue_cmd_external_user(f.file, id, data, error);

	fdput(f);
	return ret;
}

static int sev_issue_cmd(struct kvm *kvm, int id, void *data, int *error)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	return __sev_issue_cmd(sev->fd, id, data, error);
}

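/*
 * KVM_SEV_LAUNCH_START: ask the PSP firmware to create the guest's memory
 * encryption context. The optional DH certificate and session blobs select
 * the key exchange with the guest owner; on success the returned firmware
 * handle is bound to this VM's ASID via ACTIVATE.
 */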
static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_launch_start start;
	struct kvm_sev_launch_start params;
	void *dh_blob, *session_blob;
	int *error = &argp->error;
	int ret;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
		return -EFAULT;

	memset(&start, 0, sizeof(start));

	dh_blob = NULL;
	if (params.dh_uaddr) {
		dh_blob = psp_copy_user_blob(params.dh_uaddr, params.dh_len);
		if (IS_ERR(dh_blob))
			return PTR_ERR(dh_blob);

		start.dh_cert_address = __sme_set(__pa(dh_blob));
		start.dh_cert_len = params.dh_len;
	}

	session_blob = NULL;
	if (params.session_uaddr) {
		session_blob = psp_copy_user_blob(params.session_uaddr, params.session_len);
		if (IS_ERR(session_blob)) {
			ret = PTR_ERR(session_blob);
			goto e_free_dh;
		}

		start.session_address = __sme_set(__pa(session_blob));
		start.session_len = params.session_len;
	}

	start.handle = params.handle;
	start.policy = params.policy;

	/* create memory encryption context */
	ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_LAUNCH_START, &start, error);
	if (ret)
		goto e_free_session;

	/* Bind ASID to this guest */
	ret = sev_bind_asid(kvm, start.handle, error);
	if (ret) {
		sev_decommission(start.handle);
		goto e_free_session;
	}

	/* return handle to userspace */
	params.handle = start.handle;
	if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params))) {
		sev_unbind_asid(kvm, start.handle);
		ret = -EFAULT;
		goto e_free_session;
	}

	sev->handle = start.handle;
	sev->fd = argp->sev_fd;

e_free_session:
	kfree(session_blob);
e_free_dh:
	kfree(dh_blob);
	return ret;
}

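/*
 * Pin a user address range and return the page array. The pages are charged
 * against RLIMIT_MEMLOCK (tracked in sev->pages_locked), so unprivileged
 * callers cannot pin more than their memlock limit allows.
 */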
static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
				    unsigned long ulen, unsigned long *n,
				    int write)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	unsigned long npages, size;
	int npinned;
	unsigned long locked, lock_limit;
	struct page **pages;
	unsigned long first, last;
	int ret;

	lockdep_assert_held(&kvm->lock);

	if (ulen == 0 || uaddr + ulen < uaddr)
		return ERR_PTR(-EINVAL);

	/* Calculate number of pages. */
	first = (uaddr & PAGE_MASK) >> PAGE_SHIFT;
	last = ((uaddr + ulen - 1) & PAGE_MASK) >> PAGE_SHIFT;
	npages = (last - first + 1);

	locked = sev->pages_locked + npages;
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
		pr_err("SEV: %lu locked pages exceed the lock limit of %lu.\n", locked, lock_limit);
		return ERR_PTR(-ENOMEM);
	}

	if (WARN_ON_ONCE(npages > INT_MAX))
		return ERR_PTR(-EINVAL);

	/* Avoid using vmalloc for smaller buffers. */
	size = npages * sizeof(struct page *);
	if (size > PAGE_SIZE)
		pages = __vmalloc(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO);
	else
		pages = kmalloc(size, GFP_KERNEL_ACCOUNT);

	if (!pages)
		return ERR_PTR(-ENOMEM);

	/* Pin the user virtual address. */
	npinned = pin_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
	if (npinned != npages) {
		pr_err("SEV: Failure locking %lu pages.\n", npages);
		ret = -ENOMEM;
		goto err;
	}

	*n = npages;
	sev->pages_locked = locked;

	return pages;

err:
	if (npinned > 0)
		unpin_user_pages(pages, npinned);

	kvfree(pages);
	return ERR_PTR(ret);
}

static void sev_unpin_memory(struct kvm *kvm, struct page **pages,
			     unsigned long npages)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;

	unpin_user_pages(pages, npages);
	kvfree(pages);
	sev->pages_locked -= npages;
}

static void sev_clflush_pages(struct page *pages[], unsigned long npages)
{
	uint8_t *page_virtual;
	unsigned long i;

	if (this_cpu_has(X86_FEATURE_SME_COHERENT) || npages == 0 ||
	    pages == NULL)
		return;

	for (i = 0; i < npages; i++) {
		page_virtual = kmap_atomic(pages[i]);
		clflush_cache_range(page_virtual, PAGE_SIZE);
		kunmap_atomic(page_virtual);
		cond_resched();
	}
}

static unsigned long get_num_contig_pages(unsigned long idx,
					  struct page **inpages, unsigned long npages)
{
	unsigned long paddr, next_paddr;
	unsigned long i = idx + 1, pages = 1;

	/* find the number of contiguous pages starting from idx */
	paddr = __sme_page_pa(inpages[idx]);
	while (i < npages) {
		next_paddr = __sme_page_pa(inpages[i++]);
		if ((paddr + PAGE_SIZE) == next_paddr) {
			pages++;
			paddr = next_paddr;
			continue;
		}
		break;
	}

	return pages;
}

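/*
 * KVM_SEV_LAUNCH_UPDATE_DATA: encrypt guest pages in place. The firmware
 * takes a system physical address, so each LAUNCH_UPDATE_DATA command covers
 * the longest physically contiguous run of pinned pages found by
 * get_num_contig_pages().
 */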
static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	unsigned long vaddr, vaddr_end, next_vaddr, npages, pages, size, i;
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct kvm_sev_launch_update_data params;
	struct sev_data_launch_update_data data;
	struct page **inpages;
	int ret;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
		return -EFAULT;

	vaddr = params.uaddr;
	size = params.len;
	vaddr_end = vaddr + size;

	/* Lock the user memory. */
	inpages = sev_pin_memory(kvm, vaddr, size, &npages, 1);
	if (IS_ERR(inpages))
		return PTR_ERR(inpages);

	/*
	 * Flush (on non-coherent CPUs) before LAUNCH_UPDATE encrypts pages in
	 * place; the cache may contain the data that was written unencrypted.
	 */
	sev_clflush_pages(inpages, npages);

	data.reserved = 0;
	data.handle = sev->handle;

	for (i = 0; vaddr < vaddr_end; vaddr = next_vaddr, i += pages) {
		int offset, len;

		/*
		 * If the user buffer is not page-aligned, calculate the offset
		 * within the page.
		 */
		offset = vaddr & (PAGE_SIZE - 1);

		/* Calculate the number of pages that can be encrypted in one go. */
		pages = get_num_contig_pages(i, inpages, npages);

		len = min_t(size_t, ((pages * PAGE_SIZE) - offset), size);

		data.len = len;
		data.address = __sme_page_pa(inpages[i]) + offset;
		ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_DATA, &data, &argp->error);
		if (ret)
			goto e_unpin;

		size -= len;
		next_vaddr = vaddr + len;
	}

e_unpin:
	/* content of memory is updated, mark pages dirty */
	for (i = 0; i < npages; i++) {
		set_page_dirty_lock(inpages[i]);
		mark_page_accessed(inpages[i]);
	}
	/* unlock the user pages */
	sev_unpin_memory(kvm, inpages, npages);
	return ret;
}

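/*
 * Build the initial SEV-ES VMSA from the current VMCB save area plus the
 * GPRs and the few non-GPR registers KVM tracks separately; LAUNCH_UPDATE_VMSA
 * then encrypts this page in place and it becomes opaque to KVM.
 */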
static int sev_es_sync_vmsa(struct vcpu_svm *svm)
{
	struct sev_es_save_area *save = svm->sev_es.vmsa;

	/* Check some debug related fields before encrypting the VMSA */
	if (svm->vcpu.guest_debug || (svm->vmcb->save.dr7 & ~DR7_FIXED_1))
		return -EINVAL;

	/*
	 * SEV-ES will use a VMSA that is pointed to by the VMCB, not
	 * the traditional VMSA that is part of the VMCB. Copy the
	 * traditional VMSA as it has been built so far (in prep
	 * for LAUNCH_UPDATE_VMSA) to be the initial SEV-ES state.
	 */
	memcpy(save, &svm->vmcb->save, sizeof(svm->vmcb->save));

	/* Sync registers */
	save->rax = svm->vcpu.arch.regs[VCPU_REGS_RAX];
	save->rbx = svm->vcpu.arch.regs[VCPU_REGS_RBX];
	save->rcx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
	save->rdx = svm->vcpu.arch.regs[VCPU_REGS_RDX];
	save->rsp = svm->vcpu.arch.regs[VCPU_REGS_RSP];
	save->rbp = svm->vcpu.arch.regs[VCPU_REGS_RBP];
	save->rsi = svm->vcpu.arch.regs[VCPU_REGS_RSI];
	save->rdi = svm->vcpu.arch.regs[VCPU_REGS_RDI];

	save->r8  = svm->vcpu.arch.regs[VCPU_REGS_R8];
	save->r9  = svm->vcpu.arch.regs[VCPU_REGS_R9];
	save->r10 = svm->vcpu.arch.regs[VCPU_REGS_R10];
	save->r11 = svm->vcpu.arch.regs[VCPU_REGS_R11];
	save->r12 = svm->vcpu.arch.regs[VCPU_REGS_R12];
	save->r13 = svm->vcpu.arch.regs[VCPU_REGS_R13];
	save->r14 = svm->vcpu.arch.regs[VCPU_REGS_R14];
	save->r15 = svm->vcpu.arch.regs[VCPU_REGS_R15];

	save->rip = svm->vcpu.arch.regs[VCPU_REGS_RIP];

	/* Sync some non-GPR registers before encrypting */
	save->xcr0 = svm->vcpu.arch.xcr0;
	save->pkru = svm->vcpu.arch.pkru;
	save->xss  = svm->vcpu.arch.ia32_xss;
	save->dr6  = svm->vcpu.arch.dr6;

	pr_debug("Virtual Machine Save Area (VMSA):\n");
	print_hex_dump_debug("", DUMP_PREFIX_NONE, 16, 1, save, sizeof(*save), false);

	return 0;
}

static int __sev_launch_update_vmsa(struct kvm *kvm, struct kvm_vcpu *vcpu,
				    int *error)
{
	struct sev_data_launch_update_vmsa vmsa;
	struct vcpu_svm *svm = to_svm(vcpu);
	int ret;

	/* Perform some pre-encryption checks against the VMSA */
	ret = sev_es_sync_vmsa(svm);
	if (ret)
		return ret;

	/*
	 * The LAUNCH_UPDATE_VMSA command will perform in-place encryption of
	 * the VMSA memory content (i.e. it will write the same memory region
	 * with the guest's key), so invalidate it first.
	 */
	clflush_cache_range(svm->sev_es.vmsa, PAGE_SIZE);

	vmsa.reserved = 0;
	vmsa.handle = to_kvm_svm(kvm)->sev_info.handle;
	vmsa.address = __sme_pa(svm->sev_es.vmsa);
	vmsa.len = PAGE_SIZE;
	ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_VMSA, &vmsa, error);
	if (ret)
		return ret;

	vcpu->arch.guest_state_protected = true;
	return 0;
}

static int sev_launch_update_vmsa(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_vcpu *vcpu;
	unsigned long i;
	int ret;

	if (!sev_es_guest(kvm))
		return -ENOTTY;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		ret = mutex_lock_killable(&vcpu->mutex);
		if (ret)
			return ret;

		ret = __sev_launch_update_vmsa(kvm, vcpu, &argp->error);

		mutex_unlock(&vcpu->mutex);
		if (ret)
			return ret;
	}

	return 0;
}

static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	void __user *measure = (void __user *)(uintptr_t)argp->data;
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_launch_measure data;
	struct kvm_sev_launch_measure params;
	void __user *p = NULL;
	void *blob = NULL;
	int ret;

	if (copy_from_user(&params, measure, sizeof(params)))
		return -EFAULT;

	memset(&data, 0, sizeof(data));

	/* User wants to query the blob length */
	if (!params.len)
		goto cmd;

	p = (void __user *)(uintptr_t)params.uaddr;
	if (p) {
		if (params.len > SEV_FW_BLOB_MAX_SIZE)
			return -EINVAL;

		blob = kzalloc(params.len, GFP_KERNEL_ACCOUNT);
		if (!blob)
			return -ENOMEM;

		data.address = __psp_pa(blob);
		data.len = params.len;
	}

cmd:
	data.handle = sev->handle;
	ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_MEASURE, &data, &argp->error);

	/*
	 * If we query the session length, FW responded with expected data.
	 */
	if (!params.len)
		goto done;

	if (ret)
		goto e_free_blob;

	if (blob) {
		if (copy_to_user(p, blob, params.len))
			ret = -EFAULT;
	}

done:
	params.len = data.len;
	if (copy_to_user(measure, &params, sizeof(params)))
		ret = -EFAULT;
e_free_blob:
	kfree(blob);
	return ret;
}

static int sev_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_launch_finish data;

	data.handle = sev->handle;
	return sev_issue_cmd(kvm, SEV_CMD_LAUNCH_FINISH, &data, &argp->error);
}

static int sev_guest_status(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct kvm_sev_guest_status params;
	struct sev_data_guest_status data;
	int ret;

	memset(&data, 0, sizeof(data));

	data.handle = sev->handle;
	ret = sev_issue_cmd(kvm, SEV_CMD_GUEST_STATUS, &data, &argp->error);
	if (ret)
		return ret;

	params.policy = data.policy;
	params.state = data.state;
	params.handle = data.handle;

	if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params)))
		ret = -EFAULT;

	return ret;
}

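/*
 * DBG_DECRYPT/DBG_ENCRYPT helpers. The firmware operates on system physical
 * addresses in 16-byte granules, so the wrappers below bounce unaligned
 * requests through intermediate pages.
 */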
static int __sev_issue_dbg_cmd(struct kvm *kvm, unsigned long src,
			       unsigned long dst, int size,
			       int *error, bool enc)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_dbg data;

	data.reserved = 0;
	data.handle = sev->handle;
	data.dst_addr = dst;
	data.src_addr = src;
	data.len = size;

	return sev_issue_cmd(kvm,
			     enc ? SEV_CMD_DBG_ENCRYPT : SEV_CMD_DBG_DECRYPT,
			     &data, error);
}

static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long src_paddr,
			     unsigned long dst_paddr, int sz, int *err)
{
	int offset;

	/*
	 * It's safe to read more than we are asked, caller should ensure that
	 * destination has enough space.
	 */
	offset = src_paddr & 15;
	src_paddr = round_down(src_paddr, 16);
	sz = round_up(sz + offset, 16);

	return __sev_issue_dbg_cmd(kvm, src_paddr, dst_paddr, sz, err, false);
}

static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr,
				  void __user *dst_uaddr,
				  unsigned long dst_paddr,
				  int size, int *err)
{
	struct page *tpage = NULL;
	int ret, offset;

	/* if inputs are not 16-byte aligned then use an intermediate buffer */
	if (!IS_ALIGNED(dst_paddr, 16) ||
	    !IS_ALIGNED(paddr,     16) ||
	    !IS_ALIGNED(size,      16)) {
		tpage = (void *)alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!tpage)
			return -ENOMEM;

		dst_paddr = __sme_page_pa(tpage);
	}

	ret = __sev_dbg_decrypt(kvm, paddr, dst_paddr, size, err);
	if (ret)
		goto e_free;

	if (tpage) {
		offset = paddr & 15;
		if (copy_to_user(dst_uaddr, page_address(tpage) + offset, size))
			ret = -EFAULT;
	}

e_free:
	if (tpage)
		__free_page(tpage);

	return ret;
}

static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
				  void __user *vaddr,
				  unsigned long dst_paddr,
				  void __user *dst_vaddr,
				  int size, int *error)
{
	struct page *src_tpage = NULL;
	struct page *dst_tpage = NULL;
	int ret, len = size;

	/* If source buffer is not aligned then use an intermediate buffer */
	if (!IS_ALIGNED((unsigned long)vaddr, 16)) {
		src_tpage = alloc_page(GFP_KERNEL_ACCOUNT);
		if (!src_tpage)
			return -ENOMEM;

		if (copy_from_user(page_address(src_tpage), vaddr, size)) {
			__free_page(src_tpage);
			return -EFAULT;
		}

		paddr = __sme_page_pa(src_tpage);
	}

	/*
	 * If destination buffer or length is not aligned then do read-modify-write:
	 * - decrypt destination in an intermediate buffer
	 * - copy the source buffer in an intermediate buffer
	 * - use the intermediate buffer as source buffer
	 */
	if (!IS_ALIGNED((unsigned long)dst_vaddr, 16) || !IS_ALIGNED(size, 16)) {
		int dst_offset;

		dst_tpage = alloc_page(GFP_KERNEL_ACCOUNT);
		if (!dst_tpage) {
			ret = -ENOMEM;
			goto e_free;
		}

		ret = __sev_dbg_decrypt(kvm, dst_paddr,
					__sme_page_pa(dst_tpage), size, error);
		if (ret)
			goto e_free;

		/*
		 * If source is kernel buffer then use memcpy() otherwise
		 * copy_from_user().
		 */
		dst_offset = dst_paddr & 15;

		if (src_tpage)
			memcpy(page_address(dst_tpage) + dst_offset,
			       page_address(src_tpage), size);
		else {
			if (copy_from_user(page_address(dst_tpage) + dst_offset,
					   vaddr, size)) {
				ret = -EFAULT;
				goto e_free;
			}
		}

		paddr = __sme_page_pa(dst_tpage);
		dst_paddr = round_down(dst_paddr, 16);
		len = round_up(size, 16);
	}

	ret = __sev_issue_dbg_cmd(kvm, paddr, dst_paddr, len, error, true);

e_free:
	if (src_tpage)
		__free_page(src_tpage);
	if (dst_tpage)
		__free_page(dst_tpage);
	return ret;
}

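/*
 * Common KVM_SEV_DBG_{DECRYPT,ENCRYPT} handler: walk the source range one
 * page at a time, pinning the source and destination pages and flushing
 * caches before handing each chunk to the firmware.
 */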
static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
{
	unsigned long vaddr, vaddr_end, next_vaddr;
	unsigned long dst_vaddr;
	struct page **src_p, **dst_p;
	struct kvm_sev_dbg debug;
	unsigned long n;
	unsigned int size;
	int ret;

	if (copy_from_user(&debug, (void __user *)(uintptr_t)argp->data, sizeof(debug)))
		return -EFAULT;

	if (!debug.len || debug.src_uaddr + debug.len < debug.src_uaddr)
		return -EINVAL;
	if (!debug.dst_uaddr)
		return -EINVAL;

	vaddr = debug.src_uaddr;
	size = debug.len;
	vaddr_end = vaddr + size;
	dst_vaddr = debug.dst_uaddr;

	for (; vaddr < vaddr_end; vaddr = next_vaddr) {
		int len, s_off, d_off;

		/* lock userspace source and destination page */
		src_p = sev_pin_memory(kvm, vaddr & PAGE_MASK, PAGE_SIZE, &n, 0);
		if (IS_ERR(src_p))
			return PTR_ERR(src_p);

		dst_p = sev_pin_memory(kvm, dst_vaddr & PAGE_MASK, PAGE_SIZE, &n, 1);
		if (IS_ERR(dst_p)) {
			sev_unpin_memory(kvm, src_p, n);
			return PTR_ERR(dst_p);
		}

		/*
		 * Flush (on non-coherent CPUs) before DBG_{DE,EN}CRYPT read or modify
		 * the pages; flush the destination too so that future accesses do not
		 * see stale data.
		 */
		sev_clflush_pages(src_p, 1);
		sev_clflush_pages(dst_p, 1);

		/*
		 * Since user buffer may not be page aligned, calculate the
		 * offset within the page.
		 */
		s_off = vaddr & ~PAGE_MASK;
		d_off = dst_vaddr & ~PAGE_MASK;
		len = min_t(size_t, (PAGE_SIZE - s_off), size);

		if (dec)
			ret = __sev_dbg_decrypt_user(kvm,
						     __sme_page_pa(src_p[0]) + s_off,
						     (void __user *)dst_vaddr,
						     __sme_page_pa(dst_p[0]) + d_off,
						     len, &argp->error);
		else
			ret = __sev_dbg_encrypt_user(kvm,
						     __sme_page_pa(src_p[0]) + s_off,
						     (void __user *)vaddr,
						     __sme_page_pa(dst_p[0]) + d_off,
						     (void __user *)dst_vaddr,
						     len, &argp->error);

		sev_unpin_memory(kvm, src_p, n);
		sev_unpin_memory(kvm, dst_p, n);

		if (ret)
			goto err;

		next_vaddr = vaddr + len;
		dst_vaddr = dst_vaddr + len;
		size -= len;
	}
err:
	return ret;
}

static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_launch_secret data;
	struct kvm_sev_launch_secret params;
	struct page **pages;
	void *blob, *hdr;
	unsigned long n, i;
	int ret, offset;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
		return -EFAULT;

	pages = sev_pin_memory(kvm, params.guest_uaddr, params.guest_len, &n, 1);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	/*
	 * Flush (on non-coherent CPUs) before LAUNCH_SECRET encrypts pages in
	 * place; the cache may contain the data that was written unencrypted.
	 */
	sev_clflush_pages(pages, n);

	/*
	 * The secret must be copied into a contiguous memory region; let's
	 * verify that userspace memory pages are contiguous before we issue
	 * the command.
	 */
	if (get_num_contig_pages(0, pages, n) != n) {
		ret = -EINVAL;
		goto e_unpin_memory;
	}

	memset(&data, 0, sizeof(data));

	offset = params.guest_uaddr & (PAGE_SIZE - 1);
	data.guest_address = __sme_page_pa(pages[0]) + offset;
	data.guest_len = params.guest_len;

	blob = psp_copy_user_blob(params.trans_uaddr, params.trans_len);
	if (IS_ERR(blob)) {
		ret = PTR_ERR(blob);
		goto e_unpin_memory;
	}

	data.trans_address = __psp_pa(blob);
	data.trans_len = params.trans_len;

	hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len);
	if (IS_ERR(hdr)) {
		ret = PTR_ERR(hdr);
		goto e_free_blob;
	}
	data.hdr_address = __psp_pa(hdr);
	data.hdr_len = params.hdr_len;

	data.handle = sev->handle;
	ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_SECRET, &data, &argp->error);

	kfree(hdr);

e_free_blob:
	kfree(blob);
e_unpin_memory:
	/* content of memory is updated, mark pages dirty */
	for (i = 0; i < n; i++) {
		set_page_dirty_lock(pages[i]);
		mark_page_accessed(pages[i]);
	}
	sev_unpin_memory(kvm, pages, n);
	return ret;
}

static int sev_get_attestation_report(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	void __user *report = (void __user *)(uintptr_t)argp->data;
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_attestation_report data;
	struct kvm_sev_attestation_report params;
	void __user *p;
	void *blob = NULL;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
		return -EFAULT;

	memset(&data, 0, sizeof(data));

	/* User wants to query the blob length */
	if (!params.len)
		goto cmd;

	p = (void __user *)(uintptr_t)params.uaddr;
	if (p) {
		if (params.len > SEV_FW_BLOB_MAX_SIZE)
			return -EINVAL;

		blob = kzalloc(params.len, GFP_KERNEL_ACCOUNT);
		if (!blob)
			return -ENOMEM;

		data.address = __psp_pa(blob);
		data.len = params.len;
		memcpy(data.mnonce, params.mnonce, sizeof(params.mnonce));
	}
cmd:
	data.handle = sev->handle;
	ret = sev_issue_cmd(kvm, SEV_CMD_ATTESTATION_REPORT, &data, &argp->error);
	/*
	 * If we query the session length, FW responded with expected data.
	 */
	if (!params.len)
		goto done;

	if (ret)
		goto e_free_blob;

	if (blob) {
		if (copy_to_user(p, blob, params.len))
			ret = -EFAULT;
	}

done:
	params.len = data.len;
	if (copy_to_user(report, &params, sizeof(params)))
		ret = -EFAULT;
e_free_blob:
	kfree(blob);
	return ret;
}

/* Userspace wants to query session length. */
static int
__sev_send_start_query_session_length(struct kvm *kvm, struct kvm_sev_cmd *argp,
				      struct kvm_sev_send_start *params)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_send_start data;
	int ret;

	memset(&data, 0, sizeof(data));
	data.handle = sev->handle;
	ret = sev_issue_cmd(kvm, SEV_CMD_SEND_START, &data, &argp->error);

	params->session_len = data.session_len;
	if (copy_to_user((void __user *)(uintptr_t)argp->data, params,
			 sizeof(struct kvm_sev_send_start)))
		ret = -EFAULT;

	return ret;
}

static int sev_send_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_send_start data;
	struct kvm_sev_send_start params;
	void *amd_certs, *session_data;
	void *pdh_cert, *plat_certs;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
			   sizeof(struct kvm_sev_send_start)))
		return -EFAULT;

	/* if session_len is zero, userspace wants to query the session length */
	if (!params.session_len)
		return __sev_send_start_query_session_length(kvm, argp,
							     &params);

	/* some sanity checks */
	if (!params.pdh_cert_uaddr || !params.pdh_cert_len ||
	    !params.session_uaddr || params.session_len > SEV_FW_BLOB_MAX_SIZE)
		return -EINVAL;

	/* allocate the memory to hold the session data blob */
	session_data = kzalloc(params.session_len, GFP_KERNEL_ACCOUNT);
	if (!session_data)
		return -ENOMEM;

	/* copy the certificate blobs from userspace */
	pdh_cert = psp_copy_user_blob(params.pdh_cert_uaddr,
				      params.pdh_cert_len);
	if (IS_ERR(pdh_cert)) {
		ret = PTR_ERR(pdh_cert);
		goto e_free_session;
	}

	plat_certs = psp_copy_user_blob(params.plat_certs_uaddr,
					params.plat_certs_len);
	if (IS_ERR(plat_certs)) {
		ret = PTR_ERR(plat_certs);
		goto e_free_pdh;
	}

	amd_certs = psp_copy_user_blob(params.amd_certs_uaddr,
				       params.amd_certs_len);
	if (IS_ERR(amd_certs)) {
		ret = PTR_ERR(amd_certs);
		goto e_free_plat_cert;
	}

	/* populate the FW SEND_START field with system physical address */
	memset(&data, 0, sizeof(data));
	data.pdh_cert_address = __psp_pa(pdh_cert);
	data.pdh_cert_len = params.pdh_cert_len;
	data.plat_certs_address = __psp_pa(plat_certs);
	data.plat_certs_len = params.plat_certs_len;
	data.amd_certs_address = __psp_pa(amd_certs);
	data.amd_certs_len = params.amd_certs_len;
	data.session_address = __psp_pa(session_data);
	data.session_len = params.session_len;
	data.handle = sev->handle;

	ret = sev_issue_cmd(kvm, SEV_CMD_SEND_START, &data, &argp->error);

	if (!ret && copy_to_user((void __user *)(uintptr_t)params.session_uaddr,
				 session_data, params.session_len)) {
		ret = -EFAULT;
		goto e_free_amd_cert;
	}

	params.policy = data.policy;
	params.session_len = data.session_len;
	if (copy_to_user((void __user *)(uintptr_t)argp->data, &params,
			 sizeof(struct kvm_sev_send_start)))
		ret = -EFAULT;

e_free_amd_cert:
	kfree(amd_certs);
e_free_plat_cert:
	kfree(plat_certs);
e_free_pdh:
	kfree(pdh_cert);
e_free_session:
	kfree(session_data);
	return ret;
}

/* Userspace wants to query either header or trans length. */
static int
__sev_send_update_data_query_lengths(struct kvm *kvm, struct kvm_sev_cmd *argp,
				     struct kvm_sev_send_update_data *params)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_send_update_data data;
	int ret;

	memset(&data, 0, sizeof(data));
	data.handle = sev->handle;
	ret = sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_DATA, &data, &argp->error);

	params->hdr_len = data.hdr_len;
	params->trans_len = data.trans_len;

	if (copy_to_user((void __user *)(uintptr_t)argp->data, params,
			 sizeof(struct kvm_sev_send_update_data)))
		ret = -EFAULT;

	return ret;
}

static int sev_send_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_send_update_data data;
	struct kvm_sev_send_update_data params;
	void *hdr, *trans_data;
	struct page **guest_page;
	unsigned long n;
	int ret, offset;

	if (!sev_guest(kvm))
		return -ENOTTY;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
			   sizeof(struct kvm_sev_send_update_data)))
		return -EFAULT;

	/* userspace wants to query either header or trans length */
	if (!params.trans_len || !params.hdr_len)
		return __sev_send_update_data_query_lengths(kvm, argp, &params);

	if (!params.trans_uaddr || !params.guest_uaddr ||
	    !params.guest_len || !params.hdr_uaddr)
		return -EINVAL;

	/* Check if we are crossing the page boundary */
	offset = params.guest_uaddr & (PAGE_SIZE - 1);
	if (params.guest_len > PAGE_SIZE || (params.guest_len + offset) > PAGE_SIZE)
		return -EINVAL;

	/* Pin guest memory */
	guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK,
				    PAGE_SIZE, &n, 0);
	if (IS_ERR(guest_page))
		return PTR_ERR(guest_page);

	/* allocate memory for header and transport buffer */
	ret = -ENOMEM;
	hdr = kzalloc(params.hdr_len, GFP_KERNEL_ACCOUNT);
	if (!hdr)
		goto e_unpin;

	trans_data = kzalloc(params.trans_len, GFP_KERNEL_ACCOUNT);
	if (!trans_data)
		goto e_free_hdr;

	memset(&data, 0, sizeof(data));
	data.hdr_address = __psp_pa(hdr);
	data.hdr_len = params.hdr_len;
	data.trans_address = __psp_pa(trans_data);
	data.trans_len = params.trans_len;

	/* The SEND_UPDATE_DATA command requires C-bit to be always set. */
	data.guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + offset;
	data.guest_address |= sev_me_mask;
	data.guest_len = params.guest_len;
	data.handle = sev->handle;

	ret = sev_issue_cmd(kvm, SEV_CMD_SEND_UPDATE_DATA, &data, &argp->error);

	if (ret)
		goto e_free_trans_data;

	/* copy transport buffer to user space */
	if (copy_to_user((void __user *)(uintptr_t)params.trans_uaddr,
			 trans_data, params.trans_len)) {
		ret = -EFAULT;
		goto e_free_trans_data;
	}

	/* Copy packet header to userspace. */
	if (copy_to_user((void __user *)(uintptr_t)params.hdr_uaddr, hdr,
			 params.hdr_len))
		ret = -EFAULT;

e_free_trans_data:
	kfree(trans_data);
e_free_hdr:
	kfree(hdr);
e_unpin:
	sev_unpin_memory(kvm, guest_page, n);

	return ret;
}

static int sev_send_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_send_finish data;

	if (!sev_guest(kvm))
		return -ENOTTY;

	data.handle = sev->handle;
	return sev_issue_cmd(kvm, SEV_CMD_SEND_FINISH, &data, &argp->error);
}

static int sev_send_cancel(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_send_cancel data;

	if (!sev_guest(kvm))
		return -ENOTTY;

	data.handle = sev->handle;
	return sev_issue_cmd(kvm, SEV_CMD_SEND_CANCEL, &data, &argp->error);
}

static int sev_receive_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_receive_start start;
	struct kvm_sev_receive_start params;
	int *error = &argp->error;
	void *session_data;
	void *pdh_data;
	int ret;

	if (!sev_guest(kvm))
		return -ENOTTY;

	/* Get parameter from the userspace */
	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
			   sizeof(struct kvm_sev_receive_start)))
		return -EFAULT;

	/* some sanity checks */
	if (!params.pdh_uaddr || !params.pdh_len ||
	    !params.session_uaddr || !params.session_len)
		return -EINVAL;

	pdh_data = psp_copy_user_blob(params.pdh_uaddr, params.pdh_len);
	if (IS_ERR(pdh_data))
		return PTR_ERR(pdh_data);

	session_data = psp_copy_user_blob(params.session_uaddr,
					  params.session_len);
	if (IS_ERR(session_data)) {
		ret = PTR_ERR(session_data);
		goto e_free_pdh;
	}

	memset(&start, 0, sizeof(start));
	start.handle = params.handle;
	start.policy = params.policy;
	start.pdh_cert_address = __psp_pa(pdh_data);
	start.pdh_cert_len = params.pdh_len;
	start.session_address = __psp_pa(session_data);
	start.session_len = params.session_len;

	/* create memory encryption context */
	ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_RECEIVE_START, &start,
			      error);
	if (ret)
		goto e_free_session;

	/* Bind ASID to this guest */
	ret = sev_bind_asid(kvm, start.handle, error);
	if (ret) {
		sev_decommission(start.handle);
		goto e_free_session;
	}

	params.handle = start.handle;
	if (copy_to_user((void __user *)(uintptr_t)argp->data,
			 &params, sizeof(struct kvm_sev_receive_start))) {
		ret = -EFAULT;
		sev_unbind_asid(kvm, start.handle);
		goto e_free_session;
	}

	sev->handle = start.handle;
	sev->fd = argp->sev_fd;

e_free_session:
	kfree(session_data);
e_free_pdh:
	kfree(pdh_data);

	return ret;
}

static int sev_receive_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct kvm_sev_receive_update_data params;
	struct sev_data_receive_update_data data;
	void *hdr = NULL, *trans = NULL;
	struct page **guest_page;
	unsigned long n;
	int ret, offset;

	if (!sev_guest(kvm))
		return -EINVAL;

	if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data,
			   sizeof(struct kvm_sev_receive_update_data)))
		return -EFAULT;

	if (!params.hdr_uaddr || !params.hdr_len ||
	    !params.guest_uaddr || !params.guest_len ||
	    !params.trans_uaddr || !params.trans_len)
		return -EINVAL;

	/* Check if we are crossing the page boundary */
	offset = params.guest_uaddr & (PAGE_SIZE - 1);
	if (params.guest_len > PAGE_SIZE || (params.guest_len + offset) > PAGE_SIZE)
		return -EINVAL;

	hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len);
	if (IS_ERR(hdr))
		return PTR_ERR(hdr);

	trans = psp_copy_user_blob(params.trans_uaddr, params.trans_len);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		goto e_free_hdr;
	}

	memset(&data, 0, sizeof(data));
	data.hdr_address = __psp_pa(hdr);
	data.hdr_len = params.hdr_len;
	data.trans_address = __psp_pa(trans);
	data.trans_len = params.trans_len;

	/* Pin guest memory */
	guest_page = sev_pin_memory(kvm, params.guest_uaddr & PAGE_MASK,
				    PAGE_SIZE, &n, 1);
	if (IS_ERR(guest_page)) {
		ret = PTR_ERR(guest_page);
		goto e_free_trans;
	}

	/*
	 * Flush (on non-coherent CPUs) before RECEIVE_UPDATE_DATA, the PSP
	 * encrypts the written data with the guest's key, and the cache may
	 * contain dirty, unencrypted data.
	 */
	sev_clflush_pages(guest_page, n);

	/* The RECEIVE_UPDATE_DATA command requires C-bit to be always set. */
	data.guest_address = (page_to_pfn(guest_page[0]) << PAGE_SHIFT) + offset;
	data.guest_address |= sev_me_mask;
	data.guest_len = params.guest_len;
	data.handle = sev->handle;

	ret = sev_issue_cmd(kvm, SEV_CMD_RECEIVE_UPDATE_DATA, &data,
			    &argp->error);

	sev_unpin_memory(kvm, guest_page, n);

e_free_trans:
	kfree(trans);
e_free_hdr:
	kfree(hdr);

	return ret;
}

static int sev_receive_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct sev_data_receive_finish data;

	if (!sev_guest(kvm))
		return -ENOTTY;

	data.handle = sev->handle;
	return sev_issue_cmd(kvm, SEV_CMD_RECEIVE_FINISH, &data, &argp->error);
}

static bool is_cmd_allowed_from_mirror(u32 cmd_id)
{
	/*
	 * Allow mirror VMs to call KVM_SEV_LAUNCH_UPDATE_VMSA to enable SEV-ES
	 * on active mirror VMs. Also allow the debugging and status commands.
	 */
	if (cmd_id == KVM_SEV_LAUNCH_UPDATE_VMSA ||
	    cmd_id == KVM_SEV_GUEST_STATUS || cmd_id == KVM_SEV_DBG_DECRYPT ||
	    cmd_id == KVM_SEV_DBG_ENCRYPT)
		return true;

	return false;
}

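/*
 * Lock two VMs for intra-host migration. The migration_in_progress flag is
 * claimed on both VMs before taking either kvm->lock, so two VMs migrating
 * to/from each other fail fast with -EBUSY instead of deadlocking.
 */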
static int sev_lock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm)
{
	struct kvm_sev_info *dst_sev = &to_kvm_svm(dst_kvm)->sev_info;
	struct kvm_sev_info *src_sev = &to_kvm_svm(src_kvm)->sev_info;
	int r = -EBUSY;

	if (dst_kvm == src_kvm)
		return -EINVAL;

	/*
	 * Bail if these VMs are already involved in a migration to avoid
	 * deadlock between two VMs trying to migrate to/from each other.
	 */
	if (atomic_cmpxchg_acquire(&dst_sev->migration_in_progress, 0, 1))
		return -EBUSY;

	if (atomic_cmpxchg_acquire(&src_sev->migration_in_progress, 0, 1))
		goto release_dst;

	r = -EINTR;
	if (mutex_lock_killable(&dst_kvm->lock))
		goto release_src;
	if (mutex_lock_killable_nested(&src_kvm->lock, SINGLE_DEPTH_NESTING))
		goto unlock_dst;
	return 0;

unlock_dst:
	mutex_unlock(&dst_kvm->lock);
release_src:
	atomic_set_release(&src_sev->migration_in_progress, 0);
release_dst:
	atomic_set_release(&dst_sev->migration_in_progress, 0);
	return r;
}

static void sev_unlock_two_vms(struct kvm *dst_kvm, struct kvm *src_kvm)
{
	struct kvm_sev_info *dst_sev = &to_kvm_svm(dst_kvm)->sev_info;
	struct kvm_sev_info *src_sev = &to_kvm_svm(src_kvm)->sev_info;

	mutex_unlock(&dst_kvm->lock);
	mutex_unlock(&src_kvm->lock);
	atomic_set_release(&dst_sev->migration_in_progress, 0);
	atomic_set_release(&src_sev->migration_in_progress, 0);
}

/* vCPU mutex subclasses. */
enum sev_migration_role {
	SEV_MIGRATION_SOURCE = 0,
	SEV_MIGRATION_TARGET,
	SEV_NR_MIGRATION_ROLES,
};

static int sev_lock_vcpus_for_migration(struct kvm *kvm,
					enum sev_migration_role role)
{
	struct kvm_vcpu *vcpu;
	unsigned long i, j;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (mutex_lock_killable_nested(&vcpu->mutex, role))
			goto out_unlock;

#ifdef CONFIG_PROVE_LOCKING
		if (!i)
			/*
			 * Reset the role to one that avoids colliding with
			 * the role used for the first vcpu mutex.
			 */
			role = SEV_NR_MIGRATION_ROLES;
		else
			mutex_release(&vcpu->mutex.dep_map, _THIS_IP_);
#endif
	}

	return 0;

out_unlock:
	kvm_for_each_vcpu(j, vcpu, kvm) {
		if (i == j)
			break;

#ifdef CONFIG_PROVE_LOCKING
		if (j)
			mutex_acquire(&vcpu->mutex.dep_map, role, 0, _THIS_IP_);
#endif

		mutex_unlock(&vcpu->mutex);
	}
	return -EINTR;
}

static void sev_unlock_vcpus_for_migration(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	unsigned long i;
	bool first = true;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (first)
			first = false;
		else
			mutex_acquire(&vcpu->mutex.dep_map,
				      SEV_NR_MIGRATION_ROLES, 0, _THIS_IP_);

		mutex_unlock(&vcpu->mutex);
	}
}

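/*
 * Move the SEV context (ASID, firmware handle, pinned-region list, mirrors
 * and, for SEV-ES, the per-vCPU VMSA/GHCB state) from src_kvm to dst_kvm,
 * leaving the source inert. Both VMs and all vCPUs must already be locked.
 */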
static void sev_migrate_from(struct kvm *dst_kvm, struct kvm *src_kvm)
{
	struct kvm_sev_info *dst = &to_kvm_svm(dst_kvm)->sev_info;
	struct kvm_sev_info *src = &to_kvm_svm(src_kvm)->sev_info;
	struct kvm_vcpu *dst_vcpu, *src_vcpu;
	struct vcpu_svm *dst_svm, *src_svm;
	struct kvm_sev_info *mirror;
	unsigned long i;

	dst->active = true;
	dst->asid = src->asid;
	dst->handle = src->handle;
	dst->pages_locked = src->pages_locked;
	dst->enc_context_owner = src->enc_context_owner;
	dst->es_active = src->es_active;

	src->asid = 0;
	src->active = false;
	src->handle = 0;
	src->pages_locked = 0;
	src->enc_context_owner = NULL;
	src->es_active = false;

	list_cut_before(&dst->regions_list, &src->regions_list, &src->regions_list);

	/*
	 * If this VM has mirrors, "transfer" each mirror's refcount of the
	 * source to the destination (this KVM). The caller holds a reference
	 * to the source, so there's no danger of use-after-free.
	 */
	list_cut_before(&dst->mirror_vms, &src->mirror_vms, &src->mirror_vms);
	list_for_each_entry(mirror, &dst->mirror_vms, mirror_entry) {
		kvm_get_kvm(dst_kvm);
		kvm_put_kvm(src_kvm);
		mirror->enc_context_owner = dst_kvm;
	}

	/*
	 * If this VM is a mirror, remove the old mirror from the owners list
	 * and add the new mirror to the list.
	 */
	if (is_mirroring_enc_context(dst_kvm)) {
		struct kvm_sev_info *owner_sev_info =
			&to_kvm_svm(dst->enc_context_owner)->sev_info;

		list_del(&src->mirror_entry);
		list_add_tail(&dst->mirror_entry, &owner_sev_info->mirror_vms);
	}

	kvm_for_each_vcpu(i, dst_vcpu, dst_kvm) {
		dst_svm = to_svm(dst_vcpu);

		sev_init_vmcb(dst_svm);

		if (!dst->es_active)
			continue;

		/*
		 * Note, the source is not required to have the same number of
		 * vCPUs as the destination when migrating a vanilla SEV VM.
		 */
		src_vcpu = kvm_get_vcpu(src_kvm, i);
		src_svm = to_svm(src_vcpu);

		/*
		 * Transfer VMSA and GHCB state to the destination. Nullify and
		 * clear source fields as appropriate, the state now belongs to
		 * the destination.
		 */
		memcpy(&dst_svm->sev_es, &src_svm->sev_es, sizeof(src_svm->sev_es));
		dst_svm->vmcb->control.ghcb_gpa = src_svm->vmcb->control.ghcb_gpa;
		dst_svm->vmcb->control.vmsa_pa = src_svm->vmcb->control.vmsa_pa;
		dst_vcpu->arch.guest_state_protected = true;

		memset(&src_svm->sev_es, 0, sizeof(src_svm->sev_es));
		src_svm->vmcb->control.ghcb_gpa = INVALID_PAGE;
		src_svm->vmcb->control.vmsa_pa = INVALID_PAGE;
		src_vcpu->arch.guest_state_protected = false;
	}
}

static int sev_check_source_vcpus(struct kvm *dst, struct kvm *src)
{
	struct kvm_vcpu *src_vcpu;
	unsigned long i;

	if (!sev_es_guest(src))
		return 0;

	if (atomic_read(&src->online_vcpus) != atomic_read(&dst->online_vcpus))
		return -EINVAL;

	kvm_for_each_vcpu(i, src_vcpu, src) {
		if (!src_vcpu->arch.guest_state_protected)
			return -EINVAL;
	}

	return 0;
}

int sev_vm_move_enc_context_from(struct kvm *kvm, unsigned int source_fd)
{
	struct kvm_sev_info *dst_sev = &to_kvm_svm(kvm)->sev_info;
	struct kvm_sev_info *src_sev, *cg_cleanup_sev;
	struct file *source_kvm_file;
	struct kvm *source_kvm;
	bool charged = false;
	int ret;

	source_kvm_file = fget(source_fd);
	if (!file_is_kvm(source_kvm_file)) {
		ret = -EBADF;
		goto out_fput;
	}

	source_kvm = source_kvm_file->private_data;
	ret = sev_lock_two_vms(kvm, source_kvm);
	if (ret)
		goto out_fput;

	if (sev_guest(kvm) || !sev_guest(source_kvm)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	src_sev = &to_kvm_svm(source_kvm)->sev_info;

	dst_sev->misc_cg = get_current_misc_cg();
	cg_cleanup_sev = dst_sev;
	if (dst_sev->misc_cg != src_sev->misc_cg) {
		ret = sev_misc_cg_try_charge(dst_sev);
		if (ret)
			goto out_dst_cgroup;
		charged = true;
	}

	ret = sev_lock_vcpus_for_migration(kvm, SEV_MIGRATION_SOURCE);
	if (ret)
		goto out_dst_cgroup;
	ret = sev_lock_vcpus_for_migration(source_kvm, SEV_MIGRATION_TARGET);
	if (ret)
		goto out_dst_vcpu;

	ret = sev_check_source_vcpus(kvm, source_kvm);
	if (ret)
		goto out_source_vcpu;

	sev_migrate_from(kvm, source_kvm);
	kvm_vm_dead(source_kvm);
	cg_cleanup_sev = src_sev;
	ret = 0;

out_source_vcpu:
	sev_unlock_vcpus_for_migration(source_kvm);
out_dst_vcpu:
	sev_unlock_vcpus_for_migration(kvm);
out_dst_cgroup:
	/* Operates on the source on success, on the destination on failure. */
	if (charged)
		sev_misc_cg_uncharge(cg_cleanup_sev);
	put_misc_cg(cg_cleanup_sev->misc_cg);
	cg_cleanup_sev->misc_cg = NULL;
out_unlock:
	sev_unlock_two_vms(kvm, source_kvm);
out_fput:
	if (source_kvm_file)
		fput(source_kvm_file);
	return ret;
}

int sev_mem_enc_ioctl(struct kvm *kvm, void __user *argp)
{
	struct kvm_sev_cmd sev_cmd;
	int r;

	if (!sev_enabled)
		return -ENOTTY;

	if (!argp)
		return 0;

	if (copy_from_user(&sev_cmd, argp, sizeof(struct kvm_sev_cmd)))
		return -EFAULT;

	mutex_lock(&kvm->lock);

	/* Only the enc_context_owner handles some memory enc operations. */
	if (is_mirroring_enc_context(kvm) &&
	    !is_cmd_allowed_from_mirror(sev_cmd.id)) {
		r = -EINVAL;
		goto out;
	}

	switch (sev_cmd.id) {
	case KVM_SEV_ES_INIT:
		if (!sev_es_enabled) {
			r = -ENOTTY;
			goto out;
		}
		fallthrough;
	case KVM_SEV_INIT:
		r = sev_guest_init(kvm, &sev_cmd);
		break;
	case KVM_SEV_LAUNCH_START:
		r = sev_launch_start(kvm, &sev_cmd);
		break;
	case KVM_SEV_LAUNCH_UPDATE_DATA:
		r = sev_launch_update_data(kvm, &sev_cmd);
		break;
	case KVM_SEV_LAUNCH_UPDATE_VMSA:
		r = sev_launch_update_vmsa(kvm, &sev_cmd);
		break;
	case KVM_SEV_LAUNCH_MEASURE:
		r = sev_launch_measure(kvm, &sev_cmd);
		break;
	case KVM_SEV_LAUNCH_FINISH:
		r = sev_launch_finish(kvm, &sev_cmd);
		break;
	case KVM_SEV_GUEST_STATUS:
		r = sev_guest_status(kvm, &sev_cmd);
		break;
	case KVM_SEV_DBG_DECRYPT:
		r = sev_dbg_crypt(kvm, &sev_cmd, true);
		break;
	case KVM_SEV_DBG_ENCRYPT:
		r = sev_dbg_crypt(kvm, &sev_cmd, false);
		break;
	case KVM_SEV_LAUNCH_SECRET:
		r = sev_launch_secret(kvm, &sev_cmd);
		break;
	case KVM_SEV_GET_ATTESTATION_REPORT:
		r = sev_get_attestation_report(kvm, &sev_cmd);
		break;
	case KVM_SEV_SEND_START:
		r = sev_send_start(kvm, &sev_cmd);
		break;
	case KVM_SEV_SEND_UPDATE_DATA:
		r = sev_send_update_data(kvm, &sev_cmd);
		break;
	case KVM_SEV_SEND_FINISH:
		r = sev_send_finish(kvm, &sev_cmd);
		break;
	case KVM_SEV_SEND_CANCEL:
		r = sev_send_cancel(kvm, &sev_cmd);
		break;
	case KVM_SEV_RECEIVE_START:
		r = sev_receive_start(kvm, &sev_cmd);
		break;
	case KVM_SEV_RECEIVE_UPDATE_DATA:
		r = sev_receive_update_data(kvm, &sev_cmd);
		break;
	case KVM_SEV_RECEIVE_FINISH:
		r = sev_receive_finish(kvm, &sev_cmd);
		break;
	default:
		r = -EINVAL;
		goto out;
	}

	if (copy_to_user(argp, &sev_cmd, sizeof(struct kvm_sev_cmd)))
		r = -EFAULT;

out:
	mutex_unlock(&kvm->lock);
	return r;
}

int sev_mem_enc_register_region(struct kvm *kvm,
				struct kvm_enc_region *range)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct enc_region *region;
	int ret = 0;

	if (!sev_guest(kvm))
		return -ENOTTY;

	/* If kvm is mirroring encryption context it isn't responsible for it */
	if (is_mirroring_enc_context(kvm))
		return -EINVAL;

	if (range->addr > ULONG_MAX || range->size > ULONG_MAX)
		return -EINVAL;

	region = kzalloc(sizeof(*region), GFP_KERNEL_ACCOUNT);
	if (!region)
		return -ENOMEM;

	mutex_lock(&kvm->lock);
	region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, 1);
	if (IS_ERR(region->pages)) {
		ret = PTR_ERR(region->pages);
		mutex_unlock(&kvm->lock);
		goto e_free;
	}

	/*
	 * The guest may change the memory encryption attribute from C=0 -> C=1
	 * or vice versa for this memory range. Let's make sure caches are
	 * flushed to ensure that guest data gets written into memory with
	 * correct C-bit. Note, this must be done before dropping kvm->lock,
	 * as region and its array of pages can be freed by a different task
	 * once kvm->lock is released.
	 */
	sev_clflush_pages(region->pages, region->npages);

	region->uaddr = range->addr;
	region->size = range->size;

	list_add_tail(&region->list, &sev->regions_list);
	mutex_unlock(&kvm->lock);

	return ret;

e_free:
	kfree(region);
	return ret;
}

static struct enc_region *
find_enc_region(struct kvm *kvm, struct kvm_enc_region *range)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct list_head *head = &sev->regions_list;
	struct enc_region *i;

	list_for_each_entry(i, head, list) {
		if (i->uaddr == range->addr &&
		    i->size == range->size)
			return i;
	}

	return NULL;
}

static void __unregister_enc_region_locked(struct kvm *kvm,
					   struct enc_region *region)
{
	sev_unpin_memory(kvm, region->pages, region->npages);
	list_del(&region->list);
	kfree(region);
}

int sev_mem_enc_unregister_region(struct kvm *kvm,
				  struct kvm_enc_region *range)
{
	struct enc_region *region;
	int ret;

	/* If kvm is mirroring encryption context it isn't responsible for it */
	if (is_mirroring_enc_context(kvm))
		return -EINVAL;

	mutex_lock(&kvm->lock);

	if (!sev_guest(kvm)) {
		ret = -ENOTTY;
		goto failed;
	}

	region = find_enc_region(kvm, range);
	if (!region) {
		ret = -EINVAL;
		goto failed;
	}

	/*
	 * Ensure that all guest tagged cache entries are flushed before
	 * releasing the pages back to the system for use. CLFLUSH will
	 * not do this, so issue a WBINVD.
	 */
	wbinvd_on_all_cpus();

	__unregister_enc_region_locked(kvm, region);

	mutex_unlock(&kvm->lock);
	return 0;

failed:
	mutex_unlock(&kvm->lock);
	return ret;
}

int sev_vm_copy_enc_context_from(struct kvm *kvm, unsigned int source_fd)
{
	struct file *source_kvm_file;
	struct kvm *source_kvm;
	struct kvm_sev_info *source_sev, *mirror_sev;
	int ret;

	source_kvm_file = fget(source_fd);
	if (!file_is_kvm(source_kvm_file)) {
		ret = -EBADF;
		goto e_source_fput;
	}

	source_kvm = source_kvm_file->private_data;
	ret = sev_lock_two_vms(kvm, source_kvm);
	if (ret)
		goto e_source_fput;

	/*
	 * Mirrors of mirrors should work, but let's not get silly. Also
	 * disallow out-of-band SEV/SEV-ES init if the target is already an
	 * SEV guest, or if vCPUs have been created. KVM relies on vCPUs being
	 * created after SEV/SEV-ES initialization, e.g. to init intercepts.
	 */
	if (sev_guest(kvm) || !sev_guest(source_kvm) ||
	    is_mirroring_enc_context(source_kvm) || kvm->created_vcpus) {
		ret = -EINVAL;
		goto e_unlock;
	}

	/*
	 * The mirror kvm holds an enc_context_owner ref so its asid can't
	 * disappear until we're done with it
	 */
	source_sev = &to_kvm_svm(source_kvm)->sev_info;
	kvm_get_kvm(source_kvm);
	mirror_sev = &to_kvm_svm(kvm)->sev_info;
	list_add_tail(&mirror_sev->mirror_entry, &source_sev->mirror_vms);

	/* Set enc_context_owner and copy its encryption context over */
	mirror_sev->enc_context_owner = source_kvm;
	mirror_sev->active = true;
	mirror_sev->asid = source_sev->asid;
	mirror_sev->fd = source_sev->fd;
	mirror_sev->es_active = source_sev->es_active;
	mirror_sev->handle = source_sev->handle;
	INIT_LIST_HEAD(&mirror_sev->regions_list);
	INIT_LIST_HEAD(&mirror_sev->mirror_vms);
	ret = 0;

	/*
	 * Do not copy ap_jump_table. Since the mirror does not share the same
	 * KVM contexts as the original, and they may have different
	 * memory-views.
	 */

e_unlock:
	sev_unlock_two_vms(kvm, source_kvm);
e_source_fput:
	if (source_kvm_file)
		fput(source_kvm_file);
	return ret;
}

void sev_vm_destroy(struct kvm *kvm)
{
	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
	struct list_head *head = &sev->regions_list;
	struct list_head *pos, *q;

	if (!sev_guest(kvm))
		return;

	WARN_ON(!list_empty(&sev->mirror_vms));

	/* If this is a mirror_kvm release the enc_context_owner and skip sev cleanup */
	if (is_mirroring_enc_context(kvm)) {
		struct kvm *owner_kvm = sev->enc_context_owner;

		mutex_lock(&owner_kvm->lock);
		list_del(&sev->mirror_entry);
		mutex_unlock(&owner_kvm->lock);
		kvm_put_kvm(owner_kvm);
		return;
	}

	/*
	 * Ensure that all guest tagged cache entries are flushed before
	 * releasing the pages back to the system for use. CLFLUSH will
	 * not do this, so issue a WBINVD.
	 */
	wbinvd_on_all_cpus();

	/*
	 * If userspace was terminated before unregistering the memory regions,
	 * then let's unpin all the registered memory.
	 */
	if (!list_empty(head)) {
		list_for_each_safe(pos, q, head) {
			__unregister_enc_region_locked(kvm,
				list_entry(pos, struct enc_region, list));
			cond_resched();
		}
	}

	sev_unbind_asid(kvm, sev->handle);
	sev_asid_free(sev);
}

void __init sev_set_cpu_caps(void)
{
	if (!sev_enabled)
		kvm_cpu_cap_clear(X86_FEATURE_SEV);
	if (!sev_es_enabled)
		kvm_cpu_cap_clear(X86_FEATURE_SEV_ES);
}

void __init sev_hardware_setup(void)
{
#ifdef CONFIG_KVM_AMD_SEV
	unsigned int eax, ebx, ecx, edx, sev_asid_count, sev_es_asid_count;
	bool sev_es_supported = false;
	bool sev_supported = false;

	if (!sev_enabled || !npt_enabled)
		goto out;

	/*
	 * SEV must obviously be supported in hardware. Sanity check that the
	 * CPU supports decode assists, which is mandatory for SEV guests to
	 * support instruction emulation.
	 */
	if (!boot_cpu_has(X86_FEATURE_SEV) ||
	    WARN_ON_ONCE(!boot_cpu_has(X86_FEATURE_DECODEASSISTS)))
		goto out;

	/* Retrieve SEV CPUID information */
	cpuid(0x8000001f, &eax, &ebx, &ecx, &edx);

	/* Set encryption bit location for SEV-ES guests */
	sev_enc_bit = ebx & 0x3f;

	/* Maximum number of encrypted guests supported simultaneously */
	max_sev_asid = ecx;
	if (!max_sev_asid)
		goto out;

	/* Minimum ASID value that should be used for SEV guest */
	min_sev_asid = edx;
	sev_me_mask = 1UL << (ebx & 0x3f);

	/*
	 * Initialize SEV ASID bitmaps. Allocate space for ASID 0 in the bitmap,
	 * even though it's never used, so that the bitmap is indexed by the
	 * actual ASID.
	 */
	nr_asids = max_sev_asid + 1;
	sev_asid_bitmap = bitmap_zalloc(nr_asids, GFP_KERNEL);
	if (!sev_asid_bitmap)
		goto out;

	sev_reclaim_asid_bitmap = bitmap_zalloc(nr_asids, GFP_KERNEL);
	if (!sev_reclaim_asid_bitmap) {
		bitmap_free(sev_asid_bitmap);
		sev_asid_bitmap = NULL;
		goto out;
	}

	if (min_sev_asid <= max_sev_asid) {
		sev_asid_count = max_sev_asid - min_sev_asid + 1;
		WARN_ON_ONCE(misc_cg_set_capacity(MISC_CG_RES_SEV, sev_asid_count));
	}
	sev_supported = true;

	/* SEV-ES support requested? */
	if (!sev_es_enabled)
		goto out;

	/*
	 * SEV-ES requires MMIO caching as KVM doesn't have access to the guest
	 * instruction stream, i.e. can't emulate in response to a #NPF and
	 * instead relies on #NPF(RSVD) being reflected into the guest as #VC
	 * (the guest can then do a #VMGEXIT to request MMIO emulation).
	 */
	if (!enable_mmio_caching)
		goto out;

	/* Does the CPU support SEV-ES? */
	if (!boot_cpu_has(X86_FEATURE_SEV_ES))
		goto out;

	/* Has the system been allocated ASIDs for SEV-ES? */
	if (min_sev_asid == 1)
		goto out;

	sev_es_asid_count = min_sev_asid - 1;
	WARN_ON_ONCE(misc_cg_set_capacity(MISC_CG_RES_SEV_ES, sev_es_asid_count));
	sev_es_supported = true;

out:
	if (boot_cpu_has(X86_FEATURE_SEV))
		pr_info("SEV %s (ASIDs %u - %u)\n",
			sev_supported ? min_sev_asid <= max_sev_asid ? "enabled" :
								       "unusable" :
								       "disabled",
			min_sev_asid, max_sev_asid);
	if (boot_cpu_has(X86_FEATURE_SEV_ES))
		pr_info("SEV-ES %s (ASIDs %u - %u)\n",
			sev_es_supported ? "enabled" : "disabled",
			min_sev_asid > 1 ? 1 : 0, min_sev_asid - 1);

	sev_enabled = sev_supported;
	sev_es_enabled = sev_es_supported;
#endif
}

void sev_hardware_unsetup(void)
{
	if (!sev_enabled)
		return;

	/* No need to take sev_bitmap_lock, all VMs have been destroyed. */
	sev_flush_asids(1, max_sev_asid);

	bitmap_free(sev_asid_bitmap);
	bitmap_free(sev_reclaim_asid_bitmap);

	misc_cg_set_capacity(MISC_CG_RES_SEV, 0);
	misc_cg_set_capacity(MISC_CG_RES_SEV_ES, 0);
}

int sev_cpu_init(struct svm_cpu_data *sd)
{
	if (!sev_enabled)
		return 0;

	sd->sev_vmcbs = kcalloc(nr_asids, sizeof(void *), GFP_KERNEL);
	if (!sd->sev_vmcbs)
		return -ENOMEM;

	return 0;
}

/*
 * Pages used by hardware to hold guest encrypted state must be flushed before
 * returning them to the system.
 */
static void sev_flush_encrypted_page(struct kvm_vcpu *vcpu, void *va)
{
	unsigned int asid = sev_get_asid(vcpu->kvm);

	/*
	 * Note! The address must be a kernel address, as regular page walk
	 * checks are performed by VM_PAGE_FLUSH, i.e. operating on a user
	 * address is non-deterministic and unsafe. This function deliberately
	 * takes a pointer to deter passing in a user address.
	 */
	unsigned long addr = (unsigned long)va;

	/*
	 * If CPU enforced cache coherency for encrypted mappings of the
	 * same physical page is supported, use CLFLUSHOPT instead. NOTE: cache
	 * flush is still needed in order to work properly with DMA devices.
	 */
	if (boot_cpu_has(X86_FEATURE_SME_COHERENT)) {
		clflush_cache_range(va, PAGE_SIZE);
		return;
	}

	/*
	 * VM Page Flush takes a host virtual address and a guest ASID. Fall
	 * back to WBINVD if this faults so as not to make any problems worse
	 * by leaving stale encrypted data in the cache.
	 */
	if (WARN_ON_ONCE(wrmsrl_safe(MSR_AMD64_VM_PAGE_FLUSH, addr | asid)))
		goto do_wbinvd;

	return;

do_wbinvd:
	wbinvd_on_all_cpus();
}

void sev_guest_memory_reclaimed(struct kvm *kvm)
{
	if (!sev_guest(kvm))
		return;

	wbinvd_on_all_cpus();
}

void sev_free_vcpu(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm;

	if (!sev_es_guest(vcpu->kvm))
		return;

	svm = to_svm(vcpu);

	if (vcpu->arch.guest_state_protected)
		sev_flush_encrypted_page(vcpu, svm->sev_es.vmsa);

	__free_page(virt_to_page(svm->sev_es.vmsa));

	if (svm->sev_es.ghcb_sa_free)
		kvfree(svm->sev_es.ghcb_sa);
}

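/* Dump the (unencrypted) GHCB of a misbehaving guest to aid debugging. */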
static void dump_ghcb(struct vcpu_svm *svm)
{
	struct ghcb *ghcb = svm->sev_es.ghcb;
	unsigned int nbits;

	/* Re-use the dump_invalid_vmcb module parameter */
	if (!dump_invalid_vmcb) {
		pr_warn_ratelimited("set kvm_amd.dump_invalid_vmcb=1 to dump internal KVM state.\n");
		return;
	}

	nbits = sizeof(ghcb->save.valid_bitmap) * 8;

	pr_err("GHCB (GPA=%016llx):\n", svm->vmcb->control.ghcb_gpa);
	pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_code",
	       ghcb->save.sw_exit_code, ghcb_sw_exit_code_is_valid(ghcb));
	pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_info_1",
	       ghcb->save.sw_exit_info_1, ghcb_sw_exit_info_1_is_valid(ghcb));
	pr_err("%-20s%016llx is_valid: %u\n", "sw_exit_info_2",
	       ghcb->save.sw_exit_info_2, ghcb_sw_exit_info_2_is_valid(ghcb));
	pr_err("%-20s%016llx is_valid: %u\n", "sw_scratch",
	       ghcb->save.sw_scratch, ghcb_sw_scratch_is_valid(ghcb));
	pr_err("%-20s%*pb\n", "valid_bitmap", nbits, ghcb->save.valid_bitmap);
}

static void sev_es_sync_to_ghcb(struct vcpu_svm *svm)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;
	struct ghcb *ghcb = svm->sev_es.ghcb;

	/*
	 * The GHCB protocol so far allows for the following data
	 * to be returned:
	 *   GPRs RAX, RBX, RCX, RDX
	 *
	 * Copy their values, even if they may not have been written during the
	 * VM-Exit. It's the guest's responsibility to not consume random data.
	 */
	ghcb_set_rax(ghcb, vcpu->arch.regs[VCPU_REGS_RAX]);
	ghcb_set_rbx(ghcb, vcpu->arch.regs[VCPU_REGS_RBX]);
	ghcb_set_rcx(ghcb, vcpu->arch.regs[VCPU_REGS_RCX]);
	ghcb_set_rdx(ghcb, vcpu->arch.regs[VCPU_REGS_RDX]);
}

static void sev_es_sync_from_ghcb(struct vcpu_svm *svm)
{
	struct vmcb_control_area *control = &svm->vmcb->control;
	struct kvm_vcpu *vcpu = &svm->vcpu;
	struct ghcb *ghcb = svm->sev_es.ghcb;
	u64 exit_code;

	/*
	 * The GHCB protocol so far allows for the following data
	 * to be exchanged:
	 *   GPRs RAX, RBX, RCX, RDX
	 *   XCR0
	 *   CPL
	 *
	 * VMMCALL allows the guest to provide extra registers. KVM also
	 * expects RSI for hypercalls, so include that, too.
	 *
	 * Copy their values to the appropriate location if supplied.
	 */
	memset(vcpu->arch.regs, 0, sizeof(vcpu->arch.regs));

	BUILD_BUG_ON(sizeof(svm->sev_es.valid_bitmap) != sizeof(ghcb->save.valid_bitmap));
	memcpy(&svm->sev_es.valid_bitmap, &ghcb->save.valid_bitmap, sizeof(ghcb->save.valid_bitmap));

	vcpu->arch.regs[VCPU_REGS_RAX] = kvm_ghcb_get_rax_if_valid(svm, ghcb);
	vcpu->arch.regs[VCPU_REGS_RBX] = kvm_ghcb_get_rbx_if_valid(svm, ghcb);
	vcpu->arch.regs[VCPU_REGS_RCX] = kvm_ghcb_get_rcx_if_valid(svm, ghcb);
	vcpu->arch.regs[VCPU_REGS_RDX] = kvm_ghcb_get_rdx_if_valid(svm, ghcb);
	vcpu->arch.regs[VCPU_REGS_RSI] = kvm_ghcb_get_rsi_if_valid(svm, ghcb);

	svm->vmcb->save.cpl = kvm_ghcb_get_cpl_if_valid(svm, ghcb);

	if (kvm_ghcb_xcr0_is_valid(svm)) {
		vcpu->arch.xcr0 = ghcb_get_xcr0(ghcb);
		kvm_update_cpuid_runtime(vcpu);
	}

	/* Copy the GHCB exit information into the VMCB fields */
	exit_code = ghcb_get_sw_exit_code(ghcb);
	control->exit_code = lower_32_bits(exit_code);
	control->exit_code_hi = upper_32_bits(exit_code);
	control->exit_info_1 = ghcb_get_sw_exit_info_1(ghcb);
	control->exit_info_2 = ghcb_get_sw_exit_info_2(ghcb);
	svm->sev_es.sw_scratch = kvm_ghcb_get_sw_scratch_if_valid(svm, ghcb);

	/* Clear the valid entries fields */
	memset(ghcb->save.valid_bitmap, 0, sizeof(ghcb->save.valid_bitmap));
}

static u64 kvm_ghcb_get_sw_exit_code(struct vmcb_control_area *control)
{
	return (((u64)control->exit_code_hi) << 32) | control->exit_code;
}

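/*
 * Validate a VMGEXIT request: the GHCB must use usage 0, carry the exit code
 * and exit info fields, and mark every register the given exit code consumes
 * as valid. On failure, an error code is propagated back to the guest via
 * sw_exit_info and the guest is resumed.
 */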
static int sev_es_validate_vmgexit(struct vcpu_svm *svm)
{
	struct vmcb_control_area *control = &svm->vmcb->control;
	struct kvm_vcpu *vcpu = &svm->vcpu;
	struct ghcb *ghcb;
	u64 exit_code;
	u64 reason;

	ghcb = svm->sev_es.ghcb;

	/*
	 * Retrieve the exit code now even though it may not be marked valid
	 * as it could help with debugging.
	 */
	exit_code = kvm_ghcb_get_sw_exit_code(control);

	/* Only GHCB Usage code 0 is supported */
	if (ghcb->ghcb_usage) {
		reason = GHCB_ERR_INVALID_USAGE;
		goto vmgexit_err;
	}

	reason = GHCB_ERR_MISSING_INPUT;

	if (!kvm_ghcb_sw_exit_code_is_valid(svm) ||
	    !kvm_ghcb_sw_exit_info_1_is_valid(svm) ||
	    !kvm_ghcb_sw_exit_info_2_is_valid(svm))
		goto vmgexit_err;

	switch (exit_code) {
	case SVM_EXIT_READ_DR7:
		break;
	case SVM_EXIT_WRITE_DR7:
		if (!kvm_ghcb_rax_is_valid(svm))
			goto vmgexit_err;
		break;
	case SVM_EXIT_RDTSC:
		break;
	case SVM_EXIT_RDPMC:
		if (!kvm_ghcb_rcx_is_valid(svm))
			goto vmgexit_err;
		break;
	case SVM_EXIT_CPUID:
		if (!kvm_ghcb_rax_is_valid(svm) ||
		    !kvm_ghcb_rcx_is_valid(svm))
			goto vmgexit_err;
		if (vcpu->arch.regs[VCPU_REGS_RAX] == 0xd)
			if (!kvm_ghcb_xcr0_is_valid(svm))
				goto vmgexit_err;
		break;
	case SVM_EXIT_INVD:
		break;
	case SVM_EXIT_IOIO:
		if (control->exit_info_1 & SVM_IOIO_STR_MASK) {
			if (!kvm_ghcb_sw_scratch_is_valid(svm))
				goto vmgexit_err;
		} else {
			if (!(control->exit_info_1 & SVM_IOIO_TYPE_MASK))
				if (!kvm_ghcb_rax_is_valid(svm))
					goto vmgexit_err;
		}
		break;
	case SVM_EXIT_MSR:
		if (!kvm_ghcb_rcx_is_valid(svm))
			goto vmgexit_err;
		if (control->exit_info_1) {
			if (!kvm_ghcb_rax_is_valid(svm) ||
			    !kvm_ghcb_rdx_is_valid(svm))
				goto vmgexit_err;
		}
		break;
	case SVM_EXIT_VMMCALL:
		if (!kvm_ghcb_rax_is_valid(svm) ||
		    !kvm_ghcb_cpl_is_valid(svm))
			goto vmgexit_err;
		break;
	case SVM_EXIT_RDTSCP:
		break;
	case SVM_EXIT_WBINVD:
		break;
	case SVM_EXIT_MONITOR:
		if (!kvm_ghcb_rax_is_valid(svm) ||
		    !kvm_ghcb_rcx_is_valid(svm) ||
		    !kvm_ghcb_rdx_is_valid(svm))
			goto vmgexit_err;
		break;
	case SVM_EXIT_MWAIT:
		if (!kvm_ghcb_rax_is_valid(svm) ||
		    !kvm_ghcb_rcx_is_valid(svm))
			goto vmgexit_err;
		break;
	case SVM_VMGEXIT_MMIO_READ:
	case SVM_VMGEXIT_MMIO_WRITE:
		if (!kvm_ghcb_sw_scratch_is_valid(svm))
			goto vmgexit_err;
		break;
	case SVM_VMGEXIT_NMI_COMPLETE:
	case SVM_VMGEXIT_AP_HLT_LOOP:
	case SVM_VMGEXIT_AP_JUMP_TABLE:
	case SVM_VMGEXIT_UNSUPPORTED_EVENT:
		break;
	default:
		reason = GHCB_ERR_INVALID_EVENT;
		goto vmgexit_err;
	}

	return 0;

vmgexit_err:
	if (reason == GHCB_ERR_INVALID_USAGE) {
		vcpu_unimpl(vcpu, "vmgexit: ghcb usage %#x is not valid\n",
			    ghcb->ghcb_usage);
	} else if (reason == GHCB_ERR_INVALID_EVENT) {
		vcpu_unimpl(vcpu, "vmgexit: exit code %#llx is not valid\n",
			    exit_code);
	} else {
		vcpu_unimpl(vcpu, "vmgexit: exit code %#llx input is not valid\n",
			    exit_code);
		dump_ghcb(svm);
	}

	ghcb_set_sw_exit_info_1(ghcb, 2);
	ghcb_set_sw_exit_info_2(ghcb, reason);

	/* Resume the guest to "return" the error code. */
	return 1;
}

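/*
 * Tear down the host mapping of the GHCB at the end of a VMGEXIT.  Any
 * external scratch buffer is synced back to guest memory (if the
 * operation requires it) and freed, the final register state is written
 * to the GHCB, and the page is unmapped and marked dirty.
 */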
void sev_es_unmap_ghcb(struct vcpu_svm *svm)
{
	if (!svm->sev_es.ghcb)
		return;

	if (svm->sev_es.ghcb_sa_free) {
		/*
		 * The scratch area lives outside the GHCB, so there is a
		 * buffer that, depending on the operation performed, may
		 * need to be synced, then freed.
		 */
		if (svm->sev_es.ghcb_sa_sync) {
			kvm_write_guest(svm->vcpu.kvm,
					svm->sev_es.sw_scratch,
					svm->sev_es.ghcb_sa,
					svm->sev_es.ghcb_sa_len);
			svm->sev_es.ghcb_sa_sync = false;
		}

		kvfree(svm->sev_es.ghcb_sa);
		svm->sev_es.ghcb_sa = NULL;
		svm->sev_es.ghcb_sa_free = false;
	}

	trace_kvm_vmgexit_exit(svm->vcpu.vcpu_id, svm->sev_es.ghcb);

	sev_es_sync_to_ghcb(svm);

	kvm_vcpu_unmap(&svm->vcpu, &svm->sev_es.ghcb_map, true);
	svm->sev_es.ghcb = NULL;
}

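/*
 * Runs just before VMRUN for an SEV guest: bind the guest's ASID to
 * this CPU and request a TLB flush for the ASID unless this exact VMCB
 * was the most recent one to run on this CPU.
 */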
void pre_sev_run(struct vcpu_svm *svm, int cpu)
{
	struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, cpu);
	unsigned int asid = sev_get_asid(svm->vcpu.kvm);

	/* Assign the asid allocated with this SEV guest */
	svm->asid = asid;

	/*
	 * Flush guest TLB:
	 *
	 * 1) when different VMCB for the same ASID is to be run on the same host CPU.
	 * 2) or this VMCB was executed on different host CPU in previous VMRUNs.
	 */
	if (sd->sev_vmcbs[asid] == svm->vmcb &&
	    svm->vcpu.arch.last_vmentry_cpu == cpu)
		return;

	sd->sev_vmcbs[asid] = svm->vmcb;
	svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
	vmcb_mark_dirty(svm->vmcb, VMCB_ASID);
}

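/*
 * Map the scratch area that a VMGEXIT points at into something KVM can
 * safely use.  If the area lies inside the GHCB's shared buffer it is
 * used in place; otherwise the guest pages are copied into a bounce
 * buffer, capped at GHCB_SCRATCH_AREA_LIMIT, and optionally synced back
 * to guest memory when the GHCB is unmapped.
 */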
#define GHCB_SCRATCH_AREA_LIMIT		(16ULL * PAGE_SIZE)
static int setup_vmgexit_scratch(struct vcpu_svm *svm, bool sync, u64 len)
{
	struct vmcb_control_area *control = &svm->vmcb->control;
	struct ghcb *ghcb = svm->sev_es.ghcb;
	u64 ghcb_scratch_beg, ghcb_scratch_end;
	u64 scratch_gpa_beg, scratch_gpa_end;
	void *scratch_va;

	scratch_gpa_beg = svm->sev_es.sw_scratch;
	if (!scratch_gpa_beg) {
		pr_err("vmgexit: scratch gpa not provided\n");
		goto e_scratch;
	}

	scratch_gpa_end = scratch_gpa_beg + len;
	if (scratch_gpa_end < scratch_gpa_beg) {
		pr_err("vmgexit: scratch length (%#llx) not valid for scratch address (%#llx)\n",
		       len, scratch_gpa_beg);
		goto e_scratch;
	}

	if ((scratch_gpa_beg & PAGE_MASK) == control->ghcb_gpa) {
		/* Scratch area begins within GHCB */
		ghcb_scratch_beg = control->ghcb_gpa +
				   offsetof(struct ghcb, shared_buffer);
		ghcb_scratch_end = control->ghcb_gpa +
				   offsetof(struct ghcb, reserved_1);

		/*
		 * If the scratch area begins within the GHCB, it must be
		 * completely contained in the GHCB shared buffer area.
		 */
		if (scratch_gpa_beg < ghcb_scratch_beg ||
		    scratch_gpa_end > ghcb_scratch_end) {
			pr_err("vmgexit: scratch area is outside of GHCB shared buffer area (%#llx - %#llx)\n",
			       scratch_gpa_beg, scratch_gpa_end);
			goto e_scratch;
		}

		scratch_va = (void *)svm->sev_es.ghcb;
		scratch_va += (scratch_gpa_beg - control->ghcb_gpa);
	} else {
		/*
		 * The guest memory must be read into a kernel buffer, so
		 * limit the size
		 */
		if (len > GHCB_SCRATCH_AREA_LIMIT) {
			pr_err("vmgexit: scratch area exceeds KVM limits (%#llx requested, %#llx limit)\n",
			       len, GHCB_SCRATCH_AREA_LIMIT);
			goto e_scratch;
		}
		scratch_va = kvzalloc(len, GFP_KERNEL_ACCOUNT);
		if (!scratch_va)
			return -ENOMEM;

		if (kvm_read_guest(svm->vcpu.kvm, scratch_gpa_beg, scratch_va, len)) {
			/* Unable to copy scratch area from guest */
			pr_err("vmgexit: kvm_read_guest for scratch area failed\n");

			kvfree(scratch_va);
			return -EFAULT;
		}

		/*
		 * The scratch area is outside the GHCB. The operation will
		 * dictate whether the buffer needs to be synced before running
		 * the vCPU next time (i.e. a read was requested so the data
		 * must be written back to the guest memory).
		 */
		svm->sev_es.ghcb_sa_sync = sync;
		svm->sev_es.ghcb_sa_free = true;
	}

	svm->sev_es.ghcb_sa = scratch_va;
	svm->sev_es.ghcb_sa_len = len;

	return 0;

e_scratch:
	ghcb_set_sw_exit_info_1(ghcb, 2);
	ghcb_set_sw_exit_info_2(ghcb, GHCB_ERR_INVALID_SCRATCH_AREA);

	return 1;
}

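/*
 * When the guest uses the MSR-based GHCB protocol, the GHCB MSR doubles
 * as a mailbox: request and response values are packed into bitfields
 * of the MSR value itself.  These helpers read and write those fields.
 */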
static void set_ghcb_msr_bits(struct vcpu_svm *svm, u64 value, u64 mask,
			      unsigned int pos)
{
	svm->vmcb->control.ghcb_gpa &= ~(mask << pos);
	svm->vmcb->control.ghcb_gpa |= (value & mask) << pos;
}

static u64 get_ghcb_msr_bits(struct vcpu_svm *svm, u64 mask, unsigned int pos)
{
	return (svm->vmcb->control.ghcb_gpa >> pos) & mask;
}

static void set_ghcb_msr(struct vcpu_svm *svm, u64 value)
{
	svm->vmcb->control.ghcb_gpa = value;
}

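/*
 * Handle a VMGEXIT issued through the MSR-based GHCB protocol, which a
 * guest uses before it has established a GHCB page: SEV information,
 * CPUID and termination requests are serviced purely through values
 * encoded in the GHCB MSR.  Returns 1 to resume the guest, 0 to exit
 * to userspace.
 */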
static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm)
{
	struct vmcb_control_area *control = &svm->vmcb->control;
	struct kvm_vcpu *vcpu = &svm->vcpu;
	u64 ghcb_info;
	int ret = 1;

	ghcb_info = control->ghcb_gpa & GHCB_MSR_INFO_MASK;

	trace_kvm_vmgexit_msr_protocol_enter(svm->vcpu.vcpu_id,
					     control->ghcb_gpa);

	switch (ghcb_info) {
	case GHCB_MSR_SEV_INFO_REQ:
		set_ghcb_msr(svm, GHCB_MSR_SEV_INFO(GHCB_VERSION_MAX,
						    GHCB_VERSION_MIN,
						    sev_enc_bit));
		break;
	case GHCB_MSR_CPUID_REQ: {
		u64 cpuid_fn, cpuid_reg, cpuid_value;

		cpuid_fn = get_ghcb_msr_bits(svm,
					     GHCB_MSR_CPUID_FUNC_MASK,
					     GHCB_MSR_CPUID_FUNC_POS);

		/* Initialize the registers needed by the CPUID intercept */
		vcpu->arch.regs[VCPU_REGS_RAX] = cpuid_fn;
		vcpu->arch.regs[VCPU_REGS_RCX] = 0;

		ret = svm_invoke_exit_handler(vcpu, SVM_EXIT_CPUID);
		if (!ret) {
			/* Error, keep GHCB MSR value as-is */
			break;
		}

		cpuid_reg = get_ghcb_msr_bits(svm,
					      GHCB_MSR_CPUID_REG_MASK,
					      GHCB_MSR_CPUID_REG_POS);
		if (cpuid_reg == 0)
			cpuid_value = vcpu->arch.regs[VCPU_REGS_RAX];
		else if (cpuid_reg == 1)
			cpuid_value = vcpu->arch.regs[VCPU_REGS_RBX];
		else if (cpuid_reg == 2)
			cpuid_value = vcpu->arch.regs[VCPU_REGS_RCX];
		else
			cpuid_value = vcpu->arch.regs[VCPU_REGS_RDX];

		set_ghcb_msr_bits(svm, cpuid_value,
				  GHCB_MSR_CPUID_VALUE_MASK,
				  GHCB_MSR_CPUID_VALUE_POS);

		set_ghcb_msr_bits(svm, GHCB_MSR_CPUID_RESP,
				  GHCB_MSR_INFO_MASK,
				  GHCB_MSR_INFO_POS);
		break;
	}
	case GHCB_MSR_TERM_REQ: {
		u64 reason_set, reason_code;

		reason_set = get_ghcb_msr_bits(svm,
					       GHCB_MSR_TERM_REASON_SET_MASK,
					       GHCB_MSR_TERM_REASON_SET_POS);
		reason_code = get_ghcb_msr_bits(svm,
						GHCB_MSR_TERM_REASON_MASK,
						GHCB_MSR_TERM_REASON_POS);
		pr_info("SEV-ES guest requested termination: %#llx:%#llx\n",
			reason_set, reason_code);

		vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
		vcpu->run->system_event.type = KVM_SYSTEM_EVENT_SEV_TERM;
		vcpu->run->system_event.ndata = 1;
		vcpu->run->system_event.data[0] = control->ghcb_gpa;

		ret = 0;
		break;
	}
	default:
		/* Error, keep GHCB MSR value as-is */
		break;
	}

	trace_kvm_vmgexit_msr_protocol_exit(svm->vcpu.vcpu_id,
					    control->ghcb_gpa, ret);

	return ret;
}

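/*
 * Top-level VMGEXIT handler.  Dispatches either to the MSR-based
 * protocol (when the GHCB MSR carries request info) or, after mapping
 * and validating the guest's GHCB page, to the handler for the
 * synthesized exit code.  A return value of 1 resumes the guest.
 */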
int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_control_area *control = &svm->vmcb->control;
	u64 ghcb_gpa, exit_code;
	struct ghcb *ghcb;
	int ret;

	/* Validate the GHCB */
	ghcb_gpa = control->ghcb_gpa;
	if (ghcb_gpa & GHCB_MSR_INFO_MASK)
		return sev_handle_vmgexit_msr_protocol(svm);

	if (!ghcb_gpa) {
		vcpu_unimpl(vcpu, "vmgexit: GHCB gpa is not set\n");

		/* Without a GHCB, just return right back to the guest */
		return 1;
	}

	if (kvm_vcpu_map(vcpu, ghcb_gpa >> PAGE_SHIFT, &svm->sev_es.ghcb_map)) {
		/* Unable to map GHCB from guest */
		vcpu_unimpl(vcpu, "vmgexit: error mapping GHCB [%#llx] from guest\n",
			    ghcb_gpa);

		/* Without a GHCB, just return right back to the guest */
		return 1;
	}

	svm->sev_es.ghcb = svm->sev_es.ghcb_map.hva;
	ghcb = svm->sev_es.ghcb_map.hva;

	trace_kvm_vmgexit_enter(vcpu->vcpu_id, ghcb);

	sev_es_sync_from_ghcb(svm);
	ret = sev_es_validate_vmgexit(svm);
	if (ret)
		return ret;

	ghcb_set_sw_exit_info_1(ghcb, 0);
	ghcb_set_sw_exit_info_2(ghcb, 0);

	exit_code = kvm_ghcb_get_sw_exit_code(control);
	switch (exit_code) {
	case SVM_VMGEXIT_MMIO_READ:
		ret = setup_vmgexit_scratch(svm, true, control->exit_info_2);
		if (ret)
			break;

		ret = kvm_sev_es_mmio_read(vcpu,
					   control->exit_info_1,
					   control->exit_info_2,
					   svm->sev_es.ghcb_sa);
		break;
	case SVM_VMGEXIT_MMIO_WRITE:
		ret = setup_vmgexit_scratch(svm, false, control->exit_info_2);
		if (ret)
			break;

		ret = kvm_sev_es_mmio_write(vcpu,
					    control->exit_info_1,
					    control->exit_info_2,
					    svm->sev_es.ghcb_sa);
		break;
	case SVM_VMGEXIT_NMI_COMPLETE:
		ret = svm_invoke_exit_handler(vcpu, SVM_EXIT_IRET);
		break;
	case SVM_VMGEXIT_AP_HLT_LOOP:
		ret = kvm_emulate_ap_reset_hold(vcpu);
		break;
	case SVM_VMGEXIT_AP_JUMP_TABLE: {
		struct kvm_sev_info *sev = &to_kvm_svm(vcpu->kvm)->sev_info;

		switch (control->exit_info_1) {
		case 0:
			/* Set AP jump table address */
			sev->ap_jump_table = control->exit_info_2;
			break;
		case 1:
			/* Get AP jump table address */
			ghcb_set_sw_exit_info_2(ghcb, sev->ap_jump_table);
			break;
		default:
			pr_err("svm: vmgexit: unsupported AP jump table request - exit_info_1=%#llx\n",
			       control->exit_info_1);
			ghcb_set_sw_exit_info_1(ghcb, 2);
			ghcb_set_sw_exit_info_2(ghcb, GHCB_ERR_INVALID_INPUT);
		}

		ret = 1;
		break;
	}
	case SVM_VMGEXIT_UNSUPPORTED_EVENT:
		vcpu_unimpl(vcpu,
			    "vmgexit: unsupported event - exit_info_1=%#llx, exit_info_2=%#llx\n",
			    control->exit_info_1, control->exit_info_2);
		ret = -EINVAL;
		break;
	default:
		ret = svm_invoke_exit_handler(vcpu, exit_code);
	}

	return ret;
}

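/*
 * Emulate string I/O for an SEV-ES guest.  The repeat count arrives in
 * exit_info_2 and the data flows through the VMGEXIT scratch buffer,
 * so the total byte count is overflow-checked before the scratch
 * mapping is set up.
 */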
int sev_es_string_io(struct vcpu_svm *svm, int size, unsigned int port, int in)
{
	int count;
	int bytes;
	int r;

	if (svm->vmcb->control.exit_info_2 > INT_MAX)
		return -EINVAL;

	count = svm->vmcb->control.exit_info_2;
	if (unlikely(check_mul_overflow(count, size, &bytes)))
		return -EINVAL;

	r = setup_vmgexit_scratch(svm, in, bytes);
	if (r)
		return r;

	return kvm_sev_es_string_io(&svm->vcpu, size, port, svm->sev_es.ghcb_sa,
				    count, in);
}

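/*
 * If the CPU virtualizes TSC_AUX for SEV-ES guests (V_TSC_AUX), allow
 * the guest to access the MSR directly only when its CPUID actually
 * advertises RDTSCP or RDPID.
 */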
static void sev_es_vcpu_after_set_cpuid(struct vcpu_svm *svm)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;

	if (boot_cpu_has(X86_FEATURE_V_TSC_AUX)) {
		bool v_tsc_aux = guest_cpuid_has(vcpu, X86_FEATURE_RDTSCP) ||
				 guest_cpuid_has(vcpu, X86_FEATURE_RDPID);

		set_msr_interception(vcpu, svm->msrpm, MSR_TSC_AUX, v_tsc_aux, v_tsc_aux);
	}
}

void sev_vcpu_after_set_cpuid(struct vcpu_svm *svm)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;
	struct kvm_cpuid_entry2 *best;

	/* For sev guests, the memory encryption bit is not reserved in CR3. */
	best = kvm_find_cpuid_entry(vcpu, 0x8000001F);
	if (best)
		vcpu->arch.reserved_gpa_bits &= ~(1UL << (best->ebx & 0x3f));

	if (sev_es_guest(svm->vcpu.kvm))
		sev_es_vcpu_after_set_cpuid(svm);
}

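/*
 * SEV-ES specific VMCB setup.  Hardware, not KVM, owns most guest
 * state, so intercepts that would require KVM to read or modify the
 * encrypted VMSA are cleared; where change tracking is still needed,
 * trap-style intercepts that fire after the register is updated are
 * used instead.
 */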
static void sev_es_init_vmcb(struct vcpu_svm *svm)
{
	struct kvm_vcpu *vcpu = &svm->vcpu;

	svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ES_ENABLE;
	svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;

	/*
	 * An SEV-ES guest requires a VMSA area that is separate from the
	 * VMCB page.  Do not include the encryption mask on the VMSA physical
	 * address since hardware will access it using the guest key.  Note,
	 * the VMSA will be NULL if this vCPU is the destination for intrahost
	 * migration, and will be copied later.
	 */
	if (svm->sev_es.vmsa)
		svm->vmcb->control.vmsa_pa = __pa(svm->sev_es.vmsa);

	/* Can't intercept CR register access, HV can't modify CR registers */
	svm_clr_intercept(svm, INTERCEPT_CR0_READ);
	svm_clr_intercept(svm, INTERCEPT_CR4_READ);
	svm_clr_intercept(svm, INTERCEPT_CR8_READ);
	svm_clr_intercept(svm, INTERCEPT_CR0_WRITE);
	svm_clr_intercept(svm, INTERCEPT_CR4_WRITE);
	svm_clr_intercept(svm, INTERCEPT_CR8_WRITE);

	svm_clr_intercept(svm, INTERCEPT_SELECTIVE_CR0);

	/* Track EFER/CR register changes */
	svm_set_intercept(svm, TRAP_EFER_WRITE);
	svm_set_intercept(svm, TRAP_CR0_WRITE);
	svm_set_intercept(svm, TRAP_CR4_WRITE);
	svm_set_intercept(svm, TRAP_CR8_WRITE);

	/* No support for enable_vmware_backdoor */
	clr_exception_intercept(svm, GP_VECTOR);

	/* Can't intercept XSETBV, HV can't modify XCR0 directly */
	svm_clr_intercept(svm, INTERCEPT_XSETBV);

	/* Clear intercepts on selected MSRs */
	set_msr_interception(vcpu, svm->msrpm, MSR_EFER, 1, 1);
	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_CR_PAT, 1, 1);
	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
	set_msr_interception(vcpu, svm->msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
}

void sev_init_vmcb(struct vcpu_svm *svm)
{
	svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE;
	clr_exception_intercept(svm, UD_VECTOR);

	if (sev_es_guest(svm->vcpu.kvm))
		sev_es_init_vmcb(svm);
}

void sev_es_vcpu_reset(struct vcpu_svm *svm)
{
	/*
	 * Set the GHCB MSR value as per the GHCB specification when emulating
	 * vCPU RESET for an SEV-ES guest.
	 */
	set_ghcb_msr(svm, GHCB_MSR_SEV_INFO(GHCB_VERSION_MAX,
					    GHCB_VERSION_MIN,
					    sev_enc_bit));
}

void sev_es_prepare_switch_to_guest(struct sev_es_save_area *hostsa)
{
	/*
	 * As an SEV-ES guest, hardware will restore the host state on VMEXIT,
	 * of which one step is to perform a VMLOAD.  KVM performs the
	 * corresponding VMSAVE in svm_prepare_guest_switch for both
	 * traditional and SEV-ES guests.
	 */

	/* XCR0 is restored on VMEXIT, save the current host value */
	hostsa->xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);

	/* PKRU is restored on VMEXIT, save the current host value */
	hostsa->pkru = read_pkru();

	/* MSR_IA32_XSS is restored on VMEXIT, save the current host value */
	hostsa->xss = host_xss;
}

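/*
 * Deliver a SIPI to an SEV-ES vCPU.  The first SIPI simply starts the
 * vCPU with the state the VMM supplied at creation; subsequent SIPIs
 * complete a pending AP Reset Hold VMGEXIT so the guest itself can
 * program CS:RIP for the AP.
 */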
void sev_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, u8 vector)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	/* First SIPI: Use the values as initially set by the VMM */
	if (!svm->sev_es.received_first_sipi) {
		svm->sev_es.received_first_sipi = true;
		return;
	}

	/*
	 * Subsequent SIPI: Return from an AP Reset Hold VMGEXIT, where
	 * the guest will set the CS and RIP.  Set SW_EXIT_INFO_2 to a
	 * non-zero value.
	 */
	if (!svm->sev_es.ghcb)
		return;

	ghcb_set_sw_exit_info_2(svm->sev_es.ghcb, 1);
}