1 // SPDX-License-Identifier: GPL-2.0
3 #include <linux/objtool.h>
4 #include <linux/percpu.h>
6 #include <asm/debugreg.h>
7 #include <asm/mmu_context.h>
20 static bool __read_mostly enable_shadow_vmcs = 1;
21 module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO);
23 static bool __read_mostly nested_early_check = 0;
24 module_param(nested_early_check, bool, S_IRUGO);
26 #define CC KVM_NESTED_VMENTER_CONSISTENCY_CHECK
29 * Hyper-V requires all of these, so mark them as supported even though
30 * they are just treated the same as all-context.
32 #define VMX_VPID_EXTENT_SUPPORTED_MASK \
33 (VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT | \
34 VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT | \
35 VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT | \
36 VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT)
38 #define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5
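/*
 * Note on the rate above (assumed from the SDM definition of IA32_VMX_MISC
 * bits 4:0): the value is the TSC bit whose toggling decrements the
 * VMX-preemption timer, so a rate of 5 means the emulated timer ticks once
 * every 2^5 = 32 TSC cycles.
 */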
45 static unsigned long *vmx_bitmap[VMX_BITMAP_NR];
47 #define vmx_vmread_bitmap (vmx_bitmap[VMX_VMREAD_BITMAP])
48 #define vmx_vmwrite_bitmap (vmx_bitmap[VMX_VMWRITE_BITMAP])
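/*
 * Rough reminder of the shadow-VMCS bitmap semantics assumed here (per the
 * SDM): a set bit in the VMREAD/VMWRITE bitmap makes the corresponding field
 * encoding trap to L0 as usual, while a clear bit lets the CPU satisfy L1's
 * VMREAD/VMWRITE directly from the shadow VMCS without a VM-exit. That is why
 * init_vmcs_shadow_fields() below starts from all-ones bitmaps and only
 * clears bits for fields KVM can safely shadow.
 */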
50 struct shadow_vmcs_field {
54 static struct shadow_vmcs_field shadow_read_only_fields[] = {
55 #define SHADOW_FIELD_RO(x, y) { x, offsetof(struct vmcs12, y) },
56 #include "vmcs_shadow_fields.h"
58 static int max_shadow_read_only_fields =
59 ARRAY_SIZE(shadow_read_only_fields);
61 static struct shadow_vmcs_field shadow_read_write_fields[] = {
62 #define SHADOW_FIELD_RW(x, y) { x, offsetof(struct vmcs12, y) },
63 #include "vmcs_shadow_fields.h"
65 static int max_shadow_read_write_fields =
66 ARRAY_SIZE(shadow_read_write_fields);
68 static void init_vmcs_shadow_fields(void)
72 memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
73 memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);
75 for (i = j = 0; i < max_shadow_read_only_fields; i++) {
76 struct shadow_vmcs_field entry = shadow_read_only_fields[i];
77 u16 field = entry.encoding;
79 if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 &&
80 (i + 1 == max_shadow_read_only_fields ||
81 shadow_read_only_fields[i + 1].encoding != field + 1))
82 pr_err("Missing field from shadow_read_only_field %x\n",
85 clear_bit(field, vmx_vmread_bitmap);
90 entry.offset += sizeof(u32);
92 shadow_read_only_fields[j++] = entry;
94 max_shadow_read_only_fields = j;
96 for (i = j = 0; i < max_shadow_read_write_fields; i++) {
97 struct shadow_vmcs_field entry = shadow_read_write_fields[i];
98 u16 field = entry.encoding;
100 if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 &&
101 (i + 1 == max_shadow_read_write_fields ||
102 shadow_read_write_fields[i + 1].encoding != field + 1))
103 pr_err("Missing field from shadow_read_write_field %x\n",
106 WARN_ONCE(field >= GUEST_ES_AR_BYTES &&
107 field <= GUEST_TR_AR_BYTES,
108 "Update vmcs12_write_any() to drop reserved bits from AR_BYTES");
111 * PML and the preemption timer can be emulated, but the
112 * processor cannot vmwrite to fields that don't exist
116 case GUEST_PML_INDEX:
117 if (!cpu_has_vmx_pml())
120 case VMX_PREEMPTION_TIMER_VALUE:
121 if (!cpu_has_vmx_preemption_timer())
124 case GUEST_INTR_STATUS:
125 if (!cpu_has_vmx_apicv())
132 clear_bit(field, vmx_vmwrite_bitmap);
133 clear_bit(field, vmx_vmread_bitmap);
138 entry.offset += sizeof(u32);
140 shadow_read_write_fields[j++] = entry;
142 max_shadow_read_write_fields = j;
146 * The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(),
147 * set the success or error code of an emulated VMX instruction (as specified
148 * by Vol 2B, VMX Instruction Reference, "Conventions"), and skip the emulated instruction.
151 static int nested_vmx_succeed(struct kvm_vcpu *vcpu)
153 vmx_set_rflags(vcpu, vmx_get_rflags(vcpu)
154 & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
155 X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF));
156 return kvm_skip_emulated_instruction(vcpu);
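/*
 * For reference, the flag conventions being emulated (SDM, VMX Instruction
 * Reference, "Conventions"): VMsucceed clears CF/PF/AF/ZF/SF/OF;
 * VMfailInvalid sets only CF (there is no current VMCS to hold an error
 * number); VMfailValid sets only ZF and stores a VM-instruction error number
 * in the current VMCS. nested_vmx_failInvalid()/failValid() below reproduce
 * exactly that.
 */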
159 static int nested_vmx_failInvalid(struct kvm_vcpu *vcpu)
161 vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
162 & ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
163 X86_EFLAGS_SF | X86_EFLAGS_OF))
165 return kvm_skip_emulated_instruction(vcpu);
168 static int nested_vmx_failValid(struct kvm_vcpu *vcpu,
169 u32 vm_instruction_error)
171 vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
172 & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
173 X86_EFLAGS_SF | X86_EFLAGS_OF))
175 get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error;
177 * We don't need to force sync to shadow VMCS because
178 * VM_INSTRUCTION_ERROR is not shadowed. Enlightened VMCS 'shadows' all
179 * fields and thus must be synced.
181 if (to_vmx(vcpu)->nested.hv_evmcs_vmptr != EVMPTR_INVALID)
182 to_vmx(vcpu)->nested.need_vmcs12_to_shadow_sync = true;
184 return kvm_skip_emulated_instruction(vcpu);
187 static int nested_vmx_fail(struct kvm_vcpu *vcpu, u32 vm_instruction_error)
189 struct vcpu_vmx *vmx = to_vmx(vcpu);
192 * failValid writes the error number to the current VMCS, which
193 * can't be done if there isn't a current VMCS.
195 if (vmx->nested.current_vmptr == INVALID_GPA &&
196 !evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
197 return nested_vmx_failInvalid(vcpu);
199 return nested_vmx_failValid(vcpu, vm_instruction_error);
202 static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator)
204 /* TODO: don't simply reset the guest here. */
205 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
206 pr_debug_ratelimited("kvm: nested vmx abort, indicator %d\n", indicator);
209 static inline bool vmx_control_verify(u32 control, u32 low, u32 high)
211 return fixed_bits_valid(control, low, high);
214 static inline u64 vmx_control_msr(u32 low, u32 high)
216 return low | ((u64)high << 32);
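/*
 * Layout assumed for the VMX control capability MSRs packed above: bits 31:0
 * ("low") are the allowed-0 settings, i.e. a set bit means the control must
 * be 1; bits 63:32 ("high") are the allowed-1 settings, i.e. a clear bit
 * means the control must be 0. vmx_control_verify() checks a control word
 * against both halves.
 */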
219 static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx)
221 secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_SHADOW_VMCS);
222 vmcs_write64(VMCS_LINK_POINTER, INVALID_GPA);
223 vmx->nested.need_vmcs12_to_shadow_sync = false;
226 static inline void nested_release_evmcs(struct kvm_vcpu *vcpu)
228 struct vcpu_vmx *vmx = to_vmx(vcpu);
230 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) {
231 kvm_vcpu_unmap(vcpu, &vmx->nested.hv_evmcs_map, true);
232 vmx->nested.hv_evmcs = NULL;
235 vmx->nested.hv_evmcs_vmptr = EVMPTR_INVALID;
238 static void vmx_sync_vmcs_host_state(struct vcpu_vmx *vmx,
239 struct loaded_vmcs *prev)
241 struct vmcs_host_state *dest, *src;
243 if (unlikely(!vmx->guest_state_loaded))
246 src = &prev->host_state;
247 dest = &vmx->loaded_vmcs->host_state;
249 vmx_set_host_fs_gs(dest, src->fs_sel, src->gs_sel, src->fs_base, src->gs_base);
250 dest->ldt_sel = src->ldt_sel;
252 dest->ds_sel = src->ds_sel;
253 dest->es_sel = src->es_sel;
257 static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
259 struct vcpu_vmx *vmx = to_vmx(vcpu);
260 struct loaded_vmcs *prev;
263 if (WARN_ON_ONCE(vmx->loaded_vmcs == vmcs))
267 prev = vmx->loaded_vmcs;
268 vmx->loaded_vmcs = vmcs;
269 vmx_vcpu_load_vmcs(vcpu, cpu, prev);
270 vmx_sync_vmcs_host_state(vmx, prev);
273 vcpu->arch.regs_avail = ~VMX_REGS_LAZY_LOAD_SET;
276 * All lazily updated registers will be reloaded from VMCS12 on both
277 * vmentry and vmexit.
279 vcpu->arch.regs_dirty = 0;
283 * Free whatever needs to be freed from vmx->nested when L1 goes down, or
284 * just stops using VMX.
286 static void free_nested(struct kvm_vcpu *vcpu)
288 struct vcpu_vmx *vmx = to_vmx(vcpu);
290 if (WARN_ON_ONCE(vmx->loaded_vmcs != &vmx->vmcs01))
291 vmx_switch_vmcs(vcpu, &vmx->vmcs01);
293 if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon)
296 kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
298 vmx->nested.vmxon = false;
299 vmx->nested.smm.vmxon = false;
300 vmx->nested.vmxon_ptr = INVALID_GPA;
301 free_vpid(vmx->nested.vpid02);
302 vmx->nested.posted_intr_nv = -1;
303 vmx->nested.current_vmptr = INVALID_GPA;
304 if (enable_shadow_vmcs) {
305 vmx_disable_shadow_vmcs(vmx);
306 vmcs_clear(vmx->vmcs01.shadow_vmcs);
307 free_vmcs(vmx->vmcs01.shadow_vmcs);
308 vmx->vmcs01.shadow_vmcs = NULL;
310 kfree(vmx->nested.cached_vmcs12);
311 vmx->nested.cached_vmcs12 = NULL;
312 kfree(vmx->nested.cached_shadow_vmcs12);
313 vmx->nested.cached_shadow_vmcs12 = NULL;
315 * Unpin physical memory we referred to in the vmcs02. The APIC access
316 * page's backing page (yeah, confusing) shouldn't actually be accessed,
317 * and if it is written, the contents are irrelevant.
319 kvm_vcpu_unmap(vcpu, &vmx->nested.apic_access_page_map, false);
320 kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true);
321 kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true);
322 vmx->nested.pi_desc = NULL;
324 kvm_mmu_free_roots(vcpu->kvm, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
326 nested_release_evmcs(vcpu);
328 free_loaded_vmcs(&vmx->nested.vmcs02);
332 * Ensure that the current vmcs of the logical processor is the
333 * vmcs01 of the vcpu before calling free_nested().
335 void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu)
338 vmx_leave_nested(vcpu);
342 #define EPTP_PA_MASK GENMASK_ULL(51, 12)
344 static bool nested_ept_root_matches(hpa_t root_hpa, u64 root_eptp, u64 eptp)
346 return VALID_PAGE(root_hpa) &&
347 ((root_eptp & EPTP_PA_MASK) == (eptp & EPTP_PA_MASK));
350 static void nested_ept_invalidate_addr(struct kvm_vcpu *vcpu, gpa_t eptp,
354 struct kvm_mmu_root_info *cached_root;
356 WARN_ON_ONCE(!mmu_is_nested(vcpu));
358 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
359 cached_root = &vcpu->arch.mmu->prev_roots[i];
361 if (nested_ept_root_matches(cached_root->hpa, cached_root->pgd,
363 vcpu->arch.mmu->invlpg(vcpu, addr, cached_root->hpa);
367 static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
368 struct x86_exception *fault)
370 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
371 struct vcpu_vmx *vmx = to_vmx(vcpu);
373 unsigned long exit_qualification = vcpu->arch.exit_qualification;
375 if (vmx->nested.pml_full) {
376 vm_exit_reason = EXIT_REASON_PML_FULL;
377 vmx->nested.pml_full = false;
378 exit_qualification &= INTR_INFO_UNBLOCK_NMI;
380 if (fault->error_code & PFERR_RSVD_MASK)
381 vm_exit_reason = EXIT_REASON_EPT_MISCONFIG;
383 vm_exit_reason = EXIT_REASON_EPT_VIOLATION;
386 * Although the caller (kvm_inject_emulated_page_fault) would
387 * have already synced the faulting address in the shadow EPT
388 * tables for the current EPTP12, we also need to sync it for
389 * any other cached EPTP02s based on the same EP4TA, since the
390 * TLB associates mappings to the EP4TA rather than the full EPTP.
392 nested_ept_invalidate_addr(vcpu, vmcs12->ept_pointer,
396 nested_vmx_vmexit(vcpu, vm_exit_reason, 0, exit_qualification);
397 vmcs12->guest_physical_address = fault->address;
400 static void nested_ept_new_eptp(struct kvm_vcpu *vcpu)
402 struct vcpu_vmx *vmx = to_vmx(vcpu);
403 bool execonly = vmx->nested.msrs.ept_caps & VMX_EPT_EXECUTE_ONLY_BIT;
404 int ept_lpage_level = ept_caps_to_lpage_level(vmx->nested.msrs.ept_caps);
406 kvm_init_shadow_ept_mmu(vcpu, execonly, ept_lpage_level,
407 nested_ept_ad_enabled(vcpu),
408 nested_ept_get_eptp(vcpu));
411 static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
413 WARN_ON(mmu_is_nested(vcpu));
415 vcpu->arch.mmu = &vcpu->arch.guest_mmu;
416 nested_ept_new_eptp(vcpu);
417 vcpu->arch.mmu->get_guest_pgd = nested_ept_get_eptp;
418 vcpu->arch.mmu->inject_page_fault = nested_ept_inject_page_fault;
419 vcpu->arch.mmu->get_pdptr = kvm_pdptr_read;
421 vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
424 static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu)
426 vcpu->arch.mmu = &vcpu->arch.root_mmu;
427 vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
430 static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
433 bool inequality, bit;
435 bit = (vmcs12->exception_bitmap & (1u << PF_VECTOR)) != 0;
437 inequality = (error_code & vmcs12->page_fault_error_code_mask) !=
438 vmcs12->page_fault_error_code_match;
439 return inequality ^ bit;
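/*
 * A small truth table may help (assuming the usual PFEC_MASK/PFEC_MATCH
 * semantics): with the #PF bit set in the exception bitmap, the fault is
 * reflected to L1 unless (error_code & mask) != match; with the bit clear,
 * it is reflected only when (error_code & mask) != match. XOR-ing
 * "inequality" with "bit" above encodes exactly that.
 */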
442 static bool nested_vmx_is_exception_vmexit(struct kvm_vcpu *vcpu, u8 vector,
445 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
448 * Drop bits 31:16 of the error code when performing the #PF mask+match
449 * check. All VMCS fields involved are 32 bits, but Intel CPUs never
450 * set bits 31:16 and VMX disallows setting bits 31:16 in the injected
451 * error code. Including the to-be-dropped bits in the check might
452 * result in an "impossible" or missed exit from L1's perspective.
454 if (vector == PF_VECTOR)
455 return nested_vmx_is_page_fault_vmexit(vmcs12, (u16)error_code);
457 return (vmcs12->exception_bitmap & (1u << vector));
460 static int nested_vmx_check_io_bitmap_controls(struct kvm_vcpu *vcpu,
461 struct vmcs12 *vmcs12)
463 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
466 if (CC(!page_address_valid(vcpu, vmcs12->io_bitmap_a)) ||
467 CC(!page_address_valid(vcpu, vmcs12->io_bitmap_b)))
473 static int nested_vmx_check_msr_bitmap_controls(struct kvm_vcpu *vcpu,
474 struct vmcs12 *vmcs12)
476 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
479 if (CC(!page_address_valid(vcpu, vmcs12->msr_bitmap)))
485 static int nested_vmx_check_tpr_shadow_controls(struct kvm_vcpu *vcpu,
486 struct vmcs12 *vmcs12)
488 if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
491 if (CC(!page_address_valid(vcpu, vmcs12->virtual_apic_page_addr)))
498 * For x2APIC MSRs, ignore the vmcs01 bitmap. L1 can enable x2APIC without L1
499 * itself utilizing x2APIC. All MSRs were previously set to be intercepted,
500 * only the "disable intercept" case needs to be handled.
502 static void nested_vmx_disable_intercept_for_x2apic_msr(unsigned long *msr_bitmap_l1,
503 unsigned long *msr_bitmap_l0,
506 if (type & MSR_TYPE_R && !vmx_test_msr_bitmap_read(msr_bitmap_l1, msr))
507 vmx_clear_msr_bitmap_read(msr_bitmap_l0, msr);
509 if (type & MSR_TYPE_W && !vmx_test_msr_bitmap_write(msr_bitmap_l1, msr))
510 vmx_clear_msr_bitmap_write(msr_bitmap_l0, msr);
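/*
 * Rough sketch of the MSR-bitmap layout relied on below (per the SDM): the
 * 4K bitmap holds the read bitmap for low MSRs (0x0 - 0x1fff) at offset 0
 * and the write bitmap for low MSRs at byte offset 0x800, one bit per MSR.
 * The x2APIC MSRs 0x800 - 0x8ff fall in the low range, so setting whole
 * words at msr_bitmap[word] and msr_bitmap[word + 0x800 / sizeof(long)]
 * intercepts both reads and writes for that BITS_PER_LONG-sized chunk.
 */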
513 static inline void enable_x2apic_msr_intercepts(unsigned long *msr_bitmap)
517 for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
518 unsigned word = msr / BITS_PER_LONG;
520 msr_bitmap[word] = ~0;
521 msr_bitmap[word + (0x800 / sizeof(long))] = ~0;
525 #define BUILD_NVMX_MSR_INTERCEPT_HELPER(rw) \
527 void nested_vmx_set_msr_##rw##_intercept(struct vcpu_vmx *vmx, \
528 unsigned long *msr_bitmap_l1, \
529 unsigned long *msr_bitmap_l0, u32 msr) \
531 if (vmx_test_msr_bitmap_##rw(vmx->vmcs01.msr_bitmap, msr) || \
532 vmx_test_msr_bitmap_##rw(msr_bitmap_l1, msr)) \
533 vmx_set_msr_bitmap_##rw(msr_bitmap_l0, msr); \
535 vmx_clear_msr_bitmap_##rw(msr_bitmap_l0, msr); \
537 BUILD_NVMX_MSR_INTERCEPT_HELPER(read)
538 BUILD_NVMX_MSR_INTERCEPT_HELPER(write)
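/*
 * The generated helpers above implement the merge rule used when building
 * the vmcs02 bitmap: an MSR access is passed through to L2 only if *both*
 * vmcs01's bitmap (L0's constraints, e.g. userspace MSR filters) and L1's
 * vmcs12 bitmap allow it; otherwise the intercept bit is set in
 * msr_bitmap_l0, i.e. the vmcs02 bitmap.
 */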
540 static inline void nested_vmx_set_intercept_for_msr(struct vcpu_vmx *vmx,
541 unsigned long *msr_bitmap_l1,
542 unsigned long *msr_bitmap_l0,
545 if (types & MSR_TYPE_R)
546 nested_vmx_set_msr_read_intercept(vmx, msr_bitmap_l1,
548 if (types & MSR_TYPE_W)
549 nested_vmx_set_msr_write_intercept(vmx, msr_bitmap_l1,
554 * Merge L0's and L1's MSR bitmaps; return false to indicate that
555 * we do not use the hardware MSR bitmap (i.e. all of L2's MSR accesses are intercepted).
557 static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
558 struct vmcs12 *vmcs12)
560 struct vcpu_vmx *vmx = to_vmx(vcpu);
562 unsigned long *msr_bitmap_l1;
563 unsigned long *msr_bitmap_l0 = vmx->nested.vmcs02.msr_bitmap;
564 struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;
565 struct kvm_host_map *map = &vmx->nested.msr_bitmap_map;
567 /* Nothing to do if the MSR bitmap is not in use. */
568 if (!cpu_has_vmx_msr_bitmap() ||
569 !nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
573 * MSR bitmap update can be skipped when:
574 * - MSR bitmap for L1 hasn't changed.
575 * - Nested hypervisor (L1) is attempting to launch the same L2 as
577 * - Nested hypervisor (L1) has enabled 'Enlightened MSR Bitmap' feature
578 * and tells KVM (L0) there were no changes in MSR bitmap for L2.
580 if (!vmx->nested.force_msr_bitmap_recalc && evmcs &&
581 evmcs->hv_enlightenments_control.msr_bitmap &&
582 evmcs->hv_clean_fields & HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP)
585 if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->msr_bitmap), map))
588 msr_bitmap_l1 = (unsigned long *)map->hva;
591 * To keep the control flow simple, pay eight 8-byte writes (sixteen
592 * 4-byte writes on 32-bit systems) up front to enable intercepts for
593 * the x2APIC MSR range and selectively toggle those relevant to L2.
595 enable_x2apic_msr_intercepts(msr_bitmap_l0);
597 if (nested_cpu_has_virt_x2apic_mode(vmcs12)) {
598 if (nested_cpu_has_apic_reg_virt(vmcs12)) {
600 * L0 need not intercept reads for MSRs between 0x800
601 * and 0x8ff, it just lets the processor take the value
602 * from the virtual-APIC page; take those 256 bits
603 * directly from the L1 bitmap.
605 for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
606 unsigned word = msr / BITS_PER_LONG;
608 msr_bitmap_l0[word] = msr_bitmap_l1[word];
612 nested_vmx_disable_intercept_for_x2apic_msr(
613 msr_bitmap_l1, msr_bitmap_l0,
614 X2APIC_MSR(APIC_TASKPRI),
615 MSR_TYPE_R | MSR_TYPE_W);
617 if (nested_cpu_has_vid(vmcs12)) {
618 nested_vmx_disable_intercept_for_x2apic_msr(
619 msr_bitmap_l1, msr_bitmap_l0,
620 X2APIC_MSR(APIC_EOI),
622 nested_vmx_disable_intercept_for_x2apic_msr(
623 msr_bitmap_l1, msr_bitmap_l0,
624 X2APIC_MSR(APIC_SELF_IPI),
630 * Always check vmcs01's bitmap to honor userspace MSR filters and any
631 * other runtime changes to vmcs01's bitmap, e.g. dynamic pass-through.
634 nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
635 MSR_FS_BASE, MSR_TYPE_RW);
637 nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
638 MSR_GS_BASE, MSR_TYPE_RW);
640 nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
641 MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
643 nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
644 MSR_IA32_SPEC_CTRL, MSR_TYPE_RW);
646 nested_vmx_set_intercept_for_msr(vmx, msr_bitmap_l1, msr_bitmap_l0,
647 MSR_IA32_PRED_CMD, MSR_TYPE_W);
649 kvm_vcpu_unmap(vcpu, &vmx->nested.msr_bitmap_map, false);
651 vmx->nested.force_msr_bitmap_recalc = false;
656 static void nested_cache_shadow_vmcs12(struct kvm_vcpu *vcpu,
657 struct vmcs12 *vmcs12)
659 struct vcpu_vmx *vmx = to_vmx(vcpu);
660 struct gfn_to_hva_cache *ghc = &vmx->nested.shadow_vmcs12_cache;
662 if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
663 vmcs12->vmcs_link_pointer == INVALID_GPA)
666 if (ghc->gpa != vmcs12->vmcs_link_pointer &&
667 kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc,
668 vmcs12->vmcs_link_pointer, VMCS12_SIZE))
671 kvm_read_guest_cached(vmx->vcpu.kvm, ghc, get_shadow_vmcs12(vcpu),
675 static void nested_flush_cached_shadow_vmcs12(struct kvm_vcpu *vcpu,
676 struct vmcs12 *vmcs12)
678 struct vcpu_vmx *vmx = to_vmx(vcpu);
679 struct gfn_to_hva_cache *ghc = &vmx->nested.shadow_vmcs12_cache;
681 if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
682 vmcs12->vmcs_link_pointer == INVALID_GPA)
685 if (ghc->gpa != vmcs12->vmcs_link_pointer &&
686 kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc,
687 vmcs12->vmcs_link_pointer, VMCS12_SIZE))
690 kvm_write_guest_cached(vmx->vcpu.kvm, ghc, get_shadow_vmcs12(vcpu),
695 * In nested virtualization, check if L1 has set
696 * VM_EXIT_ACK_INTR_ON_EXIT
698 static bool nested_exit_intr_ack_set(struct kvm_vcpu *vcpu)
700 return get_vmcs12(vcpu)->vm_exit_controls &
701 VM_EXIT_ACK_INTR_ON_EXIT;
704 static int nested_vmx_check_apic_access_controls(struct kvm_vcpu *vcpu,
705 struct vmcs12 *vmcs12)
707 if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) &&
708 CC(!page_address_valid(vcpu, vmcs12->apic_access_addr)))
714 static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
715 struct vmcs12 *vmcs12)
717 if (!nested_cpu_has_virt_x2apic_mode(vmcs12) &&
718 !nested_cpu_has_apic_reg_virt(vmcs12) &&
719 !nested_cpu_has_vid(vmcs12) &&
720 !nested_cpu_has_posted_intr(vmcs12))
724 * If virtualize x2apic mode is enabled,
725 * virtualize apic access must be disabled.
727 if (CC(nested_cpu_has_virt_x2apic_mode(vmcs12) &&
728 nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)))
732 * If virtual interrupt delivery is enabled,
733 * we must exit on external interrupts.
735 if (CC(nested_cpu_has_vid(vmcs12) && !nested_exit_on_intr(vcpu)))
739 * bits 15:8 should be zero in posted_intr_nv,
740 * the descriptor address has already been checked
741 * in nested_get_vmcs12_pages.
743 * bits 5:0 of posted_intr_desc_addr should be zero.
745 if (nested_cpu_has_posted_intr(vmcs12) &&
746 (CC(!nested_cpu_has_vid(vmcs12)) ||
747 CC(!nested_exit_intr_ack_set(vcpu)) ||
748 CC((vmcs12->posted_intr_nv & 0xff00)) ||
749 CC(!kvm_vcpu_is_legal_aligned_gpa(vcpu, vmcs12->posted_intr_desc_addr, 64))))
752 /* tpr shadow is needed by all apicv features. */
753 if (CC(!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)))
759 static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
765 if (!kvm_vcpu_is_legal_aligned_gpa(vcpu, addr, 16) ||
766 !kvm_vcpu_is_legal_gpa(vcpu, (addr + count * sizeof(struct vmx_msr_entry) - 1)))
772 static int nested_vmx_check_exit_msr_switch_controls(struct kvm_vcpu *vcpu,
773 struct vmcs12 *vmcs12)
775 if (CC(nested_vmx_check_msr_switch(vcpu,
776 vmcs12->vm_exit_msr_load_count,
777 vmcs12->vm_exit_msr_load_addr)) ||
778 CC(nested_vmx_check_msr_switch(vcpu,
779 vmcs12->vm_exit_msr_store_count,
780 vmcs12->vm_exit_msr_store_addr)))
786 static int nested_vmx_check_entry_msr_switch_controls(struct kvm_vcpu *vcpu,
787 struct vmcs12 *vmcs12)
789 if (CC(nested_vmx_check_msr_switch(vcpu,
790 vmcs12->vm_entry_msr_load_count,
791 vmcs12->vm_entry_msr_load_addr)))
797 static int nested_vmx_check_pml_controls(struct kvm_vcpu *vcpu,
798 struct vmcs12 *vmcs12)
800 if (!nested_cpu_has_pml(vmcs12))
803 if (CC(!nested_cpu_has_ept(vmcs12)) ||
804 CC(!page_address_valid(vcpu, vmcs12->pml_address)))
810 static int nested_vmx_check_unrestricted_guest_controls(struct kvm_vcpu *vcpu,
811 struct vmcs12 *vmcs12)
813 if (CC(nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST) &&
814 !nested_cpu_has_ept(vmcs12)))
819 static int nested_vmx_check_mode_based_ept_exec_controls(struct kvm_vcpu *vcpu,
820 struct vmcs12 *vmcs12)
822 if (CC(nested_cpu_has2(vmcs12, SECONDARY_EXEC_MODE_BASED_EPT_EXEC) &&
823 !nested_cpu_has_ept(vmcs12)))
828 static int nested_vmx_check_shadow_vmcs_controls(struct kvm_vcpu *vcpu,
829 struct vmcs12 *vmcs12)
831 if (!nested_cpu_has_shadow_vmcs(vmcs12))
834 if (CC(!page_address_valid(vcpu, vmcs12->vmread_bitmap)) ||
835 CC(!page_address_valid(vcpu, vmcs12->vmwrite_bitmap)))
841 static int nested_vmx_msr_check_common(struct kvm_vcpu *vcpu,
842 struct vmx_msr_entry *e)
844 /* x2APIC MSR accesses are not allowed */
845 if (CC(vcpu->arch.apic_base & X2APIC_ENABLE && e->index >> 8 == 0x8))
847 if (CC(e->index == MSR_IA32_UCODE_WRITE) || /* SDM Table 35-2 */
848 CC(e->index == MSR_IA32_UCODE_REV))
850 if (CC(e->reserved != 0))
855 static int nested_vmx_load_msr_check(struct kvm_vcpu *vcpu,
856 struct vmx_msr_entry *e)
858 if (CC(e->index == MSR_FS_BASE) ||
859 CC(e->index == MSR_GS_BASE) ||
860 CC(e->index == MSR_IA32_SMM_MONITOR_CTL) || /* SMM is not supported */
861 nested_vmx_msr_check_common(vcpu, e))
866 static int nested_vmx_store_msr_check(struct kvm_vcpu *vcpu,
867 struct vmx_msr_entry *e)
869 if (CC(e->index == MSR_IA32_SMBASE) || /* SMM is not supported */
870 nested_vmx_msr_check_common(vcpu, e))
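/*
 * Note on the limit computed below (an assumption based on the SDM
 * definition of IA32_VMX_MISC bits 27:25): the field encodes N, and the
 * recommended maximum number of entries in each MSR load/store list is
 * 512 * (N + 1), which is what vmx_misc_max_msr() and
 * VMX_MISC_MSR_LIST_MULTIPLIER reconstruct.
 */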
875 static u32 nested_vmx_max_atomic_switch_msrs(struct kvm_vcpu *vcpu)
877 struct vcpu_vmx *vmx = to_vmx(vcpu);
878 u64 vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low,
879 vmx->nested.msrs.misc_high);
881 return (vmx_misc_max_msr(vmx_misc) + 1) * VMX_MISC_MSR_LIST_MULTIPLIER;
885 * Load guest's/host's msr at nested entry/exit.
886 * return 0 for success, entry index for failure.
888 * One of the failure modes for MSR load/store is when a list exceeds the
889 * virtual hardware's capacity. To maintain compatibility with hardware as
890 * much as possible, process all valid entries before failing rather than
891 * prechecking for a capacity violation.
893 static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
896 struct vmx_msr_entry e;
897 u32 max_msr_list_size = nested_vmx_max_atomic_switch_msrs(vcpu);
899 for (i = 0; i < count; i++) {
900 if (unlikely(i >= max_msr_list_size))
903 if (kvm_vcpu_read_guest(vcpu, gpa + i * sizeof(e),
905 pr_debug_ratelimited(
906 "%s cannot read MSR entry (%u, 0x%08llx)\n",
907 __func__, i, gpa + i * sizeof(e));
910 if (nested_vmx_load_msr_check(vcpu, &e)) {
911 pr_debug_ratelimited(
912 "%s check failed (%u, 0x%x, 0x%x)\n",
913 __func__, i, e.index, e.reserved);
916 if (kvm_set_msr(vcpu, e.index, e.value)) {
917 pr_debug_ratelimited(
918 "%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
919 __func__, i, e.index, e.value);
925 /* Note, max_msr_list_size is at most 4096, i.e. this can't wrap. */
929 static bool nested_vmx_get_vmexit_msr_value(struct kvm_vcpu *vcpu,
933 struct vcpu_vmx *vmx = to_vmx(vcpu);
936 * If the L0 hypervisor stored a more accurate value for the TSC that
937 * does not include the time taken for emulation of the L2->L1
938 * VM-exit in L0, use the more accurate value.
940 if (msr_index == MSR_IA32_TSC) {
941 int i = vmx_find_loadstore_msr_slot(&vmx->msr_autostore.guest,
945 u64 val = vmx->msr_autostore.guest.val[i].value;
947 *data = kvm_read_l1_tsc(vcpu, val);
952 if (kvm_get_msr(vcpu, msr_index, data)) {
953 pr_debug_ratelimited("%s cannot read MSR (0x%x)\n", __func__,
960 static bool read_and_check_msr_entry(struct kvm_vcpu *vcpu, u64 gpa, int i,
961 struct vmx_msr_entry *e)
963 if (kvm_vcpu_read_guest(vcpu,
964 gpa + i * sizeof(*e),
965 e, 2 * sizeof(u32))) {
966 pr_debug_ratelimited(
967 "%s cannot read MSR entry (%u, 0x%08llx)\n",
968 __func__, i, gpa + i * sizeof(*e));
971 if (nested_vmx_store_msr_check(vcpu, e)) {
972 pr_debug_ratelimited(
973 "%s check failed (%u, 0x%x, 0x%x)\n",
974 __func__, i, e->index, e->reserved);
980 static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
984 struct vmx_msr_entry e;
985 u32 max_msr_list_size = nested_vmx_max_atomic_switch_msrs(vcpu);
987 for (i = 0; i < count; i++) {
988 if (unlikely(i >= max_msr_list_size))
991 if (!read_and_check_msr_entry(vcpu, gpa, i, &e))
994 if (!nested_vmx_get_vmexit_msr_value(vcpu, e.index, &data))
997 if (kvm_vcpu_write_guest(vcpu,
998 gpa + i * sizeof(e) +
999 offsetof(struct vmx_msr_entry, value),
1000 &data, sizeof(data))) {
1001 pr_debug_ratelimited(
1002 "%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
1003 __func__, i, e.index, data);
1010 static bool nested_msr_store_list_has_msr(struct kvm_vcpu *vcpu, u32 msr_index)
1012 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
1013 u32 count = vmcs12->vm_exit_msr_store_count;
1014 u64 gpa = vmcs12->vm_exit_msr_store_addr;
1015 struct vmx_msr_entry e;
1018 for (i = 0; i < count; i++) {
1019 if (!read_and_check_msr_entry(vcpu, gpa, i, &e))
1022 if (e.index == msr_index)
1028 static void prepare_vmx_msr_autostore_list(struct kvm_vcpu *vcpu,
1031 struct vcpu_vmx *vmx = to_vmx(vcpu);
1032 struct vmx_msrs *autostore = &vmx->msr_autostore.guest;
1033 bool in_vmcs12_store_list;
1034 int msr_autostore_slot;
1035 bool in_autostore_list;
1038 msr_autostore_slot = vmx_find_loadstore_msr_slot(autostore, msr_index);
1039 in_autostore_list = msr_autostore_slot >= 0;
1040 in_vmcs12_store_list = nested_msr_store_list_has_msr(vcpu, msr_index);
1042 if (in_vmcs12_store_list && !in_autostore_list) {
1043 if (autostore->nr == MAX_NR_LOADSTORE_MSRS) {
1045 * Emulated VMEntry does not fail here. Instead a less
1046 * accurate value will be returned by
1047 * nested_vmx_get_vmexit_msr_value() using kvm_get_msr()
1048 * instead of reading the value from the vmcs02 VMExit MSR-store area.
1051 pr_warn_ratelimited(
1052 "Not enough msr entries in msr_autostore. Can't add msr %x\n",
1056 last = autostore->nr++;
1057 autostore->val[last].index = msr_index;
1058 } else if (!in_vmcs12_store_list && in_autostore_list) {
1059 last = --autostore->nr;
1060 autostore->val[msr_autostore_slot] = autostore->val[last];
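/* Removal swaps the last entry into the vacated slot so the list stays dense. */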
1065 * Load guest's/host's cr3 at nested entry/exit. @nested_ept is true if we are
1066 * emulating VM-Entry into a guest with EPT enabled. On failure, the expected
1067 * Exit Qualification (for a VM-Entry consistency check VM-Exit) is assigned to
1068 * @entry_failure_code.
1070 static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3,
1071 bool nested_ept, bool reload_pdptrs,
1072 enum vm_entry_failure_code *entry_failure_code)
1074 if (CC(kvm_vcpu_is_illegal_gpa(vcpu, cr3))) {
1075 *entry_failure_code = ENTRY_FAIL_DEFAULT;
1080 * If PAE paging and EPT are both on, CR3 is not used by the CPU and
1081 * must not be dereferenced.
1083 if (reload_pdptrs && !nested_ept && is_pae_paging(vcpu) &&
1084 CC(!load_pdptrs(vcpu, cr3))) {
1085 *entry_failure_code = ENTRY_FAIL_PDPTE;
1089 vcpu->arch.cr3 = cr3;
1090 kvm_register_mark_dirty(vcpu, VCPU_EXREG_CR3);
1092 /* Re-initialize the MMU, e.g. to pick up CR4 MMU role changes. */
1096 kvm_mmu_new_pgd(vcpu, cr3);
1102 * Returns true if KVM is able to configure the CPU to tag TLB entries
1103 * populated by L2 differently than TLB entries populated by L1.
1106 * If L0 uses EPT, L1 and L2 run with different EPTP because
1107 * guest_mode is part of kvm_mmu_page_role. Thus, TLB entries
1108 * are tagged with different EPTP.
1110 * If L1 uses VPID and we allocated a vpid02, TLB entries are tagged
1111 * with different VPID (L1 entries are tagged with vmx->vpid
1112 * while L2 entries are tagged with vmx->nested.vpid02).
1114 static bool nested_has_guest_tlb_tag(struct kvm_vcpu *vcpu)
1116 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
1118 return enable_ept ||
1119 (nested_cpu_has_vpid(vmcs12) && to_vmx(vcpu)->nested.vpid02);
1122 static void nested_vmx_transition_tlb_flush(struct kvm_vcpu *vcpu,
1123 struct vmcs12 *vmcs12,
1126 struct vcpu_vmx *vmx = to_vmx(vcpu);
1129 * If vmcs12 doesn't use VPID, L1 expects linear and combined mappings
1130 * for *all* contexts to be flushed on VM-Enter/VM-Exit, i.e. it's a
1131 * full TLB flush from the guest's perspective. This is required even
1132 * if VPID is disabled in the host as KVM may need to synchronize the
1133 * MMU in response to the guest TLB flush.
1135 * Note, using TLB_FLUSH_GUEST is correct even if nested EPT is in use.
1136 * EPT is a special snowflake, as guest-physical mappings aren't
1137 * flushed on VPID invalidations, including VM-Enter or VM-Exit with
1138 * VPID disabled. As a result, KVM _never_ needs to sync nEPT
1139 * entries on VM-Enter because L1 can't rely on VM-Enter to flush
1142 if (!nested_cpu_has_vpid(vmcs12)) {
1143 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
1147 /* L2 should never have a VPID if VPID is disabled. */
1148 WARN_ON(!enable_vpid);
1151 * VPID is enabled and in use by vmcs12. If vpid12 is changing, then
1152 * emulate a guest TLB flush as KVM does not track vpid12 history nor
1153 * is the VPID incorporated into the MMU context. I.e. KVM must assume
1154 * that the new vpid12 has never been used and thus represents a new
1155 * guest ASID that cannot have entries in the TLB.
1157 if (is_vmenter && vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
1158 vmx->nested.last_vpid = vmcs12->virtual_processor_id;
1159 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
1164 * If VPID is enabled, used by vmcs12, and vpid12 is not changing but
1165 * does not have a unique TLB tag (ASID), i.e. EPT is disabled and
1166 * KVM was unable to allocate a VPID for L2, flush the current context
1167 * as the effective ASID is common to both L1 and L2.
1169 if (!nested_has_guest_tlb_tag(vcpu))
1170 kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
1173 static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask)
1178 return (superset | subset) == superset;
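/*
 * Illustrative example (not from the original source): with mask == ~0ULL,
 * is_bitwise_subset(0b1010, 0b0010, ~0ULL) is true because OR-ing the subset
 * into the superset changes nothing, while a subset of 0b0100 would fail.
 * The VMX MSR restore helpers below use this to verify "must-be-1" and
 * "must-be-0" bits.
 */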
1181 static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data)
1183 const u64 feature_and_reserved =
1184 /* feature (except bit 48; see below) */
1185 BIT_ULL(49) | BIT_ULL(54) | BIT_ULL(55) |
1187 BIT_ULL(31) | GENMASK_ULL(47, 45) | GENMASK_ULL(63, 56);
1188 u64 vmx_basic = vmcs_config.nested.basic;
1190 if (!is_bitwise_subset(vmx_basic, data, feature_and_reserved))
1194 * KVM does not emulate a version of VMX that constrains physical
1195 * addresses of VMX structures (e.g. VMCS) to 32-bits.
1197 if (data & BIT_ULL(48))
1200 if (vmx_basic_vmcs_revision_id(vmx_basic) !=
1201 vmx_basic_vmcs_revision_id(data))
1204 if (vmx_basic_vmcs_size(vmx_basic) > vmx_basic_vmcs_size(data))
1207 vmx->nested.msrs.basic = data;
1211 static void vmx_get_control_msr(struct nested_vmx_msrs *msrs, u32 msr_index,
1212 u32 **low, u32 **high)
1214 switch (msr_index) {
1215 case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
1216 *low = &msrs->pinbased_ctls_low;
1217 *high = &msrs->pinbased_ctls_high;
1219 case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
1220 *low = &msrs->procbased_ctls_low;
1221 *high = &msrs->procbased_ctls_high;
1223 case MSR_IA32_VMX_TRUE_EXIT_CTLS:
1224 *low = &msrs->exit_ctls_low;
1225 *high = &msrs->exit_ctls_high;
1227 case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
1228 *low = &msrs->entry_ctls_low;
1229 *high = &msrs->entry_ctls_high;
1231 case MSR_IA32_VMX_PROCBASED_CTLS2:
1232 *low = &msrs->secondary_ctls_low;
1233 *high = &msrs->secondary_ctls_high;
1241 vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
1246 vmx_get_control_msr(&vmcs_config.nested, msr_index, &lowp, &highp);
1248 supported = vmx_control_msr(*lowp, *highp);
1250 /* Check must-be-1 bits are still 1. */
1251 if (!is_bitwise_subset(data, supported, GENMASK_ULL(31, 0)))
1254 /* Check must-be-0 bits are still 0. */
1255 if (!is_bitwise_subset(supported, data, GENMASK_ULL(63, 32)))
1258 vmx_get_control_msr(&vmx->nested.msrs, msr_index, &lowp, &highp);
1260 *highp = data >> 32;
1264 static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data)
1266 const u64 feature_and_reserved_bits =
1268 BIT_ULL(5) | GENMASK_ULL(8, 6) | BIT_ULL(14) | BIT_ULL(15) |
1269 BIT_ULL(28) | BIT_ULL(29) | BIT_ULL(30) |
1271 GENMASK_ULL(13, 9) | BIT_ULL(31);
1272 u64 vmx_misc = vmx_control_msr(vmcs_config.nested.misc_low,
1273 vmcs_config.nested.misc_high);
1275 if (!is_bitwise_subset(vmx_misc, data, feature_and_reserved_bits))
1278 if ((vmx->nested.msrs.pinbased_ctls_high &
1279 PIN_BASED_VMX_PREEMPTION_TIMER) &&
1280 vmx_misc_preemption_timer_rate(data) !=
1281 vmx_misc_preemption_timer_rate(vmx_misc))
1284 if (vmx_misc_cr3_count(data) > vmx_misc_cr3_count(vmx_misc))
1287 if (vmx_misc_max_msr(data) > vmx_misc_max_msr(vmx_misc))
1290 if (vmx_misc_mseg_revid(data) != vmx_misc_mseg_revid(vmx_misc))
1293 vmx->nested.msrs.misc_low = data;
1294 vmx->nested.msrs.misc_high = data >> 32;
1299 static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data)
1301 u64 vmx_ept_vpid_cap = vmx_control_msr(vmcs_config.nested.ept_caps,
1302 vmcs_config.nested.vpid_caps);
1304 /* Every bit is either reserved or a feature bit. */
1305 if (!is_bitwise_subset(vmx_ept_vpid_cap, data, -1ULL))
1308 vmx->nested.msrs.ept_caps = data;
1309 vmx->nested.msrs.vpid_caps = data >> 32;
1313 static u64 *vmx_get_fixed0_msr(struct nested_vmx_msrs *msrs, u32 msr_index)
1315 switch (msr_index) {
1316 case MSR_IA32_VMX_CR0_FIXED0:
1317 return &msrs->cr0_fixed0;
1318 case MSR_IA32_VMX_CR4_FIXED0:
1319 return &msrs->cr4_fixed0;
1325 static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
1327 const u64 *msr = vmx_get_fixed0_msr(&vmcs_config.nested, msr_index);
1330 * 1 bits (which indicate bits that "must-be-1" during VMX operation)
1331 * must be 1 in the restored value.
1333 if (!is_bitwise_subset(data, *msr, -1ULL))
1336 *vmx_get_fixed0_msr(&vmx->nested.msrs, msr_index) = data;
1341 * Called when userspace is restoring VMX MSRs.
1343 * Returns 0 on success, non-0 otherwise.
1345 int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
1347 struct vcpu_vmx *vmx = to_vmx(vcpu);
1350 * Don't allow changes to the VMX capability MSRs while the vCPU
1351 * is in VMX operation.
1353 if (vmx->nested.vmxon)
1356 switch (msr_index) {
1357 case MSR_IA32_VMX_BASIC:
1358 return vmx_restore_vmx_basic(vmx, data);
1359 case MSR_IA32_VMX_PINBASED_CTLS:
1360 case MSR_IA32_VMX_PROCBASED_CTLS:
1361 case MSR_IA32_VMX_EXIT_CTLS:
1362 case MSR_IA32_VMX_ENTRY_CTLS:
1364 * The "non-true" VMX capability MSRs are generated from the
1365 * "true" MSRs, so we do not support restoring them directly.
1367 * If userspace wants to emulate VMX_BASIC[55]=0, userspace
1368 * should restore the "true" MSRs with the must-be-1 bits
1369 * set according to the SDM Vol 3. A.2 "RESERVED CONTROLS AND
1370 * DEFAULT SETTINGS".
1373 case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
1374 case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
1375 case MSR_IA32_VMX_TRUE_EXIT_CTLS:
1376 case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
1377 case MSR_IA32_VMX_PROCBASED_CTLS2:
1378 return vmx_restore_control_msr(vmx, msr_index, data);
1379 case MSR_IA32_VMX_MISC:
1380 return vmx_restore_vmx_misc(vmx, data);
1381 case MSR_IA32_VMX_CR0_FIXED0:
1382 case MSR_IA32_VMX_CR4_FIXED0:
1383 return vmx_restore_fixed0_msr(vmx, msr_index, data);
1384 case MSR_IA32_VMX_CR0_FIXED1:
1385 case MSR_IA32_VMX_CR4_FIXED1:
1387 * These MSRs are generated based on the vCPU's CPUID, so we
1388 * do not support restoring them directly.
1391 case MSR_IA32_VMX_EPT_VPID_CAP:
1392 return vmx_restore_vmx_ept_vpid_cap(vmx, data);
1393 case MSR_IA32_VMX_VMCS_ENUM:
1394 vmx->nested.msrs.vmcs_enum = data;
1396 case MSR_IA32_VMX_VMFUNC:
1397 if (data & ~vmcs_config.nested.vmfunc_controls)
1399 vmx->nested.msrs.vmfunc_controls = data;
1403 * The rest of the VMX capability MSRs do not support restore.
1409 /* Returns 0 on success, non-0 otherwise. */
1410 int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata)
1412 switch (msr_index) {
1413 case MSR_IA32_VMX_BASIC:
1414 *pdata = msrs->basic;
1416 case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
1417 case MSR_IA32_VMX_PINBASED_CTLS:
1418 *pdata = vmx_control_msr(
1419 msrs->pinbased_ctls_low,
1420 msrs->pinbased_ctls_high);
1421 if (msr_index == MSR_IA32_VMX_PINBASED_CTLS)
1422 *pdata |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
1424 case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
1425 case MSR_IA32_VMX_PROCBASED_CTLS:
1426 *pdata = vmx_control_msr(
1427 msrs->procbased_ctls_low,
1428 msrs->procbased_ctls_high);
1429 if (msr_index == MSR_IA32_VMX_PROCBASED_CTLS)
1430 *pdata |= CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
1432 case MSR_IA32_VMX_TRUE_EXIT_CTLS:
1433 case MSR_IA32_VMX_EXIT_CTLS:
1434 *pdata = vmx_control_msr(
1435 msrs->exit_ctls_low,
1436 msrs->exit_ctls_high);
1437 if (msr_index == MSR_IA32_VMX_EXIT_CTLS)
1438 *pdata |= VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
1440 case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
1441 case MSR_IA32_VMX_ENTRY_CTLS:
1442 *pdata = vmx_control_msr(
1443 msrs->entry_ctls_low,
1444 msrs->entry_ctls_high);
1445 if (msr_index == MSR_IA32_VMX_ENTRY_CTLS)
1446 *pdata |= VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
1448 case MSR_IA32_VMX_MISC:
1449 *pdata = vmx_control_msr(
1453 case MSR_IA32_VMX_CR0_FIXED0:
1454 *pdata = msrs->cr0_fixed0;
1456 case MSR_IA32_VMX_CR0_FIXED1:
1457 *pdata = msrs->cr0_fixed1;
1459 case MSR_IA32_VMX_CR4_FIXED0:
1460 *pdata = msrs->cr4_fixed0;
1462 case MSR_IA32_VMX_CR4_FIXED1:
1463 *pdata = msrs->cr4_fixed1;
1465 case MSR_IA32_VMX_VMCS_ENUM:
1466 *pdata = msrs->vmcs_enum;
1468 case MSR_IA32_VMX_PROCBASED_CTLS2:
1469 *pdata = vmx_control_msr(
1470 msrs->secondary_ctls_low,
1471 msrs->secondary_ctls_high);
1473 case MSR_IA32_VMX_EPT_VPID_CAP:
1474 *pdata = msrs->ept_caps |
1475 ((u64)msrs->vpid_caps << 32);
1477 case MSR_IA32_VMX_VMFUNC:
1478 *pdata = msrs->vmfunc_controls;
1488 * Copy the writable VMCS shadow fields back to the VMCS12, in case they have
1489 * been modified by the L1 guest. Note, "writable" in this context means
1490 * "writable by the guest", i.e. tagged SHADOW_FIELD_RW; the set of
1491 * fields tagged SHADOW_FIELD_RO may or may not align with the "read-only"
1492 * VM-exit information fields (which are actually writable if the vCPU is
1493 * configured to support "VMWRITE to any supported field in the VMCS").
1495 static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
1497 struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
1498 struct vmcs12 *vmcs12 = get_vmcs12(&vmx->vcpu);
1499 struct shadow_vmcs_field field;
1503 if (WARN_ON(!shadow_vmcs))
1508 vmcs_load(shadow_vmcs);
1510 for (i = 0; i < max_shadow_read_write_fields; i++) {
1511 field = shadow_read_write_fields[i];
1512 val = __vmcs_readl(field.encoding);
1513 vmcs12_write_any(vmcs12, field.encoding, field.offset, val);
1516 vmcs_clear(shadow_vmcs);
1517 vmcs_load(vmx->loaded_vmcs->vmcs);
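/*
 * Background (assumed from the shadow-VMCS design rather than stated here):
 * when enable_shadow_vmcs is on, L1's VMREAD/VMWRITE to the fields cleared
 * in the bitmaps above hit vmcs01's shadow VMCS directly without a VM-exit,
 * so KVM must copy shadow -> vmcs12 (this function) before it consumes
 * vmcs12, and vmcs12 -> shadow (copy_vmcs12_to_shadow() below) before
 * resuming L1.
 */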
1522 static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
1524 const struct shadow_vmcs_field *fields[] = {
1525 shadow_read_write_fields,
1526 shadow_read_only_fields
1528 const int max_fields[] = {
1529 max_shadow_read_write_fields,
1530 max_shadow_read_only_fields
1532 struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
1533 struct vmcs12 *vmcs12 = get_vmcs12(&vmx->vcpu);
1534 struct shadow_vmcs_field field;
1538 if (WARN_ON(!shadow_vmcs))
1541 vmcs_load(shadow_vmcs);
1543 for (q = 0; q < ARRAY_SIZE(fields); q++) {
1544 for (i = 0; i < max_fields[q]; i++) {
1545 field = fields[q][i];
1546 val = vmcs12_read_any(vmcs12, field.encoding,
1548 __vmcs_writel(field.encoding, val);
1552 vmcs_clear(shadow_vmcs);
1553 vmcs_load(vmx->loaded_vmcs->vmcs);
1556 static void copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx, u32 hv_clean_fields)
1558 struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
1559 struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;
1561 /* HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE */
1562 vmcs12->tpr_threshold = evmcs->tpr_threshold;
1563 vmcs12->guest_rip = evmcs->guest_rip;
1565 if (unlikely(!(hv_clean_fields &
1566 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC))) {
1567 vmcs12->guest_rsp = evmcs->guest_rsp;
1568 vmcs12->guest_rflags = evmcs->guest_rflags;
1569 vmcs12->guest_interruptibility_info =
1570 evmcs->guest_interruptibility_info;
1572 * Not present in struct vmcs12:
1573 * vmcs12->guest_ssp = evmcs->guest_ssp;
1577 if (unlikely(!(hv_clean_fields &
1578 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_PROC))) {
1579 vmcs12->cpu_based_vm_exec_control =
1580 evmcs->cpu_based_vm_exec_control;
1583 if (unlikely(!(hv_clean_fields &
1584 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EXCPN))) {
1585 vmcs12->exception_bitmap = evmcs->exception_bitmap;
1588 if (unlikely(!(hv_clean_fields &
1589 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_ENTRY))) {
1590 vmcs12->vm_entry_controls = evmcs->vm_entry_controls;
1593 if (unlikely(!(hv_clean_fields &
1594 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EVENT))) {
1595 vmcs12->vm_entry_intr_info_field =
1596 evmcs->vm_entry_intr_info_field;
1597 vmcs12->vm_entry_exception_error_code =
1598 evmcs->vm_entry_exception_error_code;
1599 vmcs12->vm_entry_instruction_len =
1600 evmcs->vm_entry_instruction_len;
1603 if (unlikely(!(hv_clean_fields &
1604 HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1))) {
1605 vmcs12->host_ia32_pat = evmcs->host_ia32_pat;
1606 vmcs12->host_ia32_efer = evmcs->host_ia32_efer;
1607 vmcs12->host_cr0 = evmcs->host_cr0;
1608 vmcs12->host_cr3 = evmcs->host_cr3;
1609 vmcs12->host_cr4 = evmcs->host_cr4;
1610 vmcs12->host_ia32_sysenter_esp = evmcs->host_ia32_sysenter_esp;
1611 vmcs12->host_ia32_sysenter_eip = evmcs->host_ia32_sysenter_eip;
1612 vmcs12->host_rip = evmcs->host_rip;
1613 vmcs12->host_ia32_sysenter_cs = evmcs->host_ia32_sysenter_cs;
1614 vmcs12->host_es_selector = evmcs->host_es_selector;
1615 vmcs12->host_cs_selector = evmcs->host_cs_selector;
1616 vmcs12->host_ss_selector = evmcs->host_ss_selector;
1617 vmcs12->host_ds_selector = evmcs->host_ds_selector;
1618 vmcs12->host_fs_selector = evmcs->host_fs_selector;
1619 vmcs12->host_gs_selector = evmcs->host_gs_selector;
1620 vmcs12->host_tr_selector = evmcs->host_tr_selector;
1621 vmcs12->host_ia32_perf_global_ctrl = evmcs->host_ia32_perf_global_ctrl;
1623 * Not present in struct vmcs12:
1624 * vmcs12->host_ia32_s_cet = evmcs->host_ia32_s_cet;
1625 * vmcs12->host_ssp = evmcs->host_ssp;
1626 * vmcs12->host_ia32_int_ssp_table_addr = evmcs->host_ia32_int_ssp_table_addr;
1630 if (unlikely(!(hv_clean_fields &
1631 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP1))) {
1632 vmcs12->pin_based_vm_exec_control =
1633 evmcs->pin_based_vm_exec_control;
1634 vmcs12->vm_exit_controls = evmcs->vm_exit_controls;
1635 vmcs12->secondary_vm_exec_control =
1636 evmcs->secondary_vm_exec_control;
1639 if (unlikely(!(hv_clean_fields &
1640 HV_VMX_ENLIGHTENED_CLEAN_FIELD_IO_BITMAP))) {
1641 vmcs12->io_bitmap_a = evmcs->io_bitmap_a;
1642 vmcs12->io_bitmap_b = evmcs->io_bitmap_b;
1645 if (unlikely(!(hv_clean_fields &
1646 HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP))) {
1647 vmcs12->msr_bitmap = evmcs->msr_bitmap;
1650 if (unlikely(!(hv_clean_fields &
1651 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2))) {
1652 vmcs12->guest_es_base = evmcs->guest_es_base;
1653 vmcs12->guest_cs_base = evmcs->guest_cs_base;
1654 vmcs12->guest_ss_base = evmcs->guest_ss_base;
1655 vmcs12->guest_ds_base = evmcs->guest_ds_base;
1656 vmcs12->guest_fs_base = evmcs->guest_fs_base;
1657 vmcs12->guest_gs_base = evmcs->guest_gs_base;
1658 vmcs12->guest_ldtr_base = evmcs->guest_ldtr_base;
1659 vmcs12->guest_tr_base = evmcs->guest_tr_base;
1660 vmcs12->guest_gdtr_base = evmcs->guest_gdtr_base;
1661 vmcs12->guest_idtr_base = evmcs->guest_idtr_base;
1662 vmcs12->guest_es_limit = evmcs->guest_es_limit;
1663 vmcs12->guest_cs_limit = evmcs->guest_cs_limit;
1664 vmcs12->guest_ss_limit = evmcs->guest_ss_limit;
1665 vmcs12->guest_ds_limit = evmcs->guest_ds_limit;
1666 vmcs12->guest_fs_limit = evmcs->guest_fs_limit;
1667 vmcs12->guest_gs_limit = evmcs->guest_gs_limit;
1668 vmcs12->guest_ldtr_limit = evmcs->guest_ldtr_limit;
1669 vmcs12->guest_tr_limit = evmcs->guest_tr_limit;
1670 vmcs12->guest_gdtr_limit = evmcs->guest_gdtr_limit;
1671 vmcs12->guest_idtr_limit = evmcs->guest_idtr_limit;
1672 vmcs12->guest_es_ar_bytes = evmcs->guest_es_ar_bytes;
1673 vmcs12->guest_cs_ar_bytes = evmcs->guest_cs_ar_bytes;
1674 vmcs12->guest_ss_ar_bytes = evmcs->guest_ss_ar_bytes;
1675 vmcs12->guest_ds_ar_bytes = evmcs->guest_ds_ar_bytes;
1676 vmcs12->guest_fs_ar_bytes = evmcs->guest_fs_ar_bytes;
1677 vmcs12->guest_gs_ar_bytes = evmcs->guest_gs_ar_bytes;
1678 vmcs12->guest_ldtr_ar_bytes = evmcs->guest_ldtr_ar_bytes;
1679 vmcs12->guest_tr_ar_bytes = evmcs->guest_tr_ar_bytes;
1680 vmcs12->guest_es_selector = evmcs->guest_es_selector;
1681 vmcs12->guest_cs_selector = evmcs->guest_cs_selector;
1682 vmcs12->guest_ss_selector = evmcs->guest_ss_selector;
1683 vmcs12->guest_ds_selector = evmcs->guest_ds_selector;
1684 vmcs12->guest_fs_selector = evmcs->guest_fs_selector;
1685 vmcs12->guest_gs_selector = evmcs->guest_gs_selector;
1686 vmcs12->guest_ldtr_selector = evmcs->guest_ldtr_selector;
1687 vmcs12->guest_tr_selector = evmcs->guest_tr_selector;
1690 if (unlikely(!(hv_clean_fields &
1691 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2))) {
1692 vmcs12->tsc_offset = evmcs->tsc_offset;
1693 vmcs12->virtual_apic_page_addr = evmcs->virtual_apic_page_addr;
1694 vmcs12->xss_exit_bitmap = evmcs->xss_exit_bitmap;
1695 vmcs12->encls_exiting_bitmap = evmcs->encls_exiting_bitmap;
1696 vmcs12->tsc_multiplier = evmcs->tsc_multiplier;
1699 if (unlikely(!(hv_clean_fields &
1700 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR))) {
1701 vmcs12->cr0_guest_host_mask = evmcs->cr0_guest_host_mask;
1702 vmcs12->cr4_guest_host_mask = evmcs->cr4_guest_host_mask;
1703 vmcs12->cr0_read_shadow = evmcs->cr0_read_shadow;
1704 vmcs12->cr4_read_shadow = evmcs->cr4_read_shadow;
1705 vmcs12->guest_cr0 = evmcs->guest_cr0;
1706 vmcs12->guest_cr3 = evmcs->guest_cr3;
1707 vmcs12->guest_cr4 = evmcs->guest_cr4;
1708 vmcs12->guest_dr7 = evmcs->guest_dr7;
1711 if (unlikely(!(hv_clean_fields &
1712 HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER))) {
1713 vmcs12->host_fs_base = evmcs->host_fs_base;
1714 vmcs12->host_gs_base = evmcs->host_gs_base;
1715 vmcs12->host_tr_base = evmcs->host_tr_base;
1716 vmcs12->host_gdtr_base = evmcs->host_gdtr_base;
1717 vmcs12->host_idtr_base = evmcs->host_idtr_base;
1718 vmcs12->host_rsp = evmcs->host_rsp;
1721 if (unlikely(!(hv_clean_fields &
1722 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_XLAT))) {
1723 vmcs12->ept_pointer = evmcs->ept_pointer;
1724 vmcs12->virtual_processor_id = evmcs->virtual_processor_id;
1727 if (unlikely(!(hv_clean_fields &
1728 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1))) {
1729 vmcs12->vmcs_link_pointer = evmcs->vmcs_link_pointer;
1730 vmcs12->guest_ia32_debugctl = evmcs->guest_ia32_debugctl;
1731 vmcs12->guest_ia32_pat = evmcs->guest_ia32_pat;
1732 vmcs12->guest_ia32_efer = evmcs->guest_ia32_efer;
1733 vmcs12->guest_pdptr0 = evmcs->guest_pdptr0;
1734 vmcs12->guest_pdptr1 = evmcs->guest_pdptr1;
1735 vmcs12->guest_pdptr2 = evmcs->guest_pdptr2;
1736 vmcs12->guest_pdptr3 = evmcs->guest_pdptr3;
1737 vmcs12->guest_pending_dbg_exceptions =
1738 evmcs->guest_pending_dbg_exceptions;
1739 vmcs12->guest_sysenter_esp = evmcs->guest_sysenter_esp;
1740 vmcs12->guest_sysenter_eip = evmcs->guest_sysenter_eip;
1741 vmcs12->guest_bndcfgs = evmcs->guest_bndcfgs;
1742 vmcs12->guest_activity_state = evmcs->guest_activity_state;
1743 vmcs12->guest_sysenter_cs = evmcs->guest_sysenter_cs;
1744 vmcs12->guest_ia32_perf_global_ctrl = evmcs->guest_ia32_perf_global_ctrl;
1746 * Not present in struct vmcs12:
1747 * vmcs12->guest_ia32_s_cet = evmcs->guest_ia32_s_cet;
1748 * vmcs12->guest_ia32_lbr_ctl = evmcs->guest_ia32_lbr_ctl;
1749 * vmcs12->guest_ia32_int_ssp_table_addr = evmcs->guest_ia32_int_ssp_table_addr;
1755 * vmcs12->vm_exit_msr_store_addr = evmcs->vm_exit_msr_store_addr;
1756 * vmcs12->vm_exit_msr_load_addr = evmcs->vm_exit_msr_load_addr;
1757 * vmcs12->vm_entry_msr_load_addr = evmcs->vm_entry_msr_load_addr;
1758 * vmcs12->page_fault_error_code_mask =
1759 * evmcs->page_fault_error_code_mask;
1760 * vmcs12->page_fault_error_code_match =
1761 * evmcs->page_fault_error_code_match;
1762 * vmcs12->cr3_target_count = evmcs->cr3_target_count;
1763 * vmcs12->vm_exit_msr_store_count = evmcs->vm_exit_msr_store_count;
1764 * vmcs12->vm_exit_msr_load_count = evmcs->vm_exit_msr_load_count;
1765 * vmcs12->vm_entry_msr_load_count = evmcs->vm_entry_msr_load_count;
1770 * vmcs12->guest_physical_address = evmcs->guest_physical_address;
1771 * vmcs12->vm_instruction_error = evmcs->vm_instruction_error;
1772 * vmcs12->vm_exit_reason = evmcs->vm_exit_reason;
1773 * vmcs12->vm_exit_intr_info = evmcs->vm_exit_intr_info;
1774 * vmcs12->vm_exit_intr_error_code = evmcs->vm_exit_intr_error_code;
1775 * vmcs12->idt_vectoring_info_field = evmcs->idt_vectoring_info_field;
1776 * vmcs12->idt_vectoring_error_code = evmcs->idt_vectoring_error_code;
1777 * vmcs12->vm_exit_instruction_len = evmcs->vm_exit_instruction_len;
1778 * vmcs12->vmx_instruction_info = evmcs->vmx_instruction_info;
1779 * vmcs12->exit_qualification = evmcs->exit_qualification;
1780 * vmcs12->guest_linear_address = evmcs->guest_linear_address;
1782 * Not present in struct vmcs12:
1783 * vmcs12->exit_io_instruction_ecx = evmcs->exit_io_instruction_ecx;
1784 * vmcs12->exit_io_instruction_esi = evmcs->exit_io_instruction_esi;
1785 * vmcs12->exit_io_instruction_edi = evmcs->exit_io_instruction_edi;
1786 * vmcs12->exit_io_instruction_eip = evmcs->exit_io_instruction_eip;
1792 static void copy_vmcs12_to_enlightened(struct vcpu_vmx *vmx)
1794 struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
1795 struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;
1798 * Should not be changed by KVM:
1800 * evmcs->host_es_selector = vmcs12->host_es_selector;
1801 * evmcs->host_cs_selector = vmcs12->host_cs_selector;
1802 * evmcs->host_ss_selector = vmcs12->host_ss_selector;
1803 * evmcs->host_ds_selector = vmcs12->host_ds_selector;
1804 * evmcs->host_fs_selector = vmcs12->host_fs_selector;
1805 * evmcs->host_gs_selector = vmcs12->host_gs_selector;
1806 * evmcs->host_tr_selector = vmcs12->host_tr_selector;
1807 * evmcs->host_ia32_pat = vmcs12->host_ia32_pat;
1808 * evmcs->host_ia32_efer = vmcs12->host_ia32_efer;
1809 * evmcs->host_cr0 = vmcs12->host_cr0;
1810 * evmcs->host_cr3 = vmcs12->host_cr3;
1811 * evmcs->host_cr4 = vmcs12->host_cr4;
1812 * evmcs->host_ia32_sysenter_esp = vmcs12->host_ia32_sysenter_esp;
1813 * evmcs->host_ia32_sysenter_eip = vmcs12->host_ia32_sysenter_eip;
1814 * evmcs->host_rip = vmcs12->host_rip;
1815 * evmcs->host_ia32_sysenter_cs = vmcs12->host_ia32_sysenter_cs;
1816 * evmcs->host_fs_base = vmcs12->host_fs_base;
1817 * evmcs->host_gs_base = vmcs12->host_gs_base;
1818 * evmcs->host_tr_base = vmcs12->host_tr_base;
1819 * evmcs->host_gdtr_base = vmcs12->host_gdtr_base;
1820 * evmcs->host_idtr_base = vmcs12->host_idtr_base;
1821 * evmcs->host_rsp = vmcs12->host_rsp;
1822 * sync_vmcs02_to_vmcs12() doesn't read these:
1823 * evmcs->io_bitmap_a = vmcs12->io_bitmap_a;
1824 * evmcs->io_bitmap_b = vmcs12->io_bitmap_b;
1825 * evmcs->msr_bitmap = vmcs12->msr_bitmap;
1826 * evmcs->ept_pointer = vmcs12->ept_pointer;
1827 * evmcs->xss_exit_bitmap = vmcs12->xss_exit_bitmap;
1828 * evmcs->vm_exit_msr_store_addr = vmcs12->vm_exit_msr_store_addr;
1829 * evmcs->vm_exit_msr_load_addr = vmcs12->vm_exit_msr_load_addr;
1830 * evmcs->vm_entry_msr_load_addr = vmcs12->vm_entry_msr_load_addr;
1831 * evmcs->tpr_threshold = vmcs12->tpr_threshold;
1832 * evmcs->virtual_processor_id = vmcs12->virtual_processor_id;
1833 * evmcs->exception_bitmap = vmcs12->exception_bitmap;
1834 * evmcs->vmcs_link_pointer = vmcs12->vmcs_link_pointer;
1835 * evmcs->pin_based_vm_exec_control = vmcs12->pin_based_vm_exec_control;
1836 * evmcs->vm_exit_controls = vmcs12->vm_exit_controls;
1837 * evmcs->secondary_vm_exec_control = vmcs12->secondary_vm_exec_control;
1838 * evmcs->page_fault_error_code_mask =
1839 * vmcs12->page_fault_error_code_mask;
1840 * evmcs->page_fault_error_code_match =
1841 * vmcs12->page_fault_error_code_match;
1842 * evmcs->cr3_target_count = vmcs12->cr3_target_count;
1843 * evmcs->virtual_apic_page_addr = vmcs12->virtual_apic_page_addr;
1844 * evmcs->tsc_offset = vmcs12->tsc_offset;
1845 * evmcs->guest_ia32_debugctl = vmcs12->guest_ia32_debugctl;
1846 * evmcs->cr0_guest_host_mask = vmcs12->cr0_guest_host_mask;
1847 * evmcs->cr4_guest_host_mask = vmcs12->cr4_guest_host_mask;
1848 * evmcs->cr0_read_shadow = vmcs12->cr0_read_shadow;
1849 * evmcs->cr4_read_shadow = vmcs12->cr4_read_shadow;
1850 * evmcs->vm_exit_msr_store_count = vmcs12->vm_exit_msr_store_count;
1851 * evmcs->vm_exit_msr_load_count = vmcs12->vm_exit_msr_load_count;
1852 * evmcs->vm_entry_msr_load_count = vmcs12->vm_entry_msr_load_count;
1853 * evmcs->guest_ia32_perf_global_ctrl = vmcs12->guest_ia32_perf_global_ctrl;
1854 * evmcs->host_ia32_perf_global_ctrl = vmcs12->host_ia32_perf_global_ctrl;
1855 * evmcs->encls_exiting_bitmap = vmcs12->encls_exiting_bitmap;
1856 * evmcs->tsc_multiplier = vmcs12->tsc_multiplier;
1858 * Not present in struct vmcs12:
1859 * evmcs->exit_io_instruction_ecx = vmcs12->exit_io_instruction_ecx;
1860 * evmcs->exit_io_instruction_esi = vmcs12->exit_io_instruction_esi;
1861 * evmcs->exit_io_instruction_edi = vmcs12->exit_io_instruction_edi;
1862 * evmcs->exit_io_instruction_eip = vmcs12->exit_io_instruction_eip;
1863 * evmcs->host_ia32_s_cet = vmcs12->host_ia32_s_cet;
1864 * evmcs->host_ssp = vmcs12->host_ssp;
1865 * evmcs->host_ia32_int_ssp_table_addr = vmcs12->host_ia32_int_ssp_table_addr;
1866 * evmcs->guest_ia32_s_cet = vmcs12->guest_ia32_s_cet;
1867 * evmcs->guest_ia32_lbr_ctl = vmcs12->guest_ia32_lbr_ctl;
1868 * evmcs->guest_ia32_int_ssp_table_addr = vmcs12->guest_ia32_int_ssp_table_addr;
1869 * evmcs->guest_ssp = vmcs12->guest_ssp;
1872 evmcs->guest_es_selector = vmcs12->guest_es_selector;
1873 evmcs->guest_cs_selector = vmcs12->guest_cs_selector;
1874 evmcs->guest_ss_selector = vmcs12->guest_ss_selector;
1875 evmcs->guest_ds_selector = vmcs12->guest_ds_selector;
1876 evmcs->guest_fs_selector = vmcs12->guest_fs_selector;
1877 evmcs->guest_gs_selector = vmcs12->guest_gs_selector;
1878 evmcs->guest_ldtr_selector = vmcs12->guest_ldtr_selector;
1879 evmcs->guest_tr_selector = vmcs12->guest_tr_selector;
1881 evmcs->guest_es_limit = vmcs12->guest_es_limit;
1882 evmcs->guest_cs_limit = vmcs12->guest_cs_limit;
1883 evmcs->guest_ss_limit = vmcs12->guest_ss_limit;
1884 evmcs->guest_ds_limit = vmcs12->guest_ds_limit;
1885 evmcs->guest_fs_limit = vmcs12->guest_fs_limit;
1886 evmcs->guest_gs_limit = vmcs12->guest_gs_limit;
1887 evmcs->guest_ldtr_limit = vmcs12->guest_ldtr_limit;
1888 evmcs->guest_tr_limit = vmcs12->guest_tr_limit;
1889 evmcs->guest_gdtr_limit = vmcs12->guest_gdtr_limit;
1890 evmcs->guest_idtr_limit = vmcs12->guest_idtr_limit;
1892 evmcs->guest_es_ar_bytes = vmcs12->guest_es_ar_bytes;
1893 evmcs->guest_cs_ar_bytes = vmcs12->guest_cs_ar_bytes;
1894 evmcs->guest_ss_ar_bytes = vmcs12->guest_ss_ar_bytes;
1895 evmcs->guest_ds_ar_bytes = vmcs12->guest_ds_ar_bytes;
1896 evmcs->guest_fs_ar_bytes = vmcs12->guest_fs_ar_bytes;
1897 evmcs->guest_gs_ar_bytes = vmcs12->guest_gs_ar_bytes;
1898 evmcs->guest_ldtr_ar_bytes = vmcs12->guest_ldtr_ar_bytes;
1899 evmcs->guest_tr_ar_bytes = vmcs12->guest_tr_ar_bytes;
1901 evmcs->guest_es_base = vmcs12->guest_es_base;
1902 evmcs->guest_cs_base = vmcs12->guest_cs_base;
1903 evmcs->guest_ss_base = vmcs12->guest_ss_base;
1904 evmcs->guest_ds_base = vmcs12->guest_ds_base;
1905 evmcs->guest_fs_base = vmcs12->guest_fs_base;
1906 evmcs->guest_gs_base = vmcs12->guest_gs_base;
1907 evmcs->guest_ldtr_base = vmcs12->guest_ldtr_base;
1908 evmcs->guest_tr_base = vmcs12->guest_tr_base;
1909 evmcs->guest_gdtr_base = vmcs12->guest_gdtr_base;
1910 evmcs->guest_idtr_base = vmcs12->guest_idtr_base;
1912 evmcs->guest_ia32_pat = vmcs12->guest_ia32_pat;
1913 evmcs->guest_ia32_efer = vmcs12->guest_ia32_efer;
1915 evmcs->guest_pdptr0 = vmcs12->guest_pdptr0;
1916 evmcs->guest_pdptr1 = vmcs12->guest_pdptr1;
1917 evmcs->guest_pdptr2 = vmcs12->guest_pdptr2;
1918 evmcs->guest_pdptr3 = vmcs12->guest_pdptr3;
1920 evmcs->guest_pending_dbg_exceptions =
1921 vmcs12->guest_pending_dbg_exceptions;
1922 evmcs->guest_sysenter_esp = vmcs12->guest_sysenter_esp;
1923 evmcs->guest_sysenter_eip = vmcs12->guest_sysenter_eip;
1925 evmcs->guest_activity_state = vmcs12->guest_activity_state;
1926 evmcs->guest_sysenter_cs = vmcs12->guest_sysenter_cs;
1928 evmcs->guest_cr0 = vmcs12->guest_cr0;
1929 evmcs->guest_cr3 = vmcs12->guest_cr3;
1930 evmcs->guest_cr4 = vmcs12->guest_cr4;
1931 evmcs->guest_dr7 = vmcs12->guest_dr7;
1933 evmcs->guest_physical_address = vmcs12->guest_physical_address;
1935 evmcs->vm_instruction_error = vmcs12->vm_instruction_error;
1936 evmcs->vm_exit_reason = vmcs12->vm_exit_reason;
1937 evmcs->vm_exit_intr_info = vmcs12->vm_exit_intr_info;
1938 evmcs->vm_exit_intr_error_code = vmcs12->vm_exit_intr_error_code;
1939 evmcs->idt_vectoring_info_field = vmcs12->idt_vectoring_info_field;
1940 evmcs->idt_vectoring_error_code = vmcs12->idt_vectoring_error_code;
1941 evmcs->vm_exit_instruction_len = vmcs12->vm_exit_instruction_len;
1942 evmcs->vmx_instruction_info = vmcs12->vmx_instruction_info;
1944 evmcs->exit_qualification = vmcs12->exit_qualification;
1946 evmcs->guest_linear_address = vmcs12->guest_linear_address;
1947 evmcs->guest_rsp = vmcs12->guest_rsp;
1948 evmcs->guest_rflags = vmcs12->guest_rflags;
1950 evmcs->guest_interruptibility_info =
1951 vmcs12->guest_interruptibility_info;
1952 evmcs->cpu_based_vm_exec_control = vmcs12->cpu_based_vm_exec_control;
1953 evmcs->vm_entry_controls = vmcs12->vm_entry_controls;
1954 evmcs->vm_entry_intr_info_field = vmcs12->vm_entry_intr_info_field;
1955 evmcs->vm_entry_exception_error_code =
1956 vmcs12->vm_entry_exception_error_code;
1957 evmcs->vm_entry_instruction_len = vmcs12->vm_entry_instruction_len;
1959 evmcs->guest_rip = vmcs12->guest_rip;
1961 evmcs->guest_bndcfgs = vmcs12->guest_bndcfgs;
1967 * This is an equivalent of the nested hypervisor executing the vmptrld instruction.
1970 static enum nested_evmptrld_status nested_vmx_handle_enlightened_vmptrld(
1971 struct kvm_vcpu *vcpu, bool from_launch)
1973 struct vcpu_vmx *vmx = to_vmx(vcpu);
1974 bool evmcs_gpa_changed = false;
1977 if (likely(!guest_cpuid_has_evmcs(vcpu)))
1978 return EVMPTRLD_DISABLED;
1980 if (!nested_enlightened_vmentry(vcpu, &evmcs_gpa)) {
1981 nested_release_evmcs(vcpu);
1982 return EVMPTRLD_DISABLED;
1985 if (unlikely(evmcs_gpa != vmx->nested.hv_evmcs_vmptr)) {
1986 vmx->nested.current_vmptr = INVALID_GPA;
1988 nested_release_evmcs(vcpu);
1990 if (kvm_vcpu_map(vcpu, gpa_to_gfn(evmcs_gpa),
1991 &vmx->nested.hv_evmcs_map))
1992 return EVMPTRLD_ERROR;
1994 vmx->nested.hv_evmcs = vmx->nested.hv_evmcs_map.hva;
1997 * Currently, KVM only supports eVMCS version 1
1998 * (== KVM_EVMCS_VERSION) and thus we expect the guest to set this
1999 * value to the first u32 field of the eVMCS, which should specify the eVMCS version.
2002 * The guest should learn the eVMCS versions supported by the host by
2003 * examining CPUID.0x4000000A.EAX[0:15]. Host userspace VMM is
2004 * expected to set this CPUID leaf according to the value
2005 * returned in vmcs_version from nested_enable_evmcs().
2007 * However, it turns out that Microsoft Hyper-V fails to comply
2008 * with its own invented interface: when Hyper-V uses eVMCS, it
2009 * just sets the first u32 field of the eVMCS to the revision_id
2010 * specified in MSR_IA32_VMX_BASIC, instead of the eVMCS version
2011 * number, which is one of the supported versions specified in
2012 * CPUID.0x4000000A.EAX[0:15].
2014 * To work around this Hyper-V bug, we accept here either a supported
2015 * eVMCS version or the VMCS12 revision_id as valid values for the
2016 * first u32 field of the eVMCS.
2018 if ((vmx->nested.hv_evmcs->revision_id != KVM_EVMCS_VERSION) &&
2019 (vmx->nested.hv_evmcs->revision_id != VMCS12_REVISION)) {
2020 nested_release_evmcs(vcpu);
2021 return EVMPTRLD_VMFAIL;
2024 vmx->nested.hv_evmcs_vmptr = evmcs_gpa;
2026 evmcs_gpa_changed = true;
2028 * Unlike normal vmcs12, enlightened vmcs12 is not fully
2029 * reloaded from guest's memory (read only fields, fields not
2030 * present in struct hv_enlightened_vmcs, ...). Make sure there are no leftovers.
2034 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
2035 memset(vmcs12, 0, sizeof(*vmcs12));
2036 vmcs12->hdr.revision_id = VMCS12_REVISION;
2042 * Clean fields data can't be used on VMLAUNCH and when we switch
2043 * between different L2 guests as KVM keeps a single VMCS12 per L1.
2045 if (from_launch || evmcs_gpa_changed) {
2046 vmx->nested.hv_evmcs->hv_clean_fields &=
2047 ~HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
2049 vmx->nested.force_msr_bitmap_recalc = true;
2052 return EVMPTRLD_SUCCEEDED;
2055 void nested_sync_vmcs12_to_shadow(struct kvm_vcpu *vcpu)
2057 struct vcpu_vmx *vmx = to_vmx(vcpu);
2059 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
2060 copy_vmcs12_to_enlightened(vmx);
2062 copy_vmcs12_to_shadow(vmx);
2064 vmx->nested.need_vmcs12_to_shadow_sync = false;
2067 static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer)
2069 struct vcpu_vmx *vmx =
2070 container_of(timer, struct vcpu_vmx, nested.preemption_timer);
2072 vmx->nested.preemption_timer_expired = true;
2073 kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu);
2074 kvm_vcpu_kick(&vmx->vcpu);
2076 return HRTIMER_NORESTART;
2079 static u64 vmx_calc_preemption_timer_value(struct kvm_vcpu *vcpu)
2081 struct vcpu_vmx *vmx = to_vmx(vcpu);
2082 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
2084 u64 l1_scaled_tsc = kvm_read_l1_tsc(vcpu, rdtsc()) >>
2085 VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
2087 if (!vmx->nested.has_preemption_timer_deadline) {
2088 vmx->nested.preemption_timer_deadline =
2089 vmcs12->vmx_preemption_timer_value + l1_scaled_tsc;
2090 vmx->nested.has_preemption_timer_deadline = true;
2092 return vmx->nested.preemption_timer_deadline - l1_scaled_tsc;
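/*
 * Worked example (illustrative numbers, not from the source): the emulated
 * timer ticks at the L1 TSC rate divided by 32, so an L1 TSC of 6,400,000
 * cycles gives l1_scaled_tsc = 200,000 ticks. If vmcs12 programs a timer
 * value of 1,000 ticks on the first call, the deadline is latched at
 * 201,000; a later call that sees l1_scaled_tsc = 200,400 returns the
 * 600 ticks still outstanding.
 */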
2095 static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu,
2096 u64 preemption_timeout)
2098 struct vcpu_vmx *vmx = to_vmx(vcpu);
2101 * A timer value of zero is architecturally guaranteed to cause
2102 * a VMExit prior to executing any instructions in the guest.
2104 if (preemption_timeout == 0) {
2105 vmx_preemption_timer_fn(&vmx->nested.preemption_timer);
2109 if (vcpu->arch.virtual_tsc_khz == 0)
2112 preemption_timeout <<= VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
2113 preemption_timeout *= 1000000;
2114 do_div(preemption_timeout, vcpu->arch.virtual_tsc_khz);
2115 hrtimer_start(&vmx->nested.preemption_timer,
2116 ktime_add_ns(ktime_get(), preemption_timeout),
2117 HRTIMER_MODE_ABS_PINNED);
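/*
 * Unit-conversion sketch (illustrative, assumes virtual_tsc_khz == 2000000,
 * i.e. a 2 GHz guest TSC): a vmcs12 timer value of 1,000 ticks becomes
 * 1,000 << 5 = 32,000 TSC cycles, and 32,000 * 1,000,000 / 2,000,000 =
 * 16,000 ns, so the emulation hrtimer above fires roughly 16 us from now.
 */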
2120 static u64 nested_vmx_calc_efer(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
2122 if (vmx->nested.nested_run_pending &&
2123 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER))
2124 return vmcs12->guest_ia32_efer;
2125 else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE)
2126 return vmx->vcpu.arch.efer | (EFER_LMA | EFER_LME);
2128 return vmx->vcpu.arch.efer & ~(EFER_LMA | EFER_LME);
2131 static void prepare_vmcs02_constant_state(struct vcpu_vmx *vmx)
2133 struct kvm *kvm = vmx->vcpu.kvm;
2136 * If vmcs02 hasn't been initialized, set the constant vmcs02 state
2137 * according to L0's settings (vmcs12 is irrelevant here). Host
2138 * fields that come from L0 and are not constant, e.g. HOST_CR3,
2139 * will be set as needed prior to VMLAUNCH/VMRESUME.
2141 if (vmx->nested.vmcs02_initialized)
2143 vmx->nested.vmcs02_initialized = true;
2146 * We don't care what the EPTP value is, we just need to guarantee
2147 * it's valid so we don't get a false positive when doing early
2148 * consistency checks.
2150 if (enable_ept && nested_early_check)
2151 vmcs_write64(EPT_POINTER,
2152 construct_eptp(&vmx->vcpu, 0, PT64_ROOT_4LEVEL));
2154 /* All VMFUNCs are currently emulated through L0 vmexits. */
2155 if (cpu_has_vmx_vmfunc())
2156 vmcs_write64(VM_FUNCTION_CONTROL, 0);
2158 if (cpu_has_vmx_posted_intr())
2159 vmcs_write16(POSTED_INTR_NV, POSTED_INTR_NESTED_VECTOR);
2161 if (cpu_has_vmx_msr_bitmap())
2162 vmcs_write64(MSR_BITMAP, __pa(vmx->nested.vmcs02.msr_bitmap));
2165 * PML is emulated for L2, but never enabled in hardware as the MMU
2166 * handles A/D emulation. Disabling PML for L2 also avoids having to
2167 * deal with filtering out L2 GPAs from the buffer.
2170 vmcs_write64(PML_ADDRESS, 0);
2171 vmcs_write16(GUEST_PML_INDEX, -1);
2174 if (cpu_has_vmx_encls_vmexit())
2175 vmcs_write64(ENCLS_EXITING_BITMAP, INVALID_GPA);
2177 if (kvm_notify_vmexit_enabled(kvm))
2178 vmcs_write32(NOTIFY_WINDOW, kvm->arch.notify_window);
2181 * Set the MSR load/store lists to match L0's settings. Only the
2182 * addresses are constant (for vmcs02); the counts can change based
2183 * on L2's behavior, e.g. switching to/from long mode.
2185 vmcs_write64(VM_EXIT_MSR_STORE_ADDR, __pa(vmx->msr_autostore.guest.val));
2186 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
2187 vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));
2189 vmx_set_constant_host_state(vmx);
2192 static void prepare_vmcs02_early_rare(struct vcpu_vmx *vmx,
2193 struct vmcs12 *vmcs12)
2195 prepare_vmcs02_constant_state(vmx);
2197 vmcs_write64(VMCS_LINK_POINTER, INVALID_GPA);
2200 if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02)
2201 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02);
2203 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
2207 static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct loaded_vmcs *vmcs01,
2208 struct vmcs12 *vmcs12)
2211 u64 guest_efer = nested_vmx_calc_efer(vmx, vmcs12);
2213 if (vmx->nested.dirty_vmcs12 || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
2214 prepare_vmcs02_early_rare(vmx, vmcs12);
2219 exec_control = __pin_controls_get(vmcs01);
2220 exec_control |= (vmcs12->pin_based_vm_exec_control &
2221 ~PIN_BASED_VMX_PREEMPTION_TIMER);
2223 /* Posted interrupts setting is only taken from vmcs12. */
2224 vmx->nested.pi_pending = false;
2225 if (nested_cpu_has_posted_intr(vmcs12))
2226 vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv;
2228 exec_control &= ~PIN_BASED_POSTED_INTR;
2229 pin_controls_set(vmx, exec_control);
2234 exec_control = __exec_controls_get(vmcs01); /* L0's desires */
2235 exec_control &= ~CPU_BASED_INTR_WINDOW_EXITING;
2236 exec_control &= ~CPU_BASED_NMI_WINDOW_EXITING;
2237 exec_control &= ~CPU_BASED_TPR_SHADOW;
2238 exec_control |= vmcs12->cpu_based_vm_exec_control;
2240 vmx->nested.l1_tpr_threshold = -1;
2241 if (exec_control & CPU_BASED_TPR_SHADOW)
2242 vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold);
2243 #ifdef CONFIG_X86_64
2245 exec_control |= CPU_BASED_CR8_LOAD_EXITING |
2246 CPU_BASED_CR8_STORE_EXITING;
2250 * A vmexit (to either L1 hypervisor or L0 userspace) is always needed
2251 * for I/O port accesses.
2253 exec_control |= CPU_BASED_UNCOND_IO_EXITING;
2254 exec_control &= ~CPU_BASED_USE_IO_BITMAPS;
2257 * This bit will be computed in nested_get_vmcs12_pages, because
2258 * we do not have access to L1's MSR bitmap yet. For now, keep
2259 * the same bit as before, hoping to avoid multiple VMWRITEs that
2260 * only set/clear this bit.
2262 exec_control &= ~CPU_BASED_USE_MSR_BITMAPS;
2263 exec_control |= exec_controls_get(vmx) & CPU_BASED_USE_MSR_BITMAPS;
2265 exec_controls_set(vmx, exec_control);
2268 * SECONDARY EXEC CONTROLS
2270 if (cpu_has_secondary_exec_ctrls()) {
2271 exec_control = __secondary_exec_controls_get(vmcs01);
2273 /* Take the following fields only from vmcs12 */
2274 exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2275 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2276 SECONDARY_EXEC_ENABLE_INVPCID |
2277 SECONDARY_EXEC_ENABLE_RDTSCP |
2278 SECONDARY_EXEC_XSAVES |
2279 SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE |
2280 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
2281 SECONDARY_EXEC_APIC_REGISTER_VIRT |
2282 SECONDARY_EXEC_ENABLE_VMFUNC |
2283 SECONDARY_EXEC_DESC);
2285 if (nested_cpu_has(vmcs12,
2286 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS))
2287 exec_control |= vmcs12->secondary_vm_exec_control;
2289 /* PML is emulated and never enabled in hardware for L2. */
2290 exec_control &= ~SECONDARY_EXEC_ENABLE_PML;
2292 /* VMCS shadowing for L2 is emulated for now */
2293 exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS;
2296 * Preset *DT exiting when emulating UMIP, so that vmx_set_cr4()
2297 * will not have to rewrite the controls just for this bit.
2299 if (!boot_cpu_has(X86_FEATURE_UMIP) && vmx_umip_emulated() &&
2300 (vmcs12->guest_cr4 & X86_CR4_UMIP))
2301 exec_control |= SECONDARY_EXEC_DESC;
2303 if (exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY)
2304 vmcs_write16(GUEST_INTR_STATUS,
2305 vmcs12->guest_intr_status);
2307 if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST))
2308 exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
2310 if (exec_control & SECONDARY_EXEC_ENCLS_EXITING)
2311 vmx_write_encls_bitmap(&vmx->vcpu, vmcs12);
2313 secondary_exec_controls_set(vmx, exec_control);
2319 * vmcs12's VM_{ENTRY,EXIT}_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE
2320 * are emulated by vmx_set_efer() in prepare_vmcs02(), but speculate
2321 * on the related bits (if supported by the CPU) in the hope that
2322 * we can avoid VMWrites during vmx_set_efer().
2324 * Similarly, take vmcs01's PERF_GLOBAL_CTRL in the hope that if KVM is
2325 * loading PERF_GLOBAL_CTRL via the VMCS for L1, then KVM will want to
2326 * do the same for L2.
2328 exec_control = __vm_entry_controls_get(vmcs01);
2329 exec_control |= (vmcs12->vm_entry_controls &
2330 ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL);
2331 exec_control &= ~(VM_ENTRY_IA32E_MODE | VM_ENTRY_LOAD_IA32_EFER);
2332 if (cpu_has_load_ia32_efer()) {
2333 if (guest_efer & EFER_LMA)
2334 exec_control |= VM_ENTRY_IA32E_MODE;
2335 if (guest_efer != host_efer)
2336 exec_control |= VM_ENTRY_LOAD_IA32_EFER;
2338 vm_entry_controls_set(vmx, exec_control);
2343 * L2->L1 exit controls are emulated - the hardware exit is to L0 so
2344 * we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER
2345 * bits may be modified by vmx_set_efer() in prepare_vmcs02().
2347 exec_control = __vm_exit_controls_get(vmcs01);
2348 if (cpu_has_load_ia32_efer() && guest_efer != host_efer)
2349 exec_control |= VM_EXIT_LOAD_IA32_EFER;
2351 exec_control &= ~VM_EXIT_LOAD_IA32_EFER;
2352 vm_exit_controls_set(vmx, exec_control);
2355 * Interrupt/Exception Fields
2357 if (vmx->nested.nested_run_pending) {
2358 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
2359 vmcs12->vm_entry_intr_info_field);
2360 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
2361 vmcs12->vm_entry_exception_error_code);
2362 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
2363 vmcs12->vm_entry_instruction_len);
2364 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
2365 vmcs12->guest_interruptibility_info);
2366 vmx->loaded_vmcs->nmi_known_unmasked =
2367 !(vmcs12->guest_interruptibility_info & GUEST_INTR_STATE_NMI);
2369 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
2373 static void prepare_vmcs02_rare(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
2375 struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs;
2377 if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
2378 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2)) {
2379 vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector);
2380 vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
2381 vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector);
2382 vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector);
2383 vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector);
2384 vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector);
2385 vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector);
2386 vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector);
2387 vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit);
2388 vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit);
2389 vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit);
2390 vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit);
2391 vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit);
2392 vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit);
2393 vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit);
2394 vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit);
2395 vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit);
2396 vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit);
2397 vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes);
2398 vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes);
2399 vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes);
2400 vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes);
2401 vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes);
2402 vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes);
2403 vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes);
2404 vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes);
2405 vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base);
2406 vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base);
2407 vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base);
2408 vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base);
2409 vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base);
2410 vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base);
2411 vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base);
2412 vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base);
2413 vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base);
2414 vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base);
2416 vmx->segment_cache.bitmask = 0;
2419 if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
2420 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1)) {
2421 vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs);
2422 vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
2423 vmcs12->guest_pending_dbg_exceptions);
2424 vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp);
2425 vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip);
2428 * L1 may access the L2's PDPTR, so save them to construct vmcs12.
2432 vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0);
2433 vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
2434 vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
2435 vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
2438 if (kvm_mpx_supported() && vmx->nested.nested_run_pending &&
2439 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
2440 vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
2443 if (nested_cpu_has_xsaves(vmcs12))
2444 vmcs_write64(XSS_EXIT_BITMAP, vmcs12->xss_exit_bitmap);
2447 * Whether page-faults are trapped is determined by a combination of
2448 * 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF. If L0
2449 * doesn't care about page faults then we should set all of these to
2450 * L1's desires. However, if L0 does care about (some) page faults, it
2451 * is not easy (if at all possible?) to merge L0 and L1's desires, so we
2452 * simply ask to exit on each and every L2 page fault. This is done by
2453 * setting MASK=MATCH=0 and (see below) EB.PF=1.
2454 * Note that below we don't need special code to set EB.PF beyond the
2455 * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept,
2456 * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when
2457 * !enable_ept, EB.PF is 1, so the "or" will always be 1.
2459 if (vmx_need_pf_intercept(&vmx->vcpu)) {
2461 * TODO: if both L0 and L1 need the same MASK and MATCH,
2462 * go ahead and use it?
2464 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0);
2465 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0);
2467 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, vmcs12->page_fault_error_code_mask);
2468 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, vmcs12->page_fault_error_code_match);
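/*
 * Illustrative restatement of the architectural rule relied on above (not
 * KVM code): a #PF is reflected as a VM-Exit iff
 *
 *   EXCEPTION_BITMAP.PF == ((PFEC & PFEC_MASK) == PFEC_MATCH)
 *
 * so writing MASK = MATCH = 0 makes the comparison always true and, together
 * with EB.PF = 1, intercepts every L2 page fault, while the else-branch
 * simply mirrors L1's filtering when L0 doesn't care.
 */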
2471 if (cpu_has_vmx_apicv()) {
2472 vmcs_write64(EOI_EXIT_BITMAP0, vmcs12->eoi_exit_bitmap0);
2473 vmcs_write64(EOI_EXIT_BITMAP1, vmcs12->eoi_exit_bitmap1);
2474 vmcs_write64(EOI_EXIT_BITMAP2, vmcs12->eoi_exit_bitmap2);
2475 vmcs_write64(EOI_EXIT_BITMAP3, vmcs12->eoi_exit_bitmap3);
2479 * Make sure the msr_autostore list is up to date before we set the
2480 * count in the vmcs02.
2482 prepare_vmx_msr_autostore_list(&vmx->vcpu, MSR_IA32_TSC);
2484 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, vmx->msr_autostore.guest.nr);
2485 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
2486 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
2488 set_cr4_guest_host_mask(vmx);
2492 * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
2493 * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it
2494 * with L0's requirements for its guest (a.k.a. vmcs01), so we can run the L2
2495 * guest in a way that is appropriate both to L1's requests and to our own
2496 * needs. In addition to modifying the active vmcs (which is vmcs02), this
2497 * function also has additional necessary side-effects, like setting various
2498 * vcpu->arch fields.
2499 * Returns 0 on success, 1 on failure. Invalid state exit qualification code
2500 * is assigned to entry_failure_code on failure.
2502 static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
2504 enum vm_entry_failure_code *entry_failure_code)
2506 struct vcpu_vmx *vmx = to_vmx(vcpu);
2507 bool load_guest_pdptrs_vmcs12 = false;
2509 if (vmx->nested.dirty_vmcs12 || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) {
2510 prepare_vmcs02_rare(vmx, vmcs12);
2511 vmx->nested.dirty_vmcs12 = false;
2513 load_guest_pdptrs_vmcs12 = !evmptr_is_valid(vmx->nested.hv_evmcs_vmptr) ||
2514 !(vmx->nested.hv_evmcs->hv_clean_fields &
2515 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1);
2518 if (vmx->nested.nested_run_pending &&
2519 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) {
2520 kvm_set_dr(vcpu, 7, vmcs12->guest_dr7);
2521 vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl);
2523 kvm_set_dr(vcpu, 7, vcpu->arch.dr7);
2524 vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.pre_vmenter_debugctl);
2526 if (kvm_mpx_supported() && (!vmx->nested.nested_run_pending ||
2527 !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)))
2528 vmcs_write64(GUEST_BNDCFGS, vmx->nested.pre_vmenter_bndcfgs);
2529 vmx_set_rflags(vcpu, vmcs12->guest_rflags);
2531 /* EXCEPTION_BITMAP and CR0_GUEST_HOST_MASK should basically be the
2532 * bitwise-or of what L1 wants to trap for L2, and what we want to
2533 * trap. Note that CR0.TS also needs updating - we do this later.
2535 vmx_update_exception_bitmap(vcpu);
2536 vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask;
2537 vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
2539 if (vmx->nested.nested_run_pending &&
2540 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT)) {
2541 vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat);
2542 vcpu->arch.pat = vmcs12->guest_ia32_pat;
2543 } else if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
2544 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
2547 vcpu->arch.tsc_offset = kvm_calc_nested_tsc_offset(
2548 vcpu->arch.l1_tsc_offset,
2549 vmx_get_l2_tsc_offset(vcpu),
2550 vmx_get_l2_tsc_multiplier(vcpu));
2552 vcpu->arch.tsc_scaling_ratio = kvm_calc_nested_tsc_multiplier(
2553 vcpu->arch.l1_tsc_scaling_ratio,
2554 vmx_get_l2_tsc_multiplier(vcpu));
2556 vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
2557 if (kvm_caps.has_tsc_control)
2558 vmcs_write64(TSC_MULTIPLIER, vcpu->arch.tsc_scaling_ratio);
2560 nested_vmx_transition_tlb_flush(vcpu, vmcs12, true);
2562 if (nested_cpu_has_ept(vmcs12))
2563 nested_ept_init_mmu_context(vcpu);
2566 * This sets GUEST_CR0 to vmcs12->guest_cr0, possibly modifying those
2567 * bits which we consider mandatory enabled.
2568 * The CR0_READ_SHADOW is what L2 should have expected to read given
2569 * the specifications by L1; it's not enough to take
2570 * vmcs12->cr0_read_shadow because our cr0_guest_host_mask may
2571 * contain more bits than L1 expected.
2573 vmx_set_cr0(vcpu, vmcs12->guest_cr0);
2574 vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12));
2576 vmx_set_cr4(vcpu, vmcs12->guest_cr4);
2577 vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12));
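	/*
	 * For reference (hedged sketch; nested_read_cr0/cr4 are defined
	 * elsewhere): the shadow value L2 reads back is assembled roughly as
	 *
	 *   (vmcs12->guest_cr0 & ~vmcs12->cr0_guest_host_mask) |
	 *   (vmcs12->cr0_read_shadow & vmcs12->cr0_guest_host_mask)
	 *
	 * i.e. bits L1 intercepts read back L1's shadow, bits L1 leaves
	 * untrapped read back the real guest value; CR4 is handled likewise.
	 */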
2579 vcpu->arch.efer = nested_vmx_calc_efer(vmx, vmcs12);
2580 /* Note: may modify VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */
2581 vmx_set_efer(vcpu, vcpu->arch.efer);
2584 * Guest state is invalid and unrestricted guest is disabled,
2585 * which means L1 attempted VMEntry to L2 with invalid state.
2588 * However, when force-loading the guest state (SMM exit or
2589 * loading nested state after migration), it is possible to
2590 * have invalid guest state now, which will be fixed later by
2591 * restoring the L2 register state.
2593 if (CC(from_vmentry && !vmx_guest_state_valid(vcpu))) {
2594 *entry_failure_code = ENTRY_FAIL_DEFAULT;
2598 /* Load vmcs12->guest_cr3, whether L2 is using EPT or shadow page tables. */
2599 if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12),
2600 from_vmentry, entry_failure_code))
2604 * Immediately write vmcs02.GUEST_CR3. It will be propagated to vmcs12
2605 * on nested VM-Exit, which can occur without actually running L2 and
2606 * thus without hitting vmx_load_mmu_pgd(), e.g. if L1 is entering L2 with
2607 * vmcs12.GUEST_ACTIVITYSTATE=HLT, in which case KVM will intercept the
2608 * transition to HLT instead of running L2.
2611 vmcs_writel(GUEST_CR3, vmcs12->guest_cr3);
2613 /* Late preparation of GUEST_PDPTRs now that EFER and CRs are set. */
2614 if (load_guest_pdptrs_vmcs12 && nested_cpu_has_ept(vmcs12) &&
2615 is_pae_paging(vcpu)) {
2616 vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0);
2617 vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
2618 vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
2619 vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
2622 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) &&
2623 intel_pmu_has_perf_global_ctrl(vcpu_to_pmu(vcpu)) &&
2624 WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
2625 vmcs12->guest_ia32_perf_global_ctrl))) {
2626 *entry_failure_code = ENTRY_FAIL_DEFAULT;
2630 kvm_rsp_write(vcpu, vmcs12->guest_rsp);
2631 kvm_rip_write(vcpu, vmcs12->guest_rip);
2634 * It was observed that genuine Hyper-V running in L1 doesn't reset
2635 * 'hv_clean_fields' by itself; it only sets the corresponding dirty
2636 * bits when it changes a field in the eVMCS. Mark all fields as clean here.
2639 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
2640 vmx->nested.hv_evmcs->hv_clean_fields |=
2641 HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
2646 static int nested_vmx_check_nmi_controls(struct vmcs12 *vmcs12)
2648 if (CC(!nested_cpu_has_nmi_exiting(vmcs12) &&
2649 nested_cpu_has_virtual_nmis(vmcs12)))
2652 if (CC(!nested_cpu_has_virtual_nmis(vmcs12) &&
2653 nested_cpu_has(vmcs12, CPU_BASED_NMI_WINDOW_EXITING)))
2659 static bool nested_vmx_check_eptp(struct kvm_vcpu *vcpu, u64 new_eptp)
2661 struct vcpu_vmx *vmx = to_vmx(vcpu);
2663 /* Check for memory type validity */
2664 switch (new_eptp & VMX_EPTP_MT_MASK) {
2665 case VMX_EPTP_MT_UC:
2666 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_UC_BIT)))
2669 case VMX_EPTP_MT_WB:
2670 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_WB_BIT)))
2677 /* Page-walk levels validity. */
2678 switch (new_eptp & VMX_EPTP_PWL_MASK) {
2679 case VMX_EPTP_PWL_5:
2680 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_PAGE_WALK_5_BIT)))
2683 case VMX_EPTP_PWL_4:
2684 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_PAGE_WALK_4_BIT)))
2691 /* Reserved bits should not be set */
2692 if (CC(kvm_vcpu_is_illegal_gpa(vcpu, new_eptp) || ((new_eptp >> 7) & 0x1f)))
2695 /* AD, if set, should be supported */
2696 if (new_eptp & VMX_EPTP_AD_ENABLE_BIT) {
2697 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_AD_BIT)))
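/*
 * For illustration (not from the source): on hardware advertising 4-level
 * EPT, WB memory type and A/D bits, a well-formed EPTP has low bits 0x5e:
 * memory type 6 (WB) in bits 2:0, page-walk length 4 encoded as 3 in
 * bits 5:3, and the A/D-enable bit 6 set, with bits 11:7 clear and the
 * root table PFN above bit 12.
 */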
2705 * Checks related to VM-Execution Control Fields
2707 static int nested_check_vm_execution_controls(struct kvm_vcpu *vcpu,
2708 struct vmcs12 *vmcs12)
2710 struct vcpu_vmx *vmx = to_vmx(vcpu);
2712 if (CC(!vmx_control_verify(vmcs12->pin_based_vm_exec_control,
2713 vmx->nested.msrs.pinbased_ctls_low,
2714 vmx->nested.msrs.pinbased_ctls_high)) ||
2715 CC(!vmx_control_verify(vmcs12->cpu_based_vm_exec_control,
2716 vmx->nested.msrs.procbased_ctls_low,
2717 vmx->nested.msrs.procbased_ctls_high)))
2720 if (nested_cpu_has(vmcs12, CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
2721 CC(!vmx_control_verify(vmcs12->secondary_vm_exec_control,
2722 vmx->nested.msrs.secondary_ctls_low,
2723 vmx->nested.msrs.secondary_ctls_high)))
2726 if (CC(vmcs12->cr3_target_count > nested_cpu_vmx_misc_cr3_count(vcpu)) ||
2727 nested_vmx_check_io_bitmap_controls(vcpu, vmcs12) ||
2728 nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12) ||
2729 nested_vmx_check_tpr_shadow_controls(vcpu, vmcs12) ||
2730 nested_vmx_check_apic_access_controls(vcpu, vmcs12) ||
2731 nested_vmx_check_apicv_controls(vcpu, vmcs12) ||
2732 nested_vmx_check_nmi_controls(vmcs12) ||
2733 nested_vmx_check_pml_controls(vcpu, vmcs12) ||
2734 nested_vmx_check_unrestricted_guest_controls(vcpu, vmcs12) ||
2735 nested_vmx_check_mode_based_ept_exec_controls(vcpu, vmcs12) ||
2736 nested_vmx_check_shadow_vmcs_controls(vcpu, vmcs12) ||
2737 CC(nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id))
2740 if (!nested_cpu_has_preemption_timer(vmcs12) &&
2741 nested_cpu_has_save_preemption_timer(vmcs12))
2744 if (nested_cpu_has_ept(vmcs12) &&
2745 CC(!nested_vmx_check_eptp(vcpu, vmcs12->ept_pointer)))
2748 if (nested_cpu_has_vmfunc(vmcs12)) {
2749 if (CC(vmcs12->vm_function_control &
2750 ~vmx->nested.msrs.vmfunc_controls))
2753 if (nested_cpu_has_eptp_switching(vmcs12)) {
2754 if (CC(!nested_cpu_has_ept(vmcs12)) ||
2755 CC(!page_address_valid(vcpu, vmcs12->eptp_list_address)))
2764 * Checks related to VM-Exit Control Fields
2766 static int nested_check_vm_exit_controls(struct kvm_vcpu *vcpu,
2767 struct vmcs12 *vmcs12)
2769 struct vcpu_vmx *vmx = to_vmx(vcpu);
2771 if (CC(!vmx_control_verify(vmcs12->vm_exit_controls,
2772 vmx->nested.msrs.exit_ctls_low,
2773 vmx->nested.msrs.exit_ctls_high)) ||
2774 CC(nested_vmx_check_exit_msr_switch_controls(vcpu, vmcs12)))
2781 * Checks related to VM-Entry Control Fields
2783 static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu,
2784 struct vmcs12 *vmcs12)
2786 struct vcpu_vmx *vmx = to_vmx(vcpu);
2788 if (CC(!vmx_control_verify(vmcs12->vm_entry_controls,
2789 vmx->nested.msrs.entry_ctls_low,
2790 vmx->nested.msrs.entry_ctls_high)))
2794 * From the Intel SDM, volume 3:
2795 * Fields relevant to VM-entry event injection must be set properly.
2796 * These fields are the VM-entry interruption-information field, the
2797 * VM-entry exception error code, and the VM-entry instruction length.
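/*
 * A concrete (illustrative, not from the source) injection that satisfies
 * the checks below: delivering #GP with a zero error code to a
 * protected-mode L2 would use
 *
 *   vm_entry_intr_info_field      = INTR_INFO_VALID_MASK |
 *                                   INTR_INFO_DELIVER_CODE_MASK |
 *                                   INTR_TYPE_HARD_EXCEPTION |
 *                                   GP_VECTOR          (= 0x80000b0d)
 *   vm_entry_exception_error_code = 0
 *   vm_entry_instruction_len      = ignored for hardware exceptions
 */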
2799 if (vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) {
2800 u32 intr_info = vmcs12->vm_entry_intr_info_field;
2801 u8 vector = intr_info & INTR_INFO_VECTOR_MASK;
2802 u32 intr_type = intr_info & INTR_INFO_INTR_TYPE_MASK;
2803 bool has_error_code = intr_info & INTR_INFO_DELIVER_CODE_MASK;
2804 bool should_have_error_code;
2805 bool urg = nested_cpu_has2(vmcs12,
2806 SECONDARY_EXEC_UNRESTRICTED_GUEST);
2807 bool prot_mode = !urg || vmcs12->guest_cr0 & X86_CR0_PE;
2809 /* VM-entry interruption-info field: interruption type */
2810 if (CC(intr_type == INTR_TYPE_RESERVED) ||
2811 CC(intr_type == INTR_TYPE_OTHER_EVENT &&
2812 !nested_cpu_supports_monitor_trap_flag(vcpu)))
2815 /* VM-entry interruption-info field: vector */
2816 if (CC(intr_type == INTR_TYPE_NMI_INTR && vector != NMI_VECTOR) ||
2817 CC(intr_type == INTR_TYPE_HARD_EXCEPTION && vector > 31) ||
2818 CC(intr_type == INTR_TYPE_OTHER_EVENT && vector != 0))
2821 /* VM-entry interruption-info field: deliver error code */
2822 should_have_error_code =
2823 intr_type == INTR_TYPE_HARD_EXCEPTION && prot_mode &&
2824 x86_exception_has_error_code(vector);
2825 if (CC(has_error_code != should_have_error_code))
2828 /* VM-entry exception error code */
2829 if (CC(has_error_code &&
2830 vmcs12->vm_entry_exception_error_code & GENMASK(31, 16)))
2833 /* VM-entry interruption-info field: reserved bits */
2834 if (CC(intr_info & INTR_INFO_RESVD_BITS_MASK))
2837 /* VM-entry instruction length */
2838 switch (intr_type) {
2839 case INTR_TYPE_SOFT_EXCEPTION:
2840 case INTR_TYPE_SOFT_INTR:
2841 case INTR_TYPE_PRIV_SW_EXCEPTION:
2842 if (CC(vmcs12->vm_entry_instruction_len > 15) ||
2843 CC(vmcs12->vm_entry_instruction_len == 0 &&
2844 CC(!nested_cpu_has_zero_length_injection(vcpu))))
2849 if (nested_vmx_check_entry_msr_switch_controls(vcpu, vmcs12))
2855 static int nested_vmx_check_controls(struct kvm_vcpu *vcpu,
2856 struct vmcs12 *vmcs12)
2858 if (nested_check_vm_execution_controls(vcpu, vmcs12) ||
2859 nested_check_vm_exit_controls(vcpu, vmcs12) ||
2860 nested_check_vm_entry_controls(vcpu, vmcs12))
2863 if (guest_cpuid_has_evmcs(vcpu))
2864 return nested_evmcs_check_controls(vmcs12);
2869 static int nested_vmx_check_address_space_size(struct kvm_vcpu *vcpu,
2870 struct vmcs12 *vmcs12)
2872 #ifdef CONFIG_X86_64
2873 if (CC(!!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) !=
2874 !!(vcpu->arch.efer & EFER_LMA)))
2880 static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu,
2881 struct vmcs12 *vmcs12)
2885 if (CC(!nested_host_cr0_valid(vcpu, vmcs12->host_cr0)) ||
2886 CC(!nested_host_cr4_valid(vcpu, vmcs12->host_cr4)) ||
2887 CC(kvm_vcpu_is_illegal_gpa(vcpu, vmcs12->host_cr3)))
2890 if (CC(is_noncanonical_address(vmcs12->host_ia32_sysenter_esp, vcpu)) ||
2891 CC(is_noncanonical_address(vmcs12->host_ia32_sysenter_eip, vcpu)))
2894 if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) &&
2895 CC(!kvm_pat_valid(vmcs12->host_ia32_pat)))
2898 if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) &&
2899 CC(!kvm_valid_perf_global_ctrl(vcpu_to_pmu(vcpu),
2900 vmcs12->host_ia32_perf_global_ctrl)))
2903 #ifdef CONFIG_X86_64
2904 ia32e = !!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE);
2910 if (CC(!(vmcs12->host_cr4 & X86_CR4_PAE)))
2913 if (CC(vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) ||
2914 CC(vmcs12->host_cr4 & X86_CR4_PCIDE) ||
2915 CC((vmcs12->host_rip) >> 32))
2919 if (CC(vmcs12->host_cs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
2920 CC(vmcs12->host_ss_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
2921 CC(vmcs12->host_ds_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
2922 CC(vmcs12->host_es_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
2923 CC(vmcs12->host_fs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
2924 CC(vmcs12->host_gs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
2925 CC(vmcs12->host_tr_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
2926 CC(vmcs12->host_cs_selector == 0) ||
2927 CC(vmcs12->host_tr_selector == 0) ||
2928 CC(vmcs12->host_ss_selector == 0 && !ia32e))
2931 if (CC(is_noncanonical_address(vmcs12->host_fs_base, vcpu)) ||
2932 CC(is_noncanonical_address(vmcs12->host_gs_base, vcpu)) ||
2933 CC(is_noncanonical_address(vmcs12->host_gdtr_base, vcpu)) ||
2934 CC(is_noncanonical_address(vmcs12->host_idtr_base, vcpu)) ||
2935 CC(is_noncanonical_address(vmcs12->host_tr_base, vcpu)) ||
2936 CC(is_noncanonical_address(vmcs12->host_rip, vcpu)))
2940 * If the load IA32_EFER VM-exit control is 1, bits reserved in the
2941 * IA32_EFER MSR must be 0 in the field for that register. In addition,
2942 * the values of the LMA and LME bits in the field must each be that of
2943 * the host address-space size VM-exit control.
2945 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) {
2946 if (CC(!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer)) ||
2947 CC(ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA)) ||
2948 CC(ia32e != !!(vmcs12->host_ia32_efer & EFER_LME)))
2955 static int nested_vmx_check_vmcs_link_ptr(struct kvm_vcpu *vcpu,
2956 struct vmcs12 *vmcs12)
2958 struct vcpu_vmx *vmx = to_vmx(vcpu);
2959 struct gfn_to_hva_cache *ghc = &vmx->nested.shadow_vmcs12_cache;
2960 struct vmcs_hdr hdr;
2962 if (vmcs12->vmcs_link_pointer == INVALID_GPA)
2965 if (CC(!page_address_valid(vcpu, vmcs12->vmcs_link_pointer)))
2968 if (ghc->gpa != vmcs12->vmcs_link_pointer &&
2969 CC(kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc,
2970 vmcs12->vmcs_link_pointer, VMCS12_SIZE)))
2973 if (CC(kvm_read_guest_offset_cached(vcpu->kvm, ghc, &hdr,
2974 offsetof(struct vmcs12, hdr),
2978 if (CC(hdr.revision_id != VMCS12_REVISION) ||
2979 CC(hdr.shadow_vmcs != nested_cpu_has_shadow_vmcs(vmcs12)))
2986 * Checks related to Guest Non-register State
2988 static int nested_check_guest_non_reg_state(struct vmcs12 *vmcs12)
2990 if (CC(vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE &&
2991 vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT &&
2992 vmcs12->guest_activity_state != GUEST_ACTIVITY_WAIT_SIPI))
2998 static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
2999 struct vmcs12 *vmcs12,
3000 enum vm_entry_failure_code *entry_failure_code)
3002 bool ia32e = !!(vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE);
3004 *entry_failure_code = ENTRY_FAIL_DEFAULT;
3006 if (CC(!nested_guest_cr0_valid(vcpu, vmcs12->guest_cr0)) ||
3007 CC(!nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4)))
3010 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) &&
3011 CC(!kvm_dr7_valid(vmcs12->guest_dr7)))
3014 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT) &&
3015 CC(!kvm_pat_valid(vmcs12->guest_ia32_pat)))
3018 if (nested_vmx_check_vmcs_link_ptr(vcpu, vmcs12)) {
3019 *entry_failure_code = ENTRY_FAIL_VMCS_LINK_PTR;
3023 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) &&
3024 CC(!kvm_valid_perf_global_ctrl(vcpu_to_pmu(vcpu),
3025 vmcs12->guest_ia32_perf_global_ctrl)))
3028 if (CC((vmcs12->guest_cr0 & (X86_CR0_PG | X86_CR0_PE)) == X86_CR0_PG))
3031 if (CC(ia32e && !(vmcs12->guest_cr4 & X86_CR4_PAE)) ||
3032 CC(ia32e && !(vmcs12->guest_cr0 & X86_CR0_PG)))
3036 * If the load IA32_EFER VM-entry control is 1, the following checks
3037 * are performed on the field for the IA32_EFER MSR:
3038 * - Bits reserved in the IA32_EFER MSR must be 0.
3039 * - Bit 10 (corresponding to IA32_EFER.LMA) must equal the value of
3040 * the "IA-32e mode guest" VM-entry control. It must also be identical
3041 * to bit 8 (LME) if bit 31 in the CR0 field (corresponding to CR0.PG) is 1.
3044 if (to_vmx(vcpu)->nested.nested_run_pending &&
3045 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) {
3046 if (CC(!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer)) ||
3047 CC(ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA)) ||
3048 CC(((vmcs12->guest_cr0 & X86_CR0_PG) &&
3049 ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME))))
3053 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) &&
3054 (CC(is_noncanonical_address(vmcs12->guest_bndcfgs & PAGE_MASK, vcpu)) ||
3055 CC((vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD))))
3058 if (nested_check_guest_non_reg_state(vmcs12))
3064 static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
3066 struct vcpu_vmx *vmx = to_vmx(vcpu);
3067 unsigned long cr3, cr4;
3070 if (!nested_early_check)
3073 if (vmx->msr_autoload.host.nr)
3074 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
3075 if (vmx->msr_autoload.guest.nr)
3076 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
3080 vmx_prepare_switch_to_guest(vcpu);
3083 * Induce a consistency check VMExit by clearing bit 1 in GUEST_RFLAGS,
3084 * which is reserved to '1' by hardware. GUEST_RFLAGS is guaranteed to
3085 * be written (by prepare_vmcs02()) before the "real" VMEnter, i.e.
3086 * there is no need to preserve other bits or save/restore the field.
3088 vmcs_writel(GUEST_RFLAGS, 0);
3090 cr3 = __get_current_cr3_fast();
3091 if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
3092 vmcs_writel(HOST_CR3, cr3);
3093 vmx->loaded_vmcs->host_state.cr3 = cr3;
3096 cr4 = cr4_read_shadow();
3097 if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
3098 vmcs_writel(HOST_CR4, cr4);
3099 vmx->loaded_vmcs->host_state.cr4 = cr4;
3102 vm_fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs,
3103 __vmx_vcpu_run_flags(vmx));
3105 if (vmx->msr_autoload.host.nr)
3106 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
3107 if (vmx->msr_autoload.guest.nr)
3108 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
3111 u32 error = vmcs_read32(VM_INSTRUCTION_ERROR);
3115 trace_kvm_nested_vmenter_failed(
3116 "early hardware check VM-instruction error: ", error);
3117 WARN_ON_ONCE(error != VMXERR_ENTRY_INVALID_CONTROL_FIELD);
3122 * VMExit clears RFLAGS.IF and DR7, even on a consistency check.
3124 if (hw_breakpoint_active())
3125 set_debugreg(__this_cpu_read(cpu_dr7), 7);
3130 * A non-failing VMEntry means we somehow entered guest mode with
3131 * an illegal RIP, and that's just the tip of the iceberg. There
3132 * is no telling what memory has been modified or what state has
3133 * been exposed to unknown code. Hitting this all but guarantees
3134 * a (very critical) hardware issue.
3136 WARN_ON(!(vmcs_read32(VM_EXIT_REASON) &
3137 VMX_EXIT_REASONS_FAILED_VMENTRY));
3142 static bool nested_get_evmcs_page(struct kvm_vcpu *vcpu)
3144 struct vcpu_vmx *vmx = to_vmx(vcpu);
3147 * hv_evmcs may end up not being mapped after migration (when
3148 * L2 was running); map it here to make sure vmcs12 changes are
3149 * properly reflected.
3151 if (guest_cpuid_has_evmcs(vcpu) &&
3152 vmx->nested.hv_evmcs_vmptr == EVMPTR_MAP_PENDING) {
3153 enum nested_evmptrld_status evmptrld_status =
3154 nested_vmx_handle_enlightened_vmptrld(vcpu, false);
3156 if (evmptrld_status == EVMPTRLD_VMFAIL ||
3157 evmptrld_status == EVMPTRLD_ERROR)
3161 * After migration, the vmcs12 always provides the most up-to-date
3162 * information; copy it to the eVMCS upon entry.
3164 vmx->nested.need_vmcs12_to_shadow_sync = true;
3170 static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
3172 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3173 struct vcpu_vmx *vmx = to_vmx(vcpu);
3174 struct kvm_host_map *map;
3176 if (!vcpu->arch.pdptrs_from_userspace &&
3177 !nested_cpu_has_ept(vmcs12) && is_pae_paging(vcpu)) {
3179 * Reload the guest's PDPTRs since after a migration
3180 * the guest CR3 might be restored prior to setting the nested
3181 * state, which can lead to loading the wrong PDPTRs.
3183 if (CC(!load_pdptrs(vcpu, vcpu->arch.cr3)))
3188 if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
3189 map = &vmx->nested.apic_access_page_map;
3191 if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->apic_access_addr), map)) {
3192 vmcs_write64(APIC_ACCESS_ADDR, pfn_to_hpa(map->pfn));
3194 pr_debug_ratelimited("%s: no backing for APIC-access address in vmcs12\n",
3196 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
3197 vcpu->run->internal.suberror =
3198 KVM_INTERNAL_ERROR_EMULATION;
3199 vcpu->run->internal.ndata = 0;
3204 if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
3205 map = &vmx->nested.virtual_apic_map;
3207 if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->virtual_apic_page_addr), map)) {
3208 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, pfn_to_hpa(map->pfn));
3209 } else if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING) &&
3210 nested_cpu_has(vmcs12, CPU_BASED_CR8_STORE_EXITING) &&
3211 !nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
3213 * The processor will never use the TPR shadow, simply
3214 * clear the bit from the execution control. Such a
3215 * configuration is useless, but it happens in tests.
3216 * For any other configuration, failing the vm entry is
3217 * _not_ what the processor does but it's basically the
3218 * only possibility we have.
3220 exec_controls_clearbit(vmx, CPU_BASED_TPR_SHADOW);
3223 * Write an illegal value to VIRTUAL_APIC_PAGE_ADDR to
3224 * force VM-Entry to fail.
3226 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, INVALID_GPA);
3230 if (nested_cpu_has_posted_intr(vmcs12)) {
3231 map = &vmx->nested.pi_desc_map;
3233 if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->posted_intr_desc_addr), map)) {
3234 vmx->nested.pi_desc =
3235 (struct pi_desc *)(((void *)map->hva) +
3236 offset_in_page(vmcs12->posted_intr_desc_addr));
3237 vmcs_write64(POSTED_INTR_DESC_ADDR,
3238 pfn_to_hpa(map->pfn) + offset_in_page(vmcs12->posted_intr_desc_addr));
3241 * Defer the KVM_INTERNAL_EXIT until KVM tries to
3242 * access the contents of the VMCS12 posted interrupt
3243 * descriptor. (Note that KVM may do this when it
3244 * should not, per the architectural specification.)
3246 vmx->nested.pi_desc = NULL;
3247 pin_controls_clearbit(vmx, PIN_BASED_POSTED_INTR);
3250 if (nested_vmx_prepare_msr_bitmap(vcpu, vmcs12))
3251 exec_controls_setbit(vmx, CPU_BASED_USE_MSR_BITMAPS);
3253 exec_controls_clearbit(vmx, CPU_BASED_USE_MSR_BITMAPS);
3258 static bool vmx_get_nested_state_pages(struct kvm_vcpu *vcpu)
3260 if (!nested_get_evmcs_page(vcpu)) {
3261 pr_debug_ratelimited("%s: enlightened vmptrld failed\n",
3263 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
3264 vcpu->run->internal.suberror =
3265 KVM_INTERNAL_ERROR_EMULATION;
3266 vcpu->run->internal.ndata = 0;
3271 if (is_guest_mode(vcpu) && !nested_get_vmcs12_pages(vcpu))
3277 static int nested_vmx_write_pml_buffer(struct kvm_vcpu *vcpu, gpa_t gpa)
3279 struct vmcs12 *vmcs12;
3280 struct vcpu_vmx *vmx = to_vmx(vcpu);
3283 if (WARN_ON_ONCE(!is_guest_mode(vcpu)))
3286 if (WARN_ON_ONCE(vmx->nested.pml_full))
3290 * Check if PML is enabled for the nested guest. Whether eptp bit 6 is
3291 * set is already checked as part of A/D emulation.
3293 vmcs12 = get_vmcs12(vcpu);
3294 if (!nested_cpu_has_pml(vmcs12))
3297 if (vmcs12->guest_pml_index >= PML_ENTITY_NUM) {
3298 vmx->nested.pml_full = true;
3303 dst = vmcs12->pml_address + sizeof(u64) * vmcs12->guest_pml_index;
3305 if (kvm_write_guest_page(vcpu->kvm, gpa_to_gfn(dst), &gpa,
3306 offset_in_page(dst), sizeof(gpa)))
3309 vmcs12->guest_pml_index--;
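	/*
	 * Quick walk-through (illustrative): the nested PML buffer holds
	 * PML_ENTITY_NUM u64 GPAs and is consumed from the top down, so with
	 * guest_pml_index == 511 the GPA above lands at pml_address + 511 * 8
	 * and the index becomes 510; once slot 0 has been written the 16-bit
	 * index wraps, the range check above fails on the next logged GPA,
	 * and the buffer-full condition is reflected to L1.
	 */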
3315 * Intel's VMX Instruction Reference specifies a common set of prerequisites
3316 * for running VMX instructions (except VMXON, whose prerequisites are
3317 * slightly different). It also specifies what exception to inject otherwise.
3318 * Note that many of these exceptions have priority over VM exits, so they
3319 * don't have to be checked again here.
3321 static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
3323 if (!to_vmx(vcpu)->nested.vmxon) {
3324 kvm_queue_exception(vcpu, UD_VECTOR);
3328 if (vmx_get_cpl(vcpu)) {
3329 kvm_inject_gp(vcpu, 0);
3336 static u8 vmx_has_apicv_interrupt(struct kvm_vcpu *vcpu)
3338 u8 rvi = vmx_get_rvi();
3339 u8 vppr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_PROCPRI);
3341 return ((rvi & 0xf0) > (vppr & 0xf0));
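/*
 * Example (illustrative): RVI and PPR compare by priority class (bits 7:4),
 * so RVI = 0x51 against VPPR = 0x40 means class 5 > class 4 and a virtual
 * interrupt is deliverable, while RVI = 0x45 against the same VPPR is not,
 * since class 4 is not strictly above class 4.
 */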
3344 static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
3345 struct vmcs12 *vmcs12);
3348 * If from_vmentry is false, this is being called from state restore (either RSM
3349 * or KVM_SET_NESTED_STATE). Otherwise it's called from vmlaunch/vmresume.
3352 * NVMX_VMENTRY_SUCCESS: Entered VMX non-root mode
3353 * NVMX_VMENTRY_VMFAIL: Consistency check VMFail
3354 * NVMX_VMENTRY_VMEXIT: Consistency check VMExit
3355 * NVMX_VMENTRY_KVM_INTERNAL_ERROR: KVM internal error
3357 enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
3360 struct vcpu_vmx *vmx = to_vmx(vcpu);
3361 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3362 enum vm_entry_failure_code entry_failure_code;
3363 bool evaluate_pending_interrupts;
3364 union vmx_exit_reason exit_reason = {
3365 .basic = EXIT_REASON_INVALID_STATE,
3366 .failed_vmentry = 1,
3370 trace_kvm_nested_vmenter(kvm_rip_read(vcpu),
3371 vmx->nested.current_vmptr,
3373 vmcs12->guest_intr_status,
3374 vmcs12->vm_entry_intr_info_field,
3375 vmcs12->secondary_vm_exec_control & SECONDARY_EXEC_ENABLE_EPT,
3376 vmcs12->ept_pointer,
3380 kvm_service_local_tlb_flush_requests(vcpu);
3382 evaluate_pending_interrupts = exec_controls_get(vmx) &
3383 (CPU_BASED_INTR_WINDOW_EXITING | CPU_BASED_NMI_WINDOW_EXITING);
3384 if (likely(!evaluate_pending_interrupts) && kvm_vcpu_apicv_active(vcpu))
3385 evaluate_pending_interrupts |= vmx_has_apicv_interrupt(vcpu);
3386 if (!evaluate_pending_interrupts)
3387 evaluate_pending_interrupts |= kvm_apic_has_pending_init_or_sipi(vcpu);
3389 if (!vmx->nested.nested_run_pending ||
3390 !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
3391 vmx->nested.pre_vmenter_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
3392 if (kvm_mpx_supported() &&
3393 (!vmx->nested.nested_run_pending ||
3394 !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)))
3395 vmx->nested.pre_vmenter_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
3398 * Overwrite vmcs01.GUEST_CR3 with L1's CR3 if EPT is disabled *and*
3399 * nested early checks are disabled. In the event of a "late" VM-Fail,
3400 * i.e. a VM-Fail detected by hardware but not KVM, KVM must unwind its
3401 * software model to the pre-VMEntry host state. When EPT is disabled,
3402 * GUEST_CR3 holds KVM's shadow CR3, not L1's "real" CR3, which causes
3403 * nested_vmx_restore_host_state() to corrupt vcpu->arch.cr3. Stuffing
3404 * vmcs01.GUEST_CR3 results in the unwind naturally setting arch.cr3 to
3405 * the correct value. Smashing vmcs01.GUEST_CR3 is safe because nested
3406 * VM-Exits, and the unwind, reset KVM's MMU, i.e. vmcs01.GUEST_CR3 is
3407 * guaranteed to be overwritten with a shadow CR3 prior to re-entering
3408 * L1. Don't stuff vmcs01.GUEST_CR3 when using nested early checks as
3409 * KVM modifies vcpu->arch.cr3 if and only if the early hardware checks
3410 * pass, and early VM-Fails do not reset KVM's MMU, i.e. the VM-Fail
3411 * path would need to manually save/restore vmcs01.GUEST_CR3.
3413 if (!enable_ept && !nested_early_check)
3414 vmcs_writel(GUEST_CR3, vcpu->arch.cr3);
3416 vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02);
3418 prepare_vmcs02_early(vmx, &vmx->vmcs01, vmcs12);
3421 if (unlikely(!nested_get_vmcs12_pages(vcpu))) {
3422 vmx_switch_vmcs(vcpu, &vmx->vmcs01);
3423 return NVMX_VMENTRY_KVM_INTERNAL_ERROR;
3426 if (nested_vmx_check_vmentry_hw(vcpu)) {
3427 vmx_switch_vmcs(vcpu, &vmx->vmcs01);
3428 return NVMX_VMENTRY_VMFAIL;
3431 if (nested_vmx_check_guest_state(vcpu, vmcs12,
3432 &entry_failure_code)) {
3433 exit_reason.basic = EXIT_REASON_INVALID_STATE;
3434 vmcs12->exit_qualification = entry_failure_code;
3435 goto vmentry_fail_vmexit;
3439 enter_guest_mode(vcpu);
3441 if (prepare_vmcs02(vcpu, vmcs12, from_vmentry, &entry_failure_code)) {
3442 exit_reason.basic = EXIT_REASON_INVALID_STATE;
3443 vmcs12->exit_qualification = entry_failure_code;
3444 goto vmentry_fail_vmexit_guest_mode;
3448 failed_index = nested_vmx_load_msr(vcpu,
3449 vmcs12->vm_entry_msr_load_addr,
3450 vmcs12->vm_entry_msr_load_count);
3452 exit_reason.basic = EXIT_REASON_MSR_LOAD_FAIL;
3453 vmcs12->exit_qualification = failed_index;
3454 goto vmentry_fail_vmexit_guest_mode;
3458 * The MMU is not initialized to point at the right entities yet and
3459 * "get pages" would need to read data from the guest (i.e. we will
3460 * need to perform gpa to hpa translation). Request a call
3461 * to nested_get_vmcs12_pages before the next VM-entry. The MSRs
3462 * have already been set at vmentry time and should not be reset.
3464 kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
3468 * Re-evaluate pending events if L1 had a pending IRQ/NMI/INIT/SIPI
3469 * when it executed VMLAUNCH/VMRESUME, as entering non-root mode can
3470 * effectively unblock various events, e.g. INIT/SIPI cause VM-Exit
3473 if (unlikely(evaluate_pending_interrupts))
3474 kvm_make_request(KVM_REQ_EVENT, vcpu);
3477 * Do not start the preemption timer hrtimer until after we know
3478 * we are successful, so that only nested_vmx_vmexit needs to cancel
3481 vmx->nested.preemption_timer_expired = false;
3482 if (nested_cpu_has_preemption_timer(vmcs12)) {
3483 u64 timer_value = vmx_calc_preemption_timer_value(vcpu);
3484 vmx_start_preemption_timer(vcpu, timer_value);
3488 * Note no nested_vmx_succeed or nested_vmx_fail here. At this point
3489 * we are no longer running L1, and VMLAUNCH/VMRESUME has not yet
3490 * returned as far as L1 is concerned. It will only return (and set
3491 * the success flag) when L2 exits (see nested_vmx_vmexit()).
3493 return NVMX_VMENTRY_SUCCESS;
3496 * A failed consistency check that leads to a VMExit during L1's
3497 * VMEnter to L2 is a variation of a normal VMexit, as explained in
3498 * 26.7 "VM-entry failures during or after loading guest state".
3500 vmentry_fail_vmexit_guest_mode:
3501 if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING)
3502 vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
3503 leave_guest_mode(vcpu);
3505 vmentry_fail_vmexit:
3506 vmx_switch_vmcs(vcpu, &vmx->vmcs01);
3509 return NVMX_VMENTRY_VMEXIT;
3511 load_vmcs12_host_state(vcpu, vmcs12);
3512 vmcs12->vm_exit_reason = exit_reason.full;
3513 if (enable_shadow_vmcs || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
3514 vmx->nested.need_vmcs12_to_shadow_sync = true;
3515 return NVMX_VMENTRY_VMEXIT;
3519 * nested_vmx_run() handles a nested entry, i.e., a VMLAUNCH or VMRESUME on L1
3520 * for running an L2 nested guest.
3522 static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
3524 struct vmcs12 *vmcs12;
3525 enum nvmx_vmentry_status status;
3526 struct vcpu_vmx *vmx = to_vmx(vcpu);
3527 u32 interrupt_shadow = vmx_get_interrupt_shadow(vcpu);
3528 enum nested_evmptrld_status evmptrld_status;
3530 if (!nested_vmx_check_permission(vcpu))
3533 evmptrld_status = nested_vmx_handle_enlightened_vmptrld(vcpu, launch);
3534 if (evmptrld_status == EVMPTRLD_ERROR) {
3535 kvm_queue_exception(vcpu, UD_VECTOR);
3539 kvm_pmu_trigger_event(vcpu, PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
3541 if (CC(evmptrld_status == EVMPTRLD_VMFAIL))
3542 return nested_vmx_failInvalid(vcpu);
3544 if (CC(!evmptr_is_valid(vmx->nested.hv_evmcs_vmptr) &&
3545 vmx->nested.current_vmptr == INVALID_GPA))
3546 return nested_vmx_failInvalid(vcpu);
3548 vmcs12 = get_vmcs12(vcpu);
3551 * Can't VMLAUNCH or VMRESUME a shadow VMCS. Despite the fact
3552 * that there *is* a valid VMCS pointer, RFLAGS.CF is set
3553 * rather than RFLAGS.ZF, and no error number is stored to the
3554 * VM-instruction error field.
3556 if (CC(vmcs12->hdr.shadow_vmcs))
3557 return nested_vmx_failInvalid(vcpu);
3559 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) {
3560 copy_enlightened_to_vmcs12(vmx, vmx->nested.hv_evmcs->hv_clean_fields);
3561 /* Enlightened VMCS doesn't have launch state */
3562 vmcs12->launch_state = !launch;
3563 } else if (enable_shadow_vmcs) {
3564 copy_shadow_to_vmcs12(vmx);
3568 * The nested entry process starts with enforcing various prerequisites
3569 * on vmcs12 as required by the Intel SDM, and acting appropriately when
3570 * they fail: As the SDM explains, some conditions should cause the
3571 * instruction to fail, while others will cause the instruction to seem
3572 * to succeed, but return an EXIT_REASON_INVALID_STATE.
3573 * To speed up the normal (success) code path, we should avoid checking
3574 * for misconfigurations which will anyway be caught by the processor
3575 * when using the merged vmcs02.
3577 if (CC(interrupt_shadow & KVM_X86_SHADOW_INT_MOV_SS))
3578 return nested_vmx_fail(vcpu, VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS);
3580 if (CC(vmcs12->launch_state == launch))
3581 return nested_vmx_fail(vcpu,
3582 launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS
3583 : VMXERR_VMRESUME_NONLAUNCHED_VMCS);
3585 if (nested_vmx_check_controls(vcpu, vmcs12))
3586 return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
3588 if (nested_vmx_check_address_space_size(vcpu, vmcs12))
3589 return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
3591 if (nested_vmx_check_host_state(vcpu, vmcs12))
3592 return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
3595 * We're finally done with prerequisite checking, and can start with the nested entry.
3598 vmx->nested.nested_run_pending = 1;
3599 vmx->nested.has_preemption_timer_deadline = false;
3600 status = nested_vmx_enter_non_root_mode(vcpu, true);
3601 if (unlikely(status != NVMX_VMENTRY_SUCCESS))
3602 goto vmentry_failed;
3604 /* Emulate processing of posted interrupts on VM-Enter. */
3605 if (nested_cpu_has_posted_intr(vmcs12) &&
3606 kvm_apic_has_interrupt(vcpu) == vmx->nested.posted_intr_nv) {
3607 vmx->nested.pi_pending = true;
3608 kvm_make_request(KVM_REQ_EVENT, vcpu);
3609 kvm_apic_clear_irr(vcpu, vmx->nested.posted_intr_nv);
3612 /* Hide L1D cache contents from the nested guest. */
3613 vmx->vcpu.arch.l1tf_flush_l1d = true;
3616 * Must happen outside of nested_vmx_enter_non_root_mode() as it will
3617 * also be used as part of restoring nVMX state for
3618 * snapshot restore (migration).
3620 * In this flow, it is assumed that the vmcs12 cache was
3621 * transferred as part of the captured nVMX state and should
3622 * therefore not be read from guest memory (which may not yet
3623 * exist on the destination host).
3625 nested_cache_shadow_vmcs12(vcpu, vmcs12);
3627 switch (vmcs12->guest_activity_state) {
3628 case GUEST_ACTIVITY_HLT:
3630 * If we're entering a halted L2 vcpu and the L2 vcpu won't be
3631 * awakened by event injection or by an NMI-window VM-exit or
3632 * by an interrupt-window VM-exit, halt the vcpu.
3634 if (!(vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) &&
3635 !nested_cpu_has(vmcs12, CPU_BASED_NMI_WINDOW_EXITING) &&
3636 !(nested_cpu_has(vmcs12, CPU_BASED_INTR_WINDOW_EXITING) &&
3637 (vmcs12->guest_rflags & X86_EFLAGS_IF))) {
3638 vmx->nested.nested_run_pending = 0;
3639 return kvm_emulate_halt_noskip(vcpu);
3642 case GUEST_ACTIVITY_WAIT_SIPI:
3643 vmx->nested.nested_run_pending = 0;
3644 vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
3653 vmx->nested.nested_run_pending = 0;
3654 if (status == NVMX_VMENTRY_KVM_INTERNAL_ERROR)
3656 if (status == NVMX_VMENTRY_VMEXIT)
3658 WARN_ON_ONCE(status != NVMX_VMENTRY_VMFAIL);
3659 return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
3663 * On a nested exit from L2 to L1, vmcs12.guest_cr0 might not be up-to-date
3664 * because L2 may have changed some cr0 bits directly (CR0_GUEST_HOST_MASK).
3665 * This function returns the new value we should put in vmcs12.guest_cr0.
3666 * It's not enough to just return the vmcs02 GUEST_CR0. Rather,
3667 * 1. Bits that neither L0 nor L1 trapped, were set directly by L2 and are now
3668 * available in vmcs02 GUEST_CR0. (Note: It's enough to check that L0
3669 * didn't trap the bit, because if L1 did, so would L0).
3670 * 2. Bits that L1 asked to trap (and therefore L0 also did) could not have
3671 * been modified by L2, and L1 knows it. So just leave the old value of
3672 * the bit from vmcs12.guest_cr0. Note that the bit from vmcs02 GUEST_CR0
3673 * isn't relevant, because if L0 traps this bit it can set it to anything.
3674 * 3. Bits that L1 didn't trap, but L0 did. L1 believes the guest could have
3675 * changed these bits, and therefore they need to be updated, but L0
3676 * didn't necessarily allow them to be changed in GUEST_CR0 - and rather
3677 * put them in vmcs02 CR0_READ_SHADOW. So take these bits from there.
3679 static inline unsigned long
3680 vmcs12_guest_cr0(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
3683 /*1*/ (vmcs_readl(GUEST_CR0) & vcpu->arch.cr0_guest_owned_bits) |
3684 /*2*/ (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask) |
3685 /*3*/ (vmcs_readl(CR0_READ_SHADOW) & ~(vmcs12->cr0_guest_host_mask |
3686 vcpu->arch.cr0_guest_owned_bits));
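/*
 * For a single bit such as X86_CR0_TS, the merge above reduces to picking
 * exactly one of the three sources; an illustrative sketch (assuming the
 * usual "L1 traps implies L0 traps" invariant described above):
 *
 *	if (vcpu->arch.cr0_guest_owned_bits & X86_CR0_TS)
 *		bit = vmcs_readl(GUEST_CR0) & X86_CR0_TS;		// case 1
 *	else if (vmcs12->cr0_guest_host_mask & X86_CR0_TS)
 *		bit = vmcs12->guest_cr0 & X86_CR0_TS;			// case 2
 *	else
 *		bit = vmcs_readl(CR0_READ_SHADOW) & X86_CR0_TS;		// case 3
 */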
3689 static inline unsigned long
3690 vmcs12_guest_cr4(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
3693 /*1*/ (vmcs_readl(GUEST_CR4) & vcpu->arch.cr4_guest_owned_bits) |
3694 /*2*/ (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask) |
3695 /*3*/ (vmcs_readl(CR4_READ_SHADOW) & ~(vmcs12->cr4_guest_host_mask |
3696 vcpu->arch.cr4_guest_owned_bits));
3699 static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu,
3700 struct vmcs12 *vmcs12,
3701 u32 vm_exit_reason, u32 exit_intr_info)
3707 * Per the SDM, VM-Exits due to double and triple faults are never
3708 * considered to occur during event delivery, even if the double/triple
3709 * fault is the result of an escalating vectoring issue.
3711 * Note, the SDM qualifies the double fault behavior with "The original
3712 * event results in a double-fault exception". It's unclear why the
3713 * qualification exists since exits due to double fault can occur only
3714 * while vectoring a different exception (injected events are never
3715 * subject to interception), i.e. there's _always_ an original event.
3717 * The SDM also uses NMI as a confusing example for the "original event
3718 * causes the VM exit directly" clause. NMI isn't special in any way,
3719 * the same rule applies to all events that cause an exit directly.
3720 * NMI is an odd choice for the example because NMIs can only occur on
3721 * instruction boundaries, i.e. they _can't_ occur during vectoring.
3723 if ((u16)vm_exit_reason == EXIT_REASON_TRIPLE_FAULT ||
3724 ((u16)vm_exit_reason == EXIT_REASON_EXCEPTION_NMI &&
3725 is_double_fault(exit_intr_info))) {
3726 vmcs12->idt_vectoring_info_field = 0;
3727 } else if (vcpu->arch.exception.injected) {
3728 nr = vcpu->arch.exception.vector;
3729 idt_vectoring = nr | VECTORING_INFO_VALID_MASK;
3731 if (kvm_exception_is_soft(nr)) {
3732 vmcs12->vm_exit_instruction_len =
3733 vcpu->arch.event_exit_inst_len;
3734 idt_vectoring |= INTR_TYPE_SOFT_EXCEPTION;
3736 idt_vectoring |= INTR_TYPE_HARD_EXCEPTION;
3738 if (vcpu->arch.exception.has_error_code) {
3739 idt_vectoring |= VECTORING_INFO_DELIVER_CODE_MASK;
3740 vmcs12->idt_vectoring_error_code =
3741 vcpu->arch.exception.error_code;
3744 vmcs12->idt_vectoring_info_field = idt_vectoring;
3745 } else if (vcpu->arch.nmi_injected) {
3746 vmcs12->idt_vectoring_info_field =
3747 INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR;
3748 } else if (vcpu->arch.interrupt.injected) {
3749 nr = vcpu->arch.interrupt.nr;
3750 idt_vectoring = nr | VECTORING_INFO_VALID_MASK;
3752 if (vcpu->arch.interrupt.soft) {
3753 idt_vectoring |= INTR_TYPE_SOFT_INTR;
3754 vmcs12->vm_entry_instruction_len =
3755 vcpu->arch.event_exit_inst_len;
3757 idt_vectoring |= INTR_TYPE_EXT_INTR;
3759 vmcs12->idt_vectoring_info_field = idt_vectoring;
3761 vmcs12->idt_vectoring_info_field = 0;
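/*
 * The idt_vectoring_info_field built above follows the architectural
 * VM-exit interruption-information layout: bits 7:0 hold the vector,
 * bits 10:8 the type, bit 11 the "deliver error code" flag and bit 31
 * the valid bit. Two illustrative (made-up) encodings:
 *
 *   0x80000020 = valid, type 0 (external interrupt), vector 0x20
 *   0x80000b0e = valid, type 3 (hard exception), error code, vector 14 (#PF)
 */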
3766 void nested_mark_vmcs12_pages_dirty(struct kvm_vcpu *vcpu)
3768 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3772 * Don't need to mark the APIC access page dirty; it is never
3773 * written to by the CPU during APIC virtualization.
3776 if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
3777 gfn = vmcs12->virtual_apic_page_addr >> PAGE_SHIFT;
3778 kvm_vcpu_mark_page_dirty(vcpu, gfn);
3781 if (nested_cpu_has_posted_intr(vmcs12)) {
3782 gfn = vmcs12->posted_intr_desc_addr >> PAGE_SHIFT;
3783 kvm_vcpu_mark_page_dirty(vcpu, gfn);
3787 static int vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
3789 struct vcpu_vmx *vmx = to_vmx(vcpu);
3794 if (!vmx->nested.pi_pending)
3797 if (!vmx->nested.pi_desc)
3800 vmx->nested.pi_pending = false;
3802 if (!pi_test_and_clear_on(vmx->nested.pi_desc))
3805 max_irr = find_last_bit((unsigned long *)vmx->nested.pi_desc->pir, 256);
3806 if (max_irr != 256) {
3807 vapic_page = vmx->nested.virtual_apic_map.hva;
3811 __kvm_apic_update_irr(vmx->nested.pi_desc->pir,
3812 vapic_page, &max_irr);
3813 status = vmcs_read16(GUEST_INTR_STATUS);
3814 if ((u8)max_irr > ((u8)status & 0xff)) {
3816 status |= (u8)max_irr;
3817 vmcs_write16(GUEST_INTR_STATUS, status);
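/*
 * GUEST_INTR_STATUS packs RVI in bits 7:0 and SVI in bits 15:8; the update
 * above only raises RVI. E.g. with a (hypothetical) status of 0x3020
 * (SVI=0x30, RVI=0x20) and max_irr of 0x45, RVI becomes 0x45 while SVI is
 * left at 0x30.
 */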
3821 nested_mark_vmcs12_pages_dirty(vcpu);
3825 kvm_handle_memory_failure(vcpu, X86EMUL_IO_NEEDED, NULL);
3829 static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu)
3831 struct kvm_queued_exception *ex = &vcpu->arch.exception_vmexit;
3832 u32 intr_info = ex->vector | INTR_INFO_VALID_MASK;
3833 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3834 unsigned long exit_qual;
3836 if (ex->has_payload) {
3837 exit_qual = ex->payload;
3838 } else if (ex->vector == PF_VECTOR) {
3839 exit_qual = vcpu->arch.cr2;
3840 } else if (ex->vector == DB_VECTOR) {
3841 exit_qual = vcpu->arch.dr6;
3842 exit_qual &= ~DR6_BT;
3843 exit_qual ^= DR6_ACTIVE_LOW;
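/*
 * The XOR converts the architectural DR6 value (where some bits are
 * active-low) into the exit-qualification format, which uses positive
 * logic. A hypothetical example: dr6 = 0xffff0ff1 (only B0 triggered)
 * yields an exit qualification of 0x1 after masking off DR6_BT and
 * XORing with DR6_ACTIVE_LOW (0xffff0ff0).
 */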
3849 * Unlike AMD's Paged Real Mode, which reports an error code on #PF
3850 * VM-Exits even if the CPU is in Real Mode, Intel VMX never sets the
3851 * "has error code" flag on VM-Exit if the CPU is in Real Mode.
3853 if (ex->has_error_code && is_protmode(vcpu)) {
3855 * Intel CPUs do not generate error codes with bits 31:16 set,
3856 * and more importantly VMX disallows setting bits 31:16 in the
3857 * injected error code for VM-Entry. Drop the bits to mimic
3858 * hardware and avoid inducing failure on nested VM-Entry if L1
3859 * chooses to inject the exception back to L2. AMD CPUs _do_
3860 * generate "full" 32-bit error codes, so KVM allows userspace
3861 * to inject exception error codes with bits 31:16 set.
3863 vmcs12->vm_exit_intr_error_code = (u16)ex->error_code;
3864 intr_info |= INTR_INFO_DELIVER_CODE_MASK;
3867 if (kvm_exception_is_soft(ex->vector))
3868 intr_info |= INTR_TYPE_SOFT_EXCEPTION;
3870 intr_info |= INTR_TYPE_HARD_EXCEPTION;
3872 if (!(vmcs12->idt_vectoring_info_field & VECTORING_INFO_VALID_MASK) &&
3873 vmx_get_nmi_mask(vcpu))
3874 intr_info |= INTR_INFO_UNBLOCK_NMI;
3876 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, intr_info, exit_qual);
3880 * Returns the payload of the #DB trap that is (likely) pending delivery, or 0 if none. Infer the class
3881 * of a #DB (trap-like vs. fault-like) from the exception payload (to-be-DR6).
3882 * Using the payload is flawed because code breakpoints (fault-like) and data
3883 * breakpoints (trap-like) set the same bits in DR6 (breakpoint detected), i.e.
3884 * this will return false positives if a to-be-injected code breakpoint #DB is
3885 * pending (from KVM's perspective, but not "pending" across an instruction
3886 * boundary). ICEBP, a.k.a. INT1, is also not reflected here even though it too behaves like a trap-like #DB when injected.
3889 * KVM "works" despite these flaws as ICEBP isn't currently supported by the
3890 * emulator, Monitor Trap Flag is not marked pending on intercepted #DBs (the
3891 * #DB has already happened), and MTF isn't marked pending on code breakpoints
3892 * from the emulator (because such #DBs are fault-like and thus don't trigger
3893 * actions that fire on instruction retire).
3895 static unsigned long vmx_get_pending_dbg_trap(struct kvm_queued_exception *ex)
3897 if (!ex->pending || ex->vector != DB_VECTOR)
3900 /* General Detect #DBs are always fault-like. */
3901 return ex->payload & ~DR6_BD;
3905 * Returns true if there's a pending #DB exception that is lower priority than
3906 * a pending Monitor Trap Flag VM-Exit. TSS T-flag #DBs are not emulated by
3907 * KVM, but could theoretically be injected by userspace. Note, this code is
3908 * imperfect, see above.
3910 static bool vmx_is_low_priority_db_trap(struct kvm_queued_exception *ex)
3912 return vmx_get_pending_dbg_trap(ex) & ~DR6_BT;
3916 * Certain VM-exits set the 'pending debug exceptions' field to indicate a
3917 * recognized #DB (data or single-step) that has yet to be delivered. Since KVM
3918 * represents these debug traps with a payload that is said to be compatible
3919 * with the 'pending debug exceptions' field, write the payload to the VMCS
3920 * field if a VM-exit is delivered before the debug trap.
3922 static void nested_vmx_update_pending_dbg(struct kvm_vcpu *vcpu)
3924 unsigned long pending_dbg;
3926 pending_dbg = vmx_get_pending_dbg_trap(&vcpu->arch.exception);
3928 vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, pending_dbg);
3931 static bool nested_vmx_preemption_timer_pending(struct kvm_vcpu *vcpu)
3933 return nested_cpu_has_preemption_timer(get_vmcs12(vcpu)) &&
3934 to_vmx(vcpu)->nested.preemption_timer_expired;
3937 static bool vmx_has_nested_events(struct kvm_vcpu *vcpu)
3939 return nested_vmx_preemption_timer_pending(vcpu) ||
3940 to_vmx(vcpu)->nested.mtf_pending;
3944 * Per the Intel SDM's table "Priority Among Concurrent Events", with minor
3945 * edits to fill in missing examples, e.g. #DB due to split-lock accesses,
3946 * and less minor edits to splice in the priority of VMX Non-Root specific
3947 * events, e.g. MTF and NMI/INTR-window exiting.
3949 * 1 Hardware Reset and Machine Checks
3953 * 2 Trap on Task Switch
3954 * - T flag in TSS is set (on task switch)
3956 * 3 External Hardware Interventions
3962 * 3.5 Monitor Trap Flag (MTF) VM-exit[1]
3964 * 4 Traps on Previous Instruction
3966 * - Trap-class Debug Exceptions (#DB due to TF flag set, data/I-O
3967 * breakpoint, or #DB due to a split-lock access)
3969 * 4.3 VMX-preemption timer expired VM-exit
3971 * 4.6 NMI-window exiting VM-exit[2]
3973 * 5 Nonmaskable Interrupts (NMI)
3975 * 5.5 Interrupt-window exiting VM-exit and Virtual-interrupt delivery
3977 * 6 Maskable Hardware Interrupts
3979 * 7 Code Breakpoint Fault
3981 * 8 Faults from Fetching Next Instruction
3982 * - Code-Segment Limit Violation
3984 * - Control protection exception (missing ENDBRANCH at target of indirect
3987 * 9 Faults from Decoding Next Instruction
3988 * - Instruction length > 15 bytes
3990 * - Coprocessor Not Available
3992 *10 Faults on Executing Instruction
3996 * - Segment Not Present
3998 * - General Protection
4001 * - x86 FPU Floating-point exception
4002 * - SIMD floating-point exception
4003 * - Virtualization exception
4004 * - Control protection exception
4006 * [1] Per the "Monitor Trap Flag" section: System-management interrupts (SMIs),
4007 * INIT signals, and higher priority events take priority over MTF VM exits.
4008 * MTF VM exits take priority over debug-trap exceptions and lower priority events.
4011 * [2] Debug-trap exceptions and higher priority events take priority over VM exits
4012 * caused by the VMX-preemption timer. VM exits caused by the VMX-preemption
4013 * timer take priority over VM exits caused by the "NMI-window exiting"
4014 * VM-execution control and lower priority events.
4016 * [3] Debug-trap exceptions and higher priority events take priority over VM exits
4017 * caused by "NMI-window exiting". VM exits caused by this control take
4018 * priority over non-maskable interrupts (NMIs) and lower priority events.
4020 * [4] Virtual-interrupt delivery has the same priority as that of VM exits due to
4021 * the 1-setting of the "interrupt-window exiting" VM-execution control. Thus,
4022 * non-maskable interrupts (NMIs) and higher priority events take priority over
4023 * delivery of a virtual interrupt; delivery of a virtual interrupt takes
4024 * priority over external interrupts and lower priority events.
4026 static int vmx_check_nested_events(struct kvm_vcpu *vcpu)
4028 struct kvm_lapic *apic = vcpu->arch.apic;
4029 struct vcpu_vmx *vmx = to_vmx(vcpu);
4031 * Only a pending nested run blocks a pending exception. If there is a
4032 * previously injected event, the pending exception occurred while said
4033 * event was being delivered and thus needs to be handled.
4035 bool block_nested_exceptions = vmx->nested.nested_run_pending;
4037 * New events (not exceptions) are only recognized at instruction
4038 * boundaries. If an event needs reinjection, then KVM is handling a
4039 * VM-Exit that occurred _during_ instruction execution; new events are
4040 * blocked until the instruction completes.
4042 bool block_nested_events = block_nested_exceptions ||
4043 kvm_event_needs_reinjection(vcpu);
4045 if (lapic_in_kernel(vcpu) &&
4046 test_bit(KVM_APIC_INIT, &apic->pending_events)) {
4047 if (block_nested_events)
4049 nested_vmx_update_pending_dbg(vcpu);
4050 clear_bit(KVM_APIC_INIT, &apic->pending_events);
4051 if (vcpu->arch.mp_state != KVM_MP_STATE_INIT_RECEIVED)
4052 nested_vmx_vmexit(vcpu, EXIT_REASON_INIT_SIGNAL, 0, 0);
4054 /* MTF is discarded if the vCPU is in WFS. */
4055 vmx->nested.mtf_pending = false;
4059 if (lapic_in_kernel(vcpu) &&
4060 test_bit(KVM_APIC_SIPI, &apic->pending_events)) {
4061 if (block_nested_events)
4064 clear_bit(KVM_APIC_SIPI, &apic->pending_events);
4065 if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
4066 nested_vmx_vmexit(vcpu, EXIT_REASON_SIPI_SIGNAL, 0,
4067 apic->sipi_vector & 0xFFUL);
4070 /* Fallthrough, the SIPI is completely ignored. */
4074 * Process exceptions that are higher priority than Monitor Trap Flag:
4075 * fault-like exceptions, TSS T flag #DB (not emulated by KVM, but
4076 * could theoretically come in from userspace), and ICEBP (INT1).
4078 * TODO: SMIs have higher priority than MTF and trap-like #DBs (except
4079 * for TSS T flag #DBs). KVM also doesn't save/restore pending MTF
4080 * across SMI/RSM as it should; that needs to be addressed in order to
4081 * prioritize SMI over MTF and trap-like #DBs.
4083 if (vcpu->arch.exception_vmexit.pending &&
4084 !vmx_is_low_priority_db_trap(&vcpu->arch.exception_vmexit)) {
4085 if (block_nested_exceptions)
4088 nested_vmx_inject_exception_vmexit(vcpu);
4092 if (vcpu->arch.exception.pending &&
4093 !vmx_is_low_priority_db_trap(&vcpu->arch.exception)) {
4094 if (block_nested_exceptions)
4099 if (vmx->nested.mtf_pending) {
4100 if (block_nested_events)
4102 nested_vmx_update_pending_dbg(vcpu);
4103 nested_vmx_vmexit(vcpu, EXIT_REASON_MONITOR_TRAP_FLAG, 0, 0);
4107 if (vcpu->arch.exception_vmexit.pending) {
4108 if (block_nested_exceptions)
4111 nested_vmx_inject_exception_vmexit(vcpu);
4115 if (vcpu->arch.exception.pending) {
4116 if (block_nested_exceptions)
4121 if (nested_vmx_preemption_timer_pending(vcpu)) {
4122 if (block_nested_events)
4124 nested_vmx_vmexit(vcpu, EXIT_REASON_PREEMPTION_TIMER, 0, 0);
4128 if (vcpu->arch.smi_pending && !is_smm(vcpu)) {
4129 if (block_nested_events)
4134 if (vcpu->arch.nmi_pending && !vmx_nmi_blocked(vcpu)) {
4135 if (block_nested_events)
4137 if (!nested_exit_on_nmi(vcpu))
4140 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
4141 NMI_VECTOR | INTR_TYPE_NMI_INTR |
4142 INTR_INFO_VALID_MASK, 0);
4144 * The NMI-triggered VM exit counts as injection:
4145 * clear this one and block further NMIs.
4147 vcpu->arch.nmi_pending = 0;
4148 vmx_set_nmi_mask(vcpu, true);
4152 if (kvm_cpu_has_interrupt(vcpu) && !vmx_interrupt_blocked(vcpu)) {
4153 if (block_nested_events)
4155 if (!nested_exit_on_intr(vcpu))
4157 nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0);
4162 return vmx_complete_nested_posted_interrupt(vcpu);
4165 static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu)
4168 hrtimer_get_remaining(&to_vmx(vcpu)->nested.preemption_timer);
4171 if (ktime_to_ns(remaining) <= 0)
4174 value = ktime_to_ns(remaining) * vcpu->arch.virtual_tsc_khz;
4175 do_div(value, 1000000);
4176 return value >> VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
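/*
 * I.e. the remaining wall-clock time is converted to guest TSC cycles and
 * then scaled down by 2^VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE, matching
 * the preemption timer's architectural TSC/2^N tick rate. A worked example
 * with hypothetical numbers: virtual_tsc_khz = 2,000,000 (2 GHz) and 1 ms
 * remaining gives 2,000,000 TSC cycles, i.e. a timer value of 62,500 after
 * the shift by 5.
 */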
4179 static bool is_vmcs12_ext_field(unsigned long field)
4182 case GUEST_ES_SELECTOR:
4183 case GUEST_CS_SELECTOR:
4184 case GUEST_SS_SELECTOR:
4185 case GUEST_DS_SELECTOR:
4186 case GUEST_FS_SELECTOR:
4187 case GUEST_GS_SELECTOR:
4188 case GUEST_LDTR_SELECTOR:
4189 case GUEST_TR_SELECTOR:
4190 case GUEST_ES_LIMIT:
4191 case GUEST_CS_LIMIT:
4192 case GUEST_SS_LIMIT:
4193 case GUEST_DS_LIMIT:
4194 case GUEST_FS_LIMIT:
4195 case GUEST_GS_LIMIT:
4196 case GUEST_LDTR_LIMIT:
4197 case GUEST_TR_LIMIT:
4198 case GUEST_GDTR_LIMIT:
4199 case GUEST_IDTR_LIMIT:
4200 case GUEST_ES_AR_BYTES:
4201 case GUEST_DS_AR_BYTES:
4202 case GUEST_FS_AR_BYTES:
4203 case GUEST_GS_AR_BYTES:
4204 case GUEST_LDTR_AR_BYTES:
4205 case GUEST_TR_AR_BYTES:
4212 case GUEST_LDTR_BASE:
4214 case GUEST_GDTR_BASE:
4215 case GUEST_IDTR_BASE:
4216 case GUEST_PENDING_DBG_EXCEPTIONS:
4226 static void sync_vmcs02_to_vmcs12_rare(struct kvm_vcpu *vcpu,
4227 struct vmcs12 *vmcs12)
4229 struct vcpu_vmx *vmx = to_vmx(vcpu);
4231 vmcs12->guest_es_selector = vmcs_read16(GUEST_ES_SELECTOR);
4232 vmcs12->guest_cs_selector = vmcs_read16(GUEST_CS_SELECTOR);
4233 vmcs12->guest_ss_selector = vmcs_read16(GUEST_SS_SELECTOR);
4234 vmcs12->guest_ds_selector = vmcs_read16(GUEST_DS_SELECTOR);
4235 vmcs12->guest_fs_selector = vmcs_read16(GUEST_FS_SELECTOR);
4236 vmcs12->guest_gs_selector = vmcs_read16(GUEST_GS_SELECTOR);
4237 vmcs12->guest_ldtr_selector = vmcs_read16(GUEST_LDTR_SELECTOR);
4238 vmcs12->guest_tr_selector = vmcs_read16(GUEST_TR_SELECTOR);
4239 vmcs12->guest_es_limit = vmcs_read32(GUEST_ES_LIMIT);
4240 vmcs12->guest_cs_limit = vmcs_read32(GUEST_CS_LIMIT);
4241 vmcs12->guest_ss_limit = vmcs_read32(GUEST_SS_LIMIT);
4242 vmcs12->guest_ds_limit = vmcs_read32(GUEST_DS_LIMIT);
4243 vmcs12->guest_fs_limit = vmcs_read32(GUEST_FS_LIMIT);
4244 vmcs12->guest_gs_limit = vmcs_read32(GUEST_GS_LIMIT);
4245 vmcs12->guest_ldtr_limit = vmcs_read32(GUEST_LDTR_LIMIT);
4246 vmcs12->guest_tr_limit = vmcs_read32(GUEST_TR_LIMIT);
4247 vmcs12->guest_gdtr_limit = vmcs_read32(GUEST_GDTR_LIMIT);
4248 vmcs12->guest_idtr_limit = vmcs_read32(GUEST_IDTR_LIMIT);
4249 vmcs12->guest_es_ar_bytes = vmcs_read32(GUEST_ES_AR_BYTES);
4250 vmcs12->guest_ds_ar_bytes = vmcs_read32(GUEST_DS_AR_BYTES);
4251 vmcs12->guest_fs_ar_bytes = vmcs_read32(GUEST_FS_AR_BYTES);
4252 vmcs12->guest_gs_ar_bytes = vmcs_read32(GUEST_GS_AR_BYTES);
4253 vmcs12->guest_ldtr_ar_bytes = vmcs_read32(GUEST_LDTR_AR_BYTES);
4254 vmcs12->guest_tr_ar_bytes = vmcs_read32(GUEST_TR_AR_BYTES);
4255 vmcs12->guest_es_base = vmcs_readl(GUEST_ES_BASE);
4256 vmcs12->guest_cs_base = vmcs_readl(GUEST_CS_BASE);
4257 vmcs12->guest_ss_base = vmcs_readl(GUEST_SS_BASE);
4258 vmcs12->guest_ds_base = vmcs_readl(GUEST_DS_BASE);
4259 vmcs12->guest_fs_base = vmcs_readl(GUEST_FS_BASE);
4260 vmcs12->guest_gs_base = vmcs_readl(GUEST_GS_BASE);
4261 vmcs12->guest_ldtr_base = vmcs_readl(GUEST_LDTR_BASE);
4262 vmcs12->guest_tr_base = vmcs_readl(GUEST_TR_BASE);
4263 vmcs12->guest_gdtr_base = vmcs_readl(GUEST_GDTR_BASE);
4264 vmcs12->guest_idtr_base = vmcs_readl(GUEST_IDTR_BASE);
4265 vmcs12->guest_pending_dbg_exceptions =
4266 vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS);
4268 vmx->nested.need_sync_vmcs02_to_vmcs12_rare = false;
4271 static void copy_vmcs02_to_vmcs12_rare(struct kvm_vcpu *vcpu,
4272 struct vmcs12 *vmcs12)
4274 struct vcpu_vmx *vmx = to_vmx(vcpu);
4277 if (!vmx->nested.need_sync_vmcs02_to_vmcs12_rare)
4281 WARN_ON_ONCE(vmx->loaded_vmcs != &vmx->vmcs01);
4284 vmx->loaded_vmcs = &vmx->nested.vmcs02;
4285 vmx_vcpu_load_vmcs(vcpu, cpu, &vmx->vmcs01);
4287 sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
4289 vmx->loaded_vmcs = &vmx->vmcs01;
4290 vmx_vcpu_load_vmcs(vcpu, cpu, &vmx->nested.vmcs02);
4295 * Update the guest state fields of vmcs12 to reflect changes that
4296 * occurred while L2 was running. (The "IA-32e mode guest" bit of the
4297 * VM-entry controls is also updated, since this is really a guest
4300 static void sync_vmcs02_to_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
4302 struct vcpu_vmx *vmx = to_vmx(vcpu);
4304 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
4305 sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
4307 vmx->nested.need_sync_vmcs02_to_vmcs12_rare =
4308 !evmptr_is_valid(vmx->nested.hv_evmcs_vmptr);
4310 vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12);
4311 vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12);
4313 vmcs12->guest_rsp = kvm_rsp_read(vcpu);
4314 vmcs12->guest_rip = kvm_rip_read(vcpu);
4315 vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS);
4317 vmcs12->guest_cs_ar_bytes = vmcs_read32(GUEST_CS_AR_BYTES);
4318 vmcs12->guest_ss_ar_bytes = vmcs_read32(GUEST_SS_AR_BYTES);
4320 vmcs12->guest_interruptibility_info =
4321 vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
4323 if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
4324 vmcs12->guest_activity_state = GUEST_ACTIVITY_HLT;
4325 else if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED)
4326 vmcs12->guest_activity_state = GUEST_ACTIVITY_WAIT_SIPI;
4328 vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE;
4330 if (nested_cpu_has_preemption_timer(vmcs12) &&
4331 vmcs12->vm_exit_controls & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER &&
4332 !vmx->nested.nested_run_pending)
4333 vmcs12->vmx_preemption_timer_value =
4334 vmx_get_preemption_timer_value(vcpu);
4337 * In some cases (usually, nested EPT), L2 is allowed to change its
4338 * own CR3 without exiting. If it has changed it, we must keep it.
4339 * Of course, if L0 is using shadow page tables, GUEST_CR3 was defined
4340 * by L0, not L1 or L2, so we mustn't unconditionally copy it to vmcs12.
4342 * Additionally, restore L2's PDPTRs to vmcs12.
4345 vmcs12->guest_cr3 = vmcs_readl(GUEST_CR3);
4346 if (nested_cpu_has_ept(vmcs12) && is_pae_paging(vcpu)) {
4347 vmcs12->guest_pdptr0 = vmcs_read64(GUEST_PDPTR0);
4348 vmcs12->guest_pdptr1 = vmcs_read64(GUEST_PDPTR1);
4349 vmcs12->guest_pdptr2 = vmcs_read64(GUEST_PDPTR2);
4350 vmcs12->guest_pdptr3 = vmcs_read64(GUEST_PDPTR3);
4354 vmcs12->guest_linear_address = vmcs_readl(GUEST_LINEAR_ADDRESS);
4356 if (nested_cpu_has_vid(vmcs12))
4357 vmcs12->guest_intr_status = vmcs_read16(GUEST_INTR_STATUS);
4359 vmcs12->vm_entry_controls =
4360 (vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) |
4361 (vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE);
4363 if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_DEBUG_CONTROLS)
4364 kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7);
4366 if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_EFER)
4367 vmcs12->guest_ia32_efer = vcpu->arch.efer;
4371 * prepare_vmcs12 is part of what we need to do when the nested L2 guest exits
4372 * and we want to prepare to run its L1 parent. L1 keeps a vmcs for L2 (vmcs12),
4373 * and this function updates it to reflect the changes to the guest state while
4374 * L2 was running (and perhaps made some exits which were handled directly by L0
4375 * without going back to L1), and to reflect the exit reason.
4376 * Note that we do not have to copy all VMCS fields here, just those that
4377 * could have been changed by the L2 guest or the exit - i.e., the guest-state and
4378 * exit-information fields only. Other fields are modified by L1 with VMWRITE,
4379 * which already writes to vmcs12 directly.
4381 static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
4382 u32 vm_exit_reason, u32 exit_intr_info,
4383 unsigned long exit_qualification)
4385 /* update exit information fields: */
4386 vmcs12->vm_exit_reason = vm_exit_reason;
4387 if (to_vmx(vcpu)->exit_reason.enclave_mode)
4388 vmcs12->vm_exit_reason |= VMX_EXIT_REASONS_SGX_ENCLAVE_MODE;
4389 vmcs12->exit_qualification = exit_qualification;
4392 * On VM-Exit due to a failed VM-Entry, the VMCS isn't marked launched
4393 * and only EXIT_REASON and EXIT_QUALIFICATION are updated, all other
4394 * exit info fields are unmodified.
4396 if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) {
4397 vmcs12->launch_state = 1;
4399 /* vm_entry_intr_info_field is cleared on exit. Emulate this
4400 * instead of reading the real value. */
4401 vmcs12->vm_entry_intr_info_field &= ~INTR_INFO_VALID_MASK;
4404 * Transfer the event that L0 or L1 may have wanted to inject into
4405 * L2 to IDT_VECTORING_INFO_FIELD.
4407 vmcs12_save_pending_event(vcpu, vmcs12,
4408 vm_exit_reason, exit_intr_info);
4410 vmcs12->vm_exit_intr_info = exit_intr_info;
4411 vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
4412 vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
4415 * According to the spec, there's no need to store the guest's
4416 * MSRs if the exit is due to a VM-entry failure that occurs
4417 * during or after loading the guest state. Since this exit
4418 * does not fall in that category, we need to save the MSRs.
4420 if (nested_vmx_store_msr(vcpu,
4421 vmcs12->vm_exit_msr_store_addr,
4422 vmcs12->vm_exit_msr_store_count))
4423 nested_vmx_abort(vcpu,
4424 VMX_ABORT_SAVE_GUEST_MSR_FAIL);
4429 * A part of what we need to do when the nested L2 guest exits and we want to
4430 * run its L1 parent is to reset L1's guest state to the host state specified in vmcs12.
4432 * This function is to be called not only on normal nested exit, but also on
4433 * a nested entry failure, as explained in Intel's spec, 3B.23.7 ("VM-Entry
4434 * Failures During or After Loading Guest State").
4435 * This function should be called when the active VMCS is L1's (vmcs01).
4437 static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
4438 struct vmcs12 *vmcs12)
4440 enum vm_entry_failure_code ignored;
4441 struct kvm_segment seg;
4443 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER)
4444 vcpu->arch.efer = vmcs12->host_ia32_efer;
4445 else if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
4446 vcpu->arch.efer |= (EFER_LMA | EFER_LME);
4448 vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
4449 vmx_set_efer(vcpu, vcpu->arch.efer);
4451 kvm_rsp_write(vcpu, vmcs12->host_rsp);
4452 kvm_rip_write(vcpu, vmcs12->host_rip);
4453 vmx_set_rflags(vcpu, X86_EFLAGS_FIXED);
4454 vmx_set_interrupt_shadow(vcpu, 0);
4457 * Note that calling vmx_set_cr0 is important, even if cr0 hasn't
4458 * actually changed, because vmx_set_cr0 refers to efer set above.
4460 * CR0_GUEST_HOST_MASK is already set in the original vmcs01
4461 * (KVM doesn't change it);
4463 vcpu->arch.cr0_guest_owned_bits = vmx_l1_guest_owned_cr0_bits();
4464 vmx_set_cr0(vcpu, vmcs12->host_cr0);
4466 /* Same as above - no reason to call set_cr4_guest_host_mask(). */
4467 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
4468 vmx_set_cr4(vcpu, vmcs12->host_cr4);
4470 nested_ept_uninit_mmu_context(vcpu);
4473 * Only PDPTE load can fail as the value of cr3 was checked on entry and
4474 * couldn't have changed.
4476 if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, true, &ignored))
4477 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL);
4479 nested_vmx_transition_tlb_flush(vcpu, vmcs12, false);
4481 vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs);
4482 vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->host_ia32_sysenter_esp);
4483 vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip);
4484 vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base);
4485 vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base);
4486 vmcs_write32(GUEST_IDTR_LIMIT, 0xFFFF);
4487 vmcs_write32(GUEST_GDTR_LIMIT, 0xFFFF);
4489 /* If not VM_EXIT_CLEAR_BNDCFGS, the L2 value propagates to L1. */
4490 if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS)
4491 vmcs_write64(GUEST_BNDCFGS, 0);
4493 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) {
4494 vmcs_write64(GUEST_IA32_PAT, vmcs12->host_ia32_pat);
4495 vcpu->arch.pat = vmcs12->host_ia32_pat;
4497 if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) &&
4498 intel_pmu_has_perf_global_ctrl(vcpu_to_pmu(vcpu)))
4499 WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
4500 vmcs12->host_ia32_perf_global_ctrl));
4502 /* Set L1 segment info according to Intel SDM
4503 27.5.2 Loading Host Segment and Descriptor-Table Registers */
4504 seg = (struct kvm_segment) {
4506 .limit = 0xFFFFFFFF,
4507 .selector = vmcs12->host_cs_selector,
4513 if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
4517 __vmx_set_segment(vcpu, &seg, VCPU_SREG_CS);
4518 seg = (struct kvm_segment) {
4520 .limit = 0xFFFFFFFF,
4527 seg.selector = vmcs12->host_ds_selector;
4528 __vmx_set_segment(vcpu, &seg, VCPU_SREG_DS);
4529 seg.selector = vmcs12->host_es_selector;
4530 __vmx_set_segment(vcpu, &seg, VCPU_SREG_ES);
4531 seg.selector = vmcs12->host_ss_selector;
4532 __vmx_set_segment(vcpu, &seg, VCPU_SREG_SS);
4533 seg.selector = vmcs12->host_fs_selector;
4534 seg.base = vmcs12->host_fs_base;
4535 __vmx_set_segment(vcpu, &seg, VCPU_SREG_FS);
4536 seg.selector = vmcs12->host_gs_selector;
4537 seg.base = vmcs12->host_gs_base;
4538 __vmx_set_segment(vcpu, &seg, VCPU_SREG_GS);
4539 seg = (struct kvm_segment) {
4540 .base = vmcs12->host_tr_base,
4542 .selector = vmcs12->host_tr_selector,
4546 __vmx_set_segment(vcpu, &seg, VCPU_SREG_TR);
4548 memset(&seg, 0, sizeof(seg));
4550 __vmx_set_segment(vcpu, &seg, VCPU_SREG_LDTR);
4552 kvm_set_dr(vcpu, 7, 0x400);
4553 vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
4555 if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr,
4556 vmcs12->vm_exit_msr_load_count))
4557 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
4559 to_vmx(vcpu)->emulation_required = vmx_emulation_required(vcpu);
4562 static inline u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx)
4564 struct vmx_uret_msr *efer_msr;
4567 if (vm_entry_controls_get(vmx) & VM_ENTRY_LOAD_IA32_EFER)
4568 return vmcs_read64(GUEST_IA32_EFER);
4570 if (cpu_has_load_ia32_efer())
4573 for (i = 0; i < vmx->msr_autoload.guest.nr; ++i) {
4574 if (vmx->msr_autoload.guest.val[i].index == MSR_EFER)
4575 return vmx->msr_autoload.guest.val[i].value;
4578 efer_msr = vmx_find_uret_msr(vmx, MSR_EFER);
4580 return efer_msr->data;
4585 static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
4587 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
4588 struct vcpu_vmx *vmx = to_vmx(vcpu);
4589 struct vmx_msr_entry g, h;
4593 vcpu->arch.pat = vmcs_read64(GUEST_IA32_PAT);
4595 if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) {
4597 * L1's host DR7 is lost if KVM_GUESTDBG_USE_HW_BP is set
4598 * as vmcs01.GUEST_DR7 contains a userspace defined value
4599 * and vcpu->arch.dr7 is not squirreled away before the
4600 * nested VMENTER (not worth adding a variable in nested_vmx).
4602 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
4603 kvm_set_dr(vcpu, 7, DR7_FIXED_1);
4605 WARN_ON(kvm_set_dr(vcpu, 7, vmcs_readl(GUEST_DR7)));
4609 * Note that calling vmx_set_{efer,cr0,cr4} is important as they
4610 * handle a variety of side effects to KVM's software model.
4612 vmx_set_efer(vcpu, nested_vmx_get_vmcs01_guest_efer(vmx));
4614 vcpu->arch.cr0_guest_owned_bits = vmx_l1_guest_owned_cr0_bits();
4615 vmx_set_cr0(vcpu, vmcs_readl(CR0_READ_SHADOW));
4617 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
4618 vmx_set_cr4(vcpu, vmcs_readl(CR4_READ_SHADOW));
4620 nested_ept_uninit_mmu_context(vcpu);
4621 vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
4622 kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);
4625 * Use ept_save_pdptrs(vcpu) to load the MMU's cached PDPTRs
4626 * from vmcs01 (if necessary). The PDPTRs are not loaded on
4627 * VMFail, like everything else we just need to ensure our
4628 * software model is up-to-date.
4630 if (enable_ept && is_pae_paging(vcpu))
4631 ept_save_pdptrs(vcpu);
4633 kvm_mmu_reset_context(vcpu);
4636 * This nasty bit of open coding is a compromise between blindly
4637 * loading L1's MSRs using the exit load lists (incorrect emulation
4638 * of VMFail), leaving the nested VM's MSRs in the software model
4639 * (incorrect behavior) and snapshotting the modified MSRs (too
4640 * expensive since the lists are unbound by hardware). For each
4641 * MSR that was (prematurely) loaded from the nested VMEntry load
4642 * list, reload it from the exit load list if it exists and differs
4643 * from the guest value. The intent is to stuff host state as
4644 * silently as possible, not to fully process the exit load list.
4646 for (i = 0; i < vmcs12->vm_entry_msr_load_count; i++) {
4647 gpa = vmcs12->vm_entry_msr_load_addr + (i * sizeof(g));
4648 if (kvm_vcpu_read_guest(vcpu, gpa, &g, sizeof(g))) {
4649 pr_debug_ratelimited(
4650 "%s read MSR index failed (%u, 0x%08llx)\n",
4655 for (j = 0; j < vmcs12->vm_exit_msr_load_count; j++) {
4656 gpa = vmcs12->vm_exit_msr_load_addr + (j * sizeof(h));
4657 if (kvm_vcpu_read_guest(vcpu, gpa, &h, sizeof(h))) {
4658 pr_debug_ratelimited(
4659 "%s read MSR failed (%u, 0x%08llx)\n",
4663 if (h.index != g.index)
4665 if (h.value == g.value)
4668 if (nested_vmx_load_msr_check(vcpu, &h)) {
4669 pr_debug_ratelimited(
4670 "%s check failed (%u, 0x%x, 0x%x)\n",
4671 __func__, j, h.index, h.reserved);
4675 if (kvm_set_msr(vcpu, h.index, h.value)) {
4676 pr_debug_ratelimited(
4677 "%s WRMSR failed (%u, 0x%x, 0x%llx)\n",
4678 __func__, j, h.index, h.value);
4687 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
4691 * Emulate an exit from nested guest (L2) to L1, i.e., prepare to run L1
4692 * and modify vmcs12 to make it see what it would expect to see there if
4693 * L2 was its real guest. Must only be called when in L2 (is_guest_mode())
4695 void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
4696 u32 exit_intr_info, unsigned long exit_qualification)
4698 struct vcpu_vmx *vmx = to_vmx(vcpu);
4699 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
4701 /* Pending MTF traps are discarded on VM-Exit. */
4702 vmx->nested.mtf_pending = false;
4704 /* trying to cancel vmlaunch/vmresume is a bug */
4705 WARN_ON_ONCE(vmx->nested.nested_run_pending);
4707 if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) {
4709 * KVM_REQ_GET_NESTED_STATE_PAGES is also used to map
4710 * Enlightened VMCS after migration and we still need to
4711 * do that when something is forcing L2->L1 exit prior to
4714 (void)nested_get_evmcs_page(vcpu);
4717 /* Service pending TLB flush requests for L2 before switching to L1. */
4718 kvm_service_local_tlb_flush_requests(vcpu);
4721 * VCPU_EXREG_PDPTR will be clobbered in arch/x86/kvm/vmx/vmx.h between
4722 * now and the new vmentry. Ensure that the VMCS02 PDPTR fields are
4723 * up-to-date before switching to L1.
4725 if (enable_ept && is_pae_paging(vcpu))
4726 vmx_ept_load_pdptrs(vcpu);
4728 leave_guest_mode(vcpu);
4730 if (nested_cpu_has_preemption_timer(vmcs12))
4731 hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer);
4733 if (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETTING)) {
4734 vcpu->arch.tsc_offset = vcpu->arch.l1_tsc_offset;
4735 if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_TSC_SCALING))
4736 vcpu->arch.tsc_scaling_ratio = vcpu->arch.l1_tsc_scaling_ratio;
4739 if (likely(!vmx->fail)) {
4740 sync_vmcs02_to_vmcs12(vcpu, vmcs12);
4742 if (vm_exit_reason != -1)
4743 prepare_vmcs12(vcpu, vmcs12, vm_exit_reason,
4744 exit_intr_info, exit_qualification);
4747 * Must happen outside of sync_vmcs02_to_vmcs12() as it will
4748 * also be used to capture vmcs12 cache as part of
4749 * capturing nVMX state for snapshot (migration).
4751 * Otherwise, this flush will dirty guest memory at a
4752 * point it is already assumed by user-space to be immutable.
4755 nested_flush_cached_shadow_vmcs12(vcpu, vmcs12);
4758 * The only expected VM-instruction error is "VM entry with
4759 * invalid control field(s)." Anything else indicates a
4760 * problem with L0. And we should never get here with a
4761 * VMFail of any type if early consistency checks are enabled.
4763 WARN_ON_ONCE(vmcs_read32(VM_INSTRUCTION_ERROR) !=
4764 VMXERR_ENTRY_INVALID_CONTROL_FIELD);
4765 WARN_ON_ONCE(nested_early_check);
4769 * Drop events/exceptions that were queued for re-injection to L2
4770 * (picked up via vmx_complete_interrupts()), as well as exceptions
4771 * that were pending for L2. Note, this must NOT be hoisted above
4772 * prepare_vmcs12(), events/exceptions queued for re-injection need to
4773 * be captured in vmcs12 (see vmcs12_save_pending_event()).
4775 vcpu->arch.nmi_injected = false;
4776 kvm_clear_exception_queue(vcpu);
4777 kvm_clear_interrupt_queue(vcpu);
4779 vmx_switch_vmcs(vcpu, &vmx->vmcs01);
4782 * If IBRS is advertised to the vCPU, KVM must flush the indirect
4783 * branch predictors when transitioning from L2 to L1, as L1 expects
4784 * hardware (KVM in this case) to provide separate predictor modes.
4785 * Bare metal isolates VMX root (host) from VMX non-root (guest), but
4786 * doesn't isolate different VMCSs, i.e. in this case, doesn't provide
4787 * separate modes for L2 vs L1.
4789 if (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
4790 indirect_branch_prediction_barrier();
4792 /* Update any VMCS fields that might have changed while L2 ran */
4793 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
4794 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
4795 vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
4796 if (kvm_caps.has_tsc_control)
4797 vmcs_write64(TSC_MULTIPLIER, vcpu->arch.tsc_scaling_ratio);
4799 if (vmx->nested.l1_tpr_threshold != -1)
4800 vmcs_write32(TPR_THRESHOLD, vmx->nested.l1_tpr_threshold);
4802 if (vmx->nested.change_vmcs01_virtual_apic_mode) {
4803 vmx->nested.change_vmcs01_virtual_apic_mode = false;
4804 vmx_set_virtual_apic_mode(vcpu);
4807 if (vmx->nested.update_vmcs01_cpu_dirty_logging) {
4808 vmx->nested.update_vmcs01_cpu_dirty_logging = false;
4809 vmx_update_cpu_dirty_logging(vcpu);
4812 /* Unpin physical memory we referred to in vmcs02 */
4813 kvm_vcpu_unmap(vcpu, &vmx->nested.apic_access_page_map, false);
4814 kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true);
4815 kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true);
4816 vmx->nested.pi_desc = NULL;
4818 if (vmx->nested.reload_vmcs01_apic_access_page) {
4819 vmx->nested.reload_vmcs01_apic_access_page = false;
4820 kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
4823 if (vmx->nested.update_vmcs01_apicv_status) {
4824 vmx->nested.update_vmcs01_apicv_status = false;
4825 kvm_make_request(KVM_REQ_APICV_UPDATE, vcpu);
4828 if ((vm_exit_reason != -1) &&
4829 (enable_shadow_vmcs || evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)))
4830 vmx->nested.need_vmcs12_to_shadow_sync = true;
4832 /* in case we halted in L2 */
4833 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
4835 if (likely(!vmx->fail)) {
4836 if ((u16)vm_exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT &&
4837 nested_exit_intr_ack_set(vcpu)) {
4838 int irq = kvm_cpu_get_interrupt(vcpu);
4840 vmcs12->vm_exit_intr_info = irq |
4841 INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR;
4844 if (vm_exit_reason != -1)
4845 trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason,
4846 vmcs12->exit_qualification,
4847 vmcs12->idt_vectoring_info_field,
4848 vmcs12->vm_exit_intr_info,
4849 vmcs12->vm_exit_intr_error_code,
4852 load_vmcs12_host_state(vcpu, vmcs12);
4858 * After an early L2 VM-entry failure, we're now back
4859 * in L1 which thinks it just finished a VMLAUNCH or
4860 * VMRESUME instruction, so we need to set the failure
4861 * flag and the VM-instruction error field of the VMCS
4862 * accordingly, and skip the emulated instruction.
4864 (void)nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
4867 * Restore L1's host state to KVM's software model. We're here
4868 * because a consistency check was caught by hardware, which
4869 * means some amount of guest state has been propagated to KVM's
4870 * model and needs to be unwound to the host's state.
4872 nested_vmx_restore_host_state(vcpu);
4877 static void nested_vmx_triple_fault(struct kvm_vcpu *vcpu)
4879 kvm_clear_request(KVM_REQ_TRIPLE_FAULT, vcpu);
4880 nested_vmx_vmexit(vcpu, EXIT_REASON_TRIPLE_FAULT, 0, 0);
4884 * Decode the memory-address operand of a vmx instruction, as recorded on an
4885 * exit caused by such an instruction (run by a guest hypervisor).
4886 * On success, returns 0. When the operand is invalid, returns 1 and queues the appropriate exception (#UD, #GP(0) or #SS(0)).
4889 int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
4890 u32 vmx_instruction_info, bool wr, int len, gva_t *ret)
4894 struct kvm_segment s;
4897 * According to Vol. 3B, "Information for VM Exits Due to Instruction
4898 * Execution", on an exit, vmx_instruction_info holds most of the
4899 * addressing components of the operand. Only the displacement part
4900 * is put in exit_qualification (see 3B, "Basic VM-Exit Information").
4901 * For how an actual address is calculated from all these components,
4902 * refer to Vol. 1, "Operand Addressing".
4904 int scaling = vmx_instruction_info & 3;
4905 int addr_size = (vmx_instruction_info >> 7) & 7;
4906 bool is_reg = vmx_instruction_info & (1u << 10);
4907 int seg_reg = (vmx_instruction_info >> 15) & 7;
4908 int index_reg = (vmx_instruction_info >> 18) & 0xf;
4909 bool index_is_valid = !(vmx_instruction_info & (1u << 22));
4910 int base_reg = (vmx_instruction_info >> 23) & 0xf;
4911 bool base_is_valid = !(vmx_instruction_info & (1u << 27));
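/*
 * For instance, a hypothetical vmx_instruction_info of 0x418100 decodes as:
 * memory operand (bit 10 clear), 64-bit address size (2), segment DS (3),
 * base register RAX (0, valid), no index (bit 22 set), no scaling - so the
 * operand address is DS.base + RAX + displacement, the displacement coming
 * from the exit qualification.
 */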
4914 kvm_queue_exception(vcpu, UD_VECTOR);
4918 /* Addr = segment_base + offset */
4919 /* offset = base + [index * scale] + displacement */
4920 off = exit_qualification; /* holds the displacement */
4922 off = (gva_t)sign_extend64(off, 31);
4923 else if (addr_size == 0)
4924 off = (gva_t)sign_extend64(off, 15);
4926 off += kvm_register_read(vcpu, base_reg);
4928 off += kvm_register_read(vcpu, index_reg) << scaling;
4929 vmx_get_segment(vcpu, &s, seg_reg);
4932 * The effective address, i.e. @off, of a memory operand is truncated
4933 * based on the address size of the instruction. Note that this is
4934 * the *effective address*, i.e. the address prior to accounting for
4935 * the segment's base.
4937 if (addr_size == 1) /* 32 bit */
4939 else if (addr_size == 0) /* 16 bit */
4942 /* Checks for #GP/#SS exceptions. */
4944 if (is_long_mode(vcpu)) {
4946 * The virtual/linear address is never truncated in 64-bit
4947 * mode, e.g. a 32-bit address size can yield a 64-bit virtual
4948 * address when using FS/GS with a non-zero base.
4950 if (seg_reg == VCPU_SREG_FS || seg_reg == VCPU_SREG_GS)
4951 *ret = s.base + off;
4955 /* Long mode: #GP(0)/#SS(0) if the memory address is in a
4956 * non-canonical form. This is the only check on the memory
4957 * destination for long mode!
4959 exn = is_noncanonical_address(*ret, vcpu);
4962 * When not in long mode, the virtual/linear address is
4963 * unconditionally truncated to 32 bits regardless of the
4966 *ret = (s.base + off) & 0xffffffff;
4968 /* Protected mode: apply checks for segment validity in the following order:
4970 * - segment type check (#GP(0) may be thrown)
4971 * - usability check (#GP(0)/#SS(0))
4972 * - limit check (#GP(0)/#SS(0))
4975 /* #GP(0) if the destination operand is located in a
4976 * read-only data segment or any code segment.
4978 exn = ((s.type & 0xa) == 0 || (s.type & 8));
4980 /* #GP(0) if the source operand is located in an
4981 * execute-only code segment
4983 exn = ((s.type & 0xa) == 8);
4985 kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
4988 /* Protected mode: #GP(0)/#SS(0) if the segment is unusable.
4990 exn = (s.unusable != 0);
4993 * Protected mode: #GP(0)/#SS(0) if the memory operand is
4994 * outside the segment limit. All CPUs that support VMX ignore
4995 * limit checks for flat segments, i.e. segments with base==0,
4996 * limit==0xffffffff and of type expand-up data or code.
4998 if (!(s.base == 0 && s.limit == 0xffffffff &&
4999 ((s.type & 8) || !(s.type & 4))))
5000 exn = exn || ((u64)off + len - 1 > s.limit);
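/*
 * E.g. for a (hypothetical) expand-up data segment with limit 0xffff, an
 * 8-byte access at offset 0xfffc spans 0xfffc..0x10003 and therefore fails
 * the check above, yielding #GP(0) (or #SS(0) if the segment is SS).
 */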
5003 kvm_queue_exception_e(vcpu,
5004 seg_reg == VCPU_SREG_SS ?
5005 SS_VECTOR : GP_VECTOR,
5013 static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer,
5017 struct x86_exception e;
5020 if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu),
5021 vmcs_read32(VMX_INSTRUCTION_INFO), false,
5022 sizeof(*vmpointer), &gva)) {
5027 r = kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e);
5028 if (r != X86EMUL_CONTINUE) {
5029 *ret = kvm_handle_memory_failure(vcpu, r, &e);
5037 * Allocate a shadow VMCS and associate it with the currently loaded
5038 * VMCS, unless such a shadow VMCS already exists. The newly allocated
5039 * VMCS is also VMCLEARed, so that it is ready for use.
5041 static struct vmcs *alloc_shadow_vmcs(struct kvm_vcpu *vcpu)
5043 struct vcpu_vmx *vmx = to_vmx(vcpu);
5044 struct loaded_vmcs *loaded_vmcs = vmx->loaded_vmcs;
5047 * KVM allocates a shadow VMCS only when L1 executes VMXON and frees it
5048 * when L1 executes VMXOFF or the vCPU is forced out of nested
5049 * operation. VMXON faults if the CPU is already post-VMXON, so it
5050 * should be impossible to already have an allocated shadow VMCS. KVM
5051 * doesn't support virtualization of VMCS shadowing, so vmcs01 should
5052 * always be the loaded VMCS.
5054 if (WARN_ON(loaded_vmcs != &vmx->vmcs01 || loaded_vmcs->shadow_vmcs))
5055 return loaded_vmcs->shadow_vmcs;
5057 loaded_vmcs->shadow_vmcs = alloc_vmcs(true);
5058 if (loaded_vmcs->shadow_vmcs)
5059 vmcs_clear(loaded_vmcs->shadow_vmcs);
5061 return loaded_vmcs->shadow_vmcs;
5064 static int enter_vmx_operation(struct kvm_vcpu *vcpu)
5066 struct vcpu_vmx *vmx = to_vmx(vcpu);
5069 r = alloc_loaded_vmcs(&vmx->nested.vmcs02);
5073 vmx->nested.cached_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT);
5074 if (!vmx->nested.cached_vmcs12)
5075 goto out_cached_vmcs12;
5077 vmx->nested.shadow_vmcs12_cache.gpa = INVALID_GPA;
5078 vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT);
5079 if (!vmx->nested.cached_shadow_vmcs12)
5080 goto out_cached_shadow_vmcs12;
5082 if (enable_shadow_vmcs && !alloc_shadow_vmcs(vcpu))
5083 goto out_shadow_vmcs;
5085 hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC,
5086 HRTIMER_MODE_ABS_PINNED);
5087 vmx->nested.preemption_timer.function = vmx_preemption_timer_fn;
5089 vmx->nested.vpid02 = allocate_vpid();
5091 vmx->nested.vmcs02_initialized = false;
5092 vmx->nested.vmxon = true;
5094 if (vmx_pt_mode_is_host_guest()) {
5095 vmx->pt_desc.guest.ctl = 0;
5096 pt_update_intercept_for_msr(vcpu);
5102 kfree(vmx->nested.cached_shadow_vmcs12);
5104 out_cached_shadow_vmcs12:
5105 kfree(vmx->nested.cached_vmcs12);
5108 free_loaded_vmcs(&vmx->nested.vmcs02);
5114 /* Emulate the VMXON instruction. */
5115 static int handle_vmxon(struct kvm_vcpu *vcpu)
5120 struct vcpu_vmx *vmx = to_vmx(vcpu);
5121 const u64 VMXON_NEEDED_FEATURES = FEAT_CTL_LOCKED
5122 | FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX;
5125 * Manually check CR4.VMXE; KVM must force CR4.VMXE=1 to enter
5126 * the guest and so cannot rely on hardware to perform the check,
5127 * which has higher priority than VM-Exit (see Intel SDM's pseudocode
5130 * Rely on hardware for the other pre-VM-Exit checks, CR0.PE=1, !VM86
5131 * and !COMPATIBILITY modes. For an unrestricted guest, KVM doesn't
5132 * force any of the relevant guest state. For a restricted guest, KVM
5133 * does force CR0.PE=1, but only to also force VM86 in order to emulate
5134 * Real Mode, and so there's no need to check CR0.PE manually.
5136 if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE)) {
5137 kvm_queue_exception(vcpu, UD_VECTOR);
5142 * The CPL is checked for "not in VMX operation" and for "in VMX root",
5143 * and has higher priority than the VM-Fail due to being post-VMXON,
5144 * i.e. VMXON #GPs outside of VMX non-root if CPL!=0. In VMX non-root,
5145 * VMXON causes VM-Exit and KVM unconditionally forwards VMXON VM-Exits
5146 * from L2 to L1, i.e. there's no need to check for the vCPU being in
5149 * Forwarding the VM-Exit unconditionally, i.e. without performing the
5150 * #UD checks (see above), is functionally ok because KVM doesn't allow
5151 * L1 to run L2 with CR4.VMXE=0, and because KVM never modifies L2's
5152 * CR0 or CR4, i.e. it's L1's responsibility to emulate #UDs that are
5153 * missed by hardware due to shadowing CR0 and/or CR4.
5155 if (vmx_get_cpl(vcpu)) {
5156 kvm_inject_gp(vcpu, 0);
5160 if (vmx->nested.vmxon)
5161 return nested_vmx_fail(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION);
5164 * Invalid CR0/CR4 generates #GP. These checks are performed if and
5165 * only if the vCPU isn't already in VMX operation, i.e. effectively
5166 * have lower priority than the VM-Fail above.
5168 if (!nested_host_cr0_valid(vcpu, kvm_read_cr0(vcpu)) ||
5169 !nested_host_cr4_valid(vcpu, kvm_read_cr4(vcpu))) {
5170 kvm_inject_gp(vcpu, 0);
5174 if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES)
5175 != VMXON_NEEDED_FEATURES) {
5176 kvm_inject_gp(vcpu, 0);
5180 if (nested_vmx_get_vmptr(vcpu, &vmptr, &ret))
5185 * The first 4 bytes of the VMXON region contain the supported
5186 * VMCS revision identifier.
5188 * Note - IA32_VMX_BASIC[48] will never be 1 for the nested case,
5189 * i.e. VMXON/VMCS physical addresses are never limited to 32 bits.
5191 if (!page_address_valid(vcpu, vmptr))
5192 return nested_vmx_failInvalid(vcpu);
5194 if (kvm_read_guest(vcpu->kvm, vmptr, &revision, sizeof(revision)) ||
5195 revision != VMCS12_REVISION)
5196 return nested_vmx_failInvalid(vcpu);
5198 vmx->nested.vmxon_ptr = vmptr;
5199 ret = enter_vmx_operation(vcpu);
5203 return nested_vmx_succeed(vcpu);
5206 static inline void nested_release_vmcs12(struct kvm_vcpu *vcpu)
5208 struct vcpu_vmx *vmx = to_vmx(vcpu);
5210 if (vmx->nested.current_vmptr == INVALID_GPA)
5213 copy_vmcs02_to_vmcs12_rare(vcpu, get_vmcs12(vcpu));
5215 if (enable_shadow_vmcs) {
5216 /* copy to memory all shadowed fields in case
5217 they were modified */
5218 copy_shadow_to_vmcs12(vmx);
5219 vmx_disable_shadow_vmcs(vmx);
5221 vmx->nested.posted_intr_nv = -1;
5223 /* Flush VMCS12 to guest memory */
5224 kvm_vcpu_write_guest_page(vcpu,
5225 vmx->nested.current_vmptr >> PAGE_SHIFT,
5226 vmx->nested.cached_vmcs12, 0, VMCS12_SIZE);
5228 kvm_mmu_free_roots(vcpu->kvm, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
5230 vmx->nested.current_vmptr = INVALID_GPA;
5233 /* Emulate the VMXOFF instruction */
5234 static int handle_vmxoff(struct kvm_vcpu *vcpu)
5236 if (!nested_vmx_check_permission(vcpu))
5241 if (kvm_apic_has_pending_init_or_sipi(vcpu))
5242 kvm_make_request(KVM_REQ_EVENT, vcpu);
5244 return nested_vmx_succeed(vcpu);
5247 /* Emulate the VMCLEAR instruction */
5248 static int handle_vmclear(struct kvm_vcpu *vcpu)
5250 struct vcpu_vmx *vmx = to_vmx(vcpu);
5256 if (!nested_vmx_check_permission(vcpu))
5259 if (nested_vmx_get_vmptr(vcpu, &vmptr, &r))
5262 if (!page_address_valid(vcpu, vmptr))
5263 return nested_vmx_fail(vcpu, VMXERR_VMCLEAR_INVALID_ADDRESS);
5265 if (vmptr == vmx->nested.vmxon_ptr)
5266 return nested_vmx_fail(vcpu, VMXERR_VMCLEAR_VMXON_POINTER);
5269 * When Enlightened VMEntry is enabled on the calling CPU we treat
5270 * memory area pointed to by vmptr as Enlightened VMCS (as there's no good
5271 * way to distinguish it from VMCS12) and we must not corrupt it by
5272 * writing to the non-existent 'launch_state' field. The area doesn't
5273 * have to be the currently active EVMCS on the calling CPU and there's
5274 * nothing KVM has to do to transition it from 'active' to 'non-active'
5275 * state. It is possible that the area will stay mapped as
5276 * vmx->nested.hv_evmcs but this shouldn't be a problem.
5278 if (likely(!guest_cpuid_has_evmcs(vcpu) ||
5279 !nested_enlightened_vmentry(vcpu, &evmcs_gpa))) {
5280 if (vmptr == vmx->nested.current_vmptr)
5281 nested_release_vmcs12(vcpu);
5283 kvm_vcpu_write_guest(vcpu,
5284 vmptr + offsetof(struct vmcs12,
5286 &zero, sizeof(zero));
5287 } else if (vmx->nested.hv_evmcs && vmptr == vmx->nested.hv_evmcs_vmptr) {
5288 nested_release_evmcs(vcpu);
5291 return nested_vmx_succeed(vcpu);
5294 /* Emulate the VMLAUNCH instruction */
5295 static int handle_vmlaunch(struct kvm_vcpu *vcpu)
5297 return nested_vmx_run(vcpu, true);
5300 /* Emulate the VMRESUME instruction */
5301 static int handle_vmresume(struct kvm_vcpu *vcpu)
5304 return nested_vmx_run(vcpu, false);
5307 static int handle_vmread(struct kvm_vcpu *vcpu)
5309 struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu)
5311 unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
5312 u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
5313 struct vcpu_vmx *vmx = to_vmx(vcpu);
5314 struct x86_exception e;
5315 unsigned long field;
5321 if (!nested_vmx_check_permission(vcpu))
5324 /* Decode instruction info and find the field to read */
5325 field = kvm_register_read(vcpu, (((instr_info) >> 28) & 0xf));
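/*
 * Bits 31:28 of the instruction info identify the GPR holding the VMCS
 * field encoding, using the standard register numbering; e.g. a
 * (hypothetical) value of 2 in those bits means the encoding is in RDX.
 */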
5327 if (!evmptr_is_valid(vmx->nested.hv_evmcs_vmptr)) {
5329 * In VMX non-root operation, when the VMCS-link pointer is INVALID_GPA,
5330 * any VMREAD sets the ALU flags for VMfailInvalid.
5332 if (vmx->nested.current_vmptr == INVALID_GPA ||
5333 (is_guest_mode(vcpu) &&
5334 get_vmcs12(vcpu)->vmcs_link_pointer == INVALID_GPA))
5335 return nested_vmx_failInvalid(vcpu);
5337 offset = get_vmcs12_field_offset(field);
5339 return nested_vmx_fail(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
5341 if (!is_guest_mode(vcpu) && is_vmcs12_ext_field(field))
5342 copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
5344 /* Read the field, zero-extended to a u64 value */
5345 value = vmcs12_read_any(vmcs12, field, offset);
5348 * Hyper-V TLFS (as of 6.0b) explicitly states that while an
5349 * enlightened VMCS is active, VMREAD/VMWRITE instructions are
5350 * unsupported. Unfortunately, certain versions of Windows 11
5351 * don't comply with this requirement which is not enforced in
5352 * genuine Hyper-V. Allow VMREAD from an enlightened VMCS as a
5353 * workaround, as misbehaving guests will panic on VM-Fail.
5354 * Note, enlightened VMCS is incompatible with shadow VMCS so
5355 * all VMREADs from L2 should go to L1.
5357 if (WARN_ON_ONCE(is_guest_mode(vcpu)))
5358 return nested_vmx_failInvalid(vcpu);
5360 offset = evmcs_field_offset(field, NULL);
5362 return nested_vmx_fail(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
5364 /* Read the field, zero-extended to a u64 value */
5365 value = evmcs_read_any(vmx->nested.hv_evmcs, field, offset);
5369 * Now copy part of this value to register or memory, as requested.
5370 * Note that the number of bits actually copied is 32 or 64 depending
5371 * on the guest's mode (32 or 64 bit), not on the given field's length.
5373 if (instr_info & BIT(10)) {
5374 kvm_register_write(vcpu, (((instr_info) >> 3) & 0xf), value);
5376 len = is_64_bit_mode(vcpu) ? 8 : 4;
5377 if (get_vmx_mem_address(vcpu, exit_qualification,
5378 instr_info, true, len, &gva))
5380 /* _system ok, nested_vmx_check_permission has verified cpl=0 */
5381 r = kvm_write_guest_virt_system(vcpu, gva, &value, len, &e);
5382 if (r != X86EMUL_CONTINUE)
5383 return kvm_handle_memory_failure(vcpu, r, &e);
5386 return nested_vmx_succeed(vcpu);
5389 static bool is_shadow_field_rw(unsigned long field)
5392 #define SHADOW_FIELD_RW(x, y) case x:
5393 #include "vmcs_shadow_fields.h"
5401 static bool is_shadow_field_ro(unsigned long field)
5404 #define SHADOW_FIELD_RO(x, y) case x:
5405 #include "vmcs_shadow_fields.h"
5413 static int handle_vmwrite(struct kvm_vcpu *vcpu)
5415 struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu)
5417 unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
5418 u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
5419 struct vcpu_vmx *vmx = to_vmx(vcpu);
5420 struct x86_exception e;
5421 unsigned long field;
5427 * The value to write might be 32 or 64 bits, depending on L1's long
5428 * mode, and eventually we need to write that into a field of several
5429 * possible lengths. The code below first zero-extends the value to 64
5430 * bit (value), and then copies only the appropriate number of
5431 * bits into the vmcs12 field.
5435 if (!nested_vmx_check_permission(vcpu))
5439 * In VMX non-root operation, when the VMCS-link pointer is INVALID_GPA,
5440 * any VMWRITE sets the ALU flags for VMfailInvalid.
5442 if (vmx->nested.current_vmptr == INVALID_GPA ||
5443 (is_guest_mode(vcpu) &&
5444 get_vmcs12(vcpu)->vmcs_link_pointer == INVALID_GPA))
5445 return nested_vmx_failInvalid(vcpu);
5447 if (instr_info & BIT(10))
5448 value = kvm_register_read(vcpu, (((instr_info) >> 3) & 0xf));
5450 len = is_64_bit_mode(vcpu) ? 8 : 4;
5451 if (get_vmx_mem_address(vcpu, exit_qualification,
5452 instr_info, false, len, &gva))
5454 r = kvm_read_guest_virt(vcpu, gva, &value, len, &e);
5455 if (r != X86EMUL_CONTINUE)
5456 return kvm_handle_memory_failure(vcpu, r, &e);
5459 field = kvm_register_read(vcpu, (((instr_info) >> 28) & 0xf));
5461 offset = get_vmcs12_field_offset(field);
5463 return nested_vmx_fail(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
5466 * If the vCPU supports "VMWRITE to any supported field in the
5467 * VMCS," then the "read-only" fields are actually read/write.
5469 if (vmcs_field_readonly(field) &&
5470 !nested_cpu_has_vmwrite_any_field(vcpu))
5471 return nested_vmx_fail(vcpu, VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);
5474 * Ensure vmcs12 is up-to-date before any VMWRITE that dirties
5475 * vmcs12, else we may crush a field or consume a stale value.
5477 if (!is_guest_mode(vcpu) && !is_shadow_field_rw(field))
5478 copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
5481 * Some Intel CPUs intentionally drop the reserved bits of the AR byte
5482 * fields on VMWRITE. Emulate this behavior to ensure consistent KVM
5483 * behavior regardless of the underlying hardware, e.g. if an AR_BYTE
5484 * field is intercepted for VMWRITE but not VMREAD (in L1), then VMREAD
5485 * from L1 will return a different value than VMREAD from L2 (L1 sees
5486 * the stripped down value, L2 sees the full value as stored by KVM).
5488 if (field >= GUEST_ES_AR_BYTES && field <= GUEST_TR_AR_BYTES)
5491 vmcs12_write_any(vmcs12, field, offset, value);
5494 * Do not track vmcs12 dirty-state if in guest-mode as we actually
5495 * dirty shadow vmcs12 instead of vmcs12. Fields that can be updated
5496 * by L1 without a vmexit are always updated in the vmcs02, i.e. don't
5497 * "dirty" vmcs12, all others go down the prepare_vmcs02() slow path.
5499 if (!is_guest_mode(vcpu) && !is_shadow_field_rw(field)) {
5501 * L1 can read these fields without exiting, ensure the
5502 * shadow VMCS is up-to-date.
5504 if (enable_shadow_vmcs && is_shadow_field_ro(field)) {
5506 vmcs_load(vmx->vmcs01.shadow_vmcs);
5508 __vmcs_writel(field, value);
5510 vmcs_clear(vmx->vmcs01.shadow_vmcs);
5511 vmcs_load(vmx->loaded_vmcs->vmcs);
5514 vmx->nested.dirty_vmcs12 = true;
5517 return nested_vmx_succeed(vcpu);
5520 static void set_current_vmptr(struct vcpu_vmx *vmx, gpa_t vmptr)
5522 vmx->nested.current_vmptr = vmptr;
5523 if (enable_shadow_vmcs) {
5524 secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_SHADOW_VMCS);
5525 vmcs_write64(VMCS_LINK_POINTER,
5526 __pa(vmx->vmcs01.shadow_vmcs));
5527 vmx->nested.need_vmcs12_to_shadow_sync = true;
5529 vmx->nested.dirty_vmcs12 = true;
5530 vmx->nested.force_msr_bitmap_recalc = true;
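/*
 * Note: loading a new current VMCS invalidates state derived from the old
 * one, hence dirty_vmcs12 (vmcs02 must be rebuilt from the new vmcs12 on
 * the next nested VM-Enter) and force_msr_bitmap_recalc (the MSR bitmap
 * merged from vmcs01 and vmcs12 must be recomputed).
 */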
5533 /* Emulate the VMPTRLD instruction */
5534 static int handle_vmptrld(struct kvm_vcpu *vcpu)
5536 struct vcpu_vmx *vmx = to_vmx(vcpu);
5540 if (!nested_vmx_check_permission(vcpu))
5543 if (nested_vmx_get_vmptr(vcpu, &vmptr, &r))
5546 if (!page_address_valid(vcpu, vmptr))
5547 return nested_vmx_fail(vcpu, VMXERR_VMPTRLD_INVALID_ADDRESS);
5549 if (vmptr == vmx->nested.vmxon_ptr)
5550 return nested_vmx_fail(vcpu, VMXERR_VMPTRLD_VMXON_POINTER);
5552 /* Forbid normal VMPTRLD if Enlightened version was used */
5553 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
5556 if (vmx->nested.current_vmptr != vmptr) {
5557 struct gfn_to_hva_cache *ghc = &vmx->nested.vmcs12_cache;
5558 struct vmcs_hdr hdr;
5560 if (kvm_gfn_to_hva_cache_init(vcpu->kvm, ghc, vmptr, VMCS12_SIZE)) {
5562 * Reads from an unbacked page return all 1s,
5563 * which means that the 32 bits located at the
5564 * given physical address won't match the required
5565 * VMCS12_REVISION identifier.
5567 return nested_vmx_fail(vcpu,
5568 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
5571 if (kvm_read_guest_offset_cached(vcpu->kvm, ghc, &hdr,
5572 offsetof(struct vmcs12, hdr),
5574 return nested_vmx_fail(vcpu,
5575 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
5578 if (hdr.revision_id != VMCS12_REVISION ||
5580 !nested_cpu_has_vmx_shadow_vmcs(vcpu))) {
5581 return nested_vmx_fail(vcpu,
5582 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
5585 nested_release_vmcs12(vcpu);
5588 * Load VMCS12 from guest memory since it is not already cached.
5591 if (kvm_read_guest_cached(vcpu->kvm, ghc, vmx->nested.cached_vmcs12,
5593 return nested_vmx_fail(vcpu,
5594 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
5597 set_current_vmptr(vmx, vmptr);
5600 return nested_vmx_succeed(vcpu);
5603 /* Emulate the VMPTRST instruction */
5604 static int handle_vmptrst(struct kvm_vcpu *vcpu)
5606 unsigned long exit_qual = vmx_get_exit_qual(vcpu);
5607 u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
5608 gpa_t current_vmptr = to_vmx(vcpu)->nested.current_vmptr;
5609 struct x86_exception e;
5613 if (!nested_vmx_check_permission(vcpu))
5616 if (unlikely(evmptr_is_valid(to_vmx(vcpu)->nested.hv_evmcs_vmptr)))
5619 if (get_vmx_mem_address(vcpu, exit_qual, instr_info,
5620 true, sizeof(gpa_t), &gva))
5622 /* *_system ok, nested_vmx_check_permission has verified cpl=0 */
5623 r = kvm_write_guest_virt_system(vcpu, gva, (void *)&current_vmptr,
5625 if (r != X86EMUL_CONTINUE)
5626 return kvm_handle_memory_failure(vcpu, r, &e);
5628 return nested_vmx_succeed(vcpu);
5631 /* Emulate the INVEPT instruction */
5632 static int handle_invept(struct kvm_vcpu *vcpu)
5634 struct vcpu_vmx *vmx = to_vmx(vcpu);
5635 u32 vmx_instruction_info, types;
5636 unsigned long type, roots_to_free;
5637 struct kvm_mmu *mmu;
5639 struct x86_exception e;
5643 int i, r, gpr_index;
5645 if (!(vmx->nested.msrs.secondary_ctls_high &
5646 SECONDARY_EXEC_ENABLE_EPT) ||
5647 !(vmx->nested.msrs.ept_caps & VMX_EPT_INVEPT_BIT)) {
5648 kvm_queue_exception(vcpu, UD_VECTOR);
5652 if (!nested_vmx_check_permission(vcpu))
5655 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
5656 gpr_index = vmx_get_instr_info_reg2(vmx_instruction_info);
5657 type = kvm_register_read(vcpu, gpr_index);
5659 types = (vmx->nested.msrs.ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;
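/*
 * After the shift, bit 1 reflects single-context INVEPT support and bit 2
 * all-context support (bits 25 and 26 of the EPT/VPID capability MSR), so
 * masking with 6 leaves exactly the bits indexed by the architectural
 * INVEPT types 1 and 2 that are tested below.
 */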
5661 if (type >= 32 || !(types & (1 << type)))
5662 return nested_vmx_fail(vcpu, VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
5664 /* According to the Intel VMX instruction reference, the memory
5665 * operand is read even if it isn't needed (e.g., for type==global)
5667 if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu),
5668 vmx_instruction_info, false, sizeof(operand), &gva))
5670 r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e);
5671 if (r != X86EMUL_CONTINUE)
5672 return kvm_handle_memory_failure(vcpu, r, &e);
5675 * Nested EPT roots are always held through guest_mmu, regardless of EPTP switching.
5678 mmu = &vcpu->arch.guest_mmu;
5681 case VMX_EPT_EXTENT_CONTEXT:
5682 if (!nested_vmx_check_eptp(vcpu, operand.eptp))
5683 return nested_vmx_fail(vcpu,
5684 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
5687 if (nested_ept_root_matches(mmu->root.hpa, mmu->root.pgd,
5689 roots_to_free |= KVM_MMU_ROOT_CURRENT;
5691 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
5692 if (nested_ept_root_matches(mmu->prev_roots[i].hpa,
5693 mmu->prev_roots[i].pgd,
5695 roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
5698 case VMX_EPT_EXTENT_GLOBAL:
5699 roots_to_free = KVM_MMU_ROOTS_ALL;
5707 kvm_mmu_free_roots(vcpu->kvm, mmu, roots_to_free);
5709 return nested_vmx_succeed(vcpu);
5712 static int handle_invvpid(struct kvm_vcpu *vcpu)
5714 struct vcpu_vmx *vmx = to_vmx(vcpu);
5715 u32 vmx_instruction_info;
5716 unsigned long type, types;
5718 struct x86_exception e;
5726 if (!(vmx->nested.msrs.secondary_ctls_high &
5727 SECONDARY_EXEC_ENABLE_VPID) ||
5728 !(vmx->nested.msrs.vpid_caps & VMX_VPID_INVVPID_BIT)) {
5729 kvm_queue_exception(vcpu, UD_VECTOR);
5733 if (!nested_vmx_check_permission(vcpu))
5736 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
5737 gpr_index = vmx_get_instr_info_reg2(vmx_instruction_info);
5738 type = kvm_register_read(vcpu, gpr_index);
5740 types = (vmx->nested.msrs.vpid_caps &
5741 VMX_VPID_EXTENT_SUPPORTED_MASK) >> 8;
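/*
 * The INVVPID extent-support bits live at bits 8-11 of the VPID
 * capabilities, so shifting right by 8 lines them up with the INVVPID
 * types 0-3 tested below.
 */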
5743 if (type >= 32 || !(types & (1 << type)))
5744 return nested_vmx_fail(vcpu,
5745 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
5747 /* According to the Intel VMX instruction reference, the memory
5748 * operand is read even if it isn't needed (e.g., for type==global)
5750 if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu),
5751 vmx_instruction_info, false, sizeof(operand), &gva))
5753 r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e);
5754 if (r != X86EMUL_CONTINUE)
5755 return kvm_handle_memory_failure(vcpu, r, &e);
5757 if (operand.vpid >> 16)
5758 return nested_vmx_fail(vcpu,
5759 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
5761 vpid02 = nested_get_vpid02(vcpu);
5763 case VMX_VPID_EXTENT_INDIVIDUAL_ADDR:
5764 if (!operand.vpid ||
5765 is_noncanonical_address(operand.gla, vcpu))
5766 return nested_vmx_fail(vcpu,
5767 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
5768 vpid_sync_vcpu_addr(vpid02, operand.gla);
5770 case VMX_VPID_EXTENT_SINGLE_CONTEXT:
5771 case VMX_VPID_EXTENT_SINGLE_NON_GLOBAL:
5773 return nested_vmx_fail(vcpu,
5774 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
5775 vpid_sync_context(vpid02);
5777 case VMX_VPID_EXTENT_ALL_CONTEXT:
5778 vpid_sync_context(vpid02);
5782 return kvm_skip_emulated_instruction(vcpu);
5786 * Sync the shadow page tables if EPT is disabled, L1 is invalidating
5787 * linear mappings for L2 (tagged with L2's VPID). Free all guest
5788 * roots as VPIDs are not tracked in the MMU role.
5790 * Note, this operates on root_mmu, not guest_mmu, as L1 and L2 share
5791 * an MMU when EPT is disabled.
5793 * TODO: sync only the affected SPTEs for INDIVIDUAL_ADDR.
5796 kvm_mmu_free_guest_mode_roots(vcpu->kvm, &vcpu->arch.root_mmu);
5798 return nested_vmx_succeed(vcpu);
5801 static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu,
5802 struct vmcs12 *vmcs12)
5804 u32 index = kvm_rcx_read(vcpu);
5807 if (WARN_ON_ONCE(!nested_cpu_has_ept(vmcs12)))
5809 if (index >= VMFUNC_EPTP_ENTRIES)
5812 if (kvm_vcpu_read_guest_page(vcpu, vmcs12->eptp_list_address >> PAGE_SHIFT,
5813 &new_eptp, index * 8, 8))
5817 * If the (L2) guest does a vmfunc to the currently
5818 * active ept pointer, we don't have to do anything else
5820 if (vmcs12->ept_pointer != new_eptp) {
5821 if (!nested_vmx_check_eptp(vcpu, new_eptp))
5824 vmcs12->ept_pointer = new_eptp;
5825 nested_ept_new_eptp(vcpu);
5827 if (!nested_cpu_has_vpid(vmcs12))
5828 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
5834 static int handle_vmfunc(struct kvm_vcpu *vcpu)
5836 struct vcpu_vmx *vmx = to_vmx(vcpu);
5837 struct vmcs12 *vmcs12;
5838 u32 function = kvm_rax_read(vcpu);
5841 * VMFUNC is only supported for nested guests, but we always enable the
5842 * secondary control for simplicity; for non-nested mode, fake that we
5843 * didn't enable it by injecting #UD.
5845 if (!is_guest_mode(vcpu)) {
5846 kvm_queue_exception(vcpu, UD_VECTOR);
5850 vmcs12 = get_vmcs12(vcpu);
5853 * #UD on out-of-bounds function has priority over VM-Exit, and VMFUNC
5854 * is enabled in vmcs02 if and only if it's enabled in vmcs12.
5856 if (WARN_ON_ONCE((function > 63) || !nested_cpu_has_vmfunc(vmcs12))) {
5857 kvm_queue_exception(vcpu, UD_VECTOR);
5861 if (!(vmcs12->vm_function_control & BIT_ULL(function)))
5866 if (nested_vmx_eptp_switching(vcpu, vmcs12))
5872 return kvm_skip_emulated_instruction(vcpu);
5876 * This is effectively a reflected VM-Exit, as opposed to a synthesized
5877 * nested VM-Exit. Pass the original exit reason, i.e. don't hardcode
5878 * EXIT_REASON_VMFUNC as the exit reason.
5880 nested_vmx_vmexit(vcpu, vmx->exit_reason.full,
5881 vmx_get_intr_info(vcpu),
5882 vmx_get_exit_qual(vcpu));
5887 * Return true if an IO instruction with the specified port and size should cause
5888 * a VM-exit into L1.
5890 bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu, unsigned int port,
5893 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
5894 gpa_t bitmap, last_bitmap;
5897 last_bitmap = INVALID_GPA;
5902 bitmap = vmcs12->io_bitmap_a;
5903 else if (port < 0x10000)
5904 bitmap = vmcs12->io_bitmap_b;
5907 bitmap += (port & 0x7fff) / 8;
5909 if (last_bitmap != bitmap)
5910 if (kvm_vcpu_read_guest(vcpu, bitmap, &b, 1))
5912 if (b & (1 << (port & 7)))
5917 last_bitmap = bitmap;
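/*
 * Layout reminder: I/O bitmap A covers ports 0x0000-0x7fff and bitmap B
 * covers 0x8000-0xffff, one bit per port. A multi-byte access is walked
 * one port at a time; last_bitmap remembers which guest byte was read
 * last so neighbouring ports that share a byte don't trigger another
 * guest memory read.
 */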
5923 static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
5924 struct vmcs12 *vmcs12)
5926 unsigned long exit_qualification;
5927 unsigned short port;
5930 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
5931 return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING);
5933 exit_qualification = vmx_get_exit_qual(vcpu);
5935 port = exit_qualification >> 16;
5936 size = (exit_qualification & 7) + 1;
5938 return nested_vmx_check_io_bitmaps(vcpu, port, size);
5942 * Return 1 if we should exit from L2 to L1 to handle an MSR access,
5943 * rather than handle it ourselves in L0. I.e., check whether L1 expressed
5944 * disinterest in the current event (read or write a specific MSR) by using an
5945 * MSR bitmap. This may be the case even when L0 doesn't use MSR bitmaps.
5947 static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
5948 struct vmcs12 *vmcs12,
5949 union vmx_exit_reason exit_reason)
5951 u32 msr_index = kvm_rcx_read(vcpu);
5954 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
5958 * The MSR_BITMAP page is divided into four 1024-byte bitmaps,
5959 * for the four combinations of read/write and low/high MSR numbers.
5960 * First we need to figure out which of the four to use:
5962 bitmap = vmcs12->msr_bitmap;
5963 if (exit_reason.basic == EXIT_REASON_MSR_WRITE)
5965 if (msr_index >= 0xc0000000) {
5966 msr_index -= 0xc0000000;
5970 /* Then read the msr_index'th bit from this bitmap: */
5971 if (msr_index < 1024*8) {
5973 if (kvm_vcpu_read_guest(vcpu, bitmap + msr_index/8, &b, 1))
5975 return 1 & (b >> (msr_index & 7));
5977 return true; /* let L1 handle the wrong parameter */
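/*
 * For reference, the 4K MSR bitmap is split into four 1K quadrants:
 * reads of MSRs 0x00000000-0x00001fff at offset 0x000, reads of
 * 0xc0000000-0xc0001fff at 0x400, and the corresponding write bitmaps at
 * 0x800 and 0xc00, which is why the write and high-MSR cases above adjust
 * the bitmap base before the per-MSR bit is tested.
 */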
5981 * Return 1 if we should exit from L2 to L1 to handle a CR access exit,
5982 * rather than handle it ourselves in L0. I.e., check if L1 wanted to
5983 * intercept (via guest_host_mask etc.) the current event.
5985 static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
5986 struct vmcs12 *vmcs12)
5988 unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
5989 int cr = exit_qualification & 15;
5993 switch ((exit_qualification >> 4) & 3) {
5994 case 0: /* mov to cr */
5995 reg = (exit_qualification >> 8) & 15;
5996 val = kvm_register_read(vcpu, reg);
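/*
 * For MOV-to-CR0/CR4 the rule below is: reflect the write to L1 only if
 * it flips a bit that L1 owns, i.e. a bit set in crN_guest_host_mask
 * whose new value differs from the crN_read_shadow value L1 expects the
 * guest to see. CR3 and CR8 loads instead follow the corresponding
 * load-exiting controls.
 */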
5999 if (vmcs12->cr0_guest_host_mask &
6000 (val ^ vmcs12->cr0_read_shadow))
6004 if (nested_cpu_has(vmcs12, CPU_BASED_CR3_LOAD_EXITING))
6008 if (vmcs12->cr4_guest_host_mask &
6009 (vmcs12->cr4_read_shadow ^ val))
6013 if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING))
6019 if ((vmcs12->cr0_guest_host_mask & X86_CR0_TS) &&
6020 (vmcs12->cr0_read_shadow & X86_CR0_TS))
6023 case 1: /* mov from cr */
6026 if (vmcs12->cpu_based_vm_exec_control &
6027 CPU_BASED_CR3_STORE_EXITING)
6031 if (vmcs12->cpu_based_vm_exec_control &
6032 CPU_BASED_CR8_STORE_EXITING)
6039 * lmsw can change bits 1..3 of cr0, and only set bit 0 of
6040 * cr0. Other attempted changes are ignored, with no exit.
6042 val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
6043 if (vmcs12->cr0_guest_host_mask & 0xe &
6044 (val ^ vmcs12->cr0_read_shadow))
6046 if ((vmcs12->cr0_guest_host_mask & 0x1) &&
6047 !(vmcs12->cr0_read_shadow & 0x1) &&
6055 static bool nested_vmx_exit_handled_encls(struct kvm_vcpu *vcpu,
6056 struct vmcs12 *vmcs12)
6060 if (!guest_cpuid_has(vcpu, X86_FEATURE_SGX) ||
6061 !nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENCLS_EXITING))
6064 encls_leaf = kvm_rax_read(vcpu);
6065 if (encls_leaf > 62)
6067 return vmcs12->encls_exiting_bitmap & BIT_ULL(encls_leaf);
6070 static bool nested_vmx_exit_handled_vmcs_access(struct kvm_vcpu *vcpu,
6071 struct vmcs12 *vmcs12, gpa_t bitmap)
6073 u32 vmx_instruction_info;
6074 unsigned long field;
6077 if (!nested_cpu_has_shadow_vmcs(vmcs12))
6080 /* Decode instruction info and find the field to access */
6081 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
6082 field = kvm_register_read(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
6084 /* Out-of-range fields always cause a VM exit from L2 to L1 */
6088 if (kvm_vcpu_read_guest(vcpu, bitmap + field/8, &b, 1))
6091 return 1 & (b >> (field & 7));
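/*
 * The VMREAD/VMWRITE bitmaps referenced here are 4K pages with one bit
 * per field encoding (bits 14:0); a set bit, a read failure, or a field
 * value with any higher bits set sends the access to L1 instead of
 * letting the shadow VMCS satisfy it.
 */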
6094 static bool nested_vmx_exit_handled_mtf(struct vmcs12 *vmcs12)
6096 u32 entry_intr_info = vmcs12->vm_entry_intr_info_field;
6098 if (nested_cpu_has_mtf(vmcs12))
6102 * An MTF VM-exit may be injected into the guest by setting the
6103 * interruption-type to 7 (other event) and the vector field to 0. Such
6104 * is the case regardless of the 'monitor trap flag' VM-execution control.
6107 return entry_intr_info == (INTR_INFO_VALID_MASK
6108 | INTR_TYPE_OTHER_EVENT);
6112 * Return true if L0 wants to handle an exit from L2 regardless of whether or not
6113 * L1 wants the exit. Only call this when in is_guest_mode (L2).
6115 static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu,
6116 union vmx_exit_reason exit_reason)
6120 switch ((u16)exit_reason.basic) {
6121 case EXIT_REASON_EXCEPTION_NMI:
6122 intr_info = vmx_get_intr_info(vcpu);
6123 if (is_nmi(intr_info))
6125 else if (is_page_fault(intr_info))
6126 return vcpu->arch.apf.host_apf_flags ||
6127 vmx_need_pf_intercept(vcpu);
6128 else if (is_debug(intr_info) &&
6130 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
6132 else if (is_breakpoint(intr_info) &&
6133 vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
6135 else if (is_alignment_check(intr_info) &&
6136 !vmx_guest_inject_ac(vcpu))
6139 case EXIT_REASON_EXTERNAL_INTERRUPT:
6141 case EXIT_REASON_MCE_DURING_VMENTRY:
6143 case EXIT_REASON_EPT_VIOLATION:
6145 * L0 always deals with the EPT violation. If nested EPT is
6146 * used, and the nested mmu code discovers that the address is
6147 * missing in the guest EPT table (EPT12), the EPT violation
6148 * will be injected with nested_ept_inject_page_fault()
6151 case EXIT_REASON_EPT_MISCONFIG:
6153 * L2 never uses directly L1's EPT, but rather L0's own EPT
6154 * table (shadow on EPT) or a merged EPT table that L0 built
6155 * (EPT on EPT). So any problems with the structure of the
6156 * table is L0's fault.
6159 case EXIT_REASON_PREEMPTION_TIMER:
6161 case EXIT_REASON_PML_FULL:
6163 * PML is emulated for an L1 VMM and should never be enabled in
6164 * vmcs02; always "handle" PML_FULL by exiting to userspace.
6167 case EXIT_REASON_VMFUNC:
6168 /* VM functions are emulated through L2->L0 vmexits. */
6170 case EXIT_REASON_BUS_LOCK:
6172 * At present, bus lock VM exit is never exposed to L1.
6173 * Handle L2's bus locks in L0 directly.
6183 * Return 1 if L1 wants to intercept an exit from L2. Only call this when in
6184 * is_guest_mode (L2).
6186 static bool nested_vmx_l1_wants_exit(struct kvm_vcpu *vcpu,
6187 union vmx_exit_reason exit_reason)
6189 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
6192 switch ((u16)exit_reason.basic) {
6193 case EXIT_REASON_EXCEPTION_NMI:
6194 intr_info = vmx_get_intr_info(vcpu);
6195 if (is_nmi(intr_info))
6197 else if (is_page_fault(intr_info))
6199 return vmcs12->exception_bitmap &
6200 (1u << (intr_info & INTR_INFO_VECTOR_MASK));
6201 case EXIT_REASON_EXTERNAL_INTERRUPT:
6202 return nested_exit_on_intr(vcpu);
6203 case EXIT_REASON_TRIPLE_FAULT:
6205 case EXIT_REASON_INTERRUPT_WINDOW:
6206 return nested_cpu_has(vmcs12, CPU_BASED_INTR_WINDOW_EXITING);
6207 case EXIT_REASON_NMI_WINDOW:
6208 return nested_cpu_has(vmcs12, CPU_BASED_NMI_WINDOW_EXITING);
6209 case EXIT_REASON_TASK_SWITCH:
6211 case EXIT_REASON_CPUID:
6213 case EXIT_REASON_HLT:
6214 return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING);
6215 case EXIT_REASON_INVD:
6217 case EXIT_REASON_INVLPG:
6218 return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
6219 case EXIT_REASON_RDPMC:
6220 return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING);
6221 case EXIT_REASON_RDRAND:
6222 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDRAND_EXITING);
6223 case EXIT_REASON_RDSEED:
6224 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDSEED_EXITING);
6225 case EXIT_REASON_RDTSC: case EXIT_REASON_RDTSCP:
6226 return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING);
6227 case EXIT_REASON_VMREAD:
6228 return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12,
6229 vmcs12->vmread_bitmap);
6230 case EXIT_REASON_VMWRITE:
6231 return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12,
6232 vmcs12->vmwrite_bitmap);
6233 case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR:
6234 case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMPTRLD:
6235 case EXIT_REASON_VMPTRST: case EXIT_REASON_VMRESUME:
6236 case EXIT_REASON_VMOFF: case EXIT_REASON_VMON:
6237 case EXIT_REASON_INVEPT: case EXIT_REASON_INVVPID:
6239 * VMX instructions trap unconditionally. This allows L1 to
6240 * emulate them for its L2 guest, i.e., allows 3-level nesting!
6243 case EXIT_REASON_CR_ACCESS:
6244 return nested_vmx_exit_handled_cr(vcpu, vmcs12);
6245 case EXIT_REASON_DR_ACCESS:
6246 return nested_cpu_has(vmcs12, CPU_BASED_MOV_DR_EXITING);
6247 case EXIT_REASON_IO_INSTRUCTION:
6248 return nested_vmx_exit_handled_io(vcpu, vmcs12);
6249 case EXIT_REASON_GDTR_IDTR: case EXIT_REASON_LDTR_TR:
6250 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC);
6251 case EXIT_REASON_MSR_READ:
6252 case EXIT_REASON_MSR_WRITE:
6253 return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason);
6254 case EXIT_REASON_INVALID_STATE:
6256 case EXIT_REASON_MWAIT_INSTRUCTION:
6257 return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING);
6258 case EXIT_REASON_MONITOR_TRAP_FLAG:
6259 return nested_vmx_exit_handled_mtf(vmcs12);
6260 case EXIT_REASON_MONITOR_INSTRUCTION:
6261 return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING);
6262 case EXIT_REASON_PAUSE_INSTRUCTION:
6263 return nested_cpu_has(vmcs12, CPU_BASED_PAUSE_EXITING) ||
6264 nested_cpu_has2(vmcs12,
6265 SECONDARY_EXEC_PAUSE_LOOP_EXITING);
6266 case EXIT_REASON_MCE_DURING_VMENTRY:
6268 case EXIT_REASON_TPR_BELOW_THRESHOLD:
6269 return nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW);
6270 case EXIT_REASON_APIC_ACCESS:
6271 case EXIT_REASON_APIC_WRITE:
6272 case EXIT_REASON_EOI_INDUCED:
6274 * The controls for "virtualize APIC accesses," "APIC-
6275 * register virtualization," and "virtual-interrupt
6276 * delivery" only come from vmcs12.
6279 case EXIT_REASON_INVPCID:
6281 nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_INVPCID) &&
6282 nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
6283 case EXIT_REASON_WBINVD:
6284 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING);
6285 case EXIT_REASON_XSETBV:
6287 case EXIT_REASON_XSAVES: case EXIT_REASON_XRSTORS:
6289 * This should never happen, since it is not possible to
6290 * set XSS to a non-zero value---neither in L1 nor in L2.
6291 * If it were, XSS would have to be checked against
6292 * the XSS exit bitmap in vmcs12.
6294 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
6295 case EXIT_REASON_UMWAIT:
6296 case EXIT_REASON_TPAUSE:
6297 return nested_cpu_has2(vmcs12,
6298 SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE);
6299 case EXIT_REASON_ENCLS:
6300 return nested_vmx_exit_handled_encls(vcpu, vmcs12);
6301 case EXIT_REASON_NOTIFY:
6302 /* Notify VM exit is not exposed to L1 */
6310 * Conditionally reflect a VM-Exit into L1. Returns %true if the VM-Exit was
6311 * reflected into L1.
6313 bool nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu)
6315 struct vcpu_vmx *vmx = to_vmx(vcpu);
6316 union vmx_exit_reason exit_reason = vmx->exit_reason;
6317 unsigned long exit_qual;
6320 WARN_ON_ONCE(vmx->nested.nested_run_pending);
6323 * Late nested VM-Fail shares the same flow as nested VM-Exit since KVM
6324 * has already loaded L2's state.
6326 if (unlikely(vmx->fail)) {
6327 trace_kvm_nested_vmenter_failed(
6328 "hardware VM-instruction error: ",
6329 vmcs_read32(VM_INSTRUCTION_ERROR));
6332 goto reflect_vmexit;
6335 trace_kvm_nested_vmexit(vcpu, KVM_ISA_VMX);
6337 /* If L0 (KVM) wants the exit, it trumps L1's desires. */
6338 if (nested_vmx_l0_wants_exit(vcpu, exit_reason))
6341 /* If L1 doesn't want the exit, handle it in L0. */
6342 if (!nested_vmx_l1_wants_exit(vcpu, exit_reason))
6346 * vmcs.VM_EXIT_INTR_INFO is only valid for EXCEPTION_NMI exits. For
6347 * EXTERNAL_INTERRUPT, the value for vmcs12->vm_exit_intr_info would
6348 * need to be synthesized by querying the in-kernel LAPIC, but external
6349 * interrupts are never reflected to L1 so it's a non-issue.
6351 exit_intr_info = vmx_get_intr_info(vcpu);
6352 if (is_exception_with_error_code(exit_intr_info)) {
6353 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
6355 vmcs12->vm_exit_intr_error_code =
6356 vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
6358 exit_qual = vmx_get_exit_qual(vcpu);
6361 nested_vmx_vmexit(vcpu, exit_reason.full, exit_intr_info, exit_qual);
6365 static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
6366 struct kvm_nested_state __user *user_kvm_nested_state,
6369 struct vcpu_vmx *vmx;
6370 struct vmcs12 *vmcs12;
6371 struct kvm_nested_state kvm_state = {
6373 .format = KVM_STATE_NESTED_FORMAT_VMX,
6374 .size = sizeof(kvm_state),
6376 .hdr.vmx.vmxon_pa = INVALID_GPA,
6377 .hdr.vmx.vmcs12_pa = INVALID_GPA,
6378 .hdr.vmx.preemption_timer_deadline = 0,
6380 struct kvm_vmx_nested_state_data __user *user_vmx_nested_state =
6381 &user_kvm_nested_state->data.vmx[0];
6384 return kvm_state.size + sizeof(*user_vmx_nested_state);
6387 vmcs12 = get_vmcs12(vcpu);
6389 if (nested_vmx_allowed(vcpu) &&
6390 (vmx->nested.vmxon || vmx->nested.smm.vmxon)) {
6391 kvm_state.hdr.vmx.vmxon_pa = vmx->nested.vmxon_ptr;
6392 kvm_state.hdr.vmx.vmcs12_pa = vmx->nested.current_vmptr;
6394 if (vmx_has_valid_vmcs12(vcpu)) {
6395 kvm_state.size += sizeof(user_vmx_nested_state->vmcs12);
6397 /* 'hv_evmcs_vmptr' can also be EVMPTR_MAP_PENDING here */
6398 if (vmx->nested.hv_evmcs_vmptr != EVMPTR_INVALID)
6399 kvm_state.flags |= KVM_STATE_NESTED_EVMCS;
6401 if (is_guest_mode(vcpu) &&
6402 nested_cpu_has_shadow_vmcs(vmcs12) &&
6403 vmcs12->vmcs_link_pointer != INVALID_GPA)
6404 kvm_state.size += sizeof(user_vmx_nested_state->shadow_vmcs12);
6407 if (vmx->nested.smm.vmxon)
6408 kvm_state.hdr.vmx.smm.flags |= KVM_STATE_NESTED_SMM_VMXON;
6410 if (vmx->nested.smm.guest_mode)
6411 kvm_state.hdr.vmx.smm.flags |= KVM_STATE_NESTED_SMM_GUEST_MODE;
6413 if (is_guest_mode(vcpu)) {
6414 kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;
6416 if (vmx->nested.nested_run_pending)
6417 kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING;
6419 if (vmx->nested.mtf_pending)
6420 kvm_state.flags |= KVM_STATE_NESTED_MTF_PENDING;
6422 if (nested_cpu_has_preemption_timer(vmcs12) &&
6423 vmx->nested.has_preemption_timer_deadline) {
6424 kvm_state.hdr.vmx.flags |=
6425 KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE;
6426 kvm_state.hdr.vmx.preemption_timer_deadline =
6427 vmx->nested.preemption_timer_deadline;
6432 if (user_data_size < kvm_state.size)
6435 if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
6438 if (!vmx_has_valid_vmcs12(vcpu))
6442 * When running L2, the authoritative vmcs12 state is in the
6443 * vmcs02. When running L1, the authoritative vmcs12 state is
6444 * in the shadow or enlightened vmcs linked to vmcs01, unless
6445 * need_vmcs12_to_shadow_sync is set, in which case, the authoritative
6446 * vmcs12 state is in the vmcs12 already.
6448 if (is_guest_mode(vcpu)) {
6449 sync_vmcs02_to_vmcs12(vcpu, vmcs12);
6450 sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
6452 copy_vmcs02_to_vmcs12_rare(vcpu, get_vmcs12(vcpu));
6453 if (!vmx->nested.need_vmcs12_to_shadow_sync) {
6454 if (evmptr_is_valid(vmx->nested.hv_evmcs_vmptr))
6456 * The L1 hypervisor is not obliged to keep the eVMCS
6457 * clean-fields data up-to-date while not in guest
6458 * mode; 'hv_clean_fields' is only meaningful at
6459 * VM-entry, so we need to ignore it here and do a
6460 * full copy.
6462 copy_enlightened_to_vmcs12(vmx, 0);
6463 else if (enable_shadow_vmcs)
6464 copy_shadow_to_vmcs12(vmx);
6468 BUILD_BUG_ON(sizeof(user_vmx_nested_state->vmcs12) < VMCS12_SIZE);
6469 BUILD_BUG_ON(sizeof(user_vmx_nested_state->shadow_vmcs12) < VMCS12_SIZE);
6472 * Copy over the full allocated size of vmcs12 rather than just the size of the struct.
6475 if (copy_to_user(user_vmx_nested_state->vmcs12, vmcs12, VMCS12_SIZE))
6478 if (nested_cpu_has_shadow_vmcs(vmcs12) &&
6479 vmcs12->vmcs_link_pointer != INVALID_GPA) {
6480 if (copy_to_user(user_vmx_nested_state->shadow_vmcs12,
6481 get_shadow_vmcs12(vcpu), VMCS12_SIZE))
6485 return kvm_state.size;
6488 void vmx_leave_nested(struct kvm_vcpu *vcpu)
6490 if (is_guest_mode(vcpu)) {
6491 to_vmx(vcpu)->nested.nested_run_pending = 0;
6492 nested_vmx_vmexit(vcpu, -1, 0, 0);
6497 static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
6498 struct kvm_nested_state __user *user_kvm_nested_state,
6499 struct kvm_nested_state *kvm_state)
6501 struct vcpu_vmx *vmx = to_vmx(vcpu);
6502 struct vmcs12 *vmcs12;
6503 enum vm_entry_failure_code ignored;
6504 struct kvm_vmx_nested_state_data __user *user_vmx_nested_state =
6505 &user_kvm_nested_state->data.vmx[0];
6508 if (kvm_state->format != KVM_STATE_NESTED_FORMAT_VMX)
6511 if (kvm_state->hdr.vmx.vmxon_pa == INVALID_GPA) {
6512 if (kvm_state->hdr.vmx.smm.flags)
6515 if (kvm_state->hdr.vmx.vmcs12_pa != INVALID_GPA)
6519 * KVM_STATE_NESTED_EVMCS used to signal that KVM should
6520 * enable eVMCS capability on vCPU. However, since then
6521 * code was changed such that flag signals vmcs12 should
6522 * be copied into eVMCS in guest memory.
6524 * To preserve backwards compatibility, allow userspace
6525 * to set this flag even when there is no VMXON region.
6527 if (kvm_state->flags & ~KVM_STATE_NESTED_EVMCS)
6530 if (!nested_vmx_allowed(vcpu))
6533 if (!page_address_valid(vcpu, kvm_state->hdr.vmx.vmxon_pa))
6537 if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
6538 (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
6541 if (kvm_state->hdr.vmx.smm.flags &
6542 ~(KVM_STATE_NESTED_SMM_GUEST_MODE | KVM_STATE_NESTED_SMM_VMXON))
6545 if (kvm_state->hdr.vmx.flags & ~KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE)
6549 * SMM temporarily disables VMX, so we cannot be in guest mode,
6550 * nor can VMLAUNCH/VMRESUME be pending. Outside SMM, SMM flags must be zero.
6555 (KVM_STATE_NESTED_GUEST_MODE | KVM_STATE_NESTED_RUN_PENDING))
6556 : kvm_state->hdr.vmx.smm.flags)
6559 if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
6560 !(kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON))
6563 if ((kvm_state->flags & KVM_STATE_NESTED_EVMCS) &&
6564 (!nested_vmx_allowed(vcpu) || !vmx->nested.enlightened_vmcs_enabled))
6567 vmx_leave_nested(vcpu);
6569 if (kvm_state->hdr.vmx.vmxon_pa == INVALID_GPA)
6572 vmx->nested.vmxon_ptr = kvm_state->hdr.vmx.vmxon_pa;
6573 ret = enter_vmx_operation(vcpu);
6577 /* Empty 'VMXON' state is permitted if no VMCS loaded */
6578 if (kvm_state->size < sizeof(*kvm_state) + sizeof(*vmcs12)) {
6579 /* See vmx_has_valid_vmcs12. */
6580 if ((kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE) ||
6581 (kvm_state->flags & KVM_STATE_NESTED_EVMCS) ||
6582 (kvm_state->hdr.vmx.vmcs12_pa != INVALID_GPA))
6588 if (kvm_state->hdr.vmx.vmcs12_pa != INVALID_GPA) {
6589 if (kvm_state->hdr.vmx.vmcs12_pa == kvm_state->hdr.vmx.vmxon_pa ||
6590 !page_address_valid(vcpu, kvm_state->hdr.vmx.vmcs12_pa))
6593 set_current_vmptr(vmx, kvm_state->hdr.vmx.vmcs12_pa);
6594 } else if (kvm_state->flags & KVM_STATE_NESTED_EVMCS) {
6596 * nested_vmx_handle_enlightened_vmptrld() cannot be called
6597 * directly from here as HV_X64_MSR_VP_ASSIST_PAGE may not be
6598 * restored yet. EVMCS will be mapped from
6599 * nested_get_vmcs12_pages().
6601 vmx->nested.hv_evmcs_vmptr = EVMPTR_MAP_PENDING;
6602 kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
6607 if (kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON) {
6608 vmx->nested.smm.vmxon = true;
6609 vmx->nested.vmxon = false;
6611 if (kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE)
6612 vmx->nested.smm.guest_mode = true;
6615 vmcs12 = get_vmcs12(vcpu);
6616 if (copy_from_user(vmcs12, user_vmx_nested_state->vmcs12, sizeof(*vmcs12)))
6619 if (vmcs12->hdr.revision_id != VMCS12_REVISION)
6622 if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
6625 vmx->nested.nested_run_pending =
6626 !!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);
6628 vmx->nested.mtf_pending =
6629 !!(kvm_state->flags & KVM_STATE_NESTED_MTF_PENDING);
6632 if (nested_cpu_has_shadow_vmcs(vmcs12) &&
6633 vmcs12->vmcs_link_pointer != INVALID_GPA) {
6634 struct vmcs12 *shadow_vmcs12 = get_shadow_vmcs12(vcpu);
6636 if (kvm_state->size <
6637 sizeof(*kvm_state) +
6638 sizeof(user_vmx_nested_state->vmcs12) + sizeof(*shadow_vmcs12))
6639 goto error_guest_mode;
6641 if (copy_from_user(shadow_vmcs12,
6642 user_vmx_nested_state->shadow_vmcs12,
6643 sizeof(*shadow_vmcs12))) {
6645 goto error_guest_mode;
6648 if (shadow_vmcs12->hdr.revision_id != VMCS12_REVISION ||
6649 !shadow_vmcs12->hdr.shadow_vmcs)
6650 goto error_guest_mode;
6653 vmx->nested.has_preemption_timer_deadline = false;
6654 if (kvm_state->hdr.vmx.flags & KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE) {
6655 vmx->nested.has_preemption_timer_deadline = true;
6656 vmx->nested.preemption_timer_deadline =
6657 kvm_state->hdr.vmx.preemption_timer_deadline;
6660 if (nested_vmx_check_controls(vcpu, vmcs12) ||
6661 nested_vmx_check_host_state(vcpu, vmcs12) ||
6662 nested_vmx_check_guest_state(vcpu, vmcs12, &ignored))
6663 goto error_guest_mode;
6665 vmx->nested.dirty_vmcs12 = true;
6666 vmx->nested.force_msr_bitmap_recalc = true;
6667 ret = nested_vmx_enter_non_root_mode(vcpu, false);
6669 goto error_guest_mode;
6671 if (vmx->nested.mtf_pending)
6672 kvm_make_request(KVM_REQ_EVENT, vcpu);
6677 vmx->nested.nested_run_pending = 0;
6681 void nested_vmx_set_vmcs_shadowing_bitmap(void)
6683 if (enable_shadow_vmcs) {
6684 vmcs_write64(VMREAD_BITMAP, __pa(vmx_vmread_bitmap));
6685 vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmwrite_bitmap));
6690 * Indexing into the vmcs12 uses the VMCS encoding rotated left by 6. Undo
6691 * that madness to get the encoding for comparison.
6693 #define VMCS12_IDX_TO_ENC(idx) ((u16)(((u16)(idx) >> 6) | ((u16)(idx) << 10)))
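/*
 * For example, encoding 0x2 is stored at vmcs12 index 0x80, and
 * VMCS12_IDX_TO_ENC(0x80) = (0x80 >> 6) | (u16)(0x80 << 10) = 0x2,
 * i.e. a rotate right by 6 undoes the rotate left by 6.
 */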
6695 static u64 nested_vmx_calc_vmcs_enum_msr(void)
6698 * Note these are the so called "index" of the VMCS field encoding, not
6699 * the index into vmcs12.
6701 unsigned int max_idx, idx;
6705 * For better or worse, KVM allows VMREAD/VMWRITE to all fields in
6706 * vmcs12, regardless of whether or not the associated feature is
6707 * exposed to L1. Simply find the field with the highest index.
6710 for (i = 0; i < nr_vmcs12_fields; i++) {
6711 /* The vmcs12 table is very, very sparsely populated. */
6712 if (!vmcs12_field_offsets[i])
6715 idx = vmcs_field_index(VMCS12_IDX_TO_ENC(i));
6720 return (u64)max_idx << VMCS_FIELD_INDEX_SHIFT;
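/*
 * IA32_VMX_VMCS_ENUM reports the highest field index (bits 9:1 of the
 * field encoding) that L1 may encounter; the loop above just scans the
 * sparse vmcs12 offset table for the largest index among the fields KVM
 * implements.
 */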
6724 * nested_vmx_setup_ctls_msrs() sets up variables containing the values to be
6725 * returned for the various VMX controls MSRs when nested VMX is enabled.
6726 * The same values should also be used to verify that vmcs12 control fields are
6727 * valid during nested entry from L1 to L2.
6728 * Each of these control msrs has a low and high 32-bit half: A low bit is on
6729 * if the corresponding bit in the (32-bit) control field *must* be on, and a
6730 * bit in the high half is on if the corresponding bit in the control field
6731 * may be on. See also vmx_control_verify().
6733 void nested_vmx_setup_ctls_msrs(struct vmcs_config *vmcs_conf, u32 ept_caps)
6735 struct nested_vmx_msrs *msrs = &vmcs_conf->nested;
6738 * Note that as a general rule, the high half of the MSRs (bits in
6739 * the control fields which may be 1) should be initialized by the
6740 * intersection of the underlying hardware's MSR (i.e., features which
6741 * can be supported) and the list of features we want to expose -
6742 * because they are known to be properly supported in our code.
6743 * Also, usually, the low half of the MSRs (bits which must be 1) can
6744 * be set to 0, meaning that L1 may turn off any of these bits. The
6745 * reason is that if one of these bits is necessary, it will appear
6746 * in vmcs01 and prepare_vmcs02, when it bitwise-or's the control
6747 * fields of vmcs01 and vmcs02, will turn these bits off - and
6748 * nested_vmx_l1_wants_exit() will not pass related exits to L1.
6749 * These rules have exceptions below.
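/*
 * Put informally, a vmcs12 control value ctl is accepted against a given
 * (low, high) pair iff (ctl & low) == low && (ctl & ~high) == 0, i.e.
 * every mandatory bit is set and no unsupported bit is set.
 */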
6752 /* pin-based controls */
6753 msrs->pinbased_ctls_low =
6754 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
6756 msrs->pinbased_ctls_high = vmcs_conf->pin_based_exec_ctrl;
6757 msrs->pinbased_ctls_high &=
6758 PIN_BASED_EXT_INTR_MASK |
6759 PIN_BASED_NMI_EXITING |
6760 PIN_BASED_VIRTUAL_NMIS |
6761 (enable_apicv ? PIN_BASED_POSTED_INTR : 0);
6762 msrs->pinbased_ctls_high |=
6763 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
6764 PIN_BASED_VMX_PREEMPTION_TIMER;
6767 msrs->exit_ctls_low =
6768 VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
6770 msrs->exit_ctls_high = vmcs_conf->vmexit_ctrl;
6771 msrs->exit_ctls_high &=
6772 #ifdef CONFIG_X86_64
6773 VM_EXIT_HOST_ADDR_SPACE_SIZE |
6775 VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT |
6776 VM_EXIT_CLEAR_BNDCFGS;
6777 msrs->exit_ctls_high |=
6778 VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR |
6779 VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
6780 VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT |
6781 VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
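/*
 * The pattern used here and for the other control MSRs: start from what
 * the host supports (vmcs_conf), mask it down to the bits KVM is willing
 * to expose to L1, then OR in the bits that are reported unconditionally
 * because KVM emulates them or must always advertise them (the ALWAYSON
 * bits).
 */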
6783 /* We support free control of debug control saving. */
6784 msrs->exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS;
6786 /* entry controls */
6787 msrs->entry_ctls_low =
6788 VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
6790 msrs->entry_ctls_high = vmcs_conf->vmentry_ctrl;
6791 msrs->entry_ctls_high &=
6792 #ifdef CONFIG_X86_64
6793 VM_ENTRY_IA32E_MODE |
6795 VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_BNDCFGS;
6796 msrs->entry_ctls_high |=
6797 (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER |
6798 VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL);
6800 /* We support free control of debug control loading. */
6801 msrs->entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS;
6803 /* cpu-based controls */
6804 msrs->procbased_ctls_low =
6805 CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
6807 msrs->procbased_ctls_high = vmcs_conf->cpu_based_exec_ctrl;
6808 msrs->procbased_ctls_high &=
6809 CPU_BASED_INTR_WINDOW_EXITING |
6810 CPU_BASED_NMI_WINDOW_EXITING | CPU_BASED_USE_TSC_OFFSETTING |
6811 CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING |
6812 CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING |
6813 CPU_BASED_CR3_STORE_EXITING |
6814 #ifdef CONFIG_X86_64
6815 CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING |
6817 CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING |
6818 CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_TRAP_FLAG |
6819 CPU_BASED_MONITOR_EXITING | CPU_BASED_RDPMC_EXITING |
6820 CPU_BASED_RDTSC_EXITING | CPU_BASED_PAUSE_EXITING |
6821 CPU_BASED_TPR_SHADOW | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
6823 * We can allow some features even when not supported by the
6824 * hardware. For example, L1 can specify an MSR bitmap - and we
6825 * can use it to avoid exits to L1 - even when L0 runs L2
6826 * without MSR bitmaps.
6828 msrs->procbased_ctls_high |=
6829 CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
6830 CPU_BASED_USE_MSR_BITMAPS;
6832 /* We support free control of CR3 access interception. */
6833 msrs->procbased_ctls_low &=
6834 ~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING);
6837 * secondary cpu-based controls. Do not include those that
6838 * depend on CPUID bits, they are added later by
6839 * vmx_vcpu_after_set_cpuid.
6841 msrs->secondary_ctls_low = 0;
6843 msrs->secondary_ctls_high = vmcs_conf->cpu_based_2nd_exec_ctrl;
6844 msrs->secondary_ctls_high &=
6845 SECONDARY_EXEC_DESC |
6846 SECONDARY_EXEC_ENABLE_RDTSCP |
6847 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
6848 SECONDARY_EXEC_WBINVD_EXITING |
6849 SECONDARY_EXEC_APIC_REGISTER_VIRT |
6850 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
6851 SECONDARY_EXEC_RDRAND_EXITING |
6852 SECONDARY_EXEC_ENABLE_INVPCID |
6853 SECONDARY_EXEC_RDSEED_EXITING |
6854 SECONDARY_EXEC_XSAVES |
6855 SECONDARY_EXEC_TSC_SCALING |
6856 SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE;
6859 * We can emulate "VMCS shadowing," even if the hardware
6860 * doesn't support it.
6862 msrs->secondary_ctls_high |=
6863 SECONDARY_EXEC_SHADOW_VMCS;
6866 /* nested EPT: emulate EPT also to L1 */
6867 msrs->secondary_ctls_high |=
6868 SECONDARY_EXEC_ENABLE_EPT;
6870 VMX_EPT_PAGE_WALK_4_BIT |
6871 VMX_EPT_PAGE_WALK_5_BIT |
6873 VMX_EPT_INVEPT_BIT |
6874 VMX_EPT_EXECUTE_ONLY_BIT;
6876 msrs->ept_caps &= ept_caps;
6877 msrs->ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT |
6878 VMX_EPT_EXTENT_CONTEXT_BIT | VMX_EPT_2MB_PAGE_BIT |
6879 VMX_EPT_1GB_PAGE_BIT;
6880 if (enable_ept_ad_bits) {
6881 msrs->secondary_ctls_high |=
6882 SECONDARY_EXEC_ENABLE_PML;
6883 msrs->ept_caps |= VMX_EPT_AD_BIT;
6887 if (cpu_has_vmx_vmfunc()) {
6888 msrs->secondary_ctls_high |=
6889 SECONDARY_EXEC_ENABLE_VMFUNC;
6891 * Advertise EPTP switching unconditionally
6892 * since we emulate it
6895 msrs->vmfunc_controls =
6896 VMX_VMFUNC_EPTP_SWITCHING;
6900 * Old versions of KVM use the single-context version without
6901 * checking for support, so declare that it is supported even
6902 * though it is treated as global context. The alternative is
6903 * not failing the single-context invvpid, and it is worse.
6906 msrs->secondary_ctls_high |=
6907 SECONDARY_EXEC_ENABLE_VPID;
6908 msrs->vpid_caps = VMX_VPID_INVVPID_BIT |
6909 VMX_VPID_EXTENT_SUPPORTED_MASK;
6912 if (enable_unrestricted_guest)
6913 msrs->secondary_ctls_high |=
6914 SECONDARY_EXEC_UNRESTRICTED_GUEST;
6916 if (flexpriority_enabled)
6917 msrs->secondary_ctls_high |=
6918 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
6921 msrs->secondary_ctls_high |= SECONDARY_EXEC_ENCLS_EXITING;
6923 /* miscellaneous data */
6924 msrs->misc_low = (u32)vmcs_conf->misc & VMX_MISC_SAVE_EFER_LMA;
6926 MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS |
6927 VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE |
6928 VMX_MISC_ACTIVITY_HLT |
6929 VMX_MISC_ACTIVITY_WAIT_SIPI;
6930 msrs->misc_high = 0;
6933 * This MSR reports some information about VMX support. We
6934 * should return information about the VMX we emulate for the
6935 * guest, and the VMCS structure we give it - not about the
6936 * VMX support of the underlying hardware.
6940 VMX_BASIC_TRUE_CTLS |
6941 ((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) |
6942 (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT);
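/*
 * IA32_VMX_BASIC layout, for reference: bits 30:0 hold the VMCS revision
 * identifier, bits 44:32 the VMCS region size (VMCS12_SIZE here), bits
 * 53:50 the memory type (6 = write-back) and bit 55 advertises the TRUE
 * control MSRs.
 */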
6944 if (cpu_has_vmx_basic_inout())
6945 msrs->basic |= VMX_BASIC_INOUT;
6948 * These MSRs specify bits which the guest must keep fixed on
6949 * while L1 is in VMXON mode (in L1's root mode, or running an L2).
6950 * We picked the standard core2 setting.
6952 #define VMXON_CR0_ALWAYSON (X86_CR0_PE | X86_CR0_PG | X86_CR0_NE)
6953 #define VMXON_CR4_ALWAYSON X86_CR4_VMXE
6954 msrs->cr0_fixed0 = VMXON_CR0_ALWAYSON;
6955 msrs->cr4_fixed0 = VMXON_CR4_ALWAYSON;
6957 /* These MSRs specify bits which the guest must keep fixed off. */
6958 rdmsrl(MSR_IA32_VMX_CR0_FIXED1, msrs->cr0_fixed1);
6959 rdmsrl(MSR_IA32_VMX_CR4_FIXED1, msrs->cr4_fixed1);
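/*
 * Reminder on the FIXED MSR semantics: CR0/CR4_FIXED0 list bits the guest
 * must keep set while in VMX operation, CR0/CR4_FIXED1 list bits it is
 * allowed to set (anything clear in FIXED1 must stay clear). Reading the
 * host's FIXED1 values and then OR-ing in UMIP below (when UMIP is
 * emulated) is what lets L1 set CR4.UMIP even on hardware without it.
 */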
6961 if (vmx_umip_emulated())
6962 msrs->cr4_fixed1 |= X86_CR4_UMIP;
6964 msrs->vmcs_enum = nested_vmx_calc_vmcs_enum_msr();
6967 void nested_vmx_hardware_unsetup(void)
6971 if (enable_shadow_vmcs) {
6972 for (i = 0; i < VMX_BITMAP_NR; i++)
6973 free_page((unsigned long)vmx_bitmap[i]);
6977 __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *))
6981 if (!cpu_has_vmx_shadow_vmcs())
6982 enable_shadow_vmcs = 0;
6983 if (enable_shadow_vmcs) {
6984 for (i = 0; i < VMX_BITMAP_NR; i++) {
6986 * The vmx_bitmap is not tied to a VM and so should
6987 * not be charged to a memcg.
6989 vmx_bitmap[i] = (unsigned long *)
6990 __get_free_page(GFP_KERNEL);
6991 if (!vmx_bitmap[i]) {
6992 nested_vmx_hardware_unsetup();
6997 init_vmcs_shadow_fields();
7000 exit_handlers[EXIT_REASON_VMCLEAR] = handle_vmclear;
7001 exit_handlers[EXIT_REASON_VMLAUNCH] = handle_vmlaunch;
7002 exit_handlers[EXIT_REASON_VMPTRLD] = handle_vmptrld;
7003 exit_handlers[EXIT_REASON_VMPTRST] = handle_vmptrst;
7004 exit_handlers[EXIT_REASON_VMREAD] = handle_vmread;
7005 exit_handlers[EXIT_REASON_VMRESUME] = handle_vmresume;
7006 exit_handlers[EXIT_REASON_VMWRITE] = handle_vmwrite;
7007 exit_handlers[EXIT_REASON_VMOFF] = handle_vmxoff;
7008 exit_handlers[EXIT_REASON_VMON] = handle_vmxon;
7009 exit_handlers[EXIT_REASON_INVEPT] = handle_invept;
7010 exit_handlers[EXIT_REASON_INVVPID] = handle_invvpid;
7011 exit_handlers[EXIT_REASON_VMFUNC] = handle_vmfunc;
7016 struct kvm_x86_nested_ops vmx_nested_ops = {
7017 .leave_nested = vmx_leave_nested,
7018 .is_exception_vmexit = nested_vmx_is_exception_vmexit,
7019 .check_events = vmx_check_nested_events,
7020 .has_events = vmx_has_nested_events,
7021 .triple_fault = nested_vmx_triple_fault,
7022 .get_state = vmx_get_nested_state,
7023 .set_state = vmx_set_nested_state,
7024 .get_nested_state_pages = vmx_get_nested_state_pages,
7025 .write_log_dirty = nested_vmx_write_pml_buffer,
7026 .enable_evmcs = nested_enable_evmcs,
7027 .get_evmcs_version = nested_get_evmcs_version,