1 // SPDX-License-Identifier: GPL-2.0
3 #include <linux/objtool.h>
4 #include <linux/percpu.h>
6 #include <asm/debugreg.h>
7 #include <asm/mmu_context.h>
18 static bool __read_mostly enable_shadow_vmcs = 1;
19 module_param_named(enable_shadow_vmcs, enable_shadow_vmcs, bool, S_IRUGO);
21 static bool __read_mostly nested_early_check = 0;
22 module_param(nested_early_check, bool, S_IRUGO);
#define CC(consistency_check)						\
({									\
	bool failed = (consistency_check);				\
	if (failed)							\
		trace_kvm_nested_vmenter_failed(#consistency_check, 0);\
	failed;								\
})
33 * Hyper-V requires all of these, so mark them as supported even though
34 * they are just treated the same as all-context.
36 #define VMX_VPID_EXTENT_SUPPORTED_MASK \
37 (VMX_VPID_EXTENT_INDIVIDUAL_ADDR_BIT | \
38 VMX_VPID_EXTENT_SINGLE_CONTEXT_BIT | \
39 VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT | \
40 VMX_VPID_EXTENT_SINGLE_NON_GLOBAL_BIT)
42 #define VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE 5
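/*
 * Illustrative note (added for clarity, not part of the original logic): a
 * rate of 5 means the emulated VMX-preemption timer ticks once every
 * 2^5 = 32 TSC cycles, so converting an L1 TSC value into timer units is a
 * plain shift, roughly:
 *
 *	l1_scaled_tsc = l1_tsc >> VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
 *
 * (the variable names above are hypothetical; the real conversion lives in
 * the preemption-timer emulation elsewhere in the nested code).
 */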
49 static unsigned long *vmx_bitmap[VMX_BITMAP_NR];
51 #define vmx_vmread_bitmap (vmx_bitmap[VMX_VMREAD_BITMAP])
52 #define vmx_vmwrite_bitmap (vmx_bitmap[VMX_VMWRITE_BITMAP])
54 struct shadow_vmcs_field {
58 static struct shadow_vmcs_field shadow_read_only_fields[] = {
59 #define SHADOW_FIELD_RO(x, y) { x, offsetof(struct vmcs12, y) },
60 #include "vmcs_shadow_fields.h"
62 static int max_shadow_read_only_fields =
63 ARRAY_SIZE(shadow_read_only_fields);
65 static struct shadow_vmcs_field shadow_read_write_fields[] = {
66 #define SHADOW_FIELD_RW(x, y) { x, offsetof(struct vmcs12, y) },
67 #include "vmcs_shadow_fields.h"
69 static int max_shadow_read_write_fields =
70 ARRAY_SIZE(shadow_read_write_fields);
72 static void init_vmcs_shadow_fields(void)
76 memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
77 memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);
79 for (i = j = 0; i < max_shadow_read_only_fields; i++) {
80 struct shadow_vmcs_field entry = shadow_read_only_fields[i];
81 u16 field = entry.encoding;
83 if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 &&
84 (i + 1 == max_shadow_read_only_fields ||
85 shadow_read_only_fields[i + 1].encoding != field + 1))
86 pr_err("Missing field from shadow_read_only_field %x\n",
89 clear_bit(field, vmx_vmread_bitmap);
94 entry.offset += sizeof(u32);
96 shadow_read_only_fields[j++] = entry;
98 max_shadow_read_only_fields = j;
100 for (i = j = 0; i < max_shadow_read_write_fields; i++) {
101 struct shadow_vmcs_field entry = shadow_read_write_fields[i];
102 u16 field = entry.encoding;
104 if (vmcs_field_width(field) == VMCS_FIELD_WIDTH_U64 &&
105 (i + 1 == max_shadow_read_write_fields ||
106 shadow_read_write_fields[i + 1].encoding != field + 1))
107 pr_err("Missing field from shadow_read_write_field %x\n",
110 WARN_ONCE(field >= GUEST_ES_AR_BYTES &&
111 field <= GUEST_TR_AR_BYTES,
112 "Update vmcs12_write_any() to drop reserved bits from AR_BYTES");
		 * PML and the preemption timer can be emulated, but the
		 * processor cannot vmwrite to fields that don't exist
		 * in hardware.
		 */
		switch (field) {
		case GUEST_PML_INDEX:
			if (!cpu_has_vmx_pml())
				continue;
			break;
		case VMX_PREEMPTION_TIMER_VALUE:
			if (!cpu_has_vmx_preemption_timer())
				continue;
			break;
		case GUEST_INTR_STATUS:
			if (!cpu_has_vmx_apicv())
				continue;
			break;
		default:
			break;
		}

136 clear_bit(field, vmx_vmwrite_bitmap);
		clear_bit(field, vmx_vmread_bitmap);
		if (field & 1)
#ifdef CONFIG_X86_64
			continue;
#else
			entry.offset += sizeof(u32);
#endif
		shadow_read_write_fields[j++] = entry;
146 max_shadow_read_write_fields = j;
150 * The following 3 functions, nested_vmx_succeed()/failValid()/failInvalid(),
151 * set the success or error code of an emulated VMX instruction (as specified
 * by Vol 2B, VMX Instruction Reference, "Conventions"), and skip the emulated
 * instruction.
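 *
 * As a quick reference (per the SDM conventions, added here for clarity):
 * VMsucceed clears CF/PF/AF/ZF/SF/OF, VMfailInvalid sets CF=1 and clears the
 * rest, and VMfailValid sets ZF=1, clears the rest, and stores the error
 * number in the VM-instruction error field of the current VMCS, roughly:
 *
 *	rflags = (rflags & ~(CF | PF | AF | SF | OF)) | ZF;
 *	current_vmcs->vm_instruction_error = error;
 *
 * (pseudo-code only; the helpers below operate on vmcs12).
 */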
155 static int nested_vmx_succeed(struct kvm_vcpu *vcpu)
157 vmx_set_rflags(vcpu, vmx_get_rflags(vcpu)
158 & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
159 X86_EFLAGS_ZF | X86_EFLAGS_SF | X86_EFLAGS_OF));
160 return kvm_skip_emulated_instruction(vcpu);
163 static int nested_vmx_failInvalid(struct kvm_vcpu *vcpu)
165 vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
166 & ~(X86_EFLAGS_PF | X86_EFLAGS_AF | X86_EFLAGS_ZF |
167 X86_EFLAGS_SF | X86_EFLAGS_OF))
169 return kvm_skip_emulated_instruction(vcpu);
172 static int nested_vmx_failValid(struct kvm_vcpu *vcpu,
173 u32 vm_instruction_error)
175 vmx_set_rflags(vcpu, (vmx_get_rflags(vcpu)
176 & ~(X86_EFLAGS_CF | X86_EFLAGS_PF | X86_EFLAGS_AF |
177 X86_EFLAGS_SF | X86_EFLAGS_OF))
179 get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error;
181 * We don't need to force a shadow sync because
182 * VM_INSTRUCTION_ERROR is not shadowed
184 return kvm_skip_emulated_instruction(vcpu);
187 static int nested_vmx_fail(struct kvm_vcpu *vcpu, u32 vm_instruction_error)
189 struct vcpu_vmx *vmx = to_vmx(vcpu);
192 * failValid writes the error number to the current VMCS, which
193 * can't be done if there isn't a current VMCS.
195 if (vmx->nested.current_vmptr == -1ull && !vmx->nested.hv_evmcs)
196 return nested_vmx_failInvalid(vcpu);
198 return nested_vmx_failValid(vcpu, vm_instruction_error);
201 static void nested_vmx_abort(struct kvm_vcpu *vcpu, u32 indicator)
	/* TODO: don't simply reset the guest here. */
204 kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu);
205 pr_debug_ratelimited("kvm: nested vmx abort, indicator %d\n", indicator);
208 static inline bool vmx_control_verify(u32 control, u32 low, u32 high)
210 return fixed_bits_valid(control, low, high);
213 static inline u64 vmx_control_msr(u32 low, u32 high)
215 return low | ((u64)high << 32);
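/*
 * Illustrative note (not from the original source): for the VMX capability
 * MSR pairs, bits 31:0 report the allowed 0-settings (a control bit must be
 * 1 if the corresponding low bit is 1) and bits 63:32 report the allowed
 * 1-settings (a control bit may be 1 only if the corresponding high bit is
 * 1).  A control value is therefore acceptable iff
 *
 *	(control & low) == low && (control & ~high) == 0
 *
 * which is what vmx_control_verify()/fixed_bits_valid() check.
 */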
218 static void vmx_disable_shadow_vmcs(struct vcpu_vmx *vmx)
220 secondary_exec_controls_clearbit(vmx, SECONDARY_EXEC_SHADOW_VMCS);
221 vmcs_write64(VMCS_LINK_POINTER, -1ull);
222 vmx->nested.need_vmcs12_to_shadow_sync = false;
225 static inline void nested_release_evmcs(struct kvm_vcpu *vcpu)
227 struct vcpu_vmx *vmx = to_vmx(vcpu);
229 if (!vmx->nested.hv_evmcs)
232 kvm_vcpu_unmap(vcpu, &vmx->nested.hv_evmcs_map, true);
233 vmx->nested.hv_evmcs_vmptr = 0;
234 vmx->nested.hv_evmcs = NULL;
237 static void vmx_sync_vmcs_host_state(struct vcpu_vmx *vmx,
238 struct loaded_vmcs *prev)
240 struct vmcs_host_state *dest, *src;
242 if (unlikely(!vmx->guest_state_loaded))
245 src = &prev->host_state;
246 dest = &vmx->loaded_vmcs->host_state;
248 vmx_set_host_fs_gs(dest, src->fs_sel, src->gs_sel, src->fs_base, src->gs_base);
249 dest->ldt_sel = src->ldt_sel;
251 dest->ds_sel = src->ds_sel;
252 dest->es_sel = src->es_sel;
256 static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
258 struct vcpu_vmx *vmx = to_vmx(vcpu);
259 struct loaded_vmcs *prev;
262 if (WARN_ON_ONCE(vmx->loaded_vmcs == vmcs))
266 prev = vmx->loaded_vmcs;
267 vmx->loaded_vmcs = vmcs;
268 vmx_vcpu_load_vmcs(vcpu, cpu, prev);
269 vmx_sync_vmcs_host_state(vmx, prev);
272 vmx_register_cache_reset(vcpu);
276 * Free whatever needs to be freed from vmx->nested when L1 goes down, or
277 * just stops using VMX.
279 static void free_nested(struct kvm_vcpu *vcpu)
281 struct vcpu_vmx *vmx = to_vmx(vcpu);
283 if (WARN_ON_ONCE(vmx->loaded_vmcs != &vmx->vmcs01))
284 vmx_switch_vmcs(vcpu, &vmx->vmcs01);
286 if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon)
289 kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
291 vmx->nested.vmxon = false;
292 vmx->nested.smm.vmxon = false;
293 free_vpid(vmx->nested.vpid02);
294 vmx->nested.posted_intr_nv = -1;
295 vmx->nested.current_vmptr = -1ull;
296 if (enable_shadow_vmcs) {
297 vmx_disable_shadow_vmcs(vmx);
298 vmcs_clear(vmx->vmcs01.shadow_vmcs);
299 free_vmcs(vmx->vmcs01.shadow_vmcs);
300 vmx->vmcs01.shadow_vmcs = NULL;
302 kfree(vmx->nested.cached_vmcs12);
303 vmx->nested.cached_vmcs12 = NULL;
304 kfree(vmx->nested.cached_shadow_vmcs12);
305 vmx->nested.cached_shadow_vmcs12 = NULL;
306 /* Unpin physical memory we referred to in the vmcs02 */
307 if (vmx->nested.apic_access_page) {
308 kvm_release_page_clean(vmx->nested.apic_access_page);
309 vmx->nested.apic_access_page = NULL;
311 kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true);
312 kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true);
313 vmx->nested.pi_desc = NULL;
315 kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
317 nested_release_evmcs(vcpu);
319 free_loaded_vmcs(&vmx->nested.vmcs02);
323 * Ensure that the current vmcs of the logical processor is the
324 * vmcs01 of the vcpu before calling free_nested().
326 void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu)
329 vmx_leave_nested(vcpu);
333 static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
334 struct x86_exception *fault)
336 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
337 struct vcpu_vmx *vmx = to_vmx(vcpu);
339 unsigned long exit_qualification = vcpu->arch.exit_qualification;
341 if (vmx->nested.pml_full) {
342 vm_exit_reason = EXIT_REASON_PML_FULL;
343 vmx->nested.pml_full = false;
344 exit_qualification &= INTR_INFO_UNBLOCK_NMI;
345 } else if (fault->error_code & PFERR_RSVD_MASK)
346 vm_exit_reason = EXIT_REASON_EPT_MISCONFIG;
348 vm_exit_reason = EXIT_REASON_EPT_VIOLATION;
350 nested_vmx_vmexit(vcpu, vm_exit_reason, 0, exit_qualification);
351 vmcs12->guest_physical_address = fault->address;
354 static void nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
356 WARN_ON(mmu_is_nested(vcpu));
358 vcpu->arch.mmu = &vcpu->arch.guest_mmu;
359 kvm_init_shadow_ept_mmu(vcpu,
360 to_vmx(vcpu)->nested.msrs.ept_caps &
361 VMX_EPT_EXECUTE_ONLY_BIT,
362 nested_ept_ad_enabled(vcpu),
363 nested_ept_get_eptp(vcpu));
364 vcpu->arch.mmu->get_guest_pgd = nested_ept_get_eptp;
365 vcpu->arch.mmu->inject_page_fault = nested_ept_inject_page_fault;
366 vcpu->arch.mmu->get_pdptr = kvm_pdptr_read;
368 vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
371 static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu)
373 vcpu->arch.mmu = &vcpu->arch.root_mmu;
374 vcpu->arch.walk_mmu = &vcpu->arch.root_mmu;
377 static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
380 bool inequality, bit;
382 bit = (vmcs12->exception_bitmap & (1u << PF_VECTOR)) != 0;
	inequality =
		(error_code & vmcs12->page_fault_error_code_mask) !=
		 vmcs12->page_fault_error_code_match;
386 return inequality ^ bit;
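/*
 * Worked example (illustrative): if the #PF bit is set in the exception
 * bitmap (bit = 1) and (error_code & PFEC_MASK) == PFEC_MATCH
 * (inequality = 0), then inequality ^ bit == 1 and the page fault causes a
 * VM-exit to L1.  Clearing the #PF bit inverts the meaning of the mask/match
 * pair, which the XOR captures.
 */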
/*
 * KVM wants to inject page-faults which it received to the guest. This
 * function checks whether, in a nested guest, such faults need to be
 * injected into L1 or L2.
394 static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned long *exit_qual)
396 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
397 unsigned int nr = vcpu->arch.exception.nr;
398 bool has_payload = vcpu->arch.exception.has_payload;
399 unsigned long payload = vcpu->arch.exception.payload;
401 if (nr == PF_VECTOR) {
402 if (vcpu->arch.exception.nested_apf) {
403 *exit_qual = vcpu->arch.apf.nested_apf_token;
406 if (nested_vmx_is_page_fault_vmexit(vmcs12,
407 vcpu->arch.exception.error_code)) {
408 *exit_qual = has_payload ? payload : vcpu->arch.cr2;
411 } else if (vmcs12->exception_bitmap & (1u << nr)) {
412 if (nr == DB_VECTOR) {
414 payload = vcpu->arch.dr6;
415 payload &= ~(DR6_FIXED_1 | DR6_BT);
418 *exit_qual = payload;
428 static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu,
429 struct x86_exception *fault)
431 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
433 WARN_ON(!is_guest_mode(vcpu));
435 if (nested_vmx_is_page_fault_vmexit(vmcs12, fault->error_code) &&
436 !to_vmx(vcpu)->nested.nested_run_pending) {
437 vmcs12->vm_exit_intr_error_code = fault->error_code;
438 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
439 PF_VECTOR | INTR_TYPE_HARD_EXCEPTION |
440 INTR_INFO_DELIVER_CODE_MASK | INTR_INFO_VALID_MASK,
443 kvm_inject_page_fault(vcpu, fault);
447 static int nested_vmx_check_io_bitmap_controls(struct kvm_vcpu *vcpu,
448 struct vmcs12 *vmcs12)
450 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
453 if (CC(!page_address_valid(vcpu, vmcs12->io_bitmap_a)) ||
454 CC(!page_address_valid(vcpu, vmcs12->io_bitmap_b)))
460 static int nested_vmx_check_msr_bitmap_controls(struct kvm_vcpu *vcpu,
461 struct vmcs12 *vmcs12)
463 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
466 if (CC(!page_address_valid(vcpu, vmcs12->msr_bitmap)))
472 static int nested_vmx_check_tpr_shadow_controls(struct kvm_vcpu *vcpu,
473 struct vmcs12 *vmcs12)
475 if (!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW))
478 if (CC(!page_address_valid(vcpu, vmcs12->virtual_apic_page_addr)))
 * Check if a write to the given MSR is intercepted by the L01 MSR bitmap.
487 static bool msr_write_intercepted_l01(struct kvm_vcpu *vcpu, u32 msr)
489 unsigned long *msr_bitmap;
490 int f = sizeof(unsigned long);
	if (!cpu_has_vmx_msr_bitmap())
		return true;

	msr_bitmap = to_vmx(vcpu)->vmcs01.msr_bitmap;

	if (msr <= 0x1fff) {
		return !!test_bit(msr, msr_bitmap + 0x800 / f);
	} else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
		msr &= 0x1fff;
		return !!test_bit(msr, msr_bitmap + 0xc00 / f);
	}

	return true;
}
/*
 * If an MSR is allowed by L0, we should check whether it is also allowed by
 * L1. The corresponding bit will be cleared unless both L0 and L1 allow it.
511 static void nested_vmx_disable_intercept_for_msr(unsigned long *msr_bitmap_l1,
512 unsigned long *msr_bitmap_nested,
515 int f = sizeof(unsigned long);
518 * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
519 * have the write-low and read-high bitmap offsets the wrong way round.
520 * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
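	 *
	 * For reference (added note, layout per the SDM): the 4K bitmap holds
	 * the read bitmap for MSRs 0x00000000-0x00001fff at offset 0x000, the
	 * read bitmap for 0xc0000000-0xc0001fff at 0x400, the write bitmap
	 * for 0x00000000-0x00001fff at 0x800 and the write bitmap for
	 * 0xc0000000-0xc0001fff at 0xc00, one bit per MSR.  E.g. MSR_GS_BASE
	 * (0xc0000101) is bit 0x101 of the words at 0x400 (reads) and 0xc00
	 * (writes).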
	 */
	if (msr <= 0x1fff) {
		if (type & MSR_TYPE_R &&
524 !test_bit(msr, msr_bitmap_l1 + 0x000 / f))
526 __clear_bit(msr, msr_bitmap_nested + 0x000 / f);
528 if (type & MSR_TYPE_W &&
529 !test_bit(msr, msr_bitmap_l1 + 0x800 / f))
531 __clear_bit(msr, msr_bitmap_nested + 0x800 / f);
	} else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
		msr &= 0x1fff;
535 if (type & MSR_TYPE_R &&
536 !test_bit(msr, msr_bitmap_l1 + 0x400 / f))
538 __clear_bit(msr, msr_bitmap_nested + 0x400 / f);
540 if (type & MSR_TYPE_W &&
541 !test_bit(msr, msr_bitmap_l1 + 0xc00 / f))
543 __clear_bit(msr, msr_bitmap_nested + 0xc00 / f);
548 static inline void enable_x2apic_msr_intercepts(unsigned long *msr_bitmap)
552 for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
553 unsigned word = msr / BITS_PER_LONG;
555 msr_bitmap[word] = ~0;
556 msr_bitmap[word + (0x800 / sizeof(long))] = ~0;
/*
 * Merge L0's and L1's MSR bitmaps; return false to indicate that
 * the hardware MSR bitmap should not be used for L2.
564 static inline bool nested_vmx_prepare_msr_bitmap(struct kvm_vcpu *vcpu,
565 struct vmcs12 *vmcs12)
568 unsigned long *msr_bitmap_l1;
569 unsigned long *msr_bitmap_l0 = to_vmx(vcpu)->nested.vmcs02.msr_bitmap;
570 struct kvm_host_map *map = &to_vmx(vcpu)->nested.msr_bitmap_map;
572 /* Nothing to do if the MSR bitmap is not in use. */
573 if (!cpu_has_vmx_msr_bitmap() ||
574 !nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
577 if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->msr_bitmap), map))
580 msr_bitmap_l1 = (unsigned long *)map->hva;
583 * To keep the control flow simple, pay eight 8-byte writes (sixteen
584 * 4-byte writes on 32-bit systems) up front to enable intercepts for
585 * the x2APIC MSR range and selectively disable them below.
587 enable_x2apic_msr_intercepts(msr_bitmap_l0);
589 if (nested_cpu_has_virt_x2apic_mode(vmcs12)) {
590 if (nested_cpu_has_apic_reg_virt(vmcs12)) {
592 * L0 need not intercept reads for MSRs between 0x800
593 * and 0x8ff, it just lets the processor take the value
594 * from the virtual-APIC page; take those 256 bits
595 * directly from the L1 bitmap.
597 for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
598 unsigned word = msr / BITS_PER_LONG;
600 msr_bitmap_l0[word] = msr_bitmap_l1[word];
604 nested_vmx_disable_intercept_for_msr(
605 msr_bitmap_l1, msr_bitmap_l0,
606 X2APIC_MSR(APIC_TASKPRI),
607 MSR_TYPE_R | MSR_TYPE_W);
609 if (nested_cpu_has_vid(vmcs12)) {
610 nested_vmx_disable_intercept_for_msr(
611 msr_bitmap_l1, msr_bitmap_l0,
612 X2APIC_MSR(APIC_EOI),
614 nested_vmx_disable_intercept_for_msr(
615 msr_bitmap_l1, msr_bitmap_l0,
616 X2APIC_MSR(APIC_SELF_IPI),
621 /* KVM unconditionally exposes the FS/GS base MSRs to L1. */
#ifdef CONFIG_X86_64
	nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
624 MSR_FS_BASE, MSR_TYPE_RW);
626 nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
627 MSR_GS_BASE, MSR_TYPE_RW);
629 nested_vmx_disable_intercept_for_msr(msr_bitmap_l1, msr_bitmap_l0,
630 MSR_KERNEL_GS_BASE, MSR_TYPE_RW);
#endif

	/*
	 * Checking the L0->L1 bitmap is trying to verify two things:
	 *
	 * 1. L0 gave permission to L1 to actually pass through the MSR. This
	 *    ensures that we do not accidentally generate an L02 MSR bitmap
	 *    from the L12 MSR bitmap that is too permissive.
	 * 2. That L1 or its L2s have actually used the MSR. This avoids
	 *    unnecessary merging of the bitmap if the MSR is unused. This
	 *    works properly because we only update the L01 MSR bitmap lazily.
	 *    So even if L0 should pass L1 these MSRs, the L01 bitmap is only
	 *    updated to reflect this when L1 (or its L2s) actually write to
	 *    this MSR.
	 */
646 if (!msr_write_intercepted_l01(vcpu, MSR_IA32_SPEC_CTRL))
647 nested_vmx_disable_intercept_for_msr(
648 msr_bitmap_l1, msr_bitmap_l0,
650 MSR_TYPE_R | MSR_TYPE_W);
652 if (!msr_write_intercepted_l01(vcpu, MSR_IA32_PRED_CMD))
653 nested_vmx_disable_intercept_for_msr(
654 msr_bitmap_l1, msr_bitmap_l0,
658 kvm_vcpu_unmap(vcpu, &to_vmx(vcpu)->nested.msr_bitmap_map, false);
663 static void nested_cache_shadow_vmcs12(struct kvm_vcpu *vcpu,
664 struct vmcs12 *vmcs12)
666 struct kvm_host_map map;
667 struct vmcs12 *shadow;
669 if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
670 vmcs12->vmcs_link_pointer == -1ull)
673 shadow = get_shadow_vmcs12(vcpu);
675 if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->vmcs_link_pointer), &map))
678 memcpy(shadow, map.hva, VMCS12_SIZE);
679 kvm_vcpu_unmap(vcpu, &map, false);
682 static void nested_flush_cached_shadow_vmcs12(struct kvm_vcpu *vcpu,
683 struct vmcs12 *vmcs12)
685 struct vcpu_vmx *vmx = to_vmx(vcpu);
687 if (!nested_cpu_has_shadow_vmcs(vmcs12) ||
688 vmcs12->vmcs_link_pointer == -1ull)
691 kvm_write_guest(vmx->vcpu.kvm, vmcs12->vmcs_link_pointer,
692 get_shadow_vmcs12(vcpu), VMCS12_SIZE);
696 * In nested virtualization, check if L1 has set
697 * VM_EXIT_ACK_INTR_ON_EXIT
699 static bool nested_exit_intr_ack_set(struct kvm_vcpu *vcpu)
701 return get_vmcs12(vcpu)->vm_exit_controls &
702 VM_EXIT_ACK_INTR_ON_EXIT;
705 static int nested_vmx_check_apic_access_controls(struct kvm_vcpu *vcpu,
706 struct vmcs12 *vmcs12)
708 if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) &&
709 CC(!page_address_valid(vcpu, vmcs12->apic_access_addr)))
715 static int nested_vmx_check_apicv_controls(struct kvm_vcpu *vcpu,
716 struct vmcs12 *vmcs12)
718 if (!nested_cpu_has_virt_x2apic_mode(vmcs12) &&
719 !nested_cpu_has_apic_reg_virt(vmcs12) &&
720 !nested_cpu_has_vid(vmcs12) &&
721 !nested_cpu_has_posted_intr(vmcs12))
725 * If virtualize x2apic mode is enabled,
726 * virtualize apic access must be disabled.
728 if (CC(nested_cpu_has_virt_x2apic_mode(vmcs12) &&
729 nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)))
733 * If virtual interrupt delivery is enabled,
734 * we must exit on external interrupts.
736 if (CC(nested_cpu_has_vid(vmcs12) && !nested_exit_on_intr(vcpu)))
740 * bits 15:8 should be zero in posted_intr_nv,
	 * the descriptor address has already been checked
742 * in nested_get_vmcs12_pages.
744 * bits 5:0 of posted_intr_desc_addr should be zero.
746 if (nested_cpu_has_posted_intr(vmcs12) &&
747 (CC(!nested_cpu_has_vid(vmcs12)) ||
748 CC(!nested_exit_intr_ack_set(vcpu)) ||
749 CC((vmcs12->posted_intr_nv & 0xff00)) ||
750 CC((vmcs12->posted_intr_desc_addr & 0x3f)) ||
751 CC((vmcs12->posted_intr_desc_addr >> cpuid_maxphyaddr(vcpu)))))
754 /* tpr shadow is needed by all apicv features. */
755 if (CC(!nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)))
static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
				       u32 count, u64 addr)
{
	u64 maxphyaddr;

	if (count == 0)
		return 0;
	maxphyaddr = cpuid_maxphyaddr(vcpu);
	if (!IS_ALIGNED(addr, 16) || addr >> maxphyaddr ||
	    (addr + count * sizeof(struct vmx_msr_entry) - 1) >> maxphyaddr)
		return -EINVAL;

	return 0;
}
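/*
 * Sketch of the bound enforced above (numbers chosen for illustration): each
 * list entry is a 16-byte struct vmx_msr_entry, so with maxphyaddr == 36 and
 * count == 512 the last byte of the list is addr + 512 * 16 - 1, which must
 * still fit below 2^36, and addr itself must be 16-byte aligned.
 */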
776 static int nested_vmx_check_exit_msr_switch_controls(struct kvm_vcpu *vcpu,
777 struct vmcs12 *vmcs12)
779 if (CC(nested_vmx_check_msr_switch(vcpu,
780 vmcs12->vm_exit_msr_load_count,
781 vmcs12->vm_exit_msr_load_addr)) ||
782 CC(nested_vmx_check_msr_switch(vcpu,
783 vmcs12->vm_exit_msr_store_count,
784 vmcs12->vm_exit_msr_store_addr)))
790 static int nested_vmx_check_entry_msr_switch_controls(struct kvm_vcpu *vcpu,
791 struct vmcs12 *vmcs12)
793 if (CC(nested_vmx_check_msr_switch(vcpu,
794 vmcs12->vm_entry_msr_load_count,
795 vmcs12->vm_entry_msr_load_addr)))
801 static int nested_vmx_check_pml_controls(struct kvm_vcpu *vcpu,
802 struct vmcs12 *vmcs12)
804 if (!nested_cpu_has_pml(vmcs12))
807 if (CC(!nested_cpu_has_ept(vmcs12)) ||
808 CC(!page_address_valid(vcpu, vmcs12->pml_address)))
814 static int nested_vmx_check_unrestricted_guest_controls(struct kvm_vcpu *vcpu,
815 struct vmcs12 *vmcs12)
817 if (CC(nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST) &&
818 !nested_cpu_has_ept(vmcs12)))
823 static int nested_vmx_check_mode_based_ept_exec_controls(struct kvm_vcpu *vcpu,
824 struct vmcs12 *vmcs12)
826 if (CC(nested_cpu_has2(vmcs12, SECONDARY_EXEC_MODE_BASED_EPT_EXEC) &&
827 !nested_cpu_has_ept(vmcs12)))
832 static int nested_vmx_check_shadow_vmcs_controls(struct kvm_vcpu *vcpu,
833 struct vmcs12 *vmcs12)
835 if (!nested_cpu_has_shadow_vmcs(vmcs12))
838 if (CC(!page_address_valid(vcpu, vmcs12->vmread_bitmap)) ||
839 CC(!page_address_valid(vcpu, vmcs12->vmwrite_bitmap)))
845 static int nested_vmx_msr_check_common(struct kvm_vcpu *vcpu,
846 struct vmx_msr_entry *e)
848 /* x2APIC MSR accesses are not allowed */
849 if (CC(vcpu->arch.apic_base & X2APIC_ENABLE && e->index >> 8 == 0x8))
851 if (CC(e->index == MSR_IA32_UCODE_WRITE) || /* SDM Table 35-2 */
852 CC(e->index == MSR_IA32_UCODE_REV))
854 if (CC(e->reserved != 0))
859 static int nested_vmx_load_msr_check(struct kvm_vcpu *vcpu,
860 struct vmx_msr_entry *e)
862 if (CC(e->index == MSR_FS_BASE) ||
863 CC(e->index == MSR_GS_BASE) ||
864 CC(e->index == MSR_IA32_SMM_MONITOR_CTL) || /* SMM is not supported */
865 nested_vmx_msr_check_common(vcpu, e))
870 static int nested_vmx_store_msr_check(struct kvm_vcpu *vcpu,
871 struct vmx_msr_entry *e)
873 if (CC(e->index == MSR_IA32_SMBASE) || /* SMM is not supported */
874 nested_vmx_msr_check_common(vcpu, e))
879 static u32 nested_vmx_max_atomic_switch_msrs(struct kvm_vcpu *vcpu)
881 struct vcpu_vmx *vmx = to_vmx(vcpu);
882 u64 vmx_misc = vmx_control_msr(vmx->nested.msrs.misc_low,
883 vmx->nested.msrs.misc_high);
885 return (vmx_misc_max_msr(vmx_misc) + 1) * VMX_MISC_MSR_LIST_MULTIPLIER;
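/*
 * Worked example (illustrative): IA32_VMX_MISC bits 27:25 report the
 * recommended maximum list size as (N + 1) * 512 entries, i.e. N == 0 yields
 * 512 MSRs and N == 7 yields the architectural ceiling of 4096;
 * VMX_MISC_MSR_LIST_MULTIPLIER is that 512 multiplier.
 */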
/*
 * Load the guest's/host's MSRs at nested entry/exit.
 * Return 0 for success, or the (1-based) entry index on failure.
 *
 * One of the failure modes for MSR load/store is when a list exceeds the
 * virtual hardware's capacity. To maintain compatibility with hardware as
 * much as possible, process all valid entries before failing rather than
 * prechecking for a capacity violation.
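 *
 * For reference (added note): each list entry in guest memory has the layout
 * of struct vmx_msr_entry, i.e. 16 bytes:
 *
 *	struct vmx_msr_entry { u32 index; u32 reserved; u64 value; };
 *
 * where the reserved word must be zero (checked by the helpers above).
 */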
897 static u32 nested_vmx_load_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
900 struct vmx_msr_entry e;
901 u32 max_msr_list_size = nested_vmx_max_atomic_switch_msrs(vcpu);
903 for (i = 0; i < count; i++) {
904 if (unlikely(i >= max_msr_list_size))
907 if (kvm_vcpu_read_guest(vcpu, gpa + i * sizeof(e),
909 pr_debug_ratelimited(
910 "%s cannot read MSR entry (%u, 0x%08llx)\n",
911 __func__, i, gpa + i * sizeof(e));
914 if (nested_vmx_load_msr_check(vcpu, &e)) {
915 pr_debug_ratelimited(
916 "%s check failed (%u, 0x%x, 0x%x)\n",
917 __func__, i, e.index, e.reserved);
920 if (kvm_set_msr(vcpu, e.index, e.value)) {
921 pr_debug_ratelimited(
922 "%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
923 __func__, i, e.index, e.value);
929 /* Note, max_msr_list_size is at most 4096, i.e. this can't wrap. */
933 static bool nested_vmx_get_vmexit_msr_value(struct kvm_vcpu *vcpu,
937 struct vcpu_vmx *vmx = to_vmx(vcpu);
940 * If the L0 hypervisor stored a more accurate value for the TSC that
941 * does not include the time taken for emulation of the L2->L1
942 * VM-exit in L0, use the more accurate value.
944 if (msr_index == MSR_IA32_TSC) {
945 int i = vmx_find_loadstore_msr_slot(&vmx->msr_autostore.guest,
949 u64 val = vmx->msr_autostore.guest.val[i].value;
951 *data = kvm_read_l1_tsc(vcpu, val);
956 if (kvm_get_msr(vcpu, msr_index, data)) {
957 pr_debug_ratelimited("%s cannot read MSR (0x%x)\n", __func__,
964 static bool read_and_check_msr_entry(struct kvm_vcpu *vcpu, u64 gpa, int i,
965 struct vmx_msr_entry *e)
967 if (kvm_vcpu_read_guest(vcpu,
968 gpa + i * sizeof(*e),
969 e, 2 * sizeof(u32))) {
970 pr_debug_ratelimited(
971 "%s cannot read MSR entry (%u, 0x%08llx)\n",
972 __func__, i, gpa + i * sizeof(*e));
975 if (nested_vmx_store_msr_check(vcpu, e)) {
976 pr_debug_ratelimited(
977 "%s check failed (%u, 0x%x, 0x%x)\n",
978 __func__, i, e->index, e->reserved);
984 static int nested_vmx_store_msr(struct kvm_vcpu *vcpu, u64 gpa, u32 count)
988 struct vmx_msr_entry e;
989 u32 max_msr_list_size = nested_vmx_max_atomic_switch_msrs(vcpu);
991 for (i = 0; i < count; i++) {
992 if (unlikely(i >= max_msr_list_size))
995 if (!read_and_check_msr_entry(vcpu, gpa, i, &e))
998 if (!nested_vmx_get_vmexit_msr_value(vcpu, e.index, &data))
1001 if (kvm_vcpu_write_guest(vcpu,
1002 gpa + i * sizeof(e) +
1003 offsetof(struct vmx_msr_entry, value),
1004 &data, sizeof(data))) {
1005 pr_debug_ratelimited(
1006 "%s cannot write MSR (%u, 0x%x, 0x%llx)\n",
1007 __func__, i, e.index, data);
1014 static bool nested_msr_store_list_has_msr(struct kvm_vcpu *vcpu, u32 msr_index)
1016 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
1017 u32 count = vmcs12->vm_exit_msr_store_count;
1018 u64 gpa = vmcs12->vm_exit_msr_store_addr;
1019 struct vmx_msr_entry e;
1022 for (i = 0; i < count; i++) {
1023 if (!read_and_check_msr_entry(vcpu, gpa, i, &e))
1026 if (e.index == msr_index)
1032 static void prepare_vmx_msr_autostore_list(struct kvm_vcpu *vcpu,
1035 struct vcpu_vmx *vmx = to_vmx(vcpu);
1036 struct vmx_msrs *autostore = &vmx->msr_autostore.guest;
1037 bool in_vmcs12_store_list;
1038 int msr_autostore_slot;
1039 bool in_autostore_list;
1042 msr_autostore_slot = vmx_find_loadstore_msr_slot(autostore, msr_index);
1043 in_autostore_list = msr_autostore_slot >= 0;
1044 in_vmcs12_store_list = nested_msr_store_list_has_msr(vcpu, msr_index);
1046 if (in_vmcs12_store_list && !in_autostore_list) {
1047 if (autostore->nr == MAX_NR_LOADSTORE_MSRS) {
			 * Emulated VMEntry does not fail here. Instead a less
			 * accurate value will be returned by
			 * nested_vmx_get_vmexit_msr_value() using kvm_get_msr()
			 * instead of reading the value from the vmcs02 VMExit
			 * MSR-store area.
			 */
1055 pr_warn_ratelimited(
1056 "Not enough msr entries in msr_autostore. Can't add msr %x\n",
1060 last = autostore->nr++;
1061 autostore->val[last].index = msr_index;
1062 } else if (!in_vmcs12_store_list && in_autostore_list) {
1063 last = --autostore->nr;
1064 autostore->val[msr_autostore_slot] = autostore->val[last];
1068 static bool nested_cr3_valid(struct kvm_vcpu *vcpu, unsigned long val)
1070 unsigned long invalid_mask;
1072 invalid_mask = (~0ULL) << cpuid_maxphyaddr(vcpu);
1073 return (val & invalid_mask) == 0;
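/*
 * Example (illustrative): with cpuid_maxphyaddr(vcpu) == 36, invalid_mask is
 * ~0ULL << 36, so any CR3 value with address bits 63:36 set is rejected.
 */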
1077 * Returns true if the MMU needs to be sync'd on nested VM-Enter/VM-Exit.
1078 * tl;dr: the MMU needs a sync if L0 is using shadow paging and L1 didn't
1079 * enable VPID for L2 (implying it expects a TLB flush on VMX transitions).
1082 * If EPT is enabled by L0 a sync is never needed:
1083 * - if it is disabled by L1, then L0 is not shadowing L1 or L2 PTEs, there
1084 * cannot be unsync'd SPTEs for either L1 or L2.
 * - if it is also enabled by L1, then L0 doesn't need to sync on
 *   VM-Enter, as VM-Enter isn't required to invalidate guest-physical mappings
1088 * (irrespective of VPID), i.e. L1 can't rely on the (virtual) CPU to flush
1089 * stale guest-physical mappings for L2 from the TLB. And as above, L0 isn't
1090 * shadowing L1 PTEs so there are no unsync'd SPTEs to sync on VM-Exit.
1092 * If EPT is disabled by L0:
1093 * - if VPID is enabled by L1 (for L2), the situation is similar to when L1
1094 * enables EPT: L0 doesn't need to sync as VM-Enter and VM-Exit aren't
1095 * required to invalidate linear mappings (EPT is disabled so there are
1096 * no combined or guest-physical mappings), i.e. L1 can't rely on the
1097 * (virtual) CPU to flush stale linear mappings for either L2 or itself (L1).
1099 * - however if VPID is disabled by L1, then a sync is needed as L1 expects all
1100 * linear mappings (EPT is disabled so there are no combined or guest-physical
1101 * mappings) to be invalidated on both VM-Enter and VM-Exit.
1103 * Note, this logic is subtly different than nested_has_guest_tlb_tag(), which
1104 * additionally checks that L2 has been assigned a VPID (when EPT is disabled).
1105 * Whether or not L2 has been assigned a VPID by L0 is irrelevant with respect
1106 * to L1's expectations, e.g. L0 needs to invalidate hardware TLB entries if L2
1107 * doesn't have a unique VPID to prevent reusing L1's entries (assuming L1 has
1108 * been assigned a VPID), but L0 doesn't need to do a MMU sync because L1
1109 * doesn't expect stale (virtual) TLB entries to be flushed, i.e. L1 doesn't
1110 * know that L0 will flush the TLB and so L1 will do INVVPID as needed to flush
1111 * stale TLB entries, at which point L0 will sync L2's MMU.
1113 static bool nested_vmx_transition_mmu_sync(struct kvm_vcpu *vcpu)
1115 return !enable_ept && !nested_cpu_has_vpid(get_vmcs12(vcpu));
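/*
 * Summary table (added for clarity, derived from the comment above):
 *
 *	L0 EPT enabled,  any VPID12      -> no sync needed
 *	L0 EPT disabled, VPID12 enabled  -> no sync needed
 *	L0 EPT disabled, VPID12 disabled -> sync (L1 expects a full flush)
 */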
1119 * Load guest's/host's cr3 at nested entry/exit. @nested_ept is true if we are
1120 * emulating VM-Entry into a guest with EPT enabled. On failure, the expected
1121 * Exit Qualification (for a VM-Entry consistency check VM-Exit) is assigned to
1122 * @entry_failure_code.
1124 static int nested_vmx_load_cr3(struct kvm_vcpu *vcpu, unsigned long cr3, bool nested_ept,
1125 enum vm_entry_failure_code *entry_failure_code)
1127 if (CC(!nested_cr3_valid(vcpu, cr3))) {
1128 *entry_failure_code = ENTRY_FAIL_DEFAULT;
1133 * If PAE paging and EPT are both on, CR3 is not used by the CPU and
1134 * must not be dereferenced.
1136 if (!nested_ept && is_pae_paging(vcpu) &&
1137 (cr3 != kvm_read_cr3(vcpu) || pdptrs_changed(vcpu))) {
1138 if (CC(!load_pdptrs(vcpu, vcpu->arch.walk_mmu, cr3))) {
1139 *entry_failure_code = ENTRY_FAIL_PDPTE;
1145 * Unconditionally skip the TLB flush on fast CR3 switch, all TLB
1146 * flushes are handled by nested_vmx_transition_tlb_flush().
1149 kvm_mmu_new_pgd(vcpu, cr3, true, true);
1152 * A TLB flush on VM-Enter/VM-Exit flushes all linear mappings
1153 * across all PCIDs, i.e. all PGDs need to be synchronized.
1154 * See nested_vmx_transition_mmu_sync() for more details.
1156 if (nested_vmx_transition_mmu_sync(vcpu))
1157 kvm_make_request(KVM_REQ_TLB_FLUSH_GUEST, vcpu);
1160 vcpu->arch.cr3 = cr3;
1161 kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);
1163 kvm_init_mmu(vcpu, false);
/*
 * Returns true if KVM is able to configure the CPU to tag TLB entries
 * populated by L2 differently than TLB entries populated
 * by L1.
1173 * If L0 uses EPT, L1 and L2 run with different EPTP because
1174 * guest_mode is part of kvm_mmu_page_role. Thus, TLB entries
1175 * are tagged with different EPTP.
1177 * If L1 uses VPID and we allocated a vpid02, TLB entries are tagged
1178 * with different VPID (L1 entries are tagged with vmx->vpid
1179 * while L2 entries are tagged with vmx->nested.vpid02).
1181 static bool nested_has_guest_tlb_tag(struct kvm_vcpu *vcpu)
1183 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
1185 return enable_ept ||
1186 (nested_cpu_has_vpid(vmcs12) && to_vmx(vcpu)->nested.vpid02);
1189 static void nested_vmx_transition_tlb_flush(struct kvm_vcpu *vcpu,
1190 struct vmcs12 *vmcs12,
1193 struct vcpu_vmx *vmx = to_vmx(vcpu);
1196 * If VPID is disabled, linear and combined mappings are flushed on
1197 * VM-Enter/VM-Exit, and guest-physical mappings are valid only for
1198 * their associated EPTP.
1204 * If vmcs12 doesn't use VPID, L1 expects linear and combined mappings
1205 * for *all* contexts to be flushed on VM-Enter/VM-Exit.
	 * If VPID is enabled and used by vmcs12, but L2 does not have a unique
1208 * TLB tag (ASID), i.e. EPT is disabled and KVM was unable to allocate
1209 * a VPID for L2, flush the current context as the effective ASID is
1210 * common to both L1 and L2.
1212 * Defer the flush so that it runs after vmcs02.EPTP has been set by
1213 * KVM_REQ_LOAD_MMU_PGD (if nested EPT is enabled) and to avoid
1214 * redundant flushes further down the nested pipeline.
1216 * If a TLB flush isn't required due to any of the above, and vpid12 is
	 * changing then the new "virtual" VPID (vpid12) will reuse the same
	 * "real" VPID (vpid02), and so needs to be sync'd. There is no direct
	 * mapping between vpid02 and vpid12; vpid02 is per-vCPU and reused for
	 * all nested vCPUs.
	 */
1222 if (!nested_cpu_has_vpid(vmcs12)) {
1223 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
1224 } else if (!nested_has_guest_tlb_tag(vcpu)) {
1225 kvm_make_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu);
1226 } else if (is_vmenter &&
1227 vmcs12->virtual_processor_id != vmx->nested.last_vpid) {
1228 vmx->nested.last_vpid = vmcs12->virtual_processor_id;
1229 vpid_sync_context(nested_get_vpid02(vcpu));
static bool is_bitwise_subset(u64 superset, u64 subset, u64 mask)
{
	superset &= mask;
	subset &= mask;

	return (superset | subset) == superset;
}
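/*
 * Worked example (illustrative): with mask == ~0ULL, superset == 0b1010 and
 * subset == 0b0010, (superset | subset) == superset so the subset relation
 * holds; subset == 0b0100 would fail it.  The mask restricts which bits
 * participate in the comparison.
 */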
1241 static int vmx_restore_vmx_basic(struct vcpu_vmx *vmx, u64 data)
1243 const u64 feature_and_reserved =
1244 /* feature (except bit 48; see below) */
1245 BIT_ULL(49) | BIT_ULL(54) | BIT_ULL(55) |
		/* reserved */
		BIT_ULL(31) | GENMASK_ULL(47, 45) | GENMASK_ULL(63, 56);
1248 u64 vmx_basic = vmcs_config.nested.basic;
1250 if (!is_bitwise_subset(vmx_basic, data, feature_and_reserved))
1254 * KVM does not emulate a version of VMX that constrains physical
1255 * addresses of VMX structures (e.g. VMCS) to 32-bits.
1257 if (data & BIT_ULL(48))
1260 if (vmx_basic_vmcs_revision_id(vmx_basic) !=
1261 vmx_basic_vmcs_revision_id(data))
1264 if (vmx_basic_vmcs_size(vmx_basic) > vmx_basic_vmcs_size(data))
1267 vmx->nested.msrs.basic = data;
1271 static void vmx_get_control_msr(struct nested_vmx_msrs *msrs, u32 msr_index,
1272 u32 **low, u32 **high)
1274 switch (msr_index) {
1275 case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
1276 *low = &msrs->pinbased_ctls_low;
1277 *high = &msrs->pinbased_ctls_high;
1279 case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
1280 *low = &msrs->procbased_ctls_low;
1281 *high = &msrs->procbased_ctls_high;
1283 case MSR_IA32_VMX_TRUE_EXIT_CTLS:
1284 *low = &msrs->exit_ctls_low;
1285 *high = &msrs->exit_ctls_high;
1287 case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
1288 *low = &msrs->entry_ctls_low;
1289 *high = &msrs->entry_ctls_high;
1291 case MSR_IA32_VMX_PROCBASED_CTLS2:
1292 *low = &msrs->secondary_ctls_low;
1293 *high = &msrs->secondary_ctls_high;
1301 vmx_restore_control_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
1306 vmx_get_control_msr(&vmcs_config.nested, msr_index, &lowp, &highp);
1308 supported = vmx_control_msr(*lowp, *highp);
1310 /* Check must-be-1 bits are still 1. */
1311 if (!is_bitwise_subset(data, supported, GENMASK_ULL(31, 0)))
1314 /* Check must-be-0 bits are still 0. */
1315 if (!is_bitwise_subset(supported, data, GENMASK_ULL(63, 32)))
1318 vmx_get_control_msr(&vmx->nested.msrs, msr_index, &lowp, &highp);
1320 *highp = data >> 32;
1324 static int vmx_restore_vmx_misc(struct vcpu_vmx *vmx, u64 data)
1326 const u64 feature_and_reserved_bits =
1328 BIT_ULL(5) | GENMASK_ULL(8, 6) | BIT_ULL(14) | BIT_ULL(15) |
1329 BIT_ULL(28) | BIT_ULL(29) | BIT_ULL(30) |
1331 GENMASK_ULL(13, 9) | BIT_ULL(31);
1332 u64 vmx_misc = vmx_control_msr(vmcs_config.nested.misc_low,
1333 vmcs_config.nested.misc_high);
1335 if (!is_bitwise_subset(vmx_misc, data, feature_and_reserved_bits))
1338 if ((vmx->nested.msrs.pinbased_ctls_high &
1339 PIN_BASED_VMX_PREEMPTION_TIMER) &&
1340 vmx_misc_preemption_timer_rate(data) !=
1341 vmx_misc_preemption_timer_rate(vmx_misc))
1344 if (vmx_misc_cr3_count(data) > vmx_misc_cr3_count(vmx_misc))
1347 if (vmx_misc_max_msr(data) > vmx_misc_max_msr(vmx_misc))
1350 if (vmx_misc_mseg_revid(data) != vmx_misc_mseg_revid(vmx_misc))
1353 vmx->nested.msrs.misc_low = data;
1354 vmx->nested.msrs.misc_high = data >> 32;
1359 static int vmx_restore_vmx_ept_vpid_cap(struct vcpu_vmx *vmx, u64 data)
1361 u64 vmx_ept_vpid_cap = vmx_control_msr(vmcs_config.nested.ept_caps,
1362 vmcs_config.nested.vpid_caps);
1364 /* Every bit is either reserved or a feature bit. */
1365 if (!is_bitwise_subset(vmx_ept_vpid_cap, data, -1ULL))
1368 vmx->nested.msrs.ept_caps = data;
1369 vmx->nested.msrs.vpid_caps = data >> 32;
1373 static u64 *vmx_get_fixed0_msr(struct nested_vmx_msrs *msrs, u32 msr_index)
1375 switch (msr_index) {
1376 case MSR_IA32_VMX_CR0_FIXED0:
1377 return &msrs->cr0_fixed0;
1378 case MSR_IA32_VMX_CR4_FIXED0:
1379 return &msrs->cr4_fixed0;
1385 static int vmx_restore_fixed0_msr(struct vcpu_vmx *vmx, u32 msr_index, u64 data)
1387 const u64 *msr = vmx_get_fixed0_msr(&vmcs_config.nested, msr_index);
	/*
	 * 1 bits (which indicate bits which "must be 1" during VMX operation)
	 * must be 1 in the restored value.
1393 if (!is_bitwise_subset(data, *msr, -1ULL))
1396 *vmx_get_fixed0_msr(&vmx->nested.msrs, msr_index) = data;
1401 * Called when userspace is restoring VMX MSRs.
1403 * Returns 0 on success, non-0 otherwise.
1405 int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
1407 struct vcpu_vmx *vmx = to_vmx(vcpu);
1410 * Don't allow changes to the VMX capability MSRs while the vCPU
1411 * is in VMX operation.
1413 if (vmx->nested.vmxon)
1416 switch (msr_index) {
1417 case MSR_IA32_VMX_BASIC:
1418 return vmx_restore_vmx_basic(vmx, data);
1419 case MSR_IA32_VMX_PINBASED_CTLS:
1420 case MSR_IA32_VMX_PROCBASED_CTLS:
1421 case MSR_IA32_VMX_EXIT_CTLS:
1422 case MSR_IA32_VMX_ENTRY_CTLS:
1424 * The "non-true" VMX capability MSRs are generated from the
1425 * "true" MSRs, so we do not support restoring them directly.
1427 * If userspace wants to emulate VMX_BASIC[55]=0, userspace
1428 * should restore the "true" MSRs with the must-be-1 bits
1429 * set according to the SDM Vol 3. A.2 "RESERVED CONTROLS AND
1430 * DEFAULT SETTINGS".
1433 case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
1434 case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
1435 case MSR_IA32_VMX_TRUE_EXIT_CTLS:
1436 case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
1437 case MSR_IA32_VMX_PROCBASED_CTLS2:
1438 return vmx_restore_control_msr(vmx, msr_index, data);
1439 case MSR_IA32_VMX_MISC:
1440 return vmx_restore_vmx_misc(vmx, data);
1441 case MSR_IA32_VMX_CR0_FIXED0:
1442 case MSR_IA32_VMX_CR4_FIXED0:
1443 return vmx_restore_fixed0_msr(vmx, msr_index, data);
1444 case MSR_IA32_VMX_CR0_FIXED1:
1445 case MSR_IA32_VMX_CR4_FIXED1:
1447 * These MSRs are generated based on the vCPU's CPUID, so we
1448 * do not support restoring them directly.
1451 case MSR_IA32_VMX_EPT_VPID_CAP:
1452 return vmx_restore_vmx_ept_vpid_cap(vmx, data);
1453 case MSR_IA32_VMX_VMCS_ENUM:
1454 vmx->nested.msrs.vmcs_enum = data;
1456 case MSR_IA32_VMX_VMFUNC:
1457 if (data & ~vmcs_config.nested.vmfunc_controls)
1459 vmx->nested.msrs.vmfunc_controls = data;
1463 * The rest of the VMX capability MSRs do not support restore.
1469 /* Returns 0 on success, non-0 otherwise. */
1470 int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata)
1472 switch (msr_index) {
1473 case MSR_IA32_VMX_BASIC:
1474 *pdata = msrs->basic;
1476 case MSR_IA32_VMX_TRUE_PINBASED_CTLS:
1477 case MSR_IA32_VMX_PINBASED_CTLS:
1478 *pdata = vmx_control_msr(
1479 msrs->pinbased_ctls_low,
1480 msrs->pinbased_ctls_high);
1481 if (msr_index == MSR_IA32_VMX_PINBASED_CTLS)
1482 *pdata |= PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
1484 case MSR_IA32_VMX_TRUE_PROCBASED_CTLS:
1485 case MSR_IA32_VMX_PROCBASED_CTLS:
1486 *pdata = vmx_control_msr(
1487 msrs->procbased_ctls_low,
1488 msrs->procbased_ctls_high);
1489 if (msr_index == MSR_IA32_VMX_PROCBASED_CTLS)
1490 *pdata |= CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
1492 case MSR_IA32_VMX_TRUE_EXIT_CTLS:
1493 case MSR_IA32_VMX_EXIT_CTLS:
1494 *pdata = vmx_control_msr(
1495 msrs->exit_ctls_low,
1496 msrs->exit_ctls_high);
1497 if (msr_index == MSR_IA32_VMX_EXIT_CTLS)
1498 *pdata |= VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
1500 case MSR_IA32_VMX_TRUE_ENTRY_CTLS:
1501 case MSR_IA32_VMX_ENTRY_CTLS:
1502 *pdata = vmx_control_msr(
1503 msrs->entry_ctls_low,
1504 msrs->entry_ctls_high);
1505 if (msr_index == MSR_IA32_VMX_ENTRY_CTLS)
1506 *pdata |= VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
1508 case MSR_IA32_VMX_MISC:
1509 *pdata = vmx_control_msr(
1513 case MSR_IA32_VMX_CR0_FIXED0:
1514 *pdata = msrs->cr0_fixed0;
1516 case MSR_IA32_VMX_CR0_FIXED1:
1517 *pdata = msrs->cr0_fixed1;
1519 case MSR_IA32_VMX_CR4_FIXED0:
1520 *pdata = msrs->cr4_fixed0;
1522 case MSR_IA32_VMX_CR4_FIXED1:
1523 *pdata = msrs->cr4_fixed1;
1525 case MSR_IA32_VMX_VMCS_ENUM:
1526 *pdata = msrs->vmcs_enum;
1528 case MSR_IA32_VMX_PROCBASED_CTLS2:
1529 *pdata = vmx_control_msr(
1530 msrs->secondary_ctls_low,
1531 msrs->secondary_ctls_high);
1533 case MSR_IA32_VMX_EPT_VPID_CAP:
1534 *pdata = msrs->ept_caps |
1535 ((u64)msrs->vpid_caps << 32);
1537 case MSR_IA32_VMX_VMFUNC:
1538 *pdata = msrs->vmfunc_controls;
1548 * Copy the writable VMCS shadow fields back to the VMCS12, in case they have
1549 * been modified by the L1 guest. Note, "writable" in this context means
1550 * "writable by the guest", i.e. tagged SHADOW_FIELD_RW; the set of
1551 * fields tagged SHADOW_FIELD_RO may or may not align with the "read-only"
1552 * VM-exit information fields (which are actually writable if the vCPU is
1553 * configured to support "VMWRITE to any supported field in the VMCS").
1555 static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
1557 struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
1558 struct vmcs12 *vmcs12 = get_vmcs12(&vmx->vcpu);
1559 struct shadow_vmcs_field field;
1563 if (WARN_ON(!shadow_vmcs))
1568 vmcs_load(shadow_vmcs);
1570 for (i = 0; i < max_shadow_read_write_fields; i++) {
1571 field = shadow_read_write_fields[i];
1572 val = __vmcs_readl(field.encoding);
1573 vmcs12_write_any(vmcs12, field.encoding, field.offset, val);
1576 vmcs_clear(shadow_vmcs);
1577 vmcs_load(vmx->loaded_vmcs->vmcs);
1582 static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
1584 const struct shadow_vmcs_field *fields[] = {
1585 shadow_read_write_fields,
1586 shadow_read_only_fields
1588 const int max_fields[] = {
1589 max_shadow_read_write_fields,
1590 max_shadow_read_only_fields
1592 struct vmcs *shadow_vmcs = vmx->vmcs01.shadow_vmcs;
1593 struct vmcs12 *vmcs12 = get_vmcs12(&vmx->vcpu);
1594 struct shadow_vmcs_field field;
1598 if (WARN_ON(!shadow_vmcs))
1601 vmcs_load(shadow_vmcs);
1603 for (q = 0; q < ARRAY_SIZE(fields); q++) {
1604 for (i = 0; i < max_fields[q]; i++) {
1605 field = fields[q][i];
1606 val = vmcs12_read_any(vmcs12, field.encoding,
1608 __vmcs_writel(field.encoding, val);
1612 vmcs_clear(shadow_vmcs);
1613 vmcs_load(vmx->loaded_vmcs->vmcs);
1616 static int copy_enlightened_to_vmcs12(struct vcpu_vmx *vmx)
1618 struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
1619 struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;
1621 /* HV_VMX_ENLIGHTENED_CLEAN_FIELD_NONE */
1622 vmcs12->tpr_threshold = evmcs->tpr_threshold;
1623 vmcs12->guest_rip = evmcs->guest_rip;
1625 if (unlikely(!(evmcs->hv_clean_fields &
1626 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_BASIC))) {
1627 vmcs12->guest_rsp = evmcs->guest_rsp;
1628 vmcs12->guest_rflags = evmcs->guest_rflags;
1629 vmcs12->guest_interruptibility_info =
1630 evmcs->guest_interruptibility_info;
1633 if (unlikely(!(evmcs->hv_clean_fields &
1634 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_PROC))) {
1635 vmcs12->cpu_based_vm_exec_control =
1636 evmcs->cpu_based_vm_exec_control;
1639 if (unlikely(!(evmcs->hv_clean_fields &
1640 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EXCPN))) {
1641 vmcs12->exception_bitmap = evmcs->exception_bitmap;
1644 if (unlikely(!(evmcs->hv_clean_fields &
1645 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_ENTRY))) {
1646 vmcs12->vm_entry_controls = evmcs->vm_entry_controls;
1649 if (unlikely(!(evmcs->hv_clean_fields &
1650 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_EVENT))) {
1651 vmcs12->vm_entry_intr_info_field =
1652 evmcs->vm_entry_intr_info_field;
1653 vmcs12->vm_entry_exception_error_code =
1654 evmcs->vm_entry_exception_error_code;
1655 vmcs12->vm_entry_instruction_len =
1656 evmcs->vm_entry_instruction_len;
1659 if (unlikely(!(evmcs->hv_clean_fields &
1660 HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_GRP1))) {
1661 vmcs12->host_ia32_pat = evmcs->host_ia32_pat;
1662 vmcs12->host_ia32_efer = evmcs->host_ia32_efer;
1663 vmcs12->host_cr0 = evmcs->host_cr0;
1664 vmcs12->host_cr3 = evmcs->host_cr3;
1665 vmcs12->host_cr4 = evmcs->host_cr4;
1666 vmcs12->host_ia32_sysenter_esp = evmcs->host_ia32_sysenter_esp;
1667 vmcs12->host_ia32_sysenter_eip = evmcs->host_ia32_sysenter_eip;
1668 vmcs12->host_rip = evmcs->host_rip;
1669 vmcs12->host_ia32_sysenter_cs = evmcs->host_ia32_sysenter_cs;
1670 vmcs12->host_es_selector = evmcs->host_es_selector;
1671 vmcs12->host_cs_selector = evmcs->host_cs_selector;
1672 vmcs12->host_ss_selector = evmcs->host_ss_selector;
1673 vmcs12->host_ds_selector = evmcs->host_ds_selector;
1674 vmcs12->host_fs_selector = evmcs->host_fs_selector;
1675 vmcs12->host_gs_selector = evmcs->host_gs_selector;
1676 vmcs12->host_tr_selector = evmcs->host_tr_selector;
1679 if (unlikely(!(evmcs->hv_clean_fields &
1680 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP1))) {
1681 vmcs12->pin_based_vm_exec_control =
1682 evmcs->pin_based_vm_exec_control;
1683 vmcs12->vm_exit_controls = evmcs->vm_exit_controls;
1684 vmcs12->secondary_vm_exec_control =
1685 evmcs->secondary_vm_exec_control;
1688 if (unlikely(!(evmcs->hv_clean_fields &
1689 HV_VMX_ENLIGHTENED_CLEAN_FIELD_IO_BITMAP))) {
1690 vmcs12->io_bitmap_a = evmcs->io_bitmap_a;
1691 vmcs12->io_bitmap_b = evmcs->io_bitmap_b;
1694 if (unlikely(!(evmcs->hv_clean_fields &
1695 HV_VMX_ENLIGHTENED_CLEAN_FIELD_MSR_BITMAP))) {
1696 vmcs12->msr_bitmap = evmcs->msr_bitmap;
1699 if (unlikely(!(evmcs->hv_clean_fields &
1700 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2))) {
1701 vmcs12->guest_es_base = evmcs->guest_es_base;
1702 vmcs12->guest_cs_base = evmcs->guest_cs_base;
1703 vmcs12->guest_ss_base = evmcs->guest_ss_base;
1704 vmcs12->guest_ds_base = evmcs->guest_ds_base;
1705 vmcs12->guest_fs_base = evmcs->guest_fs_base;
1706 vmcs12->guest_gs_base = evmcs->guest_gs_base;
1707 vmcs12->guest_ldtr_base = evmcs->guest_ldtr_base;
1708 vmcs12->guest_tr_base = evmcs->guest_tr_base;
1709 vmcs12->guest_gdtr_base = evmcs->guest_gdtr_base;
1710 vmcs12->guest_idtr_base = evmcs->guest_idtr_base;
1711 vmcs12->guest_es_limit = evmcs->guest_es_limit;
1712 vmcs12->guest_cs_limit = evmcs->guest_cs_limit;
1713 vmcs12->guest_ss_limit = evmcs->guest_ss_limit;
1714 vmcs12->guest_ds_limit = evmcs->guest_ds_limit;
1715 vmcs12->guest_fs_limit = evmcs->guest_fs_limit;
1716 vmcs12->guest_gs_limit = evmcs->guest_gs_limit;
1717 vmcs12->guest_ldtr_limit = evmcs->guest_ldtr_limit;
1718 vmcs12->guest_tr_limit = evmcs->guest_tr_limit;
1719 vmcs12->guest_gdtr_limit = evmcs->guest_gdtr_limit;
1720 vmcs12->guest_idtr_limit = evmcs->guest_idtr_limit;
1721 vmcs12->guest_es_ar_bytes = evmcs->guest_es_ar_bytes;
1722 vmcs12->guest_cs_ar_bytes = evmcs->guest_cs_ar_bytes;
1723 vmcs12->guest_ss_ar_bytes = evmcs->guest_ss_ar_bytes;
1724 vmcs12->guest_ds_ar_bytes = evmcs->guest_ds_ar_bytes;
1725 vmcs12->guest_fs_ar_bytes = evmcs->guest_fs_ar_bytes;
1726 vmcs12->guest_gs_ar_bytes = evmcs->guest_gs_ar_bytes;
1727 vmcs12->guest_ldtr_ar_bytes = evmcs->guest_ldtr_ar_bytes;
1728 vmcs12->guest_tr_ar_bytes = evmcs->guest_tr_ar_bytes;
1729 vmcs12->guest_es_selector = evmcs->guest_es_selector;
1730 vmcs12->guest_cs_selector = evmcs->guest_cs_selector;
1731 vmcs12->guest_ss_selector = evmcs->guest_ss_selector;
1732 vmcs12->guest_ds_selector = evmcs->guest_ds_selector;
1733 vmcs12->guest_fs_selector = evmcs->guest_fs_selector;
1734 vmcs12->guest_gs_selector = evmcs->guest_gs_selector;
1735 vmcs12->guest_ldtr_selector = evmcs->guest_ldtr_selector;
1736 vmcs12->guest_tr_selector = evmcs->guest_tr_selector;
1739 if (unlikely(!(evmcs->hv_clean_fields &
1740 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_GRP2))) {
1741 vmcs12->tsc_offset = evmcs->tsc_offset;
1742 vmcs12->virtual_apic_page_addr = evmcs->virtual_apic_page_addr;
1743 vmcs12->xss_exit_bitmap = evmcs->xss_exit_bitmap;
1746 if (unlikely(!(evmcs->hv_clean_fields &
1747 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CRDR))) {
1748 vmcs12->cr0_guest_host_mask = evmcs->cr0_guest_host_mask;
1749 vmcs12->cr4_guest_host_mask = evmcs->cr4_guest_host_mask;
1750 vmcs12->cr0_read_shadow = evmcs->cr0_read_shadow;
1751 vmcs12->cr4_read_shadow = evmcs->cr4_read_shadow;
1752 vmcs12->guest_cr0 = evmcs->guest_cr0;
1753 vmcs12->guest_cr3 = evmcs->guest_cr3;
1754 vmcs12->guest_cr4 = evmcs->guest_cr4;
1755 vmcs12->guest_dr7 = evmcs->guest_dr7;
1758 if (unlikely(!(evmcs->hv_clean_fields &
1759 HV_VMX_ENLIGHTENED_CLEAN_FIELD_HOST_POINTER))) {
1760 vmcs12->host_fs_base = evmcs->host_fs_base;
1761 vmcs12->host_gs_base = evmcs->host_gs_base;
1762 vmcs12->host_tr_base = evmcs->host_tr_base;
1763 vmcs12->host_gdtr_base = evmcs->host_gdtr_base;
1764 vmcs12->host_idtr_base = evmcs->host_idtr_base;
1765 vmcs12->host_rsp = evmcs->host_rsp;
1768 if (unlikely(!(evmcs->hv_clean_fields &
1769 HV_VMX_ENLIGHTENED_CLEAN_FIELD_CONTROL_XLAT))) {
1770 vmcs12->ept_pointer = evmcs->ept_pointer;
1771 vmcs12->virtual_processor_id = evmcs->virtual_processor_id;
1774 if (unlikely(!(evmcs->hv_clean_fields &
1775 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1))) {
1776 vmcs12->vmcs_link_pointer = evmcs->vmcs_link_pointer;
1777 vmcs12->guest_ia32_debugctl = evmcs->guest_ia32_debugctl;
1778 vmcs12->guest_ia32_pat = evmcs->guest_ia32_pat;
1779 vmcs12->guest_ia32_efer = evmcs->guest_ia32_efer;
1780 vmcs12->guest_pdptr0 = evmcs->guest_pdptr0;
1781 vmcs12->guest_pdptr1 = evmcs->guest_pdptr1;
1782 vmcs12->guest_pdptr2 = evmcs->guest_pdptr2;
1783 vmcs12->guest_pdptr3 = evmcs->guest_pdptr3;
1784 vmcs12->guest_pending_dbg_exceptions =
1785 evmcs->guest_pending_dbg_exceptions;
1786 vmcs12->guest_sysenter_esp = evmcs->guest_sysenter_esp;
1787 vmcs12->guest_sysenter_eip = evmcs->guest_sysenter_eip;
1788 vmcs12->guest_bndcfgs = evmcs->guest_bndcfgs;
1789 vmcs12->guest_activity_state = evmcs->guest_activity_state;
1790 vmcs12->guest_sysenter_cs = evmcs->guest_sysenter_cs;
1795 * vmcs12->vm_exit_msr_store_addr = evmcs->vm_exit_msr_store_addr;
1796 * vmcs12->vm_exit_msr_load_addr = evmcs->vm_exit_msr_load_addr;
1797 * vmcs12->vm_entry_msr_load_addr = evmcs->vm_entry_msr_load_addr;
1798 * vmcs12->page_fault_error_code_mask =
1799 * evmcs->page_fault_error_code_mask;
1800 * vmcs12->page_fault_error_code_match =
1801 * evmcs->page_fault_error_code_match;
1802 * vmcs12->cr3_target_count = evmcs->cr3_target_count;
1803 * vmcs12->vm_exit_msr_store_count = evmcs->vm_exit_msr_store_count;
1804 * vmcs12->vm_exit_msr_load_count = evmcs->vm_exit_msr_load_count;
1805 * vmcs12->vm_entry_msr_load_count = evmcs->vm_entry_msr_load_count;
1810 * vmcs12->guest_physical_address = evmcs->guest_physical_address;
1811 * vmcs12->vm_instruction_error = evmcs->vm_instruction_error;
1812 * vmcs12->vm_exit_reason = evmcs->vm_exit_reason;
1813 * vmcs12->vm_exit_intr_info = evmcs->vm_exit_intr_info;
1814 * vmcs12->vm_exit_intr_error_code = evmcs->vm_exit_intr_error_code;
1815 * vmcs12->idt_vectoring_info_field = evmcs->idt_vectoring_info_field;
1816 * vmcs12->idt_vectoring_error_code = evmcs->idt_vectoring_error_code;
1817 * vmcs12->vm_exit_instruction_len = evmcs->vm_exit_instruction_len;
1818 * vmcs12->vmx_instruction_info = evmcs->vmx_instruction_info;
1819 * vmcs12->exit_qualification = evmcs->exit_qualification;
1820 * vmcs12->guest_linear_address = evmcs->guest_linear_address;
1822 * Not present in struct vmcs12:
1823 * vmcs12->exit_io_instruction_ecx = evmcs->exit_io_instruction_ecx;
1824 * vmcs12->exit_io_instruction_esi = evmcs->exit_io_instruction_esi;
1825 * vmcs12->exit_io_instruction_edi = evmcs->exit_io_instruction_edi;
1826 * vmcs12->exit_io_instruction_eip = evmcs->exit_io_instruction_eip;
1832 static int copy_vmcs12_to_enlightened(struct vcpu_vmx *vmx)
1834 struct vmcs12 *vmcs12 = vmx->nested.cached_vmcs12;
1835 struct hv_enlightened_vmcs *evmcs = vmx->nested.hv_evmcs;
1838 * Should not be changed by KVM:
1840 * evmcs->host_es_selector = vmcs12->host_es_selector;
1841 * evmcs->host_cs_selector = vmcs12->host_cs_selector;
1842 * evmcs->host_ss_selector = vmcs12->host_ss_selector;
1843 * evmcs->host_ds_selector = vmcs12->host_ds_selector;
1844 * evmcs->host_fs_selector = vmcs12->host_fs_selector;
1845 * evmcs->host_gs_selector = vmcs12->host_gs_selector;
1846 * evmcs->host_tr_selector = vmcs12->host_tr_selector;
1847 * evmcs->host_ia32_pat = vmcs12->host_ia32_pat;
1848 * evmcs->host_ia32_efer = vmcs12->host_ia32_efer;
1849 * evmcs->host_cr0 = vmcs12->host_cr0;
1850 * evmcs->host_cr3 = vmcs12->host_cr3;
1851 * evmcs->host_cr4 = vmcs12->host_cr4;
1852 * evmcs->host_ia32_sysenter_esp = vmcs12->host_ia32_sysenter_esp;
1853 * evmcs->host_ia32_sysenter_eip = vmcs12->host_ia32_sysenter_eip;
1854 * evmcs->host_rip = vmcs12->host_rip;
1855 * evmcs->host_ia32_sysenter_cs = vmcs12->host_ia32_sysenter_cs;
1856 * evmcs->host_fs_base = vmcs12->host_fs_base;
1857 * evmcs->host_gs_base = vmcs12->host_gs_base;
1858 * evmcs->host_tr_base = vmcs12->host_tr_base;
1859 * evmcs->host_gdtr_base = vmcs12->host_gdtr_base;
1860 * evmcs->host_idtr_base = vmcs12->host_idtr_base;
1861 * evmcs->host_rsp = vmcs12->host_rsp;
1862 * sync_vmcs02_to_vmcs12() doesn't read these:
1863 * evmcs->io_bitmap_a = vmcs12->io_bitmap_a;
1864 * evmcs->io_bitmap_b = vmcs12->io_bitmap_b;
1865 * evmcs->msr_bitmap = vmcs12->msr_bitmap;
1866 * evmcs->ept_pointer = vmcs12->ept_pointer;
1867 * evmcs->xss_exit_bitmap = vmcs12->xss_exit_bitmap;
1868 * evmcs->vm_exit_msr_store_addr = vmcs12->vm_exit_msr_store_addr;
1869 * evmcs->vm_exit_msr_load_addr = vmcs12->vm_exit_msr_load_addr;
1870 * evmcs->vm_entry_msr_load_addr = vmcs12->vm_entry_msr_load_addr;
1871 * evmcs->tpr_threshold = vmcs12->tpr_threshold;
1872 * evmcs->virtual_processor_id = vmcs12->virtual_processor_id;
1873 * evmcs->exception_bitmap = vmcs12->exception_bitmap;
1874 * evmcs->vmcs_link_pointer = vmcs12->vmcs_link_pointer;
1875 * evmcs->pin_based_vm_exec_control = vmcs12->pin_based_vm_exec_control;
1876 * evmcs->vm_exit_controls = vmcs12->vm_exit_controls;
1877 * evmcs->secondary_vm_exec_control = vmcs12->secondary_vm_exec_control;
1878 * evmcs->page_fault_error_code_mask =
1879 * vmcs12->page_fault_error_code_mask;
1880 * evmcs->page_fault_error_code_match =
1881 * vmcs12->page_fault_error_code_match;
1882 * evmcs->cr3_target_count = vmcs12->cr3_target_count;
1883 * evmcs->virtual_apic_page_addr = vmcs12->virtual_apic_page_addr;
1884 * evmcs->tsc_offset = vmcs12->tsc_offset;
1885 * evmcs->guest_ia32_debugctl = vmcs12->guest_ia32_debugctl;
1886 * evmcs->cr0_guest_host_mask = vmcs12->cr0_guest_host_mask;
1887 * evmcs->cr4_guest_host_mask = vmcs12->cr4_guest_host_mask;
1888 * evmcs->cr0_read_shadow = vmcs12->cr0_read_shadow;
1889 * evmcs->cr4_read_shadow = vmcs12->cr4_read_shadow;
1890 * evmcs->vm_exit_msr_store_count = vmcs12->vm_exit_msr_store_count;
1891 * evmcs->vm_exit_msr_load_count = vmcs12->vm_exit_msr_load_count;
1892 * evmcs->vm_entry_msr_load_count = vmcs12->vm_entry_msr_load_count;
1894 * Not present in struct vmcs12:
1895 * evmcs->exit_io_instruction_ecx = vmcs12->exit_io_instruction_ecx;
1896 * evmcs->exit_io_instruction_esi = vmcs12->exit_io_instruction_esi;
1897 * evmcs->exit_io_instruction_edi = vmcs12->exit_io_instruction_edi;
1898 * evmcs->exit_io_instruction_eip = vmcs12->exit_io_instruction_eip;
1901 evmcs->guest_es_selector = vmcs12->guest_es_selector;
1902 evmcs->guest_cs_selector = vmcs12->guest_cs_selector;
1903 evmcs->guest_ss_selector = vmcs12->guest_ss_selector;
1904 evmcs->guest_ds_selector = vmcs12->guest_ds_selector;
1905 evmcs->guest_fs_selector = vmcs12->guest_fs_selector;
1906 evmcs->guest_gs_selector = vmcs12->guest_gs_selector;
1907 evmcs->guest_ldtr_selector = vmcs12->guest_ldtr_selector;
1908 evmcs->guest_tr_selector = vmcs12->guest_tr_selector;
1910 evmcs->guest_es_limit = vmcs12->guest_es_limit;
1911 evmcs->guest_cs_limit = vmcs12->guest_cs_limit;
1912 evmcs->guest_ss_limit = vmcs12->guest_ss_limit;
1913 evmcs->guest_ds_limit = vmcs12->guest_ds_limit;
1914 evmcs->guest_fs_limit = vmcs12->guest_fs_limit;
1915 evmcs->guest_gs_limit = vmcs12->guest_gs_limit;
1916 evmcs->guest_ldtr_limit = vmcs12->guest_ldtr_limit;
1917 evmcs->guest_tr_limit = vmcs12->guest_tr_limit;
1918 evmcs->guest_gdtr_limit = vmcs12->guest_gdtr_limit;
1919 evmcs->guest_idtr_limit = vmcs12->guest_idtr_limit;
1921 evmcs->guest_es_ar_bytes = vmcs12->guest_es_ar_bytes;
1922 evmcs->guest_cs_ar_bytes = vmcs12->guest_cs_ar_bytes;
1923 evmcs->guest_ss_ar_bytes = vmcs12->guest_ss_ar_bytes;
1924 evmcs->guest_ds_ar_bytes = vmcs12->guest_ds_ar_bytes;
1925 evmcs->guest_fs_ar_bytes = vmcs12->guest_fs_ar_bytes;
1926 evmcs->guest_gs_ar_bytes = vmcs12->guest_gs_ar_bytes;
1927 evmcs->guest_ldtr_ar_bytes = vmcs12->guest_ldtr_ar_bytes;
1928 evmcs->guest_tr_ar_bytes = vmcs12->guest_tr_ar_bytes;
1930 evmcs->guest_es_base = vmcs12->guest_es_base;
1931 evmcs->guest_cs_base = vmcs12->guest_cs_base;
1932 evmcs->guest_ss_base = vmcs12->guest_ss_base;
1933 evmcs->guest_ds_base = vmcs12->guest_ds_base;
1934 evmcs->guest_fs_base = vmcs12->guest_fs_base;
1935 evmcs->guest_gs_base = vmcs12->guest_gs_base;
1936 evmcs->guest_ldtr_base = vmcs12->guest_ldtr_base;
1937 evmcs->guest_tr_base = vmcs12->guest_tr_base;
1938 evmcs->guest_gdtr_base = vmcs12->guest_gdtr_base;
1939 evmcs->guest_idtr_base = vmcs12->guest_idtr_base;
1941 evmcs->guest_ia32_pat = vmcs12->guest_ia32_pat;
1942 evmcs->guest_ia32_efer = vmcs12->guest_ia32_efer;
1944 evmcs->guest_pdptr0 = vmcs12->guest_pdptr0;
1945 evmcs->guest_pdptr1 = vmcs12->guest_pdptr1;
1946 evmcs->guest_pdptr2 = vmcs12->guest_pdptr2;
1947 evmcs->guest_pdptr3 = vmcs12->guest_pdptr3;
1949 evmcs->guest_pending_dbg_exceptions =
1950 vmcs12->guest_pending_dbg_exceptions;
1951 evmcs->guest_sysenter_esp = vmcs12->guest_sysenter_esp;
1952 evmcs->guest_sysenter_eip = vmcs12->guest_sysenter_eip;
1954 evmcs->guest_activity_state = vmcs12->guest_activity_state;
1955 evmcs->guest_sysenter_cs = vmcs12->guest_sysenter_cs;
1957 evmcs->guest_cr0 = vmcs12->guest_cr0;
1958 evmcs->guest_cr3 = vmcs12->guest_cr3;
1959 evmcs->guest_cr4 = vmcs12->guest_cr4;
1960 evmcs->guest_dr7 = vmcs12->guest_dr7;
1962 evmcs->guest_physical_address = vmcs12->guest_physical_address;
1964 evmcs->vm_instruction_error = vmcs12->vm_instruction_error;
1965 evmcs->vm_exit_reason = vmcs12->vm_exit_reason;
1966 evmcs->vm_exit_intr_info = vmcs12->vm_exit_intr_info;
1967 evmcs->vm_exit_intr_error_code = vmcs12->vm_exit_intr_error_code;
1968 evmcs->idt_vectoring_info_field = vmcs12->idt_vectoring_info_field;
1969 evmcs->idt_vectoring_error_code = vmcs12->idt_vectoring_error_code;
1970 evmcs->vm_exit_instruction_len = vmcs12->vm_exit_instruction_len;
1971 evmcs->vmx_instruction_info = vmcs12->vmx_instruction_info;
1973 evmcs->exit_qualification = vmcs12->exit_qualification;
1975 evmcs->guest_linear_address = vmcs12->guest_linear_address;
1976 evmcs->guest_rsp = vmcs12->guest_rsp;
1977 evmcs->guest_rflags = vmcs12->guest_rflags;
1979 evmcs->guest_interruptibility_info =
1980 vmcs12->guest_interruptibility_info;
1981 evmcs->cpu_based_vm_exec_control = vmcs12->cpu_based_vm_exec_control;
1982 evmcs->vm_entry_controls = vmcs12->vm_entry_controls;
1983 evmcs->vm_entry_intr_info_field = vmcs12->vm_entry_intr_info_field;
1984 evmcs->vm_entry_exception_error_code =
1985 vmcs12->vm_entry_exception_error_code;
1986 evmcs->vm_entry_instruction_len = vmcs12->vm_entry_instruction_len;
1988 evmcs->guest_rip = vmcs12->guest_rip;
1990 evmcs->guest_bndcfgs = vmcs12->guest_bndcfgs;
1996 * This is an equivalent of the nested hypervisor executing the vmptrld
1997 * instruction.
1999 static enum nested_evmptrld_status nested_vmx_handle_enlightened_vmptrld(
2000 struct kvm_vcpu *vcpu, bool from_launch)
2002 struct vcpu_vmx *vmx = to_vmx(vcpu);
2003 bool evmcs_gpa_changed = false;
2006 if (likely(!vmx->nested.enlightened_vmcs_enabled))
2007 return EVMPTRLD_DISABLED;
2009 if (!nested_enlightened_vmentry(vcpu, &evmcs_gpa))
2010 return EVMPTRLD_DISABLED;
2012 if (unlikely(!vmx->nested.hv_evmcs ||
2013 evmcs_gpa != vmx->nested.hv_evmcs_vmptr)) {
2014 if (!vmx->nested.hv_evmcs)
2015 vmx->nested.current_vmptr = -1ull;
2017 nested_release_evmcs(vcpu);
2019 if (kvm_vcpu_map(vcpu, gpa_to_gfn(evmcs_gpa),
2020 &vmx->nested.hv_evmcs_map))
2021 return EVMPTRLD_ERROR;
2023 vmx->nested.hv_evmcs = vmx->nested.hv_evmcs_map.hva;
2026 * Currently, KVM only supports eVMCS version 1
2027 * (== KVM_EVMCS_VERSION) and thus we expect the guest to set this
2028 * value in the first u32 field of the eVMCS, which should specify
2029 * the eVMCS VersionNumber.
2031 * The guest should learn the eVMCS versions supported by the host
2032 * by examining CPUID.0x4000000A.EAX[0:15]. The host userspace VMM is
2033 * expected to set this CPUID leaf according to the value
2034 * returned in vmcs_version from nested_enable_evmcs().
2036 * However, it turns out that Microsoft Hyper-V fails to comply
2037 * with its own invented interface: when Hyper-V uses eVMCS, it
2038 * just sets the first u32 field of the eVMCS to the revision_id
2039 * specified in MSR_IA32_VMX_BASIC, instead of the eVMCS version
2040 * number, which should be one of the supported versions specified
2041 * in CPUID.0x4000000A.EAX[0:15].
2043 * To work around this Hyper-V bug, accept here either a supported
2044 * eVMCS version or the VMCS12 revision_id as valid values for the
2045 * first u32 field of the eVMCS.
2047 if ((vmx->nested.hv_evmcs->revision_id != KVM_EVMCS_VERSION) &&
2048 (vmx->nested.hv_evmcs->revision_id != VMCS12_REVISION)) {
2049 nested_release_evmcs(vcpu);
2050 return EVMPTRLD_VMFAIL;
2053 vmx->nested.dirty_vmcs12 = true;
2054 vmx->nested.hv_evmcs_vmptr = evmcs_gpa;
2056 evmcs_gpa_changed = true;
2058 * Unlike normal vmcs12, enlightened vmcs12 is not fully
2059 * reloaded from guest's memory (read only fields, fields not
2060 * present in struct hv_enlightened_vmcs, ...). Make sure there
2061 * are no leftovers.
2064 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
2065 memset(vmcs12, 0, sizeof(*vmcs12));
2066 vmcs12->hdr.revision_id = VMCS12_REVISION;
2072 * Clean fields data can't be used on VMLAUNCH and when we switch
2073 * between different L2 guests as KVM keeps a single VMCS12 per L1.
2075 if (from_launch || evmcs_gpa_changed)
2076 vmx->nested.hv_evmcs->hv_clean_fields &=
2077 ~HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
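/*
 * Editor's note (not part of the original source): clearing every clean bit
 * here forces the vmcs02 preparation code (see the hv_clean_fields checks in
 * prepare_vmcs02_rare() below) to re-write all field groups into vmcs02,
 * rather than assuming they are unchanged from a previous run of this L2.
 */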
2079 return EVMPTRLD_SUCCEEDED;
2082 void nested_sync_vmcs12_to_shadow(struct kvm_vcpu *vcpu)
2084 struct vcpu_vmx *vmx = to_vmx(vcpu);
2086 if (vmx->nested.hv_evmcs) {
2087 copy_vmcs12_to_enlightened(vmx);
2088 /* All fields are clean */
2089 vmx->nested.hv_evmcs->hv_clean_fields |=
2090 HV_VMX_ENLIGHTENED_CLEAN_FIELD_ALL;
2092 copy_vmcs12_to_shadow(vmx);
2095 vmx->nested.need_vmcs12_to_shadow_sync = false;
2098 static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer)
2100 struct vcpu_vmx *vmx =
2101 container_of(timer, struct vcpu_vmx, nested.preemption_timer);
2103 vmx->nested.preemption_timer_expired = true;
2104 kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu);
2105 kvm_vcpu_kick(&vmx->vcpu);
2107 return HRTIMER_NORESTART;
2110 static u64 vmx_calc_preemption_timer_value(struct kvm_vcpu *vcpu)
2112 struct vcpu_vmx *vmx = to_vmx(vcpu);
2113 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
2115 u64 l1_scaled_tsc = kvm_read_l1_tsc(vcpu, rdtsc()) >>
2116 VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
2118 if (!vmx->nested.has_preemption_timer_deadline) {
2119 vmx->nested.preemption_timer_deadline =
2120 vmcs12->vmx_preemption_timer_value + l1_scaled_tsc;
2121 vmx->nested.has_preemption_timer_deadline = true;
2123 return vmx->nested.preemption_timer_deadline - l1_scaled_tsc;
2126 static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu,
2127 u64 preemption_timeout)
2129 struct vcpu_vmx *vmx = to_vmx(vcpu);
2132 * A timer value of zero is architecturally guaranteed to cause
2133 * a VMExit prior to executing any instructions in the guest.
2135 if (preemption_timeout == 0) {
2136 vmx_preemption_timer_fn(&vmx->nested.preemption_timer);
2140 if (vcpu->arch.virtual_tsc_khz == 0)
2143 preemption_timeout <<= VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
2144 preemption_timeout *= 1000000;
2145 do_div(preemption_timeout, vcpu->arch.virtual_tsc_khz);
2146 hrtimer_start(&vmx->nested.preemption_timer,
2147 ktime_add_ns(ktime_get(), preemption_timeout),
2148 HRTIMER_MODE_ABS_PINNED);
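/*
 * Editor's sketch (not part of the original source): the conversion above can
 * be read as timer ticks -> TSC cycles -> nanoseconds.  Each emulated
 * preemption-timer tick is 2^VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE = 32 TSC
 * cycles, and virtual_tsc_khz is TSC cycles per millisecond.  A minimal
 * standalone illustration, using a hypothetical helper name:
 */
static inline u64 example_preemption_ticks_to_ns(u64 ticks, u32 tsc_khz)
{
	/* Ticks to TSC cycles: one emulated tick is 32 TSC cycles. */
	u64 ns = ticks << VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;

	/* cycles * 1000000 / (cycles per ms) = ns, same math as above. */
	ns *= 1000000;
	do_div(ns, tsc_khz);
	return ns;
}
/*
 * For example, ticks = 1000 on a 2 GHz TSC (tsc_khz = 2000000) is 32000
 * cycles, i.e. 16000 ns.
 */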
2151 static u64 nested_vmx_calc_efer(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
2153 if (vmx->nested.nested_run_pending &&
2154 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER))
2155 return vmcs12->guest_ia32_efer;
2156 else if (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE)
2157 return vmx->vcpu.arch.efer | (EFER_LMA | EFER_LME);
2159 return vmx->vcpu.arch.efer & ~(EFER_LMA | EFER_LME);
2162 static void prepare_vmcs02_constant_state(struct vcpu_vmx *vmx)
2165 * If vmcs02 hasn't been initialized, set the constant vmcs02 state
2166 * according to L0's settings (vmcs12 is irrelevant here). Host
2167 * fields that come from L0 and are not constant, e.g. HOST_CR3,
2168 * will be set as needed prior to VMLAUNCH/VMRESUME.
2170 if (vmx->nested.vmcs02_initialized)
2172 vmx->nested.vmcs02_initialized = true;
2175 * We don't care what the EPTP value is; we just need to guarantee
2176 * it's valid so we don't get a false positive when doing early
2177 * consistency checks.
2179 if (enable_ept && nested_early_check)
2180 vmcs_write64(EPT_POINTER,
2181 construct_eptp(&vmx->vcpu, 0, PT64_ROOT_4LEVEL));
2183 /* All VMFUNCs are currently emulated through L0 vmexits. */
2184 if (cpu_has_vmx_vmfunc())
2185 vmcs_write64(VM_FUNCTION_CONTROL, 0);
2187 if (cpu_has_vmx_posted_intr())
2188 vmcs_write16(POSTED_INTR_NV, POSTED_INTR_NESTED_VECTOR);
2190 if (cpu_has_vmx_msr_bitmap())
2191 vmcs_write64(MSR_BITMAP, __pa(vmx->nested.vmcs02.msr_bitmap));
2194 * The PML address never changes, so it is constant in vmcs02.
2195 * Conceptually we want to copy the PML index from vmcs01 here,
2196 * and then back to vmcs01 on nested vmexit. But since we flush
2197 * the log and reset GUEST_PML_INDEX on each vmexit, the PML
2198 * index is also effectively constant in vmcs02.
2201 vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
2202 vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
2205 if (cpu_has_vmx_encls_vmexit())
2206 vmcs_write64(ENCLS_EXITING_BITMAP, -1ull);
2209 * Set the MSR load/store lists to match L0's settings. Only the
2210 * addresses are constant (for vmcs02); the counts can change based
2211 * on L2's behavior, e.g. switching to/from long mode.
2213 vmcs_write64(VM_EXIT_MSR_STORE_ADDR, __pa(vmx->msr_autostore.guest.val));
2214 vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host.val));
2215 vmcs_write64(VM_ENTRY_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.guest.val));
2217 vmx_set_constant_host_state(vmx);
2220 static void prepare_vmcs02_early_rare(struct vcpu_vmx *vmx,
2221 struct vmcs12 *vmcs12)
2223 prepare_vmcs02_constant_state(vmx);
2225 vmcs_write64(VMCS_LINK_POINTER, -1ull);
2228 if (nested_cpu_has_vpid(vmcs12) && vmx->nested.vpid02)
2229 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->nested.vpid02);
2231 vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
2235 static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct loaded_vmcs *vmcs01,
2236 struct vmcs12 *vmcs12)
2238 u32 exec_control, vmcs12_exec_ctrl;
2239 u64 guest_efer = nested_vmx_calc_efer(vmx, vmcs12);
2241 if (vmx->nested.dirty_vmcs12 || vmx->nested.hv_evmcs)
2242 prepare_vmcs02_early_rare(vmx, vmcs12);
2247 exec_control = __pin_controls_get(vmcs01);
2248 exec_control |= (vmcs12->pin_based_vm_exec_control &
2249 ~PIN_BASED_VMX_PREEMPTION_TIMER);
2251 /* Posted interrupts setting is only taken from vmcs12. */
2252 vmx->nested.pi_pending = false;
2253 if (nested_cpu_has_posted_intr(vmcs12))
2254 vmx->nested.posted_intr_nv = vmcs12->posted_intr_nv;
2256 exec_control &= ~PIN_BASED_POSTED_INTR;
2257 pin_controls_set(vmx, exec_control);
2262 exec_control = __exec_controls_get(vmcs01); /* L0's desires */
2263 exec_control &= ~CPU_BASED_INTR_WINDOW_EXITING;
2264 exec_control &= ~CPU_BASED_NMI_WINDOW_EXITING;
2265 exec_control &= ~CPU_BASED_TPR_SHADOW;
2266 exec_control |= vmcs12->cpu_based_vm_exec_control;
2268 vmx->nested.l1_tpr_threshold = -1;
2269 if (exec_control & CPU_BASED_TPR_SHADOW)
2270 vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold);
2271 #ifdef CONFIG_X86_64
2273 exec_control |= CPU_BASED_CR8_LOAD_EXITING |
2274 CPU_BASED_CR8_STORE_EXITING;
2278 * A vmexit (to either L1 hypervisor or L0 userspace) is always needed
2279 * for I/O port accesses.
2281 exec_control |= CPU_BASED_UNCOND_IO_EXITING;
2282 exec_control &= ~CPU_BASED_USE_IO_BITMAPS;
2285 * This bit will be computed in nested_get_vmcs12_pages, because
2286 * we do not have access to L1's MSR bitmap yet. For now, keep
2287 * the same bit as before, hoping to avoid multiple VMWRITEs that
2288 * only set/clear this bit.
2290 exec_control &= ~CPU_BASED_USE_MSR_BITMAPS;
2291 exec_control |= exec_controls_get(vmx) & CPU_BASED_USE_MSR_BITMAPS;
2293 exec_controls_set(vmx, exec_control);
2296 * SECONDARY EXEC CONTROLS
2298 if (cpu_has_secondary_exec_ctrls()) {
2299 exec_control = __secondary_exec_controls_get(vmcs01);
2301 /* Take the following fields only from vmcs12 */
2302 exec_control &= ~(SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
2303 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
2304 SECONDARY_EXEC_ENABLE_INVPCID |
2305 SECONDARY_EXEC_ENABLE_RDTSCP |
2306 SECONDARY_EXEC_XSAVES |
2307 SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE |
2308 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
2309 SECONDARY_EXEC_APIC_REGISTER_VIRT |
2310 SECONDARY_EXEC_ENABLE_VMFUNC |
2311 SECONDARY_EXEC_DESC);
2313 if (nested_cpu_has(vmcs12,
2314 CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)) {
2315 vmcs12_exec_ctrl = vmcs12->secondary_vm_exec_control &
2316 ~SECONDARY_EXEC_ENABLE_PML;
2317 exec_control |= vmcs12_exec_ctrl;
2320 /* VMCS shadowing for L2 is emulated for now */
2321 exec_control &= ~SECONDARY_EXEC_SHADOW_VMCS;
2324 * Preset *DT exiting when emulating UMIP, so that vmx_set_cr4()
2325 * will not have to rewrite the controls just for this bit.
2327 if (!boot_cpu_has(X86_FEATURE_UMIP) && vmx_umip_emulated() &&
2328 (vmcs12->guest_cr4 & X86_CR4_UMIP))
2329 exec_control |= SECONDARY_EXEC_DESC;
2331 if (exec_control & SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY)
2332 vmcs_write16(GUEST_INTR_STATUS,
2333 vmcs12->guest_intr_status);
2335 if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST))
2336 exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
2338 secondary_exec_controls_set(vmx, exec_control);
2344 * vmcs12's VM_{ENTRY,EXIT}_LOAD_IA32_EFER and VM_ENTRY_IA32E_MODE
2345 * are emulated by vmx_set_efer() in prepare_vmcs02(), but speculate
2346 * on the related bits (if supported by the CPU) in the hope that
2347 * we can avoid VMWrites during vmx_set_efer().
2349 * Similarly, take vmcs01's PERF_GLOBAL_CTRL in the hope that if KVM is
2350 * loading PERF_GLOBAL_CTRL via the VMCS for L1, then KVM will want to
2351 * do the same for L2.
2353 exec_control = __vm_entry_controls_get(vmcs01);
2354 exec_control |= (vmcs12->vm_entry_controls &
2355 ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL);
2356 exec_control &= ~(VM_ENTRY_IA32E_MODE | VM_ENTRY_LOAD_IA32_EFER);
2357 if (cpu_has_load_ia32_efer()) {
2358 if (guest_efer & EFER_LMA)
2359 exec_control |= VM_ENTRY_IA32E_MODE;
2360 if (guest_efer != host_efer)
2361 exec_control |= VM_ENTRY_LOAD_IA32_EFER;
2363 vm_entry_controls_set(vmx, exec_control);
2368 * L2->L1 exit controls are emulated - the hardware exit is to L0 so
2369 * we should use its exit controls. Note that VM_EXIT_LOAD_IA32_EFER
2370 * bits may be modified by vmx_set_efer() in prepare_vmcs02().
2372 exec_control = __vm_exit_controls_get(vmcs01);
2373 if (cpu_has_load_ia32_efer() && guest_efer != host_efer)
2374 exec_control |= VM_EXIT_LOAD_IA32_EFER;
2376 exec_control &= ~VM_EXIT_LOAD_IA32_EFER;
2377 vm_exit_controls_set(vmx, exec_control);
2380 * Interrupt/Exception Fields
2382 if (vmx->nested.nested_run_pending) {
2383 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD,
2384 vmcs12->vm_entry_intr_info_field);
2385 vmcs_write32(VM_ENTRY_EXCEPTION_ERROR_CODE,
2386 vmcs12->vm_entry_exception_error_code);
2387 vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
2388 vmcs12->vm_entry_instruction_len);
2389 vmcs_write32(GUEST_INTERRUPTIBILITY_INFO,
2390 vmcs12->guest_interruptibility_info);
2391 vmx->loaded_vmcs->nmi_known_unmasked =
2392 !(vmcs12->guest_interruptibility_info & GUEST_INTR_STATE_NMI);
2394 vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, 0);
2398 static void prepare_vmcs02_rare(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
2400 struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs;
2402 if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
2403 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP2)) {
2404 vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector);
2405 vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector);
2406 vmcs_write16(GUEST_SS_SELECTOR, vmcs12->guest_ss_selector);
2407 vmcs_write16(GUEST_DS_SELECTOR, vmcs12->guest_ds_selector);
2408 vmcs_write16(GUEST_FS_SELECTOR, vmcs12->guest_fs_selector);
2409 vmcs_write16(GUEST_GS_SELECTOR, vmcs12->guest_gs_selector);
2410 vmcs_write16(GUEST_LDTR_SELECTOR, vmcs12->guest_ldtr_selector);
2411 vmcs_write16(GUEST_TR_SELECTOR, vmcs12->guest_tr_selector);
2412 vmcs_write32(GUEST_ES_LIMIT, vmcs12->guest_es_limit);
2413 vmcs_write32(GUEST_CS_LIMIT, vmcs12->guest_cs_limit);
2414 vmcs_write32(GUEST_SS_LIMIT, vmcs12->guest_ss_limit);
2415 vmcs_write32(GUEST_DS_LIMIT, vmcs12->guest_ds_limit);
2416 vmcs_write32(GUEST_FS_LIMIT, vmcs12->guest_fs_limit);
2417 vmcs_write32(GUEST_GS_LIMIT, vmcs12->guest_gs_limit);
2418 vmcs_write32(GUEST_LDTR_LIMIT, vmcs12->guest_ldtr_limit);
2419 vmcs_write32(GUEST_TR_LIMIT, vmcs12->guest_tr_limit);
2420 vmcs_write32(GUEST_GDTR_LIMIT, vmcs12->guest_gdtr_limit);
2421 vmcs_write32(GUEST_IDTR_LIMIT, vmcs12->guest_idtr_limit);
2422 vmcs_write32(GUEST_CS_AR_BYTES, vmcs12->guest_cs_ar_bytes);
2423 vmcs_write32(GUEST_SS_AR_BYTES, vmcs12->guest_ss_ar_bytes);
2424 vmcs_write32(GUEST_ES_AR_BYTES, vmcs12->guest_es_ar_bytes);
2425 vmcs_write32(GUEST_DS_AR_BYTES, vmcs12->guest_ds_ar_bytes);
2426 vmcs_write32(GUEST_FS_AR_BYTES, vmcs12->guest_fs_ar_bytes);
2427 vmcs_write32(GUEST_GS_AR_BYTES, vmcs12->guest_gs_ar_bytes);
2428 vmcs_write32(GUEST_LDTR_AR_BYTES, vmcs12->guest_ldtr_ar_bytes);
2429 vmcs_write32(GUEST_TR_AR_BYTES, vmcs12->guest_tr_ar_bytes);
2430 vmcs_writel(GUEST_ES_BASE, vmcs12->guest_es_base);
2431 vmcs_writel(GUEST_CS_BASE, vmcs12->guest_cs_base);
2432 vmcs_writel(GUEST_SS_BASE, vmcs12->guest_ss_base);
2433 vmcs_writel(GUEST_DS_BASE, vmcs12->guest_ds_base);
2434 vmcs_writel(GUEST_FS_BASE, vmcs12->guest_fs_base);
2435 vmcs_writel(GUEST_GS_BASE, vmcs12->guest_gs_base);
2436 vmcs_writel(GUEST_LDTR_BASE, vmcs12->guest_ldtr_base);
2437 vmcs_writel(GUEST_TR_BASE, vmcs12->guest_tr_base);
2438 vmcs_writel(GUEST_GDTR_BASE, vmcs12->guest_gdtr_base);
2439 vmcs_writel(GUEST_IDTR_BASE, vmcs12->guest_idtr_base);
2441 vmx->segment_cache.bitmask = 0;
2444 if (!hv_evmcs || !(hv_evmcs->hv_clean_fields &
2445 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1)) {
2446 vmcs_write32(GUEST_SYSENTER_CS, vmcs12->guest_sysenter_cs);
2447 vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
2448 vmcs12->guest_pending_dbg_exceptions);
2449 vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->guest_sysenter_esp);
2450 vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->guest_sysenter_eip);
2453 * L1 may access the L2's PDPTR, so save them to construct
2454 * vmcs12.
2457 vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0);
2458 vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
2459 vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
2460 vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
2463 if (kvm_mpx_supported() && vmx->nested.nested_run_pending &&
2464 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS))
2465 vmcs_write64(GUEST_BNDCFGS, vmcs12->guest_bndcfgs);
2468 if (nested_cpu_has_xsaves(vmcs12))
2469 vmcs_write64(XSS_EXIT_BITMAP, vmcs12->xss_exit_bitmap);
2472 * Whether page-faults are trapped is determined by a combination of
2473 * 3 settings: PFEC_MASK, PFEC_MATCH and EXCEPTION_BITMAP.PF. If L0
2474 * doesn't care about page faults then we should set all of these to
2475 * L1's desires. However, if L0 does care about (some) page faults, it
2476 * is not easy (if at all possible?) to merge L0's and L1's desires, so we
2477 * simply ask to exit on each and every L2 page fault. This is done by
2478 * setting MASK=MATCH=0 and (see below) EB.PF=1.
2479 * Note that below we don't need special code to set EB.PF beyond the
2480 * "or"ing of the EB of vmcs01 and vmcs12, because when enable_ept,
2481 * vmcs01's EB.PF is 0 so the "or" will take vmcs12's value, and when
2482 * !enable_ept, EB.PF is 1, so the "or" will always be 1.
2484 if (vmx_need_pf_intercept(&vmx->vcpu)) {
2486 * TODO: if both L0 and L1 need the same MASK and MATCH,
2487 * go ahead and use it?
2489 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, 0);
2490 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, 0);
2492 vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, vmcs12->page_fault_error_code_mask);
2493 vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, vmcs12->page_fault_error_code_match);
2496 if (cpu_has_vmx_apicv()) {
2497 vmcs_write64(EOI_EXIT_BITMAP0, vmcs12->eoi_exit_bitmap0);
2498 vmcs_write64(EOI_EXIT_BITMAP1, vmcs12->eoi_exit_bitmap1);
2499 vmcs_write64(EOI_EXIT_BITMAP2, vmcs12->eoi_exit_bitmap2);
2500 vmcs_write64(EOI_EXIT_BITMAP3, vmcs12->eoi_exit_bitmap3);
2504 * Make sure the msr_autostore list is up to date before we set the
2505 * count in the vmcs02.
2507 prepare_vmx_msr_autostore_list(&vmx->vcpu, MSR_IA32_TSC);
2509 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, vmx->msr_autostore.guest.nr);
2510 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
2511 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
2513 set_cr4_guest_host_mask(vmx);
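/*
 * Editor's sketch (not part of the original source): per the SDM, a page
 * fault with error code 'pfec' triggers a VM-exit iff the result of
 * (pfec & PFEC_MASK) == PFEC_MATCH equals the EXCEPTION_BITMAP.PF bit.  With
 * MASK = MATCH = 0 and EB.PF = 1, as programmed above when L0 needs to
 * intercept page faults, the comparison is always true and every L2 #PF
 * exits.  A hypothetical helper expressing the rule:
 */
static inline bool example_pf_causes_vmexit(u32 pfec, u32 mask, u32 match,
					    bool eb_pf)
{
	return ((pfec & mask) == match) == eb_pf;
}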
2517 * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
2518 * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it
2519 * with L0's requirements for its guest (a.k.a. vmcs01), so we can run the L2
2520 * guest in a way that satisfies both L1's requests and our own
2521 * needs. In addition to modifying the active vmcs (which is vmcs02), this
2522 * function also has additional necessary side-effects, like setting various
2523 * vcpu->arch fields.
2524 * Returns 0 on success, 1 on failure. Invalid state exit qualification code
2525 * is assigned to entry_failure_code on failure.
2527 static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
2528 enum vm_entry_failure_code *entry_failure_code)
2530 struct vcpu_vmx *vmx = to_vmx(vcpu);
2531 struct hv_enlightened_vmcs *hv_evmcs = vmx->nested.hv_evmcs;
2532 bool load_guest_pdptrs_vmcs12 = false;
2534 if (vmx->nested.dirty_vmcs12 || hv_evmcs) {
2535 prepare_vmcs02_rare(vmx, vmcs12);
2536 vmx->nested.dirty_vmcs12 = false;
2538 load_guest_pdptrs_vmcs12 = !hv_evmcs ||
2539 !(hv_evmcs->hv_clean_fields &
2540 HV_VMX_ENLIGHTENED_CLEAN_FIELD_GUEST_GRP1);
2543 if (vmx->nested.nested_run_pending &&
2544 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS)) {
2545 kvm_set_dr(vcpu, 7, vmcs12->guest_dr7);
2546 vmcs_write64(GUEST_IA32_DEBUGCTL, vmcs12->guest_ia32_debugctl);
2548 kvm_set_dr(vcpu, 7, vcpu->arch.dr7);
2549 vmcs_write64(GUEST_IA32_DEBUGCTL, vmx->nested.vmcs01_debugctl);
2551 if (kvm_mpx_supported() && (!vmx->nested.nested_run_pending ||
2552 !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)))
2553 vmcs_write64(GUEST_BNDCFGS, vmx->nested.vmcs01_guest_bndcfgs);
2554 vmx_set_rflags(vcpu, vmcs12->guest_rflags);
2556 /* EXCEPTION_BITMAP and CR0_GUEST_HOST_MASK should basically be the
2557 * bitwise-or of what L1 wants to trap for L2, and what we want to
2558 * trap. Note that CR0.TS also needs updating - we do this later.
2560 update_exception_bitmap(vcpu);
2561 vcpu->arch.cr0_guest_owned_bits &= ~vmcs12->cr0_guest_host_mask;
2562 vmcs_writel(CR0_GUEST_HOST_MASK, ~vcpu->arch.cr0_guest_owned_bits);
2564 if (vmx->nested.nested_run_pending &&
2565 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT)) {
2566 vmcs_write64(GUEST_IA32_PAT, vmcs12->guest_ia32_pat);
2567 vcpu->arch.pat = vmcs12->guest_ia32_pat;
2568 } else if (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_IA32_PAT) {
2569 vmcs_write64(GUEST_IA32_PAT, vmx->vcpu.arch.pat);
2572 vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
2574 if (kvm_has_tsc_control)
2575 decache_tsc_multiplier(vmx);
2577 nested_vmx_transition_tlb_flush(vcpu, vmcs12, true);
2579 if (nested_cpu_has_ept(vmcs12))
2580 nested_ept_init_mmu_context(vcpu);
2583 * This sets GUEST_CR0 to vmcs12->guest_cr0, possibly modifying those
2584 * bits which we consider mandatory enabled.
2585 * The CR0_READ_SHADOW is what L2 should have expected to read given
2586 * the specifications by L1; it's not enough to take
2587 * vmcs12->cr0_read_shadow because our cr0_guest_host_mask may
2588 * have more bits set than L1 expected.
2590 vmx_set_cr0(vcpu, vmcs12->guest_cr0);
2591 vmcs_writel(CR0_READ_SHADOW, nested_read_cr0(vmcs12));
2593 vmx_set_cr4(vcpu, vmcs12->guest_cr4);
2594 vmcs_writel(CR4_READ_SHADOW, nested_read_cr4(vmcs12));
2596 vcpu->arch.efer = nested_vmx_calc_efer(vmx, vmcs12);
2597 /* Note: may modify VM_ENTRY/EXIT_CONTROLS and GUEST/HOST_IA32_EFER */
2598 vmx_set_efer(vcpu, vcpu->arch.efer);
2601 * Guest state is invalid and unrestricted guest is disabled,
2602 * which means L1 attempted VMEntry to L2 with invalid state.
2603 * Fail the VM-Entry.
2605 if (CC(!vmx_guest_state_valid(vcpu))) {
2606 *entry_failure_code = ENTRY_FAIL_DEFAULT;
2610 /* Load the guest's CR3; paging is handled via either EPT or shadow page tables. */
2611 if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12),
2612 entry_failure_code))
2616 * Immediately write vmcs02.GUEST_CR3. It will be propagated to vmcs12
2617 * on nested VM-Exit, which can occur without actually running L2 and
2618 * thus without hitting vmx_load_mmu_pgd(), e.g. if L1 is entering L2 with
2619 * vmcs12.GUEST_ACTIVITY_STATE=HLT, in which case KVM will intercept the
2620 * transition to HLT instead of running L2.
2623 vmcs_writel(GUEST_CR3, vmcs12->guest_cr3);
2625 /* Late preparation of GUEST_PDPTRs now that EFER and CRs are set. */
2626 if (load_guest_pdptrs_vmcs12 && nested_cpu_has_ept(vmcs12) &&
2627 is_pae_paging(vcpu)) {
2628 vmcs_write64(GUEST_PDPTR0, vmcs12->guest_pdptr0);
2629 vmcs_write64(GUEST_PDPTR1, vmcs12->guest_pdptr1);
2630 vmcs_write64(GUEST_PDPTR2, vmcs12->guest_pdptr2);
2631 vmcs_write64(GUEST_PDPTR3, vmcs12->guest_pdptr3);
2635 vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested;
2637 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) &&
2638 WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
2639 vmcs12->guest_ia32_perf_global_ctrl))) {
2640 *entry_failure_code = ENTRY_FAIL_DEFAULT;
2644 kvm_rsp_write(vcpu, vmcs12->guest_rsp);
2645 kvm_rip_write(vcpu, vmcs12->guest_rip);
2649 static int nested_vmx_check_nmi_controls(struct vmcs12 *vmcs12)
2651 if (CC(!nested_cpu_has_nmi_exiting(vmcs12) &&
2652 nested_cpu_has_virtual_nmis(vmcs12)))
2655 if (CC(!nested_cpu_has_virtual_nmis(vmcs12) &&
2656 nested_cpu_has(vmcs12, CPU_BASED_NMI_WINDOW_EXITING)))
2662 static bool nested_vmx_check_eptp(struct kvm_vcpu *vcpu, u64 new_eptp)
2664 struct vcpu_vmx *vmx = to_vmx(vcpu);
2665 int maxphyaddr = cpuid_maxphyaddr(vcpu);
2667 /* Check for memory type validity */
2668 switch (new_eptp & VMX_EPTP_MT_MASK) {
2669 case VMX_EPTP_MT_UC:
2670 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_UC_BIT)))
2673 case VMX_EPTP_MT_WB:
2674 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPTP_WB_BIT)))
2681 /* Page-walk levels validity. */
2682 switch (new_eptp & VMX_EPTP_PWL_MASK) {
2683 case VMX_EPTP_PWL_5:
2684 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_PAGE_WALK_5_BIT)))
2687 case VMX_EPTP_PWL_4:
2688 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_PAGE_WALK_4_BIT)))
2695 /* Reserved bits should not be set */
2696 if (CC(new_eptp >> maxphyaddr || ((new_eptp >> 7) & 0x1f)))
2699 /* AD, if set, should be supported */
2700 if (new_eptp & VMX_EPTP_AD_ENABLE_BIT) {
2701 if (CC(!(vmx->nested.msrs.ept_caps & VMX_EPT_AD_BIT)))
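/*
 * Editor's note (illustration only): an EPTP packs the EPT memory type in
 * bits 2:0, the page-walk length minus one in bits 5:3, the A/D-enable bit
 * in bit 6 and the 4KB-aligned physical address of the root table in the
 * upper bits.  A typical well-formed value for a root table at 0x123456000
 * would be:
 *
 *	0x123456000 | VMX_EPTP_MT_WB | VMX_EPTP_PWL_4 | VMX_EPTP_AD_ENABLE_BIT
 *		= 0x12345605e
 *
 * i.e. write-back memory type, a 4-level walk and A/D bits enabled, with no
 * reserved bits set, so the checks above succeed on hardware that advertises
 * the corresponding VMX_EPT_* capabilities.
 */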
2709 * Checks related to VM-Execution Control Fields
2711 static int nested_check_vm_execution_controls(struct kvm_vcpu *vcpu,
2712 struct vmcs12 *vmcs12)
2714 struct vcpu_vmx *vmx = to_vmx(vcpu);
2716 if (CC(!vmx_control_verify(vmcs12->pin_based_vm_exec_control,
2717 vmx->nested.msrs.pinbased_ctls_low,
2718 vmx->nested.msrs.pinbased_ctls_high)) ||
2719 CC(!vmx_control_verify(vmcs12->cpu_based_vm_exec_control,
2720 vmx->nested.msrs.procbased_ctls_low,
2721 vmx->nested.msrs.procbased_ctls_high)))
2724 if (nested_cpu_has(vmcs12, CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
2725 CC(!vmx_control_verify(vmcs12->secondary_vm_exec_control,
2726 vmx->nested.msrs.secondary_ctls_low,
2727 vmx->nested.msrs.secondary_ctls_high)))
2730 if (CC(vmcs12->cr3_target_count > nested_cpu_vmx_misc_cr3_count(vcpu)) ||
2731 nested_vmx_check_io_bitmap_controls(vcpu, vmcs12) ||
2732 nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12) ||
2733 nested_vmx_check_tpr_shadow_controls(vcpu, vmcs12) ||
2734 nested_vmx_check_apic_access_controls(vcpu, vmcs12) ||
2735 nested_vmx_check_apicv_controls(vcpu, vmcs12) ||
2736 nested_vmx_check_nmi_controls(vmcs12) ||
2737 nested_vmx_check_pml_controls(vcpu, vmcs12) ||
2738 nested_vmx_check_unrestricted_guest_controls(vcpu, vmcs12) ||
2739 nested_vmx_check_mode_based_ept_exec_controls(vcpu, vmcs12) ||
2740 nested_vmx_check_shadow_vmcs_controls(vcpu, vmcs12) ||
2741 CC(nested_cpu_has_vpid(vmcs12) && !vmcs12->virtual_processor_id))
2744 if (!nested_cpu_has_preemption_timer(vmcs12) &&
2745 nested_cpu_has_save_preemption_timer(vmcs12))
2748 if (nested_cpu_has_ept(vmcs12) &&
2749 CC(!nested_vmx_check_eptp(vcpu, vmcs12->ept_pointer)))
2752 if (nested_cpu_has_vmfunc(vmcs12)) {
2753 if (CC(vmcs12->vm_function_control &
2754 ~vmx->nested.msrs.vmfunc_controls))
2757 if (nested_cpu_has_eptp_switching(vmcs12)) {
2758 if (CC(!nested_cpu_has_ept(vmcs12)) ||
2759 CC(!page_address_valid(vcpu, vmcs12->eptp_list_address)))
2768 * Checks related to VM-Exit Control Fields
2770 static int nested_check_vm_exit_controls(struct kvm_vcpu *vcpu,
2771 struct vmcs12 *vmcs12)
2773 struct vcpu_vmx *vmx = to_vmx(vcpu);
2775 if (CC(!vmx_control_verify(vmcs12->vm_exit_controls,
2776 vmx->nested.msrs.exit_ctls_low,
2777 vmx->nested.msrs.exit_ctls_high)) ||
2778 CC(nested_vmx_check_exit_msr_switch_controls(vcpu, vmcs12)))
2785 * Checks related to VM-Entry Control Fields
2787 static int nested_check_vm_entry_controls(struct kvm_vcpu *vcpu,
2788 struct vmcs12 *vmcs12)
2790 struct vcpu_vmx *vmx = to_vmx(vcpu);
2792 if (CC(!vmx_control_verify(vmcs12->vm_entry_controls,
2793 vmx->nested.msrs.entry_ctls_low,
2794 vmx->nested.msrs.entry_ctls_high)))
2798 * From the Intel SDM, volume 3:
2799 * Fields relevant to VM-entry event injection must be set properly.
2800 * These fields are the VM-entry interruption-information field, the
2801 * VM-entry exception error code, and the VM-entry instruction length.
2803 if (vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) {
2804 u32 intr_info = vmcs12->vm_entry_intr_info_field;
2805 u8 vector = intr_info & INTR_INFO_VECTOR_MASK;
2806 u32 intr_type = intr_info & INTR_INFO_INTR_TYPE_MASK;
2807 bool has_error_code = intr_info & INTR_INFO_DELIVER_CODE_MASK;
2808 bool should_have_error_code;
2809 bool urg = nested_cpu_has2(vmcs12,
2810 SECONDARY_EXEC_UNRESTRICTED_GUEST);
2811 bool prot_mode = !urg || vmcs12->guest_cr0 & X86_CR0_PE;
2813 /* VM-entry interruption-info field: interruption type */
2814 if (CC(intr_type == INTR_TYPE_RESERVED) ||
2815 CC(intr_type == INTR_TYPE_OTHER_EVENT &&
2816 !nested_cpu_supports_monitor_trap_flag(vcpu)))
2819 /* VM-entry interruption-info field: vector */
2820 if (CC(intr_type == INTR_TYPE_NMI_INTR && vector != NMI_VECTOR) ||
2821 CC(intr_type == INTR_TYPE_HARD_EXCEPTION && vector > 31) ||
2822 CC(intr_type == INTR_TYPE_OTHER_EVENT && vector != 0))
2825 /* VM-entry interruption-info field: deliver error code */
2826 should_have_error_code =
2827 intr_type == INTR_TYPE_HARD_EXCEPTION && prot_mode &&
2828 x86_exception_has_error_code(vector);
2829 if (CC(has_error_code != should_have_error_code))
2832 /* VM-entry exception error code */
2833 if (CC(has_error_code &&
2834 vmcs12->vm_entry_exception_error_code & GENMASK(31, 16)))
2837 /* VM-entry interruption-info field: reserved bits */
2838 if (CC(intr_info & INTR_INFO_RESVD_BITS_MASK))
2841 /* VM-entry instruction length */
2842 switch (intr_type) {
2843 case INTR_TYPE_SOFT_EXCEPTION:
2844 case INTR_TYPE_SOFT_INTR:
2845 case INTR_TYPE_PRIV_SW_EXCEPTION:
2846 if (CC(vmcs12->vm_entry_instruction_len > 15) ||
2847 CC(vmcs12->vm_entry_instruction_len == 0 &&
2848 CC(!nested_cpu_has_zero_length_injection(vcpu))))
2853 if (nested_vmx_check_entry_msr_switch_controls(vcpu, vmcs12))
2859 static int nested_vmx_check_controls(struct kvm_vcpu *vcpu,
2860 struct vmcs12 *vmcs12)
2862 if (nested_check_vm_execution_controls(vcpu, vmcs12) ||
2863 nested_check_vm_exit_controls(vcpu, vmcs12) ||
2864 nested_check_vm_entry_controls(vcpu, vmcs12))
2867 if (to_vmx(vcpu)->nested.enlightened_vmcs_enabled)
2868 return nested_evmcs_check_controls(vmcs12);
2873 static int nested_vmx_check_address_space_size(struct kvm_vcpu *vcpu,
2874 struct vmcs12 *vmcs12)
2876 #ifdef CONFIG_X86_64
2877 if (CC(!!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE) !=
2878 !!(vcpu->arch.efer & EFER_LMA)))
2884 static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu,
2885 struct vmcs12 *vmcs12)
2889 if (CC(!nested_host_cr0_valid(vcpu, vmcs12->host_cr0)) ||
2890 CC(!nested_host_cr4_valid(vcpu, vmcs12->host_cr4)) ||
2891 CC(!nested_cr3_valid(vcpu, vmcs12->host_cr3)))
2894 if (CC(is_noncanonical_address(vmcs12->host_ia32_sysenter_esp, vcpu)) ||
2895 CC(is_noncanonical_address(vmcs12->host_ia32_sysenter_eip, vcpu)))
2898 if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) &&
2899 CC(!kvm_pat_valid(vmcs12->host_ia32_pat)))
2902 if ((vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL) &&
2903 CC(!kvm_valid_perf_global_ctrl(vcpu_to_pmu(vcpu),
2904 vmcs12->host_ia32_perf_global_ctrl)))
2907 #ifdef CONFIG_X86_64
2908 ia32e = !!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE);
2914 if (CC(!(vmcs12->host_cr4 & X86_CR4_PAE)))
2917 if (CC(vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) ||
2918 CC(vmcs12->host_cr4 & X86_CR4_PCIDE) ||
2919 CC((vmcs12->host_rip) >> 32))
2923 if (CC(vmcs12->host_cs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
2924 CC(vmcs12->host_ss_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
2925 CC(vmcs12->host_ds_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
2926 CC(vmcs12->host_es_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
2927 CC(vmcs12->host_fs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
2928 CC(vmcs12->host_gs_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
2929 CC(vmcs12->host_tr_selector & (SEGMENT_RPL_MASK | SEGMENT_TI_MASK)) ||
2930 CC(vmcs12->host_cs_selector == 0) ||
2931 CC(vmcs12->host_tr_selector == 0) ||
2932 CC(vmcs12->host_ss_selector == 0 && !ia32e))
2935 if (CC(is_noncanonical_address(vmcs12->host_fs_base, vcpu)) ||
2936 CC(is_noncanonical_address(vmcs12->host_gs_base, vcpu)) ||
2937 CC(is_noncanonical_address(vmcs12->host_gdtr_base, vcpu)) ||
2938 CC(is_noncanonical_address(vmcs12->host_idtr_base, vcpu)) ||
2939 CC(is_noncanonical_address(vmcs12->host_tr_base, vcpu)) ||
2940 CC(is_noncanonical_address(vmcs12->host_rip, vcpu)))
2944 * If the load IA32_EFER VM-exit control is 1, bits reserved in the
2945 * IA32_EFER MSR must be 0 in the field for that register. In addition,
2946 * the values of the LMA and LME bits in the field must each be that of
2947 * the host address-space size VM-exit control.
2949 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) {
2950 if (CC(!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer)) ||
2951 CC(ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA)) ||
2952 CC(ia32e != !!(vmcs12->host_ia32_efer & EFER_LME)))
2959 static int nested_vmx_check_vmcs_link_ptr(struct kvm_vcpu *vcpu,
2960 struct vmcs12 *vmcs12)
2963 struct vmcs12 *shadow;
2964 struct kvm_host_map map;
2966 if (vmcs12->vmcs_link_pointer == -1ull)
2969 if (CC(!page_address_valid(vcpu, vmcs12->vmcs_link_pointer)))
2972 if (CC(kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->vmcs_link_pointer), &map)))
2977 if (CC(shadow->hdr.revision_id != VMCS12_REVISION) ||
2978 CC(shadow->hdr.shadow_vmcs != nested_cpu_has_shadow_vmcs(vmcs12)))
2981 kvm_vcpu_unmap(vcpu, &map, false);
2986 * Checks related to Guest Non-register State
2988 static int nested_check_guest_non_reg_state(struct vmcs12 *vmcs12)
2990 if (CC(vmcs12->guest_activity_state != GUEST_ACTIVITY_ACTIVE &&
2991 vmcs12->guest_activity_state != GUEST_ACTIVITY_HLT))
2997 static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
2998 struct vmcs12 *vmcs12,
2999 enum vm_entry_failure_code *entry_failure_code)
3001 bool ia32e = !!(vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE);
3003 *entry_failure_code = ENTRY_FAIL_DEFAULT;
3005 if (CC(!nested_guest_cr0_valid(vcpu, vmcs12->guest_cr0)) ||
3006 CC(!nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4)))
3009 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) &&
3010 CC(!kvm_dr7_valid(vmcs12->guest_dr7)))
3013 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT) &&
3014 CC(!kvm_pat_valid(vmcs12->guest_ia32_pat)))
3017 if (nested_vmx_check_vmcs_link_ptr(vcpu, vmcs12)) {
3018 *entry_failure_code = ENTRY_FAIL_VMCS_LINK_PTR;
3022 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL) &&
3023 CC(!kvm_valid_perf_global_ctrl(vcpu_to_pmu(vcpu),
3024 vmcs12->guest_ia32_perf_global_ctrl)))
3027 if (CC((vmcs12->guest_cr0 & (X86_CR0_PG | X86_CR0_PE)) == X86_CR0_PG))
3030 if (CC(ia32e && !(vmcs12->guest_cr4 & X86_CR4_PAE)) ||
3031 CC(ia32e && !(vmcs12->guest_cr0 & X86_CR0_PG)))
3035 * If the load IA32_EFER VM-entry control is 1, the following checks
3036 * are performed on the field for the IA32_EFER MSR:
3037 * - Bits reserved in the IA32_EFER MSR must be 0.
3038 * - Bit 10 (corresponding to IA32_EFER.LMA) must equal the value of
3039 * the IA-32e mode guest VM-entry control. It must also be identical
3040 * to bit 8 (LME) if bit 31 in the CR0 field (corresponding to
3041 * CR0.PG) is 1.
3043 if (to_vmx(vcpu)->nested.nested_run_pending &&
3044 (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) {
3045 if (CC(!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer)) ||
3046 CC(ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA)) ||
3047 CC(((vmcs12->guest_cr0 & X86_CR0_PG) &&
3048 ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME))))
3052 if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS) &&
3053 (CC(is_noncanonical_address(vmcs12->guest_bndcfgs & PAGE_MASK, vcpu)) ||
3054 CC((vmcs12->guest_bndcfgs & MSR_IA32_BNDCFGS_RSVD))))
3057 if (nested_check_guest_non_reg_state(vmcs12))
3063 static int nested_vmx_check_vmentry_hw(struct kvm_vcpu *vcpu)
3065 struct vcpu_vmx *vmx = to_vmx(vcpu);
3066 unsigned long cr3, cr4;
3069 if (!nested_early_check)
3072 if (vmx->msr_autoload.host.nr)
3073 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
3074 if (vmx->msr_autoload.guest.nr)
3075 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
3079 vmx_prepare_switch_to_guest(vcpu);
3082 * Induce a consistency check VMExit by clearing bit 1 in GUEST_RFLAGS,
3083 * which is reserved to '1' by hardware. GUEST_RFLAGS is guaranteed to
3084 * be written (by prepare_vmcs02()) before the "real" VMEnter, i.e.
3085 * there is no need to preserve other bits or save/restore the field.
3087 vmcs_writel(GUEST_RFLAGS, 0);
3089 cr3 = __get_current_cr3_fast();
3090 if (unlikely(cr3 != vmx->loaded_vmcs->host_state.cr3)) {
3091 vmcs_writel(HOST_CR3, cr3);
3092 vmx->loaded_vmcs->host_state.cr3 = cr3;
3095 cr4 = cr4_read_shadow();
3096 if (unlikely(cr4 != vmx->loaded_vmcs->host_state.cr4)) {
3097 vmcs_writel(HOST_CR4, cr4);
3098 vmx->loaded_vmcs->host_state.cr4 = cr4;
3101 vm_fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs,
3102 __vmx_vcpu_run_flags(vmx));
3104 if (vmx->msr_autoload.host.nr)
3105 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
3106 if (vmx->msr_autoload.guest.nr)
3107 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
3110 u32 error = vmcs_read32(VM_INSTRUCTION_ERROR);
3114 trace_kvm_nested_vmenter_failed(
3115 "early hardware check VM-instruction error: ", error);
3116 WARN_ON_ONCE(error != VMXERR_ENTRY_INVALID_CONTROL_FIELD);
3121 * VMExit clears RFLAGS.IF and DR7, even on a consistency check.
3123 if (hw_breakpoint_active())
3124 set_debugreg(__this_cpu_read(cpu_dr7), 7);
3129 * A non-failing VMEntry means we somehow entered guest mode with
3130 * an illegal RIP, and that's just the tip of the iceberg. There
3131 * is no telling what memory has been modified or what state has
3132 * been exposed to unknown code. Hitting this all but guarantees
3133 * a (very critical) hardware issue.
3135 WARN_ON(!(vmcs_read32(VM_EXIT_REASON) &
3136 VMX_EXIT_REASONS_FAILED_VMENTRY));
3141 static bool nested_get_evmcs_page(struct kvm_vcpu *vcpu)
3143 struct vcpu_vmx *vmx = to_vmx(vcpu);
3146 * hv_evmcs may end up not being mapped after migration (when
3147 * L2 was running); map it here to make sure vmcs12 changes are
3148 * properly reflected.
3150 if (vmx->nested.enlightened_vmcs_enabled && !vmx->nested.hv_evmcs) {
3151 enum nested_evmptrld_status evmptrld_status =
3152 nested_vmx_handle_enlightened_vmptrld(vcpu, false);
3154 if (evmptrld_status == EVMPTRLD_VMFAIL ||
3155 evmptrld_status == EVMPTRLD_ERROR)
3162 static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu)
3164 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3165 struct vcpu_vmx *vmx = to_vmx(vcpu);
3166 struct kvm_host_map *map;
3170 if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
3172 * Translate L1 physical address to host physical
3173 * address for vmcs02. Keep the page pinned, so this
3174 * physical address remains valid. We keep a reference
3175 * to it so we can release it later.
3177 if (vmx->nested.apic_access_page) { /* shouldn't happen */
3178 kvm_release_page_clean(vmx->nested.apic_access_page);
3179 vmx->nested.apic_access_page = NULL;
3181 page = kvm_vcpu_gpa_to_page(vcpu, vmcs12->apic_access_addr);
3182 if (!is_error_page(page)) {
3183 vmx->nested.apic_access_page = page;
3184 hpa = page_to_phys(vmx->nested.apic_access_page);
3185 vmcs_write64(APIC_ACCESS_ADDR, hpa);
3187 pr_debug_ratelimited("%s: no backing 'struct page' for APIC-access address in vmcs12\n",
3189 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
3190 vcpu->run->internal.suberror =
3191 KVM_INTERNAL_ERROR_EMULATION;
3192 vcpu->run->internal.ndata = 0;
3197 if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
3198 map = &vmx->nested.virtual_apic_map;
3200 if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->virtual_apic_page_addr), map)) {
3201 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, pfn_to_hpa(map->pfn));
3202 } else if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING) &&
3203 nested_cpu_has(vmcs12, CPU_BASED_CR8_STORE_EXITING) &&
3204 !nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
3206 * The processor will never use the TPR shadow, simply
3207 * clear the bit from the execution control. Such a
3208 * configuration is useless, but it happens in tests.
3209 * For any other configuration, failing the vm entry is
3210 * _not_ what the processor does but it's basically the
3211 * only possibility we have.
3213 exec_controls_clearbit(vmx, CPU_BASED_TPR_SHADOW);
3216 * Write an illegal value to VIRTUAL_APIC_PAGE_ADDR to
3217 * force VM-Entry to fail.
3219 vmcs_write64(VIRTUAL_APIC_PAGE_ADDR, -1ull);
3223 if (nested_cpu_has_posted_intr(vmcs12)) {
3224 map = &vmx->nested.pi_desc_map;
3226 if (!kvm_vcpu_map(vcpu, gpa_to_gfn(vmcs12->posted_intr_desc_addr), map)) {
3227 vmx->nested.pi_desc =
3228 (struct pi_desc *)(((void *)map->hva) +
3229 offset_in_page(vmcs12->posted_intr_desc_addr));
3230 vmcs_write64(POSTED_INTR_DESC_ADDR,
3231 pfn_to_hpa(map->pfn) + offset_in_page(vmcs12->posted_intr_desc_addr));
3234 if (nested_vmx_prepare_msr_bitmap(vcpu, vmcs12))
3235 exec_controls_setbit(vmx, CPU_BASED_USE_MSR_BITMAPS);
3237 exec_controls_clearbit(vmx, CPU_BASED_USE_MSR_BITMAPS);
3242 static bool vmx_get_nested_state_pages(struct kvm_vcpu *vcpu)
3244 if (!nested_get_evmcs_page(vcpu)) {
3245 pr_debug_ratelimited("%s: enlightened vmptrld failed\n",
3247 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
3248 vcpu->run->internal.suberror =
3249 KVM_INTERNAL_ERROR_EMULATION;
3250 vcpu->run->internal.ndata = 0;
3255 if (is_guest_mode(vcpu) && !nested_get_vmcs12_pages(vcpu))
3261 static int nested_vmx_write_pml_buffer(struct kvm_vcpu *vcpu, gpa_t gpa)
3263 struct vmcs12 *vmcs12;
3264 struct vcpu_vmx *vmx = to_vmx(vcpu);
3267 if (WARN_ON_ONCE(!is_guest_mode(vcpu)))
3270 if (WARN_ON_ONCE(vmx->nested.pml_full))
3274 * Check if PML is enabled for the nested guest. Whether eptp bit 6 is
3275 * set is already checked as part of A/D emulation.
3277 vmcs12 = get_vmcs12(vcpu);
3278 if (!nested_cpu_has_pml(vmcs12))
3281 if (vmcs12->guest_pml_index >= PML_ENTITY_NUM) {
3282 vmx->nested.pml_full = true;
3287 dst = vmcs12->pml_address + sizeof(u64) * vmcs12->guest_pml_index;
3289 if (kvm_write_guest_page(vcpu->kvm, gpa_to_gfn(dst), &gpa,
3290 offset_in_page(dst), sizeof(gpa)))
3293 vmcs12->guest_pml_index--;
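/*
 * Editor's note (illustration only): the emulated write above mirrors the
 * hardware PML format - a 4KB buffer of PML_ENTITY_NUM (512) u64 GPA entries
 * with a downward-counting index.  E.g. with guest_pml_index = 511 the dirty
 * GPA lands at pml_address + 511 * 8 (the last slot) and the index becomes
 * 510; if the index is already out of range, the pml_full handling above
 * records that the buffer is full so a page-modification-log-full exit can
 * be reflected to L1 instead.
 */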
3299 * Intel's VMX Instruction Reference specifies a common set of prerequisites
3300 * for running VMX instructions (except VMXON, whose prerequisites are
3301 * slightly different). It also specifies what exception to inject if they are not met.
3302 * Note that many of these exceptions have priority over VM exits, so they
3303 * don't have to be checked again here.
3305 static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
3307 if (!to_vmx(vcpu)->nested.vmxon) {
3308 kvm_queue_exception(vcpu, UD_VECTOR);
3312 if (vmx_get_cpl(vcpu)) {
3313 kvm_inject_gp(vcpu, 0);
3320 static u8 vmx_has_apicv_interrupt(struct kvm_vcpu *vcpu)
3322 u8 rvi = vmx_get_rvi();
3323 u8 vppr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_PROCPRI);
3325 return ((rvi & 0xf0) > (vppr & 0xf0));
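/*
 * Editor's note (illustration only): the comparison above is between
 * interrupt priority classes (bits 7:4 of the vector/PPR).  E.g. RVI = 0x51
 * against VPPR = 0x40 gives 0x50 > 0x40, so a class-5 virtual interrupt is
 * pending and deliverable above the current class-4 priority, whereas
 * RVI = 0x41 against the same VPPR gives 0x40 > 0x40 == false, i.e. the
 * pending interrupt is blocked by PPR.
 */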
3328 static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
3329 struct vmcs12 *vmcs12);
3332 * If from_vmentry is false, this is being called from state restore (either RSM
3333 * or KVM_SET_NESTED_STATE). Otherwise it's called from vmlaunch/vmresume.
3336 * NVMX_VMENTRY_SUCCESS: Entered VMX non-root mode
3337 * NVMX_VMENTRY_VMFAIL: Consistency check VMFail
3338 * NVMX_VMENTRY_VMEXIT: Consistency check VMExit
3339 * NVMX_VMENTRY_KVM_INTERNAL_ERROR: KVM internal error
3341 enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
3344 struct vcpu_vmx *vmx = to_vmx(vcpu);
3345 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3346 enum vm_entry_failure_code entry_failure_code;
3347 bool evaluate_pending_interrupts;
3348 union vmx_exit_reason exit_reason = {
3349 .basic = EXIT_REASON_INVALID_STATE,
3350 .failed_vmentry = 1,
3354 if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
3355 kvm_vcpu_flush_tlb_current(vcpu);
3357 evaluate_pending_interrupts = exec_controls_get(vmx) &
3358 (CPU_BASED_INTR_WINDOW_EXITING | CPU_BASED_NMI_WINDOW_EXITING);
3359 if (likely(!evaluate_pending_interrupts) && kvm_vcpu_apicv_active(vcpu))
3360 evaluate_pending_interrupts |= vmx_has_apicv_interrupt(vcpu);
3362 if (!vmx->nested.nested_run_pending ||
3363 !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS))
3364 vmx->nested.vmcs01_debugctl = vmcs_read64(GUEST_IA32_DEBUGCTL);
3365 if (kvm_mpx_supported() &&
3366 (!vmx->nested.nested_run_pending ||
3367 !(vmcs12->vm_entry_controls & VM_ENTRY_LOAD_BNDCFGS)))
3368 vmx->nested.vmcs01_guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
3371 * Overwrite vmcs01.GUEST_CR3 with L1's CR3 if EPT is disabled *and*
3372 * nested early checks are disabled. In the event of a "late" VM-Fail,
3373 * i.e. a VM-Fail detected by hardware but not KVM, KVM must unwind its
3374 * software model to the pre-VMEntry host state. When EPT is disabled,
3375 * GUEST_CR3 holds KVM's shadow CR3, not L1's "real" CR3, which causes
3376 * nested_vmx_restore_host_state() to corrupt vcpu->arch.cr3. Stuffing
3377 * vmcs01.GUEST_CR3 results in the unwind naturally setting arch.cr3 to
3378 * the correct value. Smashing vmcs01.GUEST_CR3 is safe because nested
3379 * VM-Exits, and the unwind, reset KVM's MMU, i.e. vmcs01.GUEST_CR3 is
3380 * guaranteed to be overwritten with a shadow CR3 prior to re-entering
3381 * L1. Don't stuff vmcs01.GUEST_CR3 when using nested early checks as
3382 * KVM modifies vcpu->arch.cr3 if and only if the early hardware checks
3383 * pass, and early VM-Fails do not reset KVM's MMU, i.e. the VM-Fail
3384 * path would need to manually save/restore vmcs01.GUEST_CR3.
3386 if (!enable_ept && !nested_early_check)
3387 vmcs_writel(GUEST_CR3, vcpu->arch.cr3);
3389 vmx_switch_vmcs(vcpu, &vmx->nested.vmcs02);
3391 prepare_vmcs02_early(vmx, &vmx->vmcs01, vmcs12);
3394 if (unlikely(!nested_get_vmcs12_pages(vcpu))) {
3395 vmx_switch_vmcs(vcpu, &vmx->vmcs01);
3396 return NVMX_VMENTRY_KVM_INTERNAL_ERROR;
3399 if (nested_vmx_check_vmentry_hw(vcpu)) {
3400 vmx_switch_vmcs(vcpu, &vmx->vmcs01);
3401 return NVMX_VMENTRY_VMFAIL;
3404 if (nested_vmx_check_guest_state(vcpu, vmcs12,
3405 &entry_failure_code)) {
3406 exit_reason.basic = EXIT_REASON_INVALID_STATE;
3407 vmcs12->exit_qualification = entry_failure_code;
3408 goto vmentry_fail_vmexit;
3412 enter_guest_mode(vcpu);
3413 if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING)
3414 vcpu->arch.tsc_offset += vmcs12->tsc_offset;
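/*
 * Editor's note: with TSC offsetting enabled, the TSC that L2 observes is
 * host TSC + L0's offset for L1 + L1's offset for L2, hence the offsets
 * accumulate here; the failure path below subtracts vmcs12->tsc_offset
 * again to restore L1's view.
 */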
3416 if (prepare_vmcs02(vcpu, vmcs12, &entry_failure_code)) {
3417 exit_reason.basic = EXIT_REASON_INVALID_STATE;
3418 vmcs12->exit_qualification = entry_failure_code;
3419 goto vmentry_fail_vmexit_guest_mode;
3423 failed_index = nested_vmx_load_msr(vcpu,
3424 vmcs12->vm_entry_msr_load_addr,
3425 vmcs12->vm_entry_msr_load_count);
3427 exit_reason.basic = EXIT_REASON_MSR_LOAD_FAIL;
3428 vmcs12->exit_qualification = failed_index;
3429 goto vmentry_fail_vmexit_guest_mode;
3433 * The MMU is not initialized to point at the right entities yet and
3434 * "get pages" would need to read data from the guest (i.e. we will
3435 * need to perform gpa to hpa translation). Request a call
3436 * to nested_get_vmcs12_pages before the next VM-entry. The MSRs
3437 * have already been set at vmentry time and should not be reset.
3439 kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
3443 * If L1 had a pending IRQ/NMI until it executed
3444 * VMLAUNCH/VMRESUME which wasn't delivered because it was
3445 * disallowed (e.g. interrupts disabled), L0 needs to
3446 * evaluate if this pending event should cause an exit from L2
3447 * to L1 or be delivered directly to L2 (e.g. in case L1 doesn't
3448 * intercept EXTERNAL_INTERRUPT).
3450 * Usually this would be handled by the processor noticing an
3451 * IRQ/NMI window request, or checking RVI during evaluation of
3452 * pending virtual interrupts. However, this setting was done
3453 * on VMCS01 and now VMCS02 is active instead. Thus, we force L0
3454 * to perform pending event evaluation by requesting a KVM_REQ_EVENT.
3456 if (unlikely(evaluate_pending_interrupts))
3457 kvm_make_request(KVM_REQ_EVENT, vcpu);
3460 * Do not start the preemption timer hrtimer until after we know
3461 * we are successful, so that only nested_vmx_vmexit needs to cancel
3462 * the timer.
3464 vmx->nested.preemption_timer_expired = false;
3465 if (nested_cpu_has_preemption_timer(vmcs12)) {
3466 u64 timer_value = vmx_calc_preemption_timer_value(vcpu);
3467 vmx_start_preemption_timer(vcpu, timer_value);
3471 * Note no nested_vmx_succeed or nested_vmx_fail here. At this point
3472 * we are no longer running L1, and VMLAUNCH/VMRESUME has not yet
3473 * returned as far as L1 is concerned. It will only return (and set
3474 * the success flag) when L2 exits (see nested_vmx_vmexit()).
3476 return NVMX_VMENTRY_SUCCESS;
3479 * A failed consistency check that leads to a VMExit during L1's
3480 * VMEnter to L2 is a variation of a normal VMexit, as explained in
3481 * 26.7 "VM-entry failures during or after loading guest state".
3483 vmentry_fail_vmexit_guest_mode:
3484 if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING)
3485 vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
3486 leave_guest_mode(vcpu);
3488 vmentry_fail_vmexit:
3489 vmx_switch_vmcs(vcpu, &vmx->vmcs01);
3492 return NVMX_VMENTRY_VMEXIT;
3494 load_vmcs12_host_state(vcpu, vmcs12);
3495 vmcs12->vm_exit_reason = exit_reason.full;
3496 if (enable_shadow_vmcs || vmx->nested.hv_evmcs)
3497 vmx->nested.need_vmcs12_to_shadow_sync = true;
3498 return NVMX_VMENTRY_VMEXIT;
3502 * nested_vmx_run() handles a nested entry, i.e., a VMLAUNCH or VMRESUME on L1
3503 * for running an L2 nested guest.
3505 static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
3507 struct vmcs12 *vmcs12;
3508 enum nvmx_vmentry_status status;
3509 struct vcpu_vmx *vmx = to_vmx(vcpu);
3510 u32 interrupt_shadow = vmx_get_interrupt_shadow(vcpu);
3511 enum nested_evmptrld_status evmptrld_status;
3513 if (!nested_vmx_check_permission(vcpu))
3516 evmptrld_status = nested_vmx_handle_enlightened_vmptrld(vcpu, launch);
3517 if (evmptrld_status == EVMPTRLD_ERROR) {
3518 kvm_queue_exception(vcpu, UD_VECTOR);
3520 } else if (CC(evmptrld_status == EVMPTRLD_VMFAIL)) {
3521 return nested_vmx_failInvalid(vcpu);
3524 if (CC(!vmx->nested.hv_evmcs && vmx->nested.current_vmptr == -1ull))
3525 return nested_vmx_failInvalid(vcpu);
3527 vmcs12 = get_vmcs12(vcpu);
3530 * Can't VMLAUNCH or VMRESUME a shadow VMCS. Despite the fact
3531 * that there *is* a valid VMCS pointer, RFLAGS.CF is set
3532 * rather than RFLAGS.ZF, and no error number is stored to the
3533 * VM-instruction error field.
3535 if (CC(vmcs12->hdr.shadow_vmcs))
3536 return nested_vmx_failInvalid(vcpu);
3538 if (vmx->nested.hv_evmcs) {
3539 copy_enlightened_to_vmcs12(vmx);
3540 /* Enlightened VMCS doesn't have launch state */
3541 vmcs12->launch_state = !launch;
3542 } else if (enable_shadow_vmcs) {
3543 copy_shadow_to_vmcs12(vmx);
3547 * The nested entry process starts with enforcing various prerequisites
3548 * on vmcs12 as required by the Intel SDM, acting appropriately when
3549 * they fail: As the SDM explains, some conditions should cause the
3550 * instruction to fail, while others will cause the instruction to seem
3551 * to succeed, but return an EXIT_REASON_INVALID_STATE.
3552 * To speed up the normal (success) code path, we should avoid checking
3553 * for misconfigurations which will anyway be caught by the processor
3554 * when using the merged vmcs02.
3556 if (CC(interrupt_shadow & KVM_X86_SHADOW_INT_MOV_SS))
3557 return nested_vmx_fail(vcpu, VMXERR_ENTRY_EVENTS_BLOCKED_BY_MOV_SS);
3559 if (CC(vmcs12->launch_state == launch))
3560 return nested_vmx_fail(vcpu,
3561 launch ? VMXERR_VMLAUNCH_NONCLEAR_VMCS
3562 : VMXERR_VMRESUME_NONLAUNCHED_VMCS);
3564 if (nested_vmx_check_controls(vcpu, vmcs12))
3565 return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
3567 if (nested_vmx_check_address_space_size(vcpu, vmcs12))
3568 return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
3570 if (nested_vmx_check_host_state(vcpu, vmcs12))
3571 return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
3574 * We're finally done with prerequisite checking, and can start with
3575 * the nested entry.
3577 vmx->nested.nested_run_pending = 1;
3578 vmx->nested.has_preemption_timer_deadline = false;
3579 status = nested_vmx_enter_non_root_mode(vcpu, true);
3580 if (unlikely(status != NVMX_VMENTRY_SUCCESS))
3581 goto vmentry_failed;
3583 /* Emulate processing of posted interrupts on VM-Enter. */
3584 if (nested_cpu_has_posted_intr(vmcs12) &&
3585 kvm_apic_has_interrupt(vcpu) == vmx->nested.posted_intr_nv) {
3586 vmx->nested.pi_pending = true;
3587 kvm_make_request(KVM_REQ_EVENT, vcpu);
3588 kvm_apic_clear_irr(vcpu, vmx->nested.posted_intr_nv);
3591 /* Hide L1D cache contents from the nested guest. */
3592 vmx->vcpu.arch.l1tf_flush_l1d = true;
3595 * Must happen outside of nested_vmx_enter_non_root_mode() as it will
3596 * also be used as part of restoring nVMX state for
3597 * snapshot restore (migration).
3599 * In this flow, it is assumed that vmcs12 cache was
3600 * transferred as part of captured nVMX state and should
3601 * therefore not be read from guest memory (which may not
3602 * exist on destination host yet).
3604 nested_cache_shadow_vmcs12(vcpu, vmcs12);
3607 * If we're entering a halted L2 vcpu and the L2 vcpu won't be
3608 * awakened by event injection or by an NMI-window VM-exit or
3609 * by an interrupt-window VM-exit, halt the vcpu.
3611 if ((vmcs12->guest_activity_state == GUEST_ACTIVITY_HLT) &&
3612 !(vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) &&
3613 !(vmcs12->cpu_based_vm_exec_control & CPU_BASED_NMI_WINDOW_EXITING) &&
3614 !((vmcs12->cpu_based_vm_exec_control & CPU_BASED_INTR_WINDOW_EXITING) &&
3615 (vmcs12->guest_rflags & X86_EFLAGS_IF))) {
3616 vmx->nested.nested_run_pending = 0;
3617 return kvm_vcpu_halt(vcpu);
3622 vmx->nested.nested_run_pending = 0;
3623 if (status == NVMX_VMENTRY_KVM_INTERNAL_ERROR)
3625 if (status == NVMX_VMENTRY_VMEXIT)
3627 WARN_ON_ONCE(status != NVMX_VMENTRY_VMFAIL);
3628 return nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
3632 * On a nested exit from L2 to L1, vmcs12.guest_cr0 might not be up-to-date
3633 * because L2 may have changed some cr0 bits directly (CR0_GUEST_HOST_MASK).
3634 * This function returns the new value we should put in vmcs12.guest_cr0.
3635 * It's not enough to just return the vmcs02 GUEST_CR0. Rather,
3636 * 1. Bits that neither L0 nor L1 trapped, were set directly by L2 and are now
3637 * available in vmcs02 GUEST_CR0. (Note: It's enough to check that L0
3638 * didn't trap the bit, because if L1 did, so would L0).
3639 * 2. Bits that L1 asked to trap (and therefore L0 also did) could not have
3640 * been modified by L2, and L1 knows it. So just leave the old value of
3641 * the bit from vmcs12.guest_cr0. Note that the bit from vmcs02 GUEST_CR0
3642 * isn't relevant, because if L0 traps this bit it can set it to anything.
3643 * 3. Bits that L1 didn't trap, but L0 did. L1 believes the guest could have
3644 * changed these bits, and therefore they need to be updated, but L0
3645 * didn't necessarily allow them to be changed in GUEST_CR0 - and rather
3646 * put them in vmcs02 CR0_READ_SHADOW. So take these bits from there.
3648 static inline unsigned long
3649 vmcs12_guest_cr0(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
3652 /*1*/ (vmcs_readl(GUEST_CR0) & vcpu->arch.cr0_guest_owned_bits) |
3653 /*2*/ (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask) |
3654 /*3*/ (vmcs_readl(CR0_READ_SHADOW) & ~(vmcs12->cr0_guest_host_mask |
3655 vcpu->arch.cr0_guest_owned_bits));
3658 static inline unsigned long
3659 vmcs12_guest_cr4(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
3662 /*1*/ (vmcs_readl(GUEST_CR4) & vcpu->arch.cr4_guest_owned_bits) |
3663 /*2*/ (vmcs12->guest_cr4 & vmcs12->cr4_guest_host_mask) |
3664 /*3*/ (vmcs_readl(CR4_READ_SHADOW) & ~(vmcs12->cr4_guest_host_mask |
3665 vcpu->arch.cr4_guest_owned_bits));
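/*
 * Purely illustrative example of the merge above, with hypothetical masks
 * not taken from any real configuration: suppose L1 traps only CR0.NE
 * (cr0_guest_host_mask == X86_CR0_NE) while L0 additionally traps CR0.TS,
 * i.e. cr0_guest_owned_bits contains CR0.CD but neither TS nor NE.  Then:
 *
 *   CR0.CD - guest-owned,        rule 1: taken from vmcs02 GUEST_CR0.
 *   CR0.NE - trapped by L1,      rule 2: taken from vmcs12->guest_cr0.
 *   CR0.TS - trapped only by L0, rule 3: taken from vmcs02 CR0_READ_SHADOW.
 */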
3668 static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu,
3669 struct vmcs12 *vmcs12,
3670 u32 vm_exit_reason, u32 exit_intr_info)
3676 * Per the SDM, VM-Exits due to double and triple faults are never
3677 * considered to occur during event delivery, even if the double/triple
3678 * fault is the result of an escalating vectoring issue.
3680 * Note, the SDM qualifies the double fault behavior with "The original
3681 * event results in a double-fault exception". It's unclear why the
3682 * qualification exists since exits due to double fault can occur only
3683 * while vectoring a different exception (injected events are never
3684 * subject to interception), i.e. there's _always_ an original event.
3686 * The SDM also uses NMI as a confusing example for the "original event
3687 * causes the VM exit directly" clause. NMI isn't special in any way,
3688 * the same rule applies to all events that cause an exit directly.
3689 * NMI is an odd choice for the example because NMIs can only occur on
3690 * instruction boundaries, i.e. they _can't_ occur during vectoring.
3692 if ((u16)vm_exit_reason == EXIT_REASON_TRIPLE_FAULT ||
3693 ((u16)vm_exit_reason == EXIT_REASON_EXCEPTION_NMI &&
3694 is_double_fault(exit_intr_info))) {
3695 vmcs12->idt_vectoring_info_field = 0;
3696 } else if (vcpu->arch.exception.injected) {
3697 nr = vcpu->arch.exception.nr;
3698 idt_vectoring = nr | VECTORING_INFO_VALID_MASK;
3700 if (kvm_exception_is_soft(nr)) {
3701 vmcs12->vm_exit_instruction_len =
3702 vcpu->arch.event_exit_inst_len;
3703 idt_vectoring |= INTR_TYPE_SOFT_EXCEPTION;
3705 idt_vectoring |= INTR_TYPE_HARD_EXCEPTION;
3707 if (vcpu->arch.exception.has_error_code) {
3708 idt_vectoring |= VECTORING_INFO_DELIVER_CODE_MASK;
3709 vmcs12->idt_vectoring_error_code =
3710 vcpu->arch.exception.error_code;
3713 vmcs12->idt_vectoring_info_field = idt_vectoring;
3714 } else if (vcpu->arch.nmi_injected) {
3715 vmcs12->idt_vectoring_info_field =
3716 INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR;
3717 } else if (vcpu->arch.interrupt.injected) {
3718 nr = vcpu->arch.interrupt.nr;
3719 idt_vectoring = nr | VECTORING_INFO_VALID_MASK;
3721 if (vcpu->arch.interrupt.soft) {
3722 idt_vectoring |= INTR_TYPE_SOFT_INTR;
3723 vmcs12->vm_entry_instruction_len =
3724 vcpu->arch.event_exit_inst_len;
3726 idt_vectoring |= INTR_TYPE_EXT_INTR;
3728 vmcs12->idt_vectoring_info_field = idt_vectoring;
3730 vmcs12->idt_vectoring_info_field = 0;
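/*
 * Illustrative encoding for a hypothetical case: if a hardware #PF with an
 * error code was injected but the exit occurred before it was delivered,
 * the field built above would be
 *
 *   PF_VECTOR | INTR_TYPE_HARD_EXCEPTION |
 *   VECTORING_INFO_DELIVER_CODE_MASK | VECTORING_INFO_VALID_MASK
 *     = 14 | (3 << 8) | (1 << 11) | (1u << 31) = 0x80000b0e,
 *
 * with the error code itself stored in idt_vectoring_error_code.
 */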
3735 void nested_mark_vmcs12_pages_dirty(struct kvm_vcpu *vcpu)
3737 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3741 * Don't need to mark the APIC access page dirty; it is never
3742 * written to by the CPU during APIC virtualization.
3745 if (nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW)) {
3746 gfn = vmcs12->virtual_apic_page_addr >> PAGE_SHIFT;
3747 kvm_vcpu_mark_page_dirty(vcpu, gfn);
3750 if (nested_cpu_has_posted_intr(vmcs12)) {
3751 gfn = vmcs12->posted_intr_desc_addr >> PAGE_SHIFT;
3752 kvm_vcpu_mark_page_dirty(vcpu, gfn);
3756 static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
3758 struct vcpu_vmx *vmx = to_vmx(vcpu);
3763 if (!vmx->nested.pi_desc || !vmx->nested.pi_pending)
3766 vmx->nested.pi_pending = false;
3767 if (!pi_test_and_clear_on(vmx->nested.pi_desc))
3770 max_irr = find_last_bit((unsigned long *)vmx->nested.pi_desc->pir, 256);
3771 if (max_irr != 256) {
3772 vapic_page = vmx->nested.virtual_apic_map.hva;
3776 __kvm_apic_update_irr(vmx->nested.pi_desc->pir,
3777 vapic_page, &max_irr);
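/*
 * The low byte of GUEST_INTR_STATUS is the Requesting Virtual Interrupt
 * (RVI) field; raising it to the highest vector just merged from the PIR
 * emulates what posted-interrupt processing would have done for vmcs02.
 */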
3778 status = vmcs_read16(GUEST_INTR_STATUS);
3779 if ((u8)max_irr > ((u8)status & 0xff)) {
3781 status |= (u8)max_irr;
3782 vmcs_write16(GUEST_INTR_STATUS, status);
3786 nested_mark_vmcs12_pages_dirty(vcpu);
3789 static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu,
3790 unsigned long exit_qual)
3792 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
3793 unsigned int nr = vcpu->arch.exception.nr;
3794 u32 intr_info = nr | INTR_INFO_VALID_MASK;
3796 if (vcpu->arch.exception.has_error_code) {
3798 * Intel CPUs do not generate error codes with bits 31:16 set,
3799 * and more importantly VMX disallows setting bits 31:16 in the
3800 * injected error code for VM-Entry. Drop the bits to mimic
3801 * hardware and avoid inducing failure on nested VM-Entry if L1
3802 * chooses to inject the exception back to L2. AMD CPUs _do_
3803 * generate "full" 32-bit error codes, so KVM allows userspace
3804 * to inject exception error codes with bits 31:16 set.
3806 vmcs12->vm_exit_intr_error_code = (u16)vcpu->arch.exception.error_code;
3807 intr_info |= INTR_INFO_DELIVER_CODE_MASK;
3810 if (kvm_exception_is_soft(nr))
3811 intr_info |= INTR_TYPE_SOFT_EXCEPTION;
3813 intr_info |= INTR_TYPE_HARD_EXCEPTION;
3815 if (!(vmcs12->idt_vectoring_info_field & VECTORING_INFO_VALID_MASK) &&
3816 vmx_get_nmi_mask(vcpu))
3817 intr_info |= INTR_INFO_UNBLOCK_NMI;
3819 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, intr_info, exit_qual);
3823 * Returns true if a debug trap is pending delivery.
3825 * In KVM, debug traps bear an exception payload. As such, the class of a #DB
3826 * exception may be inferred from the presence of an exception payload.
3828 static inline bool vmx_pending_dbg_trap(struct kvm_vcpu *vcpu)
3830 return vcpu->arch.exception.pending &&
3831 vcpu->arch.exception.nr == DB_VECTOR &&
3832 vcpu->arch.exception.payload;
3836 * Certain VM-exits set the 'pending debug exceptions' field to indicate a
3837 * recognized #DB (data or single-step) that has yet to be delivered. Since KVM
3838 * represents these debug traps with a payload that is said to be compatible
3839 * with the 'pending debug exceptions' field, write the payload to the VMCS
3840 * field if a VM-exit is delivered before the debug trap.
3842 static void nested_vmx_update_pending_dbg(struct kvm_vcpu *vcpu)
3844 if (vmx_pending_dbg_trap(vcpu))
3845 vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
3846 vcpu->arch.exception.payload);
3849 static bool nested_vmx_preemption_timer_pending(struct kvm_vcpu *vcpu)
3851 return nested_cpu_has_preemption_timer(get_vmcs12(vcpu)) &&
3852 to_vmx(vcpu)->nested.preemption_timer_expired;
3855 static int vmx_check_nested_events(struct kvm_vcpu *vcpu)
3857 struct vcpu_vmx *vmx = to_vmx(vcpu);
3858 unsigned long exit_qual;
3859 bool block_nested_events =
3860 vmx->nested.nested_run_pending || kvm_event_needs_reinjection(vcpu);
3861 bool mtf_pending = vmx->nested.mtf_pending;
3862 struct kvm_lapic *apic = vcpu->arch.apic;
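/*
 * The checks below run in (approximate) architectural priority order:
 * pending INIT, exceptions other than debug traps, Monitor Trap Flag,
 * debug-trap exceptions, the VMX-preemption timer, SMI, NMI and finally
 * external interrupts.  Each check bails out early while nested events
 * are blocked.
 */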
3865 * Clear the MTF state. If a higher priority VM-exit is delivered first,
3866 * this state is discarded.
3868 if (!block_nested_events)
3869 vmx->nested.mtf_pending = false;
3871 if (lapic_in_kernel(vcpu) &&
3872 test_bit(KVM_APIC_INIT, &apic->pending_events)) {
3873 if (block_nested_events)
3875 nested_vmx_update_pending_dbg(vcpu);
3876 clear_bit(KVM_APIC_INIT, &apic->pending_events);
3877 nested_vmx_vmexit(vcpu, EXIT_REASON_INIT_SIGNAL, 0, 0);
3882 * Process any exceptions that are not debug traps before MTF.
3884 if (vcpu->arch.exception.pending && !vmx_pending_dbg_trap(vcpu)) {
3885 if (block_nested_events)
3887 if (!nested_vmx_check_exception(vcpu, &exit_qual))
3889 nested_vmx_inject_exception_vmexit(vcpu, exit_qual);
3894 if (block_nested_events)
3896 nested_vmx_update_pending_dbg(vcpu);
3897 nested_vmx_vmexit(vcpu, EXIT_REASON_MONITOR_TRAP_FLAG, 0, 0);
3901 if (vcpu->arch.exception.pending) {
3902 if (block_nested_events)
3904 if (!nested_vmx_check_exception(vcpu, &exit_qual))
3906 nested_vmx_inject_exception_vmexit(vcpu, exit_qual);
3910 if (nested_vmx_preemption_timer_pending(vcpu)) {
3911 if (block_nested_events)
3913 nested_vmx_vmexit(vcpu, EXIT_REASON_PREEMPTION_TIMER, 0, 0);
3917 if (vcpu->arch.smi_pending && !is_smm(vcpu)) {
3918 if (block_nested_events)
3923 if (vcpu->arch.nmi_pending && !vmx_nmi_blocked(vcpu)) {
3924 if (block_nested_events)
3926 if (!nested_exit_on_nmi(vcpu))
3929 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
3930 NMI_VECTOR | INTR_TYPE_NMI_INTR |
3931 INTR_INFO_VALID_MASK, 0);
3933 * The NMI-triggered VM exit counts as injection:
3934 * clear this one and block further NMIs.
3936 vcpu->arch.nmi_pending = 0;
3937 vmx_set_nmi_mask(vcpu, true);
3941 if (kvm_cpu_has_interrupt(vcpu) && !vmx_interrupt_blocked(vcpu)) {
3942 if (block_nested_events)
3944 if (!nested_exit_on_intr(vcpu))
3946 nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT, 0, 0);
3951 vmx_complete_nested_posted_interrupt(vcpu);
3955 static u32 vmx_get_preemption_timer_value(struct kvm_vcpu *vcpu)
3958 hrtimer_get_remaining(&to_vmx(vcpu)->nested.preemption_timer);
3961 if (ktime_to_ns(remaining) <= 0)
3964 value = ktime_to_ns(remaining) * vcpu->arch.virtual_tsc_khz;
3965 do_div(value, 1000000);
3966 return value >> VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE;
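/*
 * Worked example with made-up numbers: with virtual_tsc_khz = 2000000
 * (a 2 GHz guest TSC) and 1000000 ns (1 ms) remaining on the hrtimer,
 * the conversion above gives 1000000 * 2000000 / 1000000 = 2000000 TSC
 * cycles, and 2000000 >> VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE = 62500
 * units of the emulated preemption timer (which ticks once every 32 TSC
 * cycles).
 */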
3969 static bool is_vmcs12_ext_field(unsigned long field)
3972 case GUEST_ES_SELECTOR:
3973 case GUEST_CS_SELECTOR:
3974 case GUEST_SS_SELECTOR:
3975 case GUEST_DS_SELECTOR:
3976 case GUEST_FS_SELECTOR:
3977 case GUEST_GS_SELECTOR:
3978 case GUEST_LDTR_SELECTOR:
3979 case GUEST_TR_SELECTOR:
3980 case GUEST_ES_LIMIT:
3981 case GUEST_CS_LIMIT:
3982 case GUEST_SS_LIMIT:
3983 case GUEST_DS_LIMIT:
3984 case GUEST_FS_LIMIT:
3985 case GUEST_GS_LIMIT:
3986 case GUEST_LDTR_LIMIT:
3987 case GUEST_TR_LIMIT:
3988 case GUEST_GDTR_LIMIT:
3989 case GUEST_IDTR_LIMIT:
3990 case GUEST_ES_AR_BYTES:
3991 case GUEST_DS_AR_BYTES:
3992 case GUEST_FS_AR_BYTES:
3993 case GUEST_GS_AR_BYTES:
3994 case GUEST_LDTR_AR_BYTES:
3995 case GUEST_TR_AR_BYTES:
4002 case GUEST_LDTR_BASE:
4004 case GUEST_GDTR_BASE:
4005 case GUEST_IDTR_BASE:
4006 case GUEST_PENDING_DBG_EXCEPTIONS:
4016 static void sync_vmcs02_to_vmcs12_rare(struct kvm_vcpu *vcpu,
4017 struct vmcs12 *vmcs12)
4019 struct vcpu_vmx *vmx = to_vmx(vcpu);
4021 vmcs12->guest_es_selector = vmcs_read16(GUEST_ES_SELECTOR);
4022 vmcs12->guest_cs_selector = vmcs_read16(GUEST_CS_SELECTOR);
4023 vmcs12->guest_ss_selector = vmcs_read16(GUEST_SS_SELECTOR);
4024 vmcs12->guest_ds_selector = vmcs_read16(GUEST_DS_SELECTOR);
4025 vmcs12->guest_fs_selector = vmcs_read16(GUEST_FS_SELECTOR);
4026 vmcs12->guest_gs_selector = vmcs_read16(GUEST_GS_SELECTOR);
4027 vmcs12->guest_ldtr_selector = vmcs_read16(GUEST_LDTR_SELECTOR);
4028 vmcs12->guest_tr_selector = vmcs_read16(GUEST_TR_SELECTOR);
4029 vmcs12->guest_es_limit = vmcs_read32(GUEST_ES_LIMIT);
4030 vmcs12->guest_cs_limit = vmcs_read32(GUEST_CS_LIMIT);
4031 vmcs12->guest_ss_limit = vmcs_read32(GUEST_SS_LIMIT);
4032 vmcs12->guest_ds_limit = vmcs_read32(GUEST_DS_LIMIT);
4033 vmcs12->guest_fs_limit = vmcs_read32(GUEST_FS_LIMIT);
4034 vmcs12->guest_gs_limit = vmcs_read32(GUEST_GS_LIMIT);
4035 vmcs12->guest_ldtr_limit = vmcs_read32(GUEST_LDTR_LIMIT);
4036 vmcs12->guest_tr_limit = vmcs_read32(GUEST_TR_LIMIT);
4037 vmcs12->guest_gdtr_limit = vmcs_read32(GUEST_GDTR_LIMIT);
4038 vmcs12->guest_idtr_limit = vmcs_read32(GUEST_IDTR_LIMIT);
4039 vmcs12->guest_es_ar_bytes = vmcs_read32(GUEST_ES_AR_BYTES);
4040 vmcs12->guest_ds_ar_bytes = vmcs_read32(GUEST_DS_AR_BYTES);
4041 vmcs12->guest_fs_ar_bytes = vmcs_read32(GUEST_FS_AR_BYTES);
4042 vmcs12->guest_gs_ar_bytes = vmcs_read32(GUEST_GS_AR_BYTES);
4043 vmcs12->guest_ldtr_ar_bytes = vmcs_read32(GUEST_LDTR_AR_BYTES);
4044 vmcs12->guest_tr_ar_bytes = vmcs_read32(GUEST_TR_AR_BYTES);
4045 vmcs12->guest_es_base = vmcs_readl(GUEST_ES_BASE);
4046 vmcs12->guest_cs_base = vmcs_readl(GUEST_CS_BASE);
4047 vmcs12->guest_ss_base = vmcs_readl(GUEST_SS_BASE);
4048 vmcs12->guest_ds_base = vmcs_readl(GUEST_DS_BASE);
4049 vmcs12->guest_fs_base = vmcs_readl(GUEST_FS_BASE);
4050 vmcs12->guest_gs_base = vmcs_readl(GUEST_GS_BASE);
4051 vmcs12->guest_ldtr_base = vmcs_readl(GUEST_LDTR_BASE);
4052 vmcs12->guest_tr_base = vmcs_readl(GUEST_TR_BASE);
4053 vmcs12->guest_gdtr_base = vmcs_readl(GUEST_GDTR_BASE);
4054 vmcs12->guest_idtr_base = vmcs_readl(GUEST_IDTR_BASE);
4055 vmcs12->guest_pending_dbg_exceptions =
4056 vmcs_readl(GUEST_PENDING_DBG_EXCEPTIONS);
4057 if (kvm_mpx_supported())
4058 vmcs12->guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
4060 vmx->nested.need_sync_vmcs02_to_vmcs12_rare = false;
4063 static void copy_vmcs02_to_vmcs12_rare(struct kvm_vcpu *vcpu,
4064 struct vmcs12 *vmcs12)
4066 struct vcpu_vmx *vmx = to_vmx(vcpu);
4069 if (!vmx->nested.need_sync_vmcs02_to_vmcs12_rare)
4073 WARN_ON_ONCE(vmx->loaded_vmcs != &vmx->vmcs01);
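/*
 * Temporarily make vmcs02 the current VMCS so that
 * sync_vmcs02_to_vmcs12_rare() can VMREAD the rarely-synced guest fields,
 * then restore vmcs01 as the current VMCS before returning.
 */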
4076 vmx->loaded_vmcs = &vmx->nested.vmcs02;
4077 vmx_vcpu_load_vmcs(vcpu, cpu, &vmx->vmcs01);
4079 sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
4081 vmx->loaded_vmcs = &vmx->vmcs01;
4082 vmx_vcpu_load_vmcs(vcpu, cpu, &vmx->nested.vmcs02);
4087 * Update the guest state fields of vmcs12 to reflect changes that
4088 * occurred while L2 was running. (The "IA-32e mode guest" bit of the
4089 * VM-entry controls is also updated, since this is really a guest
4092 static void sync_vmcs02_to_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
4094 struct vcpu_vmx *vmx = to_vmx(vcpu);
4096 if (vmx->nested.hv_evmcs)
4097 sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
4099 vmx->nested.need_sync_vmcs02_to_vmcs12_rare = !vmx->nested.hv_evmcs;
4101 vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12);
4102 vmcs12->guest_cr4 = vmcs12_guest_cr4(vcpu, vmcs12);
4104 vmcs12->guest_rsp = kvm_rsp_read(vcpu);
4105 vmcs12->guest_rip = kvm_rip_read(vcpu);
4106 vmcs12->guest_rflags = vmcs_readl(GUEST_RFLAGS);
4108 vmcs12->guest_cs_ar_bytes = vmcs_read32(GUEST_CS_AR_BYTES);
4109 vmcs12->guest_ss_ar_bytes = vmcs_read32(GUEST_SS_AR_BYTES);
4111 vmcs12->guest_interruptibility_info =
4112 vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
4114 if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED)
4115 vmcs12->guest_activity_state = GUEST_ACTIVITY_HLT;
4117 vmcs12->guest_activity_state = GUEST_ACTIVITY_ACTIVE;
4119 if (nested_cpu_has_preemption_timer(vmcs12) &&
4120 vmcs12->vm_exit_controls & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER &&
4121 !vmx->nested.nested_run_pending)
4122 vmcs12->vmx_preemption_timer_value =
4123 vmx_get_preemption_timer_value(vcpu);
4126 * In some cases (usually, nested EPT), L2 is allowed to change its
4127 * own CR3 without exiting. If it has changed it, we must keep it.
4128 * Of course, if L0 is using shadow page tables, GUEST_CR3 was defined
4129 * by L0, not L1 or L2, so we mustn't unconditionally copy it to vmcs12.
4131 * Additionally, restore L2's PDPTR to vmcs12.
4134 vmcs12->guest_cr3 = vmcs_readl(GUEST_CR3);
4135 if (nested_cpu_has_ept(vmcs12) && is_pae_paging(vcpu)) {
4136 vmcs12->guest_pdptr0 = vmcs_read64(GUEST_PDPTR0);
4137 vmcs12->guest_pdptr1 = vmcs_read64(GUEST_PDPTR1);
4138 vmcs12->guest_pdptr2 = vmcs_read64(GUEST_PDPTR2);
4139 vmcs12->guest_pdptr3 = vmcs_read64(GUEST_PDPTR3);
4143 vmcs12->guest_linear_address = vmcs_readl(GUEST_LINEAR_ADDRESS);
4145 if (nested_cpu_has_vid(vmcs12))
4146 vmcs12->guest_intr_status = vmcs_read16(GUEST_INTR_STATUS);
4148 vmcs12->vm_entry_controls =
4149 (vmcs12->vm_entry_controls & ~VM_ENTRY_IA32E_MODE) |
4150 (vm_entry_controls_get(to_vmx(vcpu)) & VM_ENTRY_IA32E_MODE);
4152 if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_DEBUG_CONTROLS)
4153 kvm_get_dr(vcpu, 7, (unsigned long *)&vmcs12->guest_dr7);
4155 if (vmcs12->vm_exit_controls & VM_EXIT_SAVE_IA32_EFER)
4156 vmcs12->guest_ia32_efer = vcpu->arch.efer;
4160 * prepare_vmcs12 is part of what we need to do when the nested L2 guest exits
4161 * and we want to prepare to run its L1 parent. L1 keeps a vmcs for L2 (vmcs12),
4162 * and this function updates it to reflect the changes to the guest state while
4163 * L2 was running (and perhaps made some exits which were handled directly by L0
4164 * without going back to L1), and to reflect the exit reason.
4165 * Note that we do not have to copy here all VMCS fields, just those that
4166 * could have changed by the L2 guest or the exit - i.e., the guest-state and
4167 * exit-information fields only. Other fields are modified by L1 with VMWRITE,
4168 * which already writes to vmcs12 directly.
4170 static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
4171 u32 vm_exit_reason, u32 exit_intr_info,
4172 unsigned long exit_qualification)
4174 /* update exit information fields: */
4175 vmcs12->vm_exit_reason = vm_exit_reason;
4176 vmcs12->exit_qualification = exit_qualification;
4179 * On VM-Exit due to a failed VM-Entry, the VMCS isn't marked launched
4180 * and only EXIT_REASON and EXIT_QUALIFICATION are updated, all other
4181 * exit info fields are unmodified.
4183 if (!(vmcs12->vm_exit_reason & VMX_EXIT_REASONS_FAILED_VMENTRY)) {
4184 vmcs12->launch_state = 1;
4186 /* vm_entry_intr_info_field is cleared on exit. Emulate this
4187 * instead of reading the real value. */
4188 vmcs12->vm_entry_intr_info_field &= ~INTR_INFO_VALID_MASK;
4191 * Transfer the event that L0 or L1 may have wanted to inject into
4192 * L2 to IDT_VECTORING_INFO_FIELD.
4194 vmcs12_save_pending_event(vcpu, vmcs12,
4195 vm_exit_reason, exit_intr_info);
4197 vmcs12->vm_exit_intr_info = exit_intr_info;
4198 vmcs12->vm_exit_instruction_len = vmcs_read32(VM_EXIT_INSTRUCTION_LEN);
4199 vmcs12->vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
4202 * According to spec, there's no need to store the guest's
4203 * MSRs if the exit is due to a VM-entry failure that occurs
4204 * during or after loading the guest state. Since this exit
4205 * does not fall in that category, we need to save the MSRs.
4207 if (nested_vmx_store_msr(vcpu,
4208 vmcs12->vm_exit_msr_store_addr,
4209 vmcs12->vm_exit_msr_store_count))
4210 nested_vmx_abort(vcpu,
4211 VMX_ABORT_SAVE_GUEST_MSR_FAIL);
4216 * A part of what we need to do when the nested L2 guest exits and we want to
4217 * run its L1 parent, is to reset L1's guest state to the host state specified in vmcs12.
4219 * This function is to be called not only on normal nested exit, but also on
4220 * a nested entry failure, as explained in Intel's spec, 3B.23.7 ("VM-Entry
4221 * Failures During or After Loading Guest State").
4222 * This function should be called when the active VMCS is L1's (vmcs01).
4224 static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
4225 struct vmcs12 *vmcs12)
4227 enum vm_entry_failure_code ignored;
4228 struct kvm_segment seg;
4230 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER)
4231 vcpu->arch.efer = vmcs12->host_ia32_efer;
4232 else if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
4233 vcpu->arch.efer |= (EFER_LMA | EFER_LME);
4235 vcpu->arch.efer &= ~(EFER_LMA | EFER_LME);
4236 vmx_set_efer(vcpu, vcpu->arch.efer);
4238 kvm_rsp_write(vcpu, vmcs12->host_rsp);
4239 kvm_rip_write(vcpu, vmcs12->host_rip);
4240 vmx_set_rflags(vcpu, X86_EFLAGS_FIXED);
4241 vmx_set_interrupt_shadow(vcpu, 0);
4244 * Note that calling vmx_set_cr0 is important, even if cr0 hasn't
4245 * actually changed, because vmx_set_cr0 refers to efer set above.
4247 * CR0_GUEST_HOST_MASK is already set in the original vmcs01
4248 * (KVM doesn't change it);
4250 vcpu->arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS;
4251 vmx_set_cr0(vcpu, vmcs12->host_cr0);
4253 /* Same as above - no reason to call set_cr4_guest_host_mask(). */
4254 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
4255 vmx_set_cr4(vcpu, vmcs12->host_cr4);
4257 nested_ept_uninit_mmu_context(vcpu);
4260 * Only PDPTE load can fail as the value of cr3 was checked on entry and
4261 * couldn't have changed.
4263 if (nested_vmx_load_cr3(vcpu, vmcs12->host_cr3, false, &ignored))
4264 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_PDPTE_FAIL);
4267 vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
4269 nested_vmx_transition_tlb_flush(vcpu, vmcs12, false);
4271 vmcs_write32(GUEST_SYSENTER_CS, vmcs12->host_ia32_sysenter_cs);
4272 vmcs_writel(GUEST_SYSENTER_ESP, vmcs12->host_ia32_sysenter_esp);
4273 vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip);
4274 vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base);
4275 vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base);
4276 vmcs_write32(GUEST_IDTR_LIMIT, 0xFFFF);
4277 vmcs_write32(GUEST_GDTR_LIMIT, 0xFFFF);
4279 /* If not VM_EXIT_CLEAR_BNDCFGS, the L2 value propagates to L1. */
4280 if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS)
4281 vmcs_write64(GUEST_BNDCFGS, 0);
4283 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PAT) {
4284 vmcs_write64(GUEST_IA32_PAT, vmcs12->host_ia32_pat);
4285 vcpu->arch.pat = vmcs12->host_ia32_pat;
4287 if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
4288 WARN_ON_ONCE(kvm_set_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL,
4289 vmcs12->host_ia32_perf_global_ctrl));
4291 /* Set L1 segment info according to Intel SDM
4292 27.5.2 Loading Host Segment and Descriptor-Table Registers */
4293 seg = (struct kvm_segment) {
4295 .limit = 0xFFFFFFFF,
4296 .selector = vmcs12->host_cs_selector,
4302 if (vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE)
4306 vmx_set_segment(vcpu, &seg, VCPU_SREG_CS);
4307 seg = (struct kvm_segment) {
4309 .limit = 0xFFFFFFFF,
4316 seg.selector = vmcs12->host_ds_selector;
4317 vmx_set_segment(vcpu, &seg, VCPU_SREG_DS);
4318 seg.selector = vmcs12->host_es_selector;
4319 vmx_set_segment(vcpu, &seg, VCPU_SREG_ES);
4320 seg.selector = vmcs12->host_ss_selector;
4321 vmx_set_segment(vcpu, &seg, VCPU_SREG_SS);
4322 seg.selector = vmcs12->host_fs_selector;
4323 seg.base = vmcs12->host_fs_base;
4324 vmx_set_segment(vcpu, &seg, VCPU_SREG_FS);
4325 seg.selector = vmcs12->host_gs_selector;
4326 seg.base = vmcs12->host_gs_base;
4327 vmx_set_segment(vcpu, &seg, VCPU_SREG_GS);
4328 seg = (struct kvm_segment) {
4329 .base = vmcs12->host_tr_base,
4331 .selector = vmcs12->host_tr_selector,
4335 vmx_set_segment(vcpu, &seg, VCPU_SREG_TR);
4337 kvm_set_dr(vcpu, 7, 0x400);
4338 vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
4340 if (cpu_has_vmx_msr_bitmap())
4341 vmx_update_msr_bitmap(vcpu);
4343 if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr,
4344 vmcs12->vm_exit_msr_load_count))
4345 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
4348 static inline u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx)
4350 struct vmx_uret_msr *efer_msr;
4353 if (vm_entry_controls_get(vmx) & VM_ENTRY_LOAD_IA32_EFER)
4354 return vmcs_read64(GUEST_IA32_EFER);
4356 if (cpu_has_load_ia32_efer())
4359 for (i = 0; i < vmx->msr_autoload.guest.nr; ++i) {
4360 if (vmx->msr_autoload.guest.val[i].index == MSR_EFER)
4361 return vmx->msr_autoload.guest.val[i].value;
4364 efer_msr = vmx_find_uret_msr(vmx, MSR_EFER);
4366 return efer_msr->data;
4371 static void nested_vmx_restore_host_state(struct kvm_vcpu *vcpu)
4373 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
4374 struct vcpu_vmx *vmx = to_vmx(vcpu);
4375 struct vmx_msr_entry g, h;
4379 vcpu->arch.pat = vmcs_read64(GUEST_IA32_PAT);
4381 if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) {
4383 * L1's host DR7 is lost if KVM_GUESTDBG_USE_HW_BP is set
4384 * as vmcs01.GUEST_DR7 contains a userspace defined value
4385 * and vcpu->arch.dr7 is not squirreled away before the
4386 * nested VMENTER (not worth adding a variable in nested_vmx).
4388 if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
4389 kvm_set_dr(vcpu, 7, DR7_FIXED_1);
4391 WARN_ON(kvm_set_dr(vcpu, 7, vmcs_readl(GUEST_DR7)));
4395 * Note that calling vmx_set_{efer,cr0,cr4} is important as they
4396 * handle a variety of side effects to KVM's software model.
4398 vmx_set_efer(vcpu, nested_vmx_get_vmcs01_guest_efer(vmx));
4400 vcpu->arch.cr0_guest_owned_bits = KVM_POSSIBLE_CR0_GUEST_BITS;
4401 vmx_set_cr0(vcpu, vmcs_readl(CR0_READ_SHADOW));
4403 vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
4404 vmx_set_cr4(vcpu, vmcs_readl(CR4_READ_SHADOW));
4406 nested_ept_uninit_mmu_context(vcpu);
4407 vcpu->arch.cr3 = vmcs_readl(GUEST_CR3);
4408 kvm_register_mark_available(vcpu, VCPU_EXREG_CR3);
4411 * Use ept_save_pdptrs(vcpu) to load the MMU's cached PDPTRs
4412 * from vmcs01 (if necessary). The PDPTRs are not loaded on
4413 * VMFail, like everything else we just need to ensure our
4414 * software model is up-to-date.
4416 if (enable_ept && is_pae_paging(vcpu))
4417 ept_save_pdptrs(vcpu);
4419 kvm_mmu_reset_context(vcpu);
4421 if (cpu_has_vmx_msr_bitmap())
4422 vmx_update_msr_bitmap(vcpu);
4425 * This nasty bit of open coding is a compromise between blindly
4426 * loading L1's MSRs using the exit load lists (incorrect emulation
4427 * of VMFail), leaving the nested VM's MSRs in the software model
4428 * (incorrect behavior) and snapshotting the modified MSRs (too
4429 * expensive since the lists are unbounded by hardware). For each
4430 * MSR that was (prematurely) loaded from the nested VMEntry load
4431 * list, reload it from the exit load list if it exists and differs
4432 * from the guest value. The intent is to stuff host state as
4433 * silently as possible, not to fully process the exit load list.
4435 for (i = 0; i < vmcs12->vm_entry_msr_load_count; i++) {
4436 gpa = vmcs12->vm_entry_msr_load_addr + (i * sizeof(g));
4437 if (kvm_vcpu_read_guest(vcpu, gpa, &g, sizeof(g))) {
4438 pr_debug_ratelimited(
4439 "%s read MSR index failed (%u, 0x%08llx)\n",
4444 for (j = 0; j < vmcs12->vm_exit_msr_load_count; j++) {
4445 gpa = vmcs12->vm_exit_msr_load_addr + (j * sizeof(h));
4446 if (kvm_vcpu_read_guest(vcpu, gpa, &h, sizeof(h))) {
4447 pr_debug_ratelimited(
4448 "%s read MSR failed (%u, 0x%08llx)\n",
4452 if (h.index != g.index)
4454 if (h.value == g.value)
4457 if (nested_vmx_load_msr_check(vcpu, &h)) {
4458 pr_debug_ratelimited(
4459 "%s check failed (%u, 0x%x, 0x%x)\n",
4460 __func__, j, h.index, h.reserved);
4464 if (kvm_set_msr(vcpu, h.index, h.value)) {
4465 pr_debug_ratelimited(
4466 "%s WRMSR failed (%u, 0x%x, 0x%llx)\n",
4467 __func__, j, h.index, h.value);
4476 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
4480 * Emulate an exit from nested guest (L2) to L1, i.e., prepare to run L1
4481 * and modify vmcs12 to make it see what it would expect to see there if
4482 * L2 was its real guest. Must only be called when in L2 (is_guest_mode())
4484 void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 vm_exit_reason,
4485 u32 exit_intr_info, unsigned long exit_qualification)
4487 struct vcpu_vmx *vmx = to_vmx(vcpu);
4488 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
4490 /* trying to cancel vmlaunch/vmresume is a bug */
4491 WARN_ON_ONCE(vmx->nested.nested_run_pending);
4493 if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) {
4495 * KVM_REQ_GET_NESTED_STATE_PAGES is also used to map
4496 * Enlightened VMCS after migration and we still need to
4497 * do that when something is forcing L2->L1 exit prior to the first L2 run.
4500 (void)nested_get_evmcs_page(vcpu);
4503 /* Service the TLB flush request for L2 before switching to L1. */
4504 if (kvm_check_request(KVM_REQ_TLB_FLUSH_CURRENT, vcpu))
4505 kvm_vcpu_flush_tlb_current(vcpu);
4508 * VCPU_EXREG_PDPTR will be clobbered in arch/x86/kvm/vmx/vmx.h between
4509 * now and the new vmentry. Ensure that the VMCS02 PDPTR fields are
4510 * up-to-date before switching to L1.
4512 if (enable_ept && is_pae_paging(vcpu))
4513 vmx_ept_load_pdptrs(vcpu);
4515 leave_guest_mode(vcpu);
4517 if (nested_cpu_has_preemption_timer(vmcs12))
4518 hrtimer_cancel(&to_vmx(vcpu)->nested.preemption_timer);
4520 if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING)
4521 vcpu->arch.tsc_offset -= vmcs12->tsc_offset;
4523 if (likely(!vmx->fail)) {
4524 sync_vmcs02_to_vmcs12(vcpu, vmcs12);
4526 if (vm_exit_reason != -1)
4527 prepare_vmcs12(vcpu, vmcs12, vm_exit_reason,
4528 exit_intr_info, exit_qualification);
4531 * Must happen outside of sync_vmcs02_to_vmcs12() as it will
4532 * also be used to capture vmcs12 cache as part of
4533 * capturing nVMX state for snapshot (migration).
4535 * Otherwise, this flush will dirty guest memory at a
4536 * point it is already assumed by user-space to be immutable.
4539 nested_flush_cached_shadow_vmcs12(vcpu, vmcs12);
4542 * The only expected VM-instruction error is "VM entry with
4543 * invalid control field(s)." Anything else indicates a
4544 * problem with L0. And we should never get here with a
4545 * VMFail of any type if early consistency checks are enabled.
4547 WARN_ON_ONCE(vmcs_read32(VM_INSTRUCTION_ERROR) !=
4548 VMXERR_ENTRY_INVALID_CONTROL_FIELD);
4549 WARN_ON_ONCE(nested_early_check);
4553 * Drop events/exceptions that were queued for re-injection to L2
4554 * (picked up via vmx_complete_interrupts()), as well as exceptions
4555 * that were pending for L2. Note, this must NOT be hoisted above
4556 * prepare_vmcs12(), events/exceptions queued for re-injection need to
4557 * be captured in vmcs12 (see vmcs12_save_pending_event()).
4559 vcpu->arch.nmi_injected = false;
4560 kvm_clear_exception_queue(vcpu);
4561 kvm_clear_interrupt_queue(vcpu);
4563 vmx_switch_vmcs(vcpu, &vmx->vmcs01);
4566 * If IBRS is advertised to the vCPU, KVM must flush the indirect
4567 * branch predictors when transitioning from L2 to L1, as L1 expects
4568 * hardware (KVM in this case) to provide separate predictor modes.
4569 * Bare metal isolates VMX root (host) from VMX non-root (guest), but
4570 * doesn't isolate different VMCSs, i.e. in this case, doesn't provide
4571 * separate modes for L2 vs L1.
4573 if (guest_cpuid_has(vcpu, X86_FEATURE_SPEC_CTRL))
4574 indirect_branch_prediction_barrier();
4576 /* Update any VMCS fields that might have changed while L2 ran */
4577 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, vmx->msr_autoload.host.nr);
4578 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, vmx->msr_autoload.guest.nr);
4579 vmcs_write64(TSC_OFFSET, vcpu->arch.tsc_offset);
4580 if (vmx->nested.l1_tpr_threshold != -1)
4581 vmcs_write32(TPR_THRESHOLD, vmx->nested.l1_tpr_threshold);
4583 if (kvm_has_tsc_control)
4584 decache_tsc_multiplier(vmx);
4586 if (vmx->nested.change_vmcs01_virtual_apic_mode) {
4587 vmx->nested.change_vmcs01_virtual_apic_mode = false;
4588 vmx_set_virtual_apic_mode(vcpu);
4591 /* Unpin physical memory we referred to in vmcs02 */
4592 if (vmx->nested.apic_access_page) {
4593 kvm_release_page_clean(vmx->nested.apic_access_page);
4594 vmx->nested.apic_access_page = NULL;
4596 kvm_vcpu_unmap(vcpu, &vmx->nested.virtual_apic_map, true);
4597 kvm_vcpu_unmap(vcpu, &vmx->nested.pi_desc_map, true);
4598 vmx->nested.pi_desc = NULL;
4600 if (vmx->nested.reload_vmcs01_apic_access_page) {
4601 vmx->nested.reload_vmcs01_apic_access_page = false;
4602 kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
4605 if ((vm_exit_reason != -1) &&
4606 (enable_shadow_vmcs || vmx->nested.hv_evmcs))
4607 vmx->nested.need_vmcs12_to_shadow_sync = true;
4609 /* in case we halted in L2 */
4610 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
4612 if (likely(!vmx->fail)) {
4613 if ((u16)vm_exit_reason == EXIT_REASON_EXTERNAL_INTERRUPT &&
4614 nested_exit_intr_ack_set(vcpu)) {
4615 int irq = kvm_cpu_get_interrupt(vcpu);
4617 vmcs12->vm_exit_intr_info = irq |
4618 INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR;
4621 if (vm_exit_reason != -1)
4622 trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason,
4623 vmcs12->exit_qualification,
4624 vmcs12->idt_vectoring_info_field,
4625 vmcs12->vm_exit_intr_info,
4626 vmcs12->vm_exit_intr_error_code,
4629 load_vmcs12_host_state(vcpu, vmcs12);
4635 * After an early L2 VM-entry failure, we're now back
4636 * in L1 which thinks it just finished a VMLAUNCH or
4637 * VMRESUME instruction, so we need to set the failure
4638 * flag and the VM-instruction error field of the VMCS
4639 * accordingly, and skip the emulated instruction.
4641 (void)nested_vmx_fail(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
4644 * Restore L1's host state to KVM's software model. We're here
4645 * because a consistency check was caught by hardware, which
4646 * means some amount of guest state has been propagated to KVM's
4647 * model and needs to be unwound to the host's state.
4649 nested_vmx_restore_host_state(vcpu);
4655 * Decode the memory-address operand of a vmx instruction, as recorded on an
4656 * exit caused by such an instruction (run by a guest hypervisor).
4657 * On success, returns 0. When the operand is invalid, returns 1 and throws an exception (#UD, #GP(0), or #SS(0)).
4660 int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
4661 u32 vmx_instruction_info, bool wr, int len, gva_t *ret)
4665 struct kvm_segment s;
4668 * According to Vol. 3B, "Information for VM Exits Due to Instruction
4669 * Execution", on an exit, vmx_instruction_info holds most of the
4670 * addressing components of the operand. Only the displacement part
4671 * is put in exit_qualification (see 3B, "Basic VM-Exit Information").
4672 * For how an actual address is calculated from all these components,
4673 * refer to Vol. 1, "Operand Addressing".
4675 int scaling = vmx_instruction_info & 3;
4676 int addr_size = (vmx_instruction_info >> 7) & 7;
4677 bool is_reg = vmx_instruction_info & (1u << 10);
4678 int seg_reg = (vmx_instruction_info >> 15) & 7;
4679 int index_reg = (vmx_instruction_info >> 18) & 0xf;
4680 bool index_is_valid = !(vmx_instruction_info & (1u << 22));
4681 int base_reg = (vmx_instruction_info >> 23) & 0xf;
4682 bool base_is_valid = !(vmx_instruction_info & (1u << 27));
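/*
 * Layout of the instruction-information field as consumed above; the bit
 * positions follow directly from the shifts and masks used:
 *   bits  1:0   scaling factor
 *   bits  9:7   address size (0 = 16-bit, 1 = 32-bit, 2 = 64-bit)
 *   bit   10    operand is a register, i.e. no memory operand
 *   bits 17:15  segment register
 *   bits 21:18  index register
 *   bit   22    index register invalid
 *   bits 26:23  base register
 *   bit   27    base register invalid
 */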
4685 kvm_queue_exception(vcpu, UD_VECTOR);
4689 /* Addr = segment_base + offset */
4690 /* offset = base + [index * scale] + displacement */
4691 off = exit_qualification; /* holds the displacement */
4693 off = (gva_t)sign_extend64(off, 31);
4694 else if (addr_size == 0)
4695 off = (gva_t)sign_extend64(off, 15);
4697 off += kvm_register_readl(vcpu, base_reg);
4699 off += kvm_register_readl(vcpu, index_reg) << scaling;
4700 vmx_get_segment(vcpu, &s, seg_reg);
4703 * The effective address, i.e. @off, of a memory operand is truncated
4704 * based on the address size of the instruction. Note that this is
4705 * the *effective address*, i.e. the address prior to accounting for
4706 * the segment's base.
4708 if (addr_size == 1) /* 32 bit */
4710 else if (addr_size == 0) /* 16 bit */
4713 /* Checks for #GP/#SS exceptions. */
4715 if (is_long_mode(vcpu)) {
4717 * The virtual/linear address is never truncated in 64-bit
4718 * mode, e.g. a 32-bit address size can yield a 64-bit virtual
4719 * address when using FS/GS with a non-zero base.
4721 if (seg_reg == VCPU_SREG_FS || seg_reg == VCPU_SREG_GS)
4722 *ret = s.base + off;
4726 /* Long mode: #GP(0)/#SS(0) if the memory address is in a
4727 * non-canonical form. This is the only check on the memory
4728 * destination for long mode!
4730 exn = is_noncanonical_address(*ret, vcpu);
4733 * When not in long mode, the virtual/linear address is
4734 * unconditionally truncated to 32 bits regardless of the address size.
4737 *ret = (s.base + off) & 0xffffffff;
4739 /* Protected mode: apply checks for segment validity in the following order:
4741 * - segment type check (#GP(0) may be thrown)
4742 * - usability check (#GP(0)/#SS(0))
4743 * - limit check (#GP(0)/#SS(0))
4746 /* #GP(0) if the destination operand is located in a
4747 * read-only data segment or any code segment.
4749 exn = ((s.type & 0xa) == 0 || (s.type & 8));
4751 /* #GP(0) if the source operand is located in an
4752 * execute-only code segment
4754 exn = ((s.type & 0xa) == 8);
4756 kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
4759 /* Protected mode: #GP(0)/#SS(0) if the segment is unusable.
4761 exn = (s.unusable != 0);
4764 * Protected mode: #GP(0)/#SS(0) if the memory operand is
4765 * outside the segment limit. All CPUs that support VMX ignore
4766 * limit checks for flat segments, i.e. segments with base==0,
4767 * limit==0xffffffff and of type expand-up data or code.
4769 if (!(s.base == 0 && s.limit == 0xffffffff &&
4770 ((s.type & 8) || !(s.type & 4))))
4771 exn = exn || ((u64)off + len - 1 > s.limit);
4774 kvm_queue_exception_e(vcpu,
4775 seg_reg == VCPU_SREG_SS ?
4776 SS_VECTOR : GP_VECTOR,
4784 void nested_vmx_pmu_entry_exit_ctls_update(struct kvm_vcpu *vcpu)
4786 struct vcpu_vmx *vmx;
4788 if (!nested_vmx_allowed(vcpu))
4792 if (kvm_x86_ops.pmu_ops->is_valid_msr(vcpu, MSR_CORE_PERF_GLOBAL_CTRL)) {
4793 vmx->nested.msrs.entry_ctls_high |=
4794 VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
4795 vmx->nested.msrs.exit_ctls_high |=
4796 VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
4798 vmx->nested.msrs.entry_ctls_high &=
4799 ~VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
4800 vmx->nested.msrs.exit_ctls_high &=
4801 ~VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
4805 static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer,
4809 struct x86_exception e;
4812 if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu),
4813 vmcs_read32(VMX_INSTRUCTION_INFO), false,
4814 sizeof(*vmpointer), &gva)) {
4819 r = kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e);
4820 if (r != X86EMUL_CONTINUE) {
4821 *ret = kvm_handle_memory_failure(vcpu, r, &e);
4829 * Allocate a shadow VMCS and associate it with the currently loaded
4830 * VMCS, unless such a shadow VMCS already exists. The newly allocated
4831 * VMCS is also VMCLEARed, so that it is ready for use.
4833 static struct vmcs *alloc_shadow_vmcs(struct kvm_vcpu *vcpu)
4835 struct vcpu_vmx *vmx = to_vmx(vcpu);
4836 struct loaded_vmcs *loaded_vmcs = vmx->loaded_vmcs;
4839 * We should allocate a shadow vmcs for vmcs01 only when L1
4840 * executes VMXON and free it when L1 executes VMXOFF.
4841 * As it is invalid to execute VMXON twice, we shouldn't reach
4842 * here when vmcs01 already has an allocated shadow vmcs.
4844 WARN_ON(loaded_vmcs == &vmx->vmcs01 && loaded_vmcs->shadow_vmcs);
4846 if (!loaded_vmcs->shadow_vmcs) {
4847 loaded_vmcs->shadow_vmcs = alloc_vmcs(true);
4848 if (loaded_vmcs->shadow_vmcs)
4849 vmcs_clear(loaded_vmcs->shadow_vmcs);
4851 return loaded_vmcs->shadow_vmcs;
4854 static int enter_vmx_operation(struct kvm_vcpu *vcpu)
4856 struct vcpu_vmx *vmx = to_vmx(vcpu);
4859 r = alloc_loaded_vmcs(&vmx->nested.vmcs02);
4863 vmx->nested.cached_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT);
4864 if (!vmx->nested.cached_vmcs12)
4865 goto out_cached_vmcs12;
4867 vmx->nested.cached_shadow_vmcs12 = kzalloc(VMCS12_SIZE, GFP_KERNEL_ACCOUNT);
4868 if (!vmx->nested.cached_shadow_vmcs12)
4869 goto out_cached_shadow_vmcs12;
4871 if (enable_shadow_vmcs && !alloc_shadow_vmcs(vcpu))
4872 goto out_shadow_vmcs;
4874 hrtimer_init(&vmx->nested.preemption_timer, CLOCK_MONOTONIC,
4875 HRTIMER_MODE_ABS_PINNED);
4876 vmx->nested.preemption_timer.function = vmx_preemption_timer_fn;
4878 vmx->nested.vpid02 = allocate_vpid();
4880 vmx->nested.vmcs02_initialized = false;
4881 vmx->nested.vmxon = true;
4883 if (vmx_pt_mode_is_host_guest()) {
4884 vmx->pt_desc.guest.ctl = 0;
4885 pt_update_intercept_for_msr(vcpu);
4891 kfree(vmx->nested.cached_shadow_vmcs12);
4893 out_cached_shadow_vmcs12:
4894 kfree(vmx->nested.cached_vmcs12);
4897 free_loaded_vmcs(&vmx->nested.vmcs02);
4904 * Emulate the VMXON instruction.
4905 * Currently, we just remember that VMX is active, and do not save or even
4906 * inspect the argument to VMXON (the so-called "VMXON pointer") because we
4907 * do not currently need to store anything in that guest-allocated memory
4908 * region. Consequently, VMCLEAR and VMPTRLD also do not verify that their
4909 * argument is different from the VMXON pointer (which the spec says they do).
4911 static int handle_vmon(struct kvm_vcpu *vcpu)
4916 struct vcpu_vmx *vmx = to_vmx(vcpu);
4917 const u64 VMXON_NEEDED_FEATURES = FEAT_CTL_LOCKED
4918 | FEAT_CTL_VMX_ENABLED_OUTSIDE_SMX;
4921 * Manually check CR4.VMXE; KVM must force CR4.VMXE=1 to enter
4922 * the guest and so cannot rely on hardware to perform the check,
4923 * which has higher priority than VM-Exit (see Intel SDM's pseudocode for VMXON).
4926 * Rely on hardware for the other pre-VM-Exit checks, CR0.PE=1, !VM86
4927 * and !COMPATIBILITY modes. For an unrestricted guest, KVM doesn't
4928 * force any of the relevant guest state. For a restricted guest, KVM
4929 * does force CR0.PE=1, but only to also force VM86 in order to emulate
4930 * Real Mode, and so there's no need to check CR0.PE manually.
4932 if (!kvm_read_cr4_bits(vcpu, X86_CR4_VMXE)) {
4933 kvm_queue_exception(vcpu, UD_VECTOR);
4938 * The CPL is checked for "not in VMX operation" and for "in VMX root",
4939 * and has higher priority than the VM-Fail due to being post-VMXON,
4940 * i.e. VMXON #GPs outside of VMX non-root if CPL!=0. In VMX non-root,
4941 * VMXON causes VM-Exit and KVM unconditionally forwards VMXON VM-Exits
4942 * from L2 to L1, i.e. there's no need to check for the vCPU being in VMX operation.
4945 * Forwarding the VM-Exit unconditionally, i.e. without performing the
4946 * #UD checks (see above), is functionally ok because KVM doesn't allow
4947 * L1 to run L2 without CR4.VMXE=1, and because KVM never modifies L2's
4948 * CR0 or CR4, i.e. it's L2's responsibility to emulate #UDs that are
4949 * missed by hardware due to shadowing CR0 and/or CR4.
4951 if (vmx_get_cpl(vcpu)) {
4952 kvm_inject_gp(vcpu, 0);
4956 if (vmx->nested.vmxon)
4957 return nested_vmx_fail(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION);
4960 * Invalid CR0/CR4 generates #GP. These checks are performed if and
4961 * only if the vCPU isn't already in VMX operation, i.e. effectively
4962 * have lower priority than the VM-Fail above.
4964 if (!nested_host_cr0_valid(vcpu, kvm_read_cr0(vcpu)) ||
4965 !nested_host_cr4_valid(vcpu, kvm_read_cr4(vcpu))) {
4966 kvm_inject_gp(vcpu, 0);
4970 if ((vmx->msr_ia32_feature_control & VMXON_NEEDED_FEATURES)
4971 != VMXON_NEEDED_FEATURES) {
4972 kvm_inject_gp(vcpu, 0);
4976 if (nested_vmx_get_vmptr(vcpu, &vmptr, &ret))
4981 * The first 4 bytes of VMXON region contain the supported
4982 * VMCS revision identifier
4984 * Note - IA32_VMX_BASIC[48] will never be 1 for the nested case;
4985 * which replaces physical address width with 32
4987 if (!page_address_valid(vcpu, vmptr))
4988 return nested_vmx_failInvalid(vcpu);
4990 if (kvm_read_guest(vcpu->kvm, vmptr, &revision, sizeof(revision)) ||
4991 revision != VMCS12_REVISION)
4992 return nested_vmx_failInvalid(vcpu);
4994 vmx->nested.vmxon_ptr = vmptr;
4995 ret = enter_vmx_operation(vcpu);
4999 return nested_vmx_succeed(vcpu);
5002 static inline void nested_release_vmcs12(struct kvm_vcpu *vcpu)
5004 struct vcpu_vmx *vmx = to_vmx(vcpu);
5006 if (vmx->nested.current_vmptr == -1ull)
5009 copy_vmcs02_to_vmcs12_rare(vcpu, get_vmcs12(vcpu));
5011 if (enable_shadow_vmcs) {
5012 /* copy to memory all shadowed fields in case
5013 they were modified */
5014 copy_shadow_to_vmcs12(vmx);
5015 vmx_disable_shadow_vmcs(vmx);
5017 vmx->nested.posted_intr_nv = -1;
5019 /* Flush VMCS12 to guest memory */
5020 kvm_vcpu_write_guest_page(vcpu,
5021 vmx->nested.current_vmptr >> PAGE_SHIFT,
5022 vmx->nested.cached_vmcs12, 0, VMCS12_SIZE);
5024 kvm_mmu_free_roots(vcpu, &vcpu->arch.guest_mmu, KVM_MMU_ROOTS_ALL);
5026 vmx->nested.current_vmptr = -1ull;
5029 /* Emulate the VMXOFF instruction */
5030 static int handle_vmoff(struct kvm_vcpu *vcpu)
5032 if (!nested_vmx_check_permission(vcpu))
5037 /* Process a latched INIT during time CPU was in VMX operation */
5038 kvm_make_request(KVM_REQ_EVENT, vcpu);
5040 return nested_vmx_succeed(vcpu);
5043 /* Emulate the VMCLEAR instruction */
5044 static int handle_vmclear(struct kvm_vcpu *vcpu)
5046 struct vcpu_vmx *vmx = to_vmx(vcpu);
5052 if (!nested_vmx_check_permission(vcpu))
5055 if (nested_vmx_get_vmptr(vcpu, &vmptr, &r))
5058 if (!page_address_valid(vcpu, vmptr))
5059 return nested_vmx_fail(vcpu, VMXERR_VMCLEAR_INVALID_ADDRESS);
5061 if (vmptr == vmx->nested.vmxon_ptr)
5062 return nested_vmx_fail(vcpu, VMXERR_VMCLEAR_VMXON_POINTER);
5065 * When Enlightened VMEntry is enabled on the calling CPU we treat
5066 * memory area pointed to by vmptr as Enlightened VMCS (as there's no good
5067 * way to distinguish it from VMCS12) and we must not corrupt it by
5068 * writing to the non-existent 'launch_state' field. The area doesn't
5069 * have to be the currently active EVMCS on the calling CPU and there's
5070 * nothing KVM has to do to transition it from 'active' to 'non-active'
5071 * state. It is possible that the area will stay mapped as
5072 * vmx->nested.hv_evmcs but this shouldn't be a problem.
5074 if (likely(!vmx->nested.enlightened_vmcs_enabled ||
5075 !nested_enlightened_vmentry(vcpu, &evmcs_gpa))) {
5076 if (vmptr == vmx->nested.current_vmptr)
5077 nested_release_vmcs12(vcpu);
5079 kvm_vcpu_write_guest(vcpu,
5080 vmptr + offsetof(struct vmcs12,
5082 &zero, sizeof(zero));
5085 return nested_vmx_succeed(vcpu);
5088 /* Emulate the VMLAUNCH instruction */
5089 static int handle_vmlaunch(struct kvm_vcpu *vcpu)
5091 return nested_vmx_run(vcpu, true);
5094 /* Emulate the VMRESUME instruction */
5095 static int handle_vmresume(struct kvm_vcpu *vcpu)
5098 return nested_vmx_run(vcpu, false);
5101 static int handle_vmread(struct kvm_vcpu *vcpu)
5103 struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu)
5105 unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
5106 u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
5107 struct vcpu_vmx *vmx = to_vmx(vcpu);
5108 struct x86_exception e;
5109 unsigned long field;
5115 if (!nested_vmx_check_permission(vcpu))
5119 * In VMX non-root operation, when the VMCS-link pointer is -1ull,
5120 * any VMREAD sets the ALU flags for VMfailInvalid.
5122 if (vmx->nested.current_vmptr == -1ull ||
5123 (is_guest_mode(vcpu) &&
5124 get_vmcs12(vcpu)->vmcs_link_pointer == -1ull))
5125 return nested_vmx_failInvalid(vcpu);
5127 /* Decode instruction info and find the field to read */
5128 field = kvm_register_readl(vcpu, (((instr_info) >> 28) & 0xf));
5130 offset = vmcs_field_to_offset(field);
5132 return nested_vmx_fail(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
5134 if (!is_guest_mode(vcpu) && is_vmcs12_ext_field(field))
5135 copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
5137 /* Read the field, zero-extended to a u64 value */
5138 value = vmcs12_read_any(vmcs12, field, offset);
5141 * Now copy part of this value to register or memory, as requested.
5142 * Note that the number of bits actually copied is 32 or 64 depending
5143 * on the guest's mode (32 or 64 bit), not on the given field's length.
5145 if (instr_info & BIT(10)) {
5146 kvm_register_writel(vcpu, (((instr_info) >> 3) & 0xf), value);
5148 len = is_64_bit_mode(vcpu) ? 8 : 4;
5149 if (get_vmx_mem_address(vcpu, exit_qualification,
5150 instr_info, true, len, &gva))
5152 /* _system ok, nested_vmx_check_permission has verified cpl=0 */
5153 r = kvm_write_guest_virt_system(vcpu, gva, &value, len, &e);
5154 if (r != X86EMUL_CONTINUE)
5155 return kvm_handle_memory_failure(vcpu, r, &e);
5158 return nested_vmx_succeed(vcpu);
5161 static bool is_shadow_field_rw(unsigned long field)
5164 #define SHADOW_FIELD_RW(x, y) case x:
5165 #include "vmcs_shadow_fields.h"
5173 static bool is_shadow_field_ro(unsigned long field)
5176 #define SHADOW_FIELD_RO(x, y) case x:
5177 #include "vmcs_shadow_fields.h"
5185 static int handle_vmwrite(struct kvm_vcpu *vcpu)
5187 struct vmcs12 *vmcs12 = is_guest_mode(vcpu) ? get_shadow_vmcs12(vcpu)
5189 unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
5190 u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
5191 struct vcpu_vmx *vmx = to_vmx(vcpu);
5192 struct x86_exception e;
5193 unsigned long field;
5199 * The value to write might be 32 or 64 bits, depending on L1's long
5200 * mode, and eventually we need to write that into a field of several
5201 * possible lengths. The code below first zero-extends the value to 64
5202 * bit (value), and then copies only the appropriate number of
5203 * bits into the vmcs12 field.
5207 if (!nested_vmx_check_permission(vcpu))
5211 * In VMX non-root operation, when the VMCS-link pointer is -1ull,
5212 * any VMWRITE sets the ALU flags for VMfailInvalid.
5214 if (vmx->nested.current_vmptr == -1ull ||
5215 (is_guest_mode(vcpu) &&
5216 get_vmcs12(vcpu)->vmcs_link_pointer == -1ull))
5217 return nested_vmx_failInvalid(vcpu);
5219 if (instr_info & BIT(10))
5220 value = kvm_register_readl(vcpu, (((instr_info) >> 3) & 0xf));
5222 len = is_64_bit_mode(vcpu) ? 8 : 4;
5223 if (get_vmx_mem_address(vcpu, exit_qualification,
5224 instr_info, false, len, &gva))
5226 r = kvm_read_guest_virt(vcpu, gva, &value, len, &e);
5227 if (r != X86EMUL_CONTINUE)
5228 return kvm_handle_memory_failure(vcpu, r, &e);
5231 field = kvm_register_readl(vcpu, (((instr_info) >> 28) & 0xf));
5233 offset = vmcs_field_to_offset(field);
5235 return nested_vmx_fail(vcpu, VMXERR_UNSUPPORTED_VMCS_COMPONENT);
5238 * If the vCPU supports "VMWRITE to any supported field in the
5239 * VMCS," then the "read-only" fields are actually read/write.
5241 if (vmcs_field_readonly(field) &&
5242 !nested_cpu_has_vmwrite_any_field(vcpu))
5243 return nested_vmx_fail(vcpu, VMXERR_VMWRITE_READ_ONLY_VMCS_COMPONENT);
5246 * Ensure vmcs12 is up-to-date before any VMWRITE that dirties
5247 * vmcs12, else we may crush a field or consume a stale value.
5249 if (!is_guest_mode(vcpu) && !is_shadow_field_rw(field))
5250 copy_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
5253 * Some Intel CPUs intentionally drop the reserved bits of the AR byte
5254 * fields on VMWRITE. Emulate this behavior to ensure consistent KVM
5255 * behavior regardless of the underlying hardware, e.g. if an AR_BYTE
5256 * field is intercepted for VMWRITE but not VMREAD (in L1), then VMREAD
5257 * from L1 will return a different value than VMREAD from L2 (L1 sees
5258 * the stripped down value, L2 sees the full value as stored by KVM).
5260 if (field >= GUEST_ES_AR_BYTES && field <= GUEST_TR_AR_BYTES)
5263 vmcs12_write_any(vmcs12, field, offset, value);
5266 * Do not track vmcs12 dirty-state if in guest-mode as we actually
5267 * dirty shadow vmcs12 instead of vmcs12. Fields that can be updated
5268 * by L1 without a vmexit are always updated in the vmcs02, i.e. don't
5269 * "dirty" vmcs12, all others go down the prepare_vmcs02() slow path.
5271 if (!is_guest_mode(vcpu) && !is_shadow_field_rw(field)) {
5273 * L1 can read these fields without exiting, ensure the
5274 * shadow VMCS is up-to-date.
5276 if (enable_shadow_vmcs && is_shadow_field_ro(field)) {
5278 vmcs_load(vmx->vmcs01.shadow_vmcs);
5280 __vmcs_writel(field, value);
5282 vmcs_clear(vmx->vmcs01.shadow_vmcs);
5283 vmcs_load(vmx->loaded_vmcs->vmcs);
5286 vmx->nested.dirty_vmcs12 = true;
5289 return nested_vmx_succeed(vcpu);
5292 static void set_current_vmptr(struct vcpu_vmx *vmx, gpa_t vmptr)
5294 vmx->nested.current_vmptr = vmptr;
5295 if (enable_shadow_vmcs) {
5296 secondary_exec_controls_setbit(vmx, SECONDARY_EXEC_SHADOW_VMCS);
5297 vmcs_write64(VMCS_LINK_POINTER,
5298 __pa(vmx->vmcs01.shadow_vmcs));
5299 vmx->nested.need_vmcs12_to_shadow_sync = true;
5301 vmx->nested.dirty_vmcs12 = true;
5304 /* Emulate the VMPTRLD instruction */
5305 static int handle_vmptrld(struct kvm_vcpu *vcpu)
5307 struct vcpu_vmx *vmx = to_vmx(vcpu);
5311 if (!nested_vmx_check_permission(vcpu))
5314 if (nested_vmx_get_vmptr(vcpu, &vmptr, &r))
5317 if (!page_address_valid(vcpu, vmptr))
5318 return nested_vmx_fail(vcpu, VMXERR_VMPTRLD_INVALID_ADDRESS);
5320 if (vmptr == vmx->nested.vmxon_ptr)
5321 return nested_vmx_fail(vcpu, VMXERR_VMPTRLD_VMXON_POINTER);
5323 /* Forbid normal VMPTRLD if Enlightened version was used */
5324 if (vmx->nested.hv_evmcs)
5327 if (vmx->nested.current_vmptr != vmptr) {
5328 struct kvm_host_map map;
5329 struct vmcs12 *new_vmcs12;
5331 if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmptr), &map)) {
5333 * Reads from an unbacked page return all 1s,
5334 * which means that the 32 bits located at the
5335 * given physical address won't match the required
5336 * VMCS12_REVISION identifier.
5338 return nested_vmx_fail(vcpu,
5339 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
5342 new_vmcs12 = map.hva;
5344 if (new_vmcs12->hdr.revision_id != VMCS12_REVISION ||
5345 (new_vmcs12->hdr.shadow_vmcs &&
5346 !nested_cpu_has_vmx_shadow_vmcs(vcpu))) {
5347 kvm_vcpu_unmap(vcpu, &map, false);
5348 return nested_vmx_fail(vcpu,
5349 VMXERR_VMPTRLD_INCORRECT_VMCS_REVISION_ID);
5352 nested_release_vmcs12(vcpu);
5355 * Load VMCS12 from guest memory since it is not already cached.
5358 memcpy(vmx->nested.cached_vmcs12, new_vmcs12, VMCS12_SIZE);
5359 kvm_vcpu_unmap(vcpu, &map, false);
5361 set_current_vmptr(vmx, vmptr);
5364 return nested_vmx_succeed(vcpu);
5367 /* Emulate the VMPTRST instruction */
5368 static int handle_vmptrst(struct kvm_vcpu *vcpu)
5370 unsigned long exit_qual = vmx_get_exit_qual(vcpu);
5371 u32 instr_info = vmcs_read32(VMX_INSTRUCTION_INFO);
5372 gpa_t current_vmptr = to_vmx(vcpu)->nested.current_vmptr;
5373 struct x86_exception e;
5377 if (!nested_vmx_check_permission(vcpu))
5380 if (unlikely(to_vmx(vcpu)->nested.hv_evmcs))
5383 if (get_vmx_mem_address(vcpu, exit_qual, instr_info,
5384 true, sizeof(gpa_t), &gva))
5386 /* *_system ok, nested_vmx_check_permission has verified cpl=0 */
5387 r = kvm_write_guest_virt_system(vcpu, gva, (void *)¤t_vmptr,
5389 if (r != X86EMUL_CONTINUE)
5390 return kvm_handle_memory_failure(vcpu, r, &e);
5392 return nested_vmx_succeed(vcpu);
5395 #define EPTP_PA_MASK GENMASK_ULL(51, 12)
5397 static bool nested_ept_root_matches(hpa_t root_hpa, u64 root_eptp, u64 eptp)
5399 return VALID_PAGE(root_hpa) &&
5400 ((root_eptp & EPTP_PA_MASK) == (eptp & EPTP_PA_MASK));
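/*
 * An EPTP value packs the physical address of the EPT root into bits
 * 51:12; the low bits hold attributes (memory type, page-walk length,
 * accessed/dirty enable), which is why only the EPTP_PA_MASK portion is
 * compared above.
 */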
5403 /* Emulate the INVEPT instruction */
5404 static int handle_invept(struct kvm_vcpu *vcpu)
5406 struct vcpu_vmx *vmx = to_vmx(vcpu);
5407 u32 vmx_instruction_info, types;
5408 unsigned long type, roots_to_free;
5409 struct kvm_mmu *mmu;
5411 struct x86_exception e;
5417 if (!(vmx->nested.msrs.secondary_ctls_high &
5418 SECONDARY_EXEC_ENABLE_EPT) ||
5419 !(vmx->nested.msrs.ept_caps & VMX_EPT_INVEPT_BIT)) {
5420 kvm_queue_exception(vcpu, UD_VECTOR);
5424 if (!nested_vmx_check_permission(vcpu))
5427 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
5428 type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
5430 types = (vmx->nested.msrs.ept_caps >> VMX_EPT_EXTENT_SHIFT) & 6;
5432 if (type >= 32 || !(types & (1 << type)))
5433 return nested_vmx_fail(vcpu, VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
5435 /* According to the Intel VMX instruction reference, the memory
5436 * operand is read even if it isn't needed (e.g., for type==global)
5438 if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu),
5439 vmx_instruction_info, false, sizeof(operand), &gva))
5441 r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e);
5442 if (r != X86EMUL_CONTINUE)
5443 return kvm_handle_memory_failure(vcpu, r, &e);
5446 * Nested EPT roots are always held through guest_mmu, not root_mmu.
5449 mmu = &vcpu->arch.guest_mmu;
5452 case VMX_EPT_EXTENT_CONTEXT:
5453 if (!nested_vmx_check_eptp(vcpu, operand.eptp))
5454 return nested_vmx_fail(vcpu,
5455 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
5458 if (nested_ept_root_matches(mmu->root_hpa, mmu->root_pgd,
5460 roots_to_free |= KVM_MMU_ROOT_CURRENT;
5462 for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
5463 if (nested_ept_root_matches(mmu->prev_roots[i].hpa,
5464 mmu->prev_roots[i].pgd,
5466 roots_to_free |= KVM_MMU_ROOT_PREVIOUS(i);
5469 case VMX_EPT_EXTENT_GLOBAL:
5470 roots_to_free = KVM_MMU_ROOTS_ALL;
5478 kvm_mmu_free_roots(vcpu, mmu, roots_to_free);
5480 return nested_vmx_succeed(vcpu);
5483 static int handle_invvpid(struct kvm_vcpu *vcpu)
5485 struct vcpu_vmx *vmx = to_vmx(vcpu);
5486 u32 vmx_instruction_info;
5487 unsigned long type, types;
5489 struct x86_exception e;
5497 if (!(vmx->nested.msrs.secondary_ctls_high &
5498 SECONDARY_EXEC_ENABLE_VPID) ||
5499 !(vmx->nested.msrs.vpid_caps & VMX_VPID_INVVPID_BIT)) {
5500 kvm_queue_exception(vcpu, UD_VECTOR);
5504 if (!nested_vmx_check_permission(vcpu))
5507 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
5508 type = kvm_register_readl(vcpu, (vmx_instruction_info >> 28) & 0xf);
5510 types = (vmx->nested.msrs.vpid_caps &
5511 VMX_VPID_EXTENT_SUPPORTED_MASK) >> 8;
5513 if (type >= 32 || !(types & (1 << type)))
5514 return nested_vmx_fail(vcpu,
5515 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
5517 /* According to the Intel VMX instruction reference, the memory
5518 * operand is read even if it isn't needed (e.g., for type==global)
5520 if (get_vmx_mem_address(vcpu, vmx_get_exit_qual(vcpu),
5521 vmx_instruction_info, false, sizeof(operand), &gva))
5523 r = kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e);
5524 if (r != X86EMUL_CONTINUE)
5525 return kvm_handle_memory_failure(vcpu, r, &e);
5527 if (operand.vpid >> 16)
5528 return nested_vmx_fail(vcpu,
5529 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
5531 vpid02 = nested_get_vpid02(vcpu);
5533 case VMX_VPID_EXTENT_INDIVIDUAL_ADDR:
5534 if (!operand.vpid ||
5535 is_noncanonical_address(operand.gla, vcpu))
5536 return nested_vmx_fail(vcpu,
5537 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
5538 vpid_sync_vcpu_addr(vpid02, operand.gla);
5540 case VMX_VPID_EXTENT_SINGLE_CONTEXT:
5541 case VMX_VPID_EXTENT_SINGLE_NON_GLOBAL:
5543 return nested_vmx_fail(vcpu,
5544 VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID);
5545 vpid_sync_context(vpid02);
5547 case VMX_VPID_EXTENT_ALL_CONTEXT:
5548 vpid_sync_context(vpid02);
5552 return kvm_skip_emulated_instruction(vcpu);
5556 * Sync the shadow page tables if EPT is disabled: L1 is invalidating
5557 * linear mappings for L2 (tagged with L2's VPID). Free all roots, as
5558 * VPIDs are not tracked in the MMU role.
5560 * Note, this operates on root_mmu, not guest_mmu, as L1 and L2 share
5561 * an MMU when EPT is disabled.
5563 * TODO: sync only the affected SPTEs for INDIVIDUAL_ADDR.
5566 kvm_mmu_free_roots(vcpu, &vcpu->arch.root_mmu,
5569 return nested_vmx_succeed(vcpu);
5572 static int nested_vmx_eptp_switching(struct kvm_vcpu *vcpu,
5573 struct vmcs12 *vmcs12)
5575 u32 index = kvm_rcx_read(vcpu);
5578 if (!nested_cpu_has_eptp_switching(vmcs12) ||
5579 !nested_cpu_has_ept(vmcs12))
5582 if (index >= VMFUNC_EPTP_ENTRIES)
5585 if (kvm_vcpu_read_guest_page(vcpu, vmcs12->eptp_list_address >> PAGE_SHIFT,
5586 &new_eptp, index * 8, 8))
5590 * If the (L2) guest does a vmfunc to the currently
5591 * active ept pointer, we don't have to do anything else
5593 if (vmcs12->ept_pointer != new_eptp) {
5594 if (!nested_vmx_check_eptp(vcpu, new_eptp))
5597 vmcs12->ept_pointer = new_eptp;
5599 kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
5605 static int handle_vmfunc(struct kvm_vcpu *vcpu)
5607 struct vcpu_vmx *vmx = to_vmx(vcpu);
5608 struct vmcs12 *vmcs12;
5609 u32 function = kvm_rax_read(vcpu);
5612 * VMFUNC is only supported for nested guests, but we always enable the
5613 * secondary control for simplicity; for non-nested mode, fake that we
5614 * didn't enable it by injecting #UD.
5616 if (!is_guest_mode(vcpu)) {
5617 kvm_queue_exception(vcpu, UD_VECTOR);
5621 vmcs12 = get_vmcs12(vcpu);
5622 if (!(vmcs12->vm_function_control & BIT_ULL(function)))
5627 if (nested_vmx_eptp_switching(vcpu, vmcs12))
5633 return kvm_skip_emulated_instruction(vcpu);
5637 * This is effectively a reflected VM-Exit, as opposed to a synthesized
5638 * nested VM-Exit. Pass the original exit reason, i.e. don't hardcode
5639 * EXIT_REASON_VMFUNC as the exit reason.
5641 nested_vmx_vmexit(vcpu, vmx->exit_reason.full,
5642 vmx_get_intr_info(vcpu),
5643 vmx_get_exit_qual(vcpu));
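/*
 * Illustrative sketch, not kernel code: the guest-physical address of the
 * EPTP-list entry consulted by nested_vmx_eptp_switching() above.  The list
 * is a single 4K page of 512 (VMFUNC_EPTP_ENTRIES) 64-bit entries, and the
 * entry selected by ECX sits at byte offset index * 8 within it, which is
 * exactly the (offset, len) pair passed to kvm_vcpu_read_guest_page().  The
 * helper name is hypothetical.
 */
static inline gpa_t eptp_list_entry_gpa_sketch(gpa_t eptp_list_address,
					       u32 index)
{
	return eptp_list_address + (gpa_t)index * sizeof(u64);
}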
5648 * Return true if an IO instruction with the specified port and size should cause
5649 * a VM-exit into L1.
5651 bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu, unsigned int port,
5654 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
5655 gpa_t bitmap, last_bitmap;
5658 last_bitmap = (gpa_t)-1;
5663 bitmap = vmcs12->io_bitmap_a;
5664 else if (port < 0x10000)
5665 bitmap = vmcs12->io_bitmap_b;
5668 bitmap += (port & 0x7fff) / 8;
5670 if (last_bitmap != bitmap)
5671 if (kvm_vcpu_read_guest(vcpu, bitmap, &b, 1))
5673 if (b & (1 << (port & 7)))
5678 last_bitmap = bitmap;
5684 static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
5685 struct vmcs12 *vmcs12)
5687 unsigned long exit_qualification;
5688 unsigned short port;
5691 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
5692 return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING);
5694 exit_qualification = vmx_get_exit_qual(vcpu);
5696 port = exit_qualification >> 16;
5697 size = (exit_qualification & 7) + 1;
5699 return nested_vmx_check_io_bitmaps(vcpu, port, size);
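/*
 * Illustrative sketch, not kernel code: the lookup nested_vmx_check_io_bitmaps()
 * above performs, but on local copies of the two 4K I/O bitmaps (A covers
 * ports 0x0000-0x7fff, B covers 0x8000-0xffff, one bit per port).  An access
 * of 'size' bytes starting at 'port' should exit to L1 if any touched port
 * has its bit set.  The helper name is hypothetical.
 */
static inline bool io_access_intercepted_sketch(const u8 *bitmap_a,
						const u8 *bitmap_b,
						unsigned int port, int size)
{
	while (size > 0) {
		const u8 *bm;

		if (port >= 0x10000)
			return true;	/* past the end of both bitmaps */
		bm = port < 0x8000 ? bitmap_a : bitmap_b;
		if (bm[(port & 0x7fff) / 8] & (1 << (port & 7)))
			return true;
		port++;
		size--;
	}
	return false;
}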
5703 * Return true if we should exit from L2 to L1 to handle an MSR access,
5704 * rather than handle it ourselves in L0, i.e. check whether L1 expressed
5705 * disinterest in the current event (a read or write of a specific MSR) via
5706 * its MSR bitmap. This may be the case even when L0 doesn't use MSR bitmaps.
5708 static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
5709 struct vmcs12 *vmcs12,
5710 union vmx_exit_reason exit_reason)
5712 u32 msr_index = kvm_rcx_read(vcpu);
5715 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
5719 * The MSR_BITMAP page is divided into four 1024-byte bitmaps,
5720 * for the four combinations of read/write and low/high MSR numbers.
5721 * First we need to figure out which of the four to use:
5723 bitmap = vmcs12->msr_bitmap;
5724 if (exit_reason.basic == EXIT_REASON_MSR_WRITE)
5726 if (msr_index >= 0xc0000000) {
5727 msr_index -= 0xc0000000;
5731 /* Then read the msr_index'th bit from this bitmap: */
5732 if (msr_index < 1024*8) {
5734 if (kvm_vcpu_read_guest(vcpu, bitmap + msr_index/8, &b, 1))
5736 return 1 & (b >> (msr_index & 7));
5738 return true; /* let L1 handle the wrong parameter */
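/*
 * Illustrative sketch, not kernel code: locating the interception bit for a
 * given MSR within the 4K MSR-bitmap page, following the four-bitmap layout
 * described above (read/low at 0x000, read/high at 0x400, write/low at
 * 0x800, write/high at 0xc00; one bit per MSR, low MSRs 0x0-0x1fff, high
 * MSRs 0xc0000000-0xc0001fff).  The helper name is hypothetical.
 */
static inline bool msr_bitmap_slot_sketch(u32 msr, bool write,
					  u32 *byte_offset, u32 *bit)
{
	u32 offset = write ? 2048 : 0;

	if (msr >= 0xc0000000) {
		msr -= 0xc0000000;
		offset += 1024;
	}
	if (msr >= 0x2000)
		return false;	/* no bit for this MSR; it always intercepts */

	*byte_offset = offset + msr / 8;
	*bit = msr & 7;
	return true;
}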
5742 * Return true if we should exit from L2 to L1 to handle a CR access exit,
5743 * rather than handle it ourselves in L0, i.e. check whether L1 wants to
5744 * intercept (via guest_host_mask etc.) the current event.
5746 static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
5747 struct vmcs12 *vmcs12)
5749 unsigned long exit_qualification = vmx_get_exit_qual(vcpu);
5750 int cr = exit_qualification & 15;
5754 switch ((exit_qualification >> 4) & 3) {
5755 case 0: /* mov to cr */
5756 reg = (exit_qualification >> 8) & 15;
5757 val = kvm_register_readl(vcpu, reg);
5760 if (vmcs12->cr0_guest_host_mask &
5761 (val ^ vmcs12->cr0_read_shadow))
5765 if (nested_cpu_has(vmcs12, CPU_BASED_CR3_LOAD_EXITING))
5769 if (vmcs12->cr4_guest_host_mask &
5770 (vmcs12->cr4_read_shadow ^ val))
5774 if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING))
5780 if ((vmcs12->cr0_guest_host_mask & X86_CR0_TS) &&
5781 (vmcs12->cr0_read_shadow & X86_CR0_TS))
5784 case 1: /* mov from cr */
5787 if (vmcs12->cpu_based_vm_exec_control &
5788 CPU_BASED_CR3_STORE_EXITING)
5792 if (vmcs12->cpu_based_vm_exec_control &
5793 CPU_BASED_CR8_STORE_EXITING)
5800 * lmsw can change bits 1..3 of cr0, and only set bit 0 of
5801 * cr0. Other attempted changes are ignored, with no exit.
5803 val = (exit_qualification >> LMSW_SOURCE_DATA_SHIFT) & 0x0f;
5804 if (vmcs12->cr0_guest_host_mask & 0xe &
5805 (val ^ vmcs12->cr0_read_shadow))
5807 if ((vmcs12->cr0_guest_host_mask & 0x1) &&
5808 !(vmcs12->cr0_read_shadow & 0x1) &&
5816 static bool nested_vmx_exit_handled_vmcs_access(struct kvm_vcpu *vcpu,
5817 struct vmcs12 *vmcs12, gpa_t bitmap)
5819 u32 vmx_instruction_info;
5820 unsigned long field;
5823 if (!nested_cpu_has_shadow_vmcs(vmcs12))
5826 /* Decode instruction info and find the field to access */
5827 vmx_instruction_info = vmcs_read32(VMX_INSTRUCTION_INFO);
5828 field = kvm_register_readl(vcpu, (((vmx_instruction_info) >> 28) & 0xf));
5830 /* Out-of-range fields always cause a VM exit from L2 to L1 */
5834 if (kvm_vcpu_read_guest(vcpu, bitmap + field/8, &b, 1))
5837 return 1 & (b >> (field & 7));
5840 static bool nested_vmx_exit_handled_mtf(struct vmcs12 *vmcs12)
5842 u32 entry_intr_info = vmcs12->vm_entry_intr_info_field;
5844 if (nested_cpu_has_mtf(vmcs12))
5848 * An MTF VM-exit may be injected into the guest by setting the
5849 * interruption-type to 7 (other event) and the vector field to 0. Such
5850 * is the case regardless of the 'monitor trap flag' VM-execution
5853 return entry_intr_info == (INTR_INFO_VALID_MASK
5854 | INTR_TYPE_OTHER_EVENT);
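/*
 * Illustrative sketch, not kernel code: the VM-entry interruption-information
 * encoding matched above.  Assuming the standard field layout (bits 7:0
 * vector, bits 10:8 interruption type, bit 31 valid), a pending MTF event is
 * "other event" (type 7) with vector 0 and the valid bit set.  The helper
 * name is hypothetical.
 */
static inline u32 mtf_entry_intr_info_sketch(void)
{
	return (1u << 31) |	/* INTR_INFO_VALID_MASK  */
	       (7u << 8)  |	/* INTR_TYPE_OTHER_EVENT */
	       0;		/* vector 0              */
}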
5858 * Return true if L0 wants to handle an exit from L2 regardless of whether or not
5859 * L1 wants the exit. Only call this while the vCPU is in guest mode (L2).
5861 static bool nested_vmx_l0_wants_exit(struct kvm_vcpu *vcpu,
5862 union vmx_exit_reason exit_reason)
5866 switch ((u16)exit_reason.basic) {
5867 case EXIT_REASON_EXCEPTION_NMI:
5868 intr_info = vmx_get_intr_info(vcpu);
5869 if (is_nmi(intr_info))
5871 else if (is_page_fault(intr_info))
5872 return vcpu->arch.apf.host_apf_flags ||
5873 vmx_need_pf_intercept(vcpu);
5874 else if (is_debug(intr_info) &&
5876 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
5878 else if (is_breakpoint(intr_info) &&
5879 vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
5881 else if (is_alignment_check(intr_info) &&
5882 !vmx_guest_inject_ac(vcpu))
5885 case EXIT_REASON_EXTERNAL_INTERRUPT:
5887 case EXIT_REASON_MCE_DURING_VMENTRY:
5889 case EXIT_REASON_EPT_VIOLATION:
5891 * L0 always deals with the EPT violation. If nested EPT is
5892 * used, and the nested mmu code discovers that the address is
5893 * missing in the guest EPT table (EPT12), the EPT violation
5894 * will be injected with nested_ept_inject_page_fault()
5897 case EXIT_REASON_EPT_MISCONFIG:
5899 * L2 never directly uses L1's EPT, but rather L0's own EPT
5900 * table (shadow on EPT) or a merged EPT table that L0 built
5901 * (EPT on EPT). So any problem with the structure of the
5902 * table is L0's fault.
5905 case EXIT_REASON_PREEMPTION_TIMER:
5907 case EXIT_REASON_PML_FULL:
5908 /* We emulate PML support to L1. */
5910 case EXIT_REASON_VMFUNC:
5911 /* VM functions are emulated through L2->L0 vmexits. */
5913 case EXIT_REASON_ENCLS:
5914 /* SGX is never exposed to L1 */
5923 * Return true if L1 wants to intercept an exit from L2. Only call this while
5924 * the vCPU is in guest mode (L2).
5926 static bool nested_vmx_l1_wants_exit(struct kvm_vcpu *vcpu,
5927 union vmx_exit_reason exit_reason)
5929 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
5932 switch ((u16)exit_reason.basic) {
5933 case EXIT_REASON_EXCEPTION_NMI:
5934 intr_info = vmx_get_intr_info(vcpu);
5935 if (is_nmi(intr_info))
5937 else if (is_page_fault(intr_info))
5939 return vmcs12->exception_bitmap &
5940 (1u << (intr_info & INTR_INFO_VECTOR_MASK));
5941 case EXIT_REASON_EXTERNAL_INTERRUPT:
5942 return nested_exit_on_intr(vcpu);
5943 case EXIT_REASON_TRIPLE_FAULT:
5945 case EXIT_REASON_INTERRUPT_WINDOW:
5946 return nested_cpu_has(vmcs12, CPU_BASED_INTR_WINDOW_EXITING);
5947 case EXIT_REASON_NMI_WINDOW:
5948 return nested_cpu_has(vmcs12, CPU_BASED_NMI_WINDOW_EXITING);
5949 case EXIT_REASON_TASK_SWITCH:
5951 case EXIT_REASON_CPUID:
5953 case EXIT_REASON_HLT:
5954 return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING);
5955 case EXIT_REASON_INVD:
5957 case EXIT_REASON_INVLPG:
5958 return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
5959 case EXIT_REASON_RDPMC:
5960 return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING);
5961 case EXIT_REASON_RDRAND:
5962 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDRAND_EXITING);
5963 case EXIT_REASON_RDSEED:
5964 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDSEED_EXITING);
5965 case EXIT_REASON_RDTSC: case EXIT_REASON_RDTSCP:
5966 return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING);
5967 case EXIT_REASON_VMREAD:
5968 return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12,
5969 vmcs12->vmread_bitmap);
5970 case EXIT_REASON_VMWRITE:
5971 return nested_vmx_exit_handled_vmcs_access(vcpu, vmcs12,
5972 vmcs12->vmwrite_bitmap);
5973 case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR:
5974 case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMPTRLD:
5975 case EXIT_REASON_VMPTRST: case EXIT_REASON_VMRESUME:
5976 case EXIT_REASON_VMOFF: case EXIT_REASON_VMON:
5977 case EXIT_REASON_INVEPT: case EXIT_REASON_INVVPID:
5979 * VMX instructions trap unconditionally. This allows L1 to
5980 * emulate them for its L2 guest, i.e., allows 3-level nesting!
5983 case EXIT_REASON_CR_ACCESS:
5984 return nested_vmx_exit_handled_cr(vcpu, vmcs12);
5985 case EXIT_REASON_DR_ACCESS:
5986 return nested_cpu_has(vmcs12, CPU_BASED_MOV_DR_EXITING);
5987 case EXIT_REASON_IO_INSTRUCTION:
5988 return nested_vmx_exit_handled_io(vcpu, vmcs12);
5989 case EXIT_REASON_GDTR_IDTR: case EXIT_REASON_LDTR_TR:
5990 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC);
5991 case EXIT_REASON_MSR_READ:
5992 case EXIT_REASON_MSR_WRITE:
5993 return nested_vmx_exit_handled_msr(vcpu, vmcs12, exit_reason);
5994 case EXIT_REASON_INVALID_STATE:
5996 case EXIT_REASON_MWAIT_INSTRUCTION:
5997 return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING);
5998 case EXIT_REASON_MONITOR_TRAP_FLAG:
5999 return nested_vmx_exit_handled_mtf(vmcs12);
6000 case EXIT_REASON_MONITOR_INSTRUCTION:
6001 return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING);
6002 case EXIT_REASON_PAUSE_INSTRUCTION:
6003 return nested_cpu_has(vmcs12, CPU_BASED_PAUSE_EXITING) ||
6004 nested_cpu_has2(vmcs12,
6005 SECONDARY_EXEC_PAUSE_LOOP_EXITING);
6006 case EXIT_REASON_MCE_DURING_VMENTRY:
6008 case EXIT_REASON_TPR_BELOW_THRESHOLD:
6009 return nested_cpu_has(vmcs12, CPU_BASED_TPR_SHADOW);
6010 case EXIT_REASON_APIC_ACCESS:
6011 case EXIT_REASON_APIC_WRITE:
6012 case EXIT_REASON_EOI_INDUCED:
6014 * The controls for "virtualize APIC accesses," "APIC-
6015 * register virtualization," and "virtual-interrupt
6016 * delivery" only come from vmcs12.
6019 case EXIT_REASON_INVPCID:
6021 nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_INVPCID) &&
6022 nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
6023 case EXIT_REASON_WBINVD:
6024 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING);
6025 case EXIT_REASON_XSETBV:
6027 case EXIT_REASON_XSAVES: case EXIT_REASON_XRSTORS:
6029 * This should never happen, since it is not possible to
6030 * set XSS to a non-zero value in either L1 or L2.
6031 * If it were, XSS would have to be checked against
6032 * the XSS exit bitmap in vmcs12.
6034 return nested_cpu_has2(vmcs12, SECONDARY_EXEC_XSAVES);
6035 case EXIT_REASON_UMWAIT:
6036 case EXIT_REASON_TPAUSE:
6037 return nested_cpu_has2(vmcs12,
6038 SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE);
6045 * Conditionally reflect a VM-Exit into L1. Returns %true if the VM-Exit was
6046 * reflected into L1.
6048 bool nested_vmx_reflect_vmexit(struct kvm_vcpu *vcpu)
6050 struct vcpu_vmx *vmx = to_vmx(vcpu);
6051 union vmx_exit_reason exit_reason = vmx->exit_reason;
6052 unsigned long exit_qual;
6055 WARN_ON_ONCE(vmx->nested.nested_run_pending);
6058 * Late nested VM-Fail shares the same flow as nested VM-Exit since KVM
6059 * has already loaded L2's state.
6061 if (unlikely(vmx->fail)) {
6062 trace_kvm_nested_vmenter_failed(
6063 "hardware VM-instruction error: ",
6064 vmcs_read32(VM_INSTRUCTION_ERROR));
6067 goto reflect_vmexit;
6070 trace_kvm_nested_vmexit(exit_reason.full, vcpu, KVM_ISA_VMX);
6072 /* If L0 (KVM) wants the exit, it trumps L1's desires. */
6073 if (nested_vmx_l0_wants_exit(vcpu, exit_reason))
6076 /* If L1 doesn't want the exit, handle it in L0. */
6077 if (!nested_vmx_l1_wants_exit(vcpu, exit_reason))
6081 * vmcs.VM_EXIT_INTR_INFO is only valid for EXCEPTION_NMI exits. For
6082 * EXTERNAL_INTERRUPT, the value for vmcs12->vm_exit_intr_info would
6083 * need to be synthesized by querying the in-kernel LAPIC, but external
6084 * interrupts are never reflected to L1 so it's a non-issue.
6086 exit_intr_info = vmx_get_intr_info(vcpu);
6087 if (is_exception_with_error_code(exit_intr_info)) {
6088 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
6090 vmcs12->vm_exit_intr_error_code =
6091 vmcs_read32(VM_EXIT_INTR_ERROR_CODE);
6093 exit_qual = vmx_get_exit_qual(vcpu);
6096 nested_vmx_vmexit(vcpu, exit_reason.full, exit_intr_info, exit_qual);
6100 static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
6101 struct kvm_nested_state __user *user_kvm_nested_state,
6104 struct vcpu_vmx *vmx;
6105 struct vmcs12 *vmcs12;
6106 struct kvm_nested_state kvm_state = {
6108 .format = KVM_STATE_NESTED_FORMAT_VMX,
6109 .size = sizeof(kvm_state),
6111 .hdr.vmx.vmxon_pa = -1ull,
6112 .hdr.vmx.vmcs12_pa = -1ull,
6113 .hdr.vmx.preemption_timer_deadline = 0,
6115 struct kvm_vmx_nested_state_data __user *user_vmx_nested_state =
6116 &user_kvm_nested_state->data.vmx[0];
6119 return kvm_state.size + sizeof(*user_vmx_nested_state);
6122 vmcs12 = get_vmcs12(vcpu);
6124 if (nested_vmx_allowed(vcpu) &&
6125 (vmx->nested.vmxon || vmx->nested.smm.vmxon)) {
6126 kvm_state.hdr.vmx.vmxon_pa = vmx->nested.vmxon_ptr;
6127 kvm_state.hdr.vmx.vmcs12_pa = vmx->nested.current_vmptr;
6129 if (vmx_has_valid_vmcs12(vcpu)) {
6130 kvm_state.size += sizeof(user_vmx_nested_state->vmcs12);
6132 if (vmx->nested.hv_evmcs)
6133 kvm_state.flags |= KVM_STATE_NESTED_EVMCS;
6135 if (is_guest_mode(vcpu) &&
6136 nested_cpu_has_shadow_vmcs(vmcs12) &&
6137 vmcs12->vmcs_link_pointer != -1ull)
6138 kvm_state.size += sizeof(user_vmx_nested_state->shadow_vmcs12);
6141 if (vmx->nested.smm.vmxon)
6142 kvm_state.hdr.vmx.smm.flags |= KVM_STATE_NESTED_SMM_VMXON;
6144 if (vmx->nested.smm.guest_mode)
6145 kvm_state.hdr.vmx.smm.flags |= KVM_STATE_NESTED_SMM_GUEST_MODE;
6147 if (is_guest_mode(vcpu)) {
6148 kvm_state.flags |= KVM_STATE_NESTED_GUEST_MODE;
6150 if (vmx->nested.nested_run_pending)
6151 kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING;
6153 if (vmx->nested.mtf_pending)
6154 kvm_state.flags |= KVM_STATE_NESTED_MTF_PENDING;
6156 if (nested_cpu_has_preemption_timer(vmcs12) &&
6157 vmx->nested.has_preemption_timer_deadline) {
6158 kvm_state.hdr.vmx.flags |=
6159 KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE;
6160 kvm_state.hdr.vmx.preemption_timer_deadline =
6161 vmx->nested.preemption_timer_deadline;
6166 if (user_data_size < kvm_state.size)
6169 if (copy_to_user(user_kvm_nested_state, &kvm_state, sizeof(kvm_state)))
6172 if (!vmx_has_valid_vmcs12(vcpu))
6176 * When running L2, the authoritative vmcs12 state is in the
6177 * vmcs02. When running L1, the authoritative vmcs12 state is
6178 * in the shadow or enlightened vmcs linked to vmcs01, unless
6179 * need_vmcs12_to_shadow_sync is set, in which case the authoritative
6180 * vmcs12 state is in the vmcs12 already.
6182 if (is_guest_mode(vcpu)) {
6183 sync_vmcs02_to_vmcs12(vcpu, vmcs12);
6184 sync_vmcs02_to_vmcs12_rare(vcpu, vmcs12);
6186 copy_vmcs02_to_vmcs12_rare(vcpu, get_vmcs12(vcpu));
6187 if (!vmx->nested.need_vmcs12_to_shadow_sync) {
6188 if (vmx->nested.hv_evmcs)
6189 copy_enlightened_to_vmcs12(vmx);
6190 else if (enable_shadow_vmcs)
6191 copy_shadow_to_vmcs12(vmx);
6195 BUILD_BUG_ON(sizeof(user_vmx_nested_state->vmcs12) < VMCS12_SIZE);
6196 BUILD_BUG_ON(sizeof(user_vmx_nested_state->shadow_vmcs12) < VMCS12_SIZE);
6199 * Copy over the full allocated size of vmcs12 rather than just the size
6202 if (copy_to_user(user_vmx_nested_state->vmcs12, vmcs12, VMCS12_SIZE))
6205 if (nested_cpu_has_shadow_vmcs(vmcs12) &&
6206 vmcs12->vmcs_link_pointer != -1ull) {
6207 if (copy_to_user(user_vmx_nested_state->shadow_vmcs12,
6208 get_shadow_vmcs12(vcpu), VMCS12_SIZE))
6212 return kvm_state.size;
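/*
 * Illustrative sketch, not kernel code: how the size returned by
 * vmx_get_nested_state() above is composed, assuming the layout it writes
 * out: the fixed kvm_nested_state header, then one full vmcs12 region, then
 * the shadow vmcs12 only when it is present (L2 active with a valid VMCS
 * link pointer).  The helper name is hypothetical.
 */
static inline unsigned long nested_state_size_sketch(bool valid_vmcs12,
						     bool shadow_vmcs12)
{
	unsigned long size = sizeof(struct kvm_nested_state);

	if (valid_vmcs12) {
		size += VMCS12_SIZE;
		if (shadow_vmcs12)
			size += VMCS12_SIZE;
	}
	return size;
}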
6216 * Forcibly leave nested mode in order to be able to reset the VCPU later on.
6218 void vmx_leave_nested(struct kvm_vcpu *vcpu)
6220 if (is_guest_mode(vcpu)) {
6221 to_vmx(vcpu)->nested.nested_run_pending = 0;
6222 nested_vmx_vmexit(vcpu, -1, 0, 0);
6227 static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
6228 struct kvm_nested_state __user *user_kvm_nested_state,
6229 struct kvm_nested_state *kvm_state)
6231 struct vcpu_vmx *vmx = to_vmx(vcpu);
6232 struct vmcs12 *vmcs12;
6233 enum vm_entry_failure_code ignored;
6234 struct kvm_vmx_nested_state_data __user *user_vmx_nested_state =
6235 &user_kvm_nested_state->data.vmx[0];
6238 if (kvm_state->format != KVM_STATE_NESTED_FORMAT_VMX)
6241 if (kvm_state->hdr.vmx.vmxon_pa == -1ull) {
6242 if (kvm_state->hdr.vmx.smm.flags)
6245 if (kvm_state->hdr.vmx.vmcs12_pa != -1ull)
6249 * KVM_STATE_NESTED_EVMCS used to signal that KVM should
6250 * enable the eVMCS capability on the vCPU. However, the code
6251 * has since been changed so that the flag signals that vmcs12
6252 * should be copied into the eVMCS in guest memory.
6254 * To preserve backwards compatibility, allow userspace
6255 * to set this flag even when there is no VMXON region.
6257 if (kvm_state->flags & ~KVM_STATE_NESTED_EVMCS)
6260 if (!nested_vmx_allowed(vcpu))
6263 if (!page_address_valid(vcpu, kvm_state->hdr.vmx.vmxon_pa))
6267 if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
6268 (kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
6271 if (kvm_state->hdr.vmx.smm.flags &
6272 ~(KVM_STATE_NESTED_SMM_GUEST_MODE | KVM_STATE_NESTED_SMM_VMXON))
6275 if (kvm_state->hdr.vmx.flags & ~KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE)
6279 * SMM temporarily disables VMX, so we cannot be in guest mode,
6280 * nor can VMLAUNCH/VMRESUME be pending. Outside SMM, SMM flags
6285 (KVM_STATE_NESTED_GUEST_MODE | KVM_STATE_NESTED_RUN_PENDING))
6286 : kvm_state->hdr.vmx.smm.flags)
6289 if ((kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE) &&
6290 !(kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON))
6293 if ((kvm_state->flags & KVM_STATE_NESTED_EVMCS) &&
6294 (!nested_vmx_allowed(vcpu) || !vmx->nested.enlightened_vmcs_enabled))
6297 vmx_leave_nested(vcpu);
6299 if (kvm_state->hdr.vmx.vmxon_pa == -1ull)
6302 vmx->nested.vmxon_ptr = kvm_state->hdr.vmx.vmxon_pa;
6303 ret = enter_vmx_operation(vcpu);
6307 /* Empty 'VMXON' state is permitted if no VMCS loaded */
6308 if (kvm_state->size < sizeof(*kvm_state) + sizeof(*vmcs12)) {
6309 /* See vmx_has_valid_vmcs12. */
6310 if ((kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE) ||
6311 (kvm_state->flags & KVM_STATE_NESTED_EVMCS) ||
6312 (kvm_state->hdr.vmx.vmcs12_pa != -1ull))
6318 if (kvm_state->hdr.vmx.vmcs12_pa != -1ull) {
6319 if (kvm_state->hdr.vmx.vmcs12_pa == kvm_state->hdr.vmx.vmxon_pa ||
6320 !page_address_valid(vcpu, kvm_state->hdr.vmx.vmcs12_pa))
6323 set_current_vmptr(vmx, kvm_state->hdr.vmx.vmcs12_pa);
6324 } else if (kvm_state->flags & KVM_STATE_NESTED_EVMCS) {
6326 * nested_vmx_handle_enlightened_vmptrld() cannot be called
6327 * directly from here as HV_X64_MSR_VP_ASSIST_PAGE may not be
6328 * restored yet. EVMCS will be mapped from
6329 * nested_get_vmcs12_pages().
6331 kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
6336 if (kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_VMXON) {
6337 vmx->nested.smm.vmxon = true;
6338 vmx->nested.vmxon = false;
6340 if (kvm_state->hdr.vmx.smm.flags & KVM_STATE_NESTED_SMM_GUEST_MODE)
6341 vmx->nested.smm.guest_mode = true;
6344 vmcs12 = get_vmcs12(vcpu);
6345 if (copy_from_user(vmcs12, user_vmx_nested_state->vmcs12, sizeof(*vmcs12)))
6348 if (vmcs12->hdr.revision_id != VMCS12_REVISION)
6351 if (!(kvm_state->flags & KVM_STATE_NESTED_GUEST_MODE))
6354 vmx->nested.nested_run_pending =
6355 !!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);
6357 vmx->nested.mtf_pending =
6358 !!(kvm_state->flags & KVM_STATE_NESTED_MTF_PENDING);
6361 if (nested_cpu_has_shadow_vmcs(vmcs12) &&
6362 vmcs12->vmcs_link_pointer != -1ull) {
6363 struct vmcs12 *shadow_vmcs12 = get_shadow_vmcs12(vcpu);
6365 if (kvm_state->size <
6366 sizeof(*kvm_state) +
6367 sizeof(user_vmx_nested_state->vmcs12) + sizeof(*shadow_vmcs12))
6368 goto error_guest_mode;
6370 if (copy_from_user(shadow_vmcs12,
6371 user_vmx_nested_state->shadow_vmcs12,
6372 sizeof(*shadow_vmcs12))) {
6374 goto error_guest_mode;
6377 if (shadow_vmcs12->hdr.revision_id != VMCS12_REVISION ||
6378 !shadow_vmcs12->hdr.shadow_vmcs)
6379 goto error_guest_mode;
6382 vmx->nested.has_preemption_timer_deadline = false;
6383 if (kvm_state->hdr.vmx.flags & KVM_STATE_VMX_PREEMPTION_TIMER_DEADLINE) {
6384 vmx->nested.has_preemption_timer_deadline = true;
6385 vmx->nested.preemption_timer_deadline =
6386 kvm_state->hdr.vmx.preemption_timer_deadline;
6389 if (nested_vmx_check_controls(vcpu, vmcs12) ||
6390 nested_vmx_check_host_state(vcpu, vmcs12) ||
6391 nested_vmx_check_guest_state(vcpu, vmcs12, &ignored))
6392 goto error_guest_mode;
6394 vmx->nested.dirty_vmcs12 = true;
6395 ret = nested_vmx_enter_non_root_mode(vcpu, false);
6397 goto error_guest_mode;
6402 vmx->nested.nested_run_pending = 0;
6406 void nested_vmx_set_vmcs_shadowing_bitmap(void)
6408 if (enable_shadow_vmcs) {
6409 vmcs_write64(VMREAD_BITMAP, __pa(vmx_vmread_bitmap));
6410 vmcs_write64(VMWRITE_BITMAP, __pa(vmx_vmwrite_bitmap));
6415 * nested_vmx_setup_ctls_msrs() sets up variables containing the values to be
6416 * returned for the various VMX controls MSRs when nested VMX is enabled.
6417 * The same values should also be used to verify that vmcs12 control fields are
6418 * valid during nested entry from L1 to L2.
6419 * Each of these control MSRs has a low and high 32-bit half: a low bit is on
6420 * if the corresponding bit in the (32-bit) control field *must* be on, and a
6421 * bit in the high half is on if the corresponding bit in the control field
6422 * may be on. See also vmx_control_verify().
6424 void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps)
6427 * Note that as a general rule, the high half of the MSRs (bits in
6428 * the control fields which may be 1) should be initialized by the
6429 * intersection of the underlying hardware's MSR (i.e., features which
6430 * can be supported) and the list of features we want to expose -
6431 * because they are known to be properly supported in our code.
6432 * Also, usually, the low half of the MSRs (bits which must be 1) can
6433 * be set to 0, meaning that L1 may turn off any of these bits. The
6434 * reason is that if one of these bits is necessary, it will appear
6435 * in vmcs01, and prepare_vmcs02, which bitwise-ORs the control
6436 * fields of vmcs01 and vmcs12, will keep it set in vmcs02 - and
6437 * nested_vmx_l1_wants_exit() will not pass the related exits to L1.
6438 * These rules have exceptions below.
6441 /* pin-based controls */
6442 rdmsr(MSR_IA32_VMX_PINBASED_CTLS,
6443 msrs->pinbased_ctls_low,
6444 msrs->pinbased_ctls_high);
6445 msrs->pinbased_ctls_low |=
6446 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
6447 msrs->pinbased_ctls_high &=
6448 PIN_BASED_EXT_INTR_MASK |
6449 PIN_BASED_NMI_EXITING |
6450 PIN_BASED_VIRTUAL_NMIS |
6451 (enable_apicv ? PIN_BASED_POSTED_INTR : 0);
6452 msrs->pinbased_ctls_high |=
6453 PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
6454 PIN_BASED_VMX_PREEMPTION_TIMER;
6457 rdmsr(MSR_IA32_VMX_EXIT_CTLS,
6458 msrs->exit_ctls_low,
6459 msrs->exit_ctls_high);
6460 msrs->exit_ctls_low =
6461 VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR;
6463 msrs->exit_ctls_high &=
6464 #ifdef CONFIG_X86_64
6465 VM_EXIT_HOST_ADDR_SPACE_SIZE |
6467 VM_EXIT_LOAD_IA32_PAT | VM_EXIT_SAVE_IA32_PAT |
6468 VM_EXIT_CLEAR_BNDCFGS | VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL;
6469 msrs->exit_ctls_high |=
6470 VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR |
6471 VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
6472 VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT;
6474 /* We support free control of debug control saving. */
6475 msrs->exit_ctls_low &= ~VM_EXIT_SAVE_DEBUG_CONTROLS;
6477 /* entry controls */
6478 rdmsr(MSR_IA32_VMX_ENTRY_CTLS,
6479 msrs->entry_ctls_low,
6480 msrs->entry_ctls_high);
6481 msrs->entry_ctls_low =
6482 VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR;
6483 msrs->entry_ctls_high &=
6484 #ifdef CONFIG_X86_64
6485 VM_ENTRY_IA32E_MODE |
6487 VM_ENTRY_LOAD_IA32_PAT | VM_ENTRY_LOAD_BNDCFGS |
6488 VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL;
6489 msrs->entry_ctls_high |=
6490 (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER);
6492 /* We support free control of debug control loading. */
6493 msrs->entry_ctls_low &= ~VM_ENTRY_LOAD_DEBUG_CONTROLS;
6495 /* cpu-based controls */
6496 rdmsr(MSR_IA32_VMX_PROCBASED_CTLS,
6497 msrs->procbased_ctls_low,
6498 msrs->procbased_ctls_high);
6499 msrs->procbased_ctls_low =
6500 CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR;
6501 msrs->procbased_ctls_high &=
6502 CPU_BASED_INTR_WINDOW_EXITING |
6503 CPU_BASED_NMI_WINDOW_EXITING | CPU_BASED_USE_TSC_OFFSETTING |
6504 CPU_BASED_HLT_EXITING | CPU_BASED_INVLPG_EXITING |
6505 CPU_BASED_MWAIT_EXITING | CPU_BASED_CR3_LOAD_EXITING |
6506 CPU_BASED_CR3_STORE_EXITING |
6507 #ifdef CONFIG_X86_64
6508 CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING |
6510 CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING |
6511 CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_TRAP_FLAG |
6512 CPU_BASED_MONITOR_EXITING | CPU_BASED_RDPMC_EXITING |
6513 CPU_BASED_RDTSC_EXITING | CPU_BASED_PAUSE_EXITING |
6514 CPU_BASED_TPR_SHADOW | CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
6516 * We can allow some features even when not supported by the
6517 * hardware. For example, L1 can specify an MSR bitmap - and we
6518 * can use it to avoid exits to L1 - even when L0 runs L2
6519 * without MSR bitmaps.
6521 msrs->procbased_ctls_high |=
6522 CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
6523 CPU_BASED_USE_MSR_BITMAPS;
6525 /* We support free control of CR3 access interception. */
6526 msrs->procbased_ctls_low &=
6527 ~(CPU_BASED_CR3_LOAD_EXITING | CPU_BASED_CR3_STORE_EXITING);
6530 * secondary cpu-based controls. Do not include those that
6531 * depend on CPUID bits, they are added later by
6532 * vmx_vcpu_after_set_cpuid.
6534 if (msrs->procbased_ctls_high & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS)
6535 rdmsr(MSR_IA32_VMX_PROCBASED_CTLS2,
6536 msrs->secondary_ctls_low,
6537 msrs->secondary_ctls_high);
6539 msrs->secondary_ctls_low = 0;
6540 msrs->secondary_ctls_high &=
6541 SECONDARY_EXEC_DESC |
6542 SECONDARY_EXEC_ENABLE_RDTSCP |
6543 SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
6544 SECONDARY_EXEC_WBINVD_EXITING |
6545 SECONDARY_EXEC_APIC_REGISTER_VIRT |
6546 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
6547 SECONDARY_EXEC_RDRAND_EXITING |
6548 SECONDARY_EXEC_ENABLE_INVPCID |
6549 SECONDARY_EXEC_RDSEED_EXITING |
6550 SECONDARY_EXEC_XSAVES;
6553 * We can emulate "VMCS shadowing," even if the hardware
6554 * doesn't support it.
6556 msrs->secondary_ctls_high |=
6557 SECONDARY_EXEC_SHADOW_VMCS;
6560 /* nested EPT: emulate EPT also to L1 */
6561 msrs->secondary_ctls_high |=
6562 SECONDARY_EXEC_ENABLE_EPT;
6564 VMX_EPT_PAGE_WALK_4_BIT |
6565 VMX_EPT_PAGE_WALK_5_BIT |
6567 VMX_EPT_INVEPT_BIT |
6568 VMX_EPT_EXECUTE_ONLY_BIT;
6570 msrs->ept_caps &= ept_caps;
6571 msrs->ept_caps |= VMX_EPT_EXTENT_GLOBAL_BIT |
6572 VMX_EPT_EXTENT_CONTEXT_BIT | VMX_EPT_2MB_PAGE_BIT |
6573 VMX_EPT_1GB_PAGE_BIT;
6574 if (enable_ept_ad_bits) {
6575 msrs->secondary_ctls_high |=
6576 SECONDARY_EXEC_ENABLE_PML;
6577 msrs->ept_caps |= VMX_EPT_AD_BIT;
6581 if (cpu_has_vmx_vmfunc()) {
6582 msrs->secondary_ctls_high |=
6583 SECONDARY_EXEC_ENABLE_VMFUNC;
6585 * Advertise EPTP switching unconditionally
6586 * since we emulate it
6589 msrs->vmfunc_controls =
6590 VMX_VMFUNC_EPTP_SWITCHING;
6594 * Old versions of KVM use the single-context version without
6595 * checking for support, so declare that it is supported even
6596 * though it is treated as global context. The alternative, not
6597 * failing the single-context invvpid despite not declaring it, is worse.
6600 msrs->secondary_ctls_high |=
6601 SECONDARY_EXEC_ENABLE_VPID;
6602 msrs->vpid_caps = VMX_VPID_INVVPID_BIT |
6603 VMX_VPID_EXTENT_SUPPORTED_MASK;
6606 if (enable_unrestricted_guest)
6607 msrs->secondary_ctls_high |=
6608 SECONDARY_EXEC_UNRESTRICTED_GUEST;
6610 if (flexpriority_enabled)
6611 msrs->secondary_ctls_high |=
6612 SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
6614 /* miscellaneous data */
6615 rdmsr(MSR_IA32_VMX_MISC,
6618 msrs->misc_low &= VMX_MISC_SAVE_EFER_LMA;
6620 MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS |
6621 VMX_MISC_EMULATED_PREEMPTION_TIMER_RATE |
6622 VMX_MISC_ACTIVITY_HLT;
6623 msrs->misc_high = 0;
6626 * This MSR reports some information about VMX support. We
6627 * should return information about the VMX we emulate for the
6628 * guest, and the VMCS structure we give it - not about the
6629 * VMX support of the underlying hardware.
6633 VMX_BASIC_TRUE_CTLS |
6634 ((u64)VMCS12_SIZE << VMX_BASIC_VMCS_SIZE_SHIFT) |
6635 (VMX_BASIC_MEM_TYPE_WB << VMX_BASIC_MEM_TYPE_SHIFT);
6637 if (cpu_has_vmx_basic_inout())
6638 msrs->basic |= VMX_BASIC_INOUT;
6641 * These MSRs specify bits which the guest must keep fixed on
6642 * while L1 is in VMXON mode (in L1's root mode, or running an L2).
6643 * We picked the standard Core 2 setting.
6645 #define VMXON_CR0_ALWAYSON (X86_CR0_PE | X86_CR0_PG | X86_CR0_NE)
6646 #define VMXON_CR4_ALWAYSON X86_CR4_VMXE
6647 msrs->cr0_fixed0 = VMXON_CR0_ALWAYSON;
6648 msrs->cr4_fixed0 = VMXON_CR4_ALWAYSON;
6650 /* These MSRs specify bits which the guest must keep fixed off. */
6651 rdmsrl(MSR_IA32_VMX_CR0_FIXED1, msrs->cr0_fixed1);
6652 rdmsrl(MSR_IA32_VMX_CR4_FIXED1, msrs->cr4_fixed1);
6654 /* highest index: VMX_PREEMPTION_TIMER_VALUE */
6655 msrs->vmcs_enum = VMCS12_MAX_FIELD_INDEX << 1;
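/*
 * Illustrative sketch, not kernel code: the check implied by the low/high
 * convention described at the top of nested_vmx_setup_ctls_msrs() above.  A
 * vmcs12 control field is acceptable iff it has every "must be on" bit from
 * the MSR's low half set and no bit outside the "may be on" high half set.
 * The helper name is hypothetical; KVM's own vmx_control_verify() performs
 * the equivalent check.
 */
static inline bool control_field_valid_sketch(u32 control, u32 low, u32 high)
{
	return (control & low) == low && (control & ~high) == 0;
}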
6658 void nested_vmx_hardware_unsetup(void)
6662 if (enable_shadow_vmcs) {
6663 for (i = 0; i < VMX_BITMAP_NR; i++)
6664 free_page((unsigned long)vmx_bitmap[i]);
6668 __init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *))
6672 if (!cpu_has_vmx_shadow_vmcs())
6673 enable_shadow_vmcs = 0;
6674 if (enable_shadow_vmcs) {
6675 for (i = 0; i < VMX_BITMAP_NR; i++) {
6677 * The vmx_bitmap is not tied to a VM and so should
6678 * not be charged to a memcg.
6680 vmx_bitmap[i] = (unsigned long *)
6681 __get_free_page(GFP_KERNEL);
6682 if (!vmx_bitmap[i]) {
6683 nested_vmx_hardware_unsetup();
6688 init_vmcs_shadow_fields();
6691 exit_handlers[EXIT_REASON_VMCLEAR] = handle_vmclear;
6692 exit_handlers[EXIT_REASON_VMLAUNCH] = handle_vmlaunch;
6693 exit_handlers[EXIT_REASON_VMPTRLD] = handle_vmptrld;
6694 exit_handlers[EXIT_REASON_VMPTRST] = handle_vmptrst;
6695 exit_handlers[EXIT_REASON_VMREAD] = handle_vmread;
6696 exit_handlers[EXIT_REASON_VMRESUME] = handle_vmresume;
6697 exit_handlers[EXIT_REASON_VMWRITE] = handle_vmwrite;
6698 exit_handlers[EXIT_REASON_VMOFF] = handle_vmoff;
6699 exit_handlers[EXIT_REASON_VMON] = handle_vmon;
6700 exit_handlers[EXIT_REASON_INVEPT] = handle_invept;
6701 exit_handlers[EXIT_REASON_INVVPID] = handle_invvpid;
6702 exit_handlers[EXIT_REASON_VMFUNC] = handle_vmfunc;
6707 struct kvm_x86_nested_ops vmx_nested_ops = {
6708 .leave_nested = vmx_leave_nested,
6709 .check_events = vmx_check_nested_events,
6710 .hv_timer_pending = nested_vmx_preemption_timer_pending,
6711 .get_state = vmx_get_nested_state,
6712 .set_state = vmx_set_nested_state,
6713 .get_nested_state_pages = vmx_get_nested_state_pages,
6714 .write_log_dirty = nested_vmx_write_pml_buffer,
6715 .enable_evmcs = nested_enable_evmcs,
6716 .get_evmcs_version = nested_get_evmcs_version,