/* SPDX-License-Identifier: GPL-2.0-only */
/* CPU virtualization extensions handling
 *
 * This should carry the code for handling CPU virtualization extensions
 * that needs to live in the kernel core.
 *
 * Author: Eduardo Habkost <ehabkost@redhat.com>
 *
 * Copyright (C) 2008, Red Hat Inc.
 *
 * Contains code from KVM, Copyright (C) 2006 Qumranet, Inc.
 */
#ifndef _ASM_X86_VIRTEX_H
#define _ASM_X86_VIRTEX_H

#include <asm/processor.h>

#include <asm/vmx.h>
#include <asm/svm.h>
#include <asm/tlbflush.h>

static inline int cpu_has_vmx(void)
{
	unsigned long ecx = cpuid_ecx(1);

	return test_bit(5, &ecx); /* CPUID.1:ECX.VMX[bit 5] -> VT */
}

/**
 * cpu_vmxoff() - Disable VMX on the current CPU
 *
 * Disable VMX and clear CR4.VMXE (even if VMXOFF faults)
 *
 * Note, VMXOFF causes a #UD if the CPU is !post-VMXON, but it's impossible to
 * atomically track post-VMXON state, e.g. this may be called in NMI context.
 * Eat all faults, as all other faults on VMXOFF are mode related, i.e.
 * faults are guaranteed to be due to the !post-VMXON check unless the CPU is
 * magically in RM, VM86, compat mode, or at CPL>0.
 */
static inline void cpu_vmxoff(void)
{
	asm_volatile_goto("1: vmxoff\n\t"
			  _ASM_EXTABLE(1b, %l[fault]) :::: fault);
fault:
	cr4_clear_bits(X86_CR4_VMXE);
}

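/*
 * Illustrative usage (a sketch, not part of this header): because VMXOFF
 * faults are eaten, an emergency path running in NMI context can call this
 * without tracking whether the CPU actually executed VMXON; it only needs
 * to know that CR4.VMXE is set:
 *
 *	if (__read_cr4() & X86_CR4_VMXE)
 *		cpu_vmxoff();
 */
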
static inline int cpu_vmx_enabled(void)
{
	return __read_cr4() & X86_CR4_VMXE;
}

/** Disable VMX if it is enabled on the current CPU
 *
 * You shouldn't call this if cpu_has_vmx() returns 0.
 */
static inline void __cpu_emergency_vmxoff(void)
{
	if (cpu_vmx_enabled())
		cpu_vmxoff();
}

/** Disable VMX if it is supported and enabled on the current CPU
 */
static inline void cpu_emergency_vmxoff(void)
{
	if (cpu_has_vmx())
		__cpu_emergency_vmxoff();
}

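/*
 * Illustrative usage (a sketch, not part of this header): an emergency
 * reboot or crash path would typically run this on every online CPU,
 * e.g. via a hypothetical callback such as:
 *
 *	static void emergency_vmxoff_cb(void *unused)
 *	{
 *		cpu_emergency_vmxoff();
 *	}
 *
 *	on_each_cpu(emergency_vmxoff_cb, NULL, 1);
 */
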
/** Check if the CPU has SVM support
 *
 * You can use the 'msg' arg to get a message describing the problem,
 * if the function returns zero. Simply pass NULL if you are not interested
 * in the messages; gcc should take care of not generating code for
 * the messages in this case.
 */
static inline int cpu_has_svm(const char **msg)
{
	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) {
		if (msg)
			*msg = "not amd or hygon";
		return 0;
	}

	if (!boot_cpu_has(X86_FEATURE_SVM)) {
		if (msg)
			*msg = "svm not available";
		return 0;
	}
	return 1;
}

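/*
 * Illustrative usage (a sketch, not part of this header): the 'msg'
 * argument reports why SVM is unusable, e.g. in a hypothetical probe path:
 *
 *	const char *msg;
 *
 *	if (!cpu_has_svm(&msg))
 *		pr_info("SVM unavailable: %s\n", msg);
 */
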
/** Disable SVM on the current CPU
 *
 * You should call this only if cpu_has_svm() returned true.
 */
static inline void cpu_svm_disable(void)
{
	uint64_t efer;

	wrmsrl(MSR_VM_HSAVE_PA, 0);
	rdmsrl(MSR_EFER, efer);
	if (efer & EFER_SVME) {
		/*
		 * Force GIF=1 prior to disabling SVM to ensure INIT and NMI
		 * aren't blocked, e.g. if a fatal error occurred between CLGI
		 * and STGI. Note, STGI may #UD if SVM is disabled from NMI
		 * context between reading EFER and executing STGI. In that
		 * case, GIF must already be set, otherwise the NMI would have
		 * been blocked, so just eat the fault.
		 */
		asm_volatile_goto("1: stgi\n\t"
				  _ASM_EXTABLE(1b, %l[fault])
				  ::: "memory" : fault);
fault:
		wrmsrl(MSR_EFER, efer & ~EFER_SVME);
	}
}

/** Makes sure SVM is disabled, if it is supported on the CPU
 */
static inline void cpu_emergency_svm_disable(void)
{
	if (cpu_has_svm(NULL))
		cpu_svm_disable();
}

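/*
 * Illustrative usage (a sketch, not part of this header): an emergency
 * shutdown path that must cope with both Intel and AMD/Hygon parts can
 * invoke both helpers; each is a no-op when the respective extension is
 * unsupported or not enabled:
 *
 *	cpu_emergency_vmxoff();
 *	cpu_emergency_svm_disable();
 */
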
#endif /* _ASM_X86_VIRTEX_H */