/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_VMX_INSN_H
#define __KVM_X86_VMX_INSN_H

#include <linux/nospec.h>

#include <asm/kvm_host.h>
#include <asm/vmx.h>

#include "evmcs.h"
#include "vmcs.h"

#define __ex(x) __kvm_handle_fault_on_reboot(x)

asmlinkage void vmread_error(unsigned long field, bool fault);
__attribute__((regparm(0))) void vmread_error_trampoline(unsigned long field,
							  bool fault);
void vmwrite_error(unsigned long field, unsigned long value);
void vmclear_error(struct vmcs *vmcs, u64 phys_addr);
void vmptrld_error(struct vmcs *vmcs, u64 phys_addr);
void invvpid_error(unsigned long ext, u16 vpid, gva_t gva);
void invept_error(unsigned long ext, u64 eptp, gpa_t gpa);

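/*
 * VMCS field encodings carry the field width in bits 14:13 (0 = 16-bit,
 * 1 = 64-bit, 2 = 32-bit, 3 = natural width) and the "high" half-access
 * flag in bit 0.  The vmcs_check*() helpers below use that encoding to
 * reject, at compile time, accessors whose size does not match a
 * compile-time constant field.
 */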
static __always_inline void vmcs_check16(unsigned long field)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
			 "16-bit accessor invalid for 64-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
			 "16-bit accessor invalid for 64-bit high field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
			 "16-bit accessor invalid for 32-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
			 "16-bit accessor invalid for natural width field");
}

static __always_inline void vmcs_check32(unsigned long field)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
			 "32-bit accessor invalid for 16-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
			 "32-bit accessor invalid for natural width field");
}

static __always_inline void vmcs_check64(unsigned long field)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
			 "64-bit accessor invalid for 16-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
			 "64-bit accessor invalid for 64-bit high field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
			 "64-bit accessor invalid for 32-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x6000,
			 "64-bit accessor invalid for natural width field");
}

static __always_inline void vmcs_checkl(unsigned long field)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0,
			 "Natural width accessor invalid for 16-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2000,
			 "Natural width accessor invalid for 64-bit field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6001) == 0x2001,
			 "Natural width accessor invalid for 64-bit high field");
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x4000,
			 "Natural width accessor invalid for 32-bit field");
}

static __always_inline unsigned long __vmcs_readl(unsigned long field)
{
	unsigned long value;

	asm volatile("1: vmread %2, %1\n\t"
		     ".byte 0x3e\n\t" /* branch taken hint */
		     "ja 3f\n\t"

		     /*
		      * VMREAD failed.  Push '0' for @fault, push the failing
		      * @field, and bounce through the trampoline to preserve
		      * volatile registers.
		      */
		     "push $0\n\t"
		     "push %2\n\t"
		     "2:call vmread_error_trampoline\n\t"

		     /*
		      * Unwind the stack.  Note, the trampoline zeros out the
		      * memory for @fault so that the result is '0' on error.
		      */
		     "pop %2\n\t"
		     "pop %1\n\t"
		     "3:\n\t"

		     /* VMREAD faulted.  As above, except push '1' for @fault. */
		     ".pushsection .fixup, \"ax\"\n\t"
		     "4: push $1\n\t"
		     "push %2\n\t"
		     "jmp 2b\n\t"
		     ".popsection\n\t"
		     _ASM_EXTABLE(1b, 4b)
		     : ASM_CALL_CONSTRAINT, "=r"(value) : "r"(field) : "cc");
	return value;
}

static __always_inline u16 vmcs_read16(unsigned long field)
{
	vmcs_check16(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_read16(field);
	return __vmcs_readl(field);
}

static __always_inline u32 vmcs_read32(unsigned long field)
{
	vmcs_check32(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_read32(field);
	return __vmcs_readl(field);
}

static __always_inline u64 vmcs_read64(unsigned long field)
{
	vmcs_check64(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_read64(field);
#ifdef CONFIG_X86_64
	return __vmcs_readl(field);
#else
	return __vmcs_readl(field) | ((u64)__vmcs_readl(field+1) << 32);
#endif
}

static __always_inline unsigned long vmcs_readl(unsigned long field)
{
	vmcs_checkl(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_read64(field);
	return __vmcs_readl(field);
}

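/*
 * vmx_asm1()/vmx_asm2() wrap one- and two-operand VMX instructions in
 * asm goto: "jna" catches the CF/ZF VM-instruction error case and jumps to
 * the error label, which reports via the matching <insn>_error() helper,
 * while a faulting instruction is routed through the exception table to
 * kvm_spurious_fault().
 */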
#define vmx_asm1(insn, op1, error_args...)				\
do {									\
	asm_volatile_goto("1: " __stringify(insn) " %0\n\t"		\
			  ".byte 0x2e\n\t" /* branch not taken hint */	\
			  "jna %l[error]\n\t"				\
			  _ASM_EXTABLE(1b, %l[fault])			\
			  : : op1 : "cc" : error, fault);		\
	return;								\
error:									\
	instrumentation_begin();					\
	insn##_error(error_args);					\
	instrumentation_end();						\
	return;								\
fault:									\
	kvm_spurious_fault();						\
} while (0)

#define vmx_asm2(insn, op1, op2, error_args...)				\
do {									\
	asm_volatile_goto("1: " __stringify(insn) " %1, %0\n\t"		\
			  ".byte 0x2e\n\t" /* branch not taken hint */	\
			  "jna %l[error]\n\t"				\
			  _ASM_EXTABLE(1b, %l[fault])			\
			  : : op1, op2 : "cc" : error, fault);		\
	return;								\
error:									\
	instrumentation_begin();					\
	insn##_error(error_args);					\
	instrumentation_end();						\
	return;								\
fault:									\
	kvm_spurious_fault();						\
} while (0)

static __always_inline void __vmcs_writel(unsigned long field, unsigned long value)
{
	vmx_asm2(vmwrite, "r"(field), "rm"(value), field, value);
}

static __always_inline void vmcs_write16(unsigned long field, u16 value)
{
	vmcs_check16(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_write16(field, value);

	__vmcs_writel(field, value);
}

static __always_inline void vmcs_write32(unsigned long field, u32 value)
{
	vmcs_check32(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_write32(field, value);

	__vmcs_writel(field, value);
}

static __always_inline void vmcs_write64(unsigned long field, u64 value)
{
	vmcs_check64(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_write64(field, value);

	__vmcs_writel(field, value);
#ifndef CONFIG_X86_64
	__vmcs_writel(field+1, value >> 32);
#endif
}

static __always_inline void vmcs_writel(unsigned long field, unsigned long value)
{
	vmcs_checkl(field);
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_write64(field, value);

	__vmcs_writel(field, value);
}

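/*
 * Example use of the typed accessors (illustrative only; the field names
 * come from asm/vmx.h):
 *
 *	u32 exit_reason = vmcs_read32(VM_EXIT_REASON);
 *	vmcs_writel(GUEST_RIP, vmcs_readl(GUEST_RIP) +
 *			       vmcs_read32(VM_EXIT_INSTRUCTION_LEN));
 *
 * vmcs_clear_bits()/vmcs_set_bits() below read-modify-write a field and,
 * for compile-time constant fields, reject 64-bit fields, which could
 * require a split access on 32-bit hosts.
 */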
static __always_inline void vmcs_clear_bits(unsigned long field, u32 mask)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
			 "vmcs_clear_bits does not support 64-bit fields");
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_write32(field, evmcs_read32(field) & ~mask);

	__vmcs_writel(field, __vmcs_readl(field) & ~mask);
}

static __always_inline void vmcs_set_bits(unsigned long field, u32 mask)
{
	BUILD_BUG_ON_MSG(__builtin_constant_p(field) && ((field) & 0x6000) == 0x2000,
			 "vmcs_set_bits does not support 64-bit fields");
	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_write32(field, evmcs_read32(field) | mask);

	__vmcs_writel(field, __vmcs_readl(field) | mask);
}

static inline void vmcs_clear(struct vmcs *vmcs)
{
	u64 phys_addr = __pa(vmcs);

	vmx_asm1(vmclear, "m"(phys_addr), vmcs, phys_addr);
}

static inline void vmcs_load(struct vmcs *vmcs)
{
	u64 phys_addr = __pa(vmcs);

	if (static_branch_unlikely(&enable_evmcs))
		return evmcs_load(phys_addr);

	vmx_asm1(vmptrld, "m"(phys_addr), vmcs, phys_addr);
}

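/*
 * INVVPID and INVEPT take a 128-bit descriptor as a memory operand; the
 * on-stack structs below build that descriptor and are passed via the "m"
 * constraint.
 */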
static inline void __invvpid(unsigned long ext, u16 vpid, gva_t gva)
{
	struct {
		u64 vpid : 16;
		u64 rsvd : 48;
		u64 gva;
	} operand = { vpid, 0, gva };

	vmx_asm2(invvpid, "r"(ext), "m"(operand), ext, vpid, gva);
}

static inline void __invept(unsigned long ext, u64 eptp, gpa_t gpa)
{
	struct {
		u64 eptp, gpa;
	} operand = {eptp, gpa};

	vmx_asm2(invept, "r"(ext), "m"(operand), ext, eptp, gpa);
}

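/*
 * VPID 0 is reserved for VMX root operation (the host), and single-context
 * and individual-address INVVPID are not valid for it, so the helpers below
 * simply skip the flush when vpid == 0.
 */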
static inline void vpid_sync_vcpu_single(int vpid)
{
	if (vpid == 0)
		return;

	__invvpid(VMX_VPID_EXTENT_SINGLE_CONTEXT, vpid, 0);
}

static inline void vpid_sync_vcpu_global(void)
{
	__invvpid(VMX_VPID_EXTENT_ALL_CONTEXT, 0, 0);
}

static inline void vpid_sync_context(int vpid)
{
	if (cpu_has_vmx_invvpid_single())
		vpid_sync_vcpu_single(vpid);
	else if (vpid != 0)
		vpid_sync_vcpu_global();
}

static inline void vpid_sync_vcpu_addr(int vpid, gva_t addr)
{
	if (vpid == 0)
		return;

	if (cpu_has_vmx_invvpid_individual_addr())
		__invvpid(VMX_VPID_EXTENT_INDIVIDUAL_ADDR, vpid, addr);
	else
		vpid_sync_context(vpid);
}

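/*
 * EPT flushes: prefer a single-context INVEPT for the given EPTP and fall
 * back to a global invalidation when only the global type is supported.
 */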
static inline void ept_sync_global(void)
{
	__invept(VMX_EPT_EXTENT_GLOBAL, 0, 0);
}

static inline void ept_sync_context(u64 eptp)
{
	if (cpu_has_vmx_invept_context())
		__invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0);
	else
		ept_sync_global();
}

#endif /* __KVM_X86_VMX_INSN_H */