/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/bitsperlong.h>
#include <asm/kvm_vcpu_regs.h>
#include <asm/nospec-branch.h>
#define WORD_SIZE (BITS_PER_LONG / 8)
#define VCPU_RAX	__VCPU_REGS_RAX * WORD_SIZE
#define VCPU_RCX	__VCPU_REGS_RCX * WORD_SIZE
#define VCPU_RDX	__VCPU_REGS_RDX * WORD_SIZE
#define VCPU_RBX	__VCPU_REGS_RBX * WORD_SIZE
/* Intentionally omit RSP as it's context switched by hardware */
#define VCPU_RBP	__VCPU_REGS_RBP * WORD_SIZE
#define VCPU_RSI	__VCPU_REGS_RSI * WORD_SIZE
#define VCPU_RDI	__VCPU_REGS_RDI * WORD_SIZE
#ifdef CONFIG_X86_64
#define VCPU_R8		__VCPU_REGS_R8  * WORD_SIZE
#define VCPU_R9		__VCPU_REGS_R9  * WORD_SIZE
#define VCPU_R10	__VCPU_REGS_R10 * WORD_SIZE
#define VCPU_R11	__VCPU_REGS_R11 * WORD_SIZE
#define VCPU_R12	__VCPU_REGS_R12 * WORD_SIZE
#define VCPU_R13	__VCPU_REGS_R13 * WORD_SIZE
#define VCPU_R14	__VCPU_REGS_R14 * WORD_SIZE
#define VCPU_R15	__VCPU_REGS_R15 * WORD_SIZE
#endif
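
/*
 * For reference, a worked example of the offsets above, assuming the
 * __VCPU_REGS_* values in kvm_vcpu_regs.h follow the hardware register
 * encoding (RAX = 0, RCX = 1, RDX = 2, ...).  On a 64-bit build:
 *
 *	WORD_SIZE = BITS_PER_LONG / 8 = 64 / 8 = 8
 *	VCPU_RDX  = __VCPU_REGS_RDX * WORD_SIZE = 2 * 8 = 16
 *
 * i.e. "mov VCPU_RDX(%_ASM_AX), %_ASM_DX" below loads guest RDX from
 * regs[2], the third entry of the @regs array passed to __vmx_vcpu_run.
 */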
/**
 * __vmx_vcpu_run - Run a vCPU via a transition to VMX guest mode
 * @vmx:	struct vcpu_vmx *
 * @regs:	unsigned long * (to guest registers)
 * @flags:	VMX_RUN_VMRESUME: use VMRESUME instead of VMLAUNCH
 *		VMX_RUN_SAVE_SPEC_CTRL: save guest SPEC_CTRL into vmx->spec_ctrl
 *
 * Returns:
 *	0 on VM-Exit, 1 on VM-Fail
 */
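/*
 * From the C side, a declaration and call matching the contract documented
 * above would look roughly like this (a sketch only; the exact prototype
 * and call site live in KVM's vmx code):
 *
 *	bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs,
 *			    unsigned int flags);
 *
 *	vmx->fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs,
 *				   flags);
 */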
SYM_FUNC_START(__vmx_vcpu_run)
	push %_ASM_BP
	mov  %_ASM_SP, %_ASM_BP
#ifdef CONFIG_X86_64
	push %r15
	push %r14
	push %r13
	push %r12
#else
	push %edi
	push %esi
#endif
	push %_ASM_BX
	/* Save @vmx for SPEC_CTRL handling */
	push %_ASM_ARG1

	/* Save @flags for SPEC_CTRL handling */
	push %_ASM_ARG3

	/*
	 * Save @regs, _ASM_ARG2 may be modified by vmx_update_host_rsp() and
	 * @regs is needed after VM-Exit to save the guest's register values.
	 */
	push %_ASM_ARG2

	/* Copy @flags to BL, _ASM_ARG3 is volatile. */
	mov %_ASM_ARG3B, %bl
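
	/*
	 * The lea/call pair below hands the current stack pointer to C code
	 * so the VMCS HOST_RSP field can be refreshed if it changed.  A
	 * sketch of the callee, assuming its prototype in vmx.c:
	 *
	 *	void vmx_update_host_rsp(struct vcpu_vmx *vmx,
	 *				 unsigned long host_rsp);
	 *
	 * @vmx is still live in _ASM_ARG1 here, so only arg2 needs setup.
	 */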
	lea (%_ASM_SP), %_ASM_ARG2
	call vmx_update_host_rsp
	/* Load @regs to RAX. */
	mov (%_ASM_SP), %_ASM_AX
	/* Check if vmlaunch or vmresume is needed */
	testb $VMX_RUN_VMRESUME, %bl
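
	/*
	 * Note: "testb" sets ZF=1 iff (@flags & VMX_RUN_VMRESUME) == 0, and
	 * none of the register moves below touch EFLAGS, so the "jz" after
	 * the guest GPRs are loaded still routes first-time runs to VMLAUNCH
	 * and falls through to VMRESUME otherwise.
	 */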
	/* Load guest registers.  Don't clobber flags. */
	mov VCPU_RBX(%_ASM_AX), %_ASM_BX
	mov VCPU_RCX(%_ASM_AX), %_ASM_CX
	mov VCPU_RDX(%_ASM_AX), %_ASM_DX
	mov VCPU_RSI(%_ASM_AX), %_ASM_SI
	mov VCPU_RDI(%_ASM_AX), %_ASM_DI
	mov VCPU_RBP(%_ASM_AX), %_ASM_BP
#ifdef CONFIG_X86_64
	mov VCPU_R8 (%_ASM_AX),  %r8
	mov VCPU_R9 (%_ASM_AX),  %r9
	mov VCPU_R10(%_ASM_AX), %r10
	mov VCPU_R11(%_ASM_AX), %r11
	mov VCPU_R12(%_ASM_AX), %r12
	mov VCPU_R13(%_ASM_AX), %r13
	mov VCPU_R14(%_ASM_AX), %r14
	mov VCPU_R15(%_ASM_AX), %r15
#endif
	/* Load guest RAX.  This kills the @regs pointer! */
	mov VCPU_RAX(%_ASM_AX), %_ASM_AX
	/* Check EFLAGS.ZF from 'testb' above */
	jz .Lvmlaunch
	/*
	 * If VMRESUME/VMLAUNCH and corresponding vmexit succeed, execution
	 * resumes at the 'vmx_vmexit' label below.
	 */
.Lvmresume:
	vmresume
	jmp .Lvmfail

.Lvmlaunch:
	vmlaunch
	jmp .Lvmfail
	_ASM_EXTABLE(.Lvmresume, .Lfixup)
	_ASM_EXTABLE(.Lvmlaunch, .Lfixup)
SYM_INNER_LABEL(vmx_vmexit, SYM_L_GLOBAL)
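
	/*
	 * Control arrives here on VM-Exit because hardware loads RIP and RSP
	 * from the VMCS HOST_RIP/HOST_RSP fields: the C side writes
	 * HOST_RIP = vmx_vmexit when initializing constant host state and
	 * keeps HOST_RSP current via vmx_update_host_rsp() above.
	 */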
	/* Temporarily save guest's RAX. */
	push %_ASM_AX
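
	/*
	 * Stack layout at this point (grows down): the push above put guest
	 * RAX on top, directly above the values saved in the prologue, which
	 * is why @regs is reloaded from WORD_SIZE(%_ASM_SP) below:
	 *
	 *	0(%_ASM_SP)		guest RAX (just pushed)
	 *	WORD_SIZE(%_ASM_SP)	@regs
	 *	2*WORD_SIZE(%_ASM_SP)	@flags
	 *	3*WORD_SIZE(%_ASM_SP)	@vmx
	 */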
	/* Reload @regs to RAX. */
	mov WORD_SIZE(%_ASM_SP), %_ASM_AX
	/* Save all guest registers, including RAX from the stack */
	__ASM_SIZE(pop) VCPU_RAX(%_ASM_AX)
	mov %_ASM_BX,   VCPU_RBX(%_ASM_AX)
	mov %_ASM_CX,   VCPU_RCX(%_ASM_AX)
	mov %_ASM_DX,   VCPU_RDX(%_ASM_AX)
	mov %_ASM_SI,   VCPU_RSI(%_ASM_AX)
	mov %_ASM_DI,   VCPU_RDI(%_ASM_AX)
	mov %_ASM_BP,   VCPU_RBP(%_ASM_AX)
#ifdef CONFIG_X86_64
	mov %r8,  VCPU_R8 (%_ASM_AX)
	mov %r9,  VCPU_R9 (%_ASM_AX)
	mov %r10, VCPU_R10(%_ASM_AX)
	mov %r11, VCPU_R11(%_ASM_AX)
	mov %r12, VCPU_R12(%_ASM_AX)
	mov %r13, VCPU_R13(%_ASM_AX)
	mov %r14, VCPU_R14(%_ASM_AX)
	mov %r15, VCPU_R15(%_ASM_AX)
#endif
	/* Clear return value to indicate VM-Exit (as opposed to VM-Fail). */
	xor %ebx, %ebx

.Lclear_regs:
	/*
	 * Clear all general purpose registers except RSP and RBX to prevent
	 * speculative use of the guest's values, even those that are reloaded
	 * via the stack.  In theory, an L1 cache miss when restoring registers
	 * could lead to speculative execution with the guest's values.
	 * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
	 * free.  RSP and RBX are exempt as RSP is restored by hardware during
	 * VM-Exit and RBX is explicitly loaded with 0 or 1 to hold the return
	 * value.
	 */
	xor %eax, %eax
	xor %ecx, %ecx
	xor %edx, %edx
	xor %esi, %esi
	xor %edi, %edi
	xor %ebp, %ebp
#ifdef CONFIG_X86_64
	xor %r8d,  %r8d
	xor %r9d,  %r9d
	xor %r10d, %r10d
	xor %r11d, %r11d
	xor %r12d, %r12d
	xor %r13d, %r13d
	xor %r14d, %r14d
	xor %r15d, %r15d
#endif
	/* "POP" @regs. */
	add $WORD_SIZE, %_ASM_SP
	/*
	 * IMPORTANT: RSB filling and SPEC_CTRL handling must be done before
	 * the first unbalanced RET after vmexit!
	 *
	 * For retpoline or IBRS, RSB filling is needed to prevent poisoned RSB
	 * entries and (in some cases) RSB underflow.
	 *
	 * eIBRS has its own protection against poisoned RSB, so it doesn't
	 * need the RSB filling sequence.  But it does need to be enabled, and a
	 * single call to retire, before the first unbalanced RET.
	 */
	FILL_RETURN_BUFFER %_ASM_CX, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_VMEXIT,\
			   X86_FEATURE_RSB_VMEXIT_LITE
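
	/*
	 * Roughly speaking, FILL_RETURN_BUFFER (nospec-branch.h) expands via
	 * alternatives, gated on the two feature flags above, into a loop of
	 * RSB_CLEAR_LOOPS benign CALLs that overwrite every RSB entry with a
	 * safe target; %_ASM_CX is handed in purely as a scratch register.
	 */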
	pop %_ASM_ARG2	/* @flags */
	pop %_ASM_ARG1	/* @vmx */
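
	/*
	 * A sketch of the callee, assuming its prototype in vmx.c; it
	 * restores the host's SPEC_CTRL value and, if VMX_RUN_SAVE_SPEC_CTRL
	 * is set in @flags, first saves the guest's value in vmx->spec_ctrl:
	 *
	 *	void vmx_spec_ctrl_restore_host(struct vcpu_vmx *vmx,
	 *					unsigned int flags);
	 */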
	call vmx_spec_ctrl_restore_host
	/* Put return value in AX */
	mov %_ASM_BX, %_ASM_AX

	/* Restore the callee-saved registers pushed in the prologue. */
	pop %_ASM_BX
#ifdef CONFIG_X86_64
	pop %r12
	pop %r13
	pop %r14
	pop %r15
#else
	pop %esi
	pop %edi
#endif
	pop %_ASM_BP
	RET
.Lfixup:
	/* VM-Enter faulted: crash unless the fault is due to KVM rebooting. */
	cmpb $0, kvm_rebooting
	jne .Lvmfail
	ud2
.Lvmfail:
	/* VM-Fail: set return value to 1 */
	mov $1, %_ASM_BX
	jmp .Lclear_regs
SYM_FUNC_END(__vmx_vcpu_run)
/**
 * vmread_error_trampoline - Trampoline from inline asm to vmread_error()
 * @field:	VMCS field encoding that failed
 * @fault:	%true if the VMREAD faulted, %false if it failed
 *
 * Save and restore volatile registers across a call to vmread_error().  Note,
 * all parameters are passed on the stack.
 */
SYM_FUNC_START(vmread_error_trampoline)
	push %_ASM_BP
	mov  %_ASM_SP, %_ASM_BP
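
	/*
	 * With RBP established above, the stack-passed parameters sit at
	 * fixed offsets from %_ASM_BP:
	 *
	 *	0(%_ASM_BP)		saved RBP
	 *	1*WORD_SIZE(%_ASM_BP)	return address
	 *	2*WORD_SIZE(%_ASM_BP)	@field
	 *	3*WORD_SIZE(%_ASM_BP)	@fault
	 *
	 * which is what the 2*WORD_SIZE/3*WORD_SIZE accesses below rely on.
	 */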
	/* Save the volatile registers clobbered by the call below. */
	push %_ASM_AX
	push %_ASM_CX
	push %_ASM_DX
#ifdef CONFIG_X86_64
	push %rdi
	push %rsi
	push %r8
	push %r9
	push %r10
	push %r11
#endif

#ifdef CONFIG_X86_64
	/* Load @field and @fault to arg1 and arg2 respectively. */
	mov 3*WORD_SIZE(%rbp), %_ASM_ARG2
	mov 2*WORD_SIZE(%rbp), %_ASM_ARG1
#else
	/* Parameters are passed on the stack for 32-bit (see asmlinkage). */
	push 3*WORD_SIZE(%ebp)
	push 2*WORD_SIZE(%ebp)
#endif

	call vmread_error
#ifndef CONFIG_X86_64
	/* Drop the two stack-passed parameters (2 * 4 bytes). */
	add $8, %esp
#endif
	/* Zero out @fault, which will be popped into the result register. */
	_ASM_MOV $0, 3*WORD_SIZE(%_ASM_BP)
	/* Restore the volatile registers saved above and return. */
#ifdef CONFIG_X86_64
	pop %r11
	pop %r10
	pop %r9
	pop %r8
	pop %rsi
	pop %rdi
#endif
	pop %_ASM_DX
	pop %_ASM_CX
	pop %_ASM_AX

	pop %_ASM_BP
	RET
SYM_FUNC_END(vmread_error_trampoline)