/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/bitsperlong.h>
#include <asm/kvm_vcpu_regs.h>
#include <asm/nospec-branch.h>
#include <asm/segment.h>
#include "run_flags.h"
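
/*
 * The _ASM_* register and argument macros used below (e.g. _ASM_SP, _ASM_BP,
 * _ASM_ARG1) come from <asm/asm.h> and expand to the 32-bit or 64-bit
 * register names as appropriate, which is what lets this file build for
 * both word sizes.
 */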

#define WORD_SIZE (BITS_PER_LONG / 8)

#define VCPU_RAX __VCPU_REGS_RAX * WORD_SIZE
#define VCPU_RCX __VCPU_REGS_RCX * WORD_SIZE
#define VCPU_RDX __VCPU_REGS_RDX * WORD_SIZE
#define VCPU_RBX __VCPU_REGS_RBX * WORD_SIZE
/* Intentionally omit RSP as it's context switched by hardware */
#define VCPU_RBP __VCPU_REGS_RBP * WORD_SIZE
#define VCPU_RSI __VCPU_REGS_RSI * WORD_SIZE
#define VCPU_RDI __VCPU_REGS_RDI * WORD_SIZE

#ifdef CONFIG_X86_64
#define VCPU_R8  __VCPU_REGS_R8  * WORD_SIZE
#define VCPU_R9  __VCPU_REGS_R9  * WORD_SIZE
#define VCPU_R10 __VCPU_REGS_R10 * WORD_SIZE
#define VCPU_R11 __VCPU_REGS_R11 * WORD_SIZE
#define VCPU_R12 __VCPU_REGS_R12 * WORD_SIZE
#define VCPU_R13 __VCPU_REGS_R13 * WORD_SIZE
#define VCPU_R14 __VCPU_REGS_R14 * WORD_SIZE
#define VCPU_R15 __VCPU_REGS_R15 * WORD_SIZE
#endif
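
/*
 * For illustration (not part of the original file): with the usual enum
 * values from kvm_vcpu_regs.h, a 64-bit build has WORD_SIZE == 8, so e.g.
 * __VCPU_REGS_RCX == 1 makes VCPU_RCX == 8, i.e. the guest's RCX lives at
 * byte offset 8 into the @regs array passed to __vmx_vcpu_run().
 */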

.section .noinstr.text, "ax"

/**
 * __vmx_vcpu_run - Run a vCPU via a transition to VMX guest mode
 * @vmx:   struct vcpu_vmx *
 * @regs:  unsigned long * (to guest registers)
 * @flags: VMX_RUN_VMRESUME: use VMRESUME instead of VMLAUNCH
 *         VMX_RUN_SAVE_SPEC_CTRL: save guest SPEC_CTRL into vmx->spec_ctrl
 *
 * Returns:
 *	0 on VM-Exit, 1 on VM-Fail
 */
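/*
 * Illustrative call site (a sketch, not copied verbatim from vmx.c): the C
 * side is expected to invoke this roughly as
 *	vmx->fail = __vmx_vcpu_run(vmx, (unsigned long *)&vcpu->arch.regs, flags);
 * with @flags assembled from the VMX_RUN_* bits in run_flags.h.
 */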
SYM_FUNC_START(__vmx_vcpu_run)
        push %_ASM_BP
        mov  %_ASM_SP, %_ASM_BP

        /* Save @vmx for SPEC_CTRL handling */
        push %_ASM_ARG1

        /* Save @flags for SPEC_CTRL handling */
        push %_ASM_ARG3

        /*
         * Save @regs, _ASM_ARG2 may be modified by vmx_update_host_rsp() and
         * @regs is needed after VM-Exit to save the guest's register values.
         */
        push %_ASM_ARG2

        /* Copy @flags to BL, _ASM_ARG3 is volatile. */
        mov %_ASM_ARG3B, %bl

        lea (%_ASM_SP), %_ASM_ARG2
        call vmx_update_host_rsp

        /* Load @regs to RAX. */
        mov (%_ASM_SP), %_ASM_AX

        /* Check if vmlaunch or vmresume is needed */
        bt $VMX_RUN_VMRESUME_SHIFT, %bx
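
        /*
         * The bit test above leaves its result in EFLAGS.CF rather than ZF,
         * so it survives both the guest register loads below and the
         * ZF-clobbering CPU-buffer clearing done just before VM-Enter; the
         * JNC below consumes it.
         */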

        /* Load guest registers. Don't clobber flags. */
        mov VCPU_RCX(%_ASM_AX), %_ASM_CX
        mov VCPU_RDX(%_ASM_AX), %_ASM_DX
        mov VCPU_RBX(%_ASM_AX), %_ASM_BX
        mov VCPU_RBP(%_ASM_AX), %_ASM_BP
        mov VCPU_RSI(%_ASM_AX), %_ASM_SI
        mov VCPU_RDI(%_ASM_AX), %_ASM_DI
#ifdef CONFIG_X86_64
        mov VCPU_R8 (%_ASM_AX),  %r8
        mov VCPU_R9 (%_ASM_AX),  %r9
        mov VCPU_R10(%_ASM_AX), %r10
        mov VCPU_R11(%_ASM_AX), %r11
        mov VCPU_R12(%_ASM_AX), %r12
        mov VCPU_R13(%_ASM_AX), %r13
        mov VCPU_R14(%_ASM_AX), %r14
        mov VCPU_R15(%_ASM_AX), %r15
#endif

        /* Load guest RAX. This kills the @regs pointer! */
        mov VCPU_RAX(%_ASM_AX), %_ASM_AX

        /* Clobbers EFLAGS.ZF */
        CLEAR_CPU_BUFFERS

        /* Check EFLAGS.CF from the VMX_RUN_VMRESUME bit test above. */
        jnc .Lvmlaunch

        /*
         * After a successful VMRESUME/VMLAUNCH, control flow "magically"
         * resumes below at 'vmx_vmexit' due to the VMCS HOST_RIP setting.
         * So this isn't a typical function and objtool needs to be told to
         * save the unwind state here and restore it below.
         */
        UNWIND_HINT_SAVE

        /*
         * If VMRESUME/VMLAUNCH and corresponding vmexit succeed, execution resumes at
         * the 'vmx_vmexit' label below.
         */
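.Lvmresume:
        vmresume
        jmp .Lvmfail

.Lvmlaunch:
        vmlaunch
        jmp .Lvmfail
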
        _ASM_EXTABLE(.Lvmresume, .Lfixup)
        _ASM_EXTABLE(.Lvmlaunch, .Lfixup)
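
        /*
         * The exception-table entries above route a faulting VMLAUNCH or
         * VMRESUME (e.g. VMX already disabled because KVM is rebooting) to
         * the .Lfixup handler, which checks kvm_rebooting below.
         */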

SYM_INNER_LABEL(vmx_vmexit, SYM_L_GLOBAL)

        /* Restore unwind state from before the VMRESUME/VMLAUNCH. */
        UNWIND_HINT_RESTORE

        /* Temporarily save guest's RAX. */
        push %_ASM_AX

        /* Reload @regs to RAX. */
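        /*
         * Guest RAX was pushed just above, so the @regs pointer saved before
         * VM-Enter now sits one word up the stack, hence the WORD_SIZE
         * displacement below.
         */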
        mov WORD_SIZE(%_ASM_SP), %_ASM_AX

        /* Save all guest registers, including RAX from the stack */
        pop VCPU_RAX(%_ASM_AX)
        mov %_ASM_CX, VCPU_RCX(%_ASM_AX)
        mov %_ASM_DX, VCPU_RDX(%_ASM_AX)
        mov %_ASM_BX, VCPU_RBX(%_ASM_AX)
        mov %_ASM_BP, VCPU_RBP(%_ASM_AX)
        mov %_ASM_SI, VCPU_RSI(%_ASM_AX)
        mov %_ASM_DI, VCPU_RDI(%_ASM_AX)
#ifdef CONFIG_X86_64
        mov %r8,  VCPU_R8 (%_ASM_AX)
        mov %r9,  VCPU_R9 (%_ASM_AX)
        mov %r10, VCPU_R10(%_ASM_AX)
        mov %r11, VCPU_R11(%_ASM_AX)
        mov %r12, VCPU_R12(%_ASM_AX)
        mov %r13, VCPU_R13(%_ASM_AX)
        mov %r14, VCPU_R14(%_ASM_AX)
        mov %r15, VCPU_R15(%_ASM_AX)
#endif

        /* Clear return value to indicate VM-Exit (as opposed to VM-Fail). */
        xor %ebx, %ebx

        /*
         * Clear all general purpose registers except RSP and RBX to prevent
         * speculative use of the guest's values, even those that are reloaded
         * via the stack. In theory, an L1 cache miss when restoring registers
         * could lead to speculative execution with the guest's values.
         * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
         * free. RSP and RBX are exempt as RSP is restored by hardware during
         * VM-Exit and RBX is explicitly loaded with 0 or 1 to hold the return
         * value.
         */
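        /*
         * The zeroing sequence the comment above describes (reconstructed
         * sketch; the 64-bit-only registers are guarded by CONFIG_X86_64).
         */
        xor %eax, %eax
        xor %ecx, %ecx
        xor %edx, %edx
        xor %ebp, %ebp
        xor %esi, %esi
        xor %edi, %edi
#ifdef CONFIG_X86_64
        xor %r8d,  %r8d
        xor %r9d,  %r9d
        xor %r10d, %r10d
        xor %r11d, %r11d
        xor %r12d, %r12d
        xor %r13d, %r13d
        xor %r14d, %r14d
        xor %r15d, %r15d
#endif
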
        /* Discard the saved @regs slot; @flags and @vmx are popped below. */
        add $WORD_SIZE, %_ASM_SP

        /*
         * IMPORTANT: RSB filling and SPEC_CTRL handling must be done before
         * the first unbalanced RET after vmexit!
         *
         * For retpoline or IBRS, RSB filling is needed to prevent poisoned RSB
         * entries and (in some cases) RSB underflow.
         *
         * eIBRS has its own protection against poisoned RSB, so it doesn't
         * need the RSB filling sequence. But it does need to be enabled, and a
         * single call to retire, before the first unbalanced RET.
         */
        FILL_RETURN_BUFFER %_ASM_CX, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_VMEXIT,\
                           X86_FEATURE_RSB_VMEXIT_LITE

        pop %_ASM_ARG2        /* @flags */
        pop %_ASM_ARG1        /* @vmx */

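        /*
         * Assumed C prototype of the callee (defined in vmx.c):
         *	void vmx_spec_ctrl_restore_host(struct vcpu_vmx *vmx, unsigned int flags);
         * i.e. @vmx and @flags popped above become arg1 and arg2.
         */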
        call vmx_spec_ctrl_restore_host

        /* Put return value in AX */
        mov %_ASM_BX, %_ASM_AX

.Lfixup:
        cmpb $0, kvm_rebooting
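        /*
         * Likely continuation (sketch): a failed VMLAUNCH/VMRESUME is only
         * expected while KVM is rebooting (VMX already disabled), in which
         * case it is reported as VM-Fail; anything else is a fatal bug.
         */
        jne .Lvmfail
        ud2

.Lvmfail: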
        /* VM-Fail: set return value to 1 */
        mov $1, %_ASM_BX

SYM_FUNC_END(__vmx_vcpu_run)

/**
 * vmread_error_trampoline - Trampoline from inline asm to vmread_error()
 * @field: VMCS field encoding that failed
 * @fault: %true if the VMREAD faulted, %false if it failed
 *
 * Save and restore volatile registers across a call to vmread_error(). Note,
 * all parameters are passed on the stack.
 */
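/*
 * Assumed C prototype of the target (see vmx.c); parameter names follow the
 * kernel-doc above:
 *	asmlinkage void vmread_error(unsigned long field, bool fault);
 */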
SYM_FUNC_START(vmread_error_trampoline)
        push %_ASM_BP
        mov  %_ASM_SP, %_ASM_BP

#ifdef CONFIG_X86_64
        /* Load @field and @fault to arg1 and arg2 respectively. */
        mov 3*WORD_SIZE(%rbp), %_ASM_ARG2
        mov 2*WORD_SIZE(%rbp), %_ASM_ARG1
#else
        /* Parameters are passed on the stack for 32-bit (see asmlinkage). */
        push 3*WORD_SIZE(%ebp)
        push 2*WORD_SIZE(%ebp)
#endif

        call vmread_error

#ifndef CONFIG_X86_64
        /* Pop the two parameters that were pushed on the stack above. */
        add $8, %esp
#endif

        /* Zero out @fault, which will be popped into the result register. */
        _ASM_MOV $0, 3*WORD_SIZE(%_ASM_BP)

SYM_FUNC_END(vmread_error_trampoline)

SYM_FUNC_START(vmx_do_interrupt_nmi_irqoff)
        /*
         * Unconditionally create a stack frame, getting the correct RSP on the
         * stack (for x86-64) would take two instructions anyways, and RBP can
         * be used to restore RSP to make objtool happy (see below).
         */
        push %_ASM_BP
        mov %_ASM_SP, %_ASM_BP

        /*
         * Align RSP to a 16-byte boundary (to emulate CPU behavior) before
         * creating the synthetic interrupt stack frame for the IRQ/NMI.
         */
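        /*
         * Sketch of the frame construction implied by the comment above (the
         * exact instructions are assumptions): align the stack and push the
         * SS:RSP half of the synthetic IRET frame on 64-bit, then RFLAGS and
         * CS; the indirect CALL below supplies the return RIP.
         */
#ifdef CONFIG_X86_64
        and  $-16, %rsp
        push $__KERNEL_DS
        push %rbp
#endif
        pushf
        push $__KERNEL_CS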
        CALL_NOSPEC _ASM_ARG1

327 * "Restore" RSP from RBP, even though IRET has already unwound RSP to
328 * the correct value. objtool doesn't know the callee will IRET and,
329 * without the explicit restore, thinks the stack is getting walloped.
330 * Using an unwind hint is problematic due to x86-64's dynamic alignment.
        mov %_ASM_BP, %_ASM_SP
        pop %_ASM_BP
        RET
SYM_FUNC_END(vmx_do_interrupt_nmi_irqoff)