1 /* SPDX-License-Identifier: GPL-2.0 */
2 #include <linux/linkage.h>
4 #include <asm/bitsperlong.h>
5 #include <asm/kvm_vcpu_regs.h>
6 #include <asm/nospec-branch.h>
#define WORD_SIZE (BITS_PER_LONG / 8)

/*
 * Byte offset of each GPR within the @regs array handed to
 * __svm_vcpu_run, computed from the __VCPU_REGS_* enum indices
 * (offset = index * sizeof(unsigned long)).
 */

/* Intentionally omit RAX as it's context switched by hardware */
#define VCPU_RCX __VCPU_REGS_RCX * WORD_SIZE
#define VCPU_RDX __VCPU_REGS_RDX * WORD_SIZE
#define VCPU_RBX __VCPU_REGS_RBX * WORD_SIZE
/* Intentionally omit RSP as it's context switched by hardware */
#define VCPU_RBP __VCPU_REGS_RBP * WORD_SIZE
#define VCPU_RSI __VCPU_REGS_RSI * WORD_SIZE
#define VCPU_RDI __VCPU_REGS_RDI * WORD_SIZE

/*
 * R8-R15 exist only on x86-64.
 * NOTE(review): the CONFIG_X86_64 guard around these appears to be
 * elided from this excerpt — confirm against the full file.
 */
#define VCPU_R8 __VCPU_REGS_R8 * WORD_SIZE
#define VCPU_R9 __VCPU_REGS_R9 * WORD_SIZE
#define VCPU_R10 __VCPU_REGS_R10 * WORD_SIZE
#define VCPU_R11 __VCPU_REGS_R11 * WORD_SIZE
#define VCPU_R12 __VCPU_REGS_R12 * WORD_SIZE
#define VCPU_R13 __VCPU_REGS_R13 * WORD_SIZE
#define VCPU_R14 __VCPU_REGS_R14 * WORD_SIZE
#define VCPU_R15 __VCPU_REGS_R15 * WORD_SIZE
30 .section .noinstr.text, "ax"
33 * __svm_vcpu_run - Run a vCPU via a transition to SVM guest mode
34 * @vmcb_pa: unsigned long
35 * @regs: unsigned long * (to guest registers)
SYM_FUNC_START(__svm_vcpu_run)
	/*
	 * NOTE(review): this excerpt elides several spans of the function
	 * (prologue saves/pushes, the pop of @vmcb, the VMRUN sequence,
	 * the register-clearing XORs, and the epilogue).  Only comments
	 * were added below; all visible instructions are unchanged.
	 */

	/* Move @regs to RAX so the VCPU_* offsets can index off of it. */
	mov %_ASM_ARG2, %_ASM_AX

	/*
	 * Load guest GPRs from the @regs array.  RAX and RSP are
	 * intentionally skipped: per the comments at the top of this
	 * file, they are context switched by hardware.
	 */
	mov VCPU_RCX(%_ASM_AX), %_ASM_CX
	mov VCPU_RDX(%_ASM_AX), %_ASM_DX
	mov VCPU_RBX(%_ASM_AX), %_ASM_BX
	mov VCPU_RBP(%_ASM_AX), %_ASM_BP
	mov VCPU_RSI(%_ASM_AX), %_ASM_SI
	mov VCPU_RDI(%_ASM_AX), %_ASM_DI

	/* R8-R15: x86-64 only (guard elided from this excerpt). */
	mov VCPU_R8 (%_ASM_AX), %r8
	mov VCPU_R9 (%_ASM_AX), %r9
	mov VCPU_R10(%_ASM_AX), %r10
	mov VCPU_R11(%_ASM_AX), %r11
	mov VCPU_R12(%_ASM_AX), %r12
	mov VCPU_R13(%_ASM_AX), %r13
	mov VCPU_R14(%_ASM_AX), %r14
	mov VCPU_R15(%_ASM_AX), %r15

	/* "POP" @vmcb to RAX. */
	/* NOTE(review): the pop instruction itself is elided here. */

	/* Enter guest mode */
	/* NOTE(review): the VMRUN sequence is elided here. */

#ifdef CONFIG_RETPOLINE
	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
	/* NOTE(review): the matching #endif is elided from this excerpt. */

	/* "POP" @regs to RAX. */

	/*
	 * Save all guest GPRs back into the @regs array; RAX and RSP are
	 * again exempt (handled by hardware, per the offset definitions).
	 */
	mov %_ASM_CX, VCPU_RCX(%_ASM_AX)
	mov %_ASM_DX, VCPU_RDX(%_ASM_AX)
	mov %_ASM_BX, VCPU_RBX(%_ASM_AX)
	mov %_ASM_BP, VCPU_RBP(%_ASM_AX)
	mov %_ASM_SI, VCPU_RSI(%_ASM_AX)
	mov %_ASM_DI, VCPU_RDI(%_ASM_AX)

	mov %r8, VCPU_R8 (%_ASM_AX)
	mov %r9, VCPU_R9 (%_ASM_AX)
	mov %r10, VCPU_R10(%_ASM_AX)
	mov %r11, VCPU_R11(%_ASM_AX)
	mov %r12, VCPU_R12(%_ASM_AX)
	mov %r13, VCPU_R13(%_ASM_AX)
	mov %r14, VCPU_R14(%_ASM_AX)
	mov %r15, VCPU_R15(%_ASM_AX)

 * Clear all general purpose registers except RSP and RAX to prevent
 * speculative use of the guest's values, even those that are reloaded
 * via the stack. In theory, an L1 cache miss when restoring registers
 * could lead to speculative execution with the guest's values.
 * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
 * free. RSP and RAX are exempt as they are restored by hardware

	/*
	 * NOTE(review): the comment block above is an interior fragment
	 * (its opening and closing delimiters, and the XOR instructions it
	 * describes, are elided from this excerpt).
	 *
	 * Label 3 checks kvm_rebooting — presumably the VMRUN fault path,
	 * where a fault is tolerated iff a reboot/shutdown is in progress;
	 * the surrounding fixup/branch code is elided.  TODO confirm
	 * against the full file.
	 */
3: cmpb $0, kvm_rebooting
SYM_FUNC_END(__svm_vcpu_run)
162 * __svm_sev_es_vcpu_run - Run a SEV-ES vCPU via a transition to SVM guest mode
163 * @vmcb_pa: unsigned long
SYM_FUNC_START(__svm_sev_es_vcpu_run)
	/*
	 * NOTE(review): this excerpt elides the prologue, the VMRUN
	 * sequence, and the epilogue/fixup code.  Only comments were
	 * added; all visible instructions are unchanged.
	 *
	 * Unlike __svm_vcpu_run above, no guest GPRs are loaded or saved
	 * here — presumably because SEV-ES guest register state is not
	 * accessible to the host; TODO confirm against the full file.
	 */

	/* Move @vmcb to RAX (first function argument). */
	mov %_ASM_ARG1, %_ASM_AX

	/* Enter guest mode */
	/* NOTE(review): the VMRUN sequence is elided here. */

#ifdef CONFIG_RETPOLINE
	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
	/* NOTE(review): the matching #endif is elided from this excerpt. */

	/*
	 * Label 3 checks kvm_rebooting — presumably the VMRUN fault path,
	 * mirroring __svm_vcpu_run; surrounding code elided.  TODO confirm.
	 */
3: cmpb $0, kvm_rebooting
SYM_FUNC_END(__svm_sev_es_vcpu_run)