/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/bitsperlong.h>
#include <asm/kvm_vcpu_regs.h>
#include <asm/nospec-branch.h>
#include "kvm-asm-offsets.h"

#define WORD_SIZE (BITS_PER_LONG / 8)

/* Intentionally omit RAX as it's context switched by hardware */
#define VCPU_RCX (SVM_vcpu_arch_regs + __VCPU_REGS_RCX * WORD_SIZE)
#define VCPU_RDX (SVM_vcpu_arch_regs + __VCPU_REGS_RDX * WORD_SIZE)
#define VCPU_RBX (SVM_vcpu_arch_regs + __VCPU_REGS_RBX * WORD_SIZE)
/* Intentionally omit RSP as it's context switched by hardware */
#define VCPU_RBP (SVM_vcpu_arch_regs + __VCPU_REGS_RBP * WORD_SIZE)
#define VCPU_RSI (SVM_vcpu_arch_regs + __VCPU_REGS_RSI * WORD_SIZE)
#define VCPU_RDI (SVM_vcpu_arch_regs + __VCPU_REGS_RDI * WORD_SIZE)

#ifdef CONFIG_X86_64
#define VCPU_R8 (SVM_vcpu_arch_regs + __VCPU_REGS_R8 * WORD_SIZE)
#define VCPU_R9 (SVM_vcpu_arch_regs + __VCPU_REGS_R9 * WORD_SIZE)
#define VCPU_R10 (SVM_vcpu_arch_regs + __VCPU_REGS_R10 * WORD_SIZE)
#define VCPU_R11 (SVM_vcpu_arch_regs + __VCPU_REGS_R11 * WORD_SIZE)
#define VCPU_R12 (SVM_vcpu_arch_regs + __VCPU_REGS_R12 * WORD_SIZE)
#define VCPU_R13 (SVM_vcpu_arch_regs + __VCPU_REGS_R13 * WORD_SIZE)
#define VCPU_R14 (SVM_vcpu_arch_regs + __VCPU_REGS_R14 * WORD_SIZE)
#define VCPU_R15 (SVM_vcpu_arch_regs + __VCPU_REGS_R15 * WORD_SIZE)
#endif

#define SVM_vmcb01_pa (SVM_vmcb01 + KVM_VMCB_pa)
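
/*
 * Each VCPU_<reg> macro is the byte offset of that register within
 * svm->vcpu.arch.regs[], so guest GPRs can be addressed as, e.g.,
 * VCPU_RCX(%_ASM_DI) while %_ASM_DI holds @svm.  SVM_vmcb01_pa is the
 * offset of svm->vmcb01.pa, the physical address of the vmcb01 page.
 */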

.section .noinstr.text, "ax"

.macro RESTORE_GUEST_SPEC_CTRL
	/* No need to do anything if SPEC_CTRL is unset or V_SPEC_CTRL is set */
	ALTERNATIVE_2 "", \
		"jmp 800f", X86_FEATURE_MSR_SPEC_CTRL, \
		"", X86_FEATURE_V_SPEC_CTRL
801:
.endm
.macro RESTORE_GUEST_SPEC_CTRL_BODY
800:
	/*
	 * SPEC_CTRL handling: if the guest's SPEC_CTRL value differs from the
	 * host's, write the MSR.  This is kept out-of-line so that the common
	 * case does not have to jump.
	 *
	 * IMPORTANT: To avoid RSB underflow attacks and any other nastiness,
	 * there must not be any returns or indirect branches between this
	 * code and vmentry.
	 */
	movl SVM_spec_ctrl(%_ASM_DI), %eax
	cmp PER_CPU_VAR(x86_spec_ctrl_current), %eax
	je 801b
	mov $MSR_IA32_SPEC_CTRL, %ecx
	xor %edx, %edx
	wrmsr
	jmp 801b
.endm
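
/*
 * The WRMSR in the body above uses the usual convention: %ecx holds the MSR
 * index (MSR_IA32_SPEC_CTRL) and %edx:%eax the 64-bit value, with the high
 * half zeroed.  It is only reached when svm->spec_ctrl differs from this
 * CPU's x86_spec_ctrl_current.
 */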

.macro RESTORE_HOST_SPEC_CTRL
	/* No need to do anything if SPEC_CTRL is unset or V_SPEC_CTRL is set */
	ALTERNATIVE_2 "", \
		"jmp 900f", X86_FEATURE_MSR_SPEC_CTRL, \
		"", X86_FEATURE_V_SPEC_CTRL
901:
.endm
.macro RESTORE_HOST_SPEC_CTRL_BODY
900:
	/* Same for after vmexit. */
	mov $MSR_IA32_SPEC_CTRL, %ecx

	/*
	 * Load the value that the guest had written into MSR_IA32_SPEC_CTRL,
	 * if it was not intercepted during guest execution.
	 */
	cmpb $0, (%_ASM_SP)
	jnz 998f
	rdmsr
	movl %eax, SVM_spec_ctrl(%_ASM_DI)
998:

	/* Now restore the host value of the MSR if different from the guest's. */
	movl PER_CPU_VAR(x86_spec_ctrl_current), %eax
	cmp SVM_spec_ctrl(%_ASM_DI), %eax
	je 901b
	xor %edx, %edx
	wrmsr
	jmp 901b
.endm
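
/*
 * In the body above, (%_ASM_SP) is @spec_ctrl_intercepted, pushed onto the
 * stack by the functions below.  If writes to MSR_IA32_SPEC_CTRL were not
 * intercepted, the guest may have changed the MSR behind KVM's back, so
 * RDMSR (index in %ecx, result in %edx:%eax) captures the live value into
 * svm->spec_ctrl before the host value is restored.
 */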

/**
 * __svm_vcpu_run - Run a vCPU via a transition to SVM guest mode
 * @svm: struct vcpu_svm *
 * @spec_ctrl_intercepted: bool
 */
SYM_FUNC_START(__svm_vcpu_run)
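	/*
	 * Per the kernel calling convention, @svm arrives in %_ASM_ARG1
	 * (%rdi on 64-bit, %eax on 32-bit) and @spec_ctrl_intercepted in
	 * %_ASM_ARG2 (%rsi on 64-bit, %edx on 32-bit).
	 */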
	/*
	 * Save variables needed after vmexit on the stack, in inverse
	 * order compared to when they are needed.
	 */

	/* Accessed directly from the stack in RESTORE_HOST_SPEC_CTRL. */
	push %_ASM_ARG2

	/* Needed to restore access to percpu variables. */
	__ASM_SIZE(push) PER_CPU_VAR(svm_data + SD_save_area_pa)

	/* Finally save @svm. */
	push %_ASM_ARG1
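
	/*
	 * From here until vmentry the top of the stack holds, in order:
	 * @svm, the per-CPU host save area PA, and @spec_ctrl_intercepted.
	 * @svm is popped first after #VMEXIT, the save area PA is popped for
	 * the host VMLOAD, and @spec_ctrl_intercepted is read last, in
	 * place, by RESTORE_HOST_SPEC_CTRL.
	 */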

.ifnc _ASM_ARG1, _ASM_DI
	/*
	 * Stash @svm in RDI early. On 32-bit, arguments are in RAX, RCX
	 * and RDX which are clobbered by RESTORE_GUEST_SPEC_CTRL.
	 */
	mov %_ASM_ARG1, %_ASM_DI
.endif
	/* Clobbers RAX, RCX, RDX. */
	RESTORE_GUEST_SPEC_CTRL

	/*
	 * Use a single vmcb (vmcb01 because it's always valid) for
	 * context switching guest state via VMLOAD/VMSAVE, that way
	 * the state doesn't need to be copied between vmcb01 and
	 * vmcb02 when switching vmcbs for nested virtualization.
	 */
	mov SVM_vmcb01_pa(%_ASM_DI), %_ASM_AX
1:	vmload %_ASM_AX
2:
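	/*
	 * VMLOAD takes the VMCB physical address in %_ASM_AX and loads the
	 * guest's FS/GS/TR/LDTR, KernelGSBase, STAR/LSTAR/CSTAR/SFMASK and
	 * SYSENTER state from it; the matching VMSAVE after #VMEXIT writes
	 * that state back.  Labels 1 and 2 feed the exception table entries
	 * at the bottom of the function.
	 */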

	/* Get svm->current_vmcb->pa into RAX. */
	mov SVM_current_vmcb(%_ASM_DI), %_ASM_AX
	mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX

	/* Load guest registers. */
	mov VCPU_RCX(%_ASM_DI), %_ASM_CX
	mov VCPU_RDX(%_ASM_DI), %_ASM_DX
	mov VCPU_RBX(%_ASM_DI), %_ASM_BX
	mov VCPU_RBP(%_ASM_DI), %_ASM_BP
	mov VCPU_RSI(%_ASM_DI), %_ASM_SI
#ifdef CONFIG_X86_64
	mov VCPU_R8 (%_ASM_DI), %r8
	mov VCPU_R9 (%_ASM_DI), %r9
	mov VCPU_R10(%_ASM_DI), %r10
	mov VCPU_R11(%_ASM_DI), %r11
	mov VCPU_R12(%_ASM_DI), %r12
	mov VCPU_R13(%_ASM_DI), %r13
	mov VCPU_R14(%_ASM_DI), %r14
	mov VCPU_R15(%_ASM_DI), %r15
#endif
	mov VCPU_RDI(%_ASM_DI), %_ASM_DI

	/* Enter guest mode */
	sti

3:	vmrun %_ASM_AX
4:
	cli
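	/*
	 * VMRUN consumed the VMCB physical address in %_ASM_AX, and on
	 * #VMEXIT hardware reloaded host RAX and RSP from the host save area
	 * while stashing the guest's RAX and RSP in the VMCB.  Every other
	 * GPR still holds a guest value at this point.
	 */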

	/* Pop @svm to RAX while it's the only available register. */
	pop %_ASM_AX

	/* Save all guest registers. */
	mov %_ASM_CX, VCPU_RCX(%_ASM_AX)
	mov %_ASM_DX, VCPU_RDX(%_ASM_AX)
	mov %_ASM_BX, VCPU_RBX(%_ASM_AX)
	mov %_ASM_BP, VCPU_RBP(%_ASM_AX)
	mov %_ASM_SI, VCPU_RSI(%_ASM_AX)
	mov %_ASM_DI, VCPU_RDI(%_ASM_AX)
#ifdef CONFIG_X86_64
	mov %r8, VCPU_R8 (%_ASM_AX)
	mov %r9, VCPU_R9 (%_ASM_AX)
	mov %r10, VCPU_R10(%_ASM_AX)
	mov %r11, VCPU_R11(%_ASM_AX)
	mov %r12, VCPU_R12(%_ASM_AX)
	mov %r13, VCPU_R13(%_ASM_AX)
	mov %r14, VCPU_R14(%_ASM_AX)
	mov %r15, VCPU_R15(%_ASM_AX)
#endif

	/* @svm can stay in RDI from now on. */
	mov %_ASM_AX, %_ASM_DI

	mov SVM_vmcb01_pa(%_ASM_DI), %_ASM_AX
5:	vmsave %_ASM_AX
6:

	/* Restores GSBASE among other things, allowing access to percpu data. */
	pop %_ASM_AX
7:	vmload %_ASM_AX
8:
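	/*
	 * The VMSAVE above wrote the guest's segment and syscall-MSR state
	 * back into vmcb01; the VMLOAD of the per-CPU save area then reloads
	 * the host's, including GSBASE, so PER_CPU_VAR() works again.
	 */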

#ifdef CONFIG_RETPOLINE
	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
#endif
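	/*
	 * FILL_RETURN_BUFFER stuffs the RSB with RSB_CLEAR_LOOPS benign
	 * entries (using %_ASM_AX as scratch) so that subsequent kernel
	 * returns cannot speculate to targets the guest may have planted.
	 */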

	/* Clobbers RAX, RCX, RDX. */
	RESTORE_HOST_SPEC_CTRL

	/*
	 * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
	 * untrained as soon as we exit the VM and are back to the
	 * kernel. This should be done before re-enabling interrupts
	 * because interrupt handlers won't sanitize 'ret' if the return is
	 * from the kernel.
	 */
	UNTRAIN_RET

	ALTERNATIVE "", "call entry_ibpb", X86_FEATURE_IBPB_ON_VMEXIT
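	/*
	 * When X86_FEATURE_IBPB_ON_VMEXIT is set (e.g. by the SRSO
	 * mitigation), entry_ibpb issues an IBPB so that branch predictions
	 * trained by the guest cannot steer host speculation after vmexit.
	 */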

	/*
	 * Clear all general purpose registers except RSP and RAX to prevent
	 * speculative use of the guest's values, even those that are reloaded
	 * via the stack. In theory, an L1 cache miss when restoring registers
	 * could lead to speculative execution with the guest's values.
	 * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
	 * free. RSP and RAX are exempt as they are restored by hardware
	 * during VM-Exit.
	 */

	/* "Pop" @spec_ctrl_intercepted. */

	RESTORE_GUEST_SPEC_CTRL_BODY
	RESTORE_HOST_SPEC_CTRL_BODY

10:	cmpb $0, kvm_rebooting
	jne 2b
	ud2
30:	cmpb $0, kvm_rebooting
	jne 4b
	ud2
50:	cmpb $0, kvm_rebooting
	jne 6b
	ud2
70:	cmpb $0, kvm_rebooting
	jne 8b
	ud2

	_ASM_EXTABLE(1b, 10b)
	_ASM_EXTABLE(3b, 30b)
	_ASM_EXTABLE(5b, 50b)
	_ASM_EXTABLE(7b, 70b)
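	/*
	 * The exception table entries route faults on VMLOAD/VMRUN/VMSAVE
	 * (labels 1, 3, 5 and 7) to the handlers above, which swallow the
	 * fault if KVM is shutting down (kvm_rebooting) and BUG via ud2
	 * otherwise.
	 */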

SYM_FUNC_END(__svm_vcpu_run)

/**
 * __svm_sev_es_vcpu_run - Run a SEV-ES vCPU via a transition to SVM guest mode
 * @svm: struct vcpu_svm *
 * @spec_ctrl_intercepted: bool
 */
SYM_FUNC_START(__svm_sev_es_vcpu_run)
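	/*
	 * For SEV-ES guests, hardware saves and restores the guest GPRs in
	 * the encrypted VMSA as part of VMRUN/#VMEXIT, so unlike
	 * __svm_vcpu_run this path never touches guest registers directly.
	 */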
	/*
	 * Save variables needed after vmexit on the stack, in inverse
	 * order compared to when they are needed.
	 */

	/* Accessed directly from the stack in RESTORE_HOST_SPEC_CTRL. */
	push %_ASM_ARG2

	/* Save @svm. */
	push %_ASM_ARG1

.ifnc _ASM_ARG1, _ASM_DI
	/*
	 * Stash @svm in RDI early. On 32-bit, arguments are in RAX, RCX
	 * and RDX which are clobbered by RESTORE_GUEST_SPEC_CTRL.
	 */
	mov %_ASM_ARG1, %_ASM_DI
.endif

	/* Clobbers RAX, RCX, RDX. */
	RESTORE_GUEST_SPEC_CTRL

	/* Get svm->current_vmcb->pa into RAX. */
	mov SVM_current_vmcb(%_ASM_DI), %_ASM_AX
	mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX

	/* Enter guest mode */
	sti

1:	vmrun %_ASM_AX

2:	cli

	/* Pop @svm to RDI, guest registers have been saved already. */
	pop %_ASM_DI

#ifdef CONFIG_RETPOLINE
	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RETPOLINE
#endif

	/* Clobbers RAX, RCX, RDX. */
	RESTORE_HOST_SPEC_CTRL

	/*
	 * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
	 * untrained as soon as we exit the VM and are back to the
	 * kernel. This should be done before re-enabling interrupts
	 * because interrupt handlers won't sanitize RET if the return is
	 * from the kernel.
	 */
	UNTRAIN_RET

	/* "Pop" @spec_ctrl_intercepted. */

	RESTORE_GUEST_SPEC_CTRL_BODY
	RESTORE_HOST_SPEC_CTRL_BODY

3:	cmpb $0, kvm_rebooting
	jne 2b
	ud2

	_ASM_EXTABLE(1b, 3b)

SYM_FUNC_END(__svm_sev_es_vcpu_run)