/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _ASM_X86_NOSPEC_BRANCH_H_
#define _ASM_X86_NOSPEC_BRANCH_H_

#include <linux/static_key.h>

#include <asm/alternative.h>
#include <asm/alternative-asm.h>
#include <asm/cpufeatures.h>
#include <asm/msr-index.h>

/*
 * Fill the CPU return stack buffer.
 *
 * Each entry in the RSB, if used for a speculative 'ret', contains an
 * infinite 'pause; lfence; jmp' loop to capture speculative execution.
 *
 * This is required in various cases for retpoline and IBRS-based
 * mitigations for the Spectre variant 2 vulnerability. Sometimes it is
 * needed to eliminate potentially bogus entries from the RSB, and
 * sometimes purely to ensure that the RSB doesn't become empty, which
 * on some CPUs would allow predictions from other (unwanted!) sources
 * to be used.
 *
 * We define a CPP macro such that it can be used from both .S files and
 * inline assembly. It's possible to do a .macro and then include that
 * from C via asm(".include <asm/nospec-branch.h>") but let's not go there.
 */

#define RSB_CLEAR_LOOPS		32	/* To forcibly overwrite all entries */
#define RSB_FILL_LOOPS		16	/* To avoid underflow */

/*
 * Google experimented with loop-unrolling and this turned out to be
 * the optimal version: two calls, each with their own speculation
 * trap should their return address end up getting used, in a loop.
 */
#define __FILL_RETURN_BUFFER(reg, nr, sp)	\
	mov	$(nr/2), reg;			\
771:						\
	call	772f;				\
773:	/* speculation trap */			\
	pause;					\
	lfence;					\
	jmp	773b;				\
772:						\
	call	774f;				\
775:	/* speculation trap */			\
	pause;					\
	lfence;					\
	jmp	775b;				\
774:						\
	dec	reg;				\
	jnz	771b;				\
	add	$(BITS_PER_LONG/8) * nr, sp;

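/*
 * Illustrative sketch, not an additional API: instantiated as, say,
 * __FILL_RETURN_BUFFER(%rax, RSB_CLEAR_LOOPS, %rsp) on 64-bit (the
 * register choice here is hypothetical), the nr/2 loop iterations each
 * execute two calls, pushing return addresses whose only reachable code
 * is a pause/lfence spin, so a speculative 'ret' consuming one of these
 * RSB entries is trapped harmlessly. The final 'add' drops the nr
 * accumulated return addresses (BITS_PER_LONG/8 bytes each) from the
 * real stack.
 */
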
#ifdef __ASSEMBLY__

/*
 * This should be used immediately before a retpoline alternative. It tells
 * objtool where the retpolines are so that it can make sense of the control
 * flow by just reading the original instruction(s) and ignoring the
 * alternatives.
 */
.macro ANNOTATE_NOSPEC_ALTERNATIVE
	.Lannotate_\@:
	.pushsection .discard.nospec
	.long .Lannotate_\@ - .
	.popsection
.endm

/*
 * This should be used immediately before an indirect jump/call. It tells
 * objtool the subsequent indirect jump/call is vouched safe for retpoline
 * builds.
 */
.macro ANNOTATE_RETPOLINE_SAFE
	.Lannotate_\@:
	.pushsection .discard.retpoline_safe
	_ASM_PTR .Lannotate_\@
	.popsection
.endm

/*
 * These are the bare retpoline primitives for indirect jmp and call.
 * Do not use these directly; they only exist to make the ALTERNATIVE
 * invocation below less ugly.
 */
.macro RETPOLINE_JMP reg:req
	call	.Ldo_rop_\@
.Lspec_trap_\@:
	pause
	lfence
	jmp	.Lspec_trap_\@
.Ldo_rop_\@:
	mov	\reg, (%_ASM_SP)
	ret
.endm

/*
 * This is a wrapper around RETPOLINE_JMP so the called function in reg
 * returns to the instruction after the macro.
 */
.macro RETPOLINE_CALL reg:req
	jmp	.Ldo_call_\@
.Ldo_retpoline_jmp_\@:
	RETPOLINE_JMP \reg
.Ldo_call_\@:
	call	.Ldo_retpoline_jmp_\@
.endm

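/*
 * For illustration only, this annotates the control flow of
 * RETPOLINE_JMP above: the 'call' pushes the address of the
 * pause/lfence spin as a return address, the 'mov' then overwrites that
 * return address on the stack with the real target in \reg, and 'ret'
 * performs the jump. Any speculation of the 'ret' is steered into the
 * spin by the RSB entry the 'call' created, so the indirect branch
 * predictor is never consulted.
 */
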
/*
 * JMP_NOSPEC and CALL_NOSPEC macros can be used instead of a simple
 * indirect jmp/call which may be susceptible to the Spectre variant 2
 * attack.
 */
.macro JMP_NOSPEC reg:req
#ifdef CONFIG_RETPOLINE
	ANNOTATE_NOSPEC_ALTERNATIVE
	ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *\reg),	\
		__stringify(RETPOLINE_JMP \reg), X86_FEATURE_RETPOLINE,	\
		__stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *\reg), X86_FEATURE_RETPOLINE_AMD
#else
	jmp	*\reg
#endif
.endm

.macro CALL_NOSPEC reg:req
#ifdef CONFIG_RETPOLINE
	ANNOTATE_NOSPEC_ALTERNATIVE
	ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; call *\reg),	\
		__stringify(RETPOLINE_CALL \reg), X86_FEATURE_RETPOLINE,\
		__stringify(lfence; ANNOTATE_RETPOLINE_SAFE; call *\reg), X86_FEATURE_RETPOLINE_AMD
#else
	call	*\reg
#endif
.endm

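/*
 * Hedged usage sketch (the symbol and register choice are hypothetical):
 * in a .S file an indirect call through %rax would be written as
 *
 *	movq	some_fn_ptr(%rip), %rax
 *	CALL_NOSPEC %rax
 *
 * and gets patched at boot into a plain call, a retpoline, or an
 * lfence-guarded call, depending on which RETPOLINE feature bit is set.
 */
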
/*
 * A simpler FILL_RETURN_BUFFER macro, so nobody has to use the CPP
 * monstrosity above manually.
 */
.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req
#ifdef CONFIG_RETPOLINE
	ANNOTATE_NOSPEC_ALTERNATIVE
	ALTERNATIVE "jmp .Lskip_rsb_\@",				\
		__stringify(__FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP))	\
		\ftr
.Lskip_rsb_\@:
#endif
.endm

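/*
 * Usage sketch, assuming a caller along the lines of the context-switch
 * path: pass a free scratch register, a loop count, and the feature bit
 * that gates the fill, e.g.
 *
 *	FILL_RETURN_BUFFER %ebx, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
 *
 * The whole sequence is skipped unless the feature bit is set.
 */
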
#else /* __ASSEMBLY__ */

#define ANNOTATE_NOSPEC_ALTERNATIVE				\
	"999:\n\t"						\
	".pushsection .discard.nospec\n\t"			\
	".long 999b - .\n\t"					\
	".popsection\n\t"

#define ANNOTATE_RETPOLINE_SAFE					\
	"999:\n\t"						\
	".pushsection .discard.retpoline_safe\n\t"		\
	_ASM_PTR " 999b\n\t"					\
	".popsection\n\t"

#if defined(CONFIG_X86_64) && defined(RETPOLINE)

/*
 * Since the inline asm uses the %V modifier, which only newer GCC
 * versions support, the 64-bit variant depends on RETPOLINE rather
 * than CONFIG_RETPOLINE.
 */
# define CALL_NOSPEC						\
	ANNOTATE_NOSPEC_ALTERNATIVE				\
	ALTERNATIVE(						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	"call __x86_indirect_thunk_%V[thunk_target]\n",		\
	X86_FEATURE_RETPOLINE)
# define THUNK_TARGET(addr) [thunk_target] "r" (addr)

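/*
 * Hedged usage sketch (the function pointer 'fn' is hypothetical): from
 * C, an indirect call through 'fn' takes the form
 *
 *	asm volatile(CALL_NOSPEC
 *		     : ASM_CALL_CONSTRAINT
 *		     : THUNK_TARGET(fn)
 *		     : "memory");
 *
 * The "r" constraint forces 'fn' into a register so that
 * %V[thunk_target] can name the matching __x86_indirect_thunk_<reg>.
 */
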
#elif defined(CONFIG_X86_32) && defined(CONFIG_RETPOLINE)
/*
 * For i386 we use the original ret-equivalent retpoline, because
 * otherwise we'll run out of registers. We don't care about CET
 * compatibility here anyway.
 */
# define CALL_NOSPEC						\
	ALTERNATIVE(						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	"       jmp    904f;\n"					\
	"       .align 16\n"					\
	"901:	call   903f;\n"					\
	"902:	pause;\n"					\
	"	lfence;\n"					\
	"       jmp    902b;\n"					\
	"       .align 16\n"					\
	"903:	lea    4(%%esp), %%esp;\n"			\
	"       pushl  %[thunk_target];\n"			\
	"       ret;\n"						\
	"       .align 16\n"					\
	"904:	call   901b;\n",				\
	X86_FEATURE_RETPOLINE)
# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
#else /* No retpoline for C / inline asm */
# define CALL_NOSPEC "call *%[thunk_target]\n"
# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
#endif

/* The Spectre V2 mitigation variants */
enum spectre_v2_mitigation {
	SPECTRE_V2_NONE,
	SPECTRE_V2_RETPOLINE_MINIMAL,
	SPECTRE_V2_RETPOLINE_MINIMAL_AMD,
	SPECTRE_V2_RETPOLINE_GENERIC,
	SPECTRE_V2_RETPOLINE_AMD,
	SPECTRE_V2_IBRS_ENHANCED,
};

/* The indirect branch speculation control variants */
enum spectre_v2_user_mitigation {
	SPECTRE_V2_USER_NONE,
	SPECTRE_V2_USER_STRICT,
	SPECTRE_V2_USER_STRICT_PREFERRED,
	SPECTRE_V2_USER_PRCTL,
	SPECTRE_V2_USER_SECCOMP,
};

/* The Speculative Store Bypass disable variants */
enum ssb_mitigation {
	SPEC_STORE_BYPASS_NONE,
	SPEC_STORE_BYPASS_DISABLE,
	SPEC_STORE_BYPASS_PRCTL,
	SPEC_STORE_BYPASS_SECCOMP,
};

extern char __indirect_thunk_start[];
extern char __indirect_thunk_end[];

/*
 * On VMEXIT we must ensure that no RSB predictions learned in the guest
 * can be followed in the host, by overwriting the RSB completely. Both
 * retpoline and IBRS mitigations for Spectre v2 need this; only on future
 * CPUs with IBRS_ALL *might* it be avoided.
 */
static inline void vmexit_fill_RSB(void)
{
#ifdef CONFIG_RETPOLINE
	unsigned long loops;

	asm volatile (ANNOTATE_NOSPEC_ALTERNATIVE
		      ALTERNATIVE("jmp 910f",
				  __stringify(__FILL_RETURN_BUFFER(%0, RSB_CLEAR_LOOPS, %1)),
				  X86_FEATURE_RETPOLINE)
		      "910:"
		      : "=r" (loops), ASM_CALL_CONSTRAINT
		      : : "memory" );
#endif
}

static __always_inline
void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
{
	/*
	 * WRMSR takes the MSR index in %ecx and the value in %edx:%eax;
	 * the ALTERNATIVE patches the instruction in only when the CPU
	 * has 'feature' set, and is a no-op otherwise.
	 */
	asm volatile(ALTERNATIVE("", "wrmsr", %c[feature])
		: : "c" (msr),
		    "a" ((u32)val),
		    "d" ((u32)(val >> 32)),
		    [feature] "i" (feature)
		: "memory");
}

static inline void indirect_branch_prediction_barrier(void)
{
	u64 val = PRED_CMD_IBPB;

	alternative_msr_write(MSR_IA32_PRED_CMD, val, X86_FEATURE_USE_IBPB);
}

/* The Intel SPEC CTRL MSR base value cache */
extern u64 x86_spec_ctrl_base;

/*
 * With retpoline, we must use IBRS to restrict branch prediction
 * before calling into firmware.
 *
 * (Implemented as CPP macros due to header hell.)
 */
#define firmware_restrict_branch_speculation_start()			\
do {									\
	u64 val = x86_spec_ctrl_base | SPEC_CTRL_IBRS;			\
									\
	preempt_disable();						\
	alternative_msr_write(MSR_IA32_SPEC_CTRL, val,			\
			      X86_FEATURE_USE_IBRS_FW);			\
} while (0)

#define firmware_restrict_branch_speculation_end()			\
do {									\
	u64 val = x86_spec_ctrl_base;					\
									\
	alternative_msr_write(MSR_IA32_SPEC_CTRL, val,			\
			      X86_FEATURE_USE_IBRS_FW);			\
	preempt_enable();						\
} while (0)

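/*
 * Hedged usage sketch (the firmware call shown is hypothetical): the
 * two macros are meant to bracket every firmware entry point, e.g.
 *
 *	firmware_restrict_branch_speculation_start();
 *	status = efi_runtime_call(...);
 *	firmware_restrict_branch_speculation_end();
 *
 * Preemption stays disabled across the window so the IBRS-elevated
 * SPEC_CTRL value cannot be inherited by an unrelated task.
 */
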
DECLARE_STATIC_KEY_FALSE(switch_to_cond_stibp);
DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);

DECLARE_STATIC_KEY_FALSE(mds_user_clear);
DECLARE_STATIC_KEY_FALSE(mds_idle_clear);

#include <asm/segment.h>

/**
 * mds_clear_cpu_buffers - Mitigation for the MDS and TAA vulnerabilities
 *
 * This uses the otherwise unused and obsolete VERW instruction in
 * combination with microcode which triggers a CPU buffer flush when the
 * instruction is executed.
 */
static __always_inline void mds_clear_cpu_buffers(void)
{
	static const u16 ds = __KERNEL_DS;

	/*
	 * Has to be the memory-operand variant because only that
	 * guarantees the CPU buffer flush functionality according to
	 * documentation. The register-operand variant does not.
	 * Works with any segment selector, but a valid writable
	 * data segment is the fastest variant.
	 *
	 * "cc" clobber is required because VERW modifies ZF.
	 */
	asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
}

/**
 * mds_user_clear_cpu_buffers - Mitigation for the MDS and TAA vulnerabilities
 *
 * Clear CPU buffers if the corresponding static key is enabled.
 */
static __always_inline void mds_user_clear_cpu_buffers(void)
{
	if (static_branch_likely(&mds_user_clear))
		mds_clear_cpu_buffers();
}

/**
 * mds_idle_clear_cpu_buffers - Mitigation for the MDS vulnerability
 *
 * Clear CPU buffers if the corresponding static key is enabled.
 */
static inline void mds_idle_clear_cpu_buffers(void)
{
	if (static_branch_likely(&mds_idle_clear))
		mds_clear_cpu_buffers();
}

#endif /* __ASSEMBLY__ */

/*
 * Below is used in the eBPF JIT compiler and emits the byte sequence
 * for the following assembly:
 *
 * With retpolines configured:
 *
 *    callq do_rop
 *  spec_trap:
 *    pause
 *    lfence
 *    jmp spec_trap
 *  do_rop:
 *    mov %rax,(%rsp)
 *    retq
 *
 * Without retpolines configured:
 *
 *    jmp *%rax
 */
#ifdef CONFIG_RETPOLINE
# define RETPOLINE_RAX_BPF_JIT_SIZE	17
# define RETPOLINE_RAX_BPF_JIT()				\
	EMIT1_off32(0xE8, 7);	 /* callq do_rop */		\
	/* spec_trap: */					\
	EMIT2(0xF3, 0x90);       /* pause */			\
	EMIT3(0x0F, 0xAE, 0xE8); /* lfence */			\
	EMIT2(0xEB, 0xF9);       /* jmp spec_trap */		\
	/* do_rop: */						\
	EMIT4(0x48, 0x89, 0x04, 0x24); /* mov %rax,(%rsp) */	\
	EMIT1(0xC3);             /* retq */
#else
# define RETPOLINE_RAX_BPF_JIT_SIZE	2
# define RETPOLINE_RAX_BPF_JIT()				\
	EMIT2(0xFF, 0xE0);       /* jmp *%rax */
#endif

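/*
 * Byte-level sanity check (informational, derived from the opcodes
 * above): EMIT1_off32(0xE8, 7) emits 'e8 07 00 00 00', a call whose
 * rel32 of 7 skips exactly the trap body (pause = 2 bytes, lfence = 3,
 * jmp = 2) and lands on the mov at do_rop. Summing 5 + 2 + 3 + 2 + 4 + 1
 * gives the RETPOLINE_RAX_BPF_JIT_SIZE of 17.
 */
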
#endif /* _ASM_X86_NOSPEC_BRANCH_H_ */