/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _ASM_X86_NOSPEC_BRANCH_H_
#define _ASM_X86_NOSPEC_BRANCH_H_

#include <linux/static_key.h>

#include <asm/alternative.h>
#include <asm/alternative-asm.h>
#include <asm/cpufeatures.h>
#include <asm/msr-index.h>
#include <asm/percpu.h>

/*
 * Fill the CPU return stack buffer.
 *
 * Each entry in the RSB, if used for a speculative 'ret', contains an
 * infinite 'pause; lfence; jmp' loop to capture speculative execution.
 *
 * This is required in various cases for retpoline and IBRS-based
 * mitigations for the Spectre variant 2 vulnerability. Sometimes to
 * eliminate potentially bogus entries from the RSB, and sometimes
 * purely to ensure that it doesn't get empty, which on some CPUs would
 * allow predictions from other (unwanted!) sources to be used.
 *
 * We define a CPP macro such that it can be used from both .S files and
 * inline assembly. It's possible to do a .macro and then include that
 * from C via asm(".include <asm/nospec-branch.h>") but let's not go there.
 */

#define RSB_CLEAR_LOOPS		32	/* To forcibly overwrite all entries */
#define RSB_FILL_LOOPS		16	/* To avoid underflow */

/*
 * Google experimented with loop-unrolling and this turned out to be
 * the optimal version - two calls, each with their own speculation
 * trap should their return address end up getting used, in a loop.
 */
#ifdef CONFIG_X86_64
#define __FILL_RETURN_BUFFER(reg, nr, sp)	\
	mov	$(nr/2), reg;			\
771:						\
	call	772f;				\
773:	/* speculation trap */			\
	pause;					\
	lfence;					\
	jmp	773b;				\
772:						\
	call	774f;				\
775:	/* speculation trap */			\
	pause;					\
	lfence;					\
	jmp	775b;				\
774:						\
	dec	reg;				\
	jnz	771b;				\
	add	$(BITS_PER_LONG/8) * nr, sp;	\
	/* barrier for jnz misprediction */	\
	lfence;
#else
/*
 * i386 doesn't unconditionally have LFENCE, as such it can't
 * do a loop.
 */
#define __FILL_RETURN_BUFFER(reg, nr, sp)	\
	.rept nr;				\
	call	772f;				\
	int3;					\
772:						\
	.endr;					\
	add	$(BITS_PER_LONG/8) * nr, sp;
#endif

#define ISSUE_UNBALANCED_RET_GUARD(sp)		\
	call 992f;				\
	int3;					\
992:						\
	add $(BITS_PER_LONG/8), sp;		\
	lfence;

#ifdef __ASSEMBLY__

/*
 * This should be used immediately before a retpoline alternative. It tells
 * objtool where the retpolines are so that it can make sense of the control
 * flow by just reading the original instruction(s) and ignoring the
 * alternatives.
 */
.macro ANNOTATE_NOSPEC_ALTERNATIVE
	.Lannotate_\@:
	.pushsection .discard.nospec
	.long .Lannotate_\@ - .
	.popsection
.endm

/*
 * This should be used immediately before an indirect jump/call. It tells
 * objtool the subsequent indirect jump/call is vouched safe for retpoline
 * builds.
 */
.macro ANNOTATE_RETPOLINE_SAFE
	.Lannotate_\@:
	.pushsection .discard.retpoline_safe
	_ASM_PTR .Lannotate_\@
	.popsection
.endm

/*
 * These are the bare retpoline primitives for indirect jmp and call.
 * Do not use these directly; they only exist to make the ALTERNATIVE
 * invocation below less ugly.
 */
.macro RETPOLINE_JMP reg:req
	call	.Ldo_rop_\@
.Lspec_trap_\@:
	pause
	lfence
	jmp	.Lspec_trap_\@
.Ldo_rop_\@:
	mov	\reg, (%_ASM_SP)
	ret
.endm

/*
 * This is a wrapper around RETPOLINE_JMP so the called function in reg
 * returns to the instruction after the macro.
 */
.macro RETPOLINE_CALL reg:req
	jmp	.Ldo_call_\@
.Ldo_retpoline_jmp_\@:
	RETPOLINE_JMP	\reg
.Ldo_call_\@:
	call	.Ldo_retpoline_jmp_\@
.endm

/*
 * JMP_NOSPEC and CALL_NOSPEC macros can be used instead of a simple
 * indirect jmp/call which may be susceptible to the Spectre variant 2
 * attack.
 */
.macro JMP_NOSPEC reg:req
#ifdef CONFIG_RETPOLINE
	ANNOTATE_NOSPEC_ALTERNATIVE
	ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; jmp *\reg),	\
		__stringify(RETPOLINE_JMP \reg), X86_FEATURE_RETPOLINE,	\
		__stringify(lfence; ANNOTATE_RETPOLINE_SAFE; jmp *\reg), X86_FEATURE_RETPOLINE_LFENCE
#else
	jmp	*\reg
#endif
.endm

.macro CALL_NOSPEC reg:req
#ifdef CONFIG_RETPOLINE
	ANNOTATE_NOSPEC_ALTERNATIVE
	ALTERNATIVE_2 __stringify(ANNOTATE_RETPOLINE_SAFE; call *\reg),	\
		__stringify(RETPOLINE_CALL \reg), X86_FEATURE_RETPOLINE,\
		__stringify(lfence; ANNOTATE_RETPOLINE_SAFE; call *\reg), X86_FEATURE_RETPOLINE_LFENCE
#else
	call	*\reg
#endif
.endm
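
/*
 * Illustrative use from assembly (a sketch, not part of the original
 * header); the register choice and symbol name are hypothetical:
 *
 *	mov	some_func_ptr(%rip), %rax	# hypothetical pointer load
 *	CALL_NOSPEC %rax
 *
 * The alternatives machinery then emits a plain indirect call, the
 * retpoline sequence, or LFENCE plus an indirect call, depending on
 * which CPU feature bits are set.
 */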

/*
 * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
 * monstrosity above, manually.
 */
.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req
	ANNOTATE_NOSPEC_ALTERNATIVE
	ALTERNATIVE "jmp .Lskip_rsb_\@",				\
		__stringify(__FILL_RETURN_BUFFER(\reg,\nr,%_ASM_SP))	\
		\ftr
.Lskip_rsb_\@:
.endm
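
/*
 * Illustrative use (a sketch, not part of the original header): entry
 * code stuffs the RSB on context switch along the lines of
 *
 *	FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
 *
 * using a scratch register, with the feature bit gating the alternative.
 */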

#else /* __ASSEMBLY__ */

#define ANNOTATE_NOSPEC_ALTERNATIVE				\
	"999:\n\t"						\
	".pushsection .discard.nospec\n\t"			\
	".long 999b - .\n\t"					\
	".popsection\n\t"

#define ANNOTATE_RETPOLINE_SAFE					\
	"999:\n\t"						\
	".pushsection .discard.retpoline_safe\n\t"		\
	_ASM_PTR " 999b\n\t"					\
	".popsection\n\t"

#ifdef CONFIG_RETPOLINE
#ifdef CONFIG_X86_64

/*
 * Inline asm uses the %V modifier, which is only available in newer
 * GCC; that is ensured when CONFIG_RETPOLINE is defined.
 */
# define CALL_NOSPEC						\
	ANNOTATE_NOSPEC_ALTERNATIVE				\
	ALTERNATIVE_2(						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	"call __x86_indirect_thunk_%V[thunk_target]\n",		\
	X86_FEATURE_RETPOLINE,					\
	"lfence;\n"						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	X86_FEATURE_RETPOLINE_LFENCE)

# define THUNK_TARGET(addr) [thunk_target] "r" (addr)

#else /* CONFIG_X86_32 */
/*
 * For i386 we use the original ret-equivalent retpoline, because
 * otherwise we'll run out of registers. We don't care about CET
 * here, anyway.
 */
# define CALL_NOSPEC						\
	ANNOTATE_NOSPEC_ALTERNATIVE				\
	ALTERNATIVE_2(						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	"       jmp    904f;\n"					\
	"       .align 16\n"					\
	"901:	call   903f;\n"					\
	"902:	pause;\n"					\
	"       lfence;\n"					\
	"       jmp    902b;\n"					\
	"       .align 16\n"					\
	"903:	lea    4(%%esp), %%esp;\n"			\
	"       pushl  %[thunk_target];\n"			\
	"       ret;\n"						\
	"       .align 16\n"					\
	"904:	call   901b;\n",				\
	X86_FEATURE_RETPOLINE,					\
	"lfence;\n"						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	X86_FEATURE_RETPOLINE_LFENCE)

# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
#endif
#else /* No retpoline for C / inline asm */
# define CALL_NOSPEC "call *%[thunk_target]\n"
# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
#endif
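
/*
 * Illustrative use from C (a sketch, not part of the original header):
 *
 *	void (*fn)(void) = ...;
 *	asm volatile(CALL_NOSPEC : : THUNK_TARGET(fn) : "memory");
 *
 * THUNK_TARGET() supplies the [thunk_target] operand with the constraint
 * each configuration needs: "r" on 64-bit, where the %V modifier requires
 * a register, and "rm" otherwise.
 */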

/* The Spectre V2 mitigation variants */
enum spectre_v2_mitigation {
	SPECTRE_V2_NONE,
	SPECTRE_V2_RETPOLINE,
	SPECTRE_V2_LFENCE,
	SPECTRE_V2_EIBRS,
	SPECTRE_V2_EIBRS_RETPOLINE,
	SPECTRE_V2_EIBRS_LFENCE,
	SPECTRE_V2_IBRS,
};

/* The indirect branch speculation control variants */
enum spectre_v2_user_mitigation {
	SPECTRE_V2_USER_NONE,
	SPECTRE_V2_USER_STRICT,
	SPECTRE_V2_USER_STRICT_PREFERRED,
	SPECTRE_V2_USER_PRCTL,
	SPECTRE_V2_USER_SECCOMP,
};

/* The Speculative Store Bypass disable variants */
enum ssb_mitigation {
	SPEC_STORE_BYPASS_NONE,
	SPEC_STORE_BYPASS_DISABLE,
	SPEC_STORE_BYPASS_PRCTL,
	SPEC_STORE_BYPASS_SECCOMP,
};

extern char __indirect_thunk_start[];
extern char __indirect_thunk_end[];

/*
 * On VMEXIT we must ensure that no RSB predictions learned in the guest
 * can be followed in the host, by overwriting the RSB completely. Both
 * retpoline and IBRS mitigations for Spectre v2 need this; only on future
 * CPUs with IBRS_ALL *might* it be avoided.
 */
static __always_inline void vmexit_fill_RSB(void)
{
	unsigned long loops;

	asm volatile (ANNOTATE_NOSPEC_ALTERNATIVE
		      ALTERNATIVE_2("jmp 910f", "", X86_FEATURE_RSB_VMEXIT,
				    "jmp 911f", X86_FEATURE_RSB_VMEXIT_LITE)
		      __stringify(__FILL_RETURN_BUFFER(%0, RSB_CLEAR_LOOPS, %1))
		      "911:"
		      __stringify(ISSUE_UNBALANCED_RET_GUARD(%1))
		      "910:"
		      : "=r" (loops), ASM_CALL_CONSTRAINT
		      : : "memory" );
}

static __always_inline
void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
{
	asm volatile(ALTERNATIVE("", "wrmsr", %c[feature])
		: : "c" (msr),
		    "a" ((u32)val),
		    "d" ((u32)(val >> 32)),
		    [feature] "i" (feature)
		: "memory");
}

static inline void indirect_branch_prediction_barrier(void)
{
	u64 val = PRED_CMD_IBPB;

	alternative_msr_write(MSR_IA32_PRED_CMD, val, X86_FEATURE_USE_IBPB);
}
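
/*
 * Illustrative call site (a sketch, not part of the original header):
 * switching to a different user mm may issue the barrier so the incoming
 * task cannot be steered by predictor state trained by the outgoing one:
 *
 *	if (ibpb_needed)			// hypothetical condition
 *		indirect_branch_prediction_barrier();
 */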

/* The Intel SPEC CTRL MSR base value cache */
extern u64 x86_spec_ctrl_base;
DECLARE_PER_CPU(u64, x86_spec_ctrl_current);
extern void update_spec_ctrl_cond(u64 val);
extern u64 spec_ctrl_current(void);

/*
 * With retpoline, we must use IBRS to restrict branch prediction
 * before calling into firmware.
 *
 * (Implemented as CPP macros due to header hell.)
 */
#define firmware_restrict_branch_speculation_start()			\
do {									\
	preempt_disable();						\
	alternative_msr_write(MSR_IA32_SPEC_CTRL,			\
			      spec_ctrl_current() | SPEC_CTRL_IBRS,	\
			      X86_FEATURE_USE_IBRS_FW);			\
} while (0)

#define firmware_restrict_branch_speculation_end()			\
do {									\
	alternative_msr_write(MSR_IA32_SPEC_CTRL,			\
			      spec_ctrl_current(),			\
			      X86_FEATURE_USE_IBRS_FW);			\
	preempt_enable();						\
} while (0)
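
/*
 * Illustrative use (a sketch, not part of the original header),
 * bracketing a firmware call so IBRS is set across it:
 *
 *	firmware_restrict_branch_speculation_start();
 *	status = efi_call(...);		// hypothetical firmware call
 *	firmware_restrict_branch_speculation_end();
 *
 * Note that start() disables preemption and end() re-enables it, so the
 * two must always be paired.
 */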

DECLARE_STATIC_KEY_FALSE(switch_to_cond_stibp);
DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);

DECLARE_STATIC_KEY_FALSE(mds_user_clear);
DECLARE_STATIC_KEY_FALSE(mds_idle_clear);

DECLARE_STATIC_KEY_FALSE(mmio_stale_data_clear);

#include <asm/segment.h>

/**
 * mds_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
 *
 * This uses the otherwise unused and obsolete VERW instruction in
 * combination with microcode which triggers a CPU buffer flush when the
 * instruction is executed.
 */
static __always_inline void mds_clear_cpu_buffers(void)
{
	static const u16 ds = __KERNEL_DS;

	/*
	 * Has to be the memory-operand variant because only that
	 * guarantees the CPU buffer flush functionality according to
	 * documentation. The register-operand variant does not.
	 * Works with any segment selector, but a valid writable
	 * data segment is the fastest variant.
	 *
	 * "cc" clobber is required because VERW modifies ZF.
	 */
	asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
}

/**
 * mds_user_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
 *
 * Clear CPU buffers if the corresponding static key is enabled
 */
static __always_inline void mds_user_clear_cpu_buffers(void)
{
	if (static_branch_likely(&mds_user_clear))
		mds_clear_cpu_buffers();
}

/**
 * mds_idle_clear_cpu_buffers - Mitigation for MDS vulnerability
 *
 * Clear CPU buffers if the corresponding static key is enabled
 */
static inline void mds_idle_clear_cpu_buffers(void)
{
	if (static_branch_likely(&mds_idle_clear))
		mds_clear_cpu_buffers();
}
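
/*
 * Illustrative call sites (a sketch, not part of the original header):
 * mds_user_clear_cpu_buffers() is meant to run late on the
 * exit-to-userspace path, and mds_idle_clear_cpu_buffers() immediately
 * before entering an idle state, e.g.
 *
 *	mds_idle_clear_cpu_buffers();
 *	// ...execute MWAIT/HLT...
 */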

#endif /* __ASSEMBLY__ */

/*
 * Below is used in the eBPF JIT compiler and emits the byte sequence
 * for the following assembly:
 *
 * With retpolines configured:
 *
 *    callq do_rop
 *  spec_trap:
 *    pause
 *    lfence
 *    jmp spec_trap
 *  do_rop:
 *    mov %rax,(%rsp)
 *    retq
 *
 * Without retpolines configured:
 *
 *    jmp *%rax
 */
#ifdef CONFIG_RETPOLINE
# define RETPOLINE_RAX_BPF_JIT_SIZE	17
# define RETPOLINE_RAX_BPF_JIT()				\
	EMIT1_off32(0xE8, 7);	 /* callq do_rop */		\
	/* spec_trap: */					\
	EMIT2(0xF3, 0x90);       /* pause */			\
	EMIT3(0x0F, 0xAE, 0xE8); /* lfence */			\
	EMIT2(0xEB, 0xF9);       /* jmp spec_trap */		\
	/* do_rop: */						\
	EMIT4(0x48, 0x89, 0x04, 0x24); /* mov %rax,(%rsp) */	\
	EMIT1(0xC3);             /* retq */
#else
# define RETPOLINE_RAX_BPF_JIT_SIZE	2
# define RETPOLINE_RAX_BPF_JIT()				\
	EMIT2(0xFF, 0xE0);	 /* jmp *%rax */
#endif

#endif /* _ASM_X86_NOSPEC_BRANCH_H_ */