/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _ASM_X86_NOSPEC_BRANCH_H_
#define _ASM_X86_NOSPEC_BRANCH_H_

#include <linux/static_key.h>
#include <linux/objtool.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/cpufeatures.h>
#include <asm/msr-index.h>
#include <asm/unwind_hints.h>
#include <asm/percpu.h>

#define RETPOLINE_THUNK_SIZE	32
/*
 * Fill the CPU return stack buffer.
 *
 * Each entry in the RSB, if used for a speculative 'ret', contains an
 * infinite 'pause; lfence; jmp' loop to capture speculative execution.
 *
 * This is required in various cases for retpoline and IBRS-based
 * mitigations for the Spectre variant 2 vulnerability. Sometimes to
 * eliminate potentially bogus entries from the RSB, and sometimes
 * purely to ensure that it doesn't get empty, which on some CPUs would
 * allow predictions from other (unwanted!) sources to be used.
 *
 * We define a CPP macro such that it can be used from both .S files and
 * inline assembly. It's possible to do a .macro and then include that
 * from C via asm(".include <asm/nospec-branch.h>") but let's not go there.
 */

#define RSB_CLEAR_LOOPS		32	/* To forcibly overwrite all entries */
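
/*
 * Illustrative sketch only (not used by the macros below): the comment
 * above describes each stuffed RSB entry pointing at a 'pause; lfence;
 * jmp' capture loop.  One such slot, written out in plain assembly,
 * would look roughly like this; the labels are hypothetical:
 *
 *	call	2f		# push a return address into the RSB
 * 1:	pause
 *	lfence
 *	jmp	1b		# speculation trap for a mispredicted 'ret'
 * 2:				# the architectural 'call' lands here
 */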
/*
 * Common helper for __FILL_RETURN_BUFFER and __FILL_ONE_RETURN.
 */
#define __FILL_RETURN_SLOT			\
	ANNOTATE_INTRA_FUNCTION_CALL;
/*
 * Stuff the entire RSB.
 *
 * Google experimented with loop-unrolling and this turned out to be
 * the optimal version - two calls, each with their own speculation
 * trap should their return address end up getting used, in a loop.
 */
#define __FILL_RETURN_BUFFER(reg, nr)			\
	add	$(BITS_PER_LONG/8) * 2, %_ASM_SP;	\
	/* barrier for jnz misprediction */
/*
 * i386 doesn't unconditionally have LFENCE, as such it can't
 * do the loop.
 */
#define __FILL_RETURN_BUFFER(reg, nr)			\
	add	$(BITS_PER_LONG/8) * nr, %_ASM_SP;
/*
 * Stuff a single RSB slot.
 *
 * To mitigate Post-Barrier RSB speculation, one CALL instruction must be
 * forced to retire before letting a RET instruction execute.
 *
 * On PBRSB-vulnerable CPUs, it is not safe for a RET to be executed
 * before this point.
 */
#define __FILL_ONE_RETURN				\
	add	$(BITS_PER_LONG/8), %_ASM_SP;
/*
 * This should be used immediately before an indirect jump/call. It tells
 * objtool the subsequent indirect jump/call is vouched safe for retpoline
 * builds.
 */
.macro ANNOTATE_RETPOLINE_SAFE
	.pushsection .discard.retpoline_safe
	_ASM_PTR .Lannotate_\@
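
/*
 * Hedged usage sketch for the annotation above: it goes immediately
 * before an indirect jump/call in .S code that is deemed safe without a
 * retpoline; the register below is chosen for illustration only:
 *
 *	ANNOTATE_RETPOLINE_SAFE
 *	jmp	*%rdi
 */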
/*
 * (ab)use RETPOLINE_SAFE on RET to annotate away 'bare' RET instructions
 * vs RETBleed validation.
 */
#define ANNOTATE_UNRET_SAFE	ANNOTATE_RETPOLINE_SAFE
/*
 * Abuse ANNOTATE_RETPOLINE_SAFE on a NOP to indicate UNRET_END; this
 * should eventually turn into its own annotation.
 */
.macro ANNOTATE_UNRET_END
#if (defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_SRSO))
	ANNOTATE_RETPOLINE_SAFE
/*
 * Equivalent to -mindirect-branch-cs-prefix; emit the 5 byte jmp/call
 * to the retpoline thunk with a CS prefix when the register requires
 * a REX prefix byte to encode. Also see apply_retpolines().
 */
.macro __CS_PREFIX reg:req
	.irp rs,r8,r9,r10,r11,r12,r13,r14,r15
/*
 * JMP_NOSPEC and CALL_NOSPEC macros can be used instead of a simple
 * indirect jmp/call which may be susceptible to the Spectre variant 2
 * attack.
 */
.macro JMP_NOSPEC reg:req
#ifdef CONFIG_RETPOLINE
	jmp	__x86_indirect_thunk_\reg

.macro CALL_NOSPEC reg:req
#ifdef CONFIG_RETPOLINE
	call	__x86_indirect_thunk_\reg
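
/*
 * Hedged usage sketch: in .S code the register is passed by name,
 * without the '%' prefix, and the macro expands to either the thunk
 * call or a plain indirect branch depending on CONFIG_RETPOLINE; the
 * registers below are illustrative:
 *
 *	CALL_NOSPEC rdi
 *	...
 *	JMP_NOSPEC rcx
 */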
/*
 * A simpler FILL_RETURN_BUFFER macro. Don't make people use the CPP
 * monstrosity above, manually.
 */
.macro FILL_RETURN_BUFFER reg:req nr:req ftr:req ftr2=ALT_NOT(X86_FEATURE_ALWAYS)
	ALTERNATIVE_2 "jmp .Lskip_rsb_\@", \
		__stringify(__FILL_RETURN_BUFFER(\reg,\nr)), \ftr, \
		__stringify(__FILL_ONE_RETURN), \ftr2
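
/*
 * Hedged usage sketch: entry/context-switch assembly would stuff the
 * RSB with something like the line below; the scratch register and the
 * feature bit are illustrative:
 *
 *	FILL_RETURN_BUFFER %r12, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_CTXSW
 */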
/*
 * The CALL to srso_alias_untrain_ret() must be patched in directly at
 * the spot where untraining must be done, i.e. srso_alias_untrain_ret()
 * must be the target of a CALL instruction instead of indirectly
 * jumping to a wrapper which then calls it. Therefore, this macro is
 * called outside of __UNTRAIN_RET below, for the time being, before the
 * kernel can support nested alternatives with arbitrary nesting.
 */
.macro CALL_UNTRAIN_RET
#ifdef CONFIG_CPU_UNRET_ENTRY
	ALTERNATIVE_2 "", "call entry_untrain_ret", X86_FEATURE_UNRET, \
		      "call srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS
/*
 * Mitigate RETBleed for AMD/Hygon Zen uarch. Requires KERNEL CR3 because the
 * return thunk isn't mapped into the userspace tables (then again, AMD
 * typically has NO_MELTDOWN).
 *
 * While retbleed_untrain_ret() doesn't clobber anything but requires a stack,
 * entry_ibpb() will clobber AX, CX, DX.
 *
 * As such, this must be placed after every *SWITCH_TO_KERNEL_CR3 at a point
 * where we have a stack but before any RET instruction.
 */
#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY) || \
	defined(CONFIG_CPU_SRSO)
	ALTERNATIVE "", "call entry_ibpb", X86_FEATURE_ENTRY_IBPB
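
/*
 * Hedged placement sketch, following the comment above: the untraining
 * sequence assembled from these pieces sits after the CR3 switch, once
 * a kernel stack is available and before any RET; the macro spelling
 * and scratch register below are illustrative:
 *
 *	SWITCH_TO_KERNEL_CR3 scratch_reg=%rsp
 *	...
 *	UNTRAIN_RET
 */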
/*
 * Macro to execute a VERW instruction that mitigates transient data sampling
 * attacks such as MDS. On affected systems a microcode update overloaded the
 * VERW instruction to also clear the CPU buffers. VERW clobbers CFLAGS.ZF.
 *
 * Note: Only the memory operand variant of VERW clears the CPU buffers.
 */
.macro CLEAR_CPU_BUFFERS
	ALTERNATIVE "jmp .Lskip_verw_\@", "", X86_FEATURE_CLEAR_CPU_BUF
	verw _ASM_RIP(mds_verw_sel)
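
/*
 * Hedged usage sketch: the intended call sites are the exit-to-user
 * paths, shortly before the instruction that returns to user mode,
 * e.g. (illustrative):
 *
 *	CLEAR_CPU_BUFFERS
 *	...
 *	iretq
 */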
.macro CLEAR_BRANCH_HISTORY
	ALTERNATIVE "", "call clear_bhb_loop", X86_FEATURE_CLEAR_BHB_LOOP

.macro CLEAR_BRANCH_HISTORY_VMEXIT
	ALTERNATIVE "", "call clear_bhb_loop", X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT

#define CLEAR_BRANCH_HISTORY
#define CLEAR_BRANCH_HISTORY_VMEXIT
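
/*
 * Hedged usage sketch: CLEAR_BRANCH_HISTORY is meant for kernel entry
 * paths (e.g. 64-bit syscall entry) and CLEAR_BRANCH_HISTORY_VMEXIT for
 * the VM-exit path; both collapse to nothing when the corresponding
 * feature bit is not set, so a call site is simply:
 *
 *	CLEAR_BRANCH_HISTORY
 */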
#else /* __ASSEMBLY__ */
#define ANNOTATE_RETPOLINE_SAFE					\
	".pushsection .discard.retpoline_safe\n\t"		\
	_ASM_PTR " 999b\n\t"
typedef u8 retpoline_thunk_t[RETPOLINE_THUNK_SIZE];
extern retpoline_thunk_t __x86_indirect_thunk_array[];

#ifdef CONFIG_RETHUNK
extern void __x86_return_thunk(void);
#else
static inline void __x86_return_thunk(void) {}
#endif

extern void retbleed_return_thunk(void);
extern void srso_return_thunk(void);
extern void srso_alias_return_thunk(void);

extern void retbleed_untrain_ret(void);
extern void srso_untrain_ret(void);
extern void srso_alias_untrain_ret(void);

extern void entry_untrain_ret(void);
extern void entry_ibpb(void);

extern void clear_bhb_loop(void);

extern void (*x86_return_thunk)(void);
#ifdef CONFIG_RETPOLINE

	extern retpoline_thunk_t __x86_indirect_thunk_ ## reg;
#include <asm/GEN-for-each-reg.h>

/*
 * Inline asm uses the %V modifier, which is only available in newer
 * GCC; that is ensured when CONFIG_RETPOLINE is defined.
 */
# define CALL_NOSPEC						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	"call __x86_indirect_thunk_%V[thunk_target]\n",		\
	X86_FEATURE_RETPOLINE,					\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	X86_FEATURE_RETPOLINE_LFENCE)

# define THUNK_TARGET(addr) [thunk_target] "r" (addr)
#else /* CONFIG_X86_32 */
/*
 * For i386 we use the original ret-equivalent retpoline, because
 * otherwise we'll run out of registers. We don't care about CET
 * compatibility here anyway.
 */
# define CALL_NOSPEC						\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	"901: call 903f;\n"					\
	"903: lea 4(%%esp), %%esp;\n"				\
	"     pushl %[thunk_target];\n"				\
	"904: call 901b;\n",					\
	X86_FEATURE_RETPOLINE,					\
	ANNOTATE_RETPOLINE_SAFE					\
	"call *%[thunk_target]\n",				\
	X86_FEATURE_RETPOLINE_LFENCE)

# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
#else /* No retpoline for C / inline asm */
# define CALL_NOSPEC	"call *%[thunk_target]\n"
# define THUNK_TARGET(addr) [thunk_target] "rm" (addr)
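
/*
 * Hedged usage sketch for the C-side CALL_NOSPEC/THUNK_TARGET pair
 * above; 'target' and the constraint list are illustrative, and a real
 * call site must also list every register the callee may clobber:
 *
 *	asm volatile(CALL_NOSPEC
 *		     : "=a" (ret), ASM_CALL_CONSTRAINT
 *		     : THUNK_TARGET(target)
 *		     : "memory", "cc");
 */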
/* The Spectre V2 mitigation variants */
enum spectre_v2_mitigation {
	SPECTRE_V2_RETPOLINE,
	SPECTRE_V2_EIBRS_RETPOLINE,
	SPECTRE_V2_EIBRS_LFENCE,
};

/* The indirect branch speculation control variants */
enum spectre_v2_user_mitigation {
	SPECTRE_V2_USER_NONE,
	SPECTRE_V2_USER_STRICT,
	SPECTRE_V2_USER_STRICT_PREFERRED,
	SPECTRE_V2_USER_PRCTL,
	SPECTRE_V2_USER_SECCOMP,
};

/* The Speculative Store Bypass disable variants */
enum ssb_mitigation {
	SPEC_STORE_BYPASS_NONE,
	SPEC_STORE_BYPASS_DISABLE,
	SPEC_STORE_BYPASS_PRCTL,
	SPEC_STORE_BYPASS_SECCOMP,
};
extern char __indirect_thunk_start[];
extern char __indirect_thunk_end[];
static __always_inline
void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)
{
	asm volatile(ALTERNATIVE("", "wrmsr", %c[feature])
		: : "c" (msr),
		    "a" ((u32)val),
		    "d" ((u32)(val >> 32)),
		    [feature] "i" (feature)
		: "memory");
}

extern u64 x86_pred_cmd;

static inline void indirect_branch_prediction_barrier(void)
{
	alternative_msr_write(MSR_IA32_PRED_CMD, x86_pred_cmd, X86_FEATURE_USE_IBPB);
}
/* The Intel SPEC CTRL MSR base value cache */
extern u64 x86_spec_ctrl_base;
DECLARE_PER_CPU(u64, x86_spec_ctrl_current);
extern void update_spec_ctrl_cond(u64 val);
extern u64 spec_ctrl_current(void);
/*
 * With retpoline, we must use IBRS to restrict branch prediction
 * before calling into firmware.
 *
 * (Implemented as CPP macros due to header hell.)
 */
#define firmware_restrict_branch_speculation_start()			\
	alternative_msr_write(MSR_IA32_SPEC_CTRL,			\
			      spec_ctrl_current() | SPEC_CTRL_IBRS,	\
			      X86_FEATURE_USE_IBRS_FW);			\
	alternative_msr_write(MSR_IA32_PRED_CMD, PRED_CMD_IBPB,		\
			      X86_FEATURE_USE_IBPB_FW);

#define firmware_restrict_branch_speculation_end()			\
	alternative_msr_write(MSR_IA32_SPEC_CTRL,			\
			      spec_ctrl_current(),			\
			      X86_FEATURE_USE_IBRS_FW);
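
/*
 * Hedged usage sketch: callers are expected to bracket the actual
 * firmware invocation with the two macros above; the callee name below
 * is hypothetical:
 *
 *	firmware_restrict_branch_speculation_start();
 *	status = some_firmware_service();
 *	firmware_restrict_branch_speculation_end();
 */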
DECLARE_STATIC_KEY_FALSE(switch_to_cond_stibp);
DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);

DECLARE_STATIC_KEY_FALSE(mds_idle_clear);

DECLARE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);

DECLARE_STATIC_KEY_FALSE(mmio_stale_data_clear);

extern u16 mds_verw_sel;
#include <asm/segment.h>

/**
 * mds_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
 *
 * This uses the otherwise unused and obsolete VERW instruction in
 * combination with microcode which triggers a CPU buffer flush when the
 * instruction is executed.
 */
static __always_inline void mds_clear_cpu_buffers(void)
{
	static const u16 ds = __KERNEL_DS;

	/*
	 * Has to be the memory-operand variant because only that
	 * guarantees the CPU buffer flush functionality according to
	 * documentation. The register-operand variant does not.
	 * Works with any segment selector, but a valid writable
	 * data segment is the fastest variant.
	 *
	 * "cc" clobber is required because VERW modifies ZF.
	 */
	asm volatile("verw %[ds]" : : [ds] "m" (ds) : "cc");
}
/**
 * mds_idle_clear_cpu_buffers - Mitigation for MDS vulnerability
 *
 * Clear CPU buffers if the corresponding static key is enabled
 */
static inline void mds_idle_clear_cpu_buffers(void)
{
	if (static_branch_likely(&mds_idle_clear))
		mds_clear_cpu_buffers();
}
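
/*
 * Hedged usage sketch: the idle variant above is intended for the idle
 * entry path, so that buffers are flushed right before the CPU goes
 * idle (e.g. ahead of an MWAIT-style idle instruction) and only when
 * the mds_idle_clear key is enabled; 'enter_idle_state()' is
 * hypothetical:
 *
 *	mds_idle_clear_cpu_buffers();
 *	enter_idle_state();
 */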
#endif /* __ASSEMBLY__ */

#endif /* _ASM_X86_NOSPEC_BRANCH_H_ */