// SPDX-License-Identifier: GPL-2.0
/*
 *  Copyright (C) 1994  Linus Torvalds
 *
 *  Cyrix stuff, June 1998 by:
 *	- Rafael R. Reilova (moved everything from head.S),
 *	  <rreilova@ececs.uc.edu>
 *	- Channing Corn (tests & fixes),
 *	- Andrew D. Balsa (code cleanup).
 */
#include <linux/init.h>
#include <linux/utsname.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/sched/smt.h>

#include <asm/spec-ctrl.h>
#include <asm/cmdline.h>
#include <asm/bugs.h>
#include <asm/processor.h>
#include <asm/processor-flags.h>
#include <asm/fpu/internal.h>
#include <asm/msr.h>
#include <asm/vmx.h>
#include <asm/paravirt.h>
#include <asm/alternative.h>
#include <asm/hypervisor.h>
#include <asm/pgtable.h>
#include <asm/set_memory.h>
#include <asm/intel-family.h>
#include <asm/e820/api.h>
#include <linux/bpf.h>

#include "cpu.h"
static void __init spectre_v1_select_mitigation(void);
static void __init spectre_v2_select_mitigation(void);
static void __init ssb_select_mitigation(void);
static void __init l1tf_select_mitigation(void);
static void __init mds_select_mitigation(void);
static void __init md_clear_update_mitigation(void);
static void __init md_clear_select_mitigation(void);
static void __init taa_select_mitigation(void);
static void __init mmio_select_mitigation(void);
static void __init srbds_select_mitigation(void);

/* The base value of the SPEC_CTRL MSR that always has to be preserved. */
u64 x86_spec_ctrl_base;
EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
static DEFINE_MUTEX(spec_ctrl_mutex);

/*
 * The vendor and possibly platform specific bits which can be modified in
 * x86_spec_ctrl_base.
 */
static u64 __ro_after_init x86_spec_ctrl_mask = SPEC_CTRL_IBRS;

/*
 * AMD specific MSR info for Speculative Store Bypass control.
 * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
 */
u64 __ro_after_init x86_amd_ls_cfg_base;
u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;

/* Control conditional STIBP in switch_to() */
DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp);
/* Control conditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
/* Control unconditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);

/* Control MDS CPU buffer clear before returning to user space */
DEFINE_STATIC_KEY_FALSE(mds_user_clear);
EXPORT_SYMBOL_GPL(mds_user_clear);
/* Control MDS CPU buffer clear before idling (halt, mwait) */
DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
EXPORT_SYMBOL_GPL(mds_idle_clear);

/* Controls CPU Fill buffer clear before KVM guest MMIO accesses */
DEFINE_STATIC_KEY_FALSE(mmio_stale_data_clear);
EXPORT_SYMBOL_GPL(mmio_stale_data_clear);

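/*
 * Boot-time entry point for speculation bug handling: selects all the
 * mitigations below and then patches in the alternative instructions.
 */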
void __init check_bugs(void)
{
	identify_boot_cpu();

	/*
	 * identify_boot_cpu() initialized SMT support information, let the
	 * core code know.
	 */
	cpu_smt_check_topology();

	if (!IS_ENABLED(CONFIG_SMP)) {
		pr_info("CPU: ");
		print_cpu_info(&boot_cpu_data);
	}

	/*
	 * Read the SPEC_CTRL MSR to account for reserved bits which may
	 * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
	 * init code as it is not enumerated and depends on the family.
	 */
	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
		rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

	/* Allow STIBP in MSR_SPEC_CTRL if supported */
	if (boot_cpu_has(X86_FEATURE_STIBP))
		x86_spec_ctrl_mask |= SPEC_CTRL_STIBP;

	/* Select the proper CPU mitigations before patching alternatives: */
	spectre_v1_select_mitigation();
	spectre_v2_select_mitigation();
	ssb_select_mitigation();
	l1tf_select_mitigation();
	md_clear_select_mitigation();
	srbds_select_mitigation();

	arch_smt_update();

#ifdef CONFIG_X86_32
	/*
	 * Check whether we are able to run this kernel safely on SMP.
	 *
	 * - i386 is no longer supported.
	 * - In order to run on anything without a TSC, we need to be
	 *   compiled for an i486.
	 */
	if (boot_cpu_data.x86 < 4)
		panic("Kernel requires i486+ for 'invlpg' and other features");

	init_utsname()->machine[1] =
		'0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
	alternative_instructions();

	fpu__init_check_bugs();
#else /* CONFIG_X86_64 */
	alternative_instructions();

	/*
	 * Make sure the first 2MB area is not mapped by huge pages
	 * There are typically fixed size MTRRs in there and overlapping
	 * MTRRs into large pages causes slow downs.
	 *
	 * Right now we don't do that with gbpages because there seems
	 * very little benefit for that case.
	 */
	if (!direct_gbpages)
		set_memory_4k((unsigned long)__va(0), 1);
#endif
}

void
x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
{
	u64 msrval, guestval, hostval = x86_spec_ctrl_base;
	struct thread_info *ti = current_thread_info();

	/* Is MSR_SPEC_CTRL implemented ? */
	if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
		/*
		 * Restrict guest_spec_ctrl to supported values. Clear the
		 * modifiable bits in the host base value and or the
		 * modifiable bits from the guest value.
		 */
		guestval = hostval & ~x86_spec_ctrl_mask;
		guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;

		/* SSBD controlled in MSR_SPEC_CTRL */
		if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
		    static_cpu_has(X86_FEATURE_AMD_SSBD))
			hostval |= ssbd_tif_to_spec_ctrl(ti->flags);

		/* Conditional STIBP enabled? */
		if (static_branch_unlikely(&switch_to_cond_stibp))
			hostval |= stibp_tif_to_spec_ctrl(ti->flags);

		if (hostval != guestval) {
			msrval = setguest ? guestval : hostval;
			wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
		}
	}

	/*
	 * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
	 * MSR_AMD64_LS_CFG or MSR_VIRT_SPEC_CTRL if supported.
	 */
	if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
	    !static_cpu_has(X86_FEATURE_VIRT_SSBD))
		return;

	/*
	 * If the host has SSBD mitigation enabled, force it in the host's
	 * virtual MSR value. If it's not permanently enabled, evaluate
	 * current's TIF_SSBD thread flag.
	 */
	if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
		hostval = SPEC_CTRL_SSBD;
	else
		hostval = ssbd_tif_to_spec_ctrl(ti->flags);

	/* Sanitize the guest value */
	guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;

	if (hostval != guestval) {
		unsigned long tif;

		tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
				 ssbd_spec_ctrl_to_tif(hostval);

		speculation_ctrl_update(tif);
	}
}
EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);

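/*
 * Engage Speculative Store Bypass Disable on AMD CPUs which expose it via
 * MSR_AMD64_VIRT_SPEC_CTRL or a family specific bit in MSR_AMD64_LS_CFG.
 */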
static void x86_amd_ssb_disable(void)
{
	u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;

	if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
		wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
	else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
		wrmsrl(MSR_AMD64_LS_CFG, msrval);
}

#undef pr_fmt
#define pr_fmt(fmt)	"MDS: " fmt

/* Default mitigation for MDS-affected CPUs */
static enum mds_mitigations mds_mitigation __ro_after_init = MDS_MITIGATION_FULL;
static bool mds_nosmt __ro_after_init = false;

static const char * const mds_strings[] = {
	[MDS_MITIGATION_OFF]	= "Vulnerable",
	[MDS_MITIGATION_FULL]	= "Mitigation: Clear CPU buffers",
	[MDS_MITIGATION_VMWERV]	= "Vulnerable: Clear CPU buffers attempted, no microcode",
};

static void __init mds_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off()) {
		mds_mitigation = MDS_MITIGATION_OFF;
		return;
	}

	if (mds_mitigation == MDS_MITIGATION_FULL) {
		if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
			mds_mitigation = MDS_MITIGATION_VMWERV;

		static_branch_enable(&mds_user_clear);

		if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) &&
		    (mds_nosmt || cpu_mitigations_auto_nosmt()))
			cpu_smt_disable(false);
	}
}

static int __init mds_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_MDS))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off"))
		mds_mitigation = MDS_MITIGATION_OFF;
	else if (!strcmp(str, "full"))
		mds_mitigation = MDS_MITIGATION_FULL;
	else if (!strcmp(str, "full,nosmt")) {
		mds_mitigation = MDS_MITIGATION_FULL;
		mds_nosmt = true;
	}

	return 0;
}
early_param("mds", mds_cmdline);

#undef pr_fmt
#define pr_fmt(fmt)	"TAA: " fmt

/* Default mitigation for TAA-affected CPUs */
static enum taa_mitigations taa_mitigation __ro_after_init = TAA_MITIGATION_VERW;
static bool taa_nosmt __ro_after_init;

static const char * const taa_strings[] = {
	[TAA_MITIGATION_OFF]		= "Vulnerable",
	[TAA_MITIGATION_UCODE_NEEDED]	= "Vulnerable: Clear CPU buffers attempted, no microcode",
	[TAA_MITIGATION_VERW]		= "Mitigation: Clear CPU buffers",
	[TAA_MITIGATION_TSX_DISABLED]	= "Mitigation: TSX disabled",
};

static void __init taa_select_mitigation(void)
{
	u64 ia32_cap;

	if (!boot_cpu_has_bug(X86_BUG_TAA)) {
		taa_mitigation = TAA_MITIGATION_OFF;
		return;
	}

	/* TSX previously disabled by tsx=off */
	if (!boot_cpu_has(X86_FEATURE_RTM)) {
		taa_mitigation = TAA_MITIGATION_TSX_DISABLED;
		return;
	}

	if (cpu_mitigations_off()) {
		taa_mitigation = TAA_MITIGATION_OFF;
		return;
	}

	/*
	 * TAA mitigation via VERW is turned off if both
	 * tsx_async_abort=off and mds=off are specified.
	 */
	if (taa_mitigation == TAA_MITIGATION_OFF &&
	    mds_mitigation == MDS_MITIGATION_OFF)
		return;

	if (boot_cpu_has(X86_FEATURE_MD_CLEAR))
		taa_mitigation = TAA_MITIGATION_VERW;
	else
		taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;

	/*
	 * VERW doesn't clear the CPU buffers when MD_CLEAR=1 and MDS_NO=1.
	 * A microcode update fixes this behavior to clear CPU buffers. It also
	 * adds support for MSR_IA32_TSX_CTRL which is enumerated by the
	 * ARCH_CAP_TSX_CTRL_MSR bit.
	 *
	 * On MDS_NO=1 CPUs if ARCH_CAP_TSX_CTRL_MSR is not set, microcode
	 * update is required.
	 */
	ia32_cap = x86_read_arch_cap_msr();
	if ( (ia32_cap & ARCH_CAP_MDS_NO) &&
	    !(ia32_cap & ARCH_CAP_TSX_CTRL_MSR))
		taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;

	/*
	 * TSX is enabled, select alternate mitigation for TAA which is
	 * the same as MDS. Enable MDS static branch to clear CPU buffers.
	 *
	 * For guests that can't determine whether the correct microcode is
	 * present on host, enable the mitigation for UCODE_NEEDED as well.
	 */
	static_branch_enable(&mds_user_clear);

	if (taa_nosmt || cpu_mitigations_auto_nosmt())
		cpu_smt_disable(false);
}

static int __init tsx_async_abort_parse_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_TAA))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off")) {
		taa_mitigation = TAA_MITIGATION_OFF;
	} else if (!strcmp(str, "full")) {
		taa_mitigation = TAA_MITIGATION_VERW;
	} else if (!strcmp(str, "full,nosmt")) {
		taa_mitigation = TAA_MITIGATION_VERW;
		taa_nosmt = true;
	}

	return 0;
}
early_param("tsx_async_abort", tsx_async_abort_parse_cmdline);

#undef pr_fmt
#define pr_fmt(fmt)	"MMIO Stale Data: " fmt

enum mmio_mitigations {
	MMIO_MITIGATION_OFF,
	MMIO_MITIGATION_UCODE_NEEDED,
	MMIO_MITIGATION_VERW,
};

/* Default mitigation for Processor MMIO Stale Data vulnerabilities */
static enum mmio_mitigations mmio_mitigation __ro_after_init = MMIO_MITIGATION_VERW;
static bool mmio_nosmt __ro_after_init = false;

static const char * const mmio_strings[] = {
	[MMIO_MITIGATION_OFF]		= "Vulnerable",
	[MMIO_MITIGATION_UCODE_NEEDED]	= "Vulnerable: Clear CPU buffers attempted, no microcode",
	[MMIO_MITIGATION_VERW]		= "Mitigation: Clear CPU buffers",
};

static void __init mmio_select_mitigation(void)
{
	u64 ia32_cap;

	if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) ||
	     boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN) ||
	     cpu_mitigations_off()) {
		mmio_mitigation = MMIO_MITIGATION_OFF;
		return;
	}

	if (mmio_mitigation == MMIO_MITIGATION_OFF)
		return;

	ia32_cap = x86_read_arch_cap_msr();

	/*
	 * Enable CPU buffer clear mitigation for host and VMM, if also affected
	 * by MDS or TAA. Otherwise, enable mitigation for VMM only.
	 */
	if (boot_cpu_has_bug(X86_BUG_MDS) || (boot_cpu_has_bug(X86_BUG_TAA) &&
					      boot_cpu_has(X86_FEATURE_RTM)))
		static_branch_enable(&mds_user_clear);
	else
		static_branch_enable(&mmio_stale_data_clear);

	/*
	 * If Processor-MMIO-Stale-Data bug is present and Fill Buffer data can
	 * be propagated to uncore buffers, clearing the Fill buffers on idle
	 * is required irrespective of SMT state.
	 */
	if (!(ia32_cap & ARCH_CAP_FBSDP_NO))
		static_branch_enable(&mds_idle_clear);

	/*
	 * Check if the system has the right microcode.
	 *
	 * CPU Fill buffer clear mitigation is enumerated by either an explicit
	 * FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS
	 * affected systems.
	 */
	if ((ia32_cap & ARCH_CAP_FB_CLEAR) ||
	    (boot_cpu_has(X86_FEATURE_MD_CLEAR) &&
	     boot_cpu_has(X86_FEATURE_FLUSH_L1D) &&
	     !(ia32_cap & ARCH_CAP_MDS_NO)))
		mmio_mitigation = MMIO_MITIGATION_VERW;
	else
		mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED;

	if (mmio_nosmt || cpu_mitigations_auto_nosmt())
		cpu_smt_disable(false);
}

static int __init mmio_stale_data_parse_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off")) {
		mmio_mitigation = MMIO_MITIGATION_OFF;
	} else if (!strcmp(str, "full")) {
		mmio_mitigation = MMIO_MITIGATION_VERW;
	} else if (!strcmp(str, "full,nosmt")) {
		mmio_mitigation = MMIO_MITIGATION_VERW;
		mmio_nosmt = true;
	}

	return 0;
}
early_param("mmio_stale_data", mmio_stale_data_parse_cmdline);

#undef pr_fmt
#define pr_fmt(fmt)	"" fmt

static void __init md_clear_update_mitigation(void)
{
	if (cpu_mitigations_off())
		return;

	if (!static_key_enabled(&mds_user_clear))
		goto out;

	/*
	 * mds_user_clear is now enabled. Update MDS, TAA and MMIO Stale Data
	 * mitigation, if necessary.
	 */
	if (mds_mitigation == MDS_MITIGATION_OFF &&
	    boot_cpu_has_bug(X86_BUG_MDS)) {
		mds_mitigation = MDS_MITIGATION_FULL;
		mds_select_mitigation();
	}
	if (taa_mitigation == TAA_MITIGATION_OFF &&
	    boot_cpu_has_bug(X86_BUG_TAA)) {
		taa_mitigation = TAA_MITIGATION_VERW;
		taa_select_mitigation();
	}
	if (mmio_mitigation == MMIO_MITIGATION_OFF &&
	    boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) {
		mmio_mitigation = MMIO_MITIGATION_VERW;
		mmio_select_mitigation();
	}
out:
	if (boot_cpu_has_bug(X86_BUG_MDS))
		pr_info("MDS: %s\n", mds_strings[mds_mitigation]);
	if (boot_cpu_has_bug(X86_BUG_TAA))
		pr_info("TAA: %s\n", taa_strings[taa_mitigation]);
	if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
		pr_info("MMIO Stale Data: %s\n", mmio_strings[mmio_mitigation]);
	else if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
		pr_info("MMIO Stale Data: Unknown: No mitigations\n");
}

static void __init md_clear_select_mitigation(void)
{
	mds_select_mitigation();
	taa_select_mitigation();
	mmio_select_mitigation();

	/*
	 * As MDS, TAA and MMIO Stale Data mitigations are inter-related, update
	 * and print their mitigation after MDS, TAA and MMIO Stale Data
	 * mitigation selection is done.
	 */
	md_clear_update_mitigation();
}

#undef pr_fmt
#define pr_fmt(fmt)	"SRBDS: " fmt

enum srbds_mitigations {
	SRBDS_MITIGATION_OFF,
	SRBDS_MITIGATION_UCODE_NEEDED,
	SRBDS_MITIGATION_FULL,
	SRBDS_MITIGATION_TSX_OFF,
	SRBDS_MITIGATION_HYPERVISOR,
};

static enum srbds_mitigations srbds_mitigation __ro_after_init = SRBDS_MITIGATION_FULL;

static const char * const srbds_strings[] = {
	[SRBDS_MITIGATION_OFF]		= "Vulnerable",
	[SRBDS_MITIGATION_UCODE_NEEDED]	= "Vulnerable: No microcode",
	[SRBDS_MITIGATION_FULL]		= "Mitigation: Microcode",
	[SRBDS_MITIGATION_TSX_OFF]	= "Mitigation: TSX disabled",
	[SRBDS_MITIGATION_HYPERVISOR]	= "Unknown: Dependent on hypervisor status",
};

static bool srbds_off;

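/*
 * Program MSR_IA32_MCU_OPT_CTRL on this CPU: set RNGDS_MITG_DIS when the
 * SRBDS mitigation is off or TSX is disabled, clear it for the full
 * microcode based mitigation.
 */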
void update_srbds_msr(void)
{
	u64 mcu_ctrl;

	if (!boot_cpu_has_bug(X86_BUG_SRBDS))
		return;

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return;

	if (srbds_mitigation == SRBDS_MITIGATION_UCODE_NEEDED)
		return;

	rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);

	switch (srbds_mitigation) {
	case SRBDS_MITIGATION_OFF:
	case SRBDS_MITIGATION_TSX_OFF:
		mcu_ctrl |= RNGDS_MITG_DIS;
		break;
	case SRBDS_MITIGATION_FULL:
		mcu_ctrl &= ~RNGDS_MITG_DIS;
		break;
	default:
		break;
	}

	wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
}

static void __init srbds_select_mitigation(void)
{
	u64 ia32_cap;

	if (!boot_cpu_has_bug(X86_BUG_SRBDS))
		return;

	/*
	 * Check to see if this is one of the MDS_NO systems supporting TSX that
	 * are only exposed to SRBDS when TSX is enabled or when CPU is affected
	 * by Processor MMIO Stale Data vulnerability.
	 */
	ia32_cap = x86_read_arch_cap_msr();
	if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
	    !boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
		srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
	else if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR;
	else if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
		srbds_mitigation = SRBDS_MITIGATION_UCODE_NEEDED;
	else if (cpu_mitigations_off() || srbds_off)
		srbds_mitigation = SRBDS_MITIGATION_OFF;

	update_srbds_msr();
	pr_info("%s\n", srbds_strings[srbds_mitigation]);
}

static int __init srbds_parse_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (!boot_cpu_has_bug(X86_BUG_SRBDS))
		return 0;

	srbds_off = !strcmp(str, "off");
	return 0;
}
early_param("srbds", srbds_parse_cmdline);

#undef pr_fmt
#define pr_fmt(fmt)	"Spectre V1 : " fmt

enum spectre_v1_mitigation {
	SPECTRE_V1_MITIGATION_NONE,
	SPECTRE_V1_MITIGATION_AUTO,
};

static enum spectre_v1_mitigation spectre_v1_mitigation __ro_after_init =
	SPECTRE_V1_MITIGATION_AUTO;

static const char * const spectre_v1_strings[] = {
	[SPECTRE_V1_MITIGATION_NONE] = "Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers",
	[SPECTRE_V1_MITIGATION_AUTO] = "Mitigation: usercopy/swapgs barriers and __user pointer sanitization",
};

/*
 * Does SMAP provide full mitigation against speculative kernel access to
 * userspace?
 */
static bool smap_works_speculatively(void)
{
	if (!boot_cpu_has(X86_FEATURE_SMAP))
		return false;

	/*
	 * On CPUs which are vulnerable to Meltdown, SMAP does not
	 * prevent speculative access to user data in the L1 cache.
	 * Consider SMAP to be non-functional as a mitigation on these
	 * CPUs.
	 */
	if (boot_cpu_has(X86_BUG_CPU_MELTDOWN))
		return false;

	return true;
}

static void __init spectre_v1_select_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1) || cpu_mitigations_off()) {
		spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
		return;
	}

	if (spectre_v1_mitigation == SPECTRE_V1_MITIGATION_AUTO) {
		/*
		 * With Spectre v1, a user can speculatively control either
		 * path of a conditional swapgs with a user-controlled GS
		 * value. The mitigation is to add lfences to both code paths.
		 *
		 * If FSGSBASE is enabled, the user can put a kernel address in
		 * GS, in which case SMAP provides no protection.
		 *
		 * [ NOTE: Don't check for X86_FEATURE_FSGSBASE until the
		 *	   FSGSBASE enablement patches have been merged. ]
		 *
		 * If FSGSBASE is disabled, the user can only put a user space
		 * address in GS. That makes an attack harder, but still
		 * possible if there's no SMAP protection.
		 */
		if (!smap_works_speculatively()) {
			/*
			 * Mitigation can be provided from SWAPGS itself or
			 * PTI as the CR3 write in the Meltdown mitigation
			 * is serializing.
			 *
			 * If neither is there, mitigate with an LFENCE to
			 * stop speculation through swapgs.
			 */
			if (boot_cpu_has_bug(X86_BUG_SWAPGS) &&
			    !boot_cpu_has(X86_FEATURE_PTI))
				setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_USER);

			/*
			 * Enable lfences in the kernel entry (non-swapgs)
			 * paths, to prevent user entry from speculatively
			 * skipping swapgs.
			 */
			setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_KERNEL);
		}
	}

	pr_info("%s\n", spectre_v1_strings[spectre_v1_mitigation]);
}

static int __init nospectre_v1_cmdline(char *str)
{
	spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
	return 0;
}
early_param("nospectre_v1", nospectre_v1_cmdline);

#undef pr_fmt
#define pr_fmt(fmt)	"Spectre V2 : " fmt

static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
	SPECTRE_V2_NONE;

static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init =
	SPECTRE_V2_USER_NONE;
static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init =
	SPECTRE_V2_USER_NONE;

#ifdef CONFIG_RETPOLINE
static bool spectre_v2_bad_module;

bool retpoline_module_ok(bool has_retpoline)
{
	if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
		return true;

	pr_err("System may be vulnerable to spectre v2\n");
	spectre_v2_bad_module = true;
	return false;
}

static inline const char *spectre_v2_module_string(void)
{
	return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
}
#else
static inline const char *spectre_v2_module_string(void) { return ""; }
#endif

#define SPECTRE_V2_LFENCE_MSG "WARNING: LFENCE mitigation is not recommended for this CPU, data leaks possible!\n"
#define SPECTRE_V2_EIBRS_EBPF_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!\n"
#define SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS+LFENCE mitigation and SMT, data leaks possible via Spectre v2 BHB attacks!\n"

#ifdef CONFIG_BPF_SYSCALL
void unpriv_ebpf_notify(int new_state)
{
	if (new_state)
		return;

	/* Unprivileged eBPF is enabled */

	switch (spectre_v2_enabled) {
	case SPECTRE_V2_EIBRS:
		pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
		break;
	case SPECTRE_V2_EIBRS_LFENCE:
		if (sched_smt_active())
			pr_err(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
		break;
	default:
		break;
	}
}
#endif

static inline bool match_option(const char *arg, int arglen, const char *opt)
{
	int len = strlen(opt);

	return len == arglen && !strncmp(arg, opt, len);
}

/* The kernel command line selection for spectre v2 */
enum spectre_v2_mitigation_cmd {
	SPECTRE_V2_CMD_NONE,
	SPECTRE_V2_CMD_AUTO,
	SPECTRE_V2_CMD_FORCE,
	SPECTRE_V2_CMD_RETPOLINE,
	SPECTRE_V2_CMD_RETPOLINE_GENERIC,
	SPECTRE_V2_CMD_RETPOLINE_LFENCE,
	SPECTRE_V2_CMD_EIBRS,
	SPECTRE_V2_CMD_EIBRS_RETPOLINE,
	SPECTRE_V2_CMD_EIBRS_LFENCE,
};

enum spectre_v2_user_cmd {
	SPECTRE_V2_USER_CMD_NONE,
	SPECTRE_V2_USER_CMD_AUTO,
	SPECTRE_V2_USER_CMD_FORCE,
	SPECTRE_V2_USER_CMD_PRCTL,
	SPECTRE_V2_USER_CMD_PRCTL_IBPB,
	SPECTRE_V2_USER_CMD_SECCOMP,
	SPECTRE_V2_USER_CMD_SECCOMP_IBPB,
};

static const char * const spectre_v2_user_strings[] = {
	[SPECTRE_V2_USER_NONE]			= "User space: Vulnerable",
	[SPECTRE_V2_USER_STRICT]		= "User space: Mitigation: STIBP protection",
	[SPECTRE_V2_USER_STRICT_PREFERRED]	= "User space: Mitigation: STIBP always-on protection",
	[SPECTRE_V2_USER_PRCTL]			= "User space: Mitigation: STIBP via prctl",
	[SPECTRE_V2_USER_SECCOMP]		= "User space: Mitigation: STIBP via seccomp and prctl",
};

static const struct {
	const char			*option;
	enum spectre_v2_user_cmd	cmd;
	bool				secure;
} v2_user_options[] __initconst = {
	{ "auto",		SPECTRE_V2_USER_CMD_AUTO,		false },
	{ "off",		SPECTRE_V2_USER_CMD_NONE,		false },
	{ "on",			SPECTRE_V2_USER_CMD_FORCE,		true  },
	{ "prctl",		SPECTRE_V2_USER_CMD_PRCTL,		false },
	{ "prctl,ibpb",		SPECTRE_V2_USER_CMD_PRCTL_IBPB,		false },
	{ "seccomp",		SPECTRE_V2_USER_CMD_SECCOMP,		false },
	{ "seccomp,ibpb",	SPECTRE_V2_USER_CMD_SECCOMP_IBPB,	false },
};

static void __init spec_v2_user_print_cond(const char *reason, bool secure)
{
	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
		pr_info("spectre_v2_user=%s forced on command line.\n", reason);
}

static enum spectre_v2_user_cmd __init
spectre_v2_parse_user_cmdline(enum spectre_v2_mitigation_cmd v2_cmd)
{
	char arg[20];
	int ret, i;

	switch (v2_cmd) {
	case SPECTRE_V2_CMD_NONE:
		return SPECTRE_V2_USER_CMD_NONE;
	case SPECTRE_V2_CMD_FORCE:
		return SPECTRE_V2_USER_CMD_FORCE;
	default:
		break;
	}

	ret = cmdline_find_option(boot_command_line, "spectre_v2_user",
				  arg, sizeof(arg));
	if (ret < 0)
		return SPECTRE_V2_USER_CMD_AUTO;

	for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) {
		if (match_option(arg, ret, v2_user_options[i].option)) {
			spec_v2_user_print_cond(v2_user_options[i].option,
						v2_user_options[i].secure);
			return v2_user_options[i].cmd;
		}
	}

	pr_err("Unknown user space protection option (%s). Switching to AUTO select\n", arg);
	return SPECTRE_V2_USER_CMD_AUTO;
}

static inline bool spectre_v2_in_eibrs_mode(enum spectre_v2_mitigation mode)
{
	return (mode == SPECTRE_V2_EIBRS ||
		mode == SPECTRE_V2_EIBRS_RETPOLINE ||
		mode == SPECTRE_V2_EIBRS_LFENCE);
}

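/*
 * Select the user space (STIBP/IBPB) protection mode based on the
 * spectre_v2_user= command line option and the main spectre_v2 command.
 */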
static void __init
spectre_v2_user_select_mitigation(enum spectre_v2_mitigation_cmd v2_cmd)
{
	enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE;
	bool smt_possible = IS_ENABLED(CONFIG_SMP);
	enum spectre_v2_user_cmd cmd;

	if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
		return;

	if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
	    cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
		smt_possible = false;

	cmd = spectre_v2_parse_user_cmdline(v2_cmd);
	switch (cmd) {
	case SPECTRE_V2_USER_CMD_NONE:
		goto set_mode;
	case SPECTRE_V2_USER_CMD_FORCE:
		mode = SPECTRE_V2_USER_STRICT;
		break;
	case SPECTRE_V2_USER_CMD_PRCTL:
	case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
		mode = SPECTRE_V2_USER_PRCTL;
		break;
	case SPECTRE_V2_USER_CMD_AUTO:
	case SPECTRE_V2_USER_CMD_SECCOMP:
	case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
		if (IS_ENABLED(CONFIG_SECCOMP))
			mode = SPECTRE_V2_USER_SECCOMP;
		else
			mode = SPECTRE_V2_USER_PRCTL;
		break;
	}

	/* Initialize Indirect Branch Prediction Barrier */
	if (boot_cpu_has(X86_FEATURE_IBPB)) {
		setup_force_cpu_cap(X86_FEATURE_USE_IBPB);

		spectre_v2_user_ibpb = mode;
		switch (cmd) {
		case SPECTRE_V2_USER_CMD_FORCE:
		case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
		case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
			static_branch_enable(&switch_mm_always_ibpb);
			spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
			break;
		case SPECTRE_V2_USER_CMD_PRCTL:
		case SPECTRE_V2_USER_CMD_AUTO:
		case SPECTRE_V2_USER_CMD_SECCOMP:
			static_branch_enable(&switch_mm_cond_ibpb);
			break;
		default:
			break;
		}

		pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
			static_key_enabled(&switch_mm_always_ibpb) ?
			"always-on" : "conditional");
	}

	/*
	 * If no STIBP, enhanced IBRS is enabled or SMT impossible, STIBP is not
	 * required.
	 */
	if (!boot_cpu_has(X86_FEATURE_STIBP) ||
	    !smt_possible ||
	    spectre_v2_in_eibrs_mode(spectre_v2_enabled))
		return;

	/*
	 * At this point, an STIBP mode other than "off" has been set.
	 * If STIBP support is not being forced, check if STIBP always-on
	 * is preferred.
	 */
	if (mode != SPECTRE_V2_USER_STRICT &&
	    boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
		mode = SPECTRE_V2_USER_STRICT_PREFERRED;

	spectre_v2_user_stibp = mode;

set_mode:
	pr_info("%s\n", spectre_v2_user_strings[mode]);
}

static const char * const spectre_v2_strings[] = {
	[SPECTRE_V2_NONE]			= "Vulnerable",
	[SPECTRE_V2_RETPOLINE]			= "Mitigation: Retpolines",
	[SPECTRE_V2_LFENCE]			= "Mitigation: LFENCE",
	[SPECTRE_V2_EIBRS]			= "Mitigation: Enhanced IBRS",
	[SPECTRE_V2_EIBRS_LFENCE]		= "Mitigation: Enhanced IBRS + LFENCE",
	[SPECTRE_V2_EIBRS_RETPOLINE]		= "Mitigation: Enhanced IBRS + Retpolines",
};

static const struct {
	const char *option;
	enum spectre_v2_mitigation_cmd cmd;
	bool secure;
} mitigation_options[] __initconst = {
	{ "off",		SPECTRE_V2_CMD_NONE,		  false },
	{ "on",			SPECTRE_V2_CMD_FORCE,		  true  },
	{ "retpoline",		SPECTRE_V2_CMD_RETPOLINE,	  false },
	{ "retpoline,amd",	SPECTRE_V2_CMD_RETPOLINE_LFENCE,  false },
	{ "retpoline,lfence",	SPECTRE_V2_CMD_RETPOLINE_LFENCE,  false },
	{ "retpoline,generic",	SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
	{ "eibrs",		SPECTRE_V2_CMD_EIBRS,		  false },
	{ "eibrs,lfence",	SPECTRE_V2_CMD_EIBRS_LFENCE,	  false },
	{ "eibrs,retpoline",	SPECTRE_V2_CMD_EIBRS_RETPOLINE,	  false },
	{ "auto",		SPECTRE_V2_CMD_AUTO,		  false },
};

static void __init spec_v2_print_cond(const char *reason, bool secure)
{
	if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
		pr_info("%s selected on command line.\n", reason);
}

static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
{
	enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;
	char arg[20];
	int ret, i;

	if (cmdline_find_option_bool(boot_command_line, "nospectre_v2") ||
	    cpu_mitigations_off())
		return SPECTRE_V2_CMD_NONE;

	ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
	if (ret < 0)
		return SPECTRE_V2_CMD_AUTO;

	for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
		if (!match_option(arg, ret, mitigation_options[i].option))
			continue;
		cmd = mitigation_options[i].cmd;
		break;
	}

	if (i >= ARRAY_SIZE(mitigation_options)) {
		pr_err("unknown option (%s). Switching to AUTO select\n", arg);
		return SPECTRE_V2_CMD_AUTO;
	}

	if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
	     cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
	     cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC ||
	     cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
	     cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
	    !IS_ENABLED(CONFIG_RETPOLINE)) {
		pr_err("%s selected but not compiled in. Switching to AUTO select\n",
		       mitigation_options[i].option);
		return SPECTRE_V2_CMD_AUTO;
	}

	if ((cmd == SPECTRE_V2_CMD_EIBRS ||
	     cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
	     cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
	    !boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
		pr_err("%s selected but CPU doesn't have eIBRS. Switching to AUTO select\n",
		       mitigation_options[i].option);
		return SPECTRE_V2_CMD_AUTO;
	}

	if ((cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
	     cmd == SPECTRE_V2_CMD_EIBRS_LFENCE) &&
	    !boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
		pr_err("%s selected, but CPU doesn't have a serializing LFENCE. Switching to AUTO select\n",
		       mitigation_options[i].option);
		return SPECTRE_V2_CMD_AUTO;
	}

	spec_v2_print_cond(mitigation_options[i].option,
			   mitigation_options[i].secure);
	return cmd;
}

static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void)
{
	if (!IS_ENABLED(CONFIG_RETPOLINE)) {
		pr_err("Kernel not compiled with retpoline; no mitigation available!");
		return SPECTRE_V2_NONE;
	}

	return SPECTRE_V2_RETPOLINE;
}

static void __init spectre_v2_select_mitigation(void)
{
	enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
	enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;

	/*
	 * If the CPU is not affected and the command line mode is NONE or AUTO
	 * then nothing to do.
	 */
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
	    (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
		return;

	switch (cmd) {
	case SPECTRE_V2_CMD_NONE:
		return;

	case SPECTRE_V2_CMD_FORCE:
	case SPECTRE_V2_CMD_AUTO:
		if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
			mode = SPECTRE_V2_EIBRS;
			break;
		}

		mode = spectre_v2_select_retpoline();
		break;

	case SPECTRE_V2_CMD_RETPOLINE_LFENCE:
		pr_err(SPECTRE_V2_LFENCE_MSG);
		mode = SPECTRE_V2_LFENCE;
		break;

	case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
		mode = SPECTRE_V2_RETPOLINE;
		break;

	case SPECTRE_V2_CMD_RETPOLINE:
		mode = spectre_v2_select_retpoline();
		break;

	case SPECTRE_V2_CMD_EIBRS:
		mode = SPECTRE_V2_EIBRS;
		break;

	case SPECTRE_V2_CMD_EIBRS_LFENCE:
		mode = SPECTRE_V2_EIBRS_LFENCE;
		break;

	case SPECTRE_V2_CMD_EIBRS_RETPOLINE:
		mode = SPECTRE_V2_EIBRS_RETPOLINE;
		break;
	}

	if (mode == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
		pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);

	if (spectre_v2_in_eibrs_mode(mode)) {
		/* Force it so VMEXIT will restore correctly */
		x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
		wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
	}

	switch (mode) {
	case SPECTRE_V2_NONE:
	case SPECTRE_V2_EIBRS:
		break;

	case SPECTRE_V2_LFENCE:
	case SPECTRE_V2_EIBRS_LFENCE:
		setup_force_cpu_cap(X86_FEATURE_RETPOLINE_LFENCE);
		fallthrough;

	case SPECTRE_V2_RETPOLINE:
	case SPECTRE_V2_EIBRS_RETPOLINE:
		setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
		break;
	}

	spectre_v2_enabled = mode;
	pr_info("%s\n", spectre_v2_strings[mode]);

	/*
	 * If spectre v2 protection has been enabled, unconditionally fill
	 * RSB during a context switch; this protects against two independent
	 * issues:
	 *
	 *	- RSB underflow (and switch to BTB) on Skylake+
	 *	- SpectreRSB variant of spectre v2 on X86_BUG_SPECTRE_V2 CPUs
	 */
	setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
	pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");

	/*
	 * Retpoline means the kernel is safe because it has no indirect
	 * branches. Enhanced IBRS protects firmware too, so, enable restricted
	 * speculation around firmware calls only when Enhanced IBRS isn't
	 * supported.
	 *
	 * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because
	 * the user might select retpoline on the kernel command line and if
	 * the CPU supports Enhanced IBRS, kernel might unintentionally not
	 * enable IBRS around firmware calls.
	 */
	if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_eibrs_mode(mode)) {
		setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
		pr_info("Enabling Restricted Speculation for firmware calls\n");
	}

	/* Set up IBPB and STIBP depending on the general spectre V2 command */
	spectre_v2_user_select_mitigation(cmd);
}

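/*
 * on_each_cpu() callback used by update_stibp_strict() to propagate the
 * updated x86_spec_ctrl_base value to all CPUs.
 */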
static void update_stibp_msr(void * __unused)
{
	wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
}

/* Update x86_spec_ctrl_base in case SMT state changed. */
static void update_stibp_strict(void)
{
	u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;

	if (sched_smt_active())
		mask |= SPEC_CTRL_STIBP;

	if (mask == x86_spec_ctrl_base)
		return;

	pr_info("Update user space SMT mitigation: STIBP %s\n",
		mask & SPEC_CTRL_STIBP ? "always-on" : "off");
	x86_spec_ctrl_base = mask;
	on_each_cpu(update_stibp_msr, NULL, 1);
}

/* Update the static key controlling the evaluation of TIF_SPEC_IB */
static void update_indir_branch_cond(void)
{
	if (sched_smt_active())
		static_branch_enable(&switch_to_cond_stibp);
	else
		static_branch_disable(&switch_to_cond_stibp);
}

#undef pr_fmt
#define pr_fmt(fmt) fmt

/* Update the static key controlling the MDS CPU buffer clear in idle */
static void update_mds_branch_idle(void)
{
	u64 ia32_cap = x86_read_arch_cap_msr();

	/*
	 * Enable the idle clearing if SMT is active on CPUs which are
	 * affected only by MSBDS and not any other MDS variant.
	 *
	 * The other variants cannot be mitigated when SMT is enabled, so
	 * clearing the buffers on idle just to prevent the Store Buffer
	 * repartitioning leak would be a window dressing exercise.
	 */
	if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY))
		return;

	if (sched_smt_active()) {
		static_branch_enable(&mds_idle_clear);
	} else if (mmio_mitigation == MMIO_MITIGATION_OFF ||
		   (ia32_cap & ARCH_CAP_FBSDP_NO)) {
		static_branch_disable(&mds_idle_clear);
	}
}

#define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n"
#define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n"
#define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n"

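/*
 * Re-evaluate the SMT dependent mitigation state (STIBP, conditional
 * indirect branch control, MDS/TAA/MMIO idle clearing and warnings)
 * whenever the SMT state may have changed.
 */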
void arch_smt_update(void)
{
	mutex_lock(&spec_ctrl_mutex);

	if (sched_smt_active() && unprivileged_ebpf_enabled() &&
	    spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
		pr_warn_once(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);

	switch (spectre_v2_user_stibp) {
	case SPECTRE_V2_USER_NONE:
		break;
	case SPECTRE_V2_USER_STRICT:
	case SPECTRE_V2_USER_STRICT_PREFERRED:
		update_stibp_strict();
		break;
	case SPECTRE_V2_USER_PRCTL:
	case SPECTRE_V2_USER_SECCOMP:
		update_indir_branch_cond();
		break;
	}

	switch (mds_mitigation) {
	case MDS_MITIGATION_FULL:
	case MDS_MITIGATION_VMWERV:
		if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY))
			pr_warn_once(MDS_MSG_SMT);
		update_mds_branch_idle();
		break;
	case MDS_MITIGATION_OFF:
		break;
	}

	switch (taa_mitigation) {
	case TAA_MITIGATION_VERW:
	case TAA_MITIGATION_UCODE_NEEDED:
		if (sched_smt_active())
			pr_warn_once(TAA_MSG_SMT);
		break;
	case TAA_MITIGATION_TSX_DISABLED:
	case TAA_MITIGATION_OFF:
		break;
	}

	switch (mmio_mitigation) {
	case MMIO_MITIGATION_VERW:
	case MMIO_MITIGATION_UCODE_NEEDED:
		if (sched_smt_active())
			pr_warn_once(MMIO_MSG_SMT);
		break;
	case MMIO_MITIGATION_OFF:
		break;
	}

	mutex_unlock(&spec_ctrl_mutex);
}

#undef pr_fmt
#define pr_fmt(fmt)	"Speculative Store Bypass: " fmt

static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE;

/* The kernel command line selection */
enum ssb_mitigation_cmd {
	SPEC_STORE_BYPASS_CMD_NONE,
	SPEC_STORE_BYPASS_CMD_AUTO,
	SPEC_STORE_BYPASS_CMD_ON,
	SPEC_STORE_BYPASS_CMD_PRCTL,
	SPEC_STORE_BYPASS_CMD_SECCOMP,
};

static const char * const ssb_strings[] = {
	[SPEC_STORE_BYPASS_NONE]	= "Vulnerable",
	[SPEC_STORE_BYPASS_DISABLE]	= "Mitigation: Speculative Store Bypass disabled",
	[SPEC_STORE_BYPASS_PRCTL]	= "Mitigation: Speculative Store Bypass disabled via prctl",
	[SPEC_STORE_BYPASS_SECCOMP]	= "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
};

static const struct {
	const char *option;
	enum ssb_mitigation_cmd cmd;
} ssb_mitigation_options[] __initconst = {
	{ "auto",	SPEC_STORE_BYPASS_CMD_AUTO },    /* Platform decides */
	{ "on",		SPEC_STORE_BYPASS_CMD_ON },      /* Disable Speculative Store Bypass */
	{ "off",	SPEC_STORE_BYPASS_CMD_NONE },    /* Don't touch Speculative Store Bypass */
	{ "prctl",	SPEC_STORE_BYPASS_CMD_PRCTL },   /* Disable Speculative Store Bypass via prctl */
	{ "seccomp",	SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
};

static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
{
	enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
	char arg[20];
	int ret, i;

	if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable") ||
	    cpu_mitigations_off()) {
		return SPEC_STORE_BYPASS_CMD_NONE;
	} else {
		ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
					  arg, sizeof(arg));
		if (ret < 0)
			return SPEC_STORE_BYPASS_CMD_AUTO;

		for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
			if (!match_option(arg, ret, ssb_mitigation_options[i].option))
				continue;

			cmd = ssb_mitigation_options[i].cmd;
			break;
		}

		if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
			pr_err("unknown option (%s). Switching to AUTO select\n", arg);
			return SPEC_STORE_BYPASS_CMD_AUTO;
		}
	}

	return cmd;
}

static enum ssb_mitigation __init __ssb_select_mitigation(void)
{
	enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
	enum ssb_mitigation_cmd cmd;

	if (!boot_cpu_has(X86_FEATURE_SSBD))
		return mode;

	cmd = ssb_parse_cmdline();
	if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
	    (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
	     cmd == SPEC_STORE_BYPASS_CMD_AUTO))
		return mode;

	switch (cmd) {
	case SPEC_STORE_BYPASS_CMD_AUTO:
	case SPEC_STORE_BYPASS_CMD_SECCOMP:
		/*
		 * Choose prctl+seccomp as the default mode if seccomp is
		 * enabled.
		 */
		if (IS_ENABLED(CONFIG_SECCOMP))
			mode = SPEC_STORE_BYPASS_SECCOMP;
		else
			mode = SPEC_STORE_BYPASS_PRCTL;
		break;
	case SPEC_STORE_BYPASS_CMD_ON:
		mode = SPEC_STORE_BYPASS_DISABLE;
		break;
	case SPEC_STORE_BYPASS_CMD_PRCTL:
		mode = SPEC_STORE_BYPASS_PRCTL;
		break;
	case SPEC_STORE_BYPASS_CMD_NONE:
		break;
	}

	/*
	 * If SSBD is controlled by the SPEC_CTRL MSR, then set the proper
	 * bit in the mask to allow guests to use the mitigation even in the
	 * case where the host does not enable it.
	 */
	if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
	    static_cpu_has(X86_FEATURE_AMD_SSBD)) {
		x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
	}

	/*
	 * We have three CPU feature flags that are in play here:
	 *  - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
	 *  - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
	 *  - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
	 */
	if (mode == SPEC_STORE_BYPASS_DISABLE) {
		setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
		/*
		 * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may
		 * use a completely different MSR and bit dependent on family.
		 */
		if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
		    !static_cpu_has(X86_FEATURE_AMD_SSBD)) {
			x86_amd_ssb_disable();
		} else {
			x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
			wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
		}
	}

	return mode;
}

static void ssb_select_mitigation(void)
{
	ssb_mode = __ssb_select_mitigation();

	if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
		pr_info("%s\n", ssb_strings[ssb_mode]);
}

#undef pr_fmt
#define pr_fmt(fmt)	"Speculation prctl: " fmt

static void task_update_spec_tif(struct task_struct *tsk)
{
	/* Force the update of the real TIF bits */
	set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE);

	/*
	 * Immediately update the speculation control MSRs for the current
	 * task, but for a non-current task delay setting the CPU
	 * mitigation until it is scheduled next.
	 *
	 * This can only happen for SECCOMP mitigation. For PRCTL it's
	 * always the current task.
	 */
	if (tsk == current)
		speculation_ctrl_update_current();
}

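/* PR_SPEC_STORE_BYPASS handler for prctl(PR_SET_SPECULATION_CTRL) */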
static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
	    ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
		return -ENXIO;

	switch (ctrl) {
	case PR_SPEC_ENABLE:
		/* If speculation is force disabled, enable is not allowed */
		if (task_spec_ssb_force_disable(task))
			return -EPERM;
		task_clear_spec_ssb_disable(task);
		task_update_spec_tif(task);
		break;
	case PR_SPEC_DISABLE:
		task_set_spec_ssb_disable(task);
		task_update_spec_tif(task);
		break;
	case PR_SPEC_FORCE_DISABLE:
		task_set_spec_ssb_disable(task);
		task_set_spec_ssb_force_disable(task);
		task_update_spec_tif(task);
		break;
	default:
		return -ERANGE;
	}
	return 0;
}

static bool is_spec_ib_user_controlled(void)
{
	return spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL ||
		spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
		spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
		spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP;
}

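/* PR_SPEC_INDIRECT_BRANCH handler for prctl(PR_SET_SPECULATION_CTRL) */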
static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	switch (ctrl) {
	case PR_SPEC_ENABLE:
		if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
		    spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
			return 0;

		/*
		 * With strict mode for both IBPB and STIBP, the instruction
		 * code paths avoid checking this task flag and instead,
		 * unconditionally run the instruction. However, STIBP and IBPB
		 * are independent and either can be set to conditionally
		 * enabled regardless of the mode of the other.
		 *
		 * If either is set to conditional, allow the task flag to be
		 * updated, unless it was force-disabled by a previous prctl
		 * call. Currently, this is possible on an AMD CPU which has the
		 * feature X86_FEATURE_AMD_STIBP_ALWAYS_ON. In this case, if the
		 * kernel is booted with 'spectre_v2_user=seccomp', then
		 * spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP and
		 * spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED.
		 */
		if (!is_spec_ib_user_controlled() ||
		    task_spec_ib_force_disable(task))
			return -EPERM;

		task_clear_spec_ib_disable(task);
		task_update_spec_tif(task);
		break;
	case PR_SPEC_DISABLE:
	case PR_SPEC_FORCE_DISABLE:
		/*
		 * Indirect branch speculation is always allowed when
		 * mitigation is force disabled.
		 */
		if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
		    spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
			return -EPERM;

		if (!is_spec_ib_user_controlled())
			return 0;

		task_set_spec_ib_disable(task);
		if (ctrl == PR_SPEC_FORCE_DISABLE)
			task_set_spec_ib_force_disable(task);
		task_update_spec_tif(task);
		break;
	default:
		return -ERANGE;
	}
	return 0;
}

int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
			     unsigned long ctrl)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssb_prctl_set(task, ctrl);
	case PR_SPEC_INDIRECT_BRANCH:
		return ib_prctl_set(task, ctrl);
	default:
		return -ENODEV;
	}
}

#ifdef CONFIG_SECCOMP
void arch_seccomp_spec_mitigate(struct task_struct *task)
{
	if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
		ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
	if (spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
	    spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP)
		ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
}
#endif

static int ssb_prctl_get(struct task_struct *task)
{
	switch (ssb_mode) {
	case SPEC_STORE_BYPASS_DISABLE:
		return PR_SPEC_DISABLE;
	case SPEC_STORE_BYPASS_SECCOMP:
	case SPEC_STORE_BYPASS_PRCTL:
		if (task_spec_ssb_force_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
		if (task_spec_ssb_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
	default:
		if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
			return PR_SPEC_ENABLE;
		return PR_SPEC_NOT_AFFECTED;
	}
}

static int ib_prctl_get(struct task_struct *task)
{
	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
		return PR_SPEC_NOT_AFFECTED;

	if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
	    spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
		return PR_SPEC_ENABLE;
	else if (is_spec_ib_user_controlled()) {
		if (task_spec_ib_force_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
		if (task_spec_ib_disable(task))
			return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
	} else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
	    spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
	    spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
		return PR_SPEC_DISABLE;
	else
		return PR_SPEC_NOT_AFFECTED;
}

int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssb_prctl_get(task);
	case PR_SPEC_INDIRECT_BRANCH:
		return ib_prctl_get(task);
	default:
		return -ENODEV;
	}
}

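/*
 * Mirror the boot CPU's SPEC_CTRL base value and, if selected, the AMD SSB
 * disable on this CPU.
 */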
void x86_spec_ctrl_setup_ap(void)
{
	if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
		wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);

	if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
		x86_amd_ssb_disable();
}

bool itlb_multihit_kvm_mitigation;
EXPORT_SYMBOL_GPL(itlb_multihit_kvm_mitigation);

#undef pr_fmt
#define pr_fmt(fmt)	"L1TF: " fmt

/* Default mitigation for L1TF-affected CPUs */
enum l1tf_mitigations l1tf_mitigation __ro_after_init = L1TF_MITIGATION_FLUSH;
#if IS_ENABLED(CONFIG_KVM_INTEL)
EXPORT_SYMBOL_GPL(l1tf_mitigation);
#endif
enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);

/*
 * These CPUs all support 44bits physical address space internally in the
 * cache but CPUID can report a smaller number of physical address bits.
 *
 * The L1TF mitigation uses the top most address bit for the inversion of
 * non present PTEs. When the installed memory reaches into the top most
 * address bit due to memory holes, which has been observed on machines
 * which report 36bits physical address bits and have 32G RAM installed,
 * then the mitigation range check in l1tf_select_mitigation() triggers.
 * This is a false positive because the mitigation is still possible due to
 * the fact that the cache uses 44bit internally. Use the cache bits
 * instead of the reported physical bits and adjust them on the affected
 * machines to 44bit if the reported bits are less than 44.
 */
static void override_cache_bits(struct cpuinfo_x86 *c)
{
	if (c->x86 != 6)
		return;

	switch (c->x86_model) {
	case INTEL_FAM6_NEHALEM:
	case INTEL_FAM6_WESTMERE:
	case INTEL_FAM6_SANDYBRIDGE:
	case INTEL_FAM6_IVYBRIDGE:
	case INTEL_FAM6_HASWELL_CORE:
	case INTEL_FAM6_HASWELL_ULT:
	case INTEL_FAM6_HASWELL_GT3E:
	case INTEL_FAM6_BROADWELL_CORE:
	case INTEL_FAM6_BROADWELL_GT3E:
	case INTEL_FAM6_SKYLAKE_MOBILE:
	case INTEL_FAM6_SKYLAKE_DESKTOP:
	case INTEL_FAM6_KABYLAKE_MOBILE:
	case INTEL_FAM6_KABYLAKE_DESKTOP:
		if (c->x86_cache_bits < 44)
			c->x86_cache_bits = 44;
		break;
	}
}

static void __init l1tf_select_mitigation(void)
{
	u64 half_pa;

	if (!boot_cpu_has_bug(X86_BUG_L1TF))
		return;

	if (cpu_mitigations_off())
		l1tf_mitigation = L1TF_MITIGATION_OFF;
	else if (cpu_mitigations_auto_nosmt())
		l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;

	override_cache_bits(&boot_cpu_data);

	switch (l1tf_mitigation) {
	case L1TF_MITIGATION_OFF:
	case L1TF_MITIGATION_FLUSH_NOWARN:
	case L1TF_MITIGATION_FLUSH:
		break;
	case L1TF_MITIGATION_FLUSH_NOSMT:
	case L1TF_MITIGATION_FULL:
		cpu_smt_disable(false);
		break;
	case L1TF_MITIGATION_FULL_FORCE:
		cpu_smt_disable(true);
		break;
	}

#if CONFIG_PGTABLE_LEVELS == 2
	pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
	return;
#endif

	half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
	if (l1tf_mitigation != L1TF_MITIGATION_OFF &&
			e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
		pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
		pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
				half_pa);
		pr_info("However, doing so will make a part of your RAM unusable.\n");
		pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html might help you decide.\n");
		return;
	}

	setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
}

static int __init l1tf_cmdline(char *str)
{
	if (!boot_cpu_has_bug(X86_BUG_L1TF))
		return 0;

	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off"))
		l1tf_mitigation = L1TF_MITIGATION_OFF;
	else if (!strcmp(str, "flush,nowarn"))
		l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN;
	else if (!strcmp(str, "flush"))
		l1tf_mitigation = L1TF_MITIGATION_FLUSH;
	else if (!strcmp(str, "flush,nosmt"))
		l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
	else if (!strcmp(str, "full"))
		l1tf_mitigation = L1TF_MITIGATION_FULL;
	else if (!strcmp(str, "full,force"))
		l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE;

	return 0;
}
early_param("l1tf", l1tf_cmdline);

#undef pr_fmt
#define pr_fmt(fmt) fmt

#ifdef CONFIG_SYSFS

#define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"

#if IS_ENABLED(CONFIG_KVM_INTEL)
static const char * const l1tf_vmx_states[] = {
	[VMENTER_L1D_FLUSH_AUTO]		= "auto",
	[VMENTER_L1D_FLUSH_NEVER]		= "vulnerable",
	[VMENTER_L1D_FLUSH_COND]		= "conditional cache flushes",
	[VMENTER_L1D_FLUSH_ALWAYS]		= "cache flushes",
	[VMENTER_L1D_FLUSH_EPT_DISABLED]	= "EPT disabled",
	[VMENTER_L1D_FLUSH_NOT_REQUIRED]	= "flush not necessary"
};

static ssize_t l1tf_show_state(char *buf)
{
	if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO)
		return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);

	if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
	    (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
	     sched_smt_active())) {
		return sprintf(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
			       l1tf_vmx_states[l1tf_vmx_mitigation]);
	}

	return sprintf(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
		       l1tf_vmx_states[l1tf_vmx_mitigation],
		       sched_smt_active() ? "vulnerable" : "disabled");
}

static ssize_t itlb_multihit_show_state(char *buf)
{
	if (itlb_multihit_kvm_mitigation)
		return sprintf(buf, "KVM: Mitigation: Split huge pages\n");
	else
		return sprintf(buf, "KVM: Vulnerable\n");
}
#else
static ssize_t l1tf_show_state(char *buf)
{
	return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
}

static ssize_t itlb_multihit_show_state(char *buf)
{
	return sprintf(buf, "Processor vulnerable\n");
}
#endif

static ssize_t mds_show_state(char *buf)
{
	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
		return sprintf(buf, "%s; SMT Host state unknown\n",
			       mds_strings[mds_mitigation]);
	}

	if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) {
		return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
			       (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" :
			        sched_smt_active() ? "mitigated" : "disabled"));
	}

	return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
		       sched_smt_active() ? "vulnerable" : "disabled");
}

static ssize_t tsx_async_abort_show_state(char *buf)
{
	if ((taa_mitigation == TAA_MITIGATION_TSX_DISABLED) ||
	    (taa_mitigation == TAA_MITIGATION_OFF))
		return sprintf(buf, "%s\n", taa_strings[taa_mitigation]);

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
		return sprintf(buf, "%s; SMT Host state unknown\n",
			       taa_strings[taa_mitigation]);
	}

	return sprintf(buf, "%s; SMT %s\n", taa_strings[taa_mitigation],
		       sched_smt_active() ? "vulnerable" : "disabled");
}

static ssize_t mmio_stale_data_show_state(char *buf)
{
	if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
		return sysfs_emit(buf, "Unknown: No mitigations\n");

	if (mmio_mitigation == MMIO_MITIGATION_OFF)
		return sysfs_emit(buf, "%s\n", mmio_strings[mmio_mitigation]);

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
		return sysfs_emit(buf, "%s; SMT Host state unknown\n",
				  mmio_strings[mmio_mitigation]);
	}

	return sysfs_emit(buf, "%s; SMT %s\n", mmio_strings[mmio_mitigation],
			  sched_smt_active() ? "vulnerable" : "disabled");
}

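/* Helpers composing the STIBP/IBPB parts of the spectre_v2 sysfs string. */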
static char *stibp_state(void)
{
	if (spectre_v2_in_eibrs_mode(spectre_v2_enabled))
		return "";

	switch (spectre_v2_user_stibp) {
	case SPECTRE_V2_USER_NONE:
		return ", STIBP: disabled";
	case SPECTRE_V2_USER_STRICT:
		return ", STIBP: forced";
	case SPECTRE_V2_USER_STRICT_PREFERRED:
		return ", STIBP: always-on";
	case SPECTRE_V2_USER_PRCTL:
	case SPECTRE_V2_USER_SECCOMP:
		if (static_key_enabled(&switch_to_cond_stibp))
			return ", STIBP: conditional";
	}
	return "";
}

static char *ibpb_state(void)
{
	if (boot_cpu_has(X86_FEATURE_IBPB)) {
		if (static_key_enabled(&switch_mm_always_ibpb))
			return ", IBPB: always-on";
		if (static_key_enabled(&switch_mm_cond_ibpb))
			return ", IBPB: conditional";
		return ", IBPB: disabled";
	}
	return "";
}

static ssize_t spectre_v2_show_state(char *buf)
{
	if (spectre_v2_enabled == SPECTRE_V2_LFENCE)
		return sprintf(buf, "Vulnerable: LFENCE\n");

	if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
		return sprintf(buf, "Vulnerable: eIBRS with unprivileged eBPF\n");

	if (sched_smt_active() && unprivileged_ebpf_enabled() &&
	    spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
		return sprintf(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n");

	return sprintf(buf, "%s%s%s%s%s%s\n",
		       spectre_v2_strings[spectre_v2_enabled],
		       ibpb_state(),
		       boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
		       stibp_state(),
		       boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
		       spectre_v2_module_string());
}

static ssize_t srbds_show_state(char *buf)
{
	return sprintf(buf, "%s\n", srbds_strings[srbds_mitigation]);
}

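/* Common backend for the /sys/devices/system/cpu/vulnerabilities/* show functions below. */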
static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
			       char *buf, unsigned int bug)
{
	if (!boot_cpu_has_bug(bug))
		return sprintf(buf, "Not affected\n");

	switch (bug) {
	case X86_BUG_CPU_MELTDOWN:
		if (boot_cpu_has(X86_FEATURE_PTI))
			return sprintf(buf, "Mitigation: PTI\n");

		break;

	case X86_BUG_SPECTRE_V1:
		return sprintf(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]);

	case X86_BUG_SPECTRE_V2:
		return spectre_v2_show_state(buf);

	case X86_BUG_SPEC_STORE_BYPASS:
		return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);

	case X86_BUG_L1TF:
		if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
			return l1tf_show_state(buf);
		break;

	case X86_BUG_MDS:
		return mds_show_state(buf);

	case X86_BUG_TAA:
		return tsx_async_abort_show_state(buf);

	case X86_BUG_ITLB_MULTIHIT:
		return itlb_multihit_show_state(buf);

	case X86_BUG_SRBDS:
		return srbds_show_state(buf);

	case X86_BUG_MMIO_STALE_DATA:
	case X86_BUG_MMIO_UNKNOWN:
		return mmio_stale_data_show_state(buf);

	default:
		break;
	}

	return sprintf(buf, "Vulnerable\n");
}

ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
}

ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
}

ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
}

ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
}

ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_MDS);
}

ssize_t cpu_show_tsx_async_abort(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_TAA);
}

ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_ITLB_MULTIHIT);
}

ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS);
}

ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *attr, char *buf)
{
	if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
		return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_UNKNOWN);
	else
		return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA);
}
#endif