GNU Linux-libre 4.14.303-gnu1
arch/x86/kernel/cpu/bugs.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  Copyright (C) 1994  Linus Torvalds
4  *
5  *  Cyrix stuff, June 1998 by:
6  *      - Rafael R. Reilova (moved everything from head.S),
7  *        <rreilova@ececs.uc.edu>
8  *      - Channing Corn (tests & fixes),
9  *      - Andrew D. Balsa (code cleanup).
10  */
11 #include <linux/init.h>
12 #include <linux/utsname.h>
13 #include <linux/cpu.h>
14 #include <linux/module.h>
15 #include <linux/nospec.h>
16 #include <linux/prctl.h>
17 #include <linux/sched/smt.h>
18
19 #include <asm/spec-ctrl.h>
20 #include <asm/cmdline.h>
21 #include <asm/bugs.h>
22 #include <asm/processor.h>
23 #include <asm/processor-flags.h>
24 #include <asm/fpu/internal.h>
25 #include <asm/msr.h>
26 #include <asm/vmx.h>
27 #include <asm/paravirt.h>
28 #include <asm/alternative.h>
29 #include <asm/hypervisor.h>
30 #include <asm/pgtable.h>
31 #include <asm/set_memory.h>
32 #include <asm/intel-family.h>
33 #include <asm/e820/api.h>
34 #include <linux/bpf.h>
35
36 #include "cpu.h"
37
38 static void __init spectre_v1_select_mitigation(void);
39 static void __init spectre_v2_select_mitigation(void);
40 static void __init retbleed_select_mitigation(void);
41 static void __init spectre_v2_user_select_mitigation(void);
42 static void __init ssb_select_mitigation(void);
43 static void __init l1tf_select_mitigation(void);
44 static void __init mds_select_mitigation(void);
45 static void __init md_clear_update_mitigation(void);
46 static void __init md_clear_select_mitigation(void);
47 static void __init taa_select_mitigation(void);
48 static void __init mmio_select_mitigation(void);
49 static void __init srbds_select_mitigation(void);
50
51 /* The base value of the SPEC_CTRL MSR without task-specific bits set */
52 u64 x86_spec_ctrl_base;
53 EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
54
55 /* The current value of the SPEC_CTRL MSR with task-specific bits set */
56 DEFINE_PER_CPU(u64, x86_spec_ctrl_current);
57 EXPORT_SYMBOL_GPL(x86_spec_ctrl_current);
58
59 static DEFINE_MUTEX(spec_ctrl_mutex);
60
61 /* Update SPEC_CTRL MSR and its cached copy unconditionally */
62 static void update_spec_ctrl(u64 val)
63 {
64         this_cpu_write(x86_spec_ctrl_current, val);
65         wrmsrl(MSR_IA32_SPEC_CTRL, val);
66 }
67
68 /*
69  * Keep track of the SPEC_CTRL MSR value for the current task, which may differ
70  * from x86_spec_ctrl_base due to STIBP/SSB in __speculation_ctrl_update().
71  */
72 void update_spec_ctrl_cond(u64 val)
73 {
74         if (this_cpu_read(x86_spec_ctrl_current) == val)
75                 return;
76
77         this_cpu_write(x86_spec_ctrl_current, val);
78
79         /*
80          * When KERNEL_IBRS this MSR is written on return-to-user, unless
81          * forced the update can be delayed until that time.
82          */
83         if (!cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS))
84                 wrmsrl(MSR_IA32_SPEC_CTRL, val);
85 }
86
87 u64 spec_ctrl_current(void)
88 {
89         return this_cpu_read(x86_spec_ctrl_current);
90 }
91 EXPORT_SYMBOL_GPL(spec_ctrl_current);
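
/*
 * Illustrative userspace sketch (not part of this file): the per-CPU value
 * cached above mirrors MSR_IA32_SPEC_CTRL (0x48), whose low bits are IBRS
 * (bit 0), STIBP (bit 1) and SSBD (bit 2).  Assuming the msr module is
 * loaded and the program runs as root, the raw MSR can be read through
 * /dev/cpu/0/msr:
 */
#include <fcntl.h>
#include <inttypes.h>
#include <stdio.h>
#include <unistd.h>

#define MSR_IA32_SPEC_CTRL 0x48

int main(void)
{
	uint64_t val;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0 || pread(fd, &val, sizeof(val), MSR_IA32_SPEC_CTRL) != sizeof(val)) {
		perror("MSR_IA32_SPEC_CTRL");
		return 1;
	}
	printf("SPEC_CTRL=%#" PRIx64 " IBRS=%d STIBP=%d SSBD=%d\n", val,
	       (int)(val & 1), (int)((val >> 1) & 1), (int)((val >> 2) & 1));
	close(fd);
	return 0;
}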
92
93 /*
94  * AMD specific MSR info for Speculative Store Bypass control.
95  * x86_amd_ls_cfg_ssbd_mask is initialized in identify_boot_cpu().
96  */
97 u64 __ro_after_init x86_amd_ls_cfg_base;
98 u64 __ro_after_init x86_amd_ls_cfg_ssbd_mask;
99
100 /* Control conditional STIBP in switch_to() */
101 DEFINE_STATIC_KEY_FALSE(switch_to_cond_stibp);
102 /* Control conditional IBPB in switch_mm() */
103 DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
104 /* Control unconditional IBPB in switch_mm() */
105 DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
106
107 /* Control MDS CPU buffer clear before returning to user space */
108 DEFINE_STATIC_KEY_FALSE(mds_user_clear);
109 EXPORT_SYMBOL_GPL(mds_user_clear);
110 /* Control MDS CPU buffer clear before idling (halt, mwait) */
111 DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
112 EXPORT_SYMBOL_GPL(mds_idle_clear);
113
114 /* Controls CPU Fill buffer clear before KVM guest MMIO accesses */
115 DEFINE_STATIC_KEY_FALSE(mmio_stale_data_clear);
116 EXPORT_SYMBOL_GPL(mmio_stale_data_clear);
117
118 void __init check_bugs(void)
119 {
120         identify_boot_cpu();
121
122         /*
123          * identify_boot_cpu() initialized SMT support information, let the
124          * core code know.
125          */
126         cpu_smt_check_topology();
127
128         if (!IS_ENABLED(CONFIG_SMP)) {
129                 pr_info("CPU: ");
130                 print_cpu_info(&boot_cpu_data);
131         }
132
133         /*
134          * Read the SPEC_CTRL MSR to account for reserved bits which may
135          * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD
136          * init code as it is not enumerated and depends on the family.
137          */
138         if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
139                 rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
140
141         /* Select the proper CPU mitigations before patching alternatives: */
142         spectre_v1_select_mitigation();
143         spectre_v2_select_mitigation();
144         /*
145          * retbleed_select_mitigation() relies on the state set by
146          * spectre_v2_select_mitigation(); specifically it wants to know about
147          * spectre_v2=ibrs.
148          */
149         retbleed_select_mitigation();
150         /*
151          * spectre_v2_user_select_mitigation() relies on the state set by
152          * retbleed_select_mitigation(); specifically the STIBP selection is
153          * forced for UNRET.
154          */
155         spectre_v2_user_select_mitigation();
156         ssb_select_mitigation();
157         l1tf_select_mitigation();
158         md_clear_select_mitigation();
159         srbds_select_mitigation();
160
161         arch_smt_update();
162
163 #ifdef CONFIG_X86_32
164         /*
165          * Check whether we are able to run this kernel safely on SMP.
166          *
167          * - i386 is no longer supported.
168          * - In order to run on anything without a TSC, we need to be
169          *   compiled for an i486.
170          */
171         if (boot_cpu_data.x86 < 4)
172                 panic("Kernel requires i486+ for 'invlpg' and other features");
173
174         init_utsname()->machine[1] =
175                 '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
176         alternative_instructions();
177
178         fpu__init_check_bugs();
179 #else /* CONFIG_X86_64 */
180         alternative_instructions();
181
182         /*
183          * Make sure the first 2MB area is not mapped by huge pages.
184          * There are typically fixed-size MTRRs in there, and overlapping
185          * MTRRs into large pages causes slowdowns.
186          *
187          * Right now we don't do that with gbpages because there seems to be
188          * very little benefit in that case.
189          */
190         if (!direct_gbpages)
191                 set_memory_4k((unsigned long)__va(0), 1);
192 #endif
193 }
194
195 /*
196  * NOTE: For VMX, this function is not called in the vmexit path.
197  * It uses vmx_spec_ctrl_restore_host() instead.
198  */
199 void
200 x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
201 {
202         u64 msrval, guestval = guest_spec_ctrl, hostval = spec_ctrl_current();
203         struct thread_info *ti = current_thread_info();
204
205         if (static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL)) {
206                 if (hostval != guestval) {
207                         msrval = setguest ? guestval : hostval;
208                         wrmsrl(MSR_IA32_SPEC_CTRL, msrval);
209                 }
210         }
211
212         /*
213          * If SSBD is not handled in MSR_SPEC_CTRL on AMD, update
214          * MSR_AMD64_LS_CFG or MSR_VIRT_SPEC_CTRL if supported.
215          */
216         if (!static_cpu_has(X86_FEATURE_LS_CFG_SSBD) &&
217             !static_cpu_has(X86_FEATURE_VIRT_SSBD))
218                 return;
219
220         /*
221          * If the host has SSBD mitigation enabled, force it in the host's
222          * virtual MSR value. If it is not permanently enabled, evaluate
223          * current's TIF_SSBD thread flag.
224          */
225         if (static_cpu_has(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE))
226                 hostval = SPEC_CTRL_SSBD;
227         else
228                 hostval = ssbd_tif_to_spec_ctrl(ti->flags);
229
230         /* Sanitize the guest value */
231         guestval = guest_virt_spec_ctrl & SPEC_CTRL_SSBD;
232
233         if (hostval != guestval) {
234                 unsigned long tif;
235
236                 tif = setguest ? ssbd_spec_ctrl_to_tif(guestval) :
237                                  ssbd_spec_ctrl_to_tif(hostval);
238
239                 speculation_ctrl_update(tif);
240         }
241 }
242 EXPORT_SYMBOL_GPL(x86_virt_spec_ctrl);
243
244 static void x86_amd_ssb_disable(void)
245 {
246         u64 msrval = x86_amd_ls_cfg_base | x86_amd_ls_cfg_ssbd_mask;
247
248         if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
249                 wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, SPEC_CTRL_SSBD);
250         else if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
251                 wrmsrl(MSR_AMD64_LS_CFG, msrval);
252 }
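
/*
 * Illustrative userspace sketch (not part of this file): the per-task SSBD
 * state managed via TIF_SSBD above is exposed to userspace through
 * PR_GET_SPECULATION_CTRL / PR_SET_SPECULATION_CTRL.  A minimal query of the
 * store-bypass and indirect-branch controls, assuming the PR_SPEC_* constants
 * are provided by <linux/prctl.h> (Linux 4.17+ for store bypass, 4.20+ for
 * indirect branch):
 */
#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

static void show(const char *name, int which)
{
	int st = prctl(PR_GET_SPECULATION_CTRL, which, 0, 0, 0);

	if (st < 0) {
		perror(name);
	} else if (st == PR_SPEC_NOT_AFFECTED) {
		printf("%s: not affected\n", name);
	} else {
		printf("%s:%s%s%s\n", name,
		       (st & PR_SPEC_PRCTL)   ? " prctl-controllable" : "",
		       (st & PR_SPEC_ENABLE)  ? " speculation-enabled" : "",
		       (st & PR_SPEC_DISABLE) ? " speculation-disabled" : "");
	}
}

int main(void)
{
	show("store bypass", PR_SPEC_STORE_BYPASS);
	show("indirect branch", PR_SPEC_INDIRECT_BRANCH);
	return 0;
}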
253
254 #undef pr_fmt
255 #define pr_fmt(fmt)     "MDS: " fmt
256
257 /* Default mitigation for MDS-affected CPUs */
258 static enum mds_mitigations mds_mitigation __ro_after_init = MDS_MITIGATION_FULL;
259 static bool mds_nosmt __ro_after_init = false;
260
261 static const char * const mds_strings[] = {
262         [MDS_MITIGATION_OFF]    = "Vulnerable",
263         [MDS_MITIGATION_FULL]   = "Mitigation: Clear CPU buffers",
264         [MDS_MITIGATION_VMWERV] = "Vulnerable: Clear CPU buffers attempted, no microcode",
265 };
266
267 static void __init mds_select_mitigation(void)
268 {
269         if (!boot_cpu_has_bug(X86_BUG_MDS) || cpu_mitigations_off()) {
270                 mds_mitigation = MDS_MITIGATION_OFF;
271                 return;
272         }
273
274         if (mds_mitigation == MDS_MITIGATION_FULL) {
275                 if (!boot_cpu_has(X86_FEATURE_MD_CLEAR))
276                         mds_mitigation = MDS_MITIGATION_VMWERV;
277
278                 static_branch_enable(&mds_user_clear);
279
280                 if (!boot_cpu_has(X86_BUG_MSBDS_ONLY) &&
281                     (mds_nosmt || cpu_mitigations_auto_nosmt()))
282                         cpu_smt_disable(false);
283         }
284 }
285
286 static int __init mds_cmdline(char *str)
287 {
288         if (!boot_cpu_has_bug(X86_BUG_MDS))
289                 return 0;
290
291         if (!str)
292                 return -EINVAL;
293
294         if (!strcmp(str, "off"))
295                 mds_mitigation = MDS_MITIGATION_OFF;
296         else if (!strcmp(str, "full"))
297                 mds_mitigation = MDS_MITIGATION_FULL;
298         else if (!strcmp(str, "full,nosmt")) {
299                 mds_mitigation = MDS_MITIGATION_FULL;
300                 mds_nosmt = true;
301         }
302
303         return 0;
304 }
305 early_param("mds", mds_cmdline);
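
/*
 * Illustrative userspace sketch (not part of this file): mds= above, like the
 * other early_param() handlers in this file, is consumed from the boot command
 * line.  The switches a running system was booted with can be checked by
 * scanning /proc/cmdline:
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[4096];
	FILE *f = fopen("/proc/cmdline", "r");

	if (!f || !fgets(buf, sizeof(buf), f)) {
		perror("/proc/cmdline");
		return 1;
	}
	fclose(f);

	/* Print the MDS/TAA switches plus the global mitigations= knob. */
	for (char *tok = strtok(buf, " \n"); tok; tok = strtok(NULL, " \n")) {
		if (!strncmp(tok, "mds=", 4) ||
		    !strncmp(tok, "tsx_async_abort=", 16) ||
		    !strncmp(tok, "mitigations=", 12))
			printf("%s\n", tok);
	}
	return 0;
}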
306
307 #undef pr_fmt
308 #define pr_fmt(fmt)     "TAA: " fmt
309
310 /* Default mitigation for TAA-affected CPUs */
311 static enum taa_mitigations taa_mitigation __ro_after_init = TAA_MITIGATION_VERW;
312 static bool taa_nosmt __ro_after_init;
313
314 static const char * const taa_strings[] = {
315         [TAA_MITIGATION_OFF]            = "Vulnerable",
316         [TAA_MITIGATION_UCODE_NEEDED]   = "Vulnerable: Clear CPU buffers attempted, no microcode",
317         [TAA_MITIGATION_VERW]           = "Mitigation: Clear CPU buffers",
318         [TAA_MITIGATION_TSX_DISABLED]   = "Mitigation: TSX disabled",
319 };
320
321 static void __init taa_select_mitigation(void)
322 {
323         u64 ia32_cap;
324
325         if (!boot_cpu_has_bug(X86_BUG_TAA)) {
326                 taa_mitigation = TAA_MITIGATION_OFF;
327                 return;
328         }
329
330         /* TSX previously disabled by tsx=off */
331         if (!boot_cpu_has(X86_FEATURE_RTM)) {
332                 taa_mitigation = TAA_MITIGATION_TSX_DISABLED;
333                 return;
334         }
335
336         if (cpu_mitigations_off()) {
337                 taa_mitigation = TAA_MITIGATION_OFF;
338                 return;
339         }
340
341         /*
342          * TAA mitigation via VERW is turned off if both
343          * tsx_async_abort=off and mds=off are specified.
344          */
345         if (taa_mitigation == TAA_MITIGATION_OFF &&
346             mds_mitigation == MDS_MITIGATION_OFF)
347                 return;
348
349         if (boot_cpu_has(X86_FEATURE_MD_CLEAR))
350                 taa_mitigation = TAA_MITIGATION_VERW;
351         else
352                 taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
353
354         /*
355          * VERW doesn't clear the CPU buffers when MD_CLEAR=1 and MDS_NO=1.
356          * A microcode update fixes this behavior to clear CPU buffers. It also
357          * adds support for MSR_IA32_TSX_CTRL which is enumerated by the
358          * ARCH_CAP_TSX_CTRL_MSR bit.
359          *
360          * On MDS_NO=1 CPUs, if ARCH_CAP_TSX_CTRL_MSR is not set, a
361          * microcode update is required.
362          */
363         ia32_cap = x86_read_arch_cap_msr();
364         if ( (ia32_cap & ARCH_CAP_MDS_NO) &&
365             !(ia32_cap & ARCH_CAP_TSX_CTRL_MSR))
366                 taa_mitigation = TAA_MITIGATION_UCODE_NEEDED;
367
368         /*
369          * TSX is enabled, so select the alternate mitigation for TAA, which
370          * is the same as MDS. Enable the MDS static branch to clear CPU buffers.
371          *
372          * For guests that can't determine whether the correct microcode is
373          * present on host, enable the mitigation for UCODE_NEEDED as well.
374          */
375         static_branch_enable(&mds_user_clear);
376
377         if (taa_nosmt || cpu_mitigations_auto_nosmt())
378                 cpu_smt_disable(false);
379 }
380
381 static int __init tsx_async_abort_parse_cmdline(char *str)
382 {
383         if (!boot_cpu_has_bug(X86_BUG_TAA))
384                 return 0;
385
386         if (!str)
387                 return -EINVAL;
388
389         if (!strcmp(str, "off")) {
390                 taa_mitigation = TAA_MITIGATION_OFF;
391         } else if (!strcmp(str, "full")) {
392                 taa_mitigation = TAA_MITIGATION_VERW;
393         } else if (!strcmp(str, "full,nosmt")) {
394                 taa_mitigation = TAA_MITIGATION_VERW;
395                 taa_nosmt = true;
396         }
397
398         return 0;
399 }
400 early_param("tsx_async_abort", tsx_async_abort_parse_cmdline);
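
/*
 * Illustrative userspace sketch (not part of this file): the MDS_NO / TSX_CTRL
 * test above reads IA32_ARCH_CAPABILITIES (MSR 0x10a).  The same bits can be
 * inspected from userspace via /dev/cpu/0/msr, as in the SPEC_CTRL sketch
 * earlier (root and the msr module required; the read simply fails on CPUs
 * that do not expose the MSR).  Bit positions follow the ARCH_CAP_*
 * definitions: MDS_NO is bit 5, TSX_CTRL is bit 7, TAA_NO is bit 8.
 */
#include <fcntl.h>
#include <inttypes.h>
#include <stdio.h>
#include <unistd.h>

#define MSR_IA32_ARCH_CAPABILITIES	0x10a
#define ARCH_CAP_MDS_NO			(1ULL << 5)
#define ARCH_CAP_TSX_CTRL_MSR		(1ULL << 7)
#define ARCH_CAP_TAA_NO			(1ULL << 8)

int main(void)
{
	uint64_t cap;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0 || pread(fd, &cap, sizeof(cap), MSR_IA32_ARCH_CAPABILITIES) != sizeof(cap)) {
		perror("IA32_ARCH_CAPABILITIES");
		return 1;
	}
	printf("MDS_NO=%d TSX_CTRL=%d TAA_NO=%d -> %s\n",
	       !!(cap & ARCH_CAP_MDS_NO), !!(cap & ARCH_CAP_TSX_CTRL_MSR),
	       !!(cap & ARCH_CAP_TAA_NO),
	       (cap & ARCH_CAP_MDS_NO) && !(cap & ARCH_CAP_TSX_CTRL_MSR) ?
	       "microcode update needed for VERW-based TAA mitigation" :
	       "VERW-based mitigation usable (if TAA-affected)");
	close(fd);
	return 0;
}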
401
402 #undef pr_fmt
403 #define pr_fmt(fmt)     "MMIO Stale Data: " fmt
404
405 enum mmio_mitigations {
406         MMIO_MITIGATION_OFF,
407         MMIO_MITIGATION_UCODE_NEEDED,
408         MMIO_MITIGATION_VERW,
409 };
410
411 /* Default mitigation for Processor MMIO Stale Data vulnerabilities */
412 static enum mmio_mitigations mmio_mitigation __ro_after_init = MMIO_MITIGATION_VERW;
413 static bool mmio_nosmt __ro_after_init = false;
414
415 static const char * const mmio_strings[] = {
416         [MMIO_MITIGATION_OFF]           = "Vulnerable",
417         [MMIO_MITIGATION_UCODE_NEEDED]  = "Vulnerable: Clear CPU buffers attempted, no microcode",
418         [MMIO_MITIGATION_VERW]          = "Mitigation: Clear CPU buffers",
419 };
420
421 static void __init mmio_select_mitigation(void)
422 {
423         u64 ia32_cap;
424
425         if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA) ||
426              boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN) ||
427              cpu_mitigations_off()) {
428                 mmio_mitigation = MMIO_MITIGATION_OFF;
429                 return;
430         }
431
432         if (mmio_mitigation == MMIO_MITIGATION_OFF)
433                 return;
434
435         ia32_cap = x86_read_arch_cap_msr();
436
437         /*
438          * Enable CPU buffer clear mitigation for host and VMM, if also affected
439          * by MDS or TAA. Otherwise, enable mitigation for VMM only.
440          */
441         if (boot_cpu_has_bug(X86_BUG_MDS) || (boot_cpu_has_bug(X86_BUG_TAA) &&
442                                               boot_cpu_has(X86_FEATURE_RTM)))
443                 static_branch_enable(&mds_user_clear);
444         else
445                 static_branch_enable(&mmio_stale_data_clear);
446
447         /*
448          * If Processor-MMIO-Stale-Data bug is present and Fill Buffer data can
449          * be propagated to uncore buffers, clearing the Fill buffers on idle
450          * is required irrespective of SMT state.
451          */
452         if (!(ia32_cap & ARCH_CAP_FBSDP_NO))
453                 static_branch_enable(&mds_idle_clear);
454
455         /*
456          * Check if the system has the right microcode.
457          *
458          * CPU Fill buffer clear mitigation is enumerated by either an explicit
459          * FB_CLEAR or by the presence of both MD_CLEAR and L1D_FLUSH on MDS
460          * affected systems.
461          */
462         if ((ia32_cap & ARCH_CAP_FB_CLEAR) ||
463             (boot_cpu_has(X86_FEATURE_MD_CLEAR) &&
464              boot_cpu_has(X86_FEATURE_FLUSH_L1D) &&
465              !(ia32_cap & ARCH_CAP_MDS_NO)))
466                 mmio_mitigation = MMIO_MITIGATION_VERW;
467         else
468                 mmio_mitigation = MMIO_MITIGATION_UCODE_NEEDED;
469
470         if (mmio_nosmt || cpu_mitigations_auto_nosmt())
471                 cpu_smt_disable(false);
472 }
473
474 static int __init mmio_stale_data_parse_cmdline(char *str)
475 {
476         if (!boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
477                 return 0;
478
479         if (!str)
480                 return -EINVAL;
481
482         if (!strcmp(str, "off")) {
483                 mmio_mitigation = MMIO_MITIGATION_OFF;
484         } else if (!strcmp(str, "full")) {
485                 mmio_mitigation = MMIO_MITIGATION_VERW;
486         } else if (!strcmp(str, "full,nosmt")) {
487                 mmio_mitigation = MMIO_MITIGATION_VERW;
488                 mmio_nosmt = true;
489         }
490
491         return 0;
492 }
493 early_param("mmio_stale_data", mmio_stale_data_parse_cmdline);
494
495 #undef pr_fmt
496 #define pr_fmt(fmt)     "" fmt
497
498 static void __init md_clear_update_mitigation(void)
499 {
500         if (cpu_mitigations_off())
501                 return;
502
503         if (!static_key_enabled(&mds_user_clear))
504                 goto out;
505
506         /*
507          * mds_user_clear is now enabled. Update MDS, TAA and MMIO Stale Data
508          * mitigation, if necessary.
509          * mitigations, if necessary.
510         if (mds_mitigation == MDS_MITIGATION_OFF &&
511             boot_cpu_has_bug(X86_BUG_MDS)) {
512                 mds_mitigation = MDS_MITIGATION_FULL;
513                 mds_select_mitigation();
514         }
515         if (taa_mitigation == TAA_MITIGATION_OFF &&
516             boot_cpu_has_bug(X86_BUG_TAA)) {
517                 taa_mitigation = TAA_MITIGATION_VERW;
518                 taa_select_mitigation();
519         }
520         if (mmio_mitigation == MMIO_MITIGATION_OFF &&
521             boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) {
522                 mmio_mitigation = MMIO_MITIGATION_VERW;
523                 mmio_select_mitigation();
524         }
525 out:
526         if (boot_cpu_has_bug(X86_BUG_MDS))
527                 pr_info("MDS: %s\n", mds_strings[mds_mitigation]);
528         if (boot_cpu_has_bug(X86_BUG_TAA))
529                 pr_info("TAA: %s\n", taa_strings[taa_mitigation]);
530         if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
531                 pr_info("MMIO Stale Data: %s\n", mmio_strings[mmio_mitigation]);
532         else if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
533                 pr_info("MMIO Stale Data: Unknown: No mitigations\n");
534 }
535
536 static void __init md_clear_select_mitigation(void)
537 {
538         mds_select_mitigation();
539         taa_select_mitigation();
540         mmio_select_mitigation();
541
542         /*
543          * As the MDS, TAA and MMIO Stale Data mitigations are inter-related,
544          * update and print them only after all three mitigation selections
545          * have been made.
546          */
547         md_clear_update_mitigation();
548 }
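
/*
 * Illustrative userspace sketch (not part of this file): the strings selected
 * and logged above are also what the kernel reports under
 * /sys/devices/system/cpu/vulnerabilities/ (via the cpu_show_*() sysfs
 * handlers further down in bugs.c).  Dumping that directory shows the outcome
 * of this selection logic on a running system:
 */
#include <dirent.h>
#include <stdio.h>

int main(void)
{
	const char *dir = "/sys/devices/system/cpu/vulnerabilities";
	char path[512], line[256];
	struct dirent *de;
	DIR *d = opendir(dir);

	if (!d) {
		perror(dir);
		return 1;
	}
	while ((de = readdir(d))) {
		FILE *f;

		if (de->d_name[0] == '.')
			continue;
		snprintf(path, sizeof(path), "%s/%s", dir, de->d_name);
		f = fopen(path, "r");
		if (f && fgets(line, sizeof(line), f))
			printf("%-24s %s", de->d_name, line);
		if (f)
			fclose(f);
	}
	closedir(d);
	return 0;
}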
549
550 #undef pr_fmt
551 #define pr_fmt(fmt)     "SRBDS: " fmt
552
553 enum srbds_mitigations {
554         SRBDS_MITIGATION_OFF,
555         SRBDS_MITIGATION_UCODE_NEEDED,
556         SRBDS_MITIGATION_FULL,
557         SRBDS_MITIGATION_TSX_OFF,
558         SRBDS_MITIGATION_HYPERVISOR,
559 };
560
561 static enum srbds_mitigations srbds_mitigation __ro_after_init = SRBDS_MITIGATION_FULL;
562
563 static const char * const srbds_strings[] = {
564         [SRBDS_MITIGATION_OFF]          = "Vulnerable",
565         [SRBDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
566         [SRBDS_MITIGATION_FULL]         = "Mitigation: Microcode",
567         [SRBDS_MITIGATION_TSX_OFF]      = "Mitigation: TSX disabled",
568         [SRBDS_MITIGATION_HYPERVISOR]   = "Unknown: Dependent on hypervisor status",
569 };
570
571 static bool srbds_off;
572
573 void update_srbds_msr(void)
574 {
575         u64 mcu_ctrl;
576
577         if (!boot_cpu_has_bug(X86_BUG_SRBDS))
578                 return;
579
580         if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
581                 return;
582
583         if (srbds_mitigation == SRBDS_MITIGATION_UCODE_NEEDED)
584                 return;
585
586         rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
587
588         switch (srbds_mitigation) {
589         case SRBDS_MITIGATION_OFF:
590         case SRBDS_MITIGATION_TSX_OFF:
591                 mcu_ctrl |= RNGDS_MITG_DIS;
592                 break;
593         case SRBDS_MITIGATION_FULL:
594                 mcu_ctrl &= ~RNGDS_MITG_DIS;
595                 break;
596         default:
597                 break;
598         }
599
600         wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl);
601 }
602
603 static void __init srbds_select_mitigation(void)
604 {
605         u64 ia32_cap;
606
607         if (!boot_cpu_has_bug(X86_BUG_SRBDS))
608                 return;
609
610         /*
611          * Check to see if this is one of the MDS_NO systems supporting TSX that
612          * are only exposed to SRBDS when TSX is enabled or when CPU is affected
613          * by Processor MMIO Stale Data vulnerability.
614          */
615         ia32_cap = x86_read_arch_cap_msr();
616         if ((ia32_cap & ARCH_CAP_MDS_NO) && !boot_cpu_has(X86_FEATURE_RTM) &&
617             !boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA))
618                 srbds_mitigation = SRBDS_MITIGATION_TSX_OFF;
619         else if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
620                 srbds_mitigation = SRBDS_MITIGATION_HYPERVISOR;
621         else if (!boot_cpu_has(X86_FEATURE_SRBDS_CTRL))
622                 srbds_mitigation = SRBDS_MITIGATION_UCODE_NEEDED;
623         else if (cpu_mitigations_off() || srbds_off)
624                 srbds_mitigation = SRBDS_MITIGATION_OFF;
625
626         update_srbds_msr();
627         pr_info("%s\n", srbds_strings[srbds_mitigation]);
628 }
629
630 static int __init srbds_parse_cmdline(char *str)
631 {
632         if (!str)
633                 return -EINVAL;
634
635         if (!boot_cpu_has_bug(X86_BUG_SRBDS))
636                 return 0;
637
638         srbds_off = !strcmp(str, "off");
639         return 0;
640 }
641 early_param("srbds", srbds_parse_cmdline);
642
643 #undef pr_fmt
644 #define pr_fmt(fmt)     "Spectre V1 : " fmt
645
646 enum spectre_v1_mitigation {
647         SPECTRE_V1_MITIGATION_NONE,
648         SPECTRE_V1_MITIGATION_AUTO,
649 };
650
651 static enum spectre_v1_mitigation spectre_v1_mitigation __ro_after_init =
652         SPECTRE_V1_MITIGATION_AUTO;
653
654 static const char * const spectre_v1_strings[] = {
655         [SPECTRE_V1_MITIGATION_NONE] = "Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers",
656         [SPECTRE_V1_MITIGATION_AUTO] = "Mitigation: usercopy/swapgs barriers and __user pointer sanitization",
657 };
658
659 /*
660  * Does SMAP provide full mitigation against speculative kernel access to
661  * userspace?
662  */
663 static bool smap_works_speculatively(void)
664 {
665         if (!boot_cpu_has(X86_FEATURE_SMAP))
666                 return false;
667
668         /*
669          * On CPUs which are vulnerable to Meltdown, SMAP does not
670          * prevent speculative access to user data in the L1 cache.
671          * Consider SMAP to be non-functional as a mitigation on these
672          * CPUs.
673          */
674         if (boot_cpu_has(X86_BUG_CPU_MELTDOWN))
675                 return false;
676
677         return true;
678 }
679
680 static void __init spectre_v1_select_mitigation(void)
681 {
682         if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V1) || cpu_mitigations_off()) {
683                 spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
684                 return;
685         }
686
687         if (spectre_v1_mitigation == SPECTRE_V1_MITIGATION_AUTO) {
688                 /*
689                  * With Spectre v1, a user can speculatively control either
690                  * path of a conditional swapgs with a user-controlled GS
691                  * value.  The mitigation is to add lfences to both code paths.
692                  *
693                  * If FSGSBASE is enabled, the user can put a kernel address in
694                  * GS, in which case SMAP provides no protection.
695                  *
696                  * [ NOTE: Don't check for X86_FEATURE_FSGSBASE until the
697                  *         FSGSBASE enablement patches have been merged. ]
698                  *
699                  * If FSGSBASE is disabled, the user can only put a user space
700                  * address in GS.  That makes an attack harder, but still
701                  * possible if there's no SMAP protection.
702                  */
703                 if (!smap_works_speculatively()) {
704                         /*
705                          * Mitigation can be provided from SWAPGS itself or
706                          * PTI as the CR3 write in the Meltdown mitigation
707                          * is serializing.
708                          *
709                          * If neither is there, mitigate with an LFENCE to
710                          * stop speculation through swapgs.
711                          */
712                         if (boot_cpu_has_bug(X86_BUG_SWAPGS) &&
713                             !boot_cpu_has(X86_FEATURE_PTI))
714                                 setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_USER);
715
716                         /*
717                          * Enable lfences in the kernel entry (non-swapgs)
718                          * paths, to prevent user entry from speculatively
719                          * skipping swapgs.
720                          */
721                         setup_force_cpu_cap(X86_FEATURE_FENCE_SWAPGS_KERNEL);
722                 }
723         }
724
725         pr_info("%s\n", spectre_v1_strings[spectre_v1_mitigation]);
726 }
727
728 static int __init nospectre_v1_cmdline(char *str)
729 {
730         spectre_v1_mitigation = SPECTRE_V1_MITIGATION_NONE;
731         return 0;
732 }
733 early_param("nospectre_v1", nospectre_v1_cmdline);
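
/*
 * Illustrative userspace sketch (not part of this file): the "__user pointer
 * sanitization" named in spectre_v1_strings[] relies on index/pointer masking
 * (array_index_nospec() from <linux/nospec.h>), in addition to the swapgs
 * fencing selected above.  The mask computation below mirrors the generic
 * fallback of array_index_mask_nospec(); the table and index are made-up
 * example data:
 */
#include <stdio.h>

#define NBITS	(sizeof(unsigned long) * 8)

/* Mirrors the generic fallback: 0 when index >= size, ~0UL when index < size. */
static unsigned long index_mask_nospec(unsigned long index, unsigned long size)
{
	return ~(long)(index | (size - 1UL - index)) >> (NBITS - 1);
}

int main(int argc, char **argv)
{
	int table[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
	unsigned long idx = (unsigned long)argc - 1;	/* stands in for an untrusted index */

	(void)argv;
	if (idx < 8) {
		/*
		 * The CPU may speculate past the bounds check above; the mask
		 * forces idx to 0 on that misspeculated path, so no
		 * out-of-bounds data can be pulled into the cache.
		 */
		idx &= index_mask_nospec(idx, 8);
		printf("table[%lu] = %d\n", idx, table[idx]);
	}
	return 0;
}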
734
735 static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
736         SPECTRE_V2_NONE;
737
738 #undef pr_fmt
739 #define pr_fmt(fmt)     "RETBleed: " fmt
740
741 enum retbleed_mitigation {
742         RETBLEED_MITIGATION_NONE,
743         RETBLEED_MITIGATION_IBRS,
744         RETBLEED_MITIGATION_EIBRS,
745 };
746
747 enum retbleed_mitigation_cmd {
748         RETBLEED_CMD_OFF,
749         RETBLEED_CMD_AUTO
750 };
751
752 const char * const retbleed_strings[] = {
753         [RETBLEED_MITIGATION_NONE]      = "Vulnerable",
754         [RETBLEED_MITIGATION_IBRS]      = "Mitigation: IBRS",
755         [RETBLEED_MITIGATION_EIBRS]     = "Mitigation: Enhanced IBRS",
756 };
757
758 static enum retbleed_mitigation retbleed_mitigation __ro_after_init =
759         RETBLEED_MITIGATION_NONE;
760 static enum retbleed_mitigation_cmd retbleed_cmd __ro_after_init =
761         RETBLEED_CMD_AUTO;
762
763 static int __init retbleed_parse_cmdline(char *str)
764 {
765         if (!str)
766                 return -EINVAL;
767
768         if (!strcmp(str, "off"))
769                 retbleed_cmd = RETBLEED_CMD_OFF;
770         else if (!strcmp(str, "auto"))
771                 retbleed_cmd = RETBLEED_CMD_AUTO;
772         else
773                 pr_err("Unknown retbleed option (%s). Defaulting to 'auto'\n", str);
774
775         return 0;
776 }
777 early_param("retbleed", retbleed_parse_cmdline);
778
779 #define RETBLEED_INTEL_MSG "WARNING: Spectre v2 mitigation leaves CPU vulnerable to RETBleed attacks, data leaks possible!\n"
780
781 static void __init retbleed_select_mitigation(void)
782 {
783         if (!boot_cpu_has_bug(X86_BUG_RETBLEED) || cpu_mitigations_off())
784                 return;
785
786         switch (retbleed_cmd) {
787         case RETBLEED_CMD_OFF:
788                 return;
789
790         case RETBLEED_CMD_AUTO:
791         default:
792                 /*
793                  * The Intel mitigation (IBRS) was already selected in
794                  * spectre_v2_select_mitigation().
795                  */
796
797                 break;
798         }
799
800         switch (retbleed_mitigation) {
801         default:
802                 break;
803         }
804
805         /*
806          * On Intel, let IBRS take precedence, without overriding the effect
807          * of the retbleed= cmdline option.
808          */
809         if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
810                 switch (spectre_v2_enabled) {
811                 case SPECTRE_V2_IBRS:
812                         retbleed_mitigation = RETBLEED_MITIGATION_IBRS;
813                         break;
814                 case SPECTRE_V2_EIBRS:
815                 case SPECTRE_V2_EIBRS_RETPOLINE:
816                 case SPECTRE_V2_EIBRS_LFENCE:
817                         retbleed_mitigation = RETBLEED_MITIGATION_EIBRS;
818                         break;
819                 default:
820                         pr_err(RETBLEED_INTEL_MSG);
821                 }
822         }
823
824         pr_info("%s\n", retbleed_strings[retbleed_mitigation]);
825 }
826
827 #undef pr_fmt
828 #define pr_fmt(fmt)     "Spectre V2 : " fmt
829
830 static enum spectre_v2_user_mitigation spectre_v2_user_stibp __ro_after_init =
831         SPECTRE_V2_USER_NONE;
832 static enum spectre_v2_user_mitigation spectre_v2_user_ibpb __ro_after_init =
833         SPECTRE_V2_USER_NONE;
834
835 #ifdef CONFIG_RETPOLINE
836 static bool spectre_v2_bad_module;
837
838 bool retpoline_module_ok(bool has_retpoline)
839 {
840         if (spectre_v2_enabled == SPECTRE_V2_NONE || has_retpoline)
841                 return true;
842
843         pr_err("System may be vulnerable to spectre v2\n");
844         spectre_v2_bad_module = true;
845         return false;
846 }
847
848 static inline const char *spectre_v2_module_string(void)
849 {
850         return spectre_v2_bad_module ? " - vulnerable module loaded" : "";
851 }
852 #else
853 static inline const char *spectre_v2_module_string(void) { return ""; }
854 #endif
855
856 #define SPECTRE_V2_LFENCE_MSG "WARNING: LFENCE mitigation is not recommended for this CPU, data leaks possible!\n"
857 #define SPECTRE_V2_EIBRS_EBPF_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS on, data leaks possible via Spectre v2 BHB attacks!\n"
858 #define SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG "WARNING: Unprivileged eBPF is enabled with eIBRS+LFENCE mitigation and SMT, data leaks possible via Spectre v2 BHB attacks!\n"
859 #define SPECTRE_V2_IBRS_PERF_MSG "WARNING: IBRS mitigation selected on Enhanced IBRS CPU, this may cause unnecessary performance loss\n"
860
861 #ifdef CONFIG_BPF_SYSCALL
862 void unpriv_ebpf_notify(int new_state)
863 {
864         if (new_state)
865                 return;
866
867         /* Unprivileged eBPF is enabled */
868
869         switch (spectre_v2_enabled) {
870         case SPECTRE_V2_EIBRS:
871                 pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
872                 break;
873         case SPECTRE_V2_EIBRS_LFENCE:
874                 if (sched_smt_active())
875                         pr_err(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
876                 break;
877         default:
878                 break;
879         }
880 }
881 #endif
882
883 static inline bool match_option(const char *arg, int arglen, const char *opt)
884 {
885         int len = strlen(opt);
886
887         return len == arglen && !strncmp(arg, opt, len);
888 }
889
890 /* The kernel command line selection for spectre v2 */
891 enum spectre_v2_mitigation_cmd {
892         SPECTRE_V2_CMD_NONE,
893         SPECTRE_V2_CMD_AUTO,
894         SPECTRE_V2_CMD_FORCE,
895         SPECTRE_V2_CMD_RETPOLINE,
896         SPECTRE_V2_CMD_RETPOLINE_GENERIC,
897         SPECTRE_V2_CMD_RETPOLINE_LFENCE,
898         SPECTRE_V2_CMD_EIBRS,
899         SPECTRE_V2_CMD_EIBRS_RETPOLINE,
900         SPECTRE_V2_CMD_EIBRS_LFENCE,
901         SPECTRE_V2_CMD_IBRS,
902 };
903
904 enum spectre_v2_user_cmd {
905         SPECTRE_V2_USER_CMD_NONE,
906         SPECTRE_V2_USER_CMD_AUTO,
907         SPECTRE_V2_USER_CMD_FORCE,
908         SPECTRE_V2_USER_CMD_PRCTL,
909         SPECTRE_V2_USER_CMD_PRCTL_IBPB,
910         SPECTRE_V2_USER_CMD_SECCOMP,
911         SPECTRE_V2_USER_CMD_SECCOMP_IBPB,
912 };
913
914 static const char * const spectre_v2_user_strings[] = {
915         [SPECTRE_V2_USER_NONE]                  = "User space: Vulnerable",
916         [SPECTRE_V2_USER_STRICT]                = "User space: Mitigation: STIBP protection",
917         [SPECTRE_V2_USER_STRICT_PREFERRED]      = "User space: Mitigation: STIBP always-on protection",
918         [SPECTRE_V2_USER_PRCTL]                 = "User space: Mitigation: STIBP via prctl",
919         [SPECTRE_V2_USER_SECCOMP]               = "User space: Mitigation: STIBP via seccomp and prctl",
920 };
921
922 static const struct {
923         const char                      *option;
924         enum spectre_v2_user_cmd        cmd;
925         bool                            secure;
926 } v2_user_options[] __initconst = {
927         { "auto",               SPECTRE_V2_USER_CMD_AUTO,               false },
928         { "off",                SPECTRE_V2_USER_CMD_NONE,               false },
929         { "on",                 SPECTRE_V2_USER_CMD_FORCE,              true  },
930         { "prctl",              SPECTRE_V2_USER_CMD_PRCTL,              false },
931         { "prctl,ibpb",         SPECTRE_V2_USER_CMD_PRCTL_IBPB,         false },
932         { "seccomp",            SPECTRE_V2_USER_CMD_SECCOMP,            false },
933         { "seccomp,ibpb",       SPECTRE_V2_USER_CMD_SECCOMP_IBPB,       false },
934 };
935
936 static void __init spec_v2_user_print_cond(const char *reason, bool secure)
937 {
938         if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
939                 pr_info("spectre_v2_user=%s forced on command line.\n", reason);
940 }
941
942 static __ro_after_init enum spectre_v2_mitigation_cmd spectre_v2_cmd;
943
944 static enum spectre_v2_user_cmd __init
945 spectre_v2_parse_user_cmdline(void)
946 {
947         char arg[20];
948         int ret, i;
949
950         switch (spectre_v2_cmd) {
951         case SPECTRE_V2_CMD_NONE:
952                 return SPECTRE_V2_USER_CMD_NONE;
953         case SPECTRE_V2_CMD_FORCE:
954                 return SPECTRE_V2_USER_CMD_FORCE;
955         default:
956                 break;
957         }
958
959         ret = cmdline_find_option(boot_command_line, "spectre_v2_user",
960                                   arg, sizeof(arg));
961         if (ret < 0)
962                 return SPECTRE_V2_USER_CMD_AUTO;
963
964         for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) {
965                 if (match_option(arg, ret, v2_user_options[i].option)) {
966                         spec_v2_user_print_cond(v2_user_options[i].option,
967                                                 v2_user_options[i].secure);
968                         return v2_user_options[i].cmd;
969                 }
970         }
971
972         pr_err("Unknown user space protection option (%s). Switching to AUTO select\n", arg);
973         return SPECTRE_V2_USER_CMD_AUTO;
974 }
975
976 static inline bool spectre_v2_in_ibrs_mode(enum spectre_v2_mitigation mode)
977 {
978         return mode == SPECTRE_V2_IBRS ||
979                mode == SPECTRE_V2_EIBRS ||
980                mode == SPECTRE_V2_EIBRS_RETPOLINE ||
981                mode == SPECTRE_V2_EIBRS_LFENCE;
982 }
983
984 static void __init
985 spectre_v2_user_select_mitigation(void)
986 {
987         enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE;
988         bool smt_possible = IS_ENABLED(CONFIG_SMP);
989         enum spectre_v2_user_cmd cmd;
990
991         if (!boot_cpu_has(X86_FEATURE_IBPB) && !boot_cpu_has(X86_FEATURE_STIBP))
992                 return;
993
994         if (cpu_smt_control == CPU_SMT_FORCE_DISABLED ||
995             cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
996                 smt_possible = false;
997
998         cmd = spectre_v2_parse_user_cmdline();
999         switch (cmd) {
1000         case SPECTRE_V2_USER_CMD_NONE:
1001                 goto set_mode;
1002         case SPECTRE_V2_USER_CMD_FORCE:
1003                 mode = SPECTRE_V2_USER_STRICT;
1004                 break;
1005         case SPECTRE_V2_USER_CMD_PRCTL:
1006         case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
1007                 mode = SPECTRE_V2_USER_PRCTL;
1008                 break;
1009         case SPECTRE_V2_USER_CMD_AUTO:
1010         case SPECTRE_V2_USER_CMD_SECCOMP:
1011         case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
1012                 if (IS_ENABLED(CONFIG_SECCOMP))
1013                         mode = SPECTRE_V2_USER_SECCOMP;
1014                 else
1015                         mode = SPECTRE_V2_USER_PRCTL;
1016                 break;
1017         }
1018
1019         /* Initialize Indirect Branch Prediction Barrier */
1020         if (boot_cpu_has(X86_FEATURE_IBPB)) {
1021                 setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
1022
1023                 spectre_v2_user_ibpb = mode;
1024                 switch (cmd) {
1025                 case SPECTRE_V2_USER_CMD_FORCE:
1026                 case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
1027                 case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
1028                         static_branch_enable(&switch_mm_always_ibpb);
1029                         spectre_v2_user_ibpb = SPECTRE_V2_USER_STRICT;
1030                         break;
1031                 case SPECTRE_V2_USER_CMD_PRCTL:
1032                 case SPECTRE_V2_USER_CMD_AUTO:
1033                 case SPECTRE_V2_USER_CMD_SECCOMP:
1034                         static_branch_enable(&switch_mm_cond_ibpb);
1035                         break;
1036                 default:
1037                         break;
1038                 }
1039
1040                 pr_info("mitigation: Enabling %s Indirect Branch Prediction Barrier\n",
1041                         static_key_enabled(&switch_mm_always_ibpb) ?
1042                         "always-on" : "conditional");
1043         }
1044
1045         /*
1046          * If the CPU has no STIBP, if SMT is not possible, or if IBRS or
1047          * enhanced IBRS is enabled, STIBP is not required.
1048          */
1049         if (!boot_cpu_has(X86_FEATURE_STIBP) ||
1050             !smt_possible ||
1051             spectre_v2_in_ibrs_mode(spectre_v2_enabled))
1052                 return;
1053
1054         /*
1055          * At this point, an STIBP mode other than "off" has been set.
1056          * If STIBP support is not being forced, check if STIBP always-on
1057          * is preferred.
1058          */
1059         if (mode != SPECTRE_V2_USER_STRICT &&
1060             boot_cpu_has(X86_FEATURE_AMD_STIBP_ALWAYS_ON))
1061                 mode = SPECTRE_V2_USER_STRICT_PREFERRED;
1062
1063         spectre_v2_user_stibp = mode;
1064
1065 set_mode:
1066         pr_info("%s\n", spectre_v2_user_strings[mode]);
1067 }
1068
1069 static const char * const spectre_v2_strings[] = {
1070         [SPECTRE_V2_NONE]                       = "Vulnerable",
1071         [SPECTRE_V2_RETPOLINE]                  = "Mitigation: Retpolines",
1072         [SPECTRE_V2_LFENCE]                     = "Mitigation: LFENCE",
1073         [SPECTRE_V2_EIBRS]                      = "Mitigation: Enhanced IBRS",
1074         [SPECTRE_V2_EIBRS_LFENCE]               = "Mitigation: Enhanced IBRS + LFENCE",
1075         [SPECTRE_V2_EIBRS_RETPOLINE]            = "Mitigation: Enhanced IBRS + Retpolines",
1076         [SPECTRE_V2_IBRS]                       = "Mitigation: IBRS",
1077 };
1078
1079 static const struct {
1080         const char *option;
1081         enum spectre_v2_mitigation_cmd cmd;
1082         bool secure;
1083 } mitigation_options[] __initconst = {
1084         { "off",                SPECTRE_V2_CMD_NONE,              false },
1085         { "on",                 SPECTRE_V2_CMD_FORCE,             true  },
1086         { "retpoline",          SPECTRE_V2_CMD_RETPOLINE,         false },
1087         { "retpoline,amd",      SPECTRE_V2_CMD_RETPOLINE_LFENCE,  false },
1088         { "retpoline,lfence",   SPECTRE_V2_CMD_RETPOLINE_LFENCE,  false },
1089         { "retpoline,generic",  SPECTRE_V2_CMD_RETPOLINE_GENERIC, false },
1090         { "eibrs",              SPECTRE_V2_CMD_EIBRS,             false },
1091         { "eibrs,lfence",       SPECTRE_V2_CMD_EIBRS_LFENCE,      false },
1092         { "eibrs,retpoline",    SPECTRE_V2_CMD_EIBRS_RETPOLINE,   false },
1093         { "auto",               SPECTRE_V2_CMD_AUTO,              false },
1094         { "ibrs",               SPECTRE_V2_CMD_IBRS,              false },
1095 };
1096
1097 static void __init spec_v2_print_cond(const char *reason, bool secure)
1098 {
1099         if (boot_cpu_has_bug(X86_BUG_SPECTRE_V2) != secure)
1100                 pr_info("%s selected on command line.\n", reason);
1101 }
1102
1103 static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
1104 {
1105         enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;
1106         char arg[20];
1107         int ret, i;
1108
1109         if (cmdline_find_option_bool(boot_command_line, "nospectre_v2") ||
1110             cpu_mitigations_off())
1111                 return SPECTRE_V2_CMD_NONE;
1112
1113         ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
1114         if (ret < 0)
1115                 return SPECTRE_V2_CMD_AUTO;
1116
1117         for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
1118                 if (!match_option(arg, ret, mitigation_options[i].option))
1119                         continue;
1120                 cmd = mitigation_options[i].cmd;
1121                 break;
1122         }
1123
1124         if (i >= ARRAY_SIZE(mitigation_options)) {
1125                 pr_err("unknown option (%s). Switching to AUTO select\n", arg);
1126                 return SPECTRE_V2_CMD_AUTO;
1127         }
1128
1129         if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
1130              cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
1131              cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC ||
1132              cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
1133              cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
1134             !IS_ENABLED(CONFIG_RETPOLINE)) {
1135                 pr_err("%s selected but not compiled in. Switching to AUTO select\n",
1136                        mitigation_options[i].option);
1137                 return SPECTRE_V2_CMD_AUTO;
1138         }
1139
1140         if ((cmd == SPECTRE_V2_CMD_EIBRS ||
1141              cmd == SPECTRE_V2_CMD_EIBRS_LFENCE ||
1142              cmd == SPECTRE_V2_CMD_EIBRS_RETPOLINE) &&
1143             !boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
1144                 pr_err("%s selected but CPU doesn't have eIBRS. Switching to AUTO select\n",
1145                        mitigation_options[i].option);
1146                 return SPECTRE_V2_CMD_AUTO;
1147         }
1148
1149         if ((cmd == SPECTRE_V2_CMD_RETPOLINE_LFENCE ||
1150              cmd == SPECTRE_V2_CMD_EIBRS_LFENCE) &&
1151             !boot_cpu_has(X86_FEATURE_LFENCE_RDTSC)) {
1152                 pr_err("%s selected, but CPU doesn't have a serializing LFENCE. Switching to AUTO select\n",
1153                        mitigation_options[i].option);
1154                 return SPECTRE_V2_CMD_AUTO;
1155         }
1156
1157         if (cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) {
1158                 pr_err("%s selected but not Intel CPU. Switching to AUTO select\n",
1159                        mitigation_options[i].option);
1160                 return SPECTRE_V2_CMD_AUTO;
1161         }
1162
1163         if (cmd == SPECTRE_V2_CMD_IBRS && !boot_cpu_has(X86_FEATURE_IBRS)) {
1164                 pr_err("%s selected but CPU doesn't have IBRS. Switching to AUTO select\n",
1165                        mitigation_options[i].option);
1166                 return SPECTRE_V2_CMD_AUTO;
1167         }
1168
1169         if (cmd == SPECTRE_V2_CMD_IBRS && boot_cpu_has(X86_FEATURE_XENPV)) {
1170                 pr_err("%s selected but running as XenPV guest. Switching to AUTO select\n",
1171                        mitigation_options[i].option);
1172                 return SPECTRE_V2_CMD_AUTO;
1173         }
1174
1175         spec_v2_print_cond(mitigation_options[i].option,
1176                            mitigation_options[i].secure);
1177         return cmd;
1178 }
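
/*
 * Illustrative userspace sketch (not part of this file): the parser above
 * pairs cmdline_find_option(), which copies the option's value into arg and
 * returns its length, with match_option(), which does an exact,
 * length-delimited comparison against each table entry.  A standalone model
 * of that lookup; the table and the value below are made-up example data:
 */
#include <stdio.h>
#include <string.h>

struct opt { const char *option; int cmd; };

/* Hypothetical table in the style of mitigation_options[]. */
static const struct opt options[] = {
	{ "off",  0 },
	{ "on",   1 },
	{ "auto", 2 },
};

/* Same idea as match_option(): exact match of a length-delimited value. */
static int match_option(const char *arg, int arglen, const char *opt)
{
	int len = strlen(opt);

	return len == arglen && !strncmp(arg, opt, len);
}

int main(void)
{
	const char *arg = "auto";	/* what cmdline_find_option() would copy out */
	int arglen = strlen(arg);
	unsigned int i;

	for (i = 0; i < sizeof(options) / sizeof(options[0]); i++) {
		if (match_option(arg, arglen, options[i].option)) {
			printf("matched '%s' -> cmd %d\n", options[i].option, options[i].cmd);
			return 0;
		}
	}
	printf("unknown option '%s', falling back to auto\n", arg);
	return 0;
}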
1179
1180 static enum spectre_v2_mitigation __init spectre_v2_select_retpoline(void)
1181 {
1182         if (!IS_ENABLED(CONFIG_RETPOLINE)) {
1183                 pr_err("Kernel not compiled with retpoline; no mitigation available!");
1184                 return SPECTRE_V2_NONE;
1185         }
1186
1187         return SPECTRE_V2_RETPOLINE;
1188 }
1189
1190 /* Disable in-kernel use of non-RSB RET predictors */
1191 static void __init spec_ctrl_disable_kernel_rrsba(void)
1192 {
1193         u64 ia32_cap;
1194
1195         if (!boot_cpu_has(X86_FEATURE_RRSBA_CTRL))
1196                 return;
1197
1198         ia32_cap = x86_read_arch_cap_msr();
1199
1200         if (ia32_cap & ARCH_CAP_RRSBA) {
1201                 x86_spec_ctrl_base |= SPEC_CTRL_RRSBA_DIS_S;
1202                 update_spec_ctrl(x86_spec_ctrl_base);
1203         }
1204 }
1205
1206 static void __init spectre_v2_determine_rsb_fill_type_at_vmexit(enum spectre_v2_mitigation mode)
1207 {
1208         /*
1209          * Similar to context switches, there are two types of RSB attacks
1210          * after VM exit:
1211          *
1212          * 1) RSB underflow
1213          *
1214          * 2) Poisoned RSB entry
1215          *
1216          * When retpoline is enabled, both are mitigated by filling/clearing
1217          * the RSB.
1218          *
1219          * When IBRS is enabled, while #1 would be mitigated by the IBRS branch
1220          * prediction isolation protections, RSB still needs to be cleared
1221          * because of #2.  Note that SMEP provides no protection here, unlike
1222          * user-space-poisoned RSB entries.
1223          *
1224          * eIBRS should protect against RSB poisoning, but if the EIBRS_PBRSB
1225          * bug is present then a LITE version of RSB protection is required,
1226          * just a single call needs to retire before a RET is executed.
1227          */
1228         switch (mode) {
1229         case SPECTRE_V2_NONE:
1230                 return;
1231
1232         case SPECTRE_V2_EIBRS_LFENCE:
1233         case SPECTRE_V2_EIBRS:
1234                 if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB) &&
1235                     (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)) {
1236                         setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT_LITE);
1237                         pr_info("Spectre v2 / PBRSB-eIBRS: Retire a single CALL on VMEXIT\n");
1238                 }
1239                 return;
1240
1241         case SPECTRE_V2_EIBRS_RETPOLINE:
1242         case SPECTRE_V2_RETPOLINE:
1243         case SPECTRE_V2_LFENCE:
1244         case SPECTRE_V2_IBRS:
1245                 setup_force_cpu_cap(X86_FEATURE_RSB_VMEXIT);
1246                 pr_info("Spectre v2 / SpectreRSB : Filling RSB on VMEXIT\n");
1247                 return;
1248         }
1249
1250         pr_warn_once("Unknown Spectre v2 mode, disabling RSB mitigation at VM exit");
1251         dump_stack();
1252 }
1253
1254 static void __init spectre_v2_select_mitigation(void)
1255 {
1256         enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
1257         enum spectre_v2_mitigation mode = SPECTRE_V2_NONE;
1258
1259         /*
1260          * If the CPU is not affected and the command line mode is NONE or AUTO
1261          * then nothing to do.
1262          */
1263         if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2) &&
1264             (cmd == SPECTRE_V2_CMD_NONE || cmd == SPECTRE_V2_CMD_AUTO))
1265                 return;
1266
1267         switch (cmd) {
1268         case SPECTRE_V2_CMD_NONE:
1269                 return;
1270
1271         case SPECTRE_V2_CMD_FORCE:
1272         case SPECTRE_V2_CMD_AUTO:
1273                 if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED)) {
1274                         mode = SPECTRE_V2_EIBRS;
1275                         break;
1276                 }
1277
1278                 if (boot_cpu_has_bug(X86_BUG_RETBLEED) &&
1279                     retbleed_cmd != RETBLEED_CMD_OFF &&
1280                     boot_cpu_has(X86_FEATURE_IBRS) &&
1281                     boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) {
1282                         mode = SPECTRE_V2_IBRS;
1283                         break;
1284                 }
1285
1286                 mode = spectre_v2_select_retpoline();
1287                 break;
1288
1289         case SPECTRE_V2_CMD_RETPOLINE_LFENCE:
1290                 pr_err(SPECTRE_V2_LFENCE_MSG);
1291                 mode = SPECTRE_V2_LFENCE;
1292                 break;
1293
1294         case SPECTRE_V2_CMD_RETPOLINE_GENERIC:
1295                 mode = SPECTRE_V2_RETPOLINE;
1296                 break;
1297
1298         case SPECTRE_V2_CMD_RETPOLINE:
1299                 mode = spectre_v2_select_retpoline();
1300                 break;
1301
1302         case SPECTRE_V2_CMD_IBRS:
1303                 mode = SPECTRE_V2_IBRS;
1304                 break;
1305
1306         case SPECTRE_V2_CMD_EIBRS:
1307                 mode = SPECTRE_V2_EIBRS;
1308                 break;
1309
1310         case SPECTRE_V2_CMD_EIBRS_LFENCE:
1311                 mode = SPECTRE_V2_EIBRS_LFENCE;
1312                 break;
1313
1314         case SPECTRE_V2_CMD_EIBRS_RETPOLINE:
1315                 mode = SPECTRE_V2_EIBRS_RETPOLINE;
1316                 break;
1317         }
1318
1319         if (mode == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
1320                 pr_err(SPECTRE_V2_EIBRS_EBPF_MSG);
1321
1322         if (spectre_v2_in_ibrs_mode(mode)) {
1323                 x86_spec_ctrl_base |= SPEC_CTRL_IBRS;
1324                 update_spec_ctrl(x86_spec_ctrl_base);
1325         }
1326
1327         switch (mode) {
1328         case SPECTRE_V2_NONE:
1329         case SPECTRE_V2_EIBRS:
1330                 break;
1331
1332         case SPECTRE_V2_IBRS:
1333                 setup_force_cpu_cap(X86_FEATURE_KERNEL_IBRS);
1334                 if (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED))
1335                         pr_warn(SPECTRE_V2_IBRS_PERF_MSG);
1336                 break;
1337
1338         case SPECTRE_V2_LFENCE:
1339         case SPECTRE_V2_EIBRS_LFENCE:
1340                 setup_force_cpu_cap(X86_FEATURE_RETPOLINE_LFENCE);
1341                 /* fallthrough */
1342
1343         case SPECTRE_V2_RETPOLINE:
1344         case SPECTRE_V2_EIBRS_RETPOLINE:
1345                 setup_force_cpu_cap(X86_FEATURE_RETPOLINE);
1346                 break;
1347         }
1348
1349         /*
1350          * Disable alternate RSB predictions in kernel when indirect CALLs and
1351          * JMPs get protection against BHI and Intramode-BTI, but RET
1352          * prediction from a non-RSB predictor is still a risk.
1353          */
1354         if (mode == SPECTRE_V2_EIBRS_LFENCE ||
1355             mode == SPECTRE_V2_EIBRS_RETPOLINE ||
1356             mode == SPECTRE_V2_RETPOLINE)
1357                 spec_ctrl_disable_kernel_rrsba();
1358
1359         spectre_v2_enabled = mode;
1360         pr_info("%s\n", spectre_v2_strings[mode]);
1361
1362         /*
1363          * If Spectre v2 protection has been enabled, fill the RSB during a
1364          * context switch.  In general there are two types of RSB attacks
1365          * across context switches, for which the CALLs/RETs may be unbalanced.
1366          *
1367          * 1) RSB underflow
1368          *
1369          *    Some Intel parts have "bottomless RSB".  When the RSB is empty,
1370          *    speculated return targets may come from the branch predictor,
1371          *    which could have a user-poisoned BTB or BHB entry.
1372          *
1373          *    AMD has it even worse: *all* returns are speculated from the BTB,
1374          *    regardless of the state of the RSB.
1375          *
1376          *    When IBRS or eIBRS is enabled, the "user -> kernel" attack
1377          *    scenario is mitigated by the IBRS branch prediction isolation
1378          *    properties, so the RSB buffer filling wouldn't be necessary to
1379          *    protect against this type of attack.
1380          *
1381          *    The "user -> user" attack scenario is mitigated by RSB filling.
1382          *
1383          * 2) Poisoned RSB entry
1384          *
1385          *    If the 'next' in-kernel return stack is shorter than 'prev',
1386          *    'next' could be tricked into speculating with a user-poisoned RSB
1387          *    entry.
1388          *
1389          *    The "user -> kernel" attack scenario is mitigated by SMEP and
1390          *    eIBRS.
1391          *
1392          *    The "user -> user" scenario, also known as SpectreBHB, requires
1393          *    RSB clearing.
1394          *
1395          * So to mitigate all cases, unconditionally fill RSB on context
1396          * switches.
1397          *
1398          * FIXME: Is this pointless for retbleed-affected AMD?
1399          */
1400         setup_force_cpu_cap(X86_FEATURE_RSB_CTXSW);
1401         pr_info("Spectre v2 / SpectreRSB mitigation: Filling RSB on context switch\n");
1402
1403         spectre_v2_determine_rsb_fill_type_at_vmexit(mode);
1404
1405         /*
1406          * Retpoline protects the kernel, but doesn't protect firmware.  IBRS
1407          * and Enhanced IBRS protect firmware too, so enable IBRS around
1408          * firmware calls only when IBRS / Enhanced IBRS aren't otherwise
1409          * enabled.
1410          *
1411          * Use "mode" to check Enhanced IBRS instead of boot_cpu_has(), because
1412          * the user might select retpoline on the kernel command line and if
1413          * the CPU supports Enhanced IBRS, the kernel might unintentionally not
1414          * enable IBRS around firmware calls.
1415          */
1416         if (boot_cpu_has(X86_FEATURE_IBRS) && !spectre_v2_in_ibrs_mode(mode)) {
1417                 setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
1418                 pr_info("Enabling Restricted Speculation for firmware calls\n");
1419         }
1420
1421         /* Set up IBPB and STIBP depending on the general spectre V2 command */
1422         spectre_v2_cmd = cmd;
1423 }
1424
1425 static void update_stibp_msr(void * __unused)
1426 {
1427         u64 val = spec_ctrl_current() | (x86_spec_ctrl_base & SPEC_CTRL_STIBP);
1428         update_spec_ctrl(val);
1429 }
1430
1431 /* Update x86_spec_ctrl_base in case SMT state changed. */
1432 static void update_stibp_strict(void)
1433 {
1434         u64 mask = x86_spec_ctrl_base & ~SPEC_CTRL_STIBP;
1435
1436         if (sched_smt_active())
1437                 mask |= SPEC_CTRL_STIBP;
1438
1439         if (mask == x86_spec_ctrl_base)
1440                 return;
1441
1442         pr_info("Update user space SMT mitigation: STIBP %s\n",
1443                 mask & SPEC_CTRL_STIBP ? "always-on" : "off");
1444         x86_spec_ctrl_base = mask;
1445         on_each_cpu(update_stibp_msr, NULL, 1);
1446 }
1447
1448 /* Update the static key controlling the evaluation of TIF_SPEC_IB */
1449 static void update_indir_branch_cond(void)
1450 {
1451         if (sched_smt_active())
1452                 static_branch_enable(&switch_to_cond_stibp);
1453         else
1454                 static_branch_disable(&switch_to_cond_stibp);
1455 }
1456
1457 #undef pr_fmt
1458 #define pr_fmt(fmt) fmt
1459
1460 /* Update the static key controlling the MDS CPU buffer clear in idle */
1461 static void update_mds_branch_idle(void)
1462 {
1463         u64 ia32_cap = x86_read_arch_cap_msr();
1464
1465         /*
1466          * Enable the idle clearing if SMT is active on CPUs which are
1467          * affected only by MSBDS and not any other MDS variant.
1468          *
1469          * The other variants cannot be mitigated when SMT is enabled, so
1470          * clearing the buffers on idle just to prevent the Store Buffer
1471          * repartitioning leak would be a window dressing exercise.
1472          */
1473         if (!boot_cpu_has_bug(X86_BUG_MSBDS_ONLY))
1474                 return;
1475
1476         if (sched_smt_active()) {
1477                 static_branch_enable(&mds_idle_clear);
1478         } else if (mmio_mitigation == MMIO_MITIGATION_OFF ||
1479                    (ia32_cap & ARCH_CAP_FBSDP_NO)) {
1480                 static_branch_disable(&mds_idle_clear);
1481         }
1482 }
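/*
 * Rough sketch of where the mds_idle_clear key is consumed, assuming the
 * mds_idle_clear_cpu_buffers() helper in <asm/nospec-branch.h>: the idle
 * entry paths (e.g. mwait_idle_with_hints()) do roughly
 *
 *	if (static_branch_likely(&mds_idle_clear))
 *		mds_clear_cpu_buffers();	// VERW-based buffer clear
 *
 * before halting, so store buffer repartitioning to the sibling thread
 * cannot expose stale data while this thread is idle.
 */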
1483
1484 #define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n"
1485 #define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n"
1486 #define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n"
1487
1488 void arch_smt_update(void)
1489 {
1490         mutex_lock(&spec_ctrl_mutex);
1491
1492         if (sched_smt_active() && unprivileged_ebpf_enabled() &&
1493             spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
1494                 pr_warn_once(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);
1495
1496         switch (spectre_v2_user_stibp) {
1497         case SPECTRE_V2_USER_NONE:
1498                 break;
1499         case SPECTRE_V2_USER_STRICT:
1500         case SPECTRE_V2_USER_STRICT_PREFERRED:
1501                 update_stibp_strict();
1502                 break;
1503         case SPECTRE_V2_USER_PRCTL:
1504         case SPECTRE_V2_USER_SECCOMP:
1505                 update_indir_branch_cond();
1506                 break;
1507         }
1508
1509         switch (mds_mitigation) {
1510         case MDS_MITIGATION_FULL:
1511         case MDS_MITIGATION_VMWERV:
1512                 if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY))
1513                         pr_warn_once(MDS_MSG_SMT);
1514                 update_mds_branch_idle();
1515                 break;
1516         case MDS_MITIGATION_OFF:
1517                 break;
1518         }
1519
1520         switch (taa_mitigation) {
1521         case TAA_MITIGATION_VERW:
1522         case TAA_MITIGATION_UCODE_NEEDED:
1523                 if (sched_smt_active())
1524                         pr_warn_once(TAA_MSG_SMT);
1525                 break;
1526         case TAA_MITIGATION_TSX_DISABLED:
1527         case TAA_MITIGATION_OFF:
1528                 break;
1529         }
1530
1531         switch (mmio_mitigation) {
1532         case MMIO_MITIGATION_VERW:
1533         case MMIO_MITIGATION_UCODE_NEEDED:
1534                 if (sched_smt_active())
1535                         pr_warn_once(MMIO_MSG_SMT);
1536                 break;
1537         case MMIO_MITIGATION_OFF:
1538                 break;
1539         }
1540
1541         mutex_unlock(&spec_ctrl_mutex);
1542 }
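/*
 * Illustrative example, assuming the standard SMT control file: a runtime
 * SMT change such as
 *
 *	echo off > /sys/devices/system/cpu/smt/control
 *
 * ends up invoking arch_smt_update() above, which drops STIBP from the
 * strict-mode MSR mask, disables the switch_to_cond_stibp key for the
 * prctl/seccomp modes and re-evaluates the MDS idle clearing, since all of
 * these only matter while a sibling thread can run attacker-controlled code.
 */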
1543
1544 #undef pr_fmt
1545 #define pr_fmt(fmt)     "Speculative Store Bypass: " fmt
1546
1547 static enum ssb_mitigation ssb_mode __ro_after_init = SPEC_STORE_BYPASS_NONE;
1548
1549 /* The kernel command line selection */
1550 enum ssb_mitigation_cmd {
1551         SPEC_STORE_BYPASS_CMD_NONE,
1552         SPEC_STORE_BYPASS_CMD_AUTO,
1553         SPEC_STORE_BYPASS_CMD_ON,
1554         SPEC_STORE_BYPASS_CMD_PRCTL,
1555         SPEC_STORE_BYPASS_CMD_SECCOMP,
1556 };
1557
1558 static const char * const ssb_strings[] = {
1559         [SPEC_STORE_BYPASS_NONE]        = "Vulnerable",
1560         [SPEC_STORE_BYPASS_DISABLE]     = "Mitigation: Speculative Store Bypass disabled",
1561         [SPEC_STORE_BYPASS_PRCTL]       = "Mitigation: Speculative Store Bypass disabled via prctl",
1562         [SPEC_STORE_BYPASS_SECCOMP]     = "Mitigation: Speculative Store Bypass disabled via prctl and seccomp",
1563 };
1564
1565 static const struct {
1566         const char *option;
1567         enum ssb_mitigation_cmd cmd;
1568 } ssb_mitigation_options[]  __initconst = {
1569         { "auto",       SPEC_STORE_BYPASS_CMD_AUTO },    /* Platform decides */
1570         { "on",         SPEC_STORE_BYPASS_CMD_ON },      /* Disable Speculative Store Bypass */
1571         { "off",        SPEC_STORE_BYPASS_CMD_NONE },    /* Don't touch Speculative Store Bypass */
1572         { "prctl",      SPEC_STORE_BYPASS_CMD_PRCTL },   /* Disable Speculative Store Bypass via prctl */
1573         { "seccomp",    SPEC_STORE_BYPASS_CMD_SECCOMP }, /* Disable Speculative Store Bypass via prctl and seccomp */
1574 };
1575
1576 static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
1577 {
1578         enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
1579         char arg[20];
1580         int ret, i;
1581
1582         if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable") ||
1583             cpu_mitigations_off()) {
1584                 return SPEC_STORE_BYPASS_CMD_NONE;
1585         } else {
1586                 ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
1587                                           arg, sizeof(arg));
1588                 if (ret < 0)
1589                         return SPEC_STORE_BYPASS_CMD_AUTO;
1590
1591                 for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
1592                         if (!match_option(arg, ret, ssb_mitigation_options[i].option))
1593                                 continue;
1594
1595                         cmd = ssb_mitigation_options[i].cmd;
1596                         break;
1597                 }
1598
1599                 if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
1600                         pr_err("unknown option (%s). Switching to AUTO select\n", arg);
1601                         return SPEC_STORE_BYPASS_CMD_AUTO;
1602                 }
1603         }
1604
1605         return cmd;
1606 }
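/*
 * Illustrative examples of how ssb_parse_cmdline() maps boot parameters to
 * commands (assuming "mitigations=off" is what cpu_mitigations_off() reports):
 *
 *	spec_store_bypass_disable=seccomp   -> SPEC_STORE_BYPASS_CMD_SECCOMP
 *	spec_store_bypass_disable=bogus     -> warning, SPEC_STORE_BYPASS_CMD_AUTO
 *	nospec_store_bypass_disable         -> SPEC_STORE_BYPASS_CMD_NONE
 *	mitigations=off                     -> SPEC_STORE_BYPASS_CMD_NONE
 */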
1607
1608 static enum ssb_mitigation __init __ssb_select_mitigation(void)
1609 {
1610         enum ssb_mitigation mode = SPEC_STORE_BYPASS_NONE;
1611         enum ssb_mitigation_cmd cmd;
1612
1613         if (!boot_cpu_has(X86_FEATURE_SSBD))
1614                 return mode;
1615
1616         cmd = ssb_parse_cmdline();
1617         if (!boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS) &&
1618             (cmd == SPEC_STORE_BYPASS_CMD_NONE ||
1619              cmd == SPEC_STORE_BYPASS_CMD_AUTO))
1620                 return mode;
1621
1622         switch (cmd) {
1623         case SPEC_STORE_BYPASS_CMD_AUTO:
1624         case SPEC_STORE_BYPASS_CMD_SECCOMP:
1625                 /*
1626                  * Choose prctl+seccomp as the default mode if seccomp is
1627                  * enabled.
1628                  */
1629                 if (IS_ENABLED(CONFIG_SECCOMP))
1630                         mode = SPEC_STORE_BYPASS_SECCOMP;
1631                 else
1632                         mode = SPEC_STORE_BYPASS_PRCTL;
1633                 break;
1634         case SPEC_STORE_BYPASS_CMD_ON:
1635                 mode = SPEC_STORE_BYPASS_DISABLE;
1636                 break;
1637         case SPEC_STORE_BYPASS_CMD_PRCTL:
1638                 mode = SPEC_STORE_BYPASS_PRCTL;
1639                 break;
1640         case SPEC_STORE_BYPASS_CMD_NONE:
1641                 break;
1642         }
1643
1644         /*
1645          * We have three CPU feature flags that are in play here:
1646          *  - X86_BUG_SPEC_STORE_BYPASS - CPU is susceptible.
1647          *  - X86_FEATURE_SSBD - CPU is able to turn off speculative store bypass
1648          *  - X86_FEATURE_SPEC_STORE_BYPASS_DISABLE - engage the mitigation
1649          */
1650         if (mode == SPEC_STORE_BYPASS_DISABLE) {
1651                 setup_force_cpu_cap(X86_FEATURE_SPEC_STORE_BYPASS_DISABLE);
1652                 /*
1653                  * Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may
1654                  * use a completely different MSR and bit dependent on family.
1655                  */
1656                 if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
1657                     !static_cpu_has(X86_FEATURE_AMD_SSBD)) {
1658                         x86_amd_ssb_disable();
1659                 } else {
1660                         x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
1661                         update_spec_ctrl(x86_spec_ctrl_base);
1662                 }
1663         }
1664
1665         return mode;
1666 }
1667
1668 static void ssb_select_mitigation(void)
1669 {
1670         ssb_mode = __ssb_select_mitigation();
1671
1672         if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
1673                 pr_info("%s\n", ssb_strings[ssb_mode]);
1674 }
1675
1676 #undef pr_fmt
1677 #define pr_fmt(fmt)     "Speculation prctl: " fmt
1678
1679 static void task_update_spec_tif(struct task_struct *tsk)
1680 {
1681         /* Force the update of the real TIF bits */
1682         set_tsk_thread_flag(tsk, TIF_SPEC_FORCE_UPDATE);
1683
1684         /*
1685          * Immediately update the speculation control MSRs for the current
1686          * task, but for a non-current task delay setting the CPU
1687          * mitigation until it is scheduled next.
1688          *
1689          * A non-current task can only occur for the SECCOMP mitigation;
1690          * for PRCTL the target is always the current task.
1691          */
1692         if (tsk == current)
1693                 speculation_ctrl_update_current();
1694 }
1695
1696 static int ssb_prctl_set(struct task_struct *task, unsigned long ctrl)
1697 {
1698         if (ssb_mode != SPEC_STORE_BYPASS_PRCTL &&
1699             ssb_mode != SPEC_STORE_BYPASS_SECCOMP)
1700                 return -ENXIO;
1701
1702         switch (ctrl) {
1703         case PR_SPEC_ENABLE:
1704                 /* If speculation is force disabled, enable is not allowed */
1705                 if (task_spec_ssb_force_disable(task))
1706                         return -EPERM;
1707                 task_clear_spec_ssb_disable(task);
1708                 task_update_spec_tif(task);
1709                 break;
1710         case PR_SPEC_DISABLE:
1711                 task_set_spec_ssb_disable(task);
1712                 task_update_spec_tif(task);
1713                 break;
1714         case PR_SPEC_FORCE_DISABLE:
1715                 task_set_spec_ssb_disable(task);
1716                 task_set_spec_ssb_force_disable(task);
1717                 task_update_spec_tif(task);
1718                 break;
1719         default:
1720                 return -ERANGE;
1721         }
1722         return 0;
1723 }
1724
1725 static bool is_spec_ib_user_controlled(void)
1726 {
1727         return spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL ||
1728                 spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
1729                 spectre_v2_user_stibp == SPECTRE_V2_USER_PRCTL ||
1730                 spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP;
1731 }
1732
1733 static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
1734 {
1735         switch (ctrl) {
1736         case PR_SPEC_ENABLE:
1737                 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
1738                     spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
1739                         return 0;
1740                 /*
1741                  * With strict mode for both IBPB and STIBP, the instruction
1742                  * code paths avoid checking this task flag and instead
1743                  * unconditionally run the instruction. However, STIBP and IBPB
1744                  * are independent, and either can be set to conditional mode
1745                  * regardless of the mode of the other.
1746                  *
1747                  * If either is set to conditional, allow the task flag to be
1748                  * updated, unless it was force-disabled by a previous prctl
1749                  * call. Currently, this is possible on an AMD CPU which has the
1750                  * feature X86_FEATURE_AMD_STIBP_ALWAYS_ON. In this case, if the
1751                  * kernel is booted with 'spectre_v2_user=seccomp', then
1752                  * spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP and
1753                  * spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED.
1754                  */
1755                 if (!is_spec_ib_user_controlled() ||
1756                     task_spec_ib_force_disable(task))
1757                         return -EPERM;
1758
1759                 task_clear_spec_ib_disable(task);
1760                 task_update_spec_tif(task);
1761                 break;
1762         case PR_SPEC_DISABLE:
1763         case PR_SPEC_FORCE_DISABLE:
1764                 /*
1765                  * Indirect branch speculation is always allowed when
1766                  * mitigation is force disabled.
1767                  */
1768                 if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
1769                     spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
1770                         return -EPERM;
1771
1772                 if (!is_spec_ib_user_controlled())
1773                         return 0;
1774
1775                 task_set_spec_ib_disable(task);
1776                 if (ctrl == PR_SPEC_FORCE_DISABLE)
1777                         task_set_spec_ib_force_disable(task);
1778                 task_update_spec_tif(task);
1779                 if (task == current)
1780                         indirect_branch_prediction_barrier();
1781                 break;
1782         default:
1783                 return -ERANGE;
1784         }
1785         return 0;
1786 }
1787
1788 int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
1789                              unsigned long ctrl)
1790 {
1791         switch (which) {
1792         case PR_SPEC_STORE_BYPASS:
1793                 return ssb_prctl_set(task, ctrl);
1794         case PR_SPEC_INDIRECT_BRANCH:
1795                 return ib_prctl_set(task, ctrl);
1796         default:
1797                 return -ENODEV;
1798         }
1799 }
1800
1801 #ifdef CONFIG_SECCOMP
1802 void arch_seccomp_spec_mitigate(struct task_struct *task)
1803 {
1804         if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
1805                 ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
1806         if (spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
1807             spectre_v2_user_stibp == SPECTRE_V2_USER_SECCOMP)
1808                 ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
1809 }
1810 #endif
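/*
 * Usage note (illustrative): arch_seccomp_spec_mitigate() is what makes
 * installing a plain seccomp filter imply PR_SPEC_FORCE_DISABLE in the
 * "seccomp" modes. Assuming the standard seccomp(2) flag, a task that wants
 * to keep speculation enabled despite its filter passes
 * SECCOMP_FILTER_FLAG_SPEC_ALLOW to seccomp(SECCOMP_SET_MODE_FILTER, ...),
 * which skips this hook.
 */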
1811
1812 static int ssb_prctl_get(struct task_struct *task)
1813 {
1814         switch (ssb_mode) {
1815         case SPEC_STORE_BYPASS_DISABLE:
1816                 return PR_SPEC_DISABLE;
1817         case SPEC_STORE_BYPASS_SECCOMP:
1818         case SPEC_STORE_BYPASS_PRCTL:
1819                 if (task_spec_ssb_force_disable(task))
1820                         return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
1821                 if (task_spec_ssb_disable(task))
1822                         return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
1823                 return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
1824         default:
1825                 if (boot_cpu_has_bug(X86_BUG_SPEC_STORE_BYPASS))
1826                         return PR_SPEC_ENABLE;
1827                 return PR_SPEC_NOT_AFFECTED;
1828         }
1829 }
1830
1831 static int ib_prctl_get(struct task_struct *task)
1832 {
1833         if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
1834                 return PR_SPEC_NOT_AFFECTED;
1835
1836         if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
1837             spectre_v2_user_stibp == SPECTRE_V2_USER_NONE)
1838                 return PR_SPEC_ENABLE;
1839         else if (is_spec_ib_user_controlled()) {
1840                 if (task_spec_ib_force_disable(task))
1841                         return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
1842                 if (task_spec_ib_disable(task))
1843                         return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
1844                 return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
1845         } else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
1846             spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
1847             spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED)
1848                 return PR_SPEC_DISABLE;
1849         else
1850                 return PR_SPEC_NOT_AFFECTED;
1851 }
1852
1853 int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
1854 {
1855         switch (which) {
1856         case PR_SPEC_STORE_BYPASS:
1857                 return ssb_prctl_get(task);
1858         case PR_SPEC_INDIRECT_BRANCH:
1859                 return ib_prctl_get(task);
1860         default:
1861                 return -ENODEV;
1862         }
1863 }
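/*
 * Minimal userspace sketch of the prctl interface implemented above,
 * assuming the PR_{SET,GET}_SPECULATION_CTRL operations documented in
 * Documentation/userspace-api/spec_ctrl.rst (build as a normal userspace
 * program, not as part of the kernel):
 *
 *	#include <sys/prctl.h>
 *	#include <linux/prctl.h>
 *
 *	// Opt this task out of Speculative Store Bypass:
 *	prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
 *	      PR_SPEC_DISABLE, 0, 0);
 *
 *	// Query indirect branch speculation; returns a bitmask such as
 *	// PR_SPEC_PRCTL | PR_SPEC_ENABLE:
 *	int ibs = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
 *			0, 0, 0);
 *
 * The set call fails with errno ENXIO when the SSB mitigation is not in
 * prctl/seccomp mode (see ssb_prctl_set() above).
 */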
1864
1865 void x86_spec_ctrl_setup_ap(void)
1866 {
1867         if (boot_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
1868                 update_spec_ctrl(x86_spec_ctrl_base);
1869
1870         if (ssb_mode == SPEC_STORE_BYPASS_DISABLE)
1871                 x86_amd_ssb_disable();
1872 }
1873
1874 bool itlb_multihit_kvm_mitigation;
1875 EXPORT_SYMBOL_GPL(itlb_multihit_kvm_mitigation);
1876
1877 #undef pr_fmt
1878 #define pr_fmt(fmt)     "L1TF: " fmt
1879
1880 /* Default mitigation for L1TF-affected CPUs */
1881 enum l1tf_mitigations l1tf_mitigation __ro_after_init = L1TF_MITIGATION_FLUSH;
1882 #if IS_ENABLED(CONFIG_KVM_INTEL)
1883 EXPORT_SYMBOL_GPL(l1tf_mitigation);
1884 #endif
1885 enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
1886 EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
1887
1888 /*
1889  * These CPUs all support 44 bits of physical address space internally in
1890  * the cache, but CPUID can report a smaller number of physical address bits.
1891  *
1892  * The L1TF mitigation uses the topmost address bit for the inversion of
1893  * non-present PTEs. When the installed memory reaches into the topmost
1894  * address bit due to memory holes - observed on machines which report 36
1895  * physical address bits and have 32G of RAM installed - the mitigation
1896  * range check in l1tf_select_mitigation() triggers. This is a false
1897  * positive because the mitigation is still possible since the cache uses
1898  * 44 bits internally. Therefore use the cache bits instead of the reported
1899  * physical bits, and adjust them on the affected machines to 44 if the
1900  * reported bits are less than 44.
1901  */
1902 static void override_cache_bits(struct cpuinfo_x86 *c)
1903 {
1904         if (c->x86 != 6)
1905                 return;
1906
1907         switch (c->x86_model) {
1908         case INTEL_FAM6_NEHALEM:
1909         case INTEL_FAM6_WESTMERE:
1910         case INTEL_FAM6_SANDYBRIDGE:
1911         case INTEL_FAM6_IVYBRIDGE:
1912         case INTEL_FAM6_HASWELL_CORE:
1913         case INTEL_FAM6_HASWELL_ULT:
1914         case INTEL_FAM6_HASWELL_GT3E:
1915         case INTEL_FAM6_BROADWELL_CORE:
1916         case INTEL_FAM6_BROADWELL_GT3E:
1917         case INTEL_FAM6_SKYLAKE_MOBILE:
1918         case INTEL_FAM6_SKYLAKE_DESKTOP:
1919         case INTEL_FAM6_KABYLAKE_MOBILE:
1920         case INTEL_FAM6_KABYLAKE_DESKTOP:
1921                 if (c->x86_cache_bits < 44)
1922                         c->x86_cache_bits = 44;
1923                 break;
1924         }
1925 }
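/*
 * Worked example for the range check in l1tf_select_mitigation() below,
 * assuming half_pa is derived from x86_cache_bits via l1tf_pfn_limit():
 * with 36 reported physical address bits, half of the addressable space is
 * 2^35 bytes = 32GB, so a machine with 32GB of RAM plus memory holes already
 * trips the "more than MAX_PA/2" warning. Once override_cache_bits() raises
 * the affected models to 44 bits, the limit becomes 2^43 bytes = 8TB and the
 * false positive goes away.
 */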
1926
1927 static void __init l1tf_select_mitigation(void)
1928 {
1929         u64 half_pa;
1930
1931         if (!boot_cpu_has_bug(X86_BUG_L1TF))
1932                 return;
1933
1934         if (cpu_mitigations_off())
1935                 l1tf_mitigation = L1TF_MITIGATION_OFF;
1936         else if (cpu_mitigations_auto_nosmt())
1937                 l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
1938
1939         override_cache_bits(&boot_cpu_data);
1940
1941         switch (l1tf_mitigation) {
1942         case L1TF_MITIGATION_OFF:
1943         case L1TF_MITIGATION_FLUSH_NOWARN:
1944         case L1TF_MITIGATION_FLUSH:
1945                 break;
1946         case L1TF_MITIGATION_FLUSH_NOSMT:
1947         case L1TF_MITIGATION_FULL:
1948                 cpu_smt_disable(false);
1949                 break;
1950         case L1TF_MITIGATION_FULL_FORCE:
1951                 cpu_smt_disable(true);
1952                 break;
1953         }
1954
1955 #if CONFIG_PGTABLE_LEVELS == 2
1956         pr_warn("Kernel not compiled for PAE. No mitigation for L1TF\n");
1957         return;
1958 #endif
1959
1960         half_pa = (u64)l1tf_pfn_limit() << PAGE_SHIFT;
1961         if (l1tf_mitigation != L1TF_MITIGATION_OFF &&
1962                         e820__mapped_any(half_pa, ULLONG_MAX - half_pa, E820_TYPE_RAM)) {
1963                 pr_warn("System has more than MAX_PA/2 memory. L1TF mitigation not effective.\n");
1964                 pr_info("You may make it effective by booting the kernel with mem=%llu parameter.\n",
1965                                 half_pa);
1966                 pr_info("However, doing so will make a part of your RAM unusable.\n");
1967                 pr_info("Reading https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/l1tf.html might help you decide.\n");
1968                 return;
1969         }
1970
1971         setup_force_cpu_cap(X86_FEATURE_L1TF_PTEINV);
1972 }
1973
1974 static int __init l1tf_cmdline(char *str)
1975 {
1976         if (!boot_cpu_has_bug(X86_BUG_L1TF))
1977                 return 0;
1978
1979         if (!str)
1980                 return -EINVAL;
1981
1982         if (!strcmp(str, "off"))
1983                 l1tf_mitigation = L1TF_MITIGATION_OFF;
1984         else if (!strcmp(str, "flush,nowarn"))
1985                 l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOWARN;
1986         else if (!strcmp(str, "flush"))
1987                 l1tf_mitigation = L1TF_MITIGATION_FLUSH;
1988         else if (!strcmp(str, "flush,nosmt"))
1989                 l1tf_mitigation = L1TF_MITIGATION_FLUSH_NOSMT;
1990         else if (!strcmp(str, "full"))
1991                 l1tf_mitigation = L1TF_MITIGATION_FULL;
1992         else if (!strcmp(str, "full,force"))
1993                 l1tf_mitigation = L1TF_MITIGATION_FULL_FORCE;
1994
1995         return 0;
1996 }
1997 early_param("l1tf", l1tf_cmdline);
1998
1999 #undef pr_fmt
2000 #define pr_fmt(fmt) fmt
2001
2002 #ifdef CONFIG_SYSFS
2003
2004 #define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"
2005
2006 #if IS_ENABLED(CONFIG_KVM_INTEL)
2007 static const char * const l1tf_vmx_states[] = {
2008         [VMENTER_L1D_FLUSH_AUTO]                = "auto",
2009         [VMENTER_L1D_FLUSH_NEVER]               = "vulnerable",
2010         [VMENTER_L1D_FLUSH_COND]                = "conditional cache flushes",
2011         [VMENTER_L1D_FLUSH_ALWAYS]              = "cache flushes",
2012         [VMENTER_L1D_FLUSH_EPT_DISABLED]        = "EPT disabled",
2013         [VMENTER_L1D_FLUSH_NOT_REQUIRED]        = "flush not necessary"
2014 };
2015
2016 static ssize_t l1tf_show_state(char *buf)
2017 {
2018         if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO)
2019                 return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
2020
2021         if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_EPT_DISABLED ||
2022             (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_NEVER &&
2023              sched_smt_active())) {
2024                 return sprintf(buf, "%s; VMX: %s\n", L1TF_DEFAULT_MSG,
2025                                l1tf_vmx_states[l1tf_vmx_mitigation]);
2026         }
2027
2028         return sprintf(buf, "%s; VMX: %s, SMT %s\n", L1TF_DEFAULT_MSG,
2029                        l1tf_vmx_states[l1tf_vmx_mitigation],
2030                        sched_smt_active() ? "vulnerable" : "disabled");
2031 }
2032
2033 static ssize_t itlb_multihit_show_state(char *buf)
2034 {
2035         if (itlb_multihit_kvm_mitigation)
2036                 return sprintf(buf, "KVM: Mitigation: Split huge pages\n");
2037         else
2038                 return sprintf(buf, "KVM: Vulnerable\n");
2039 }
2040 #else
2041 static ssize_t l1tf_show_state(char *buf)
2042 {
2043         return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
2044 }
2045
2046 static ssize_t itlb_multihit_show_state(char *buf)
2047 {
2048         return sprintf(buf, "Processor vulnerable\n");
2049 }
2050 #endif
2051
2052 static ssize_t mds_show_state(char *buf)
2053 {
2054         if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
2055                 return sprintf(buf, "%s; SMT Host state unknown\n",
2056                                mds_strings[mds_mitigation]);
2057         }
2058
2059         if (boot_cpu_has(X86_BUG_MSBDS_ONLY)) {
2060                 return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
2061                                (mds_mitigation == MDS_MITIGATION_OFF ? "vulnerable" :
2062                                 sched_smt_active() ? "mitigated" : "disabled"));
2063         }
2064
2065         return sprintf(buf, "%s; SMT %s\n", mds_strings[mds_mitigation],
2066                        sched_smt_active() ? "vulnerable" : "disabled");
2067 }
2068
2069 static ssize_t tsx_async_abort_show_state(char *buf)
2070 {
2071         if ((taa_mitigation == TAA_MITIGATION_TSX_DISABLED) ||
2072             (taa_mitigation == TAA_MITIGATION_OFF))
2073                 return sprintf(buf, "%s\n", taa_strings[taa_mitigation]);
2074
2075         if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
2076                 return sprintf(buf, "%s; SMT Host state unknown\n",
2077                                taa_strings[taa_mitigation]);
2078         }
2079
2080         return sprintf(buf, "%s; SMT %s\n", taa_strings[taa_mitigation],
2081                        sched_smt_active() ? "vulnerable" : "disabled");
2082 }
2083
2084 static ssize_t mmio_stale_data_show_state(char *buf)
2085 {
2086         if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
2087                 return sysfs_emit(buf, "Unknown: No mitigations\n");
2088
2089         if (mmio_mitigation == MMIO_MITIGATION_OFF)
2090                 return sysfs_emit(buf, "%s\n", mmio_strings[mmio_mitigation]);
2091
2092         if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
2093                 return sysfs_emit(buf, "%s; SMT Host state unknown\n",
2094                                   mmio_strings[mmio_mitigation]);
2095         }
2096
2097         return sysfs_emit(buf, "%s; SMT %s\n", mmio_strings[mmio_mitigation],
2098                           sched_smt_active() ? "vulnerable" : "disabled");
2099 }
2100
2101 static char *stibp_state(void)
2102 {
2103         if (spectre_v2_in_ibrs_mode(spectre_v2_enabled))
2104                 return "";
2105
2106         switch (spectre_v2_user_stibp) {
2107         case SPECTRE_V2_USER_NONE:
2108                 return ", STIBP: disabled";
2109         case SPECTRE_V2_USER_STRICT:
2110                 return ", STIBP: forced";
2111         case SPECTRE_V2_USER_STRICT_PREFERRED:
2112                 return ", STIBP: always-on";
2113         case SPECTRE_V2_USER_PRCTL:
2114         case SPECTRE_V2_USER_SECCOMP:
2115                 if (static_key_enabled(&switch_to_cond_stibp))
2116                         return ", STIBP: conditional";
2117         }
2118         return "";
2119 }
2120
2121 static char *ibpb_state(void)
2122 {
2123         if (boot_cpu_has(X86_FEATURE_IBPB)) {
2124                 if (static_key_enabled(&switch_mm_always_ibpb))
2125                         return ", IBPB: always-on";
2126                 if (static_key_enabled(&switch_mm_cond_ibpb))
2127                         return ", IBPB: conditional";
2128                 return ", IBPB: disabled";
2129         }
2130         return "";
2131 }
2132
2133 static char *pbrsb_eibrs_state(void)
2134 {
2135         if (boot_cpu_has_bug(X86_BUG_EIBRS_PBRSB)) {
2136                 if (boot_cpu_has(X86_FEATURE_RSB_VMEXIT_LITE) ||
2137                     boot_cpu_has(X86_FEATURE_RSB_VMEXIT))
2138                         return ", PBRSB-eIBRS: SW sequence";
2139                 else
2140                         return ", PBRSB-eIBRS: Vulnerable";
2141         } else {
2142                 return ", PBRSB-eIBRS: Not affected";
2143         }
2144 }
2145
2146 static ssize_t spectre_v2_show_state(char *buf)
2147 {
2148         if (spectre_v2_enabled == SPECTRE_V2_LFENCE)
2149                 return sprintf(buf, "Vulnerable: LFENCE\n");
2150
2151         if (spectre_v2_enabled == SPECTRE_V2_EIBRS && unprivileged_ebpf_enabled())
2152                 return sprintf(buf, "Vulnerable: eIBRS with unprivileged eBPF\n");
2153
2154         if (sched_smt_active() && unprivileged_ebpf_enabled() &&
2155             spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
2156                 return sprintf(buf, "Vulnerable: eIBRS+LFENCE with unprivileged eBPF and SMT\n");
2157
2158         return sprintf(buf, "%s%s%s%s%s%s%s\n",
2159                        spectre_v2_strings[spectre_v2_enabled],
2160                        ibpb_state(),
2161                        boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
2162                        stibp_state(),
2163                        boot_cpu_has(X86_FEATURE_RSB_CTXSW) ? ", RSB filling" : "",
2164                        pbrsb_eibrs_state(),
2165                        spectre_v2_module_string());
2166 }
2167
2168 static ssize_t srbds_show_state(char *buf)
2169 {
2170         return sprintf(buf, "%s\n", srbds_strings[srbds_mitigation]);
2171 }
2172
2173 static ssize_t retbleed_show_state(char *buf)
2174 {
2175         return sprintf(buf, "%s\n", retbleed_strings[retbleed_mitigation]);
2176 }
2177
2178 static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
2179                                char *buf, unsigned int bug)
2180 {
2181         if (!boot_cpu_has_bug(bug))
2182                 return sprintf(buf, "Not affected\n");
2183
2184         switch (bug) {
2185         case X86_BUG_CPU_MELTDOWN:
2186                 if (boot_cpu_has(X86_FEATURE_PTI))
2187                         return sprintf(buf, "Mitigation: PTI\n");
2188
2189                 break;
2190
2191         case X86_BUG_SPECTRE_V1:
2192                 return sprintf(buf, "%s\n", spectre_v1_strings[spectre_v1_mitigation]);
2193
2194         case X86_BUG_SPECTRE_V2:
2195                 return spectre_v2_show_state(buf);
2196
2197         case X86_BUG_SPEC_STORE_BYPASS:
2198                 return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);
2199
2200         case X86_BUG_L1TF:
2201                 if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
2202                         return l1tf_show_state(buf);
2203                 break;
2204
2205         case X86_BUG_MDS:
2206                 return mds_show_state(buf);
2207
2208         case X86_BUG_TAA:
2209                 return tsx_async_abort_show_state(buf);
2210
2211         case X86_BUG_ITLB_MULTIHIT:
2212                 return itlb_multihit_show_state(buf);
2213
2214         case X86_BUG_SRBDS:
2215                 return srbds_show_state(buf);
2216
2217         case X86_BUG_MMIO_STALE_DATA:
2218         case X86_BUG_MMIO_UNKNOWN:
2219                 return mmio_stale_data_show_state(buf);
2220
2221         case X86_BUG_RETBLEED:
2222                 return retbleed_show_state(buf);
2223
2224         default:
2225                 break;
2226         }
2227
2228         return sprintf(buf, "Vulnerable\n");
2229 }
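/*
 * Illustrative usage of the sysfs interface backed by cpu_show_common():
 * each bug is exported under /sys/devices/system/cpu/vulnerabilities/, so
 * for example
 *
 *	cat /sys/devices/system/cpu/vulnerabilities/spectre_v2
 *
 * prints one of the strings assembled by spectre_v2_show_state(), e.g.
 * "Mitigation: Retpolines, IBPB: conditional, IBRS_FW, STIBP: conditional,
 * RSB filling, PBRSB-eIBRS: Not affected" (the exact output depends on the
 * CPU and the selected mitigations).
 */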
2230
2231 ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf)
2232 {
2233         return cpu_show_common(dev, attr, buf, X86_BUG_CPU_MELTDOWN);
2234 }
2235
2236 ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf)
2237 {
2238         return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V1);
2239 }
2240
2241 ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf)
2242 {
2243         return cpu_show_common(dev, attr, buf, X86_BUG_SPECTRE_V2);
2244 }
2245
2246 ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf)
2247 {
2248         return cpu_show_common(dev, attr, buf, X86_BUG_SPEC_STORE_BYPASS);
2249 }
2250
2251 ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf)
2252 {
2253         return cpu_show_common(dev, attr, buf, X86_BUG_L1TF);
2254 }
2255
2256 ssize_t cpu_show_mds(struct device *dev, struct device_attribute *attr, char *buf)
2257 {
2258         return cpu_show_common(dev, attr, buf, X86_BUG_MDS);
2259 }
2260
2261 ssize_t cpu_show_tsx_async_abort(struct device *dev, struct device_attribute *attr, char *buf)
2262 {
2263         return cpu_show_common(dev, attr, buf, X86_BUG_TAA);
2264 }
2265
2266 ssize_t cpu_show_itlb_multihit(struct device *dev, struct device_attribute *attr, char *buf)
2267 {
2268         return cpu_show_common(dev, attr, buf, X86_BUG_ITLB_MULTIHIT);
2269 }
2270
2271 ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *buf)
2272 {
2273         return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS);
2274 }
2275
2276 ssize_t cpu_show_mmio_stale_data(struct device *dev, struct device_attribute *attr, char *buf)
2277 {
2278         if (boot_cpu_has_bug(X86_BUG_MMIO_UNKNOWN))
2279                 return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_UNKNOWN);
2280         else
2281                 return cpu_show_common(dev, attr, buf, X86_BUG_MMIO_STALE_DATA);
2282 }
2283
2284 ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, char *buf)
2285 {
2286         return cpu_show_common(dev, attr, buf, X86_BUG_RETBLEED);
2287 }
2288 #endif