// SPDX-License-Identifier: GPL-2.0-only
/*
 * Handle detection, reporting and mitigation of Spectre v1, v2 and v4, as
 * detailed at:
 *
 *   https://developer.arm.com/support/arm-security-updates/speculative-processor-vulnerability
 *
 * This code was originally written hastily under an awful lot of stress and so
 * aspects of it are somewhat hacky. Unfortunately, changing anything in here
 * instantly makes me feel ill. Thanks, Jann. Thann.
 *
 * Copyright (C) 2018 ARM Ltd, All Rights Reserved.
 * Copyright (C) 2020 Google LLC
 *
 * "If there's something strange in your neighbourhood, who you gonna call?"
 *
 * Authors: Will Deacon <will@kernel.org> and Marc Zyngier <maz@kernel.org>
 */

#include <linux/arm-smccc.h>
#include <linux/bpf.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/sched/task_stack.h>

#include <asm/insn.h>
#include <asm/spectre.h>
#include <asm/traps.h>
#include <asm/vectors.h>
#include <asm/virt.h>

/*
 * We try to ensure that the mitigation state can never change as the result of
 * onlining a late CPU.
 */
static void update_mitigation_state(enum mitigation_state *oldp,
				    enum mitigation_state new)
{
	enum mitigation_state state;

	do {
		state = READ_ONCE(*oldp);
		/* The state may only ever move towards SPECTRE_VULNERABLE. */
		if (new <= state)
			break;

		/* Userspace almost certainly can't deal with this. */
		if (WARN_ON(system_capabilities_finalized()))
			break;
	} while (cmpxchg_relaxed(oldp, state, new) != state);
}

/*
 * Spectre v1.
 *
 * The kernel can't protect userspace for this one: it's each person for
 * themselves. Advertise what we're doing and be done with it.
 */
ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "Mitigation: __user pointer sanitization\n");
}
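
/*
 * Illustrative only: userspace reads this via sysfs, e.g.
 *
 *	$ cat /sys/devices/system/cpu/vulnerabilities/spectre_v1
 *	Mitigation: __user pointer sanitization
 */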

/*
 * Spectre v2.
 *
 * This one sucks. A CPU is either:
 *
 * - Mitigated in hardware and advertised by ID_AA64PFR0_EL1.CSV2.
 * - Mitigated in hardware and listed in our "safe list".
 * - Mitigated in software by firmware.
 * - Mitigated in software by a CPU-specific dance in the kernel and a
 *   firmware call at EL2.
 * - Vulnerable.
 *
 * It's not unlikely for different CPUs in a big.LITTLE system to fall into
 * different camps.
 */
static enum mitigation_state spectre_v2_state;

static bool __read_mostly __nospectre_v2;
static int __init parse_spectre_v2_param(char *str)
{
	__nospectre_v2 = true;
	return 0;
}
early_param("nospectre_v2", parse_spectre_v2_param);

static bool spectre_v2_mitigations_off(void)
{
	bool ret = __nospectre_v2 || cpu_mitigations_off();

	if (ret)
		pr_info_once("spectre-v2 mitigation disabled by command line option\n");

	return ret;
}

static const char *get_bhb_affected_string(enum mitigation_state bhb_state)
{
	switch (bhb_state) {
	case SPECTRE_UNAFFECTED:
		return "";
	default:
	case SPECTRE_VULNERABLE:
		return ", but not BHB";
	case SPECTRE_MITIGATED:
		return ", BHB";
	}
}

static bool _unprivileged_ebpf_enabled(void)
{
#ifdef CONFIG_BPF_SYSCALL
	return !sysctl_unprivileged_bpf_disabled;
#else
	return false;
#endif
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	enum mitigation_state bhb_state = arm64_get_spectre_bhb_state();
	const char *bhb_str = get_bhb_affected_string(bhb_state);
	const char *v2_str = "Branch predictor hardening";

	switch (spectre_v2_state) {
	case SPECTRE_UNAFFECTED:
		if (bhb_state == SPECTRE_UNAFFECTED)
			return sprintf(buf, "Not affected\n");

		/*
		 * Platforms affected by Spectre-BHB can't report
		 * "Not affected" for Spectre-v2.
		 */
		v2_str = "CSV2";
		fallthrough;
	case SPECTRE_MITIGATED:
		if (bhb_state == SPECTRE_MITIGATED && _unprivileged_ebpf_enabled())
			return sprintf(buf, "Vulnerable: Unprivileged eBPF enabled\n");

		return sprintf(buf, "Mitigation: %s%s\n", v2_str, bhb_str);
	case SPECTRE_VULNERABLE:
		fallthrough;
	default:
		return sprintf(buf, "Vulnerable\n");
	}
}
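
/*
 * Indicative reports assembled from the strings above (not an exhaustive
 * list of what this function can emit):
 *
 *	Mitigation: Branch predictor hardening, BHB
 *	Mitigation: CSV2, but not BHB
 *	Vulnerable: Unprivileged eBPF enabled
 */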

static enum mitigation_state spectre_v2_get_cpu_hw_mitigation_state(void)
{
	u64 pfr0;
	static const struct midr_range spectre_v2_safe_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
		MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
		MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_2XX_SILVER),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
		{ /* sentinel */ }
	};

	/* If the CPU has CSV2 set, we're safe */
	pfr0 = read_cpuid(ID_AA64PFR0_EL1);
	if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
		return SPECTRE_UNAFFECTED;

	/* Alternatively, we have a list of unaffected CPUs */
	if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
		return SPECTRE_UNAFFECTED;

	return SPECTRE_VULNERABLE;
}

static enum mitigation_state spectre_v2_get_cpu_fw_mitigation_state(void)
{
	int ret;
	struct arm_smccc_res res;

	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
			     ARM_SMCCC_ARCH_WORKAROUND_1, &res);

	ret = res.a0;
	switch (ret) {
	case SMCCC_RET_SUCCESS:
		return SPECTRE_MITIGATED;
	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
		return SPECTRE_UNAFFECTED;
	default:
		fallthrough;
	case SMCCC_RET_NOT_SUPPORTED:
		return SPECTRE_VULNERABLE;
	}
}

bool has_spectre_v2(const struct arm64_cpu_capabilities *entry, int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	if (spectre_v2_get_cpu_hw_mitigation_state() == SPECTRE_UNAFFECTED)
		return false;

	if (spectre_v2_get_cpu_fw_mitigation_state() == SPECTRE_UNAFFECTED)
		return false;

	return true;
}

DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

enum mitigation_state arm64_get_spectre_v2_state(void)
{
	return spectre_v2_state;
}

#ifdef CONFIG_KVM
#include <asm/cacheflush.h>
#include <asm/kvm_asm.h>

atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);

static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
				const char *hyp_vecs_end)
{
	void *dst = lm_alias(__bp_harden_hyp_vecs + slot * SZ_2K);
	int i;

	for (i = 0; i < SZ_2K; i += 0x80)
		memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);

	__flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
}

static DEFINE_RAW_SPINLOCK(bp_lock);
static void install_bp_hardening_cb(bp_hardening_cb_t fn)
{
	int cpu, slot = -1;
	const char *hyp_vecs_start = __smccc_workaround_1_smc;
	const char *hyp_vecs_end = __smccc_workaround_1_smc +
				   __SMCCC_WORKAROUND_1_SMC_SZ;

	/*
	 * Vinz Clortho takes the hyp_vecs start/end "keys" at
	 * the door when we're a guest. Skip the hyp-vectors work.
	 */
	if (!is_hyp_mode_available()) {
		__this_cpu_write(bp_hardening_data.fn, fn);
		return;
	}

	raw_spin_lock(&bp_lock);
	for_each_possible_cpu(cpu) {
		if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
			slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
			break;
		}
	}

	if (slot == -1) {
		slot = atomic_inc_return(&arm64_el2_vector_last_slot);
		BUG_ON(slot >= BP_HARDEN_EL2_SLOTS);
		__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
	}

	__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
	__this_cpu_write(bp_hardening_data.fn, fn);
	__this_cpu_write(bp_hardening_data.template_start, hyp_vecs_start);
	raw_spin_unlock(&bp_lock);
}
#else
static void install_bp_hardening_cb(bp_hardening_cb_t fn)
{
	__this_cpu_write(bp_hardening_data.fn, fn);
}
#endif	/* CONFIG_KVM */

static void call_smc_arch_workaround_1(void)
{
	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

static void call_hvc_arch_workaround_1(void)
{
	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}
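
/*
 * Explanatory note (my reading; the precise microarchitectural rationale
 * is Qualcomm-specific): the Falkor workaround below stuffs the branch
 * predictor's link stack by executing sixteen "bl . + 4" call/return
 * pairs, displacing any attacker-trained entries, before restoring x30.
 */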
static void qcom_link_stack_sanitisation(void)
{
	u64 tmp;

	asm volatile("mov	%0, x30		\n"
		     ".rept	16		\n"
		     "bl	. + 4		\n"
		     ".endr			\n"
		     "mov	x30, %0		\n"
		     : "=&r" (tmp));
}

static bp_hardening_cb_t spectre_v2_get_sw_mitigation_cb(void)
{
	u32 midr = read_cpuid_id();
	if (((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR) &&
	    ((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR_V1))
		return NULL;

	return qcom_link_stack_sanitisation;
}

static enum mitigation_state spectre_v2_enable_fw_mitigation(void)
{
	bp_hardening_cb_t cb;
	enum mitigation_state state;

	state = spectre_v2_get_cpu_fw_mitigation_state();
	if (state != SPECTRE_MITIGATED)
		return state;

	if (spectre_v2_mitigations_off())
		return SPECTRE_VULNERABLE;

	switch (arm_smccc_1_1_get_conduit()) {
	case SMCCC_CONDUIT_HVC:
		cb = call_hvc_arch_workaround_1;
		break;

	case SMCCC_CONDUIT_SMC:
		cb = call_smc_arch_workaround_1;
		break;

	default:
		return SPECTRE_VULNERABLE;
	}

	/*
	 * Prefer a CPU-specific workaround if it exists. Note that we
	 * still rely on firmware for the mitigation at EL2.
	 */
	cb = spectre_v2_get_sw_mitigation_cb() ?: cb;
	install_bp_hardening_cb(cb);
	return SPECTRE_MITIGATED;
}

void spectre_v2_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
{
	enum mitigation_state state;

	WARN_ON(preemptible());

	state = spectre_v2_get_cpu_hw_mitigation_state();
	if (state == SPECTRE_VULNERABLE)
		state = spectre_v2_enable_fw_mitigation();

	update_mitigation_state(&spectre_v2_state, state);
}

/*
 * Spectre v4.
 *
 * If you thought Spectre v2 was nasty, wait until you see this mess. A CPU is
 * either:
 *
 * - Mitigated in hardware and listed in our "safe list".
 * - Mitigated in hardware via PSTATE.SSBS.
 * - Mitigated in software by firmware (sometimes referred to as SSBD).
 *
 * Wait, that doesn't sound so bad, does it? Keep reading...
 *
 * A major source of headaches is that the software mitigation is enabled both
 * on a per-task basis, but can also be forced on for the kernel, necessitating
 * both context-switch *and* entry/exit hooks. To make it even worse, some CPUs
 * allow EL0 to toggle SSBS directly, which can end up with the prctl() state
 * being stale when re-entering the kernel. The usual big.LITTLE caveats apply,
 * so you can have systems that have both firmware and SSBS mitigations. This
 * means we actually have to reject late onlining of CPUs with mitigations if
 * all of the currently onlined CPUs are safelisted, as the mitigation tends to
 * be opt-in for userspace. Yes, really, the cure is worse than the disease.
 *
 * The only good part is that if the firmware mitigation is present, then it is
 * present for all CPUs, meaning we don't have to worry about late onlining of a
 * vulnerable CPU if one of the boot CPUs is using the firmware mitigation.
 *
 * Give me a VAX-11/780 any day of the week...
 */
static enum mitigation_state spectre_v4_state;

/* This is the per-cpu state tracking whether we need to talk to firmware */
DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);

enum spectre_v4_policy {
	SPECTRE_V4_POLICY_MITIGATION_DYNAMIC,
	SPECTRE_V4_POLICY_MITIGATION_ENABLED,
	SPECTRE_V4_POLICY_MITIGATION_DISABLED,
};

static enum spectre_v4_policy __read_mostly __spectre_v4_policy;

static const struct spectre_v4_param {
	const char		*str;
	enum spectre_v4_policy	policy;
} spectre_v4_params[] = {
	{ "force-on",	SPECTRE_V4_POLICY_MITIGATION_ENABLED, },
	{ "force-off",	SPECTRE_V4_POLICY_MITIGATION_DISABLED, },
	{ "kernel",	SPECTRE_V4_POLICY_MITIGATION_DYNAMIC, },
};
static int __init parse_spectre_v4_param(char *str)
{
	int i;

	if (!str || !str[0])
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(spectre_v4_params); i++) {
		const struct spectre_v4_param *param = &spectre_v4_params[i];

		if (strncmp(str, param->str, strlen(param->str)))
			continue;

		__spectre_v4_policy = param->policy;
		return 0;
	}

	return -EINVAL;
}
early_param("ssbd", parse_spectre_v4_param);

/*
 * Because this was all written in a rush by people working in different silos,
 * we've ended up with multiple command line options to control the same thing.
 * Wrap these up in some helpers, which prefer disabling the mitigation if faced
 * with contradictory parameters. The mitigation is always either "off",
 * "dynamic" or "on".
 */
static bool spectre_v4_mitigations_off(void)
{
	bool ret = cpu_mitigations_off() ||
		   __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DISABLED;

	if (ret)
		pr_info_once("spectre-v4 mitigation disabled by command-line option\n");

	return ret;
}

/* Do we need to toggle the mitigation state on entry to/exit from the kernel? */
static bool spectre_v4_mitigations_dynamic(void)
{
	return !spectre_v4_mitigations_off() &&
	       __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DYNAMIC;
}

static bool spectre_v4_mitigations_on(void)
{
	return !spectre_v4_mitigations_off() &&
	       __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_ENABLED;
}

ssize_t cpu_show_spec_store_bypass(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	switch (spectre_v4_state) {
	case SPECTRE_UNAFFECTED:
		return sprintf(buf, "Not affected\n");
	case SPECTRE_MITIGATED:
		return sprintf(buf, "Mitigation: Speculative Store Bypass disabled via prctl\n");
	case SPECTRE_VULNERABLE:
		fallthrough;
	default:
		return sprintf(buf, "Vulnerable\n");
	}
}

enum mitigation_state arm64_get_spectre_v4_state(void)
{
	return spectre_v4_state;
}

static enum mitigation_state spectre_v4_get_cpu_hw_mitigation_state(void)
{
	static const struct midr_range spectre_v4_safe_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
		MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
		{ /* sentinel */ },
	};

	if (is_midr_in_range_list(read_cpuid_id(), spectre_v4_safe_list))
		return SPECTRE_UNAFFECTED;

	/* CPU features are detected first */
	if (this_cpu_has_cap(ARM64_SSBS))
		return SPECTRE_MITIGATED;

	return SPECTRE_VULNERABLE;
}

static enum mitigation_state spectre_v4_get_cpu_fw_mitigation_state(void)
{
	int ret;
	struct arm_smccc_res res;

	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
			     ARM_SMCCC_ARCH_WORKAROUND_2, &res);

	ret = res.a0;
	switch (ret) {
	case SMCCC_RET_SUCCESS:
		return SPECTRE_MITIGATED;
	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
		fallthrough;
	case SMCCC_RET_NOT_REQUIRED:
		return SPECTRE_UNAFFECTED;
	default:
		fallthrough;
	case SMCCC_RET_NOT_SUPPORTED:
		return SPECTRE_VULNERABLE;
	}
}

bool has_spectre_v4(const struct arm64_cpu_capabilities *cap, int scope)
{
	enum mitigation_state state;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	state = spectre_v4_get_cpu_hw_mitigation_state();
	if (state == SPECTRE_VULNERABLE)
		state = spectre_v4_get_cpu_fw_mitigation_state();

	return state != SPECTRE_UNAFFECTED;
}
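
/*
 * Note on the magic numbers in try_emulate_el1_ssbs() below: 0xd500401f
 * is the base of the "MSR (immediate)" encoding, PSTATE_SSBS supplies the
 * Op1/Op2 fields that select the SSBS pstate field, and the single
 * immediate bit sits in CRm (hence PSTATE_Imm_shift). Masking that bit
 * out lets one comparison match both the "#0" and "#1" forms.
 */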
bool try_emulate_el1_ssbs(struct pt_regs *regs, u32 instr)
{
	const u32 instr_mask = ~(1U << PSTATE_Imm_shift);
	const u32 instr_val = 0xd500401f | PSTATE_SSBS;

	if ((instr & instr_mask) != instr_val)
		return false;

	if (instr & BIT(PSTATE_Imm_shift))
		regs->pstate |= PSR_SSBS_BIT;
	else
		regs->pstate &= ~PSR_SSBS_BIT;

	arm64_skip_faulting_instruction(regs, 4);
	return true;
}

static enum mitigation_state spectre_v4_enable_hw_mitigation(void)
{
	enum mitigation_state state;

	/*
	 * If the system is mitigated but this CPU doesn't have SSBS, then
	 * we must be on the safelist and there's nothing more to do.
	 */
	state = spectre_v4_get_cpu_hw_mitigation_state();
	if (state != SPECTRE_MITIGATED || !this_cpu_has_cap(ARM64_SSBS))
		return state;

	if (spectre_v4_mitigations_off()) {
		sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS);
		asm volatile(SET_PSTATE_SSBS(1));
		return SPECTRE_VULNERABLE;
	}

	/* SCTLR_EL1.DSSBS was initialised to 0 during boot */
	asm volatile(SET_PSTATE_SSBS(0));
	return SPECTRE_MITIGATED;
}

/*
 * Patch a branch over the Spectre-v4 mitigation code with a NOP so that
 * we fallthrough and check whether firmware needs to be called on this CPU.
 */
void __init spectre_v4_patch_fw_mitigation_enable(struct alt_instr *alt,
						  __le32 *origptr,
						  __le32 *updptr, int nr_inst)
{
	BUG_ON(nr_inst != 1); /* Branch -> NOP */

	if (spectre_v4_mitigations_off())
		return;

	if (cpus_have_final_cap(ARM64_SSBS))
		return;

	if (spectre_v4_mitigations_dynamic())
		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
}

/*
 * Patch a NOP in the Spectre-v4 mitigation code with an SMC/HVC instruction
 * to call into firmware to adjust the mitigation state.
 */
void __init smccc_patch_fw_mitigation_conduit(struct alt_instr *alt,
					      __le32 *origptr,
					      __le32 *updptr, int nr_inst)
{
	u32 insn;

	BUG_ON(nr_inst != 1); /* NOP -> HVC/SMC */

	switch (arm_smccc_1_1_get_conduit()) {
	case SMCCC_CONDUIT_HVC:
		insn = aarch64_insn_get_hvc_value();
		break;
	case SMCCC_CONDUIT_SMC:
		insn = aarch64_insn_get_smc_value();
		break;
	default:
		return;
	}

	*updptr = cpu_to_le32(insn);
}
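
/*
 * Sketch of the net effect (the real template lives in the entry
 * assembly): a sequence along the lines of
 *
 *	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
 *	nop				// patched to "hvc #0" or "smc #0"
 *
 * ends up calling firmware through whichever conduit was detected,
 * avoiding a conduit check on the hot entry path.
 */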

static enum mitigation_state spectre_v4_enable_fw_mitigation(void)
{
	enum mitigation_state state;

	state = spectre_v4_get_cpu_fw_mitigation_state();
	if (state != SPECTRE_MITIGATED)
		return state;

	if (spectre_v4_mitigations_off()) {
		arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, false, NULL);
		return SPECTRE_VULNERABLE;
	}

	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, true, NULL);

	if (spectre_v4_mitigations_dynamic())
		__this_cpu_write(arm64_ssbd_callback_required, 1);

	return SPECTRE_MITIGATED;
}

void spectre_v4_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
{
	enum mitigation_state state;

	WARN_ON(preemptible());

	state = spectre_v4_enable_hw_mitigation();
	if (state == SPECTRE_VULNERABLE)
		state = spectre_v4_enable_fw_mitigation();

	update_mitigation_state(&spectre_v4_state, state);
}

static void __update_pstate_ssbs(struct pt_regs *regs, bool state)
{
	u64 bit = compat_user_mode(regs) ? PSR_AA32_SSBS_BIT : PSR_SSBS_BIT;

	if (state)
		regs->pstate |= bit;
	else
		regs->pstate &= ~bit;
}

void spectre_v4_enable_task_mitigation(struct task_struct *tsk)
{
	struct pt_regs *regs = task_pt_regs(tsk);
	bool ssbs = false, kthread = tsk->flags & PF_KTHREAD;

	if (spectre_v4_mitigations_off())
		ssbs = true;
	else if (spectre_v4_mitigations_dynamic() && !kthread)
		ssbs = !test_tsk_thread_flag(tsk, TIF_SSBD);

	__update_pstate_ssbs(regs, ssbs);
}

/*
 * The Spectre-v4 mitigation can be controlled via a prctl() from userspace.
 * This is interesting because the "speculation disabled" behaviour can be
 * configured so that it is preserved across exec(), which means that the
 * prctl() may be necessary even when PSTATE.SSBS can be toggled directly
 * from userspace.
 */
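
/*
 * Typical userspace usage (illustrative, not built here):
 *
 *	prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
 *	      PR_SPEC_DISABLE, 0, 0);
 *
 * which lands in ssbd_prctl_set() below via arch_prctl_spec_ctrl_set().
 */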
static void ssbd_prctl_enable_mitigation(struct task_struct *task)
{
	task_clear_spec_ssb_noexec(task);
	task_set_spec_ssb_disable(task);
	set_tsk_thread_flag(task, TIF_SSBD);
}

static void ssbd_prctl_disable_mitigation(struct task_struct *task)
{
	task_clear_spec_ssb_noexec(task);
	task_clear_spec_ssb_disable(task);
	clear_tsk_thread_flag(task, TIF_SSBD);
}

static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	switch (ctrl) {
	case PR_SPEC_ENABLE:
		/* Enable speculation: disable mitigation */
		/*
		 * Force disabled speculation prevents it from being
		 * re-enabled.
		 */
		if (task_spec_ssb_force_disable(task))
			return -EPERM;

		/*
		 * If the mitigation is forced on, then speculation is forced
		 * off and we again prevent it from being re-enabled.
		 */
		if (spectre_v4_mitigations_on())
			return -EPERM;

		ssbd_prctl_disable_mitigation(task);
		break;
	case PR_SPEC_FORCE_DISABLE:
		/* Force disable speculation: force enable mitigation */
		/*
		 * If the mitigation is forced off, then speculation is forced
		 * on and we prevent it from being disabled.
		 */
		if (spectre_v4_mitigations_off())
			return -EPERM;

		task_set_spec_ssb_force_disable(task);
		fallthrough;
	case PR_SPEC_DISABLE:
		/* Disable speculation: enable mitigation */
		/* Same as PR_SPEC_FORCE_DISABLE */
		if (spectre_v4_mitigations_off())
			return -EPERM;

		ssbd_prctl_enable_mitigation(task);
		break;
	case PR_SPEC_DISABLE_NOEXEC:
		/* Disable speculation until execve(): enable mitigation */
		/*
		 * If the mitigation state is forced one way or the other, then
		 * we must fail now before we try to toggle it on execve().
		 */
		if (task_spec_ssb_force_disable(task) ||
		    spectre_v4_mitigations_off() ||
		    spectre_v4_mitigations_on()) {
			return -EPERM;
		}

		ssbd_prctl_enable_mitigation(task);
		task_set_spec_ssb_noexec(task);
		break;
	default:
		return -ERANGE;
	}

	spectre_v4_enable_task_mitigation(task);
	return 0;
}

int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
			     unsigned long ctrl)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssbd_prctl_set(task, ctrl);
	default:
		return -ENODEV;
	}
}

static int ssbd_prctl_get(struct task_struct *task)
{
	switch (spectre_v4_state) {
	case SPECTRE_UNAFFECTED:
		return PR_SPEC_NOT_AFFECTED;
	case SPECTRE_MITIGATED:
		if (spectre_v4_mitigations_on())
			return PR_SPEC_NOT_AFFECTED;

		if (spectre_v4_mitigations_dynamic())
			break;

		/* Mitigations are disabled, so we're vulnerable. */
		fallthrough;
	case SPECTRE_VULNERABLE:
		fallthrough;
	default:
		return PR_SPEC_ENABLE;
	}

	/* Check the mitigation state for this task */
	if (task_spec_ssb_force_disable(task))
		return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;

	if (task_spec_ssb_noexec(task))
		return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC;

	if (task_spec_ssb_disable(task))
		return PR_SPEC_PRCTL | PR_SPEC_DISABLE;

	return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
}

int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssbd_prctl_get(task);
	default:
		return -ENODEV;
	}
}
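
/*
 * Illustrative read-back of the state set above:
 *
 *	prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);
 *
 * which returns e.g. PR_SPEC_PRCTL | PR_SPEC_DISABLE once the mitigation
 * has been enabled for the task.
 */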

/*
 * Spectre BHB.
 *
 * A CPU is either:
 * - Mitigated by a branchy loop a CPU specific number of times, and listed
 *   in our "loop mitigated list".
 * - Mitigated in software by the firmware Spectre v2 call.
 * - Has the ClearBHB instruction to perform the mitigation.
 * - Has the 'Exception Clears Branch History Buffer' (ECBHB) feature, so no
 *   software mitigation in the vectors is needed.
 * - Has CSV2.3, so is unaffected.
 */
static enum mitigation_state spectre_bhb_state;

enum mitigation_state arm64_get_spectre_bhb_state(void)
{
	return spectre_bhb_state;
}

/*
 * This must be called with SCOPE_LOCAL_CPU for each type of CPU, before any
 * SCOPE_SYSTEM call will give the right answer.
 */
u8 spectre_bhb_loop_affected(int scope)
{
	u8 k = 0;
	static u8 max_bhb_k;

	if (scope == SCOPE_LOCAL_CPU) {
		static const struct midr_range spectre_bhb_k32_list[] = {
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A78AE),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
			{},
		};
		static const struct midr_range spectre_bhb_k24_list[] = {
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
			{},
		};
		static const struct midr_range spectre_bhb_k11_list[] = {
			MIDR_ALL_VERSIONS(MIDR_AMPERE1),
			{},
		};
		static const struct midr_range spectre_bhb_k8_list[] = {
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
			{},
		};

		if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list))
			k = 32;
		else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list))
			k = 24;
		else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k11_list))
			k = 11;
		else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list))
			k =  8;

		max_bhb_k = max(max_bhb_k, k);
	} else {
		k = max_bhb_k;
	}

	return k;
}

static enum mitigation_state spectre_bhb_get_cpu_fw_mitigation_state(void)
{
	int ret;
	struct arm_smccc_res res;

	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
			     ARM_SMCCC_ARCH_WORKAROUND_3, &res);

	ret = res.a0;
	switch (ret) {
	case SMCCC_RET_SUCCESS:
		return SPECTRE_MITIGATED;
	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
		return SPECTRE_UNAFFECTED;
	default:
		fallthrough;
	case SMCCC_RET_NOT_SUPPORTED:
		return SPECTRE_VULNERABLE;
	}
}

static bool is_spectre_bhb_fw_affected(int scope)
{
	static bool system_affected;
	enum mitigation_state fw_state;
	bool has_smccc = arm_smccc_1_1_get_conduit() != SMCCC_CONDUIT_NONE;
	static const struct midr_range spectre_bhb_firmware_mitigated_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
		{},
	};
	bool cpu_in_list = is_midr_in_range_list(read_cpuid_id(),
					 spectre_bhb_firmware_mitigated_list);

	if (scope != SCOPE_LOCAL_CPU)
		return system_affected;

	fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
	if (cpu_in_list || (has_smccc && fw_state == SPECTRE_MITIGATED)) {
		system_affected = true;
		return true;
	}

	return false;
}

static bool supports_ecbhb(int scope)
{
	u64 mmfr1;

	if (scope == SCOPE_LOCAL_CPU)
		mmfr1 = read_sysreg_s(SYS_ID_AA64MMFR1_EL1);
	else
		mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);

	return cpuid_feature_extract_unsigned_field(mmfr1,
						    ID_AA64MMFR1_ECBHB_SHIFT);
}

bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
			     int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	if (supports_csv2p3(scope))
		return false;

	if (supports_clearbhb(scope))
		return false;

	if (spectre_bhb_loop_affected(scope))
		return true;

	if (is_spectre_bhb_fw_affected(scope))
		return true;

	return false;
}

static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
{
	const char *v = arm64_get_bp_hardening_vector(slot);

	if (slot < 0)
		return;

	__this_cpu_write(this_cpu_vector, v);

	/*
	 * When KPTI is in use, the vectors are switched when exiting to
	 * user-space.
	 */
	if (arm64_kernel_unmapped_at_el0())
		return;

	write_sysreg(v, vbar_el1);
	isb();
}

#ifdef CONFIG_KVM
static int kvm_bhb_get_vecs_size(const char *start)
{
	if (start == __smccc_workaround_3_smc)
		return __SMCCC_WORKAROUND_3_SMC_SZ;
	else if (start == __spectre_bhb_loop_k8 ||
		 start == __spectre_bhb_loop_k24 ||
		 start == __spectre_bhb_loop_k32)
		return __SPECTRE_BHB_LOOP_SZ;
	else if (start == __spectre_bhb_clearbhb)
		return __SPECTRE_BHB_CLEARBHB_SZ;

	return 0;
}

static void kvm_setup_bhb_slot(const char *hyp_vecs_start)
{
	int cpu, slot = -1, size;
	const char *hyp_vecs_end;

	if (!IS_ENABLED(CONFIG_KVM) || !is_hyp_mode_available())
		return;

	size = kvm_bhb_get_vecs_size(hyp_vecs_start);
	if (WARN_ON_ONCE(!hyp_vecs_start || !size))
		return;
	hyp_vecs_end = hyp_vecs_start + size;

	raw_spin_lock(&bp_lock);
	for_each_possible_cpu(cpu) {
		if (per_cpu(bp_hardening_data.template_start, cpu) == hyp_vecs_start) {
			slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
			break;
		}
	}

	if (slot == -1) {
		slot = atomic_inc_return(&arm64_el2_vector_last_slot);
		BUG_ON(slot >= BP_HARDEN_EL2_SLOTS);
		__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
	}

	__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
	__this_cpu_write(bp_hardening_data.template_start, hyp_vecs_start);
	raw_spin_unlock(&bp_lock);
}
#else
#define __smccc_workaround_3_smc NULL
#define __spectre_bhb_loop_k8 NULL
#define __spectre_bhb_loop_k24 NULL
#define __spectre_bhb_loop_k32 NULL
#define __spectre_bhb_clearbhb NULL

static void kvm_setup_bhb_slot(const char *hyp_vecs_start) { }
#endif /* CONFIG_KVM */

void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
{
	enum mitigation_state fw_state, state = SPECTRE_VULNERABLE;

	if (!is_spectre_bhb_affected(entry, SCOPE_LOCAL_CPU))
		return;

	if (arm64_get_spectre_v2_state() == SPECTRE_VULNERABLE) {
		/* No point mitigating Spectre-BHB alone. */
	} else if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY)) {
		pr_info_once("spectre-bhb mitigation disabled by compile time option\n");
	} else if (cpu_mitigations_off()) {
		pr_info_once("spectre-bhb mitigation disabled by command line option\n");
	} else if (supports_ecbhb(SCOPE_LOCAL_CPU)) {
		state = SPECTRE_MITIGATED;
	} else if (supports_clearbhb(SCOPE_LOCAL_CPU)) {
		kvm_setup_bhb_slot(__spectre_bhb_clearbhb);
		this_cpu_set_vectors(EL1_VECTOR_BHB_CLEAR_INSN);

		state = SPECTRE_MITIGATED;
	} else if (spectre_bhb_loop_affected(SCOPE_LOCAL_CPU)) {
		switch (spectre_bhb_loop_affected(SCOPE_SYSTEM)) {
		case 8:
			kvm_setup_bhb_slot(__spectre_bhb_loop_k8);
			break;
		case 24:
			kvm_setup_bhb_slot(__spectre_bhb_loop_k24);
			break;
		case 32:
			kvm_setup_bhb_slot(__spectre_bhb_loop_k32);
			break;
		default:
			WARN_ON_ONCE(1);
		}
		this_cpu_set_vectors(EL1_VECTOR_BHB_LOOP);

		state = SPECTRE_MITIGATED;
	} else if (is_spectre_bhb_fw_affected(SCOPE_LOCAL_CPU)) {
		fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
		if (fw_state == SPECTRE_MITIGATED) {
			kvm_setup_bhb_slot(__smccc_workaround_3_smc);
			this_cpu_set_vectors(EL1_VECTOR_BHB_FW);

			state = SPECTRE_MITIGATED;
		}
	}

	update_mitigation_state(&spectre_bhb_state, state);
}

/* Patched to correct the immediate */
void noinstr spectre_bhb_patch_loop_iter(struct alt_instr *alt,
					 __le32 *origptr, __le32 *updptr, int nr_inst)
{
	u8 rd;
	u32 insn;
	u16 loop_count = spectre_bhb_loop_affected(SCOPE_SYSTEM);

	BUG_ON(nr_inst != 1); /* MOV -> MOV */

	if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY))
		return;

	insn = le32_to_cpu(*origptr);
	rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);
	insn = aarch64_insn_gen_movewide(rd, loop_count, 0,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_ZERO);
	*updptr++ = cpu_to_le32(insn);
}

#ifdef CONFIG_BPF_SYSCALL
#define EBPF_WARN "Unprivileged eBPF is enabled, data leaks possible via Spectre v2 BHB attacks!\n"
void unpriv_ebpf_notify(int new_state)
{
	if (spectre_v2_state == SPECTRE_VULNERABLE ||
	    spectre_bhb_state != SPECTRE_MITIGATED)
		return;

	if (!new_state)
		pr_err("WARNING: %s", EBPF_WARN);
}
#endif