/*
 * Contains CPU specific errata definitions
 *
 * Copyright (C) 2014 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/arm-smccc.h>
#include <linux/psci.h>
#include <linux/types.h>
#include <linux/cpu.h>

#include <asm/cputype.h>
#include <asm/cpufeature.h>
#include <asm/vectors.h>
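
/*
 * Note: the .matches helpers below run on the CPU being checked with
 * preemption disabled (SCOPE_LOCAL_CPU); the WARN_ON()s in each helper
 * enforce that convention.
 */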
static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
{
	u32 midr = read_cpuid_id();

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range(midr, &entry->midr_range);
}
static bool __maybe_unused
is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry,
			    int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list);
}
static bool __maybe_unused
is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
{
	u32 model;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	model = read_cpuid_id();
	model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
		 MIDR_ARCHITECTURE_MASK;

	return model == entry->midr_range.model;
}
static bool
has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
			  int scope)
{
	u64 mask = CTR_CACHE_MINLINE_MASK;

	/* Skip matching the min line sizes for cache type check */
	if (entry->capability == ARM64_MISMATCHED_CACHE_TYPE)
		mask ^= arm64_ftr_reg_ctrel0.strict_mask;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return (read_cpuid_cachetype() & mask) !=
	       (arm64_ftr_reg_ctrel0.sys_val & mask);
}
static void
cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused)
{
	/* Clear SCTLR_EL1.UCT */
	config_sctlr_el1(SCTLR_EL1_UCT, 0);
}
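
/*
 * Branch predictor hardening: each affected CPU installs a per-CPU
 * invalidation callback and, when KVM is built in, a private 2K slot of
 * EL2 vectors copied from one of the templates declared below.
 */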
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

#ifdef CONFIG_KVM
extern char __smccc_workaround_1_smc_start[];
extern char __smccc_workaround_1_smc_end[];
extern char __smccc_workaround_3_smc_start[];
extern char __smccc_workaround_3_smc_end[];
extern char __spectre_bhb_loop_k8_start[];
extern char __spectre_bhb_loop_k8_end[];
extern char __spectre_bhb_loop_k24_start[];
extern char __spectre_bhb_loop_k24_end[];
extern char __spectre_bhb_loop_k32_start[];
extern char __spectre_bhb_loop_k32_end[];
extern char __spectre_bhb_clearbhb_start[];
extern char __spectre_bhb_clearbhb_end[];
static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
				const char *hyp_vecs_end)
{
	void *dst = lm_alias(__bp_harden_hyp_vecs_start + slot * SZ_2K);
	int i;

	/* Replicate the template into each 0x80-byte vector entry of the slot */
	for (i = 0; i < SZ_2K; i += 0x80)
		memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);

	flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
}
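
/* bp_lock serialises slot allocation; last_slot is the highest slot handed out. */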
static DEFINE_SPINLOCK(bp_lock);
static int last_slot = -1;

static void install_bp_hardening_cb(bp_hardening_cb_t fn,
				    const char *hyp_vecs_start,
				    const char *hyp_vecs_end)
{
	int cpu, slot = -1;

	spin_lock(&bp_lock);
	for_each_possible_cpu(cpu) {
		if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
			slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
			break;
		}
	}

	if (slot == -1) {
		last_slot++;
		BUG_ON(((__bp_harden_hyp_vecs_end - __bp_harden_hyp_vecs_start)
			/ SZ_2K) <= last_slot);
		slot = last_slot;
		__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
	}

	__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
	__this_cpu_write(bp_hardening_data.fn, fn);
	__this_cpu_write(bp_hardening_data.template_start, hyp_vecs_start);
	spin_unlock(&bp_lock);
}
#else
#define __smccc_workaround_1_smc_start		NULL
#define __smccc_workaround_1_smc_end		NULL

static void install_bp_hardening_cb(bp_hardening_cb_t fn,
				    const char *hyp_vecs_start,
				    const char *hyp_vecs_end)
{
	__this_cpu_write(bp_hardening_data.fn, fn);
}
#endif	/* CONFIG_KVM */
#include <uapi/linux/psci.h>
#include <linux/arm-smccc.h>
#include <linux/psci.h>
static void call_smc_arch_workaround_1(void)
{
	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

static void call_hvc_arch_workaround_1(void)
{
	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}
static void qcom_link_stack_sanitization(void)
{
	u64 tmp;

	asm volatile("mov	%0, x30		\n"
		     ".rept	16		\n"
		     "bl	. + 4		\n"
		     ".endr			\n"
		     "mov	x30, %0		\n"
		     : "=&r" (tmp));
}
static bool __nospectre_v2;
static int __init parse_nospectre_v2(char *str)
{
	__nospectre_v2 = true;
	return 0;
}
early_param("nospectre_v2", parse_nospectre_v2);
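
/* "nospectre_v2" on the command line is honoured in check_branch_predictor() */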
/*
 * -1: No workaround
 *  0: No workaround required
 *  1: Workaround installed
 */
static int detect_harden_bp_fw(void)
{
	bp_hardening_cb_t cb;
	void *smccc_start, *smccc_end;
	struct arm_smccc_res res;
	u32 midr = read_cpuid_id();

	if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
		return -1;

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
		switch ((int)res.a0) {
		case 1:
			/* Firmware says we're just fine */
			return 0;
		case 0:
			cb = call_hvc_arch_workaround_1;
			/* This is a guest, no need to patch KVM vectors */
			smccc_start = NULL;
			smccc_end = NULL;
			break;
		default:
			return -1;
		}
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
		switch ((int)res.a0) {
		case 1:
			/* Firmware says we're just fine */
			return 0;
		case 0:
			cb = call_smc_arch_workaround_1;
			smccc_start = __smccc_workaround_1_smc_start;
			smccc_end = __smccc_workaround_1_smc_end;
			break;
		default:
			return -1;
		}
		break;

	default:
		return -1;
	}

	if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
	    ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
		cb = qcom_link_stack_sanitization;

	if (IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR))
		install_bp_hardening_cb(cb, smccc_start, smccc_end);

	return 1;
}
DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);

int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
static bool __ssb_safe = true;

static const struct ssbd_options {
	const char	*str;
	int		state;
} ssbd_options[] = {
	{ "force-on",	ARM64_SSBD_FORCE_ENABLE, },
	{ "force-off",	ARM64_SSBD_FORCE_DISABLE, },
	{ "kernel",	ARM64_SSBD_KERNEL, },
};
static int __init ssbd_cfg(char *buf)
{
	int i;

	if (!buf || !buf[0])
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(ssbd_options); i++) {
		int len = strlen(ssbd_options[i].str);

		if (strncmp(buf, ssbd_options[i].str, len))
			continue;

		ssbd_state = ssbd_options[i].state;
		return 0;
	}

	return -EINVAL;
}
early_param("ssbd", ssbd_cfg);
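
/* e.g. boot with "ssbd=force-on", "ssbd=force-off" or "ssbd=kernel" */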
void __init arm64_update_smccc_conduit(struct alt_instr *alt,
				       __le32 *origptr, __le32 *updptr,
				       int nr_inst)
{
	u32 insn;

	BUG_ON(nr_inst != 1);

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		insn = aarch64_insn_get_hvc_value();
		break;
	case PSCI_CONDUIT_SMC:
		insn = aarch64_insn_get_smc_value();
		break;
	default:
		return;
	}

	*updptr = cpu_to_le32(insn);
}
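
/*
 * Used as an alternatives callback: the single instruction at the call
 * site is rewritten into an HVC or SMC to match the firmware conduit in
 * use, and left untouched if there is no usable conduit.
 */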
void __init arm64_enable_wa2_handling(struct alt_instr *alt,
				      __le32 *origptr, __le32 *updptr,
				      int nr_inst)
{
	BUG_ON(nr_inst != 1);
	/*
	 * Only allow mitigation on EL1 entry/exit and guest
	 * ARCH_WORKAROUND_2 handling if the SSBD state allows it to
	 * be flipped.
	 */
	if (arm64_get_ssbd_state() == ARM64_SSBD_KERNEL)
		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
}
void arm64_set_ssbd_mitigation(bool state)
{
	if (!IS_ENABLED(CONFIG_ARM64_SSBD)) {
		pr_info_once("SSBD disabled by kernel configuration\n");
		return;
	}

	if (this_cpu_has_cap(ARM64_SSBS)) {
		if (state)
			asm volatile(SET_PSTATE_SSBS(0));
		else
			asm volatile(SET_PSTATE_SSBS(1));
		return;
	}

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
		break;

	default:
		WARN_ON_ONCE(1);
		break;
	}
}
static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
				int scope)
{
	struct arm_smccc_res res;
	bool required = true;
	s32 val;
	bool this_cpu_safe = false;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	if (cpu_mitigations_off())
		ssbd_state = ARM64_SSBD_FORCE_DISABLE;

	/* delay setting __ssb_safe until we get a firmware response */
	if (is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list))
		this_cpu_safe = true;

	if (this_cpu_has_cap(ARM64_SSBS)) {
		if (!this_cpu_safe)
			__ssb_safe = false;
		required = false;
		goto out_printmsg;
	}

	if (psci_ops.smccc_version == SMCCC_VERSION_1_0) {
		ssbd_state = ARM64_SSBD_UNKNOWN;
		if (!this_cpu_safe)
			__ssb_safe = false;
		return false;
	}

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_2, &res);
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_2, &res);
		break;

	default:
		ssbd_state = ARM64_SSBD_UNKNOWN;
		if (!this_cpu_safe)
			__ssb_safe = false;
		return false;
	}

	val = (s32)res.a0;

	switch (val) {
	case SMCCC_RET_NOT_SUPPORTED:
		ssbd_state = ARM64_SSBD_UNKNOWN;
		if (!this_cpu_safe)
			__ssb_safe = false;
		return false;

	/* machines with mixed mitigation requirements must not return this */
	case SMCCC_RET_NOT_REQUIRED:
		pr_info_once("%s mitigation not required\n", entry->desc);
		ssbd_state = ARM64_SSBD_MITIGATED;
		return false;

	case SMCCC_RET_SUCCESS:
		__ssb_safe = false;
		required = true;
		break;

	case 1:	/* Mitigation not required on this CPU */
		required = false;
		break;

	default:
		WARN_ON(1);
		if (!this_cpu_safe)
			__ssb_safe = false;
		return false;
	}

	switch (ssbd_state) {
	case ARM64_SSBD_FORCE_DISABLE:
		arm64_set_ssbd_mitigation(false);
		required = false;
		break;

	case ARM64_SSBD_KERNEL:
		if (required) {
			__this_cpu_write(arm64_ssbd_callback_required, 1);
			arm64_set_ssbd_mitigation(true);
		}
		break;

	case ARM64_SSBD_FORCE_ENABLE:
		arm64_set_ssbd_mitigation(true);
		required = true;
		break;

	default:
		WARN_ON(1);
		break;
	}

out_printmsg:
	switch (ssbd_state) {
	case ARM64_SSBD_FORCE_DISABLE:
		pr_info_once("%s disabled from command-line\n", entry->desc);
		break;

	case ARM64_SSBD_FORCE_ENABLE:
		pr_info_once("%s forced from command-line\n", entry->desc);
		break;
	}

	return required;
}
/* known invulnerable cores */
static const struct midr_range arm64_ssb_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
	{},
};
#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_ALL_VERSIONS(model)				\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_ALL_VERSIONS(model)

#define MIDR_FIXED(rev, revidr_mask) \
	.fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}

#define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_RANGE_LIST(list)				\
	.matches = is_affected_midr_range_list,			\
	.midr_range_list = list

/* Errata affecting a range of revisions of a given model variant */
#define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max)		\
	ERRATA_MIDR_RANGE(m, var, r_min, var, r_max)

/* Errata affecting a single variant/revision of a model */
#define ERRATA_MIDR_REV(model, var, rev)			\
	ERRATA_MIDR_RANGE(model, var, rev, var, rev)

/* Errata affecting all variants/revisions of a given model */
#define ERRATA_MIDR_ALL_VERSIONS(model)				\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_ALL_VERSIONS(model)

/* Errata affecting a list of midr ranges, with the same workaround */
#define ERRATA_MIDR_RANGE_LIST(midr_list)			\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_RANGE_LIST(midr_list)
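
/*
 * For example, ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4) expands
 * to a SCOPE_LOCAL_CPU erratum entry that matches Cortex-A53 r0p0..r0p4
 * through is_affected_midr_range().
 */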
/* Track overall mitigation state. We are only mitigated if all cores are ok */
static bool __hardenbp_enab = true;
static bool __spectrev2_safe = true;
/*
 * List of CPUs that do not need any Spectre-v2 mitigation at all.
 */
static const struct midr_range spectre_v2_safe_list[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
	{},
};
/*
 * Track overall bp hardening for all heterogeneous cores in the machine.
 * We are only considered "safe" if all booted cores are known safe.
 */
static bool __maybe_unused
check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
{
	int need_wa;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	/* If the CPU has CSV2 set, we're safe */
	if (cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64PFR0_EL1),
						 ID_AA64PFR0_CSV2_SHIFT))
		return false;

	/* Alternatively, we have a list of unaffected CPUs */
	if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
		return false;

	/* Fallback to firmware detection */
	need_wa = detect_harden_bp_fw();
	if (!need_wa)
		return false;

	__spectrev2_safe = false;

	if (!IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR)) {
		pr_warn_once("spectrev2 mitigation disabled by kernel configuration\n");
		__hardenbp_enab = false;
		return false;
	}

	/* forced off */
	if (__nospectre_v2 || cpu_mitigations_off()) {
		pr_info_once("spectrev2 mitigation disabled by command line option\n");
		__hardenbp_enab = false;
		return false;
	}

	if (need_wa < 0) {
		pr_warn_once("ARM_SMCCC_ARCH_WORKAROUND_1 missing from firmware\n");
		__hardenbp_enab = false;
	}

	return (need_wa > 0);
}
const struct arm64_cpu_capabilities arm64_errata[] = {
#if	defined(CONFIG_ARM64_ERRATUM_826319) || \
	defined(CONFIG_ARM64_ERRATUM_827319) || \
	defined(CONFIG_ARM64_ERRATUM_824069)
	{
	/* Cortex-A53 r0p[012] */
		.desc = "ARM errata 826319, 827319, 824069",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
		.cpu_enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_819472
	{
	/* Cortex-A53 r0p[01] */
		.desc = "ARM errata 819472",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
		.cpu_enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_832075
	{
	/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 832075",
		.capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_834220
	{
	/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 834220",
		.capability = ARM64_WORKAROUND_834220,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
	{
	/* Cortex-A53 r0p[01234] */
		.desc = "ARM erratum 845719",
		.capability = ARM64_WORKAROUND_845719,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23154
	{
	/* Cavium ThunderX, pass 1.x */
		.desc = "Cavium erratum 23154",
		.capability = ARM64_WORKAROUND_CAVIUM_23154,
		ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX, 0, 0, 1),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
	{
	/* Cavium ThunderX, T88 pass 1.x - 2.1 */
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		ERRATA_MIDR_RANGE(MIDR_THUNDERX,
				  0, 0,
				  1, 1),
	},
	{
	/* Cavium ThunderX, T81 pass 1.0 */
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		ERRATA_MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_30115
	{
	/* Cavium ThunderX, T88 pass 1.x - 2.2 */
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		ERRATA_MIDR_RANGE(MIDR_THUNDERX,
				  0, 0,
				  2, 2),
	},
	{
	/* Cavium ThunderX, T81 pass 1.0 - 1.2 */
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2),
	},
	{
	/* Cavium ThunderX, T83 pass 1.0 */
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		ERRATA_MIDR_REV(MIDR_THUNDERX_83XX, 0, 0),
	},
#endif
	{
		.desc = "Mismatched cache line size",
		.capability = ARM64_MISMATCHED_CACHE_LINE_SIZE,
		.matches = has_mismatched_cache_type,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.cpu_enable = cpu_enable_trap_ctr_access,
	},
	{
		.desc = "Mismatched cache type",
		.capability = ARM64_MISMATCHED_CACHE_TYPE,
		.matches = has_mismatched_cache_type,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.cpu_enable = cpu_enable_trap_ctr_access,
	},
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
	{
		.desc = "Qualcomm Technologies Falkor erratum 1003",
		.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
	},
	{
		.desc = "Qualcomm Technologies Kryo erratum 1003",
		.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.midr_range.model = MIDR_QCOM_KRYO,
		.matches = is_kryo_midr,
	},
#endif
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
	{
		.desc = "Qualcomm Technologies Falkor erratum 1009",
		.capability = ARM64_WORKAROUND_REPEAT_TLBI,
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
	{
	/* Cortex-A73 all versions */
		.desc = "ARM erratum 858921",
		.capability = ARM64_WORKAROUND_858921,
		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
	},
#endif
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = check_branch_predictor,
	},
	{
		.desc = "Speculative Store Bypass Disable",
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.capability = ARM64_SSBD,
		.matches = has_ssbd_mitigation,
		.midr_range_list = arm64_ssb_cpus,
	},
#ifdef CONFIG_ARM64_ERRATUM_1188873
	{
	/* Cortex-A76 r0p0 to r2p0 */
		.desc = "ARM erratum 1188873",
		.capability = ARM64_WORKAROUND_1188873,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
	},
#endif
	{
		.desc = "Spectre-BHB",
		.capability = ARM64_SPECTRE_BHB,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = is_spectre_bhb_affected,
		.cpu_enable = spectre_bhb_enable_mitigation,
	},
	{
	}
};
ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	return sprintf(buf, "Mitigation: __user pointer sanitization\n");
}
static const char *get_bhb_affected_string(enum mitigation_state bhb_state)
{
	switch (bhb_state) {
	case SPECTRE_UNAFFECTED:
		return "";
	default:
	case SPECTRE_VULNERABLE:
		return ", but not BHB";
	case SPECTRE_MITIGATED:
		return ", BHB";
	}
}
ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	enum mitigation_state bhb_state = arm64_get_spectre_bhb_state();
	const char *bhb_str = get_bhb_affected_string(bhb_state);
	const char *v2_str = "Branch predictor hardening";

	if (__spectrev2_safe) {
		if (bhb_state == SPECTRE_UNAFFECTED)
			return sprintf(buf, "Not affected\n");

		/*
		 * Platforms affected by Spectre-BHB can't report
		 * "Not affected" for Spectre-v2.
		 */
		v2_str = "CSV2";
	}

	if (__hardenbp_enab)
		return sprintf(buf, "Mitigation: %s%s\n", v2_str, bhb_str);

	return sprintf(buf, "Vulnerable\n");
}
ssize_t cpu_show_spec_store_bypass(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	if (__ssb_safe)
		return sprintf(buf, "Not affected\n");

	switch (ssbd_state) {
	case ARM64_SSBD_KERNEL:
	case ARM64_SSBD_FORCE_ENABLE:
		if (IS_ENABLED(CONFIG_ARM64_SSBD))
			return sprintf(buf,
			    "Mitigation: Speculative Store Bypass disabled via prctl\n");
	}

	return sprintf(buf, "Vulnerable\n");
}
/*
 * We try to ensure that the mitigation state can never change as the result of
 * onlining a late CPU.
 */
static void update_mitigation_state(enum mitigation_state *oldp,
				    enum mitigation_state new)
{
	enum mitigation_state state;

	do {
		state = READ_ONCE(*oldp);
		if (new <= state)
			break;
	} while (cmpxchg_relaxed(oldp, state, new) != state);
}
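
/*
 * The cmpxchg loop above only ever moves *oldp to a higher enum value, so
 * a late-onlined CPU can never make the reported state look safer.
 */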
/*
 * Spectre BHB.
 *
 * A CPU is either:
 * - Mitigated by a branchy loop a CPU specific number of times, and listed
 *   in our "loop mitigated list".
 * - Mitigated in software by the firmware Spectre v2 call.
 * - Has the ClearBHB instruction to perform the mitigation.
 * - Has the 'Exception Clears Branch History Buffer' (ECBHB) feature, so no
 *   software mitigation in the vectors is needed.
 * - Has CSV2.3, so is unaffected.
 */
static enum mitigation_state spectre_bhb_state;

enum mitigation_state arm64_get_spectre_bhb_state(void)
{
	return spectre_bhb_state;
}
/*
 * This must be called with SCOPE_LOCAL_CPU for each type of CPU, before any
 * SCOPE_SYSTEM call will give the right answer.
 */
u8 spectre_bhb_loop_affected(int scope)
{
	u8 k = 0;
	static u8 max_bhb_k;

	if (scope == SCOPE_LOCAL_CPU) {
		static const struct midr_range spectre_bhb_k32_list[] = {
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
			{},
		};
		static const struct midr_range spectre_bhb_k24_list[] = {
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
			{},
		};
		static const struct midr_range spectre_bhb_k8_list[] = {
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
			{},
		};

		if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list))
			k = 32;
		else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list))
			k = 24;
		else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list))
			k = 8;

		max_bhb_k = max(max_bhb_k, k);
	} else {
		k = max_bhb_k;
	}

	return k;
}
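
/*
 * The SCOPE_SYSTEM result (the maximum k seen across all booted CPUs) is
 * what spectre_bhb_enable_mitigation() and spectre_bhb_patch_loop_iter()
 * consume.
 */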
static enum mitigation_state spectre_bhb_get_cpu_fw_mitigation_state(void)
{
	int ret;
	struct arm_smccc_res res;

	if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
		return SPECTRE_VULNERABLE;

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_3, &res);
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_3, &res);
		break;

	default:
		return SPECTRE_VULNERABLE;
	}

	ret = res.a0;
	switch (ret) {
	case SMCCC_RET_SUCCESS:
		return SPECTRE_MITIGATED;
	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
		return SPECTRE_UNAFFECTED;
	default:
	case SMCCC_RET_NOT_SUPPORTED:
		return SPECTRE_VULNERABLE;
	}
}
static bool is_spectre_bhb_fw_affected(int scope)
{
	static bool system_affected;
	enum mitigation_state fw_state;
	bool has_smccc = (psci_ops.smccc_version >= SMCCC_VERSION_1_1);
	static const struct midr_range spectre_bhb_firmware_mitigated_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
		{},
	};
	bool cpu_in_list = is_midr_in_range_list(read_cpuid_id(),
					 spectre_bhb_firmware_mitigated_list);

	if (scope != SCOPE_LOCAL_CPU)
		return system_affected;

	fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
	if (cpu_in_list || (has_smccc && fw_state == SPECTRE_MITIGATED)) {
		system_affected = true;
		return true;
	}

	return false;
}
static bool supports_ecbhb(int scope)
{
	u64 mmfr1;

	if (scope == SCOPE_LOCAL_CPU)
		mmfr1 = read_sysreg_s(SYS_ID_AA64MMFR1_EL1);
	else
		mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);

	return cpuid_feature_extract_unsigned_field(mmfr1,
						    ID_AA64MMFR1_ECBHB_SHIFT);
}
bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
			     int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	if (supports_csv2p3(scope))
		return false;

	if (supports_clearbhb(scope))
		return true;

	if (spectre_bhb_loop_affected(scope))
		return true;

	if (is_spectre_bhb_fw_affected(scope))
		return true;

	return false;
}
static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
{
	const char *v = arm64_get_bp_hardening_vector(slot);

	if (slot < 0)
		return;

	__this_cpu_write(this_cpu_vector, v);

	/*
	 * When KPTI is in use, the vectors are switched when exiting to
	 * user-space.
	 */
	if (arm64_kernel_unmapped_at_el0())
		return;

	write_sysreg(v, vbar_el1);
	isb();
}
#ifdef CONFIG_KVM
static const char *kvm_bhb_get_vecs_end(const char *start)
{
	if (start == __smccc_workaround_3_smc_start)
		return __smccc_workaround_3_smc_end;
	else if (start == __spectre_bhb_loop_k8_start)
		return __spectre_bhb_loop_k8_end;
	else if (start == __spectre_bhb_loop_k24_start)
		return __spectre_bhb_loop_k24_end;
	else if (start == __spectre_bhb_loop_k32_start)
		return __spectre_bhb_loop_k32_end;
	else if (start == __spectre_bhb_clearbhb_start)
		return __spectre_bhb_clearbhb_end;

	return NULL;
}
static void kvm_setup_bhb_slot(const char *hyp_vecs_start)
{
	int cpu, slot = -1;
	const char *hyp_vecs_end;

	if (!IS_ENABLED(CONFIG_KVM) || !is_hyp_mode_available())
		return;

	hyp_vecs_end = kvm_bhb_get_vecs_end(hyp_vecs_start);
	if (WARN_ON_ONCE(!hyp_vecs_start || !hyp_vecs_end))
		return;

	spin_lock(&bp_lock);
	for_each_possible_cpu(cpu) {
		if (per_cpu(bp_hardening_data.template_start, cpu) == hyp_vecs_start) {
			slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
			break;
		}
	}

	if (slot == -1) {
		last_slot++;
		BUG_ON(((__bp_harden_hyp_vecs_end - __bp_harden_hyp_vecs_start)
			/ SZ_2K) <= last_slot);
		slot = last_slot;
		__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
	}

	__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
	__this_cpu_write(bp_hardening_data.template_start, hyp_vecs_start);
	spin_unlock(&bp_lock);
}
#else
#define __smccc_workaround_3_smc_start NULL
#define __spectre_bhb_loop_k8_start NULL
#define __spectre_bhb_loop_k24_start NULL
#define __spectre_bhb_loop_k32_start NULL
#define __spectre_bhb_clearbhb_start NULL

static void kvm_setup_bhb_slot(const char *hyp_vecs_start) { }
#endif /* CONFIG_KVM */
void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
{
	enum mitigation_state fw_state, state = SPECTRE_VULNERABLE;

	if (!is_spectre_bhb_affected(entry, SCOPE_LOCAL_CPU))
		return;

	if (!__spectrev2_safe && !__hardenbp_enab) {
		/* No point mitigating Spectre-BHB alone. */
	} else if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY)) {
		pr_info_once("spectre-bhb mitigation disabled by compile time option\n");
	} else if (cpu_mitigations_off()) {
		pr_info_once("spectre-bhb mitigation disabled by command line option\n");
	} else if (supports_ecbhb(SCOPE_LOCAL_CPU)) {
		state = SPECTRE_MITIGATED;
	} else if (supports_clearbhb(SCOPE_LOCAL_CPU)) {
		kvm_setup_bhb_slot(__spectre_bhb_clearbhb_start);
		this_cpu_set_vectors(EL1_VECTOR_BHB_CLEAR_INSN);

		state = SPECTRE_MITIGATED;
	} else if (spectre_bhb_loop_affected(SCOPE_LOCAL_CPU)) {
		switch (spectre_bhb_loop_affected(SCOPE_SYSTEM)) {
		case 8:
			kvm_setup_bhb_slot(__spectre_bhb_loop_k8_start);
			break;
		case 24:
			kvm_setup_bhb_slot(__spectre_bhb_loop_k24_start);
			break;
		case 32:
			kvm_setup_bhb_slot(__spectre_bhb_loop_k32_start);
			break;
		default:
			WARN_ON_ONCE(1);
		}
		this_cpu_set_vectors(EL1_VECTOR_BHB_LOOP);

		state = SPECTRE_MITIGATED;
	} else if (is_spectre_bhb_fw_affected(SCOPE_LOCAL_CPU)) {
		fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
		if (fw_state == SPECTRE_MITIGATED) {
			kvm_setup_bhb_slot(__smccc_workaround_3_smc_start);
			this_cpu_set_vectors(EL1_VECTOR_BHB_FW);

			/*
			 * With WA3 in the vectors, the WA1 calls can be
			 * removed.
			 */
			__this_cpu_write(bp_hardening_data.fn, NULL);

			state = SPECTRE_MITIGATED;
		}
	}

	update_mitigation_state(&spectre_bhb_state, state);
}
/* Patched to correct the immediate */
void __init spectre_bhb_patch_loop_iter(struct alt_instr *alt,
					__le32 *origptr, __le32 *updptr, int nr_inst)
{
	u8 rd;
	u32 insn;
	u16 loop_count = spectre_bhb_loop_affected(SCOPE_SYSTEM);

	BUG_ON(nr_inst != 1); /* MOV -> MOV */

	if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY))
		return;

	insn = le32_to_cpu(*origptr);
	rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);
	insn = aarch64_insn_gen_movewide(rd, loop_count, 0,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_ZERO);
	*updptr++ = cpu_to_le32(insn);
}