/*
 * Contains CPU specific errata definitions
 *
 * Copyright (C) 2014 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/arm-smccc.h>
#include <linux/psci.h>
#include <linux/types.h>
#include <linux/cpu.h>

#include <asm/cputype.h>
#include <asm/cpufeature.h>
#include <asm/vectors.h>
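
/*
 * Each workaround below is an arm64_cpu_capabilities entry; the
 * ->matches() helpers here run per-CPU with preemption disabled,
 * which is what the SCOPE_LOCAL_CPU WARN_ON()s assert.
 */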
static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
{
	u32 midr = read_cpuid_id();

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range(midr, &entry->midr_range);
}

static bool __maybe_unused
is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry,
			    int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list);
}

static bool __maybe_unused
is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
{
	u32 model;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	model = read_cpuid_id();
	model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
		 MIDR_ARCHITECTURE_MASK;

	return model == entry->midr_range.model;
}
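
/*
 * Compare this CPU's CTR_EL0 against the system-wide sanitised value.
 * ARM64_MISMATCHED_CACHE_LINE_SIZE checks only the minimum line size
 * fields; ARM64_MISMATCHED_CACHE_TYPE checks the remaining strict
 * fields instead.
 */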
static bool
has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
			  int scope)
{
	u64 mask = CTR_CACHE_MINLINE_MASK;

	/* Skip matching the min line sizes for cache type check */
	if (entry->capability == ARM64_MISMATCHED_CACHE_TYPE)
		mask ^= arm64_ftr_reg_ctrel0.strict_mask;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return (read_cpuid_cachetype() & mask) !=
	       (arm64_ftr_reg_ctrel0.sys_val & mask);
}

static void
cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused)
{
	/* Clear SCTLR_EL1.UCT */
	config_sctlr_el1(SCTLR_EL1_UCT, 0);
}
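
/*
 * Note: with SCTLR_EL1.UCT cleared above, EL0 reads of CTR_EL0 trap to
 * EL1, where they can be emulated using the system-wide sanitised
 * value rather than this CPU's mismatched one.
 */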

#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

#ifdef CONFIG_KVM
extern char __smccc_workaround_1_smc_start[];
extern char __smccc_workaround_1_smc_end[];
extern char __smccc_workaround_3_smc_start[];
extern char __smccc_workaround_3_smc_end[];
extern char __spectre_bhb_loop_k8_start[];
extern char __spectre_bhb_loop_k8_end[];
extern char __spectre_bhb_loop_k24_start[];
extern char __spectre_bhb_loop_k24_end[];
extern char __spectre_bhb_loop_k32_start[];
extern char __spectre_bhb_loop_k32_end[];
extern char __spectre_bhb_clearbhb_start[];
extern char __spectre_bhb_clearbhb_end[];
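
/*
 * The hyp vectors are organised as 2K slots. Stamping a template at
 * every 0x80 bytes fills all sixteen 128-byte vector entries of a
 * slot with the same mitigation sequence.
 */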
static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
				const char *hyp_vecs_end)
{
	void *dst = lm_alias(__bp_harden_hyp_vecs_start + slot * SZ_2K);
	int i;

	for (i = 0; i < SZ_2K; i += 0x80)
		memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);

	flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
}

static DEFINE_SPINLOCK(bp_lock);
static int last_slot = -1;

static void install_bp_hardening_cb(bp_hardening_cb_t fn,
				    const char *hyp_vecs_start,
				    const char *hyp_vecs_end)
{
	int cpu, slot = -1;

	spin_lock(&bp_lock);
	for_each_possible_cpu(cpu) {
		if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
			slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
			break;
		}
	}

	if (slot == -1) {
		last_slot++;
		BUG_ON(((__bp_harden_hyp_vecs_end - __bp_harden_hyp_vecs_start)
			/ SZ_2K) <= last_slot);
		slot = last_slot;
		__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
	}

	if (fn != __this_cpu_read(bp_hardening_data.fn)) {
		__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
		__this_cpu_write(bp_hardening_data.fn, fn);
		__this_cpu_write(bp_hardening_data.template_start,
				 hyp_vecs_start);
	}
	spin_unlock(&bp_lock);
}
#else
#define __smccc_workaround_1_smc_start		NULL
#define __smccc_workaround_1_smc_end		NULL

static void install_bp_hardening_cb(bp_hardening_cb_t fn,
				    const char *hyp_vecs_start,
				    const char *hyp_vecs_end)
{
	__this_cpu_write(bp_hardening_data.fn, fn);
}
#endif	/* CONFIG_KVM */

#include <uapi/linux/psci.h>
#include <linux/arm-smccc.h>
#include <linux/psci.h>

static void call_smc_arch_workaround_1(void)
{
	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

static void call_hvc_arch_workaround_1(void)
{
	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}
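
/*
 * Falkor needs its link-register stack (the return-address predictor)
 * scrubbing instead: qcom_link_stack_sanitization() below overwrites
 * it with sixteen harmless branch-and-link instructions.
 */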
static void qcom_link_stack_sanitization(void)
{
	u64 tmp;

	asm volatile("mov	%0, x30		\n"
		     ".rept	16		\n"
		     "bl	. + 4		\n"
		     ".endr			\n"
		     "mov	x30, %0		\n"
		     : "=&r" (tmp));
}

static bool __nospectre_v2;
static int __init parse_nospectre_v2(char *str)
{
	__nospectre_v2 = true;
	return 0;
}
early_param("nospectre_v2", parse_nospectre_v2);

/*
 * -1: No workaround
 *  0: No workaround required
 *  1: Workaround installed
 */
static int detect_harden_bp_fw(void)
{
	bp_hardening_cb_t cb;
	void *smccc_start, *smccc_end;
	struct arm_smccc_res res;
	u32 midr = read_cpuid_id();

	if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
		return -1;

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
		switch ((int)res.a0) {
		case 1:
			/* Firmware says we're just fine */
			return 0;
		case 0:
			cb = call_hvc_arch_workaround_1;
			/* This is a guest, no need to patch KVM vectors */
			smccc_start = NULL;
			smccc_end = NULL;
			break;
		default:
			return -1;
		}
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
		switch ((int)res.a0) {
		case 1:
			/* Firmware says we're just fine */
			return 0;
		case 0:
			cb = call_smc_arch_workaround_1;
			smccc_start = __smccc_workaround_1_smc_start;
			smccc_end = __smccc_workaround_1_smc_end;
			break;
		default:
			return -1;
		}
		break;

	default:
		return -1;
	}

	if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
	    ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
		cb = qcom_link_stack_sanitization;

	if (IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR))
		install_bp_hardening_cb(cb, smccc_start, smccc_end);

	return 1;
}
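
/*
 * Consulted by the kernel entry/exit code to decide whether this CPU
 * needs the ARCH_WORKAROUND_2 call to toggle SSBD around user-space
 * transitions.
 */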
DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);

int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
static bool __ssb_safe = true;

static const struct ssbd_options {
	const char	*str;
	int		state;
} ssbd_options[] = {
	{ "force-on",	ARM64_SSBD_FORCE_ENABLE, },
	{ "force-off",	ARM64_SSBD_FORCE_DISABLE, },
	{ "kernel",	ARM64_SSBD_KERNEL, },
};

static int __init ssbd_cfg(char *buf)
{
	int i;

	if (!buf || !buf[0])
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(ssbd_options); i++) {
		int len = strlen(ssbd_options[i].str);

		if (strncmp(buf, ssbd_options[i].str, len))
			continue;

		ssbd_state = ssbd_options[i].state;
		return 0;
	}

	return -EINVAL;
}
early_param("ssbd", ssbd_cfg);
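
/*
 * Alternative callback: patches the single instruction at the call
 * site to an HVC or SMC, to match whichever conduit PSCI uses to
 * reach firmware.
 */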
void __init arm64_update_smccc_conduit(struct alt_instr *alt,
				       __le32 *origptr, __le32 *updptr,
				       int nr_inst)
{
	u32 insn;

	BUG_ON(nr_inst != 1);

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		insn = aarch64_insn_get_hvc_value();
		break;
	case PSCI_CONDUIT_SMC:
		insn = aarch64_insn_get_smc_value();
		break;
	default:
		return;
	}

	*updptr = cpu_to_le32(insn);
}

void __init arm64_enable_wa2_handling(struct alt_instr *alt,
				      __le32 *origptr, __le32 *updptr,
				      int nr_inst)
{
	BUG_ON(nr_inst != 1);
	/*
	 * Only allow mitigation on EL1 entry/exit and guest
	 * ARCH_WORKAROUND_2 handling if the SSBD state allows it to
	 * be flipped.
	 */
	if (arm64_get_ssbd_state() == ARM64_SSBD_KERNEL)
		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
}
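
/*
 * Two ways to toggle SSBD: CPUs implementing the SSBS PSTATE bit flip
 * it directly, everything else has to ask firmware through the
 * ARCH_WORKAROUND_2 call.
 */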
void arm64_set_ssbd_mitigation(bool state)
{
	if (!IS_ENABLED(CONFIG_ARM64_SSBD)) {
		pr_info_once("SSBD disabled by kernel configuration\n");
		return;
	}

	if (this_cpu_has_cap(ARM64_SSBS)) {
		if (state)
			asm volatile(SET_PSTATE_SSBS(0));
		else
			asm volatile(SET_PSTATE_SSBS(1));
		return;
	}

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
		break;

	default:
		WARN_ON_ONCE(1);
		break;
	}
}

static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
				int scope)
{
	struct arm_smccc_res res;
	bool required = true;
	s32 val;
	bool this_cpu_safe = false;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	if (cpu_mitigations_off())
		ssbd_state = ARM64_SSBD_FORCE_DISABLE;

	/* delay setting __ssb_safe until we get a firmware response */
	if (is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list))
		this_cpu_safe = true;

	if (this_cpu_has_cap(ARM64_SSBS)) {
		if (!this_cpu_safe)
			__ssb_safe = false;
		required = false;
		goto out_printmsg;
	}

	if (psci_ops.smccc_version == SMCCC_VERSION_1_0) {
		ssbd_state = ARM64_SSBD_UNKNOWN;
		if (!this_cpu_safe)
			__ssb_safe = false;
		return false;
	}

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_2, &res);
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_2, &res);
		break;

	default:
		ssbd_state = ARM64_SSBD_UNKNOWN;
		if (!this_cpu_safe)
			__ssb_safe = false;
		return false;
	}

	val = (s32)res.a0;

	switch (val) {
	case SMCCC_RET_NOT_SUPPORTED:
		ssbd_state = ARM64_SSBD_UNKNOWN;
		if (!this_cpu_safe)
			__ssb_safe = false;
		return false;

	/* machines with mixed mitigation requirements must not return this */
	case SMCCC_RET_NOT_REQUIRED:
		pr_info_once("%s mitigation not required\n", entry->desc);
		ssbd_state = ARM64_SSBD_MITIGATED;
		return false;

	case SMCCC_RET_SUCCESS:
		__ssb_safe = false;
		required = true;
		break;

	case 1:	/* Mitigation not required on this CPU */
		required = false;
		break;

	default:
		WARN_ON(1);
		if (!this_cpu_safe)
			__ssb_safe = false;
		return false;
	}

	switch (ssbd_state) {
	case ARM64_SSBD_FORCE_DISABLE:
		arm64_set_ssbd_mitigation(false);
		required = false;
		break;

	case ARM64_SSBD_KERNEL:
		if (required) {
			__this_cpu_write(arm64_ssbd_callback_required, 1);
			arm64_set_ssbd_mitigation(true);
		}
		break;

	case ARM64_SSBD_FORCE_ENABLE:
		arm64_set_ssbd_mitigation(true);
		required = true;
		break;

	default:
		WARN_ON(1);
		break;
	}

out_printmsg:
	switch (ssbd_state) {
	case ARM64_SSBD_FORCE_DISABLE:
		pr_info_once("%s disabled from command-line\n", entry->desc);
		break;

	case ARM64_SSBD_FORCE_ENABLE:
		pr_info_once("%s forced from command-line\n", entry->desc);
		break;
	}

	return required;
}

/* known invulnerable cores */
static const struct midr_range arm64_ssb_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
	{},
};

#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_ALL_VERSIONS(model)				\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_ALL_VERSIONS(model)

#define MIDR_FIXED(rev, revidr_mask) \
	.fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}

#define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_RANGE_LIST(list)				\
	.matches = is_affected_midr_range_list,			\
	.midr_range_list = list

/* Errata affecting a range of revisions of a given model variant */
#define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max)	\
	ERRATA_MIDR_RANGE(m, var, r_min, var, r_max)

/* Errata affecting a single variant/revision of a model */
#define ERRATA_MIDR_REV(model, var, rev)	\
	ERRATA_MIDR_RANGE(model, var, rev, var, rev)

/* Errata affecting all variants/revisions of a given model */
#define ERRATA_MIDR_ALL_VERSIONS(model)			\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,		\
	CAP_MIDR_ALL_VERSIONS(model)

/* Errata affecting a list of midr ranges, with the same workaround */
#define ERRATA_MIDR_RANGE_LIST(midr_list)		\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,		\
	CAP_MIDR_RANGE_LIST(midr_list)

/* Track overall mitigation state. We are only mitigated if all cores are ok */
static bool __hardenbp_enab = true;
static bool __spectrev2_safe = true;

/*
 * List of CPUs that do not need any Spectre-v2 mitigation at all.
 */
static const struct midr_range spectre_v2_safe_list[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
	{ /* sentinel */ },
};

/*
 * Track overall bp hardening for all heterogeneous cores in the machine.
 * We are only considered "safe" if all booted cores are known safe.
 */
static bool __maybe_unused
check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
{
	int need_wa;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	/* If the CPU has CSV2 set, we're safe */
	if (cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64PFR0_EL1),
						 ID_AA64PFR0_CSV2_SHIFT))
		return false;

	/* Alternatively, we have a list of unaffected CPUs */
	if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
		return false;

	/* Fallback to firmware detection */
	need_wa = detect_harden_bp_fw();
	if (!need_wa)
		return false;

	__spectrev2_safe = false;

	if (!IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR)) {
		pr_warn_once("spectrev2 mitigation disabled by kernel configuration\n");
		__hardenbp_enab = false;
		return false;
	}

	/* forced off */
	if (__nospectre_v2 || cpu_mitigations_off()) {
		pr_info_once("spectrev2 mitigation disabled by command line option\n");
		__hardenbp_enab = false;
		return false;
	}

	if (need_wa < 0) {
		pr_warn_once("ARM_SMCCC_ARCH_WORKAROUND_1 missing from firmware\n");
		__hardenbp_enab = false;
	}

	return (need_wa > 0);
}

#ifdef CONFIG_ARM64_ERRATUM_1742098
static struct midr_range broken_aarch32_aes[] = {
	MIDR_RANGE(MIDR_CORTEX_A57, 0, 1, 0xf, 0xf),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
	{},
};
#endif

const struct arm64_cpu_capabilities arm64_errata[] = {
#if	defined(CONFIG_ARM64_ERRATUM_826319) || \
	defined(CONFIG_ARM64_ERRATUM_827319) || \
	defined(CONFIG_ARM64_ERRATUM_824069)
	{
	/* Cortex-A53 r0p[012] */
		.desc = "ARM errata 826319, 827319, 824069",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
		.cpu_enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_819472
	{
	/* Cortex-A53 r0p[01] */
		.desc = "ARM errata 819472",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
		.cpu_enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_832075
	{
	/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 832075",
		.capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_834220
	{
	/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 834220",
		.capability = ARM64_WORKAROUND_834220,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
	{
	/* Cortex-A53 r0p[01234] */
		.desc = "ARM erratum 845719",
		.capability = ARM64_WORKAROUND_845719,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23154
	{
	/* Cavium ThunderX, pass 1.x */
		.desc = "Cavium erratum 23154",
		.capability = ARM64_WORKAROUND_CAVIUM_23154,
		ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX, 0, 0, 1),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
	{
	/* Cavium ThunderX, T88 pass 1.x - 2.1 */
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		ERRATA_MIDR_RANGE(MIDR_THUNDERX,
				  0, 0,
				  1, 1),
	},
	{
	/* Cavium ThunderX, T81 pass 1.0 */
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		ERRATA_MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_30115
	{
	/* Cavium ThunderX, T88 pass 1.x - 2.2 */
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		ERRATA_MIDR_RANGE(MIDR_THUNDERX,
				  0, 0,
				  1, 2),
	},
	{
	/* Cavium ThunderX, T81 pass 1.0 - 1.2 */
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2),
	},
	{
	/* Cavium ThunderX, T83 pass 1.0 */
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		ERRATA_MIDR_REV(MIDR_THUNDERX_83XX, 0, 0),
	},
#endif
	{
		.desc = "Mismatched cache line size",
		.capability = ARM64_MISMATCHED_CACHE_LINE_SIZE,
		.matches = has_mismatched_cache_type,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.cpu_enable = cpu_enable_trap_ctr_access,
	},
	{
		.desc = "Mismatched cache type",
		.capability = ARM64_MISMATCHED_CACHE_TYPE,
		.matches = has_mismatched_cache_type,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.cpu_enable = cpu_enable_trap_ctr_access,
	},
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
	{
		.desc = "Qualcomm Technologies Falkor erratum 1003",
		.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
	},
	{
		.desc = "Qualcomm Technologies Kryo erratum 1003",
		.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.midr_range.model = MIDR_QCOM_KRYO,
		.matches = is_kryo_midr,
	},
#endif
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
	{
		.desc = "Qualcomm Technologies Falkor erratum 1009",
		.capability = ARM64_WORKAROUND_REPEAT_TLBI,
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
	{
	/* Cortex-A73 all versions */
		.desc = "ARM erratum 858921",
		.capability = ARM64_WORKAROUND_858921,
		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
	},
#endif
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = check_branch_predictor,
	},
	{
		.desc = "Speculative Store Bypass Disable",
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.capability = ARM64_SSBD,
		.matches = has_ssbd_mitigation,
		.midr_range_list = arm64_ssb_cpus,
	},
#ifdef CONFIG_ARM64_ERRATUM_1188873
	{
		/* Cortex-A76 r0p0 to r2p0 */
		.desc = "ARM erratum 1188873",
		.capability = ARM64_WORKAROUND_1188873,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
	},
#endif
	{
		.desc = "Spectre-BHB",
		.capability = ARM64_SPECTRE_BHB,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = is_spectre_bhb_affected,
		.cpu_enable = spectre_bhb_enable_mitigation,
	},
#ifdef CONFIG_ARM64_ERRATUM_1742098
	{
		.desc = "ARM erratum 1742098",
		.capability = ARM64_WORKAROUND_1742098,
		CAP_MIDR_RANGE_LIST(broken_aarch32_aes),
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
	},
#endif
	{
	}
};

ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "Mitigation: __user pointer sanitization\n");
}

static const char *get_bhb_affected_string(enum mitigation_state bhb_state)
{
	switch (bhb_state) {
	case SPECTRE_UNAFFECTED:
		return "";
	default:
	case SPECTRE_VULNERABLE:
		return ", but not BHB";
	case SPECTRE_MITIGATED:
		return ", BHB";
	}
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	enum mitigation_state bhb_state = arm64_get_spectre_bhb_state();
	const char *bhb_str = get_bhb_affected_string(bhb_state);
	const char *v2_str = "Branch predictor hardening";

	if (__spectrev2_safe) {
		if (bhb_state == SPECTRE_UNAFFECTED)
			return sprintf(buf, "Not affected\n");

		/*
		 * Platforms affected by Spectre-BHB can't report
		 * "Not affected" for Spectre-v2.
		 */
		v2_str = "CSV2";
	}

	if (__hardenbp_enab)
		return sprintf(buf, "Mitigation: %s%s\n", v2_str, bhb_str);

	return sprintf(buf, "Vulnerable\n");
}

ssize_t cpu_show_spec_store_bypass(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	if (__ssb_safe)
		return sprintf(buf, "Not affected\n");

	switch (ssbd_state) {
	case ARM64_SSBD_KERNEL:
	case ARM64_SSBD_FORCE_ENABLE:
		if (IS_ENABLED(CONFIG_ARM64_SSBD))
			return sprintf(buf,
			    "Mitigation: Speculative Store Bypass disabled via prctl\n");
	}

	return sprintf(buf, "Vulnerable\n");
}

/*
 * We try to ensure that the mitigation state can never change as the result of
 * onlining a late CPU.
 */
static void update_mitigation_state(enum mitigation_state *oldp,
				    enum mitigation_state new)
{
	enum mitigation_state state;

	do {
		state = READ_ONCE(*oldp);
		if (new <= state)
			break;
	} while (cmpxchg_relaxed(oldp, state, new) != state);
}
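
/*
 * The "new <= state" test above makes this a one-way ratchet (the enum
 * orders SPECTRE_UNAFFECTED < SPECTRE_MITIGATED < SPECTRE_VULNERABLE):
 * a late CPU can only make the reported state worse, never better.
 */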

/*
 * Spectre BHB.
 *
 * A CPU is either:
 * - Mitigated by a branchy loop a CPU specific number of times, and listed
 *   in our "loop mitigated list".
 * - Mitigated in software by the firmware Spectre v2 call.
 * - Has the ClearBHB instruction to perform the mitigation.
 * - Has the 'Exception Clears Branch History Buffer' (ECBHB) feature, so no
 *   software mitigation in the vectors is needed.
 * - Has CSV2.3, so is unaffected.
 */
static enum mitigation_state spectre_bhb_state;

enum mitigation_state arm64_get_spectre_bhb_state(void)
{
	return spectre_bhb_state;
}

/*
 * This must be called with SCOPE_LOCAL_CPU for each type of CPU, before any
 * SCOPE_SYSTEM call will give the right answer.
 */
u8 spectre_bhb_loop_affected(int scope)
{
	u8 k = 0;
	static u8 max_bhb_k;

	if (scope == SCOPE_LOCAL_CPU) {
		static const struct midr_range spectre_bhb_k32_list[] = {
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
			{},
		};
		static const struct midr_range spectre_bhb_k24_list[] = {
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
			{},
		};
		static const struct midr_range spectre_bhb_k8_list[] = {
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
			{},
		};

		if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list))
			k = 32;
		else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list))
			k = 24;
		else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list))
			k =  8;

		max_bhb_k = max(max_bhb_k, k);
	} else {
		k = max_bhb_k;
	}

	return k;
}
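
/*
 * Probe firmware for ARCH_WORKAROUND_3: SMCCC_RET_SUCCESS means each
 * call to the workaround will invalidate the branch history for us.
 */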
static enum mitigation_state spectre_bhb_get_cpu_fw_mitigation_state(void)
{
	int ret;
	struct arm_smccc_res res;

	if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
		return SPECTRE_VULNERABLE;

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_3, &res);
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_3, &res);
		break;

	default:
		return SPECTRE_VULNERABLE;
	}

	ret = res.a0;
	switch (ret) {
	case SMCCC_RET_SUCCESS:
		return SPECTRE_MITIGATED;
	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
		return SPECTRE_UNAFFECTED;
	default:
	case SMCCC_RET_NOT_SUPPORTED:
		return SPECTRE_VULNERABLE;
	}
}

static bool is_spectre_bhb_fw_affected(int scope)
{
	static bool system_affected;
	enum mitigation_state fw_state;
	bool has_smccc = (psci_ops.smccc_version >= SMCCC_VERSION_1_1);
	static const struct midr_range spectre_bhb_firmware_mitigated_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
		{},
	};
	bool cpu_in_list = is_midr_in_range_list(read_cpuid_id(),
					 spectre_bhb_firmware_mitigated_list);

	if (scope != SCOPE_LOCAL_CPU)
		return system_affected;

	fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
	if (cpu_in_list || (has_smccc && fw_state == SPECTRE_MITIGATED)) {
		system_affected = true;
		return true;
	}

	return false;
}

static bool supports_ecbhb(int scope)
{
	u64 mmfr1;

	if (scope == SCOPE_LOCAL_CPU)
		mmfr1 = read_sysreg_s(SYS_ID_AA64MMFR1_EL1);
	else
		mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);

	return cpuid_feature_extract_unsigned_field(mmfr1,
						    ID_AA64MMFR1_ECBHB_SHIFT);
}

bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
			     int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	if (supports_csv2p3(scope))
		return false;

	if (supports_clearbhb(scope))
		return true;

	if (spectre_bhb_loop_affected(scope))
		return true;

	if (is_spectre_bhb_fw_affected(scope))
		return true;

	return false;
}

static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
{
	const char *v = arm64_get_bp_hardening_vector(slot);

	if (slot < 0)
		return;

	__this_cpu_write(this_cpu_vector, v);

	/*
	 * When KPTI is in use, the vectors are switched when exiting to
	 * user-space.
	 */
	if (arm64_kernel_unmapped_at_el0())
		return;

	write_sysreg(v, vbar_el1);
	isb();
}
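
/*
 * KVM runs its own vectors at EL2, so each mitigation template also
 * needs to be stamped into a hyp vector slot (shared with the
 * Spectre-v2 slots via bp_lock/last_slot above).
 */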
#ifdef CONFIG_KVM
static const char *kvm_bhb_get_vecs_end(const char *start)
{
	if (start == __smccc_workaround_3_smc_start)
		return __smccc_workaround_3_smc_end;
	else if (start == __spectre_bhb_loop_k8_start)
		return __spectre_bhb_loop_k8_end;
	else if (start == __spectre_bhb_loop_k24_start)
		return __spectre_bhb_loop_k24_end;
	else if (start == __spectre_bhb_loop_k32_start)
		return __spectre_bhb_loop_k32_end;
	else if (start == __spectre_bhb_clearbhb_start)
		return __spectre_bhb_clearbhb_end;

	return NULL;
}

static void kvm_setup_bhb_slot(const char *hyp_vecs_start)
{
	int cpu, slot = -1;
	const char *hyp_vecs_end;

	if (!IS_ENABLED(CONFIG_KVM) || !is_hyp_mode_available())
		return;

	hyp_vecs_end = kvm_bhb_get_vecs_end(hyp_vecs_start);
	if (WARN_ON_ONCE(!hyp_vecs_start || !hyp_vecs_end))
		return;

	spin_lock(&bp_lock);
	for_each_possible_cpu(cpu) {
		if (per_cpu(bp_hardening_data.template_start, cpu) == hyp_vecs_start) {
			slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
			break;
		}
	}

	if (slot == -1) {
		last_slot++;
		BUG_ON(((__bp_harden_hyp_vecs_end - __bp_harden_hyp_vecs_start)
			/ SZ_2K) <= last_slot);
		slot = last_slot;
		__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
	}

	if (hyp_vecs_start != __this_cpu_read(bp_hardening_data.template_start)) {
		__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
		__this_cpu_write(bp_hardening_data.template_start,
				 hyp_vecs_start);
	}
	spin_unlock(&bp_lock);
}
#else
#define __smccc_workaround_3_smc_start NULL
#define __spectre_bhb_loop_k8_start NULL
#define __spectre_bhb_loop_k24_start NULL
#define __spectre_bhb_loop_k32_start NULL
#define __spectre_bhb_clearbhb_start NULL

static void kvm_setup_bhb_slot(const char *hyp_vecs_start) { }
#endif

void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
{
	enum mitigation_state fw_state, state = SPECTRE_VULNERABLE;

	if (!is_spectre_bhb_affected(entry, SCOPE_LOCAL_CPU))
		return;

	if (!__spectrev2_safe && !__hardenbp_enab) {
		/* No point mitigating Spectre-BHB alone. */
	} else if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY)) {
		pr_info_once("spectre-bhb mitigation disabled by compile time option\n");
	} else if (cpu_mitigations_off()) {
		pr_info_once("spectre-bhb mitigation disabled by command line option\n");
	} else if (supports_ecbhb(SCOPE_LOCAL_CPU)) {
		state = SPECTRE_MITIGATED;
	} else if (supports_clearbhb(SCOPE_LOCAL_CPU)) {
		kvm_setup_bhb_slot(__spectre_bhb_clearbhb_start);
		this_cpu_set_vectors(EL1_VECTOR_BHB_CLEAR_INSN);

		state = SPECTRE_MITIGATED;
	} else if (spectre_bhb_loop_affected(SCOPE_LOCAL_CPU)) {
		switch (spectre_bhb_loop_affected(SCOPE_SYSTEM)) {
		case 8:
			/*
			 * A57/A72-r0 will already have selected the
			 * spectre-indirect vector, which is sufficient
			 * for BHB too.
			 */
			if (!__this_cpu_read(bp_hardening_data.fn))
				kvm_setup_bhb_slot(__spectre_bhb_loop_k8_start);
			break;
		case 24:
			kvm_setup_bhb_slot(__spectre_bhb_loop_k24_start);
			break;
		case 32:
			kvm_setup_bhb_slot(__spectre_bhb_loop_k32_start);
			break;
		default:
			WARN_ON_ONCE(1);
		}
		this_cpu_set_vectors(EL1_VECTOR_BHB_LOOP);

		state = SPECTRE_MITIGATED;
	} else if (is_spectre_bhb_fw_affected(SCOPE_LOCAL_CPU)) {
		fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
		if (fw_state == SPECTRE_MITIGATED) {
			kvm_setup_bhb_slot(__smccc_workaround_3_smc_start);
			this_cpu_set_vectors(EL1_VECTOR_BHB_FW);

			/*
			 * With WA3 in the vectors, the WA1 calls can be
			 * removed.
			 */
			__this_cpu_write(bp_hardening_data.fn, NULL);

			state = SPECTRE_MITIGATED;
		}
	}

	update_mitigation_state(&spectre_bhb_state, state);
}

/* Patched to correct the immediate */
void __init spectre_bhb_patch_loop_iter(struct alt_instr *alt,
					__le32 *origptr, __le32 *updptr, int nr_inst)
{
	u8 rd;
	u32 insn;
	u16 loop_count = spectre_bhb_loop_affected(SCOPE_SYSTEM);

	BUG_ON(nr_inst != 1); /* MOV -> MOV */

	if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY))
		return;

	insn = le32_to_cpu(*origptr);
	rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);
	insn = aarch64_insn_gen_movewide(rd, loop_count, 0,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_ZERO);
	*updptr++ = cpu_to_le32(insn);
}