/*
 * Contains CPU specific errata definitions
 *
 * Copyright (C) 2014 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/arm-smccc.h>
#include <linux/psci.h>
#include <linux/types.h>
#include <asm/cachetype.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>
#include <asm/vectors.h>
static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
{
	u32 midr = read_cpuid_id();

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range(midr, &entry->midr_range);
}
static bool __maybe_unused
is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry,
			    int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list);
}
static bool
has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
			  int scope)
{
	u64 mask = CTR_CACHE_MINLINE_MASK;

	/* Skip matching the min line sizes for cache type check */
	if (entry->capability == ARM64_MISMATCHED_CACHE_TYPE)
		mask ^= arm64_ftr_reg_ctrel0.strict_mask;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return (read_cpuid_cachetype() & mask) !=
	       (arm64_ftr_reg_ctrel0.sys_val & mask);
}
static void
cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused)
{
	/* Clear SCTLR_EL1.UCT */
	config_sctlr_el1(SCTLR_EL1_UCT, 0);
}
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

static bool __hardenbp_enab;
DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
#ifdef CONFIG_KVM
extern char __smccc_workaround_1_smc_start[];
extern char __smccc_workaround_1_smc_end[];
extern char __smccc_workaround_1_hvc_start[];
extern char __smccc_workaround_1_hvc_end[];
extern char __smccc_workaround_3_smc_start[];
extern char __smccc_workaround_3_smc_end[];
extern char __spectre_bhb_loop_k8_start[];
extern char __spectre_bhb_loop_k8_end[];
extern char __spectre_bhb_loop_k24_start[];
extern char __spectre_bhb_loop_k24_end[];
extern char __spectre_bhb_loop_k32_start[];
extern char __spectre_bhb_loop_k32_end[];
extern char __spectre_bhb_clearbhb_start[];
extern char __spectre_bhb_clearbhb_end[];
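
/*
 * The hypervisor vectors are made of 2K slots; copy the chosen workaround
 * template into every 0x80-byte vector entry of the slot, then flush the
 * I-cache so instruction fetch sees the new sequence.
 */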
static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
				const char *hyp_vecs_end)
{
	void *dst = __bp_harden_hyp_vecs_start + slot * SZ_2K;
	int i;

	for (i = 0; i < SZ_2K; i += 0x80)
		memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);

	flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
}
static DEFINE_SPINLOCK(bp_lock);
static int last_slot = -1;
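
/*
 * Install @fn as this CPU's branch-predictor hardening callback and pick
 * (or reuse) a hypervisor vector slot holding the matching workaround
 * template.  CPUs that share a callback share a slot.
 */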
static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
				      const char *hyp_vecs_start,
				      const char *hyp_vecs_end)
{
	int cpu, slot = -1;

	spin_lock(&bp_lock);
	for_each_possible_cpu(cpu) {
		if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
			slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
			break;
		}
	}

	if (slot == -1) {
		last_slot++;
		BUG_ON(((__bp_harden_hyp_vecs_end - __bp_harden_hyp_vecs_start)
			/ SZ_2K) <= last_slot);
		slot = last_slot;
		__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
	}

	__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
	__this_cpu_write(bp_hardening_data.fn, fn);
	__this_cpu_write(bp_hardening_data.template_start, hyp_vecs_start);
	__hardenbp_enab = true;
	spin_unlock(&bp_lock);
}
#else
#define __smccc_workaround_1_smc_start		NULL
#define __smccc_workaround_1_smc_end		NULL
#define __smccc_workaround_1_hvc_start		NULL
#define __smccc_workaround_1_hvc_end		NULL

static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
				      const char *hyp_vecs_start,
				      const char *hyp_vecs_end)
{
	__this_cpu_write(bp_hardening_data.fn, fn);
	__hardenbp_enab = true;
}
#endif	/* CONFIG_KVM */
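
/*
 * Enable the hardening callback on this CPU, unless ID_AA64PFR0_EL1.CSV2
 * reports that the branch predictor cannot be influenced by a different
 * security context.
 */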
static void install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry,
				    bp_hardening_cb_t fn,
				    const char *hyp_vecs_start,
				    const char *hyp_vecs_end)
{
	u64 pfr0;

	if (!entry->matches(entry, SCOPE_LOCAL_CPU))
		return;

	pfr0 = read_cpuid(ID_AA64PFR0_EL1);
	if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
		return;

	__install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end);
}
#include <uapi/linux/psci.h>
#include <linux/arm-smccc.h>
#include <linux/psci.h>
static void call_smc_arch_workaround_1(void)
{
	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

static void call_hvc_arch_workaround_1(void)
{
	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}
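
/*
 * Probe the firmware through the PSCI conduit for ARM_SMCCC_ARCH_WORKAROUND_1
 * and, if it is implemented, point this CPU's hardening callback at the
 * matching SMC/HVC stub.
 */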
static void
enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
{
	bp_hardening_cb_t cb;
	void *smccc_start, *smccc_end;
	struct arm_smccc_res res;

	if (!entry->matches(entry, SCOPE_LOCAL_CPU))
		return;

	if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
		return;

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
		if ((int)res.a0 < 0)
			return;
		cb = call_hvc_arch_workaround_1;
		smccc_start = __smccc_workaround_1_hvc_start;
		smccc_end = __smccc_workaround_1_hvc_end;
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
		if ((int)res.a0 < 0)
			return;
		cb = call_smc_arch_workaround_1;
		smccc_start = __smccc_workaround_1_smc_start;
		smccc_end = __smccc_workaround_1_smc_end;
		break;

	default:
		return;
	}

	install_bp_hardening_cb(entry, cb, smccc_start, smccc_end);

	return;
}
#endif	/* CONFIG_HARDEN_BRANCH_PREDICTOR */
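
/*
 * Alternative callback: patch the placeholder instruction at the SMCCC call
 * site to either HVC #0 or SMC #0, matching the conduit reported by PSCI.
 */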
void __init arm64_update_smccc_conduit(struct alt_instr *alt,
				       __le32 *origptr, __le32 *updptr,
				       int nr_inst)
{
	u32 insn;

	BUG_ON(nr_inst != 1);

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		insn = aarch64_insn_get_hvc_value();
		break;
	case PSCI_CONDUIT_SMC:
		insn = aarch64_insn_get_smc_value();
		break;
	default:
		return;
	}

	*updptr = cpu_to_le32(insn);
}
#ifdef CONFIG_ARM64_SSBD
DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);

int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;

static const struct ssbd_options {
	const char	*str;
	int		state;
} ssbd_options[] = {
	{ "force-on",	ARM64_SSBD_FORCE_ENABLE, },
	{ "force-off",	ARM64_SSBD_FORCE_DISABLE, },
	{ "kernel",	ARM64_SSBD_KERNEL, },
};
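
/* Parse the "ssbd=" command-line option: force-on, force-off or kernel. */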
static int __init ssbd_cfg(char *buf)
{
	int i;

	if (!buf || !buf[0])
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(ssbd_options); i++) {
		int len = strlen(ssbd_options[i].str);

		if (strncmp(buf, ssbd_options[i].str, len))
			continue;

		ssbd_state = ssbd_options[i].state;
		return 0;
	}

	return -EINVAL;
}
early_param("ssbd", ssbd_cfg);
void __init arm64_enable_wa2_handling(struct alt_instr *alt,
				      __le32 *origptr, __le32 *updptr,
				      int nr_inst)
{
	BUG_ON(nr_inst != 1);
	/*
	 * Only allow mitigation on EL1 entry/exit and guest
	 * ARCH_WORKAROUND_2 handling if the SSBD state allows it to
	 * be flipped.
	 */
	if (arm64_get_ssbd_state() == ARM64_SSBD_KERNEL)
		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
}
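
/* Ask firmware to enable or disable the SSBD mitigation for this CPU. */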
void arm64_set_ssbd_mitigation(bool state)
{
	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
		break;

	default:
		WARN_ON_ONCE(1);
		break;
	}
}
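
/*
 * Detect whether this CPU needs the SSBD workaround by querying firmware for
 * ARM_SMCCC_ARCH_WORKAROUND_2, then reconcile the answer with any "ssbd="
 * override from the command line.
 */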
static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
				int scope)
{
	struct arm_smccc_res res;
	bool required = true;
	s32 val;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	if (psci_ops.smccc_version == SMCCC_VERSION_1_0) {
		ssbd_state = ARM64_SSBD_UNKNOWN;
		return false;
	}

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_2, &res);
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_2, &res);
		break;

	default:
		ssbd_state = ARM64_SSBD_UNKNOWN;
		return false;
	}

	val = (s32)res.a0;

	switch (val) {
	case SMCCC_RET_NOT_SUPPORTED:
		ssbd_state = ARM64_SSBD_UNKNOWN;
		return false;

	case SMCCC_RET_NOT_REQUIRED:
		pr_info_once("%s mitigation not required\n", entry->desc);
		ssbd_state = ARM64_SSBD_MITIGATED;
		return false;

	case SMCCC_RET_SUCCESS:
		required = true;
		break;

	case 1:	/* Mitigation not required on this CPU */
		required = false;
		break;

	default:
		WARN_ON(1);
		return false;
	}

	switch (ssbd_state) {
	case ARM64_SSBD_FORCE_DISABLE:
		pr_info_once("%s disabled from command-line\n", entry->desc);
		arm64_set_ssbd_mitigation(false);
		required = false;
		break;

	case ARM64_SSBD_KERNEL:
		if (required) {
			__this_cpu_write(arm64_ssbd_callback_required, 1);
			arm64_set_ssbd_mitigation(true);
		}
		break;

	case ARM64_SSBD_FORCE_ENABLE:
		pr_info_once("%s forced from command-line\n", entry->desc);
		arm64_set_ssbd_mitigation(true);
		required = true;
		break;

	default:
		WARN_ON(1);
		break;
	}

	return required;
}
#endif	/* CONFIG_ARM64_SSBD */
#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_ALL_VERSIONS(model)				\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_ALL_VERSIONS(model)

#define MIDR_FIXED(rev, revidr_mask) \
	.fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}

#define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_RANGE_LIST(list)				\
	.matches = is_affected_midr_range_list,			\
	.midr_range_list = list

/* Errata affecting a range of revisions of a given model variant */
#define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max)		\
	ERRATA_MIDR_RANGE(m, var, r_min, var, r_max)

/* Errata affecting a single variant/revision of a model */
#define ERRATA_MIDR_REV(model, var, rev) \
	ERRATA_MIDR_RANGE(model, var, rev, var, rev)

/* Errata affecting all variants/revisions of a given model */
#define ERRATA_MIDR_ALL_VERSIONS(model)				\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_ALL_VERSIONS(model)

/* Errata affecting a list of midr ranges, with the same workaround */
#define ERRATA_MIDR_RANGE_LIST(midr_list)			\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_RANGE_LIST(midr_list)
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR

/*
 * List of CPUs where we need to issue a PSCI call to
 * harden the branch predictor.
 */
static const struct midr_range arm64_bp_harden_smccc_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
	MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
	MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
	{},
};

#endif
const struct arm64_cpu_capabilities arm64_errata[] = {
#if	defined(CONFIG_ARM64_ERRATUM_826319) || \
	defined(CONFIG_ARM64_ERRATUM_827319) || \
	defined(CONFIG_ARM64_ERRATUM_824069)
	{
		/* Cortex-A53 r0p[012] */
		.desc = "ARM errata 826319, 827319, 824069",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
		.cpu_enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_819472
	{
		/* Cortex-A53 r0p[01] */
		.desc = "ARM errata 819472",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
		.cpu_enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_832075
	{
		/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 832075",
		.capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_834220
	{
		/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 834220",
		.capability = ARM64_WORKAROUND_834220,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
	{
		/* Cortex-A53 r0p[01234] */
		.desc = "ARM erratum 845719",
		.capability = ARM64_WORKAROUND_845719,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23154
	{
		/* Cavium ThunderX, pass 1.x */
		.desc = "Cavium erratum 23154",
		.capability = ARM64_WORKAROUND_CAVIUM_23154,
		ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX, 0, 0, 1),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
	{
		/* Cavium ThunderX, T88 pass 1.x - 2.1 */
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		ERRATA_MIDR_RANGE(MIDR_THUNDERX,
				  0, 0,
				  1, 1),
	},
	{
		/* Cavium ThunderX, T81 pass 1.0 */
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		ERRATA_MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
	},
#endif
	{
		.desc = "Mismatched cache line size",
		.capability = ARM64_MISMATCHED_CACHE_LINE_SIZE,
		.matches = has_mismatched_cache_type,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.cpu_enable = cpu_enable_trap_ctr_access,
	},
	{
		.desc = "Mismatched cache type",
		.capability = ARM64_MISMATCHED_CACHE_TYPE,
		.matches = has_mismatched_cache_type,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.cpu_enable = cpu_enable_trap_ctr_access,
	},
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		ERRATA_MIDR_RANGE_LIST(arm64_bp_harden_smccc_cpus),
		.cpu_enable = enable_smccc_arch_workaround_1,
	},
#endif
#ifdef CONFIG_ARM64_SSBD
	{
		.desc = "Speculative Store Bypass Disable",
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.capability = ARM64_SSBD,
		.matches = has_ssbd_mitigation,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1188873
	{
		/* Cortex-A76 r0p0 to r2p0 */
		.desc = "ARM erratum 1188873",
		.capability = ARM64_WORKAROUND_1188873,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
	},
#endif
	{
		.desc = "Spectre-BHB",
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.capability = ARM64_SPECTRE_BHB,
		.matches = is_spectre_bhb_affected,
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
		.cpu_enable = spectre_bhb_enable_mitigation,
#endif
	},
	{
	}
};
/*
 * We try to ensure that the mitigation state can never change as the result of
 * onlining a late CPU.
 */
static void __maybe_unused update_mitigation_state(enum mitigation_state *oldp,
						   enum mitigation_state new)
{
	enum mitigation_state state;

	do {
		state = READ_ONCE(*oldp);
		if (new <= state)
			break;
	} while (cmpxchg_relaxed(oldp, state, new) != state);
}
/*
 * Spectre BHB.
 *
 * A CPU is either:
 * - Mitigated by a branchy loop a CPU specific number of times, and listed
 *   in our "loop mitigated list".
 * - Mitigated in software by the firmware Spectre v2 call.
 * - Has the ClearBHB instruction to perform the mitigation.
 * - Has the 'Exception Clears Branch History Buffer' (ECBHB) feature, so no
 *   software mitigation in the vectors is needed.
 * - Has CSV2.3, so is unaffected.
 */
static enum mitigation_state spectre_bhb_state;
enum mitigation_state arm64_get_spectre_bhb_state(void)
{
	return spectre_bhb_state;
}
/*
 * This must be called with SCOPE_LOCAL_CPU for each type of CPU, before any
 * SCOPE_SYSTEM call will give the right answer.
 */
u8 spectre_bhb_loop_affected(int scope)
{
	u8 k = 0;
	static u8 max_bhb_k;

	if (scope == SCOPE_LOCAL_CPU) {
		static const struct midr_range spectre_bhb_k32_list[] = {
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
			{},
		};
		static const struct midr_range spectre_bhb_k24_list[] = {
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
			{},
		};
		static const struct midr_range spectre_bhb_k8_list[] = {
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
			{},
		};

		if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list))
			k = 32;
		else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list))
			k = 24;
		else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list))
			k = 8;

		max_bhb_k = max(max_bhb_k, k);
	} else {
		k = max_bhb_k;
	}

	return k;
}
static enum mitigation_state spectre_bhb_get_cpu_fw_mitigation_state(void)
{
	int ret;
	struct arm_smccc_res res;

	if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
		return SPECTRE_VULNERABLE;

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_3, &res);
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_3, &res);
		break;

	default:
		return SPECTRE_VULNERABLE;
	}

	ret = res.a0;
	switch (ret) {
	case SMCCC_RET_SUCCESS:
		return SPECTRE_MITIGATED;
	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
		return SPECTRE_UNAFFECTED;
	default:
	case SMCCC_RET_NOT_SUPPORTED:
		return SPECTRE_VULNERABLE;
	}
}
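
/*
 * A CPU is affected via the firmware route if it is in the list of CPUs with
 * a firmware mitigation, or if firmware advertises ARM_SMCCC_ARCH_WORKAROUND_3.
 * The result is latched in system_affected so that later SCOPE_SYSTEM queries
 * report any affected CPU seen so far.
 */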
static bool is_spectre_bhb_fw_affected(int scope)
{
	static bool system_affected;
	enum mitigation_state fw_state;
	bool has_smccc = (psci_ops.smccc_version >= SMCCC_VERSION_1_1);
	static const struct midr_range spectre_bhb_firmware_mitigated_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
		{},
	};
	bool cpu_in_list = is_midr_in_range_list(read_cpuid_id(),
					 spectre_bhb_firmware_mitigated_list);

	if (scope != SCOPE_LOCAL_CPU)
		return system_affected;

	fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
	if (cpu_in_list || (has_smccc && fw_state == SPECTRE_MITIGATED)) {
		system_affected = true;
		return true;
	}

	return false;
}
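
/*
 * ECBHB means the branch history is cleared on exception entry, so no
 * software sequence is needed in the EL1 vectors.
 */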
static bool __maybe_unused supports_ecbhb(int scope)
{
	u64 mmfr1;

	if (scope == SCOPE_LOCAL_CPU)
		mmfr1 = read_sysreg_s(SYS_ID_AA64MMFR1_EL1);
	else
		mmfr1 = read_system_reg(SYS_ID_AA64MMFR1_EL1);

	return cpuid_feature_extract_unsigned_field(mmfr1,
						    ID_AA64MMFR1_ECBHB_SHIFT);
}
bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
			     int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	if (supports_csv2p3(scope))
		return false;

	if (supports_clearbhb(scope))
		return false;

	if (spectre_bhb_loop_affected(scope))
		return true;

	if (is_spectre_bhb_fw_affected(scope))
		return true;

	return false;
}
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
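/*
 * Point this CPU's EL1 vectors at the variant that contains the chosen
 * branch-history clearing sequence.
 */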
static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
{
	const char *v = arm64_get_bp_hardening_vector(slot);

	if (slot < 0)
		return;

	__this_cpu_write(this_cpu_vector, v);

	/*
	 * When KPTI is in use, the vectors are switched when exiting to
	 * user-space.
	 */
	if (arm64_kernel_unmapped_at_el0())
		return;

	write_sysreg(v, vbar_el1);
	isb();
}
#ifdef CONFIG_KVM
static const char *kvm_bhb_get_vecs_end(const char *start)
{
	if (start == __smccc_workaround_3_smc_start)
		return __smccc_workaround_3_smc_end;
	else if (start == __spectre_bhb_loop_k8_start)
		return __spectre_bhb_loop_k8_end;
	else if (start == __spectre_bhb_loop_k24_start)
		return __spectre_bhb_loop_k24_end;
	else if (start == __spectre_bhb_loop_k32_start)
		return __spectre_bhb_loop_k32_end;
	else if (start == __spectre_bhb_clearbhb_start)
		return __spectre_bhb_clearbhb_end;

	return NULL;
}
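
/*
 * Copy the given Spectre-BHB template into a KVM hypervisor vector slot,
 * reusing a slot that already holds the same template where possible.
 */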
static void kvm_setup_bhb_slot(const char *hyp_vecs_start)
{
	int cpu, slot = -1;
	const char *hyp_vecs_end;

	if (!IS_ENABLED(CONFIG_KVM) || !is_hyp_mode_available())
		return;

	hyp_vecs_end = kvm_bhb_get_vecs_end(hyp_vecs_start);
	if (WARN_ON_ONCE(!hyp_vecs_start || !hyp_vecs_end))
		return;

	spin_lock(&bp_lock);
	for_each_possible_cpu(cpu) {
		if (per_cpu(bp_hardening_data.template_start, cpu) == hyp_vecs_start) {
			slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
			break;
		}
	}

	if (slot == -1) {
		last_slot++;
		BUG_ON(((__bp_harden_hyp_vecs_end - __bp_harden_hyp_vecs_start)
			/ SZ_2K) <= last_slot);
		slot = last_slot;
		__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
	}

	__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
	__this_cpu_write(bp_hardening_data.template_start, hyp_vecs_start);
	spin_unlock(&bp_lock);
}
#else
#define __smccc_workaround_3_smc_start NULL
#define __spectre_bhb_loop_k8_start NULL
#define __spectre_bhb_loop_k24_start NULL
#define __spectre_bhb_loop_k32_start NULL
#define __spectre_bhb_clearbhb_start NULL

static void kvm_setup_bhb_slot(const char *hyp_vecs_start) { }
#endif /* CONFIG_KVM */
static bool is_spectrev2_safe(void)
{
	return !is_midr_in_range_list(read_cpuid_id(),
				      arm64_bp_harden_smccc_cpus);
}
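
/*
 * Pick the strongest available Spectre-BHB mitigation for this CPU, in order:
 * ECBHB (nothing to do), the ClearBHB instruction, the CPU-specific branchy
 * loop, then the firmware ARM_SMCCC_ARCH_WORKAROUND_3 call.
 */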
void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
{
	enum mitigation_state fw_state, state = SPECTRE_VULNERABLE;

	if (!is_spectre_bhb_affected(entry, SCOPE_LOCAL_CPU))
		return;

	if (!is_spectrev2_safe() && !__hardenbp_enab) {
		/* No point mitigating Spectre-BHB alone. */
	} else if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY)) {
		pr_info_once("spectre-bhb mitigation disabled by compile time option\n");
	} else if (cpu_mitigations_off()) {
		pr_info_once("spectre-bhb mitigation disabled by command line option\n");
	} else if (supports_ecbhb(SCOPE_LOCAL_CPU)) {
		state = SPECTRE_MITIGATED;
	} else if (supports_clearbhb(SCOPE_LOCAL_CPU)) {
		kvm_setup_bhb_slot(__spectre_bhb_clearbhb_start);
		this_cpu_set_vectors(EL1_VECTOR_BHB_CLEAR_INSN);

		state = SPECTRE_MITIGATED;
	} else if (spectre_bhb_loop_affected(SCOPE_LOCAL_CPU)) {
		switch (spectre_bhb_loop_affected(SCOPE_SYSTEM)) {
		case 8:
			kvm_setup_bhb_slot(__spectre_bhb_loop_k8_start);
			break;
		case 24:
			kvm_setup_bhb_slot(__spectre_bhb_loop_k24_start);
			break;
		case 32:
			kvm_setup_bhb_slot(__spectre_bhb_loop_k32_start);
			break;
		default:
			WARN_ON_ONCE(1);
		}
		this_cpu_set_vectors(EL1_VECTOR_BHB_LOOP);

		state = SPECTRE_MITIGATED;
	} else if (is_spectre_bhb_fw_affected(SCOPE_LOCAL_CPU)) {
		fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
		if (fw_state == SPECTRE_MITIGATED) {
			kvm_setup_bhb_slot(__smccc_workaround_3_smc_start);
			this_cpu_set_vectors(EL1_VECTOR_BHB_FW);

			/*
			 * With WA3 in the vectors, the WA1 calls can be
			 * removed.
			 */
			__this_cpu_write(bp_hardening_data.fn, NULL);

			state = SPECTRE_MITIGATED;
		}
	}

	update_mitigation_state(&spectre_bhb_state, state);
}
/* Patched to correct the immediate */
void __init spectre_bhb_patch_loop_iter(struct alt_instr *alt,
				   __le32 *origptr, __le32 *updptr, int nr_inst)
{
	u8 rd;
	u32 insn;
	u16 loop_count = spectre_bhb_loop_affected(SCOPE_SYSTEM);

	BUG_ON(nr_inst != 1); /* MOV -> MOV */

	if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY))
		return;

	insn = le32_to_cpu(*origptr);
	rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);
	insn = aarch64_insn_gen_movewide(rd, loop_count, 0,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_ZERO);
	*updptr++ = cpu_to_le32(insn);
}
#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */