/*
 * Contains CPU specific errata definitions
 *
 * Copyright (C) 2014 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/arm-smccc.h>
#include <linux/psci.h>
#include <linux/types.h>
#include <linux/cpu.h>

#include <asm/cputype.h>
#include <asm/cpufeature.h>
#include <asm/smp_plat.h>

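/*
 * Match this CPU against the affected MIDR range of an errata entry. An
 * entry may also carry a ->fixed_revs list of (MIDR, REVIDR mask) pairs:
 * a CPU whose REVIDR has any of those bits set carries the fix and is
 * treated as unaffected.
 */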
static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
{
	const struct arm64_midr_revidr *fix;
	u32 midr = read_cpuid_id(), revidr;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	if (!is_midr_in_range(midr, &entry->midr_range))
		return false;

	midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
	revidr = read_cpuid(REVIDR_EL1);
	for (fix = entry->fixed_revs; fix && fix->revidr_mask; fix++)
		if (midr == fix->midr_rv && (revidr & fix->revidr_mask))
			return false;

	return true;
}

static bool __maybe_unused
is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry,
			    int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list);
}

static bool __maybe_unused
is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
{
	u32 model;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	model = read_cpuid_id();
	model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
		 MIDR_ARCHITECTURE_MASK;

	return model == entry->midr_range.model;
}

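/*
 * Compare this CPU's cache type register against the system-wide safe
 * value established at boot (arm64_ftr_reg_ctrel0); a deviation in the
 * strict fields means EL0 accesses to CTR_EL0 must be trapped and
 * emulated.
 */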
static bool
has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
			  int scope)
{
	u64 mask = CTR_CACHE_MINLINE_MASK;

	/* Skip matching the min line sizes for cache type check */
	if (entry->capability == ARM64_MISMATCHED_CACHE_TYPE)
		mask ^= arm64_ftr_reg_ctrel0.strict_mask;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return (read_cpuid_cachetype() & mask) !=
	       (arm64_ftr_reg_ctrel0.sys_val & mask);
}

static void
cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused)
{
	/* Clear SCTLR_EL1.UCT so EL0 CTR_EL0 accesses trap to the kernel */
	sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
}

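/*
 * Allocator state for the EL2 vector slots: each distinct hardening
 * callback claims one 2K slot in the __bp_harden_hyp_vecs area.
 */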
atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);

#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

#ifdef CONFIG_KVM_INDIRECT_VECTORS
extern char __smccc_workaround_1_smc_start[];
extern char __smccc_workaround_1_smc_end[];

static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
				const char *hyp_vecs_end)
{
	void *dst = lm_alias(__bp_harden_hyp_vecs_start + slot * SZ_2K);
	int i;

	for (i = 0; i < SZ_2K; i += 0x80)
		memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);

	__flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
}

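/*
 * Install the hardening callback for this CPU, reusing an existing hyp
 * vector slot if any other CPU already runs the same callback. Slots are
 * allocated under bp_lock and are never freed.
 */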
static void install_bp_hardening_cb(bp_hardening_cb_t fn,
				    const char *hyp_vecs_start,
				    const char *hyp_vecs_end)
{
	static DEFINE_SPINLOCK(bp_lock);
	int cpu, slot = -1;

	spin_lock(&bp_lock);
	for_each_possible_cpu(cpu) {
		if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
			slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
			break;
		}
	}

	if (slot == -1) {
		slot = atomic_inc_return(&arm64_el2_vector_last_slot);
		BUG_ON(slot >= BP_HARDEN_EL2_SLOTS);
		__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
	}

	__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
	__this_cpu_write(bp_hardening_data.fn, fn);
	spin_unlock(&bp_lock);
}
#else
#define __smccc_workaround_1_smc_start		NULL
#define __smccc_workaround_1_smc_end		NULL

static void install_bp_hardening_cb(bp_hardening_cb_t fn,
				    const char *hyp_vecs_start,
				    const char *hyp_vecs_end)
{
	__this_cpu_write(bp_hardening_data.fn, fn);
}
#endif	/* CONFIG_KVM_INDIRECT_VECTORS */

#include <uapi/linux/psci.h>
#include <linux/arm-smccc.h>
#include <linux/psci.h>

static void call_smc_arch_workaround_1(void)
{
	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

static void call_hvc_arch_workaround_1(void)
{
	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

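/*
 * On Falkor parts, overwriting the branch predictor's link stack with
 * benign entries (sixteen "bl . + 4" calls) is sufficient, avoiding the
 * firmware round trip.
 */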
static void qcom_link_stack_sanitization(void)
{
	u64 tmp;

	asm volatile("mov	%0, x30		\n"
		     ".rept	16		\n"
		     "bl	. + 4		\n"
		     ".endr			\n"
		     "mov	x30, %0		\n"
		     : "=&r" (tmp));
}

static bool __nospectre_v2;
static int __init parse_nospectre_v2(char *str)
{
	__nospectre_v2 = true;
	return 0;
}
early_param("nospectre_v2", parse_nospectre_v2);

/*
 * -1: No workaround
 *  0: No workaround required
 *  1: Workaround installed
 */
static int detect_harden_bp_fw(void)
{
	bp_hardening_cb_t cb;
	void *smccc_start, *smccc_end;
	struct arm_smccc_res res;
	u32 midr = read_cpuid_id();

	if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
		return -1;

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
		switch ((int)res.a0) {
		case 1:
			/* Firmware says we're just fine */
			return 0;
		case 0:
			cb = call_hvc_arch_workaround_1;
			/* This is a guest, no need to patch KVM vectors */
			smccc_start = NULL;
			smccc_end = NULL;
			break;
		default:
			return -1;
		}
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
		switch ((int)res.a0) {
		case 1:
			/* Firmware says we're just fine */
			return 0;
		case 0:
			cb = call_smc_arch_workaround_1;
			smccc_start = __smccc_workaround_1_smc_start;
			smccc_end = __smccc_workaround_1_smc_end;
			break;
		default:
			return -1;
		}
		break;

	default:
		return -1;
	}

	if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
	    ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
		cb = qcom_link_stack_sanitization;

	if (IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR))
		install_bp_hardening_cb(cb, smccc_start, smccc_end);

	return 1;
}

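/*
 * Per-CPU flag read by the SSBD alternative in the entry code: when set,
 * kernel entry/exit must call into firmware to toggle the mitigation.
 */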
DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);

int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
static bool __ssb_safe = true;

static const struct ssbd_options {
	const char	*str;
	int		state;
} ssbd_options[] = {
	{ "force-on",	ARM64_SSBD_FORCE_ENABLE, },
	{ "force-off",	ARM64_SSBD_FORCE_DISABLE, },
	{ "kernel",	ARM64_SSBD_KERNEL, },
};

static int __init ssbd_cfg(char *buf)
{
	int i;

	if (!buf || !buf[0])
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(ssbd_options); i++) {
		int len = strlen(ssbd_options[i].str);

		if (strncmp(buf, ssbd_options[i].str, len))
			continue;

		ssbd_state = ssbd_options[i].state;
		return 0;
	}

	return -EINVAL;
}
early_param("ssbd", ssbd_cfg);

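/*
 * Alternative-instruction callback: rewrite the patched instruction into
 * an HVC or SMC matching the PSCI conduit so ARCH_WORKAROUND_2 calls in
 * the entry code use the right trap. With no conduit, the NOP is left
 * in place.
 */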
void __init arm64_update_smccc_conduit(struct alt_instr *alt,
				       __le32 *origptr, __le32 *updptr,
				       int nr_inst)
{
	u32 insn;

	BUG_ON(nr_inst != 1);

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		insn = aarch64_insn_get_hvc_value();
		break;
	case PSCI_CONDUIT_SMC:
		insn = aarch64_insn_get_smc_value();
		break;
	default:
		return;
	}

	*updptr = cpu_to_le32(insn);
}

void __init arm64_enable_wa2_handling(struct alt_instr *alt,
				      __le32 *origptr, __le32 *updptr,
				      int nr_inst)
{
	BUG_ON(nr_inst != 1);
	/*
	 * Only allow mitigation on EL1 entry/exit and guest
	 * ARCH_WORKAROUND_2 handling if the SSBD state allows it to
	 * be flipped.
	 */
	if (arm64_get_ssbd_state() == ARM64_SSBD_KERNEL)
		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
}

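/*
 * Flip the SSBD mitigation for this CPU. CPUs implementing the SSBS
 * PSTATE bit can do it locally (SSBS set means speculation is allowed,
 * hence the inverted value); everyone else goes through the conduit.
 */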
void arm64_set_ssbd_mitigation(bool state)
{
	if (!IS_ENABLED(CONFIG_ARM64_SSBD)) {
		pr_info_once("SSBD disabled by kernel configuration\n");
		return;
	}

	if (this_cpu_has_cap(ARM64_SSBS)) {
		if (state)
			asm volatile(SET_PSTATE_SSBS(0));
		else
			asm volatile(SET_PSTATE_SSBS(1));
		return;
	}

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
		break;

	default:
		WARN_ON_ONCE(1);
		break;
	}
}

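/*
 * Decide whether this CPU needs the SSBD mitigation by combining the
 * command-line policy (ssbd_state), the whitelist of safe cores below,
 * the SSBS capability and ARCH_WORKAROUND_2 firmware discovery. Also
 * maintains the machine-wide __ssb_safe state reported via sysfs.
 */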
static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
				int scope)
{
	struct arm_smccc_res res;
	bool required = true;
	s32 val;
	bool this_cpu_safe = false;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	if (cpu_mitigations_off())
		ssbd_state = ARM64_SSBD_FORCE_DISABLE;

	/* delay setting __ssb_safe until we get a firmware response */
	if (is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list))
		this_cpu_safe = true;

	if (this_cpu_has_cap(ARM64_SSBS)) {
		if (!this_cpu_safe)
			__ssb_safe = false;
		required = false;
		goto out_printmsg;
	}

	if (psci_ops.smccc_version == SMCCC_VERSION_1_0) {
		ssbd_state = ARM64_SSBD_UNKNOWN;
		if (!this_cpu_safe)
			__ssb_safe = false;
		return false;
	}

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_2, &res);
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_2, &res);
		break;

	default:
		ssbd_state = ARM64_SSBD_UNKNOWN;
		if (!this_cpu_safe)
			__ssb_safe = false;
		return false;
	}

	val = (s32)res.a0;

	switch (val) {
	case SMCCC_RET_NOT_SUPPORTED:
		ssbd_state = ARM64_SSBD_UNKNOWN;
		if (!this_cpu_safe)
			__ssb_safe = false;
		return false;

	/* machines with mixed mitigation requirements must not return this */
	case SMCCC_RET_NOT_REQUIRED:
		pr_info_once("%s mitigation not required\n", entry->desc);
		ssbd_state = ARM64_SSBD_MITIGATED;
		return false;

	case SMCCC_RET_SUCCESS:
		__ssb_safe = false;
		required = true;
		break;

	case 1:	/* Mitigation not required on this CPU */
		required = false;
		break;

	default:
		WARN_ON(1);
		if (!this_cpu_safe)
			__ssb_safe = false;
		return false;
	}

	switch (ssbd_state) {
	case ARM64_SSBD_FORCE_DISABLE:
		arm64_set_ssbd_mitigation(false);
		required = false;
		break;

	case ARM64_SSBD_KERNEL:
		if (required) {
			__this_cpu_write(arm64_ssbd_callback_required, 1);
			arm64_set_ssbd_mitigation(true);
		}
		break;

	case ARM64_SSBD_FORCE_ENABLE:
		arm64_set_ssbd_mitigation(true);
		required = true;
		break;

	default:
		WARN_ON(1);
		break;
	}

out_printmsg:
	switch (ssbd_state) {
	case ARM64_SSBD_FORCE_DISABLE:
		pr_info_once("%s disabled from command-line\n", entry->desc);
		break;

	case ARM64_SSBD_FORCE_ENABLE:
		pr_info_once("%s forced from command-line\n", entry->desc);
		break;
	}

	return required;
}

/* known invulnerable cores */
static const struct midr_range arm64_ssb_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
	{},
};

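/*
 * Cortex-A76 erratum 1463225: the per-CPU flag below is raised while the
 * single-step workaround window is open (in the syscall entry path) so
 * that the resulting spurious debug exception can be recognised and
 * ignored by the exception handler.
 */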
#ifdef CONFIG_ARM64_ERRATUM_1463225
DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);

static bool
has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
			       int scope)
{
	u32 midr = read_cpuid_id();
	/* Cortex-A76 r0p0 - r3p1 */
	struct midr_range range = MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1);

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range(midr, &range) && is_kernel_in_hyp_mode();
}
#endif

#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_ALL_VERSIONS(model)				\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_ALL_VERSIONS(model)

#define MIDR_FIXED(rev, revidr_mask)				\
	.fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}

#define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_RANGE_LIST(list)				\
	.matches = is_affected_midr_range_list,			\
	.midr_range_list = list

/* Errata affecting a range of revisions of a given model variant */
#define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max)		\
	ERRATA_MIDR_RANGE(m, var, r_min, var, r_max)

/* Errata affecting a single variant/revision of a model */
#define ERRATA_MIDR_REV(model, var, rev)			\
	ERRATA_MIDR_RANGE(model, var, rev, var, rev)

/* Errata affecting all variants/revisions of a given model */
#define ERRATA_MIDR_ALL_VERSIONS(model)				\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_ALL_VERSIONS(model)

/* Errata affecting a list of midr ranges, with the same workaround */
#define ERRATA_MIDR_RANGE_LIST(midr_list)			\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_RANGE_LIST(midr_list)

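/*
 * Example: an erratum affecting Cortex-A57 r0p0 through r1p2 would use
 *
 *	ERRATA_MIDR_RANGE(MIDR_CORTEX_A57, 0, 0, 1, 2)
 *
 * which expands to
 *
 *	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
 *	.matches = is_affected_midr_range,
 *	.midr_range = MIDR_RANGE(MIDR_CORTEX_A57, 0, 0, 1, 2)
 */
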
/* Track overall mitigation state. We are only mitigated if all cores are ok */
static bool __hardenbp_enab = true;
static bool __spectrev2_safe = true;

/*
 * Generic helper for handling capabilities with multiple (match, enable)
 * pairs of callbacks, sharing the same capability bit.
 * Iterate over each entry to see if at least one matches.
 */
static bool __maybe_unused
multi_entry_cap_matches(const struct arm64_cpu_capabilities *entry, int scope)
{
	const struct arm64_cpu_capabilities *caps;

	for (caps = entry->match_list; caps->matches; caps++)
		if (caps->matches(caps, scope))
			return true;

	return false;
}

/*
 * Take appropriate action for all matching entries in the shared capability
 * entry.
 */
static void __maybe_unused
multi_entry_cap_cpu_enable(const struct arm64_cpu_capabilities *entry)
{
	const struct arm64_cpu_capabilities *caps;

	for (caps = entry->match_list; caps->matches; caps++)
		if (caps->matches(caps, SCOPE_LOCAL_CPU) &&
		    caps->cpu_enable)
			caps->cpu_enable(caps);
}

/*
 * List of CPUs that do not need any Spectre-v2 mitigation at all.
 */
static const struct midr_range spectre_v2_safe_list[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
	{ /* sentinel */ },
};

/*
 * Track overall bp hardening for all heterogeneous cores in the machine.
 * We are only considered "safe" if all booted cores are known safe.
 */
static bool __maybe_unused
check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
{
	int need_wa;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	/* If the CPU has CSV2 set, we're safe */
	if (cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64PFR0_EL1),
						 ID_AA64PFR0_CSV2_SHIFT))
		return false;

	/* Alternatively, we have a list of unaffected CPUs */
	if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
		return false;

	/* Fallback to firmware detection */
	need_wa = detect_harden_bp_fw();
	if (!need_wa)
		return false;

	__spectrev2_safe = false;

	if (!IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR)) {
		pr_warn_once("spectrev2 mitigation disabled by kernel configuration\n");
		__hardenbp_enab = false;
		return false;
	}

	/* forced off */
	if (__nospectre_v2 || cpu_mitigations_off()) {
		pr_info_once("spectrev2 mitigation disabled by command line option\n");
		__hardenbp_enab = false;
		return false;
	}

	if (need_wa < 0) {
		pr_warn_once("ARM_SMCCC_ARCH_WORKAROUND_1 missing from firmware\n");
		__hardenbp_enab = false;
	}

	return (need_wa > 0);
}

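/*
 * The matches() callback installs the firmware hardening callback as a
 * side effect, so re-running it from cpu_enable() ensures every CPU,
 * including late-onlined ones, gets the callback installed.
 */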
static void
cpu_enable_branch_predictor_hardening(const struct arm64_cpu_capabilities *cap)
{
	cap->matches(cap, SCOPE_LOCAL_CPU);
}

static const __maybe_unused struct midr_range tx2_family_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
	MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
	{},
};

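/*
 * The TVM trap is only needed when running with a hypervisor available
 * and a sibling hardware thread present: a nonzero MPIDR Aff0 on any
 * core indicates SMT is in use.
 */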
static bool __maybe_unused
needs_tx2_tvm_workaround(const struct arm64_cpu_capabilities *entry,
			 int scope)
{
	int i;

	if (!is_affected_midr_range_list(entry, scope) ||
	    !is_hyp_mode_available())
		return false;

	for_each_possible_cpu(i) {
		if (MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0) != 0)
			return true;
	}

	return false;
}

static bool __maybe_unused
has_neoverse_n1_erratum_1542419(const struct arm64_cpu_capabilities *entry,
				int scope)
{
	u32 midr = read_cpuid_id();
	bool has_dic = read_cpuid_cachetype() & BIT(CTR_DIC_SHIFT);
	const struct midr_range range = MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1);

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range(midr, &range) && has_dic;
}

#ifdef CONFIG_HARDEN_EL2_VECTORS

static const struct midr_range arm64_harden_el2_vectors[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
	{},
};

#endif

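/*
 * The errata table proper. Each entry is matched against the local CPU,
 * at boot and again whenever a CPU comes online late.
 */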
const struct arm64_cpu_capabilities arm64_errata[] = {
#if	defined(CONFIG_ARM64_ERRATUM_826319) || \
	defined(CONFIG_ARM64_ERRATUM_827319) || \
	defined(CONFIG_ARM64_ERRATUM_824069)
	{
		/* Cortex-A53 r0p[012] */
		.desc = "ARM errata 826319, 827319, 824069",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
		.cpu_enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_819472
	{
		/* Cortex-A53 r0p[01] */
		.desc = "ARM errata 819472",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
		.cpu_enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_832075
	{
		/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 832075",
		.capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_834220
	{
		/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 834220",
		.capability = ARM64_WORKAROUND_834220,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_843419
	{
		/* Cortex-A53 r0p[01234] */
		.desc = "ARM erratum 843419",
		.capability = ARM64_WORKAROUND_843419,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
		MIDR_FIXED(0x4, BIT(8)),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
	{
		/* Cortex-A53 r0p[01234] */
		.desc = "ARM erratum 845719",
		.capability = ARM64_WORKAROUND_845719,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23154
	{
		/* Cavium ThunderX, pass 1.x */
		.desc = "Cavium erratum 23154",
		.capability = ARM64_WORKAROUND_CAVIUM_23154,
		ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX, 0, 0, 1),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
	{
		/* Cavium ThunderX, T88 pass 1.x - 2.1 */
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		ERRATA_MIDR_RANGE(MIDR_THUNDERX,
				  0, 0,
				  1, 1),
	},
	{
		/* Cavium ThunderX, T81 pass 1.0 */
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		ERRATA_MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_30115
	{
		/* Cavium ThunderX, T88 pass 1.x - 2.2 */
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		ERRATA_MIDR_RANGE(MIDR_THUNDERX,
				  0, 0,
				  1, 2),
	},
	{
		/* Cavium ThunderX, T81 pass 1.0 - 1.2 */
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2),
	},
	{
		/* Cavium ThunderX, T83 pass 1.0 */
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		ERRATA_MIDR_REV(MIDR_THUNDERX_83XX, 0, 0),
	},
#endif
	{
		.desc = "Mismatched cache line size",
		.capability = ARM64_MISMATCHED_CACHE_LINE_SIZE,
		.matches = has_mismatched_cache_type,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.cpu_enable = cpu_enable_trap_ctr_access,
	},
	{
		.desc = "Mismatched cache type",
		.capability = ARM64_MISMATCHED_CACHE_TYPE,
		.matches = has_mismatched_cache_type,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.cpu_enable = cpu_enable_trap_ctr_access,
	},
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
	{
		.desc = "Qualcomm Technologies Falkor erratum 1003",
		.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
	},
	{
		.desc = "Qualcomm Technologies Kryo erratum 1003",
		.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.midr_range.model = MIDR_QCOM_KRYO,
		.matches = is_kryo_midr,
	},
#endif
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
	{
		.desc = "Qualcomm Technologies Falkor erratum 1009",
		.capability = ARM64_WORKAROUND_REPEAT_TLBI,
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
	{
		/* Cortex-A73 all versions */
		.desc = "ARM erratum 858921",
		.capability = ARM64_WORKAROUND_858921,
		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
	},
#endif
	{
		.desc = "Branch predictor hardening",
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = check_branch_predictor,
		.cpu_enable = cpu_enable_branch_predictor_hardening,
	},
#ifdef CONFIG_HARDEN_EL2_VECTORS
	{
		.desc = "EL2 vector hardening",
		.capability = ARM64_HARDEN_EL2_VECTORS,
		ERRATA_MIDR_RANGE_LIST(arm64_harden_el2_vectors),
	},
#endif
	{
		.desc = "Speculative Store Bypass Disable",
		.capability = ARM64_SSBD,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_ssbd_mitigation,
		.midr_range_list = arm64_ssb_cpus,
	},
#ifdef CONFIG_ARM64_ERRATUM_1463225
	{
		.desc = "ARM erratum 1463225",
		.capability = ARM64_WORKAROUND_1463225,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_cortex_a76_erratum_1463225,
	},
#endif
#ifdef CONFIG_CAVIUM_TX2_ERRATUM_219
	{
		.desc = "Cavium ThunderX2 erratum 219 (KVM guest sysreg trapping)",
		.capability = ARM64_WORKAROUND_CAVIUM_TX2_219_TVM,
		ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
		.matches = needs_tx2_tvm_workaround,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1542419
	{
		/* we depend on the firmware portion for correctness */
		.desc = "ARM erratum 1542419 (kernel portion)",
		.capability = ARM64_WORKAROUND_1542419,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_neoverse_n1_erratum_1542419,
		.cpu_enable = cpu_enable_trap_ctr_access,
	},
#endif
	{
	}
};

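/*
 * sysfs vulnerability reporting, surfaced under
 * /sys/devices/system/cpu/vulnerabilities/.
 */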
ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "Mitigation: __user pointer sanitization\n");
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	if (__spectrev2_safe)
		return sprintf(buf, "Not affected\n");

	if (__hardenbp_enab)
		return sprintf(buf, "Mitigation: Branch predictor hardening\n");

	return sprintf(buf, "Vulnerable\n");
}

ssize_t cpu_show_spec_store_bypass(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	if (__ssb_safe)
		return sprintf(buf, "Not affected\n");

	switch (ssbd_state) {
	case ARM64_SSBD_KERNEL:
	case ARM64_SSBD_FORCE_ENABLE:
		if (IS_ENABLED(CONFIG_ARM64_SSBD))
			return sprintf(buf,
			    "Mitigation: Speculative Store Bypass disabled via prctl\n");
	}

	return sprintf(buf, "Vulnerable\n");
}