/*
 * Contains CPU specific errata definitions
 *
 * Copyright (C) 2014 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/arm-smccc.h>
#include <linux/psci.h>
#include <linux/types.h>
#include <linux/cpu.h>

#include <asm/cputype.h>
#include <asm/cpufeature.h>
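
/*
 * The erratum matchers below are invoked per CPU (SCOPE_LOCAL_CPU) with
 * preemption disabled, and compare the local MIDR_EL1 against the ranges
 * an erratum entry declares as affected.
 */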
static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
{
	u32 midr = read_cpuid_id();

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range(midr, &entry->midr_range);
}

static bool __maybe_unused
is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry,
			    int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list);
}

static bool __maybe_unused
is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
{
	u32 model;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	model = read_cpuid_id();
	model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
		 MIDR_ARCHITECTURE_MASK;

	return model == entry->midr_range.model;
}

static bool
has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
			  int scope)
{
	u64 mask = CTR_CACHE_MINLINE_MASK;

	/* Skip matching the min line sizes for cache type check */
	if (entry->capability == ARM64_MISMATCHED_CACHE_TYPE)
		mask ^= arm64_ftr_reg_ctrel0.strict_mask;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return (read_cpuid_cachetype() & mask) !=
	       (arm64_ftr_reg_ctrel0.sys_val & mask);
}
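
/*
 * Clearing SCTLR_EL1.UCT makes EL0 reads of CTR_EL0 trap to the kernel,
 * which can then present the system-wide safe cache geometry instead of
 * this CPU's own value.
 */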
static void
cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused)
{
	/* Clear SCTLR_EL1.UCT */
	config_sctlr_el1(SCTLR_EL1_UCT, 0);
}

#ifdef CONFIG_KVM
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

extern char __smccc_workaround_1_smc_start[];
extern char __smccc_workaround_1_smc_end[];
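
/*
 * Populate one 2KB hyp vector slot: the hardening sequence is copied in
 * front of each 0x80-byte vector entry within the slot, then the slot is
 * flushed so the new instructions are visible.
 */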
static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
				const char *hyp_vecs_end)
{
	void *dst = lm_alias(__bp_harden_hyp_vecs_start + slot * SZ_2K);
	int i;

	for (i = 0; i < SZ_2K; i += 0x80)
		memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);

	flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
}
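
/*
 * Install @fn as this CPU's branch predictor hardening callback. Hyp vector
 * slots are shared: if another CPU already uses the same callback its slot
 * is reused, otherwise the next free slot is claimed and populated.
 */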
static void install_bp_hardening_cb(bp_hardening_cb_t fn,
				    const char *hyp_vecs_start,
				    const char *hyp_vecs_end)
{
	static int last_slot = -1;
	static DEFINE_SPINLOCK(bp_lock);
	int cpu, slot = -1;

	spin_lock(&bp_lock);
	for_each_possible_cpu(cpu) {
		if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
			slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
			break;
		}
	}

	if (slot == -1) {
		last_slot++;
		BUG_ON(((__bp_harden_hyp_vecs_end - __bp_harden_hyp_vecs_start)
			/ SZ_2K) <= last_slot);
		slot = last_slot;
		__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
	}

	__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
	__this_cpu_write(bp_hardening_data.fn, fn);
	spin_unlock(&bp_lock);
}
#else
#define __smccc_workaround_1_smc_start		NULL
#define __smccc_workaround_1_smc_end		NULL

static void install_bp_hardening_cb(bp_hardening_cb_t fn,
				    const char *hyp_vecs_start,
				    const char *hyp_vecs_end)
{
	__this_cpu_write(bp_hardening_data.fn, fn);
}
#endif	/* CONFIG_KVM */

#include <uapi/linux/psci.h>
#include <linux/arm-smccc.h>
#include <linux/psci.h>

static void call_smc_arch_workaround_1(void)
{
	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

static void call_hvc_arch_workaround_1(void)
{
	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}
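
/*
 * Overwrite the branch predictor's return (link) stack with a run of dummy
 * branch-and-link instructions, then restore the original link register.
 */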
static void qcom_link_stack_sanitization(void)
{
	u64 tmp;

	asm volatile("mov	%0, x30		\n"
		     ".rept	16		\n"
		     "bl	. + 4		\n"
		     ".endr			\n"
		     "mov	x30, %0		\n"
		     : "=&r" (tmp));
}
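
/* "nospectre_v2" on the command line disables branch predictor hardening. */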
static bool __nospectre_v2;
static int __init parse_nospectre_v2(char *str)
{
	__nospectre_v2 = true;
	return 0;
}
early_param("nospectre_v2", parse_nospectre_v2);

/*
 * -1: No workaround
 *  0: No workaround required
 *  1: Workaround installed
 */
static int detect_harden_bp_fw(void)
{
	bp_hardening_cb_t cb;
	void *smccc_start, *smccc_end;
	struct arm_smccc_res res;
	u32 midr = read_cpuid_id();

	if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
		return -1;

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
		switch ((int)res.a0) {
		case 1:
			/* Firmware says we're just fine */
			return 0;
		case 0:
			cb = call_hvc_arch_workaround_1;
			/* This is a guest, no need to patch KVM vectors */
			smccc_start = NULL;
			smccc_end = NULL;
			break;
		default:
			return -1;
		}
		break;
	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
		switch ((int)res.a0) {
		case 1:
			/* Firmware says we're just fine */
			return 0;
		case 0:
			cb = call_smc_arch_workaround_1;
			smccc_start = __smccc_workaround_1_smc_start;
			smccc_end = __smccc_workaround_1_smc_end;
			break;
		default:
			return -1;
		}
		break;
	default:
		return -1;
	}

	if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
	    ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
		cb = qcom_link_stack_sanitization;

	if (IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR))
		install_bp_hardening_cb(cb, smccc_start, smccc_end);

	return 1;
}
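
/* Non-zero when this CPU must issue ARCH_WORKAROUND_2 calls on kernel entry/exit. */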
DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);

int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
static bool __ssb_safe = true;
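
/* Policies selectable with the "ssbd=" kernel command line parameter. */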
static const struct ssbd_options {
	const char	*str;
	int		state;
} ssbd_options[] = {
	{ "force-on",	ARM64_SSBD_FORCE_ENABLE, },
	{ "force-off",	ARM64_SSBD_FORCE_DISABLE, },
	{ "kernel",	ARM64_SSBD_KERNEL, },
};

static int __init ssbd_cfg(char *buf)
{
	int i;

	if (!buf || !buf[0])
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(ssbd_options); i++) {
		int len = strlen(ssbd_options[i].str);

		if (strncmp(buf, ssbd_options[i].str, len))
			continue;

		ssbd_state = ssbd_options[i].state;
		return 0;
	}

	return -EINVAL;
}
early_param("ssbd", ssbd_cfg);
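
/*
 * Alternative callback: patch the single placeholder instruction with an HVC
 * or SMC, matching the conduit the PSCI firmware advertises.
 */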
void __init arm64_update_smccc_conduit(struct alt_instr *alt,
				       __le32 *origptr, __le32 *updptr,
				       int nr_inst)
{
	u32 insn;

	BUG_ON(nr_inst != 1);

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		insn = aarch64_insn_get_hvc_value();
		break;
	case PSCI_CONDUIT_SMC:
		insn = aarch64_insn_get_smc_value();
		break;
	default:
		return;
	}

	*updptr = cpu_to_le32(insn);
}

void __init arm64_enable_wa2_handling(struct alt_instr *alt,
				      __le32 *origptr, __le32 *updptr,
				      int nr_inst)
{
	BUG_ON(nr_inst != 1);
	/*
	 * Only allow mitigation on EL1 entry/exit and guest
	 * ARCH_WORKAROUND_2 handling if the SSBD state allows it to
	 * be modified.
	 */
	if (arm64_get_ssbd_state() == ARM64_SSBD_KERNEL)
		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
}
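
/*
 * Toggle the SSB mitigation on this CPU: flip PSTATE.SSBS directly when the
 * CPU implements it, otherwise ask firmware via ARCH_WORKAROUND_2.
 */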
void arm64_set_ssbd_mitigation(bool state)
{
	if (!IS_ENABLED(CONFIG_ARM64_SSBD)) {
		pr_info_once("SSBD disabled by kernel configuration\n");
		return;
	}

	if (this_cpu_has_cap(ARM64_SSBS)) {
		if (state)
			asm volatile(SET_PSTATE_SSBS(0));
		else
			asm volatile(SET_PSTATE_SSBS(1));
		return;
	}

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
		break;
	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}
}
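
/*
 * Per-CPU matcher for ARM64_SSBD: checks SSBS and the firmware's
 * ARCH_WORKAROUND_2 support, updates the global ssbd_state/__ssb_safe
 * tracking, and applies the policy chosen on the command line.
 */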
static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
				int scope)
{
	struct arm_smccc_res res;
	bool required = true;
	s32 val;
	bool this_cpu_safe = false;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	if (cpu_mitigations_off())
		ssbd_state = ARM64_SSBD_FORCE_DISABLE;

	/* delay setting __ssb_safe until we get a firmware response */
	if (is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list))
		this_cpu_safe = true;

	if (this_cpu_has_cap(ARM64_SSBS)) {
		if (!this_cpu_safe)
			__ssb_safe = false;
		required = false;
		goto out_printmsg;
	}

	if (psci_ops.smccc_version == SMCCC_VERSION_1_0) {
		ssbd_state = ARM64_SSBD_UNKNOWN;
		if (!this_cpu_safe)
			__ssb_safe = false;
		return false;
	}

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_2, &res);
		break;
	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_2, &res);
		break;
	default:
		ssbd_state = ARM64_SSBD_UNKNOWN;
		if (!this_cpu_safe)
			__ssb_safe = false;
		return false;
	}

	val = (s32)res.a0;

	switch (val) {
	case SMCCC_RET_NOT_SUPPORTED:
		ssbd_state = ARM64_SSBD_UNKNOWN;
		if (!this_cpu_safe)
			__ssb_safe = false;
		return false;

	/* machines with mixed mitigation requirements must not return this */
	case SMCCC_RET_NOT_REQUIRED:
		pr_info_once("%s mitigation not required\n", entry->desc);
		ssbd_state = ARM64_SSBD_MITIGATED;
		return false;

	case SMCCC_RET_SUCCESS:
		__ssb_safe = false;
		required = true;
		break;

	case 1:	/* Mitigation not required on this CPU */
		required = false;
		break;

	default:
		WARN_ON(1);
		if (!this_cpu_safe)
			__ssb_safe = false;
		return false;
	}

	switch (ssbd_state) {
	case ARM64_SSBD_FORCE_DISABLE:
		arm64_set_ssbd_mitigation(false);
		required = false;
		break;

	case ARM64_SSBD_KERNEL:
		if (required) {
			__this_cpu_write(arm64_ssbd_callback_required, 1);
			arm64_set_ssbd_mitigation(true);
		}
		break;

	case ARM64_SSBD_FORCE_ENABLE:
		arm64_set_ssbd_mitigation(true);
		required = true;
		break;

	default:
		WARN_ON(1);
		break;
	}

out_printmsg:
	switch (ssbd_state) {
	case ARM64_SSBD_FORCE_DISABLE:
		pr_info_once("%s disabled from command-line\n", entry->desc);
		break;

	case ARM64_SSBD_FORCE_ENABLE:
		pr_info_once("%s forced from command-line\n", entry->desc);
		break;
	}

	return required;
}

/* known invulnerable cores */
static const struct midr_range arm64_ssb_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
	{},
};
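
/*
 * Helpers for building arm64_cpu_capabilities entries from a MIDR range, a
 * single variant/revision, or a list of MIDR ranges.
 */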
#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)		\
	.matches = is_affected_midr_range,				\
	.midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_ALL_VERSIONS(model)					\
	.matches = is_affected_midr_range,				\
	.midr_range = MIDR_ALL_VERSIONS(model)

#define MIDR_FIXED(rev, revidr_mask)					\
	.fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}

#define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max)		\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,				\
	CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_RANGE_LIST(list)					\
	.matches = is_affected_midr_range_list,				\
	.midr_range_list = list

/* Errata affecting a range of revisions of a given model variant */
#define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max)			\
	ERRATA_MIDR_RANGE(m, var, r_min, var, r_max)

/* Errata affecting a single variant/revision of a model */
#define ERRATA_MIDR_REV(model, var, rev)				\
	ERRATA_MIDR_RANGE(model, var, rev, var, rev)

/* Errata affecting all variants/revisions of a given model */
#define ERRATA_MIDR_ALL_VERSIONS(model)					\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,				\
	CAP_MIDR_ALL_VERSIONS(model)

/* Errata affecting a list of midr ranges, with the same workaround */
#define ERRATA_MIDR_RANGE_LIST(midr_list)				\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,				\
	CAP_MIDR_RANGE_LIST(midr_list)

/* Track overall mitigation state. We are only mitigated if all cores are ok */
static bool __hardenbp_enab = true;
static bool __spectrev2_safe = true;

/*
 * List of CPUs that do not need any Spectre-v2 mitigation at all.
 */
static const struct midr_range spectre_v2_safe_list[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
	{},
};

/*
 * Track overall bp hardening for all heterogeneous cores in the machine.
 * We are only considered "safe" if all booted cores are known safe.
 */
static bool __maybe_unused
check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
{
	int need_wa;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	/* If the CPU has CSV2 set, we're safe */
	if (cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64PFR0_EL1),
						 ID_AA64PFR0_CSV2_SHIFT))
		return false;

	/* Alternatively, we have a list of unaffected CPUs */
	if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
		return false;

	/* Fallback to firmware detection */
	need_wa = detect_harden_bp_fw();
	if (!need_wa)
		return false;

	__spectrev2_safe = false;

	if (!IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR)) {
		pr_warn_once("spectrev2 mitigation disabled by kernel configuration\n");
		__hardenbp_enab = false;
		return false;
	}

	/* forced off */
	if (__nospectre_v2 || cpu_mitigations_off()) {
		pr_info_once("spectrev2 mitigation disabled by command line option\n");
		__hardenbp_enab = false;
		return false;
	}

	if (need_wa < 0) {
		pr_warn_once("ARM_SMCCC_ARCH_WORKAROUND_1 missing from firmware\n");
		__hardenbp_enab = false;
	}

	return (need_wa > 0);
}

const struct arm64_cpu_capabilities arm64_errata[] = {
#if	defined(CONFIG_ARM64_ERRATUM_826319) || \
	defined(CONFIG_ARM64_ERRATUM_827319) || \
	defined(CONFIG_ARM64_ERRATUM_824069)
	{
	/* Cortex-A53 r0p[012] */
		.desc = "ARM errata 826319, 827319, 824069",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
		.cpu_enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_819472
	{
	/* Cortex-A53 r0p[01] */
		.desc = "ARM errata 819472",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
		.cpu_enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_832075
	{
	/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 832075",
		.capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_834220
	{
	/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 834220",
		.capability = ARM64_WORKAROUND_834220,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
	{
	/* Cortex-A53 r0p[01234] */
		.desc = "ARM erratum 845719",
		.capability = ARM64_WORKAROUND_845719,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23154
	{
	/* Cavium ThunderX, pass 1.x */
		.desc = "Cavium erratum 23154",
		.capability = ARM64_WORKAROUND_CAVIUM_23154,
		ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX, 0, 0, 1),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
	{
	/* Cavium ThunderX, T88 pass 1.x - 2.1 */
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		ERRATA_MIDR_RANGE(MIDR_THUNDERX,
				  0, 0,
				  1, 1),
	},
	{
	/* Cavium ThunderX, T81 pass 1.0 */
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		ERRATA_MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_30115
	{
	/* Cavium ThunderX, T88 pass 1.x - 2.2 */
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		ERRATA_MIDR_RANGE(MIDR_THUNDERX,
				  0, 0,
				  1, 2),
	},
	{
	/* Cavium ThunderX, T81 pass 1.0 - 1.2 */
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2),
	},
	{
	/* Cavium ThunderX, T83 pass 1.0 */
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		ERRATA_MIDR_REV(MIDR_THUNDERX_83XX, 0, 0),
	},
#endif
	{
		.desc = "Mismatched cache line size",
		.capability = ARM64_MISMATCHED_CACHE_LINE_SIZE,
		.matches = has_mismatched_cache_type,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.cpu_enable = cpu_enable_trap_ctr_access,
	},
	{
		.desc = "Mismatched cache type",
		.capability = ARM64_MISMATCHED_CACHE_TYPE,
		.matches = has_mismatched_cache_type,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.cpu_enable = cpu_enable_trap_ctr_access,
	},
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
	{
		.desc = "Qualcomm Technologies Falkor erratum 1003",
		.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
	},
	{
		.desc = "Qualcomm Technologies Kryo erratum 1003",
		.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.midr_range.model = MIDR_QCOM_KRYO,
		.matches = is_kryo_midr,
	},
#endif
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
	{
		.desc = "Qualcomm Technologies Falkor erratum 1009",
		.capability = ARM64_WORKAROUND_REPEAT_TLBI,
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
	{
	/* Cortex-A73 all versions */
		.desc = "ARM erratum 858921",
		.capability = ARM64_WORKAROUND_858921,
		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
	},
#endif
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = check_branch_predictor,
	},
	{
		.desc = "Speculative Store Bypass Disable",
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.capability = ARM64_SSBD,
		.matches = has_ssbd_mitigation,
		.midr_range_list = arm64_ssb_cpus,
	},
	{
	}
};
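
/*
 * sysfs vulnerability reporting: these cpu_show_* hooks back the files in
 * /sys/devices/system/cpu/vulnerabilities/.
 */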
ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "Mitigation: __user pointer sanitization\n");
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	if (__spectrev2_safe)
		return sprintf(buf, "Not affected\n");

	if (__hardenbp_enab)
		return sprintf(buf, "Mitigation: Branch predictor hardening\n");

	return sprintf(buf, "Vulnerable\n");
}

ssize_t cpu_show_spec_store_bypass(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	if (__ssb_safe)
		return sprintf(buf, "Not affected\n");

	switch (ssbd_state) {
	case ARM64_SSBD_KERNEL:
	case ARM64_SSBD_FORCE_ENABLE:
		if (IS_ENABLED(CONFIG_ARM64_SSBD))
			return sprintf(buf,
			    "Mitigation: Speculative Store Bypass disabled via prctl\n");
	}

	return sprintf(buf, "Vulnerable\n");
}