// SPDX-License-Identifier: GPL-2.0-only
/*
 * Contains CPU specific errata definitions
 *
 * Copyright (C) 2014 ARM Ltd.
 */

#include <linux/arm-smccc.h>
#include <linux/types.h>
#include <linux/cpu.h>

#include <asm/cputype.h>
#include <asm/cpufeature.h>
#include <asm/kvm_asm.h>
#include <asm/smp_plat.h>

static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
{
	const struct arm64_midr_revidr *fix;
	u32 midr = read_cpuid_id(), revidr;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	if (!is_midr_in_range(midr, &entry->midr_range))
		return false;

	midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
	revidr = read_cpuid(REVIDR_EL1);
	for (fix = entry->fixed_revs; fix && fix->revidr_mask; fix++)
		if (midr == fix->midr_rv && (revidr & fix->revidr_mask))
			return false;

	return true;
}

static bool __maybe_unused
is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry,
			    int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list);
}

static bool __maybe_unused
is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
{
	u32 model;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	model = read_cpuid_id();
	model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
		 MIDR_ARCHITECTURE_MASK;

	return model == entry->midr_range.model;
}
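
/*
 * Note on the mask above: it keeps the implementer and architecture
 * fields plus only the top nibble of the part number (0xf00 shifted by
 * MIDR_PARTNUM_SHIFT covers MIDR bits [15:12]), so a single entry
 * presumably matches a whole Kryo series while ignoring the variant,
 * revision and lower part-number nibbles.
 */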

static bool
has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
			  int scope)
{
	u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
	u64 sys = arm64_ftr_reg_ctrel0.sys_val & mask;
	u64 ctr_raw, ctr_real;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	/*
	 * We want to make sure that all the CPUs in the system expose
	 * a consistent CTR_EL0 to make sure that applications behave
	 * correctly with migration.
	 *
	 * If a CPU has CTR_EL0.IDC but does not advertise it via CTR_EL0 :
	 *
	 * 1) It is safe if the system doesn't support IDC, as the CPU anyway
	 *    reports IDC = 0, consistent with the rest.
	 *
	 * 2) If the system has IDC, it is still safe as we trap CTR_EL0
	 *    access on this CPU via the ARM64_HAS_CACHE_IDC capability.
	 *
	 * So, we need to make sure either the raw CTR_EL0 or the effective
	 * CTR_EL0 matches the system's copy to allow a secondary CPU to boot.
	 */
	ctr_raw = read_cpuid_cachetype() & mask;
	ctr_real = read_cpuid_effective_cachetype() & mask;

	return (ctr_real != sys) && (ctr_raw != sys);
}
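
/*
 * Illustrative example: with a system-wide CTR_EL0 reporting IDC = 1, a
 * CPU whose raw CTR_EL0 has IDC = 0 but whose effective value has
 * IDC = 1 makes ctr_real match sys, so it is not flagged here (case 2
 * above: its CTR_EL0 accesses are trapped via ARM64_HAS_CACHE_IDC
 * instead). Only a CPU where neither value matches trips the capability.
 */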

static void
cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *cap)
{
	u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
	bool enable_uct_trap = false;

	/* Trap CTR_EL0 access on this CPU, only if it has a mismatch */
	if ((read_cpuid_cachetype() & mask) !=
	    (arm64_ftr_reg_ctrel0.sys_val & mask))
		enable_uct_trap = true;

	/* ... or if the system is affected by an erratum */
	if (cap->capability == ARM64_WORKAROUND_1542419)
		enable_uct_trap = true;

	if (enable_uct_trap)
		sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
}
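
/*
 * Clearing SCTLR_EL1.UCT makes EL0 reads of CTR_EL0 trap to EL1, where
 * the kernel can emulate the register and present the sanitised
 * system-wide value rather than this CPU's own (mismatched or
 * erratum-affected) copy.
 */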

#ifdef CONFIG_ARM64_ERRATUM_1463225
static bool
has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
			       int scope)
{
	return is_affected_midr_range_list(entry, scope) && is_kernel_in_hyp_mode();
}
#endif

static void __maybe_unused
cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
{
	sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0);
}
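
/*
 * Similarly, clearing SCTLR_EL1.UCI traps the EL0 cache maintenance
 * instructions (e.g. DC CVAU, IC IVAU) so the kernel can apply the
 * relevant erratum workaround while emulating them.
 */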

static DEFINE_RAW_SPINLOCK(reg_user_mask_modification);
static void __maybe_unused
cpu_clear_bf16_from_user_emulation(const struct arm64_cpu_capabilities *__unused)
{
	struct arm64_ftr_reg *regp;

	regp = get_arm64_ftr_reg(SYS_ID_AA64ISAR1_EL1);
	if (!regp)
		return;

	raw_spin_lock(&reg_user_mask_modification);
	if (regp->user_mask & ID_AA64ISAR1_EL1_BF16_MASK)
		regp->user_mask &= ~ID_AA64ISAR1_EL1_BF16_MASK;
	raw_spin_unlock(&reg_user_mask_modification);
}
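
/*
 * Removing BF16 from user_mask hides the feature from the ID register
 * value emulated for userspace. This is wired up below as the cpu_enable
 * hook for erratum 2658417, where affected Cortex-A510 parts can produce
 * incorrect BFloat16 results.
 */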

#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_ALL_VERSIONS(model)				\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_ALL_VERSIONS(model)

#define MIDR_FIXED(rev, revidr_mask) \
	.fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}
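
/*
 * MIDR_FIXED() lists (variant/revision, REVIDR mask) pairs for which the
 * erratum has been fixed and advertised via REVIDR_EL1. For example, the
 * erratum 843419 entry below combines
 * ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4) with
 * MIDR_FIXED(0x4, BIT(8)): an r0p4 part that sets REVIDR_EL1[8] is
 * treated as unaffected by is_affected_midr_range().
 */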

#define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_RANGE_LIST(list)				\
	.matches = is_affected_midr_range_list,			\
	.midr_range_list = list

/* Errata affecting a range of revisions of a given model variant */
#define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max)		\
	ERRATA_MIDR_RANGE(m, var, r_min, var, r_max)

/* Errata affecting a single variant/revision of a model */
#define ERRATA_MIDR_REV(model, var, rev) \
	ERRATA_MIDR_RANGE(model, var, rev, var, rev)

/* Errata affecting all variants/revisions of a given model */
#define ERRATA_MIDR_ALL_VERSIONS(model)				\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_ALL_VERSIONS(model)

/* Errata affecting a list of midr ranges, with the same workaround */
#define ERRATA_MIDR_RANGE_LIST(midr_list)			\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_RANGE_LIST(midr_list)
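
/*
 * A minimal sketch of how the macros above compose into a table entry
 * (the capability name here is hypothetical, for illustration only):
 *
 *	{
 *		.desc = "Example erratum",
 *		.capability = ARM64_WORKAROUND_EXAMPLE,
 *		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
 *	},
 *
 * The ERRATA_* variants supply .type, .matches and .midr_range, leaving
 * an entry to add only its description, capability bit and any hooks.
 */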

static const __maybe_unused struct midr_range tx2_family_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
	MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
	{},
};

static bool __maybe_unused
needs_tx2_tvm_workaround(const struct arm64_cpu_capabilities *entry,
			 int scope)
{
	int i;

	if (!is_affected_midr_range_list(entry, scope) ||
	    !is_hyp_mode_available())
		return false;

	/* Only needed when at least one CPU reports a non-zero Aff0, i.e. SMT */
	for_each_possible_cpu(i) {
		if (MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0) != 0)
			return true;
	}

	return false;
}

static bool __maybe_unused
has_neoverse_n1_erratum_1542419(const struct arm64_cpu_capabilities *entry,
				int scope)
{
	u32 midr = read_cpuid_id();
	bool has_dic = read_cpuid_cachetype() & BIT(CTR_EL0_DIC_SHIFT);
	const struct midr_range range = MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1);

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range(midr, &range) && has_dic;
}
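
/*
 * CTR_EL0.DIC = 1 advertises that userspace may skip I-cache
 * invalidation when writing instructions, which is precisely the case
 * erratum 1542419 breaks on Neoverse-N1. The matching entry below uses
 * cpu_enable_trap_ctr_access so that a CTR_EL0 value with DIC hidden
 * can be presented to userspace instead.
 */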

#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = {
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
	{
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0)
	},
	{
		.midr_range.model = MIDR_QCOM_KRYO,
		.matches = is_kryo_midr,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1286807
	{
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
	},
	{
		/* Kryo4xx Gold (rcpe to rfpe) => (r0p0 to r3p0) */
		ERRATA_MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xe),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2441007
	{
		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2441009
	{
		/* Cortex-A510 r0p0 -> r1p1. Fixed in r1p2 */
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1),
	},
#endif
	{},
};
#endif
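
/*
 * For the errata above, a single TLB invalidation may not take effect;
 * the ARM64_WORKAROUND_REPEAT_TLBI capability makes the TLB flushing
 * code follow the TLBI with a DSB and then issue it a second time.
 */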

#ifdef CONFIG_CAVIUM_ERRATUM_23154
static const struct midr_range cavium_erratum_23154_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_THUNDERX),
	MIDR_ALL_VERSIONS(MIDR_THUNDERX_81XX),
	MIDR_ALL_VERSIONS(MIDR_THUNDERX_83XX),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_98XX),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_96XX),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_95XX),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_95XXN),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_95XXMM),
	MIDR_ALL_VERSIONS(MIDR_OCTX2_95XXO),
	{},
};
#endif

#ifdef CONFIG_CAVIUM_ERRATUM_27456
const struct midr_range cavium_erratum_27456_cpus[] = {
	/* Cavium ThunderX, T88 pass 1.x - 2.1 */
	MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 1),
	/* Cavium ThunderX, T81 pass 1.0 */
	MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
	{},
};
#endif

#ifdef CONFIG_CAVIUM_ERRATUM_30115
static const struct midr_range cavium_erratum_30115_cpus[] = {
	/* Cavium ThunderX, T88 pass 1.x - 2.2 */
	MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 2),
	/* Cavium ThunderX, T81 pass 1.0 - 1.2 */
	MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2),
	/* Cavium ThunderX, T83 pass 1.0 */
	MIDR_REV(MIDR_THUNDERX_83XX, 0, 0),
	{},
};
#endif

#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
static const struct arm64_cpu_capabilities qcom_erratum_1003_list[] = {
	{
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
	},
	{
		.midr_range.model = MIDR_QCOM_KRYO,
		.matches = is_kryo_midr,
	},
	{},
};
#endif

#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
static const struct midr_range workaround_clean_cache[] = {
#if defined(CONFIG_ARM64_ERRATUM_826319) || \
	defined(CONFIG_ARM64_ERRATUM_827319) || \
	defined(CONFIG_ARM64_ERRATUM_824069)
	/* Cortex-A53 r0p[012]: ARM errata 826319, 827319, 824069 */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
#endif
#ifdef CONFIG_ARM64_ERRATUM_819472
	/* Cortex-A53 r0p[01] : ARM errata 819472 */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
#endif
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_1418040
/*
 * - 1188873 affects r0p0 to r2p0
 * - 1418040 affects r0p0 to r3p1
 */
static const struct midr_range erratum_1418040_list[] = {
	/* Cortex-A76 r0p0 to r3p1 */
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
	/* Neoverse-N1 r0p0 to r3p1 */
	MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 3, 1),
	/* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
	MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf),
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_845719
static const struct midr_range erratum_845719_list[] = {
	/* Cortex-A53 r0p[01234] */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
	/* Brahma-B53 r0p[0] */
	MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
	/* Kryo2XX Silver rAp4 */
	MIDR_REV(MIDR_QCOM_KRYO_2XX_SILVER, 0xa, 0x4),
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_843419
static const struct arm64_cpu_capabilities erratum_843419_list[] = {
	{
		/* Cortex-A53 r0p[01234] */
		.matches = is_affected_midr_range,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
		MIDR_FIXED(0x4, BIT(8)),
	},
	{
		/* Brahma-B53 r0p[0] */
		.matches = is_affected_midr_range,
		ERRATA_MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
	},
	{},
};
#endif

#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
static const struct midr_range erratum_speculative_at_list[] = {
#ifdef CONFIG_ARM64_ERRATUM_1165522
	/* Cortex A76 r0p0 to r2p0 */
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
#endif
#ifdef CONFIG_ARM64_ERRATUM_1319367
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
#endif
#ifdef CONFIG_ARM64_ERRATUM_1530923
	/* Cortex A55 r0p0 to r2p0 */
	MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 2, 0),
	/* Kryo4xx Silver (rdpe => r1p0) */
	MIDR_REV(MIDR_QCOM_KRYO_4XX_SILVER, 0xd, 0xe),
#endif
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_1463225
static const struct midr_range erratum_1463225[] = {
	/* Cortex-A76 r0p0 - r3p1 */
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
	/* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
	MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf),
	{},
};
#endif

#ifdef CONFIG_ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE
static const struct midr_range trbe_overwrite_fill_mode_cpus[] = {
#ifdef CONFIG_ARM64_ERRATUM_2139208
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
	MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100),
#endif
#ifdef CONFIG_ARM64_ERRATUM_2119858
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
	MIDR_RANGE(MIDR_CORTEX_X2, 0, 0, 2, 0),
#endif
	{},
};
#endif	/* CONFIG_ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE */

#ifdef CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE
static const struct midr_range tsb_flush_fail_cpus[] = {
#ifdef CONFIG_ARM64_ERRATUM_2067961
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
	MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100),
#endif
#ifdef CONFIG_ARM64_ERRATUM_2054223
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
#endif
	{},
};
#endif	/* CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE */

#ifdef CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE
static struct midr_range trbe_write_out_of_range_cpus[] = {
#ifdef CONFIG_ARM64_ERRATUM_2253138
	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
	MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100),
#endif
#ifdef CONFIG_ARM64_ERRATUM_2224489
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
	MIDR_RANGE(MIDR_CORTEX_X2, 0, 0, 2, 0),
#endif
	{},
};
#endif	/* CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE */

#ifdef CONFIG_ARM64_ERRATUM_1742098
static struct midr_range broken_aarch32_aes[] = {
	MIDR_RANGE(MIDR_CORTEX_A57, 0, 1, 0xf, 0xf),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
	{},
};
#endif	/* CONFIG_ARM64_ERRATUM_1742098 */

const struct arm64_cpu_capabilities arm64_errata[] = {
#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
	{
		.desc = "ARM errata 826319, 827319, 824069, or 819472",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		ERRATA_MIDR_RANGE_LIST(workaround_clean_cache),
		.cpu_enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_832075
	{
		/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 832075",
		.capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_834220
	{
		/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 834220",
		.capability = ARM64_WORKAROUND_834220,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_843419
	{
		.desc = "ARM erratum 843419",
		.capability = ARM64_WORKAROUND_843419,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = cpucap_multi_entry_cap_matches,
		.match_list = erratum_843419_list,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
	{
		.desc = "ARM erratum 845719",
		.capability = ARM64_WORKAROUND_845719,
		ERRATA_MIDR_RANGE_LIST(erratum_845719_list),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23154
	{
		.desc = "Cavium errata 23154 and 38545",
		.capability = ARM64_WORKAROUND_CAVIUM_23154,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		ERRATA_MIDR_RANGE_LIST(cavium_erratum_23154_cpus),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
	{
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		ERRATA_MIDR_RANGE_LIST(cavium_erratum_27456_cpus),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_30115
	{
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		ERRATA_MIDR_RANGE_LIST(cavium_erratum_30115_cpus),
	},
#endif
	{
		.desc = "Mismatched cache type (CTR_EL0)",
		.capability = ARM64_MISMATCHED_CACHE_TYPE,
		.matches = has_mismatched_cache_type,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.cpu_enable = cpu_enable_trap_ctr_access,
	},
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
	{
		.desc = "Qualcomm Technologies Falkor/Kryo erratum 1003",
		.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = cpucap_multi_entry_cap_matches,
		.match_list = qcom_erratum_1003_list,
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
	{
		.desc = "Qualcomm erratum 1009, or ARM errata 1286807, 2441009",
		.capability = ARM64_WORKAROUND_REPEAT_TLBI,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = cpucap_multi_entry_cap_matches,
		.match_list = arm64_repeat_tlbi_list,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
	{
		/* Cortex-A73 all versions */
		.desc = "ARM erratum 858921",
		.capability = ARM64_WORKAROUND_858921,
		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
	},
#endif
	{
		.desc = "Spectre-v2",
		.capability = ARM64_SPECTRE_V2,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_spectre_v2,
		.cpu_enable = spectre_v2_enable_mitigation,
	},
#ifdef CONFIG_RANDOMIZE_BASE
	{
		/* Must come after the Spectre-v2 entry */
		.desc = "Spectre-v3a",
		.capability = ARM64_SPECTRE_V3A,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_spectre_v3a,
		.cpu_enable = spectre_v3a_enable_mitigation,
	},
#endif
	{
		.desc = "Spectre-v4",
		.capability = ARM64_SPECTRE_V4,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_spectre_v4,
		.cpu_enable = spectre_v4_enable_mitigation,
	},
	{
		.desc = "Spectre-BHB",
		.capability = ARM64_SPECTRE_BHB,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = is_spectre_bhb_affected,
		.cpu_enable = spectre_bhb_enable_mitigation,
	},
#ifdef CONFIG_ARM64_ERRATUM_1418040
	{
		.desc = "ARM erratum 1418040",
		.capability = ARM64_WORKAROUND_1418040,
		ERRATA_MIDR_RANGE_LIST(erratum_1418040_list),
		/*
		 * We need to allow affected CPUs to come in late, but
		 * also need the non-affected CPUs to be able to come
		 * in at any point in time. Wonderful.
		 */
		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
	{
		.desc = "ARM errata 1165522, 1319367, or 1530923",
		.capability = ARM64_WORKAROUND_SPECULATIVE_AT,
		ERRATA_MIDR_RANGE_LIST(erratum_speculative_at_list),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1463225
	{
		.desc = "ARM erratum 1463225",
		.capability = ARM64_WORKAROUND_1463225,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_cortex_a76_erratum_1463225,
		.midr_range_list = erratum_1463225,
	},
#endif
#ifdef CONFIG_CAVIUM_TX2_ERRATUM_219
	{
		.desc = "Cavium ThunderX2 erratum 219 (KVM guest sysreg trapping)",
		.capability = ARM64_WORKAROUND_CAVIUM_TX2_219_TVM,
		ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
		.matches = needs_tx2_tvm_workaround,
	},
	{
		.desc = "Cavium ThunderX2 erratum 219 (PRFM removal)",
		.capability = ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM,
		ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1542419
	{
		/* we depend on the firmware portion for correctness */
		.desc = "ARM erratum 1542419 (kernel portion)",
		.capability = ARM64_WORKAROUND_1542419,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_neoverse_n1_erratum_1542419,
		.cpu_enable = cpu_enable_trap_ctr_access,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1508412
	{
		/* we depend on the firmware portion for correctness */
		.desc = "ARM erratum 1508412 (kernel portion)",
		.capability = ARM64_WORKAROUND_1508412,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A77,
				  0, 0,
				  1, 0),
	},
#endif
#ifdef CONFIG_NVIDIA_CARMEL_CNP_ERRATUM
	{
		.desc = "NVIDIA Carmel CNP erratum",
		.capability = ARM64_WORKAROUND_NVIDIA_CARMEL_CNP,
		ERRATA_MIDR_ALL_VERSIONS(MIDR_NVIDIA_CARMEL),
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE
	{
		/*
		 * The erratum workaround is handled within the TRBE
		 * driver and can be applied per-cpu. So, we can allow
		 * a late CPU to come online with this erratum.
		 */
		.desc = "ARM erratum 2119858 or 2139208",
		.capability = ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE,
		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
		CAP_MIDR_RANGE_LIST(trbe_overwrite_fill_mode_cpus),
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE
	{
		.desc = "ARM erratum 2067961 or 2054223",
		.capability = ARM64_WORKAROUND_TSB_FLUSH_FAILURE,
		ERRATA_MIDR_RANGE_LIST(tsb_flush_fail_cpus),
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE
	{
		.desc = "ARM erratum 2253138 or 2224489",
		.capability = ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE,
		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
		CAP_MIDR_RANGE_LIST(trbe_write_out_of_range_cpus),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2077057
	{
		.desc = "ARM erratum 2077057",
		.capability = ARM64_WORKAROUND_2077057,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2064142
	{
		.desc = "ARM erratum 2064142",
		.capability = ARM64_WORKAROUND_2064142,

		/* Cortex-A510 r0p0 - r0p2 */
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2)
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2457168
	{
		.desc = "ARM erratum 2457168",
		.capability = ARM64_WORKAROUND_2457168,
		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,

		/* Cortex-A510 r0p0-r1p1 */
		CAP_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1)
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2038923
	{
		.desc = "ARM erratum 2038923",
		.capability = ARM64_WORKAROUND_2038923,

		/* Cortex-A510 r0p0 - r0p2 */
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2)
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1902691
	{
		.desc = "ARM erratum 1902691",
		.capability = ARM64_WORKAROUND_1902691,

		/* Cortex-A510 r0p0 - r0p1 */
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 1)
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1742098
	{
		.desc = "ARM erratum 1742098",
		.capability = ARM64_WORKAROUND_1742098,
		CAP_MIDR_RANGE_LIST(broken_aarch32_aes),
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_2658417
	{
		.desc = "ARM erratum 2658417",
		.capability = ARM64_WORKAROUND_2658417,
		/* Cortex-A510 r0p0 - r1p1 */
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1),
		MIDR_FIXED(MIDR_CPU_VAR_REV(1,1), BIT(25)),
		.cpu_enable = cpu_clear_bf16_from_user_emulation,
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD
	{
		.desc = "ARM erratum 2966298",
		.capability = ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD,
		/* Cortex-A520 r0p0 - r0p1 */
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A520, 0, 0, 1),
	},
#endif
#ifdef CONFIG_AMPERE_ERRATUM_AC03_CPU_38
	{
		.desc = "AmpereOne erratum AC03_CPU_38",
		.capability = ARM64_WORKAROUND_AMPERE_AC03_CPU_38,
		ERRATA_MIDR_ALL_VERSIONS(MIDR_AMPERE1),
	},
#endif
	{
	}
};