// SPDX-License-Identifier: GPL-2.0-only
/*
 * Contains CPU specific errata definitions
 *
 * Copyright (C) 2014 ARM Ltd.
 */

#include <linux/arm-smccc.h>
#include <linux/types.h>
#include <linux/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>
#include <asm/kvm_asm.h>
#include <asm/smp_plat.h>

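/*
 * Matching helpers: each erratum entry below points one of these at its
 * .matches callback. They compare the local CPU's MIDR (and, where needed,
 * REVIDR) against the affected ranges and are expected to run with
 * SCOPE_LOCAL_CPU, i.e. on the CPU being checked and non-preemptible.
 */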
static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
{
        const struct arm64_midr_revidr *fix;
        u32 midr = read_cpuid_id(), revidr;

        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
        if (!is_midr_in_range(midr, &entry->midr_range))
                return false;

        midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
        revidr = read_cpuid(REVIDR_EL1);
        for (fix = entry->fixed_revs; fix && fix->revidr_mask; fix++)
                if (midr == fix->midr_rv && (revidr & fix->revidr_mask))
                        return false;

        return true;
}

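/*
 * List variant of the range check above: returns true when the local CPU's
 * MIDR falls in any of the ranges in entry->midr_range_list.
 */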
static bool __maybe_unused
is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry,
                            int scope)
{
        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
        return is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list);
}

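/*
 * Kryo parts are matched on implementer, architecture and the top nibble
 * of the part number only, so a single .midr_range.model value can cover
 * the various Kryo derivatives.
 */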
static bool __maybe_unused
is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
{
        u32 model;

        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

        model = read_cpuid_id();
        model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
                 MIDR_ARCHITECTURE_MASK;

        return model == entry->midr_range.model;
}

static bool
has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
                          int scope)
{
        u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
        u64 sys = arm64_ftr_reg_ctrel0.sys_val & mask;
        u64 ctr_raw, ctr_real;

        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

        /*
         * We want to make sure that all the CPUs in the system expose
         * a consistent CTR_EL0 to make sure that applications behave
         * correctly with migration.
         *
         * If a CPU has CTR_EL0.IDC but does not advertise it via CTR_EL0:
         *
         * 1) It is safe if the system doesn't support IDC, as the CPU anyway
         *    reports IDC = 0, consistent with the rest.
         *
         * 2) If the system has IDC, it is still safe as we trap CTR_EL0
         *    access on this CPU via the ARM64_HAS_CACHE_IDC capability.
         *
         * So, we need to make sure either the raw CTR_EL0 or the effective
         * CTR_EL0 matches the system's copy to allow a secondary CPU to boot.
         */
        ctr_raw = read_cpuid_cachetype() & mask;
        ctr_real = read_cpuid_effective_cachetype() & mask;

        return (ctr_real != sys) && (ctr_raw != sys);
}

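/*
 * Clearing SCTLR_EL1.UCT makes EL0 reads of CTR_EL0 trap to EL1, where the
 * kernel can emulate them with the system-wide safe value instead of this
 * CPU's mismatched copy.
 */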
static void
cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *cap)
{
        u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
        bool enable_uct_trap = false;

        /* Trap CTR_EL0 access on this CPU, only if it has a mismatch */
        if ((read_cpuid_cachetype() & mask) !=
            (arm64_ftr_reg_ctrel0.sys_val & mask))
                enable_uct_trap = true;

        /* ... or if the system is affected by an erratum */
        if (cap->capability == ARM64_WORKAROUND_1542419)
                enable_uct_trap = true;

        if (enable_uct_trap)
                sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
}

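/*
 * The erratum 1463225 matcher below only reports the CPU as affected when
 * the kernel itself runs at EL2 (VHE); see is_kernel_in_hyp_mode().
 */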
#ifdef CONFIG_ARM64_ERRATUM_1463225
static bool
has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
                               int scope)
{
        return is_affected_midr_range_list(entry, scope) && is_kernel_in_hyp_mode();
}
#endif

static void __maybe_unused
cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
{
        sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0);
}

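/*
 * The helpers below expand to designated initializers for the .matches,
 * .midr_range/.midr_range_list and .type fields, so each erratum entry in
 * arm64_errata[] can be written as a compact initializer per affected range.
 */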
#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)      \
        .matches = is_affected_midr_range,                      \
        .midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_ALL_VERSIONS(model)                            \
        .matches = is_affected_midr_range,                      \
        .midr_range = MIDR_ALL_VERSIONS(model)

#define MIDR_FIXED(rev, revidr_mask)                            \
        .fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}

#define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max)   \
        .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,                 \
        CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_RANGE_LIST(list)                               \
        .matches = is_affected_midr_range_list,                 \
        .midr_range_list = list

/* Errata affecting a range of revisions of a given model variant */
#define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max)             \
        ERRATA_MIDR_RANGE(m, var, r_min, var, r_max)

/* Errata affecting a single variant/revision of a model */
#define ERRATA_MIDR_REV(model, var, rev)                        \
        ERRATA_MIDR_RANGE(model, var, rev, var, rev)

/* Errata affecting all variants/revisions of a given model */
#define ERRATA_MIDR_ALL_VERSIONS(model)                         \
        .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,                 \
        CAP_MIDR_ALL_VERSIONS(model)

/* Errata affecting a list of midr ranges, with the same workaround */
#define ERRATA_MIDR_RANGE_LIST(midr_list)                       \
        .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,                 \
        CAP_MIDR_RANGE_LIST(midr_list)

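/*
 * ThunderX2 family parts: both the Broadcom Vulcan and Cavium ThunderX2
 * MIDRs, shared by the two Cavium TX2 erratum 219 entries further down.
 */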
static const __maybe_unused struct midr_range tx2_family_cpus[] = {
        MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
        MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
        {},
};

static bool __maybe_unused
needs_tx2_tvm_workaround(const struct arm64_cpu_capabilities *entry,
                         int scope)
{
        int i;

        if (!is_affected_midr_range_list(entry, scope) ||
            !is_hyp_mode_available())
                return false;

        for_each_possible_cpu(i) {
                if (MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0) != 0)
                        return true;
        }

        return false;
}

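/*
 * Erratum 1542419 (kernel portion) only matters on Neoverse N1 parts that
 * report CTR_EL0.DIC, i.e. that claim instruction cache invalidation is not
 * required for instruction-to-data coherence.
 */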
static bool __maybe_unused
has_neoverse_n1_erratum_1542419(const struct arm64_cpu_capabilities *entry,
                                int scope)
{
        u32 midr = read_cpuid_id();
        bool has_dic = read_cpuid_cachetype() & BIT(CTR_DIC_SHIFT);
        const struct midr_range range = MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1);

        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
        return is_midr_in_range(midr, &range) && has_dic;
}

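/*
 * CPUs on which a single TLB invalidation may not be sufficient: the
 * ARM64_WORKAROUND_REPEAT_TLBI capability built from this list makes the
 * TLB maintenance code repeat the invalidation on affected parts.
 */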
#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = {
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
        {
                ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0)
        },
        {
                .midr_range.model = MIDR_QCOM_KRYO,
                .matches = is_kryo_midr,
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_1286807
        {
                ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
        },
        {
                /* Kryo4xx Gold (rcpe to rfpe) => (r0p0 to r3p0) */
                ERRATA_MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xe),
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_2441007
        {
                ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_2441009
        {
                /* Cortex-A510 r0p0 -> r1p1. Fixed in r1p2 */
                ERRATA_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1),
        },
#endif
        {},
};
#endif

#ifdef CONFIG_CAVIUM_ERRATUM_27456
const struct midr_range cavium_erratum_27456_cpus[] = {
        /* Cavium ThunderX, T88 pass 1.x - 2.1 */
        MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 1),
        /* Cavium ThunderX, T81 pass 1.0 */
        MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
        {},
};
#endif

#ifdef CONFIG_CAVIUM_ERRATUM_30115
static const struct midr_range cavium_erratum_30115_cpus[] = {
        /* Cavium ThunderX, T88 pass 1.x - 2.2 */
        MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 2),
        /* Cavium ThunderX, T81 pass 1.0 - 1.2 */
        MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2),
        /* Cavium ThunderX, T83 pass 1.0 */
        MIDR_REV(MIDR_THUNDERX_83XX, 0, 0),
        {},
};
#endif

#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
static const struct arm64_cpu_capabilities qcom_erratum_1003_list[] = {
        {
                ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
        },
        {
                .midr_range.model = MIDR_QCOM_KRYO,
                .matches = is_kryo_midr,
        },
        {},
};
#endif

#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
static const struct midr_range workaround_clean_cache[] = {
#if defined(CONFIG_ARM64_ERRATUM_826319) || \
    defined(CONFIG_ARM64_ERRATUM_827319) || \
    defined(CONFIG_ARM64_ERRATUM_824069)
        /* Cortex-A53 r0p[012]: ARM errata 826319, 827319, 824069 */
        MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
#endif
#ifdef CONFIG_ARM64_ERRATUM_819472
        /* Cortex-A53 r0p[01] : ARM errata 819472 */
        MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
#endif
        {},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_1418040
/*
 * - 1188873 affects r0p0 to r2p0
 * - 1418040 affects r0p0 to r3p1
 */
static const struct midr_range erratum_1418040_list[] = {
        /* Cortex-A76 r0p0 to r3p1 */
        MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
        /* Neoverse-N1 r0p0 to r3p1 */
        MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 3, 1),
        /* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
        MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf),
        {},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_845719
static const struct midr_range erratum_845719_list[] = {
        /* Cortex-A53 r0p[01234] */
        MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
        /* Brahma-B53 r0p[0] */
        MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
        /* Kryo2XX Silver rAp4 */
        MIDR_REV(MIDR_QCOM_KRYO_2XX_SILVER, 0xa, 0x4),
        {},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_843419
static const struct arm64_cpu_capabilities erratum_843419_list[] = {
        {
                /* Cortex-A53 r0p[01234] */
                .matches = is_affected_midr_range,
                ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
                MIDR_FIXED(0x4, BIT(8)),
        },
        {
                /* Brahma-B53 r0p[0] */
                .matches = is_affected_midr_range,
                ERRATA_MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
        },
        {},
};
#endif

#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
static const struct midr_range erratum_speculative_at_list[] = {
#ifdef CONFIG_ARM64_ERRATUM_1165522
        /* Cortex A76 r0p0 to r2p0 */
        MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
#endif
#ifdef CONFIG_ARM64_ERRATUM_1319367
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
#endif
#ifdef CONFIG_ARM64_ERRATUM_1530923
        /* Cortex A55 r0p0 to r2p0 */
        MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 2, 0),
        /* Kryo4xx Silver (rdpe => r1p0) */
        MIDR_REV(MIDR_QCOM_KRYO_4XX_SILVER, 0xd, 0xe),
#endif
        {},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_1463225
static const struct midr_range erratum_1463225[] = {
        /* Cortex-A76 r0p0 - r3p1 */
        MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
        /* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
        MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf),
        {},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_1742098
static struct midr_range broken_aarch32_aes[] = {
        MIDR_RANGE(MIDR_CORTEX_A57, 0, 1, 0xf, 0xf),
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
        {},
};
#endif /* CONFIG_ARM64_ERRATUM_1742098 */

#ifdef CONFIG_ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE
static const struct midr_range trbe_overwrite_fill_mode_cpus[] = {
#ifdef CONFIG_ARM64_ERRATUM_2139208
        MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
#endif
#ifdef CONFIG_ARM64_ERRATUM_2119858
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
#endif
        {},
};
#endif /* CONFIG_ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE */

#ifdef CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE
static const struct midr_range tsb_flush_fail_cpus[] = {
#ifdef CONFIG_ARM64_ERRATUM_2067961
        MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
#endif
#ifdef CONFIG_ARM64_ERRATUM_2054223
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
#endif
        {},
};
#endif /* CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE */

#ifdef CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE
static struct midr_range trbe_write_out_of_range_cpus[] = {
#ifdef CONFIG_ARM64_ERRATUM_2253138
        MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
#endif
#ifdef CONFIG_ARM64_ERRATUM_2224489
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
#endif
        {},
};
#endif /* CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE */

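/*
 * Master errata table: consumed by the cpufeature/cpucaps framework, which
 * evaluates each entry's .matches callback and, for matching CPUs, invokes
 * .cpu_enable to apply the workaround. Most entries are compiled in only
 * when the corresponding CONFIG_* option is enabled.
 */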
const struct arm64_cpu_capabilities arm64_errata[] = {
#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
        {
                .desc = "ARM errata 826319, 827319, 824069, or 819472",
                .capability = ARM64_WORKAROUND_CLEAN_CACHE,
                ERRATA_MIDR_RANGE_LIST(workaround_clean_cache),
                .cpu_enable = cpu_enable_cache_maint_trap,
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_832075
        {
                /* Cortex-A57 r0p0 - r1p2 */
                .desc = "ARM erratum 832075",
                .capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
                ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
                                  0, 0,
                                  1, 2),
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_834220
        {
                /* Cortex-A57 r0p0 - r1p2 */
                .desc = "ARM erratum 834220",
                .capability = ARM64_WORKAROUND_834220,
                ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
                                  0, 0,
                                  1, 2),
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_843419
        {
                .desc = "ARM erratum 843419",
                .capability = ARM64_WORKAROUND_843419,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .matches = cpucap_multi_entry_cap_matches,
                .match_list = erratum_843419_list,
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
        {
                .desc = "ARM erratum 845719",
                .capability = ARM64_WORKAROUND_845719,
                ERRATA_MIDR_RANGE_LIST(erratum_845719_list),
        },
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23154
        {
                /* Cavium ThunderX, pass 1.x */
                .desc = "Cavium erratum 23154",
                .capability = ARM64_WORKAROUND_CAVIUM_23154,
                ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX, 0, 0, 1),
        },
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
        {
                .desc = "Cavium erratum 27456",
                .capability = ARM64_WORKAROUND_CAVIUM_27456,
                ERRATA_MIDR_RANGE_LIST(cavium_erratum_27456_cpus),
        },
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_30115
        {
                .desc = "Cavium erratum 30115",
                .capability = ARM64_WORKAROUND_CAVIUM_30115,
                ERRATA_MIDR_RANGE_LIST(cavium_erratum_30115_cpus),
        },
#endif
        {
                .desc = "Mismatched cache type (CTR_EL0)",
                .capability = ARM64_MISMATCHED_CACHE_TYPE,
                .matches = has_mismatched_cache_type,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .cpu_enable = cpu_enable_trap_ctr_access,
        },
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
        {
                .desc = "Qualcomm Technologies Falkor/Kryo erratum 1003",
                .capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .matches = cpucap_multi_entry_cap_matches,
                .match_list = qcom_erratum_1003_list,
        },
#endif
#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
        {
                .desc = "Qualcomm erratum 1009, or ARM erratum 1286807, 2441009",
                .capability = ARM64_WORKAROUND_REPEAT_TLBI,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .matches = cpucap_multi_entry_cap_matches,
                .match_list = arm64_repeat_tlbi_list,
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
        {
                /* Cortex-A73 all versions */
                .desc = "ARM erratum 858921",
                .capability = ARM64_WORKAROUND_858921,
                ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
        },
#endif
        {
                .desc = "Spectre-v2",
                .capability = ARM64_SPECTRE_V2,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .matches = has_spectre_v2,
                .cpu_enable = spectre_v2_enable_mitigation,
        },
#ifdef CONFIG_RANDOMIZE_BASE
        {
                /* Must come after the Spectre-v2 entry */
                .desc = "Spectre-v3a",
                .capability = ARM64_SPECTRE_V3A,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .matches = has_spectre_v3a,
                .cpu_enable = spectre_v3a_enable_mitigation,
        },
#endif
        {
                .desc = "Spectre-v4",
                .capability = ARM64_SPECTRE_V4,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .matches = has_spectre_v4,
                .cpu_enable = spectre_v4_enable_mitigation,
        },
        {
                .desc = "Spectre-BHB",
                .capability = ARM64_SPECTRE_BHB,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .matches = is_spectre_bhb_affected,
                .cpu_enable = spectre_bhb_enable_mitigation,
        },
#ifdef CONFIG_ARM64_ERRATUM_1418040
        {
                .desc = "ARM erratum 1418040",
                .capability = ARM64_WORKAROUND_1418040,
                ERRATA_MIDR_RANGE_LIST(erratum_1418040_list),
                /*
                 * We need to allow affected CPUs to come in late, but
                 * also need the non-affected CPUs to be able to come
                 * in at any point in time. Wonderful.
                 */
                .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
        },
#endif
#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
        {
                .desc = "ARM errata 1165522, 1319367, or 1530923",
                .capability = ARM64_WORKAROUND_SPECULATIVE_AT,
                ERRATA_MIDR_RANGE_LIST(erratum_speculative_at_list),
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_1463225
        {
                .desc = "ARM erratum 1463225",
                .capability = ARM64_WORKAROUND_1463225,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .matches = has_cortex_a76_erratum_1463225,
                .midr_range_list = erratum_1463225,
        },
#endif
#ifdef CONFIG_CAVIUM_TX2_ERRATUM_219
        {
                .desc = "Cavium ThunderX2 erratum 219 (KVM guest sysreg trapping)",
                .capability = ARM64_WORKAROUND_CAVIUM_TX2_219_TVM,
                ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
                .matches = needs_tx2_tvm_workaround,
        },
        {
                .desc = "Cavium ThunderX2 erratum 219 (PRFM removal)",
                .capability = ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM,
                ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_1542419
        {
                /* we depend on the firmware portion for correctness */
                .desc = "ARM erratum 1542419 (kernel portion)",
                .capability = ARM64_WORKAROUND_1542419,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .matches = has_neoverse_n1_erratum_1542419,
                .cpu_enable = cpu_enable_trap_ctr_access,
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_1508412
        {
                /* we depend on the firmware portion for correctness */
                .desc = "ARM erratum 1508412 (kernel portion)",
                .capability = ARM64_WORKAROUND_1508412,
                /* Cortex-A77 r0p0 - r1p0 */
                ERRATA_MIDR_RANGE(MIDR_CORTEX_A77,
                                  0, 0,
                                  1, 0),
        },
#endif
#ifdef CONFIG_NVIDIA_CARMEL_CNP_ERRATUM
        {
                .desc = "NVIDIA Carmel CNP erratum",
                .capability = ARM64_WORKAROUND_NVIDIA_CARMEL_CNP,
                ERRATA_MIDR_ALL_VERSIONS(MIDR_NVIDIA_CARMEL),
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_2457168
        {
                .desc = "ARM erratum 2457168",
                .capability = ARM64_WORKAROUND_2457168,
                .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
                /* Cortex-A510 r0p0-r1p1 */
                CAP_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1)
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_1742098
        {
                .desc = "ARM erratum 1742098",
                .capability = ARM64_WORKAROUND_1742098,
                CAP_MIDR_RANGE_LIST(broken_aarch32_aes),
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
        },
#endif
#ifdef CONFIG_ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE
        {
                /*
                 * The erratum workaround is handled within the TRBE
                 * driver and can be applied per-cpu. So, we can allow
                 * a late CPU to come online with this erratum.
                 */
                .desc = "ARM erratum 2119858 or 2139208",
                .capability = ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE,
                .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
                CAP_MIDR_RANGE_LIST(trbe_overwrite_fill_mode_cpus),
        },
#endif
#ifdef CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE
        {
                .desc = "ARM erratum 2067961 or 2054223",
                .capability = ARM64_WORKAROUND_TSB_FLUSH_FAILURE,
                ERRATA_MIDR_RANGE_LIST(tsb_flush_fail_cpus),
        },
#endif
#ifdef CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE
        {
                .desc = "ARM erratum 2253138 or 2224489",
                .capability = ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE,
                .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
                CAP_MIDR_RANGE_LIST(trbe_write_out_of_range_cpus),