/*
 * Contains CPU specific errata definitions
 *
 * Copyright (C) 2014 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/arm-smccc.h>
#include <linux/psci.h>
#include <linux/types.h>
#include <asm/cachetype.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>

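/*
 * Matcher for MIDR-based errata: compares the running CPU's MIDR_EL1
 * against the model/revision range of an erratum entry, so it must be
 * evaluated on the local CPU with preemption disabled.
 */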
static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
{
        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
        return MIDR_IS_CPU_MODEL_RANGE(read_cpuid_id(), entry->midr_model,
                                       entry->midr_range_min,
                                       entry->midr_range_max);
}

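/*
 * Report a mismatch between the local CPU's CTR_EL0 and the system-wide
 * safe value recorded in arm64_ftr_reg_ctrel0. For the "cache type"
 * capability, the minimum line size fields are excluded from the
 * comparison (see the mask adjustment below).
 */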
static bool
has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
                          int scope)
{
        u64 mask = CTR_CACHE_MINLINE_MASK;

        /* Skip matching the min line sizes for cache type check */
        if (entry->capability == ARM64_MISMATCHED_CACHE_TYPE)
                mask ^= arm64_ftr_reg_ctrel0.strict_mask;

        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
        return (read_cpuid_cachetype() & mask) !=
               (arm64_ftr_reg_ctrel0.sys_val & mask);
}

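/*
 * With SCTLR_EL1.UCT cleared, EL0 reads of CTR_EL0 trap to the kernel,
 * which can then emulate the system-wide safe cache type value.
 */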
static int cpu_enable_trap_ctr_access(void *__unused)
{
        /* Clear SCTLR_EL1.UCT */
        config_sctlr_el1(SCTLR_EL1_UCT, 0);
        return 0;
}

#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

#ifdef CONFIG_KVM
extern char __smccc_workaround_1_smc_start[];
extern char __smccc_workaround_1_smc_end[];
extern char __smccc_workaround_1_hvc_start[];
extern char __smccc_workaround_1_hvc_end[];

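/*
 * Fill one 2K hyp vector slot with the workaround sequence, repeating
 * it at every 0x80-byte vector entry, then flush so instruction fetch
 * sees the new code.
 */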
static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
                                const char *hyp_vecs_end)
{
        void *dst = __bp_harden_hyp_vecs_start + slot * SZ_2K;
        int i;

        for (i = 0; i < SZ_2K; i += 0x80)
                memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);

        flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
}

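/*
 * Register a hardening callback for the local CPU, reusing the hyp
 * vector slot of any CPU that already installed the same callback.
 */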
static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
                                      const char *hyp_vecs_start,
                                      const char *hyp_vecs_end)
{
        static int last_slot = -1;
        static DEFINE_SPINLOCK(bp_lock);
        int cpu, slot = -1;

        spin_lock(&bp_lock);
        for_each_possible_cpu(cpu) {
                if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
                        slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
                        break;
                }
        }

        if (slot == -1) {
                last_slot++;
                BUG_ON(((__bp_harden_hyp_vecs_end - __bp_harden_hyp_vecs_start)
                        / SZ_2K) <= last_slot);
                slot = last_slot;
                __copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
        }

        __this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
        __this_cpu_write(bp_hardening_data.fn, fn);
        spin_unlock(&bp_lock);
}
#else
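/*
 * Without KVM there are no hyp vectors to patch; recording the per-CPU
 * callback is all that is required.
 */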
#define __smccc_workaround_1_smc_start		NULL
#define __smccc_workaround_1_smc_end		NULL
#define __smccc_workaround_1_hvc_start		NULL
#define __smccc_workaround_1_hvc_end		NULL

static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
                                      const char *hyp_vecs_start,
                                      const char *hyp_vecs_end)
{
        __this_cpu_write(bp_hardening_data.fn, fn);
}
#endif	/* CONFIG_KVM */

static void install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry,
                                    bp_hardening_cb_t fn,
                                    const char *hyp_vecs_start,
                                    const char *hyp_vecs_end)
{
        u64 pfr0;

        if (!entry->matches(entry, SCOPE_LOCAL_CPU))
                return;

        /* CPUs advertising CSV2 are not vulnerable; skip the callback */
        pfr0 = read_cpuid(ID_AA64PFR0_EL1);
        if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
                return;

        __install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end);
}

#include <uapi/linux/psci.h>
#include <linux/arm-smccc.h>
#include <linux/psci.h>

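/*
 * Thunks invoking ARM_SMCCC_ARCH_WORKAROUND_1 through the SMC or HVC
 * conduit; each call asks the firmware to invalidate the branch
 * predictor state on the calling CPU.
 */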
static void call_smc_arch_workaround_1(void)
{
        arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

static void call_hvc_arch_workaround_1(void)
{
        arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

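/*
 * Probe the firmware for ARM_SMCCC_ARCH_WORKAROUND_1 and, when it is
 * implemented, install the callback matching the PSCI conduit in use.
 */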
static int enable_smccc_arch_workaround_1(void *data)
{
        const struct arm64_cpu_capabilities *entry = data;
        bp_hardening_cb_t cb;
        void *smccc_start, *smccc_end;
        struct arm_smccc_res res;

        if (!entry->matches(entry, SCOPE_LOCAL_CPU))
                return 0;

        if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
                return 0;

        switch (psci_ops.conduit) {
        case PSCI_CONDUIT_HVC:
                arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                                  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
                if ((int)res.a0 < 0)
                        return 0;
                cb = call_hvc_arch_workaround_1;
                smccc_start = __smccc_workaround_1_hvc_start;
                smccc_end = __smccc_workaround_1_hvc_end;
                break;

        case PSCI_CONDUIT_SMC:
                arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                                  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
                if ((int)res.a0 < 0)
                        return 0;
                cb = call_smc_arch_workaround_1;
                smccc_start = __smccc_workaround_1_smc_start;
                smccc_end = __smccc_workaround_1_smc_end;
                break;

        default:
                return 0;
        }

        install_bp_hardening_cb(entry, cb, smccc_start, smccc_end);

        return 0;
}
#endif	/* CONFIG_HARDEN_BRANCH_PREDICTOR */

#ifdef CONFIG_ARM64_SSBD
DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);

int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;

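/* Accepted values for the "ssbd=" kernel command line parameter */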
static const struct ssbd_options {
        const char *str;
        int state;
} ssbd_options[] = {
        { "force-on",	ARM64_SSBD_FORCE_ENABLE, },
        { "force-off",	ARM64_SSBD_FORCE_DISABLE, },
        { "kernel",	ARM64_SSBD_KERNEL, },
};

static int __init ssbd_cfg(char *buf)
{
        int i;

        if (!buf || !buf[0])
                return -EINVAL;

        for (i = 0; i < ARRAY_SIZE(ssbd_options); i++) {
                int len = strlen(ssbd_options[i].str);

                if (strncmp(buf, ssbd_options[i].str, len))
                        continue;

                ssbd_state = ssbd_options[i].state;
                return 0;
        }

        return -EINVAL;
}
early_param("ssbd", ssbd_cfg);

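/*
 * Alternative instruction callback: patch in the HVC or SMC instruction
 * matching the conduit the firmware expects for SMCCC calls.
 */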
void __init arm64_update_smccc_conduit(struct alt_instr *alt,
                                       __le32 *origptr, __le32 *updptr,
                                       int nr_inst)
{
        u32 insn;

        BUG_ON(nr_inst != 1);

        switch (psci_ops.conduit) {
        case PSCI_CONDUIT_HVC:
                insn = aarch64_insn_get_hvc_value();
                break;
        case PSCI_CONDUIT_SMC:
                insn = aarch64_insn_get_smc_value();
                break;
        default:
                return;
        }

        *updptr = cpu_to_le32(insn);
}

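/*
 * Alternative instruction callback for the dynamic SSBD case: NOP out
 * the branch that would otherwise skip ARCH_WORKAROUND_2 handling.
 */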
void __init arm64_enable_wa2_handling(struct alt_instr *alt,
                                      __le32 *origptr, __le32 *updptr,
                                      int nr_inst)
{
        BUG_ON(nr_inst != 1);
        /*
         * Only allow mitigation on EL1 entry/exit and guest
         * ARCH_WORKAROUND_2 handling if the SSBD state allows it to
         * be flipped.
         */
        if (arm64_get_ssbd_state() == ARM64_SSBD_KERNEL)
                *updptr = cpu_to_le32(aarch64_insn_gen_nop());
}

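/*
 * Flip the speculative store bypass mitigation on or off for the
 * calling CPU via an ARCH_WORKAROUND_2 firmware call.
 */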
void arm64_set_ssbd_mitigation(bool state)
{
        switch (psci_ops.conduit) {
        case PSCI_CONDUIT_HVC:
                arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
                break;

        case PSCI_CONDUIT_SMC:
                arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
                break;

        default:
                WARN_ON_ONCE(1);
                break;
        }
}

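/*
 * Probe firmware support for ARCH_WORKAROUND_2 and combine the result
 * with the policy selected through the "ssbd=" command line parameter.
 */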
static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
                                int scope)
{
        struct arm_smccc_res res;
        bool required = true;
        s32 val;

        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

        if (psci_ops.smccc_version == SMCCC_VERSION_1_0) {
                ssbd_state = ARM64_SSBD_UNKNOWN;
                return false;
        }

        switch (psci_ops.conduit) {
        case PSCI_CONDUIT_HVC:
                arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                                  ARM_SMCCC_ARCH_WORKAROUND_2, &res);
                break;

        case PSCI_CONDUIT_SMC:
                arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                                  ARM_SMCCC_ARCH_WORKAROUND_2, &res);
                break;

        default:
                ssbd_state = ARM64_SSBD_UNKNOWN;
                return false;
        }

        val = (s32)res.a0;

        switch (val) {
        case SMCCC_RET_NOT_SUPPORTED:
                ssbd_state = ARM64_SSBD_UNKNOWN;
                return false;

        case SMCCC_RET_NOT_REQUIRED:
                pr_info_once("%s mitigation not required\n", entry->desc);
                ssbd_state = ARM64_SSBD_MITIGATED;
                return false;

        case SMCCC_RET_SUCCESS:
                required = true;
                break;

        case 1:	/* Mitigation not required on this CPU */
                required = false;
                break;

        default:
                WARN_ON(1);
                return false;
        }

        switch (ssbd_state) {
        case ARM64_SSBD_FORCE_DISABLE:
                pr_info_once("%s disabled from command-line\n", entry->desc);
                arm64_set_ssbd_mitigation(false);
                required = false;
                break;

        case ARM64_SSBD_KERNEL:
                if (required) {
                        __this_cpu_write(arm64_ssbd_callback_required, 1);
                        arm64_set_ssbd_mitigation(true);
                }
                break;

        case ARM64_SSBD_FORCE_ENABLE:
                pr_info_once("%s forced from command-line\n", entry->desc);
                arm64_set_ssbd_mitigation(true);
                required = true;
                break;

        default:
                WARN_ON(1);
                break;
        }

        return required;
}
#endif	/* CONFIG_ARM64_SSBD */

#define MIDR_RANGE(model, min, max) \
        .def_scope = SCOPE_LOCAL_CPU, \
        .matches = is_affected_midr_range, \
        .midr_model = model, \
        .midr_range_min = min, \
        .midr_range_max = max

#define MIDR_ALL_VERSIONS(model) \
        .def_scope = SCOPE_LOCAL_CPU, \
        .matches = is_affected_midr_range, \
        .midr_model = model, \
        .midr_range_min = 0, \
        .midr_range_max = (MIDR_VARIANT_MASK | MIDR_REVISION_MASK)

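/*
 * Table of errata workarounds, matched per CPU. Entries are guarded by
 * their Kconfig options and the table is terminated by an empty entry.
 */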
const struct arm64_cpu_capabilities arm64_errata[] = {
#if defined(CONFIG_ARM64_ERRATUM_826319) || \
        defined(CONFIG_ARM64_ERRATUM_827319) || \
        defined(CONFIG_ARM64_ERRATUM_824069)
        {
        /* Cortex-A53 r0p[012] */
                .desc = "ARM errata 826319, 827319, 824069",
                .capability = ARM64_WORKAROUND_CLEAN_CACHE,
                MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x02),
                .enable = cpu_enable_cache_maint_trap,
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_819472
        {
        /* Cortex-A53 r0p[01] */
                .desc = "ARM errata 819472",
                .capability = ARM64_WORKAROUND_CLEAN_CACHE,
                MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x01),
                .enable = cpu_enable_cache_maint_trap,
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_832075
        {
        /* Cortex-A57 r0p0 - r1p2 */
                .desc = "ARM erratum 832075",
                .capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
                MIDR_RANGE(MIDR_CORTEX_A57, 0x00,
                           (1 << MIDR_VARIANT_SHIFT) | 2),
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_834220
        {
        /* Cortex-A57 r0p0 - r1p2 */
                .desc = "ARM erratum 834220",
                .capability = ARM64_WORKAROUND_834220,
                MIDR_RANGE(MIDR_CORTEX_A57, 0x00,
                           (1 << MIDR_VARIANT_SHIFT) | 2),
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
        {
        /* Cortex-A53 r0p[01234] */
                .desc = "ARM erratum 845719",
                .capability = ARM64_WORKAROUND_845719,
                MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x04),
        },
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23154
        {
        /* Cavium ThunderX, pass 1.x */
                .desc = "Cavium erratum 23154",
                .capability = ARM64_WORKAROUND_CAVIUM_23154,
                MIDR_RANGE(MIDR_THUNDERX, 0x00, 0x01),
        },
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
        {
        /* Cavium ThunderX, T88 pass 1.x - 2.1 */
                .desc = "Cavium erratum 27456",
                .capability = ARM64_WORKAROUND_CAVIUM_27456,
                MIDR_RANGE(MIDR_THUNDERX, 0x00,
                           (1 << MIDR_VARIANT_SHIFT) | 1),
        },
        {
        /* Cavium ThunderX, T81 pass 1.0 */
                .desc = "Cavium erratum 27456",
                .capability = ARM64_WORKAROUND_CAVIUM_27456,
                MIDR_RANGE(MIDR_THUNDERX_81XX, 0x00, 0x00),
        },
#endif
        {
                .desc = "Mismatched cache line size",
                .capability = ARM64_MISMATCHED_CACHE_LINE_SIZE,
                .matches = has_mismatched_cache_type,
                .def_scope = SCOPE_LOCAL_CPU,
                .enable = cpu_enable_trap_ctr_access,
        },
        {
                .desc = "Mismatched cache type",
                .capability = ARM64_MISMATCHED_CACHE_TYPE,
                .matches = has_mismatched_cache_type,
                .def_scope = SCOPE_LOCAL_CPU,
                .enable = cpu_enable_trap_ctr_access,
        },
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
        {
                .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
                MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
                .enable = enable_smccc_arch_workaround_1,
        },
        {
                .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
                MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
                .enable = enable_smccc_arch_workaround_1,
        },
        {
                .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
                MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
                .enable = enable_smccc_arch_workaround_1,
        },
        {
                .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
                MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
                .enable = enable_smccc_arch_workaround_1,
        },
        {
                .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
                MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
                .enable = enable_smccc_arch_workaround_1,
        },
        {
                .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
                MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
                .enable = enable_smccc_arch_workaround_1,
        },
#endif
#ifdef CONFIG_ARM64_SSBD
        {
                .desc = "Speculative Store Bypass Disable",
                .def_scope = SCOPE_LOCAL_CPU,
                .capability = ARM64_SSBD,
                .matches = has_ssbd_mitigation,
        },
#endif
        {
        }
};

/*
 * The CPU errata workarounds are detected and applied at boot time
 * and the related information is freed soon after. If a new CPU requires
 * an erratum workaround not detected at boot, fail that CPU.
 */
void verify_local_cpu_errata_workarounds(void)
{
        const struct arm64_cpu_capabilities *caps = arm64_errata;

        for (; caps->matches; caps++) {
                if (cpus_have_cap(caps->capability)) {
                        if (caps->enable)
                                caps->enable((void *)caps);
                } else if (caps->matches(caps, SCOPE_LOCAL_CPU)) {
                        pr_crit("CPU%d: Requires work around for %s, not detected"
                                        " at boot time\n",
                                smp_processor_id(),
                                caps->desc ? : "an erratum");
                        cpu_die_early();
                }
        }
}

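/*
 * Boot-time hooks: detect the errata present on the boot CPUs, then
 * enable the corresponding workarounds once all capabilities are known.
 */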
void update_cpu_errata_workarounds(void)
{
        update_cpu_capabilities(arm64_errata, "enabling workaround for");
}

void __init enable_errata_workarounds(void)
{
        enable_cpu_capabilities(arm64_errata);
}