arm64/kernel/cpu_errata.c
/*
 * Contains CPU specific errata definitions
 *
 * Copyright (C) 2014 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/arm-smccc.h>
#include <linux/psci.h>
#include <linux/types.h>
#include <linux/cpu.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>
#include <asm/vectors.h>

static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
{
        u32 midr = read_cpuid_id();

        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
        return is_midr_in_range(midr, &entry->midr_range);
}

static bool __maybe_unused
is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry,
                            int scope)
{
        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
        return is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list);
}

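/*
 * Qualcomm Kryo parts are matched by comparing only the implementer,
 * architecture and top nibble of the part number against
 * entry->midr_range.model; the variant, revision and remaining
 * part-number bits are ignored.
 */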
static bool __maybe_unused
is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
{
        u32 model;

        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

        model = read_cpuid_id();
        model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
                 MIDR_ARCHITECTURE_MASK;

        return model == entry->midr_range.model;
}

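/*
 * Compare this CPU's CTR_EL0 against the system-wide sanitised value.
 * For ARM64_MISMATCHED_CACHE_LINE_SIZE only the minimum line size fields
 * are compared; for ARM64_MISMATCHED_CACHE_TYPE those fields are skipped
 * and the remaining strict fields are used instead.
 */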
static bool
has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
                          int scope)
{
        u64 mask = CTR_CACHE_MINLINE_MASK;

        /* Skip matching the min line sizes for cache type check */
        if (entry->capability == ARM64_MISMATCHED_CACHE_TYPE)
                mask ^= arm64_ftr_reg_ctrel0.strict_mask;

        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
        return (read_cpuid_cachetype() & mask) !=
               (arm64_ftr_reg_ctrel0.sys_val & mask);
}

static void
cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused)
{
        /* Clear SCTLR_EL1.UCT */
        config_sctlr_el1(SCTLR_EL1_UCT, 0);
}

#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

#ifdef CONFIG_KVM
extern char __smccc_workaround_1_smc_start[];
extern char __smccc_workaround_1_smc_end[];
extern char __smccc_workaround_3_smc_start[];
extern char __smccc_workaround_3_smc_end[];
extern char __spectre_bhb_loop_k8_start[];
extern char __spectre_bhb_loop_k8_end[];
extern char __spectre_bhb_loop_k24_start[];
extern char __spectre_bhb_loop_k24_end[];
extern char __spectre_bhb_loop_k32_start[];
extern char __spectre_bhb_loop_k32_end[];
extern char __spectre_bhb_clearbhb_start[];
extern char __spectre_bhb_clearbhb_end[];

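/*
 * Each hypervisor vector slot is SZ_2K bytes and holds 16 vector entries
 * of 0x80 bytes each, so the workaround template is copied into every
 * entry and the slot's instruction cache range is flushed afterwards.
 */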
static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
                                const char *hyp_vecs_end)
{
        void *dst = lm_alias(__bp_harden_hyp_vecs_start + slot * SZ_2K);
        int i;

        for (i = 0; i < SZ_2K; i += 0x80)
                memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);

        flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
}

static DEFINE_SPINLOCK(bp_lock);
static int last_slot = -1;

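/*
 * Install the branch-predictor hardening callback on this CPU. A new
 * hypervisor vector slot is allocated only the first time a template is
 * seen; CPUs sharing a callback reuse that slot, and bp_lock serialises
 * slot allocation.
 */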
static void install_bp_hardening_cb(bp_hardening_cb_t fn,
                                    const char *hyp_vecs_start,
                                    const char *hyp_vecs_end)
{
        int cpu, slot = -1;

        spin_lock(&bp_lock);
        for_each_possible_cpu(cpu) {
                if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
                        slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
                        break;
                }
        }

        if (slot == -1) {
                last_slot++;
                BUG_ON(((__bp_harden_hyp_vecs_end - __bp_harden_hyp_vecs_start)
                        / SZ_2K) <= last_slot);
                slot = last_slot;
                __copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
        }

        __this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
        __this_cpu_write(bp_hardening_data.fn, fn);
        __this_cpu_write(bp_hardening_data.template_start, hyp_vecs_start);
        spin_unlock(&bp_lock);
}
#else
#define __smccc_workaround_1_smc_start          NULL
#define __smccc_workaround_1_smc_end            NULL

static void install_bp_hardening_cb(bp_hardening_cb_t fn,
                                    const char *hyp_vecs_start,
                                    const char *hyp_vecs_end)
{
        __this_cpu_write(bp_hardening_data.fn, fn);
}
#endif  /* CONFIG_KVM */

#include <uapi/linux/psci.h>
#include <linux/arm-smccc.h>
#include <linux/psci.h>

static void call_smc_arch_workaround_1(void)
{
        arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

static void call_hvc_arch_workaround_1(void)
{
        arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

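/*
 * Qualcomm Falkor branch-predictor hardening: save x30, issue sixteen
 * dummy branch-and-link instructions to sanitize the link stack, then
 * restore x30.
 */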
static void qcom_link_stack_sanitization(void)
{
        u64 tmp;

        asm volatile("mov       %0, x30         \n"
                     ".rept     16              \n"
                     "bl        . + 4           \n"
                     ".endr                     \n"
                     "mov       x30, %0         \n"
                     : "=&r" (tmp));
}

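/*
 * Passing "nospectre_v2" on the kernel command line opts out of the
 * Spectre-v2 branch-predictor hardening below.
 */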
static bool __nospectre_v2;
static int __init parse_nospectre_v2(char *str)
{
        __nospectre_v2 = true;
        return 0;
}
early_param("nospectre_v2", parse_nospectre_v2);

/*
 * -1: No workaround
 *  0: No workaround required
 *  1: Workaround installed
 */
static int detect_harden_bp_fw(void)
{
        bp_hardening_cb_t cb;
        void *smccc_start, *smccc_end;
        struct arm_smccc_res res;
        u32 midr = read_cpuid_id();

        if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
                return -1;

        switch (psci_ops.conduit) {
        case PSCI_CONDUIT_HVC:
                arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                                  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
                switch ((int)res.a0) {
                case 1:
                        /* Firmware says we're just fine */
                        return 0;
                case 0:
                        cb = call_hvc_arch_workaround_1;
                        /* This is a guest, no need to patch KVM vectors */
                        smccc_start = NULL;
                        smccc_end = NULL;
                        break;
                default:
                        return -1;
                }
                break;

        case PSCI_CONDUIT_SMC:
                arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                                  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
                switch ((int)res.a0) {
                case 1:
                        /* Firmware says we're just fine */
                        return 0;
                case 0:
                        cb = call_smc_arch_workaround_1;
                        smccc_start = __smccc_workaround_1_smc_start;
                        smccc_end = __smccc_workaround_1_smc_end;
                        break;
                default:
                        return -1;
                }
                break;

        default:
                return -1;
        }

        if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
            ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
                cb = qcom_link_stack_sanitization;

        if (IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR))
                install_bp_hardening_cb(cb, smccc_start, smccc_end);

        return 1;
}

DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);

int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
static bool __ssb_safe = true;

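/*
 * "ssbd=" kernel command-line parameter, parsed by ssbd_cfg() below.
 * Accepted values: "force-on", "force-off" and "kernel" (for example,
 * booting with "ssbd=force-off" disables the mitigation entirely).
 */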
static const struct ssbd_options {
        const char      *str;
        int             state;
} ssbd_options[] = {
        { "force-on",   ARM64_SSBD_FORCE_ENABLE, },
        { "force-off",  ARM64_SSBD_FORCE_DISABLE, },
        { "kernel",     ARM64_SSBD_KERNEL, },
};

static int __init ssbd_cfg(char *buf)
{
        int i;

        if (!buf || !buf[0])
                return -EINVAL;

        for (i = 0; i < ARRAY_SIZE(ssbd_options); i++) {
                int len = strlen(ssbd_options[i].str);

                if (strncmp(buf, ssbd_options[i].str, len))
                        continue;

                ssbd_state = ssbd_options[i].state;
                return 0;
        }

        return -EINVAL;
}
early_param("ssbd", ssbd_cfg);

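/*
 * Alternatives callback for the SMCCC call sites: the single patched
 * instruction becomes HVC #0 or SMC #0 to match the PSCI conduit; if no
 * conduit is available the instruction is left untouched.
 */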
void __init arm64_update_smccc_conduit(struct alt_instr *alt,
                                       __le32 *origptr, __le32 *updptr,
                                       int nr_inst)
{
        u32 insn;

        BUG_ON(nr_inst != 1);

        switch (psci_ops.conduit) {
        case PSCI_CONDUIT_HVC:
                insn = aarch64_insn_get_hvc_value();
                break;
        case PSCI_CONDUIT_SMC:
                insn = aarch64_insn_get_smc_value();
                break;
        default:
                return;
        }

        *updptr = cpu_to_le32(insn);
}

void __init arm64_enable_wa2_handling(struct alt_instr *alt,
                                      __le32 *origptr, __le32 *updptr,
                                      int nr_inst)
{
        BUG_ON(nr_inst != 1);
        /*
         * Only allow mitigation on EL1 entry/exit and guest
         * ARCH_WORKAROUND_2 handling if the SSBD state allows it to
         * be flipped.
         */
        if (arm64_get_ssbd_state() == ARM64_SSBD_KERNEL)
                *updptr = cpu_to_le32(aarch64_insn_gen_nop());
}

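/*
 * Flip the SSBD mitigation for the calling CPU: use the SSBS PSTATE bit
 * when the CPU implements it, otherwise issue the ARCH_WORKAROUND_2
 * firmware call over the PSCI conduit.
 */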
void arm64_set_ssbd_mitigation(bool state)
{
        if (!IS_ENABLED(CONFIG_ARM64_SSBD)) {
                pr_info_once("SSBD disabled by kernel configuration\n");
                return;
        }

        if (this_cpu_has_cap(ARM64_SSBS)) {
                if (state)
                        asm volatile(SET_PSTATE_SSBS(0));
                else
                        asm volatile(SET_PSTATE_SSBS(1));
                return;
        }

        switch (psci_ops.conduit) {
        case PSCI_CONDUIT_HVC:
                arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
                break;

        case PSCI_CONDUIT_SMC:
                arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
                break;

        default:
                WARN_ON_ONCE(1);
                break;
        }
}

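/*
 * Per-CPU capability matcher for ARM64_SSBD: combines the SSBS feature,
 * the known-safe MIDR list and the firmware ARCH_WORKAROUND_2 discovery
 * call to decide whether this CPU needs (and gets) the mitigation.
 */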
static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
                                    int scope)
{
        struct arm_smccc_res res;
        bool required = true;
        s32 val;
        bool this_cpu_safe = false;

        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

        if (cpu_mitigations_off())
                ssbd_state = ARM64_SSBD_FORCE_DISABLE;

        /* delay setting __ssb_safe until we get a firmware response */
        if (is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list))
                this_cpu_safe = true;

        if (this_cpu_has_cap(ARM64_SSBS)) {
                if (!this_cpu_safe)
                        __ssb_safe = false;
                required = false;
                goto out_printmsg;
        }

        if (psci_ops.smccc_version == SMCCC_VERSION_1_0) {
                ssbd_state = ARM64_SSBD_UNKNOWN;
                if (!this_cpu_safe)
                        __ssb_safe = false;
                return false;
        }

        switch (psci_ops.conduit) {
        case PSCI_CONDUIT_HVC:
                arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                                  ARM_SMCCC_ARCH_WORKAROUND_2, &res);
                break;

        case PSCI_CONDUIT_SMC:
                arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                                  ARM_SMCCC_ARCH_WORKAROUND_2, &res);
                break;

        default:
                ssbd_state = ARM64_SSBD_UNKNOWN;
                if (!this_cpu_safe)
                        __ssb_safe = false;
                return false;
        }

        val = (s32)res.a0;

        switch (val) {
        case SMCCC_RET_NOT_SUPPORTED:
                ssbd_state = ARM64_SSBD_UNKNOWN;
                if (!this_cpu_safe)
                        __ssb_safe = false;
                return false;

        /* machines with mixed mitigation requirements must not return this */
        case SMCCC_RET_NOT_REQUIRED:
                pr_info_once("%s mitigation not required\n", entry->desc);
                ssbd_state = ARM64_SSBD_MITIGATED;
                return false;

        case SMCCC_RET_SUCCESS:
                __ssb_safe = false;
                required = true;
                break;

        case 1: /* Mitigation not required on this CPU */
                required = false;
                break;

        default:
                WARN_ON(1);
                if (!this_cpu_safe)
                        __ssb_safe = false;
                return false;
        }

        switch (ssbd_state) {
        case ARM64_SSBD_FORCE_DISABLE:
                arm64_set_ssbd_mitigation(false);
                required = false;
                break;

        case ARM64_SSBD_KERNEL:
                if (required) {
                        __this_cpu_write(arm64_ssbd_callback_required, 1);
                        arm64_set_ssbd_mitigation(true);
                }
                break;

        case ARM64_SSBD_FORCE_ENABLE:
                arm64_set_ssbd_mitigation(true);
                required = true;
                break;

        default:
                WARN_ON(1);
                break;
        }

out_printmsg:
        switch (ssbd_state) {
        case ARM64_SSBD_FORCE_DISABLE:
                pr_info_once("%s disabled from command-line\n", entry->desc);
                break;

        case ARM64_SSBD_FORCE_ENABLE:
                pr_info_once("%s forced from command-line\n", entry->desc);
                break;
        }

        return required;
}

/* known invulnerable cores */
static const struct midr_range arm64_ssb_cpus[] = {
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
        {},
};

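/*
 * Helper macros for describing MIDR-based matches in the arm64_errata
 * table below: a single revision, a revision range, an "all versions"
 * match, or a list of ranges sharing one workaround.
 */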
#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)       \
        .matches = is_affected_midr_range,                      \
        .midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_ALL_VERSIONS(model)                                    \
        .matches = is_affected_midr_range,                              \
        .midr_range = MIDR_ALL_VERSIONS(model)

#define MIDR_FIXED(rev, revidr_mask) \
        .fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}

#define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max)            \
        .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,                         \
        CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_RANGE_LIST(list)                               \
        .matches = is_affected_midr_range_list,                 \
        .midr_range_list = list

/* Errata affecting a range of revisions of a given model variant */
#define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max)      \
        ERRATA_MIDR_RANGE(m, var, r_min, var, r_max)

/* Errata affecting a single variant/revision of a model */
#define ERRATA_MIDR_REV(model, var, rev)        \
        ERRATA_MIDR_RANGE(model, var, rev, var, rev)

/* Errata affecting all variants/revisions of a given model */
#define ERRATA_MIDR_ALL_VERSIONS(model)                         \
        .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,                 \
        CAP_MIDR_ALL_VERSIONS(model)

/* Errata affecting a list of midr ranges, with same work around */
#define ERRATA_MIDR_RANGE_LIST(midr_list)                       \
        .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,                 \
        CAP_MIDR_RANGE_LIST(midr_list)

/* Track overall mitigation state. We are only mitigated if all cores are ok */
static bool __hardenbp_enab = true;
static bool __spectrev2_safe = true;

/*
 * List of CPUs that do not need any Spectre-v2 mitigation at all.
 */
static const struct midr_range spectre_v2_safe_list[] = {
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
        { /* sentinel */ }
};

/*
 * Track overall bp hardening for all heterogeneous cores in the machine.
 * We are only considered "safe" if all booted cores are known safe.
 */
static bool __maybe_unused
check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
{
        int need_wa;

        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

        /* If the CPU has CSV2 set, we're safe */
        if (cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64PFR0_EL1),
                                                 ID_AA64PFR0_CSV2_SHIFT))
                return false;

        /* Alternatively, we have a list of unaffected CPUs */
        if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
                return false;

        /* Fallback to firmware detection */
        need_wa = detect_harden_bp_fw();
        if (!need_wa)
                return false;

        __spectrev2_safe = false;

        if (!IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR)) {
                pr_warn_once("spectrev2 mitigation disabled by kernel configuration\n");
                __hardenbp_enab = false;
                return false;
        }

        /* forced off */
        if (__nospectre_v2 || cpu_mitigations_off()) {
                pr_info_once("spectrev2 mitigation disabled by command line option\n");
                __hardenbp_enab = false;
                return false;
        }

        if (need_wa < 0) {
                pr_warn_once("ARM_SMCCC_ARCH_WORKAROUND_1 missing from firmware\n");
                __hardenbp_enab = false;
        }

        return (need_wa > 0);
}

const struct arm64_cpu_capabilities arm64_errata[] = {
#if     defined(CONFIG_ARM64_ERRATUM_826319) || \
        defined(CONFIG_ARM64_ERRATUM_827319) || \
        defined(CONFIG_ARM64_ERRATUM_824069)
        {
        /* Cortex-A53 r0p[012] */
                .desc = "ARM errata 826319, 827319, 824069",
                .capability = ARM64_WORKAROUND_CLEAN_CACHE,
                ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
                .cpu_enable = cpu_enable_cache_maint_trap,
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_819472
        {
        /* Cortex-A53 r0p[01] */
                .desc = "ARM errata 819472",
                .capability = ARM64_WORKAROUND_CLEAN_CACHE,
                ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
                .cpu_enable = cpu_enable_cache_maint_trap,
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_832075
        {
        /* Cortex-A57 r0p0 - r1p2 */
                .desc = "ARM erratum 832075",
                .capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
                ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
                                  0, 0,
                                  1, 2),
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_834220
        {
        /* Cortex-A57 r0p0 - r1p2 */
                .desc = "ARM erratum 834220",
                .capability = ARM64_WORKAROUND_834220,
                ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
                                  0, 0,
                                  1, 2),
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
        {
        /* Cortex-A53 r0p[01234] */
                .desc = "ARM erratum 845719",
                .capability = ARM64_WORKAROUND_845719,
                ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
        },
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23154
        {
        /* Cavium ThunderX, pass 1.x */
                .desc = "Cavium erratum 23154",
                .capability = ARM64_WORKAROUND_CAVIUM_23154,
                ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX, 0, 0, 1),
        },
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
        {
        /* Cavium ThunderX, T88 pass 1.x - 2.1 */
                .desc = "Cavium erratum 27456",
                .capability = ARM64_WORKAROUND_CAVIUM_27456,
                ERRATA_MIDR_RANGE(MIDR_THUNDERX,
                                  0, 0,
                                  1, 1),
        },
        {
        /* Cavium ThunderX, T81 pass 1.0 */
                .desc = "Cavium erratum 27456",
                .capability = ARM64_WORKAROUND_CAVIUM_27456,
                ERRATA_MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
        },
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_30115
        {
        /* Cavium ThunderX, T88 pass 1.x - 2.2 */
                .desc = "Cavium erratum 30115",
                .capability = ARM64_WORKAROUND_CAVIUM_30115,
                ERRATA_MIDR_RANGE(MIDR_THUNDERX,
                                      0, 0,
                                      1, 2),
        },
        {
        /* Cavium ThunderX, T81 pass 1.0 - 1.2 */
                .desc = "Cavium erratum 30115",
                .capability = ARM64_WORKAROUND_CAVIUM_30115,
                ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2),
        },
        {
        /* Cavium ThunderX, T83 pass 1.0 */
                .desc = "Cavium erratum 30115",
                .capability = ARM64_WORKAROUND_CAVIUM_30115,
                ERRATA_MIDR_REV(MIDR_THUNDERX_83XX, 0, 0),
        },
#endif
        {
                .desc = "Mismatched cache line size",
                .capability = ARM64_MISMATCHED_CACHE_LINE_SIZE,
                .matches = has_mismatched_cache_type,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .cpu_enable = cpu_enable_trap_ctr_access,
        },
        {
                .desc = "Mismatched cache type",
                .capability = ARM64_MISMATCHED_CACHE_TYPE,
                .matches = has_mismatched_cache_type,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .cpu_enable = cpu_enable_trap_ctr_access,
        },
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
        {
                .desc = "Qualcomm Technologies Falkor erratum 1003",
                .capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
                ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
        },
        {
                .desc = "Qualcomm Technologies Kryo erratum 1003",
                .capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .midr_range.model = MIDR_QCOM_KRYO,
                .matches = is_kryo_midr,
        },
#endif
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
        {
                .desc = "Qualcomm Technologies Falkor erratum 1009",
                .capability = ARM64_WORKAROUND_REPEAT_TLBI,
                ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
        {
        /* Cortex-A73 all versions */
                .desc = "ARM erratum 858921",
                .capability = ARM64_WORKAROUND_858921,
                ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
        },
#endif
        {
                .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .matches = check_branch_predictor,
        },
        {
                .desc = "Speculative Store Bypass Disable",
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .capability = ARM64_SSBD,
                .matches = has_ssbd_mitigation,
                .midr_range_list = arm64_ssb_cpus,
        },
#ifdef CONFIG_ARM64_ERRATUM_1188873
        {
                /* Cortex-A76 r0p0 to r2p0 */
                .desc = "ARM erratum 1188873",
                .capability = ARM64_WORKAROUND_1188873,
                ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
        },
#endif
        {
                .desc = "Spectre-BHB",
                .capability = ARM64_SPECTRE_BHB,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .matches = is_spectre_bhb_affected,
                .cpu_enable = spectre_bhb_enable_mitigation,
        },
        {
        }
};

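/*
 * sysfs vulnerability reporting: the cpu_show_*() functions below back
 * the files in /sys/devices/system/cpu/vulnerabilities/.
 */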
ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
                            char *buf)
{
        return sprintf(buf, "Mitigation: __user pointer sanitization\n");
}

static const char *get_bhb_affected_string(enum mitigation_state bhb_state)
{
        switch (bhb_state) {
        case SPECTRE_UNAFFECTED:
                return "";
        default:
        case SPECTRE_VULNERABLE:
                return ", but not BHB";
        case SPECTRE_MITIGATED:
                return ", BHB";
        }
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
                char *buf)
{
        enum mitigation_state bhb_state = arm64_get_spectre_bhb_state();
        const char *bhb_str = get_bhb_affected_string(bhb_state);
        const char *v2_str = "Branch predictor hardening";

        if (__spectrev2_safe) {
                if (bhb_state == SPECTRE_UNAFFECTED)
                        return sprintf(buf, "Not affected\n");

                /*
                 * Platforms affected by Spectre-BHB can't report
                 * "Not affected" for Spectre-v2.
                 */
                v2_str = "CSV2";
        }

        if (__hardenbp_enab)
                return sprintf(buf, "Mitigation: %s%s\n", v2_str, bhb_str);

        return sprintf(buf, "Vulnerable\n");
}

ssize_t cpu_show_spec_store_bypass(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        if (__ssb_safe)
                return sprintf(buf, "Not affected\n");

        switch (ssbd_state) {
        case ARM64_SSBD_KERNEL:
        case ARM64_SSBD_FORCE_ENABLE:
                if (IS_ENABLED(CONFIG_ARM64_SSBD))
                        return sprintf(buf,
                            "Mitigation: Speculative Store Bypass disabled via prctl\n");
        }

        return sprintf(buf, "Vulnerable\n");
}

/*
 * We try to ensure that the mitigation state can never change as the result of
 * onlining a late CPU.
 */
static void update_mitigation_state(enum mitigation_state *oldp,
                                    enum mitigation_state new)
{
        enum mitigation_state state;

        do {
                state = READ_ONCE(*oldp);
                if (new <= state)
                        break;
        } while (cmpxchg_relaxed(oldp, state, new) != state);
}

/*
 * Spectre BHB.
 *
 * A CPU is either:
 * - Mitigated by a branchy loop a CPU specific number of times, and listed
 *   in our "loop mitigated list".
 * - Mitigated in software by the firmware Spectre v2 call.
 * - Has the ClearBHB instruction to perform the mitigation.
 * - Has the 'Exception Clears Branch History Buffer' (ECBHB) feature, so no
 *   software mitigation in the vectors is needed.
 * - Has CSV2.3, so is unaffected.
 */
static enum mitigation_state spectre_bhb_state;

enum mitigation_state arm64_get_spectre_bhb_state(void)
{
        return spectre_bhb_state;
}

/*
 * This must be called with SCOPE_LOCAL_CPU for each type of CPU, before any
 * SCOPE_SYSTEM call will give the right answer.
 */
u8 spectre_bhb_loop_affected(int scope)
{
        u8 k = 0;
        static u8 max_bhb_k;

        if (scope == SCOPE_LOCAL_CPU) {
                static const struct midr_range spectre_bhb_k32_list[] = {
                        MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
                        MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
                        MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
                        MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
                        MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
                        MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
                        MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
                        {},
                };
                static const struct midr_range spectre_bhb_k24_list[] = {
                        MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
                        MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
                        MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
                        {},
                };
                static const struct midr_range spectre_bhb_k8_list[] = {
                        MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
                        MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
                        {},
                };

                if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list))
                        k = 32;
                else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list))
                        k = 24;
                else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list))
                        k =  8;

                max_bhb_k = max(max_bhb_k, k);
        } else {
                k = max_bhb_k;
        }

        return k;
}

static enum mitigation_state spectre_bhb_get_cpu_fw_mitigation_state(void)
{
        int ret;
        struct arm_smccc_res res;

        if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
                return SPECTRE_VULNERABLE;

        switch (psci_ops.conduit) {
        case PSCI_CONDUIT_HVC:
                arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                                  ARM_SMCCC_ARCH_WORKAROUND_3, &res);
                break;

        case PSCI_CONDUIT_SMC:
                arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                                  ARM_SMCCC_ARCH_WORKAROUND_3, &res);
                break;

        default:
                return SPECTRE_VULNERABLE;
        }

        ret = res.a0;
        switch (ret) {
        case SMCCC_RET_SUCCESS:
                return SPECTRE_MITIGATED;
        case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
                return SPECTRE_UNAFFECTED;
        default:
        case SMCCC_RET_NOT_SUPPORTED:
                return SPECTRE_VULNERABLE;
        }
}

static bool is_spectre_bhb_fw_affected(int scope)
{
        static bool system_affected;
        enum mitigation_state fw_state;
        bool has_smccc = (psci_ops.smccc_version >= SMCCC_VERSION_1_1);
        static const struct midr_range spectre_bhb_firmware_mitigated_list[] = {
                MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
                MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
                {},
        };
        bool cpu_in_list = is_midr_in_range_list(read_cpuid_id(),
                                         spectre_bhb_firmware_mitigated_list);

        if (scope != SCOPE_LOCAL_CPU)
                return system_affected;

        fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
        if (cpu_in_list || (has_smccc && fw_state == SPECTRE_MITIGATED)) {
                system_affected = true;
                return true;
        }

        return false;
}

static bool supports_ecbhb(int scope)
{
        u64 mmfr1;

        if (scope == SCOPE_LOCAL_CPU)
                mmfr1 = read_sysreg_s(SYS_ID_AA64MMFR1_EL1);
        else
                mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);

        return cpuid_feature_extract_unsigned_field(mmfr1,
                                                    ID_AA64MMFR1_ECBHB_SHIFT);
}

bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
                             int scope)
{
        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

        if (supports_csv2p3(scope))
                return false;

        if (supports_clearbhb(scope))
                return true;

        if (spectre_bhb_loop_affected(scope))
                return true;

        if (is_spectre_bhb_fw_affected(scope))
                return true;

        return false;
}

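/*
 * Point VBAR_EL1 at the requested BHB-mitigated vector for this CPU. When
 * KPTI is enabled the vectors are instead switched on the return to
 * user-space, so only this_cpu_vector is updated here.
 */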
static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
{
        const char *v = arm64_get_bp_hardening_vector(slot);

        if (slot < 0)
                return;

        __this_cpu_write(this_cpu_vector, v);

        /*
         * When KPTI is in use, the vectors are switched when exiting to
         * user-space.
         */
        if (arm64_kernel_unmapped_at_el0())
                return;

        write_sysreg(v, vbar_el1);
        isb();
}

#ifdef CONFIG_KVM
static const char *kvm_bhb_get_vecs_end(const char *start)
{
        if (start == __smccc_workaround_3_smc_start)
                return __smccc_workaround_3_smc_end;
        else if (start == __spectre_bhb_loop_k8_start)
                return __spectre_bhb_loop_k8_end;
        else if (start == __spectre_bhb_loop_k24_start)
                return __spectre_bhb_loop_k24_end;
        else if (start == __spectre_bhb_loop_k32_start)
                return __spectre_bhb_loop_k32_end;
        else if (start == __spectre_bhb_clearbhb_start)
                return __spectre_bhb_clearbhb_end;

        return NULL;
}

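/*
 * Install the requested Spectre-BHB template in a KVM hypervisor vector
 * slot, reusing the slot if another CPU has already installed the same
 * template. Mirrors install_bp_hardening_cb() and shares bp_lock with it.
 */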
static void kvm_setup_bhb_slot(const char *hyp_vecs_start)
{
        int cpu, slot = -1;
        const char *hyp_vecs_end;

        if (!IS_ENABLED(CONFIG_KVM) || !is_hyp_mode_available())
                return;

        hyp_vecs_end = kvm_bhb_get_vecs_end(hyp_vecs_start);
        if (WARN_ON_ONCE(!hyp_vecs_start || !hyp_vecs_end))
                return;

        spin_lock(&bp_lock);
        for_each_possible_cpu(cpu) {
                if (per_cpu(bp_hardening_data.template_start, cpu) == hyp_vecs_start) {
                        slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
                        break;
                }
        }

        if (slot == -1) {
                last_slot++;
                BUG_ON(((__bp_harden_hyp_vecs_end - __bp_harden_hyp_vecs_start)
                        / SZ_2K) <= last_slot);
                slot = last_slot;
                __copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
        }

        __this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
        __this_cpu_write(bp_hardening_data.template_start, hyp_vecs_start);
        spin_unlock(&bp_lock);
}
#else
#define __smccc_workaround_3_smc_start NULL
#define __spectre_bhb_loop_k8_start NULL
#define __spectre_bhb_loop_k24_start NULL
#define __spectre_bhb_loop_k32_start NULL
#define __spectre_bhb_clearbhb_start NULL

static void kvm_setup_bhb_slot(const char *hyp_vecs_start) { }
#endif

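/*
 * Pick and enable the strongest available Spectre-BHB mitigation for this
 * CPU, in decreasing order of preference: ECBHB (nothing to do in the
 * vectors), the ClearBHB instruction, the CPU-specific branchy loop, and
 * finally the ARCH_WORKAROUND_3 firmware call.
 */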
void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
{
        enum mitigation_state fw_state, state = SPECTRE_VULNERABLE;

        if (!is_spectre_bhb_affected(entry, SCOPE_LOCAL_CPU))
                return;

        if (!__spectrev2_safe && !__hardenbp_enab) {
                /* No point mitigating Spectre-BHB alone. */
        } else if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY)) {
                pr_info_once("spectre-bhb mitigation disabled by compile time option\n");
        } else if (cpu_mitigations_off()) {
                pr_info_once("spectre-bhb mitigation disabled by command line option\n");
        } else if (supports_ecbhb(SCOPE_LOCAL_CPU)) {
                state = SPECTRE_MITIGATED;
        } else if (supports_clearbhb(SCOPE_LOCAL_CPU)) {
                kvm_setup_bhb_slot(__spectre_bhb_clearbhb_start);
                this_cpu_set_vectors(EL1_VECTOR_BHB_CLEAR_INSN);

                state = SPECTRE_MITIGATED;
        } else if (spectre_bhb_loop_affected(SCOPE_LOCAL_CPU)) {
                switch (spectre_bhb_loop_affected(SCOPE_SYSTEM)) {
                case 8:
                        kvm_setup_bhb_slot(__spectre_bhb_loop_k8_start);
                        break;
                case 24:
                        kvm_setup_bhb_slot(__spectre_bhb_loop_k24_start);
                        break;
                case 32:
                        kvm_setup_bhb_slot(__spectre_bhb_loop_k32_start);
                        break;
                default:
                        WARN_ON_ONCE(1);
                }
                this_cpu_set_vectors(EL1_VECTOR_BHB_LOOP);

                state = SPECTRE_MITIGATED;
        } else if (is_spectre_bhb_fw_affected(SCOPE_LOCAL_CPU)) {
                fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
                if (fw_state == SPECTRE_MITIGATED) {
                        kvm_setup_bhb_slot(__smccc_workaround_3_smc_start);
                        this_cpu_set_vectors(EL1_VECTOR_BHB_FW);

                        /*
                         * With WA3 in the vectors, the WA1 calls can be
                         * removed.
                         */
                        __this_cpu_write(bp_hardening_data.fn, NULL);

                        state = SPECTRE_MITIGATED;
                }
        }

        update_mitigation_state(&spectre_bhb_state, state);
}

/* Patched to correct the immediate */
void __init spectre_bhb_patch_loop_iter(struct alt_instr *alt,
                                        __le32 *origptr, __le32 *updptr, int nr_inst)
{
        u8 rd;
        u32 insn;
        u16 loop_count = spectre_bhb_loop_affected(SCOPE_SYSTEM);

        BUG_ON(nr_inst != 1); /* MOV -> MOV */

        if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY))
                return;

        insn = le32_to_cpu(*origptr);
        rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);
        insn = aarch64_insn_gen_movewide(rd, loop_count, 0,
                                         AARCH64_INSN_VARIANT_64BIT,
                                         AARCH64_INSN_MOVEWIDE_ZERO);
        *updptr++ = cpu_to_le32(insn);
}