GNU Linux-libre 4.14.303-gnu1
arch/arm64/kernel/cpu_errata.c
1 /*
2  * Contains CPU specific errata definitions
3  *
4  * Copyright (C) 2014 ARM Ltd.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
17  */
18
19 #include <linux/arm-smccc.h>
20 #include <linux/psci.h>
21 #include <linux/types.h>
22 #include <linux/cpu.h>
23 #include <asm/cpu.h>
24 #include <asm/cputype.h>
25 #include <asm/cpufeature.h>
26 #include <asm/vectors.h>
27
28 static bool __maybe_unused
29 is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
30 {
31         u32 midr = read_cpuid_id();
32
33         WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
34         return is_midr_in_range(midr, &entry->midr_range);
35 }
36
37 static bool __maybe_unused
38 is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry,
39                             int scope)
40 {
41         WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
42         return is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list);
43 }
44
45 static bool __maybe_unused
46 is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
47 {
48         u32 model;
49
50         WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
51
52         model = read_cpuid_id();
53         model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
54                  MIDR_ARCHITECTURE_MASK;
55
56         return model == entry->midr_range.model;
57 }
58
59 static bool
60 has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
61                           int scope)
62 {
63         u64 mask = CTR_CACHE_MINLINE_MASK;
64
65         /* Skip matching the min line sizes for cache type check */
66         if (entry->capability == ARM64_MISMATCHED_CACHE_TYPE)
67                 mask ^= arm64_ftr_reg_ctrel0.strict_mask;
68
69         WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
70         return (read_cpuid_cachetype() & mask) !=
71                (arm64_ftr_reg_ctrel0.sys_val & mask);
72 }
73
74 static void
75 cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused)
76 {
77         /* Clear SCTLR_EL1.UCT */
78         config_sctlr_el1(SCTLR_EL1_UCT, 0);
79 }
80
81 #include <asm/mmu_context.h>
82 #include <asm/cacheflush.h>
83
84 DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
85
86 #ifdef CONFIG_KVM
87 extern char __smccc_workaround_1_smc_start[];
88 extern char __smccc_workaround_1_smc_end[];
89 extern char __smccc_workaround_3_smc_start[];
90 extern char __smccc_workaround_3_smc_end[];
91 extern char __spectre_bhb_loop_k8_start[];
92 extern char __spectre_bhb_loop_k8_end[];
93 extern char __spectre_bhb_loop_k24_start[];
94 extern char __spectre_bhb_loop_k24_end[];
95 extern char __spectre_bhb_loop_k32_start[];
96 extern char __spectre_bhb_loop_k32_end[];
97 extern char __spectre_bhb_clearbhb_start[];
98 extern char __spectre_bhb_clearbhb_end[];
99
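/*
 * Fill every 0x80-byte vector entry of the 2K slot with the supplied
 * template, then flush the I-cache so the patched vectors are visible to
 * instruction fetch.
 */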
100 static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
101                                 const char *hyp_vecs_end)
102 {
103         void *dst = lm_alias(__bp_harden_hyp_vecs_start + slot * SZ_2K);
104         int i;
105
106         for (i = 0; i < SZ_2K; i += 0x80)
107                 memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);
108
109         flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
110 }
111
112 static DEFINE_SPINLOCK(bp_lock);
113 static int last_slot = -1;
114
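/*
 * Install the per-CPU branch-predictor hardening callback. If another CPU
 * already uses the same callback, its hyp vector slot is reused; otherwise
 * the next free slot is populated from the supplied template.
 */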
115 static void install_bp_hardening_cb(bp_hardening_cb_t fn,
116                                     const char *hyp_vecs_start,
117                                     const char *hyp_vecs_end)
118 {
119
120         int cpu, slot = -1;
121
122         spin_lock(&bp_lock);
123         for_each_possible_cpu(cpu) {
124                 if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
125                         slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
126                         break;
127                 }
128         }
129
130         if (slot == -1) {
131                 last_slot++;
132                 BUG_ON(((__bp_harden_hyp_vecs_end - __bp_harden_hyp_vecs_start)
133                         / SZ_2K) <= last_slot);
134                 slot = last_slot;
135                 __copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
136         }
137
138         if (fn != __this_cpu_read(bp_hardening_data.fn)) {
139                 __this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
140                 __this_cpu_write(bp_hardening_data.fn, fn);
141                 __this_cpu_write(bp_hardening_data.template_start,
142                                  hyp_vecs_start);
143         }
144         spin_unlock(&bp_lock);
145 }
146 #else
147 #define __smccc_workaround_1_smc_start          NULL
148 #define __smccc_workaround_1_smc_end            NULL
149
150 static void install_bp_hardening_cb(bp_hardening_cb_t fn,
151                                       const char *hyp_vecs_start,
152                                       const char *hyp_vecs_end)
153 {
154         __this_cpu_write(bp_hardening_data.fn, fn);
155 }
156 #endif  /* CONFIG_KVM */
157
158 #include <uapi/linux/psci.h>
159 #include <linux/arm-smccc.h>
160 #include <linux/psci.h>
161
162 static void call_smc_arch_workaround_1(void)
163 {
164         arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
165 }
166
167 static void call_hvc_arch_workaround_1(void)
168 {
169         arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
170 }
171
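/*
 * Hardening callback used on Falkor parts (see detect_harden_bp_fw()): the
 * sixteen BL instructions below overwrite the return-address predictor with
 * benign entries before x30 is restored.
 */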
172 static void qcom_link_stack_sanitization(void)
173 {
174         u64 tmp;
175
176         asm volatile("mov       %0, x30         \n"
177                      ".rept     16              \n"
178                      "bl        . + 4           \n"
179                      ".endr                     \n"
180                      "mov       x30, %0         \n"
181                      : "=&r" (tmp));
182 }
183
184 static bool __nospectre_v2;
185 static int __init parse_nospectre_v2(char *str)
186 {
187         __nospectre_v2 = true;
188         return 0;
189 }
190 early_param("nospectre_v2", parse_nospectre_v2);
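/*
 * Booting with "nospectre_v2" makes check_branch_predictor() below refuse
 * the ARM64_HARDEN_BRANCH_PREDICTOR capability, disabling the mitigation.
 */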
191
192 /*
193  * -1: No workaround available
194  *  0: No workaround required
195  *  1: Workaround installed
196  */
197 static int detect_harden_bp_fw(void)
198 {
199         bp_hardening_cb_t cb;
200         void *smccc_start, *smccc_end;
201         struct arm_smccc_res res;
202         u32 midr = read_cpuid_id();
203
204         if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
205                 return -1;
206
207         switch (psci_ops.conduit) {
208         case PSCI_CONDUIT_HVC:
209                 arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
210                                   ARM_SMCCC_ARCH_WORKAROUND_1, &res);
211                 switch ((int)res.a0) {
212                 case 1:
213                         /* Firmware says we're just fine */
214                         return 0;
215                 case 0:
216                         cb = call_hvc_arch_workaround_1;
217                         /* This is a guest, no need to patch KVM vectors */
218                         smccc_start = NULL;
219                         smccc_end = NULL;
220                         break;
221                 default:
222                         return -1;
223                 }
224                 break;
225
226         case PSCI_CONDUIT_SMC:
227                 arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
228                                   ARM_SMCCC_ARCH_WORKAROUND_1, &res);
229                 switch ((int)res.a0) {
230                 case 1:
231                         /* Firmware says we're just fine */
232                         return 0;
233                 case 0:
234                         cb = call_smc_arch_workaround_1;
235                         smccc_start = __smccc_workaround_1_smc_start;
236                         smccc_end = __smccc_workaround_1_smc_end;
237                         break;
238                 default:
239                         return -1;
240                 }
241                 break;
242
243         default:
244                 return -1;
245         }
246
247         if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
248             ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
249                 cb = qcom_link_stack_sanitization;
250
251         if (IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR))
252                 install_bp_hardening_cb(cb, smccc_start, smccc_end);
253
254         return 1;
255 }
256
257 DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
258
259 int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
260 static bool __ssb_safe = true;
261
262 static const struct ssbd_options {
263         const char      *str;
264         int             state;
265 } ssbd_options[] = {
266         { "force-on",   ARM64_SSBD_FORCE_ENABLE, },
267         { "force-off",  ARM64_SSBD_FORCE_DISABLE, },
268         { "kernel",     ARM64_SSBD_KERNEL, },
269 };
270
271 static int __init ssbd_cfg(char *buf)
272 {
273         int i;
274
275         if (!buf || !buf[0])
276                 return -EINVAL;
277
278         for (i = 0; i < ARRAY_SIZE(ssbd_options); i++) {
279                 int len = strlen(ssbd_options[i].str);
280
281                 if (strncmp(buf, ssbd_options[i].str, len))
282                         continue;
283
284                 ssbd_state = ssbd_options[i].state;
285                 return 0;
286         }
287
288         return -EINVAL;
289 }
290 early_param("ssbd", ssbd_cfg);
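/*
 * Accepted "ssbd=" command-line values, matched by prefix against the table
 * above: "force-on", "force-off" and "kernel" (the default).
 */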
291
292 void __init arm64_update_smccc_conduit(struct alt_instr *alt,
293                                        __le32 *origptr, __le32 *updptr,
294                                        int nr_inst)
295 {
296         u32 insn;
297
298         BUG_ON(nr_inst != 1);
299
300         switch (psci_ops.conduit) {
301         case PSCI_CONDUIT_HVC:
302                 insn = aarch64_insn_get_hvc_value();
303                 break;
304         case PSCI_CONDUIT_SMC:
305                 insn = aarch64_insn_get_smc_value();
306                 break;
307         default:
308                 return;
309         }
310
311         *updptr = cpu_to_le32(insn);
312 }
313
314 void __init arm64_enable_wa2_handling(struct alt_instr *alt,
315                                       __le32 *origptr, __le32 *updptr,
316                                       int nr_inst)
317 {
318         BUG_ON(nr_inst != 1);
319         /*
320          * Only allow mitigation on EL1 entry/exit and guest
321          * ARCH_WORKAROUND_2 handling if the SSBD state allows it to
322          * be flipped.
323          */
324         if (arm64_get_ssbd_state() == ARM64_SSBD_KERNEL)
325                 *updptr = cpu_to_le32(aarch64_insn_gen_nop());
326 }
327
328 void arm64_set_ssbd_mitigation(bool state)
329 {
330         if (!IS_ENABLED(CONFIG_ARM64_SSBD)) {
331                 pr_info_once("SSBD disabled by kernel configuration\n");
332                 return;
333         }
334
335         if (this_cpu_has_cap(ARM64_SSBS)) {
336                 if (state)
337                         asm volatile(SET_PSTATE_SSBS(0));
338                 else
339                         asm volatile(SET_PSTATE_SSBS(1));
340                 return;
341         }
342
343         switch (psci_ops.conduit) {
344         case PSCI_CONDUIT_HVC:
345                 arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
346                 break;
347
348         case PSCI_CONDUIT_SMC:
349                 arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
350                 break;
351
352         default:
353                 WARN_ON_ONCE(1);
354                 break;
355         }
356 }
357
358 static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
359                                     int scope)
360 {
361         struct arm_smccc_res res;
362         bool required = true;
363         s32 val;
364         bool this_cpu_safe = false;
365
366         WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
367
368         if (cpu_mitigations_off())
369                 ssbd_state = ARM64_SSBD_FORCE_DISABLE;
370
371         /* delay setting __ssb_safe until we get a firmware response */
372         if (is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list))
373                 this_cpu_safe = true;
374
375         if (this_cpu_has_cap(ARM64_SSBS)) {
376                 if (!this_cpu_safe)
377                         __ssb_safe = false;
378                 required = false;
379                 goto out_printmsg;
380         }
381
382         if (psci_ops.smccc_version == SMCCC_VERSION_1_0) {
383                 ssbd_state = ARM64_SSBD_UNKNOWN;
384                 if (!this_cpu_safe)
385                         __ssb_safe = false;
386                 return false;
387         }
388
389         switch (psci_ops.conduit) {
390         case PSCI_CONDUIT_HVC:
391                 arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
392                                   ARM_SMCCC_ARCH_WORKAROUND_2, &res);
393                 break;
394
395         case PSCI_CONDUIT_SMC:
396                 arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
397                                   ARM_SMCCC_ARCH_WORKAROUND_2, &res);
398                 break;
399
400         default:
401                 ssbd_state = ARM64_SSBD_UNKNOWN;
402                 if (!this_cpu_safe)
403                         __ssb_safe = false;
404                 return false;
405         }
406
407         val = (s32)res.a0;
408
409         switch (val) {
410         case SMCCC_RET_NOT_SUPPORTED:
411                 ssbd_state = ARM64_SSBD_UNKNOWN;
412                 if (!this_cpu_safe)
413                         __ssb_safe = false;
414                 return false;
415
416         /* machines with mixed mitigation requirements must not return this */
417         case SMCCC_RET_NOT_REQUIRED:
418                 pr_info_once("%s mitigation not required\n", entry->desc);
419                 ssbd_state = ARM64_SSBD_MITIGATED;
420                 return false;
421
422         case SMCCC_RET_SUCCESS:
423                 __ssb_safe = false;
424                 required = true;
425                 break;
426
427         case 1: /* Mitigation not required on this CPU */
428                 required = false;
429                 break;
430
431         default:
432                 WARN_ON(1);
433                 if (!this_cpu_safe)
434                         __ssb_safe = false;
435                 return false;
436         }
437
438         switch (ssbd_state) {
439         case ARM64_SSBD_FORCE_DISABLE:
440                 arm64_set_ssbd_mitigation(false);
441                 required = false;
442                 break;
443
444         case ARM64_SSBD_KERNEL:
445                 if (required) {
446                         __this_cpu_write(arm64_ssbd_callback_required, 1);
447                         arm64_set_ssbd_mitigation(true);
448                 }
449                 break;
450
451         case ARM64_SSBD_FORCE_ENABLE:
452                 arm64_set_ssbd_mitigation(true);
453                 required = true;
454                 break;
455
456         default:
457                 WARN_ON(1);
458                 break;
459         }
460
461 out_printmsg:
462         switch (ssbd_state) {
463         case ARM64_SSBD_FORCE_DISABLE:
464                 pr_info_once("%s disabled from command-line\n", entry->desc);
465                 break;
466
467         case ARM64_SSBD_FORCE_ENABLE:
468                 pr_info_once("%s forced from command-line\n", entry->desc);
469                 break;
470         }
471
472         return required;
473 }
474
475 /* known invulnerable cores */
476 static const struct midr_range arm64_ssb_cpus[] = {
477         MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
478         MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
479         MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
480         {},
481 };
482
483 #define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)       \
484         .matches = is_affected_midr_range,                      \
485         .midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)
486
487 #define CAP_MIDR_ALL_VERSIONS(model)                                    \
488         .matches = is_affected_midr_range,                              \
489         .midr_range = MIDR_ALL_VERSIONS(model)
490
491 #define MIDR_FIXED(rev, revidr_mask) \
492         .fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}
493
494 #define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max)            \
495         .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,                         \
496         CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)
497
498 #define CAP_MIDR_RANGE_LIST(list)                               \
499         .matches = is_affected_midr_range_list,                 \
500         .midr_range_list = list
501
502 /* Errata affecting a range of revisions of a given model variant */
503 #define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max)      \
504         ERRATA_MIDR_RANGE(m, var, r_min, var, r_max)
505
506 /* Errata affecting a single variant/revision of a model */
507 #define ERRATA_MIDR_REV(model, var, rev)        \
508         ERRATA_MIDR_RANGE(model, var, rev, var, rev)
509
510 /* Errata affecting all variants/revisions of a given model */
511 #define ERRATA_MIDR_ALL_VERSIONS(model)                         \
512         .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,                 \
513         CAP_MIDR_ALL_VERSIONS(model)
514
515 /* Errata affecting a list of MIDR ranges, with the same workaround */
516 #define ERRATA_MIDR_RANGE_LIST(midr_list)                       \
517         .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,                 \
518         CAP_MIDR_RANGE_LIST(midr_list)
519
520 /* Track overall mitigation state. We are only mitigated if all cores are ok */
521 static bool __hardenbp_enab = true;
522 static bool __spectrev2_safe = true;
523
524 /*
525  * List of CPUs that do not need any Spectre-v2 mitigation at all.
526  */
527 static const struct midr_range spectre_v2_safe_list[] = {
528         MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
529         MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
530         MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
531         { /* sentinel */ }
532 };
533
534 /*
535  * Track overall bp hardening for all heterogeneous cores in the machine.
536  * We are only considered "safe" if all booted cores are known safe.
537  */
538 static bool __maybe_unused
539 check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
540 {
541         int need_wa;
542
543         WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
544
545         /* If the CPU has CSV2 set, we're safe */
546         if (cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64PFR0_EL1),
547                                                  ID_AA64PFR0_CSV2_SHIFT))
548                 return false;
549
550         /* Alternatively, we have a list of unaffected CPUs */
551         if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
552                 return false;
553
554         /* Fall back to firmware detection */
555         need_wa = detect_harden_bp_fw();
556         if (!need_wa)
557                 return false;
558
559         __spectrev2_safe = false;
560
561         if (!IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR)) {
562                 pr_warn_once("spectrev2 mitigation disabled by kernel configuration\n");
563                 __hardenbp_enab = false;
564                 return false;
565         }
566
567         /* forced off */
568         if (__nospectre_v2 || cpu_mitigations_off()) {
569                 pr_info_once("spectrev2 mitigation disabled by command line option\n");
570                 __hardenbp_enab = false;
571                 return false;
572         }
573
574         if (need_wa < 0) {
575                 pr_warn_once("ARM_SMCCC_ARCH_WORKAROUND_1 missing from firmware\n");
576                 __hardenbp_enab = false;
577         }
578
579         return (need_wa > 0);
580 }
581
582 #ifdef CONFIG_ARM64_ERRATUM_1742098
583 static struct midr_range broken_aarch32_aes[] = {
584         MIDR_RANGE(MIDR_CORTEX_A57, 0, 1, 0xf, 0xf),
585         MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
586         {},
587 };
588 #endif
589
590 const struct arm64_cpu_capabilities arm64_errata[] = {
591 #if     defined(CONFIG_ARM64_ERRATUM_826319) || \
592         defined(CONFIG_ARM64_ERRATUM_827319) || \
593         defined(CONFIG_ARM64_ERRATUM_824069)
594         {
595         /* Cortex-A53 r0p[012] */
596                 .desc = "ARM errata 826319, 827319, 824069",
597                 .capability = ARM64_WORKAROUND_CLEAN_CACHE,
598                 ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
599                 .cpu_enable = cpu_enable_cache_maint_trap,
600         },
601 #endif
602 #ifdef CONFIG_ARM64_ERRATUM_819472
603         {
604         /* Cortex-A53 r0p[01] */
605                 .desc = "ARM errata 819472",
606                 .capability = ARM64_WORKAROUND_CLEAN_CACHE,
607                 ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
608                 .cpu_enable = cpu_enable_cache_maint_trap,
609         },
610 #endif
611 #ifdef CONFIG_ARM64_ERRATUM_832075
612         {
613         /* Cortex-A57 r0p0 - r1p2 */
614                 .desc = "ARM erratum 832075",
615                 .capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
616                 ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
617                                   0, 0,
618                                   1, 2),
619         },
620 #endif
621 #ifdef CONFIG_ARM64_ERRATUM_834220
622         {
623         /* Cortex-A57 r0p0 - r1p2 */
624                 .desc = "ARM erratum 834220",
625                 .capability = ARM64_WORKAROUND_834220,
626                 ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
627                                   0, 0,
628                                   1, 2),
629         },
630 #endif
631 #ifdef CONFIG_ARM64_ERRATUM_845719
632         {
633         /* Cortex-A53 r0p[01234] */
634                 .desc = "ARM erratum 845719",
635                 .capability = ARM64_WORKAROUND_845719,
636                 ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
637         },
638 #endif
639 #ifdef CONFIG_CAVIUM_ERRATUM_23154
640         {
641         /* Cavium ThunderX, pass 1.x */
642                 .desc = "Cavium erratum 23154",
643                 .capability = ARM64_WORKAROUND_CAVIUM_23154,
644                 ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX, 0, 0, 1),
645         },
646 #endif
647 #ifdef CONFIG_CAVIUM_ERRATUM_27456
648         {
649         /* Cavium ThunderX, T88 pass 1.x - 2.1 */
650                 .desc = "Cavium erratum 27456",
651                 .capability = ARM64_WORKAROUND_CAVIUM_27456,
652                 ERRATA_MIDR_RANGE(MIDR_THUNDERX,
653                                   0, 0,
654                                   1, 1),
655         },
656         {
657         /* Cavium ThunderX, T81 pass 1.0 */
658                 .desc = "Cavium erratum 27456",
659                 .capability = ARM64_WORKAROUND_CAVIUM_27456,
660                 ERRATA_MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
661         },
662 #endif
663 #ifdef CONFIG_CAVIUM_ERRATUM_30115
664         {
665         /* Cavium ThunderX, T88 pass 1.x - 2.2 */
666                 .desc = "Cavium erratum 30115",
667                 .capability = ARM64_WORKAROUND_CAVIUM_30115,
668                 ERRATA_MIDR_RANGE(MIDR_THUNDERX,
669                                       0, 0,
670                                       1, 2),
671         },
672         {
673         /* Cavium ThunderX, T81 pass 1.0 - 1.2 */
674                 .desc = "Cavium erratum 30115",
675                 .capability = ARM64_WORKAROUND_CAVIUM_30115,
676                 ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2),
677         },
678         {
679         /* Cavium ThunderX, T83 pass 1.0 */
680                 .desc = "Cavium erratum 30115",
681                 .capability = ARM64_WORKAROUND_CAVIUM_30115,
682                 ERRATA_MIDR_REV(MIDR_THUNDERX_83XX, 0, 0),
683         },
684 #endif
685         {
686                 .desc = "Mismatched cache line size",
687                 .capability = ARM64_MISMATCHED_CACHE_LINE_SIZE,
688                 .matches = has_mismatched_cache_type,
689                 .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
690                 .cpu_enable = cpu_enable_trap_ctr_access,
691         },
692         {
693                 .desc = "Mismatched cache type",
694                 .capability = ARM64_MISMATCHED_CACHE_TYPE,
695                 .matches = has_mismatched_cache_type,
696                 .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
697                 .cpu_enable = cpu_enable_trap_ctr_access,
698         },
699 #ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
700         {
701                 .desc = "Qualcomm Technologies Falkor erratum 1003",
702                 .capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
703                 ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
704         },
705         {
706                 .desc = "Qualcomm Technologies Kryo erratum 1003",
707                 .capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
708                 .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
709                 .midr_range.model = MIDR_QCOM_KRYO,
710                 .matches = is_kryo_midr,
711         },
712 #endif
713 #ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
714         {
715                 .desc = "Qualcomm Technologies Falkor erratum 1009",
716                 .capability = ARM64_WORKAROUND_REPEAT_TLBI,
717                 ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
718         },
719 #endif
720 #ifdef CONFIG_ARM64_ERRATUM_858921
721         {
722         /* Cortex-A73 all versions */
723                 .desc = "ARM erratum 858921",
724                 .capability = ARM64_WORKAROUND_858921,
725                 ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
726         },
727 #endif
728         {
729                 .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
730                 .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
731                 .matches = check_branch_predictor,
732         },
733         {
734                 .desc = "Speculative Store Bypass Disable",
735                 .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
736                 .capability = ARM64_SSBD,
737                 .matches = has_ssbd_mitigation,
738                 .midr_range_list = arm64_ssb_cpus,
739         },
740 #ifdef CONFIG_ARM64_ERRATUM_1188873
741         {
742                 /* Cortex-A76 r0p0 to r2p0 */
743                 .desc = "ARM erratum 1188873",
744                 .capability = ARM64_WORKAROUND_1188873,
745                 ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
746         },
747 #endif
748         {
749                 .desc = "Spectre-BHB",
750                 .capability = ARM64_SPECTRE_BHB,
751                 .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
752                 .matches = is_spectre_bhb_affected,
753                 .cpu_enable = spectre_bhb_enable_mitigation,
754         },
755 #ifdef CONFIG_ARM64_ERRATUM_1742098
756         {
757                 .desc = "ARM erratum 1742098",
758                 .capability = ARM64_WORKAROUND_1742098,
759                 CAP_MIDR_RANGE_LIST(broken_aarch32_aes),
760                 .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
761         },
762 #endif
763         {
764         }
765 };
766
767 ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
768                             char *buf)
769 {
770         return sprintf(buf, "Mitigation: __user pointer sanitization\n");
771 }
772
773 static const char *get_bhb_affected_string(enum mitigation_state bhb_state)
774 {
775         switch (bhb_state) {
776         case SPECTRE_UNAFFECTED:
777                 return "";
778         default:
779         case SPECTRE_VULNERABLE:
780                 return ", but not BHB";
781         case SPECTRE_MITIGATED:
782                 return ", BHB";
783         }
784 }
785
786 ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
787                 char *buf)
788 {
789         enum mitigation_state bhb_state = arm64_get_spectre_bhb_state();
790         const char *bhb_str = get_bhb_affected_string(bhb_state);
791         const char *v2_str = "Branch predictor hardening";
792
793         if (__spectrev2_safe) {
794                 if (bhb_state == SPECTRE_UNAFFECTED)
795                         return sprintf(buf, "Not affected\n");
796
797                 /*
798                  * Platforms affected by Spectre-BHB can't report
799                  * "Not affected" for Spectre-v2.
800                  */
801                 v2_str = "CSV2";
802         }
803
804         if (__hardenbp_enab)
805                 return sprintf(buf, "Mitigation: %s%s\n", v2_str, bhb_str);
806
807         return sprintf(buf, "Vulnerable\n");
808 }
809
810 ssize_t cpu_show_spec_store_bypass(struct device *dev,
811                 struct device_attribute *attr, char *buf)
812 {
813         if (__ssb_safe)
814                 return sprintf(buf, "Not affected\n");
815
816         switch (ssbd_state) {
817         case ARM64_SSBD_KERNEL:
818         case ARM64_SSBD_FORCE_ENABLE:
819                 if (IS_ENABLED(CONFIG_ARM64_SSBD))
820                         return sprintf(buf,
821                             "Mitigation: Speculative Store Bypass disabled via prctl\n");
822         }
823
824         return sprintf(buf, "Vulnerable\n");
825 }
826
827 /*
828  * We try to ensure that the mitigation state can never change as the result of
829  * onlining a late CPU.
830  */
831 static void update_mitigation_state(enum mitigation_state *oldp,
832                                     enum mitigation_state new)
833 {
834         enum mitigation_state state;
835
836         do {
837                 state = READ_ONCE(*oldp);
838                 if (new <= state)
839                         break;
840         } while (cmpxchg_relaxed(oldp, state, new) != state);
841 }
842
843 /*
844  * Spectre BHB.
845  *
846  * A CPU is either:
847  * - Mitigated by a branchy loop run a CPU-specific number of times, and
848  *   listed in our "loop mitigated list".
849  * - Mitigated in software by the firmware Spectre v2 call.
850  * - Has the ClearBHB instruction to perform the mitigation.
851  * - Has the 'Exception Clears Branch History Buffer' (ECBHB) feature, so no
852  *   software mitigation in the vectors is needed.
853  * - Has CSV2.3, so is unaffected.
854  */
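/*
 * spectre_bhb_enable_mitigation() below selects between these options for
 * each CPU.
 */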
855 static enum mitigation_state spectre_bhb_state;
856
857 enum mitigation_state arm64_get_spectre_bhb_state(void)
858 {
859         return spectre_bhb_state;
860 }
861
862 /*
863  * This must be called with SCOPE_LOCAL_CPU for each type of CPU, before any
864  * SCOPE_SYSTEM call will give the right answer.
865  */
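/*
 * For example, a system mixing Cortex-A72 (k = 8) and Cortex-A76 (k = 24)
 * cores returns k = 24 for SCOPE_SYSTEM once each type has been checked
 * with SCOPE_LOCAL_CPU.
 */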
866 u8 spectre_bhb_loop_affected(int scope)
867 {
868         u8 k = 0;
869         static u8 max_bhb_k;
870
871         if (scope == SCOPE_LOCAL_CPU) {
872                 static const struct midr_range spectre_bhb_k32_list[] = {
873                         MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
874                         MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
875                         MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
876                         MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
877                         MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
878                         MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
879                         MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
880                         {},
881                 };
882                 static const struct midr_range spectre_bhb_k24_list[] = {
883                         MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
884                         MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
885                         MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
886                         {},
887                 };
888                 static const struct midr_range spectre_bhb_k8_list[] = {
889                         MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
890                         MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
891                         {},
892                 };
893
894                 if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list))
895                         k = 32;
896                 else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list))
897                         k = 24;
898                 else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list))
899                         k =  8;
900
901                 max_bhb_k = max(max_bhb_k, k);
902         } else {
903                 k = max_bhb_k;
904         }
905
906         return k;
907 }
908
909 static enum mitigation_state spectre_bhb_get_cpu_fw_mitigation_state(void)
910 {
911         int ret;
912         struct arm_smccc_res res;
913
914         if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
915                 return SPECTRE_VULNERABLE;
916
917         switch (psci_ops.conduit) {
918         case PSCI_CONDUIT_HVC:
919                 arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
920                                   ARM_SMCCC_ARCH_WORKAROUND_3, &res);
921                 break;
922
923         case PSCI_CONDUIT_SMC:
924                 arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
925                                   ARM_SMCCC_ARCH_WORKAROUND_3, &res);
926                 break;
927
928         default:
929                 return SPECTRE_VULNERABLE;
930         }
931
932         ret = res.a0;
933         switch (ret) {
934         case SMCCC_RET_SUCCESS:
935                 return SPECTRE_MITIGATED;
936         case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
937                 return SPECTRE_UNAFFECTED;
938         default:
939         case SMCCC_RET_NOT_SUPPORTED:
940                 return SPECTRE_VULNERABLE;
941         }
942 }
943
944 static bool is_spectre_bhb_fw_affected(int scope)
945 {
946         static bool system_affected;
947         enum mitigation_state fw_state;
948         bool has_smccc = (psci_ops.smccc_version >= SMCCC_VERSION_1_1);
949         static const struct midr_range spectre_bhb_firmware_mitigated_list[] = {
950                 MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
951                 MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
952                 {},
953         };
954         bool cpu_in_list = is_midr_in_range_list(read_cpuid_id(),
955                                          spectre_bhb_firmware_mitigated_list);
956
957         if (scope != SCOPE_LOCAL_CPU)
958                 return system_affected;
959
960         fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
961         if (cpu_in_list || (has_smccc && fw_state == SPECTRE_MITIGATED)) {
962                 system_affected = true;
963                 return true;
964         }
965
966         return false;
967 }
968
969 static bool supports_ecbhb(int scope)
970 {
971         u64 mmfr1;
972
973         if (scope == SCOPE_LOCAL_CPU)
974                 mmfr1 = read_sysreg_s(SYS_ID_AA64MMFR1_EL1);
975         else
976                 mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
977
978         return cpuid_feature_extract_unsigned_field(mmfr1,
979                                                     ID_AA64MMFR1_ECBHB_SHIFT);
980 }
981
982 bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
983                              int scope)
984 {
985         WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
986
987         if (supports_csv2p3(scope))
988                 return false;
989
990         if (supports_clearbhb(scope))
991                 return true;
992
993         if (spectre_bhb_loop_affected(scope))
994                 return true;
995
996         if (is_spectre_bhb_fw_affected(scope))
997                 return true;
998
999         return false;
1000 }
1001
1002 static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
1003 {
1004         const char *v = arm64_get_bp_hardening_vector(slot);
1005
1006         if (slot < 0)
1007                 return;
1008
1009         __this_cpu_write(this_cpu_vector, v);
1010
1011         /*
1012          * When KPTI is in use, the vectors are switched when exiting to
1013          * user-space.
1014          */
1015         if (arm64_kernel_unmapped_at_el0())
1016                 return;
1017
1018         write_sysreg(v, vbar_el1);
1019         isb();
1020 }
1021
1022 #ifdef CONFIG_KVM
1023 static const char *kvm_bhb_get_vecs_end(const char *start)
1024 {
1025         if (start == __smccc_workaround_3_smc_start)
1026                 return __smccc_workaround_3_smc_end;
1027         else if (start == __spectre_bhb_loop_k8_start)
1028                 return __spectre_bhb_loop_k8_end;
1029         else if (start == __spectre_bhb_loop_k24_start)
1030                 return __spectre_bhb_loop_k24_end;
1031         else if (start == __spectre_bhb_loop_k32_start)
1032                 return __spectre_bhb_loop_k32_end;
1033         else if (start == __spectre_bhb_clearbhb_start)
1034                 return __spectre_bhb_clearbhb_end;
1035
1036         return NULL;
1037 }
1038
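/*
 * Like install_bp_hardening_cb(), but keyed on the vector template rather
 * than the callback: reuse a slot already populated with this template on
 * another CPU, or populate a new one.
 */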
1039 static void kvm_setup_bhb_slot(const char *hyp_vecs_start)
1040 {
1041         int cpu, slot = -1;
1042         const char *hyp_vecs_end;
1043
1044         if (!IS_ENABLED(CONFIG_KVM) || !is_hyp_mode_available())
1045                 return;
1046
1047         hyp_vecs_end = kvm_bhb_get_vecs_end(hyp_vecs_start);
1048         if (WARN_ON_ONCE(!hyp_vecs_start || !hyp_vecs_end))
1049                 return;
1050
1051         spin_lock(&bp_lock);
1052         for_each_possible_cpu(cpu) {
1053                 if (per_cpu(bp_hardening_data.template_start, cpu) == hyp_vecs_start) {
1054                         slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
1055                         break;
1056                 }
1057         }
1058
1059         if (slot == -1) {
1060                 last_slot++;
1061                 BUG_ON(((__bp_harden_hyp_vecs_end - __bp_harden_hyp_vecs_start)
1062                         / SZ_2K) <= last_slot);
1063                 slot = last_slot;
1064                 __copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
1065         }
1066
1067         if (hyp_vecs_start != __this_cpu_read(bp_hardening_data.template_start)) {
1068                 __this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
1069                 __this_cpu_write(bp_hardening_data.template_start,
1070                                  hyp_vecs_start);
1071         }
1072         spin_unlock(&bp_lock);
1073 }
1074 #else
1075 #define __smccc_workaround_3_smc_start NULL
1076 #define __spectre_bhb_loop_k8_start NULL
1077 #define __spectre_bhb_loop_k24_start NULL
1078 #define __spectre_bhb_loop_k32_start NULL
1079 #define __spectre_bhb_clearbhb_start NULL
1080
1081 static void kvm_setup_bhb_slot(const char *hyp_vecs_start) { };
1082 #endif
1083
1084 void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
1085 {
1086         enum mitigation_state fw_state, state = SPECTRE_VULNERABLE;
1087
1088         if (!is_spectre_bhb_affected(entry, SCOPE_LOCAL_CPU))
1089                 return;
1090
1091         if (!__spectrev2_safe &&  !__hardenbp_enab) {
1092                 /* No point mitigating Spectre-BHB alone. */
1093         } else if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY)) {
1094                 pr_info_once("spectre-bhb mitigation disabled by compile time option\n");
1095         } else if (cpu_mitigations_off()) {
1096                 pr_info_once("spectre-bhb mitigation disabled by command line option\n");
1097         } else if (supports_ecbhb(SCOPE_LOCAL_CPU)) {
1098                 state = SPECTRE_MITIGATED;
1099         } else if (supports_clearbhb(SCOPE_LOCAL_CPU)) {
1100                 kvm_setup_bhb_slot(__spectre_bhb_clearbhb_start);
1101                 this_cpu_set_vectors(EL1_VECTOR_BHB_CLEAR_INSN);
1102
1103                 state = SPECTRE_MITIGATED;
1104         } else if (spectre_bhb_loop_affected(SCOPE_LOCAL_CPU)) {
1105                 switch (spectre_bhb_loop_affected(SCOPE_SYSTEM)) {
1106                 case 8:
1107                         /*
1108                          * A57/A72-r0 will already have selected the
1109                          * spectre-indirect vector, which is sufficient
1110                          * for BHB too.
1111                          */
1112                         if (!__this_cpu_read(bp_hardening_data.fn))
1113                                 kvm_setup_bhb_slot(__spectre_bhb_loop_k8_start);
1114                         break;
1115                 case 24:
1116                         kvm_setup_bhb_slot(__spectre_bhb_loop_k24_start);
1117                         break;
1118                 case 32:
1119                         kvm_setup_bhb_slot(__spectre_bhb_loop_k32_start);
1120                         break;
1121                 default:
1122                         WARN_ON_ONCE(1);
1123                 }
1124                 this_cpu_set_vectors(EL1_VECTOR_BHB_LOOP);
1125
1126                 state = SPECTRE_MITIGATED;
1127         } else if (is_spectre_bhb_fw_affected(SCOPE_LOCAL_CPU)) {
1128                 fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
1129                 if (fw_state == SPECTRE_MITIGATED) {
1130                         kvm_setup_bhb_slot(__smccc_workaround_3_smc_start);
1131                         this_cpu_set_vectors(EL1_VECTOR_BHB_FW);
1132
1133                         /*
1134                          * With WA3 in the vectors, the WA1 calls can be
1135                          * removed.
1136                          */
1137                         __this_cpu_write(bp_hardening_data.fn, NULL);
1138
1139                         state = SPECTRE_MITIGATED;
1140                 }
1141         }
1142
1143         update_mitigation_state(&spectre_bhb_state, state);
1144 }
1145
1146 /* Patched to correct the immediate */
1147 void __init spectre_bhb_patch_loop_iter(struct alt_instr *alt,
1148                                         __le32 *origptr, __le32 *updptr, int nr_inst)
1149 {
1150         u8 rd;
1151         u32 insn;
1152         u16 loop_count = spectre_bhb_loop_affected(SCOPE_SYSTEM);
1153
1154         BUG_ON(nr_inst != 1); /* MOV -> MOV */
1155
1156         if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY))
1157                 return;
1158
1159         insn = le32_to_cpu(*origptr);
1160         rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);
1161         insn = aarch64_insn_gen_movewide(rd, loop_count, 0,
1162                                          AARCH64_INSN_VARIANT_64BIT,
1163                                          AARCH64_INSN_MOVEWIDE_ZERO);
1164         *updptr++ = cpu_to_le32(insn);
1165 }