GNU Linux-libre 4.9.332-gnu1
arch/arm64/kernel/cpu_errata.c
/*
 * Contains CPU specific errata definitions
 *
 * Copyright (C) 2014 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/arm-smccc.h>
#include <linux/psci.h>
#include <linux/types.h>
#include <asm/cachetype.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>
#include <asm/vectors.h>

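/*
 * MIDR-based matchers. Both helpers read MIDR_EL1 on the calling CPU, so
 * they must be invoked with SCOPE_LOCAL_CPU and with preemption disabled.
 */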
static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
{
        u32 midr = read_cpuid_id();

        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
        return is_midr_in_range(midr, &entry->midr_range);
}

static bool __maybe_unused
is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry,
                            int scope)
{
        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
        return is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list);
}

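/*
 * Compare this CPU's cache type register (CTR_EL0) against the system-wide
 * safe value. ARM64_MISMATCHED_CACHE_LINE_SIZE checks only the minimum
 * line-size fields; ARM64_MISMATCHED_CACHE_TYPE checks the remaining
 * strict fields instead.
 */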
static bool
has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
                          int scope)
{
        u64 mask = CTR_CACHE_MINLINE_MASK;

        /* Skip matching the min line sizes for cache type check */
        if (entry->capability == ARM64_MISMATCHED_CACHE_TYPE)
                mask ^= arm64_ftr_reg_ctrel0.strict_mask;

        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
        return (read_cpuid_cachetype() & mask) !=
               (arm64_ftr_reg_ctrel0.sys_val & mask);
}

static void
cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused)
{
        /* Clear SCTLR_EL1.UCT */
        config_sctlr_el1(SCTLR_EL1_UCT, 0);
}

#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

static bool __hardenbp_enab;
DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

#ifdef CONFIG_KVM
extern char __smccc_workaround_1_smc_start[];
extern char __smccc_workaround_1_smc_end[];
extern char __smccc_workaround_1_hvc_start[];
extern char __smccc_workaround_1_hvc_end[];
extern char __smccc_workaround_3_smc_start[];
extern char __smccc_workaround_3_smc_end[];
extern char __spectre_bhb_loop_k8_start[];
extern char __spectre_bhb_loop_k8_end[];
extern char __spectre_bhb_loop_k24_start[];
extern char __spectre_bhb_loop_k24_end[];
extern char __spectre_bhb_loop_k32_start[];
extern char __spectre_bhb_loop_k32_end[];
extern char __spectre_bhb_clearbhb_start[];
extern char __spectre_bhb_clearbhb_end[];

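/*
 * Copy a branch-predictor hardening sequence into one 2K hypervisor vector
 * slot, replicating it for each of the sixteen 128-byte vector entries,
 * then flush the corresponding I-cache range.
 */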
static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
                                const char *hyp_vecs_end)
{
        void *dst = __bp_harden_hyp_vecs_start + slot * SZ_2K;
        int i;

        for (i = 0; i < SZ_2K; i += 0x80)
                memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);

        flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
}

static DEFINE_SPINLOCK(bp_lock);
static int last_slot = -1;

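/*
 * Install @fn as this CPU's hardening callback. A hypervisor vector slot is
 * shared with any CPU that already uses the same callback; otherwise a new
 * slot is allocated and populated from the template.
 */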
static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
                                      const char *hyp_vecs_start,
                                      const char *hyp_vecs_end)
{
        int cpu, slot = -1;

        spin_lock(&bp_lock);
        for_each_possible_cpu(cpu) {
                if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
                        slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
                        break;
                }
        }

        if (slot == -1) {
                last_slot++;
                BUG_ON(((__bp_harden_hyp_vecs_end - __bp_harden_hyp_vecs_start)
                        / SZ_2K) <= last_slot);
                slot = last_slot;
                __copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
        }

        __this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
        __this_cpu_write(bp_hardening_data.fn, fn);
        __this_cpu_write(bp_hardening_data.template_start, hyp_vecs_start);
        __hardenbp_enab = true;
        spin_unlock(&bp_lock);
}
#else
#define __smccc_workaround_1_smc_start          NULL
#define __smccc_workaround_1_smc_end            NULL
#define __smccc_workaround_1_hvc_start          NULL
#define __smccc_workaround_1_hvc_end            NULL

static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
                                      const char *hyp_vecs_start,
                                      const char *hyp_vecs_end)
{
        __this_cpu_write(bp_hardening_data.fn, fn);
        __hardenbp_enab = true;
}
#endif  /* CONFIG_KVM */

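/*
 * Install the callback only if this CPU matches the erratum entry and does
 * not already advertise CSV2 in ID_AA64PFR0_EL1, in which case no software
 * hardening is needed.
 */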
static void install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry,
                                    bp_hardening_cb_t fn,
                                    const char *hyp_vecs_start,
                                    const char *hyp_vecs_end)
{
        u64 pfr0;

        if (!entry->matches(entry, SCOPE_LOCAL_CPU))
                return;

        pfr0 = read_cpuid(ID_AA64PFR0_EL1);
        if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
                return;

        __install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end);
}

#include <uapi/linux/psci.h>

static void call_smc_arch_workaround_1(void)
{
        arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

static void call_hvc_arch_workaround_1(void)
{
        arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

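/*
 * Probe firmware for ARM_SMCCC_ARCH_WORKAROUND_1 over the PSCI conduit and,
 * if it is implemented, install the matching HVC or SMC callback as the
 * branch-predictor hardening routine for this CPU.
 */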
static void
enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
{
        bp_hardening_cb_t cb;
        void *smccc_start, *smccc_end;
        struct arm_smccc_res res;

        if (!entry->matches(entry, SCOPE_LOCAL_CPU))
                return;

        if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
                return;

        switch (psci_ops.conduit) {
        case PSCI_CONDUIT_HVC:
                arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                                  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
                if ((int)res.a0 < 0)
                        return;
                cb = call_hvc_arch_workaround_1;
                smccc_start = __smccc_workaround_1_hvc_start;
                smccc_end = __smccc_workaround_1_hvc_end;
                break;

        case PSCI_CONDUIT_SMC:
                arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                                  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
                if ((int)res.a0 < 0)
                        return;
                cb = call_smc_arch_workaround_1;
                smccc_start = __smccc_workaround_1_smc_start;
                smccc_end = __smccc_workaround_1_smc_end;
                break;

        default:
                return;
        }

        install_bp_hardening_cb(entry, cb, smccc_start, smccc_end);

        return;
}
#endif  /* CONFIG_HARDEN_BRANCH_PREDICTOR */

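/*
 * Alternative-instruction callback: patch the call site with an HVC or SMC
 * instruction depending on which conduit PSCI uses. If no conduit is set,
 * the site is left unpatched.
 */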
void __init arm64_update_smccc_conduit(struct alt_instr *alt,
                                       __le32 *origptr, __le32 *updptr,
                                       int nr_inst)
{
        u32 insn;

        BUG_ON(nr_inst != 1);

        switch (psci_ops.conduit) {
        case PSCI_CONDUIT_HVC:
                insn = aarch64_insn_get_hvc_value();
                break;
        case PSCI_CONDUIT_SMC:
                insn = aarch64_insn_get_smc_value();
                break;
        default:
                return;
        }

        *updptr = cpu_to_le32(insn);
}

#ifdef CONFIG_ARM64_SSBD
DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);

int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;

static const struct ssbd_options {
        const char      *str;
        int             state;
} ssbd_options[] = {
        { "force-on",   ARM64_SSBD_FORCE_ENABLE, },
        { "force-off",  ARM64_SSBD_FORCE_DISABLE, },
        { "kernel",     ARM64_SSBD_KERNEL, },
};

static int __init ssbd_cfg(char *buf)
{
        int i;

        if (!buf || !buf[0])
                return -EINVAL;

        for (i = 0; i < ARRAY_SIZE(ssbd_options); i++) {
                int len = strlen(ssbd_options[i].str);

                if (strncmp(buf, ssbd_options[i].str, len))
                        continue;

                ssbd_state = ssbd_options[i].state;
                return 0;
        }

        return -EINVAL;
}
early_param("ssbd", ssbd_cfg);

void __init arm64_enable_wa2_handling(struct alt_instr *alt,
                                      __le32 *origptr, __le32 *updptr,
                                      int nr_inst)
{
        BUG_ON(nr_inst != 1);
        /*
         * Only allow mitigation on EL1 entry/exit and guest
         * ARCH_WORKAROUND_2 handling if the SSBD state allows it to
         * be flipped.
         */
        if (arm64_get_ssbd_state() == ARM64_SSBD_KERNEL)
                *updptr = cpu_to_le32(aarch64_insn_gen_nop());
}

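/* Ask firmware to turn the SSBD mitigation on or off for this CPU. */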
void arm64_set_ssbd_mitigation(bool state)
{
        switch (psci_ops.conduit) {
        case PSCI_CONDUIT_HVC:
                arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
                break;

        case PSCI_CONDUIT_SMC:
                arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
                break;

        default:
                WARN_ON_ONCE(1);
                break;
        }
}

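/*
 * Probe firmware for ARM_SMCCC_ARCH_WORKAROUND_2 and combine the per-CPU
 * answer with the "ssbd=" command-line policy to decide whether this CPU
 * needs the dynamic mitigation.
 */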
static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
                                int scope)
{
        struct arm_smccc_res res;
        bool required = true;
        s32 val;

        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

        if (psci_ops.smccc_version == SMCCC_VERSION_1_0) {
                ssbd_state = ARM64_SSBD_UNKNOWN;
                return false;
        }

        switch (psci_ops.conduit) {
        case PSCI_CONDUIT_HVC:
                arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                                  ARM_SMCCC_ARCH_WORKAROUND_2, &res);
                break;

        case PSCI_CONDUIT_SMC:
                arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                                  ARM_SMCCC_ARCH_WORKAROUND_2, &res);
                break;

        default:
                ssbd_state = ARM64_SSBD_UNKNOWN;
                return false;
        }

        val = (s32)res.a0;

        switch (val) {
        case SMCCC_RET_NOT_SUPPORTED:
                ssbd_state = ARM64_SSBD_UNKNOWN;
                return false;

        case SMCCC_RET_NOT_REQUIRED:
                pr_info_once("%s mitigation not required\n", entry->desc);
                ssbd_state = ARM64_SSBD_MITIGATED;
                return false;

        case SMCCC_RET_SUCCESS:
                required = true;
                break;

        case 1: /* Mitigation not required on this CPU */
                required = false;
                break;

        default:
                WARN_ON(1);
                return false;
        }

        switch (ssbd_state) {
        case ARM64_SSBD_FORCE_DISABLE:
                pr_info_once("%s disabled from command-line\n", entry->desc);
                arm64_set_ssbd_mitigation(false);
                required = false;
                break;

        case ARM64_SSBD_KERNEL:
                if (required) {
                        __this_cpu_write(arm64_ssbd_callback_required, 1);
                        arm64_set_ssbd_mitigation(true);
                }
                break;

        case ARM64_SSBD_FORCE_ENABLE:
                pr_info_once("%s forced from command-line\n", entry->desc);
                arm64_set_ssbd_mitigation(true);
                required = true;
                break;

        default:
                WARN_ON(1);
                break;
        }

        return required;
}
#endif  /* CONFIG_ARM64_SSBD */

#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)       \
        .matches = is_affected_midr_range,                      \
        .midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_ALL_VERSIONS(model)                                    \
        .matches = is_affected_midr_range,                              \
        .midr_range = MIDR_ALL_VERSIONS(model)

#define MIDR_FIXED(rev, revidr_mask) \
        .fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}

#define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max)            \
        .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,                         \
        CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_RANGE_LIST(list)                               \
        .matches = is_affected_midr_range_list,                 \
        .midr_range_list = list

/* Errata affecting a range of revisions of a given model variant */
#define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max)      \
        ERRATA_MIDR_RANGE(m, var, r_min, var, r_max)

/* Errata affecting a single variant/revision of a model */
#define ERRATA_MIDR_REV(model, var, rev)        \
        ERRATA_MIDR_RANGE(model, var, rev, var, rev)

/* Errata affecting all variants/revisions of a given model */
#define ERRATA_MIDR_ALL_VERSIONS(model)                         \
        .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,                 \
        CAP_MIDR_ALL_VERSIONS(model)

/* Errata affecting a list of MIDR ranges that share the same workaround */
#define ERRATA_MIDR_RANGE_LIST(midr_list)                       \
        .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,                 \
        CAP_MIDR_RANGE_LIST(midr_list)

#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR

/*
 * List of CPUs where we need to issue a psci call to
 * harden the branch predictor.
 */
static const struct midr_range arm64_bp_harden_smccc_cpus[] = {
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
        MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
        MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
        {},
};

#endif

#ifdef CONFIG_ARM64_ERRATUM_1742098
static struct midr_range broken_aarch32_aes[] = {
        MIDR_RANGE(MIDR_CORTEX_A57, 0, 1, 0xf, 0xf),
        MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
        {},
};
#endif

const struct arm64_cpu_capabilities arm64_errata[] = {
#if     defined(CONFIG_ARM64_ERRATUM_826319) || \
        defined(CONFIG_ARM64_ERRATUM_827319) || \
        defined(CONFIG_ARM64_ERRATUM_824069)
        {
        /* Cortex-A53 r0p[012] */
                .desc = "ARM errata 826319, 827319, 824069",
                .capability = ARM64_WORKAROUND_CLEAN_CACHE,
                ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
                .cpu_enable = cpu_enable_cache_maint_trap,
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_819472
        {
        /* Cortex-A53 r0p[01] */
                .desc = "ARM errata 819472",
                .capability = ARM64_WORKAROUND_CLEAN_CACHE,
                ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
                .cpu_enable = cpu_enable_cache_maint_trap,
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_832075
        {
        /* Cortex-A57 r0p0 - r1p2 */
                .desc = "ARM erratum 832075",
                .capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
                ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
                                  0, 0,
                                  1, 2),
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_834220
        {
        /* Cortex-A57 r0p0 - r1p2 */
                .desc = "ARM erratum 834220",
                .capability = ARM64_WORKAROUND_834220,
                ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
                                  0, 0,
                                  1, 2),
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
        {
        /* Cortex-A53 r0p[01234] */
                .desc = "ARM erratum 845719",
                .capability = ARM64_WORKAROUND_845719,
                ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
        },
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23154
        {
        /* Cavium ThunderX, pass 1.x */
                .desc = "Cavium erratum 23154",
                .capability = ARM64_WORKAROUND_CAVIUM_23154,
                ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX, 0, 0, 1),
        },
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
        {
        /* Cavium ThunderX, T88 pass 1.x - 2.1 */
                .desc = "Cavium erratum 27456",
                .capability = ARM64_WORKAROUND_CAVIUM_27456,
                ERRATA_MIDR_RANGE(MIDR_THUNDERX,
                                  0, 0,
                                  1, 1),
        },
        {
        /* Cavium ThunderX, T81 pass 1.0 */
                .desc = "Cavium erratum 27456",
                .capability = ARM64_WORKAROUND_CAVIUM_27456,
                ERRATA_MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
        },
#endif
        {
                .desc = "Mismatched cache line size",
                .capability = ARM64_MISMATCHED_CACHE_LINE_SIZE,
                .matches = has_mismatched_cache_type,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .cpu_enable = cpu_enable_trap_ctr_access,
        },
        {
                .desc = "Mismatched cache type",
                .capability = ARM64_MISMATCHED_CACHE_TYPE,
                .matches = has_mismatched_cache_type,
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .cpu_enable = cpu_enable_trap_ctr_access,
        },
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
        {
                .capability = ARM64_HARDEN_BRANCH_PREDICTOR,
                ERRATA_MIDR_RANGE_LIST(arm64_bp_harden_smccc_cpus),
                .cpu_enable = enable_smccc_arch_workaround_1,
        },
#endif
#ifdef CONFIG_ARM64_SSBD
        {
                .desc = "Speculative Store Bypass Disable",
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .capability = ARM64_SSBD,
                .matches = has_ssbd_mitigation,
        },
#endif
#ifdef CONFIG_ARM64_ERRATUM_1188873
        {
                /* Cortex-A76 r0p0 to r2p0 */
                .desc = "ARM erratum 1188873",
                .capability = ARM64_WORKAROUND_1188873,
                ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
        },
#endif
        {
                .desc = "Spectre-BHB",
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
                .capability = ARM64_SPECTRE_BHB,
                .matches = is_spectre_bhb_affected,
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
                .cpu_enable = spectre_bhb_enable_mitigation,
#endif
        },
#ifdef CONFIG_ARM64_ERRATUM_1742098
        {
                .desc = "ARM erratum 1742098",
                .capability = ARM64_WORKAROUND_1742098,
                CAP_MIDR_RANGE_LIST(broken_aarch32_aes),
                .type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
        },
#endif
        {
        }
};

/*
 * We try to ensure that the mitigation state can never change as the result of
 * onlining a late CPU.
 */
static void __maybe_unused update_mitigation_state(enum mitigation_state *oldp,
                                                   enum mitigation_state new)
{
        enum mitigation_state state;

        do {
                state = READ_ONCE(*oldp);
                if (new <= state)
                        break;
        } while (cmpxchg_relaxed(oldp, state, new) != state);
}

/*
 * Spectre BHB.
 *
 * A CPU is either:
 * - Mitigated by a branchy loop a CPU specific number of times, and listed
 *   in our "loop mitigated list".
 * - Mitigated in software by the firmware Spectre v2 call.
 * - Has the ClearBHB instruction to perform the mitigation.
 * - Has the 'Exception Clears Branch History Buffer' (ECBHB) feature, so no
 *   software mitigation in the vectors is needed.
 * - Has CSV2.3, so is unaffected.
 */
static enum mitigation_state spectre_bhb_state;

enum mitigation_state arm64_get_spectre_bhb_state(void)
{
        return spectre_bhb_state;
}

/*
 * This must be called with SCOPE_LOCAL_CPU for each type of CPU, before any
 * SCOPE_SYSTEM call will give the right answer.
 */
u8 spectre_bhb_loop_affected(int scope)
{
        u8 k = 0;
        static u8 max_bhb_k;

        if (scope == SCOPE_LOCAL_CPU) {
                static const struct midr_range spectre_bhb_k32_list[] = {
                        MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
                        MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
                        MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
                        MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
                        MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
                        MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
                        MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
                        {},
                };
                static const struct midr_range spectre_bhb_k24_list[] = {
                        MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
                        MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
                        MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
                        {},
                };
                static const struct midr_range spectre_bhb_k8_list[] = {
                        MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
                        MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
                        {},
                };

                if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list))
                        k = 32;
                else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list))
                        k = 24;
                else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list))
                        k =  8;

                max_bhb_k = max(max_bhb_k, k);
        } else {
                k = max_bhb_k;
        }

        return k;
}

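/*
 * Ask firmware whether ARM_SMCCC_ARCH_WORKAROUND_3 is implemented for this
 * CPU and map the SMCCC return value onto a mitigation_state.
 */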
static enum mitigation_state spectre_bhb_get_cpu_fw_mitigation_state(void)
{
        int ret;
        struct arm_smccc_res res;

        if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
                return SPECTRE_VULNERABLE;

        switch (psci_ops.conduit) {
        case PSCI_CONDUIT_HVC:
                arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                                  ARM_SMCCC_ARCH_WORKAROUND_3, &res);
                break;

        case PSCI_CONDUIT_SMC:
                arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
                                  ARM_SMCCC_ARCH_WORKAROUND_3, &res);
                break;

        default:
                return SPECTRE_VULNERABLE;
        }

        ret = res.a0;
        switch (ret) {
        case SMCCC_RET_SUCCESS:
                return SPECTRE_MITIGATED;
        case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
                return SPECTRE_UNAFFECTED;
        default:
        case SMCCC_RET_NOT_SUPPORTED:
                return SPECTRE_VULNERABLE;
        }
}

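/*
 * A CPU is in the firmware-mitigated category if it is on the list below or
 * if firmware advertises ARM_SMCCC_ARCH_WORKAROUND_3 for it. The SCOPE_SYSTEM
 * answer is sticky: once one CPU is affected, the whole system is reported
 * as affected.
 */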
static bool is_spectre_bhb_fw_affected(int scope)
{
        static bool system_affected;
        enum mitigation_state fw_state;
        bool has_smccc = (psci_ops.smccc_version >= SMCCC_VERSION_1_1);
        static const struct midr_range spectre_bhb_firmware_mitigated_list[] = {
                MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
                MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
                {},
        };
        bool cpu_in_list = is_midr_in_range_list(read_cpuid_id(),
                                         spectre_bhb_firmware_mitigated_list);

        if (scope != SCOPE_LOCAL_CPU)
                return system_affected;

        fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
        if (cpu_in_list || (has_smccc && fw_state == SPECTRE_MITIGATED)) {
                system_affected = true;
                return true;
        }

        return false;
}

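/*
 * Check the ECBHB field of ID_AA64MMFR1_EL1, either on this CPU alone or in
 * the sanitised system-wide view of the register.
 */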
static bool __maybe_unused supports_ecbhb(int scope)
{
        u64 mmfr1;

        if (scope == SCOPE_LOCAL_CPU)
                mmfr1 = read_sysreg_s(SYS_ID_AA64MMFR1_EL1);
        else
                mmfr1 = read_system_reg(SYS_ID_AA64MMFR1_EL1);

        return cpuid_feature_extract_unsigned_field(mmfr1,
                                                    ID_AA64MMFR1_ECBHB_SHIFT);
}

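/*
 * A CPU with CSV2.3 is unaffected. Everything else that has ClearBHB, is on
 * a loop-mitigated list or is covered by firmware is reported as affected so
 * that the appropriate mitigation gets installed.
 */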
bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
                             int scope)
{
        WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

        if (supports_csv2p3(scope))
                return false;

        if (supports_clearbhb(scope))
                return true;

        if (spectre_bhb_loop_affected(scope))
                return true;

        if (is_spectre_bhb_fw_affected(scope))
                return true;

        return false;
}

#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
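/*
 * Point this CPU's EL1 vectors at the slot containing the chosen BHB
 * sequence. With KPTI enabled only the per-cpu pointer is updated here; the
 * vectors themselves are switched on the return to user-space.
 */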
static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
{
        const char *v = arm64_get_bp_hardening_vector(slot);

        if (slot < 0)
                return;

        __this_cpu_write(this_cpu_vector, v);

        /*
         * When KPTI is in use, the vectors are switched when exiting to
         * user-space.
         */
        if (arm64_kernel_unmapped_at_el0())
                return;

        write_sysreg(v, vbar_el1);
        isb();
}

#ifdef CONFIG_KVM
static const char *kvm_bhb_get_vecs_end(const char *start)
{
        if (start == __smccc_workaround_3_smc_start)
                return __smccc_workaround_3_smc_end;
        else if (start == __spectre_bhb_loop_k8_start)
                return __spectre_bhb_loop_k8_end;
        else if (start == __spectre_bhb_loop_k24_start)
                return __spectre_bhb_loop_k24_end;
        else if (start == __spectre_bhb_loop_k32_start)
                return __spectre_bhb_loop_k32_end;
        else if (start == __spectre_bhb_clearbhb_start)
                return __spectre_bhb_clearbhb_end;

        return NULL;
}

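/*
 * Install @hyp_vecs_start as the KVM BHB template for this CPU, reusing the
 * hypervisor vector slot of any CPU that already uses the same template.
 */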
static void kvm_setup_bhb_slot(const char *hyp_vecs_start)
{
        int cpu, slot = -1;
        const char *hyp_vecs_end;

        if (!IS_ENABLED(CONFIG_KVM) || !is_hyp_mode_available())
                return;

        hyp_vecs_end = kvm_bhb_get_vecs_end(hyp_vecs_start);
        if (WARN_ON_ONCE(!hyp_vecs_start || !hyp_vecs_end))
                return;

        spin_lock(&bp_lock);
        for_each_possible_cpu(cpu) {
                if (per_cpu(bp_hardening_data.template_start, cpu) == hyp_vecs_start) {
                        slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
                        break;
                }
        }

        if (slot == -1) {
                last_slot++;
                BUG_ON(((__bp_harden_hyp_vecs_end - __bp_harden_hyp_vecs_start)
                        / SZ_2K) <= last_slot);
                slot = last_slot;
                __copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
        }

        __this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
        __this_cpu_write(bp_hardening_data.template_start, hyp_vecs_start);
        spin_unlock(&bp_lock);
}
#else
#define __smccc_workaround_3_smc_start NULL
#define __spectre_bhb_loop_k8_start NULL
#define __spectre_bhb_loop_k24_start NULL
#define __spectre_bhb_loop_k32_start NULL
#define __spectre_bhb_clearbhb_start NULL

static void kvm_setup_bhb_slot(const char *hyp_vecs_start) { }
#endif /* CONFIG_KVM */

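/*
 * "Spectre-v2 safe" here means the CPU is not on the list of cores that need
 * the firmware branch-predictor hardening call.
 */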
static bool is_spectrev2_safe(void)
{
        return !is_midr_in_range_list(read_cpuid_id(),
                                      arm64_bp_harden_smccc_cpus);
}

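/*
 * Pick the strongest available Spectre-BHB mitigation for this CPU: ECBHB
 * needs nothing in the vectors, ClearBHB uses the dedicated instruction,
 * loop-affected CPUs get a branchy loop of the right length, and the rest
 * fall back to the firmware ARM_SMCCC_ARCH_WORKAROUND_3 call.
 */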
void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
{
        enum mitigation_state fw_state, state = SPECTRE_VULNERABLE;

        if (!is_spectre_bhb_affected(entry, SCOPE_LOCAL_CPU))
                return;

        if (!is_spectrev2_safe() && !__hardenbp_enab) {
                /* No point mitigating Spectre-BHB alone. */
        } else if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY)) {
                pr_info_once("spectre-bhb mitigation disabled by compile time option\n");
        } else if (cpu_mitigations_off()) {
                pr_info_once("spectre-bhb mitigation disabled by command line option\n");
        } else if (supports_ecbhb(SCOPE_LOCAL_CPU)) {
                state = SPECTRE_MITIGATED;
        } else if (supports_clearbhb(SCOPE_LOCAL_CPU)) {
                kvm_setup_bhb_slot(__spectre_bhb_clearbhb_start);
                this_cpu_set_vectors(EL1_VECTOR_BHB_CLEAR_INSN);

                state = SPECTRE_MITIGATED;
        } else if (spectre_bhb_loop_affected(SCOPE_LOCAL_CPU)) {
                switch (spectre_bhb_loop_affected(SCOPE_SYSTEM)) {
                case 8:
                        kvm_setup_bhb_slot(__spectre_bhb_loop_k8_start);
                        break;
                case 24:
                        kvm_setup_bhb_slot(__spectre_bhb_loop_k24_start);
                        break;
                case 32:
                        kvm_setup_bhb_slot(__spectre_bhb_loop_k32_start);
                        break;
                default:
                        WARN_ON_ONCE(1);
                }
                this_cpu_set_vectors(EL1_VECTOR_BHB_LOOP);

                state = SPECTRE_MITIGATED;
        } else if (is_spectre_bhb_fw_affected(SCOPE_LOCAL_CPU)) {
                fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
                if (fw_state == SPECTRE_MITIGATED) {
                        kvm_setup_bhb_slot(__smccc_workaround_3_smc_start);
                        this_cpu_set_vectors(EL1_VECTOR_BHB_FW);

                        /*
                         * With WA3 in the vectors, the WA1 calls can be
                         * removed.
                         */
                        __this_cpu_write(bp_hardening_data.fn, NULL);

                        state = SPECTRE_MITIGATED;
                }
        }

        update_mitigation_state(&spectre_bhb_state, state);
}

/* Patched to correct the immediate */
void __init spectre_bhb_patch_loop_iter(struct alt_instr *alt,
                                        __le32 *origptr, __le32 *updptr, int nr_inst)
{
        u8 rd;
        u32 insn;
        u16 loop_count = spectre_bhb_loop_affected(SCOPE_SYSTEM);

        BUG_ON(nr_inst != 1); /* MOV -> MOV */

        if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY))
                return;

        insn = le32_to_cpu(*origptr);
        rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);
        insn = aarch64_insn_gen_movewide(rd, loop_count, 0,
                                         AARCH64_INSN_VARIANT_64BIT,
                                         AARCH64_INSN_MOVEWIDE_ZERO);
        *updptr++ = cpu_to_le32(insn);
}
#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */