GNU Linux-libre 4.19.209-gnu1
[releases.git] / arch / arm64 / kernel / cpufeature.c
1 /*
2  * Contains CPU feature definitions
3  *
4  * Copyright (C) 2015 ARM Ltd.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
17  */
18
19 #define pr_fmt(fmt) "CPU features: " fmt
20
21 #include <linux/bsearch.h>
22 #include <linux/cpumask.h>
23 #include <linux/sort.h>
24 #include <linux/stop_machine.h>
25 #include <linux/types.h>
26 #include <linux/mm.h>
27 #include <linux/cpu.h>
28 #include <asm/cpu.h>
29 #include <asm/cpufeature.h>
30 #include <asm/cpu_ops.h>
31 #include <asm/fpsimd.h>
32 #include <asm/mmu_context.h>
33 #include <asm/processor.h>
34 #include <asm/sysreg.h>
35 #include <asm/traps.h>
36 #include <asm/virt.h>
37
38 unsigned long elf_hwcap __read_mostly;
39 EXPORT_SYMBOL_GPL(elf_hwcap);
40
41 #ifdef CONFIG_COMPAT
42 #define COMPAT_ELF_HWCAP_DEFAULT        \
43                                 (COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
44                                  COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
45                                  COMPAT_HWCAP_TLS|COMPAT_HWCAP_IDIV|\
46                                  COMPAT_HWCAP_LPAE)
47 unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
48 unsigned int compat_elf_hwcap2 __read_mostly;
49 #endif
50
51 DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
52 EXPORT_SYMBOL(cpu_hwcaps);
53
54 /*
55  * Flag to indicate if we have computed the system wide
56  * capabilities based on the boot time active CPUs. This
57  * will be used to determine if a new booting CPU should
58  * go through the verification process to make sure that it
59  * supports the system capabilities, without using a hotplug
60  * notifier.
61  */
62 static bool sys_caps_initialised;
63
64 static inline void set_sys_caps_initialised(void)
65 {
66         sys_caps_initialised = true;
67 }
68
69 static int dump_cpu_hwcaps(struct notifier_block *self, unsigned long v, void *p)
70 {
71         /* file-wide pr_fmt adds "CPU features: " prefix */
72         pr_emerg("0x%*pb\n", ARM64_NCAPS, &cpu_hwcaps);
73         return 0;
74 }
75
76 static struct notifier_block cpu_hwcaps_notifier = {
77         .notifier_call = dump_cpu_hwcaps
78 };
79
80 static int __init register_cpu_hwcaps_dumper(void)
81 {
82         atomic_notifier_chain_register(&panic_notifier_list,
83                                        &cpu_hwcaps_notifier);
84         return 0;
85 }
86 __initcall(register_cpu_hwcaps_dumper);
87
88 DEFINE_STATIC_KEY_ARRAY_FALSE(cpu_hwcap_keys, ARM64_NCAPS);
89 EXPORT_SYMBOL(cpu_hwcap_keys);
90
91 #define __ARM64_FTR_BITS(SIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
92         {                                               \
93                 .sign = SIGNED,                         \
94                 .visible = VISIBLE,                     \
95                 .strict = STRICT,                       \
96                 .type = TYPE,                           \
97                 .shift = SHIFT,                         \
98                 .width = WIDTH,                         \
99                 .safe_val = SAFE_VAL,                   \
100         }
101
102 /* Define a feature with unsigned values */
103 #define ARM64_FTR_BITS(VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
104         __ARM64_FTR_BITS(FTR_UNSIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)
105
106 /* Define a feature with a signed value */
107 #define S_ARM64_FTR_BITS(VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
108         __ARM64_FTR_BITS(FTR_SIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)
109
110 #define ARM64_FTR_END                                   \
111         {                                               \
112                 .width = 0,                             \
113         }
114
115 /* meta feature for alternatives */
116 static bool __maybe_unused
117 cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused);
118
119
120 /*
121  * NOTE: Any changes to the visibility of features should be kept in
122  * sync with the documentation of the CPU feature register ABI.
123  */
124 static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
125         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_TS_SHIFT, 4, 0),
126         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_FHM_SHIFT, 4, 0),
127         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_DP_SHIFT, 4, 0),
128         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SM4_SHIFT, 4, 0),
129         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SM3_SHIFT, 4, 0),
130         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA3_SHIFT, 4, 0),
131         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_RDM_SHIFT, 4, 0),
132         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_ATOMICS_SHIFT, 4, 0),
133         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_CRC32_SHIFT, 4, 0),
134         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA2_SHIFT, 4, 0),
135         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA1_SHIFT, 4, 0),
136         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_AES_SHIFT, 4, 0),
137         ARM64_FTR_END,
138 };
139
140 static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
141         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_LRCPC_SHIFT, 4, 0),
142         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_FCMA_SHIFT, 4, 0),
143         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_JSCVT_SHIFT, 4, 0),
144         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_DPB_SHIFT, 4, 0),
145         ARM64_FTR_END,
146 };
147
148 static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
149         ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0),
150         ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0),
151         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_DIT_SHIFT, 4, 0),
152         ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
153                                    FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_SVE_SHIFT, 4, 0),
154         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_RAS_SHIFT, 4, 0),
155         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_GIC_SHIFT, 4, 0),
156         S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
157         S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
158         ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL3_SHIFT, 4, 0),
159         ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL2_SHIFT, 4, 0),
160         ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY),
161         ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY),
162         ARM64_FTR_END,
163 };
164
165 static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
166         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_SSBS_SHIFT, 4, ID_AA64PFR1_SSBS_PSTATE_NI),
167         ARM64_FTR_END,
168 };
169
170 static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
171         /*
172          * We already refuse to boot CPUs that don't support our configured
173          * page size, so we can only detect mismatches for a page size other
174          * than the one we're currently using. Unfortunately, SoCs like this
175          * exist in the wild so, even though we don't like it, we'll have to go
176          * along with it and treat them as non-strict.
177          */
178         S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
179         S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
180         ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI),
181
182         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL0_SHIFT, 4, 0),
183         /* Linux shouldn't care about secure memory */
184         ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_SNSMEM_SHIFT, 4, 0),
185         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_BIGENDEL_SHIFT, 4, 0),
186         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_ASID_SHIFT, 4, 0),
187         /*
188          * Differing PARange is fine as long as all peripherals and memory are mapped
189          * within the minimum PARange of all CPUs
190          */
191         ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_PARANGE_SHIFT, 4, 0),
192         ARM64_FTR_END,
193 };
194
195 static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
196         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_PAN_SHIFT, 4, 0),
197         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_LOR_SHIFT, 4, 0),
198         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_HPD_SHIFT, 4, 0),
199         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_VHE_SHIFT, 4, 0),
200         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_VMIDBITS_SHIFT, 4, 0),
201         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_HADBS_SHIFT, 4, 0),
202         ARM64_FTR_END,
203 };
204
205 static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
206         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_FWB_SHIFT, 4, 0),
207         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_AT_SHIFT, 4, 0),
208         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LVA_SHIFT, 4, 0),
209         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_IESB_SHIFT, 4, 0),
210         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_LSM_SHIFT, 4, 0),
211         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_UAO_SHIFT, 4, 0),
212         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_CNP_SHIFT, 4, 0),
213         ARM64_FTR_END,
214 };
215
216 static const struct arm64_ftr_bits ftr_ctr[] = {
217         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RES1 */
218         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DIC_SHIFT, 1, 1),
219         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_IDC_SHIFT, 1, 1),
220         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, CTR_CWG_SHIFT, 4, 0),
221         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, CTR_ERG_SHIFT, 4, 0),
222         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_DMINLINE_SHIFT, 4, 1),
223         /*
224          * Linux can handle differing I-cache policies. Userspace JITs will
225          * make use of *minLine.
226          * If we have differing I-cache policies, report it as the weakest - VIPT.
227          */
228         ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_EXACT, 14, 2, ICACHE_POLICY_VIPT),       /* L1Ip */
229         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_IMINLINE_SHIFT, 4, 0),
230         ARM64_FTR_END,
231 };
232
233 struct arm64_ftr_reg arm64_ftr_reg_ctrel0 = {
234         .name           = "SYS_CTR_EL0",
235         .ftr_bits       = ftr_ctr
236 };
237
238 static const struct arm64_ftr_bits ftr_id_mmfr0[] = {
239         S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0xf),   /* InnerShr */
240         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0),       /* FCSE */
241         ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, 20, 4, 0),    /* AuxReg */
242         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),       /* TCM */
243         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),       /* ShareLvl */
244         S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0xf),    /* OuterShr */
245         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),        /* PMSA */
246         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),        /* VMSA */
247         ARM64_FTR_END,
248 };
249
250 static const struct arm64_ftr_bits ftr_id_aa64dfr0[] = {
251         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 36, 28, 0),
252         ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64DFR0_PMSVER_SHIFT, 4, 0),
253         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPS_SHIFT, 4, 0),
254         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPS_SHIFT, 4, 0),
255         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_BRPS_SHIFT, 4, 0),
256         /*
257          * We can instantiate multiple PMU instances with different levels
258          * of support.
259          */
260         S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0),
261         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6),
262         ARM64_FTR_END,
263 };
264
265 static const struct arm64_ftr_bits ftr_mvfr2[] = {
266         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),                /* FPMisc */
267         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),                /* SIMDMisc */
268         ARM64_FTR_END,
269 };
270
271 static const struct arm64_ftr_bits ftr_dczid[] = {
272         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 4, 1, 1),            /* DZP */
273         ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),       /* BS */
274         ARM64_FTR_END,
275 };
276
277
278 static const struct arm64_ftr_bits ftr_id_isar5[] = {
279         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_RDM_SHIFT, 4, 0),
280         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_CRC32_SHIFT, 4, 0),
281         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA2_SHIFT, 4, 0),
282         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SHA1_SHIFT, 4, 0),
283         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_AES_SHIFT, 4, 0),
284         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_SEVL_SHIFT, 4, 0),
285         ARM64_FTR_END,
286 };
287
288 static const struct arm64_ftr_bits ftr_id_mmfr4[] = {
289         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),        /* ac2 */
290         ARM64_FTR_END,
291 };
292
293 static const struct arm64_ftr_bits ftr_id_pfr0[] = {
294         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),               /* State3 */
295         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),                /* State2 */
296         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),                /* State1 */
297         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),                /* State0 */
298         ARM64_FTR_END,
299 };
300
301 static const struct arm64_ftr_bits ftr_id_dfr0[] = {
302         /* [31:28] TraceFilt */
303         S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0xf),   /* PerfMon */
304         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),
305         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),
306         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),
307         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),
308         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),
309         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),
310         ARM64_FTR_END,
311 };
312
313 static const struct arm64_ftr_bits ftr_zcr[] = {
314         ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE,
315                 ZCR_ELx_LEN_SHIFT, ZCR_ELx_LEN_SIZE, 0),        /* LEN */
316         ARM64_FTR_END,
317 };
318
319 /*
320  * Common ftr bits for a 32bit register with all hidden, strict
321  * attributes, with 4bit feature fields and a default safe value of
322  * 0. Covers the following 32bit registers:
323  * id_isar[0-4], id_mmfr[1-3], id_pfr1, mvfr[0-1]
324  */
325 static const struct arm64_ftr_bits ftr_generic_32bits[] = {
326         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0),
327         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0),
328         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),
329         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),
330         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),
331         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),
332         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),
333         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),
334         ARM64_FTR_END,
335 };
336
337 /* Table for a single 32bit feature value */
338 static const struct arm64_ftr_bits ftr_single32[] = {
339         ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 0, 32, 0),
340         ARM64_FTR_END,
341 };
342
343 static const struct arm64_ftr_bits ftr_raz[] = {
344         ARM64_FTR_END,
345 };
346
347 #define ARM64_FTR_REG(id, table) {              \
348         .sys_id = id,                           \
349         .reg =  &(struct arm64_ftr_reg){        \
350                 .name = #id,                    \
351                 .ftr_bits = &((table)[0]),      \
352         }}
353
354 static const struct __ftr_reg_entry {
355         u32                     sys_id;
356         struct arm64_ftr_reg    *reg;
357 } arm64_ftr_regs[] = {
358
359         /* Op1 = 0, CRn = 0, CRm = 1 */
360         ARM64_FTR_REG(SYS_ID_PFR0_EL1, ftr_id_pfr0),
361         ARM64_FTR_REG(SYS_ID_PFR1_EL1, ftr_generic_32bits),
362         ARM64_FTR_REG(SYS_ID_DFR0_EL1, ftr_id_dfr0),
363         ARM64_FTR_REG(SYS_ID_MMFR0_EL1, ftr_id_mmfr0),
364         ARM64_FTR_REG(SYS_ID_MMFR1_EL1, ftr_generic_32bits),
365         ARM64_FTR_REG(SYS_ID_MMFR2_EL1, ftr_generic_32bits),
366         ARM64_FTR_REG(SYS_ID_MMFR3_EL1, ftr_generic_32bits),
367
368         /* Op1 = 0, CRn = 0, CRm = 2 */
369         ARM64_FTR_REG(SYS_ID_ISAR0_EL1, ftr_generic_32bits),
370         ARM64_FTR_REG(SYS_ID_ISAR1_EL1, ftr_generic_32bits),
371         ARM64_FTR_REG(SYS_ID_ISAR2_EL1, ftr_generic_32bits),
372         ARM64_FTR_REG(SYS_ID_ISAR3_EL1, ftr_generic_32bits),
373         ARM64_FTR_REG(SYS_ID_ISAR4_EL1, ftr_generic_32bits),
374         ARM64_FTR_REG(SYS_ID_ISAR5_EL1, ftr_id_isar5),
375         ARM64_FTR_REG(SYS_ID_MMFR4_EL1, ftr_id_mmfr4),
376
377         /* Op1 = 0, CRn = 0, CRm = 3 */
378         ARM64_FTR_REG(SYS_MVFR0_EL1, ftr_generic_32bits),
379         ARM64_FTR_REG(SYS_MVFR1_EL1, ftr_generic_32bits),
380         ARM64_FTR_REG(SYS_MVFR2_EL1, ftr_mvfr2),
381
382         /* Op1 = 0, CRn = 0, CRm = 4 */
383         ARM64_FTR_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0),
384         ARM64_FTR_REG(SYS_ID_AA64PFR1_EL1, ftr_id_aa64pfr1),
385         ARM64_FTR_REG(SYS_ID_AA64ZFR0_EL1, ftr_raz),
386
387         /* Op1 = 0, CRn = 0, CRm = 5 */
388         ARM64_FTR_REG(SYS_ID_AA64DFR0_EL1, ftr_id_aa64dfr0),
389         ARM64_FTR_REG(SYS_ID_AA64DFR1_EL1, ftr_raz),
390
391         /* Op1 = 0, CRn = 0, CRm = 6 */
392         ARM64_FTR_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0),
393         ARM64_FTR_REG(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1),
394
395         /* Op1 = 0, CRn = 0, CRm = 7 */
396         ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0),
397         ARM64_FTR_REG(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1),
398         ARM64_FTR_REG(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2),
399
400         /* Op1 = 0, CRn = 1, CRm = 2 */
401         ARM64_FTR_REG(SYS_ZCR_EL1, ftr_zcr),
402
403         /* Op1 = 3, CRn = 0, CRm = 0 */
404         { SYS_CTR_EL0, &arm64_ftr_reg_ctrel0 },
405         ARM64_FTR_REG(SYS_DCZID_EL0, ftr_dczid),
406
407         /* Op1 = 3, CRn = 14, CRm = 0 */
408         ARM64_FTR_REG(SYS_CNTFRQ_EL0, ftr_single32),
409 };
410
411 static int search_cmp_ftr_reg(const void *id, const void *regp)
412 {
413         return (int)(unsigned long)id - (int)((const struct __ftr_reg_entry *)regp)->sys_id;
414 }
415
416 /*
417  * get_arm64_ftr_reg - Lookup a feature register entry using its
418  * sys_reg() encoding. With the array arm64_ftr_regs sorted in the
419  * ascending order of sys_id , we use binary search to find a matching
420  * entry.
421  *
422  * returns - Upon success,  matching ftr_reg entry for id.
423  *         - NULL on failure. It is upto the caller to decide
424  *           the impact of a failure.
425  */
426 static struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id)
427 {
428         const struct __ftr_reg_entry *ret;
429
430         ret = bsearch((const void *)(unsigned long)sys_id,
431                         arm64_ftr_regs,
432                         ARRAY_SIZE(arm64_ftr_regs),
433                         sizeof(arm64_ftr_regs[0]),
434                         search_cmp_ftr_reg);
435         if (ret)
436                 return ret->reg;
437         return NULL;
438 }
439
440 static u64 arm64_ftr_set_value(const struct arm64_ftr_bits *ftrp, s64 reg,
441                                s64 ftr_val)
442 {
443         u64 mask = arm64_ftr_mask(ftrp);
444
445         reg &= ~mask;
446         reg |= (ftr_val << ftrp->shift) & mask;
447         return reg;
448 }
449
450 static s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new,
451                                 s64 cur)
452 {
453         s64 ret = 0;
454
455         switch (ftrp->type) {
456         case FTR_EXACT:
457                 ret = ftrp->safe_val;
458                 break;
459         case FTR_LOWER_SAFE:
460                 ret = new < cur ? new : cur;
461                 break;
462         case FTR_HIGHER_OR_ZERO_SAFE:
463                 if (!cur || !new)
464                         break;
465                 /* Fallthrough */
466         case FTR_HIGHER_SAFE:
467                 ret = new > cur ? new : cur;
468                 break;
469         default:
470                 BUG();
471         }
472
473         return ret;
474 }
475
476 static void __init sort_ftr_regs(void)
477 {
478         int i;
479
480         /* Check that the array is sorted so that we can do the binary search */
481         for (i = 1; i < ARRAY_SIZE(arm64_ftr_regs); i++)
482                 BUG_ON(arm64_ftr_regs[i].sys_id < arm64_ftr_regs[i - 1].sys_id);
483 }
484
485 /*
486  * Initialise the CPU feature register from Boot CPU values.
487  * Also initiliases the strict_mask for the register.
488  * Any bits that are not covered by an arm64_ftr_bits entry are considered
489  * RES0 for the system-wide value, and must strictly match.
490  */
491 static void __init init_cpu_ftr_reg(u32 sys_reg, u64 new)
492 {
493         u64 val = 0;
494         u64 strict_mask = ~0x0ULL;
495         u64 user_mask = 0;
496         u64 valid_mask = 0;
497
498         const struct arm64_ftr_bits *ftrp;
499         struct arm64_ftr_reg *reg = get_arm64_ftr_reg(sys_reg);
500
501         BUG_ON(!reg);
502
503         for (ftrp  = reg->ftr_bits; ftrp->width; ftrp++) {
504                 u64 ftr_mask = arm64_ftr_mask(ftrp);
505                 s64 ftr_new = arm64_ftr_value(ftrp, new);
506
507                 val = arm64_ftr_set_value(ftrp, val, ftr_new);
508
509                 valid_mask |= ftr_mask;
510                 if (!ftrp->strict)
511                         strict_mask &= ~ftr_mask;
512                 if (ftrp->visible)
513                         user_mask |= ftr_mask;
514                 else
515                         reg->user_val = arm64_ftr_set_value(ftrp,
516                                                             reg->user_val,
517                                                             ftrp->safe_val);
518         }
519
520         val &= valid_mask;
521
522         reg->sys_val = val;
523         reg->strict_mask = strict_mask;
524         reg->user_mask = user_mask;
525 }
526
527 extern const struct arm64_cpu_capabilities arm64_errata[];
528 static void __init setup_boot_cpu_capabilities(void);
529
530 void __init init_cpu_features(struct cpuinfo_arm64 *info)
531 {
532         /* Before we start using the tables, make sure it is sorted */
533         sort_ftr_regs();
534
535         init_cpu_ftr_reg(SYS_CTR_EL0, info->reg_ctr);
536         init_cpu_ftr_reg(SYS_DCZID_EL0, info->reg_dczid);
537         init_cpu_ftr_reg(SYS_CNTFRQ_EL0, info->reg_cntfrq);
538         init_cpu_ftr_reg(SYS_ID_AA64DFR0_EL1, info->reg_id_aa64dfr0);
539         init_cpu_ftr_reg(SYS_ID_AA64DFR1_EL1, info->reg_id_aa64dfr1);
540         init_cpu_ftr_reg(SYS_ID_AA64ISAR0_EL1, info->reg_id_aa64isar0);
541         init_cpu_ftr_reg(SYS_ID_AA64ISAR1_EL1, info->reg_id_aa64isar1);
542         init_cpu_ftr_reg(SYS_ID_AA64MMFR0_EL1, info->reg_id_aa64mmfr0);
543         init_cpu_ftr_reg(SYS_ID_AA64MMFR1_EL1, info->reg_id_aa64mmfr1);
544         init_cpu_ftr_reg(SYS_ID_AA64MMFR2_EL1, info->reg_id_aa64mmfr2);
545         init_cpu_ftr_reg(SYS_ID_AA64PFR0_EL1, info->reg_id_aa64pfr0);
546         init_cpu_ftr_reg(SYS_ID_AA64PFR1_EL1, info->reg_id_aa64pfr1);
547         init_cpu_ftr_reg(SYS_ID_AA64ZFR0_EL1, info->reg_id_aa64zfr0);
548
549         if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
550                 init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0);
551                 init_cpu_ftr_reg(SYS_ID_ISAR0_EL1, info->reg_id_isar0);
552                 init_cpu_ftr_reg(SYS_ID_ISAR1_EL1, info->reg_id_isar1);
553                 init_cpu_ftr_reg(SYS_ID_ISAR2_EL1, info->reg_id_isar2);
554                 init_cpu_ftr_reg(SYS_ID_ISAR3_EL1, info->reg_id_isar3);
555                 init_cpu_ftr_reg(SYS_ID_ISAR4_EL1, info->reg_id_isar4);
556                 init_cpu_ftr_reg(SYS_ID_ISAR5_EL1, info->reg_id_isar5);
557                 init_cpu_ftr_reg(SYS_ID_MMFR0_EL1, info->reg_id_mmfr0);
558                 init_cpu_ftr_reg(SYS_ID_MMFR1_EL1, info->reg_id_mmfr1);
559                 init_cpu_ftr_reg(SYS_ID_MMFR2_EL1, info->reg_id_mmfr2);
560                 init_cpu_ftr_reg(SYS_ID_MMFR3_EL1, info->reg_id_mmfr3);
561                 init_cpu_ftr_reg(SYS_ID_PFR0_EL1, info->reg_id_pfr0);
562                 init_cpu_ftr_reg(SYS_ID_PFR1_EL1, info->reg_id_pfr1);
563                 init_cpu_ftr_reg(SYS_MVFR0_EL1, info->reg_mvfr0);
564                 init_cpu_ftr_reg(SYS_MVFR1_EL1, info->reg_mvfr1);
565                 init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2);
566         }
567
568         if (id_aa64pfr0_sve(info->reg_id_aa64pfr0)) {
569                 init_cpu_ftr_reg(SYS_ZCR_EL1, info->reg_zcr);
570                 sve_init_vq_map();
571         }
572
573         /*
574          * Detect and enable early CPU capabilities based on the boot CPU,
575          * after we have initialised the CPU feature infrastructure.
576          */
577         setup_boot_cpu_capabilities();
578 }
579
580 static void update_cpu_ftr_reg(struct arm64_ftr_reg *reg, u64 new)
581 {
582         const struct arm64_ftr_bits *ftrp;
583
584         for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) {
585                 s64 ftr_cur = arm64_ftr_value(ftrp, reg->sys_val);
586                 s64 ftr_new = arm64_ftr_value(ftrp, new);
587
588                 if (ftr_cur == ftr_new)
589                         continue;
590                 /* Find a safe value */
591                 ftr_new = arm64_ftr_safe_value(ftrp, ftr_new, ftr_cur);
592                 reg->sys_val = arm64_ftr_set_value(ftrp, reg->sys_val, ftr_new);
593         }
594
595 }
596
597 static int check_update_ftr_reg(u32 sys_id, int cpu, u64 val, u64 boot)
598 {
599         struct arm64_ftr_reg *regp = get_arm64_ftr_reg(sys_id);
600
601         BUG_ON(!regp);
602         update_cpu_ftr_reg(regp, val);
603         if ((boot & regp->strict_mask) == (val & regp->strict_mask))
604                 return 0;
605         pr_warn("SANITY CHECK: Unexpected variation in %s. Boot CPU: %#016llx, CPU%d: %#016llx\n",
606                         regp->name, boot, cpu, val);
607         return 1;
608 }
609
610 /*
611  * Update system wide CPU feature registers with the values from a
612  * non-boot CPU. Also performs SANITY checks to make sure that there
613  * aren't any insane variations from that of the boot CPU.
614  */
615 void update_cpu_features(int cpu,
616                          struct cpuinfo_arm64 *info,
617                          struct cpuinfo_arm64 *boot)
618 {
619         int taint = 0;
620
621         /*
622          * The kernel can handle differing I-cache policies, but otherwise
623          * caches should look identical. Userspace JITs will make use of
624          * *minLine.
625          */
626         taint |= check_update_ftr_reg(SYS_CTR_EL0, cpu,
627                                       info->reg_ctr, boot->reg_ctr);
628
629         /*
630          * Userspace may perform DC ZVA instructions. Mismatched block sizes
631          * could result in too much or too little memory being zeroed if a
632          * process is preempted and migrated between CPUs.
633          */
634         taint |= check_update_ftr_reg(SYS_DCZID_EL0, cpu,
635                                       info->reg_dczid, boot->reg_dczid);
636
637         /* If different, timekeeping will be broken (especially with KVM) */
638         taint |= check_update_ftr_reg(SYS_CNTFRQ_EL0, cpu,
639                                       info->reg_cntfrq, boot->reg_cntfrq);
640
641         /*
642          * The kernel uses self-hosted debug features and expects CPUs to
643          * support identical debug features. We presently need CTX_CMPs, WRPs,
644          * and BRPs to be identical.
645          * ID_AA64DFR1 is currently RES0.
646          */
647         taint |= check_update_ftr_reg(SYS_ID_AA64DFR0_EL1, cpu,
648                                       info->reg_id_aa64dfr0, boot->reg_id_aa64dfr0);
649         taint |= check_update_ftr_reg(SYS_ID_AA64DFR1_EL1, cpu,
650                                       info->reg_id_aa64dfr1, boot->reg_id_aa64dfr1);
651         /*
652          * Even in big.LITTLE, processors should be identical instruction-set
653          * wise.
654          */
655         taint |= check_update_ftr_reg(SYS_ID_AA64ISAR0_EL1, cpu,
656                                       info->reg_id_aa64isar0, boot->reg_id_aa64isar0);
657         taint |= check_update_ftr_reg(SYS_ID_AA64ISAR1_EL1, cpu,
658                                       info->reg_id_aa64isar1, boot->reg_id_aa64isar1);
659
660         /*
661          * Differing PARange support is fine as long as all peripherals and
662          * memory are mapped within the minimum PARange of all CPUs.
663          * Linux should not care about secure memory.
664          */
665         taint |= check_update_ftr_reg(SYS_ID_AA64MMFR0_EL1, cpu,
666                                       info->reg_id_aa64mmfr0, boot->reg_id_aa64mmfr0);
667         taint |= check_update_ftr_reg(SYS_ID_AA64MMFR1_EL1, cpu,
668                                       info->reg_id_aa64mmfr1, boot->reg_id_aa64mmfr1);
669         taint |= check_update_ftr_reg(SYS_ID_AA64MMFR2_EL1, cpu,
670                                       info->reg_id_aa64mmfr2, boot->reg_id_aa64mmfr2);
671
672         taint |= check_update_ftr_reg(SYS_ID_AA64PFR0_EL1, cpu,
673                                       info->reg_id_aa64pfr0, boot->reg_id_aa64pfr0);
674         taint |= check_update_ftr_reg(SYS_ID_AA64PFR1_EL1, cpu,
675                                       info->reg_id_aa64pfr1, boot->reg_id_aa64pfr1);
676
677         taint |= check_update_ftr_reg(SYS_ID_AA64ZFR0_EL1, cpu,
678                                       info->reg_id_aa64zfr0, boot->reg_id_aa64zfr0);
679
680         /*
681          * If we have AArch32, we care about 32-bit features for compat.
682          * If the system doesn't support AArch32, don't update them.
683          */
684         if (id_aa64pfr0_32bit_el0(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1)) &&
685                 id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
686
687                 taint |= check_update_ftr_reg(SYS_ID_DFR0_EL1, cpu,
688                                         info->reg_id_dfr0, boot->reg_id_dfr0);
689                 taint |= check_update_ftr_reg(SYS_ID_ISAR0_EL1, cpu,
690                                         info->reg_id_isar0, boot->reg_id_isar0);
691                 taint |= check_update_ftr_reg(SYS_ID_ISAR1_EL1, cpu,
692                                         info->reg_id_isar1, boot->reg_id_isar1);
693                 taint |= check_update_ftr_reg(SYS_ID_ISAR2_EL1, cpu,
694                                         info->reg_id_isar2, boot->reg_id_isar2);
695                 taint |= check_update_ftr_reg(SYS_ID_ISAR3_EL1, cpu,
696                                         info->reg_id_isar3, boot->reg_id_isar3);
697                 taint |= check_update_ftr_reg(SYS_ID_ISAR4_EL1, cpu,
698                                         info->reg_id_isar4, boot->reg_id_isar4);
699                 taint |= check_update_ftr_reg(SYS_ID_ISAR5_EL1, cpu,
700                                         info->reg_id_isar5, boot->reg_id_isar5);
701
702                 /*
703                  * Regardless of the value of the AuxReg field, the AIFSR, ADFSR, and
704                  * ACTLR formats could differ across CPUs and therefore would have to
705                  * be trapped for virtualization anyway.
706                  */
707                 taint |= check_update_ftr_reg(SYS_ID_MMFR0_EL1, cpu,
708                                         info->reg_id_mmfr0, boot->reg_id_mmfr0);
709                 taint |= check_update_ftr_reg(SYS_ID_MMFR1_EL1, cpu,
710                                         info->reg_id_mmfr1, boot->reg_id_mmfr1);
711                 taint |= check_update_ftr_reg(SYS_ID_MMFR2_EL1, cpu,
712                                         info->reg_id_mmfr2, boot->reg_id_mmfr2);
713                 taint |= check_update_ftr_reg(SYS_ID_MMFR3_EL1, cpu,
714                                         info->reg_id_mmfr3, boot->reg_id_mmfr3);
715                 taint |= check_update_ftr_reg(SYS_ID_PFR0_EL1, cpu,
716                                         info->reg_id_pfr0, boot->reg_id_pfr0);
717                 taint |= check_update_ftr_reg(SYS_ID_PFR1_EL1, cpu,
718                                         info->reg_id_pfr1, boot->reg_id_pfr1);
719                 taint |= check_update_ftr_reg(SYS_MVFR0_EL1, cpu,
720                                         info->reg_mvfr0, boot->reg_mvfr0);
721                 taint |= check_update_ftr_reg(SYS_MVFR1_EL1, cpu,
722                                         info->reg_mvfr1, boot->reg_mvfr1);
723                 taint |= check_update_ftr_reg(SYS_MVFR2_EL1, cpu,
724                                         info->reg_mvfr2, boot->reg_mvfr2);
725         }
726
727         if (id_aa64pfr0_sve(info->reg_id_aa64pfr0)) {
728                 taint |= check_update_ftr_reg(SYS_ZCR_EL1, cpu,
729                                         info->reg_zcr, boot->reg_zcr);
730
731                 /* Probe vector lengths, unless we already gave up on SVE */
732                 if (id_aa64pfr0_sve(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1)) &&
733                     !sys_caps_initialised)
734                         sve_update_vq_map();
735         }
736
737         /*
738          * Mismatched CPU features are a recipe for disaster. Don't even
739          * pretend to support them.
740          */
741         if (taint) {
742                 pr_warn_once("Unsupported CPU feature variation detected.\n");
743                 add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
744         }
745 }
746
747 u64 read_sanitised_ftr_reg(u32 id)
748 {
749         struct arm64_ftr_reg *regp = get_arm64_ftr_reg(id);
750
751         /* We shouldn't get a request for an unsupported register */
752         BUG_ON(!regp);
753         return regp->sys_val;
754 }
755
756 #define read_sysreg_case(r)     \
757         case r:         return read_sysreg_s(r)
758
759 /*
760  * __read_sysreg_by_encoding() - Used by a STARTING cpu before cpuinfo is populated.
761  * Read the system register on the current CPU
762  */
763 static u64 __read_sysreg_by_encoding(u32 sys_id)
764 {
765         switch (sys_id) {
766         read_sysreg_case(SYS_ID_PFR0_EL1);
767         read_sysreg_case(SYS_ID_PFR1_EL1);
768         read_sysreg_case(SYS_ID_DFR0_EL1);
769         read_sysreg_case(SYS_ID_MMFR0_EL1);
770         read_sysreg_case(SYS_ID_MMFR1_EL1);
771         read_sysreg_case(SYS_ID_MMFR2_EL1);
772         read_sysreg_case(SYS_ID_MMFR3_EL1);
773         read_sysreg_case(SYS_ID_ISAR0_EL1);
774         read_sysreg_case(SYS_ID_ISAR1_EL1);
775         read_sysreg_case(SYS_ID_ISAR2_EL1);
776         read_sysreg_case(SYS_ID_ISAR3_EL1);
777         read_sysreg_case(SYS_ID_ISAR4_EL1);
778         read_sysreg_case(SYS_ID_ISAR5_EL1);
779         read_sysreg_case(SYS_MVFR0_EL1);
780         read_sysreg_case(SYS_MVFR1_EL1);
781         read_sysreg_case(SYS_MVFR2_EL1);
782
783         read_sysreg_case(SYS_ID_AA64PFR0_EL1);
784         read_sysreg_case(SYS_ID_AA64PFR1_EL1);
785         read_sysreg_case(SYS_ID_AA64DFR0_EL1);
786         read_sysreg_case(SYS_ID_AA64DFR1_EL1);
787         read_sysreg_case(SYS_ID_AA64MMFR0_EL1);
788         read_sysreg_case(SYS_ID_AA64MMFR1_EL1);
789         read_sysreg_case(SYS_ID_AA64MMFR2_EL1);
790         read_sysreg_case(SYS_ID_AA64ISAR0_EL1);
791         read_sysreg_case(SYS_ID_AA64ISAR1_EL1);
792
793         read_sysreg_case(SYS_CNTFRQ_EL0);
794         read_sysreg_case(SYS_CTR_EL0);
795         read_sysreg_case(SYS_DCZID_EL0);
796
797         default:
798                 BUG();
799                 return 0;
800         }
801 }
802
803 #include <linux/irqchip/arm-gic-v3.h>
804
805 static bool
806 feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry)
807 {
808         int val = cpuid_feature_extract_field(reg, entry->field_pos, entry->sign);
809
810         return val >= entry->min_field_value;
811 }
812
813 static bool
814 has_cpuid_feature(const struct arm64_cpu_capabilities *entry, int scope)
815 {
816         u64 val;
817
818         WARN_ON(scope == SCOPE_LOCAL_CPU && preemptible());
819         if (scope == SCOPE_SYSTEM)
820                 val = read_sanitised_ftr_reg(entry->sys_reg);
821         else
822                 val = __read_sysreg_by_encoding(entry->sys_reg);
823
824         return feature_matches(val, entry);
825 }
826
827 static bool has_useable_gicv3_cpuif(const struct arm64_cpu_capabilities *entry, int scope)
828 {
829         bool has_sre;
830
831         if (!has_cpuid_feature(entry, scope))
832                 return false;
833
834         has_sre = gic_enable_sre();
835         if (!has_sre)
836                 pr_warn_once("%s present but disabled by higher exception level\n",
837                              entry->desc);
838
839         return has_sre;
840 }
841
842 static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry, int __unused)
843 {
844         u32 midr = read_cpuid_id();
845
846         /* Cavium ThunderX pass 1.x and 2.x */
847         return midr_is_cpu_model_range(midr, MIDR_THUNDERX,
848                 MIDR_CPU_VAR_REV(0, 0),
849                 MIDR_CPU_VAR_REV(1, MIDR_REVISION_MASK));
850 }
851
852 static bool has_no_fpsimd(const struct arm64_cpu_capabilities *entry, int __unused)
853 {
854         u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
855
856         return cpuid_feature_extract_signed_field(pfr0,
857                                         ID_AA64PFR0_FP_SHIFT) < 0;
858 }
859
860 static bool has_cache_idc(const struct arm64_cpu_capabilities *entry,
861                           int scope)
862 {
863         u64 ctr;
864
865         if (scope == SCOPE_SYSTEM)
866                 ctr = arm64_ftr_reg_ctrel0.sys_val;
867         else
868                 ctr = read_cpuid_cachetype();
869
870         return ctr & BIT(CTR_IDC_SHIFT);
871 }
872
873 static bool has_cache_dic(const struct arm64_cpu_capabilities *entry,
874                           int scope)
875 {
876         u64 ctr;
877
878         if (scope == SCOPE_SYSTEM)
879                 ctr = arm64_ftr_reg_ctrel0.sys_val;
880         else
881                 ctr = read_cpuid_cachetype();
882
883         return ctr & BIT(CTR_DIC_SHIFT);
884 }
885
886 static bool __meltdown_safe = true;
887 static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
888
889 static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
890                                 int scope)
891 {
892         /* List of CPUs that are not vulnerable and don't need KPTI */
893         static const struct midr_range kpti_safe_list[] = {
894                 MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
895                 MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
896                 MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
897                 MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
898                 MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
899                 MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
900                 MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
901                 MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
902                 MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
903                 { /* sentinel */ }
904         };
905         char const *str = "kpti command line option";
906         bool meltdown_safe;
907
908         meltdown_safe = is_midr_in_range_list(read_cpuid_id(), kpti_safe_list);
909
910         /* Defer to CPU feature registers */
911         if (has_cpuid_feature(entry, scope))
912                 meltdown_safe = true;
913
914         if (!meltdown_safe)
915                 __meltdown_safe = false;
916
917         /*
918          * For reasons that aren't entirely clear, enabling KPTI on Cavium
919          * ThunderX leads to apparent I-cache corruption of kernel text, which
920          * ends as well as you might imagine. Don't even try.
921          */
922         if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456)) {
923                 str = "ARM64_WORKAROUND_CAVIUM_27456";
924                 __kpti_forced = -1;
925         }
926
927         /* Useful for KASLR robustness */
928         if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_offset() > 0) {
929                 if (!__kpti_forced) {
930                         str = "KASLR";
931                         __kpti_forced = 1;
932                 }
933         }
934
935         if (cpu_mitigations_off() && !__kpti_forced) {
936                 str = "mitigations=off";
937                 __kpti_forced = -1;
938         }
939
940         if (!IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0)) {
941                 pr_info_once("kernel page table isolation disabled by kernel configuration\n");
942                 return false;
943         }
944
945         /* Forced? */
946         if (__kpti_forced) {
947                 pr_info_once("kernel page table isolation forced %s by %s\n",
948                              __kpti_forced > 0 ? "ON" : "OFF", str);
949                 return __kpti_forced > 0;
950         }
951
952         return !meltdown_safe;
953 }
954
955 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
956 static void
957 kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
958 {
959         typedef void (kpti_remap_fn)(int, int, phys_addr_t);
960         extern kpti_remap_fn idmap_kpti_install_ng_mappings;
961         kpti_remap_fn *remap_fn;
962
963         static bool kpti_applied = false;
964         int cpu = smp_processor_id();
965
966         if (kpti_applied)
967                 return;
968
969         remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings);
970
971         cpu_install_idmap();
972         remap_fn(cpu, num_online_cpus(), __pa_symbol(swapper_pg_dir));
973         cpu_uninstall_idmap();
974
975         if (!cpu)
976                 kpti_applied = true;
977
978         return;
979 }
980 #else
981 static void
982 kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
983 {
984 }
985 #endif  /* CONFIG_UNMAP_KERNEL_AT_EL0 */
986
987 static int __init parse_kpti(char *str)
988 {
989         bool enabled;
990         int ret = strtobool(str, &enabled);
991
992         if (ret)
993                 return ret;
994
995         __kpti_forced = enabled ? 1 : -1;
996         return 0;
997 }
998 early_param("kpti", parse_kpti);
999
1000 #ifdef CONFIG_ARM64_HW_AFDBM
1001 static inline void __cpu_enable_hw_dbm(void)
1002 {
1003         u64 tcr = read_sysreg(tcr_el1) | TCR_HD;
1004
1005         write_sysreg(tcr, tcr_el1);
1006         isb();
1007 }
1008
1009 static bool cpu_has_broken_dbm(void)
1010 {
1011         /* List of CPUs which have broken DBM support. */
1012         static const struct midr_range cpus[] = {
1013 #ifdef CONFIG_ARM64_ERRATUM_1024718
1014                 MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
1015 #endif
1016                 {},
1017         };
1018
1019         return is_midr_in_range_list(read_cpuid_id(), cpus);
1020 }
1021
1022 static bool cpu_can_use_dbm(const struct arm64_cpu_capabilities *cap)
1023 {
1024         return has_cpuid_feature(cap, SCOPE_LOCAL_CPU) &&
1025                !cpu_has_broken_dbm();
1026 }
1027
1028 static void cpu_enable_hw_dbm(struct arm64_cpu_capabilities const *cap)
1029 {
1030         if (cpu_can_use_dbm(cap))
1031                 __cpu_enable_hw_dbm();
1032 }
1033
1034 static bool has_hw_dbm(const struct arm64_cpu_capabilities *cap,
1035                        int __unused)
1036 {
1037         static bool detected = false;
1038         /*
1039          * DBM is a non-conflicting feature. i.e, the kernel can safely
1040          * run a mix of CPUs with and without the feature. So, we
1041          * unconditionally enable the capability to allow any late CPU
1042          * to use the feature. We only enable the control bits on the
1043          * CPU, if it actually supports.
1044          *
1045          * We have to make sure we print the "feature" detection only
1046          * when at least one CPU actually uses it. So check if this CPU
1047          * can actually use it and print the message exactly once.
1048          *
1049          * This is safe as all CPUs (including secondary CPUs - due to the
1050          * LOCAL_CPU scope - and the hotplugged CPUs - via verification)
1051          * goes through the "matches" check exactly once. Also if a CPU
1052          * matches the criteria, it is guaranteed that the CPU will turn
1053          * the DBM on, as the capability is unconditionally enabled.
1054          */
1055         if (!detected && cpu_can_use_dbm(cap)) {
1056                 detected = true;
1057                 pr_info("detected: Hardware dirty bit management\n");
1058         }
1059
1060         return true;
1061 }
1062
1063 #endif
1064
1065 #ifdef CONFIG_ARM64_VHE
1066 static bool runs_at_el2(const struct arm64_cpu_capabilities *entry, int __unused)
1067 {
1068         return is_kernel_in_hyp_mode();
1069 }
1070
1071 static void cpu_copy_el2regs(const struct arm64_cpu_capabilities *__unused)
1072 {
1073         /*
1074          * Copy register values that aren't redirected by hardware.
1075          *
1076          * Before code patching, we only set tpidr_el1, all CPUs need to copy
1077          * this value to tpidr_el2 before we patch the code. Once we've done
1078          * that, freshly-onlined CPUs will set tpidr_el2, so we don't need to
1079          * do anything here.
1080          */
1081         if (!alternatives_applied)
1082                 write_sysreg(read_sysreg(tpidr_el1), tpidr_el2);
1083 }
1084 #endif
1085
1086 static void cpu_has_fwb(const struct arm64_cpu_capabilities *__unused)
1087 {
1088         u64 val = read_sysreg_s(SYS_CLIDR_EL1);
1089
1090         /* Check that CLIDR_EL1.LOU{U,IS} are both 0 */
1091         WARN_ON(val & (7 << 27 | 7 << 21));
1092 }
1093
1094 #ifdef CONFIG_ARM64_SSBD
1095 static int ssbs_emulation_handler(struct pt_regs *regs, u32 instr)
1096 {
1097         if (user_mode(regs))
1098                 return 1;
1099
1100         if (instr & BIT(CRm_shift))
1101                 regs->pstate |= PSR_SSBS_BIT;
1102         else
1103                 regs->pstate &= ~PSR_SSBS_BIT;
1104
1105         arm64_skip_faulting_instruction(regs, 4);
1106         return 0;
1107 }
1108
1109 static struct undef_hook ssbs_emulation_hook = {
1110         .instr_mask     = ~(1U << CRm_shift),
1111         .instr_val      = 0xd500001f | REG_PSTATE_SSBS_IMM,
1112         .fn             = ssbs_emulation_handler,
1113 };
1114
1115 static void cpu_enable_ssbs(const struct arm64_cpu_capabilities *__unused)
1116 {
1117         static bool undef_hook_registered = false;
1118         static DEFINE_SPINLOCK(hook_lock);
1119
1120         spin_lock(&hook_lock);
1121         if (!undef_hook_registered) {
1122                 register_undef_hook(&ssbs_emulation_hook);
1123                 undef_hook_registered = true;
1124         }
1125         spin_unlock(&hook_lock);
1126
1127         if (arm64_get_ssbd_state() == ARM64_SSBD_FORCE_DISABLE) {
1128                 sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS);
1129                 arm64_set_ssbd_mitigation(false);
1130         } else {
1131                 arm64_set_ssbd_mitigation(true);
1132         }
1133 }
1134 #endif /* CONFIG_ARM64_SSBD */
1135
1136 static const struct arm64_cpu_capabilities arm64_features[] = {
1137         {
1138                 .desc = "GIC system register CPU interface",
1139                 .capability = ARM64_HAS_SYSREG_GIC_CPUIF,
1140                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1141                 .matches = has_useable_gicv3_cpuif,
1142                 .sys_reg = SYS_ID_AA64PFR0_EL1,
1143                 .field_pos = ID_AA64PFR0_GIC_SHIFT,
1144                 .sign = FTR_UNSIGNED,
1145                 .min_field_value = 1,
1146         },
1147 #ifdef CONFIG_ARM64_PAN
1148         {
1149                 .desc = "Privileged Access Never",
1150                 .capability = ARM64_HAS_PAN,
1151                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1152                 .matches = has_cpuid_feature,
1153                 .sys_reg = SYS_ID_AA64MMFR1_EL1,
1154                 .field_pos = ID_AA64MMFR1_PAN_SHIFT,
1155                 .sign = FTR_UNSIGNED,
1156                 .min_field_value = 1,
1157                 .cpu_enable = cpu_enable_pan,
1158         },
1159 #endif /* CONFIG_ARM64_PAN */
1160 #if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
1161         {
1162                 .desc = "LSE atomic instructions",
1163                 .capability = ARM64_HAS_LSE_ATOMICS,
1164                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1165                 .matches = has_cpuid_feature,
1166                 .sys_reg = SYS_ID_AA64ISAR0_EL1,
1167                 .field_pos = ID_AA64ISAR0_ATOMICS_SHIFT,
1168                 .sign = FTR_UNSIGNED,
1169                 .min_field_value = 2,
1170         },
1171 #endif /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
1172         {
1173                 .desc = "Software prefetching using PRFM",
1174                 .capability = ARM64_HAS_NO_HW_PREFETCH,
1175                 .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
1176                 .matches = has_no_hw_prefetch,
1177         },
1178 #ifdef CONFIG_ARM64_UAO
1179         {
1180                 .desc = "User Access Override",
1181                 .capability = ARM64_HAS_UAO,
1182                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1183                 .matches = has_cpuid_feature,
1184                 .sys_reg = SYS_ID_AA64MMFR2_EL1,
1185                 .field_pos = ID_AA64MMFR2_UAO_SHIFT,
1186                 .min_field_value = 1,
1187                 /*
1188                  * We rely on stop_machine() calling uao_thread_switch() to set
1189                  * UAO immediately after patching.
1190                  */
1191         },
1192 #endif /* CONFIG_ARM64_UAO */
1193 #ifdef CONFIG_ARM64_PAN
1194         {
1195                 .capability = ARM64_ALT_PAN_NOT_UAO,
1196                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1197                 .matches = cpufeature_pan_not_uao,
1198         },
1199 #endif /* CONFIG_ARM64_PAN */
1200 #ifdef CONFIG_ARM64_VHE
1201         {
1202                 .desc = "Virtualization Host Extensions",
1203                 .capability = ARM64_HAS_VIRT_HOST_EXTN,
1204                 .type = ARM64_CPUCAP_STRICT_BOOT_CPU_FEATURE,
1205                 .matches = runs_at_el2,
1206                 .cpu_enable = cpu_copy_el2regs,
1207         },
1208 #endif  /* CONFIG_ARM64_VHE */
1209         {
1210                 .desc = "32-bit EL0 Support",
1211                 .capability = ARM64_HAS_32BIT_EL0,
1212                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1213                 .matches = has_cpuid_feature,
1214                 .sys_reg = SYS_ID_AA64PFR0_EL1,
1215                 .sign = FTR_UNSIGNED,
1216                 .field_pos = ID_AA64PFR0_EL0_SHIFT,
1217                 .min_field_value = ID_AA64PFR0_EL0_32BIT_64BIT,
1218         },
1219         {
1220                 .desc = "Kernel page table isolation (KPTI)",
1221                 .capability = ARM64_UNMAP_KERNEL_AT_EL0,
1222                 .type = ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE,
1223                 /*
1224                  * The ID feature fields below are used to indicate that
1225                  * the CPU doesn't need KPTI. See unmap_kernel_at_el0 for
1226                  * more details.
1227                  */
1228                 .sys_reg = SYS_ID_AA64PFR0_EL1,
1229                 .field_pos = ID_AA64PFR0_CSV3_SHIFT,
1230                 .min_field_value = 1,
1231                 .matches = unmap_kernel_at_el0,
1232                 .cpu_enable = kpti_install_ng_mappings,
1233         },
1234         {
1235                 /* FP/SIMD is not implemented */
1236                 .capability = ARM64_HAS_NO_FPSIMD,
1237                 .type = ARM64_CPUCAP_BOOT_RESTRICTED_CPU_LOCAL_FEATURE,
1238                 .min_field_value = 0,
1239                 .matches = has_no_fpsimd,
1240         },
1241 #ifdef CONFIG_ARM64_PMEM
1242         {
1243                 .desc = "Data cache clean to Point of Persistence",
1244                 .capability = ARM64_HAS_DCPOP,
1245                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1246                 .matches = has_cpuid_feature,
1247                 .sys_reg = SYS_ID_AA64ISAR1_EL1,
1248                 .field_pos = ID_AA64ISAR1_DPB_SHIFT,
1249                 .min_field_value = 1,
1250         },
1251 #endif
1252 #ifdef CONFIG_ARM64_SVE
1253         {
1254                 .desc = "Scalable Vector Extension",
1255                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1256                 .capability = ARM64_SVE,
1257                 .sys_reg = SYS_ID_AA64PFR0_EL1,
1258                 .sign = FTR_UNSIGNED,
1259                 .field_pos = ID_AA64PFR0_SVE_SHIFT,
1260                 .min_field_value = ID_AA64PFR0_SVE,
1261                 .matches = has_cpuid_feature,
1262                 .cpu_enable = sve_kernel_enable,
1263         },
1264 #endif /* CONFIG_ARM64_SVE */
1265 #ifdef CONFIG_ARM64_RAS_EXTN
1266         {
1267                 .desc = "RAS Extension Support",
1268                 .capability = ARM64_HAS_RAS_EXTN,
1269                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1270                 .matches = has_cpuid_feature,
1271                 .sys_reg = SYS_ID_AA64PFR0_EL1,
1272                 .sign = FTR_UNSIGNED,
1273                 .field_pos = ID_AA64PFR0_RAS_SHIFT,
1274                 .min_field_value = ID_AA64PFR0_RAS_V1,
1275                 .cpu_enable = cpu_clear_disr,
1276         },
1277 #endif /* CONFIG_ARM64_RAS_EXTN */
1278         {
1279                 .desc = "Data cache clean to the PoU not required for I/D coherence",
1280                 .capability = ARM64_HAS_CACHE_IDC,
1281                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1282                 .matches = has_cache_idc,
1283         },
1284         {
1285                 .desc = "Instruction cache invalidation not required for I/D coherence",
1286                 .capability = ARM64_HAS_CACHE_DIC,
1287                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1288                 .matches = has_cache_dic,
1289         },
1290         {
1291                 .desc = "Stage-2 Force Write-Back",
1292                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
1293                 .capability = ARM64_HAS_STAGE2_FWB,
1294                 .sys_reg = SYS_ID_AA64MMFR2_EL1,
1295                 .sign = FTR_UNSIGNED,
1296                 .field_pos = ID_AA64MMFR2_FWB_SHIFT,
1297                 .min_field_value = 1,
1298                 .matches = has_cpuid_feature,
1299                 .cpu_enable = cpu_has_fwb,
1300         },
1301 #ifdef CONFIG_ARM64_HW_AFDBM
1302         {
1303                 /*
1304                  * Since we turn this on always, we don't want the user to
1305                  * think that the feature is available when it may not be.
1306                  * So hide the description.
1307                  *
1308                  * .desc = "Hardware pagetable Dirty Bit Management",
1309                  *
1310                  */
1311                 .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
1312                 .capability = ARM64_HW_DBM,
1313                 .sys_reg = SYS_ID_AA64MMFR1_EL1,
1314                 .sign = FTR_UNSIGNED,
1315                 .field_pos = ID_AA64MMFR1_HADBS_SHIFT,
1316                 .min_field_value = 2,
1317                 .matches = has_hw_dbm,
1318                 .cpu_enable = cpu_enable_hw_dbm,
1319         },
1320 #endif
1321 #ifdef CONFIG_ARM64_SSBD
1322         {
1323                 .desc = "Speculative Store Bypassing Safe (SSBS)",
1324                 .capability = ARM64_SSBS,
1325                 .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
1326                 .matches = has_cpuid_feature,
1327                 .sys_reg = SYS_ID_AA64PFR1_EL1,
1328                 .field_pos = ID_AA64PFR1_SSBS_SHIFT,
1329                 .sign = FTR_UNSIGNED,
1330                 .min_field_value = ID_AA64PFR1_SSBS_PSTATE_ONLY,
1331                 .cpu_enable = cpu_enable_ssbs,
1332         },
1333 #endif
1334         {},
1335 };
1336
1337
1338 #define HWCAP_CPUID_MATCH(reg, field, s, min_value)             \
1339                 .matches = has_cpuid_feature,                   \
1340                 .sys_reg = reg,                                 \
1341                 .field_pos = field,                             \
1342                 .sign = s,                                      \
1343                 .min_field_value = min_value,                   \
1344
1345 #define __HWCAP_CAP(name, cap_type, cap)                        \
1346                 .desc = name,                                   \
1347                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,            \
1348                 .hwcap_type = cap_type,                         \
1349                 .hwcap = cap,                                   \
1350
1351 #define HWCAP_CAP(reg, field, s, min_value, cap_type, cap)      \
1352         {                                                       \
1353                 __HWCAP_CAP(#cap, cap_type, cap)                \
1354                 HWCAP_CPUID_MATCH(reg, field, s, min_value)     \
1355         }
1356
1357 #define HWCAP_CAP_MATCH(match, cap_type, cap)                   \
1358         {                                                       \
1359                 __HWCAP_CAP(#cap, cap_type, cap)                \
1360                 .matches = match,                               \
1361         }
1362
1363 static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
1364         HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_PMULL),
1365         HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_AES),
1366         HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA1),
1367         HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA2),
1368         HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_SHA512),
1369         HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_CRC32),
1370         HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_ATOMICS_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_ATOMICS),
1371         HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_RDM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDRDM),
1372         HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA3),
1373         HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM3_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SM3),
1374         HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SM4_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SM4),
1375         HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_DP_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDDP),
1376         HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_FHM_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_ASIMDFHM),
1377         HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_TS_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_FLAGM),
1378         HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_FP),
1379         HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_FPHP),
1380         HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_ASIMD),
1381         HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_ASIMDHP),
1382         HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_DIT_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_DIT),
1383         HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_DPB_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_DCPOP),
1384         HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_JSCVT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_JSCVT),
1385         HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_FCMA_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_FCMA),
1386         HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_LRCPC),
1387         HWCAP_CAP(SYS_ID_AA64ISAR1_EL1, ID_AA64ISAR1_LRCPC_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_ILRCPC),
1388         HWCAP_CAP(SYS_ID_AA64MMFR2_EL1, ID_AA64MMFR2_AT_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_USCAT),
1389 #ifdef CONFIG_ARM64_SVE
1390         HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_SVE_SHIFT, FTR_UNSIGNED, ID_AA64PFR0_SVE, CAP_HWCAP, HWCAP_SVE),
1391 #endif
1392         HWCAP_CAP(SYS_ID_AA64PFR1_EL1, ID_AA64PFR1_SSBS_SHIFT, FTR_UNSIGNED, ID_AA64PFR1_SSBS_PSTATE_INSNS, CAP_HWCAP, HWCAP_SSBS),
1393         {},
1394 };
1395
1396 #ifdef CONFIG_COMPAT
1397 static bool compat_has_neon(const struct arm64_cpu_capabilities *cap, int scope)
1398 {
1399         /*
1400          * Check that all of MVFR1_EL1.{SIMDSP, SIMDInt, SIMDLS} are available,
1401          * in line with that of arm32 as in vfp_init(). We make sure that the
1402          * check is future proof, by making sure value is non-zero.
1403          */
1404         u32 mvfr1;
1405
1406         WARN_ON(scope == SCOPE_LOCAL_CPU && preemptible());
1407         if (scope == SCOPE_SYSTEM)
1408                 mvfr1 = read_sanitised_ftr_reg(SYS_MVFR1_EL1);
1409         else
1410                 mvfr1 = read_sysreg_s(SYS_MVFR1_EL1);
1411
1412         return cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDSP_SHIFT) &&
1413                 cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDINT_SHIFT) &&
1414                 cpuid_feature_extract_unsigned_field(mvfr1, MVFR1_SIMDLS_SHIFT);
1415 }
1416 #endif
1417
1418 static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = {
1419 #ifdef CONFIG_COMPAT
1420         HWCAP_CAP_MATCH(compat_has_neon, CAP_COMPAT_HWCAP, COMPAT_HWCAP_NEON),
1421         HWCAP_CAP(SYS_MVFR1_EL1, MVFR1_SIMDFMAC_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv4),
1422         /* Arm v8 mandates MVFR0.FPDP == {0, 2}. So, piggy back on this for the presence of VFP support */
1423         HWCAP_CAP(SYS_MVFR0_EL1, MVFR0_FPDP_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFP),
1424         HWCAP_CAP(SYS_MVFR0_EL1, MVFR0_FPDP_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP, COMPAT_HWCAP_VFPv3),
1425         HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_PMULL),
1426         HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_AES),
1427         HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA1),
1428         HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA2),
1429         HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_CRC32),
1430 #endif
1431         {},
1432 };
1433
1434 static void __init cap_set_elf_hwcap(const struct arm64_cpu_capabilities *cap)
1435 {
1436         switch (cap->hwcap_type) {
1437         case CAP_HWCAP:
1438                 elf_hwcap |= cap->hwcap;
1439                 break;
1440 #ifdef CONFIG_COMPAT
1441         case CAP_COMPAT_HWCAP:
1442                 compat_elf_hwcap |= (u32)cap->hwcap;
1443                 break;
1444         case CAP_COMPAT_HWCAP2:
1445                 compat_elf_hwcap2 |= (u32)cap->hwcap;
1446                 break;
1447 #endif
1448         default:
1449                 WARN_ON(1);
1450                 break;
1451         }
1452 }
1453
1454 /* Check if we have a particular HWCAP enabled */
1455 static bool cpus_have_elf_hwcap(const struct arm64_cpu_capabilities *cap)
1456 {
1457         bool rc;
1458
1459         switch (cap->hwcap_type) {
1460         case CAP_HWCAP:
1461                 rc = (elf_hwcap & cap->hwcap) != 0;
1462                 break;
1463 #ifdef CONFIG_COMPAT
1464         case CAP_COMPAT_HWCAP:
1465                 rc = (compat_elf_hwcap & (u32)cap->hwcap) != 0;
1466                 break;
1467         case CAP_COMPAT_HWCAP2:
1468                 rc = (compat_elf_hwcap2 & (u32)cap->hwcap) != 0;
1469                 break;
1470 #endif
1471         default:
1472                 WARN_ON(1);
1473                 rc = false;
1474         }
1475
1476         return rc;
1477 }
1478
1479 static void __init setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps)
1480 {
1481         /* We support emulation of accesses to CPU ID feature registers */
1482         elf_hwcap |= HWCAP_CPUID;
1483         for (; hwcaps->matches; hwcaps++)
1484                 if (hwcaps->matches(hwcaps, cpucap_default_scope(hwcaps)))
1485                         cap_set_elf_hwcap(hwcaps);
1486 }
1487
1488 /*
1489  * Check if the current CPU has a given feature capability.
1490  * Should be called from non-preemptible context.
1491  */
1492 static bool __this_cpu_has_cap(const struct arm64_cpu_capabilities *cap_array,
1493                                unsigned int cap)
1494 {
1495         const struct arm64_cpu_capabilities *caps;
1496
1497         if (WARN_ON(preemptible()))
1498                 return false;
1499
1500         for (caps = cap_array; caps->matches; caps++)
1501                 if (caps->capability == cap)
1502                         return caps->matches(caps, SCOPE_LOCAL_CPU);
1503
1504         return false;
1505 }
1506
1507 static void __update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
1508                                       u16 scope_mask, const char *info)
1509 {
1510         scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
1511         for (; caps->matches; caps++) {
1512                 if (!(caps->type & scope_mask) ||
1513                     !caps->matches(caps, cpucap_default_scope(caps)))
1514                         continue;
1515
1516                 if (!cpus_have_cap(caps->capability) && caps->desc)
1517                         pr_info("%s %s\n", info, caps->desc);
1518                 cpus_set_cap(caps->capability);
1519         }
1520 }
1521
1522 static void update_cpu_capabilities(u16 scope_mask)
1523 {
1524         __update_cpu_capabilities(arm64_errata, scope_mask,
1525                                   "enabling workaround for");
1526         __update_cpu_capabilities(arm64_features, scope_mask, "detected:");
1527 }
1528
1529 static int __enable_cpu_capability(void *arg)
1530 {
1531         const struct arm64_cpu_capabilities *cap = arg;
1532
1533         cap->cpu_enable(cap);
1534         return 0;
1535 }
1536
1537 /*
1538  * Run through the enabled capabilities and enable() it on all active
1539  * CPUs
1540  */
1541 static void __init
1542 __enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
1543                           u16 scope_mask)
1544 {
1545         scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
1546         for (; caps->matches; caps++) {
1547                 unsigned int num = caps->capability;
1548
1549                 if (!(caps->type & scope_mask) || !cpus_have_cap(num))
1550                         continue;
1551
1552                 /* Ensure cpus_have_const_cap(num) works */
1553                 static_branch_enable(&cpu_hwcap_keys[num]);
1554
1555                 if (caps->cpu_enable) {
1556                         /*
1557                          * Capabilities with SCOPE_BOOT_CPU scope are finalised
1558                          * before any secondary CPU boots. Thus, each secondary
1559                          * will enable the capability as appropriate via
1560                          * check_local_cpu_capabilities(). The only exception is
1561                          * the boot CPU, for which the capability must be
1562                          * enabled here. This approach avoids costly
1563                          * stop_machine() calls for this case.
1564                          *
1565                          * Otherwise, use stop_machine() as it schedules the
1566                          * work allowing us to modify PSTATE, instead of
1567                          * on_each_cpu() which uses an IPI, giving us a PSTATE
1568                          * that disappears when we return.
1569                          */
1570                         if (scope_mask & SCOPE_BOOT_CPU)
1571                                 caps->cpu_enable(caps);
1572                         else
1573                                 stop_machine(__enable_cpu_capability,
1574                                              (void *)caps, cpu_online_mask);
1575                 }
1576         }
1577 }
1578
1579 static void __init enable_cpu_capabilities(u16 scope_mask)
1580 {
1581         __enable_cpu_capabilities(arm64_errata, scope_mask);
1582         __enable_cpu_capabilities(arm64_features, scope_mask);
1583 }
1584
1585 /*
1586  * Run through the list of capabilities to check for conflicts.
1587  * If the system has already detected a capability, take necessary
1588  * action on this CPU.
1589  *
1590  * Returns "false" on conflicts.
1591  */
1592 static bool
1593 __verify_local_cpu_caps(const struct arm64_cpu_capabilities *caps,
1594                         u16 scope_mask)
1595 {
1596         bool cpu_has_cap, system_has_cap;
1597
1598         scope_mask &= ARM64_CPUCAP_SCOPE_MASK;
1599
1600         for (; caps->matches; caps++) {
1601                 if (!(caps->type & scope_mask))
1602                         continue;
1603
1604                 cpu_has_cap = caps->matches(caps, SCOPE_LOCAL_CPU);
1605                 system_has_cap = cpus_have_cap(caps->capability);
1606
1607                 if (system_has_cap) {
1608                         /*
1609                          * Check if the new CPU misses an advertised feature,
1610                          * which is not safe to miss.
1611                          */
1612                         if (!cpu_has_cap && !cpucap_late_cpu_optional(caps))
1613                                 break;
1614                         /*
1615                          * We have to issue cpu_enable() irrespective of
1616                          * whether the CPU has it or not, as it is enabeld
1617                          * system wide. It is upto the call back to take
1618                          * appropriate action on this CPU.
1619                          */
1620                         if (caps->cpu_enable)
1621                                 caps->cpu_enable(caps);
1622                 } else {
1623                         /*
1624                          * Check if the CPU has this capability if it isn't
1625                          * safe to have when the system doesn't.
1626                          */
1627                         if (cpu_has_cap && !cpucap_late_cpu_permitted(caps))
1628                                 break;
1629                 }
1630         }
1631
1632         if (caps->matches) {
1633                 pr_crit("CPU%d: Detected conflict for capability %d (%s), System: %d, CPU: %d\n",
1634                         smp_processor_id(), caps->capability,
1635                         caps->desc, system_has_cap, cpu_has_cap);
1636                 return false;
1637         }
1638
1639         return true;
1640 }
1641
1642 static bool verify_local_cpu_caps(u16 scope_mask)
1643 {
1644         return __verify_local_cpu_caps(arm64_errata, scope_mask) &&
1645                __verify_local_cpu_caps(arm64_features, scope_mask);
1646 }
1647
1648 /*
1649  * Check for CPU features that are used in early boot
1650  * based on the Boot CPU value.
1651  */
1652 static void check_early_cpu_features(void)
1653 {
1654         verify_cpu_asid_bits();
1655         /*
1656          * Early features are used by the kernel already. If there
1657          * is a conflict, we cannot proceed further.
1658          */
1659         if (!verify_local_cpu_caps(SCOPE_BOOT_CPU))
1660                 cpu_panic_kernel();
1661 }
1662
1663 static void
1664 verify_local_elf_hwcaps(const struct arm64_cpu_capabilities *caps)
1665 {
1666
1667         for (; caps->matches; caps++)
1668                 if (cpus_have_elf_hwcap(caps) && !caps->matches(caps, SCOPE_LOCAL_CPU)) {
1669                         pr_crit("CPU%d: missing HWCAP: %s\n",
1670                                         smp_processor_id(), caps->desc);
1671                         cpu_die_early();
1672                 }
1673 }
1674
1675 static void verify_sve_features(void)
1676 {
1677         u64 safe_zcr = read_sanitised_ftr_reg(SYS_ZCR_EL1);
1678         u64 zcr = read_zcr_features();
1679
1680         unsigned int safe_len = safe_zcr & ZCR_ELx_LEN_MASK;
1681         unsigned int len = zcr & ZCR_ELx_LEN_MASK;
1682
1683         if (len < safe_len || sve_verify_vq_map()) {
1684                 pr_crit("CPU%d: SVE: required vector length(s) missing\n",
1685                         smp_processor_id());
1686                 cpu_die_early();
1687         }
1688
1689         /* Add checks on other ZCR bits here if necessary */
1690 }
1691
1692
1693 /*
1694  * Run through the enabled system capabilities and enable() it on this CPU.
1695  * The capabilities were decided based on the available CPUs at the boot time.
1696  * Any new CPU should match the system wide status of the capability. If the
1697  * new CPU doesn't have a capability which the system now has enabled, we
1698  * cannot do anything to fix it up and could cause unexpected failures. So
1699  * we park the CPU.
1700  */
1701 static void verify_local_cpu_capabilities(void)
1702 {
1703         /*
1704          * The capabilities with SCOPE_BOOT_CPU are checked from
1705          * check_early_cpu_features(), as they need to be verified
1706          * on all secondary CPUs.
1707          */
1708         if (!verify_local_cpu_caps(SCOPE_ALL & ~SCOPE_BOOT_CPU))
1709                 cpu_die_early();
1710
1711         verify_local_elf_hwcaps(arm64_elf_hwcaps);
1712
1713         if (system_supports_32bit_el0())
1714                 verify_local_elf_hwcaps(compat_elf_hwcaps);
1715
1716         if (system_supports_sve())
1717                 verify_sve_features();
1718 }
1719
1720 void check_local_cpu_capabilities(void)
1721 {
1722         /*
1723          * All secondary CPUs should conform to the early CPU features
1724          * in use by the kernel based on boot CPU.
1725          */
1726         check_early_cpu_features();
1727
1728         /*
1729          * If we haven't finalised the system capabilities, this CPU gets
1730          * a chance to update the errata work arounds and local features.
1731          * Otherwise, this CPU should verify that it has all the system
1732          * advertised capabilities.
1733          */
1734         if (!sys_caps_initialised)
1735                 update_cpu_capabilities(SCOPE_LOCAL_CPU);
1736         else
1737                 verify_local_cpu_capabilities();
1738 }
1739
1740 static void __init setup_boot_cpu_capabilities(void)
1741 {
1742         /* Detect capabilities with either SCOPE_BOOT_CPU or SCOPE_LOCAL_CPU */
1743         update_cpu_capabilities(SCOPE_BOOT_CPU | SCOPE_LOCAL_CPU);
1744         /* Enable the SCOPE_BOOT_CPU capabilities alone right away */
1745         enable_cpu_capabilities(SCOPE_BOOT_CPU);
1746 }
1747
1748 DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready);
1749 EXPORT_SYMBOL(arm64_const_caps_ready);
1750
1751 static void __init mark_const_caps_ready(void)
1752 {
1753         static_branch_enable(&arm64_const_caps_ready);
1754 }
1755
1756 extern const struct arm64_cpu_capabilities arm64_errata[];
1757
1758 bool this_cpu_has_cap(unsigned int cap)
1759 {
1760         return (__this_cpu_has_cap(arm64_features, cap) ||
1761                 __this_cpu_has_cap(arm64_errata, cap));
1762 }
1763
1764 static void __init setup_system_capabilities(void)
1765 {
1766         /*
1767          * We have finalised the system-wide safe feature
1768          * registers, finalise the capabilities that depend
1769          * on it. Also enable all the available capabilities,
1770          * that are not enabled already.
1771          */
1772         update_cpu_capabilities(SCOPE_SYSTEM);
1773         enable_cpu_capabilities(SCOPE_ALL & ~SCOPE_BOOT_CPU);
1774 }
1775
1776 void __init setup_cpu_features(void)
1777 {
1778         u32 cwg;
1779
1780         setup_system_capabilities();
1781         mark_const_caps_ready();
1782         setup_elf_hwcaps(arm64_elf_hwcaps);
1783
1784         if (system_supports_32bit_el0())
1785                 setup_elf_hwcaps(compat_elf_hwcaps);
1786
1787         if (system_uses_ttbr0_pan())
1788                 pr_info("emulated: Privileged Access Never (PAN) using TTBR0_EL1 switching\n");
1789
1790         sve_setup();
1791         minsigstksz_setup();
1792
1793         /* Advertise that we have computed the system capabilities */
1794         set_sys_caps_initialised();
1795
1796         /*
1797          * Check for sane CTR_EL0.CWG value.
1798          */
1799         cwg = cache_type_cwg();
1800         if (!cwg)
1801                 pr_warn("No Cache Writeback Granule information, assuming %d\n",
1802                         ARCH_DMA_MINALIGN);
1803 }
1804
1805 static bool __maybe_unused
1806 cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused)
1807 {
1808         return (cpus_have_const_cap(ARM64_HAS_PAN) && !cpus_have_const_cap(ARM64_HAS_UAO));
1809 }
1810
1811 /*
1812  * We emulate only the following system register space.
1813  * Op0 = 0x3, CRn = 0x0, Op1 = 0x0, CRm = [0, 4 - 7]
1814  * See Table C5-6 System instruction encodings for System register accesses,
1815  * ARMv8 ARM(ARM DDI 0487A.f) for more details.
1816  */
1817 static inline bool __attribute_const__ is_emulated(u32 id)
1818 {
1819         return (sys_reg_Op0(id) == 0x3 &&
1820                 sys_reg_CRn(id) == 0x0 &&
1821                 sys_reg_Op1(id) == 0x0 &&
1822                 (sys_reg_CRm(id) == 0 ||
1823                  ((sys_reg_CRm(id) >= 4) && (sys_reg_CRm(id) <= 7))));
1824 }
1825
1826 /*
1827  * With CRm == 0, reg should be one of :
1828  * MIDR_EL1, MPIDR_EL1 or REVIDR_EL1.
1829  */
1830 static inline int emulate_id_reg(u32 id, u64 *valp)
1831 {
1832         switch (id) {
1833         case SYS_MIDR_EL1:
1834                 *valp = read_cpuid_id();
1835                 break;
1836         case SYS_MPIDR_EL1:
1837                 *valp = SYS_MPIDR_SAFE_VAL;
1838                 break;
1839         case SYS_REVIDR_EL1:
1840                 /* IMPLEMENTATION DEFINED values are emulated with 0 */
1841                 *valp = 0;
1842                 break;
1843         default:
1844                 return -EINVAL;
1845         }
1846
1847         return 0;
1848 }
1849
1850 static int emulate_sys_reg(u32 id, u64 *valp)
1851 {
1852         struct arm64_ftr_reg *regp;
1853
1854         if (!is_emulated(id))
1855                 return -EINVAL;
1856
1857         if (sys_reg_CRm(id) == 0)
1858                 return emulate_id_reg(id, valp);
1859
1860         regp = get_arm64_ftr_reg(id);
1861         if (regp)
1862                 *valp = arm64_ftr_reg_user_value(regp);
1863         else
1864                 /*
1865                  * The untracked registers are either IMPLEMENTATION DEFINED
1866                  * (e.g, ID_AFR0_EL1) or reserved RAZ.
1867                  */
1868                 *valp = 0;
1869         return 0;
1870 }
1871
1872 static int emulate_mrs(struct pt_regs *regs, u32 insn)
1873 {
1874         int rc;
1875         u32 sys_reg, dst;
1876         u64 val;
1877
1878         /*
1879          * sys_reg values are defined as used in mrs/msr instruction.
1880          * shift the imm value to get the encoding.
1881          */
1882         sys_reg = (u32)aarch64_insn_decode_immediate(AARCH64_INSN_IMM_16, insn) << 5;
1883         rc = emulate_sys_reg(sys_reg, &val);
1884         if (!rc) {
1885                 dst = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RT, insn);
1886                 pt_regs_write_reg(regs, dst, val);
1887                 arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
1888         }
1889
1890         return rc;
1891 }
1892
1893 static struct undef_hook mrs_hook = {
1894         .instr_mask = 0xfff00000,
1895         .instr_val  = 0xd5300000,
1896         .pstate_mask = PSR_AA32_MODE_MASK,
1897         .pstate_val = PSR_MODE_EL0t,
1898         .fn = emulate_mrs,
1899 };
1900
1901 static int __init enable_mrs_emulation(void)
1902 {
1903         register_undef_hook(&mrs_hook);
1904         return 0;
1905 }
1906
1907 core_initcall(enable_mrs_emulation);
1908
1909 void cpu_clear_disr(const struct arm64_cpu_capabilities *__unused)
1910 {
1911         /* Firmware may have left a deferred SError in this register. */
1912         write_sysreg_s(0, SYS_DISR_EL1);
1913 }
1914
1915 ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr,
1916                           char *buf)
1917 {
1918         if (__meltdown_safe)
1919                 return sprintf(buf, "Not affected\n");
1920
1921         if (arm64_kernel_unmapped_at_el0())
1922                 return sprintf(buf, "Mitigation: PTI\n");
1923
1924         return sprintf(buf, "Vulnerable\n");
1925 }