GNU Linux-libre 4.9.337-gnu1
arch/arm64/kernel/cpufeature.c
1 /*
2  * Contains CPU feature definitions
3  *
4  * Copyright (C) 2015 ARM Ltd.
5  *
6  * This program is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License version 2 as
8  * published by the Free Software Foundation.
9  *
10  * This program is distributed in the hope that it will be useful,
11  * but WITHOUT ANY WARRANTY; without even the implied warranty of
12  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
13  * GNU General Public License for more details.
14  *
15  * You should have received a copy of the GNU General Public License
16  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
17  */
18
19 #define pr_fmt(fmt) "CPU features: " fmt
20
21 #include <linux/bsearch.h>
22 #include <linux/cpumask.h>
23 #include <linux/percpu.h>
24 #include <linux/sort.h>
25 #include <linux/stop_machine.h>
26 #include <linux/types.h>
27
28 #include <asm/cpu.h>
29 #include <asm/cpufeature.h>
30 #include <asm/cpu_ops.h>
31 #include <asm/hwcap.h>
32 #include <asm/mmu_context.h>
33 #include <asm/processor.h>
34 #include <asm/sysreg.h>
35 #include <asm/vectors.h>
36 #include <asm/virt.h>
37
38 unsigned long elf_hwcap __read_mostly;
39 EXPORT_SYMBOL_GPL(elf_hwcap);
40
41 #ifdef CONFIG_COMPAT
42 #define COMPAT_ELF_HWCAP_DEFAULT        \
43                                 (COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
44                                  COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
45                                  COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
46                                  COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
47                                  COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\
48                                  COMPAT_HWCAP_LPAE)
49 unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
50 unsigned int compat_elf_hwcap2 __read_mostly;
51 #endif
52
53 DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
54 EXPORT_SYMBOL(cpu_hwcaps);
55
56 DEFINE_PER_CPU_READ_MOSTLY(const char *, this_cpu_vector) = vectors;
57
58 DEFINE_STATIC_KEY_ARRAY_FALSE(cpu_hwcap_keys, ARM64_NCAPS);
59 EXPORT_SYMBOL(cpu_hwcap_keys);
60
61 #define __ARM64_FTR_BITS(SIGNED, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
62         {                                               \
63                 .sign = SIGNED,                         \
64                 .strict = STRICT,                       \
65                 .type = TYPE,                           \
66                 .shift = SHIFT,                         \
67                 .width = WIDTH,                         \
68                 .safe_val = SAFE_VAL,                   \
69         }
70
71 /* Define a feature with unsigned values */
72 #define ARM64_FTR_BITS(STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
73         __ARM64_FTR_BITS(FTR_UNSIGNED, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)
74
75 /* Define a feature with a signed value */
76 #define S_ARM64_FTR_BITS(STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
77         __ARM64_FTR_BITS(FTR_SIGNED, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)
78
79 #define ARM64_FTR_END                                   \
80         {                                               \
81                 .width = 0,                             \
82         }
83
84 /* meta feature for alternatives */
85 static bool __maybe_unused
86 cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused);
87
88
89 static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
90         ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
91         ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64ISAR0_RDM_SHIFT, 4, 0),
92         ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 24, 4, 0),
93         ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_ATOMICS_SHIFT, 4, 0),
94         ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_CRC32_SHIFT, 4, 0),
95         ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA2_SHIFT, 4, 0),
96         ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA1_SHIFT, 4, 0),
97         ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_AES_SHIFT, 4, 0),
98         ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 4, 0), /* RAZ */
99         ARM64_FTR_END,
100 };
101
102 static const struct arm64_ftr_bits ftr_id_aa64isar2[] = {
103         ARM64_FTR_BITS(FTR_STRICT, FTR_HIGHER_SAFE, ID_AA64ISAR2_CLEARBHB_SHIFT, 4, 0),
104         ARM64_FTR_END,
105 };
106
107 static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
108         ARM64_FTR_BITS(FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0),
109         ARM64_FTR_BITS(FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0),
110         ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 24, 0),
111         ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 4, 0),
112         ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64PFR0_GIC_SHIFT, 4, 0),
113         S_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
114         S_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
115         /* Linux doesn't care about EL3 */
116         ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, ID_AA64PFR0_EL3_SHIFT, 4, 0),
117         ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64PFR0_EL2_SHIFT, 4, 0),
118         ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY),
119         ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY),
120         ARM64_FTR_END,
121 };
122
123 static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
124         ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
125         S_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
126         S_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
127         ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI),
128         ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_BIGENDEL0_SHIFT, 4, 0),
129         /* Linux shouldn't care about secure memory */
130         ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_SNSMEM_SHIFT, 4, 0),
131         ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_BIGENDEL_SHIFT, 4, 0),
132         ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_ASID_SHIFT, 4, 0),
133         /*
134          * Differing PARange is fine as long as all peripherals and memory are mapped
135          * within the minimum PARange of all CPUs
136          */
137         ARM64_FTR_BITS(FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_PARANGE_SHIFT, 4, 0),
138         ARM64_FTR_END,
139 };
140
141 static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
142         ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
143         ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_PAN_SHIFT, 4, 0),
144         ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_LOR_SHIFT, 4, 0),
145         ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_HPD_SHIFT, 4, 0),
146         ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_VHE_SHIFT, 4, 0),
147         ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_VMIDBITS_SHIFT, 4, 0),
148         ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_HADBS_SHIFT, 4, 0),
149         ARM64_FTR_END,
150 };
151
152 static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
153         ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_LVA_SHIFT, 4, 0),
154         ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_IESB_SHIFT, 4, 0),
155         ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_LSM_SHIFT, 4, 0),
156         ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_UAO_SHIFT, 4, 0),
157         ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_CNP_SHIFT, 4, 0),
158         ARM64_FTR_END,
159 };
160
161 static const struct arm64_ftr_bits ftr_ctr[] = {
162         ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 31, 1, 1),        /* RES1 */
163         ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 30, 1, 0),
164         ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 29, 1, 1),   /* DIC */
165         ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 28, 1, 1),   /* IDC */
166         ARM64_FTR_BITS(FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, 24, 4, 0),  /* CWG */
167         ARM64_FTR_BITS(FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, 20, 4, 0),  /* ERG */
168         ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, CTR_DMINLINE_SHIFT, 4, 1),
169         /*
170          * Linux can handle differing I-cache policies. Userspace JITs will
171          * make use of *minLine.
172          * If we have differing I-cache policies, report it as the weakest - AIVIVT.
173          */
174         ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, 14, 2, ICACHE_POLICY_AIVIVT),  /* L1Ip */
175         ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 10, 0),        /* RAZ */
176         ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, CTR_IMINLINE_SHIFT, 4, 0),
177         ARM64_FTR_END,
178 };
179
180 struct arm64_ftr_reg arm64_ftr_reg_ctrel0 = {
181         .name           = "SYS_CTR_EL0",
182         .ftr_bits       = ftr_ctr
183 };
184
185 static const struct arm64_ftr_bits ftr_id_mmfr0[] = {
186         S_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 4, 0xf),    /* InnerShr */
187         ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 24, 4, 0),        /* FCSE */
188         ARM64_FTR_BITS(FTR_NONSTRICT, FTR_LOWER_SAFE, 20, 4, 0),        /* AuxReg */
189         ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 16, 4, 0),        /* TCM */
190         ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 12, 4, 0),        /* ShareLvl */
191         S_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 8, 4, 0xf),     /* OuterShr */
192         ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 4, 0), /* PMSA */
193         ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 4, 0), /* VMSA */
194         ARM64_FTR_END,
195 };
196
197 static const struct arm64_ftr_bits ftr_id_aa64dfr0[] = {
198         ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
199         ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPS_SHIFT, 4, 0),
200         ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPS_SHIFT, 4, 0),
201         ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_BRPS_SHIFT, 4, 0),
202         S_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0),
203         ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_TRACEVER_SHIFT, 4, 0),
204         ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6),
205         ARM64_FTR_END,
206 };
207
208 static const struct arm64_ftr_bits ftr_mvfr2[] = {
209         ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 8, 24, 0),        /* RAZ */
210         ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 4, 0),         /* FPMisc */
211         ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 4, 0),         /* SIMDMisc */
212         ARM64_FTR_END,
213 };
214
215 static const struct arm64_ftr_bits ftr_dczid[] = {
216         ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 5, 27, 0),        /* RAZ */
217         ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 1, 1),         /* DZP */
218         ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),    /* BS */
219         ARM64_FTR_END,
220 };
221
222
223 static const struct arm64_ftr_bits ftr_id_isar5[] = {
224         ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_RDM_SHIFT, 4, 0),
225         ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 20, 4, 0),        /* RAZ */
226         ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_CRC32_SHIFT, 4, 0),
227         ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_SHA2_SHIFT, 4, 0),
228         ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_SHA1_SHIFT, 4, 0),
229         ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_AES_SHIFT, 4, 0),
230         ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_SEVL_SHIFT, 4, 0),
231         ARM64_FTR_END,
232 };
233
234 static const struct arm64_ftr_bits ftr_id_mmfr4[] = {
235         ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 8, 24, 0),        /* RAZ */
236         ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 4, 0),         /* ac2 */
237         ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 4, 0),         /* RAZ */
238         ARM64_FTR_END,
239 };
240
241 static const struct arm64_ftr_bits ftr_id_pfr0[] = {
242         ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 16, 16, 0),       /* RAZ */
243         ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 12, 4, 0),        /* State3 */
244         ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 8, 4, 0),         /* State2 */
245         ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 4, 0),         /* State1 */
246         ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 4, 0),         /* State0 */
247         ARM64_FTR_END,
248 };
249
250 static const struct arm64_ftr_bits ftr_id_dfr0[] = {
251         ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0),
252         S_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0xf),       /* PerfMon */
253         ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),
254         ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),
255         ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),
256         ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),
257         ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),
258         ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),
259         ARM64_FTR_END,
260 };
261
262 /*
263  * Common ftr bits for a 32-bit register with all hidden, strict
264  * attributes, with 4-bit feature fields and a default safe value of
265  * 0. Covers the following 32-bit registers:
266  * id_isar[0-4], id_mmfr[1-3], id_pfr1, mvfr[0-1]
267  */
268 static const struct arm64_ftr_bits ftr_generic_32bits[] = {
269         ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0),
270         ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0),
271         ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),
272         ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),
273         ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),
274         ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),
275         ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),
276         ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),
277         ARM64_FTR_END,
278 };
279
280 static const struct arm64_ftr_bits ftr_generic[] = {
281         ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 64, 0),
282         ARM64_FTR_END,
283 };
284
285 static const struct arm64_ftr_bits ftr_generic32[] = {
286         ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 32, 0),
287         ARM64_FTR_END,
288 };
289
290 static const struct arm64_ftr_bits ftr_aa64raz[] = {
291         ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 64, 0),
292         ARM64_FTR_END,
293 };
294
295 #define ARM64_FTR_REG(id, table) {              \
296         .sys_id = id,                           \
297         .reg =  &(struct arm64_ftr_reg){        \
298                 .name = #id,                    \
299                 .ftr_bits = &((table)[0]),      \
300         }}
301
302 static const struct __ftr_reg_entry {
303         u32                     sys_id;
304         struct arm64_ftr_reg    *reg;
305 } arm64_ftr_regs[] = {
306
307         /* Op1 = 0, CRn = 0, CRm = 1 */
308         ARM64_FTR_REG(SYS_ID_PFR0_EL1, ftr_id_pfr0),
309         ARM64_FTR_REG(SYS_ID_PFR1_EL1, ftr_generic_32bits),
310         ARM64_FTR_REG(SYS_ID_DFR0_EL1, ftr_id_dfr0),
311         ARM64_FTR_REG(SYS_ID_MMFR0_EL1, ftr_id_mmfr0),
312         ARM64_FTR_REG(SYS_ID_MMFR1_EL1, ftr_generic_32bits),
313         ARM64_FTR_REG(SYS_ID_MMFR2_EL1, ftr_generic_32bits),
314         ARM64_FTR_REG(SYS_ID_MMFR3_EL1, ftr_generic_32bits),
315
316         /* Op1 = 0, CRn = 0, CRm = 2 */
317         ARM64_FTR_REG(SYS_ID_ISAR0_EL1, ftr_generic_32bits),
318         ARM64_FTR_REG(SYS_ID_ISAR1_EL1, ftr_generic_32bits),
319         ARM64_FTR_REG(SYS_ID_ISAR2_EL1, ftr_generic_32bits),
320         ARM64_FTR_REG(SYS_ID_ISAR3_EL1, ftr_generic_32bits),
321         ARM64_FTR_REG(SYS_ID_ISAR4_EL1, ftr_generic_32bits),
322         ARM64_FTR_REG(SYS_ID_ISAR5_EL1, ftr_id_isar5),
323         ARM64_FTR_REG(SYS_ID_MMFR4_EL1, ftr_id_mmfr4),
324
325         /* Op1 = 0, CRn = 0, CRm = 3 */
326         ARM64_FTR_REG(SYS_MVFR0_EL1, ftr_generic_32bits),
327         ARM64_FTR_REG(SYS_MVFR1_EL1, ftr_generic_32bits),
328         ARM64_FTR_REG(SYS_MVFR2_EL1, ftr_mvfr2),
329
330         /* Op1 = 0, CRn = 0, CRm = 4 */
331         ARM64_FTR_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0),
332         ARM64_FTR_REG(SYS_ID_AA64PFR1_EL1, ftr_aa64raz),
333
334         /* Op1 = 0, CRn = 0, CRm = 5 */
335         ARM64_FTR_REG(SYS_ID_AA64DFR0_EL1, ftr_id_aa64dfr0),
336         ARM64_FTR_REG(SYS_ID_AA64DFR1_EL1, ftr_generic),
337
338         /* Op1 = 0, CRn = 0, CRm = 6 */
339         ARM64_FTR_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0),
340         ARM64_FTR_REG(SYS_ID_AA64ISAR1_EL1, ftr_aa64raz),
341         ARM64_FTR_REG(SYS_ID_AA64ISAR2_EL1, ftr_id_aa64isar2),
342
343         /* Op1 = 0, CRn = 0, CRm = 7 */
344         ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0),
345         ARM64_FTR_REG(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1),
346         ARM64_FTR_REG(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2),
347
348         /* Op1 = 3, CRn = 0, CRm = 0 */
349         { SYS_CTR_EL0, &arm64_ftr_reg_ctrel0 },
350         ARM64_FTR_REG(SYS_DCZID_EL0, ftr_dczid),
351
352         /* Op1 = 3, CRn = 14, CRm = 0 */
353         ARM64_FTR_REG(SYS_CNTFRQ_EL0, ftr_generic32),
354 };
355
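/* bsearch() comparator: arm64_ftr_regs entries are ordered by ascending sys_id */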
356 static int search_cmp_ftr_reg(const void *id, const void *regp)
357 {
358         return (int)(unsigned long)id - (int)((const struct __ftr_reg_entry *)regp)->sys_id;
359 }
360
361 /*
362  * get_arm64_ftr_reg - Lookup a feature register entry using its
363  * sys_reg() encoding. With the array arm64_ftr_regs sorted in the
364  * ascending order of sys_id, we use a binary search to find a matching
365  * entry.
366  *
367  * returns - Upon success, the matching ftr_reg entry for id.
368  *         - NULL on failure. It is up to the caller to decide
369  *           the impact of a failure.
370  */
371 static struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id)
372 {
373         const struct __ftr_reg_entry *ret;
374
375         ret = bsearch((const void *)(unsigned long)sys_id,
376                         arm64_ftr_regs,
377                         ARRAY_SIZE(arm64_ftr_regs),
378                         sizeof(arm64_ftr_regs[0]),
379                         search_cmp_ftr_reg);
380         if (ret)
381                 return ret->reg;
382         return NULL;
383 }
384
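/* Insert ftr_val into the field described by ftrp, leaving the other bits of reg untouched */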
385 static u64 arm64_ftr_set_value(const struct arm64_ftr_bits *ftrp, s64 reg,
386                                s64 ftr_val)
387 {
388         u64 mask = arm64_ftr_mask(ftrp);
389
390         reg &= ~mask;
391         reg |= (ftr_val << ftrp->shift) & mask;
392         return reg;
393 }
394
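/* Combine two values of a feature field into the value that is safe to advertise system-wide */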
395 static s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new,
396                                 s64 cur)
397 {
398         s64 ret = 0;
399
400         switch (ftrp->type) {
401         case FTR_EXACT:
402                 ret = ftrp->safe_val;
403                 break;
404         case FTR_LOWER_SAFE:
405                 ret = new < cur ? new : cur;
406                 break;
407         case FTR_HIGHER_OR_ZERO_SAFE:
408                 if (!cur || !new)
409                         break;
410                 /* Fallthrough */
411         case FTR_HIGHER_SAFE:
412                 ret = new > cur ? new : cur;
413                 break;
414         default:
415                 BUG();
416         }
417
418         return ret;
419 }
420
421 static void __init sort_ftr_regs(void)
422 {
423         int i;
424
425         /* Check that the array is sorted so that we can do the binary search */
426         for (i = 1; i < ARRAY_SIZE(arm64_ftr_regs); i++)
427                 BUG_ON(arm64_ftr_regs[i].sys_id < arm64_ftr_regs[i - 1].sys_id);
428 }
429
430 /*
431  * Initialise the CPU feature register from the boot CPU values.
432  * Also initialises the strict_mask for the register.
433  */
434 static void __init init_cpu_ftr_reg(u32 sys_reg, u64 new)
435 {
436         u64 val = 0;
437         u64 strict_mask = ~0x0ULL;
438         const struct arm64_ftr_bits *ftrp;
439         struct arm64_ftr_reg *reg = get_arm64_ftr_reg(sys_reg);
440
441         BUG_ON(!reg);
442
443         for (ftrp  = reg->ftr_bits; ftrp->width; ftrp++) {
444                 s64 ftr_new = arm64_ftr_value(ftrp, new);
445
446                 val = arm64_ftr_set_value(ftrp, val, ftr_new);
447                 if (!ftrp->strict)
448                         strict_mask &= ~arm64_ftr_mask(ftrp);
449         }
450         reg->sys_val = val;
451         reg->strict_mask = strict_mask;
452 }
453
454 extern const struct arm64_cpu_capabilities arm64_errata[];
455 static void update_cpu_errata_workarounds(void);
456
457 void __init init_cpu_features(struct cpuinfo_arm64 *info)
458 {
459         /* Before we start using the table, make sure it is sorted */
460         sort_ftr_regs();
461
462         init_cpu_ftr_reg(SYS_CTR_EL0, info->reg_ctr);
463         init_cpu_ftr_reg(SYS_DCZID_EL0, info->reg_dczid);
464         init_cpu_ftr_reg(SYS_CNTFRQ_EL0, info->reg_cntfrq);
465         init_cpu_ftr_reg(SYS_ID_AA64DFR0_EL1, info->reg_id_aa64dfr0);
466         init_cpu_ftr_reg(SYS_ID_AA64DFR1_EL1, info->reg_id_aa64dfr1);
467         init_cpu_ftr_reg(SYS_ID_AA64ISAR0_EL1, info->reg_id_aa64isar0);
468         init_cpu_ftr_reg(SYS_ID_AA64ISAR1_EL1, info->reg_id_aa64isar1);
469         init_cpu_ftr_reg(SYS_ID_AA64ISAR2_EL1, info->reg_id_aa64isar2);
470         init_cpu_ftr_reg(SYS_ID_AA64MMFR0_EL1, info->reg_id_aa64mmfr0);
471         init_cpu_ftr_reg(SYS_ID_AA64MMFR1_EL1, info->reg_id_aa64mmfr1);
472         init_cpu_ftr_reg(SYS_ID_AA64MMFR2_EL1, info->reg_id_aa64mmfr2);
473         init_cpu_ftr_reg(SYS_ID_AA64PFR0_EL1, info->reg_id_aa64pfr0);
474         init_cpu_ftr_reg(SYS_ID_AA64PFR1_EL1, info->reg_id_aa64pfr1);
475
476         if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
477                 init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0);
478                 init_cpu_ftr_reg(SYS_ID_ISAR0_EL1, info->reg_id_isar0);
479                 init_cpu_ftr_reg(SYS_ID_ISAR1_EL1, info->reg_id_isar1);
480                 init_cpu_ftr_reg(SYS_ID_ISAR2_EL1, info->reg_id_isar2);
481                 init_cpu_ftr_reg(SYS_ID_ISAR3_EL1, info->reg_id_isar3);
482                 init_cpu_ftr_reg(SYS_ID_ISAR4_EL1, info->reg_id_isar4);
483                 init_cpu_ftr_reg(SYS_ID_ISAR5_EL1, info->reg_id_isar5);
484                 init_cpu_ftr_reg(SYS_ID_MMFR0_EL1, info->reg_id_mmfr0);
485                 init_cpu_ftr_reg(SYS_ID_MMFR1_EL1, info->reg_id_mmfr1);
486                 init_cpu_ftr_reg(SYS_ID_MMFR2_EL1, info->reg_id_mmfr2);
487                 init_cpu_ftr_reg(SYS_ID_MMFR3_EL1, info->reg_id_mmfr3);
488                 init_cpu_ftr_reg(SYS_ID_PFR0_EL1, info->reg_id_pfr0);
489                 init_cpu_ftr_reg(SYS_ID_PFR1_EL1, info->reg_id_pfr1);
490                 init_cpu_ftr_reg(SYS_MVFR0_EL1, info->reg_mvfr0);
491                 init_cpu_ftr_reg(SYS_MVFR1_EL1, info->reg_mvfr1);
492                 init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2);
493         }
494
495         /*
496          * Run the errata workaround checks on the boot CPU, once we have
497          * initialised the CPU feature infrastructure.
498          */
499         update_cpu_errata_workarounds();
500 }
501
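/* Fold a newly booted CPU's register value into the sanitised system-wide value */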
502 static void update_cpu_ftr_reg(struct arm64_ftr_reg *reg, u64 new)
503 {
504         const struct arm64_ftr_bits *ftrp;
505
506         for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) {
507                 s64 ftr_cur = arm64_ftr_value(ftrp, reg->sys_val);
508                 s64 ftr_new = arm64_ftr_value(ftrp, new);
509
510                 if (ftr_cur == ftr_new)
511                         continue;
512                 /* Find a safe value */
513                 ftr_new = arm64_ftr_safe_value(ftrp, ftr_new, ftr_cur);
514                 reg->sys_val = arm64_ftr_set_value(ftrp, reg->sys_val, ftr_new);
515         }
516
517 }
518
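/* Update the system-wide value of a register and flag any strict-field mismatch with the boot CPU */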
519 static int check_update_ftr_reg(u32 sys_id, int cpu, u64 val, u64 boot)
520 {
521         struct arm64_ftr_reg *regp = get_arm64_ftr_reg(sys_id);
522
523         BUG_ON(!regp);
524         update_cpu_ftr_reg(regp, val);
525         if ((boot & regp->strict_mask) == (val & regp->strict_mask))
526                 return 0;
527         pr_warn("SANITY CHECK: Unexpected variation in %s. Boot CPU: %#016llx, CPU%d: %#016llx\n",
528                         regp->name, boot, cpu, val);
529         return 1;
530 }
531
532 /*
533  * Update the system-wide CPU feature registers with the values from a
534  * non-boot CPU. Also performs SANITY checks to make sure that there
535  * aren't any insane variations from that of the boot CPU.
536  */
537 void update_cpu_features(int cpu,
538                          struct cpuinfo_arm64 *info,
539                          struct cpuinfo_arm64 *boot)
540 {
541         int taint = 0;
542
543         /*
544          * The kernel can handle differing I-cache policies, but otherwise
545          * caches should look identical. Userspace JITs will make use of
546          * *minLine.
547          */
548         taint |= check_update_ftr_reg(SYS_CTR_EL0, cpu,
549                                       info->reg_ctr, boot->reg_ctr);
550
551         /*
552          * Userspace may perform DC ZVA instructions. Mismatched block sizes
553          * could result in too much or too little memory being zeroed if a
554          * process is preempted and migrated between CPUs.
555          */
556         taint |= check_update_ftr_reg(SYS_DCZID_EL0, cpu,
557                                       info->reg_dczid, boot->reg_dczid);
558
559         /* If different, timekeeping will be broken (especially with KVM) */
560         taint |= check_update_ftr_reg(SYS_CNTFRQ_EL0, cpu,
561                                       info->reg_cntfrq, boot->reg_cntfrq);
562
563         /*
564          * The kernel uses self-hosted debug features and expects CPUs to
565          * support identical debug features. We presently need CTX_CMPs, WRPs,
566          * and BRPs to be identical.
567          * ID_AA64DFR1 is currently RES0.
568          */
569         taint |= check_update_ftr_reg(SYS_ID_AA64DFR0_EL1, cpu,
570                                       info->reg_id_aa64dfr0, boot->reg_id_aa64dfr0);
571         taint |= check_update_ftr_reg(SYS_ID_AA64DFR1_EL1, cpu,
572                                       info->reg_id_aa64dfr1, boot->reg_id_aa64dfr1);
573         /*
574          * Even in big.LITTLE, processors should be identical instruction-set
575          * wise.
576          */
577         taint |= check_update_ftr_reg(SYS_ID_AA64ISAR0_EL1, cpu,
578                                       info->reg_id_aa64isar0, boot->reg_id_aa64isar0);
579         taint |= check_update_ftr_reg(SYS_ID_AA64ISAR1_EL1, cpu,
580                                       info->reg_id_aa64isar1, boot->reg_id_aa64isar1);
581         taint |= check_update_ftr_reg(SYS_ID_AA64ISAR2_EL1, cpu,
582                                       info->reg_id_aa64isar2, boot->reg_id_aa64isar2);
583
584         /*
585          * Differing PARange support is fine as long as all peripherals and
586          * memory are mapped within the minimum PARange of all CPUs.
587          * Linux should not care about secure memory.
588          */
589         taint |= check_update_ftr_reg(SYS_ID_AA64MMFR0_EL1, cpu,
590                                       info->reg_id_aa64mmfr0, boot->reg_id_aa64mmfr0);
591         taint |= check_update_ftr_reg(SYS_ID_AA64MMFR1_EL1, cpu,
592                                       info->reg_id_aa64mmfr1, boot->reg_id_aa64mmfr1);
593         taint |= check_update_ftr_reg(SYS_ID_AA64MMFR2_EL1, cpu,
594                                       info->reg_id_aa64mmfr2, boot->reg_id_aa64mmfr2);
595
596         /*
597          * EL3 is not our concern.
598          * ID_AA64PFR1 is currently RES0.
599          */
600         taint |= check_update_ftr_reg(SYS_ID_AA64PFR0_EL1, cpu,
601                                       info->reg_id_aa64pfr0, boot->reg_id_aa64pfr0);
602         taint |= check_update_ftr_reg(SYS_ID_AA64PFR1_EL1, cpu,
603                                       info->reg_id_aa64pfr1, boot->reg_id_aa64pfr1);
604
605         /*
606          * If we have AArch32, we care about 32-bit features for compat.
607          * If the system doesn't support AArch32, don't update them.
608          */
609         if (id_aa64pfr0_32bit_el0(read_system_reg(SYS_ID_AA64PFR0_EL1)) &&
610                 id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
611
612                 taint |= check_update_ftr_reg(SYS_ID_DFR0_EL1, cpu,
613                                         info->reg_id_dfr0, boot->reg_id_dfr0);
614                 taint |= check_update_ftr_reg(SYS_ID_ISAR0_EL1, cpu,
615                                         info->reg_id_isar0, boot->reg_id_isar0);
616                 taint |= check_update_ftr_reg(SYS_ID_ISAR1_EL1, cpu,
617                                         info->reg_id_isar1, boot->reg_id_isar1);
618                 taint |= check_update_ftr_reg(SYS_ID_ISAR2_EL1, cpu,
619                                         info->reg_id_isar2, boot->reg_id_isar2);
620                 taint |= check_update_ftr_reg(SYS_ID_ISAR3_EL1, cpu,
621                                         info->reg_id_isar3, boot->reg_id_isar3);
622                 taint |= check_update_ftr_reg(SYS_ID_ISAR4_EL1, cpu,
623                                         info->reg_id_isar4, boot->reg_id_isar4);
624                 taint |= check_update_ftr_reg(SYS_ID_ISAR5_EL1, cpu,
625                                         info->reg_id_isar5, boot->reg_id_isar5);
626
627                 /*
628                  * Regardless of the value of the AuxReg field, the AIFSR, ADFSR, and
629                  * ACTLR formats could differ across CPUs and therefore would have to
630                  * be trapped for virtualization anyway.
631                  */
632                 taint |= check_update_ftr_reg(SYS_ID_MMFR0_EL1, cpu,
633                                         info->reg_id_mmfr0, boot->reg_id_mmfr0);
634                 taint |= check_update_ftr_reg(SYS_ID_MMFR1_EL1, cpu,
635                                         info->reg_id_mmfr1, boot->reg_id_mmfr1);
636                 taint |= check_update_ftr_reg(SYS_ID_MMFR2_EL1, cpu,
637                                         info->reg_id_mmfr2, boot->reg_id_mmfr2);
638                 taint |= check_update_ftr_reg(SYS_ID_MMFR3_EL1, cpu,
639                                         info->reg_id_mmfr3, boot->reg_id_mmfr3);
640                 taint |= check_update_ftr_reg(SYS_ID_PFR0_EL1, cpu,
641                                         info->reg_id_pfr0, boot->reg_id_pfr0);
642                 taint |= check_update_ftr_reg(SYS_ID_PFR1_EL1, cpu,
643                                         info->reg_id_pfr1, boot->reg_id_pfr1);
644                 taint |= check_update_ftr_reg(SYS_MVFR0_EL1, cpu,
645                                         info->reg_mvfr0, boot->reg_mvfr0);
646                 taint |= check_update_ftr_reg(SYS_MVFR1_EL1, cpu,
647                                         info->reg_mvfr1, boot->reg_mvfr1);
648                 taint |= check_update_ftr_reg(SYS_MVFR2_EL1, cpu,
649                                         info->reg_mvfr2, boot->reg_mvfr2);
650         }
651
652         /*
653          * Mismatched CPU features are a recipe for disaster. Don't even
654          * pretend to support them.
655          */
656         WARN_TAINT_ONCE(taint, TAINT_CPU_OUT_OF_SPEC,
657                         "Unsupported CPU feature variation.\n");
658 }
659
660 u64 read_system_reg(u32 id)
661 {
662         struct arm64_ftr_reg *regp = get_arm64_ftr_reg(id);
663
664         /* We shouldn't get a request for an unsupported register */
665         BUG_ON(!regp);
666         return regp->sys_val;
667 }
668
669 /*
670  * __raw_read_system_reg() - Used by a STARTING CPU before cpuinfo is populated.
671  * Read the system register on the current CPU.
672  */
673 static u64 __raw_read_system_reg(u32 sys_id)
674 {
675         switch (sys_id) {
676         case SYS_ID_PFR0_EL1:           return read_cpuid(ID_PFR0_EL1);
677         case SYS_ID_PFR1_EL1:           return read_cpuid(ID_PFR1_EL1);
678         case SYS_ID_DFR0_EL1:           return read_cpuid(ID_DFR0_EL1);
679         case SYS_ID_MMFR0_EL1:          return read_cpuid(ID_MMFR0_EL1);
680         case SYS_ID_MMFR1_EL1:          return read_cpuid(ID_MMFR1_EL1);
681         case SYS_ID_MMFR2_EL1:          return read_cpuid(ID_MMFR2_EL1);
682         case SYS_ID_MMFR3_EL1:          return read_cpuid(ID_MMFR3_EL1);
683         case SYS_ID_ISAR0_EL1:          return read_cpuid(ID_ISAR0_EL1);
684         case SYS_ID_ISAR1_EL1:          return read_cpuid(ID_ISAR1_EL1);
685         case SYS_ID_ISAR2_EL1:          return read_cpuid(ID_ISAR2_EL1);
686         case SYS_ID_ISAR3_EL1:          return read_cpuid(ID_ISAR3_EL1);
687         case SYS_ID_ISAR4_EL1:          return read_cpuid(ID_ISAR4_EL1);
688         case SYS_ID_ISAR5_EL1:          return read_cpuid(ID_ISAR5_EL1);
689         case SYS_MVFR0_EL1:             return read_cpuid(MVFR0_EL1);
690         case SYS_MVFR1_EL1:             return read_cpuid(MVFR1_EL1);
691         case SYS_MVFR2_EL1:             return read_cpuid(MVFR2_EL1);
692
693         case SYS_ID_AA64PFR0_EL1:       return read_cpuid(ID_AA64PFR0_EL1);
694         case SYS_ID_AA64PFR1_EL1:       return read_cpuid(ID_AA64PFR1_EL1);
695         case SYS_ID_AA64DFR0_EL1:       return read_cpuid(ID_AA64DFR0_EL1);
696         case SYS_ID_AA64DFR1_EL1:       return read_cpuid(ID_AA64DFR1_EL1);
697         case SYS_ID_AA64MMFR0_EL1:      return read_cpuid(ID_AA64MMFR0_EL1);
698         case SYS_ID_AA64MMFR1_EL1:      return read_cpuid(ID_AA64MMFR1_EL1);
699         case SYS_ID_AA64MMFR2_EL1:      return read_cpuid(ID_AA64MMFR2_EL1);
700         case SYS_ID_AA64ISAR0_EL1:      return read_cpuid(ID_AA64ISAR0_EL1);
701         case SYS_ID_AA64ISAR1_EL1:      return read_cpuid(ID_AA64ISAR1_EL1);
702         case SYS_ID_AA64ISAR2_EL1:      return read_cpuid(ID_AA64ISAR2_EL1);
703
704         case SYS_CNTFRQ_EL0:            return read_cpuid(CNTFRQ_EL0);
705         case SYS_CTR_EL0:               return read_cpuid(CTR_EL0);
706         case SYS_DCZID_EL0:             return read_cpuid(DCZID_EL0);
707         default:
708                 BUG();
709                 return 0;
710         }
711 }
712
713 #include <linux/irqchip/arm-gic-v3.h>
714
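/* True if the feature field at entry->field_pos in reg is at least the capability's minimum value */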
715 static bool
716 feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry)
717 {
718         int val = cpuid_feature_extract_field(reg, entry->field_pos, entry->sign);
719
720         return val >= entry->min_field_value;
721 }
722
723 static bool
724 has_cpuid_feature(const struct arm64_cpu_capabilities *entry, int scope)
725 {
726         u64 val;
727
728         WARN_ON(scope == SCOPE_LOCAL_CPU && preemptible());
729         if (scope == SCOPE_SYSTEM)
730                 val = read_system_reg(entry->sys_reg);
731         else
732                 val = __raw_read_system_reg(entry->sys_reg);
733
734         return feature_matches(val, entry);
735 }
736
737 static bool has_useable_gicv3_cpuif(const struct arm64_cpu_capabilities *entry, int scope)
738 {
739         bool has_sre;
740
741         if (!has_cpuid_feature(entry, scope))
742                 return false;
743
744         has_sre = gic_enable_sre();
745         if (!has_sre)
746                 pr_warn_once("%s present but disabled by higher exception level\n",
747                              entry->desc);
748
749         return has_sre;
750 }
751
752 static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry, int __unused)
753 {
754         u32 midr = read_cpuid_id();
755
756         /* Cavium ThunderX pass 1.x and 2.x */
757         return MIDR_IS_CPU_MODEL_RANGE(midr, MIDR_THUNDERX,
758                 MIDR_CPU_VAR_REV(0, 0),
759                 MIDR_CPU_VAR_REV(1, MIDR_REVISION_MASK));
760 }
761
762 static bool runs_at_el2(const struct arm64_cpu_capabilities *entry, int __unused)
763 {
764         return is_kernel_in_hyp_mode();
765 }
766
767 static bool hyp_offset_low(const struct arm64_cpu_capabilities *entry,
768                            int __unused)
769 {
770         phys_addr_t idmap_addr = virt_to_phys(__hyp_idmap_text_start);
771
772         /*
773          * Activate the lower HYP offset only if:
774          * - the idmap doesn't clash with it,
775          * - the kernel is not running at EL2.
776          */
777         return idmap_addr > GENMASK(VA_BITS - 2, 0) && !is_kernel_in_hyp_mode();
778 }
779
780 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
781 static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */
782
783 static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
784                                 int __unused)
785 {
786         /* List of CPUs that are not vulnerable and don't need KPTI */
787         static const struct midr_range kpti_safe_list[] = {
788                 MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
789                 MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
790                 MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
791                 MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
792                 MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
793                 MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
794                 MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
795                 MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
796         };
797         char const *str = "command line option";
798         u64 pfr0 = read_system_reg(SYS_ID_AA64PFR0_EL1);
799
800         /*
801          * For reasons that aren't entirely clear, enabling KPTI on Cavium
802          * ThunderX leads to apparent I-cache corruption of kernel text, which
803          * ends as well as you might imagine. Don't even try.
804          */
805         if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456)) {
806                 str = "ARM64_WORKAROUND_CAVIUM_27456";
807                 __kpti_forced = -1;
808         }
809
810         /* Forced? */
811         if (__kpti_forced) {
812                 pr_info_once("kernel page table isolation forced %s by %s\n",
813                              __kpti_forced > 0 ? "ON" : "OFF", str);
814                 return __kpti_forced > 0;
815         }
816
817         /* Useful for KASLR robustness */
818         if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
819                 return true;
820
821         /* Don't force KPTI for CPUs that are not vulnerable */
822         if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
823                 return false;
824
825         /* Defer to CPU feature registers */
826         return !cpuid_feature_extract_unsigned_field(pfr0,
827                                                      ID_AA64PFR0_CSV3_SHIFT);
828 }
829
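/* Switch this CPU to KPTI-aware vectors and rewrite swapper_pg_dir with non-global (nG) mappings */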
830 static void
831 kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
832 {
833         typedef void (kpti_remap_fn)(int, int, phys_addr_t);
834         extern kpti_remap_fn idmap_kpti_install_ng_mappings;
835         kpti_remap_fn *remap_fn;
836
837         static bool kpti_applied = false;
838         int cpu = smp_processor_id();
839
840         if (__this_cpu_read(this_cpu_vector) == vectors) {
841                 const char *v = arm64_get_bp_hardening_vector(EL1_VECTOR_KPTI);
842
843                 __this_cpu_write(this_cpu_vector, v);
844         }
845
846         if (kpti_applied)
847                 return;
848
849         remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings);
850
851         cpu_install_idmap();
852         remap_fn(cpu, num_online_cpus(), __pa_symbol(swapper_pg_dir));
853         cpu_uninstall_idmap();
854
855         if (!cpu)
856                 kpti_applied = true;
857
858         return;
859 }
860
861 static int __init parse_kpti(char *str)
862 {
863         bool enabled;
864         int ret = strtobool(str, &enabled);
865
866         if (ret)
867                 return ret;
868
869         __kpti_forced = enabled ? 1 : -1;
870         return 0;
871 }
872 early_param("kpti", parse_kpti);
873 #endif  /* CONFIG_UNMAP_KERNEL_AT_EL0 */
874
875 static void cpu_copy_el2regs(const struct arm64_cpu_capabilities *__unused)
876 {
877         /*
878          * Copy register values that aren't redirected by hardware.
879          *
880          * Before code patching, we only set tpidr_el1; all CPUs need to copy
881          * this value to tpidr_el2 before we patch the code. Once we've done
882          * that, freshly-onlined CPUs will set tpidr_el2, so we don't need to
883          * do anything here.
884          */
885         if (!alternatives_applied)
886                 write_sysreg(read_sysreg(tpidr_el1), tpidr_el2);
887 }
888
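/* Mask out hwcaps that cannot be safely advertised due to CPU errata */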
889 static void elf_hwcap_fixup(void)
890 {
891 #ifdef CONFIG_ARM64_ERRATUM_1742098
892         if (cpus_have_const_cap(ARM64_WORKAROUND_1742098))
893                 compat_elf_hwcap2 &= ~COMPAT_HWCAP2_AES;
894 #endif /* ARM64_ERRATUM_1742098 */
895 }
896
897 static const struct arm64_cpu_capabilities arm64_features[] = {
898         {
899                 .desc = "GIC system register CPU interface",
900                 .capability = ARM64_HAS_SYSREG_GIC_CPUIF,
901                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
902                 .matches = has_useable_gicv3_cpuif,
903                 .sys_reg = SYS_ID_AA64PFR0_EL1,
904                 .field_pos = ID_AA64PFR0_GIC_SHIFT,
905                 .sign = FTR_UNSIGNED,
906                 .min_field_value = 1,
907         },
908 #ifdef CONFIG_ARM64_PAN
909         {
910                 .desc = "Privileged Access Never",
911                 .capability = ARM64_HAS_PAN,
912                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
913                 .matches = has_cpuid_feature,
914                 .sys_reg = SYS_ID_AA64MMFR1_EL1,
915                 .field_pos = ID_AA64MMFR1_PAN_SHIFT,
916                 .sign = FTR_UNSIGNED,
917                 .min_field_value = 1,
918                 .cpu_enable = cpu_enable_pan,
919         },
920 #endif /* CONFIG_ARM64_PAN */
921 #if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
922         {
923                 .desc = "LSE atomic instructions",
924                 .capability = ARM64_HAS_LSE_ATOMICS,
925                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
926                 .matches = has_cpuid_feature,
927                 .sys_reg = SYS_ID_AA64ISAR0_EL1,
928                 .field_pos = ID_AA64ISAR0_ATOMICS_SHIFT,
929                 .sign = FTR_UNSIGNED,
930                 .min_field_value = 2,
931         },
932 #endif /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
933         {
934                 .desc = "Software prefetching using PRFM",
935                 .capability = ARM64_HAS_NO_HW_PREFETCH,
936                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
937                 .matches = has_no_hw_prefetch,
938         },
939 #ifdef CONFIG_ARM64_UAO
940         {
941                 .desc = "User Access Override",
942                 .capability = ARM64_HAS_UAO,
943                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
944                 .matches = has_cpuid_feature,
945                 .sys_reg = SYS_ID_AA64MMFR2_EL1,
946                 .field_pos = ID_AA64MMFR2_UAO_SHIFT,
947                 .min_field_value = 1,
948                 /*
949                  * We rely on stop_machine() calling uao_thread_switch() to set
950                  * UAO immediately after patching.
951                  */
952         },
953 #endif /* CONFIG_ARM64_UAO */
954 #ifdef CONFIG_ARM64_PAN
955         {
956                 .capability = ARM64_ALT_PAN_NOT_UAO,
957                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
958                 .matches = cpufeature_pan_not_uao,
959         },
960 #endif /* CONFIG_ARM64_PAN */
961         {
962                 .desc = "Virtualization Host Extensions",
963                 .capability = ARM64_HAS_VIRT_HOST_EXTN,
964                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
965                 .matches = runs_at_el2,
966                 .cpu_enable = cpu_copy_el2regs,
967         },
968         {
969                 .desc = "32-bit EL0 Support",
970                 .capability = ARM64_HAS_32BIT_EL0,
971                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
972                 .matches = has_cpuid_feature,
973                 .sys_reg = SYS_ID_AA64PFR0_EL1,
974                 .sign = FTR_UNSIGNED,
975                 .field_pos = ID_AA64PFR0_EL0_SHIFT,
976                 .min_field_value = ID_AA64PFR0_EL0_32BIT_64BIT,
977         },
978         {
979                 .desc = "Reduced HYP mapping offset",
980                 .capability = ARM64_HYP_OFFSET_LOW,
981                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
982                 .matches = hyp_offset_low,
983         },
984 #ifdef CONFIG_UNMAP_KERNEL_AT_EL0
985         {
986                 .desc = "Kernel page table isolation (KPTI)",
987                 .capability = ARM64_UNMAP_KERNEL_AT_EL0,
988                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,
989                 .matches = unmap_kernel_at_el0,
990                 .cpu_enable = kpti_install_ng_mappings,
991         },
992 #endif
993         {},
994 };
995
996 #define HWCAP_CAP(reg, field, s, min_value, cap_type, cap)      \
997         {                                                       \
998                 .desc = #cap,                                   \
999                 .type = ARM64_CPUCAP_SYSTEM_FEATURE,            \
1000                 .matches = has_cpuid_feature,                   \
1001                 .sys_reg = reg,                                 \
1002                 .field_pos = field,                             \
1003                 .sign = s,                                      \
1004                 .min_field_value = min_value,                   \
1005                 .hwcap_type = cap_type,                         \
1006                 .hwcap = cap,                                   \
1007         }
1008
1009 static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
1010         HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_PMULL),
1011         HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_AES),
1012         HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA1),
1013         HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA2),
1014         HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_CRC32),
1015         HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_ATOMICS_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_ATOMICS),
1016         HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_FP),
1017         HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_FPHP),
1018         HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_ASIMD),
1019         HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_ASIMDHP),
1020         {},
1021 };
1022
1023 static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = {
1024 #ifdef CONFIG_COMPAT
1025         HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_PMULL),
1026         HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_AES),
1027         HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA1),
1028         HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA2),
1029         HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_CRC32),
1030 #endif
1031         {},
1032 };
1033
1034 static void __init cap_set_elf_hwcap(const struct arm64_cpu_capabilities *cap)
1035 {
1036         switch (cap->hwcap_type) {
1037         case CAP_HWCAP:
1038                 elf_hwcap |= cap->hwcap;
1039                 break;
1040 #ifdef CONFIG_COMPAT
1041         case CAP_COMPAT_HWCAP:
1042                 compat_elf_hwcap |= (u32)cap->hwcap;
1043                 break;
1044         case CAP_COMPAT_HWCAP2:
1045                 compat_elf_hwcap2 |= (u32)cap->hwcap;
1046                 break;
1047 #endif
1048         default:
1049                 WARN_ON(1);
1050                 break;
1051         }
1052 }
1053
1054 /* Check if we have a particular HWCAP enabled */
1055 static bool cpus_have_elf_hwcap(const struct arm64_cpu_capabilities *cap)
1056 {
1057         bool rc;
1058
1059         switch (cap->hwcap_type) {
1060         case CAP_HWCAP:
1061                 rc = (elf_hwcap & cap->hwcap) != 0;
1062                 break;
1063 #ifdef CONFIG_COMPAT
1064         case CAP_COMPAT_HWCAP:
1065                 rc = (compat_elf_hwcap & (u32)cap->hwcap) != 0;
1066                 break;
1067         case CAP_COMPAT_HWCAP2:
1068                 rc = (compat_elf_hwcap2 & (u32)cap->hwcap) != 0;
1069                 break;
1070 #endif
1071         default:
1072                 WARN_ON(1);
1073                 rc = false;
1074         }
1075
1076         return rc;
1077 }
1078
1079 static void __init setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps)
1080 {
1081         for (; hwcaps->matches; hwcaps++)
1082                 if (hwcaps->matches(hwcaps, cpucap_default_scope(hwcaps)))
1083                         cap_set_elf_hwcap(hwcaps);
1084 }
1085
1086 /*
1087  * Check if the current CPU has a given feature capability.
1088  * Should be called from non-preemptible context.
1089  */
1090 static bool __this_cpu_has_cap(const struct arm64_cpu_capabilities *cap_array,
1091                                unsigned int cap)
1092 {
1093         const struct arm64_cpu_capabilities *caps;
1094
1095         if (WARN_ON(preemptible()))
1096                 return false;
1097
1098         for (caps = cap_array; caps->matches; caps++)
1099                 if (caps->capability == cap &&
1100                     caps->matches(caps, SCOPE_LOCAL_CPU))
1101                         return true;
1102         return false;
1103 }
1104
1105 static void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
1106                                     const char *info)
1107 {
1108         for (; caps->matches; caps++) {
1109                 if (!caps->matches(caps, cpucap_default_scope(caps)))
1110                         continue;
1111
1112                 if (!cpus_have_cap(caps->capability) && caps->desc)
1113                         pr_info("%s %s\n", info, caps->desc);
1114                 cpus_set_cap(caps->capability);
1115         }
1116 }
1117
1118 static int __enable_cpu_capability(void *arg)
1119 {
1120         const struct arm64_cpu_capabilities *cap = arg;
1121
1122         cap->cpu_enable(cap);
1123         return 0;
1124 }
1125
1126 /*
1127  * Run through the enabled capabilities and enable() them on all active
1128  * CPUs.
1129  */
1130 static void __init
1131 enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
1132 {
1133         for (; caps->matches; caps++) {
1134                 unsigned int num = caps->capability;
1135
1136                 if (!cpus_have_cap(num))
1137                         continue;
1138
1139                 /* Ensure cpus_have_const_cap(num) works */
1140                 static_branch_enable(&cpu_hwcap_keys[num]);
1141
1142                 if (caps->cpu_enable) {
1143                         /*
1144                          * Use stop_machine() as it schedules the work allowing
1145                          * us to modify PSTATE, instead of on_each_cpu() which
1146                          * uses an IPI, giving us a PSTATE that disappears when
1147                          * we return.
1148                          */
1149                         stop_machine(__enable_cpu_capability, (void *)caps,
1150                                      cpu_online_mask);
1151                 }
1152         }
1153 }
1154
1155 /*
1156  * Flag to indicate if we have computed the system-wide
1157  * capabilities based on the boot-time active CPUs. This
1158  * will be used to determine if a new booting CPU should
1159  * go through the verification process to make sure that it
1160  * supports the system capabilities, without using a hotplug
1161  * notifier.
1162  */
1163 static bool sys_caps_initialised;
1164
1165 static inline void set_sys_caps_initialised(void)
1166 {
1167         sys_caps_initialised = true;
1168 }
1169
1170 /*
1171  * Check for CPU features that are used in early boot
1172  * based on the Boot CPU value.
1173  */
1174 static void check_early_cpu_features(void)
1175 {
1176         verify_cpu_run_el();
1177         verify_cpu_asid_bits();
1178 }
1179
1180 static void
1181 verify_local_elf_hwcaps(const struct arm64_cpu_capabilities *caps)
1182 {
1183
1184         for (; caps->matches; caps++)
1185                 if (cpus_have_elf_hwcap(caps) && !caps->matches(caps, SCOPE_LOCAL_CPU)) {
1186                         pr_crit("CPU%d: missing HWCAP: %s\n",
1187                                         smp_processor_id(), caps->desc);
1188                         cpu_die_early();
1189                 }
1190 }
1191
1192 static void
1193 verify_local_cpu_features(const struct arm64_cpu_capabilities *caps_list)
1194 {
1195         const struct arm64_cpu_capabilities *caps = caps_list;
1196         for (; caps->matches; caps++) {
1197                 if (!cpus_have_cap(caps->capability))
1198                         continue;
1199                 /*
1200                  * If the new CPU misses an advertised feature, we cannot proceed
1201                  * further; park the CPU.
1202                  */
1203                 if (!__this_cpu_has_cap(caps_list, caps->capability)) {
1204                         pr_crit("CPU%d: missing feature: %s\n",
1205                                         smp_processor_id(), caps->desc);
1206                         cpu_die_early();
1207                 }
1208                 if (caps->cpu_enable)
1209                         caps->cpu_enable(caps);
1210         }
1211 }
1212
1213 /*
1214  * The CPU errata workarounds are detected and applied at boot time
1215  * and the related information is freed soon after. If the new CPU requires
1216  * a workaround not detected at boot, fail this CPU.
1217  */
1218 static void verify_local_cpu_errata_workarounds(void)
1219 {
1220         const struct arm64_cpu_capabilities *caps = arm64_errata;
1221
1222         for (; caps->matches; caps++) {
1223                 if (cpus_have_cap(caps->capability)) {
1224                         if (caps->cpu_enable)
1225                                 caps->cpu_enable(caps);
1226                 } else if (caps->matches(caps, SCOPE_LOCAL_CPU)) {
1227                         pr_crit("CPU%d: Requires work around for %s, not detected"
1228                                         " at boot time\n",
1229                                 smp_processor_id(),
1230                                 caps->desc ? : "an erratum");
1231                         cpu_die_early();
1232                 }
1233         }
1234 }
1235
1236 static void update_cpu_errata_workarounds(void)
1237 {
1238         update_cpu_capabilities(arm64_errata, "enabling workaround for");
1239 }
1240
1241 static void __init enable_errata_workarounds(void)
1242 {
1243         enable_cpu_capabilities(arm64_errata);
1244 }
1245
1246 /*
1247  * Run through the enabled system capabilities and enable() them on this CPU.
1248  * The capabilities were decided based on the available CPUs at boot time.
1249  * Any new CPU should match the system-wide status of the capability. If the
1250  * new CPU doesn't have a capability which the system now has enabled, we
1251  * cannot do anything to fix it up and could cause unexpected failures. So
1252  * we park the CPU.
1253  */
1254 static void verify_local_cpu_capabilities(void)
1255 {
1256         verify_local_cpu_errata_workarounds();
1257         verify_local_cpu_features(arm64_features);
1258         verify_local_elf_hwcaps(arm64_elf_hwcaps);
1259         if (system_supports_32bit_el0())
1260                 verify_local_elf_hwcaps(compat_elf_hwcaps);
1261 }
1262
1263 void check_local_cpu_capabilities(void)
1264 {
1265         /*
1266          * All secondary CPUs should conform to the early CPU features
1267          * in use by the kernel based on the boot CPU.
1268          */
1269         check_early_cpu_features();
1270
1271         /*
1272          * If we haven't finalised the system capabilities, this CPU gets
1273          * a chance to update the errata workarounds.
1274          * Otherwise, this CPU should verify that it has all the
1275          * system-advertised capabilities.
1276          */
1277         if (!sys_caps_initialised)
1278                 update_cpu_errata_workarounds();
1279         else
1280                 verify_local_cpu_capabilities();
1281 }
1282
1283 static void __init setup_feature_capabilities(void)
1284 {
1285         update_cpu_capabilities(arm64_features, "detected feature:");
1286         enable_cpu_capabilities(arm64_features);
1287 }
1288
1289 DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready);
1290 EXPORT_SYMBOL(arm64_const_caps_ready);
1291
1292 static void __init mark_const_caps_ready(void)
1293 {
1294         static_branch_enable(&arm64_const_caps_ready);
1295 }
1296
1297 extern const struct arm64_cpu_capabilities arm64_errata[];
1298
1299 bool this_cpu_has_cap(unsigned int cap)
1300 {
1301         return (__this_cpu_has_cap(arm64_features, cap) ||
1302                 __this_cpu_has_cap(arm64_errata, cap));
1303 }
1304
1305 void __init setup_cpu_features(void)
1306 {
1307         u32 cwg;
1308         int cls;
1309
1310         /* Set the CPU feature capabilities */
1311         setup_feature_capabilities();
1312         enable_errata_workarounds();
1313         mark_const_caps_ready();
1314         setup_elf_hwcaps(arm64_elf_hwcaps);
1315
1316         if (system_supports_32bit_el0()) {
1317                 setup_elf_hwcaps(compat_elf_hwcaps);
1318                 elf_hwcap_fixup();
1319         }
1320
1321         /* Advertise that we have computed the system capabilities */
1322         set_sys_caps_initialised();
1323
1324         /*
1325          * Check for sane CTR_EL0.CWG value.
1326          */
1327         cwg = cache_type_cwg();
1328         cls = cache_line_size();
1329         if (!cwg)
1330                 pr_warn("No Cache Writeback Granule information, assuming cache line size %d\n",
1331                         cls);
1332         if (L1_CACHE_BYTES < cls)
1333                 pr_warn("L1_CACHE_BYTES smaller than the Cache Writeback Granule (%d < %d)\n",
1334                         L1_CACHE_BYTES, cls);
1335 }
1336
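/* Matches when the system has PAN but not UAO (the ARM64_ALT_PAN_NOT_UAO alternative) */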
1337 static bool __maybe_unused
1338 cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused)
1339 {
1340         return (cpus_have_const_cap(ARM64_HAS_PAN) && !cpus_have_const_cap(ARM64_HAS_UAO));
1341 }