/*
 * Contains CPU feature definitions
 *
 * Copyright (C) 2015 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#define pr_fmt(fmt) "CPU features: " fmt

#include <linux/bsearch.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/sort.h>
#include <linux/stop_machine.h>
#include <linux/types.h>

#include <asm/cpufeature.h>
#include <asm/cpu_ops.h>
#include <asm/hwcap.h>
#include <asm/mmu_context.h>
#include <asm/processor.h>
#include <asm/sysreg.h>
#include <asm/vectors.h>

unsigned long elf_hwcap __read_mostly;
EXPORT_SYMBOL_GPL(elf_hwcap);

#ifdef CONFIG_COMPAT
#define COMPAT_ELF_HWCAP_DEFAULT	\
				(COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
				 COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
				 COMPAT_HWCAP_TLS|COMPAT_HWCAP_VFP|\
				 COMPAT_HWCAP_VFPv3|COMPAT_HWCAP_VFPv4|\
				 COMPAT_HWCAP_NEON|COMPAT_HWCAP_IDIV|\
				 COMPAT_HWCAP_LPAE)
unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
unsigned int compat_elf_hwcap2 __read_mostly;
#endif

DECLARE_BITMAP(cpu_hwcaps, ARM64_NCAPS);
EXPORT_SYMBOL(cpu_hwcaps);

DEFINE_PER_CPU_READ_MOSTLY(const char *, this_cpu_vector) = vectors;

DEFINE_STATIC_KEY_ARRAY_FALSE(cpu_hwcap_keys, ARM64_NCAPS);
EXPORT_SYMBOL(cpu_hwcap_keys);

#define __ARM64_FTR_BITS(SIGNED, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
	{						\
		.sign = SIGNED,				\
		.strict = STRICT,			\
		.type = TYPE,				\
		.shift = SHIFT,				\
		.width = WIDTH,				\
		.safe_val = SAFE_VAL,			\
	}

/* Define a feature with unsigned values */
#define ARM64_FTR_BITS(STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
	__ARM64_FTR_BITS(FTR_UNSIGNED, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)

/* Define a feature with a signed value */
#define S_ARM64_FTR_BITS(STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
	__ARM64_FTR_BITS(FTR_SIGNED, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)

#define ARM64_FTR_END					\
	{						\
		.width = 0,				\
	}
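
/*
 * Each arm64_ftr_bits entry describes one field of an ID register: its
 * position and width, whether mismatches across CPUs are strict (warn
 * and taint) or tolerated, and how a system-wide "safe" value is chosen
 * (an exact safe_val, the lowest value, or the highest value). Arrays of
 * these entries drive the sanitised register values below.
 */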

/* meta feature for alternatives */
static bool __maybe_unused
cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused);

static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64ISAR0_RDM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 24, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_ATOMICS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_CRC32_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA2_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_SHA1_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_AES_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 4, 0),	/* RAZ */
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_aa64isar2[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_HIGHER_SAFE, ID_AA64ISAR2_CLEARBHB_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
	ARM64_FTR_BITS(FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV3_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_CSV2_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 24, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64PFR0_GIC_SHIFT, 4, 0),
	S_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
	S_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
	/* Linux doesn't care about the EL3 */
	ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, ID_AA64PFR0_EL3_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64PFR0_EL2_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
	S_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN4_SHIFT, 4, ID_AA64MMFR0_TGRAN4_NI),
	S_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN64_SHIFT, 4, ID_AA64MMFR0_TGRAN64_NI),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_TGRAN16_SHIFT, 4, ID_AA64MMFR0_TGRAN16_NI),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_BIGENDEL0_SHIFT, 4, 0),
	/* Linux shouldn't care about secure memory */
	ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_SNSMEM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_BIGENDEL_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR0_ASID_SHIFT, 4, 0),
	/*
	 * Differing PARange is fine as long as all peripherals and memory are mapped
	 * within the minimum PARange of all CPUs
	 */
	ARM64_FTR_BITS(FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_PARANGE_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_PAN_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_LOR_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_HPD_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_VHE_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_VMIDBITS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR1_HADBS_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_LVA_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_IESB_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_LSM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_UAO_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64MMFR2_CNP_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_ctr[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 31, 1, 1),		/* RES1 */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 30, 1, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 29, 1, 1),		/* DIC */
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 28, 1, 1),		/* IDC */
	ARM64_FTR_BITS(FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, 24, 4, 0),	/* CWG */
	ARM64_FTR_BITS(FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, 20, 4, 0),	/* ERG */
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, CTR_DMINLINE_SHIFT, 4, 1),
	/*
	 * Linux can handle differing I-cache policies. Userspace JITs will
	 * make use of *minLine.
	 * If we have differing I-cache policies, report it as the weakest - AIVIVT.
	 */
	ARM64_FTR_BITS(FTR_NONSTRICT, FTR_EXACT, 14, 2, ICACHE_POLICY_AIVIVT),	/* L1Ip */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 10, 0),	/* RAZ */
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, CTR_IMINLINE_SHIFT, 4, 0),
	ARM64_FTR_END,
};
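
/*
 * CTR_EL0 gets a named arm64_ftr_reg (rather than an anonymous table
 * entry) so that other code can refer to the sanitised value directly,
 * e.g. when dealing with mismatched cache geometry across CPUs.
 */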

struct arm64_ftr_reg arm64_ftr_reg_ctrel0 = {
	.name		= "SYS_CTR_EL0",
	.ftr_bits	= ftr_ctr
};

static const struct arm64_ftr_bits ftr_id_mmfr0[] = {
	S_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 28, 4, 0xf),	/* InnerShr */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 24, 4, 0),	/* FCSE */
	ARM64_FTR_BITS(FTR_NONSTRICT, FTR_LOWER_SAFE, 20, 4, 0),	/* AuxReg */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 16, 4, 0),	/* TCM */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 12, 4, 0),	/* ShareLvl */
	S_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 8, 4, 0xf),	/* OuterShr */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 4, 0),		/* PMSA */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 4, 0),		/* VMSA */
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_aa64dfr0[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 32, 32, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_CTX_CMPS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_WRPS_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_BRPS_SHIFT, 4, 0),
	S_ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_PMUVER_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_TRACEVER_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_AA64DFR0_DEBUGVER_SHIFT, 4, 0x6),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_mvfr2[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 8, 24, 0),	/* RAZ */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 4, 0),		/* FPMisc */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 4, 0),		/* SIMDMisc */
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_dczid[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 5, 27, 0),	/* RAZ */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 1, 1),		/* DZP */
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),	/* BS */
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_isar5[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_RDM_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 20, 4, 0),	/* RAZ */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_CRC32_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_SHA2_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_SHA1_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_AES_SHIFT, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, ID_ISAR5_SEVL_SHIFT, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_mmfr4[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 8, 24, 0),	/* RAZ */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 4, 0),		/* ac2 */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 4, 0),		/* RAZ */
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_pfr0[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 16, 16, 0),	/* RAZ */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 12, 4, 0),	/* State3 */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 8, 4, 0),		/* State2 */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 4, 4, 0),		/* State1 */
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 4, 0),		/* State0 */
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_id_dfr0[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0),
	S_ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0xf),	/* PerfMon */
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),
	ARM64_FTR_END,
};

/*
 * Common ftr bits for a 32bit register with all hidden, strict
 * attributes, with 4bit feature fields and a default safe value of
 * 0. Covers the following 32bit registers:
 * id_isar[0-4], id_mmfr[1-3], id_pfr1, mvfr[0-1]
 */
static const struct arm64_ftr_bits ftr_generic_32bits[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),
	ARM64_FTR_BITS(FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_generic[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 64, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_generic32[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 32, 0),
	ARM64_FTR_END,
};

static const struct arm64_ftr_bits ftr_aa64raz[] = {
	ARM64_FTR_BITS(FTR_STRICT, FTR_EXACT, 0, 64, 0),
	ARM64_FTR_END,
};

#define ARM64_FTR_REG(id, table) {		\
	.sys_id = id,				\
	.reg = &(struct arm64_ftr_reg){		\
		.name = #id,			\
		.ftr_bits = &((table)[0]),	\
	}}

static const struct __ftr_reg_entry {
	u32			sys_id;
	struct arm64_ftr_reg	*reg;
} arm64_ftr_regs[] = {

	/* Op1 = 0, CRn = 0, CRm = 1 */
	ARM64_FTR_REG(SYS_ID_PFR0_EL1, ftr_id_pfr0),
	ARM64_FTR_REG(SYS_ID_PFR1_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_DFR0_EL1, ftr_id_dfr0),
	ARM64_FTR_REG(SYS_ID_MMFR0_EL1, ftr_id_mmfr0),
	ARM64_FTR_REG(SYS_ID_MMFR1_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_MMFR2_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_MMFR3_EL1, ftr_generic_32bits),

	/* Op1 = 0, CRn = 0, CRm = 2 */
	ARM64_FTR_REG(SYS_ID_ISAR0_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_ISAR1_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_ISAR2_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_ISAR3_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_ISAR4_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_ID_ISAR5_EL1, ftr_id_isar5),
	ARM64_FTR_REG(SYS_ID_MMFR4_EL1, ftr_id_mmfr4),

	/* Op1 = 0, CRn = 0, CRm = 3 */
	ARM64_FTR_REG(SYS_MVFR0_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_MVFR1_EL1, ftr_generic_32bits),
	ARM64_FTR_REG(SYS_MVFR2_EL1, ftr_mvfr2),

	/* Op1 = 0, CRn = 0, CRm = 4 */
	ARM64_FTR_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0),
	ARM64_FTR_REG(SYS_ID_AA64PFR1_EL1, ftr_aa64raz),

	/* Op1 = 0, CRn = 0, CRm = 5 */
	ARM64_FTR_REG(SYS_ID_AA64DFR0_EL1, ftr_id_aa64dfr0),
	ARM64_FTR_REG(SYS_ID_AA64DFR1_EL1, ftr_generic),

	/* Op1 = 0, CRn = 0, CRm = 6 */
	ARM64_FTR_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0),
	ARM64_FTR_REG(SYS_ID_AA64ISAR1_EL1, ftr_aa64raz),
	ARM64_FTR_REG(SYS_ID_AA64ISAR2_EL1, ftr_id_aa64isar2),

	/* Op1 = 0, CRn = 0, CRm = 7 */
	ARM64_FTR_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0),
	ARM64_FTR_REG(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1),
	ARM64_FTR_REG(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2),

	/* Op1 = 3, CRn = 0, CRm = 0 */
	{ SYS_CTR_EL0, &arm64_ftr_reg_ctrel0 },
	ARM64_FTR_REG(SYS_DCZID_EL0, ftr_dczid),

	/* Op1 = 3, CRn = 14, CRm = 0 */
	ARM64_FTR_REG(SYS_CNTFRQ_EL0, ftr_generic32),
};

static int search_cmp_ftr_reg(const void *id, const void *regp)
{
	return (int)(unsigned long)id - (int)((const struct __ftr_reg_entry *)regp)->sys_id;
}

/*
 * get_arm64_ftr_reg - Lookup a feature register entry using its
 * sys_reg() encoding. With the array arm64_ftr_regs sorted in the
 * ascending order of sys_id, we use binary search to find a matching
 * entry.
 *
 * returns - Upon success, matching ftr_reg entry for id.
 *         - NULL on failure. It is up to the caller to decide
 *	     the impact of a failure.
 */
static struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id)
{
	const struct __ftr_reg_entry *ret;

	ret = bsearch((const void *)(unsigned long)sys_id,
			arm64_ftr_regs,
			ARRAY_SIZE(arm64_ftr_regs),
			sizeof(arm64_ftr_regs[0]),
			search_cmp_ftr_reg);
	if (ret)
		return ret->reg;
	return NULL;
}

static u64 arm64_ftr_set_value(const struct arm64_ftr_bits *ftrp, s64 reg,
			       s64 ftr_val)
{
	u64 mask = arm64_ftr_mask(ftrp);

	reg &= ~mask;
	reg |= (ftr_val << ftrp->shift) & mask;
	return reg;
}

static s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new,
				s64 cur)
{
	s64 ret = 0;

	switch (ftrp->type) {
	case FTR_EXACT:
		ret = ftrp->safe_val;
		break;
	case FTR_LOWER_SAFE:
		ret = new < cur ? new : cur;
		break;
	case FTR_HIGHER_OR_ZERO_SAFE:
		if (!cur || !new)
			break;
		/* Fallthrough */
	case FTR_HIGHER_SAFE:
		ret = new > cur ? new : cur;
		break;
	default:
		BUG();
	}

	return ret;
}
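
/*
 * Example: for a FTR_LOWER_SAFE field (e.g. a 4-bit feature level), a
 * boot CPU value of 2 and a late CPU value of 1 sanitise to 1, so the
 * system only advertises what every CPU implements. For FTR_EXACT
 * fields, any mismatch falls back to safe_val.
 */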

static void __init sort_ftr_regs(void)
{
	unsigned int i;

	/* Check that the array is sorted so that we can do the binary search */
	for (i = 1; i < ARRAY_SIZE(arm64_ftr_regs); i++)
		BUG_ON(arm64_ftr_regs[i].sys_id < arm64_ftr_regs[i - 1].sys_id);
}

/*
 * Initialise the CPU feature register from Boot CPU values.
 * Also initialises the strict_mask for the register.
 */
static void __init init_cpu_ftr_reg(u32 sys_reg, u64 new)
{
	u64 val = 0;
	u64 strict_mask = ~0x0ULL;
	const struct arm64_ftr_bits *ftrp;
	struct arm64_ftr_reg *reg = get_arm64_ftr_reg(sys_reg);

	BUG_ON(!reg);

	for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) {
		s64 ftr_new = arm64_ftr_value(ftrp, new);

		val = arm64_ftr_set_value(ftrp, val, ftr_new);
		if (!ftrp->strict)
			strict_mask &= ~arm64_ftr_mask(ftrp);
	}
	reg->sys_val = val;
	reg->strict_mask = strict_mask;
}
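
/*
 * Bits cleared in strict_mask belong to FTR_NONSTRICT fields; the later
 * sanity checks ignore them, so only strict fields can taint the kernel
 * when a secondary CPU disagrees with the boot CPU.
 */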

extern const struct arm64_cpu_capabilities arm64_errata[];
static void update_cpu_errata_workarounds(void);

void __init init_cpu_features(struct cpuinfo_arm64 *info)
{
	/* Before we start using the table, make sure it is sorted */
	sort_ftr_regs();

	init_cpu_ftr_reg(SYS_CTR_EL0, info->reg_ctr);
	init_cpu_ftr_reg(SYS_DCZID_EL0, info->reg_dczid);
	init_cpu_ftr_reg(SYS_CNTFRQ_EL0, info->reg_cntfrq);
	init_cpu_ftr_reg(SYS_ID_AA64DFR0_EL1, info->reg_id_aa64dfr0);
	init_cpu_ftr_reg(SYS_ID_AA64DFR1_EL1, info->reg_id_aa64dfr1);
	init_cpu_ftr_reg(SYS_ID_AA64ISAR0_EL1, info->reg_id_aa64isar0);
	init_cpu_ftr_reg(SYS_ID_AA64ISAR1_EL1, info->reg_id_aa64isar1);
	init_cpu_ftr_reg(SYS_ID_AA64ISAR2_EL1, info->reg_id_aa64isar2);
	init_cpu_ftr_reg(SYS_ID_AA64MMFR0_EL1, info->reg_id_aa64mmfr0);
	init_cpu_ftr_reg(SYS_ID_AA64MMFR1_EL1, info->reg_id_aa64mmfr1);
	init_cpu_ftr_reg(SYS_ID_AA64MMFR2_EL1, info->reg_id_aa64mmfr2);
	init_cpu_ftr_reg(SYS_ID_AA64PFR0_EL1, info->reg_id_aa64pfr0);
	init_cpu_ftr_reg(SYS_ID_AA64PFR1_EL1, info->reg_id_aa64pfr1);

	if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
		init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0);
		init_cpu_ftr_reg(SYS_ID_ISAR0_EL1, info->reg_id_isar0);
		init_cpu_ftr_reg(SYS_ID_ISAR1_EL1, info->reg_id_isar1);
		init_cpu_ftr_reg(SYS_ID_ISAR2_EL1, info->reg_id_isar2);
		init_cpu_ftr_reg(SYS_ID_ISAR3_EL1, info->reg_id_isar3);
		init_cpu_ftr_reg(SYS_ID_ISAR4_EL1, info->reg_id_isar4);
		init_cpu_ftr_reg(SYS_ID_ISAR5_EL1, info->reg_id_isar5);
		init_cpu_ftr_reg(SYS_ID_MMFR0_EL1, info->reg_id_mmfr0);
		init_cpu_ftr_reg(SYS_ID_MMFR1_EL1, info->reg_id_mmfr1);
		init_cpu_ftr_reg(SYS_ID_MMFR2_EL1, info->reg_id_mmfr2);
		init_cpu_ftr_reg(SYS_ID_MMFR3_EL1, info->reg_id_mmfr3);
		init_cpu_ftr_reg(SYS_ID_PFR0_EL1, info->reg_id_pfr0);
		init_cpu_ftr_reg(SYS_ID_PFR1_EL1, info->reg_id_pfr1);
		init_cpu_ftr_reg(SYS_MVFR0_EL1, info->reg_mvfr0);
		init_cpu_ftr_reg(SYS_MVFR1_EL1, info->reg_mvfr1);
		init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2);
	}

	/*
	 * Run the errata work around checks on the boot CPU, once we have
	 * initialised the cpu feature infrastructure.
	 */
	update_cpu_errata_workarounds();
}

static void update_cpu_ftr_reg(struct arm64_ftr_reg *reg, u64 new)
{
	const struct arm64_ftr_bits *ftrp;

	for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) {
		s64 ftr_cur = arm64_ftr_value(ftrp, reg->sys_val);
		s64 ftr_new = arm64_ftr_value(ftrp, new);

		if (ftr_cur == ftr_new)
			continue;
		/* Find a safe value */
		ftr_new = arm64_ftr_safe_value(ftrp, ftr_new, ftr_cur);
		reg->sys_val = arm64_ftr_set_value(ftrp, reg->sys_val, ftr_new);
	}
}

static int check_update_ftr_reg(u32 sys_id, int cpu, u64 val, u64 boot)
{
	struct arm64_ftr_reg *regp = get_arm64_ftr_reg(sys_id);

	BUG_ON(!regp);
	update_cpu_ftr_reg(regp, val);
	if ((boot & regp->strict_mask) == (val & regp->strict_mask))
		return 0;
	pr_warn("SANITY CHECK: Unexpected variation in %s. Boot CPU: %#016llx, CPU%d: %#016llx\n",
			regp->name, boot, cpu, val);
	return 1;
}
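
/*
 * check_update_ftr_reg() returns non-zero when a strict field differs
 * from the boot CPU; update_cpu_features() ORs these results together
 * and taints the kernel with TAINT_CPU_OUT_OF_SPEC if anything mismatched.
 */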

/*
 * Update system wide CPU feature registers with the values from a
 * non-boot CPU. Also performs SANITY checks to make sure that there
 * aren't any insane variations from that of the boot CPU.
 */
void update_cpu_features(int cpu,
			 struct cpuinfo_arm64 *info,
			 struct cpuinfo_arm64 *boot)
{
	int taint = 0;

	/*
	 * The kernel can handle differing I-cache policies, but otherwise
	 * caches should look identical. Userspace JITs will make use of
	 * *minLine.
	 */
	taint |= check_update_ftr_reg(SYS_CTR_EL0, cpu,
				      info->reg_ctr, boot->reg_ctr);

	/*
	 * Userspace may perform DC ZVA instructions. Mismatched block sizes
	 * could result in too much or too little memory being zeroed if a
	 * process is preempted and migrated between CPUs.
	 */
	taint |= check_update_ftr_reg(SYS_DCZID_EL0, cpu,
				      info->reg_dczid, boot->reg_dczid);

	/* If different, timekeeping will be broken (especially with KVM) */
	taint |= check_update_ftr_reg(SYS_CNTFRQ_EL0, cpu,
				      info->reg_cntfrq, boot->reg_cntfrq);

	/*
	 * The kernel uses self-hosted debug features and expects CPUs to
	 * support identical debug features. We presently need CTX_CMPs, WRPs,
	 * and BRPs to be identical.
	 * ID_AA64DFR1 is currently RES0.
	 */
	taint |= check_update_ftr_reg(SYS_ID_AA64DFR0_EL1, cpu,
				      info->reg_id_aa64dfr0, boot->reg_id_aa64dfr0);
	taint |= check_update_ftr_reg(SYS_ID_AA64DFR1_EL1, cpu,
				      info->reg_id_aa64dfr1, boot->reg_id_aa64dfr1);
	/*
	 * Even in big.LITTLE, processors should be identical instruction-set
	 * wise.
	 */
	taint |= check_update_ftr_reg(SYS_ID_AA64ISAR0_EL1, cpu,
				      info->reg_id_aa64isar0, boot->reg_id_aa64isar0);
	taint |= check_update_ftr_reg(SYS_ID_AA64ISAR1_EL1, cpu,
				      info->reg_id_aa64isar1, boot->reg_id_aa64isar1);
	taint |= check_update_ftr_reg(SYS_ID_AA64ISAR2_EL1, cpu,
				      info->reg_id_aa64isar2, boot->reg_id_aa64isar2);

	/*
	 * Differing PARange support is fine as long as all peripherals and
	 * memory are mapped within the minimum PARange of all CPUs.
	 * Linux should not care about secure memory.
	 */
	taint |= check_update_ftr_reg(SYS_ID_AA64MMFR0_EL1, cpu,
				      info->reg_id_aa64mmfr0, boot->reg_id_aa64mmfr0);
	taint |= check_update_ftr_reg(SYS_ID_AA64MMFR1_EL1, cpu,
				      info->reg_id_aa64mmfr1, boot->reg_id_aa64mmfr1);
	taint |= check_update_ftr_reg(SYS_ID_AA64MMFR2_EL1, cpu,
				      info->reg_id_aa64mmfr2, boot->reg_id_aa64mmfr2);

	/*
	 * EL3 is not our concern.
	 * ID_AA64PFR1 is currently RES0.
	 */
	taint |= check_update_ftr_reg(SYS_ID_AA64PFR0_EL1, cpu,
				      info->reg_id_aa64pfr0, boot->reg_id_aa64pfr0);
	taint |= check_update_ftr_reg(SYS_ID_AA64PFR1_EL1, cpu,
				      info->reg_id_aa64pfr1, boot->reg_id_aa64pfr1);

	/*
	 * If we have AArch32, we care about 32-bit features for compat.
	 * If the system doesn't support AArch32, don't update them.
	 */
	if (id_aa64pfr0_32bit_el0(read_system_reg(SYS_ID_AA64PFR0_EL1)) &&
	    id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {

		taint |= check_update_ftr_reg(SYS_ID_DFR0_EL1, cpu,
					      info->reg_id_dfr0, boot->reg_id_dfr0);
		taint |= check_update_ftr_reg(SYS_ID_ISAR0_EL1, cpu,
					      info->reg_id_isar0, boot->reg_id_isar0);
		taint |= check_update_ftr_reg(SYS_ID_ISAR1_EL1, cpu,
					      info->reg_id_isar1, boot->reg_id_isar1);
		taint |= check_update_ftr_reg(SYS_ID_ISAR2_EL1, cpu,
					      info->reg_id_isar2, boot->reg_id_isar2);
		taint |= check_update_ftr_reg(SYS_ID_ISAR3_EL1, cpu,
					      info->reg_id_isar3, boot->reg_id_isar3);
		taint |= check_update_ftr_reg(SYS_ID_ISAR4_EL1, cpu,
					      info->reg_id_isar4, boot->reg_id_isar4);
		taint |= check_update_ftr_reg(SYS_ID_ISAR5_EL1, cpu,
					      info->reg_id_isar5, boot->reg_id_isar5);

		/*
		 * Regardless of the value of the AuxReg field, the AIFSR, ADFSR, and
		 * ACTLR formats could differ across CPUs and therefore would have to
		 * be trapped for virtualization anyway.
		 */
		taint |= check_update_ftr_reg(SYS_ID_MMFR0_EL1, cpu,
					      info->reg_id_mmfr0, boot->reg_id_mmfr0);
		taint |= check_update_ftr_reg(SYS_ID_MMFR1_EL1, cpu,
					      info->reg_id_mmfr1, boot->reg_id_mmfr1);
		taint |= check_update_ftr_reg(SYS_ID_MMFR2_EL1, cpu,
					      info->reg_id_mmfr2, boot->reg_id_mmfr2);
		taint |= check_update_ftr_reg(SYS_ID_MMFR3_EL1, cpu,
					      info->reg_id_mmfr3, boot->reg_id_mmfr3);
		taint |= check_update_ftr_reg(SYS_ID_PFR0_EL1, cpu,
					      info->reg_id_pfr0, boot->reg_id_pfr0);
		taint |= check_update_ftr_reg(SYS_ID_PFR1_EL1, cpu,
					      info->reg_id_pfr1, boot->reg_id_pfr1);
		taint |= check_update_ftr_reg(SYS_MVFR0_EL1, cpu,
					      info->reg_mvfr0, boot->reg_mvfr0);
		taint |= check_update_ftr_reg(SYS_MVFR1_EL1, cpu,
					      info->reg_mvfr1, boot->reg_mvfr1);
		taint |= check_update_ftr_reg(SYS_MVFR2_EL1, cpu,
					      info->reg_mvfr2, boot->reg_mvfr2);
	}

	/*
	 * Mismatched CPU features are a recipe for disaster. Don't even
	 * pretend to support them.
	 */
	WARN_TAINT_ONCE(taint, TAINT_CPU_OUT_OF_SPEC,
			"Unsupported CPU feature variation.\n");
}

u64 read_system_reg(u32 id)
{
	struct arm64_ftr_reg *regp = get_arm64_ftr_reg(id);

	/* We shouldn't get a request for an unsupported register */
	BUG_ON(!regp);
	return regp->sys_val;
}

/*
 * __raw_read_system_reg() - Used by a STARTING cpu before cpuinfo is populated.
 * Read the system register on the current CPU
 */
static u64 __raw_read_system_reg(u32 sys_id)
{
	switch (sys_id) {
	case SYS_ID_PFR0_EL1:		return read_cpuid(ID_PFR0_EL1);
	case SYS_ID_PFR1_EL1:		return read_cpuid(ID_PFR1_EL1);
	case SYS_ID_DFR0_EL1:		return read_cpuid(ID_DFR0_EL1);
	case SYS_ID_MMFR0_EL1:		return read_cpuid(ID_MMFR0_EL1);
	case SYS_ID_MMFR1_EL1:		return read_cpuid(ID_MMFR1_EL1);
	case SYS_ID_MMFR2_EL1:		return read_cpuid(ID_MMFR2_EL1);
	case SYS_ID_MMFR3_EL1:		return read_cpuid(ID_MMFR3_EL1);
	case SYS_ID_ISAR0_EL1:		return read_cpuid(ID_ISAR0_EL1);
	case SYS_ID_ISAR1_EL1:		return read_cpuid(ID_ISAR1_EL1);
	case SYS_ID_ISAR2_EL1:		return read_cpuid(ID_ISAR2_EL1);
	case SYS_ID_ISAR3_EL1:		return read_cpuid(ID_ISAR3_EL1);
	case SYS_ID_ISAR4_EL1:		return read_cpuid(ID_ISAR4_EL1);
	case SYS_ID_ISAR5_EL1:		return read_cpuid(ID_ISAR5_EL1);
	case SYS_MVFR0_EL1:		return read_cpuid(MVFR0_EL1);
	case SYS_MVFR1_EL1:		return read_cpuid(MVFR1_EL1);
	case SYS_MVFR2_EL1:		return read_cpuid(MVFR2_EL1);

	case SYS_ID_AA64PFR0_EL1:	return read_cpuid(ID_AA64PFR0_EL1);
	case SYS_ID_AA64PFR1_EL1:	return read_cpuid(ID_AA64PFR1_EL1);
	case SYS_ID_AA64DFR0_EL1:	return read_cpuid(ID_AA64DFR0_EL1);
	case SYS_ID_AA64DFR1_EL1:	return read_cpuid(ID_AA64DFR1_EL1);
	case SYS_ID_AA64MMFR0_EL1:	return read_cpuid(ID_AA64MMFR0_EL1);
	case SYS_ID_AA64MMFR1_EL1:	return read_cpuid(ID_AA64MMFR1_EL1);
	case SYS_ID_AA64MMFR2_EL1:	return read_cpuid(ID_AA64MMFR2_EL1);
	case SYS_ID_AA64ISAR0_EL1:	return read_cpuid(ID_AA64ISAR0_EL1);
	case SYS_ID_AA64ISAR1_EL1:	return read_cpuid(ID_AA64ISAR1_EL1);
	case SYS_ID_AA64ISAR2_EL1:	return read_cpuid(ID_AA64ISAR2_EL1);

	case SYS_CNTFRQ_EL0:		return read_cpuid(CNTFRQ_EL0);
	case SYS_CTR_EL0:		return read_cpuid(CTR_EL0);
	case SYS_DCZID_EL0:		return read_cpuid(DCZID_EL0);
	default:
		BUG();
		return 0;
	}
}

#include <linux/irqchip/arm-gic-v3.h>

static bool
feature_matches(u64 reg, const struct arm64_cpu_capabilities *entry)
{
	int val = cpuid_feature_extract_field(reg, entry->field_pos, entry->sign);

	return val >= entry->min_field_value;
}

static bool
has_cpuid_feature(const struct arm64_cpu_capabilities *entry, int scope)
{
	u64 val;

	WARN_ON(scope == SCOPE_LOCAL_CPU && preemptible());
	if (scope == SCOPE_SYSTEM)
		val = read_system_reg(entry->sys_reg);
	else
		val = __raw_read_system_reg(entry->sys_reg);

	return feature_matches(val, entry);
}
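
/*
 * SCOPE_SYSTEM matches against the sanitised, system-wide register
 * value; SCOPE_LOCAL_CPU reads the raw register on the calling CPU,
 * which is what the verification paths use for late-onlined CPUs.
 */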

static bool has_useable_gicv3_cpuif(const struct arm64_cpu_capabilities *entry, int scope)
{
	bool has_sre;

	if (!has_cpuid_feature(entry, scope))
		return false;

	has_sre = gic_enable_sre();
	if (!has_sre)
		pr_warn_once("%s present but disabled by higher exception level\n",
			     entry->desc);

	return has_sre;
}

static bool has_no_hw_prefetch(const struct arm64_cpu_capabilities *entry, int __unused)
{
	u32 midr = read_cpuid_id();

	/* Cavium ThunderX pass 1.x and 2.x */
	return MIDR_IS_CPU_MODEL_RANGE(midr, MIDR_THUNDERX,
		MIDR_CPU_VAR_REV(0, 0),
		MIDR_CPU_VAR_REV(1, MIDR_REVISION_MASK));
}

static bool runs_at_el2(const struct arm64_cpu_capabilities *entry, int __unused)
{
	return is_kernel_in_hyp_mode();
}

static bool hyp_offset_low(const struct arm64_cpu_capabilities *entry,
			   int __unused)
{
	phys_addr_t idmap_addr = virt_to_phys(__hyp_idmap_text_start);

	/*
	 * Activate the lower HYP offset only if:
	 * - the idmap doesn't clash with it,
	 * - the kernel is not running at EL2.
	 */
	return idmap_addr > GENMASK(VA_BITS - 2, 0) && !is_kernel_in_hyp_mode();
}

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
static int __kpti_forced; /* 0: not forced, >0: forced on, <0: forced off */

static bool unmap_kernel_at_el0(const struct arm64_cpu_capabilities *entry,
				int scope)
{
	/* List of CPUs that are not vulnerable and don't need KPTI */
	static const struct midr_range kpti_safe_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
		MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
		{ /* sentinel */ }
	};
	char const *str = "command line option";
	u64 pfr0 = read_system_reg(SYS_ID_AA64PFR0_EL1);

	/*
	 * For reasons that aren't entirely clear, enabling KPTI on Cavium
	 * ThunderX leads to apparent I-cache corruption of kernel text, which
	 * ends as well as you might imagine. Don't even try.
	 */
	if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_27456)) {
		str = "ARM64_WORKAROUND_CAVIUM_27456";
		__kpti_forced = -1;
	}

	/* Forced? */
	if (__kpti_forced) {
		pr_info_once("kernel page table isolation forced %s by %s\n",
			     __kpti_forced > 0 ? "ON" : "OFF", str);
		return __kpti_forced > 0;
	}

	/* Useful for KASLR robustness */
	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE))
		return true;

	/* Don't force KPTI for CPUs that are not vulnerable */
	if (is_midr_in_range_list(read_cpuid_id(), kpti_safe_list))
		return false;

	/* Defer to CPU feature registers */
	return !cpuid_feature_extract_unsigned_field(pfr0,
						     ID_AA64PFR0_CSV3_SHIFT);
}

static void
kpti_install_ng_mappings(const struct arm64_cpu_capabilities *__unused)
{
	typedef void (kpti_remap_fn)(int, int, phys_addr_t);
	extern kpti_remap_fn idmap_kpti_install_ng_mappings;
	kpti_remap_fn *remap_fn;

	static bool kpti_applied = false;
	int cpu = smp_processor_id();

	if (__this_cpu_read(this_cpu_vector) == vectors) {
		const char *v = arm64_get_bp_hardening_vector(EL1_VECTOR_KPTI);

		__this_cpu_write(this_cpu_vector, v);
	}

	if (kpti_applied)
		return;

	remap_fn = (void *)__pa_symbol(idmap_kpti_install_ng_mappings);

	cpu_install_idmap();
	remap_fn(cpu, num_online_cpus(), __pa_symbol(swapper_pg_dir));
	cpu_uninstall_idmap();

	if (!cpu)
		kpti_applied = true;

	return;
}

static int __init parse_kpti(char *str)
{
	bool enabled;
	int ret = strtobool(str, &enabled);

	if (ret)
		return ret;

	__kpti_forced = enabled ? 1 : -1;
	return 0;
}
early_param("kpti", parse_kpti);
#endif	/* CONFIG_UNMAP_KERNEL_AT_EL0 */

static void cpu_copy_el2regs(const struct arm64_cpu_capabilities *__unused)
{
	/*
	 * Copy register values that aren't redirected by hardware.
	 *
	 * Before code patching, we only set tpidr_el1, all CPUs need to copy
	 * this value to tpidr_el2 before we patch the code. Once we've done
	 * that, freshly-onlined CPUs will set tpidr_el2, so we don't need to
	 * do anything here.
	 */
	if (!alternatives_applied)
		write_sysreg(read_sysreg(tpidr_el1), tpidr_el2);
}

static void elf_hwcap_fixup(void)
{
#ifdef CONFIG_ARM64_ERRATUM_1742098
	if (cpus_have_const_cap(ARM64_WORKAROUND_1742098))
		compat_elf_hwcap2 &= ~COMPAT_HWCAP2_AES;
#endif /* ARM64_ERRATUM_1742098 */
}

static const struct arm64_cpu_capabilities arm64_features[] = {
	{
		.desc = "GIC system register CPU interface",
		.capability = ARM64_HAS_SYSREG_GIC_CPUIF,
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.matches = has_useable_gicv3_cpuif,
		.sys_reg = SYS_ID_AA64PFR0_EL1,
		.field_pos = ID_AA64PFR0_GIC_SHIFT,
		.sign = FTR_UNSIGNED,
		.min_field_value = 1,
	},
#ifdef CONFIG_ARM64_PAN
	{
		.desc = "Privileged Access Never",
		.capability = ARM64_HAS_PAN,
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.matches = has_cpuid_feature,
		.sys_reg = SYS_ID_AA64MMFR1_EL1,
		.field_pos = ID_AA64MMFR1_PAN_SHIFT,
		.sign = FTR_UNSIGNED,
		.min_field_value = 1,
		.cpu_enable = cpu_enable_pan,
	},
#endif /* CONFIG_ARM64_PAN */
#if defined(CONFIG_AS_LSE) && defined(CONFIG_ARM64_LSE_ATOMICS)
	{
		.desc = "LSE atomic instructions",
		.capability = ARM64_HAS_LSE_ATOMICS,
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.matches = has_cpuid_feature,
		.sys_reg = SYS_ID_AA64ISAR0_EL1,
		.field_pos = ID_AA64ISAR0_ATOMICS_SHIFT,
		.sign = FTR_UNSIGNED,
		.min_field_value = 2,
	},
#endif /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
	{
		.desc = "Software prefetching using PRFM",
		.capability = ARM64_HAS_NO_HW_PREFETCH,
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.matches = has_no_hw_prefetch,
	},
#ifdef CONFIG_ARM64_UAO
	{
		.desc = "User Access Override",
		.capability = ARM64_HAS_UAO,
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.matches = has_cpuid_feature,
		.sys_reg = SYS_ID_AA64MMFR2_EL1,
		.field_pos = ID_AA64MMFR2_UAO_SHIFT,
		.min_field_value = 1,
		/*
		 * We rely on stop_machine() calling uao_thread_switch() to set
		 * UAO immediately after patching.
		 */
	},
#endif /* CONFIG_ARM64_UAO */
#ifdef CONFIG_ARM64_PAN
	{
		.capability = ARM64_ALT_PAN_NOT_UAO,
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.matches = cpufeature_pan_not_uao,
	},
#endif /* CONFIG_ARM64_PAN */
	{
		.desc = "Virtualization Host Extensions",
		.capability = ARM64_HAS_VIRT_HOST_EXTN,
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.matches = runs_at_el2,
		.cpu_enable = cpu_copy_el2regs,
	},
	{
		.desc = "32-bit EL0 Support",
		.capability = ARM64_HAS_32BIT_EL0,
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.matches = has_cpuid_feature,
		.sys_reg = SYS_ID_AA64PFR0_EL1,
		.sign = FTR_UNSIGNED,
		.field_pos = ID_AA64PFR0_EL0_SHIFT,
		.min_field_value = ID_AA64PFR0_EL0_32BIT_64BIT,
	},
	{
		.desc = "Reduced HYP mapping offset",
		.capability = ARM64_HYP_OFFSET_LOW,
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.matches = hyp_offset_low,
	},
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	{
		.desc = "Kernel page table isolation (KPTI)",
		.capability = ARM64_UNMAP_KERNEL_AT_EL0,
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,
		.matches = unmap_kernel_at_el0,
		.cpu_enable = kpti_install_ng_mappings,
	},
#endif
	{},
};

#define HWCAP_CAP(reg, field, s, min_value, cap_type, cap)	\
	{							\
		.desc = #cap,					\
		.type = ARM64_CPUCAP_SYSTEM_FEATURE,		\
		.matches = has_cpuid_feature,			\
		.sys_reg = reg,					\
		.field_pos = field,				\
		.sign = s,					\
		.min_field_value = min_value,			\
		.hwcap_type = cap_type,				\
		.hwcap = cap,					\
	}
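
/*
 * For example, HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT,
 * FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_AES) expands to a capability entry
 * that sets HWCAP_AES once the sanitised AES field is at least 1.
 */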

static const struct arm64_cpu_capabilities arm64_elf_hwcaps[] = {
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_PMULL),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_AES_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_AES),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA1),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_SHA2),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_HWCAP, HWCAP_CRC32),
	HWCAP_CAP(SYS_ID_AA64ISAR0_EL1, ID_AA64ISAR0_ATOMICS_SHIFT, FTR_UNSIGNED, 2, CAP_HWCAP, HWCAP_ATOMICS),
	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_FP),
	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_FP_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_FPHP),
	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 0, CAP_HWCAP, HWCAP_ASIMD),
	HWCAP_CAP(SYS_ID_AA64PFR0_EL1, ID_AA64PFR0_ASIMD_SHIFT, FTR_SIGNED, 1, CAP_HWCAP, HWCAP_ASIMDHP),
	{},
};

static const struct arm64_cpu_capabilities compat_elf_hwcaps[] = {
#ifdef CONFIG_COMPAT
	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 2, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_PMULL),
	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_AES_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_AES),
	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA1_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA1),
	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_SHA2_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_SHA2),
	HWCAP_CAP(SYS_ID_ISAR5_EL1, ID_ISAR5_CRC32_SHIFT, FTR_UNSIGNED, 1, CAP_COMPAT_HWCAP2, COMPAT_HWCAP2_CRC32),
#endif
	{},
};

static void __init cap_set_elf_hwcap(const struct arm64_cpu_capabilities *cap)
{
	switch (cap->hwcap_type) {
	case CAP_HWCAP:
		elf_hwcap |= cap->hwcap;
		break;
#ifdef CONFIG_COMPAT
	case CAP_COMPAT_HWCAP:
		compat_elf_hwcap |= (u32)cap->hwcap;
		break;
	case CAP_COMPAT_HWCAP2:
		compat_elf_hwcap2 |= (u32)cap->hwcap;
		break;
#endif
	default:
		WARN_ON(cap->hwcap_type);
		break;
	}
}

/* Check if we have a particular HWCAP enabled */
static bool cpus_have_elf_hwcap(const struct arm64_cpu_capabilities *cap)
{
	bool rc;

	switch (cap->hwcap_type) {
	case CAP_HWCAP:
		rc = (elf_hwcap & cap->hwcap) != 0;
		break;
#ifdef CONFIG_COMPAT
	case CAP_COMPAT_HWCAP:
		rc = (compat_elf_hwcap & (u32)cap->hwcap) != 0;
		break;
	case CAP_COMPAT_HWCAP2:
		rc = (compat_elf_hwcap2 & (u32)cap->hwcap) != 0;
		break;
#endif
	default:
		WARN_ON(cap->hwcap_type);
		rc = false;
		break;
	}

	return rc;
}

static void __init setup_elf_hwcaps(const struct arm64_cpu_capabilities *hwcaps)
{
	for (; hwcaps->matches; hwcaps++)
		if (hwcaps->matches(hwcaps, cpucap_default_scope(hwcaps)))
			cap_set_elf_hwcap(hwcaps);
}

/*
 * Check if the current CPU has a given feature capability.
 * Should be called from non-preemptible context.
 */
static bool __this_cpu_has_cap(const struct arm64_cpu_capabilities *cap_array,
			       unsigned int cap)
{
	const struct arm64_cpu_capabilities *caps;

	if (WARN_ON(preemptible()))
		return false;

	for (caps = cap_array; caps->matches; caps++)
		if (caps->capability == cap &&
		    caps->matches(caps, SCOPE_LOCAL_CPU))
			return true;
	return false;
}

static void update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
				    const char *info)
{
	for (; caps->matches; caps++) {
		if (!caps->matches(caps, cpucap_default_scope(caps)))
			continue;

		if (!cpus_have_cap(caps->capability) && caps->desc)
			pr_info("%s %s\n", info, caps->desc);
		cpus_set_cap(caps->capability);
	}
}

static int __enable_cpu_capability(void *arg)
{
	const struct arm64_cpu_capabilities *cap = arg;

	cap->cpu_enable(cap);
	return 0;
}

/*
 * Run through the enabled capabilities and enable() it on all active
 * CPUs
 */
static void __init
enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps)
{
	for (; caps->matches; caps++) {
		unsigned int num = caps->capability;

		if (!cpus_have_cap(num))
			continue;

		/* Ensure cpus_have_const_cap(num) works */
		static_branch_enable(&cpu_hwcap_keys[num]);

		if (caps->cpu_enable) {
			/*
			 * Use stop_machine() as it schedules the work allowing
			 * us to modify PSTATE, instead of on_each_cpu() which
			 * uses an IPI, giving us a PSTATE that disappears when
			 * we return.
			 */
			stop_machine(__enable_cpu_capability, (void *)caps,
				     cpu_online_mask);
		}
	}
}

/*
 * Flag to indicate if we have computed the system wide
 * capabilities based on the boot time active CPUs. This
 * will be used to determine if a new booting CPU should
 * go through the verification process to make sure that it
 * supports the system capabilities, without using a hotplug
 * notifier.
 */
static bool sys_caps_initialised;

static inline void set_sys_caps_initialised(void)
{
	sys_caps_initialised = true;
}

/*
 * Check for CPU features that are used in early boot
 * based on the Boot CPU value.
 */
static void check_early_cpu_features(void)
{
	verify_cpu_run_el();
	verify_cpu_asid_bits();
}

static void
verify_local_elf_hwcaps(const struct arm64_cpu_capabilities *caps)
{
	for (; caps->matches; caps++)
		if (cpus_have_elf_hwcap(caps) && !caps->matches(caps, SCOPE_LOCAL_CPU)) {
			pr_crit("CPU%d: missing HWCAP: %s\n",
					smp_processor_id(), caps->desc);
			cpu_die_early();
		}
}

static void
verify_local_cpu_features(const struct arm64_cpu_capabilities *caps_list)
{
	const struct arm64_cpu_capabilities *caps = caps_list;
	for (; caps->matches; caps++) {
		if (!cpus_have_cap(caps->capability))
			continue;
		/*
		 * If the new CPU misses an advertised feature, we cannot proceed
		 * further, park the cpu.
		 */
		if (!__this_cpu_has_cap(caps_list, caps->capability)) {
			pr_crit("CPU%d: missing feature: %s\n",
					smp_processor_id(), caps->desc);
			cpu_die_early();
		}
		if (caps->cpu_enable)
			caps->cpu_enable(caps);
	}
}

/*
 * The CPU Errata work arounds are detected and applied at boot time
 * and the related information is freed soon after. If the new CPU requires
 * an erratum workaround not detected at boot, fail this CPU.
 */
static void verify_local_cpu_errata_workarounds(void)
{
	const struct arm64_cpu_capabilities *caps = arm64_errata;

	for (; caps->matches; caps++) {
		if (cpus_have_cap(caps->capability)) {
			if (caps->cpu_enable)
				caps->cpu_enable(caps);
		} else if (caps->matches(caps, SCOPE_LOCAL_CPU)) {
			pr_crit("CPU%d: Requires work around for %s, not detected"
					" at boot time\n",
				smp_processor_id(),
				caps->desc ? : "an erratum");
			cpu_die_early();
		}
	}
}

static void update_cpu_errata_workarounds(void)
{
	update_cpu_capabilities(arm64_errata, "enabling workaround for");
}

static void __init enable_errata_workarounds(void)
{
	enable_cpu_capabilities(arm64_errata);
}

/*
 * Run through the enabled system capabilities and enable() it on this CPU.
 * The capabilities were decided based on the available CPUs at the boot time.
 * Any new CPU should match the system wide status of the capability. If the
 * new CPU doesn't have a capability which the system now has enabled, we
 * cannot do anything to fix it up and could cause unexpected failures. So
 * we park the CPU.
 */
static void verify_local_cpu_capabilities(void)
{
	verify_local_cpu_errata_workarounds();
	verify_local_cpu_features(arm64_features);
	verify_local_elf_hwcaps(arm64_elf_hwcaps);
	if (system_supports_32bit_el0())
		verify_local_elf_hwcaps(compat_elf_hwcaps);
}

void check_local_cpu_capabilities(void)
{
	/*
	 * All secondary CPUs should conform to the early CPU features
	 * in use by the kernel based on boot CPU.
	 */
	check_early_cpu_features();

	/*
	 * If we haven't finalised the system capabilities, this CPU gets
	 * a chance to update the errata work arounds.
	 * Otherwise, this CPU should verify that it has all the system
	 * advertised capabilities.
	 */
	if (!sys_caps_initialised)
		update_cpu_errata_workarounds();
	else
		verify_local_cpu_capabilities();
}

static void __init setup_feature_capabilities(void)
{
	update_cpu_capabilities(arm64_features, "detected feature:");
	enable_cpu_capabilities(arm64_features);
}

DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready);
EXPORT_SYMBOL(arm64_const_caps_ready);

static void __init mark_const_caps_ready(void)
{
	static_branch_enable(&arm64_const_caps_ready);
}

extern const struct arm64_cpu_capabilities arm64_errata[];

bool this_cpu_has_cap(unsigned int cap)
{
	return (__this_cpu_has_cap(arm64_features, cap) ||
		__this_cpu_has_cap(arm64_errata, cap));
}

void __init setup_cpu_features(void)
{
	u32 cwg;
	int cls;

	/* Set the CPU feature capabilities */
	setup_feature_capabilities();
	enable_errata_workarounds();
	mark_const_caps_ready();
	setup_elf_hwcaps(arm64_elf_hwcaps);

	if (system_supports_32bit_el0()) {
		setup_elf_hwcaps(compat_elf_hwcaps);
		elf_hwcap_fixup();
	}

	/* Advertise that we have computed the system capabilities */
	set_sys_caps_initialised();

	/*
	 * Check for sane CTR_EL0.CWG value.
	 */
	cwg = cache_type_cwg();
	cls = cache_line_size();
	if (!cwg)
		pr_warn("No Cache Writeback Granule information, assuming cache line size %d\n",
			cls);
	if (L1_CACHE_BYTES < cls)
		pr_warn("L1_CACHE_BYTES smaller than the Cache Writeback Granule (%d < %d)\n",
			L1_CACHE_BYTES, cls);
}

static bool __maybe_unused
cpufeature_pan_not_uao(const struct arm64_cpu_capabilities *entry, int __unused)
{
	return (cpus_have_const_cap(ARM64_HAS_PAN) && !cpus_have_const_cap(ARM64_HAS_UAO));
}