/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM_KVM_INIT_H__
#define __ARM_KVM_INIT_H__

#ifndef __ASSEMBLY__
#error Assembly-only header
#endif

#include <asm/kvm_arm.h>
#include <asm/ptrace.h>
#include <asm/sysreg.h>
#include <linux/irqchip/arm-gic-v3.h>

.macro __init_el2_sctlr
	mov_q	x0, INIT_SCTLR_EL2_MMU_OFF
	msr	sctlr_el2, x0
	isb
.endm

.macro __init_el2_hcrx
	mrs	x0, id_aa64mmfr1_el1
	ubfx	x0, x0, #ID_AA64MMFR1_EL1_HCX_SHIFT, #4
	cbz	x0, .Lskip_hcrx_\@
	mov_q	x0, HCRX_HOST_FLAGS
	msr_s	SYS_HCRX_EL2, x0
.Lskip_hcrx_\@:
.endm
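
/*
 * Note: HCRX_EL2 only exists with FEAT_HCX; writing it on a CPU that
 * lacks the feature would UNDEF, hence the ID_AA64MMFR1_EL1.HCX probe
 * above. The same mrs/ubfx/cbz probing pattern guards every optional
 * feature in this file.
 */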

/* Check if running in host at EL2 mode, i.e., (h)VHE. Jump to fail if not. */
.macro __check_hvhe fail, tmp
	mrs	\tmp, hcr_el2
	and	\tmp, \tmp, #HCR_E2H
	cbz	\tmp, \fail
.endm
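
/*
 * Illustrative use from inside some macro (an assumed caller, not taken
 * from this file):
 *
 *	__check_hvhe .Lnot_vhe_\@, x1	// x1 is clobbered
 *	// ... HCR_EL2.E2H == 1: VHE/hVHE-specific setup ...
 * .Lnot_vhe_\@:
 */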

/*
 * Allow Non-secure EL1 and EL0 to access physical timer and counter.
 * This is not necessary for VHE, since the host kernel runs in EL2,
 * and EL0 accesses are configured at a later stage of the boot process.
 * Note that when HCR_EL2.E2H == 1, CNTHCTL_EL2 has the same bit layout
 * as CNTKCTL_EL1, and CNTKCTL_EL1 accessing instructions are redefined
 * to access CNTHCTL_EL2. This allows a kernel designed to run at EL1
 * to transparently mess with the EL0 bits via CNTKCTL_EL1 access in
 * EL2.
 */
.macro __init_el2_timers
	mov	x0, #3				// Enable EL1 physical timers
	__check_hvhe .LnVHE_\@, x1
	lsl	x0, x0, #10
.LnVHE_\@:
	msr	cnthctl_el2, x0
	msr	cntvoff_el2, xzr		// Clear virtual offset
.endm
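
/*
 * The lsl #10 above accounts for the E2H layout: with HCR_EL2.E2H == 1,
 * the EL1 physical timer/counter enable bits (EL1PCTEN/EL1PTEN) sit at
 * CNTHCTL_EL2[11:10] rather than at bits [1:0] as in the nVHE layout.
 */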

.macro __init_el2_debug
	mrs	x1, id_aa64dfr0_el1
	sbfx	x0, x1, #ID_AA64DFR0_EL1_PMUVer_SHIFT, #4
	cmp	x0, #1
	b.lt	.Lskip_pmu_\@			// Skip if no PMU present
	mrs	x0, pmcr_el0			// Disable debug access traps
	ubfx	x0, x0, #11, #5			// to EL2 and allow access to
.Lskip_pmu_\@:
	csel	x2, xzr, x0, lt			// all PMU counters from EL1

	/* Statistical profiling */
	ubfx	x0, x1, #ID_AA64DFR0_EL1_PMSVer_SHIFT, #4
	cbz	x0, .Lskip_spe_\@		// Skip if SPE not present

	mrs_s	x0, SYS_PMBIDR_EL1		// If SPE available at EL2,
	and	x0, x0, #(1 << PMBIDR_EL1_P_SHIFT)
	cbnz	x0, .Lskip_spe_el2_\@		// then permit sampling of physical
	mov	x0, #(1 << PMSCR_EL2_PCT_SHIFT | \
		      1 << PMSCR_EL2_PA_SHIFT)
	msr_s	SYS_PMSCR_EL2, x0		// addresses and physical counter
.Lskip_spe_el2_\@:
	mov	x0, #(MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT)
	orr	x2, x2, x0			// If we don't have VHE, then
						// use EL1&0 translation.

.Lskip_spe_\@:
	/* Trace buffer */
	ubfx	x0, x1, #ID_AA64DFR0_EL1_TraceBuffer_SHIFT, #4
	cbz	x0, .Lskip_trace_\@		// Skip if TraceBuffer is not present

	mrs_s	x0, SYS_TRBIDR_EL1
	and	x0, x0, TRBIDR_EL1_P
	cbnz	x0, .Lskip_trace_\@		// If TRBE is available at EL2

	mov	x0, #(MDCR_EL2_E2TB_MASK << MDCR_EL2_E2TB_SHIFT)
	orr	x2, x2, x0			// allow the EL1&0 translation
						// to own it.

.Lskip_trace_\@:
	msr	mdcr_el2, x2			// Configure debug traps
.endm
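
/*
 * MDCR_EL2.E2PB/E2TB note: a value of 0 keeps the SPE/TRBE buffers
 * owned by the EL2 translation regime; programming the full mask hands
 * ownership to the EL1&0 regime, so the kernel can still drive the
 * buffers when it does not end up running at EL2.
 */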

/* LORegions */
.macro __init_el2_lor
	mrs	x1, id_aa64mmfr1_el1
	ubfx	x0, x1, #ID_AA64MMFR1_EL1_LO_SHIFT, 4
	cbz	x0, .Lskip_lor_\@
	msr_s	SYS_LORC_EL1, xzr
.Lskip_lor_\@:
.endm

/* Stage-2 translation */
.macro __init_el2_stage2
	msr	vttbr_el2, xzr
.endm

/* GICv3 system register access */
.macro __init_el2_gicv3
	mrs	x0, id_aa64pfr0_el1
	ubfx	x0, x0, #ID_AA64PFR0_EL1_GIC_SHIFT, #4
	cbz	x0, .Lskip_gicv3_\@

	mrs_s	x0, SYS_ICC_SRE_EL2
	orr	x0, x0, #ICC_SRE_EL2_SRE	// Set ICC_SRE_EL2.SRE==1
	orr	x0, x0, #ICC_SRE_EL2_ENABLE	// Set ICC_SRE_EL2.Enable==1
	msr_s	SYS_ICC_SRE_EL2, x0
	isb					// Make sure SRE is now set
	mrs_s	x0, SYS_ICC_SRE_EL2		// Read SRE back,
	tbz	x0, #0, .Lskip_gicv3_\@		// and check that it sticks
	msr_s	SYS_ICH_HCR_EL2, xzr		// Reset ICH_HCR_EL2 to defaults
.Lskip_gicv3_\@:
.endm
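
/*
 * The read-back is needed because SRE only sticks if EL3 has enabled
 * system register access for the lower ELs (ICC_SRE_EL3.SRE/.Enable);
 * otherwise the write can be silently ignored and the GIC has to be
 * driven through the memory-mapped interface instead.
 */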

.macro __init_el2_hstr
	msr	hstr_el2, xzr			// Disable CP15 traps to EL2
.endm

/* Virtual CPU ID registers */
.macro __init_el2_nvhe_idregs
	mrs	x0, midr_el1
	mrs	x1, mpidr_el1
	msr	vpidr_el2, x0
	msr	vmpidr_el2, x1
.endm

/* Coprocessor traps */
.macro __init_el2_cptr
	__check_hvhe .LnVHE_\@, x1
	mov	x0, #(CPACR_EL1_FPEN_EL1EN | CPACR_EL1_FPEN_EL0EN)
	msr	cpacr_el1, x0
	b	.Lskip_set_cptr_\@
.LnVHE_\@:
	mov	x0, #0x33ff
	msr	cptr_el2, x0			// Disable copro. traps to EL2
.Lskip_set_cptr_\@:
.endm
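
/*
 * With HCR_EL2.E2H == 1, CPTR_EL2 takes the CPACR_EL1 format and is
 * reachable via the cpacr_el1 encoding, which is why the (h)VHE path
 * writes FPEN through cpacr_el1. The nVHE value #0x33ff sets the RES1
 * bits of the legacy CPTR_EL2 layout while leaving TZ/TSM set, so SVE
 * and SME stay trapped until finalise_el2_state clears them.
 */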

/* Disable any fine grained traps */
.macro __init_el2_fgt
	mrs	x1, id_aa64mmfr0_el1
	ubfx	x1, x1, #ID_AA64MMFR0_EL1_FGT_SHIFT, #4
	cbz	x1, .Lskip_fgt_\@

	mov	x0, xzr
	mrs	x1, id_aa64dfr0_el1
	ubfx	x1, x1, #ID_AA64DFR0_EL1_PMSVer_SHIFT, #4
	cmp	x1, #3
	b.lt	.Lset_debug_fgt_\@
	/* Disable PMSNEVFR_EL1 read and write traps */
	orr	x0, x0, #(1 << 62)

.Lset_debug_fgt_\@:
	msr_s	SYS_HDFGRTR_EL2, x0
	msr_s	SYS_HDFGWTR_EL2, x0

	mov	x0, xzr
	mrs	x1, id_aa64pfr1_el1
	ubfx	x1, x1, #ID_AA64PFR1_EL1_SME_SHIFT, #4
	cbz	x1, .Lset_pie_fgt_\@

	/* Disable nVHE traps of TPIDR2 and SMPRI */
	orr	x0, x0, #HFGxTR_EL2_nSMPRI_EL1_MASK
	orr	x0, x0, #HFGxTR_EL2_nTPIDR2_EL0_MASK

.Lset_pie_fgt_\@:
	mrs_s	x1, SYS_ID_AA64MMFR3_EL1
	ubfx	x1, x1, #ID_AA64MMFR3_EL1_S1PIE_SHIFT, #4
	cbz	x1, .Lset_fgt_\@

	/* Disable trapping of PIR_EL1 / PIRE0_EL1 */
	orr	x0, x0, #HFGxTR_EL2_nPIR_EL1
	orr	x0, x0, #HFGxTR_EL2_nPIRE0_EL1

.Lset_fgt_\@:
	msr_s	SYS_HFGRTR_EL2, x0
	msr_s	SYS_HFGWTR_EL2, x0
	msr_s	SYS_HFGITR_EL2, xzr

	mrs	x1, id_aa64pfr0_el1		// AMU traps UNDEF without AMU
	ubfx	x1, x1, #ID_AA64PFR0_EL1_AMU_SHIFT, #4
	cbz	x1, .Lskip_fgt_\@

	msr_s	SYS_HAFGRTR_EL2, xzr
.Lskip_fgt_\@:
.endm
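
/*
 * The FGT registers reset to an UNKNOWN value and must therefore be
 * written before the first trip to a lower EL, or stray traps may be
 * taken. Most trap bits here are negative ("set to 1 to disable the
 * trap"), which is why the register image starts out at zero and the
 * feature probes OR in the disable bits only when the feature exists.
 */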

.macro __init_el2_nvhe_prepare_eret
	mov	x0, #INIT_PSTATE_EL1
	msr	spsr_el2, x0
.endm
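
/*
 * INIT_PSTATE_EL1 (see <asm/ptrace.h>) selects EL1h with all DAIF bits
 * masked; the eret consuming this SPSR is expected to be issued by the
 * caller once elr_el2 has been pointed at the EL1 entry point.
 */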

/**
 * Initialize EL2 registers to sane values. This should be called early on all
 * cores that were booted in EL2. Note that everything gets initialised as
 * if VHE was not available. The kernel context will be upgraded to VHE
 * if possible later on in the boot process.
 *
 * Regs: x0, x1 and x2 are clobbered.
 */
.macro init_el2_state
	__init_el2_sctlr
	__init_el2_hcrx
	__init_el2_timers
	__init_el2_debug
	__init_el2_lor
	__init_el2_stage2
	__init_el2_gicv3
	__init_el2_hstr
	__init_el2_nvhe_idregs
	__init_el2_cptr
	__init_el2_fgt
.endm
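
/*
 * Usage sketch (illustrative; the real call site lives in head.S):
 *
 *	mrs	x0, CurrentEL
 *	cmp	x0, #CurrentEL_EL2
 *	b.ne	1f			// booted at EL1, nothing to do here
 *	init_el2_state			// clobbers x0-x2
 * 1:
 */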

#ifndef __KVM_NVHE_HYPERVISOR__
// This will clobber tmp1 and tmp2, and expect tmp1 to contain
// the id register value as read from the HW
.macro __check_override idreg, fld, width, pass, fail, tmp1, tmp2
	ubfx	\tmp1, \tmp1, #\fld, #\width
	cbz	\tmp1, \fail

	adr_l	\tmp1, \idreg\()_override
	ldr	\tmp2, [\tmp1, FTR_OVR_VAL_OFFSET]
	ldr	\tmp1, [\tmp1, FTR_OVR_MASK_OFFSET]
	ubfx	\tmp2, \tmp2, #\fld, #\width
	ubfx	\tmp1, \tmp1, #\fld, #\width
	cmp	\tmp1, xzr
	and	\tmp2, \tmp2, \tmp1
	csinv	\tmp2, \tmp2, xzr, ne
	cbnz	\tmp2, \pass
	b	\fail
.endm
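
/*
 * How the override test above works: if the override mask field is zero
 * (no override was requested for this field), csinv turns tmp2 into
 * all-ones and the cbnz takes the pass branch on the strength of the HW
 * value alone; if a mask is present, tmp2 holds the overridden field
 * value, and zero means the feature was forced off.
 */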

// This will clobber tmp1 and tmp2
.macro check_override idreg, fld, pass, fail, tmp1, tmp2
	mrs	\tmp1, \idreg\()_el1
	__check_override \idreg \fld 4 \pass \fail \tmp1 \tmp2
.endm
#else
// This will clobber tmp
.macro __check_override idreg, fld, width, pass, fail, tmp, ignore
	ldr_l	\tmp, \idreg\()_el1_sys_val
	ubfx	\tmp, \tmp, #\fld, #\width
	cbnz	\tmp, \pass
	b	\fail
.endm

.macro check_override idreg, fld, pass, fail, tmp, ignore
	__check_override \idreg \fld 4 \pass \fail \tmp \ignore
.endm
#endif
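
/*
 * The nVHE hypervisor variant above cannot reach the kernel's override
 * structures, so it tests the snapshotted \idreg\()_el1_sys_val copies
 * instead, which are expected to have any overrides already applied by
 * the time the hypervisor runs.
 */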

.macro finalise_el2_state
	check_override id_aa64pfr0, ID_AA64PFR0_EL1_SVE_SHIFT, .Linit_sve_\@, .Lskip_sve_\@, x1, x2

.Linit_sve_\@:	/* SVE register access */
	__check_hvhe .Lcptr_nvhe_\@, x1

	// (h)VHE case
	mrs	x0, cpacr_el1			// Disable SVE traps
	orr	x0, x0, #(CPACR_EL1_ZEN_EL1EN | CPACR_EL1_ZEN_EL0EN)
	msr	cpacr_el1, x0
	b	.Lskip_set_cptr_\@

.Lcptr_nvhe_\@: // nVHE case
	mrs	x0, cptr_el2			// Disable SVE traps
	bic	x0, x0, #CPTR_EL2_TZ
	msr	cptr_el2, x0
.Lskip_set_cptr_\@:
	isb
	mov	x1, #ZCR_ELx_LEN_MASK		// SVE: Enable full vector
	msr_s	SYS_ZCR_EL2, x1			// length for EL1.

.Lskip_sve_\@:
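	/*
	 * Writing ZCR_ELx_LEN_MASK (all-ones LEN) requests the maximum
	 * vector length; the hardware constrains it to the largest length
	 * the implementation supports, so this is safe on any SVE CPU.
	 */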
	check_override id_aa64pfr1, ID_AA64PFR1_EL1_SME_SHIFT, .Linit_sme_\@, .Lskip_sme_\@, x1, x2

.Linit_sme_\@:	/* SME register access and priority mapping */
	__check_hvhe .Lcptr_nvhe_sme_\@, x1
	// (h)VHE case
	mrs	x0, cpacr_el1			// Disable SME traps
	orr	x0, x0, #(CPACR_EL1_SMEN_EL0EN | CPACR_EL1_SMEN_EL1EN)
	msr	cpacr_el1, x0
	b	.Lskip_set_cptr_sme_\@

.Lcptr_nvhe_sme_\@: // nVHE case
	mrs	x0, cptr_el2			// Disable SME traps
	bic	x0, x0, #CPTR_EL2_TSM
	msr	cptr_el2, x0
.Lskip_set_cptr_sme_\@:
	isb

	mrs	x1, sctlr_el2
	orr	x1, x1, #SCTLR_ELx_ENTP2	// Disable TPIDR2 traps
	msr	sctlr_el2, x1
	isb

	mov	x0, #0				// SMCR controls

	// Full FP in SM?
	mrs_s	x1, SYS_ID_AA64SMFR0_EL1
	__check_override id_aa64smfr0, ID_AA64SMFR0_EL1_FA64_SHIFT, 1, .Linit_sme_fa64_\@, .Lskip_sme_fa64_\@, x1, x2

.Linit_sme_fa64_\@:
	orr	x0, x0, SMCR_ELx_FA64_MASK
.Lskip_sme_fa64_\@:

	// ZT0 available?
	mrs_s	x1, SYS_ID_AA64SMFR0_EL1
	__check_override id_aa64smfr0, ID_AA64SMFR0_EL1_SMEver_SHIFT, 4, .Linit_sme_zt0_\@, .Lskip_sme_zt0_\@, x1, x2
.Linit_sme_zt0_\@:
	orr	x0, x0, SMCR_ELx_EZT0_MASK
.Lskip_sme_zt0_\@:

	orr	x0, x0, #SMCR_ELx_LEN_MASK	// Enable full SME vector
	msr_s	SYS_SMCR_EL2, x0		// length for EL1.

	mrs_s	x1, SYS_SMIDR_EL1		// Priority mapping supported?
	ubfx	x1, x1, #SMIDR_EL1_SMPS_SHIFT, #1
	cbz	x1, .Lskip_sme_\@

	msr_s	SYS_SMPRIMAP_EL2, xzr		// Make all priorities equal
.Lskip_sme_\@:
.endm
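
/*
 * Unlike init_el2_state, finalise_el2_state runs after the ID register
 * overrides have been parsed, which is why SVE/SME enablement goes
 * through check_override rather than a raw ID register probe.
 */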

#endif /* __ARM_KVM_INIT_H__ */