/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/arm-smccc.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <uapi/linux/psci.h>

#include <kvm/arm_psci.h>

#include <asm/extable.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/fpsimd.h>

extern struct exception_table_entry __start___kvm_ex_table;
extern struct exception_table_entry __stop___kvm_ex_table;
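
/*
 * A note on the pattern used throughout this file: code marked __hyp_text is
 * mapped into the EL2 (hyp) address space and must not touch kernel-only
 * data. hyp_alternate_select() builds a tiny helper that returns one of two
 * function pointers depending on a CPU capability (here mostly
 * ARM64_HAS_VIRT_HOST_EXTN, i.e. VHE vs. non-VHE), which is why callers use
 * the f()() double-call form: the first call picks the variant, the second
 * invokes it.
 */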
static bool __hyp_text __fpsimd_enabled_nvhe(void)
{
	return !(read_sysreg(cptr_el2) & CPTR_EL2_TFP);
}

static bool __hyp_text __fpsimd_enabled_vhe(void)
{
	return !!(read_sysreg(cpacr_el1) & CPACR_EL1_FPEN);
}

static hyp_alternate_select(__fpsimd_is_enabled,
			    __fpsimd_enabled_nvhe, __fpsimd_enabled_vhe,
			    ARM64_HAS_VIRT_HOST_EXTN);

bool __hyp_text __fpsimd_enabled(void)
{
	return __fpsimd_is_enabled()();
}
static void __hyp_text __activate_traps_vhe(void)
{
	u64 val;

	val = read_sysreg(cpacr_el1);
	val |= CPACR_EL1_TTA;
	val &= ~CPACR_EL1_FPEN;
	write_sysreg(val, cpacr_el1);

	write_sysreg(kvm_get_hyp_vector(), vbar_el1);
}

static void __hyp_text __activate_traps_nvhe(void)
{
	u64 val;

	val = CPTR_EL2_DEFAULT;
	val |= CPTR_EL2_TTA | CPTR_EL2_TFP;
	write_sysreg(val, cptr_el2);
}

static hyp_alternate_select(__activate_traps_arch,
			    __activate_traps_nvhe, __activate_traps_vhe,
			    ARM64_HAS_VIRT_HOST_EXTN);
static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
{
	u64 val;

	/*
	 * We are about to set CPTR_EL2.TFP to trap all floating point
	 * register accesses to EL2, however, the ARM ARM clearly states that
	 * traps are only taken to EL2 if the operation would not otherwise
	 * trap to EL1. Therefore, always make sure that for 32-bit guests,
	 * we set FPEXC.EN to prevent traps to EL1, when setting the TFP bit.
	 * If FP/ASIMD is not implemented, FPEXC is UNDEFINED and any access to
	 * it will cause an exception.
	 */
	val = vcpu->arch.hcr_el2;

	if (!(val & HCR_RW) && system_supports_fpsimd()) {
		write_sysreg(1 << 30, fpexc32_el2);
		isb();
	}
	write_sysreg(val, hcr_el2);
	/* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */
	write_sysreg(1 << 15, hstr_el2);
	/*
	 * Make sure we trap PMU access from EL0 to EL2. Also sanitize
	 * PMSELR_EL0 to make sure it never contains the cycle
	 * counter, which could make a PMXEVCNTR_EL0 access UNDEF at
	 * EL1 instead of being trapped to EL2.
	 */
	write_sysreg(0, pmselr_el0);
	write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
	write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
	__activate_traps_arch()();
}
static void __hyp_text __deactivate_traps_vhe(void)
{
	extern char vectors[];	/* kernel exception vectors */
	u64 mdcr_el2 = read_sysreg(mdcr_el2);

	mdcr_el2 &= MDCR_EL2_HPMN_MASK |
		    MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT |
		    MDCR_EL2_TPMS;

	write_sysreg(mdcr_el2, mdcr_el2);
	write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
	write_sysreg(CPACR_EL1_FPEN, cpacr_el1);
	write_sysreg(vectors, vbar_el1);
}

static void __hyp_text __deactivate_traps_nvhe(void)
{
	u64 mdcr_el2 = read_sysreg(mdcr_el2);

	mdcr_el2 &= MDCR_EL2_HPMN_MASK;
	mdcr_el2 |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;

	write_sysreg(mdcr_el2, mdcr_el2);
	write_sysreg(HCR_HOST_NVHE_FLAGS, hcr_el2);
	write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
}

static hyp_alternate_select(__deactivate_traps_arch,
			    __deactivate_traps_nvhe, __deactivate_traps_vhe,
			    ARM64_HAS_VIRT_HOST_EXTN);
static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
{
	/*
	 * If we pended a virtual abort, preserve it until it gets
	 * cleared. See D1.14.3 (Virtual Interrupts) for details, but
	 * the crucial bit is "On taking a vSError interrupt,
	 * HCR_EL2.VSE is cleared to 0."
	 */
	if (vcpu->arch.hcr_el2 & HCR_VSE)
		vcpu->arch.hcr_el2 = read_sysreg(hcr_el2);

	__deactivate_traps_arch()();
	write_sysreg(0, hstr_el2);
	write_sysreg(0, pmuserenr_el0);
}
static void __hyp_text __activate_vm(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
	write_sysreg(kvm->arch.vttbr, vttbr_el2);
}

static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu)
{
	write_sysreg(0, vttbr_el2);
}
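
/*
 * vGIC world switch: a static key selects between the GICv3 system-register
 * interface and the GICv2 MMIO interface. The HCR_EL2 virtual interrupt
 * override bits (HCR_INT_OVERRIDE plus any pending irq_lines) are only
 * driven while the guest context is resident, and cleared again on save.
 */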
static void __hyp_text __vgic_save_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
		__vgic_v3_save_state(vcpu);
	else
		__vgic_v2_save_state(vcpu);

	write_sysreg(read_sysreg(hcr_el2) & ~HCR_INT_OVERRIDE, hcr_el2);
}

static void __hyp_text __vgic_restore_state(struct kvm_vcpu *vcpu)
{
	u64 val;

	val = read_sysreg(hcr_el2);
	val |= HCR_INT_OVERRIDE;
	val |= vcpu->arch.irq_lines;
	write_sysreg(val, hcr_el2);

	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
		__vgic_v3_restore_state(vcpu);
	else
		__vgic_v2_restore_state(vcpu);
}
static bool __hyp_text __true_value(void)
{
	return true;
}

static bool __hyp_text __false_value(void)
{
	return false;
}

static hyp_alternate_select(__check_arm_834220,
			    __false_value, __true_value,
			    ARM64_WORKAROUND_834220);
static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar)
{
	u64 par, tmp;

	/*
	 * Resolve the IPA the hard way using the guest VA.
	 *
	 * Stage-1 translation already validated the memory access
	 * rights. As such, we can use the EL1 translation regime, and
	 * don't have to distinguish between EL0 and EL1 access.
	 *
	 * We do need to save/restore PAR_EL1 though, as we haven't
	 * saved the guest context yet, and we may return early...
	 */
	par = read_sysreg(par_el1);
	if (!__kvm_at("s1e1r", far))
		tmp = read_sysreg(par_el1);
	else
		tmp = 1; /* back to the guest */
	write_sysreg(par, par_el1);

	if (unlikely(tmp & 1))
		return false; /* Translation failed, back to guest */

	/* Convert PAR to HPFAR format */
	*hpfar = ((tmp >> 12) & ((1UL << 36) - 1)) << 4;
	return true;
}
static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
{
	u64 esr = read_sysreg_el2(esr);
	u8 ec = ESR_ELx_EC(esr);
	u64 hpfar, far;

	vcpu->arch.fault.esr_el2 = esr;

	if (ec != ESR_ELx_EC_DABT_LOW && ec != ESR_ELx_EC_IABT_LOW)
		return true;

	far = read_sysreg_el2(far);

	/*
	 * The HPFAR can be invalid if the stage 2 fault did not
	 * happen during a stage 1 page table walk (the ESR_EL2.S1PTW
	 * bit is clear) and one of the two following cases are true:
	 *   1. The fault was due to a permission fault
	 *   2. The processor carries errata 834220
	 *
	 * Therefore, for all non S1PTW faults where we either have a
	 * permission fault or the errata workaround is enabled, we
	 * resolve the IPA using the AT instruction.
	 */
	if (!(esr & ESR_ELx_S1PTW) &&
	    (__check_arm_834220()() || (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
		if (!__translate_far_to_hpfar(far, &hpfar))
			return false;
	} else {
		hpfar = read_sysreg(hpfar_el2);
	}

	vcpu->arch.fault.far_el2 = far;
	vcpu->arch.fault.hpfar_el2 = hpfar;
	return true;
}
static void __hyp_text __skip_instr(struct kvm_vcpu *vcpu)
{
	*vcpu_pc(vcpu) = read_sysreg_el2(elr);

	if (vcpu_mode_is_32bit(vcpu)) {
		vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(spsr);
		kvm_skip_instr32(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
		write_sysreg_el2(vcpu->arch.ctxt.gp_regs.regs.pstate, spsr);
	} else {
		*vcpu_pc(vcpu) += 4;
	}

	write_sysreg_el2(*vcpu_pc(vcpu), elr);
}
static inline bool __hyp_text __needs_ssbd_off(struct kvm_vcpu *vcpu)
{
	if (!cpus_have_const_cap(ARM64_SSBD))
		return false;

	return !(vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG);
}
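
/*
 * Speculative Store Bypass Disable (SSBD) handling: the host always runs
 * with the firmware mitigation enabled, but a guest may request (via the
 * ARM_SMCCC_ARCH_WORKAROUND_2 interface) to run without it. The two helpers
 * below toggle the mitigation around guest entry and exit, but only when the
 * firmware callback is actually required on this CPU.
 */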
static void __hyp_text __set_guest_arch_workaround_state(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ARM64_SSBD
	/*
	 * The host runs with the workaround always present. If the
	 * guest wants it disabled, so be it...
	 */
	if (__needs_ssbd_off(vcpu) &&
	    __hyp_this_cpu_read(arm64_ssbd_callback_required))
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 0, NULL);
#endif
}

static void __hyp_text __set_host_arch_workaround_state(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ARM64_SSBD
	/*
	 * If the guest has disabled the workaround, bring it back on.
	 */
	if (__needs_ssbd_off(vcpu) &&
	    __hyp_this_cpu_read(arm64_ssbd_callback_required))
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 1, NULL);
#endif
}
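
/*
 * __kvm_vcpu_run() is the EL2 world switch: save host state, install the
 * guest's traps, stage 2 translation and vGIC/timer state, enter the guest
 * via __guest_enter(), then on exit either handle the trap here (fault info,
 * GIC cpuif emulation) or hand the exit code back to the host.
 */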
int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt;
	struct kvm_cpu_context *guest_ctxt;
	bool fp_enabled;
	u64 exit_code;

	vcpu = kern_hyp_va(vcpu);

	host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
	host_ctxt->__hyp_running_vcpu = vcpu;
	guest_ctxt = &vcpu->arch.ctxt;

	__sysreg_save_host_state(host_ctxt);
	__debug_cond_save_host_state(vcpu);

	__activate_traps(vcpu);
	__activate_vm(vcpu);

	__vgic_restore_state(vcpu);
	__timer_restore_state(vcpu);

	/*
	 * We must restore the 32-bit state before the sysregs, thanks
	 * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
	 */
	__sysreg32_restore_state(vcpu);
	__sysreg_restore_guest_state(guest_ctxt);
	__debug_restore_state(vcpu, kern_hyp_va(vcpu->arch.debug_ptr), guest_ctxt);

	__set_guest_arch_workaround_state(vcpu);

	/* Jump in the fire! */
again:
	exit_code = __guest_enter(vcpu, host_ctxt);
	/* And we're baaack! */

	/*
	 * We're using the raw exception code in order to only process
	 * the trap if no SError is pending. We will come back to the
	 * same PC once the SError has been injected, and replay the
	 * trapping instruction.
	 */
	if (exit_code == ARM_EXCEPTION_TRAP && !__populate_fault_info(vcpu))
		goto again;

	if (static_branch_unlikely(&vgic_v2_cpuif_trap) &&
	    exit_code == ARM_EXCEPTION_TRAP) {
		bool valid;

		valid = kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_DABT_LOW &&
			kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT &&
			kvm_vcpu_dabt_isvalid(vcpu) &&
			!kvm_vcpu_dabt_isextabt(vcpu) &&
			!kvm_vcpu_dabt_iss1tw(vcpu);

		if (valid) {
			int ret = __vgic_v2_perform_cpuif_access(vcpu);

			if (ret == 1) {
				__skip_instr(vcpu);
				goto again;
			}

			if (ret == -1) {
				/* Promote an illegal access to an SError */
				__skip_instr(vcpu);
				exit_code = ARM_EXCEPTION_EL1_SERROR;
			}

			/* 0 falls through to be handled out of EL2 */
		}
	}
	if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
	    exit_code == ARM_EXCEPTION_TRAP &&
	    (kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 ||
	     kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_CP15_32)) {
		int ret = __vgic_v3_perform_cpuif_access(vcpu);

		if (ret == 1) {
			__skip_instr(vcpu);
			goto again;
		}

		/* 0 falls through to be handled out of EL2 */
	}

	__set_host_arch_workaround_state(vcpu);

	fp_enabled = __fpsimd_enabled();

	__sysreg_save_guest_state(guest_ctxt);
	__sysreg32_save_state(vcpu);
	__timer_save_state(vcpu);
	__vgic_save_state(vcpu);

	__deactivate_traps(vcpu);
	__deactivate_vm(vcpu);

	__sysreg_restore_host_state(host_ctxt);

	if (fp_enabled) {
		__fpsimd_save_state(&guest_ctxt->gp_regs.fp_regs);
		__fpsimd_restore_state(&host_ctxt->gp_regs.fp_regs);
	}

	__debug_save_state(vcpu, kern_hyp_va(vcpu->arch.debug_ptr), guest_ctxt);
	/*
	 * This must come after restoring the host sysregs, since a non-VHE
	 * system may enable SPE here and make use of the TTBRs.
	 */
	__debug_cond_restore_host_state(vcpu);

	return exit_code;
}
static const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n";

static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par,
					     struct kvm_vcpu *vcpu)
{
	unsigned long str_va;

	/*
	 * Force the panic string to be loaded from the literal pool,
	 * making sure it is a kernel address and not a PC-relative
	 * location.
	 */
	asm volatile("ldr %0, =%1" : "=r" (str_va) : "S" (__hyp_panic_string));

	__hyp_do_panic(str_va,
		       spsr, elr,
		       read_sysreg(esr_el2), read_sysreg_el2(far),
		       read_sysreg(hpfar_el2), par, vcpu);
}
static void __hyp_text __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par,
					    struct kvm_vcpu *vcpu)
{
	panic(__hyp_panic_string,
	      spsr, elr,
	      read_sysreg_el2(esr), read_sysreg_el2(far),
	      read_sysreg(hpfar_el2), par, vcpu);
}

static hyp_alternate_select(__hyp_call_panic,
			    __hyp_call_panic_nvhe, __hyp_call_panic_vhe,
			    ARM64_HAS_VIRT_HOST_EXTN);
void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
{
	struct kvm_vcpu *vcpu = NULL;

	u64 spsr = read_sysreg_el2(spsr);
	u64 elr = read_sysreg_el2(elr);
	u64 par = read_sysreg(par_el1);

	if (read_sysreg(vttbr_el2)) {
		vcpu = host_ctxt->__hyp_running_vcpu;
		__timer_save_state(vcpu);
		__deactivate_traps(vcpu);
		__deactivate_vm(vcpu);
		__sysreg_restore_host_state(host_ctxt);
	}

	/* Call panic for real */
	__hyp_call_panic()(spsr, elr, par, vcpu);

	unreachable();
}
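
/*
 * Fixup handler for unexpected exceptions taken at EL2: walk the dedicated
 * __kvm_ex_table (whose entries store offsets relative to their own address,
 * like the regular kernel extable) and, if the faulting ELR_EL2 matches an
 * entry, resume at its fixup address; otherwise there is nothing sensible
 * left to do but hyp_panic().
 */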
asmlinkage void __hyp_text kvm_unexpected_el2_exception(void)
{
	unsigned long addr, fixup;
	struct kvm_cpu_context *host_ctxt;
	struct exception_table_entry *entry, *end;
	unsigned long elr_el2 = read_sysreg(elr_el2);

	entry = hyp_symbol_addr(__start___kvm_ex_table);
	end = hyp_symbol_addr(__stop___kvm_ex_table);
	host_ctxt = __hyp_this_cpu_ptr(kvm_host_cpu_state);

	while (entry < end) {
		addr = (unsigned long)&entry->insn + entry->insn;
		fixup = (unsigned long)&entry->fixup + entry->fixup;

		if (addr != elr_el2) {
			entry++;
			continue;
		}

		write_sysreg(fixup, elr_el2);
		return;
	}

	hyp_panic(host_ctxt);
}