// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <uapi/linux/psci.h>

#include <kvm/arm_psci.h>

#include <asm/arch_gicv3.h>
#include <asm/cpufeature.h>
#include <asm/extable.h>
#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/fpsimd.h>
#include <asm/debug-monitors.h>
#include <asm/processor.h>
#include <asm/thread_info.h>
#include <asm/vectors.h>

extern struct exception_table_entry __start___kvm_ex_table;
extern struct exception_table_entry __stop___kvm_ex_table;

/* Check whether the FP regs were dirtied while in the host-side run loop: */
static bool __hyp_text update_fp_enabled(struct kvm_vcpu *vcpu)
{
	/*
	 * When the system doesn't support FP/SIMD, we cannot rely on
	 * the _TIF_FOREIGN_FPSTATE flag. However, we always inject an
	 * abort on the very first access to FP and thus we should never
	 * see KVM_ARM64_FP_ENABLED. For added safety, make sure we always
	 * trap the accesses.
	 */
	if (!system_supports_fpsimd() ||
	    vcpu->arch.host_thread_info->flags & _TIF_FOREIGN_FPSTATE)
		vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED |
				      KVM_ARM64_FP_HOST);

	return !!(vcpu->arch.flags & KVM_ARM64_FP_ENABLED);
}

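/*
 * The KVM_ARM64_FP_ENABLED/KVM_ARM64_FP_HOST flags cleared here drive the
 * lazy FP/SIMD switch: __hyp_handle_fpsimd() below saves the host state
 * and loads the guest state only on the first FP/SIMD trap taken after
 * this check has re-enabled trapping.
 */
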
/* Save the 32-bit only FPSIMD system register state */
static void __hyp_text __fpsimd_save_fpexc32(struct kvm_vcpu *vcpu)
{
	if (!vcpu_el1_is_32bit(vcpu))
		return;

	vcpu->arch.ctxt.sys_regs[FPEXC32_EL2] = read_sysreg(fpexc32_el2);
}

static void __hyp_text __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
{
	/*
	 * We are about to set CPTR_EL2.TFP to trap all floating point
	 * register accesses to EL2, however, the ARM ARM clearly states that
	 * traps are only taken to EL2 if the operation would not otherwise
	 * trap to EL1. Therefore, always make sure that for 32-bit guests,
	 * we set FPEXC.EN to prevent traps to EL1, when setting the TFP bit.
	 * If FP/ASIMD is not implemented, FPEXC is UNDEFINED and any access to
	 * it will cause an exception.
	 */
	if (vcpu_el1_is_32bit(vcpu) && system_supports_fpsimd()) {
		write_sysreg(1 << 30, fpexc32_el2);
		isb();
	}
}

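/*
 * Note: 1 << 30 above is FPEXC32_EL2.EN, and the ISB that follows makes
 * sure the write has taken effect before the callers set CPTR_EL2.TFP
 * (or clear CPACR_EL1.FPEN) to arm the EL2 trap.
 */
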
static void __hyp_text __activate_traps_common(struct kvm_vcpu *vcpu)
{
	/* Trap on AArch32 cp15 c15 (impdef sysregs) accesses (EL1 or EL0) */
	write_sysreg(1 << 15, hstr_el2);

	/*
	 * Make sure we trap PMU access from EL0 to EL2. Also sanitize
	 * PMSELR_EL0 to make sure it never contains the cycle
	 * counter, which could make a PMXEVCNTR_EL0 access UNDEF at
	 * EL1 instead of being trapped to EL2.
	 */
	write_sysreg(0, pmselr_el0);
	write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
	write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
}

static void __hyp_text __deactivate_traps_common(void)
{
	write_sysreg(0, hstr_el2);
	write_sysreg(0, pmuserenr_el0);
}

static void activate_traps_vhe(struct kvm_vcpu *vcpu)
{
	u64 val;

	val = read_sysreg(cpacr_el1);
	val |= CPACR_EL1_TTA;
	val &= ~CPACR_EL1_ZEN;
	if (update_fp_enabled(vcpu)) {
		if (vcpu_has_sve(vcpu))
			val |= CPACR_EL1_ZEN;
	} else {
		val &= ~CPACR_EL1_FPEN;
		__activate_traps_fpsimd32(vcpu);
	}

	write_sysreg(val, cpacr_el1);

	write_sysreg(kvm_get_hyp_vector(), vbar_el1);
}
NOKPROBE_SYMBOL(activate_traps_vhe);

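/*
 * VHE drives the FP/SIMD and SVE traps through CPACR_EL1.{FPEN,ZEN},
 * whereas the nVHE path below uses CPTR_EL2.{TFP,TZ}; in both cases the
 * first guest FP access traps to EL2 so that __hyp_handle_fpsimd() can
 * switch the context lazily.
 */
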
static void __hyp_text __activate_traps_nvhe(struct kvm_vcpu *vcpu)
{
	u64 val;

	__activate_traps_common(vcpu);

	val = CPTR_EL2_DEFAULT;
	val |= CPTR_EL2_TTA | CPTR_EL2_TZ;
	if (!update_fp_enabled(vcpu)) {
		val |= CPTR_EL2_TFP;
		__activate_traps_fpsimd32(vcpu);
	}

	write_sysreg(val, cptr_el2);
}

static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
{
	u64 hcr = vcpu->arch.hcr_el2;

	if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM))
		hcr |= HCR_TVM;

	write_sysreg(hcr, hcr_el2);

	if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
		write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2);

	if (has_vhe())
		activate_traps_vhe(vcpu);
	else
		__activate_traps_nvhe(vcpu);
}

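/*
 * With HCR_EL2.TVM set above, the guest's writes to the virtual-memory
 * control registers trap to EL2, where handle_tx2_tvm() emulates them as
 * the Cavium ThunderX2 erratum 219 workaround.
 */
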
static void deactivate_traps_vhe(void)
{
	const char *host_vectors = vectors;
	write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);

	/*
	 * ARM erratum 1165522 requires the actual execution of the above
	 * before we can switch to the EL2/EL0 translation regime used by
	 * the host.
	 */
	asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_1165522));

	write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1);

	if (!arm64_kernel_unmapped_at_el0())
		host_vectors = __this_cpu_read(this_cpu_vector);
	write_sysreg(host_vectors, vbar_el1);
}
NOKPROBE_SYMBOL(deactivate_traps_vhe);

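/*
 * The host vector base is restored on top of whatever kvm_get_hyp_vector()
 * installed in activate_traps_vhe(): the per-cpu this_cpu_vector copy when
 * the kernel is mapped at EL0, and the fixed 'vectors' base otherwise.
 */
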
static void __hyp_text __deactivate_traps_nvhe(void)
{
	u64 mdcr_el2 = read_sysreg(mdcr_el2);

	__deactivate_traps_common();

	mdcr_el2 &= MDCR_EL2_HPMN_MASK;
	mdcr_el2 |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;

	write_sysreg(mdcr_el2, mdcr_el2);
	write_sysreg(HCR_HOST_NVHE_FLAGS, hcr_el2);
	write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
}

static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
{
	/*
	 * If we pended a virtual abort, preserve it until it gets
	 * cleared. See D1.14.3 (Virtual Interrupts) for details, but
	 * the crucial bit is "On taking a vSError interrupt,
	 * HCR_EL2.VSE is cleared to 0."
	 */
	if (vcpu->arch.hcr_el2 & HCR_VSE) {
		vcpu->arch.hcr_el2 &= ~HCR_VSE;
		vcpu->arch.hcr_el2 |= read_sysreg(hcr_el2) & HCR_VSE;
	}

	if (has_vhe())
		deactivate_traps_vhe();
	else
		__deactivate_traps_nvhe();
}

void activate_traps_vhe_load(struct kvm_vcpu *vcpu)
{
	__activate_traps_common(vcpu);
}

void deactivate_traps_vhe_put(void)
{
	u64 mdcr_el2 = read_sysreg(mdcr_el2);

	mdcr_el2 &= MDCR_EL2_HPMN_MASK |
		    MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT |
		    MDCR_EL2_TPMS;

	write_sysreg(mdcr_el2, mdcr_el2);

	__deactivate_traps_common();
}

static void __hyp_text __activate_vm(struct kvm *kvm)
{
	__load_guest_stage2(kvm);
}

static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu)
{
	write_sysreg(0, vttbr_el2);
}

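/*
 * __load_guest_stage2() installs the guest's VMID and stage 2 page table
 * base in VTTBR_EL2; zeroing the register on the way out drops both
 * before the host runs again.
 */
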
/* Save VGICv3 state on non-VHE systems */
static void __hyp_text __hyp_vgic_save_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
		__vgic_v3_save_state(vcpu);
		__vgic_v3_deactivate_traps(vcpu);
	}
}

/* Restore VGICv3 state on non-VHE systems */
static void __hyp_text __hyp_vgic_restore_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
		__vgic_v3_activate_traps(vcpu);
		__vgic_v3_restore_state(vcpu);
	}
}

static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar)
{
	u64 par, tmp;

	/*
	 * Resolve the IPA the hard way using the guest VA.
	 *
	 * Stage-1 translation already validated the memory access
	 * rights. As such, we can use the EL1 translation regime, and
	 * don't have to distinguish between EL0 and EL1 access.
	 *
	 * We do need to save/restore PAR_EL1 though, as we haven't
	 * saved the guest context yet, and we may return early...
	 */
	par = read_sysreg(par_el1);
	if (!__kvm_at("s1e1r", far))
		tmp = read_sysreg(par_el1);
	else
		tmp = SYS_PAR_EL1_F; /* back to the guest */
	write_sysreg(par, par_el1);

	if (unlikely(tmp & SYS_PAR_EL1_F))
		return false; /* Translation failed, back to guest */

	/* Convert PAR to HPFAR format */
	*hpfar = PAR_TO_HPFAR(tmp);
	return true;
}

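/*
 * PAR_TO_HPFAR() (see asm/kvm_arm.h) masks the physical address bits out
 * of the PAR_EL1 value and shifts them right by 8, which lines PA[47:12]
 * up with the FIPA field layout that HPFAR_EL2 expects.
 */
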
static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
{
	u8 ec;
	u64 esr;
	u64 hpfar, far;

	esr = vcpu->arch.fault.esr_el2;
	ec = ESR_ELx_EC(esr);

	if (ec != ESR_ELx_EC_DABT_LOW && ec != ESR_ELx_EC_IABT_LOW)
		return true;

	far = read_sysreg_el2(SYS_FAR);

	/*
	 * The HPFAR can be invalid if the stage 2 fault did not
	 * happen during a stage 1 page table walk (the ESR_EL2.S1PTW
	 * bit is clear) and one of the two following cases are true:
	 * 1. The fault was due to a permission fault
	 * 2. The processor carries errata 834220
	 *
	 * Therefore, for all non S1PTW faults where we either have a
	 * permission fault or the errata workaround is enabled, we
	 * resolve the IPA using the AT instruction.
	 */
	if (!(esr & ESR_ELx_S1PTW) &&
	    (cpus_have_const_cap(ARM64_WORKAROUND_834220) ||
	     (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
		if (!__translate_far_to_hpfar(far, &hpfar))
			return false;
	} else {
		hpfar = read_sysreg(hpfar_el2);
	}

	vcpu->arch.fault.far_el2 = far;
	vcpu->arch.fault.hpfar_el2 = hpfar;
	return true;
}

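/*
 * A false return here propagates through fixup_guest_exit() as "return to
 * the guest": the faulting access is replayed and the translation retried
 * rather than reporting a stale HPFAR to the host.
 */
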
/* Check for an FPSIMD/SVE trap and handle as appropriate */
static bool __hyp_text __hyp_handle_fpsimd(struct kvm_vcpu *vcpu)
{
	bool vhe, sve_guest, sve_host;
	u8 hsr_ec;

	if (!system_supports_fpsimd())
		return false;

	if (system_supports_sve()) {
		sve_guest = vcpu_has_sve(vcpu);
		sve_host = vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE;
		vhe = true;
	} else {
		sve_guest = false;
		sve_host = false;
		vhe = has_vhe();
	}

	hsr_ec = kvm_vcpu_trap_get_class(vcpu);
	if (hsr_ec != ESR_ELx_EC_FP_ASIMD &&
	    hsr_ec != ESR_ELx_EC_SVE)
		return false;

	/* Don't handle SVE traps for non-SVE vcpus here: */
	if (!sve_guest)
		if (hsr_ec != ESR_ELx_EC_FP_ASIMD)
			return false;

	/* Valid trap. Switch the context: */

	if (vhe) {
		u64 reg = read_sysreg(cpacr_el1) | CPACR_EL1_FPEN;

		if (sve_guest)
			reg |= CPACR_EL1_ZEN;

		write_sysreg(reg, cpacr_el1);
	} else {
		write_sysreg(read_sysreg(cptr_el2) & ~(u64)CPTR_EL2_TFP,
			     cptr_el2);
	}

	isb();

	if (vcpu->arch.flags & KVM_ARM64_FP_HOST) {
		/*
		 * In the SVE case, VHE is assumed: it is enforced by
		 * Kconfig and kvm_arch_init().
		 */
		if (sve_host) {
			struct thread_struct *thread = container_of(
				vcpu->arch.host_fpsimd_state,
				struct thread_struct, uw.fpsimd_state);

			sve_save_state(sve_pffr(thread),
				       &vcpu->arch.host_fpsimd_state->fpsr);
		} else {
			__fpsimd_save_state(vcpu->arch.host_fpsimd_state);
		}

		vcpu->arch.flags &= ~KVM_ARM64_FP_HOST;
	}

	if (sve_guest) {
		sve_load_state(vcpu_sve_pffr(vcpu),
			       &vcpu->arch.ctxt.gp_regs.fp_regs.fpsr,
			       sve_vq_from_vl(vcpu->arch.sve_max_vl) - 1);
		write_sysreg_s(vcpu->arch.ctxt.sys_regs[ZCR_EL1], SYS_ZCR_EL12);
	} else {
		__fpsimd_restore_state(&vcpu->arch.ctxt.gp_regs.fp_regs);
	}

	/* Skip restoring fpexc32 for AArch64 guests */
	if (!(read_sysreg(hcr_el2) & HCR_RW))
		write_sysreg(vcpu->arch.ctxt.sys_regs[FPEXC32_EL2],
			     fpexc32_el2);

	vcpu->arch.flags |= KVM_ARM64_FP_ENABLED;

	return true;
}

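/*
 * From here the guest owns the FP/SIMD (and, if enabled, SVE) registers;
 * the guest state loaded above is written back on vcpu_put via
 * kvm_arch_vcpu_put_fp() in arch/arm64/kvm/fpsimd.c.
 */
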
static bool __hyp_text handle_tx2_tvm(struct kvm_vcpu *vcpu)
{
	u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_hsr(vcpu));
	int rt = kvm_vcpu_sys_get_rt(vcpu);
	u64 val = vcpu_get_reg(vcpu, rt);

	/*
	 * The normal sysreg handling code expects to see the traps,
	 * let's not do anything here.
	 */
	if (vcpu->arch.hcr_el2 & HCR_TVM)
		return false;

	switch (sysreg) {
	case SYS_SCTLR_EL1:
		write_sysreg_el1(val, SYS_SCTLR);
		break;
	case SYS_TTBR0_EL1:
		write_sysreg_el1(val, SYS_TTBR0);
		break;
	case SYS_TTBR1_EL1:
		write_sysreg_el1(val, SYS_TTBR1);
		break;
	case SYS_TCR_EL1:
		write_sysreg_el1(val, SYS_TCR);
		break;
	case SYS_ESR_EL1:
		write_sysreg_el1(val, SYS_ESR);
		break;
	case SYS_FAR_EL1:
		write_sysreg_el1(val, SYS_FAR);
		break;
	case SYS_AFSR0_EL1:
		write_sysreg_el1(val, SYS_AFSR0);
		break;
	case SYS_AFSR1_EL1:
		write_sysreg_el1(val, SYS_AFSR1);
		break;
	case SYS_MAIR_EL1:
		write_sysreg_el1(val, SYS_MAIR);
		break;
	case SYS_AMAIR_EL1:
		write_sysreg_el1(val, SYS_AMAIR);
		break;
	case SYS_CONTEXTIDR_EL1:
		write_sysreg_el1(val, SYS_CONTEXTIDR);
		break;
	default:
		return false;
	}

	__kvm_skip_instr(vcpu);
	return true;
}

/*
 * Return true when we were able to fixup the guest exit and should return to
 * the guest, false when we should restore the host state and return to the
 * main run loop.
 */
static bool __hyp_text fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
		vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR);

	/*
	 * We're using the raw exception code in order to only process
	 * the trap if no SError is pending. We will come back to the
	 * same PC once the SError has been injected, and replay the
	 * trapping instruction.
	 */
	if (*exit_code != ARM_EXCEPTION_TRAP)
		goto exit;

	if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
	    kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 &&
	    handle_tx2_tvm(vcpu))
		return true;

	/*
	 * We trap the first access to the FP/SIMD to save the host context
	 * and restore the guest context lazily.
	 * If FP/SIMD is not implemented, handle the trap and inject an
	 * undefined instruction exception to the guest.
	 * Similarly for trapped SVE accesses.
	 */
	if (__hyp_handle_fpsimd(vcpu))
		return true;

	if (!__populate_fault_info(vcpu))
		return true;

	if (static_branch_unlikely(&vgic_v2_cpuif_trap)) {
		bool valid;

		valid = kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_DABT_LOW &&
			kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT &&
			kvm_vcpu_dabt_isvalid(vcpu) &&
			!kvm_vcpu_dabt_isextabt(vcpu) &&
			!kvm_vcpu_abt_iss1tw(vcpu);

		if (valid) {
			int ret = __vgic_v2_perform_cpuif_access(vcpu);

			if (ret == 1)
				return true;

			/* Promote an illegal access to an SError. */
			if (ret == -1)
				*exit_code = ARM_EXCEPTION_EL1_SERROR;

			goto exit;
		}
	}

	if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
	    (kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 ||
	     kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_CP15_32)) {
		int ret = __vgic_v3_perform_cpuif_access(vcpu);

		if (ret == 1)
			return true;
	}

exit:
	/* Return to the host kernel and handle the exit */
	return false;
}

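/*
 * The __vgic_v{2,3}_perform_cpuif_access() calls above return 1 once the
 * access has been emulated; the v2 path also maps -1 (an illegal access)
 * onto an SError for the guest, and anything else falls through to the
 * host exit path.
 */
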
static inline bool __hyp_text __needs_ssbd_off(struct kvm_vcpu *vcpu)
{
	if (!cpus_have_const_cap(ARM64_SSBD))
		return false;

	return !(vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG);
}

static void __hyp_text __set_guest_arch_workaround_state(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ARM64_SSBD
	/*
	 * The host runs with the workaround always present. If the
	 * guest wants it disabled, so be it...
	 */
	if (__needs_ssbd_off(vcpu) &&
	    __hyp_this_cpu_read(arm64_ssbd_callback_required))
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 0, NULL);
#endif
}

static void __hyp_text __set_host_arch_workaround_state(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ARM64_SSBD
	/*
	 * If the guest has disabled the workaround, bring it back on.
	 */
	if (__needs_ssbd_off(vcpu) &&
	    __hyp_this_cpu_read(arm64_ssbd_callback_required))
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 1, NULL);
#endif
}

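/*
 * ARM_SMCCC_ARCH_WORKAROUND_2 takes the desired mitigation state as its
 * first argument: 0 turns the firmware SSBD workaround off for this CPU
 * on guest entry, 1 re-arms it on the way back to the host.
 */
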
/**
 * Disable host events, enable guest events
 */
static bool __hyp_text __pmu_switch_to_guest(struct kvm_cpu_context *host_ctxt)
{
	struct kvm_host_data *host;
	struct kvm_pmu_events *pmu;

	host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
	pmu = &host->pmu_events;

	if (pmu->events_host)
		write_sysreg(pmu->events_host, pmcntenclr_el0);

	if (pmu->events_guest)
		write_sysreg(pmu->events_guest, pmcntenset_el0);

	return (pmu->events_host || pmu->events_guest);
}

/**
 * Disable guest events, enable host events
 */
static void __hyp_text __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt)
{
	struct kvm_host_data *host;
	struct kvm_pmu_events *pmu;

	host = container_of(host_ctxt, struct kvm_host_data, host_ctxt);
	pmu = &host->pmu_events;

	if (pmu->events_guest)
		write_sysreg(pmu->events_guest, pmcntenclr_el0);

	if (pmu->events_host)
		write_sysreg(pmu->events_host, pmcntenset_el0);
}

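/*
 * PMCNTENSET_EL0/PMCNTENCLR_EL0 have set/clear write semantics, so each
 * write above only flips the counters named in the event mask and leaves
 * all other counters untouched.
 */
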
/* Switch to the guest for VHE systems running in EL2 */
int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt;
	struct kvm_cpu_context *guest_ctxt;
	u64 exit_code;

	host_ctxt = vcpu->arch.host_cpu_context;
	host_ctxt->__hyp_running_vcpu = vcpu;
	guest_ctxt = &vcpu->arch.ctxt;

	sysreg_save_host_state_vhe(host_ctxt);

	/*
	 * ARM erratum 1165522 requires us to configure both stage 1 and
	 * stage 2 translation for the guest context before we clear
	 * HCR_EL2.TGE.
	 *
	 * We have already configured the guest's stage 1 translation in
	 * kvm_vcpu_load_sysregs above. We must now call __activate_vm
	 * before __activate_traps, because __activate_vm configures
	 * stage 2 translation, and __activate_traps clears HCR_EL2.TGE
	 * (among other things).
	 */
	__activate_vm(vcpu->kvm);
	__activate_traps(vcpu);

	sysreg_restore_guest_state_vhe(guest_ctxt);
	__debug_switch_to_guest(vcpu);

	__set_guest_arch_workaround_state(vcpu);

	do {
		/* Jump in the fire! */
		exit_code = __guest_enter(vcpu, host_ctxt);

		/* And we're baaack! */
	} while (fixup_guest_exit(vcpu, &exit_code));

	__set_host_arch_workaround_state(vcpu);

	sysreg_save_guest_state_vhe(guest_ctxt);

	__deactivate_traps(vcpu);

	sysreg_restore_host_state_vhe(host_ctxt);

	if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED)
		__fpsimd_save_fpexc32(vcpu);

	__debug_switch_to_host(vcpu);

	return exit_code;
}
NOKPROBE_SYMBOL(kvm_vcpu_run_vhe);

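/*
 * The exit_code returned here is one of the ARM_EXCEPTION_* values
 * produced by the __guest_enter()/__guest_exit() assembly and is decoded
 * by handle_exit() once we are back in the main run loop.
 */
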
/* Switch to the guest for legacy non-VHE systems */
int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt;
	struct kvm_cpu_context *guest_ctxt;
	bool pmu_switch_needed;
	u64 exit_code;

	/*
	 * Having IRQs masked via PMR when entering the guest means the GIC
	 * will not signal the CPU of interrupts of lower priority, and the
	 * only way to get out will be via guest exceptions.
	 * Naturally, we want to avoid this.
	 */
	if (system_uses_irq_prio_masking()) {
		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
		dsb(sy);
	}

	vcpu = kern_hyp_va(vcpu);

	host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
	host_ctxt->__hyp_running_vcpu = vcpu;
	guest_ctxt = &vcpu->arch.ctxt;

	pmu_switch_needed = __pmu_switch_to_guest(host_ctxt);

	__sysreg_save_state_nvhe(host_ctxt);

	/*
	 * We must flush and disable the SPE buffer for nVHE, as
	 * the translation regime (EL1&0) is going to be loaded with
	 * that of the guest. And we must do this before we change the
	 * translation regime to EL2 (via MDCR_EL2_E2PB == 0) and
	 * before we load guest Stage1.
	 */
	__debug_save_host_buffers_nvhe(vcpu);

	__activate_vm(kern_hyp_va(vcpu->kvm));
	__activate_traps(vcpu);

	__hyp_vgic_restore_state(vcpu);
	__timer_enable_traps(vcpu);

	/*
	 * We must restore the 32-bit state before the sysregs, thanks
	 * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
	 */
	__sysreg32_restore_state(vcpu);
	__sysreg_restore_state_nvhe(guest_ctxt);
	__debug_switch_to_guest(vcpu);

	__set_guest_arch_workaround_state(vcpu);

	do {
		/* Jump in the fire! */
		exit_code = __guest_enter(vcpu, host_ctxt);

		/* And we're baaack! */
	} while (fixup_guest_exit(vcpu, &exit_code));

	__set_host_arch_workaround_state(vcpu);

	__sysreg_save_state_nvhe(guest_ctxt);
	__sysreg32_save_state(vcpu);
	__timer_disable_traps(vcpu);
	__hyp_vgic_save_state(vcpu);

	__deactivate_traps(vcpu);
	__deactivate_vm(vcpu);

	__sysreg_restore_state_nvhe(host_ctxt);

	if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED)
		__fpsimd_save_fpexc32(vcpu);

	__debug_switch_to_host(vcpu);

	/*
	 * This must come after restoring the host sysregs, since a non-VHE
	 * system may enable SPE here and make use of the TTBRs.
	 */
	__debug_restore_host_buffers_nvhe(vcpu);

	if (pmu_switch_needed)
		__pmu_switch_to_host(host_ctxt);

	/* Returning to host will clear PSR.I, remask PMR if needed */
	if (system_uses_irq_prio_masking())
		gic_write_pmr(GIC_PRIO_IRQOFF);

	return exit_code;
}

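/*
 * The ordering above is load-bearing: host sysregs and the SPE buffer are
 * saved before stage 2 and the traps are activated, and the teardown path
 * undoes each step in exactly the reverse order before the host resumes.
 */
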
static const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n";

static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par,
					     struct kvm_cpu_context *__host_ctxt)
{
	struct kvm_vcpu *vcpu;
	unsigned long str_va;

	vcpu = __host_ctxt->__hyp_running_vcpu;

	if (read_sysreg(vttbr_el2)) {
		__timer_disable_traps(vcpu);
		__deactivate_traps(vcpu);
		__deactivate_vm(vcpu);
		__sysreg_restore_state_nvhe(__host_ctxt);
	}

	/*
	 * Force the panic string to be loaded from the literal pool,
	 * making sure it is a kernel address and not a PC-relative
	 * location.
	 */
	asm volatile("ldr %0, =%1" : "=r" (str_va) : "S" (__hyp_panic_string));

	__hyp_do_panic(str_va,
		       spsr, elr,
		       read_sysreg(esr_el2), read_sysreg_el2(SYS_FAR),
		       read_sysreg(hpfar_el2), par, vcpu);
}

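/*
 * The "S" constraint accepts a symbol reference, and ldr with an =sym
 * literal loads the absolute kernel address from the literal pool; a
 * PC-relative sequence would instead yield the hyp alias of the string.
 */
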
static void __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par,
				 struct kvm_cpu_context *host_ctxt)
{
	struct kvm_vcpu *vcpu;
	vcpu = host_ctxt->__hyp_running_vcpu;

	__deactivate_traps(vcpu);
	sysreg_restore_host_state_vhe(host_ctxt);

	panic(__hyp_panic_string,
	      spsr, elr,
	      read_sysreg_el2(SYS_ESR), read_sysreg_el2(SYS_FAR),
	      read_sysreg(hpfar_el2), par, vcpu);
}
NOKPROBE_SYMBOL(__hyp_call_panic_vhe);

void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
{
	u64 spsr = read_sysreg_el2(SYS_SPSR);
	u64 elr = read_sysreg_el2(SYS_ELR);
	u64 par = read_sysreg(par_el1);

	if (!has_vhe())
		__hyp_call_panic_nvhe(spsr, elr, par, host_ctxt);
	else
		__hyp_call_panic_vhe(spsr, elr, par, host_ctxt);

	unreachable();
}

asmlinkage void __hyp_text kvm_unexpected_el2_exception(void)
{
	unsigned long addr, fixup;
	struct kvm_cpu_context *host_ctxt;
	struct exception_table_entry *entry, *end;
	unsigned long elr_el2 = read_sysreg(elr_el2);

	entry = hyp_symbol_addr(__start___kvm_ex_table);
	end = hyp_symbol_addr(__stop___kvm_ex_table);
	host_ctxt = &__hyp_this_cpu_ptr(kvm_host_data)->host_ctxt;

	while (entry < end) {
		addr = (unsigned long)&entry->insn + entry->insn;
		fixup = (unsigned long)&entry->fixup + entry->fixup;

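		/*
		 * insn and fixup are stored as self-relative offsets (see
		 * struct exception_table_entry), so adding each field's own
		 * address recovers the absolute addresses compared against
		 * ELR_EL2 below.
		 */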
		if (addr != elr_el2) {
			entry++;
			continue;
		}

		write_sysreg(fixup, elr_el2);
		return;
	}

	hyp_panic(host_ctxt);
}