/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/arm-smccc.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <uapi/linux/psci.h>

#include <kvm/arm_psci.h>

#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/uaccess.h>
#include <asm/vectors.h>

extern struct exception_table_entry __start___kvm_ex_table;
extern struct exception_table_entry __stop___kvm_ex_table;

static bool __hyp_text __fpsimd_enabled_nvhe(void)
{
	return !(read_sysreg(cptr_el2) & CPTR_EL2_TFP);
}

static bool __hyp_text __fpsimd_enabled_vhe(void)
{
	return !!(read_sysreg(cpacr_el1) & CPACR_EL1_FPEN);
}
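
/*
 * hyp_alternate_select() builds a helper returning one of two function
 * pointers, selected once at boot by the alternatives framework based on
 * a CPU capability. A rough sketch of the idea (not the exact kernel
 * macro) for the instance below:
 *
 *   static bool (*__fpsimd_is_enabled(void))(void)
 *   {
 *           // Patched at boot: returns __fpsimd_enabled_vhe when
 *           // ARM64_HAS_VIRT_HOST_EXTN is present, the nvhe variant
 *           // otherwise.
 *           return __fpsimd_enabled_nvhe;
 *   }
 *
 * Hence the double-call syntax at each use site: the first call selects
 * the variant, the second invokes it.
 */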

static hyp_alternate_select(__fpsimd_is_enabled,
			    __fpsimd_enabled_nvhe, __fpsimd_enabled_vhe,
			    ARM64_HAS_VIRT_HOST_EXTN);

bool __hyp_text __fpsimd_enabled(void)
{
	return __fpsimd_is_enabled()();
}
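
/*
 * Lazy FPSIMD switching: __activate_traps() arms the FP trap
 * (CPTR_EL2.TFP set on non-VHE, CPACR_EL1.FPEN cleared on VHE), so the
 * guest's first FP/SIMD access traps and the hyp entry code (not in this
 * file) switches the register file and disables the trap. If
 * __fpsimd_enabled() still reports the trap armed at exit time, the
 * guest never touched FPSIMD and __kvm_vcpu_run() can skip the
 * save/restore.
 */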

static void __hyp_text __activate_traps_vhe(void)
{
	u64 val;

	val = read_sysreg(cpacr_el1);
	val |= CPACR_EL1_TTA;
	val &= ~CPACR_EL1_FPEN;
	write_sysreg(val, cpacr_el1);

	write_sysreg(kvm_get_hyp_vector(), vbar_el1);
}

static void __hyp_text __activate_traps_nvhe(void)
{
	u64 val;

	val = CPTR_EL2_DEFAULT;
	val |= CPTR_EL2_TTA | CPTR_EL2_TFP;
	write_sysreg(val, cptr_el2);
}
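
/*
 * Note the inverted polarity of the two variants above: on VHE,
 * CPACR_EL1 is an enable register (FPEN cleared => FP/SIMD traps), while
 * on non-VHE, CPTR_EL2 is a trap register (TFP/TTA set => the access
 * traps).
 */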

static hyp_alternate_select(__activate_traps_arch,
			    __activate_traps_nvhe, __activate_traps_vhe,
			    ARM64_HAS_VIRT_HOST_EXTN);

static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
{
	u64 val;

	/*
	 * We are about to set CPTR_EL2.TFP to trap all floating point
	 * register accesses to EL2, however, the ARM ARM clearly states that
	 * traps are only taken to EL2 if the operation would not otherwise
	 * trap to EL1. Therefore, always make sure that for 32-bit guests,
	 * we set FPEXC.EN to prevent traps to EL1, when setting the TFP bit.
	 */
	val = vcpu->arch.hcr_el2;
	if (!(val & HCR_RW)) {
		write_sysreg(1 << 30, fpexc32_el2);
		isb();
	}
	write_sysreg(val, hcr_el2);

	/* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */
	write_sysreg(1 << 15, hstr_el2);
	/*
	 * Make sure we trap PMU access from EL0 to EL2. Also sanitize
	 * PMSELR_EL0 to make sure it never contains the cycle
	 * counter, which could make a PMXEVCNTR_EL0 access UNDEF at
	 * EL1 instead of being trapped to EL2.
	 */
	write_sysreg(0, pmselr_el0);
	write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
	write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
	__activate_traps_arch()();
}

static void __hyp_text __deactivate_traps_vhe(void)
{
	const char *host_vectors = vectors;

	write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
	write_sysreg(CPACR_EL1_FPEN, cpacr_el1);

	if (!arm64_kernel_unmapped_at_el0())
		host_vectors = __this_cpu_read(this_cpu_vector);
	write_sysreg(host_vectors, vbar_el1);
}
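
/*
 * On the VHE path above, the host's VBAR_EL1 must be restored by hand.
 * When the kernel does not run with KPTI, the per-cpu this_cpu_vector
 * may point at a vector slot carrying a Spectre-BHB mitigation sequence,
 * so it takes precedence over the plain "vectors" base (this follows the
 * Spectre-BHB backports; the vector selection itself happens elsewhere).
 */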

static void __hyp_text __deactivate_traps_nvhe(void)
{
	write_sysreg(HCR_HOST_NVHE_FLAGS, hcr_el2);
	write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
}

static hyp_alternate_select(__deactivate_traps_arch,
			    __deactivate_traps_nvhe, __deactivate_traps_vhe,
			    ARM64_HAS_VIRT_HOST_EXTN);

static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
{
	/*
	 * If we pended a virtual abort, preserve it until it gets
	 * cleared. See D1.14.3 (Virtual Interrupts) for details, but
	 * the crucial bit is "On taking a vSError interrupt,
	 * HCR_EL2.VSE is cleared to 0."
	 */
	if (vcpu->arch.hcr_el2 & HCR_VSE)
		vcpu->arch.hcr_el2 = read_sysreg(hcr_el2);

	__deactivate_traps_arch()();
	write_sysreg(0, hstr_el2);
	write_sysreg(read_sysreg(mdcr_el2) & MDCR_EL2_HPMN_MASK, mdcr_el2);
	write_sysreg(0, pmuserenr_el0);
}

static void __hyp_text __activate_vm(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
	write_sysreg(kvm->arch.vttbr, vttbr_el2);
}

static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu)
{
	write_sysreg(0, vttbr_el2);
}

static void __hyp_text __vgic_save_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
		__vgic_v3_save_state(vcpu);
	else
		__vgic_v2_save_state(vcpu);

	write_sysreg(read_sysreg(hcr_el2) & ~HCR_INT_OVERRIDE, hcr_el2);
}
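
/*
 * HCR_INT_OVERRIDE is the IMO/FMO routing override: while the guest
 * runs, physical IRQs and FIQs are taken to EL2 so the host stays in
 * control of interrupts. The restore path below also merges in
 * vcpu->arch.irq_lines, the pending virtual interrupt bits
 * (HCR_EL2.VI/VF) to inject into the guest; the save path above clears
 * the override again on the way back to the host.
 */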

static void __hyp_text __vgic_restore_state(struct kvm_vcpu *vcpu)
{
	u64 val;

	val = read_sysreg(hcr_el2);
	val |= HCR_INT_OVERRIDE;
	val |= vcpu->arch.irq_lines;
	write_sysreg(val, hcr_el2);

	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
		__vgic_v3_restore_state(vcpu);
	else
		__vgic_v2_restore_state(vcpu);
}

static bool __hyp_text __true_value(void)
{
	return true;
}

static bool __hyp_text __false_value(void)
{
	return false;
}

static hyp_alternate_select(__check_arm_834220,
			    __false_value, __true_value,
			    ARM64_WORKAROUND_834220);
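
/*
 * __true_value/__false_value plus hyp_alternate_select turn the
 * ARM64_WORKAROUND_834220 capability into a patched call returning a
 * constant, so no kernel data structure has to be dereferenced from hyp
 * context just to test for the erratum.
 */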

static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar)
{
	u64 par, tmp;

	/*
	 * Resolve the IPA the hard way using the guest VA.
	 *
	 * Stage-1 translation already validated the memory access
	 * rights. As such, we can use the EL1 translation regime, and
	 * don't have to distinguish between EL0 and EL1 access.
	 *
	 * We do need to save/restore PAR_EL1 though, as we haven't
	 * saved the guest context yet, and we may return early...
	 */
	par = read_sysreg(par_el1);
	if (!__kvm_at("s1e1r", far))
		tmp = read_sysreg(par_el1);
	else
		tmp = 1; /* back to the guest */
	write_sysreg(par, par_el1);

	if (unlikely(tmp & 1))
		return false; /* Translation failed, back to guest */

	/* Convert PAR to HPFAR format */
	*hpfar = ((tmp >> 12) & ((1UL << 36) - 1)) << 4;
	return true;
}
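
/*
 * Worked example of the PAR->HPFAR conversion above, assuming the AT
 * walk succeeded (bit 0 of PAR_EL1 clear) and a hypothetical physical
 * address of 0x812345000:
 *
 *   tmp = PAR_EL1              = 0x0000000812345000
 *   PA[47:12] = tmp >> 12      = 0x812345    (masked to 36 bits)
 *   *hpfar = PA[47:12] << 4    = 0x8123450   (HPFAR_EL2.FIPA format)
 */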

static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
{
	u64 esr = read_sysreg_el2(esr);
	u8 ec = ESR_ELx_EC(esr);
	u64 hpfar, far;

	vcpu->arch.fault.esr_el2 = esr;

	if (ec != ESR_ELx_EC_DABT_LOW && ec != ESR_ELx_EC_IABT_LOW)
		return true;

	far = read_sysreg_el2(far);

	/*
	 * The HPFAR can be invalid if the stage 2 fault did not
	 * happen during a stage 1 page table walk (the ESR_EL2.S1PTW
	 * bit is clear) and one of the following two cases is true:
	 * 1. The fault was due to a permission fault
	 * 2. The processor carries errata 834220
	 *
	 * Therefore, for all non S1PTW faults where we either have a
	 * permission fault or the errata workaround is enabled, we
	 * resolve the IPA using the AT instruction.
	 */
	if (!(esr & ESR_ELx_S1PTW) &&
	    (__check_arm_834220()() || (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
		if (!__translate_far_to_hpfar(far, &hpfar))
			return false;
	} else {
		hpfar = read_sysreg(hpfar_el2);
	}

	vcpu->arch.fault.far_el2 = far;
	vcpu->arch.fault.hpfar_el2 = hpfar;
	return true;
}

static void __hyp_text __skip_instr(struct kvm_vcpu *vcpu)
{
	*vcpu_pc(vcpu) = read_sysreg_el2(elr);

	if (vcpu_mode_is_32bit(vcpu)) {
		vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(spsr);
		kvm_skip_instr32(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
		write_sysreg_el2(vcpu->arch.ctxt.gp_regs.regs.pstate, spsr);
	} else {
		*vcpu_pc(vcpu) += 4;
	}

	write_sysreg_el2(*vcpu_pc(vcpu), elr);
}

static inline bool __hyp_text __needs_ssbd_off(struct kvm_vcpu *vcpu)
{
	if (!cpus_have_cap(ARM64_SSBD))
		return false;

	return !(vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG);
}
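
/*
 * VCPU_WORKAROUND_2_FLAG mirrors the guest's SMCCC ARCH_WORKAROUND_2
 * request: when clear, the guest asked for the Spectre-v4 (SSBD)
 * mitigation to be turned off, which is only acted upon if the
 * ARM64_SSBD capability (a firmware mitigation we can toggle) is set.
 */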

static void __hyp_text __set_guest_arch_workaround_state(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ARM64_SSBD
	/*
	 * The host runs with the workaround always present. If the
	 * guest wants it disabled, so be it...
	 */
	if (__needs_ssbd_off(vcpu) &&
	    __hyp_this_cpu_read(arm64_ssbd_callback_required))
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 0, NULL);
#endif
}

static void __hyp_text __set_host_arch_workaround_state(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ARM64_SSBD
	/*
	 * If the guest has disabled the workaround, bring it back on.
	 */
	if (__needs_ssbd_off(vcpu) &&
	    __hyp_this_cpu_read(arm64_ssbd_callback_required))
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 1, NULL);
#endif
}
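
/*
 * The two helpers above bracket the guest run: the SMC with argument 0
 * asks firmware to disable the SSB mitigation on this CPU before
 * entering the guest, and argument 1 re-arms it on return, since the
 * host always runs with the workaround present.
 */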

int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt;
	struct kvm_cpu_context *guest_ctxt;
	bool fp_enabled;
	u64 exit_code;

	vcpu = kern_hyp_va(vcpu);

	host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
	host_ctxt->__hyp_running_vcpu = vcpu;
	guest_ctxt = &vcpu->arch.ctxt;

	__sysreg_save_host_state(host_ctxt);
	__debug_cond_save_host_state(vcpu);

	__activate_traps(vcpu);
	__activate_vm(vcpu);

	__vgic_restore_state(vcpu);
	__timer_restore_state(vcpu);

	/*
	 * We must restore the 32-bit state before the sysregs, thanks
	 * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
	 */
	__sysreg32_restore_state(vcpu);
	__sysreg_restore_guest_state(guest_ctxt);
	__debug_restore_state(vcpu, kern_hyp_va(vcpu->arch.debug_ptr), guest_ctxt);

	__set_guest_arch_workaround_state(vcpu);

	/* Jump in the fire! */
again:
	exit_code = __guest_enter(vcpu, host_ctxt);
	/* And we're baaack! */

	/*
	 * We're using the raw exception code in order to only process
	 * the trap if no SError is pending. We will come back to the
	 * same PC once the SError has been injected, and replay the
	 * trapping instruction.
	 */
	if (exit_code == ARM_EXCEPTION_TRAP && !__populate_fault_info(vcpu))
		goto again;
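
	/*
	 * The block below covers hosts that must trap guest accesses to
	 * the GICV interface (vgic_v2_cpuif_trap): a data abort with
	 * valid syndrome information on the GICV region is emulated by
	 * __vgic_v2_perform_cpuif_access(). A return of 1 means the
	 * access was emulated, so skip the instruction and re-enter the
	 * guest; -1 means it was illegal and is promoted to an SError;
	 * 0 leaves the exit to be handled by the host.
	 */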

	if (static_branch_unlikely(&vgic_v2_cpuif_trap) &&
	    exit_code == ARM_EXCEPTION_TRAP) {
		bool valid;

		valid = kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_DABT_LOW &&
			kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT &&
			kvm_vcpu_dabt_isvalid(vcpu) &&
			!kvm_vcpu_dabt_isextabt(vcpu) &&
			!kvm_vcpu_dabt_iss1tw(vcpu);

		if (valid) {
			int ret = __vgic_v2_perform_cpuif_access(vcpu);

			if (ret == 1) {
				__skip_instr(vcpu);
				goto again;
			}

			if (ret == -1) {
				/* Promote an illegal access to an SError */
				__skip_instr(vcpu);
				exit_code = ARM_EXCEPTION_EL1_SERROR;
			}

			/* 0 falls through to be handled out of EL2 */
		}
	}

	__set_host_arch_workaround_state(vcpu);

	fp_enabled = __fpsimd_enabled();

	__sysreg_save_guest_state(guest_ctxt);
	__sysreg32_save_state(vcpu);
	__timer_save_state(vcpu);
	__vgic_save_state(vcpu);

	__deactivate_traps(vcpu);
	__deactivate_vm(vcpu);

	__sysreg_restore_host_state(host_ctxt);

	if (fp_enabled) {
		__fpsimd_save_state(&guest_ctxt->gp_regs.fp_regs);
		__fpsimd_restore_state(&host_ctxt->gp_regs.fp_regs);
	}

	__debug_save_state(vcpu, kern_hyp_va(vcpu->arch.debug_ptr), guest_ctxt);
	__debug_cond_restore_host_state(vcpu);

	return exit_code;
}

static const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n";

static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par,
					     struct kvm_vcpu *vcpu)
{
	unsigned long str_va;

	/*
	 * Force the panic string to be loaded from the literal pool,
	 * making sure it is a kernel address and not a PC-relative
	 * location.
	 */
	asm volatile("ldr %0, =%1" : "=r" (str_va) : "S" (__hyp_panic_string));

	__hyp_do_panic(str_va,
		       spsr, elr,
		       read_sysreg(esr_el2), read_sysreg_el2(far),
		       read_sysreg(hpfar_el2), par, vcpu);
}
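
/*
 * The literal-pool trick above matters because a PC-relative reference
 * taken at EL2 would yield the string's hyp VA; the panic path runs in
 * host context and needs the kernel VA, which the link-time literal
 * provides.
 */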

static void __hyp_text __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par,
					    struct kvm_vcpu *vcpu)
{
	panic(__hyp_panic_string,
	      spsr, elr,
	      read_sysreg_el2(esr), read_sysreg_el2(far),
	      read_sysreg(hpfar_el2), par, vcpu);
}

static hyp_alternate_select(__hyp_call_panic,
			    __hyp_call_panic_nvhe, __hyp_call_panic_vhe,
			    ARM64_HAS_VIRT_HOST_EXTN);
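
/*
 * hyp_panic() is entered from the EL2 exception vectors. A non-zero
 * VTTBR_EL2 means the CPU went down with a guest context loaded, so
 * enough host state is restored first for the panic machinery and its
 * output to make sense.
 */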

void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
{
	struct kvm_vcpu *vcpu = NULL;

	u64 spsr = read_sysreg_el2(spsr);
	u64 elr = read_sysreg_el2(elr);
	u64 par = read_sysreg(par_el1);

	if (read_sysreg(vttbr_el2)) {
		vcpu = host_ctxt->__hyp_running_vcpu;
		__timer_save_state(vcpu);
		__deactivate_traps(vcpu);
		__deactivate_vm(vcpu);
		__sysreg_restore_host_state(host_ctxt);
	}

	/* Call panic for real */
	__hyp_call_panic()(spsr, elr, par, vcpu);

	unreachable();
}
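
/*
 * __kvm_ex_table mirrors the kernel's extable format: each entry stores
 * two 32-bit offsets relative to the entry itself, one locating the
 * potentially-faulting instruction and one its fixup, emitted by the hyp
 * assembly's exception-table annotations. The walk below rebases both to
 * absolute addresses and, on a match with ELR_EL2, resumes execution at
 * the fixup instead of panicking the hypervisor.
 */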

asmlinkage void __hyp_text kvm_unexpected_el2_exception(void)
{
	unsigned long addr, fixup;
	struct kvm_cpu_context *host_ctxt;
	struct exception_table_entry *entry, *end;
	unsigned long elr_el2 = read_sysreg(elr_el2);

	entry = hyp_symbol_addr(__start___kvm_ex_table);
	end = hyp_symbol_addr(__stop___kvm_ex_table);
	host_ctxt = __hyp_this_cpu_ptr(kvm_host_cpu_state);

	while (entry < end) {
		addr = (unsigned long)&entry->insn + entry->insn;
		fixup = (unsigned long)&entry->fixup + entry->fixup;

		if (addr != elr_el2) {
			entry++;
			continue;
		}

		write_sysreg(fixup, elr_el2);
		return;
	}

	hyp_panic(host_ctxt);
}