/*
 * Fault injection for both 32 and 64bit guests.
 *
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Based on arch/arm/kvm/emulate.c
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/esr.h>

#define CURRENT_EL_SP_EL0_VECTOR	0x0
#define CURRENT_EL_SP_ELx_VECTOR	0x200
#define LOWER_EL_AArch64_VECTOR		0x400
#define LOWER_EL_AArch32_VECTOR		0x600
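
/*
 * The AArch64 vector table has four groups of vectors: current EL with
 * SP_EL0, current EL with SP_ELx, lower EL using AArch64 and lower EL
 * using AArch32, spaced 0x200 bytes apart (the offsets above). Within
 * each group, the sync/IRQ/FIQ/SError entries are 0x80 bytes apart,
 * which is what the enum below encodes.
 */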
enum exception_type {
	except_type_sync	= 0,
	except_type_irq		= 0x80,
	except_type_fiq		= 0x100,
	except_type_serror	= 0x180,
};

static u64 get_except_vector(struct kvm_vcpu *vcpu, enum exception_type type)
{
	u64 exc_offset;

	switch (*vcpu_cpsr(vcpu) & (PSR_MODE_MASK | PSR_MODE32_BIT)) {
	case PSR_MODE_EL1t:
		exc_offset = CURRENT_EL_SP_EL0_VECTOR;
		break;
	case PSR_MODE_EL1h:
		exc_offset = CURRENT_EL_SP_ELx_VECTOR;
		break;
	case PSR_MODE_EL0t:
		exc_offset = LOWER_EL_AArch64_VECTOR;
		break;
	default:
		exc_offset = LOWER_EL_AArch32_VECTOR;
	}

	return vcpu_read_sys_reg(vcpu, VBAR_EL1) + exc_offset + type;
}
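
/*
 * For example, a guest running at EL1 on SP_EL1 (PSR_MODE_EL1h) takes
 * synchronous exceptions at VBAR_EL1 + 0x200, while an exception from
 * AArch32 EL0 vectors to VBAR_EL1 + 0x600 + type.
 */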

/*
 * When an exception is taken, most PSTATE fields are left unchanged in the
 * handler. However, some are explicitly overridden (e.g. M[4:0]). Luckily all
 * of the inherited bits have the same position in the AArch64/AArch32 SPSR_ELx
 * layouts, so we don't need to shuffle these for exceptions from AArch32 EL0.
 *
 * For the SPSR_ELx layout for AArch64, see ARM DDI 0487E.a page C5-429.
 * For the SPSR_ELx layout for AArch32, see ARM DDI 0487E.a page C5-426.
 *
 * Here we manipulate the fields in order of the AArch64 SPSR_ELx layout, from
 * MSB to LSB.
 */
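
/*
 * For example, an exception taken from AArch64 EL0 with all condition
 * flags clear, SCTLR_EL1.SPAN set and SCTLR_EL1.DSSBS clear yields a
 * PSTATE of 0x3c5: D, A, I and F set, and M[4:0] = EL1h.
 */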
static unsigned long get_except64_pstate(struct kvm_vcpu *vcpu)
{
	unsigned long sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
	unsigned long old, new;

	old = *vcpu_cpsr(vcpu);
	new = 0;

	new |= (old & PSR_N_BIT);
	new |= (old & PSR_Z_BIT);
	new |= (old & PSR_C_BIT);
	new |= (old & PSR_V_BIT);

	// TODO: TCO (if/when ARMv8.5-MemTag is exposed to guests)

	new |= (old & PSR_DIT_BIT);

	// PSTATE.UAO is set to zero upon any exception to AArch64
	// See ARM DDI 0487E.a, page D5-2579.

	// PSTATE.PAN is unchanged unless SCTLR_ELx.SPAN == 0b0
	// SCTLR_ELx.SPAN is RES1 when ARMv8.1-PAN is not implemented
	// See ARM DDI 0487E.a, page D5-2578.
	new |= (old & PSR_PAN_BIT);
	if (!(sctlr & SCTLR_EL1_SPAN))
		new |= PSR_PAN_BIT;

	// PSTATE.SS is set to zero upon any exception to AArch64
	// See ARM DDI 0487E.a, page D2-2452.

	// PSTATE.IL is set to zero upon any exception to AArch64
	// See ARM DDI 0487E.a, page D1-2306.

	// PSTATE.SSBS is set to SCTLR_ELx.DSSBS upon any exception to AArch64
	// See ARM DDI 0487E.a, page D13-3258
	if (sctlr & SCTLR_ELx_DSSBS)
		new |= PSR_SSBS_BIT;

	// PSTATE.BTYPE is set to zero upon any exception to AArch64
	// See ARM DDI 0487E.a, pages D1-2293 to D1-2294.

	// PSTATE.DAIF are set to 1 upon any exception to AArch64
	// See ARM DDI 0487E.a, page D1-2306.
	new |= PSR_D_BIT;
	new |= PSR_A_BIT;
	new |= PSR_I_BIT;
	new |= PSR_F_BIT;

	new |= PSR_MODE_EL1h;

	return new;
}
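
/*
 * Inject a synchronous external abort into a 64-bit guest: stash the
 * return address in ELR_EL1, point PC at the sync vector, enter EL1h
 * with DAIF masked, preserve the old PSTATE in SPSR_EL1, and describe
 * the fault via FAR_EL1 and ESR_EL1.
 */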
static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
{
	unsigned long cpsr = *vcpu_cpsr(vcpu);
	bool is_aarch32 = vcpu_mode_is_32bit(vcpu);
	u32 esr = 0;

	vcpu_write_elr_el1(vcpu, *vcpu_pc(vcpu));
	*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);

	*vcpu_cpsr(vcpu) = get_except64_pstate(vcpu);
	vcpu_write_spsr(vcpu, cpsr);

	vcpu_write_sys_reg(vcpu, addr, FAR_EL1);

	/*
	 * Build an {i,d}abort, depending on the level and the
	 * instruction set. Report an external synchronous abort.
	 */
	if (kvm_vcpu_trap_il_is32bit(vcpu))
		esr |= ESR_ELx_IL;

	/*
	 * Here, the guest runs in AArch64 mode when in EL1. If we get
	 * an AArch32 fault, it means we managed to trap an EL0 fault.
	 */
	if (is_aarch32 || (cpsr & PSR_MODE_MASK) == PSR_MODE_EL0t)
		esr |= (ESR_ELx_EC_IABT_LOW << ESR_ELx_EC_SHIFT);
	else
		esr |= (ESR_ELx_EC_IABT_CUR << ESR_ELx_EC_SHIFT);
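
	/*
	 * The EC values make the conversion below a simple OR:
	 * ESR_ELx_EC_IABT_LOW is 0x20 and ESR_ELx_EC_IABT_CUR is 0x21,
	 * while ESR_ELx_EC_DABT_LOW is 0x24 and ESR_ELx_EC_DABT_CUR is
	 * 0x25, so ORing in ESR_ELx_EC_DABT_LOW turns either instruction
	 * abort class into the matching data abort class.
	 */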
	if (!is_iabt)
		esr |= ESR_ELx_EC_DABT_LOW << ESR_ELx_EC_SHIFT;

	vcpu_write_sys_reg(vcpu, esr | ESR_ELx_FSC_EXTABT, ESR_EL1);
}

static void inject_undef64(struct kvm_vcpu *vcpu)
{
	unsigned long cpsr = *vcpu_cpsr(vcpu);
	u32 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);

	vcpu_write_elr_el1(vcpu, *vcpu_pc(vcpu));
	*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);

	*vcpu_cpsr(vcpu) = get_except64_pstate(vcpu);
	vcpu_write_spsr(vcpu, cpsr);

	/*
	 * Build an unknown exception, depending on the instruction
	 * set.
	 */
	if (kvm_vcpu_trap_il_is32bit(vcpu))
		esr |= ESR_ELx_IL;

	vcpu_write_sys_reg(vcpu, esr, ESR_EL1);
}

/**
 * kvm_inject_dabt - inject a data abort into the guest
 * @vcpu: The VCPU to receive the data abort
 * @addr: The address to report in the DFAR
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
	if (vcpu_el1_is_32bit(vcpu))
		kvm_inject_dabt32(vcpu, addr);
	else
		inject_abt64(vcpu, false, addr);
}

/**
 * kvm_inject_pabt - inject a prefetch abort into the guest
 * @vcpu: The VCPU to receive the prefetch abort
 * @addr: The address to report in the IFAR
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
	if (vcpu_el1_is_32bit(vcpu))
		kvm_inject_pabt32(vcpu, addr);
	else
		inject_abt64(vcpu, true, addr);
}

/**
 * kvm_inject_undefined - inject an undefined instruction into the guest
 * @vcpu: The VCPU to receive the undefined exception
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_undefined(struct kvm_vcpu *vcpu)
{
	if (vcpu_el1_is_32bit(vcpu))
		kvm_inject_undef32(vcpu);
	else
		inject_undef64(vcpu);
}
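
/*
 * Setting HCR_EL2.VSE makes the CPU deliver a virtual SError to the
 * guest at the first opportunity once it is back in guest context;
 * with the RAS Extensions, VSESR_EL2 supplies the ISS syndrome that
 * the guest sees in ESR_EL1 for that SError.
 */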
void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 esr)
{
	vcpu_set_vsesr(vcpu, esr & ESR_ELx_ISS_MASK);
	*vcpu_hcr(vcpu) |= HCR_VSE;
}

/**
 * kvm_inject_vabt - inject an async abort / SError into the guest
 * @vcpu: The VCPU to receive the exception
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 *
 * Systems with the RAS Extensions specify an imp-def ESR (ISV/IDS = 1) with
 * the remaining ISS all-zeros so that this error is not interpreted as an
 * uncategorized RAS error. Without the RAS Extensions we can't specify an ESR
 * value, so the CPU generates an imp-def value.
 */
void kvm_inject_vabt(struct kvm_vcpu *vcpu)
{
	kvm_set_sei_esr(vcpu, ESR_ELx_ISV);
}
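
/*
 * Illustrative use (not part of this file): an MMIO emulation path
 * that cannot handle a guest access might reflect the fault back with
 * something like
 *
 *	kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
 *	return 1;
 *
 * and let the guest take the external abort when it resumes.
 */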