/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/fpsimdmacros.h>
#include <asm/kvm.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#define CPU_GP_REG_OFFSET(x)	(CPU_GP_REGS + x)
#define CPU_XREG_OFFSET(x)	CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)
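
// CPU_GP_REGS and CPU_USER_PT_REGS are byte offsets generated by
// asm-offsets.c, so CPU_XREG_OFFSET(n) yields the offset of general-purpose
// register xn inside struct kvm_cpu_context, at 8 bytes per register.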
	.text
	.pushsection	.hyp.text, "ax"
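
// Only the callee-saved registers (x19-x29, lr) are switched by hand:
// under AAPCS64 the compiled caller of __guest_enter() already assumes
// the temporary registers are clobbered across the call, and the guest's
// x0-x18 are transferred explicitly in the entry/exit paths below.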
.macro save_callee_saved_regs ctxt
	stp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
	stp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
	stp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
	stp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
	stp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
	stp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm
.macro restore_callee_saved_regs ctxt
	ldp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
	ldp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
	ldp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
	ldp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
	ldp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
	ldp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm

/*
 * u64 __guest_enter(struct kvm_vcpu *vcpu,
 *		     struct kvm_cpu_context *host_ctxt);
 */
ENTRY(__guest_enter)
	// x0: vcpu
	// x1: host context
	// x2-x17: clobbered by macros
	// x18: guest context
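
	// The host's callee-saved registers are stashed in host_ctxt so that
	// __guest_exit can restore them and "return" from __guest_enter with
	// the exit code in x0.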

	// Store the host regs
	save_callee_saved_regs x1

	// Now that the host state is stored, a pending RAS SError must affect
	// the host rather than the guest. If any asynchronous exception is
	// pending we defer the guest entry.
	mrs	x1, isr_el1
	cbz	x1, 1f
	mov	x0, #ARM_EXCEPTION_IRQ
	ret

1:
	add	x18, x0, #VCPU_CONTEXT
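
	// x18 now holds the guest context pointer and stays live across the
	// entire register restore below.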

	// Restore guest regs x0-x17
	ldp	x0, x1,   [x18, #CPU_XREG_OFFSET(0)]
	ldp	x2, x3,   [x18, #CPU_XREG_OFFSET(2)]
	ldp	x4, x5,   [x18, #CPU_XREG_OFFSET(4)]
	ldp	x6, x7,   [x18, #CPU_XREG_OFFSET(6)]
	ldp	x8, x9,   [x18, #CPU_XREG_OFFSET(8)]
	ldp	x10, x11, [x18, #CPU_XREG_OFFSET(10)]
	ldp	x12, x13, [x18, #CPU_XREG_OFFSET(12)]
	ldp	x14, x15, [x18, #CPU_XREG_OFFSET(14)]
	ldp	x16, x17, [x18, #CPU_XREG_OFFSET(16)]

	// Restore guest regs x19-x29, lr
	restore_callee_saved_regs x18

	// Restore guest reg x18
	ldr	x18, [x18, #CPU_XREG_OFFSET(18)]
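
	// The ldr above replaced the context pointer with the guest's own
	// x18 value, which is why x18 must be the last register restored.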

	// Do not touch any register after this!
	eret
ENDPROC(__guest_enter)

ENTRY(__guest_exit)
	// x0: return code
	// x1: vcpu
	// x2-x29,lr: vcpu regs
	// vcpu x0-x1 on the stack

	add	x1, x1, #VCPU_CONTEXT

	ALTERNATIVE(nop, SET_PSTATE_PAN(1), ARM64_HAS_PAN, CONFIG_ARM64_PAN)
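
	// Exception entry from the guest does not guarantee PSTATE.PAN is
	// set, so on CPUs with PAN it is re-enabled here before any further
	// hyp code runs.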

	// Store the guest regs x2 and x3
	stp	x2, x3,   [x1, #CPU_XREG_OFFSET(2)]

	// Retrieve the guest regs x0-x1 from the stack
	ldp	x2, x3, [sp], #16	// x0, x1
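
	// The guest's x0/x1 were pushed by the exception vector; x2/x3 were
	// saved first above so they can be reused as scratch registers while
	// the pair is popped and written out.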

	// Store the guest regs x0-x1 and x4-x18
	stp	x2, x3,   [x1, #CPU_XREG_OFFSET(0)]
	stp	x4, x5,   [x1, #CPU_XREG_OFFSET(4)]
	stp	x6, x7,   [x1, #CPU_XREG_OFFSET(6)]
	stp	x8, x9,   [x1, #CPU_XREG_OFFSET(8)]
	stp	x10, x11, [x1, #CPU_XREG_OFFSET(10)]
	stp	x12, x13, [x1, #CPU_XREG_OFFSET(12)]
	stp	x14, x15, [x1, #CPU_XREG_OFFSET(14)]
	stp	x16, x17, [x1, #CPU_XREG_OFFSET(16)]
	str	x18,      [x1, #CPU_XREG_OFFSET(18)]

	// Store the guest regs x19-x29, lr
	save_callee_saved_regs x1

	// Locate the host context saved by __guest_enter
	get_host_ctxt	x2, x3

	// Now restore the host regs
	restore_callee_saved_regs x2

	// If we have a pending asynchronous abort, now is the
	// time to find out. From your VAXorcist book, page 666:
	// "Threaten me not, oh Evil one! For I speak with
	// the power of DEC, and I command thee to show thyself!"
	mrs	x2, elr_el2
	mrs	x3, esr_el2
	mrs	x4, spsr_el2
	mov	x5, x0
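
	// The EL2 exception state and the exit code are snapshotted before
	// aborts are unmasked: a delivered SError would overwrite
	// elr/esr/spsr_el2, and the saved copies are needed to report the
	// original guest exit.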
	dsb	sy		// Synchronize against in-flight ld/st
	msr	daifclr, #4	// Unmask aborts

	// This is our single instruction exception window. A pending
	// SError is guaranteed to occur at the earliest when we unmask
	// it, and at the latest just after the ISB.
abort_guest_exit_start:

	isb

abort_guest_exit_end:
	msr	daifset, #4	// Mask aborts
	ret

	_kvm_extable	abort_guest_exit_start, 9997f
	_kvm_extable	abort_guest_exit_end, 9997f
9997:
	msr	daifset, #4	// Mask aborts
	mov	x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)
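
	// A delivered SError lands here via the fixup entries above; x0 now
	// holds only the SError flag, which is merged with the saved exit
	// code (x5) below.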

	// Restore the EL1 exception context so that we can report some
	// information. Merge the exception code with the SError pending bit.
	msr	elr_el2, x2
	msr	esr_el2, x3
	msr	spsr_el2, x4
	orr	x0, x0, x5
	ret
ENDPROC(__guest_exit)

ENTRY(__fpsimd_guest_restore)
	// x0: esr
	// x1: vcpu
	// x2-x29,lr: vcpu regs
	// vcpu x0-x1 on the stack
	stp	x2, x3, [sp, #-16]!
	stp	x4, lr, [sp, #-16]!
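
	// How FP/SIMD traps are disabled depends on the mode: without VHE the
	// trap is CPTR_EL2.TFP, with VHE (ARM64_HAS_VIRT_HOST_EXTN) it is the
	// CPACR_EL1.FPEN enable bits, so an alternative patches in the right
	// sequence.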
alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
	mrs	x2, cptr_el2
	bic	x2, x2, #CPTR_EL2_TFP
	msr	cptr_el2, x2
alternative_else
	mrs	x2, cpacr_el1
	orr	x2, x2, #CPACR_EL1_FPEN
	msr	cpacr_el1, x2
alternative_endif
	isb

	mov	x3, x1

	ldr	x0, [x3, #VCPU_HOST_CONTEXT]
	kern_hyp_va x0
	add	x0, x0, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
	bl	__fpsimd_save_state

	add	x2, x3, #VCPU_CONTEXT
	add	x0, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
	bl	__fpsimd_restore_state
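
	// This is the lazy FP/SIMD switch: host FP state is saved and guest
	// FP state loaded only on the guest's first FP/SIMD access after
	// entry, so exits that never touch FP/SIMD skip the costly copy of
	// all 32 vector registers.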

	// Skip restoring fpexc32 for AArch64 guests: with HCR_EL2.RW set the
	// guest is AArch64 and fpexc32_el2 (the AArch32 FPEXC shadow) is
	// irrelevant.
	mrs	x1, hcr_el2
	tbnz	x1, #HCR_RW_SHIFT, 1f
	ldr	x4, [x3, #VCPU_FPEXC32_EL2]
	msr	fpexc32_el2, x4
1:
	ldp	x4, lr, [sp], #16
	ldp	x2, x3, [sp], #16
	ldp	x0, x1, [sp], #16

	// Return to the guest; the trapped FP/SIMD instruction is re-executed
	eret
ENDPROC(__fpsimd_guest_restore)