/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */
#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/asm-compat.h>
#if defined(CONFIG_PPC_BOOK3S_64)
#ifdef PPC64_ELF_ABI_v2
#define FUNC(name)		name
#else
#define FUNC(name)		GLUE(.,name)
#endif
#define GET_SHADOW_VCPU(reg)	addi	reg, r13, PACA_SVCPU

#elif defined(CONFIG_PPC_BOOK3S_32)
#define FUNC(name)		name
#define GET_SHADOW_VCPU(reg)	lwz	reg, (THREAD + THREAD_KVM_SVCPU)(r2)

#endif /* CONFIG_PPC_BOOK3S_XX */
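
/*
 * Load the guest's non-volatile GPRs (r14 - r31) from the vcpu struct.
 * PPC_LL (from asm-compat.h) expands to ld on 64-bit and lwz on 32-bit,
 * so the macro below serves both Book3S variants.
 */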
#define VCPU_LOAD_NVGPRS(vcpu) \
	PPC_LL	r14, VCPU_GPR(R14)(vcpu); \
	PPC_LL	r15, VCPU_GPR(R15)(vcpu); \
	PPC_LL	r16, VCPU_GPR(R16)(vcpu); \
	PPC_LL	r17, VCPU_GPR(R17)(vcpu); \
	PPC_LL	r18, VCPU_GPR(R18)(vcpu); \
	PPC_LL	r19, VCPU_GPR(R19)(vcpu); \
	PPC_LL	r20, VCPU_GPR(R20)(vcpu); \
	PPC_LL	r21, VCPU_GPR(R21)(vcpu); \
	PPC_LL	r22, VCPU_GPR(R22)(vcpu); \
	PPC_LL	r23, VCPU_GPR(R23)(vcpu); \
	PPC_LL	r24, VCPU_GPR(R24)(vcpu); \
	PPC_LL	r25, VCPU_GPR(R25)(vcpu); \
	PPC_LL	r26, VCPU_GPR(R26)(vcpu); \
	PPC_LL	r27, VCPU_GPR(R27)(vcpu); \
	PPC_LL	r28, VCPU_GPR(R28)(vcpu); \
	PPC_LL	r29, VCPU_GPR(R29)(vcpu); \
	PPC_LL	r30, VCPU_GPR(R30)(vcpu); \
	PPC_LL	r31, VCPU_GPR(R31)(vcpu)
/*****************************************************************************
 *                                                                           *
 *     Guest entry / exit code that is in kernel module memory (highmem)     *
 *                                                                           *
 ****************************************************************************/
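
/* Registers on entry:
 *  r3: kvm_run pointer
 *  r4: vcpu pointer
 */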
_GLOBAL(__kvmppc_vcpu_run)
	/* Write correct stack frame */
	mflr	r0
	PPC_STL	r0,PPC_LR_STKOFF(r1)

	/* Save host state to the stack */
	PPC_STLU r1, -SWITCH_FRAME_SIZE(r1)

	/* Save r3 (kvm_run) and r4 (vcpu) */
	SAVE_2GPRS(3, r1)

	/* Save non-volatile registers (r14 - r31) */
	SAVE_NVGPRS(r1)

	/* Save CR and LR */
	mfcr	r14
	stw	r14, _CCR(r1)
	PPC_STL	r0, _LINK(r1)

	/* Load non-volatile guest state from the vcpu */
	VCPU_LOAD_NVGPRS(r4)
kvm_start_lightweight:
	/* Copy registers into shadow vcpu so we can access them in real mode */
	mr	r3, r4
	bl	FUNC(kvmppc_copy_to_svcpu)
	nop
	REST_GPR(4, r1)
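
	/*
	 * kvmppc_copy_to_svcpu mirrors the guest register state into the
	 * shadow vcpu (held in the PACA on 64-bit) so the real-mode entry
	 * code can reach it with translation off. The call may clobber
	 * volatile registers, hence r4 (vcpu) is reloaded from the stack
	 * frame above.
	 */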
#ifdef CONFIG_PPC_BOOK3S_64
	/* Get the dcbz32 flag */
	PPC_LL	r3, VCPU_HFLAGS(r4)
	rldicl	r3, r3, 0, 63		/* r3 &= 1 */
	stb	r3, HSTATE_RESTORE_HID5(r13)
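
	/*
	 * The low hflags bit is the dcbz32 flag (BOOK3S_HFLAG_DCBZ32); the
	 * byte stashed above tells the real-mode entry code whether HID5
	 * must be rewritten so that dcbz clears 32 bytes, as 32-bit guests
	 * expect.
	 *
	 * Below: the guest's shared page can be the opposite endianness
	 * from the host, so when shared_big_endian disagrees with the
	 * host's endianness the SPRG3 value is fetched with a
	 * byte-reversed load (ldbrx).
	 */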
	/* Load up guest SPRG3 value, since it's user readable */
	lwz	r3, VCPU_SHAREDBE(r4)
	cmpwi	r3, 0
	ld	r5, VCPU_SHARED(r4)
	beq	sprg3_little_endian
sprg3_big_endian:
#ifdef __BIG_ENDIAN__
	ld	r3, VCPU_SHARED_SPRG3(r5)
#else
	addi	r5, r5, VCPU_SHARED_SPRG3
	ldbrx	r3, 0, r5
#endif
	b	after_sprg3_load
sprg3_little_endian:
#ifdef __LITTLE_ENDIAN__
	ld	r3, VCPU_SHARED_SPRG3(r5)
#else
	addi	r5, r5, VCPU_SHARED_SPRG3
	ldbrx	r3, 0, r5
#endif

after_sprg3_load:
	mtspr	SPRN_SPRG3, r3
#endif /* CONFIG_PPC_BOOK3S_64 */
	PPC_LL	r4, VCPU_SHADOW_MSR(r4)	/* get shadow_msr */

	/* Jump to segment patching handler and into our guest */
	bl	FUNC(kvmppc_entry_trampoline)
	nop
/*
 * This is the handler in module memory. It gets jumped at from the
 * lowmem trampoline code, so it's basically the guest exit code.
 */

	/*
	 * Register usage at this point:
	 *
	 * R1       = host R1
	 * R2       = host R2
	 * R12      = exit handler id
	 * R13      = PACA
	 * SVCPU.*  = guest *
	 * MSR.EE   = 1
	 */
	PPC_LL	r3, GPR4(r1)		/* vcpu pointer */

	/*
	 * kvmppc_copy_from_svcpu can clobber volatile registers, save
	 * the exit handler id to the vcpu and restore it from there later.
	 */
	stw	r12, VCPU_TRAP(r3)

	/* Transfer reg values from shadow vcpu back to vcpu struct */
	bl	FUNC(kvmppc_copy_from_svcpu)
	nop
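
	/*
	 * Below: SPRG3 is readable from problem state and the host uses it
	 * for the VDSO getcpu() fast path, so the host value has to go
	 * back in before we return to the host.
	 */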
#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * Reload kernel SPRG3 value.
	 * No need to save guest value as usermode can't modify SPRG3.
	 */
	ld	r3, PACA_SPRG_VDSO(r13)
	mtspr	SPRN_SPRG_VDSO_WRITE, r3
#endif /* CONFIG_PPC_BOOK3S_64 */
	/* R7 = vcpu */
	PPC_LL	r7, GPR4(r1)

	PPC_STL	r14, VCPU_GPR(R14)(r7)
	PPC_STL	r15, VCPU_GPR(R15)(r7)
	PPC_STL	r16, VCPU_GPR(R16)(r7)
	PPC_STL	r17, VCPU_GPR(R17)(r7)
	PPC_STL	r18, VCPU_GPR(R18)(r7)
	PPC_STL	r19, VCPU_GPR(R19)(r7)
	PPC_STL	r20, VCPU_GPR(R20)(r7)
	PPC_STL	r21, VCPU_GPR(R21)(r7)
	PPC_STL	r22, VCPU_GPR(R22)(r7)
	PPC_STL	r23, VCPU_GPR(R23)(r7)
	PPC_STL	r24, VCPU_GPR(R24)(r7)
	PPC_STL	r25, VCPU_GPR(R25)(r7)
	PPC_STL	r26, VCPU_GPR(R26)(r7)
	PPC_STL	r27, VCPU_GPR(R27)(r7)
	PPC_STL	r28, VCPU_GPR(R28)(r7)
	PPC_STL	r29, VCPU_GPR(R29)(r7)
	PPC_STL	r30, VCPU_GPR(R30)(r7)
	PPC_STL	r31, VCPU_GPR(R31)(r7)
	/* Pass the exit number as 3rd argument to kvmppc_handle_exit */
	lwz	r5, VCPU_TRAP(r7)

	/* Restore r3 (kvm_run) and r4 (vcpu) */
	REST_2GPRS(3, r1)
	bl	FUNC(kvmppc_handle_exit_pr)
	/* If RESUME_GUEST, get back in the loop */
	cmpwi	r3, RESUME_GUEST
	beq	kvm_loop_lightweight

	cmpwi	r3, RESUME_GUEST_NV
	beq	kvm_loop_heavyweight
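
	/*
	 * RESUME_GUEST means the guest's non-volatile GPRs are still live
	 * in r14 - r31, so re-entry can skip reloading them (lightweight).
	 * RESUME_GUEST_NV carries RESUME_FLAG_NV: the non-volatiles were
	 * clobbered and must be reloaded from the vcpu (heavyweight).
	 * Anything else falls through and exits back to the host caller.
	 */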
kvm_exit_loop:

	PPC_LL	r4, _LINK(r1)
	mtlr	r4
	lwz	r14, _CCR(r1)
	mtcr	r14

	/* Restore non-volatile host registers (r14 - r31) */
	REST_NVGPRS(r1)

	addi	r1, r1, SWITCH_FRAME_SIZE
	blr
kvm_loop_heavyweight:

	PPC_LL	r4, _LINK(r1)
	PPC_STL	r4, (PPC_LR_STKOFF + SWITCH_FRAME_SIZE)(r1)

	/* Load vcpu and cpu_run */
	REST_2GPRS(3, r1)

	/* Load non-volatile guest state from the vcpu */
	VCPU_LOAD_NVGPRS(r4)

	/* Jump back into the beginning of this function */
	b	kvm_start_lightweight
kvm_loop_lightweight:

	/* We'll need the vcpu pointer */
	REST_GPR(4, r1)

	/* Jump back into the beginning of this function */
	b	kvm_start_lightweight