1 /* SPDX-License-Identifier: GPL-2.0-only */
2 #include <asm/asm-offsets.h>
4 #include <asm/code-patching-asm.h>
5 #include <asm/exception-64s.h>
6 #include <asm/export.h>
7 #include <asm/kvm_asm.h>
8 #include <asm/kvm_book3s_asm.h>
10 #include <asm/ppc_asm.h>
11 #include <asm/ptrace.h>
13 #include <asm/ultravisor-api.h>
16 * These are branched to from interrupt handlers in exception-64s.S which set
17 * IKVM_REAL or IKVM_VIRT, if HSTATE_IN_GUEST was found to be non-zero.
21 * This is a hcall, so register convention is as documented in
22 * Documentation/powerpc/papr_hcalls.rst.
24 * This may also be a syscall from PR-KVM userspace that is to be
25 * reflected to the PR guest kernel, so registers may be set up for
26 * a system call rather than hcall. We don't currently clobber
27 * anything here, but the 0xc00 handler has already clobbered CTR
28 * and CR0, so PR-KVM can not support a guest kernel that preserves
29 * those registers across its system calls.
31 * The state of registers is as kvmppc_interrupt, except CFAR is not
32 * saved, R13 is not in SCRATCH0, and R10 does not contain the trap.
35 .balign IFETCH_ALIGN_BYTES
37 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	/*
	 * Fast path: if this CPU exited a P9-entry HV guest
	 * (HSTATE_IN_GUEST == KVM_GUEST_MODE_HV_P9), branch straight to the
	 * dedicated P9 hcall exit code instead of the legacy path.
	 */
38 lbz r10,HSTATE_IN_GUEST(r13)
39 cmpwi r10,KVM_GUEST_MODE_HV_P9
40 beq kvmppc_p9_exit_hcall
	/*
	 * r10 = guest r13, reloaded from the EXGEN save area (presumably
	 * re-saved into HSTATE_SCRATCH0 on a line not visible in this
	 * chunk -- confirm against the full file).
	 */
42 ld r10,PACA_EXGEN+EX_R13(r13)
45 /* Now we look like kvmppc_interrupt */
50 * KVM interrupt entry occurs after GEN_INT_ENTRY runs, and follows that
53 * guest R9-R13, CTR, CFAR, PPR saved in PACA EX_xxx save area
54 * guest (H)DAR, (H)DSISR are also in the save area for relevant interrupts
55 * guest R13 also saved in SCRATCH0
60 * PPR is set to medium
62 * With the addition for KVM:
65 .global kvmppc_interrupt
66 .balign IFETCH_ALIGN_BYTES
68 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	/*
	 * r10 is live with a guest value here: stash it so it can be used
	 * to test HSTATE_IN_GUEST, and take the P9 interrupt-exit path if
	 * this was a P9-entry HV guest. Otherwise restore r10 and continue
	 * on the legacy path.
	 */
69 std r10,HSTATE_SCRATCH0(r13)
70 lbz r10,HSTATE_IN_GUEST(r13)
71 cmpwi r10,KVM_GUEST_MODE_HV_P9
72 beq kvmppc_p9_exit_interrupt
73 ld r10,HSTATE_SCRATCH0(r13)
	/*
	 * Save guest CFAR and PPR into host state, each guarded by a CPU
	 * feature section. NOTE(review): the matching BEGIN_FTR_SECTION
	 * lines and the loads that put these values in r12 are not visible
	 * in this chunk -- confirm against the full file.
	 */
85 std r12,HSTATE_CFAR(r13)
86 END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
91 std r12,HSTATE_PPR(r13)
92 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
94 std r12,HSTATE_SCRATCH0(r13)
102 * Hcalls and other interrupts come here after normalising register
103 * contents and save locations:
105 * R12 = (guest CR << 32) | interrupt vector
107 * guest R12 saved in shadow HSTATE_SCRATCH0
108 * guest R13 saved in SPRN_SCRATCH0
	/* Free up r9, then fetch the guest-mode byte to dispatch on. */
110 std r9,HSTATE_SCRATCH2(r13)
111 lbz r9,HSTATE_IN_GUEST(r13)
112 cmpwi r9,KVM_GUEST_MODE_SKIP
	/* (the branch to the skip path on equality is not visible here) */
115 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
116 #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	/* Both HV and PR built in: KVM_GUEST_MODE_GUEST means PR guest. */
117 cmpwi r9,KVM_GUEST_MODE_GUEST
118 beq kvmppc_interrupt_pr
120 b kvmppc_interrupt_hv
	/* PR-only build: everything goes to the PR handler. */
122 b kvmppc_interrupt_pr
126 * "Skip" interrupts are part of a trick KVM uses a with hash guests to load
127 * the faulting instruction in guest memory from the hypervisor without
128 * walking page tables.
130 * When the guest takes a fault that requires the hypervisor to load the
131 * instruction (e.g., MMIO emulation), KVM is running in real-mode with HV=1
132 * and the guest MMU context loaded. It sets KVM_GUEST_MODE_SKIP, and sets
133 * MSR[DR]=1 while leaving MSR[IR]=0, so it continues to fetch HV instructions
134 * but loads and stores will access the guest context. This is used to load
135 * the faulting instruction using the faulting guest effective address.
137 * However the guest context may not be able to translate, or it may cause a
138 * machine check or other issue, which results in a fault in the host
139 * (even with KVM-HV).
141 * These faults come here because KVM_GUEST_MODE_SKIP was set, so if they
142 * are (or are likely) caused by that load, the instruction is skipped by
143 * just returning with the PC advanced +4, where it is noticed the load did
144 * not execute and it goes to the slow path which walks the page tables to
148 cmpwi r12,BOOK3S_INTERRUPT_MACHINE_CHECK
	/*
	 * Only the fault types a hypervisor instruction load can raise are
	 * eligible for skipping. NOTE(review): the conditional branches
	 * between these compares are not visible in this chunk.
	 */
150 cmpwi r12,BOOK3S_INTERRUPT_DATA_STORAGE
152 cmpwi r12,BOOK3S_INTERRUPT_DATA_SEGMENT
154 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
155 /* HSRR interrupts get 2 added to interrupt number */
156 cmpwi r12,BOOK3S_INTERRUPT_H_DATA_STORAGE | 0x2
	/*
	 * SRR flavour: fetch SRR0 (presumably advanced by 4 and written
	 * back by lines not visible here), then restore the guest r12/r9
	 * scratch values and return.
	 */
160 1: mfspr r9,SPRN_SRR0
163 ld r12,HSTATE_SCRATCH0(r13)
164 ld r9,HSTATE_SCRATCH2(r13)
167 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	/* HSRR flavour: same sequence using HSRR0. */
168 2: mfspr r9,SPRN_HSRR0
171 ld r12,HSTATE_SCRATCH0(r13)
172 ld r9,HSTATE_SCRATCH2(r13)
177 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
179 /* Stack frame offsets for kvmppc_p9_enter_guest */
180 #define SFS (144 + STACK_FRAME_MIN_SIZE)
181 #define STACK_SLOT_NVGPRS (SFS - 144) /* 18 gprs */
184 * void kvmppc_p9_enter_guest(struct vcpu *vcpu);
186 * Enter the guest on an ISAv3.0 or later system.
188 .balign IFETCH_ALIGN_BYTES
189 _GLOBAL(kvmppc_p9_enter_guest)
190 EXPORT_SYMBOL_GPL(kvmppc_p9_enter_guest)
	/*
	 * Save return address (r0, presumably loaded via mflr on a line not
	 * visible here) and publish the host stack pointer so the exit path
	 * can recover it from the PACA.
	 */
192 std r0,PPC_LR_STKOFF(r1)
195 std r1,HSTATE_HOST_R1(r13)
	/* Save host non-volatile GPRs r14-r31 into the stack frame
	 * (macro/loop body; the surrounding reg list is not visible). */
202 std reg,STACK_SLOT_NVGPRS + ((reg - 14) * 8)(r1)
	/* Load guest CFAR / PPR only where the CPU supports them. */
218 END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
222 END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	/* Load guest GPRs from the vcpu (macro/loop body). */
226 ld reg,__VCPU_GPR(reg)(r3)
	/* Secure (ultravisor) guests must be entered via the UV_RETURN
	 * ucall path; the branch taken on this flag is not visible here. */
231 lbz r4,KVM_SECURE_GUEST(r4)
233 ld r4,VCPU_GPR(R4)(r3)
	/* r3 is loaded last -- it holds the vcpu pointer until then. */
238 ld r0,VCPU_GPR(R0)(r3)
239 ld r1,VCPU_GPR(R1)(r3)
240 ld r2,VCPU_GPR(R2)(r3)
241 ld r3,VCPU_GPR(R3)(r3)
247 * Use UV_RETURN ultracall to return control back to the Ultravisor
248 * after processing a hypercall or interrupt that was forwarded
249 * (a.k.a. reflected) to the Hypervisor.
251 * All registers have already been reloaded except the ucall requires:
253 * R2 = SRR1, so UV can detect a synthesized interrupt (if any)
258 ld r1,VCPU_GPR(R1)(r3)
	/*
	 * Guest r3 travels in r0 for the ucall, freeing r3 to carry the
	 * UV_RETURN ucall number (per the comment block above).
	 */
260 ld r0,VCPU_GPR(R3)(r3)
262 LOAD_REG_IMMEDIATE(r3, UV_RETURN)
266 * kvmppc_p9_exit_hcall and kvmppc_p9_exit_interrupt are branched to from
267 * above if the interrupt was taken for a guest that was entered via
268 * kvmppc_p9_enter_guest().
270 * The exit code recovers the host stack and vcpu pointer, saves all guest GPRs
271 * and CR, LR, XER as well as guest MSR and NIA into the VCPU, then re-
272 * establishes the host stack and registers to return from the
273 * kvmppc_p9_enter_guest() function, which saves CTR and other guest registers
274 * (SPRs and FP, VEC, etc).
276 .balign IFETCH_ALIGN_BYTES
277 kvmppc_p9_exit_hcall:
	/*
	 * Record the trap in HSTATE_SCRATCH0 and fall through into
	 * kvmppc_p9_exit_interrupt. NOTE(review): the instruction that sets
	 * r10 (presumably the hcall vector) is not visible in this chunk.
	 */
281 std r10,HSTATE_SCRATCH0(r13)
283 .balign IFETCH_ALIGN_BYTES
284 kvmppc_p9_exit_interrupt:
286 * If set to KVM_GUEST_MODE_HV_P9 but we're still in the
287 * hypervisor, that means we can't return from the entry stack.
	/*
	 * r12 holds [H]SRR1 (see the bad-interrupt comment below): extract
	 * MSR[HV]; nonzero means the interrupt hit in the hypervisor
	 * itself, so take the bad-interrupt recovery path.
	 */
289 rldicl. r10,r12,64-MSR_HV_LG,63
290 bne- kvmppc_p9_bad_interrupt
	/* Stash guest r1/r3, then recover host stack and vcpu pointer. */
292 std r1,HSTATE_SCRATCH1(r13)
293 std r3,HSTATE_SCRATCH2(r13)
294 ld r1,HSTATE_HOST_R1(r13)
295 ld r3,HSTATE_KVM_VCPU(r13)
	/* Save guest GPRs into the vcpu (macro/loop body; the surrounding
	 * reg list is not visible in this chunk). */
305 std reg,__VCPU_GPR(reg)(r3)
309 /* r1, r3, r9-r13 are saved to vcpu by C code */
310 std r0,VCPU_GPR(R0)(r3)
311 std r2,VCPU_GPR(R2)(r3)
	/* Second GPR save loop body (different range, list not visible). */
314 std reg,__VCPU_GPR(reg)(r3)
	/* Restore host non-volatiles saved by kvmppc_p9_enter_guest. */
327 ld reg,STACK_SLOT_NVGPRS + ((reg - 14) * 8)(r1)
335 * Flush the link stack here, before executing the first blr on the
336 * way out of the guest.
338 * The link stack won't match coming out of the guest anyway so the
339 * only cost is the flush itself. The call clobbers r0.
	/* Site patched at boot to call the link-stack flush sequence. */
342 patch_site 1b patch__call_kvm_flush_link_stack_p9
	/* Reload LR and return to kvmppc_p9_enter_guest's caller
	 * (the mtlr/blr are not visible in this chunk). */
345 ld r0,PPC_LR_STKOFF(r1)
350 * Took an interrupt somewhere right before HRFID to guest, so registers are
351 * in a bad way. Return things hopefully enough to run host virtual code and
352 * run the Linux interrupt handler (SRESET or MCE) to print something useful.
354 * We could be really clever and save all host registers in known locations
355 * before setting HSTATE_IN_GUEST, then restoring them all here, and setting
356 * return address to a fixup that sets them up again. But that's a lot of
357 * effort for a small bit of code. Lots of other things to do first.
359 kvmppc_p9_bad_interrupt:
360 BEGIN_MMU_FTR_SECTION
362 * Hash host doesn't try to recover MMU (requires host SLB reload)
365 END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_RADIX)
367 * Clean up guest registers to give host a chance to run.
	/*
	 * Clear the guest DAWRX registers (r10 presumably zeroed by an
	 * instruction not visible in this chunk); DAWRX1 only exists where
	 * the CPU has a second DAWR.
	 */
373 mtspr SPRN_DAWRX0,r10
375 mtspr SPRN_DAWRX1,r10
376 END_FTR_SECTION_IFSET(CPU_FTR_DAWR1)
380 * Switch to host MMU mode
	/* Fetch the host LPID from vcpu->kvm ... */
382 ld r10, HSTATE_KVM_VCPU(r13)
383 ld r10, VCPU_KVM(r10)
384 lwz r10, KVM_HOST_LPID(r10)
	/* ... and the host LPCR. NOTE(review): the mtspr instructions that
	 * actually write LPID/LPCR are not visible in this chunk. */
387 ld r10, HSTATE_KVM_VCPU(r13)
388 ld r10, VCPU_KVM(r10)
389 ld r10, KVM_HOST_LPCR(r10)
393 * Set GUEST_MODE_NONE so the handler won't branch to KVM, and clear
394 * MSR_RI in r12 ([H]SRR1) so the handler won't try to return.
396 li r10,KVM_GUEST_MODE_NONE
397 stb r10,HSTATE_IN_GUEST(r13)
402 * Go back to interrupt handler. MCE and SRESET have their specific
403 * PACA save area so they should be used directly. They set up their
404 * own stack. The other handlers all use EXGEN. They will use the
405 * guest r1 if it looks like a kernel stack, so just load the
406 * emergency stack and go to program check for all other interrupts.
	/* Dispatch on the trap number saved earlier in HSTATE_SCRATCH0. */
408 ld r10,HSTATE_SCRATCH0(r13)
409 cmpwi r10,BOOK3S_INTERRUPT_MACHINE_CHECK
410 beq .Lcall_machine_check_common
412 cmpwi r10,BOOK3S_INTERRUPT_SYSTEM_RESET
413 beq .Lcall_system_reset_common
	/* Fall-through (emergency-stack / program-check path) is not
	 * visible in this chunk. */
417 .Lcall_machine_check_common:
418 b machine_check_common
420 .Lcall_system_reset_common:
421 b system_reset_common