/* SPDX-License-Identifier: GPL-2.0-only */
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Derived from book3s_rmhandlers.S and other files, which are:
 * Copyright SUSE Linux Products GmbH 2009
 * Authors: Alexander Graf <agraf@suse.de>
#include <asm/ppc_asm.h>
#include <asm/code-patching-asm.h>
#include <asm/kvm_asm.h>
#include <asm/ptrace.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/export.h>
#include <asm/xive-regs.h>
#include <asm/thread_info.h>
#include <asm/asm-compat.h>
#include <asm/feature-fixups.h>
#include <asm/cpuidle.h>
#include <asm/ultravisor-api.h>
/* Sign-extend HDEC if not on POWER9 */
#define EXTEND_HDEC(reg)			\
BEGIN_FTR_SECTION;				\
	extsw	reg, reg;			\
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
/* Values in HSTATE_NAPPING(r13) */
#define NAPPING_CEDE	1
#define NAPPING_NOVCPU	2
#define NAPPING_UNSPLIT	3

/* Stack frame offsets for kvmppc_hv_entry */
#define STACK_SLOT_TRAP		(SFS-4)
#define STACK_SLOT_SHORT_PATH	(SFS-8)
#define STACK_SLOT_TID		(SFS-16)
#define STACK_SLOT_PSSCR	(SFS-24)
#define STACK_SLOT_PID		(SFS-32)
#define STACK_SLOT_IAMR		(SFS-40)
#define STACK_SLOT_CIABR	(SFS-48)
#define STACK_SLOT_DAWR		(SFS-56)
#define STACK_SLOT_DAWRX	(SFS-64)
#define STACK_SLOT_HFSCR	(SFS-72)
#define STACK_SLOT_AMR		(SFS-80)
#define STACK_SLOT_UAMOR	(SFS-88)
/* the following is used by the P9 short path */
#define STACK_SLOT_NVGPRS	(SFS-152)	/* 18 gprs */
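/*
 * Note: the NVGPR area holds 18 GPRs (r14-r31) at 8 bytes each,
 * i.e. 144 bytes from SFS-152 up to SFS-9, so it overlaps the SPR
 * slots above.  This is presumably harmless because the NVGPR area
 * is only used on the P9 short path, which does not use those SPR
 * save slots.
 */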
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 * LR = return address to continue at after eventually re-enabling MMU
_GLOBAL_TOC(kvmppc_hv_entry_trampoline)
	std	r0, PPC_LR_STKOFF(r1)
	std	r10, HSTATE_HOST_MSR(r13)
	LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
	mtmsrd	r0,1		/* clear RI in MSR */

	/* On P9, do LPCR setting, if necessary */
	ld	r3, HSTATE_SPLIT_MODE(r13)
	lwz	r4, KVM_SPLIT_DO_SET(r3)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

	ld	r4, HSTATE_KVM_VCPU(r13)

	/* Back from guest - restore host state and return to caller */

	/* Restore host DABR and DABRX */
	ld	r5,HSTATE_DABR(r13)
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

	ld	r3,PACA_SPRG_VDSO(r13)
	mtspr	SPRN_SPRG_VDSO_WRITE,r3

	/* Reload the host's PMU registers */
	bl	kvmhv_load_host_pmu

 * Reload DEC. HDEC interrupts were disabled when
 * we reloaded the host's LPCR value.
	ld	r3, HSTATE_DECEXP(r13)

	/* hwthread_req may have got set by cede or no vcpu, so clear it */
	stb	r0, HSTATE_HWTHREAD_REQ(r13)
 * For external interrupts we need to call the Linux
 * handler to process the interrupt. We do that by jumping
 * to absolute address 0x500 for external interrupts.
 * The [h]rfid at the end of the handler will return to
 * the book3s_hv_interrupts.S code. For other interrupts
 * we do the rfid to get back to the book3s_hv_interrupts.S code here.
	ld	r8, 112+PPC_LR_STKOFF(r1)
	ld	r7, HSTATE_HOST_MSR(r13)

	/* Return the trap number on this thread as the return value */
 * If we came back from the guest via a relocation-on interrupt,
 * we will be in virtual mode at this point, which makes it a
 * little easier to get back to the caller.
	andi.	r0, r0, MSR_IR		/* in real mode? */

	/* RFI into the highmem handler */
	mtmsrd	r6, 1			/* Clear RI in MSR */

	/* Virtual-mode return */
kvmppc_primary_no_guest:
	/* We handle this much like a ceded vcpu */
	/* put the HDEC into the DEC, since HDEC interrupts don't wake us */
	/* HDEC may be larger than DEC for arch >= v3.00, but since the */
	/* HDEC value came from DEC in the first place, it will fit */
 * Make sure the primary has finished the MMU switch.
 * We should never get here on a secondary thread, but
 * check it for robustness' sake.
	ld	r5, HSTATE_KVM_VCORE(r13)
65:	lbz	r0, VCORE_IN_GUEST(r5)

	/* set our bit in napping_threads */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r7, HSTATE_PTID(r13)
	addi	r6, r5, VCORE_NAPPING_THREADS

	/* order napping_threads update vs testing entry_exit_map */
	lwz	r7, VCORE_ENTRY_EXIT(r5)
	bge	kvm_novcpu_exit	/* another thread already exiting */
	li	r3, NAPPING_NOVCPU
	stb	r3, HSTATE_NAPPING(r13)

	li	r3, 0		/* Don't wake on privileged (OS) doorbell */

 * Entered from kvm_start_guest if kvm_hstate.napping is set
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	stb	r0, HSTATE_NAPPING(r13)

	/* check the wake reason */
	bl	kvmppc_check_wake_reason

 * Restore volatile registers since we could have called
 * a C routine in kvmppc_check_wake_reason.
	ld	r5, HSTATE_KVM_VCORE(r13)

	/* see if any other thread is already exiting */
	lwz	r0, VCORE_ENTRY_EXIT(r5)

	/* clear our bit in napping_threads */
	lbz	r7, HSTATE_PTID(r13)
	addi	r6, r5, VCORE_NAPPING_THREADS

	/* See if the wake reason means we need to exit */
	/* See if our timeslice has expired (HDEC is negative) */
	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER

	/* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
	ld	r4, HSTATE_KVM_VCPU(r13)
	beq	kvmppc_primary_no_guest

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	ld	r4, HSTATE_KVM_VCPU(r13)
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time

	stw	r12, STACK_SLOT_TRAP(r1)
	bl	kvmhv_commence_exit
	b	kvmhv_switch_to_host
 * We come in here when woken from Linux offline idle code.
 * r3 contains the SRR1 wakeup value, SRR1 is trashed.
_GLOBAL(idle_kvm_start_guest)
	std	r5, 8(r1)	// Save CR in caller's frame
	std	r0, 16(r1)	// Save LR in caller's frame
	// Create frame on emergency stack
	ld	r4, PACAEMERGSP(r13)
	stdu	r1, -SWITCH_FRAME_SIZE(r4)
	// Switch to new frame on emergency stack
	std	r3, 32(r1)	// Save SRR1 wakeup value

 * Could avoid this and pass it through in r3. For now,
 * code expects it to be in SRR1.
	stb	r0,PACA_FTRACE_ENABLED(r13)

	li	r0,KVM_HWTHREAD_IN_KVM
	stb	r0,HSTATE_HWTHREAD_STATE(r13)

	/* kvm cede / napping does not come through here */
	lbz	r0,HSTATE_NAPPING(r13)
	stb	r0, HSTATE_NAPPING(r13)

 * We weren't napping due to cede, so this must be a secondary
 * thread being woken up to run a guest, or being woken up due
 * to a stray IPI. (Or due to some machine check or hypervisor
 * maintenance interrupt while the core is in KVM.)

	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason

 * kvmppc_check_wake_reason could invoke a C routine, but we
 * have no volatile registers to restore when we return.

	/* get vcore pointer, NULL if we have nothing to run */
	ld	r5,HSTATE_KVM_VCORE(r13)

	/* if we have no vcore to run, go back to sleep */

kvm_secondary_got_guest:
	// About to go to guest, clear saved SRR1

	/* Set HSTATE_DSCR(r13) to something sensible */
	ld	r6, PACA_DSCR_DEFAULT(r13)
	std	r6, HSTATE_DSCR(r13)

	/* On thread 0 of a subcore, set HDEC to max */
	lbz	r4, HSTATE_PTID(r13)
	LOAD_REG_ADDR(r6, decrementer_max)

	/* and set per-LPAR registers, if doing dynamic micro-threading */
	ld	r6, HSTATE_SPLIT_MODE(r13)
	ld	r0, KVM_SPLIT_RPR(r6)
	ld	r0, KVM_SPLIT_PMMAR(r6)
	ld	r0, KVM_SPLIT_LDBAR(r6)

	/* On P9 we use the split_info for coordinating LPCR changes */
	lwz	r4, KVM_SPLIT_DO_SET(r6)
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)

	/* Order load of vcpu after load of vcore */
	ld	r4, HSTATE_KVM_VCPU(r13)

	/* Back from the guest, go back to nap */
	/* Clear our vcpu and vcore pointers so we don't come back in early */
	std	r0, HSTATE_KVM_VCPU(r13)
 * Once we clear HSTATE_KVM_VCORE(r13), the code in
 * kvmppc_run_core() is going to assume that all our vcpu
 * state is visible in memory. This lwsync makes sure of that.
	std	r0, HSTATE_KVM_VCORE(r13)
 * All secondaries exiting the guest will fall through this path.
 * Before proceeding, just check for an HMI interrupt and
 * invoke the OPAL HMI handler. By now we are sure that the
 * primary thread on this core/subcore has already done the
 * partition switch and TB resync, so we are good to call the handler.
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	li	r3,0			/* NULL argument */
	bl	hmi_exception_realmode
 * At this point we have finished executing in the guest.
 * We need to wait for hwthread_req to become zero, since
 * we may not turn on the MMU while hwthread_req is non-zero.
 * While waiting we also need to check if we are given a vcpu to run.
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)

	li	r0, KVM_HWTHREAD_IN_KERNEL
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	/* need to recheck hwthread_req after a barrier, to avoid race */
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)

 * Jump to idle_return_gpr_loss, which returns to the
 * idle_kvm_start_guest caller.
	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1

	// Return SRR1 wakeup value, or 0 if we went into the guest
	ld	r1, 0(r1)	// Switch back to caller stack
	ld	r0, 16(r1)	// Reload LR
	ld	r5, 8(r1)	// Reload CR

	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r3, HSTATE_SPLIT_MODE(r13)
	lwz	r0, KVM_SPLIT_DO_SET(r3)
	lwz	r0, KVM_SPLIT_DO_RESTORE(r3)
	lbz	r0, KVM_SPLIT_DO_NAP(r3)
	b	kvm_secondary_got_guest

54:	li	r0, KVM_HWTHREAD_IN_KVM
	stb	r0, HSTATE_HWTHREAD_STATE(r13)

	/* Set LPCR, LPIDR etc. on P9 */
	bl	kvmhv_p9_restore_lpcr
 * Here the primary thread is trying to return the core to
 * whole-core mode, so we need to nap.
 * When secondaries are napping in kvm_unsplit_nap() with
 * hwthread_req = 1, an HMI is ignored even though the subcores
 * have already exited the guest. The HMI then keeps waking the
 * secondaries from nap in a loop, and they always go back to nap
 * since no vcore is assigned to them. This makes it impossible for
 * the primary thread to get hold of the secondary threads,
 * resulting in a soft lockup in the KVM path.
 * So check whether an HMI is pending and handle it before we go to nap.
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	li	r3, 0			/* NULL argument */
	bl	hmi_exception_realmode

 * Ensure that secondary doesn't nap when it has
 * its vcore pointer set.
	sync		/* matches smp_mb() before setting split_info.do_nap */
	ld	r0, HSTATE_KVM_VCORE(r13)

	/* clear any pending message */
	lis	r6, (PPC_DBELL_SERVER << (63-36))@h
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* Set kvm_split_mode.napped[tid] = 1 */
	ld	r3, HSTATE_SPLIT_MODE(r13)
	lbz	r4, HSTATE_TID(r13)
	addi	r4, r4, KVM_SPLIT_NAPPED

	/* Check the do_nap flag again after setting napped[] */
	lbz	r0, KVM_SPLIT_DO_NAP(r3)

	li	r3, NAPPING_UNSPLIT
	stb	r3, HSTATE_NAPPING(r13)
	li	r3, (LPCR_PECEDH | LPCR_PECE0) >> 4
	rlwimi	r5, r3, 4, (LPCR_PECEDP | LPCR_PECEDH | LPCR_PECE0 | LPCR_PECE1)
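	/*
	 * The PECE constants are too wide for li's signed 16-bit
	 * immediate, so they are loaded shifted right by 4, and the
	 * rlwimi rotates them back into position while clearing the
	 * other PECE bits covered by the mask.
	 */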
/******************************************************************************
 *****************************************************************************/

.global kvmppc_hv_entry
 * R4 = vcpu pointer (or NULL)
 * all other volatile GPRS = free
 * Does not preserve non-volatile GPRs or CR fields
	std	r0, PPC_LR_STKOFF(r1)

	/* Save R1 in the PACA */
	std	r1, HSTATE_HOST_R1(r13)

	li	r6, KVM_GUEST_MODE_HOST_HV
	stb	r6, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Store initial timestamp */
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing

	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r9, VCORE_KVM(r5)	/* pointer to struct kvm */

 * POWER7/POWER8 host -> guest partition switch code.
 * We don't have to lock against concurrent tlbies,
 * but we do have to coordinate across hardware threads.
	/* Set bit in entry map iff exit map is zero. */
	lbz	r6, HSTATE_PTID(r13)
	addi	r8, r5, VCORE_ENTRY_EXIT
	cmpwi	r3, 0x100		/* any threads starting to exit? */
	bge	secondary_too_late	/* if so we're too late to the party */

	/* Primary thread switches to guest partition. */
	li	r0,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)

	/* See if we need to flush the TLB. */
	mr	r3, r9			/* kvm pointer */
	lhz	r4, PACAPACAINDEX(r13)	/* physical cpu number */
	li	r5, 0			/* nested vcpu pointer */
	bl	kvmppc_check_need_tlb_flush
	ld	r5, HSTATE_KVM_VCORE(r13)

	/* Add timebase offset onto timebase */
22:	ld	r8,VCORE_TB_OFFSET(r5)
	std	r8, VCORE_TB_OFFSET_APPL(r5)
	mftb	r6		/* current host timebase */
	mtspr	SPRN_TBU40,r8	/* update upper 40 bits */
	mftb	r7		/* check if lower 24 bits overflowed */
	addis	r8,r8,0x100	/* if so, increment upper 40 bits */
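	/*
	 * mtspr TBU40 writes only the upper 40 bits of the timebase,
	 * leaving the low 24 bits unchanged.  If the low 24 bits
	 * wrapped between the two mftb reads, the timebase would
	 * appear to have gone backwards, so add 1 to the upper 40
	 * bits (addis of 0x100 == 1 << 24) and write TBU40 again.
	 */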
	/* Load guest PCR value to select appropriate compat mode */
37:	ld	r7, VCORE_PCR(r5)
	LOAD_REG_IMMEDIATE(r6, PCR_MASK)

	/* DPDES and VTB are shared between threads */
	ld	r8, VCORE_DPDES(r5)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* Mark the subcore state as inside guest */
	bl	kvmppc_subcore_enter_guest
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r4, HSTATE_KVM_VCPU(r13)
	stb	r0,VCORE_IN_GUEST(r5)	/* signal secondaries to continue */

	/* Do we have a guest vcpu to run? */
	beq	kvmppc_primary_no_guest

	/* Increment yield count if they have a VPA */
	li	r6, LPPACA_YIELDCOUNT
	stb	r6, VCPU_VPA_DIRTY(r4)

	/* Save purr/spurr */
	std	r5,HSTATE_PURR(r13)
	std	r6,HSTATE_SPURR(r13)

	/* Save host values of some registers */
	std	r5, STACK_SLOT_TID(r1)
	std	r6, STACK_SLOT_PSSCR(r1)
	std	r7, STACK_SLOT_PID(r1)
	std	r5, STACK_SLOT_HFSCR(r1)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	std	r5, STACK_SLOT_CIABR(r1)
	std	r6, STACK_SLOT_DAWR(r1)
	std	r7, STACK_SLOT_DAWRX(r1)
	std	r8, STACK_SLOT_IAMR(r1)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r5, STACK_SLOT_AMR(r1)
	std	r6, STACK_SLOT_UAMOR(r1)

	/* Set partition DABR */
	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
	lwz	r5,VCPU_DABRX(r4)
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 * Branch around the call if both CPU_FTR_TM and
 * CPU_FTR_P9_TM_HV_ASSIST are off.
END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
	li	r5, 0			/* don't preserve non-vol regs */
	bl	kvmppc_restore_tm_hv
	ld	r4, HSTATE_KVM_VCPU(r13)

	/* Load guest PMU registers; r4 = vcpu pointer here */
	bl	kvmhv_load_guest_pmu

	/* Load up FP, VMX and VSX registers */
	ld	r4, HSTATE_KVM_VCPU(r13)

	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

	/* Switch DSCR to guest value */

	/* Skip next section on POWER7 */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

	/* Load up POWER8-specific registers */
	lwz	r6, VCPU_PSPB(r4)

 * Handle broken DAWR case by not writing it. This means we
 * can still store the DAWR register for migration.
	LOAD_REG_ADDR(r5, dawr_force_enable)
	ld	r6, VCPU_DAWRX(r4)
	ld	r7, VCPU_CIABR(r4)
	ld	r8, VCPU_EBBHR(r4)
	ld	r5, VCPU_EBBRR(r4)
	ld	r6, VCPU_BESCR(r4)
	lwz	r7, VCPU_GUEST_PID(r4)

	/* POWER8-only registers */
	ld	r5, VCPU_TCSCR(r4)
	ld	r7, VCPU_CSIGR(r4)

	/* POWER9-only registers */
	ld	r6, VCPU_PSSCR(r4)
	lbz	r8, HSTATE_FAKE_SUSPEND(r13)
	oris	r6, r6, PSSCR_EC@h	/* This makes stop trap to HV */
	rldimi	r6, r8, PSSCR_FAKE_SUSPEND_LG, 63 - PSSCR_FAKE_SUSPEND_LG
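	/*
	 * The rldimi inserts the fake-suspend flag from r8 into the
	 * PSSCR_FAKE_SUSPEND bit of the guest PSSCR image; with
	 * PSSCR_EC set, any stop instruction traps to the hypervisor.
	 */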
	ld	r7, VCPU_HFSCR(r4)
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)

	ld	r5, VCPU_SPRG0(r4)
	ld	r6, VCPU_SPRG1(r4)
	ld	r7, VCPU_SPRG2(r4)
	ld	r8, VCPU_SPRG3(r4)

	/* Load up DAR and DSISR */
	lwz	r6, VCPU_DSISR(r4)

	/* Restore AMR and UAMOR, set AMOR to all 1s */

	/* Restore state of CTRL run bit; assume 1 on entry */

	/* Secondary threads wait for primary to have done partition switch */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r6, HSTATE_PTID(r13)
	lbz	r0, VCORE_IN_GUEST(r5)
20:	lwz	r3, VCORE_ENTRY_EXIT(r5)
	lbz	r0, VCORE_IN_GUEST(r5)

 * Set the decrementer to the guest decrementer.
	ld	r8,VCPU_DEC_EXPIRES(r4)
	/* r8 is a host timebase value here, convert to guest TB */
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r6,VCORE_TB_OFFSET_APPL(r5)

	/* Check if HDEC expires soon */
	cmpdi	r3, 512		/* 1 microsecond */

	/* For hash guest, clear out and reload the SLB */
	lbz	r0, KVM_RADIX(r6)

	/* Load up guest SLB entries (N.B. slb_max will be 0 for radix) */
	lwz	r5,VCPU_SLB_MAX(r4)
1:	ld	r8,VCPU_SLB_E(r6)
	addi	r6,r6,VCPU_SLB_SIZE

#ifdef CONFIG_KVM_XICS
	/* We are entering the guest on that thread, push VCPU to XIVE */
	ld	r11, VCPU_XIVE_SAVED_STATE(r4)
	lwz	r8, VCPU_XIVE_CAM_WORD(r4)
	li	r7, TM_QW1_OS + TM_WORD2
	andi.	r0, r0, MSR_DR		/* in real mode? */
	ld	r10, HSTATE_XIVE_TIMA_VIRT(r13)
2:	ld	r10, HSTATE_XIVE_TIMA_PHYS(r13)
	stb	r9, VCPU_XIVE_PUSHED(r4)
 * We clear the irq_pending flag. There is a small chance of a
 * race vs. the escalation interrupt happening on another
 * processor setting it again, but the only consequence is to
 * cause a spurious wakeup on the next H_CEDE, which is not an error.
	stb	r0, VCPU_IRQ_PENDING(r4)
 * In single escalation mode, if the escalation interrupt is
	lbz	r0, VCPU_XIVE_ESC_ON(r4)
	li	r9, XIVE_ESB_SET_PQ_01
	beq	4f			/* in real mode? */
	ld	r10, VCPU_XIVE_ESC_VADDR(r4)
4:	ld	r10, VCPU_XIVE_ESC_RADDR(r4)
/* We have a possible subtle race here: The escalation interrupt might
 * have fired and be on its way to the host queue while we mask it,
 * and if we unmask it early enough (re-cede right away), there is
 * a theoretical possibility that it fires again, thus landing in the
 * target queue more than once, which is a big no-no.
 * Fortunately, solving this is rather easy. If the above load setting
 * PQ to 01 returns a previous value where P is set, then we know the
 * escalation interrupt is somewhere on its way to the host. In that
 * case we simply don't clear the xive_esc_on flag below. It will
 * eventually be cleared by the handler for the escalation interrupt.
 * Then, when doing a cede, we check that flag again before re-enabling
 * the escalation interrupt, and if set, we abort the cede.
	andi.	r0, r0, XIVE_ESB_VAL_P

	/* Now P is 0, we can clear the flag */
	stb	r0, VCPU_XIVE_ESC_ON(r4)
#endif /* CONFIG_KVM_XICS */

	stw	r0, STACK_SLOT_SHORT_PATH(r1)

deliver_guest_interrupt:	/* r4 = vcpu, r13 = paca */
	/* Check if we can deliver an external or decrementer interrupt now */
	ld	r0, VCPU_PENDING_EXC(r4)
	/* On POWER9, also check for emulated doorbell interrupt */
	lbz	r3, VCPU_DBELL_REQ(r4)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	bl	kvmppc_guest_entry_inject_int
	ld	r4, HSTATE_KVM_VCPU(r13)
	ld	r6, VCPU_SRR0(r4)
	ld	r7, VCPU_SRR1(r4)

	ld	r11, VCPU_MSR(r4)
	/* r11 = vcpu->arch.msr & ~MSR_HV */
	rldicl	r11, r11, 63 - MSR_HV_LG, 1
	rotldi	r11, r11, 1 + MSR_HV_LG
	ori	r11, r11, MSR_ME
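	/*
	 * The rldicl/rotldi pair clears just MSR_HV: rotate the MSR
	 * left so the HV bit lands in the top bit, mask that bit off
	 * (rldicl with mask-begin 1), then rotate back into place.
	 * MSR_ME is then forced on, since the guest must always run
	 * with machine checks enabled.
	 */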
 * R10: value for HSRR0
 * R11: value for HSRR1
	stb	r0,VCPU_CEDED(r4)	/* cancel cede */
	mtspr	SPRN_HSRR0,r10
	mtspr	SPRN_HSRR1,r11

	/* Activate guest mode, so faults get handled by KVM */
	li	r9, KVM_GUEST_MODE_GUEST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Accumulate timing */
	addi	r3, r4, VCPU_TB_GUEST
	bl	kvmhv_accumulate_time

	ld	r5, VCPU_CFAR(r4)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	ld	r1, VCPU_GPR(R1)(r4)
	ld	r5, VCPU_GPR(R5)(r4)
	ld	r8, VCPU_GPR(R8)(r4)
	ld	r9, VCPU_GPR(R9)(r4)
	ld	r10, VCPU_GPR(R10)(r4)
	ld	r11, VCPU_GPR(R11)(r4)
	ld	r12, VCPU_GPR(R12)(r4)
	ld	r13, VCPU_GPR(R13)(r4)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/* Move canary into DSISR to check for later */
	mtspr	SPRN_HDSISR, r0
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

	lbz	r7, KVM_SECURE_GUEST(r6)
	ld	r6, VCPU_GPR(R6)(r4)
	ld	r7, VCPU_GPR(R7)(r4)
	ld	r0, VCPU_GPR(R0)(r4)
	ld	r2, VCPU_GPR(R2)(r4)
	ld	r3, VCPU_GPR(R3)(r4)
	ld	r4, VCPU_GPR(R4)(r4)
 * Use UV_RETURN ultracall to return control back to the Ultravisor after
 * processing a hypercall or interrupt that was forwarded (a.k.a. reflected)
 * to the Hypervisor.
 * All registers have already been loaded, except:
 * R2 = SRR1, so UV can detect a synthesized interrupt (if any)
	ld	r0, VCPU_GPR(R3)(r4)
	ori	r3, r3, UV_RETURN
	ld	r4, VCPU_GPR(R4)(r4)

 * Enter the guest on a P9 or later system where we have exactly
 * one vcpu per vcore and we don't need to go to real mode
 * (which implies that host and guest are both using radix MMU mode).
 * Most SPRs and all the VSRs have been loaded already.
_GLOBAL(__kvmhv_vcpu_entry_p9)
EXPORT_SYMBOL_GPL(__kvmhv_vcpu_entry_p9)
	std	r0, PPC_LR_STKOFF(r1)
	stw	r0, STACK_SLOT_SHORT_PATH(r1)
	std	r3, HSTATE_KVM_VCPU(r13)
	std	r1, HSTATE_HOST_R1(r13)
	std	reg, STACK_SLOT_NVGPRS + ((reg - 14) * 8)(r1)
	ld	reg, __VCPU_GPR(reg)(r3)
	std	r10, HSTATE_HOST_MSR(r13)
	b	fast_guest_entry_c
guest_exit_short_path:
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)
	std	reg, __VCPU_GPR(reg)(r9)
	ld	reg, STACK_SLOT_NVGPRS + ((reg - 14) * 8)(r1)
	mr	r3, r12			/* trap number */
	ld	r0, PPC_LR_STKOFF(r1)

	/* If we are in real mode, do a rfid to get back to the caller */
	andi.	r5, r4, MSR_IR
	rldicl	r5, r4, 64 - MSR_TS_S_LG, 62	/* extract TS field */
	ld	r10, HSTATE_HOST_MSR(r13)
	rldimi	r10, r5, MSR_TS_S_LG, 63 - MSR_TS_T_LG
	mtspr	SPRN_SRR1, r10
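	/*
	 * The rldicl/rldimi pair copies the 2-bit MSR[TS] field from
	 * the guest MSR into the SRR1 image, so that the rfid back to
	 * the caller preserves the transactional state we exited with.
	 */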
	stw	r12, STACK_SLOT_TRAP(r1)
	stw	r12, VCPU_TRAP(r4)
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
11:	b	kvmhv_switch_to_host

	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER
12:	stw	r12, VCPU_TRAP(r4)
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time

/******************************************************************************
 *****************************************************************************/

 * We come here from the first-level interrupt handlers.
	.globl	kvmppc_interrupt_hv
kvmppc_interrupt_hv:
 * Register contents:
 * R12 = (guest CR << 32) | interrupt vector
 * guest R12 saved in shadow VCPU SCRATCH0
 * guest CTR saved in shadow VCPU SCRATCH1 if RELOCATABLE
 * guest R13 saved in SPRN_SCRATCH0
	std	r9, HSTATE_SCRATCH2(r13)
	lbz	r9, HSTATE_IN_GUEST(r13)
	cmpwi	r9, KVM_GUEST_MODE_HOST_HV
	beq	kvmppc_bad_host_intr
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	cmpwi	r9, KVM_GUEST_MODE_GUEST
	ld	r9, HSTATE_SCRATCH2(r13)
	beq	kvmppc_interrupt_pr

	/* We're now back in the host but in guest MMU context */
	li	r9, KVM_GUEST_MODE_HOST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Save registers */
	std	r0, VCPU_GPR(R0)(r9)
	std	r1, VCPU_GPR(R1)(r9)
	std	r2, VCPU_GPR(R2)(r9)
	std	r3, VCPU_GPR(R3)(r9)
	std	r4, VCPU_GPR(R4)(r9)
	std	r5, VCPU_GPR(R5)(r9)
	std	r6, VCPU_GPR(R6)(r9)
	std	r7, VCPU_GPR(R7)(r9)
	std	r8, VCPU_GPR(R8)(r9)
	ld	r0, HSTATE_SCRATCH2(r13)
	std	r0, VCPU_GPR(R9)(r9)
	std	r10, VCPU_GPR(R10)(r9)
	std	r11, VCPU_GPR(R11)(r9)
	ld	r3, HSTATE_SCRATCH0(r13)
	std	r3, VCPU_GPR(R12)(r9)
	/* CR is in the high half of r12 */

	ld	r3, HSTATE_CFAR(r13)
	std	r3, VCPU_CFAR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
	ld	r4, HSTATE_PPR(r13)
	std	r4, VCPU_PPR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/* Restore R1/R2 so we can handle faults */
	ld	r1, HSTATE_HOST_R1(r13)

	mfspr	r10, SPRN_SRR0
	mfspr	r11, SPRN_SRR1
	std	r10, VCPU_SRR0(r9)
	std	r11, VCPU_SRR1(r9)
	/* trap is in the low half of r12, clear CR from the high half */
	andi.	r0, r12, 2	/* need to read HSRR0/1? */
	mfspr	r10, SPRN_HSRR0
	mfspr	r11, SPRN_HSRR1
1:	std	r10, VCPU_PC(r9)
	std	r11, VCPU_MSR(r9)

	std	r3, VCPU_GPR(R13)(r9)

	stw	r12,VCPU_TRAP(r9)

 * Now that we have saved away SRR0/1 and HSRR0/1,
 * interrupts are recoverable in principle, so set MSR_RI.
 * This becomes important for relocation-on interrupts from
 * the guest, which we can get in radix mode on POWER9.

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r9, VCPU_TB_RMINTR
	bl	kvmhv_accumulate_time
	ld	r5, VCPU_GPR(R5)(r9)
	ld	r6, VCPU_GPR(R6)(r9)
	ld	r7, VCPU_GPR(R7)(r9)
	ld	r8, VCPU_GPR(R8)(r9)

	/* Save HEIR (HV emulation assist reg) in emul_inst
	   if this is an HEI (HV emulation interrupt, e40) */
	li	r3,KVM_INST_FETCH_FAILED
	stw	r3,VCPU_LAST_INST(r9)
	cmpwi	r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
11:	stw	r3,VCPU_HEIR(r9)

	/* these are volatile across C function calls */
#ifdef CONFIG_RELOCATABLE
	ld	r3, HSTATE_SCRATCH1(r13)
	std	r3, VCPU_CTR(r9)
	std	r4, VCPU_XER(r9)

	/* Save more register state */
	std	r3, VCPU_DAR(r9)
	stw	r4, VCPU_DSISR(r9)

	/* If this is a page table miss then see if it's theirs or ours */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	std	r3, VCPU_FAULT_DAR(r9)
	stw	r4, VCPU_FAULT_DSISR(r9)
	cmpwi	r12, BOOK3S_INTERRUPT_H_INST_STORAGE

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* For softpatch interrupt, go off and do TM instruction emulation */
	cmpwi	r12, BOOK3S_INTERRUPT_HV_SOFTPATCH

	/* See if this is a leftover HDEC interrupt */
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	bge	fast_guest_return

	/* See if this is an hcall we can handle in real mode */
	cmpwi	r12,BOOK3S_INTERRUPT_SYSCALL
	beq	hcall_try_real_mode

	/* Hypervisor doorbell - exit only if host IPI flag set */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DOORBELL
	/* always exit if we're running a nested guest */
	ld	r0, VCPU_NESTED(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	lbz	r0, HSTATE_HOST_IPI(r13)
	beq	maybe_reenter_guest

	/* If it's a hypervisor facility unavailable interrupt, save HFSCR */
	cmpwi	r12, BOOK3S_INTERRUPT_H_FAC_UNAVAIL
	mfspr	r3, SPRN_HFSCR
	std	r3, VCPU_HFSCR(r9)
	/* External interrupt? */
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	beq	kvmppc_guest_external
	/* See if it is a machine check */
	cmpwi	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	beq	machine_check_realmode
	/* Or a hypervisor maintenance interrupt */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI

guest_exit_cont:		/* r9 = vcpu, r12 = trap, r13 = paca */
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r9, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time

#ifdef CONFIG_KVM_XICS
	/* We are exiting, pull the VP from the XIVE */
	lbz	r0, VCPU_XIVE_PUSHED(r9)
	li	r7, TM_SPC_PULL_OS_CTX
	andi.	r0, r0, MSR_DR		/* in real mode? */
	ld	r10, HSTATE_XIVE_TIMA_VIRT(r13)
	/* First load to pull the context, we ignore the value */
	/* Second load to recover the context state (Words 0 and 1) */
2:	ld	r10, HSTATE_XIVE_TIMA_PHYS(r13)
	/* First load to pull the context, we ignore the value */
	/* Second load to recover the context state (Words 0 and 1) */
3:	std	r11, VCPU_XIVE_SAVED_STATE(r9)
	/* Fixup some of the state for the next load */
	stb	r10, VCPU_XIVE_PUSHED(r9)
	stb	r10, (VCPU_XIVE_SAVED_STATE+3)(r9)
	stb	r0, (VCPU_XIVE_SAVED_STATE+4)(r9)
#endif /* CONFIG_KVM_XICS */

 * Possibly flush the link stack here, before we do a blr in
 * guest_exit_short_path.
	patch_site 1b patch__call_kvm_flush_link_stack

	/* If we came in through the P9 short path, go back out to C now */
	lwz	r0, STACK_SLOT_SHORT_PATH(r1)
	bne	guest_exit_short_path

	/* For hash guest, read the guest SLB and save it away */
	lbz	r0, KVM_RADIX(r5)
	bne	3f			/* for radix, save 0 entries */
	lwz	r0,VCPU_SLB_NR(r9)	/* number of entries in SLB */
	andis.	r0,r8,SLB_ESID_V@h
	add	r8,r8,r6		/* put index in */
	std	r8,VCPU_SLB_E(r7)
	std	r3,VCPU_SLB_V(r7)
	addi	r7,r7,VCPU_SLB_SIZE

	/* Finally clear out the SLB */
3:	stw	r5,VCPU_SLB_MAX(r9)

	/* load host SLB entries */
BEGIN_MMU_FTR_SECTION
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
	ld	r8,PACA_SLBSHADOWPTR(r13)
	.rept	SLB_NUM_BOLTED
	li	r3, SLBSHADOW_SAVEAREA
	andis.	r7,r5,SLB_ESID_V@h

	stw	r12, STACK_SLOT_TRAP(r1)

	/* Do this before kvmhv_commence_exit so we know TB is guest TB */
	ld	r3, HSTATE_KVM_VCORE(r13)
	/* On P9, if the guest has large decr enabled, don't sign extend */
	ld	r4, VCORE_LPCR(r3)
	andis.	r4, r4, LPCR_LD@h
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	/* r5 is a guest timebase value here, convert to host TB */
	ld	r4,VCORE_TB_OFFSET_APPL(r3)
	std	r5,VCPU_DEC_EXPIRES(r9)

	/* Increment exit count, poke other threads to exit */
	bl	kvmhv_commence_exit
	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Stop others sending VCPU interrupts to this physical CPU */
	stw	r0, VCPU_CPU(r9)
	stw	r0, VCPU_THREAD_CPU(r9)

	/* Save guest CTRL register, set runlatch to 1 */
	stw	r6,VCPU_CTRL(r9)

 * Save the guest PURR/SPURR
	ld	r8,VCPU_SPURR(r9)
	std	r5,VCPU_PURR(r9)
	std	r6,VCPU_SPURR(r9)

 * Restore host PURR/SPURR and add guest times
 * so that the time in the guest gets accounted.
	ld	r3,HSTATE_PURR(r13)
	ld	r4,HSTATE_SPURR(r13)
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

	/* Save POWER8-specific registers */
	std	r5, VCPU_IAMR(r9)
	stw	r6, VCPU_PSPB(r9)
	std	r7, VCPU_FSCR(r9)
	std	r7, VCPU_TAR(r9)
	mfspr	r8, SPRN_EBBHR
	std	r8, VCPU_EBBHR(r9)
	mfspr	r5, SPRN_EBBRR
	mfspr	r6, SPRN_BESCR
	std	r5, VCPU_EBBRR(r9)
	std	r6, VCPU_BESCR(r9)
	stw	r7, VCPU_GUEST_PID(r9)
	std	r8, VCPU_WORT(r9)
	mfspr	r5, SPRN_TCSCR
	mfspr	r7, SPRN_CSIGR
	std	r5, VCPU_TCSCR(r9)
	std	r6, VCPU_ACOP(r9)
	std	r7, VCPU_CSIGR(r9)
	std	r8, VCPU_TACR(r9)
	mfspr	r6, SPRN_PSSCR
	std	r5, VCPU_TID(r9)
	rldicl	r6, r6, 4, 50		/* r6 &= PSSCR_GUEST_VIS */
	std	r6, VCPU_PSSCR(r9)
	/* Restore host HFSCR value */
	ld	r7, STACK_SLOT_HFSCR(r1)
	mtspr	SPRN_HFSCR, r7
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)

 * Restore various registers to 0, where non-zero values
 * set by the guest could disrupt the host.
	mtspr	SPRN_TCSCR, r0
	/* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
	mtspr	SPRN_MMCRS, r0
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)

	/* Save and restore AMR, IAMR and UAMOR before turning on the MMU */
	ld	r8, STACK_SLOT_IAMR(r1)

8:	/* Power7 jumps back in here */
	std	r6,VCPU_UAMOR(r9)
	ld	r5,STACK_SLOT_AMR(r1)
	ld	r6,STACK_SLOT_UAMOR(r1)
	mtspr	SPRN_UAMOR, r6

	/* Switch DSCR back to host value */
	ld	r7, HSTATE_DSCR(r13)
	std	r8, VCPU_DSCR(r9)

	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r9)
	std	r15, VCPU_GPR(R15)(r9)
	std	r16, VCPU_GPR(R16)(r9)
	std	r17, VCPU_GPR(R17)(r9)
	std	r18, VCPU_GPR(R18)(r9)
	std	r19, VCPU_GPR(R19)(r9)
	std	r20, VCPU_GPR(R20)(r9)
	std	r21, VCPU_GPR(R21)(r9)
	std	r22, VCPU_GPR(R22)(r9)
	std	r23, VCPU_GPR(R23)(r9)
	std	r24, VCPU_GPR(R24)(r9)
	std	r25, VCPU_GPR(R25)(r9)
	std	r26, VCPU_GPR(R26)(r9)
	std	r27, VCPU_GPR(R27)(r9)
	std	r28, VCPU_GPR(R28)(r9)
	std	r29, VCPU_GPR(R29)(r9)
	std	r30, VCPU_GPR(R30)(r9)
	std	r31, VCPU_GPR(R31)(r9)

	mfspr	r3, SPRN_SPRG0
	mfspr	r4, SPRN_SPRG1
	mfspr	r5, SPRN_SPRG2
	mfspr	r6, SPRN_SPRG3
	std	r3, VCPU_SPRG0(r9)
	std	r4, VCPU_SPRG1(r9)
	std	r5, VCPU_SPRG2(r9)
	std	r6, VCPU_SPRG3(r9)

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 * Branch around the call if both CPU_FTR_TM and
 * CPU_FTR_P9_TM_HV_ASSIST are off.
END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
	li	r5, 0			/* don't preserve non-vol regs */
	bl	kvmppc_save_tm_hv
	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Increment yield count if they have a VPA */
	ld	r8, VCPU_VPA(r9)	/* do they have a VPA? */
	li	r4, LPPACA_YIELDCOUNT
	stb	r3, VCPU_VPA_DIRTY(r9)

	/* Save PMU registers if requested */
	/* r8 and cr0.eq are live here */
	beq	21f			/* if no VPA, save PMU stuff anyway */
	lbz	r4, LPPACA_PMCINUSE(r8)
21:	bl	kvmhv_save_guest_pmu
	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Restore host values of some registers */
	ld	r5, STACK_SLOT_CIABR(r1)
	ld	r6, STACK_SLOT_DAWR(r1)
	ld	r7, STACK_SLOT_DAWRX(r1)
	mtspr	SPRN_CIABR, r5
 * If the DAWR doesn't work, it's ok to write these here as
 * this value should always be zero
	mtspr	SPRN_DAWRX, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	ld	r5, STACK_SLOT_TID(r1)
	ld	r6, STACK_SLOT_PSSCR(r1)
	ld	r7, STACK_SLOT_PID(r1)
	mtspr	SPRN_PSSCR, r6
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
#ifdef CONFIG_PPC_RADIX_MMU
 * Are we running hash or radix?
	lbz	r0, KVM_RADIX(r5)
 * Radix: do eieio; tlbsync; ptesync sequence in case we
 * interrupted the guest between a tlbie and a ptesync.

	/* Radix: Handle the case where the guest used an illegal PID */
	LOAD_REG_ADDR(r4, mmu_base_pid)
	lwz	r3, VCPU_GUEST_PID(r9)

 * Illegal PID, the HW might have prefetched and cached in the TLB
 * some translations for the LPID 0 / guest PID combination which
 * Linux doesn't know about, so we need to flush that PID out of
 * the TLB. First we need to set LPIDR to 0 so tlbiel applies to
 * the right context.

	/* Then do a congruence class local flush */
	lwz	r0,KVM_TLB_SETS(r6)
	li	r7,0x400		/* IS field = 0b01 */
	sldi	r0,r3,32		/* RS has PID */
1:	PPC_TLBIEL(7,0,2,1,1)		/* RIC=2, PRS=1, R=1 */
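	/*
	 * One tlbiel per TLB congruence class (KVM_TLB_SETS
	 * iterations): RIC=2 invalidates both the TLB entries and
	 * the page-walk cache, PRS=1 makes the flush process-scoped,
	 * and R=1 selects the radix page-table format.
	 */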
#endif /* CONFIG_PPC_RADIX_MMU */

 * POWER7/POWER8 guest -> host partition switch code.
 * We don't have to lock against tlbies but we do
 * have to coordinate the hardware threads.
 * Here STACK_SLOT_TRAP(r1) contains the trap number.
kvmhv_switch_to_host:
	/* Secondary threads wait for primary to do partition switch */
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */
	lbz	r3,HSTATE_PTID(r13)
13:	lbz	r3,VCORE_IN_GUEST(r5)

	/* Primary thread waits for all the secondaries to exit guest */
15:	lwz	r3,VCORE_ENTRY_EXIT(r5)
	rlwinm	r0,r3,32-8,0xff

	/* Did we actually switch to the guest at all? */
	lbz	r6, VCORE_IN_GUEST(r5)

	/* Primary thread switches back to host partition */
	lwz	r7,KVM_HOST_LPID(r4)
	ld	r6,KVM_HOST_SDR1(r4)
	li	r8,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_SDR1,r6		/* switch to host page table */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)

	/* DPDES and VTB are shared between threads */
	mfspr	r7, SPRN_DPDES
	std	r7, VCORE_DPDES(r5)
	std	r8, VCORE_VTB(r5)
	/* clear DPDES so we don't get guest doorbells in the host */
	mtspr	SPRN_DPDES, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* Subtract timebase offset from timebase */
	ld	r8, VCORE_TB_OFFSET_APPL(r5)
	std	r0, VCORE_TB_OFFSET_APPL(r5)
	mftb	r6			/* current guest timebase */
	mtspr	SPRN_TBU40,r8		/* update upper 40 bits */
	mftb	r7			/* check if lower 24 bits overflowed */
	addis	r8,r8,0x100		/* if so, increment upper 40 bits */

 * If this is an HMI, we called kvmppc_realmode_hmi_handler
 * above, which may or may not have already called
 * kvmppc_subcore_exit_guest. Fortunately, all that
 * kvmppc_subcore_exit_guest does is clear a flag, so calling
 * it again here is benign even if kvmppc_realmode_hmi_handler
 * has already called it.
	bl	kvmppc_subcore_exit_guest

30:	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */

	ld	r0, VCORE_PCR(r5)
	LOAD_REG_IMMEDIATE(r6, PCR_MASK)

	/* Signal secondary CPUs to continue */
	stb	r0,VCORE_IN_GUEST(r5)
19:	lis	r8,0x7fff		/* MAX_INT@h */

	/* On POWER9 with HPT-on-radix we need to wait for all other threads */
	ld	r3, HSTATE_SPLIT_MODE(r13)
	lwz	r8, KVM_SPLIT_DO_RESTORE(r3)
	bl	kvmhv_p9_restore_lpcr
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	ld	r8,KVM_HOST_LPCR(r4)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Finish timing, if we have a vcpu */
	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmhv_accumulate_time

	/* Unset guest mode */
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)

	lwz	r12, STACK_SLOT_TRAP(r1)	/* return trap # in r12 */
	ld	r0, SFS+PPC_LR_STKOFF(r1)

	.global	kvm_flush_link_stack
kvm_flush_link_stack:
	/* Save LR into r0 */

	/* Flush the link stack. On Power8 it's up to 32 entries in size. */

	/* And on Power9 it's up to 64. */
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
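	/*
	 * The flush itself is done by executing a series of bl
	 * instructions to the immediately following address: each bl
	 * pushes a harmless entry onto the hardware link-stack
	 * predictor, displacing anything the guest may have trained
	 * into it.  LR is restored from r0 afterwards.
	 */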
kvmppc_guest_external:
	/* External interrupt, first check for host_ipi. If this is
	 * set, we know the host wants us out so let's do it now

 * Restore the active volatile registers after returning from
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_EXTERNAL
 * kvmppc_read_intr return codes:
 * Exit to host (r3 > 0)
 *   1 An interrupt is pending that needs to be handled by the host
 *     Exit guest and return to host by branching to guest_exit_cont
 *   2 Passthrough that needs completion in the host
 *     Exit guest and return to host by branching to guest_exit_cont
 *     However, we also set r12 to BOOK3S_INTERRUPT_HV_RM_HARD
 *     to indicate to the host to complete handling the interrupt
 * Before returning to guest, we check if any CPU is heading out
 * to the host and if so, we head out also. If no CPUs are heading
 * out, check return values <= 0.
 * Return to guest (r3 <= 0)
 *   0 No external interrupt is pending
 *   -1 A guest wakeup IPI (which has now been cleared)
 *      In either case, we return to guest to deliver any pending
 *      guest interrupts.
 *   -2 A PCI passthrough external interrupt was handled
 *      (interrupt was delivered directly to guest)
 *      Return to guest to deliver any pending guest interrupts.
	/* Return code = 2 */
	li	r12, BOOK3S_INTERRUPT_HV_RM_HARD
	stw	r12, VCPU_TRAP(r9)

1:	/* Return code <= 1 */

	/* Return code <= 0 */
maybe_reenter_guest:
	ld	r5, HSTATE_KVM_VCORE(r13)
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	blt	deliver_guest_interrupt

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 * Softpatch interrupt for transactional memory emulation cases
 * on POWER9 DD2.2. This is early in the guest exit path - we
 * haven't saved registers or done a treclaim yet.
	/* Save instruction image in HEIR */
	stw	r3, VCPU_HEIR(r9)

 * The cases we want to handle here are those where the guest
 * is in real suspend mode and is trying to transition to
 * transactional mode.
	lbz	r0, HSTATE_FAKE_SUSPEND(r13)
	cmpwi	r0, 0		/* keep exiting guest if in fake suspend */
	rldicl	r3, r11, 64 - MSR_TS_S_LG, 62
	cmpwi	r3, 1		/* or if not in suspend state */
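	/*
	 * MSR[TS] encodings: 0b00 = non-transactional, 0b01 =
	 * suspended, 0b10 = transactional; r3 == 1 therefore means
	 * the guest was in suspend state.
	 */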
	/* Call C code to do the emulation */
	bl	kvmhv_p9_tm_emulation_early
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_HV_SOFTPATCH
	beq	guest_exit_cont		/* continue exiting if not handled */
	ld	r11, VCPU_MSR(r9)
	b	fast_interrupt_c_return	/* go back to guest if handled */
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

 * Check whether an HDSI is an HPTE not found fault or something else.
 * If it is an HPTE not found fault that is due to the guest accessing
 * a page that they have mapped but which we have paged out, then
 * we continue on with the guest exit path. In all other cases,
 * reflect the HDSI to the guest as a DSI.
	lbz	r0, KVM_RADIX(r3)
	mfspr	r6, SPRN_HDSISR
	/* Look for DSISR canary. If we find it, retry instruction */
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	bne	.Lradix_hdsi		/* on radix, just save DAR/DSISR/ASDR */
	/* HPTE not found fault or protection fault? */
	andis.	r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
	beq	1f			/* if not, send it to the guest */
	andi.	r0, r11, MSR_DR		/* data relocation enabled? */
	mfspr	r5, SPRN_ASDR		/* on POWER9, use ASDR to get VSID */
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	li	r0, BOOK3S_INTERRUPT_DATA_SEGMENT
	bne	7f			/* if no SLB entry found */
4:	std	r4, VCPU_FAULT_DAR(r9)
	stw	r6, VCPU_FAULT_DSISR(r9)

	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	li	r7, 1			/* data fault */
	bl	kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	cmpdi	r3, -1			/* handle in kernel mode */
	cmpdi	r3, -2			/* MMIO emulation; need instr word */

	/* Synthesize a DSI (or DSegI) for the guest */
	ld	r4, VCPU_FAULT_DAR(r9)
1:	li	r0, BOOK3S_INTERRUPT_DATA_STORAGE
	mtspr	SPRN_DSISR, r6
7:	mtspr	SPRN_DAR, r4
	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	bl	kvmppc_msr_interrupt
fast_interrupt_c_return:
6:	ld	r7, VCPU_CTR(r9)

3:	ld	r5, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r5)

	/* If this is for emulated MMIO, load the instruction word */
2:	li	r8, KVM_INST_FETCH_FAILED	/* In case lwz faults */

	/* Set guest mode to 'jump over instruction' so if lwz faults
	 * we'll just continue at the next IP. */
	li	r0, KVM_GUEST_MODE_SKIP
	stb	r0, HSTATE_IN_GUEST(r13)

	/* Do the access with MSR:DR enabled */
	ori	r4, r3, MSR_DR		/* Enable paging for data */

	/* Store the result */
	stw	r8, VCPU_LAST_INST(r9)

	/* Unset guest mode. */
	li	r0, KVM_GUEST_MODE_HOST_HV
	stb	r0, HSTATE_IN_GUEST(r13)

	std	r4, VCPU_FAULT_DAR(r9)
	stw	r6, VCPU_FAULT_DSISR(r9)
	std	r5, VCPU_FAULT_GPA(r9)

 * Similarly for an HISI, reflect it to the guest as an ISI unless
 * it is an HPTE not found fault for a page that we have paged out.
	lbz	r0, KVM_RADIX(r3)
	bne	.Lradix_hisi		/* for radix, just save ASDR */
	andis.	r0, r11, SRR1_ISI_NOPT@h
	andi.	r0, r11, MSR_IR		/* instruction relocation enabled? */
	mfspr	r5, SPRN_ASDR		/* on POWER9, use ASDR to get VSID */
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	li	r0, BOOK3S_INTERRUPT_INST_SEGMENT
	bne	7f			/* if no SLB entry found */

	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	li	r7, 0			/* instruction fault */
	bl	kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	fast_interrupt_c_return
	cmpdi	r3, -1			/* handle in kernel mode */

	/* Synthesize an ISI (or ISegI) for the guest */
1:	li	r0, BOOK3S_INTERRUPT_INST_STORAGE
7:	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	bl	kvmppc_msr_interrupt
	b	fast_interrupt_c_return

3:	ld	r6, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r6)

 * Try to handle an hcall in real mode.
 * Returns to the guest if we handle it, or continues on up to
 * the kernel if we can't (i.e. if we don't have a handler for
 * it, or if the handler returns H_TOO_HARD).
 * r5 - r8 contain hcall args,
 * r9 = vcpu, r10 = pc, r11 = msr, r12 = trap, r13 = paca
hcall_try_real_mode:
	ld	r3,VCPU_GPR(R3)(r9)
	/* sc 1 from userspace - reflect to guest syscall */
	bne	sc_1_fast_return
	/* sc 1 from nested guest - give it to L1 to handle */
	ld	r0, VCPU_NESTED(r9)
	cmpldi	r3,hcall_real_table_end - hcall_real_table

	/* See if this hcall is enabled for in-kernel handling */
	srdi	r0, r3, 8	/* r0 = (r3 / 4) >> 6 */
	sldi	r0, r0, 3	/* index into kvm->arch.enabled_hcalls[] */
	ld	r0, KVM_ENABLED_HCALLS(r4)
	rlwinm	r4, r3, 32-2, 0x3f	/* r4 = (r3 / 4) & 0x3f */
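	/*
	 * Hcall numbers are multiples of 4, so hcall/4 is the bit
	 * number in the enabled_hcalls[] bitmap: (hcall/4)/64 selects
	 * the doubleword and (hcall/4) & 0x3f the bit within it.
	 */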
	/* Get pointer to handler, if any, and call it */
	LOAD_REG_ADDR(r4, hcall_real_table)
	mr	r3,r9		/* get vcpu pointer */
	ld	r4,VCPU_GPR(R4)(r9)
	beq	hcall_real_fallback
	ld	r4,HSTATE_KVM_VCPU(r13)
	std	r3,VCPU_GPR(R3)(r4)

	li	r10, BOOK3S_INTERRUPT_SYSCALL
	bl	kvmppc_msr_interrupt
/* We've attempted a real mode hcall, but it has been punted back
 * to userspace. We need to restore some clobbered volatiles
 * before resuming the pass-it-to-qemu path. */
hcall_real_fallback:
	li	r12,BOOK3S_INTERRUPT_SYSCALL
	ld	r9, HSTATE_KVM_VCPU(r13)

	.globl	hcall_real_table
	.long	0		/* 0 - unused */
	.long	DOTSYM(kvmppc_h_remove) - hcall_real_table
	.long	DOTSYM(kvmppc_h_enter) - hcall_real_table
	.long	DOTSYM(kvmppc_h_read) - hcall_real_table
	.long	DOTSYM(kvmppc_h_clear_mod) - hcall_real_table
	.long	DOTSYM(kvmppc_h_clear_ref) - hcall_real_table
	.long	DOTSYM(kvmppc_h_protect) - hcall_real_table
#ifdef CONFIG_SPAPR_TCE_IOMMU
	.long	DOTSYM(kvmppc_h_get_tce) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_put_tce) - hcall_real_table
	.long	0		/* 0x24 - H_SET_SPRG0 */
	.long	DOTSYM(kvmppc_h_set_dabr) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_page_init) - hcall_real_table
#ifdef CONFIG_KVM_XICS
	.long	DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_ipoll) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table
	.long	0		/* 0x64 - H_EOI */
	.long	0		/* 0x68 - H_CPPR */
	.long	0		/* 0x6c - H_IPI */
	.long	0		/* 0x70 - H_IPOLL */
	.long	0		/* 0x74 - H_XIRR */
	.long	DOTSYM(kvmppc_h_cede) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_confer) - hcall_real_table
	.long	DOTSYM(kvmppc_h_bulk_remove) - hcall_real_table
	.long	DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table
#ifdef CONFIG_SPAPR_TCE_IOMMU
	.long	DOTSYM(kvmppc_rm_h_stuff_tce) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_put_tce_indirect) - hcall_real_table
#ifdef CONFIG_KVM_XICS
	.long	DOTSYM(kvmppc_rm_h_xirr_x) - hcall_real_table
	.long	0		/* 0x2fc - H_XIRR_X */
	.long	DOTSYM(kvmppc_h_random) - hcall_real_table
	.globl	hcall_real_table_end
hcall_real_table_end:

_GLOBAL_TOC(kvmppc_h_set_xdabr)
EXPORT_SYMBOL_GPL(kvmppc_h_set_xdabr)
	andi.	r0, r5, DABRX_USER | DABRX_KERNEL
	li	r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI
6:	li	r3, H_PARAMETER
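	/*
	 * A valid DABRX here must enable user and/or kernel matching,
	 * and may set nothing beyond those bits plus BTI; anything
	 * else fails the hcall with H_PARAMETER.
	 */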
_GLOBAL_TOC(kvmppc_h_set_dabr)
EXPORT_SYMBOL_GPL(kvmppc_h_set_dabr)
	li	r5, DABRX_USER | DABRX_KERNEL
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r4,VCPU_DABR(r3)
	stw	r5, VCPU_DABRX(r3)
	mtspr	SPRN_DABRX, r5
	/* Work around P7 bug where DABR can get corrupted on mtspr */
1:	mtspr	SPRN_DABR,r4

	LOAD_REG_ADDR(r11, dawr_force_enable)

	/* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
	rlwimi	r5, r4, 5, DAWRX_DR | DAWRX_DW
	rlwimi	r5, r4, 2, DAWRX_WT
	std	r4, VCPU_DAWR(r3)
	std	r5, VCPU_DAWRX(r3)
 * If we came in through the real mode hcall handler then it is necessary
 * to write the registers here since the return path won't. Otherwise it is
 * sufficient to store them in the vcpu struct as they will be loaded
 * next time the vcpu is run.
	andi.	r6, r6, MSR_DR		/* in real mode? */
	mtspr	SPRN_DAWRX, r5

_GLOBAL(kvmppc_h_cede)		/* r3 = vcpu pointer, r11 = msr, r13 = paca */
	std	r11,VCPU_MSR(r3)
	stb	r0,VCPU_CEDED(r3)
	sync			/* order setting ceded vs. testing prodded */
	lbz	r5,VCPU_PRODDED(r3)
	bne	kvm_cede_prodded
	li	r12,0		/* set trap to 0 to say hcall is handled */
	stw	r12,VCPU_TRAP(r3)
	std	r0,VCPU_GPR(R3)(r3)

 * Set our bit in the bitmask of napping threads unless all the
 * other threads are already napping, in which case we send this
	ld	r5,HSTATE_KVM_VCORE(r13)
	lbz	r6,HSTATE_PTID(r13)
	lwz	r8,VCORE_ENTRY_EXIT(r5)
	addi	r6,r5,VCORE_NAPPING_THREADS

	/* order napping_threads update vs testing entry_exit_map */
	stb	r0,HSTATE_NAPPING(r13)
	lwz	r7,VCORE_ENTRY_EXIT(r5)
	bge	33f		/* another thread already exiting */

 * Although not specifically required by the architecture, POWER7
 * preserves the following registers in nap mode, even if an SMT mode
 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.

	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r3)
	std	r15, VCPU_GPR(R15)(r3)
	std	r16, VCPU_GPR(R16)(r3)
	std	r17, VCPU_GPR(R17)(r3)
	std	r18, VCPU_GPR(R18)(r3)
	std	r19, VCPU_GPR(R19)(r3)
	std	r20, VCPU_GPR(R20)(r3)
	std	r21, VCPU_GPR(R21)(r3)
	std	r22, VCPU_GPR(R22)(r3)
	std	r23, VCPU_GPR(R23)(r3)
	std	r24, VCPU_GPR(R24)(r3)
	std	r25, VCPU_GPR(R25)(r3)
	std	r26, VCPU_GPR(R26)(r3)
	std	r27, VCPU_GPR(R27)(r3)
	std	r28, VCPU_GPR(R28)(r3)
	std	r29, VCPU_GPR(R29)(r3)
	std	r30, VCPU_GPR(R30)(r3)
	std	r31, VCPU_GPR(R31)(r3)

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 * Branch around the call if both CPU_FTR_TM and
 * CPU_FTR_P9_TM_HV_ASSIST are off.
END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
	ld	r3, HSTATE_KVM_VCPU(r13)
	li	r5, 0			/* don't preserve non-vol regs */
	bl	kvmppc_save_tm_hv

 * Set DEC to the smaller of DEC and HDEC, so that we wake
 * no later than the end of our timeslice (HDEC interrupts
 * don't wake us from nap).

	/* On P9 check whether the guest has large decrementer mode enabled */
	ld	r6, HSTATE_KVM_VCORE(r13)
	ld	r6, VCORE_LPCR(r6)
	andis.	r6, r6, LPCR_LD@h
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

	/* save expiry time of guest decrementer */
	ld	r4, HSTATE_KVM_VCPU(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r6, VCORE_TB_OFFSET_APPL(r5)
	subf	r3, r6, r3	/* convert to host TB value */
	std	r3, VCPU_DEC_EXPIRES(r4)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	ld	r4, HSTATE_KVM_VCPU(r13)
	addi	r3, r4, VCPU_TB_CEDE
	bl	kvmhv_accumulate_time

	lis	r3, LPCR_PECEDP@h	/* Do wake on privileged doorbell */

	/* Go back to host stack */
	ld	r1, HSTATE_HOST_R1(r13)
 * Take a nap until a decrementer or external or doorbell interrupt
 * occurs, with PECE1 and PECE0 set in LPCR.
 * On POWER8, set PECEDH, and if we are ceding, also set PECEDP.
 * Also clear the runlatch bit before napping.
	mfspr	r0, SPRN_CTRLF
	mtspr	SPRN_CTRLT, r0

	stb	r0,HSTATE_HWTHREAD_REQ(r13)
	ori	r5,r5,LPCR_PECE0 | LPCR_PECE1
	ori	r5, r5, LPCR_PECEDH
	rlwimi	r5, r3, 0, LPCR_PECEDP
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

kvm_nap_sequence:		/* desired LPCR value in r5 */
 * PSSCR bits:	exit criterion = 1 (wakeup based on LPCR at sreset)
 *		enable state loss = 1 (allow SMT mode switch)
 *		requested level = 0 (just stop dispatching)
	lis	r3, (PSSCR_EC | PSSCR_ESL)@h
	/* Set LPCR_PECE_HVEE bit to enable wakeup by HV interrupts */
	li	r4, LPCR_PECE_HVEE@higher
	li	r3, PNV_THREAD_NAP
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)

	bl	isa300_idle_stop_mayloss
	bl	isa206_idle_insn_mayloss
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_300)

	mfspr	r0, SPRN_CTRLF
	mtspr	SPRN_CTRLT, r0

	stb	r0, PACA_FTRACE_ENABLED(r13)

	li	r0, KVM_HWTHREAD_IN_KVM
	stb	r0, HSTATE_HWTHREAD_STATE(r13)

	lbz	r0, HSTATE_NAPPING(r13)
	cmpwi	r0, NAPPING_CEDE
	cmpwi	r0, NAPPING_NOVCPU
	beq	kvm_novcpu_wakeup
	cmpwi	r0, NAPPING_UNSPLIT
	beq	kvm_unsplit_wakeup
	twi	31,0,0 /* Nap state must not be zero */

	/* Woken by external or decrementer interrupt */

	/* get vcpu pointer */
	ld	r4, HSTATE_KVM_VCPU(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMINTR
	bl	kvmhv_accumulate_time

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 * Branch around the call if both CPU_FTR_TM and
 * CPU_FTR_P9_TM_HV_ASSIST are off.
END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS (but not CR)
	li	r5, 0			/* don't preserve non-vol regs */
	bl	kvmppc_restore_tm_hv
	ld	r4, HSTATE_KVM_VCPU(r13)

	/* load up FP state */

	/* Restore guest decrementer */
	ld	r3, VCPU_DEC_EXPIRES(r4)
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r6, VCORE_TB_OFFSET_APPL(r5)
	add	r3, r3, r6	/* convert host TB to guest TB value */

	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason
 * Restore volatile registers since we could have called a
 * C routine in kvmppc_check_wake_reason.
 * r3 tells us whether we need to return to the host or not.
 * WARNING: it gets checked further down; do not modify r3
 * until this check is done.
	ld	r4, HSTATE_KVM_VCPU(r13)
	/* clear our bit in vcore->napping_threads */
34:	ld	r5,HSTATE_KVM_VCORE(r13)
	lbz	r7,HSTATE_PTID(r13)
	addi	r6,r5,VCORE_NAPPING_THREADS
	stb	r0,HSTATE_NAPPING(r13)

	/* See if the wake reason saved in r3 means we need to exit */
	stw	r12, VCPU_TRAP(r4)
	b	maybe_reenter_guest
	/* the case where we cede after already being prodded */
	stb	r0,VCPU_PRODDED(r3)
	sync			/* order testing prodded vs. clearing ceded */
	stb	r0,VCPU_CEDED(r3)

	/* we've ceded but we want to give control to the host */
	ld	r9, HSTATE_KVM_VCPU(r13)
#ifdef CONFIG_KVM_XICS
	/* are we using XIVE with single escalation? */
	ld	r10, VCPU_XIVE_ESC_VADDR(r9)
	li	r6, XIVE_ESB_SET_PQ_00

 * If we still have a pending escalation, abort the cede,
 * and we must set PQ to 10 rather than 00 so that we don't
 * potentially end up with two entries for the escalation
 * interrupt in the XIVE interrupt queue. In that case
 * we also don't want to set xive_esc_on to 1 here in
 * case we race with xive_esc_irq().
	lbz	r5, VCPU_XIVE_ESC_ON(r9)
	stb	r0, VCPU_CEDED(r9)
	li	r6, XIVE_ESB_SET_PQ_10
	stb	r0, VCPU_XIVE_ESC_ON(r9)
	/* make sure store to xive_esc_on is seen before xive_esc_irq runs */

5:	/* Enable XIVE escalation */
	andi.	r0, r0, MSR_DR		/* in real mode? */
1:	ld	r10, VCPU_XIVE_ESC_RADDR(r9)
#endif /* CONFIG_KVM_XICS */
3:	b	guest_exit_cont

	/* Try to do machine check recovery in real mode */
machine_check_realmode:
	mr	r3, r9		/* get vcpu pointer */
	bl	kvmppc_realmode_machine_check
	/* all machine checks go to virtual mode for further handling */
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
 * Call C code to handle an HMI in real mode.
 * Only the primary thread does the call, secondary threads are handled
 * by calling hmi_exception_realmode() after kvmppc_hv_entry returns.
 * r9 points to the vcpu on entry.
2955 lbz r0, HSTATE_PTID(r13)
2958 bl kvmppc_realmode_hmi_handler
2959 ld r9, HSTATE_KVM_VCPU(r13)
2960 li r12, BOOK3S_INTERRUPT_HMI
2963 /*
2964 * Check the reason we woke from nap, and take appropriate action.
2965 * Returns (in r3):
2966 * 0 if nothing needs to be done
2967 * 1 if something happened that needs to be handled by the host
2968 * -1 if there was a guest wakeup (IPI or msgsnd)
2969 * -2 if we handled a PCI passthrough interrupt (returned by
2970 * kvmppc_read_intr only)
2971 *
2972 * Also sets r12 to the interrupt vector for any interrupt that needs
2973 * to be handled now by the host (0x500 for external interrupt), or zero.
2974 * Modifies all volatile registers (since it may call a C function).
2975 * This routine calls kvmppc_read_intr, a C function, if an external
2976 * interrupt is pending.
2977 */
2978 kvmppc_check_wake_reason:
2979 mfspr r6, SPRN_SRR1
2980 BEGIN_FTR_SECTION
2981 rlwinm r6, r6, 45-31, 0xf /* extract wake reason field (P8) */
2982 FTR_SECTION_ELSE
2983 rlwinm r6, r6, 45-31, 0xe /* P7 wake reason field is 3 bits */
2984 ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
2985 cmpwi r6, 8 /* was it an external interrupt? */
2986 beq 7f /* if so, see what it was */
2987 li r3, 0
2988 li r12, 0
2989 cmpwi r6, 6 /* was it the decrementer? */
2990 beq 0f
2991 BEGIN_FTR_SECTION
2992 cmpwi r6, 5 /* privileged doorbell? */
2993 beq 0f
2994 cmpwi r6, 3 /* hypervisor doorbell? */
2995 beq 3f
2996 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
2997 cmpwi r6, 0xa /* Hypervisor maintenance ? */
2998 beq 4f
2999 li r3, 1 /* anything else, return 1 */
3000 0: blr
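/*
 * The decode above corresponds roughly to this sketch (P8 encoding;
 * the rlwinm extracts the SRR1 wake-reason nibble, (srr1 >> 18) & 0xf):
 *
 *	switch ((srr1 >> 18) & 0xf) {
 *	case 8:   goto read_intr;	// external interrupt
 *	case 6:   return 0;		// decrementer
 *	case 5:   return 0;		// privileged doorbell (P8)
 *	case 3:   goto hv_doorbell;	// hypervisor doorbell (P8)
 *	case 0xa: goto hmi;		// hypervisor maintenance
 *	default:  return 1;		// let the host sort it out
 *	}
 */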
3002 /* hypervisor doorbell */
3003 3: li r12, BOOK3S_INTERRUPT_H_DOORBELL
3005 /*
3006 * Clear the doorbell as we will invoke the handler
3007 * explicitly in the guest exit path.
3008 */
3009 lis r6, (PPC_DBELL_SERVER << (63-36))@h
3010 PPC_MSGCLR(6)
3011 /* see if it's a host IPI */
3012 li r3, 1
3013 BEGIN_FTR_SECTION
3014 PPC_MSGSYNC
3015 lwsync
3016 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
3017 lbz r0, HSTATE_HOST_IPI(r13)
3018 cmpwi r0, 0
3019 bnelr
3020 /* if not, return -1 */
3021 li r3, -1
3022 blr
3024 /* Woken up due to Hypervisor maintenance interrupt */
3025 4: li r12, BOOK3S_INTERRUPT_HMI
3026 li r3, 1
3027 blr
3029 /* external interrupt - create a stack frame so we can call C */
3030 7: mflr r0
3031 std r0, PPC_LR_STKOFF(r1)
3032 stdu r1, -PPC_MIN_STKFRM(r1)
3033 bl kvmppc_read_intr
3034 nop
3035 li r12, BOOK3S_INTERRUPT_EXTERNAL
3036 cmpdi r3, 1
3037 ble 1f
3039 /*
3040 * Return code of 2 means PCI passthrough interrupt, but
3041 * we need to return back to host to complete handling the
3042 * interrupt. Trap reason is expected in r12.
3043 */
3045 li r12, BOOK3S_INTERRUPT_HV_RM_HARD
3046 1:
3047 ld r0, PPC_MIN_STKFRM+PPC_LR_STKOFF(r1)
3048 addi r1, r1, PPC_MIN_STKFRM
3049 mtlr r0
3050 blr
3052 /*
3053 * Save away FP, VMX and VSX registers.
3054 * r3 = vcpu pointer
3055 * N.B. r30 and r31 are volatile across this function,
3056 * thus it is not callable from C.
3057 */
3058 kvmppc_save_fp:
3059 mflr r30
3060 mr r31,r3
3061 mfmsr r5
3062 ori r8,r5,MSR_FP
3063 #ifdef CONFIG_ALTIVEC
3064 BEGIN_FTR_SECTION
3065 oris r8,r8,MSR_VEC@h
3066 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
3067 #endif
3068 #ifdef CONFIG_VSX
3069 BEGIN_FTR_SECTION
3070 oris r8,r8,MSR_VSX@h
3071 END_FTR_SECTION_IFSET(CPU_FTR_VSX)
3072 #endif
3073 mtmsrd r8
3074 addi r3,r3,VCPU_FPRS
3075 bl store_fp_state
3076 #ifdef CONFIG_ALTIVEC
3077 BEGIN_FTR_SECTION
3078 addi r3,r31,VCPU_VRS
3079 bl store_vr_state
3080 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
3081 #endif
3082 mfspr r6,SPRN_VRSAVE
3083 stw r6,VCPU_VRSAVE(r31)
3084 mtlr r30
3085 blr
3087 /*
3088 * Load up FP, VMX and VSX registers
3089 * r4 = vcpu pointer
3090 * N.B. r30 and r31 are volatile across this function,
3091 * thus it is not callable from C.
3092 */
3093 kvmppc_load_fp:
3094 mflr r30
3095 mr r31,r4
3096 mfmsr r9
3097 ori r8,r9,MSR_FP
3098 #ifdef CONFIG_ALTIVEC
3099 BEGIN_FTR_SECTION
3100 oris r8,r8,MSR_VEC@h
3101 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
3102 #endif
3103 #ifdef CONFIG_VSX
3104 BEGIN_FTR_SECTION
3105 oris r8,r8,MSR_VSX@h
3106 END_FTR_SECTION_IFSET(CPU_FTR_VSX)
3107 #endif
3108 mtmsrd r8
3109 addi r3,r4,VCPU_FPRS
3110 bl load_fp_state
3111 #ifdef CONFIG_ALTIVEC
3112 BEGIN_FTR_SECTION
3113 addi r3,r31,VCPU_VRS
3114 bl load_vr_state
3115 END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
3116 #endif
3117 lwz r7,VCPU_VRSAVE(r31)
3118 mtspr SPRN_VRSAVE,r7
3119 mtlr r30
3120 blr
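/*
 * Note for both helpers above: MSR[FP] (plus MSR[VEC]/MSR[VSX] where
 * the CPU has them) must be turned on with mtmsrd before
 * store_fp_state/load_fp_state and store_vr_state/load_vr_state run,
 * since the FP/VMX/VSX register files are inaccessible while the
 * corresponding facility bits are off.
 */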
3123 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
3124 /*
3125 * Save transactional state and TM-related registers.
3126 * Called with r3 pointing to the vcpu struct and r4 containing
3127 * the guest MSR value.
3128 * r5 is non-zero iff non-volatile register state needs to be maintained.
3129 * If r5 == 0, this can modify all checkpointed registers, but
3130 * restores r1 and r2 before exit.
3131 */
3132 _GLOBAL_TOC(kvmppc_save_tm_hv)
3133 EXPORT_SYMBOL_GPL(kvmppc_save_tm_hv)
3134 /* See if we need to handle fake suspend mode */
3135 BEGIN_FTR_SECTION
3136 b __kvmppc_save_tm
3137 END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST)
3139 lbz r0, HSTATE_FAKE_SUSPEND(r13) /* Were we fake suspended? */
3140 cmpwi r0, 0
3141 beq __kvmppc_save_tm
3143 /* The following code handles the fake_suspend = 1 case */
3144 mflr r0
3145 std r0, PPC_LR_STKOFF(r1)
3146 stdu r1, -TM_FRAME_SIZE(r1)
3148 /* Turn on TM. */
3149 mfmsr r8
3150 li r0, 1
3151 rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG
3152 mtmsrd r8
3154 rldicl. r8, r8, 64 - MSR_TS_S_LG, 62 /* Did we actually hrfid? */
3155 beq 4f
3156 BEGIN_FTR_SECTION
3157 bl pnv_power9_force_smt4_catch
3158 END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
3159 nop
3161 /*
3162 * It's possible that treclaim. may modify registers, if we have lost
3163 * track of fake-suspend state in the guest due to it using rfscv.
3164 * Save and restore registers in case this occurs.
3165 */
3166 mfspr r3, SPRN_DSCR
3167 mfspr r4, SPRN_XER
3168 mfspr r5, SPRN_AMR
3169 /* SPRN_TAR would need to be saved here if the kernel ever used it */
3170 mfcr r12
3171 SAVE_NVGPRS(r1)
3172 SAVE_GPR(2, r1)
3173 SAVE_GPR(3, r1)
3174 SAVE_GPR(4, r1)
3175 SAVE_GPR(5, r1)
3176 stw r12, 8(r1)
3177 std r1, HSTATE_HOST_R1(r13)
3179 /* We have to treclaim here because that's the only way to do S->N */
3180 li r3, TM_CAUSE_KVM_RESCHED
3181 TRECLAIM(R3)
3183 GET_PACA(r13)
3184 ld r1, HSTATE_HOST_R1(r13)
3185 REST_GPR(2, r1)
3186 REST_GPR(3, r1)
3187 REST_GPR(4, r1)
3188 REST_GPR(5, r1)
3189 lwz r12, 8(r1)
3190 REST_NVGPRS(r1)
3191 mtspr SPRN_DSCR, r3
3192 mtspr SPRN_XER, r4
3193 mtspr SPRN_AMR, r5
3194 mtcr r12
3195 HMT_MEDIUM
3197 /*
3198 * We were in fake suspend, so we are not going to save the
3199 * register state as the guest checkpointed state (since
3200 * we already have it), therefore we can now use any volatile GPR.
3201 * In fact treclaim in fake suspend state doesn't modify
3202 * registers.
3203 */
3205 BEGIN_FTR_SECTION
3206 bl pnv_power9_force_smt4_release
3207 END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
3208 nop
3210 4:
3211 mfspr r3, SPRN_PSSCR
3212 /* PSSCR_FAKE_SUSPEND is a write-only bit, but clear it anyway */
3213 li r0, PSSCR_FAKE_SUSPEND
3214 andc r3, r3, r0
3215 mtspr SPRN_PSSCR, r3
3217 /* Don't save TEXASR, use value from last exit in real suspend state */
3218 ld r9, HSTATE_KVM_VCPU(r13)
3219 mfspr r5, SPRN_TFHAR
3220 mfspr r6, SPRN_TFIAR
3221 std r5, VCPU_TFHAR(r9)
3222 std r6, VCPU_TFIAR(r9)
3224 addi r1, r1, TM_FRAME_SIZE
3225 ld r0, PPC_LR_STKOFF(r1)
3226 mtlr r0
3227 blr
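/*
 * Rough shape of the fake-suspend exit above (a sketch, not the
 * authoritative flow): treclaim is the only way to move the thread
 * from suspended back to non-transactional state, so
 *
 *	if (local_paca->kvm_hstate.fake_suspend) {
 *		treclaim(TM_CAUSE_KVM_RESCHED);		// S -> N
 *		vcpu->arch.tfhar = mfspr(SPRN_TFHAR);
 *		vcpu->arch.tfiar = mfspr(SPRN_TFIAR);
 *		// TEXASR keeps the value from the last real-suspend exit
 *	}
 */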
3229 /*
3230 * Restore transactional state and TM-related registers.
3231 * Called with r3 pointing to the vcpu struct
3232 * and r4 containing the guest MSR value.
3233 * r5 is non-zero iff non-volatile register state needs to be maintained.
3234 * This potentially modifies all checkpointed registers.
3235 * It restores r1 and r2 from the PACA.
3236 */
3237 _GLOBAL_TOC(kvmppc_restore_tm_hv)
3238 EXPORT_SYMBOL_GPL(kvmppc_restore_tm_hv)
3239 /*
3240 * If we are doing TM emulation for the guest on a POWER9 DD2,
3241 * then we don't actually do a trechkpt -- we either set up
3242 * fake-suspend mode, or emulate a TM rollback.
3243 */
3244 BEGIN_FTR_SECTION
3245 b __kvmppc_restore_tm
3246 END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST)
3247 mflr r0
3248 std r0, PPC_LR_STKOFF(r1)
3250 li r0, 0
3251 stb r0, HSTATE_FAKE_SUSPEND(r13)
3253 /* Turn on TM so we can restore TM SPRs */
3254 mfmsr r5
3255 li r0, 1
3256 rldimi r5, r0, MSR_TM_LG, 63-MSR_TM_LG
3257 mtmsrd r5
3259 /*
3260 * The user may change these outside of a transaction, so they must
3261 * always be context switched.
3262 */
3263 ld r5, VCPU_TFHAR(r3)
3264 ld r6, VCPU_TFIAR(r3)
3265 ld r7, VCPU_TEXASR(r3)
3266 mtspr SPRN_TFHAR, r5
3267 mtspr SPRN_TFIAR, r6
3268 mtspr SPRN_TEXASR, r7
3270 rldicl. r5, r4, 64 - MSR_TS_S_LG, 62
3271 beqlr /* TM not active in guest */
3273 /* Make sure the failure summary is set */
3274 oris r7, r7, (TEXASR_FS)@h
3275 mtspr SPRN_TEXASR, r7
3277 cmpwi r5, 1 /* check for suspended state */
3278 bgt 10f
3279 stb r5, HSTATE_FAKE_SUSPEND(r13)
3280 b 9f /* and return */
3281 10: stdu r1, -PPC_MIN_STKFRM(r1)
3282 /* guest is in transactional state, so simulate rollback */
3283 bl kvmhv_emulate_tm_rollback
3284 nop
3285 addi r1, r1, PPC_MIN_STKFRM
3286 9: ld r0, PPC_LR_STKOFF(r1)
3287 mtlr r0
3288 blr
3289 #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
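/*
 * Decision tree implemented by kvmppc_restore_tm_hv above when TM is
 * being assisted/emulated on POWER9 DD2 (sketch only):
 *
 *	if (!(guest_msr & MSR_TS_MASK))
 *		return;					// TM inactive
 *	texasr |= TEXASR_FS;				// failure summary
 *	if (MSR_TS(guest_msr) == suspended)
 *		local_paca->kvm_hstate.fake_suspend = 1; // no trechkpt
 *	else						// transactional
 *		kvmhv_emulate_tm_rollback(vcpu);
 */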
3291 /*
3292 * We come here if we get any exception or interrupt while we are
3293 * executing host real mode code while in guest MMU context.
3294 * r12 is (CR << 32) | vector
3295 * r13 points to our PACA
3296 * r12 is saved in HSTATE_SCRATCH0(r13)
3297 * ctr is saved in HSTATE_SCRATCH1(r13) if RELOCATABLE
3298 * r9 is saved in HSTATE_SCRATCH2(r13)
3299 * r13 is saved in HSPRG1
3300 * cfar is saved in HSTATE_CFAR(r13)
3301 * ppr is saved in HSTATE_PPR(r13)
3302 */
3303 kvmppc_bad_host_intr:
3304 /*
3305 * Switch to the emergency stack, but start half-way down in
3306 * case we were already on it.
3307 */
3308 mr r9, r1
3309 std r1, PACAR1(r13)
3310 ld r1, PACAEMERGSP(r13)
3311 subi r1, r1, THREAD_SIZE/2 + INT_FRAME_SIZE
3312 std r9, 0(r1)
3313 std r0, GPR0(r1)
3314 std r9, GPR1(r1)
3315 std r2, GPR2(r1)
3316 SAVE_4GPRS(3, r1)
3317 SAVE_2GPRS(7, r1)
3318 srdi r0, r12, 32
3319 clrldi r12, r12, 32
3320 std r0, _CCR(r1)
3321 std r12, _TRAP(r1)
3322 andi. r0, r12, 2
3323 beq 1f
3324 mfspr r3, SPRN_HSRR0
3325 mfspr r4, SPRN_HSRR1
3326 mfspr r5, SPRN_HDAR
3327 mfspr r6, SPRN_HDSISR
3328 b 2f
3329 1: mfspr r3, SPRN_SRR0
3330 mfspr r4, SPRN_SRR1
3331 mfspr r5, SPRN_DAR
3332 mfspr r6, SPRN_DSISR
3333 2: std r3, _NIP(r1)
3334 std r4, _MSR(r1)
3335 std r5, _DAR(r1)
3336 std r6, _DSISR(r1)
3337 ld r9, HSTATE_SCRATCH2(r13)
3338 ld r12, HSTATE_SCRATCH0(r13)
3339 GET_SCRATCH0(r0)
3340 std r9, GPR9(r1)
3341 std r12, GPR12(r1)
3342 std r0, GPR13(r1)
3343 ld r5, HSTATE_CFAR(r13)
3344 std r5, ORIG_GPR3(r1)
3346 #ifdef CONFIG_RELOCATABLE
3347 ld r4, HSTATE_SCRATCH1(r13)
3348 #else
3349 mfctr r4
3350 #endif
3351 mfxer r5
3352 lbz r6, PACAIRQSOFTMASK(r13)
3353 std r4, _CTR(r1)
3354 std r5, _XER(r1)
3355 std r6, SOFTE(r1)
3356 ld r2, PACATOC(r13)
3358 LOAD_REG_IMMEDIATE(3, 0x7265677368657265)
3359 std r3, STACK_FRAME_OVERHEAD-16(r1)
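/*
 * The immediate above is ASCII "regshere": a marker word stored just
 * above the register area so it is easy to spot when scanning a raw
 * memory dump of the emergency stack.
 */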
3361 /*
3362 * On POWER9 do a minimal restore of the MMU and call C code,
3363 * which will print a message and panic.
3364 * XXX On POWER7 and POWER8, we just spin here since we don't
3365 * know what the other threads are doing (and we don't want to
3366 * coordinate with them) - but at least we now have register state
3367 * in memory that we might be able to look at from another CPU.
3368 */
3369 BEGIN_FTR_SECTION
3370 b .
3371 END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
3372 ld r9, HSTATE_KVM_VCPU(r13)
3373 ld r10, VCPU_KVM(r9)
3375 li r0, 0
3376 mtspr SPRN_AMR, r0
3377 mtspr SPRN_IAMR, r0
3378 mtspr SPRN_CIABR, r0
3379 mtspr SPRN_DAWRX, r0
3381 BEGIN_MMU_FTR_SECTION
3382 b 4f
3383 END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
3385 slbmte r0, r0
3386 slbia
3387 ptesync
3388 ld r8, PACA_SLBSHADOWPTR(r13)
3389 .rept SLB_NUM_BOLTED
3390 li r3, SLBSHADOW_SAVEAREA
3391 LDX_BE r5, r8, r3
3392 addi r3, r3, 8
3393 LDX_BE r6, r8, r3
3394 andis. r7, r5, SLB_ESID_V@h
3395 beq 3f
3396 slbmte r6, r5
3397 3: addi r8, r8, 16
3398 .endr
3400 4: lwz r7, KVM_HOST_LPID(r10)
3401 mtspr SPRN_LPID, r7
3402 mtspr SPRN_PID, r0
3403 ld r8, KVM_HOST_LPCR(r10)
3404 mtspr SPRN_LPCR, r8
3405 isync
3406 li r0, KVM_GUEST_MODE_NONE
3407 stb r0, HSTATE_IN_GUEST(r13)
3409 /*
3410 * Turn on the MMU and jump to C code
3411 */
3412 bcl 20, 31, .+4
3413 5: mflr r3
3414 addi r3, r3, 9f - 5b
3415 li r4, -1
3416 rldimi r3, r4, 62, 0 /* ensure 0xc000000000000000 bits are set */
3417 ld r4, PACAKMSR(r13)
3418 mtspr SPRN_SRR0, r3
3419 mtspr SPRN_SRR1, r4
3420 RFI_TO_KERNEL
3421 9: addi r3, r1, STACK_FRAME_OVERHEAD
3422 bl kvmppc_bad_interrupt
3423 b 9b
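/*
 * The bcl 20,31,.+4 / mflr pair above materialises the run-time address
 * of the code itself; adding 9f - 5b and setting the 0xc000... bits
 * yields the virtual-mode address of label 9, which is then entered via
 * SRR0/SRR1 + RFI_TO_KERNEL with the MMU-on MSR from PACAKMSR - the
 * standard real-mode-to-virtual-mode trampoline.
 */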
3425 /*
3426 * This mimics the MSR transition on IRQ delivery. The new guest MSR is taken
3427 * from VCPU_INTR_MSR and is modified based on the required TM state changes.
3428 * r11 has the guest MSR value (in/out)
3429 * r9 has a vcpu pointer (in)
3430 * r0 is used as a scratch register
3431 */
3432 kvmppc_msr_interrupt:
3433 rldicl r0, r11, 64 - MSR_TS_S_LG, 62
3434 cmpwi r0, 2 /* Check if we are in transactional state.. */
3435 ld r11, VCPU_INTR_MSR(r9)
3436 bne 1f
3437 /* ... if transactional, change to suspended */
3438 li r0, 1
3439 1: rldimi r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
3440 blr
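/*
 * Equivalent C sketch of kvmppc_msr_interrupt (TS encoding: 1 =
 * suspended, 2 = transactional):
 *
 *	ts = MSR_TS(old_msr);
 *	if (ts == 2)
 *		ts = 1;		// interrupt delivery moves T -> S
 *	new_msr = vcpu->arch.intr_msr | (ts << MSR_TS_S_LG);
 *
 * i.e. suspended stays suspended, transactional becomes suspended,
 * and non-TM state stays off.
 */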
3442 /*
3443 * Load up guest PMU state. R3 points to the vcpu struct.
3444 */
3445 _GLOBAL(kvmhv_load_guest_pmu)
3446 EXPORT_SYMBOL_GPL(kvmhv_load_guest_pmu)
3447 mr r4, r3
3448 mflr r0
3449 li r3, 1
3450 sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
3451 mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
3452 isync
3453 BEGIN_FTR_SECTION
3454 ld r3, VCPU_MMCR(r4)
3455 andi. r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
3456 cmpwi r5, MMCR0_PMAO
3457 beql kvmppc_fix_pmao
3458 END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
3459 lwz r3, VCPU_PMC(r4) /* always load up guest PMU registers */
3460 lwz r5, VCPU_PMC + 4(r4) /* to prevent information leak */
3461 lwz r6, VCPU_PMC + 8(r4)
3462 lwz r7, VCPU_PMC + 12(r4)
3463 lwz r8, VCPU_PMC + 16(r4)
3464 lwz r9, VCPU_PMC + 20(r4)
3465 mtspr SPRN_PMC1, r3
3466 mtspr SPRN_PMC2, r5
3467 mtspr SPRN_PMC3, r6
3468 mtspr SPRN_PMC4, r7
3469 mtspr SPRN_PMC5, r8
3470 mtspr SPRN_PMC6, r9
3471 ld r3, VCPU_MMCR(r4)
3472 ld r5, VCPU_MMCR + 8(r4)
3473 ld r6, VCPU_MMCR + 16(r4)
3474 ld r7, VCPU_SIAR(r4)
3475 ld r8, VCPU_SDAR(r4)
3476 mtspr SPRN_MMCR1, r5
3477 mtspr SPRN_MMCRA, r6
3478 mtspr SPRN_SIAR, r7
3479 mtspr SPRN_SDAR, r8
3480 BEGIN_FTR_SECTION
3481 ld r5, VCPU_MMCR + 24(r4)
3482 ld r6, VCPU_SIER(r4)
3483 mtspr SPRN_MMCR2, r5
3484 mtspr SPRN_SIER, r6
3485 BEGIN_FTR_SECTION_NESTED(96)
3486 lwz r7, VCPU_PMC + 24(r4)
3487 lwz r8, VCPU_PMC + 28(r4)
3488 ld r9, VCPU_MMCR + 32(r4)
3489 mtspr SPRN_SPMC1, r7
3490 mtspr SPRN_SPMC2, r8
3491 mtspr SPRN_MMCRS, r9
3492 END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
3493 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
3494 mtspr SPRN_MMCR0, r3
3495 isync
3496 mtlr r0
3497 blr
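/*
 * Ordering note: the function first freezes everything by writing
 * MMCR0_FC, then loads PMCs, MMCR1/MMCRA, SIAR/SDAR (and MMCR2/SIER/
 * SPMCs where present), and only installs the guest's real MMCR0 last,
 * so no counter can run or raise an alert while half-loaded.
 */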
3499 /*
3500 * Reload host PMU state saved in the PACA by kvmhv_save_host_pmu.
3501 */
3502 _GLOBAL(kvmhv_load_host_pmu)
3503 EXPORT_SYMBOL_GPL(kvmhv_load_host_pmu)
3504 mflr r0
3505 lbz r4, PACA_PMCINUSE(r13) /* is the host using the PMU? */
3506 cmpwi r4, 0
3507 beq 23f /* skip if not */
3508 BEGIN_FTR_SECTION
3509 ld r3, HSTATE_MMCR0(r13)
3510 andi. r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
3511 cmpwi r4, MMCR0_PMAO
3512 beql kvmppc_fix_pmao
3513 END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
3514 lwz r3, HSTATE_PMC1(r13)
3515 lwz r4, HSTATE_PMC2(r13)
3516 lwz r5, HSTATE_PMC3(r13)
3517 lwz r6, HSTATE_PMC4(r13)
3518 lwz r8, HSTATE_PMC5(r13)
3519 lwz r9, HSTATE_PMC6(r13)
3520 mtspr SPRN_PMC1, r3
3521 mtspr SPRN_PMC2, r4
3522 mtspr SPRN_PMC3, r5
3523 mtspr SPRN_PMC4, r6
3524 mtspr SPRN_PMC5, r8
3525 mtspr SPRN_PMC6, r9
3526 ld r3, HSTATE_MMCR0(r13)
3527 ld r4, HSTATE_MMCR1(r13)
3528 ld r5, HSTATE_MMCRA(r13)
3529 ld r6, HSTATE_SIAR(r13)
3530 ld r7, HSTATE_SDAR(r13)
3531 mtspr SPRN_MMCR1, r4
3532 mtspr SPRN_MMCRA, r5
3533 mtspr SPRN_SIAR, r6
3534 mtspr SPRN_SDAR, r7
3535 BEGIN_FTR_SECTION
3536 ld r8, HSTATE_MMCR2(r13)
3537 ld r9, HSTATE_SIER(r13)
3538 mtspr SPRN_MMCR2, r8
3539 mtspr SPRN_SIER, r9
3540 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
3541 mtspr SPRN_MMCR0, r3
3542 isync
3543 mtlr r0
3544 23: blr
3546 /*
3547 * Save guest PMU state into the vcpu struct.
3548 * r3 = vcpu, r4 = full save flag (PMU in use flag set in VPA)
3549 */
3550 _GLOBAL(kvmhv_save_guest_pmu)
3551 EXPORT_SYMBOL_GPL(kvmhv_save_guest_pmu)
3552 mr r9, r3
3553 mr r8, r4
3554 BEGIN_FTR_SECTION
3555 /*
3556 * POWER8 seems to have a hardware bug where setting
3557 * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE]
3558 * when some counters are already negative doesn't seem
3559 * to cause a performance monitor alert (and hence interrupt).
3560 * The effect of this is that when saving the PMU state,
3561 * if there is no PMU alert pending when we read MMCR0
3562 * before freezing the counters, but one becomes pending
3563 * before we read the counters, we lose it.
3564 * To work around this, we need a way to freeze the counters
3565 * before reading MMCR0. Normally, freezing the counters
3566 * is done by writing MMCR0 (to set MMCR0[FC]) which
3567 * unavoidably writes MMCR0[PMA0] as well. On POWER8,
3568 * we can also freeze the counters using MMCR2, by writing
3569 * 1s to all the counter freeze condition bits (there are
3570 * 9 bits each for 6 counters).
3571 */
3572 li r3, -1 /* set all freeze bits */
3573 clrrdi r3, r3, 10
3574 mfspr r10, SPRN_MMCR2
3575 mtspr SPRN_MMCR2, r3
3576 isync
3577 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
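/*
 * Sketch of the freeze-via-MMCR2 trick above:
 *
 *	old_mmcr2 = mfspr(SPRN_MMCR2);
 *	mtspr(SPRN_MMCR2, ~0ULL << 10);	// 1s in all 54 freeze-condition
 *	isync();			// bits (9 bits x 6 counters)
 *	... MMCR0/PMCs can now be read without losing a late alert ...
 *
 * old_mmcr2 (r10) is saved into the vcpu further down with the rest of
 * the PMU state.
 */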
3578 li r3, 1
3579 sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
3580 mfspr r4, SPRN_MMCR0 /* save MMCR0 */
3581 mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
3582 mfspr r6, SPRN_MMCRA
3583 /* Clear MMCRA in order to disable SDAR updates */
3584 li r7, 0
3585 mtspr SPRN_MMCRA, r7
3586 isync
3587 cmpwi r8, 0 /* did they ask for PMU stuff to be saved? */
3588 bne 21f
3589 std r3, VCPU_MMCR(r9) /* if not, set saved MMCR0 to FC */
3590 b 22f
3591 21: mfspr r5, SPRN_MMCR1
3592 mfspr r7, SPRN_SIAR
3593 mfspr r8, SPRN_SDAR
3594 std r4, VCPU_MMCR(r9)
3595 std r5, VCPU_MMCR + 8(r9)
3596 std r6, VCPU_MMCR + 16(r9)
3597 BEGIN_FTR_SECTION
3598 std r10, VCPU_MMCR + 24(r9)
3599 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
3600 std r7, VCPU_SIAR(r9)
3601 std r8, VCPU_SDAR(r9)
3602 mfspr r3, SPRN_PMC1
3603 mfspr r4, SPRN_PMC2
3604 mfspr r5, SPRN_PMC3
3605 mfspr r6, SPRN_PMC4
3606 mfspr r7, SPRN_PMC5
3607 mfspr r8, SPRN_PMC6
3608 stw r3, VCPU_PMC(r9)
3609 stw r4, VCPU_PMC + 4(r9)
3610 stw r5, VCPU_PMC + 8(r9)
3611 stw r6, VCPU_PMC + 12(r9)
3612 stw r7, VCPU_PMC + 16(r9)
3613 stw r8, VCPU_PMC + 20(r9)
3614 BEGIN_FTR_SECTION
3615 mfspr r5, SPRN_SIER
3616 std r5, VCPU_SIER(r9)
3617 BEGIN_FTR_SECTION_NESTED(96)
3618 mfspr r6, SPRN_SPMC1
3619 mfspr r7, SPRN_SPMC2
3620 mfspr r8, SPRN_MMCRS
3621 stw r6, VCPU_PMC + 24(r9)
3622 stw r7, VCPU_PMC + 28(r9)
3623 std r8, VCPU_MMCR + 32(r9)
3624 lis r4, 0x8000
3625 mtspr SPRN_MMCRS, r4
3626 END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
3627 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
3628 22: blr
3630 /*
3631 * This works around a hardware bug on POWER8E processors, where
3632 * writing a 1 to the MMCR0[PMAO] bit doesn't generate a
3633 * performance monitor interrupt. Instead, when we need to have
3634 * an interrupt pending, we have to arrange for a counter to overflow.
3635 */
3636 kvmppc_fix_pmao:
3637 li r3, 0
3638 mtspr SPRN_MMCRS, r3
3639 lis r3, (MMCR0_PMXE | MMCR0_FCECE)@h
3640 ori r3, r3, MMCR0_PMCjCE | MMCR0_C56RUN
3641 mtspr SPRN_MMCR0, r3
3642 lis r3, 0x7fff
3643 ori r3, r3, 0xffff
3644 mtspr SPRN_PMC6, r3
3645 isync
3646 blr
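/*
 * Setting PMC6 to 0x7fffffff means its very next increment flips it
 * negative, which together with MMCR0[PMCjCE] set above raises the
 * performance monitor alert that a direct write of MMCR0[PMAO] fails
 * to generate on the affected POWER8E parts.
 */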
3648 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
3649 /*
3650 * Start timing an activity
3651 * r3 = pointer to time accumulation struct, r4 = vcpu
3652 */
3653 kvmhv_start_timing:
3654 ld r5, HSTATE_KVM_VCORE(r13)
3655 ld r6, VCORE_TB_OFFSET_APPL(r5)
3656 mftb r5
3657 subf r5, r6, r5 /* subtract current timebase offset */
3658 std r3, VCPU_CUR_ACTIVITY(r4)
3659 std r5, VCPU_ACTIVITY_START(r4)
3660 blr
3662 /*
3663 * Accumulate time to one activity and start another.
3664 * r3 = pointer to new time accumulation struct, r4 = vcpu
3665 */
3666 kvmhv_accumulate_time:
3667 ld r5, HSTATE_KVM_VCORE(r13)
3668 ld r8, VCORE_TB_OFFSET_APPL(r5)
3669 ld r5, VCPU_CUR_ACTIVITY(r4)
3670 ld r6, VCPU_ACTIVITY_START(r4)
3671 std r3, VCPU_CUR_ACTIVITY(r4)
3672 mftb r7
3673 subf r7, r8, r7 /* subtract current timebase offset */
3674 std r7, VCPU_ACTIVITY_START(r4)
3675 cmpdi r5, 0
3676 beqlr
3677 subf r3, r6, r7
3678 ld r8, TAS_SEQCOUNT(r5)
3679 cmpdi r8, 0
3680 addi r8, r8, 1
3681 std r8, TAS_SEQCOUNT(r5)
3682 lwsync
3683 ld r7, TAS_TOTAL(r5)
3684 add r7, r7, r3
3685 std r7, TAS_TOTAL(r5)
3686 ld r6, TAS_MIN(r5)
3687 ld r9, TAS_MAX(r5)
3688 beq 3f
3689 cmpd r3, r6
3690 bge 1f
3691 3: std r3, TAS_MIN(r5)
3692 1: cmpd r3, r9
3693 ble 2f
3694 std r3, TAS_MAX(r5)
3695 2: lwsync
3696 addi r8, r8, 1
3697 std r8, TAS_SEQCOUNT(r5)
3698 blr
3699 #endif
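/*
 * TAS_SEQCOUNT makes the accumulation struct a tiny seqlock. Reader
 * side, as a sketch (field names are illustrative):
 *
 *	do {
 *		seq = acc->seqcount;	// odd => writer in progress
 *		smp_rmb();
 *		snapshot = *acc;	// copy total/min/max
 *		smp_rmb();
 *	} while ((seq & 1) || seq != acc->seqcount);
 *
 * The writer above makes the count odd before updating and even again
 * afterwards, with lwsync providing the required ordering.
 */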