/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */
#include <asm/ppc_asm.h>
#include <asm/code-patching-asm.h>
#include <asm/kvm_asm.h>
#include <asm/ptrace.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/xive-regs.h>
#include <asm/thread_info.h>
#include <asm/asm-compat.h>
#include <asm/feature-fixups.h>
/* Sign-extend HDEC if not on POWER9 */
#define EXTEND_HDEC(reg)			\
BEGIN_FTR_SECTION;				\
	extsw	reg, reg;			\
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
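/*
 * Background: before ISA v3.00 (POWER9), HDEC is a 32-bit register, so
 * its value must be sign-extended before it is used in 64-bit arithmetic
 * or comparisons; POWER9's large hypervisor decrementer needs no such
 * extension, hence the feature section above.
 */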
/* Values in HSTATE_NAPPING(r13) */
#define NAPPING_CEDE	1
#define NAPPING_NOVCPU	2
/* Stack frame offsets for kvmppc_hv_entry */
#define SFS			160	/* total stack frame size */
#define STACK_SLOT_TRAP		(SFS-4)
#define STACK_SLOT_TID		(SFS-16)
#define STACK_SLOT_PSSCR	(SFS-24)
#define STACK_SLOT_PID		(SFS-32)
#define STACK_SLOT_IAMR		(SFS-40)
#define STACK_SLOT_CIABR	(SFS-48)
#define STACK_SLOT_DAWR		(SFS-56)
#define STACK_SLOT_DAWRX	(SFS-64)
#define STACK_SLOT_HFSCR	(SFS-72)
#define STACK_SLOT_AMR		(SFS-80)
#define STACK_SLOT_UAMOR	(SFS-88)
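/*
 * The save slots are laid out downward from the top of the SFS-byte
 * frame defined above: e.g. with SFS = 160, STACK_SLOT_TRAP sits at
 * byte offset 156 and STACK_SLOT_UAMOR at offset 72, leaving the usual
 * minimal stack frame free below them.
 */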
/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
_GLOBAL_TOC(kvmppc_hv_entry_trampoline)
	std	r0, PPC_LR_STKOFF(r1)
	std	r10, HSTATE_HOST_MSR(r13)
	LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
	mtmsrd	r0,1		/* clear RI in MSR */
	/* On P9, do LPCR setting, if necessary */
	ld	r3, HSTATE_SPLIT_MODE(r13)
	lwz	r4, KVM_SPLIT_DO_SET(r3)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

	ld	r4, HSTATE_KVM_VCPU(r13)
	/* Back from guest - restore host state and return to caller */

	/* Restore host DABR and DABRX */
	ld	r5,HSTATE_DABR(r13)
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

	ld	r3,PACA_SPRG_VDSO(r13)
	mtspr	SPRN_SPRG_VDSO_WRITE,r3
	/* Reload the host's PMU registers */
	lbz	r4, PACA_PMCINUSE(r13)	/* is the host using the PMU? */
	cmpwi	r4, 0
	beq	23f			/* skip if not */
BEGIN_FTR_SECTION
	ld	r3, HSTATE_MMCR0(r13)
	andi.	r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
	cmpwi	r4, MMCR0_PMAO
	beql	kvmppc_fix_pmao
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, HSTATE_PMC1(r13)
	lwz	r4, HSTATE_PMC2(r13)
	lwz	r5, HSTATE_PMC3(r13)
	lwz	r6, HSTATE_PMC4(r13)
	lwz	r8, HSTATE_PMC5(r13)
	lwz	r9, HSTATE_PMC6(r13)
	ld	r3, HSTATE_MMCR0(r13)
	ld	r4, HSTATE_MMCR1(r13)
	ld	r5, HSTATE_MMCRA(r13)
	ld	r6, HSTATE_SIAR(r13)
	ld	r7, HSTATE_SDAR(r13)
BEGIN_FTR_SECTION
	ld	r8, HSTATE_MMCR2(r13)
	ld	r9, HSTATE_SIER(r13)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	/*
	 * Reload DEC.  HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
	ld	r3, HSTATE_DECEXP(r13)

	/* hwthread_req may have been set by cede or no vcpu, so clear it */
	li	r0, 0
	stb	r0, HSTATE_HWTHREAD_REQ(r13)
	/*
	 * For external interrupts we need to call the Linux
	 * handler to process the interrupt.  We do that by jumping
	 * to absolute address 0x500 for external interrupts.
	 * The [h]rfid at the end of the handler will return to
	 * the book3s_hv_interrupts.S code.  For other interrupts
	 * we do the rfid to get back to the book3s_hv_interrupts.S
	 * code there.
	 */
	ld	r8, 112+PPC_LR_STKOFF(r1)
	ld	r7, HSTATE_HOST_MSR(r13)

	/* Return the trap number on this thread as the return value */
	mr	r3, r12
	/*
	 * If we came back from the guest via a relocation-on interrupt,
	 * we will be in virtual mode at this point, which makes it a
	 * little easier to get back to the caller.
	 */
	mfmsr	r0
	andi.	r0, r0, MSR_IR		/* in real mode? */

	/* RFI into the highmem handler */
	mtmsrd	r6, 1			/* Clear RI in MSR */

	/* Virtual-mode return */
kvmppc_primary_no_guest:
	/* We handle this much like a ceded vcpu */
	/* put the HDEC into the DEC, since HDEC interrupts don't wake us */
	/* HDEC may be larger than DEC for arch >= v3.00, but since the */
	/* HDEC value came from DEC in the first place, it will fit */
	/*
	 * Make sure the primary has finished the MMU switch.
	 * We should never get here on a secondary thread, but
	 * check it for robustness' sake.
	 */
	ld	r5, HSTATE_KVM_VCORE(r13)
65:	lbz	r0, VCORE_IN_GUEST(r5)
	/* set our bit in napping_threads */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r7, HSTATE_PTID(r13)
	addi	r6, r5, VCORE_NAPPING_THREADS
	/* order napping_threads update vs testing entry_exit_map */
	lwz	r7, VCORE_ENTRY_EXIT(r5)
	bge	kvm_novcpu_exit		/* another thread already exiting */
	li	r3, NAPPING_NOVCPU
	stb	r3, HSTATE_NAPPING(r13)

	li	r3, 0		/* Don't wake on privileged (OS) doorbell */
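	/*
	 * Note: napping_threads in the vcore is a bitmask with one bit
	 * per hardware thread (indexed by ptid); a thread sets its bit
	 * before napping and clears it on wakeup, so the exiting code
	 * can tell which threads still need to be woken.
	 */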
/*
 * Entered from kvm_start_guest if kvm_hstate.napping is set
 * to NAPPING_NOVCPU.
 */
kvm_novcpu_wakeup:
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	li	r0, 0
	stb	r0, HSTATE_NAPPING(r13)
	/* check the wake reason */
	bl	kvmppc_check_wake_reason

	/*
	 * Restore volatile registers since we could have called
	 * a C routine in kvmppc_check_wake_reason.
	 */
	ld	r5, HSTATE_KVM_VCORE(r13)

	/* see if any other thread is already exiting */
	lwz	r0, VCORE_ENTRY_EXIT(r5)

	/* clear our bit in napping_threads */
	lbz	r7, HSTATE_PTID(r13)
	addi	r6, r5, VCORE_NAPPING_THREADS
295 /* See if our timeslice has expired (HDEC is negative) */
298 li r12, BOOK3S_INTERRUPT_HV_DECREMENTER
302 /* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
303 ld r4, HSTATE_KVM_VCPU(r13)
305 beq kvmppc_primary_no_guest
307 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
308 addi r3, r4, VCPU_TB_RMENTRY
309 bl kvmhv_start_timing
314 #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
315 ld r4, HSTATE_KVM_VCPU(r13)
318 addi r3, r4, VCPU_TB_RMEXIT
319 bl kvmhv_accumulate_time
322 stw r12, STACK_SLOT_TRAP(r1)
323 bl kvmhv_commence_exit
325 b kvmhv_switch_to_host
/*
 * We come in here when woken from nap mode.
 * Relocation is off and most register values are lost.
 * r13 points to the PACA.
 * r3 contains the SRR1 wakeup value, SRR1 is trashed.
 */
	.globl	kvm_start_guest
kvm_start_guest:
	/* Set runlatch bit the minute you wake up from nap */

	/*
	 * Could avoid this and pass it through in r3.  For now,
	 * code expects it to be in SRR1.
	 */
	li	r0,0
	stb	r0,PACA_FTRACE_ENABLED(r13)

	li	r0,KVM_HWTHREAD_IN_KVM
	stb	r0,HSTATE_HWTHREAD_STATE(r13)

	/* NV GPR values from power7_idle() will no longer be valid */
	li	r0,1
	stb	r0,PACA_NAPSTATELOST(r13)
	/* were we napping due to cede? */
	lbz	r0,HSTATE_NAPPING(r13)
	cmpwi	r0,NAPPING_CEDE
	beq	kvm_end_cede
	cmpwi	r0,NAPPING_NOVCPU
	beq	kvm_novcpu_wakeup

	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD
	/*
	 * We weren't napping due to cede, so this must be a secondary
	 * thread being woken up to run a guest, or being woken up due
	 * to a stray IPI.  (Or due to some machine check or hypervisor
	 * maintenance interrupt while the core is in KVM.)
	 */

	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason
	/*
	 * kvmppc_check_wake_reason could invoke a C routine, but we
	 * have no volatile registers to restore when we return.
	 */

	/* get vcore pointer, NULL if we have nothing to run */
	ld	r5,HSTATE_KVM_VCORE(r13)
	cmpdi	r5,0
	/* if we have no vcore to run, go back to sleep */
	beq	kvm_no_guest
kvm_secondary_got_guest:

	/* Set HSTATE_DSCR(r13) to something sensible */
	ld	r6, PACA_DSCR_DEFAULT(r13)
	std	r6, HSTATE_DSCR(r13)

	/* On thread 0 of a subcore, set HDEC to max */
	lbz	r4, HSTATE_PTID(r13)
	LOAD_REG_ADDR(r6, decrementer_max)
	/* and set per-LPAR registers, if doing dynamic micro-threading */
	ld	r6, HSTATE_SPLIT_MODE(r13)
	ld	r0, KVM_SPLIT_RPR(r6)
	ld	r0, KVM_SPLIT_PMMAR(r6)
	ld	r0, KVM_SPLIT_LDBAR(r6)
	/* On P9 we use the split_info for coordinating LPCR changes */
	lwz	r4, KVM_SPLIT_DO_SET(r6)
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
	/* Order load of vcpu after load of vcore */
	lwsync
	ld	r4, HSTATE_KVM_VCPU(r13)

	/* Back from the guest, go back to nap */
	/* Clear our vcpu and vcore pointers so we don't come back in early */
	li	r0, 0
	std	r0, HSTATE_KVM_VCPU(r13)
	/*
	 * Once we clear HSTATE_KVM_VCORE(r13), the code in
	 * kvmppc_run_core() is going to assume that all our vcpu
	 * state is visible in memory.  This lwsync makes sure
	 * that is true.
	 */
	lwsync
	std	r0, HSTATE_KVM_VCORE(r13)
	/*
	 * All secondaries exiting the guest will fall through this path.
	 * Before proceeding, just check for an HMI interrupt and
	 * invoke the OPAL HMI handler.  By now we are sure that the
	 * primary thread on this core/subcore has already done the
	 * partition switch/TB resync and we are good to call the OPAL
	 * HMI handler.
	 */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	bne	kvm_no_guest

	li	r3,0			/* NULL argument */
	bl	hmi_exception_realmode
kvm_no_guest:
	/*
	 * At this point we have finished executing in the guest.
	 * We need to wait for hwthread_req to become zero, since
	 * we may not turn on the MMU while hwthread_req is non-zero.
	 * While waiting we also need to check if we get given a vcpu to run.
	 */
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)
	li	r0, KVM_HWTHREAD_IN_KERNEL
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	/* need to recheck hwthread_req after a barrier, to avoid race */
	sync
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)
	/*
	 * We jump to pnv_wakeup_loss, which will return to the caller
	 * of power7_nap in the powernv cpu offline loop.  The value we
	 * put in r3 becomes the return value for power7_nap.  pnv_wakeup_loss
	 * requires SRR1 in r12.
	 */
	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r3, HSTATE_SPLIT_MODE(r13)
	lwz	r0, KVM_SPLIT_DO_SET(r3)
	lwz	r0, KVM_SPLIT_DO_RESTORE(r3)
	lbz	r0, KVM_SPLIT_DO_NAP(r3)
	b	kvm_secondary_got_guest

54:	li	r0, KVM_HWTHREAD_IN_KVM
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	/* Set LPCR, LPIDR etc. on P9 */
	bl	kvmhv_p9_restore_lpcr
/*
 * Here the primary thread is trying to return the core to
 * whole-core mode, so we need to nap.
 *
 * When secondaries are napping in kvm_unsplit_nap() with
 * hwthread_req = 1, an HMI goes ignored even though the subcores have
 * already exited the guest.  Hence the HMI keeps waking up secondaries
 * from nap in a loop, and the secondaries always go back to nap since
 * no vcore is assigned to them.  This makes it impossible for the
 * primary thread to get hold of the secondary threads, resulting in a
 * soft lockup in the KVM path.
 *
 * Let us check if an HMI is pending and handle it before we go to nap.
 */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	bne	55f
	li	r3, 0			/* NULL argument */
	bl	hmi_exception_realmode
55:
	/*
	 * Ensure that secondary doesn't nap when it has
	 * its vcore pointer set.
	 */
	sync		/* matches smp_mb() before setting split_info.do_nap */
	ld	r0, HSTATE_KVM_VCORE(r13)
	/* clear any pending message */
BEGIN_FTR_SECTION
	lis	r6, (PPC_DBELL_SERVER << (63-36))@h
	msgclr	r6
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	/* Set kvm_split_mode.napped[tid] = 1 */
	ld	r3, HSTATE_SPLIT_MODE(r13)
	li	r0, 1
	lbz	r4, HSTATE_TID(r13)
	addi	r4, r4, KVM_SPLIT_NAPPED
	stbx	r0, r3, r4
	/* Check the do_nap flag again after setting napped[] */
	sync
	lbz	r0, KVM_SPLIT_DO_NAP(r3)
	li	r3, (LPCR_PECEDH | LPCR_PECE0) >> 4
	mfspr	r5, SPRN_LPCR
	rlwimi	r5, r3, 4, (LPCR_PECEDP | LPCR_PECEDH | LPCR_PECE0 | LPCR_PECE1)
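	/*
	 * The PECE constant is loaded shifted right by 4 so that it fits
	 * in li's signed 16-bit immediate; the rlwimi then rotates it
	 * back left by 4 into position, and its mask simultaneously
	 * clears the other PECE bits in the field (LPCR_PECEDP and
	 * LPCR_PECE1), leaving exactly LPCR_PECEDH and LPCR_PECE0 as the
	 * enabled wake causes.
	 */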
/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/
.global kvmppc_hv_entry
kvmppc_hv_entry:

	/* Required state:
	 *
	 * R4 = vcpu pointer (or NULL)
	 * all other volatile GPRs = free
	 * Does not preserve non-volatile GPRs or CR fields
	 */
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -SFS(r1)

	/* Save R1 in the PACA */
	std	r1, HSTATE_HOST_R1(r13)

	li	r6, KVM_GUEST_MODE_HOST_HV
	stb	r6, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Store initial timestamp */
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing
#endif

	/* Use cr7 as an indication of radix mode */
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r9, VCORE_KVM(r5)	/* pointer to struct kvm */
	lbz	r0, KVM_RADIX(r9)
	cmpwi	cr7, r0, 0
	/*
	 * POWER7/POWER8 host -> guest partition switch code.
	 * We don't have to lock against concurrent tlbies,
	 * but we do have to coordinate across hardware threads.
	 */
	/* Set bit in entry map iff exit map is zero. */
	lbz	r6, HSTATE_PTID(r13)
	addi	r8, r5, VCORE_ENTRY_EXIT
	cmpwi	r3, 0x100		/* any threads starting to exit? */
	bge	secondary_too_late	/* if so we're too late to the party */
	/* Primary thread switches to guest partition. */

	/* Radix has already switched LPID and flushed core TLB */
BEGIN_FTR_SECTION
	ld	r6,KVM_SDR1(r9)
	li	r0,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r0
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
	/* See if we need to flush the TLB.  Hash has to be done in RM */
	lhz	r6,PACAPACAINDEX(r13)	/* test_bit(cpu, need_tlb_flush) */
BEGIN_FTR_SECTION
	/*
	 * On POWER9, individual threads can come in here, but the
	 * TLB is shared between the 4 threads in a core, hence
	 * invalidating on one thread invalidates for all.
	 * Thus we make all 4 threads use the same bit here.
	 */
	clrrdi	r6,r6,2
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	clrldi	r7,r6,64-6		/* extract bit number (6 bits) */
	srdi	r6,r6,6			/* doubleword number */
	sldi	r6,r6,3			/* address offset */
	addi	r6,r6,KVM_NEED_FLUSH	/* dword in kvm->arch.need_tlb_flush */
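	/*
	 * Standard bitmap addressing: for cpu number c, the flag lives
	 * in 64-bit word c >> 6 of need_tlb_flush, i.e. at byte offset
	 * (c >> 6) * 8, and within that word it is bit c & 63 -- which
	 * is exactly what the clrldi/srdi/sldi sequence above computes.
	 */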
	/* Flush the TLB of any entries for this LPID */
	lwz	r0,KVM_TLB_SETS(r9)
	mtctr	r0
	li	r7,0x800		/* IS field = 0b10 */
	li	r0,0			/* RS for P9 version of tlbiel */
28:	tlbiel	r7			/* On P9, rs=0, RIC=0, PRS=0, R=0 */
	addi	r7,r7,0x1000
	bdnz	28b
23:	ldarx	r7,0,r6			/* clear the bit after TLB flushed */

	/* Add timebase offset onto timebase */
22:	ld	r8,VCORE_TB_OFFSET(r5)
	cmpdi	r8,0
	beq	37f
	std	r8, VCORE_TB_OFFSET_APPL(r5)
	mftb	r6		/* current host timebase */
	add	r8,r8,r6
	mtspr	SPRN_TBU40,r8	/* update upper 40 bits */
	mftb	r7		/* check if lower 24 bits overflowed */
	clrldi	r6,r6,40
	clrldi	r7,r7,40
	cmpld	r7,r6
	bge	37f
	addis	r8,r8,0x100	/* if so, increment upper 40 bits */
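	/*
	 * mtspr TBU40 sets only the upper 40 bits of the timebase; the
	 * low 24 bits keep counting.  If they happened to carry between
	 * the mftb and the mtspr, that carry was lost, so the low 24
	 * bits are compared before and after, and 1 is added at bit 24
	 * (addis 0x100) when a wrap is detected.
	 */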
	/* Load guest PCR value to select appropriate compat mode */
37:	ld	r7, VCORE_PCR(r5)

	/* DPDES and VTB are shared between threads */
	ld	r8, VCORE_DPDES(r5)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	/* Mark the subcore state as inside guest */
	bl	kvmppc_subcore_enter_guest
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r4, HSTATE_KVM_VCPU(r13)
	li	r0, 1
	stb	r0,VCORE_IN_GUEST(r5)	/* signal secondaries to continue */

	/* Do we have a guest vcpu to run? */
	cmpdi	r4, 0
	beq	kvmppc_primary_no_guest

	/* Increment yield count if they have a VPA */
	li	r6, LPPACA_YIELDCOUNT
	li	r6, 1
	stb	r6, VCPU_VPA_DIRTY(r4)
	/* Save purr/spurr */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	std	r5,HSTATE_PURR(r13)
	std	r6,HSTATE_SPURR(r13)
	/* Save host values of some registers */
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_TIDR
	mfspr	r6, SPRN_PSSCR
	mfspr	r7, SPRN_PID
	std	r5, STACK_SLOT_TID(r1)
	std	r6, STACK_SLOT_PSSCR(r1)
	std	r7, STACK_SLOT_PID(r1)
	mfspr	r5, SPRN_HFSCR
	std	r5, STACK_SLOT_HFSCR(r1)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_CIABR
	mfspr	r6, SPRN_DAWR
	mfspr	r7, SPRN_DAWRX
	mfspr	r8, SPRN_IAMR
	std	r5, STACK_SLOT_CIABR(r1)
	std	r6, STACK_SLOT_DAWR(r1)
	std	r7, STACK_SLOT_DAWRX(r1)
	std	r8, STACK_SLOT_IAMR(r1)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	mfspr	r5, SPRN_AMR
	std	r5, STACK_SLOT_AMR(r1)
	mfspr	r6, SPRN_UAMOR
	std	r6, STACK_SLOT_UAMOR(r1)
	/* Set partition DABR */
	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
	lwz	r5,VCPU_DABRX(r4)
	ld	r6,VCPU_DABR(r4)
	mtspr	SPRN_DABRX,r5
	mtspr	SPRN_DABR,r6
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Branch around the call if both CPU_FTR_TM and
 * CPU_FTR_P9_TM_HV_ASSIST are off.
 */
BEGIN_FTR_SECTION
	b	91f
END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
	/*
	 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
	 */
	mr	r3, r4
	ld	r4, VCPU_MSR(r3)
	bl	kvmppc_restore_tm_hv
	ld	r4, HSTATE_KVM_VCPU(r13)
91:
#endif
	/* Load guest PMU registers */
	/* R4 is live here (vcpu pointer) */
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	andi.	r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, VCPU_PMC(r4)	/* always load up guest PMU registers */
	lwz	r5, VCPU_PMC + 4(r4)	/* to prevent information leak */
	lwz	r6, VCPU_PMC + 8(r4)
	lwz	r7, VCPU_PMC + 12(r4)
	lwz	r8, VCPU_PMC + 16(r4)
	lwz	r9, VCPU_PMC + 20(r4)
	ld	r5, VCPU_MMCR + 8(r4)
	ld	r6, VCPU_MMCR + 16(r4)
	ld	r5, VCPU_MMCR + 24(r4)
BEGIN_FTR_SECTION_NESTED(96)
	lwz	r7, VCPU_PMC + 24(r4)
	lwz	r8, VCPU_PMC + 28(r4)
	ld	r9, VCPU_MMCR + 32(r4)
END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	/* Load up FP, VMX and VSX registers */
	bl	kvmppc_load_fp

	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)
	/* Switch DSCR to guest value */
	ld	r5, VCPU_DSCR(r4)
	mtspr	SPRN_DSCR, r5

	/* Skip next section on POWER7 */
BEGIN_FTR_SECTION
	b	8f
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Load up POWER8-specific registers */
	lwz	r6, VCPU_PSPB(r4)
	ld	r6, VCPU_DAWRX(r4)
	ld	r7, VCPU_CIABR(r4)
	/*
	 * Handle broken DAWR case by not writing it.  This means we
	 * can still store the DAWR register for migration.
	 */
BEGIN_FTR_SECTION
	mtspr	SPRN_DAWR, r5
	mtspr	SPRN_DAWRX, r6
END_FTR_SECTION_IFSET(CPU_FTR_DAWR)
	ld	r8, VCPU_EBBHR(r4)
	ld	r5, VCPU_EBBRR(r4)
	ld	r6, VCPU_BESCR(r4)
	lwz	r7, VCPU_GUEST_PID(r4)
BEGIN_FTR_SECTION
	/* POWER8-only registers */
	ld	r5, VCPU_TCSCR(r4)
	ld	r7, VCPU_CSIGR(r4)
FTR_SECTION_ELSE
	/* POWER9-only registers */
	ld	r6, VCPU_PSSCR(r4)
	lbz	r8, HSTATE_FAKE_SUSPEND(r13)
	oris	r6, r6, PSSCR_EC@h	/* This makes stop trap to HV */
	rldimi	r6, r8, PSSCR_FAKE_SUSPEND_LG, 63 - PSSCR_FAKE_SUSPEND_LG
	ld	r7, VCPU_HFSCR(r4)
	mtspr	SPRN_PSSCR, r6
	mtspr	SPRN_HFSCR, r7
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
	ld	r5, VCPU_SPRG0(r4)
	ld	r6, VCPU_SPRG1(r4)
	ld	r7, VCPU_SPRG2(r4)
	ld	r8, VCPU_SPRG3(r4)

	/* Load up DAR and DSISR */
	ld	r5, VCPU_DAR(r4)
	lwz	r6, VCPU_DSISR(r4)

	/* Restore AMR and UAMOR, set AMOR to all 1s */

	/* Restore state of CTRL run bit; assume 1 on entry */
	/* Secondary threads wait for primary to have done partition switch */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r6, HSTATE_PTID(r13)
	lbz	r0, VCORE_IN_GUEST(r5)

20:	lwz	r3, VCORE_ENTRY_EXIT(r5)
	lbz	r0, VCORE_IN_GUEST(r5)

	ld	r8,VCORE_LPCR(r5)
	mtspr	SPRN_LPCR,r8
	isync
	/* Set the decrementer to the guest decrementer. */
	ld	r8,VCPU_DEC_EXPIRES(r4)
	/* r8 is a host timebase value here, convert to guest TB */
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r6,VCORE_TB_OFFSET_APPL(r5)
	add	r8,r8,r6
	mftb	r7
	subf	r3,r7,r8
	mtspr	SPRN_DEC,r3

	/* Check if HDEC expires soon */
	mfspr	r3, SPRN_HDEC
	EXTEND_HDEC(r3)
	cmpdi	r3, 512		/* 1 microsecond */
	blt	hdec_soon
	/* For hash guest, clear out and reload the SLB */
	ld	r6, VCPU_KVM(r4)
	lbz	r0, KVM_RADIX(r6)

	/* Load up guest SLB entries (N.B. slb_max will be 0 for radix) */
	lwz	r5,VCPU_SLB_MAX(r4)
	mtctr	r5
	addi	r6,r4,VCPU_SLB
1:	ld	r8,VCPU_SLB_E(r6)
	ld	r9,VCPU_SLB_V(r6)
	slbmte	r9,r8
	addi	r6,r6,VCPU_SLB_SIZE
	bdnz	1b
#ifdef CONFIG_KVM_XICS
	/* We are entering the guest on that thread, push VCPU to XIVE */
	ld	r10, HSTATE_XIVE_TIMA_PHYS(r13)
	ld	r11, VCPU_XIVE_SAVED_STATE(r4)
	lwz	r11, VCPU_XIVE_CAM_WORD(r4)
	li	r9, TM_QW1_OS + TM_WORD2
	stb	r9, VCPU_XIVE_PUSHED(r4)

	/*
	 * We clear the irq_pending flag.  There is a small chance of a
	 * race vs. the escalation interrupt happening on another
	 * processor setting it again, but the only consequence is to
	 * cause a spurious wakeup on the next H_CEDE, which is not an
	 * error.
	 */
	li	r0, 0
	stb	r0, VCPU_IRQ_PENDING(r4)
	/*
	 * In single escalation mode, if the escalation interrupt is
	 * on, we mask it.
	 */
	lbz	r0, VCPU_XIVE_ESC_ON(r4)
	ld	r10, VCPU_XIVE_ESC_RADDR(r4)
	li	r9, XIVE_ESB_SET_PQ_01
	/*
	 * We have a possible subtle race here: the escalation interrupt
	 * might have fired and be on its way to the host queue while we
	 * mask it, and if we unmask it early enough (re-cede right away),
	 * there is a theoretical possibility that it fires again, thus
	 * landing in the target queue more than once, which is a big no-no.
	 *
	 * Fortunately, solving this is rather easy.  If the above load
	 * setting PQ to 01 returns a previous value where P is set, then we
	 * know the escalation interrupt is somewhere on its way to the host.
	 * In that case we simply don't clear the xive_esc_on flag below.
	 * It will be eventually cleared by the handler for the escalation
	 * interrupt.
	 *
	 * Then, when doing a cede, we check that flag again before
	 * re-enabling the escalation interrupt, and if set, we abort the
	 * cede.
	 */
	andi.	r0, r0, XIVE_ESB_VAL_P

	/* Now P is 0, we can clear the flag */
	li	r0, 0
	stb	r0, VCPU_XIVE_ESC_ON(r4)
#endif /* CONFIG_KVM_XICS */
deliver_guest_interrupt:
kvmppc_cede_reentry:		/* r4 = vcpu, r13 = paca */
	ld	r10, VCPU_PC(r4)
	ld	r11, VCPU_MSR(r4)
	ld	r6, VCPU_SRR0(r4)
	ld	r7, VCPU_SRR1(r4)
	mtspr	SPRN_SRR0, r6
	mtspr	SPRN_SRR1, r7

	/* r11 = vcpu->arch.msr & ~MSR_HV */
	rldicl	r11, r11, 63 - MSR_HV_LG, 1
	rotldi	r11, r11, 1 + MSR_HV_LG
	ori	r11, r11, MSR_ME
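	/*
	 * The rotate pair above clears just MSR_HV: the rldicl rotates
	 * the MSR left so that the HV bit becomes the most significant
	 * bit and masks it off (the mask begins at bit 1), and the
	 * rotldi rotates everything back into place.  ME is then forced
	 * on, since the guest must always take machine checks.
	 */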
	/* Check if we can deliver an external or decrementer interrupt now */
	ld	r0, VCPU_PENDING_EXC(r4)
	rldicl	r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
	cmpdi	cr1, r0, 0
	andi.	r8, r11, MSR_EE
	mfspr	r8, SPRN_LPCR
	/* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
	rldimi	r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
	mtspr	SPRN_LPCR, r8
	isync
	beq	5f
	li	r0, BOOK3S_INTERRUPT_EXTERNAL
	bne	cr1, 12f
	mfspr	r0, SPRN_DEC
BEGIN_FTR_SECTION
	/* On POWER9 check whether the guest has large decrementer enabled */
	andis.	r8, r8, LPCR_LD@h
	bne	15f
	extsw	r0, r0
15:	cmpdi	r0, 0
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	bge	5f
	li	r0, BOOK3S_INTERRUPT_DECREMENTER
12:	mtspr	SPRN_SRR0, r10
	mr	r10, r0
	mtspr	SPRN_SRR1, r11
	mr	r9, r4
	bl	kvmppc_msr_interrupt
5:
BEGIN_FTR_SECTION
	b	fast_guest_return
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
	/* On POWER9, check for pending doorbell requests */
	lbz	r0, VCPU_DBELL_REQ(r4)
	cmpwi	r0, 0
	beq	fast_guest_return
	ld	r5, HSTATE_KVM_VCORE(r13)
	/* Set DPDES register so the CPU will take a doorbell interrupt */
	li	r0, 1
	mtspr	SPRN_DPDES, r0
	std	r0, VCORE_DPDES(r5)
	/* Make sure other cpus see vcore->dpdes set before dbell req clear */
	lwsync
	/* Clear the pending doorbell request */
	li	r0, 0
	stb	r0, VCPU_DBELL_REQ(r4)
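	/*
	 * Note: DPDES holds one doorbell-pending bit per hardware thread
	 * of the (sub)core; setting our bit here makes this thread take
	 * a doorbell interrupt once it is back in the guest with
	 * doorbells enabled, which is how a software-requested doorbell
	 * is injected.
	 */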
/*
 * R10: value for HSRR0
 * R11: value for HSRR1
 */
fast_guest_return:
	li	r0,0
	stb	r0,VCPU_CEDED(r4)	/* cancel cede */
	mtspr	SPRN_HSRR0,r10
	mtspr	SPRN_HSRR1,r11
	/* Activate guest mode, so faults get handled by KVM */
	li	r9, KVM_GUEST_MODE_GUEST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Accumulate timing */
	addi	r3, r4, VCPU_TB_GUEST
	bl	kvmhv_accumulate_time
#endif
BEGIN_FTR_SECTION
	ld	r5, VCPU_CFAR(r4)
	mtspr	SPRN_CFAR, r5
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld	r0, VCPU_PPR(r4)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	ld	r1, VCPU_GPR(R1)(r4)
	ld	r2, VCPU_GPR(R2)(r4)
	ld	r3, VCPU_GPR(R3)(r4)
	ld	r5, VCPU_GPR(R5)(r4)
	ld	r6, VCPU_GPR(R6)(r4)
	ld	r7, VCPU_GPR(R7)(r4)
	ld	r8, VCPU_GPR(R8)(r4)
	ld	r9, VCPU_GPR(R9)(r4)
	ld	r10, VCPU_GPR(R10)(r4)
	ld	r11, VCPU_GPR(R11)(r4)
	ld	r12, VCPU_GPR(R12)(r4)
	ld	r13, VCPU_GPR(R13)(r4)
BEGIN_FTR_SECTION
	mtspr	SPRN_PPR, r0
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

/* Move canary into DSISR to check for later */
BEGIN_FTR_SECTION
	li	r0, 0x7fff
	mtspr	SPRN_HDSISR, r0
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

	ld	r0, VCPU_GPR(R0)(r4)
	ld	r4, VCPU_GPR(R4)(r4)
	HRFI_TO_GUEST
	b	.
secondary_too_late:
	stw	r12, STACK_SLOT_TRAP(r1)
	stw	r12, VCPU_TRAP(r4)
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
11:	b	kvmhv_switch_to_host
hdec_soon:
	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER
12:	stw	r12, VCPU_TRAP(r4)
	mr	r9, r4
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
	b	guest_exit_cont
/******************************************************************************
 *                                                                            *
 *                           Interrupt exit code                              *
 *                                                                            *
 *****************************************************************************/

/*
 * We come here from the first-level interrupt handlers.
 */
	.globl	kvmppc_interrupt_hv
kvmppc_interrupt_hv:
	/*
	 * Register contents:
	 * R12		= (guest CR << 32) | interrupt vector
	 * R13		= PACA
	 * guest R12 saved in shadow VCPU SCRATCH0
	 * guest CTR saved in shadow VCPU SCRATCH1 if RELOCATABLE
	 * guest R13 saved in SPRN_SCRATCH0
	 */
	std	r9, HSTATE_SCRATCH2(r13)
	lbz	r9, HSTATE_IN_GUEST(r13)
	cmpwi	r9, KVM_GUEST_MODE_HOST_HV
	beq	kvmppc_bad_host_intr
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	cmpwi	r9, KVM_GUEST_MODE_GUEST
	ld	r9, HSTATE_SCRATCH2(r13)
	beq	kvmppc_interrupt_pr
#endif
	/* We're now back in the host but in guest MMU context */
	li	r9, KVM_GUEST_MODE_HOST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Save registers */

	std	r0, VCPU_GPR(R0)(r9)
	std	r1, VCPU_GPR(R1)(r9)
	std	r2, VCPU_GPR(R2)(r9)
	std	r3, VCPU_GPR(R3)(r9)
	std	r4, VCPU_GPR(R4)(r9)
	std	r5, VCPU_GPR(R5)(r9)
	std	r6, VCPU_GPR(R6)(r9)
	std	r7, VCPU_GPR(R7)(r9)
	std	r8, VCPU_GPR(R8)(r9)
	ld	r0, HSTATE_SCRATCH2(r13)
	std	r0, VCPU_GPR(R9)(r9)
	std	r10, VCPU_GPR(R10)(r9)
	std	r11, VCPU_GPR(R11)(r9)
	ld	r3, HSTATE_SCRATCH0(r13)
	std	r3, VCPU_GPR(R12)(r9)
	/* CR is in the high half of r12 */
	srdi	r4, r12, 32
	std	r4, VCPU_CR(r9)
BEGIN_FTR_SECTION
	ld	r3, HSTATE_CFAR(r13)
	std	r3, VCPU_CFAR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld	r4, HSTATE_PPR(r13)
	std	r4, VCPU_PPR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	/* Restore R1/R2 so we can handle faults */
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATOC(r13)

	mfspr	r10, SPRN_SRR0
	mfspr	r11, SPRN_SRR1
	std	r10, VCPU_SRR0(r9)
	std	r11, VCPU_SRR1(r9)
	/* trap is in the low half of r12, clear CR from the high half */
	clrldi	r12, r12, 32
	andi.	r0, r12, 2		/* need to read HSRR0/1? */
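	/*
	 * By convention the first-level handlers pass hypervisor-class
	 * interrupts in with bit 1 of the vector number set; for those,
	 * the guest PC/MSR are in HSRR0/1 rather than SRR0/1, and the
	 * bit is stripped from the trap number again afterwards.
	 */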
	beq	1f
	mfspr	r10, SPRN_HSRR0
	mfspr	r11, SPRN_HSRR1
	clrrdi	r12, r12, 2
1:	std	r10, VCPU_PC(r9)
	std	r11, VCPU_MSR(r9)

	GET_SCRATCH0(r3)
	std	r3, VCPU_GPR(R13)(r9)

	stw	r12,VCPU_TRAP(r9)
	/*
	 * Now that we have saved away SRR0/1 and HSRR0/1,
	 * interrupts are recoverable in principle, so set MSR_RI.
	 * This becomes important for relocation-on interrupts from
	 * the guest, which we can get in radix mode on POWER9.
	 */
	li	r0, MSR_RI
	mtmsrd	r0, 1

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r9, VCPU_TB_RMINTR
	mr	r4, r9
	bl	kvmhv_accumulate_time
	ld	r5, VCPU_GPR(R5)(r9)
	ld	r6, VCPU_GPR(R6)(r9)
	ld	r7, VCPU_GPR(R7)(r9)
	ld	r8, VCPU_GPR(R8)(r9)
#endif
	/* Save HEIR (HV emulation assist reg) in emul_inst
	   if this is an HEI (HV emulation interrupt, e40) */
	li	r3,KVM_INST_FETCH_FAILED
	stw	r3,VCPU_LAST_INST(r9)
	cmpwi	r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
	bne	11f
	mfspr	r3,SPRN_HEIR
11:	stw	r3,VCPU_HEIR(r9)
	/* these are volatile across C function calls */
#ifdef CONFIG_RELOCATABLE
	ld	r3, HSTATE_SCRATCH1(r13)
	mtctr	r3
#else
	mfctr	r3
#endif
	mfxer	r4
	std	r3, VCPU_CTR(r9)
	std	r4, VCPU_XER(r9)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* For softpatch interrupt, go off and do TM instruction emulation */
	cmpwi	r12, BOOK3S_INTERRUPT_HV_SOFTPATCH
	beq	kvmppc_tm_emul
#endif

	/* If this is a page table miss then see if it's theirs or ours */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	kvmppc_hdsi
	cmpwi	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	beq	kvmppc_hisi

	/* See if this is a leftover HDEC interrupt */
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	bne	2f
	mfspr	r3,SPRN_HDEC
	EXTEND_HDEC(r3)
	cmpdi	r3,0
	mr	r4,r9
	bge	fast_guest_return
2:
	/* See if this is an hcall we can handle in real mode */
	cmpwi	r12,BOOK3S_INTERRUPT_SYSCALL
	beq	hcall_try_real_mode

	/* Hypervisor doorbell - exit only if host IPI flag set */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DOORBELL
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	lbz	r0, HSTATE_HOST_IPI(r13)
	/* If it's a hypervisor facility unavailable interrupt, save HFSCR */
	cmpwi	r12, BOOK3S_INTERRUPT_H_FAC_UNAVAIL
	mfspr	r3, SPRN_HFSCR
	std	r3, VCPU_HFSCR(r9)
	/* External interrupt ? */
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	bne+	guest_exit_cont

	/* External interrupt, first check for host_ipi.  If this is
	 * set, we know the host wants us out so let's do it now
	 */
	bl	kvmppc_read_intr

	/*
	 * Restore the active volatile registers after returning from
	 * a C routine.
	 */
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_EXTERNAL
	/*
	 * kvmppc_read_intr return codes:
	 *
	 * Exit to host (r3 > 0)
	 *   1 An interrupt is pending that needs to be handled by the host
	 *     Exit guest and return to host by branching to guest_exit_cont
	 *
	 *   2 Passthrough that needs completion in the host
	 *     Exit guest and return to host by branching to guest_exit_cont
	 *     However, we also set r12 to BOOK3S_INTERRUPT_HV_RM_HARD
	 *     to indicate to the host to complete handling the interrupt
	 *
	 * Before returning to the guest, we check if any CPU is heading out
	 * to the host and if so, we head out also.  If no CPUs are heading
	 * out, we handle the return values <= 0 below.
	 *
	 * Return to guest (r3 <= 0)
	 *   0 No external interrupt is pending
	 *   -1 A guest wakeup IPI (which has now been cleared)
	 *      In either case, we return to guest to deliver any pending
	 *      guest interrupts.
	 *
	 *   -2 A PCI passthrough external interrupt was handled
	 *      (interrupt was delivered directly to guest)
	 *      Return to guest to deliver any pending guest interrupts.
	 */
	cmpdi	r3, 1
	ble	1f

	/* Return code = 2 */
	li	r12, BOOK3S_INTERRUPT_HV_RM_HARD
	stw	r12, VCPU_TRAP(r9)
	b	guest_exit_cont

1:	/* Return code <= 1 */
	cmpdi	r3, 0
	bgt	guest_exit_cont

	/* Return code <= 0 */
4:	ld	r5, HSTATE_KVM_VCORE(r13)
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	cmpwi	r0, 0x100
	mr	r4, r9
	blt	deliver_guest_interrupt
guest_exit_cont:		/* r9 = vcpu, r12 = trap, r13 = paca */
	/* Save more register state  */
	mfdar	r6
	mfdsisr	r7
	std	r6, VCPU_DAR(r9)
	stw	r7, VCPU_DSISR(r9)
	/* don't overwrite fault_dar/fault_dsisr if HDSI */
	cmpwi	r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	mc_cont
	std	r6, VCPU_FAULT_DAR(r9)
	stw	r7, VCPU_FAULT_DSISR(r9)

	/* See if it is a machine check */
	cmpwi	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	beq	machine_check_realmode
mc_cont:
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r9, VCPU_TB_RMEXIT
	mr	r4, r9
	bl	kvmhv_accumulate_time
#endif
#ifdef CONFIG_KVM_XICS
	/* We are exiting, pull the VP from the XIVE */
	lbz	r0, VCPU_XIVE_PUSHED(r9)
	cmpwi	cr0, r0, 0
	beq	1f
	li	r7, TM_SPC_PULL_OS_CTX
	li	r6, TM_QW1_OS
	mfmsr	r0
	andi.	r0, r0, MSR_DR		/* in real mode? */
	beq	2f
	ld	r10, HSTATE_XIVE_TIMA_VIRT(r13)
	/* First load to pull the context, we ignore the value */
	lwzx	r11, r7, r10
	/* Second load to recover the context state (Words 0 and 1) */
	ldx	r11, r6, r10
	b	3f
2:	ld	r10, HSTATE_XIVE_TIMA_PHYS(r13)
	/* First load to pull the context, we ignore the value */
	lwzcix	r11, r7, r10
	/* Second load to recover the context state (Words 0 and 1) */
	ldcix	r11, r6, r10
3:	std	r11, VCPU_XIVE_SAVED_STATE(r9)
	/* Fixup some of the state for the next load */
	li	r10, 0
	li	r0, 0xff
	stb	r10, VCPU_XIVE_PUSHED(r9)
	stb	r10, (VCPU_XIVE_SAVED_STATE+3)(r9)
	stb	r0, (VCPU_XIVE_SAVED_STATE+4)(r9)
	eieio
1:
#endif /* CONFIG_KVM_XICS */
	/* Possibly flush the link stack here. */
1:	nop
	patch_site 1b patch__call_kvm_flush_link_stack
	/* For hash guest, read the guest SLB and save it away */
	ld	r5, VCPU_KVM(r9)
	lbz	r0, KVM_RADIX(r5)
	cmpwi	r0, 0
	bne	3f			/* for radix, save 0 entries */
	lwz	r0,VCPU_SLB_NR(r9)	/* number of entries in SLB */
	mtctr	r0
	li	r6,0
	addi	r7,r9,VCPU_SLB
1:	slbmfee	r8,r6
	andis.	r0,r8,SLB_ESID_V@h
	beq	2f
	add	r8,r8,r6		/* put index in */
	slbmfev	r3,r6
	std	r8,VCPU_SLB_E(r7)
	std	r3,VCPU_SLB_V(r7)
	addi	r7,r7,VCPU_SLB_SIZE
2:	addi	r6,r6,1
	bdnz	1b

	/* Finally clear out the SLB */
	li	r0,0
	slbmte	r0,r0
	slbia
	ptesync
3:	stw	r5,VCPU_SLB_MAX(r9)
	/* load host SLB entries */
BEGIN_MMU_FTR_SECTION
	b	0f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
	ld	r8,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	li	r3, SLBSHADOW_SAVEAREA
	LDX_BE	r5, r8, r3
	addi	r3, r3, 8
	LDX_BE	r6, r8, r3
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r8,r8,16
	.endr
0:
guest_bypass:
	stw	r12, STACK_SLOT_TRAP(r1)

	/* Save DEC */
	/* Do this before kvmhv_commence_exit so we know TB is guest TB */
	ld	r3, HSTATE_KVM_VCORE(r13)
	mfspr	r5,SPRN_DEC
	mftb	r6
	/* On P9, if the guest has large decr enabled, don't sign extend */
BEGIN_FTR_SECTION
	ld	r4, VCORE_LPCR(r3)
	andis.	r4, r4, LPCR_LD@h
	bne	16f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	extsw	r5,r5
16:	add	r5,r5,r6
	/* r5 is a guest timebase value here, convert to host TB */
	ld	r4,VCORE_TB_OFFSET_APPL(r3)
	subf	r5,r4,r5
	std	r5,VCPU_DEC_EXPIRES(r9)
	/* Increment exit count, poke other threads to exit */
	mr	r3, r12
	bl	kvmhv_commence_exit
	nop
	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Stop others sending VCPU interrupts to this physical CPU */
	li	r0, -1
	stw	r0, VCPU_CPU(r9)
	stw	r0, VCPU_THREAD_CPU(r9)
	/* Save guest CTRL register, set runlatch to 1 */
	mfspr	r6,SPRN_CTRLF
	stw	r6,VCPU_CTRL(r9)
	andi.	r0,r6,1
	bne	4f
	ori	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	/*
	 * Save the guest PURR/SPURR
	 */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	ld	r7,VCPU_PURR(r9)
	ld	r8,VCPU_SPURR(r9)
	std	r5,VCPU_PURR(r9)
	std	r6,VCPU_SPURR(r9)
	subf	r5,r7,r5
	subf	r6,r8,r6

	/*
	 * Restore host PURR/SPURR and add guest times
	 * so that the time in the guest gets accounted.
	 */
	ld	r3,HSTATE_PURR(r13)
	ld	r4,HSTATE_SPURR(r13)
	add	r3,r3,r5
	add	r4,r4,r6
	mtspr	SPRN_PURR,r3
	mtspr	SPRN_SPURR,r4
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

	/* Save POWER8-specific registers */
	mfspr	r5, SPRN_IAMR
	mfspr	r6, SPRN_PSPB
	mfspr	r7, SPRN_FSCR
	std	r5, VCPU_IAMR(r9)
	stw	r6, VCPU_PSPB(r9)
	std	r7, VCPU_FSCR(r9)
	mfspr	r7, SPRN_TAR
	std	r7, VCPU_TAR(r9)
	mfspr	r8, SPRN_EBBHR
	std	r8, VCPU_EBBHR(r9)
	mfspr	r5, SPRN_EBBRR
	mfspr	r6, SPRN_BESCR
	mfspr	r7, SPRN_PID
	mfspr	r8, SPRN_WORT
	std	r5, VCPU_EBBRR(r9)
	std	r6, VCPU_BESCR(r9)
	stw	r7, VCPU_GUEST_PID(r9)
	std	r8, VCPU_WORT(r9)
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_TCSCR
	mfspr	r6, SPRN_ACOP
	mfspr	r7, SPRN_CSIGR
	mfspr	r8, SPRN_TACR
	std	r5, VCPU_TCSCR(r9)
	std	r6, VCPU_ACOP(r9)
	std	r7, VCPU_CSIGR(r9)
	std	r8, VCPU_TACR(r9)
FTR_SECTION_ELSE
	mfspr	r5, SPRN_TIDR
	mfspr	r6, SPRN_PSSCR
	std	r5, VCPU_TID(r9)
	rldicl	r6, r6, 4, 50		/* r6 &= PSSCR_GUEST_VIS */
	rotldi	r6, r6, 60
	std	r6, VCPU_PSSCR(r9)
	/* Restore host HFSCR value */
	ld	r7, STACK_SLOT_HFSCR(r1)
	mtspr	SPRN_HFSCR, r7
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
	/*
	 * Restore various registers to 0, where non-zero values
	 * set by the guest could disrupt the host.
	 */
	li	r0, 0
	mtspr	SPRN_PSPB, r0
	mtspr	SPRN_WORT, r0
BEGIN_FTR_SECTION
	mtspr	SPRN_TCSCR, r0
	/* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
	li	r0, 1
	sldi	r0, r0, 31
	mtspr	SPRN_MMCRS, r0
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
	/* Save and restore AMR, IAMR and UAMOR before turning on the MMU */
	ld	r8, STACK_SLOT_IAMR(r1)
	mtspr	SPRN_IAMR, r8

8:	/* Power7 jumps back in here */
	mfspr	r5,SPRN_AMR
	mfspr	r6,SPRN_UAMOR
	std	r5,VCPU_AMR(r9)
	std	r6,VCPU_UAMOR(r9)
	ld	r5,STACK_SLOT_AMR(r1)
	ld	r6,STACK_SLOT_UAMOR(r1)
	mtspr	SPRN_AMR, r5
	mtspr	SPRN_UAMOR, r6

	/* Switch DSCR back to host value */
	mfspr	r8, SPRN_DSCR
	ld	r7, HSTATE_DSCR(r13)
	std	r8, VCPU_DSCR(r9)
	mtspr	SPRN_DSCR, r7
	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r9)
	std	r15, VCPU_GPR(R15)(r9)
	std	r16, VCPU_GPR(R16)(r9)
	std	r17, VCPU_GPR(R17)(r9)
	std	r18, VCPU_GPR(R18)(r9)
	std	r19, VCPU_GPR(R19)(r9)
	std	r20, VCPU_GPR(R20)(r9)
	std	r21, VCPU_GPR(R21)(r9)
	std	r22, VCPU_GPR(R22)(r9)
	std	r23, VCPU_GPR(R23)(r9)
	std	r24, VCPU_GPR(R24)(r9)
	std	r25, VCPU_GPR(R25)(r9)
	std	r26, VCPU_GPR(R26)(r9)
	std	r27, VCPU_GPR(R27)(r9)
	std	r28, VCPU_GPR(R28)(r9)
	std	r29, VCPU_GPR(R29)(r9)
	std	r30, VCPU_GPR(R30)(r9)
	std	r31, VCPU_GPR(R31)(r9)
	mfspr	r3, SPRN_SPRG0
	mfspr	r4, SPRN_SPRG1
	mfspr	r5, SPRN_SPRG2
	mfspr	r6, SPRN_SPRG3
	std	r3, VCPU_SPRG0(r9)
	std	r4, VCPU_SPRG1(r9)
	std	r5, VCPU_SPRG2(r9)
	std	r6, VCPU_SPRG3(r9)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Branch around the call if both CPU_FTR_TM and
 * CPU_FTR_P9_TM_HV_ASSIST are off.
 */
BEGIN_FTR_SECTION
	b	91f
END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
	/*
	 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
	 */
	mr	r3, r9
	ld	r4, VCPU_MSR(r3)
	bl	kvmppc_save_tm_hv
	ld	r9, HSTATE_KVM_VCPU(r13)
91:
#endif
	/* Increment yield count if they have a VPA */
	ld	r8, VCPU_VPA(r9)	/* do they have a VPA? */
	cmpdi	r8, 0
	beq	25f
	li	r4, LPPACA_YIELDCOUNT
	LWZX_BE	r3, r8, r4
	addi	r3, r3, 1
	STWX_BE	r3, r8, r4
	li	r3, 1
	stb	r3, VCPU_VPA_DIRTY(r9)
25:
	/* Save PMU registers if requested */
	/* r8 and cr0.eq are live here */
BEGIN_FTR_SECTION
	/*
	 * POWER8 seems to have a hardware bug where setting
	 * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE]
	 * when some counters are already negative doesn't seem
	 * to cause a performance monitor alert (and hence interrupt).
	 * The effect of this is that when saving the PMU state,
	 * if there is no PMU alert pending when we read MMCR0
	 * before freezing the counters, but one becomes pending
	 * before we read the counters, we lose it.
	 * To work around this, we need a way to freeze the counters
	 * before reading MMCR0.  Normally, freezing the counters
	 * is done by writing MMCR0 (to set MMCR0[FC]) which
	 * unavoidably writes MMCR0[PMAO] as well.  On POWER8,
	 * we can also freeze the counters using MMCR2, by writing
	 * 1s to all the counter freeze condition bits (there are
	 * 9 bits each for 6 counters).
	 */
	li	r3, -1			/* set all freeze bits */
	clrrdi	r3, r3, 10
	mfspr	r10, SPRN_MMCR2
	mtspr	SPRN_MMCR2, r3
	isync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
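	/*
	 * 6 counters x 9 freeze-condition bits = 54 bits, occupying the
	 * high 54 bits of MMCR2; loading -1 and clearing the low 10
	 * bits sets exactly those freeze bits.
	 */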
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mfspr	r4, SPRN_MMCR0		/* save MMCR0 */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	mfspr	r6, SPRN_MMCRA
	/* Clear MMCRA in order to disable SDAR updates */
	li	r7, 0
	mtspr	SPRN_MMCRA, r7
	isync
	beq	21f			/* if no VPA, save PMU stuff anyway */
	lbz	r7, LPPACA_PMCINUSE(r8)
	cmpwi	r7, 0		/* did they ask for PMU stuff to be saved? */
	bne	21f
	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
	b	22f
21:	mfspr	r5, SPRN_MMCR1
	mfspr	r7, SPRN_SIAR
	mfspr	r8, SPRN_SDAR
	std	r4, VCPU_MMCR(r9)
	std	r5, VCPU_MMCR + 8(r9)
	std	r6, VCPU_MMCR + 16(r9)
BEGIN_FTR_SECTION
	std	r10, VCPU_MMCR + 24(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r7, VCPU_SIAR(r9)
	std	r8, VCPU_SDAR(r9)
	mfspr	r3, SPRN_PMC1
	mfspr	r4, SPRN_PMC2
	mfspr	r5, SPRN_PMC3
	mfspr	r6, SPRN_PMC4
	mfspr	r7, SPRN_PMC5
	mfspr	r8, SPRN_PMC6
	stw	r3, VCPU_PMC(r9)
	stw	r4, VCPU_PMC + 4(r9)
	stw	r5, VCPU_PMC + 8(r9)
	stw	r6, VCPU_PMC + 12(r9)
	stw	r7, VCPU_PMC + 16(r9)
	stw	r8, VCPU_PMC + 20(r9)
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_SIER
	std	r5, VCPU_SIER(r9)
BEGIN_FTR_SECTION_NESTED(96)
	mfspr	r6, SPRN_SPMC1
	mfspr	r7, SPRN_SPMC2
	mfspr	r8, SPRN_MMCRS
	stw	r6, VCPU_PMC + 24(r9)
	stw	r7, VCPU_PMC + 28(r9)
	std	r8, VCPU_MMCR + 32(r9)
	lis	r4, 0x8000
	mtspr	SPRN_MMCRS, r4
END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
22:
	/* Restore host values of some registers */
BEGIN_FTR_SECTION
	ld	r5, STACK_SLOT_CIABR(r1)
	ld	r6, STACK_SLOT_DAWR(r1)
	ld	r7, STACK_SLOT_DAWRX(r1)
	mtspr	SPRN_CIABR, r5
	/*
	 * If the DAWR doesn't work, it's ok to write these here as
	 * this value should always be zero
	 */
	mtspr	SPRN_DAWR, r6
	mtspr	SPRN_DAWRX, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
BEGIN_FTR_SECTION
	ld	r5, STACK_SLOT_TID(r1)
	ld	r6, STACK_SLOT_PSSCR(r1)
	ld	r7, STACK_SLOT_PID(r1)
	mtspr	SPRN_TIDR, r5
	mtspr	SPRN_PSSCR, r6
	mtspr	SPRN_PID, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
#ifdef CONFIG_PPC_RADIX_MMU
	/*
	 * Are we running hash or radix ?
	 */
	ld	r5, VCPU_KVM(r9)
	lbz	r0, KVM_RADIX(r5)
	cmpwi	cr2, r0, 0
	beq	cr2, 2f

	/*
	 * Radix: do eieio; tlbsync; ptesync sequence in case we
	 * interrupted the guest between a tlbie and a ptesync.
	 */
	eieio
	tlbsync
	ptesync

	/* Radix: Handle the case where the guest used an illegal PID */
	LOAD_REG_ADDR(r4, mmu_base_pid)
	lwz	r3, VCPU_GUEST_PID(r9)
	lwz	r5, 0(r4)
	cmpw	cr0,r3,r5
	blt	2f

	/*
	 * Illegal PID, the HW might have prefetched and cached in the TLB
	 * some translations for the LPID 0 / guest PID combination which
	 * Linux doesn't know about, so we need to flush that PID out of
	 * the TLB.  First we need to set LPIDR to 0 so tlbiel applies to
	 * the right context.
	 */
	li	r0,0
	mtspr	SPRN_LPID,r0
	isync

	/* Then do a congruence class local flush */
	ld	r6,VCPU_KVM(r9)
	lwz	r0,KVM_TLB_SETS(r6)
	mtctr	r0
	li	r7,0x400		/* IS field = 0b01 */
	ptesync
	sldi	r0,r3,32		/* RS has PID */
1:	PPC_TLBIEL(7,0,2,1,1)		/* RIC=2, PRS=1, R=1 */
	addi	r7,r7,0x1000
	bdnz	1b
	ptesync

2:
#endif /* CONFIG_PPC_RADIX_MMU */
	/*
	 * POWER7/POWER8 guest -> host partition switch code.
	 * We don't have to lock against tlbies but we do
	 * have to coordinate the hardware threads.
	 * Here STACK_SLOT_TRAP(r1) contains the trap number.
	 */
kvmhv_switch_to_host:
	/* Secondary threads wait for primary to do partition switch */
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */
	lbz	r3,HSTATE_PTID(r13)
	cmpwi	r3,0
	beq	15f
13:	lbz	r3,VCORE_IN_GUEST(r5)
	cmpwi	r3,0
	bne	13b
	/* Primary thread waits for all the secondaries to exit guest */
15:	lwz	r3,VCORE_ENTRY_EXIT(r5)
	rlwinm	r0,r3,32-8,0xff
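	/*
	 * entry_exit_map packs two 8-bit thread maps into one 32-bit
	 * word: the entry map in the low byte and the exit map in the
	 * byte above it.  The rlwinm extracts the exit map, i.e.
	 * (r3 >> 8) & 0xff, so the primary can spin until every thread
	 * that entered the guest has also marked itself exited.
	 */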
	clrldi	r3,r3,56
	cmpw	r3,r0
	bne	15b

	/* Did we actually switch to the guest at all? */
	lbz	r6, VCORE_IN_GUEST(r5)
	cmpwi	r6, 0
	beq	19f
	/* Primary thread switches back to host partition */
	lwz	r7,KVM_HOST_LPID(r4)
BEGIN_FTR_SECTION
	ld	r6,KVM_HOST_SDR1(r4)
	li	r8,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r8
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to host page table */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
	mtspr	SPRN_LPID,r7
	isync
BEGIN_FTR_SECTION
	/* DPDES and VTB are shared between threads */
	mfspr	r7, SPRN_DPDES
	mfspr	r8, SPRN_VTB
	std	r7, VCORE_DPDES(r5)
	std	r8, VCORE_VTB(r5)
	/* clear DPDES so we don't get guest doorbells in the host */
	li	r8, 0
	mtspr	SPRN_DPDES, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	/* If HMI, call kvmppc_realmode_hmi_handler() */
	lwz	r12, STACK_SLOT_TRAP(r1)
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	bne	27f
	bl	kvmppc_realmode_hmi_handler
	nop
	cmpdi	r3, 0
	/*
	 * At this point kvmppc_realmode_hmi_handler may have resync-ed
	 * the TB, and if it has, we must not subtract the guest timebase
	 * offset from the timebase.  So, skip it.
	 *
	 * Also, do not call kvmppc_subcore_exit_guest() because it has
	 * been invoked as part of kvmppc_realmode_hmi_handler().
	 */
	beq	30f
27:
	/* Subtract timebase offset from timebase */
	ld	r8, VCORE_TB_OFFSET_APPL(r5)
	cmpdi	r8,0
	beq	17f
	li	r0, 0
	std	r0, VCORE_TB_OFFSET_APPL(r5)
	mftb	r6			/* current guest timebase */
	subf	r8,r8,r6
	mtspr	SPRN_TBU40,r8		/* update upper 40 bits */
	mftb	r7			/* check if lower 24 bits overflowed */
	clrldi	r6,r6,40
	clrldi	r7,r7,40
	cmpld	r7,r6
	bge	17f
	addis	r8,r8,0x100		/* if so, increment upper 40 bits */
	mtspr	SPRN_TBU40,r8

17:	bl	kvmppc_subcore_exit_guest
	nop
30:	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */

	/* Reset PCR */
	ld	r0, VCORE_PCR(r5)
	cmpdi	r0, 0
	beq	18f
	li	r0, 0
	mtspr	SPRN_PCR, r0
18:
	/* Signal secondary CPUs to continue */
	li	r0, 0
	stb	r0,VCORE_IN_GUEST(r5)
19:	lis	r8,0x7fff		/* MAX_INT@h */
	mtspr	SPRN_HDEC,r8
	/* On POWER9 with HPT-on-radix we need to wait for all other threads */
	ld	r3, HSTATE_SPLIT_MODE(r13)
	lwz	r8, KVM_SPLIT_DO_RESTORE(r3)
	bl	kvmhv_p9_restore_lpcr
	nop
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	ld	r8,KVM_HOST_LPCR(r4)
	mtspr	SPRN_LPCR,r8
	isync
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Finish timing, if we have a vcpu */
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	li	r3, 0
	beq	2f
	bl	kvmhv_accumulate_time
2:
#endif
	/* Unset guest mode */
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)

	lwz	r12, STACK_SLOT_TRAP(r1)	/* return trap # in r12 */
	ld	r0, SFS+PPC_LR_STKOFF(r1)
	addi	r1, r1, SFS
	mtlr	r0
	blr
.global kvm_flush_link_stack
kvm_flush_link_stack:
	/* Save LR into r0 */
	mflr	r0

	/* Flush the link stack.  On Power8 it's up to 32 entries in size. */
	.rept	32
	bl	.+4
	.endr

	/* And on Power9 it's up to 64. */
BEGIN_FTR_SECTION
	.rept	32
	bl	.+4
	.endr
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

	/* Restore LR */
	mtlr	r0
	blr
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Softpatch interrupt for transactional memory emulation cases
 * on POWER9 DD2.2.  This is early in the guest exit path - we
 * haven't saved registers or done a treclaim yet.
 */
kvmppc_tm_emul:
	/* Save instruction image in HEIR */
	mfspr	r3, SPRN_HEIR
	stw	r3, VCPU_HEIR(r9)

	/*
	 * The cases we want to handle here are those where the guest
	 * is in real suspend mode and is trying to transition to
	 * transactional mode.
	 */
	lbz	r0, HSTATE_FAKE_SUSPEND(r13)
	cmpwi	r0, 0		/* keep exiting guest if in fake suspend */
	bne	guest_exit_cont
	rldicl	r3, r11, 64 - MSR_TS_S_LG, 62
	cmpwi	r3, 1		/* or if not in suspend state */
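	/*
	 * The rldicl above rotates the MSR so that the two-bit
	 * transaction-state (TS) field lands in the low two bits, with
	 * the suspend bit as bit 0; comparing with 1 therefore tests
	 * for suspended state.
	 */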
	bne	guest_exit_cont

	/* Call C code to do the emulation */
	mr	r3, r9
	bl	kvmhv_p9_tm_emulation_early
	nop
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_HV_SOFTPATCH
	cmpwi	r4, 0
	beq	guest_exit_cont		/* continue exiting if not handled */
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	b	fast_interrupt_c_return	/* go back to guest if handled */
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
/*
 * Check whether an HDSI is an HPTE not found fault or something else.
 * If it is an HPTE not found fault that is due to the guest accessing
 * a page that they have mapped but which we have paged out, then
 * we continue on with the guest exit path.  In all other cases,
 * reflect the HDSI to the guest as a DSI.
 */
kvmppc_hdsi:
	ld	r3, VCPU_KVM(r9)
	lbz	r0, KVM_RADIX(r3)
	mfspr	r4, SPRN_HDAR
	mfspr	r6, SPRN_HDSISR
BEGIN_FTR_SECTION
	/* Look for DSISR canary.  If we find it, retry instruction */
	cmpdi	r6, 0x7fff
	beq	6f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	cmpwi	r0, 0
	bne	.Lradix_hdsi		/* on radix, just save DAR/DSISR/ASDR */
	/* HPTE not found fault or protection fault? */
	andis.	r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
	beq	1f			/* if not, send it to the guest */
	andi.	r0, r11, MSR_DR		/* data relocation enabled? */
	beq	3f
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_ASDR		/* on POWER9, use ASDR to get VSID */
	b	4f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	clrrdi	r0, r4, 28
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	li	r0, BOOK3S_INTERRUPT_DATA_SEGMENT
	bne	7f			/* if no SLB entry found */
4:	std	r4, VCPU_FAULT_DAR(r9)
	stw	r6, VCPU_FAULT_DSISR(r9)
	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	li	r7, 1			/* data fault */
	bl	kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	6f
	cmpdi	r3, -1			/* handle in kernel mode */
	beq	guest_exit_cont
	cmpdi	r3, -2			/* MMIO emulation; need instr word */
	beq	2f
	/* Synthesize a DSI (or DSegI) for the guest */
	ld	r4, VCPU_FAULT_DAR(r9)
	mr	r6, r3
1:	li	r0, BOOK3S_INTERRUPT_DATA_STORAGE
	mtspr	SPRN_DSISR, r6
7:	mtspr	SPRN_DAR, r4
	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	mr	r10, r0
	bl	kvmppc_msr_interrupt
fast_interrupt_c_return:
6:	ld	r7, VCPU_CTR(r9)
	ld	r8, VCPU_XER(r9)
	mtctr	r7
	mtxer	r8
	mr	r4, r9
	b	fast_guest_return
3:	ld	r5, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r5)
	b	4b

	/* If this is for emulated MMIO, load the instruction word */
2:	li	r8, KVM_INST_FETCH_FAILED	/* In case lwz faults */

	/* Set guest mode to 'jump over instruction' so if lwz faults
	 * we'll just continue at the next IP. */
	li	r0, KVM_GUEST_MODE_SKIP
	stb	r0, HSTATE_IN_GUEST(r13)
	/* Do the access with MSR:DR enabled */
	mfmsr	r3
	ori	r4, r3, MSR_DR		/* Enable paging for data */
	mtmsrd	r4
	lwz	r8, 0(r10)
	mtmsrd	r3

	/* Store the result */
	stw	r8, VCPU_LAST_INST(r9)

	/* Unset guest mode. */
	li	r0, KVM_GUEST_MODE_HOST_HV
	stb	r0, HSTATE_IN_GUEST(r13)
	b	guest_exit_cont

.Lradix_hdsi:
	std	r4, VCPU_FAULT_DAR(r9)
	stw	r6, VCPU_FAULT_DSISR(r9)
.Lradix_hisi:
	mfspr	r5, SPRN_ASDR
	std	r5, VCPU_FAULT_GPA(r9)
	b	guest_exit_cont
/*
 * Similarly for an HISI, reflect it to the guest as an ISI unless
 * it is an HPTE not found fault for a page that we have paged out.
 */
kvmppc_hisi:
	ld	r3, VCPU_KVM(r9)
	lbz	r0, KVM_RADIX(r3)
	cmpwi	r0, 0
	bne	.Lradix_hisi		/* for radix, just save ASDR */
	andis.	r0, r11, SRR1_ISI_NOPT@h
	beq	1f
	andi.	r0, r11, MSR_IR		/* instruction relocation enabled? */
	beq	3f
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_ASDR		/* on POWER9, use ASDR to get VSID */
	b	4f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	clrrdi	r0, r10, 28
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	li	r0, BOOK3S_INTERRUPT_INST_SEGMENT
	bne	7f			/* if no SLB entry found */
4:
	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	mr	r4, r10
	mr	r6, r11
	li	r7, 0			/* instruction fault */
	bl	kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	fast_interrupt_c_return
	cmpdi	r3, -1			/* handle in kernel mode */
	beq	guest_exit_cont
	/* Synthesize an ISI (or ISegI) for the guest */
	mr	r11, r3
1:	li	r0, BOOK3S_INTERRUPT_INST_STORAGE
7:	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	mr	r10, r0
	bl	kvmppc_msr_interrupt
	b	fast_interrupt_c_return
3:	ld	r6, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r6)
	b	4b
/*
 * Try to handle an hcall in real mode.
 * Returns to the guest if we handle it, or continues on up to
 * the kernel if we can't (i.e. if we don't have a handler for
 * it, or if the handler returns H_TOO_HARD).
 *
 * r5 - r8 contain hcall args,
 * r9 = vcpu, r10 = pc, r11 = msr, r12 = trap, r13 = paca
 */
hcall_try_real_mode:
	ld	r3,VCPU_GPR(R3)(r9)
	andi.	r0,r11,MSR_PR
	/* sc 1 from userspace - reflect to guest syscall */
	bne	sc_1_fast_return
	clrrdi	r3,r3,2
	cmpldi	r3,hcall_real_table_end - hcall_real_table
	bge	guest_exit_cont
	/* See if this hcall is enabled for in-kernel handling */
	ld	r4, VCPU_KVM(r9)
	srdi	r0, r3, 8	/* r0 = (r3 / 4) >> 6 */
	sldi	r0, r0, 3	/* index into kvm->arch.enabled_hcalls[] */
	add	r4, r4, r0
	ld	r0, KVM_ENABLED_HCALLS(r4)
	rlwinm	r4, r3, 32-2, 0x3f	/* r4 = (r3 / 4) & 0x3f */
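	/*
	 * enabled_hcalls[] is a bitmap with one bit per hcall number / 4
	 * (hcall numbers are multiples of 4): the 64-bit word index is
	 * (r3 / 4) >> 6, converted to a byte offset by the sldi 3, and
	 * the bit index within that word is (r3 / 4) & 0x3f -- exactly
	 * what the srdi 8 and the rlwinm above compute.
	 */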
	/* Get pointer to handler, if any, and call it */
	LOAD_REG_ADDR(r4, hcall_real_table)
	lwax	r3,r3,r4
	cmpwi	r3,0
	beq	guest_exit_cont
	add	r12,r3,r4
	mtctr	r12
	mr	r3,r9		/* get vcpu pointer */
	ld	r4,VCPU_GPR(R4)(r9)
	bctrl
	cmpdi	r3,H_TOO_HARD
	beq	hcall_real_fallback
	ld	r4,HSTATE_KVM_VCPU(r13)
	std	r3,VCPU_GPR(R3)(r4)
	ld	r10,VCPU_PC(r4)
	ld	r11,VCPU_MSR(r4)
	b	fast_guest_return
sc_1_fast_return:
	li	r10, BOOK3S_INTERRUPT_SYSCALL
	bl	kvmppc_msr_interrupt
	mr	r4,r9
	b	fast_guest_return
	/* We've attempted a real mode hcall, but it has been punted back
	 * to userspace.  We need to restore some clobbered volatiles
	 * before resuming the pass-it-to-qemu path */
hcall_real_fallback:
	li	r12,BOOK3S_INTERRUPT_SYSCALL
	ld	r9, HSTATE_KVM_VCPU(r13)

	b	guest_exit_cont
	.globl	hcall_real_table
hcall_real_table:
	.long	0		/* 0 - unused */
	.long	DOTSYM(kvmppc_h_remove) - hcall_real_table
	.long	DOTSYM(kvmppc_h_enter) - hcall_real_table
	.long	DOTSYM(kvmppc_h_read) - hcall_real_table
	.long	DOTSYM(kvmppc_h_clear_mod) - hcall_real_table
	.long	DOTSYM(kvmppc_h_clear_ref) - hcall_real_table
	.long	DOTSYM(kvmppc_h_protect) - hcall_real_table
	.long	DOTSYM(kvmppc_h_get_tce) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_put_tce) - hcall_real_table
	.long	0		/* 0x24 - H_SET_SPRG0 */
	.long	DOTSYM(kvmppc_h_set_dabr) - hcall_real_table
#ifdef CONFIG_KVM_XICS
	.long	DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_ipoll) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table
#else
	.long	0		/* 0x64 - H_EOI */
	.long	0		/* 0x68 - H_CPPR */
	.long	0		/* 0x6c - H_IPI */
	.long	0		/* 0x70 - H_IPOLL */
	.long	0		/* 0x74 - H_XIRR */
#endif
	.long	DOTSYM(kvmppc_h_cede) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_confer) - hcall_real_table
	.long	DOTSYM(kvmppc_h_bulk_remove) - hcall_real_table
	.long	DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_stuff_tce) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_put_tce_indirect) - hcall_real_table
#ifdef CONFIG_KVM_XICS
	.long	DOTSYM(kvmppc_rm_h_xirr_x) - hcall_real_table
#else
	.long	0		/* 0x2fc - H_XIRR_X */
#endif
	.long	DOTSYM(kvmppc_h_random) - hcall_real_table
	.globl	hcall_real_table_end
hcall_real_table_end:
_GLOBAL(kvmppc_h_set_xdabr)
	andi.	r0, r5, DABRX_USER | DABRX_KERNEL
	beq	6f
	li	r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI
	andc.	r0, r5, r0
	beq	3f
6:	li	r3, H_PARAMETER
	blr

_GLOBAL(kvmppc_h_set_dabr)
	li	r5, DABRX_USER | DABRX_KERNEL
3:
BEGIN_FTR_SECTION
	b	2f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r4,VCPU_DABR(r3)
	stw	r5, VCPU_DABRX(r3)
	mtspr	SPRN_DABRX, r5
	/* Work around P7 bug where DABR can get corrupted on mtspr */
1:	mtspr	SPRN_DABR,r4
	mfspr	r5, SPRN_DABR
	cmpd	r4, r5
	bne	1b
	isync
	li	r3,0
	blr
2:
BEGIN_FTR_SECTION
	/* POWER9 with disabled DAWR */
	li	r3, H_HARDWARE
	blr
END_FTR_SECTION_IFCLR(CPU_FTR_DAWR)
	/* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
	rlwimi	r5, r4, 5, DAWRX_DR | DAWRX_DW
	rlwimi	r5, r4, 2, DAWRX_WT
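	/*
	 * Sketch of the emulation: the rlwimi pair rotates the DABR's
	 * low-order read/write and translate control bits into the
	 * corresponding DAWRX field positions (DR/DW, then WT), so an
	 * old-style H_SET_DABR request can be backed by the DAWR
	 * hardware on P8.
	 */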
	clrrdi	r4, r4, 3
	std	r4, VCPU_DAWR(r3)
	std	r5, VCPU_DAWRX(r3)
	mtspr	SPRN_DAWR, r4
	mtspr	SPRN_DAWRX, r5
	li	r3, 0
	blr
_GLOBAL(kvmppc_h_cede)		/* r3 = vcpu pointer, r11 = msr, r13 = paca */
	ori	r11,r11,MSR_EE
	std	r11,VCPU_MSR(r3)
	li	r0,1
	stb	r0,VCPU_CEDED(r3)
	sync			/* order setting ceded vs. testing prodded */
	lbz	r5,VCPU_PRODDED(r3)
	cmpwi	r5,0
	bne	kvm_cede_prodded
	li	r12,0		/* set trap to 0 to say hcall is handled */
	stw	r12,VCPU_TRAP(r3)
	li	r0,H_SUCCESS
	std	r0,VCPU_GPR(R3)(r3)
	/*
	 * Set our bit in the bitmask of napping threads unless all the
	 * other threads are already napping, in which case we send this
	 * up to the host.
	 */
	ld	r5,HSTATE_KVM_VCORE(r13)
	lbz	r6,HSTATE_PTID(r13)
	lwz	r8,VCORE_ENTRY_EXIT(r5)
	clrldi	r8,r8,56
	li	r0,1
	sld	r0,r0,r6
	addi	r6,r5,VCORE_NAPPING_THREADS
31:	lwarx	r4,0,r6
	or	r4,r4,r0
	cmpw	r4,r8
	beq	kvm_cede_exit
	stwcx.	r4,0,r6
	bne	31b
	/* order napping_threads update vs testing entry_exit_map */
	isync
	li	r0,NAPPING_CEDE
	stb	r0,HSTATE_NAPPING(r13)
	lwz	r7,VCORE_ENTRY_EXIT(r5)
	cmpwi	r7,0x100
	bge	33f		/* another thread already exiting */

	/*
	 * Although not specifically required by the architecture, POWER7
	 * preserves the following registers in nap mode, even if an SMT mode
	 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
	 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
	 */
	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r3)
	std	r15, VCPU_GPR(R15)(r3)
	std	r16, VCPU_GPR(R16)(r3)
	std	r17, VCPU_GPR(R17)(r3)
	std	r18, VCPU_GPR(R18)(r3)
	std	r19, VCPU_GPR(R19)(r3)
	std	r20, VCPU_GPR(R20)(r3)
	std	r21, VCPU_GPR(R21)(r3)
	std	r22, VCPU_GPR(R22)(r3)
	std	r23, VCPU_GPR(R23)(r3)
	std	r24, VCPU_GPR(R24)(r3)
	std	r25, VCPU_GPR(R25)(r3)
	std	r26, VCPU_GPR(R26)(r3)
	std	r27, VCPU_GPR(R27)(r3)
	std	r28, VCPU_GPR(R28)(r3)
	std	r29, VCPU_GPR(R29)(r3)
	std	r30, VCPU_GPR(R30)(r3)
	std	r31, VCPU_GPR(R31)(r3)
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Branch around the call if both CPU_FTR_TM and
 * CPU_FTR_P9_TM_HV_ASSIST are off.
 */
BEGIN_FTR_SECTION
	b	91f
END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
	/*
	 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
	 */
	ld	r3, HSTATE_KVM_VCPU(r13)
	ld	r4, VCPU_MSR(r3)
	bl	kvmppc_save_tm_hv
91:
#endif
	/*
	 * Set DEC to the smaller of DEC and HDEC, so that we wake
	 * no later than the end of our timeslice (HDEC interrupts
	 * don't wake us from nap).
	 */
	mfspr	r3, SPRN_DEC
	mfspr	r4, SPRN_HDEC
	mftb	r5
BEGIN_FTR_SECTION
	/* On P9 check whether the guest has large decrementer mode enabled */
	ld	r6, HSTATE_KVM_VCORE(r13)
	ld	r6, VCORE_LPCR(r6)
	andis.	r6, r6, LPCR_LD@h
	bne	68f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	extsw	r3, r3
68:	EXTEND_HDEC(r4)
	cmpd	r3, r4
	ble	67f
	mtspr	SPRN_DEC, r4
67:
	/* save expiry time of guest decrementer */
	add	r3, r3, r5
	ld	r4, HSTATE_KVM_VCPU(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r6, VCORE_TB_OFFSET_APPL(r5)
	subf	r3, r6, r3	/* convert to host TB value */
	std	r3, VCPU_DEC_EXPIRES(r4)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	ld	r4, HSTATE_KVM_VCPU(r13)
	addi	r3, r4, VCPU_TB_CEDE
	bl	kvmhv_accumulate_time
#endif
	lis	r3, LPCR_PECEDP@h	/* Do wake on privileged doorbell */

	/*
	 * Take a nap until a decrementer, external or doorbell interrupt
	 * occurs, with PECE1 and PECE0 set in LPCR.
	 * On POWER8, set PECEDH, and if we are ceding, also set PECEDP.
	 * Also clear the runlatch bit before napping.
	 */
kvm_do_nap:
	mfspr	r0, SPRN_CTRLF
	clrrdi	r0, r0, 1
	mtspr	SPRN_CTRLT, r0

	li	r0,1
	stb	r0,HSTATE_HWTHREAD_REQ(r13)
	mfspr	r5,SPRN_LPCR
	ori	r5,r5,LPCR_PECE0 | LPCR_PECE1
BEGIN_FTR_SECTION
	ori	r5, r5, LPCR_PECEDH
	rlwimi	r5, r3, 0, LPCR_PECEDP
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
kvm_nap_sequence:		/* desired LPCR value in r5 */
BEGIN_FTR_SECTION
	/*
	 * PSSCR bits:	exit criterion = 1 (wakeup based on LPCR at sreset)
	 *		enable state loss = 1 (allow SMT mode switch)
	 *		requested level = 0 (just stop dispatching)
	 */
	lis	r3, (PSSCR_EC | PSSCR_ESL)@h
	mtspr	SPRN_PSSCR, r3
	/* Set LPCR_PECE_HVEE bit to enable wakeup by HV interrupts */
	li	r4, LPCR_PECE_HVEE@higher
	sldi	r4, r4, 32
	or	r5, r5, r4
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	mtspr	SPRN_LPCR, r5
	isync
	li	r0, 0
	std	r0, HSTATE_SCRATCH0(r13)
	ptesync
	ld	r0, HSTATE_SCRATCH0(r13)
1:	cmpd	r0, r0
	bne	1b
BEGIN_FTR_SECTION
	nap
FTR_SECTION_ELSE
	PPC_STOP
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
	b	.
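	/*
	 * Note: the @higher suffix selects bits 32-47 of a 64-bit
	 * constant, which is why LPCR_PECE_HVEE is loaded with li and
	 * then shifted left 32 before being OR'd into the LPCR image
	 * above.
	 */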
kvm_end_cede:
	/* get vcpu pointer */
	ld	r4, HSTATE_KVM_VCPU(r13)

	/* Woken by external or decrementer interrupt */
	ld	r1, HSTATE_HOST_R1(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMINTR
	bl	kvmhv_accumulate_time
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Branch around the call if both CPU_FTR_TM and
 * CPU_FTR_P9_TM_HV_ASSIST are off.
 */
BEGIN_FTR_SECTION
	b	91f
END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
	/*
	 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
	 */
	mr	r3, r4
	ld	r4, VCPU_MSR(r3)
	bl	kvmppc_restore_tm_hv
	ld	r4, HSTATE_KVM_VCPU(r13)
91:
#endif
	/* load up FP state */
	bl	kvmppc_load_fp

	/* Restore guest decrementer */
	ld	r3, VCPU_DEC_EXPIRES(r4)
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r6, VCORE_TB_OFFSET_APPL(r5)
	add	r3, r3, r6	/* convert host TB to guest TB value */
	mftb	r7
	subf	r3, r7, r3
	mtspr	SPRN_DEC, r3
	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)
	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason

	/*
	 * Restore volatile registers since we could have called a
	 * C routine in kvmppc_check_wake_reason.
	 *
	 * r3 tells us whether we need to return to host or not
	 * WARNING: it gets checked further down:
	 * should not modify r3 until this check is done.
	 */
	ld	r4, HSTATE_KVM_VCPU(r13)
	/* clear our bit in vcore->napping_threads */
34:	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r7, HSTATE_PTID(r13)
	addi	r6, r5, VCORE_NAPPING_THREADS
	stb	r0, HSTATE_NAPPING(r13)

	/* See if the wake reason saved in r3 means we need to exit */
	stw	r12, VCPU_TRAP(r4)
	/* see if any other thread is already exiting */
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	b	kvmppc_cede_reentry	/* if not go back to guest */
	/* cede when already previously prodded case */
	stb	r0, VCPU_PRODDED(r3)
	sync			/* order testing prodded vs. clearing ceded */
	stb	r0, VCPU_CEDED(r3)
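	/*
	 * (The sync above is assumed to pair with a barrier on the
	 * prodding side, so that prodded and ceded cannot be observed in
	 * an order that would lose a wakeup.)
	 */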
	/* we've ceded but we want to give control to the host */
	ld	r9, HSTATE_KVM_VCPU(r13)
#ifdef CONFIG_KVM_XICS
	/* are we using XIVE with single escalation? */
	ld	r10, VCPU_XIVE_ESC_VADDR(r9)
	li	r6, XIVE_ESB_SET_PQ_00
	/*
	 * If we still have a pending escalation, abort the cede,
	 * and we must set PQ to 10 rather than 00 so that we don't
	 * potentially end up with two entries for the escalation
	 * interrupt in the XIVE interrupt queue. In that case
	 * we also don't want to set xive_esc_on to 1 here in
	 * case we race with xive_esc_irq().
	 */
	lbz	r5, VCPU_XIVE_ESC_ON(r9)
	stb	r0, VCPU_CEDED(r9)
	li	r6, XIVE_ESB_SET_PQ_10
	stb	r0, VCPU_XIVE_ESC_ON(r9)
	/* make sure store to xive_esc_on is seen before xive_esc_irq runs */
5:	/* Enable XIVE escalation */
	andi.	r0, r0, MSR_DR		/* in real mode? */
1:	ld	r10, VCPU_XIVE_ESC_RADDR(r9)
#endif /* CONFIG_KVM_XICS */
3:	b	guest_exit_cont
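	/*
	 * Rough sketch of the XIVE ESB PQ values used above (assumed
	 * encoding): PQ=00 leaves the escalation interrupt enabled,
	 * PQ=10 masks it without queueing a new entry. A load from the
	 * ESB "set PQ" page both installs the new bits and returns the
	 * old state, which is why a plain load suffices here.
	 */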
/* Try to handle a machine check in real mode */
machine_check_realmode:
	mr	r3, r9		/* get vcpu pointer */
	bl	kvmppc_realmode_machine_check
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	/*
	 * For a guest that is FWNMI capable, deliver all MCE errors
	 * (handled or unhandled) by exiting the guest with the
	 * KVM_EXIT_NMI exit reason. This approach injects machine check
	 * errors into the guest address space, with additional
	 * information in the form of an RTAS event, enabling the guest
	 * kernel to handle such errors suitably.
	 *
	 * For a guest that is not FWNMI capable (old QEMU), fall back
	 * to the old behaviour for backward compatibility:
	 * Deliver unhandled/fatal (e.g. UE) MCE errors to the guest
	 * through a machine check interrupt (set HSRR0 to 0x200).
	 * For handled errors (non-fatal), just go back to guest
	 * execution with the current HSRR0.
	 * If we receive a machine check with MSR(RI=0), deliver it to
	 * the guest as a machine check, causing the guest to crash.
	 */
	ld	r11, VCPU_MSR(r9)
	rldicl.	r0, r11, 64-MSR_HV_LG, 63	/* check if it happened in HV mode */
	bne	mc_cont			/* if so, exit to host */
	/* Check if guest is capable of handling NMI exit */
	ld	r10, VCPU_KVM(r9)
	lbz	r10, KVM_FWNMI(r10)
	cmpdi	r10, 1			/* FWNMI capable? */
	beq	mc_cont			/* if so, exit with KVM_EXIT_NMI. */

	/* if not, fall through for backward compatibility. */
	andi.	r10, r11, MSR_RI	/* check for unrecoverable exception */
	beq	1f			/* Deliver a machine check to guest */
	cmpdi	r3, 0			/* Did we handle the MCE? */
	bne	2f			/* Continue guest execution. */
	/* If not, deliver a machine check. SRR0/1 are already set */
1:	li	r10, BOOK3S_INTERRUPT_MACHINE_CHECK
	bl	kvmppc_msr_interrupt
2:	b	fast_interrupt_c_return
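/*
 * The decision flow above, as a hypothetical C sketch:
 *
 *	if (guest_msr & MSR_HV)
 *		exit to host (mc_cont);
 *	else if (kvm->arch.fwnmi_enabled)
 *		exit to host with KVM_EXIT_NMI (mc_cont);
 *	else if (!(guest_msr & MSR_RI) || !handled)
 *		inject a machine check into the guest (vector 0x200);
 *	else
 *		resume the guest at the current HSRR0;
 */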
/*
 * Check the reason we woke from nap, and take appropriate action.
 * Returns (in r3):
 *	0 if nothing needs to be done
 *	1 if something happened that needs to be handled by the host
 *	-1 if there was a guest wakeup (IPI or msgsnd)
 *	-2 if we handled a PCI passthrough interrupt (returned by
 *		kvmppc_read_intr only)
 *
 * Also sets r12 to the interrupt vector for any interrupt that needs
 * to be handled now by the host (0x500 for external interrupt), or zero.
 * Modifies all volatile registers (since it may call a C function).
 * This routine calls kvmppc_read_intr, a C function, if an external
 * interrupt is pending.
 */
kvmppc_check_wake_reason:
BEGIN_FTR_SECTION
	rlwinm	r6, r6, 45-31, 0xf	/* extract wake reason field (P8) */
FTR_SECTION_ELSE
	rlwinm	r6, r6, 45-31, 0xe	/* P7 wake reason field is 3 bits */
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
	cmpwi	r6, 8			/* was it an external interrupt? */
	beq	7f			/* if so, see what it was */
	cmpwi	r6, 6			/* was it the decrementer? */
	cmpwi	r6, 5			/* privileged doorbell? */
BEGIN_FTR_SECTION
	cmpwi	r6, 3			/* hypervisor doorbell? */
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	cmpwi	r6, 0xa			/* Hypervisor maintenance? */
	li	r3, 1			/* anything else, return 1 */
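	/*
	 * SRR1 wake reason values tested above (as encoded on POWER8,
	 * per the comparisons): 0x8 external, 0x6 decrementer, 0x5
	 * privileged doorbell, 0x3 hypervisor doorbell, 0xa HMI.
	 */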
	/* hypervisor doorbell */
3:	li	r12, BOOK3S_INTERRUPT_H_DOORBELL

	/*
	 * Clear the doorbell as we will invoke the handler
	 * explicitly in the guest exit path.
	 */
	lis	r6, (PPC_DBELL_SERVER << (63-36))@h
	/* see if it's a host IPI */
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	lbz	r0, HSTATE_HOST_IPI(r13)
	/* if not, return -1 */
	/* Woken up due to Hypervisor maintenance interrupt */
4:	li	r12, BOOK3S_INTERRUPT_HMI
	/* external interrupt - create a stack frame so we can call C */
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -PPC_MIN_STKFRM(r1)
	li	r12, BOOK3S_INTERRUPT_EXTERNAL

	/*
	 * Return code of 2 means PCI passthrough interrupt, but
	 * we need to return back to host to complete handling the
	 * interrupt. Trap reason is expected in r12 by guest
	 * exit handler.
	 */
	li	r12, BOOK3S_INTERRUPT_HV_RM_HARD
	ld	r0, PPC_MIN_STKFRM+PPC_LR_STKOFF(r1)
	addi	r1, r1, PPC_MIN_STKFRM
/*
 * Save away FP, VMX and VSX registers.
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8, r8, MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8, r8, MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	addi	r3, r3, VCPU_FPRS
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	addi	r3, r31, VCPU_VRS
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	mfspr	r6, SPRN_VRSAVE
	stw	r6, VCPU_VRSAVE(r31)
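	/*
	 * Note: MSR_FP/MSR_VEC/MSR_VSX are OR'd into the MSR above so
	 * that the FP/VMX/VSX register files are accessible while their
	 * state is copied out; otherwise those accesses would trap with
	 * a facility-unavailable interrupt.
	 */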
/*
 * Load up FP, VMX and VSX registers.
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8, r8, MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8, r8, MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	addi	r3, r4, VCPU_FPRS
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	addi	r3, r31, VCPU_VRS
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	lwz	r7, VCPU_VRSAVE(r31)
	mtspr	SPRN_VRSAVE, r7
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Save transactional state and TM-related registers.
 * Called with r3 pointing to the vcpu struct and r4 containing
 * the guest MSR value.
 * This can modify all checkpointed registers, but
 * restores r1 and r2 before exit.
 */
	/* See if we need to handle fake suspend mode */
END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST)

	lbz	r0, HSTATE_FAKE_SUSPEND(r13)	/* Were we fake suspended? */
	beq	__kvmppc_save_tm
	/* The following code handles the fake_suspend = 1 case */
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -PPC_MIN_STKFRM(r1)
	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
	rldicl.	r8, r8, 64 - MSR_TS_S_LG, 62	/* Did we actually hrfid? */
BEGIN_FTR_SECTION
	bl	pnv_power9_force_smt4_catch
END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
	std	r1, HSTATE_HOST_R1(r13)

	/* Clear the MSR RI since r1, r13 may be foobar. */
	/* We have to treclaim here because that's the only way to do S->N */
	li	r3, TM_CAUSE_KVM_RESCHED

	/*
	 * We were in fake suspend, so we are not going to save the
	 * register state as the guest checkpointed state (since
	 * we already have it), therefore we can now use any volatile GPR.
	 */
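	/*
	 * TM state transitions relevant here (assumed): treclaim takes
	 * the thread from suspended to non-transactional, capturing the
	 * checkpointed state and recording a failure cause (here
	 * TM_CAUSE_KVM_RESCHED); trechkpt would do the reverse. In fake
	 * suspend no real transaction ever began, so the reclaimed
	 * checkpointed state is simply discarded.
	 */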
	/* Reload PACA pointer, stack pointer and TOC. */
	ld	r1, HSTATE_HOST_R1(r13)

	/* Set MSR RI now we have r1 and r13 back. */

	ld	r6, HSTATE_DSCR(r13)
BEGIN_FTR_SECTION_NESTED(96)
	bl	pnv_power9_force_smt4_release
END_FTR_SECTION_NESTED(CPU_FTR_P9_TM_XER_SO_BUG, CPU_FTR_P9_TM_XER_SO_BUG, 96)
	mfspr	r3, SPRN_PSSCR
	/* PSSCR_FAKE_SUSPEND is a write-only bit, but clear it anyway */
	li	r0, PSSCR_FAKE_SUSPEND
	andc	r3, r3, r0
	mtspr	SPRN_PSSCR, r3
	/* Don't save TEXASR, use value from last exit in real suspend state */
	ld	r9, HSTATE_KVM_VCPU(r13)
	mfspr	r5, SPRN_TFHAR
	mfspr	r6, SPRN_TFIAR
	std	r5, VCPU_TFHAR(r9)
	std	r6, VCPU_TFIAR(r9)
	addi	r1, r1, PPC_MIN_STKFRM
	ld	r0, PPC_LR_STKOFF(r1)
/*
 * Restore transactional state and TM-related registers.
 * Called with r3 pointing to the vcpu struct
 * and r4 containing the guest MSR value.
 * This potentially modifies all checkpointed registers.
 * It restores r1 and r2 from the PACA.
 */
kvmppc_restore_tm_hv:
	/*
	 * If we are doing TM emulation for the guest on a POWER9 DD2,
	 * then we don't actually do a trechkpt -- we either set up
	 * fake-suspend mode, or emulate a TM rollback.
	 */
BEGIN_FTR_SECTION
	b	__kvmppc_restore_tm
END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST)
	std	r0, PPC_LR_STKOFF(r1)
	stb	r0, HSTATE_FAKE_SUSPEND(r13)

	/* Turn on TM so we can restore TM SPRs */
	rldimi	r5, r0, MSR_TM_LG, 63-MSR_TM_LG
	/*
	 * The user may change these outside of a transaction, so they must
	 * always be context switched.
	 */
	ld	r5, VCPU_TFHAR(r3)
	ld	r6, VCPU_TFIAR(r3)
	ld	r7, VCPU_TEXASR(r3)
	mtspr	SPRN_TFHAR, r5
	mtspr	SPRN_TFIAR, r6
	mtspr	SPRN_TEXASR, r7
	rldicl.	r5, r4, 64 - MSR_TS_S_LG, 62
	beqlr		/* TM not active in guest */
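	/*
	 * r5 now holds the MSR[TS] field: 0 = non-transactional
	 * (returned above), 1 = suspended, 2 = transactional (assumed
	 * encoding; the transactional case takes the simulated-rollback
	 * path below).
	 */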
	/* Make sure the failure summary is set */
	oris	r7, r7, (TEXASR_FS)@h
	mtspr	SPRN_TEXASR, r7

	cmpwi	r5, 1		/* check for suspended state */
	stb	r5, HSTATE_FAKE_SUSPEND(r13)
	b	9f		/* and return */
10:	stdu	r1, -PPC_MIN_STKFRM(r1)
	/* guest is in transactional state, so simulate rollback */
	bl	kvmhv_emulate_tm_rollback
	addi	r1, r1, PPC_MIN_STKFRM
9:	ld	r0, PPC_LR_STKOFF(r1)
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
/*
 * We come here if we get any exception or interrupt while we are
 * executing host real mode code while in guest MMU context.
 * r12 is (CR << 32) | vector
 * r13 points to our PACA
 * r12 is saved in HSTATE_SCRATCH0(r13)
 * ctr is saved in HSTATE_SCRATCH1(r13) if RELOCATABLE
 * r9 is saved in HSTATE_SCRATCH2(r13)
 * r13 is saved in HSPRG1
 * cfar is saved in HSTATE_CFAR(r13)
 * ppr is saved in HSTATE_PPR(r13)
 */
kvmppc_bad_host_intr:
	/*
	 * Switch to the emergency stack, but start half-way down in
	 * case we were already on it.
	 */
	ld	r1, PACAEMERGSP(r13)
	subi	r1, r1, THREAD_SIZE/2 + INT_FRAME_SIZE
	mfspr	r3, SPRN_HSRR0
	mfspr	r4, SPRN_HSRR1
	mfspr	r6, SPRN_HDSISR
1:	mfspr	r3, SPRN_SRR0
	mfspr	r6, SPRN_DSISR
	ld	r9, HSTATE_SCRATCH2(r13)
	ld	r12, HSTATE_SCRATCH0(r13)
	ld	r5, HSTATE_CFAR(r13)
	std	r5, ORIG_GPR3(r1)
#ifdef CONFIG_RELOCATABLE
	ld	r4, HSTATE_SCRATCH1(r13)
#endif
	lbz	r6, PACAIRQSOFTMASK(r13)
	LOAD_REG_IMMEDIATE(3, 0x7265677368657265)
	std	r3, STACK_FRAME_OVERHEAD-16(r1)
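	/*
	 * 0x7265677368657265 is ASCII "regshere": a marker word stored
	 * in the frame so the saved registers are easy to spot in a
	 * memory dump.
	 */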
	/*
	 * On POWER9 do a minimal restore of the MMU and call C code,
	 * which will print a message and panic.
	 * XXX On POWER7 and POWER8, we just spin here since we don't
	 * know what the other threads are doing (and we don't want to
	 * coordinate with them) - but at least we now have register state
	 * in memory that we might be able to look at from another CPU.
	 */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_KVM(r9)
	mtspr	SPRN_CIABR, r0
	mtspr	SPRN_DAWRX, r0
BEGIN_MMU_FTR_SECTION
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
	ld	r8, PACA_SLBSHADOWPTR(r13)
	.rept	SLB_NUM_BOLTED
	li	r3, SLBSHADOW_SAVEAREA
	andis.	r7, r5, SLB_ESID_V@h
	.endr
4:	lwz	r7, KVM_HOST_LPID(r10)
	ld	r8, KVM_HOST_LPCR(r10)
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)
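	/*
	 * With the host LPID/LPCR values reloaded above and
	 * HSTATE_IN_GUEST cleared, subsequent interrupts are treated as
	 * ordinary host interrupts rather than guest exits.
	 */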
	/*
	 * Turn on the MMU and jump to C code
	 */
	addi	r3, r3, 9f - 5b
	rldimi	r3, r4, 62, 0	/* ensure 0xc000000000000000 bits are set */
	ld	r4, PACAKMSR(r13)
9:	addi	r3, r1, STACK_FRAME_OVERHEAD
	bl	kvmppc_bad_interrupt
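	/*
	 * The rldimi above turns the branch target into its kernel
	 * linear-mapping alias (the 0xc000... region) and PACAKMSR
	 * holds the host MSR with translation enabled, so the code at
	 * label 9 runs with the MMU on and can safely call C.
	 */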
/*
 * This mimics the MSR transition on IRQ delivery. The new guest MSR is taken
 * from VCPU_INTR_MSR and is modified based on the required TM state changes.
 * r11 has the guest MSR value (in/out)
 * r9 has a vcpu pointer (in)
 * r0 is used as a scratch register
 */
kvmppc_msr_interrupt:
	rldicl	r0, r11, 64 - MSR_TS_S_LG, 62
	cmpwi	r0, 2	/* Check if we are in transactional state.. */
	ld	r11, VCPU_INTR_MSR(r9)
	/* ... if transactional, change to suspended */
1:	rldimi	r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
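/*
 * Hypothetical C sketch of kvmppc_msr_interrupt (the r0 = 1 setup for
 * the suspended case is not shown above):
 *
 *	ts = (guest_msr >> MSR_TS_S_LG) & 3; // 2 = transactional, 1 = suspended
 *	if (ts == 2)
 *		ts = 1;                      // T -> S across interrupt delivery
 *	new_msr = vcpu->arch.intr_msr;
 *	// the rldimi inserts ts back into new_msr's MSR[TS] field
 */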
/*
 * This works around a hardware bug on POWER8E processors, where
 * writing a 1 to the MMCR0[PMAO] bit doesn't generate a
 * performance monitor interrupt. Instead, when we need to have
 * an interrupt pending, we have to arrange for a counter to overflow.
 */
	mtspr	SPRN_MMCR2, r3
	lis	r3, (MMCR0_PMXE | MMCR0_FCECE)@h
	ori	r3, r3, MMCR0_PMCjCE | MMCR0_C56RUN
	mtspr	SPRN_MMCR0, r3
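	/*
	 * MMCR0 bits used here (assumed meanings): PMXE enables the
	 * performance monitor exception, FCECE freezes the counters once
	 * the enabled condition or event occurs, and PMCjCE/C56RUN let
	 * the counters run so an overflow can actually be provoked.
	 */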
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
/*
 * Start timing an activity
 * r3 = pointer to time accumulation struct, r4 = vcpu
 */
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r6, VCORE_TB_OFFSET_APPL(r5)
	subf	r5, r6, r5	/* subtract current timebase offset */
	std	r3, VCPU_CUR_ACTIVITY(r4)
	std	r5, VCPU_ACTIVITY_START(r4)
/*
 * Accumulate time to one activity and start another.
 * r3 = pointer to new time accumulation struct, r4 = vcpu
 */
kvmhv_accumulate_time:
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r8, VCORE_TB_OFFSET_APPL(r5)
	ld	r5, VCPU_CUR_ACTIVITY(r4)
	ld	r6, VCPU_ACTIVITY_START(r4)
	std	r3, VCPU_CUR_ACTIVITY(r4)
	subf	r7, r8, r7	/* subtract current timebase offset */
	std	r7, VCPU_ACTIVITY_START(r4)
	ld	r8, TAS_SEQCOUNT(r5)
	std	r8, TAS_SEQCOUNT(r5)
	ld	r7, TAS_TOTAL(r5)
	std	r7, TAS_TOTAL(r5)
3:	std	r3, TAS_MIN(r5)
	std	r8, TAS_SEQCOUNT(r5)
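	/*
	 * TAS_SEQCOUNT is used seqlock-style: it is bumped before and
	 * after the TAS_TOTAL/TAS_MIN/TAS_MAX updates, so a
	 * (hypothetical) C reader would retry while it is odd or has
	 * changed:
	 *
	 *	do {
	 *		seq = acc->seqcount;
	 *		total = acc->total;
	 *	} while ((seq & 1) || acc->seqcount != seq);
	 */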