/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/asm-compat.h>
#include <asm/feature-fixups.h>

#define SHADOW_SLB_ENTRY_LEN	0x10
#define OFFSET_ESID(x)		(SHADOW_SLB_ENTRY_LEN * x)
#define OFFSET_VSID(x)		((SHADOW_SLB_ENTRY_LEN * x) + 8)
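
/* Each shadow SLB entry is 16 bytes: the ESID doubleword at offset 0 and
 * the VSID doubleword at offset 8, which is what OFFSET_ESID()/OFFSET_VSID()
 * compute for entry x. */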

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

.macro LOAD_GUEST_SEGMENTS

	/* Required state:
	 *
	 * R13 = PACA
	 * R3  = shadow vcpu
	 * all other volatile GPRs = free except R4, R6
	 * SVCPU[CR]  = guest CR
	 * SVCPU[XER] = guest XER
	 * SVCPU[CTR] = guest CTR
	 * SVCPU[LR]  = guest LR
	 */
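
	/* From here on we replace the host SLB contents with the guest's
	 * shadow SLB kept in the shadow vcpu (SVCPU_SLB / SVCPU_SLB_MAX). */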

BEGIN_FW_FTR_SECTION

	/* Declare SLB shadow as 0 entries big */

	ld	r11, PACA_SLBSHADOWPTR(r13)
	li	r8, 0
	stb	r8, 3(r11)

END_FW_FTR_SECTION_IFSET(FW_FEATURE_LPAR)
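
	/* On an LPAR the hypervisor maintains bolted entries from the SLB
	 * shadow buffer on its own; declaring the buffer 0 entries big above
	 * keeps it from reinstalling host entries while the guest SLB is
	 * active. */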

	/* Flush SLB */

	li	r10, 0
	slbmte	r10, r10
	slbia

	/* Fill SLB with our shadow */

	lbz	r12, SVCPU_SLB_MAX(r3)
	mulli	r12, r12, 16
	addi	r12, r12, SVCPU_SLB
	add	r12, r12, r3
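
	/* r12 now points one past the last used shadow entry. The loop below
	 * installs every valid entry with slbmte, which expects the VSID
	 * doubleword in RS and the ESID doubleword (valid bit plus index)
	 * in RB. */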

	/* for (r11 = kvm_slb; r11 < kvm_slb + kvm_slb_size; r11+=slb_entry) */
	li	r11, SVCPU_SLB
	add	r11, r11, r3

slb_loop_enter:

	ld	r10, 0(r11)

	andis.	r9, r10, SLB_ESID_V@h
	beq	slb_loop_enter_skip

	ld	r9, 8(r11)
	slbmte	r9, r10

slb_loop_enter_skip:
	addi	r11, r11, 16
	cmpd	cr0, r11, r12
	blt	slb_loop_enter

slb_do_enter:

.endm

/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/

.macro LOAD_HOST_SEGMENTS

	/* Register usage at this point:
	 *
	 * R1         = host R1
	 * R2         = host R2
	 * R12        = exit handler id
	 * R13        = shadow vcpu - SHADOW_VCPU_OFF [=PACA on PPC64]
	 * SVCPU[CR]  = guest CR
	 * SVCPU[XER] = guest XER
	 * SVCPU[CTR] = guest CTR
	 * SVCPU[LR]  = guest LR
	 */
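
	/* From here on we tear the guest SLB down and reinstate the host's
	 * bolted entries from the shadow buffer. */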

	/* Remove all SLB entries that are in use. */

	li	r0, 0
	slbmte	r0, r0
	slbia
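
	/* slbia leaves SLB entry 0 untouched, hence the explicit zero slbmte
	 * beforehand to clear that entry as well. */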

	/* Restore bolted entries from the shadow */

	ld	r11, PACA_SLBSHADOWPTR(r13)

BEGIN_FW_FTR_SECTION

	/* Declare SLB shadow as SLB_NUM_BOLTED entries big */

	li	r8, SLB_NUM_BOLTED
	stb	r8, 3(r11)

END_FW_FTR_SECTION_IFSET(FW_FEATURE_LPAR)
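
	/* Mirror of the entry path: restoring the entry count lets the
	 * hypervisor maintain the bolted host entries from the shadow
	 * buffer again. */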

	/* Manually load all entries from shadow SLB */

	li	r8, SLBSHADOW_SAVEAREA
	li	r7, SLBSHADOW_SAVEAREA + 8
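
	/* In C terms the unrolled loop below does roughly:
	 *
	 *   for (i = 0; i < SLB_NUM_BOLTED; i++) {
	 *           esid = be64(shadow->save_area[i].esid);
	 *           if (esid)
	 *                   slbmte(be64(shadow->save_area[i].vsid), esid);
	 *   }
	 *
	 * r8 and r7 track the byte offsets of the ESID and VSID words, and
	 * LDX_BE loads the big-endian values independent of host endianness. */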

	.rept	SLB_NUM_BOLTED
	LDX_BE	r10, r11, r8
	cmpdi	r10, 0
	beq	1f
	LDX_BE	r9, r11, r7
	slbmte	r9, r10
1:	addi	r7, r7, SHADOW_SLB_ENTRY_LEN
	addi	r8, r8, SHADOW_SLB_ENTRY_LEN
	.endr

	isync
	sync

slb_do_exit:

.endm
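
/* Both macros are meant to be pulled into the Book3S PR segment switching
 * code; on 64-bit hosts book3s_segment.S includes this file. */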