/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */
#include <asm/asm-compat.h>
#include <asm/feature-fixups.h>

/* One shadow SLB entry: an 8-byte ESID dword followed by an 8-byte VSID
 * dword, 16 bytes in total. */
#define SHADOW_SLB_ENTRY_LEN	0x10
/* Byte offset of the ESID dword of shadow SLB entry (x).
 * The argument is parenthesized so expression arguments expand correctly. */
#define OFFSET_ESID(x)		(SHADOW_SLB_ENTRY_LEN * (x))
/* Byte offset of the VSID dword of shadow SLB entry (x) — 8 past its ESID. */
#define OFFSET_VSID(x)		((SHADOW_SLB_ENTRY_LEN * (x)) + 8)
27 /******************************************************************************
31 *****************************************************************************/
/*
 * LOAD_GUEST_SEGMENTS - replace the hardware SLB contents with the guest's
 * shadow SLB entries on the way into the guest.
 *
 * NOTE(review): the original line numbering embedded in this excerpt jumps
 * (e.g. 57 -> 65, 72 -> 80), so interior lines of this macro — including the
 * BEGIN of the firmware-feature section, the loop body that writes the SLB,
 * and the closing .endm — are not visible here. Comments below describe only
 * the visible instructions.
 */
33 .macro LOAD_GUEST_SEGMENTS
/* Fragment of the original register-contract comment (delimiters lost): */
42 * all other volatile GPRS = free except R4, R6
43 * SVCPU[CR] = guest CR
44 * SVCPU[XER] = guest XER
45 * SVCPU[CTR] = guest CTR
46 * SVCPU[LR] = guest LR
51 /* Declare SLB shadow as 0 entries big */
/* r13 = PACA on ppc64 (per the comment in LOAD_HOST_SEGMENTS below);
 * fetch the pointer to this CPU's SLB shadow buffer. */
53 ld r11, PACA_SLBSHADOWPTR(r13)
/* End of a firmware-feature alternative section: the patched-in code runs
 * only under an LPAR hypervisor (matching BEGIN not visible in excerpt). */
57 END_FW_FTR_SECTION_IFSET(FW_FEATURE_LPAR)
65 /* Fill SLB with our shadow */
/* r3 = shadow vcpu; load the count of used shadow SLB entries (one byte). */
67 lbz r12, SVCPU_SLB_MAX(r3)
/* Bias by the offset of the SLB array inside the shadow vcpu — presumably
 * forming the end-of-array offset; a scaling step (count -> bytes) appears
 * to be among the lines missing from this excerpt — TODO confirm. */
69 addi r12, r12, SVCPU_SLB
72 /* for (r11 = kvm_slb; r11 < kvm_slb + kvm_slb_size; r11+=slb_entry) */
/* Test the valid bit in the high half of the entry's ESID (r10); sets CR0. */
80 andis. r9, r10, SLB_ESID_V@h
/* Entry not valid -> skip it (branch target not visible in excerpt). */
81 beq slb_loop_enter_skip
95 /******************************************************************************
99 *****************************************************************************/
/*
 * LOAD_HOST_SEGMENTS - on guest exit, clear the in-use SLB entries and
 * restore the host's bolted entries from the shadow SLB.
 *
 * NOTE(review): the embedded original line numbering jumps (e.g. 117 -> 123,
 * 139 -> 147), so interior lines of this macro are missing from this excerpt,
 * and its closing .endm lies past the end of the visible text. Comments
 * below describe only the visible instructions.
 */
101 .macro LOAD_HOST_SEGMENTS
103 /* Register usage at this point:
107 * R12 = exit handler id
108 * R13 = shadow vcpu - SHADOW_VCPU_OFF [=PACA on PPC64]
110 * SVCPU[CR] = guest CR
111 * SVCPU[XER] = guest XER
112 * SVCPU[CTR] = guest CTR
113 * SVCPU[LR] = guest LR
117 /* Remove all SLB entries that are in use. */
123 /* Restore bolted entries from the shadow */
/* r13 = PACA (see register-usage comment above); pointer to the SLB
 * shadow buffer. */
125 ld r11, PACA_SLBSHADOWPTR(r13)
129 /* Declare SLB shadow as SLB_NUM_BOLTED entries big */
131 li r8, SLB_NUM_BOLTED
/* End of LPAR-only firmware-feature alternative (BEGIN not visible here). */
134 END_FW_FTR_SECTION_IFSET(FW_FEATURE_LPAR)
136 /* Manually load all entries from shadow SLB */
/* r8 = offset of the current entry's ESID within the shadow save area,
 * r7 = offset of its VSID (ESID + 8), matching the 16-byte entry layout
 * of SHADOW_SLB_ENTRY_LEN / OFFSET_ESID / OFFSET_VSID. */
138 li r8, SLBSHADOW_SAVEAREA
139 li r7, SLBSHADOW_SAVEAREA + 8
/* Loop increment: advance both offsets by one 16-byte shadow SLB entry
 * (loop body and backward branch not visible in this excerpt). */
147 1: addi r7, r7, SHADOW_SLB_ENTRY_LEN
148 addi r8, r8, SHADOW_SLB_ENTRY_LEN