1 /* SPDX-License-Identifier: GPL-2.0 */
3 * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
6 #include <linux/linkage.h>
8 #include <asm/asmmacro.h>
9 #include <asm/loongarch.h>
10 #include <asm/regdef.h>
11 #include <asm/stackframe.h>
/* Byte offset of host GPR $r<x> inside the on-stack pt_regs frame */
13 #define HGPR_OFFSET(x) (PT_R0 + 8*x)
/* Byte offset of guest GPR $r<x> inside the kvm_vcpu_arch guest GPR save area */
14 #define GGPR_OFFSET(x) (KVM_ARCH_GGPR + 8*x)
/*
 * Save the host's call-preserved GPRs — $r1 (ra), $r2 (tp), $r3 (sp)
 * and $r22-$r31 — into the pt_regs frame at \base.
 * Caller-saved registers need no saving across the guest switch.
 */
16 .macro kvm_save_host_gpr base
17 .irp n,1,2,3,22,23,24,25,26,27,28,29,30,31
18 st.d $r\n, \base, HGPR_OFFSET(\n)
/*
 * Restore the host's call-preserved GPRs — $r1 (ra), $r2 (tp), $r3 (sp)
 * and $r22-$r31 — from the pt_regs frame at \base.
 * Mirror of kvm_save_host_gpr: same register list, loads instead of stores.
 */
22 .macro kvm_restore_host_gpr base
23 .irp n,1,2,3,22,23,24,25,26,27,28,29,30,31
24 ld.d $r\n, \base, HGPR_OFFSET(\n)
29 * Save and restore all GPRs except base register,
30 * and default value of base register is a2.
/*
 * Save all guest GPRs into the guest GPR area at \base, except
 * $r0 (hardwired zero) and $r6 (a2) — the list below skips n=6
 * because a2 is the base register itself; it is saved separately
 * from the KVM_TEMP_KS scratch CSR by the caller.
 */
32 .macro kvm_save_guest_gprs base
33 .irp n,1,2,3,4,5,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
34 st.d $r\n, \base, GGPR_OFFSET(\n)
/*
 * Restore all guest GPRs from the guest GPR area at \base, except
 * $r0 (hardwired zero) and $r6 (a2, the base register — it is
 * reloaded last by the caller, after this macro completes).
 */
38 .macro kvm_restore_guest_gprs base
39 .irp n,1,2,3,4,5,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
40 ld.d $r\n, \base, GGPR_OFFSET(\n)
45 * Prepare switch to guest, save host regs and restore guest regs.
46 * a2: kvm_vcpu_arch, don't touch it until 'ertn'
47 * t0, t1: temp register
49 .macro kvm_switch_to_guest
50 /* Set host ECFG.VS=0, all exceptions share one exception entry */
51 csrrd t0, LOONGARCH_CSR_ECFG
52 bstrins.w t0, zero, CSR_ECFG_VS_SHIFT_END, CSR_ECFG_VS_SHIFT
53 csrwr t0, LOONGARCH_CSR_ECFG
55 /* Load up the new EENTRY */
56 ld.d t0, a2, KVM_ARCH_GEENTRY
57 csrwr t0, LOONGARCH_CSR_EENTRY
/* Set ERA to the saved guest PC; the ertn below resumes execution there */
60 ld.d t0, a2, KVM_ARCH_GPC
61 csrwr t0, LOONGARCH_CSR_ERA
/* Stash the host page-table root so kvm_exc_entry can restore it on exit */
64 csrrd t0, LOONGARCH_CSR_PGDL
65 st.d t0, a2, KVM_ARCH_HPGD
/* t1 = vcpu->kvm (a2 points at vcpu->arch, hence the negative offset fixup) */
68 ld.d t1, a2, KVM_VCPU_KVM - KVM_VCPU_ARCH
/*
 * Switch PGDL to the guest page table.
 * NOTE(review): the load of t0 with the guest pgd (from the kvm struct
 * reached via t1) is not visible in this chunk — confirm against full file.
 */
73 csrwr t0, LOONGARCH_CSR_PGDL
/* Copy GSTAT.GID into GTLBC.TGID so subsequent TLB ops target the guest */
76 csrrd t1, LOONGARCH_CSR_GSTAT
77 bstrpick.w t1, t1, CSR_GSTAT_GID_SHIFT_END, CSR_GSTAT_GID_SHIFT
78 csrrd t0, LOONGARCH_CSR_GTLBC
79 bstrins.w t0, t1, CSR_GTLBC_TGID_SHIFT_END, CSR_GTLBC_TGID_SHIFT
80 csrwr t0, LOONGARCH_CSR_GTLBC
83 * Enable intr in root mode with future ertn so that host interrupt
84 * can be responded to during VM runs
85 * Guest CRMD comes from separate GCSR_CRMD register
87 ori t0, zero, CSR_PRMD_PIE
88 csrxchg t0, t0, LOONGARCH_CSR_PRMD
90 /* Set PVM bit to setup ertn to guest context */
91 ori t0, zero, CSR_GSTAT_PVM
92 csrxchg t0, t0, LOONGARCH_CSR_GSTAT
/* Restore every guest GPR except a2 (the base), then reload a2 itself last */
95 kvm_restore_guest_gprs a2
96 /* Load KVM_ARCH register */
97 ld.d a2, a2, (KVM_ARCH_GGPR + 8 * REG_A2)
99 ertn /* Switch to guest: GSTAT.PGM = 1, ERRCTL.ISERR = 0, TLBRPRMD.ISTLBR = 0 */
103 * Exception entry for general exceptions taken from guest mode
105 * - kernel privilege in root mode
106 * - page mode kept unchanged from the previous PRMD in root mode
107 * - FIXME: a TLB exception cannot be handled here, since the TLB-related
108 * -   registers (pgd table/vmid registers etc.) still hold guest state;
109 * -   this will be fixed once hardware page walk is enabled
110 * load kvm_vcpu from the reserved CSR KVM_VCPU_KS, and save a2 to KVM_TEMP_KS
113 .cfi_sections .debug_frame
/*
 * kvm_exc_entry: common exception entry taken while a guest is running.
 * Saves the full guest GPR state into vcpu->arch, restores host CSR/stack
 * context, then dispatches to the C exit handler (vcpu->arch.handle_exit).
 * On entry only the two kscratch CSRs are usable: a2 is parked in
 * KVM_TEMP_KS so it can serve as the vcpu->arch base pointer.
 */
114 SYM_CODE_START(kvm_exc_entry)
115 csrwr a2, KVM_TEMP_KS
116 csrrd a2, KVM_VCPU_KS
117 addi.d a2, a2, KVM_VCPU_ARCH
119 /* After save GPRs, free to use any GPR */
120 kvm_save_guest_gprs a2
/* a2 was skipped by the macro above; recover it from the scratch CSR */
122 csrrd t0, KVM_TEMP_KS
123 st.d t0, a2, (KVM_ARCH_GGPR + 8 * REG_A2)
125 /* A2 is kvm_vcpu_arch, A1 is free to use */
/* s1 = vcpu, s0 = vcpu->run — kept in callee-saved regs across the C call */
126 csrrd s1, KVM_VCPU_KS
127 ld.d s0, s1, KVM_VCPU_RUN
/* Record exit cause, guest PC and fault addresses for the exit handler */
129 csrrd t0, LOONGARCH_CSR_ESTAT
130 st.d t0, a2, KVM_ARCH_HESTAT
131 csrrd t0, LOONGARCH_CSR_ERA
132 st.d t0, a2, KVM_ARCH_GPC
133 csrrd t0, LOONGARCH_CSR_BADV
134 st.d t0, a2, KVM_ARCH_HBADV
135 csrrd t0, LOONGARCH_CSR_BADI
136 st.d t0, a2, KVM_ARCH_HBADI
138 /* Restore host ECFG.VS */
139 csrrd t0, LOONGARCH_CSR_ECFG
140 ld.d t1, a2, KVM_ARCH_HECFG
142 csrwr t0, LOONGARCH_CSR_ECFG
144 /* Restore host EENTRY */
145 ld.d t0, a2, KVM_ARCH_HEENTRY
146 csrwr t0, LOONGARCH_CSR_EENTRY
148 /* Restore host pgd table */
149 ld.d t0, a2, KVM_ARCH_HPGD
150 csrwr t0, LOONGARCH_CSR_PGDL
153 * Disable PGM bit to enter root mode by default with next ertn
155 ori t0, zero, CSR_GSTAT_PVM
156 csrxchg zero, t0, LOONGARCH_CSR_GSTAT
158 * Clear GTLBC.TGID field
159 * Clear GTLBC.TGID field
160 * 0: for root tlb update in future tlb instr
161 * others: for guest tlb update like gpa to hpa in future tlb instr
163 csrrd t0, LOONGARCH_CSR_GTLBC
164 bstrins.w t0, zero, CSR_GTLBC_TGID_SHIFT_END, CSR_GTLBC_TGID_SHIFT
165 csrwr t0, LOONGARCH_CSR_GTLBC
/* Back on the host thread pointer, stack and per-cpu base */
166 ld.d tp, a2, KVM_ARCH_HTP
167 ld.d sp, a2, KVM_ARCH_HSP
168 /* restore per cpu register */
169 ld.d u0, a2, KVM_ARCH_HPERCPU
/* Point sp at the pt_regs frame reserved by kvm_enter_guest */
170 addi.d sp, sp, -PT_SIZE
172 /* Prepare handle exception */
175 ld.d t8, a2, KVM_ARCH_HANDLE_EXIT
/*
 * NOTE(review): the indirect call through t8 and the re-derivation of a2
 * from s1 are not visible in this chunk — a2 is presumed to point at the
 * vcpu again just before this addi.d; confirm against the full file.
 */
179 addi.d a2, a2, KVM_VCPU_ARCH
181 /* Resume host when ret <= 0 */
186 * Save per cpu register again, maybe switched to another cpu
188 st.d u0, a2, KVM_ARCH_HPERCPU
190 /* Save kvm_vcpu to kscratch */
191 csrwr s1, KVM_VCPU_KS
/* Recompute the host pt_regs frame address and restore host GPRs from it */
195 ld.d a2, a2, KVM_ARCH_HSP
196 addi.d a2, a2, -PT_SIZE
197 kvm_restore_host_gpr a2
200 SYM_INNER_LABEL(kvm_exc_entry_end, SYM_L_LOCAL)
201 SYM_CODE_END(kvm_exc_entry)
204 * int kvm_enter_guest(struct kvm_run *run, struct kvm_vcpu *vcpu)
/*
 * int kvm_enter_guest(struct kvm_run *run, struct kvm_vcpu *vcpu)
 * a0 = run, a1 = vcpu. Saves host context into the stack frame and
 * vcpu->arch, then falls through into kvm_switch_to_guest (the macro
 * invocation is between the last store and the end label in the full file).
 */
210 SYM_FUNC_START(kvm_enter_guest)
211 /* Allocate space in stack bottom */
/* a2 = base of a pt_regs-sized frame below sp for the host register save */
212 addi.d a2, sp, -PT_SIZE
216 /* Save host CRMD, PRMD to stack */
/*
 * NOTE(review): the stores of a3 into the frame (and the
 * kvm_save_host_gpr invocation) are not visible in this chunk.
 */
217 csrrd a3, LOONGARCH_CSR_CRMD
219 csrrd a3, LOONGARCH_CSR_PRMD
/* a2 = &vcpu->arch; record host sp/tp so kvm_exc_entry can restore them */
222 addi.d a2, a1, KVM_VCPU_ARCH
223 st.d sp, a2, KVM_ARCH_HSP
224 st.d tp, a2, KVM_ARCH_HTP
225 /* Save per cpu register */
226 st.d u0, a2, KVM_ARCH_HPERCPU
228 /* Save kvm_vcpu to kscratch */
229 csrwr a1, KVM_VCPU_KS
231 SYM_INNER_LABEL(kvm_enter_guest_end, SYM_L_LOCAL)
232 SYM_FUNC_END(kvm_enter_guest)
/*
 * kvm_save_fpu: save the FPU register state to the context at a0.
 * NOTE(review): companion fpu_save_csr/fpu_save_cc invocations and the
 * return are not visible in this chunk — confirm against the full file.
 */
234 SYM_FUNC_START(kvm_save_fpu)
236 fpu_save_double a0 t1
239 SYM_FUNC_END(kvm_save_fpu)
/*
 * kvm_restore_fpu: reload FPU state from the context at a0 —
 * 64-bit FP registers, then FCSR, then the condition-flag registers.
 * t1/t2 are used as scratch by the fpu_* helper macros.
 */
241 SYM_FUNC_START(kvm_restore_fpu)
242 fpu_restore_double a0 t1
243 fpu_restore_csr a0 t1 t2
244 fpu_restore_cc a0 t1 t2
246 SYM_FUNC_END(kvm_restore_fpu)
/* Code sizes of the two entry stubs, exported for copying/mapping them */
249 SYM_DATA(kvm_exception_size, .quad kvm_exc_entry_end - kvm_exc_entry)
250 SYM_DATA(kvm_enter_guest_size, .quad kvm_enter_guest_end - kvm_enter_guest)