// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corporation, 2018
 * Authors Suraj Jitindar Singh <sjitindarsingh@gmail.com>
 *	   Paul Mackerras <paulus@ozlabs.org>
 *
 * Description: KVM functions specific to running nested KVM-HV guests
 * on Book3S processors (specifically POWER9 and later).
 */

#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/llist.h>
#include <linux/pgtable.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu.h>
#include <asm/pgalloc.h>
#include <asm/pte-walk.h>
#include <asm/reg.h>

static struct patb_entry *pseries_partition_tb;

static void kvmhv_update_ptbl_cache(struct kvm_nested_guest *gp);
static void kvmhv_free_memslot_nest_rmap(struct kvm_memory_slot *free);

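/*
 * Save the L1 hypervisor-privileged state (vcore and vcpu SPRs) into an
 * hv_guest_state buffer, so it can be restored after running an L2.
 */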
void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	hr->pcr = vc->pcr | PCR_MASK;
	hr->dpdes = vc->dpdes;
	hr->hfscr = vcpu->arch.hfscr;
	hr->tb_offset = vc->tb_offset;
	hr->dawr0 = vcpu->arch.dawr;
	hr->dawrx0 = vcpu->arch.dawrx;
	hr->ciabr = vcpu->arch.ciabr;
	hr->purr = vcpu->arch.purr;
	hr->spurr = vcpu->arch.spurr;
	hr->ic = vcpu->arch.ic;
	hr->vtb = vc->vtb;
	hr->srr0 = vcpu->arch.shregs.srr0;
	hr->srr1 = vcpu->arch.shregs.srr1;
	hr->sprg[0] = vcpu->arch.shregs.sprg0;
	hr->sprg[1] = vcpu->arch.shregs.sprg1;
	hr->sprg[2] = vcpu->arch.shregs.sprg2;
	hr->sprg[3] = vcpu->arch.shregs.sprg3;
	hr->pidr = vcpu->arch.pid;
	hr->cfar = vcpu->arch.cfar;
	hr->ppr = vcpu->arch.ppr;
}

/* Use noinline_for_stack due to https://bugs.llvm.org/show_bug.cgi?id=49610 */
static noinline_for_stack void byteswap_pt_regs(struct pt_regs *regs)
{
	unsigned long *addr = (unsigned long *) regs;

	for (; addr < ((unsigned long *) (regs + 1)); addr++)
		*addr = swab64(*addr);
}

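/*
 * The hv_guest_state image lives in L1 guest memory, so when L1 runs with
 * the opposite endianness every field must be swapped individually (the
 * 32-bit lpid and vcpu_token fields cannot use swab64()).
 */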
static void byteswap_hv_regs(struct hv_guest_state *hr)
{
	hr->version = swab64(hr->version);
	hr->lpid = swab32(hr->lpid);
	hr->vcpu_token = swab32(hr->vcpu_token);
	hr->lpcr = swab64(hr->lpcr);
	hr->pcr = swab64(hr->pcr) | PCR_MASK;
	hr->amor = swab64(hr->amor);
	hr->dpdes = swab64(hr->dpdes);
	hr->hfscr = swab64(hr->hfscr);
	hr->tb_offset = swab64(hr->tb_offset);
	hr->dawr0 = swab64(hr->dawr0);
	hr->dawrx0 = swab64(hr->dawrx0);
	hr->ciabr = swab64(hr->ciabr);
	hr->hdec_expiry = swab64(hr->hdec_expiry);
	hr->purr = swab64(hr->purr);
	hr->spurr = swab64(hr->spurr);
	hr->ic = swab64(hr->ic);
	hr->vtb = swab64(hr->vtb);
	hr->hdar = swab64(hr->hdar);
	hr->hdsisr = swab64(hr->hdsisr);
	hr->heir = swab64(hr->heir);
	hr->asdr = swab64(hr->asdr);
	hr->srr0 = swab64(hr->srr0);
	hr->srr1 = swab64(hr->srr1);
	hr->sprg[0] = swab64(hr->sprg[0]);
	hr->sprg[1] = swab64(hr->sprg[1]);
	hr->sprg[2] = swab64(hr->sprg[2]);
	hr->sprg[3] = swab64(hr->sprg[3]);
	hr->pidr = swab64(hr->pidr);
	hr->cfar = swab64(hr->cfar);
	hr->ppr = swab64(hr->ppr);
}

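/*
 * Capture the state an L2 exit leaves behind, including the interrupt
 * fault registers for the particular trap vector that was taken, for
 * return to L1.
 */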
static void save_hv_return_state(struct kvm_vcpu *vcpu, int trap,
				 struct hv_guest_state *hr)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	hr->dpdes = vc->dpdes;
	hr->hfscr = vcpu->arch.hfscr;
	hr->purr = vcpu->arch.purr;
	hr->spurr = vcpu->arch.spurr;
	hr->ic = vcpu->arch.ic;
	hr->vtb = vc->vtb;
	hr->srr0 = vcpu->arch.shregs.srr0;
	hr->srr1 = vcpu->arch.shregs.srr1;
	hr->sprg[0] = vcpu->arch.shregs.sprg0;
	hr->sprg[1] = vcpu->arch.shregs.sprg1;
	hr->sprg[2] = vcpu->arch.shregs.sprg2;
	hr->sprg[3] = vcpu->arch.shregs.sprg3;
	hr->pidr = vcpu->arch.pid;
	hr->cfar = vcpu->arch.cfar;
	hr->ppr = vcpu->arch.ppr;
	switch (trap) {
	case BOOK3S_INTERRUPT_H_DATA_STORAGE:
		hr->hdar = vcpu->arch.fault_dar;
		hr->hdsisr = vcpu->arch.fault_dsisr;
		hr->asdr = vcpu->arch.fault_gpa;
		break;
	case BOOK3S_INTERRUPT_H_INST_STORAGE:
		hr->asdr = vcpu->arch.fault_gpa;
		break;
	case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
		hr->heir = vcpu->arch.emul_inst;
		break;
	}
}

static void sanitise_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
{
	/*
	 * Don't let L1 enable features for L2 which we've disabled for L1,
	 * but preserve the interrupt cause field.
	 */
	hr->hfscr &= (HFSCR_INTR_CAUSE | vcpu->arch.hfscr);

	/* Don't let data address watchpoint match in hypervisor state */
	hr->dawrx0 &= ~DAWRX_HYP;

	/* Don't let completed instruction address breakpt match in HV state */
	if ((hr->ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER)
		hr->ciabr &= ~CIABR_PRIV;
}

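/*
 * Load a (sanitised) hv_guest_state into the vcore and vcpu registers
 * before entering the guest it describes.
 */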
static void restore_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	vc->pcr = hr->pcr | PCR_MASK;
	vc->dpdes = hr->dpdes;
	vcpu->arch.hfscr = hr->hfscr;
	vcpu->arch.dawr = hr->dawr0;
	vcpu->arch.dawrx = hr->dawrx0;
	vcpu->arch.ciabr = hr->ciabr;
	vcpu->arch.purr = hr->purr;
	vcpu->arch.spurr = hr->spurr;
	vcpu->arch.ic = hr->ic;
	vc->vtb = hr->vtb;
	vcpu->arch.shregs.srr0 = hr->srr0;
	vcpu->arch.shregs.srr1 = hr->srr1;
	vcpu->arch.shregs.sprg0 = hr->sprg[0];
	vcpu->arch.shregs.sprg1 = hr->sprg[1];
	vcpu->arch.shregs.sprg2 = hr->sprg[2];
	vcpu->arch.shregs.sprg3 = hr->sprg[3];
	vcpu->arch.pid = hr->pidr;
	vcpu->arch.cfar = hr->cfar;
	vcpu->arch.ppr = hr->ppr;
}

void kvmhv_restore_hv_return_state(struct kvm_vcpu *vcpu,
				   struct hv_guest_state *hr)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	vc->dpdes = hr->dpdes;
	vcpu->arch.hfscr = hr->hfscr;
	vcpu->arch.purr = hr->purr;
	vcpu->arch.spurr = hr->spurr;
	vcpu->arch.ic = hr->ic;
	vc->vtb = hr->vtb;
	vcpu->arch.fault_dar = hr->hdar;
	vcpu->arch.fault_dsisr = hr->hdsisr;
	vcpu->arch.fault_gpa = hr->asdr;
	vcpu->arch.emul_inst = hr->heir;
	vcpu->arch.shregs.srr0 = hr->srr0;
	vcpu->arch.shregs.srr1 = hr->srr1;
	vcpu->arch.shregs.sprg0 = hr->sprg[0];
	vcpu->arch.shregs.sprg1 = hr->sprg[1];
	vcpu->arch.shregs.sprg2 = hr->sprg[2];
	vcpu->arch.shregs.sprg3 = hr->sprg[3];
	vcpu->arch.pid = hr->pidr;
	vcpu->arch.cfar = hr->cfar;
	vcpu->arch.ppr = hr->ppr;
}

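/*
 * Called (on the H_TOO_HARD exit path) when an MMIO emulation load issued
 * by the L2 still needs to be completed: redirect the completion into the
 * L1 memory image of the L2 GPR.
 */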
static void kvmhv_nested_mmio_needed(struct kvm_vcpu *vcpu, u64 regs_ptr)
{
	/* No need to reflect the page fault to L1, we've handled it */
	vcpu->arch.trap = 0;

	/*
	 * Since the L2 gprs have already been written back into L1 memory when
	 * we complete the mmio, store the L1 memory location of the L2 gpr
	 * being loaded into by the mmio so that the loaded value can be
	 * written there in kvmppc_complete_mmio_load()
	 */
	if (((vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) == KVM_MMIO_REG_GPR)
	    && (vcpu->mmio_is_write == 0)) {
		vcpu->arch.nested_io_gpr = (gpa_t) regs_ptr +
					   offsetof(struct pt_regs,
						    gpr[vcpu->arch.io_gpr]);
		vcpu->arch.io_gpr = KVM_MMIO_REG_NESTED_GPR;
	}
}

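/*
 * Handle the H_ENTER_NESTED hcall.
 * r4 = L1 guest real address of the hv_guest_state to enter
 * r5 = L1 guest real address of the pt_regs for the L2
 * Returns the trap number that ended the nested run, or an H_* error.
 */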
long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
{
	long int err, r;
	struct kvm_nested_guest *l2;
	struct pt_regs l2_regs, saved_l1_regs;
	struct hv_guest_state l2_hv, saved_l1_hv;
	struct kvmppc_vcore *vc = vcpu->arch.vcore;
	u64 hv_ptr, regs_ptr;
	u64 hdec_exp;
	s64 delta_purr, delta_spurr, delta_ic, delta_vtb;
	u64 mask;
	unsigned long lpcr;

	if (vcpu->kvm->arch.l1_ptcr == 0)
		return H_NOT_AVAILABLE;

	if (MSR_TM_TRANSACTIONAL(vcpu->arch.shregs.msr))
		return H_BAD_MODE;

	/* copy parameters in */
	hv_ptr = kvmppc_get_gpr(vcpu, 4);
	regs_ptr = kvmppc_get_gpr(vcpu, 5);
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
	err = kvm_vcpu_read_guest(vcpu, hv_ptr, &l2_hv,
				  sizeof(struct hv_guest_state)) ||
		kvm_vcpu_read_guest(vcpu, regs_ptr, &l2_regs,
				    sizeof(struct pt_regs));
	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	if (err)
		return H_PARAMETER;

	if (kvmppc_need_byteswap(vcpu))
		byteswap_hv_regs(&l2_hv);
	if (l2_hv.version != HV_GUEST_STATE_VERSION)
		return H_P2;

	if (kvmppc_need_byteswap(vcpu))
		byteswap_pt_regs(&l2_regs);
	if (l2_hv.vcpu_token >= NR_CPUS)
		return H_PARAMETER;

	/*
	 * L1 must have set up a suspended state to enter the L2 in a
	 * transactional state, and only in that case. These have to be
	 * filtered out here to prevent causing a TM Bad Thing in the
	 * host HRFID. We could synthesize a TM Bad Thing back to the L1
	 * here but there doesn't seem like much point.
	 */
	if (MSR_TM_SUSPENDED(vcpu->arch.shregs.msr)) {
		if (!MSR_TM_ACTIVE(l2_regs.msr))
			return H_BAD_MODE;
	} else {
		if (l2_regs.msr & MSR_TS_MASK)
			return H_BAD_MODE;
		if (WARN_ON_ONCE(vcpu->arch.shregs.msr & MSR_TS_MASK))
			return H_BAD_MODE;
	}

	/* translate lpid */
	l2 = kvmhv_get_nested(vcpu->kvm, l2_hv.lpid, true);
	if (!l2)
		return H_PARAMETER;
	if (!l2->l1_gr_to_hr) {
		mutex_lock(&l2->tlb_lock);
		kvmhv_update_ptbl_cache(l2);
		mutex_unlock(&l2->tlb_lock);
	}

	/* save l1 values of things */
	vcpu->arch.regs.msr = vcpu->arch.shregs.msr;
	saved_l1_regs = vcpu->arch.regs;
	kvmhv_save_hv_regs(vcpu, &saved_l1_hv);

	/* convert TB values/offsets to host (L0) values */
	hdec_exp = l2_hv.hdec_expiry - vc->tb_offset;
	vc->tb_offset += l2_hv.tb_offset;

	/* set L1 state to L2 state */
	vcpu->arch.nested = l2;
	vcpu->arch.nested_vcpu_id = l2_hv.vcpu_token;
	vcpu->arch.regs = l2_regs;
	vcpu->arch.shregs.msr = vcpu->arch.regs.msr;
	mask = LPCR_DPFD | LPCR_ILE | LPCR_TC | LPCR_AIL | LPCR_LD |
		LPCR_LPES | LPCR_MER;
	lpcr = (vc->lpcr & ~mask) | (l2_hv.lpcr & mask);
	sanitise_hv_regs(vcpu, &l2_hv);
	restore_hv_regs(vcpu, &l2_hv);

	vcpu->arch.ret = RESUME_GUEST;
	vcpu->arch.trap = 0;
	do {
		if (mftb() >= hdec_exp) {
			vcpu->arch.trap = BOOK3S_INTERRUPT_HV_DECREMENTER;
			r = RESUME_HOST;
			break;
		}
		r = kvmhv_run_single_vcpu(vcpu, hdec_exp, lpcr);
	} while (is_kvmppc_resume_guest(r));

	/* save L2 state for return */
	l2_regs = vcpu->arch.regs;
	l2_regs.msr = vcpu->arch.shregs.msr;
	delta_purr = vcpu->arch.purr - l2_hv.purr;
	delta_spurr = vcpu->arch.spurr - l2_hv.spurr;
	delta_ic = vcpu->arch.ic - l2_hv.ic;
	delta_vtb = vc->vtb - l2_hv.vtb;
	save_hv_return_state(vcpu, vcpu->arch.trap, &l2_hv);

	/* restore L1 state */
	vcpu->arch.nested = NULL;
	vcpu->arch.regs = saved_l1_regs;
	vcpu->arch.shregs.msr = saved_l1_regs.msr & ~MSR_TS_MASK;
	/* set L1 MSR TS field according to L2 transaction state */
	if (l2_regs.msr & MSR_TS_MASK)
		vcpu->arch.shregs.msr |= MSR_TS_S;
	vc->tb_offset = saved_l1_hv.tb_offset;
	restore_hv_regs(vcpu, &saved_l1_hv);
	vcpu->arch.purr += delta_purr;
	vcpu->arch.spurr += delta_spurr;
	vcpu->arch.ic += delta_ic;
	vc->vtb += delta_vtb;

	kvmhv_put_nested(l2);

	/* copy l2_hv_state and regs back to guest */
	if (kvmppc_need_byteswap(vcpu)) {
		byteswap_hv_regs(&l2_hv);
		byteswap_pt_regs(&l2_regs);
	}
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
	err = kvm_vcpu_write_guest(vcpu, hv_ptr, &l2_hv,
				   sizeof(struct hv_guest_state)) ||
		kvm_vcpu_write_guest(vcpu, regs_ptr, &l2_regs,
				     sizeof(struct pt_regs));
	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	if (err)
		return H_AUTHORITY;

	if (r == -EINTR)
		return H_INTERRUPT;

	if (vcpu->mmio_needed) {
		kvmhv_nested_mmio_needed(vcpu, regs_ptr);
		return H_TOO_HARD;
	}

	return vcpu->arch.trap;
}

long kvmhv_nested_init(void)
{
	long int ptb_order;
	unsigned long ptcr;
	long rc;

	if (!kvmhv_on_pseries())
		return 0;
	if (!radix_enabled())
		return -ENODEV;

	/* find log base 2 of KVMPPC_NR_LPIDS, rounding up */
	ptb_order = __ilog2(KVMPPC_NR_LPIDS - 1) + 1;
	if (ptb_order < 8)
		ptb_order = 8;
	pseries_partition_tb = kmalloc(sizeof(struct patb_entry) << ptb_order,
				       GFP_KERNEL);
	if (!pseries_partition_tb) {
		pr_err("kvm-hv: failed to allocate nested partition table\n");
		return -ENOMEM;
	}

	ptcr = __pa(pseries_partition_tb) | (ptb_order - 8);
	rc = plpar_hcall_norets(H_SET_PARTITION_TABLE, ptcr);
	if (rc != H_SUCCESS) {
		pr_err("kvm-hv: Parent hypervisor does not support nesting (rc=%ld)\n",
		       rc);
		kfree(pseries_partition_tb);
		pseries_partition_tb = NULL;
		return -ENODEV;
	}

	return 0;
}

void kvmhv_nested_exit(void)
{
	/*
	 * N.B. the kvmhv_on_pseries() test is there because it enables
	 * the compiler to remove the call to plpar_hcall_norets()
	 * when CONFIG_PPC_PSERIES=n.
	 */
	if (kvmhv_on_pseries() && pseries_partition_tb) {
		plpar_hcall_norets(H_SET_PARTITION_TABLE, 0);
		kfree(pseries_partition_tb);
		pseries_partition_tb = NULL;
	}
}

static void kvmhv_flush_lpid(unsigned int lpid)
{
	long rc;

	if (!kvmhv_on_pseries()) {
		radix__flush_all_lpid(lpid);
		return;
	}

	rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(2, 0, 1),
				lpid, TLBIEL_INVAL_SET_LPID);
	if (rc)
		pr_err("KVM: TLB LPID invalidation hcall failed, rc=%ld\n", rc);
}

void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 dw1)
{
	if (!kvmhv_on_pseries()) {
		mmu_partition_table_set_entry(lpid, dw0, dw1, true);
		return;
	}

	pseries_partition_tb[lpid].patb0 = cpu_to_be64(dw0);
	pseries_partition_tb[lpid].patb1 = cpu_to_be64(dw1);
	/* L0 will do the necessary barriers */
	kvmhv_flush_lpid(lpid);
}

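/*
 * Point the partition table entry for this nested guest's shadow LPID at
 * its shadow page table and at the guest's own process table.
 */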
static void kvmhv_set_nested_ptbl(struct kvm_nested_guest *gp)
{
	unsigned long dw0;

	dw0 = PATB_HR | radix__get_tree_size() |
		__pa(gp->shadow_pgtable) | RADIX_PGD_INDEX_SIZE;
	kvmhv_set_ptbl_entry(gp->shadow_lpid, dw0, gp->process_table);
}

void kvmhv_vm_nested_init(struct kvm *kvm)
{
	kvm->arch.max_nested_lpid = -1;
}

/*
 * Handle the H_SET_PARTITION_TABLE hcall.
 * r4 = guest real address of partition table + log_2(size) - 12
 * (formatted as for the PTCR).
 */
long kvmhv_set_partition_table(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long ptcr = kvmppc_get_gpr(vcpu, 4);
	int srcu_idx;
	long ret = H_SUCCESS;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	/*
	 * Limit the partition table to 4096 entries (because that's what
	 * hardware supports), and check the base address.
	 */
	if ((ptcr & PRTS_MASK) > 12 - 8 ||
	    !kvm_is_visible_gfn(vcpu->kvm, (ptcr & PRTB_MASK) >> PAGE_SHIFT))
		ret = H_PARAMETER;
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	if (ret == H_SUCCESS)
		kvm->arch.l1_ptcr = ptcr;
	return ret;
}

/*
 * Handle the H_COPY_TOFROM_GUEST hcall.
 * r4 = L1 lpid of nested guest
 * r5 = pid
 * r6 = eaddr to access
 * r7 = to buffer (L1 gpa)
 * r8 = from buffer (L1 gpa)
 * r9 = n bytes to copy
 */
long kvmhv_copy_tofrom_guest_nested(struct kvm_vcpu *vcpu)
{
	struct kvm_nested_guest *gp;
	int l1_lpid = kvmppc_get_gpr(vcpu, 4);
	int pid = kvmppc_get_gpr(vcpu, 5);
	gva_t eaddr = kvmppc_get_gpr(vcpu, 6);
	gpa_t gp_to = (gpa_t) kvmppc_get_gpr(vcpu, 7);
	gpa_t gp_from = (gpa_t) kvmppc_get_gpr(vcpu, 8);
	void *buf;
	unsigned long n = kvmppc_get_gpr(vcpu, 9);
	bool is_load = !!gp_to;
	long rc;

	if (gp_to && gp_from) /* One must be NULL to determine the direction */
		return H_PARAMETER;

	if (eaddr & (0xFFFUL << 52))
		return H_PARAMETER;

	buf = kzalloc(n, GFP_KERNEL | __GFP_NOWARN);
	if (!buf)
		return H_NO_MEM;

	gp = kvmhv_get_nested(vcpu->kvm, l1_lpid, false);
	if (!gp) {
		rc = H_PARAMETER;
		goto out_free;
	}

	mutex_lock(&gp->tlb_lock);

	if (is_load) {
		/* Load from the nested guest into our buffer */
		rc = __kvmhv_copy_tofrom_guest_radix(gp->shadow_lpid, pid,
						     eaddr, buf, NULL, n);
		if (rc)
			goto not_found;

		/* Write what was loaded into our buffer back to the L1 guest */
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
		rc = kvm_vcpu_write_guest(vcpu, gp_to, buf, n);
		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		if (rc)
			goto not_found;
	} else {
		/* Load the data to be stored from the L1 guest into our buf */
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
		rc = kvm_vcpu_read_guest(vcpu, gp_from, buf, n);
		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		if (rc)
			goto not_found;

		/* Store from our buffer into the nested guest */
		rc = __kvmhv_copy_tofrom_guest_radix(gp->shadow_lpid, pid,
						     eaddr, NULL, buf, n);
		if (rc)
			goto not_found;
	}

out_unlock:
	mutex_unlock(&gp->tlb_lock);
	kvmhv_put_nested(gp);
out_free:
	kfree(buf);
	return rc;
not_found:
	rc = H_NOT_FOUND;
	goto out_unlock;
}

/*
 * Reload the partition table entry for a guest.
 * Caller must hold gp->tlb_lock.
 */
static void kvmhv_update_ptbl_cache(struct kvm_nested_guest *gp)
{
	int ret;
	struct patb_entry ptbl_entry;
	unsigned long ptbl_addr;
	struct kvm *kvm = gp->l1_host;

	ret = -EFAULT;
	ptbl_addr = (kvm->arch.l1_ptcr & PRTB_MASK) + (gp->l1_lpid << 4);
	if (gp->l1_lpid < (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 8))) {
		int srcu_idx = srcu_read_lock(&kvm->srcu);
		ret = kvm_read_guest(kvm, ptbl_addr,
				     &ptbl_entry, sizeof(ptbl_entry));
		srcu_read_unlock(&kvm->srcu, srcu_idx);
	}
	if (ret) {
		gp->l1_gr_to_hr = 0;
		gp->process_table = 0;
	} else {
		gp->l1_gr_to_hr = be64_to_cpu(ptbl_entry.patb0);
		gp->process_table = be64_to_cpu(ptbl_entry.patb1);
	}
	kvmhv_set_nested_ptbl(gp);
}

static struct kvm_nested_guest *kvmhv_alloc_nested(struct kvm *kvm, unsigned int lpid)
{
	struct kvm_nested_guest *gp;
	long shadow_lpid;

	gp = kzalloc(sizeof(*gp), GFP_KERNEL);
	if (!gp)
		return NULL;
	gp->l1_host = kvm;
	gp->l1_lpid = lpid;
	mutex_init(&gp->tlb_lock);
	gp->shadow_pgtable = pgd_alloc(kvm->mm);
	if (!gp->shadow_pgtable)
		goto out_free;
	shadow_lpid = kvmppc_alloc_lpid();
	if (shadow_lpid < 0)
		goto out_free2;
	gp->shadow_lpid = shadow_lpid;
	gp->radix = 1;

	memset(gp->prev_cpu, -1, sizeof(gp->prev_cpu));

	return gp;

out_free2:
	pgd_free(kvm->mm, gp->shadow_pgtable);
out_free:
	kfree(gp);
	return NULL;
}

/*
 * Free up any resources allocated for a nested guest.
 */
static void kvmhv_release_nested(struct kvm_nested_guest *gp)
{
	struct kvm *kvm = gp->l1_host;

	if (gp->shadow_pgtable) {
		/*
		 * No vcpu is using this struct and no call to
		 * kvmhv_get_nested can find this struct,
		 * so we don't need to hold kvm->mmu_lock.
		 */
		kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable,
					  gp->shadow_lpid);
		pgd_free(kvm->mm, gp->shadow_pgtable);
	}
	kvmhv_set_ptbl_entry(gp->shadow_lpid, 0, 0);
	kvmppc_free_lpid(gp->shadow_lpid);
	kfree(gp);
}

static void kvmhv_remove_nested(struct kvm_nested_guest *gp)
{
	struct kvm *kvm = gp->l1_host;
	int lpid = gp->l1_lpid;
	long ref;

	spin_lock(&kvm->mmu_lock);
	if (gp == kvm->arch.nested_guests[lpid]) {
		kvm->arch.nested_guests[lpid] = NULL;
		if (lpid == kvm->arch.max_nested_lpid) {
			while (--lpid >= 0 && !kvm->arch.nested_guests[lpid])
				;
			kvm->arch.max_nested_lpid = lpid;
		}
		--gp->refcnt;
	}
	ref = gp->refcnt;
	spin_unlock(&kvm->mmu_lock);
	if (ref == 0)
		kvmhv_release_nested(gp);
}

/*
 * Free up all nested resources allocated for this guest.
 * This is called with no vcpus of the guest running, when
 * switching the guest to HPT mode or when destroying the
 * guest.
 */
void kvmhv_release_all_nested(struct kvm *kvm)
{
	int i;
	struct kvm_nested_guest *gp;
	struct kvm_nested_guest *freelist = NULL;
	struct kvm_memory_slot *memslot;
	int srcu_idx;

	spin_lock(&kvm->mmu_lock);
	for (i = 0; i <= kvm->arch.max_nested_lpid; i++) {
		gp = kvm->arch.nested_guests[i];
		if (!gp)
			continue;
		kvm->arch.nested_guests[i] = NULL;
		if (--gp->refcnt == 0) {
			gp->next = freelist;
			freelist = gp;
		}
	}
	kvm->arch.max_nested_lpid = -1;
	spin_unlock(&kvm->mmu_lock);
	while ((gp = freelist) != NULL) {
		freelist = gp->next;
		kvmhv_release_nested(gp);
	}

	srcu_idx = srcu_read_lock(&kvm->srcu);
	kvm_for_each_memslot(memslot, kvm_memslots(kvm))
		kvmhv_free_memslot_nest_rmap(memslot);
	srcu_read_unlock(&kvm->srcu, srcu_idx);
}

/* caller must hold gp->tlb_lock */
static void kvmhv_flush_nested(struct kvm_nested_guest *gp)
{
	struct kvm *kvm = gp->l1_host;

	spin_lock(&kvm->mmu_lock);
	kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable, gp->shadow_lpid);
	spin_unlock(&kvm->mmu_lock);
	kvmhv_flush_lpid(gp->shadow_lpid);
	kvmhv_update_ptbl_cache(gp);
	if (gp->l1_gr_to_hr == 0)
		kvmhv_remove_nested(gp);
}

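/*
 * Look up (and optionally create) the nested guest struct for an L1 lpid,
 * taking a reference that must be dropped with kvmhv_put_nested().
 */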
struct kvm_nested_guest *kvmhv_get_nested(struct kvm *kvm, int l1_lpid,
					  bool create)
{
	struct kvm_nested_guest *gp, *newgp;

	if (l1_lpid >= KVM_MAX_NESTED_GUESTS ||
	    l1_lpid >= (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 12 - 4)))
		return NULL;

	spin_lock(&kvm->mmu_lock);
	gp = kvm->arch.nested_guests[l1_lpid];
	if (gp)
		++gp->refcnt;
	spin_unlock(&kvm->mmu_lock);

	if (gp || !create)
		return gp;

	newgp = kvmhv_alloc_nested(kvm, l1_lpid);
	if (!newgp)
		return NULL;
	spin_lock(&kvm->mmu_lock);
	if (kvm->arch.nested_guests[l1_lpid]) {
		/* someone else beat us to it */
		gp = kvm->arch.nested_guests[l1_lpid];
	} else {
		kvm->arch.nested_guests[l1_lpid] = newgp;
		newgp->refcnt = 1;
		gp = newgp;
		newgp = NULL;
		if (l1_lpid > kvm->arch.max_nested_lpid)
			kvm->arch.max_nested_lpid = l1_lpid;
	}
	++gp->refcnt;
	spin_unlock(&kvm->mmu_lock);

	if (newgp)
		kvmhv_release_nested(newgp);

	return gp;
}

void kvmhv_put_nested(struct kvm_nested_guest *gp)
{
	struct kvm *kvm = gp->l1_host;
	long ref;

	spin_lock(&kvm->mmu_lock);
	ref = --gp->refcnt;
	spin_unlock(&kvm->mmu_lock);
	if (ref == 0)
		kvmhv_release_nested(gp);
}

static struct kvm_nested_guest *kvmhv_find_nested(struct kvm *kvm, int lpid)
{
	if (lpid > kvm->arch.max_nested_lpid)
		return NULL;
	return kvm->arch.nested_guests[lpid];
}

pte_t *find_kvm_nested_guest_pte(struct kvm *kvm, unsigned long lpid,
				 unsigned long ea, unsigned *hshift)
{
	struct kvm_nested_guest *gp;
	pte_t *pte;

	gp = kvmhv_find_nested(kvm, lpid);
	if (!gp)
		return NULL;

	VM_WARN(!spin_is_locked(&kvm->mmu_lock),
		"%s called with kvm mmu_lock not held\n", __func__);
	pte = __find_linux_pte(gp->shadow_pgtable, ea, NULL, hshift);

	return pte;
}

static inline bool kvmhv_n_rmap_is_equal(u64 rmap_1, u64 rmap_2)
{
	return !((rmap_1 ^ rmap_2) & (RMAP_NESTED_LPID_MASK |
				      RMAP_NESTED_GPA_MASK));
}

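/*
 * Insert a nested-guest reverse-map entry for a host page. A single entry
 * is stored directly in *rmapp with RMAP_NESTED_IS_SINGLE_ENTRY set; more
 * than one entry becomes an llist of rmap_nested structs.
 */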
void kvmhv_insert_nest_rmap(struct kvm *kvm, unsigned long *rmapp,
			    struct rmap_nested **n_rmap)
{
	struct llist_node *entry = ((struct llist_head *) rmapp)->first;
	struct rmap_nested *cursor;
	u64 rmap, new_rmap = (*n_rmap)->rmap;

	/* Are there any existing entries? */
	if (!(*rmapp)) {
		/* No -> use the rmap as a single entry */
		*rmapp = new_rmap | RMAP_NESTED_IS_SINGLE_ENTRY;
		return;
	}

	/* Do any entries match what we're trying to insert? */
	for_each_nest_rmap_safe(cursor, entry, &rmap) {
		if (kvmhv_n_rmap_is_equal(rmap, new_rmap))
			return;
	}

	/* Do we need to create a list or just add the new entry? */
	rmap = *rmapp;
	if (rmap & RMAP_NESTED_IS_SINGLE_ENTRY) /* Not previously a list */
		*rmapp = 0UL;
	llist_add(&((*n_rmap)->list), (struct llist_head *) rmapp);
	if (rmap & RMAP_NESTED_IS_SINGLE_ENTRY) /* Not previously a list */
		(*n_rmap)->list.next = (struct llist_node *) rmap;

	/* Set NULL so not freed by caller */
	*n_rmap = NULL;
}

static void kvmhv_update_nest_rmap_rc(struct kvm *kvm, u64 n_rmap,
				      unsigned long clr, unsigned long set,
				      unsigned long hpa, unsigned long mask)
{
	unsigned long gpa;
	unsigned int shift, lpid;
	pte_t *ptep;

	gpa = n_rmap & RMAP_NESTED_GPA_MASK;
	lpid = (n_rmap & RMAP_NESTED_LPID_MASK) >> RMAP_NESTED_LPID_SHIFT;

	/* Find the pte */
	ptep = find_kvm_nested_guest_pte(kvm, lpid, gpa, &shift);
	/*
	 * If the pte is present and the pfn is still the same, update the pte.
	 * If the pfn has changed then this is a stale rmap entry, the nested
	 * gpa actually points somewhere else now, and there is nothing to do.
	 * XXX A future optimisation would be to remove the rmap entry here.
	 */
	if (ptep && pte_present(*ptep) && ((pte_val(*ptep) & mask) == hpa)) {
		__radix_pte_update(ptep, clr, set);
		kvmppc_radix_tlbie_page(kvm, gpa, shift, lpid);
	}
}

/*
 * For a given list of rmap entries, update the rc bits in all ptes in shadow
 * page tables for nested guests which are referenced by the rmap list.
 */
void kvmhv_update_nest_rmap_rc_list(struct kvm *kvm, unsigned long *rmapp,
				    unsigned long clr, unsigned long set,
				    unsigned long hpa, unsigned long nbytes)
{
	struct llist_node *entry = ((struct llist_head *) rmapp)->first;
	struct rmap_nested *cursor;
	unsigned long rmap, mask;

	if ((clr | set) & ~(_PAGE_DIRTY | _PAGE_ACCESSED))
		return;

	mask = PTE_RPN_MASK & ~(nbytes - 1);
	hpa &= mask;

	for_each_nest_rmap_safe(cursor, entry, &rmap)
		kvmhv_update_nest_rmap_rc(kvm, rmap, clr, set, hpa, mask);
}

static void kvmhv_remove_nest_rmap(struct kvm *kvm, u64 n_rmap,
				   unsigned long hpa, unsigned long mask)
{
	struct kvm_nested_guest *gp;
	unsigned long gpa;
	unsigned int shift, lpid;
	pte_t *ptep;

	gpa = n_rmap & RMAP_NESTED_GPA_MASK;
	lpid = (n_rmap & RMAP_NESTED_LPID_MASK) >> RMAP_NESTED_LPID_SHIFT;
	gp = kvmhv_find_nested(kvm, lpid);
	if (!gp)
		return;

	/* Find and invalidate the pte */
	ptep = find_kvm_nested_guest_pte(kvm, lpid, gpa, &shift);
	/* Don't spuriously invalidate ptes if the pfn has changed */
	if (ptep && pte_present(*ptep) && ((pte_val(*ptep) & mask) == hpa))
		kvmppc_unmap_pte(kvm, ptep, gpa, shift, NULL, gp->shadow_lpid);
}

static void kvmhv_remove_nest_rmap_list(struct kvm *kvm, unsigned long *rmapp,
					unsigned long hpa, unsigned long mask)
{
	struct llist_node *entry = llist_del_all((struct llist_head *) rmapp);
	struct rmap_nested *cursor;
	unsigned long rmap;

	for_each_nest_rmap_safe(cursor, entry, &rmap) {
		kvmhv_remove_nest_rmap(kvm, rmap, hpa, mask);
		kfree(cursor);
	}
}

/* called with kvm->mmu_lock held */
void kvmhv_remove_nest_rmap_range(struct kvm *kvm,
				  const struct kvm_memory_slot *memslot,
				  unsigned long gpa, unsigned long hpa,
				  unsigned long nbytes)
{
	unsigned long gfn, end_gfn;
	unsigned long addr_mask;

	if (!memslot)
		return;
	gfn = (gpa >> PAGE_SHIFT) - memslot->base_gfn;
	end_gfn = gfn + (nbytes >> PAGE_SHIFT);

	addr_mask = PTE_RPN_MASK & ~(nbytes - 1);
	hpa &= addr_mask;

	for (; gfn < end_gfn; gfn++) {
		unsigned long *rmap = &memslot->arch.rmap[gfn];

		kvmhv_remove_nest_rmap_list(kvm, rmap, hpa, addr_mask);
	}
}

static void kvmhv_free_memslot_nest_rmap(struct kvm_memory_slot *free)
{
	unsigned long page;

	for (page = 0; page < free->npages; page++) {
		unsigned long rmap, *rmapp = &free->arch.rmap[page];
		struct rmap_nested *cursor;
		struct llist_node *entry;

		entry = llist_del_all((struct llist_head *) rmapp);
		for_each_nest_rmap_safe(cursor, entry, &rmap)
			kfree(cursor);
	}
}

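/*
 * Invalidate the shadow pte for a nested-guest gpa, returning whether a
 * present pte was found and (via *shift_ret) the page shift it was mapped
 * with.
 */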
static bool kvmhv_invalidate_shadow_pte(struct kvm_vcpu *vcpu,
					struct kvm_nested_guest *gp,
					long gpa, int *shift_ret)
{
	struct kvm *kvm = vcpu->kvm;
	bool ret = false;
	pte_t *ptep;
	int shift;

	spin_lock(&kvm->mmu_lock);
	ptep = find_kvm_nested_guest_pte(kvm, gp->l1_lpid, gpa, &shift);
	if (!shift)
		shift = PAGE_SHIFT;
	if (ptep && pte_present(*ptep)) {
		kvmppc_unmap_pte(kvm, ptep, gpa, shift, NULL, gp->shadow_lpid);
		ret = true;
	}
	spin_unlock(&kvm->mmu_lock);

	if (shift_ret)
		*shift_ret = shift;
	return ret;
}

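/*
 * The following helpers decode the fields of a tlbie instruction image
 * (RIC, PRS, R) and of its RS/RB register operands (LPID, IS, AP, EPN),
 * as used by the H_TLB_INVALIDATE emulation below.
 */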
static inline int get_ric(unsigned int instr)
{
	return (instr >> 18) & 0x3;
}

static inline int get_prs(unsigned int instr)
{
	return (instr >> 17) & 0x1;
}

static inline int get_r(unsigned int instr)
{
	return (instr >> 16) & 0x1;
}

static inline int get_lpid(unsigned long r_val)
{
	return r_val & 0xffffffff;
}

static inline int get_is(unsigned long r_val)
{
	return (r_val >> 10) & 0x3;
}

static inline int get_ap(unsigned long r_val)
{
	return (r_val >> 5) & 0x7;
}

static inline long get_epn(unsigned long r_val)
{
	return r_val >> 12;
}

static int kvmhv_emulate_tlbie_tlb_addr(struct kvm_vcpu *vcpu, int lpid,
					int ap, long epn)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_nested_guest *gp;
	long npages;
	int shift, shadow_shift;
	unsigned long addr;

	shift = ap_to_shift(ap);
	addr = epn << 12;
	if (shift < 0)
		/* Invalid ap encoding */
		return -EINVAL;

	addr &= ~((1UL << shift) - 1);
	npages = 1UL << (shift - PAGE_SHIFT);

	gp = kvmhv_get_nested(kvm, lpid, false);
	if (!gp) /* No such guest -> nothing to do */
		return 0;
	mutex_lock(&gp->tlb_lock);

	/* There may be more than one host page backing this single guest pte */
	do {
		kvmhv_invalidate_shadow_pte(vcpu, gp, addr, &shadow_shift);

		npages -= 1UL << (shadow_shift - PAGE_SHIFT);
		addr += 1UL << shadow_shift;
	} while (npages > 0);

	mutex_unlock(&gp->tlb_lock);
	kvmhv_put_nested(gp);
	return 0;
}

static void kvmhv_emulate_tlbie_lpid(struct kvm_vcpu *vcpu,
				     struct kvm_nested_guest *gp, int ric)
{
	struct kvm *kvm = vcpu->kvm;

	mutex_lock(&gp->tlb_lock);
	switch (ric) {
	case 0:
		/* Invalidate TLB */
		spin_lock(&kvm->mmu_lock);
		kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable,
					  gp->shadow_lpid);
		kvmhv_flush_lpid(gp->shadow_lpid);
		spin_unlock(&kvm->mmu_lock);
		break;
	case 1:
		/*
		 * Invalidate PWC
		 * We don't cache this -> nothing to do
		 */
		break;
	case 2:
		/* Invalidate TLB, PWC and caching of partition table entries */
		kvmhv_flush_nested(gp);
		break;
	default:
		break;
	}
	mutex_unlock(&gp->tlb_lock);
}

static void kvmhv_emulate_tlbie_all_lpid(struct kvm_vcpu *vcpu, int ric)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_nested_guest *gp;
	int i;

	spin_lock(&kvm->mmu_lock);
	for (i = 0; i <= kvm->arch.max_nested_lpid; i++) {
		gp = kvm->arch.nested_guests[i];
		if (gp) {
			spin_unlock(&kvm->mmu_lock);
			kvmhv_emulate_tlbie_lpid(vcpu, gp, ric);
			spin_lock(&kvm->mmu_lock);
		}
	}
	spin_unlock(&kvm->mmu_lock);
}

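/*
 * Emulate a hypervisor-privileged tlbie issued by L1: depending on the IS
 * field this invalidates a single shadow translation, one nested guest's
 * translations, or those of all nested guests.
 */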
static int kvmhv_emulate_priv_tlbie(struct kvm_vcpu *vcpu, unsigned int instr,
				    unsigned long rsval, unsigned long rbval)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_nested_guest *gp;
	int r, ric, prs, is, ap;
	int lpid;
	long epn;
	int ret = 0;

	ric = get_ric(instr);
	prs = get_prs(instr);
	r = get_r(instr);
	lpid = get_lpid(rsval);
	is = get_is(rbval);

	/*
	 * These cases are invalid and are not handled:
	 * r != 1 -> Only radix supported
	 * prs == 1 -> Not HV privileged
	 * ric == 3 -> No cluster bombs for radix
	 * is == 1 -> Partition scoped translations not associated with pid
	 * (!is) && (ric == 1 || ric == 2) -> Not supported by ISA
	 */
	if ((!r) || (prs) || (ric == 3) || (is == 1) ||
	    ((!is) && (ric == 1 || ric == 2)))
		return -EINVAL;

	switch (is) {
	case 0:
		/*
		 * We know ric == 0
		 * Invalidate TLB for a given target address
		 */
		epn = get_epn(rbval);
		ap = get_ap(rbval);
		ret = kvmhv_emulate_tlbie_tlb_addr(vcpu, lpid, ap, epn);
		break;
	case 2:
		/* Invalidate matching LPID */
		gp = kvmhv_get_nested(kvm, lpid, false);
		if (gp) {
			kvmhv_emulate_tlbie_lpid(vcpu, gp, ric);
			kvmhv_put_nested(gp);
		}
		break;
	case 3:
		/* Invalidate ALL LPIDs */
		kvmhv_emulate_tlbie_all_lpid(vcpu, ric);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

/*
 * This handles the H_TLB_INVALIDATE hcall.
 * Parameters are (r4) tlbie instruction code, (r5) rS contents,
 * (r6) rB contents.
 */
long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu)
{
	int ret;

	ret = kvmhv_emulate_priv_tlbie(vcpu, kvmppc_get_gpr(vcpu, 4),
			kvmppc_get_gpr(vcpu, 5), kvmppc_get_gpr(vcpu, 6));
	if (ret)
		return H_PARAMETER;
	return H_SUCCESS;
}

/* Used to convert a nested guest real address to a L1 guest real address */
static int kvmhv_translate_addr_nested(struct kvm_vcpu *vcpu,
				       struct kvm_nested_guest *gp,
				       unsigned long n_gpa, unsigned long dsisr,
				       struct kvmppc_pte *gpte_p)
{
	u64 fault_addr, flags = dsisr & DSISR_ISSTORE;
	int ret;

	ret = kvmppc_mmu_walk_radix_tree(vcpu, n_gpa, gpte_p, gp->l1_gr_to_hr,
					 &fault_addr);
	if (ret) {
		/* We didn't find a pte */
		if (ret == -EINVAL) {
			/* Unsupported mmu config */
			flags |= DSISR_UNSUPP_MMU;
		} else if (ret == -ENOENT) {
			/* No translation found */
			flags |= DSISR_NOHPTE;
		} else if (ret == -EFAULT) {
			/* Couldn't access L1 real address */
			flags |= DSISR_PRTABLE_FAULT;
			vcpu->arch.fault_gpa = fault_addr;
		} else {
			/* Unknown error */
			return ret;
		}
		goto forward_to_l1;
	} else {
		/* We found a pte -> check permissions */
		if (dsisr & DSISR_ISSTORE) {
			/* Can we write? */
			if (!gpte_p->may_write) {
				flags |= DSISR_PROTFAULT;
				goto forward_to_l1;
			}
		} else if (vcpu->arch.trap == BOOK3S_INTERRUPT_H_INST_STORAGE) {
			/* Can we execute? */
			if (!gpte_p->may_execute) {
				flags |= SRR1_ISI_N_G_OR_CIP;
				goto forward_to_l1;
			}
		} else {
			/* Can we read? */
			if (!gpte_p->may_read && !gpte_p->may_write) {
				flags |= DSISR_PROTFAULT;
				goto forward_to_l1;
			}
		}
	}

	return 0;

forward_to_l1:
	vcpu->arch.fault_dsisr = flags;
	if (vcpu->arch.trap == BOOK3S_INTERRUPT_H_INST_STORAGE) {
		vcpu->arch.shregs.msr &= SRR1_MSR_BITS;
		vcpu->arch.shregs.msr |= flags;
	}
	return RESUME_HOST;
}

static long kvmhv_handle_nested_set_rc(struct kvm_vcpu *vcpu,
				       struct kvm_nested_guest *gp,
				       unsigned long n_gpa,
				       struct kvmppc_pte gpte,
				       unsigned long dsisr)
{
	struct kvm *kvm = vcpu->kvm;
	bool writing = !!(dsisr & DSISR_ISSTORE);
	u64 pgflags;
	long ret;

	/* Are the rc bits set in the L1 partition scoped pte? */
	pgflags = _PAGE_ACCESSED;
	if (writing)
		pgflags |= _PAGE_DIRTY;
	if (pgflags & ~gpte.rc)
		return RESUME_HOST;

	spin_lock(&kvm->mmu_lock);
	/* Set the rc bit in the pte of our (L0) pgtable for the L1 guest */
	ret = kvmppc_hv_handle_set_rc(kvm, false, writing,
				      gpte.raddr, kvm->arch.lpid);
	if (!ret) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* Set the rc bit in the pte of the shadow_pgtable for the nest guest */
	ret = kvmppc_hv_handle_set_rc(kvm, true, writing,
				      n_gpa, gp->l1_lpid);
	if (!ret)
		ret = -EINVAL;
	else
		ret = 0;

out_unlock:
	spin_unlock(&kvm->mmu_lock);
	return ret;
}

static inline int kvmppc_radix_level_to_shift(int level)
{
	switch (level) {
	case 2:
		return PUD_SHIFT;
	case 1:
		return PMD_SHIFT;
	default:
		return PAGE_SHIFT;
	}
}

static inline int kvmppc_radix_shift_to_level(int shift)
{
	if (shift == PUD_SHIFT)
		return 2;
	if (shift == PMD_SHIFT)
		return 1;
	if (shift == PAGE_SHIFT)
		return 0;
	WARN_ON_ONCE(1);
	return 0;
}

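/*
 * Handle a page fault taken while running a nested guest: translate the
 * faulting L2 real address via the L1 radix tree, fault in the host page
 * if necessary, and install the combined translation in the shadow page
 * table (steps 1-4 below).
 */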
/* called with gp->tlb_lock held */
static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu,
					  struct kvm_nested_guest *gp)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvm_memory_slot *memslot;
	struct rmap_nested *n_rmap;
	struct kvmppc_pte gpte;
	pte_t pte, *pte_p;
	unsigned long mmu_seq;
	unsigned long dsisr = vcpu->arch.fault_dsisr;
	unsigned long ea = vcpu->arch.fault_dar;
	unsigned long *rmapp;
	unsigned long n_gpa, gpa, gfn, perm = 0UL;
	unsigned int shift, l1_shift, level;
	bool writing = !!(dsisr & DSISR_ISSTORE);
	bool kvm_ro = false;
	long int ret;

	if (!gp->l1_gr_to_hr) {
		kvmhv_update_ptbl_cache(gp);
		if (!gp->l1_gr_to_hr)
			return RESUME_HOST;
	}

	/* Convert the nested guest real address into a L1 guest real address */
	n_gpa = vcpu->arch.fault_gpa & ~0xF000000000000FFFULL;
	if (!(dsisr & DSISR_PRTABLE_FAULT))
		n_gpa |= ea & 0xFFF;
	ret = kvmhv_translate_addr_nested(vcpu, gp, n_gpa, dsisr, &gpte);
	if (ret) {
		/*
		 * If the hardware found a translation but we don't now have a usable
		 * translation in the l1 partition-scoped tree, remove the shadow pte
		 * and let the guest retry.
		 */
		if (ret == RESUME_HOST &&
		    (dsisr & (DSISR_PROTFAULT | DSISR_BADACCESS | DSISR_NOEXEC_OR_G |
			      DSISR_BAD_COPYPASTE)))
			goto inval;
		return ret;
	}

	/* Failed to set the reference/change bits */
	if (dsisr & DSISR_SET_RC) {
		ret = kvmhv_handle_nested_set_rc(vcpu, gp, n_gpa, gpte, dsisr);
		if (ret == RESUME_HOST)
			return ret;
		if (ret)
			goto inval;
		dsisr &= ~DSISR_SET_RC;
		if (!(dsisr & (DSISR_BAD_FAULT_64S | DSISR_NOHPTE |
			       DSISR_PROTFAULT)))
			return RESUME_GUEST;
	}

	/*
	 * We took an HISI or HDSI while we were running a nested guest which
	 * means we have no partition scoped translation for that. This means
	 * we need to insert a pte for the mapping into our shadow_pgtable.
	 */

	l1_shift = gpte.page_shift;
	if (l1_shift < PAGE_SHIFT) {
		/* We don't support l1 using a page size smaller than our own */
		pr_err("KVM: L1 guest page shift (%d) less than our own (%d)\n",
		       l1_shift, PAGE_SHIFT);
		return -EINVAL;
	}
	gpa = gpte.raddr;
	gfn = gpa >> PAGE_SHIFT;

	/* 1. Get the corresponding host memslot */

	memslot = gfn_to_memslot(kvm, gfn);
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
		if (dsisr & (DSISR_PRTABLE_FAULT | DSISR_BADACCESS)) {
			/* unusual error -> reflect to the guest as a DSI */
			kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
			return RESUME_GUEST;
		}

		/* passthrough of emulated MMIO case */
		return kvmppc_hv_emulate_mmio(vcpu, gpa, ea, writing);
	}
	if (memslot->flags & KVM_MEM_READONLY) {
		if (writing) {
			/* Give the guest a DSI */
			kvmppc_core_queue_data_storage(vcpu, ea,
					DSISR_ISSTORE | DSISR_PROTFAULT);
			return RESUME_GUEST;
		}
		kvm_ro = true;
	}

	/* 2. Find the host pte for this L1 guest real address */

	/* Used to check for invalidations in progress */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();

	/* See if can find translation in our partition scoped tables for L1 */
	pte = __pte(0);
	spin_lock(&kvm->mmu_lock);
	pte_p = find_kvm_secondary_pte(kvm, gpa, &shift);
	if (!shift)
		shift = PAGE_SHIFT;
	if (pte_p)
		pte = *pte_p;
	spin_unlock(&kvm->mmu_lock);

	if (!pte_present(pte) || (writing && !(pte_val(pte) & _PAGE_WRITE))) {
		/* No suitable pte found -> try to insert a mapping */
		ret = kvmppc_book3s_instantiate_page(vcpu, gpa, memslot,
					writing, kvm_ro, &pte, &level);
		if (ret == -EAGAIN)
			return RESUME_GUEST;
		else if (ret)
			return ret;
		shift = kvmppc_radix_level_to_shift(level);
	}
	/* Align gfn to the start of the page */
	gfn = (gpa & ~((1UL << shift) - 1)) >> PAGE_SHIFT;

	/* 3. Compute the pte we need to insert for nest_gpa -> host r_addr */

	/* The permissions are the combination of the host and l1 guest ptes */
	perm |= gpte.may_read ? 0UL : _PAGE_READ;
	perm |= gpte.may_write ? 0UL : _PAGE_WRITE;
	perm |= gpte.may_execute ? 0UL : _PAGE_EXEC;
	/* Only set accessed/dirty (rc) bits if set in host and l1 guest ptes */
	perm |= (gpte.rc & _PAGE_ACCESSED) ? 0UL : _PAGE_ACCESSED;
	perm |= ((gpte.rc & _PAGE_DIRTY) && writing) ? 0UL : _PAGE_DIRTY;
	pte = __pte(pte_val(pte) & ~perm);

	/* What size pte can we insert? */
	if (shift > l1_shift) {
		u64 mask;
		unsigned int actual_shift = PAGE_SHIFT;

		if (PMD_SHIFT < l1_shift)
			actual_shift = PMD_SHIFT;
		mask = (1UL << shift) - (1UL << actual_shift);
		pte = __pte(pte_val(pte) | (gpa & mask));
		shift = actual_shift;
	}
	level = kvmppc_radix_shift_to_level(shift);
	n_gpa &= ~((1UL << shift) - 1);

	/* 4. Insert the pte into our shadow_pgtable */

	n_rmap = kzalloc(sizeof(*n_rmap), GFP_KERNEL);
	if (!n_rmap)
		return RESUME_GUEST; /* Let the guest try again */
	n_rmap->rmap = (n_gpa & RMAP_NESTED_GPA_MASK) |
		(((unsigned long) gp->l1_lpid) << RMAP_NESTED_LPID_SHIFT);
	rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
	ret = kvmppc_create_pte(kvm, gp->shadow_pgtable, pte, n_gpa, level,
				mmu_seq, gp->shadow_lpid, rmapp, &n_rmap);
	kfree(n_rmap);
	if (ret == -EAGAIN)
		ret = RESUME_GUEST;	/* Let the guest try again */

	return ret;

inval:
	kvmhv_invalidate_shadow_pte(vcpu, gp, n_gpa, NULL);
	return RESUME_GUEST;
}

long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu)
{
	struct kvm_nested_guest *gp = vcpu->arch.nested;
	long int ret;

	mutex_lock(&gp->tlb_lock);
	ret = __kvmhv_nested_page_fault(vcpu, gp);
	mutex_unlock(&gp->tlb_lock);
	return ret;
}

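/*
 * Return the next in-use nested lpid greater than the one given, or -1
 * if there are no more.
 */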
int kvmhv_nested_next_lpid(struct kvm *kvm, int lpid)
{
	int ret = -1;

	spin_lock(&kvm->mmu_lock);
	while (++lpid <= kvm->arch.max_nested_lpid) {
		if (kvm->arch.nested_guests[lpid]) {
			ret = lpid;
			break;
		}
	}
	spin_unlock(&kvm->mmu_lock);
	return ret;
}