GNU Linux-libre 5.10.217-gnu1: arch/powerpc/kvm/book3s_hv_nested.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright IBM Corporation, 2018
4  * Authors Suraj Jitindar Singh <sjitindarsingh@gmail.com>
5  *         Paul Mackerras <paulus@ozlabs.org>
6  *
7  * Description: KVM functions specific to running nested KVM-HV guests
8  * on Book3S processors (specifically POWER9 and later).
9  */
10
11 #include <linux/kernel.h>
12 #include <linux/kvm_host.h>
13 #include <linux/llist.h>
14 #include <linux/pgtable.h>
15
16 #include <asm/kvm_ppc.h>
17 #include <asm/kvm_book3s.h>
18 #include <asm/mmu.h>
19 #include <asm/pgalloc.h>
20 #include <asm/pte-walk.h>
21 #include <asm/reg.h>
22
23 static struct patb_entry *pseries_partition_tb;
24
25 static void kvmhv_update_ptbl_cache(struct kvm_nested_guest *gp);
26 static void kvmhv_free_memslot_nest_rmap(struct kvm_memory_slot *free);
27
28 void kvmhv_save_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
29 {
30         struct kvmppc_vcore *vc = vcpu->arch.vcore;
31
32         hr->pcr = vc->pcr | PCR_MASK;
33         hr->dpdes = vc->dpdes;
34         hr->hfscr = vcpu->arch.hfscr;
35         hr->tb_offset = vc->tb_offset;
36         hr->dawr0 = vcpu->arch.dawr;
37         hr->dawrx0 = vcpu->arch.dawrx;
38         hr->ciabr = vcpu->arch.ciabr;
39         hr->purr = vcpu->arch.purr;
40         hr->spurr = vcpu->arch.spurr;
41         hr->ic = vcpu->arch.ic;
42         hr->vtb = vc->vtb;
43         hr->srr0 = vcpu->arch.shregs.srr0;
44         hr->srr1 = vcpu->arch.shregs.srr1;
45         hr->sprg[0] = vcpu->arch.shregs.sprg0;
46         hr->sprg[1] = vcpu->arch.shregs.sprg1;
47         hr->sprg[2] = vcpu->arch.shregs.sprg2;
48         hr->sprg[3] = vcpu->arch.shregs.sprg3;
49         hr->pidr = vcpu->arch.pid;
50         hr->cfar = vcpu->arch.cfar;
51         hr->ppr = vcpu->arch.ppr;
52 }
53
54 /* Use noinline_for_stack due to https://bugs.llvm.org/show_bug.cgi?id=49610 */
55 static noinline_for_stack void byteswap_pt_regs(struct pt_regs *regs)
56 {
57         unsigned long *addr = (unsigned long *) regs;
58
59         for (; addr < ((unsigned long *) (regs + 1)); addr++)
60                 *addr = swab64(*addr);
61 }
62
63 static void byteswap_hv_regs(struct hv_guest_state *hr)
64 {
65         hr->version = swab64(hr->version);
66         hr->lpid = swab32(hr->lpid);
67         hr->vcpu_token = swab32(hr->vcpu_token);
68         hr->lpcr = swab64(hr->lpcr);
69         hr->pcr = swab64(hr->pcr) | PCR_MASK;
70         hr->amor = swab64(hr->amor);
71         hr->dpdes = swab64(hr->dpdes);
72         hr->hfscr = swab64(hr->hfscr);
73         hr->tb_offset = swab64(hr->tb_offset);
74         hr->dawr0 = swab64(hr->dawr0);
75         hr->dawrx0 = swab64(hr->dawrx0);
76         hr->ciabr = swab64(hr->ciabr);
77         hr->hdec_expiry = swab64(hr->hdec_expiry);
78         hr->purr = swab64(hr->purr);
79         hr->spurr = swab64(hr->spurr);
80         hr->ic = swab64(hr->ic);
81         hr->vtb = swab64(hr->vtb);
82         hr->hdar = swab64(hr->hdar);
83         hr->hdsisr = swab64(hr->hdsisr);
84         hr->heir = swab64(hr->heir);
85         hr->asdr = swab64(hr->asdr);
86         hr->srr0 = swab64(hr->srr0);
87         hr->srr1 = swab64(hr->srr1);
88         hr->sprg[0] = swab64(hr->sprg[0]);
89         hr->sprg[1] = swab64(hr->sprg[1]);
90         hr->sprg[2] = swab64(hr->sprg[2]);
91         hr->sprg[3] = swab64(hr->sprg[3]);
92         hr->pidr = swab64(hr->pidr);
93         hr->cfar = swab64(hr->cfar);
94         hr->ppr = swab64(hr->ppr);
95 }
96
97 static void save_hv_return_state(struct kvm_vcpu *vcpu, int trap,
98                                  struct hv_guest_state *hr)
99 {
100         struct kvmppc_vcore *vc = vcpu->arch.vcore;
101
102         hr->dpdes = vc->dpdes;
103         hr->hfscr = vcpu->arch.hfscr;
104         hr->purr = vcpu->arch.purr;
105         hr->spurr = vcpu->arch.spurr;
106         hr->ic = vcpu->arch.ic;
107         hr->vtb = vc->vtb;
108         hr->srr0 = vcpu->arch.shregs.srr0;
109         hr->srr1 = vcpu->arch.shregs.srr1;
110         hr->sprg[0] = vcpu->arch.shregs.sprg0;
111         hr->sprg[1] = vcpu->arch.shregs.sprg1;
112         hr->sprg[2] = vcpu->arch.shregs.sprg2;
113         hr->sprg[3] = vcpu->arch.shregs.sprg3;
114         hr->pidr = vcpu->arch.pid;
115         hr->cfar = vcpu->arch.cfar;
116         hr->ppr = vcpu->arch.ppr;
117         switch (trap) {
118         case BOOK3S_INTERRUPT_H_DATA_STORAGE:
119                 hr->hdar = vcpu->arch.fault_dar;
120                 hr->hdsisr = vcpu->arch.fault_dsisr;
121                 hr->asdr = vcpu->arch.fault_gpa;
122                 break;
123         case BOOK3S_INTERRUPT_H_INST_STORAGE:
124                 hr->asdr = vcpu->arch.fault_gpa;
125                 break;
126         case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
127                 hr->heir = vcpu->arch.emul_inst;
128                 break;
129         }
130 }
131
132 static void sanitise_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
133 {
134         /*
135          * Don't let L1 enable features for L2 which we've disabled for L1,
136          * but preserve the interrupt cause field.
137          */
138         hr->hfscr &= (HFSCR_INTR_CAUSE | vcpu->arch.hfscr);
139
140         /* Don't let data address watchpoint match in hypervisor state */
141         hr->dawrx0 &= ~DAWRX_HYP;
142
143         /* Don't let completed instruction address breakpt match in HV state */
144         if ((hr->ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER)
145                 hr->ciabr &= ~CIABR_PRIV;
146 }
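
/*
 * A minimal standalone sketch of the HFSCR rule above, not from the
 * kernel source: L2 may only use facility bits that the host already
 * granted to L1, while the interrupt-cause field is preserved. The
 * names below are hypothetical; the real mask is HFSCR_INTR_CAUSE.
 */
#define DEMO_HFSCR_INTR_CAUSE  0xff00000000000000UL    /* illustrative value */

static inline unsigned long demo_sanitise_hfscr(unsigned long l1_hfscr,
                                                unsigned long l2_hfscr)
{
        /* keep only bits L1 itself holds, plus the cause field */
        return l2_hfscr & (DEMO_HFSCR_INTR_CAUSE | l1_hfscr);
}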
147
148 static void restore_hv_regs(struct kvm_vcpu *vcpu, struct hv_guest_state *hr)
149 {
150         struct kvmppc_vcore *vc = vcpu->arch.vcore;
151
152         vc->pcr = hr->pcr | PCR_MASK;
153         vc->dpdes = hr->dpdes;
154         vcpu->arch.hfscr = hr->hfscr;
155         vcpu->arch.dawr = hr->dawr0;
156         vcpu->arch.dawrx = hr->dawrx0;
157         vcpu->arch.ciabr = hr->ciabr;
158         vcpu->arch.purr = hr->purr;
159         vcpu->arch.spurr = hr->spurr;
160         vcpu->arch.ic = hr->ic;
161         vc->vtb = hr->vtb;
162         vcpu->arch.shregs.srr0 = hr->srr0;
163         vcpu->arch.shregs.srr1 = hr->srr1;
164         vcpu->arch.shregs.sprg0 = hr->sprg[0];
165         vcpu->arch.shregs.sprg1 = hr->sprg[1];
166         vcpu->arch.shregs.sprg2 = hr->sprg[2];
167         vcpu->arch.shregs.sprg3 = hr->sprg[3];
168         vcpu->arch.pid = hr->pidr;
169         vcpu->arch.cfar = hr->cfar;
170         vcpu->arch.ppr = hr->ppr;
171 }
172
173 void kvmhv_restore_hv_return_state(struct kvm_vcpu *vcpu,
174                                    struct hv_guest_state *hr)
175 {
176         struct kvmppc_vcore *vc = vcpu->arch.vcore;
177
178         vc->dpdes = hr->dpdes;
179         vcpu->arch.hfscr = hr->hfscr;
180         vcpu->arch.purr = hr->purr;
181         vcpu->arch.spurr = hr->spurr;
182         vcpu->arch.ic = hr->ic;
183         vc->vtb = hr->vtb;
184         vcpu->arch.fault_dar = hr->hdar;
185         vcpu->arch.fault_dsisr = hr->hdsisr;
186         vcpu->arch.fault_gpa = hr->asdr;
187         vcpu->arch.emul_inst = hr->heir;
188         vcpu->arch.shregs.srr0 = hr->srr0;
189         vcpu->arch.shregs.srr1 = hr->srr1;
190         vcpu->arch.shregs.sprg0 = hr->sprg[0];
191         vcpu->arch.shregs.sprg1 = hr->sprg[1];
192         vcpu->arch.shregs.sprg2 = hr->sprg[2];
193         vcpu->arch.shregs.sprg3 = hr->sprg[3];
194         vcpu->arch.pid = hr->pidr;
195         vcpu->arch.cfar = hr->cfar;
196         vcpu->arch.ppr = hr->ppr;
197 }
198
199 static void kvmhv_nested_mmio_needed(struct kvm_vcpu *vcpu, u64 regs_ptr)
200 {
201         /* No need to reflect the page fault to L1, we've handled it */
202         vcpu->arch.trap = 0;
203
204         /*
205          * The L2 gprs will already have been written back into L1 memory by
206          * the time the mmio completes, so store the L1 memory location of
207          * the L2 gpr that the mmio loads into, so that the loaded value can
208          * be written there in kvmppc_complete_mmio_load().
209          */
210         if (((vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) == KVM_MMIO_REG_GPR)
211             && (vcpu->mmio_is_write == 0)) {
212                 vcpu->arch.nested_io_gpr = (gpa_t) regs_ptr +
213                                            offsetof(struct pt_regs,
214                                                     gpr[vcpu->arch.io_gpr]);
215                 vcpu->arch.io_gpr = KVM_MMIO_REG_NESTED_GPR;
216         }
217 }
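
/*
 * A minimal sketch of the address arithmetic above, with a simplified
 * stand-in for struct pt_regs (demo_pt_regs and demo_gpr_gpa are
 * hypothetical): the L1 gpa of L2's GPR n is the base of the pt_regs
 * buffer plus the offset of gpr[n]. Using offsetof() with a runtime
 * index relies on the same GNU extension the code above uses.
 */
struct demo_pt_regs {
        unsigned long gpr[32];
        unsigned long nip;
};

static inline unsigned long demo_gpr_gpa(unsigned long regs_ptr, int n)
{
        return regs_ptr + offsetof(struct demo_pt_regs, gpr[n]);
}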
218
219 long kvmhv_enter_nested_guest(struct kvm_vcpu *vcpu)
220 {
221         long int err, r;
222         struct kvm_nested_guest *l2;
223         struct pt_regs l2_regs, saved_l1_regs;
224         struct hv_guest_state l2_hv, saved_l1_hv;
225         struct kvmppc_vcore *vc = vcpu->arch.vcore;
226         u64 hv_ptr, regs_ptr;
227         u64 hdec_exp;
228         s64 delta_purr, delta_spurr, delta_ic, delta_vtb;
229         u64 mask;
230         unsigned long lpcr;
231
232         if (vcpu->kvm->arch.l1_ptcr == 0)
233                 return H_NOT_AVAILABLE;
234
235         if (MSR_TM_TRANSACTIONAL(vcpu->arch.shregs.msr))
236                 return H_BAD_MODE;
237
238         /* copy parameters in */
239         hv_ptr = kvmppc_get_gpr(vcpu, 4);
240         regs_ptr = kvmppc_get_gpr(vcpu, 5);
241         vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
242         err = kvm_vcpu_read_guest(vcpu, hv_ptr, &l2_hv,
243                                   sizeof(struct hv_guest_state)) ||
244                 kvm_vcpu_read_guest(vcpu, regs_ptr, &l2_regs,
245                                     sizeof(struct pt_regs));
246         srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
247         if (err)
248                 return H_PARAMETER;
249
250         if (kvmppc_need_byteswap(vcpu))
251                 byteswap_hv_regs(&l2_hv);
252         if (l2_hv.version != HV_GUEST_STATE_VERSION)
253                 return H_P2;
254
255         if (kvmppc_need_byteswap(vcpu))
256                 byteswap_pt_regs(&l2_regs);
257         if (l2_hv.vcpu_token >= NR_CPUS)
258                 return H_PARAMETER;
259
260         /*
261          * L1 must have set up a suspended state to enter the L2 in a
262          * transactional state, and only in that case. These have to be
263          * filtered out here to prevent causing a TM Bad Thing in the
264          * host HRFID. We could synthesize a TM Bad Thing back to the L1
265          * here, but there doesn't seem to be much point.
266          */
267         if (MSR_TM_SUSPENDED(vcpu->arch.shregs.msr)) {
268                 if (!MSR_TM_ACTIVE(l2_regs.msr))
269                         return H_BAD_MODE;
270         } else {
271                 if (l2_regs.msr & MSR_TS_MASK)
272                         return H_BAD_MODE;
273                 if (WARN_ON_ONCE(vcpu->arch.shregs.msr & MSR_TS_MASK))
274                         return H_BAD_MODE;
275         }
276
277         /* translate lpid */
278         l2 = kvmhv_get_nested(vcpu->kvm, l2_hv.lpid, true);
279         if (!l2)
280                 return H_PARAMETER;
281         if (!l2->l1_gr_to_hr) {
282                 mutex_lock(&l2->tlb_lock);
283                 kvmhv_update_ptbl_cache(l2);
284                 mutex_unlock(&l2->tlb_lock);
285         }
286
287         /* save l1 values of things */
288         vcpu->arch.regs.msr = vcpu->arch.shregs.msr;
289         saved_l1_regs = vcpu->arch.regs;
290         kvmhv_save_hv_regs(vcpu, &saved_l1_hv);
291
292         /* convert TB values/offsets to host (L0) values */
293         hdec_exp = l2_hv.hdec_expiry - vc->tb_offset;
294         vc->tb_offset += l2_hv.tb_offset;
295
296         /* set L1 state to L2 state */
297         vcpu->arch.nested = l2;
298         vcpu->arch.nested_vcpu_id = l2_hv.vcpu_token;
299         vcpu->arch.regs = l2_regs;
300         vcpu->arch.shregs.msr = vcpu->arch.regs.msr;
301         mask = LPCR_DPFD | LPCR_ILE | LPCR_TC | LPCR_AIL | LPCR_LD |
302                 LPCR_LPES | LPCR_MER;
303         lpcr = (vc->lpcr & ~mask) | (l2_hv.lpcr & mask);
304         sanitise_hv_regs(vcpu, &l2_hv);
305         restore_hv_regs(vcpu, &l2_hv);
306
307         vcpu->arch.ret = RESUME_GUEST;
308         vcpu->arch.trap = 0;
309         do {
310                 if (mftb() >= hdec_exp) {
311                         vcpu->arch.trap = BOOK3S_INTERRUPT_HV_DECREMENTER;
312                         r = RESUME_HOST;
313                         break;
314                 }
315                 r = kvmhv_run_single_vcpu(vcpu, hdec_exp, lpcr);
316         } while (is_kvmppc_resume_guest(r));
317
318         /* save L2 state for return */
319         l2_regs = vcpu->arch.regs;
320         l2_regs.msr = vcpu->arch.shregs.msr;
321         delta_purr = vcpu->arch.purr - l2_hv.purr;
322         delta_spurr = vcpu->arch.spurr - l2_hv.spurr;
323         delta_ic = vcpu->arch.ic - l2_hv.ic;
324         delta_vtb = vc->vtb - l2_hv.vtb;
325         save_hv_return_state(vcpu, vcpu->arch.trap, &l2_hv);
326
327         /* restore L1 state */
328         vcpu->arch.nested = NULL;
329         vcpu->arch.regs = saved_l1_regs;
330         vcpu->arch.shregs.msr = saved_l1_regs.msr & ~MSR_TS_MASK;
331         /* set L1 MSR TS field according to L2 transaction state */
332         if (l2_regs.msr & MSR_TS_MASK)
333                 vcpu->arch.shregs.msr |= MSR_TS_S;
334         vc->tb_offset = saved_l1_hv.tb_offset;
335         restore_hv_regs(vcpu, &saved_l1_hv);
336         vcpu->arch.purr += delta_purr;
337         vcpu->arch.spurr += delta_spurr;
338         vcpu->arch.ic += delta_ic;
339         vc->vtb += delta_vtb;
340
341         kvmhv_put_nested(l2);
342
343         /* copy l2_hv_state and regs back to guest */
344         if (kvmppc_need_byteswap(vcpu)) {
345                 byteswap_hv_regs(&l2_hv);
346                 byteswap_pt_regs(&l2_regs);
347         }
348         vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
349         err = kvm_vcpu_write_guest(vcpu, hv_ptr, &l2_hv,
350                                    sizeof(struct hv_guest_state)) ||
351                 kvm_vcpu_write_guest(vcpu, regs_ptr, &l2_regs,
352                                    sizeof(struct pt_regs));
353         srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
354         if (err)
355                 return H_AUTHORITY;
356
357         if (r == -EINTR)
358                 return H_INTERRUPT;
359
360         if (vcpu->mmio_needed) {
361                 kvmhv_nested_mmio_needed(vcpu, regs_ptr);
362                 return H_TOO_HARD;
363         }
364
365         return vcpu->arch.trap;
366 }
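
/*
 * For reference, a hedged sketch of the L1 side of this hcall (the
 * wrapper name is hypothetical): L1 marshals an hv_guest_state and a
 * pt_regs image for the L2 vcpu into guest-real buffers and passes
 * their addresses in r4 and r5, matching the reads of gprs 4/5 above.
 */
static inline long demo_h_enter_nested(unsigned long hv_regs_gpa,
                                       unsigned long l2_regs_gpa)
{
        return plpar_hcall_norets(H_ENTER_NESTED, hv_regs_gpa, l2_regs_gpa);
}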
367
368 long kvmhv_nested_init(void)
369 {
370         long int ptb_order;
371         unsigned long ptcr;
372         long rc;
373
374         if (!kvmhv_on_pseries())
375                 return 0;
376         if (!radix_enabled())
377                 return -ENODEV;
378
379         /* find log base 2 of KVMPPC_NR_LPIDS, rounding up */
380         ptb_order = __ilog2(KVMPPC_NR_LPIDS - 1) + 1;
381         if (ptb_order < 8)
382                 ptb_order = 8;
383         pseries_partition_tb = kmalloc(sizeof(struct patb_entry) << ptb_order,
384                                        GFP_KERNEL);
385         if (!pseries_partition_tb) {
386                 pr_err("kvm-hv: failed to allocate nested partition table\n");
387                 return -ENOMEM;
388         }
389
390         ptcr = __pa(pseries_partition_tb) | (ptb_order - 8);
391         rc = plpar_hcall_norets(H_SET_PARTITION_TABLE, ptcr);
392         if (rc != H_SUCCESS) {
393                 pr_err("kvm-hv: Parent hypervisor does not support nesting (rc=%ld)\n",
394                        rc);
395                 kfree(pseries_partition_tb);
396                 pseries_partition_tb = NULL;
397                 return -ENODEV;
398         }
399
400         return 0;
401 }
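
/*
 * Worked example of the sizing above, with illustrative numbers: for
 * KVMPPC_NR_LPIDS = 1024, __ilog2(1023) + 1 = 10, so the table holds
 * 1024 16-byte patb_entry slots (16 KiB) and the PTCR size field is
 * ptb_order - 8 = 2, i.e. the architected log2(size) - 12. The
 * minimum ptb_order of 8 corresponds to the 4 KiB minimum table.
 */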
402
403 void kvmhv_nested_exit(void)
404 {
405         /*
406          * N.B. the kvmhv_on_pseries() test is there because it enables
407          * the compiler to remove the call to plpar_hcall_norets()
408          * when CONFIG_PPC_PSERIES=n.
409          */
410         if (kvmhv_on_pseries() && pseries_partition_tb) {
411                 plpar_hcall_norets(H_SET_PARTITION_TABLE, 0);
412                 kfree(pseries_partition_tb);
413                 pseries_partition_tb = NULL;
414         }
415 }
416
417 static void kvmhv_flush_lpid(unsigned int lpid)
418 {
419         long rc;
420
421         if (!kvmhv_on_pseries()) {
422                 radix__flush_all_lpid(lpid);
423                 return;
424         }
425
426         rc = plpar_hcall_norets(H_TLB_INVALIDATE, H_TLBIE_P1_ENC(2, 0, 1),
427                                 lpid, TLBIEL_INVAL_SET_LPID);
428         if (rc)
429                 pr_err("KVM: TLB LPID invalidation hcall failed, rc=%ld\n", rc);
430 }
431
432 void kvmhv_set_ptbl_entry(unsigned int lpid, u64 dw0, u64 dw1)
433 {
434         if (!kvmhv_on_pseries()) {
435                 mmu_partition_table_set_entry(lpid, dw0, dw1, true);
436                 return;
437         }
438
439         pseries_partition_tb[lpid].patb0 = cpu_to_be64(dw0);
440         pseries_partition_tb[lpid].patb1 = cpu_to_be64(dw1);
441         /* L0 will do the necessary barriers */
442         kvmhv_flush_lpid(lpid);
443 }
444
445 static void kvmhv_set_nested_ptbl(struct kvm_nested_guest *gp)
446 {
447         unsigned long dw0;
448
449         dw0 = PATB_HR | radix__get_tree_size() |
450                 __pa(gp->shadow_pgtable) | RADIX_PGD_INDEX_SIZE;
451         kvmhv_set_ptbl_entry(gp->shadow_lpid, dw0, gp->process_table);
452 }
453
454 void kvmhv_vm_nested_init(struct kvm *kvm)
455 {
456         kvm->arch.max_nested_lpid = -1;
457 }
458
459 /*
460  * Handle the H_SET_PARTITION_TABLE hcall.
461  * r4 = guest real address of partition table + log_2(size) - 12
462  * (formatted as for the PTCR).
463  */
464 long kvmhv_set_partition_table(struct kvm_vcpu *vcpu)
465 {
466         struct kvm *kvm = vcpu->kvm;
467         unsigned long ptcr = kvmppc_get_gpr(vcpu, 4);
468         int srcu_idx;
469         long ret = H_SUCCESS;
470
471         srcu_idx = srcu_read_lock(&kvm->srcu);
472         /*
473          * Limit the partition table to 4096 entries (because that's what
474          * hardware supports), and check the base address.
475          */
476         if ((ptcr & PRTS_MASK) > 12 - 8 ||
477             !kvm_is_visible_gfn(vcpu->kvm, (ptcr & PRTB_MASK) >> PAGE_SHIFT))
478                 ret = H_PARAMETER;
479         srcu_read_unlock(&kvm->srcu, srcu_idx);
480         if (ret == H_SUCCESS)
481                 kvm->arch.l1_ptcr = ptcr;
482         return ret;
483 }
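
/*
 * Worked example of the check above: each partition-table entry is
 * 16 bytes, so 4096 entries occupy 64 KiB, which encodes as
 * PRTS = log2(65536) - 12 = 4 = 12 - 8; any larger size field (or a
 * table base that is not covered by a memslot) yields H_PARAMETER.
 */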
484
485 /*
486  * Handle the H_COPY_TOFROM_GUEST hcall.
487  * r4 = L1 lpid of nested guest
488  * r5 = pid
489  * r6 = eaddr to access
490  * r7 = to buffer (L1 gpa)
491  * r8 = from buffer (L1 gpa)
492  * r9 = n bytes to copy
493  */
494 long kvmhv_copy_tofrom_guest_nested(struct kvm_vcpu *vcpu)
495 {
496         struct kvm_nested_guest *gp;
497         int l1_lpid = kvmppc_get_gpr(vcpu, 4);
498         int pid = kvmppc_get_gpr(vcpu, 5);
499         gva_t eaddr = kvmppc_get_gpr(vcpu, 6);
500         gpa_t gp_to = (gpa_t) kvmppc_get_gpr(vcpu, 7);
501         gpa_t gp_from = (gpa_t) kvmppc_get_gpr(vcpu, 8);
502         void *buf;
503         unsigned long n = kvmppc_get_gpr(vcpu, 9);
504         bool is_load = !!gp_to;
505         long rc;
506
507         if (gp_to && gp_from) /* One must be NULL to determine the direction */
508                 return H_PARAMETER;
509
510         if (eaddr & (0xFFFUL << 52))
511                 return H_PARAMETER;
512
513         buf = kzalloc(n, GFP_KERNEL | __GFP_NOWARN);
514         if (!buf)
515                 return H_NO_MEM;
516
517         gp = kvmhv_get_nested(vcpu->kvm, l1_lpid, false);
518         if (!gp) {
519                 rc = H_PARAMETER;
520                 goto out_free;
521         }
522
523         mutex_lock(&gp->tlb_lock);
524
525         if (is_load) {
526                 /* Load from the nested guest into our buffer */
527                 rc = __kvmhv_copy_tofrom_guest_radix(gp->shadow_lpid, pid,
528                                                      eaddr, buf, NULL, n);
529                 if (rc)
530                         goto not_found;
531
532                 /* Write what was loaded into our buffer back to the L1 guest */
533                 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
534                 rc = kvm_vcpu_write_guest(vcpu, gp_to, buf, n);
535                 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
536                 if (rc)
537                         goto not_found;
538         } else {
539                 /* Load the data to be stored from the L1 guest into our buf */
540                 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
541                 rc = kvm_vcpu_read_guest(vcpu, gp_from, buf, n);
542                 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
543                 if (rc)
544                         goto not_found;
545
546                 /* Store from our buffer into the nested guest */
547                 rc = __kvmhv_copy_tofrom_guest_radix(gp->shadow_lpid, pid,
548                                                      eaddr, NULL, buf, n);
549                 if (rc)
550                         goto not_found;
551         }
552
553 out_unlock:
554         mutex_unlock(&gp->tlb_lock);
555         kvmhv_put_nested(gp);
556 out_free:
557         kfree(buf);
558         return rc;
559 not_found:
560         rc = H_NOT_FOUND;
561         goto out_unlock;
562 }
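
/*
 * A hedged sketch of the L1 side for the load direction (the wrapper
 * name is hypothetical): to read n bytes from a nested guest, L1
 * passes a destination gpa in r7 and zero in r8, since exactly one
 * of the two buffers must be NULL to select the copy direction.
 */
static inline long demo_copy_from_l2(unsigned long l2_lpid, unsigned long pid,
                                     unsigned long eaddr, unsigned long to_gpa,
                                     unsigned long n)
{
        return plpar_hcall_norets(H_COPY_TOFROM_GUEST, l2_lpid, pid, eaddr,
                                  to_gpa, 0, n);
}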
563
564 /*
565  * Reload the partition table entry for a guest.
566  * Caller must hold gp->tlb_lock.
567  */
568 static void kvmhv_update_ptbl_cache(struct kvm_nested_guest *gp)
569 {
570         int ret;
571         struct patb_entry ptbl_entry;
572         unsigned long ptbl_addr;
573         struct kvm *kvm = gp->l1_host;
574
575         ret = -EFAULT;
576         ptbl_addr = (kvm->arch.l1_ptcr & PRTB_MASK) + (gp->l1_lpid << 4);
577         if (gp->l1_lpid < (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 8))) {
578                 int srcu_idx = srcu_read_lock(&kvm->srcu);
579                 ret = kvm_read_guest(kvm, ptbl_addr,
580                                      &ptbl_entry, sizeof(ptbl_entry));
581                 srcu_read_unlock(&kvm->srcu, srcu_idx);
582         }
583         if (ret) {
584                 gp->l1_gr_to_hr = 0;
585                 gp->process_table = 0;
586         } else {
587                 gp->l1_gr_to_hr = be64_to_cpu(ptbl_entry.patb0);
588                 gp->process_table = be64_to_cpu(ptbl_entry.patb1);
589         }
590         kvmhv_set_nested_ptbl(gp);
591 }
592
593 static struct kvm_nested_guest *kvmhv_alloc_nested(struct kvm *kvm, unsigned int lpid)
594 {
595         struct kvm_nested_guest *gp;
596         long shadow_lpid;
597
598         gp = kzalloc(sizeof(*gp), GFP_KERNEL);
599         if (!gp)
600                 return NULL;
601         gp->l1_host = kvm;
602         gp->l1_lpid = lpid;
603         mutex_init(&gp->tlb_lock);
604         gp->shadow_pgtable = pgd_alloc(kvm->mm);
605         if (!gp->shadow_pgtable)
606                 goto out_free;
607         shadow_lpid = kvmppc_alloc_lpid();
608         if (shadow_lpid < 0)
609                 goto out_free2;
610         gp->shadow_lpid = shadow_lpid;
611         gp->radix = 1;
612
613         memset(gp->prev_cpu, -1, sizeof(gp->prev_cpu));
614
615         return gp;
616
617  out_free2:
618         pgd_free(kvm->mm, gp->shadow_pgtable);
619  out_free:
620         kfree(gp);
621         return NULL;
622 }
623
624 /*
625  * Free up any resources allocated for a nested guest.
626  */
627 static void kvmhv_release_nested(struct kvm_nested_guest *gp)
628 {
629         struct kvm *kvm = gp->l1_host;
630
631         if (gp->shadow_pgtable) {
632                 /*
633                  * No vcpu is using this struct and no call to
634                  * kvmhv_get_nested can find this struct,
635                  * so we don't need to hold kvm->mmu_lock.
636                  */
637                 kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable,
638                                           gp->shadow_lpid);
639                 pgd_free(kvm->mm, gp->shadow_pgtable);
640         }
641         kvmhv_set_ptbl_entry(gp->shadow_lpid, 0, 0);
642         kvmppc_free_lpid(gp->shadow_lpid);
643         kfree(gp);
644 }
645
646 static void kvmhv_remove_nested(struct kvm_nested_guest *gp)
647 {
648         struct kvm *kvm = gp->l1_host;
649         int lpid = gp->l1_lpid;
650         long ref;
651
652         spin_lock(&kvm->mmu_lock);
653         if (gp == kvm->arch.nested_guests[lpid]) {
654                 kvm->arch.nested_guests[lpid] = NULL;
655                 if (lpid == kvm->arch.max_nested_lpid) {
656                         while (--lpid >= 0 && !kvm->arch.nested_guests[lpid])
657                                 ;
658                         kvm->arch.max_nested_lpid = lpid;
659                 }
660                 --gp->refcnt;
661         }
662         ref = gp->refcnt;
663         spin_unlock(&kvm->mmu_lock);
664         if (ref == 0)
665                 kvmhv_release_nested(gp);
666 }
667
668 /*
669  * Free up all nested resources allocated for this guest.
670  * This is called with no vcpus of the guest running, when
671  * switching the guest to HPT mode or when destroying the
672  * guest.
673  */
674 void kvmhv_release_all_nested(struct kvm *kvm)
675 {
676         int i;
677         struct kvm_nested_guest *gp;
678         struct kvm_nested_guest *freelist = NULL;
679         struct kvm_memory_slot *memslot;
680         int srcu_idx;
681
682         spin_lock(&kvm->mmu_lock);
683         for (i = 0; i <= kvm->arch.max_nested_lpid; i++) {
684                 gp = kvm->arch.nested_guests[i];
685                 if (!gp)
686                         continue;
687                 kvm->arch.nested_guests[i] = NULL;
688                 if (--gp->refcnt == 0) {
689                         gp->next = freelist;
690                         freelist = gp;
691                 }
692         }
693         kvm->arch.max_nested_lpid = -1;
694         spin_unlock(&kvm->mmu_lock);
695         while ((gp = freelist) != NULL) {
696                 freelist = gp->next;
697                 kvmhv_release_nested(gp);
698         }
699
700         srcu_idx = srcu_read_lock(&kvm->srcu);
701         kvm_for_each_memslot(memslot, kvm_memslots(kvm))
702                 kvmhv_free_memslot_nest_rmap(memslot);
703         srcu_read_unlock(&kvm->srcu, srcu_idx);
704 }
705
706 /* caller must hold gp->tlb_lock */
707 static void kvmhv_flush_nested(struct kvm_nested_guest *gp)
708 {
709         struct kvm *kvm = gp->l1_host;
710
711         spin_lock(&kvm->mmu_lock);
712         kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable, gp->shadow_lpid);
713         spin_unlock(&kvm->mmu_lock);
714         kvmhv_flush_lpid(gp->shadow_lpid);
715         kvmhv_update_ptbl_cache(gp);
716         if (gp->l1_gr_to_hr == 0)
717                 kvmhv_remove_nested(gp);
718 }
719
720 struct kvm_nested_guest *kvmhv_get_nested(struct kvm *kvm, int l1_lpid,
721                                           bool create)
722 {
723         struct kvm_nested_guest *gp, *newgp;
724
725         if (l1_lpid >= KVM_MAX_NESTED_GUESTS ||
726             l1_lpid >= (1ul << ((kvm->arch.l1_ptcr & PRTS_MASK) + 12 - 4)))
727                 return NULL;
728
729         spin_lock(&kvm->mmu_lock);
730         gp = kvm->arch.nested_guests[l1_lpid];
731         if (gp)
732                 ++gp->refcnt;
733         spin_unlock(&kvm->mmu_lock);
734
735         if (gp || !create)
736                 return gp;
737
738         newgp = kvmhv_alloc_nested(kvm, l1_lpid);
739         if (!newgp)
740                 return NULL;
741         spin_lock(&kvm->mmu_lock);
742         if (kvm->arch.nested_guests[l1_lpid]) {
743                 /* someone else beat us to it */
744                 gp = kvm->arch.nested_guests[l1_lpid];
745         } else {
746                 kvm->arch.nested_guests[l1_lpid] = newgp;
747                 ++newgp->refcnt;
748                 gp = newgp;
749                 newgp = NULL;
750                 if (l1_lpid > kvm->arch.max_nested_lpid)
751                         kvm->arch.max_nested_lpid = l1_lpid;
752         }
753         ++gp->refcnt;
754         spin_unlock(&kvm->mmu_lock);
755
756         if (newgp)
757                 kvmhv_release_nested(newgp);
758
759         return gp;
760 }
761
762 void kvmhv_put_nested(struct kvm_nested_guest *gp)
763 {
764         struct kvm *kvm = gp->l1_host;
765         long ref;
766
767         spin_lock(&kvm->mmu_lock);
768         ref = --gp->refcnt;
769         spin_unlock(&kvm->mmu_lock);
770         if (ref == 0)
771                 kvmhv_release_nested(gp);
772 }
773
774 static struct kvm_nested_guest *kvmhv_find_nested(struct kvm *kvm, int lpid)
775 {
776         if (lpid > kvm->arch.max_nested_lpid)
777                 return NULL;
778         return kvm->arch.nested_guests[lpid];
779 }
780
781 pte_t *find_kvm_nested_guest_pte(struct kvm *kvm, unsigned long lpid,
782                                  unsigned long ea, unsigned *hshift)
783 {
784         struct kvm_nested_guest *gp;
785         pte_t *pte;
786
787         gp = kvmhv_find_nested(kvm, lpid);
788         if (!gp)
789                 return NULL;
790
791         VM_WARN(!spin_is_locked(&kvm->mmu_lock),
792                 "%s called with kvm mmu_lock not held\n", __func__);
793         pte = __find_linux_pte(gp->shadow_pgtable, ea, NULL, hshift);
794
795         return pte;
796 }
797
798 static inline bool kvmhv_n_rmap_is_equal(u64 rmap_1, u64 rmap_2)
799 {
800         return !((rmap_1 ^ rmap_2) & (RMAP_NESTED_LPID_MASK |
801                                        RMAP_NESTED_GPA_MASK));
802 }
803
804 void kvmhv_insert_nest_rmap(struct kvm *kvm, unsigned long *rmapp,
805                             struct rmap_nested **n_rmap)
806 {
807         struct llist_node *entry = ((struct llist_head *) rmapp)->first;
808         struct rmap_nested *cursor;
809         u64 rmap, new_rmap = (*n_rmap)->rmap;
810
811         /* Are there any existing entries? */
812         if (!(*rmapp)) {
813                 /* No -> use the rmap as a single entry */
814                 *rmapp = new_rmap | RMAP_NESTED_IS_SINGLE_ENTRY;
815                 return;
816         }
817
818         /* Do any entries match what we're trying to insert? */
819         for_each_nest_rmap_safe(cursor, entry, &rmap) {
820                 if (kvmhv_n_rmap_is_equal(rmap, new_rmap))
821                         return;
822         }
823
824         /* Do we need to create a list or just add the new entry? */
825         rmap = *rmapp;
826         if (rmap & RMAP_NESTED_IS_SINGLE_ENTRY) /* Not previously a list */
827                 *rmapp = 0UL;
828         llist_add(&((*n_rmap)->list), (struct llist_head *) rmapp);
829         if (rmap & RMAP_NESTED_IS_SINGLE_ENTRY) /* Not previously a list */
830                 (*n_rmap)->list.next = (struct llist_node *) rmap;
831
832         /* Set NULL so not freed by caller */
833         *n_rmap = NULL;
834 }
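
/*
 * Summary of the encoding used above: an rmap word with
 * RMAP_NESTED_IS_SINGLE_ENTRY set holds its one entry inline; when a
 * second entry is inserted, the word is converted into an llist_head
 * and the old inline value is rethreaded as the encoded next pointer
 * of the newly added node, so the original entry is preserved.
 */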
835
836 static void kvmhv_update_nest_rmap_rc(struct kvm *kvm, u64 n_rmap,
837                                       unsigned long clr, unsigned long set,
838                                       unsigned long hpa, unsigned long mask)
839 {
840         unsigned long gpa;
841         unsigned int shift, lpid;
842         pte_t *ptep;
843
844         gpa = n_rmap & RMAP_NESTED_GPA_MASK;
845         lpid = (n_rmap & RMAP_NESTED_LPID_MASK) >> RMAP_NESTED_LPID_SHIFT;
846
847         /* Find the pte */
848         ptep = find_kvm_nested_guest_pte(kvm, lpid, gpa, &shift);
849         /*
850          * If the pte is present and the pfn is still the same, update the pte.
851          * If the pfn has changed then this is a stale rmap entry, the nested
852          * gpa actually points somewhere else now, and there is nothing to do.
853          * XXX A future optimisation would be to remove the rmap entry here.
854          */
855         if (ptep && pte_present(*ptep) && ((pte_val(*ptep) & mask) == hpa)) {
856                 __radix_pte_update(ptep, clr, set);
857                 kvmppc_radix_tlbie_page(kvm, gpa, shift, lpid);
858         }
859 }
860
861 /*
862  * For a given list of rmap entries, update the rc bits in all ptes in shadow
863  * page tables for nested guests which are referenced by the rmap list.
864  */
865 void kvmhv_update_nest_rmap_rc_list(struct kvm *kvm, unsigned long *rmapp,
866                                     unsigned long clr, unsigned long set,
867                                     unsigned long hpa, unsigned long nbytes)
868 {
869         struct llist_node *entry = ((struct llist_head *) rmapp)->first;
870         struct rmap_nested *cursor;
871         unsigned long rmap, mask;
872
873         if ((clr | set) & ~(_PAGE_DIRTY | _PAGE_ACCESSED))
874                 return;
875
876         mask = PTE_RPN_MASK & ~(nbytes - 1);
877         hpa &= mask;
878
879         for_each_nest_rmap_safe(cursor, entry, &rmap)
880                 kvmhv_update_nest_rmap_rc(kvm, rmap, clr, set, hpa, mask);
881 }
882
883 static void kvmhv_remove_nest_rmap(struct kvm *kvm, u64 n_rmap,
884                                    unsigned long hpa, unsigned long mask)
885 {
886         struct kvm_nested_guest *gp;
887         unsigned long gpa;
888         unsigned int shift, lpid;
889         pte_t *ptep;
890
891         gpa = n_rmap & RMAP_NESTED_GPA_MASK;
892         lpid = (n_rmap & RMAP_NESTED_LPID_MASK) >> RMAP_NESTED_LPID_SHIFT;
893         gp = kvmhv_find_nested(kvm, lpid);
894         if (!gp)
895                 return;
896
897         /* Find and invalidate the pte */
898         ptep = find_kvm_nested_guest_pte(kvm, lpid, gpa, &shift);
899         /* Don't spuriously invalidate ptes if the pfn has changed */
900         if (ptep && pte_present(*ptep) && ((pte_val(*ptep) & mask) == hpa))
901                 kvmppc_unmap_pte(kvm, ptep, gpa, shift, NULL, gp->shadow_lpid);
902 }
903
904 static void kvmhv_remove_nest_rmap_list(struct kvm *kvm, unsigned long *rmapp,
905                                         unsigned long hpa, unsigned long mask)
906 {
907         struct llist_node *entry = llist_del_all((struct llist_head *) rmapp);
908         struct rmap_nested *cursor;
909         unsigned long rmap;
910
911         for_each_nest_rmap_safe(cursor, entry, &rmap) {
912                 kvmhv_remove_nest_rmap(kvm, rmap, hpa, mask);
913                 kfree(cursor);
914         }
915 }
916
917 /* called with kvm->mmu_lock held */
918 void kvmhv_remove_nest_rmap_range(struct kvm *kvm,
919                                   const struct kvm_memory_slot *memslot,
920                                   unsigned long gpa, unsigned long hpa,
921                                   unsigned long nbytes)
922 {
923         unsigned long gfn, end_gfn;
924         unsigned long addr_mask;
925
926         if (!memslot)
927                 return;
928         gfn = (gpa >> PAGE_SHIFT) - memslot->base_gfn;
929         end_gfn = gfn + (nbytes >> PAGE_SHIFT);
930
931         addr_mask = PTE_RPN_MASK & ~(nbytes - 1);
932         hpa &= addr_mask;
933
934         for (; gfn < end_gfn; gfn++) {
935                 unsigned long *rmap = &memslot->arch.rmap[gfn];
936                 kvmhv_remove_nest_rmap_list(kvm, rmap, hpa, addr_mask);
937         }
938 }
939
940 static void kvmhv_free_memslot_nest_rmap(struct kvm_memory_slot *free)
941 {
942         unsigned long page;
943
944         for (page = 0; page < free->npages; page++) {
945                 unsigned long rmap, *rmapp = &free->arch.rmap[page];
946                 struct rmap_nested *cursor;
947                 struct llist_node *entry;
948
949                 entry = llist_del_all((struct llist_head *) rmapp);
950                 for_each_nest_rmap_safe(cursor, entry, &rmap)
951                         kfree(cursor);
952         }
953 }
954
955 static bool kvmhv_invalidate_shadow_pte(struct kvm_vcpu *vcpu,
956                                         struct kvm_nested_guest *gp,
957                                         long gpa, int *shift_ret)
958 {
959         struct kvm *kvm = vcpu->kvm;
960         bool ret = false;
961         pte_t *ptep;
962         int shift;
963
964         spin_lock(&kvm->mmu_lock);
965         ptep = find_kvm_nested_guest_pte(kvm, gp->l1_lpid, gpa, &shift);
966         if (!shift)
967                 shift = PAGE_SHIFT;
968         if (ptep && pte_present(*ptep)) {
969                 kvmppc_unmap_pte(kvm, ptep, gpa, shift, NULL, gp->shadow_lpid);
970                 ret = true;
971         }
972         spin_unlock(&kvm->mmu_lock);
973
974         if (shift_ret)
975                 *shift_ret = shift;
976         return ret;
977 }
978
979 static inline int get_ric(unsigned int instr)
980 {
981         return (instr >> 18) & 0x3;
982 }
983
984 static inline int get_prs(unsigned int instr)
985 {
986         return (instr >> 17) & 0x1;
987 }
988
989 static inline int get_r(unsigned int instr)
990 {
991         return (instr >> 16) & 0x1;
992 }
993
994 static inline int get_lpid(unsigned long r_val)
995 {
996         return r_val & 0xffffffff;
997 }
998
999 static inline int get_is(unsigned long r_val)
1000 {
1001         return (r_val >> 10) & 0x3;
1002 }
1003
1004 static inline int get_ap(unsigned long r_val)
1005 {
1006         return (r_val >> 5) & 0x7;
1007 }
1008
1009 static inline long get_epn(unsigned long r_val)
1010 {
1011         return r_val >> 12;
1012 }
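
/*
 * A minimal sketch inverting the decode helpers above (demo only, not
 * how tlbie images are really built): pack RIC/PRS/R into an
 * instruction image and IS/AP/EPN into an rB value in the layout that
 * get_ric(), get_is() and friends expect.
 */
static inline unsigned int demo_tlbie_instr(int ric, int prs, int r)
{
        return ((ric & 0x3) << 18) | ((prs & 0x1) << 17) | ((r & 0x1) << 16);
}

static inline unsigned long demo_tlbie_rb(long epn, int ap, int is)
{
        return ((unsigned long)epn << 12) | ((is & 0x3UL) << 10) |
               ((ap & 0x7UL) << 5);
}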
1013
1014 static int kvmhv_emulate_tlbie_tlb_addr(struct kvm_vcpu *vcpu, int lpid,
1015                                         int ap, long epn)
1016 {
1017         struct kvm *kvm = vcpu->kvm;
1018         struct kvm_nested_guest *gp;
1019         long npages;
1020         int shift, shadow_shift;
1021         unsigned long addr;
1022
1023         shift = ap_to_shift(ap);
1024         addr = epn << 12;
1025         if (shift < 0)
1026                 /* Invalid ap encoding */
1027                 return -EINVAL;
1028
1029         addr &= ~((1UL << shift) - 1);
1030         npages = 1UL << (shift - PAGE_SHIFT);
1031
1032         gp = kvmhv_get_nested(kvm, lpid, false);
1033         if (!gp) /* No such guest -> nothing to do */
1034                 return 0;
1035         mutex_lock(&gp->tlb_lock);
1036
1037         /* There may be more than one host page backing this single guest pte */
1038         do {
1039                 kvmhv_invalidate_shadow_pte(vcpu, gp, addr, &shadow_shift);
1040
1041                 npages -= 1UL << (shadow_shift - PAGE_SHIFT);
1042                 addr += 1UL << shadow_shift;
1043         } while (npages > 0);
1044
1045         mutex_unlock(&gp->tlb_lock);
1046         kvmhv_put_nested(gp);
1047         return 0;
1048 }
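
/*
 * Worked example of the loop above, assuming 4 KB base pages: if L1
 * invalidates a 2 MB page (shift = 21) that the shadow table backs
 * with 4 KB ptes, npages starts at 512 and each iteration gets
 * shadow_shift = 12 back, stepping through all 512 shadow ptes; if
 * the shadow table happens to use a 2 MB pte, one iteration suffices.
 */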
1049
1050 static void kvmhv_emulate_tlbie_lpid(struct kvm_vcpu *vcpu,
1051                                      struct kvm_nested_guest *gp, int ric)
1052 {
1053         struct kvm *kvm = vcpu->kvm;
1054
1055         mutex_lock(&gp->tlb_lock);
1056         switch (ric) {
1057         case 0:
1058                 /* Invalidate TLB */
1059                 spin_lock(&kvm->mmu_lock);
1060                 kvmppc_free_pgtable_radix(kvm, gp->shadow_pgtable,
1061                                           gp->shadow_lpid);
1062                 kvmhv_flush_lpid(gp->shadow_lpid);
1063                 spin_unlock(&kvm->mmu_lock);
1064                 break;
1065         case 1:
1066                 /*
1067                  * Invalidate PWC
1068                  * We don't cache this -> nothing to do
1069                  */
1070                 break;
1071         case 2:
1072                 /* Invalidate TLB, PWC and caching of partition table entries */
1073                 kvmhv_flush_nested(gp);
1074                 break;
1075         default:
1076                 break;
1077         }
1078         mutex_unlock(&gp->tlb_lock);
1079 }
1080
1081 static void kvmhv_emulate_tlbie_all_lpid(struct kvm_vcpu *vcpu, int ric)
1082 {
1083         struct kvm *kvm = vcpu->kvm;
1084         struct kvm_nested_guest *gp;
1085         int i;
1086
1087         spin_lock(&kvm->mmu_lock);
1088         for (i = 0; i <= kvm->arch.max_nested_lpid; i++) {
1089                 gp = kvm->arch.nested_guests[i];
1090                 if (gp) {
1091                         spin_unlock(&kvm->mmu_lock);
1092                         kvmhv_emulate_tlbie_lpid(vcpu, gp, ric);
1093                         spin_lock(&kvm->mmu_lock);
1094                 }
1095         }
1096         spin_unlock(&kvm->mmu_lock);
1097 }
1098
1099 static int kvmhv_emulate_priv_tlbie(struct kvm_vcpu *vcpu, unsigned int instr,
1100                                     unsigned long rsval, unsigned long rbval)
1101 {
1102         struct kvm *kvm = vcpu->kvm;
1103         struct kvm_nested_guest *gp;
1104         int r, ric, prs, is, ap;
1105         int lpid;
1106         long epn;
1107         int ret = 0;
1108
1109         ric = get_ric(instr);
1110         prs = get_prs(instr);
1111         r = get_r(instr);
1112         lpid = get_lpid(rsval);
1113         is = get_is(rbval);
1114
1115         /*
1116          * These cases are invalid and are not handled:
1117          * r   != 1 -> Only radix supported
1118          * prs == 1 -> Not HV privileged
1119          * ric == 3 -> No cluster bombs for radix
1120          * is  == 1 -> Partition scoped translations not associated with pid
1121          * (!is) && (ric == 1 || ric == 2) -> Not supported by ISA
1122          */
1123         if ((!r) || (prs) || (ric == 3) || (is == 1) ||
1124             ((!is) && (ric == 1 || ric == 2)))
1125                 return -EINVAL;
1126
1127         switch (is) {
1128         case 0:
1129                 /*
1130                  * We know ric == 0
1131                  * Invalidate TLB for a given target address
1132                  */
1133                 epn = get_epn(rbval);
1134                 ap = get_ap(rbval);
1135                 ret = kvmhv_emulate_tlbie_tlb_addr(vcpu, lpid, ap, epn);
1136                 break;
1137         case 2:
1138                 /* Invalidate matching LPID */
1139                 gp = kvmhv_get_nested(kvm, lpid, false);
1140                 if (gp) {
1141                         kvmhv_emulate_tlbie_lpid(vcpu, gp, ric);
1142                         kvmhv_put_nested(gp);
1143                 }
1144                 break;
1145         case 3:
1146                 /* Invalidate ALL LPIDs */
1147                 kvmhv_emulate_tlbie_all_lpid(vcpu, ric);
1148                 break;
1149         default:
1150                 ret = -EINVAL;
1151                 break;
1152         }
1153
1154         return ret;
1155 }
1156
1157 /*
1158  * This handles the H_TLB_INVALIDATE hcall.
1159  * Parameters are (r4) tlbie instruction code, (r5) rS contents,
1160  * (r6) rB contents.
1161  */
1162 long kvmhv_do_nested_tlbie(struct kvm_vcpu *vcpu)
1163 {
1164         int ret;
1165
1166         ret = kvmhv_emulate_priv_tlbie(vcpu, kvmppc_get_gpr(vcpu, 4),
1167                         kvmppc_get_gpr(vcpu, 5), kvmppc_get_gpr(vcpu, 6));
1168         if (ret)
1169                 return H_PARAMETER;
1170         return H_SUCCESS;
1171 }
1172
1173 /* Used to convert a nested guest real address to an L1 guest real address */
1174 static int kvmhv_translate_addr_nested(struct kvm_vcpu *vcpu,
1175                                        struct kvm_nested_guest *gp,
1176                                        unsigned long n_gpa, unsigned long dsisr,
1177                                        struct kvmppc_pte *gpte_p)
1178 {
1179         u64 fault_addr, flags = dsisr & DSISR_ISSTORE;
1180         int ret;
1181
1182         ret = kvmppc_mmu_walk_radix_tree(vcpu, n_gpa, gpte_p, gp->l1_gr_to_hr,
1183                                          &fault_addr);
1184
1185         if (ret) {
1186                 /* We didn't find a pte */
1187                 if (ret == -EINVAL) {
1188                         /* Unsupported mmu config */
1189                         flags |= DSISR_UNSUPP_MMU;
1190                 } else if (ret == -ENOENT) {
1191                         /* No translation found */
1192                         flags |= DSISR_NOHPTE;
1193                 } else if (ret == -EFAULT) {
1194                         /* Couldn't access L1 real address */
1195                         flags |= DSISR_PRTABLE_FAULT;
1196                         vcpu->arch.fault_gpa = fault_addr;
1197                 } else {
1198                         /* Unknown error */
1199                         return ret;
1200                 }
1201                 goto forward_to_l1;
1202         } else {
1203                 /* We found a pte -> check permissions */
1204                 if (dsisr & DSISR_ISSTORE) {
1205                         /* Can we write? */
1206                         if (!gpte_p->may_write) {
1207                                 flags |= DSISR_PROTFAULT;
1208                                 goto forward_to_l1;
1209                         }
1210                 } else if (vcpu->arch.trap == BOOK3S_INTERRUPT_H_INST_STORAGE) {
1211                         /* Can we execute? */
1212                         if (!gpte_p->may_execute) {
1213                                 flags |= SRR1_ISI_N_G_OR_CIP;
1214                                 goto forward_to_l1;
1215                         }
1216                 } else {
1217                         /* Can we read? */
1218                         if (!gpte_p->may_read && !gpte_p->may_write) {
1219                                 flags |= DSISR_PROTFAULT;
1220                                 goto forward_to_l1;
1221                         }
1222                 }
1223         }
1224
1225         return 0;
1226
1227 forward_to_l1:
1228         vcpu->arch.fault_dsisr = flags;
1229         if (vcpu->arch.trap == BOOK3S_INTERRUPT_H_INST_STORAGE) {
1230                 vcpu->arch.shregs.msr &= SRR1_MSR_BITS;
1231                 vcpu->arch.shregs.msr |= flags;
1232         }
1233         return RESUME_HOST;
1234 }
1235
1236 static long kvmhv_handle_nested_set_rc(struct kvm_vcpu *vcpu,
1237                                        struct kvm_nested_guest *gp,
1238                                        unsigned long n_gpa,
1239                                        struct kvmppc_pte gpte,
1240                                        unsigned long dsisr)
1241 {
1242         struct kvm *kvm = vcpu->kvm;
1243         bool writing = !!(dsisr & DSISR_ISSTORE);
1244         u64 pgflags;
1245         long ret;
1246
1247         /* Are the rc bits set in the L1 partition scoped pte? */
1248         pgflags = _PAGE_ACCESSED;
1249         if (writing)
1250                 pgflags |= _PAGE_DIRTY;
1251         if (pgflags & ~gpte.rc)
1252                 return RESUME_HOST;
1253
1254         spin_lock(&kvm->mmu_lock);
1255         /* Set the rc bit in the pte of our (L0) pgtable for the L1 guest */
1256         ret = kvmppc_hv_handle_set_rc(kvm, false, writing,
1257                                       gpte.raddr, kvm->arch.lpid);
1258         if (!ret) {
1259                 ret = -EINVAL;
1260                 goto out_unlock;
1261         }
1262
1263         /* Set the rc bit in the pte of the shadow_pgtable for the nested guest */
1264         ret = kvmppc_hv_handle_set_rc(kvm, true, writing,
1265                                       n_gpa, gp->l1_lpid);
1266         if (!ret)
1267                 ret = -EINVAL;
1268         else
1269                 ret = 0;
1270
1271 out_unlock:
1272         spin_unlock(&kvm->mmu_lock);
1273         return ret;
1274 }
1275
1276 static inline int kvmppc_radix_level_to_shift(int level)
1277 {
1278         switch (level) {
1279         case 2:
1280                 return PUD_SHIFT;
1281         case 1:
1282                 return PMD_SHIFT;
1283         default:
1284                 return PAGE_SHIFT;
1285         }
1286 }
1287
1288 static inline int kvmppc_radix_shift_to_level(int shift)
1289 {
1290         if (shift == PUD_SHIFT)
1291                 return 2;
1292         if (shift == PMD_SHIFT)
1293                 return 1;
1294         if (shift == PAGE_SHIFT)
1295                 return 0;
1296         WARN_ON_ONCE(1);
1297         return 0;
1298 }
1299
1300 /* called with gp->tlb_lock held */
1301 static long int __kvmhv_nested_page_fault(struct kvm_vcpu *vcpu,
1302                                           struct kvm_nested_guest *gp)
1303 {
1304         struct kvm *kvm = vcpu->kvm;
1305         struct kvm_memory_slot *memslot;
1306         struct rmap_nested *n_rmap;
1307         struct kvmppc_pte gpte;
1308         pte_t pte, *pte_p;
1309         unsigned long mmu_seq;
1310         unsigned long dsisr = vcpu->arch.fault_dsisr;
1311         unsigned long ea = vcpu->arch.fault_dar;
1312         unsigned long *rmapp;
1313         unsigned long n_gpa, gpa, gfn, perm = 0UL;
1314         unsigned int shift, l1_shift, level;
1315         bool writing = !!(dsisr & DSISR_ISSTORE);
1316         bool kvm_ro = false;
1317         long int ret;
1318
1319         if (!gp->l1_gr_to_hr) {
1320                 kvmhv_update_ptbl_cache(gp);
1321                 if (!gp->l1_gr_to_hr)
1322                         return RESUME_HOST;
1323         }
1324
1325         /* Convert the nested guest real address into an L1 guest real address */
1326
1327         n_gpa = vcpu->arch.fault_gpa & ~0xF000000000000FFFULL;
1328         if (!(dsisr & DSISR_PRTABLE_FAULT))
1329                 n_gpa |= ea & 0xFFF;
1330         ret = kvmhv_translate_addr_nested(vcpu, gp, n_gpa, dsisr, &gpte);
1331
1332         /*
1333          * If the hardware found a translation but we don't now have a usable
1334          * translation in the l1 partition-scoped tree, remove the shadow pte
1335          * and let the guest retry.
1336          */
1337         if (ret == RESUME_HOST &&
1338             (dsisr & (DSISR_PROTFAULT | DSISR_BADACCESS | DSISR_NOEXEC_OR_G |
1339                       DSISR_BAD_COPYPASTE)))
1340                 goto inval;
1341         if (ret)
1342                 return ret;
1343
1344         /* Failed to set the reference/change bits */
1345         if (dsisr & DSISR_SET_RC) {
1346                 ret = kvmhv_handle_nested_set_rc(vcpu, gp, n_gpa, gpte, dsisr);
1347                 if (ret == RESUME_HOST)
1348                         return ret;
1349                 if (ret)
1350                         goto inval;
1351                 dsisr &= ~DSISR_SET_RC;
1352                 if (!(dsisr & (DSISR_BAD_FAULT_64S | DSISR_NOHPTE |
1353                                DSISR_PROTFAULT)))
1354                         return RESUME_GUEST;
1355         }
1356
1357         /*
1358          * We took an HISI or HDSI while we were running a nested guest which
1359          * means we have no partition scoped translation for that. This means
1360          * we need to insert a pte for the mapping into our shadow_pgtable.
1361          */
1362
1363         l1_shift = gpte.page_shift;
1364         if (l1_shift < PAGE_SHIFT) {
1365                 /* We don't support l1 using a page size smaller than our own */
1366                 pr_err("KVM: L1 guest page shift (%d) less than our own (%d)\n",
1367                         l1_shift, PAGE_SHIFT);
1368                 return -EINVAL;
1369         }
1370         gpa = gpte.raddr;
1371         gfn = gpa >> PAGE_SHIFT;
1372
1373         /* 1. Get the corresponding host memslot */
1374
1375         memslot = gfn_to_memslot(kvm, gfn);
1376         if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
1377                 if (dsisr & (DSISR_PRTABLE_FAULT | DSISR_BADACCESS)) {
1378                         /* unusual error -> reflect to the guest as a DSI */
1379                         kvmppc_core_queue_data_storage(vcpu, ea, dsisr);
1380                         return RESUME_GUEST;
1381                 }
1382
1383                 /* passthrough of emulated MMIO case */
1384                 return kvmppc_hv_emulate_mmio(vcpu, gpa, ea, writing);
1385         }
1386         if (memslot->flags & KVM_MEM_READONLY) {
1387                 if (writing) {
1388                         /* Give the guest a DSI */
1389                         kvmppc_core_queue_data_storage(vcpu, ea,
1390                                         DSISR_ISSTORE | DSISR_PROTFAULT);
1391                         return RESUME_GUEST;
1392                 }
1393                 kvm_ro = true;
1394         }
1395
1396         /* 2. Find the host pte for this L1 guest real address */
1397
1398         /* Used to check for invalidations in progress */
1399         mmu_seq = kvm->mmu_notifier_seq;
1400         smp_rmb();
1401
1402         /* See if we can find a translation in our partition scoped tables for L1 */
1403         pte = __pte(0);
1404         spin_lock(&kvm->mmu_lock);
1405         pte_p = find_kvm_secondary_pte(kvm, gpa, &shift);
1406         if (!shift)
1407                 shift = PAGE_SHIFT;
1408         if (pte_p)
1409                 pte = *pte_p;
1410         spin_unlock(&kvm->mmu_lock);
1411
1412         if (!pte_present(pte) || (writing && !(pte_val(pte) & _PAGE_WRITE))) {
1413                 /* No suitable pte found -> try to insert a mapping */
1414                 ret = kvmppc_book3s_instantiate_page(vcpu, gpa, memslot,
1415                                         writing, kvm_ro, &pte, &level);
1416                 if (ret == -EAGAIN)
1417                         return RESUME_GUEST;
1418                 else if (ret)
1419                         return ret;
1420                 shift = kvmppc_radix_level_to_shift(level);
1421         }
1422         /* Align gfn to the start of the page */
1423         gfn = (gpa & ~((1UL << shift) - 1)) >> PAGE_SHIFT;
1424
1425         /* 3. Compute the pte we need to insert for nest_gpa -> host r_addr */
1426
1427         /* The permissions are the combination of the host and l1 guest ptes */
1428         perm |= gpte.may_read ? 0UL : _PAGE_READ;
1429         perm |= gpte.may_write ? 0UL : _PAGE_WRITE;
1430         perm |= gpte.may_execute ? 0UL : _PAGE_EXEC;
1431         /* Only set accessed/dirty (rc) bits if set in host and l1 guest ptes */
1432         perm |= (gpte.rc & _PAGE_ACCESSED) ? 0UL : _PAGE_ACCESSED;
1433         perm |= ((gpte.rc & _PAGE_DIRTY) && writing) ? 0UL : _PAGE_DIRTY;
1434         pte = __pte(pte_val(pte) & ~perm);
1435
1436         /* What size pte can we insert? */
1437         if (shift > l1_shift) {
1438                 u64 mask;
1439                 unsigned int actual_shift = PAGE_SHIFT;
1440                 if (PMD_SHIFT < l1_shift)
1441                         actual_shift = PMD_SHIFT;
1442                 mask = (1UL << shift) - (1UL << actual_shift);
1443                 pte = __pte(pte_val(pte) | (gpa & mask));
1444                 shift = actual_shift;
1445         }
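        /*
         * Worked example, assuming 4 KB base pages: if the host maps
         * this gpa with a 2 MB pte (shift = 21) but L1 mapped only a
         * 64 KB page (l1_shift = 16), a 2 MB shadow pte would be too
         * big. actual_shift stays at PAGE_SHIFT and gpa & mask folds
         * the offset within the 2 MB host page into the pfn, so the
         * 4 KB shadow pte points at the correct sub-page.
         */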
1446         level = kvmppc_radix_shift_to_level(shift);
1447         n_gpa &= ~((1UL << shift) - 1);
1448
1449         /* 4. Insert the pte into our shadow_pgtable */
1450
1451         n_rmap = kzalloc(sizeof(*n_rmap), GFP_KERNEL);
1452         if (!n_rmap)
1453                 return RESUME_GUEST; /* Let the guest try again */
1454         n_rmap->rmap = (n_gpa & RMAP_NESTED_GPA_MASK) |
1455                 (((unsigned long) gp->l1_lpid) << RMAP_NESTED_LPID_SHIFT);
1456         rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn];
1457         ret = kvmppc_create_pte(kvm, gp->shadow_pgtable, pte, n_gpa, level,
1458                                 mmu_seq, gp->shadow_lpid, rmapp, &n_rmap);
1459         kfree(n_rmap);
1460         if (ret == -EAGAIN)
1461                 ret = RESUME_GUEST;     /* Let the guest try again */
1462
1463         return ret;
1464
1465  inval:
1466         kvmhv_invalidate_shadow_pte(vcpu, gp, n_gpa, NULL);
1467         return RESUME_GUEST;
1468 }
1469
1470 long int kvmhv_nested_page_fault(struct kvm_vcpu *vcpu)
1471 {
1472         struct kvm_nested_guest *gp = vcpu->arch.nested;
1473         long int ret;
1474
1475         mutex_lock(&gp->tlb_lock);
1476         ret = __kvmhv_nested_page_fault(vcpu, gp);
1477         mutex_unlock(&gp->tlb_lock);
1478         return ret;
1479 }
1480
1481 int kvmhv_nested_next_lpid(struct kvm *kvm, int lpid)
1482 {
1483         int ret = -1;
1484
1485         spin_lock(&kvm->mmu_lock);
1486         while (++lpid <= kvm->arch.max_nested_lpid) {
1487                 if (kvm->arch.nested_guests[lpid]) {
1488                         ret = lpid;
1489                         break;
1490                 }
1491         }
1492         spin_unlock(&kvm->mmu_lock);
1493         return ret;
1494 }