arch/powerpc/kvm/e500_mmu.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2008-2013 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Yu Liu, yu.liu@freescale.com
 *         Scott Wood, scottwood@freescale.com
 *         Ashish Kalra, ashish.kalra@freescale.com
 *         Varun Sethi, varun.sethi@freescale.com
 *         Alexander Graf, agraf@suse.de
 *
 * Description:
 * This file is based on arch/powerpc/kvm/44x_tlb.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/rwsem.h>
#include <linux/vmalloc.h>
#include <linux/hugetlb.h>
#include <asm/kvm_ppc.h>

#include "e500.h"
#include "trace_booke.h"
#include "timing.h"
#include "e500_mmu_host.h"

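/*
 * Pick the next TLB0 victim way, round-robin: gtlb_nv[0] is the
 * per-vcpu next-victim pointer, wrapping at the number of ways.
 */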
static inline unsigned int gtlb0_get_next_victim(
                struct kvmppc_vcpu_e500 *vcpu_e500)
{
        unsigned int victim;

        victim = vcpu_e500->gtlb_nv[0]++;
        if (unlikely(vcpu_e500->gtlb_nv[0] >= vcpu_e500->gtlb_params[0].ways))
                vcpu_e500->gtlb_nv[0] = 0;

        return victim;
}

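/*
 * Compute the base index of the TLB0 set that maps the given address:
 * the set is selected by the low EPN bits, and each set occupies
 * 'ways' consecutive entries in the flat gtlb_arch array.
 */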
static int tlb0_set_base(gva_t addr, int sets, int ways)
{
        int set_base;

        set_base = (addr >> PAGE_SHIFT) & (sets - 1);
        set_base *= ways;

        return set_base;
}

static int gtlb0_set_base(struct kvmppc_vcpu_e500 *vcpu_e500, gva_t addr)
{
        return tlb0_set_base(addr, vcpu_e500->gtlb_params[0].sets,
                             vcpu_e500->gtlb_params[0].ways);
}

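/*
 * Convert MAS0[ESEL] into an index into the guest TLB array: for TLB0
 * the way number is combined with the set derived from MAS2[EPN], for
 * TLB1 it is simply clamped to the number of entries.
 */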
static unsigned int get_tlb_esel(struct kvm_vcpu *vcpu, int tlbsel)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        int esel = get_tlb_esel_bit(vcpu);

        if (tlbsel == 0) {
                esel &= vcpu_e500->gtlb_params[0].ways - 1;
                esel += gtlb0_set_base(vcpu_e500, vcpu->arch.shared->mas2);
        } else {
                esel &= vcpu_e500->gtlb_params[tlbsel].entries - 1;
        }

        return esel;
}

/* Search the guest TLB for a matching entry. */
static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500,
                gva_t eaddr, int tlbsel, unsigned int pid, int as)
{
        int size = vcpu_e500->gtlb_params[tlbsel].entries;
        unsigned int set_base, offset;
        int i;

        if (tlbsel == 0) {
                set_base = gtlb0_set_base(vcpu_e500, eaddr);
                size = vcpu_e500->gtlb_params[0].ways;
        } else {
                if (eaddr < vcpu_e500->tlb1_min_eaddr ||
                                eaddr > vcpu_e500->tlb1_max_eaddr)
                        return -1;
                set_base = 0;
        }

        offset = vcpu_e500->gtlb_offset[tlbsel];

        for (i = 0; i < size; i++) {
                struct kvm_book3e_206_tlb_entry *tlbe =
                        &vcpu_e500->gtlb_arch[offset + set_base + i];
                unsigned int tid;

                if (eaddr < get_tlb_eaddr(tlbe))
                        continue;

                if (eaddr > get_tlb_end(tlbe))
                        continue;

                tid = get_tlb_tid(tlbe);
                if (tid && (tid != pid))
                        continue;

                if (!get_tlb_v(tlbe))
                        continue;

                if (get_tlb_ts(tlbe) != as && as != -1)
                        continue;

                return set_base + i;
        }

        return -1;
}

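/*
 * Load the MAS registers with the state real hardware would provide on
 * a TLB miss, using the defaults from MAS4, so the guest's miss handler
 * can tlbwe the faulting translation straight in.
 */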
static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
                gva_t eaddr, int as)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        unsigned int victim, tsized;
        int tlbsel;

        /* since we only have two TLBs, only the lower bit is used. */
        tlbsel = (vcpu->arch.shared->mas4 >> 28) & 0x1;
        victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0;
        tsized = (vcpu->arch.shared->mas4 >> 7) & 0x1f;

        vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
                | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
        vcpu->arch.shared->mas1 = MAS1_VALID | (as ? MAS1_TS : 0)
                | MAS1_TID(get_tlbmiss_tid(vcpu))
                | MAS1_TSIZE(tsized);
        vcpu->arch.shared->mas2 = (eaddr & MAS2_EPN)
                | (vcpu->arch.shared->mas4 & MAS2_ATTRIB_MASK);
        vcpu->arch.shared->mas7_3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
        vcpu->arch.shared->mas6 = (vcpu->arch.shared->mas6 & MAS6_SPID1)
                | (get_cur_pid(vcpu) << 16)
                | (as ? MAS6_SAS : 0);
}

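/*
 * Rescan all TLB1 entries and recompute the cached [min, max] effective
 * address range covered by valid entries, used to short-cut TLB1
 * searches for addresses that cannot possibly hit.
 */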
static void kvmppc_recalc_tlb1map_range(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        int size = vcpu_e500->gtlb_params[1].entries;
        unsigned int offset;
        gva_t eaddr;
        int i;

        vcpu_e500->tlb1_min_eaddr = ~0UL;
        vcpu_e500->tlb1_max_eaddr = 0;
        offset = vcpu_e500->gtlb_offset[1];

        for (i = 0; i < size; i++) {
                struct kvm_book3e_206_tlb_entry *tlbe =
                        &vcpu_e500->gtlb_arch[offset + i];

                if (!get_tlb_v(tlbe))
                        continue;

                eaddr = get_tlb_eaddr(tlbe);
                vcpu_e500->tlb1_min_eaddr =
                                min(vcpu_e500->tlb1_min_eaddr, eaddr);

                eaddr = get_tlb_end(tlbe);
                vcpu_e500->tlb1_max_eaddr =
                                max(vcpu_e500->tlb1_max_eaddr, eaddr);
        }
}

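/*
 * Return true if the entry being removed bounds the cached TLB1 range,
 * in which case the range must be recomputed with a full rescan.
 */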
static int kvmppc_need_recalc_tlb1map_range(struct kvmppc_vcpu_e500 *vcpu_e500,
                                struct kvm_book3e_206_tlb_entry *gtlbe)
{
        unsigned long start, end, size;

        size = get_tlb_bytes(gtlbe);
        start = get_tlb_eaddr(gtlbe) & ~(size - 1);
        end = start + size - 1;

        return vcpu_e500->tlb1_min_eaddr == start ||
                        vcpu_e500->tlb1_max_eaddr == end;
}

/* This function should be called when adding a new valid tlb entry. */
static void kvmppc_set_tlb1map_range(struct kvm_vcpu *vcpu,
                                struct kvm_book3e_206_tlb_entry *gtlbe)
{
        unsigned long start, end, size;
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

        if (!get_tlb_v(gtlbe))
                return;

        size = get_tlb_bytes(gtlbe);
        start = get_tlb_eaddr(gtlbe) & ~(size - 1);
        end = start + size - 1;

        vcpu_e500->tlb1_min_eaddr = min(vcpu_e500->tlb1_min_eaddr, start);
        vcpu_e500->tlb1_max_eaddr = max(vcpu_e500->tlb1_max_eaddr, end);
}

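/*
 * Mark a guest TLB entry invalid by clearing MAS1, unless the entry is
 * protected by IPROT.  Updates the cached TLB1 range if needed.
 */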
static inline int kvmppc_e500_gtlbe_invalidate(
                                struct kvmppc_vcpu_e500 *vcpu_e500,
                                int tlbsel, int esel)
{
        struct kvm_book3e_206_tlb_entry *gtlbe =
                get_entry(vcpu_e500, tlbsel, esel);

        if (unlikely(get_tlb_iprot(gtlbe)))
                return -1;

        if (tlbsel == 1 && kvmppc_need_recalc_tlb1map_range(vcpu_e500, gtlbe))
                kvmppc_recalc_tlb1map_range(vcpu_e500);

        gtlbe->mas1 = 0;

        return 0;
}

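/*
 * Emulate a write to MMUCSR0: the TLB0FI/TLB1FI bits flash-invalidate
 * the corresponding guest TLB (IPROT entries excepted).
 */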
int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500, ulong value)
{
        int esel;

        if (value & MMUCSR0_TLB0FI)
                for (esel = 0; esel < vcpu_e500->gtlb_params[0].entries; esel++)
                        kvmppc_e500_gtlbe_invalidate(vcpu_e500, 0, esel);
        if (value & MMUCSR0_TLB1FI)
                for (esel = 0; esel < vcpu_e500->gtlb_params[1].entries; esel++)
                        kvmppc_e500_gtlbe_invalidate(vcpu_e500, 1, esel);

        /* Invalidate all host shadow mappings */
        kvmppc_core_flush_tlb(&vcpu_e500->vcpu);

        return EMULATE_DONE;
}

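/*
 * Emulate the tlbivax instruction.  Flags encoded in the low bits of
 * the effective address select invalidate-all (IA) and the target TLB;
 * without IA, only the entry matching 'ea' for the current PID is
 * invalidated.
 */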
int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, gva_t ea)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        unsigned int ia;
        int esel, tlbsel;

        ia = (ea >> 2) & 0x1;

        /* since we only have two TLBs, only the lower bit is used. */
        tlbsel = (ea >> 3) & 0x1;

        if (ia) {
                /* invalidate all entries */
                for (esel = 0; esel < vcpu_e500->gtlb_params[tlbsel].entries;
                     esel++)
                        kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
        } else {
                ea &= 0xfffff000;
                esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel,
                                get_cur_pid(vcpu), -1);
                if (esel >= 0)
                        kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
        }

        /* Invalidate all host shadow mappings */
        kvmppc_core_flush_tlb(&vcpu_e500->vcpu);

        return EMULATE_DONE;
}

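/*
 * tlbilx helper: invalidate every entry in the selected guest TLB
 * (type 0), or only those whose TID matches 'pid' (type 1).
 */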
static void tlbilx_all(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
                       int pid, int type)
{
        struct kvm_book3e_206_tlb_entry *tlbe;
        int tid, esel;

        /* invalidate all entries */
        for (esel = 0; esel < vcpu_e500->gtlb_params[tlbsel].entries; esel++) {
                tlbe = get_entry(vcpu_e500, tlbsel, esel);
                tid = get_tlb_tid(tlbe);
                if (type == 0 || tid == pid) {
                        inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
                        kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
                }
        }
}

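/*
 * tlbilx helper: invalidate the single entry (searching both TLBs)
 * that maps 'ea' for the given PID.
 */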
static void tlbilx_one(struct kvmppc_vcpu_e500 *vcpu_e500, int pid,
                       gva_t ea)
{
        int tlbsel, esel;

        for (tlbsel = 0; tlbsel < 2; tlbsel++) {
                esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, -1);
                if (esel >= 0) {
                        inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
                        kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
                        break;
                }
        }
}

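/*
 * Emulate the tlbilx instruction: types 0 and 1 invalidate by PID
 * across both TLBs, type 3 invalidates the single entry mapping 'ea'.
 */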
int kvmppc_e500_emul_tlbilx(struct kvm_vcpu *vcpu, int type, gva_t ea)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        int pid = get_cur_spid(vcpu);

        if (type == 0 || type == 1) {
                tlbilx_all(vcpu_e500, 0, pid, type);
                tlbilx_all(vcpu_e500, 1, pid, type);
        } else if (type == 3) {
                tlbilx_one(vcpu_e500, pid, ea);
        }

        return EMULATE_DONE;
}

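/*
 * Emulate the tlbre instruction: read the guest TLB entry selected by
 * MAS0 back into MAS1/MAS2/MAS7_3, and refresh MAS0[NV].
 */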
int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        int tlbsel, esel;
        struct kvm_book3e_206_tlb_entry *gtlbe;

        tlbsel = get_tlb_tlbsel(vcpu);
        esel = get_tlb_esel(vcpu, tlbsel);

        gtlbe = get_entry(vcpu_e500, tlbsel, esel);
        vcpu->arch.shared->mas0 &= ~MAS0_NV(~0);
        vcpu->arch.shared->mas0 |= MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
        vcpu->arch.shared->mas1 = gtlbe->mas1;
        vcpu->arch.shared->mas2 = gtlbe->mas2;
        vcpu->arch.shared->mas7_3 = gtlbe->mas7_3;

        return EMULATE_DONE;
}

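/*
 * Emulate the tlbsx instruction: search both guest TLBs for 'ea' under
 * MAS6[SPID]/MAS6[SAS].  On a hit, load the MAS registers from the
 * matching entry; on a miss, set them up for a subsequent tlbwe, much
 * as a TLB miss would.
 */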
int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, gva_t ea)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        int as = !!get_cur_sas(vcpu);
        unsigned int pid = get_cur_spid(vcpu);
        int esel, tlbsel;
        struct kvm_book3e_206_tlb_entry *gtlbe = NULL;

        for (tlbsel = 0; tlbsel < 2; tlbsel++) {
                esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, as);
                if (esel >= 0) {
                        gtlbe = get_entry(vcpu_e500, tlbsel, esel);
                        break;
                }
        }

        if (gtlbe) {
                esel &= vcpu_e500->gtlb_params[tlbsel].ways - 1;

                vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(esel)
                        | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
                vcpu->arch.shared->mas1 = gtlbe->mas1;
                vcpu->arch.shared->mas2 = gtlbe->mas2;
                vcpu->arch.shared->mas7_3 = gtlbe->mas7_3;
        } else {
                int victim;

                /* since we only have two TLBs, only the lower bit is used. */
                tlbsel = vcpu->arch.shared->mas4 >> 28 & 0x1;
                victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0;

                vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel)
                        | MAS0_ESEL(victim)
                        | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
                vcpu->arch.shared->mas1 =
                          (vcpu->arch.shared->mas6 & MAS6_SPID0)
                        | ((vcpu->arch.shared->mas6 & MAS6_SAS) ? MAS1_TS : 0)
                        | (vcpu->arch.shared->mas4 & MAS4_TSIZED(~0));
                vcpu->arch.shared->mas2 &= MAS2_EPN;
                vcpu->arch.shared->mas2 |= vcpu->arch.shared->mas4 &
                                           MAS2_ATTRIB_MASK;
                vcpu->arch.shared->mas7_3 &= MAS3_U0 | MAS3_U1 |
                                             MAS3_U2 | MAS3_U3;
        }

        kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS);
        return EMULATE_DONE;
}

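/*
 * Emulate the tlbwe instruction: write the MAS register contents into
 * the selected guest TLB entry, dropping any host shadow mappings for
 * the old translation, and premap the new one when it is safe to do so.
 */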
int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        struct kvm_book3e_206_tlb_entry *gtlbe;
        int tlbsel, esel;
        int recal = 0;
        int idx;

        tlbsel = get_tlb_tlbsel(vcpu);
        esel = get_tlb_esel(vcpu, tlbsel);

        gtlbe = get_entry(vcpu_e500, tlbsel, esel);

        if (get_tlb_v(gtlbe)) {
                inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
                if ((tlbsel == 1) &&
                        kvmppc_need_recalc_tlb1map_range(vcpu_e500, gtlbe))
                        recal = 1;
        }

        gtlbe->mas1 = vcpu->arch.shared->mas1;
        gtlbe->mas2 = vcpu->arch.shared->mas2;
        if (!(vcpu->arch.shared->msr & MSR_CM))
                gtlbe->mas2 &= 0xffffffffUL;
        gtlbe->mas7_3 = vcpu->arch.shared->mas7_3;

        trace_kvm_booke206_gtlb_write(vcpu->arch.shared->mas0, gtlbe->mas1,
                                      gtlbe->mas2, gtlbe->mas7_3);

        if (tlbsel == 1) {
                /*
                 * If a valid tlb1 entry is overwritten then recalculate the
                 * min/max TLB1 map address range; otherwise there is no need
                 * to scan the tlb1 array.
                 */
                if (recal)
                        kvmppc_recalc_tlb1map_range(vcpu_e500);
                else
                        kvmppc_set_tlb1map_range(vcpu, gtlbe);
        }

        idx = srcu_read_lock(&vcpu->kvm->srcu);

        /* If the new entry is host-safe, premap it to avoid a later fault. */
        if (tlbe_is_host_safe(vcpu, gtlbe)) {
                u64 eaddr = get_tlb_eaddr(gtlbe);
                u64 raddr = get_tlb_raddr(gtlbe);

                if (tlbsel == 0) {
                        gtlbe->mas1 &= ~MAS1_TSIZE(~0);
                        gtlbe->mas1 |= MAS1_TSIZE(BOOK3E_PAGESZ_4K);
                }

                /* Premap the faulting page */
                kvmppc_mmu_map(vcpu, eaddr, raddr, index_of(tlbsel, esel));
        }

        srcu_read_unlock(&vcpu->kvm->srcu, idx);

        kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS);
        return EMULATE_DONE;
}

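/*
 * Search both guest TLBs for 'eaddr' and return a combined
 * tlbsel/esel index, or -1 if no entry matches.
 */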
static int kvmppc_e500_tlb_search(struct kvm_vcpu *vcpu,
                                  gva_t eaddr, unsigned int pid, int as)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        int esel, tlbsel;

        for (tlbsel = 0; tlbsel < 2; tlbsel++) {
                esel = kvmppc_e500_tlb_index(vcpu_e500, eaddr, tlbsel, pid, as);
                if (esel >= 0)
                        return index_of(tlbsel, esel);
        }

        return -1;
}

/* 'linear_address' is actually an encoding of AS|PID|EADDR. */
int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
                               struct kvm_translation *tr)
{
        int index;
        gva_t eaddr;
        u8 pid;
        u8 as;

        eaddr = tr->linear_address;
        pid = (tr->linear_address >> 32) & 0xff;
        as = (tr->linear_address >> 40) & 0x1;

        index = kvmppc_e500_tlb_search(vcpu, eaddr, pid, as);
        if (index < 0) {
                tr->valid = 0;
                return 0;
        }

        tr->physical_address = kvmppc_mmu_xlate(vcpu, index, eaddr);
        /* XXX what do "writeable" and "usermode" even mean? */
        tr->valid = 1;

        return 0;
}

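/* Instruction/data TLB lookup and miss delivery, keyed off MSR[IS]/MSR[DS]. */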
int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
        unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);

        return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
}

int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
        unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);

        return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
}

void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu)
{
        unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);

        kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.regs.nip, as);
}

void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu)
{
        unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);

        kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.fault_dear, as);
}

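/*
 * Translate an effective address through the guest TLB entry identified
 * by 'index' and return the guest physical address.
 */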
gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int index,
                        gva_t eaddr)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        struct kvm_book3e_206_tlb_entry *gtlbe;
        u64 pgmask;

        gtlbe = get_entry(vcpu_e500, tlbsel_of(index), esel_of(index));
        pgmask = get_tlb_bytes(gtlbe) - 1;

        return get_tlb_raddr(gtlbe) | (eaddr & pgmask);
}

/*****************************************/

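/*
 * Free all guest TLB state: shadow mappings, per-entry metadata, the
 * guest-to-host TLB1 map, and the TLB entry array itself (either the
 * kernel-allocated array or the user pages shared via KVM_CONFIG_TLB).
 */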
static void free_gtlb(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        int i;

        kvmppc_core_flush_tlb(&vcpu_e500->vcpu);
        kfree(vcpu_e500->g2h_tlb1_map);
        kfree(vcpu_e500->gtlb_priv[0]);
        kfree(vcpu_e500->gtlb_priv[1]);

        if (vcpu_e500->shared_tlb_pages) {
                vfree((void *)(round_down((uintptr_t)vcpu_e500->gtlb_arch,
                                          PAGE_SIZE)));

                for (i = 0; i < vcpu_e500->num_shared_tlb_pages; i++) {
                        set_page_dirty_lock(vcpu_e500->shared_tlb_pages[i]);
                        put_page(vcpu_e500->shared_tlb_pages[i]);
                }

                vcpu_e500->num_shared_tlb_pages = 0;

                kfree(vcpu_e500->shared_tlb_pages);
                vcpu_e500->shared_tlb_pages = NULL;
        } else {
                kfree(vcpu_e500->gtlb_arch);
        }

        vcpu_e500->gtlb_arch = NULL;
}

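/*
 * sregs and ONE_REG accessors for the MAS registers and the emulated
 * MMU configuration.  The config registers are read-mostly: userspace
 * may only write back the values KVM already reports.
 */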
void kvmppc_get_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
        sregs->u.e.mas0 = vcpu->arch.shared->mas0;
        sregs->u.e.mas1 = vcpu->arch.shared->mas1;
        sregs->u.e.mas2 = vcpu->arch.shared->mas2;
        sregs->u.e.mas7_3 = vcpu->arch.shared->mas7_3;
        sregs->u.e.mas4 = vcpu->arch.shared->mas4;
        sregs->u.e.mas6 = vcpu->arch.shared->mas6;

        sregs->u.e.mmucfg = vcpu->arch.mmucfg;
        sregs->u.e.tlbcfg[0] = vcpu->arch.tlbcfg[0];
        sregs->u.e.tlbcfg[1] = vcpu->arch.tlbcfg[1];
        sregs->u.e.tlbcfg[2] = 0;
        sregs->u.e.tlbcfg[3] = 0;
}

int kvmppc_set_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
        if (sregs->u.e.features & KVM_SREGS_E_ARCH206_MMU) {
                vcpu->arch.shared->mas0 = sregs->u.e.mas0;
                vcpu->arch.shared->mas1 = sregs->u.e.mas1;
                vcpu->arch.shared->mas2 = sregs->u.e.mas2;
                vcpu->arch.shared->mas7_3 = sregs->u.e.mas7_3;
                vcpu->arch.shared->mas4 = sregs->u.e.mas4;
                vcpu->arch.shared->mas6 = sregs->u.e.mas6;
        }

        return 0;
}

int kvmppc_get_one_reg_e500_tlb(struct kvm_vcpu *vcpu, u64 id,
                                union kvmppc_one_reg *val)
{
        int r = 0;
        long int i;

        switch (id) {
        case KVM_REG_PPC_MAS0:
                *val = get_reg_val(id, vcpu->arch.shared->mas0);
                break;
        case KVM_REG_PPC_MAS1:
                *val = get_reg_val(id, vcpu->arch.shared->mas1);
                break;
        case KVM_REG_PPC_MAS2:
                *val = get_reg_val(id, vcpu->arch.shared->mas2);
                break;
        case KVM_REG_PPC_MAS7_3:
                *val = get_reg_val(id, vcpu->arch.shared->mas7_3);
                break;
        case KVM_REG_PPC_MAS4:
                *val = get_reg_val(id, vcpu->arch.shared->mas4);
                break;
        case KVM_REG_PPC_MAS6:
                *val = get_reg_val(id, vcpu->arch.shared->mas6);
                break;
        case KVM_REG_PPC_MMUCFG:
                *val = get_reg_val(id, vcpu->arch.mmucfg);
                break;
        case KVM_REG_PPC_EPTCFG:
                *val = get_reg_val(id, vcpu->arch.eptcfg);
                break;
        case KVM_REG_PPC_TLB0CFG:
        case KVM_REG_PPC_TLB1CFG:
        case KVM_REG_PPC_TLB2CFG:
        case KVM_REG_PPC_TLB3CFG:
                i = id - KVM_REG_PPC_TLB0CFG;
                *val = get_reg_val(id, vcpu->arch.tlbcfg[i]);
                break;
        case KVM_REG_PPC_TLB0PS:
        case KVM_REG_PPC_TLB1PS:
        case KVM_REG_PPC_TLB2PS:
        case KVM_REG_PPC_TLB3PS:
                i = id - KVM_REG_PPC_TLB0PS;
                *val = get_reg_val(id, vcpu->arch.tlbps[i]);
                break;
        default:
                r = -EINVAL;
                break;
        }

        return r;
}

int kvmppc_set_one_reg_e500_tlb(struct kvm_vcpu *vcpu, u64 id,
                               union kvmppc_one_reg *val)
{
        int r = 0;
        long int i;

        switch (id) {
        case KVM_REG_PPC_MAS0:
                vcpu->arch.shared->mas0 = set_reg_val(id, *val);
                break;
        case KVM_REG_PPC_MAS1:
                vcpu->arch.shared->mas1 = set_reg_val(id, *val);
                break;
        case KVM_REG_PPC_MAS2:
                vcpu->arch.shared->mas2 = set_reg_val(id, *val);
                break;
        case KVM_REG_PPC_MAS7_3:
                vcpu->arch.shared->mas7_3 = set_reg_val(id, *val);
                break;
        case KVM_REG_PPC_MAS4:
                vcpu->arch.shared->mas4 = set_reg_val(id, *val);
                break;
        case KVM_REG_PPC_MAS6:
                vcpu->arch.shared->mas6 = set_reg_val(id, *val);
                break;
        /* Only allow MMU registers to be set to the config supported by KVM */
        case KVM_REG_PPC_MMUCFG: {
                u32 reg = set_reg_val(id, *val);
                if (reg != vcpu->arch.mmucfg)
                        r = -EINVAL;
                break;
        }
        case KVM_REG_PPC_EPTCFG: {
                u32 reg = set_reg_val(id, *val);
                if (reg != vcpu->arch.eptcfg)
                        r = -EINVAL;
                break;
        }
        case KVM_REG_PPC_TLB0CFG:
        case KVM_REG_PPC_TLB1CFG:
        case KVM_REG_PPC_TLB2CFG:
        case KVM_REG_PPC_TLB3CFG: {
                /* MMU geometry (N_ENTRY/ASSOC) can be set only using SW_TLB */
                u32 reg = set_reg_val(id, *val);
                i = id - KVM_REG_PPC_TLB0CFG;
                if (reg != vcpu->arch.tlbcfg[i])
                        r = -EINVAL;
                break;
        }
        case KVM_REG_PPC_TLB0PS:
        case KVM_REG_PPC_TLB1PS:
        case KVM_REG_PPC_TLB2PS:
        case KVM_REG_PPC_TLB3PS: {
                u32 reg = set_reg_val(id, *val);
                i = id - KVM_REG_PPC_TLB0PS;
                if (reg != vcpu->arch.tlbps[i])
                        r = -EINVAL;
                break;
        }
        default:
                r = -EINVAL;
                break;
        }

        return r;
}

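/*
 * Refresh the TLBnCFG N_ENTRY/ASSOC fields to match the geometry
 * supplied through KVM_CONFIG_TLB.
 */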
static int vcpu_mmu_geometry_update(struct kvm_vcpu *vcpu,
                struct kvm_book3e_206_tlb_params *params)
{
        vcpu->arch.tlbcfg[0] &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
        if (params->tlb_sizes[0] <= 2048)
                vcpu->arch.tlbcfg[0] |= params->tlb_sizes[0];
        vcpu->arch.tlbcfg[0] |= params->tlb_ways[0] << TLBnCFG_ASSOC_SHIFT;

        vcpu->arch.tlbcfg[1] &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
        vcpu->arch.tlbcfg[1] |= params->tlb_sizes[1];
        vcpu->arch.tlbcfg[1] |= params->tlb_ways[1] << TLBnCFG_ASSOC_SHIFT;
        return 0;
}

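/*
 * Handle the KVM_CONFIG_TLB ioctl: validate the userspace-supplied TLB
 * geometry, pin and vmap the user's TLB entry array so guest and
 * userspace share it directly, and swap in the new state.
 */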
int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
                              struct kvm_config_tlb *cfg)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        struct kvm_book3e_206_tlb_params params;
        char *virt;
        struct page **pages;
        struct tlbe_priv *privs[2] = {};
        u64 *g2h_bitmap;
        size_t array_len;
        u32 sets;
        int num_pages, ret, i;

        if (cfg->mmu_type != KVM_MMU_FSL_BOOKE_NOHV)
                return -EINVAL;

        if (copy_from_user(&params, (void __user *)(uintptr_t)cfg->params,
                           sizeof(params)))
                return -EFAULT;

        if (params.tlb_sizes[1] > 64)
                return -EINVAL;
        if (params.tlb_ways[1] != params.tlb_sizes[1])
                return -EINVAL;
        if (params.tlb_sizes[2] != 0 || params.tlb_sizes[3] != 0)
                return -EINVAL;
        if (params.tlb_ways[2] != 0 || params.tlb_ways[3] != 0)
                return -EINVAL;

        if (!is_power_of_2(params.tlb_ways[0]))
                return -EINVAL;

        sets = params.tlb_sizes[0] >> ilog2(params.tlb_ways[0]);
        if (!is_power_of_2(sets))
                return -EINVAL;

        array_len = params.tlb_sizes[0] + params.tlb_sizes[1];
        array_len *= sizeof(struct kvm_book3e_206_tlb_entry);

        if (cfg->array_len < array_len)
                return -EINVAL;

        num_pages = DIV_ROUND_UP(cfg->array + array_len - 1, PAGE_SIZE) -
                    cfg->array / PAGE_SIZE;
        pages = kmalloc_array(num_pages, sizeof(*pages), GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        ret = get_user_pages_fast(cfg->array, num_pages, FOLL_WRITE, pages);
        if (ret < 0)
                goto free_pages;

        if (ret != num_pages) {
                num_pages = ret;
                ret = -EFAULT;
                goto put_pages;
        }

        virt = vmap(pages, num_pages, VM_MAP, PAGE_KERNEL);
        if (!virt) {
                ret = -ENOMEM;
                goto put_pages;
        }

        privs[0] = kcalloc(params.tlb_sizes[0], sizeof(*privs[0]), GFP_KERNEL);
        if (!privs[0]) {
                ret = -ENOMEM;
                goto put_pages;
        }

        privs[1] = kcalloc(params.tlb_sizes[1], sizeof(*privs[1]), GFP_KERNEL);
        if (!privs[1]) {
                ret = -ENOMEM;
                goto free_privs_first;
        }

        g2h_bitmap = kcalloc(params.tlb_sizes[1],
                             sizeof(*g2h_bitmap),
                             GFP_KERNEL);
        if (!g2h_bitmap) {
                ret = -ENOMEM;
                goto free_privs_second;
        }

        free_gtlb(vcpu_e500);

        vcpu_e500->gtlb_priv[0] = privs[0];
        vcpu_e500->gtlb_priv[1] = privs[1];
        vcpu_e500->g2h_tlb1_map = g2h_bitmap;

        vcpu_e500->gtlb_arch = (struct kvm_book3e_206_tlb_entry *)
                (virt + (cfg->array & (PAGE_SIZE - 1)));

        vcpu_e500->gtlb_params[0].entries = params.tlb_sizes[0];
        vcpu_e500->gtlb_params[1].entries = params.tlb_sizes[1];

        vcpu_e500->gtlb_offset[0] = 0;
        vcpu_e500->gtlb_offset[1] = params.tlb_sizes[0];

        /* Update vcpu's MMU geometry based on SW_TLB input */
        vcpu_mmu_geometry_update(vcpu, &params);

        vcpu_e500->shared_tlb_pages = pages;
        vcpu_e500->num_shared_tlb_pages = num_pages;

        vcpu_e500->gtlb_params[0].ways = params.tlb_ways[0];
        vcpu_e500->gtlb_params[0].sets = sets;

        vcpu_e500->gtlb_params[1].ways = params.tlb_sizes[1];
        vcpu_e500->gtlb_params[1].sets = 1;

        kvmppc_recalc_tlb1map_range(vcpu_e500);
        return 0;
 free_privs_second:
        kfree(privs[1]);
 free_privs_first:
        kfree(privs[0]);
 put_pages:
        for (i = 0; i < num_pages; i++)
                put_page(pages[i]);
 free_pages:
        kfree(pages);
        return ret;
}

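/*
 * Handle the KVM_DIRTY_TLB ioctl: userspace has modified the shared
 * TLB array, so recompute the TLB1 range and drop all shadow mappings.
 */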
int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
                             struct kvm_dirty_tlb *dirty)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        kvmppc_recalc_tlb1map_range(vcpu_e500);
        kvmppc_core_flush_tlb(vcpu);
        return 0;
}

/* Vcpu's MMU default configuration */
static int vcpu_mmu_init(struct kvm_vcpu *vcpu,
                       struct kvmppc_e500_tlb_params *params)
{
        /* Initialize RASIZE, PIDSIZE, NTLBS and MAVN fields with host values */
        vcpu->arch.mmucfg = mfspr(SPRN_MMUCFG) & ~MMUCFG_LPIDSIZE;

        /* Initialize TLBnCFG fields with host values and SW_TLB geometry */
        vcpu->arch.tlbcfg[0] = mfspr(SPRN_TLB0CFG) &
                             ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
        vcpu->arch.tlbcfg[0] |= params[0].entries;
        vcpu->arch.tlbcfg[0] |= params[0].ways << TLBnCFG_ASSOC_SHIFT;

        vcpu->arch.tlbcfg[1] = mfspr(SPRN_TLB1CFG) &
                             ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
        vcpu->arch.tlbcfg[1] |= params[1].entries;
        vcpu->arch.tlbcfg[1] |= params[1].ways << TLBnCFG_ASSOC_SHIFT;

        if (has_feature(vcpu, VCPU_FTR_MMU_V2)) {
                vcpu->arch.tlbps[0] = mfspr(SPRN_TLB0PS);
                vcpu->arch.tlbps[1] = mfspr(SPRN_TLB1PS);

                vcpu->arch.mmucfg &= ~MMUCFG_LRAT;

                /* Guest mmu emulation currently doesn't handle E.PT */
                vcpu->arch.eptcfg = 0;
                vcpu->arch.tlbcfg[0] &= ~TLBnCFG_PT;
                vcpu->arch.tlbcfg[1] &= ~TLBnCFG_IND;
        }

        return 0;
}

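/*
 * Allocate and initialize the default guest TLB (entry array, per-entry
 * metadata and guest-to-host map) and the emulated MMU configuration.
 */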
int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        struct kvm_vcpu *vcpu = &vcpu_e500->vcpu;

        if (e500_mmu_host_init(vcpu_e500))
                goto free_vcpu;

        vcpu_e500->gtlb_params[0].entries = KVM_E500_TLB0_SIZE;
        vcpu_e500->gtlb_params[1].entries = KVM_E500_TLB1_SIZE;

        vcpu_e500->gtlb_params[0].ways = KVM_E500_TLB0_WAY_NUM;
        vcpu_e500->gtlb_params[0].sets =
                KVM_E500_TLB0_SIZE / KVM_E500_TLB0_WAY_NUM;

        vcpu_e500->gtlb_params[1].ways = KVM_E500_TLB1_SIZE;
        vcpu_e500->gtlb_params[1].sets = 1;

        vcpu_e500->gtlb_arch = kmalloc_array(KVM_E500_TLB0_SIZE +
                                             KVM_E500_TLB1_SIZE,
                                             sizeof(*vcpu_e500->gtlb_arch),
                                             GFP_KERNEL);
        if (!vcpu_e500->gtlb_arch)
                return -ENOMEM;

        vcpu_e500->gtlb_offset[0] = 0;
        vcpu_e500->gtlb_offset[1] = KVM_E500_TLB0_SIZE;

        vcpu_e500->gtlb_priv[0] = kcalloc(vcpu_e500->gtlb_params[0].entries,
                                          sizeof(struct tlbe_ref),
                                          GFP_KERNEL);
        if (!vcpu_e500->gtlb_priv[0])
                goto free_vcpu;

        vcpu_e500->gtlb_priv[1] = kcalloc(vcpu_e500->gtlb_params[1].entries,
                                          sizeof(struct tlbe_ref),
                                          GFP_KERNEL);
        if (!vcpu_e500->gtlb_priv[1])
                goto free_vcpu;

        vcpu_e500->g2h_tlb1_map = kcalloc(vcpu_e500->gtlb_params[1].entries,
                                          sizeof(*vcpu_e500->g2h_tlb1_map),
                                          GFP_KERNEL);
        if (!vcpu_e500->g2h_tlb1_map)
                goto free_vcpu;

        vcpu_mmu_init(vcpu, vcpu_e500->gtlb_params);

        kvmppc_recalc_tlb1map_range(vcpu_e500);
        return 0;
 free_vcpu:
        free_gtlb(vcpu_e500);
        return -1;
}

void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        free_gtlb(vcpu_e500);
        e500_mmu_host_uninit(vcpu_e500);
}