/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS MMU handling in the KVM module.
 *
 * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/highmem.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

/*
 * KVM_MMU_CACHE_MIN_PAGES is the number of GPA page table translation levels
 * for which pages need to be cached.
 */
#if defined(__PAGETABLE_PMD_FOLDED)
#define KVM_MMU_CACHE_MIN_PAGES 1
#else
#define KVM_MMU_CACHE_MIN_PAGES 2
#endif
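
/*
 * With the PMD level folded the GPA page tables have two levels (PGD and
 * PTE), so a fault needs at most one new table page; with a PMD level there
 * are three levels and up to two pages (PMD + PTE) may need allocating,
 * hence the values above.
 */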

static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
                                  int min, int max)
{
        void *page;

        BUG_ON(max > KVM_NR_MEM_OBJS);
        if (cache->nobjs >= min)
                return 0;
        while (cache->nobjs < max) {
                page = (void *)__get_free_page(GFP_KERNEL);
                if (!page)
                        return -ENOMEM;
                cache->objects[cache->nobjs++] = page;
        }
        return 0;
}
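
/*
 * Typical pattern (see kvm_mips_map_page() below): the cache is topped up to
 * at least KVM_MMU_CACHE_MIN_PAGES objects before taking the mmu_lock, and
 * pages are then drawn from it via mmu_memory_cache_alloc() with the lock
 * held, where a sleeping GFP_KERNEL allocation would not be allowed.
 */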

static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
        while (mc->nobjs)
                free_page((unsigned long)mc->objects[--mc->nobjs]);
}

static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
{
        void *p;

        BUG_ON(!mc || !mc->nobjs);
        p = mc->objects[--mc->nobjs];
        return p;
}

void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
        mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
}

/**
 * kvm_pgd_init() - Initialise KVM GPA page directory.
 * @page:       Pointer to page directory (PGD) for KVM GPA.
 *
 * Initialise a KVM GPA page directory with pointers to the invalid table, i.e.
 * representing no mappings. This is similar to pgd_init(); however, it
 * initialises all the page directory pointers, not just the ones corresponding
 * to the userland address space (since it is for the guest physical address
 * space rather than a virtual address space).
 */
static void kvm_pgd_init(void *page)
{
        unsigned long *p, *end;
        unsigned long entry;

#ifdef __PAGETABLE_PMD_FOLDED
        entry = (unsigned long)invalid_pte_table;
#else
        entry = (unsigned long)invalid_pmd_table;
#endif

        p = (unsigned long *)page;
        end = p + PTRS_PER_PGD;

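        /*
         * Manually unrolled initialisation: each pass writes eight entries
         * (p[0]..p[4], then after advancing p by 8, p[-3]..p[-1]), relying
         * on PTRS_PER_PGD being a multiple of 8 so that p lands exactly on
         * end.
         */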
        do {
                p[0] = entry;
                p[1] = entry;
                p[2] = entry;
                p[3] = entry;
                p[4] = entry;
                p += 8;
                p[-3] = entry;
                p[-2] = entry;
                p[-1] = entry;
        } while (p != end);
}

/**
 * kvm_pgd_alloc() - Allocate and initialise a KVM GPA page directory.
 *
 * Allocate a blank KVM GPA page directory (PGD) for representing guest physical
 * to host physical page mappings.
 *
 * Returns:     Pointer to new KVM GPA page directory.
 *              NULL on allocation failure.
 */
pgd_t *kvm_pgd_alloc(void)
{
        pgd_t *ret;

        ret = (pgd_t *)__get_free_pages(GFP_KERNEL, PGD_ORDER);
        if (ret)
                kvm_pgd_init(ret);

        return ret;
}
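
/*
 * The result typically becomes the VM's GPA page directory; a sketch of the
 * caller's side (which lives outside this file) might look like:
 *
 *        kvm->arch.gpa_mm.pgd = kvm_pgd_alloc();
 *        if (!kvm->arch.gpa_mm.pgd)
 *                return -ENOMEM;
 */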

/**
 * kvm_mips_walk_pgd() - Walk page table with optional allocation.
 * @pgd:        Page directory pointer.
 * @addr:       Address to index page table using.
 * @cache:      MMU page cache to allocate new page tables from, or NULL.
 *
 * Walk the page tables pointed to by @pgd to find the PTE corresponding to the
 * address @addr. If page tables don't exist for @addr, they will be created
 * from the MMU cache if @cache is not NULL.
 *
 * Returns:     Pointer to pte_t corresponding to @addr.
 *              NULL if a page table doesn't exist for @addr and !@cache.
 *              NULL if a page table allocation failed.
 */
static pte_t *kvm_mips_walk_pgd(pgd_t *pgd, struct kvm_mmu_memory_cache *cache,
                                unsigned long addr)
{
        pud_t *pud;
        pmd_t *pmd;

        pgd += pgd_index(addr);
        if (pgd_none(*pgd)) {
                /* Not used on MIPS yet */
                BUG();
                return NULL;
        }
        pud = pud_offset(pgd, addr);
        if (pud_none(*pud)) {
                pmd_t *new_pmd;

                if (!cache)
                        return NULL;
                new_pmd = mmu_memory_cache_alloc(cache);
                pmd_init((unsigned long)new_pmd,
                         (unsigned long)invalid_pte_table);
                pud_populate(NULL, pud, new_pmd);
        }
        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd)) {
                pte_t *new_pte;

                if (!cache)
                        return NULL;
                new_pte = mmu_memory_cache_alloc(cache);
                clear_page(new_pte);
                pmd_populate_kernel(NULL, pmd, new_pte);
        }
        return pte_offset(pmd, addr);
}

/* Caller must hold kvm->mmu_lock */
static pte_t *kvm_mips_pte_for_gpa(struct kvm *kvm,
                                   struct kvm_mmu_memory_cache *cache,
                                   unsigned long addr)
{
        return kvm_mips_walk_pgd(kvm->arch.gpa_mm.pgd, cache, addr);
}

/*
 * kvm_mips_flush_gpa_{pte,pmd,pud,pgd,pt}.
 * Flush a range of guest physical address space from the VM's GPA page tables.
 */

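/*
 * Each level clears the present entries within its slice of the range and
 * returns whether it covered the entire table, in which case the caller may
 * clear the parent entry and free the now-empty lower-level table.
 */
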
static bool kvm_mips_flush_gpa_pte(pte_t *pte, unsigned long start_gpa,
                                   unsigned long end_gpa)
{
        int i_min = __pte_offset(start_gpa);
        int i_max = __pte_offset(end_gpa);
        bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PTE - 1);
        int i;

        for (i = i_min; i <= i_max; ++i) {
                if (!pte_present(pte[i]))
                        continue;

                set_pte(pte + i, __pte(0));
        }
        return safe_to_remove;
}

static bool kvm_mips_flush_gpa_pmd(pmd_t *pmd, unsigned long start_gpa,
                                   unsigned long end_gpa)
{
        pte_t *pte;
        unsigned long end = ~0ul;
        int i_min = __pmd_offset(start_gpa);
        int i_max = __pmd_offset(end_gpa);
        bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PMD - 1);
        int i;

        for (i = i_min; i <= i_max; ++i, start_gpa = 0) {
                if (!pmd_present(pmd[i]))
                        continue;

                pte = pte_offset(pmd + i, 0);
                if (i == i_max)
                        end = end_gpa;

                if (kvm_mips_flush_gpa_pte(pte, start_gpa, end)) {
                        pmd_clear(pmd + i);
                        pte_free_kernel(NULL, pte);
                } else {
                        safe_to_remove = false;
                }
        }
        return safe_to_remove;
}

static bool kvm_mips_flush_gpa_pud(pud_t *pud, unsigned long start_gpa,
                                   unsigned long end_gpa)
{
        pmd_t *pmd;
        unsigned long end = ~0ul;
        int i_min = __pud_offset(start_gpa);
        int i_max = __pud_offset(end_gpa);
        bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PUD - 1);
        int i;

        for (i = i_min; i <= i_max; ++i, start_gpa = 0) {
                if (!pud_present(pud[i]))
                        continue;

                pmd = pmd_offset(pud + i, 0);
                if (i == i_max)
                        end = end_gpa;

                if (kvm_mips_flush_gpa_pmd(pmd, start_gpa, end)) {
                        pud_clear(pud + i);
                        pmd_free(NULL, pmd);
                } else {
                        safe_to_remove = false;
                }
        }
        return safe_to_remove;
}

static bool kvm_mips_flush_gpa_pgd(pgd_t *pgd, unsigned long start_gpa,
                                   unsigned long end_gpa)
{
        pud_t *pud;
        unsigned long end = ~0ul;
        int i_min = pgd_index(start_gpa);
        int i_max = pgd_index(end_gpa);
        bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PGD - 1);
        int i;

        for (i = i_min; i <= i_max; ++i, start_gpa = 0) {
                if (!pgd_present(pgd[i]))
                        continue;

                pud = pud_offset(pgd + i, 0);
                if (i == i_max)
                        end = end_gpa;

                if (kvm_mips_flush_gpa_pud(pud, start_gpa, end)) {
                        pgd_clear(pgd + i);
                        pud_free(NULL, pud);
                } else {
                        safe_to_remove = false;
                }
        }
        return safe_to_remove;
}

/**
 * kvm_mips_flush_gpa_pt() - Flush a range of guest physical addresses.
 * @kvm:        KVM pointer.
 * @start_gfn:  Guest frame number of first page in GPA range to flush.
 * @end_gfn:    Guest frame number of last page in GPA range to flush.
 *
 * Flushes a range of GPA mappings from the GPA page tables.
 *
 * The caller must hold the @kvm->mmu_lock spinlock.
 *
 * Returns:     Whether it's safe to remove the top level page directory because
 *              all lower levels have been removed.
 */
bool kvm_mips_flush_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn)
{
        return kvm_mips_flush_gpa_pgd(kvm->arch.gpa_mm.pgd,
                                      start_gfn << PAGE_SHIFT,
                                      end_gfn << PAGE_SHIFT);
}
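
/*
 * Both bounds are inclusive, so e.g. flushing a single page at guest frame
 * gfn is kvm_mips_flush_gpa_pt(kvm, gfn, gfn).
 */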

#define BUILD_PTE_RANGE_OP(name, op)                                    \
static int kvm_mips_##name##_pte(pte_t *pte, unsigned long start,       \
                                 unsigned long end)                     \
{                                                                       \
        int ret = 0;                                                    \
        int i_min = __pte_offset(start);                                \
        int i_max = __pte_offset(end);                                  \
        int i;                                                          \
        pte_t old, new;                                                 \
                                                                        \
        for (i = i_min; i <= i_max; ++i) {                              \
                if (!pte_present(pte[i]))                               \
                        continue;                                       \
                                                                        \
                old = pte[i];                                           \
                new = op(old);                                          \
                if (pte_val(new) == pte_val(old))                       \
                        continue;                                       \
                set_pte(pte + i, new);                                  \
                ret = 1;                                                \
        }                                                               \
        return ret;                                                     \
}                                                                       \
                                                                        \
/* returns true if anything was done */                                 \
static int kvm_mips_##name##_pmd(pmd_t *pmd, unsigned long start,       \
                                 unsigned long end)                     \
{                                                                       \
        int ret = 0;                                                    \
        pte_t *pte;                                                     \
        unsigned long cur_end = ~0ul;                                   \
        int i_min = __pmd_offset(start);                                \
        int i_max = __pmd_offset(end);                                  \
        int i;                                                          \
                                                                        \
        for (i = i_min; i <= i_max; ++i, start = 0) {                   \
                if (!pmd_present(pmd[i]))                               \
                        continue;                                       \
                                                                        \
                pte = pte_offset(pmd + i, 0);                           \
                if (i == i_max)                                         \
                        cur_end = end;                                  \
                                                                        \
                ret |= kvm_mips_##name##_pte(pte, start, cur_end);      \
        }                                                               \
        return ret;                                                     \
}                                                                       \
                                                                        \
static int kvm_mips_##name##_pud(pud_t *pud, unsigned long start,       \
                                 unsigned long end)                     \
{                                                                       \
        int ret = 0;                                                    \
        pmd_t *pmd;                                                     \
        unsigned long cur_end = ~0ul;                                   \
        int i_min = __pud_offset(start);                                \
        int i_max = __pud_offset(end);                                  \
        int i;                                                          \
                                                                        \
        for (i = i_min; i <= i_max; ++i, start = 0) {                   \
                if (!pud_present(pud[i]))                               \
                        continue;                                       \
                                                                        \
                pmd = pmd_offset(pud + i, 0);                           \
                if (i == i_max)                                         \
                        cur_end = end;                                  \
                                                                        \
                ret |= kvm_mips_##name##_pmd(pmd, start, cur_end);      \
        }                                                               \
        return ret;                                                     \
}                                                                       \
                                                                        \
static int kvm_mips_##name##_pgd(pgd_t *pgd, unsigned long start,       \
                                 unsigned long end)                     \
{                                                                       \
        int ret = 0;                                                    \
        pud_t *pud;                                                     \
        unsigned long cur_end = ~0ul;                                   \
        int i_min = pgd_index(start);                                   \
        int i_max = pgd_index(end);                                     \
        int i;                                                          \
                                                                        \
        for (i = i_min; i <= i_max; ++i, start = 0) {                   \
                if (!pgd_present(pgd[i]))                               \
                        continue;                                       \
                                                                        \
                pud = pud_offset(pgd + i, 0);                           \
                if (i == i_max)                                         \
                        cur_end = end;                                  \
                                                                        \
                ret |= kvm_mips_##name##_pud(pud, start, cur_end);      \
        }                                                               \
        return ret;                                                     \
}
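
/*
 * Each BUILD_PTE_RANGE_OP(name, op) invocation below expands to a family of
 * walkers, kvm_mips_<name>_{pte,pmd,pud,pgd}(), which apply op to every
 * present PTE in an inclusive address range and return nonzero if any PTE
 * was changed.
 */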

/*
 * kvm_mips_mkclean_gpa_pt.
 * Mark a range of guest physical address space clean (writes fault) in the VM's
 * GPA page table to allow dirty page tracking.
 */

BUILD_PTE_RANGE_OP(mkclean, pte_mkclean)

/**
 * kvm_mips_mkclean_gpa_pt() - Make a range of guest physical addresses clean.
 * @kvm:        KVM pointer.
 * @start_gfn:  Guest frame number of first page in GPA range to flush.
 * @end_gfn:    Guest frame number of last page in GPA range to flush.
 *
 * Make a range of GPA mappings clean so that guest writes will fault and
 * trigger dirty page logging.
 *
 * The caller must hold the @kvm->mmu_lock spinlock.
 *
 * Returns:     Whether any GPA mappings were modified, which would require
 *              derived mappings (GVA page tables & TLB entries) to be
 *              invalidated.
 */
int kvm_mips_mkclean_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn)
{
        return kvm_mips_mkclean_pgd(kvm->arch.gpa_mm.pgd,
                                    start_gfn << PAGE_SHIFT,
                                    end_gfn << PAGE_SHIFT);
}

/**
 * kvm_arch_mmu_enable_log_dirty_pt_masked() - write protect dirty pages
 * @kvm:        The KVM pointer
 * @slot:       The memory slot associated with mask
 * @gfn_offset: The gfn offset in memory slot
 * @mask:       The mask of dirty pages at offset 'gfn_offset' in this memory
 *              slot to be write protected
 *
 * Walks the bits set in @mask and write protects the associated PTEs. The
 * caller must acquire @kvm->mmu_lock.
 */
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
                struct kvm_memory_slot *slot,
                gfn_t gfn_offset, unsigned long mask)
{
        gfn_t base_gfn = slot->base_gfn + gfn_offset;
        gfn_t start = base_gfn + __ffs(mask);
        gfn_t end = base_gfn + __fls(mask);

        kvm_mips_mkclean_gpa_pt(kvm, start, end);
}
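
/*
 * Bit i of the mask corresponds to the page at gfn_offset + i, so for a
 * hypothetical mask of 0b0110 the pages base_gfn + 1 through base_gfn + 2
 * get write protected. The whole span between the first (__ffs) and last
 * (__fls) set bit is passed down, since both bounds are inclusive.
 */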

/*
 * kvm_mips_mkold_gpa_pt.
 * Mark a range of guest physical address space old (all accesses fault) in the
 * VM's GPA page table to allow detection of commonly used pages.
 */

BUILD_PTE_RANGE_OP(mkold, pte_mkold)

static int kvm_mips_mkold_gpa_pt(struct kvm *kvm, gfn_t start_gfn,
                                 gfn_t end_gfn)
{
        return kvm_mips_mkold_pgd(kvm->arch.gpa_mm.pgd,
                                  start_gfn << PAGE_SHIFT,
                                  end_gfn << PAGE_SHIFT);
}

static int handle_hva_to_gpa(struct kvm *kvm,
                             unsigned long start,
                             unsigned long end,
                             int (*handler)(struct kvm *kvm, gfn_t gfn,
                                            gpa_t gfn_end,
                                            struct kvm_memory_slot *memslot,
                                            void *data),
                             void *data)
{
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;
        int ret = 0;

        slots = kvm_memslots(kvm);

        /* we only care about the pages that the guest sees */
        kvm_for_each_memslot(memslot, slots) {
                unsigned long hva_start, hva_end;
                gfn_t gfn, gfn_end;

                hva_start = max(start, memslot->userspace_addr);
                hva_end = min(end, memslot->userspace_addr +
                                        (memslot->npages << PAGE_SHIFT));
                if (hva_start >= hva_end)
                        continue;

                /*
                 * {gfn(page) | page intersects with [hva_start, hva_end)} =
                 * {gfn_start, gfn_start+1, ..., gfn_end-1}.
                 */
                gfn = hva_to_gfn_memslot(hva_start, memslot);
                gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);

                ret |= handler(kvm, gfn, gfn_end, memslot, data);
        }

        return ret;
}
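
/*
 * The individual handler results are OR-ed together, so a nonzero return
 * means at least one memslot in the range asked for action (e.g. a shadow
 * flush).
 */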


static int kvm_unmap_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
                                 struct kvm_memory_slot *memslot, void *data)
{
        kvm_mips_flush_gpa_pt(kvm, gfn, gfn_end);
        return 1;
}

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end,
                        unsigned flags)
{
        handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);

        kvm_mips_callbacks->flush_shadow_all(kvm);
        return 0;
}

static int kvm_set_spte_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
                                struct kvm_memory_slot *memslot, void *data)
{
        gpa_t gpa = gfn << PAGE_SHIFT;
        pte_t hva_pte = *(pte_t *)data;
        pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa);
        pte_t old_pte;

        if (!gpa_pte)
                return 0;

        /* Mapping may need adjusting depending on memslot flags */
        old_pte = *gpa_pte;
        if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES && !pte_dirty(old_pte))
                hva_pte = pte_mkclean(hva_pte);
        else if (memslot->flags & KVM_MEM_READONLY)
                hva_pte = pte_wrprotect(hva_pte);

        set_pte(gpa_pte, hva_pte);

        /* Replacing an absent or old page doesn't need flushes */
        if (!pte_present(old_pte) || !pte_young(old_pte))
                return 0;

        /* Pages swapped, aged, moved, or cleaned require flushes */
        return !pte_present(hva_pte) ||
               !pte_young(hva_pte) ||
               pte_pfn(old_pte) != pte_pfn(hva_pte) ||
               (pte_dirty(old_pte) && !pte_dirty(hva_pte));
}

int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
        unsigned long end = hva + PAGE_SIZE;
        int ret;

        ret = handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &pte);
        if (ret)
                kvm_mips_callbacks->flush_shadow_all(kvm);
        return 0;
}

static int kvm_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
                               struct kvm_memory_slot *memslot, void *data)
{
        return kvm_mips_mkold_gpa_pt(kvm, gfn, gfn_end);
}

static int kvm_test_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end,
                                    struct kvm_memory_slot *memslot, void *data)
{
        gpa_t gpa = gfn << PAGE_SHIFT;
        pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa);

        if (!gpa_pte)
                return 0;
        return pte_young(*gpa_pte);
}

int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
{
        return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL);
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
        return handle_hva_to_gpa(kvm, hva, hva, kvm_test_age_hva_handler, NULL);
}

/**
 * _kvm_mips_map_page_fast() - Fast path GPA fault handler.
 * @vcpu:               VCPU pointer.
 * @gpa:                Guest physical address of fault.
 * @write_fault:        Whether the fault was due to a write.
 * @out_entry:          New PTE for @gpa (written on success unless NULL).
 * @out_buddy:          New PTE for @gpa's buddy (written on success unless
 *                      NULL).
 *
 * Perform fast path GPA fault handling, doing all that can be done without
 * calling into KVM. This handles marking old pages young (for idle page
 * tracking), and dirtying of clean pages (for dirty page logging).
 *
 * Returns:     0 on success, in which case we can update derived mappings and
 *              resume guest execution.
 *              -EFAULT on failure due to absent GPA mapping or write to
 *              read-only page, in which case KVM must be consulted.
 */
static int _kvm_mips_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa,
                                   bool write_fault,
                                   pte_t *out_entry, pte_t *out_buddy)
{
        struct kvm *kvm = vcpu->kvm;
        gfn_t gfn = gpa >> PAGE_SHIFT;
        pte_t *ptep;
        kvm_pfn_t pfn = 0;      /* silence bogus GCC warning */
        bool pfn_valid = false;
        int ret = 0;

        spin_lock(&kvm->mmu_lock);

        /* Fast path - just check GPA page table for an existing entry */
        ptep = kvm_mips_pte_for_gpa(kvm, NULL, gpa);
        if (!ptep || !pte_present(*ptep)) {
                ret = -EFAULT;
                goto out;
        }

        /* Track access to pages marked old */
        if (!pte_young(*ptep)) {
                set_pte(ptep, pte_mkyoung(*ptep));
                pfn = pte_pfn(*ptep);
                pfn_valid = true;
                /* call kvm_set_pfn_accessed() after unlock */
        }
        if (write_fault && !pte_dirty(*ptep)) {
                if (!pte_write(*ptep)) {
                        ret = -EFAULT;
                        goto out;
                }

                /* Track dirtying of writeable pages */
                set_pte(ptep, pte_mkdirty(*ptep));
                pfn = pte_pfn(*ptep);
                mark_page_dirty(kvm, gfn);
                kvm_set_pfn_dirty(pfn);
        }

        if (out_entry)
                *out_entry = *ptep;
        if (out_buddy)
                *out_buddy = *ptep_buddy(ptep);

out:
        spin_unlock(&kvm->mmu_lock);
        if (pfn_valid)
                kvm_set_pfn_accessed(pfn);
        return ret;
}

/**
 * kvm_mips_map_page() - Map a guest physical page.
 * @vcpu:               VCPU pointer.
 * @gpa:                Guest physical address of fault.
 * @write_fault:        Whether the fault was due to a write.
 * @out_entry:          New PTE for @gpa (written on success unless NULL).
 * @out_buddy:          New PTE for @gpa's buddy (written on success unless
 *                      NULL).
 *
 * Handle GPA faults by creating a new GPA mapping (or updating an existing
 * one).
 *
 * This takes care of marking pages young or dirty (idle/dirty page tracking),
 * asking KVM for the corresponding PFN, and creating a mapping in the GPA page
 * tables. Derived mappings (GVA page tables and TLBs) must be handled by the
 * caller.
 *
 * Returns:     0 on success, in which case the caller may use the @out_entry
 *              and @out_buddy PTEs to update derived mappings and resume guest
 *              execution.
 *              -EFAULT if there is no memory region at @gpa or a write was
 *              attempted to a read-only memory region. This is usually handled
 *              as an MMIO access.
 */
static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,
                             bool write_fault,
                             pte_t *out_entry, pte_t *out_buddy)
{
        struct kvm *kvm = vcpu->kvm;
        struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
        gfn_t gfn = gpa >> PAGE_SHIFT;
        int srcu_idx, err;
        kvm_pfn_t pfn;
        pte_t *ptep, entry, old_pte;
        bool writeable;
        unsigned long prot_bits;
        unsigned long mmu_seq;

        /* Try the fast path to handle old / clean pages */
        srcu_idx = srcu_read_lock(&kvm->srcu);
        err = _kvm_mips_map_page_fast(vcpu, gpa, write_fault, out_entry,
                                      out_buddy);
        if (!err)
                goto out;

        /* We need a minimum of cached pages ready for page table creation */
        err = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES,
                                     KVM_NR_MEM_OBJS);
        if (err)
                goto out;

retry:
        /*
         * Used to check for invalidations in progress, of the pfn that is
         * returned by gfn_to_pfn_prot() below.
         */
        mmu_seq = kvm->mmu_notifier_seq;
        /*
         * Ensure the read of mmu_notifier_seq isn't reordered with PTE reads in
         * gfn_to_pfn_prot() (which calls get_user_pages()), so that we don't
         * risk the page we get a reference to getting unmapped before we have a
         * chance to grab the mmu_lock without mmu_notifier_retry() noticing.
         *
         * This smp_rmb() pairs with the effective smp_wmb() of the combination
         * of the pte_unmap_unlock() after the PTE is zapped, and the
         * spin_lock() in kvm_mmu_notifier_invalidate_<page|range_end>() before
         * mmu_notifier_seq is incremented.
         */
        smp_rmb();

        /* Slow path - ask KVM core whether we can access this GPA */
        pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writeable);
        if (is_error_noslot_pfn(pfn)) {
                err = -EFAULT;
                goto out;
        }

        spin_lock(&kvm->mmu_lock);
        /* Check if an invalidation has taken place since we got pfn */
        if (mmu_notifier_retry(kvm, mmu_seq)) {
                /*
                 * This can happen when mappings are changed asynchronously, but
                 * also synchronously if a COW is triggered by
                 * gfn_to_pfn_prot().
                 */
                spin_unlock(&kvm->mmu_lock);
                kvm_release_pfn_clean(pfn);
                goto retry;
        }

        /* Ensure page tables are allocated */
        ptep = kvm_mips_pte_for_gpa(kvm, memcache, gpa);

        /* Set up the PTE */
        prot_bits = _PAGE_PRESENT | __READABLE | _page_cachable_default;
        if (writeable) {
                prot_bits |= _PAGE_WRITE;
                if (write_fault) {
                        prot_bits |= __WRITEABLE;
                        mark_page_dirty(kvm, gfn);
                        kvm_set_pfn_dirty(pfn);
                }
        }
        entry = pfn_pte(pfn, __pgprot(prot_bits));

        /* Write the PTE */
        old_pte = *ptep;
        set_pte(ptep, entry);

        err = 0;
        if (out_entry)
                *out_entry = *ptep;
        if (out_buddy)
                *out_buddy = *ptep_buddy(ptep);

        spin_unlock(&kvm->mmu_lock);
        kvm_release_pfn_clean(pfn);
        kvm_set_pfn_accessed(pfn);
out:
        srcu_read_unlock(&kvm->srcu, srcu_idx);
        return err;
}

static pte_t *kvm_trap_emul_pte_for_gva(struct kvm_vcpu *vcpu,
                                        unsigned long addr)
{
        struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
        pgd_t *pgdp;
        int ret;

        /* We need a minimum of cached pages ready for page table creation */
        ret = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES,
                                     KVM_NR_MEM_OBJS);
        if (ret)
                return NULL;

        if (KVM_GUEST_KERNEL_MODE(vcpu))
                pgdp = vcpu->arch.guest_kernel_mm.pgd;
        else
                pgdp = vcpu->arch.guest_user_mm.pgd;

        return kvm_mips_walk_pgd(pgdp, memcache, addr);
}

void kvm_trap_emul_invalidate_gva(struct kvm_vcpu *vcpu, unsigned long addr,
                                  bool user)
{
        pgd_t *pgdp;
        pte_t *ptep;

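        /*
         * GVA mappings are handled as even/odd PTE pairs (matching the MIPS
         * TLB's entry pairs), so align down to the start of the pair and
         * clear both entries below.
         */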
        addr &= PAGE_MASK << 1;

        pgdp = vcpu->arch.guest_kernel_mm.pgd;
        ptep = kvm_mips_walk_pgd(pgdp, NULL, addr);
        if (ptep) {
                ptep[0] = pfn_pte(0, __pgprot(0));
                ptep[1] = pfn_pte(0, __pgprot(0));
        }

        if (user) {
                pgdp = vcpu->arch.guest_user_mm.pgd;
                ptep = kvm_mips_walk_pgd(pgdp, NULL, addr);
                if (ptep) {
                        ptep[0] = pfn_pte(0, __pgprot(0));
                        ptep[1] = pfn_pte(0, __pgprot(0));
                }
        }
}

/*
 * kvm_mips_flush_gva_{pte,pmd,pud,pgd,pt}.
 * Flush a range of guest virtual address space from the VM's GVA page tables.
 */

static bool kvm_mips_flush_gva_pte(pte_t *pte, unsigned long start_gva,
                                   unsigned long end_gva)
{
        int i_min = __pte_offset(start_gva);
        int i_max = __pte_offset(end_gva);
        bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PTE - 1);
        int i;

        /*
         * There's no freeing to do, so there's no point clearing individual
         * entries unless only part of the last level page table needs flushing.
         */
        if (safe_to_remove)
                return true;

        for (i = i_min; i <= i_max; ++i) {
                if (!pte_present(pte[i]))
                        continue;

                set_pte(pte + i, __pte(0));
        }
        return false;
}

static bool kvm_mips_flush_gva_pmd(pmd_t *pmd, unsigned long start_gva,
                                   unsigned long end_gva)
{
        pte_t *pte;
        unsigned long end = ~0ul;
        int i_min = __pmd_offset(start_gva);
        int i_max = __pmd_offset(end_gva);
        bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PMD - 1);
        int i;

        for (i = i_min; i <= i_max; ++i, start_gva = 0) {
                if (!pmd_present(pmd[i]))
                        continue;

                pte = pte_offset(pmd + i, 0);
                if (i == i_max)
                        end = end_gva;

                if (kvm_mips_flush_gva_pte(pte, start_gva, end)) {
                        pmd_clear(pmd + i);
                        pte_free_kernel(NULL, pte);
                } else {
                        safe_to_remove = false;
                }
        }
        return safe_to_remove;
}

static bool kvm_mips_flush_gva_pud(pud_t *pud, unsigned long start_gva,
                                   unsigned long end_gva)
{
        pmd_t *pmd;
        unsigned long end = ~0ul;
        int i_min = __pud_offset(start_gva);
        int i_max = __pud_offset(end_gva);
        bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PUD - 1);
        int i;

        for (i = i_min; i <= i_max; ++i, start_gva = 0) {
                if (!pud_present(pud[i]))
                        continue;

                pmd = pmd_offset(pud + i, 0);
                if (i == i_max)
                        end = end_gva;

                if (kvm_mips_flush_gva_pmd(pmd, start_gva, end)) {
                        pud_clear(pud + i);
                        pmd_free(NULL, pmd);
                } else {
                        safe_to_remove = false;
                }
        }
        return safe_to_remove;
}

static bool kvm_mips_flush_gva_pgd(pgd_t *pgd, unsigned long start_gva,
                                   unsigned long end_gva)
{
        pud_t *pud;
        unsigned long end = ~0ul;
        int i_min = pgd_index(start_gva);
        int i_max = pgd_index(end_gva);
        bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PGD - 1);
        int i;

        for (i = i_min; i <= i_max; ++i, start_gva = 0) {
                if (!pgd_present(pgd[i]))
                        continue;

                pud = pud_offset(pgd + i, 0);
                if (i == i_max)
                        end = end_gva;

                if (kvm_mips_flush_gva_pud(pud, start_gva, end)) {
                        pgd_clear(pgd + i);
                        pud_free(NULL, pud);
                } else {
                        safe_to_remove = false;
                }
        }
        return safe_to_remove;
}

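/*
 * The bounds below follow the fixed layout of guest segments in the GVA page
 * tables (per the comments in the function): useg occupies
 * 0x00000000-0x3fffffff, kseg2/3 occupies 0x60000000-0x7fffffff, and flushing
 * 0 to 0x7fffffff covers useg, kseg0 and kseg2/3 together.
 */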
void kvm_mips_flush_gva_pt(pgd_t *pgd, enum kvm_mips_flush flags)
{
        if (flags & KMF_GPA) {
                /* all of guest virtual address space could be affected */
                if (flags & KMF_KERN)
                        /* useg, kseg0, seg2/3 */
                        kvm_mips_flush_gva_pgd(pgd, 0, 0x7fffffff);
                else
                        /* useg */
                        kvm_mips_flush_gva_pgd(pgd, 0, 0x3fffffff);
        } else {
                /* useg */
                kvm_mips_flush_gva_pgd(pgd, 0, 0x3fffffff);

                /* kseg2/3 */
                if (flags & KMF_KERN)
                        kvm_mips_flush_gva_pgd(pgd, 0x60000000, 0x7fffffff);
        }
}

static pte_t kvm_mips_gpa_pte_to_gva_unmapped(pte_t pte)
{
        /*
         * Don't leak writeable but clean entries from GPA page tables. We don't
         * want the normal Linux tlbmod handler to handle dirtying when KVM
         * accesses guest memory.
         */
        if (!pte_dirty(pte))
                pte = pte_wrprotect(pte);

        return pte;
}

static pte_t kvm_mips_gpa_pte_to_gva_mapped(pte_t pte, long entrylo)
{
        /* Guest EntryLo overrides host EntryLo */
        if (!(entrylo & ENTRYLO_D))
                pte = pte_mkclean(pte);

        return kvm_mips_gpa_pte_to_gva_unmapped(pte);
}

#ifdef CONFIG_KVM_MIPS_VZ
int kvm_mips_handle_vz_root_tlb_fault(unsigned long badvaddr,
                                      struct kvm_vcpu *vcpu,
                                      bool write_fault)
{
        int ret;

        ret = kvm_mips_map_page(vcpu, badvaddr, write_fault, NULL, NULL);
        if (ret)
                return ret;

        /* Invalidate this entry in the TLB */
        return kvm_vz_host_tlb_inv(vcpu, badvaddr);
}
#endif

/* XXXKYMA: Must be called with interrupts disabled */
int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
                                    struct kvm_vcpu *vcpu,
                                    bool write_fault)
{
        unsigned long gpa;
        pte_t pte_gpa[2], *ptep_gva;
        int idx;

        if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
                kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
                kvm_mips_dump_host_tlbs();
                return -1;
        }

        /* Get the GPA page table entry */
        gpa = KVM_GUEST_CPHYSADDR(badvaddr);
        idx = (badvaddr >> PAGE_SHIFT) & 1;
        if (kvm_mips_map_page(vcpu, gpa, write_fault, &pte_gpa[idx],
                              &pte_gpa[!idx]) < 0)
                return -1;

        /* Get the GVA page table entry */
        ptep_gva = kvm_trap_emul_pte_for_gva(vcpu, badvaddr & ~PAGE_SIZE);
        if (!ptep_gva) {
                kvm_err("No ptep for gva %lx\n", badvaddr);
                return -1;
        }

        /* Copy a pair of entries from GPA page table to GVA page table */
        ptep_gva[0] = kvm_mips_gpa_pte_to_gva_unmapped(pte_gpa[0]);
        ptep_gva[1] = kvm_mips_gpa_pte_to_gva_unmapped(pte_gpa[1]);

        /* Invalidate this entry in the TLB, guest kernel ASID only */
        kvm_mips_host_tlb_inv(vcpu, badvaddr, false, true);
        return 0;
}
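
/*
 * The parity bit ((badvaddr >> PAGE_SHIFT) & 1) above selects which half of
 * the even/odd PTE pair the faulting page occupies; its buddy is filled in
 * by the same kvm_mips_map_page() call so that a complete pair, matching a
 * MIPS TLB entry, can be written to the GVA page table.
 */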

int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
                                         struct kvm_mips_tlb *tlb,
                                         unsigned long gva,
                                         bool write_fault)
{
        struct kvm *kvm = vcpu->kvm;
        long tlb_lo[2];
        pte_t pte_gpa[2], *ptep_buddy, *ptep_gva;
        unsigned int idx = TLB_LO_IDX(*tlb, gva);
        bool kernel = KVM_GUEST_KERNEL_MODE(vcpu);

        tlb_lo[0] = tlb->tlb_lo[0];
        tlb_lo[1] = tlb->tlb_lo[1];

        /*
         * The commpage address must not be mapped to anything else if the guest
         * TLB contains entries nearby, or commpage accesses will break.
         */
        if (!((gva ^ KVM_GUEST_COMMPAGE_ADDR) & VPN2_MASK & (PAGE_MASK << 1)))
                tlb_lo[TLB_LO_IDX(*tlb, KVM_GUEST_COMMPAGE_ADDR)] = 0;

        /* Get the GPA page table entry */
        if (kvm_mips_map_page(vcpu, mips3_tlbpfn_to_paddr(tlb_lo[idx]),
                              write_fault, &pte_gpa[idx], NULL) < 0)
                return -1;

        /* And its GVA buddy's GPA page table entry if it also exists */
        pte_gpa[!idx] = pfn_pte(0, __pgprot(0));
        if (tlb_lo[!idx] & ENTRYLO_V) {
                spin_lock(&kvm->mmu_lock);
                ptep_buddy = kvm_mips_pte_for_gpa(kvm, NULL,
                                        mips3_tlbpfn_to_paddr(tlb_lo[!idx]));
                if (ptep_buddy)
                        pte_gpa[!idx] = *ptep_buddy;
                spin_unlock(&kvm->mmu_lock);
        }

        /* Get the GVA page table entry pair */
        ptep_gva = kvm_trap_emul_pte_for_gva(vcpu, gva & ~PAGE_SIZE);
        if (!ptep_gva) {
                kvm_err("No ptep for gva %lx\n", gva);
                return -1;
        }

        /* Copy a pair of entries from GPA page table to GVA page table */
        ptep_gva[0] = kvm_mips_gpa_pte_to_gva_mapped(pte_gpa[0], tlb_lo[0]);
        ptep_gva[1] = kvm_mips_gpa_pte_to_gva_mapped(pte_gpa[1], tlb_lo[1]);

        /* Invalidate this entry in the TLB, current guest mode ASID only */
        kvm_mips_host_tlb_inv(vcpu, gva, !kernel, kernel);

        kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
                  tlb->tlb_lo[0], tlb->tlb_lo[1]);

        return 0;
}

int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
                                       struct kvm_vcpu *vcpu)
{
        kvm_pfn_t pfn;
        pte_t *ptep;

        ptep = kvm_trap_emul_pte_for_gva(vcpu, badvaddr);
        if (!ptep) {
                kvm_err("No ptep for commpage %lx\n", badvaddr);
                return -1;
        }

        pfn = PFN_DOWN(virt_to_phys(vcpu->arch.kseg0_commpage));
        /* Also set valid and dirty, so refill handler doesn't have to */
        *ptep = pte_mkyoung(pte_mkdirty(pfn_pte(pfn, PAGE_SHARED)));

        /* Invalidate this entry in the TLB, guest kernel ASID only */
        kvm_mips_host_tlb_inv(vcpu, badvaddr, false, true);
        return 0;
}

/**
 * kvm_mips_migrate_count() - Migrate timer.
 * @vcpu:       Virtual CPU.
 *
 * Migrate CP0_Count hrtimer to the current CPU by cancelling and restarting it
 * if it was running prior to being cancelled.
 *
 * Must be called when the VCPU is migrated to a different CPU to ensure that
 * timer expiry during guest execution interrupts the guest and causes the
 * interrupt to be delivered in a timely manner.
 */
static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
{
        if (hrtimer_cancel(&vcpu->arch.comparecount_timer))
                hrtimer_restart(&vcpu->arch.comparecount_timer);
}

/* Restore ASID once we are scheduled back after preemption */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        unsigned long flags;

        kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);

        local_irq_save(flags);

        vcpu->cpu = cpu;
        if (vcpu->arch.last_sched_cpu != cpu) {
                kvm_debug("[%d->%d]KVM VCPU[%d] switch\n",
                          vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
                /*
                 * Migrate the timer interrupt to the current CPU so that it
                 * always interrupts the guest and synchronously triggers a
                 * guest timer interrupt.
                 */
                kvm_mips_migrate_count(vcpu);
        }

        /* restore guest state to registers */
        kvm_mips_callbacks->vcpu_load(vcpu, cpu);

        local_irq_restore(flags);
}

/* ASID can change if another task is scheduled during preemption */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        unsigned long flags;
        int cpu;

        local_irq_save(flags);

        cpu = smp_processor_id();
        vcpu->arch.last_sched_cpu = cpu;
        vcpu->cpu = -1;

        /* save guest state in registers */
        kvm_mips_callbacks->vcpu_put(vcpu, cpu);

        local_irq_restore(flags);
}

/**
 * kvm_trap_emul_gva_fault() - Safely attempt to handle a GVA access fault.
 * @vcpu:       Virtual CPU.
 * @gva:        Guest virtual address to be accessed.
 * @write:      True if write attempted (must be dirtied and made writable).
 *
 * Safely attempt to handle a GVA fault, mapping GVA pages if necessary, and
 * dirtying the page if @write so that guest instructions can be modified.
 *
 * Returns:     KVM_MIPS_MAPPED on success.
 *              KVM_MIPS_GVA if bad guest virtual address.
 *              KVM_MIPS_GPA if bad guest physical address.
 *              KVM_MIPS_TLB if guest TLB not present.
 *              KVM_MIPS_TLBINV if guest TLB present but not valid.
 *              KVM_MIPS_TLBMOD if guest TLB read only.
 */
enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu,
                                                   unsigned long gva,
                                                   bool write)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        struct kvm_mips_tlb *tlb;
        int index;

        if (KVM_GUEST_KSEGX(gva) == KVM_GUEST_KSEG0) {
                if (kvm_mips_handle_kseg0_tlb_fault(gva, vcpu, write) < 0)
                        return KVM_MIPS_GPA;
        } else if ((KVM_GUEST_KSEGX(gva) < KVM_GUEST_KSEG0) ||
                   KVM_GUEST_KSEGX(gva) == KVM_GUEST_KSEG23) {
                /* Address should be in the guest TLB */
                index = kvm_mips_guest_tlb_lookup(vcpu, (gva & VPN2_MASK) |
                          (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID));
                if (index < 0)
                        return KVM_MIPS_TLB;
                tlb = &vcpu->arch.guest_tlb[index];

                /* Entry should be valid, and dirty for writes */
                if (!TLB_IS_VALID(*tlb, gva))
                        return KVM_MIPS_TLBINV;
                if (write && !TLB_IS_DIRTY(*tlb, gva))
                        return KVM_MIPS_TLBMOD;

                if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, gva, write))
                        return KVM_MIPS_GPA;
        } else {
                return KVM_MIPS_GVA;
        }

        return KVM_MIPS_MAPPED;
}

int kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
{
        int err;

        if (WARN(IS_ENABLED(CONFIG_KVM_MIPS_VZ),
                 "Expect BadInstr/BadInstrP registers to be used with VZ\n"))
                return -EINVAL;

retry:
        kvm_trap_emul_gva_lockless_begin(vcpu);
        err = get_user(*out, opc);
        kvm_trap_emul_gva_lockless_end(vcpu);

        if (unlikely(err)) {
                /*
                 * Try to handle the fault, maybe we just raced with a GVA
                 * invalidation.
                 */
                err = kvm_trap_emul_gva_fault(vcpu, (unsigned long)opc,
                                              false);
                if (unlikely(err)) {
                        kvm_err("%s: illegal address: %p\n",
                                __func__, opc);
                        return -EFAULT;
                }

                /* Hopefully it'll work now */
                goto retry;
        }
        return 0;
}