 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * Copyright 2010-2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/hugetlb.h>
#include <linux/module.h>
#include <linux/log2.h>

#include <asm/trace.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/pte-walk.h>
/* Translate address of a vmalloc'd thing to a linear map address */
static void *real_vmalloc_addr(void *x)
{
	unsigned long addr = (unsigned long) x;
	pte_t *p;
	/*
	 * Assume we don't have huge pages in vmalloc space, so we don't
	 * need to worry about THP collapse/split.  This is called only in
	 * real mode with MSR_EE = 0, hence no need for irq_save/restore.
	 */
	p = find_init_mm_pte(addr, NULL);
	if (!p || !pte_present(*p))
		return NULL;
	addr = (pte_pfn(*p) << PAGE_SHIFT) | (addr & ~PAGE_MASK);
	return __va(addr);
}
/* Return 1 if we need to do a global tlbie, 0 if we can use tlbiel */
static int global_invalidates(struct kvm *kvm)
{
	int global;
	int cpu;

	/*
	 * If there is only one vcore, and it's currently running,
	 * as indicated by local_paca->kvm_hstate.kvm_vcpu being set,
	 * we can use tlbiel as long as we mark all other physical
	 * cores as potentially having stale TLB entries for this lpid.
	 * Otherwise, don't use tlbiel.
	 */
	if (kvm->arch.online_vcores == 1 && local_paca->kvm_hstate.kvm_vcpu)
		global = 0;
	else
		global = 1;

	if (!global) {
		/* any other core might now have stale TLB entries... */
		smp_wmb();
		cpumask_setall(&kvm->arch.need_tlb_flush);
		cpu = local_paca->kvm_hstate.kvm_vcore->pcpu;
		/*
		 * On POWER9, threads are independent but the TLB is shared,
		 * so use the bit for the first thread to represent the core.
		 */
		if (cpu_has_feature(CPU_FTR_ARCH_300))
			cpu = cpu_first_thread_sibling(cpu);
		cpumask_clear_cpu(cpu, &kvm->arch.need_tlb_flush);
	}

	return global;
}
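
/*
 * Reverse-mapping overview (a summary of the code below): each memslot has
 * one rmap word per guest page.  That word holds a lock bit, a "present"
 * bit, saved R/C bits and the index of one HPTE mapping the page; the
 * forw/back fields of the revmap entries then link all HPTEs for that page
 * into a circular list.
 */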
/*
 * Add this HPTE into the chain for the real page.
 * Must be called with the chain locked; it unlocks the chain.
 */
void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
			     unsigned long *rmap, long pte_index, int realmode)
{
	struct revmap_entry *head, *tail;
	unsigned long i;

	if (*rmap & KVMPPC_RMAP_PRESENT) {
		i = *rmap & KVMPPC_RMAP_INDEX;
		head = &kvm->arch.hpt.rev[i];
		if (realmode)
			head = real_vmalloc_addr(head);
		tail = &kvm->arch.hpt.rev[head->back];
		if (realmode)
			tail = real_vmalloc_addr(tail);
		rev->forw = i;
		rev->back = head->back;
		tail->forw = pte_index;
		head->back = pte_index;
	} else {
		rev->forw = rev->back = pte_index;
		*rmap = (*rmap & ~KVMPPC_RMAP_INDEX) |
			pte_index | KVMPPC_RMAP_PRESENT;
	}
	unlock_rmap(rmap);
}
EXPORT_SYMBOL_GPL(kvmppc_add_revmap_chain);
/* Update the dirty bitmap of a memslot */
void kvmppc_update_dirty_map(struct kvm_memory_slot *memslot,
			     unsigned long gfn, unsigned long psize)
{
	unsigned long npages;

	if (!psize || !memslot->dirty_bitmap)
		return;
	npages = (psize + PAGE_SIZE - 1) / PAGE_SIZE;
	gfn -= memslot->base_gfn;
	set_dirty_bits_atomic(memslot->dirty_bitmap, gfn, npages);
}
EXPORT_SYMBOL_GPL(kvmppc_update_dirty_map);
static void kvmppc_set_dirty_from_hpte(struct kvm *kvm,
			unsigned long hpte_v, unsigned long hpte_gr)
{
	struct kvm_memory_slot *memslot;
	unsigned long gfn;
	unsigned long psize;

	psize = kvmppc_actual_pgsz(hpte_v, hpte_gr);
	gfn = hpte_rpn(hpte_gr, psize);
	memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
	if (memslot && memslot->dirty_bitmap)
		kvmppc_update_dirty_map(memslot, gfn, psize);
}
/* Returns a pointer to the revmap entry for the page mapped by a HPTE */
static unsigned long *revmap_for_hpte(struct kvm *kvm, unsigned long hpte_v,
				      unsigned long hpte_gr,
				      struct kvm_memory_slot **memslotp,
				      unsigned long *gfnp)
{
	struct kvm_memory_slot *memslot;
	unsigned long *rmap;
	unsigned long gfn;

	gfn = hpte_rpn(hpte_gr, kvmppc_actual_pgsz(hpte_v, hpte_gr));
	memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
	if (memslotp)
		*memslotp = memslot;
	if (gfnp)
		*gfnp = gfn;
	if (!memslot)
		return NULL;

	rmap = real_vmalloc_addr(&memslot->arch.rmap[gfn - memslot->base_gfn]);
	return rmap;
}
/* Remove this HPTE from the chain for a real page */
static void remove_revmap_chain(struct kvm *kvm, long pte_index,
				struct revmap_entry *rev,
				unsigned long hpte_v, unsigned long hpte_r)
{
	struct revmap_entry *next, *prev;
	unsigned long ptel, head;
	unsigned long *rmap;
	unsigned long rcbits;
	struct kvm_memory_slot *memslot;
	unsigned long gfn;

	rcbits = hpte_r & (HPTE_R_R | HPTE_R_C);
	ptel = rev->guest_rpte |= rcbits;
	rmap = revmap_for_hpte(kvm, hpte_v, ptel, &memslot, &gfn);
	if (!rmap)
		return;
	lock_rmap(rmap);

	head = *rmap & KVMPPC_RMAP_INDEX;
	next = real_vmalloc_addr(&kvm->arch.hpt.rev[rev->forw]);
	prev = real_vmalloc_addr(&kvm->arch.hpt.rev[rev->back]);
	next->back = rev->back;
	prev->forw = rev->forw;
	if (head == pte_index) {
		head = rev->forw;
		if (head == pte_index)
			*rmap &= ~(KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_INDEX);
		else
			*rmap = (*rmap & ~KVMPPC_RMAP_INDEX) | head;
	}
	*rmap |= rcbits << KVMPPC_RMAP_RC_SHIFT;
	if (rcbits & HPTE_R_C)
		kvmppc_update_dirty_map(memslot, gfn,
					kvmppc_actual_pgsz(hpte_v, hpte_r));
	unlock_rmap(rmap);
}
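
/*
 * H_ENTER implementation.  The flow below is: translate the guest real
 * address to a memslot and host virtual address, look up the host PTE to
 * find the backing real page (if any), pick and lock a slot in the target
 * HPTE group, link the new entry into the rmap chain for the page, and
 * finally write the HPTE, converting to the POWER9 format where needed.
 */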
long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
		       long pte_index, unsigned long pteh, unsigned long ptel,
		       pgd_t *pgdir, bool realmode, unsigned long *pte_idx_ret)
{
	unsigned long i, pa, gpa, gfn, psize;
	unsigned long slot_fn, hva;
	__be64 *hpte;
	struct revmap_entry *rev;
	unsigned long g_ptel;
	struct kvm_memory_slot *memslot;
	unsigned hpage_shift;
	bool is_ci;
	unsigned long *rmap;
	pte_t *ptep;
	unsigned int writing;
	unsigned long mmu_seq;
	unsigned long rcbits, irq_flags = 0;

	if (kvm_is_radix(kvm))
		return H_FUNCTION;
	psize = kvmppc_actual_pgsz(pteh, ptel);
	if (!psize)
		return H_PARAMETER;
	writing = hpte_is_writable(ptel);
	pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID);
	ptel &= ~HPTE_GR_RESERVED;
	g_ptel = ptel;

	/* used later to detect if we might have been invalidated */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();
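
	/*
	 * Ordering note: mmu_notifier_seq is sampled (with the smp_rmb()
	 * above) before the host page tables are walked; the insertion path
	 * further down re-checks it with mmu_notifier_retry() while holding
	 * the rmap lock, and falls back to writing a non-present HPTE if an
	 * invalidation raced with us.
	 */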
	/* Find the memslot (if any) for this address */
	gpa = (ptel & HPTE_R_RPN) & ~(psize - 1);
	gfn = gpa >> PAGE_SHIFT;
	memslot = __gfn_to_memslot(kvm_memslots_raw(kvm), gfn);
	pa = 0;
	is_ci = false;
	rmap = NULL;
	if (!(memslot && !(memslot->flags & KVM_MEMSLOT_INVALID))) {
		/* Emulated MMIO - mark this with key=31 */
		pteh |= HPTE_V_ABSENT;
		ptel |= HPTE_R_KEY_HI | HPTE_R_KEY_LO;
		goto do_insert;
	}

	/* Check if the requested page fits entirely in the memslot. */
	if (!slot_is_aligned(memslot, psize))
		return H_PARAMETER;
	slot_fn = gfn - memslot->base_gfn;
	rmap = &memslot->arch.rmap[slot_fn];

	/* Translate to host virtual address */
	hva = __gfn_to_hva_memslot(memslot, gfn);

	/*
	 * If we had a page table change after lookup, we would
	 * retry via mmu_notifier_retry.
	 */
	local_irq_save(irq_flags);
	/*
	 * If called in real mode we have MSR_EE = 0. Otherwise
	 * we disabled interrupts above.
	 */
	ptep = __find_linux_pte(pgdir, hva, NULL, &hpage_shift);
	if (ptep) {
		pte_t pte;
		unsigned int host_pte_size;

		if (hpage_shift)
			host_pte_size = 1ul << hpage_shift;
		else
			host_pte_size = PAGE_SIZE;
		/*
		 * We should always find the guest page size to be <= the
		 * host page size, if the host is using hugepages.
		 */
		if (host_pte_size < psize) {
			local_irq_restore(irq_flags);
			return H_PARAMETER;
		}
		pte = kvmppc_read_update_linux_pte(ptep, writing);
		if (pte_present(pte) && !pte_protnone(pte)) {
			if (writing && !__pte_write(pte))
				/* make the actual HPTE be read-only */
				ptel = hpte_make_readonly(ptel);
			is_ci = pte_ci(pte);
			pa = pte_pfn(pte) << PAGE_SHIFT;
			pa |= hva & (host_pte_size - 1);
			pa |= gpa & ~PAGE_MASK;
		}
	}
	local_irq_restore(irq_flags);
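
	/*
	 * At this point pa is the host real address backing the guest page
	 * (including the offset within it), or 0 if there is no usable host
	 * mapping; in the latter case the HPTE is entered as absent and
	 * will be faulted in later.
	 */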
	ptel &= HPTE_R_KEY | HPTE_R_PP0 | (psize-1);
	ptel |= pa;

	if (pa)
		pteh |= HPTE_V_VALID;
	else {
		pteh |= HPTE_V_ABSENT;
		ptel &= ~(HPTE_R_KEY_HI | HPTE_R_KEY_LO);
	}

	/* If we have a host pte mapping, check WIMG */
	if (ptep && !hpte_cache_flags_ok(ptel, is_ci)) {
		if (is_ci)
			return H_PARAMETER;
		/*
		 * Allow guest to map emulated device memory as
		 * uncacheable, but actually make it cacheable.
		 */
		ptel &= ~(HPTE_R_W|HPTE_R_I|HPTE_R_G);
		ptel |= HPTE_R_M;
	}
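
	/*
	 * HPT geometry note: each HPTE is 16 bytes (two doublewords), so
	 * pte_index << 4 below is the byte offset of an entry in the HPT,
	 * and a hash bucket (HPTE group) holds 8 consecutive entries.
	 */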
	/* Find and lock the HPTEG slot to use */
 do_insert:
	if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt))
		return H_PARAMETER;
	if (likely((flags & H_EXACT) == 0)) {
		pte_index &= ~7UL;
		hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
		for (i = 0; i < 8; ++i) {
			if ((be64_to_cpu(*hpte) & HPTE_V_VALID) == 0 &&
			    try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
					  HPTE_V_ABSENT))
				break;
			hpte += 2;
		}
		if (i == 8) {
			/*
			 * Since try_lock_hpte doesn't retry (not even stdcx.
			 * failures), it could be that there is a free slot
			 * but we transiently failed to lock it. Try again,
			 * actually locking each slot and checking it.
			 */
			hpte -= 16;
			for (i = 0; i < 8; ++i) {
				u64 pte;

				while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
					cpu_relax();
				pte = be64_to_cpu(hpte[0]);
				if (!(pte & (HPTE_V_VALID | HPTE_V_ABSENT)))
					break;
				__unlock_hpte(hpte, pte);
				hpte += 2;
			}
			if (i == 8)
				return H_PTEG_FULL;
		}
		pte_index += i;
	} else {
		hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
		if (!try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
				   HPTE_V_ABSENT)) {
			/* Lock the slot and check again */
			u64 pte;

			while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
				cpu_relax();
			pte = be64_to_cpu(hpte[0]);
			if (pte & (HPTE_V_VALID | HPTE_V_ABSENT)) {
				__unlock_hpte(hpte, pte);
				return H_PTEG_FULL;
			}
		}
	}
	/* Save away the guest's idea of the second HPTE dword */
	rev = &kvm->arch.hpt.rev[pte_index];
	if (realmode)
		rev = real_vmalloc_addr(rev);
	if (rev) {
		rev->guest_rpte = g_ptel;
		note_hpte_modification(kvm, rev);
	}

	/* Link HPTE into reverse-map chain */
	if (pteh & HPTE_V_VALID) {
		if (realmode)
			rmap = real_vmalloc_addr(rmap);
		lock_rmap(rmap);
		/* Check for pending invalidations under the rmap chain lock */
		if (mmu_notifier_retry(kvm, mmu_seq)) {
			/* inval in progress, write a non-present HPTE */
			pteh |= HPTE_V_ABSENT;
			pteh &= ~HPTE_V_VALID;
			ptel &= ~(HPTE_R_KEY_HI | HPTE_R_KEY_LO);
			unlock_rmap(rmap);
		} else {
			kvmppc_add_revmap_chain(kvm, rev, rmap, pte_index,
						realmode);
			/* Only set R/C in real HPTE if already set in *rmap */
			rcbits = *rmap >> KVMPPC_RMAP_RC_SHIFT;
			ptel &= rcbits | ~(HPTE_R_R | HPTE_R_C);
		}
	}

	/* Convert to new format on P9 */
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		ptel = hpte_old_to_new_r(pteh, ptel);
		pteh = hpte_old_to_new_v(pteh);
	}
	hpte[1] = cpu_to_be64(ptel);

	/* Write the first HPTE dword, unlocking the HPTE and making it valid */
	eieio();
	__unlock_hpte(hpte, pteh);
	asm volatile("ptesync" : : : "memory");

	*pte_idx_ret = pte_index;
	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_do_h_enter);
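
/*
 * Wrapper called for a guest H_ENTER hcall: the chosen pte_index is passed
 * back to the guest in GPR4, per the PAPR hcall return-value convention.
 */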
long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
		    long pte_index, unsigned long pteh, unsigned long ptel)
{
	return kvmppc_do_h_enter(vcpu->kvm, flags, pte_index, pteh, ptel,
				 vcpu->arch.pgdir, true,
				 &vcpu->arch.regs.gpr[4]);
}

#ifdef __BIG_ENDIAN__
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->lock_token))
#else
#define LOCK_TOKEN	(*(u32 *)(&get_paca()->paca_index))
#endif
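
/*
 * Emulated MMIO pages are entered as HPTE_V_ABSENT with storage key 31
 * (both key bits set) by kvmppc_do_h_enter() above; is_mmio_hpte() simply
 * recognises that encoding.
 */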
static inline int is_mmio_hpte(unsigned long v, unsigned long r)
{
	return ((v & HPTE_V_ABSENT) &&
		(r & (HPTE_R_KEY_HI | HPTE_R_KEY_LO)) ==
		(HPTE_R_KEY_HI | HPTE_R_KEY_LO));
}
static inline void fixup_tlbie_lpid(unsigned long rb_value, unsigned long lpid)
{

	if (cpu_has_feature(CPU_FTR_P9_TLBIE_ERAT_BUG)) {
		/* Radix flush for a hash guest */

		unsigned long rb, rs, prs, r, ric;

		rb = PPC_BIT(52);	/* IS = 2 */
		rs = 0;			/* lpid = 0 */
		prs = 0;		/* partition scoped */
		r = 1;			/* radix format */
		ric = 0;		/* RIC_FLUSH_TLB */

		/*
		 * Need the extra ptesync to make sure we don't
		 * re-order the tlbie
		 */
		asm volatile("ptesync": : :"memory");
		asm volatile(PPC_TLBIE_5(%0, %4, %3, %2, %1)
			     : : "r"(rb), "i"(r), "i"(prs),
			       "i"(ric), "r"(rs) : "memory");
	}

	if (cpu_has_feature(CPU_FTR_P9_TLBIE_STQ_BUG)) {
		asm volatile("ptesync": : :"memory");
		asm volatile(PPC_TLBIE_5(%0,%1,0,0,0) : :
			     "r" (rb_value), "r" (lpid));
	}
}
static void do_tlbies(struct kvm *kvm, unsigned long *rbvalues,
		      long npages, int global, bool need_sync)
{
	long i;

	/*
	 * We use the POWER9 5-operand versions of tlbie and tlbiel here.
	 * Since we are using RIC=0 PRS=0 R=0, and P7/P8 tlbiel ignores
	 * the RS field, this is backwards-compatible with P7 and P8.
	 */
	if (global) {
		if (need_sync)
			asm volatile("ptesync" : : : "memory");
		for (i = 0; i < npages; ++i) {
			asm volatile(PPC_TLBIE_5(%0,%1,0,0,0) : :
				     "r" (rbvalues[i]), "r" (kvm->arch.lpid));
		}

		fixup_tlbie_lpid(rbvalues[i - 1], kvm->arch.lpid);
		asm volatile("eieio; tlbsync; ptesync" : : : "memory");
	} else {
		if (need_sync)
			asm volatile("ptesync" : : : "memory");
		for (i = 0; i < npages; ++i) {
			asm volatile(PPC_TLBIEL(%0,%1,0,0,0) : :
				     "r" (rbvalues[i]), "r" (0));
		}
		asm volatile("ptesync" : : : "memory");
	}
}
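
/*
 * Note on the two paths above: the global case broadcasts tlbie for this
 * guest's LPID to all processors, while the local case only runs tlbiel on
 * this CPU; global_invalidates() has already marked the other cores as
 * needing a TLB flush before they next run this guest.
 */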
long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
			unsigned long pte_index, unsigned long avpn,
			unsigned long *hpret)
{
	__be64 *hpte;
	unsigned long v, r, rb;
	struct revmap_entry *rev;
	u64 pte, orig_pte, pte_r;

	if (kvm_is_radix(kvm))
		return H_FUNCTION;
	if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt))
		return H_PARAMETER;
	hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	pte = orig_pte = be64_to_cpu(hpte[0]);
	pte_r = be64_to_cpu(hpte[1]);
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		pte = hpte_new_to_old_v(pte, pte_r);
		pte_r = hpte_new_to_old_r(pte_r);
	}
	if ((pte & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
	    ((flags & H_AVPN) && (pte & ~0x7fUL) != avpn) ||
	    ((flags & H_ANDCOND) && (pte & avpn) != 0)) {
		__unlock_hpte(hpte, orig_pte);
		return H_NOT_FOUND;
	}

	rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
	v = pte & ~HPTE_V_HVLOCK;
	if (v & HPTE_V_VALID) {
		hpte[0] &= ~cpu_to_be64(HPTE_V_VALID);
		rb = compute_tlbie_rb(v, pte_r, pte_index);
		do_tlbies(kvm, &rb, 1, global_invalidates(kvm), true);
		/*
		 * The reference (R) and change (C) bits in a HPT
		 * entry can be set by hardware at any time up until
		 * the HPTE is invalidated and the TLB invalidation
		 * sequence has completed.  This means that when
		 * removing a HPTE, we need to re-read the HPTE after
		 * the invalidation sequence has completed in order to
		 * obtain reliable values of R and C.
		 */
		remove_revmap_chain(kvm, pte_index, rev, v,
				    be64_to_cpu(hpte[1]));
	}
	r = rev->guest_rpte & ~HPTE_GR_RESERVED;
	note_hpte_modification(kvm, rev);
	unlock_hpte(hpte, 0);

	if (is_mmio_hpte(v, pte_r))
		atomic64_inc(&kvm->arch.mmio_update);

	if (v & HPTE_V_ABSENT)
		v = (v & ~HPTE_V_ABSENT) | HPTE_V_VALID;
	hpret[0] = v;
	hpret[1] = r;
	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_do_h_remove);
long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
		     unsigned long pte_index, unsigned long avpn)
{
	return kvmppc_do_h_remove(vcpu->kvm, flags, pte_index, avpn,
				  &vcpu->arch.regs.gpr[4]);
}
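
/*
 * H_BULK_REMOVE: the guest passes up to 4 (flags|pte_index, avpn) pairs in
 * GPR4 onward.  Each request's completion code is written back into its
 * first parameter word: in the code below 0x80 is written on the success
 * path, 0x90 when the locked entry does not match the request, and 0xa0
 * for a parameter error, with the R/C bits of removed entries folded into
 * the returned value.
 */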
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *args = &vcpu->arch.regs.gpr[4];
	__be64 *hp, *hptes[4];
	unsigned long tlbrb[4];
	long int i, j, k, n, found, indexes[4];
	unsigned long flags, req, pte_index, rcbits;
	int global;
	long int ret = H_SUCCESS;
	struct revmap_entry *rev, *revs[4];
	u64 hp0, hp1;

	if (kvm_is_radix(kvm))
		return H_FUNCTION;
	global = global_invalidates(kvm);
	for (i = 0; i < 4 && ret == H_SUCCESS; ) {
		n = 0;
		for (; i < 4; ++i) {
			j = i * 2;
			pte_index = args[j];
			flags = pte_index >> 56;
			pte_index &= ((1ul << 56) - 1);
			req = flags >> 6;
			flags &= 3;
			if (req == 3) {		/* no more requests */
				i = 4;
				break;
			}
			if (req != 1 || flags == 3 ||
			    pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt)) {
				/* parameter error */
				args[j] = ((0xa0 | flags) << 56) + pte_index;
				ret = H_PARAMETER;
				break;
			}
			hp = (__be64 *) (kvm->arch.hpt.virt + (pte_index << 4));
			/* to avoid deadlock, don't spin except for first */
			if (!try_lock_hpte(hp, HPTE_V_HVLOCK)) {
				if (n)
					break;
				while (!try_lock_hpte(hp, HPTE_V_HVLOCK))
					cpu_relax();
			}
			found = 0;
			hp0 = be64_to_cpu(hp[0]);
			hp1 = be64_to_cpu(hp[1]);
			if (cpu_has_feature(CPU_FTR_ARCH_300)) {
				hp0 = hpte_new_to_old_v(hp0, hp1);
				hp1 = hpte_new_to_old_r(hp1);
			}
			if (hp0 & (HPTE_V_ABSENT | HPTE_V_VALID)) {
				switch (flags & 3) {
				case 0:		/* absolute */
					found = 1;
					break;
				case 1:		/* andcond */
					if (!(hp0 & args[j + 1]))
						found = 1;
					break;
				case 2:		/* AVPN */
					if ((hp0 & ~0x7fUL) == args[j + 1])
						found = 1;
					break;
				}
			}
			if (!found) {
				hp[0] &= ~cpu_to_be64(HPTE_V_HVLOCK);
				args[j] = ((0x90 | flags) << 56) + pte_index;
				continue;
			}

			args[j] = ((0x80 | flags) << 56) + pte_index;
			rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
			note_hpte_modification(kvm, rev);

			if (!(hp0 & HPTE_V_VALID)) {
				/* insert R and C bits from PTE */
				rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
				args[j] |= rcbits << (56 - 5);
				hp[0] = 0;
				if (is_mmio_hpte(hp0, hp1))
					atomic64_inc(&kvm->arch.mmio_update);
				continue;
			}

			/* leave it locked */
			hp[0] &= ~cpu_to_be64(HPTE_V_VALID);
			tlbrb[n] = compute_tlbie_rb(hp0, hp1, pte_index);
			indexes[n] = j;
			hptes[n] = hp;
			revs[n] = rev;
			++n;
		}

		if (!n)
			break;

		/* Now that we've collected a batch, do the tlbies */
		do_tlbies(kvm, tlbrb, n, global, true);

		/* Read PTE low words after tlbie to get final R/C values */
		for (k = 0; k < n; ++k) {
			j = indexes[k];
			pte_index = args[j] & ((1ul << 56) - 1);
			hp = hptes[k];
			rev = revs[k];
			remove_revmap_chain(kvm, pte_index, rev,
				be64_to_cpu(hp[0]), be64_to_cpu(hp[1]));
			rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
			args[j] |= rcbits << (56 - 5);
			__unlock_hpte(hp, 0);
		}
	}

	return ret;
}
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
		      unsigned long pte_index, unsigned long avpn,
		      unsigned long va)
{
	struct kvm *kvm = vcpu->kvm;
	__be64 *hpte;
	struct revmap_entry *rev;
	unsigned long v, r, rb, mask, bits;
	u64 pte_v, pte_r;

	if (kvm_is_radix(kvm))
		return H_FUNCTION;
	if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt))
		return H_PARAMETER;

	hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	v = pte_v = be64_to_cpu(hpte[0]);
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		v = hpte_new_to_old_v(v, be64_to_cpu(hpte[1]));
	if ((v & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
	    ((flags & H_AVPN) && (v & ~0x7fUL) != avpn)) {
		__unlock_hpte(hpte, pte_v);
		return H_NOT_FOUND;
	}

	pte_r = be64_to_cpu(hpte[1]);
	bits = (flags << 55) & HPTE_R_PP0;
	bits |= (flags << 48) & HPTE_R_KEY_HI;
	bits |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);

	/* Update guest view of 2nd HPTE dword */
	mask = HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N |
		HPTE_R_KEY_HI | HPTE_R_KEY_LO;
	rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
	if (rev) {
		r = (rev->guest_rpte & ~mask) | bits;
		rev->guest_rpte = r;
		note_hpte_modification(kvm, rev);
	}

	/* Update HPTE */
	if (v & HPTE_V_VALID) {
		/*
		 * If the page is valid, don't let it transition from
		 * readonly to writable.  If it should be writable, we'll
		 * take a trap and let the page fault code sort it out.
		 */
		r = (pte_r & ~mask) | bits;
		if (hpte_is_writable(r) && !hpte_is_writable(pte_r))
			r = hpte_make_readonly(r);
		/* If the PTE is changing, invalidate it first */
		if (r != pte_r) {
			rb = compute_tlbie_rb(v, r, pte_index);
			hpte[0] = cpu_to_be64((pte_v & ~HPTE_V_VALID) |
					      HPTE_V_ABSENT);
			do_tlbies(kvm, &rb, 1, global_invalidates(kvm), true);
			/* Don't lose R/C bit updates done by hardware */
			r |= be64_to_cpu(hpte[1]) & (HPTE_R_R | HPTE_R_C);
			hpte[1] = cpu_to_be64(r);
		}
	}
	unlock_hpte(hpte, pte_v & ~HPTE_V_HVLOCK);
	asm volatile("ptesync" : : : "memory");
	if (is_mmio_hpte(v, pte_r))
		atomic64_inc(&kvm->arch.mmio_update);

	return H_SUCCESS;
}
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
		   unsigned long pte_index)
{
	struct kvm *kvm = vcpu->kvm;
	__be64 *hpte;
	unsigned long v, r;
	int i, n = 1;
	struct revmap_entry *rev = NULL;

	if (kvm_is_radix(kvm))
		return H_FUNCTION;
	if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt))
		return H_PARAMETER;
	if (flags & H_READ_4) {
		pte_index &= ~3;
		n = 4;
	}
	rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
	for (i = 0; i < n; ++i, ++pte_index) {
		hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
		v = be64_to_cpu(hpte[0]) & ~HPTE_V_HVLOCK;
		r = be64_to_cpu(hpte[1]);
		if (cpu_has_feature(CPU_FTR_ARCH_300)) {
			v = hpte_new_to_old_v(v, r);
			r = hpte_new_to_old_r(r);
		}
		if (v & HPTE_V_ABSENT) {
			v &= ~HPTE_V_ABSENT;
			v |= HPTE_V_VALID;
		}
		if (v & HPTE_V_VALID) {
			r = rev[i].guest_rpte | (r & (HPTE_R_R | HPTE_R_C));
			r &= ~HPTE_GR_RESERVED;
		}
		vcpu->arch.regs.gpr[4 + i * 2] = v;
		vcpu->arch.regs.gpr[5 + i * 2] = r;
	}
	return H_SUCCESS;
}
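
/*
 * Note on H_READ above: with the H_READ_4 flag the index is rounded down to
 * a multiple of 4 and four consecutive entries are returned in GPR4/5,
 * GPR6/7, GPR8/9 and GPR10/11; absent (paged-out) entries are reported to
 * the guest as valid, using the guest's own view of the second doubleword.
 */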
long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
			unsigned long pte_index)
{
	struct kvm *kvm = vcpu->kvm;
	__be64 *hpte;
	unsigned long v, r, gr;
	struct revmap_entry *rev;
	unsigned long *rmap;
	long ret = H_NOT_FOUND;

	if (kvm_is_radix(kvm))
		return H_FUNCTION;
	if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt))
		return H_PARAMETER;

	rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
	hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	v = be64_to_cpu(hpte[0]);
	r = be64_to_cpu(hpte[1]);
	if (!(v & (HPTE_V_VALID | HPTE_V_ABSENT)))
		goto out;

	gr = rev->guest_rpte;
	if (rev->guest_rpte & HPTE_R_R) {
		rev->guest_rpte &= ~HPTE_R_R;
		note_hpte_modification(kvm, rev);
	}
	if (v & HPTE_V_VALID) {
		gr |= r & (HPTE_R_R | HPTE_R_C);
		if (r & HPTE_R_R) {
			kvmppc_clear_ref_hpte(kvm, hpte, pte_index);
			rmap = revmap_for_hpte(kvm, v, gr, NULL, NULL);
			if (rmap) {
				lock_rmap(rmap);
				*rmap |= KVMPPC_RMAP_REFERENCED;
				unlock_rmap(rmap);
			}
		}
	}
	vcpu->arch.regs.gpr[4] = gr;
	ret = H_SUCCESS;
 out:
	unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);
	return ret;
}
long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
			unsigned long pte_index)
{
	struct kvm *kvm = vcpu->kvm;
	__be64 *hpte;
	unsigned long v, r, gr;
	struct revmap_entry *rev;
	long ret = H_NOT_FOUND;

	if (kvm_is_radix(kvm))
		return H_FUNCTION;
	if (pte_index >= kvmppc_hpt_npte(&kvm->arch.hpt))
		return H_PARAMETER;

	rev = real_vmalloc_addr(&kvm->arch.hpt.rev[pte_index]);
	hpte = (__be64 *)(kvm->arch.hpt.virt + (pte_index << 4));
	while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
		cpu_relax();
	v = be64_to_cpu(hpte[0]);
	r = be64_to_cpu(hpte[1]);
	if (!(v & (HPTE_V_VALID | HPTE_V_ABSENT)))
		goto out;

	gr = rev->guest_rpte;
	if (gr & HPTE_R_C) {
		rev->guest_rpte &= ~HPTE_R_C;
		note_hpte_modification(kvm, rev);
	}
	if (v & HPTE_V_VALID) {
		/* need to make it temporarily absent so C is stable */
		hpte[0] |= cpu_to_be64(HPTE_V_ABSENT);
		kvmppc_invalidate_hpte(kvm, hpte, pte_index);
		r = be64_to_cpu(hpte[1]);
		gr |= r & (HPTE_R_R | HPTE_R_C);
		if (r & HPTE_R_C) {
			hpte[1] = cpu_to_be64(r & ~HPTE_R_C);
			eieio();
			kvmppc_set_dirty_from_hpte(kvm, v, gr);
		}
	}
	vcpu->arch.regs.gpr[4] = gr;
	ret = H_SUCCESS;
 out:
	unlock_hpte(hpte, v & ~HPTE_V_HVLOCK);
	return ret;
}
void kvmppc_invalidate_hpte(struct kvm *kvm, __be64 *hptep,
			unsigned long pte_index)
{
	unsigned long rb;
	u64 hp0, hp1;

	hptep[0] &= ~cpu_to_be64(HPTE_V_VALID);
	hp0 = be64_to_cpu(hptep[0]);
	hp1 = be64_to_cpu(hptep[1]);
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		hp0 = hpte_new_to_old_v(hp0, hp1);
		hp1 = hpte_new_to_old_r(hp1);
	}
	rb = compute_tlbie_rb(hp0, hp1, pte_index);
	do_tlbies(kvm, &rb, 1, 1, true);
}
EXPORT_SYMBOL_GPL(kvmppc_invalidate_hpte);
void kvmppc_clear_ref_hpte(struct kvm *kvm, __be64 *hptep,
			unsigned long pte_index)
{
	unsigned long rb;
	unsigned char rbyte;
	u64 hp0, hp1;

	hp0 = be64_to_cpu(hptep[0]);
	hp1 = be64_to_cpu(hptep[1]);
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		hp0 = hpte_new_to_old_v(hp0, hp1);
		hp1 = hpte_new_to_old_r(hp1);
	}
	rb = compute_tlbie_rb(hp0, hp1, pte_index);
	rbyte = (be64_to_cpu(hptep[1]) & ~HPTE_R_R) >> 8;
	/* modify only the second-last byte, which contains the ref bit */
	*((char *)hptep + 14) = rbyte;
	do_tlbies(kvm, &rb, 1, 1, false);
}
EXPORT_SYMBOL_GPL(kvmppc_clear_ref_hpte);
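
/*
 * Base page shift for each encoding of the LP field in a large-page SLB
 * entry, indexed by (slb_v & SLB_VSID_LP) >> 4 as in the lookup code below.
 */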
static int slb_base_page_shift[4] = {
	24,	/* 16M */
	16,	/* 64k */
	34,	/* 16G */
	20,	/* 1M, unsupported */
};
static struct mmio_hpte_cache_entry *mmio_cache_search(struct kvm_vcpu *vcpu,
		unsigned long eaddr, unsigned long slb_v, long mmio_update)
{
	struct mmio_hpte_cache_entry *entry = NULL;
	unsigned int pshift;
	unsigned int i;

	for (i = 0; i < MMIO_HPTE_CACHE_SIZE; i++) {
		entry = &vcpu->arch.mmio_cache.entry[i];
		if (entry->mmio_update == mmio_update) {
			pshift = entry->slb_base_pshift;
			if ((entry->eaddr >> pshift) == (eaddr >> pshift) &&
			    entry->slb_v == slb_v)
				return entry;
		}
	}
	return NULL;
}

static struct mmio_hpte_cache_entry *
			next_mmio_cache_entry(struct kvm_vcpu *vcpu)
{
	unsigned int index = vcpu->arch.mmio_cache.index;

	vcpu->arch.mmio_cache.index++;
	if (vcpu->arch.mmio_cache.index == MMIO_HPTE_CACHE_SIZE)
		vcpu->arch.mmio_cache.index = 0;

	return &vcpu->arch.mmio_cache.entry[index];
}
/* When called from virtmode, this function should be protected by
 * preempt_disable(); otherwise, holding HPTE_V_HVLOCK
 * can trigger a deadlock.
 */
long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
			      unsigned long valid)
{
	unsigned int i;
	unsigned int pshift;
	unsigned long somask;
	unsigned long vsid, hash;
	unsigned long avpn;
	__be64 *hpte;
	unsigned long mask, val;
	unsigned long v, r, orig_v;

	/* Get page shift, work out hash and AVPN etc. */
	mask = SLB_VSID_B | HPTE_V_AVPN | HPTE_V_SECONDARY;
	val = 0;
	pshift = 12;
	if (slb_v & SLB_VSID_L) {
		mask |= HPTE_V_LARGE;
		val |= HPTE_V_LARGE;
		pshift = slb_base_page_shift[(slb_v & SLB_VSID_LP) >> 4];
	}
	if (slb_v & SLB_VSID_B_1T) {
		somask = (1UL << 40) - 1;
		vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T;
		vsid ^= vsid << 25;
	} else {
		somask = (1UL << 28) - 1;
		vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT;
	}
	hash = (vsid ^ ((eaddr & somask) >> pshift)) & kvmppc_hpt_mask(&kvm->arch.hpt);
	avpn = slb_v & ~(somask >> 16);	/* also includes B */
	avpn |= (eaddr & somask) >> 16;

	if (pshift >= 24)
		avpn &= ~((1UL << (pshift - 16)) - 1);
	else
		avpn &= ~0x7fUL;
	val |= avpn;

	for (;;) {
		hpte = (__be64 *)(kvm->arch.hpt.virt + (hash << 7));

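		/*
		 * Each HPTE group is 8 entries of 16 bytes (hence hash << 7),
		 * i.e. 16 doublewords, which is why the loop below steps i by
		 * 2; the value returned on a match, (hash << 3) + (i >> 1),
		 * is the global index of the matching HPTE.
		 */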
		for (i = 0; i < 16; i += 2) {
			/* Read the PTE racily */
			v = be64_to_cpu(hpte[i]) & ~HPTE_V_HVLOCK;
			if (cpu_has_feature(CPU_FTR_ARCH_300))
				v = hpte_new_to_old_v(v, be64_to_cpu(hpte[i+1]));

			/* Check valid/absent, hash, segment size and AVPN */
			if (!(v & valid) || (v & mask) != val)
				continue;

			/* Lock the PTE and read it under the lock */
			while (!try_lock_hpte(&hpte[i], HPTE_V_HVLOCK))
				cpu_relax();
			v = orig_v = be64_to_cpu(hpte[i]) & ~HPTE_V_HVLOCK;
			r = be64_to_cpu(hpte[i+1]);
			if (cpu_has_feature(CPU_FTR_ARCH_300)) {
				v = hpte_new_to_old_v(v, r);
				r = hpte_new_to_old_r(r);
			}

			/*
			 * Check the HPTE again, including base page size
			 */
			if ((v & valid) && (v & mask) == val &&
			    kvmppc_hpte_base_page_shift(v, r) == pshift)
				/* Return with the HPTE still locked */
				return (hash << 3) + (i >> 1);

			__unlock_hpte(&hpte[i], orig_v);
		}

		if (val & HPTE_V_SECONDARY)
			break;
		val |= HPTE_V_SECONDARY;
		hash = hash ^ kvmppc_hpt_mask(&kvm->arch.hpt);
	}
	return -1;
}
EXPORT_SYMBOL(kvmppc_hv_find_lock_hpte);
/*
 * Called in real mode to check whether an HPTE not found fault
 * is due to accessing a paged-out page or an emulated MMIO page,
 * or if a protection fault is due to accessing a page that the
 * guest wanted read/write access to but which we made read-only.
 * Returns a possibly modified status (DSISR) value if the fault is
 * none of those cases (i.e. pass the interrupt to the guest),
 * -1 to pass the fault up to host kernel mode code, -2 to do that
 * and also load the instruction word (for MMIO emulation),
 * or 0 if we should make the guest retry the access.
 */
long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
			  unsigned long slb_v, unsigned int status, bool data)
{
	struct kvm *kvm = vcpu->kvm;
	long int index;
	unsigned long v, r, gr, orig_v;
	__be64 *hpte;
	unsigned long valid;
	struct revmap_entry *rev;
	unsigned long pp, key;
	struct mmio_hpte_cache_entry *cache_entry = NULL;
	long mmio_update = 0;

	/* For protection fault, expect to find a valid HPTE */
	valid = HPTE_V_VALID;
	if (status & DSISR_NOHPTE) {
		valid |= HPTE_V_ABSENT;
		mmio_update = atomic64_read(&kvm->arch.mmio_update);
		cache_entry = mmio_cache_search(vcpu, addr, slb_v, mmio_update);
	}
	if (cache_entry) {
		index = cache_entry->pte_index;
		v = cache_entry->hpte_v;
		r = cache_entry->hpte_r;
		gr = cache_entry->rpte;
	} else {
		index = kvmppc_hv_find_lock_hpte(kvm, addr, slb_v, valid);
		if (index < 0) {
			if (status & DSISR_NOHPTE)
				return status;	/* there really was no HPTE */
			return 0;	/* for prot fault, HPTE disappeared */
		}
		hpte = (__be64 *)(kvm->arch.hpt.virt + (index << 4));
		v = orig_v = be64_to_cpu(hpte[0]) & ~HPTE_V_HVLOCK;
		r = be64_to_cpu(hpte[1]);
		if (cpu_has_feature(CPU_FTR_ARCH_300)) {
			v = hpte_new_to_old_v(v, r);
			r = hpte_new_to_old_r(r);
		}
		rev = real_vmalloc_addr(&kvm->arch.hpt.rev[index]);
		gr = rev->guest_rpte;

		unlock_hpte(hpte, orig_v);
	}
	/* For not found, if the HPTE is valid by now, retry the instruction */
	if ((status & DSISR_NOHPTE) && (v & HPTE_V_VALID))
		return 0;

	/* Check access permissions to the page */
	pp = gr & (HPTE_R_PP0 | HPTE_R_PP);
	key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
	status &= ~DSISR_NOHPTE;	/* DSISR_NOHPTE == SRR1_ISI_NOPT */
	if (!data) {
		if (gr & (HPTE_R_N | HPTE_R_G))
			return status | SRR1_ISI_N_OR_G;
		if (!hpte_read_permission(pp, slb_v & key))
			return status | SRR1_ISI_PROT;
	} else if (status & DSISR_ISSTORE) {
		/* check write permission */
		if (!hpte_write_permission(pp, slb_v & key))
			return status | DSISR_PROTFAULT;
	} else {
		if (!hpte_read_permission(pp, slb_v & key))
			return status | DSISR_PROTFAULT;
	}

	/* Check storage key, if applicable */
	if (data && (vcpu->arch.shregs.msr & MSR_DR)) {
		unsigned int perm = hpte_get_skey_perm(gr, vcpu->arch.amr);
		if (status & DSISR_ISSTORE)
			perm >>= 1;
		if (perm & 1)
			return status | DSISR_KEYFAULT;
	}

	/* Save HPTE info for virtual-mode handler */
	vcpu->arch.pgfault_addr = addr;
	vcpu->arch.pgfault_index = index;
	vcpu->arch.pgfault_hpte[0] = v;
	vcpu->arch.pgfault_hpte[1] = r;
	vcpu->arch.pgfault_cache = cache_entry;
	/* Check the storage key to see if it is possibly emulated MMIO */
	if ((r & (HPTE_R_KEY_HI | HPTE_R_KEY_LO)) ==
	    (HPTE_R_KEY_HI | HPTE_R_KEY_LO)) {
		if (!cache_entry) {
			unsigned int pshift = 12;
			unsigned int pshift_index;

			if (slb_v & SLB_VSID_L) {
				pshift_index = ((slb_v & SLB_VSID_LP) >> 4);
				pshift = slb_base_page_shift[pshift_index];
			}
			cache_entry = next_mmio_cache_entry(vcpu);
			cache_entry->eaddr = addr;
			cache_entry->slb_base_pshift = pshift;
			cache_entry->pte_index = index;
			cache_entry->hpte_v = v;
			cache_entry->hpte_r = r;
			cache_entry->rpte = gr;
			cache_entry->slb_v = slb_v;
			cache_entry->mmio_update = mmio_update;
		}

		if (data && (vcpu->arch.shregs.msr & MSR_IR))
			return -2;	/* MMIO emulation - load instr word */
	}

	return -1;		/* send fault up to host kernel mode */