/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright SUSE Linux Products GmbH 2010
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#ifndef __ASM_KVM_BOOK3S_64_H__
#define __ASM_KVM_BOOK3S_64_H__

#include <linux/string.h>
#include <asm/bitops.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/cpu_has_feature.h>
#include <asm/ppc-opcode.h>
#include <asm/pte-walk.h>

/*
 * Structure for a nested guest, that is, for a guest that is managed by
 * one of our guests.
 */
struct kvm_nested_guest {
	struct kvm *l1_host;		/* L1 VM that owns this nested guest */
	int l1_lpid;			/* lpid L1 guest thinks this guest is */
	int shadow_lpid;		/* real lpid of this nested guest */
	pgd_t *shadow_pgtable;		/* our page table for this guest */
	u64 l1_gr_to_hr;		/* L1's addr of part'n-scoped table */
	u64 process_table;		/* process table entry for this guest */
	long refcnt;			/* number of pointers to this struct */
	struct mutex tlb_lock;		/* serialize page faults and tlbies */
	struct kvm_nested_guest *next;
	cpumask_t need_tlb_flush;
	short prev_cpu[NR_CPUS];
	u8 radix;			/* is this nested guest radix */
};

/*
 * We define a nested rmap entry as a single 64-bit quantity
 * 0xFFF0000000000000	12-bit lpid field
 * 0x000FFFFFFFFFF000	40-bit guest 4k page frame number
 * 0x0000000000000001	1-bit  single entry flag
 */
#define RMAP_NESTED_LPID_MASK		0xFFF0000000000000UL
#define RMAP_NESTED_LPID_SHIFT		(52)
#define RMAP_NESTED_GPA_MASK		0x000FFFFFFFFFF000UL
#define RMAP_NESTED_IS_SINGLE_ENTRY	0x0000000000000001UL

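/*
 * Illustrative sketch (not part of the kernel API): one way the fields above
 * could be unpacked from an entry; the helper name is hypothetical.
 *
 *	static inline void rmap_nested_decode(u64 rmap, unsigned int *lpid,
 *					      unsigned long *gpa)
 *	{
 *		*lpid = (rmap & RMAP_NESTED_LPID_MASK) >> RMAP_NESTED_LPID_SHIFT;
 *		*gpa  = rmap & RMAP_NESTED_GPA_MASK;
 *	}
 */
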
/* Structure for a nested guest rmap entry */
struct rmap_nested {
	struct llist_node list;
	u64 rmap;
};

/*
 * for_each_nest_rmap_safe - iterate over the list of nested rmap entries
 *			     safe against removal of the list entry or NULL list
 * @pos:	a (struct rmap_nested *) to use as a loop cursor
 * @node:	pointer to the first entry
 *		NOTE: this can be NULL
 * @rmapp:	an (unsigned long *) in which to return the rmap entries on each
 *		iteration
 *		NOTE: this must point to already allocated memory
 *
 * The nested_rmap is a llist of (struct rmap_nested) entries pointed to by the
 * rmap entry in the memslot. The list is always terminated by a "single entry"
 * stored in the list element of the final entry of the llist. If there is ONLY
 * a single entry then this is itself in the rmap entry of the memslot, not a
 * llist head pointer.
 *
 * Note that the iterator below assumes that a nested rmap entry is always
 * non-zero. This is true for our usage because the LPID field is always
 * non-zero (zero is reserved for the host).
 *
 * This should be used to iterate over the list of rmap_nested entries with
 * processing done on the u64 rmap value given by each iteration. This is safe
 * against removal of list entries and it is always safe to call free on (pos).
 *
 * e.g.
 * struct rmap_nested *cursor;
 * struct llist_node *first;
 * ... Any needed setup.
 * for_each_nest_rmap_safe(cursor, first, &rmap) {
 *	do_something(rmap);
 *	free(cursor);
 * }
 */
#define for_each_nest_rmap_safe(pos, node, rmapp)			       \
	for ((pos) = llist_entry((node), typeof(*(pos)), list);	       \
	     (node) &&							       \
	     (*(rmapp) = ((RMAP_NESTED_IS_SINGLE_ENTRY & ((u64) (node))) ?    \
			 ((u64) (node)) : ((pos)->rmap))) &&		       \
	     (((node) = ((RMAP_NESTED_IS_SINGLE_ENTRY & ((u64) (node))) ?     \
			 ((struct llist_node *) ((pos) = NULL)) :	       \
			 (pos)->list.next)), true);			       \
	     (pos) = llist_entry((node), typeof(*(pos)), list))

struct kvm_nested_guest *kvmhv_get_nested(struct kvm *kvm, int l1_lpid,
					  bool create);
void kvmhv_put_nested(struct kvm_nested_guest *gp);
int kvmhv_nested_next_lpid(struct kvm *kvm, int lpid);

/* Encoding of first parameter for H_TLB_INVALIDATE */
#define H_TLBIE_P1_ENC(ric, prs, r)	(___PPC_RIC(ric) | ___PPC_PRS(prs) | \
					 ___PPC_R(r))

/* The Power architecture requires the HPT to be at least 256kiB and at most 64TiB */
#define PPC_MIN_HPT_ORDER	18
#define PPC_MAX_HPT_ORDER	46

#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	return &get_paca()->shadow_vcpu;
}

static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
{
	preempt_enable();
}
#endif

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE

static inline bool kvm_is_radix(struct kvm *kvm)
{
	return kvm->arch.radix;
}

static inline bool kvmhv_vcpu_is_radix(struct kvm_vcpu *vcpu)
{
	bool radix;

	if (vcpu->arch.nested)
		radix = vcpu->arch.nested->radix;
	else
		radix = kvm_is_radix(vcpu->kvm);

	return radix;
}

unsigned long kvmppc_msr_hard_disable_set_facilities(struct kvm_vcpu *vcpu, unsigned long msr);

int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr, u64 *tb);

#define KVM_DEFAULT_HPT_ORDER	24	/* 16MB HPT by default */
#endif

/*
 * Invalid HDSISR value which is used to indicate when HW has not set the reg.
 * Used to work around an erratum.
 */
#define HDSISR_CANARY	0x7fff

/*
 * We use a lock bit in HPTE dword 0 to synchronize updates and
 * accesses to each HPTE, and another bit to indicate non-present
 * HPTEs.
 */
#define HPTE_V_HVLOCK	0x40UL
#define HPTE_V_ABSENT	0x20UL

/*
 * We use this bit in the guest_rpte field of the revmap entry
 * to indicate a modified HPTE.
 */
#define HPTE_GR_MODIFIED	(1ul << 62)

/* These bits are reserved in the guest view of the HPTE */
#define HPTE_GR_RESERVED	HPTE_GR_MODIFIED

static inline long try_lock_hpte(__be64 *hpte, unsigned long bits)
{
	unsigned long tmp, old;
	__be64 be_lockbit, be_bits;

	/*
	 * We load/store in native endian, but the HTAB is in big endian. If
	 * we byte-swap all data we apply to the PTE, we are implicitly
	 * correct again.
	 */
	be_lockbit = cpu_to_be64(HPTE_V_HVLOCK);
	be_bits = cpu_to_be64(bits);

	asm volatile("	ldarx	%0,0,%2\n"
		     "	and.	%1,%0,%3\n"
		     "	bne	2f\n"
		     "	or	%0,%0,%4\n"
		     "	stdcx.	%0,0,%2\n"
		     "	beq+	2f\n"
		     "	mr	%1,%3\n"
		     "2:	isync"
		     : "=&r" (tmp), "=&r" (old)
		     : "r" (hpte), "r" (be_bits), "r" (be_lockbit)
		     : "cc", "memory");
	return old == 0;
}

static inline void unlock_hpte(__be64 *hpte, unsigned long hpte_v)
{
	hpte_v &= ~HPTE_V_HVLOCK;
	asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
	hpte[0] = cpu_to_be64(hpte_v);
}

/* Without barrier */
static inline void __unlock_hpte(__be64 *hpte, unsigned long hpte_v)
{
	hpte_v &= ~HPTE_V_HVLOCK;
	hpte[0] = cpu_to_be64(hpte_v);
}

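/*
 * Illustrative usage sketch (assumed caller context, not a definition from
 * this file): callers typically spin on try_lock_hpte() with HPTE_V_HVLOCK,
 * inspect or update the HPTE, then release it with one of the unlock
 * helpers above.
 *
 *	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
 *		cpu_relax();
 *	v = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK;
 *	... examine or modify the HPTE ...
 *	unlock_hpte(hptep, v);
 */
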
/*
 * These functions encode knowledge of the POWER7/8/9 hardware
 * interpretations of the HPTE LP (large page size) field.
 */
static inline int kvmppc_hpte_page_shifts(unsigned long h, unsigned long l)
{
	unsigned int lphi;

	if (!(h & HPTE_V_LARGE))
		return 12;	/* 4kB */
	lphi = (l >> 16) & 0xf;
	switch ((l >> 12) & 0xf) {
	case 0:
		return !lphi ? 24 : 0;		/* 16MB */
		break;
	case 1:
		return 16;			/* 64kB */
		break;
	case 3:
		return !lphi ? 34 : 0;		/* 16GB */
		break;
	case 7:
		return (16 << 8) + 12;		/* 64kB in 4kB */
		break;
	case 8:
		if (!lphi)
			return (24 << 8) + 16;	/* 16MB in 64kB */
		if (lphi == 3)
			return (24 << 8) + 12;	/* 16MB in 4kB */
		break;
	}
	return 0;
}

static inline int kvmppc_hpte_base_page_shift(unsigned long h, unsigned long l)
{
	return kvmppc_hpte_page_shifts(h, l) & 0xff;
}

static inline int kvmppc_hpte_actual_page_shift(unsigned long h, unsigned long l)
{
	int tmp = kvmppc_hpte_page_shifts(h, l);

	if (tmp >= 0x100)
		tmp >>= 8;
	return tmp;
}

static inline unsigned long kvmppc_actual_pgsz(unsigned long v, unsigned long r)
{
	int shift = kvmppc_hpte_actual_page_shift(v, r);

	if (shift)
		return 1ul << shift;
	return 0;
}

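/*
 * Worked example (illustrative): for a 16MB page mapped under a 64kB base
 * page size, kvmppc_hpte_page_shifts() returns (24 << 8) + 16, so the base
 * page shift is 16, the actual page shift is 24 and kvmppc_actual_pgsz()
 * returns 1ul << 24 (16MB).
 */
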
static inline int kvmppc_pgsize_lp_encoding(int base_shift, int actual_shift)
{
	switch (base_shift) {
	case 12:
		switch (actual_shift) {
		case 12:
			return 0;
		case 16:
			return 7;
		case 24:
			return 0x38;
		}
		break;
	case 16:
		switch (actual_shift) {
		case 16:
			return 1;
		case 24:
			return 8;
		}
		break;
	case 24:
		return 0;
	}
	return -1;
}

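/*
 * Worked example (illustrative): a 16MB page in a 16MB base page size
 * segment uses LP encoding 0, matching the "16MB" case decoded by
 * kvmppc_hpte_page_shifts() above; base/actual combinations not listed in
 * the switch above are unsupported and yield -1.
 */
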
static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
					     unsigned long pte_index)
{
	int a_pgshift, b_pgshift;
	unsigned long rb = 0, va_low, sllp;

	b_pgshift = a_pgshift = kvmppc_hpte_page_shifts(v, r);
	if (a_pgshift >= 0x100) {
		b_pgshift &= 0xff;
		a_pgshift >>= 8;
	}

	/*
	 * Ignore the top 14 bits of the va.
	 * v has its top two bits covering segment size, hence shift
	 * by 16 bits; also clear the lower HPTE_V_AVPN_SHIFT (7) bits.
	 * The AVA field in v also has its lower 23 bits ignored.
	 * For a 4K base page size we need bits 14..65 (so we need to
	 * collect 11 extra bits); for the others we need bits 14..14+i.
	 */
	/* This covers bits 14..54 of the va */
	rb = (v & ~0x7fUL) << 16;		/* AVA field */

	/*
	 * The AVA in v has its lower 23 bits cleared. We need to derive
	 * them from the PTEG index.
	 */
	va_low = pte_index >> 3;
	if (v & HPTE_V_SECONDARY)
		va_low = ~va_low;
	/*
	 * Get the vpn bits from va_low by reversing the hash.
	 * In v we have the va with 23 bits dropped and then left-shifted
	 * by HPTE_V_AVPN_SHIFT (7) bits. To find the vsid we need to
	 * right-shift it by (SID_SHIFT - (23 - 7)).
	 */
	if (!(v & HPTE_V_1TB_SEG))
		va_low ^= v >> (SID_SHIFT - 16);
	else
		va_low ^= v >> (SID_SHIFT_1T - 16);
	va_low &= 0x7ff;

	if (b_pgshift <= 12) {
		if (a_pgshift > 12) {
			sllp = (a_pgshift == 16) ? 5 : 4;
			rb |= sllp << 5;	/*  AP field */
		}
		rb |= (va_low & 0x7ff) << 12;	/* remaining 11 bits of AVA */
	} else {
		int aval_shift;
		/*
		 * Remaining bits of the AVA/LP fields;
		 * these also contain the rr bits of LP.
		 */
		rb |= (va_low << b_pgshift) & 0x7ff000;
		/*
		 * Now clear the LP bits that are not needed, based on the
		 * actual page size.
		 */
		rb &= ~((1ul << a_pgshift) - 1);
		/*
		 * The AVAL field holds bits 58..(77 - base_page_shift) of the
		 * va; we only have space for bits 58..64, and missing bits
		 * should be zero-filled. The +1 takes care of the L bit shift.
		 */
		aval_shift = 64 - (77 - b_pgshift) + 1;
		rb |= ((va_low << aval_shift) & 0xfe);

		rb |= 1;		/* L field */
		rb |= r & 0xff000 & ((1ul << a_pgshift) - 1); /* LP field */
	}
	/*
	 * This sets both bits of the B field in the PTE. 0b1x values are
	 * reserved, but those will have been filtered by kvmppc_do_h_enter.
	 */
	rb |= (v >> HPTE_V_SSIZE_SHIFT) << 8;	/* B field */
	return rb;
}

static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize)
{
	return ((ptel & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
}

static inline int hpte_is_writable(unsigned long ptel)
{
	unsigned long pp = ptel & (HPTE_R_PP0 | HPTE_R_PP);

	return pp != PP_RXRX && pp != PP_RXXX;
}

static inline unsigned long hpte_make_readonly(unsigned long ptel)
{
	if ((ptel & HPTE_R_PP0) || (ptel & HPTE_R_PP) == PP_RWXX)
		ptel = (ptel & ~HPTE_R_PP) | PP_RXXX;
	else
		ptel |= PP_RXRX;
	return ptel;
}

static inline bool hpte_cache_flags_ok(unsigned long hptel, bool is_ci)
{
	unsigned int wimg = hptel & HPTE_R_WIMG;

	/* Handle SAO */
	if (wimg == (HPTE_R_W | HPTE_R_I | HPTE_R_M) &&
	    cpu_has_feature(CPU_FTR_ARCH_206))
		wimg = HPTE_R_M;

	if (!is_ci)
		return wimg == HPTE_R_M;
	/*
	 * If the host mapping is cache-inhibited, make sure hptel is also
	 * cache-inhibited.
	 */
	if (wimg & HPTE_R_W) /* FIXME!! is this ok for all guests? */
		return false;
	return !!(wimg & HPTE_R_I);
}

/*
 * If it's present and writable, atomically set dirty and referenced bits and
 * return the PTE, otherwise return 0.
 */
static inline pte_t kvmppc_read_update_linux_pte(pte_t *ptep, int writing)
{
	pte_t old_pte, new_pte = __pte(0);

	while (1) {
		/*
		 * Make sure we don't reload from ptep
		 */
		old_pte = READ_ONCE(*ptep);
		/*
		 * wait until H_PAGE_BUSY is clear then set it atomically
		 */
		if (unlikely(pte_val(old_pte) & H_PAGE_BUSY)) {
			cpu_relax();
			continue;
		}
		/* If the pte is not present, return an empty pte */
		if (unlikely(!pte_present(old_pte)))
			return __pte(0);

		new_pte = pte_mkyoung(old_pte);
		if (writing && pte_write(old_pte))
			new_pte = pte_mkdirty(new_pte);

		if (pte_xchg(ptep, old_pte, new_pte))
			break;
	}
	return new_pte;
}

static inline bool hpte_read_permission(unsigned long pp, unsigned long key)
{
	if (key)
		return PP_RWRX <= pp && pp <= PP_RXRX;
	return true;
}

static inline bool hpte_write_permission(unsigned long pp, unsigned long key)
{
	if (key)
		return pp == PP_RWRW;
	return pp <= PP_RWRW;
}

static inline int hpte_get_skey_perm(unsigned long hpte_r, unsigned long amr)
{
	unsigned long skey;

	skey = ((hpte_r & HPTE_R_KEY_HI) >> 57) |
		((hpte_r & HPTE_R_KEY_LO) >> 9);
	return (amr >> (62 - 2 * skey)) & 3;
}

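/*
 * Worked example (illustrative): an HPTE with storage key 5 selects AMR
 * bits 52..53 (numbering from the least significant bit), since
 * 62 - 2 * 5 = 52; the two-bit value encodes the read/write prohibition
 * for that key.
 */
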
static inline void lock_rmap(unsigned long *rmap)
{
	do {
		while (test_bit(KVMPPC_RMAP_LOCK_BIT, rmap))
			cpu_relax();
	} while (test_and_set_bit_lock(KVMPPC_RMAP_LOCK_BIT, rmap));
}

static inline void unlock_rmap(unsigned long *rmap)
{
	__clear_bit_unlock(KVMPPC_RMAP_LOCK_BIT, rmap);
}

static inline bool slot_is_aligned(struct kvm_memory_slot *memslot,
				   unsigned long pagesize)
{
	unsigned long mask = (pagesize >> PAGE_SHIFT) - 1;

	if (pagesize <= PAGE_SIZE)
		return true;
	return !(memslot->base_gfn & mask) && !(memslot->npages & mask);
}

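/*
 * Worked example (illustrative): with 16MB backing pages and a 64kB kernel
 * PAGE_SIZE, mask = (0x1000000 >> 16) - 1 = 0xff, so the memslot's base_gfn
 * and npages must both be multiples of 256 frames, i.e. 16MB-aligned.
 */
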
/*
 * This works for 4k, 64k and 16M pages on POWER7,
 * and 4k and 16M pages on PPC970.
 */
static inline unsigned long slb_pgsize_encoding(unsigned long psize)
{
	unsigned long senc = 0;

	if (psize > 0x1000) {
		senc = SLB_VSID_L;
		if (psize == 0x10000)
			senc |= SLB_VSID_LP_01;
	}
	return senc;
}

static inline int is_vrma_hpte(unsigned long hpte_v)
{
	return (hpte_v & ~0xffffffUL) ==
		(HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)));
}

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
/*
 * Note modification of an HPTE; set the HPTE modified bit
 * if anyone is interested.
 */
static inline void note_hpte_modification(struct kvm *kvm,
					  struct revmap_entry *rev)
{
	if (atomic_read(&kvm->arch.hpte_mod_interest))
		rev->guest_rpte |= HPTE_GR_MODIFIED;
}

/*
 * Like kvm_memslots(), but for use in real mode when we can't do
 * any RCU stuff (since the secondary threads are offline from the
 * kernel's point of view), and we can't print anything.
 * Thus we use rcu_dereference_raw() rather than rcu_dereference_check().
 */
static inline struct kvm_memslots *kvm_memslots_raw(struct kvm *kvm)
{
	return rcu_dereference_raw_check(kvm->memslots[0]);
}

extern void kvmppc_mmu_debugfs_init(struct kvm *kvm);
extern void kvmhv_radix_debugfs_init(struct kvm *kvm);

extern void kvmhv_rm_send_ipi(int cpu);

static inline unsigned long kvmppc_hpt_npte(struct kvm_hpt_info *hpt)
{
	/* HPTEs are 2**4 bytes long */
	return 1UL << (hpt->order - 4);
}

static inline unsigned long kvmppc_hpt_mask(struct kvm_hpt_info *hpt)
{
	/* 128 (2**7) bytes in each HPTEG */
	return (1UL << (hpt->order - 7)) - 1;
}

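/*
 * Worked example (illustrative): with the default order of 24 (a 16MB HPT),
 * kvmppc_hpt_npte() returns 1UL << 20 HPTEs and kvmppc_hpt_mask() returns
 * (1UL << 17) - 1, the index mask for the 128-byte HPTE groups.
 */
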
/* Set bits in a dirty bitmap, which is in LE format */
static inline void set_dirty_bits(unsigned long *map, unsigned long i,
				  unsigned long npages)
{
	if (npages >= 8)
		memset((char *)map + i / 8, 0xff, npages / 8);
	else
		for (; npages; ++i, --npages)
			__set_bit_le(i, map);
}

static inline void set_dirty_bits_atomic(unsigned long *map, unsigned long i,
					 unsigned long npages)
{
	if (npages >= 8)
		memset((char *)map + i / 8, 0xff, npages / 8);
	else
		for (; npages; ++i, --npages)
			set_bit_le(i, map);
}

static inline u64 sanitize_msr(u64 msr)
{
	msr &= ~MSR_HV;
	msr |= MSR_ME;
	return msr;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static inline void copy_from_checkpoint(struct kvm_vcpu *vcpu)
{
	vcpu->arch.regs.ccr = vcpu->arch.cr_tm;
	vcpu->arch.regs.xer = vcpu->arch.xer_tm;
	vcpu->arch.regs.link = vcpu->arch.lr_tm;
	vcpu->arch.regs.ctr = vcpu->arch.ctr_tm;
	vcpu->arch.amr = vcpu->arch.amr_tm;
	vcpu->arch.ppr = vcpu->arch.ppr_tm;
	vcpu->arch.dscr = vcpu->arch.dscr_tm;
	vcpu->arch.tar = vcpu->arch.tar_tm;
	memcpy(vcpu->arch.regs.gpr, vcpu->arch.gpr_tm,
	       sizeof(vcpu->arch.regs.gpr));
	vcpu->arch.fp = vcpu->arch.fp_tm;
	vcpu->arch.vr = vcpu->arch.vr_tm;
	vcpu->arch.vrsave = vcpu->arch.vrsave_tm;
}

static inline void copy_to_checkpoint(struct kvm_vcpu *vcpu)
{
	vcpu->arch.cr_tm = vcpu->arch.regs.ccr;
	vcpu->arch.xer_tm = vcpu->arch.regs.xer;
	vcpu->arch.lr_tm = vcpu->arch.regs.link;
	vcpu->arch.ctr_tm = vcpu->arch.regs.ctr;
	vcpu->arch.amr_tm = vcpu->arch.amr;
	vcpu->arch.ppr_tm = vcpu->arch.ppr;
	vcpu->arch.dscr_tm = vcpu->arch.dscr;
	vcpu->arch.tar_tm = vcpu->arch.tar;
	memcpy(vcpu->arch.gpr_tm, vcpu->arch.regs.gpr,
	       sizeof(vcpu->arch.regs.gpr));
	vcpu->arch.fp_tm = vcpu->arch.fp;
	vcpu->arch.vr_tm = vcpu->arch.vr;
	vcpu->arch.vrsave_tm = vcpu->arch.vrsave;
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */

extern int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
			     unsigned long gpa, unsigned int level,
			     unsigned long mmu_seq, u64 lpid,
			     unsigned long *rmapp, struct rmap_nested **n_rmap);
extern void kvmhv_insert_nest_rmap(struct kvm *kvm, unsigned long *rmapp,
				   struct rmap_nested **n_rmap);
extern void kvmhv_update_nest_rmap_rc_list(struct kvm *kvm, unsigned long *rmapp,
					   unsigned long clr, unsigned long set,
					   unsigned long hpa, unsigned long nbytes);
extern void kvmhv_remove_nest_rmap_range(struct kvm *kvm,
					const struct kvm_memory_slot *memslot,
					unsigned long gpa, unsigned long hpa,
					unsigned long nbytes);

static inline pte_t *
find_kvm_secondary_pte_unlocked(struct kvm *kvm, unsigned long ea,
				unsigned *hshift)
{
	pte_t *pte;

	pte = __find_linux_pte(kvm->arch.pgtable, ea, NULL, hshift);
	return pte;
}

static inline pte_t *find_kvm_secondary_pte(struct kvm *kvm, unsigned long ea,
					    unsigned *hshift)
{
	pte_t *pte;

	VM_WARN(!spin_is_locked(&kvm->mmu_lock),
		"%s called with kvm mmu_lock not held \n", __func__);
	pte = __find_linux_pte(kvm->arch.pgtable, ea, NULL, hshift);

	return pte;
}

static inline pte_t *find_kvm_host_pte(struct kvm *kvm, unsigned long mmu_seq,
					unsigned long ea, unsigned *hshift)
{
	pte_t *pte;

	VM_WARN(!spin_is_locked(&kvm->mmu_lock),
		"%s called with kvm mmu_lock not held \n", __func__);

	if (mmu_invalidate_retry(kvm, mmu_seq))
		return NULL;

	pte = __find_linux_pte(kvm->mm->pgd, ea, NULL, hshift);

	return pte;
}

extern pte_t *find_kvm_nested_guest_pte(struct kvm *kvm, unsigned long lpid,
					unsigned long ea, unsigned *hshift);

int kvmhv_nestedv2_vcpu_create(struct kvm_vcpu *vcpu, struct kvmhv_nestedv2_io *io);
void kvmhv_nestedv2_vcpu_free(struct kvm_vcpu *vcpu, struct kvmhv_nestedv2_io *io);
int kvmhv_nestedv2_flush_vcpu(struct kvm_vcpu *vcpu, u64 time_limit);
int kvmhv_nestedv2_set_ptbl_entry(unsigned long lpid, u64 dw0, u64 dw1);
int kvmhv_nestedv2_parse_output(struct kvm_vcpu *vcpu);

#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */

#endif /* __ASM_KVM_BOOK3S_64_H__ */