/*
 * Copyright (C) 2009 SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *     Kevin Wolf <mail@kevin-wolf.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/kvm_host.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>
#include "trace_pr.h"
#include "book3s.h"

void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	mmu_hash_ops.hpte_invalidate(pte->slot, pte->host_vpn,
				     pte->pagesize, pte->pagesize,
				     MMU_SEGSIZE_256M, false);
}
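
/*
 * Guest SID -> host SID translation
 *
 * The guest's segments carry guest VSIDs that mean nothing to the host
 * hash MMU, so each one is backed by a host VSID allocated from this
 * vCPU's own MMU context. The sid_map below caches that gvsid -> hvsid
 * translation.
 */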

/* We keep 512 gvsid->hvsid entries, mapping the guest ones to the array using
 * a hash, so we don't waste cycles on looping */
static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid)
{
	return (u16)(((gvsid >> (SID_MAP_BITS * 7)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 6)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 5)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 4)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 3)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 2)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 1)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 0)) & SID_MAP_MASK));
}
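
/*
 * Illustration: with SID_MAP_BITS == 9 this folds the 64-bit guest VSID
 * into eight 9-bit slices XORed together, so the result is always
 * <= SID_MAP_MASK and can index the 512-entry sid_map[] directly, with
 * no modulo and no search loop.
 */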

static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
{
	struct kvmppc_sid_map *map;
	u16 sid_map_mask;

	if (kvmppc_get_msr(vcpu) & MSR_PR)
		gvsid |= VSID_PR;

	sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
	map = &to_book3s(vcpu)->sid_map[sid_map_mask];
	if (map->valid && (map->guest_vsid == gvsid)) {
		trace_kvm_book3s_slb_found(gvsid, map->host_vsid);
		return map;
	}

	map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask];
	if (map->valid && (map->guest_vsid == gvsid)) {
		trace_kvm_book3s_slb_found(gvsid, map->host_vsid);
		return map;
	}

	trace_kvm_book3s_slb_fail(sid_map_mask, gvsid);
	return NULL;
}
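
/*
 * A guest VSID can therefore live in exactly two slots: sid_map[hash]
 * or its mirror sid_map[SID_MAP_MASK - hash]. create_sid_map() below
 * alternates between the two on insertion, so a lookup is only a miss
 * once both probes have failed.
 */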

int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
			bool iswrite)
{
	unsigned long vpn;
	unsigned long hpaddr;
	kvm_pfn_t pfn;
	ulong hash, hpteg;
	u64 vsid;
	int ret;
	int rflags = 0x192;	/* R, C and M set, PP = 0b10 */
	int vflags = 0;
	int attempt = 0;
	struct kvmppc_sid_map *map;
	int r = 0;
	int hpsize = MMU_PAGE_4K;
	bool writable;
	unsigned long mmu_seq;
	struct kvm *kvm = vcpu->kvm;
	struct hpte_cache *cpte;
	unsigned long gfn = orig_pte->raddr >> PAGE_SHIFT;

	/* used to check for invalidations in progress */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();

	/* Get host physical address for gpa */
	pfn = kvmppc_gpa_to_pfn(vcpu, orig_pte->raddr, iswrite, &writable);
	if (is_error_noslot_pfn(pfn)) {
		printk(KERN_INFO "Couldn't get guest page for gpa %lx!\n",
		       orig_pte->raddr);
		r = -EINVAL;
		goto out;
	}
	hpaddr = pfn << PAGE_SHIFT;

	/* and write the mapping ea -> hpa into the pt */
	vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
	map = find_sid_vsid(vcpu, vsid);
	if (!map) {
		ret = kvmppc_mmu_map_segment(vcpu, orig_pte->eaddr);
		WARN_ON(ret < 0);
		map = find_sid_vsid(vcpu, vsid);
	}
	if (!map) {
		printk(KERN_ERR "KVM: Segment map for 0x%llx (0x%lx) failed\n",
		       vsid, orig_pte->eaddr);
		WARN_ON(true);
		r = -EINVAL;
		goto out;
	}

	vpn = hpt_vpn(orig_pte->eaddr, map->host_vsid, MMU_SEGSIZE_256M);

	kvm_set_pfn_accessed(pfn);
	if (!orig_pte->may_write || !writable)
		rflags |= PP_RXRX;
	else {
		mark_page_dirty(vcpu->kvm, gfn);
		kvm_set_pfn_dirty(pfn);
	}

	if (!orig_pte->may_execute)
		rflags |= HPTE_R_N;
	else
		kvmppc_mmu_flush_icache(pfn);

	/* Take the WIMG attributes from the guest PTE */
	rflags = (rflags & ~HPTE_R_WIMG) | orig_pte->wimg;

	/*
	 * Use 64K pages if possible; otherwise, on 64K page kernels,
	 * we need to transfer 4 more bits from guest real to host real addr.
	 */
	if (vsid & VSID_64K)
		hpsize = MMU_PAGE_64K;
	else
		hpaddr |= orig_pte->raddr & (~0xfffULL & ~PAGE_MASK);

	hash = hpt_hash(vpn, mmu_psize_defs[hpsize].shift, MMU_SEGSIZE_256M);

	cpte = kvmppc_mmu_hpte_cache_next(vcpu);

	spin_lock(&kvm->mmu_lock);
	if (!cpte || mmu_notifier_retry(kvm, mmu_seq)) {
		r = -EAGAIN;
		goto out_unlock;
	}

map_again:
	hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

	/* In case we tried normal mapping already, let's nuke old entries */
	if (attempt > 1)
		if (mmu_hash_ops.hpte_remove(hpteg) < 0) {
			r = -1;
			goto out_unlock;
		}

	ret = mmu_hash_ops.hpte_insert(hpteg, vpn, hpaddr, rflags, vflags,
				       hpsize, hpsize, MMU_SEGSIZE_256M);

	if (ret == -1) {
		/* If we couldn't map a primary PTE, try a secondary */
		hash = ~hash;
		vflags ^= HPTE_V_SECONDARY;
		attempt++;
		goto map_again;
	} else if (ret < 0) {
		r = -EIO;
		goto out_unlock;
	} else {
		trace_kvm_book3s_64_mmu_map(rflags, hpteg,
					    vpn, hpaddr, orig_pte);

		/*
		 * The mmu_hash_ops code may give us a secondary entry even
		 * though we asked for a primary. Fix up.
		 */
		if ((ret & _PTEIDX_SECONDARY) && !(vflags & HPTE_V_SECONDARY)) {
			hash = ~hash;
			hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
		}

		cpte->slot = hpteg + (ret & 7);
		cpte->host_vpn = vpn;
		cpte->pte = *orig_pte;
		cpte->pfn = pfn;
		cpte->pagesize = hpsize;

		kvmppc_mmu_hpte_cache_map(vcpu, cpte);
		cpte = NULL;
	}

out_unlock:
	spin_unlock(&kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	if (cpte)
		kvmppc_mmu_hpte_cache_free(cpte);

out:
	return r;
}
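
/*
 * Failure semantics for kvmppc_mmu_map_page() above: -EINVAL if the
 * guest page or its segment cannot be resolved, -EAGAIN if an MMU
 * notifier invalidation raced with us (so the fault can simply be
 * taken again), -EIO if mmu_hash_ops.hpte_insert() failed outright
 * rather than just finding a hash group full.
 */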

void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
	u64 mask = 0xfffffffffULL;
	u64 vsid;

	vcpu->arch.mmu.esid_to_vsid(vcpu, pte->eaddr >> SID_SHIFT, &vsid);
	if (vsid & VSID_64K)
		mask = 0xffffffff0ULL;

	kvmppc_mmu_pte_vflush(vcpu, pte->vpage, mask);
}
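
/*
 * The mask above covers the 36-bit virtual page number. For a 64K
 * guest segment the low four bits of vpage select the 4K sub-page, so
 * they are cleared from the mask and a single call flushes all sixteen
 * sub-pages of the 64K page.
 */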

static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
{
	unsigned long vsid_bits = VSID_BITS_65_256M;
	struct kvmppc_sid_map *map;
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
	u16 sid_map_mask;
	static int backwards_map = 0;

	if (kvmppc_get_msr(vcpu) & MSR_PR)
		gvsid |= VSID_PR;

	/* We might get collisions that trap in preceding order, so let's
	   map them differently */

	sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
	if (backwards_map)
		sid_map_mask = SID_MAP_MASK - sid_map_mask;

	map = &to_book3s(vcpu)->sid_map[sid_map_mask];

	/* Make sure we're taking the other map next time */
	backwards_map = !backwards_map;

	/* Uh-oh ... out of mappings. Let's flush! */
	if (vcpu_book3s->proto_vsid_next == vcpu_book3s->proto_vsid_max) {
		vcpu_book3s->proto_vsid_next = vcpu_book3s->proto_vsid_first;
		memset(vcpu_book3s->sid_map, 0,
		       sizeof(struct kvmppc_sid_map) * SID_MAP_NUM);
		kvmppc_mmu_pte_flush(vcpu, 0, 0);
		kvmppc_mmu_flush_segments(vcpu);
	}

	if (mmu_has_feature(MMU_FTR_68_BIT_VA))
		vsid_bits = VSID_BITS_256M;

	map->host_vsid = vsid_scramble(vcpu_book3s->proto_vsid_next++,
				       VSID_MULTIPLIER_256M, vsid_bits);

	map->guest_vsid = gvsid;
	map->valid = true;

	trace_kvm_book3s_slb_map(sid_map_mask, gvsid, map->host_vsid);

	return map;
}
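
/*
 * Host VSIDs come out of a per-vCPU window of proto-VSIDs and are never
 * freed individually: once proto_vsid_next reaches the end of the
 * window, the whole sid_map, every shadow PTE and every shadow segment
 * are dropped and allocation restarts at proto_vsid_first.
 */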

static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	int i;
	int max_slb_size = 64;
	int found_inval = -1;
	int r = 0;

	/* Are we overwriting? */
	for (i = 0; i < svcpu->slb_max; i++) {
		if (!(svcpu->slb[i].esid & SLB_ESID_V))
			found_inval = i;
		else if ((svcpu->slb[i].esid & ESID_MASK) == esid) {
			r = i;
			goto out;
		}
	}

	/* Found a spare entry that was invalidated before */
	if (found_inval >= 0) {
		r = found_inval;
		goto out;
	}

	/* No spare invalid entry, so create one */

	if (mmu_slb_size < 64)
		max_slb_size = mmu_slb_size;

	/* Overflowing -> purge */
	if ((svcpu->slb_max) == max_slb_size)
		kvmppc_mmu_flush_segments(vcpu);

	r = svcpu->slb_max;
	svcpu->slb_max++;

out:
	svcpu_put(svcpu);
	return r;
}
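
/*
 * Slot selection order above: reuse the slot that already maps this
 * ESID, else recycle a previously invalidated slot, else grow slb_max,
 * flushing all shadow segments first if the host SLB size would be
 * exceeded.
 */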

int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	u64 esid = eaddr >> SID_SHIFT;
	u64 slb_esid = (eaddr & ESID_MASK) | SLB_ESID_V;
	u64 slb_vsid = SLB_VSID_USER;
	u64 gvsid;
	int slb_index;
	struct kvmppc_sid_map *map;
	int r = 0;

	slb_index = kvmppc_mmu_next_segment(vcpu, eaddr & ESID_MASK);

	if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
		/* Invalidate an entry */
		svcpu->slb[slb_index].esid = 0;
		r = -ENOENT;
		goto out;
	}

	map = find_sid_vsid(vcpu, gvsid);
	if (!map)
		map = create_sid_map(vcpu, gvsid);

	map->guest_esid = esid;

	slb_vsid |= (map->host_vsid << 12);
	slb_vsid &= ~SLB_VSID_KP;
	slb_esid |= slb_index;

#ifdef CONFIG_PPC_64K_PAGES
	/* Set host segment base page size to 64K if possible */
	if (gvsid & VSID_64K)
		slb_vsid |= mmu_psize_defs[MMU_PAGE_64K].sllp;
#endif

	svcpu->slb[slb_index].esid = slb_esid;
	svcpu->slb[slb_index].vsid = slb_vsid;

	trace_kvm_book3s_slbmte(slb_vsid, slb_esid);

out:
	svcpu_put(svcpu);
	return r;
}
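
/*
 * The shift by 12 above is SLB_VSID_SHIFT: it places host_vsid in the
 * VSID field of a 256M SLBE. SLB_VSID_USER supplies the key and class
 * bits for problem state, and Kp is then cleared again, leaving the
 * HPTE protection bits set up in kvmppc_mmu_map_page() to do the
 * actual access control.
 */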

void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong ea, ulong seg_size)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	ulong seg_mask = -seg_size;
	int i;

	for (i = 0; i < svcpu->slb_max; i++) {
		if ((svcpu->slb[i].esid & SLB_ESID_V) &&
		    (svcpu->slb[i].esid & seg_mask) == ea) {
			/* Invalidate this entry */
			svcpu->slb[i].esid = 0;
		}
	}

	svcpu_put(svcpu);
}

void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);

	svcpu->slb_max = 0;
	svcpu->slb[0].esid = 0;

	svcpu_put(svcpu);
}

void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_hpte_destroy(vcpu);
	__destroy_context(to_book3s(vcpu)->context_id[0]);
}

int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int err;

	err = hash__alloc_context_id();
	if (err < 0)
		return -1;
	vcpu3s->context_id[0] = err;

	vcpu3s->proto_vsid_max = ((u64)(vcpu3s->context_id[0] + 1)
				  << ESID_BITS) - 1;
	vcpu3s->proto_vsid_first = (u64)vcpu3s->context_id[0] << ESID_BITS;
	vcpu3s->proto_vsid_next = vcpu3s->proto_vsid_first;

	kvmppc_mmu_hpte_init(vcpu);

	return 0;
}
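
/*
 * Worked example for the proto-VSID window set up above: each context
 * id owns 2^ESID_BITS proto-VSIDs, so with a (hypothetical)
 * context_id[0] == 5 and ESID_BITS == 18 the window is
 * [5 << 18, (6 << 18) - 1], which create_sid_map() scrambles into the
 * final host VSIDs.
 */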