// SPDX-License-Identifier: GPL-2.0
/*
 * Secure pages management: Migration of pages between normal and secure
 * memory of KVM guests.
 *
 * Copyright 2018 Bharata B Rao, IBM Corp. <bharata@linux.ibm.com>
 *
 * A pseries guest can be run as secure guest on Ultravisor-enabled
 * POWER platforms. On such platforms, this driver will be used to manage
 * the movement of guest pages between the normal memory managed by
 * hypervisor (HV) and secure memory managed by Ultravisor (UV).
 *
 * The page-in or page-out requests from UV will come to HV as hcalls and
 * HV will call back into UV via ultracalls to satisfy these page requests.
 *
 * Private ZONE_DEVICE memory equal to the amount of secure memory
 * available in the platform for running secure guests is hotplugged.
 * Whenever a page belonging to the guest becomes secure, a page from this
 * private device memory is used to represent and track that secure page
 * on the HV side. Some pages (like virtio buffers, VPA pages etc.) are
 * shared between UV and HV. However, such pages aren't represented by
 * device private memory, and mappings to shared memory exist in both
 * UV and HV page tables.
 */

/*
 * Notes on locking
 *
 * kvm->arch.uvmem_lock is a per-guest lock that prevents concurrent
 * page-in and page-out requests for the same GPA. Concurrent accesses
 * can either come via UV (guest vCPUs requesting the same page)
 * or when HV and the guest simultaneously access the same page.
 * This mutex serializes the migration of a page from HV (normal) to
 * UV (secure) and vice versa. So the serialization points are around
 * the migrate_vma routines and the page-in/out routines.
 *
 * The per-guest mutex comes with a cost though. Mainly, it serializes
 * the fault path, as page-out can occur when HV faults on accessing a
 * secure guest page. Currently UV issues page-in requests for all the
 * guest PFNs one at a time during early boot (UV_ESM uvcall), so this
 * is not a cause for concern. Also, the number of page-outs caused by
 * HV touching secure pages is currently very low. If and when UV
 * supports overcommitting, we might see concurrent guest-driven
 * page-outs.
 *
 * Locking order:
 *
 * 1. kvm->srcu - protects KVM memslots
 * 2. kvm->mm->mmap_lock - find_vma, migrate_vma_pages and helpers, ksm_madvise
 * 3. kvm->arch.uvmem_lock - protects reads/writes to uvmem slots, thus acting
 *			     as sync points for page-in/out
 */
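
/*
 * For illustration only (not a function in this file): the hcall
 * handlers below nest these locks in exactly the order listed above,
 * e.g.:
 *
 *	srcu_idx = srcu_read_lock(&kvm->srcu);
 *	mmap_read_lock(kvm->mm);
 *	mutex_lock(&kvm->arch.uvmem_lock);
 *	... page-in/page-out via the migrate_vma_*() routines and uvcalls ...
 *	mutex_unlock(&kvm->arch.uvmem_lock);
 *	mmap_read_unlock(kvm->mm);
 *	srcu_read_unlock(&kvm->srcu, srcu_idx);
 */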

/*
 * Notes on page size
 *
 * Currently UV uses 2MB mappings internally, but will issue H_SVM_PAGE_IN
 * and H_SVM_PAGE_OUT hcalls in PAGE_SIZE (64K) granularity. HV tracks
 * secure GPAs at 64K page size and maintains one device PFN for each
 * 64K secure GPA. UV_PAGE_IN and UV_PAGE_OUT calls by HV are also issued
 * for one 64K page at a time.
 *
 * HV faulting on secure pages: When HV touches any secure page, it
 * faults and issues a UV_PAGE_OUT request with 64K page size. Currently
 * UV splits and remaps the 2MB page if necessary and copies out the
 * required 64K page contents.
 *
 * Shared pages: Whenever the guest shares a secure page, UV will split and
 * remap the 2MB page if required and issue H_SVM_PAGE_IN with 64K page size.
 *
 * HV invalidating a page: When a regular page belonging to a secure
 * guest gets unmapped, HV informs UV with UV_PAGE_INVAL of 64K
 * page size. Using 64K page size is correct here because any non-secure
 * page will essentially be of 64K page size. Splitting by UV during sharing
 * and page-out ensures this.
 *
 * Page fault handling: When HV handles a page fault of a page belonging
 * to a secure guest, it sends that to UV with a 64K UV_PAGE_IN request.
 * Using 64K size is correct here too, as UV would have split the 2MB page
 * into 64K mappings and would have done page-outs earlier.
 *
 * In summary, the current secure pages handling code in HV assumes
 * 64K page size and in fact fails any page-in/page-out requests of
 * non-64K size upfront. If and when UV starts supporting multiple
 * page-sizes, we need to break this assumption.
 */
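
/*
 * For illustration only: the hcall handlers below enforce the 64K-only
 * assumption up front, e.g. in kvmppc_h_svm_page_in()/_page_out():
 *
 *	if (page_shift != PAGE_SHIFT)
 *		return H_P3;
 */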

#include <linux/pagemap.h>
#include <linux/migrate.h>
#include <linux/kvm_host.h>
#include <linux/ksm.h>
#include <asm/ultravisor.h>
#include <asm/mman.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s_uvmem.h>

static struct dev_pagemap kvmppc_uvmem_pgmap;
static unsigned long *kvmppc_uvmem_bitmap;
static DEFINE_SPINLOCK(kvmppc_uvmem_bitmap_lock);

/*
 * States of a GFN
 * ---------------
 * The GFN can be in one of the following states.
 *
 * (a) Secure - The GFN is secure. The GFN is associated with
 *	a Secure VM, the contents of the GFN are not accessible
 *	to the Hypervisor. This GFN can be backed by a secure-PFN,
 *	or can be backed by a normal-PFN with contents encrypted.
 *	The former is true when the GFN is paged-in to the
 *	ultravisor. The latter is true when the GFN is paged-out
 *	of the ultravisor.
 *
 * (b) Shared - The GFN is shared. The GFN is associated with a
 *	secure VM. The contents of the GFN are accessible to the
 *	Hypervisor. This GFN is backed by a normal-PFN and its
 *	content is un-encrypted.
 *
 * (c) Normal - The GFN is normal. The GFN is associated with
 *	a normal VM. The contents of the GFN are accessible to
 *	the Hypervisor. Its content is never encrypted.
 *
 * States of a VM
 * --------------
 *
 * Normal VM: A VM whose contents are always accessible to
 *	the hypervisor. All its GFNs are normal-GFNs.
 *
 * Secure VM: A VM whose contents are not accessible to the
 *	hypervisor without the VM's consent. Its GFNs are
 *	either Shared-GFNs or Secure-GFNs.
 *
 * Transient VM: A normal VM that is transitioning to a secure VM.
 *	The transition starts on successful return of
 *	H_SVM_INIT_START, and ends on successful return
 *	of H_SVM_INIT_DONE. This transient VM can have GFNs
 *	in any of the three states, i.e. Secure-GFN, Shared-GFN,
 *	and Normal-GFN. The VM never executes in this state
 *	in supervisor mode.
 *
 * Memory slot State
 * -----------------
 *	The state of a memory slot mirrors the state of the
 *	VM the memory slot is associated with.
 *
 * VM State transition
 * -------------------
 *
 *  A VM always starts in Normal Mode.
 *
 *  H_SVM_INIT_START moves the VM into the transient state. During this
 *  time the Ultravisor may request some of its GFNs to be shared or
 *  secured. So its GFNs can be in one of the three GFN states.
 *
 *  H_SVM_INIT_DONE moves the VM entirely from the transient state to
 *  the secure state. At this point any left-over normal-GFNs are
 *  transitioned to Secure-GFNs.
 *
 *  H_SVM_INIT_ABORT moves the transient VM back to a normal VM.
 *  All its GFNs are moved to Normal-GFNs.
 *
 *  UV_TERMINATE transitions the secure VM back to a normal VM. All
 *  the secure-GFNs and shared-GFNs are transitioned to normal-GFNs.
 *  Note: The contents of the normal-GFNs are undefined at this point.
 *
 * GFN state implementation:
 * -------------------------
 *
 * A secure GFN is associated with a secure-PFN, also called an uvmem_pfn,
 * when the GFN is paged-in. Its pfns[] entry has the KVMPPC_GFN_UVMEM_PFN
 * flag set, and contains the value of the secure-PFN.
 * It is associated with a normal-PFN, also called a mem_pfn, when
 * the GFN is paged-out. Its pfns[] entry has the KVMPPC_GFN_MEM_PFN flag
 * set. The value of the normal-PFN is not tracked.
 *
 * A shared GFN is associated with a normal-PFN. Its pfns[] entry has the
 * KVMPPC_GFN_SHARED flag set. The value of the normal-PFN is not tracked.
 *
 * A normal GFN is associated with a normal-PFN. Its pfns[] entry has
 * no flag set. The value of the normal-PFN is not tracked.
 *
 * Life cycle of a GFN
 * -------------------
 *
 * --------------------------------------------------------------
 * |        |     Share  |  Unshare | SVM       |H_SVM_INIT_DONE|
 * |        |operation   |operation | abort/    |               |
 * |        |            |          | terminate |               |
 * --------------------------------------------------------------
 * |        |            |          |           |               |
 * | Secure |     Shared | Secure   |Normal     |Secure         |
 * |        |            |          |           |               |
 * | Shared |     Shared | Secure   |Normal     |Shared         |
 * |        |            |          |           |               |
 * | Normal |     Shared | Secure   |Normal     |Secure         |
 * --------------------------------------------------------------
 *
 * Life cycle of a VM
 * ------------------
 *
 * --------------------------------------------------------------------
 * |         |  start    |  H_SVM_  |H_SVM_   |H_SVM_     |UV_SVM_    |
 * |         |  VM       |INIT_START|INIT_DONE|INIT_ABORT |TERMINATE  |
 * |         |           |          |         |           |           |
 * --------------------------------------------------------------------
 * |         |           |          |         |           |           |
 * | Normal  | Normal    | Transient|Error    |Error      |Normal     |
 * |         |           |          |         |           |           |
 * | Secure  |   Error   | Error    |Error    |Error      |Normal     |
 * |         |           |          |         |           |           |
 * |Transient|   N/A     | Error    |Secure   |Normal     |Normal     |
 * --------------------------------------------------------------------
 */
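
/*
 * For illustration only: the VM-state table above is tracked in code via
 * flag bits in kvm->arch.secure_guest, as the handlers below do:
 *
 *	kvm->arch.secure_guest = KVMPPC_SECURE_INIT_START;	transient
 *	kvm->arch.secure_guest |= KVMPPC_SECURE_INIT_DONE;	secure
 *	kvm->arch.secure_guest = 0;				back to normal
 */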

#define KVMPPC_GFN_UVMEM_PFN	(1UL << 63)
#define KVMPPC_GFN_MEM_PFN	(1UL << 62)
#define KVMPPC_GFN_SHARED	(1UL << 61)
#define KVMPPC_GFN_SECURE	(KVMPPC_GFN_UVMEM_PFN | KVMPPC_GFN_MEM_PFN)
#define KVMPPC_GFN_FLAG_MASK	(KVMPPC_GFN_SECURE | KVMPPC_GFN_SHARED)
#define KVMPPC_GFN_PFN_MASK	(~KVMPPC_GFN_FLAG_MASK)
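
/*
 * For illustration only, a sketch of how a pfns[] entry is encoded and
 * decoded with the masks above (this mirrors kvmppc_mark_gfn() and
 * kvmppc_gfn_is_uvmem_pfn() below):
 *
 *	p->pfns[index] = uvmem_pfn | KVMPPC_GFN_UVMEM_PFN;	   (encode)
 *	if (p->pfns[index] & KVMPPC_GFN_UVMEM_PFN)		   (decode)
 *		uvmem_pfn = p->pfns[index] & KVMPPC_GFN_PFN_MASK;
 */
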
struct kvmppc_uvmem_slot {
	struct list_head list;
	unsigned long nr_pfns;
	unsigned long base_pfn;
	unsigned long *pfns;
};
struct kvmppc_uvmem_page_pvt {
	struct kvm *kvm;
	unsigned long gpa;
	bool skip_page_out;
	bool remove_gfn;
};

bool kvmppc_uvmem_available(void)
{
	/*
	 * If kvmppc_uvmem_bitmap != NULL, then there is an ultravisor
	 * and our data structures have been initialized successfully.
	 */
	return !!kvmppc_uvmem_bitmap;
}

int kvmppc_uvmem_slot_init(struct kvm *kvm, const struct kvm_memory_slot *slot)
{
	struct kvmppc_uvmem_slot *p;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;
	p->pfns = vcalloc(slot->npages, sizeof(*p->pfns));
	if (!p->pfns) {
		kfree(p);
		return -ENOMEM;
	}
	p->nr_pfns = slot->npages;
	p->base_pfn = slot->base_gfn;

	mutex_lock(&kvm->arch.uvmem_lock);
	list_add(&p->list, &kvm->arch.uvmem_pfns);
	mutex_unlock(&kvm->arch.uvmem_lock);

	return 0;
}

/*
 * All device PFNs are already released by the time we come here.
 */
void kvmppc_uvmem_slot_free(struct kvm *kvm, const struct kvm_memory_slot *slot)
{
	struct kvmppc_uvmem_slot *p, *next;

	mutex_lock(&kvm->arch.uvmem_lock);
	list_for_each_entry_safe(p, next, &kvm->arch.uvmem_pfns, list) {
		if (p->base_pfn == slot->base_gfn) {
			vfree(p->pfns);
			list_del(&p->list);
			kfree(p);
			break;
		}
	}
	mutex_unlock(&kvm->arch.uvmem_lock);
}

static void kvmppc_mark_gfn(unsigned long gfn, struct kvm *kvm,
			    unsigned long flag, unsigned long uvmem_pfn)
{
	struct kvmppc_uvmem_slot *p;

	list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) {
		if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) {
			unsigned long index = gfn - p->base_pfn;

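			/*
			 * A secure GFN backed by a device PFN keeps the PFN
			 * value in the entry alongside its flag; every other
			 * state stores only the flag.
			 */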
			if (flag == KVMPPC_GFN_UVMEM_PFN)
				p->pfns[index] = uvmem_pfn | flag;
			else
				p->pfns[index] = flag;
			return;
		}
	}
}

/* mark the GFN as a secure-GFN associated with the @uvmem_pfn device-PFN. */
static void kvmppc_gfn_secure_uvmem_pfn(unsigned long gfn,
		unsigned long uvmem_pfn, struct kvm *kvm)
{
	kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_UVMEM_PFN, uvmem_pfn);
}

/* mark the GFN as a secure-GFN associated with a memory-PFN. */
static void kvmppc_gfn_secure_mem_pfn(unsigned long gfn, struct kvm *kvm)
{
	kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_MEM_PFN, 0);
}

/* mark the GFN as a shared GFN. */
static void kvmppc_gfn_shared(unsigned long gfn, struct kvm *kvm)
{
	kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_SHARED, 0);
}

/* mark the GFN as a non-existent GFN. */
static void kvmppc_gfn_remove(unsigned long gfn, struct kvm *kvm)
{
	kvmppc_mark_gfn(gfn, kvm, 0, 0);
}

/* return true if the GFN is a secure-GFN backed by a secure-PFN */
static bool kvmppc_gfn_is_uvmem_pfn(unsigned long gfn, struct kvm *kvm,
				    unsigned long *uvmem_pfn)
{
	struct kvmppc_uvmem_slot *p;

	list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) {
		if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) {
			unsigned long index = gfn - p->base_pfn;

			if (p->pfns[index] & KVMPPC_GFN_UVMEM_PFN) {
				if (uvmem_pfn)
					*uvmem_pfn = p->pfns[index] &
						     KVMPPC_GFN_PFN_MASK;
				return true;
			} else
				return false;
		}
	}

	return false;
}

/*
 * Starting from *gfn, search for the next available GFN that has not yet
 * transitioned to a secure GFN. Return the value of that GFN in *gfn. If a
 * GFN is found, return true, else return false.
 *
 * Must be called with kvm->arch.uvmem_lock held.
 */
static bool kvmppc_next_nontransitioned_gfn(const struct kvm_memory_slot *memslot,
		struct kvm *kvm, unsigned long *gfn)
{
	struct kvmppc_uvmem_slot *p = NULL, *iter;
	bool ret = false;
	unsigned long i;

	list_for_each_entry(iter, &kvm->arch.uvmem_pfns, list)
		if (*gfn >= iter->base_pfn && *gfn < iter->base_pfn + iter->nr_pfns) {
			p = iter;
			break;
		}
	if (!p)
		return ret;
	/*
	 * The code below assumes a one-to-one correspondence between
	 * kvmppc_uvmem_slot and memslot.
	 */
	for (i = *gfn; i < p->base_pfn + p->nr_pfns; i++) {
		unsigned long index = i - p->base_pfn;

		if (!(p->pfns[index] & KVMPPC_GFN_FLAG_MASK)) {
			*gfn = i;
			ret = true;
			break;
		}
	}
	return ret;
}

static int kvmppc_memslot_page_merge(struct kvm *kvm,
		const struct kvm_memory_slot *memslot, bool merge)
{
	unsigned long gfn = memslot->base_gfn;
	unsigned long end, start = gfn_to_hva(kvm, gfn);
	int ret = 0;
	struct vm_area_struct *vma;
	int merge_flag = (merge) ? MADV_MERGEABLE : MADV_UNMERGEABLE;

	if (kvm_is_error_hva(start))
		return H_STATE;

	end = start + (memslot->npages << PAGE_SHIFT);

	mmap_write_lock(kvm->mm);
	do {
		vma = find_vma_intersection(kvm->mm, start, end);
		if (!vma) {
			ret = H_STATE;
			break;
		}
		ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
				  merge_flag, &vma->vm_flags);
		if (ret) {
			ret = H_STATE;
			break;
		}
		start = vma->vm_end;
	} while (end > vma->vm_end);

	mmap_write_unlock(kvm->mm);
	return ret;
}

static void __kvmppc_uvmem_memslot_delete(struct kvm *kvm,
		const struct kvm_memory_slot *memslot)
{
	uv_unregister_mem_slot(kvm->arch.lpid, memslot->id);
	kvmppc_uvmem_slot_free(kvm, memslot);
	kvmppc_memslot_page_merge(kvm, memslot, true);
}

static int __kvmppc_uvmem_memslot_create(struct kvm *kvm,
		const struct kvm_memory_slot *memslot)
{
	int ret = H_PARAMETER;

	if (kvmppc_memslot_page_merge(kvm, memslot, false))
		return ret;

	if (kvmppc_uvmem_slot_init(kvm, memslot))
		goto out1;

	ret = uv_register_mem_slot(kvm->arch.lpid,
				   memslot->base_gfn << PAGE_SHIFT,
				   memslot->npages * PAGE_SIZE,
				   0, memslot->id);
	if (ret < 0) {
		ret = H_PARAMETER;
		goto out;
	}
	return 0;
out:
	kvmppc_uvmem_slot_free(kvm, memslot);
out1:
	kvmppc_memslot_page_merge(kvm, memslot, true);
	return ret;
}

unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot, *m;
	int ret = H_SUCCESS;
	int srcu_idx;

	kvm->arch.secure_guest = KVMPPC_SECURE_INIT_START;

	if (!kvmppc_uvmem_bitmap)
		return H_UNSUPPORTED;

	/* Only radix guests can be secure guests */
	if (!kvm_is_radix(kvm))
		return H_UNSUPPORTED;

	/* NAK the transition to secure if not enabled */
	if (!kvm->arch.svm_enabled)
		return H_AUTHORITY;

	srcu_idx = srcu_read_lock(&kvm->srcu);

	/* register the memslots */
	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots) {
		ret = __kvmppc_uvmem_memslot_create(kvm, memslot);
		if (ret)
			break;
	}

	/* on failure, undo the memslots that were created successfully */
	if (ret) {
		slots = kvm_memslots(kvm);
		kvm_for_each_memslot(m, slots) {
			if (m == memslot)
				break;
			__kvmppc_uvmem_memslot_delete(kvm, m);
		}
	}

	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return ret;
}

/*
 * Provision a new page on HV side and copy over the contents
 * from secure memory using UV_PAGE_OUT uvcall.
 * Caller must hold kvm->arch.uvmem_lock.
 */
static int __kvmppc_svm_page_out(struct vm_area_struct *vma,
		unsigned long start,
		unsigned long end, unsigned long page_shift,
		struct kvm *kvm, unsigned long gpa)
{
	unsigned long src_pfn, dst_pfn = 0;
	struct migrate_vma mig;
	struct page *dpage, *spage;
	struct kvmppc_uvmem_page_pvt *pvt;
	unsigned long pfn;
	int ret = U_SUCCESS;

	memset(&mig, 0, sizeof(mig));
	mig.vma = vma;
	mig.start = start;
	mig.end = end;
	mig.src = &src_pfn;
	mig.dst = &dst_pfn;
	mig.pgmap_owner = &kvmppc_uvmem_pgmap;
	mig.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;

	/* The requested page is already paged-out, nothing to do */
	if (!kvmppc_gfn_is_uvmem_pfn(gpa >> page_shift, kvm, NULL))
		return ret;

	ret = migrate_vma_setup(&mig);
	if (ret)
		return -1;

	spage = migrate_pfn_to_page(*mig.src);
	if (!spage || !(*mig.src & MIGRATE_PFN_MIGRATE))
		goto out_finalize;

	if (!is_zone_device_page(spage))
		goto out_finalize;

	dpage = alloc_page_vma(GFP_HIGHUSER, vma, start);
	if (!dpage) {
		ret = -1;
		goto out_finalize;
	}

	lock_page(dpage);
	pvt = spage->zone_device_data;
	pfn = page_to_pfn(dpage);

	/*
	 * This function is used in two cases:
	 * - When HV touches a secure page, for which we do UV_PAGE_OUT
	 * - When a secure page is converted to a shared page, we *get*
	 *   the page to essentially unmap the device page. In this
	 *   case we skip page-out.
	 */
	if (!pvt->skip_page_out)
		ret = uv_page_out(kvm->arch.lpid, pfn << page_shift,
				  gpa, 0, page_shift);

	if (ret == U_SUCCESS)
		*mig.dst = migrate_pfn(pfn) | MIGRATE_PFN_LOCKED;
	else {
		unlock_page(dpage);
		__free_page(dpage);
		goto out_finalize;
	}

	migrate_vma_pages(&mig);

out_finalize:
	migrate_vma_finalize(&mig);
	return ret;
}

static inline int kvmppc_svm_page_out(struct vm_area_struct *vma,
				      unsigned long start, unsigned long end,
				      unsigned long page_shift,
				      struct kvm *kvm, unsigned long gpa)
{
	int ret;

	mutex_lock(&kvm->arch.uvmem_lock);
	ret = __kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa);
	mutex_unlock(&kvm->arch.uvmem_lock);

	return ret;
}

/*
 * Drop the device pages that we maintain for the secure guest.
 *
 * We first mark the pages to be skipped from UV_PAGE_OUT when there
 * is an HV-side fault on these pages. Next we *get* these pages, forcing
 * a fault on them, and do fault-time migration to replace the device PTEs
 * in the QEMU page table with normal PTEs from newly allocated pages.
 */
void kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *slot,
			     struct kvm *kvm, bool skip_page_out)
{
	int i;
	struct kvmppc_uvmem_page_pvt *pvt;
	struct page *uvmem_page;
	struct vm_area_struct *vma = NULL;
	unsigned long uvmem_pfn, gfn;
	unsigned long addr;

	mmap_read_lock(kvm->mm);

	addr = slot->userspace_addr;

	gfn = slot->base_gfn;
	for (i = slot->npages; i; --i, ++gfn, addr += PAGE_SIZE) {

		/* Fetch the VMA if addr is not in the latest fetched one */
		if (!vma || addr >= vma->vm_end) {
			vma = find_vma_intersection(kvm->mm, addr, addr+1);
			if (!vma) {
				pr_err("Can't find VMA for gfn:0x%lx\n", gfn);
				break;
			}
		}

		mutex_lock(&kvm->arch.uvmem_lock);

		if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) {
			uvmem_page = pfn_to_page(uvmem_pfn);
			pvt = uvmem_page->zone_device_data;
			pvt->skip_page_out = skip_page_out;
			pvt->remove_gfn = true;

			if (__kvmppc_svm_page_out(vma, addr, addr + PAGE_SIZE,
						  PAGE_SHIFT, kvm, pvt->gpa))
				pr_err("Can't page out gpa:0x%lx addr:0x%lx\n",
				       pvt->gpa, addr);
		} else {
			/* Remove the shared flag if any */
			kvmppc_gfn_remove(gfn, kvm);
		}

		mutex_unlock(&kvm->arch.uvmem_lock);
	}

	mmap_read_unlock(kvm->mm);
}

unsigned long kvmppc_h_svm_init_abort(struct kvm *kvm)
{
	int srcu_idx;
	struct kvm_memory_slot *memslot;

	/*
	 * Expect to be called only after INIT_START and before INIT_DONE.
	 * If INIT_DONE was completed, use the normal VM termination sequence.
	 */
	if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
		return H_UNSUPPORTED;

	if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
		return H_STATE;

	srcu_idx = srcu_read_lock(&kvm->srcu);

	kvm_for_each_memslot(memslot, kvm_memslots(kvm))
		kvmppc_uvmem_drop_pages(memslot, kvm, false);

	srcu_read_unlock(&kvm->srcu, srcu_idx);

	kvm->arch.secure_guest = 0;
	uv_svm_terminate(kvm->arch.lpid);

	return H_PARAMETER;
}

/*
 * Get a free device PFN from the pool.
 *
 * Called when a normal page is moved to secure memory (UV_PAGE_IN). The
 * device PFN will be used to keep track of the secure page on the HV side.
 *
 * Called with kvm->arch.uvmem_lock held.
 */
static struct page *kvmppc_uvmem_get_page(unsigned long gpa, struct kvm *kvm)
{
	struct page *dpage = NULL;
	unsigned long bit, uvmem_pfn;
	struct kvmppc_uvmem_page_pvt *pvt;
	unsigned long pfn_last, pfn_first;

	pfn_first = kvmppc_uvmem_pgmap.range.start >> PAGE_SHIFT;
	pfn_last = pfn_first +
		   (range_len(&kvmppc_uvmem_pgmap.range) >> PAGE_SHIFT);

	spin_lock(&kvmppc_uvmem_bitmap_lock);
	bit = find_first_zero_bit(kvmppc_uvmem_bitmap,
				  pfn_last - pfn_first);
	if (bit >= (pfn_last - pfn_first))
		goto out;
	bitmap_set(kvmppc_uvmem_bitmap, bit, 1);
	spin_unlock(&kvmppc_uvmem_bitmap_lock);

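	/*
	 * The bit is reserved under the spinlock above; kzalloc() below can
	 * sleep, so it runs unlocked and the bit is released again on
	 * failure (see out_clear below).
	 */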
	pvt = kzalloc(sizeof(*pvt), GFP_KERNEL);
	if (!pvt)
		goto out_clear;

	uvmem_pfn = bit + pfn_first;
	kvmppc_gfn_secure_uvmem_pfn(gpa >> PAGE_SHIFT, uvmem_pfn, kvm);

	pvt->gpa = gpa;
	pvt->kvm = kvm;

	dpage = pfn_to_page(uvmem_pfn);
	dpage->zone_device_data = pvt;
	get_page(dpage);
	lock_page(dpage);
	return dpage;
out_clear:
	spin_lock(&kvmppc_uvmem_bitmap_lock);
	bitmap_clear(kvmppc_uvmem_bitmap, bit, 1);
out:
	spin_unlock(&kvmppc_uvmem_bitmap_lock);
	return NULL;
}

/*
 * Alloc a PFN from the private device memory pool. If @pagein is true,
 * copy the page from normal memory to secure memory using UV_PAGE_IN uvcall.
 */
static int kvmppc_svm_page_in(struct vm_area_struct *vma,
		unsigned long start,
		unsigned long end, unsigned long gpa, struct kvm *kvm,
		unsigned long page_shift,
		bool pagein)
{
	unsigned long src_pfn, dst_pfn = 0;
	struct migrate_vma mig;
	struct page *spage;
	unsigned long pfn;
	struct page *dpage;
	int ret = 0;

	memset(&mig, 0, sizeof(mig));
	mig.vma = vma;
	mig.start = start;
	mig.end = end;
	mig.src = &src_pfn;
	mig.dst = &dst_pfn;
	mig.flags = MIGRATE_VMA_SELECT_SYSTEM;

	ret = migrate_vma_setup(&mig);
	if (ret)
		return ret;

	if (!(*mig.src & MIGRATE_PFN_MIGRATE)) {
		ret = -1;
		goto out_finalize;
	}

	dpage = kvmppc_uvmem_get_page(gpa, kvm);
	if (!dpage) {
		ret = -1;
		goto out_finalize;
	}

	if (pagein) {
		pfn = *mig.src >> MIGRATE_PFN_SHIFT;
		spage = migrate_pfn_to_page(*mig.src);
		if (spage) {
			ret = uv_page_in(kvm->arch.lpid, pfn << page_shift,
					 gpa, 0, page_shift);
			if (ret)
				goto out_finalize;
		}
	}

	*mig.dst = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
	migrate_vma_pages(&mig);
out_finalize:
	migrate_vma_finalize(&mig);
	return ret;
}

static int kvmppc_uv_migrate_mem_slot(struct kvm *kvm,
		const struct kvm_memory_slot *memslot)
{
	unsigned long gfn = memslot->base_gfn;
	struct vm_area_struct *vma;
	unsigned long start, end;
	int ret = 0;

	mmap_read_lock(kvm->mm);
	mutex_lock(&kvm->arch.uvmem_lock);
	while (kvmppc_next_nontransitioned_gfn(memslot, kvm, &gfn)) {
		ret = H_STATE;
		start = gfn_to_hva(kvm, gfn);
		if (kvm_is_error_hva(start))
			break;

		end = start + (1UL << PAGE_SHIFT);
		vma = find_vma_intersection(kvm->mm, start, end);
		if (!vma || vma->vm_start > start || vma->vm_end < end)
			break;

		ret = kvmppc_svm_page_in(vma, start, end,
				(gfn << PAGE_SHIFT), kvm, PAGE_SHIFT, false);
		if (ret) {
			ret = H_STATE;
			break;
		}

		/* relinquish the CPU if needed */
		cond_resched();
	}

	mutex_unlock(&kvm->arch.uvmem_lock);
	mmap_read_unlock(kvm->mm);

	return ret;
}

unsigned long kvmppc_h_svm_init_done(struct kvm *kvm)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int srcu_idx;
	long ret = H_SUCCESS;

	if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
		return H_UNSUPPORTED;

	/* migrate any unmoved normal pfns to device pfns */
	srcu_idx = srcu_read_lock(&kvm->srcu);
	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots) {
		ret = kvmppc_uv_migrate_mem_slot(kvm, memslot);
		if (ret) {
			/*
			 * The pages will remain transitioned.
			 * It is the caller's responsibility to
			 * terminate the VM, which will undo
			 * all state of the VM. Until then,
			 * this VM is in an erroneous state.
			 * Its KVMPPC_SECURE_INIT_DONE will
			 * remain unset.
			 */
			ret = H_STATE;
			goto out;
		}
	}

	kvm->arch.secure_guest |= KVMPPC_SECURE_INIT_DONE;
	pr_info("LPID %d went secure\n", kvm->arch.lpid);

out:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return ret;
}

/*
 * Shares the page with HV, thus making it a normal page.
 *
 * - If the page is already secure, then provision a new page and share it.
 * - If the page is a normal page, share the existing page.
 *
 * In the former case, this uses the dev_pagemap_ops.migrate_to_ram handler
 * to unmap the device page from QEMU's page tables.
 */
static unsigned long kvmppc_share_page(struct kvm *kvm, unsigned long gpa,
		unsigned long page_shift)
{
	int ret = H_PARAMETER;
	struct page *uvmem_page;
	struct kvmppc_uvmem_page_pvt *pvt;
	unsigned long pfn;
	unsigned long gfn = gpa >> page_shift;
	int srcu_idx;
	unsigned long uvmem_pfn;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	mutex_lock(&kvm->arch.uvmem_lock);
	if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) {
		uvmem_page = pfn_to_page(uvmem_pfn);
		pvt = uvmem_page->zone_device_data;
		pvt->skip_page_out = true;
		/*
		 * do not drop the GFN. It is a valid GFN
		 * that is transitioned to a shared GFN.
		 */
		pvt->remove_gfn = false;
	}

retry:
	mutex_unlock(&kvm->arch.uvmem_lock);
	pfn = gfn_to_pfn(kvm, gfn);
	if (is_error_noslot_pfn(pfn))
		goto out;

	mutex_lock(&kvm->arch.uvmem_lock);
	if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) {
		uvmem_page = pfn_to_page(uvmem_pfn);
		pvt = uvmem_page->zone_device_data;
		pvt->skip_page_out = true;
		pvt->remove_gfn = false; /* it continues to be a valid GFN */
		kvm_release_pfn_clean(pfn);
		goto retry;
	}

	if (!uv_page_in(kvm->arch.lpid, pfn << page_shift, gpa, 0,
			page_shift)) {
		kvmppc_gfn_shared(gfn, kvm);
		ret = H_SUCCESS;
	}
	kvm_release_pfn_clean(pfn);
	mutex_unlock(&kvm->arch.uvmem_lock);
out:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return ret;
}

/*
 * H_SVM_PAGE_IN: Move page from normal memory to secure memory.
 *
 * The H_PAGE_IN_SHARED flag makes the page shared, which means that the
 * same memory is visible from both UV and HV.
 */
unsigned long kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa,
		unsigned long flags,
		unsigned long page_shift)
{
	unsigned long start, end;
	struct vm_area_struct *vma;
	int srcu_idx;
	unsigned long gfn = gpa >> page_shift;
	int ret;

	if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
		return H_UNSUPPORTED;

	if (page_shift != PAGE_SHIFT)
		return H_P3;

	if (flags & ~H_PAGE_IN_SHARED)
		return H_P2;

	if (flags & H_PAGE_IN_SHARED)
		return kvmppc_share_page(kvm, gpa, page_shift);

	ret = H_PARAMETER;
	srcu_idx = srcu_read_lock(&kvm->srcu);
	mmap_read_lock(kvm->mm);

	start = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(start))
		goto out;

	mutex_lock(&kvm->arch.uvmem_lock);
	/* Fail the page-in request of an already paged-in page */
	if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, NULL))
		goto out_unlock;

	end = start + (1UL << page_shift);
	vma = find_vma_intersection(kvm->mm, start, end);
	if (!vma || vma->vm_start > start || vma->vm_end < end)
		goto out_unlock;

	if (kvmppc_svm_page_in(vma, start, end, gpa, kvm, page_shift,
			       true))
		goto out_unlock;

	ret = H_SUCCESS;

out_unlock:
	mutex_unlock(&kvm->arch.uvmem_lock);
out:
	mmap_read_unlock(kvm->mm);
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return ret;
}

/*
 * Fault handler callback that gets called when HV touches any page that
 * has been moved to secure memory. We ask UV to give back the page by
 * issuing UV_PAGE_OUT uvcall.
 *
 * This eventually results in dropping of the device PFN, and the newly
 * provisioned page/PFN gets populated in QEMU page tables.
 */
static vm_fault_t kvmppc_uvmem_migrate_to_ram(struct vm_fault *vmf)
{
	struct kvmppc_uvmem_page_pvt *pvt = vmf->page->zone_device_data;

	if (kvmppc_svm_page_out(vmf->vma, vmf->address,
				vmf->address + PAGE_SIZE, PAGE_SHIFT,
				pvt->kvm, pvt->gpa))
		return VM_FAULT_SIGBUS;
	else
		return 0;
}

/*
 * Release the device PFN back to the pool.
 *
 * Gets called when a secure GFN transitions from a secure-PFN
 * to a normal PFN during H_SVM_PAGE_OUT.
 * Gets called with kvm->arch.uvmem_lock held.
 */
static void kvmppc_uvmem_page_free(struct page *page)
{
	unsigned long pfn = page_to_pfn(page) -
			(kvmppc_uvmem_pgmap.range.start >> PAGE_SHIFT);
	struct kvmppc_uvmem_page_pvt *pvt;

	spin_lock(&kvmppc_uvmem_bitmap_lock);
	bitmap_clear(kvmppc_uvmem_bitmap, pfn, 1);
	spin_unlock(&kvmppc_uvmem_bitmap_lock);

	pvt = page->zone_device_data;
	page->zone_device_data = NULL;
	if (pvt->remove_gfn)
		kvmppc_gfn_remove(pvt->gpa >> PAGE_SHIFT, pvt->kvm);
	else
		kvmppc_gfn_secure_mem_pfn(pvt->gpa >> PAGE_SHIFT, pvt->kvm);
	kfree(pvt);
}

static const struct dev_pagemap_ops kvmppc_uvmem_ops = {
	.page_free = kvmppc_uvmem_page_free,
	.migrate_to_ram	= kvmppc_uvmem_migrate_to_ram,
};

/*
 * H_SVM_PAGE_OUT: Move page from secure memory to normal memory.
 */
unsigned long
kvmppc_h_svm_page_out(struct kvm *kvm, unsigned long gpa,
		      unsigned long flags, unsigned long page_shift)
{
	unsigned long gfn = gpa >> page_shift;
	unsigned long start, end;
	struct vm_area_struct *vma;
	int srcu_idx;
	int ret;

	if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
		return H_UNSUPPORTED;

	if (page_shift != PAGE_SHIFT)
		return H_P3;

	if (flags)
		return H_P2;

	ret = H_PARAMETER;
	srcu_idx = srcu_read_lock(&kvm->srcu);
	mmap_read_lock(kvm->mm);
	start = gfn_to_hva(kvm, gfn);
	if (kvm_is_error_hva(start))
		goto out;

	end = start + (1UL << page_shift);
	vma = find_vma_intersection(kvm->mm, start, end);
	if (!vma || vma->vm_start > start || vma->vm_end < end)
		goto out;

	if (!kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa))
		ret = H_SUCCESS;
out:
	mmap_read_unlock(kvm->mm);
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return ret;
}

int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn)
{
	unsigned long pfn;
	int ret = U_SUCCESS;

	pfn = gfn_to_pfn(kvm, gfn);
	if (is_error_noslot_pfn(pfn))
		return -EFAULT;

	mutex_lock(&kvm->arch.uvmem_lock);
	if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, NULL))
		goto out;

	ret = uv_page_in(kvm->arch.lpid, pfn << PAGE_SHIFT, gfn << PAGE_SHIFT,
			 0, PAGE_SHIFT);
out:
	kvm_release_pfn_clean(pfn);
	mutex_unlock(&kvm->arch.uvmem_lock);
	return (ret == U_SUCCESS) ? RESUME_GUEST : -EFAULT;
}

int kvmppc_uvmem_memslot_create(struct kvm *kvm, const struct kvm_memory_slot *new)
{
	int ret = __kvmppc_uvmem_memslot_create(kvm, new);

	if (!ret)
		ret = kvmppc_uv_migrate_mem_slot(kvm, new);

	return ret;
}

void kvmppc_uvmem_memslot_delete(struct kvm *kvm, const struct kvm_memory_slot *old)
{
	__kvmppc_uvmem_memslot_delete(kvm, old);
}

static u64 kvmppc_get_secmem_size(void)
{
	struct device_node *np;
	int i, len;
	const __be32 *prop;
	u64 size = 0;

	/*
	 * First try the new ibm,secure-memory nodes, which supersede the
	 * secure-memory-ranges property.
	 * If we found some, there is no need to read the deprecated ones.
	 */
	for_each_compatible_node(np, NULL, "ibm,secure-memory") {
		prop = of_get_property(np, "reg", &len);
		if (!prop)
			continue;
		size += of_read_number(prop + 2, 2);
	}
	if (size)
		return size;

	np = of_find_compatible_node(NULL, NULL, "ibm,uv-firmware");
	if (!np)
		goto out;

	prop = of_get_property(np, "secure-memory-ranges", &len);
	if (!prop)
		goto out_put;

	for (i = 0; i < len / (sizeof(*prop) * 4); i++)
		size += of_read_number(prop + (i * 4) + 2, 2);

out_put:
	of_node_put(np);
out:
	return size;
}

int kvmppc_uvmem_init(void)
{
	int ret = 0;
	unsigned long size;
	struct resource *res;
	void *addr;
	unsigned long pfn_last, pfn_first;

	size = kvmppc_get_secmem_size();
	if (!size) {
		/*
		 * Don't fail the initialization of the kvm-hv module if
		 * the platform doesn't export the ibm,uv-firmware node.
		 * Let normal guests run on such PEF-disabled platforms.
		 */
		pr_info("KVMPPC-UVMEM: No support for secure guests\n");
		goto out;
	}

	res = request_free_mem_region(&iomem_resource, size, "kvmppc_uvmem");
	if (IS_ERR(res)) {
		ret = PTR_ERR(res);
		goto out;
	}

	kvmppc_uvmem_pgmap.type = MEMORY_DEVICE_PRIVATE;
	kvmppc_uvmem_pgmap.range.start = res->start;
	kvmppc_uvmem_pgmap.range.end = res->end;
	kvmppc_uvmem_pgmap.nr_range = 1;
	kvmppc_uvmem_pgmap.ops = &kvmppc_uvmem_ops;
	/* just one global instance: */
	kvmppc_uvmem_pgmap.owner = &kvmppc_uvmem_pgmap;
	addr = memremap_pages(&kvmppc_uvmem_pgmap, NUMA_NO_NODE);
	if (IS_ERR(addr)) {
		ret = PTR_ERR(addr);
		goto out_free_region;
	}

	pfn_first = res->start >> PAGE_SHIFT;
	pfn_last = pfn_first + (resource_size(res) >> PAGE_SHIFT);
	kvmppc_uvmem_bitmap = kcalloc(BITS_TO_LONGS(pfn_last - pfn_first),
				      sizeof(unsigned long), GFP_KERNEL);
	if (!kvmppc_uvmem_bitmap) {
		ret = -ENOMEM;
		goto out_unmap;
	}

	pr_info("KVMPPC-UVMEM: Secure Memory size 0x%lx\n", size);
	return ret;
out_unmap:
	memunmap_pages(&kvmppc_uvmem_pgmap);
out_free_region:
	release_mem_region(res->start, size);
out:
	return ret;
}

void kvmppc_uvmem_free(void)
{
	if (!kvmppc_uvmem_bitmap)
		return;

	memunmap_pages(&kvmppc_uvmem_pgmap);
	release_mem_region(kvmppc_uvmem_pgmap.range.start,
			   range_len(&kvmppc_uvmem_pgmap.range));
	kfree(kvmppc_uvmem_bitmap);
}