// SPDX-License-Identifier: GPL-2.0
/*
 * Secure pages management: Migration of pages between normal and secure
 * memory of KVM guests.
 *
 * Copyright 2018 Bharata B Rao, IBM Corp. <bharata@linux.ibm.com>
 */

/*
 * A pseries guest can be run as secure guest on Ultravisor-enabled
 * POWER platforms. On such platforms, this driver will be used to manage
 * the movement of guest pages between the normal memory managed by
 * hypervisor (HV) and secure memory managed by Ultravisor (UV).
 *
 * The page-in or page-out requests from UV will come to HV as hcalls and
 * HV will call back into UV via ultracalls to satisfy these page requests.
 *
 * Private ZONE_DEVICE memory equal to the amount of secure memory
 * available in the platform for running secure guests is hotplugged.
 * Whenever a page belonging to the guest becomes secure, a page from this
 * private device memory is used to represent and track that secure page
 * on the HV side. Some pages (like virtio buffers, VPA pages etc) are
 * shared between UV and HV. However such pages aren't represented by
 * device private memory and mappings to shared memory exist in both
 * UV and HV page tables.
 */

/*
 * Notes on locking
 *
 * kvm->arch.uvmem_lock is a per-guest lock that prevents concurrent
 * page-in and page-out requests for the same GPA. Concurrent accesses
 * can either come via UV (guest vCPUs requesting the same page)
 * or when HV and guest simultaneously access the same page.
 * This mutex serializes the migration of a page from HV (normal) to
 * UV (secure) and vice versa. So the serialization points are around
 * migrate_vma routines and page-in/out routines.
 *
 * Per-guest mutex comes with a cost though. Mainly it serializes the
 * fault path as page-out can occur when HV faults on accessing secure
 * guest pages. Currently UV issues page-in requests for all the guest
 * PFNs one at a time during early boot (UV_ESM uvcall), so this is
 * not a cause for concern. Also currently the number of page-outs caused
 * by HV touching secure pages is very low. If and when UV supports
 * overcommitting, then we might see concurrent guest-driven page-outs.
 *
 * Locking order
 *
 * 1. kvm->srcu - Protects KVM memslots
 * 2. kvm->mm->mmap_lock - find_vma, migrate_vma_pages and helpers, ksm_madvise
 * 3. kvm->arch.uvmem_lock - protects read/writes to uvmem slots thus acting
 *    as sync-points for page-in/out
 */
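
/*
 * Illustrative sketch of the locking order above; this is example-only
 * code (kept out of the build with #if 0), and the helper name is made
 * up. The locks and fields themselves are the real ones used in this
 * file.
 */
#if 0
static void uvmem_lock_order_example(struct kvm *kvm)
{
        int srcu_idx;

        srcu_idx = srcu_read_lock(&kvm->srcu);  /* 1. protects memslots */
        mmap_read_lock(kvm->mm);                /* 2. protects VMA lookups */
        mutex_lock(&kvm->arch.uvmem_lock);      /* 3. serializes page-in/out */

        /* migrate_vma_setup()/pages()/finalize() work would run here */

        mutex_unlock(&kvm->arch.uvmem_lock);
        mmap_read_unlock(kvm->mm);
        srcu_read_unlock(&kvm->srcu, srcu_idx);
}
#endif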

/*
 * Notes on page size
 *
 * Currently UV uses 2MB mappings internally, but will issue H_SVM_PAGE_IN
 * and H_SVM_PAGE_OUT hcalls in PAGE_SIZE(64K) granularity. HV tracks
 * secure GPAs at 64K page size and maintains one device PFN for each
 * 64K secure GPA. UV_PAGE_IN and UV_PAGE_OUT calls by HV are also issued
 * for 64K page at a time.
 *
 * HV faulting on secure pages: When HV touches any secure page, it
 * faults and issues a UV_PAGE_OUT request with 64K page size. Currently
 * UV splits and remaps the 2MB page if necessary and copies out the
 * required 64K page contents.
 *
 * Shared pages: Whenever guest shares a secure page, UV will split and
 * remap the 2MB page if required and issue H_SVM_PAGE_IN with 64K page size.
 *
 * HV invalidating a page: When a regular page belonging to secure
 * guest gets unmapped, HV informs UV with UV_PAGE_INVAL of 64K
 * page size. Using 64K page size is correct here because any non-secure
 * page will essentially be of 64K page size. Splitting by UV during sharing
 * and page-out ensures this.
 *
 * Page fault handling: When HV handles page fault of a page belonging
 * to secure guest, it sends that to UV with a 64K UV_PAGE_IN request.
 * Using 64K size is correct here too as UV would have split the 2MB page
 * into 64K mappings and would have done page-outs earlier.
 *
 * In summary, the current secure pages handling code in HV assumes
 * 64K page size and in fact fails any page-in/page-out requests of
 * non-64K size upfront. If and when UV starts supporting multiple
 * page-sizes, we need to break this assumption.
 */
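
/*
 * A minimal sketch of the upfront rejection described above; the hcall
 * handlers below open with this page-size check. Example-only code
 * (not compiled), with a made-up helper name.
 */
#if 0
static bool uvmem_page_shift_ok(unsigned long page_shift)
{
        return page_shift == PAGE_SHIFT;        /* 64K base pages only */
}
#endif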

#include <linux/pagemap.h>
#include <linux/migrate.h>
#include <linux/kvm_host.h>
#include <linux/ksm.h>

#include <asm/ultravisor.h>
#include <asm/mman.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s_uvmem.h>

static struct dev_pagemap kvmppc_uvmem_pgmap;
static unsigned long *kvmppc_uvmem_bitmap;
static DEFINE_SPINLOCK(kvmppc_uvmem_bitmap_lock);

/*
 * States of a GFN
 * ---------------
 * The GFN can be in one of the following states.
 *
 * (a) Secure - The GFN is secure. The GFN is associated with
 *      a Secure VM, the contents of the GFN is not accessible
 *      to the Hypervisor. This GFN can be backed by a secure-PFN,
 *      or can be backed by a normal-PFN with contents encrypted.
 *      The former is true when the GFN is paged-in into the
 *      ultravisor. The latter is true when the GFN is paged-out
 *      of the ultravisor.
 *
 * (b) Shared - The GFN is shared. The GFN is associated with a
 *      secure VM. The contents of the GFN is accessible to the
 *      Hypervisor. This GFN is backed by a normal-PFN and its
 *      content is un-encrypted.
 *
 * (c) Normal - The GFN is a normal GFN. It is associated with
 *      a normal VM. The contents of the GFN is accessible to
 *      the Hypervisor. Its content is never encrypted.
 *
 * States of a VM
 * --------------
 *
 * Normal VM: A VM whose contents are always accessible to
 *      the hypervisor. All its GFNs are normal-GFNs.
 *
 * Secure VM: A VM whose contents are not accessible to the
 *      hypervisor without the VM's consent. Its GFNs are
 *      either Shared-GFNs or Secure-GFNs.
 *
 * Transient VM: A Normal VM that is transitioning to secure VM.
 *      The transition starts on successful return of
 *      H_SVM_INIT_START, and ends on successful return
 *      of H_SVM_INIT_DONE. This transient VM can have GFNs
 *      in any of the three states, i.e. Secure-GFN, Shared-GFN,
 *      and Normal-GFN. The VM never executes in this state
 *      in supervisor-mode.
 *
 * Memory slot State
 * -----------------
 *      The state of a memory slot mirrors the state of the
 *      VM the memory slot is associated with.
 *
 * VM State transition
 * -------------------
 *
 * A VM always starts in Normal Mode.
 *
 * H_SVM_INIT_START moves the VM into transient state. During this
 * time the Ultravisor may request some of its GFNs to be shared or
 * secured. So its GFNs can be in one of the three GFN states.
 *
 * H_SVM_INIT_DONE moves the VM entirely from transient state to
 * secure-state. At this point any left-over normal-GFNs are
 * transitioned to Secure-GFNs.
 *
 * H_SVM_INIT_ABORT moves the transient VM back to normal VM.
 * All its GFNs are moved to Normal-GFNs.
 *
 * UV_TERMINATE transitions the secure-VM back to normal-VM. All
 * the secure-GFNs and shared-GFNs are transitioned to normal-GFNs.
 * Note: The contents of the normal-GFN is undefined at this point.
 *
 * GFN state implementation
 * ------------------------
 *
 * Secure GFN is associated with a secure-PFN; also called uvmem_pfn,
 * when the GFN is paged-in. Its pfn[] has KVMPPC_GFN_UVMEM_PFN flag
 * set, and contains the value of the secure-PFN.
 * It is associated with a normal-PFN; also called mem_pfn, when
 * the GFN is paged-out. Its pfn[] has KVMPPC_GFN_MEM_PFN flag set.
 * The value of the normal-PFN is not tracked.
 *
 * Shared GFN is associated with a normal-PFN. Its pfn[] has
 * KVMPPC_GFN_SHARED flag set. The value of the normal-PFN
 * is not tracked.
 *
 * Normal GFN is associated with a normal-PFN. Its pfn[] has
 * no flag set. The value of the normal-PFN is not tracked.
 *
 * Life cycle of a GFN
 * -------------------
 *
 * --------------------------------------------------------------
 * | Current |  Share   | Unshare  | SVM abort/ |H_SVM_INIT_DONE|
 * |  state  |operation |operation | terminate  |               |
 * --------------------------------------------------------------
 * | Secure  |  Shared  |  Secure  |   Normal   |    Secure     |
 * | Shared  |  Shared  |  Secure  |   Normal   |    Shared     |
 * | Normal  |  Shared  |  Secure  |   Normal   |    Secure     |
 * --------------------------------------------------------------
 *
 * Life cycle of a VM
 * ------------------
 *
 * -----------------------------------------------------------------
 * | Current |  start  |  H_SVM_  | H_SVM_  |  H_SVM_   | UV_SVM_  |
 * |  state  |   VM    |INIT_START|INIT_DONE|INIT_ABORT |TERMINATE |
 * -----------------------------------------------------------------
 * | Normal  | Normal  | Transient|  Error  |  Error    | Normal   |
 * | Secure  |  Error  |  Error   |  Error  |  Error    | Normal   |
 * |Transient|   N/A   |  Error   | Secure  |  Normal   | Normal   |
 * -----------------------------------------------------------------
 */
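
/*
 * Illustrative encoding of the VM life cycle table above. Example-only
 * (not compiled): the driver keeps the real state in
 * kvm->arch.secure_guest flags, and the enum, helper and *_EVT event
 * names below are made up for the sketch.
 */
#if 0
enum uvmem_vm_state { VM_NORMAL, VM_TRANSIENT, VM_SECURE, VM_ERROR };

static enum uvmem_vm_state uvmem_vm_next(enum uvmem_vm_state cur, int event)
{
        switch (event) {
        case H_SVM_INIT_START_EVT:
                return cur == VM_NORMAL ? VM_TRANSIENT : VM_ERROR;
        case H_SVM_INIT_DONE_EVT:
                return cur == VM_TRANSIENT ? VM_SECURE : VM_ERROR;
        case H_SVM_INIT_ABORT_EVT:
                return cur == VM_TRANSIENT ? VM_NORMAL : VM_ERROR;
        case UV_SVM_TERMINATE_EVT:
                return VM_NORMAL;       /* valid from any state */
        default:
                return VM_ERROR;
        }
}
#endif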

#define KVMPPC_GFN_UVMEM_PFN    (1UL << 63)
#define KVMPPC_GFN_MEM_PFN      (1UL << 62)
#define KVMPPC_GFN_SHARED       (1UL << 61)
#define KVMPPC_GFN_SECURE       (KVMPPC_GFN_UVMEM_PFN | KVMPPC_GFN_MEM_PFN)
#define KVMPPC_GFN_FLAG_MASK    (KVMPPC_GFN_SECURE | KVMPPC_GFN_SHARED)
#define KVMPPC_GFN_PFN_MASK     (~KVMPPC_GFN_FLAG_MASK)
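
/*
 * Example-only decoder for a pfns[] entry, showing how the flag bits
 * above combine with the PFN value. Not compiled; the helper name is
 * made up, the macros are the real ones defined above.
 */
#if 0
static void uvmem_decode_pfn_entry(unsigned long entry)
{
        unsigned long pfn = entry & KVMPPC_GFN_PFN_MASK;

        if (entry & KVMPPC_GFN_UVMEM_PFN)       /* secure, paged-in */
                pr_info("secure GFN backed by device PFN 0x%lx\n", pfn);
        else if (entry & KVMPPC_GFN_MEM_PFN)    /* secure, paged-out */
                pr_info("secure GFN paged out; normal PFN not tracked\n");
        else if (entry & KVMPPC_GFN_SHARED)
                pr_info("shared GFN\n");
        else
                pr_info("normal GFN\n");
}
#endif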

struct kvmppc_uvmem_slot {
        struct list_head list;
        unsigned long nr_pfns;
        unsigned long base_pfn;
        unsigned long *pfns;
};

struct kvmppc_uvmem_page_pvt {
        struct kvm *kvm;
        unsigned long gpa;
        bool skip_page_out;
        bool remove_gfn;
};

bool kvmppc_uvmem_available(void)
{
        /*
         * If kvmppc_uvmem_bitmap != NULL, then there is an ultravisor
         * and our data structures have been initialized successfully.
         */
        return !!kvmppc_uvmem_bitmap;
}

int kvmppc_uvmem_slot_init(struct kvm *kvm, const struct kvm_memory_slot *slot)
{
        struct kvmppc_uvmem_slot *p;

        p = kzalloc(sizeof(*p), GFP_KERNEL);
        if (!p)
                return -ENOMEM;
        p->pfns = vcalloc(slot->npages, sizeof(*p->pfns));
        if (!p->pfns) {
                kfree(p);
                return -ENOMEM;
        }
        p->nr_pfns = slot->npages;
        p->base_pfn = slot->base_gfn;

        mutex_lock(&kvm->arch.uvmem_lock);
        list_add(&p->list, &kvm->arch.uvmem_pfns);
        mutex_unlock(&kvm->arch.uvmem_lock);

        return 0;
}

/*
 * All device PFNs are already released by the time we come here.
 */
void kvmppc_uvmem_slot_free(struct kvm *kvm, const struct kvm_memory_slot *slot)
{
        struct kvmppc_uvmem_slot *p, *next;

        mutex_lock(&kvm->arch.uvmem_lock);
        list_for_each_entry_safe(p, next, &kvm->arch.uvmem_pfns, list) {
                if (p->base_pfn == slot->base_gfn) {
                        vfree(p->pfns);
                        list_del(&p->list);
                        kfree(p);
                        break;
                }
        }
        mutex_unlock(&kvm->arch.uvmem_lock);
}

static void kvmppc_mark_gfn(unsigned long gfn, struct kvm *kvm,
                            unsigned long flag, unsigned long uvmem_pfn)
{
        struct kvmppc_uvmem_slot *p;

        list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) {
                if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) {
                        unsigned long index = gfn - p->base_pfn;

                        if (flag == KVMPPC_GFN_UVMEM_PFN)
                                p->pfns[index] = uvmem_pfn | flag;
                        else
                                p->pfns[index] = flag;
                        return;
                }
        }
}

/* mark the GFN as secure-GFN associated with @uvmem_pfn device-PFN. */
static void kvmppc_gfn_secure_uvmem_pfn(unsigned long gfn,
                                        unsigned long uvmem_pfn, struct kvm *kvm)
{
        kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_UVMEM_PFN, uvmem_pfn);
}

/* mark the GFN as secure-GFN associated with a memory-PFN. */
static void kvmppc_gfn_secure_mem_pfn(unsigned long gfn, struct kvm *kvm)
{
        kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_MEM_PFN, 0);
}

/* mark the GFN as a shared GFN. */
static void kvmppc_gfn_shared(unsigned long gfn, struct kvm *kvm)
{
        kvmppc_mark_gfn(gfn, kvm, KVMPPC_GFN_SHARED, 0);
}

/* mark the GFN as a non-existent GFN. */
static void kvmppc_gfn_remove(unsigned long gfn, struct kvm *kvm)
{
        kvmppc_mark_gfn(gfn, kvm, 0, 0);
}

/* return true, if the GFN is a secure-GFN backed by a secure-PFN */
static bool kvmppc_gfn_is_uvmem_pfn(unsigned long gfn, struct kvm *kvm,
                                    unsigned long *uvmem_pfn)
{
        struct kvmppc_uvmem_slot *p;

        list_for_each_entry(p, &kvm->arch.uvmem_pfns, list) {
                if (gfn >= p->base_pfn && gfn < p->base_pfn + p->nr_pfns) {
                        unsigned long index = gfn - p->base_pfn;

                        if (p->pfns[index] & KVMPPC_GFN_UVMEM_PFN) {
                                if (uvmem_pfn)
                                        *uvmem_pfn = p->pfns[index] &
                                                     KVMPPC_GFN_PFN_MASK;
                                return true;
                        }
                        return false;
                }
        }
        return false;
}

/*
 * Starting from *gfn, search for the next available GFN that is not yet
 * transitioned to a secure GFN. Return the value of that GFN in *gfn.
 * If a GFN is found, return true, else return false.
 *
 * Must be called with kvm->arch.uvmem_lock held.
 */
static bool kvmppc_next_nontransitioned_gfn(const struct kvm_memory_slot *memslot,
                struct kvm *kvm, unsigned long *gfn)
{
        struct kvmppc_uvmem_slot *p = NULL, *iter;
        bool ret = false;
        unsigned long i;

        list_for_each_entry(iter, &kvm->arch.uvmem_pfns, list)
                if (*gfn >= iter->base_pfn && *gfn < iter->base_pfn + iter->nr_pfns) {
                        p = iter;
                        break;
                }
        if (!p)
                return ret;
        /*
         * The code below assumes a one-to-one correspondence between
         * kvmppc_uvmem_slot and memslot.
         */
        for (i = *gfn; i < p->base_pfn + p->nr_pfns; i++) {
                unsigned long index = i - p->base_pfn;

                if (!(p->pfns[index] & KVMPPC_GFN_FLAG_MASK)) {
                        *gfn = i;
                        ret = true;
                        break;
                }
        }
        return ret;
}

static int kvmppc_memslot_page_merge(struct kvm *kvm,
                const struct kvm_memory_slot *memslot, bool merge)
{
        unsigned long gfn = memslot->base_gfn;
        unsigned long end, start = gfn_to_hva(kvm, gfn);
        int ret = 0;
        struct vm_area_struct *vma;
        int merge_flag = (merge) ? MADV_MERGEABLE : MADV_UNMERGEABLE;

        if (kvm_is_error_hva(start))
                return H_STATE;

        end = start + (memslot->npages << PAGE_SHIFT);

        mmap_write_lock(kvm->mm);
        do {
                vma = find_vma_intersection(kvm->mm, start, end);
                if (!vma) {
                        ret = H_STATE;
                        break;
                }
                ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
                                  merge_flag, &vma->vm_flags);
                if (ret) {
                        ret = H_STATE;
                        break;
                }
                start = vma->vm_end;
        } while (end > vma->vm_end);

        mmap_write_unlock(kvm->mm);
        return ret;
}

static void __kvmppc_uvmem_memslot_delete(struct kvm *kvm,
                const struct kvm_memory_slot *memslot)
{
        uv_unregister_mem_slot(kvm->arch.lpid, memslot->id);
        kvmppc_uvmem_slot_free(kvm, memslot);
        kvmppc_memslot_page_merge(kvm, memslot, true);
}

static int __kvmppc_uvmem_memslot_create(struct kvm *kvm,
                const struct kvm_memory_slot *memslot)
{
        int ret = H_PARAMETER;

        if (kvmppc_memslot_page_merge(kvm, memslot, false))
                return ret;

        if (kvmppc_uvmem_slot_init(kvm, memslot))
                goto out1;

        ret = uv_register_mem_slot(kvm->arch.lpid,
                                   memslot->base_gfn << PAGE_SHIFT,
                                   memslot->npages * PAGE_SIZE,
                                   0, memslot->id);
        if (ret < 0) {
                ret = H_PARAMETER;
                goto out_free;
        }
        return 0;
out_free:
        kvmppc_uvmem_slot_free(kvm, memslot);
out1:
        kvmppc_memslot_page_merge(kvm, memslot, true);
        return ret;
}

unsigned long kvmppc_h_svm_init_start(struct kvm *kvm)
{
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot, *m;
        int ret = H_SUCCESS;
        int srcu_idx;

        kvm->arch.secure_guest = KVMPPC_SECURE_INIT_START;

        if (!kvmppc_uvmem_bitmap)
                return H_UNSUPPORTED;

        /* Only radix guests can be secure guests */
        if (!kvm_is_radix(kvm))
                return H_UNSUPPORTED;

        /* NAK the transition to secure if not enabled */
        if (!kvm->arch.svm_enabled)
                return H_AUTHORITY;

        srcu_idx = srcu_read_lock(&kvm->srcu);

        /* register the memslot */
        slots = kvm_memslots(kvm);
        kvm_for_each_memslot(memslot, slots) {
                ret = __kvmppc_uvmem_memslot_create(kvm, memslot);
                if (ret)
                        break;
        }

        if (ret) {
                /* undo the partial registration done above */
                slots = kvm_memslots(kvm);
                kvm_for_each_memslot(m, slots) {
                        if (m == memslot)
                                break;
                        __kvmppc_uvmem_memslot_delete(kvm, m);
                }
        }

        srcu_read_unlock(&kvm->srcu, srcu_idx);
        return ret;
}

/*
 * Provision a new page on HV side and copy over the contents
 * from secure memory using UV_PAGE_OUT uvcall.
 * Caller must hold kvm->arch.uvmem_lock.
 */
static int __kvmppc_svm_page_out(struct vm_area_struct *vma,
                unsigned long start,
                unsigned long end, unsigned long page_shift,
                struct kvm *kvm, unsigned long gpa)
{
        unsigned long src_pfn, dst_pfn = 0;
        struct migrate_vma mig;
        struct page *dpage, *spage;
        struct kvmppc_uvmem_page_pvt *pvt;
        unsigned long pfn;
        int ret = U_SUCCESS;

        memset(&mig, 0, sizeof(mig));
        mig.vma = vma;
        mig.start = start;
        mig.end = end;
        mig.src = &src_pfn;
        mig.dst = &dst_pfn;
        mig.pgmap_owner = &kvmppc_uvmem_pgmap;
        mig.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE;

        /* The requested page is already paged-out, nothing to do */
        if (!kvmppc_gfn_is_uvmem_pfn(gpa >> page_shift, kvm, NULL))
                return ret;

        ret = migrate_vma_setup(&mig);
        if (ret)
                return -1;

        spage = migrate_pfn_to_page(*mig.src);
        if (!spage || !(*mig.src & MIGRATE_PFN_MIGRATE))
                goto out_finalize;

        if (!is_zone_device_page(spage))
                goto out_finalize;

        dpage = alloc_page_vma(GFP_HIGHUSER, vma, start);
        if (!dpage) {
                ret = -1;
                goto out_finalize;
        }

        lock_page(dpage);
        pvt = spage->zone_device_data;
        pfn = page_to_pfn(dpage);

        /*
         * This function is used in two cases:
         * - When HV touches a secure page, for which we do UV_PAGE_OUT
         * - When a secure page is converted to shared page, we *get*
         *   the page to essentially unmap the device page. In this
         *   case we skip page-out.
         */
        if (!pvt->skip_page_out)
                ret = uv_page_out(kvm->arch.lpid, pfn << page_shift,
                                  gpa, 0, page_shift);

        if (ret == U_SUCCESS)
                *mig.dst = migrate_pfn(pfn) | MIGRATE_PFN_LOCKED;
        else {
                unlock_page(dpage);
                __free_page(dpage);
                goto out_finalize;
        }

        migrate_vma_pages(&mig);

out_finalize:
        migrate_vma_finalize(&mig);
        return ret;
}

static inline int kvmppc_svm_page_out(struct vm_area_struct *vma,
                unsigned long start, unsigned long end,
                unsigned long page_shift,
                struct kvm *kvm, unsigned long gpa)
{
        int ret;

        mutex_lock(&kvm->arch.uvmem_lock);
        ret = __kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa);
        mutex_unlock(&kvm->arch.uvmem_lock);

        return ret;
}

/*
 * Drop device pages that we maintain for the secure guest
 *
 * We first mark the pages to be skipped from UV_PAGE_OUT when there
 * is HV side fault on these pages. Next we *get* these pages, forcing
 * a fault on them, and do fault-time migration to replace the device
 * PTEs in the QEMU page table with normal PTEs from newly allocated
 * pages.
 */
void kvmppc_uvmem_drop_pages(const struct kvm_memory_slot *slot,
                             struct kvm *kvm, bool skip_page_out)
{
        int i;
        struct kvmppc_uvmem_page_pvt *pvt;
        struct page *uvmem_page;
        struct vm_area_struct *vma = NULL;
        unsigned long uvmem_pfn, gfn;
        unsigned long addr;

        mmap_read_lock(kvm->mm);

        addr = slot->userspace_addr;

        gfn = slot->base_gfn;
        for (i = slot->npages; i; --i, ++gfn, addr += PAGE_SIZE) {

                /* Fetch the VMA if addr is not in the latest fetched one */
                if (!vma || addr >= vma->vm_end) {
                        vma = vma_lookup(kvm->mm, addr);
                        if (!vma) {
                                pr_err("Can't find VMA for gfn:0x%lx\n", gfn);
                                break;
                        }
                }

                mutex_lock(&kvm->arch.uvmem_lock);

                if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) {
                        uvmem_page = pfn_to_page(uvmem_pfn);
                        pvt = uvmem_page->zone_device_data;
                        pvt->skip_page_out = skip_page_out;
                        pvt->remove_gfn = true;

                        if (__kvmppc_svm_page_out(vma, addr, addr + PAGE_SIZE,
                                                  PAGE_SHIFT, kvm, pvt->gpa))
                                pr_err("Can't page out gpa:0x%lx addr:0x%lx\n",
                                       pvt->gpa, addr);
                } else {
                        /* Remove the shared flag if any */
                        kvmppc_gfn_remove(gfn, kvm);
                }

                mutex_unlock(&kvm->arch.uvmem_lock);
        }

        mmap_read_unlock(kvm->mm);
}

unsigned long kvmppc_h_svm_init_abort(struct kvm *kvm)
{
        int srcu_idx;
        struct kvm_memory_slot *memslot;

        /*
         * Expect to be called only after INIT_START and before INIT_DONE.
         * If INIT_DONE was completed, use normal VM termination sequence.
         */
        if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
                return H_UNSUPPORTED;

        if (kvm->arch.secure_guest & KVMPPC_SECURE_INIT_DONE)
                return H_STATE;

        srcu_idx = srcu_read_lock(&kvm->srcu);

        kvm_for_each_memslot(memslot, kvm_memslots(kvm))
                kvmppc_uvmem_drop_pages(memslot, kvm, false);

        srcu_read_unlock(&kvm->srcu, srcu_idx);

        kvm->arch.secure_guest = 0;
        uv_svm_terminate(kvm->arch.lpid);

        return H_PARMS_CHANGED;
}

/*
 * Get a free device PFN from the pool
 *
 * Called when a normal page is moved to secure memory (UV_PAGE_IN). Device
 * PFN will be used to keep track of the secure page on HV side.
 *
 * Called with kvm->arch.uvmem_lock held
 */
static struct page *kvmppc_uvmem_get_page(unsigned long gpa, struct kvm *kvm)
{
        struct page *dpage = NULL;
        unsigned long bit, uvmem_pfn;
        struct kvmppc_uvmem_page_pvt *pvt;
        unsigned long pfn_last, pfn_first;

        pfn_first = kvmppc_uvmem_pgmap.range.start >> PAGE_SHIFT;
        pfn_last = pfn_first +
                   (range_len(&kvmppc_uvmem_pgmap.range) >> PAGE_SHIFT);

        spin_lock(&kvmppc_uvmem_bitmap_lock);
        bit = find_first_zero_bit(kvmppc_uvmem_bitmap,
                                  pfn_last - pfn_first);
        if (bit >= (pfn_last - pfn_first))
                goto out;
        bitmap_set(kvmppc_uvmem_bitmap, bit, 1);
        spin_unlock(&kvmppc_uvmem_bitmap_lock);

        pvt = kzalloc(sizeof(*pvt), GFP_KERNEL);
        if (!pvt)
                goto out_clear;

        uvmem_pfn = bit + pfn_first;
        kvmppc_gfn_secure_uvmem_pfn(gpa >> PAGE_SHIFT, uvmem_pfn, kvm);

        pvt->gpa = gpa;
        pvt->kvm = kvm;

        dpage = pfn_to_page(uvmem_pfn);
        dpage->zone_device_data = pvt;
        get_page(dpage);
        lock_page(dpage);
        return dpage;
out_clear:
        spin_lock(&kvmppc_uvmem_bitmap_lock);
        bitmap_clear(kvmppc_uvmem_bitmap, bit, 1);
out:
        spin_unlock(&kvmppc_uvmem_bitmap_lock);
        return NULL;
}

/*
 * Alloc a PFN from private device memory pool. If @pagein is true,
 * copy page from normal memory to secure memory using UV_PAGE_IN uvcall.
 */
static int kvmppc_svm_page_in(struct vm_area_struct *vma,
                unsigned long start,
                unsigned long end, unsigned long gpa, struct kvm *kvm,
                unsigned long page_shift,
                bool pagein)
{
        unsigned long src_pfn, dst_pfn = 0;
        struct migrate_vma mig;
        struct page *spage;
        unsigned long pfn;
        struct page *dpage;
        int ret = 0;

        memset(&mig, 0, sizeof(mig));
        mig.vma = vma;
        mig.start = start;
        mig.end = end;
        mig.src = &src_pfn;
        mig.dst = &dst_pfn;
        mig.flags = MIGRATE_VMA_SELECT_SYSTEM;

        ret = migrate_vma_setup(&mig);
        if (ret)
                return ret;

        if (!(*mig.src & MIGRATE_PFN_MIGRATE)) {
                ret = -1;
                goto out_finalize;
        }

        dpage = kvmppc_uvmem_get_page(gpa, kvm);
        if (!dpage) {
                ret = -1;
                goto out_finalize;
        }

        if (pagein) {
                pfn = *mig.src >> MIGRATE_PFN_SHIFT;
                spage = migrate_pfn_to_page(*mig.src);
                if (spage) {
                        ret = uv_page_in(kvm->arch.lpid, pfn << page_shift,
                                         gpa, 0, page_shift);
                        if (ret)
                                goto out_finalize;
                }
        }

        *mig.dst = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
        migrate_vma_pages(&mig);
out_finalize:
        migrate_vma_finalize(&mig);
        return ret;
}

static int kvmppc_uv_migrate_mem_slot(struct kvm *kvm,
                const struct kvm_memory_slot *memslot)
{
        unsigned long gfn = memslot->base_gfn;
        struct vm_area_struct *vma;
        unsigned long start, end;
        int ret = 0;

        mmap_read_lock(kvm->mm);
        mutex_lock(&kvm->arch.uvmem_lock);
        while (kvmppc_next_nontransitioned_gfn(memslot, kvm, &gfn)) {
                ret = H_STATE;
                start = gfn_to_hva(kvm, gfn);
                if (kvm_is_error_hva(start))
                        break;

                end = start + (1UL << PAGE_SHIFT);
                vma = find_vma_intersection(kvm->mm, start, end);
                if (!vma || vma->vm_start > start || vma->vm_end < end)
                        break;

                ret = kvmppc_svm_page_in(vma, start, end,
                                (gfn << PAGE_SHIFT), kvm, PAGE_SHIFT, false);
                if (ret) {
                        ret = H_STATE;
                        break;
                }

                /* relinquish the cpu if needed */
                cond_resched();
        }
        mutex_unlock(&kvm->arch.uvmem_lock);
        mmap_read_unlock(kvm->mm);
        return ret;
}

unsigned long kvmppc_h_svm_init_done(struct kvm *kvm)
{
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;
        int srcu_idx;
        long ret = H_SUCCESS;

        if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
                return H_UNSUPPORTED;

        /* migrate any unmoved normal pfn to device pfns */
        srcu_idx = srcu_read_lock(&kvm->srcu);
        slots = kvm_memslots(kvm);
        kvm_for_each_memslot(memslot, slots) {
                ret = kvmppc_uv_migrate_mem_slot(kvm, memslot);
                if (ret) {
                        /*
                         * The pages will remain transitioned.
                         * It's the caller's responsibility to
                         * terminate the VM, which will undo
                         * all state of the VM. Till then
                         * this VM is in an erroneous state.
                         * Its KVMPPC_SECURE_INIT_DONE will
                         * remain unset.
                         */
                        ret = H_STATE;
                        goto out;
                }
        }

        kvm->arch.secure_guest |= KVMPPC_SECURE_INIT_DONE;
        pr_info("LPID %d went secure\n", kvm->arch.lpid);

out:
        srcu_read_unlock(&kvm->srcu, srcu_idx);
        return ret;
}

/*
 * Shares the page with HV, thus making it a normal page.
 *
 * - If the page is already secure, then provision a new page and share
 * - If the page is a normal page, share the existing page
 *
 * In the former case, uses the dev_pagemap_ops.migrate_to_ram handler
 * to unmap the device page from QEMU's page tables.
 */
static unsigned long kvmppc_share_page(struct kvm *kvm, unsigned long gpa,
                unsigned long page_shift)
{
        int ret = H_PARAMETER;
        struct page *uvmem_page;
        struct kvmppc_uvmem_page_pvt *pvt;
        unsigned long pfn;
        unsigned long gfn = gpa >> page_shift;
        int srcu_idx;
        unsigned long uvmem_pfn;

        srcu_idx = srcu_read_lock(&kvm->srcu);
        mutex_lock(&kvm->arch.uvmem_lock);
        if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) {
                uvmem_page = pfn_to_page(uvmem_pfn);
                pvt = uvmem_page->zone_device_data;
                pvt->skip_page_out = true;
                /*
                 * Do not drop the GFN. It is a valid GFN
                 * that is transitioned to a shared GFN.
                 */
                pvt->remove_gfn = false;
        }

retry:
        mutex_unlock(&kvm->arch.uvmem_lock);
        pfn = gfn_to_pfn(kvm, gfn);
        if (is_error_noslot_pfn(pfn))
                goto out;

        mutex_lock(&kvm->arch.uvmem_lock);
        if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, &uvmem_pfn)) {
                uvmem_page = pfn_to_page(uvmem_pfn);
                pvt = uvmem_page->zone_device_data;
                pvt->skip_page_out = true;
                pvt->remove_gfn = false; /* it continues to be a valid GFN */
                kvm_release_pfn_clean(pfn);
                goto retry;
        }

        if (!uv_page_in(kvm->arch.lpid, pfn << page_shift, gpa, 0,
                        page_shift)) {
                kvmppc_gfn_shared(gfn, kvm);
                ret = H_SUCCESS;
        }
        kvm_release_pfn_clean(pfn);
        mutex_unlock(&kvm->arch.uvmem_lock);
out:
        srcu_read_unlock(&kvm->srcu, srcu_idx);
        return ret;
}

/*
 * H_SVM_PAGE_IN: Move page from normal memory to secure memory.
 *
 * H_PAGE_IN_SHARED flag makes the page shared, which means that the same
 * memory is visible to both UV and HV.
 */
unsigned long kvmppc_h_svm_page_in(struct kvm *kvm, unsigned long gpa,
                unsigned long flags,
                unsigned long page_shift)
{
        unsigned long start, end;
        struct vm_area_struct *vma;
        int srcu_idx;
        unsigned long gfn = gpa >> page_shift;
        int ret;

        if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
                return H_UNSUPPORTED;

        if (page_shift != PAGE_SHIFT)
                return H_P3;

        if (flags & ~H_PAGE_IN_SHARED)
                return H_P2;

        if (flags & H_PAGE_IN_SHARED)
                return kvmppc_share_page(kvm, gpa, page_shift);

        ret = H_PARAMETER;
        srcu_idx = srcu_read_lock(&kvm->srcu);
        mmap_read_lock(kvm->mm);

        start = gfn_to_hva(kvm, gfn);
        if (kvm_is_error_hva(start))
                goto out;

        mutex_lock(&kvm->arch.uvmem_lock);
        /* Fail the page-in request of an already paged-in page */
        if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, NULL))
                goto out_unlock;

        end = start + (1UL << page_shift);
        vma = find_vma_intersection(kvm->mm, start, end);
        if (!vma || vma->vm_start > start || vma->vm_end < end)
                goto out_unlock;

        if (kvmppc_svm_page_in(vma, start, end, gpa, kvm, page_shift,
                               true))
                goto out_unlock;

        ret = H_SUCCESS;

out_unlock:
        mutex_unlock(&kvm->arch.uvmem_lock);
out:
        mmap_read_unlock(kvm->mm);
        srcu_read_unlock(&kvm->srcu, srcu_idx);
        return ret;
}

/*
 * Fault handler callback that gets called when HV touches any page that
 * has been moved to secure memory. We ask UV to give back the page by
 * issuing UV_PAGE_OUT uvcall.
 *
 * This eventually results in dropping of device PFN and the newly
 * provisioned page/PFN gets populated in QEMU page tables.
 */
static vm_fault_t kvmppc_uvmem_migrate_to_ram(struct vm_fault *vmf)
{
        struct kvmppc_uvmem_page_pvt *pvt = vmf->page->zone_device_data;

        if (kvmppc_svm_page_out(vmf->vma, vmf->address,
                                vmf->address + PAGE_SIZE, PAGE_SHIFT,
                                pvt->kvm, pvt->gpa))
                return VM_FAULT_SIGBUS;

        return 0;
}

/*
 * Release the device PFN back to the pool
 *
 * Gets called when secure GFN transitions from a secure-PFN
 * to a normal PFN during H_SVM_PAGE_OUT.
 * Gets called with kvm->arch.uvmem_lock held.
 */
static void kvmppc_uvmem_page_free(struct page *page)
{
        unsigned long pfn = page_to_pfn(page) -
                        (kvmppc_uvmem_pgmap.range.start >> PAGE_SHIFT);
        struct kvmppc_uvmem_page_pvt *pvt;

        spin_lock(&kvmppc_uvmem_bitmap_lock);
        bitmap_clear(kvmppc_uvmem_bitmap, pfn, 1);
        spin_unlock(&kvmppc_uvmem_bitmap_lock);

        pvt = page->zone_device_data;
        page->zone_device_data = NULL;
        if (pvt->remove_gfn)
                kvmppc_gfn_remove(pvt->gpa >> PAGE_SHIFT, pvt->kvm);
        else
                kvmppc_gfn_secure_mem_pfn(pvt->gpa >> PAGE_SHIFT, pvt->kvm);
        kfree(pvt);
}

static const struct dev_pagemap_ops kvmppc_uvmem_ops = {
        .page_free = kvmppc_uvmem_page_free,
        .migrate_to_ram = kvmppc_uvmem_migrate_to_ram,
};

/*
 * H_SVM_PAGE_OUT: Move page from secure memory to normal memory.
 */
unsigned long
kvmppc_h_svm_page_out(struct kvm *kvm, unsigned long gpa,
                      unsigned long flags, unsigned long page_shift)
{
        unsigned long gfn = gpa >> page_shift;
        unsigned long start, end;
        struct vm_area_struct *vma;
        int srcu_idx;
        int ret;

        if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START))
                return H_UNSUPPORTED;

        if (page_shift != PAGE_SHIFT)
                return H_P3;

        if (flags)
                return H_P2;

        ret = H_PARAMETER;
        srcu_idx = srcu_read_lock(&kvm->srcu);
        mmap_read_lock(kvm->mm);
        start = gfn_to_hva(kvm, gfn);
        if (kvm_is_error_hva(start))
                goto out;

        end = start + (1UL << page_shift);
        vma = find_vma_intersection(kvm->mm, start, end);
        if (!vma || vma->vm_start > start || vma->vm_end < end)
                goto out;

        if (!kvmppc_svm_page_out(vma, start, end, page_shift, kvm, gpa))
                ret = H_SUCCESS;
out:
        mmap_read_unlock(kvm->mm);
        srcu_read_unlock(&kvm->srcu, srcu_idx);
        return ret;
}

int kvmppc_send_page_to_uv(struct kvm *kvm, unsigned long gfn)
{
        unsigned long pfn;
        int ret = U_SUCCESS;

        pfn = gfn_to_pfn(kvm, gfn);
        if (is_error_noslot_pfn(pfn))
                return -EFAULT;

        mutex_lock(&kvm->arch.uvmem_lock);
        if (kvmppc_gfn_is_uvmem_pfn(gfn, kvm, NULL))
                goto out;

        ret = uv_page_in(kvm->arch.lpid, pfn << PAGE_SHIFT, gfn << PAGE_SHIFT,
                         0, PAGE_SHIFT);
out:
        kvm_release_pfn_clean(pfn);
        mutex_unlock(&kvm->arch.uvmem_lock);
        return (ret == U_SUCCESS) ? RESUME_GUEST : -EFAULT;
}

int kvmppc_uvmem_memslot_create(struct kvm *kvm, const struct kvm_memory_slot *new)
{
        int ret = __kvmppc_uvmem_memslot_create(kvm, new);

        if (!ret)
                ret = kvmppc_uv_migrate_mem_slot(kvm, new);

        return ret;
}

void kvmppc_uvmem_memslot_delete(struct kvm *kvm, const struct kvm_memory_slot *old)
{
        __kvmppc_uvmem_memslot_delete(kvm, old);
}

static u64 kvmppc_get_secmem_size(void)
{
        struct device_node *np;
        int i, len;
        const __be32 *prop;
        u64 size = 0;

        /*
         * First try the new ibm,secure-memory nodes which supersede the
         * secure-memory-ranges property.
         * If we found some, no need to read the deprecated ones.
         */
        for_each_compatible_node(np, NULL, "ibm,secure-memory") {
                prop = of_get_property(np, "reg", &len);
                if (!prop)
                        continue;
                size += of_read_number(prop + 2, 2);
        }
        if (size)
                return size;

        np = of_find_compatible_node(NULL, NULL, "ibm,uv-firmware");
        if (!np)
                goto out;

        prop = of_get_property(np, "secure-memory-ranges", &len);
        if (!prop)
                goto out_put;

        for (i = 0; i < len / (sizeof(*prop) * 4); i++)
                size += of_read_number(prop + (i * 4) + 2, 2);

out_put:
        of_node_put(np);
out:
        return size;
}

int kvmppc_uvmem_init(void)
{
        int ret = 0;
        unsigned long size;
        struct resource *res;
        void *addr;
        unsigned long pfn_last, pfn_first;

        size = kvmppc_get_secmem_size();
        if (!size) {
                /*
                 * Don't fail the initialization of kvm-hv module if
                 * the platform doesn't export ibm,uv-firmware node.
                 * Let normal guests run on such PEF-disabled platform.
                 */
                pr_info("KVMPPC-UVMEM: No support for secure guests\n");
                goto out;
        }

        res = request_free_mem_region(&iomem_resource, size, "kvmppc_uvmem");
        if (IS_ERR(res)) {
                ret = PTR_ERR(res);
                goto out;
        }

        kvmppc_uvmem_pgmap.type = MEMORY_DEVICE_PRIVATE;
        kvmppc_uvmem_pgmap.range.start = res->start;
        kvmppc_uvmem_pgmap.range.end = res->end;
        kvmppc_uvmem_pgmap.nr_range = 1;
        kvmppc_uvmem_pgmap.ops = &kvmppc_uvmem_ops;
        /* just one global instance: */
        kvmppc_uvmem_pgmap.owner = &kvmppc_uvmem_pgmap;
        addr = memremap_pages(&kvmppc_uvmem_pgmap, NUMA_NO_NODE);
        if (IS_ERR(addr)) {
                ret = PTR_ERR(addr);
                goto out_free_region;
        }

        pfn_first = res->start >> PAGE_SHIFT;
        pfn_last = pfn_first + (resource_size(res) >> PAGE_SHIFT);
        kvmppc_uvmem_bitmap = kcalloc(BITS_TO_LONGS(pfn_last - pfn_first),
                                      sizeof(unsigned long), GFP_KERNEL);
        if (!kvmppc_uvmem_bitmap) {
                ret = -ENOMEM;
                goto out_unmap;
        }

        pr_info("KVMPPC-UVMEM: Secure Memory size 0x%lx\n", size);
        return ret;
out_unmap:
        memunmap_pages(&kvmppc_uvmem_pgmap);
out_free_region:
        release_mem_region(res->start, size);
out:
        return ret;
}

void kvmppc_uvmem_free(void)
{
        if (!kvmppc_uvmem_bitmap)
                return;

        memunmap_pages(&kvmppc_uvmem_pgmap);
        release_mem_region(kvmppc_uvmem_pgmap.range.start,
                           range_len(&kvmppc_uvmem_pgmap.range));
        kfree(kvmppc_uvmem_bitmap);
}