// SPDX-License-Identifier: GPL-2.0
/*
 * KVM guest address space mapping code
 *
 * Copyright IBM Corp. 2007, 2016, 2018
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 *	      David Hildenbrand <david@redhat.com>
 *	      Janosch Frank <frankja@linux.vnet.ibm.com>
 */
#include <linux/kernel.h>
#include <linux/pagewalk.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/ksm.h>
#include <linux/mman.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>

#define GMAP_SHADOW_FAKE_TABLE 1ULL
/**
 * gmap_alloc - allocate and initialize a guest address space
 * @limit: maximum address of the gmap address space
 *
 * Returns a guest address space structure.
 */
static struct gmap *gmap_alloc(unsigned long limit)
{
	struct gmap *gmap;
	struct page *page;
	unsigned long *table;
	unsigned long etype, atype;
	if (limit < _REGION3_SIZE) {
		limit = _REGION3_SIZE - 1;
		atype = _ASCE_TYPE_SEGMENT;
		etype = _SEGMENT_ENTRY_EMPTY;
	} else if (limit < _REGION2_SIZE) {
		limit = _REGION2_SIZE - 1;
		atype = _ASCE_TYPE_REGION3;
		etype = _REGION3_ENTRY_EMPTY;
	} else if (limit < _REGION1_SIZE) {
		limit = _REGION1_SIZE - 1;
		atype = _ASCE_TYPE_REGION2;
		etype = _REGION2_ENTRY_EMPTY;
	} else {
		limit = -1UL;
		atype = _ASCE_TYPE_REGION1;
		etype = _REGION1_ENTRY_EMPTY;
	}
	gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
	if (!gmap)
		goto out;
	INIT_LIST_HEAD(&gmap->crst_list);
	INIT_LIST_HEAD(&gmap->children);
	INIT_LIST_HEAD(&gmap->pt_list);
	INIT_RADIX_TREE(&gmap->guest_to_host, GFP_KERNEL);
	INIT_RADIX_TREE(&gmap->host_to_guest, GFP_ATOMIC);
	INIT_RADIX_TREE(&gmap->host_to_rmap, GFP_ATOMIC);
	spin_lock_init(&gmap->guest_table_lock);
	spin_lock_init(&gmap->shadow_lock);
	refcount_set(&gmap->ref_count, 1);
	page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (!page)
		goto out_free;
	page->index = 0;
	list_add(&page->lru, &gmap->crst_list);
	table = (unsigned long *) page_to_phys(page);
	crst_table_init(table, etype);
	gmap->table = table;
	gmap->asce = atype | _ASCE_TABLE_LENGTH |
		_ASCE_USER_BITS | __pa(table);
	gmap->asce_end = limit;
	return gmap;

out_free:
	kfree(gmap);
out:
	return NULL;
}
/**
 * gmap_create - create a guest address space
 * @mm: pointer to the parent mm_struct
 * @limit: maximum size of the gmap address space
 *
 * Returns a guest address space structure.
 */
struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit)
{
	struct gmap *gmap;
	unsigned long gmap_asce;

	gmap = gmap_alloc(limit);
	if (!gmap)
		return NULL;
	gmap->mm = mm;
	spin_lock(&mm->context.lock);
	list_add_rcu(&gmap->list, &mm->context.gmap_list);
	if (list_is_singular(&mm->context.gmap_list))
		gmap_asce = gmap->asce;
	else
		gmap_asce = -1UL;
	WRITE_ONCE(mm->context.gmap_asce, gmap_asce);
	spin_unlock(&mm->context.lock);
	return gmap;
}
EXPORT_SYMBOL_GPL(gmap_create);
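/*
 * Usage sketch (illustrative, not part of the original file): a typical
 * lifecycle as driven by a hypervisor such as KVM. Error handling is
 * elided and everything outside the gmap_* API is hypothetical.
 *
 *	struct gmap *g;
 *
 *	g = gmap_create(current->mm, (1UL << 42) - 1); // 4 TB guest limit
 *	...					// run guest, resolve faults
 *	gmap_remove(g);				// unlink from the mm
 *	gmap_put(g);				// drop the initial reference
 */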
static void gmap_flush_tlb(struct gmap *gmap)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte(gmap->asce);
	else
		__tlb_flush_global();
}
static void gmap_radix_tree_free(struct radix_tree_root *root)
{
	struct radix_tree_iter iter;
	unsigned long indices[16];
	unsigned long index;
	void __rcu **slot;
	int i, nr;

	/* A radix tree is freed by deleting all of its entries */
	index = 0;
	do {
		nr = 0;
		radix_tree_for_each_slot(slot, root, &iter, index) {
			indices[nr] = iter.index;
			if (++nr == 16)
				break;
		}
		for (i = 0; i < nr; i++) {
			index = indices[i];
			radix_tree_delete(root, index);
		}
	} while (nr > 0);
}
static void gmap_rmap_radix_tree_free(struct radix_tree_root *root)
{
	struct gmap_rmap *rmap, *rnext, *head;
	struct radix_tree_iter iter;
	unsigned long indices[16];
	unsigned long index;
	void __rcu **slot;
	int i, nr;

	/* A radix tree is freed by deleting all of its entries */
	index = 0;
	do {
		nr = 0;
		radix_tree_for_each_slot(slot, root, &iter, index) {
			indices[nr] = iter.index;
			if (++nr == 16)
				break;
		}
		for (i = 0; i < nr; i++) {
			index = indices[i];
			head = radix_tree_delete(root, index);
			gmap_for_each_rmap_safe(rmap, rnext, head)
				kfree(rmap);
		}
	} while (nr > 0);
}
/**
 * gmap_free - free a guest address space
 * @gmap: pointer to the guest address space structure
 *
 * No locks required. There are no references to this gmap anymore.
 */
static void gmap_free(struct gmap *gmap)
{
	struct page *page, *next;

	/* Flush tlb of all gmaps (if not already done for shadows) */
	if (!(gmap_is_shadow(gmap) && gmap->removed))
		gmap_flush_tlb(gmap);
	/* Free all segment & region tables. */
	list_for_each_entry_safe(page, next, &gmap->crst_list, lru)
		__free_pages(page, CRST_ALLOC_ORDER);
	gmap_radix_tree_free(&gmap->guest_to_host);
	gmap_radix_tree_free(&gmap->host_to_guest);

	/* Free additional data for a shadow gmap */
	if (gmap_is_shadow(gmap)) {
		/* Free all page tables. */
		list_for_each_entry_safe(page, next, &gmap->pt_list, lru)
			page_table_free_pgste(page);
		gmap_rmap_radix_tree_free(&gmap->host_to_rmap);
		/* Release reference to the parent */
		gmap_put(gmap->parent);
	}

	kfree(gmap);
}
/**
 * gmap_get - increase reference counter for guest address space
 * @gmap: pointer to the guest address space structure
 *
 * Returns the gmap pointer
 */
struct gmap *gmap_get(struct gmap *gmap)
{
	refcount_inc(&gmap->ref_count);
	return gmap;
}
EXPORT_SYMBOL_GPL(gmap_get);
/**
 * gmap_put - decrease reference counter for guest address space
 * @gmap: pointer to the guest address space structure
 *
 * If the reference counter reaches zero the guest address space is freed.
 */
void gmap_put(struct gmap *gmap)
{
	if (refcount_dec_and_test(&gmap->ref_count))
		gmap_free(gmap);
}
EXPORT_SYMBOL_GPL(gmap_put);
/**
 * gmap_remove - remove a guest address space but do not free it yet
 * @gmap: pointer to the guest address space structure
 */
void gmap_remove(struct gmap *gmap)
{
	struct gmap *sg, *next;
	unsigned long gmap_asce;

	/* Remove all shadow gmaps linked to this gmap */
	if (!list_empty(&gmap->children)) {
		spin_lock(&gmap->shadow_lock);
		list_for_each_entry_safe(sg, next, &gmap->children, list) {
			list_del(&sg->list);
			gmap_put(sg);
		}
		spin_unlock(&gmap->shadow_lock);
	}
	/* Remove gmap from the per-mm list */
	spin_lock(&gmap->mm->context.lock);
	list_del_rcu(&gmap->list);
	if (list_empty(&gmap->mm->context.gmap_list))
		gmap_asce = 0;
	else if (list_is_singular(&gmap->mm->context.gmap_list))
		gmap_asce = list_first_entry(&gmap->mm->context.gmap_list,
					     struct gmap, list)->asce;
	else
		gmap_asce = -1UL;
	WRITE_ONCE(gmap->mm->context.gmap_asce, gmap_asce);
	spin_unlock(&gmap->mm->context.lock);
	synchronize_rcu();
	/* Put reference */
	gmap_put(gmap);
}
EXPORT_SYMBOL_GPL(gmap_remove);
/**
 * gmap_enable - switch primary space to the guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_enable(struct gmap *gmap)
{
	S390_lowcore.gmap = (unsigned long) gmap;
}
EXPORT_SYMBOL_GPL(gmap_enable);
/**
 * gmap_disable - switch back to the standard primary address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_disable(struct gmap *gmap)
{
	S390_lowcore.gmap = 0UL;
}
EXPORT_SYMBOL_GPL(gmap_disable);
/**
 * gmap_get_enabled - get a pointer to the currently enabled gmap
 *
 * Returns a pointer to the currently enabled gmap. 0 if none is enabled.
 */
struct gmap *gmap_get_enabled(void)
{
	return (struct gmap *) S390_lowcore.gmap;
}
EXPORT_SYMBOL_GPL(gmap_get_enabled);
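/*
 * Illustrative sketch (not part of the original file): the enabled gmap
 * is per-cpu state in the lowcore. A VCPU thread brackets guest execution
 * with these helpers, and handlers running in between can look up the
 * active guest address space:
 *
 *	gmap_enable(gmap);		// before entering SIE
 *	...				// guest runs
 *	gmap_disable(gmap);		// after leaving SIE
 *	...
 *	gmap = gmap_get_enabled();	// e.g. from a fault handler
 */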
/*
 * gmap_alloc_table is assumed to be called with mmap_sem held
 */
static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
			    unsigned long init, unsigned long gaddr)
{
	struct page *page;
	unsigned long *new;

	/* since we don't free the gmap table until gmap_free we can unlock */
	page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (!page)
		return -ENOMEM;
	new = (unsigned long *) page_to_phys(page);
	crst_table_init(new, init);
	spin_lock(&gmap->guest_table_lock);
	if (*table & _REGION_ENTRY_INVALID) {
		list_add(&page->lru, &gmap->crst_list);
		*table = (unsigned long) new | _REGION_ENTRY_LENGTH |
			(*table & _REGION_ENTRY_TYPE_MASK);
		page->index = gaddr;
		page = NULL;
	}
	spin_unlock(&gmap->guest_table_lock);
	if (page)
		__free_pages(page, CRST_ALLOC_ORDER);
	return 0;
}
/**
 * __gmap_segment_gaddr - find virtual address from segment pointer
 * @entry: pointer to a segment table entry in the guest address space
 *
 * Returns the virtual address in the guest address space for the segment
 */
static unsigned long __gmap_segment_gaddr(unsigned long *entry)
{
	struct page *page;
	unsigned long offset, mask;

	offset = (unsigned long) entry / sizeof(unsigned long);
	offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE;
	mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
	page = virt_to_page((void *)((unsigned long) entry & mask));
	return page->index + offset;
}
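/*
 * Worked example (illustrative, not part of the original file): segment
 * table entries are 8 bytes and each segment maps PMD_SIZE (1 MB) of
 * guest memory, so for the n-th entry of its table the code above yields
 * offset = n * PMD_SIZE. Adding page->index, which caches the guest
 * address mapped by the first entry of the table page (see
 * gmap_alloc_table()), gives the guest address of the segment.
 */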
/**
 * __gmap_unlink_by_vmaddr - unlink a single segment via a host address
 * @gmap: pointer to the guest address space structure
 * @vmaddr: address in the host process address space
 *
 * Returns 1 if a TLB flush is required
 */
static int __gmap_unlink_by_vmaddr(struct gmap *gmap, unsigned long vmaddr)
{
	unsigned long *entry;
	int flush = 0;

	BUG_ON(gmap_is_shadow(gmap));
	spin_lock(&gmap->guest_table_lock);
	entry = radix_tree_delete(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
	if (entry) {
		flush = (*entry != _SEGMENT_ENTRY_EMPTY);
		*entry = _SEGMENT_ENTRY_EMPTY;
	}
	spin_unlock(&gmap->guest_table_lock);
	return flush;
}
/**
 * __gmap_unmap_by_gaddr - unmap a single segment via a guest address
 * @gmap: pointer to the guest address space structure
 * @gaddr: address in the guest address space
 *
 * Returns 1 if a TLB flush is required
 */
static int __gmap_unmap_by_gaddr(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;

	vmaddr = (unsigned long) radix_tree_delete(&gmap->guest_to_host,
						   gaddr >> PMD_SHIFT);
	return vmaddr ? __gmap_unlink_by_vmaddr(gmap, vmaddr) : 0;
}
/**
 * gmap_unmap_segment - unmap segment from the guest address space
 * @gmap: pointer to the guest address space structure
 * @to: address in the guest address space
 * @len: length of the memory area to unmap
 *
 * Returns 0 if the unmap succeeded, -EINVAL if not.
 */
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
{
	unsigned long off;
	int flush;

	BUG_ON(gmap_is_shadow(gmap));
	if ((to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || to + len < to)
		return -EINVAL;

	flush = 0;
	down_write(&gmap->mm->mmap_sem);
	for (off = 0; off < len; off += PMD_SIZE)
		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
	up_write(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;
}
EXPORT_SYMBOL_GPL(gmap_unmap_segment);
/**
 * gmap_map_segment - map a segment to the guest address space
 * @gmap: pointer to the guest address space structure
 * @from: source address in the parent address space
 * @to: target address in the guest address space
 * @len: length of the memory area to map
 *
 * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not.
 */
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long len)
{
	unsigned long off;
	int flush;

	BUG_ON(gmap_is_shadow(gmap));
	if ((from | to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || from + len < from || to + len < to ||
	    from + len - 1 > TASK_SIZE_MAX || to + len - 1 > gmap->asce_end)
		return -EINVAL;

	flush = 0;
	down_write(&gmap->mm->mmap_sem);
	for (off = 0; off < len; off += PMD_SIZE) {
		/* Remove old translation */
		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
		/* Store new translation */
		if (radix_tree_insert(&gmap->guest_to_host,
				      (to + off) >> PMD_SHIFT,
				      (void *) from + off))
			break;
	}
	up_write(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	if (off >= len)
		return 0;
	gmap_unmap_segment(gmap, to, len);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(gmap_map_segment);
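/*
 * Usage sketch (illustrative): place 1 GB of the parent process starting
 * at the hypothetical host address "from" at guest address 0. Source,
 * target and length must all be PMD_SIZE (1 MB) aligned:
 *
 *	rc = gmap_map_segment(g, from, 0x0UL, 1UL << 30);
 *	if (rc)			// -EINVAL or -ENOMEM
 *		return rc;
 */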
/**
 * __gmap_translate - translate a guest address to a user space address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 *
 * Note: Can also be called for shadow gmaps.
 */
unsigned long __gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;

	vmaddr = (unsigned long)
		radix_tree_lookup(&gmap->guest_to_host, gaddr >> PMD_SHIFT);
	/* Note: guest_to_host is empty for a shadow gmap */
	return vmaddr ? (vmaddr | (gaddr & ~PMD_MASK)) : -EFAULT;
}
EXPORT_SYMBOL_GPL(__gmap_translate);
/**
 * gmap_translate - translate a guest address to a user space address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 */
unsigned long gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long rc;

	down_read(&gmap->mm->mmap_sem);
	rc = __gmap_translate(gmap, gaddr);
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_translate);
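/*
 * Illustrative example: find the host address backing guest page
 * 0x100000. The offset within the 1 MB segment is preserved:
 *
 *	unsigned long hva = gmap_translate(g, 0x100000UL);
 *	if (IS_ERR_VALUE(hva))	// -EFAULT: no segment mapping
 *		return hva;
 */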
/**
 * gmap_unlink - disconnect a page table from the gmap shadow tables
 * @mm: pointer to the parent mm_struct
 * @table: pointer to the host page table
 * @vmaddr: vm address associated with the host page table
 */
void gmap_unlink(struct mm_struct *mm, unsigned long *table,
		 unsigned long vmaddr)
{
	struct gmap *gmap;
	int flush;

	rcu_read_lock();
	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
		flush = __gmap_unlink_by_vmaddr(gmap, vmaddr);
		if (flush)
			gmap_flush_tlb(gmap);
	}
	rcu_read_unlock();
}
static void gmap_pmdp_xchg(struct gmap *gmap, pmd_t *old, pmd_t new,
			   unsigned long gaddr);
/**
 * __gmap_link - set up shadow page tables to connect a host to a guest address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 * @vmaddr: vm address
 *
 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
 * if the vm address is already mapped to a different guest segment.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 */
int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
{
	struct mm_struct *mm;
	unsigned long *table;
	spinlock_t *ptl;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	u64 unprot;
	int rc;
	BUG_ON(gmap_is_shadow(gmap));
	/* Create higher level tables in the gmap page table */
	table = gmap->table;
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION1) {
		table += (gaddr & _REGION1_INDEX) >> _REGION1_SHIFT;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY,
				     gaddr & _REGION1_MASK))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION2) {
		table += (gaddr & _REGION2_INDEX) >> _REGION2_SHIFT;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY,
				     gaddr & _REGION2_MASK))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION3) {
		table += (gaddr & _REGION3_INDEX) >> _REGION3_SHIFT;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY,
				     gaddr & _REGION3_MASK))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	table += (gaddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
	/* Walk the parent mm page table */
	mm = gmap->mm;
	pgd = pgd_offset(mm, vmaddr);
	VM_BUG_ON(pgd_none(*pgd));
	p4d = p4d_offset(pgd, vmaddr);
	VM_BUG_ON(p4d_none(*p4d));
	pud = pud_offset(p4d, vmaddr);
	VM_BUG_ON(pud_none(*pud));
	/* large puds cannot yet be handled */
	if (pud_large(*pud))
		return -EFAULT;
	pmd = pmd_offset(pud, vmaddr);
	VM_BUG_ON(pmd_none(*pmd));
	/* Are we allowed to use huge pages? */
	if (pmd_large(*pmd) && !gmap->mm->context.allow_gmap_hpage_1m)
		return -EFAULT;
	/* Link gmap segment table entry location to page table. */
	rc = radix_tree_preload(GFP_KERNEL);
	if (rc)
		return rc;
	ptl = pmd_lock(mm, pmd);
	spin_lock(&gmap->guest_table_lock);
	if (*table == _SEGMENT_ENTRY_EMPTY) {
		rc = radix_tree_insert(&gmap->host_to_guest,
				       vmaddr >> PMD_SHIFT, table);
		if (!rc) {
			if (pmd_large(*pmd)) {
				*table = (pmd_val(*pmd) &
					  _SEGMENT_ENTRY_HARDWARE_BITS_LARGE)
					 | _SEGMENT_ENTRY_GMAP_UC;
			} else
				*table = pmd_val(*pmd) &
					 _SEGMENT_ENTRY_HARDWARE_BITS;
		}
	} else if (*table & _SEGMENT_ENTRY_PROTECT &&
		   !(pmd_val(*pmd) & _SEGMENT_ENTRY_PROTECT)) {
		unprot = (u64)*table;
		unprot &= ~_SEGMENT_ENTRY_PROTECT;
		unprot |= _SEGMENT_ENTRY_GMAP_UC;
		gmap_pmdp_xchg(gmap, (pmd_t *)table, __pmd(unprot), gaddr);
	}
	spin_unlock(&gmap->guest_table_lock);
	spin_unlock(ptl);
	radix_tree_preload_end();
	return rc;
}
/**
 * gmap_fault - resolve a fault on a guest address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 * @fault_flags: flags to pass down to handle_mm_fault()
 *
 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
 * if the vm address is already mapped to a different guest segment.
 */
int gmap_fault(struct gmap *gmap, unsigned long gaddr,
	       unsigned int fault_flags)
{
	unsigned long vmaddr;
	int rc;
	bool unlocked;
	down_read(&gmap->mm->mmap_sem);

retry:
	unlocked = false;
	vmaddr = __gmap_translate(gmap, gaddr);
	if (IS_ERR_VALUE(vmaddr)) {
		rc = vmaddr;
		goto out_up;
	}
	if (fixup_user_fault(current, gmap->mm, vmaddr, fault_flags,
			     &unlocked)) {
		rc = -EFAULT;
		goto out_up;
	}
	/*
	 * In case fixup_user_fault unlocked the mmap_sem during the fault
	 * handling, redo __gmap_translate to avoid racing with a
	 * map/unmap_segment.
	 */
	if (unlocked)
		goto retry;

	rc = __gmap_link(gmap, gaddr, vmaddr);
out_up:
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_fault);
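/*
 * Illustrative sketch (not part of the original file): resolving a guest
 * write fault, roughly what a SIE exit handler does:
 *
 *	rc = gmap_fault(g, gaddr, FAULT_FLAG_WRITE);
 *	if (rc == -EFAULT)
 *		...		// e.g. inject an addressing exception
 *	else if (rc)
 *		return rc;	// -ENOMEM and friends
 */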
/*
 * this function is assumed to be called with mmap_sem held
 */
void __gmap_zap(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;
	spinlock_t *ptl;
	pte_t *ptep;

	/* Find the vm address for the guest address */
	vmaddr = (unsigned long) radix_tree_lookup(&gmap->guest_to_host,
						   gaddr >> PMD_SHIFT);
	if (vmaddr) {
		vmaddr |= gaddr & ~PMD_MASK;
		/* Get pointer to the page table entry */
		ptep = get_locked_pte(gmap->mm, vmaddr, &ptl);
		if (ptep) {
			ptep_zap_unused(gmap->mm, vmaddr, ptep, 0);
			pte_unmap_unlock(ptep, ptl);
		}
	}
}
EXPORT_SYMBOL_GPL(__gmap_zap);
void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
{
	unsigned long gaddr, vmaddr, size;
	struct vm_area_struct *vma;

	down_read(&gmap->mm->mmap_sem);
	for (gaddr = from; gaddr < to;
	     gaddr = (gaddr + PMD_SIZE) & PMD_MASK) {
		/* Find the vm address for the guest address */
		vmaddr = (unsigned long)
			radix_tree_lookup(&gmap->guest_to_host,
					  gaddr >> PMD_SHIFT);
		if (!vmaddr)
			continue;
		vmaddr |= gaddr & ~PMD_MASK;
		/* Find vma in the parent mm */
		vma = find_vma(gmap->mm, vmaddr);
		if (!vma)
			continue;
		/*
		 * We do not discard pages that are backed by
		 * hugetlbfs, so we don't have to refault them.
		 */
		if (is_vm_hugetlb_page(vma))
			continue;
		size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
		zap_page_range(vma, vmaddr, size);
	}
	up_read(&gmap->mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(gmap_discard);
static LIST_HEAD(gmap_notifier_list);
static DEFINE_SPINLOCK(gmap_notifier_lock);
/**
 * gmap_register_pte_notifier - register a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_register_pte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_add_rcu(&nb->list, &gmap_notifier_list);
	spin_unlock(&gmap_notifier_lock);
}
EXPORT_SYMBOL_GPL(gmap_register_pte_notifier);
/**
 * gmap_unregister_pte_notifier - remove a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_unregister_pte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_del_rcu(&nb->list);
	spin_unlock(&gmap_notifier_lock);
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(gmap_unregister_pte_notifier);
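/*
 * Illustrative example (not part of the original file): arming a callback
 * that fires whenever a pte with a pending notification bit is
 * invalidated:
 *
 *	static void my_invalidate(struct gmap *gmap, unsigned long start,
 *				  unsigned long end) { ... }
 *	static struct gmap_notifier my_nb = { .notifier_call = my_invalidate };
 *
 *	gmap_register_pte_notifier(&my_nb);
 *	...
 *	gmap_unregister_pte_notifier(&my_nb);
 */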
/**
 * gmap_call_notifier - call all registered invalidation callbacks
 * @gmap: pointer to guest mapping meta data structure
 * @start: start virtual address in the guest address space
 * @end: end virtual address in the guest address space
 */
static void gmap_call_notifier(struct gmap *gmap, unsigned long start,
			       unsigned long end)
{
	struct gmap_notifier *nb;

	list_for_each_entry(nb, &gmap_notifier_list, list)
		nb->notifier_call(gmap, start, end);
}
/**
 * gmap_table_walk - walk the gmap page tables
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @level: page table level to stop at
 *
 * Returns a table entry pointer for the given guest address and @level
 * @level=0 : returns a pointer to a page table entry (or NULL)
 * @level=1 : returns a pointer to a segment table entry (or NULL)
 * @level=2 : returns a pointer to a region-3 table entry (or NULL)
 * @level=3 : returns a pointer to a region-2 table entry (or NULL)
 * @level=4 : returns a pointer to a region-1 table entry (or NULL)
 *
 * Returns NULL if the gmap page tables could not be walked to the
 * requested level.
 *
 * Note: Can also be called for shadow gmaps.
 */
static inline unsigned long *gmap_table_walk(struct gmap *gmap,
					     unsigned long gaddr, int level)
{
	const int asce_type = gmap->asce & _ASCE_TYPE_MASK;
	unsigned long *table;

	if ((gmap->asce & _ASCE_TYPE_MASK) + 4 < (level * 4))
		return NULL;
	if (gmap_is_shadow(gmap) && gmap->removed)
		return NULL;

	if (asce_type != _ASCE_TYPE_REGION1 &&
	    gaddr & (-1UL << (31 + (asce_type >> 2) * 11)))
		return NULL;

	table = gmap->table;
	switch (gmap->asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		table += (gaddr & _REGION1_INDEX) >> _REGION1_SHIFT;
		if (level == 4)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* fallthrough */
	case _ASCE_TYPE_REGION2:
		table += (gaddr & _REGION2_INDEX) >> _REGION2_SHIFT;
		if (level == 3)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* fallthrough */
	case _ASCE_TYPE_REGION3:
		table += (gaddr & _REGION3_INDEX) >> _REGION3_SHIFT;
		if (level == 2)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* fallthrough */
	case _ASCE_TYPE_SEGMENT:
		table += (gaddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
		if (level == 1)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
		table += (gaddr & _PAGE_INDEX) >> _PAGE_SHIFT;
	}
	return table;
}
/**
 * gmap_pte_op_walk - walk the gmap page table, get the page table lock
 *		      and return the pte pointer
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @ptl: pointer to the spinlock pointer
 *
 * Returns a pointer to the locked pte for a guest address, or NULL
 */
static pte_t *gmap_pte_op_walk(struct gmap *gmap, unsigned long gaddr,
			       spinlock_t **ptl)
{
	unsigned long *table;

	BUG_ON(gmap_is_shadow(gmap));
	/* Walk the gmap page table, lock and get pte pointer */
	table = gmap_table_walk(gmap, gaddr, 1); /* get segment pointer */
	if (!table || *table & _SEGMENT_ENTRY_INVALID)
		return NULL;
	return pte_alloc_map_lock(gmap->mm, (pmd_t *) table, gaddr, ptl);
}
/**
 * gmap_pte_op_fixup - force a page in and connect the gmap page table
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @vmaddr: address in the host process address space
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 *
 * Returns 0 if the caller can retry __gmap_translate (might fail again),
 * -ENOMEM if out of memory and -EFAULT if anything goes wrong while fixing
 * up or connecting the gmap page table.
 */
static int gmap_pte_op_fixup(struct gmap *gmap, unsigned long gaddr,
			     unsigned long vmaddr, int prot)
{
	struct mm_struct *mm = gmap->mm;
	unsigned int fault_flags;
	bool unlocked = false;

	BUG_ON(gmap_is_shadow(gmap));
	fault_flags = (prot == PROT_WRITE) ? FAULT_FLAG_WRITE : 0;
	if (fixup_user_fault(current, mm, vmaddr, fault_flags, &unlocked))
		return -EFAULT;
	if (unlocked)
		/* lost mmap_sem, caller has to retry __gmap_translate */
		return 0;
	/* Connect the page tables */
	return __gmap_link(gmap, gaddr, vmaddr);
}
/**
 * gmap_pte_op_end - release the page table lock
 * @ptl: pointer to the spinlock pointer
 */
static void gmap_pte_op_end(spinlock_t *ptl)
{
	if (ptl)
		spin_unlock(ptl);
}
/**
 * gmap_pmd_op_walk - walk the gmap tables, get the guest table lock
 *		      and return the pmd pointer
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 *
 * Returns a pointer to the pmd for a guest address, or NULL
 */
static inline pmd_t *gmap_pmd_op_walk(struct gmap *gmap, unsigned long gaddr)
{
	pmd_t *pmdp;

	BUG_ON(gmap_is_shadow(gmap));
	pmdp = (pmd_t *) gmap_table_walk(gmap, gaddr, 1);
	if (!pmdp)
		return NULL;

	/* without huge pages, there is no need to take the table lock */
	if (!gmap->mm->context.allow_gmap_hpage_1m)
		return pmd_none(*pmdp) ? NULL : pmdp;

	spin_lock(&gmap->guest_table_lock);
	if (pmd_none(*pmdp)) {
		spin_unlock(&gmap->guest_table_lock);
		return NULL;
	}

	/* 4k page table entries are locked via the pte (pte_alloc_map_lock). */
	if (!pmd_large(*pmdp))
		spin_unlock(&gmap->guest_table_lock);
	return pmdp;
}
/**
 * gmap_pmd_op_end - release the guest_table_lock if needed
 * @gmap: pointer to the guest mapping meta data structure
 * @pmdp: pointer to the pmd
 */
static inline void gmap_pmd_op_end(struct gmap *gmap, pmd_t *pmdp)
{
	if (pmd_large(*pmdp))
		spin_unlock(&gmap->guest_table_lock);
}
/**
 * gmap_protect_pmd - remove access rights to memory and set pmd notification bits
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @pmdp: pointer to the pmd to be protected
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 * @bits: notification bits to set
 *
 * Returns:
 * 0 if successfully protected
 * -EAGAIN if a fixup is needed
 * -EINVAL if unsupported notifier bits have been specified
 *
 * Expected to be called with sg->mm->mmap_sem in read and
 * guest_table_lock held.
 */
static int gmap_protect_pmd(struct gmap *gmap, unsigned long gaddr,
			    pmd_t *pmdp, int prot, unsigned long bits)
{
	int pmd_i = pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID;
	int pmd_p = pmd_val(*pmdp) & _SEGMENT_ENTRY_PROTECT;
	pmd_t new = *pmdp;

	/* Fixup needed */
	if ((pmd_i && (prot != PROT_NONE)) || (pmd_p && (prot == PROT_WRITE)))
		return -EAGAIN;

	if (prot == PROT_NONE && !pmd_i) {
		pmd_val(new) |= _SEGMENT_ENTRY_INVALID;
		gmap_pmdp_xchg(gmap, pmdp, new, gaddr);
	}

	if (prot == PROT_READ && !pmd_p) {
		pmd_val(new) &= ~_SEGMENT_ENTRY_INVALID;
		pmd_val(new) |= _SEGMENT_ENTRY_PROTECT;
		gmap_pmdp_xchg(gmap, pmdp, new, gaddr);
	}

	if (bits & GMAP_NOTIFY_MPROT)
		pmd_val(*pmdp) |= _SEGMENT_ENTRY_GMAP_IN;

	/* Shadow GMAP protection needs split PMDs */
	if (bits & GMAP_NOTIFY_SHADOW)
		return -EINVAL;

	return 0;
}
/**
 * gmap_protect_pte - remove access rights to memory and set pgste bits
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @pmdp: pointer to the pmd associated with the pte
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 * @bits: notification bits to set
 *
 * Returns 0 if successfully protected, -ENOMEM if out of memory and
 * -EAGAIN if a fixup is needed.
 *
 * Expected to be called with sg->mm->mmap_sem in read
 */
static int gmap_protect_pte(struct gmap *gmap, unsigned long gaddr,
			    pmd_t *pmdp, int prot, unsigned long bits)
{
	int rc;
	pte_t *ptep;
	spinlock_t *ptl = NULL;
	unsigned long pbits = 0;

	if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
		return -EAGAIN;

	ptep = pte_alloc_map_lock(gmap->mm, pmdp, gaddr, &ptl);
	if (!ptep)
		return -ENOMEM;

	pbits |= (bits & GMAP_NOTIFY_MPROT) ? PGSTE_IN_BIT : 0;
	pbits |= (bits & GMAP_NOTIFY_SHADOW) ? PGSTE_VSIE_BIT : 0;
	/* Protect and unlock. */
	rc = ptep_force_prot(gmap->mm, gaddr, ptep, prot, pbits);
	gmap_pte_op_end(ptl);
	return rc;
}
/**
 * gmap_protect_range - remove access rights to memory and set pgste bits
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @len: size of area
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 * @bits: pgste notification bits to set
 *
 * Returns 0 if successfully protected, -ENOMEM if out of memory and
 * -EFAULT if gaddr is invalid (or mapping for shadows is missing).
 *
 * Called with sg->mm->mmap_sem in read.
 */
static int gmap_protect_range(struct gmap *gmap, unsigned long gaddr,
			      unsigned long len, int prot, unsigned long bits)
{
	unsigned long vmaddr, dist;
	pmd_t *pmdp;
	int rc;

	BUG_ON(gmap_is_shadow(gmap));
	while (len) {
		rc = -EAGAIN;
		pmdp = gmap_pmd_op_walk(gmap, gaddr);
		if (pmdp) {
			if (!pmd_large(*pmdp)) {
				rc = gmap_protect_pte(gmap, gaddr, pmdp, prot,
						      bits);
				if (!rc) {
					len -= PAGE_SIZE;
					gaddr += PAGE_SIZE;
				}
			} else {
				rc = gmap_protect_pmd(gmap, gaddr, pmdp, prot,
						      bits);
				if (!rc) {
					dist = HPAGE_SIZE - (gaddr & ~HPAGE_MASK);
					len = len < dist ? 0 : len - dist;
					gaddr = (gaddr & HPAGE_MASK) + HPAGE_SIZE;
				}
			}
			gmap_pmd_op_end(gmap, pmdp);
		}
		if (rc) {
			if (rc == -EINVAL)
				return rc;

			/* -EAGAIN, fixup of userspace mm and gmap */
			vmaddr = __gmap_translate(gmap, gaddr);
			if (IS_ERR_VALUE(vmaddr))
				return vmaddr;
			rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr, prot);
			if (rc)
				return rc;
		}
	}
	return 0;
}
/**
 * gmap_mprotect_notify - change access rights for a range of ptes and
 *                        call the notifier if any pte changes again
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @len: size of area
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 *
 * Returns 0 if for each page in the given range a gmap mapping exists,
 * the new access rights could be set and the notifier could be armed.
 * If the gmap mapping is missing for one or more pages -EFAULT is
 * returned. If no memory could be allocated -ENOMEM is returned.
 * This function establishes missing page table entries.
 */
int gmap_mprotect_notify(struct gmap *gmap, unsigned long gaddr,
			 unsigned long len, int prot)
{
	int rc;

	if ((gaddr & ~PAGE_MASK) || (len & ~PAGE_MASK) || gmap_is_shadow(gmap))
		return -EINVAL;
	if (!MACHINE_HAS_ESOP && prot == PROT_READ)
		return -EINVAL;
	down_read(&gmap->mm->mmap_sem);
	rc = gmap_protect_range(gmap, gaddr, len, prot, GMAP_NOTIFY_MPROT);
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_mprotect_notify);
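/*
 * Illustrative example: write-protect a single guest page and get a
 * notifier call on the next write attempt, similar to how KVM watches
 * the guest prefix pages ("prefix_gaddr" is hypothetical):
 *
 *	rc = gmap_mprotect_notify(g, prefix_gaddr, PAGE_SIZE, PROT_READ);
 */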
/**
 * gmap_read_table - get an unsigned long value from a guest page table using
 *                   absolute addressing, without marking the page referenced.
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @val: pointer to the unsigned long value to return
 *
 * Returns 0 if the value was read, -ENOMEM if out of memory and -EFAULT
 * if reading using the virtual address failed. -EINVAL if called on a gmap
 * shadow.
 *
 * Called with gmap->mm->mmap_sem in read.
 */
int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val)
{
	unsigned long address, vmaddr;
	spinlock_t *ptl;
	pte_t *ptep, pte;
	int rc;

	if (gmap_is_shadow(gmap))
		return -EINVAL;

	while (1) {
		rc = -EAGAIN;
		ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
		if (ptep) {
			pte = *ptep;
			if (pte_present(pte) && (pte_val(pte) & _PAGE_READ)) {
				address = pte_val(pte) & PAGE_MASK;
				address += gaddr & ~PAGE_MASK;
				*val = *(unsigned long *) address;
				pte_val(*ptep) |= _PAGE_YOUNG;
				/* Do *NOT* clear the _PAGE_INVALID bit! */
				rc = 0;
			}
			gmap_pte_op_end(ptl);
		}
		if (!rc)
			break;
		vmaddr = __gmap_translate(gmap, gaddr);
		if (IS_ERR_VALUE(vmaddr)) {
			rc = vmaddr;
			break;
		}
		rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr, PROT_READ);
		if (rc)
			break;
	}
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_read_table);
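/*
 * Illustrative example: peek at one word of a guest table without
 * setting the referenced bit ("gaddr" is hypothetical):
 *
 *	unsigned long word;
 *
 *	down_read(&g->mm->mmap_sem);
 *	rc = gmap_read_table(g, gaddr, &word);
 *	up_read(&g->mm->mmap_sem);
 */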
/**
 * gmap_insert_rmap - add a rmap to the host_to_rmap radix tree
 * @sg: pointer to the shadow guest address space structure
 * @vmaddr: vm address associated with the rmap
 * @rmap: pointer to the rmap structure
 *
 * Called with the sg->guest_table_lock
 */
static inline void gmap_insert_rmap(struct gmap *sg, unsigned long vmaddr,
				    struct gmap_rmap *rmap)
{
	void __rcu **slot;

	BUG_ON(!gmap_is_shadow(sg));
	slot = radix_tree_lookup_slot(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
	if (slot) {
		rmap->next = radix_tree_deref_slot_protected(slot,
							&sg->guest_table_lock);
		radix_tree_replace_slot(&sg->host_to_rmap, slot, rmap);
	} else {
		rmap->next = NULL;
		radix_tree_insert(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT,
				  rmap);
	}
}
/**
 * gmap_protect_rmap - restrict access rights to memory (RO) and create an rmap
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow gmap
 * @paddr: address in the parent guest address space
 * @len: length of the memory area to protect
 *
 * Returns 0 if successfully protected and the rmap was created, -ENOMEM
 * if out of memory and -EFAULT if paddr is invalid.
 */
static int gmap_protect_rmap(struct gmap *sg, unsigned long raddr,
			     unsigned long paddr, unsigned long len)
{
	struct gmap *parent;
	struct gmap_rmap *rmap;
	unsigned long vmaddr;
	spinlock_t *ptl;
	pte_t *ptep;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	parent = sg->parent;
	while (len) {
		vmaddr = __gmap_translate(parent, paddr);
		if (IS_ERR_VALUE(vmaddr))
			return vmaddr;
		rmap = kzalloc(sizeof(*rmap), GFP_KERNEL);
		if (!rmap)
			return -ENOMEM;
		rmap->raddr = raddr;
		rc = radix_tree_preload(GFP_KERNEL);
		if (rc) {
			kfree(rmap);
			return rc;
		}
		rc = -EAGAIN;
		ptep = gmap_pte_op_walk(parent, paddr, &ptl);
		if (ptep) {
			spin_lock(&sg->guest_table_lock);
			rc = ptep_force_prot(parent->mm, paddr, ptep, PROT_READ,
					     PGSTE_VSIE_BIT);
			if (!rc)
				gmap_insert_rmap(sg, vmaddr, rmap);
			spin_unlock(&sg->guest_table_lock);
			gmap_pte_op_end(ptl);
		}
		radix_tree_preload_end();
		if (rc) {
			kfree(rmap);
			rc = gmap_pte_op_fixup(parent, paddr, vmaddr, PROT_READ);
			if (rc)
				return rc;
			continue;
		}
		paddr += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
	return 0;
}
#define _SHADOW_RMAP_MASK	0x7
#define _SHADOW_RMAP_REGION1	0x5
#define _SHADOW_RMAP_REGION2	0x4
#define _SHADOW_RMAP_REGION3	0x3
#define _SHADOW_RMAP_SEGMENT	0x2
#define _SHADOW_RMAP_PGTABLE	0x1
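/*
 * Example of the encoding (illustrative): an rmap address carries the
 * shadow table level in its low bits, e.g.
 *
 *	raddr = (saddr & _SEGMENT_MASK) | _SHADOW_RMAP_SEGMENT;
 *
 * so the notification code can later pick the matching unshadow helper
 * by testing rmap->raddr & _SHADOW_RMAP_MASK.
 */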
/**
 * gmap_idte_one - invalidate a single region or segment table entry
 * @asce: region or segment table *origin* + table-type bits
 * @vaddr: virtual address to identify the table entry to flush
 *
 * The invalid bit of a single region or segment table entry is set
 * and the associated TLB entries depending on the entry are flushed.
 * The table-type of the @asce identifies the portion of the @vaddr
 * that is used as the invalidation index.
 */
static inline void gmap_idte_one(unsigned long asce, unsigned long vaddr)
{
	asm volatile(
		"	.insn	rrf,0xb98e0000,%0,%1,0,0"
		: : "a" (asce), "a" (vaddr) : "cc", "memory");
}
/**
 * gmap_unshadow_page - remove a page from a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_page(struct gmap *sg, unsigned long raddr)
{
	unsigned long *table;

	BUG_ON(!gmap_is_shadow(sg));
	table = gmap_table_walk(sg, raddr, 0); /* get page table pointer */
	if (!table || *table & _PAGE_INVALID)
		return;
	gmap_call_notifier(sg, raddr, raddr + _PAGE_SIZE - 1);
	ptep_unshadow_pte(sg->mm, raddr, (pte_t *) table);
}
/**
 * __gmap_unshadow_pgt - remove all entries from a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @pgt: pointer to the start of a shadow page table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr,
				unsigned long *pgt)
{
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	for (i = 0; i < _PAGE_ENTRIES; i++, raddr += _PAGE_SIZE)
		pgt[i] = _PAGE_INVALID;
}
/**
 * gmap_unshadow_pgt - remove a shadow page table from a segment entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr)
{
	unsigned long sto, *ste, *pgt;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	ste = gmap_table_walk(sg, raddr, 1); /* get segment pointer */
	if (!ste || !(*ste & _SEGMENT_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + _SEGMENT_SIZE - 1);
	sto = (unsigned long) (ste - ((raddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT));
	gmap_idte_one(sto | _ASCE_TYPE_SEGMENT, raddr);
	pgt = (unsigned long *)(*ste & _SEGMENT_ENTRY_ORIGIN);
	*ste = _SEGMENT_ENTRY_EMPTY;
	__gmap_unshadow_pgt(sg, raddr, pgt);
	/* Free page table */
	page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT);
	list_del(&page->lru);
	page_table_free_pgste(page);
}
/**
 * __gmap_unshadow_sgt - remove all entries from a shadow segment table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @sgt: pointer to the start of a shadow segment table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr,
				unsigned long *sgt)
{
	unsigned long *pgt;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	for (i = 0; i < _CRST_ENTRIES; i++, raddr += _SEGMENT_SIZE) {
		if (!(sgt[i] & _SEGMENT_ENTRY_ORIGIN))
			continue;
		pgt = (unsigned long *)(sgt[i] & _REGION_ENTRY_ORIGIN);
		sgt[i] = _SEGMENT_ENTRY_EMPTY;
		__gmap_unshadow_pgt(sg, raddr, pgt);
		/* Free page table */
		page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT);
		list_del(&page->lru);
		page_table_free_pgste(page);
	}
}
/**
 * gmap_unshadow_sgt - remove a shadow segment table from a region-3 entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr)
{
	unsigned long r3o, *r3e, *sgt;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	r3e = gmap_table_walk(sg, raddr, 2); /* get region-3 pointer */
	if (!r3e || !(*r3e & _REGION_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + _REGION3_SIZE - 1);
	r3o = (unsigned long) (r3e - ((raddr & _REGION3_INDEX) >> _REGION3_SHIFT));
	gmap_idte_one(r3o | _ASCE_TYPE_REGION3, raddr);
	sgt = (unsigned long *)(*r3e & _REGION_ENTRY_ORIGIN);
	*r3e = _REGION3_ENTRY_EMPTY;
	__gmap_unshadow_sgt(sg, raddr, sgt);
	/* Free segment table */
	page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT);
	list_del(&page->lru);
	__free_pages(page, CRST_ALLOC_ORDER);
}
/**
 * __gmap_unshadow_r3t - remove all entries from a shadow region-3 table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: address in the shadow guest address space
 * @r3t: pointer to the start of a shadow region-3 table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr,
				unsigned long *r3t)
{
	unsigned long *sgt;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION3_SIZE) {
		if (!(r3t[i] & _REGION_ENTRY_ORIGIN))
			continue;
		sgt = (unsigned long *)(r3t[i] & _REGION_ENTRY_ORIGIN);
		r3t[i] = _REGION3_ENTRY_EMPTY;
		__gmap_unshadow_sgt(sg, raddr, sgt);
		/* Free segment table */
		page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT);
		list_del(&page->lru);
		__free_pages(page, CRST_ALLOC_ORDER);
	}
}
/**
 * gmap_unshadow_r3t - remove a shadow region-3 table from a region-2 entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr)
{
	unsigned long r2o, *r2e, *r3t;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	r2e = gmap_table_walk(sg, raddr, 3); /* get region-2 pointer */
	if (!r2e || !(*r2e & _REGION_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + _REGION2_SIZE - 1);
	r2o = (unsigned long) (r2e - ((raddr & _REGION2_INDEX) >> _REGION2_SHIFT));
	gmap_idte_one(r2o | _ASCE_TYPE_REGION2, raddr);
	r3t = (unsigned long *)(*r2e & _REGION_ENTRY_ORIGIN);
	*r2e = _REGION2_ENTRY_EMPTY;
	__gmap_unshadow_r3t(sg, raddr, r3t);
	/* Free region 3 table */
	page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT);
	list_del(&page->lru);
	__free_pages(page, CRST_ALLOC_ORDER);
}
/**
 * __gmap_unshadow_r2t - remove all entries from a shadow region-2 table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @r2t: pointer to the start of a shadow region-2 table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr,
				unsigned long *r2t)
{
	unsigned long *r3t;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION2_SIZE) {
		if (!(r2t[i] & _REGION_ENTRY_ORIGIN))
			continue;
		r3t = (unsigned long *)(r2t[i] & _REGION_ENTRY_ORIGIN);
		r2t[i] = _REGION2_ENTRY_EMPTY;
		__gmap_unshadow_r3t(sg, raddr, r3t);
		/* Free region 3 table */
		page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT);
		list_del(&page->lru);
		__free_pages(page, CRST_ALLOC_ORDER);
	}
}
/**
 * gmap_unshadow_r2t - remove a shadow region-2 table from a region-1 entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr)
{
	unsigned long r1o, *r1e, *r2t;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	r1e = gmap_table_walk(sg, raddr, 4); /* get region-1 pointer */
	if (!r1e || !(*r1e & _REGION_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + _REGION1_SIZE - 1);
	r1o = (unsigned long) (r1e - ((raddr & _REGION1_INDEX) >> _REGION1_SHIFT));
	gmap_idte_one(r1o | _ASCE_TYPE_REGION1, raddr);
	r2t = (unsigned long *)(*r1e & _REGION_ENTRY_ORIGIN);
	*r1e = _REGION1_ENTRY_EMPTY;
	__gmap_unshadow_r2t(sg, raddr, r2t);
	/* Free region 2 table */
	page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT);
	list_del(&page->lru);
	__free_pages(page, CRST_ALLOC_ORDER);
}
/**
 * __gmap_unshadow_r1t - remove all entries from a shadow region-1 table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @r1t: pointer to the start of a shadow region-1 table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_r1t(struct gmap *sg, unsigned long raddr,
				unsigned long *r1t)
{
	unsigned long asce, *r2t;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	asce = (unsigned long) r1t | _ASCE_TYPE_REGION1;
	for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION1_SIZE) {
		if (!(r1t[i] & _REGION_ENTRY_ORIGIN))
			continue;
		r2t = (unsigned long *)(r1t[i] & _REGION_ENTRY_ORIGIN);
		__gmap_unshadow_r2t(sg, raddr, r2t);
		/* Clear entry and flush translation r1t -> r2t */
		gmap_idte_one(asce, raddr);
		r1t[i] = _REGION1_ENTRY_EMPTY;
		/* Free region 2 table */
		page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT);
		list_del(&page->lru);
		__free_pages(page, CRST_ALLOC_ORDER);
	}
}
/**
 * gmap_unshadow - remove a shadow page table completely
 * @sg: pointer to the shadow guest address space structure
 *
 * Called with sg->guest_table_lock
 */
static void gmap_unshadow(struct gmap *sg)
{
	unsigned long *table;

	BUG_ON(!gmap_is_shadow(sg));
	if (sg->removed)
		return;
	sg->removed = 1;
	gmap_call_notifier(sg, 0, -1UL);
	gmap_flush_tlb(sg);
	table = (unsigned long *)(sg->asce & _ASCE_ORIGIN);
	switch (sg->asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		__gmap_unshadow_r1t(sg, 0, table);
		break;
	case _ASCE_TYPE_REGION2:
		__gmap_unshadow_r2t(sg, 0, table);
		break;
	case _ASCE_TYPE_REGION3:
		__gmap_unshadow_r3t(sg, 0, table);
		break;
	case _ASCE_TYPE_SEGMENT:
		__gmap_unshadow_sgt(sg, 0, table);
		break;
	}
}
/**
 * gmap_find_shadow - find a specific asce in the list of shadow tables
 * @parent: pointer to the parent gmap
 * @asce: ASCE for which the shadow table is created
 * @edat_level: edat level to be used for the shadow translation
 *
 * Returns the pointer to a gmap if a shadow table with the given asce is
 * already available, ERR_PTR(-EAGAIN) if another one is just being created,
 * otherwise NULL
 */
static struct gmap *gmap_find_shadow(struct gmap *parent, unsigned long asce,
				     int edat_level)
{
	struct gmap *sg;

	list_for_each_entry(sg, &parent->children, list) {
		if (sg->orig_asce != asce || sg->edat_level != edat_level ||
		    sg->removed)
			continue;
		if (!sg->initialized)
			return ERR_PTR(-EAGAIN);
		refcount_inc(&sg->ref_count);
		return sg;
	}
	return NULL;
}
/**
 * gmap_shadow_valid - check if a shadow guest address space matches the
 *                     given properties and is still valid
 * @sg: pointer to the shadow guest address space structure
 * @asce: ASCE for which the shadow table is requested
 * @edat_level: edat level to be used for the shadow translation
 *
 * Returns 1 if the gmap shadow is still valid and matches the given
 * properties, the caller can continue using it. Returns 0 otherwise, the
 * caller has to request a new shadow gmap in this case.
 */
int gmap_shadow_valid(struct gmap *sg, unsigned long asce, int edat_level)
{
	if (sg->removed)
		return 0;
	return sg->orig_asce == asce && sg->edat_level == edat_level;
}
EXPORT_SYMBOL_GPL(gmap_shadow_valid);
/**
 * gmap_shadow - create/find a shadow guest address space
 * @parent: pointer to the parent gmap
 * @asce: ASCE for which the shadow table is created
 * @edat_level: edat level to be used for the shadow translation
 *
 * The pages of the top level page table referred by the asce parameter
 * will be set to read-only and marked in the PGSTEs of the kvm process.
 * The shadow table will be removed automatically on any change to the
 * PTE mapping for the source table.
 *
 * Returns a guest address space structure, ERR_PTR(-ENOMEM) if out of memory,
 * ERR_PTR(-EAGAIN) if the caller has to retry and ERR_PTR(-EFAULT) if the
 * parent gmap table could not be protected.
 */
struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce,
			 int edat_level)
{
	struct gmap *sg, *new;
	unsigned long limit;
	int rc;

	BUG_ON(parent->mm->context.allow_gmap_hpage_1m);
	BUG_ON(gmap_is_shadow(parent));
	spin_lock(&parent->shadow_lock);
	sg = gmap_find_shadow(parent, asce, edat_level);
	spin_unlock(&parent->shadow_lock);
	if (sg)
		return sg;
	/* Create a new shadow gmap */
	limit = -1UL >> (33 - (((asce & _ASCE_TYPE_MASK) >> 2) * 11));
	if (asce & _ASCE_REAL_SPACE)
		limit = -1UL;
	new = gmap_alloc(limit);
	if (!new)
		return ERR_PTR(-ENOMEM);
	new->mm = parent->mm;
	new->parent = gmap_get(parent);
	new->orig_asce = asce;
	new->edat_level = edat_level;
	new->initialized = false;
	spin_lock(&parent->shadow_lock);
	/* Recheck if another CPU created the same shadow */
	sg = gmap_find_shadow(parent, asce, edat_level);
	if (sg) {
		spin_unlock(&parent->shadow_lock);
		gmap_free(new);
		return sg;
	}
	if (asce & _ASCE_REAL_SPACE) {
		/* only allow one real-space gmap shadow */
		list_for_each_entry(sg, &parent->children, list) {
			if (sg->orig_asce & _ASCE_REAL_SPACE) {
				spin_lock(&sg->guest_table_lock);
				gmap_unshadow(sg);
				spin_unlock(&sg->guest_table_lock);
				list_del(&sg->list);
				gmap_put(sg);
				break;
			}
		}
	}
	refcount_set(&new->ref_count, 2);
	list_add(&new->list, &parent->children);
	if (asce & _ASCE_REAL_SPACE) {
		/* nothing to protect, return right away */
		new->initialized = true;
		spin_unlock(&parent->shadow_lock);
		return new;
	}
	spin_unlock(&parent->shadow_lock);
	/* protect after insertion, so it will get properly invalidated */
	down_read(&parent->mm->mmap_sem);
	rc = gmap_protect_range(parent, asce & _ASCE_ORIGIN,
				((asce & _ASCE_TABLE_LENGTH) + 1) * PAGE_SIZE,
				PROT_READ, GMAP_NOTIFY_SHADOW);
	up_read(&parent->mm->mmap_sem);
	spin_lock(&parent->shadow_lock);
	new->initialized = true;
	if (rc) {
		list_del(&new->list);
		gmap_free(new);
		new = ERR_PTR(rc);
	}
	spin_unlock(&parent->shadow_lock);
	return new;
}
EXPORT_SYMBOL_GPL(gmap_shadow);
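/*
 * Usage sketch (illustrative, not part of the original file): the VSIE
 * code creates a shadow for the nested guest's ASCE and simply retries
 * when it races with an invalidation:
 *
 *	sg = gmap_shadow(parent_gmap, nested_asce, edat_level);
 *	if (IS_ERR(sg))
 *		return PTR_ERR(sg);	// -ENOMEM, -EAGAIN or -EFAULT
 *	...
 *	gmap_put(sg);			// drop the reference when done
 */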
/**
 * gmap_shadow_r2t - create an empty shadow region 2 table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @r2t: parent gmap address of the region 2 table to get shadowed
 * @fake: r2t references contiguous guest memory block, not a r2t
 *
 * The r2t parameter specifies the address of the source table. The
 * four pages of the source table are made read-only in the parent gmap
 * address space. A write to the source table area @r2t will automatically
 * remove the shadow r2 table and all of its descendants.
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
		    int fake)
{
	unsigned long raddr, origin, offset, len;
	unsigned long *s_r2t, *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	/* Allocate a shadow region second table */
	page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (!page)
		return -ENOMEM;
	page->index = r2t & _REGION_ENTRY_ORIGIN;
	if (fake)
		page->index |= GMAP_SHADOW_FAKE_TABLE;
	s_r2t = (unsigned long *) page_to_phys(page);
	/* Install shadow region second table */
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 4); /* get region-1 pointer */
	if (!table) {
		rc = -EAGAIN;		/* Race with unshadow */
		goto out_free;
	}
	if (!(*table & _REGION_ENTRY_INVALID)) {
		rc = 0;			/* Already established */
		goto out_free;
	} else if (*table & _REGION_ENTRY_ORIGIN) {
		rc = -EAGAIN;		/* Race with shadow */
		goto out_free;
	}
	crst_table_init(s_r2t, _REGION2_ENTRY_EMPTY);
	/* mark as invalid as long as the parent table is not protected */
	*table = (unsigned long) s_r2t | _REGION_ENTRY_LENGTH |
		 _REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID;
	if (sg->edat_level >= 1)
		*table |= (r2t & _REGION_ENTRY_PROTECT);
	list_add(&page->lru, &sg->crst_list);
	if (fake) {
		/* nothing to protect for fake tables */
		*table &= ~_REGION_ENTRY_INVALID;
		spin_unlock(&sg->guest_table_lock);
		return 0;
	}
	spin_unlock(&sg->guest_table_lock);
	/* Make r2t read-only in parent gmap page table */
	raddr = (saddr & _REGION1_MASK) | _SHADOW_RMAP_REGION1;
	origin = r2t & _REGION_ENTRY_ORIGIN;
	offset = ((r2t & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE;
	len = ((r2t & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset;
	rc = gmap_protect_rmap(sg, raddr, origin + offset, len);
	spin_lock(&sg->guest_table_lock);
	if (!rc) {
		table = gmap_table_walk(sg, saddr, 4);
		if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
		    (unsigned long) s_r2t)
			rc = -EAGAIN;		/* Race with unshadow */
		else
			*table &= ~_REGION_ENTRY_INVALID;
	} else {
		gmap_unshadow_r2t(sg, raddr);
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
out_free:
	spin_unlock(&sg->guest_table_lock);
	__free_pages(page, CRST_ALLOC_ORDER);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_r2t);
/**
 * gmap_shadow_r3t - create a shadow region 3 table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @r3t: parent gmap address of the region 3 table to get shadowed
 * @fake: r3t references contiguous guest memory block, not a r3t
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
		    int fake)
{
	unsigned long raddr, origin, offset, len;
	unsigned long *s_r3t, *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	/* Allocate a shadow region third table */
	page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (!page)
		return -ENOMEM;
	page->index = r3t & _REGION_ENTRY_ORIGIN;
	if (fake)
		page->index |= GMAP_SHADOW_FAKE_TABLE;
	s_r3t = (unsigned long *) page_to_phys(page);
	/* Install shadow region third table */
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 3); /* get region-2 pointer */
	if (!table) {
		rc = -EAGAIN;		/* Race with unshadow */
		goto out_free;
	}
	if (!(*table & _REGION_ENTRY_INVALID)) {
		rc = 0;			/* Already established */
		goto out_free;
	} else if (*table & _REGION_ENTRY_ORIGIN) {
		rc = -EAGAIN;		/* Race with shadow */
		goto out_free;
	}
	crst_table_init(s_r3t, _REGION3_ENTRY_EMPTY);
	/* mark as invalid as long as the parent table is not protected */
	*table = (unsigned long) s_r3t | _REGION_ENTRY_LENGTH |
		 _REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID;
	if (sg->edat_level >= 1)
		*table |= (r3t & _REGION_ENTRY_PROTECT);
	list_add(&page->lru, &sg->crst_list);
	if (fake) {
		/* nothing to protect for fake tables */
		*table &= ~_REGION_ENTRY_INVALID;
		spin_unlock(&sg->guest_table_lock);
		return 0;
	}
	spin_unlock(&sg->guest_table_lock);
	/* Make r3t read-only in parent gmap page table */
	raddr = (saddr & _REGION2_MASK) | _SHADOW_RMAP_REGION2;
	origin = r3t & _REGION_ENTRY_ORIGIN;
	offset = ((r3t & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE;
	len = ((r3t & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset;
	rc = gmap_protect_rmap(sg, raddr, origin + offset, len);
	spin_lock(&sg->guest_table_lock);
	if (!rc) {
		table = gmap_table_walk(sg, saddr, 3);
		if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
		    (unsigned long) s_r3t)
			rc = -EAGAIN;		/* Race with unshadow */
		else
			*table &= ~_REGION_ENTRY_INVALID;
	} else {
		gmap_unshadow_r3t(sg, raddr);
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
out_free:
	spin_unlock(&sg->guest_table_lock);
	__free_pages(page, CRST_ALLOC_ORDER);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_r3t);
/**
 * gmap_shadow_sgt - create a shadow segment table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @sgt: parent gmap address of the segment table to get shadowed
 * @fake: sgt references contiguous guest memory block, not a sgt
 *
 * Returns: 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
		    int fake)
{
	unsigned long raddr, origin, offset, len;
	unsigned long *s_sgt, *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg) || (sgt & _REGION3_ENTRY_LARGE));
	/* Allocate a shadow segment table */
	page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (!page)
		return -ENOMEM;
	page->index = sgt & _REGION_ENTRY_ORIGIN;
	if (fake)
		page->index |= GMAP_SHADOW_FAKE_TABLE;
	s_sgt = (unsigned long *) page_to_phys(page);
	/* Install shadow segment table */
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 2); /* get region-3 pointer */
	if (!table) {
		rc = -EAGAIN;		/* Race with unshadow */
		goto out_free;
	}
	if (!(*table & _REGION_ENTRY_INVALID)) {
		rc = 0;			/* Already established */
		goto out_free;
	} else if (*table & _REGION_ENTRY_ORIGIN) {
		rc = -EAGAIN;		/* Race with shadow */
		goto out_free;
	}
	crst_table_init(s_sgt, _SEGMENT_ENTRY_EMPTY);
	/* mark as invalid as long as the parent table is not protected */
	*table = (unsigned long) s_sgt | _REGION_ENTRY_LENGTH |
		 _REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID;
	if (sg->edat_level >= 1)
		*table |= sgt & _REGION_ENTRY_PROTECT;
	list_add(&page->lru, &sg->crst_list);
	if (fake) {
		/* nothing to protect for fake tables */
		*table &= ~_REGION_ENTRY_INVALID;
		spin_unlock(&sg->guest_table_lock);
		return 0;
	}
	spin_unlock(&sg->guest_table_lock);
	/* Make sgt read-only in parent gmap page table */
	raddr = (saddr & _REGION3_MASK) | _SHADOW_RMAP_REGION3;
	origin = sgt & _REGION_ENTRY_ORIGIN;
	offset = ((sgt & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE;
	len = ((sgt & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset;
	rc = gmap_protect_rmap(sg, raddr, origin + offset, len);
	spin_lock(&sg->guest_table_lock);
	if (!rc) {
		table = gmap_table_walk(sg, saddr, 2);
		if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
		    (unsigned long) s_sgt)
			rc = -EAGAIN;		/* Race with unshadow */
		else
			*table &= ~_REGION_ENTRY_INVALID;
	} else {
		gmap_unshadow_sgt(sg, raddr);
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
out_free:
	spin_unlock(&sg->guest_table_lock);
	__free_pages(page, CRST_ALLOC_ORDER);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_sgt);
/**
 * gmap_shadow_pgt_lookup - find a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: the address in the shadow guest address space
 * @pgt: parent gmap address of the page table to get shadowed
 * @dat_protection: if the pgtable is marked as protected by dat
 * @fake: pgt references contiguous guest memory block, not a pgtable
 *
 * Returns 0 if the shadow page table was found and -EAGAIN if the page
 * table was not found.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr,
			   unsigned long *pgt, int *dat_protection,
			   int *fake)
{
	unsigned long *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
	if (table && !(*table & _SEGMENT_ENTRY_INVALID)) {
		/* Shadow page tables are full pages (pte+pgste) */
		page = pfn_to_page(*table >> PAGE_SHIFT);
		*pgt = page->index & ~GMAP_SHADOW_FAKE_TABLE;
		*dat_protection = !!(*table & _SEGMENT_ENTRY_PROTECT);
		*fake = !!(page->index & GMAP_SHADOW_FAKE_TABLE);
		rc = 0;
	} else {
		rc = -EAGAIN;
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_pgt_lookup);
/**
 * gmap_shadow_pgt - instantiate a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @pgt: parent gmap address of the page table to get shadowed
 * @fake: pgt references contiguous guest memory block, not a pgtable
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with gmap->mm->mmap_sem in read
 */
int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
		    int fake)
{
	unsigned long raddr, origin;
	unsigned long *s_pgt, *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg) || (pgt & _SEGMENT_ENTRY_LARGE));
	/* Allocate a shadow page table */
	page = page_table_alloc_pgste(sg->mm);
	if (!page)
		return -ENOMEM;
	page->index = pgt & _SEGMENT_ENTRY_ORIGIN;
	if (fake)
		page->index |= GMAP_SHADOW_FAKE_TABLE;
	s_pgt = (unsigned long *) page_to_phys(page);
	/* Install shadow page table */
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
	if (!table) {
		rc = -EAGAIN;		/* Race with unshadow */
		goto out_free;
	}
	if (!(*table & _SEGMENT_ENTRY_INVALID)) {
		rc = 0;			/* Already established */
		goto out_free;
	} else if (*table & _SEGMENT_ENTRY_ORIGIN) {
		rc = -EAGAIN;		/* Race with shadow */
		goto out_free;
	}
	/* mark as invalid as long as the parent table is not protected */
	*table = (unsigned long) s_pgt | _SEGMENT_ENTRY |
		 (pgt & _SEGMENT_ENTRY_PROTECT) | _SEGMENT_ENTRY_INVALID;
	list_add(&page->lru, &sg->pt_list);
	if (fake) {
		/* nothing to protect for fake tables */
		*table &= ~_SEGMENT_ENTRY_INVALID;
		spin_unlock(&sg->guest_table_lock);
		return 0;
	}
	spin_unlock(&sg->guest_table_lock);
	/* Make pgt read-only in parent gmap page table (not the pgste) */
	raddr = (saddr & _SEGMENT_MASK) | _SHADOW_RMAP_SEGMENT;
	origin = pgt & _SEGMENT_ENTRY_ORIGIN & PAGE_MASK;
	rc = gmap_protect_rmap(sg, raddr, origin, PAGE_SIZE);
	spin_lock(&sg->guest_table_lock);
	if (!rc) {
		table = gmap_table_walk(sg, saddr, 1);
		if (!table || (*table & _SEGMENT_ENTRY_ORIGIN) !=
		    (unsigned long) s_pgt)
			rc = -EAGAIN;		/* Race with unshadow */
		else
			*table &= ~_SEGMENT_ENTRY_INVALID;
	} else {
		gmap_unshadow_pgt(sg, raddr);
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
out_free:
	spin_unlock(&sg->guest_table_lock);
	page_table_free_pgste(page);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_pgt);
/**
 * gmap_shadow_page - create a shadow page mapping
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @pte: pte in parent gmap address space to get shadowed
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte)
{
	struct gmap *parent;
	struct gmap_rmap *rmap;
	unsigned long vmaddr, paddr;
	spinlock_t *ptl;
	pte_t *sptep, *tptep;
	int prot;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	parent = sg->parent;
	prot = (pte_val(pte) & _PAGE_PROTECT) ? PROT_READ : PROT_WRITE;
	rmap = kzalloc(sizeof(*rmap), GFP_KERNEL);
	if (!rmap)
		return -ENOMEM;
	rmap->raddr = (saddr & PAGE_MASK) | _SHADOW_RMAP_PGTABLE;
	while (1) {
		paddr = pte_val(pte) & PAGE_MASK;
		vmaddr = __gmap_translate(parent, paddr);
		if (IS_ERR_VALUE(vmaddr)) {
			rc = vmaddr;
			break;
		}
		rc = radix_tree_preload(GFP_KERNEL);
		if (rc)
			break;
		rc = -EAGAIN;
		sptep = gmap_pte_op_walk(parent, paddr, &ptl);
		if (sptep) {
			spin_lock(&sg->guest_table_lock);
			/* Get page table pointer */
			tptep = (pte_t *) gmap_table_walk(sg, saddr, 0);
			if (!tptep) {
				spin_unlock(&sg->guest_table_lock);
				gmap_pte_op_end(ptl);
				radix_tree_preload_end();
				break;
			}
			rc = ptep_shadow_pte(sg->mm, saddr, sptep, tptep, pte);
			if (rc > 0) {
				/* Success and a new mapping */
				gmap_insert_rmap(sg, vmaddr, rmap);
				rmap = NULL;
				rc = 0;
			}
			gmap_pte_op_end(ptl);
			spin_unlock(&sg->guest_table_lock);
		}
		radix_tree_preload_end();
		if (!rc)
			break;
		rc = gmap_pte_op_fixup(parent, paddr, vmaddr, prot);
		if (rc)
			break;
	}
	kfree(rmap);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_page);
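/*
 * Illustrative sketch (not part of the original file): once the shadow
 * page table exists, the final level is shadowed by handing the parent
 * pte to gmap_shadow_page(). The pte construction below is schematic;
 * read_guest_pte() is a hypothetical helper standing in for the guest DAT
 * walk, and any extra protection would be folded into the pte by that walk.
 */
#if 0
static int shadow_one_page(struct gmap *sg, unsigned long saddr)
{
	pte_t pte;
	int rc;

	rc = read_guest_pte(sg, saddr, &pte);	/* hypothetical */
	if (rc)
		return rc;
	/* gmap_shadow_page() resolves parent faults internally and retries */
	return gmap_shadow_page(sg, saddr, pte);
}
#endif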
/**
 * gmap_shadow_notify - handle notifications for shadow gmap
 *
 * Called with sg->parent->shadow_lock.
 */
static void gmap_shadow_notify(struct gmap *sg, unsigned long vmaddr,
			       unsigned long gaddr)
{
	struct gmap_rmap *rmap, *rnext, *head;
	unsigned long start, end, bits, raddr;

	BUG_ON(!gmap_is_shadow(sg));

	spin_lock(&sg->guest_table_lock);
	if (sg->removed) {
		spin_unlock(&sg->guest_table_lock);
		return;
	}
	/* Check for top level table */
	start = sg->orig_asce & _ASCE_ORIGIN;
	end = start + ((sg->orig_asce & _ASCE_TABLE_LENGTH) + 1) * PAGE_SIZE;
	if (!(sg->orig_asce & _ASCE_REAL_SPACE) && gaddr >= start &&
	    gaddr < end) {
		/* The complete shadow table has to go */
		gmap_unshadow(sg);
		spin_unlock(&sg->guest_table_lock);
		list_del(&sg->list);
		gmap_put(sg);
		return;
	}
	/* Remove the page table tree for one specific entry */
	head = radix_tree_delete(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
	gmap_for_each_rmap_safe(rmap, rnext, head) {
		bits = rmap->raddr & _SHADOW_RMAP_MASK;
		raddr = rmap->raddr ^ bits;
		switch (bits) {
		case _SHADOW_RMAP_REGION1:
			gmap_unshadow_r2t(sg, raddr);
			break;
		case _SHADOW_RMAP_REGION2:
			gmap_unshadow_r3t(sg, raddr);
			break;
		case _SHADOW_RMAP_REGION3:
			gmap_unshadow_sgt(sg, raddr);
			break;
		case _SHADOW_RMAP_SEGMENT:
			gmap_unshadow_pgt(sg, raddr);
			break;
		case _SHADOW_RMAP_PGTABLE:
			gmap_unshadow_page(sg, raddr);
			break;
		}
		kfree(rmap);
	}
	spin_unlock(&sg->guest_table_lock);
}
/**
 * ptep_notify - call all invalidation callbacks for a specific pte.
 * @mm: pointer to the process mm_struct
 * @vmaddr: virtual address in the process address space
 * @pte: pointer to the page table entry
 * @bits: bits from the pgste that caused the notify call
 *
 * This function is assumed to be called with the page table lock held
 * for the pte to notify.
 */
void ptep_notify(struct mm_struct *mm, unsigned long vmaddr,
		 pte_t *pte, unsigned long bits)
{
	unsigned long offset, gaddr = 0;
	unsigned long *table;
	struct gmap *gmap, *sg, *next;

	offset = ((unsigned long) pte) & (255 * sizeof(pte_t));
	offset = offset * (PAGE_SIZE / sizeof(pte_t));
	rcu_read_lock();
	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
		spin_lock(&gmap->guest_table_lock);
		table = radix_tree_lookup(&gmap->host_to_guest,
					  vmaddr >> PMD_SHIFT);
		if (table)
			gaddr = __gmap_segment_gaddr(table) + offset;
		spin_unlock(&gmap->guest_table_lock);
		if (!table)
			continue;

		if (!list_empty(&gmap->children) && (bits & PGSTE_VSIE_BIT)) {
			spin_lock(&gmap->shadow_lock);
			list_for_each_entry_safe(sg, next,
						 &gmap->children, list)
				gmap_shadow_notify(sg, vmaddr, gaddr);
			spin_unlock(&gmap->shadow_lock);
		}
		if (bits & PGSTE_IN_BIT)
			gmap_call_notifier(gmap, gaddr, gaddr + PAGE_SIZE - 1);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(ptep_notify);
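/*
 * Illustrative note (not part of the original file): the offset math in
 * ptep_notify() turns a pte pointer into a guest address offset. A 2K page
 * table holds 256 entries of 8 bytes, so masking with 255 * sizeof(pte_t)
 * yields the byte offset of the entry within its table; multiplying by
 * PAGE_SIZE / sizeof(pte_t) (= 512) scales one 8-byte entry step to one
 * 4K page step. E.g. the pte at index 3 gives 3 * 8 * 512 = 3 * PAGE_SIZE,
 * which added to the segment's guest address locates the notified page.
 */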
static void pmdp_notify_gmap(struct gmap *gmap, pmd_t *pmdp,
			     unsigned long gaddr)
{
	pmd_val(*pmdp) &= ~_SEGMENT_ENTRY_GMAP_IN;
	gmap_call_notifier(gmap, gaddr, gaddr + HPAGE_SIZE - 1);
}
/**
 * gmap_pmdp_xchg - exchange a gmap pmd with another
 * @gmap: pointer to the guest address space structure
 * @pmdp: pointer to the pmd entry
 * @new: replacement entry
 * @gaddr: the affected guest address
 *
 * This function is assumed to be called with the guest_table_lock
 * held.
 */
static void gmap_pmdp_xchg(struct gmap *gmap, pmd_t *pmdp, pmd_t new,
			   unsigned long gaddr)
{
	gaddr &= HPAGE_MASK;
	pmdp_notify_gmap(gmap, pmdp, gaddr);
	pmd_val(new) &= ~_SEGMENT_ENTRY_GMAP_IN;
	if (MACHINE_HAS_TLB_GUEST)
		__pmdp_idte(gaddr, (pmd_t *)pmdp, IDTE_GUEST_ASCE, gmap->asce,
			    IDTE_GLOBAL);
	else if (MACHINE_HAS_IDTE)
		__pmdp_idte(gaddr, (pmd_t *)pmdp, 0, 0, IDTE_GLOBAL);
	else
		__pmdp_csp(pmdp);
	*pmdp = new;
}
static void gmap_pmdp_clear(struct mm_struct *mm, unsigned long vmaddr,
			    int purge)
{
	pmd_t *pmdp;
	struct gmap *gmap;
	unsigned long gaddr;

	rcu_read_lock();
	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
		spin_lock(&gmap->guest_table_lock);
		pmdp = (pmd_t *)radix_tree_delete(&gmap->host_to_guest,
						  vmaddr >> PMD_SHIFT);
		if (pmdp) {
			gaddr = __gmap_segment_gaddr((unsigned long *)pmdp);
			pmdp_notify_gmap(gmap, pmdp, gaddr);
			WARN_ON(pmd_val(*pmdp) & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE |
						   _SEGMENT_ENTRY_GMAP_UC));
			if (purge)
				__pmdp_csp(pmdp);
			pmd_val(*pmdp) = _SEGMENT_ENTRY_EMPTY;
		}
		spin_unlock(&gmap->guest_table_lock);
	}
	rcu_read_unlock();
}
/**
 * gmap_pmdp_invalidate - invalidate all affected guest pmd entries without
 *                        flushing
 * @mm: pointer to the process mm_struct
 * @vmaddr: virtual address in the process address space
 */
void gmap_pmdp_invalidate(struct mm_struct *mm, unsigned long vmaddr)
{
	gmap_pmdp_clear(mm, vmaddr, 0);
}
EXPORT_SYMBOL_GPL(gmap_pmdp_invalidate);
/**
 * gmap_pmdp_csp - csp all affected guest pmd entries
 * @mm: pointer to the process mm_struct
 * @vmaddr: virtual address in the process address space
 */
void gmap_pmdp_csp(struct mm_struct *mm, unsigned long vmaddr)
{
	gmap_pmdp_clear(mm, vmaddr, 1);
}
EXPORT_SYMBOL_GPL(gmap_pmdp_csp);
/**
 * gmap_pmdp_idte_local - invalidate and clear a guest pmd entry
 * @mm: pointer to the process mm_struct
 * @vmaddr: virtual address in the process address space
 */
void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr)
{
	unsigned long *entry, gaddr;
	struct gmap *gmap;
	pmd_t *pmdp;

	rcu_read_lock();
	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
		spin_lock(&gmap->guest_table_lock);
		entry = radix_tree_delete(&gmap->host_to_guest,
					  vmaddr >> PMD_SHIFT);
		if (entry) {
			pmdp = (pmd_t *)entry;
			gaddr = __gmap_segment_gaddr(entry);
			pmdp_notify_gmap(gmap, pmdp, gaddr);
			WARN_ON(*entry & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE |
					   _SEGMENT_ENTRY_GMAP_UC));
			if (MACHINE_HAS_TLB_GUEST)
				__pmdp_idte(gaddr, pmdp, IDTE_GUEST_ASCE,
					    gmap->asce, IDTE_LOCAL);
			else if (MACHINE_HAS_IDTE)
				__pmdp_idte(gaddr, pmdp, 0, 0, IDTE_LOCAL);
			*entry = _SEGMENT_ENTRY_EMPTY;
		}
		spin_unlock(&gmap->guest_table_lock);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(gmap_pmdp_idte_local);
/**
 * gmap_pmdp_idte_global - invalidate and clear a guest pmd entry
 * @mm: pointer to the process mm_struct
 * @vmaddr: virtual address in the process address space
 */
void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr)
{
	unsigned long *entry, gaddr;
	struct gmap *gmap;
	pmd_t *pmdp;

	rcu_read_lock();
	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
		spin_lock(&gmap->guest_table_lock);
		entry = radix_tree_delete(&gmap->host_to_guest,
					  vmaddr >> PMD_SHIFT);
		if (entry) {
			pmdp = (pmd_t *)entry;
			gaddr = __gmap_segment_gaddr(entry);
			pmdp_notify_gmap(gmap, pmdp, gaddr);
			WARN_ON(*entry & ~(_SEGMENT_ENTRY_HARDWARE_BITS_LARGE |
					   _SEGMENT_ENTRY_GMAP_UC));
			if (MACHINE_HAS_TLB_GUEST)
				__pmdp_idte(gaddr, pmdp, IDTE_GUEST_ASCE,
					    gmap->asce, IDTE_GLOBAL);
			else if (MACHINE_HAS_IDTE)
				__pmdp_idte(gaddr, pmdp, 0, 0, IDTE_GLOBAL);
			else
				__pmdp_csp(pmdp);
			*entry = _SEGMENT_ENTRY_EMPTY;
		}
		spin_unlock(&gmap->guest_table_lock);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(gmap_pmdp_idte_global);
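/*
 * Illustrative sketch (not part of the original file): the four entry
 * points above differ only in how the stale TLB entries are handled:
 * gmap_pmdp_invalidate() clears without flushing, gmap_pmdp_csp() purges
 * via compare-and-swap-and-purge, and the idte variants flush on the
 * local CPU only or machine-wide. A hypothetical dispatcher could map a
 * caller's intent onto them as follows.
 */
#if 0
static void flush_guest_pmd(struct mm_struct *mm, unsigned long vmaddr,
			    bool local_only, bool purge)
{
	if (purge)
		gmap_pmdp_csp(mm, vmaddr);
	else if (local_only)
		gmap_pmdp_idte_local(mm, vmaddr);
	else
		gmap_pmdp_idte_global(mm, vmaddr);
}
#endif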
/**
 * gmap_test_and_clear_dirty_pmd - test and reset segment dirty status
 * @gmap: pointer to guest address space
 * @pmdp: pointer to the pmd to be tested
 * @gaddr: virtual address in the guest address space
 *
 * This function is assumed to be called with the guest_table_lock
 * held.
 */
static bool gmap_test_and_clear_dirty_pmd(struct gmap *gmap, pmd_t *pmdp,
					  unsigned long gaddr)
{
	if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
		return false;

	/* Already protected memory, which did not change, is clean */
	if (pmd_val(*pmdp) & _SEGMENT_ENTRY_PROTECT &&
	    !(pmd_val(*pmdp) & _SEGMENT_ENTRY_GMAP_UC))
		return false;

	/* Clear UC indication and reset protection */
	pmd_val(*pmdp) &= ~_SEGMENT_ENTRY_GMAP_UC;
	gmap_protect_pmd(gmap, gaddr, pmdp, PROT_READ, 0);
	return true;
}
/**
 * gmap_sync_dirty_log_pmd - set bitmap based on dirty status of segment
 * @gmap: pointer to guest address space
 * @bitmap: dirty bitmap for this pmd
 * @gaddr: virtual address in the guest address space
 * @vmaddr: virtual address in the host address space
 *
 * This function is assumed to be called with the guest_table_lock
 * held.
 */
void gmap_sync_dirty_log_pmd(struct gmap *gmap, unsigned long bitmap[4],
			     unsigned long gaddr, unsigned long vmaddr)
{
	int i;
	pmd_t *pmdp;
	pte_t *ptep;
	spinlock_t *ptl;

	pmdp = gmap_pmd_op_walk(gmap, gaddr);
	if (!pmdp)
		return;

	if (pmd_large(*pmdp)) {
		if (gmap_test_and_clear_dirty_pmd(gmap, pmdp, gaddr))
			bitmap_fill(bitmap, _PAGE_ENTRIES);
	} else {
		for (i = 0; i < _PAGE_ENTRIES; i++, vmaddr += PAGE_SIZE) {
			ptep = pte_alloc_map_lock(gmap->mm, pmdp, vmaddr, &ptl);
			if (!ptep)
				continue;
			if (ptep_test_and_clear_uc(gmap->mm, vmaddr, ptep))
				set_bit(i, bitmap);
			spin_unlock(ptl);
		}
	}
	gmap_pmd_op_end(gmap, pmdp);
}
EXPORT_SYMBOL_GPL(gmap_sync_dirty_log_pmd);
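/*
 * Illustrative sketch (not part of the original file): dirty logging walks
 * guest memory one segment (HPAGE_SIZE) at a time and collects the
 * _PAGE_ENTRIES (256) per-page bits for each segment. Translating gaddr to
 * vmaddr via __gmap_translate() mirrors what a dirty-log caller would do;
 * error handling is elided and the function name is hypothetical.
 */
#if 0
static void collect_dirty_range(struct gmap *gmap, unsigned long gaddr,
				unsigned long npages, unsigned long *dirty)
{
	unsigned long bitmap[4], vmaddr, end = gaddr + npages * PAGE_SIZE;

	/* 4 longs = 256 bits, one bit per 4K page within the segment */
	for (; gaddr < end; gaddr += HPAGE_SIZE, dirty += 4) {
		bitmap_zero(bitmap, _PAGE_ENTRIES);
		vmaddr = __gmap_translate(gmap, gaddr);
		if (IS_ERR_VALUE(vmaddr))
			continue;
		gmap_sync_dirty_log_pmd(gmap, bitmap, gaddr, vmaddr);
		bitmap_or(dirty, dirty, bitmap, _PAGE_ENTRIES);
	}
}
#endif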
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static int thp_split_walk_pmd_entry(pmd_t *pmd, unsigned long addr,
				    unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;

	split_huge_pmd(vma, pmd, addr);
	return 0;
}

static const struct mm_walk_ops thp_split_walk_ops = {
	.pmd_entry	= thp_split_walk_pmd_entry,
};

static inline void thp_split_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
		vma->vm_flags &= ~VM_HUGEPAGE;
		vma->vm_flags |= VM_NOHUGEPAGE;
		walk_page_vma(vma, &thp_split_walk_ops, NULL);
	}
	mm->def_flags |= VM_NOHUGEPAGE;
}
#else
static inline void thp_split_mm(struct mm_struct *mm)
{
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
/*
 * Remove all empty zero pages from the mapping for lazy refaulting
 * - This must be called after mm->context.has_pgste is set, to avoid
 *   future creation of zero pages
 * - This must be called after THP was enabled
 */
static int __zap_zero_pages(pmd_t *pmd, unsigned long start,
			    unsigned long end, struct mm_walk *walk)
{
	unsigned long addr;

	for (addr = start; addr != end; addr += PAGE_SIZE) {
		pte_t *ptep;
		spinlock_t *ptl;

		ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
		if (is_zero_pfn(pte_pfn(*ptep)))
			ptep_xchg_direct(walk->mm, addr, ptep, __pte(_PAGE_INVALID));
		pte_unmap_unlock(ptep, ptl);
	}
	return 0;
}

static const struct mm_walk_ops zap_zero_walk_ops = {
	.pmd_entry	= __zap_zero_pages,
};
/*
 * switch on pgstes for its userspace process (for kvm)
 */
int s390_enable_sie(void)
{
	struct mm_struct *mm = current->mm;

	/* Do we have pgstes? if yes, we are done */
	if (mm_has_pgste(mm))
		return 0;
	/* Fail if the page tables are 2K */
	if (!mm_alloc_pgste(mm))
		return -EINVAL;
	down_write(&mm->mmap_sem);
	mm->context.has_pgste = 1;
	/* split thp mappings and disable thp for future mappings */
	thp_split_mm(mm);
	walk_page_range(mm, 0, TASK_SIZE, &zap_zero_walk_ops, NULL);
	up_write(&mm->mmap_sem);
	return 0;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);
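/*
 * Illustrative sketch (not part of the original file): s390_enable_sie()
 * is meant to be called once for the owning process before a guest can
 * run, e.g. from a VM creation path. The surrounding function name is
 * hypothetical; only the s390_enable_sie() call and its error codes come
 * from this file.
 */
#if 0
static int my_kvm_arch_init_vm(void)
{
	int rc;

	/* must run before the process relies on 2K page tables */
	rc = s390_enable_sie();
	if (rc)
		return rc;	/* -EINVAL: mm cannot allocate pgste tables */
	return 0;
}
#endif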
/*
 * Enable storage key handling from now on and initialize the storage
 * keys with the default key.
 */
static int __s390_enable_skey_pte(pte_t *pte, unsigned long addr,
				  unsigned long next, struct mm_walk *walk)
{
	/* Clear storage key */
	ptep_zap_key(walk->mm, addr, pte);
	return 0;
}

/*
 * Give a chance to schedule after setting a key to 256 pages.
 * We only hold the mm lock, which is a rwsem and the kvm srcu.
 * Both can sleep.
 */
static int __s390_enable_skey_pmd(pmd_t *pmd, unsigned long addr,
				  unsigned long next, struct mm_walk *walk)
{
	cond_resched();
	return 0;
}

static int __s390_enable_skey_hugetlb(pte_t *pte, unsigned long addr,
				      unsigned long hmask, unsigned long next,
				      struct mm_walk *walk)
{
	pmd_t *pmd = (pmd_t *)pte;
	unsigned long start, end;
	struct page *page = pmd_page(*pmd);

	/*
	 * The write check makes sure we do not set a key on shared
	 * memory. This is needed as the walker does not differentiate
	 * between actual guest memory and the process executable or
	 * shared libraries.
	 */
	if (pmd_val(*pmd) & _SEGMENT_ENTRY_INVALID ||
	    !(pmd_val(*pmd) & _SEGMENT_ENTRY_WRITE))
		return 0;

	start = pmd_val(*pmd) & HPAGE_MASK;
	end = start + HPAGE_SIZE - 1;
	__storage_key_init_range(start, end);
	set_bit(PG_arch_1, &page->flags);
	cond_resched();
	return 0;
}

static const struct mm_walk_ops enable_skey_walk_ops = {
	.hugetlb_entry	= __s390_enable_skey_hugetlb,
	.pte_entry	= __s390_enable_skey_pte,
	.pmd_entry	= __s390_enable_skey_pmd,
};
int s390_enable_skey(void)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int rc = 0;

	down_write(&mm->mmap_sem);
	if (mm_uses_skeys(mm))
		goto out_up;

	mm->context.uses_skeys = 1;
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (ksm_madvise(vma, vma->vm_start, vma->vm_end,
				MADV_UNMERGEABLE, &vma->vm_flags)) {
			mm->context.uses_skeys = 0;
			rc = -ENOMEM;
			goto out_up;
		}
	}
	mm->def_flags &= ~VM_MERGEABLE;

	walk_page_range(mm, 0, TASK_SIZE, &enable_skey_walk_ops, NULL);

out_up:
	up_write(&mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(s390_enable_skey);
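/*
 * Illustrative sketch (not part of the original file): storage keys are
 * typically enabled lazily, when the guest first issues a storage-key
 * instruction and the hypervisor intercepts it. The handler name below is
 * hypothetical; mm_uses_skeys() and s390_enable_skey() are the real
 * interfaces from this file.
 */
#if 0
static int my_handle_skey_intercept(struct mm_struct *mm)
{
	int rc = 0;

	if (!mm_uses_skeys(mm))
		rc = s390_enable_skey();	/* -ENOMEM if unmerging fails */
	return rc;
}
#endif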
/*
 * Reset CMMA state, make all pages stable again.
 */
static int __s390_reset_cmma(pte_t *pte, unsigned long addr,
			     unsigned long next, struct mm_walk *walk)
{
	ptep_zap_unused(walk->mm, addr, pte, 1);
	return 0;
}

static const struct mm_walk_ops reset_cmma_walk_ops = {
	.pte_entry	= __s390_reset_cmma,
};

void s390_reset_cmma(struct mm_struct *mm)
{
	down_write(&mm->mmap_sem);
	walk_page_range(mm, 0, TASK_SIZE, &reset_cmma_walk_ops, NULL);
	up_write(&mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(s390_reset_cmma);
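/*
 * Illustrative sketch (not part of the original file): CMMA state is
 * typically reset when the whole guest is reset, e.g. from a VM reset
 * path in the hypervisor; the function name below is hypothetical.
 */
#if 0
static void my_kvm_arch_vm_reset(struct mm_struct *mm)
{
	/* make all guest pages stable again after a guest reset */
	s390_reset_cmma(mm);
}
#endif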