/*
 * KVM guest address space mapping code
 *
 * Copyright IBM Corp. 2007, 2016
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/ksm.h>
#include <linux/mman.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>

#define GMAP_SHADOW_FAKE_TABLE 1ULL
/**
 * gmap_alloc - allocate and initialize a guest address space
 * @limit: maximum address of the gmap address space
 *
 * Returns a guest address space structure.
 */
static struct gmap *gmap_alloc(unsigned long limit)
{
        struct gmap *gmap;
        struct page *page;
        unsigned long *table;
        unsigned long etype, atype;

        if (limit < (1UL << 31)) {
                limit = (1UL << 31) - 1;
                atype = _ASCE_TYPE_SEGMENT;
                etype = _SEGMENT_ENTRY_EMPTY;
        } else if (limit < (1UL << 42)) {
                limit = (1UL << 42) - 1;
                atype = _ASCE_TYPE_REGION3;
                etype = _REGION3_ENTRY_EMPTY;
        } else if (limit < (1UL << 53)) {
                limit = (1UL << 53) - 1;
                atype = _ASCE_TYPE_REGION2;
                etype = _REGION2_ENTRY_EMPTY;
        } else {
                limit = -1UL;
                atype = _ASCE_TYPE_REGION1;
                etype = _REGION1_ENTRY_EMPTY;
        }
        gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
        if (!gmap)
                goto out;
        INIT_LIST_HEAD(&gmap->crst_list);
        INIT_LIST_HEAD(&gmap->children);
        INIT_LIST_HEAD(&gmap->pt_list);
        INIT_RADIX_TREE(&gmap->guest_to_host, GFP_KERNEL);
        INIT_RADIX_TREE(&gmap->host_to_guest, GFP_ATOMIC);
        INIT_RADIX_TREE(&gmap->host_to_rmap, GFP_ATOMIC);
        spin_lock_init(&gmap->guest_table_lock);
        spin_lock_init(&gmap->shadow_lock);
        atomic_set(&gmap->ref_count, 1);
        page = alloc_pages(GFP_KERNEL, 2);
        if (!page)
                goto out_free;
        page->index = 0;
        list_add(&page->lru, &gmap->crst_list);
        table = (unsigned long *) page_to_phys(page);
        crst_table_init(table, etype);
        gmap->table = table;
        gmap->asce = atype | _ASCE_TABLE_LENGTH |
                _ASCE_USER_BITS | __pa(table);
        gmap->asce_end = limit;
        return gmap;

out_free:
        kfree(gmap);
out:
        return NULL;
}
/**
 * gmap_create - create a guest address space
 * @mm: pointer to the parent mm_struct
 * @limit: maximum size of the gmap address space
 *
 * Returns a guest address space structure.
 */
struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit)
{
        struct gmap *gmap;
        unsigned long gmap_asce;

        gmap = gmap_alloc(limit);
        if (!gmap)
                return NULL;
        gmap->mm = mm;
        spin_lock(&mm->context.gmap_lock);
        list_add_rcu(&gmap->list, &mm->context.gmap_list);
        if (list_is_singular(&mm->context.gmap_list))
                gmap_asce = gmap->asce;
        else
                gmap_asce = -1UL;
        WRITE_ONCE(mm->context.gmap_asce, gmap_asce);
        spin_unlock(&mm->context.gmap_lock);
        return gmap;
}
EXPORT_SYMBOL_GPL(gmap_create);
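/*
 * Example (an illustrative sketch, not taken from an in-tree caller; the
 * 42-bit limit is an arbitrary choice): typical lifecycle of a gmap as a
 * VM container would use it. Note that gmap_remove() drops the initial
 * reference, so no extra gmap_put() is needed afterwards.
 */
static inline void __maybe_unused gmap_lifecycle_sketch(struct mm_struct *mm)
{
        struct gmap *g;

        g = gmap_create(mm, (1UL << 42) - 1);
        if (!g)
                return;         /* out of memory */
        gmap_enable(g);         /* make it the active guest address space */
        /* ... enter SIE, resolve guest faults via gmap_fault() ... */
        gmap_disable(g);
        gmap_remove(g);         /* unlink and put the initial reference */
}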
static void gmap_flush_tlb(struct gmap *gmap)
{
        if (MACHINE_HAS_IDTE)
                __tlb_flush_idte(gmap->asce);
        else
                __tlb_flush_global();
}

static void gmap_radix_tree_free(struct radix_tree_root *root)
{
        struct radix_tree_iter iter;
        unsigned long indices[16];
        unsigned long index;
        void **slot;
        int i, nr;

        /* A radix tree is freed by deleting all of its entries */
        index = 0;
        do {
                nr = 0;
                radix_tree_for_each_slot(slot, root, &iter, index) {
                        indices[nr] = iter.index;
                        if (++nr == 16)
                                break;
                }
                for (i = 0; i < nr; i++) {
                        index = indices[i];
                        radix_tree_delete(root, index);
                }
        } while (nr > 0);
}

static void gmap_rmap_radix_tree_free(struct radix_tree_root *root)
{
        struct gmap_rmap *rmap, *rnext, *head;
        struct radix_tree_iter iter;
        unsigned long indices[16];
        unsigned long index;
        void **slot;
        int i, nr;

        /* A radix tree is freed by deleting all of its entries */
        index = 0;
        do {
                nr = 0;
                radix_tree_for_each_slot(slot, root, &iter, index) {
                        indices[nr] = iter.index;
                        if (++nr == 16)
                                break;
                }
                for (i = 0; i < nr; i++) {
                        index = indices[i];
                        head = radix_tree_delete(root, index);
                        gmap_for_each_rmap_safe(rmap, rnext, head)
                                kfree(rmap);
                }
        } while (nr > 0);
}

/**
 * gmap_free - free a guest address space
 * @gmap: pointer to the guest address space structure
 *
 * No locks required. There are no references to this gmap anymore.
 */
static void gmap_free(struct gmap *gmap)
{
        struct page *page, *next;

        /* Flush tlb of all gmaps (if not already done for shadows) */
        if (!(gmap_is_shadow(gmap) && gmap->removed))
                gmap_flush_tlb(gmap);
        /* Free all segment & region tables. */
        list_for_each_entry_safe(page, next, &gmap->crst_list, lru)
                __free_pages(page, 2);
        gmap_radix_tree_free(&gmap->guest_to_host);
        gmap_radix_tree_free(&gmap->host_to_guest);

        /* Free additional data for a shadow gmap */
        if (gmap_is_shadow(gmap)) {
                /* Free all page tables. */
                list_for_each_entry_safe(page, next, &gmap->pt_list, lru)
                        page_table_free_pgste(page);
                gmap_rmap_radix_tree_free(&gmap->host_to_rmap);
                /* Release reference to the parent */
                gmap_put(gmap->parent);
        }

        kfree(gmap);
}

/**
 * gmap_get - increase reference counter for guest address space
 * @gmap: pointer to the guest address space structure
 *
 * Returns the gmap pointer
 */
struct gmap *gmap_get(struct gmap *gmap)
{
        atomic_inc(&gmap->ref_count);
        return gmap;
}
EXPORT_SYMBOL_GPL(gmap_get);

/**
 * gmap_put - decrease reference counter for guest address space
 * @gmap: pointer to the guest address space structure
 *
 * If the reference counter reaches zero the guest address space is freed.
 */
void gmap_put(struct gmap *gmap)
{
        if (atomic_dec_return(&gmap->ref_count) == 0)
                gmap_free(gmap);
}
EXPORT_SYMBOL_GPL(gmap_put);
/**
 * gmap_remove - remove a guest address space but do not free it yet
 * @gmap: pointer to the guest address space structure
 */
void gmap_remove(struct gmap *gmap)
{
        struct gmap *sg, *next;
        unsigned long gmap_asce;

        /* Remove all shadow gmaps linked to this gmap */
        if (!list_empty(&gmap->children)) {
                spin_lock(&gmap->shadow_lock);
                list_for_each_entry_safe(sg, next, &gmap->children, list) {
                        list_del(&sg->list);
                        gmap_put(sg);
                }
                spin_unlock(&gmap->shadow_lock);
        }
        /* Remove gmap from the per-mm list */
        spin_lock(&gmap->mm->context.gmap_lock);
        list_del_rcu(&gmap->list);
        if (list_empty(&gmap->mm->context.gmap_list))
                gmap_asce = 0;
        else if (list_is_singular(&gmap->mm->context.gmap_list))
                gmap_asce = list_first_entry(&gmap->mm->context.gmap_list,
                                             struct gmap, list)->asce;
        else
                gmap_asce = -1UL;
        WRITE_ONCE(gmap->mm->context.gmap_asce, gmap_asce);
        spin_unlock(&gmap->mm->context.gmap_lock);
        synchronize_rcu();
        /* Put reference */
        gmap_put(gmap);
}
EXPORT_SYMBOL_GPL(gmap_remove);
/**
 * gmap_enable - switch primary space to the guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_enable(struct gmap *gmap)
{
        S390_lowcore.gmap = (unsigned long) gmap;
}
EXPORT_SYMBOL_GPL(gmap_enable);

/**
 * gmap_disable - switch back to the standard primary address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_disable(struct gmap *gmap)
{
        S390_lowcore.gmap = 0UL;
}
EXPORT_SYMBOL_GPL(gmap_disable);

/**
 * gmap_get_enabled - get a pointer to the currently enabled gmap
 *
 * Returns a pointer to the currently enabled gmap. 0 if none is enabled.
 */
struct gmap *gmap_get_enabled(void)
{
        return (struct gmap *) S390_lowcore.gmap;
}
EXPORT_SYMBOL_GPL(gmap_get_enabled);
/*
 * gmap_alloc_table is assumed to be called with mmap_sem held
 */
static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
                            unsigned long init, unsigned long gaddr)
{
        struct page *page;
        unsigned long *new;

        /* since we don't free the gmap table until gmap_free we can unlock */
        page = alloc_pages(GFP_KERNEL, 2);
        if (!page)
                return -ENOMEM;
        new = (unsigned long *) page_to_phys(page);
        crst_table_init(new, init);
        spin_lock(&gmap->guest_table_lock);
        if (*table & _REGION_ENTRY_INVALID) {
                list_add(&page->lru, &gmap->crst_list);
                *table = (unsigned long) new | _REGION_ENTRY_LENGTH |
                        (*table & _REGION_ENTRY_TYPE_MASK);
                page->index = gaddr;
                page = NULL;
        }
        spin_unlock(&gmap->guest_table_lock);
        if (page)
                __free_pages(page, 2);
        return 0;
}

/**
 * __gmap_segment_gaddr - find virtual address from segment pointer
 * @entry: pointer to a segment table entry in the guest address space
 *
 * Returns the virtual address in the guest address space for the segment
 */
static unsigned long __gmap_segment_gaddr(unsigned long *entry)
{
        struct page *page;
        unsigned long offset, mask;

        offset = (unsigned long) entry / sizeof(unsigned long);
        offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE;
        mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
        page = virt_to_page((void *)((unsigned long) entry & mask));
        return page->index + offset;
}
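/*
 * Worked example for __gmap_segment_gaddr (illustrative, with hypothetical
 * numbers): a segment table spans four pages and holds 2048 eight-byte
 * entries, each mapping a 1 MB segment. For an @entry at byte offset 0x808
 * within its table, offset becomes ((0x808 / 8) & 2047) * PMD_SIZE, i.e.
 * 257 MB. The mask aligns @entry down to the 16 KB table start, whose head
 * page->index holds the table's base guest address (stored by
 * gmap_alloc_table), so the sum is the guest address of the segment.
 */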
/**
 * __gmap_unlink_by_vmaddr - unlink a single segment via a host address
 * @gmap: pointer to the guest address space structure
 * @vmaddr: address in the host process address space
 *
 * Returns 1 if a TLB flush is required
 */
static int __gmap_unlink_by_vmaddr(struct gmap *gmap, unsigned long vmaddr)
{
        unsigned long *entry;
        int flush = 0;

        BUG_ON(gmap_is_shadow(gmap));
        spin_lock(&gmap->guest_table_lock);
        entry = radix_tree_delete(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
        if (entry) {
                flush = (*entry != _SEGMENT_ENTRY_INVALID);
                *entry = _SEGMENT_ENTRY_INVALID;
        }
        spin_unlock(&gmap->guest_table_lock);
        return flush;
}

/**
 * __gmap_unmap_by_gaddr - unmap a single segment via a guest address
 * @gmap: pointer to the guest address space structure
 * @gaddr: address in the guest address space
 *
 * Returns 1 if a TLB flush is required
 */
static int __gmap_unmap_by_gaddr(struct gmap *gmap, unsigned long gaddr)
{
        unsigned long vmaddr;

        vmaddr = (unsigned long) radix_tree_delete(&gmap->guest_to_host,
                                                   gaddr >> PMD_SHIFT);
        return vmaddr ? __gmap_unlink_by_vmaddr(gmap, vmaddr) : 0;
}

/**
 * gmap_unmap_segment - unmap segment from the guest address space
 * @gmap: pointer to the guest address space structure
 * @to: address in the guest address space
 * @len: length of the memory area to unmap
 *
 * Returns 0 if the unmap succeeded, -EINVAL if not.
 */
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
{
        unsigned long off;
        int flush;

        BUG_ON(gmap_is_shadow(gmap));
        if ((to | len) & (PMD_SIZE - 1))
                return -EINVAL;
        if (len == 0 || to + len < to)
                return -EINVAL;

        flush = 0;
        down_write(&gmap->mm->mmap_sem);
        for (off = 0; off < len; off += PMD_SIZE)
                flush |= __gmap_unmap_by_gaddr(gmap, to + off);
        up_write(&gmap->mm->mmap_sem);
        if (flush)
                gmap_flush_tlb(gmap);
        return 0;
}
EXPORT_SYMBOL_GPL(gmap_unmap_segment);
/**
 * gmap_map_segment - map a segment to the guest address space
 * @gmap: pointer to the guest address space structure
 * @from: source address in the parent address space
 * @to: target address in the guest address space
 * @len: length of the memory area to map
 *
 * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not.
 */
int gmap_map_segment(struct gmap *gmap, unsigned long from,
                     unsigned long to, unsigned long len)
{
        unsigned long off;
        int flush;

        BUG_ON(gmap_is_shadow(gmap));
        if ((from | to | len) & (PMD_SIZE - 1))
                return -EINVAL;
        if (len == 0 || from + len < from || to + len < to ||
            from + len - 1 > TASK_MAX_SIZE || to + len - 1 > gmap->asce_end)
                return -EINVAL;

        flush = 0;
        down_write(&gmap->mm->mmap_sem);
        for (off = 0; off < len; off += PMD_SIZE) {
                /* Remove old translation */
                flush |= __gmap_unmap_by_gaddr(gmap, to + off);
                /* Store new translation */
                if (radix_tree_insert(&gmap->guest_to_host,
                                      (to + off) >> PMD_SHIFT,
                                      (void *) from + off))
                        break;
        }
        up_write(&gmap->mm->mmap_sem);
        if (flush)
                gmap_flush_tlb(gmap);
        if (off >= len)
                return 0;
        gmap_unmap_segment(gmap, to, len);
        return -ENOMEM;
}
EXPORT_SYMBOL_GPL(gmap_map_segment);
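/*
 * Example (an illustrative sketch; both addresses are hypothetical and
 * must be PMD_SIZE aligned, i.e. 1 MB segments): let guest segment
 * 0x10000000 resolve to the parent process memory at 0x80000000.
 */
static inline int __maybe_unused gmap_map_sketch(struct gmap *gmap)
{
        int rc;

        rc = gmap_map_segment(gmap, 0x80000000UL, 0x10000000UL, PMD_SIZE);
        if (rc)
                return rc;      /* -EINVAL or -ENOMEM */
        /* ... guest runs, faults get resolved via gmap_fault() ... */
        return gmap_unmap_segment(gmap, 0x10000000UL, PMD_SIZE);
}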
/**
 * __gmap_translate - translate a guest address to a user space address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 *
 * Note: Can also be called for shadow gmaps.
 */
unsigned long __gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
        unsigned long vmaddr;

        vmaddr = (unsigned long)
                radix_tree_lookup(&gmap->guest_to_host, gaddr >> PMD_SHIFT);
        /* Note: guest_to_host is empty for a shadow gmap */
        return vmaddr ? (vmaddr | (gaddr & ~PMD_MASK)) : -EFAULT;
}
EXPORT_SYMBOL_GPL(__gmap_translate);

/**
 * gmap_translate - translate a guest address to a user space address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 */
unsigned long gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
        unsigned long rc;

        down_read(&gmap->mm->mmap_sem);
        rc = __gmap_translate(gmap, gaddr);
        up_read(&gmap->mm->mmap_sem);
        return rc;
}
EXPORT_SYMBOL_GPL(gmap_translate);

/**
 * gmap_unlink - disconnect a page table from the gmap shadow tables
 * @mm: pointer to the process mm_struct
 * @table: pointer to the host page table
 * @vmaddr: vm address associated with the host page table
 */
void gmap_unlink(struct mm_struct *mm, unsigned long *table,
                 unsigned long vmaddr)
{
        struct gmap *gmap;
        int flush;

        rcu_read_lock();
        list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
                flush = __gmap_unlink_by_vmaddr(gmap, vmaddr);
                if (flush)
                        gmap_flush_tlb(gmap);
        }
        rcu_read_unlock();
}
/**
 * __gmap_link - set up shadow page tables to connect a host to a guest address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 * @vmaddr: vm address
 *
 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
 * if the vm address is already mapped to a different guest segment.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 */
int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
{
        struct mm_struct *mm;
        unsigned long *table;
        spinlock_t *ptl;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        int rc;

        BUG_ON(gmap_is_shadow(gmap));
        /* Create higher level tables in the gmap page table */
        table = gmap->table;
        if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION1) {
                table += (gaddr >> 53) & 0x7ff;
                if ((*table & _REGION_ENTRY_INVALID) &&
                    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY,
                                     gaddr & 0xffe0000000000000UL))
                        return -ENOMEM;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
        }
        if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION2) {
                table += (gaddr >> 42) & 0x7ff;
                if ((*table & _REGION_ENTRY_INVALID) &&
                    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY,
                                     gaddr & 0xfffffc0000000000UL))
                        return -ENOMEM;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
        }
        if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION3) {
                table += (gaddr >> 31) & 0x7ff;
                if ((*table & _REGION_ENTRY_INVALID) &&
                    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY,
                                     gaddr & 0xffffffff80000000UL))
                        return -ENOMEM;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
        }
        table += (gaddr >> 20) & 0x7ff;
        /* Walk the parent mm page table */
        mm = gmap->mm;
        pgd = pgd_offset(mm, vmaddr);
        VM_BUG_ON(pgd_none(*pgd));
        pud = pud_offset(pgd, vmaddr);
        VM_BUG_ON(pud_none(*pud));
        /* large puds cannot yet be handled */
        if (pud_large(*pud))
                return -EFAULT;
        pmd = pmd_offset(pud, vmaddr);
        VM_BUG_ON(pmd_none(*pmd));
        /* large pmds cannot yet be handled */
        if (pmd_large(*pmd))
                return -EFAULT;
        /* Link gmap segment table entry location to page table. */
        rc = radix_tree_preload(GFP_KERNEL);
        if (rc)
                return rc;
        ptl = pmd_lock(mm, pmd);
        spin_lock(&gmap->guest_table_lock);
        if (*table == _SEGMENT_ENTRY_INVALID) {
                rc = radix_tree_insert(&gmap->host_to_guest,
                                       vmaddr >> PMD_SHIFT, table);
                if (!rc)
                        *table = pmd_val(*pmd);
        } else
                rc = 0;
        spin_unlock(&gmap->guest_table_lock);
        spin_unlock(ptl);
        radix_tree_preload_end();
        return rc;
}
/**
 * gmap_fault - resolve a fault on a guest address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 * @fault_flags: flags to pass down to handle_mm_fault()
 *
 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
 * if the vm address is already mapped to a different guest segment.
 */
int gmap_fault(struct gmap *gmap, unsigned long gaddr,
               unsigned int fault_flags)
{
        unsigned long vmaddr;
        int rc;
        bool unlocked;

        down_read(&gmap->mm->mmap_sem);

retry:
        unlocked = false;
        vmaddr = __gmap_translate(gmap, gaddr);
        if (IS_ERR_VALUE(vmaddr)) {
                rc = vmaddr;
                goto out_up;
        }
        if (fixup_user_fault(current, gmap->mm, vmaddr, fault_flags,
                             &unlocked)) {
                rc = -EFAULT;
                goto out_up;
        }
        /*
         * In case fixup_user_fault unlocked the mmap_sem during the
         * fault-in, redo __gmap_translate to avoid racing with a
         * map/unmap_segment.
         */
        if (unlocked)
                goto retry;

        rc = __gmap_link(gmap, gaddr, vmaddr);
out_up:
        up_read(&gmap->mm->mmap_sem);
        return rc;
}
EXPORT_SYMBOL_GPL(gmap_fault);
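/*
 * Example (an illustrative sketch of the typical fault path; the wrapper
 * name is hypothetical): try a cheap translation first, then let
 * gmap_fault() fault the page in and link the page tables.
 */
static inline int __maybe_unused gmap_resolve_write_fault(struct gmap *gmap,
                                                          unsigned long gaddr)
{
        unsigned long vmaddr;

        vmaddr = gmap_translate(gmap, gaddr);
        if (IS_ERR_VALUE(vmaddr))
                return -EFAULT;         /* no segment mapped at gaddr */
        return gmap_fault(gmap, gaddr, FAULT_FLAG_WRITE);
}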
/*
 * this function is assumed to be called with mmap_sem held
 */
void __gmap_zap(struct gmap *gmap, unsigned long gaddr)
{
        unsigned long vmaddr;
        spinlock_t *ptl;
        pte_t *ptep;

        /* Find the vm address for the guest address */
        vmaddr = (unsigned long) radix_tree_lookup(&gmap->guest_to_host,
                                                   gaddr >> PMD_SHIFT);
        if (vmaddr) {
                vmaddr |= gaddr & ~PMD_MASK;
                /* Get pointer to the page table entry */
                ptep = get_locked_pte(gmap->mm, vmaddr, &ptl);
                if (likely(ptep))
                        ptep_zap_unused(gmap->mm, vmaddr, ptep, 0);
                pte_unmap_unlock(ptep, ptl);
        }
}
EXPORT_SYMBOL_GPL(__gmap_zap);

void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
{
        unsigned long gaddr, vmaddr, size;
        struct vm_area_struct *vma;

        down_read(&gmap->mm->mmap_sem);
        for (gaddr = from; gaddr < to;
             gaddr = (gaddr + PMD_SIZE) & PMD_MASK) {
                /* Find the vm address for the guest address */
                vmaddr = (unsigned long)
                        radix_tree_lookup(&gmap->guest_to_host,
                                          gaddr >> PMD_SHIFT);
                if (!vmaddr)
                        continue;
                vmaddr |= gaddr & ~PMD_MASK;
                /* Find vma in the parent mm */
                vma = find_vma(gmap->mm, vmaddr);
                if (!vma)
                        continue;
                size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
                zap_page_range(vma, vmaddr, size, NULL);
        }
        up_read(&gmap->mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(gmap_discard);
static LIST_HEAD(gmap_notifier_list);
static DEFINE_SPINLOCK(gmap_notifier_lock);

/**
 * gmap_register_pte_notifier - register a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_register_pte_notifier(struct gmap_notifier *nb)
{
        spin_lock(&gmap_notifier_lock);
        list_add_rcu(&nb->list, &gmap_notifier_list);
        spin_unlock(&gmap_notifier_lock);
}
EXPORT_SYMBOL_GPL(gmap_register_pte_notifier);
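/*
 * Example (an illustrative sketch; the callback body is hypothetical): a
 * minimal pte notifier. KVM's real callback kicks vcpus whose guarded
 * guest ranges fall into the invalidated interval.
 */
static void __maybe_unused example_notifier_call(struct gmap *gmap,
                                                 unsigned long start,
                                                 unsigned long end)
{
        /* react to the invalidation of guest range [start, end] in @gmap */
}

static struct gmap_notifier example_notifier __maybe_unused = {
        .notifier_call = example_notifier_call,
};
/* would be registered with gmap_register_pte_notifier(&example_notifier) */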
/**
 * gmap_unregister_pte_notifier - remove a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_unregister_pte_notifier(struct gmap_notifier *nb)
{
        spin_lock(&gmap_notifier_lock);
        list_del_rcu(&nb->list);
        spin_unlock(&gmap_notifier_lock);
        synchronize_rcu();
}
EXPORT_SYMBOL_GPL(gmap_unregister_pte_notifier);

/**
 * gmap_call_notifier - call all registered invalidation callbacks
 * @gmap: pointer to guest mapping meta data structure
 * @start: start virtual address in the guest address space
 * @end: end virtual address in the guest address space
 */
static void gmap_call_notifier(struct gmap *gmap, unsigned long start,
                               unsigned long end)
{
        struct gmap_notifier *nb;

        list_for_each_entry(nb, &gmap_notifier_list, list)
                nb->notifier_call(gmap, start, end);
}
/**
 * gmap_table_walk - walk the gmap page tables
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @level: page table level to stop at
 *
 * Returns a table entry pointer for the given guest address and @level
 * @level=0 : returns a pointer to a page table entry (or NULL)
 * @level=1 : returns a pointer to a segment table entry (or NULL)
 * @level=2 : returns a pointer to a region-3 table entry (or NULL)
 * @level=3 : returns a pointer to a region-2 table entry (or NULL)
 * @level=4 : returns a pointer to a region-1 table entry (or NULL)
 *
 * Returns NULL if the gmap page tables could not be walked to the
 * requested level.
 *
 * Note: Can also be called for shadow gmaps.
 */
static inline unsigned long *gmap_table_walk(struct gmap *gmap,
                                             unsigned long gaddr, int level)
{
        const int asce_type = gmap->asce & _ASCE_TYPE_MASK;
        unsigned long *table;

        if ((gmap->asce & _ASCE_TYPE_MASK) + 4 < (level * 4))
                return NULL;
        if (gmap_is_shadow(gmap) && gmap->removed)
                return NULL;

        if (asce_type != _ASCE_TYPE_REGION1 &&
            gaddr & (-1UL << (31 + (asce_type >> 2) * 11)))
                return NULL;

        table = gmap->table;
        switch (gmap->asce & _ASCE_TYPE_MASK) {
        case _ASCE_TYPE_REGION1:
                table += (gaddr >> 53) & 0x7ff;
                if (level == 4)
                        break;
                if (*table & _REGION_ENTRY_INVALID)
                        return NULL;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
        case _ASCE_TYPE_REGION2:
                table += (gaddr >> 42) & 0x7ff;
                if (level == 3)
                        break;
                if (*table & _REGION_ENTRY_INVALID)
                        return NULL;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
        case _ASCE_TYPE_REGION3:
                table += (gaddr >> 31) & 0x7ff;
                if (level == 2)
                        break;
                if (*table & _REGION_ENTRY_INVALID)
                        return NULL;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
        case _ASCE_TYPE_SEGMENT:
                table += (gaddr >> 20) & 0x7ff;
                if (level == 1)
                        break;
                if (*table & _REGION_ENTRY_INVALID)
                        return NULL;
                table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
                table += (gaddr >> 12) & 0xff;
        }
        return table;
}
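/*
 * Worked example (illustrative): for a 4-level (region-1 type) gmap, the
 * walk uses gaddr bits 63-53, 52-42, 41-31 and 30-20 as indices into the
 * region-1, region-2, region-3 and segment tables; @level selects where
 * to stop, e.g. @level=1 returns the segment table entry and @level=0
 * descends once more to the page table entry indexed by bits 19-12.
 */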
/**
 * gmap_pte_op_walk - walk the gmap page table, get the page table lock
 *                    and return the pte pointer
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @ptl: pointer to the spinlock pointer
 *
 * Returns a pointer to the locked pte for a guest address, or NULL
 *
 * Note: Can also be called for shadow gmaps.
 */
static pte_t *gmap_pte_op_walk(struct gmap *gmap, unsigned long gaddr,
                               spinlock_t **ptl)
{
        unsigned long *table;

        if (gmap_is_shadow(gmap))
                spin_lock(&gmap->guest_table_lock);
        /* Walk the gmap page table, lock and get pte pointer */
        table = gmap_table_walk(gmap, gaddr, 1); /* get segment pointer */
        if (!table || *table & _SEGMENT_ENTRY_INVALID) {
                if (gmap_is_shadow(gmap))
                        spin_unlock(&gmap->guest_table_lock);
                return NULL;
        }
        if (gmap_is_shadow(gmap)) {
                *ptl = &gmap->guest_table_lock;
                return pte_offset_map((pmd_t *) table, gaddr);
        }
        return pte_alloc_map_lock(gmap->mm, (pmd_t *) table, gaddr, ptl);
}

/**
 * gmap_pte_op_fixup - force a page in and connect the gmap page table
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @vmaddr: address in the host process address space
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 *
 * Returns 0 if the caller can retry __gmap_translate (might fail again),
 * -ENOMEM if out of memory and -EFAULT if anything goes wrong while fixing
 * up or connecting the gmap page table.
 */
static int gmap_pte_op_fixup(struct gmap *gmap, unsigned long gaddr,
                             unsigned long vmaddr, int prot)
{
        struct mm_struct *mm = gmap->mm;
        unsigned int fault_flags;
        bool unlocked = false;

        BUG_ON(gmap_is_shadow(gmap));
        fault_flags = (prot == PROT_WRITE) ? FAULT_FLAG_WRITE : 0;
        if (fixup_user_fault(current, mm, vmaddr, fault_flags, &unlocked))
                return -EFAULT;
        if (unlocked)
                /* lost mmap_sem, caller has to retry __gmap_translate */
                return 0;
        /* Connect the page tables */
        return __gmap_link(gmap, gaddr, vmaddr);
}

/**
 * gmap_pte_op_end - release the page table lock
 * @ptl: pointer to the spinlock pointer
 */
static void gmap_pte_op_end(spinlock_t *ptl)
{
        if (ptl)
                spin_unlock(ptl);
}
/**
 * gmap_protect_range - remove access rights to memory and set pgste bits
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @len: size of area
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 * @bits: pgste notification bits to set
 *
 * Returns 0 if successfully protected, -ENOMEM if out of memory and
 * -EFAULT if gaddr is invalid (or mapping for shadows is missing).
 *
 * Called with sg->mm->mmap_sem in read.
 *
 * Note: Can also be called for shadow gmaps.
 */
static int gmap_protect_range(struct gmap *gmap, unsigned long gaddr,
                              unsigned long len, int prot, unsigned long bits)
{
        unsigned long vmaddr;
        spinlock_t *ptl;
        pte_t *ptep;
        int rc;

        while (len) {
                rc = -EAGAIN;
                ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
                if (ptep) {
                        rc = ptep_force_prot(gmap->mm, gaddr, ptep, prot, bits);
                        gmap_pte_op_end(ptl);
                }
                if (rc) {
                        vmaddr = __gmap_translate(gmap, gaddr);
                        if (IS_ERR_VALUE(vmaddr))
                                return vmaddr;
                        rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr, prot);
                        if (rc)
                                return rc;
                        continue;
                }
                gaddr += PAGE_SIZE;
                len -= PAGE_SIZE;
        }
        return 0;
}

/**
 * gmap_mprotect_notify - change access rights for a range of ptes and
 *                        call the notifier if any pte changes again
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @len: size of area
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 *
 * Returns 0 if for each page in the given range a gmap mapping exists,
 * the new access rights could be set and the notifier could be armed.
 * If the gmap mapping is missing for one or more pages -EFAULT is
 * returned. If no memory could be allocated -ENOMEM is returned.
 * This function establishes missing page table entries.
 */
int gmap_mprotect_notify(struct gmap *gmap, unsigned long gaddr,
                         unsigned long len, int prot)
{
        int rc;

        if ((gaddr & ~PAGE_MASK) || (len & ~PAGE_MASK) || gmap_is_shadow(gmap))
                return -EINVAL;
        if (!MACHINE_HAS_ESOP && prot == PROT_READ)
                return -EINVAL;
        down_read(&gmap->mm->mmap_sem);
        rc = gmap_protect_range(gmap, gaddr, len, prot, PGSTE_IN_BIT);
        up_read(&gmap->mm->mmap_sem);
        return rc;
}
EXPORT_SYMBOL_GPL(gmap_mprotect_notify);
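/*
 * Example (an illustrative sketch; read-protecting a single page is a
 * hypothetical choice): arm the invalidation notifier for one guest page
 * so the callbacks registered above fire when its host mapping changes.
 */
static inline int __maybe_unused gmap_notify_one_page(struct gmap *gmap,
                                                      unsigned long gaddr)
{
        return gmap_mprotect_notify(gmap, gaddr & PAGE_MASK, PAGE_SIZE,
                                    PROT_READ);
}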
/**
 * gmap_read_table - get an unsigned long value from a guest page table using
 *                   absolute addressing, without marking the page referenced.
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @val: pointer to the unsigned long value to return
 *
 * Returns 0 if the value was read, -ENOMEM if out of memory and -EFAULT
 * if reading using the virtual address failed.
 *
 * Called with gmap->mm->mmap_sem in read.
 */
int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val)
{
        unsigned long address, vmaddr;
        spinlock_t *ptl;
        pte_t *ptep, pte;
        int rc;

        while (1) {
                rc = -EAGAIN;
                ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
                if (ptep) {
                        pte = *ptep;
                        if (pte_present(pte) && (pte_val(pte) & _PAGE_READ)) {
                                address = pte_val(pte) & PAGE_MASK;
                                address += gaddr & ~PAGE_MASK;
                                *val = *(unsigned long *) address;
                                pte_val(*ptep) |= _PAGE_YOUNG;
                                /* Do *NOT* clear the _PAGE_INVALID bit! */
                                rc = 0;
                        }
                        gmap_pte_op_end(ptl);
                }
                if (!rc)
                        break;
                vmaddr = __gmap_translate(gmap, gaddr);
                if (IS_ERR_VALUE(vmaddr)) {
                        rc = vmaddr;
                        break;
                }
                rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr, PROT_READ);
                if (rc)
                        break;
        }
        return rc;
}
EXPORT_SYMBOL_GPL(gmap_read_table);
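/*
 * Example (an illustrative sketch; the helper is hypothetical): fetch one
 * 8-byte entry from a guest DAT table. Retries and fault-in are handled
 * inside gmap_read_table(); only the mmap_sem convention is shown here.
 */
static inline int __maybe_unused gmap_read_entry(struct gmap *gmap,
                                                 unsigned long gaddr,
                                                 unsigned long *entry)
{
        int rc;

        down_read(&gmap->mm->mmap_sem);  /* gmap_read_table() expects it */
        rc = gmap_read_table(gmap, gaddr, entry);
        up_read(&gmap->mm->mmap_sem);
        return rc;
}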
/**
 * gmap_insert_rmap - add a rmap to the host_to_rmap radix tree
 * @sg: pointer to the shadow guest address space structure
 * @vmaddr: vm address associated with the rmap
 * @rmap: pointer to the rmap structure
 *
 * Called with the sg->guest_table_lock
 */
static inline void gmap_insert_rmap(struct gmap *sg, unsigned long vmaddr,
                                    struct gmap_rmap *rmap)
{
        void **slot;

        BUG_ON(!gmap_is_shadow(sg));
        slot = radix_tree_lookup_slot(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
        if (slot) {
                rmap->next = radix_tree_deref_slot_protected(slot,
                                                        &sg->guest_table_lock);
                radix_tree_replace_slot(slot, rmap);
        } else {
                rmap->next = NULL;
                radix_tree_insert(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT,
                                  rmap);
        }
}

/**
 * gmap_protect_rmap - modify access rights to memory and create an rmap
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow gmap
 * @paddr: address in the parent guest address space
 * @len: length of the memory area to protect
 * @prot: indicates access rights: none, read-only or read-write
 *
 * Returns 0 if successfully protected and the rmap was created, -ENOMEM
 * if out of memory and -EFAULT if paddr is invalid.
 */
static int gmap_protect_rmap(struct gmap *sg, unsigned long raddr,
                             unsigned long paddr, unsigned long len, int prot)
{
        struct gmap *parent;
        struct gmap_rmap *rmap;
        unsigned long vmaddr;
        spinlock_t *ptl;
        pte_t *ptep;
        int rc;

        BUG_ON(!gmap_is_shadow(sg));
        parent = sg->parent;
        while (len) {
                vmaddr = __gmap_translate(parent, paddr);
                if (IS_ERR_VALUE(vmaddr))
                        return vmaddr;
                rmap = kzalloc(sizeof(*rmap), GFP_KERNEL);
                if (!rmap)
                        return -ENOMEM;
                rmap->raddr = raddr;
                rc = radix_tree_preload(GFP_KERNEL);
                if (rc) {
                        kfree(rmap);
                        return rc;
                }
                rc = -EAGAIN;
                ptep = gmap_pte_op_walk(parent, paddr, &ptl);
                if (ptep) {
                        spin_lock(&sg->guest_table_lock);
                        rc = ptep_force_prot(parent->mm, paddr, ptep, prot,
                                             PGSTE_VSIE_BIT);
                        if (!rc)
                                gmap_insert_rmap(sg, vmaddr, rmap);
                        spin_unlock(&sg->guest_table_lock);
                        gmap_pte_op_end(ptl);
                }
                radix_tree_preload_end();
                if (rc) {
                        kfree(rmap);
                        rc = gmap_pte_op_fixup(parent, paddr, vmaddr, prot);
                        if (rc)
                                return rc;
                        continue;
                }
                paddr += PAGE_SIZE;
                len -= PAGE_SIZE;
        }
        return 0;
}
#define _SHADOW_RMAP_MASK       0x7
#define _SHADOW_RMAP_REGION1    0x5
#define _SHADOW_RMAP_REGION2    0x4
#define _SHADOW_RMAP_REGION3    0x3
#define _SHADOW_RMAP_SEGMENT    0x2
#define _SHADOW_RMAP_PGTABLE    0x1

/**
 * gmap_idte_one - invalidate a single region or segment table entry
 * @asce: region or segment table *origin* + table-type bits
 * @vaddr: virtual address to identify the table entry to flush
 *
 * The invalid bit of a single region or segment table entry is set
 * and the associated TLB entries depending on the entry are flushed.
 * The table-type of the @asce identifies the portion of the @vaddr
 * that is used as the invalidation index.
 */
static inline void gmap_idte_one(unsigned long asce, unsigned long vaddr)
{
        asm volatile(
                "       .insn   rrf,0xb98e0000,%0,%1,0,0"
                : : "a" (asce), "a" (vaddr) : "cc", "memory");
}
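/*
 * Note (added description): the .insn above emits IDTE (INVALIDATE DAT
 * TABLE ENTRY, opcode 0xb98e) with %0 = @asce and %1 = @vaddr; spelling
 * it as .insn avoids depending on assembler support for the mnemonic.
 */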
/**
 * gmap_unshadow_page - remove a page from a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_page(struct gmap *sg, unsigned long raddr)
{
        unsigned long *table;

        BUG_ON(!gmap_is_shadow(sg));
        table = gmap_table_walk(sg, raddr, 0); /* get page table pointer */
        if (!table || *table & _PAGE_INVALID)
                return;
        gmap_call_notifier(sg, raddr, raddr + (1UL << 12) - 1);
        ptep_unshadow_pte(sg->mm, raddr, (pte_t *) table);
}

/**
 * __gmap_unshadow_pgt - remove all entries from a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @pgt: pointer to the start of a shadow page table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr,
                                unsigned long *pgt)
{
        int i;

        BUG_ON(!gmap_is_shadow(sg));
        for (i = 0; i < 256; i++, raddr += 1UL << 12)
                pgt[i] = _PAGE_INVALID;
}

/**
 * gmap_unshadow_pgt - remove a shadow page table from a segment entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr)
{
        unsigned long sto, *ste, *pgt;
        struct page *page;

        BUG_ON(!gmap_is_shadow(sg));
        ste = gmap_table_walk(sg, raddr, 1); /* get segment pointer */
        if (!ste || !(*ste & _SEGMENT_ENTRY_ORIGIN))
                return;
        gmap_call_notifier(sg, raddr, raddr + (1UL << 20) - 1);
        sto = (unsigned long) (ste - ((raddr >> 20) & 0x7ff));
        gmap_idte_one(sto | _ASCE_TYPE_SEGMENT, raddr);
        pgt = (unsigned long *)(*ste & _SEGMENT_ENTRY_ORIGIN);
        *ste = _SEGMENT_ENTRY_EMPTY;
        __gmap_unshadow_pgt(sg, raddr, pgt);
        /* Free page table */
        page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT);
        list_del(&page->lru);
        page_table_free_pgste(page);
}
/**
 * __gmap_unshadow_sgt - remove all entries from a shadow segment table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @sgt: pointer to the start of a shadow segment table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr,
                                unsigned long *sgt)
{
        unsigned long asce, *pgt;
        struct page *page;
        int i;

        BUG_ON(!gmap_is_shadow(sg));
        asce = (unsigned long) sgt | _ASCE_TYPE_SEGMENT;
        for (i = 0; i < 2048; i++, raddr += 1UL << 20) {
                if (!(sgt[i] & _SEGMENT_ENTRY_ORIGIN))
                        continue;
                pgt = (unsigned long *)(sgt[i] & _REGION_ENTRY_ORIGIN);
                sgt[i] = _SEGMENT_ENTRY_EMPTY;
                __gmap_unshadow_pgt(sg, raddr, pgt);
                /* Free page table */
                page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT);
                list_del(&page->lru);
                page_table_free_pgste(page);
        }
}

/**
 * gmap_unshadow_sgt - remove a shadow segment table from a region-3 entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the shadow->guest_table_lock
 */
static void gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr)
{
        unsigned long r3o, *r3e, *sgt;
        struct page *page;

        BUG_ON(!gmap_is_shadow(sg));
        r3e = gmap_table_walk(sg, raddr, 2); /* get region-3 pointer */
        if (!r3e || !(*r3e & _REGION_ENTRY_ORIGIN))
                return;
        gmap_call_notifier(sg, raddr, raddr + (1UL << 31) - 1);
        r3o = (unsigned long) (r3e - ((raddr >> 31) & 0x7ff));
        gmap_idte_one(r3o | _ASCE_TYPE_REGION3, raddr);
        sgt = (unsigned long *)(*r3e & _REGION_ENTRY_ORIGIN);
        *r3e = _REGION3_ENTRY_EMPTY;
        __gmap_unshadow_sgt(sg, raddr, sgt);
        /* Free segment table */
        page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT);
        list_del(&page->lru);
        __free_pages(page, 2);
}

/**
 * __gmap_unshadow_r3t - remove all entries from a shadow region-3 table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: address in the shadow guest address space
 * @r3t: pointer to the start of a shadow region-3 table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr,
                                unsigned long *r3t)
{
        unsigned long asce, *sgt;
        struct page *page;
        int i;

        BUG_ON(!gmap_is_shadow(sg));
        asce = (unsigned long) r3t | _ASCE_TYPE_REGION3;
        for (i = 0; i < 2048; i++, raddr += 1UL << 31) {
                if (!(r3t[i] & _REGION_ENTRY_ORIGIN))
                        continue;
                sgt = (unsigned long *)(r3t[i] & _REGION_ENTRY_ORIGIN);
                r3t[i] = _REGION3_ENTRY_EMPTY;
                __gmap_unshadow_sgt(sg, raddr, sgt);
                /* Free segment table */
                page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT);
                list_del(&page->lru);
                __free_pages(page, 2);
        }
}
/**
 * gmap_unshadow_r3t - remove a shadow region-3 table from a region-2 entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr)
{
        unsigned long r2o, *r2e, *r3t;
        struct page *page;

        BUG_ON(!gmap_is_shadow(sg));
        r2e = gmap_table_walk(sg, raddr, 3); /* get region-2 pointer */
        if (!r2e || !(*r2e & _REGION_ENTRY_ORIGIN))
                return;
        gmap_call_notifier(sg, raddr, raddr + (1UL << 42) - 1);
        r2o = (unsigned long) (r2e - ((raddr >> 42) & 0x7ff));
        gmap_idte_one(r2o | _ASCE_TYPE_REGION2, raddr);
        r3t = (unsigned long *)(*r2e & _REGION_ENTRY_ORIGIN);
        *r2e = _REGION2_ENTRY_EMPTY;
        __gmap_unshadow_r3t(sg, raddr, r3t);
        /* Free region 3 table */
        page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT);
        list_del(&page->lru);
        __free_pages(page, 2);
}

/**
 * __gmap_unshadow_r2t - remove all entries from a shadow region-2 table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @r2t: pointer to the start of a shadow region-2 table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr,
                                unsigned long *r2t)
{
        unsigned long asce, *r3t;
        struct page *page;
        int i;

        BUG_ON(!gmap_is_shadow(sg));
        asce = (unsigned long) r2t | _ASCE_TYPE_REGION2;
        for (i = 0; i < 2048; i++, raddr += 1UL << 42) {
                if (!(r2t[i] & _REGION_ENTRY_ORIGIN))
                        continue;
                r3t = (unsigned long *)(r2t[i] & _REGION_ENTRY_ORIGIN);
                r2t[i] = _REGION2_ENTRY_EMPTY;
                __gmap_unshadow_r3t(sg, raddr, r3t);
                /* Free region 3 table */
                page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT);
                list_del(&page->lru);
                __free_pages(page, 2);
        }
}

/**
 * gmap_unshadow_r2t - remove a shadow region-2 table from a region-1 entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr)
{
        unsigned long r1o, *r1e, *r2t;
        struct page *page;

        BUG_ON(!gmap_is_shadow(sg));
        r1e = gmap_table_walk(sg, raddr, 4); /* get region-1 pointer */
        if (!r1e || !(*r1e & _REGION_ENTRY_ORIGIN))
                return;
        gmap_call_notifier(sg, raddr, raddr + (1UL << 53) - 1);
        r1o = (unsigned long) (r1e - ((raddr >> 53) & 0x7ff));
        gmap_idte_one(r1o | _ASCE_TYPE_REGION1, raddr);
        r2t = (unsigned long *)(*r1e & _REGION_ENTRY_ORIGIN);
        *r1e = _REGION1_ENTRY_EMPTY;
        __gmap_unshadow_r2t(sg, raddr, r2t);
        /* Free region 2 table */
        page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT);
        list_del(&page->lru);
        __free_pages(page, 2);
}
/**
 * __gmap_unshadow_r1t - remove all entries from a shadow region-1 table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @r1t: pointer to the start of a shadow region-1 table
 *
 * Called with the shadow->guest_table_lock
 */
static void __gmap_unshadow_r1t(struct gmap *sg, unsigned long raddr,
                                unsigned long *r1t)
{
        unsigned long asce, *r2t;
        struct page *page;
        int i;

        BUG_ON(!gmap_is_shadow(sg));
        asce = (unsigned long) r1t | _ASCE_TYPE_REGION1;
        for (i = 0; i < 2048; i++, raddr += 1UL << 53) {
                if (!(r1t[i] & _REGION_ENTRY_ORIGIN))
                        continue;
                r2t = (unsigned long *)(r1t[i] & _REGION_ENTRY_ORIGIN);
                __gmap_unshadow_r2t(sg, raddr, r2t);
                /* Clear entry and flush translation r1t -> r2t */
                gmap_idte_one(asce, raddr);
                r1t[i] = _REGION1_ENTRY_EMPTY;
                /* Free region 2 table */
                page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT);
                list_del(&page->lru);
                __free_pages(page, 2);
        }
}

/**
 * gmap_unshadow - remove a shadow page table completely
 * @sg: pointer to the shadow guest address space structure
 *
 * Called with sg->guest_table_lock
 */
static void gmap_unshadow(struct gmap *sg)
{
        unsigned long *table;

        BUG_ON(!gmap_is_shadow(sg));
        if (sg->removed)
                return;
        sg->removed = 1;
        gmap_call_notifier(sg, 0, -1UL);
        gmap_flush_tlb(sg);
        table = (unsigned long *)(sg->asce & _ASCE_ORIGIN);
        switch (sg->asce & _ASCE_TYPE_MASK) {
        case _ASCE_TYPE_REGION1:
                __gmap_unshadow_r1t(sg, 0, table);
                break;
        case _ASCE_TYPE_REGION2:
                __gmap_unshadow_r2t(sg, 0, table);
                break;
        case _ASCE_TYPE_REGION3:
                __gmap_unshadow_r3t(sg, 0, table);
                break;
        case _ASCE_TYPE_SEGMENT:
                __gmap_unshadow_sgt(sg, 0, table);
                break;
        }
}
/**
 * gmap_find_shadow - find a specific asce in the list of shadow tables
 * @parent: pointer to the parent gmap
 * @asce: ASCE for which the shadow table is created
 * @edat_level: edat level to be used for the shadow translation
 *
 * Returns the pointer to a gmap if a shadow table with the given asce is
 * already available, ERR_PTR(-EAGAIN) if another one is just being created,
 * otherwise NULL
 */
static struct gmap *gmap_find_shadow(struct gmap *parent, unsigned long asce,
                                     int edat_level)
{
        struct gmap *sg;

        list_for_each_entry(sg, &parent->children, list) {
                if (sg->orig_asce != asce || sg->edat_level != edat_level ||
                    sg->removed)
                        continue;
                if (!sg->initialized)
                        return ERR_PTR(-EAGAIN);
                atomic_inc(&sg->ref_count);
                return sg;
        }
        return NULL;
}

/**
 * gmap_shadow_valid - check if a shadow guest address space matches the
 *                     given properties and is still valid
 * @sg: pointer to the shadow guest address space structure
 * @asce: ASCE for which the shadow table is requested
 * @edat_level: edat level to be used for the shadow translation
 *
 * Returns 1 if the gmap shadow is still valid and matches the given
 * properties, the caller can continue using it. Returns 0 otherwise, the
 * caller has to request a new shadow gmap in this case.
 */
int gmap_shadow_valid(struct gmap *sg, unsigned long asce, int edat_level)
{
        if (sg->removed)
                return 0;
        return sg->orig_asce == asce && sg->edat_level == edat_level;
}
EXPORT_SYMBOL_GPL(gmap_shadow_valid);
/**
 * gmap_shadow - create/find a shadow guest address space
 * @parent: pointer to the parent gmap
 * @asce: ASCE for which the shadow table is created
 * @edat_level: edat level to be used for the shadow translation
 *
 * The pages of the top level page table referred by the asce parameter
 * will be set to read-only and marked in the PGSTEs of the kvm process.
 * The shadow table will be removed automatically on any change to the
 * PTE mapping for the source table.
 *
 * Returns a guest address space structure, ERR_PTR(-ENOMEM) if out of memory,
 * ERR_PTR(-EAGAIN) if the caller has to retry and ERR_PTR(-EFAULT) if the
 * parent gmap table could not be protected.
 */
struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce,
                         int edat_level)
{
        struct gmap *sg, *new;
        unsigned long limit;
        int rc;

        BUG_ON(gmap_is_shadow(parent));
        spin_lock(&parent->shadow_lock);
        sg = gmap_find_shadow(parent, asce, edat_level);
        spin_unlock(&parent->shadow_lock);
        if (sg)
                return sg;
        /* Create a new shadow gmap */
        limit = -1UL >> (33 - (((asce & _ASCE_TYPE_MASK) >> 2) * 11));
        if (asce & _ASCE_REAL_SPACE)
                limit = -1UL;
        new = gmap_alloc(limit);
        if (!new)
                return ERR_PTR(-ENOMEM);
        new->mm = parent->mm;
        new->parent = gmap_get(parent);
        new->orig_asce = asce;
        new->edat_level = edat_level;
        new->initialized = false;
        spin_lock(&parent->shadow_lock);
        /* Recheck if another CPU created the same shadow */
        sg = gmap_find_shadow(parent, asce, edat_level);
        if (sg) {
                spin_unlock(&parent->shadow_lock);
                gmap_free(new);
                return sg;
        }
        if (asce & _ASCE_REAL_SPACE) {
                /* only allow one real-space gmap shadow */
                list_for_each_entry(sg, &parent->children, list) {
                        if (sg->orig_asce & _ASCE_REAL_SPACE) {
                                spin_lock(&sg->guest_table_lock);
                                gmap_unshadow(sg);
                                spin_unlock(&sg->guest_table_lock);
                                list_del(&sg->list);
                                gmap_put(sg);
                                break;
                        }
                }
        }
        atomic_set(&new->ref_count, 2);
        list_add(&new->list, &parent->children);
        if (asce & _ASCE_REAL_SPACE) {
                /* nothing to protect, return right away */
                new->initialized = true;
                spin_unlock(&parent->shadow_lock);
                return new;
        }
        spin_unlock(&parent->shadow_lock);
        /* protect after insertion, so it will get properly invalidated */
        down_read(&parent->mm->mmap_sem);
        rc = gmap_protect_range(parent, asce & _ASCE_ORIGIN,
                                ((asce & _ASCE_TABLE_LENGTH) + 1) * 4096,
                                PROT_READ, PGSTE_VSIE_BIT);
        up_read(&parent->mm->mmap_sem);
        spin_lock(&parent->shadow_lock);
        new->initialized = true;
        if (rc) {
                list_del(&new->list);
                gmap_free(new);
                new = ERR_PTR(rc);
        }
        spin_unlock(&parent->shadow_lock);
        return new;
}
EXPORT_SYMBOL_GPL(gmap_shadow);
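/*
 * Example (an illustrative sketch of the vSIE creation path; the helper
 * and its error handling are hypothetical): get a shadow for a guest-2
 * provided ASCE and drop the returned reference when done.
 */
static inline int __maybe_unused gmap_shadow_sketch(struct gmap *parent,
                                                    unsigned long asce,
                                                    int edat_level)
{
        struct gmap *sg;

        sg = gmap_shadow(parent, asce, edat_level);
        if (IS_ERR(sg))
                return PTR_ERR(sg);     /* -EAGAIN asks the caller to retry */
        /* ... run the nested guest with sg->asce as its DAT origin ... */
        gmap_put(sg);                   /* drop the returned reference */
        return 0;
}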
/**
 * gmap_shadow_r2t - create an empty shadow region 2 table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @r2t: parent gmap address of the region 2 table to get shadowed
 * @fake: r2t references contiguous guest memory block, not a r2t
 *
 * The r2t parameter specifies the address of the source table. The
 * four pages of the source table are made read-only in the parent gmap
 * address space. A write to the source table area @r2t will automatically
 * remove the shadow r2 table and all of its descendants.
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
                    int fake)
{
        unsigned long raddr, origin, offset, len;
        unsigned long *s_r2t, *table;
        struct page *page;
        int rc;

        BUG_ON(!gmap_is_shadow(sg));
        /* Allocate a shadow region second table */
        page = alloc_pages(GFP_KERNEL, 2);
        if (!page)
                return -ENOMEM;
        page->index = r2t & _REGION_ENTRY_ORIGIN;
        if (fake)
                page->index |= GMAP_SHADOW_FAKE_TABLE;
        s_r2t = (unsigned long *) page_to_phys(page);
        /* Install shadow region second table */
        spin_lock(&sg->guest_table_lock);
        table = gmap_table_walk(sg, saddr, 4); /* get region-1 pointer */
        if (!table) {
                rc = -EAGAIN;           /* Race with unshadow */
                goto out_free;
        }
        if (!(*table & _REGION_ENTRY_INVALID)) {
                rc = 0;                 /* Already established */
                goto out_free;
        } else if (*table & _REGION_ENTRY_ORIGIN) {
                rc = -EAGAIN;           /* Race with shadow */
                goto out_free;
        }
        crst_table_init(s_r2t, _REGION2_ENTRY_EMPTY);
        /* mark as invalid as long as the parent table is not protected */
        *table = (unsigned long) s_r2t | _REGION_ENTRY_LENGTH |
                 _REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID;
        if (sg->edat_level >= 1)
                *table |= (r2t & _REGION_ENTRY_PROTECT);
        list_add(&page->lru, &sg->crst_list);
        if (fake) {
                /* nothing to protect for fake tables */
                *table &= ~_REGION_ENTRY_INVALID;
                spin_unlock(&sg->guest_table_lock);
                return 0;
        }
        spin_unlock(&sg->guest_table_lock);
        /* Make r2t read-only in parent gmap page table */
        raddr = (saddr & 0xffe0000000000000UL) | _SHADOW_RMAP_REGION1;
        origin = r2t & _REGION_ENTRY_ORIGIN;
        offset = ((r2t & _REGION_ENTRY_OFFSET) >> 6) * 4096;
        len = ((r2t & _REGION_ENTRY_LENGTH) + 1) * 4096 - offset;
        rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
        spin_lock(&sg->guest_table_lock);
        if (!rc) {
                table = gmap_table_walk(sg, saddr, 4);
                if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
                              (unsigned long) s_r2t)
                        rc = -EAGAIN;           /* Race with unshadow */
                else
                        *table &= ~_REGION_ENTRY_INVALID;
        } else {
                gmap_unshadow_r2t(sg, raddr);
        }
        spin_unlock(&sg->guest_table_lock);
        return rc;
out_free:
        spin_unlock(&sg->guest_table_lock);
        __free_pages(page, 2);
        return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_r2t);
/**
 * gmap_shadow_r3t - create a shadow region 3 table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @r3t: parent gmap address of the region 3 table to get shadowed
 * @fake: r3t references contiguous guest memory block, not a r3t
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
                    int fake)
{
        unsigned long raddr, origin, offset, len;
        unsigned long *s_r3t, *table;
        struct page *page;
        int rc;

        BUG_ON(!gmap_is_shadow(sg));
        /* Allocate a shadow region third table */
        page = alloc_pages(GFP_KERNEL, 2);
        if (!page)
                return -ENOMEM;
        page->index = r3t & _REGION_ENTRY_ORIGIN;
        if (fake)
                page->index |= GMAP_SHADOW_FAKE_TABLE;
        s_r3t = (unsigned long *) page_to_phys(page);
        /* Install shadow region third table */
        spin_lock(&sg->guest_table_lock);
        table = gmap_table_walk(sg, saddr, 3); /* get region-2 pointer */
        if (!table) {
                rc = -EAGAIN;           /* Race with unshadow */
                goto out_free;
        }
        if (!(*table & _REGION_ENTRY_INVALID)) {
                rc = 0;                 /* Already established */
                goto out_free;
        } else if (*table & _REGION_ENTRY_ORIGIN) {
                rc = -EAGAIN;           /* Race with shadow */
                goto out_free;
        }
        crst_table_init(s_r3t, _REGION3_ENTRY_EMPTY);
        /* mark as invalid as long as the parent table is not protected */
        *table = (unsigned long) s_r3t | _REGION_ENTRY_LENGTH |
                 _REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID;
        if (sg->edat_level >= 1)
                *table |= (r3t & _REGION_ENTRY_PROTECT);
        list_add(&page->lru, &sg->crst_list);
        if (fake) {
                /* nothing to protect for fake tables */
                *table &= ~_REGION_ENTRY_INVALID;
                spin_unlock(&sg->guest_table_lock);
                return 0;
        }
        spin_unlock(&sg->guest_table_lock);
        /* Make r3t read-only in parent gmap page table */
        raddr = (saddr & 0xfffffc0000000000UL) | _SHADOW_RMAP_REGION2;
        origin = r3t & _REGION_ENTRY_ORIGIN;
        offset = ((r3t & _REGION_ENTRY_OFFSET) >> 6) * 4096;
        len = ((r3t & _REGION_ENTRY_LENGTH) + 1) * 4096 - offset;
        rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
        spin_lock(&sg->guest_table_lock);
        if (!rc) {
                table = gmap_table_walk(sg, saddr, 3);
                if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
                              (unsigned long) s_r3t)
                        rc = -EAGAIN;           /* Race with unshadow */
                else
                        *table &= ~_REGION_ENTRY_INVALID;
        } else {
                gmap_unshadow_r3t(sg, raddr);
        }
        spin_unlock(&sg->guest_table_lock);
        return rc;
out_free:
        spin_unlock(&sg->guest_table_lock);
        __free_pages(page, 2);
        return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_r3t);
/**
 * gmap_shadow_sgt - create a shadow segment table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @sgt: parent gmap address of the segment table to get shadowed
 * @fake: sgt references contiguous guest memory block, not a sgt
 *
 * Returns: 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
                    int fake)
{
        unsigned long raddr, origin, offset, len;
        unsigned long *s_sgt, *table;
        struct page *page;
        int rc;

        BUG_ON(!gmap_is_shadow(sg) || (sgt & _REGION3_ENTRY_LARGE));
        /* Allocate a shadow segment table */
        page = alloc_pages(GFP_KERNEL, 2);
        if (!page)
                return -ENOMEM;
        page->index = sgt & _REGION_ENTRY_ORIGIN;
        if (fake)
                page->index |= GMAP_SHADOW_FAKE_TABLE;
        s_sgt = (unsigned long *) page_to_phys(page);
        /* Install shadow segment table */
        spin_lock(&sg->guest_table_lock);
        table = gmap_table_walk(sg, saddr, 2); /* get region-3 pointer */
        if (!table) {
                rc = -EAGAIN;           /* Race with unshadow */
                goto out_free;
        }
        if (!(*table & _REGION_ENTRY_INVALID)) {
                rc = 0;                 /* Already established */
                goto out_free;
        } else if (*table & _REGION_ENTRY_ORIGIN) {
                rc = -EAGAIN;           /* Race with shadow */
                goto out_free;
        }
        crst_table_init(s_sgt, _SEGMENT_ENTRY_EMPTY);
        /* mark as invalid as long as the parent table is not protected */
        *table = (unsigned long) s_sgt | _REGION_ENTRY_LENGTH |
                 _REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID;
        if (sg->edat_level >= 1)
                *table |= sgt & _REGION_ENTRY_PROTECT;
        list_add(&page->lru, &sg->crst_list);
        if (fake) {
                /* nothing to protect for fake tables */
                *table &= ~_REGION_ENTRY_INVALID;
                spin_unlock(&sg->guest_table_lock);
                return 0;
        }
        spin_unlock(&sg->guest_table_lock);
        /* Make sgt read-only in parent gmap page table */
        raddr = (saddr & 0xffffffff80000000UL) | _SHADOW_RMAP_REGION3;
        origin = sgt & _REGION_ENTRY_ORIGIN;
        offset = ((sgt & _REGION_ENTRY_OFFSET) >> 6) * 4096;
        len = ((sgt & _REGION_ENTRY_LENGTH) + 1) * 4096 - offset;
        rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
        spin_lock(&sg->guest_table_lock);
        if (!rc) {
                table = gmap_table_walk(sg, saddr, 2);
                if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
                              (unsigned long) s_sgt)
                        rc = -EAGAIN;           /* Race with unshadow */
                else
                        *table &= ~_REGION_ENTRY_INVALID;
        } else {
                gmap_unshadow_sgt(sg, raddr);
        }
        spin_unlock(&sg->guest_table_lock);
        return rc;
out_free:
        spin_unlock(&sg->guest_table_lock);
        __free_pages(page, 2);
        return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_sgt);
/**
 * gmap_shadow_pgt_lookup - find a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: the address in the shadow guest address space
 * @pgt: parent gmap address of the page table to get shadowed
 * @dat_protection: if the pgtable is marked as protected by dat
 * @fake: pgt references contiguous guest memory block, not a pgtable
 *
 * Returns 0 if the shadow page table was found and -EAGAIN if the page
 * table was not found.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr,
                           unsigned long *pgt, int *dat_protection,
                           int *fake)
{
        unsigned long *table;
        struct page *page;
        int rc;

        BUG_ON(!gmap_is_shadow(sg));
        spin_lock(&sg->guest_table_lock);
        table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
        if (table && !(*table & _SEGMENT_ENTRY_INVALID)) {
                /* Shadow page tables are full pages (pte+pgste) */
                page = pfn_to_page(*table >> PAGE_SHIFT);
                *pgt = page->index & ~GMAP_SHADOW_FAKE_TABLE;
                *dat_protection = !!(*table & _SEGMENT_ENTRY_PROTECT);
                *fake = !!(page->index & GMAP_SHADOW_FAKE_TABLE);
                rc = 0;
        } else {
                rc = -EAGAIN;
        }
        spin_unlock(&sg->guest_table_lock);
        return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_pgt_lookup);
/**
 * gmap_shadow_pgt - instantiate a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @pgt: parent gmap address of the page table to get shadowed
 * @fake: pgt references contiguous guest memory block, not a pgtable
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory,
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with gmap->mm->mmap_sem in read
 */
int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
                    int fake)
{
        unsigned long raddr, origin;
        unsigned long *s_pgt, *table;
        struct page *page;
        int rc;

        BUG_ON(!gmap_is_shadow(sg) || (pgt & _SEGMENT_ENTRY_LARGE));
        /* Allocate a shadow page table */
        page = page_table_alloc_pgste(sg->mm);
        if (!page)
                return -ENOMEM;
        page->index = pgt & _SEGMENT_ENTRY_ORIGIN;
        if (fake)
                page->index |= GMAP_SHADOW_FAKE_TABLE;
        s_pgt = (unsigned long *) page_to_phys(page);
        /* Install shadow page table */
        spin_lock(&sg->guest_table_lock);
        table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
        if (!table) {
                rc = -EAGAIN;           /* Race with unshadow */
                goto out_free;
        }
        if (!(*table & _SEGMENT_ENTRY_INVALID)) {
                rc = 0;                 /* Already established */
                goto out_free;
        } else if (*table & _SEGMENT_ENTRY_ORIGIN) {
                rc = -EAGAIN;           /* Race with shadow */
                goto out_free;
        }
        /* mark as invalid as long as the parent table is not protected */
        *table = (unsigned long) s_pgt | _SEGMENT_ENTRY |
                 (pgt & _SEGMENT_ENTRY_PROTECT) | _SEGMENT_ENTRY_INVALID;
        list_add(&page->lru, &sg->pt_list);
        if (fake) {
                /* nothing to protect for fake tables */
                *table &= ~_SEGMENT_ENTRY_INVALID;
                spin_unlock(&sg->guest_table_lock);
                return 0;
        }
        spin_unlock(&sg->guest_table_lock);
        /* Make pgt read-only in parent gmap page table (not the pgste) */
        raddr = (saddr & 0xfffffffffff00000UL) | _SHADOW_RMAP_SEGMENT;
        origin = pgt & _SEGMENT_ENTRY_ORIGIN & PAGE_MASK;
        rc = gmap_protect_rmap(sg, raddr, origin, PAGE_SIZE, PROT_READ);
        spin_lock(&sg->guest_table_lock);
        if (!rc) {
                table = gmap_table_walk(sg, saddr, 1);
                if (!table || (*table & _SEGMENT_ENTRY_ORIGIN) !=
                              (unsigned long) s_pgt)
                        rc = -EAGAIN;           /* Race with unshadow */
                else
                        *table &= ~_SEGMENT_ENTRY_INVALID;
        } else {
                gmap_unshadow_pgt(sg, raddr);
        }
        spin_unlock(&sg->guest_table_lock);
        return rc;
out_free:
        spin_unlock(&sg->guest_table_lock);
        page_table_free_pgste(page);
        return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_pgt);
/**
 * gmap_shadow_page - create a shadow page mapping
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @pte: pte in parent gmap address space to get shadowed
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte)
{
        struct gmap *parent;
        struct gmap_rmap *rmap;
        unsigned long vmaddr, paddr;
        spinlock_t *ptl;
        pte_t *sptep, *tptep;
        int prot;
        int rc;

        BUG_ON(!gmap_is_shadow(sg));
        parent = sg->parent;
        prot = (pte_val(pte) & _PAGE_PROTECT) ? PROT_READ : PROT_WRITE;

        rmap = kzalloc(sizeof(*rmap), GFP_KERNEL);
        if (!rmap)
                return -ENOMEM;
        rmap->raddr = (saddr & PAGE_MASK) | _SHADOW_RMAP_PGTABLE;

        while (1) {
                paddr = pte_val(pte) & PAGE_MASK;
                vmaddr = __gmap_translate(parent, paddr);
                if (IS_ERR_VALUE(vmaddr)) {
                        rc = vmaddr;
                        break;
                }
                rc = radix_tree_preload(GFP_KERNEL);
                if (rc)
                        break;
                rc = -EAGAIN;
                sptep = gmap_pte_op_walk(parent, paddr, &ptl);
                if (sptep) {
                        spin_lock(&sg->guest_table_lock);
                        /* Get page table pointer */
                        tptep = (pte_t *) gmap_table_walk(sg, saddr, 0);
                        if (!tptep) {
                                spin_unlock(&sg->guest_table_lock);
                                gmap_pte_op_end(ptl);
                                radix_tree_preload_end();
                                break;
                        }
                        rc = ptep_shadow_pte(sg->mm, saddr, sptep, tptep, pte);
                        if (rc > 0) {
                                /* Success and a new mapping */
                                gmap_insert_rmap(sg, vmaddr, rmap);
                                rmap = NULL;
                                rc = 0;
                        }
                        gmap_pte_op_end(ptl);
                        spin_unlock(&sg->guest_table_lock);
                }
                radix_tree_preload_end();
                if (!rc)
                        break;
                rc = gmap_pte_op_fixup(parent, paddr, vmaddr, prot);
                if (rc)
                        break;
        }
        kfree(rmap);
        return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_page);
/**
 * gmap_shadow_notify - handle notifications for shadow gmap
 *
 * Called with sg->parent->shadow_lock.
 */
static void gmap_shadow_notify(struct gmap *sg, unsigned long vmaddr,
                               unsigned long offset, pte_t *pte)
{
        struct gmap_rmap *rmap, *rnext, *head;
        unsigned long gaddr, start, end, bits, raddr;
        unsigned long *table;

        BUG_ON(!gmap_is_shadow(sg));
        spin_lock(&sg->parent->guest_table_lock);
        table = radix_tree_lookup(&sg->parent->host_to_guest,
                                  vmaddr >> PMD_SHIFT);
        gaddr = table ? __gmap_segment_gaddr(table) + offset : 0;
        spin_unlock(&sg->parent->guest_table_lock);
        if (!table)
                return;

        spin_lock(&sg->guest_table_lock);
        if (sg->removed) {
                spin_unlock(&sg->guest_table_lock);
                return;
        }
        /* Check for top level table */
        start = sg->orig_asce & _ASCE_ORIGIN;
        end = start + ((sg->orig_asce & _ASCE_TABLE_LENGTH) + 1) * 4096;
        if (!(sg->orig_asce & _ASCE_REAL_SPACE) && gaddr >= start &&
            gaddr < end) {
                /* The complete shadow table has to go */
                gmap_unshadow(sg);
                spin_unlock(&sg->guest_table_lock);
                list_del(&sg->list);
                gmap_put(sg);
                return;
        }
        /* Remove the page table tree from one specific entry */
        head = radix_tree_delete(&sg->host_to_rmap, vmaddr >> 12);
        gmap_for_each_rmap_safe(rmap, rnext, head) {
                bits = rmap->raddr & _SHADOW_RMAP_MASK;
                raddr = rmap->raddr ^ bits;
                switch (bits) {
                case _SHADOW_RMAP_REGION1:
                        gmap_unshadow_r2t(sg, raddr);
                        break;
                case _SHADOW_RMAP_REGION2:
                        gmap_unshadow_r3t(sg, raddr);
                        break;
                case _SHADOW_RMAP_REGION3:
                        gmap_unshadow_sgt(sg, raddr);
                        break;
                case _SHADOW_RMAP_SEGMENT:
                        gmap_unshadow_pgt(sg, raddr);
                        break;
                case _SHADOW_RMAP_PGTABLE:
                        gmap_unshadow_page(sg, raddr);
                        break;
                }
                kfree(rmap);
        }
        spin_unlock(&sg->guest_table_lock);
}
/**
 * ptep_notify - call all invalidation callbacks for a specific pte.
 * @mm: pointer to the process mm_struct
 * @vmaddr: virtual address in the process address space
 * @pte: pointer to the page table entry
 * @bits: bits from the pgste that caused the notify call
 *
 * This function is assumed to be called with the page table lock held
 * for the pte to notify.
 */
void ptep_notify(struct mm_struct *mm, unsigned long vmaddr,
                 pte_t *pte, unsigned long bits)
{
        unsigned long offset, gaddr;
        unsigned long *table;
        struct gmap *gmap, *sg, *next;

        offset = ((unsigned long) pte) & (255 * sizeof(pte_t));
        offset = offset * (4096 / sizeof(pte_t));
        rcu_read_lock();
        list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
                if (!list_empty(&gmap->children) && (bits & PGSTE_VSIE_BIT)) {
                        spin_lock(&gmap->shadow_lock);
                        list_for_each_entry_safe(sg, next,
                                                 &gmap->children, list)
                                gmap_shadow_notify(sg, vmaddr, offset, pte);
                        spin_unlock(&gmap->shadow_lock);
                }
                if (!(bits & PGSTE_IN_BIT))
                        continue;
                spin_lock(&gmap->guest_table_lock);
                table = radix_tree_lookup(&gmap->host_to_guest,
                                          vmaddr >> PMD_SHIFT);
                if (table)
                        gaddr = __gmap_segment_gaddr(table) + offset;
                spin_unlock(&gmap->guest_table_lock);
                if (table)
                        gmap_call_notifier(gmap, gaddr, gaddr + PAGE_SIZE - 1);
        }
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(ptep_notify);
static inline void thp_split_mm(struct mm_struct *mm)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
        struct vm_area_struct *vma;
        unsigned long addr;

        for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
                for (addr = vma->vm_start;
                     addr < vma->vm_end;
                     addr += PAGE_SIZE)
                        follow_page(vma, addr, FOLL_SPLIT);
                vma->vm_flags &= ~VM_HUGEPAGE;
                vma->vm_flags |= VM_NOHUGEPAGE;
        }
        mm->def_flags |= VM_NOHUGEPAGE;
#endif
}

/*
 * Remove all empty zero pages from the mapping for lazy refaulting
 * - This must be called after mm->context.has_pgste is set, to avoid
 *   future creation of zero pages
 * - This must be called after THP was enabled
 */
static int __zap_zero_pages(pmd_t *pmd, unsigned long start,
                            unsigned long end, struct mm_walk *walk)
{
        unsigned long addr;

        for (addr = start; addr != end; addr += PAGE_SIZE) {
                pte_t *ptep;
                spinlock_t *ptl;

                ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
                if (is_zero_pfn(pte_pfn(*ptep)))
                        ptep_xchg_direct(walk->mm, addr, ptep, __pte(_PAGE_INVALID));
                pte_unmap_unlock(ptep, ptl);
        }
        return 0;
}

static inline void zap_zero_pages(struct mm_struct *mm)
{
        struct mm_walk walk = { .pmd_entry = __zap_zero_pages };

        walk.mm = mm;
        walk_page_range(0, TASK_SIZE, &walk);
}
/*
 * switch on pgstes for its userspace process (for kvm)
 */
int s390_enable_sie(void)
{
        struct mm_struct *mm = current->mm;

        /* Do we have pgstes? if yes, we are done */
        if (mm_has_pgste(mm))
                return 0;
        /* Fail if the page tables are 2K */
        if (!mm_alloc_pgste(mm))
                return -EINVAL;
        down_write(&mm->mmap_sem);
        mm->context.has_pgste = 1;
        /* split thp mappings and disable thp for future mappings */
        thp_split_mm(mm);
        zap_zero_pages(mm);
        up_write(&mm->mmap_sem);
        return 0;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);
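/*
 * Note (an assumption about calling order, matching how KVM uses this
 * interface): s390_enable_sie() must have succeeded for an mm before
 * gmap_create() is used on it, since gmap operations rely on the 4K
 * page-table-plus-pgste layout that is switched on here.
 */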
/*
 * Enable storage key handling from now on and initialize the storage
 * keys with the default key.
 */
static int __s390_enable_skey(pte_t *pte, unsigned long addr,
                              unsigned long next, struct mm_walk *walk)
{
        /* Clear storage key */
        ptep_zap_key(walk->mm, addr, pte);
        return 0;
}

int s390_enable_skey(void)
{
        struct mm_walk walk = { .pte_entry = __s390_enable_skey };
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        int rc = 0;

        down_write(&mm->mmap_sem);
        if (mm_use_skey(mm))
                goto out_up;

        mm->context.use_skey = 1;
        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                if (ksm_madvise(vma, vma->vm_start, vma->vm_end,
                                MADV_UNMERGEABLE, &vma->vm_flags)) {
                        mm->context.use_skey = 0;
                        rc = -ENOMEM;
                        goto out_up;
                }
        }
        mm->def_flags &= ~VM_MERGEABLE;

        walk.mm = mm;
        walk_page_range(0, TASK_SIZE, &walk);

out_up:
        up_write(&mm->mmap_sem);
        return rc;
}
EXPORT_SYMBOL_GPL(s390_enable_skey);

/*
 * Reset CMMA state, make all pages stable again.
 */
static int __s390_reset_cmma(pte_t *pte, unsigned long addr,
                             unsigned long next, struct mm_walk *walk)
{
        ptep_zap_unused(walk->mm, addr, pte, 1);
        return 0;
}

void s390_reset_cmma(struct mm_struct *mm)
{
        struct mm_walk walk = { .pte_entry = __s390_reset_cmma };

        down_write(&mm->mmap_sem);
        walk.mm = mm;
        walk_page_range(0, TASK_SIZE, &walk);
        up_write(&mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(s390_reset_cmma);