1 // SPDX-License-Identifier: GPL-2.0
2 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
5 #include <linux/sched.h>
6 #include <linux/sched/mm.h>
7 #include <linux/sched/coredump.h>
8 #include <linux/mmu_notifier.h>
9 #include <linux/rmap.h>
10 #include <linux/swap.h>
11 #include <linux/mm_inline.h>
12 #include <linux/kthread.h>
13 #include <linux/khugepaged.h>
14 #include <linux/freezer.h>
15 #include <linux/mman.h>
16 #include <linux/hashtable.h>
17 #include <linux/userfaultfd_k.h>
18 #include <linux/page_idle.h>
19 #include <linux/swapops.h>
20 #include <linux/shmem_fs.h>
23 #include <asm/pgalloc.h>
32 SCAN_EXCEED_SHARED_PTE,
36 SCAN_LACK_REFERENCED_PAGE,
50 SCAN_ALLOC_HUGE_PAGE_FAIL,
51 SCAN_CGROUP_CHARGE_FAIL,
53 SCAN_PAGE_HAS_PRIVATE,
56 #define CREATE_TRACE_POINTS
57 #include <trace/events/huge_memory.h>
59 static struct task_struct *khugepaged_thread __read_mostly;
60 static DEFINE_MUTEX(khugepaged_mutex);
62 /* default: scan 8*512 ptes (or vmas) every 10 seconds */
63 static unsigned int khugepaged_pages_to_scan __read_mostly;
64 static unsigned int khugepaged_pages_collapsed;
65 static unsigned int khugepaged_full_scans;
66 static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
67 /* during fragmentation poll the hugepage allocator once every minute */
68 static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
69 static unsigned long khugepaged_sleep_expire;
70 static DEFINE_SPINLOCK(khugepaged_mm_lock);
71 static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
73  * By default, collapse hugepages if there is at least one pte mapped as
74  * it would have been had the vma been large enough during page
77 static unsigned int khugepaged_max_ptes_none __read_mostly;
78 static unsigned int khugepaged_max_ptes_swap __read_mostly;
79 static unsigned int khugepaged_max_ptes_shared __read_mostly;
81 #define MM_SLOTS_HASH_BITS 10
82 static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
84 static struct kmem_cache *mm_slot_cache __read_mostly;
86 #define MAX_PTE_MAPPED_THP 8
89 * struct mm_slot - hash lookup from mm to mm_slot
90 * @hash: hash collision list
91 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
92 * @mm: the mm that this information is valid for
95 struct hlist_node hash;
96 struct list_head mm_node;
99 /* pte-mapped THP in this mm */
100 int nr_pte_mapped_thp;
101 unsigned long pte_mapped_thp[MAX_PTE_MAPPED_THP];
105 * struct khugepaged_scan - cursor for scanning
106 * @mm_head: the head of the mm list to scan
107 * @mm_slot: the current mm_slot we are scanning
108 * @address: the next address inside that to be scanned
110 * There is only the one khugepaged_scan instance of this cursor structure.
112 struct khugepaged_scan {
113 struct list_head mm_head;
114 struct mm_slot *mm_slot;
115 unsigned long address;
118 static struct khugepaged_scan khugepaged_scan = {
119 .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
123 static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
124 struct kobj_attribute *attr,
127 return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
130 static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
131 struct kobj_attribute *attr,
132 const char *buf, size_t count)
137 err = kstrtoul(buf, 10, &msecs);
138 if (err || msecs > UINT_MAX)
141 khugepaged_scan_sleep_millisecs = msecs;
142 khugepaged_sleep_expire = 0;
143 wake_up_interruptible(&khugepaged_wait);
147 static struct kobj_attribute scan_sleep_millisecs_attr =
148 __ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
149 scan_sleep_millisecs_store);
151 static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
152 struct kobj_attribute *attr,
155 return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
158 static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
159 struct kobj_attribute *attr,
160 const char *buf, size_t count)
165 err = kstrtoul(buf, 10, &msecs);
166 if (err || msecs > UINT_MAX)
169 khugepaged_alloc_sleep_millisecs = msecs;
170 khugepaged_sleep_expire = 0;
171 wake_up_interruptible(&khugepaged_wait);
175 static struct kobj_attribute alloc_sleep_millisecs_attr =
176 __ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
177 alloc_sleep_millisecs_store);
179 static ssize_t pages_to_scan_show(struct kobject *kobj,
180 struct kobj_attribute *attr,
183 return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
185 static ssize_t pages_to_scan_store(struct kobject *kobj,
186 struct kobj_attribute *attr,
187 const char *buf, size_t count)
192 err = kstrtoul(buf, 10, &pages);
193 if (err || !pages || pages > UINT_MAX)
196 khugepaged_pages_to_scan = pages;
200 static struct kobj_attribute pages_to_scan_attr =
201 __ATTR(pages_to_scan, 0644, pages_to_scan_show,
202 pages_to_scan_store);
204 static ssize_t pages_collapsed_show(struct kobject *kobj,
205 struct kobj_attribute *attr,
208 return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
210 static struct kobj_attribute pages_collapsed_attr =
211 __ATTR_RO(pages_collapsed);
213 static ssize_t full_scans_show(struct kobject *kobj,
214 struct kobj_attribute *attr,
217 return sprintf(buf, "%u\n", khugepaged_full_scans);
219 static struct kobj_attribute full_scans_attr =
220 __ATTR_RO(full_scans);
222 static ssize_t khugepaged_defrag_show(struct kobject *kobj,
223 struct kobj_attribute *attr, char *buf)
225 return single_hugepage_flag_show(kobj, attr, buf,
226 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
228 static ssize_t khugepaged_defrag_store(struct kobject *kobj,
229 struct kobj_attribute *attr,
230 const char *buf, size_t count)
232 return single_hugepage_flag_store(kobj, attr, buf, count,
233 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
235 static struct kobj_attribute khugepaged_defrag_attr =
236 __ATTR(defrag, 0644, khugepaged_defrag_show,
237 khugepaged_defrag_store);
240  * max_ptes_none controls whether khugepaged should collapse hugepages over
241  * any unmapped ptes, in turn potentially increasing the memory
242  * footprint of the vmas. When max_ptes_none is 0, khugepaged will not
243 * reduce the available free memory in the system as it
244 * runs. Increasing max_ptes_none will instead potentially reduce the
245 * free memory in the system during the khugepaged scan.
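/*
 * Illustrative userspace sketch, not part of the original source: this
 * tunable and the siblings below are exposed (on kernels built with
 * CONFIG_SYSFS and THP) at
 * /sys/kernel/mm/transparent_hugepage/khugepaged/max_ptes_none.
 * A minimal program that sets it to 0, so khugepaged never grows a
 * task's memory footprint, could look like this (needs root):
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		const char *knob = "/sys/kernel/mm/transparent_hugepage"
 *				   "/khugepaged/max_ptes_none";
 *		FILE *f = fopen(knob, "w");
 *
 *		if (!f) {
 *			perror("fopen");
 *			return 1;
 *		}
 *		fprintf(f, "0\n");	// accepted range: 0 .. HPAGE_PMD_NR - 1
 *		return fclose(f) != 0;
 *	}
 */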
247 static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
248 struct kobj_attribute *attr,
251 return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
253 static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
254 struct kobj_attribute *attr,
255 const char *buf, size_t count)
258 unsigned long max_ptes_none;
260 err = kstrtoul(buf, 10, &max_ptes_none);
261 if (err || max_ptes_none > HPAGE_PMD_NR-1)
264 khugepaged_max_ptes_none = max_ptes_none;
268 static struct kobj_attribute khugepaged_max_ptes_none_attr =
269 __ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
270 khugepaged_max_ptes_none_store);
272 static ssize_t khugepaged_max_ptes_swap_show(struct kobject *kobj,
273 struct kobj_attribute *attr,
276 return sprintf(buf, "%u\n", khugepaged_max_ptes_swap);
279 static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj,
280 struct kobj_attribute *attr,
281 const char *buf, size_t count)
284 unsigned long max_ptes_swap;
286 err = kstrtoul(buf, 10, &max_ptes_swap);
287 if (err || max_ptes_swap > HPAGE_PMD_NR-1)
290 khugepaged_max_ptes_swap = max_ptes_swap;
295 static struct kobj_attribute khugepaged_max_ptes_swap_attr =
296 __ATTR(max_ptes_swap, 0644, khugepaged_max_ptes_swap_show,
297 khugepaged_max_ptes_swap_store);
299 static ssize_t khugepaged_max_ptes_shared_show(struct kobject *kobj,
300 struct kobj_attribute *attr,
303 return sprintf(buf, "%u\n", khugepaged_max_ptes_shared);
306 static ssize_t khugepaged_max_ptes_shared_store(struct kobject *kobj,
307 struct kobj_attribute *attr,
308 const char *buf, size_t count)
311 unsigned long max_ptes_shared;
313 err = kstrtoul(buf, 10, &max_ptes_shared);
314 if (err || max_ptes_shared > HPAGE_PMD_NR-1)
317 khugepaged_max_ptes_shared = max_ptes_shared;
322 static struct kobj_attribute khugepaged_max_ptes_shared_attr =
323 __ATTR(max_ptes_shared, 0644, khugepaged_max_ptes_shared_show,
324 khugepaged_max_ptes_shared_store);
326 static struct attribute *khugepaged_attr[] = {
327 &khugepaged_defrag_attr.attr,
328 &khugepaged_max_ptes_none_attr.attr,
329 &khugepaged_max_ptes_swap_attr.attr,
330 &khugepaged_max_ptes_shared_attr.attr,
331 &pages_to_scan_attr.attr,
332 &pages_collapsed_attr.attr,
333 &full_scans_attr.attr,
334 &scan_sleep_millisecs_attr.attr,
335 &alloc_sleep_millisecs_attr.attr,
339 struct attribute_group khugepaged_attr_group = {
340 .attrs = khugepaged_attr,
341 .name = "khugepaged",
343 #endif /* CONFIG_SYSFS */
345 int hugepage_madvise(struct vm_area_struct *vma,
346 unsigned long *vm_flags, int advice)
352 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
353 * can't handle this properly after s390_enable_sie, so we simply
354 * ignore the madvise to prevent qemu from causing a SIGSEGV.
356 if (mm_has_pgste(vma->vm_mm))
359 *vm_flags &= ~VM_NOHUGEPAGE;
360 *vm_flags |= VM_HUGEPAGE;
362  * If the vma becomes good for khugepaged to scan,
363  * register it here without waiting for a page fault that
364  * may not happen any time soon.
366 if (!(*vm_flags & VM_NO_KHUGEPAGED) &&
367 khugepaged_enter_vma_merge(vma, *vm_flags))
370 case MADV_NOHUGEPAGE:
371 *vm_flags &= ~VM_HUGEPAGE;
372 *vm_flags |= VM_NOHUGEPAGE;
374  * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
375  * this vma even if the mm stays registered in khugepaged, as it
376  * may have been registered before VM_NOHUGEPAGE was set.
384 int __init khugepaged_init(void)
386 mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
387 sizeof(struct mm_slot),
388 __alignof__(struct mm_slot), 0, NULL);
392 khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
393 khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
394 khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
395 khugepaged_max_ptes_shared = HPAGE_PMD_NR / 2;
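	/*
	 * Worked example (illustrative, assuming 4K base pages and 2M PMD
	 * huge pages, i.e. HPAGE_PMD_NR == 512): the defaults above are
	 *
	 *	pages_to_scan   = 512 * 8 = 4096 ptes examined per scan pass
	 *	max_ptes_none   = 511      a single mapped pte justifies collapse
	 *	max_ptes_swap   = 64       swapped-out ptes tolerated per pmd range
	 *	max_ptes_shared = 256      shared (mapcount > 1) ptes tolerated
	 */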
400 void __init khugepaged_destroy(void)
402 kmem_cache_destroy(mm_slot_cache);
405 static inline struct mm_slot *alloc_mm_slot(void)
407 if (!mm_slot_cache) /* initialization failed */
409 return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
412 static inline void free_mm_slot(struct mm_slot *mm_slot)
414 kmem_cache_free(mm_slot_cache, mm_slot);
417 static struct mm_slot *get_mm_slot(struct mm_struct *mm)
419 struct mm_slot *mm_slot;
421 hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
422 if (mm == mm_slot->mm)
428 static void insert_to_mm_slots_hash(struct mm_struct *mm,
429 struct mm_slot *mm_slot)
432 hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
435 static inline int khugepaged_test_exit(struct mm_struct *mm)
437 return atomic_read(&mm->mm_users) == 0;
440 static bool hugepage_vma_check(struct vm_area_struct *vma,
441 unsigned long vm_flags)
443 if (!transhuge_vma_enabled(vma, vm_flags))
446 if (vma->vm_file && !IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) -
447 vma->vm_pgoff, HPAGE_PMD_NR))
450 /* Enabled via shmem mount options or sysfs settings. */
451 if (shmem_file(vma->vm_file))
452 return shmem_huge_enabled(vma);
454 /* THP settings require madvise. */
455 if (!(vm_flags & VM_HUGEPAGE) && !khugepaged_always())
458 	/* Only a regular file is valid */
459 if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && vma->vm_file &&
460 (vm_flags & VM_DENYWRITE)) {
461 struct inode *inode = vma->vm_file->f_inode;
463 return S_ISREG(inode->i_mode);
466 if (!vma->anon_vma || vma->vm_ops)
468 if (vma_is_temporary_stack(vma))
470 return !(vm_flags & VM_NO_KHUGEPAGED);
473 int __khugepaged_enter(struct mm_struct *mm)
475 struct mm_slot *mm_slot;
478 mm_slot = alloc_mm_slot();
482 /* __khugepaged_exit() must not run from under us */
483 VM_BUG_ON_MM(atomic_read(&mm->mm_users) == 0, mm);
484 if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
485 free_mm_slot(mm_slot);
489 spin_lock(&khugepaged_mm_lock);
490 insert_to_mm_slots_hash(mm, mm_slot);
492 * Insert just behind the scanning cursor, to let the area settle
495 wakeup = list_empty(&khugepaged_scan.mm_head);
496 list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
497 spin_unlock(&khugepaged_mm_lock);
501 wake_up_interruptible(&khugepaged_wait);
506 int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
507 unsigned long vm_flags)
509 unsigned long hstart, hend;
512 	 * khugepaged only supports read-only non-shmem files.
513 * khugepaged does not yet work on special mappings. And
514 * file-private shmem THP is not supported.
516 if (!hugepage_vma_check(vma, vm_flags))
519 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
520 hend = vma->vm_end & HPAGE_PMD_MASK;
522 return khugepaged_enter(vma, vm_flags);
526 void __khugepaged_exit(struct mm_struct *mm)
528 struct mm_slot *mm_slot;
531 spin_lock(&khugepaged_mm_lock);
532 mm_slot = get_mm_slot(mm);
533 if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
534 hash_del(&mm_slot->hash);
535 list_del(&mm_slot->mm_node);
538 spin_unlock(&khugepaged_mm_lock);
541 clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
542 free_mm_slot(mm_slot);
544 } else if (mm_slot) {
546 * This is required to serialize against
547 * khugepaged_test_exit() (which is guaranteed to run
548 		 * under mmap_lock read mode). Stop here (after we
549 		 * return, all pagetables will be destroyed) until
550 * khugepaged has finished working on the pagetables
551 * under the mmap_lock.
554 mmap_write_unlock(mm);
558 static void release_pte_page(struct page *page)
560 mod_node_page_state(page_pgdat(page),
561 NR_ISOLATED_ANON + page_is_file_lru(page),
564 putback_lru_page(page);
567 static void release_pte_pages(pte_t *pte, pte_t *_pte,
568 struct list_head *compound_pagelist)
570 struct page *page, *tmp;
572 while (--_pte >= pte) {
573 pte_t pteval = *_pte;
575 page = pte_page(pteval);
576 if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)) &&
578 release_pte_page(page);
581 list_for_each_entry_safe(page, tmp, compound_pagelist, lru) {
582 list_del(&page->lru);
583 release_pte_page(page);
587 static bool is_refcount_suitable(struct page *page)
589 int expected_refcount;
591 expected_refcount = total_mapcount(page);
592 if (PageSwapCache(page))
593 expected_refcount += compound_nr(page);
595 return page_count(page) == expected_refcount;
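/*
 * Worked example for is_refcount_suitable() above (illustrative, not part
 * of the original source): an order-0 anonymous page mapped by a parent
 * and one forked child has total_mapcount() == 2; if it also sits in the
 * swap cache, the cache holds one extra reference and compound_nr() == 1,
 * so the expected page_count() is 3.  Any higher count implies an
 * additional pin (e.g. a GUP user) and the page is rejected for collapse.
 */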
598 static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
599 unsigned long address,
601 struct list_head *compound_pagelist)
603 struct page *page = NULL;
605 int none_or_zero = 0, shared = 0, result = 0, referenced = 0;
606 bool writable = false;
608 for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
609 _pte++, address += PAGE_SIZE) {
610 pte_t pteval = *_pte;
611 if (pte_none(pteval) || (pte_present(pteval) &&
612 is_zero_pfn(pte_pfn(pteval)))) {
613 if (!userfaultfd_armed(vma) &&
614 ++none_or_zero <= khugepaged_max_ptes_none) {
617 result = SCAN_EXCEED_NONE_PTE;
621 if (!pte_present(pteval)) {
622 result = SCAN_PTE_NON_PRESENT;
625 if (pte_uffd_wp(pteval)) {
626 result = SCAN_PTE_UFFD_WP;
629 page = vm_normal_page(vma, address, pteval);
630 if (unlikely(!page)) {
631 result = SCAN_PAGE_NULL;
635 VM_BUG_ON_PAGE(!PageAnon(page), page);
637 if (page_mapcount(page) > 1 &&
638 ++shared > khugepaged_max_ptes_shared) {
639 result = SCAN_EXCEED_SHARED_PTE;
643 if (PageCompound(page)) {
645 page = compound_head(page);
648 * Check if we have dealt with the compound page
651 list_for_each_entry(p, compound_pagelist, lru) {
658 * We can do it before isolate_lru_page because the
659 * page can't be freed from under us. NOTE: PG_lock
660 * is needed to serialize against split_huge_page
661 * when invoked from the VM.
663 if (!trylock_page(page)) {
664 result = SCAN_PAGE_LOCK;
669 * Check if the page has any GUP (or other external) pins.
671 		 * The page table that maps the page has already been unlinked
672 		 * from the page table tree and this process cannot get
673 		 * an additional pin on the page.
675 * New pins can come later if the page is shared across fork,
676 * but not from this process. The other process cannot write to
677 * the page, only trigger CoW.
679 if (!is_refcount_suitable(page)) {
681 result = SCAN_PAGE_COUNT;
684 if (!pte_write(pteval) && PageSwapCache(page) &&
685 !reuse_swap_page(page, NULL)) {
687 * Page is in the swap cache and cannot be re-used.
688 * It cannot be collapsed into a THP.
691 result = SCAN_SWAP_CACHE_PAGE;
696 		 * Isolate the page to avoid collapsing a hugepage
697 * currently in use by the VM.
699 if (isolate_lru_page(page)) {
701 result = SCAN_DEL_PAGE_LRU;
704 mod_node_page_state(page_pgdat(page),
705 NR_ISOLATED_ANON + page_is_file_lru(page),
707 VM_BUG_ON_PAGE(!PageLocked(page), page);
708 VM_BUG_ON_PAGE(PageLRU(page), page);
710 if (PageCompound(page))
711 list_add_tail(&page->lru, compound_pagelist);
713 		/* There should be enough young ptes to collapse the page */
714 if (pte_young(pteval) ||
715 page_is_young(page) || PageReferenced(page) ||
716 mmu_notifier_test_young(vma->vm_mm, address))
719 if (pte_write(pteval))
723 if (unlikely(!writable)) {
724 result = SCAN_PAGE_RO;
725 } else if (unlikely(!referenced)) {
726 result = SCAN_LACK_REFERENCED_PAGE;
728 result = SCAN_SUCCEED;
729 trace_mm_collapse_huge_page_isolate(page, none_or_zero,
730 referenced, writable, result);
734 release_pte_pages(pte, _pte, compound_pagelist);
735 trace_mm_collapse_huge_page_isolate(page, none_or_zero,
736 referenced, writable, result);
740 static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
741 struct vm_area_struct *vma,
742 unsigned long address,
744 struct list_head *compound_pagelist)
746 struct page *src_page, *tmp;
748 for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
749 _pte++, page++, address += PAGE_SIZE) {
750 pte_t pteval = *_pte;
752 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
753 clear_user_highpage(page, address);
754 add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
755 if (is_zero_pfn(pte_pfn(pteval))) {
757 * ptl mostly unnecessary.
761 * paravirt calls inside pte_clear here are
764 pte_clear(vma->vm_mm, address, _pte);
768 src_page = pte_page(pteval);
769 copy_user_highpage(page, src_page, address, vma);
770 if (!PageCompound(src_page))
771 release_pte_page(src_page);
773 * ptl mostly unnecessary, but preempt has to
774 * be disabled to update the per-cpu stats
775 * inside page_remove_rmap().
779 * paravirt calls inside pte_clear here are
782 pte_clear(vma->vm_mm, address, _pte);
783 page_remove_rmap(src_page, false);
785 free_page_and_swap_cache(src_page);
789 list_for_each_entry_safe(src_page, tmp, compound_pagelist, lru) {
790 list_del(&src_page->lru);
791 release_pte_page(src_page);
795 static void khugepaged_alloc_sleep(void)
799 add_wait_queue(&khugepaged_wait, &wait);
800 freezable_schedule_timeout_interruptible(
801 msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
802 remove_wait_queue(&khugepaged_wait, &wait);
805 static int khugepaged_node_load[MAX_NUMNODES];
807 static bool khugepaged_scan_abort(int nid)
812 * If node_reclaim_mode is disabled, then no extra effort is made to
813 * allocate memory locally.
815 if (!node_reclaim_mode)
818 /* If there is a count for this node already, it must be acceptable */
819 if (khugepaged_node_load[nid])
822 for (i = 0; i < MAX_NUMNODES; i++) {
823 if (!khugepaged_node_load[i])
825 if (node_distance(nid, i) > node_reclaim_distance)
831 /* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
832 static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
834 return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
838 static int khugepaged_find_target_node(void)
840 static int last_khugepaged_target_node = NUMA_NO_NODE;
841 int nid, target_node = 0, max_value = 0;
843 /* find first node with max normal pages hit */
844 for (nid = 0; nid < MAX_NUMNODES; nid++)
845 if (khugepaged_node_load[nid] > max_value) {
846 max_value = khugepaged_node_load[nid];
850 	/* do some balancing if several nodes have the same hit record */
851 if (target_node <= last_khugepaged_target_node)
852 for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
854 if (max_value == khugepaged_node_load[nid]) {
859 last_khugepaged_target_node = target_node;
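/*
 * Worked example for khugepaged_find_target_node() above (illustrative,
 * not part of the original source): if a scan left
 * khugepaged_node_load[] == { 256, 256, 0, ... }, node 0 is the first
 * maximum.  When the previous collapse already targeted node 0
 * (last_khugepaged_target_node == 0), the balancing loop advances the
 * target to node 1, so equally loaded nodes take turns receiving the
 * collapsed hugepages.
 */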
863 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
865 if (IS_ERR(*hpage)) {
871 khugepaged_alloc_sleep();
881 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
883 VM_BUG_ON_PAGE(*hpage, *hpage);
885 *hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
886 if (unlikely(!*hpage)) {
887 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
888 *hpage = ERR_PTR(-ENOMEM);
892 prep_transhuge_page(*hpage);
893 count_vm_event(THP_COLLAPSE_ALLOC);
897 static int khugepaged_find_target_node(void)
902 static inline struct page *alloc_khugepaged_hugepage(void)
906 page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(),
909 prep_transhuge_page(page);
913 static struct page *khugepaged_alloc_hugepage(bool *wait)
918 hpage = alloc_khugepaged_hugepage();
920 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
925 khugepaged_alloc_sleep();
927 count_vm_event(THP_COLLAPSE_ALLOC);
928 } while (unlikely(!hpage) && likely(khugepaged_enabled()));
933 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
936 * If the hpage allocated earlier was briefly exposed in page cache
937 * before collapse_file() failed, it is possible that racing lookups
938 * have not yet completed, and would then be unpleasantly surprised by
939 * finding the hpage reused for the same mapping at a different offset.
940 * Just release the previous allocation if there is any danger of that.
942 if (*hpage && page_count(*hpage) > 1) {
948 *hpage = khugepaged_alloc_hugepage(wait);
950 if (unlikely(!*hpage))
957 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
966  * If the mmap_lock was temporarily dropped, revalidate the vma
967  * before taking the mmap_lock again.
968  * Return 0 on success, otherwise return non-zero
972 static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
973 struct vm_area_struct **vmap)
975 struct vm_area_struct *vma;
976 unsigned long hstart, hend;
978 if (unlikely(khugepaged_test_exit(mm)))
979 return SCAN_ANY_PROCESS;
981 *vmap = vma = find_vma(mm, address);
983 return SCAN_VMA_NULL;
985 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
986 hend = vma->vm_end & HPAGE_PMD_MASK;
987 if (address < hstart || address + HPAGE_PMD_SIZE > hend)
988 return SCAN_ADDRESS_RANGE;
989 if (!hugepage_vma_check(vma, vma->vm_flags))
990 return SCAN_VMA_CHECK;
991 /* Anon VMA expected */
992 if (!vma->anon_vma || vma->vm_ops)
993 return SCAN_VMA_CHECK;
998 * Bring missing pages in from swap, to complete THP collapse.
999 * Only done if khugepaged_scan_pmd believes it is worthwhile.
1001 * Called and returns without pte mapped or spinlocks held,
1002 * but with mmap_lock held to protect against vma changes.
1005 static bool __collapse_huge_page_swapin(struct mm_struct *mm,
1006 struct vm_area_struct *vma,
1007 unsigned long address, pmd_t *pmd,
1012 struct vm_fault vmf = {
1015 .flags = FAULT_FLAG_ALLOW_RETRY,
1017 .pgoff = linear_page_index(vma, address),
1020 vmf.pte = pte_offset_map(pmd, address);
1021 for (; vmf.address < address + HPAGE_PMD_NR*PAGE_SIZE;
1022 vmf.pte++, vmf.address += PAGE_SIZE) {
1023 vmf.orig_pte = *vmf.pte;
1024 if (!is_swap_pte(vmf.orig_pte))
1027 ret = do_swap_page(&vmf);
1029 /* do_swap_page returns VM_FAULT_RETRY with released mmap_lock */
1030 if (ret & VM_FAULT_RETRY) {
1032 if (hugepage_vma_revalidate(mm, address, &vmf.vma)) {
1033 /* vma is no longer available, don't continue to swapin */
1034 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
1037 /* check if the pmd is still valid */
1038 if (mm_find_pmd(mm, address) != pmd) {
1039 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
1043 if (ret & VM_FAULT_ERROR) {
1044 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
1047 /* pte is unmapped now, we need to map it */
1048 vmf.pte = pte_offset_map(pmd, vmf.address);
1053 /* Drain LRU add pagevec to remove extra pin on the swapped in pages */
1057 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
1061 static void collapse_huge_page(struct mm_struct *mm,
1062 unsigned long address,
1063 struct page **hpage,
1064 int node, int referenced, int unmapped)
1066 LIST_HEAD(compound_pagelist);
1070 struct page *new_page;
1071 spinlock_t *pmd_ptl, *pte_ptl;
1072 int isolated = 0, result = 0;
1073 struct vm_area_struct *vma;
1074 struct mmu_notifier_range range;
1077 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1079 /* Only allocate from the target node */
1080 gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
1083 * Before allocating the hugepage, release the mmap_lock read lock.
1084 * The allocation can take potentially a long time if it involves
1085 * sync compaction, and we do not need to hold the mmap_lock during
1086 * that. We will recheck the vma after taking it again in write mode.
1088 mmap_read_unlock(mm);
1089 new_page = khugepaged_alloc_page(hpage, gfp, node);
1091 result = SCAN_ALLOC_HUGE_PAGE_FAIL;
1095 if (unlikely(mem_cgroup_charge(new_page, mm, gfp))) {
1096 result = SCAN_CGROUP_CHARGE_FAIL;
1099 count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);
1102 result = hugepage_vma_revalidate(mm, address, &vma);
1104 mmap_read_unlock(mm);
1108 pmd = mm_find_pmd(mm, address);
1110 result = SCAN_PMD_NULL;
1111 mmap_read_unlock(mm);
1116 * __collapse_huge_page_swapin always returns with mmap_lock locked.
1117 * If it fails, we release mmap_lock and jump out_nolock.
1118 * Continuing to collapse causes inconsistency.
1120 if (unmapped && !__collapse_huge_page_swapin(mm, vma, address,
1122 mmap_read_unlock(mm);
1126 mmap_read_unlock(mm);
1128 * Prevent all access to pagetables with the exception of
1129 * gup_fast later handled by the ptep_clear_flush and the VM
1130 * handled by the anon_vma lock + PG_lock.
1132 mmap_write_lock(mm);
1133 result = hugepage_vma_revalidate(mm, address, &vma);
1136 /* check if the pmd is still valid */
1137 if (mm_find_pmd(mm, address) != pmd)
1140 anon_vma_lock_write(vma->anon_vma);
1142 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
1143 address, address + HPAGE_PMD_SIZE);
1144 mmu_notifier_invalidate_range_start(&range);
1146 pte = pte_offset_map(pmd, address);
1147 pte_ptl = pte_lockptr(mm, pmd);
1149 pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
1151 * This removes any huge TLB entry from the CPU so we won't allow
1152 * huge and small TLB entries for the same virtual address to
1153 * avoid the risk of CPU bugs in that area.
1155 * Parallel fast GUP is fine since fast GUP will back off when
1156 	 * it detects that the PMD has changed.
1158 _pmd = pmdp_collapse_flush(vma, address, pmd);
1159 spin_unlock(pmd_ptl);
1160 mmu_notifier_invalidate_range_end(&range);
1161 tlb_remove_table_sync_one();
1164 isolated = __collapse_huge_page_isolate(vma, address, pte,
1165 &compound_pagelist);
1166 spin_unlock(pte_ptl);
1168 if (unlikely(!isolated)) {
1171 BUG_ON(!pmd_none(*pmd));
1173 * We can only use set_pmd_at when establishing
1174 * hugepmds and never for establishing regular pmds that
1175 		 * point to regular pagetables. Use pmd_populate for that
1177 pmd_populate(mm, pmd, pmd_pgtable(_pmd));
1178 spin_unlock(pmd_ptl);
1179 anon_vma_unlock_write(vma->anon_vma);
1185 * All pages are isolated and locked so anon_vma rmap
1186 * can't run anymore.
1188 anon_vma_unlock_write(vma->anon_vma);
1190 __collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl,
1191 &compound_pagelist);
1193 __SetPageUptodate(new_page);
1194 pgtable = pmd_pgtable(_pmd);
1196 _pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
1197 _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
1200 * spin_lock() below is not the equivalent of smp_wmb(), so
1201 	 * this is needed to keep the copy_huge_page writes from becoming
1202 	 * visible after the set_pmd_at() write.
1207 BUG_ON(!pmd_none(*pmd));
1208 page_add_new_anon_rmap(new_page, vma, address, true);
1209 lru_cache_add_inactive_or_unevictable(new_page, vma);
1210 pgtable_trans_huge_deposit(mm, pmd, pgtable);
1211 set_pmd_at(mm, address, pmd, _pmd);
1212 update_mmu_cache_pmd(vma, address, pmd);
1213 spin_unlock(pmd_ptl);
1217 khugepaged_pages_collapsed++;
1218 result = SCAN_SUCCEED;
1220 mmap_write_unlock(mm);
1222 if (!IS_ERR_OR_NULL(*hpage))
1223 mem_cgroup_uncharge(*hpage);
1224 trace_mm_collapse_huge_page(mm, isolated, result);
1230 static int khugepaged_scan_pmd(struct mm_struct *mm,
1231 struct vm_area_struct *vma,
1232 unsigned long address,
1233 struct page **hpage)
1237 int ret = 0, result = 0, referenced = 0;
1238 int none_or_zero = 0, shared = 0;
1239 struct page *page = NULL;
1240 unsigned long _address;
1242 int node = NUMA_NO_NODE, unmapped = 0;
1243 bool writable = false;
1245 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1247 pmd = mm_find_pmd(mm, address);
1249 result = SCAN_PMD_NULL;
1253 memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
1254 pte = pte_offset_map_lock(mm, pmd, address, &ptl);
1255 for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
1256 _pte++, _address += PAGE_SIZE) {
1257 pte_t pteval = *_pte;
1258 if (is_swap_pte(pteval)) {
1259 if (++unmapped <= khugepaged_max_ptes_swap) {
1261 * Always be strict with uffd-wp
1262 * enabled swap entries. Please see
1263 * comment below for pte_uffd_wp().
1265 if (pte_swp_uffd_wp(pteval)) {
1266 result = SCAN_PTE_UFFD_WP;
1271 result = SCAN_EXCEED_SWAP_PTE;
1275 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
1276 if (!userfaultfd_armed(vma) &&
1277 ++none_or_zero <= khugepaged_max_ptes_none) {
1280 result = SCAN_EXCEED_NONE_PTE;
1284 if (!pte_present(pteval)) {
1285 result = SCAN_PTE_NON_PRESENT;
1288 if (pte_uffd_wp(pteval)) {
1290 * Don't collapse the page if any of the small
1291 * PTEs are armed with uffd write protection.
1292 * Here we can also mark the new huge pmd as
1293 * write protected if any of the small ones is
1294 			 * marked, but that could bring unknown
1295 			 * userfault messages that fall outside of
1296 * the registered range. So, just be simple.
1298 result = SCAN_PTE_UFFD_WP;
1301 if (pte_write(pteval))
1304 page = vm_normal_page(vma, _address, pteval);
1305 if (unlikely(!page)) {
1306 result = SCAN_PAGE_NULL;
1310 if (page_mapcount(page) > 1 &&
1311 ++shared > khugepaged_max_ptes_shared) {
1312 result = SCAN_EXCEED_SHARED_PTE;
1316 page = compound_head(page);
1319 * Record which node the original page is from and save this
1320 * information to khugepaged_node_load[].
1321 		 * Khugepaged will allocate the hugepage from the node that has the max
1324 node = page_to_nid(page);
1325 if (khugepaged_scan_abort(node)) {
1326 result = SCAN_SCAN_ABORT;
1329 khugepaged_node_load[node]++;
1330 if (!PageLRU(page)) {
1331 result = SCAN_PAGE_LRU;
1334 if (PageLocked(page)) {
1335 result = SCAN_PAGE_LOCK;
1338 if (!PageAnon(page)) {
1339 result = SCAN_PAGE_ANON;
1344 * Check if the page has any GUP (or other external) pins.
1346 		 * Here the check is racy: it may see total_mapcount > refcount
1348 		 * For example, one process with one forked child process.
1349 		 * The parent has the PMD split due to MADV_DONTNEED, then
1350 		 * the child is trying to unmap the whole PMD, but khugepaged
1351 		 * may be scanning the parent between when the child has
1352 		 * cleared the PageDoubleMap flag and decremented the mapcount. So
1353 		 * khugepaged may see total_mapcount > refcount.
1355 		 * But such a case is ephemeral; we could always retry collapse
1356 		 * later. However it may report a false positive if the page
1357 		 * has excessive GUP pins (e.g. 512). Anyway the same check
1358 		 * will be done again later, so the risk seems low.
1360 if (!is_refcount_suitable(page)) {
1361 result = SCAN_PAGE_COUNT;
1364 if (pte_young(pteval) ||
1365 page_is_young(page) || PageReferenced(page) ||
1366 mmu_notifier_test_young(vma->vm_mm, address))
1370 result = SCAN_PAGE_RO;
1371 } else if (!referenced || (unmapped && referenced < HPAGE_PMD_NR/2)) {
1372 result = SCAN_LACK_REFERENCED_PAGE;
1374 result = SCAN_SUCCEED;
1378 pte_unmap_unlock(pte, ptl);
1380 node = khugepaged_find_target_node();
1381 /* collapse_huge_page will return with the mmap_lock released */
1382 collapse_huge_page(mm, address, hpage, node,
1383 referenced, unmapped);
1386 trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
1387 none_or_zero, result, unmapped);
1391 static void collect_mm_slot(struct mm_slot *mm_slot)
1393 struct mm_struct *mm = mm_slot->mm;
1395 lockdep_assert_held(&khugepaged_mm_lock);
1397 if (khugepaged_test_exit(mm)) {
1399 hash_del(&mm_slot->hash);
1400 list_del(&mm_slot->mm_node);
1403 * Not strictly needed because the mm exited already.
1405 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
1408 /* khugepaged_mm_lock actually not necessary for the below */
1409 free_mm_slot(mm_slot);
1416  * Notify khugepaged that the given addr of the mm is a pte-mapped THP. Then
1417 * khugepaged should try to collapse the page table.
1419 static int khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
1422 struct mm_slot *mm_slot;
1424 VM_BUG_ON(addr & ~HPAGE_PMD_MASK);
1426 spin_lock(&khugepaged_mm_lock);
1427 mm_slot = get_mm_slot(mm);
1428 if (likely(mm_slot && mm_slot->nr_pte_mapped_thp < MAX_PTE_MAPPED_THP))
1429 mm_slot->pte_mapped_thp[mm_slot->nr_pte_mapped_thp++] = addr;
1430 spin_unlock(&khugepaged_mm_lock);
1435 * Try to collapse a pte-mapped THP for mm at address haddr.
1437 * This function checks whether all the PTEs in the PMD are pointing to the
1438 * right THP. If so, retract the page table so the THP can refault in with
1441 void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
1443 unsigned long haddr = addr & HPAGE_PMD_MASK;
1444 struct vm_area_struct *vma = find_vma(mm, haddr);
1446 pte_t *start_pte, *pte;
1451 struct mmu_notifier_range range;
1453 if (!vma || !vma->vm_file ||
1454 vma->vm_start > haddr || vma->vm_end < haddr + HPAGE_PMD_SIZE)
1458 * This vm_flags may not have VM_HUGEPAGE if the page was not
1459 * collapsed by this mm. But we can still collapse if the page is
1460 	 * a valid THP. Add extra VM_HUGEPAGE so hugepage_vma_check()
1461 * will not fail the vma for missing VM_HUGEPAGE
1463 if (!hugepage_vma_check(vma, vma->vm_flags | VM_HUGEPAGE))
1466 hpage = find_lock_page(vma->vm_file->f_mapping,
1467 linear_page_index(vma, haddr));
1471 if (!PageHead(hpage))
1474 pmd = mm_find_pmd(mm, haddr);
1479 * We need to lock the mapping so that from here on, only GUP-fast and
1480 * hardware page walks can access the parts of the page tables that
1481 * we're operating on.
1483 i_mmap_lock_write(vma->vm_file->f_mapping);
1486 * This spinlock should be unnecessary: Nobody else should be accessing
1487 * the page tables under spinlock protection here, only
1488 * lockless_pages_from_mm() and the hardware page walker can access page
1489 * tables while all the high-level locks are held in write mode.
1491 start_pte = pte_offset_map_lock(mm, pmd, haddr, &ptl);
1493 /* step 1: check all mapped PTEs are to the right huge page */
1494 for (i = 0, addr = haddr, pte = start_pte;
1495 i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
1498 /* empty pte, skip */
1502 /* page swapped out, abort */
1503 if (!pte_present(*pte))
1506 page = vm_normal_page(vma, addr, *pte);
1509 * Note that uprobe, debugger, or MAP_PRIVATE may change the
1510 * page table, but the new page will not be a subpage of hpage.
1512 if (hpage + i != page)
1517 /* step 2: adjust rmap */
1518 for (i = 0, addr = haddr, pte = start_pte;
1519 i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE, pte++) {
1524 page = vm_normal_page(vma, addr, *pte);
1525 page_remove_rmap(page, false);
1528 pte_unmap_unlock(start_pte, ptl);
1530 /* step 3: set proper refcount and mm_counters. */
1532 page_ref_sub(hpage, count);
1533 add_mm_counter(vma->vm_mm, mm_counter_file(hpage), -count);
1536 /* step 4: collapse pmd */
1537 /* we make no change to anon, but protect concurrent anon page lookup */
1539 anon_vma_lock_write(vma->anon_vma);
1541 mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm, haddr,
1542 haddr + HPAGE_PMD_SIZE);
1543 mmu_notifier_invalidate_range_start(&range);
1544 _pmd = pmdp_collapse_flush(vma, haddr, pmd);
1546 tlb_remove_table_sync_one();
1547 mmu_notifier_invalidate_range_end(&range);
1548 pte_free(mm, pmd_pgtable(_pmd));
1551 anon_vma_unlock_write(vma->anon_vma);
1552 i_mmap_unlock_write(vma->vm_file->f_mapping);
1560 pte_unmap_unlock(start_pte, ptl);
1561 i_mmap_unlock_write(vma->vm_file->f_mapping);
1565 static int khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
1567 struct mm_struct *mm = mm_slot->mm;
1570 if (likely(mm_slot->nr_pte_mapped_thp == 0))
1573 if (!mmap_write_trylock(mm))
1576 if (unlikely(khugepaged_test_exit(mm)))
1579 for (i = 0; i < mm_slot->nr_pte_mapped_thp; i++)
1580 collapse_pte_mapped_thp(mm, mm_slot->pte_mapped_thp[i]);
1583 mm_slot->nr_pte_mapped_thp = 0;
1584 mmap_write_unlock(mm);
1588 static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
1590 struct vm_area_struct *vma;
1591 struct mm_struct *mm;
1595 i_mmap_lock_write(mapping);
1596 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
1598 * Check vma->anon_vma to exclude MAP_PRIVATE mappings that
1599 * got written to. These VMAs are likely not worth investing
1600 * mmap_write_lock(mm) as PMD-mapping is likely to be split
1603 		 * Note that the vma->anon_vma check is racy: it can be set up after
1604 * the check but before we took mmap_lock by the fault path.
1605 * But page lock would prevent establishing any new ptes of the
1606 * page, so we are safe.
1608 		 * An alternative would be to drop the check, but then check that the page
1609 		 * table is clear before calling pmdp_collapse_flush() under
1610 		 * ptl. It has a higher chance to recover THP for the VMA, but
1611 * has higher cost too. It would also probably require locking
1616 addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
1617 if (addr & ~HPAGE_PMD_MASK)
1619 if (vma->vm_end < addr + HPAGE_PMD_SIZE)
1622 pmd = mm_find_pmd(mm, addr);
1626 * We need exclusive mmap_lock to retract page table.
1628 * We use trylock due to lock inversion: we need to acquire
1629 * mmap_lock while holding page lock. Fault path does it in
1630 * reverse order. Trylock is a way to avoid deadlock.
1632 if (mmap_write_trylock(mm)) {
1633 if (!khugepaged_test_exit(mm)) {
1634 struct mmu_notifier_range range;
1636 mmu_notifier_range_init(&range,
1637 MMU_NOTIFY_CLEAR, 0,
1639 addr + HPAGE_PMD_SIZE);
1640 mmu_notifier_invalidate_range_start(&range);
1641 /* assume page table is clear */
1642 _pmd = pmdp_collapse_flush(vma, addr, pmd);
1644 tlb_remove_table_sync_one();
1645 pte_free(mm, pmd_pgtable(_pmd));
1646 mmu_notifier_invalidate_range_end(&range);
1648 mmap_write_unlock(mm);
1650 /* Try again later */
1651 khugepaged_add_pte_mapped_thp(mm, addr);
1654 i_mmap_unlock_write(mapping);
1658 * collapse_file - collapse filemap/tmpfs/shmem pages into huge one.
1660  * The basic scheme is simple; the details are more complex:
1661 * - allocate and lock a new huge page;
1662 * - scan page cache replacing old pages with the new one
1663 * + swap/gup in pages if necessary;
1665 * + keep old pages around in case rollback is required;
1666 * - if replacing succeeds:
1669 * + unlock huge page;
1670  * - if replacing failed:
1671 * + put all pages back and unfreeze them;
1672 * + restore gaps in the page cache;
1673 * + unlock and free huge page;
1675 static void collapse_file(struct mm_struct *mm,
1676 struct file *file, pgoff_t start,
1677 struct page **hpage, int node)
1679 struct address_space *mapping = file->f_mapping;
1681 struct page *new_page;
1682 pgoff_t index, end = start + HPAGE_PMD_NR;
1683 LIST_HEAD(pagelist);
1684 XA_STATE_ORDER(xas, &mapping->i_pages, start, HPAGE_PMD_ORDER);
1685 int nr_none = 0, result = SCAN_SUCCEED;
1686 bool is_shmem = shmem_file(file);
1688 VM_BUG_ON(!IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) && !is_shmem);
1689 VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
1691 /* Only allocate from the target node */
1692 gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
1694 new_page = khugepaged_alloc_page(hpage, gfp, node);
1696 result = SCAN_ALLOC_HUGE_PAGE_FAIL;
1700 if (unlikely(mem_cgroup_charge(new_page, mm, gfp))) {
1701 result = SCAN_CGROUP_CHARGE_FAIL;
1704 count_memcg_page_event(new_page, THP_COLLAPSE_ALLOC);
1706 /* This will be less messy when we use multi-index entries */
1709 xas_create_range(&xas);
1710 if (!xas_error(&xas))
1712 xas_unlock_irq(&xas);
1713 if (!xas_nomem(&xas, GFP_KERNEL)) {
1719 __SetPageLocked(new_page);
1721 __SetPageSwapBacked(new_page);
1722 new_page->index = start;
1723 new_page->mapping = mapping;
1726 * At this point the new_page is locked and not up-to-date.
1727 * It's safe to insert it into the page cache, because nobody would
1728 * be able to map it or use it in another way until we unlock it.
1731 xas_set(&xas, start);
1732 for (index = start; index < end; index++) {
1733 struct page *page = xas_next(&xas);
1735 VM_BUG_ON(index != xas.xa_index);
1739 * Stop if extent has been truncated or
1740 * hole-punched, and is now completely
1743 if (index == start) {
1744 if (!xas_next_entry(&xas, end - 1)) {
1745 result = SCAN_TRUNCATED;
1748 xas_set(&xas, index);
1750 if (!shmem_charge(mapping->host, 1)) {
1754 xas_store(&xas, new_page);
1759 if (xa_is_value(page) || !PageUptodate(page)) {
1760 xas_unlock_irq(&xas);
1761 /* swap in or instantiate fallocated page */
1762 if (shmem_getpage(mapping->host, index, &page,
1767 } else if (trylock_page(page)) {
1769 xas_unlock_irq(&xas);
1771 result = SCAN_PAGE_LOCK;
1774 } else { /* !is_shmem */
1775 if (!page || xa_is_value(page)) {
1776 xas_unlock_irq(&xas);
1777 page_cache_sync_readahead(mapping, &file->f_ra,
1780 /* drain pagevecs to help isolate_lru_page() */
1782 page = find_lock_page(mapping, index);
1783 if (unlikely(page == NULL)) {
1787 } else if (PageDirty(page)) {
1789 * khugepaged only works on read-only fd,
1790 * so this page is dirty because it hasn't
1791 * been flushed since first write. There
1792 * won't be new dirty pages.
1794 * Trigger async flush here and hope the
1795 * writeback is done when khugepaged
1796 * revisits this page.
1798 * This is a one-off situation. We are not
1799 * forcing writeback in loop.
1801 xas_unlock_irq(&xas);
1802 filemap_flush(mapping);
1805 } else if (PageWriteback(page)) {
1806 xas_unlock_irq(&xas);
1809 } else if (trylock_page(page)) {
1811 xas_unlock_irq(&xas);
1813 result = SCAN_PAGE_LOCK;
1819 * The page must be locked, so we can drop the i_pages lock
1820 * without racing with truncate.
1822 VM_BUG_ON_PAGE(!PageLocked(page), page);
1824 /* make sure the page is up to date */
1825 if (unlikely(!PageUptodate(page))) {
1831 * If file was truncated then extended, or hole-punched, before
1832 * we locked the first page, then a THP might be there already.
1834 if (PageTransCompound(page)) {
1835 result = SCAN_PAGE_COMPOUND;
1839 if (page_mapping(page) != mapping) {
1840 result = SCAN_TRUNCATED;
1844 if (!is_shmem && (PageDirty(page) ||
1845 PageWriteback(page))) {
1847 * khugepaged only works on read-only fd, so this
1848 * page is dirty because it hasn't been flushed
1849 * since first write.
1855 if (isolate_lru_page(page)) {
1856 result = SCAN_DEL_PAGE_LRU;
1860 if (page_has_private(page) &&
1861 !try_to_release_page(page, GFP_KERNEL)) {
1862 result = SCAN_PAGE_HAS_PRIVATE;
1863 putback_lru_page(page);
1867 if (page_mapped(page))
1868 unmap_mapping_pages(mapping, index, 1, false);
1871 xas_set(&xas, index);
1873 VM_BUG_ON_PAGE(page != xas_load(&xas), page);
1874 VM_BUG_ON_PAGE(page_mapped(page), page);
1877 * The page is expected to have page_count() == 3:
1878 * - we hold a pin on it;
1879 * - one reference from page cache;
1880 * - one from isolate_lru_page;
1882 if (!page_ref_freeze(page, 3)) {
1883 result = SCAN_PAGE_COUNT;
1884 xas_unlock_irq(&xas);
1885 putback_lru_page(page);
1890 * Add the page to the list to be able to undo the collapse if
1891 		 * something goes wrong.
1893 list_add_tail(&page->lru, &pagelist);
1895 /* Finally, replace with the new page. */
1896 xas_store(&xas, new_page);
1905 __inc_node_page_state(new_page, NR_SHMEM_THPS);
1907 __inc_node_page_state(new_page, NR_FILE_THPS);
1908 filemap_nr_thps_inc(mapping);
1912 __mod_lruvec_page_state(new_page, NR_FILE_PAGES, nr_none);
1914 __mod_lruvec_page_state(new_page, NR_SHMEM, nr_none);
1918 xas_unlock_irq(&xas);
1921 if (result == SCAN_SUCCEED) {
1922 struct page *page, *tmp;
1925 		 * Replacing old pages with the new one has succeeded; now we
1926 * need to copy the content and free the old pages.
1929 list_for_each_entry_safe(page, tmp, &pagelist, lru) {
1930 while (index < page->index) {
1931 clear_highpage(new_page + (index % HPAGE_PMD_NR));
1934 copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
1936 list_del(&page->lru);
1937 page->mapping = NULL;
1938 page_ref_unfreeze(page, 1);
1939 ClearPageActive(page);
1940 ClearPageUnevictable(page);
1945 while (index < end) {
1946 clear_highpage(new_page + (index % HPAGE_PMD_NR));
1950 SetPageUptodate(new_page);
1951 page_ref_add(new_page, HPAGE_PMD_NR - 1);
1953 set_page_dirty(new_page);
1954 lru_cache_add(new_page);
1957 * Remove pte page tables, so we can re-fault the page as huge.
1959 retract_page_tables(mapping, start);
1962 khugepaged_pages_collapsed++;
1966 /* Something went wrong: roll back page cache changes */
1968 mapping->nrpages -= nr_none;
1971 shmem_uncharge(mapping->host, nr_none);
1973 xas_set(&xas, start);
1974 xas_for_each(&xas, page, end - 1) {
1975 page = list_first_entry_or_null(&pagelist,
1977 if (!page || xas.xa_index < page->index) {
1981 /* Put holes back where they were */
1982 xas_store(&xas, NULL);
1986 VM_BUG_ON_PAGE(page->index != xas.xa_index, page);
1988 /* Unfreeze the page. */
1989 list_del(&page->lru);
1990 page_ref_unfreeze(page, 2);
1991 xas_store(&xas, page);
1993 xas_unlock_irq(&xas);
1995 putback_lru_page(page);
1999 xas_unlock_irq(&xas);
2001 new_page->mapping = NULL;
2004 unlock_page(new_page);
2006 VM_BUG_ON(!list_empty(&pagelist));
2007 if (!IS_ERR_OR_NULL(*hpage))
2008 mem_cgroup_uncharge(*hpage);
2009 /* TODO: tracepoints */
2012 static void khugepaged_scan_file(struct mm_struct *mm,
2013 struct file *file, pgoff_t start, struct page **hpage)
2015 struct page *page = NULL;
2016 struct address_space *mapping = file->f_mapping;
2017 XA_STATE(xas, &mapping->i_pages, start);
2019 int node = NUMA_NO_NODE;
2020 int result = SCAN_SUCCEED;
2024 memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
2026 xas_for_each(&xas, page, start + HPAGE_PMD_NR - 1) {
2027 if (xas_retry(&xas, page))
2030 if (xa_is_value(page)) {
2031 if (++swap > khugepaged_max_ptes_swap) {
2032 result = SCAN_EXCEED_SWAP_PTE;
2038 if (PageTransCompound(page)) {
2039 result = SCAN_PAGE_COMPOUND;
2043 node = page_to_nid(page);
2044 if (khugepaged_scan_abort(node)) {
2045 result = SCAN_SCAN_ABORT;
2048 khugepaged_node_load[node]++;
2050 if (!PageLRU(page)) {
2051 result = SCAN_PAGE_LRU;
2055 if (page_count(page) !=
2056 1 + page_mapcount(page) + page_has_private(page)) {
2057 result = SCAN_PAGE_COUNT;
2062 * We probably should check if the page is referenced here, but
2063 * nobody would transfer pte_young() to PageReferenced() for us.
2064 * And rmap walk here is just too costly...
2069 if (need_resched()) {
2076 if (result == SCAN_SUCCEED) {
2077 if (present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
2078 result = SCAN_EXCEED_NONE_PTE;
2080 node = khugepaged_find_target_node();
2081 collapse_file(mm, file, start, hpage, node);
2085 /* TODO: tracepoints */
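/*
 * Worked example for the SCAN_EXCEED_NONE_PTE check above (illustrative,
 * assuming HPAGE_PMD_NR == 512): with the default max_ptes_none of 511
 * the threshold is 512 - 511 = 1, so a single present page in the 2M
 * range is enough to call collapse_file(); with max_ptes_none set to 0,
 * all 512 pages must already be resident in the page cache.
 */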
2088 static void khugepaged_scan_file(struct mm_struct *mm,
2089 struct file *file, pgoff_t start, struct page **hpage)
2094 static int khugepaged_collapse_pte_mapped_thps(struct mm_slot *mm_slot)
2100 static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
2101 struct page **hpage)
2102 __releases(&khugepaged_mm_lock)
2103 __acquires(&khugepaged_mm_lock)
2105 struct mm_slot *mm_slot;
2106 struct mm_struct *mm;
2107 struct vm_area_struct *vma;
2111 lockdep_assert_held(&khugepaged_mm_lock);
2113 if (khugepaged_scan.mm_slot)
2114 mm_slot = khugepaged_scan.mm_slot;
2116 mm_slot = list_entry(khugepaged_scan.mm_head.next,
2117 struct mm_slot, mm_node);
2118 khugepaged_scan.address = 0;
2119 khugepaged_scan.mm_slot = mm_slot;
2121 spin_unlock(&khugepaged_mm_lock);
2122 khugepaged_collapse_pte_mapped_thps(mm_slot);
2126 * Don't wait for semaphore (to avoid long wait times). Just move to
2127 * the next mm on the list.
2130 if (unlikely(!mmap_read_trylock(mm)))
2131 goto breakouterloop_mmap_lock;
2132 if (likely(!khugepaged_test_exit(mm)))
2133 vma = find_vma(mm, khugepaged_scan.address);
2136 for (; vma; vma = vma->vm_next) {
2137 unsigned long hstart, hend;
2140 if (unlikely(khugepaged_test_exit(mm))) {
2144 if (!hugepage_vma_check(vma, vma->vm_flags)) {
2149 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2150 hend = vma->vm_end & HPAGE_PMD_MASK;
2153 if (khugepaged_scan.address > hend)
2155 if (khugepaged_scan.address < hstart)
2156 khugepaged_scan.address = hstart;
2157 VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
2158 if (shmem_file(vma->vm_file) && !shmem_huge_enabled(vma))
2161 while (khugepaged_scan.address < hend) {
2164 if (unlikely(khugepaged_test_exit(mm)))
2165 goto breakouterloop;
2167 VM_BUG_ON(khugepaged_scan.address < hstart ||
2168 khugepaged_scan.address + HPAGE_PMD_SIZE >
2170 if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
2171 struct file *file = get_file(vma->vm_file);
2172 pgoff_t pgoff = linear_page_index(vma,
2173 khugepaged_scan.address);
2175 mmap_read_unlock(mm);
2177 khugepaged_scan_file(mm, file, pgoff, hpage);
2180 ret = khugepaged_scan_pmd(mm, vma,
2181 khugepaged_scan.address,
2184 /* move to next address */
2185 khugepaged_scan.address += HPAGE_PMD_SIZE;
2186 progress += HPAGE_PMD_NR;
2188 /* we released mmap_lock so break loop */
2189 goto breakouterloop_mmap_lock;
2190 if (progress >= pages)
2191 goto breakouterloop;
2195 mmap_read_unlock(mm); /* exit_mmap will destroy ptes after this */
2196 breakouterloop_mmap_lock:
2198 spin_lock(&khugepaged_mm_lock);
2199 VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
2201 * Release the current mm_slot if this mm is about to die, or
2202 * if we scanned all vmas of this mm.
2204 if (khugepaged_test_exit(mm) || !vma) {
2206 * Make sure that if mm_users is reaching zero while
2207 * khugepaged runs here, khugepaged_exit will find
2208 * mm_slot not pointing to the exiting mm.
2210 if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
2211 khugepaged_scan.mm_slot = list_entry(
2212 mm_slot->mm_node.next,
2213 struct mm_slot, mm_node);
2214 khugepaged_scan.address = 0;
2216 khugepaged_scan.mm_slot = NULL;
2217 khugepaged_full_scans++;
2220 collect_mm_slot(mm_slot);
2226 static int khugepaged_has_work(void)
2228 return !list_empty(&khugepaged_scan.mm_head) &&
2229 khugepaged_enabled();
2232 static int khugepaged_wait_event(void)
2234 return !list_empty(&khugepaged_scan.mm_head) ||
2235 kthread_should_stop();
2238 static void khugepaged_do_scan(void)
2240 struct page *hpage = NULL;
2241 unsigned int progress = 0, pass_through_head = 0;
2242 unsigned int pages = khugepaged_pages_to_scan;
2245 barrier(); /* write khugepaged_pages_to_scan to local stack */
2247 lru_add_drain_all();
2249 while (progress < pages) {
2250 if (!khugepaged_prealloc_page(&hpage, &wait))
2255 if (unlikely(kthread_should_stop() || try_to_freeze()))
2258 spin_lock(&khugepaged_mm_lock);
2259 if (!khugepaged_scan.mm_slot)
2260 pass_through_head++;
2261 if (khugepaged_has_work() &&
2262 pass_through_head < 2)
2263 progress += khugepaged_scan_mm_slot(pages - progress,
2267 spin_unlock(&khugepaged_mm_lock);
2270 if (!IS_ERR_OR_NULL(hpage))
2274 static bool khugepaged_should_wakeup(void)
2276 return kthread_should_stop() ||
2277 time_after_eq(jiffies, khugepaged_sleep_expire);
2280 static void khugepaged_wait_work(void)
2282 if (khugepaged_has_work()) {
2283 const unsigned long scan_sleep_jiffies =
2284 msecs_to_jiffies(khugepaged_scan_sleep_millisecs);
2286 if (!scan_sleep_jiffies)
2289 khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
2290 wait_event_freezable_timeout(khugepaged_wait,
2291 khugepaged_should_wakeup(),
2292 scan_sleep_jiffies);
2296 if (khugepaged_enabled())
2297 wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
2300 static int khugepaged(void *none)
2302 struct mm_slot *mm_slot;
2305 set_user_nice(current, MAX_NICE);
2307 while (!kthread_should_stop()) {
2308 khugepaged_do_scan();
2309 khugepaged_wait_work();
2312 spin_lock(&khugepaged_mm_lock);
2313 mm_slot = khugepaged_scan.mm_slot;
2314 khugepaged_scan.mm_slot = NULL;
2316 collect_mm_slot(mm_slot);
2317 spin_unlock(&khugepaged_mm_lock);
2321 static void set_recommended_min_free_kbytes(void)
2325 unsigned long recommended_min;
2327 for_each_populated_zone(zone) {
2329 * We don't need to worry about fragmentation of
2330 * ZONE_MOVABLE since it only has movable pages.
2332 if (zone_idx(zone) > gfp_zone(GFP_USER))
2338 /* Ensure 2 pageblocks are free to assist fragmentation avoidance */
2339 recommended_min = pageblock_nr_pages * nr_zones * 2;
2342 * Make sure that on average at least two pageblocks are almost free
2343 * of another type, one for a migratetype to fall back to and a
2344 	 * second to avoid subsequent fallbacks of other types. There are 3
2345 * MIGRATE_TYPES we care about.
2347 recommended_min += pageblock_nr_pages * nr_zones *
2348 MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
2350 	/* don't ever allow reserving more than 5% of the lowmem */
2351 recommended_min = min(recommended_min,
2352 (unsigned long) nr_free_buffer_pages() / 20);
2353 recommended_min <<= (PAGE_SHIFT-10);
2355 if (recommended_min > min_free_kbytes) {
2356 if (user_min_free_kbytes >= 0)
2357 pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
2358 min_free_kbytes, recommended_min);
2360 min_free_kbytes = recommended_min;
2362 setup_per_zone_wmarks();
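	/*
	 * Worked example for the calculation above (illustrative, assuming
	 * x86-64 defaults: 4K pages, 2M pageblocks so pageblock_nr_pages ==
	 * 512, MIGRATE_PCPTYPES == 3) with two populated non-movable zones:
	 *
	 *	free pageblocks:  512 * 2 * 2     =  2048 pages
	 *	fallback slack:   512 * 2 * 3 * 3 =  9216 pages
	 *	total             = 11264 pages -> << (12 - 10) = 45056 KB (~44 MB)
	 *
	 * capped at 5% of lowmem; the result only ever raises
	 * min_free_kbytes, it never lowers it.
	 */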
2365 int start_stop_khugepaged(void)
2369 mutex_lock(&khugepaged_mutex);
2370 if (khugepaged_enabled()) {
2371 if (!khugepaged_thread)
2372 khugepaged_thread = kthread_run(khugepaged, NULL,
2374 if (IS_ERR(khugepaged_thread)) {
2375 pr_err("khugepaged: kthread_run(khugepaged) failed\n");
2376 err = PTR_ERR(khugepaged_thread);
2377 khugepaged_thread = NULL;
2381 if (!list_empty(&khugepaged_scan.mm_head))
2382 wake_up_interruptible(&khugepaged_wait);
2384 set_recommended_min_free_kbytes();
2385 } else if (khugepaged_thread) {
2386 kthread_stop(khugepaged_thread);
2387 khugepaged_thread = NULL;
2390 mutex_unlock(&khugepaged_mutex);
2394 void khugepaged_min_free_kbytes_update(void)
2396 mutex_lock(&khugepaged_mutex);
2397 if (khugepaged_enabled() && khugepaged_thread)
2398 set_recommended_min_free_kbytes();
2399 mutex_unlock(&khugepaged_mutex);