1 // SPDX-License-Identifier: GPL-2.0
2 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
5 #include <linux/sched.h>
6 #include <linux/sched/mm.h>
7 #include <linux/sched/coredump.h>
8 #include <linux/mmu_notifier.h>
9 #include <linux/rmap.h>
10 #include <linux/swap.h>
11 #include <linux/mm_inline.h>
12 #include <linux/kthread.h>
13 #include <linux/khugepaged.h>
14 #include <linux/freezer.h>
15 #include <linux/mman.h>
16 #include <linux/hashtable.h>
17 #include <linux/userfaultfd_k.h>
18 #include <linux/page_idle.h>
19 #include <linux/swapops.h>
20 #include <linux/shmem_fs.h>
23 #include <asm/pgalloc.h>
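/*
 * Note on the fallback below: older stable kernels on some architectures
 * lack a generic tlb_remove_table_sync_one().  The substitute broadcasts a
 * synchronous IPI (smp_call_function() with wait=1) so that any concurrent
 * lockless page-table walker, such as fast GUP, has finished before a
 * just-detached page table is freed or reused.
 */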
26 /* gross hack for <=4.19 stable */
27 #if defined(CONFIG_S390) || defined(CONFIG_ARM)
28 static void tlb_remove_table_smp_sync(void *arg)
30 /* Simply deliver the interrupt */
33 static void tlb_remove_table_sync_one(void)
35 smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
46 SCAN_LACK_REFERENCED_PAGE,
60 SCAN_ALLOC_HUGE_PAGE_FAIL,
61 SCAN_CGROUP_CHARGE_FAIL,
66 #define CREATE_TRACE_POINTS
67 #include <trace/events/huge_memory.h>
69 static struct task_struct *khugepaged_thread __read_mostly;
70 static DEFINE_MUTEX(khugepaged_mutex);
72 /* default: scan 8*512 ptes (or vmas) every 10 seconds */
73 static unsigned int khugepaged_pages_to_scan __read_mostly;
74 static unsigned int khugepaged_pages_collapsed;
75 static unsigned int khugepaged_full_scans;
76 static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
77 /* during fragmentation poll the hugepage allocator once every minute */
78 static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
79 static unsigned long khugepaged_sleep_expire;
80 static DEFINE_SPINLOCK(khugepaged_mm_lock);
81 static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
83 * default: collapse hugepages if there is at least one pte mapped like
84 * it would have happened if the vma was large enough during page fault.
87 static unsigned int khugepaged_max_ptes_none __read_mostly;
88 static unsigned int khugepaged_max_ptes_swap __read_mostly;
90 #define MM_SLOTS_HASH_BITS 10
91 static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
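/* 2^10 = 1024 buckets; the hash is keyed by the mm_struct pointer value. */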
93 static struct kmem_cache *mm_slot_cache __read_mostly;
96 * struct mm_slot - hash lookup from mm to mm_slot
97 * @hash: hash collision list
98 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
99 * @mm: the mm that this information is valid for
102 struct hlist_node hash;
103 struct list_head mm_node;
104 struct mm_struct *mm;
108 * struct khugepaged_scan - cursor for scanning
109 * @mm_head: the head of the mm list to scan
110 * @mm_slot: the current mm_slot we are scanning
111 * @address: the next address inside that to be scanned
113 * There is only the one khugepaged_scan instance of this cursor structure.
115 struct khugepaged_scan {
116 struct list_head mm_head;
117 struct mm_slot *mm_slot;
118 unsigned long address;
121 static struct khugepaged_scan khugepaged_scan = {
122 .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
126 static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
127 struct kobj_attribute *attr,
130 return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
133 static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
134 struct kobj_attribute *attr,
135 const char *buf, size_t count)
140 err = kstrtoul(buf, 10, &msecs);
141 if (err || msecs > UINT_MAX)
144 khugepaged_scan_sleep_millisecs = msecs;
145 khugepaged_sleep_expire = 0;
146 wake_up_interruptible(&khugepaged_wait);
150 static struct kobj_attribute scan_sleep_millisecs_attr =
151 __ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
152 scan_sleep_millisecs_store);
154 static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
155 struct kobj_attribute *attr,
158 return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
161 static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
162 struct kobj_attribute *attr,
163 const char *buf, size_t count)
168 err = kstrtoul(buf, 10, &msecs);
169 if (err || msecs > UINT_MAX)
172 khugepaged_alloc_sleep_millisecs = msecs;
173 khugepaged_sleep_expire = 0;
174 wake_up_interruptible(&khugepaged_wait);
178 static struct kobj_attribute alloc_sleep_millisecs_attr =
179 __ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
180 alloc_sleep_millisecs_store);
182 static ssize_t pages_to_scan_show(struct kobject *kobj,
183 struct kobj_attribute *attr,
186 return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
188 static ssize_t pages_to_scan_store(struct kobject *kobj,
189 struct kobj_attribute *attr,
190 const char *buf, size_t count)
195 err = kstrtoul(buf, 10, &pages);
196 if (err || !pages || pages > UINT_MAX)
199 khugepaged_pages_to_scan = pages;
203 static struct kobj_attribute pages_to_scan_attr =
204 __ATTR(pages_to_scan, 0644, pages_to_scan_show,
205 pages_to_scan_store);
207 static ssize_t pages_collapsed_show(struct kobject *kobj,
208 struct kobj_attribute *attr,
211 return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
213 static struct kobj_attribute pages_collapsed_attr =
214 __ATTR_RO(pages_collapsed);
216 static ssize_t full_scans_show(struct kobject *kobj,
217 struct kobj_attribute *attr,
220 return sprintf(buf, "%u\n", khugepaged_full_scans);
222 static struct kobj_attribute full_scans_attr =
223 __ATTR_RO(full_scans);
225 static ssize_t khugepaged_defrag_show(struct kobject *kobj,
226 struct kobj_attribute *attr, char *buf)
228 return single_hugepage_flag_show(kobj, attr, buf,
229 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
231 static ssize_t khugepaged_defrag_store(struct kobject *kobj,
232 struct kobj_attribute *attr,
233 const char *buf, size_t count)
235 return single_hugepage_flag_store(kobj, attr, buf, count,
236 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
238 static struct kobj_attribute khugepaged_defrag_attr =
239 __ATTR(defrag, 0644, khugepaged_defrag_show,
240 khugepaged_defrag_store);
243 * max_ptes_none controls whether khugepaged may collapse hugepages over
244 * unmapped (none) ptes, which can increase the memory footprint of the
245 * vmas.  When max_ptes_none is 0, khugepaged will not reduce the amount
246 * of free memory in the system as it runs.  Increasing max_ptes_none
247 * allows khugepaged to potentially reduce the free memory in the system
248 * during its scan.
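/*
 * For example, with the default of HPAGE_PMD_NR - 1 (511 with 2MB huge
 * pages), a PMD range containing a single present pte may still be
 * collapsed, trading up to almost a full huge page of extra memory for a
 * possible TLB win.
 */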
250 static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
251 struct kobj_attribute *attr,
254 return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
256 static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
257 struct kobj_attribute *attr,
258 const char *buf, size_t count)
261 unsigned long max_ptes_none;
263 err = kstrtoul(buf, 10, &max_ptes_none);
264 if (err || max_ptes_none > HPAGE_PMD_NR-1)
267 khugepaged_max_ptes_none = max_ptes_none;
271 static struct kobj_attribute khugepaged_max_ptes_none_attr =
272 __ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
273 khugepaged_max_ptes_none_store);
275 static ssize_t khugepaged_max_ptes_swap_show(struct kobject *kobj,
276 struct kobj_attribute *attr,
279 return sprintf(buf, "%u\n", khugepaged_max_ptes_swap);
282 static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj,
283 struct kobj_attribute *attr,
284 const char *buf, size_t count)
287 unsigned long max_ptes_swap;
289 err = kstrtoul(buf, 10, &max_ptes_swap);
290 if (err || max_ptes_swap > HPAGE_PMD_NR-1)
293 khugepaged_max_ptes_swap = max_ptes_swap;
298 static struct kobj_attribute khugepaged_max_ptes_swap_attr =
299 __ATTR(max_ptes_swap, 0644, khugepaged_max_ptes_swap_show,
300 khugepaged_max_ptes_swap_store);
302 static struct attribute *khugepaged_attr[] = {
303 &khugepaged_defrag_attr.attr,
304 &khugepaged_max_ptes_none_attr.attr,
305 &pages_to_scan_attr.attr,
306 &pages_collapsed_attr.attr,
307 &full_scans_attr.attr,
308 &scan_sleep_millisecs_attr.attr,
309 &alloc_sleep_millisecs_attr.attr,
310 &khugepaged_max_ptes_swap_attr.attr,
314 struct attribute_group khugepaged_attr_group = {
315 .attrs = khugepaged_attr,
316 .name = "khugepaged",
318 #endif /* CONFIG_SYSFS */
320 #define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB)
322 int hugepage_madvise(struct vm_area_struct *vma,
323 unsigned long *vm_flags, int advice)
329 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
330 * can't handle this properly after s390_enable_sie, so we simply
331 * ignore the madvise to prevent qemu from causing a SIGSEGV.
333 if (mm_has_pgste(vma->vm_mm))
336 *vm_flags &= ~VM_NOHUGEPAGE;
337 *vm_flags |= VM_HUGEPAGE;
339 * If the vma becomes eligible for khugepaged to scan,
340 * register it here without waiting for a page fault that
341 * may not happen any time soon.
343 if (!(*vm_flags & VM_NO_KHUGEPAGED) &&
344 khugepaged_enter_vma_merge(vma, *vm_flags))
347 case MADV_NOHUGEPAGE:
348 *vm_flags &= ~VM_HUGEPAGE;
349 *vm_flags |= VM_NOHUGEPAGE;
351 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
352 * this vma even if we leave the mm registered in khugepaged if
353 * it got registered before VM_NOHUGEPAGE was set.
361 int __init khugepaged_init(void)
363 mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
364 sizeof(struct mm_slot),
365 __alignof__(struct mm_slot), 0, NULL);
369 khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
370 khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
371 khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
376 void __init khugepaged_destroy(void)
378 kmem_cache_destroy(mm_slot_cache);
381 static inline struct mm_slot *alloc_mm_slot(void)
383 if (!mm_slot_cache) /* initialization failed */
385 return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
388 static inline void free_mm_slot(struct mm_slot *mm_slot)
390 kmem_cache_free(mm_slot_cache, mm_slot);
393 static struct mm_slot *get_mm_slot(struct mm_struct *mm)
395 struct mm_slot *mm_slot;
397 hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
398 if (mm == mm_slot->mm)
404 static void insert_to_mm_slots_hash(struct mm_struct *mm,
405 struct mm_slot *mm_slot)
408 hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
411 static inline int khugepaged_test_exit(struct mm_struct *mm)
413 return atomic_read(&mm->mm_users) == 0 || !mmget_still_valid(mm);
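/*
 * mmget_still_valid() also fails once a coredump has claimed the mm, so
 * khugepaged backs off both when the last user is gone and while a dump
 * is in progress.
 */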
416 static bool hugepage_vma_check(struct vm_area_struct *vma,
417 unsigned long vm_flags)
419 if ((!(vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
420 (vm_flags & VM_NOHUGEPAGE) ||
421 test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
423 if (shmem_file(vma->vm_file)) {
424 if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
426 return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
429 if (!vma->anon_vma || vma->vm_ops)
431 if (is_vma_temporary_stack(vma))
433 return !(vm_flags & VM_NO_KHUGEPAGED);
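/*
 * Note: for shmem the IS_ALIGNED() check above verifies that the file
 * offset and the virtual address are congruent modulo HPAGE_PMD_NR pages,
 * i.e. that a huge page in the mapping could actually be mapped by a
 * single PMD in this vma.
 */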
436 int __khugepaged_enter(struct mm_struct *mm)
438 struct mm_slot *mm_slot;
441 mm_slot = alloc_mm_slot();
445 /* __khugepaged_exit() must not run from under us */
446 VM_BUG_ON_MM(atomic_read(&mm->mm_users) == 0, mm);
447 if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
448 free_mm_slot(mm_slot);
452 spin_lock(&khugepaged_mm_lock);
453 insert_to_mm_slots_hash(mm, mm_slot);
455 * Insert just behind the scanning cursor, to let the area settle
458 wakeup = list_empty(&khugepaged_scan.mm_head);
459 list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
460 spin_unlock(&khugepaged_mm_lock);
464 wake_up_interruptible(&khugepaged_wait);
469 int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
470 unsigned long vm_flags)
472 unsigned long hstart, hend;
475 * khugepaged does not yet work on non-shmem files or special
476 * mappings. And file-private shmem THP is not supported.
478 if (!hugepage_vma_check(vma, vm_flags))
481 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
482 hend = vma->vm_end & HPAGE_PMD_MASK;
484 return khugepaged_enter(vma, vm_flags);
488 void __khugepaged_exit(struct mm_struct *mm)
490 struct mm_slot *mm_slot;
493 spin_lock(&khugepaged_mm_lock);
494 mm_slot = get_mm_slot(mm);
495 if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
496 hash_del(&mm_slot->hash);
497 list_del(&mm_slot->mm_node);
500 spin_unlock(&khugepaged_mm_lock);
503 clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
504 free_mm_slot(mm_slot);
506 } else if (mm_slot) {
508 * This is required to serialize against
509 * khugepaged_test_exit() (which is guaranteed to run
510 * under mmap_sem read mode). Stop here (after we
511 * return, all pagetables will be destroyed) until
512 * khugepaged has finished working on the pagetables
513 * under the mmap_sem.
515 down_write(&mm->mmap_sem);
516 up_write(&mm->mmap_sem);
520 static void release_pte_page(struct page *page)
522 dec_node_page_state(page, NR_ISOLATED_ANON + page_is_file_cache(page));
524 putback_lru_page(page);
527 static void release_pte_pages(pte_t *pte, pte_t *_pte)
529 while (--_pte >= pte) {
530 pte_t pteval = *_pte;
531 if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)))
532 release_pte_page(pte_page(pteval));
536 static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
537 unsigned long address,
540 struct page *page = NULL;
542 int none_or_zero = 0, result = 0, referenced = 0;
543 bool writable = false;
545 for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
546 _pte++, address += PAGE_SIZE) {
547 pte_t pteval = *_pte;
548 if (pte_none(pteval) || (pte_present(pteval) &&
549 is_zero_pfn(pte_pfn(pteval)))) {
550 if (!userfaultfd_armed(vma) &&
551 ++none_or_zero <= khugepaged_max_ptes_none) {
554 result = SCAN_EXCEED_NONE_PTE;
558 if (!pte_present(pteval)) {
559 result = SCAN_PTE_NON_PRESENT;
562 page = vm_normal_page(vma, address, pteval);
563 if (unlikely(!page)) {
564 result = SCAN_PAGE_NULL;
568 /* TODO: teach khugepaged to collapse THP mapped with pte */
569 if (PageCompound(page)) {
570 result = SCAN_PAGE_COMPOUND;
574 VM_BUG_ON_PAGE(!PageAnon(page), page);
577 * We can do it before isolate_lru_page because the
578 * page can't be freed from under us. NOTE: PG_lock
579 * is needed to serialize against split_huge_page
580 * when invoked from the VM.
582 if (!trylock_page(page)) {
583 result = SCAN_PAGE_LOCK;
588 * cannot use mapcount: can't collapse if there's a gup pin.
589 * The page must only be referenced by the scanned process
590 * and page swap cache.
592 if (page_count(page) != 1 + PageSwapCache(page)) {
594 result = SCAN_PAGE_COUNT;
597 if (pte_write(pteval)) {
600 if (PageSwapCache(page) &&
601 !reuse_swap_page(page, NULL)) {
603 result = SCAN_SWAP_CACHE_PAGE;
607 * Page is not in the swap cache. It can be collapsed
613 * Isolate the page to avoid collapsing a hugepage
614 * currently in use by the VM.
616 if (isolate_lru_page(page)) {
618 result = SCAN_DEL_PAGE_LRU;
621 inc_node_page_state(page,
622 NR_ISOLATED_ANON + page_is_file_cache(page));
623 VM_BUG_ON_PAGE(!PageLocked(page), page);
624 VM_BUG_ON_PAGE(PageLRU(page), page);
626 /* There should be enough young pte to collapse the page */
627 if (pte_young(pteval) ||
628 page_is_young(page) || PageReferenced(page) ||
629 mmu_notifier_test_young(vma->vm_mm, address))
633 if (unlikely(!writable)) {
634 result = SCAN_PAGE_RO;
635 } else if (unlikely(!referenced)) {
636 result = SCAN_LACK_REFERENCED_PAGE;
638 result = SCAN_SUCCEED;
639 trace_mm_collapse_huge_page_isolate(page, none_or_zero,
640 referenced, writable, result);
644 release_pte_pages(pte, _pte);
645 trace_mm_collapse_huge_page_isolate(page, none_or_zero,
646 referenced, writable, result);
650 static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
651 struct vm_area_struct *vma,
652 unsigned long address,
656 for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
657 _pte++, page++, address += PAGE_SIZE) {
658 pte_t pteval = *_pte;
659 struct page *src_page;
661 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
662 clear_user_highpage(page, address);
663 add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
664 if (is_zero_pfn(pte_pfn(pteval))) {
666 * ptl mostly unnecessary.
670 * paravirt calls inside pte_clear here are superfluous.
673 pte_clear(vma->vm_mm, address, _pte);
677 src_page = pte_page(pteval);
678 copy_user_highpage(page, src_page, address, vma);
679 VM_BUG_ON_PAGE(page_mapcount(src_page) != 1, src_page);
680 release_pte_page(src_page);
682 * ptl mostly unnecessary, but preempt has to
683 * be disabled to update the per-cpu stats
684 * inside page_remove_rmap().
688 * paravirt calls inside pte_clear here are superfluous.
691 pte_clear(vma->vm_mm, address, _pte);
692 page_remove_rmap(src_page, false);
694 free_page_and_swap_cache(src_page);
699 static void khugepaged_alloc_sleep(void)
703 add_wait_queue(&khugepaged_wait, &wait);
704 freezable_schedule_timeout_interruptible(
705 msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
706 remove_wait_queue(&khugepaged_wait, &wait);
709 static int khugepaged_node_load[MAX_NUMNODES];
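/*
 * khugepaged_scan_abort() below bails out of a scan when the pages seen so
 * far span NUMA nodes farther apart than RECLAIM_DISTANCE: with
 * node_reclaim_mode enabled the kernel prefers local memory, and collapsing
 * such a range would concentrate remote pages on a single node.
 */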
711 static bool khugepaged_scan_abort(int nid)
716 * If node_reclaim_mode is disabled, then no extra effort is made to
717 * allocate memory locally.
719 if (!node_reclaim_mode)
722 /* If there is a count for this node already, it must be acceptable */
723 if (khugepaged_node_load[nid])
726 for (i = 0; i < MAX_NUMNODES; i++) {
727 if (!khugepaged_node_load[i])
729 if (node_distance(nid, i) > RECLAIM_DISTANCE)
735 /* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
736 static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
738 return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
742 static int khugepaged_find_target_node(void)
744 static int last_khugepaged_target_node = NUMA_NO_NODE;
745 int nid, target_node = 0, max_value = 0;
747 /* find the first node with the max normal-page hit count */
748 for (nid = 0; nid < MAX_NUMNODES; nid++)
749 if (khugepaged_node_load[nid] > max_value) {
750 max_value = khugepaged_node_load[nid];
754 /* do some balancing if several nodes have the same hit count */
755 if (target_node <= last_khugepaged_target_node)
756 for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
758 if (max_value == khugepaged_node_load[nid]) {
763 last_khugepaged_target_node = target_node;
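	/*
	 * When several nodes tie for the maximum, rotating past the
	 * previously chosen node spreads repeated collapses across the tied
	 * nodes instead of always allocating from the first one.
	 */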
767 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
769 if (IS_ERR(*hpage)) {
775 khugepaged_alloc_sleep();
785 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
787 VM_BUG_ON_PAGE(*hpage, *hpage);
789 *hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
790 if (unlikely(!*hpage)) {
791 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
792 *hpage = ERR_PTR(-ENOMEM);
796 prep_transhuge_page(*hpage);
797 count_vm_event(THP_COLLAPSE_ALLOC);
801 static int khugepaged_find_target_node(void)
806 static inline struct page *alloc_khugepaged_hugepage(void)
810 page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(),
813 prep_transhuge_page(page);
817 static struct page *khugepaged_alloc_hugepage(bool *wait)
822 hpage = alloc_khugepaged_hugepage();
824 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
829 khugepaged_alloc_sleep();
831 count_vm_event(THP_COLLAPSE_ALLOC);
832 } while (unlikely(!hpage) && likely(khugepaged_enabled()));
837 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
840 * If the hpage allocated earlier was briefly exposed in page cache
841 * before collapse_file() failed, it is possible that racing lookups
842 * have not yet completed, and would then be unpleasantly surprised by
843 * finding the hpage reused for the same mapping at a different offset.
844 * Just release the previous allocation if there is any danger of that.
846 if (*hpage && page_count(*hpage) > 1) {
852 *hpage = khugepaged_alloc_hugepage(wait);
854 if (unlikely(!*hpage))
861 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
870 * If mmap_sem was temporarily dropped, revalidate the vma
871 * after taking mmap_sem again.
872 * Return 0 on success, otherwise return a non-zero scan code.
876 static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
877 struct vm_area_struct **vmap)
879 struct vm_area_struct *vma;
880 unsigned long hstart, hend;
882 if (unlikely(khugepaged_test_exit(mm)))
883 return SCAN_ANY_PROCESS;
885 *vmap = vma = find_vma(mm, address);
887 return SCAN_VMA_NULL;
889 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
890 hend = vma->vm_end & HPAGE_PMD_MASK;
891 if (address < hstart || address + HPAGE_PMD_SIZE > hend)
892 return SCAN_ADDRESS_RANGE;
893 if (!hugepage_vma_check(vma, vma->vm_flags))
894 return SCAN_VMA_CHECK;
899 * Bring missing pages in from swap, to complete THP collapse.
900 * Only done if khugepaged_scan_pmd believes it is worthwhile.
902 * Called and returns without pte mapped or spinlocks held,
903 * but with mmap_sem held to protect against vma changes.
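/*
 * Note that swap-in is only attempted when the scan found at least
 * HPAGE_PMD_NR/2 referenced ptes (see the check on 'referenced' below);
 * otherwise the I/O is judged not worth the potential collapse.
 */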
906 static bool __collapse_huge_page_swapin(struct mm_struct *mm,
907 struct vm_area_struct *vma,
908 unsigned long address, pmd_t *pmd,
913 struct vm_fault vmf = {
916 .flags = FAULT_FLAG_ALLOW_RETRY,
918 .pgoff = linear_page_index(vma, address),
921 /* we only decide to swap in if there are enough young ptes */
922 if (referenced < HPAGE_PMD_NR/2) {
923 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
926 vmf.pte = pte_offset_map(pmd, address);
927 for (; vmf.address < address + HPAGE_PMD_NR*PAGE_SIZE;
928 vmf.pte++, vmf.address += PAGE_SIZE) {
929 vmf.orig_pte = *vmf.pte;
930 if (!is_swap_pte(vmf.orig_pte))
933 ret = do_swap_page(&vmf);
935 /* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */
936 if (ret & VM_FAULT_RETRY) {
937 down_read(&mm->mmap_sem);
938 if (hugepage_vma_revalidate(mm, address, &vmf.vma)) {
939 /* vma is no longer available, don't continue to swapin */
940 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
943 /* check if the pmd is still valid */
944 if (mm_find_pmd(mm, address) != pmd) {
945 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
949 if (ret & VM_FAULT_ERROR) {
950 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
953 /* pte is unmapped now, we need to map it */
954 vmf.pte = pte_offset_map(pmd, vmf.address);
958 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
962 static void collapse_huge_page(struct mm_struct *mm,
963 unsigned long address,
965 int node, int referenced)
970 struct page *new_page;
971 spinlock_t *pmd_ptl, *pte_ptl;
972 int isolated = 0, result = 0;
973 struct mem_cgroup *memcg;
974 struct vm_area_struct *vma;
975 unsigned long mmun_start; /* For mmu_notifiers */
976 unsigned long mmun_end; /* For mmu_notifiers */
979 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
981 /* Only allocate from the target node */
982 gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
985 * Before allocating the hugepage, release the mmap_sem read lock.
986 * The allocation can take potentially a long time if it involves
987 * sync compaction, and we do not need to hold the mmap_sem during
988 * that. We will recheck the vma after taking it again in write mode.
990 up_read(&mm->mmap_sem);
991 new_page = khugepaged_alloc_page(hpage, gfp, node);
993 result = SCAN_ALLOC_HUGE_PAGE_FAIL;
997 if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
998 result = SCAN_CGROUP_CHARGE_FAIL;
1002 down_read(&mm->mmap_sem);
1003 result = hugepage_vma_revalidate(mm, address, &vma);
1005 mem_cgroup_cancel_charge(new_page, memcg, true);
1006 up_read(&mm->mmap_sem);
1010 pmd = mm_find_pmd(mm, address);
1012 result = SCAN_PMD_NULL;
1013 mem_cgroup_cancel_charge(new_page, memcg, true);
1014 up_read(&mm->mmap_sem);
1019 * __collapse_huge_page_swapin always returns with mmap_sem locked.
1020 * If it fails, we release mmap_sem and jump out_nolock.
1021 * Continuing to collapse causes inconsistency.
1023 if (!__collapse_huge_page_swapin(mm, vma, address, pmd, referenced)) {
1024 mem_cgroup_cancel_charge(new_page, memcg, true);
1025 up_read(&mm->mmap_sem);
1029 up_read(&mm->mmap_sem);
1031 * Prevent all access to pagetables with the exception of
1032 * gup_fast later handled by the ptep_clear_flush and the VM
1033 * handled by the anon_vma lock + PG_lock.
1035 down_write(&mm->mmap_sem);
1036 result = hugepage_vma_revalidate(mm, address, &vma);
1039 /* check if the pmd is still valid */
1040 if (mm_find_pmd(mm, address) != pmd)
1043 anon_vma_lock_write(vma->anon_vma);
1045 pte = pte_offset_map(pmd, address);
1046 pte_ptl = pte_lockptr(mm, pmd);
1048 mmun_start = address;
1049 mmun_end = address + HPAGE_PMD_SIZE;
1050 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
1051 pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
1053 * After this gup_fast can't run anymore. This also removes
1054 * any huge TLB entry from the CPU so we won't allow
1055 * huge and small TLB entries for the same virtual address
1056 * to avoid the risk of CPU bugs in that area.
1058 _pmd = pmdp_collapse_flush(vma, address, pmd);
1059 spin_unlock(pmd_ptl);
1060 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
1061 tlb_remove_table_sync_one();
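	/*
	 * The IPI broadcast above makes sure that any lockless walker which
	 * sampled the old pmd before pmdp_collapse_flush() has finished
	 * walking the page table before we copy and tear it down.
	 */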
1064 isolated = __collapse_huge_page_isolate(vma, address, pte);
1065 spin_unlock(pte_ptl);
1067 if (unlikely(!isolated)) {
1070 BUG_ON(!pmd_none(*pmd));
1072 * We can only use set_pmd_at when establishing
1073 * hugepmds and never for establishing regular pmds that
1074 * points to regular pagetables. Use pmd_populate for that
1076 pmd_populate(mm, pmd, pmd_pgtable(_pmd));
1077 spin_unlock(pmd_ptl);
1078 anon_vma_unlock_write(vma->anon_vma);
1084 * All pages are isolated and locked so anon_vma rmap
1085 * can't run anymore.
1087 anon_vma_unlock_write(vma->anon_vma);
1089 __collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl);
1091 __SetPageUptodate(new_page);
1092 pgtable = pmd_pgtable(_pmd);
1094 _pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
1095 _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
1098 * spin_lock() below is not the equivalent of smp_wmb(), so
1099 * an explicit barrier is needed to prevent the copy_huge_page writes
1100 * from becoming visible after the set_pmd_at() write.
1105 BUG_ON(!pmd_none(*pmd));
1106 page_add_new_anon_rmap(new_page, vma, address, true);
1107 mem_cgroup_commit_charge(new_page, memcg, false, true);
1108 lru_cache_add_active_or_unevictable(new_page, vma);
1109 pgtable_trans_huge_deposit(mm, pmd, pgtable);
1110 set_pmd_at(mm, address, pmd, _pmd);
1111 update_mmu_cache_pmd(vma, address, pmd);
1112 spin_unlock(pmd_ptl);
1116 khugepaged_pages_collapsed++;
1117 result = SCAN_SUCCEED;
1119 up_write(&mm->mmap_sem);
1121 trace_mm_collapse_huge_page(mm, isolated, result);
1124 mem_cgroup_cancel_charge(new_page, memcg, true);
1128 static int khugepaged_scan_pmd(struct mm_struct *mm,
1129 struct vm_area_struct *vma,
1130 unsigned long address,
1131 struct page **hpage)
1135 int ret = 0, none_or_zero = 0, result = 0, referenced = 0;
1136 struct page *page = NULL;
1137 unsigned long _address;
1139 int node = NUMA_NO_NODE, unmapped = 0;
1140 bool writable = false;
1142 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1144 pmd = mm_find_pmd(mm, address);
1146 result = SCAN_PMD_NULL;
1150 memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
1151 pte = pte_offset_map_lock(mm, pmd, address, &ptl);
1152 for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
1153 _pte++, _address += PAGE_SIZE) {
1154 pte_t pteval = *_pte;
1155 if (is_swap_pte(pteval)) {
1156 if (++unmapped <= khugepaged_max_ptes_swap) {
1159 result = SCAN_EXCEED_SWAP_PTE;
1163 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
1164 if (!userfaultfd_armed(vma) &&
1165 ++none_or_zero <= khugepaged_max_ptes_none) {
1168 result = SCAN_EXCEED_NONE_PTE;
1172 if (!pte_present(pteval)) {
1173 result = SCAN_PTE_NON_PRESENT;
1176 if (pte_write(pteval))
1179 page = vm_normal_page(vma, _address, pteval);
1180 if (unlikely(!page)) {
1181 result = SCAN_PAGE_NULL;
1185 /* TODO: teach khugepaged to collapse THP mapped with pte */
1186 if (PageCompound(page)) {
1187 result = SCAN_PAGE_COMPOUND;
1192 * Record which node the original page is from and save this
1193 * information to khugepaged_node_load[].
1194 * Khugepaged will allocate the hugepage from the node that has the max hit count.
1197 node = page_to_nid(page);
1198 if (khugepaged_scan_abort(node)) {
1199 result = SCAN_SCAN_ABORT;
1202 khugepaged_node_load[node]++;
1203 if (!PageLRU(page)) {
1204 result = SCAN_PAGE_LRU;
1207 if (PageLocked(page)) {
1208 result = SCAN_PAGE_LOCK;
1211 if (!PageAnon(page)) {
1212 result = SCAN_PAGE_ANON;
1217 * cannot use mapcount: can't collapse if there's a gup pin.
1218 * The page must only be referenced by the scanned process
1219 * and page swap cache.
1221 if (page_count(page) != 1 + PageSwapCache(page)) {
1222 result = SCAN_PAGE_COUNT;
1225 if (pte_young(pteval) ||
1226 page_is_young(page) || PageReferenced(page) ||
1227 mmu_notifier_test_young(vma->vm_mm, address))
1232 result = SCAN_SUCCEED;
1235 result = SCAN_LACK_REFERENCED_PAGE;
1238 result = SCAN_PAGE_RO;
1241 pte_unmap_unlock(pte, ptl);
1243 node = khugepaged_find_target_node();
1244 /* collapse_huge_page will return with the mmap_sem released */
1245 collapse_huge_page(mm, address, hpage, node, referenced);
1248 trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
1249 none_or_zero, result, unmapped);
1253 static void collect_mm_slot(struct mm_slot *mm_slot)
1255 struct mm_struct *mm = mm_slot->mm;
1257 VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
1259 if (khugepaged_test_exit(mm)) {
1261 hash_del(&mm_slot->hash);
1262 list_del(&mm_slot->mm_node);
1265 * Not strictly needed because the mm exited already.
1267 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
1270 /* khugepaged_mm_lock actually not necessary for the below */
1271 free_mm_slot(mm_slot);
1276 #if defined(CONFIG_SHMEM) && defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE)
1277 static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
1279 struct vm_area_struct *vma;
1280 struct mm_struct *mm;
1284 i_mmap_lock_write(mapping);
1285 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
1286 /* probably overkill */
1289 addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
1290 if (addr & ~HPAGE_PMD_MASK)
1292 if (vma->vm_end < addr + HPAGE_PMD_SIZE)
1295 pmd = mm_find_pmd(mm, addr);
1299 * We need exclusive mmap_sem to retract page table.
1300 * If trylock fails we would end up with pte-mapped THP after
1301 * re-fault. Not ideal, but it's more important to not disturb
1302 * the system too much.
1304 if (down_write_trylock(&mm->mmap_sem)) {
1305 if (!khugepaged_test_exit(mm)) {
1307 unsigned long end = addr + HPAGE_PMD_SIZE;
1309 mmu_notifier_invalidate_range_start(mm, addr,
1311 ptl = pmd_lock(mm, pmd);
1312 /* assume page table is clear */
1313 _pmd = pmdp_collapse_flush(vma, addr, pmd);
1316 tlb_remove_table_sync_one();
1317 pte_free(mm, pmd_pgtable(_pmd));
1318 mmu_notifier_invalidate_range_end(mm, addr,
1321 up_write(&mm->mmap_sem);
1324 i_mmap_unlock_write(mapping);
1328 * collapse_shmem - collapse small tmpfs/shmem pages into a huge one.
1330 * The basic scheme is simple; the details are more complex:
1331 * - allocate and lock a new huge page;
1332 * - scan over the radix tree, replacing old pages with the new one
1333 * + swap in pages if necessary;
1335 * + keep old pages around in case rollback is required;
1336 * - if replacing succeeds:
1339 * + unlock the huge page;
1340 * - if replacing fails:
1341 * + put all pages back and unfreeze them;
1342 * + restore gaps in the radix-tree;
1343 * + unlock and free the huge page;
1345 static void collapse_shmem(struct mm_struct *mm,
1346 struct address_space *mapping, pgoff_t start,
1347 struct page **hpage, int node)
1350 struct page *page, *new_page, *tmp;
1351 struct mem_cgroup *memcg;
1352 pgoff_t index, end = start + HPAGE_PMD_NR;
1353 LIST_HEAD(pagelist);
1354 struct radix_tree_iter iter;
1356 int nr_none = 0, result = SCAN_SUCCEED;
1358 VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
1360 /* Only allocate from the target node */
1361 gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
1363 new_page = khugepaged_alloc_page(hpage, gfp, node);
1365 result = SCAN_ALLOC_HUGE_PAGE_FAIL;
1369 if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
1370 result = SCAN_CGROUP_CHARGE_FAIL;
1374 __SetPageLocked(new_page);
1375 __SetPageSwapBacked(new_page);
1376 new_page->index = start;
1377 new_page->mapping = mapping;
1380 * At this point the new_page is locked and not up-to-date.
1381 * It's safe to insert it into the page cache, because nobody would
1382 * be able to map it or use it in another way until we unlock it.
1386 xa_lock_irq(&mapping->i_pages);
1387 radix_tree_for_each_slot(slot, &mapping->i_pages, &iter, start) {
1388 int n = min(iter.index, end) - index;
1391 * Stop if extent has been hole-punched, and is now completely
1392 * empty (the more obvious i_size_read() check would take an
1393 * irq-unsafe seqlock on 32-bit).
1395 if (n >= HPAGE_PMD_NR) {
1396 result = SCAN_TRUNCATED;
1401 * Handle holes in the radix tree: charge them to shmem and
1402 * insert the relevant subpages of new_page into the radix-tree.
1404 if (n && !shmem_charge(mapping->host, n)) {
1408 for (; index < min(iter.index, end); index++) {
1409 radix_tree_insert(&mapping->i_pages, index,
1410 new_page + (index % HPAGE_PMD_NR));
1418 page = radix_tree_deref_slot_protected(slot,
1419 &mapping->i_pages.xa_lock);
1420 if (radix_tree_exceptional_entry(page) || !PageUptodate(page)) {
1421 xa_unlock_irq(&mapping->i_pages);
1422 /* swap in or instantiate fallocated page */
1423 if (shmem_getpage(mapping->host, index, &page,
1428 } else if (trylock_page(page)) {
1430 xa_unlock_irq(&mapping->i_pages);
1432 result = SCAN_PAGE_LOCK;
1437 * The page must be locked, so we can drop the i_pages lock
1438 * without racing with truncate.
1440 VM_BUG_ON_PAGE(!PageLocked(page), page);
1441 VM_BUG_ON_PAGE(!PageUptodate(page), page);
1444 * If file was truncated then extended, or hole-punched, before
1445 * we locked the first page, then a THP might be there already.
1447 if (PageTransCompound(page)) {
1448 result = SCAN_PAGE_COMPOUND;
1452 if (page_mapping(page) != mapping) {
1453 result = SCAN_TRUNCATED;
1457 if (isolate_lru_page(page)) {
1458 result = SCAN_DEL_PAGE_LRU;
1462 if (page_mapped(page))
1463 unmap_mapping_pages(mapping, index, 1, false);
1465 xa_lock_irq(&mapping->i_pages);
1467 slot = radix_tree_lookup_slot(&mapping->i_pages, index);
1468 VM_BUG_ON_PAGE(page != radix_tree_deref_slot_protected(slot,
1469 &mapping->i_pages.xa_lock), page);
1470 VM_BUG_ON_PAGE(page_mapped(page), page);
1473 * The page is expected to have page_count() == 3:
1474 * - we hold a pin on it;
1475 * - one reference from radix tree;
1476 * - one from isolate_lru_page;
1478 if (!page_ref_freeze(page, 3)) {
1479 result = SCAN_PAGE_COUNT;
1480 xa_unlock_irq(&mapping->i_pages);
1481 putback_lru_page(page);
1486 * Add the page to the list so that we can undo the collapse if
1487 * something goes wrong.
1489 list_add_tail(&page->lru, &pagelist);
1491 /* Finally, replace with the new page. */
1492 radix_tree_replace_slot(&mapping->i_pages, slot,
1493 new_page + (index % HPAGE_PMD_NR));
1495 slot = radix_tree_iter_resume(slot, &iter);
1505 * Handle the hole in the radix tree at the end of the range.
1506 * This code only triggers if there's nothing in the radix tree beyond 'end'.
1510 int n = end - index;
1512 /* Stop if extent has been truncated, and is now empty */
1513 if (n >= HPAGE_PMD_NR) {
1514 result = SCAN_TRUNCATED;
1517 if (!shmem_charge(mapping->host, n)) {
1521 for (; index < end; index++) {
1522 radix_tree_insert(&mapping->i_pages, index,
1523 new_page + (index % HPAGE_PMD_NR));
1528 __inc_node_page_state(new_page, NR_SHMEM_THPS);
1530 struct zone *zone = page_zone(new_page);
1532 __mod_node_page_state(zone->zone_pgdat, NR_FILE_PAGES, nr_none);
1533 __mod_node_page_state(zone->zone_pgdat, NR_SHMEM, nr_none);
1537 xa_unlock_irq(&mapping->i_pages);
1540 if (result == SCAN_SUCCEED) {
1542 * Replacing the old pages with the new one has succeeded; now we need
1543 * to copy the content and free the old pages.
1546 list_for_each_entry_safe(page, tmp, &pagelist, lru) {
1547 while (index < page->index) {
1548 clear_highpage(new_page + (index % HPAGE_PMD_NR));
1551 copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
1553 list_del(&page->lru);
1554 page->mapping = NULL;
1555 page_ref_unfreeze(page, 1);
1556 ClearPageActive(page);
1557 ClearPageUnevictable(page);
1562 while (index < end) {
1563 clear_highpage(new_page + (index % HPAGE_PMD_NR));
1567 SetPageUptodate(new_page);
1568 page_ref_add(new_page, HPAGE_PMD_NR - 1);
1569 set_page_dirty(new_page);
1570 mem_cgroup_commit_charge(new_page, memcg, false, true);
1571 lru_cache_add_anon(new_page);
1574 * Remove pte page tables, so we can re-fault the page as huge.
1576 retract_page_tables(mapping, start);
1579 khugepaged_pages_collapsed++;
1581 /* Something went wrong: rollback changes to the radix-tree */
1582 xa_lock_irq(&mapping->i_pages);
1583 mapping->nrpages -= nr_none;
1584 shmem_uncharge(mapping->host, nr_none);
1586 radix_tree_for_each_slot(slot, &mapping->i_pages, &iter, start) {
1587 if (iter.index >= end)
1589 page = list_first_entry_or_null(&pagelist,
1591 if (!page || iter.index < page->index) {
1595 /* Put holes back where they were */
1596 radix_tree_delete(&mapping->i_pages, iter.index);
1600 VM_BUG_ON_PAGE(page->index != iter.index, page);
1602 /* Unfreeze the page. */
1603 list_del(&page->lru);
1604 page_ref_unfreeze(page, 2);
1605 radix_tree_replace_slot(&mapping->i_pages, slot, page);
1606 slot = radix_tree_iter_resume(slot, &iter);
1607 xa_unlock_irq(&mapping->i_pages);
1609 putback_lru_page(page);
1610 xa_lock_irq(&mapping->i_pages);
1613 xa_unlock_irq(&mapping->i_pages);
1615 mem_cgroup_cancel_charge(new_page, memcg, true);
1616 new_page->mapping = NULL;
1619 unlock_page(new_page);
1621 VM_BUG_ON(!list_empty(&pagelist));
1622 /* TODO: tracepoints */
1625 static void khugepaged_scan_shmem(struct mm_struct *mm,
1626 struct address_space *mapping,
1627 pgoff_t start, struct page **hpage)
1629 struct page *page = NULL;
1630 struct radix_tree_iter iter;
1633 int node = NUMA_NO_NODE;
1634 int result = SCAN_SUCCEED;
1638 memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
1640 radix_tree_for_each_slot(slot, &mapping->i_pages, &iter, start) {
1641 if (iter.index >= start + HPAGE_PMD_NR)
1644 page = radix_tree_deref_slot(slot);
1645 if (radix_tree_deref_retry(page)) {
1646 slot = radix_tree_iter_retry(&iter);
1650 if (radix_tree_exception(page)) {
1651 if (++swap > khugepaged_max_ptes_swap) {
1652 result = SCAN_EXCEED_SWAP_PTE;
1658 if (PageTransCompound(page)) {
1659 result = SCAN_PAGE_COMPOUND;
1663 node = page_to_nid(page);
1664 if (khugepaged_scan_abort(node)) {
1665 result = SCAN_SCAN_ABORT;
1668 khugepaged_node_load[node]++;
1670 if (!PageLRU(page)) {
1671 result = SCAN_PAGE_LRU;
1675 if (page_count(page) != 1 + page_mapcount(page)) {
1676 result = SCAN_PAGE_COUNT;
1681 * We probably should check if the page is referenced here, but
1682 * nobody would transfer pte_young() to PageReferenced() for us.
1683 * And rmap walk here is just too costly...
1688 if (need_resched()) {
1689 slot = radix_tree_iter_resume(slot, &iter);
1695 if (result == SCAN_SUCCEED) {
1696 if (present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
1697 result = SCAN_EXCEED_NONE_PTE;
1699 node = khugepaged_find_target_node();
1700 collapse_shmem(mm, mapping, start, hpage, node);
1704 /* TODO: tracepoints */
1707 static void khugepaged_scan_shmem(struct mm_struct *mm,
1708 struct address_space *mapping,
1709 pgoff_t start, struct page **hpage)
1715 static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
1716 struct page **hpage)
1717 __releases(&khugepaged_mm_lock)
1718 __acquires(&khugepaged_mm_lock)
1720 struct mm_slot *mm_slot;
1721 struct mm_struct *mm;
1722 struct vm_area_struct *vma;
1726 VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
1728 if (khugepaged_scan.mm_slot)
1729 mm_slot = khugepaged_scan.mm_slot;
1731 mm_slot = list_entry(khugepaged_scan.mm_head.next,
1732 struct mm_slot, mm_node);
1733 khugepaged_scan.address = 0;
1734 khugepaged_scan.mm_slot = mm_slot;
1736 spin_unlock(&khugepaged_mm_lock);
1740 * Don't wait for semaphore (to avoid long wait times). Just move to
1741 * the next mm on the list.
1744 if (unlikely(!down_read_trylock(&mm->mmap_sem)))
1745 goto breakouterloop_mmap_sem;
1746 if (likely(!khugepaged_test_exit(mm)))
1747 vma = find_vma(mm, khugepaged_scan.address);
1750 for (; vma; vma = vma->vm_next) {
1751 unsigned long hstart, hend;
1754 if (unlikely(khugepaged_test_exit(mm))) {
1758 if (!hugepage_vma_check(vma, vma->vm_flags)) {
1763 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
1764 hend = vma->vm_end & HPAGE_PMD_MASK;
1767 if (khugepaged_scan.address > hend)
1769 if (khugepaged_scan.address < hstart)
1770 khugepaged_scan.address = hstart;
1771 VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
1773 while (khugepaged_scan.address < hend) {
1776 if (unlikely(khugepaged_test_exit(mm)))
1777 goto breakouterloop;
1779 VM_BUG_ON(khugepaged_scan.address < hstart ||
1780 khugepaged_scan.address + HPAGE_PMD_SIZE >
1782 if (shmem_file(vma->vm_file)) {
1784 pgoff_t pgoff = linear_page_index(vma,
1785 khugepaged_scan.address);
1786 if (!shmem_huge_enabled(vma))
1788 file = get_file(vma->vm_file);
1789 up_read(&mm->mmap_sem);
1791 khugepaged_scan_shmem(mm, file->f_mapping,
1795 ret = khugepaged_scan_pmd(mm, vma,
1796 khugepaged_scan.address,
1799 /* move to next address */
1800 khugepaged_scan.address += HPAGE_PMD_SIZE;
1801 progress += HPAGE_PMD_NR;
1803 /* we released mmap_sem so break loop */
1804 goto breakouterloop_mmap_sem;
1805 if (progress >= pages)
1806 goto breakouterloop;
1810 up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */
1811 breakouterloop_mmap_sem:
1813 spin_lock(&khugepaged_mm_lock);
1814 VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
1816 * Release the current mm_slot if this mm is about to die, or
1817 * if we scanned all vmas of this mm.
1819 if (khugepaged_test_exit(mm) || !vma) {
1821 * Make sure that if mm_users is reaching zero while
1822 * khugepaged runs here, khugepaged_exit will find
1823 * mm_slot not pointing to the exiting mm.
1825 if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
1826 khugepaged_scan.mm_slot = list_entry(
1827 mm_slot->mm_node.next,
1828 struct mm_slot, mm_node);
1829 khugepaged_scan.address = 0;
1831 khugepaged_scan.mm_slot = NULL;
1832 khugepaged_full_scans++;
1835 collect_mm_slot(mm_slot);
1841 static int khugepaged_has_work(void)
1843 return !list_empty(&khugepaged_scan.mm_head) &&
1844 khugepaged_enabled();
1847 static int khugepaged_wait_event(void)
1849 return !list_empty(&khugepaged_scan.mm_head) ||
1850 kthread_should_stop();
1853 static void khugepaged_do_scan(void)
1855 struct page *hpage = NULL;
1856 unsigned int progress = 0, pass_through_head = 0;
1857 unsigned int pages = khugepaged_pages_to_scan;
1860 barrier(); /* write khugepaged_pages_to_scan to local stack */
1862 while (progress < pages) {
1863 if (!khugepaged_prealloc_page(&hpage, &wait))
1868 if (unlikely(kthread_should_stop() || try_to_freeze()))
1871 spin_lock(&khugepaged_mm_lock);
1872 if (!khugepaged_scan.mm_slot)
1873 pass_through_head++;
1874 if (khugepaged_has_work() &&
1875 pass_through_head < 2)
1876 progress += khugepaged_scan_mm_slot(pages - progress,
1880 spin_unlock(&khugepaged_mm_lock);
1883 if (!IS_ERR_OR_NULL(hpage))
1887 static bool khugepaged_should_wakeup(void)
1889 return kthread_should_stop() ||
1890 time_after_eq(jiffies, khugepaged_sleep_expire);
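/*
 * khugepaged_sleep_expire doubles as a "wake up now" latch: the sysfs
 * store handlers reset it to 0, which makes the time_after_eq() check
 * above succeed immediately on the next wakeup.
 */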
1893 static void khugepaged_wait_work(void)
1895 if (khugepaged_has_work()) {
1896 const unsigned long scan_sleep_jiffies =
1897 msecs_to_jiffies(khugepaged_scan_sleep_millisecs);
1899 if (!scan_sleep_jiffies)
1902 khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
1903 wait_event_freezable_timeout(khugepaged_wait,
1904 khugepaged_should_wakeup(),
1905 scan_sleep_jiffies);
1909 if (khugepaged_enabled())
1910 wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
1913 static int khugepaged(void *none)
1915 struct mm_slot *mm_slot;
1918 set_user_nice(current, MAX_NICE);
1920 while (!kthread_should_stop()) {
1921 khugepaged_do_scan();
1922 khugepaged_wait_work();
1925 spin_lock(&khugepaged_mm_lock);
1926 mm_slot = khugepaged_scan.mm_slot;
1927 khugepaged_scan.mm_slot = NULL;
1929 collect_mm_slot(mm_slot);
1930 spin_unlock(&khugepaged_mm_lock);
1934 static void set_recommended_min_free_kbytes(void)
1938 unsigned long recommended_min;
1940 for_each_populated_zone(zone) {
1942 * We don't need to worry about fragmentation of
1943 * ZONE_MOVABLE since it only has movable pages.
1945 if (zone_idx(zone) > gfp_zone(GFP_USER))
1951 /* Ensure 2 pageblocks are free to assist fragmentation avoidance */
1952 recommended_min = pageblock_nr_pages * nr_zones * 2;
1955 * Make sure that on average at least two pageblocks are almost free
1956 * of another type, one for a migratetype to fall back to and a
1957 * second to avoid subsequent fallbacks of other types. There are 3
1958 * MIGRATE_TYPES we care about.
1960 recommended_min += pageblock_nr_pages * nr_zones *
1961 MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
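	/*
	 * As a rough example: with 2MB pageblocks (512 4K pages) and a single
	 * populated zone, this asks for 2 + 3 * 3 = 11 pageblocks, i.e. about
	 * 22MB, before the 5%-of-lowmem cap below is applied (assuming
	 * MIGRATE_PCPTYPES == 3 on this kernel).
	 */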
1963 /* don't ever allow reserving more than 5% of the lowmem */
1964 recommended_min = min(recommended_min,
1965 (unsigned long) nr_free_buffer_pages() / 20);
1966 recommended_min <<= (PAGE_SHIFT-10);
1968 if (recommended_min > min_free_kbytes) {
1969 if (user_min_free_kbytes >= 0)
1970 pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
1971 min_free_kbytes, recommended_min);
1973 min_free_kbytes = recommended_min;
1975 setup_per_zone_wmarks();
1978 int start_stop_khugepaged(void)
1982 mutex_lock(&khugepaged_mutex);
1983 if (khugepaged_enabled()) {
1984 if (!khugepaged_thread)
1985 khugepaged_thread = kthread_run(khugepaged, NULL,
1987 if (IS_ERR(khugepaged_thread)) {
1988 pr_err("khugepaged: kthread_run(khugepaged) failed\n");
1989 err = PTR_ERR(khugepaged_thread);
1990 khugepaged_thread = NULL;
1994 if (!list_empty(&khugepaged_scan.mm_head))
1995 wake_up_interruptible(&khugepaged_wait);
1997 set_recommended_min_free_kbytes();
1998 } else if (khugepaged_thread) {
1999 kthread_stop(khugepaged_thread);
2000 khugepaged_thread = NULL;
2003 mutex_unlock(&khugepaged_mutex);
2007 void khugepaged_min_free_kbytes_update(void)
2009 mutex_lock(&khugepaged_mutex);
2010 if (khugepaged_enabled() && khugepaged_thread)
2011 set_recommended_min_free_kbytes();
2012 mutex_unlock(&khugepaged_mutex);