// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/mm_inline.h>
#include <linux/kthread.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/mman.h>
#include <linux/hashtable.h>
#include <linux/userfaultfd_k.h>
#include <linux/page_idle.h>
#include <linux/swapops.h>
#include <linux/shmem_fs.h>

#include <asm/pgalloc.h>
	SCAN_LACK_REFERENCED_PAGE,
	SCAN_ALLOC_HUGE_PAGE_FAIL,
	SCAN_CGROUP_CHARGE_FAIL,
#define CREATE_TRACE_POINTS
#include <trace/events/huge_memory.h>

static struct task_struct *khugepaged_thread __read_mostly;
static DEFINE_MUTEX(khugepaged_mutex);

/* default: scan 8*512 ptes (or vmas) every 10 seconds */
static unsigned int khugepaged_pages_to_scan __read_mostly;
static unsigned int khugepaged_pages_collapsed;
static unsigned int khugepaged_full_scans;
static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
/* during fragmentation poll the hugepage allocator once every minute */
static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
static unsigned long khugepaged_sleep_expire;
static DEFINE_SPINLOCK(khugepaged_mm_lock);
static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
/*
 * default: collapse hugepages if there is at least one pte mapped like
 * it would have happened if the vma was large enough during page
 * fault.
 */
static unsigned int khugepaged_max_ptes_none __read_mostly;
static unsigned int khugepaged_max_ptes_swap __read_mostly;

#define MM_SLOTS_HASH_BITS 10
static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

static struct kmem_cache *mm_slot_cache __read_mostly;
/**
 * struct mm_slot - hash lookup from mm to mm_slot
 * @hash: hash collision list
 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
 * @mm: the mm that this information is valid for
 */
struct mm_slot {
	struct hlist_node hash;
	struct list_head mm_node;
	struct mm_struct *mm;
};
/**
 * struct khugepaged_scan - cursor for scanning
 * @mm_head: the head of the mm list to scan
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that to be scanned
 *
 * There is only one khugepaged_scan instance of this cursor structure.
 */
struct khugepaged_scan {
	struct list_head mm_head;
	struct mm_slot *mm_slot;
	unsigned long address;
};

static struct khugepaged_scan khugepaged_scan = {
	.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
};
#ifdef CONFIG_SYSFS
static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
					 struct kobj_attribute *attr,
					 char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
}

static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  const char *buf, size_t count)
{
	unsigned long msecs;
	int err;

	err = kstrtoul(buf, 10, &msecs);
	if (err || msecs > UINT_MAX)
		return -EINVAL;

	khugepaged_scan_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute scan_sleep_millisecs_attr =
	__ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
	       scan_sleep_millisecs_store);

static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
					  struct kobj_attribute *attr,
					  char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
}

static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
					   struct kobj_attribute *attr,
					   const char *buf, size_t count)
{
	unsigned long msecs;
	int err;

	err = kstrtoul(buf, 10, &msecs);
	if (err || msecs > UINT_MAX)
		return -EINVAL;

	khugepaged_alloc_sleep_millisecs = msecs;
	khugepaged_sleep_expire = 0;
	wake_up_interruptible(&khugepaged_wait);

	return count;
}
static struct kobj_attribute alloc_sleep_millisecs_attr =
	__ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
	       alloc_sleep_millisecs_store);

static ssize_t pages_to_scan_show(struct kobject *kobj,
				  struct kobj_attribute *attr,
				  char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
}
static ssize_t pages_to_scan_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count)
{
	unsigned long pages;
	int err;

	err = kstrtoul(buf, 10, &pages);
	if (err || !pages || pages > UINT_MAX)
		return -EINVAL;

	khugepaged_pages_to_scan = pages;

	return count;
}
static struct kobj_attribute pages_to_scan_attr =
	__ATTR(pages_to_scan, 0644, pages_to_scan_show,
	       pages_to_scan_store);
static ssize_t pages_collapsed_show(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
}
static struct kobj_attribute pages_collapsed_attr =
	__ATTR_RO(pages_collapsed);

static ssize_t full_scans_show(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_full_scans);
}
static struct kobj_attribute full_scans_attr =
	__ATTR_RO(full_scans);

static ssize_t khugepaged_defrag_show(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	return single_hugepage_flag_show(kobj, attr, buf,
				TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static ssize_t khugepaged_defrag_store(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       const char *buf, size_t count)
{
	return single_hugepage_flag_store(kobj, attr, buf, count,
				TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static struct kobj_attribute khugepaged_defrag_attr =
	__ATTR(defrag, 0644, khugepaged_defrag_show,
	       khugepaged_defrag_store);
/*
 * max_ptes_none controls whether khugepaged should collapse hugepages
 * over any unmapped ptes, in turn potentially increasing the memory
 * footprint of the vmas. When max_ptes_none is 0, khugepaged will not
 * reduce the available free memory in the system as it runs. Increasing
 * max_ptes_none will instead potentially reduce the free memory in the
 * system during the khugepaged scan.
 */
static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
					     struct kobj_attribute *attr,
					     char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
}
static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
					      struct kobj_attribute *attr,
					      const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_none;

	err = kstrtoul(buf, 10, &max_ptes_none);
	if (err || max_ptes_none > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_none = max_ptes_none;

	return count;
}
static struct kobj_attribute khugepaged_max_ptes_none_attr =
	__ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
	       khugepaged_max_ptes_none_store);

static ssize_t khugepaged_max_ptes_swap_show(struct kobject *kobj,
					     struct kobj_attribute *attr,
					     char *buf)
{
	return sprintf(buf, "%u\n", khugepaged_max_ptes_swap);
}

static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj,
					      struct kobj_attribute *attr,
					      const char *buf, size_t count)
{
	int err;
	unsigned long max_ptes_swap;

	err = kstrtoul(buf, 10, &max_ptes_swap);
	if (err || max_ptes_swap > HPAGE_PMD_NR - 1)
		return -EINVAL;

	khugepaged_max_ptes_swap = max_ptes_swap;

	return count;
}
static struct kobj_attribute khugepaged_max_ptes_swap_attr =
	__ATTR(max_ptes_swap, 0644, khugepaged_max_ptes_swap_show,
	       khugepaged_max_ptes_swap_store);
static struct attribute *khugepaged_attr[] = {
	&khugepaged_defrag_attr.attr,
	&khugepaged_max_ptes_none_attr.attr,
	&pages_to_scan_attr.attr,
	&pages_collapsed_attr.attr,
	&full_scans_attr.attr,
	&scan_sleep_millisecs_attr.attr,
	&alloc_sleep_millisecs_attr.attr,
	&khugepaged_max_ptes_swap_attr.attr,
	NULL,
};

struct attribute_group khugepaged_attr_group = {
	.attrs = khugepaged_attr,
	.name = "khugepaged",
};
#endif /* CONFIG_SYSFS */
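
/*
 * The attribute group above is registered (from huge_memory.c) under the
 * transparent_hugepage kobject, so the knobs appear as
 * /sys/kernel/mm/transparent_hugepage/khugepaged/<name>.  A tuning
 * sketch (the values are illustrative, not recommendations):
 *
 *	echo 100  > /sys/kernel/mm/transparent_hugepage/khugepaged/scan_sleep_millisecs
 *	echo 4096 > /sys/kernel/mm/transparent_hugepage/khugepaged/pages_to_scan
 *	cat /sys/kernel/mm/transparent_hugepage/khugepaged/pages_collapsed
 */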
#define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB)

int hugepage_madvise(struct vm_area_struct *vma,
		     unsigned long *vm_flags, int advice)
{
	switch (advice) {
	case MADV_HUGEPAGE:
#ifdef CONFIG_S390
		/*
		 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
		 * can't handle this properly after s390_enable_sie, so we simply
		 * ignore the madvise to prevent qemu from causing a SIGSEGV.
		 */
		if (mm_has_pgste(vma->vm_mm))
			return 0;
#endif
		*vm_flags &= ~VM_NOHUGEPAGE;
		*vm_flags |= VM_HUGEPAGE;
		/*
		 * If the vma becomes eligible for khugepaged to scan,
		 * register it here without waiting for a page fault that
		 * may not happen any time soon.
		 */
		if (!(*vm_flags & VM_NO_KHUGEPAGED) &&
				khugepaged_enter_vma_merge(vma, *vm_flags))
			return -ENOMEM;
		break;
	case MADV_NOHUGEPAGE:
		*vm_flags &= ~VM_HUGEPAGE;
		*vm_flags |= VM_NOHUGEPAGE;
		/*
		 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
		 * this vma even if we leave the mm registered in khugepaged if
		 * it got registered before VM_NOHUGEPAGE was set.
		 */
		break;
	}

	return 0;
}
int __init khugepaged_init(void)
{
	mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
					  sizeof(struct mm_slot),
					  __alignof__(struct mm_slot), 0, NULL);
	if (!mm_slot_cache)
		return -ENOMEM;

	khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
	khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
	khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;

	return 0;
}
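
/*
 * Worked numbers for the defaults above (illustrative; assuming x86-64
 * with 4K pages, where HPAGE_PMD_NR == 512): pages_to_scan starts at
 * 8 * 512 == 4096 ptes per scan pass (16M worth of mappings),
 * max_ptes_none at 511 (a single populated pte in a 2M range is enough
 * to collapse it) and max_ptes_swap at 512 / 8 == 64 (at most 1/8 of
 * the range may be swapped out for a collapse to be attempted).
 */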
void __init khugepaged_destroy(void)
{
	kmem_cache_destroy(mm_slot_cache);
}

static inline struct mm_slot *alloc_mm_slot(void)
{
	if (!mm_slot_cache)	/* initialization failed */
		return NULL;
	return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
}

static inline void free_mm_slot(struct mm_slot *mm_slot)
{
	kmem_cache_free(mm_slot_cache, mm_slot);
}

static struct mm_slot *get_mm_slot(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;

	hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
		if (mm == mm_slot->mm)
			return mm_slot;

	return NULL;
}

static void insert_to_mm_slots_hash(struct mm_struct *mm,
				    struct mm_slot *mm_slot)
{
	mm_slot->mm = mm;
	hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
}

static inline int khugepaged_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0 || !mmget_still_valid(mm);
}
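
/*
 * In short: a non-zero return means either that the last mm_users
 * reference is gone (exit_mmap() is tearing the mm down) or, via
 * mmget_still_valid(), that a coredump is in progress; the scan paths
 * below check this before touching page tables, so khugepaged never
 * works on an mm that is going away under it.
 */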
static bool hugepage_vma_check(struct vm_area_struct *vma,
			       unsigned long vm_flags)
{
	if ((!(vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
	    (vm_flags & VM_NOHUGEPAGE) ||
	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
		return false;
	if (shmem_file(vma->vm_file)) {
		if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
			return false;
		return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
				HPAGE_PMD_NR);
	}
	if (!vma->anon_vma || vma->vm_ops)
		return false;
	if (is_vma_temporary_stack(vma))
		return false;
	return !(vm_flags & VM_NO_KHUGEPAGED);
}
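
/*
 * Summarizing the policy above: anonymous vmas qualify once THP is
 * enabled for them and they have an anon_vma, no special vm_ops and are
 * not a temporary stack; shmem-backed vmas qualify only with
 * CONFIG_TRANSPARENT_HUGE_PAGECACHE and a PMD-aligned pgoff; special,
 * hugetlb and MADV_NOHUGEPAGE mappings never do.
 */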
int __khugepaged_enter(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int wakeup;

	mm_slot = alloc_mm_slot();
	if (!mm_slot)
		return -ENOMEM;

	/* __khugepaged_exit() must not run from under us */
	VM_BUG_ON_MM(atomic_read(&mm->mm_users) == 0, mm);
	if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
		free_mm_slot(mm_slot);
		return 0;
	}

	spin_lock(&khugepaged_mm_lock);
	insert_to_mm_slots_hash(mm, mm_slot);
	/*
	 * Insert just behind the scanning cursor, to let the area settle
	 * down.
	 */
	wakeup = list_empty(&khugepaged_scan.mm_head);
	list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
	spin_unlock(&khugepaged_mm_lock);

	mmgrab(mm);
	if (wakeup)
		wake_up_interruptible(&khugepaged_wait);

	return 0;
}
int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
			       unsigned long vm_flags)
{
	unsigned long hstart, hend;

	/*
	 * khugepaged does not yet work on non-shmem files or special
	 * mappings. And file-private shmem THP is not supported.
	 */
	if (!hugepage_vma_check(vma, vm_flags))
		return 0;

	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = vma->vm_end & HPAGE_PMD_MASK;
	if (hstart < hend)
		return khugepaged_enter(vma, vm_flags);
	return 0;
}
void __khugepaged_exit(struct mm_struct *mm)
{
	struct mm_slot *mm_slot;
	int free = 0;

	spin_lock(&khugepaged_mm_lock);
	mm_slot = get_mm_slot(mm);
	if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
		hash_del(&mm_slot->hash);
		list_del(&mm_slot->mm_node);
		free = 1;
	}
	spin_unlock(&khugepaged_mm_lock);

	if (free) {
		clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		free_mm_slot(mm_slot);
		mmdrop(mm);
	} else if (mm_slot) {
		/*
		 * This is required to serialize against
		 * khugepaged_test_exit() (which is guaranteed to run
		 * under mmap_sem read mode). Stop here (after we
		 * return, all pagetables will be destroyed) until
		 * khugepaged has finished working on the pagetables
		 * under the mmap_sem.
		 */
		down_write(&mm->mmap_sem);
		up_write(&mm->mmap_sem);
	}
}
static void release_pte_page(struct page *page)
{
	dec_node_page_state(page, NR_ISOLATED_ANON + page_is_file_cache(page));
	unlock_page(page);
	putback_lru_page(page);
}

static void release_pte_pages(pte_t *pte, pte_t *_pte)
{
	while (--_pte >= pte) {
		pte_t pteval = *_pte;
		if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)))
			release_pte_page(pte_page(pteval));
	}
}
static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
					unsigned long address,
					pte_t *pte)
{
	struct page *page = NULL;
	pte_t *_pte;
	int none_or_zero = 0, result = 0, referenced = 0;
	bool writable = false;

	for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
	     _pte++, address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (pte_none(pteval) || (pte_present(pteval) &&
				is_zero_pfn(pte_pfn(pteval)))) {
			if (!userfaultfd_armed(vma) &&
			    ++none_or_zero <= khugepaged_max_ptes_none) {
				continue;
			} else {
				result = SCAN_EXCEED_NONE_PTE;
				goto out;
			}
		}
		if (!pte_present(pteval)) {
			result = SCAN_PTE_NON_PRESENT;
			goto out;
		}
		page = vm_normal_page(vma, address, pteval);
		if (unlikely(!page)) {
			result = SCAN_PAGE_NULL;
			goto out;
		}

		/* TODO: teach khugepaged to collapse THP mapped with pte */
		if (PageCompound(page)) {
			result = SCAN_PAGE_COMPOUND;
			goto out;
		}

		VM_BUG_ON_PAGE(!PageAnon(page), page);

		/*
		 * We can do it before isolate_lru_page because the
		 * page can't be freed from under us. NOTE: PG_lock
		 * is needed to serialize against split_huge_page
		 * when invoked from the VM.
		 */
		if (!trylock_page(page)) {
			result = SCAN_PAGE_LOCK;
			goto out;
		}

		/*
		 * cannot use mapcount: can't collapse if there's a gup pin.
		 * The page must only be referenced by the scanned process
		 * and page swap cache.
		 */
		if (page_count(page) != 1 + PageSwapCache(page)) {
			unlock_page(page);
			result = SCAN_PAGE_COUNT;
			goto out;
		}
		if (pte_write(pteval)) {
			writable = true;
		} else {
			if (PageSwapCache(page) &&
			    !reuse_swap_page(page, NULL)) {
				unlock_page(page);
				result = SCAN_SWAP_CACHE_PAGE;
				goto out;
			}
			/*
			 * Page is not in the swap cache. It can be collapsed
			 * into a THP.
			 */
		}

		/*
		 * Isolate the page to avoid collapsing a hugepage
		 * currently in use by the VM.
		 */
		if (isolate_lru_page(page)) {
			unlock_page(page);
			result = SCAN_DEL_PAGE_LRU;
			goto out;
		}
		inc_node_page_state(page,
				NR_ISOLATED_ANON + page_is_file_cache(page));
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(PageLRU(page), page);

		/* There should be enough young ptes to collapse the page */
		if (pte_young(pteval) ||
		    page_is_young(page) || PageReferenced(page) ||
		    mmu_notifier_test_young(vma->vm_mm, address))
			referenced++;
	}
	if (unlikely(!writable)) {
		result = SCAN_PAGE_RO;
	} else if (unlikely(!referenced)) {
		result = SCAN_LACK_REFERENCED_PAGE;
	} else {
		result = SCAN_SUCCEED;
		trace_mm_collapse_huge_page_isolate(page, none_or_zero,
						    referenced, writable, result);
		return 1;
	}
out:
	release_pte_pages(pte, _pte);
	trace_mm_collapse_huge_page_isolate(page, none_or_zero,
					    referenced, writable, result);
	return 0;
}
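
/*
 * Recapping the contract of the loop above: on the success path every
 * pte in the range is either none/zero-pfn or maps an anonymous page
 * that is now PageLocked, off the LRU and referenced only by this mm
 * (plus swap cache), so __collapse_huge_page_copy() can copy and retire
 * the pages without them disappearing; on any failure,
 * release_pte_pages() has already unlocked and put back everything
 * isolated so far.
 */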
static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
				      struct vm_area_struct *vma,
				      unsigned long address,
				      spinlock_t *ptl)
{
	pte_t *_pte;
	for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
				_pte++, page++, address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		struct page *src_page;

		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			clear_user_highpage(page, address);
			add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
			if (is_zero_pfn(pte_pfn(pteval))) {
				/*
				 * ptl mostly unnecessary.
				 */
				spin_lock(ptl);
				/*
				 * paravirt calls inside pte_clear here are
				 * superfluous.
				 */
				pte_clear(vma->vm_mm, address, _pte);
				spin_unlock(ptl);
			}
		} else {
			src_page = pte_page(pteval);
			copy_user_highpage(page, src_page, address, vma);
			VM_BUG_ON_PAGE(page_mapcount(src_page) != 1, src_page);
			release_pte_page(src_page);
			/*
			 * ptl mostly unnecessary, but preempt has to
			 * be disabled to update the per-cpu stats
			 * inside page_remove_rmap().
			 */
			spin_lock(ptl);
			/*
			 * paravirt calls inside pte_clear here are
			 * superfluous.
			 */
			pte_clear(vma->vm_mm, address, _pte);
			page_remove_rmap(src_page, false);
			spin_unlock(ptl);
			free_page_and_swap_cache(src_page);
		}
	}
}
static void khugepaged_alloc_sleep(void)
{
	DEFINE_WAIT(wait);

	add_wait_queue(&khugepaged_wait, &wait);
	freezable_schedule_timeout_interruptible(
		msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
	remove_wait_queue(&khugepaged_wait, &wait);
}
static int khugepaged_node_load[MAX_NUMNODES];

static bool khugepaged_scan_abort(int nid)
{
	int i;

	/*
	 * If node_reclaim_mode is disabled, then no extra effort is made to
	 * allocate memory locally.
	 */
	if (!node_reclaim_mode)
		return false;

	/* If there is a count for this node already, it must be acceptable */
	if (khugepaged_node_load[nid])
		return false;

	for (i = 0; i < MAX_NUMNODES; i++) {
		if (!khugepaged_node_load[i])
			continue;
		if (node_distance(nid, i) > RECLAIM_DISTANCE)
			return true;
	}
	return false;
}
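
/*
 * An illustrative example of the distance check (SLIT values are
 * machine-specific; these are typical): with RECLAIM_DISTANCE == 30, a
 * two-socket box reporting node_distance() == 21 between its nodes may
 * mix pages from both nodes in one collapse, whereas a node at distance
 * 31+ from one already counted in khugepaged_node_load[] makes the scan
 * bail out with SCAN_SCAN_ABORT rather than allocate a hugepage far
 * from half of its source pages.
 */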
/* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
{
	return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
}

#ifdef CONFIG_NUMA
static int khugepaged_find_target_node(void)
{
	static int last_khugepaged_target_node = NUMA_NO_NODE;
	int nid, target_node = 0, max_value = 0;

	/* find the first node with the max normal-page hits */
	for (nid = 0; nid < MAX_NUMNODES; nid++)
		if (khugepaged_node_load[nid] > max_value) {
			max_value = khugepaged_node_load[nid];
			target_node = nid;
		}

	/* do some balancing if several nodes have the same hit count */
	if (target_node <= last_khugepaged_target_node)
		for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
				nid++)
			if (max_value == khugepaged_node_load[nid]) {
				target_node = nid;
				break;
			}

	last_khugepaged_target_node = target_node;
	return target_node;
}
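
/*
 * Note on the "balancing" above: when several nodes tie for the maximum
 * hit count, the cursor kept in last_khugepaged_target_node makes
 * successive collapses round-robin between the tying nodes instead of
 * always allocating on the lowest-numbered one.
 */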
static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
{
	if (IS_ERR(*hpage)) {
		if (!*wait)
			return false;

		*wait = false;
		*hpage = NULL;
		khugepaged_alloc_sleep();
	} else if (*hpage) {
		put_page(*hpage);
		*hpage = NULL;
	}

	return true;
}

static struct page *
khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
{
	VM_BUG_ON_PAGE(*hpage, *hpage);

	*hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
	if (unlikely(!*hpage)) {
		count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
		*hpage = ERR_PTR(-ENOMEM);
		return NULL;
	}

	prep_transhuge_page(*hpage);
	count_vm_event(THP_COLLAPSE_ALLOC);
	return *hpage;
}
#else
static int khugepaged_find_target_node(void)
{
	return 0;
}

static inline struct page *alloc_khugepaged_hugepage(void)
{
	struct page *page;

	page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(),
			   HPAGE_PMD_ORDER);
	if (page)
		prep_transhuge_page(page);
	return page;
}

static struct page *khugepaged_alloc_hugepage(bool *wait)
{
	struct page *hpage;

	do {
		hpage = alloc_khugepaged_hugepage();
		if (!hpage) {
			count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
			if (!*wait)
				return NULL;

			*wait = false;
			khugepaged_alloc_sleep();
		} else
			count_vm_event(THP_COLLAPSE_ALLOC);
	} while (unlikely(!hpage) && likely(khugepaged_enabled()));

	return hpage;
}

static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
{
	/*
	 * If the hpage allocated earlier was briefly exposed in page cache
	 * before collapse_shmem() failed, it is possible that racing lookups
	 * have not yet completed, and would then be unpleasantly surprised by
	 * finding the hpage reused for the same mapping at a different offset.
	 * Just release the previous allocation if there is any danger of that.
	 */
	if (*hpage && page_count(*hpage) > 1) {
		put_page(*hpage);
		*hpage = NULL;
	}

	if (!*hpage)
		*hpage = khugepaged_alloc_hugepage(wait);

	if (unlikely(!*hpage))
		return false;

	return true;
}
static struct page *
khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
{
	VM_BUG_ON(!*hpage);

	return *hpage;
}
#endif

/*
 * If the mmap_sem was temporarily dropped, revalidate the vma
 * before taking the mmap_sem again.
 * Returns 0 if it succeeds, otherwise a non-zero value (a scan code).
 */
static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
		struct vm_area_struct **vmap)
{
	struct vm_area_struct *vma;
	unsigned long hstart, hend;

	if (unlikely(khugepaged_test_exit(mm)))
		return SCAN_ANY_PROCESS;

	*vmap = vma = find_vma(mm, address);
	if (!vma)
		return SCAN_VMA_NULL;

	hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	hend = vma->vm_end & HPAGE_PMD_MASK;
	if (address < hstart || address + HPAGE_PMD_SIZE > hend)
		return SCAN_ADDRESS_RANGE;
	if (!hugepage_vma_check(vma, vma->vm_flags))
		return SCAN_VMA_CHECK;
	return 0;
}

/*
 * Bring missing pages in from swap, to complete THP collapse.
 * Only done if khugepaged_scan_pmd believes it is worthwhile.
 *
 * Called and returns without pte mapped or spinlocks held,
 * but with mmap_sem held to protect against vma changes.
 */
static bool __collapse_huge_page_swapin(struct mm_struct *mm,
					struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmd,
					int referenced)
{
	int swapped_in = 0, ret = 0;
	struct vm_fault vmf = {
		.vma = vma,
		.address = address,
		.flags = FAULT_FLAG_ALLOW_RETRY,
		.pmd = pmd,
		.pgoff = linear_page_index(vma, address),
	};

	/* we only decide to swap in if there are enough young ptes */
	if (referenced < HPAGE_PMD_NR/2) {
		trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
		return false;
	}
	vmf.pte = pte_offset_map(pmd, address);
	for (; vmf.address < address + HPAGE_PMD_NR*PAGE_SIZE;
			vmf.pte++, vmf.address += PAGE_SIZE) {
		vmf.orig_pte = *vmf.pte;
		if (!is_swap_pte(vmf.orig_pte))
			continue;
		swapped_in++;
		ret = do_swap_page(&vmf);

		/* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */
		if (ret & VM_FAULT_RETRY) {
			down_read(&mm->mmap_sem);
			if (hugepage_vma_revalidate(mm, address, &vmf.vma)) {
				/* vma is no longer available, don't continue to swap in */
				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
				return false;
			}
			/* check if the pmd is still valid */
			if (mm_find_pmd(mm, address) != pmd) {
				trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
				return false;
			}
		}
		if (ret & VM_FAULT_ERROR) {
			trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
			return false;
		}
		/* pte is unmapped now, we need to map it */
		vmf.pte = pte_offset_map(pmd, vmf.address);
	}
	vmf.pte--;
	pte_unmap(vmf.pte);
	trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
	return true;
}
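
/*
 * A worked number for the threshold above (illustrative; assuming 4K
 * pages, so HPAGE_PMD_NR == 512): the swap-in pass only proceeds when
 * at least 512/2 == 256 of the ptes in the range were seen referenced;
 * paying swap I/O to assemble a colder range is judged not worthwhile,
 * so the function returns false immediately instead.
 */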
static void collapse_huge_page(struct mm_struct *mm,
				   unsigned long address,
				   struct page **hpage,
				   int node, int referenced)
{
	pmd_t *pmd, _pmd;
	pte_t *pte;
	pgtable_t pgtable;
	struct page *new_page;
	spinlock_t *pmd_ptl, *pte_ptl;
	int isolated = 0, result = 0;
	struct mem_cgroup *memcg;
	struct vm_area_struct *vma;
	unsigned long mmun_start;	/* For mmu_notifiers */
	unsigned long mmun_end;		/* For mmu_notifiers */
	gfp_t gfp;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	/* Only allocate from the target node */
	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;

	/*
	 * Before allocating the hugepage, release the mmap_sem read lock.
	 * The allocation can take potentially a long time if it involves
	 * sync compaction, and we do not need to hold the mmap_sem during
	 * that. We will recheck the vma after taking it again in write mode.
	 */
	up_read(&mm->mmap_sem);
	new_page = khugepaged_alloc_page(hpage, gfp, node);
	if (!new_page) {
		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
		goto out_nolock;
	}

	if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
		result = SCAN_CGROUP_CHARGE_FAIL;
		goto out_nolock;
	}

	down_read(&mm->mmap_sem);
	result = hugepage_vma_revalidate(mm, address, &vma);
	if (result) {
		mem_cgroup_cancel_charge(new_page, memcg, true);
		up_read(&mm->mmap_sem);
		goto out_nolock;
	}

	pmd = mm_find_pmd(mm, address);
	if (!pmd) {
		result = SCAN_PMD_NULL;
		mem_cgroup_cancel_charge(new_page, memcg, true);
		up_read(&mm->mmap_sem);
		goto out_nolock;
	}

	/*
	 * __collapse_huge_page_swapin always returns with mmap_sem locked.
	 * If it fails, we release mmap_sem and jump out_nolock.
	 * Continuing to collapse causes inconsistency.
	 */
	if (!__collapse_huge_page_swapin(mm, vma, address, pmd, referenced)) {
		mem_cgroup_cancel_charge(new_page, memcg, true);
		up_read(&mm->mmap_sem);
		goto out_nolock;
	}

	up_read(&mm->mmap_sem);
	/*
	 * Prevent all access to pagetables with the exception of
	 * gup_fast later handled by the ptep_clear_flush and the VM
	 * handled by the anon_vma lock + PG_lock.
	 */
	down_write(&mm->mmap_sem);
	result = hugepage_vma_revalidate(mm, address, &vma);
	if (result)
		goto out;
	/* check if the pmd is still valid */
	if (mm_find_pmd(mm, address) != pmd)
		goto out;

	anon_vma_lock_write(vma->anon_vma);

	pte = pte_offset_map(pmd, address);
	pte_ptl = pte_lockptr(mm, pmd);

	mmun_start = address;
	mmun_end   = address + HPAGE_PMD_SIZE;
	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
	pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
	/*
	 * After this gup_fast can't run anymore. This also removes
	 * any huge TLB entry from the CPU so we won't allow
	 * huge and small TLB entries for the same virtual address
	 * to avoid the risk of CPU bugs in that area.
	 */
	_pmd = pmdp_collapse_flush(vma, address, pmd);
	spin_unlock(pmd_ptl);
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);

	spin_lock(pte_ptl);
	isolated = __collapse_huge_page_isolate(vma, address, pte);
	spin_unlock(pte_ptl);

	if (unlikely(!isolated)) {
		pte_unmap(pte);
		spin_lock(pmd_ptl);
		BUG_ON(!pmd_none(*pmd));
		/*
		 * We can only use set_pmd_at when establishing
		 * hugepmds and never for establishing regular pmds that
		 * point to regular pagetables. Use pmd_populate for that.
		 */
		pmd_populate(mm, pmd, pmd_pgtable(_pmd));
		spin_unlock(pmd_ptl);
		anon_vma_unlock_write(vma->anon_vma);
		result = SCAN_FAIL;
		goto out;
	}

	/*
	 * All pages are isolated and locked so anon_vma rmap
	 * can't run anymore.
	 */
	anon_vma_unlock_write(vma->anon_vma);

	__collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl);
	pte_unmap(pte);
	__SetPageUptodate(new_page);
	pgtable = pmd_pgtable(_pmd);

	_pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
	_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);

	/*
	 * spin_lock() below is not the equivalent of smp_wmb(), so
	 * this is needed to avoid the copy_huge_page writes becoming
	 * visible after the set_pmd_at() write.
	 */
	smp_wmb();

	spin_lock(pmd_ptl);
	BUG_ON(!pmd_none(*pmd));
	page_add_new_anon_rmap(new_page, vma, address, true);
	mem_cgroup_commit_charge(new_page, memcg, false, true);
	lru_cache_add_active_or_unevictable(new_page, vma);
	pgtable_trans_huge_deposit(mm, pmd, pgtable);
	set_pmd_at(mm, address, pmd, _pmd);
	update_mmu_cache_pmd(vma, address, pmd);
	spin_unlock(pmd_ptl);

	*hpage = NULL;

	khugepaged_pages_collapsed++;
	result = SCAN_SUCCEED;
out_up_write:
	up_write(&mm->mmap_sem);
out_nolock:
	trace_mm_collapse_huge_page(mm, isolated, result);
	return;
out:
	mem_cgroup_cancel_charge(new_page, memcg, true);
	goto out_up_write;
}
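
/*
 * Recap of the ordering above, since it is what makes the collapse safe:
 * (1) pmdp_collapse_flush() clears the pmd and flushes the TLB, so no
 * new gup_fast or hardware walk can find the old ptes; (2) the isolate
 * pass locks and re-checks every page under the pte lock; (3) the copy
 * runs with no mapping visible; (4) smp_wmb() makes the copied data
 * visible before (5) set_pmd_at() publishes the huge pmd.  A failure
 * after (1) simply re-installs the old page table with pmd_populate().
 */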
static int khugepaged_scan_pmd(struct mm_struct *mm,
			       struct vm_area_struct *vma,
			       unsigned long address,
			       struct page **hpage)
{
	pmd_t *pmd;
	pte_t *pte, *_pte;
	int ret = 0, none_or_zero = 0, result = 0, referenced = 0;
	struct page *page = NULL;
	unsigned long _address;
	spinlock_t *ptl;
	int node = NUMA_NO_NODE, unmapped = 0;
	bool writable = false;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	pmd = mm_find_pmd(mm, address);
	if (!pmd) {
		result = SCAN_PMD_NULL;
		goto out;
	}

	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
	for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
	     _pte++, _address += PAGE_SIZE) {
		pte_t pteval = *_pte;
		if (is_swap_pte(pteval)) {
			if (++unmapped <= khugepaged_max_ptes_swap) {
				continue;
			} else {
				result = SCAN_EXCEED_SWAP_PTE;
				goto out_unmap;
			}
		}
		if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
			if (!userfaultfd_armed(vma) &&
			    ++none_or_zero <= khugepaged_max_ptes_none) {
				continue;
			} else {
				result = SCAN_EXCEED_NONE_PTE;
				goto out_unmap;
			}
		}
		if (!pte_present(pteval)) {
			result = SCAN_PTE_NON_PRESENT;
			goto out_unmap;
		}
		if (pte_write(pteval))
			writable = true;

		page = vm_normal_page(vma, _address, pteval);
		if (unlikely(!page)) {
			result = SCAN_PAGE_NULL;
			goto out_unmap;
		}

		/* TODO: teach khugepaged to collapse THP mapped with pte */
		if (PageCompound(page)) {
			result = SCAN_PAGE_COMPOUND;
			goto out_unmap;
		}

		/*
		 * Record which node the original page is from and save this
		 * information to khugepaged_node_load[].
		 * Khugepaged will allocate a hugepage from the node that has
		 * the max hit count.
		 */
		node = page_to_nid(page);
		if (khugepaged_scan_abort(node)) {
			result = SCAN_SCAN_ABORT;
			goto out_unmap;
		}
		khugepaged_node_load[node]++;
		if (!PageLRU(page)) {
			result = SCAN_PAGE_LRU;
			goto out_unmap;
		}
		if (PageLocked(page)) {
			result = SCAN_PAGE_LOCK;
			goto out_unmap;
		}
		if (!PageAnon(page)) {
			result = SCAN_PAGE_ANON;
			goto out_unmap;
		}

		/*
		 * cannot use mapcount: can't collapse if there's a gup pin.
		 * The page must only be referenced by the scanned process
		 * and page swap cache.
		 */
		if (page_count(page) != 1 + PageSwapCache(page)) {
			result = SCAN_PAGE_COUNT;
			goto out_unmap;
		}
		if (pte_young(pteval) ||
		    page_is_young(page) || PageReferenced(page) ||
		    mmu_notifier_test_young(vma->vm_mm, address))
			referenced++;
	}
	if (writable) {
		if (referenced) {
			result = SCAN_SUCCEED;
			ret = 1;
		} else {
			result = SCAN_LACK_REFERENCED_PAGE;
		}
	} else {
		result = SCAN_PAGE_RO;
	}
out_unmap:
	pte_unmap_unlock(pte, ptl);
	if (ret) {
		node = khugepaged_find_target_node();
		/* collapse_huge_page will return with the mmap_sem released */
		collapse_huge_page(mm, address, hpage, node, referenced);
	}
out:
	trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
				     none_or_zero, result, unmapped);
	return ret;
}
static void collect_mm_slot(struct mm_slot *mm_slot)
{
	struct mm_struct *mm = mm_slot->mm;

	VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));

	if (khugepaged_test_exit(mm)) {
		/* free mm_slot */
		hash_del(&mm_slot->hash);
		list_del(&mm_slot->mm_node);

		/*
		 * Not strictly needed because the mm exited already.
		 *
		 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
		 */

		/* khugepaged_mm_lock actually not necessary for the below */
		free_mm_slot(mm_slot);
		mmdrop(mm);
	}
}
#if defined(CONFIG_SHMEM) && defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE)
static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm;
	unsigned long addr;
	pmd_t *pmd, _pmd;

	i_mmap_lock_write(mapping);
	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
		/* probably overkill */
		if (vma->anon_vma)
			continue;
		addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
		if (addr & ~HPAGE_PMD_MASK)
			continue;
		if (vma->vm_end < addr + HPAGE_PMD_SIZE)
			continue;
		mm = vma->vm_mm;
		pmd = mm_find_pmd(mm, addr);
		if (!pmd)
			continue;
		/*
		 * We need exclusive mmap_sem to retract page table.
		 * If trylock fails we would end up with pte-mapped THP after
		 * re-fault. Not ideal, but it's more important to not disturb
		 * the system too much.
		 */
		if (down_write_trylock(&mm->mmap_sem)) {
			if (!khugepaged_test_exit(mm)) {
				spinlock_t *ptl = pmd_lock(mm, pmd);
				/* assume page table is clear */
				_pmd = pmdp_collapse_flush(vma, addr, pmd);
				spin_unlock(ptl);
				mm_dec_nr_ptes(mm);
				pte_free(mm, pmd_pgtable(_pmd));
			}
			up_write(&mm->mmap_sem);
		}
	}
	i_mmap_unlock_write(mapping);
}
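
/*
 * Worth spelling out: when the trylock fails, the collapsed huge page
 * is already installed in the page cache but this mm keeps its old page
 * table, so the next fault maps the THP with ptes ("pte-mapped THP").
 * That works correctly, just without the single-TLB-entry benefit until
 * the table is eventually retracted.
 */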
/*
 * collapse_shmem - collapse small tmpfs/shmem pages into huge one.
 *
 * Basic scheme is simple, details are more complex:
 *  - allocate and lock a new huge page;
 *  - scan over radix tree, replacing old pages with the new one
 *    + swap in pages if necessary;
 *    + fill in gaps;
 *    + keep old pages around in case rollback is required;
 *  - if replacing succeeds:
 *    + copy data over;
 *    + free old pages;
 *    + unlock huge page;
 *  - if replacing failed:
 *    + put all pages back and unfreeze them;
 *    + restore gaps in the radix-tree;
 *    + unlock and free huge page;
 */
static void collapse_shmem(struct mm_struct *mm,
		struct address_space *mapping, pgoff_t start,
		struct page **hpage, int node)
{
	gfp_t gfp;
	struct page *page, *new_page, *tmp;
	struct mem_cgroup *memcg;
	pgoff_t index, end = start + HPAGE_PMD_NR;
	LIST_HEAD(pagelist);
	struct radix_tree_iter iter;
	void **slot;
	int nr_none = 0, result = SCAN_SUCCEED;

	VM_BUG_ON(start & (HPAGE_PMD_NR - 1));

	/* Only allocate from the target node */
	gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;

	new_page = khugepaged_alloc_page(hpage, gfp, node);
	if (!new_page) {
		result = SCAN_ALLOC_HUGE_PAGE_FAIL;
		goto out;
	}

	if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp, &memcg, true))) {
		result = SCAN_CGROUP_CHARGE_FAIL;
		goto out;
	}

	__SetPageLocked(new_page);
	__SetPageSwapBacked(new_page);
	new_page->index = start;
	new_page->mapping = mapping;

	/*
	 * At this point the new_page is locked and not up-to-date.
	 * It's safe to insert it into the page cache, because nobody would
	 * be able to map it or use it in another way until we unlock it.
	 */

	index = start;
	xa_lock_irq(&mapping->i_pages);
	radix_tree_for_each_slot(slot, &mapping->i_pages, &iter, start) {
		int n = min(iter.index, end) - index;

		/*
		 * Stop if extent has been hole-punched, and is now completely
		 * empty (the more obvious i_size_read() check would take an
		 * irq-unsafe seqlock on 32-bit).
		 */
		if (n >= HPAGE_PMD_NR) {
			result = SCAN_TRUNCATED;
			goto tree_locked;
		}

		/*
		 * Handle holes in the radix tree: charge it from shmem and
		 * insert relevant subpage of new_page into the radix-tree.
		 */
		if (n && !shmem_charge(mapping->host, n)) {
			result = SCAN_FAIL;
			goto tree_locked;
		}
		for (; index < min(iter.index, end); index++) {
			radix_tree_insert(&mapping->i_pages, index,
					new_page + (index % HPAGE_PMD_NR));
		}
		nr_none += n;

		/* We are done. */
		if (index >= end)
			break;

		page = radix_tree_deref_slot_protected(slot,
				&mapping->i_pages.xa_lock);
		if (radix_tree_exceptional_entry(page) || !PageUptodate(page)) {
			xa_unlock_irq(&mapping->i_pages);
			/* swap in or instantiate fallocated page */
			if (shmem_getpage(mapping->host, index, &page,
						SGP_NOHUGE)) {
				result = SCAN_FAIL;
				goto tree_unlocked;
			}
		} else if (trylock_page(page)) {
			get_page(page);
			xa_unlock_irq(&mapping->i_pages);
		} else {
			result = SCAN_PAGE_LOCK;
			goto tree_locked;
		}

		/*
		 * The page must be locked, so we can drop the i_pages lock
		 * without racing with truncate.
		 */
		VM_BUG_ON_PAGE(!PageLocked(page), page);
		VM_BUG_ON_PAGE(!PageUptodate(page), page);

		/*
		 * If file was truncated then extended, or hole-punched, before
		 * we locked the first page, then a THP might be there already.
		 */
		if (PageTransCompound(page)) {
			result = SCAN_PAGE_COMPOUND;
			goto out_unlock;
		}

		if (page_mapping(page) != mapping) {
			result = SCAN_TRUNCATED;
			goto out_unlock;
		}

		if (isolate_lru_page(page)) {
			result = SCAN_DEL_PAGE_LRU;
			goto out_unlock;
		}

		if (page_mapped(page))
			unmap_mapping_pages(mapping, index, 1, false);

		xa_lock_irq(&mapping->i_pages);

		slot = radix_tree_lookup_slot(&mapping->i_pages, index);
		VM_BUG_ON_PAGE(page != radix_tree_deref_slot_protected(slot,
					&mapping->i_pages.xa_lock), page);
		VM_BUG_ON_PAGE(page_mapped(page), page);

		/*
		 * The page is expected to have page_count() == 3:
		 *  - we hold a pin on it;
		 *  - one reference from radix tree;
		 *  - one from isolate_lru_page;
		 */
		if (!page_ref_freeze(page, 3)) {
			result = SCAN_PAGE_COUNT;
			xa_unlock_irq(&mapping->i_pages);
			putback_lru_page(page);
			goto out_unlock;
		}

		/*
		 * Add the page to the list to be able to undo the collapse if
		 * something goes wrong.
		 */
		list_add_tail(&page->lru, &pagelist);

		/* Finally, replace with the new page. */
		radix_tree_replace_slot(&mapping->i_pages, slot,
				new_page + (index % HPAGE_PMD_NR));

		slot = radix_tree_iter_resume(slot, &iter);
		index++;
		continue;
out_unlock:
		unlock_page(page);
		put_page(page);
		goto tree_unlocked;
	}

	/*
	 * Handle hole in radix tree at the end of the range.
	 * This code only triggers if there's nothing in radix tree
	 * beyond 'end'.
	 */
	if (index < end) {
		int n = end - index;

		/* Stop if extent has been truncated, and is now empty */
		if (n >= HPAGE_PMD_NR) {
			result = SCAN_TRUNCATED;
			goto tree_locked;
		}
		if (!shmem_charge(mapping->host, n)) {
			result = SCAN_FAIL;
			goto tree_locked;
		}
		for (; index < end; index++) {
			radix_tree_insert(&mapping->i_pages, index,
					new_page + (index % HPAGE_PMD_NR));
		}
		nr_none += n;
	}

	__inc_node_page_state(new_page, NR_SHMEM_THPS);
	if (nr_none) {
		struct zone *zone = page_zone(new_page);

		__mod_node_page_state(zone->zone_pgdat, NR_FILE_PAGES, nr_none);
		__mod_node_page_state(zone->zone_pgdat, NR_SHMEM, nr_none);
	}

tree_locked:
	xa_unlock_irq(&mapping->i_pages);
tree_unlocked:

	if (result == SCAN_SUCCEED) {
		/*
		 * Replacing old pages with the new one has succeeded, now we
		 * need to copy the content and free the old pages.
		 */
		index = start;
		list_for_each_entry_safe(page, tmp, &pagelist, lru) {
			while (index < page->index) {
				clear_highpage(new_page + (index % HPAGE_PMD_NR));
				index++;
			}
			copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
					page);
			list_del(&page->lru);
			page->mapping = NULL;
			page_ref_unfreeze(page, 1);
			ClearPageActive(page);
			ClearPageUnevictable(page);
			unlock_page(page);
			put_page(page);
			index++;
		}
		while (index < end) {
			clear_highpage(new_page + (index % HPAGE_PMD_NR));
			index++;
		}

		SetPageUptodate(new_page);
		page_ref_add(new_page, HPAGE_PMD_NR - 1);
		set_page_dirty(new_page);
		mem_cgroup_commit_charge(new_page, memcg, false, true);
		lru_cache_add_anon(new_page);

		/*
		 * Remove pte page tables, so we can re-fault the page as huge.
		 */
		retract_page_tables(mapping, start);
		*hpage = NULL;

		khugepaged_pages_collapsed++;
	} else {
		/* Something went wrong: roll back changes to the radix-tree */
		xa_lock_irq(&mapping->i_pages);
		mapping->nrpages -= nr_none;
		shmem_uncharge(mapping->host, nr_none);

		radix_tree_for_each_slot(slot, &mapping->i_pages, &iter, start) {
			if (iter.index >= end)
				break;
			page = list_first_entry_or_null(&pagelist,
					struct page, lru);
			if (!page || iter.index < page->index) {
				if (!nr_none)
					break;
				nr_none--;
				/* Put holes back where they were */
				radix_tree_delete(&mapping->i_pages, iter.index);
				continue;
			}

			VM_BUG_ON_PAGE(page->index != iter.index, page);

			/* Unfreeze the page. */
			list_del(&page->lru);
			page_ref_unfreeze(page, 2);
			radix_tree_replace_slot(&mapping->i_pages, slot, page);
			slot = radix_tree_iter_resume(slot, &iter);
			xa_unlock_irq(&mapping->i_pages);
			unlock_page(page);
			putback_lru_page(page);
			xa_lock_irq(&mapping->i_pages);
		}
		VM_BUG_ON(nr_none);
		xa_unlock_irq(&mapping->i_pages);

		mem_cgroup_cancel_charge(new_page, memcg, true);
		new_page->mapping = NULL;
	}

	unlock_page(new_page);
out:
	VM_BUG_ON(!list_empty(&pagelist));
	/* TODO: tracepoints */
}
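
/*
 * Recap of the rollback path above: every old page regains its radix
 * tree slot before being unfrozen, slots that had been filled in as
 * holes are deleted again (nr_none must reach zero), and
 * shmem_uncharge() undoes the block accounting, so after a failure the
 * mapping looks exactly as it did before the attempt; only then is the
 * unused huge page unlocked and dropped.
 */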
static void khugepaged_scan_shmem(struct mm_struct *mm,
		struct address_space *mapping,
		pgoff_t start, struct page **hpage)
{
	struct page *page = NULL;
	struct radix_tree_iter iter;
	void **slot;
	int present, swap;
	int node = NUMA_NO_NODE;
	int result = SCAN_SUCCEED;

	present = 0;
	swap = 0;
	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
	rcu_read_lock();
	radix_tree_for_each_slot(slot, &mapping->i_pages, &iter, start) {
		if (iter.index >= start + HPAGE_PMD_NR)
			break;

		page = radix_tree_deref_slot(slot);
		if (radix_tree_deref_retry(page)) {
			slot = radix_tree_iter_retry(&iter);
			continue;
		}

		if (radix_tree_exception(page)) {
			if (++swap > khugepaged_max_ptes_swap) {
				result = SCAN_EXCEED_SWAP_PTE;
				break;
			}
			continue;
		}

		if (PageTransCompound(page)) {
			result = SCAN_PAGE_COMPOUND;
			break;
		}

		node = page_to_nid(page);
		if (khugepaged_scan_abort(node)) {
			result = SCAN_SCAN_ABORT;
			break;
		}
		khugepaged_node_load[node]++;

		if (!PageLRU(page)) {
			result = SCAN_PAGE_LRU;
			break;
		}

		if (page_count(page) != 1 + page_mapcount(page)) {
			result = SCAN_PAGE_COUNT;
			break;
		}

		/*
		 * We probably should check if the page is referenced here, but
		 * nobody would transfer pte_young() to PageReferenced() for us.
		 * And rmap walk here is just too costly...
		 */

		present++;

		if (need_resched()) {
			slot = radix_tree_iter_resume(slot, &iter);
			cond_resched_rcu();
		}
	}
	rcu_read_unlock();

	if (result == SCAN_SUCCEED) {
		if (present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
			result = SCAN_EXCEED_NONE_PTE;
		} else {
			node = khugepaged_find_target_node();
			collapse_shmem(mm, mapping, start, hpage, node);
		}
	}

	/* TODO: tracepoints */
}
#else
static void khugepaged_scan_shmem(struct mm_struct *mm,
		struct address_space *mapping,
		pgoff_t start, struct page **hpage)
{
	BUILD_BUG();
}
#endif
static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
					    struct page **hpage)
	__releases(&khugepaged_mm_lock)
	__acquires(&khugepaged_mm_lock)
{
	struct mm_slot *mm_slot;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int progress = 0;

	VM_BUG_ON(!pages);
	VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));

	if (khugepaged_scan.mm_slot)
		mm_slot = khugepaged_scan.mm_slot;
	else {
		mm_slot = list_entry(khugepaged_scan.mm_head.next,
				     struct mm_slot, mm_node);
		khugepaged_scan.address = 0;
		khugepaged_scan.mm_slot = mm_slot;
	}
	spin_unlock(&khugepaged_mm_lock);

	mm = mm_slot->mm;
	/*
	 * Don't wait for semaphore (to avoid long wait times).  Just move to
	 * the next mm on the list.
	 */
	vma = NULL;
	if (unlikely(!down_read_trylock(&mm->mmap_sem)))
		goto breakouterloop_mmap_sem;
	if (likely(!khugepaged_test_exit(mm)))
		vma = find_vma(mm, khugepaged_scan.address);

	progress++;
	for (; vma; vma = vma->vm_next) {
		unsigned long hstart, hend;

		cond_resched();
		if (unlikely(khugepaged_test_exit(mm))) {
			progress++;
			break;
		}
		if (!hugepage_vma_check(vma, vma->vm_flags)) {
skip:
			progress++;
			continue;
		}
		hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
		hend = vma->vm_end & HPAGE_PMD_MASK;
		if (hstart >= hend)
			goto skip;
		if (khugepaged_scan.address > hend)
			goto skip;
		if (khugepaged_scan.address < hstart)
			khugepaged_scan.address = hstart;
		VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);

		while (khugepaged_scan.address < hend) {
			int ret;
			cond_resched();
			if (unlikely(khugepaged_test_exit(mm)))
				goto breakouterloop;

			VM_BUG_ON(khugepaged_scan.address < hstart ||
				  khugepaged_scan.address + HPAGE_PMD_SIZE >
				  hend);
			if (shmem_file(vma->vm_file)) {
				struct file *file;
				pgoff_t pgoff = linear_page_index(vma,
						khugepaged_scan.address);
				if (!shmem_huge_enabled(vma))
					goto skip;
				file = get_file(vma->vm_file);
				up_read(&mm->mmap_sem);
				ret = 1;
				khugepaged_scan_shmem(mm, file->f_mapping,
						pgoff, hpage);
				fput(file);
			} else {
				ret = khugepaged_scan_pmd(mm, vma,
						khugepaged_scan.address,
						hpage);
			}
			/* move to next address */
			khugepaged_scan.address += HPAGE_PMD_SIZE;
			progress += HPAGE_PMD_NR;
			if (ret)
				/* we released mmap_sem so break loop */
				goto breakouterloop_mmap_sem;
			if (progress >= pages)
				goto breakouterloop;
		}
	}
breakouterloop:
	up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */
breakouterloop_mmap_sem:

	spin_lock(&khugepaged_mm_lock);
	VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
	/*
	 * Release the current mm_slot if this mm is about to die, or
	 * if we scanned all vmas of this mm.
	 */
	if (khugepaged_test_exit(mm) || !vma) {
		/*
		 * Make sure that if mm_users is reaching zero while
		 * khugepaged runs here, khugepaged_exit will find
		 * mm_slot not pointing to the exiting mm.
		 */
		if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
			khugepaged_scan.mm_slot = list_entry(
				mm_slot->mm_node.next,
				struct mm_slot, mm_node);
			khugepaged_scan.address = 0;
		} else {
			khugepaged_scan.mm_slot = NULL;
			khugepaged_full_scans++;
		}

		collect_mm_slot(mm_slot);
	}

	return progress;
}

static int khugepaged_has_work(void)
{
	return !list_empty(&khugepaged_scan.mm_head) &&
		khugepaged_enabled();
}

static int khugepaged_wait_event(void)
{
	return !list_empty(&khugepaged_scan.mm_head) ||
		kthread_should_stop();
}
static void khugepaged_do_scan(void)
{
	struct page *hpage = NULL;
	unsigned int progress = 0, pass_through_head = 0;
	unsigned int pages = khugepaged_pages_to_scan;
	bool wait = true;

	barrier(); /* write khugepaged_pages_to_scan to local stack */

	while (progress < pages) {
		if (!khugepaged_prealloc_page(&hpage, &wait))
			break;

		cond_resched();

		if (unlikely(kthread_should_stop() || try_to_freeze()))
			break;

		spin_lock(&khugepaged_mm_lock);
		if (!khugepaged_scan.mm_slot)
			pass_through_head++;
		if (khugepaged_has_work() &&
		    pass_through_head < 2)
			progress += khugepaged_scan_mm_slot(pages - progress,
							    &hpage);
		else
			progress = pages;
		spin_unlock(&khugepaged_mm_lock);
	}

	if (!IS_ERR_OR_NULL(hpage))
		put_page(hpage);
}

static bool khugepaged_should_wakeup(void)
{
	return kthread_should_stop() ||
	       time_after_eq(jiffies, khugepaged_sleep_expire);
}

static void khugepaged_wait_work(void)
{
	if (khugepaged_has_work()) {
		const unsigned long scan_sleep_jiffies =
			msecs_to_jiffies(khugepaged_scan_sleep_millisecs);

		if (!scan_sleep_jiffies)
			return;

		khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
		wait_event_freezable_timeout(khugepaged_wait,
					     khugepaged_should_wakeup(),
					     scan_sleep_jiffies);
		return;
	}

	if (khugepaged_enabled())
		wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
}
static int khugepaged(void *none)
{
	struct mm_slot *mm_slot;

	set_freezable();
	set_user_nice(current, MAX_NICE);

	while (!kthread_should_stop()) {
		khugepaged_do_scan();
		khugepaged_wait_work();
	}

	spin_lock(&khugepaged_mm_lock);
	mm_slot = khugepaged_scan.mm_slot;
	khugepaged_scan.mm_slot = NULL;
	if (mm_slot)
		collect_mm_slot(mm_slot);
	spin_unlock(&khugepaged_mm_lock);
	return 0;
}
static void set_recommended_min_free_kbytes(void)
{
	struct zone *zone;
	int nr_zones = 0;
	unsigned long recommended_min;

	for_each_populated_zone(zone) {
		/*
		 * We don't need to worry about fragmentation of
		 * ZONE_MOVABLE since it only has movable pages.
		 */
		if (zone_idx(zone) > gfp_zone(GFP_USER))
			continue;

		nr_zones++;
	}

	/* Ensure 2 pageblocks are free to assist fragmentation avoidance */
	recommended_min = pageblock_nr_pages * nr_zones * 2;

	/*
	 * Make sure that on average at least two pageblocks are almost free
	 * of another type, one for a migratetype to fall back to and a
	 * second to avoid subsequent fallbacks of other types. There are 3
	 * MIGRATE_TYPES we care about.
	 */
	recommended_min += pageblock_nr_pages * nr_zones *
			   MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
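
	/*
	 * A worked example of the two terms above (illustrative; assuming
	 * x86-64 with 4K pages, pageblock_nr_pages == 512, and two
	 * populated zones at or below gfp_zone(GFP_USER)):
	 *   512 * 2 * 2 + 512 * 2 * 3 * 3 = 2048 + 9216 = 11264 pages,
	 * i.e. 44M, so min_free_kbytes would be raised to 45056 below,
	 * subject to the 5%-of-lowmem clamp that follows.
	 */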
	/* don't ever allow reserving more than 5% of the lowmem */
	recommended_min = min(recommended_min,
			      (unsigned long) nr_free_buffer_pages() / 20);
	recommended_min <<= (PAGE_SHIFT-10);

	if (recommended_min > min_free_kbytes) {
		if (user_min_free_kbytes >= 0)
			pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
				min_free_kbytes, recommended_min);

		min_free_kbytes = recommended_min;
	}
	setup_per_zone_wmarks();
}
int start_stop_khugepaged(void)
{
	int err = 0;

	mutex_lock(&khugepaged_mutex);
	if (khugepaged_enabled()) {
		if (!khugepaged_thread)
			khugepaged_thread = kthread_run(khugepaged, NULL,
							"khugepaged");
		if (IS_ERR(khugepaged_thread)) {
			pr_err("khugepaged: kthread_run(khugepaged) failed\n");
			err = PTR_ERR(khugepaged_thread);
			khugepaged_thread = NULL;
			goto fail;
		}

		if (!list_empty(&khugepaged_scan.mm_head))
			wake_up_interruptible(&khugepaged_wait);

		set_recommended_min_free_kbytes();
	} else if (khugepaged_thread) {
		kthread_stop(khugepaged_thread);
		khugepaged_thread = NULL;
	}
fail:
	mutex_unlock(&khugepaged_mutex);
	return err;
}

void khugepaged_min_free_kbytes_update(void)
{
	mutex_lock(&khugepaged_mutex);
	if (khugepaged_enabled() && khugepaged_thread)
		set_recommended_min_free_kbytes();
	mutex_unlock(&khugepaged_mutex);
}
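
/*
 * End-to-end usage sketch (illustrative; paths assume sysfs mounted at
 * /sys). Writing
 *
 *	echo always > /sys/kernel/mm/transparent_hugepage/enabled
 *
 * reaches start_stop_khugepaged() above and starts the kthread; a
 * process can instead opt in a single range with
 *
 *	madvise(addr, len, MADV_HUGEPAGE);
 *
 * which lands in hugepage_madvise() and registers the mm for scanning
 * without waiting for a page fault.
 */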