1 // SPDX-License-Identifier: GPL-2.0
2 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
5 #include <linux/sched.h>
6 #include <linux/sched/mm.h>
7 #include <linux/sched/coredump.h>
8 #include <linux/mmu_notifier.h>
9 #include <linux/rmap.h>
10 #include <linux/swap.h>
11 #include <linux/mm_inline.h>
12 #include <linux/kthread.h>
13 #include <linux/khugepaged.h>
14 #include <linux/freezer.h>
15 #include <linux/mman.h>
16 #include <linux/hashtable.h>
17 #include <linux/userfaultfd_k.h>
18 #include <linux/page_idle.h>
19 #include <linux/swapops.h>
20 #include <linux/shmem_fs.h>
23 #include <asm/pgalloc.h>
26 /* gross hack for <=4.19 stable */
27 #if defined(CONFIG_S390) || defined(CONFIG_ARM)
28 static void tlb_remove_table_smp_sync(void *arg)
30 /* Simply deliver the interrupt */
33 static void tlb_remove_table_sync_one(void)
35 smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
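/*
 * The synchronous IPI above forces every CPU out of any lockless page-table
 * walk (e.g. gup_fast) before khugepaged frees a page table; the callers
 * below rely on that serialization on these configurations.
 */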
46 SCAN_LACK_REFERENCED_PAGE,
60 SCAN_ALLOC_HUGE_PAGE_FAIL,
61 SCAN_CGROUP_CHARGE_FAIL,
66 #define CREATE_TRACE_POINTS
67 #include <trace/events/huge_memory.h>
69 static struct task_struct *khugepaged_thread __read_mostly;
70 static DEFINE_MUTEX(khugepaged_mutex);
72 /* default: scan 8*512 ptes (or vmas) every 10 seconds */
73 static unsigned int khugepaged_pages_to_scan __read_mostly;
74 static unsigned int khugepaged_pages_collapsed;
75 static unsigned int khugepaged_full_scans;
76 static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
77 /* during fragmentation poll the hugepage allocator once every minute */
78 static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
79 static unsigned long khugepaged_sleep_expire;
80 static DEFINE_SPINLOCK(khugepaged_mm_lock);
81 static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
83  * default: collapse hugepages if there is at least one pte mapped as it
84  * would have been mapped had the vma been large enough at page-fault time.
87 static unsigned int khugepaged_max_ptes_none __read_mostly;
88 static unsigned int khugepaged_max_ptes_swap __read_mostly;
90 #define MM_SLOTS_HASH_BITS 10
91 static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
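/*
 * mm_slots_hash maps an mm_struct to its mm_slot (2^MM_SLOTS_HASH_BITS
 * buckets), so khugepaged can quickly tell whether an mm is already
 * registered for scanning.
 */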
93 static struct kmem_cache *mm_slot_cache __read_mostly;
96 * struct mm_slot - hash lookup from mm to mm_slot
97 * @hash: hash collision list
98 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
99 * @mm: the mm that this information is valid for
102 struct hlist_node hash;
103 struct list_head mm_node;
104 struct mm_struct *mm;
108 * struct khugepaged_scan - cursor for scanning
109 * @mm_head: the head of the mm list to scan
110 * @mm_slot: the current mm_slot we are scanning
111  * @address: the next address inside that mm_slot to be scanned
113 * There is only the one khugepaged_scan instance of this cursor structure.
115 struct khugepaged_scan {
116 struct list_head mm_head;
117 struct mm_slot *mm_slot;
118 unsigned long address;
121 static struct khugepaged_scan khugepaged_scan = {
122 .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
126 static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
127 struct kobj_attribute *attr,
130 return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
133 static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
134 struct kobj_attribute *attr,
135 const char *buf, size_t count)
140 err = kstrtoul(buf, 10, &msecs);
141 if (err || msecs > UINT_MAX)
144 khugepaged_scan_sleep_millisecs = msecs;
145 khugepaged_sleep_expire = 0;
146 wake_up_interruptible(&khugepaged_wait);
150 static struct kobj_attribute scan_sleep_millisecs_attr =
151 __ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
152 scan_sleep_millisecs_store);
154 static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
155 struct kobj_attribute *attr,
158 return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
161 static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
162 struct kobj_attribute *attr,
163 const char *buf, size_t count)
168 err = kstrtoul(buf, 10, &msecs);
169 if (err || msecs > UINT_MAX)
172 khugepaged_alloc_sleep_millisecs = msecs;
173 khugepaged_sleep_expire = 0;
174 wake_up_interruptible(&khugepaged_wait);
178 static struct kobj_attribute alloc_sleep_millisecs_attr =
179 __ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
180 alloc_sleep_millisecs_store);
182 static ssize_t pages_to_scan_show(struct kobject *kobj,
183 struct kobj_attribute *attr,
186 return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
188 static ssize_t pages_to_scan_store(struct kobject *kobj,
189 struct kobj_attribute *attr,
190 const char *buf, size_t count)
195 err = kstrtoul(buf, 10, &pages);
196 if (err || !pages || pages > UINT_MAX)
199 khugepaged_pages_to_scan = pages;
203 static struct kobj_attribute pages_to_scan_attr =
204 __ATTR(pages_to_scan, 0644, pages_to_scan_show,
205 pages_to_scan_store);
207 static ssize_t pages_collapsed_show(struct kobject *kobj,
208 struct kobj_attribute *attr,
211 return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
213 static struct kobj_attribute pages_collapsed_attr =
214 __ATTR_RO(pages_collapsed);
216 static ssize_t full_scans_show(struct kobject *kobj,
217 struct kobj_attribute *attr,
220 return sprintf(buf, "%u\n", khugepaged_full_scans);
222 static struct kobj_attribute full_scans_attr =
223 __ATTR_RO(full_scans);
225 static ssize_t khugepaged_defrag_show(struct kobject *kobj,
226 struct kobj_attribute *attr, char *buf)
228 return single_hugepage_flag_show(kobj, attr, buf,
229 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
231 static ssize_t khugepaged_defrag_store(struct kobject *kobj,
232 struct kobj_attribute *attr,
233 const char *buf, size_t count)
235 return single_hugepage_flag_store(kobj, attr, buf, count,
236 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
238 static struct kobj_attribute khugepaged_defrag_attr =
239 __ATTR(defrag, 0644, khugepaged_defrag_show,
240 khugepaged_defrag_store);
243 * max_ptes_none controls whether khugepaged should collapse hugepages over
244 * any unmapped ptes, in turn potentially increasing the memory
245 * footprint of the vmas. When max_ptes_none is 0, khugepaged will not
246 * reduce the available free memory in the system as it
247 * runs. Increasing max_ptes_none will instead potentially reduce the
248 * free memory in the system during the khugepaged scan.
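/*
 * For example, with 2MB huge pages (HPAGE_PMD_NR == 512) the default of 511
 * lets khugepaged collapse a pmd range in which only a single pte is
 * populated, while max_ptes_none == 0 requires every pte to be populated.
 */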
250 static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
251 struct kobj_attribute *attr,
254 return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
256 static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
257 struct kobj_attribute *attr,
258 const char *buf, size_t count)
261 unsigned long max_ptes_none;
263 err = kstrtoul(buf, 10, &max_ptes_none);
264 if (err || max_ptes_none > HPAGE_PMD_NR-1)
267 khugepaged_max_ptes_none = max_ptes_none;
271 static struct kobj_attribute khugepaged_max_ptes_none_attr =
272 __ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
273 khugepaged_max_ptes_none_store);
275 static ssize_t khugepaged_max_ptes_swap_show(struct kobject *kobj,
276 struct kobj_attribute *attr,
279 return sprintf(buf, "%u\n", khugepaged_max_ptes_swap);
282 static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj,
283 struct kobj_attribute *attr,
284 const char *buf, size_t count)
287 unsigned long max_ptes_swap;
289 err = kstrtoul(buf, 10, &max_ptes_swap);
290 if (err || max_ptes_swap > HPAGE_PMD_NR-1)
293 khugepaged_max_ptes_swap = max_ptes_swap;
298 static struct kobj_attribute khugepaged_max_ptes_swap_attr =
299 __ATTR(max_ptes_swap, 0644, khugepaged_max_ptes_swap_show,
300 khugepaged_max_ptes_swap_store);
302 static struct attribute *khugepaged_attr[] = {
303 &khugepaged_defrag_attr.attr,
304 &khugepaged_max_ptes_none_attr.attr,
305 &pages_to_scan_attr.attr,
306 &pages_collapsed_attr.attr,
307 &full_scans_attr.attr,
308 &scan_sleep_millisecs_attr.attr,
309 &alloc_sleep_millisecs_attr.attr,
310 &khugepaged_max_ptes_swap_attr.attr,
314 struct attribute_group khugepaged_attr_group = {
315 .attrs = khugepaged_attr,
316 .name = "khugepaged",
318 #endif /* CONFIG_SYSFS */
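/*
 * With CONFIG_SYSFS the attribute group above is exposed by the THP core
 * under /sys/kernel/mm/transparent_hugepage/khugepaged/, so the knobs can
 * be tuned at run time, for example:
 *
 *   echo 4096 > /sys/kernel/mm/transparent_hugepage/khugepaged/pages_to_scan
 *   echo 0 > /sys/kernel/mm/transparent_hugepage/khugepaged/max_ptes_swap
 */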
320 #define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB)
322 int hugepage_madvise(struct vm_area_struct *vma,
323 unsigned long *vm_flags, int advice)
329 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
330 * can't handle this properly after s390_enable_sie, so we simply
331 * ignore the madvise to prevent qemu from causing a SIGSEGV.
333 if (mm_has_pgste(vma->vm_mm))
336 *vm_flags &= ~VM_NOHUGEPAGE;
337 *vm_flags |= VM_HUGEPAGE;
339 * If the vma becomes good for khugepaged to scan,
340 * register it here without waiting for a page fault that
341 * may not happen any time soon.
343 if (!(*vm_flags & VM_NO_KHUGEPAGED) &&
344 khugepaged_enter_vma_merge(vma, *vm_flags))
347 case MADV_NOHUGEPAGE:
348 *vm_flags &= ~VM_HUGEPAGE;
349 *vm_flags |= VM_NOHUGEPAGE;
351 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
352 * this vma even if the mm remains registered in khugepaged
353 * (i.e. it was registered before VM_NOHUGEPAGE was set).
361 int __init khugepaged_init(void)
363 mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
364 sizeof(struct mm_slot),
365 __alignof__(struct mm_slot), 0, NULL);
369 khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
370 khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
371 khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
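/*
 * With the common 2MB THP geometry (HPAGE_PMD_NR == 512) these defaults
 * mean: scan up to 4096 ptes per pass, and tolerate up to 511 empty ptes
 * and up to 64 swapped-out ptes in a range before refusing to collapse it.
 */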
376 void __init khugepaged_destroy(void)
378 kmem_cache_destroy(mm_slot_cache);
381 static inline struct mm_slot *alloc_mm_slot(void)
383 if (!mm_slot_cache) /* initialization failed */
385 return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
388 static inline void free_mm_slot(struct mm_slot *mm_slot)
390 kmem_cache_free(mm_slot_cache, mm_slot);
393 static struct mm_slot *get_mm_slot(struct mm_struct *mm)
395 struct mm_slot *mm_slot;
397 hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
398 if (mm == mm_slot->mm)
404 static void insert_to_mm_slots_hash(struct mm_struct *mm,
405 struct mm_slot *mm_slot)
408 hash_add(mm_slots_hash, &mm_slot->hash, (long)mm);
411 static inline int khugepaged_test_exit(struct mm_struct *mm)
413 return atomic_read(&mm->mm_users) == 0 || !mmget_still_valid(mm);
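/*
 * khugepaged_test_exit() returns true once the mm has no users left (or its
 * mappings can no longer be trusted, e.g. a core dump is in progress), so
 * every scan path rechecks it before touching the mm.
 */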
416 int __khugepaged_enter(struct mm_struct *mm)
418 struct mm_slot *mm_slot;
421 mm_slot = alloc_mm_slot();
425 /* __khugepaged_exit() must not run from under us */
426 VM_BUG_ON_MM(atomic_read(&mm->mm_users) == 0, mm);
427 if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
428 free_mm_slot(mm_slot);
432 spin_lock(&khugepaged_mm_lock);
433 insert_to_mm_slots_hash(mm, mm_slot);
435 * Insert just behind the scanning cursor, to let the area settle down a little.
438 wakeup = list_empty(&khugepaged_scan.mm_head);
439 list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
440 spin_unlock(&khugepaged_mm_lock);
444 wake_up_interruptible(&khugepaged_wait);
449 int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
450 unsigned long vm_flags)
452 unsigned long hstart, hend;
455 * Not yet faulted in so we will register later in the
456 * page fault if needed.
459 if (vma->vm_ops || (vm_flags & VM_NO_KHUGEPAGED))
460 /* khugepaged not yet working on file or special mappings */
462 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
463 hend = vma->vm_end & HPAGE_PMD_MASK;
465 return khugepaged_enter(vma, vm_flags);
469 void __khugepaged_exit(struct mm_struct *mm)
471 struct mm_slot *mm_slot;
474 spin_lock(&khugepaged_mm_lock);
475 mm_slot = get_mm_slot(mm);
476 if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
477 hash_del(&mm_slot->hash);
478 list_del(&mm_slot->mm_node);
481 spin_unlock(&khugepaged_mm_lock);
484 clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
485 free_mm_slot(mm_slot);
487 } else if (mm_slot) {
489 * This is required to serialize against
490 * khugepaged_test_exit() (which is guaranteed to run
491 * under mmap_sem read mode). Stop here (after we
492 * return, all pagetables will be destroyed) until
493 * khugepaged has finished working on the pagetables
494 * under the mmap_sem.
496 down_write(&mm->mmap_sem);
497 up_write(&mm->mmap_sem);
501 static void release_pte_page(struct page *page)
503 dec_node_page_state(page, NR_ISOLATED_ANON + page_is_file_cache(page));
505 putback_lru_page(page);
508 static void release_pte_pages(pte_t *pte, pte_t *_pte)
510 while (--_pte >= pte) {
511 pte_t pteval = *_pte;
512 if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)))
513 release_pte_page(pte_page(pteval));
517 static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
518 unsigned long address,
521 struct page *page = NULL;
523 int none_or_zero = 0, result = 0, referenced = 0;
524 bool writable = false;
526 for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
527 _pte++, address += PAGE_SIZE) {
528 pte_t pteval = *_pte;
529 if (pte_none(pteval) || (pte_present(pteval) &&
530 is_zero_pfn(pte_pfn(pteval)))) {
531 if (!userfaultfd_armed(vma) &&
532 ++none_or_zero <= khugepaged_max_ptes_none) {
535 result = SCAN_EXCEED_NONE_PTE;
539 if (!pte_present(pteval)) {
540 result = SCAN_PTE_NON_PRESENT;
543 page = vm_normal_page(vma, address, pteval);
544 if (unlikely(!page)) {
545 result = SCAN_PAGE_NULL;
549 /* TODO: teach khugepaged to collapse THP mapped with pte */
550 if (PageCompound(page)) {
551 result = SCAN_PAGE_COMPOUND;
555 VM_BUG_ON_PAGE(!PageAnon(page), page);
558 * We can do it before isolate_lru_page because the
559 * page can't be freed from under us. NOTE: PG_lock
560 * is needed to serialize against split_huge_page
561 * when invoked from the VM.
563 if (!trylock_page(page)) {
564 result = SCAN_PAGE_LOCK;
569 * cannot use mapcount: can't collapse if there's a gup pin.
570 * The page must only be referenced by the scanned process
571 * and page swap cache.
573 if (page_count(page) != 1 + PageSwapCache(page)) {
575 result = SCAN_PAGE_COUNT;
578 if (pte_write(pteval)) {
581 if (PageSwapCache(page) &&
582 !reuse_swap_page(page, NULL)) {
584 result = SCAN_SWAP_CACHE_PAGE;
588 * Page is not in the swap cache. It can be collapsed into a THP.
594 * Isolate the page to avoid collapsing a hugepage
595 * currently in use by the VM.
597 if (isolate_lru_page(page)) {
599 result = SCAN_DEL_PAGE_LRU;
602 inc_node_page_state(page,
603 NR_ISOLATED_ANON + page_is_file_cache(page));
604 VM_BUG_ON_PAGE(!PageLocked(page), page);
605 VM_BUG_ON_PAGE(PageLRU(page), page);
607 /* There should be enough young ptes to collapse the page */
608 if (pte_young(pteval) ||
609 page_is_young(page) || PageReferenced(page) ||
610 mmu_notifier_test_young(vma->vm_mm, address))
614 if (unlikely(!writable)) {
615 result = SCAN_PAGE_RO;
616 } else if (unlikely(!referenced)) {
617 result = SCAN_LACK_REFERENCED_PAGE;
619 result = SCAN_SUCCEED;
620 trace_mm_collapse_huge_page_isolate(page, none_or_zero,
621 referenced, writable, result);
625 release_pte_pages(pte, _pte);
626 trace_mm_collapse_huge_page_isolate(page, none_or_zero,
627 referenced, writable, result);
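/*
 * On SCAN_SUCCEED every present pte in the range points to a non-compound
 * anonymous page that is now locked and isolated from the LRU, at least one
 * pte was young/referenced and at least one was writable; on any failure the
 * pages isolated so far are unlocked and put back via release_pte_pages().
 */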
631 static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
632 struct vm_area_struct *vma,
633 unsigned long address,
637 for (_pte = pte; _pte < pte + HPAGE_PMD_NR;
638 _pte++, page++, address += PAGE_SIZE) {
639 pte_t pteval = *_pte;
640 struct page *src_page;
642 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
643 clear_user_highpage(page, address);
644 add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
645 if (is_zero_pfn(pte_pfn(pteval))) {
647 * ptl mostly unnecessary.
651 * paravirt calls inside pte_clear here are superfluous.
654 pte_clear(vma->vm_mm, address, _pte);
658 src_page = pte_page(pteval);
659 copy_user_highpage(page, src_page, address, vma);
660 VM_BUG_ON_PAGE(page_mapcount(src_page) != 1, src_page);
661 release_pte_page(src_page);
663 * ptl mostly unnecessary, but preempt has to
664 * be disabled to update the per-cpu stats
665 * inside page_remove_rmap().
669 * paravirt calls inside pte_clear here are superfluous.
672 pte_clear(vma->vm_mm, address, _pte);
673 page_remove_rmap(src_page, false);
675 free_page_and_swap_cache(src_page);
680 static void khugepaged_alloc_sleep(void)
684 add_wait_queue(&khugepaged_wait, &wait);
685 freezable_schedule_timeout_interruptible(
686 msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
687 remove_wait_queue(&khugepaged_wait, &wait);
690 static int khugepaged_node_load[MAX_NUMNODES];
692 static bool khugepaged_scan_abort(int nid)
697 * If node_reclaim_mode is disabled, then no extra effort is made to
698 * allocate memory locally.
700 if (!node_reclaim_mode)
703 /* If there is a count for this node already, it must be acceptable */
704 if (khugepaged_node_load[nid])
707 for (i = 0; i < MAX_NUMNODES; i++) {
708 if (!khugepaged_node_load[i])
710 if (node_distance(nid, i) > RECLAIM_DISTANCE)
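/*
 * In other words: with node_reclaim_mode enabled, abort the scan as soon as
 * a page is found on a node farther than RECLAIM_DISTANCE from a node that
 * already contributed pages, rather than build a THP from remote memory.
 */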
716 /* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
717 static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
719 return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
723 static int khugepaged_find_target_node(void)
725 static int last_khugepaged_target_node = NUMA_NO_NODE;
726 int nid, target_node = 0, max_value = 0;
728 /* find the first node with the max number of normal pages hit */
729 for (nid = 0; nid < MAX_NUMNODES; nid++)
730 if (khugepaged_node_load[nid] > max_value) {
731 max_value = khugepaged_node_load[nid];
735 /* do some balancing if several nodes have the same hit count */
736 if (target_node <= last_khugepaged_target_node)
737 for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
739 if (max_value == khugepaged_node_load[nid]) {
744 last_khugepaged_target_node = target_node;
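/*
 * The target node is the one that supplied the most base pages during this
 * scan; remembering the previous choice lets ties rotate between nodes
 * instead of always picking the lowest-numbered one.
 */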
748 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
750 if (IS_ERR(*hpage)) {
756 khugepaged_alloc_sleep();
766 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
768 VM_BUG_ON_PAGE(*hpage, *hpage);
770 *hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
771 if (unlikely(!*hpage)) {
772 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
773 *hpage = ERR_PTR(-ENOMEM);
777 prep_transhuge_page(*hpage);
778 count_vm_event(THP_COLLAPSE_ALLOC);
782 static int khugepaged_find_target_node(void)
787 static inline struct page *alloc_khugepaged_hugepage(void)
791 page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(),
794 prep_transhuge_page(page);
798 static struct page *khugepaged_alloc_hugepage(bool *wait)
803 hpage = alloc_khugepaged_hugepage();
805 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
810 khugepaged_alloc_sleep();
812 count_vm_event(THP_COLLAPSE_ALLOC);
813 } while (unlikely(!hpage) && likely(khugepaged_enabled()));
818 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
821 * If the hpage allocated earlier was briefly exposed in page cache
822 * before collapse_file() failed, it is possible that racing lookups
823 * have not yet completed, and would then be unpleasantly surprised by
824 * finding the hpage reused for the same mapping at a different offset.
825 * Just release the previous allocation if there is any danger of that.
827 if (*hpage && page_count(*hpage) > 1) {
833 *hpage = khugepaged_alloc_hugepage(wait);
835 if (unlikely(!*hpage))
842 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
850 static bool hugepage_vma_check(struct vm_area_struct *vma)
852 if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
853 (vma->vm_flags & VM_NOHUGEPAGE) ||
854 test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
856 if (shmem_file(vma->vm_file)) {
857 if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
859 return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
862 if (!vma->anon_vma || vma->vm_ops)
864 if (is_vma_temporary_stack(vma))
866 return !(vma->vm_flags & VM_NO_KHUGEPAGED);
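/*
 * In short: a vma is eligible when THP is enabled for it (VM_HUGEPAGE or the
 * global "always" mode), it is not marked VM_NOHUGEPAGE/special/hugetlb or a
 * temporary stack, and it is either plain anonymous memory or, with
 * CONFIG_TRANSPARENT_HUGE_PAGECACHE, a suitably aligned shmem mapping.
 */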
870 * If the mmap_sem was temporarily dropped, revalidate the vma
871 * before taking mmap_sem.
872 * Return 0 on success, otherwise return a non-zero scan code.
876 static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
877 struct vm_area_struct **vmap)
879 struct vm_area_struct *vma;
880 unsigned long hstart, hend;
882 if (unlikely(khugepaged_test_exit(mm)))
883 return SCAN_ANY_PROCESS;
885 *vmap = vma = find_vma(mm, address);
887 return SCAN_VMA_NULL;
889 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
890 hend = vma->vm_end & HPAGE_PMD_MASK;
891 if (address < hstart || address + HPAGE_PMD_SIZE > hend)
892 return SCAN_ADDRESS_RANGE;
893 if (!hugepage_vma_check(vma))
894 return SCAN_VMA_CHECK;
899 * Bring missing pages in from swap, to complete THP collapse.
900 * Only done if khugepaged_scan_pmd believes it is worthwhile.
902 * Called and returns without pte mapped or spinlocks held,
903 * but with mmap_sem held to protect against vma changes.
906 static bool __collapse_huge_page_swapin(struct mm_struct *mm,
907 struct vm_area_struct *vma,
908 unsigned long address, pmd_t *pmd,
911 int swapped_in = 0, ret = 0;
912 struct vm_fault vmf = {
915 .flags = FAULT_FLAG_ALLOW_RETRY,
917 .pgoff = linear_page_index(vma, address),
920 /* we only decide to swap in if there are enough young ptes */
921 if (referenced < HPAGE_PMD_NR/2) {
922 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
925 vmf.pte = pte_offset_map(pmd, address);
926 for (; vmf.address < address + HPAGE_PMD_NR*PAGE_SIZE;
927 vmf.pte++, vmf.address += PAGE_SIZE) {
928 vmf.orig_pte = *vmf.pte;
929 if (!is_swap_pte(vmf.orig_pte))
932 ret = do_swap_page(&vmf);
934 /* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */
935 if (ret & VM_FAULT_RETRY) {
936 down_read(&mm->mmap_sem);
937 if (hugepage_vma_revalidate(mm, address, &vmf.vma)) {
938 /* vma is no longer available, don't continue to swapin */
939 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
942 /* check if the pmd is still valid */
943 if (mm_find_pmd(mm, address) != pmd) {
944 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
948 if (ret & VM_FAULT_ERROR) {
949 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
952 /* pte is unmapped now, we need to map it */
953 vmf.pte = pte_offset_map(pmd, vmf.address);
957 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
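/*
 * Either way the function returns with mmap_sem held (re-taken here when
 * do_swap_page() dropped it with VM_FAULT_RETRY); returning true means every
 * swap pte in the range was faulted back in and the collapse can proceed.
 */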
961 static void collapse_huge_page(struct mm_struct *mm,
962 unsigned long address,
964 int node, int referenced)
969 struct page *new_page;
970 spinlock_t *pmd_ptl, *pte_ptl;
971 int isolated = 0, result = 0;
972 struct mem_cgroup *memcg;
973 struct vm_area_struct *vma;
974 unsigned long mmun_start; /* For mmu_notifiers */
975 unsigned long mmun_end; /* For mmu_notifiers */
978 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
980 /* Only allocate from the target node */
981 gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
984 * Before allocating the hugepage, release the mmap_sem read lock.
985 * The allocation can potentially take a long time if it involves
986 * sync compaction, and we do not need to hold the mmap_sem during
987 * that. We will recheck the vma after taking it again in write mode.
989 up_read(&mm->mmap_sem);
990 new_page = khugepaged_alloc_page(hpage, gfp, node);
992 result = SCAN_ALLOC_HUGE_PAGE_FAIL;
996 /* Do not oom kill for khugepaged charges */
997 if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp | __GFP_NORETRY,
999 result = SCAN_CGROUP_CHARGE_FAIL;
1003 down_read(&mm->mmap_sem);
1004 result = hugepage_vma_revalidate(mm, address, &vma);
1006 mem_cgroup_cancel_charge(new_page, memcg, true);
1007 up_read(&mm->mmap_sem);
1011 pmd = mm_find_pmd(mm, address);
1013 result = SCAN_PMD_NULL;
1014 mem_cgroup_cancel_charge(new_page, memcg, true);
1015 up_read(&mm->mmap_sem);
1020 * __collapse_huge_page_swapin always returns with mmap_sem locked.
1021 * If it fails, we release mmap_sem and jump out_nolock.
1022 * Continuing to collapse causes inconsistency.
1024 if (!__collapse_huge_page_swapin(mm, vma, address, pmd, referenced)) {
1025 mem_cgroup_cancel_charge(new_page, memcg, true);
1026 up_read(&mm->mmap_sem);
1030 up_read(&mm->mmap_sem);
1032 * Prevent all access to the pagetables, with the exception of
1033 * gup_fast (handled later by the ptep_clear_flush) and rmap walks
1034 * by the VM (handled by the anon_vma lock + PG_lock).
1036 down_write(&mm->mmap_sem);
1037 result = hugepage_vma_revalidate(mm, address, &vma);
1040 /* check if the pmd is still valid */
1041 if (mm_find_pmd(mm, address) != pmd)
1044 anon_vma_lock_write(vma->anon_vma);
1046 pte = pte_offset_map(pmd, address);
1047 pte_ptl = pte_lockptr(mm, pmd);
1049 mmun_start = address;
1050 mmun_end = address + HPAGE_PMD_SIZE;
1051 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
1052 pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
1054 * After this gup_fast can't run anymore. This also removes
1055 * any huge TLB entry from the CPU so we won't allow
1056 * huge and small TLB entries for the same virtual address
1057 * to avoid the risk of CPU bugs in that area.
1059 _pmd = pmdp_collapse_flush(vma, address, pmd);
1060 spin_unlock(pmd_ptl);
1061 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
1062 tlb_remove_table_sync_one();
1065 isolated = __collapse_huge_page_isolate(vma, address, pte);
1066 spin_unlock(pte_ptl);
1068 if (unlikely(!isolated)) {
1071 BUG_ON(!pmd_none(*pmd));
1073 * We can only use set_pmd_at when establishing
1074 * hugepmds and never for establishing regular pmds that
1075 * point to regular pagetables. Use pmd_populate for that.
1077 pmd_populate(mm, pmd, pmd_pgtable(_pmd));
1078 spin_unlock(pmd_ptl);
1079 anon_vma_unlock_write(vma->anon_vma);
1085 * All pages are isolated and locked so anon_vma rmap
1086 * can't run anymore.
1088 anon_vma_unlock_write(vma->anon_vma);
1090 __collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl);
1092 __SetPageUptodate(new_page);
1093 pgtable = pmd_pgtable(_pmd);
1095 _pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
1096 _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
1099 * spin_lock() below is not the equivalent of smp_wmb(), so
1100 * this is needed to keep the copy_huge_page writes from becoming
1101 * visible after the set_pmd_at() write.
1106 BUG_ON(!pmd_none(*pmd));
1107 page_add_new_anon_rmap(new_page, vma, address, true);
1108 mem_cgroup_commit_charge(new_page, memcg, false, true);
1109 lru_cache_add_active_or_unevictable(new_page, vma);
1110 pgtable_trans_huge_deposit(mm, pmd, pgtable);
1111 set_pmd_at(mm, address, pmd, _pmd);
1112 update_mmu_cache_pmd(vma, address, pmd);
1113 spin_unlock(pmd_ptl);
1117 khugepaged_pages_collapsed++;
1118 result = SCAN_SUCCEED;
1120 up_write(&mm->mmap_sem);
1122 trace_mm_collapse_huge_page(mm, isolated, result);
1125 mem_cgroup_cancel_charge(new_page, memcg, true);
1129 static int khugepaged_scan_pmd(struct mm_struct *mm,
1130 struct vm_area_struct *vma,
1131 unsigned long address,
1132 struct page **hpage)
1136 int ret = 0, none_or_zero = 0, result = 0, referenced = 0;
1137 struct page *page = NULL;
1138 unsigned long _address;
1140 int node = NUMA_NO_NODE, unmapped = 0;
1141 bool writable = false;
1143 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1145 pmd = mm_find_pmd(mm, address);
1147 result = SCAN_PMD_NULL;
1151 memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
1152 pte = pte_offset_map_lock(mm, pmd, address, &ptl);
1153 for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
1154 _pte++, _address += PAGE_SIZE) {
1155 pte_t pteval = *_pte;
1156 if (is_swap_pte(pteval)) {
1157 if (++unmapped <= khugepaged_max_ptes_swap) {
1160 result = SCAN_EXCEED_SWAP_PTE;
1164 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
1165 if (!userfaultfd_armed(vma) &&
1166 ++none_or_zero <= khugepaged_max_ptes_none) {
1169 result = SCAN_EXCEED_NONE_PTE;
1173 if (!pte_present(pteval)) {
1174 result = SCAN_PTE_NON_PRESENT;
1177 if (pte_write(pteval))
1180 page = vm_normal_page(vma, _address, pteval);
1181 if (unlikely(!page)) {
1182 result = SCAN_PAGE_NULL;
1186 /* TODO: teach khugepaged to collapse THP mapped with pte */
1187 if (PageCompound(page)) {
1188 result = SCAN_PAGE_COMPOUND;
1193 * Record which node the original page is from and save this
1194 * information to khugepaged_node_load[].
1195 * Khugepaged will allocate the hugepage from the node that has the max hit record.
1198 node = page_to_nid(page);
1199 if (khugepaged_scan_abort(node)) {
1200 result = SCAN_SCAN_ABORT;
1203 khugepaged_node_load[node]++;
1204 if (!PageLRU(page)) {
1205 result = SCAN_PAGE_LRU;
1208 if (PageLocked(page)) {
1209 result = SCAN_PAGE_LOCK;
1212 if (!PageAnon(page)) {
1213 result = SCAN_PAGE_ANON;
1218 * cannot use mapcount: can't collapse if there's a gup pin.
1219 * The page must only be referenced by the scanned process
1220 * and page swap cache.
1222 if (page_count(page) != 1 + PageSwapCache(page)) {
1223 result = SCAN_PAGE_COUNT;
1226 if (pte_young(pteval) ||
1227 page_is_young(page) || PageReferenced(page) ||
1228 mmu_notifier_test_young(vma->vm_mm, address))
1233 result = SCAN_SUCCEED;
1236 result = SCAN_LACK_REFERENCED_PAGE;
1239 result = SCAN_PAGE_RO;
1242 pte_unmap_unlock(pte, ptl);
1244 node = khugepaged_find_target_node();
1245 /* collapse_huge_page will return with the mmap_sem released */
1246 collapse_huge_page(mm, address, hpage, node, referenced);
1249 trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
1250 none_or_zero, result, unmapped);
1254 static void collect_mm_slot(struct mm_slot *mm_slot)
1256 struct mm_struct *mm = mm_slot->mm;
1258 VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
1260 if (khugepaged_test_exit(mm)) {
1262 hash_del(&mm_slot->hash);
1263 list_del(&mm_slot->mm_node);
1266 * Not strictly needed because the mm exited already.
1268 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
1271 /* khugepaged_mm_lock actually not necessary for the below */
1272 free_mm_slot(mm_slot);
1277 #if defined(CONFIG_SHMEM) && defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE)
1278 static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
1280 struct vm_area_struct *vma;
1281 struct mm_struct *mm;
1285 i_mmap_lock_write(mapping);
1286 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
1287 /* probably overkill */
1290 addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
1291 if (addr & ~HPAGE_PMD_MASK)
1293 if (vma->vm_end < addr + HPAGE_PMD_SIZE)
1296 pmd = mm_find_pmd(mm, addr);
1300 * We need exclusive mmap_sem to retract a page table.
1301 * If the trylock fails we would end up with a pte-mapped THP after
1302 * a re-fault. Not ideal, but it's more important not to disturb
1303 * the system too much.
1305 if (down_write_trylock(&mm->mmap_sem)) {
1306 if (!khugepaged_test_exit(mm)) {
1308 unsigned long end = addr + HPAGE_PMD_SIZE;
1310 mmu_notifier_invalidate_range_start(mm, addr,
1312 ptl = pmd_lock(mm, pmd);
1313 /* assume page table is clear */
1314 _pmd = pmdp_collapse_flush(vma, addr, pmd);
1316 atomic_long_dec(&mm->nr_ptes);
1317 tlb_remove_table_sync_one();
1318 pte_free(mm, pmd_pgtable(_pmd));
1319 mmu_notifier_invalidate_range_end(mm, addr,
1322 up_write(&mm->mmap_sem);
1325 i_mmap_unlock_write(mapping);
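/*
 * Called by collapse_shmem() after the huge page has replaced the old
 * radix-tree entries: the now-empty pte page tables covering the range are
 * torn down so the next fault in each mapping can install a huge pmd.
 */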
1329 * collapse_shmem - collapse small tmpfs/shmem pages into a huge one.
1331 * The basic scheme is simple, the details are more complex:
1332 *  - allocate and lock a new huge page;
1333 *  - scan over the radix tree, replacing old pages with the new one
1334 *    + swap in pages if necessary;
1336 *    + keep old pages around in case rollback is required;
1337 *  - if replacing succeeds:
1340 *    + unlock the huge page;
1341 *  - if replacing failed:
1342 *    + put all pages back and unfreeze them;
1343 *    + restore gaps in the radix tree;
1344 *    + unlock and free the huge page;
1346 static void collapse_shmem(struct mm_struct *mm,
1347 struct address_space *mapping, pgoff_t start,
1348 struct page **hpage, int node)
1351 struct page *page, *new_page, *tmp;
1352 struct mem_cgroup *memcg;
1353 pgoff_t index, end = start + HPAGE_PMD_NR;
1354 LIST_HEAD(pagelist);
1355 struct radix_tree_iter iter;
1357 int nr_none = 0, result = SCAN_SUCCEED;
1359 VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
1361 /* Only allocate from the target node */
1362 gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
1364 new_page = khugepaged_alloc_page(hpage, gfp, node);
1366 result = SCAN_ALLOC_HUGE_PAGE_FAIL;
1370 /* Do not oom kill for khugepaged charges */
1371 if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp | __GFP_NORETRY,
1373 result = SCAN_CGROUP_CHARGE_FAIL;
1377 __SetPageLocked(new_page);
1378 __SetPageSwapBacked(new_page);
1379 new_page->index = start;
1380 new_page->mapping = mapping;
1383 * At this point the new_page is locked and not up-to-date.
1384 * It's safe to insert it into the page cache, because nobody would
1385 * be able to map it or use it in another way until we unlock it.
1389 spin_lock_irq(&mapping->tree_lock);
1390 radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
1391 int n = min(iter.index, end) - index;
1394 * Stop if extent has been hole-punched, and is now completely
1395 * empty (the more obvious i_size_read() check would take an
1396 * irq-unsafe seqlock on 32-bit).
1398 if (n >= HPAGE_PMD_NR) {
1399 result = SCAN_TRUNCATED;
1404 * Handle holes in the radix tree: charge it from shmem and
1405 * insert relevant subpage of new_page into the radix-tree.
1407 if (n && !shmem_charge(mapping->host, n)) {
1411 for (; index < min(iter.index, end); index++) {
1412 radix_tree_insert(&mapping->page_tree, index,
1413 new_page + (index % HPAGE_PMD_NR));
1421 page = radix_tree_deref_slot_protected(slot,
1422 &mapping->tree_lock);
1423 if (radix_tree_exceptional_entry(page) || !PageUptodate(page)) {
1424 spin_unlock_irq(&mapping->tree_lock);
1425 /* swap in or instantiate fallocated page */
1426 if (shmem_getpage(mapping->host, index, &page,
1431 } else if (trylock_page(page)) {
1433 spin_unlock_irq(&mapping->tree_lock);
1435 result = SCAN_PAGE_LOCK;
1440 * The page must be locked, so we can drop the tree_lock
1441 * without racing with truncate.
1443 VM_BUG_ON_PAGE(!PageLocked(page), page);
1444 VM_BUG_ON_PAGE(!PageUptodate(page), page);
1447 * If file was truncated then extended, or hole-punched, before
1448 * we locked the first page, then a THP might be there already.
1450 if (PageTransCompound(page)) {
1451 result = SCAN_PAGE_COMPOUND;
1455 if (page_mapping(page) != mapping) {
1456 result = SCAN_TRUNCATED;
1460 if (isolate_lru_page(page)) {
1461 result = SCAN_DEL_PAGE_LRU;
1465 if (page_mapped(page))
1466 unmap_mapping_range(mapping, index << PAGE_SHIFT,
1469 spin_lock_irq(&mapping->tree_lock);
1471 slot = radix_tree_lookup_slot(&mapping->page_tree, index);
1472 VM_BUG_ON_PAGE(page != radix_tree_deref_slot_protected(slot,
1473 &mapping->tree_lock), page);
1474 VM_BUG_ON_PAGE(page_mapped(page), page);
1477 * The page is expected to have page_count() == 3:
1478 * - we hold a pin on it;
1479 * - one reference from radix tree;
1480 * - one from isolate_lru_page;
1482 if (!page_ref_freeze(page, 3)) {
1483 result = SCAN_PAGE_COUNT;
1484 spin_unlock_irq(&mapping->tree_lock);
1485 putback_lru_page(page);
1490 * Add the page to the list to be able to undo the collapse if
1491 * something goes wrong.
1493 list_add_tail(&page->lru, &pagelist);
1495 /* Finally, replace with the new page. */
1496 radix_tree_replace_slot(&mapping->page_tree, slot,
1497 new_page + (index % HPAGE_PMD_NR));
1499 slot = radix_tree_iter_resume(slot, &iter);
1509 * Handle hole in radix tree at the end of the range.
1510 * This code only triggers if there's nothing in the radix tree beyond 'end'.
1514 int n = end - index;
1516 /* Stop if extent has been truncated, and is now empty */
1517 if (n >= HPAGE_PMD_NR) {
1518 result = SCAN_TRUNCATED;
1521 if (!shmem_charge(mapping->host, n)) {
1525 for (; index < end; index++) {
1526 radix_tree_insert(&mapping->page_tree, index,
1527 new_page + (index % HPAGE_PMD_NR));
1532 __inc_node_page_state(new_page, NR_SHMEM_THPS);
1534 struct zone *zone = page_zone(new_page);
1536 __mod_node_page_state(zone->zone_pgdat, NR_FILE_PAGES, nr_none);
1537 __mod_node_page_state(zone->zone_pgdat, NR_SHMEM, nr_none);
1541 spin_unlock_irq(&mapping->tree_lock);
1544 if (result == SCAN_SUCCEED) {
1546 * Replacing old pages with the new one has succeeded, now we need to
1547 * copy the content and free the old pages.
1550 list_for_each_entry_safe(page, tmp, &pagelist, lru) {
1551 while (index < page->index) {
1552 clear_highpage(new_page + (index % HPAGE_PMD_NR));
1555 copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
1557 list_del(&page->lru);
1558 page->mapping = NULL;
1559 page_ref_unfreeze(page, 1);
1560 ClearPageActive(page);
1561 ClearPageUnevictable(page);
1566 while (index < end) {
1567 clear_highpage(new_page + (index % HPAGE_PMD_NR));
1571 SetPageUptodate(new_page);
1572 page_ref_add(new_page, HPAGE_PMD_NR - 1);
1573 set_page_dirty(new_page);
1574 mem_cgroup_commit_charge(new_page, memcg, false, true);
1575 lru_cache_add_anon(new_page);
1578 * Remove pte page tables, so we can re-fault the page as huge.
1580 retract_page_tables(mapping, start);
1583 /* Something went wrong: roll back changes to the radix tree */
1584 spin_lock_irq(&mapping->tree_lock);
1585 mapping->nrpages -= nr_none;
1586 shmem_uncharge(mapping->host, nr_none);
1588 radix_tree_for_each_slot(slot, &mapping->page_tree, &iter,
1590 if (iter.index >= end)
1592 page = list_first_entry_or_null(&pagelist,
1594 if (!page || iter.index < page->index) {
1598 /* Put holes back where they were */
1599 radix_tree_delete(&mapping->page_tree,
1604 VM_BUG_ON_PAGE(page->index != iter.index, page);
1606 /* Unfreeze the page. */
1607 list_del(&page->lru);
1608 page_ref_unfreeze(page, 2);
1609 radix_tree_replace_slot(&mapping->page_tree,
1611 slot = radix_tree_iter_resume(slot, &iter);
1612 spin_unlock_irq(&mapping->tree_lock);
1614 putback_lru_page(page);
1615 spin_lock_irq(&mapping->tree_lock);
1618 spin_unlock_irq(&mapping->tree_lock);
1620 mem_cgroup_cancel_charge(new_page, memcg, true);
1621 new_page->mapping = NULL;
1624 unlock_page(new_page);
1626 VM_BUG_ON(!list_empty(&pagelist));
1627 /* TODO: tracepoints */
1630 static void khugepaged_scan_shmem(struct mm_struct *mm,
1631 struct address_space *mapping,
1632 pgoff_t start, struct page **hpage)
1634 struct page *page = NULL;
1635 struct radix_tree_iter iter;
1638 int node = NUMA_NO_NODE;
1639 int result = SCAN_SUCCEED;
1643 memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
1645 radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
1646 if (iter.index >= start + HPAGE_PMD_NR)
1649 page = radix_tree_deref_slot(slot);
1650 if (radix_tree_deref_retry(page)) {
1651 slot = radix_tree_iter_retry(&iter);
1655 if (radix_tree_exception(page)) {
1656 if (++swap > khugepaged_max_ptes_swap) {
1657 result = SCAN_EXCEED_SWAP_PTE;
1663 if (PageTransCompound(page)) {
1664 result = SCAN_PAGE_COMPOUND;
1668 node = page_to_nid(page);
1669 if (khugepaged_scan_abort(node)) {
1670 result = SCAN_SCAN_ABORT;
1673 khugepaged_node_load[node]++;
1675 if (!PageLRU(page)) {
1676 result = SCAN_PAGE_LRU;
1680 if (page_count(page) != 1 + page_mapcount(page)) {
1681 result = SCAN_PAGE_COUNT;
1686 * We probably should check if the page is referenced here, but
1687 * nobody would transfer pte_young() to PageReferenced() for us.
1688 * And rmap walk here is just too costly...
1693 if (need_resched()) {
1694 slot = radix_tree_iter_resume(slot, &iter);
1700 if (result == SCAN_SUCCEED) {
1701 if (present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
1702 result = SCAN_EXCEED_NONE_PTE;
1704 node = khugepaged_find_target_node();
1705 collapse_shmem(mm, mapping, start, hpage, node);
1709 /* TODO: tracepoints */
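/*
 * Note that the shmem scan reuses the anonymous-memory limits: swapped-out
 * entries in the range are bounded by max_ptes_swap and holes by
 * max_ptes_none before collapse_shmem() is attempted.
 */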
1712 static void khugepaged_scan_shmem(struct mm_struct *mm,
1713 struct address_space *mapping,
1714 pgoff_t start, struct page **hpage)
1720 static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
1721 struct page **hpage)
1722 __releases(&khugepaged_mm_lock)
1723 __acquires(&khugepaged_mm_lock)
1725 struct mm_slot *mm_slot;
1726 struct mm_struct *mm;
1727 struct vm_area_struct *vma;
1731 VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
1733 if (khugepaged_scan.mm_slot)
1734 mm_slot = khugepaged_scan.mm_slot;
1736 mm_slot = list_entry(khugepaged_scan.mm_head.next,
1737 struct mm_slot, mm_node);
1738 khugepaged_scan.address = 0;
1739 khugepaged_scan.mm_slot = mm_slot;
1741 spin_unlock(&khugepaged_mm_lock);
1745 * Don't wait for semaphore (to avoid long wait times). Just move to
1746 * the next mm on the list.
1749 if (unlikely(!down_read_trylock(&mm->mmap_sem)))
1750 goto breakouterloop_mmap_sem;
1751 if (likely(!khugepaged_test_exit(mm)))
1752 vma = find_vma(mm, khugepaged_scan.address);
1755 for (; vma; vma = vma->vm_next) {
1756 unsigned long hstart, hend;
1759 if (unlikely(khugepaged_test_exit(mm))) {
1763 if (!hugepage_vma_check(vma)) {
1768 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
1769 hend = vma->vm_end & HPAGE_PMD_MASK;
1772 if (khugepaged_scan.address > hend)
1774 if (khugepaged_scan.address < hstart)
1775 khugepaged_scan.address = hstart;
1776 VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
1778 while (khugepaged_scan.address < hend) {
1781 if (unlikely(khugepaged_test_exit(mm)))
1782 goto breakouterloop;
1784 VM_BUG_ON(khugepaged_scan.address < hstart ||
1785 khugepaged_scan.address + HPAGE_PMD_SIZE >
1787 if (shmem_file(vma->vm_file)) {
1789 pgoff_t pgoff = linear_page_index(vma,
1790 khugepaged_scan.address);
1791 if (!shmem_huge_enabled(vma))
1793 file = get_file(vma->vm_file);
1794 up_read(&mm->mmap_sem);
1796 khugepaged_scan_shmem(mm, file->f_mapping,
1800 ret = khugepaged_scan_pmd(mm, vma,
1801 khugepaged_scan.address,
1804 /* move to next address */
1805 khugepaged_scan.address += HPAGE_PMD_SIZE;
1806 progress += HPAGE_PMD_NR;
1808 /* we released mmap_sem so break loop */
1809 goto breakouterloop_mmap_sem;
1810 if (progress >= pages)
1811 goto breakouterloop;
1815 up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */
1816 breakouterloop_mmap_sem:
1818 spin_lock(&khugepaged_mm_lock);
1819 VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
1821 * Release the current mm_slot if this mm is about to die, or
1822 * if we scanned all vmas of this mm.
1824 if (khugepaged_test_exit(mm) || !vma) {
1826 * Make sure that if mm_users is reaching zero while
1827 * khugepaged runs here, khugepaged_exit will find
1828 * mm_slot not pointing to the exiting mm.
1830 if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
1831 khugepaged_scan.mm_slot = list_entry(
1832 mm_slot->mm_node.next,
1833 struct mm_slot, mm_node);
1834 khugepaged_scan.address = 0;
1836 khugepaged_scan.mm_slot = NULL;
1837 khugepaged_full_scans++;
1840 collect_mm_slot(mm_slot);
1846 static int khugepaged_has_work(void)
1848 return !list_empty(&khugepaged_scan.mm_head) &&
1849 khugepaged_enabled();
1852 static int khugepaged_wait_event(void)
1854 return !list_empty(&khugepaged_scan.mm_head) ||
1855 kthread_should_stop();
1858 static void khugepaged_do_scan(void)
1860 struct page *hpage = NULL;
1861 unsigned int progress = 0, pass_through_head = 0;
1862 unsigned int pages = khugepaged_pages_to_scan;
1865 barrier(); /* write khugepaged_pages_to_scan to local stack */
1867 while (progress < pages) {
1868 if (!khugepaged_prealloc_page(&hpage, &wait))
1873 if (unlikely(kthread_should_stop() || try_to_freeze()))
1876 spin_lock(&khugepaged_mm_lock);
1877 if (!khugepaged_scan.mm_slot)
1878 pass_through_head++;
1879 if (khugepaged_has_work() &&
1880 pass_through_head < 2)
1881 progress += khugepaged_scan_mm_slot(pages - progress,
1885 spin_unlock(&khugepaged_mm_lock);
1888 if (!IS_ERR_OR_NULL(hpage))
1892 static bool khugepaged_should_wakeup(void)
1894 return kthread_should_stop() ||
1895 time_after_eq(jiffies, khugepaged_sleep_expire);
1898 static void khugepaged_wait_work(void)
1900 if (khugepaged_has_work()) {
1901 const unsigned long scan_sleep_jiffies =
1902 msecs_to_jiffies(khugepaged_scan_sleep_millisecs);
1904 if (!scan_sleep_jiffies)
1907 khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
1908 wait_event_freezable_timeout(khugepaged_wait,
1909 khugepaged_should_wakeup(),
1910 scan_sleep_jiffies);
1914 if (khugepaged_enabled())
1915 wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
1918 static int khugepaged(void *none)
1920 struct mm_slot *mm_slot;
1923 set_user_nice(current, MAX_NICE);
1925 while (!kthread_should_stop()) {
1926 khugepaged_do_scan();
1927 khugepaged_wait_work();
1930 spin_lock(&khugepaged_mm_lock);
1931 mm_slot = khugepaged_scan.mm_slot;
1932 khugepaged_scan.mm_slot = NULL;
1934 collect_mm_slot(mm_slot);
1935 spin_unlock(&khugepaged_mm_lock);
1939 static void set_recommended_min_free_kbytes(void)
1943 unsigned long recommended_min;
1945 for_each_populated_zone(zone)
1948 /* Ensure 2 pageblocks are free to assist fragmentation avoidance */
1949 recommended_min = pageblock_nr_pages * nr_zones * 2;
1952 * Make sure that on average at least two pageblocks are almost free
1953 * of another type, one for a migratetype to fall back to and a
1954 * second to avoid subsequent fallbacks of other types. There are 3
1955 * MIGRATE_TYPES we care about.
1957 recommended_min += pageblock_nr_pages * nr_zones *
1958 MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
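/*
 * A worked example, assuming 2MB pageblocks (512 pages) and, say, four
 * populated zones: 512 * 4 * 2 = 4096 pages for the first term plus
 * 512 * 4 * 3 * 3 = 18432 pages for the second, roughly 88MB in total before
 * the 5%-of-lowmem cap below.
 */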
1960 /* don't ever allow reserving more than 5% of the lowmem */
1961 recommended_min = min(recommended_min,
1962 (unsigned long) nr_free_buffer_pages() / 20);
1963 recommended_min <<= (PAGE_SHIFT-10);
1965 if (recommended_min > min_free_kbytes) {
1966 if (user_min_free_kbytes >= 0)
1967 pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
1968 min_free_kbytes, recommended_min);
1970 min_free_kbytes = recommended_min;
1972 setup_per_zone_wmarks();
1975 int start_stop_khugepaged(void)
1979 mutex_lock(&khugepaged_mutex);
1980 if (khugepaged_enabled()) {
1981 if (!khugepaged_thread)
1982 khugepaged_thread = kthread_run(khugepaged, NULL,
1984 if (IS_ERR(khugepaged_thread)) {
1985 pr_err("khugepaged: kthread_run(khugepaged) failed\n");
1986 err = PTR_ERR(khugepaged_thread);
1987 khugepaged_thread = NULL;
1991 if (!list_empty(&khugepaged_scan.mm_head))
1992 wake_up_interruptible(&khugepaged_wait);
1994 set_recommended_min_free_kbytes();
1995 } else if (khugepaged_thread) {
1996 kthread_stop(khugepaged_thread);
1997 khugepaged_thread = NULL;
2000 mutex_unlock(&khugepaged_mutex);
2004 void khugepaged_min_free_kbytes_update(void)
2006 mutex_lock(&khugepaged_mutex);
2007 if (khugepaged_enabled() && khugepaged_thread)
2008 set_recommended_min_free_kbytes();
2009 mutex_unlock(&khugepaged_mutex);