1 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
4 #include <linux/sched.h>
5 #include <linux/mmu_notifier.h>
6 #include <linux/rmap.h>
7 #include <linux/swap.h>
8 #include <linux/mm_inline.h>
9 #include <linux/kthread.h>
10 #include <linux/khugepaged.h>
11 #include <linux/freezer.h>
12 #include <linux/mman.h>
13 #include <linux/hashtable.h>
14 #include <linux/userfaultfd_k.h>
15 #include <linux/page_idle.h>
16 #include <linux/swapops.h>
17 #include <linux/shmem_fs.h>
20 #include <asm/pgalloc.h>
30 SCAN_LACK_REFERENCED_PAGE,
44 SCAN_ALLOC_HUGE_PAGE_FAIL,
45 SCAN_CGROUP_CHARGE_FAIL,
50 #define CREATE_TRACE_POINTS
51 #include <trace/events/huge_memory.h>
53 static struct task_struct *khugepaged_thread __read_mostly;
54 static DEFINE_MUTEX(khugepaged_mutex);
56 /* by default, scan 8*512 ptes (or vmas) every 10 seconds */
57 static unsigned int khugepaged_pages_to_scan __read_mostly;
58 static unsigned int khugepaged_pages_collapsed;
59 static unsigned int khugepaged_full_scans;
60 static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
61 /* during fragmentation, poll the hugepage allocator once every minute */
62 static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
63 static unsigned long khugepaged_sleep_expire;
64 static DEFINE_SPINLOCK(khugepaged_mm_lock);
65 static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
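/*
 * Editorial note (not in the original file): the tunables above are
 * exported through khugepaged_attr_group below, so they can be adjusted
 * at runtime from userspace, e.g.:
 *
 *   echo 4096  > /sys/kernel/mm/transparent_hugepage/khugepaged/pages_to_scan
 *   echo 10000 > /sys/kernel/mm/transparent_hugepage/khugepaged/scan_sleep_millisecs
 *
 * The sleep-interval _store handlers reset khugepaged_sleep_expire and
 * wake khugepaged, so new values take effect immediately.
 */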
67 * By default, collapse a hugepage if at least one pte is mapped the way it
68 * would have been mapped had the vma been large enough at page-fault time.
71 static unsigned int khugepaged_max_ptes_none __read_mostly;
72 static unsigned int khugepaged_max_ptes_swap __read_mostly;
74 #define MM_SLOTS_HASH_BITS 10
75 static __read_mostly DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);
77 static struct kmem_cache *mm_slot_cache __read_mostly;
80 * struct mm_slot - hash lookup from mm to mm_slot
81 * @hash: hash collision list
82 * @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
83 * @mm: the mm that this information is valid for
86 struct hlist_node hash;
87 struct list_head mm_node;
92 * struct khugepaged_scan - cursor for scanning
93 * @mm_head: the head of the mm list to scan
94 * @mm_slot: the current mm_slot we are scanning
95 * @address: the next address inside that mm_slot to be scanned
97 * There is only one khugepaged_scan instance of this cursor structure.
99 struct khugepaged_scan {
100 struct list_head mm_head;
101 struct mm_slot *mm_slot;
102 unsigned long address;
105 static struct khugepaged_scan khugepaged_scan = {
106 .mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
110 static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
111 struct kobj_attribute *attr,
114 return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
117 static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
118 struct kobj_attribute *attr,
119 const char *buf, size_t count)
124 err = kstrtoul(buf, 10, &msecs);
125 if (err || msecs > UINT_MAX)
128 khugepaged_scan_sleep_millisecs = msecs;
129 khugepaged_sleep_expire = 0;
130 wake_up_interruptible(&khugepaged_wait);
134 static struct kobj_attribute scan_sleep_millisecs_attr =
135 __ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
136 scan_sleep_millisecs_store);
138 static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
139 struct kobj_attribute *attr,
142 return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
145 static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
146 struct kobj_attribute *attr,
147 const char *buf, size_t count)
152 err = kstrtoul(buf, 10, &msecs);
153 if (err || msecs > UINT_MAX)
156 khugepaged_alloc_sleep_millisecs = msecs;
157 khugepaged_sleep_expire = 0;
158 wake_up_interruptible(&khugepaged_wait);
162 static struct kobj_attribute alloc_sleep_millisecs_attr =
163 __ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
164 alloc_sleep_millisecs_store);
166 static ssize_t pages_to_scan_show(struct kobject *kobj,
167 struct kobj_attribute *attr,
170 return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
172 static ssize_t pages_to_scan_store(struct kobject *kobj,
173 struct kobj_attribute *attr,
174 const char *buf, size_t count)
179 err = kstrtoul(buf, 10, &pages);
180 if (err || !pages || pages > UINT_MAX)
183 khugepaged_pages_to_scan = pages;
187 static struct kobj_attribute pages_to_scan_attr =
188 __ATTR(pages_to_scan, 0644, pages_to_scan_show,
189 pages_to_scan_store);
191 static ssize_t pages_collapsed_show(struct kobject *kobj,
192 struct kobj_attribute *attr,
195 return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
197 static struct kobj_attribute pages_collapsed_attr =
198 __ATTR_RO(pages_collapsed);
200 static ssize_t full_scans_show(struct kobject *kobj,
201 struct kobj_attribute *attr,
204 return sprintf(buf, "%u\n", khugepaged_full_scans);
206 static struct kobj_attribute full_scans_attr =
207 __ATTR_RO(full_scans);
209 static ssize_t khugepaged_defrag_show(struct kobject *kobj,
210 struct kobj_attribute *attr, char *buf)
212 return single_hugepage_flag_show(kobj, attr, buf,
213 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
215 static ssize_t khugepaged_defrag_store(struct kobject *kobj,
216 struct kobj_attribute *attr,
217 const char *buf, size_t count)
219 return single_hugepage_flag_store(kobj, attr, buf, count,
220 TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
222 static struct kobj_attribute khugepaged_defrag_attr =
223 __ATTR(defrag, 0644, khugepaged_defrag_show,
224 khugepaged_defrag_store);
227 * max_ptes_none controls whether khugepaged may collapse hugepages over
228 * unmapped ptes, which can increase the memory footprint of the vmas.
229 * When max_ptes_none is 0, khugepaged will not reduce the free memory
230 * available in the system as it runs; increasing max_ptes_none lets a
231 * collapse instantiate previously unmapped ptes and thus potentially
232 * reduces free memory during the khugepaged scan.
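/*
 * Worked example (editorial, assuming 4K pages and HPAGE_PMD_NR == 512):
 * with the default max_ptes_none = 511, one present pte in a pmd-sized
 * range is enough to collapse it, instantiating up to 511 * 4K (just
 * under 2M) of previously unmapped memory; with max_ptes_none = 0, only
 * fully populated ranges are collapsed and RSS cannot grow.
 */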
234 static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
235 struct kobj_attribute *attr,
238 return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
240 static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
241 struct kobj_attribute *attr,
242 const char *buf, size_t count)
245 unsigned long max_ptes_none;
247 err = kstrtoul(buf, 10, &max_ptes_none);
248 if (err || max_ptes_none > HPAGE_PMD_NR-1)
251 khugepaged_max_ptes_none = max_ptes_none;
255 static struct kobj_attribute khugepaged_max_ptes_none_attr =
256 __ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
257 khugepaged_max_ptes_none_store);
259 static ssize_t khugepaged_max_ptes_swap_show(struct kobject *kobj,
260 struct kobj_attribute *attr,
263 return sprintf(buf, "%u\n", khugepaged_max_ptes_swap);
266 static ssize_t khugepaged_max_ptes_swap_store(struct kobject *kobj,
267 struct kobj_attribute *attr,
268 const char *buf, size_t count)
271 unsigned long max_ptes_swap;
273 err = kstrtoul(buf, 10, &max_ptes_swap);
274 if (err || max_ptes_swap > HPAGE_PMD_NR-1)
277 khugepaged_max_ptes_swap = max_ptes_swap;
282 static struct kobj_attribute khugepaged_max_ptes_swap_attr =
283 __ATTR(max_ptes_swap, 0644, khugepaged_max_ptes_swap_show,
284 khugepaged_max_ptes_swap_store);
286 static struct attribute *khugepaged_attr[] = {
287 &khugepaged_defrag_attr.attr,
288 &khugepaged_max_ptes_none_attr.attr,
289 &pages_to_scan_attr.attr,
290 &pages_collapsed_attr.attr,
291 &full_scans_attr.attr,
292 &scan_sleep_millisecs_attr.attr,
293 &alloc_sleep_millisecs_attr.attr,
294 &khugepaged_max_ptes_swap_attr.attr,
298 struct attribute_group khugepaged_attr_group = {
299 .attrs = khugepaged_attr,
300 .name = "khugepaged",
302 #endif /* CONFIG_SYSFS */
304 #define VM_NO_KHUGEPAGED (VM_SPECIAL | VM_HUGETLB)
306 int hugepage_madvise(struct vm_area_struct *vma,
307 unsigned long *vm_flags, int advice)
313 * qemu blindly sets MADV_HUGEPAGE on all allocations, but s390
314 * can't handle this properly after s390_enable_sie, so we simply
315 * ignore the madvise to prevent qemu from causing a SIGSEGV.
317 if (mm_has_pgste(vma->vm_mm))
320 *vm_flags &= ~VM_NOHUGEPAGE;
321 *vm_flags |= VM_HUGEPAGE;
323 * If the vma has become suitable for khugepaged to scan,
324 * register it here without waiting for a page fault that
325 * may not happen any time soon.
327 if (!(*vm_flags & VM_NO_KHUGEPAGED) &&
328 khugepaged_enter_vma_merge(vma, *vm_flags))
331 case MADV_NOHUGEPAGE:
332 *vm_flags &= ~VM_HUGEPAGE;
333 *vm_flags |= VM_NOHUGEPAGE;
335 * Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
336 * this vma even if the mm stays registered in khugepaged (it
337 * may have been registered before VM_NOHUGEPAGE was set).
345 int __init khugepaged_init(void)
347 mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
348 sizeof(struct mm_slot),
349 __alignof__(struct mm_slot), 0, NULL);
353 khugepaged_pages_to_scan = HPAGE_PMD_NR * 8;
354 khugepaged_max_ptes_none = HPAGE_PMD_NR - 1;
355 khugepaged_max_ptes_swap = HPAGE_PMD_NR / 8;
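/*
 * Editorial arithmetic for the defaults above (4K pages, HPAGE_PMD_NR
 * == 512): each scan pass covers 8 * 512 = 4096 ptes (16M of address
 * space), a collapse tolerates up to 511 empty ptes, and at most
 * 512 / 8 = 64 ptes may be swapped in per collapse.
 */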
360 void __init khugepaged_destroy(void)
362 kmem_cache_destroy(mm_slot_cache);
365 static inline struct mm_slot *alloc_mm_slot(void)
367 if (!mm_slot_cache) /* initialization failed */
369 return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
372 static inline void free_mm_slot(struct mm_slot *mm_slot)
374 kmem_cache_free(mm_slot_cache, mm_slot);
377 static struct mm_slot *get_mm_slot(struct mm_struct *mm)
379 struct mm_slot *mm_slot;
381 hash_for_each_possible(mm_slots_hash, mm_slot, hash, (unsigned long)mm)
382 if (mm == mm_slot->mm)
388 static void insert_to_mm_slots_hash(struct mm_struct *mm,
389 struct mm_slot *mm_slot)
392 hash_add(mm_slots_hash, &mm_slot->hash, (unsigned long)mm);
395 static inline int khugepaged_test_exit(struct mm_struct *mm)
397 return atomic_read(&mm->mm_users) == 0 || !mmget_still_valid(mm);
400 int __khugepaged_enter(struct mm_struct *mm)
402 struct mm_slot *mm_slot;
405 mm_slot = alloc_mm_slot();
409 /* __khugepaged_exit() must not run from under us */
410 VM_BUG_ON_MM(atomic_read(&mm->mm_users) == 0, mm);
411 if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
412 free_mm_slot(mm_slot);
416 spin_lock(&khugepaged_mm_lock);
417 insert_to_mm_slots_hash(mm, mm_slot);
419 * Insert just behind the scanning cursor, to let the area settle down.
422 wakeup = list_empty(&khugepaged_scan.mm_head);
423 list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
424 spin_unlock(&khugepaged_mm_lock);
426 atomic_inc(&mm->mm_count);
428 wake_up_interruptible(&khugepaged_wait);
433 int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
434 unsigned long vm_flags)
436 unsigned long hstart, hend;
439 * Not yet faulted in, so we will register later in the
440 * page fault handler if needed.
443 if (vma->vm_ops || (vm_flags & VM_NO_KHUGEPAGED))
444 /* khugepaged not yet working on file or special mappings */
446 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
447 hend = vma->vm_end & HPAGE_PMD_MASK;
449 return khugepaged_enter(vma, vm_flags);
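/*
 * Editorial sketch (not in the original file): the rounding above,
 * repeated in hugepage_vma_revalidate() and khugepaged_scan_mm_slot(),
 * factored out for illustration. The helper name is hypothetical.
 */
static inline bool khugepaged_vma_range(struct vm_area_struct *vma,
					unsigned long *hstart,
					unsigned long *hend)
{
	/* round vm_start up and vm_end down to pmd-sized boundaries */
	*hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
	*hend = vma->vm_end & HPAGE_PMD_MASK;
	return *hstart < *hend;	/* true if at least one 2M range fits */
}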
453 void __khugepaged_exit(struct mm_struct *mm)
455 struct mm_slot *mm_slot;
458 spin_lock(&khugepaged_mm_lock);
459 mm_slot = get_mm_slot(mm);
460 if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
461 hash_del(&mm_slot->hash);
462 list_del(&mm_slot->mm_node);
465 spin_unlock(&khugepaged_mm_lock);
468 clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
469 free_mm_slot(mm_slot);
471 } else if (mm_slot) {
473 * This is required to serialize against
474 * khugepaged_test_exit() (which is guaranteed to run
475 * under the mmap_sem in read mode). Stop here (after we
476 * return, all pagetables will be destroyed) until
477 * khugepaged has finished working on the pagetables
478 * under the mmap_sem.
480 down_write(&mm->mmap_sem);
481 up_write(&mm->mmap_sem);
485 static void release_pte_page(struct page *page)
487 /* 0 stands for page_is_file_cache(page) == false */
488 dec_node_page_state(page, NR_ISOLATED_ANON + 0);
490 putback_lru_page(page);
493 static void release_pte_pages(pte_t *pte, pte_t *_pte)
495 while (--_pte >= pte) {
496 pte_t pteval = *_pte;
497 if (!pte_none(pteval) && !is_zero_pfn(pte_pfn(pteval)))
498 release_pte_page(pte_page(pteval));
502 static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
503 unsigned long address,
506 struct page *page = NULL;
508 int none_or_zero = 0, result = 0, referenced = 0;
509 bool writable = false;
511 for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
512 _pte++, address += PAGE_SIZE) {
513 pte_t pteval = *_pte;
514 if (pte_none(pteval) || (pte_present(pteval) &&
515 is_zero_pfn(pte_pfn(pteval)))) {
516 if (!userfaultfd_armed(vma) &&
517 ++none_or_zero <= khugepaged_max_ptes_none) {
520 result = SCAN_EXCEED_NONE_PTE;
524 if (!pte_present(pteval)) {
525 result = SCAN_PTE_NON_PRESENT;
528 page = vm_normal_page(vma, address, pteval);
529 if (unlikely(!page)) {
530 result = SCAN_PAGE_NULL;
534 /* TODO: teach khugepaged to collapse THP mapped with pte */
535 if (PageCompound(page)) {
536 result = SCAN_PAGE_COMPOUND;
540 VM_BUG_ON_PAGE(!PageAnon(page), page);
541 VM_BUG_ON_PAGE(!PageSwapBacked(page), page);
544 * We can do it before isolate_lru_page because the
545 * page can't be freed from under us. NOTE: PG_lock
546 * is needed to serialize against split_huge_page
547 * when invoked from the VM.
549 if (!trylock_page(page)) {
550 result = SCAN_PAGE_LOCK;
555 * cannot use mapcount: can't collapse if there's a gup pin.
556 * The page must only be referenced by the scanned process
557 * and page swap cache.
559 if (page_count(page) != 1 + !!PageSwapCache(page)) {
561 result = SCAN_PAGE_COUNT;
564 if (pte_write(pteval)) {
567 if (PageSwapCache(page) &&
568 !reuse_swap_page(page, NULL)) {
570 result = SCAN_SWAP_CACHE_PAGE;
574 * Page is not in the swap cache. It can be collapsed into a THP.
580 * Isolate the page to avoid collapsing a hugepage
581 * currently in use by the VM.
583 if (isolate_lru_page(page)) {
585 result = SCAN_DEL_PAGE_LRU;
588 /* 0 stands for page_is_file_cache(page) == false */
589 inc_node_page_state(page, NR_ISOLATED_ANON + 0);
590 VM_BUG_ON_PAGE(!PageLocked(page), page);
591 VM_BUG_ON_PAGE(PageLRU(page), page);
593 /* There should be enough young ptes to collapse the page */
594 if (pte_young(pteval) ||
595 page_is_young(page) || PageReferenced(page) ||
596 mmu_notifier_test_young(vma->vm_mm, address))
600 if (unlikely(!writable)) {
601 result = SCAN_PAGE_RO;
602 } else if (unlikely(!referenced)) {
603 result = SCAN_LACK_REFERENCED_PAGE;
605 result = SCAN_SUCCEED;
606 trace_mm_collapse_huge_page_isolate(page, none_or_zero,
607 referenced, writable, result);
611 release_pte_pages(pte, _pte);
612 trace_mm_collapse_huge_page_isolate(page, none_or_zero,
613 referenced, writable, result);
617 static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
618 struct vm_area_struct *vma,
619 unsigned long address,
623 for (_pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++) {
624 pte_t pteval = *_pte;
625 struct page *src_page;
627 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
628 clear_user_highpage(page, address);
629 add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
630 if (is_zero_pfn(pte_pfn(pteval))) {
632 * ptl mostly unnecessary.
636 * paravirt calls inside pte_clear here are superfluous.
639 pte_clear(vma->vm_mm, address, _pte);
643 src_page = pte_page(pteval);
644 copy_user_highpage(page, src_page, address, vma);
645 VM_BUG_ON_PAGE(page_mapcount(src_page) != 1, src_page);
646 release_pte_page(src_page);
648 * ptl mostly unnecessary, but preempt has to
649 * be disabled to update the per-cpu stats
650 * inside page_remove_rmap().
654 * paravirt calls inside pte_clear here are superfluous.
657 pte_clear(vma->vm_mm, address, _pte);
658 page_remove_rmap(src_page, false);
660 free_page_and_swap_cache(src_page);
663 address += PAGE_SIZE;
668 static void khugepaged_alloc_sleep(void)
672 add_wait_queue(&khugepaged_wait, &wait);
673 freezable_schedule_timeout_interruptible(
674 msecs_to_jiffies(khugepaged_alloc_sleep_millisecs));
675 remove_wait_queue(&khugepaged_wait, &wait);
678 static int khugepaged_node_load[MAX_NUMNODES];
680 static bool khugepaged_scan_abort(int nid)
685 * If node_reclaim_mode is disabled, then no extra effort is made to
686 * allocate memory locally.
688 if (!node_reclaim_mode)
691 /* If there is a count for this node already, it must be acceptable */
692 if (khugepaged_node_load[nid])
695 for (i = 0; i < MAX_NUMNODES; i++) {
696 if (!khugepaged_node_load[i])
698 if (node_distance(nid, i) > RECLAIM_DISTANCE)
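/*
 * Editorial example (hypothetical topology): suppose pages were already
 * counted on node 0 and the scan now hits a page on node 1 with
 * node_distance(0, 1) > RECLAIM_DISTANCE. With node reclaim enabled,
 * continuing would risk an expensive remote or reclaim-heavy hugepage
 * allocation, so the pmd scan is aborted instead.
 */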
704 /* Defrag for khugepaged will enter direct reclaim/compaction if necessary */
705 static inline gfp_t alloc_hugepage_khugepaged_gfpmask(void)
707 return khugepaged_defrag() ? GFP_TRANSHUGE : GFP_TRANSHUGE_LIGHT;
711 static int khugepaged_find_target_node(void)
713 static int last_khugepaged_target_node = NUMA_NO_NODE;
714 int nid, target_node = 0, max_value = 0;
716 /* find the first node with the max hit count of normal pages */
717 for (nid = 0; nid < MAX_NUMNODES; nid++)
718 if (khugepaged_node_load[nid] > max_value) {
719 max_value = khugepaged_node_load[nid];
723 /* do some balancing if several nodes have the same hit count */
724 if (target_node <= last_khugepaged_target_node)
725 for (nid = last_khugepaged_target_node + 1; nid < MAX_NUMNODES;
727 if (max_value == khugepaged_node_load[nid]) {
732 last_khugepaged_target_node = target_node;
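/*
 * Editorial example: if nodes 0 and 2 end a scan with the same maximal
 * hit count and the previous target was node 0, the loop above advances
 * the target to node 2, round-robining allocations across equally
 * loaded nodes.
 */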
736 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
738 if (IS_ERR(*hpage)) {
744 khugepaged_alloc_sleep();
754 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
756 VM_BUG_ON_PAGE(*hpage, *hpage);
758 *hpage = __alloc_pages_node(node, gfp, HPAGE_PMD_ORDER);
759 if (unlikely(!*hpage)) {
760 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
761 *hpage = ERR_PTR(-ENOMEM);
765 prep_transhuge_page(*hpage);
766 count_vm_event(THP_COLLAPSE_ALLOC);
770 static int khugepaged_find_target_node(void)
775 static inline struct page *alloc_khugepaged_hugepage(void)
779 page = alloc_pages(alloc_hugepage_khugepaged_gfpmask(),
782 prep_transhuge_page(page);
786 static struct page *khugepaged_alloc_hugepage(bool *wait)
791 hpage = alloc_khugepaged_hugepage();
793 count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
798 khugepaged_alloc_sleep();
800 count_vm_event(THP_COLLAPSE_ALLOC);
801 } while (unlikely(!hpage) && likely(khugepaged_enabled()));
806 static bool khugepaged_prealloc_page(struct page **hpage, bool *wait)
809 * If the hpage allocated earlier was briefly exposed in page cache
810 * before collapse_file() failed, it is possible that racing lookups
811 * have not yet completed, and would then be unpleasantly surprised by
812 * finding the hpage reused for the same mapping at a different offset.
813 * Just release the previous allocation if there is any danger of that.
815 if (*hpage && page_count(*hpage) > 1) {
821 *hpage = khugepaged_alloc_hugepage(wait);
823 if (unlikely(!*hpage))
830 khugepaged_alloc_page(struct page **hpage, gfp_t gfp, int node)
838 static bool hugepage_vma_check(struct vm_area_struct *vma)
840 if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
841 (vma->vm_flags & VM_NOHUGEPAGE))
843 if (shmem_file(vma->vm_file)) {
844 if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
846 return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
849 if (!vma->anon_vma || vma->vm_ops)
851 if (is_vma_temporary_stack(vma))
853 return !(vma->vm_flags & VM_NO_KHUGEPAGED);
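/*
 * Editorial example for the IS_ALIGNED() check above (HPAGE_PMD_NR ==
 * 512): a shmem mapping with vm_start = 0x40000000 and vm_pgoff = 0 has
 * (vm_start >> PAGE_SHIFT) - vm_pgoff = 0x40000, a multiple of 512, so
 * file offsets and virtual addresses line up for pmd mappings; shifting
 * vm_pgoff to 1 breaks that alignment and disqualifies the vma.
 */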
857 * If the mmap_sem was temporarily dropped, revalidate the vma
858 * after retaking the mmap_sem.
859 * Returns 0 on success, or a non-zero SCAN_* code otherwise.
863 static int hugepage_vma_revalidate(struct mm_struct *mm, unsigned long address,
864 struct vm_area_struct **vmap)
866 struct vm_area_struct *vma;
867 unsigned long hstart, hend;
869 if (unlikely(khugepaged_test_exit(mm)))
870 return SCAN_ANY_PROCESS;
872 *vmap = vma = find_vma(mm, address);
874 return SCAN_VMA_NULL;
876 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
877 hend = vma->vm_end & HPAGE_PMD_MASK;
878 if (address < hstart || address + HPAGE_PMD_SIZE > hend)
879 return SCAN_ADDRESS_RANGE;
880 if (!hugepage_vma_check(vma))
881 return SCAN_VMA_CHECK;
886 * Bring missing pages in from swap, to complete THP collapse.
887 * Only done if khugepaged_scan_pmd believes it is worthwhile.
889 * Called and returns without pte mapped or spinlocks held,
890 * but with mmap_sem held to protect against vma changes.
893 static bool __collapse_huge_page_swapin(struct mm_struct *mm,
894 struct vm_area_struct *vma,
895 unsigned long address, pmd_t *pmd,
899 int swapped_in = 0, ret = 0;
900 struct fault_env fe = {
903 .flags = FAULT_FLAG_ALLOW_RETRY,
907 /* we only decide to swap in if there are enough young ptes */
908 if (referenced < HPAGE_PMD_NR/2) {
909 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
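/*
 * Editorial arithmetic: with HPAGE_PMD_NR == 512 the check above
 * requires referenced >= 256, i.e. at least half of the ptes must look
 * recently used before khugepaged pays the cost of swapping the rest in.
 */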
912 fe.pte = pte_offset_map(pmd, address);
913 for (; fe.address < address + HPAGE_PMD_NR*PAGE_SIZE;
914 fe.pte++, fe.address += PAGE_SIZE) {
916 if (!is_swap_pte(pteval))
919 ret = do_swap_page(&fe, pteval);
921 /* do_swap_page returns VM_FAULT_RETRY with released mmap_sem */
922 if (ret & VM_FAULT_RETRY) {
923 down_read(&mm->mmap_sem);
924 if (hugepage_vma_revalidate(mm, address, &fe.vma)) {
925 /* vma is no longer available, don't continue to swapin */
926 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
929 /* check if the pmd is still valid */
930 if (mm_find_pmd(mm, address) != pmd)
933 if (ret & VM_FAULT_ERROR) {
934 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 0);
937 /* pte is unmapped now, we need to map it */
938 fe.pte = pte_offset_map(pmd, fe.address);
942 trace_mm_collapse_huge_page_swapin(mm, swapped_in, referenced, 1);
946 static void collapse_huge_page(struct mm_struct *mm,
947 unsigned long address,
949 int node, int referenced)
954 struct page *new_page;
955 spinlock_t *pmd_ptl, *pte_ptl;
956 int isolated = 0, result = 0;
957 struct mem_cgroup *memcg;
958 struct vm_area_struct *vma;
959 unsigned long mmun_start; /* For mmu_notifiers */
960 unsigned long mmun_end; /* For mmu_notifiers */
963 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
965 /* Only allocate from the target node */
966 gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_OTHER_NODE | __GFP_THISNODE;
969 * Before allocating the hugepage, release the mmap_sem read lock.
970 * The allocation can take potentially a long time if it involves
971 * sync compaction, and we do not need to hold the mmap_sem during
972 * that. We will recheck the vma after taking it again in write mode.
974 up_read(&mm->mmap_sem);
975 new_page = khugepaged_alloc_page(hpage, gfp, node);
977 result = SCAN_ALLOC_HUGE_PAGE_FAIL;
981 /* Do not oom kill for khugepaged charges */
982 if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp | __GFP_NORETRY,
984 result = SCAN_CGROUP_CHARGE_FAIL;
988 down_read(&mm->mmap_sem);
989 result = hugepage_vma_revalidate(mm, address, &vma);
991 mem_cgroup_cancel_charge(new_page, memcg, true);
992 up_read(&mm->mmap_sem);
996 pmd = mm_find_pmd(mm, address);
998 result = SCAN_PMD_NULL;
999 mem_cgroup_cancel_charge(new_page, memcg, true);
1000 up_read(&mm->mmap_sem);
1005 * __collapse_huge_page_swapin always returns with mmap_sem locked.
1006 * If it fails, we release mmap_sem and jump to out_nolock.
1007 * Continuing to collapse causes inconsistency.
1009 if (!__collapse_huge_page_swapin(mm, vma, address, pmd, referenced)) {
1010 mem_cgroup_cancel_charge(new_page, memcg, true);
1011 up_read(&mm->mmap_sem);
1015 up_read(&mm->mmap_sem);
1017 * Prevent all access to the pagetables, with the exception of
1018 * gup_fast (handled later by the ptep_clear_flush) and the VM
1019 * (handled by the anon_vma lock + PG_lock).
1021 down_write(&mm->mmap_sem);
1022 result = hugepage_vma_revalidate(mm, address, &vma);
1025 /* check if the pmd is still valid */
1026 if (mm_find_pmd(mm, address) != pmd)
1029 anon_vma_lock_write(vma->anon_vma);
1031 pte = pte_offset_map(pmd, address);
1032 pte_ptl = pte_lockptr(mm, pmd);
1034 mmun_start = address;
1035 mmun_end = address + HPAGE_PMD_SIZE;
1036 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
1037 pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
1039 * After this gup_fast can't run anymore. This also removes
1040 * any huge TLB entry from the CPU so we won't allow
1041 * huge and small TLB entries for the same virtual address
1042 * to avoid the risk of CPU bugs in that area.
1044 _pmd = pmdp_collapse_flush(vma, address, pmd);
1045 spin_unlock(pmd_ptl);
1046 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
1049 isolated = __collapse_huge_page_isolate(vma, address, pte);
1050 spin_unlock(pte_ptl);
1052 if (unlikely(!isolated)) {
1055 BUG_ON(!pmd_none(*pmd));
1057 * We can only use set_pmd_at when establishing
1058 * huge pmds and never for establishing regular pmds that
1059 * point to regular pagetables. Use pmd_populate for that.
1061 pmd_populate(mm, pmd, pmd_pgtable(_pmd));
1062 spin_unlock(pmd_ptl);
1063 anon_vma_unlock_write(vma->anon_vma);
1069 * All pages are isolated and locked so anon_vma rmap
1070 * can't run anymore.
1072 anon_vma_unlock_write(vma->anon_vma);
1074 __collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl);
1076 __SetPageUptodate(new_page);
1077 pgtable = pmd_pgtable(_pmd);
1079 _pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
1080 _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
1083 * The spin_lock() below is not the equivalent of smp_wmb(), so
1084 * an explicit barrier is needed to keep the copy_huge_page writes
1085 * from becoming visible after the set_pmd_at() write.
1090 BUG_ON(!pmd_none(*pmd));
1091 page_add_new_anon_rmap(new_page, vma, address, true);
1092 mem_cgroup_commit_charge(new_page, memcg, false, true);
1093 lru_cache_add_active_or_unevictable(new_page, vma);
1094 pgtable_trans_huge_deposit(mm, pmd, pgtable);
1095 set_pmd_at(mm, address, pmd, _pmd);
1096 update_mmu_cache_pmd(vma, address, pmd);
1097 spin_unlock(pmd_ptl);
1101 khugepaged_pages_collapsed++;
1102 result = SCAN_SUCCEED;
1104 up_write(&mm->mmap_sem);
1106 trace_mm_collapse_huge_page(mm, isolated, result);
1109 mem_cgroup_cancel_charge(new_page, memcg, true);
1113 static int khugepaged_scan_pmd(struct mm_struct *mm,
1114 struct vm_area_struct *vma,
1115 unsigned long address,
1116 struct page **hpage)
1120 int ret = 0, none_or_zero = 0, result = 0, referenced = 0;
1121 struct page *page = NULL;
1122 unsigned long _address;
1124 int node = NUMA_NO_NODE, unmapped = 0;
1125 bool writable = false;
1127 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1129 pmd = mm_find_pmd(mm, address);
1131 result = SCAN_PMD_NULL;
1135 memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
1136 pte = pte_offset_map_lock(mm, pmd, address, &ptl);
1137 for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
1138 _pte++, _address += PAGE_SIZE) {
1139 pte_t pteval = *_pte;
1140 if (is_swap_pte(pteval)) {
1141 if (++unmapped <= khugepaged_max_ptes_swap) {
1144 result = SCAN_EXCEED_SWAP_PTE;
1148 if (pte_none(pteval) || is_zero_pfn(pte_pfn(pteval))) {
1149 if (!userfaultfd_armed(vma) &&
1150 ++none_or_zero <= khugepaged_max_ptes_none) {
1153 result = SCAN_EXCEED_NONE_PTE;
1157 if (!pte_present(pteval)) {
1158 result = SCAN_PTE_NON_PRESENT;
1161 if (pte_write(pteval))
1164 page = vm_normal_page(vma, _address, pteval);
1165 if (unlikely(!page)) {
1166 result = SCAN_PAGE_NULL;
1170 /* TODO: teach khugepaged to collapse THP mapped with pte */
1171 if (PageCompound(page)) {
1172 result = SCAN_PAGE_COMPOUND;
1177 * Record which node the original page is from and save this
1178 * information to khugepaged_node_load[].
1179 * Khugepaged will allocate the hugepage from the node with the max hit count.
1182 node = page_to_nid(page);
1183 if (khugepaged_scan_abort(node)) {
1184 result = SCAN_SCAN_ABORT;
1187 khugepaged_node_load[node]++;
1188 if (!PageLRU(page)) {
1189 result = SCAN_PAGE_LRU;
1192 if (PageLocked(page)) {
1193 result = SCAN_PAGE_LOCK;
1196 if (!PageAnon(page)) {
1197 result = SCAN_PAGE_ANON;
1202 * cannot use mapcount: can't collapse if there's a gup pin.
1203 * The page must only be referenced by the scanned process
1204 * and page swap cache.
1206 if (page_count(page) != 1 + !!PageSwapCache(page)) {
1207 result = SCAN_PAGE_COUNT;
1210 if (pte_young(pteval) ||
1211 page_is_young(page) || PageReferenced(page) ||
1212 mmu_notifier_test_young(vma->vm_mm, address))
1217 result = SCAN_SUCCEED;
1220 result = SCAN_LACK_REFERENCED_PAGE;
1223 result = SCAN_PAGE_RO;
1226 pte_unmap_unlock(pte, ptl);
1228 node = khugepaged_find_target_node();
1229 /* collapse_huge_page will return with the mmap_sem released */
1230 collapse_huge_page(mm, address, hpage, node, referenced);
1233 trace_mm_khugepaged_scan_pmd(mm, page, writable, referenced,
1234 none_or_zero, result, unmapped);
1238 static void collect_mm_slot(struct mm_slot *mm_slot)
1240 struct mm_struct *mm = mm_slot->mm;
1242 VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
1244 if (khugepaged_test_exit(mm)) {
1246 hash_del(&mm_slot->hash);
1247 list_del(&mm_slot->mm_node);
1250 * Not strictly needed because the mm exited already.
1252 * clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
1255 /* khugepaged_mm_lock actually not necessary for the below */
1256 free_mm_slot(mm_slot);
1261 #if defined(CONFIG_SHMEM) && defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE)
1262 static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
1264 struct vm_area_struct *vma;
1265 struct mm_struct *mm;
1269 i_mmap_lock_write(mapping);
1270 vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
1271 /* probably overkill */
1274 addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
1275 if (addr & ~HPAGE_PMD_MASK)
1277 if (vma->vm_end < addr + HPAGE_PMD_SIZE)
1280 pmd = mm_find_pmd(mm, addr);
1284 * We need the exclusive mmap_sem to retract the page table.
1285 * If the trylock fails we would end up with a pte-mapped THP after
1286 * re-fault. Not ideal, but it's more important to not disturb
1287 * the system too much.
1289 if (down_write_trylock(&mm->mmap_sem)) {
1290 if (!khugepaged_test_exit(mm)) {
1291 spinlock_t *ptl = pmd_lock(mm, pmd);
1292 /* assume page table is clear */
1293 _pmd = pmdp_collapse_flush(vma, addr, pmd);
1295 atomic_long_dec(&mm->nr_ptes);
1296 pte_free(mm, pmd_pgtable(_pmd));
1298 up_write(&mm->mmap_sem);
1301 i_mmap_unlock_write(mapping);
1305 * collapse_shmem - collapse small tmpfs/shmem pages into a huge one.
1307 * The basic scheme is simple, the details are more complex:
1308 * - allocate and lock a new huge page;
1309 * - scan over the radix tree, replacing old pages with the new one
1310 *  + swap in pages if necessary;
1312 *  + keep old pages around in case rollback is required;
1313 * - if replacing succeeds:
1316 *  + unlock the huge page;
1317 * - if replacing fails:
1318 *  + put all pages back and unfreeze them;
1319 *  + restore gaps in the radix tree;
1320 *  + unlock and free the huge page;
1322 static void collapse_shmem(struct mm_struct *mm,
1323 struct address_space *mapping, pgoff_t start,
1324 struct page **hpage, int node)
1327 struct page *page, *new_page, *tmp;
1328 struct mem_cgroup *memcg;
1329 pgoff_t index, end = start + HPAGE_PMD_NR;
1330 LIST_HEAD(pagelist);
1331 struct radix_tree_iter iter;
1333 int nr_none = 0, result = SCAN_SUCCEED;
1335 VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
1337 /* Only allocate from the target node */
1338 gfp = alloc_hugepage_khugepaged_gfpmask() |
1339 __GFP_OTHER_NODE | __GFP_THISNODE;
1341 new_page = khugepaged_alloc_page(hpage, gfp, node);
1343 result = SCAN_ALLOC_HUGE_PAGE_FAIL;
1347 /* Do not oom kill for khugepaged charges */
1348 if (unlikely(mem_cgroup_try_charge(new_page, mm, gfp | __GFP_NORETRY,
1350 result = SCAN_CGROUP_CHARGE_FAIL;
1354 __SetPageLocked(new_page);
1355 __SetPageSwapBacked(new_page);
1356 new_page->index = start;
1357 new_page->mapping = mapping;
1360 * At this point the new_page is locked and not up-to-date.
1361 * It's safe to insert it into the page cache, because nobody would
1362 * be able to map it or use it in another way until we unlock it.
1366 spin_lock_irq(&mapping->tree_lock);
1367 radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
1368 int n = min(iter.index, end) - index;
1371 * Stop if extent has been hole-punched, and is now completely
1372 * empty (the more obvious i_size_read() check would take an
1373 * irq-unsafe seqlock on 32-bit).
1375 if (n >= HPAGE_PMD_NR) {
1376 result = SCAN_TRUNCATED;
1381 * Handle holes in the radix tree: charge them to shmem and
1382 * insert the relevant subpage of new_page into the radix tree.
1384 if (n && !shmem_charge(mapping->host, n)) {
1388 for (; index < min(iter.index, end); index++) {
1389 radix_tree_insert(&mapping->page_tree, index,
1390 new_page + (index % HPAGE_PMD_NR));
1398 page = radix_tree_deref_slot_protected(slot,
1399 &mapping->tree_lock);
1400 if (radix_tree_exceptional_entry(page) || !PageUptodate(page)) {
1401 spin_unlock_irq(&mapping->tree_lock);
1402 /* swap in or instantiate fallocated page */
1403 if (shmem_getpage(mapping->host, index, &page,
1408 } else if (trylock_page(page)) {
1410 spin_unlock_irq(&mapping->tree_lock);
1412 result = SCAN_PAGE_LOCK;
1417 * The page must be locked, so we can drop the tree_lock
1418 * without racing with truncate.
1420 VM_BUG_ON_PAGE(!PageLocked(page), page);
1421 VM_BUG_ON_PAGE(!PageUptodate(page), page);
1424 * If file was truncated then extended, or hole-punched, before
1425 * we locked the first page, then a THP might be there already.
1427 if (PageTransCompound(page)) {
1428 result = SCAN_PAGE_COMPOUND;
1432 if (page_mapping(page) != mapping) {
1433 result = SCAN_TRUNCATED;
1437 if (isolate_lru_page(page)) {
1438 result = SCAN_DEL_PAGE_LRU;
1442 if (page_mapped(page))
1443 unmap_mapping_range(mapping, index << PAGE_SHIFT,
1446 spin_lock_irq(&mapping->tree_lock);
1448 slot = radix_tree_lookup_slot(&mapping->page_tree, index);
1449 VM_BUG_ON_PAGE(page != radix_tree_deref_slot_protected(slot,
1450 &mapping->tree_lock), page);
1451 VM_BUG_ON_PAGE(page_mapped(page), page);
1454 * The page is expected to have page_count() == 3:
1455 * - we hold a pin on it;
1456 * - one reference from radix tree;
1457 * - one from isolate_lru_page;
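/*
 * (Editorial arithmetic: those three references are why
 * page_ref_freeze(page, 3) is used below; the freeze only succeeds
 * when nobody else, e.g. a concurrent gup, holds an extra reference.)
 */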
1459 if (!page_ref_freeze(page, 3)) {
1460 result = SCAN_PAGE_COUNT;
1461 spin_unlock_irq(&mapping->tree_lock);
1462 putback_lru_page(page);
1467 * Add the page to the list to be able to undo the collapse if
1468 * something goes wrong.
1470 list_add_tail(&page->lru, &pagelist);
1472 /* Finally, replace with the new page. */
1473 radix_tree_replace_slot(slot,
1474 new_page + (index % HPAGE_PMD_NR));
1476 slot = radix_tree_iter_next(&iter);
1486 * Handle a hole in the radix tree at the end of the range.
1487 * This code only triggers if there's nothing in the radix tree beyond 'end'.
1491 int n = end - index;
1493 /* Stop if extent has been truncated, and is now empty */
1494 if (n >= HPAGE_PMD_NR) {
1495 result = SCAN_TRUNCATED;
1498 if (!shmem_charge(mapping->host, n)) {
1502 for (; index < end; index++) {
1503 radix_tree_insert(&mapping->page_tree, index,
1504 new_page + (index % HPAGE_PMD_NR));
1509 __inc_node_page_state(new_page, NR_SHMEM_THPS);
1511 struct zone *zone = page_zone(new_page);
1513 __mod_node_page_state(zone->zone_pgdat, NR_FILE_PAGES, nr_none);
1514 __mod_node_page_state(zone->zone_pgdat, NR_SHMEM, nr_none);
1518 spin_unlock_irq(&mapping->tree_lock);
1521 if (result == SCAN_SUCCEED) {
1523 * Replacing old pages with the new one has succeeded, now we need
1524 * to copy the content and free the old pages.
1527 list_for_each_entry_safe(page, tmp, &pagelist, lru) {
1528 while (index < page->index) {
1529 clear_highpage(new_page + (index % HPAGE_PMD_NR));
1532 copy_highpage(new_page + (page->index % HPAGE_PMD_NR),
1534 list_del(&page->lru);
1535 page->mapping = NULL;
1536 page_ref_unfreeze(page, 1);
1537 ClearPageActive(page);
1538 ClearPageUnevictable(page);
1543 while (index < end) {
1544 clear_highpage(new_page + (index % HPAGE_PMD_NR));
1548 SetPageUptodate(new_page);
1549 page_ref_add(new_page, HPAGE_PMD_NR - 1);
1550 set_page_dirty(new_page);
1551 mem_cgroup_commit_charge(new_page, memcg, false, true);
1552 lru_cache_add_anon(new_page);
1555 * Remove pte page tables, so we can re-fault the page as huge.
1557 retract_page_tables(mapping, start);
1560 /* Something went wrong: roll back changes to the radix tree */
1561 spin_lock_irq(&mapping->tree_lock);
1562 mapping->nrpages -= nr_none;
1563 shmem_uncharge(mapping->host, nr_none);
1565 radix_tree_for_each_slot(slot, &mapping->page_tree, &iter,
1567 if (iter.index >= end)
1569 page = list_first_entry_or_null(&pagelist,
1571 if (!page || iter.index < page->index) {
1575 /* Put holes back where they were */
1576 radix_tree_delete(&mapping->page_tree,
1578 slot = radix_tree_iter_next(&iter);
1582 VM_BUG_ON_PAGE(page->index != iter.index, page);
1584 /* Unfreeze the page. */
1585 list_del(&page->lru);
1586 page_ref_unfreeze(page, 2);
1587 radix_tree_replace_slot(slot, page);
1588 spin_unlock_irq(&mapping->tree_lock);
1590 putback_lru_page(page);
1591 spin_lock_irq(&mapping->tree_lock);
1592 slot = radix_tree_iter_next(&iter);
1595 spin_unlock_irq(&mapping->tree_lock);
1597 mem_cgroup_cancel_charge(new_page, memcg, true);
1598 new_page->mapping = NULL;
1601 unlock_page(new_page);
1603 VM_BUG_ON(!list_empty(&pagelist));
1604 /* TODO: tracepoints */
1607 static void khugepaged_scan_shmem(struct mm_struct *mm,
1608 struct address_space *mapping,
1609 pgoff_t start, struct page **hpage)
1611 struct page *page = NULL;
1612 struct radix_tree_iter iter;
1615 int node = NUMA_NO_NODE;
1616 int result = SCAN_SUCCEED;
1620 memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
1622 radix_tree_for_each_slot(slot, &mapping->page_tree, &iter, start) {
1623 if (iter.index >= start + HPAGE_PMD_NR)
1626 page = radix_tree_deref_slot(slot);
1627 if (radix_tree_deref_retry(page)) {
1628 slot = radix_tree_iter_retry(&iter);
1632 if (radix_tree_exception(page)) {
1633 if (++swap > khugepaged_max_ptes_swap) {
1634 result = SCAN_EXCEED_SWAP_PTE;
1640 if (PageTransCompound(page)) {
1641 result = SCAN_PAGE_COMPOUND;
1645 node = page_to_nid(page);
1646 if (khugepaged_scan_abort(node)) {
1647 result = SCAN_SCAN_ABORT;
1650 khugepaged_node_load[node]++;
1652 if (!PageLRU(page)) {
1653 result = SCAN_PAGE_LRU;
1657 if (page_count(page) != 1 + page_mapcount(page)) {
1658 result = SCAN_PAGE_COUNT;
1663 * We probably should check if the page is referenced here, but
1664 * nobody would transfer pte_young() to PageReferenced() for us.
1665 * And rmap walk here is just too costly...
1670 if (need_resched()) {
1672 slot = radix_tree_iter_next(&iter);
1677 if (result == SCAN_SUCCEED) {
1678 if (present < HPAGE_PMD_NR - khugepaged_max_ptes_none) {
1679 result = SCAN_EXCEED_NONE_PTE;
1681 node = khugepaged_find_target_node();
1682 collapse_shmem(mm, mapping, start, hpage, node);
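/*
 * (Editorial arithmetic: with the default max_ptes_none = 511 the check
 * above demands only one present page out of HPAGE_PMD_NR = 512;
 * lowering max_ptes_none raises the density required before a shmem
 * collapse is attempted.)
 */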
1686 /* TODO: tracepoints */
1689 static void khugepaged_scan_shmem(struct mm_struct *mm,
1690 struct address_space *mapping,
1691 pgoff_t start, struct page **hpage)
1697 static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
1698 struct page **hpage)
1699 __releases(&khugepaged_mm_lock)
1700 __acquires(&khugepaged_mm_lock)
1702 struct mm_slot *mm_slot;
1703 struct mm_struct *mm;
1704 struct vm_area_struct *vma;
1708 VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
1710 if (khugepaged_scan.mm_slot)
1711 mm_slot = khugepaged_scan.mm_slot;
1713 mm_slot = list_entry(khugepaged_scan.mm_head.next,
1714 struct mm_slot, mm_node);
1715 khugepaged_scan.address = 0;
1716 khugepaged_scan.mm_slot = mm_slot;
1718 spin_unlock(&khugepaged_mm_lock);
1722 * Don't wait for semaphore (to avoid long wait times). Just move to
1723 * the next mm on the list.
1726 if (unlikely(!down_read_trylock(&mm->mmap_sem)))
1727 goto breakouterloop_mmap_sem;
1728 if (likely(!khugepaged_test_exit(mm)))
1729 vma = find_vma(mm, khugepaged_scan.address);
1732 for (; vma; vma = vma->vm_next) {
1733 unsigned long hstart, hend;
1736 if (unlikely(khugepaged_test_exit(mm))) {
1740 if (!hugepage_vma_check(vma)) {
1745 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
1746 hend = vma->vm_end & HPAGE_PMD_MASK;
1749 if (khugepaged_scan.address > hend)
1751 if (khugepaged_scan.address < hstart)
1752 khugepaged_scan.address = hstart;
1753 VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
1755 while (khugepaged_scan.address < hend) {
1758 if (unlikely(khugepaged_test_exit(mm)))
1759 goto breakouterloop;
1761 VM_BUG_ON(khugepaged_scan.address < hstart ||
1762 khugepaged_scan.address + HPAGE_PMD_SIZE >
1764 if (shmem_file(vma->vm_file)) {
1766 pgoff_t pgoff = linear_page_index(vma,
1767 khugepaged_scan.address);
1768 if (!shmem_huge_enabled(vma))
1770 file = get_file(vma->vm_file);
1771 up_read(&mm->mmap_sem);
1773 khugepaged_scan_shmem(mm, file->f_mapping,
1777 ret = khugepaged_scan_pmd(mm, vma,
1778 khugepaged_scan.address,
1781 /* move to next address */
1782 khugepaged_scan.address += HPAGE_PMD_SIZE;
1783 progress += HPAGE_PMD_NR;
1785 /* we released mmap_sem so break loop */
1786 goto breakouterloop_mmap_sem;
1787 if (progress >= pages)
1788 goto breakouterloop;
1792 up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */
1793 breakouterloop_mmap_sem:
1795 spin_lock(&khugepaged_mm_lock);
1796 VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
1798 * Release the current mm_slot if this mm is about to die, or
1799 * if we scanned all vmas of this mm.
1801 if (khugepaged_test_exit(mm) || !vma) {
1803 * Make sure that if mm_users is reaching zero while
1804 * khugepaged runs here, khugepaged_exit will find
1805 * mm_slot not pointing to the exiting mm.
1807 if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
1808 khugepaged_scan.mm_slot = list_entry(
1809 mm_slot->mm_node.next,
1810 struct mm_slot, mm_node);
1811 khugepaged_scan.address = 0;
1813 khugepaged_scan.mm_slot = NULL;
1814 khugepaged_full_scans++;
1817 collect_mm_slot(mm_slot);
1823 static int khugepaged_has_work(void)
1825 return !list_empty(&khugepaged_scan.mm_head) &&
1826 khugepaged_enabled();
1829 static int khugepaged_wait_event(void)
1831 return !list_empty(&khugepaged_scan.mm_head) ||
1832 kthread_should_stop();
1835 static void khugepaged_do_scan(void)
1837 struct page *hpage = NULL;
1838 unsigned int progress = 0, pass_through_head = 0;
1839 unsigned int pages = khugepaged_pages_to_scan;
1842 barrier(); /* make sure 'pages' is a stable local copy of khugepaged_pages_to_scan */
1844 while (progress < pages) {
1845 if (!khugepaged_prealloc_page(&hpage, &wait))
1850 if (unlikely(kthread_should_stop() || try_to_freeze()))
1853 spin_lock(&khugepaged_mm_lock);
1854 if (!khugepaged_scan.mm_slot)
1855 pass_through_head++;
1856 if (khugepaged_has_work() &&
1857 pass_through_head < 2)
1858 progress += khugepaged_scan_mm_slot(pages - progress,
1862 spin_unlock(&khugepaged_mm_lock);
1865 if (!IS_ERR_OR_NULL(hpage))
1869 static bool khugepaged_should_wakeup(void)
1871 return kthread_should_stop() ||
1872 time_after_eq(jiffies, khugepaged_sleep_expire);
1875 static void khugepaged_wait_work(void)
1877 if (khugepaged_has_work()) {
1878 const unsigned long scan_sleep_jiffies =
1879 msecs_to_jiffies(khugepaged_scan_sleep_millisecs);
1881 if (!scan_sleep_jiffies)
1884 khugepaged_sleep_expire = jiffies + scan_sleep_jiffies;
1885 wait_event_freezable_timeout(khugepaged_wait,
1886 khugepaged_should_wakeup(),
1887 scan_sleep_jiffies);
1891 if (khugepaged_enabled())
1892 wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
1895 static int khugepaged(void *none)
1897 struct mm_slot *mm_slot;
1900 set_user_nice(current, MAX_NICE);
1902 while (!kthread_should_stop()) {
1903 khugepaged_do_scan();
1904 khugepaged_wait_work();
1907 spin_lock(&khugepaged_mm_lock);
1908 mm_slot = khugepaged_scan.mm_slot;
1909 khugepaged_scan.mm_slot = NULL;
1911 collect_mm_slot(mm_slot);
1912 spin_unlock(&khugepaged_mm_lock);
1916 static void set_recommended_min_free_kbytes(void)
1920 unsigned long recommended_min;
1922 for_each_populated_zone(zone)
1925 /* Ensure 2 pageblocks are free to assist fragmentation avoidance */
1926 recommended_min = pageblock_nr_pages * nr_zones * 2;
1929 * Make sure that on average at least two pageblocks are almost free
1930 * of another type, one for a migratetype to fall back to and a
1931 * second to avoid subsequent fallbacks of other types. There are 3
1932 * MIGRATE_TYPES we care about.
1934 recommended_min += pageblock_nr_pages * nr_zones *
1935 MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
1937 /* don't ever allow reserving more than 5% of lowmem */
1938 recommended_min = min(recommended_min,
1939 (unsigned long) nr_free_buffer_pages() / 20);
1940 recommended_min <<= (PAGE_SHIFT-10);
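/*
 * Editorial worked example (hypothetical x86-64 box: 4K pages, 2M
 * pageblocks so pageblock_nr_pages == 512, nr_zones == 3,
 * MIGRATE_PCPTYPES == 3): recommended_min = 512 * 3 * 2 +
 * 512 * 3 * 3 * 3 = 16896 pages, capped at 5% of lowmem by the min()
 * above, then shifted to KB (16896 pages -> 67584 KB, about 66 MB).
 */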
1942 if (recommended_min > min_free_kbytes) {
1943 if (user_min_free_kbytes >= 0)
1944 pr_info("raising min_free_kbytes from %d to %lu to help transparent hugepage allocations\n",
1945 min_free_kbytes, recommended_min);
1947 min_free_kbytes = recommended_min;
1949 setup_per_zone_wmarks();
1952 int start_stop_khugepaged(void)
1956 mutex_lock(&khugepaged_mutex);
1957 if (khugepaged_enabled()) {
1958 if (!khugepaged_thread)
1959 khugepaged_thread = kthread_run(khugepaged, NULL,
1961 if (IS_ERR(khugepaged_thread)) {
1962 pr_err("khugepaged: kthread_run(khugepaged) failed\n");
1963 err = PTR_ERR(khugepaged_thread);
1964 khugepaged_thread = NULL;
1968 if (!list_empty(&khugepaged_scan.mm_head))
1969 wake_up_interruptible(&khugepaged_wait);
1971 set_recommended_min_free_kbytes();
1972 } else if (khugepaged_thread) {
1973 kthread_stop(khugepaged_thread);
1974 khugepaged_thread = NULL;
1977 mutex_unlock(&khugepaged_mutex);
1981 void khugepaged_min_free_kbytes_update(void)
1983 mutex_lock(&khugepaged_mutex);
1984 if (khugepaged_enabled() && khugepaged_thread)
1985 set_recommended_min_free_kbytes();
1986 mutex_unlock(&khugepaged_mutex);