/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

#include <linux/sched/coredump.h>
#include <linux/mm_types.h>

#include <linux/fs.h> /* only for vma_is_dax() */

vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
		  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd);
int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
		  struct vm_area_struct *vma);

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
#else
static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
}
#endif

vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd);
struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
				   unsigned long addr, pmd_t *pmd,
				   unsigned int flags);
bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
			   pmd_t *pmd, unsigned long addr, unsigned long next);
int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd,
		 unsigned long addr);
int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud,
		 unsigned long addr);
bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
		   unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd);
int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd, unsigned long addr,
		    pgprot_t newprot, unsigned long cp_flags);
vm_fault_t vmf_insert_pfn_pmd_prot(struct vm_fault *vmf, pfn_t pfn,
				   pgprot_t pgprot, bool write);

/**
 * vmf_insert_pfn_pmd - insert a pmd size pfn
 * @vmf: Structure describing the fault
 * @pfn: pfn to insert
 * @write: whether it's a write fault
 *
 * Insert a pmd size pfn. See vmf_insert_pfn() for additional info.
 * The page protection is taken from @vmf->vma->vm_page_prot.
 *
 * Return: vm_fault_t value.
 */
static inline vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn,
					    bool write)
{
	return vmf_insert_pfn_pmd_prot(vmf, pfn, vmf->vma->vm_page_prot, write);
}
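
/*
 * Hedged usage sketch (not part of this header): a driver that maps
 * device memory PMD-sized from its ->huge_fault() handler typically
 * derives a PMD-aligned pfn from the faulting offset and lets
 * vmf_insert_pfn_pmd() pick up the protection from the vma.  The
 * handler and the my_dev_base() helper below are hypothetical:
 *
 *	static vm_fault_t my_dev_huge_fault(struct vm_fault *vmf)
 *	{
 *		phys_addr_t phys = my_dev_base(vmf->vma) +
 *				   (vmf->pgoff << PAGE_SHIFT);
 *
 *		return vmf_insert_pfn_pmd(vmf, phys_to_pfn_t(phys, PFN_DEV),
 *					  vmf->flags & FAULT_FLAG_WRITE);
 *	}
 */
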
vm_fault_t vmf_insert_pfn_pud_prot(struct vm_fault *vmf, pfn_t pfn,
				   pgprot_t pgprot, bool write);

/**
 * vmf_insert_pfn_pud - insert a pud size pfn
 * @vmf: Structure describing the fault
 * @pfn: pfn to insert
 * @write: whether it's a write fault
 *
 * Insert a pud size pfn. See vmf_insert_pfn() for additional info.
 * The page protection is taken from @vmf->vma->vm_page_prot.
 *
 * Return: vm_fault_t value.
 */
static inline vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn,
					    bool write)
{
	return vmf_insert_pfn_pud_prot(vmf, pfn, vmf->vma->vm_page_prot, write);
}

enum transparent_hugepage_flag {
	TRANSPARENT_HUGEPAGE_NEVER_DAX,
	TRANSPARENT_HUGEPAGE_FLAG,
	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
	TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
#ifdef CONFIG_DEBUG_VM
	TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG,
#endif
};
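
/*
 * These flags back the sysfs knobs under
 * /sys/kernel/mm/transparent_hugepage/.  As a summary (see
 * mm/huge_memory.c for the authoritative mapping): writing "always" to
 * "enabled" sets TRANSPARENT_HUGEPAGE_FLAG and "madvise" sets
 * TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, while the "defrag" values
 * "always", "defer", "defer+madvise" and "madvise" select the
 * corresponding TRANSPARENT_HUGEPAGE_DEFRAG_* flags above.
 */
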
struct kobject;
struct kobj_attribute;

ssize_t single_hugepage_flag_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count,
				   enum transparent_hugepage_flag flag);
ssize_t single_hugepage_flag_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf,
				  enum transparent_hugepage_flag flag);
extern struct kobj_attribute shmem_enabled_attr;

#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE	((1UL) << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

#define HPAGE_PUD_SHIFT PUD_SHIFT
#define HPAGE_PUD_SIZE	((1UL) << HPAGE_PUD_SHIFT)
#define HPAGE_PUD_MASK	(~(HPAGE_PUD_SIZE - 1))

extern unsigned long transparent_hugepage_flags;

static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
		unsigned long haddr)
{
	/* Don't have to check pgoff for anonymous vma */
	if (!vma_is_anonymous(vma)) {
		if (!IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
				HPAGE_PMD_NR))
			return false;
	}

	if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
		return false;
	return true;
}
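
/*
 * Worked example (assuming 4K base pages and 2M PMDs, i.e.
 * HPAGE_PMD_NR == 512): a file-backed vma with vm_start == 0x200000
 * and vm_pgoff == 0 passes the alignment check above, since
 * (0x200000 >> PAGE_SHIFT) - 0 == 512 is HPAGE_PMD_NR-aligned.  The
 * same mapping with vm_pgoff == 1 fails: file offset and virtual
 * address disagree modulo HPAGE_PMD_SIZE, so no PMD could map the
 * file's pages at their natural offsets.
 */
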
static inline bool transhuge_vma_enabled(struct vm_area_struct *vma,
					 unsigned long vm_flags)
{
	/* Explicitly disabled through madvise. */
	if ((vm_flags & VM_NOHUGEPAGE) ||
	    test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags))
		return false;
	return true;
}

/*
 * To be used on vmas which are known to support THP.
 * Use transparent_hugepage_active() otherwise.
 */
static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
{
	/*
	 * Bail out if the hardware/firmware marked hugepage support
	 * as disabled.
	 */
	if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_NEVER_DAX))
		return false;

	if (!transhuge_vma_enabled(vma, vma->vm_flags))
		return false;

	if (vma_is_temporary_stack(vma))
		return false;

	if (transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_FLAG))
		return true;

	if (vma_is_dax(vma))
		return true;

	if (transparent_hugepage_flags &
				(1 << TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG))
		return !!(vma->vm_flags & VM_HUGEPAGE);

	return false;
}

bool transparent_hugepage_active(struct vm_area_struct *vma);

#define transparent_hugepage_use_zero_page()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))

unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags);

void prep_transhuge_page(struct page *page);
void free_transhuge_page(struct page *page);
bool is_transparent_hugepage(struct page *page);

bool can_split_huge_page(struct page *page, int *pextra_pins);
int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
	return split_huge_page_to_list(page, NULL);
}
void deferred_split_huge_page(struct page *page);

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct page *page);

#define split_huge_pmd(__vma, __pmd, __address)				\
	do {								\
		pmd_t *____pmd = (__pmd);				\
		if (is_swap_pmd(*____pmd) || pmd_trans_huge(*____pmd)	\
					|| pmd_devmap(*____pmd))	\
			__split_huge_pmd(__vma, __pmd, __address,	\
						false, NULL);		\
	} while (0)
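
/*
 * Usage note (a sketch; the caller must hold mmap_lock): the macro
 * only calls __split_huge_pmd() when *pmd is actually huge
 * (transparent huge, devmap, or a pmd swap/migration entry), so a
 * caller may invoke it unconditionally on any pmd:
 *
 *	split_huge_pmd(vma, pmd, address);
 *
 * which is a no-op when the pmd is not huge.
 */
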
void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
		bool freeze, struct page *page);

void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
		unsigned long address);

#define split_huge_pud(__vma, __pud, __address)				\
	do {								\
		pud_t *____pud = (__pud);				\
		if (pud_trans_huge(*____pud)				\
					|| pud_devmap(*____pud))	\
			__split_huge_pud(__vma, __pud, __address);	\
	} while (0)

int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags,
		     int advice);
void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end, long adjust_next);
spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma);
spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma);

static inline int is_swap_pmd(pmd_t pmd)
{
	return !pmd_none(pmd) && !pmd_present(pmd);
}

/* mmap_lock must be held on entry */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
		return __pmd_trans_huge_lock(pmd, vma);
	else
		return NULL;
}
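
/*
 * Typical calling pattern for pmd_trans_huge_lock() above (a sketch,
 * error handling elided); pud_trans_huge_lock() below is used the
 * same way:
 *
 *	ptl = pmd_trans_huge_lock(pmd, vma);
 *	if (ptl) {
 *		// operate on the huge pmd, then
 *		spin_unlock(ptl);
 *	} else {
 *		// fall back to walking the pte level
 *	}
 */
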
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	if (pud_trans_huge(*pud) || pud_devmap(*pud))
		return __pud_trans_huge_lock(pud, vma);
	else
		return NULL;
}

/**
 * thp_head - Head page of a transparent huge page.
 * @page: Any page (tail, head or regular) found in the page cache.
 */
static inline struct page *thp_head(struct page *page)
{
	return compound_head(page);
}

/**
 * thp_order - Order of a transparent huge page.
 * @page: Head page of a transparent huge page.
 */
static inline unsigned int thp_order(struct page *page)
{
	VM_BUG_ON_PGFLAGS(PageTail(page), page);
	if (PageHead(page))
		return HPAGE_PMD_ORDER;
	return 0;
}

/**
 * thp_nr_pages - The number of regular pages in this huge page.
 * @page: The head page of a huge page.
 */
static inline int thp_nr_pages(struct page *page)
{
	VM_BUG_ON_PGFLAGS(PageTail(page), page);
	if (PageHead(page))
		return HPAGE_PMD_NR;
	return 1;
}

struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags, struct dev_pagemap **pgmap);
struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, int flags, struct dev_pagemap **pgmap);

vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);

extern struct page *huge_zero_page;
extern unsigned long huge_zero_pfn;

static inline bool is_huge_zero_page(struct page *page)
{
	return READ_ONCE(huge_zero_page) == page;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd) && pmd_present(pmd);
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

struct page *mm_get_huge_zero_page(struct mm_struct *mm);
void mm_put_huge_zero_page(struct mm_struct *mm);
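
/*
 * Pairing sketch (inferred from the naming; see mm/huge_memory.c for
 * the details): mm_get_huge_zero_page() takes a per-mm reference on
 * the global huge zero page, allocating it on first use and returning
 * NULL on failure, in which case callers fall back to the normal
 * fault path; the reference is dropped via mm_put_huge_zero_page()
 * when the mm is torn down.
 */
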
#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot))

static inline bool thp_migration_supported(void)
{
	return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION);
}

static inline struct list_head *page_deferred_list(struct page *page)
{
	/*
	 * The global or memcg deferred list lives in the second tail
	 * page; the first tail page is occupied by compound_head and
	 * the other compound metadata.
	 */
	return &page[2].deferred_list;
}

#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })

#define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; })

static inline struct page *thp_head(struct page *page)
{
	VM_BUG_ON_PGFLAGS(PageTail(page), page);
	return page;
}

static inline unsigned int thp_order(struct page *page)
{
	VM_BUG_ON_PGFLAGS(PageTail(page), page);
	return 0;
}

static inline int thp_nr_pages(struct page *page)
{
	VM_BUG_ON_PGFLAGS(PageTail(page), page);
	return 1;
}

static inline bool __transparent_hugepage_enabled(struct vm_area_struct *vma)
{
	return false;
}

static inline bool transparent_hugepage_active(struct vm_area_struct *vma)
{
	return false;
}

static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
		unsigned long haddr)
{
	return false;
}

static inline bool transhuge_vma_enabled(struct vm_area_struct *vma,
					 unsigned long vm_flags)
{
	return false;
}

static inline void prep_transhuge_page(struct page *page) {}

static inline bool is_transparent_hugepage(struct page *page)
{
	return false;
}

#define transparent_hugepage_flags 0UL

#define thp_get_unmapped_area	NULL

static inline bool
can_split_huge_page(struct page *page, int *pextra_pins)
{
	BUILD_BUG();
	return false;
}
static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
{
	return 0;
}
static inline int split_huge_page(struct page *page)
{
	return 0;
}
static inline void deferred_split_huge_page(struct page *page) {}
#define split_huge_pmd(__vma, __pmd, __address)	\
	do { } while (0)

static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct page *page) {}
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
		unsigned long address, bool freeze, struct page *page) {}

#define split_huge_pud(__vma, __pud, __address)	\
	do { } while (0)

static inline int hugepage_madvise(struct vm_area_struct *vma,
				   unsigned long *vm_flags, int advice)
{
	BUG();
	return 0;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end, long adjust_next)
{
}
static inline int is_swap_pmd(pmd_t pmd)
{
	return 0;
}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	return NULL;
}

static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf,
					       pmd_t orig_pmd)
{
	return 0;
}

static inline bool is_huge_zero_page(struct page *page)
{
	return false;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return false;
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

static inline void mm_put_huge_zero_page(struct mm_struct *mm)
{
}

static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
	unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
{
	return NULL;
}

static inline struct page *follow_devmap_pud(struct vm_area_struct *vma,
	unsigned long addr, pud_t *pud, int flags, struct dev_pagemap **pgmap)
{
	return NULL;
}

static inline bool thp_migration_supported(void)
{
	return false;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/**
 * thp_size - Size of a transparent huge page.
 * @page: Head page of a transparent huge page.
 *
 * Return: Number of bytes in this page.
 */
static inline unsigned long thp_size(struct page *page)
{
	return PAGE_SIZE << thp_order(page);
}
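
/*
 * For example, on x86-64 with 4K base pages a PMD-mapped THP has
 * thp_order() == 9, so thp_nr_pages() == 512 and thp_size() == 2M;
 * for a non-compound page the same helpers degrade to order 0,
 * one page and PAGE_SIZE.
 */
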
#endif /* _LINUX_HUGE_MM_H */