2 * Generic hugetlb support.
3 * (C) Nadia Yvette Chambers, April 2004
5 #include <linux/list.h>
6 #include <linux/init.h>
7 #include <linux/module.h>
9 #include <linux/seq_file.h>
10 #include <linux/sysctl.h>
11 #include <linux/highmem.h>
12 #include <linux/mmu_notifier.h>
13 #include <linux/nodemask.h>
14 #include <linux/pagemap.h>
15 #include <linux/mempolicy.h>
16 #include <linux/compiler.h>
17 #include <linux/cpuset.h>
18 #include <linux/mutex.h>
19 #include <linux/bootmem.h>
20 #include <linux/sysfs.h>
21 #include <linux/slab.h>
22 #include <linux/rmap.h>
23 #include <linux/swap.h>
24 #include <linux/swapops.h>
25 #include <linux/page-isolation.h>
26 #include <linux/jhash.h>
29 #include <asm/pgtable.h>
33 #include <linux/hugetlb.h>
34 #include <linux/hugetlb_cgroup.h>
35 #include <linux/node.h>
38 int hugepages_treat_as_movable;
40 int hugetlb_max_hstate __read_mostly;
41 unsigned int default_hstate_idx;
42 struct hstate hstates[HUGE_MAX_HSTATE];
44 * Minimum page order among possible hugepage sizes, set to a proper value
47 static unsigned int minimum_order __read_mostly = UINT_MAX;
49 __initdata LIST_HEAD(huge_boot_pages);
51 /* for command line parsing */
52 static struct hstate * __initdata parsed_hstate;
53 static unsigned long __initdata default_hstate_max_huge_pages;
54 static unsigned long __initdata default_hstate_size;
57 * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
58 * free_huge_pages, and surplus_huge_pages.
60 DEFINE_SPINLOCK(hugetlb_lock);
63 * Serializes faults on the same logical page. This is used to
64 * prevent spurious OOMs when the hugepage pool is fully utilized.
66 static int num_fault_mutexes;
67 struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
69 static inline bool PageHugeFreed(struct page *head)
71 return page_private(head + 4) == -1UL;
74 static inline void SetPageHugeFreed(struct page *head)
76 set_page_private(head + 4, -1UL);
79 static inline void ClearPageHugeFreed(struct page *head)
81 set_page_private(head + 4, 0);
84 /* Forward declaration */
85 static int hugetlb_acct_memory(struct hstate *h, long delta);
87 static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
89 bool free = (spool->count == 0) && (spool->used_hpages == 0);
91 spin_unlock(&spool->lock);
93 /* If no pages are used, and no other handles to the subpool
94 * remain, give up any reservations based on minimum size and
97 if (spool->min_hpages != -1)
98 hugetlb_acct_memory(spool->hstate,
104 struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
107 struct hugepage_subpool *spool;
109 spool = kzalloc(sizeof(*spool), GFP_KERNEL);
113 spin_lock_init(&spool->lock);
115 spool->max_hpages = max_hpages;
117 spool->min_hpages = min_hpages;
119 if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
123 spool->rsv_hpages = min_hpages;
128 void hugepage_put_subpool(struct hugepage_subpool *spool)
130 spin_lock(&spool->lock);
131 BUG_ON(!spool->count);
133 unlock_or_release_subpool(spool);
137 * Subpool accounting for allocating and reserving pages.
138 * Return -ENOMEM if there are not enough resources to satisfy the
139 * request. Otherwise, return the number of pages by which the
140 * global pools must be adjusted (upward). The returned value may
141 * only be different than the passed value (delta) in the case where
142 * a subpool minimum size must be maintained.
144 static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
152 spin_lock(&spool->lock);
154 if (spool->max_hpages != -1) { /* maximum size accounting */
155 if ((spool->used_hpages + delta) <= spool->max_hpages)
156 spool->used_hpages += delta;
163 if (spool->min_hpages != -1) { /* minimum size accounting */
164 if (delta > spool->rsv_hpages) {
166 * Asking for more reserves than those already taken on
167 * behalf of subpool. Return difference.
169 ret = delta - spool->rsv_hpages;
170 spool->rsv_hpages = 0;
172 ret = 0; /* reserves already accounted for */
173 spool->rsv_hpages -= delta;
178 spin_unlock(&spool->lock);
183 * Subpool accounting for freeing and unreserving pages.
184 * Return the number of global page reservations that must be dropped.
185 * The return value may only be different than the passed value (delta)
186 * in the case where a subpool minimum size must be maintained.
188 static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
196 spin_lock(&spool->lock);
198 if (spool->max_hpages != -1) /* maximum size accounting */
199 spool->used_hpages -= delta;
201 if (spool->min_hpages != -1) { /* minimum size accounting */
202 if (spool->rsv_hpages + delta <= spool->min_hpages)
205 ret = spool->rsv_hpages + delta - spool->min_hpages;
207 spool->rsv_hpages += delta;
208 if (spool->rsv_hpages > spool->min_hpages)
209 spool->rsv_hpages = spool->min_hpages;
213 * If hugetlbfs_put_super couldn't free spool due to an outstanding
214 * quota reference, free it now.
216 unlock_or_release_subpool(spool);
221 static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
223 return HUGETLBFS_SB(inode->i_sb)->spool;
226 static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
228 return subpool_inode(file_inode(vma->vm_file));
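/*
 * Illustrative sketch (not part of the original source): how the subpool
 * get/put helpers above are typically paired with hugetlb_acct_memory().
 * The helper name and error values below are hypothetical; the real
 * callers are alloc_huge_page() and the reserve/unreserve paths.
 */
#if 0
static long example_subpool_charge(struct hugepage_subpool *spool,
				   struct hstate *h)
{
	/* May return 0 when the subpool already holds a reserve for us. */
	long gbl_chg = hugepage_subpool_get_pages(spool, 1);

	if (gbl_chg < 0)
		return -ENOSPC;		/* subpool maximum exceeded */

	/* Charge the global pool only for what the subpool did not cover. */
	if (gbl_chg && hugetlb_acct_memory(h, gbl_chg)) {
		hugepage_subpool_put_pages(spool, 1);
		return -ENOSPC;
	}
	return 0;
}
#endif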
232 * Region tracking -- allows tracking of reservations and instantiated pages
233 * across the pages in a mapping.
235 * The region data structures are embedded into a resv_map and protected
236 * by a resv_map's lock. The set of regions within the resv_map represent
237 * reservations for huge pages, or huge pages that have already been
238 * instantiated within the map. The from and to elements are huge page
239 * indices into the associated mapping. from indicates the starting index
240 * of the region. to represents the first index past the end of the region.
242 * For example, a file region structure with from == 0 and to == 4 represents
243 * four huge pages in a mapping. It is important to note that the to element
244 * represents the first element past the end of the region. This is used in
245 * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
247 * Interval notation of the form [from, to) will be used to indicate that
248 * the endpoint from is inclusive and to is exclusive.
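/*
 * Worked example (added for illustration): a reserve map whose region list
 * is [0, 4), [6, 8) represents 4 + 2 = 6 huge pages.  Counting the overlap
 * of that map with the range [2, 7) gives (4 - 2) + (7 - 6) = 3 pages,
 * which is what region_count() below returns for f == 2, t == 7.
 */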
251 struct list_head link;
257 * Add the huge page range represented by [f, t) to the reserve
258 * map. In the normal case, existing regions will be expanded
259 * to accommodate the specified range. Sufficient regions should
260 * exist for expansion due to the previous call to region_chg
261 * with the same range. However, it is possible that region_del
262 * could have been called after region_chg and modified the map
263 * in such a way that no region exists to be expanded. In this
264 * case, pull a region descriptor from the cache associated with
265 * the map and use that for the new range.
267 * Return the number of new huge pages added to the map. This
268 * number is greater than or equal to zero.
270 static long region_add(struct resv_map *resv, long f, long t)
272 struct list_head *head = &resv->regions;
273 struct file_region *rg, *nrg, *trg;
276 spin_lock(&resv->lock);
277 /* Locate the region we are either in or before. */
278 list_for_each_entry(rg, head, link)
283 * If no region exists which can be expanded to include the
284 * specified range, the list must have been modified by an
285 * interleaving call to region_del(). Pull a region descriptor
286 * from the cache and use it for this range.
288 if (&rg->link == head || t < rg->from) {
289 VM_BUG_ON(resv->region_cache_count <= 0);
291 resv->region_cache_count--;
292 nrg = list_first_entry(&resv->region_cache, struct file_region,
294 list_del(&nrg->link);
298 list_add(&nrg->link, rg->link.prev);
304 /* Round our left edge to the current segment if it encloses us. */
308 /* Check for and consume any regions we now overlap with. */
310 list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
311 if (&rg->link == head)
316 /* If this area reaches higher than ours, extend our area to
317 * include it completely. If this is not the first area
318 * which we intend to reuse, free it. */
322 /* Decrement return value by the deleted range.
323 * Another range will span this area so that by the
324 * end of the routine add will be >= zero
326 add -= (rg->to - rg->from);
332 add += (nrg->from - f); /* Added to beginning of region */
334 add += t - nrg->to; /* Added to end of region */
338 resv->adds_in_progress--;
339 spin_unlock(&resv->lock);
345 * Examine the existing reserve map and determine how many
346 * huge pages in the specified range [f, t) are NOT currently
347 * represented. This routine is called before a subsequent
348 * call to region_add that will actually modify the reserve
349 * map to add the specified range [f, t). region_chg does
350 * not change the number of huge pages represented by the
351 * map. However, if the existing regions in the map can not
352 * be expanded to represent the new range, a new file_region
353 * structure is added to the map as a placeholder. This is
354 * so that the subsequent region_add call will have all the
355 * regions it needs and will not fail.
357 * Upon entry, region_chg will also examine the cache of region descriptors
358 * associated with the map. If there are not enough descriptors cached, one
359 * will be allocated for the in progress add operation.
361 * Returns the number of huge pages that need to be added to the existing
362 * reservation map for the range [f, t). This number is greater or equal to
363 * zero. -ENOMEM is returned if a new file_region structure or cache entry
364 * is needed and can not be allocated.
366 static long region_chg(struct resv_map *resv, long f, long t)
368 struct list_head *head = &resv->regions;
369 struct file_region *rg, *nrg = NULL;
373 spin_lock(&resv->lock);
375 resv->adds_in_progress++;
378 * Check for sufficient descriptors in the cache to accommodate
379 * the number of in progress add operations.
381 if (resv->adds_in_progress > resv->region_cache_count) {
382 struct file_region *trg;
384 VM_BUG_ON(resv->adds_in_progress - resv->region_cache_count > 1);
385 /* Must drop lock to allocate a new descriptor. */
386 resv->adds_in_progress--;
387 spin_unlock(&resv->lock);
389 trg = kmalloc(sizeof(*trg), GFP_KERNEL);
395 spin_lock(&resv->lock);
396 list_add(&trg->link, &resv->region_cache);
397 resv->region_cache_count++;
401 /* Locate the region we are before or in. */
402 list_for_each_entry(rg, head, link)
406 /* If we are below the current region then a new region is required.
407 * Subtle: allocate a new region at the position, but make it zero
408 * size so that we are guaranteed to record the reservation. */
409 if (&rg->link == head || t < rg->from) {
411 resv->adds_in_progress--;
412 spin_unlock(&resv->lock);
413 nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
419 INIT_LIST_HEAD(&nrg->link);
423 list_add(&nrg->link, rg->link.prev);
428 /* Round our left edge to the current segment if it encloses us. */
433 /* Check for and consume any regions we now overlap with. */
434 list_for_each_entry(rg, rg->link.prev, link) {
435 if (&rg->link == head)
440 /* We overlap with this area; if it extends further than
441 * we do, we must extend ourselves. Account for its
442 * existing reservation. */
447 chg -= rg->to - rg->from;
451 spin_unlock(&resv->lock);
452 /* We already know we raced and no longer need the new region */
456 spin_unlock(&resv->lock);
461 * Abort the in progress add operation. The adds_in_progress field
462 * of the resv_map keeps track of the operations in progress between
463 * calls to region_chg and region_add. Operations are sometimes
464 * aborted after the call to region_chg. In such cases, region_abort
465 * is called to decrement the adds_in_progress counter.
467 * NOTE: The range arguments [f, t) are not needed or used in this
468 * routine. They are kept to make reading the calling code easier as
469 * arguments will match the associated region_chg call.
471 static void region_abort(struct resv_map *resv, long f, long t)
473 spin_lock(&resv->lock);
474 VM_BUG_ON(!resv->region_cache_count);
475 resv->adds_in_progress--;
476 spin_unlock(&resv->lock);
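/*
 * Illustrative fragment (not in the original source) of the three-step
 * protocol implemented by region_chg()/region_add()/region_abort().  The
 * allocation step is a hypothetical stand-in; the real user of this
 * pattern is __vma_reservation_common() further down.
 */
#if 0
	chg = region_chg(resv, f, t);		/* pre-allocate descriptors */
	if (chg < 0)
		return chg;			/* -ENOMEM */

	if (allocate_the_pages() == 0)		/* hypothetical allocation */
		add = region_add(resv, f, t);	/* commit [f, t) */
	else
		region_abort(resv, f, t);	/* undo adds_in_progress */
#endif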
480 * Delete the specified range [f, t) from the reserve map. If the
481 * t parameter is LONG_MAX, this indicates that ALL regions after f
482 * should be deleted. Locate the regions which intersect [f, t)
483 * and either trim, delete or split the existing regions.
485 * Returns the number of huge pages deleted from the reserve map.
486 * In the normal case, the return value is zero or more. In the
487 * case where a region must be split, a new region descriptor must
488 * be allocated. If the allocation fails, -ENOMEM will be returned.
489 * NOTE: If the parameter t == LONG_MAX, then we will never split
490 * a region and possibly return -ENOMEM. Callers specifying
491 * t == LONG_MAX do not need to check for -ENOMEM error.
493 static long region_del(struct resv_map *resv, long f, long t)
495 struct list_head *head = &resv->regions;
496 struct file_region *rg, *trg;
497 struct file_region *nrg = NULL;
501 spin_lock(&resv->lock);
502 list_for_each_entry_safe(rg, trg, head, link) {
504 * Skip regions before the range to be deleted. file_region
505 * ranges are normally of the form [from, to). However, there
506 * may be a "placeholder" entry in the map which is of the form
507 * (from, to) with from == to. Check for placeholder entries
508 * at the beginning of the range to be deleted.
510 if (rg->to <= f && (rg->to != rg->from || rg->to != f))
516 if (f > rg->from && t < rg->to) { /* Must split region */
518 * Check for an entry in the cache before dropping
519 * lock and attempting allocation.
522 resv->region_cache_count > resv->adds_in_progress) {
523 nrg = list_first_entry(&resv->region_cache,
526 list_del(&nrg->link);
527 resv->region_cache_count--;
531 spin_unlock(&resv->lock);
532 nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
540 /* New entry for end of split region */
543 INIT_LIST_HEAD(&nrg->link);
545 /* Original entry is trimmed */
548 list_add(&nrg->link, &rg->link);
553 if (f <= rg->from && t >= rg->to) { /* Remove entire region */
554 del += rg->to - rg->from;
560 if (f <= rg->from) { /* Trim beginning of region */
563 } else { /* Trim end of region */
569 spin_unlock(&resv->lock);
575 * A rare out of memory error was encountered which prevented removal of
576 * the reserve map region for a page. The huge page itself was freed
577 * and removed from the page cache. This routine will adjust the subpool
578 * usage count, and the global reserve count if needed. By incrementing
579 * these counts, the reserve map entry which could not be deleted will
580 * appear as a "reserved" entry instead of simply dangling with incorrect
583 void hugetlb_fix_reserve_counts(struct inode *inode, bool restore_reserve)
585 struct hugepage_subpool *spool = subpool_inode(inode);
588 rsv_adjust = hugepage_subpool_get_pages(spool, 1);
589 if (restore_reserve && rsv_adjust) {
590 struct hstate *h = hstate_inode(inode);
592 hugetlb_acct_memory(h, 1);
597 * Count and return the number of huge pages in the reserve map
598 * that intersect with the range [f, t).
600 static long region_count(struct resv_map *resv, long f, long t)
602 struct list_head *head = &resv->regions;
603 struct file_region *rg;
606 spin_lock(&resv->lock);
607 /* Locate each segment we overlap with, and count that overlap. */
608 list_for_each_entry(rg, head, link) {
617 seg_from = max(rg->from, f);
618 seg_to = min(rg->to, t);
620 chg += seg_to - seg_from;
622 spin_unlock(&resv->lock);
628 * Convert the address within this vma to the page offset within
629 * the mapping, in pagecache page units; huge pages here.
631 static pgoff_t vma_hugecache_offset(struct hstate *h,
632 struct vm_area_struct *vma, unsigned long address)
634 return ((address - vma->vm_start) >> huge_page_shift(h)) +
635 (vma->vm_pgoff >> huge_page_order(h));
638 pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
639 unsigned long address)
641 return vma_hugecache_offset(hstate_vma(vma), vma, address);
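/*
 * Worked example (illustrative): with a 2 MB hstate on x86 (4 KB base
 * pages, huge_page_shift == 21, huge_page_order == 9), a fault at
 * vma->vm_start + 4 MB in a VMA whose vm_pgoff is 1024 base pages yields
 * hugecache offset (4 MB >> 21) + (1024 >> 9) == 2 + 2 == 4.
 */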
645 * Return the size of the pages allocated when backing a VMA. In the majority
646 * of cases this will be the same size as that used by the page table entries.
648 unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
650 struct hstate *hstate;
652 if (!is_vm_hugetlb_page(vma))
655 hstate = hstate_vma(vma);
657 return 1UL << huge_page_shift(hstate);
659 EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
662 * Return the page size being used by the MMU to back a VMA. In the majority
663 * of cases, the page size used by the kernel matches the MMU size. On
664 * architectures where it differs, an architecture-specific version of this
665 * function is required.
667 #ifndef vma_mmu_pagesize
668 unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
670 return vma_kernel_pagesize(vma);
675 * Flags for MAP_PRIVATE reservations. These are stored in the bottom
676 * bits of the reservation map pointer, which are always clear due to
679 #define HPAGE_RESV_OWNER (1UL << 0)
680 #define HPAGE_RESV_UNMAPPED (1UL << 1)
681 #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
684 * These helpers are used to track how many pages are reserved for
685 * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
686 * is guaranteed to have its future faults succeed.
688 * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
689 * the reserve counters are updated with the hugetlb_lock held. It is safe
690 * to reset the VMA at fork() time as it is not in use yet and there is no
691 * chance of the global counters getting corrupted as a result.
693 * The private mapping reservation is represented in a subtly different
694 * manner to a shared mapping. A shared mapping has a region map associated
695 * with the underlying file; this region map represents the backing file
696 * pages which have ever had a reservation assigned, and this persists even
697 * after the page is instantiated. A private mapping has a region map
698 * associated with the original mmap which is attached to all VMAs which
699 * reference it; this region map represents those offsets which have consumed
700 * a reservation, i.e. where pages have been instantiated.
702 static unsigned long get_vma_private_data(struct vm_area_struct *vma)
704 return (unsigned long)vma->vm_private_data;
707 static void set_vma_private_data(struct vm_area_struct *vma,
710 vma->vm_private_data = (void *)value;
713 struct resv_map *resv_map_alloc(void)
715 struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
716 struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);
718 if (!resv_map || !rg) {
724 kref_init(&resv_map->refs);
725 spin_lock_init(&resv_map->lock);
726 INIT_LIST_HEAD(&resv_map->regions);
728 resv_map->adds_in_progress = 0;
730 INIT_LIST_HEAD(&resv_map->region_cache);
731 list_add(&rg->link, &resv_map->region_cache);
732 resv_map->region_cache_count = 1;
737 void resv_map_release(struct kref *ref)
739 struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
740 struct list_head *head = &resv_map->region_cache;
741 struct file_region *rg, *trg;
743 /* Clear out any active regions before we release the map. */
744 region_del(resv_map, 0, LONG_MAX);
746 /* ... and any entries left in the cache */
747 list_for_each_entry_safe(rg, trg, head, link) {
752 VM_BUG_ON(resv_map->adds_in_progress);
757 static inline struct resv_map *inode_resv_map(struct inode *inode)
759 return inode->i_mapping->private_data;
762 static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
764 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
765 if (vma->vm_flags & VM_MAYSHARE) {
766 struct address_space *mapping = vma->vm_file->f_mapping;
767 struct inode *inode = mapping->host;
769 return inode_resv_map(inode);
772 return (struct resv_map *)(get_vma_private_data(vma) &
777 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
779 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
780 VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
782 set_vma_private_data(vma, (get_vma_private_data(vma) &
783 HPAGE_RESV_MASK) | (unsigned long)map);
786 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
788 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
789 VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
791 set_vma_private_data(vma, get_vma_private_data(vma) | flags);
794 static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
796 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
798 return (get_vma_private_data(vma) & flag) != 0;
801 /* Reset counters to 0 and clear all HPAGE_RESV_* flags */
802 void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
804 VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
805 if (!(vma->vm_flags & VM_MAYSHARE))
806 vma->vm_private_data = (void *)0;
809 /* Returns true if the VMA has associated reserve pages */
810 static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
812 if (vma->vm_flags & VM_NORESERVE) {
814 * This address is already reserved by another process (chg == 0),
815 * so we should decrement the reserved count. Without decrementing,
816 * the reserve count remains after releasing the inode, because the
817 * allocated page will go into the page cache and be regarded as
818 * coming from the reserved pool in the release step. Currently, we
819 * don't have any other solution to deal with this situation
820 * properly, so add a work-around here.
822 if (vma->vm_flags & VM_MAYSHARE && chg == 0)
828 /* Shared mappings always use reserves */
829 if (vma->vm_flags & VM_MAYSHARE) {
831 * We know VM_NORESERVE is not set. Therefore, there SHOULD
832 * be a region map for all pages. The only situation where
833 * there is no region map is if a hole was punched via
834 * fallocate. In this case, there really are no reserves to
835 * use. This situation is indicated if chg != 0.
844 * Only the process that called mmap() has reserves for
847 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
853 static void enqueue_huge_page(struct hstate *h, struct page *page)
855 int nid = page_to_nid(page);
856 list_move(&page->lru, &h->hugepage_freelists[nid]);
857 h->free_huge_pages++;
858 h->free_huge_pages_node[nid]++;
859 SetPageHugeFreed(page);
862 static struct page *dequeue_huge_page_node(struct hstate *h, int nid)
866 list_for_each_entry(page, &h->hugepage_freelists[nid], lru)
867 if (!is_migrate_isolate_page(page))
870 * If no non-isolated free hugepage is found on the list,
871 * the allocation fails.
873 if (&h->hugepage_freelists[nid] == &page->lru)
875 list_move(&page->lru, &h->hugepage_activelist);
876 set_page_refcounted(page);
877 ClearPageHugeFreed(page);
878 h->free_huge_pages--;
879 h->free_huge_pages_node[nid]--;
883 /* Movability of hugepages depends on migration support. */
884 static inline gfp_t htlb_alloc_mask(struct hstate *h)
886 if (hugepages_treat_as_movable || hugepage_migration_supported(h))
887 return GFP_HIGHUSER_MOVABLE;
892 static struct page *dequeue_huge_page_vma(struct hstate *h,
893 struct vm_area_struct *vma,
894 unsigned long address, int avoid_reserve,
897 struct page *page = NULL;
898 struct mempolicy *mpol;
899 nodemask_t *nodemask;
900 struct zonelist *zonelist;
903 unsigned int cpuset_mems_cookie;
906 * A child process with MAP_PRIVATE mappings created by its parent
907 * has no page reserves. This check ensures that reservations are
908 * not "stolen". The child may still get SIGKILLed
910 if (!vma_has_reserves(vma, chg) &&
911 h->free_huge_pages - h->resv_huge_pages == 0)
914 /* If reserves cannot be used, ensure enough pages are in the pool */
915 if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
919 cpuset_mems_cookie = read_mems_allowed_begin();
920 zonelist = huge_zonelist(vma, address,
921 htlb_alloc_mask(h), &mpol, &nodemask);
923 for_each_zone_zonelist_nodemask(zone, z, zonelist,
924 MAX_NR_ZONES - 1, nodemask) {
925 if (cpuset_zone_allowed(zone, htlb_alloc_mask(h))) {
926 page = dequeue_huge_page_node(h, zone_to_nid(zone));
930 if (!vma_has_reserves(vma, chg))
933 SetPagePrivate(page);
934 h->resv_huge_pages--;
941 if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
950 * common helper functions for hstate_next_node_to_{alloc|free}.
951 * We may have allocated or freed a huge page based on a different
952 * nodes_allowed previously, so h->next_node_to_{alloc|free} might
953 * be outside of *nodes_allowed. Ensure that we use an allowed
954 * node for alloc or free.
956 static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
958 nid = next_node(nid, *nodes_allowed);
959 if (nid == MAX_NUMNODES)
960 nid = first_node(*nodes_allowed);
961 VM_BUG_ON(nid >= MAX_NUMNODES);
966 static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
968 if (!node_isset(nid, *nodes_allowed))
969 nid = next_node_allowed(nid, nodes_allowed);
974 * returns the previously saved node ["this node"] from which to
975 * allocate a persistent huge page for the pool and advances the
976 * next node from which to allocate, handling wrap at end of node
979 static int hstate_next_node_to_alloc(struct hstate *h,
980 nodemask_t *nodes_allowed)
984 VM_BUG_ON(!nodes_allowed);
986 nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
987 h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
993 * helper for free_pool_huge_page() - return the previously saved
994 * node ["this node"] from which to free a huge page. Advance the
995 * next node id whether or not we find a free huge page to free so
996 * that the next attempt to free addresses the next node.
998 static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
1002 VM_BUG_ON(!nodes_allowed);
1004 nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
1005 h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
1010 #define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask) \
1011 for (nr_nodes = nodes_weight(*mask); \
1013 ((node = hstate_next_node_to_alloc(hs, mask)) || 1); \
1016 #define for_each_node_mask_to_free(hs, nr_nodes, node, mask) \
1017 for (nr_nodes = nodes_weight(*mask); \
1019 ((node = hstate_next_node_to_free(hs, mask)) || 1); \
1022 #if defined(CONFIG_CMA) && defined(CONFIG_X86_64)
1023 static void destroy_compound_gigantic_page(struct page *page,
1027 int nr_pages = 1 << order;
1028 struct page *p = page + 1;
1030 for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1031 clear_compound_head(p);
1032 set_page_refcounted(p);
1035 set_compound_order(page, 0);
1036 __ClearPageHead(page);
1039 static void free_gigantic_page(struct page *page, unsigned int order)
1041 free_contig_range(page_to_pfn(page), 1 << order);
1044 static int __alloc_gigantic_page(unsigned long start_pfn,
1045 unsigned long nr_pages)
1047 unsigned long end_pfn = start_pfn + nr_pages;
1048 return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
1051 static bool pfn_range_valid_gigantic(unsigned long start_pfn,
1052 unsigned long nr_pages)
1054 unsigned long i, end_pfn = start_pfn + nr_pages;
1057 for (i = start_pfn; i < end_pfn; i++) {
1061 page = pfn_to_page(i);
1063 if (PageReserved(page))
1066 if (page_count(page) > 0)
1076 static bool zone_spans_last_pfn(const struct zone *zone,
1077 unsigned long start_pfn, unsigned long nr_pages)
1079 unsigned long last_pfn = start_pfn + nr_pages - 1;
1080 return zone_spans_pfn(zone, last_pfn);
1083 static struct page *alloc_gigantic_page(int nid, unsigned int order)
1085 unsigned long nr_pages = 1 << order;
1086 unsigned long ret, pfn, flags;
1089 z = NODE_DATA(nid)->node_zones;
1090 for (; z - NODE_DATA(nid)->node_zones < MAX_NR_ZONES; z++) {
1091 spin_lock_irqsave(&z->lock, flags);
1093 pfn = ALIGN(z->zone_start_pfn, nr_pages);
1094 while (zone_spans_last_pfn(z, pfn, nr_pages)) {
1095 if (pfn_range_valid_gigantic(pfn, nr_pages)) {
1097 * We release the zone lock here because
1098 * alloc_contig_range() will also lock the zone
1099 * at some point. If there's an allocation
1100 * spinning on this lock, it may win the race
1101 * and cause alloc_contig_range() to fail...
1103 spin_unlock_irqrestore(&z->lock, flags);
1104 ret = __alloc_gigantic_page(pfn, nr_pages);
1106 return pfn_to_page(pfn);
1107 spin_lock_irqsave(&z->lock, flags);
1112 spin_unlock_irqrestore(&z->lock, flags);
1118 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
1119 static void prep_compound_gigantic_page(struct page *page, unsigned int order);
1121 static struct page *alloc_fresh_gigantic_page_node(struct hstate *h, int nid)
1125 page = alloc_gigantic_page(nid, huge_page_order(h));
1127 prep_compound_gigantic_page(page, huge_page_order(h));
1128 prep_new_huge_page(h, page, nid);
1134 static int alloc_fresh_gigantic_page(struct hstate *h,
1135 nodemask_t *nodes_allowed)
1137 struct page *page = NULL;
1140 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1141 page = alloc_fresh_gigantic_page_node(h, node);
1149 static inline bool gigantic_page_supported(void) { return true; }
1151 static inline bool gigantic_page_supported(void) { return false; }
1152 static inline void free_gigantic_page(struct page *page, unsigned int order) { }
1153 static inline void destroy_compound_gigantic_page(struct page *page,
1154 unsigned int order) { }
1155 static inline int alloc_fresh_gigantic_page(struct hstate *h,
1156 nodemask_t *nodes_allowed) { return 0; }
1159 static void update_and_free_page(struct hstate *h, struct page *page)
1162 struct page *subpage = page;
1164 if (hstate_is_gigantic(h) && !gigantic_page_supported())
1168 h->nr_huge_pages_node[page_to_nid(page)]--;
1169 for (i = 0; i < pages_per_huge_page(h);
1170 i++, subpage = mem_map_next(subpage, page, i)) {
1171 subpage->flags &= ~(1 << PG_locked | 1 << PG_error |
1172 1 << PG_referenced | 1 << PG_dirty |
1173 1 << PG_active | 1 << PG_private |
1176 VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
1177 set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
1178 set_page_refcounted(page);
1179 if (hstate_is_gigantic(h)) {
1180 destroy_compound_gigantic_page(page, huge_page_order(h));
1181 free_gigantic_page(page, huge_page_order(h));
1183 __free_pages(page, huge_page_order(h));
1187 struct hstate *size_to_hstate(unsigned long size)
1191 for_each_hstate(h) {
1192 if (huge_page_size(h) == size)
1199 * Test to determine whether the hugepage is "active/in-use" (i.e. being linked
1200 * to hstate->hugepage_activelist.)
1202 * This function can be called for tail pages, but never returns true for them.
1204 bool page_huge_active(struct page *page)
1206 return PageHeadHuge(page) && PagePrivate(&page[1]);
1209 /* never called for tail page */
1210 void set_page_huge_active(struct page *page)
1212 VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
1213 SetPagePrivate(&page[1]);
1216 static void clear_page_huge_active(struct page *page)
1218 VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
1219 ClearPagePrivate(&page[1]);
1222 void free_huge_page(struct page *page)
1225 * Can't pass hstate in here because it is called from the
1226 * compound page destructor.
1228 struct hstate *h = page_hstate(page);
1229 int nid = page_to_nid(page);
1230 struct hugepage_subpool *spool =
1231 (struct hugepage_subpool *)page_private(page);
1232 bool restore_reserve;
1234 set_page_private(page, 0);
1235 page->mapping = NULL;
1236 BUG_ON(page_count(page));
1237 BUG_ON(page_mapcount(page));
1238 restore_reserve = PagePrivate(page);
1239 ClearPagePrivate(page);
1242 * If PagePrivate() was set on page, page allocation consumed a
1243 * reservation. If the page was associated with a subpool, there
1244 * would have been a page reserved in the subpool before allocation
1245 * via hugepage_subpool_get_pages(). Since we are 'restoring' the
1246 * reservation, do not call hugepage_subpool_put_pages() as this will
1247 * remove the reserved page from the subpool.
1249 if (!restore_reserve) {
1251 * A return code of zero implies that the subpool will be
1252 * under its minimum size if the reservation is not restored
1253 * after the page is freed. Therefore, force restore_reserve
1256 if (hugepage_subpool_put_pages(spool, 1) == 0)
1257 restore_reserve = true;
1260 spin_lock(&hugetlb_lock);
1261 clear_page_huge_active(page);
1262 hugetlb_cgroup_uncharge_page(hstate_index(h),
1263 pages_per_huge_page(h), page);
1264 if (restore_reserve)
1265 h->resv_huge_pages++;
1267 if (h->surplus_huge_pages_node[nid]) {
1268 /* remove the page from active list */
1269 list_del(&page->lru);
1270 update_and_free_page(h, page);
1271 h->surplus_huge_pages--;
1272 h->surplus_huge_pages_node[nid]--;
1274 arch_clear_hugepage_flags(page);
1275 enqueue_huge_page(h, page);
1277 spin_unlock(&hugetlb_lock);
1280 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
1282 INIT_LIST_HEAD(&page->lru);
1283 set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
1284 spin_lock(&hugetlb_lock);
1285 set_hugetlb_cgroup(page, NULL);
1287 h->nr_huge_pages_node[nid]++;
1288 ClearPageHugeFreed(page);
1289 spin_unlock(&hugetlb_lock);
1290 put_page(page); /* free it into the hugepage allocator */
1293 static void prep_compound_gigantic_page(struct page *page, unsigned int order)
1296 int nr_pages = 1 << order;
1297 struct page *p = page + 1;
1299 /* we rely on prep_new_huge_page to set the destructor */
1300 set_compound_order(page, order);
1301 __SetPageHead(page);
1302 __ClearPageReserved(page);
1303 for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1305 * For gigantic hugepages allocated through bootmem at
1306 * boot, it's safer to be consistent with the not-gigantic
1307 * hugepages and clear the PG_reserved bit from all tail pages
1308 * too. Otherwise drivers using get_user_pages() to access tail
1309 * pages may get the reference counting wrong if they see
1310 * PG_reserved set on a tail page (despite the head page not
1311 * having PG_reserved set). Enforcing this consistency between
1312 * head and tail pages allows drivers to optimize away a check
1313 * on the head page when they need to know if put_page() is needed
1314 * after get_user_pages().
1316 __ClearPageReserved(p);
1317 set_page_count(p, 0);
1318 set_compound_head(p, page);
1323 * PageHuge() only returns true for hugetlbfs pages, but not for normal or
1324 * transparent huge pages. See the PageTransHuge() documentation for more
1327 int PageHuge(struct page *page)
1329 if (!PageCompound(page))
1332 page = compound_head(page);
1333 return page[1].compound_dtor == HUGETLB_PAGE_DTOR;
1335 EXPORT_SYMBOL_GPL(PageHuge);
1338 * PageHeadHuge() only returns true for hugetlbfs head pages, but not for
1339 * normal or transparent huge pages.
1341 int PageHeadHuge(struct page *page_head)
1343 if (!PageHead(page_head))
1346 return get_compound_page_dtor(page_head) == free_huge_page;
1349 pgoff_t __basepage_index(struct page *page)
1351 struct page *page_head = compound_head(page);
1352 pgoff_t index = page_index(page_head);
1353 unsigned long compound_idx;
1355 if (!PageHuge(page_head))
1356 return page_index(page);
1358 if (compound_order(page_head) >= MAX_ORDER)
1359 compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
1361 compound_idx = page - page_head;
1363 return (index << compound_order(page_head)) + compound_idx;
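/*
 * Worked example (illustrative): for a 2 MB huge page (compound order 9
 * with 4 KB base pages) whose head sits at page cache index 3, the tail
 * page with compound_idx == 5 has base-page index (3 << 9) + 5 == 1541.
 */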
1366 static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid)
1370 page = __alloc_pages_node(nid,
1371 htlb_alloc_mask(h)|__GFP_COMP|__GFP_THISNODE|
1372 __GFP_REPEAT|__GFP_NOWARN,
1373 huge_page_order(h));
1375 prep_new_huge_page(h, page, nid);
1381 static int alloc_fresh_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
1387 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1388 page = alloc_fresh_huge_page_node(h, node);
1396 count_vm_event(HTLB_BUDDY_PGALLOC);
1398 count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1404 * Free huge page from pool from next node to free.
1405 * Attempt to keep persistent huge pages more or less
1406 * balanced over allowed nodes.
1407 * Called with hugetlb_lock locked.
1409 static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
1415 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
1417 * If we're returning unused surplus pages, only examine
1418 * nodes with surplus pages.
1420 if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
1421 !list_empty(&h->hugepage_freelists[node])) {
1423 list_entry(h->hugepage_freelists[node].next,
1425 list_del(&page->lru);
1426 h->free_huge_pages--;
1427 h->free_huge_pages_node[node]--;
1429 h->surplus_huge_pages--;
1430 h->surplus_huge_pages_node[node]--;
1432 update_and_free_page(h, page);
1442 * Dissolve a given free hugepage into free buddy pages. This function does
1443 * nothing for in-use (including surplus) hugepages.
1445 static void dissolve_free_huge_page(struct page *page)
1448 spin_lock(&hugetlb_lock);
1449 if (PageHuge(page) && !page_count(page)) {
1450 struct page *head = compound_head(page);
1451 struct hstate *h = page_hstate(head);
1452 int nid = page_to_nid(head);
1455 * We should make sure that the page is already on the free list
1456 * when it is dissolved.
1458 if (unlikely(!PageHugeFreed(head))) {
1459 spin_unlock(&hugetlb_lock);
1463 * Theoretically, we should return -EBUSY when we
1464 * encounter this race. In fact, we have a chance
1465 * to successfully dissolve the page if we do a
1466 * retry, because the race window is quite small.
1467 * Seizing this opportunity is an optimization that
1468 * increases the success rate of dissolving the page.
1473 list_del(&head->lru);
1474 h->free_huge_pages--;
1475 h->free_huge_pages_node[nid]--;
1476 update_and_free_page(h, head);
1478 spin_unlock(&hugetlb_lock);
1482 * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
1483 * make specified memory blocks removable from the system.
1484 * Note that this will dissolve a free gigantic hugepage completely, if any
1485 * part of it lies within the given range.
1487 void dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
1491 if (!hugepages_supported())
1494 for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order)
1495 dissolve_free_huge_page(pfn_to_page(pfn));
1499 * There are 3 ways this can get called:
1500 * 1. With vma+addr: we use the VMA's memory policy
1501 * 2. With !vma, but nid=NUMA_NO_NODE: We try to allocate a huge
1502 * page from any node, and let the buddy allocator itself figure
1504 * 3. With !vma, but nid!=NUMA_NO_NODE. We allocate a huge page
1505 * strictly from 'nid'
1507 static struct page *__hugetlb_alloc_buddy_huge_page(struct hstate *h,
1508 struct vm_area_struct *vma, unsigned long addr, int nid)
1510 int order = huge_page_order(h);
1511 gfp_t gfp = htlb_alloc_mask(h)|__GFP_COMP|__GFP_REPEAT|__GFP_NOWARN;
1512 unsigned int cpuset_mems_cookie;
1515 * We need a VMA to get a memory policy. If we do not
1516 * have one, we use the 'nid' argument.
1518 * The mempolicy stuff below has some non-inlined bits
1519 * and calls ->vm_ops. That makes it hard to optimize at
1520 * compile-time, even when NUMA is off and it does
1521 * nothing. This helps the compiler optimize it out.
1523 if (!IS_ENABLED(CONFIG_NUMA) || !vma) {
1525 * If a specific node is requested, make sure to
1526 * get memory from there, but only when a node
1527 * is explicitly specified.
1529 if (nid != NUMA_NO_NODE)
1530 gfp |= __GFP_THISNODE;
1532 * Make sure to call something that can handle
1535 return alloc_pages_node(nid, gfp, order);
1539 * OK, so we have a VMA. Fetch the mempolicy and try to
1540 * allocate a huge page with it. We will only reach this
1541 * when CONFIG_NUMA=y.
1545 struct mempolicy *mpol;
1546 struct zonelist *zl;
1547 nodemask_t *nodemask;
1549 cpuset_mems_cookie = read_mems_allowed_begin();
1550 zl = huge_zonelist(vma, addr, gfp, &mpol, &nodemask);
1551 mpol_cond_put(mpol);
1552 page = __alloc_pages_nodemask(gfp, order, zl, nodemask);
1555 } while (read_mems_allowed_retry(cpuset_mems_cookie));
1561 * There are two ways to allocate a huge page:
1562 * 1. When you have a VMA and an address (like a fault)
1563 * 2. When you have no VMA (like when setting /proc/.../nr_hugepages)
1565 * 'vma' and 'addr' are only for (1). 'nid' is always NUMA_NO_NODE in
1566 * this case which signifies that the allocation should be done with
1567 * respect for the VMA's memory policy.
1569 * For (2), we ignore 'vma' and 'addr' and use 'nid' exclusively. This
1570 * implies that memory policies will not be taken in to account.
1572 static struct page *__alloc_buddy_huge_page(struct hstate *h,
1573 struct vm_area_struct *vma, unsigned long addr, int nid)
1578 if (hstate_is_gigantic(h))
1582 * Make sure that anyone specifying 'nid' is not also specifying a VMA.
1583 * This makes sure the caller is picking _one_ of the modes with which
1584 * we can call this function, not both.
1586 if (vma || (addr != -1)) {
1587 VM_WARN_ON_ONCE(addr == -1);
1588 VM_WARN_ON_ONCE(nid != NUMA_NO_NODE);
1591 * Assume we will successfully allocate the surplus page to
1592 * prevent racing processes from causing the surplus to exceed
1595 * This however introduces a different race, where a process B
1596 * tries to grow the static hugepage pool while alloc_pages() is
1597 * called by process A. B will only examine the per-node
1598 * counters in determining if surplus huge pages can be
1599 * converted to normal huge pages in adjust_pool_surplus(). A
1600 * won't be able to increment the per-node counter, until the
1601 * lock is dropped by B, but B doesn't drop hugetlb_lock until
1602 * no more huge pages can be converted from surplus to normal
1603 * state (and doesn't try to convert again). Thus, we have a
1604 * case where a surplus huge page exists, the pool is grown, and
1605 * the surplus huge page still exists after, even though it
1606 * should just have been converted to a normal huge page. This
1607 * does not leak memory, though, as the hugepage will be freed
1608 * once it is out of use. It also does not allow the counters to
1609 * go out of whack in adjust_pool_surplus() as we don't modify
1610 * the node values until we've gotten the hugepage and only the
1611 * per-node value is checked there.
1613 spin_lock(&hugetlb_lock);
1614 if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
1615 spin_unlock(&hugetlb_lock);
1619 h->surplus_huge_pages++;
1621 spin_unlock(&hugetlb_lock);
1623 page = __hugetlb_alloc_buddy_huge_page(h, vma, addr, nid);
1625 spin_lock(&hugetlb_lock);
1627 INIT_LIST_HEAD(&page->lru);
1628 r_nid = page_to_nid(page);
1629 set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
1630 set_hugetlb_cgroup(page, NULL);
1632 * We incremented the global counters already
1634 h->nr_huge_pages_node[r_nid]++;
1635 h->surplus_huge_pages_node[r_nid]++;
1636 __count_vm_event(HTLB_BUDDY_PGALLOC);
1639 h->surplus_huge_pages--;
1640 __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1642 spin_unlock(&hugetlb_lock);
1648 * Allocate a huge page from 'nid'. Note, 'nid' may be
1649 * NUMA_NO_NODE, which means that it may be allocated
1653 struct page *__alloc_buddy_huge_page_no_mpol(struct hstate *h, int nid)
1655 unsigned long addr = -1;
1657 return __alloc_buddy_huge_page(h, NULL, addr, nid);
1661 * Use the VMA's mpolicy to allocate a huge page from the buddy.
1664 struct page *__alloc_buddy_huge_page_with_mpol(struct hstate *h,
1665 struct vm_area_struct *vma, unsigned long addr)
1667 return __alloc_buddy_huge_page(h, vma, addr, NUMA_NO_NODE);
1671 * This allocation function is useful in the context where vma is irrelevant.
1672 * E.g. soft-offlining uses this function because it only cares about the
1673 * physical address of the error page.
1675 struct page *alloc_huge_page_node(struct hstate *h, int nid)
1677 struct page *page = NULL;
1679 spin_lock(&hugetlb_lock);
1680 if (h->free_huge_pages - h->resv_huge_pages > 0)
1681 page = dequeue_huge_page_node(h, nid);
1682 spin_unlock(&hugetlb_lock);
1685 page = __alloc_buddy_huge_page_no_mpol(h, nid);
1691 * Increase the hugetlb pool such that it can accommodate a reservation
1694 static int gather_surplus_pages(struct hstate *h, int delta)
1696 struct list_head surplus_list;
1697 struct page *page, *tmp;
1699 int needed, allocated;
1700 bool alloc_ok = true;
1702 needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
1704 h->resv_huge_pages += delta;
1709 INIT_LIST_HEAD(&surplus_list);
1713 spin_unlock(&hugetlb_lock);
1714 for (i = 0; i < needed; i++) {
1715 page = __alloc_buddy_huge_page_no_mpol(h, NUMA_NO_NODE);
1720 list_add(&page->lru, &surplus_list);
1725 * After retaking hugetlb_lock, we need to recalculate 'needed'
1726 * because either resv_huge_pages or free_huge_pages may have changed.
1728 spin_lock(&hugetlb_lock);
1729 needed = (h->resv_huge_pages + delta) -
1730 (h->free_huge_pages + allocated);
1735 * We were not able to allocate enough pages to
1736 * satisfy the entire reservation so we free what
1737 * we've allocated so far.
1742 * The surplus_list now contains _at_least_ the number of extra pages
1743 * needed to accommodate the reservation. Add the appropriate number
1744 * of pages to the hugetlb pool and free the extras back to the buddy
1745 * allocator. Commit the entire reservation here to prevent another
1746 * process from stealing the pages as they are added to the pool but
1747 * before they are reserved.
1749 needed += allocated;
1750 h->resv_huge_pages += delta;
1753 /* Free the needed pages to the hugetlb pool */
1754 list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
1758 * This page is now managed by the hugetlb allocator and has
1759 * no users -- drop the buddy allocator's reference.
1761 put_page_testzero(page);
1762 VM_BUG_ON_PAGE(page_count(page), page);
1763 enqueue_huge_page(h, page);
1766 spin_unlock(&hugetlb_lock);
1768 /* Free unnecessary surplus pages to the buddy allocator */
1769 list_for_each_entry_safe(page, tmp, &surplus_list, lru)
1771 spin_lock(&hugetlb_lock);
1777 * This routine has two main purposes:
1778 * 1) Decrement the reservation count (resv_huge_pages) by the value passed
1779 * in unused_resv_pages. This corresponds to the prior adjustments made
1780 * to the associated reservation map.
1781 * 2) Free any unused surplus pages that may have been allocated to satisfy
1782 * the reservation. As many as unused_resv_pages may be freed.
1784 * Called with hugetlb_lock held. However, the lock could be dropped (and
1785 * reacquired) during calls to cond_resched_lock. Whenever dropping the lock,
1786 * we must make sure nobody else can claim pages we are in the process of
1787 * freeing. Do this by ensuring resv_huge_pages is always greater than the
1788 * number of huge pages we plan to free when dropping the lock.
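/*
 * Worked example (added for illustration): with unused_resv_pages == 6 and
 * surplus_huge_pages == 4, at most four surplus pages are freed.  Each
 * iteration drops resv_huge_pages by one before freeing a page, and the
 * final "resv_huge_pages -= unused_resv_pages" uncommits the remaining 2,
 * so the reservation count falls by 6 in total.
 */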
1790 static void return_unused_surplus_pages(struct hstate *h,
1791 unsigned long unused_resv_pages)
1793 unsigned long nr_pages;
1795 /* Cannot return gigantic pages currently */
1796 if (hstate_is_gigantic(h))
1800 * Part (or even all) of the reservation could have been backed
1801 * by pre-allocated pages. Only free surplus pages.
1803 nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
1806 * We want to release as many surplus pages as possible, spread
1807 * evenly across all nodes with memory. Iterate across these nodes
1808 * until we can no longer free unreserved surplus pages. This occurs
1809 * when the nodes with surplus pages have no free pages.
1810 * free_pool_huge_page() will balance the freed pages across the
1811 * on-line nodes with memory and will handle the hstate accounting.
1813 * Note that we decrement resv_huge_pages as we free the pages. If
1814 * we drop the lock, resv_huge_pages will still be sufficiently large
1815 * to cover subsequent pages we may free.
1817 while (nr_pages--) {
1818 h->resv_huge_pages--;
1819 unused_resv_pages--;
1820 if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
1822 cond_resched_lock(&hugetlb_lock);
1826 /* Fully uncommit the reservation */
1827 h->resv_huge_pages -= unused_resv_pages;
1832 * vma_needs_reservation, vma_commit_reservation and vma_end_reservation
1833 * are used by the huge page allocation routines to manage reservations.
1835 * vma_needs_reservation is called to determine if the huge page at addr
1836 * within the vma has an associated reservation. If a reservation is
1837 * needed, the value 1 is returned. The caller is then responsible for
1838 * managing the global reservation and subpool usage counts. After
1839 * the huge page has been allocated, vma_commit_reservation is called
1840 * to add the page to the reservation map. If the page allocation fails,
1841 * the reservation must be ended instead of committed. vma_end_reservation
1842 * is called in such cases.
1844 * In the normal case, vma_commit_reservation returns the same value
1845 * as the preceding vma_needs_reservation call. The only time this
1846 * is not the case is if a reserve map was changed between calls. It
1847 * is the responsibility of the caller to notice the difference and
1848 * take appropriate action.
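/*
 * Illustrative fragment (not in the original source) of the calling
 * convention described above.  The allocation step is hypothetical; the
 * real sequence lives in alloc_huge_page() below.
 */
#if 0
	if (vma_needs_reservation(h, vma, addr) < 0)
		return ERR_PTR(-ENOMEM);

	page = allocate_the_page(h, vma, addr);	/* hypothetical step */
	if (page)
		vma_commit_reservation(h, vma, addr);
	else
		vma_end_reservation(h, vma, addr);
#endif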
1850 enum vma_resv_mode {
1855 static long __vma_reservation_common(struct hstate *h,
1856 struct vm_area_struct *vma, unsigned long addr,
1857 enum vma_resv_mode mode)
1859 struct resv_map *resv;
1863 resv = vma_resv_map(vma);
1867 idx = vma_hugecache_offset(h, vma, addr);
1869 case VMA_NEEDS_RESV:
1870 ret = region_chg(resv, idx, idx + 1);
1872 case VMA_COMMIT_RESV:
1873 ret = region_add(resv, idx, idx + 1);
1876 region_abort(resv, idx, idx + 1);
1883 if (vma->vm_flags & VM_MAYSHARE)
1886 return ret < 0 ? ret : 0;
1889 static long vma_needs_reservation(struct hstate *h,
1890 struct vm_area_struct *vma, unsigned long addr)
1892 return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
1895 static long vma_commit_reservation(struct hstate *h,
1896 struct vm_area_struct *vma, unsigned long addr)
1898 return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
1901 static void vma_end_reservation(struct hstate *h,
1902 struct vm_area_struct *vma, unsigned long addr)
1904 (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
1907 struct page *alloc_huge_page(struct vm_area_struct *vma,
1908 unsigned long addr, int avoid_reserve)
1910 struct hugepage_subpool *spool = subpool_vma(vma);
1911 struct hstate *h = hstate_vma(vma);
1913 long map_chg, map_commit;
1916 struct hugetlb_cgroup *h_cg;
1918 idx = hstate_index(h);
1920 * Examine the region/reserve map to determine if the process
1921 * has a reservation for the page to be allocated. A return
1922 * code of zero indicates a reservation exists (no change).
1924 map_chg = gbl_chg = vma_needs_reservation(h, vma, addr);
1926 return ERR_PTR(-ENOMEM);
1929 * Processes that did not create the mapping will have no
1930 * reserves as indicated by the region/reserve map. Check
1931 * that the allocation will not exceed the subpool limit.
1932 * Allocations for MAP_NORESERVE mappings also need to be
1933 * checked against any subpool limit.
1935 if (map_chg || avoid_reserve) {
1936 gbl_chg = hugepage_subpool_get_pages(spool, 1);
1938 vma_end_reservation(h, vma, addr);
1939 return ERR_PTR(-ENOSPC);
1943 * Even though there was no reservation in the region/reserve
1944 * map, there could be reservations associated with the
1945 * subpool that can be used. This would be indicated if the
1946 * return value of hugepage_subpool_get_pages() is zero.
1947 * However, if avoid_reserve is specified we still avoid even
1948 * the subpool reservations.
1954 ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
1956 goto out_subpool_put;
1958 spin_lock(&hugetlb_lock);
1960 * gbl_chg is passed to indicate whether or not a page must be taken
1961 * from the global free pool (global change). gbl_chg == 0 indicates
1962 * a reservation exists for the allocation.
1964 page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
1966 spin_unlock(&hugetlb_lock);
1967 page = __alloc_buddy_huge_page_with_mpol(h, vma, addr);
1969 goto out_uncharge_cgroup;
1970 if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
1971 SetPagePrivate(page);
1972 h->resv_huge_pages--;
1974 spin_lock(&hugetlb_lock);
1975 list_move(&page->lru, &h->hugepage_activelist);
1978 hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
1979 spin_unlock(&hugetlb_lock);
1981 set_page_private(page, (unsigned long)spool);
1983 map_commit = vma_commit_reservation(h, vma, addr);
1984 if (unlikely(map_chg > map_commit)) {
1986 * The page was added to the reservation map between
1987 * vma_needs_reservation and vma_commit_reservation.
1988 * This indicates a race with hugetlb_reserve_pages.
1989 * Adjust for the subpool count incremented above AND
1990 * in hugetlb_reserve_pages for the same page. Also,
1991 * the reservation count added in hugetlb_reserve_pages
1992 * no longer applies.
1996 rsv_adjust = hugepage_subpool_put_pages(spool, 1);
1997 hugetlb_acct_memory(h, -rsv_adjust);
2001 out_uncharge_cgroup:
2002 hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
2004 if (map_chg || avoid_reserve)
2005 hugepage_subpool_put_pages(spool, 1);
2006 vma_end_reservation(h, vma, addr);
2007 return ERR_PTR(-ENOSPC);
2011 * alloc_huge_page()'s wrapper which simply returns the page if allocation
2012 * succeeds, otherwise NULL. This function is called from new_vma_page(),
2013 * where no ERR_VALUE is expected to be returned.
2015 struct page *alloc_huge_page_noerr(struct vm_area_struct *vma,
2016 unsigned long addr, int avoid_reserve)
2018 struct page *page = alloc_huge_page(vma, addr, avoid_reserve);
2024 int __weak alloc_bootmem_huge_page(struct hstate *h)
2026 struct huge_bootmem_page *m;
2029 for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
2032 addr = memblock_virt_alloc_try_nid_nopanic(
2033 huge_page_size(h), huge_page_size(h),
2034 0, BOOTMEM_ALLOC_ACCESSIBLE, node);
2037 * Use the beginning of the huge page to store the
2038 * huge_bootmem_page struct (until gather_bootmem
2039 * puts them into the mem_map).
2048 BUG_ON(!IS_ALIGNED(virt_to_phys(m), huge_page_size(h)));
2049 /* Put them into a private list first because mem_map is not up yet */
2050 list_add(&m->list, &huge_boot_pages);
2055 static void __init prep_compound_huge_page(struct page *page,
2058 if (unlikely(order > (MAX_ORDER - 1)))
2059 prep_compound_gigantic_page(page, order);
2061 prep_compound_page(page, order);
2064 /* Put bootmem huge pages into the standard lists after mem_map is up */
2065 static void __init gather_bootmem_prealloc(void)
2067 struct huge_bootmem_page *m;
2069 list_for_each_entry(m, &huge_boot_pages, list) {
2070 struct hstate *h = m->hstate;
2073 #ifdef CONFIG_HIGHMEM
2074 page = pfn_to_page(m->phys >> PAGE_SHIFT);
2075 memblock_free_late(__pa(m),
2076 sizeof(struct huge_bootmem_page));
2078 page = virt_to_page(m);
2080 WARN_ON(page_count(page) != 1);
2081 prep_compound_huge_page(page, h->order);
2082 WARN_ON(PageReserved(page));
2083 prep_new_huge_page(h, page, page_to_nid(page));
2085 * If we had gigantic hugepages allocated at boot time, we need
2086 * to restore the 'stolen' pages to totalram_pages in order to
2087 * fix confusing memory reports from free(1) and other
2088 * side effects, like CommitLimit going negative.
2090 if (hstate_is_gigantic(h))
2091 adjust_managed_page_count(page, 1 << h->order);
2096 static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
2100 for (i = 0; i < h->max_huge_pages; ++i) {
2101 if (hstate_is_gigantic(h)) {
2102 if (!alloc_bootmem_huge_page(h))
2104 } else if (!alloc_fresh_huge_page(h,
2105 &node_states[N_MEMORY]))
2108 h->max_huge_pages = i;
2111 static void __init hugetlb_init_hstates(void)
2115 for_each_hstate(h) {
2116 if (minimum_order > huge_page_order(h))
2117 minimum_order = huge_page_order(h);
2119 /* oversize hugepages were initialized in early boot */
2120 if (!hstate_is_gigantic(h))
2121 hugetlb_hstate_alloc_pages(h);
2123 VM_BUG_ON(minimum_order == UINT_MAX);
2126 static char * __init memfmt(char *buf, unsigned long n)
2128 if (n >= (1UL << 30))
2129 sprintf(buf, "%lu GB", n >> 30);
2130 else if (n >= (1UL << 20))
2131 sprintf(buf, "%lu MB", n >> 20);
2133 sprintf(buf, "%lu KB", n >> 10);
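/*
 * For example (illustrative): memfmt(buf, 2UL << 20) formats "2 MB" and
 * memfmt(buf, 1UL << 30) formats "1 GB".
 */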
2137 static void __init report_hugepages(void)
2141 for_each_hstate(h) {
2143 pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n",
2144 memfmt(buf, huge_page_size(h)),
2145 h->free_huge_pages);
2149 #ifdef CONFIG_HIGHMEM
2150 static void try_to_free_low(struct hstate *h, unsigned long count,
2151 nodemask_t *nodes_allowed)
2155 if (hstate_is_gigantic(h))
2158 for_each_node_mask(i, *nodes_allowed) {
2159 struct page *page, *next;
2160 struct list_head *freel = &h->hugepage_freelists[i];
2161 list_for_each_entry_safe(page, next, freel, lru) {
2162 if (count >= h->nr_huge_pages)
2164 if (PageHighMem(page))
2166 list_del(&page->lru);
2167 update_and_free_page(h, page);
2168 h->free_huge_pages--;
2169 h->free_huge_pages_node[page_to_nid(page)]--;
2174 static inline void try_to_free_low(struct hstate *h, unsigned long count,
2175 nodemask_t *nodes_allowed)
2181 * Increment or decrement surplus_huge_pages. Keep node-specific counters
2182 * balanced by operating on them in a round-robin fashion.
2183 * Returns 1 if an adjustment was made.
2185 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
2190 VM_BUG_ON(delta != -1 && delta != 1);
2193 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
2194 if (h->surplus_huge_pages_node[node])
2198 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
2199 if (h->surplus_huge_pages_node[node] <
2200 h->nr_huge_pages_node[node])
2207 h->surplus_huge_pages += delta;
2208 h->surplus_huge_pages_node[node] += delta;
2212 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
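/*
 * Grow or shrink the persistent huge page pool of @h to @count pages,
 * allocating and freeing only on the nodes in @nodes_allowed. Surplus
 * pages are converted or created as needed so reservations stay
 * satisfied. Returns the resulting number of persistent huge pages.
 */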
2213 static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
2214 nodemask_t *nodes_allowed)
2216 unsigned long min_count, ret;
2218 if (hstate_is_gigantic(h) && !gigantic_page_supported())
2219 return h->max_huge_pages;
2222 * Increase the pool size
2223 * First take pages out of surplus state. Then make up the
2224 * remaining difference by allocating fresh huge pages.
2226 * We might race with __alloc_buddy_huge_page() here and be unable
2227 * to convert a surplus huge page to a normal huge page. That is
2228 * not critical, though, it just means the overall size of the
2229 * pool might be one hugepage larger than it needs to be, but
2230 * within all the constraints specified by the sysctls.
2232 spin_lock(&hugetlb_lock);
2233 while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
2234 if (!adjust_pool_surplus(h, nodes_allowed, -1))
2238 while (count > persistent_huge_pages(h)) {
2240 * If this allocation races such that we no longer need the
2241 * page, free_huge_page will handle it by freeing the page
2242 * and reducing the surplus.
2244 spin_unlock(&hugetlb_lock);
2246 /* yield cpu to avoid soft lockup */
2249 if (hstate_is_gigantic(h))
2250 ret = alloc_fresh_gigantic_page(h, nodes_allowed);
2252 ret = alloc_fresh_huge_page(h, nodes_allowed);
2253 spin_lock(&hugetlb_lock);
2257 /* Bail for signals. Probably ctrl-c from user */
2258 if (signal_pending(current))
2263 * Decrease the pool size
2264 * First return free pages to the buddy allocator (being careful
2265 * to keep enough around to satisfy reservations). Then place
2266 * pages into surplus state as needed so the pool will shrink
2267 * to the desired size as pages become free.
2269 * By placing pages into the surplus state independent of the
2270 * overcommit value, we are allowing the surplus pool size to
2271 * exceed overcommit. There are few sane options here. Since
2272 * __alloc_buddy_huge_page() is checking the global counter,
2273 * though, we'll note that we're not allowed to exceed surplus
2274 * and won't grow the pool anywhere else. Not until one of the
2275 * sysctls are changed, or the surplus pages go out of use.
2277 min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
2278 min_count = max(count, min_count);
2279 try_to_free_low(h, min_count, nodes_allowed);
2280 while (min_count < persistent_huge_pages(h)) {
2281 if (!free_pool_huge_page(h, nodes_allowed, 0))
2283 cond_resched_lock(&hugetlb_lock);
2285 while (count < persistent_huge_pages(h)) {
2286 if (!adjust_pool_surplus(h, nodes_allowed, 1))
2290 ret = persistent_huge_pages(h);
2291 spin_unlock(&hugetlb_lock);
2295 #define HSTATE_ATTR_RO(_name) \
2296 static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
2298 #define HSTATE_ATTR(_name) \
2299 static struct kobj_attribute _name##_attr = \
2300 __ATTR(_name, 0644, _name##_show, _name##_store)
2302 static struct kobject *hugepages_kobj;
2303 static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
2305 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
2307 static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
2311 for (i = 0; i < HUGE_MAX_HSTATE; i++)
2312 if (hstate_kobjs[i] == kobj) {
2314 *nidp = NUMA_NO_NODE;
2318 return kobj_to_node_hstate(kobj, nidp);
2321 static ssize_t nr_hugepages_show_common(struct kobject *kobj,
2322 struct kobj_attribute *attr, char *buf)
2325 unsigned long nr_huge_pages;
2328 h = kobj_to_hstate(kobj, &nid);
2329 if (nid == NUMA_NO_NODE)
2330 nr_huge_pages = h->nr_huge_pages;
2332 nr_huge_pages = h->nr_huge_pages_node[nid];
2334 return sprintf(buf, "%lu\n", nr_huge_pages);
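/*
 * Common handler for nr_hugepages updates from sysfs and sysctl: build
 * the nodemask to operate on (the single node for a per-node attribute,
 * the task's mempolicy nodes when @obey_mempolicy, or all memory nodes
 * otherwise) and pass it to set_max_huge_pages().
 */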
2337 static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
2338 struct hstate *h, int nid,
2339 unsigned long count, size_t len)
2342 NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
2344 if (hstate_is_gigantic(h) && !gigantic_page_supported()) {
2349 if (nid == NUMA_NO_NODE) {
2351 * global hstate attribute
2353 if (!(obey_mempolicy &&
2354 init_nodemask_of_mempolicy(nodes_allowed))) {
2355 NODEMASK_FREE(nodes_allowed);
2356 nodes_allowed = &node_states[N_MEMORY];
2358 } else if (nodes_allowed) {
2360 * per node hstate attribute: adjust count to global,
2361 * but restrict alloc/free to the specified node.
2363 count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
2364 init_nodemask_of_node(nodes_allowed, nid);
2366 nodes_allowed = &node_states[N_MEMORY];
2368 h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
2370 if (nodes_allowed != &node_states[N_MEMORY])
2371 NODEMASK_FREE(nodes_allowed);
2375 NODEMASK_FREE(nodes_allowed);
2379 static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
2380 struct kobject *kobj, const char *buf,
2384 unsigned long count;
2388 err = kstrtoul(buf, 10, &count);
2392 h = kobj_to_hstate(kobj, &nid);
2393 return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
2396 static ssize_t nr_hugepages_show(struct kobject *kobj,
2397 struct kobj_attribute *attr, char *buf)
2399 return nr_hugepages_show_common(kobj, attr, buf);
2402 static ssize_t nr_hugepages_store(struct kobject *kobj,
2403 struct kobj_attribute *attr, const char *buf, size_t len)
2405 return nr_hugepages_store_common(false, kobj, buf, len);
2407 HSTATE_ATTR(nr_hugepages);
2412 * hstate attribute for optionally mempolicy-based constraint on persistent
2413 * huge page alloc/free.
2415 static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
2416 struct kobj_attribute *attr, char *buf)
2418 return nr_hugepages_show_common(kobj, attr, buf);
2421 static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
2422 struct kobj_attribute *attr, const char *buf, size_t len)
2424 return nr_hugepages_store_common(true, kobj, buf, len);
2426 HSTATE_ATTR(nr_hugepages_mempolicy);
2430 static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
2431 struct kobj_attribute *attr, char *buf)
2433 struct hstate *h = kobj_to_hstate(kobj, NULL);
2434 return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
2437 static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
2438 struct kobj_attribute *attr, const char *buf, size_t count)
2441 unsigned long input;
2442 struct hstate *h = kobj_to_hstate(kobj, NULL);
2444 if (hstate_is_gigantic(h))
2447 err = kstrtoul(buf, 10, &input);
2451 spin_lock(&hugetlb_lock);
2452 h->nr_overcommit_huge_pages = input;
2453 spin_unlock(&hugetlb_lock);
2457 HSTATE_ATTR(nr_overcommit_hugepages);
2459 static ssize_t free_hugepages_show(struct kobject *kobj,
2460 struct kobj_attribute *attr, char *buf)
2463 unsigned long free_huge_pages;
2466 h = kobj_to_hstate(kobj, &nid);
2467 if (nid == NUMA_NO_NODE)
2468 free_huge_pages = h->free_huge_pages;
2470 free_huge_pages = h->free_huge_pages_node[nid];
2472 return sprintf(buf, "%lu\n", free_huge_pages);
2474 HSTATE_ATTR_RO(free_hugepages);
2476 static ssize_t resv_hugepages_show(struct kobject *kobj,
2477 struct kobj_attribute *attr, char *buf)
2479 struct hstate *h = kobj_to_hstate(kobj, NULL);
2480 return sprintf(buf, "%lu\n", h->resv_huge_pages);
2482 HSTATE_ATTR_RO(resv_hugepages);
2484 static ssize_t surplus_hugepages_show(struct kobject *kobj,
2485 struct kobj_attribute *attr, char *buf)
2488 unsigned long surplus_huge_pages;
2491 h = kobj_to_hstate(kobj, &nid);
2492 if (nid == NUMA_NO_NODE)
2493 surplus_huge_pages = h->surplus_huge_pages;
2495 surplus_huge_pages = h->surplus_huge_pages_node[nid];
2497 return sprintf(buf, "%lu\n", surplus_huge_pages);
2499 HSTATE_ATTR_RO(surplus_hugepages);
2501 static struct attribute *hstate_attrs[] = {
2502 &nr_hugepages_attr.attr,
2503 &nr_overcommit_hugepages_attr.attr,
2504 &free_hugepages_attr.attr,
2505 &resv_hugepages_attr.attr,
2506 &surplus_hugepages_attr.attr,
2508 &nr_hugepages_mempolicy_attr.attr,
2513 static struct attribute_group hstate_attr_group = {
2514 .attrs = hstate_attrs,
2517 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
2518 struct kobject **hstate_kobjs,
2519 struct attribute_group *hstate_attr_group)
2522 int hi = hstate_index(h);
2524 hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
2525 if (!hstate_kobjs[hi])
2528 retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
2530 kobject_put(hstate_kobjs[hi]);
2531 hstate_kobjs[hi] = NULL;
2537 static void __init hugetlb_sysfs_init(void)
2542 hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
2543 if (!hugepages_kobj)
2546 for_each_hstate(h) {
2547 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
2548 hstate_kobjs, &hstate_attr_group);
2550 pr_err("Hugetlb: Unable to add hstate %s\n", h->name);
2557 * node_hstate/s - associate per node hstate attributes, via their kobjects,
2558 * with node devices in node_devices[] using a parallel array. The array
2559 * index of a node device or _hstate == node id.
2560 * This is here to avoid any static dependency of the node device driver, in
2561 * the base kernel, on the hugetlb module.
2563 struct node_hstate {
2564 struct kobject *hugepages_kobj;
2565 struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
2567 static struct node_hstate node_hstates[MAX_NUMNODES];
2570 * A subset of global hstate attributes for node devices
2572 static struct attribute *per_node_hstate_attrs[] = {
2573 &nr_hugepages_attr.attr,
2574 &free_hugepages_attr.attr,
2575 &surplus_hugepages_attr.attr,
2579 static struct attribute_group per_node_hstate_attr_group = {
2580 .attrs = per_node_hstate_attrs,
2584 * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
2585 * Returns node id via non-NULL nidp.
2587 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2591 for (nid = 0; nid < nr_node_ids; nid++) {
2592 struct node_hstate *nhs = &node_hstates[nid];
2594 for (i = 0; i < HUGE_MAX_HSTATE; i++)
2595 if (nhs->hstate_kobjs[i] == kobj) {
2607 * Unregister hstate attributes from a single node device.
2608 * No-op if no hstate attributes attached.
2610 static void hugetlb_unregister_node(struct node *node)
2613 struct node_hstate *nhs = &node_hstates[node->dev.id];
2615 if (!nhs->hugepages_kobj)
2616 return; /* no hstate attributes */
2618 for_each_hstate(h) {
2619 int idx = hstate_index(h);
2620 if (nhs->hstate_kobjs[idx]) {
2621 kobject_put(nhs->hstate_kobjs[idx]);
2622 nhs->hstate_kobjs[idx] = NULL;
2626 kobject_put(nhs->hugepages_kobj);
2627 nhs->hugepages_kobj = NULL;
2631 * hugetlb module exit: unregister hstate attributes from node devices
2634 static void hugetlb_unregister_all_nodes(void)
2639 * disable node device registrations.
2641 register_hugetlbfs_with_node(NULL, NULL);
2644 * remove hstate attributes from any nodes that have them.
2646 for (nid = 0; nid < nr_node_ids; nid++)
2647 hugetlb_unregister_node(node_devices[nid]);
2651 * Register hstate attributes for a single node device.
2652 * No-op if attributes already registered.
2654 static void hugetlb_register_node(struct node *node)
2657 struct node_hstate *nhs = &node_hstates[node->dev.id];
2660 if (nhs->hugepages_kobj)
2661 return; /* already allocated */
2663 nhs->hugepages_kobj = kobject_create_and_add("hugepages",
2665 if (!nhs->hugepages_kobj)
2668 for_each_hstate(h) {
2669 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
2671 &per_node_hstate_attr_group);
2673 pr_err("Hugetlb: Unable to add hstate %s for node %d\n",
2674 h->name, node->dev.id);
2675 hugetlb_unregister_node(node);
2682 * hugetlb init time: register hstate attributes for all registered node
2683 * devices of nodes that have memory. All on-line nodes should have
2684 * registered their associated device by this time.
2686 static void __init hugetlb_register_all_nodes(void)
2690 for_each_node_state(nid, N_MEMORY) {
2691 struct node *node = node_devices[nid];
2692 if (node->dev.id == nid)
2693 hugetlb_register_node(node);
2697 * Let the node device driver know we're here so it can
2698 * [un]register hstate attributes on node hotplug.
2700 register_hugetlbfs_with_node(hugetlb_register_node,
2701 hugetlb_unregister_node);
2703 #else /* !CONFIG_NUMA */
2705 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2713 static void hugetlb_unregister_all_nodes(void) { }
2715 static void hugetlb_register_all_nodes(void) { }
2719 static void __exit hugetlb_exit(void)
2723 hugetlb_unregister_all_nodes();
2725 for_each_hstate(h) {
2726 kobject_put(hstate_kobjs[hstate_index(h)]);
2729 kobject_put(hugepages_kobj);
2730 kfree(hugetlb_fault_mutex_table);
2732 module_exit(hugetlb_exit);
2734 static int __init hugetlb_init(void)
2738 if (!hugepages_supported())
2741 if (!size_to_hstate(default_hstate_size)) {
2742 default_hstate_size = HPAGE_SIZE;
2743 if (!size_to_hstate(default_hstate_size))
2744 hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
2746 default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size));
2747 if (default_hstate_max_huge_pages)
2748 default_hstate.max_huge_pages = default_hstate_max_huge_pages;
2750 hugetlb_init_hstates();
2751 gather_bootmem_prealloc();
2754 hugetlb_sysfs_init();
2755 hugetlb_register_all_nodes();
2756 hugetlb_cgroup_file_init();
2759 num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
2761 num_fault_mutexes = 1;
2763 hugetlb_fault_mutex_table =
2764 kmalloc(sizeof(struct mutex) * num_fault_mutexes, GFP_KERNEL);
2765 BUG_ON(!hugetlb_fault_mutex_table);
2767 for (i = 0; i < num_fault_mutexes; i++)
2768 mutex_init(&hugetlb_fault_mutex_table[i]);
2771 module_init(hugetlb_init);
2773 /* Should be called on processing a hugepagesz=... option */
2774 void __init hugetlb_add_hstate(unsigned int order)
2779 if (size_to_hstate(PAGE_SIZE << order)) {
2780 pr_warning("hugepagesz= specified twice, ignoring\n");
2783 BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
2785 h = &hstates[hugetlb_max_hstate++];
2787 h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
2788 h->nr_huge_pages = 0;
2789 h->free_huge_pages = 0;
2790 for (i = 0; i < MAX_NUMNODES; ++i)
2791 INIT_LIST_HEAD(&h->hugepage_freelists[i]);
2792 INIT_LIST_HEAD(&h->hugepage_activelist);
2793 h->next_nid_to_alloc = first_node(node_states[N_MEMORY]);
2794 h->next_nid_to_free = first_node(node_states[N_MEMORY]);
2795 snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
2796 huge_page_size(h)/1024);
2801 static int __init hugetlb_nrpages_setup(char *s)
2804 static unsigned long *last_mhp;
2807 * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter yet,
2808 * so this hugepages= parameter goes to the "default hstate".
2810 if (!hugetlb_max_hstate)
2811 mhp = &default_hstate_max_huge_pages;
2813 mhp = &parsed_hstate->max_huge_pages;
2815 if (mhp == last_mhp) {
2816 pr_warning("hugepages= specified twice without "
2817 "interleaving hugepagesz=, ignoring\n");
2821 if (sscanf(s, "%lu", mhp) <= 0)
2825 * Global state is always initialized later in hugetlb_init.
2826 * But we need to allocate >= MAX_ORDER hstates here early to still
2827 * use the bootmem allocator.
2829 if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
2830 hugetlb_hstate_alloc_pages(parsed_hstate);
2836 __setup("hugepages=", hugetlb_nrpages_setup);
2838 static int __init hugetlb_default_setup(char *s)
2840 default_hstate_size = memparse(s, &s);
2843 __setup("default_hugepagesz=", hugetlb_default_setup);
2845 static unsigned int cpuset_mems_nr(unsigned int *array)
2848 unsigned int nr = 0;
2850 for_each_node_mask(node, cpuset_current_mems_allowed)
2856 #ifdef CONFIG_SYSCTL
2857 static int proc_hugetlb_doulongvec_minmax(struct ctl_table *table, int write,
2858 void *buffer, size_t *length,
2859 loff_t *ppos, unsigned long *out)
2861 struct ctl_table dup_table;
2864 * In order to avoid races with __do_proc_doulongvec_minmax(), we
2865 * can duplicate the @table and alter the duplicate of it.
2868 dup_table.data = out;
2870 return proc_doulongvec_minmax(&dup_table, write, buffer, length, ppos);
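/*
 * Common code for the nr_hugepages and nr_hugepages_mempolicy sysctls:
 * read the default hstate's max_huge_pages, or on write push the new
 * value through __nr_hugepages_store_common().
 */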
2873 static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
2874 struct ctl_table *table, int write,
2875 void __user *buffer, size_t *length, loff_t *ppos)
2877 struct hstate *h = &default_hstate;
2878 unsigned long tmp = h->max_huge_pages;
2881 if (!hugepages_supported())
2884 ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
2890 ret = __nr_hugepages_store_common(obey_mempolicy, h,
2891 NUMA_NO_NODE, tmp, *length);
2896 int hugetlb_sysctl_handler(struct ctl_table *table, int write,
2897 void __user *buffer, size_t *length, loff_t *ppos)
2900 return hugetlb_sysctl_handler_common(false, table, write,
2901 buffer, length, ppos);
2905 int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
2906 void __user *buffer, size_t *length, loff_t *ppos)
2908 return hugetlb_sysctl_handler_common(true, table, write,
2909 buffer, length, ppos);
2911 #endif /* CONFIG_NUMA */
2913 int hugetlb_overcommit_handler(struct ctl_table *table, int write,
2914 void __user *buffer,
2915 size_t *length, loff_t *ppos)
2917 struct hstate *h = &default_hstate;
2921 if (!hugepages_supported())
2924 tmp = h->nr_overcommit_huge_pages;
2926 if (write && hstate_is_gigantic(h))
2929 ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
2935 spin_lock(&hugetlb_lock);
2936 h->nr_overcommit_huge_pages = tmp;
2937 spin_unlock(&hugetlb_lock);
2943 #endif /* CONFIG_SYSCTL */
2945 void hugetlb_report_meminfo(struct seq_file *m)
2947 struct hstate *h = &default_hstate;
2948 if (!hugepages_supported())
2951 "HugePages_Total: %5lu\n"
2952 "HugePages_Free: %5lu\n"
2953 "HugePages_Rsvd: %5lu\n"
2954 "HugePages_Surp: %5lu\n"
2955 "Hugepagesize: %8lu kB\n",
2959 h->surplus_huge_pages,
2960 1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
2963 int hugetlb_report_node_meminfo(int nid, char *buf)
2965 struct hstate *h = &default_hstate;
2966 if (!hugepages_supported())
2969 "Node %d HugePages_Total: %5u\n"
2970 "Node %d HugePages_Free: %5u\n"
2971 "Node %d HugePages_Surp: %5u\n",
2972 nid, h->nr_huge_pages_node[nid],
2973 nid, h->free_huge_pages_node[nid],
2974 nid, h->surplus_huge_pages_node[nid]);
2977 void hugetlb_show_meminfo(void)
2982 if (!hugepages_supported())
2985 for_each_node_state(nid, N_MEMORY)
2987 pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
2989 h->nr_huge_pages_node[nid],
2990 h->free_huge_pages_node[nid],
2991 h->surplus_huge_pages_node[nid],
2992 1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
2995 void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
2997 seq_printf(m, "HugetlbPages:\t%8lu kB\n",
2998 atomic_long_read(&mm->hugetlb_usage) << (PAGE_SHIFT - 10));
3001 /* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
3002 unsigned long hugetlb_total_pages(void)
3005 unsigned long nr_total_pages = 0;
3008 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
3009 return nr_total_pages;
3012 static int hugetlb_acct_memory(struct hstate *h, long delta)
3016 spin_lock(&hugetlb_lock);
3018 * When cpuset is configured, it breaks the strict hugetlb page
3019 * reservation as the accounting is done on a global variable. Such
3020 * reservation is completely rubbish in the presence of cpuset because
3021 * the reservation is not checked against page availability for the
3022 * current cpuset. The application can still potentially be OOM'ed by the
3023 * kernel due to a lack of free htlb pages in the cpuset the task is in.
3024 * Attempting to enforce strict accounting with cpuset is almost
3025 * impossible (or too ugly) because cpuset is so fluid that a task or
3026 * memory node can be dynamically moved between cpusets.
3028 * The change of semantics for shared hugetlb mapping with cpuset is
3029 * undesirable. However, in order to preserve some of the semantics,
3030 * we fall back to check against current free page availability as
3031 * a best attempt and hopefully to minimize the impact of changing
3032 * semantics that cpuset has.
3035 if (gather_surplus_pages(h, delta) < 0)
3038 if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
3039 return_unused_surplus_pages(h, delta);
3046 return_unused_surplus_pages(h, (unsigned long) -delta);
3049 spin_unlock(&hugetlb_lock);
3053 static void hugetlb_vm_op_open(struct vm_area_struct *vma)
3055 struct resv_map *resv = vma_resv_map(vma);
3058 * This new VMA should share its sibling's reservation map if present.
3059 * The VMA will only ever have a valid reservation map pointer where
3060 * it is being copied for another still existing VMA. As that VMA
3061 * has a reference to the reservation map it cannot disappear until
3062 * after this open call completes. It is therefore safe to take a
3063 * new reference here without additional locking.
3065 if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
3066 kref_get(&resv->refs);
3069 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
3071 struct hstate *h = hstate_vma(vma);
3072 struct resv_map *resv = vma_resv_map(vma);
3073 struct hugepage_subpool *spool = subpool_vma(vma);
3074 unsigned long reserve, start, end;
3077 if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
3080 start = vma_hugecache_offset(h, vma, vma->vm_start);
3081 end = vma_hugecache_offset(h, vma, vma->vm_end);
3083 reserve = (end - start) - region_count(resv, start, end);
3085 kref_put(&resv->refs, resv_map_release);
3089 * Decrement reserve counts. The global reserve count may be
3090 * adjusted if the subpool has a minimum size.
3092 gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
3093 hugetlb_acct_memory(h, -gbl_reserve);
3098 * We cannot handle pagefaults against hugetlb pages at all. They cause
3099 * handle_mm_fault() to try to instantiate regular-sized pages in the
3100 * huge page VMA. do_page_fault() is supposed to trap this, so BUG if we get
3103 static int hugetlb_vm_op_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
3109 const struct vm_operations_struct hugetlb_vm_ops = {
3110 .fault = hugetlb_vm_op_fault,
3111 .open = hugetlb_vm_op_open,
3112 .close = hugetlb_vm_op_close,
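/*
 * Build the huge PTE for mapping @page into @vma: writable mappings get
 * a dirty, writable entry, read-only ones a write-protected entry; both
 * are marked young and huge and passed through arch_make_huge_pte() for
 * any architecture-specific adjustments.
 */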
3115 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
3121 entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
3122 vma->vm_page_prot)));
3124 entry = huge_pte_wrprotect(mk_huge_pte(page,
3125 vma->vm_page_prot));
3127 entry = pte_mkyoung(entry);
3128 entry = pte_mkhuge(entry);
3129 entry = arch_make_huge_pte(entry, vma, page, writable);
3134 static void set_huge_ptep_writable(struct vm_area_struct *vma,
3135 unsigned long address, pte_t *ptep)
3139 entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
3140 if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
3141 update_mmu_cache(vma, address, ptep);
3144 static int is_hugetlb_entry_migration(pte_t pte)
3148 if (huge_pte_none(pte) || pte_present(pte))
3150 swp = pte_to_swp_entry(pte);
3151 if (non_swap_entry(swp) && is_migration_entry(swp))
3157 static int is_hugetlb_entry_hwpoisoned(pte_t pte)
3161 if (huge_pte_none(pte) || pte_present(pte))
3163 swp = pte_to_swp_entry(pte);
3164 if (non_swap_entry(swp) && is_hwpoison_entry(swp))
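/*
 * Copy the hugetlb page table entries of @vma from @src to @dst at fork
 * time. Shared page tables are skipped; present entries are duplicated
 * (and write-protected in both mms for private COW mappings), and swap
 * entries are copied with write migration entries downgraded to read.
 */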
3170 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
3171 struct vm_area_struct *vma)
3173 pte_t *src_pte, *dst_pte, entry, dst_entry;
3174 struct page *ptepage;
3177 struct hstate *h = hstate_vma(vma);
3178 unsigned long sz = huge_page_size(h);
3179 unsigned long mmun_start; /* For mmu_notifiers */
3180 unsigned long mmun_end; /* For mmu_notifiers */
3183 cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
3185 mmun_start = vma->vm_start;
3186 mmun_end = vma->vm_end;
3188 mmu_notifier_invalidate_range_start(src, mmun_start, mmun_end);
3190 for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
3191 spinlock_t *src_ptl, *dst_ptl;
3192 src_pte = huge_pte_offset(src, addr);
3195 dst_pte = huge_pte_alloc(dst, addr, sz);
3202 * If the pagetables are shared don't copy or take references.
3203 * dst_pte == src_pte is the common case of src/dest sharing.
3205 * However, src could have 'unshared' and dst shares with
3206 * another vma. If dst_pte !none, this implies sharing.
3207 * Check here before taking page table lock, and once again
3208 * after taking the lock below.
3210 dst_entry = huge_ptep_get(dst_pte);
3211 if ((dst_pte == src_pte) || !huge_pte_none(dst_entry))
3214 dst_ptl = huge_pte_lock(h, dst, dst_pte);
3215 src_ptl = huge_pte_lockptr(h, src, src_pte);
3216 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
3217 entry = huge_ptep_get(src_pte);
3218 dst_entry = huge_ptep_get(dst_pte);
3219 if (huge_pte_none(entry) || !huge_pte_none(dst_entry)) {
3221 * Skip if src entry none. Also, skip in the
3222 * unlikely case dst entry !none as this implies
3223 * sharing with another vma.
3226 } else if (unlikely(is_hugetlb_entry_migration(entry) ||
3227 is_hugetlb_entry_hwpoisoned(entry))) {
3228 swp_entry_t swp_entry = pte_to_swp_entry(entry);
3230 if (is_write_migration_entry(swp_entry) && cow) {
3232 * COW mappings require pages in both
3233 * parent and child to be set to read.
3235 make_migration_entry_read(&swp_entry);
3236 entry = swp_entry_to_pte(swp_entry);
3237 set_huge_pte_at(src, addr, src_pte, entry);
3239 set_huge_pte_at(dst, addr, dst_pte, entry);
3242 huge_ptep_set_wrprotect(src, addr, src_pte);
3243 mmu_notifier_invalidate_range(src, mmun_start,
3246 entry = huge_ptep_get(src_pte);
3247 ptepage = pte_page(entry);
3249 page_dup_rmap(ptepage);
3250 set_huge_pte_at(dst, addr, dst_pte, entry);
3251 hugetlb_count_add(pages_per_huge_page(h), dst);
3253 spin_unlock(src_ptl);
3254 spin_unlock(dst_ptl);
3258 mmu_notifier_invalidate_range_end(src, mmun_start, mmun_end);
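/*
 * Unmap the hugetlb pages of @vma in [@start, @end), batching TLB
 * invalidation through @tlb. If @ref_page is supplied, only that
 * specific page is unmapped and the walk stops once it has been handled.
 */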
3263 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
3264 unsigned long start, unsigned long end,
3265 struct page *ref_page)
3267 int force_flush = 0;
3268 struct mm_struct *mm = vma->vm_mm;
3269 unsigned long address;
3274 struct hstate *h = hstate_vma(vma);
3275 unsigned long sz = huge_page_size(h);
3276 const unsigned long mmun_start = start; /* For mmu_notifiers */
3277 const unsigned long mmun_end = end; /* For mmu_notifiers */
3279 WARN_ON(!is_vm_hugetlb_page(vma));
3280 BUG_ON(start & ~huge_page_mask(h));
3281 BUG_ON(end & ~huge_page_mask(h));
3283 tlb_start_vma(tlb, vma);
3284 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
3287 for (; address < end; address += sz) {
3288 ptep = huge_pte_offset(mm, address);
3292 ptl = huge_pte_lock(h, mm, ptep);
3293 if (huge_pmd_unshare(mm, &address, ptep))
3296 pte = huge_ptep_get(ptep);
3297 if (huge_pte_none(pte))
3301 * Migrating hugepage or HWPoisoned hugepage is already
3302 * unmapped and its refcount is dropped, so just clear pte here.
3304 if (unlikely(!pte_present(pte))) {
3305 huge_pte_clear(mm, address, ptep);
3309 page = pte_page(pte);
3311 * If a reference page is supplied, it is because a specific
3312 * page is being unmapped, not a range. Ensure the page we
3313 * are about to unmap is the actual page of interest.
3316 if (page != ref_page)
3320 * Mark the VMA as having unmapped its page so that
3321 * future faults in this VMA will fail rather than
3322 * looking like data was lost
3324 set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
3327 pte = huge_ptep_get_and_clear(mm, address, ptep);
3328 tlb_remove_tlb_entry(tlb, ptep, address);
3329 if (huge_pte_dirty(pte))
3330 set_page_dirty(page);
3332 hugetlb_count_sub(pages_per_huge_page(h), mm);
3333 page_remove_rmap(page);
3334 force_flush = !__tlb_remove_page(tlb, page);
3340 /* Bail out after unmapping reference page if supplied */
3349 * mmu_gather ran out of room to batch pages, we break out of
3350 * the PTE lock to avoid doing the potentially expensive TLB invalidate
3351 * and page-free while holding it.
3356 if (address < end && !ref_page)
3359 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
3360 tlb_end_vma(tlb, vma);
3363 void __unmap_hugepage_range_final(struct mmu_gather *tlb,
3364 struct vm_area_struct *vma, unsigned long start,
3365 unsigned long end, struct page *ref_page)
3367 __unmap_hugepage_range(tlb, vma, start, end, ref_page);
3370 * Clear this flag so that x86's huge_pmd_share page_table_shareable
3371 * test will fail on a vma being torn down, and not grab a page table
3372 * on its way out. We're lucky that the flag has such an appropriate
3373 * name, and can in fact be safely cleared here. We could clear it
3374 * before the __unmap_hugepage_range above, but all that's necessary
3375 * is to clear it before releasing the i_mmap_rwsem. This works
3376 * because in the context this is called, the VMA is about to be
3377 * destroyed and the i_mmap_rwsem is held.
3379 vma->vm_flags &= ~VM_MAYSHARE;
3382 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
3383 unsigned long end, struct page *ref_page)
3385 struct mm_struct *mm;
3386 struct mmu_gather tlb;
3390 tlb_gather_mmu(&tlb, mm, start, end);
3391 __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
3392 tlb_finish_mmu(&tlb, start, end);
3396 * This is called when the original mapper is failing to COW a MAP_PRIVATE
3397 * mapping it owns the reserve page for. The intention is to unmap the page
3398 * from other VMAs and let the children be SIGKILLed if they are faulting the same region.
3401 static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
3402 struct page *page, unsigned long address)
3404 struct hstate *h = hstate_vma(vma);
3405 struct vm_area_struct *iter_vma;
3406 struct address_space *mapping;
3410 * vm_pgoff is in PAGE_SIZE units, hence the different calculation
3411 * from page cache lookup which is in HPAGE_SIZE units.
3413 address = address & huge_page_mask(h);
3414 pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
3416 mapping = file_inode(vma->vm_file)->i_mapping;
3419 * Take the mapping lock for the duration of the table walk. As
3420 * this mapping should be shared between all the VMAs,
3421 * __unmap_hugepage_range() is called as the lock is already held
3423 i_mmap_lock_write(mapping);
3424 vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
3425 /* Do not unmap the current VMA */
3426 if (iter_vma == vma)
3430 * Shared VMAs have their own reserves and do not affect
3431 * MAP_PRIVATE accounting but it is possible that a shared
3432 * VMA is using the same page so check and skip such VMAs.
3434 if (iter_vma->vm_flags & VM_MAYSHARE)
3438 * Unmap the page from other VMAs without their own reserves.
3439 * They get marked to be SIGKILLed if they fault in these
3440 * areas. This is because a future no-page fault on this VMA
3441 * could insert a zeroed page instead of the data existing
3442 * from the time of fork. This would look like data corruption
3444 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
3445 unmap_hugepage_range(iter_vma, address,
3446 address + huge_page_size(h), page);
3448 i_mmap_unlock_write(mapping);
3452 * Hugetlb_cow() should be called with page lock of the original hugepage held.
3453 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
3454 * cannot race with other handlers or page migration.
3455 * Keep the pte_same checks anyway to make transition from the mutex easier.
3457 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
3458 unsigned long address, pte_t *ptep, pte_t pte,
3459 struct page *pagecache_page, spinlock_t *ptl)
3461 struct hstate *h = hstate_vma(vma);
3462 struct page *old_page, *new_page;
3463 int ret = 0, outside_reserve = 0;
3464 unsigned long mmun_start; /* For mmu_notifiers */
3465 unsigned long mmun_end; /* For mmu_notifiers */
3467 old_page = pte_page(pte);
3470 /* If no-one else is actually using this page, avoid the copy
3471 * and just make the page writable */
3472 if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
3473 page_move_anon_rmap(old_page, vma, address);
3474 set_huge_ptep_writable(vma, address, ptep);
3479 * If the process that created a MAP_PRIVATE mapping is about to
3480 * perform a COW due to a shared page count, attempt to satisfy
3481 * the allocation without using the existing reserves. The pagecache
3482 * page is used to determine if the reserve at this address was
3483 * consumed or not. If reserves were used, a partial faulted mapping
3484 * at the time of fork() could consume its reserves on COW instead
3485 * of the full address range.
3487 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
3488 old_page != pagecache_page)
3489 outside_reserve = 1;
3491 page_cache_get(old_page);
3494 * Drop page table lock as buddy allocator may be called. It will
3495 * be acquired again before returning to the caller, as expected.
3498 new_page = alloc_huge_page(vma, address, outside_reserve);
3500 if (IS_ERR(new_page)) {
3502 * If a process owning a MAP_PRIVATE mapping fails to COW,
3503 * it is due to references held by a child and an insufficient
3504 * huge page pool. To guarantee the original mapper's
3505 * reliability, unmap the page from child processes. The child
3506 * may get SIGKILLed if it later faults.
3508 if (outside_reserve) {
3509 page_cache_release(old_page);
3510 BUG_ON(huge_pte_none(pte));
3511 unmap_ref_private(mm, vma, old_page, address);
3512 BUG_ON(huge_pte_none(pte));
3514 ptep = huge_pte_offset(mm, address & huge_page_mask(h));
3516 pte_same(huge_ptep_get(ptep), pte)))
3517 goto retry_avoidcopy;
3519 * race occurs while re-acquiring page table
3520 * lock, and our job is done.
3525 ret = (PTR_ERR(new_page) == -ENOMEM) ?
3526 VM_FAULT_OOM : VM_FAULT_SIGBUS;
3527 goto out_release_old;
3531 * When the original hugepage is a shared one, it does not have
3532 * an anon_vma prepared.
3534 if (unlikely(anon_vma_prepare(vma))) {
3536 goto out_release_all;
3539 copy_user_huge_page(new_page, old_page, address, vma,
3540 pages_per_huge_page(h));
3541 __SetPageUptodate(new_page);
3543 mmun_start = address & huge_page_mask(h);
3544 mmun_end = mmun_start + huge_page_size(h);
3545 mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
3548 * Retake the page table lock to check for racing updates
3549 * before the page tables are altered
3552 ptep = huge_pte_offset(mm, address & huge_page_mask(h));
3553 if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
3554 ClearPagePrivate(new_page);
3557 huge_ptep_clear_flush(vma, address, ptep);
3558 mmu_notifier_invalidate_range(mm, mmun_start, mmun_end);
3559 set_huge_pte_at(mm, address, ptep,
3560 make_huge_pte(vma, new_page, 1));
3561 page_remove_rmap(old_page);
3562 hugepage_add_new_anon_rmap(new_page, vma, address);
3563 set_page_huge_active(new_page);
3564 /* Make the old page be freed below */
3565 new_page = old_page;
3568 mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
3570 page_cache_release(new_page);
3572 page_cache_release(old_page);
3574 spin_lock(ptl); /* Caller expects lock to be held */
3578 /* Return the pagecache page at a given address within a VMA */
3579 static struct page *hugetlbfs_pagecache_page(struct hstate *h,
3580 struct vm_area_struct *vma, unsigned long address)
3582 struct address_space *mapping;
3585 mapping = vma->vm_file->f_mapping;
3586 idx = vma_hugecache_offset(h, vma, address);
3588 return find_lock_page(mapping, idx);
3592 * Return whether there is a pagecache page to back the given address within the VMA.
3593 * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
3595 static bool hugetlbfs_pagecache_present(struct hstate *h,
3596 struct vm_area_struct *vma, unsigned long address)
3598 struct address_space *mapping;
3602 mapping = vma->vm_file->f_mapping;
3603 idx = vma_hugecache_offset(h, vma, address);
3605 page = find_get_page(mapping, idx);
3608 return page != NULL;
3611 int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
3614 struct inode *inode = mapping->host;
3615 struct hstate *h = hstate_inode(inode);
3616 int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
3620 ClearPagePrivate(page);
3623 * set page dirty so that it will not be removed from cache/file
3624 * by non-hugetlbfs specific code paths.
3626 set_page_dirty(page);
3628 spin_lock(&inode->i_lock);
3629 inode->i_blocks += blocks_per_huge_page(h);
3630 spin_unlock(&inode->i_lock);
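/*
 * Handle a fault on a huge PTE that is not present: find the page in the
 * page cache or allocate a fresh one, insert it into the page cache
 * (shared mappings) or the anon rmap (private mappings), and install the
 * new PTE. Private write faults do the COW immediately to avoid taking a
 * second fault.
 */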
3634 static int hugetlb_no_page(struct mm_struct *mm, struct vm_area_struct *vma,
3635 struct address_space *mapping, pgoff_t idx,
3636 unsigned long address, pte_t *ptep, unsigned int flags)
3638 struct hstate *h = hstate_vma(vma);
3639 int ret = VM_FAULT_SIGBUS;
3645 bool new_page = false;
3648 * Currently, we are forced to kill the process in the event the
3649 * original mapper has unmapped pages from the child due to a failed
3650 * COW. Warn that such a situation has occurred as it may not be obvious
3652 if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
3653 pr_warning("PID %d killed due to inadequate hugepage pool\n",
3659 * Use page lock to guard against racing truncation
3660 * before we get page_table_lock.
3663 page = find_lock_page(mapping, idx);
3665 size = i_size_read(mapping->host) >> huge_page_shift(h);
3668 page = alloc_huge_page(vma, address, 0);
3670 ret = PTR_ERR(page);
3674 ret = VM_FAULT_SIGBUS;
3677 clear_huge_page(page, address, pages_per_huge_page(h));
3678 __SetPageUptodate(page);
3681 if (vma->vm_flags & VM_MAYSHARE) {
3682 int err = huge_add_to_page_cache(page, mapping, idx);
3691 if (unlikely(anon_vma_prepare(vma))) {
3693 goto backout_unlocked;
3699 * If a memory error occurs between mmap() and fault, some processes
3700 * don't have a hwpoisoned swap entry for the errored virtual address.
3701 * So we need to block the hugepage fault by checking the PG_hwpoison bit.
3703 if (unlikely(PageHWPoison(page))) {
3704 ret = VM_FAULT_HWPOISON_LARGE |
3705 VM_FAULT_SET_HINDEX(hstate_index(h));
3706 goto backout_unlocked;
3711 * If we are going to COW a private mapping later, we examine the
3712 * pending reservations for this page now. This will ensure that
3713 * any allocations necessary to record that reservation occur outside the spinlock.
3716 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
3717 if (vma_needs_reservation(h, vma, address) < 0) {
3719 goto backout_unlocked;
3721 /* Just decrements count, does not deallocate */
3722 vma_end_reservation(h, vma, address);
3725 ptl = huge_pte_lockptr(h, mm, ptep);
3727 size = i_size_read(mapping->host) >> huge_page_shift(h);
3732 if (!huge_pte_none(huge_ptep_get(ptep)))
3736 ClearPagePrivate(page);
3737 hugepage_add_new_anon_rmap(page, vma, address);
3739 page_dup_rmap(page);
3740 new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
3741 && (vma->vm_flags & VM_SHARED)));
3742 set_huge_pte_at(mm, address, ptep, new_pte);
3744 hugetlb_count_add(pages_per_huge_page(h), mm);
3745 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
3746 /* Optimization, do the COW without a second fault */
3747 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page, ptl);
3753 * Only make newly allocated pages active. Existing pages found
3754 * in the pagecache could be !page_huge_active() if they have been
3755 * isolated for migration.
3758 set_page_huge_active(page);
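/*
 * Hash the mapping and page index to pick one of the hugetlb fault
 * mutexes, so that concurrent faults on the same logical page are
 * serialized while faults on different pages can proceed in parallel.
 */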
3773 u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
3776 unsigned long key[2];
3779 key[0] = (unsigned long) mapping;
3782 hash = jhash2((u32 *)&key, sizeof(key)/(sizeof(u32)), 0);
3784 return hash & (num_fault_mutexes - 1);
3788 * For uniprocessor systems we always use a single mutex, so just
3789 * return 0 and avoid the hashing overhead.
3791 u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
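/*
 * Top-level hugetlb fault handler: wait on migration or hwpoison
 * entries, take the per-page fault mutex, hand not-present entries to
 * hugetlb_no_page(), and otherwise resolve write faults through
 * hugetlb_cow() or by updating the access/dirty bits in place.
 */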
3798 int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3799 unsigned long address, unsigned int flags)
3806 struct page *page = NULL;
3807 struct page *pagecache_page = NULL;
3808 struct hstate *h = hstate_vma(vma);
3809 struct address_space *mapping;
3810 int need_wait_lock = 0;
3812 address &= huge_page_mask(h);
3814 ptep = huge_pte_offset(mm, address);
3816 entry = huge_ptep_get(ptep);
3817 if (unlikely(is_hugetlb_entry_migration(entry))) {
3818 migration_entry_wait_huge(vma, mm, ptep);
3820 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
3821 return VM_FAULT_HWPOISON_LARGE |
3822 VM_FAULT_SET_HINDEX(hstate_index(h));
3824 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
3826 return VM_FAULT_OOM;
3829 mapping = vma->vm_file->f_mapping;
3830 idx = vma_hugecache_offset(h, vma, address);
3833 * Serialize hugepage allocation and instantiation, so that we don't
3834 * get spurious allocation failures if two CPUs race to instantiate
3835 * the same page in the page cache.
3837 hash = hugetlb_fault_mutex_hash(h, mapping, idx);
3838 mutex_lock(&hugetlb_fault_mutex_table[hash]);
3840 entry = huge_ptep_get(ptep);
3841 if (huge_pte_none(entry)) {
3842 ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
3849 * entry could be a migration/hwpoison entry at this point, so this
3850 * check prevents the kernel from proceeding below assuming that we have
3851 * an active hugepage in the pagecache. This goto expects the 2nd page fault,
3852 * and is_hugetlb_entry_(migration|hwpoisoned) check will properly
3855 if (!pte_present(entry))
3859 * If we are going to COW the mapping later, we examine the pending
3860 * reservations for this page now. This will ensure that any
3861 * allocations necessary to record that reservation occur outside the
3862 * spinlock. For private mappings, we also lookup the pagecache
3863 * page now as it is used to determine if a reservation has been consumed.
3866 if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
3867 if (vma_needs_reservation(h, vma, address) < 0) {
3871 /* Just decrements count, does not deallocate */
3872 vma_end_reservation(h, vma, address);
3874 if (!(vma->vm_flags & VM_MAYSHARE))
3875 pagecache_page = hugetlbfs_pagecache_page(h,
3879 ptl = huge_pte_lock(h, mm, ptep);
3881 /* Check for a racing update before calling hugetlb_cow */
3882 if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
3886 * hugetlb_cow() requires page locks of pte_page(entry) and
3887 * pagecache_page, so here we need to take the former one
3888 * when page != pagecache_page or !pagecache_page.
3890 page = pte_page(entry);
3891 if (page != pagecache_page)
3892 if (!trylock_page(page)) {
3899 if (flags & FAULT_FLAG_WRITE) {
3900 if (!huge_pte_write(entry)) {
3901 ret = hugetlb_cow(mm, vma, address, ptep, entry,
3902 pagecache_page, ptl);
3905 entry = huge_pte_mkdirty(entry);
3907 entry = pte_mkyoung(entry);
3908 if (huge_ptep_set_access_flags(vma, address, ptep, entry,
3909 flags & FAULT_FLAG_WRITE))
3910 update_mmu_cache(vma, address, ptep);
3912 if (page != pagecache_page)
3918 if (pagecache_page) {
3919 unlock_page(pagecache_page);
3920 put_page(pagecache_page);
3923 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
3925 * Generally it's safe to hold a refcount while waiting for the page lock.
3926 * But here we just wait to defer the next page fault and avoid a busy loop;
3927 * the page is not used after being unlocked before we return from the
3928 * current page fault, so we are safe from accessing a freed page even
3929 * though we wait here without taking a refcount.
3932 wait_on_page_locked(page);
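/*
 * get_user_pages() worker for hugetlb VMAs: walk the range starting at
 * *@position huge page by huge page, faulting pages in as needed and
 * filling @pages/@vmas when supplied. *@position and *@nr_pages are
 * updated to reflect how far the walk got.
 */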
3936 long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
3937 struct page **pages, struct vm_area_struct **vmas,
3938 unsigned long *position, unsigned long *nr_pages,
3939 long i, unsigned int flags)
3941 unsigned long pfn_offset;
3942 unsigned long vaddr = *position;
3943 unsigned long remainder = *nr_pages;
3944 struct hstate *h = hstate_vma(vma);
3947 while (vaddr < vma->vm_end && remainder) {
3949 spinlock_t *ptl = NULL;
3954 * If we have a pending SIGKILL, don't keep faulting pages and
3955 * potentially allocating memory.
3957 if (unlikely(fatal_signal_pending(current))) {
3963 * Some archs (sparc64, sh*) have multiple pte_ts to
3964 * each hugepage. We have to make sure we get the
3965 * first, for the page indexing below to work.
3967 * Note that page table lock is not held when pte is null.
3969 pte = huge_pte_offset(mm, vaddr & huge_page_mask(h));
3971 ptl = huge_pte_lock(h, mm, pte);
3972 absent = !pte || huge_pte_none(huge_ptep_get(pte));
3975 * When coredumping, it suits get_dump_page if we just return
3976 * an error where there's an empty slot with no huge pagecache
3977 * to back it. This way, we avoid allocating a hugepage, and
3978 * the sparse dumpfile avoids allocating disk blocks, but its
3979 * huge holes still show up with zeroes where they need to be.
3981 if (absent && (flags & FOLL_DUMP) &&
3982 !hugetlbfs_pagecache_present(h, vma, vaddr)) {
3990 * We need to call hugetlb_fault for both hugepages under migration
3991 * (in which case hugetlb_fault waits for the migration) and
3992 * hwpoisoned hugepages (in which case we need to prevent the
3993 * caller from accessing them). In order to do this, we use
3994 * is_swap_pte here instead of is_hugetlb_entry_migration and
3995 * is_hugetlb_entry_hwpoisoned. This is because it simply covers
3996 * both cases, and because we can't follow correct pages
3997 * directly from any kind of swap entries.
3999 if (absent || is_swap_pte(huge_ptep_get(pte)) ||
4000 ((flags & FOLL_WRITE) &&
4001 !huge_pte_write(huge_ptep_get(pte)))) {
4006 ret = hugetlb_fault(mm, vma, vaddr,
4007 (flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
4008 if (!(ret & VM_FAULT_ERROR))
4015 pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
4016 page = pte_page(huge_ptep_get(pte));
4019 * Instead of doing 'try_get_page_foll()' below in the same_page
4020 * loop, just check the count once here.
4022 if (unlikely(page_count(page) <= 0)) {
4032 pages[i] = mem_map_offset(page, pfn_offset);
4033 get_page_foll(pages[i]);
4043 if (vaddr < vma->vm_end && remainder &&
4044 pfn_offset < pages_per_huge_page(h)) {
4046 * We use pfn_offset to avoid touching the pageframes
4047 * of this compound page.
4053 *nr_pages = remainder;
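/*
 * mprotect() worker for hugetlb VMAs: apply @newprot to every huge PTE
 * in [@address, @end), unsharing shared PMDs and downgrading writable
 * migration entries where necessary. Returns the number of base pages
 * whose protection was changed.
 */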
4059 unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
4060 unsigned long address, unsigned long end, pgprot_t newprot)
4062 struct mm_struct *mm = vma->vm_mm;
4063 unsigned long start = address;
4066 struct hstate *h = hstate_vma(vma);
4067 unsigned long pages = 0;
4069 BUG_ON(address >= end);
4070 flush_cache_range(vma, address, end);
4072 mmu_notifier_invalidate_range_start(mm, start, end);
4073 i_mmap_lock_write(vma->vm_file->f_mapping);
4074 for (; address < end; address += huge_page_size(h)) {
4076 ptep = huge_pte_offset(mm, address);
4079 ptl = huge_pte_lock(h, mm, ptep);
4080 if (huge_pmd_unshare(mm, &address, ptep)) {
4085 pte = huge_ptep_get(ptep);
4086 if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
4090 if (unlikely(is_hugetlb_entry_migration(pte))) {
4091 swp_entry_t entry = pte_to_swp_entry(pte);
4093 if (is_write_migration_entry(entry)) {
4096 make_migration_entry_read(&entry);
4097 newpte = swp_entry_to_pte(entry);
4098 set_huge_pte_at(mm, address, ptep, newpte);
4104 if (!huge_pte_none(pte)) {
4105 pte = huge_ptep_get_and_clear(mm, address, ptep);
4106 pte = pte_mkhuge(huge_pte_modify(pte, newprot));
4107 pte = arch_make_huge_pte(pte, vma, NULL, 0);
4108 set_huge_pte_at(mm, address, ptep, pte);
4114 * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
4115 * may have cleared our pud entry and done put_page on the page table:
4116 * once we release i_mmap_rwsem, another task can do the final put_page
4117 * and that page table be reused and filled with junk.
4119 flush_tlb_range(vma, start, end);
4120 mmu_notifier_invalidate_range(mm, start, end);
4121 i_mmap_unlock_write(vma->vm_file->f_mapping);
4122 mmu_notifier_invalidate_range_end(mm, start, end);
4124 return pages << h->order;
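/*
 * Reserve huge pages for the range [@from, @to) of @inode, charging the
 * subpool and the global reserve. Shared mappings record the range in
 * the inode's reservation map; private mappings get a fresh map attached
 * to @vma. Returns 0 on success or a negative error on failure.
 */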
4127 int hugetlb_reserve_pages(struct inode *inode,
4129 struct vm_area_struct *vma,
4130 vm_flags_t vm_flags)
4133 struct hstate *h = hstate_inode(inode);
4134 struct hugepage_subpool *spool = subpool_inode(inode);
4135 struct resv_map *resv_map;
4138 /* This should never happen */
4140 #ifdef CONFIG_DEBUG_VM
4141 WARN(1, "%s called with a negative range\n", __func__);
4147 * Only apply hugepage reservation if asked. At fault time, an
4148 * attempt will be made for VM_NORESERVE to allocate a page
4149 * without using reserves
4151 if (vm_flags & VM_NORESERVE)
4155 * Shared mappings base their reservation on the number of pages that
4156 * are already allocated on behalf of the file. Private mappings need
4157 * to reserve the full area even if read-only as mprotect() may be
4158 * called to make the mapping read-write. Assume !vma is a shm mapping
4160 if (!vma || vma->vm_flags & VM_MAYSHARE) {
4161 resv_map = inode_resv_map(inode);
4163 chg = region_chg(resv_map, from, to);
4166 resv_map = resv_map_alloc();
4172 set_vma_resv_map(vma, resv_map);
4173 set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
4182 * There must be enough pages in the subpool for the mapping. If
4183 * the subpool has a minimum size, there may be some global
4184 * reservations already in place (gbl_reserve).
4186 gbl_reserve = hugepage_subpool_get_pages(spool, chg);
4187 if (gbl_reserve < 0) {
4193 * Check enough hugepages are available for the reservation.
4194 * Hand the pages back to the subpool if there are not enough.
4196 ret = hugetlb_acct_memory(h, gbl_reserve);
4198 /* put back original number of pages, chg */
4199 (void)hugepage_subpool_put_pages(spool, chg);
4204 * Account for the reservations made. Shared mappings record regions
4205 * that have reservations as they are shared by multiple VMAs.
4206 * When the last VMA disappears, the region map says how much
4207 * the reservation was and the page cache tells how much of
4208 * the reservation was consumed. Private mappings are per-VMA and
4209 * only the consumed reservations are tracked. When the VMA
4210 * disappears, the original reservation is the VMA size and the
4211 * consumed reservations are stored in the map. Hence, nothing
4212 * else has to be done for private mappings here
4214 if (!vma || vma->vm_flags & VM_MAYSHARE) {
4215 long add = region_add(resv_map, from, to);
4217 if (unlikely(chg > add)) {
4219 * pages in this range were added to the reserve
4220 * map between region_chg and region_add. This
4221 * indicates a race with alloc_huge_page. Adjust
4222 * the subpool and reserve counts modified above
4223 * based on the difference.
4227 rsv_adjust = hugepage_subpool_put_pages(spool,
4229 hugetlb_acct_memory(h, -rsv_adjust);
4234 if (!vma || vma->vm_flags & VM_MAYSHARE)
4235 /* Don't call region_abort if region_chg failed */
4237 region_abort(resv_map, from, to);
4238 if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
4239 kref_put(&resv_map->refs, resv_map_release);
4243 long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
4246 struct hstate *h = hstate_inode(inode);
4247 struct resv_map *resv_map = inode_resv_map(inode);
4249 struct hugepage_subpool *spool = subpool_inode(inode);
4253 chg = region_del(resv_map, start, end);
4255 * region_del() can fail in the rare case where a region
4256 * must be split and another region descriptor can not be
4257 * allocated. If end == LONG_MAX, it will not fail.
4263 spin_lock(&inode->i_lock);
4264 inode->i_blocks -= (blocks_per_huge_page(h) * freed);
4265 spin_unlock(&inode->i_lock);
4268 * If the subpool has a minimum size, the number of global
4269 * reservations to be released may be adjusted.
4271 gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
4272 hugetlb_acct_memory(h, -gbl_reserve);
4277 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
4278 static unsigned long page_table_shareable(struct vm_area_struct *svma,
4279 struct vm_area_struct *vma,
4280 unsigned long addr, pgoff_t idx)
4282 unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
4284 unsigned long sbase = saddr & PUD_MASK;
4285 unsigned long s_end = sbase + PUD_SIZE;
4287 /* Allow segments to share if only one is marked locked */
4288 unsigned long vm_flags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
4289 unsigned long svm_flags = svma->vm_flags & VM_LOCKED_CLEAR_MASK;
4292 * match the virtual addresses, permissions and the alignment of the page table page.
4295 if (pmd_index(addr) != pmd_index(saddr) ||
4296 vm_flags != svm_flags ||
4297 sbase < svma->vm_start || svma->vm_end < s_end)
4303 static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
4305 unsigned long base = addr & PUD_MASK;
4306 unsigned long end = base + PUD_SIZE;
4309 * check on proper vm_flags and page table alignment
4311 if (vma->vm_flags & VM_MAYSHARE && range_in_vma(vma, base, end))
4316 #define ALIGN_DOWN(x, a) __ALIGN_KERNEL((x) - ((a) - 1), (a))
4318 * Determine if start,end range within vma could be mapped by shared pmd.
4319 * If yes, adjust start and end to cover range associated with possible
4320 * shared pmd mappings.
4322 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
4323 unsigned long *start, unsigned long *end)
4325 unsigned long v_start = ALIGN(vma->vm_start, PUD_SIZE),
4326 v_end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);
4329 * The vma needs to span at least one aligned PUD size, and the start,end
4330 * range must lie at least partially within it.
4332 if (!(vma->vm_flags & VM_MAYSHARE) || !(v_end > v_start) ||
4333 (*end <= v_start) || (*start >= v_end))
4336 /* Extend the range to be PUD aligned for a worst case scenario */
4337 if (*start > v_start)
4338 *start = ALIGN_DOWN(*start, PUD_SIZE);
4341 *end = ALIGN(*end, PUD_SIZE);
4345 * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
4346 * and returns the corresponding pte. While this is not necessary for the
4347 * !shared pmd case because we can allocate the pmd later as well, it makes the
4348 * code much cleaner. pmd allocation is essential for the shared case because
4349 * pud has to be populated inside the same i_mmap_rwsem section - otherwise
4350 * racing tasks could either miss the sharing (see huge_pte_offset) or select a
4351 * bad pmd for sharing.
4353 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
4355 struct vm_area_struct *vma = find_vma(mm, addr);
4356 struct address_space *mapping = vma->vm_file->f_mapping;
4357 pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
4359 struct vm_area_struct *svma;
4360 unsigned long saddr;
4365 if (!vma_shareable(vma, addr))
4366 return (pte_t *)pmd_alloc(mm, pud, addr);
4368 i_mmap_lock_write(mapping);
4369 vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
4373 saddr = page_table_shareable(svma, vma, addr, idx);
4375 spte = huge_pte_offset(svma->vm_mm, saddr);
4377 get_page(virt_to_page(spte));
4386 ptl = huge_pte_lockptr(hstate_vma(vma), mm, spte);
4388 if (pud_none(*pud)) {
4389 pud_populate(mm, pud,
4390 (pmd_t *)((unsigned long)spte & PAGE_MASK));
4393 put_page(virt_to_page(spte));
4397 pte = (pte_t *)pmd_alloc(mm, pud, addr);
4398 i_mmap_unlock_write(mapping);
4403 * unmap huge page backed by shared pte.
4405 * Hugetlb pte page is ref counted at the time of mapping. If pte is shared
4406 * indicated by page_count > 1, unmap is achieved by clearing pud and
4407 * decrementing the ref count. If count == 1, the pte page is not shared.
4409 * called with page table lock held.
4411 * returns: 1 successfully unmapped a shared pte page
4412 * 0 the underlying pte page is not shared, or it is the last user
4414 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
4416 pgd_t *pgd = pgd_offset(mm, *addr);
4417 pud_t *pud = pud_offset(pgd, *addr);
4419 BUG_ON(page_count(virt_to_page(ptep)) == 0);
4420 if (page_count(virt_to_page(ptep)) == 1)
4424 put_page(virt_to_page(ptep));
4426 *addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
4429 #define want_pmd_share() (1)
4430 #else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
4431 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
4436 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
4441 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
4442 unsigned long *start, unsigned long *end)
4445 #define want_pmd_share() (0)
4446 #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
4448 #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
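/*
 * Generic page table helpers for architectures where huge pages are
 * mapped by ordinary PMD or PUD entries: huge_pte_alloc() allocates (or
 * shares via huge_pmd_share()) the entry covering @addr, while
 * huge_pte_offset() only looks up an existing one.
 */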
4449 pte_t *huge_pte_alloc(struct mm_struct *mm,
4450 unsigned long addr, unsigned long sz)
4456 pgd = pgd_offset(mm, addr);
4457 pud = pud_alloc(mm, pgd, addr);
4459 if (sz == PUD_SIZE) {
4462 BUG_ON(sz != PMD_SIZE);
4463 if (want_pmd_share() && pud_none(*pud))
4464 pte = huge_pmd_share(mm, addr, pud);
4466 pte = (pte_t *)pmd_alloc(mm, pud, addr);
4469 BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));
4474 pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
4480 pgd = pgd_offset(mm, addr);
4481 if (pgd_present(*pgd)) {
4482 pud = pud_offset(pgd, addr);
4483 if (pud_present(*pud)) {
4485 return (pte_t *)pud;
4486 pmd = pmd_offset(pud, addr);
4489 return (pte_t *) pmd;
4492 #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
4495 * These functions can be overridden if your architecture needs its own behavior.
4498 struct page * __weak
4499 follow_huge_addr(struct mm_struct *mm, unsigned long address,
4502 return ERR_PTR(-EINVAL);
4505 struct page * __weak
4506 follow_huge_pmd(struct mm_struct *mm, unsigned long address,
4507 pmd_t *pmd, int flags)
4509 struct page *page = NULL;
4513 ptl = pmd_lockptr(mm, pmd);
4516 * make sure that the address range covered by this pmd is not
4517 * unmapped from other threads.
4519 if (!pmd_huge(*pmd))
4521 pte = huge_ptep_get((pte_t *)pmd);
4522 if (pte_present(pte)) {
4523 page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
4524 if (flags & FOLL_GET)
4527 if (is_hugetlb_entry_migration(pte)) {
4529 __migration_entry_wait(mm, (pte_t *)pmd, ptl);
4533 * hwpoisoned entry is treated as no_page_table in
4534 * follow_page_mask().
4542 struct page * __weak
4543 follow_huge_pud(struct mm_struct *mm, unsigned long address,
4544 pud_t *pud, int flags)
4546 if (flags & FOLL_GET)
4549 return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
4552 #ifdef CONFIG_MEMORY_FAILURE
4555 * This function is called from memory failure code.
4556 * Assume the caller holds page lock of the head page.
4558 int dequeue_hwpoisoned_huge_page(struct page *hpage)
4560 struct hstate *h = page_hstate(hpage);
4561 int nid = page_to_nid(hpage);
4564 spin_lock(&hugetlb_lock);
4566 * Just checking !page_huge_active is not enough, because that could be
4567 * an isolated/hwpoisoned hugepage (which has a refcount > 0).
4569 if (!page_huge_active(hpage) && !page_count(hpage)) {
4571 * Hwpoisoned hugepage isn't linked to activelist or freelist,
4572 * but dangling hpage->lru can trigger list-debug warnings
4573 * (this happens when we call unpoison_memory() on it),
4574 * so let it point to itself with list_del_init().
4576 list_del_init(&hpage->lru);
4577 set_page_refcounted(hpage);
4578 h->free_huge_pages--;
4579 h->free_huge_pages_node[nid]--;
4582 spin_unlock(&hugetlb_lock);
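/*
 * Take an active huge page off its hstate's active list and move it onto
 * @list so it can be migrated; putback_active_hugepage() puts it back on
 * the active list once migration has finished or failed.
 */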
4587 bool isolate_huge_page(struct page *page, struct list_head *list)
4591 spin_lock(&hugetlb_lock);
4592 if (!PageHeadHuge(page) || !page_huge_active(page) ||
4593 !get_page_unless_zero(page)) {
4597 clear_page_huge_active(page);
4598 list_move_tail(&page->lru, list);
4600 spin_unlock(&hugetlb_lock);
4604 void putback_active_hugepage(struct page *page)
4606 VM_BUG_ON_PAGE(!PageHead(page), page);
4607 spin_lock(&hugetlb_lock);
4608 set_page_huge_active(page);
4609 list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
4610 spin_unlock(&hugetlb_lock);