GNU Linux-libre 4.19.211-gnu1
[releases.git] / mm / hugetlb.c
1 /*
2  * Generic hugetlb support.
3  * (C) Nadia Yvette Chambers, April 2004
4  */
5 #include <linux/list.h>
6 #include <linux/init.h>
7 #include <linux/mm.h>
8 #include <linux/seq_file.h>
9 #include <linux/sysctl.h>
10 #include <linux/highmem.h>
11 #include <linux/mmu_notifier.h>
12 #include <linux/nodemask.h>
13 #include <linux/pagemap.h>
14 #include <linux/mempolicy.h>
15 #include <linux/compiler.h>
16 #include <linux/cpuset.h>
17 #include <linux/mutex.h>
18 #include <linux/bootmem.h>
19 #include <linux/sysfs.h>
20 #include <linux/slab.h>
21 #include <linux/mmdebug.h>
22 #include <linux/sched/signal.h>
23 #include <linux/rmap.h>
24 #include <linux/string_helpers.h>
25 #include <linux/swap.h>
26 #include <linux/swapops.h>
27 #include <linux/jhash.h>
28
29 #include <asm/page.h>
30 #include <asm/pgtable.h>
31 #include <asm/tlb.h>
32
33 #include <linux/io.h>
34 #include <linux/hugetlb.h>
35 #include <linux/hugetlb_cgroup.h>
36 #include <linux/node.h>
37 #include <linux/userfaultfd_k.h>
38 #include <linux/page_owner.h>
39 #include "internal.h"
40
41 int hugetlb_max_hstate __read_mostly;
42 unsigned int default_hstate_idx;
43 struct hstate hstates[HUGE_MAX_HSTATE];
44 /*
45  * Minimum page order among possible hugepage sizes, set to a proper value
46  * at boot time.
47  */
48 static unsigned int minimum_order __read_mostly = UINT_MAX;
49
50 __initdata LIST_HEAD(huge_boot_pages);
51
52 /* for command line parsing */
53 static struct hstate * __initdata parsed_hstate;
54 static unsigned long __initdata default_hstate_max_huge_pages;
55 static unsigned long __initdata default_hstate_size;
56 static bool __initdata parsed_valid_hugepagesz = true;
57
58 /*
59  * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
60  * free_huge_pages, and surplus_huge_pages.
61  */
62 DEFINE_SPINLOCK(hugetlb_lock);
63
64 /*
65  * Serializes faults on the same logical page.  This is used to
66  * prevent spurious OOMs when the hugepage pool is fully utilized.
67  */
68 static int num_fault_mutexes;
69 struct mutex *hugetlb_fault_mutex_table ____cacheline_aligned_in_smp;
70
71 static inline bool PageHugeFreed(struct page *head)
72 {
73         return page_private(head + 4) == -1UL;
74 }
75
76 static inline void SetPageHugeFreed(struct page *head)
77 {
78         set_page_private(head + 4, -1UL);
79 }
80
81 static inline void ClearPageHugeFreed(struct page *head)
82 {
83         set_page_private(head + 4, 0);
84 }
85
86 /* Forward declaration */
87 static int hugetlb_acct_memory(struct hstate *h, long delta);
88
89 static inline void unlock_or_release_subpool(struct hugepage_subpool *spool)
90 {
91         bool free = (spool->count == 0) && (spool->used_hpages == 0);
92
93         spin_unlock(&spool->lock);
94
95         /* If no pages are used, and no other handles to the subpool
96          * remain, give up any reservations mased on minimum size and
97          * free the subpool */
98         if (free) {
99                 if (spool->min_hpages != -1)
100                         hugetlb_acct_memory(spool->hstate,
101                                                 -spool->min_hpages);
102                 kfree(spool);
103         }
104 }
105
106 struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
107                                                 long min_hpages)
108 {
109         struct hugepage_subpool *spool;
110
111         spool = kzalloc(sizeof(*spool), GFP_KERNEL);
112         if (!spool)
113                 return NULL;
114
115         spin_lock_init(&spool->lock);
116         spool->count = 1;
117         spool->max_hpages = max_hpages;
118         spool->hstate = h;
119         spool->min_hpages = min_hpages;
120
121         if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
122                 kfree(spool);
123                 return NULL;
124         }
125         spool->rsv_hpages = min_hpages;
126
127         return spool;
128 }
129
130 void hugepage_put_subpool(struct hugepage_subpool *spool)
131 {
132         spin_lock(&spool->lock);
133         BUG_ON(!spool->count);
134         spool->count--;
135         unlock_or_release_subpool(spool);
136 }
137
138 /*
139  * Subpool accounting for allocating and reserving pages.
140  * Return -ENOMEM if there are not enough resources to satisfy the
141  * the request.  Otherwise, return the number of pages by which the
142  * global pools must be adjusted (upward).  The returned value may
143  * only be different than the passed value (delta) in the case where
144  * a subpool minimum size must be manitained.
145  */
146 static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
147                                       long delta)
148 {
149         long ret = delta;
150
151         if (!spool)
152                 return ret;
153
154         spin_lock(&spool->lock);
155
156         if (spool->max_hpages != -1) {          /* maximum size accounting */
157                 if ((spool->used_hpages + delta) <= spool->max_hpages)
158                         spool->used_hpages += delta;
159                 else {
160                         ret = -ENOMEM;
161                         goto unlock_ret;
162                 }
163         }
164
165         /* minimum size accounting */
166         if (spool->min_hpages != -1 && spool->rsv_hpages) {
167                 if (delta > spool->rsv_hpages) {
168                         /*
169                          * Asking for more reserves than those already taken on
170                          * behalf of subpool.  Return difference.
171                          */
172                         ret = delta - spool->rsv_hpages;
173                         spool->rsv_hpages = 0;
174                 } else {
175                         ret = 0;        /* reserves already accounted for */
176                         spool->rsv_hpages -= delta;
177                 }
178         }
179
180 unlock_ret:
181         spin_unlock(&spool->lock);
182         return ret;
183 }
184
185 /*
186  * Subpool accounting for freeing and unreserving pages.
187  * Return the number of global page reservations that must be dropped.
188  * The return value may only be different than the passed value (delta)
189  * in the case where a subpool minimum size must be maintained.
190  */
191 static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
192                                        long delta)
193 {
194         long ret = delta;
195
196         if (!spool)
197                 return delta;
198
199         spin_lock(&spool->lock);
200
201         if (spool->max_hpages != -1)            /* maximum size accounting */
202                 spool->used_hpages -= delta;
203
204          /* minimum size accounting */
205         if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
206                 if (spool->rsv_hpages + delta <= spool->min_hpages)
207                         ret = 0;
208                 else
209                         ret = spool->rsv_hpages + delta - spool->min_hpages;
210
211                 spool->rsv_hpages += delta;
212                 if (spool->rsv_hpages > spool->min_hpages)
213                         spool->rsv_hpages = spool->min_hpages;
214         }
215
216         /*
217          * If hugetlbfs_put_super couldn't free spool due to an outstanding
218          * quota reference, free it now.
219          */
220         unlock_or_release_subpool(spool);
221
222         return ret;
223 }
224
225 static inline struct hugepage_subpool *subpool_inode(struct inode *inode)
226 {
227         return HUGETLBFS_SB(inode->i_sb)->spool;
228 }
229
230 static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
231 {
232         return subpool_inode(file_inode(vma->vm_file));
233 }
234
235 /*
236  * Region tracking -- allows tracking of reservations and instantiated pages
237  *                    across the pages in a mapping.
238  *
239  * The region data structures are embedded into a resv_map and protected
240  * by a resv_map's lock.  The set of regions within the resv_map represent
241  * reservations for huge pages, or huge pages that have already been
242  * instantiated within the map.  The from and to elements are huge page
243  * indicies into the associated mapping.  from indicates the starting index
244  * of the region.  to represents the first index past the end of  the region.
245  *
246  * For example, a file region structure with from == 0 and to == 4 represents
247  * four huge pages in a mapping.  It is important to note that the to element
248  * represents the first element past the end of the region. This is used in
249  * arithmetic as 4(to) - 0(from) = 4 huge pages in the region.
250  *
251  * Interval notation of the form [from, to) will be used to indicate that
252  * the endpoint from is inclusive and to is exclusive.
253  */
254 struct file_region {
255         struct list_head link;
256         long from;
257         long to;
258 };
259
260 /*
261  * Add the huge page range represented by [f, t) to the reserve
262  * map.  In the normal case, existing regions will be expanded
263  * to accommodate the specified range.  Sufficient regions should
264  * exist for expansion due to the previous call to region_chg
265  * with the same range.  However, it is possible that region_del
266  * could have been called after region_chg and modifed the map
267  * in such a way that no region exists to be expanded.  In this
268  * case, pull a region descriptor from the cache associated with
269  * the map and use that for the new range.
270  *
271  * Return the number of new huge pages added to the map.  This
272  * number is greater than or equal to zero.
273  */
274 static long region_add(struct resv_map *resv, long f, long t)
275 {
276         struct list_head *head = &resv->regions;
277         struct file_region *rg, *nrg, *trg;
278         long add = 0;
279
280         spin_lock(&resv->lock);
281         /* Locate the region we are either in or before. */
282         list_for_each_entry(rg, head, link)
283                 if (f <= rg->to)
284                         break;
285
286         /*
287          * If no region exists which can be expanded to include the
288          * specified range, the list must have been modified by an
289          * interleving call to region_del().  Pull a region descriptor
290          * from the cache and use it for this range.
291          */
292         if (&rg->link == head || t < rg->from) {
293                 VM_BUG_ON(resv->region_cache_count <= 0);
294
295                 resv->region_cache_count--;
296                 nrg = list_first_entry(&resv->region_cache, struct file_region,
297                                         link);
298                 list_del(&nrg->link);
299
300                 nrg->from = f;
301                 nrg->to = t;
302                 list_add(&nrg->link, rg->link.prev);
303
304                 add += t - f;
305                 goto out_locked;
306         }
307
308         /* Round our left edge to the current segment if it encloses us. */
309         if (f > rg->from)
310                 f = rg->from;
311
312         /* Check for and consume any regions we now overlap with. */
313         nrg = rg;
314         list_for_each_entry_safe(rg, trg, rg->link.prev, link) {
315                 if (&rg->link == head)
316                         break;
317                 if (rg->from > t)
318                         break;
319
320                 /* If this area reaches higher then extend our area to
321                  * include it completely.  If this is not the first area
322                  * which we intend to reuse, free it. */
323                 if (rg->to > t)
324                         t = rg->to;
325                 if (rg != nrg) {
326                         /* Decrement return value by the deleted range.
327                          * Another range will span this area so that by
328                          * end of routine add will be >= zero
329                          */
330                         add -= (rg->to - rg->from);
331                         list_del(&rg->link);
332                         kfree(rg);
333                 }
334         }
335
336         add += (nrg->from - f);         /* Added to beginning of region */
337         nrg->from = f;
338         add += t - nrg->to;             /* Added to end of region */
339         nrg->to = t;
340
341 out_locked:
342         resv->adds_in_progress--;
343         spin_unlock(&resv->lock);
344         VM_BUG_ON(add < 0);
345         return add;
346 }
347
348 /*
349  * Examine the existing reserve map and determine how many
350  * huge pages in the specified range [f, t) are NOT currently
351  * represented.  This routine is called before a subsequent
352  * call to region_add that will actually modify the reserve
353  * map to add the specified range [f, t).  region_chg does
354  * not change the number of huge pages represented by the
355  * map.  However, if the existing regions in the map can not
356  * be expanded to represent the new range, a new file_region
357  * structure is added to the map as a placeholder.  This is
358  * so that the subsequent region_add call will have all the
359  * regions it needs and will not fail.
360  *
361  * Upon entry, region_chg will also examine the cache of region descriptors
362  * associated with the map.  If there are not enough descriptors cached, one
363  * will be allocated for the in progress add operation.
364  *
365  * Returns the number of huge pages that need to be added to the existing
366  * reservation map for the range [f, t).  This number is greater or equal to
367  * zero.  -ENOMEM is returned if a new file_region structure or cache entry
368  * is needed and can not be allocated.
369  */
370 static long region_chg(struct resv_map *resv, long f, long t)
371 {
372         struct list_head *head = &resv->regions;
373         struct file_region *rg, *nrg = NULL;
374         long chg = 0;
375
376 retry:
377         spin_lock(&resv->lock);
378 retry_locked:
379         resv->adds_in_progress++;
380
381         /*
382          * Check for sufficient descriptors in the cache to accommodate
383          * the number of in progress add operations.
384          */
385         if (resv->adds_in_progress > resv->region_cache_count) {
386                 struct file_region *trg;
387
388                 VM_BUG_ON(resv->adds_in_progress - resv->region_cache_count > 1);
389                 /* Must drop lock to allocate a new descriptor. */
390                 resv->adds_in_progress--;
391                 spin_unlock(&resv->lock);
392
393                 trg = kmalloc(sizeof(*trg), GFP_KERNEL);
394                 if (!trg) {
395                         kfree(nrg);
396                         return -ENOMEM;
397                 }
398
399                 spin_lock(&resv->lock);
400                 list_add(&trg->link, &resv->region_cache);
401                 resv->region_cache_count++;
402                 goto retry_locked;
403         }
404
405         /* Locate the region we are before or in. */
406         list_for_each_entry(rg, head, link)
407                 if (f <= rg->to)
408                         break;
409
410         /* If we are below the current region then a new region is required.
411          * Subtle, allocate a new region at the position but make it zero
412          * size such that we can guarantee to record the reservation. */
413         if (&rg->link == head || t < rg->from) {
414                 if (!nrg) {
415                         resv->adds_in_progress--;
416                         spin_unlock(&resv->lock);
417                         nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
418                         if (!nrg)
419                                 return -ENOMEM;
420
421                         nrg->from = f;
422                         nrg->to   = f;
423                         INIT_LIST_HEAD(&nrg->link);
424                         goto retry;
425                 }
426
427                 list_add(&nrg->link, rg->link.prev);
428                 chg = t - f;
429                 goto out_nrg;
430         }
431
432         /* Round our left edge to the current segment if it encloses us. */
433         if (f > rg->from)
434                 f = rg->from;
435         chg = t - f;
436
437         /* Check for and consume any regions we now overlap with. */
438         list_for_each_entry(rg, rg->link.prev, link) {
439                 if (&rg->link == head)
440                         break;
441                 if (rg->from > t)
442                         goto out;
443
444                 /* We overlap with this area, if it extends further than
445                  * us then we must extend ourselves.  Account for its
446                  * existing reservation. */
447                 if (rg->to > t) {
448                         chg += rg->to - t;
449                         t = rg->to;
450                 }
451                 chg -= rg->to - rg->from;
452         }
453
454 out:
455         spin_unlock(&resv->lock);
456         /*  We already know we raced and no longer need the new region */
457         kfree(nrg);
458         return chg;
459 out_nrg:
460         spin_unlock(&resv->lock);
461         return chg;
462 }
463
464 /*
465  * Abort the in progress add operation.  The adds_in_progress field
466  * of the resv_map keeps track of the operations in progress between
467  * calls to region_chg and region_add.  Operations are sometimes
468  * aborted after the call to region_chg.  In such cases, region_abort
469  * is called to decrement the adds_in_progress counter.
470  *
471  * NOTE: The range arguments [f, t) are not needed or used in this
472  * routine.  They are kept to make reading the calling code easier as
473  * arguments will match the associated region_chg call.
474  */
475 static void region_abort(struct resv_map *resv, long f, long t)
476 {
477         spin_lock(&resv->lock);
478         VM_BUG_ON(!resv->region_cache_count);
479         resv->adds_in_progress--;
480         spin_unlock(&resv->lock);
481 }
482
483 /*
484  * Delete the specified range [f, t) from the reserve map.  If the
485  * t parameter is LONG_MAX, this indicates that ALL regions after f
486  * should be deleted.  Locate the regions which intersect [f, t)
487  * and either trim, delete or split the existing regions.
488  *
489  * Returns the number of huge pages deleted from the reserve map.
490  * In the normal case, the return value is zero or more.  In the
491  * case where a region must be split, a new region descriptor must
492  * be allocated.  If the allocation fails, -ENOMEM will be returned.
493  * NOTE: If the parameter t == LONG_MAX, then we will never split
494  * a region and possibly return -ENOMEM.  Callers specifying
495  * t == LONG_MAX do not need to check for -ENOMEM error.
496  */
497 static long region_del(struct resv_map *resv, long f, long t)
498 {
499         struct list_head *head = &resv->regions;
500         struct file_region *rg, *trg;
501         struct file_region *nrg = NULL;
502         long del = 0;
503
504 retry:
505         spin_lock(&resv->lock);
506         list_for_each_entry_safe(rg, trg, head, link) {
507                 /*
508                  * Skip regions before the range to be deleted.  file_region
509                  * ranges are normally of the form [from, to).  However, there
510                  * may be a "placeholder" entry in the map which is of the form
511                  * (from, to) with from == to.  Check for placeholder entries
512                  * at the beginning of the range to be deleted.
513                  */
514                 if (rg->to <= f && (rg->to != rg->from || rg->to != f))
515                         continue;
516
517                 if (rg->from >= t)
518                         break;
519
520                 if (f > rg->from && t < rg->to) { /* Must split region */
521                         /*
522                          * Check for an entry in the cache before dropping
523                          * lock and attempting allocation.
524                          */
525                         if (!nrg &&
526                             resv->region_cache_count > resv->adds_in_progress) {
527                                 nrg = list_first_entry(&resv->region_cache,
528                                                         struct file_region,
529                                                         link);
530                                 list_del(&nrg->link);
531                                 resv->region_cache_count--;
532                         }
533
534                         if (!nrg) {
535                                 spin_unlock(&resv->lock);
536                                 nrg = kmalloc(sizeof(*nrg), GFP_KERNEL);
537                                 if (!nrg)
538                                         return -ENOMEM;
539                                 goto retry;
540                         }
541
542                         del += t - f;
543
544                         /* New entry for end of split region */
545                         nrg->from = t;
546                         nrg->to = rg->to;
547                         INIT_LIST_HEAD(&nrg->link);
548
549                         /* Original entry is trimmed */
550                         rg->to = f;
551
552                         list_add(&nrg->link, &rg->link);
553                         nrg = NULL;
554                         break;
555                 }
556
557                 if (f <= rg->from && t >= rg->to) { /* Remove entire region */
558                         del += rg->to - rg->from;
559                         list_del(&rg->link);
560                         kfree(rg);
561                         continue;
562                 }
563
564                 if (f <= rg->from) {    /* Trim beginning of region */
565                         del += t - rg->from;
566                         rg->from = t;
567                 } else {                /* Trim end of region */
568                         del += rg->to - f;
569                         rg->to = f;
570                 }
571         }
572
573         spin_unlock(&resv->lock);
574         kfree(nrg);
575         return del;
576 }
577
578 /*
579  * A rare out of memory error was encountered which prevented removal of
580  * the reserve map region for a page.  The huge page itself was free'ed
581  * and removed from the page cache.  This routine will adjust the subpool
582  * usage count, and the global reserve count if needed.  By incrementing
583  * these counts, the reserve map entry which could not be deleted will
584  * appear as a "reserved" entry instead of simply dangling with incorrect
585  * counts.
586  */
587 void hugetlb_fix_reserve_counts(struct inode *inode)
588 {
589         struct hugepage_subpool *spool = subpool_inode(inode);
590         long rsv_adjust;
591         bool reserved = false;
592
593         rsv_adjust = hugepage_subpool_get_pages(spool, 1);
594         if (rsv_adjust > 0) {
595                 struct hstate *h = hstate_inode(inode);
596
597                 if (!hugetlb_acct_memory(h, 1))
598                         reserved = true;
599         } else if (!rsv_adjust) {
600                 reserved = true;
601         }
602
603         if (!reserved)
604                 pr_warn("hugetlb: Huge Page Reserved count may go negative.\n");
605 }
606
607 /*
608  * Count and return the number of huge pages in the reserve map
609  * that intersect with the range [f, t).
610  */
611 static long region_count(struct resv_map *resv, long f, long t)
612 {
613         struct list_head *head = &resv->regions;
614         struct file_region *rg;
615         long chg = 0;
616
617         spin_lock(&resv->lock);
618         /* Locate each segment we overlap with, and count that overlap. */
619         list_for_each_entry(rg, head, link) {
620                 long seg_from;
621                 long seg_to;
622
623                 if (rg->to <= f)
624                         continue;
625                 if (rg->from >= t)
626                         break;
627
628                 seg_from = max(rg->from, f);
629                 seg_to = min(rg->to, t);
630
631                 chg += seg_to - seg_from;
632         }
633         spin_unlock(&resv->lock);
634
635         return chg;
636 }
637
638 /*
639  * Convert the address within this vma to the page offset within
640  * the mapping, in pagecache page units; huge pages here.
641  */
642 static pgoff_t vma_hugecache_offset(struct hstate *h,
643                         struct vm_area_struct *vma, unsigned long address)
644 {
645         return ((address - vma->vm_start) >> huge_page_shift(h)) +
646                         (vma->vm_pgoff >> huge_page_order(h));
647 }
648
649 pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
650                                      unsigned long address)
651 {
652         return vma_hugecache_offset(hstate_vma(vma), vma, address);
653 }
654 EXPORT_SYMBOL_GPL(linear_hugepage_index);
655
656 /*
657  * Return the size of the pages allocated when backing a VMA. In the majority
658  * cases this will be same size as used by the page table entries.
659  */
660 unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
661 {
662         if (vma->vm_ops && vma->vm_ops->pagesize)
663                 return vma->vm_ops->pagesize(vma);
664         return PAGE_SIZE;
665 }
666 EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
667
668 /*
669  * Return the page size being used by the MMU to back a VMA. In the majority
670  * of cases, the page size used by the kernel matches the MMU size. On
671  * architectures where it differs, an architecture-specific 'strong'
672  * version of this symbol is required.
673  */
674 __weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
675 {
676         return vma_kernel_pagesize(vma);
677 }
678
679 /*
680  * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
681  * bits of the reservation map pointer, which are always clear due to
682  * alignment.
683  */
684 #define HPAGE_RESV_OWNER    (1UL << 0)
685 #define HPAGE_RESV_UNMAPPED (1UL << 1)
686 #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
687
688 /*
689  * These helpers are used to track how many pages are reserved for
690  * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
691  * is guaranteed to have their future faults succeed.
692  *
693  * With the exception of reset_vma_resv_huge_pages() which is called at fork(),
694  * the reserve counters are updated with the hugetlb_lock held. It is safe
695  * to reset the VMA at fork() time as it is not in use yet and there is no
696  * chance of the global counters getting corrupted as a result of the values.
697  *
698  * The private mapping reservation is represented in a subtly different
699  * manner to a shared mapping.  A shared mapping has a region map associated
700  * with the underlying file, this region map represents the backing file
701  * pages which have ever had a reservation assigned which this persists even
702  * after the page is instantiated.  A private mapping has a region map
703  * associated with the original mmap which is attached to all VMAs which
704  * reference it, this region map represents those offsets which have consumed
705  * reservation ie. where pages have been instantiated.
706  */
707 static unsigned long get_vma_private_data(struct vm_area_struct *vma)
708 {
709         return (unsigned long)vma->vm_private_data;
710 }
711
712 static void set_vma_private_data(struct vm_area_struct *vma,
713                                                         unsigned long value)
714 {
715         vma->vm_private_data = (void *)value;
716 }
717
718 struct resv_map *resv_map_alloc(void)
719 {
720         struct resv_map *resv_map = kmalloc(sizeof(*resv_map), GFP_KERNEL);
721         struct file_region *rg = kmalloc(sizeof(*rg), GFP_KERNEL);
722
723         if (!resv_map || !rg) {
724                 kfree(resv_map);
725                 kfree(rg);
726                 return NULL;
727         }
728
729         kref_init(&resv_map->refs);
730         spin_lock_init(&resv_map->lock);
731         INIT_LIST_HEAD(&resv_map->regions);
732
733         resv_map->adds_in_progress = 0;
734
735         INIT_LIST_HEAD(&resv_map->region_cache);
736         list_add(&rg->link, &resv_map->region_cache);
737         resv_map->region_cache_count = 1;
738
739         return resv_map;
740 }
741
742 void resv_map_release(struct kref *ref)
743 {
744         struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
745         struct list_head *head = &resv_map->region_cache;
746         struct file_region *rg, *trg;
747
748         /* Clear out any active regions before we release the map. */
749         region_del(resv_map, 0, LONG_MAX);
750
751         /* ... and any entries left in the cache */
752         list_for_each_entry_safe(rg, trg, head, link) {
753                 list_del(&rg->link);
754                 kfree(rg);
755         }
756
757         VM_BUG_ON(resv_map->adds_in_progress);
758
759         kfree(resv_map);
760 }
761
762 static inline struct resv_map *inode_resv_map(struct inode *inode)
763 {
764         return inode->i_mapping->private_data;
765 }
766
767 static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
768 {
769         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
770         if (vma->vm_flags & VM_MAYSHARE) {
771                 struct address_space *mapping = vma->vm_file->f_mapping;
772                 struct inode *inode = mapping->host;
773
774                 return inode_resv_map(inode);
775
776         } else {
777                 return (struct resv_map *)(get_vma_private_data(vma) &
778                                                         ~HPAGE_RESV_MASK);
779         }
780 }
781
782 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
783 {
784         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
785         VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
786
787         set_vma_private_data(vma, (get_vma_private_data(vma) &
788                                 HPAGE_RESV_MASK) | (unsigned long)map);
789 }
790
791 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
792 {
793         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
794         VM_BUG_ON_VMA(vma->vm_flags & VM_MAYSHARE, vma);
795
796         set_vma_private_data(vma, get_vma_private_data(vma) | flags);
797 }
798
799 static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
800 {
801         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
802
803         return (get_vma_private_data(vma) & flag) != 0;
804 }
805
806 /* Reset counters to 0 and clear all HPAGE_RESV_* flags */
807 void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
808 {
809         VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
810         if (!(vma->vm_flags & VM_MAYSHARE))
811                 vma->vm_private_data = (void *)0;
812 }
813
814 /* Returns true if the VMA has associated reserve pages */
815 static bool vma_has_reserves(struct vm_area_struct *vma, long chg)
816 {
817         if (vma->vm_flags & VM_NORESERVE) {
818                 /*
819                  * This address is already reserved by other process(chg == 0),
820                  * so, we should decrement reserved count. Without decrementing,
821                  * reserve count remains after releasing inode, because this
822                  * allocated page will go into page cache and is regarded as
823                  * coming from reserved pool in releasing step.  Currently, we
824                  * don't have any other solution to deal with this situation
825                  * properly, so add work-around here.
826                  */
827                 if (vma->vm_flags & VM_MAYSHARE && chg == 0)
828                         return true;
829                 else
830                         return false;
831         }
832
833         /* Shared mappings always use reserves */
834         if (vma->vm_flags & VM_MAYSHARE) {
835                 /*
836                  * We know VM_NORESERVE is not set.  Therefore, there SHOULD
837                  * be a region map for all pages.  The only situation where
838                  * there is no region map is if a hole was punched via
839                  * fallocate.  In this case, there really are no reverves to
840                  * use.  This situation is indicated if chg != 0.
841                  */
842                 if (chg)
843                         return false;
844                 else
845                         return true;
846         }
847
848         /*
849          * Only the process that called mmap() has reserves for
850          * private mappings.
851          */
852         if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
853                 /*
854                  * Like the shared case above, a hole punch or truncate
855                  * could have been performed on the private mapping.
856                  * Examine the value of chg to determine if reserves
857                  * actually exist or were previously consumed.
858                  * Very Subtle - The value of chg comes from a previous
859                  * call to vma_needs_reserves().  The reserve map for
860                  * private mappings has different (opposite) semantics
861                  * than that of shared mappings.  vma_needs_reserves()
862                  * has already taken this difference in semantics into
863                  * account.  Therefore, the meaning of chg is the same
864                  * as in the shared case above.  Code could easily be
865                  * combined, but keeping it separate draws attention to
866                  * subtle differences.
867                  */
868                 if (chg)
869                         return false;
870                 else
871                         return true;
872         }
873
874         return false;
875 }
876
877 static void enqueue_huge_page(struct hstate *h, struct page *page)
878 {
879         int nid = page_to_nid(page);
880         list_move(&page->lru, &h->hugepage_freelists[nid]);
881         h->free_huge_pages++;
882         h->free_huge_pages_node[nid]++;
883         SetPageHugeFreed(page);
884 }
885
886 static struct page *dequeue_huge_page_node_exact(struct hstate *h, int nid)
887 {
888         struct page *page;
889
890         list_for_each_entry(page, &h->hugepage_freelists[nid], lru)
891                 if (!PageHWPoison(page))
892                         break;
893         /*
894          * if 'non-isolated free hugepage' not found on the list,
895          * the allocation fails.
896          */
897         if (&h->hugepage_freelists[nid] == &page->lru)
898                 return NULL;
899         list_move(&page->lru, &h->hugepage_activelist);
900         set_page_refcounted(page);
901         ClearPageHugeFreed(page);
902         h->free_huge_pages--;
903         h->free_huge_pages_node[nid]--;
904         return page;
905 }
906
907 static struct page *dequeue_huge_page_nodemask(struct hstate *h, gfp_t gfp_mask, int nid,
908                 nodemask_t *nmask)
909 {
910         unsigned int cpuset_mems_cookie;
911         struct zonelist *zonelist;
912         struct zone *zone;
913         struct zoneref *z;
914         int node = -1;
915
916         zonelist = node_zonelist(nid, gfp_mask);
917
918 retry_cpuset:
919         cpuset_mems_cookie = read_mems_allowed_begin();
920         for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
921                 struct page *page;
922
923                 if (!cpuset_zone_allowed(zone, gfp_mask))
924                         continue;
925                 /*
926                  * no need to ask again on the same node. Pool is node rather than
927                  * zone aware
928                  */
929                 if (zone_to_nid(zone) == node)
930                         continue;
931                 node = zone_to_nid(zone);
932
933                 page = dequeue_huge_page_node_exact(h, node);
934                 if (page)
935                         return page;
936         }
937         if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie)))
938                 goto retry_cpuset;
939
940         return NULL;
941 }
942
943 /* Movability of hugepages depends on migration support. */
944 static inline gfp_t htlb_alloc_mask(struct hstate *h)
945 {
946         if (hugepage_migration_supported(h))
947                 return GFP_HIGHUSER_MOVABLE;
948         else
949                 return GFP_HIGHUSER;
950 }
951
952 static struct page *dequeue_huge_page_vma(struct hstate *h,
953                                 struct vm_area_struct *vma,
954                                 unsigned long address, int avoid_reserve,
955                                 long chg)
956 {
957         struct page *page;
958         struct mempolicy *mpol;
959         gfp_t gfp_mask;
960         nodemask_t *nodemask;
961         int nid;
962
963         /*
964          * A child process with MAP_PRIVATE mappings created by their parent
965          * have no page reserves. This check ensures that reservations are
966          * not "stolen". The child may still get SIGKILLed
967          */
968         if (!vma_has_reserves(vma, chg) &&
969                         h->free_huge_pages - h->resv_huge_pages == 0)
970                 goto err;
971
972         /* If reserves cannot be used, ensure enough pages are in the pool */
973         if (avoid_reserve && h->free_huge_pages - h->resv_huge_pages == 0)
974                 goto err;
975
976         gfp_mask = htlb_alloc_mask(h);
977         nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
978         page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask);
979         if (page && !avoid_reserve && vma_has_reserves(vma, chg)) {
980                 SetPagePrivate(page);
981                 h->resv_huge_pages--;
982         }
983
984         mpol_cond_put(mpol);
985         return page;
986
987 err:
988         return NULL;
989 }
990
991 /*
992  * common helper functions for hstate_next_node_to_{alloc|free}.
993  * We may have allocated or freed a huge page based on a different
994  * nodes_allowed previously, so h->next_node_to_{alloc|free} might
995  * be outside of *nodes_allowed.  Ensure that we use an allowed
996  * node for alloc or free.
997  */
998 static int next_node_allowed(int nid, nodemask_t *nodes_allowed)
999 {
1000         nid = next_node_in(nid, *nodes_allowed);
1001         VM_BUG_ON(nid >= MAX_NUMNODES);
1002
1003         return nid;
1004 }
1005
1006 static int get_valid_node_allowed(int nid, nodemask_t *nodes_allowed)
1007 {
1008         if (!node_isset(nid, *nodes_allowed))
1009                 nid = next_node_allowed(nid, nodes_allowed);
1010         return nid;
1011 }
1012
1013 /*
1014  * returns the previously saved node ["this node"] from which to
1015  * allocate a persistent huge page for the pool and advance the
1016  * next node from which to allocate, handling wrap at end of node
1017  * mask.
1018  */
1019 static int hstate_next_node_to_alloc(struct hstate *h,
1020                                         nodemask_t *nodes_allowed)
1021 {
1022         int nid;
1023
1024         VM_BUG_ON(!nodes_allowed);
1025
1026         nid = get_valid_node_allowed(h->next_nid_to_alloc, nodes_allowed);
1027         h->next_nid_to_alloc = next_node_allowed(nid, nodes_allowed);
1028
1029         return nid;
1030 }
1031
1032 /*
1033  * helper for free_pool_huge_page() - return the previously saved
1034  * node ["this node"] from which to free a huge page.  Advance the
1035  * next node id whether or not we find a free huge page to free so
1036  * that the next attempt to free addresses the next node.
1037  */
1038 static int hstate_next_node_to_free(struct hstate *h, nodemask_t *nodes_allowed)
1039 {
1040         int nid;
1041
1042         VM_BUG_ON(!nodes_allowed);
1043
1044         nid = get_valid_node_allowed(h->next_nid_to_free, nodes_allowed);
1045         h->next_nid_to_free = next_node_allowed(nid, nodes_allowed);
1046
1047         return nid;
1048 }
1049
1050 #define for_each_node_mask_to_alloc(hs, nr_nodes, node, mask)           \
1051         for (nr_nodes = nodes_weight(*mask);                            \
1052                 nr_nodes > 0 &&                                         \
1053                 ((node = hstate_next_node_to_alloc(hs, mask)) || 1);    \
1054                 nr_nodes--)
1055
1056 #define for_each_node_mask_to_free(hs, nr_nodes, node, mask)            \
1057         for (nr_nodes = nodes_weight(*mask);                            \
1058                 nr_nodes > 0 &&                                         \
1059                 ((node = hstate_next_node_to_free(hs, mask)) || 1);     \
1060                 nr_nodes--)
1061
1062 #ifdef CONFIG_ARCH_HAS_GIGANTIC_PAGE
1063 static void destroy_compound_gigantic_page(struct page *page,
1064                                         unsigned int order)
1065 {
1066         int i;
1067         int nr_pages = 1 << order;
1068         struct page *p = page + 1;
1069
1070         atomic_set(compound_mapcount_ptr(page), 0);
1071         for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1072                 clear_compound_head(p);
1073                 set_page_refcounted(p);
1074         }
1075
1076         set_compound_order(page, 0);
1077         __ClearPageHead(page);
1078 }
1079
1080 static void free_gigantic_page(struct page *page, unsigned int order)
1081 {
1082         free_contig_range(page_to_pfn(page), 1 << order);
1083 }
1084
1085 static int __alloc_gigantic_page(unsigned long start_pfn,
1086                                 unsigned long nr_pages, gfp_t gfp_mask)
1087 {
1088         unsigned long end_pfn = start_pfn + nr_pages;
1089         return alloc_contig_range(start_pfn, end_pfn, MIGRATE_MOVABLE,
1090                                   gfp_mask);
1091 }
1092
1093 static bool pfn_range_valid_gigantic(struct zone *z,
1094                         unsigned long start_pfn, unsigned long nr_pages)
1095 {
1096         unsigned long i, end_pfn = start_pfn + nr_pages;
1097         struct page *page;
1098
1099         for (i = start_pfn; i < end_pfn; i++) {
1100                 page = pfn_to_online_page(i);
1101                 if (!page)
1102                         return false;
1103
1104                 if (page_zone(page) != z)
1105                         return false;
1106
1107                 if (PageReserved(page))
1108                         return false;
1109
1110                 if (page_count(page) > 0)
1111                         return false;
1112
1113                 if (PageHuge(page))
1114                         return false;
1115         }
1116
1117         return true;
1118 }
1119
1120 static bool zone_spans_last_pfn(const struct zone *zone,
1121                         unsigned long start_pfn, unsigned long nr_pages)
1122 {
1123         unsigned long last_pfn = start_pfn + nr_pages - 1;
1124         return zone_spans_pfn(zone, last_pfn);
1125 }
1126
1127 static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
1128                 int nid, nodemask_t *nodemask)
1129 {
1130         unsigned int order = huge_page_order(h);
1131         unsigned long nr_pages = 1 << order;
1132         unsigned long ret, pfn, flags;
1133         struct zonelist *zonelist;
1134         struct zone *zone;
1135         struct zoneref *z;
1136
1137         zonelist = node_zonelist(nid, gfp_mask);
1138         for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nodemask) {
1139                 spin_lock_irqsave(&zone->lock, flags);
1140
1141                 pfn = ALIGN(zone->zone_start_pfn, nr_pages);
1142                 while (zone_spans_last_pfn(zone, pfn, nr_pages)) {
1143                         if (pfn_range_valid_gigantic(zone, pfn, nr_pages)) {
1144                                 /*
1145                                  * We release the zone lock here because
1146                                  * alloc_contig_range() will also lock the zone
1147                                  * at some point. If there's an allocation
1148                                  * spinning on this lock, it may win the race
1149                                  * and cause alloc_contig_range() to fail...
1150                                  */
1151                                 spin_unlock_irqrestore(&zone->lock, flags);
1152                                 ret = __alloc_gigantic_page(pfn, nr_pages, gfp_mask);
1153                                 if (!ret)
1154                                         return pfn_to_page(pfn);
1155                                 spin_lock_irqsave(&zone->lock, flags);
1156                         }
1157                         pfn += nr_pages;
1158                 }
1159
1160                 spin_unlock_irqrestore(&zone->lock, flags);
1161         }
1162
1163         return NULL;
1164 }
1165
1166 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid);
1167 static void prep_compound_gigantic_page(struct page *page, unsigned int order);
1168
1169 #else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE */
1170 static inline bool gigantic_page_supported(void) { return false; }
1171 static struct page *alloc_gigantic_page(struct hstate *h, gfp_t gfp_mask,
1172                 int nid, nodemask_t *nodemask) { return NULL; }
1173 static inline void free_gigantic_page(struct page *page, unsigned int order) { }
1174 static inline void destroy_compound_gigantic_page(struct page *page,
1175                                                 unsigned int order) { }
1176 #endif
1177
1178 static void update_and_free_page(struct hstate *h, struct page *page)
1179 {
1180         int i;
1181         struct page *subpage = page;
1182
1183         if (hstate_is_gigantic(h) && !gigantic_page_supported())
1184                 return;
1185
1186         h->nr_huge_pages--;
1187         h->nr_huge_pages_node[page_to_nid(page)]--;
1188         for (i = 0; i < pages_per_huge_page(h);
1189              i++, subpage = mem_map_next(subpage, page, i)) {
1190                 subpage->flags &= ~(1 << PG_locked | 1 << PG_error |
1191                                 1 << PG_referenced | 1 << PG_dirty |
1192                                 1 << PG_active | 1 << PG_private |
1193                                 1 << PG_writeback);
1194         }
1195         VM_BUG_ON_PAGE(hugetlb_cgroup_from_page(page), page);
1196         set_compound_page_dtor(page, NULL_COMPOUND_DTOR);
1197         set_page_refcounted(page);
1198         if (hstate_is_gigantic(h)) {
1199                 destroy_compound_gigantic_page(page, huge_page_order(h));
1200                 free_gigantic_page(page, huge_page_order(h));
1201         } else {
1202                 __free_pages(page, huge_page_order(h));
1203         }
1204 }
1205
1206 struct hstate *size_to_hstate(unsigned long size)
1207 {
1208         struct hstate *h;
1209
1210         for_each_hstate(h) {
1211                 if (huge_page_size(h) == size)
1212                         return h;
1213         }
1214         return NULL;
1215 }
1216
1217 /*
1218  * Test to determine whether the hugepage is "active/in-use" (i.e. being linked
1219  * to hstate->hugepage_activelist.)
1220  *
1221  * This function can be called for tail pages, but never returns true for them.
1222  */
1223 bool page_huge_active(struct page *page)
1224 {
1225         return PageHeadHuge(page) && PagePrivate(&page[1]);
1226 }
1227
1228 /* never called for tail page */
1229 void set_page_huge_active(struct page *page)
1230 {
1231         VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
1232         SetPagePrivate(&page[1]);
1233 }
1234
1235 static void clear_page_huge_active(struct page *page)
1236 {
1237         VM_BUG_ON_PAGE(!PageHeadHuge(page), page);
1238         ClearPagePrivate(&page[1]);
1239 }
1240
1241 /*
1242  * Internal hugetlb specific page flag. Do not use outside of the hugetlb
1243  * code
1244  */
1245 static inline bool PageHugeTemporary(struct page *page)
1246 {
1247         if (!PageHuge(page))
1248                 return false;
1249
1250         return (unsigned long)page[2].mapping == -1U;
1251 }
1252
1253 static inline void SetPageHugeTemporary(struct page *page)
1254 {
1255         page[2].mapping = (void *)-1U;
1256 }
1257
1258 static inline void ClearPageHugeTemporary(struct page *page)
1259 {
1260         page[2].mapping = NULL;
1261 }
1262
1263 void free_huge_page(struct page *page)
1264 {
1265         /*
1266          * Can't pass hstate in here because it is called from the
1267          * compound page destructor.
1268          */
1269         struct hstate *h = page_hstate(page);
1270         int nid = page_to_nid(page);
1271         struct hugepage_subpool *spool =
1272                 (struct hugepage_subpool *)page_private(page);
1273         bool restore_reserve;
1274
1275         set_page_private(page, 0);
1276         page->mapping = NULL;
1277         VM_BUG_ON_PAGE(page_count(page), page);
1278         VM_BUG_ON_PAGE(page_mapcount(page), page);
1279         restore_reserve = PagePrivate(page);
1280         ClearPagePrivate(page);
1281
1282         /*
1283          * If PagePrivate() was set on page, page allocation consumed a
1284          * reservation.  If the page was associated with a subpool, there
1285          * would have been a page reserved in the subpool before allocation
1286          * via hugepage_subpool_get_pages().  Since we are 'restoring' the
1287          * reservtion, do not call hugepage_subpool_put_pages() as this will
1288          * remove the reserved page from the subpool.
1289          */
1290         if (!restore_reserve) {
1291                 /*
1292                  * A return code of zero implies that the subpool will be
1293                  * under its minimum size if the reservation is not restored
1294                  * after page is free.  Therefore, force restore_reserve
1295                  * operation.
1296                  */
1297                 if (hugepage_subpool_put_pages(spool, 1) == 0)
1298                         restore_reserve = true;
1299         }
1300
1301         spin_lock(&hugetlb_lock);
1302         clear_page_huge_active(page);
1303         hugetlb_cgroup_uncharge_page(hstate_index(h),
1304                                      pages_per_huge_page(h), page);
1305         if (restore_reserve)
1306                 h->resv_huge_pages++;
1307
1308         if (PageHugeTemporary(page)) {
1309                 list_del(&page->lru);
1310                 ClearPageHugeTemporary(page);
1311                 update_and_free_page(h, page);
1312         } else if (h->surplus_huge_pages_node[nid]) {
1313                 /* remove the page from active list */
1314                 list_del(&page->lru);
1315                 update_and_free_page(h, page);
1316                 h->surplus_huge_pages--;
1317                 h->surplus_huge_pages_node[nid]--;
1318         } else {
1319                 arch_clear_hugepage_flags(page);
1320                 enqueue_huge_page(h, page);
1321         }
1322         spin_unlock(&hugetlb_lock);
1323 }
1324
1325 static void prep_new_huge_page(struct hstate *h, struct page *page, int nid)
1326 {
1327         INIT_LIST_HEAD(&page->lru);
1328         set_compound_page_dtor(page, HUGETLB_PAGE_DTOR);
1329         spin_lock(&hugetlb_lock);
1330         set_hugetlb_cgroup(page, NULL);
1331         h->nr_huge_pages++;
1332         h->nr_huge_pages_node[nid]++;
1333         ClearPageHugeFreed(page);
1334         spin_unlock(&hugetlb_lock);
1335 }
1336
1337 static void prep_compound_gigantic_page(struct page *page, unsigned int order)
1338 {
1339         int i;
1340         int nr_pages = 1 << order;
1341         struct page *p = page + 1;
1342
1343         /* we rely on prep_new_huge_page to set the destructor */
1344         set_compound_order(page, order);
1345         __ClearPageReserved(page);
1346         __SetPageHead(page);
1347         for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1348                 /*
1349                  * For gigantic hugepages allocated through bootmem at
1350                  * boot, it's safer to be consistent with the not-gigantic
1351                  * hugepages and clear the PG_reserved bit from all tail pages
1352                  * too.  Otherwse drivers using get_user_pages() to access tail
1353                  * pages may get the reference counting wrong if they see
1354                  * PG_reserved set on a tail page (despite the head page not
1355                  * having PG_reserved set).  Enforcing this consistency between
1356                  * head and tail pages allows drivers to optimize away a check
1357                  * on the head page when they need know if put_page() is needed
1358                  * after get_user_pages().
1359                  */
1360                 __ClearPageReserved(p);
1361                 set_page_count(p, 0);
1362                 set_compound_head(p, page);
1363         }
1364         atomic_set(compound_mapcount_ptr(page), -1);
1365 }
1366
1367 /*
1368  * PageHuge() only returns true for hugetlbfs pages, but not for normal or
1369  * transparent huge pages.  See the PageTransHuge() documentation for more
1370  * details.
1371  */
1372 int PageHuge(struct page *page)
1373 {
1374         if (!PageCompound(page))
1375                 return 0;
1376
1377         page = compound_head(page);
1378         return page[1].compound_dtor == HUGETLB_PAGE_DTOR;
1379 }
1380 EXPORT_SYMBOL_GPL(PageHuge);
1381
1382 /*
1383  * PageHeadHuge() only returns true for hugetlbfs head page, but not for
1384  * normal or transparent huge pages.
1385  */
1386 int PageHeadHuge(struct page *page_head)
1387 {
1388         if (!PageHead(page_head))
1389                 return 0;
1390
1391         return get_compound_page_dtor(page_head) == free_huge_page;
1392 }
1393
1394 pgoff_t hugetlb_basepage_index(struct page *page)
1395 {
1396         struct page *page_head = compound_head(page);
1397         pgoff_t index = page_index(page_head);
1398         unsigned long compound_idx;
1399
1400         if (compound_order(page_head) >= MAX_ORDER)
1401                 compound_idx = page_to_pfn(page) - page_to_pfn(page_head);
1402         else
1403                 compound_idx = page - page_head;
1404
1405         return (index << compound_order(page_head)) + compound_idx;
1406 }
1407
1408 static struct page *alloc_buddy_huge_page(struct hstate *h,
1409                 gfp_t gfp_mask, int nid, nodemask_t *nmask)
1410 {
1411         int order = huge_page_order(h);
1412         struct page *page;
1413
1414         gfp_mask |= __GFP_COMP|__GFP_RETRY_MAYFAIL|__GFP_NOWARN;
1415         if (nid == NUMA_NO_NODE)
1416                 nid = numa_mem_id();
1417         page = __alloc_pages_nodemask(gfp_mask, order, nid, nmask);
1418         if (page)
1419                 __count_vm_event(HTLB_BUDDY_PGALLOC);
1420         else
1421                 __count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1422
1423         return page;
1424 }
1425
1426 /*
1427  * Common helper to allocate a fresh hugetlb page. All specific allocators
1428  * should use this function to get new hugetlb pages
1429  */
1430 static struct page *alloc_fresh_huge_page(struct hstate *h,
1431                 gfp_t gfp_mask, int nid, nodemask_t *nmask)
1432 {
1433         struct page *page;
1434
1435         if (hstate_is_gigantic(h))
1436                 page = alloc_gigantic_page(h, gfp_mask, nid, nmask);
1437         else
1438                 page = alloc_buddy_huge_page(h, gfp_mask,
1439                                 nid, nmask);
1440         if (!page)
1441                 return NULL;
1442
1443         if (hstate_is_gigantic(h))
1444                 prep_compound_gigantic_page(page, huge_page_order(h));
1445         prep_new_huge_page(h, page, page_to_nid(page));
1446
1447         return page;
1448 }
1449
1450 /*
1451  * Allocates a fresh page to the hugetlb allocator pool in the node interleaved
1452  * manner.
1453  */
1454 static int alloc_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed)
1455 {
1456         struct page *page;
1457         int nr_nodes, node;
1458         gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
1459
1460         for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
1461                 page = alloc_fresh_huge_page(h, gfp_mask, node, nodes_allowed);
1462                 if (page)
1463                         break;
1464         }
1465
1466         if (!page)
1467                 return 0;
1468
1469         put_page(page); /* free it into the hugepage allocator */
1470
1471         return 1;
1472 }
1473
1474 /*
1475  * Free huge page from pool from next node to free.
1476  * Attempt to keep persistent huge pages more or less
1477  * balanced over allowed nodes.
1478  * Called with hugetlb_lock locked.
1479  */
1480 static int free_pool_huge_page(struct hstate *h, nodemask_t *nodes_allowed,
1481                                                          bool acct_surplus)
1482 {
1483         int nr_nodes, node;
1484         int ret = 0;
1485
1486         for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
1487                 /*
1488                  * If we're returning unused surplus pages, only examine
1489                  * nodes with surplus pages.
1490                  */
1491                 if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
1492                     !list_empty(&h->hugepage_freelists[node])) {
1493                         struct page *page =
1494                                 list_entry(h->hugepage_freelists[node].next,
1495                                           struct page, lru);
1496                         list_del(&page->lru);
1497                         h->free_huge_pages--;
1498                         h->free_huge_pages_node[node]--;
1499                         if (acct_surplus) {
1500                                 h->surplus_huge_pages--;
1501                                 h->surplus_huge_pages_node[node]--;
1502                         }
1503                         update_and_free_page(h, page);
1504                         ret = 1;
1505                         break;
1506                 }
1507         }
1508
1509         return ret;
1510 }
1511
1512 /*
1513  * Dissolve a given free hugepage into free buddy pages. This function does
1514  * nothing for in-use hugepages and non-hugepages.
1515  * This function returns values like below:
1516  *
1517  *  -EBUSY: failed to dissolved free hugepages or the hugepage is in-use
1518  *          (allocated or reserved.)
1519  *       0: successfully dissolved free hugepages or the page is not a
1520  *          hugepage (considered as already dissolved)
1521  */
1522 int dissolve_free_huge_page(struct page *page)
1523 {
1524         int rc = -EBUSY;
1525
1526 retry:
1527         /* Not to disrupt normal path by vainly holding hugetlb_lock */
1528         if (!PageHuge(page))
1529                 return 0;
1530
1531         spin_lock(&hugetlb_lock);
1532         if (!PageHuge(page)) {
1533                 rc = 0;
1534                 goto out;
1535         }
1536
1537         if (!page_count(page)) {
1538                 struct page *head = compound_head(page);
1539                 struct hstate *h = page_hstate(head);
1540                 int nid = page_to_nid(head);
1541                 if (h->free_huge_pages - h->resv_huge_pages == 0)
1542                         goto out;
1543
1544                 /*
1545                  * We should make sure that the page is already on the free list
1546                  * when it is dissolved.
1547                  */
1548                 if (unlikely(!PageHugeFreed(head))) {
1549                         spin_unlock(&hugetlb_lock);
1550                         cond_resched();
1551
1552                         /*
1553                          * Theoretically, we should return -EBUSY when we
1554                          * encounter this race. In fact, we have a chance
1555                          * to successfully dissolve the page if we do a
1556                          * retry. Because the race window is quite small.
1557                          * If we seize this opportunity, it is an optimization
1558                          * for increasing the success rate of dissolving page.
1559                          */
1560                         goto retry;
1561                 }
1562
1563                 /*
1564                  * Move PageHWPoison flag from head page to the raw error page,
1565                  * which makes any subpages rather than the error page reusable.
1566                  */
1567                 if (PageHWPoison(head) && page != head) {
1568                         SetPageHWPoison(page);
1569                         ClearPageHWPoison(head);
1570                 }
1571                 list_del(&head->lru);
1572                 h->free_huge_pages--;
1573                 h->free_huge_pages_node[nid]--;
1574                 h->max_huge_pages--;
1575                 update_and_free_page(h, head);
1576                 rc = 0;
1577         }
1578 out:
1579         spin_unlock(&hugetlb_lock);
1580         return rc;
1581 }
1582
1583 /*
1584  * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
1585  * make specified memory blocks removable from the system.
1586  * Note that this will dissolve a free gigantic hugepage completely, if any
1587  * part of it lies within the given range.
1588  * Also note that if dissolve_free_huge_page() returns with an error, all
1589  * free hugepages that were dissolved before that error are lost.
1590  */
1591 int dissolve_free_huge_pages(unsigned long start_pfn, unsigned long end_pfn)
1592 {
1593         unsigned long pfn;
1594         struct page *page;
1595         int rc = 0;
1596
1597         if (!hugepages_supported())
1598                 return rc;
1599
1600         for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << minimum_order) {
1601                 page = pfn_to_page(pfn);
1602                 rc = dissolve_free_huge_page(page);
1603                 if (rc)
1604                         break;
1605         }
1606
1607         return rc;
1608 }
1609
1610 /*
1611  * Allocates a fresh surplus page from the page allocator.
1612  */
1613 static struct page *alloc_surplus_huge_page(struct hstate *h, gfp_t gfp_mask,
1614                 int nid, nodemask_t *nmask)
1615 {
1616         struct page *page = NULL;
1617
1618         if (hstate_is_gigantic(h))
1619                 return NULL;
1620
1621         spin_lock(&hugetlb_lock);
1622         if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages)
1623                 goto out_unlock;
1624         spin_unlock(&hugetlb_lock);
1625
1626         page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask);
1627         if (!page)
1628                 return NULL;
1629
1630         spin_lock(&hugetlb_lock);
1631         /*
1632          * We could have raced with the pool size change.
1633          * Double check that and simply deallocate the new page
1634          * if we would end up overcommiting the surpluses. Abuse
1635          * temporary page to workaround the nasty free_huge_page
1636          * codeflow
1637          */
1638         if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
1639                 SetPageHugeTemporary(page);
1640                 spin_unlock(&hugetlb_lock);
1641                 put_page(page);
1642                 return NULL;
1643         } else {
1644                 h->surplus_huge_pages++;
1645                 h->surplus_huge_pages_node[page_to_nid(page)]++;
1646         }
1647
1648 out_unlock:
1649         spin_unlock(&hugetlb_lock);
1650
1651         return page;
1652 }
1653
1654 static struct page *alloc_migrate_huge_page(struct hstate *h, gfp_t gfp_mask,
1655                 int nid, nodemask_t *nmask)
1656 {
1657         struct page *page;
1658
1659         if (hstate_is_gigantic(h))
1660                 return NULL;
1661
1662         page = alloc_fresh_huge_page(h, gfp_mask, nid, nmask);
1663         if (!page)
1664                 return NULL;
1665
1666         /*
1667          * We do not account these pages as surplus because they are only
1668          * temporary and will be released properly on the last reference
1669          */
1670         SetPageHugeTemporary(page);
1671
1672         return page;
1673 }
1674
1675 /*
1676  * Use the VMA's mpolicy to allocate a huge page from the buddy.
1677  */
1678 static
1679 struct page *alloc_buddy_huge_page_with_mpol(struct hstate *h,
1680                 struct vm_area_struct *vma, unsigned long addr)
1681 {
1682         struct page *page;
1683         struct mempolicy *mpol;
1684         gfp_t gfp_mask = htlb_alloc_mask(h);
1685         int nid;
1686         nodemask_t *nodemask;
1687
1688         nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
1689         page = alloc_surplus_huge_page(h, gfp_mask, nid, nodemask);
1690         mpol_cond_put(mpol);
1691
1692         return page;
1693 }
1694
1695 /* page migration callback function */
1696 struct page *alloc_huge_page_node(struct hstate *h, int nid)
1697 {
1698         gfp_t gfp_mask = htlb_alloc_mask(h);
1699         struct page *page = NULL;
1700
1701         if (nid != NUMA_NO_NODE)
1702                 gfp_mask |= __GFP_THISNODE;
1703
1704         spin_lock(&hugetlb_lock);
1705         if (h->free_huge_pages - h->resv_huge_pages > 0)
1706                 page = dequeue_huge_page_nodemask(h, gfp_mask, nid, NULL);
1707         spin_unlock(&hugetlb_lock);
1708
1709         if (!page)
1710                 page = alloc_migrate_huge_page(h, gfp_mask, nid, NULL);
1711
1712         return page;
1713 }
1714
1715 /* page migration callback function */
1716 struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
1717                 nodemask_t *nmask)
1718 {
1719         gfp_t gfp_mask = htlb_alloc_mask(h);
1720
1721         spin_lock(&hugetlb_lock);
1722         if (h->free_huge_pages - h->resv_huge_pages > 0) {
1723                 struct page *page;
1724
1725                 page = dequeue_huge_page_nodemask(h, gfp_mask, preferred_nid, nmask);
1726                 if (page) {
1727                         spin_unlock(&hugetlb_lock);
1728                         return page;
1729                 }
1730         }
1731         spin_unlock(&hugetlb_lock);
1732
1733         return alloc_migrate_huge_page(h, gfp_mask, preferred_nid, nmask);
1734 }
1735
1736 /* mempolicy aware migration callback */
1737 struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
1738                 unsigned long address)
1739 {
1740         struct mempolicy *mpol;
1741         nodemask_t *nodemask;
1742         struct page *page;
1743         gfp_t gfp_mask;
1744         int node;
1745
1746         gfp_mask = htlb_alloc_mask(h);
1747         node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
1748         page = alloc_huge_page_nodemask(h, node, nodemask);
1749         mpol_cond_put(mpol);
1750
1751         return page;
1752 }
1753
1754 /*
1755  * Increase the hugetlb pool such that it can accommodate a reservation
1756  * of size 'delta'.
1757  */
1758 static int gather_surplus_pages(struct hstate *h, int delta)
1759 {
1760         struct list_head surplus_list;
1761         struct page *page, *tmp;
1762         int ret, i;
1763         int needed, allocated;
1764         bool alloc_ok = true;
1765
1766         needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
1767         if (needed <= 0) {
1768                 h->resv_huge_pages += delta;
1769                 return 0;
1770         }
1771
1772         allocated = 0;
1773         INIT_LIST_HEAD(&surplus_list);
1774
1775         ret = -ENOMEM;
1776 retry:
1777         spin_unlock(&hugetlb_lock);
1778         for (i = 0; i < needed; i++) {
1779                 page = alloc_surplus_huge_page(h, htlb_alloc_mask(h),
1780                                 NUMA_NO_NODE, NULL);
1781                 if (!page) {
1782                         alloc_ok = false;
1783                         break;
1784                 }
1785                 list_add(&page->lru, &surplus_list);
1786                 cond_resched();
1787         }
1788         allocated += i;
1789
1790         /*
1791          * After retaking hugetlb_lock, we need to recalculate 'needed'
1792          * because either resv_huge_pages or free_huge_pages may have changed.
1793          */
1794         spin_lock(&hugetlb_lock);
1795         needed = (h->resv_huge_pages + delta) -
1796                         (h->free_huge_pages + allocated);
1797         if (needed > 0) {
1798                 if (alloc_ok)
1799                         goto retry;
1800                 /*
1801                  * We were not able to allocate enough pages to
1802                  * satisfy the entire reservation so we free what
1803                  * we've allocated so far.
1804                  */
1805                 goto free;
1806         }
1807         /*
1808          * The surplus_list now contains _at_least_ the number of extra pages
1809          * needed to accommodate the reservation.  Add the appropriate number
1810          * of pages to the hugetlb pool and free the extras back to the buddy
1811          * allocator.  Commit the entire reservation here to prevent another
1812          * process from stealing the pages as they are added to the pool but
1813          * before they are reserved.
1814          */
1815         needed += allocated;
1816         h->resv_huge_pages += delta;
1817         ret = 0;
1818
1819         /* Free the needed pages to the hugetlb pool */
1820         list_for_each_entry_safe(page, tmp, &surplus_list, lru) {
1821                 if ((--needed) < 0)
1822                         break;
1823                 /*
1824                  * This page is now managed by the hugetlb allocator and has
1825                  * no users -- drop the buddy allocator's reference.
1826                  */
1827                 put_page_testzero(page);
1828                 VM_BUG_ON_PAGE(page_count(page), page);
1829                 enqueue_huge_page(h, page);
1830         }
1831 free:
1832         spin_unlock(&hugetlb_lock);
1833
1834         /* Free unnecessary surplus pages to the buddy allocator */
1835         list_for_each_entry_safe(page, tmp, &surplus_list, lru)
1836                 put_page(page);
1837         spin_lock(&hugetlb_lock);
1838
1839         return ret;
1840 }
1841
1842 /*
1843  * This routine has two main purposes:
1844  * 1) Decrement the reservation count (resv_huge_pages) by the value passed
1845  *    in unused_resv_pages.  This corresponds to the prior adjustments made
1846  *    to the associated reservation map.
1847  * 2) Free any unused surplus pages that may have been allocated to satisfy
1848  *    the reservation.  As many as unused_resv_pages may be freed.
1849  *
1850  * Called with hugetlb_lock held.  However, the lock could be dropped (and
1851  * reacquired) during calls to cond_resched_lock.  Whenever dropping the lock,
1852  * we must make sure nobody else can claim pages we are in the process of
1853  * freeing.  Do this by ensuring resv_huge_page always is greater than the
1854  * number of huge pages we plan to free when dropping the lock.
1855  */
1856 static void return_unused_surplus_pages(struct hstate *h,
1857                                         unsigned long unused_resv_pages)
1858 {
1859         unsigned long nr_pages;
1860
1861         /* Cannot return gigantic pages currently */
1862         if (hstate_is_gigantic(h))
1863                 goto out;
1864
1865         /*
1866          * Part (or even all) of the reservation could have been backed
1867          * by pre-allocated pages. Only free surplus pages.
1868          */
1869         nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
1870
1871         /*
1872          * We want to release as many surplus pages as possible, spread
1873          * evenly across all nodes with memory. Iterate across these nodes
1874          * until we can no longer free unreserved surplus pages. This occurs
1875          * when the nodes with surplus pages have no free pages.
1876          * free_pool_huge_page() will balance the the freed pages across the
1877          * on-line nodes with memory and will handle the hstate accounting.
1878          *
1879          * Note that we decrement resv_huge_pages as we free the pages.  If
1880          * we drop the lock, resv_huge_pages will still be sufficiently large
1881          * to cover subsequent pages we may free.
1882          */
1883         while (nr_pages--) {
1884                 h->resv_huge_pages--;
1885                 unused_resv_pages--;
1886                 if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
1887                         goto out;
1888                 cond_resched_lock(&hugetlb_lock);
1889         }
1890
1891 out:
1892         /* Fully uncommit the reservation */
1893         h->resv_huge_pages -= unused_resv_pages;
1894 }
1895
1896
1897 /*
1898  * vma_needs_reservation, vma_commit_reservation and vma_end_reservation
1899  * are used by the huge page allocation routines to manage reservations.
1900  *
1901  * vma_needs_reservation is called to determine if the huge page at addr
1902  * within the vma has an associated reservation.  If a reservation is
1903  * needed, the value 1 is returned.  The caller is then responsible for
1904  * managing the global reservation and subpool usage counts.  After
1905  * the huge page has been allocated, vma_commit_reservation is called
1906  * to add the page to the reservation map.  If the page allocation fails,
1907  * the reservation must be ended instead of committed.  vma_end_reservation
1908  * is called in such cases.
1909  *
1910  * In the normal case, vma_commit_reservation returns the same value
1911  * as the preceding vma_needs_reservation call.  The only time this
1912  * is not the case is if a reserve map was changed between calls.  It
1913  * is the responsibility of the caller to notice the difference and
1914  * take appropriate action.
1915  *
1916  * vma_add_reservation is used in error paths where a reservation must
1917  * be restored when a newly allocated huge page must be freed.  It is
1918  * to be called after calling vma_needs_reservation to determine if a
1919  * reservation exists.
1920  */
1921 enum vma_resv_mode {
1922         VMA_NEEDS_RESV,
1923         VMA_COMMIT_RESV,
1924         VMA_END_RESV,
1925         VMA_ADD_RESV,
1926 };
1927 static long __vma_reservation_common(struct hstate *h,
1928                                 struct vm_area_struct *vma, unsigned long addr,
1929                                 enum vma_resv_mode mode)
1930 {
1931         struct resv_map *resv;
1932         pgoff_t idx;
1933         long ret;
1934
1935         resv = vma_resv_map(vma);
1936         if (!resv)
1937                 return 1;
1938
1939         idx = vma_hugecache_offset(h, vma, addr);
1940         switch (mode) {
1941         case VMA_NEEDS_RESV:
1942                 ret = region_chg(resv, idx, idx + 1);
1943                 break;
1944         case VMA_COMMIT_RESV:
1945                 ret = region_add(resv, idx, idx + 1);
1946                 break;
1947         case VMA_END_RESV:
1948                 region_abort(resv, idx, idx + 1);
1949                 ret = 0;
1950                 break;
1951         case VMA_ADD_RESV:
1952                 if (vma->vm_flags & VM_MAYSHARE)
1953                         ret = region_add(resv, idx, idx + 1);
1954                 else {
1955                         region_abort(resv, idx, idx + 1);
1956                         ret = region_del(resv, idx, idx + 1);
1957                 }
1958                 break;
1959         default:
1960                 BUG();
1961         }
1962
1963         if (vma->vm_flags & VM_MAYSHARE)
1964                 return ret;
1965         else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) && ret >= 0) {
1966                 /*
1967                  * In most cases, reserves always exist for private mappings.
1968                  * However, a file associated with mapping could have been
1969                  * hole punched or truncated after reserves were consumed.
1970                  * As subsequent fault on such a range will not use reserves.
1971                  * Subtle - The reserve map for private mappings has the
1972                  * opposite meaning than that of shared mappings.  If NO
1973                  * entry is in the reserve map, it means a reservation exists.
1974                  * If an entry exists in the reserve map, it means the
1975                  * reservation has already been consumed.  As a result, the
1976                  * return value of this routine is the opposite of the
1977                  * value returned from reserve map manipulation routines above.
1978                  */
1979                 if (ret)
1980                         return 0;
1981                 else
1982                         return 1;
1983         }
1984         else
1985                 return ret < 0 ? ret : 0;
1986 }
1987
1988 static long vma_needs_reservation(struct hstate *h,
1989                         struct vm_area_struct *vma, unsigned long addr)
1990 {
1991         return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
1992 }
1993
1994 static long vma_commit_reservation(struct hstate *h,
1995                         struct vm_area_struct *vma, unsigned long addr)
1996 {
1997         return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
1998 }
1999
2000 static void vma_end_reservation(struct hstate *h,
2001                         struct vm_area_struct *vma, unsigned long addr)
2002 {
2003         (void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
2004 }
2005
2006 static long vma_add_reservation(struct hstate *h,
2007                         struct vm_area_struct *vma, unsigned long addr)
2008 {
2009         return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
2010 }
2011
2012 /*
2013  * This routine is called to restore a reservation on error paths.  In the
2014  * specific error paths, a huge page was allocated (via alloc_huge_page)
2015  * and is about to be freed.  If a reservation for the page existed,
2016  * alloc_huge_page would have consumed the reservation and set PagePrivate
2017  * in the newly allocated page.  When the page is freed via free_huge_page,
2018  * the global reservation count will be incremented if PagePrivate is set.
2019  * However, free_huge_page can not adjust the reserve map.  Adjust the
2020  * reserve map here to be consistent with global reserve count adjustments
2021  * to be made by free_huge_page.
2022  */
2023 static void restore_reserve_on_error(struct hstate *h,
2024                         struct vm_area_struct *vma, unsigned long address,
2025                         struct page *page)
2026 {
2027         if (unlikely(PagePrivate(page))) {
2028                 long rc = vma_needs_reservation(h, vma, address);
2029
2030                 if (unlikely(rc < 0)) {
2031                         /*
2032                          * Rare out of memory condition in reserve map
2033                          * manipulation.  Clear PagePrivate so that
2034                          * global reserve count will not be incremented
2035                          * by free_huge_page.  This will make it appear
2036                          * as though the reservation for this page was
2037                          * consumed.  This may prevent the task from
2038                          * faulting in the page at a later time.  This
2039                          * is better than inconsistent global huge page
2040                          * accounting of reserve counts.
2041                          */
2042                         ClearPagePrivate(page);
2043                 } else if (rc) {
2044                         rc = vma_add_reservation(h, vma, address);
2045                         if (unlikely(rc < 0))
2046                                 /*
2047                                  * See above comment about rare out of
2048                                  * memory condition.
2049                                  */
2050                                 ClearPagePrivate(page);
2051                 } else
2052                         vma_end_reservation(h, vma, address);
2053         }
2054 }
2055
2056 struct page *alloc_huge_page(struct vm_area_struct *vma,
2057                                     unsigned long addr, int avoid_reserve)
2058 {
2059         struct hugepage_subpool *spool = subpool_vma(vma);
2060         struct hstate *h = hstate_vma(vma);
2061         struct page *page;
2062         long map_chg, map_commit;
2063         long gbl_chg;
2064         int ret, idx;
2065         struct hugetlb_cgroup *h_cg;
2066
2067         idx = hstate_index(h);
2068         /*
2069          * Examine the region/reserve map to determine if the process
2070          * has a reservation for the page to be allocated.  A return
2071          * code of zero indicates a reservation exists (no change).
2072          */
2073         map_chg = gbl_chg = vma_needs_reservation(h, vma, addr);
2074         if (map_chg < 0)
2075                 return ERR_PTR(-ENOMEM);
2076
2077         /*
2078          * Processes that did not create the mapping will have no
2079          * reserves as indicated by the region/reserve map. Check
2080          * that the allocation will not exceed the subpool limit.
2081          * Allocations for MAP_NORESERVE mappings also need to be
2082          * checked against any subpool limit.
2083          */
2084         if (map_chg || avoid_reserve) {
2085                 gbl_chg = hugepage_subpool_get_pages(spool, 1);
2086                 if (gbl_chg < 0) {
2087                         vma_end_reservation(h, vma, addr);
2088                         return ERR_PTR(-ENOSPC);
2089                 }
2090
2091                 /*
2092                  * Even though there was no reservation in the region/reserve
2093                  * map, there could be reservations associated with the
2094                  * subpool that can be used.  This would be indicated if the
2095                  * return value of hugepage_subpool_get_pages() is zero.
2096                  * However, if avoid_reserve is specified we still avoid even
2097                  * the subpool reservations.
2098                  */
2099                 if (avoid_reserve)
2100                         gbl_chg = 1;
2101         }
2102
2103         ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
2104         if (ret)
2105                 goto out_subpool_put;
2106
2107         spin_lock(&hugetlb_lock);
2108         /*
2109          * glb_chg is passed to indicate whether or not a page must be taken
2110          * from the global free pool (global change).  gbl_chg == 0 indicates
2111          * a reservation exists for the allocation.
2112          */
2113         page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve, gbl_chg);
2114         if (!page) {
2115                 spin_unlock(&hugetlb_lock);
2116                 page = alloc_buddy_huge_page_with_mpol(h, vma, addr);
2117                 if (!page)
2118                         goto out_uncharge_cgroup;
2119                 if (!avoid_reserve && vma_has_reserves(vma, gbl_chg)) {
2120                         SetPagePrivate(page);
2121                         h->resv_huge_pages--;
2122                 }
2123                 spin_lock(&hugetlb_lock);
2124                 list_move(&page->lru, &h->hugepage_activelist);
2125                 /* Fall through */
2126         }
2127         hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
2128         spin_unlock(&hugetlb_lock);
2129
2130         set_page_private(page, (unsigned long)spool);
2131
2132         map_commit = vma_commit_reservation(h, vma, addr);
2133         if (unlikely(map_chg > map_commit)) {
2134                 /*
2135                  * The page was added to the reservation map between
2136                  * vma_needs_reservation and vma_commit_reservation.
2137                  * This indicates a race with hugetlb_reserve_pages.
2138                  * Adjust for the subpool count incremented above AND
2139                  * in hugetlb_reserve_pages for the same page.  Also,
2140                  * the reservation count added in hugetlb_reserve_pages
2141                  * no longer applies.
2142                  */
2143                 long rsv_adjust;
2144
2145                 rsv_adjust = hugepage_subpool_put_pages(spool, 1);
2146                 hugetlb_acct_memory(h, -rsv_adjust);
2147         }
2148         return page;
2149
2150 out_uncharge_cgroup:
2151         hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
2152 out_subpool_put:
2153         if (map_chg || avoid_reserve)
2154                 hugepage_subpool_put_pages(spool, 1);
2155         vma_end_reservation(h, vma, addr);
2156         return ERR_PTR(-ENOSPC);
2157 }
2158
2159 int alloc_bootmem_huge_page(struct hstate *h)
2160         __attribute__ ((weak, alias("__alloc_bootmem_huge_page")));
2161 int __alloc_bootmem_huge_page(struct hstate *h)
2162 {
2163         struct huge_bootmem_page *m;
2164         int nr_nodes, node;
2165
2166         for_each_node_mask_to_alloc(h, nr_nodes, node, &node_states[N_MEMORY]) {
2167                 void *addr;
2168
2169                 addr = memblock_virt_alloc_try_nid_raw(
2170                                 huge_page_size(h), huge_page_size(h),
2171                                 0, BOOTMEM_ALLOC_ACCESSIBLE, node);
2172                 if (addr) {
2173                         /*
2174                          * Use the beginning of the huge page to store the
2175                          * huge_bootmem_page struct (until gather_bootmem
2176                          * puts them into the mem_map).
2177                          */
2178                         m = addr;
2179                         goto found;
2180                 }
2181         }
2182         return 0;
2183
2184 found:
2185         BUG_ON(!IS_ALIGNED(virt_to_phys(m), huge_page_size(h)));
2186         /* Put them into a private list first because mem_map is not up yet */
2187         INIT_LIST_HEAD(&m->list);
2188         list_add(&m->list, &huge_boot_pages);
2189         m->hstate = h;
2190         return 1;
2191 }
2192
2193 static void __init prep_compound_huge_page(struct page *page,
2194                 unsigned int order)
2195 {
2196         if (unlikely(order > (MAX_ORDER - 1)))
2197                 prep_compound_gigantic_page(page, order);
2198         else
2199                 prep_compound_page(page, order);
2200 }
2201
2202 /* Put bootmem huge pages into the standard lists after mem_map is up */
2203 static void __init gather_bootmem_prealloc(void)
2204 {
2205         struct huge_bootmem_page *m;
2206
2207         list_for_each_entry(m, &huge_boot_pages, list) {
2208                 struct page *page = virt_to_page(m);
2209                 struct hstate *h = m->hstate;
2210
2211                 WARN_ON(page_count(page) != 1);
2212                 prep_compound_huge_page(page, h->order);
2213                 WARN_ON(PageReserved(page));
2214                 prep_new_huge_page(h, page, page_to_nid(page));
2215                 put_page(page); /* free it into the hugepage allocator */
2216
2217                 /*
2218                  * If we had gigantic hugepages allocated at boot time, we need
2219                  * to restore the 'stolen' pages to totalram_pages in order to
2220                  * fix confusing memory reports from free(1) and another
2221                  * side-effects, like CommitLimit going negative.
2222                  */
2223                 if (hstate_is_gigantic(h))
2224                         adjust_managed_page_count(page, 1 << h->order);
2225                 cond_resched();
2226         }
2227 }
2228
2229 static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
2230 {
2231         unsigned long i;
2232
2233         for (i = 0; i < h->max_huge_pages; ++i) {
2234                 if (hstate_is_gigantic(h)) {
2235                         if (!alloc_bootmem_huge_page(h))
2236                                 break;
2237                 } else if (!alloc_pool_huge_page(h,
2238                                          &node_states[N_MEMORY]))
2239                         break;
2240                 cond_resched();
2241         }
2242         if (i < h->max_huge_pages) {
2243                 char buf[32];
2244
2245                 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
2246                 pr_warn("HugeTLB: allocating %lu of page size %s failed.  Only allocated %lu hugepages.\n",
2247                         h->max_huge_pages, buf, i);
2248                 h->max_huge_pages = i;
2249         }
2250 }
2251
2252 static void __init hugetlb_init_hstates(void)
2253 {
2254         struct hstate *h;
2255
2256         for_each_hstate(h) {
2257                 if (minimum_order > huge_page_order(h))
2258                         minimum_order = huge_page_order(h);
2259
2260                 /* oversize hugepages were init'ed in early boot */
2261                 if (!hstate_is_gigantic(h))
2262                         hugetlb_hstate_alloc_pages(h);
2263         }
2264         VM_BUG_ON(minimum_order == UINT_MAX);
2265 }
2266
2267 static void __init report_hugepages(void)
2268 {
2269         struct hstate *h;
2270
2271         for_each_hstate(h) {
2272                 char buf[32];
2273
2274                 string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
2275                 pr_info("HugeTLB registered %s page size, pre-allocated %ld pages\n",
2276                         buf, h->free_huge_pages);
2277         }
2278 }
2279
2280 #ifdef CONFIG_HIGHMEM
2281 static void try_to_free_low(struct hstate *h, unsigned long count,
2282                                                 nodemask_t *nodes_allowed)
2283 {
2284         int i;
2285
2286         if (hstate_is_gigantic(h))
2287                 return;
2288
2289         for_each_node_mask(i, *nodes_allowed) {
2290                 struct page *page, *next;
2291                 struct list_head *freel = &h->hugepage_freelists[i];
2292                 list_for_each_entry_safe(page, next, freel, lru) {
2293                         if (count >= h->nr_huge_pages)
2294                                 return;
2295                         if (PageHighMem(page))
2296                                 continue;
2297                         list_del(&page->lru);
2298                         update_and_free_page(h, page);
2299                         h->free_huge_pages--;
2300                         h->free_huge_pages_node[page_to_nid(page)]--;
2301                 }
2302         }
2303 }
2304 #else
2305 static inline void try_to_free_low(struct hstate *h, unsigned long count,
2306                                                 nodemask_t *nodes_allowed)
2307 {
2308 }
2309 #endif
2310
2311 /*
2312  * Increment or decrement surplus_huge_pages.  Keep node-specific counters
2313  * balanced by operating on them in a round-robin fashion.
2314  * Returns 1 if an adjustment was made.
2315  */
2316 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
2317                                 int delta)
2318 {
2319         int nr_nodes, node;
2320
2321         VM_BUG_ON(delta != -1 && delta != 1);
2322
2323         if (delta < 0) {
2324                 for_each_node_mask_to_alloc(h, nr_nodes, node, nodes_allowed) {
2325                         if (h->surplus_huge_pages_node[node])
2326                                 goto found;
2327                 }
2328         } else {
2329                 for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
2330                         if (h->surplus_huge_pages_node[node] <
2331                                         h->nr_huge_pages_node[node])
2332                                 goto found;
2333                 }
2334         }
2335         return 0;
2336
2337 found:
2338         h->surplus_huge_pages += delta;
2339         h->surplus_huge_pages_node[node] += delta;
2340         return 1;
2341 }
2342
2343 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
2344 static unsigned long set_max_huge_pages(struct hstate *h, unsigned long count,
2345                                                 nodemask_t *nodes_allowed)
2346 {
2347         unsigned long min_count, ret;
2348
2349         if (hstate_is_gigantic(h) && !gigantic_page_supported())
2350                 return h->max_huge_pages;
2351
2352         /*
2353          * Increase the pool size
2354          * First take pages out of surplus state.  Then make up the
2355          * remaining difference by allocating fresh huge pages.
2356          *
2357          * We might race with alloc_surplus_huge_page() here and be unable
2358          * to convert a surplus huge page to a normal huge page. That is
2359          * not critical, though, it just means the overall size of the
2360          * pool might be one hugepage larger than it needs to be, but
2361          * within all the constraints specified by the sysctls.
2362          */
2363         spin_lock(&hugetlb_lock);
2364         while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
2365                 if (!adjust_pool_surplus(h, nodes_allowed, -1))
2366                         break;
2367         }
2368
2369         while (count > persistent_huge_pages(h)) {
2370                 /*
2371                  * If this allocation races such that we no longer need the
2372                  * page, free_huge_page will handle it by freeing the page
2373                  * and reducing the surplus.
2374                  */
2375                 spin_unlock(&hugetlb_lock);
2376
2377                 /* yield cpu to avoid soft lockup */
2378                 cond_resched();
2379
2380                 ret = alloc_pool_huge_page(h, nodes_allowed);
2381                 spin_lock(&hugetlb_lock);
2382                 if (!ret)
2383                         goto out;
2384
2385                 /* Bail for signals. Probably ctrl-c from user */
2386                 if (signal_pending(current))
2387                         goto out;
2388         }
2389
2390         /*
2391          * Decrease the pool size
2392          * First return free pages to the buddy allocator (being careful
2393          * to keep enough around to satisfy reservations).  Then place
2394          * pages into surplus state as needed so the pool will shrink
2395          * to the desired size as pages become free.
2396          *
2397          * By placing pages into the surplus state independent of the
2398          * overcommit value, we are allowing the surplus pool size to
2399          * exceed overcommit. There are few sane options here. Since
2400          * alloc_surplus_huge_page() is checking the global counter,
2401          * though, we'll note that we're not allowed to exceed surplus
2402          * and won't grow the pool anywhere else. Not until one of the
2403          * sysctls are changed, or the surplus pages go out of use.
2404          */
2405         min_count = h->resv_huge_pages + h->nr_huge_pages - h->free_huge_pages;
2406         min_count = max(count, min_count);
2407         try_to_free_low(h, min_count, nodes_allowed);
2408         while (min_count < persistent_huge_pages(h)) {
2409                 if (!free_pool_huge_page(h, nodes_allowed, 0))
2410                         break;
2411                 cond_resched_lock(&hugetlb_lock);
2412         }
2413         while (count < persistent_huge_pages(h)) {
2414                 if (!adjust_pool_surplus(h, nodes_allowed, 1))
2415                         break;
2416         }
2417 out:
2418         ret = persistent_huge_pages(h);
2419         spin_unlock(&hugetlb_lock);
2420         return ret;
2421 }
2422
2423 #define HSTATE_ATTR_RO(_name) \
2424         static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
2425
2426 #define HSTATE_ATTR(_name) \
2427         static struct kobj_attribute _name##_attr = \
2428                 __ATTR(_name, 0644, _name##_show, _name##_store)
2429
2430 static struct kobject *hugepages_kobj;
2431 static struct kobject *hstate_kobjs[HUGE_MAX_HSTATE];
2432
2433 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp);
2434
2435 static struct hstate *kobj_to_hstate(struct kobject *kobj, int *nidp)
2436 {
2437         int i;
2438
2439         for (i = 0; i < HUGE_MAX_HSTATE; i++)
2440                 if (hstate_kobjs[i] == kobj) {
2441                         if (nidp)
2442                                 *nidp = NUMA_NO_NODE;
2443                         return &hstates[i];
2444                 }
2445
2446         return kobj_to_node_hstate(kobj, nidp);
2447 }
2448
2449 static ssize_t nr_hugepages_show_common(struct kobject *kobj,
2450                                         struct kobj_attribute *attr, char *buf)
2451 {
2452         struct hstate *h;
2453         unsigned long nr_huge_pages;
2454         int nid;
2455
2456         h = kobj_to_hstate(kobj, &nid);
2457         if (nid == NUMA_NO_NODE)
2458                 nr_huge_pages = h->nr_huge_pages;
2459         else
2460                 nr_huge_pages = h->nr_huge_pages_node[nid];
2461
2462         return sprintf(buf, "%lu\n", nr_huge_pages);
2463 }
2464
2465 static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
2466                                            struct hstate *h, int nid,
2467                                            unsigned long count, size_t len)
2468 {
2469         int err;
2470         NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
2471
2472         if (hstate_is_gigantic(h) && !gigantic_page_supported()) {
2473                 err = -EINVAL;
2474                 goto out;
2475         }
2476
2477         if (nid == NUMA_NO_NODE) {
2478                 /*
2479                  * global hstate attribute
2480                  */
2481                 if (!(obey_mempolicy &&
2482                                 init_nodemask_of_mempolicy(nodes_allowed))) {
2483                         NODEMASK_FREE(nodes_allowed);
2484                         nodes_allowed = &node_states[N_MEMORY];
2485                 }
2486         } else if (nodes_allowed) {
2487                 /*
2488                  * per node hstate attribute: adjust count to global,
2489                  * but restrict alloc/free to the specified node.
2490                  */
2491                 count += h->nr_huge_pages - h->nr_huge_pages_node[nid];
2492                 init_nodemask_of_node(nodes_allowed, nid);
2493         } else
2494                 nodes_allowed = &node_states[N_MEMORY];
2495
2496         h->max_huge_pages = set_max_huge_pages(h, count, nodes_allowed);
2497
2498         if (nodes_allowed != &node_states[N_MEMORY])
2499                 NODEMASK_FREE(nodes_allowed);
2500
2501         return len;
2502 out:
2503         NODEMASK_FREE(nodes_allowed);
2504         return err;
2505 }
2506
2507 static ssize_t nr_hugepages_store_common(bool obey_mempolicy,
2508                                          struct kobject *kobj, const char *buf,
2509                                          size_t len)
2510 {
2511         struct hstate *h;
2512         unsigned long count;
2513         int nid;
2514         int err;
2515
2516         err = kstrtoul(buf, 10, &count);
2517         if (err)
2518                 return err;
2519
2520         h = kobj_to_hstate(kobj, &nid);
2521         return __nr_hugepages_store_common(obey_mempolicy, h, nid, count, len);
2522 }
2523
2524 static ssize_t nr_hugepages_show(struct kobject *kobj,
2525                                        struct kobj_attribute *attr, char *buf)
2526 {
2527         return nr_hugepages_show_common(kobj, attr, buf);
2528 }
2529
2530 static ssize_t nr_hugepages_store(struct kobject *kobj,
2531                struct kobj_attribute *attr, const char *buf, size_t len)
2532 {
2533         return nr_hugepages_store_common(false, kobj, buf, len);
2534 }
2535 HSTATE_ATTR(nr_hugepages);
2536
2537 #ifdef CONFIG_NUMA
2538
2539 /*
2540  * hstate attribute for optionally mempolicy-based constraint on persistent
2541  * huge page alloc/free.
2542  */
2543 static ssize_t nr_hugepages_mempolicy_show(struct kobject *kobj,
2544                                        struct kobj_attribute *attr, char *buf)
2545 {
2546         return nr_hugepages_show_common(kobj, attr, buf);
2547 }
2548
2549 static ssize_t nr_hugepages_mempolicy_store(struct kobject *kobj,
2550                struct kobj_attribute *attr, const char *buf, size_t len)
2551 {
2552         return nr_hugepages_store_common(true, kobj, buf, len);
2553 }
2554 HSTATE_ATTR(nr_hugepages_mempolicy);
2555 #endif
2556
2557
2558 static ssize_t nr_overcommit_hugepages_show(struct kobject *kobj,
2559                                         struct kobj_attribute *attr, char *buf)
2560 {
2561         struct hstate *h = kobj_to_hstate(kobj, NULL);
2562         return sprintf(buf, "%lu\n", h->nr_overcommit_huge_pages);
2563 }
2564
2565 static ssize_t nr_overcommit_hugepages_store(struct kobject *kobj,
2566                 struct kobj_attribute *attr, const char *buf, size_t count)
2567 {
2568         int err;
2569         unsigned long input;
2570         struct hstate *h = kobj_to_hstate(kobj, NULL);
2571
2572         if (hstate_is_gigantic(h))
2573                 return -EINVAL;
2574
2575         err = kstrtoul(buf, 10, &input);
2576         if (err)
2577                 return err;
2578
2579         spin_lock(&hugetlb_lock);
2580         h->nr_overcommit_huge_pages = input;
2581         spin_unlock(&hugetlb_lock);
2582
2583         return count;
2584 }
2585 HSTATE_ATTR(nr_overcommit_hugepages);
2586
2587 static ssize_t free_hugepages_show(struct kobject *kobj,
2588                                         struct kobj_attribute *attr, char *buf)
2589 {
2590         struct hstate *h;
2591         unsigned long free_huge_pages;
2592         int nid;
2593
2594         h = kobj_to_hstate(kobj, &nid);
2595         if (nid == NUMA_NO_NODE)
2596                 free_huge_pages = h->free_huge_pages;
2597         else
2598                 free_huge_pages = h->free_huge_pages_node[nid];
2599
2600         return sprintf(buf, "%lu\n", free_huge_pages);
2601 }
2602 HSTATE_ATTR_RO(free_hugepages);
2603
2604 static ssize_t resv_hugepages_show(struct kobject *kobj,
2605                                         struct kobj_attribute *attr, char *buf)
2606 {
2607         struct hstate *h = kobj_to_hstate(kobj, NULL);
2608         return sprintf(buf, "%lu\n", h->resv_huge_pages);
2609 }
2610 HSTATE_ATTR_RO(resv_hugepages);
2611
2612 static ssize_t surplus_hugepages_show(struct kobject *kobj,
2613                                         struct kobj_attribute *attr, char *buf)
2614 {
2615         struct hstate *h;
2616         unsigned long surplus_huge_pages;
2617         int nid;
2618
2619         h = kobj_to_hstate(kobj, &nid);
2620         if (nid == NUMA_NO_NODE)
2621                 surplus_huge_pages = h->surplus_huge_pages;
2622         else
2623                 surplus_huge_pages = h->surplus_huge_pages_node[nid];
2624
2625         return sprintf(buf, "%lu\n", surplus_huge_pages);
2626 }
2627 HSTATE_ATTR_RO(surplus_hugepages);
2628
2629 static struct attribute *hstate_attrs[] = {
2630         &nr_hugepages_attr.attr,
2631         &nr_overcommit_hugepages_attr.attr,
2632         &free_hugepages_attr.attr,
2633         &resv_hugepages_attr.attr,
2634         &surplus_hugepages_attr.attr,
2635 #ifdef CONFIG_NUMA
2636         &nr_hugepages_mempolicy_attr.attr,
2637 #endif
2638         NULL,
2639 };
2640
2641 static const struct attribute_group hstate_attr_group = {
2642         .attrs = hstate_attrs,
2643 };
2644
2645 static int hugetlb_sysfs_add_hstate(struct hstate *h, struct kobject *parent,
2646                                     struct kobject **hstate_kobjs,
2647                                     const struct attribute_group *hstate_attr_group)
2648 {
2649         int retval;
2650         int hi = hstate_index(h);
2651
2652         hstate_kobjs[hi] = kobject_create_and_add(h->name, parent);
2653         if (!hstate_kobjs[hi])
2654                 return -ENOMEM;
2655
2656         retval = sysfs_create_group(hstate_kobjs[hi], hstate_attr_group);
2657         if (retval) {
2658                 kobject_put(hstate_kobjs[hi]);
2659                 hstate_kobjs[hi] = NULL;
2660         }
2661
2662         return retval;
2663 }
2664
2665 static void __init hugetlb_sysfs_init(void)
2666 {
2667         struct hstate *h;
2668         int err;
2669
2670         hugepages_kobj = kobject_create_and_add("hugepages", mm_kobj);
2671         if (!hugepages_kobj)
2672                 return;
2673
2674         for_each_hstate(h) {
2675                 err = hugetlb_sysfs_add_hstate(h, hugepages_kobj,
2676                                          hstate_kobjs, &hstate_attr_group);
2677                 if (err)
2678                         pr_err("Hugetlb: Unable to add hstate %s", h->name);
2679         }
2680 }
2681
2682 #ifdef CONFIG_NUMA
2683
2684 /*
2685  * node_hstate/s - associate per node hstate attributes, via their kobjects,
2686  * with node devices in node_devices[] using a parallel array.  The array
2687  * index of a node device or _hstate == node id.
2688  * This is here to avoid any static dependency of the node device driver, in
2689  * the base kernel, on the hugetlb module.
2690  */
2691 struct node_hstate {
2692         struct kobject          *hugepages_kobj;
2693         struct kobject          *hstate_kobjs[HUGE_MAX_HSTATE];
2694 };
2695 static struct node_hstate node_hstates[MAX_NUMNODES];
2696
2697 /*
2698  * A subset of global hstate attributes for node devices
2699  */
2700 static struct attribute *per_node_hstate_attrs[] = {
2701         &nr_hugepages_attr.attr,
2702         &free_hugepages_attr.attr,
2703         &surplus_hugepages_attr.attr,
2704         NULL,
2705 };
2706
2707 static const struct attribute_group per_node_hstate_attr_group = {
2708         .attrs = per_node_hstate_attrs,
2709 };
2710
2711 /*
2712  * kobj_to_node_hstate - lookup global hstate for node device hstate attr kobj.
2713  * Returns node id via non-NULL nidp.
2714  */
2715 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2716 {
2717         int nid;
2718
2719         for (nid = 0; nid < nr_node_ids; nid++) {
2720                 struct node_hstate *nhs = &node_hstates[nid];
2721                 int i;
2722                 for (i = 0; i < HUGE_MAX_HSTATE; i++)
2723                         if (nhs->hstate_kobjs[i] == kobj) {
2724                                 if (nidp)
2725                                         *nidp = nid;
2726                                 return &hstates[i];
2727                         }
2728         }
2729
2730         BUG();
2731         return NULL;
2732 }
2733
2734 /*
2735  * Unregister hstate attributes from a single node device.
2736  * No-op if no hstate attributes attached.
2737  */
2738 static void hugetlb_unregister_node(struct node *node)
2739 {
2740         struct hstate *h;
2741         struct node_hstate *nhs = &node_hstates[node->dev.id];
2742
2743         if (!nhs->hugepages_kobj)
2744                 return;         /* no hstate attributes */
2745
2746         for_each_hstate(h) {
2747                 int idx = hstate_index(h);
2748                 if (nhs->hstate_kobjs[idx]) {
2749                         kobject_put(nhs->hstate_kobjs[idx]);
2750                         nhs->hstate_kobjs[idx] = NULL;
2751                 }
2752         }
2753
2754         kobject_put(nhs->hugepages_kobj);
2755         nhs->hugepages_kobj = NULL;
2756 }
2757
2758
2759 /*
2760  * Register hstate attributes for a single node device.
2761  * No-op if attributes already registered.
2762  */
2763 static void hugetlb_register_node(struct node *node)
2764 {
2765         struct hstate *h;
2766         struct node_hstate *nhs = &node_hstates[node->dev.id];
2767         int err;
2768
2769         if (nhs->hugepages_kobj)
2770                 return;         /* already allocated */
2771
2772         nhs->hugepages_kobj = kobject_create_and_add("hugepages",
2773                                                         &node->dev.kobj);
2774         if (!nhs->hugepages_kobj)
2775                 return;
2776
2777         for_each_hstate(h) {
2778                 err = hugetlb_sysfs_add_hstate(h, nhs->hugepages_kobj,
2779                                                 nhs->hstate_kobjs,
2780                                                 &per_node_hstate_attr_group);
2781                 if (err) {
2782                         pr_err("Hugetlb: Unable to add hstate %s for node %d\n",
2783                                 h->name, node->dev.id);
2784                         hugetlb_unregister_node(node);
2785                         break;
2786                 }
2787         }
2788 }
2789
2790 /*
2791  * hugetlb init time:  register hstate attributes for all registered node
2792  * devices of nodes that have memory.  All on-line nodes should have
2793  * registered their associated device by this time.
2794  */
2795 static void __init hugetlb_register_all_nodes(void)
2796 {
2797         int nid;
2798
2799         for_each_node_state(nid, N_MEMORY) {
2800                 struct node *node = node_devices[nid];
2801                 if (node->dev.id == nid)
2802                         hugetlb_register_node(node);
2803         }
2804
2805         /*
2806          * Let the node device driver know we're here so it can
2807          * [un]register hstate attributes on node hotplug.
2808          */
2809         register_hugetlbfs_with_node(hugetlb_register_node,
2810                                      hugetlb_unregister_node);
2811 }
2812 #else   /* !CONFIG_NUMA */
2813
2814 static struct hstate *kobj_to_node_hstate(struct kobject *kobj, int *nidp)
2815 {
2816         BUG();
2817         if (nidp)
2818                 *nidp = -1;
2819         return NULL;
2820 }
2821
2822 static void hugetlb_register_all_nodes(void) { }
2823
2824 #endif
2825
2826 static int __init hugetlb_init(void)
2827 {
2828         int i;
2829
2830         if (!hugepages_supported())
2831                 return 0;
2832
2833         if (!size_to_hstate(default_hstate_size)) {
2834                 if (default_hstate_size != 0) {
2835                         pr_err("HugeTLB: unsupported default_hugepagesz %lu. Reverting to %lu\n",
2836                                default_hstate_size, HPAGE_SIZE);
2837                 }
2838
2839                 default_hstate_size = HPAGE_SIZE;
2840                 if (!size_to_hstate(default_hstate_size))
2841                         hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
2842         }
2843         default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size));
2844         if (default_hstate_max_huge_pages) {
2845                 if (!default_hstate.max_huge_pages)
2846                         default_hstate.max_huge_pages = default_hstate_max_huge_pages;
2847         }
2848
2849         hugetlb_init_hstates();
2850         gather_bootmem_prealloc();
2851         report_hugepages();
2852
2853         hugetlb_sysfs_init();
2854         hugetlb_register_all_nodes();
2855         hugetlb_cgroup_file_init();
2856
2857 #ifdef CONFIG_SMP
2858         num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
2859 #else
2860         num_fault_mutexes = 1;
2861 #endif
2862         hugetlb_fault_mutex_table =
2863                 kmalloc_array(num_fault_mutexes, sizeof(struct mutex),
2864                               GFP_KERNEL);
2865         BUG_ON(!hugetlb_fault_mutex_table);
2866
2867         for (i = 0; i < num_fault_mutexes; i++)
2868                 mutex_init(&hugetlb_fault_mutex_table[i]);
2869         return 0;
2870 }
2871 subsys_initcall(hugetlb_init);
2872
2873 /* Should be called on processing a hugepagesz=... option */
2874 void __init hugetlb_bad_size(void)
2875 {
2876         parsed_valid_hugepagesz = false;
2877 }
2878
2879 void __init hugetlb_add_hstate(unsigned int order)
2880 {
2881         struct hstate *h;
2882         unsigned long i;
2883
2884         if (size_to_hstate(PAGE_SIZE << order)) {
2885                 pr_warn("hugepagesz= specified twice, ignoring\n");
2886                 return;
2887         }
2888         BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
2889         BUG_ON(order == 0);
2890         h = &hstates[hugetlb_max_hstate++];
2891         h->order = order;
2892         h->mask = ~((1ULL << (order + PAGE_SHIFT)) - 1);
2893         h->nr_huge_pages = 0;
2894         h->free_huge_pages = 0;
2895         for (i = 0; i < MAX_NUMNODES; ++i)
2896                 INIT_LIST_HEAD(&h->hugepage_freelists[i]);
2897         INIT_LIST_HEAD(&h->hugepage_activelist);
2898         h->next_nid_to_alloc = first_memory_node;
2899         h->next_nid_to_free = first_memory_node;
2900         snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
2901                                         huge_page_size(h)/1024);
2902
2903         parsed_hstate = h;
2904 }
2905
2906 static int __init hugetlb_nrpages_setup(char *s)
2907 {
2908         unsigned long *mhp;
2909         static unsigned long *last_mhp;
2910
2911         if (!parsed_valid_hugepagesz) {
2912                 pr_warn("hugepages = %s preceded by "
2913                         "an unsupported hugepagesz, ignoring\n", s);
2914                 parsed_valid_hugepagesz = true;
2915                 return 1;
2916         }
2917         /*
2918          * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter yet,
2919          * so this hugepages= parameter goes to the "default hstate".
2920          */
2921         else if (!hugetlb_max_hstate)
2922                 mhp = &default_hstate_max_huge_pages;
2923         else
2924                 mhp = &parsed_hstate->max_huge_pages;
2925
2926         if (mhp == last_mhp) {
2927                 pr_warn("hugepages= specified twice without interleaving hugepagesz=, ignoring\n");
2928                 return 1;
2929         }
2930
2931         if (sscanf(s, "%lu", mhp) <= 0)
2932                 *mhp = 0;
2933
2934         /*
2935          * Global state is always initialized later in hugetlb_init.
2936          * But we need to allocate >= MAX_ORDER hstates here early to still
2937          * use the bootmem allocator.
2938          */
2939         if (hugetlb_max_hstate && parsed_hstate->order >= MAX_ORDER)
2940                 hugetlb_hstate_alloc_pages(parsed_hstate);
2941
2942         last_mhp = mhp;
2943
2944         return 1;
2945 }
2946 __setup("hugepages=", hugetlb_nrpages_setup);
2947
2948 static int __init hugetlb_default_setup(char *s)
2949 {
2950         default_hstate_size = memparse(s, &s);
2951         return 1;
2952 }
2953 __setup("default_hugepagesz=", hugetlb_default_setup);
2954
2955 static unsigned int cpuset_mems_nr(unsigned int *array)
2956 {
2957         int node;
2958         unsigned int nr = 0;
2959
2960         for_each_node_mask(node, cpuset_current_mems_allowed)
2961                 nr += array[node];
2962
2963         return nr;
2964 }
2965
2966 #ifdef CONFIG_SYSCTL
2967 static int proc_hugetlb_doulongvec_minmax(struct ctl_table *table, int write,
2968                                           void *buffer, size_t *length,
2969                                           loff_t *ppos, unsigned long *out)
2970 {
2971         struct ctl_table dup_table;
2972
2973         /*
2974          * In order to avoid races with __do_proc_doulongvec_minmax(), we
2975          * can duplicate the @table and alter the duplicate of it.
2976          */
2977         dup_table = *table;
2978         dup_table.data = out;
2979
2980         return proc_doulongvec_minmax(&dup_table, write, buffer, length, ppos);
2981 }
2982
2983 static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
2984                          struct ctl_table *table, int write,
2985                          void __user *buffer, size_t *length, loff_t *ppos)
2986 {
2987         struct hstate *h = &default_hstate;
2988         unsigned long tmp = h->max_huge_pages;
2989         int ret;
2990
2991         if (!hugepages_supported())
2992                 return -EOPNOTSUPP;
2993
2994         ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
2995                                              &tmp);
2996         if (ret)
2997                 goto out;
2998
2999         if (write)
3000                 ret = __nr_hugepages_store_common(obey_mempolicy, h,
3001                                                   NUMA_NO_NODE, tmp, *length);
3002 out:
3003         return ret;
3004 }
3005
3006 int hugetlb_sysctl_handler(struct ctl_table *table, int write,
3007                           void __user *buffer, size_t *length, loff_t *ppos)
3008 {
3009
3010         return hugetlb_sysctl_handler_common(false, table, write,
3011                                                         buffer, length, ppos);
3012 }
3013
3014 #ifdef CONFIG_NUMA
3015 int hugetlb_mempolicy_sysctl_handler(struct ctl_table *table, int write,
3016                           void __user *buffer, size_t *length, loff_t *ppos)
3017 {
3018         return hugetlb_sysctl_handler_common(true, table, write,
3019                                                         buffer, length, ppos);
3020 }
3021 #endif /* CONFIG_NUMA */
3022
3023 int hugetlb_overcommit_handler(struct ctl_table *table, int write,
3024                         void __user *buffer,
3025                         size_t *length, loff_t *ppos)
3026 {
3027         struct hstate *h = &default_hstate;
3028         unsigned long tmp;
3029         int ret;
3030
3031         if (!hugepages_supported())
3032                 return -EOPNOTSUPP;
3033
3034         tmp = h->nr_overcommit_huge_pages;
3035
3036         if (write && hstate_is_gigantic(h))
3037                 return -EINVAL;
3038
3039         ret = proc_hugetlb_doulongvec_minmax(table, write, buffer, length, ppos,
3040                                              &tmp);
3041         if (ret)
3042                 goto out;
3043
3044         if (write) {
3045                 spin_lock(&hugetlb_lock);
3046                 h->nr_overcommit_huge_pages = tmp;
3047                 spin_unlock(&hugetlb_lock);
3048         }
3049 out:
3050         return ret;
3051 }
3052
3053 #endif /* CONFIG_SYSCTL */
3054
3055 void hugetlb_report_meminfo(struct seq_file *m)
3056 {
3057         struct hstate *h;
3058         unsigned long total = 0;
3059
3060         if (!hugepages_supported())
3061                 return;
3062
3063         for_each_hstate(h) {
3064                 unsigned long count = h->nr_huge_pages;
3065
3066                 total += (PAGE_SIZE << huge_page_order(h)) * count;
3067
3068                 if (h == &default_hstate)
3069                         seq_printf(m,
3070                                    "HugePages_Total:   %5lu\n"
3071                                    "HugePages_Free:    %5lu\n"
3072                                    "HugePages_Rsvd:    %5lu\n"
3073                                    "HugePages_Surp:    %5lu\n"
3074                                    "Hugepagesize:   %8lu kB\n",
3075                                    count,
3076                                    h->free_huge_pages,
3077                                    h->resv_huge_pages,
3078                                    h->surplus_huge_pages,
3079                                    (PAGE_SIZE << huge_page_order(h)) / 1024);
3080         }
3081
3082         seq_printf(m, "Hugetlb:        %8lu kB\n", total / 1024);
3083 }
3084
3085 int hugetlb_report_node_meminfo(int nid, char *buf)
3086 {
3087         struct hstate *h = &default_hstate;
3088         if (!hugepages_supported())
3089                 return 0;
3090         return sprintf(buf,
3091                 "Node %d HugePages_Total: %5u\n"
3092                 "Node %d HugePages_Free:  %5u\n"
3093                 "Node %d HugePages_Surp:  %5u\n",
3094                 nid, h->nr_huge_pages_node[nid],
3095                 nid, h->free_huge_pages_node[nid],
3096                 nid, h->surplus_huge_pages_node[nid]);
3097 }
3098
3099 void hugetlb_show_meminfo(void)
3100 {
3101         struct hstate *h;
3102         int nid;
3103
3104         if (!hugepages_supported())
3105                 return;
3106
3107         for_each_node_state(nid, N_MEMORY)
3108                 for_each_hstate(h)
3109                         pr_info("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
3110                                 nid,
3111                                 h->nr_huge_pages_node[nid],
3112                                 h->free_huge_pages_node[nid],
3113                                 h->surplus_huge_pages_node[nid],
3114                                 1UL << (huge_page_order(h) + PAGE_SHIFT - 10));
3115 }
3116
3117 void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
3118 {
3119         seq_printf(m, "HugetlbPages:\t%8lu kB\n",
3120                    atomic_long_read(&mm->hugetlb_usage) << (PAGE_SHIFT - 10));
3121 }
3122
3123 /* Return the number pages of memory we physically have, in PAGE_SIZE units. */
3124 unsigned long hugetlb_total_pages(void)
3125 {
3126         struct hstate *h;
3127         unsigned long nr_total_pages = 0;
3128
3129         for_each_hstate(h)
3130                 nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
3131         return nr_total_pages;
3132 }
3133
3134 static int hugetlb_acct_memory(struct hstate *h, long delta)
3135 {
3136         int ret = -ENOMEM;
3137
3138         spin_lock(&hugetlb_lock);
3139         /*
3140          * When cpuset is configured, it breaks the strict hugetlb page
3141          * reservation as the accounting is done on a global variable. Such
3142          * reservation is completely rubbish in the presence of cpuset because
3143          * the reservation is not checked against page availability for the
3144          * current cpuset. Application can still potentially OOM'ed by kernel
3145          * with lack of free htlb page in cpuset that the task is in.
3146          * Attempt to enforce strict accounting with cpuset is almost
3147          * impossible (or too ugly) because cpuset is too fluid that
3148          * task or memory node can be dynamically moved between cpusets.
3149          *
3150          * The change of semantics for shared hugetlb mapping with cpuset is
3151          * undesirable. However, in order to preserve some of the semantics,
3152          * we fall back to check against current free page availability as
3153          * a best attempt and hopefully to minimize the impact of changing
3154          * semantics that cpuset has.
3155          */
3156         if (delta > 0) {
3157                 if (gather_surplus_pages(h, delta) < 0)
3158                         goto out;
3159
3160                 if (delta > cpuset_mems_nr(h->free_huge_pages_node)) {
3161                         return_unused_surplus_pages(h, delta);
3162                         goto out;
3163                 }
3164         }
3165
3166         ret = 0;
3167         if (delta < 0)
3168                 return_unused_surplus_pages(h, (unsigned long) -delta);
3169
3170 out:
3171         spin_unlock(&hugetlb_lock);
3172         return ret;
3173 }
3174
3175 static void hugetlb_vm_op_open(struct vm_area_struct *vma)
3176 {
3177         struct resv_map *resv = vma_resv_map(vma);
3178
3179         /*
3180          * This new VMA should share its siblings reservation map if present.
3181          * The VMA will only ever have a valid reservation map pointer where
3182          * it is being copied for another still existing VMA.  As that VMA
3183          * has a reference to the reservation map it cannot disappear until
3184          * after this open call completes.  It is therefore safe to take a
3185          * new reference here without additional locking.
3186          */
3187         if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
3188                 kref_get(&resv->refs);
3189 }
3190
3191 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
3192 {
3193         struct hstate *h = hstate_vma(vma);
3194         struct resv_map *resv = vma_resv_map(vma);
3195         struct hugepage_subpool *spool = subpool_vma(vma);
3196         unsigned long reserve, start, end;
3197         long gbl_reserve;
3198
3199         if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
3200                 return;
3201
3202         start = vma_hugecache_offset(h, vma, vma->vm_start);
3203         end = vma_hugecache_offset(h, vma, vma->vm_end);
3204
3205         reserve = (end - start) - region_count(resv, start, end);
3206
3207         kref_put(&resv->refs, resv_map_release);
3208
3209         if (reserve) {
3210                 /*
3211                  * Decrement reserve counts.  The global reserve count may be
3212                  * adjusted if the subpool has a minimum size.
3213                  */
3214                 gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
3215                 hugetlb_acct_memory(h, -gbl_reserve);
3216         }
3217 }
3218
3219 static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
3220 {
3221         if (addr & ~(huge_page_mask(hstate_vma(vma))))
3222                 return -EINVAL;
3223         return 0;
3224 }
3225
3226 static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
3227 {
3228         struct hstate *hstate = hstate_vma(vma);
3229
3230         return 1UL << huge_page_shift(hstate);
3231 }
3232
3233 /*
3234  * We cannot handle pagefaults against hugetlb pages at all.  They cause
3235  * handle_mm_fault() to try to instantiate regular-sized pages in the
3236  * hugegpage VMA.  do_page_fault() is supposed to trap this, so BUG is we get
3237  * this far.
3238  */
3239 static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf)
3240 {
3241         BUG();
3242         return 0;
3243 }
3244
3245 /*
3246  * When a new function is introduced to vm_operations_struct and added
3247  * to hugetlb_vm_ops, please consider adding the function to shm_vm_ops.
3248  * This is because under System V memory model, mappings created via
3249  * shmget/shmat with "huge page" specified are backed by hugetlbfs files,
3250  * their original vm_ops are overwritten with shm_vm_ops.
3251  */
3252 const struct vm_operations_struct hugetlb_vm_ops = {
3253         .fault = hugetlb_vm_op_fault,
3254         .open = hugetlb_vm_op_open,
3255         .close = hugetlb_vm_op_close,
3256         .split = hugetlb_vm_op_split,
3257         .pagesize = hugetlb_vm_op_pagesize,
3258 };
3259
3260 static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
3261                                 int writable)
3262 {
3263         pte_t entry;
3264
3265         if (writable) {
3266                 entry = huge_pte_mkwrite(huge_pte_mkdirty(mk_huge_pte(page,
3267                                          vma->vm_page_prot)));
3268         } else {
3269                 entry = huge_pte_wrprotect(mk_huge_pte(page,
3270                                            vma->vm_page_prot));
3271         }
3272         entry = pte_mkyoung(entry);
3273         entry = pte_mkhuge(entry);
3274         entry = arch_make_huge_pte(entry, vma, page, writable);
3275
3276         return entry;
3277 }
3278
3279 static void set_huge_ptep_writable(struct vm_area_struct *vma,
3280                                    unsigned long address, pte_t *ptep)
3281 {
3282         pte_t entry;
3283
3284         entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(ptep)));
3285         if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
3286                 update_mmu_cache(vma, address, ptep);
3287 }
3288
3289 bool is_hugetlb_entry_migration(pte_t pte)
3290 {
3291         swp_entry_t swp;
3292
3293         if (huge_pte_none(pte) || pte_present(pte))
3294                 return false;
3295         swp = pte_to_swp_entry(pte);
3296         if (non_swap_entry(swp) && is_migration_entry(swp))
3297                 return true;
3298         else
3299                 return false;
3300 }
3301
3302 static int is_hugetlb_entry_hwpoisoned(pte_t pte)
3303 {
3304         swp_entry_t swp;
3305
3306         if (huge_pte_none(pte) || pte_present(pte))
3307                 return 0;
3308         swp = pte_to_swp_entry(pte);
3309         if (non_swap_entry(swp) && is_hwpoison_entry(swp))
3310                 return 1;
3311         else
3312                 return 0;
3313 }
3314
3315 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
3316                             struct vm_area_struct *vma)
3317 {
3318         pte_t *src_pte, *dst_pte, entry, dst_entry;
3319         struct page *ptepage;
3320         unsigned long addr;
3321         int cow;
3322         struct hstate *h = hstate_vma(vma);
3323         unsigned long sz = huge_page_size(h);
3324         unsigned long mmun_start;       /* For mmu_notifiers */
3325         unsigned long mmun_end;         /* For mmu_notifiers */
3326         int ret = 0;
3327
3328         cow = (vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE;
3329
3330         mmun_start = vma->vm_start;
3331         mmun_end = vma->vm_end;
3332         if (cow)
3333                 mmu_notifier_invalidate_range_start(src, mmun_start, mmun_end);
3334
3335         for (addr = vma->vm_start; addr < vma->vm_end; addr += sz) {
3336                 spinlock_t *src_ptl, *dst_ptl;
3337                 src_pte = huge_pte_offset(src, addr, sz);
3338                 if (!src_pte)
3339                         continue;
3340                 dst_pte = huge_pte_alloc(dst, addr, sz);
3341                 if (!dst_pte) {
3342                         ret = -ENOMEM;
3343                         break;
3344                 }
3345
3346                 /*
3347                  * If the pagetables are shared don't copy or take references.
3348                  * dst_pte == src_pte is the common case of src/dest sharing.
3349                  *
3350                  * However, src could have 'unshared' and dst shares with
3351                  * another vma.  If dst_pte !none, this implies sharing.
3352                  * Check here before taking page table lock, and once again
3353                  * after taking the lock below.
3354                  */
3355                 dst_entry = huge_ptep_get(dst_pte);
3356                 if ((dst_pte == src_pte) || !huge_pte_none(dst_entry))
3357                         continue;
3358
3359                 dst_ptl = huge_pte_lock(h, dst, dst_pte);
3360                 src_ptl = huge_pte_lockptr(h, src, src_pte);
3361                 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
3362                 entry = huge_ptep_get(src_pte);
3363                 dst_entry = huge_ptep_get(dst_pte);
3364                 if (huge_pte_none(entry) || !huge_pte_none(dst_entry)) {
3365                         /*
3366                          * Skip if src entry none.  Also, skip in the
3367                          * unlikely case dst entry !none as this implies
3368                          * sharing with another vma.
3369                          */
3370                         ;
3371                 } else if (unlikely(is_hugetlb_entry_migration(entry) ||
3372                                     is_hugetlb_entry_hwpoisoned(entry))) {
3373                         swp_entry_t swp_entry = pte_to_swp_entry(entry);
3374
3375                         if (is_write_migration_entry(swp_entry) && cow) {
3376                                 /*
3377                                  * COW mappings require pages in both
3378                                  * parent and child to be set to read.
3379                                  */
3380                                 make_migration_entry_read(&swp_entry);
3381                                 entry = swp_entry_to_pte(swp_entry);
3382                                 set_huge_swap_pte_at(src, addr, src_pte,
3383                                                      entry, sz);
3384                         }
3385                         set_huge_swap_pte_at(dst, addr, dst_pte, entry, sz);
3386                 } else {
3387                         if (cow) {
3388                                 /*
3389                                  * No need to notify as we are downgrading page
3390                                  * table protection not changing it to point
3391                                  * to a new page.
3392                                  *
3393                                  * See Documentation/vm/mmu_notifier.rst
3394                                  */
3395                                 huge_ptep_set_wrprotect(src, addr, src_pte);
3396                         }
3397                         entry = huge_ptep_get(src_pte);
3398                         ptepage = pte_page(entry);
3399                         get_page(ptepage);
3400                         page_dup_rmap(ptepage, true);
3401                         set_huge_pte_at(dst, addr, dst_pte, entry);
3402                         hugetlb_count_add(pages_per_huge_page(h), dst);
3403                 }
3404                 spin_unlock(src_ptl);
3405                 spin_unlock(dst_ptl);
3406         }
3407
3408         if (cow)
3409                 mmu_notifier_invalidate_range_end(src, mmun_start, mmun_end);
3410
3411         return ret;
3412 }
3413
3414 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
3415                             unsigned long start, unsigned long end,
3416                             struct page *ref_page)
3417 {
3418         struct mm_struct *mm = vma->vm_mm;
3419         unsigned long address;
3420         pte_t *ptep;
3421         pte_t pte;
3422         spinlock_t *ptl;
3423         struct page *page;
3424         struct hstate *h = hstate_vma(vma);
3425         unsigned long sz = huge_page_size(h);
3426         unsigned long mmun_start = start;       /* For mmu_notifiers */
3427         unsigned long mmun_end   = end;         /* For mmu_notifiers */
3428
3429         WARN_ON(!is_vm_hugetlb_page(vma));
3430         BUG_ON(start & ~huge_page_mask(h));
3431         BUG_ON(end & ~huge_page_mask(h));
3432
3433         /*
3434          * This is a hugetlb vma, all the pte entries should point
3435          * to huge page.
3436          */
3437         tlb_remove_check_page_size_change(tlb, sz);
3438         tlb_start_vma(tlb, vma);
3439
3440         /*
3441          * If sharing possible, alert mmu notifiers of worst case.
3442          */
3443         adjust_range_if_pmd_sharing_possible(vma, &mmun_start, &mmun_end);
3444         mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
3445         address = start;
3446         for (; address < end; address += sz) {
3447                 ptep = huge_pte_offset(mm, address, sz);
3448                 if (!ptep)
3449                         continue;
3450
3451                 ptl = huge_pte_lock(h, mm, ptep);
3452                 if (huge_pmd_unshare(mm, &address, ptep)) {
3453                         spin_unlock(ptl);
3454                         /*
3455                          * We just unmapped a page of PMDs by clearing a PUD.
3456                          * The caller's TLB flush range should cover this area.
3457                          */
3458                         continue;
3459                 }
3460
3461                 pte = huge_ptep_get(ptep);
3462                 if (huge_pte_none(pte)) {
3463                         spin_unlock(ptl);
3464                         continue;
3465                 }
3466
3467                 /*
3468                  * Migrating hugepage or HWPoisoned hugepage is already
3469                  * unmapped and its refcount is dropped, so just clear pte here.
3470                  */
3471                 if (unlikely(!pte_present(pte))) {
3472                         huge_pte_clear(mm, address, ptep, sz);
3473                         spin_unlock(ptl);
3474                         continue;
3475                 }
3476
3477                 page = pte_page(pte);
3478                 /*
3479                  * If a reference page is supplied, it is because a specific
3480                  * page is being unmapped, not a range. Ensure the page we
3481                  * are about to unmap is the actual page of interest.
3482                  */
3483                 if (ref_page) {
3484                         if (page != ref_page) {
3485                                 spin_unlock(ptl);
3486                                 continue;
3487                         }
3488                         /*
3489                          * Mark the VMA as having unmapped its page so that
3490                          * future faults in this VMA will fail rather than
3491                          * looking like data was lost
3492                          */
3493                         set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
3494                 }
3495
3496                 pte = huge_ptep_get_and_clear(mm, address, ptep);
3497                 tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
3498                 if (huge_pte_dirty(pte))
3499                         set_page_dirty(page);
3500
3501                 hugetlb_count_sub(pages_per_huge_page(h), mm);
3502                 page_remove_rmap(page, true);
3503
3504                 spin_unlock(ptl);
3505                 tlb_remove_page_size(tlb, page, huge_page_size(h));
3506                 /*
3507                  * Bail out after unmapping reference page if supplied
3508                  */
3509                 if (ref_page)
3510                         break;
3511         }
3512         mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
3513         tlb_end_vma(tlb, vma);
3514 }
3515
3516 void __unmap_hugepage_range_final(struct mmu_gather *tlb,
3517                           struct vm_area_struct *vma, unsigned long start,
3518                           unsigned long end, struct page *ref_page)
3519 {
3520         __unmap_hugepage_range(tlb, vma, start, end, ref_page);
3521
3522         /*
3523          * Clear this flag so that x86's huge_pmd_share page_table_shareable
3524          * test will fail on a vma being torn down, and not grab a page table
3525          * on its way out.  We're lucky that the flag has such an appropriate
3526          * name, and can in fact be safely cleared here. We could clear it
3527          * before the __unmap_hugepage_range above, but all that's necessary
3528          * is to clear it before releasing the i_mmap_rwsem. This works
3529          * because in the context this is called, the VMA is about to be
3530          * destroyed and the i_mmap_rwsem is held.
3531          */
3532         vma->vm_flags &= ~VM_MAYSHARE;
3533 }
3534
3535 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
3536                           unsigned long end, struct page *ref_page)
3537 {
3538         struct mm_struct *mm;
3539         struct mmu_gather tlb;
3540         unsigned long tlb_start = start;
3541         unsigned long tlb_end = end;
3542
3543         /*
3544          * If shared PMDs were possibly used within this vma range, adjust
3545          * start/end for worst case tlb flushing.
3546          * Note that we can not be sure if PMDs are shared until we try to
3547          * unmap pages.  However, we want to make sure TLB flushing covers
3548          * the largest possible range.
3549          */
3550         adjust_range_if_pmd_sharing_possible(vma, &tlb_start, &tlb_end);
3551
3552         mm = vma->vm_mm;
3553
3554         tlb_gather_mmu(&tlb, mm, tlb_start, tlb_end);
3555         __unmap_hugepage_range(&tlb, vma, start, end, ref_page);
3556         tlb_finish_mmu(&tlb, tlb_start, tlb_end);
3557 }
3558
3559 /*
3560  * This is called when the original mapper is failing to COW a MAP_PRIVATE
3561  * mappping it owns the reserve page for. The intention is to unmap the page
3562  * from other VMAs and let the children be SIGKILLed if they are faulting the
3563  * same region.
3564  */
3565 static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
3566                               struct page *page, unsigned long address)
3567 {
3568         struct hstate *h = hstate_vma(vma);
3569         struct vm_area_struct *iter_vma;
3570         struct address_space *mapping;
3571         pgoff_t pgoff;
3572
3573         /*
3574          * vm_pgoff is in PAGE_SIZE units, hence the different calculation
3575          * from page cache lookup which is in HPAGE_SIZE units.
3576          */
3577         address = address & huge_page_mask(h);
3578         pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
3579                         vma->vm_pgoff;
3580         mapping = vma->vm_file->f_mapping;
3581
3582         /*
3583          * Take the mapping lock for the duration of the table walk. As
3584          * this mapping should be shared between all the VMAs,
3585          * __unmap_hugepage_range() is called as the lock is already held
3586          */
3587         i_mmap_lock_write(mapping);
3588         vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
3589                 /* Do not unmap the current VMA */
3590                 if (iter_vma == vma)
3591                         continue;
3592
3593                 /*
3594                  * Shared VMAs have their own reserves and do not affect
3595                  * MAP_PRIVATE accounting but it is possible that a shared
3596                  * VMA is using the same page so check and skip such VMAs.
3597                  */
3598                 if (iter_vma->vm_flags & VM_MAYSHARE)
3599                         continue;
3600
3601                 /*
3602                  * Unmap the page from other VMAs without their own reserves.
3603                  * They get marked to be SIGKILLed if they fault in these
3604                  * areas. This is because a future no-page fault on this VMA
3605                  * could insert a zeroed page instead of the data existing
3606                  * from the time of fork. This would look like data corruption
3607                  */
3608                 if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
3609                         unmap_hugepage_range(iter_vma, address,
3610                                              address + huge_page_size(h), page);
3611         }
3612         i_mmap_unlock_write(mapping);
3613 }
3614
3615 /*
3616  * Hugetlb_cow() should be called with page lock of the original hugepage held.
3617  * Called with hugetlb_instantiation_mutex held and pte_page locked so we
3618  * cannot race with other handlers or page migration.
3619  * Keep the pte_same checks anyway to make transition from the mutex easier.
3620  */
3621 static vm_fault_t hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
3622                        unsigned long address, pte_t *ptep,
3623                        struct page *pagecache_page, spinlock_t *ptl)
3624 {
3625         pte_t pte;
3626         struct hstate *h = hstate_vma(vma);
3627         struct page *old_page, *new_page;
3628         int outside_reserve = 0;
3629         vm_fault_t ret = 0;
3630         unsigned long mmun_start;       /* For mmu_notifiers */
3631         unsigned long mmun_end;         /* For mmu_notifiers */
3632         unsigned long haddr = address & huge_page_mask(h);
3633
3634         pte = huge_ptep_get(ptep);
3635         old_page = pte_page(pte);
3636
3637 retry_avoidcopy:
3638         /* If no-one else is actually using this page, avoid the copy
3639          * and just make the page writable */
3640         if (page_mapcount(old_page) == 1 && PageAnon(old_page)) {
3641                 page_move_anon_rmap(old_page, vma);
3642                 set_huge_ptep_writable(vma, haddr, ptep);
3643                 return 0;
3644         }
3645
3646         /*
3647          * If the process that created a MAP_PRIVATE mapping is about to
3648          * perform a COW due to a shared page count, attempt to satisfy
3649          * the allocation without using the existing reserves. The pagecache
3650          * page is used to determine if the reserve at this address was
3651          * consumed or not. If reserves were used, a partial faulted mapping
3652          * at the time of fork() could consume its reserves on COW instead
3653          * of the full address range.
3654          */
3655         if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
3656                         old_page != pagecache_page)
3657                 outside_reserve = 1;
3658
3659         get_page(old_page);
3660
3661         /*
3662          * Drop page table lock as buddy allocator may be called. It will
3663          * be acquired again before returning to the caller, as expected.
3664          */
3665         spin_unlock(ptl);
3666         new_page = alloc_huge_page(vma, haddr, outside_reserve);
3667
3668         if (IS_ERR(new_page)) {
3669                 /*
3670                  * If a process owning a MAP_PRIVATE mapping fails to COW,
3671                  * it is due to references held by a child and an insufficient
3672                  * huge page pool. To guarantee the original mappers
3673                  * reliability, unmap the page from child processes. The child
3674                  * may get SIGKILLed if it later faults.
3675                  */
3676                 if (outside_reserve) {
3677                         put_page(old_page);
3678                         BUG_ON(huge_pte_none(pte));
3679                         unmap_ref_private(mm, vma, old_page, haddr);
3680                         BUG_ON(huge_pte_none(pte));
3681                         spin_lock(ptl);
3682                         ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
3683                         if (likely(ptep &&
3684                                    pte_same(huge_ptep_get(ptep), pte)))
3685                                 goto retry_avoidcopy;
3686                         /*
3687                          * race occurs while re-acquiring page table
3688                          * lock, and our job is done.
3689                          */
3690                         return 0;
3691                 }
3692
3693                 ret = vmf_error(PTR_ERR(new_page));
3694                 goto out_release_old;
3695         }
3696
3697         /*
3698          * When the original hugepage is shared one, it does not have
3699          * anon_vma prepared.
3700          */
3701         if (unlikely(anon_vma_prepare(vma))) {
3702                 ret = VM_FAULT_OOM;
3703                 goto out_release_all;
3704         }
3705
3706         copy_user_huge_page(new_page, old_page, address, vma,
3707                             pages_per_huge_page(h));
3708         __SetPageUptodate(new_page);
3709
3710         mmun_start = haddr;
3711         mmun_end = mmun_start + huge_page_size(h);
3712         mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
3713
3714         /*
3715          * Retake the page table lock to check for racing updates
3716          * before the page tables are altered
3717          */
3718         spin_lock(ptl);
3719         ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
3720         if (likely(ptep && pte_same(huge_ptep_get(ptep), pte))) {
3721                 ClearPagePrivate(new_page);
3722
3723                 /* Break COW */
3724                 huge_ptep_clear_flush(vma, haddr, ptep);
3725                 mmu_notifier_invalidate_range(mm, mmun_start, mmun_end);
3726                 set_huge_pte_at(mm, haddr, ptep,
3727                                 make_huge_pte(vma, new_page, 1));
3728                 page_remove_rmap(old_page, true);
3729                 hugepage_add_new_anon_rmap(new_page, vma, haddr);
3730                 set_page_huge_active(new_page);
3731                 /* Make the old page be freed below */
3732                 new_page = old_page;
3733         }
3734         spin_unlock(ptl);
3735         mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
3736 out_release_all:
3737         restore_reserve_on_error(h, vma, haddr, new_page);
3738         put_page(new_page);
3739 out_release_old:
3740         put_page(old_page);
3741
3742         spin_lock(ptl); /* Caller expects lock to be held */
3743         return ret;
3744 }
3745
3746 /* Return the pagecache page at a given address within a VMA */
3747 static struct page *hugetlbfs_pagecache_page(struct hstate *h,
3748                         struct vm_area_struct *vma, unsigned long address)
3749 {
3750         struct address_space *mapping;
3751         pgoff_t idx;
3752
3753         mapping = vma->vm_file->f_mapping;
3754         idx = vma_hugecache_offset(h, vma, address);
3755
3756         return find_lock_page(mapping, idx);
3757 }
3758
3759 /*
3760  * Return whether there is a pagecache page to back given address within VMA.
3761  * Caller follow_hugetlb_page() holds page_table_lock so we cannot lock_page.
3762  */
3763 static bool hugetlbfs_pagecache_present(struct hstate *h,
3764                         struct vm_area_struct *vma, unsigned long address)
3765 {
3766         struct address_space *mapping;
3767         pgoff_t idx;
3768         struct page *page;
3769
3770         mapping = vma->vm_file->f_mapping;
3771         idx = vma_hugecache_offset(h, vma, address);
3772
3773         page = find_get_page(mapping, idx);
3774         if (page)
3775                 put_page(page);
3776         return page != NULL;
3777 }
3778
3779 int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
3780                            pgoff_t idx)
3781 {
3782         struct inode *inode = mapping->host;
3783         struct hstate *h = hstate_inode(inode);
3784         int err = add_to_page_cache(page, mapping, idx, GFP_KERNEL);
3785
3786         if (err)
3787                 return err;
3788         ClearPagePrivate(page);
3789
3790         /*
3791          * set page dirty so that it will not be removed from cache/file
3792          * by non-hugetlbfs specific code paths.
3793          */
3794         set_page_dirty(page);
3795
3796         spin_lock(&inode->i_lock);
3797         inode->i_blocks += blocks_per_huge_page(h);
3798         spin_unlock(&inode->i_lock);
3799         return 0;
3800 }
3801
3802 static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
3803                         struct vm_area_struct *vma,
3804                         struct address_space *mapping, pgoff_t idx,
3805                         unsigned long address, pte_t *ptep, unsigned int flags)
3806 {
3807         struct hstate *h = hstate_vma(vma);
3808         vm_fault_t ret = VM_FAULT_SIGBUS;
3809         int anon_rmap = 0;
3810         unsigned long size;
3811         struct page *page;
3812         pte_t new_pte;
3813         spinlock_t *ptl;
3814         unsigned long haddr = address & huge_page_mask(h);
3815         bool new_page = false;
3816
3817         /*
3818          * Currently, we are forced to kill the process in the event the
3819          * original mapper has unmapped pages from the child due to a failed
3820          * COW. Warn that such a situation has occurred as it may not be obvious
3821          */
3822         if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
3823                 pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
3824                            current->pid);
3825                 return ret;
3826         }
3827
3828         /*
3829          * Use page lock to guard against racing truncation
3830          * before we get page_table_lock.
3831          */
3832 retry:
3833         page = find_lock_page(mapping, idx);
3834         if (!page) {
3835                 size = i_size_read(mapping->host) >> huge_page_shift(h);
3836                 if (idx >= size)
3837                         goto out;
3838
3839                 /*
3840                  * Check for page in userfault range
3841                  */
3842                 if (userfaultfd_missing(vma)) {
3843                         u32 hash;
3844                         struct vm_fault vmf = {
3845                                 .vma = vma,
3846                                 .address = haddr,
3847                                 .flags = flags,
3848                                 /*
3849                                  * Hard to debug if it ends up being
3850                                  * used by a callee that assumes
3851                                  * something about the other
3852                                  * uninitialized fields... same as in
3853                                  * memory.c
3854                                  */
3855                         };
3856
3857                         /*
3858                          * hugetlb_fault_mutex must be dropped before
3859                          * handling userfault.  Reacquire after handling
3860                          * fault to make calling code simpler.
3861                          */
3862                         hash = hugetlb_fault_mutex_hash(h, mapping, idx);
3863                         mutex_unlock(&hugetlb_fault_mutex_table[hash]);
3864                         ret = handle_userfault(&vmf, VM_UFFD_MISSING);
3865                         mutex_lock(&hugetlb_fault_mutex_table[hash]);
3866                         goto out;
3867                 }
3868
3869                 page = alloc_huge_page(vma, haddr, 0);
3870                 if (IS_ERR(page)) {
3871                         ret = vmf_error(PTR_ERR(page));
3872                         goto out;
3873                 }
3874                 clear_huge_page(page, address, pages_per_huge_page(h));
3875                 __SetPageUptodate(page);
3876                 new_page = true;
3877
3878                 if (vma->vm_flags & VM_MAYSHARE) {
3879                         int err = huge_add_to_page_cache(page, mapping, idx);
3880                         if (err) {
3881                                 put_page(page);
3882                                 if (err == -EEXIST)
3883                                         goto retry;
3884                                 goto out;
3885                         }
3886                 } else {
3887                         lock_page(page);
3888                         if (unlikely(anon_vma_prepare(vma))) {
3889                                 ret = VM_FAULT_OOM;
3890                                 goto backout_unlocked;
3891                         }
3892                         anon_rmap = 1;
3893                 }
3894         } else {
3895                 /*
3896                  * If memory error occurs between mmap() and fault, some process
3897                  * don't have hwpoisoned swap entry for errored virtual address.
3898                  * So we need to block hugepage fault by PG_hwpoison bit check.
3899                  */
3900                 if (unlikely(PageHWPoison(page))) {
3901                         ret = VM_FAULT_HWPOISON_LARGE |
3902                                 VM_FAULT_SET_HINDEX(hstate_index(h));
3903                         goto backout_unlocked;
3904                 }
3905         }
3906
3907         /*
3908          * If we are going to COW a private mapping later, we examine the
3909          * pending reservations for this page now. This will ensure that
3910          * any allocations necessary to record that reservation occur outside
3911          * the spinlock.
3912          */
3913         if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
3914                 if (vma_needs_reservation(h, vma, haddr) < 0) {
3915                         ret = VM_FAULT_OOM;
3916                         goto backout_unlocked;
3917                 }
3918                 /* Just decrements count, does not deallocate */
3919                 vma_end_reservation(h, vma, haddr);
3920         }
3921
3922         ptl = huge_pte_lock(h, mm, ptep);
3923         size = i_size_read(mapping->host) >> huge_page_shift(h);
3924         if (idx >= size)
3925                 goto backout;
3926
3927         ret = 0;
3928         if (!huge_pte_none(huge_ptep_get(ptep)))
3929                 goto backout;
3930
3931         if (anon_rmap) {
3932                 ClearPagePrivate(page);
3933                 hugepage_add_new_anon_rmap(page, vma, haddr);
3934         } else
3935                 page_dup_rmap(page, true);
3936         new_pte = make_huge_pte(vma, page, ((vma->vm_flags & VM_WRITE)
3937                                 && (vma->vm_flags & VM_SHARED)));
3938         set_huge_pte_at(mm, haddr, ptep, new_pte);
3939
3940         hugetlb_count_add(pages_per_huge_page(h), mm);
3941         if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
3942                 /* Optimization, do the COW without a second fault */
3943                 ret = hugetlb_cow(mm, vma, address, ptep, page, ptl);
3944         }
3945
3946         spin_unlock(ptl);
3947
3948         /*
3949          * Only make newly allocated pages active.  Existing pages found
3950          * in the pagecache could be !page_huge_active() if they have been
3951          * isolated for migration.
3952          */
3953         if (new_page)
3954                 set_page_huge_active(page);
3955
3956         unlock_page(page);
3957 out:
3958         return ret;
3959
3960 backout:
3961         spin_unlock(ptl);
3962 backout_unlocked:
3963         unlock_page(page);
3964         restore_reserve_on_error(h, vma, haddr, page);
3965         put_page(page);
3966         goto out;
3967 }
3968
3969 #ifdef CONFIG_SMP
3970 u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
3971                             pgoff_t idx)
3972 {
3973         unsigned long key[2];
3974         u32 hash;
3975
3976         key[0] = (unsigned long) mapping;
3977         key[1] = idx;
3978
3979         hash = jhash2((u32 *)&key, sizeof(key)/(sizeof(u32)), 0);
3980
3981         return hash & (num_fault_mutexes - 1);
3982 }
3983 #else
3984 /*
3985  * For uniprocesor systems we always use a single mutex, so just
3986  * return 0 and avoid the hashing overhead.
3987  */
3988 u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
3989                             pgoff_t idx)
3990 {
3991         return 0;
3992 }
3993 #endif
3994
3995 vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3996                         unsigned long address, unsigned int flags)
3997 {
3998         pte_t *ptep, entry;
3999         spinlock_t *ptl;
4000         vm_fault_t ret;
4001         u32 hash;
4002         pgoff_t idx;
4003         struct page *page = NULL;
4004         struct page *pagecache_page = NULL;
4005         struct hstate *h = hstate_vma(vma);
4006         struct address_space *mapping;
4007         int need_wait_lock = 0;
4008         unsigned long haddr = address & huge_page_mask(h);
4009
4010         ptep = huge_pte_offset(mm, haddr, huge_page_size(h));
4011         if (ptep) {
4012                 entry = huge_ptep_get(ptep);
4013                 if (unlikely(is_hugetlb_entry_migration(entry))) {
4014                         migration_entry_wait_huge(vma, mm, ptep);
4015                         return 0;
4016                 } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry)))
4017                         return VM_FAULT_HWPOISON_LARGE |
4018                                 VM_FAULT_SET_HINDEX(hstate_index(h));
4019         } else {
4020                 ptep = huge_pte_alloc(mm, haddr, huge_page_size(h));
4021                 if (!ptep)
4022                         return VM_FAULT_OOM;
4023         }
4024
4025         mapping = vma->vm_file->f_mapping;
4026         idx = vma_hugecache_offset(h, vma, haddr);
4027
4028         /*
4029          * Serialize hugepage allocation and instantiation, so that we don't
4030          * get spurious allocation failures if two CPUs race to instantiate
4031          * the same page in the page cache.
4032          */
4033         hash = hugetlb_fault_mutex_hash(h, mapping, idx);
4034         mutex_lock(&hugetlb_fault_mutex_table[hash]);
4035
4036         entry = huge_ptep_get(ptep);
4037         if (huge_pte_none(entry)) {
4038                 ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep, flags);
4039                 goto out_mutex;
4040         }
4041
4042         ret = 0;
4043
4044         /*
4045          * entry could be a migration/hwpoison entry at this point, so this
4046          * check prevents the kernel from going below assuming that we have
4047          * a active hugepage in pagecache. This goto expects the 2nd page fault,
4048          * and is_hugetlb_entry_(migration|hwpoisoned) check will properly
4049          * handle it.
4050          */
4051         if (!pte_present(entry))
4052                 goto out_mutex;
4053
4054         /*
4055          * If we are going to COW the mapping later, we examine the pending
4056          * reservations for this page now. This will ensure that any
4057          * allocations necessary to record that reservation occur outside the
4058          * spinlock. For private mappings, we also lookup the pagecache
4059          * page now as it is used to determine if a reservation has been
4060          * consumed.
4061          */
4062         if ((flags & FAULT_FLAG_WRITE) && !huge_pte_write(entry)) {
4063                 if (vma_needs_reservation(h, vma, haddr) < 0) {
4064                         ret = VM_FAULT_OOM;
4065                         goto out_mutex;
4066                 }
4067                 /* Just decrements count, does not deallocate */
4068                 vma_end_reservation(h, vma, haddr);
4069
4070                 if (!(vma->vm_flags & VM_MAYSHARE))
4071                         pagecache_page = hugetlbfs_pagecache_page(h,
4072                                                                 vma, haddr);
4073         }
4074
4075         ptl = huge_pte_lock(h, mm, ptep);
4076
4077         /* Check for a racing update before calling hugetlb_cow */
4078         if (unlikely(!pte_same(entry, huge_ptep_get(ptep))))
4079                 goto out_ptl;
4080
4081         /*
4082          * hugetlb_cow() requires page locks of pte_page(entry) and
4083          * pagecache_page, so here we need take the former one
4084          * when page != pagecache_page or !pagecache_page.
4085          */
4086         page = pte_page(entry);
4087         if (page != pagecache_page)
4088                 if (!trylock_page(page)) {
4089                         need_wait_lock = 1;
4090                         goto out_ptl;
4091                 }
4092
4093         get_page(page);
4094
4095         if (flags & FAULT_FLAG_WRITE) {
4096                 if (!huge_pte_write(entry)) {
4097                         ret = hugetlb_cow(mm, vma, address, ptep,
4098                                           pagecache_page, ptl);
4099                         goto out_put_page;
4100                 }
4101                 entry = huge_pte_mkdirty(entry);
4102         }
4103         entry = pte_mkyoung(entry);
4104         if (huge_ptep_set_access_flags(vma, haddr, ptep, entry,
4105                                                 flags & FAULT_FLAG_WRITE))
4106                 update_mmu_cache(vma, haddr, ptep);
4107 out_put_page:
4108         if (page != pagecache_page)
4109                 unlock_page(page);
4110         put_page(page);
4111 out_ptl:
4112         spin_unlock(ptl);
4113
4114         if (pagecache_page) {
4115                 unlock_page(pagecache_page);
4116                 put_page(pagecache_page);
4117         }
4118 out_mutex:
4119         mutex_unlock(&hugetlb_fault_mutex_table[hash]);
4120         /*
4121          * Generally it's safe to hold refcount during waiting page lock. But
4122          * here we just wait to defer the next page fault to avoid busy loop and
4123          * the page is not used after unlocked before returning from the current
4124          * page fault. So we are safe from accessing freed page, even if we wait
4125          * here without taking refcount.
4126          */
4127         if (need_wait_lock)
4128                 wait_on_page_locked(page);
4129         return ret;
4130 }
4131
4132 /*
4133  * Used by userfaultfd UFFDIO_COPY.  Based on mcopy_atomic_pte with
4134  * modifications for huge pages.
4135  */
4136 int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
4137                             pte_t *dst_pte,
4138                             struct vm_area_struct *dst_vma,
4139                             unsigned long dst_addr,
4140                             unsigned long src_addr,
4141                             struct page **pagep)
4142 {
4143         struct address_space *mapping;
4144         pgoff_t idx;
4145         unsigned long size;
4146         int vm_shared = dst_vma->vm_flags & VM_SHARED;
4147         struct hstate *h = hstate_vma(dst_vma);
4148         pte_t _dst_pte;
4149         spinlock_t *ptl;
4150         int ret;
4151         struct page *page;
4152
4153         if (!*pagep) {
4154                 /* If a page already exists, then it's UFFDIO_COPY for
4155                  * a non-missing case. Return -EEXIST.
4156                  */
4157                 if (vm_shared &&
4158                     hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
4159                         ret = -EEXIST;
4160                         goto out;
4161                 }
4162
4163                 page = alloc_huge_page(dst_vma, dst_addr, 0);
4164                 if (IS_ERR(page)) {
4165                         ret = -ENOMEM;
4166                         goto out;
4167                 }
4168
4169                 ret = copy_huge_page_from_user(page,
4170                                                 (const void __user *) src_addr,
4171                                                 pages_per_huge_page(h), false);
4172
4173                 /* fallback to copy_from_user outside mmap_sem */
4174                 if (unlikely(ret)) {
4175                         ret = -ENOENT;
4176                         *pagep = page;
4177                         /* don't free the page */
4178                         goto out;
4179                 }
4180         } else {
4181                 page = *pagep;
4182                 *pagep = NULL;
4183         }
4184
4185         /*
4186          * The memory barrier inside __SetPageUptodate makes sure that
4187          * preceding stores to the page contents become visible before
4188          * the set_pte_at() write.
4189          */
4190         __SetPageUptodate(page);
4191
4192         mapping = dst_vma->vm_file->f_mapping;
4193         idx = vma_hugecache_offset(h, dst_vma, dst_addr);
4194
4195         /*
4196          * If shared, add to page cache
4197          */
4198         if (vm_shared) {
4199                 size = i_size_read(mapping->host) >> huge_page_shift(h);
4200                 ret = -EFAULT;
4201                 if (idx >= size)
4202                         goto out_release_nounlock;
4203
4204                 /*
4205                  * Serialization between remove_inode_hugepages() and
4206                  * huge_add_to_page_cache() below happens through the
4207                  * hugetlb_fault_mutex_table that here must be hold by
4208                  * the caller.
4209                  */
4210                 ret = huge_add_to_page_cache(page, mapping, idx);
4211                 if (ret)
4212                         goto out_release_nounlock;
4213         }
4214
4215         ptl = huge_pte_lockptr(h, dst_mm, dst_pte);
4216         spin_lock(ptl);
4217
4218         /*
4219          * Recheck the i_size after holding PT lock to make sure not
4220          * to leave any page mapped (as page_mapped()) beyond the end
4221          * of the i_size (remove_inode_hugepages() is strict about
4222          * enforcing that). If we bail out here, we'll also leave a
4223          * page in the radix tree in the vm_shared case beyond the end
4224          * of the i_size, but remove_inode_hugepages() will take care
4225          * of it as soon as we drop the hugetlb_fault_mutex_table.
4226          */
4227         size = i_size_read(mapping->host) >> huge_page_shift(h);
4228         ret = -EFAULT;
4229         if (idx >= size)
4230                 goto out_release_unlock;
4231
4232         ret = -EEXIST;
4233         if (!huge_pte_none(huge_ptep_get(dst_pte)))
4234                 goto out_release_unlock;
4235
4236         if (vm_shared) {
4237                 page_dup_rmap(page, true);
4238         } else {
4239                 ClearPagePrivate(page);
4240                 hugepage_add_new_anon_rmap(page, dst_vma, dst_addr);
4241         }
4242
4243         _dst_pte = make_huge_pte(dst_vma, page, dst_vma->vm_flags & VM_WRITE);
4244         if (dst_vma->vm_flags & VM_WRITE)
4245                 _dst_pte = huge_pte_mkdirty(_dst_pte);
4246         _dst_pte = pte_mkyoung(_dst_pte);
4247
4248         set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte);
4249
4250         (void)huge_ptep_set_access_flags(dst_vma, dst_addr, dst_pte, _dst_pte,
4251                                         dst_vma->vm_flags & VM_WRITE);
4252         hugetlb_count_add(pages_per_huge_page(h), dst_mm);
4253
4254         /* No need to invalidate - it was non-present before */
4255         update_mmu_cache(dst_vma, dst_addr, dst_pte);
4256
4257         spin_unlock(ptl);
4258         set_page_huge_active(page);
4259         if (vm_shared)
4260                 unlock_page(page);
4261         ret = 0;
4262 out:
4263         return ret;
4264 out_release_unlock:
4265         spin_unlock(ptl);
4266         if (vm_shared)
4267                 unlock_page(page);
4268 out_release_nounlock:
4269         put_page(page);
4270         goto out;
4271 }
4272
4273 long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
4274                          struct page **pages, struct vm_area_struct **vmas,
4275                          unsigned long *position, unsigned long *nr_pages,
4276                          long i, unsigned int flags, int *nonblocking)
4277 {
4278         unsigned long pfn_offset;
4279         unsigned long vaddr = *position;
4280         unsigned long remainder = *nr_pages;
4281         struct hstate *h = hstate_vma(vma);
4282         int err = -EFAULT;
4283
4284         while (vaddr < vma->vm_end && remainder) {
4285                 pte_t *pte;
4286                 spinlock_t *ptl = NULL;
4287                 int absent;
4288                 struct page *page;
4289
4290                 /*
4291                  * If we have a pending SIGKILL, don't keep faulting pages and
4292                  * potentially allocating memory.
4293                  */
4294                 if (unlikely(fatal_signal_pending(current))) {
4295                         remainder = 0;
4296                         break;
4297                 }
4298
4299                 /*
4300                  * Some archs (sparc64, sh*) have multiple pte_ts to
4301                  * each hugepage.  We have to make sure we get the
4302                  * first, for the page indexing below to work.
4303                  *
4304                  * Note that page table lock is not held when pte is null.
4305                  */
4306                 pte = huge_pte_offset(mm, vaddr & huge_page_mask(h),
4307                                       huge_page_size(h));
4308                 if (pte)
4309                         ptl = huge_pte_lock(h, mm, pte);
4310                 absent = !pte || huge_pte_none(huge_ptep_get(pte));
4311
4312                 /*
4313                  * When coredumping, it suits get_dump_page if we just return
4314                  * an error where there's an empty slot with no huge pagecache
4315                  * to back it.  This way, we avoid allocating a hugepage, and
4316                  * the sparse dumpfile avoids allocating disk blocks, but its
4317                  * huge holes still show up with zeroes where they need to be.
4318                  */
4319                 if (absent && (flags & FOLL_DUMP) &&
4320                     !hugetlbfs_pagecache_present(h, vma, vaddr)) {
4321                         if (pte)
4322                                 spin_unlock(ptl);
4323                         remainder = 0;
4324                         break;
4325                 }
4326
4327                 /*
4328                  * We need call hugetlb_fault for both hugepages under migration
4329                  * (in which case hugetlb_fault waits for the migration,) and
4330                  * hwpoisoned hugepages (in which case we need to prevent the
4331                  * caller from accessing to them.) In order to do this, we use
4332                  * here is_swap_pte instead of is_hugetlb_entry_migration and
4333                  * is_hugetlb_entry_hwpoisoned. This is because it simply covers
4334                  * both cases, and because we can't follow correct pages
4335                  * directly from any kind of swap entries.
4336                  */
4337                 if (absent || is_swap_pte(huge_ptep_get(pte)) ||
4338                     ((flags & FOLL_WRITE) &&
4339                       !huge_pte_write(huge_ptep_get(pte)))) {
4340                         vm_fault_t ret;
4341                         unsigned int fault_flags = 0;
4342
4343                         if (pte)
4344                                 spin_unlock(ptl);
4345                         if (flags & FOLL_WRITE)
4346                                 fault_flags |= FAULT_FLAG_WRITE;
4347                         if (nonblocking)
4348                                 fault_flags |= FAULT_FLAG_ALLOW_RETRY;
4349                         if (flags & FOLL_NOWAIT)
4350                                 fault_flags |= FAULT_FLAG_ALLOW_RETRY |
4351                                         FAULT_FLAG_RETRY_NOWAIT;
4352                         if (flags & FOLL_TRIED) {
4353                                 VM_WARN_ON_ONCE(fault_flags &
4354                                                 FAULT_FLAG_ALLOW_RETRY);
4355                                 fault_flags |= FAULT_FLAG_TRIED;
4356                         }
4357                         ret = hugetlb_fault(mm, vma, vaddr, fault_flags);
4358                         if (ret & VM_FAULT_ERROR) {
4359                                 err = vm_fault_to_errno(ret, flags);
4360                                 remainder = 0;
4361                                 break;
4362                         }
4363                         if (ret & VM_FAULT_RETRY) {
4364                                 if (nonblocking &&
4365                                     !(fault_flags & FAULT_FLAG_RETRY_NOWAIT))
4366                                         *nonblocking = 0;
4367                                 *nr_pages = 0;
4368                                 /*
4369                                  * VM_FAULT_RETRY must not return an
4370                                  * error, it will return zero
4371                                  * instead.
4372                                  *
4373                                  * No need to update "position" as the
4374                                  * caller will not check it after
4375                                  * *nr_pages is set to 0.
4376                                  */
4377                                 return i;
4378                         }
4379                         continue;
4380                 }
4381
4382                 pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
4383                 page = pte_page(huge_ptep_get(pte));
4384
4385                 /*
4386                  * Instead of doing 'try_get_page()' below in the same_page
4387                  * loop, just check the count once here.
4388                  */
4389                 if (unlikely(page_count(page) <= 0)) {
4390                         if (pages) {
4391                                 spin_unlock(ptl);
4392                                 remainder = 0;
4393                                 err = -ENOMEM;
4394                                 break;
4395                         }
4396                 }
4397 same_page:
4398                 if (pages) {
4399                         pages[i] = mem_map_offset(page, pfn_offset);
4400                         get_page(pages[i]);
4401                 }
4402
4403                 if (vmas)
4404                         vmas[i] = vma;
4405
4406                 vaddr += PAGE_SIZE;
4407                 ++pfn_offset;
4408                 --remainder;
4409                 ++i;
4410                 if (vaddr < vma->vm_end && remainder &&
4411                                 pfn_offset < pages_per_huge_page(h)) {
4412                         /*
4413                          * We use pfn_offset to avoid touching the pageframes
4414                          * of this compound page.
4415                          */
4416                         goto same_page;
4417                 }
4418                 spin_unlock(ptl);
4419         }
4420         *nr_pages = remainder;
4421         /*
4422          * setting position is actually required only if remainder is
4423          * not zero but it's faster not to add a "if (remainder)"
4424          * branch.
4425          */
4426         *position = vaddr;
4427
4428         return i ? i : err;
4429 }
4430
4431 #ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE
4432 /*
4433  * ARCHes with special requirements for evicting HUGETLB backing TLB entries can
4434  * implement this.
4435  */
4436 #define flush_hugetlb_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
4437 #endif
4438
4439 unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
4440                 unsigned long address, unsigned long end, pgprot_t newprot)
4441 {
4442         struct mm_struct *mm = vma->vm_mm;
4443         unsigned long start = address;
4444         pte_t *ptep;
4445         pte_t pte;
4446         struct hstate *h = hstate_vma(vma);
4447         unsigned long pages = 0;
4448         unsigned long f_start = start;
4449         unsigned long f_end = end;
4450         bool shared_pmd = false;
4451
4452         /*
4453          * In the case of shared PMDs, the area to flush could be beyond
4454          * start/end.  Set f_start/f_end to cover the maximum possible
4455          * range if PMD sharing is possible.
4456          */
4457         adjust_range_if_pmd_sharing_possible(vma, &f_start, &f_end);
4458
4459         BUG_ON(address >= end);
4460         flush_cache_range(vma, f_start, f_end);
4461
4462         mmu_notifier_invalidate_range_start(mm, f_start, f_end);
4463         i_mmap_lock_write(vma->vm_file->f_mapping);
4464         for (; address < end; address += huge_page_size(h)) {
4465                 spinlock_t *ptl;
4466                 ptep = huge_pte_offset(mm, address, huge_page_size(h));
4467                 if (!ptep)
4468                         continue;
4469                 ptl = huge_pte_lock(h, mm, ptep);
4470                 if (huge_pmd_unshare(mm, &address, ptep)) {
4471                         pages++;
4472                         spin_unlock(ptl);
4473                         shared_pmd = true;
4474                         continue;
4475                 }
4476                 pte = huge_ptep_get(ptep);
4477                 if (unlikely(is_hugetlb_entry_hwpoisoned(pte))) {
4478                         spin_unlock(ptl);
4479                         continue;
4480                 }
4481                 if (unlikely(is_hugetlb_entry_migration(pte))) {
4482                         swp_entry_t entry = pte_to_swp_entry(pte);
4483
4484                         if (is_write_migration_entry(entry)) {
4485                                 pte_t newpte;
4486
4487                                 make_migration_entry_read(&entry);
4488                                 newpte = swp_entry_to_pte(entry);
4489                                 set_huge_swap_pte_at(mm, address, ptep,
4490                                                      newpte, huge_page_size(h));
4491                                 pages++;
4492                         }
4493                         spin_unlock(ptl);
4494                         continue;
4495                 }
4496                 if (!huge_pte_none(pte)) {
4497                         pte = huge_ptep_get_and_clear(mm, address, ptep);
4498                         pte = pte_mkhuge(huge_pte_modify(pte, newprot));
4499                         pte = arch_make_huge_pte(pte, vma, NULL, 0);
4500                         set_huge_pte_at(mm, address, ptep, pte);
4501                         pages++;
4502                 }
4503                 spin_unlock(ptl);
4504         }
4505         /*
4506          * Must flush TLB before releasing i_mmap_rwsem: x86's huge_pmd_unshare
4507          * may have cleared our pud entry and done put_page on the page table:
4508          * once we release i_mmap_rwsem, another task can do the final put_page
4509          * and that page table be reused and filled with junk.  If we actually
4510          * did unshare a page of pmds, flush the range corresponding to the pud.
4511          */
4512         if (shared_pmd)
4513                 flush_hugetlb_tlb_range(vma, f_start, f_end);
4514         else
4515                 flush_hugetlb_tlb_range(vma, start, end);
4516         /*
4517          * No need to call mmu_notifier_invalidate_range() we are downgrading
4518          * page table protection not changing it to point to a new page.
4519          *
4520          * See Documentation/vm/mmu_notifier.rst
4521          */
4522         i_mmap_unlock_write(vma->vm_file->f_mapping);
4523         mmu_notifier_invalidate_range_end(mm, f_start, f_end);
4524
4525         return pages << h->order;
4526 }
4527
4528 int hugetlb_reserve_pages(struct inode *inode,
4529                                         long from, long to,
4530                                         struct vm_area_struct *vma,
4531                                         vm_flags_t vm_flags)
4532 {
4533         long ret, chg;
4534         struct hstate *h = hstate_inode(inode);
4535         struct hugepage_subpool *spool = subpool_inode(inode);
4536         struct resv_map *resv_map;
4537         long gbl_reserve;
4538
4539         /* This should never happen */
4540         if (from > to) {
4541                 VM_WARN(1, "%s called with a negative range\n", __func__);
4542                 return -EINVAL;
4543         }
4544
4545         /*
4546          * Only apply hugepage reservation if asked. At fault time, an
4547          * attempt will be made for VM_NORESERVE to allocate a page
4548          * without using reserves
4549          */
4550         if (vm_flags & VM_NORESERVE)
4551                 return 0;
4552
4553         /*
4554          * Shared mappings base their reservation on the number of pages that
4555          * are already allocated on behalf of the file. Private mappings need
4556          * to reserve the full area even if read-only as mprotect() may be
4557          * called to make the mapping read-write. Assume !vma is a shm mapping
4558          */
4559         if (!vma || vma->vm_flags & VM_MAYSHARE) {
4560                 resv_map = inode_resv_map(inode);
4561
4562                 chg = region_chg(resv_map, from, to);
4563
4564         } else {
4565                 resv_map = resv_map_alloc();
4566                 if (!resv_map)
4567                         return -ENOMEM;
4568
4569                 chg = to - from;
4570
4571                 set_vma_resv_map(vma, resv_map);
4572                 set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
4573         }
4574
4575         if (chg < 0) {
4576                 ret = chg;
4577                 goto out_err;
4578         }
4579
4580         /*
4581          * There must be enough pages in the subpool for the mapping. If
4582          * the subpool has a minimum size, there may be some global
4583          * reservations already in place (gbl_reserve).
4584          */
4585         gbl_reserve = hugepage_subpool_get_pages(spool, chg);
4586         if (gbl_reserve < 0) {
4587                 ret = -ENOSPC;
4588                 goto out_err;
4589         }
4590
4591         /*
4592          * Check enough hugepages are available for the reservation.
4593          * Hand the pages back to the subpool if there are not
4594          */
4595         ret = hugetlb_acct_memory(h, gbl_reserve);
4596         if (ret < 0) {
4597                 /* put back original number of pages, chg */
4598                 (void)hugepage_subpool_put_pages(spool, chg);
4599                 goto out_err;
4600         }
4601
4602         /*
4603          * Account for the reservations made. Shared mappings record regions
4604          * that have reservations as they are shared by multiple VMAs.
4605          * When the last VMA disappears, the region map says how much
4606          * the reservation was and the page cache tells how much of
4607          * the reservation was consumed. Private mappings are per-VMA and
4608          * only the consumed reservations are tracked. When the VMA
4609          * disappears, the original reservation is the VMA size and the
4610          * consumed reservations are stored in the map. Hence, nothing
4611          * else has to be done for private mappings here
4612          */
4613         if (!vma || vma->vm_flags & VM_MAYSHARE) {
4614                 long add = region_add(resv_map, from, to);
4615
4616                 if (unlikely(chg > add)) {
4617                         /*
4618                          * pages in this range were added to the reserve
4619                          * map between region_chg and region_add.  This
4620                          * indicates a race with alloc_huge_page.  Adjust
4621                          * the subpool and reserve counts modified above
4622                          * based on the difference.
4623                          */
4624                         long rsv_adjust;
4625
4626                         rsv_adjust = hugepage_subpool_put_pages(spool,
4627                                                                 chg - add);
4628                         hugetlb_acct_memory(h, -rsv_adjust);
4629                 }
4630         }
4631         return 0;
4632 out_err:
4633         if (!vma || vma->vm_flags & VM_MAYSHARE)
4634                 /* Don't call region_abort if region_chg failed */
4635                 if (chg >= 0)
4636                         region_abort(resv_map, from, to);
4637         if (vma && is_vma_resv_set(vma, HPAGE_RESV_OWNER))
4638                 kref_put(&resv_map->refs, resv_map_release);
4639         return ret;
4640 }
4641
4642 long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
4643                                                                 long freed)
4644 {
4645         struct hstate *h = hstate_inode(inode);
4646         struct resv_map *resv_map = inode_resv_map(inode);
4647         long chg = 0;
4648         struct hugepage_subpool *spool = subpool_inode(inode);
4649         long gbl_reserve;
4650
4651         if (resv_map) {
4652                 chg = region_del(resv_map, start, end);
4653                 /*
4654                  * region_del() can fail in the rare case where a region
4655                  * must be split and another region descriptor can not be
4656                  * allocated.  If end == LONG_MAX, it will not fail.
4657                  */
4658                 if (chg < 0)
4659                         return chg;
4660         }
4661
4662         spin_lock(&inode->i_lock);
4663         inode->i_blocks -= (blocks_per_huge_page(h) * freed);
4664         spin_unlock(&inode->i_lock);
4665
4666         /*
4667          * If the subpool has a minimum size, the number of global
4668          * reservations to be released may be adjusted.
4669          */
4670         gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
4671         hugetlb_acct_memory(h, -gbl_reserve);
4672
4673         return 0;
4674 }
4675
4676 #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE
4677 static unsigned long page_table_shareable(struct vm_area_struct *svma,
4678                                 struct vm_area_struct *vma,
4679                                 unsigned long addr, pgoff_t idx)
4680 {
4681         unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
4682                                 svma->vm_start;
4683         unsigned long sbase = saddr & PUD_MASK;
4684         unsigned long s_end = sbase + PUD_SIZE;
4685
4686         /* Allow segments to share if only one is marked locked */
4687         unsigned long vm_flags = vma->vm_flags & VM_LOCKED_CLEAR_MASK;
4688         unsigned long svm_flags = svma->vm_flags & VM_LOCKED_CLEAR_MASK;
4689
4690         /*
4691          * match the virtual addresses, permission and the alignment of the
4692          * page table page.
4693          */
4694         if (pmd_index(addr) != pmd_index(saddr) ||
4695             vm_flags != svm_flags ||
4696             sbase < svma->vm_start || svma->vm_end < s_end)
4697                 return 0;
4698
4699         return saddr;
4700 }
4701
4702 static bool vma_shareable(struct vm_area_struct *vma, unsigned long addr)
4703 {
4704         unsigned long base = addr & PUD_MASK;
4705         unsigned long end = base + PUD_SIZE;
4706
4707         /*
4708          * check on proper vm_flags and page table alignment
4709          */
4710         if (vma->vm_flags & VM_MAYSHARE && range_in_vma(vma, base, end))
4711                 return true;
4712         return false;
4713 }
4714
4715 /*
4716  * Determine if start,end range within vma could be mapped by shared pmd.
4717  * If yes, adjust start and end to cover range associated with possible
4718  * shared pmd mappings.
4719  */
4720 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
4721                                 unsigned long *start, unsigned long *end)
4722 {
4723         unsigned long v_start = ALIGN(vma->vm_start, PUD_SIZE),
4724                 v_end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);
4725
4726         /*
4727          * vma need span at least one aligned PUD size and the start,end range
4728          * must at least partialy within it.
4729          */
4730         if (!(vma->vm_flags & VM_MAYSHARE) || !(v_end > v_start) ||
4731                 (*end <= v_start) || (*start >= v_end))
4732                 return;
4733
4734         /* Extend the range to be PUD aligned for a worst case scenario */
4735         if (*start > v_start)
4736                 *start = ALIGN_DOWN(*start, PUD_SIZE);
4737
4738         if (*end < v_end)
4739                 *end = ALIGN(*end, PUD_SIZE);
4740 }
4741
4742 /*
4743  * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
4744  * and returns the corresponding pte. While this is not necessary for the
4745  * !shared pmd case because we can allocate the pmd later as well, it makes the
4746  * code much cleaner. pmd allocation is essential for the shared case because
4747  * pud has to be populated inside the same i_mmap_rwsem section - otherwise
4748  * racing tasks could either miss the sharing (see huge_pte_offset) or select a
4749  * bad pmd for sharing.
4750  */
4751 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
4752 {
4753         struct vm_area_struct *vma = find_vma(mm, addr);
4754         struct address_space *mapping = vma->vm_file->f_mapping;
4755         pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
4756                         vma->vm_pgoff;
4757         struct vm_area_struct *svma;
4758         unsigned long saddr;
4759         pte_t *spte = NULL;
4760         pte_t *pte;
4761         spinlock_t *ptl;
4762
4763         if (!vma_shareable(vma, addr))
4764                 return (pte_t *)pmd_alloc(mm, pud, addr);
4765
4766         i_mmap_lock_write(mapping);
4767         vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
4768                 if (svma == vma)
4769                         continue;
4770
4771                 saddr = page_table_shareable(svma, vma, addr, idx);
4772                 if (saddr) {
4773                         spte = huge_pte_offset(svma->vm_mm, saddr,
4774                                                vma_mmu_pagesize(svma));
4775                         if (spte) {
4776                                 get_page(virt_to_page(spte));
4777                                 break;
4778                         }
4779                 }
4780         }
4781
4782         if (!spte)
4783                 goto out;
4784
4785         ptl = huge_pte_lock(hstate_vma(vma), mm, spte);
4786         if (pud_none(*pud)) {
4787                 pud_populate(mm, pud,
4788                                 (pmd_t *)((unsigned long)spte & PAGE_MASK));
4789                 mm_inc_nr_pmds(mm);
4790         } else {
4791                 put_page(virt_to_page(spte));
4792         }
4793         spin_unlock(ptl);
4794 out:
4795         pte = (pte_t *)pmd_alloc(mm, pud, addr);
4796         i_mmap_unlock_write(mapping);
4797         return pte;
4798 }
4799
4800 /*
4801  * unmap huge page backed by shared pte.
4802  *
4803  * Hugetlb pte page is ref counted at the time of mapping.  If pte is shared
4804  * indicated by page_count > 1, unmap is achieved by clearing pud and
4805  * decrementing the ref count. If count == 1, the pte page is not shared.
4806  *
4807  * called with page table lock held.
4808  *
4809  * returns: 1 successfully unmapped a shared pte page
4810  *          0 the underlying pte page is not shared, or it is the last user
4811  */
4812 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
4813 {
4814         pgd_t *pgd = pgd_offset(mm, *addr);
4815         p4d_t *p4d = p4d_offset(pgd, *addr);
4816         pud_t *pud = pud_offset(p4d, *addr);
4817
4818         BUG_ON(page_count(virt_to_page(ptep)) == 0);
4819         if (page_count(virt_to_page(ptep)) == 1)
4820                 return 0;
4821
4822         pud_clear(pud);
4823         put_page(virt_to_page(ptep));
4824         mm_dec_nr_pmds(mm);
4825         *addr = ALIGN(*addr, HPAGE_SIZE * PTRS_PER_PTE) - HPAGE_SIZE;
4826         return 1;
4827 }
4828 #define want_pmd_share()        (1)
4829 #else /* !CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
4830 pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
4831 {
4832         return NULL;
4833 }
4834
4835 int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
4836 {
4837         return 0;
4838 }
4839
4840 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
4841                                 unsigned long *start, unsigned long *end)
4842 {
4843 }
4844 #define want_pmd_share()        (0)
4845 #endif /* CONFIG_ARCH_WANT_HUGE_PMD_SHARE */
4846
4847 #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
4848 pte_t *huge_pte_alloc(struct mm_struct *mm,
4849                         unsigned long addr, unsigned long sz)
4850 {
4851         pgd_t *pgd;
4852         p4d_t *p4d;
4853         pud_t *pud;
4854         pte_t *pte = NULL;
4855
4856         pgd = pgd_offset(mm, addr);
4857         p4d = p4d_alloc(mm, pgd, addr);
4858         if (!p4d)
4859                 return NULL;
4860         pud = pud_alloc(mm, p4d, addr);
4861         if (pud) {
4862                 if (sz == PUD_SIZE) {
4863                         pte = (pte_t *)pud;
4864                 } else {
4865                         BUG_ON(sz != PMD_SIZE);
4866                         if (want_pmd_share() && pud_none(*pud))
4867                                 pte = huge_pmd_share(mm, addr, pud);
4868                         else
4869                                 pte = (pte_t *)pmd_alloc(mm, pud, addr);
4870                 }
4871         }
4872         BUG_ON(pte && pte_present(*pte) && !pte_huge(*pte));
4873
4874         return pte;
4875 }
4876
4877 /*
4878  * huge_pte_offset() - Walk the page table to resolve the hugepage
4879  * entry at address @addr
4880  *
4881  * Return: Pointer to page table or swap entry (PUD or PMD) for
4882  * address @addr, or NULL if a p*d_none() entry is encountered and the
4883  * size @sz doesn't match the hugepage size at this level of the page
4884  * table.
4885  */
4886 pte_t *huge_pte_offset(struct mm_struct *mm,
4887                        unsigned long addr, unsigned long sz)
4888 {
4889         pgd_t *pgd;
4890         p4d_t *p4d;
4891         pud_t *pud, pud_entry;
4892         pmd_t *pmd, pmd_entry;
4893
4894         pgd = pgd_offset(mm, addr);
4895         if (!pgd_present(*pgd))
4896                 return NULL;
4897         p4d = p4d_offset(pgd, addr);
4898         if (!p4d_present(*p4d))
4899                 return NULL;
4900
4901         pud = pud_offset(p4d, addr);
4902         pud_entry = READ_ONCE(*pud);
4903         if (sz != PUD_SIZE && pud_none(pud_entry))
4904                 return NULL;
4905         /* hugepage or swap? */
4906         if (pud_huge(pud_entry) || !pud_present(pud_entry))
4907                 return (pte_t *)pud;
4908
4909         pmd = pmd_offset(pud, addr);
4910         pmd_entry = READ_ONCE(*pmd);
4911         if (sz != PMD_SIZE && pmd_none(pmd_entry))
4912                 return NULL;
4913         /* hugepage or swap? */
4914         if (pmd_huge(pmd_entry) || !pmd_present(pmd_entry))
4915                 return (pte_t *)pmd;
4916
4917         return NULL;
4918 }
4919
4920 #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
4921
4922 /*
4923  * These functions are overwritable if your architecture needs its own
4924  * behavior.
4925  */
4926 struct page * __weak
4927 follow_huge_addr(struct mm_struct *mm, unsigned long address,
4928                               int write)
4929 {
4930         return ERR_PTR(-EINVAL);
4931 }
4932
4933 struct page * __weak
4934 follow_huge_pd(struct vm_area_struct *vma,
4935                unsigned long address, hugepd_t hpd, int flags, int pdshift)
4936 {
4937         WARN(1, "hugepd follow called with no support for hugepage directory format\n");
4938         return NULL;
4939 }
4940
4941 struct page * __weak
4942 follow_huge_pmd(struct mm_struct *mm, unsigned long address,
4943                 pmd_t *pmd, int flags)
4944 {
4945         struct page *page = NULL;
4946         spinlock_t *ptl;
4947         pte_t pte;
4948 retry:
4949         ptl = pmd_lockptr(mm, pmd);
4950         spin_lock(ptl);
4951         /*
4952          * make sure that the address range covered by this pmd is not
4953          * unmapped from other threads.
4954          */
4955         if (!pmd_huge(*pmd))
4956                 goto out;
4957         pte = huge_ptep_get((pte_t *)pmd);
4958         if (pte_present(pte)) {
4959                 page = pmd_page(*pmd) + ((address & ~PMD_MASK) >> PAGE_SHIFT);
4960                 if (flags & FOLL_GET)
4961                         get_page(page);
4962         } else {
4963                 if (is_hugetlb_entry_migration(pte)) {
4964                         spin_unlock(ptl);
4965                         __migration_entry_wait(mm, (pte_t *)pmd, ptl);
4966                         goto retry;
4967                 }
4968                 /*
4969                  * hwpoisoned entry is treated as no_page_table in
4970                  * follow_page_mask().
4971                  */
4972         }
4973 out:
4974         spin_unlock(ptl);
4975         return page;
4976 }
4977
4978 struct page * __weak
4979 follow_huge_pud(struct mm_struct *mm, unsigned long address,
4980                 pud_t *pud, int flags)
4981 {
4982         if (flags & FOLL_GET)
4983                 return NULL;
4984
4985         return pte_page(*(pte_t *)pud) + ((address & ~PUD_MASK) >> PAGE_SHIFT);
4986 }
4987
4988 struct page * __weak
4989 follow_huge_pgd(struct mm_struct *mm, unsigned long address, pgd_t *pgd, int flags)
4990 {
4991         if (flags & FOLL_GET)
4992                 return NULL;
4993
4994         return pte_page(*(pte_t *)pgd) + ((address & ~PGDIR_MASK) >> PAGE_SHIFT);
4995 }
4996
4997 bool isolate_huge_page(struct page *page, struct list_head *list)
4998 {
4999         bool ret = true;
5000
5001         spin_lock(&hugetlb_lock);
5002         if (!PageHeadHuge(page) || !page_huge_active(page) ||
5003             !get_page_unless_zero(page)) {
5004                 ret = false;
5005                 goto unlock;
5006         }
5007         clear_page_huge_active(page);
5008         list_move_tail(&page->lru, list);
5009 unlock:
5010         spin_unlock(&hugetlb_lock);
5011         return ret;
5012 }
5013
5014 void putback_active_hugepage(struct page *page)
5015 {
5016         VM_BUG_ON_PAGE(!PageHead(page), page);
5017         spin_lock(&hugetlb_lock);
5018         set_page_huge_active(page);
5019         list_move_tail(&page->lru, &(page_hstate(page))->hugepage_activelist);
5020         spin_unlock(&hugetlb_lock);
5021         put_page(page);
5022 }
5023
5024 void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason)
5025 {
5026         struct hstate *h = page_hstate(oldpage);
5027
5028         hugetlb_cgroup_migrate(oldpage, newpage);
5029         set_page_owner_migrate_reason(newpage, reason);
5030
5031         /*
5032          * transfer temporary state of the new huge page. This is
5033          * reverse to other transitions because the newpage is going to
5034          * be final while the old one will be freed so it takes over
5035          * the temporary status.
5036          *
5037          * Also note that we have to transfer the per-node surplus state
5038          * here as well otherwise the global surplus count will not match
5039          * the per-node's.
5040          */
5041         if (PageHugeTemporary(newpage)) {
5042                 int old_nid = page_to_nid(oldpage);
5043                 int new_nid = page_to_nid(newpage);
5044
5045                 SetPageHugeTemporary(oldpage);
5046                 ClearPageHugeTemporary(newpage);
5047
5048                 spin_lock(&hugetlb_lock);
5049                 if (h->surplus_huge_pages_node[old_nid]) {
5050                         h->surplus_huge_pages_node[old_nid]--;
5051                         h->surplus_huge_pages_node[new_nid]++;
5052                 }
5053                 spin_unlock(&hugetlb_lock);
5054         }
5055 }