GNU Linux-libre 6.0.2-gnu: fs/hugetlbfs/inode.c
1 /*
2  * hugetlbpage-backed filesystem.  Based on ramfs.
3  *
4  * Nadia Yvette Chambers, 2002
5  *
6  * Copyright (C) 2002 Linus Torvalds.
7  * License: GPL
8  */
9
10 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11
12 #include <linux/thread_info.h>
13 #include <asm/current.h>
14 #include <linux/falloc.h>
15 #include <linux/fs.h>
16 #include <linux/mount.h>
17 #include <linux/file.h>
18 #include <linux/kernel.h>
19 #include <linux/writeback.h>
20 #include <linux/pagemap.h>
21 #include <linux/highmem.h>
22 #include <linux/init.h>
23 #include <linux/string.h>
24 #include <linux/capability.h>
25 #include <linux/ctype.h>
26 #include <linux/backing-dev.h>
27 #include <linux/hugetlb.h>
28 #include <linux/pagevec.h>
29 #include <linux/fs_parser.h>
30 #include <linux/mman.h>
31 #include <linux/slab.h>
32 #include <linux/dnotify.h>
33 #include <linux/statfs.h>
34 #include <linux/security.h>
35 #include <linux/magic.h>
36 #include <linux/migrate.h>
37 #include <linux/uio.h>
38
39 #include <linux/uaccess.h>
40 #include <linux/sched/mm.h>
41
42 static const struct address_space_operations hugetlbfs_aops;
43 const struct file_operations hugetlbfs_file_operations;
44 static const struct inode_operations hugetlbfs_dir_inode_operations;
45 static const struct inode_operations hugetlbfs_inode_operations;
46
47 enum hugetlbfs_size_type { NO_SIZE, SIZE_STD, SIZE_PERCENT };
48
49 struct hugetlbfs_fs_context {
50         struct hstate           *hstate;
51         unsigned long long      max_size_opt;
52         unsigned long long      min_size_opt;
53         long                    max_hpages;
54         long                    nr_inodes;
55         long                    min_hpages;
56         enum hugetlbfs_size_type max_val_type;
57         enum hugetlbfs_size_type min_val_type;
58         kuid_t                  uid;
59         kgid_t                  gid;
60         umode_t                 mode;
61 };
62
63 int sysctl_hugetlb_shm_group;
64
65 enum hugetlb_param {
66         Opt_gid,
67         Opt_min_size,
68         Opt_mode,
69         Opt_nr_inodes,
70         Opt_pagesize,
71         Opt_size,
72         Opt_uid,
73 };
74
75 static const struct fs_parameter_spec hugetlb_fs_parameters[] = {
76         fsparam_u32   ("gid",           Opt_gid),
77         fsparam_string("min_size",      Opt_min_size),
78         fsparam_u32oct("mode",          Opt_mode),
79         fsparam_string("nr_inodes",     Opt_nr_inodes),
80         fsparam_string("pagesize",      Opt_pagesize),
81         fsparam_string("size",          Opt_size),
82         fsparam_u32   ("uid",           Opt_uid),
83         {}
84 };
85
86 #ifdef CONFIG_NUMA
87 static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
88                                         struct inode *inode, pgoff_t index)
89 {
90         vma->vm_policy = mpol_shared_policy_lookup(&HUGETLBFS_I(inode)->policy,
91                                                         index);
92 }
93
94 static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
95 {
96         mpol_cond_put(vma->vm_policy);
97 }
98 #else
99 static inline void hugetlb_set_vma_policy(struct vm_area_struct *vma,
100                                         struct inode *inode, pgoff_t index)
101 {
102 }
103
104 static inline void hugetlb_drop_vma_policy(struct vm_area_struct *vma)
105 {
106 }
107 #endif
108
109 /*
110  * Mask used when checking the page offset value passed in via system
111  * calls.  This value will be converted to a loff_t which is signed.
112  * Therefore, we want to check the upper PAGE_SHIFT + 1 bits of the
113  * value.  The extra bit (- 1 in the shift value) is to take the sign
114  * bit into account.
115  */
116 #define PGOFF_LOFFT_MAX \
117         (((1UL << (PAGE_SHIFT + 1)) - 1) <<  (BITS_PER_LONG - (PAGE_SHIFT + 1)))
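/*
 * Worked example (illustrative, not from the original source): on a
 * 64-bit arch with 4K base pages, PAGE_SHIFT == 12 and
 * BITS_PER_LONG == 64, so PGOFF_LOFFT_MAX == 0x1fff << 51 ==
 * 0xfff8000000000000, i.e. the top 13 bits.  A vm_pgoff such as
 * 1UL << 51 would become a byte offset of 1UL << 63, which is
 * negative when read as a loff_t.
 */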
118
119 static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
120 {
121         struct inode *inode = file_inode(file);
122         struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
123         loff_t len, vma_len;
124         int ret;
125         struct hstate *h = hstate_file(file);
126
127         /*
128          * vma address alignment (but not the pgoff alignment) has
129          * already been checked by prepare_hugepage_range.  If you add
130          * any error returns here, do so after setting VM_HUGETLB, so
131          * that the is_vm_hugetlb_page() tests below unmap_region() go
132          * the right way when do_mmap() unwinds (this may matter on
133          * powerpc and ia64).
134          */
135         vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
136         vma->vm_ops = &hugetlb_vm_ops;
137
138         ret = seal_check_future_write(info->seals, vma);
139         if (ret)
140                 return ret;
141
142         /*
143          * page based offset in vm_pgoff could be sufficiently large to
144          * overflow a loff_t when converted to byte offset.  This can
145          * only happen on architectures where sizeof(loff_t) ==
146          * sizeof(unsigned long).  So, only check in those instances.
147          */
148         if (sizeof(unsigned long) == sizeof(loff_t)) {
149                 if (vma->vm_pgoff & PGOFF_LOFFT_MAX)
150                         return -EINVAL;
151         }
152
153         /* must be huge page aligned */
154         if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
155                 return -EINVAL;
156
157         vma_len = (loff_t)(vma->vm_end - vma->vm_start);
158         len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
159         /* check for overflow */
160         if (len < vma_len)
161                 return -EINVAL;
162
163         inode_lock(inode);
164         file_accessed(file);
165
166         ret = -ENOMEM;
167         if (!hugetlb_reserve_pages(inode,
168                                 vma->vm_pgoff >> huge_page_order(h),
169                                 len >> huge_page_shift(h), vma,
170                                 vma->vm_flags))
171                 goto out;
172
173         ret = 0;
174         if (vma->vm_flags & VM_WRITE && inode->i_size < len)
175                 i_size_write(inode, len);
176 out:
177         inode_unlock(inode);
178
179         return ret;
180 }
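/*
 * Userspace sketch (illustrative; the path is hypothetical and a 2 MiB
 * huge page size is assumed): mapping a file on a hugetlbfs mount.
 * Per the checks above, both the length and the file offset must be
 * multiples of the huge page size or mmap() fails with EINVAL:
 *
 *	int fd = open("/mnt/huge/buf", O_CREAT | O_RDWR, 0600);
 *	void *p = mmap(NULL, 2 * 1024 * 1024, PROT_READ | PROT_WRITE,
 *		       MAP_SHARED, fd, 0);
 */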
181
182 /*
183  * Called under mmap_write_lock(mm).
184  */
185
186 static unsigned long
187 hugetlb_get_unmapped_area_bottomup(struct file *file, unsigned long addr,
188                 unsigned long len, unsigned long pgoff, unsigned long flags)
189 {
190         struct hstate *h = hstate_file(file);
191         struct vm_unmapped_area_info info;
192
193         info.flags = 0;
194         info.length = len;
195         info.low_limit = current->mm->mmap_base;
196         info.high_limit = arch_get_mmap_end(addr, len, flags);
197         info.align_mask = PAGE_MASK & ~huge_page_mask(h);
198         info.align_offset = 0;
199         return vm_unmapped_area(&info);
200 }
201
202 static unsigned long
203 hugetlb_get_unmapped_area_topdown(struct file *file, unsigned long addr,
204                 unsigned long len, unsigned long pgoff, unsigned long flags)
205 {
206         struct hstate *h = hstate_file(file);
207         struct vm_unmapped_area_info info;
208
209         info.flags = VM_UNMAPPED_AREA_TOPDOWN;
210         info.length = len;
211         info.low_limit = max(PAGE_SIZE, mmap_min_addr);
212         info.high_limit = arch_get_mmap_base(addr, current->mm->mmap_base);
213         info.align_mask = PAGE_MASK & ~huge_page_mask(h);
214         info.align_offset = 0;
215         addr = vm_unmapped_area(&info);
216
217         /*
218          * A failed mmap() very likely causes application failure,
219          * so fall back to the bottom-up function here. This scenario
220          * can happen with large stack limits and large mmap()
221          * allocations.
222          */
223         if (unlikely(offset_in_page(addr))) {
224                 VM_BUG_ON(addr != -ENOMEM);
225                 info.flags = 0;
226                 info.low_limit = current->mm->mmap_base;
227                 info.high_limit = arch_get_mmap_end(addr, len, flags);
228                 addr = vm_unmapped_area(&info);
229         }
230
231         return addr;
232 }
233
234 unsigned long
235 generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
236                                   unsigned long len, unsigned long pgoff,
237                                   unsigned long flags)
238 {
239         struct mm_struct *mm = current->mm;
240         struct vm_area_struct *vma;
241         struct hstate *h = hstate_file(file);
242         const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);
243
244         if (len & ~huge_page_mask(h))
245                 return -EINVAL;
246         if (len > TASK_SIZE)
247                 return -ENOMEM;
248
249         if (flags & MAP_FIXED) {
250                 if (prepare_hugepage_range(file, addr, len))
251                         return -EINVAL;
252                 return addr;
253         }
254
255         if (addr) {
256                 addr = ALIGN(addr, huge_page_size(h));
257                 vma = find_vma(mm, addr);
258                 if (mmap_end - len >= addr &&
259                     (!vma || addr + len <= vm_start_gap(vma)))
260                         return addr;
261         }
262
263         /*
264          * Use the mm->get_unmapped_area value as a hint to use the topdown routine.
265          * If architectures have special needs, they should define their own
266          * version of hugetlb_get_unmapped_area.
267          */
268         if (mm->get_unmapped_area == arch_get_unmapped_area_topdown)
269                 return hugetlb_get_unmapped_area_topdown(file, addr, len,
270                                 pgoff, flags);
271         return hugetlb_get_unmapped_area_bottomup(file, addr, len,
272                         pgoff, flags);
273 }
274
275 #ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
276 static unsigned long
277 hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
278                           unsigned long len, unsigned long pgoff,
279                           unsigned long flags)
280 {
281         return generic_hugetlb_get_unmapped_area(file, addr, len, pgoff, flags);
282 }
283 #endif
284
285 /*
286  * Support for read() - Find the page attached to f_mapping and copy out the
287  * data. This provides functionality similar to filemap_read().
288  */
289 static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
290 {
291         struct file *file = iocb->ki_filp;
292         struct hstate *h = hstate_file(file);
293         struct address_space *mapping = file->f_mapping;
294         struct inode *inode = mapping->host;
295         unsigned long index = iocb->ki_pos >> huge_page_shift(h);
296         unsigned long offset = iocb->ki_pos & ~huge_page_mask(h);
297         unsigned long end_index;
298         loff_t isize;
299         ssize_t retval = 0;
300
301         while (iov_iter_count(to)) {
302                 struct page *page;
303                 size_t nr, copied;
304
305                 /* nr is the maximum number of bytes to copy from this page */
306                 nr = huge_page_size(h);
307                 isize = i_size_read(inode);
308                 if (!isize)
309                         break;
310                 end_index = (isize - 1) >> huge_page_shift(h);
311                 if (index > end_index)
312                         break;
313                 if (index == end_index) {
314                         nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
315                         if (nr <= offset)
316                                 break;
317                 }
318                 nr = nr - offset;
319
320                 /* Find the page */
321                 page = find_lock_page(mapping, index);
322                 if (unlikely(page == NULL)) {
323                         /*
324                          * We have a HOLE, zero out the user-buffer for the
325                          * length of the hole or request.
326                          */
327                         copied = iov_iter_zero(nr, to);
328                 } else {
329                         unlock_page(page);
330
331                         /*
332                          * We have the page, copy it to user space buffer.
333                          */
334                         copied = copy_page_to_iter(page, offset, nr, to);
335                         put_page(page);
336                 }
337                 offset += copied;
338                 retval += copied;
339                 if (copied != nr && iov_iter_count(to)) {
340                         if (!retval)
341                                 retval = -EFAULT;
342                         break;
343                 }
344                 index += offset >> huge_page_shift(h);
345                 offset &= ~huge_page_mask(h);
346         }
347         iocb->ki_pos = ((loff_t)index << huge_page_shift(h)) + offset;
348         return retval;
349 }
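/*
 * Example (illustrative): read() works on hugetlbfs even though write()
 * does not (see the stub write_begin/write_end below).  A read over an
 * index with no page in the cache (a hole) zero-fills that part of the
 * user buffer, as handled above:
 *
 *	ssize_t n = pread(fd, buf, len, off);
 */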
350
351 static int hugetlbfs_write_begin(struct file *file,
352                         struct address_space *mapping,
353                         loff_t pos, unsigned len,
354                         struct page **pagep, void **fsdata)
355 {
356         return -EINVAL;
357 }
358
359 static int hugetlbfs_write_end(struct file *file, struct address_space *mapping,
360                         loff_t pos, unsigned len, unsigned copied,
361                         struct page *page, void *fsdata)
362 {
363         BUG();
364         return -EINVAL;
365 }
366
367 static void remove_huge_page(struct page *page)
368 {
369         ClearPageDirty(page);
370         ClearPageUptodate(page);
371         delete_from_page_cache(page);
372 }
373
374 static void
375 hugetlb_vmdelete_list(struct rb_root_cached *root, pgoff_t start, pgoff_t end,
376                       zap_flags_t zap_flags)
377 {
378         struct vm_area_struct *vma;
379
380         /*
381          * end == 0 indicates that the entire range after start should be
382          * unmapped.  Note, end is exclusive, whereas the interval tree takes
383          * an inclusive "last".
384          */
385         vma_interval_tree_foreach(vma, root, start, end ? end - 1 : ULONG_MAX) {
386                 unsigned long v_offset;
387                 unsigned long v_end;
388
389                 /*
390                  * Can the expression below overflow on 32-bit arches?
391                  * No, because the interval tree returns us only those vmas
392                  * which overlap the truncated area starting at pgoff,
393                  * and no vma on a 32-bit arch can span beyond 4GB.
394                  */
395                 if (vma->vm_pgoff < start)
396                         v_offset = (start - vma->vm_pgoff) << PAGE_SHIFT;
397                 else
398                         v_offset = 0;
399
400                 if (!end)
401                         v_end = vma->vm_end;
402                 else {
403                         v_end = ((end - vma->vm_pgoff) << PAGE_SHIFT)
404                                                         + vma->vm_start;
405                         if (v_end > vma->vm_end)
406                                 v_end = vma->vm_end;
407                 }
408
409                 unmap_hugepage_range(vma, vma->vm_start + v_offset, v_end,
410                                      NULL, zap_flags);
411         }
412 }
413
414 /*
415  * remove_inode_hugepages handles two distinct cases: truncation and hole
416  * punch.  There are subtle differences in operation for each case.
417  *
418  * truncation is indicated by end of range being LLONG_MAX
419  *      In this case, we first scan the range and release found pages.
420  *      After releasing pages, hugetlb_unreserve_pages cleans up region/reserve
421  *      maps and global counts.  Page faults cannot race with truncation
422  *      in this routine.  hugetlb_no_page() holds i_mmap_rwsem and prevents
423  *      page faults in the truncated range by checking i_size.  i_size is
424  *      modified while holding i_mmap_rwsem.
425  * hole punch is indicated if end is not LLONG_MAX
426  *      In the hole punch case we scan the range and release found pages.
427  *      Only when releasing a page is the associated region/reserve map
428  *      deleted.  Region/reserve map entries for ranges without associated
429  *      pages are not modified.  Page faults can race with hole punch.
430  *      This is indicated if we find a mapped page.
431  * Note: If the passed end of range value is beyond the end of file, but
432  * not LLONG_MAX, this routine still performs a hole punch operation.
433  */
434 static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
435                                    loff_t lend)
436 {
437         struct hstate *h = hstate_inode(inode);
438         struct address_space *mapping = &inode->i_data;
439         const pgoff_t start = lstart >> huge_page_shift(h);
440         const pgoff_t end = lend >> huge_page_shift(h);
441         struct folio_batch fbatch;
442         pgoff_t next, index;
443         int i, freed = 0;
444         bool truncate_op = (lend == LLONG_MAX);
445
446         folio_batch_init(&fbatch);
447         next = start;
448         while (filemap_get_folios(mapping, &next, end - 1, &fbatch)) {
449                 for (i = 0; i < folio_batch_count(&fbatch); ++i) {
450                         struct folio *folio = fbatch.folios[i];
451                         u32 hash = 0;
452
453                         index = folio->index;
454                         if (!truncate_op) {
455                                 /*
456                                  * Only need to hold the fault mutex in the
457                                  * hole punch case.  This prevents races with
458                                  * page faults.  Races are not possible in the
459                                  * case of truncation.
460                                  */
461                                 hash = hugetlb_fault_mutex_hash(mapping, index);
462                                 mutex_lock(&hugetlb_fault_mutex_table[hash]);
463                         }
464
465                         /*
466                          * If folio is mapped, it was faulted in after being
467                          * unmapped in caller.  Unmap (again) now after taking
468                          * the fault mutex.  The mutex will prevent faults
469                          * until we finish removing the folio.
470                          *
471                          * This race can only happen in the hole punch case.
472                          * Getting here in a truncate operation is a bug.
473                          */
474                         if (unlikely(folio_mapped(folio))) {
475                                 BUG_ON(truncate_op);
476
477                                 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
478                                 i_mmap_lock_write(mapping);
479                                 mutex_lock(&hugetlb_fault_mutex_table[hash]);
480                                 hugetlb_vmdelete_list(&mapping->i_mmap,
481                                         index * pages_per_huge_page(h),
482                                         (index + 1) * pages_per_huge_page(h),
483                                         ZAP_FLAG_DROP_MARKER);
484                                 i_mmap_unlock_write(mapping);
485                         }
486
487                         folio_lock(folio);
488                         /*
489                          * We must free the huge page and remove from page
490                          * cache (remove_huge_page) BEFORE removing the
491                          * region/reserve map (hugetlb_unreserve_pages).  In
492                          * rare out of memory conditions, removal of the
493                          * region/reserve map could fail. Correspondingly,
494                          * the subpool and global reserve usage count may need
495                          * to be adjusted.
496                          */
497                         VM_BUG_ON(HPageRestoreReserve(&folio->page));
498                         remove_huge_page(&folio->page);
499                         freed++;
500                         if (!truncate_op) {
501                                 if (unlikely(hugetlb_unreserve_pages(inode,
502                                                         index, index + 1, 1)))
503                                         hugetlb_fix_reserve_counts(inode);
504                         }
505
506                         folio_unlock(folio);
507                         if (!truncate_op)
508                                 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
509                 }
510                 folio_batch_release(&fbatch);
511                 cond_resched();
512         }
513
514         if (truncate_op)
515                 (void)hugetlb_unreserve_pages(inode, start, LONG_MAX, freed);
516 }
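/*
 * Example (illustrative) of how the two cases above are reached:
 * ftruncate(fd, 0) ends up here via hugetlb_vmtruncate() with
 * lend == LLONG_MAX (the truncation case), while
 * fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, off, len)
 * arrives via hugetlbfs_punch_hole() with a bounded [lstart, lend) and
 * therefore takes the fault-mutex path.
 */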
517
518 static void hugetlbfs_evict_inode(struct inode *inode)
519 {
520         struct resv_map *resv_map;
521
522         remove_inode_hugepages(inode, 0, LLONG_MAX);
523
524         /*
525          * Get the resv_map from the address space embedded in the inode.
526          * This is the address space which points to any resv_map allocated
527          * at inode creation time.  If this is a device special inode,
528          * i_mapping may not point to the original address space.
529          */
530         resv_map = (struct resv_map *)(&inode->i_data)->private_data;
531         /* Only regular and link inodes have associated reserve maps */
532         if (resv_map)
533                 resv_map_release(&resv_map->refs);
534         clear_inode(inode);
535 }
536
537 static void hugetlb_vmtruncate(struct inode *inode, loff_t offset)
538 {
539         pgoff_t pgoff;
540         struct address_space *mapping = inode->i_mapping;
541         struct hstate *h = hstate_inode(inode);
542
543         BUG_ON(offset & ~huge_page_mask(h));
544         pgoff = offset >> PAGE_SHIFT;
545
546         i_mmap_lock_write(mapping);
547         i_size_write(inode, offset);
548         if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
549                 hugetlb_vmdelete_list(&mapping->i_mmap, pgoff, 0,
550                                       ZAP_FLAG_DROP_MARKER);
551         i_mmap_unlock_write(mapping);
552         remove_inode_hugepages(inode, offset, LLONG_MAX);
553 }
554
555 static void hugetlbfs_zero_partial_page(struct hstate *h,
556                                         struct address_space *mapping,
557                                         loff_t start,
558                                         loff_t end)
559 {
560         pgoff_t idx = start >> huge_page_shift(h);
561         struct folio *folio;
562
563         folio = filemap_lock_folio(mapping, idx);
564         if (!folio)
565                 return;
566
567         start = start & ~huge_page_mask(h);
568         end = end & ~huge_page_mask(h);
569         if (!end)
570                 end = huge_page_size(h);
571
572         folio_zero_segment(folio, (size_t)start, (size_t)end);
573
574         folio_unlock(folio);
575         folio_put(folio);
576 }
577
578 static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
579 {
580         struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
581         struct address_space *mapping = inode->i_mapping;
582         struct hstate *h = hstate_inode(inode);
583         loff_t hpage_size = huge_page_size(h);
584         loff_t hole_start, hole_end;
585
586         /*
587          * hole_start and hole_end indicate the full pages within the hole.
588          */
589         hole_start = round_up(offset, hpage_size);
590         hole_end = round_down(offset + len, hpage_size);
591
592         inode_lock(inode);
593
594         /* protected by i_rwsem */
595         if (info->seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
596                 inode_unlock(inode);
597                 return -EPERM;
598         }
599
600         i_mmap_lock_write(mapping);
601
602         /* If range starts before first full page, zero partial page. */
603         if (offset < hole_start)
604                 hugetlbfs_zero_partial_page(h, mapping,
605                                 offset, min(offset + len, hole_start));
606
607         /* Unmap users of full pages in the hole. */
608         if (hole_end > hole_start) {
609                 if (!RB_EMPTY_ROOT(&mapping->i_mmap.rb_root))
610                         hugetlb_vmdelete_list(&mapping->i_mmap,
611                                               hole_start >> PAGE_SHIFT,
612                                               hole_end >> PAGE_SHIFT, 0);
613         }
614
615         /* If range extends beyond last full page, zero partial page. */
616         if ((offset + len) > hole_end && (offset + len) > hole_start)
617                 hugetlbfs_zero_partial_page(h, mapping,
618                                 hole_end, offset + len);
619
620         i_mmap_unlock_write(mapping);
621
622         /* Remove full pages from the file. */
623         if (hole_end > hole_start)
624                 remove_inode_hugepages(inode, hole_start, hole_end);
625
626         inode_unlock(inode);
627
628         return 0;
629 }
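/*
 * Worked example (illustrative): with a 2 MiB huge page size, punching
 * offset = 1 MiB, len = 4 MiB yields hole_start = 2 MiB and
 * hole_end = 4 MiB.  The partial ranges [1 MiB, 2 MiB) and
 * [4 MiB, 5 MiB) are zeroed in place, and only the full page spanning
 * [2 MiB, 4 MiB) is removed:
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  1 << 20, 4 << 20);
 */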
630
631 static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
632                                 loff_t len)
633 {
634         struct inode *inode = file_inode(file);
635         struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
636         struct address_space *mapping = inode->i_mapping;
637         struct hstate *h = hstate_inode(inode);
638         struct vm_area_struct pseudo_vma;
639         struct mm_struct *mm = current->mm;
640         loff_t hpage_size = huge_page_size(h);
641         unsigned long hpage_shift = huge_page_shift(h);
642         pgoff_t start, index, end;
643         int error;
644         u32 hash;
645
646         if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
647                 return -EOPNOTSUPP;
648
649         if (mode & FALLOC_FL_PUNCH_HOLE)
650                 return hugetlbfs_punch_hole(inode, offset, len);
651
652         /*
653          * Default preallocate case.
654          * For this range, start is rounded down and end is rounded up
655          * as well as being converted to page offsets.
656          */
657         start = offset >> hpage_shift;
658         end = (offset + len + hpage_size - 1) >> hpage_shift;
659
660         inode_lock(inode);
661
662         /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
663         error = inode_newsize_ok(inode, offset + len);
664         if (error)
665                 goto out;
666
667         if ((info->seals & F_SEAL_GROW) && offset + len > inode->i_size) {
668                 error = -EPERM;
669                 goto out;
670         }
671
672         /*
673          * Initialize a pseudo vma as this is required by the huge page
674          * allocation routines.  If NUMA is configured, use page index
675          * as input to create an allocation policy.
676          */
677         vma_init(&pseudo_vma, mm);
678         pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
679         pseudo_vma.vm_file = file;
680
681         for (index = start; index < end; index++) {
682                 /*
683                  * This is supposed to be the vaddr where the page is being
684                  * faulted in, but we have no vaddr here.
685                  */
686                 struct page *page;
687                 unsigned long addr;
688
689                 cond_resched();
690
691                 /*
692                  * fallocate(2) manpage permits EINTR; we may have been
693                  * interrupted because we are using up too much memory.
694                  */
695                 if (signal_pending(current)) {
696                         error = -EINTR;
697                         break;
698                 }
699
700                 /* Set numa allocation policy based on index */
701                 hugetlb_set_vma_policy(&pseudo_vma, inode, index);
702
703                 /* addr is the offset within the file (zero based) */
704                 addr = index * hpage_size;
705
706                 /*
707                  * fault mutex taken here, protects against fault path
708                  * and hole punch.  inode_lock previously taken protects
709                  * against truncation.
710                  */
711                 hash = hugetlb_fault_mutex_hash(mapping, index);
712                 mutex_lock(&hugetlb_fault_mutex_table[hash]);
713
714                 /* See if already present in mapping to avoid alloc/free */
715                 page = find_get_page(mapping, index);
716                 if (page) {
717                         put_page(page);
718                         mutex_unlock(&hugetlb_fault_mutex_table[hash]);
719                         hugetlb_drop_vma_policy(&pseudo_vma);
720                         continue;
721                 }
722
723                 /*
724                  * Allocate page without setting the avoid_reserve argument.
725                  * There certainly are no reserves associated with the
726                  * pseudo_vma.  However, there could be shared mappings with
727                  * reserves for the file at the inode level.  If we fallocate
728                  * pages in these areas, we need to consume the reserves
729                  * to keep reservation accounting consistent.
730                  */
731                 page = alloc_huge_page(&pseudo_vma, addr, 0);
732                 hugetlb_drop_vma_policy(&pseudo_vma);
733                 if (IS_ERR(page)) {
734                         mutex_unlock(&hugetlb_fault_mutex_table[hash]);
735                         error = PTR_ERR(page);
736                         goto out;
737                 }
738                 clear_huge_page(page, addr, pages_per_huge_page(h));
739                 __SetPageUptodate(page);
740                 error = huge_add_to_page_cache(page, mapping, index);
741                 if (unlikely(error)) {
742                         restore_reserve_on_error(h, &pseudo_vma, addr, page);
743                         put_page(page);
744                         mutex_unlock(&hugetlb_fault_mutex_table[hash]);
745                         goto out;
746                 }
747
748                 mutex_unlock(&hugetlb_fault_mutex_table[hash]);
749
750                 SetHPageMigratable(page);
751                 /*
752                  * unlock_page(): the page was locked by huge_add_to_page_cache();
753                  * put_page(): drop the reference taken by alloc_huge_page().
754                  */
755                 unlock_page(page);
756                 put_page(page);
757         }
758
759         if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
760                 i_size_write(inode, offset + len);
761         inode->i_ctime = current_time(inode);
762 out:
763         inode_unlock(inode);
764         return error;
765 }
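/*
 * Usage sketch (illustrative, assuming 2 MiB huge pages): preallocating
 * without growing i_size.  With offset = 3 MiB and len = 3 MiB, start
 * is rounded down to index 1 and end up to index 3, so pages 1 and 2
 * (file range [2 MiB, 6 MiB)) are populated:
 *
 *	fallocate(fd, FALLOC_FL_KEEP_SIZE, 3 << 20, 3 << 20);
 */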
766
767 static int hugetlbfs_setattr(struct user_namespace *mnt_userns,
768                              struct dentry *dentry, struct iattr *attr)
769 {
770         struct inode *inode = d_inode(dentry);
771         struct hstate *h = hstate_inode(inode);
772         int error;
773         unsigned int ia_valid = attr->ia_valid;
774         struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
775
776         error = setattr_prepare(&init_user_ns, dentry, attr);
777         if (error)
778                 return error;
779
780         if (ia_valid & ATTR_SIZE) {
781                 loff_t oldsize = inode->i_size;
782                 loff_t newsize = attr->ia_size;
783
784                 if (newsize & ~huge_page_mask(h))
785                         return -EINVAL;
786                 /* protected by i_rwsem */
787                 if ((newsize < oldsize && (info->seals & F_SEAL_SHRINK)) ||
788                     (newsize > oldsize && (info->seals & F_SEAL_GROW)))
789                         return -EPERM;
790                 hugetlb_vmtruncate(inode, newsize);
791         }
792
793         setattr_copy(&init_user_ns, inode, attr);
794         mark_inode_dirty(inode);
795         return 0;
796 }
797
798 static struct inode *hugetlbfs_get_root(struct super_block *sb,
799                                         struct hugetlbfs_fs_context *ctx)
800 {
801         struct inode *inode;
802
803         inode = new_inode(sb);
804         if (inode) {
805                 inode->i_ino = get_next_ino();
806                 inode->i_mode = S_IFDIR | ctx->mode;
807                 inode->i_uid = ctx->uid;
808                 inode->i_gid = ctx->gid;
809                 inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
810                 inode->i_op = &hugetlbfs_dir_inode_operations;
811                 inode->i_fop = &simple_dir_operations;
812                 /* directory inodes start off with i_nlink == 2 (for "." entry) */
813                 inc_nlink(inode);
814                 lockdep_annotate_inode_mutex_key(inode);
815         }
816         return inode;
817 }
818
819 /*
820  * Hugetlbfs is not reclaimable; therefore its i_mmap_rwsem will never
821  * be taken from reclaim -- unlike regular filesystems. This needs an
822  * annotation because huge_pmd_share() does an allocation under hugetlb's
823  * i_mmap_rwsem.
824  */
825 static struct lock_class_key hugetlbfs_i_mmap_rwsem_key;
826
827 static struct inode *hugetlbfs_get_inode(struct super_block *sb,
828                                         struct inode *dir,
829                                         umode_t mode, dev_t dev)
830 {
831         struct inode *inode;
832         struct resv_map *resv_map = NULL;
833
834         /*
835          * Reserve maps are only needed for inodes that can have associated
836          * page allocations.
837          */
838         if (S_ISREG(mode) || S_ISLNK(mode)) {
839                 resv_map = resv_map_alloc();
840                 if (!resv_map)
841                         return NULL;
842         }
843
844         inode = new_inode(sb);
845         if (inode) {
846                 struct hugetlbfs_inode_info *info = HUGETLBFS_I(inode);
847
848                 inode->i_ino = get_next_ino();
849                 inode_init_owner(&init_user_ns, inode, dir, mode);
850                 lockdep_set_class(&inode->i_mapping->i_mmap_rwsem,
851                                 &hugetlbfs_i_mmap_rwsem_key);
852                 inode->i_mapping->a_ops = &hugetlbfs_aops;
853                 inode->i_atime = inode->i_mtime = inode->i_ctime = current_time(inode);
854                 inode->i_mapping->private_data = resv_map;
855                 info->seals = F_SEAL_SEAL;
856                 switch (mode & S_IFMT) {
857                 default:
858                         init_special_inode(inode, mode, dev);
859                         break;
860                 case S_IFREG:
861                         inode->i_op = &hugetlbfs_inode_operations;
862                         inode->i_fop = &hugetlbfs_file_operations;
863                         break;
864                 case S_IFDIR:
865                         inode->i_op = &hugetlbfs_dir_inode_operations;
866                         inode->i_fop = &simple_dir_operations;
867
868                         /* directory inodes start off with i_nlink == 2 (for "." entry) */
869                         inc_nlink(inode);
870                         break;
871                 case S_IFLNK:
872                         inode->i_op = &page_symlink_inode_operations;
873                         inode_nohighmem(inode);
874                         break;
875                 }
876                 lockdep_annotate_inode_mutex_key(inode);
877         } else {
878                 if (resv_map)
879                         kref_put(&resv_map->refs, resv_map_release);
880         }
881
882         return inode;
883 }
884
885 /*
886  * File creation. Allocate an inode, and we're done..
887  */
888 static int do_hugetlbfs_mknod(struct inode *dir,
889                         struct dentry *dentry,
890                         umode_t mode,
891                         dev_t dev,
892                         bool tmpfile)
893 {
894         struct inode *inode;
895         int error = -ENOSPC;
896
897         inode = hugetlbfs_get_inode(dir->i_sb, dir, mode, dev);
898         if (inode) {
899                 dir->i_ctime = dir->i_mtime = current_time(dir);
900                 if (tmpfile) {
901                         d_tmpfile(dentry, inode);
902                 } else {
903                         d_instantiate(dentry, inode);
904                         dget(dentry);   /* Extra count - pin the dentry in core */
905                 }
906                 error = 0;
907         }
908         return error;
909 }
910
911 static int hugetlbfs_mknod(struct user_namespace *mnt_userns, struct inode *dir,
912                            struct dentry *dentry, umode_t mode, dev_t dev)
913 {
914         return do_hugetlbfs_mknod(dir, dentry, mode, dev, false);
915 }
916
917 static int hugetlbfs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
918                            struct dentry *dentry, umode_t mode)
919 {
920         int retval = hugetlbfs_mknod(&init_user_ns, dir, dentry,
921                                      mode | S_IFDIR, 0);
922         if (!retval)
923                 inc_nlink(dir);
924         return retval;
925 }
926
927 static int hugetlbfs_create(struct user_namespace *mnt_userns,
928                             struct inode *dir, struct dentry *dentry,
929                             umode_t mode, bool excl)
930 {
931         return hugetlbfs_mknod(&init_user_ns, dir, dentry, mode | S_IFREG, 0);
932 }
933
934 static int hugetlbfs_tmpfile(struct user_namespace *mnt_userns,
935                              struct inode *dir, struct dentry *dentry,
936                              umode_t mode)
937 {
938         return do_hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0, true);
939 }
940
941 static int hugetlbfs_symlink(struct user_namespace *mnt_userns,
942                              struct inode *dir, struct dentry *dentry,
943                              const char *symname)
944 {
945         struct inode *inode;
946         int error = -ENOSPC;
947
948         inode = hugetlbfs_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0);
949         if (inode) {
950                 int l = strlen(symname)+1;
951                 error = page_symlink(inode, symname, l);
952                 if (!error) {
953                         d_instantiate(dentry, inode);
954                         dget(dentry);
955                 } else
956                         iput(inode);
957         }
958         dir->i_ctime = dir->i_mtime = current_time(dir);
959
960         return error;
961 }
962
963 #ifdef CONFIG_MIGRATION
964 static int hugetlbfs_migrate_folio(struct address_space *mapping,
965                                 struct folio *dst, struct folio *src,
966                                 enum migrate_mode mode)
967 {
968         int rc;
969
970         rc = migrate_huge_page_move_mapping(mapping, dst, src);
971         if (rc != MIGRATEPAGE_SUCCESS)
972                 return rc;
973
974         if (hugetlb_page_subpool(&src->page)) {
975                 hugetlb_set_page_subpool(&dst->page,
976                                         hugetlb_page_subpool(&src->page));
977                 hugetlb_set_page_subpool(&src->page, NULL);
978         }
979
980         if (mode != MIGRATE_SYNC_NO_COPY)
981                 folio_migrate_copy(dst, src);
982         else
983                 folio_migrate_flags(dst, src);
984
985         return MIGRATEPAGE_SUCCESS;
986 }
987 #else
988 #define hugetlbfs_migrate_folio NULL
989 #endif
990
991 static int hugetlbfs_error_remove_page(struct address_space *mapping,
992                                 struct page *page)
993 {
994         struct inode *inode = mapping->host;
995         pgoff_t index = page->index;
996
997         remove_huge_page(page);
998         if (unlikely(hugetlb_unreserve_pages(inode, index, index + 1, 1)))
999                 hugetlb_fix_reserve_counts(inode);
1000
1001         return 0;
1002 }
1003
1004 /*
1005  * Display the mount options in /proc/mounts.
1006  */
1007 static int hugetlbfs_show_options(struct seq_file *m, struct dentry *root)
1008 {
1009         struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(root->d_sb);
1010         struct hugepage_subpool *spool = sbinfo->spool;
1011         unsigned long hpage_size = huge_page_size(sbinfo->hstate);
1012         unsigned hpage_shift = huge_page_shift(sbinfo->hstate);
1013         char mod;
1014
1015         if (!uid_eq(sbinfo->uid, GLOBAL_ROOT_UID))
1016                 seq_printf(m, ",uid=%u",
1017                            from_kuid_munged(&init_user_ns, sbinfo->uid));
1018         if (!gid_eq(sbinfo->gid, GLOBAL_ROOT_GID))
1019                 seq_printf(m, ",gid=%u",
1020                            from_kgid_munged(&init_user_ns, sbinfo->gid));
1021         if (sbinfo->mode != 0755)
1022                 seq_printf(m, ",mode=%o", sbinfo->mode);
1023         if (sbinfo->max_inodes != -1)
1024                 seq_printf(m, ",nr_inodes=%lu", sbinfo->max_inodes);
1025
1026         hpage_size /= 1024;
1027         mod = 'K';
1028         if (hpage_size >= 1024) {
1029                 hpage_size /= 1024;
1030                 mod = 'M';
1031         }
1032         seq_printf(m, ",pagesize=%lu%c", hpage_size, mod);
1033         if (spool) {
1034                 if (spool->max_hpages != -1)
1035                         seq_printf(m, ",size=%llu",
1036                                    (unsigned long long)spool->max_hpages << hpage_shift);
1037                 if (spool->min_hpages != -1)
1038                         seq_printf(m, ",min_size=%llu",
1039                                    (unsigned long long)spool->min_hpages << hpage_shift);
1040         }
1041         return 0;
1042 }
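/*
 * Example (illustrative) of a resulting /proc/mounts entry for a mount
 * with a 2 MiB hstate and size=1G; non-default uid/gid/mode/nr_inodes
 * would be appended as well:
 *
 *	none /mnt/huge hugetlbfs rw,relatime,pagesize=2M,size=1073741824 0 0
 */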
1043
1044 static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
1045 {
1046         struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
1047         struct hstate *h = hstate_inode(d_inode(dentry));
1048
1049         buf->f_type = HUGETLBFS_MAGIC;
1050         buf->f_bsize = huge_page_size(h);
1051         if (sbinfo) {
1052                 spin_lock(&sbinfo->stat_lock);
1053                 /* If no limits set, just report 0 or -1 for max/free/used
1054                  * blocks, like simple_statfs() */
1055                 if (sbinfo->spool) {
1056                         long free_pages;
1057
1058                         spin_lock_irq(&sbinfo->spool->lock);
1059                         buf->f_blocks = sbinfo->spool->max_hpages;
1060                         free_pages = sbinfo->spool->max_hpages
1061                                 - sbinfo->spool->used_hpages;
1062                         buf->f_bavail = buf->f_bfree = free_pages;
1063                         spin_unlock_irq(&sbinfo->spool->lock);
1064                         buf->f_files = sbinfo->max_inodes;
1065                         buf->f_ffree = sbinfo->free_inodes;
1066                 }
1067                 spin_unlock(&sbinfo->stat_lock);
1068         }
1069         buf->f_namelen = NAME_MAX;
1070         return 0;
1071 }
1072
1073 static void hugetlbfs_put_super(struct super_block *sb)
1074 {
1075         struct hugetlbfs_sb_info *sbi = HUGETLBFS_SB(sb);
1076
1077         if (sbi) {
1078                 sb->s_fs_info = NULL;
1079
1080                 if (sbi->spool)
1081                         hugepage_put_subpool(sbi->spool);
1082
1083                 kfree(sbi);
1084         }
1085 }
1086
1087 static inline int hugetlbfs_dec_free_inodes(struct hugetlbfs_sb_info *sbinfo)
1088 {
1089         if (sbinfo->free_inodes >= 0) {
1090                 spin_lock(&sbinfo->stat_lock);
1091                 if (unlikely(!sbinfo->free_inodes)) {
1092                         spin_unlock(&sbinfo->stat_lock);
1093                         return 0;
1094                 }
1095                 sbinfo->free_inodes--;
1096                 spin_unlock(&sbinfo->stat_lock);
1097         }
1098
1099         return 1;
1100 }
1101
1102 static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo)
1103 {
1104         if (sbinfo->free_inodes >= 0) {
1105                 spin_lock(&sbinfo->stat_lock);
1106                 sbinfo->free_inodes++;
1107                 spin_unlock(&sbinfo->stat_lock);
1108         }
1109 }
1110
1111
1112 static struct kmem_cache *hugetlbfs_inode_cachep;
1113
1114 static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
1115 {
1116         struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb);
1117         struct hugetlbfs_inode_info *p;
1118
1119         if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
1120                 return NULL;
1121         p = alloc_inode_sb(sb, hugetlbfs_inode_cachep, GFP_KERNEL);
1122         if (unlikely(!p)) {
1123                 hugetlbfs_inc_free_inodes(sbinfo);
1124                 return NULL;
1125         }
1126
1127         /*
1128          * Any time after allocation, hugetlbfs_destroy_inode can be called
1129          * for the inode.  mpol_free_shared_policy is unconditionally called
1130          * as part of hugetlbfs_destroy_inode.  So, initialize policy here
1131          * in case of a quick call to destroy.
1132          *
1133          * Note that the policy is initialized even if we are creating a
1134          * private inode.  This simplifies hugetlbfs_destroy_inode.
1135          */
1136         mpol_shared_policy_init(&p->policy, NULL);
1137
1138         return &p->vfs_inode;
1139 }
1140
1141 static void hugetlbfs_free_inode(struct inode *inode)
1142 {
1143         kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
1144 }
1145
1146 static void hugetlbfs_destroy_inode(struct inode *inode)
1147 {
1148         hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb));
1149         mpol_free_shared_policy(&HUGETLBFS_I(inode)->policy);
1150 }
1151
1152 static const struct address_space_operations hugetlbfs_aops = {
1153         .write_begin    = hugetlbfs_write_begin,
1154         .write_end      = hugetlbfs_write_end,
1155         .dirty_folio    = noop_dirty_folio,
1156         .migrate_folio  = hugetlbfs_migrate_folio,
1157         .error_remove_page      = hugetlbfs_error_remove_page,
1158 };
1159
1160
1161 static void init_once(void *foo)
1162 {
1163         struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo;
1164
1165         inode_init_once(&ei->vfs_inode);
1166 }
1167
1168 const struct file_operations hugetlbfs_file_operations = {
1169         .read_iter              = hugetlbfs_read_iter,
1170         .mmap                   = hugetlbfs_file_mmap,
1171         .fsync                  = noop_fsync,
1172         .get_unmapped_area      = hugetlb_get_unmapped_area,
1173         .llseek                 = default_llseek,
1174         .fallocate              = hugetlbfs_fallocate,
1175 };
1176
1177 static const struct inode_operations hugetlbfs_dir_inode_operations = {
1178         .create         = hugetlbfs_create,
1179         .lookup         = simple_lookup,
1180         .link           = simple_link,
1181         .unlink         = simple_unlink,
1182         .symlink        = hugetlbfs_symlink,
1183         .mkdir          = hugetlbfs_mkdir,
1184         .rmdir          = simple_rmdir,
1185         .mknod          = hugetlbfs_mknod,
1186         .rename         = simple_rename,
1187         .setattr        = hugetlbfs_setattr,
1188         .tmpfile        = hugetlbfs_tmpfile,
1189 };
1190
1191 static const struct inode_operations hugetlbfs_inode_operations = {
1192         .setattr        = hugetlbfs_setattr,
1193 };
1194
1195 static const struct super_operations hugetlbfs_ops = {
1196         .alloc_inode    = hugetlbfs_alloc_inode,
1197         .free_inode     = hugetlbfs_free_inode,
1198         .destroy_inode  = hugetlbfs_destroy_inode,
1199         .evict_inode    = hugetlbfs_evict_inode,
1200         .statfs         = hugetlbfs_statfs,
1201         .put_super      = hugetlbfs_put_super,
1202         .show_options   = hugetlbfs_show_options,
1203 };
1204
1205 /*
1206  * Convert the size option passed on the command line to a number of huge
1207  * pages in the pool specified by hstate.  The size option may be in bytes
1208  * (val_type == SIZE_STD) or a percentage of the pool (val_type == SIZE_PERCENT).
1209  */
1210 static long
1211 hugetlbfs_size_to_hpages(struct hstate *h, unsigned long long size_opt,
1212                          enum hugetlbfs_size_type val_type)
1213 {
1214         if (val_type == NO_SIZE)
1215                 return -1;
1216
1217         if (val_type == SIZE_PERCENT) {
1218                 size_opt <<= huge_page_shift(h);
1219                 size_opt *= h->max_huge_pages;
1220                 do_div(size_opt, 100);
1221         }
1222
1223         size_opt >>= huge_page_shift(h);
1224         return size_opt;
1225 }
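/*
 * Worked examples (illustrative), assuming a 2 MiB hstate
 * (huge_page_shift == 21):
 *
 *	size=1G  -> SIZE_STD:     1G >> 21 = 512 huge pages
 *	size=50% -> SIZE_PERCENT: with max_huge_pages == 100,
 *	            ((50 << 21) * 100 / 100) >> 21 = 50 huge pages
 */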
1226
1227 /*
1228  * Parse one mount parameter.
1229  */
1230 static int hugetlbfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
1231 {
1232         struct hugetlbfs_fs_context *ctx = fc->fs_private;
1233         struct fs_parse_result result;
1234         char *rest;
1235         unsigned long ps;
1236         int opt;
1237
1238         opt = fs_parse(fc, hugetlb_fs_parameters, param, &result);
1239         if (opt < 0)
1240                 return opt;
1241
1242         switch (opt) {
1243         case Opt_uid:
1244                 ctx->uid = make_kuid(current_user_ns(), result.uint_32);
1245                 if (!uid_valid(ctx->uid))
1246                         goto bad_val;
1247                 return 0;
1248
1249         case Opt_gid:
1250                 ctx->gid = make_kgid(current_user_ns(), result.uint_32);
1251                 if (!gid_valid(ctx->gid))
1252                         goto bad_val;
1253                 return 0;
1254
1255         case Opt_mode:
1256                 ctx->mode = result.uint_32 & 01777U;
1257                 return 0;
1258
1259         case Opt_size:
1260                 /* memparse() will accept a K/M/G without a digit */
1261                 if (!isdigit(param->string[0]))
1262                         goto bad_val;
1263                 ctx->max_size_opt = memparse(param->string, &rest);
1264                 ctx->max_val_type = SIZE_STD;
1265                 if (*rest == '%')
1266                         ctx->max_val_type = SIZE_PERCENT;
1267                 return 0;
1268
1269         case Opt_nr_inodes:
1270                 /* memparse() will accept a K/M/G without a digit */
1271                 if (!isdigit(param->string[0]))
1272                         goto bad_val;
1273                 ctx->nr_inodes = memparse(param->string, &rest);
1274                 return 0;
1275
1276         case Opt_pagesize:
1277                 ps = memparse(param->string, &rest);
1278                 ctx->hstate = size_to_hstate(ps);
1279                 if (!ctx->hstate) {
1280                         pr_err("Unsupported page size %lu MB\n", ps / SZ_1M);
1281                         return -EINVAL;
1282                 }
1283                 return 0;
1284
1285         case Opt_min_size:
1286                 /* memparse() will accept a K/M/G without a digit */
1287                 if (!isdigit(param->string[0]))
1288                         goto bad_val;
1289                 ctx->min_size_opt = memparse(param->string, &rest);
1290                 ctx->min_val_type = SIZE_STD;
1291                 if (*rest == '%')
1292                         ctx->min_val_type = SIZE_PERCENT;
1293                 return 0;
1294
1295         default:
1296                 return -EINVAL;
1297         }
1298
1299 bad_val:
1300         return invalfc(fc, "Bad value '%s' for mount option '%s'\n",
1301                       param->string, param->key);
1302 }
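/*
 * Example (illustrative; the mount point and IDs are made up) of a
 * mount invocation exercising the options parsed above:
 *
 *	mount -t hugetlbfs -o pagesize=2M,size=1G,min_size=512M,uid=1000,gid=1000,mode=0770,nr_inodes=64 none /mnt/huge
 *
 * "size" and "min_size" also accept a percentage of the pool, e.g.
 * size=50%.
 */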
1303
1304 /*
1305  * Validate the parsed options.
1306  */
1307 static int hugetlbfs_validate(struct fs_context *fc)
1308 {
1309         struct hugetlbfs_fs_context *ctx = fc->fs_private;
1310
1311         /*
1312          * Use huge page pool size (in hstate) to convert the size
1313          * options to number of huge pages.  If NO_SIZE, -1 is returned.
1314          */
1315         ctx->max_hpages = hugetlbfs_size_to_hpages(ctx->hstate,
1316                                                    ctx->max_size_opt,
1317                                                    ctx->max_val_type);
1318         ctx->min_hpages = hugetlbfs_size_to_hpages(ctx->hstate,
1319                                                    ctx->min_size_opt,
1320                                                    ctx->min_val_type);
1321
1322         /*
1323          * If max_size was specified, then min_size must not be larger
1324          */
1325         if (ctx->max_val_type > NO_SIZE &&
1326             ctx->min_hpages > ctx->max_hpages) {
1327                 pr_err("Minimum size can not be greater than maximum size\n");
1328                 return -EINVAL;
1329         }
1330
1331         return 0;
1332 }
1333
1334 static int
1335 hugetlbfs_fill_super(struct super_block *sb, struct fs_context *fc)
1336 {
1337         struct hugetlbfs_fs_context *ctx = fc->fs_private;
1338         struct hugetlbfs_sb_info *sbinfo;
1339
1340         sbinfo = kmalloc(sizeof(struct hugetlbfs_sb_info), GFP_KERNEL);
1341         if (!sbinfo)
1342                 return -ENOMEM;
1343         sb->s_fs_info = sbinfo;
1344         spin_lock_init(&sbinfo->stat_lock);
1345         sbinfo->hstate          = ctx->hstate;
1346         sbinfo->max_inodes      = ctx->nr_inodes;
1347         sbinfo->free_inodes     = ctx->nr_inodes;
1348         sbinfo->spool           = NULL;
1349         sbinfo->uid             = ctx->uid;
1350         sbinfo->gid             = ctx->gid;
1351         sbinfo->mode            = ctx->mode;
1352
1353         /*
1354          * Allocate and initialize subpool if maximum or minimum size is
1355          * specified.  Any needed reservations (for minimum size) are taken
1356          * when the subpool is created.
1357          */
1358         if (ctx->max_hpages != -1 || ctx->min_hpages != -1) {
1359                 sbinfo->spool = hugepage_new_subpool(ctx->hstate,
1360                                                      ctx->max_hpages,
1361                                                      ctx->min_hpages);
1362                 if (!sbinfo->spool)
1363                         goto out_free;
1364         }
1365         sb->s_maxbytes = MAX_LFS_FILESIZE;
1366         sb->s_blocksize = huge_page_size(ctx->hstate);
1367         sb->s_blocksize_bits = huge_page_shift(ctx->hstate);
1368         sb->s_magic = HUGETLBFS_MAGIC;
1369         sb->s_op = &hugetlbfs_ops;
1370         sb->s_time_gran = 1;
1371
1372         /*
1373          * Due to the special and limited functionality of hugetlbfs, it does
1374          * not work well as a stacking filesystem.
1375          */
1376         sb->s_stack_depth = FILESYSTEM_MAX_STACK_DEPTH;
1377         sb->s_root = d_make_root(hugetlbfs_get_root(sb, ctx));
1378         if (!sb->s_root)
1379                 goto out_free;
1380         return 0;
1381 out_free:
1382         kfree(sbinfo->spool);
1383         kfree(sbinfo);
1384         return -ENOMEM;
1385 }
1386
1387 static int hugetlbfs_get_tree(struct fs_context *fc)
1388 {
1389         int err = hugetlbfs_validate(fc);
1390         if (err)
1391                 return err;
1392         return get_tree_nodev(fc, hugetlbfs_fill_super);
1393 }
1394
1395 static void hugetlbfs_fs_context_free(struct fs_context *fc)
1396 {
1397         kfree(fc->fs_private);
1398 }
1399
1400 static const struct fs_context_operations hugetlbfs_fs_context_ops = {
1401         .free           = hugetlbfs_fs_context_free,
1402         .parse_param    = hugetlbfs_parse_param,
1403         .get_tree       = hugetlbfs_get_tree,
1404 };
1405
1406 static int hugetlbfs_init_fs_context(struct fs_context *fc)
1407 {
1408         struct hugetlbfs_fs_context *ctx;
1409
1410         ctx = kzalloc(sizeof(struct hugetlbfs_fs_context), GFP_KERNEL);
1411         if (!ctx)
1412                 return -ENOMEM;
1413
1414         ctx->max_hpages = -1; /* No limit on size by default */
1415         ctx->nr_inodes  = -1; /* No limit on number of inodes by default */
1416         ctx->uid        = current_fsuid();
1417         ctx->gid        = current_fsgid();
1418         ctx->mode       = 0755;
1419         ctx->hstate     = &default_hstate;
1420         ctx->min_hpages = -1; /* No default minimum size */
1421         ctx->max_val_type = NO_SIZE;
1422         ctx->min_val_type = NO_SIZE;
1423         fc->fs_private = ctx;
1424         fc->ops = &hugetlbfs_fs_context_ops;
1425         return 0;
1426 }
1427
1428 static struct file_system_type hugetlbfs_fs_type = {
1429         .name                   = "hugetlbfs",
1430         .init_fs_context        = hugetlbfs_init_fs_context,
1431         .parameters             = hugetlb_fs_parameters,
1432         .kill_sb                = kill_litter_super,
1433 };
1434
1435 static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
1436
1437 static int can_do_hugetlb_shm(void)
1438 {
1439         kgid_t shm_group;
1440         shm_group = make_kgid(&init_user_ns, sysctl_hugetlb_shm_group);
1441         return capable(CAP_IPC_LOCK) || in_group_p(shm_group);
1442 }
1443
1444 static int get_hstate_idx(int page_size_log)
1445 {
1446         struct hstate *h = hstate_sizelog(page_size_log);
1447
1448         if (!h)
1449                 return -1;
1450         return hstate_index(h);
1451 }
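/*
 * Example (illustrative): page_size_log == 0 selects the default
 * hstate; a caller using mmap(MAP_HUGETLB | MAP_HUGE_2MB) encodes a
 * page_size_log of 21 and, when a 2 MiB hstate exists, gets its index.
 */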
1452
1453 /*
1454  * Note that the size should be huge page aligned by the caller; otherwise
1455  * hugetlb_reserve_pages() reserves one fewer huge page than intended.
1456  */
1457 struct file *hugetlb_file_setup(const char *name, size_t size,
1458                                 vm_flags_t acctflag, int creat_flags,
1459                                 int page_size_log)
1460 {
1461         struct inode *inode;
1462         struct vfsmount *mnt;
1463         int hstate_idx;
1464         struct file *file;
1465
1466         hstate_idx = get_hstate_idx(page_size_log);
1467         if (hstate_idx < 0)
1468                 return ERR_PTR(-ENODEV);
1469
1470         mnt = hugetlbfs_vfsmount[hstate_idx];
1471         if (!mnt)
1472                 return ERR_PTR(-ENOENT);
1473
1474         if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
1475                 struct ucounts *ucounts = current_ucounts();
1476
1477                 if (user_shm_lock(size, ucounts)) {
1478                         pr_warn_once("%s (%d): Using mlock ulimits for SHM_HUGETLB is obsolete\n",
1479                                 current->comm, current->pid);
1480                         user_shm_unlock(size, ucounts);
1481                 }
1482                 return ERR_PTR(-EPERM);
1483         }
1484
1485         file = ERR_PTR(-ENOSPC);
1486         inode = hugetlbfs_get_inode(mnt->mnt_sb, NULL, S_IFREG | S_IRWXUGO, 0);
1487         if (!inode)
1488                 goto out;
1489         if (creat_flags == HUGETLB_SHMFS_INODE)
1490                 inode->i_flags |= S_PRIVATE;
1491
1492         inode->i_size = size;
1493         clear_nlink(inode);
1494
1495         if (!hugetlb_reserve_pages(inode, 0,
1496                         size >> huge_page_shift(hstate_inode(inode)), NULL,
1497                         acctflag))
1498                 file = ERR_PTR(-ENOMEM);
1499         else
1500                 file = alloc_file_pseudo(inode, mnt, name, O_RDWR,
1501                                         &hugetlbfs_file_operations);
1502         if (!IS_ERR(file))
1503                 return file;
1504
1505         iput(inode);
1506 out:
1507         return file;
1508 }
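/*
 * Usage sketch (illustrative): SHM_HUGETLB segments reach
 * hugetlb_file_setup() via shmget().  The size below is already huge
 * page aligned (assuming 2 MiB pages), as the comment above requires:
 *
 *	int id = shmget(IPC_PRIVATE, 4 << 20,
 *			SHM_HUGETLB | IPC_CREAT | 0600);
 */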
1509
1510 static struct vfsmount *__init mount_one_hugetlbfs(struct hstate *h)
1511 {
1512         struct fs_context *fc;
1513         struct vfsmount *mnt;
1514
1515         fc = fs_context_for_mount(&hugetlbfs_fs_type, SB_KERNMOUNT);
1516         if (IS_ERR(fc)) {
1517                 mnt = ERR_CAST(fc);
1518         } else {
1519                 struct hugetlbfs_fs_context *ctx = fc->fs_private;
1520                 ctx->hstate = h;
1521                 mnt = fc_mount(fc);
1522                 put_fs_context(fc);
1523         }
1524         if (IS_ERR(mnt))
1525                 pr_err("Cannot mount internal hugetlbfs for page size %luK\n",
1526                        huge_page_size(h) / SZ_1K);
1527         return mnt;
1528 }
1529
1530 static int __init init_hugetlbfs_fs(void)
1531 {
1532         struct vfsmount *mnt;
1533         struct hstate *h;
1534         int error;
1535         int i;
1536
1537         if (!hugepages_supported()) {
1538                 pr_info("disabling because there are no supported hugepage sizes\n");
1539                 return -ENOTSUPP;
1540         }
1541
1542         error = -ENOMEM;
1543         hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
1544                                         sizeof(struct hugetlbfs_inode_info),
1545                                         0, SLAB_ACCOUNT, init_once);
1546         if (hugetlbfs_inode_cachep == NULL)
1547                 goto out;
1548
1549         error = register_filesystem(&hugetlbfs_fs_type);
1550         if (error)
1551                 goto out_free;
1552
1553         /* default hstate mount is required */
1554         mnt = mount_one_hugetlbfs(&default_hstate);
1555         if (IS_ERR(mnt)) {
1556                 error = PTR_ERR(mnt);
1557                 goto out_unreg;
1558         }
1559         hugetlbfs_vfsmount[default_hstate_idx] = mnt;
1560
1561         /* other hstates are optional */
1562         i = 0;
1563         for_each_hstate(h) {
1564                 if (i == default_hstate_idx) {
1565                         i++;
1566                         continue;
1567                 }
1568
1569                 mnt = mount_one_hugetlbfs(h);
1570                 if (IS_ERR(mnt))
1571                         hugetlbfs_vfsmount[i] = NULL;
1572                 else
1573                         hugetlbfs_vfsmount[i] = mnt;
1574                 i++;
1575         }
1576
1577         return 0;
1578
1579  out_unreg:
1580         (void)unregister_filesystem(&hugetlbfs_fs_type);
1581  out_free:
1582         kmem_cache_destroy(hugetlbfs_inode_cachep);
1583  out:
1584         return error;
1585 }
1586 fs_initcall(init_hugetlbfs_fs)