GNU Linux-libre 4.14.251-gnu1
arch/tile/mm/hugetlbpage.c
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 *
 * TILE Huge TLB Page Support for Kernel.
 * Taken from i386 hugetlb implementation:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <linux/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/setup.h>

#ifdef CONFIG_HUGETLB_SUPER_PAGES

/*
 * Provide an additional huge page size (in addition to the regular default
 * huge page size) if no "hugepagesz" arguments are specified.
 * Note that it must be smaller than the default huge page size so
 * that such pages can still be allocated on demand from the buddy
 * allocator.  You can change this to 64K (on a 16K build), 256K, 1M,
 * or 4M, or not define it at all.
 */
#define ADDITIONAL_HUGE_SIZE (1024 * 1024UL)

/* "Extra" page-size multipliers, one per level of the page table. */
int huge_shift[HUGE_SHIFT_ENTRIES] = {
#ifdef ADDITIONAL_HUGE_SIZE
#define ADDITIONAL_HUGE_SHIFT __builtin_ctzl(ADDITIONAL_HUGE_SIZE / PAGE_SIZE)
        [HUGE_SHIFT_PAGE] = ADDITIONAL_HUGE_SHIFT
#endif
};
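
/*
 * Worked example (illustrative, assuming a 64 KB base PAGE_SIZE): the
 * 1 MB ADDITIONAL_HUGE_SIZE spans 1 MB / 64 KB = 16 base pages, so
 * ADDITIONAL_HUGE_SHIFT is __builtin_ctzl(16) = 4, and the super PTE
 * for such a page lives at a 16-entry-aligned slot in the L2 page
 * table (see get_pte() below).
 */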

#endif

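/*
 * Allocate page-table entries down to the level implied by the huge
 * page size "sz": a PGD-level entry for sz >= PGDIR_SIZE, a PMD-level
 * entry for sz >= PMD_SIZE, and otherwise (super pages only) an L2
 * PTE.  The BUG_ON()/panic() checks confirm that "sz" is one of the
 * sizes registered in huge_shift[].
 */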
pte_t *huge_pte_alloc(struct mm_struct *mm,
                      unsigned long addr, unsigned long sz)
{
        pgd_t *pgd;
        pud_t *pud;

        addr &= -sz;   /* Mask off any low bits in the address. */

        pgd = pgd_offset(mm, addr);
        pud = pud_alloc(mm, pgd, addr);

#ifdef CONFIG_HUGETLB_SUPER_PAGES
        if (sz >= PGDIR_SIZE) {
                BUG_ON(sz != PGDIR_SIZE &&
                       sz != PGDIR_SIZE << huge_shift[HUGE_SHIFT_PGDIR]);
                return (pte_t *)pud;
        } else {
                pmd_t *pmd = pmd_alloc(mm, pud, addr);
                if (sz >= PMD_SIZE) {
                        BUG_ON(sz != PMD_SIZE &&
                               sz != (PMD_SIZE << huge_shift[HUGE_SHIFT_PMD]));
                        return (pte_t *)pmd;
                } else {
                        if (sz != PAGE_SIZE << huge_shift[HUGE_SHIFT_PAGE])
                                panic("Unexpected page size %#lx\n", sz);
                        return pte_alloc_map(mm, pmd, addr);
                }
        }
#else
        BUG_ON(sz != PMD_SIZE);
        return (pte_t *)pmd_alloc(mm, pud, addr);
#endif
}

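/*
 * Look up the PTE at "index", falling back to the "super" PTE that
 * covers it.  Illustrative example: with huge_shift[level] == 4, an
 * index of 0x13 falls inside the 16-entry gang based at index 0x10;
 * if the entry at 0x10 is present and has the super bit set, it is
 * the entry that actually maps this address, so return it instead.
 */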
static pte_t *get_pte(pte_t *base, int index, int level)
{
        pte_t *ptep = base + index;
#ifdef CONFIG_HUGETLB_SUPER_PAGES
        if (!pte_present(*ptep) && huge_shift[level] != 0) {
                unsigned long mask = -1UL << huge_shift[level];
                pte_t *super_ptep = base + (index & mask);
                pte_t pte = *super_ptep;
                if (pte_present(pte) && pte_super(pte))
                        ptep = super_ptep;
        }
#endif
        return ptep;
}

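/*
 * Walk the page table for "addr" and return a pointer to the huge PTE
 * that maps it, or NULL if the address is not mapped by a huge page.
 * Each level is checked in turn: an L0 huge PTE at the top level, an
 * L1 huge PTE at PMD level, and finally (with super pages enabled) an
 * L2 super PTE.
 */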
pte_t *huge_pte_offset(struct mm_struct *mm,
                       unsigned long addr, unsigned long sz)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
#ifdef CONFIG_HUGETLB_SUPER_PAGES
        pte_t *pte;
#endif

        /* Get the top-level page table entry. */
        pgd = (pgd_t *)get_pte((pte_t *)mm->pgd, pgd_index(addr), 0);

        /* We don't have four levels. */
        pud = pud_offset(pgd, addr);
#ifndef __PAGETABLE_PUD_FOLDED
# error support fourth page table level
#endif
        if (!pud_present(*pud))
                return NULL;

        /* Check for an L0 huge PTE, if we have three levels. */
#ifndef __PAGETABLE_PMD_FOLDED
        if (pud_huge(*pud))
                return (pte_t *)pud;

        pmd = (pmd_t *)get_pte((pte_t *)pud_page_vaddr(*pud),
                               pmd_index(addr), 1);
        if (!pmd_present(*pmd))
                return NULL;
#else
        pmd = pmd_offset(pud, addr);
#endif

        /* Check for an L1 huge PTE. */
        if (pmd_huge(*pmd))
                return (pte_t *)pmd;

#ifdef CONFIG_HUGETLB_SUPER_PAGES
        /* Check for an L2 huge PTE. */
        pte = get_pte((pte_t *)pmd_page_vaddr(*pmd), pte_index(addr), 2);
        if (!pte_present(*pte))
                return NULL;
        if (pte_super(*pte))
                return pte;
#endif

        return NULL;
}

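/* Huge entries are identified by the _PAGE_HUGE_PAGE bit at any level. */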
int pmd_huge(pmd_t pmd)
{
        return !!(pmd_val(pmd) & _PAGE_HUGE_PAGE);
}

int pud_huge(pud_t pud)
{
        return !!(pud_val(pud) & _PAGE_HUGE_PAGE);
}

#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
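/*
 * Note on alignment (illustrative): PAGE_MASK & ~huge_page_mask(h)
 * keeps exactly the address bits between the base-page and huge-page
 * boundaries, so vm_unmapped_area() only returns huge-page-aligned
 * addresses.  With 64 KB base pages and a 16 MB huge page, for
 * example, align_mask covers bits 16..23, forcing 16 MB alignment.
 */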
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
                unsigned long addr, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct vm_unmapped_area_info info;

        info.flags = 0;
        info.length = len;
        info.low_limit = TASK_UNMAPPED_BASE;
        info.high_limit = TASK_SIZE;
        info.align_mask = PAGE_MASK & ~huge_page_mask(h);
        info.align_offset = 0;
        return vm_unmapped_area(&info);
}

static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
                unsigned long addr0, unsigned long len,
                unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct vm_unmapped_area_info info;
        unsigned long addr;

        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
        info.length = len;
        info.low_limit = PAGE_SIZE;
        info.high_limit = current->mm->mmap_base;
        info.align_mask = PAGE_MASK & ~huge_page_mask(h);
        info.align_offset = 0;
        addr = vm_unmapped_area(&info);

        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here.  This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        if (addr & ~PAGE_MASK) {
                VM_BUG_ON(addr != -ENOMEM);
                info.flags = 0;
                info.low_limit = TASK_UNMAPPED_BASE;
                info.high_limit = TASK_SIZE;
                addr = vm_unmapped_area(&info);
        }

        return addr;
}

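/*
 * Top-level unmapped-area hook for huge pages: validate the length,
 * honor MAP_FIXED and any usable hint address, then search bottom-up
 * or top-down to match the mmap layout the process is already using.
 */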
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct hstate *h = hstate_file(file);
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;

        if (len & ~huge_page_mask(h))
                return -EINVAL;
        if (len > TASK_SIZE)
                return -ENOMEM;

        if (flags & MAP_FIXED) {
                if (prepare_hugepage_range(file, addr, len))
                        return -EINVAL;
                return addr;
        }

        if (addr) {
                addr = ALIGN(addr, huge_page_size(h));
                vma = find_vma(mm, addr);
                if (TASK_SIZE - len >= addr &&
                    (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }
        if (current->mm->get_unmapped_area == arch_get_unmapped_area)
                return hugetlb_get_unmapped_area_bottomup(file, addr, len,
                                pgoff, flags);
        else
                return hugetlb_get_unmapped_area_topdown(file, addr, len,
                                pgoff, flags);
}
#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */

#ifdef CONFIG_HUGETLB_SUPER_PAGES
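/*
 * Validate and enable one "hugepagesz=" size.  The size must be a
 * power of four (an even log2) no larger than 64 GB, and only one
 * extra size is allowed per page-table level; the hypervisor is told
 * the extra shift via hv_set_pte_super_shift() before the size is
 * registered with the generic hugetlb code.
 */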
static __init int __setup_hugepagesz(unsigned long ps)
{
        int log_ps = __builtin_ctzl(ps);
        int level, base_shift;

        if ((1UL << log_ps) != ps || (log_ps & 1) != 0) {
                pr_warn("Not enabling %ld byte huge pages; must be a power of four\n",
                        ps);
                return -EINVAL;
        }

        if (ps > 64*1024*1024*1024UL) {
                pr_warn("Not enabling %ld MB huge pages; largest legal value is 64 GB\n",
                        ps >> 20);
                return -EINVAL;
        } else if (ps >= PUD_SIZE) {
                static long hv_jpage_size;
                if (hv_jpage_size == 0)
                        hv_jpage_size = hv_sysconf(HV_SYSCONF_PAGE_SIZE_JUMBO);
                if (hv_jpage_size != PUD_SIZE) {
                        pr_warn("Not enabling >= %ld MB huge pages: hypervisor reports size %ld\n",
                                PUD_SIZE >> 20, hv_jpage_size);
                        return -EINVAL;
                }
                level = 0;
                base_shift = PUD_SHIFT;
        } else if (ps >= PMD_SIZE) {
                level = 1;
                base_shift = PMD_SHIFT;
        } else if (ps > PAGE_SIZE) {
                level = 2;
                base_shift = PAGE_SHIFT;
        } else {
                pr_err("hugepagesz: huge page size %ld too small\n", ps);
                return -EINVAL;
        }

        if (log_ps != base_shift) {
                int shift_val = log_ps - base_shift;
                if (huge_shift[level] != 0) {
                        int old_shift = base_shift + huge_shift[level];
                        pr_warn("Not enabling %ld MB huge pages; already have size %ld MB\n",
                                ps >> 20, (1UL << old_shift) >> 20);
                        return -EINVAL;
                }
                if (hv_set_pte_super_shift(level, shift_val) != 0) {
                        pr_warn("Not enabling %ld MB huge pages; no hypervisor support\n",
                                ps >> 20);
                        return -EINVAL;
                }
                printk(KERN_DEBUG "Enabled %ld MB huge pages\n", ps >> 20);
                huge_shift[level] = shift_val;
        }

        hugetlb_add_hstate(log_ps - PAGE_SHIFT);

        return 0;
}

static bool saw_hugepagesz;

static __init int setup_hugepagesz(char *opt)
{
        int rc;

        if (!saw_hugepagesz) {
                saw_hugepagesz = true;
                memset(huge_shift, 0, sizeof(huge_shift));
        }
        rc = __setup_hugepagesz(memparse(opt, NULL));
        if (rc)
                hugetlb_bad_size();
        return rc;
}
__setup("hugepagesz=", setup_hugepagesz);
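
/*
 * Illustrative example: booting with "hugepagesz=256m" registers a
 * 256 MB size here (a power of four), after which the generic
 * "hugepages=" parameter can reserve pages of that size at boot.
 */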

#ifdef ADDITIONAL_HUGE_SIZE
/*
 * Provide an additional huge page size if no "hugepagesz" args are given.
 * In that case, all the cores have properly set up their hv super_shift
 * already, but we need to notify the hugetlb code to enable the
 * new huge page size from the Linux point of view.
 */
static __init int add_default_hugepagesz(void)
{
        if (!saw_hugepagesz) {
                BUILD_BUG_ON(ADDITIONAL_HUGE_SIZE >= PMD_SIZE ||
                             ADDITIONAL_HUGE_SIZE <= PAGE_SIZE);
                BUILD_BUG_ON((PAGE_SIZE << ADDITIONAL_HUGE_SHIFT) !=
                             ADDITIONAL_HUGE_SIZE);
                BUILD_BUG_ON(ADDITIONAL_HUGE_SHIFT & 1);
                hugetlb_add_hstate(ADDITIONAL_HUGE_SHIFT);
        }
        return 0;
}
arch_initcall(add_default_hugepagesz);
#endif

#endif /* CONFIG_HUGETLB_SUPER_PAGES */