arch/powerpc/mm/book3s64/slice.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * address space "slices" (meta-segments) support
 *
 * Copyright (C) 2007 Benjamin Herrenschmidt, IBM Corporation.
 *
 * Based on hugetlb implementation
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 */

#undef DEBUG

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/hugetlb.h>
#include <linux/sched/mm.h>
#include <linux/security.h>
#include <asm/mman.h>
#include <asm/mmu.h>
#include <asm/copro.h>
#include <asm/hugetlb.h>
#include <asm/mmu_context.h>

static DEFINE_SPINLOCK(slice_convert_lock);

#ifdef DEBUG
int _slice_debug = 1;

static void slice_print_mask(const char *label, const struct slice_mask *mask)
{
        if (!_slice_debug)
                return;
        pr_devel("%s low_slice: %*pbl\n", label,
                        (int)SLICE_NUM_LOW, &mask->low_slices);
        pr_devel("%s high_slice: %*pbl\n", label,
                        (int)SLICE_NUM_HIGH, mask->high_slices);
}

#define slice_dbg(fmt...) do { if (_slice_debug) pr_devel(fmt); } while (0)

#else

static void slice_print_mask(const char *label, const struct slice_mask *mask) {}
#define slice_dbg(fmt...)

#endif

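/*
 * An address is "low" if it lies below SLICE_LOW_TOP (4GB); low
 * addresses are covered by the 256MB low slices, anything above by the
 * 1TB high slices on book3s64.
 */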
static inline notrace bool slice_addr_is_low(unsigned long addr)
{
        u64 tmp = (u64)addr;

        return tmp < SLICE_LOW_TOP;
}

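/*
 * Build the slice_mask covering [start, start + len). As an example,
 * with SLICE_LOW_SHIFT == 28 (256MB low slices), start = 0x10000000 and
 * len = 0x20000000 cover low slices 1-2, so low_slices becomes
 * (1 << 3) - (1 << 1) = 0b0110.
 */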
static void slice_range_to_mask(unsigned long start, unsigned long len,
                                struct slice_mask *ret)
{
        unsigned long end = start + len - 1;

        ret->low_slices = 0;
        if (SLICE_NUM_HIGH)
                bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);

        if (slice_addr_is_low(start)) {
                unsigned long mend = min(end,
                                         (unsigned long)(SLICE_LOW_TOP - 1));

                ret->low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
                        - (1u << GET_LOW_SLICE_INDEX(start));
        }

        if (SLICE_NUM_HIGH && !slice_addr_is_low(end)) {
                unsigned long start_index = GET_HIGH_SLICE_INDEX(start);
                unsigned long align_end = ALIGN(end, (1UL << SLICE_HIGH_SHIFT));
                unsigned long count = GET_HIGH_SLICE_INDEX(align_end) - start_index;

                bitmap_set(ret->high_slices, start_index, count);
        }
}

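/*
 * Return 1 if [addr, addr + len) stays below the address-space limit
 * and does not overlap any VMA (stack guard gap included), 0 otherwise.
 */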
static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
                              unsigned long len)
{
        struct vm_area_struct *vma;

        if ((mm_ctx_slb_addr_limit(&mm->context) - len) < addr)
                return 0;
        vma = find_vma(mm, addr);
        return (!vma || (addr + len) <= vm_start_gap(vma));
}

static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
{
        return !slice_area_is_free(mm, slice << SLICE_LOW_SHIFT,
                                   1ul << SLICE_LOW_SHIFT);
}

static int slice_high_has_vma(struct mm_struct *mm, unsigned long slice)
{
        unsigned long start = slice << SLICE_HIGH_SHIFT;
        unsigned long end = start + (1ul << SLICE_HIGH_SHIFT);
        /*
         * Hack so that each address is controlled by exactly one of the
         * high or low area bitmaps: the first high area starts at 4GB,
         * not at 0.
         */
        if (start == 0)
                start = (unsigned long)SLICE_LOW_TOP;

        return !slice_area_is_free(mm, start, end - start);
}

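/*
 * Build the mask of slices that contain no VMAs at all, i.e. the slices
 * that are free to be converted to a new page size.
 */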
static void slice_mask_for_free(struct mm_struct *mm, struct slice_mask *ret,
                                unsigned long high_limit)
{
        unsigned long i;

        ret->low_slices = 0;
        if (SLICE_NUM_HIGH)
                bitmap_zero(ret->high_slices, SLICE_NUM_HIGH);

        for (i = 0; i < SLICE_NUM_LOW; i++)
                if (!slice_low_has_vma(mm, i))
                        ret->low_slices |= 1u << i;

        if (slice_addr_is_low(high_limit - 1))
                return;

        for (i = 0; i < GET_HIGH_SLICE_INDEX(high_limit); i++)
                if (!slice_high_has_vma(mm, i))
                        __set_bit(i, ret->high_slices);
}

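/*
 * Return true if every slice touched by [start, start + len) is set in
 * the 'available' mask.
 */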
static bool slice_check_range_fits(struct mm_struct *mm,
                           const struct slice_mask *available,
                           unsigned long start, unsigned long len)
{
        unsigned long end = start + len - 1;
        u64 low_slices = 0;

        if (slice_addr_is_low(start)) {
                unsigned long mend = min(end,
                                         (unsigned long)(SLICE_LOW_TOP - 1));

                low_slices = (1u << (GET_LOW_SLICE_INDEX(mend) + 1))
                                - (1u << GET_LOW_SLICE_INDEX(start));
        }
        if ((low_slices & available->low_slices) != low_slices)
                return false;

        if (SLICE_NUM_HIGH && !slice_addr_is_low(end)) {
                unsigned long start_index = GET_HIGH_SLICE_INDEX(start);
                unsigned long align_end = ALIGN(end, (1UL << SLICE_HIGH_SHIFT));
                unsigned long count = GET_HIGH_SLICE_INDEX(align_end) - start_index;
                unsigned long i;

                for (i = start_index; i < start_index + count; i++) {
                        if (!test_bit(i, available->high_slices))
                                return false;
                }
        }

        return true;
}

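/*
 * IPI handler: if this CPU is running the target mm, resync the paca
 * copy of the context and flush/restore the bolted SLB entries so that
 * stale segment mappings are dropped.
 */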
static void slice_flush_segments(void *parm)
{
#ifdef CONFIG_PPC64
        struct mm_struct *mm = parm;
        unsigned long flags;

        if (mm != current->active_mm)
                return;

        copy_mm_to_paca(current->active_mm);

        local_irq_save(flags);
        slb_flush_and_restore_bolted();
        local_irq_restore(flags);
#endif
}

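/*
 * Mark every slice in 'mask' as having page size 'psize': update the
 * packed per-slice psize arrays, move the slices between the cached
 * per-size masks, then flush any coprocessor SLBs.
 */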
static void slice_convert(struct mm_struct *mm,
                                const struct slice_mask *mask, int psize)
{
        int index, mask_index;
        /* Write the new slice psize bits */
        unsigned char *hpsizes, *lpsizes;
        struct slice_mask *psize_mask, *old_mask;
        unsigned long i, flags;
        int old_psize;

        slice_dbg("slice_convert(mm=%p, psize=%d)\n", mm, psize);
        slice_print_mask(" mask", mask);

        psize_mask = slice_mask_for_size(&mm->context, psize);

        /* We need to use a spinlock here to protect against
         * concurrent 64k -> 4k demotion ...
         */
        spin_lock_irqsave(&slice_convert_lock, flags);

        lpsizes = mm_ctx_low_slices(&mm->context);
        for (i = 0; i < SLICE_NUM_LOW; i++) {
                if (!(mask->low_slices & (1u << i)))
                        continue;

                mask_index = i & 0x1;
                index = i >> 1;

                /* Update the slice_mask */
                old_psize = (lpsizes[index] >> (mask_index * 4)) & 0xf;
                old_mask = slice_mask_for_size(&mm->context, old_psize);
                old_mask->low_slices &= ~(1u << i);
                psize_mask->low_slices |= 1u << i;

                /* Update the sizes array */
                lpsizes[index] = (lpsizes[index] & ~(0xf << (mask_index * 4))) |
                                (((unsigned long)psize) << (mask_index * 4));
        }

        hpsizes = mm_ctx_high_slices(&mm->context);
        for (i = 0; i < GET_HIGH_SLICE_INDEX(mm_ctx_slb_addr_limit(&mm->context)); i++) {
                if (!test_bit(i, mask->high_slices))
                        continue;

                mask_index = i & 0x1;
                index = i >> 1;

                /* Update the slice_mask */
                old_psize = (hpsizes[index] >> (mask_index * 4)) & 0xf;
                old_mask = slice_mask_for_size(&mm->context, old_psize);
                __clear_bit(i, old_mask->high_slices);
                __set_bit(i, psize_mask->high_slices);

                /* Update the sizes array */
                hpsizes[index] = (hpsizes[index] & ~(0xf << (mask_index * 4))) |
                                (((unsigned long)psize) << (mask_index * 4));
        }

        slice_dbg(" lsps=%lx, hsps=%lx\n",
                  (unsigned long)mm_ctx_low_slices(&mm->context),
                  (unsigned long)mm_ctx_high_slices(&mm->context));

        spin_unlock_irqrestore(&slice_convert_lock, flags);

        copro_flush_all_slbs(mm);
}

/*
 * Compute which slice 'addr' is part of;
 * set *boundary_addr to the start or end boundary of that slice
 * (depending on the 'end' parameter);
 * return a boolean indicating whether the slice is marked as available
 * in the 'available' slice_mask.
 */
static bool slice_scan_available(unsigned long addr,
                                 const struct slice_mask *available,
                                 int end, unsigned long *boundary_addr)
{
        unsigned long slice;
        if (slice_addr_is_low(addr)) {
                slice = GET_LOW_SLICE_INDEX(addr);
                *boundary_addr = (slice + end) << SLICE_LOW_SHIFT;
                return !!(available->low_slices & (1u << slice));
        } else {
                slice = GET_HIGH_SLICE_INDEX(addr);
                *boundary_addr = (slice + end) ?
                        ((slice + end) << SLICE_HIGH_SHIFT) : SLICE_LOW_TOP;
                return !!test_bit(slice, available->high_slices);
        }
}

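/*
 * Bottom-up search: walk runs of contiguous available slices upwards
 * from 'addr' and ask vm_unmapped_area() for a fit inside each run.
 */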
static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
                                              unsigned long addr, unsigned long len,
                                              const struct slice_mask *available,
                                              int psize, unsigned long high_limit)
{
        int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
        unsigned long found, next_end;
        struct vm_unmapped_area_info info;

        info.flags = 0;
        info.length = len;
        info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
        info.align_offset = 0;
        /*
         * Search up to the maximum address allowed for this mmap request.
         */
        while (addr < high_limit) {
                info.low_limit = addr;
                if (!slice_scan_available(addr, available, 1, &addr))
                        continue;

 next_slice:
                /*
                 * At this point [info.low_limit; addr) covers
                 * available slices only and ends at a slice boundary.
                 * Check if we need to reduce the range, or if we can
                 * extend it to cover the next available slice.
                 */
                if (addr >= high_limit)
                        addr = high_limit;
                else if (slice_scan_available(addr, available, 1, &next_end)) {
                        addr = next_end;
                        goto next_slice;
                }
                info.high_limit = addr;

                found = vm_unmapped_area(&info);
                if (!(found & ~PAGE_MASK))
                        return found;
        }

        return -ENOMEM;
}

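/*
 * Top-down search: walk runs of contiguous available slices downwards
 * from 'addr'; if nothing fits, fall back to a bottom-up search.
 */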
static unsigned long slice_find_area_topdown(struct mm_struct *mm,
                                             unsigned long addr, unsigned long len,
                                             const struct slice_mask *available,
                                             int psize, unsigned long high_limit)
{
        int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
        unsigned long found, prev;
        struct vm_unmapped_area_info info;
        unsigned long min_addr = max(PAGE_SIZE, mmap_min_addr);

        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
        info.length = len;
        info.align_mask = PAGE_MASK & ((1ul << pshift) - 1);
        info.align_offset = 0;
        /*
         * If we are trying to allocate above DEFAULT_MAP_WINDOW,
         * add the difference to mmap_base. Apply this only to
         * requests whose high_limit is above DEFAULT_MAP_WINDOW.
         */
        if (high_limit > DEFAULT_MAP_WINDOW)
                addr += mm_ctx_slb_addr_limit(&mm->context) - DEFAULT_MAP_WINDOW;

        while (addr > min_addr) {
                info.high_limit = addr;
                if (!slice_scan_available(addr - 1, available, 0, &addr))
                        continue;

 prev_slice:
                /*
                 * At this point [addr; info.high_limit) covers
                 * available slices only and starts at a slice boundary.
                 * Check if we need to reduce the range, or if we can
                 * extend it to cover the previous available slice.
                 */
                if (addr < min_addr)
                        addr = min_addr;
                else if (slice_scan_available(addr - 1, available, 0, &prev)) {
                        addr = prev;
                        goto prev_slice;
                }
                info.low_limit = addr;

                found = vm_unmapped_area(&info);
                if (!(found & ~PAGE_MASK))
                        return found;
        }

        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        return slice_find_area_bottomup(mm, TASK_UNMAPPED_BASE, len, available, psize, high_limit);
}


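/*
 * Dispatch to the top-down or bottom-up search, starting from the mm's
 * mmap_base.
 */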
static unsigned long slice_find_area(struct mm_struct *mm, unsigned long len,
                                     const struct slice_mask *mask, int psize,
                                     int topdown, unsigned long high_limit)
{
        if (topdown)
                return slice_find_area_topdown(mm, mm->mmap_base, len, mask, psize, high_limit);
        else
                return slice_find_area_bottomup(mm, mm->mmap_base, len, mask, psize, high_limit);
}

static inline void slice_copy_mask(struct slice_mask *dst,
                                        const struct slice_mask *src)
{
        dst->low_slices = src->low_slices;
        if (!SLICE_NUM_HIGH)
                return;
        bitmap_copy(dst->high_slices, src->high_slices, SLICE_NUM_HIGH);
}

static inline void slice_or_mask(struct slice_mask *dst,
                                        const struct slice_mask *src1,
                                        const struct slice_mask *src2)
{
        dst->low_slices = src1->low_slices | src2->low_slices;
        if (!SLICE_NUM_HIGH)
                return;
        bitmap_or(dst->high_slices, src1->high_slices, src2->high_slices, SLICE_NUM_HIGH);
}

static inline void slice_andnot_mask(struct slice_mask *dst,
                                        const struct slice_mask *src1,
                                        const struct slice_mask *src2)
{
        dst->low_slices = src1->low_slices & ~src2->low_slices;
        if (!SLICE_NUM_HIGH)
                return;
        bitmap_andnot(dst->high_slices, src1->high_slices, src2->high_slices, SLICE_NUM_HIGH);
}

#ifdef CONFIG_PPC_64K_PAGES
#define MMU_PAGE_BASE   MMU_PAGE_64K
#else
#define MMU_PAGE_BASE   MMU_PAGE_4K
#endif

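/*
 * Find (and, if necessary, convert) a free range of 'len' bytes that
 * can be mapped with page size 'psize'. 'addr' is a hint (mandatory
 * with MAP_FIXED); 'topdown' selects the search direction.
 */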
unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
                                      unsigned long flags, unsigned int psize,
                                      int topdown)
{
        struct slice_mask good_mask;
        struct slice_mask potential_mask;
        const struct slice_mask *maskp;
        const struct slice_mask *compat_maskp = NULL;
        int fixed = (flags & MAP_FIXED);
        int pshift = max_t(int, mmu_psize_defs[psize].shift, PAGE_SHIFT);
        unsigned long page_size = 1UL << pshift;
        struct mm_struct *mm = current->mm;
        unsigned long newaddr;
        unsigned long high_limit;

        high_limit = DEFAULT_MAP_WINDOW;
        if (addr >= high_limit || (fixed && (addr + len > high_limit)))
                high_limit = TASK_SIZE;

        if (len > high_limit)
                return -ENOMEM;
        if (len & (page_size - 1))
                return -EINVAL;
        if (fixed) {
                if (addr & (page_size - 1))
                        return -EINVAL;
                if (addr > high_limit - len)
                        return -ENOMEM;
        }

        if (high_limit > mm_ctx_slb_addr_limit(&mm->context)) {
                /*
                 * Increasing the slb_addr_limit does not require
                 * slice mask cache to be recalculated because it should
                 * be already initialised beyond the old address limit.
                 */
                mm_ctx_set_slb_addr_limit(&mm->context, high_limit);

                on_each_cpu(slice_flush_segments, mm, 1);
        }

        /* Sanity checks */
        BUG_ON(mm->task_size == 0);
        BUG_ON(mm_ctx_slb_addr_limit(&mm->context) == 0);
        VM_BUG_ON(radix_enabled());

        slice_dbg("slice_get_unmapped_area(mm=%p, psize=%d...\n", mm, psize);
        slice_dbg(" addr=%lx, len=%lx, flags=%lx, topdown=%d\n",
                  addr, len, flags, topdown);

        /* If hint, make sure it matches our alignment restrictions */
        if (!fixed && addr) {
                addr = ALIGN(addr, page_size);
                slice_dbg(" aligned addr=%lx\n", addr);
                /* Ignore hint if it's too large or overlaps a VMA */
                if (addr > high_limit - len || addr < mmap_min_addr ||
                    !slice_area_is_free(mm, addr, len))
                        addr = 0;
        }

        /* First make up a "good" mask of slices that have the right size
         * already
         */
        maskp = slice_mask_for_size(&mm->context, psize);

        /*
         * Here "good" means slices that are already the right page size,
         * "compat" means slices that have a compatible page size (i.e.
         * 4k in a 64k pagesize kernel), and "free" means slices without
         * any VMAs.
         *
         * If MAP_FIXED:
         *      check if fits in good | compat => OK
         *      check if fits in good | compat | free => convert free
         *      else bad
         * If have hint:
         *      check if hint fits in good => OK
         *      check if hint fits in good | free => convert free
         * Otherwise:
         *      search in good, found => OK
         *      search in good | free, found => convert free
         *      search in good | compat | free, found => convert free.
         */

        /*
         * If we support combo pages, we can allow 64k pages in 4k slices
         * The mask copies could be avoided in most cases here if we had
         * a pointer to good mask for the next code to use.
         */
        if (IS_ENABLED(CONFIG_PPC_64K_PAGES) && psize == MMU_PAGE_64K) {
                compat_maskp = slice_mask_for_size(&mm->context, MMU_PAGE_4K);
                if (fixed)
                        slice_or_mask(&good_mask, maskp, compat_maskp);
                else
                        slice_copy_mask(&good_mask, maskp);
        } else {
                slice_copy_mask(&good_mask, maskp);
        }

        slice_print_mask(" good_mask", &good_mask);
        if (compat_maskp)
                slice_print_mask(" compat_mask", compat_maskp);

        /* First check hint if it's valid or if we have MAP_FIXED */
        if (addr != 0 || fixed) {
                /* Check if we fit in the good mask. If we do, we just return,
                 * nothing else to do
                 */
                if (slice_check_range_fits(mm, &good_mask, addr, len)) {
                        slice_dbg(" fits good !\n");
                        newaddr = addr;
                        goto return_addr;
                }
        } else {
                /* Now let's see if we can find something in the existing
                 * slices for that size
                 */
                newaddr = slice_find_area(mm, len, &good_mask,
                                          psize, topdown, high_limit);
                if (newaddr != -ENOMEM) {
                        /* Found within the good mask; no conversion is
                         * needed, so return directly
                         */
                        slice_dbg(" found area at 0x%lx\n", newaddr);
                        goto return_addr;
                }
        }
        /*
         * We don't fit in the good mask, check what other slices are
         * empty and thus can be converted
         */
        slice_mask_for_free(mm, &potential_mask, high_limit);
        slice_or_mask(&potential_mask, &potential_mask, &good_mask);
        slice_print_mask(" potential", &potential_mask);

        if (addr != 0 || fixed) {
                if (slice_check_range_fits(mm, &potential_mask, addr, len)) {
                        slice_dbg(" fits potential !\n");
                        newaddr = addr;
                        goto convert;
                }
        }

        /* If we have MAP_FIXED and failed the above steps, then error out */
        if (fixed)
                return -EBUSY;

        slice_dbg(" search...\n");

        /* If we had a hint that didn't work out, see if we can fit
         * anywhere in the good area.
         */
        if (addr) {
                newaddr = slice_find_area(mm, len, &good_mask,
                                          psize, topdown, high_limit);
                if (newaddr != -ENOMEM) {
                        slice_dbg(" found area at 0x%lx\n", newaddr);
                        goto return_addr;
                }
        }

        /* Now let's see if we can find something in the existing slices
         * for that size plus free slices
         */
        newaddr = slice_find_area(mm, len, &potential_mask,
                                  psize, topdown, high_limit);

        if (IS_ENABLED(CONFIG_PPC_64K_PAGES) && newaddr == -ENOMEM &&
            psize == MMU_PAGE_64K) {
                /* retry the search with 4k-page slices included */
                slice_or_mask(&potential_mask, &potential_mask, compat_maskp);
                newaddr = slice_find_area(mm, len, &potential_mask,
                                          psize, topdown, high_limit);
        }

        if (newaddr == -ENOMEM)
                return -ENOMEM;

        slice_range_to_mask(newaddr, len, &potential_mask);
        slice_dbg(" found potential area at 0x%lx\n", newaddr);
        slice_print_mask(" mask", &potential_mask);

 convert:
        /*
         * Try to allocate the context before we do slice convert
         * so that we handle the context allocation failure gracefully.
         */
        if (need_extra_context(mm, newaddr)) {
                if (alloc_extended_context(mm, newaddr) < 0)
                        return -ENOMEM;
        }

        slice_andnot_mask(&potential_mask, &potential_mask, &good_mask);
        if (compat_maskp && !fixed)
                slice_andnot_mask(&potential_mask, &potential_mask, compat_maskp);
        if (potential_mask.low_slices ||
                (SLICE_NUM_HIGH &&
                 !bitmap_empty(potential_mask.high_slices, SLICE_NUM_HIGH))) {
                slice_convert(mm, &potential_mask, psize);
                if (psize > MMU_PAGE_BASE)
                        on_each_cpu(slice_flush_segments, mm, 1);
        }
        return newaddr;

return_addr:
        if (need_extra_context(mm, newaddr)) {
                if (alloc_extended_context(mm, newaddr) < 0)
                        return -ENOMEM;
        }
        return newaddr;
}
EXPORT_SYMBOL_GPL(slice_get_unmapped_area);

unsigned long arch_get_unmapped_area(struct file *filp,
                                     unsigned long addr,
                                     unsigned long len,
                                     unsigned long pgoff,
                                     unsigned long flags)
{
        if (radix_enabled())
                return generic_get_unmapped_area(filp, addr, len, pgoff, flags);

        return slice_get_unmapped_area(addr, len, flags,
                                       mm_ctx_user_psize(&current->mm->context), 0);
}

unsigned long arch_get_unmapped_area_topdown(struct file *filp,
                                             const unsigned long addr0,
                                             const unsigned long len,
                                             const unsigned long pgoff,
                                             const unsigned long flags)
{
        if (radix_enabled())
                return generic_get_unmapped_area_topdown(filp, addr0, len, pgoff, flags);

        return slice_get_unmapped_area(addr0, len, flags,
                                       mm_ctx_user_psize(&current->mm->context), 1);
}

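/*
 * Return the MMU page size index of the slice containing 'addr'; each
 * byte of the psize arrays packs two 4-bit slice entries.
 */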
unsigned int notrace get_slice_psize(struct mm_struct *mm, unsigned long addr)
{
        unsigned char *psizes;
        int index, mask_index;

        VM_BUG_ON(radix_enabled());

        if (slice_addr_is_low(addr)) {
                psizes = mm_ctx_low_slices(&mm->context);
                index = GET_LOW_SLICE_INDEX(addr);
        } else {
                psizes = mm_ctx_high_slices(&mm->context);
                index = GET_HIGH_SLICE_INDEX(addr);
        }
        mask_index = index & 0x1;
        return (psizes[index >> 1] >> (mask_index * 4)) & 0xf;
}
EXPORT_SYMBOL_GPL(get_slice_psize);

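/*
 * Initialise the slice state of a new mm: default address limit,
 * default psize on every slice, and a pre-filled mask cache for the
 * default size.
 */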
void slice_init_new_context_exec(struct mm_struct *mm)
{
        unsigned char *hpsizes, *lpsizes;
        struct slice_mask *mask;
        unsigned int psize = mmu_virtual_psize;

        slice_dbg("slice_init_new_context_exec(mm=%p)\n", mm);

        /*
         * In the case of exec, use the default limit. In the
         * case of fork it is just inherited from the mm being
         * duplicated.
         */
        mm_ctx_set_slb_addr_limit(&mm->context, SLB_ADDR_LIMIT_DEFAULT);
        mm_ctx_set_user_psize(&mm->context, psize);

        /*
         * Set all slice psizes to the default.
         */
        lpsizes = mm_ctx_low_slices(&mm->context);
        memset(lpsizes, (psize << 4) | psize, SLICE_NUM_LOW >> 1);

        hpsizes = mm_ctx_high_slices(&mm->context);
        memset(hpsizes, (psize << 4) | psize, SLICE_NUM_HIGH >> 1);

        /*
         * Slice mask cache starts zeroed, fill the default size cache.
         */
        mask = slice_mask_for_size(&mm->context, psize);
        mask->low_slices = ~0UL;
        if (SLICE_NUM_HIGH)
                bitmap_fill(mask->high_slices, SLICE_NUM_HIGH);
}

void slice_setup_new_exec(void)
{
        struct mm_struct *mm = current->mm;

        slice_dbg("slice_setup_new_exec(mm=%p)\n", mm);

        if (!is_32bit_task())
                return;

        mm_ctx_set_slb_addr_limit(&mm->context, DEFAULT_MAP_WINDOW);
}

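/*
 * Force the slices covering [start, start + len) to page size 'psize'.
 */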
void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
                           unsigned long len, unsigned int psize)
{
        struct slice_mask mask;

        VM_BUG_ON(radix_enabled());

        slice_range_to_mask(start, len, &mask);
        slice_convert(mm, &mask, psize);
}

#ifdef CONFIG_HUGETLB_PAGE
/*
 * is_hugepage_only_range() is used by generic code to verify whether
 * a normal mmap mapping (non hugetlbfs) is valid on a given area.
 *
 * Until the generic code provides a more generic hook and/or starts
 * calling arch get_unmapped_area for MAP_FIXED (which our implementation
 * here knows how to deal with), we hijack it to keep standard mappings
 * away from us.
 *
 * Because of that generic code limitation, a MAP_FIXED mapping cannot
 * "convert" back a slice with no VMAs to the standard page size, only
 * get_unmapped_area() can. It would be possible to fix it here but I
 * prefer working on fixing the generic code instead.
 *
 * WARNING: This will not work if hugetlbfs isn't enabled since the
 * generic code will redefine that function as 0 in that case. This is
 * ok for now as we only use slices with hugetlbfs enabled. This should
 * be fixed as the generic code gets fixed.
 */
int slice_is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
                           unsigned long len)
{
        const struct slice_mask *maskp;
        unsigned int psize = mm_ctx_user_psize(&mm->context);

        VM_BUG_ON(radix_enabled());

        maskp = slice_mask_for_size(&mm->context, psize);

        /* We need to account for 4k slices too */
        if (IS_ENABLED(CONFIG_PPC_64K_PAGES) && psize == MMU_PAGE_64K) {
                const struct slice_mask *compat_maskp;
                struct slice_mask available;

                compat_maskp = slice_mask_for_size(&mm->context, MMU_PAGE_4K);
                slice_or_mask(&available, maskp, compat_maskp);
                return !slice_check_range_fits(mm, &available, addr, len);
        }

        return !slice_check_range_fits(mm, maskp, addr, len);
}

unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
{
        /* With radix we don't use slices, so derive it from the vma */
        if (radix_enabled())
                return vma_kernel_pagesize(vma);

        return 1UL << mmu_psize_to_shift(get_slice_psize(vma->vm_mm, vma->vm_start));
}

static int file_to_psize(struct file *file)
{
        struct hstate *hstate = hstate_file(file);
        return shift_to_mmu_psize(huge_page_shift(hstate));
}

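/*
 * Hugetlb mappings are always searched top-down on hash; radix uses
 * the generic implementation instead.
 */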
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
                                        unsigned long len, unsigned long pgoff,
                                        unsigned long flags)
{
        if (radix_enabled())
                return generic_hugetlb_get_unmapped_area(file, addr, len, pgoff, flags);

        return slice_get_unmapped_area(addr, len, flags, file_to_psize(file), 1);
}
#endif