// SPDX-License-Identifier: GPL-2.0
/*
 *  Page table allocation functions
 *
 *    Copyright IBM Corp. 2016
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <asm/mmu_context.h>
#include <asm/page-states.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_PGSTE
int page_table_allocate_pgste = 0;
EXPORT_SYMBOL(page_table_allocate_pgste);
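/*
 * Toggled via the vm.allocate_pgste sysctl defined below: when non-zero,
 * page tables are allocated together with pgstes, as needed by KVM.
 */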
static struct ctl_table page_table_sysctl[] = {
	{
		.procname	= "allocate_pgste",
		.data		= &page_table_allocate_pgste,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO | S_IWUSR,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
};
static int __init page_table_register_sysctl(void)
{
	return register_sysctl("vm", page_table_sysctl) ? 0 : -ENOMEM;
}
__initcall(page_table_register_sysctl);

#endif /* CONFIG_PGSTE */
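/*
 * Allocate a crst (region or segment) table, which spans
 * 1UL << CRST_ALLOC_ORDER pages.
 */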
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL, CRST_ALLOC_ORDER);
	unsigned long *table;

	if (!ptdesc)
		return NULL;
	table = ptdesc_to_virt(ptdesc);
	__arch_set_page_dat(table, 1UL << CRST_ALLOC_ORDER);
	return table;
}
void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	/* may be called with a NULL table from the upgrade error path */
	if (!table)
		return;
	pagetable_free(virt_to_ptdesc(table));
}
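/* Run on each CPU via on_each_cpu(): load the new ASCE and flush the local TLB. */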
static void __crst_table_upgrade(void *arg)
{
	struct mm_struct *mm = arg;

	/* change all active ASCEs to avoid the creation of new TLBs */
	if (current->active_mm == mm) {
		S390_lowcore.user_asce.val = mm->context.asce;
		local_ctl_load(7, &S390_lowcore.user_asce);
	}
	__tlb_flush_local();
}
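/*
 * Add a p4d and/or pgd level to the process page table so the address space
 * can reach end, then make the new ASCE visible on all CPUs.
 */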
int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
{
	unsigned long *pgd = NULL, *p4d = NULL, *__pgd;
	unsigned long asce_limit = mm->context.asce_limit;

	/* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */
	VM_BUG_ON(asce_limit < _REGION2_SIZE);

	if (end <= asce_limit)
		return 0;

	if (asce_limit == _REGION2_SIZE) {
		p4d = crst_table_alloc(mm);
		if (unlikely(!p4d))
			goto err_p4d;
		crst_table_init(p4d, _REGION2_ENTRY_EMPTY);
	}
	if (end > _REGION1_SIZE) {
		pgd = crst_table_alloc(mm);
		if (unlikely(!pgd))
			goto err_pgd;
		crst_table_init(pgd, _REGION1_ENTRY_EMPTY);
	}

	spin_lock_bh(&mm->page_table_lock);

	/*
	 * This routine gets called with mmap_lock held and there is no
	 * reason to optimize for the case of it not being held. However,
	 * if that would ever change, the check below will let us know.
	 */
	VM_BUG_ON(asce_limit != mm->context.asce_limit);

	if (p4d) {
		__pgd = (unsigned long *) mm->pgd;
		p4d_populate(mm, (p4d_t *) p4d, (pud_t *) __pgd);
		mm->pgd = (pgd_t *) p4d;
		mm->context.asce_limit = _REGION1_SIZE;
		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			_ASCE_USER_BITS | _ASCE_TYPE_REGION2;
		mm_inc_nr_puds(mm);
	}
	if (pgd) {
		__pgd = (unsigned long *) mm->pgd;
		pgd_populate(mm, (pgd_t *) pgd, (p4d_t *) __pgd);
		mm->pgd = (pgd_t *) pgd;
		mm->context.asce_limit = TASK_SIZE_MAX;
		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			_ASCE_USER_BITS | _ASCE_TYPE_REGION1;
	}

	spin_unlock_bh(&mm->page_table_lock);

	on_each_cpu(__crst_table_upgrade, mm, 0);

	return 0;

err_pgd:
	crst_table_free(mm, p4d);
err_p4d:
	return -ENOMEM;
}

#ifdef CONFIG_PGSTE
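/*
 * Allocate a full page for a KVM guest page table: the first half holds the
 * pte entries (initialized to invalid), the second half the pgstes (cleared).
 */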
struct page *page_table_alloc_pgste(struct mm_struct *mm)
{
	struct ptdesc *ptdesc;
	u64 *table;

	ptdesc = pagetable_alloc(GFP_KERNEL, 0);
	if (ptdesc) {
		table = (u64 *)ptdesc_to_virt(ptdesc);
		__arch_set_page_dat(table, 1);
		memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
		memset64(table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	}
	return ptdesc_page(ptdesc);
}
void page_table_free_pgste(struct page *page)
{
	pagetable_free(page_ptdesc(page));
}

#endif /* CONFIG_PGSTE */
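/*
 * Allocate and initialize a pte table: the pte entries in the first half of
 * the page are set to invalid, the second half of the page is cleared.
 */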
unsigned long *page_table_alloc(struct mm_struct *mm)
{
	struct ptdesc *ptdesc;
	unsigned long *table;

	ptdesc = pagetable_alloc(GFP_KERNEL, 0);
	if (!ptdesc)
		return NULL;
	if (!pagetable_pte_ctor(ptdesc)) {
		pagetable_free(ptdesc);
		return NULL;
	}
	table = ptdesc_to_virt(ptdesc);
	__arch_set_page_dat(table, 1);
	/* pt_list is used by gmap only */
	INIT_LIST_HEAD(&ptdesc->pt_list);
	memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
	memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	return table;
}
static void pagetable_pte_dtor_free(struct ptdesc *ptdesc)
{
	pagetable_pte_dtor(ptdesc);
	pagetable_free(ptdesc);
}
void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct ptdesc *ptdesc = virt_to_ptdesc(table);

	pagetable_pte_dtor_free(ptdesc);
}
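/*
 * Callback of the generic tlb_remove_table() machinery: crst tables
 * (order CRST_ALLOC_ORDER) are freed directly, pte tables go through the
 * pte destructor first.
 */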
void __tlb_remove_table(void *table)
{
	struct ptdesc *ptdesc = virt_to_ptdesc(table);
	struct page *page = ptdesc_page(ptdesc);

	if (compound_order(page) == CRST_ALLOC_ORDER) {
		/* pmd, pud, or p4d */
		pagetable_free(ptdesc);
		return;
	}
	pagetable_pte_dtor_free(ptdesc);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void pte_free_now(struct rcu_head *head)
{
	struct ptdesc *ptdesc = container_of(head, struct ptdesc, pt_rcu_head);

	pagetable_pte_dtor_free(ptdesc);
}
void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable)
{
	struct ptdesc *ptdesc = virt_to_ptdesc(pgtable);

	call_rcu(&ptdesc->pt_rcu_head, pte_free_now);
	/*
	 * THPs are not allowed for KVM guests. Warn if pgste ever reaches here.
	 * Switch to the generic pte_free_defer() version once gmap is removed.
	 */
	WARN_ON_ONCE(mm_has_pgste(mm));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
/*
 * Base infrastructure required to generate basic asces, region, segment,
 * and page tables that do not make use of enhanced features like EDAT1.
 */
static struct kmem_cache *base_pgt_cache;
static unsigned long *base_pgt_alloc(void)
{
	unsigned long *table;

	table = kmem_cache_alloc(base_pgt_cache, GFP_KERNEL);
	if (table)
		memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
	return table;
}
static void base_pgt_free(unsigned long *table)
{
	kmem_cache_free(base_pgt_cache, table);
}
static unsigned long *base_crst_alloc(unsigned long val)
{
	unsigned long *table;
	struct ptdesc *ptdesc;

	ptdesc = pagetable_alloc(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (!ptdesc)
		return NULL;
	table = ptdesc_address(ptdesc);
	crst_table_init(table, val);
	return table;
}
static void base_crst_free(unsigned long *table)
{
	pagetable_free(virt_to_ptdesc(table));
}
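/*
 * Generate base_<level>_addr_end() helpers which return the end of the
 * current <level>-sized area, clamped to the overall walk end.
 */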
#define BASE_ADDR_END_FUNC(NAME, SIZE)					\
static inline unsigned long base_##NAME##_addr_end(unsigned long addr,	\
						   unsigned long end)	\
{									\
	unsigned long next = (addr + (SIZE)) & ~((SIZE) - 1);		\
									\
	return (next - 1) < (end - 1) ? next : end;			\
}
BASE_ADDR_END_FUNC(page,    _PAGE_SIZE)
BASE_ADDR_END_FUNC(segment, _SEGMENT_SIZE)
BASE_ADDR_END_FUNC(region3, _REGION3_SIZE)
BASE_ADDR_END_FUNC(region2, _REGION2_SIZE)
BASE_ADDR_END_FUNC(region1, _REGION1_SIZE)
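/* Translate a virtual address to a real address with the lra instruction. */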
static inline unsigned long base_lra(unsigned long address)
{
	unsigned long real;

	asm volatile(
		"	lra	%0,0(%1)\n"
		: "=d" (real) : "a" (address) : "cc");
	return real;
}
static int base_page_walk(unsigned long *origin, unsigned long addr,
			  unsigned long end, int alloc)
{
	unsigned long *pte, next;

	if (!alloc)
		return 0;
	pte = origin;
	pte += (addr & _PAGE_INDEX) >> _PAGE_SHIFT;
	do {
		next = base_page_addr_end(addr, end);
		*pte = base_lra(addr);
	} while (pte++, addr = next, addr < end);
	return 0;
}
static int base_segment_walk(unsigned long *origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *ste, next, *table;
	int rc;

	ste = origin;
	ste += (addr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
	do {
		next = base_segment_addr_end(addr, end);
		if (*ste & _SEGMENT_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_pgt_alloc();
			if (!table)
				return -ENOMEM;
			*ste = __pa(table) | _SEGMENT_ENTRY;
		}
		table = __va(*ste & _SEGMENT_ENTRY_ORIGIN);
		rc = base_page_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_pgt_free(table);
		cond_resched();
	} while (ste++, addr = next, addr < end);
	return 0;
}
static int base_region3_walk(unsigned long *origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rtte, next, *table;
	int rc;

	rtte = origin;
	rtte += (addr & _REGION3_INDEX) >> _REGION3_SHIFT;
	do {
		next = base_region3_addr_end(addr, end);
		if (*rtte & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rtte = __pa(table) | _REGION3_ENTRY;
		}
		table = __va(*rtte & _REGION_ENTRY_ORIGIN);
		rc = base_segment_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rtte++, addr = next, addr < end);
	return 0;
}
static int base_region2_walk(unsigned long *origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rste, next, *table;
	int rc;

	rste = origin;
	rste += (addr & _REGION2_INDEX) >> _REGION2_SHIFT;
	do {
		next = base_region2_addr_end(addr, end);
		if (*rste & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rste = __pa(table) | _REGION2_ENTRY;
		}
		table = __va(*rste & _REGION_ENTRY_ORIGIN);
		rc = base_region3_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rste++, addr = next, addr < end);
	return 0;
}
static int base_region1_walk(unsigned long *origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rfte, next, *table;
	int rc;

	rfte = origin;
	rfte += (addr & _REGION1_INDEX) >> _REGION1_SHIFT;
	do {
		next = base_region1_addr_end(addr, end);
		if (*rfte & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rfte = __pa(table) | _REGION1_ENTRY;
		}
		table = __va(*rfte & _REGION_ENTRY_ORIGIN);
		rc = base_region2_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rfte++, addr = next, addr < end);
	return 0;
}
/**
 * base_asce_free - free asce and tables returned from base_asce_alloc()
 * @asce: asce to be freed
 *
 * Frees all region, segment, and page tables that were allocated with a
 * corresponding base_asce_alloc() call.
 */
void base_asce_free(unsigned long asce)
{
	unsigned long *table = __va(asce & _ASCE_ORIGIN);

	if (!asce)
		return;
	switch (asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_SEGMENT:
		base_segment_walk(table, 0, _REGION3_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION3:
		base_region3_walk(table, 0, _REGION2_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION2:
		base_region2_walk(table, 0, _REGION1_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION1:
		base_region1_walk(table, 0, TASK_SIZE_MAX, 0);
		break;
	}
	base_crst_free(table);
}
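/* Create the kmem cache used by base_pgt_alloc() on first use. */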
static int base_pgt_cache_init(void)
{
	static DEFINE_MUTEX(base_pgt_cache_mutex);
	unsigned long sz = _PAGE_TABLE_SIZE;

	if (base_pgt_cache)
		return 0;
	mutex_lock(&base_pgt_cache_mutex);
	if (!base_pgt_cache)
		base_pgt_cache = kmem_cache_create("base_pgt", sz, sz, 0, NULL);
	mutex_unlock(&base_pgt_cache_mutex);
	return base_pgt_cache ? 0 : -ENOMEM;
}
/**
 * base_asce_alloc - create kernel mapping without enhanced DAT features
 * @addr: virtual start address of kernel mapping
 * @num_pages: number of consecutive pages
 *
 * Generate an asce, including all required region, segment and page tables,
 * that can be used to access the virtual kernel mapping. The difference is
 * that the returned asce does not make use of any enhanced DAT features like
 * e.g. large pages. This is required for some I/O functions that pass an
 * asce, like e.g. some service call requests.
 *
 * Note: the returned asce may NEVER be attached to any cpu. It may only be
 *	 used for I/O requests. tlb entries that might result because the
 *	 asce was attached to a cpu won't be cleared.
 */
unsigned long base_asce_alloc(unsigned long addr, unsigned long num_pages)
{
	unsigned long asce, *table, end;
	int rc;

	if (base_pgt_cache_init())
		return 0;
	end = addr + num_pages * PAGE_SIZE;
	if (end <= _REGION3_SIZE) {
		table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_segment_walk(table, addr, end, 1);
		asce = __pa(table) | _ASCE_TYPE_SEGMENT | _ASCE_TABLE_LENGTH;
	} else if (end <= _REGION2_SIZE) {
		table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region3_walk(table, addr, end, 1);
		asce = __pa(table) | _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
	} else if (end <= _REGION1_SIZE) {
		table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region2_walk(table, addr, end, 1);
		asce = __pa(table) | _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
	} else {
		table = base_crst_alloc(_REGION1_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region1_walk(table, addr, end, 1);
		asce = __pa(table) | _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH;
	}
	if (rc) {
		base_asce_free(asce);
		asce = 0;
	}
	return asce;
}