1 /* SPDX-License-Identifier: GPL-2.0 */
/*
 *  Copyright IBM Corp. 1999, 2000
 *  Author(s): Hartmut Penner (hp@de.ibm.com)
 *             Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgalloc.h"
 *    Copyright (C) 1994  Linus Torvalds
 */
12 #ifndef _S390_PGALLOC_H
13 #define _S390_PGALLOC_H
15 #include <linux/threads.h>
16 #include <linux/gfp.h>
19 #define CRST_ALLOC_ORDER 2
21 unsigned long *crst_table_alloc(struct mm_struct *);
22 void crst_table_free(struct mm_struct *, unsigned long *);
24 unsigned long *page_table_alloc(struct mm_struct *);
25 struct page *page_table_alloc_pgste(struct mm_struct *mm);
26 void page_table_free(struct mm_struct *, unsigned long *);
27 void page_table_free_rcu(struct mmu_gather *, unsigned long *, unsigned long);
28 void page_table_free_pgste(struct page *page);
29 extern int page_table_allocate_pgste;
31 static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
33 struct addrtype { char _[256]; };
36 for (i = 0; i < n; i += 256) {
39 "mvc 8(248,%[s]),0(%[s])\n"
40 : "+m" (*(struct addrtype *) s)
42 s += 256 / sizeof(long);
46 static inline void crst_table_init(unsigned long *crst, unsigned long entry)
48 clear_table(crst, entry, _CRST_TABLE_SIZE);
51 static inline unsigned long pgd_entry_type(struct mm_struct *mm)
53 if (mm->context.asce_limit <= _REGION3_SIZE)
54 return _SEGMENT_ENTRY_EMPTY;
55 if (mm->context.asce_limit <= _REGION2_SIZE)
56 return _REGION3_ENTRY_EMPTY;
57 if (mm->context.asce_limit <= _REGION1_SIZE)
58 return _REGION2_ENTRY_EMPTY;
59 return _REGION1_ENTRY_EMPTY;
62 int crst_table_upgrade(struct mm_struct *mm, unsigned long limit);
63 void crst_table_downgrade(struct mm_struct *);
65 static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long address)
67 unsigned long *table = crst_table_alloc(mm);
70 crst_table_init(table, _REGION2_ENTRY_EMPTY);
71 return (p4d_t *) table;
74 static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
76 if (!mm_p4d_folded(mm))
77 crst_table_free(mm, (unsigned long *) p4d);
80 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
82 unsigned long *table = crst_table_alloc(mm);
84 crst_table_init(table, _REGION3_ENTRY_EMPTY);
85 return (pud_t *) table;
88 static inline void pud_free(struct mm_struct *mm, pud_t *pud)
90 if (!mm_pud_folded(mm))
91 crst_table_free(mm, (unsigned long *) pud);
94 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
96 unsigned long *table = crst_table_alloc(mm);
100 crst_table_init(table, _SEGMENT_ENTRY_EMPTY);
101 if (!pgtable_pmd_page_ctor(virt_to_page(table))) {
102 crst_table_free(mm, table);
105 return (pmd_t *) table;
108 static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
110 if (mm_pmd_folded(mm))
112 pgtable_pmd_page_dtor(virt_to_page(pmd));
113 crst_table_free(mm, (unsigned long *) pmd);
116 static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
118 pgd_val(*pgd) = _REGION1_ENTRY | __pa(p4d);
121 static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
123 p4d_val(*p4d) = _REGION2_ENTRY | __pa(pud);
126 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
128 pud_val(*pud) = _REGION3_ENTRY | __pa(pmd);
131 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
133 unsigned long *table = crst_table_alloc(mm);
137 if (mm->context.asce_limit == _REGION3_SIZE) {
138 /* Forking a compat process with 2 page table levels */
139 if (!pgtable_pmd_page_ctor(virt_to_page(table))) {
140 crst_table_free(mm, table);
144 return (pgd_t *) table;
147 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
149 if (mm->context.asce_limit == _REGION3_SIZE)
150 pgtable_pmd_page_dtor(virt_to_page(pgd));
151 crst_table_free(mm, (unsigned long *) pgd);
154 static inline void pmd_populate(struct mm_struct *mm,
155 pmd_t *pmd, pgtable_t pte)
157 pmd_val(*pmd) = _SEGMENT_ENTRY + __pa(pte);
160 #define pmd_populate_kernel(mm, pmd, pte) pmd_populate(mm, pmd, pte)
162 #define pmd_pgtable(pmd) \
163 (pgtable_t)(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE)
/*
 * page table entry allocation/free routines.
 */
168 #define pte_alloc_one_kernel(mm, vmaddr) ((pte_t *) page_table_alloc(mm))
169 #define pte_alloc_one(mm, vmaddr) ((pte_t *) page_table_alloc(mm))
171 #define pte_free_kernel(mm, pte) page_table_free(mm, (unsigned long *) pte)
172 #define pte_free(mm, pte) page_table_free(mm, (unsigned long *) pte)
174 extern void rcu_table_freelist_finish(void);
176 void vmem_map_init(void);
177 void *vmem_crst_alloc(unsigned long val);
178 pte_t *vmem_pte_alloc(void);
180 #endif /* _S390_PGALLOC_H */