// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2007-2008 Paul Mackerras, IBM Corp.
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/types.h>
#include <linux/pagewalk.h>
#include <linux/hugetlb.h>
#include <linux/syscalls.h>

#include <linux/pgtable.h>
#include <linux/uaccess.h>
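
/*
 * Layout note (editorial summary inferred from the code in this file, not
 * part of the original source): the per-mm struct subpage_prot_table keeps
 * the 2-bit-per-4k protection words in page-sized arrays.  spt->low_prot[]
 * covers the first 4GB of the address space (the addr < 0x100000000UL
 * checks below); higher addresses go through spt->protptrs[], whose entries
 * point to pages of pointers to the actual protection-word pages.
 * spt->maxaddr is the upper bound of the range that has ever had a subpage
 * protection map installed.
 */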
/*
 * Free all pages allocated for subpage protection maps and pointers.
 * Also makes sure that the subpage_prot_table structure is
 * reinitialized for the next user.
 */
void subpage_prot_free(struct mm_struct *mm)
{
	struct subpage_prot_table *spt = mm_ctx_subpage_prot(&mm->context);
	unsigned long i, j, addr;
	u32 **p;

	if (!spt)
		return;

	for (i = 0; i < 4; ++i) {
		if (spt->low_prot[i]) {
			free_page((unsigned long)spt->low_prot[i]);
			spt->low_prot[i] = NULL;
		}
	}
	addr = 0;
	for (i = 0; i < (TASK_SIZE_USER64 >> 43); ++i) {
		p = spt->protptrs[i];
		if (!p)
			continue;
		spt->protptrs[i] = NULL;
		for (j = 0; j < SBP_L2_COUNT && addr < spt->maxaddr;
		     ++j, addr += PAGE_SIZE)
			if (p[j])
				free_page((unsigned long)p[j]);
		free_page((unsigned long)p);
	}
	spt->maxaddr = 0;
	kfree(spt);
}
static void hpte_flush_range(struct mm_struct *mm, unsigned long addr,
			     int npages)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;

	pgd = pgd_offset(mm, addr);
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return;
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return;
	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	arch_enter_lazy_mmu_mode();
	for (; npages > 0; --npages) {
		pte_update(mm, addr, pte, 0, 0, 0);
		addr += PAGE_SIZE;
		++pte;
	}
	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(pte - 1, ptl);
}
/*
 * Clear the subpage protection map for an address range, allowing
 * all accesses that are allowed by the pte permissions.
 */
static void subpage_prot_clear(unsigned long addr, unsigned long len)
{
	struct mm_struct *mm = current->mm;
	struct subpage_prot_table *spt;
	u32 **spm, *spp;
	unsigned long i;
	size_t nw;
	unsigned long next, limit;

	mmap_write_lock(mm);

	spt = mm_ctx_subpage_prot(&mm->context);
	if (!spt)
		goto err_out;

	limit = addr + len;
	if (limit > spt->maxaddr)
		limit = spt->maxaddr;
	for (; addr < limit; addr = next) {
		next = pmd_addr_end(addr, limit);
		if (addr < 0x100000000UL) {
			spm = spt->low_prot;
		} else {
			spm = spt->protptrs[addr >> SBP_L3_SHIFT];
			if (!spm)
				continue;
		}
		spp = spm[(addr >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1)];
		if (!spp)
			continue;
		spp += (addr >> PAGE_SHIFT) & (SBP_L1_COUNT - 1);

		i = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
		nw = PTRS_PER_PTE - i;
		if (addr + (nw << PAGE_SHIFT) > next)
			nw = (next - addr) >> PAGE_SHIFT;

		memset(spp, 0, nw * sizeof(u32));

		/* now flush any existing HPTEs for the range */
		hpte_flush_range(mm, addr, nw);
	}

err_out:
	mmap_write_unlock(mm);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static int subpage_walk_pmd_entry(pmd_t *pmd, unsigned long addr,
				  unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	split_huge_pmd(vma, pmd, addr);
	return 0;
}

static const struct mm_walk_ops subpage_walk_ops = {
	.pmd_entry	= subpage_walk_pmd_entry,
};
static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr,
				    unsigned long len)
{
	struct vm_area_struct *vma;

	/*
	 * We don't try too hard, we just mark all the vma in that range
	 * VM_NOHUGEPAGE and split them.
	 */
	vma = find_vma(mm, addr);
	/*
	 * If the whole range is unmapped, just return.
	 */
	if (vma && ((addr + len) <= vma->vm_start))
		return;

	while (vma) {
		if (vma->vm_start >= (addr + len))
			break;
		vma->vm_flags |= VM_NOHUGEPAGE;
		walk_page_vma(vma, &subpage_walk_ops, NULL);
		vma = vma->vm_next;
	}
}
#else
static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr,
				    unsigned long len)
{
	return;
}
#endif
/*
 * Copy in a subpage protection map for an address range.
 * The map has 2 bits per 4k subpage, so 32 bits per 64k page.
 * Each 2-bit field is 0 to allow any access, 1 to prevent writes,
 * 2 or 3 to prevent all accesses.
 * Note that the normal page protections also apply; the subpage
 * protection mechanism is an additional constraint, so putting 0
 * in a 2-bit field won't allow writes to a page that is otherwise
 * write-protected.
 */
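/*
 * Usage sketch (illustrative only, not part of the original source;
 * assumes a 64k base page size and the powerpc __NR_subpage_prot syscall
 * number).  To make one 64k page at 'addr' read-only at 4k granularity,
 * userspace fills every 2-bit field with 1, i.e. one 32-bit word per 64k
 * page:
 *
 *	u32 map_word = 0x55555555;	// sixteen 2-bit fields, each set to 1
 *	syscall(__NR_subpage_prot, addr, 0x10000UL, &map_word);
 *
 * Passing a NULL map instead clears any subpage protection on the range
 * (the !map case below).
 */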
SYSCALL_DEFINE3(subpage_prot, unsigned long, addr,
		unsigned long, len, u32 __user *, map)
{
	struct mm_struct *mm = current->mm;
	struct subpage_prot_table *spt;
	u32 **spm, *spp;
	unsigned long i;
	size_t nw;
	unsigned long next, limit;
	int err;

	if (radix_enabled())
		return -ENOENT;

	/* Check parameters */
	if ((addr & ~PAGE_MASK) || (len & ~PAGE_MASK) ||
	    addr >= mm->task_size || len >= mm->task_size ||
	    addr + len > mm->task_size)
		return -EINVAL;

	if (is_hugepage_only_range(mm, addr, len))
		return -EINVAL;

	if (!map) {
		/* Clear out the protection map for the address range */
		subpage_prot_clear(addr, len);
		return 0;
	}

	if (!access_ok(map, (len >> PAGE_SHIFT) * sizeof(u32)))
		return -EFAULT;

	mmap_write_lock(mm);

	spt = mm_ctx_subpage_prot(&mm->context);
	if (!spt) {
		/*
		 * Allocate subpage prot table if not already done.
		 * Do this with mmap_lock held
		 */
		spt = kzalloc(sizeof(struct subpage_prot_table), GFP_KERNEL);
		if (!spt) {
			err = -ENOMEM;
			goto out;
		}
		mm->context.hash_context->spt = spt;
	}
	subpage_mark_vma_nohuge(mm, addr, len);
	for (limit = addr + len; addr < limit; addr = next) {
		next = pmd_addr_end(addr, limit);
		err = -ENOMEM;
		if (addr < 0x100000000UL) {
			spm = spt->low_prot;
		} else {
			spm = spt->protptrs[addr >> SBP_L3_SHIFT];
			if (!spm) {
				spm = (u32 **)get_zeroed_page(GFP_KERNEL);
				if (!spm)
					goto out;
				spt->protptrs[addr >> SBP_L3_SHIFT] = spm;
			}
		}
		spm += (addr >> SBP_L2_SHIFT) & (SBP_L2_COUNT - 1);
		spp = *spm;
		if (!spp) {
			spp = (u32 *)get_zeroed_page(GFP_KERNEL);
			if (!spp)
				goto out;
			*spm = spp;
		}
		spp += (addr >> PAGE_SHIFT) & (SBP_L1_COUNT - 1);

		local_irq_disable();
		demote_segment_4k(mm, addr);
		local_irq_enable();

		i = (addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
		nw = PTRS_PER_PTE - i;
		if (addr + (nw << PAGE_SHIFT) > next)
			nw = (next - addr) >> PAGE_SHIFT;

		mmap_write_unlock(mm);
		if (__copy_from_user(spp, map, nw * sizeof(u32)))
			return -EFAULT;
		map += nw;
		mmap_write_lock(mm);

		/* now flush any existing HPTEs for the range */
		hpte_flush_range(mm, addr, nw);
	}
	if (limit > spt->maxaddr)
		spt->maxaddr = limit;
	err = 0;
 out:
	mmap_write_unlock(mm);
	return err;
}