// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 */
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched/signal.h>

#include <asm/tlbflush.h>
#include <as-layout.h>
#include <mem_user.h>
#include <os.h>
#include <skas.h>
#include <kern_util.h>
struct host_vm_change {
        enum { NONE, MMAP, MUNMAP, MPROTECT } type;

#define INIT_HVC(mm, force, userspace) \
        ((struct host_vm_change) \
         { .ops = { { .type = NONE } }, \
           .userspace = userspace, \
static void report_enomem(void)
        printk(KERN_ERR "UML ran out of memory on the host side! "
               "This can happen due to a memory limitation or "
               "because vm.max_map_count has been reached.\n");
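
/*
 * Flush up to 'end' queued host_vm_ops to the host. For a userspace
 * address space the ops go through the skas map()/unmap()/protect()
 * calls on hvc->mm->context.id; kernel mappings use the os_*_memory()
 * helpers instead. The loop stops at the first failing operation and
 * returns its error code.
 */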
static int do_ops(struct host_vm_change *hvc, int end,
                  int finished)
        struct host_vm_op *op;

        for (i = 0; i < end && !ret; i++) {
                        ret = map(&hvc->mm->context.id, op->u.mmap.addr,
                                  op->u.mmap.len, op->u.mmap.prot,
                                  op->u.mmap.offset, finished,
                        map_memory(op->u.mmap.addr, op->u.mmap.offset,
                                   op->u.mmap.len, 1, 1, 1);
                        ret = unmap(&hvc->mm->context.id,
                                    op->u.munmap.len, finished,
                        ret = os_unmap_memory(
                                (void *) op->u.munmap.addr,
                        ret = protect(&hvc->mm->context.id,
                                      finished, &hvc->data);
                        ret = os_protect_memory(
                                (void *) op->u.mprotect.addr,
                printk(KERN_ERR "Unknown op type %d in do_ops\n",
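
/*
 * Queue a host mmap of the physical pages backing [virt, virt + len).
 * If the request simply extends the previous MMAP op (same fd and prot,
 * contiguous virtual address and file offset), the two are merged into
 * one larger op; for example, two consecutive pages backed by adjacent
 * offsets collapse into a single two-page MMAP. Otherwise a new op is
 * appended, flushing the queue via do_ops() first if it is already full.
 */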
static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
                    unsigned int prot, struct host_vm_change *hvc)
        struct host_vm_op *last;
        int fd = -1, ret = 0;

        fd = phys_mapping(phys, &offset);

        if (hvc->index != 0) {
                last = &hvc->ops[hvc->index - 1];
                if ((last->type == MMAP) &&
                    (last->u.mmap.addr + last->u.mmap.len == virt) &&
                    (last->u.mmap.prot == prot) && (last->u.mmap.fd == fd) &&
                    (last->u.mmap.offset + last->u.mmap.len == offset)) {
                        last->u.mmap.len += len;

        if (hvc->index == ARRAY_SIZE(hvc->ops)) {
                ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);

        hvc->ops[hvc->index++] = ((struct host_vm_op)
                                  { .u = { .mmap = { .addr = virt,
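
/* Queue a host munmap, merging with an adjacent preceding MUNMAP op. */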
static int add_munmap(unsigned long addr, unsigned long len,
                      struct host_vm_change *hvc)
        struct host_vm_op *last;

        if (hvc->index != 0) {
                last = &hvc->ops[hvc->index - 1];
                if ((last->type == MUNMAP) &&
                    (last->u.munmap.addr + last->u.munmap.len == addr)) {
                        last->u.munmap.len += len;

        if (hvc->index == ARRAY_SIZE(hvc->ops)) {
                ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);

        hvc->ops[hvc->index++] = ((struct host_vm_op)
                                  { .u = { .munmap = { .addr = addr,
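
/* Queue a host mprotect, merging adjacent ops that share the same prot. */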
static int add_mprotect(unsigned long addr, unsigned long len,
                        unsigned int prot, struct host_vm_change *hvc)
        struct host_vm_op *last;

        if (hvc->index != 0) {
                last = &hvc->ops[hvc->index - 1];
                if ((last->type == MPROTECT) &&
                    (last->u.mprotect.addr + last->u.mprotect.len == addr) &&
                    (last->u.mprotect.prot == prot)) {
                        last->u.mprotect.len += len;

        if (hvc->index == ARRAY_SIZE(hvc->ops)) {
                ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);

        hvc->ops[hvc->index++] = ((struct host_vm_op)
                                  { .u = { .mprotect = { .addr = addr,
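
/*
 * ADD_ROUND(n, inc) yields the next inc-aligned boundary above n (inc
 * must be a power of two); it is used below to skip ahead to the next
 * page-table block when an entry is not present.
 */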
#define ADD_ROUND(n, inc) (((n) + (inc)) & ~((inc) - 1))
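
/*
 * Walk the ptes in [addr, end): translate each pte's read/write/exec
 * and young/dirty state into UM_PROT_* bits, then queue an mmap, munmap
 * or mprotect for every page marked new (or for every page when
 * hvc->force is set), finally marking the pte as up to date.
 */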
static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
                                   unsigned long end,
                                   struct host_vm_change *hvc)
        int r, w, x, prot, ret = 0;

        pte = pte_offset_kernel(pmd, addr);
                if (!pte_young(*pte)) {
                } else if (!pte_dirty(*pte))

                prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
                        (x ? UM_PROT_EXEC : 0));
                if (hvc->force || pte_newpage(*pte)) {
                        if (pte_present(*pte)) {
                                if (pte_newpage(*pte))
                                        ret = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
                                                       PAGE_SIZE, prot, hvc);
                                ret = add_munmap(addr, PAGE_SIZE, hvc);
                } else if (pte_newprot(*pte))
                        ret = add_mprotect(addr, PAGE_SIZE, prot, hvc);
                *pte = pte_mkuptodate(*pte);
        } while (pte++, addr += PAGE_SIZE, ((addr < end) && !ret));
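
/*
 * The pmd/pud/p4d walkers below share one pattern: a non-present entry
 * that is marked new (or a forced flush) becomes a single munmap
 * covering the whole range, while present entries recurse one level
 * further down.
 */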
static inline int update_pmd_range(pud_t *pud, unsigned long addr,
                                   unsigned long end,
                                   struct host_vm_change *hvc)
        pmd = pmd_offset(pud, addr);
                next = pmd_addr_end(addr, end);
                if (!pmd_present(*pmd)) {
                        if (hvc->force || pmd_newpage(*pmd)) {
                                ret = add_munmap(addr, next - addr, hvc);
                                pmd_mkuptodate(*pmd);
                else ret = update_pte_range(pmd, addr, next, hvc);
        } while (pmd++, addr = next, ((addr < end) && !ret));
static inline int update_pud_range(p4d_t *p4d, unsigned long addr,
                                   unsigned long end,
                                   struct host_vm_change *hvc)
        pud = pud_offset(p4d, addr);
                next = pud_addr_end(addr, end);
                if (!pud_present(*pud)) {
                        if (hvc->force || pud_newpage(*pud)) {
                                ret = add_munmap(addr, next - addr, hvc);
                                pud_mkuptodate(*pud);
                else ret = update_pmd_range(pud, addr, next, hvc);
        } while (pud++, addr = next, ((addr < end) && !ret));
static inline int update_p4d_range(pgd_t *pgd, unsigned long addr,
                                   unsigned long end,
                                   struct host_vm_change *hvc)
        p4d = p4d_offset(pgd, addr);
                next = p4d_addr_end(addr, end);
                if (!p4d_present(*p4d)) {
                        if (hvc->force || p4d_newpage(*p4d)) {
                                ret = add_munmap(addr, next - addr, hvc);
                                p4d_mkuptodate(*p4d);
                        ret = update_pud_range(p4d, addr, next, hvc);
        } while (p4d++, addr = next, ((addr < end) && !ret));
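
/*
 * Re-sync the host mappings for [start_addr, end_addr) in 'mm' with the
 * page tables. The operations are batched in a host_vm_change and
 * flushed with do_ops(); if anything fails, the current process is
 * reported and killed, since its address space no longer matches the
 * host.
 */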
void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
                      unsigned long end_addr, int force)
        struct host_vm_change hvc;
        unsigned long addr = start_addr, next;
        int ret = 0, userspace = 1;

        hvc = INIT_HVC(mm, force, userspace);
        pgd = pgd_offset(mm, addr);
                next = pgd_addr_end(addr, end_addr);
                if (!pgd_present(*pgd)) {
                        if (force || pgd_newpage(*pgd)) {
                                ret = add_munmap(addr, next - addr, &hvc);
                                pgd_mkuptodate(*pgd);
                        ret = update_p4d_range(pgd, addr, next, &hvc);
        } while (pgd++, addr = next, ((addr < end_addr) && !ret));

        ret = do_ops(&hvc, hvc.index, 1);

        /* This is not an else because ret is modified above */
                struct mm_id *mm_idp = &current->mm->context.id;

                printk(KERN_ERR "fix_range_common: failed, killing current "
                       "process: %d\n", task_tgid_vnr(current));
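
/*
 * Kernel-mapping counterpart of fix_range_common(): userspace is 0, so
 * do_ops() goes through the os_*_memory() helpers, and any failure is
 * fatal (hence the panic() calls rather than killing a process).
 */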
static int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
        struct mm_struct *mm;
        unsigned long addr, last;
        int updated = 0, err = 0, force = 0, userspace = 0;
        struct host_vm_change hvc;

        hvc = INIT_HVC(mm, force, userspace);
        for (addr = start; addr < end;) {
                pgd = pgd_offset(mm, addr);
                if (!pgd_present(*pgd)) {
                        last = ADD_ROUND(addr, PGDIR_SIZE);
                        if (pgd_newpage(*pgd)) {
                                err = add_munmap(addr, last - addr, &hvc);
                                        panic("munmap failed, errno = %d\n",

                p4d = p4d_offset(pgd, addr);
                if (!p4d_present(*p4d)) {
                        last = ADD_ROUND(addr, P4D_SIZE);
                        if (p4d_newpage(*p4d)) {
                                err = add_munmap(addr, last - addr, &hvc);
                                        panic("munmap failed, errno = %d\n",

                pud = pud_offset(p4d, addr);
                if (!pud_present(*pud)) {
                        last = ADD_ROUND(addr, PUD_SIZE);
                        if (pud_newpage(*pud)) {
                                err = add_munmap(addr, last - addr, &hvc);
                                        panic("munmap failed, errno = %d\n",

                pmd = pmd_offset(pud, addr);
                if (!pmd_present(*pmd)) {
                        last = ADD_ROUND(addr, PMD_SIZE);
                        if (pmd_newpage(*pmd)) {
                                err = add_munmap(addr, last - addr, &hvc);
                                        panic("munmap failed, errno = %d\n",

                pte = pte_offset_kernel(pmd, addr);
                if (!pte_present(*pte) || pte_newpage(*pte)) {
                        err = add_munmap(addr, PAGE_SIZE, &hvc);
                                panic("munmap failed, errno = %d\n",
                        if (pte_present(*pte))
                                err = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
                else if (pte_newprot(*pte)) {
                        err = add_mprotect(addr, PAGE_SIZE, 0, &hvc);

        err = do_ops(&hvc, hvc.index, 1);

                panic("flush_tlb_kernel failed, errno = %d\n", err);
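
/*
 * Single-page fast path: instead of batching through a host_vm_change,
 * talk to the host directly with map()/unmap()/protect() for the one
 * page containing 'address'.
 */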
void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
        struct mm_struct *mm = vma->vm_mm;
        int r, w, x, prot, err = 0;

        address &= PAGE_MASK;

        pgd = pgd_offset(mm, address);
        if (!pgd_present(*pgd))

        p4d = p4d_offset(pgd, address);
        if (!p4d_present(*p4d))

        pud = pud_offset(p4d, address);
        if (!pud_present(*pud))

        pmd = pmd_offset(pud, address);
        if (!pmd_present(*pmd))

        pte = pte_offset_kernel(pmd, address);

        if (!pte_young(*pte)) {
        } else if (!pte_dirty(*pte)) {

        mm_id = &mm->context.id;
        prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
                (x ? UM_PROT_EXEC : 0));
        if (pte_newpage(*pte)) {
                if (pte_present(*pte)) {
                        unsigned long long offset;

                        fd = phys_mapping(pte_val(*pte) & PAGE_MASK, &offset);
                        err = map(mm_id, address, PAGE_SIZE, prot, fd, offset,
                else err = unmap(mm_id, address, PAGE_SIZE, 1, &flush);
        else if (pte_newprot(*pte))
                err = protect(mm_id, address, PAGE_SIZE, prot, 1, &flush);

        *pte = pte_mkuptodate(*pte);

        printk(KERN_ERR "Failed to flush page for address 0x%lx\n", address);
void flush_tlb_all(void)
        /*
         * Don't bother flushing if this address space is about to be
         * destroyed.
         */
        if (atomic_read(&current->mm->mm_users) == 0)
                return;

        flush_tlb_mm(current->mm);
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
        flush_tlb_kernel_range_common(start, end);
void flush_tlb_kernel_vm(void)
        flush_tlb_kernel_range_common(start_vm, end_vm);
void __flush_tlb_one(unsigned long addr)
        flush_tlb_kernel_range_common(addr, addr + PAGE_SIZE);
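
/* Common helper for the flush_tlb_* entry points that act on a userspace mm. */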
static void fix_range(struct mm_struct *mm, unsigned long start_addr,
                      unsigned long end_addr, int force)
        /*
         * Don't bother flushing if this address space is about to be
         * destroyed.
         */
        if (atomic_read(&mm->mm_users) == 0)
                return;

        fix_range_common(mm, start_addr, end_addr, force);
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                     unsigned long end)
        if (vma->vm_mm == NULL)
                flush_tlb_kernel_range_common(start, end);
        else fix_range(vma->vm_mm, start, end, 0);

EXPORT_SYMBOL(flush_tlb_range);
void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
                        unsigned long end)
        fix_range(mm, start, end, 0);
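
/* Walk every VMA in the address space and re-sync it with the host. */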
void flush_tlb_mm(struct mm_struct *mm)
        struct vm_area_struct *vma = mm->mmap;

        while (vma != NULL) {
                fix_range(mm, vma->vm_start, vma->vm_end, 0);
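
/*
 * Like flush_tlb_mm(), but with force set, so the host mappings are
 * rebuilt even for entries that are not marked new.
 */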
void force_flush_all(void)
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma = mm->mmap;

        while (vma != NULL) {
                fix_range(mm, vma->vm_start, vma->vm_end, 1);