// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com)
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched/signal.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <as-layout.h>
#include <mem_user.h>
#include <os.h>
#include <skas.h>
#include <kern_util.h>
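
/*
 * A host_vm_change accumulates pending mmap/munmap/mprotect operations
 * against the host address space.  Adjacent compatible operations are
 * merged in place, and the queue is flushed through do_ops() whenever
 * it fills or the page-table walk completes.
 */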
struct host_vm_change {
	struct host_vm_op {
		enum { NONE, MMAP, MUNMAP, MPROTECT } type;
		union {
			struct {
				unsigned long addr;
				unsigned long len;
				unsigned int prot;
				int fd;
				__u64 offset;
			} mmap;
			struct {
				unsigned long addr;
				unsigned long len;
			} munmap;
			struct {
				unsigned long addr;
				unsigned long len;
				unsigned int prot;
			} mprotect;
		} u;
	} ops[1];
	int userspace;
	int index;
	struct mm_struct *mm;
	void *data;
	int force;
};

#define INIT_HVC(mm, force, userspace) \
	((struct host_vm_change) \
	 { .ops		= { { .type = NONE } }, \
	   .mm		= mm, \
	   .data	= NULL, \
	   .userspace	= userspace, \
	   .index	= 0, \
	   .force	= force })
static void report_enomem(void)
{
	printk(KERN_ERR "UML ran out of memory on the host side! "
			"This can happen due to a memory limitation or "
			"because vm.max_map_count has been reached.\n");
}
static int do_ops(struct host_vm_change *hvc, int end,
		  int finished)
{
	struct host_vm_op *op;
	int i, ret = 0;

	for (i = 0; i < end && !ret; i++) {
		op = &hvc->ops[i];
		switch (op->type) {
		case MMAP:
			if (hvc->userspace)
				ret = map(&hvc->mm->context.id, op->u.mmap.addr,
					  op->u.mmap.len, op->u.mmap.prot,
					  op->u.mmap.fd,
					  op->u.mmap.offset, finished,
					  &hvc->data);
			else
				map_memory(op->u.mmap.addr, op->u.mmap.offset,
					   op->u.mmap.len, 1, 1, 1);
			break;
		case MUNMAP:
			if (hvc->userspace)
				ret = unmap(&hvc->mm->context.id,
					    op->u.munmap.addr,
					    op->u.munmap.len, finished,
					    &hvc->data);
			else
				ret = os_unmap_memory(
					(void *) op->u.munmap.addr,
					op->u.munmap.len);
			break;
		case MPROTECT:
			if (hvc->userspace)
				ret = protect(&hvc->mm->context.id,
					      op->u.mprotect.addr,
					      op->u.mprotect.len,
					      op->u.mprotect.prot,
					      finished, &hvc->data);
			else
				ret = os_protect_memory(
					(void *) op->u.mprotect.addr,
					op->u.mprotect.len,
					1, 1, 1);
			break;
		default:
			printk(KERN_ERR "Unknown op type %d in do_ops\n",
			       op->type);
			BUG();
			break;
		}
	}

	if (ret == -ENOMEM)
		report_enomem();

	return ret;
}
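
/*
 * Queue a map operation, extending the previous queued operation in
 * place when the new range is virtually and physically contiguous with
 * it and shares its protection and backing fd.
 */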
static int add_mmap(unsigned long virt, unsigned long phys, unsigned long len,
		    unsigned int prot, struct host_vm_change *hvc)
{
	__u64 offset;
	struct host_vm_op *last;
	int fd = -1, ret = 0;

	if (virt + len > STUB_START && virt < STUB_END)
		return -EINVAL;

	if (hvc->userspace)
		fd = phys_mapping(phys, &offset);
	else
		offset = phys;
	if (hvc->index != 0) {
		last = &hvc->ops[hvc->index - 1];
		if ((last->type == MMAP) &&
		    (last->u.mmap.addr + last->u.mmap.len == virt) &&
		    (last->u.mmap.prot == prot) && (last->u.mmap.fd == fd) &&
		    (last->u.mmap.offset + last->u.mmap.len == offset)) {
			last->u.mmap.len += len;
			return 0;
		}
	}

	if (hvc->index == ARRAY_SIZE(hvc->ops)) {
		ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
		hvc->index = 0;
	}

	hvc->ops[hvc->index++] = ((struct host_vm_op)
				  { .type	= MMAP,
				    .u = { .mmap = { .addr	= virt,
						     .len	= len,
						     .prot	= prot,
						     .fd	= fd,
						     .offset	= offset }
					 } });
	return ret;
}
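
/*
 * Queue an unmap operation, merging it with the previous one when the
 * two ranges are contiguous.
 */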
static int add_munmap(unsigned long addr, unsigned long len,
		      struct host_vm_change *hvc)
{
	struct host_vm_op *last;
	int ret = 0;

	if (addr + len > STUB_START && addr < STUB_END)
		return -EINVAL;

	if (hvc->index != 0) {
		last = &hvc->ops[hvc->index - 1];
		if ((last->type == MUNMAP) &&
		    (last->u.munmap.addr + last->u.munmap.len == addr)) {
			last->u.munmap.len += len;
			return 0;
		}
	}

	if (hvc->index == ARRAY_SIZE(hvc->ops)) {
		ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
		hvc->index = 0;
	}

	hvc->ops[hvc->index++] = ((struct host_vm_op)
				  { .type	= MUNMAP,
				    .u = { .munmap = { .addr	= addr,
						       .len	= len } } });
	return ret;
}
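
/*
 * Queue a protection change, merging it with the previous one when the
 * ranges are contiguous and the protections match.
 */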
static int add_mprotect(unsigned long addr, unsigned long len,
			unsigned int prot, struct host_vm_change *hvc)
{
	struct host_vm_op *last;
	int ret = 0;

	if (addr + len > STUB_START && addr < STUB_END)
		return -EINVAL;

	if (hvc->index != 0) {
		last = &hvc->ops[hvc->index - 1];
		if ((last->type == MPROTECT) &&
		    (last->u.mprotect.addr + last->u.mprotect.len == addr) &&
		    (last->u.mprotect.prot == prot)) {
			last->u.mprotect.len += len;
			return 0;
		}
	}

	if (hvc->index == ARRAY_SIZE(hvc->ops)) {
		ret = do_ops(hvc, ARRAY_SIZE(hvc->ops), 0);
		hvc->index = 0;
	}

	hvc->ops[hvc->index++] = ((struct host_vm_op)
				  { .type	= MPROTECT,
				    .u = { .mprotect = { .addr	= addr,
							 .len	= len,
							 .prot	= prot } } });
	return ret;
}
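
/* Round n up to the next inc boundary strictly above n; inc must be a
 * power of two. */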
#define ADD_ROUND(n, inc) (((n) + (inc)) & ~((inc) - 1))
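
/*
 * Walk the PTEs covering [addr, end), derive the host protection from
 * the read/write/exec, young and dirty bits, and queue whatever
 * mmap/munmap/mprotect is needed to bring the host mapping of each
 * page up to date.
 */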
static inline int update_pte_range(pmd_t *pmd, unsigned long addr,
				   unsigned long end,
				   struct host_vm_change *hvc)
{
	pte_t *pte;
	int r, w, x, prot, ret = 0;

	pte = pte_offset_kernel(pmd, addr);
	do {
		if ((addr >= STUB_START) && (addr < STUB_END))
			continue;

		r = pte_read(*pte);
		w = pte_write(*pte);
		x = pte_exec(*pte);
		if (!pte_young(*pte)) {
			r = 0;
			w = 0;
		} else if (!pte_dirty(*pte))
			w = 0;

		prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
			(x ? UM_PROT_EXEC : 0));
		if (hvc->force || pte_newpage(*pte)) {
			if (pte_present(*pte)) {
				if (pte_newpage(*pte))
					ret = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
						       PAGE_SIZE, prot, hvc);
			} else
				ret = add_munmap(addr, PAGE_SIZE, hvc);
		} else if (pte_newprot(*pte))
			ret = add_mprotect(addr, PAGE_SIZE, prot, hvc);
		*pte = pte_mkuptodate(*pte);
	} while (pte++, addr += PAGE_SIZE, ((addr < end) && !ret));
	return ret;
}
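
/*
 * The pmd and pud levels mirror update_pte_range(): absent entries
 * marked as new pages become a single large munmap, while present
 * entries recurse one level down.
 */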
static inline int update_pmd_range(pud_t *pud, unsigned long addr,
				   unsigned long end,
				   struct host_vm_change *hvc)
{
	pmd_t *pmd;
	unsigned long next;
	int ret = 0;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (!pmd_present(*pmd)) {
			if (hvc->force || pmd_newpage(*pmd)) {
				ret = add_munmap(addr, next - addr, hvc);
				pmd_mkuptodate(*pmd);
			}
		}
		else ret = update_pte_range(pmd, addr, next, hvc);
	} while (pmd++, addr = next, ((addr < end) && !ret));
	return ret;
}
static inline int update_pud_range(pgd_t *pgd, unsigned long addr,
				   unsigned long end,
				   struct host_vm_change *hvc)
{
	pud_t *pud;
	unsigned long next;
	int ret = 0;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (!pud_present(*pud)) {
			if (hvc->force || pud_newpage(*pud)) {
				ret = add_munmap(addr, next - addr, hvc);
				pud_mkuptodate(*pud);
			}
		}
		else ret = update_pmd_range(pud, addr, next, hvc);
	} while (pud++, addr = next, ((addr < end) && !ret));
	return ret;
}
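
/*
 * Sync the host's view of a userspace address range with the page
 * tables.  On failure the host mapping is out of sync with the page
 * tables and cannot be repaired, so current is killed.
 */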
void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
		      unsigned long end_addr, int force)
{
	pgd_t *pgd;
	struct host_vm_change hvc;
	unsigned long addr = start_addr, next;
	int ret = 0, userspace = 1;

	hvc = INIT_HVC(mm, force, userspace);
	pgd = pgd_offset(mm, addr);
	do {
		next = pgd_addr_end(addr, end_addr);
		if (!pgd_present(*pgd)) {
			if (force || pgd_newpage(*pgd)) {
				ret = add_munmap(addr, next - addr, &hvc);
				pgd_mkuptodate(*pgd);
			}
		}
		else ret = update_pud_range(pgd, addr, next, &hvc);
	} while (pgd++, addr = next, ((addr < end_addr) && !ret));

	if (!ret)
		ret = do_ops(&hvc, hvc.index, 1);

	/* This is not an else because ret is modified above */
	if (ret) {
		printk(KERN_ERR "fix_range_common: failed, killing current "
		       "process: %d\n", task_tgid_vnr(current));
		/* We are under mmap_sem, release it such that current can terminate */
		up_write(&current->mm->mmap_sem);
		force_sig(SIGKILL, current);
		do_signal(&current->thread.regs);
	}
}
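
/*
 * Kernel-side counterpart of fix_range_common(), walking init_mm by
 * hand.  Returns nonzero if any host mapping was changed; errors here
 * are fatal, since the kernel's own mappings must stay consistent.
 */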
static int flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
{
	struct mm_struct *mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long addr, last;
	int updated = 0, err = 0, force = 0, userspace = 0;
	struct host_vm_change hvc;

	mm = &init_mm;
	hvc = INIT_HVC(mm, force, userspace);
	for (addr = start; addr < end;) {
		pgd = pgd_offset(mm, addr);
		if (!pgd_present(*pgd)) {
			last = ADD_ROUND(addr, PGDIR_SIZE);
			if (last > end)
				last = end;
			if (pgd_newpage(*pgd)) {
				updated = 1;
				err = add_munmap(addr, last - addr, &hvc);
				if (err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pud = pud_offset(pgd, addr);
		if (!pud_present(*pud)) {
			last = ADD_ROUND(addr, PUD_SIZE);
			if (last > end)
				last = end;
			if (pud_newpage(*pud)) {
				updated = 1;
				err = add_munmap(addr, last - addr, &hvc);
				if (err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pmd = pmd_offset(pud, addr);
		if (!pmd_present(*pmd)) {
			last = ADD_ROUND(addr, PMD_SIZE);
			if (last > end)
				last = end;
			if (pmd_newpage(*pmd)) {
				updated = 1;
				err = add_munmap(addr, last - addr, &hvc);
				if (err < 0)
					panic("munmap failed, errno = %d\n",
					      -err);
			}
			addr = last;
			continue;
		}

		pte = pte_offset_kernel(pmd, addr);
		if (!pte_present(*pte) || pte_newpage(*pte)) {
			updated = 1;
			err = add_munmap(addr, PAGE_SIZE, &hvc);
			if (err < 0)
				panic("munmap failed, errno = %d\n",
				      -err);
			if (pte_present(*pte))
				err = add_mmap(addr, pte_val(*pte) & PAGE_MASK,
					       PAGE_SIZE, 0, &hvc);
		}
		else if (pte_newprot(*pte)) {
			updated = 1;
			err = add_mprotect(addr, PAGE_SIZE, 0, &hvc);
		}
		addr += PAGE_SIZE;
	}
	if (!err)
		err = do_ops(&hvc, hvc.index, 1);

	if (err < 0)
		panic("flush_tlb_kernel failed, errno = %d\n", err);
	return updated;
}
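
/*
 * Single-page fast path: fix up one userspace page in place instead of
 * going through a host_vm_change queue.  Any failure to walk or remap
 * the page kills the process.
 */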
void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	struct mm_struct *mm = vma->vm_mm;
	void *flush = NULL;
	int r, w, x, prot, err = 0;
	struct mm_id *mm_id;

	address &= PAGE_MASK;

	if (address >= STUB_START && address < STUB_END)
		goto kill;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		goto kill;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		goto kill;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		goto kill;

	pte = pte_offset_kernel(pmd, address);

	r = pte_read(*pte);
	w = pte_write(*pte);
	x = pte_exec(*pte);
	if (!pte_young(*pte)) {
		r = 0;
		w = 0;
	} else if (!pte_dirty(*pte)) {
		w = 0;
	}

	mm_id = &mm->context.id;
	prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
		(x ? UM_PROT_EXEC : 0));
	if (pte_newpage(*pte)) {
		if (pte_present(*pte)) {
			unsigned long long offset;
			int fd;

			fd = phys_mapping(pte_val(*pte) & PAGE_MASK, &offset);
			err = map(mm_id, address, PAGE_SIZE, prot, fd, offset,
				  1, &flush);
		}
		else err = unmap(mm_id, address, PAGE_SIZE, 1, &flush);
	}
	else if (pte_newprot(*pte))
		err = protect(mm_id, address, PAGE_SIZE, prot, 1, &flush);

	if (err) {
		if (err == -ENOMEM)
			report_enomem();

		goto kill;
	}

	*pte = pte_mkuptodate(*pte);

	return;

kill:
	printk(KERN_ERR "Failed to flush page for address 0x%lx\n", address);
	force_sig(SIGKILL, current);
}
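
/* Thin wrappers giving other UML code access to the page-table walkers. */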
pgd_t *pgd_offset_proc(struct mm_struct *mm, unsigned long address)
{
	return pgd_offset(mm, address);
}

pud_t *pud_offset_proc(pgd_t *pgd, unsigned long address)
{
	return pud_offset(pgd, address);
}

pmd_t *pmd_offset_proc(pud_t *pud, unsigned long address)
{
	return pmd_offset(pud, address);
}

pte_t *pte_offset_proc(pmd_t *pmd, unsigned long address)
{
	return pte_offset_kernel(pmd, address);
}

pte_t *addr_pte(struct task_struct *task, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(task->mm, addr);
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pte_offset_map(pmd, addr);
}
void flush_tlb_all(void)
{
	/*
	 * Don't bother flushing if this address space is about to be
	 * destroyed.
	 */
	if (atomic_read(&current->mm->mm_users) == 0)
		return;

	flush_tlb_mm(current->mm);
}
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	flush_tlb_kernel_range_common(start, end);
}

void flush_tlb_kernel_vm(void)
{
	flush_tlb_kernel_range_common(start_vm, end_vm);
}

void __flush_tlb_one(unsigned long addr)
{
	flush_tlb_kernel_range_common(addr, addr + PAGE_SIZE);
}
static void fix_range(struct mm_struct *mm, unsigned long start_addr,
		      unsigned long end_addr, int force)
{
	/*
	 * Don't bother flushing if this address space is about to be
	 * destroyed.
	 */
	if (atomic_read(&mm->mm_users) == 0)
		return;

	fix_range_common(mm, start_addr, end_addr, force);
}
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	if (vma->vm_mm == NULL)
		flush_tlb_kernel_range_common(start, end);
	else fix_range(vma->vm_mm, start, end, 0);
}
EXPORT_SYMBOL(flush_tlb_range);
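
/* Flush a range, or every VMA, of a userspace address space. */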
void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
			unsigned long end)
{
	fix_range(mm, start, end, 0);
}
void flush_tlb_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma = mm->mmap;

	while (vma != NULL) {
		fix_range(mm, vma->vm_start, vma->vm_end, 0);
		vma = vma->vm_next;
	}
}
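
/*
 * Remap every VMA of current unconditionally (force = 1), rather than
 * only the entries marked as changed.
 */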
void force_flush_all(void)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = mm->mmap;

	while (vma != NULL) {
		fix_range(mm, vma->vm_start, vma->vm_end, 1);
		vma = vma->vm_next;
	}
}