// SPDX-License-Identifier: GPL-2.0
/*
 *	mm/mremap.c
 *
 *	(C) Copyright 1996 Linus Torvalds
 *
 *	Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *	(C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/ksm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/swapops.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/mmu_notifier.h>
#include <linux/uaccess.h>
#include <linux/mm-arch-hooks.h>
#include <linux/userfaultfd_k.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "internal.h"

static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (pgd_none_or_clear_bad(pgd))
		return NULL;

	p4d = p4d_offset(pgd, addr);
	if (p4d_none_or_clear_bad(p4d))
		return NULL;

	pud = pud_offset(p4d, addr);
	if (pud_none_or_clear_bad(pud))
		return NULL;

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;

	return pmd;
}

static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
			    unsigned long addr)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	p4d = p4d_alloc(mm, pgd, addr);
	if (!p4d)
		return NULL;
	pud = pud_alloc(mm, p4d, addr);
	if (!pud)
		return NULL;

	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return NULL;

	VM_BUG_ON(pmd_trans_huge(*pmd));

	return pmd;
}

static void take_rmap_locks(struct vm_area_struct *vma)
{
	if (vma->vm_file)
		i_mmap_lock_write(vma->vm_file->f_mapping);
	if (vma->anon_vma)
		anon_vma_lock_write(vma->anon_vma);
}

static void drop_rmap_locks(struct vm_area_struct *vma)
{
	if (vma->anon_vma)
		anon_vma_unlock_write(vma->anon_vma);
	if (vma->vm_file)
		i_mmap_unlock_write(vma->vm_file->f_mapping);
}

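/*
 * Note (added for clarity): take_rmap_locks() acquires the file rmap lock
 * before the anon_vma lock, and drop_rmap_locks() releases them in reverse
 * order. This appears to match the mm-wide lock ordering documented at the
 * top of mm/rmap.c (i_mmap_rwsem before anon_vma->rwsem); the helpers are
 * meant to be used strictly as a pair.
 */
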
static pte_t move_soft_dirty_pte(pte_t pte)
{
	/*
	 * Set soft dirty bit so we can notice
	 * in userspace the ptes were moved.
	 */
#ifdef CONFIG_MEM_SOFT_DIRTY
	if (pte_present(pte))
		pte = pte_mksoft_dirty(pte);
	else if (is_swap_pte(pte))
		pte = pte_swp_mksoft_dirty(pte);
#endif
	return pte;
}

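/*
 * Background note (added for clarity): the soft-dirty bit set above is what
 * lets checkpoint/restore style users notice that a page moved. After
 * clearing soft-dirty via /proc/<pid>/clear_refs, the bit shows up again in
 * /proc/<pid>/pagemap once a pte is written or, as here, relocated. See
 * Documentation/admin-guide/mm/soft-dirty.rst.
 */
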
static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
		unsigned long old_addr, unsigned long old_end,
		struct vm_area_struct *new_vma, pmd_t *new_pmd,
		unsigned long new_addr, bool need_rmap_locks)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *old_pte, *new_pte, pte;
	spinlock_t *old_ptl, *new_ptl;
	bool force_flush = false;
	unsigned long len = old_end - old_addr;

	/*
	 * When need_rmap_locks is true, we take the i_mmap_rwsem and anon_vma
	 * locks to ensure that rmap will always observe either the old or the
	 * new ptes. This is the easiest way to avoid races with
	 * truncate_pagecache(), page migration, etc...
	 *
	 * When need_rmap_locks is false, we use other ways to avoid
	 * such races:
	 *
	 * - During exec() shift_arg_pages(), we use a specially tagged vma
	 *   which rmap call sites look for using is_vma_temporary_stack().
	 *
	 * - During mremap(), new_vma is often known to be placed after vma
	 *   in rmap traversal order. This ensures rmap will always observe
	 *   either the old pte, or the new pte, or both (the page table locks
	 *   serialize access to individual ptes, but only rmap traversal
	 *   order guarantees that we won't miss both the old and new ptes).
	 */
	if (need_rmap_locks)
		take_rmap_locks(vma);

	/*
	 * We don't have to worry about the ordering of src and dst
	 * pte locks because exclusive mmap_sem prevents deadlock.
	 */
	old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
	new_pte = pte_offset_map(new_pmd, new_addr);
	new_ptl = pte_lockptr(mm, new_pmd);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
	flush_tlb_batched_pending(vma->vm_mm);
	arch_enter_lazy_mmu_mode();

	for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
				   new_pte++, new_addr += PAGE_SIZE) {
		if (pte_none(*old_pte))
			continue;

		pte = ptep_get_and_clear(mm, old_addr, old_pte);
		/*
		 * If we are remapping a valid PTE, make sure
		 * to flush TLB before we drop the PTL for the
		 * PTE.
		 *
		 * NOTE! Both old and new PTL matter: the old one
		 * for racing with page_mkclean(), the new one to
		 * make sure the physical page stays valid until
		 * the TLB entry for the old mapping has been
		 * flushed.
		 */
		if (pte_present(pte))
			force_flush = true;
		pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
		pte = move_soft_dirty_pte(pte);
		set_pte_at(mm, new_addr, new_pte, pte);
	}

	arch_leave_lazy_mmu_mode();
	if (force_flush)
		flush_tlb_range(vma, old_end - len, old_end);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	pte_unmap(new_pte - 1);
	pte_unmap_unlock(old_pte - 1, old_ptl);
	if (need_rmap_locks)
		drop_rmap_locks(vma);
}

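/*
 * Note on the flush above (added for clarity): by the time the copy loop
 * finishes, old_addr has been advanced to old_end, so the original start of
 * the moved range is recomputed as old_end - len when a TLB flush is needed.
 */
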
#ifdef CONFIG_HAVE_MOVE_PMD
static bool move_normal_pmd(struct vm_area_struct *vma, unsigned long old_addr,
		  unsigned long new_addr, unsigned long old_end,
		  pmd_t *old_pmd, pmd_t *new_pmd)
{
	spinlock_t *old_ptl, *new_ptl;
	struct mm_struct *mm = vma->vm_mm;
	pmd_t pmd;

	if ((old_addr & ~PMD_MASK) || (new_addr & ~PMD_MASK)
	    || old_end - old_addr < PMD_SIZE)
		return false;

	/*
	 * The destination pmd shouldn't be established, free_pgtables()
	 * should have released it.
	 */
	if (WARN_ON(!pmd_none(*new_pmd)))
		return false;

	/*
	 * We don't have to worry about the ordering of src and dst
	 * ptlocks because exclusive mmap_sem prevents deadlock.
	 */
	old_ptl = pmd_lock(vma->vm_mm, old_pmd);
	new_ptl = pmd_lockptr(mm, new_pmd);
	if (new_ptl != old_ptl)
		spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);

	/* Clear the old pmd */
	pmd = *old_pmd;
	pmd_clear(old_pmd);

	VM_BUG_ON(!pmd_none(*new_pmd));

	/* Set the new pmd */
	set_pmd_at(mm, new_addr, new_pmd, pmd);
	flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
	if (new_ptl != old_ptl)
		spin_unlock(new_ptl);
	spin_unlock(old_ptl);

	return true;
}
#endif

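/*
 * Note (added for clarity): move_normal_pmd() relocates an entire page-table
 * page by rewriting a single pmd entry instead of copying each pte
 * individually (typically 512 ptes per pmd with 4K pages on x86-64). That is
 * why move_page_tables() below prefers it for PMD-aligned, PMD-sized extents
 * when CONFIG_HAVE_MOVE_PMD is enabled.
 */
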
unsigned long move_page_tables(struct vm_area_struct *vma,
		unsigned long old_addr, struct vm_area_struct *new_vma,
		unsigned long new_addr, unsigned long len,
		bool need_rmap_locks)
{
	unsigned long extent, next, old_end;
	struct mmu_notifier_range range;
	pmd_t *old_pmd, *new_pmd;

	old_end = old_addr + len;
	flush_cache_range(vma, old_addr, old_end);

	mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
				old_addr, old_end);
	mmu_notifier_invalidate_range_start(&range);

	for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
		cond_resched();
		next = (old_addr + PMD_SIZE) & PMD_MASK;
		/* even if next overflowed, extent below will be ok */
		extent = next - old_addr;
		if (extent > old_end - old_addr)
			extent = old_end - old_addr;
		old_pmd = get_old_pmd(vma->vm_mm, old_addr);
		if (!old_pmd)
			continue;
		new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
		if (!new_pmd)
			break;
		if (is_swap_pmd(*old_pmd) || pmd_trans_huge(*old_pmd) || pmd_devmap(*old_pmd)) {
			if (extent == HPAGE_PMD_SIZE) {
				bool moved;

				/* See comment in move_ptes() */
				if (need_rmap_locks)
					take_rmap_locks(vma);
				moved = move_huge_pmd(vma, old_addr, new_addr,
						      old_end, old_pmd, new_pmd);
				if (need_rmap_locks)
					drop_rmap_locks(vma);
				if (moved)
					continue;
			}
			split_huge_pmd(vma, old_pmd, old_addr);
			if (pmd_trans_unstable(old_pmd))
				continue;
		} else if (extent == PMD_SIZE) {
#ifdef CONFIG_HAVE_MOVE_PMD
			/*
			 * If the extent is PMD-sized, try to speed the move by
			 * moving at the PMD level if possible.
			 */
			bool moved;

			if (need_rmap_locks)
				take_rmap_locks(vma);
			moved = move_normal_pmd(vma, old_addr, new_addr,
						old_end, old_pmd, new_pmd);
			if (need_rmap_locks)
				drop_rmap_locks(vma);
			if (moved)
				continue;
#endif
		}

		if (pte_alloc(new_vma->vm_mm, new_pmd))
			break;
		next = (new_addr + PMD_SIZE) & PMD_MASK;
		if (extent > next - new_addr)
			extent = next - new_addr;
		move_ptes(vma, old_pmd, old_addr, old_addr + extent, new_vma,
			  new_pmd, new_addr, need_rmap_locks);
	}

	mmu_notifier_invalidate_range_end(&range);

	return len + old_addr - old_end;	/* how much done */
}

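/*
 * Note (added for clarity): move_page_tables() returns the number of bytes it
 * managed to relocate. A return value smaller than len means the move stopped
 * early (for example a page-table allocation failed); move_vma() detects this
 * as moved_len < old_len and moves the entries back before bailing out.
 */
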
static unsigned long move_vma(struct vm_area_struct *vma,
		unsigned long old_addr, unsigned long old_len,
		unsigned long new_len, unsigned long new_addr,
		bool *locked, struct vm_userfaultfd_ctx *uf,
		struct list_head *uf_unmap)
{
	struct mm_struct *mm = vma->vm_mm;
	struct vm_area_struct *new_vma;
	unsigned long vm_flags = vma->vm_flags;
	unsigned long new_pgoff;
	unsigned long moved_len;
	unsigned long excess = 0;
	unsigned long hiwater_vm;
	int split = 0;
	int err;
	bool need_rmap_locks;

	/*
	 * We'd prefer to avoid failure later on in do_munmap(),
	 * which may split one vma into three before unmapping.
	 */
	if (mm->map_count >= sysctl_max_map_count - 3)
		return -ENOMEM;

	/*
	 * Advise KSM to break any KSM pages in the area to be moved:
	 * it would be confusing if they were to turn up at the new
	 * location, where they happen to coincide with different KSM
	 * pages recently unmapped.  But leave vma->vm_flags as it was,
	 * so KSM can come around to merge on vma and new_vma afterwards.
	 */
	err = ksm_madvise(vma, old_addr, old_addr + old_len,
						MADV_UNMERGEABLE, &vm_flags);
	if (err)
		return err;

	new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
	new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff,
			   &need_rmap_locks);
	if (!new_vma)
		return -ENOMEM;

	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
				     need_rmap_locks);
	if (moved_len < old_len) {
		err = -ENOMEM;
	} else if (vma->vm_ops && vma->vm_ops->mremap) {
		err = vma->vm_ops->mremap(new_vma);
	}

	if (unlikely(err)) {
		/*
		 * On error, move entries back from new area to old,
		 * which will succeed since page tables still there,
		 * and then proceed to unmap new area instead of old.
		 */
		move_page_tables(new_vma, new_addr, vma, old_addr, moved_len,
				 true);
		vma = new_vma;
		old_len = new_len;
		old_addr = new_addr;
		new_addr = err;
	} else {
		mremap_userfaultfd_prep(new_vma, uf);
		arch_remap(mm, old_addr, old_addr + old_len,
			   new_addr, new_addr + new_len);
	}

	/* Conceal VM_ACCOUNT so old reservation is not undone */
	if (vm_flags & VM_ACCOUNT) {
		vma->vm_flags &= ~VM_ACCOUNT;
		excess = vma->vm_end - vma->vm_start - old_len;
		if (old_addr > vma->vm_start &&
		    old_addr + old_len < vma->vm_end)
			split = 1;
	}

	/*
	 * If we failed to move page tables we still do total_vm increment
	 * since do_munmap() will decrement it by old_len == new_len.
	 *
	 * Since total_vm is about to be raised artificially high for a
	 * moment, we need to restore high watermark afterwards: if stats
	 * are taken meanwhile, total_vm and hiwater_vm appear too high.
	 * If this were a serious issue, we'd add a flag to do_munmap().
	 */
	hiwater_vm = mm->hiwater_vm;
	vm_stat_account(mm, vma->vm_flags, new_len >> PAGE_SHIFT);

	/* Tell the pfn-tracking code that a pfnmap has moved from this vma */
	if (unlikely(vma->vm_flags & VM_PFNMAP))
		untrack_pfn_moved(vma);

	if (do_munmap(mm, old_addr, old_len, uf_unmap) < 0) {
		/* OOM: unable to split vma, just get accounts right */
		vm_unacct_memory(excess >> PAGE_SHIFT);
		excess = 0;
	}
	mm->hiwater_vm = hiwater_vm;

	/* Restore VM_ACCOUNT if one or two pieces of vma left */
	if (excess) {
		vma->vm_flags |= VM_ACCOUNT;
		if (split)
			vma->vm_next->vm_flags |= VM_ACCOUNT;
	}

	if (vm_flags & VM_LOCKED) {
		mm->locked_vm += new_len >> PAGE_SHIFT;
		*locked = true;
	}

	return new_addr;
}

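/*
 * Note on move_vma()'s return convention (added for clarity): on success the
 * new start address is returned; on failure the negative errno is folded into
 * new_addr above, so callers tell the two apart with offset_in_page(ret)
 * rather than IS_ERR_VALUE().
 */
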
static struct vm_area_struct *vma_to_resize(unsigned long addr,
	unsigned long old_len, unsigned long new_len, unsigned long *p)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma = find_vma(mm, addr);
	unsigned long pgoff;

	if (!vma || vma->vm_start > addr)
		return ERR_PTR(-EFAULT);

	/*
	 * !old_len is a special case where an attempt is made to 'duplicate'
	 * a mapping.  This makes no sense for private mappings as it will
	 * instead create a fresh/new mapping unrelated to the original.  This
	 * is contrary to the basic idea of mremap which creates new mappings
	 * based on the original.  There are no known use cases for this
	 * behavior.  As a result, fail such attempts.
	 */
	if (!old_len && !(vma->vm_flags & (VM_SHARED | VM_MAYSHARE))) {
		pr_warn_once("%s (%d): attempted to duplicate a private mapping with mremap.  This is not supported.\n",
			     current->comm, current->pid);
		return ERR_PTR(-EINVAL);
	}

	if (is_vm_hugetlb_page(vma))
		return ERR_PTR(-EINVAL);

	/* We can't remap across vm area boundaries */
	if (old_len > vma->vm_end - addr)
		return ERR_PTR(-EFAULT);

	if (new_len == old_len)
		return vma;

	/* Need to be careful about a growing mapping */
	pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
	pgoff += vma->vm_pgoff;
	if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
		return ERR_PTR(-EINVAL);

	if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
		return ERR_PTR(-EFAULT);

	if (vma->vm_flags & VM_LOCKED) {
		unsigned long locked, lock_limit;

		locked = mm->locked_vm << PAGE_SHIFT;
		lock_limit = rlimit(RLIMIT_MEMLOCK);
		locked += new_len - old_len;
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			return ERR_PTR(-EAGAIN);
	}

	if (!may_expand_vm(mm, vma->vm_flags,
				(new_len - old_len) >> PAGE_SHIFT))
		return ERR_PTR(-ENOMEM);

	if (vma->vm_flags & VM_ACCOUNT) {
		unsigned long charged = (new_len - old_len) >> PAGE_SHIFT;

		if (security_vm_enough_memory_mm(mm, charged))
			return ERR_PTR(-ENOMEM);
		*p = charged;
	}

	return vma;
}

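/*
 * Note (added for clarity): when vma_to_resize() charges memory for a
 * VM_ACCOUNT mapping it reports the number of newly charged pages through *p;
 * callers that fail afterwards must undo the charge with vm_unacct_memory(),
 * as mremap_to() and the mremap() syscall below both do.
 */
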
static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
		unsigned long new_addr, unsigned long new_len, bool *locked,
		struct vm_userfaultfd_ctx *uf,
		struct list_head *uf_unmap_early,
		struct list_head *uf_unmap)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;
	unsigned long map_flags;

	if (offset_in_page(new_addr))
		goto out;

	if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
		goto out;

	/* Ensure the old/new locations do not overlap */
	if (addr + old_len > new_addr && new_addr + new_len > addr)
		goto out;

	/*
	 * move_vma() needs us to stay 4 maps below the threshold, otherwise
	 * it will bail out at the very beginning.
	 * That is a problem if we have already unmapped the regions here
	 * (new_addr, and old_addr), because userspace will not know the
	 * state of the vmas after it gets -ENOMEM.
	 * So, to avoid such a scenario we can pre-compute whether the whole
	 * operation has high chances of success map-wise.
	 * The worst-case scenario is when both vmas (new_addr and old_addr)
	 * get split in 3 before unmapping them.
	 * That means 2 more maps (1 for each) on top of the ones we already
	 * hold. Check whether the current map count plus 2 still leaves us
	 * 4 maps below the threshold, otherwise return -ENOMEM here to be safe.
	 */
	if ((mm->map_count + 2) >= sysctl_max_map_count - 3)
		return -ENOMEM;

	ret = do_munmap(mm, new_addr, new_len, uf_unmap_early);
	if (ret)
		goto out;

	if (old_len >= new_len) {
		ret = do_munmap(mm, addr+new_len, old_len - new_len, uf_unmap);
		if (ret && old_len != new_len)
			goto out;
		old_len = new_len;
	}

	vma = vma_to_resize(addr, old_len, new_len, &charged);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	map_flags = MAP_FIXED;
	if (vma->vm_flags & VM_MAYSHARE)
		map_flags |= MAP_SHARED;

	ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
				((addr - vma->vm_start) >> PAGE_SHIFT),
				map_flags);
	if (offset_in_page(ret))
		goto out1;

	ret = move_vma(vma, addr, old_len, new_len, new_addr, locked, uf,
		       uf_unmap);
	if (!(offset_in_page(ret)))
		goto out;

out1:
	vm_unacct_memory(charged);

out:
	return ret;
}

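/*
 * Worked example of the map-count guard in mremap_to() (added for clarity):
 * with sysctl_max_map_count at its default of 65530, the check
 * (map_count + 2) >= 65530 - 3 refuses the fixed-destination move once the
 * process already holds 65525 or more mappings, leaving room for both the
 * source and destination vmas to be split before the final unmap.
 */
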
static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
{
	unsigned long end = vma->vm_end + delta;

	if (end < vma->vm_end) /* overflow */
		return 0;
	if (vma->vm_next && vma->vm_next->vm_start < end) /* intersection */
		return 0;
	if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
			      0, MAP_FIXED) & ~PAGE_MASK)
		return 0;
	return 1;
}

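/*
 * Note (added for clarity): the final test above relies on
 * get_unmapped_area() returning either a page-aligned address or a negative
 * errno; a nonzero result of "& ~PAGE_MASK" therefore means the expanded
 * range was rejected, so the mapping cannot be grown in place.
 */
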
/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
 * This option implies MREMAP_MAYMOVE.
 */
SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
		unsigned long, new_len, unsigned long, flags,
		unsigned long, new_addr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long ret = -EINVAL;
	unsigned long charged = 0;
	bool locked = false;
	bool downgraded = false;
	struct vm_userfaultfd_ctx uf = NULL_VM_UFFD_CTX;
	LIST_HEAD(uf_unmap_early);
	LIST_HEAD(uf_unmap);

	/*
	 * There is a deliberate asymmetry here: we strip the pointer tag
	 * from the old address but leave the new address alone. This is
	 * for consistency with mmap(), where we prevent the creation of
	 * aliasing mappings in userspace by leaving the tag bits of the
	 * mapping address intact. A non-zero tag will cause the subsequent
	 * range checks to reject the address as invalid.
	 *
	 * See Documentation/arm64/tagged-address-abi.rst for more information.
	 */
	addr = untagged_addr(addr);

	if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
		return ret;

	if (flags & MREMAP_FIXED && !(flags & MREMAP_MAYMOVE))
		return ret;

	if (offset_in_page(addr))
		return ret;

	old_len = PAGE_ALIGN(old_len);
	new_len = PAGE_ALIGN(new_len);

	/*
	 * We allow a zero old-len as a special case
	 * for DOS-emu "duplicate shm area" thing. But
	 * a zero new-len is nonsensical.
	 */
	if (!new_len)
		return ret;

	if (down_write_killable(&current->mm->mmap_sem))
		return -EINTR;

	if (flags & MREMAP_FIXED) {
		ret = mremap_to(addr, old_len, new_addr, new_len,
				&locked, &uf, &uf_unmap_early, &uf_unmap);
		goto out;
	}

	/*
	 * Always allow a shrinking remap: that just unmaps
	 * the unnecessary pages..
	 * __do_munmap does all the needed commit accounting, and
	 * downgrades mmap_sem to read if so directed.
	 */
	if (old_len >= new_len) {
		int retval;

		retval = __do_munmap(mm, addr+new_len, old_len - new_len,
				     &uf_unmap, true);
		if (retval < 0 && old_len != new_len) {
			ret = retval;
			goto out;
		/* Returning 1 indicates mmap_sem is downgraded to read. */
		} else if (retval == 1)
			downgraded = true;
		ret = addr;
		goto out;
	}

	/*
	 * Ok, we need to grow..
	 */
	vma = vma_to_resize(addr, old_len, new_len, &charged);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto out;
	}

	/* old_len exactly to the end of the area.. */
	if (old_len == vma->vm_end - addr) {
		/* can we just expand the current mapping? */
		if (vma_expandable(vma, new_len - old_len)) {
			int pages = (new_len - old_len) >> PAGE_SHIFT;

			if (vma_adjust(vma, vma->vm_start, addr + new_len,
				       vma->vm_pgoff, NULL)) {
				ret = -ENOMEM;
				goto out;
			}

			vm_stat_account(mm, vma->vm_flags, pages);
			if (vma->vm_flags & VM_LOCKED) {
				mm->locked_vm += pages;
				locked = true;
				new_addr = addr;
			}
			ret = addr;
			goto out;
		}
	}

	/*
	 * We weren't able to just expand or shrink the area,
	 * we need to create a new one and move it..
	 */
	ret = -ENOMEM;
	if (flags & MREMAP_MAYMOVE) {
		unsigned long map_flags = 0;

		if (vma->vm_flags & VM_MAYSHARE)
			map_flags |= MAP_SHARED;

		new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
					vma->vm_pgoff +
					((addr - vma->vm_start) >> PAGE_SHIFT),
					map_flags);
		if (offset_in_page(new_addr)) {
			ret = new_addr;
			goto out;
		}

		ret = move_vma(vma, addr, old_len, new_len, new_addr,
			       &locked, &uf, &uf_unmap);
	}
out:
	if (offset_in_page(ret)) {
		vm_unacct_memory(charged);
		locked = false;
	}
	if (downgraded)
		up_read(&current->mm->mmap_sem);
	else
		up_write(&current->mm->mmap_sem);
	if (locked && new_len > old_len)
		mm_populate(new_addr + old_len, new_len - old_len);
	userfaultfd_unmap_complete(mm, &uf_unmap_early);
	mremap_userfaultfd_complete(&uf, addr, new_addr, old_len);
	userfaultfd_unmap_complete(mm, &uf_unmap);
	return ret;
}
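
/*
 * Userspace usage sketch (illustrative only, not part of this file): growing
 * an anonymous mapping and letting the kernel relocate it if it cannot be
 * expanded in place. Needs <sys/mman.h>, <stdio.h>.
 *
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	void *q = mremap(p, 4096, 2 * 4096, MREMAP_MAYMOVE);
 *	if (q == MAP_FAILED)
 *		perror("mremap");
 */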