GNU Linux-libre 6.4.15-gnu
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *      linux/mm/madvise.c
4  *
5  * Copyright (C) 1999  Linus Torvalds
6  * Copyright (C) 2002  Christoph Hellwig
7  */
8
9 #include <linux/mman.h>
10 #include <linux/pagemap.h>
11 #include <linux/syscalls.h>
12 #include <linux/mempolicy.h>
13 #include <linux/page-isolation.h>
14 #include <linux/page_idle.h>
15 #include <linux/userfaultfd_k.h>
16 #include <linux/hugetlb.h>
17 #include <linux/falloc.h>
18 #include <linux/fadvise.h>
19 #include <linux/sched.h>
20 #include <linux/sched/mm.h>
21 #include <linux/mm_inline.h>
22 #include <linux/string.h>
23 #include <linux/uio.h>
24 #include <linux/ksm.h>
25 #include <linux/fs.h>
26 #include <linux/file.h>
27 #include <linux/blkdev.h>
28 #include <linux/backing-dev.h>
29 #include <linux/pagewalk.h>
30 #include <linux/swap.h>
31 #include <linux/swapops.h>
32 #include <linux/shmem_fs.h>
33 #include <linux/mmu_notifier.h>
34
35 #include <asm/tlb.h>
36
37 #include "internal.h"
38 #include "swap.h"
39
40 struct madvise_walk_private {
41         struct mmu_gather *tlb;
42         bool pageout;
43 };
44
45 /*
46  * Any behaviour which results in changes to the vma->vm_flags needs to
47  * take mmap_lock for writing. Others, which simply traverse vmas, need
48  * to only take it for reading.
49  */
50 static int madvise_need_mmap_write(int behavior)
51 {
52         switch (behavior) {
53         case MADV_REMOVE:
54         case MADV_WILLNEED:
55         case MADV_DONTNEED:
56         case MADV_DONTNEED_LOCKED:
57         case MADV_COLD:
58         case MADV_PAGEOUT:
59         case MADV_FREE:
60         case MADV_POPULATE_READ:
61         case MADV_POPULATE_WRITE:
62         case MADV_COLLAPSE:
63                 return 0;
64         default:
65                 /* be safe, default to 1. list exceptions explicitly */
66                 return 1;
67         }
68 }
69
70 #ifdef CONFIG_ANON_VMA_NAME
71 struct anon_vma_name *anon_vma_name_alloc(const char *name)
72 {
73         struct anon_vma_name *anon_name;
74         size_t count;
75
76         /* Add 1 for NUL terminator at the end of the anon_name->name */
77         count = strlen(name) + 1;
78         anon_name = kmalloc(struct_size(anon_name, name, count), GFP_KERNEL);
79         if (anon_name) {
80                 kref_init(&anon_name->kref);
81                 memcpy(anon_name->name, name, count);
82         }
83
84         return anon_name;
85 }
86
87 void anon_vma_name_free(struct kref *kref)
88 {
89         struct anon_vma_name *anon_name =
90                         container_of(kref, struct anon_vma_name, kref);
91         kfree(anon_name);
92 }
93
94 struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
95 {
96         mmap_assert_locked(vma->vm_mm);
97
98         return vma->anon_name;
99 }
100
101 /* mmap_lock should be write-locked */
102 static int replace_anon_vma_name(struct vm_area_struct *vma,
103                                  struct anon_vma_name *anon_name)
104 {
105         struct anon_vma_name *orig_name = anon_vma_name(vma);
106
107         if (!anon_name) {
108                 vma->anon_name = NULL;
109                 anon_vma_name_put(orig_name);
110                 return 0;
111         }
112
113         if (anon_vma_name_eq(orig_name, anon_name))
114                 return 0;
115
116         vma->anon_name = anon_vma_name_reuse(anon_name);
117         anon_vma_name_put(orig_name);
118
119         return 0;
120 }
121 #else /* CONFIG_ANON_VMA_NAME */
122 static int replace_anon_vma_name(struct vm_area_struct *vma,
123                                  struct anon_vma_name *anon_name)
124 {
125         if (anon_name)
126                 return -EINVAL;
127
128         return 0;
129 }
130 #endif /* CONFIG_ANON_VMA_NAME */
131 /*
132  * Update the vm_flags on a region of a vma, splitting or merging it as
133  * necessary.  Must be called with mmap_lock held for writing; the caller
134  * should ensure anon_name stability by raising its refcount, even when
135  * anon_name belongs to a valid vma, because this function might free that vma.
136  */
137 static int madvise_update_vma(struct vm_area_struct *vma,
138                               struct vm_area_struct **prev, unsigned long start,
139                               unsigned long end, unsigned long new_flags,
140                               struct anon_vma_name *anon_name)
141 {
142         struct mm_struct *mm = vma->vm_mm;
143         int error;
144         pgoff_t pgoff;
145         VMA_ITERATOR(vmi, mm, start);
146
147         if (new_flags == vma->vm_flags && anon_vma_name_eq(anon_vma_name(vma), anon_name)) {
148                 *prev = vma;
149                 return 0;
150         }
151
152         pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
153         *prev = vma_merge(&vmi, mm, *prev, start, end, new_flags,
154                           vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
155                           vma->vm_userfaultfd_ctx, anon_name);
156         if (*prev) {
157                 vma = *prev;
158                 goto success;
159         }
160
161         *prev = vma;
162
163         if (start != vma->vm_start) {
164                 error = split_vma(&vmi, vma, start, 1);
165                 if (error)
166                         return error;
167         }
168
169         if (end != vma->vm_end) {
170                 error = split_vma(&vmi, vma, end, 0);
171                 if (error)
172                         return error;
173         }
174
175 success:
176         /*
177          * vm_flags is protected by the mmap_lock held in write mode.
178          */
179         vm_flags_reset(vma, new_flags);
180         if (!vma->vm_file || vma_is_anon_shmem(vma)) {
181                 error = replace_anon_vma_name(vma, anon_name);
182                 if (error)
183                         return error;
184         }
185
186         return 0;
187 }
188
189 #ifdef CONFIG_SWAP
190 static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
191         unsigned long end, struct mm_walk *walk)
192 {
193         struct vm_area_struct *vma = walk->private;
194         unsigned long index;
195         struct swap_iocb *splug = NULL;
196
197         if (pmd_none_or_trans_huge_or_clear_bad(pmd))
198                 return 0;
199
200         for (index = start; index != end; index += PAGE_SIZE) {
201                 pte_t pte;
202                 swp_entry_t entry;
203                 struct page *page;
204                 spinlock_t *ptl;
205                 pte_t *ptep;
206
207                 ptep = pte_offset_map_lock(vma->vm_mm, pmd, index, &ptl);
208                 pte = *ptep;
209                 pte_unmap_unlock(ptep, ptl);
210
211                 if (!is_swap_pte(pte))
212                         continue;
213                 entry = pte_to_swp_entry(pte);
214                 if (unlikely(non_swap_entry(entry)))
215                         continue;
216
217                 page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
218                                              vma, index, false, &splug);
219                 if (page)
220                         put_page(page);
221         }
222         swap_read_unplug(splug);
223         cond_resched();
224
225         return 0;
226 }
227
228 static const struct mm_walk_ops swapin_walk_ops = {
229         .pmd_entry              = swapin_walk_pmd_entry,
230         .walk_lock              = PGWALK_RDLOCK,
231 };
232
233 static void force_shm_swapin_readahead(struct vm_area_struct *vma,
234                 unsigned long start, unsigned long end,
235                 struct address_space *mapping)
236 {
237         XA_STATE(xas, &mapping->i_pages, linear_page_index(vma, start));
238         pgoff_t end_index = linear_page_index(vma, end + PAGE_SIZE - 1);
239         struct page *page;
240         struct swap_iocb *splug = NULL;
241
242         rcu_read_lock();
243         xas_for_each(&xas, page, end_index) {
244                 swp_entry_t swap;
245
246                 if (!xa_is_value(page))
247                         continue;
248                 swap = radix_to_swp_entry(page);
249                 /* There might be swapin error entries in shmem mapping. */
250                 if (non_swap_entry(swap))
251                         continue;
252                 xas_pause(&xas);
253                 rcu_read_unlock();
254
255                 page = read_swap_cache_async(swap, GFP_HIGHUSER_MOVABLE,
256                                              NULL, 0, false, &splug);
257                 if (page)
258                         put_page(page);
259
260                 rcu_read_lock();
261         }
262         rcu_read_unlock();
263         swap_read_unplug(splug);
264
265         lru_add_drain();        /* Push any new pages onto the LRU now */
266 }
267 #endif          /* CONFIG_SWAP */
268
269 /*
270  * Schedule all required I/O operations.  Do not wait for completion.
271  */
272 static long madvise_willneed(struct vm_area_struct *vma,
273                              struct vm_area_struct **prev,
274                              unsigned long start, unsigned long end)
275 {
276         struct mm_struct *mm = vma->vm_mm;
277         struct file *file = vma->vm_file;
278         loff_t offset;
279
280         *prev = vma;
281 #ifdef CONFIG_SWAP
282         if (!file) {
283                 walk_page_range(vma->vm_mm, start, end, &swapin_walk_ops, vma);
284                 lru_add_drain(); /* Push any new pages onto the LRU now */
285                 return 0;
286         }
287
288         if (shmem_mapping(file->f_mapping)) {
289                 force_shm_swapin_readahead(vma, start, end,
290                                         file->f_mapping);
291                 return 0;
292         }
293 #else
294         if (!file)
295                 return -EBADF;
296 #endif
297
298         if (IS_DAX(file_inode(file))) {
299                 /* no bad return value, but ignore advice */
300                 return 0;
301         }
302
303         /*
304          * Filesystem's fadvise may need to take various locks.  We need to
305          * explicitly grab a reference because the vma (and hence the
306          * vma's reference to the file) can go away as soon as we drop
307          * mmap_lock.
308          */
309         *prev = NULL;   /* tell sys_madvise we drop mmap_lock */
310         get_file(file);
311         offset = (loff_t)(start - vma->vm_start)
312                         + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
313         mmap_read_unlock(mm);
314         vfs_fadvise(file, offset, end - start, POSIX_FADV_WILLNEED);
315         fput(file);
316         mmap_read_lock(mm);
317         return 0;
318 }
319
320 static inline bool can_do_file_pageout(struct vm_area_struct *vma)
321 {
322         if (!vma->vm_file)
323                 return false;
324         /*
325          * paging out pagecache only for non-anonymous mappings that correspond
326          * to files the calling process could (if it tried) open for writing;
327          * otherwise we'd be including shared non-exclusive mappings, which
328          * opens a side channel.
329          */
330         return inode_owner_or_capable(&nop_mnt_idmap,
331                                       file_inode(vma->vm_file)) ||
332                file_permission(vma->vm_file, MAY_WRITE) == 0;
333 }
334
335 static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
336                                 unsigned long addr, unsigned long end,
337                                 struct mm_walk *walk)
338 {
339         struct madvise_walk_private *private = walk->private;
340         struct mmu_gather *tlb = private->tlb;
341         bool pageout = private->pageout;
342         struct mm_struct *mm = tlb->mm;
343         struct vm_area_struct *vma = walk->vma;
344         pte_t *orig_pte, *pte, ptent;
345         spinlock_t *ptl;
346         struct folio *folio = NULL;
347         LIST_HEAD(folio_list);
348         bool pageout_anon_only_filter;
349
350         if (fatal_signal_pending(current))
351                 return -EINTR;
352
353         pageout_anon_only_filter = pageout && !vma_is_anonymous(vma) &&
354                                         !can_do_file_pageout(vma);
355
356 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
357         if (pmd_trans_huge(*pmd)) {
358                 pmd_t orig_pmd;
359                 unsigned long next = pmd_addr_end(addr, end);
360
361                 tlb_change_page_size(tlb, HPAGE_PMD_SIZE);
362                 ptl = pmd_trans_huge_lock(pmd, vma);
363                 if (!ptl)
364                         return 0;
365
366                 orig_pmd = *pmd;
367                 if (is_huge_zero_pmd(orig_pmd))
368                         goto huge_unlock;
369
370                 if (unlikely(!pmd_present(orig_pmd))) {
371                         VM_BUG_ON(thp_migration_supported() &&
372                                         !is_pmd_migration_entry(orig_pmd));
373                         goto huge_unlock;
374                 }
375
376                 folio = pfn_folio(pmd_pfn(orig_pmd));
377
378                 /* Do not interfere with other mappings of this folio */
379                 if (folio_estimated_sharers(folio) != 1)
380                         goto huge_unlock;
381
382                 if (pageout_anon_only_filter && !folio_test_anon(folio))
383                         goto huge_unlock;
384
385                 if (next - addr != HPAGE_PMD_SIZE) {
386                         int err;
387
388                         folio_get(folio);
389                         spin_unlock(ptl);
390                         folio_lock(folio);
391                         err = split_folio(folio);
392                         folio_unlock(folio);
393                         folio_put(folio);
394                         if (!err)
395                                 goto regular_folio;
396                         return 0;
397                 }
398
399                 if (pmd_young(orig_pmd)) {
400                         pmdp_invalidate(vma, addr, pmd);
401                         orig_pmd = pmd_mkold(orig_pmd);
402
403                         set_pmd_at(mm, addr, pmd, orig_pmd);
404                         tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
405                 }
406
407                 folio_clear_referenced(folio);
408                 folio_test_clear_young(folio);
409                 if (pageout) {
410                         if (folio_isolate_lru(folio)) {
411                                 if (folio_test_unevictable(folio))
412                                         folio_putback_lru(folio);
413                                 else
414                                         list_add(&folio->lru, &folio_list);
415                         }
416                 } else
417                         folio_deactivate(folio);
418 huge_unlock:
419                 spin_unlock(ptl);
420                 if (pageout)
421                         reclaim_pages(&folio_list);
422                 return 0;
423         }
424
425 regular_folio:
426         if (pmd_trans_unstable(pmd))
427                 return 0;
428 #endif
429         tlb_change_page_size(tlb, PAGE_SIZE);
430         orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
431         flush_tlb_batched_pending(mm);
432         arch_enter_lazy_mmu_mode();
433         for (; addr < end; pte++, addr += PAGE_SIZE) {
434                 ptent = *pte;
435
436                 if (pte_none(ptent))
437                         continue;
438
439                 if (!pte_present(ptent))
440                         continue;
441
442                 folio = vm_normal_folio(vma, addr, ptent);
443                 if (!folio || folio_is_zone_device(folio))
444                         continue;
445
446                 /*
447                  * Creating a THP page is expensive, so split it only if we
448                  * are sure it's worth it. Split it if we are the only owner.
449                  */
450                 if (folio_test_large(folio)) {
451                         if (folio_estimated_sharers(folio) != 1)
452                                 break;
453                         if (pageout_anon_only_filter && !folio_test_anon(folio))
454                                 break;
455                         folio_get(folio);
456                         if (!folio_trylock(folio)) {
457                                 folio_put(folio);
458                                 break;
459                         }
460                         pte_unmap_unlock(orig_pte, ptl);
461                         if (split_folio(folio)) {
462                                 folio_unlock(folio);
463                                 folio_put(folio);
464                                 orig_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
465                                 break;
466                         }
467                         folio_unlock(folio);
468                         folio_put(folio);
469                         orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
470                         pte--;
471                         addr -= PAGE_SIZE;
472                         continue;
473                 }
474
475                 /*
476                  * Do not interfere with other mappings of this folio, and
477                  * skip non-LRU folios.
478                  */
479                 if (!folio_test_lru(folio) || folio_mapcount(folio) != 1)
480                         continue;
481
482                 if (pageout_anon_only_filter && !folio_test_anon(folio))
483                         continue;
484
485                 VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
486
487                 if (pte_young(ptent)) {
488                         ptent = ptep_get_and_clear_full(mm, addr, pte,
489                                                         tlb->fullmm);
490                         ptent = pte_mkold(ptent);
491                         set_pte_at(mm, addr, pte, ptent);
492                         tlb_remove_tlb_entry(tlb, pte, addr);
493                 }
494
495                 /*
496                  * We are deactivating a folio to accelerate its reclaim.
497                  * The VM cannot reclaim the folio unless we clear PG_young.
498                  * As a side effect, this confuses idle-page tracking,
499                  * which will miss the recent reference history.
500                  */
501                 folio_clear_referenced(folio);
502                 folio_test_clear_young(folio);
503                 if (pageout) {
504                         if (folio_isolate_lru(folio)) {
505                                 if (folio_test_unevictable(folio))
506                                         folio_putback_lru(folio);
507                                 else
508                                         list_add(&folio->lru, &folio_list);
509                         }
510                 } else
511                         folio_deactivate(folio);
512         }
513
514         arch_leave_lazy_mmu_mode();
515         pte_unmap_unlock(orig_pte, ptl);
516         if (pageout)
517                 reclaim_pages(&folio_list);
518         cond_resched();
519
520         return 0;
521 }
522
523 static const struct mm_walk_ops cold_walk_ops = {
524         .pmd_entry = madvise_cold_or_pageout_pte_range,
525         .walk_lock = PGWALK_RDLOCK,
526 };
527
528 static void madvise_cold_page_range(struct mmu_gather *tlb,
529                              struct vm_area_struct *vma,
530                              unsigned long addr, unsigned long end)
531 {
532         struct madvise_walk_private walk_private = {
533                 .pageout = false,
534                 .tlb = tlb,
535         };
536
537         tlb_start_vma(tlb, vma);
538         walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private);
539         tlb_end_vma(tlb, vma);
540 }
541
542 static inline bool can_madv_lru_vma(struct vm_area_struct *vma)
543 {
544         return !(vma->vm_flags & (VM_LOCKED|VM_PFNMAP|VM_HUGETLB));
545 }
546
547 static long madvise_cold(struct vm_area_struct *vma,
548                         struct vm_area_struct **prev,
549                         unsigned long start_addr, unsigned long end_addr)
550 {
551         struct mm_struct *mm = vma->vm_mm;
552         struct mmu_gather tlb;
553
554         *prev = vma;
555         if (!can_madv_lru_vma(vma))
556                 return -EINVAL;
557
558         lru_add_drain();
559         tlb_gather_mmu(&tlb, mm);
560         madvise_cold_page_range(&tlb, vma, start_addr, end_addr);
561         tlb_finish_mmu(&tlb);
562
563         return 0;
564 }
565
566 static void madvise_pageout_page_range(struct mmu_gather *tlb,
567                              struct vm_area_struct *vma,
568                              unsigned long addr, unsigned long end)
569 {
570         struct madvise_walk_private walk_private = {
571                 .pageout = true,
572                 .tlb = tlb,
573         };
574
575         tlb_start_vma(tlb, vma);
576         walk_page_range(vma->vm_mm, addr, end, &cold_walk_ops, &walk_private);
577         tlb_end_vma(tlb, vma);
578 }
579
580 static long madvise_pageout(struct vm_area_struct *vma,
581                         struct vm_area_struct **prev,
582                         unsigned long start_addr, unsigned long end_addr)
583 {
584         struct mm_struct *mm = vma->vm_mm;
585         struct mmu_gather tlb;
586
587         *prev = vma;
588         if (!can_madv_lru_vma(vma))
589                 return -EINVAL;
590
591         /*
592          * If the VMA belongs to a private file mapping, there can be private
593          * dirty pages that may be paged out even when this process is neither
594          * the owner of the file nor write-capable on it. So we additionally
595          * allow private file mappings to page out their dirty anon pages.
596          */
597         if (!vma_is_anonymous(vma) && (!can_do_file_pageout(vma) &&
598                                 (vma->vm_flags & VM_MAYSHARE)))
599                 return 0;
600
601         lru_add_drain();
602         tlb_gather_mmu(&tlb, mm);
603         madvise_pageout_page_range(&tlb, vma, start_addr, end_addr);
604         tlb_finish_mmu(&tlb);
605
606         return 0;
607 }
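
/*
 * Illustrative userspace sketch (not part of this kernel source): how a
 * process might apply MADV_COLD and MADV_PAGEOUT to its own memory.  The
 * helper name and the choice of when to call it are assumptions for the
 * example; MADV_COLD/MADV_PAGEOUT need Linux 5.4+ and matching libc headers.
 *
 *	#include <stdio.h>
 *	#include <sys/mman.h>
 *
 *	static void drop_cache_hints(void *buf, size_t len)
 *	{
 *		// Deactivate the range so it is reclaimed first under pressure.
 *		if (madvise(buf, len, MADV_COLD))
 *			perror("madvise(MADV_COLD)");
 *		// Or reclaim it right away: the pages are paged out immediately.
 *		if (madvise(buf, len, MADV_PAGEOUT))
 *			perror("madvise(MADV_PAGEOUT)");
 *	}
 */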
608
609 static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
610                                 unsigned long end, struct mm_walk *walk)
611
612 {
613         struct mmu_gather *tlb = walk->private;
614         struct mm_struct *mm = tlb->mm;
615         struct vm_area_struct *vma = walk->vma;
616         spinlock_t *ptl;
617         pte_t *orig_pte, *pte, ptent;
618         struct folio *folio;
619         int nr_swap = 0;
620         unsigned long next;
621
622         next = pmd_addr_end(addr, end);
623         if (pmd_trans_huge(*pmd))
624                 if (madvise_free_huge_pmd(tlb, vma, pmd, addr, next))
625                         goto next;
626
627         if (pmd_trans_unstable(pmd))
628                 return 0;
629
630         tlb_change_page_size(tlb, PAGE_SIZE);
631         orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
632         flush_tlb_batched_pending(mm);
633         arch_enter_lazy_mmu_mode();
634         for (; addr != end; pte++, addr += PAGE_SIZE) {
635                 ptent = *pte;
636
637                 if (pte_none(ptent))
638                         continue;
639                 /*
640                  * If the pte holds a swap entry, just clear the page table
641                  * entry to prevent swap-in, which is more expensive than
642                  * (page allocation + zeroing).
643                  */
644                 if (!pte_present(ptent)) {
645                         swp_entry_t entry;
646
647                         entry = pte_to_swp_entry(ptent);
648                         if (!non_swap_entry(entry)) {
649                                 nr_swap--;
650                                 free_swap_and_cache(entry);
651                                 pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
652                         } else if (is_hwpoison_entry(entry) ||
653                                    is_swapin_error_entry(entry)) {
654                                 pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
655                         }
656                         continue;
657                 }
658
659                 folio = vm_normal_folio(vma, addr, ptent);
660                 if (!folio || folio_is_zone_device(folio))
661                         continue;
662
663                 /*
664                  * If pmd isn't transhuge but the folio is large and
665                  * is owned by only this process, split it and
666                  * deactivate all pages.
667                  */
668                 if (folio_test_large(folio)) {
669                         if (folio_estimated_sharers(folio) != 1)
670                                 break;
671                         folio_get(folio);
672                         if (!folio_trylock(folio)) {
673                                 folio_put(folio);
674                                 goto out;
675                         }
676                         pte_unmap_unlock(orig_pte, ptl);
677                         if (split_folio(folio)) {
678                                 folio_unlock(folio);
679                                 folio_put(folio);
680                                 orig_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
681                                 goto out;
682                         }
683                         folio_unlock(folio);
684                         folio_put(folio);
685                         orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
686                         pte--;
687                         addr -= PAGE_SIZE;
688                         continue;
689                 }
690
691                 if (folio_test_swapcache(folio) || folio_test_dirty(folio)) {
692                         if (!folio_trylock(folio))
693                                 continue;
694                         /*
695                          * If folio is shared with others, we mustn't clear
696                          * the folio's dirty flag.
697                          */
698                         if (folio_mapcount(folio) != 1) {
699                                 folio_unlock(folio);
700                                 continue;
701                         }
702
703                         if (folio_test_swapcache(folio) &&
704                             !folio_free_swap(folio)) {
705                                 folio_unlock(folio);
706                                 continue;
707                         }
708
709                         folio_clear_dirty(folio);
710                         folio_unlock(folio);
711                 }
712
713                 if (pte_young(ptent) || pte_dirty(ptent)) {
714                         /*
715                          * Some architectures (e.g. PPC) don't update the TLB
716                          * with set_pte_at() and tlb_remove_tlb_entry(), so for
717                          * portability, remap the pte as old|clean after
718                          * clearing it.
719                          */
720                         ptent = ptep_get_and_clear_full(mm, addr, pte,
721                                                         tlb->fullmm);
722
723                         ptent = pte_mkold(ptent);
724                         ptent = pte_mkclean(ptent);
725                         set_pte_at(mm, addr, pte, ptent);
726                         tlb_remove_tlb_entry(tlb, pte, addr);
727                 }
728                 folio_mark_lazyfree(folio);
729         }
730 out:
731         if (nr_swap) {
732                 if (current->mm == mm)
733                         sync_mm_rss(mm);
734
735                 add_mm_counter(mm, MM_SWAPENTS, nr_swap);
736         }
737         arch_leave_lazy_mmu_mode();
738         pte_unmap_unlock(orig_pte, ptl);
739         cond_resched();
740 next:
741         return 0;
742 }
743
744 static const struct mm_walk_ops madvise_free_walk_ops = {
745         .pmd_entry              = madvise_free_pte_range,
746         .walk_lock              = PGWALK_RDLOCK,
747 };
748
749 static int madvise_free_single_vma(struct vm_area_struct *vma,
750                         unsigned long start_addr, unsigned long end_addr)
751 {
752         struct mm_struct *mm = vma->vm_mm;
753         struct mmu_notifier_range range;
754         struct mmu_gather tlb;
755
756         /* MADV_FREE works only for anon vmas at the moment */
757         if (!vma_is_anonymous(vma))
758                 return -EINVAL;
759
760         range.start = max(vma->vm_start, start_addr);
761         if (range.start >= vma->vm_end)
762                 return -EINVAL;
763         range.end = min(vma->vm_end, end_addr);
764         if (range.end <= vma->vm_start)
765                 return -EINVAL;
766         mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
767                                 range.start, range.end);
768
769         lru_add_drain();
770         tlb_gather_mmu(&tlb, mm);
771         update_hiwater_rss(mm);
772
773         mmu_notifier_invalidate_range_start(&range);
774         tlb_start_vma(&tlb, vma);
775         walk_page_range(vma->vm_mm, range.start, range.end,
776                         &madvise_free_walk_ops, &tlb);
777         tlb_end_vma(&tlb, vma);
778         mmu_notifier_invalidate_range_end(&range);
779         tlb_finish_mmu(&tlb);
780
781         return 0;
782 }
783
784 /*
785  * Application no longer needs these pages.  If the pages are dirty,
786  * it's OK to just throw them away.  The app will be more careful about
787  * data it wants to keep.  Be sure to free swap resources too.  The
788  * zap_page_range_single call sets things up for shrink_active_list to actually
789  * free these pages later if no one else has touched them in the meantime,
790  * although we could add these pages to a global reuse list for
791  * shrink_active_list to pick up before reclaiming other pages.
792  *
793  * NB: This interface discards data rather than pushes it out to swap,
794  * as some implementations do.  This has performance implications for
795  * applications like large transactional databases which want to discard
796  * pages in anonymous maps after committing to backing store the data
797  * that was kept in them.  There is no reason to write this data out to
798  * the swap area if the application is discarding it.
799  *
800  * An interface that causes the system to free clean pages and flush
801  * dirty pages is already available as msync(MS_INVALIDATE).
802  */
803 static long madvise_dontneed_single_vma(struct vm_area_struct *vma,
804                                         unsigned long start, unsigned long end)
805 {
806         zap_page_range_single(vma, start, end - start, NULL);
807         return 0;
808 }
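
/*
 * Illustrative userspace sketch (not part of this kernel source): the
 * difference between MADV_DONTNEED and MADV_FREE on an anonymous mapping.
 * Sizes and error handling are simplified for the example.
 *
 *	#include <sys/mman.h>
 *
 *	int main(void)
 *	{
 *		size_t len = 1UL << 20;
 *		char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *		if (p == MAP_FAILED)
 *			return 1;
 *		p[0] = 1;
 *		// MADV_DONTNEED drops the contents immediately; the next
 *		// access observes zero-filled pages.
 *		madvise(p, len, MADV_DONTNEED);
 *		p[0] = 1;
 *		// MADV_FREE marks the pages lazily freeable; they are only
 *		// discarded if memory pressure happens before the next write.
 *		madvise(p, len, MADV_FREE);
 *		return 0;
 *	}
 */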
809
810 static bool madvise_dontneed_free_valid_vma(struct vm_area_struct *vma,
811                                             unsigned long start,
812                                             unsigned long *end,
813                                             int behavior)
814 {
815         if (!is_vm_hugetlb_page(vma)) {
816                 unsigned int forbidden = VM_PFNMAP;
817
818                 if (behavior != MADV_DONTNEED_LOCKED)
819                         forbidden |= VM_LOCKED;
820
821                 return !(vma->vm_flags & forbidden);
822         }
823
824         if (behavior != MADV_DONTNEED && behavior != MADV_DONTNEED_LOCKED)
825                 return false;
826         if (start & ~huge_page_mask(hstate_vma(vma)))
827                 return false;
828
829         /*
830          * Madvise callers expect the length to be rounded up to PAGE_SIZE
831          * boundaries, and may be unaware that this VMA uses huge pages.
832          * Avoid unexpected data loss by rounding down the number of
833          * huge pages freed.
834          */
835         *end = ALIGN_DOWN(*end, huge_page_size(hstate_vma(vma)));
836
837         return true;
838 }
839
840 static long madvise_dontneed_free(struct vm_area_struct *vma,
841                                   struct vm_area_struct **prev,
842                                   unsigned long start, unsigned long end,
843                                   int behavior)
844 {
845         struct mm_struct *mm = vma->vm_mm;
846
847         *prev = vma;
848         if (!madvise_dontneed_free_valid_vma(vma, start, &end, behavior))
849                 return -EINVAL;
850
851         if (start == end)
852                 return 0;
853
854         if (!userfaultfd_remove(vma, start, end)) {
855                 *prev = NULL; /* mmap_lock has been dropped, prev is stale */
856
857                 mmap_read_lock(mm);
858                 vma = vma_lookup(mm, start);
859                 if (!vma)
860                         return -ENOMEM;
861                 /*
862                  * Potential end adjustment for hugetlb vma is OK as
863                  * the check below keeps end within vma.
864                  */
865                 if (!madvise_dontneed_free_valid_vma(vma, start, &end,
866                                                      behavior))
867                         return -EINVAL;
868                 if (end > vma->vm_end) {
869                         /*
870                          * Don't fail if end > vma->vm_end. If the old
871                          * vma was split while the mmap_lock was
872                          * released, the effect of the concurrent
873                          * operation does not make the madvise()
874                          * result undefined. There may be an
875                          * adjacent next vma that we'll walk
876                          * next. userfaultfd_remove() will generate a
877                          * UFFD_EVENT_REMOVE repetition on the
878                          * end-vma->vm_end range, but the manager can
879                          * handle a repetition fine.
880                          */
881                         end = vma->vm_end;
882                 }
883                 VM_WARN_ON(start >= end);
884         }
885
886         if (behavior == MADV_DONTNEED || behavior == MADV_DONTNEED_LOCKED)
887                 return madvise_dontneed_single_vma(vma, start, end);
888         else if (behavior == MADV_FREE)
889                 return madvise_free_single_vma(vma, start, end);
890         else
891                 return -EINVAL;
892 }
893
894 static long madvise_populate(struct vm_area_struct *vma,
895                              struct vm_area_struct **prev,
896                              unsigned long start, unsigned long end,
897                              int behavior)
898 {
899         const bool write = behavior == MADV_POPULATE_WRITE;
900         struct mm_struct *mm = vma->vm_mm;
901         unsigned long tmp_end;
902         int locked = 1;
903         long pages;
904
905         *prev = vma;
906
907         while (start < end) {
908                 /*
909                  * We might have temporarily dropped the lock. For example,
910                  * our VMA might have been split.
911                  */
912                 if (!vma || start >= vma->vm_end) {
913                         vma = vma_lookup(mm, start);
914                         if (!vma)
915                                 return -ENOMEM;
916                 }
917
918                 tmp_end = min_t(unsigned long, end, vma->vm_end);
919                 /* Populate (prefault) page tables readable/writable. */
920                 pages = faultin_vma_page_range(vma, start, tmp_end, write,
921                                                &locked);
922                 if (!locked) {
923                         mmap_read_lock(mm);
924                         locked = 1;
925                         *prev = NULL;
926                         vma = NULL;
927                 }
928                 if (pages < 0) {
929                         switch (pages) {
930                         case -EINTR:
931                                 return -EINTR;
932                         case -EINVAL: /* Incompatible mappings / permissions. */
933                                 return -EINVAL;
934                         case -EHWPOISON:
935                                 return -EHWPOISON;
936                         case -EFAULT: /* VM_FAULT_SIGBUS or VM_FAULT_SIGSEGV */
937                                 return -EFAULT;
938                         default:
939                                 pr_warn_once("%s: unhandled return value: %ld\n",
940                                              __func__, pages);
941                                 fallthrough;
942                         case -ENOMEM:
943                                 return -ENOMEM;
944                         }
945                 }
946                 start += pages * PAGE_SIZE;
947         }
948         return 0;
949 }
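
/*
 * Illustrative userspace sketch (not part of this kernel source): prefaulting
 * a fresh anonymous mapping with MADV_POPULATE_WRITE so that later stores do
 * not take page faults.  The fallback constant is an assumption for libc
 * headers that predate the interface (the kernel uapi value is 23).
 *
 *	#include <sys/mman.h>
 *
 *	#ifndef MADV_POPULATE_WRITE
 *	#define MADV_POPULATE_WRITE 23
 *	#endif
 *
 *	static void *alloc_prefaulted(size_t len)
 *	{
 *		void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *
 *		if (p == MAP_FAILED)
 *			return NULL;
 *		// Populate (prefault) the page tables writable up front.
 *		if (madvise(p, len, MADV_POPULATE_WRITE)) {
 *			munmap(p, len);
 *			return NULL;
 *		}
 *		return p;
 *	}
 */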
950
951 /*
952  * Application wants to free up the pages and associated backing store.
953  * This is effectively punching a hole into the middle of a file.
954  */
955 static long madvise_remove(struct vm_area_struct *vma,
956                                 struct vm_area_struct **prev,
957                                 unsigned long start, unsigned long end)
958 {
959         loff_t offset;
960         int error;
961         struct file *f;
962         struct mm_struct *mm = vma->vm_mm;
963
964         *prev = NULL;   /* tell sys_madvise we drop mmap_lock */
965
966         if (vma->vm_flags & VM_LOCKED)
967                 return -EINVAL;
968
969         f = vma->vm_file;
970
971         if (!f || !f->f_mapping || !f->f_mapping->host) {
972                 return -EINVAL;
973         }
974
975         if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE))
976                 return -EACCES;
977
978         offset = (loff_t)(start - vma->vm_start)
979                         + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
980
981         /*
982          * Filesystem's fallocate may need to take i_rwsem.  We need to
983          * explicitly grab a reference because the vma (and hence the
984          * vma's reference to the file) can go away as soon as we drop
985          * mmap_lock.
986          */
987         get_file(f);
988         if (userfaultfd_remove(vma, start, end)) {
989                 /* mmap_lock was not released by userfaultfd_remove() */
990                 mmap_read_unlock(mm);
991         }
992         error = vfs_fallocate(f,
993                                 FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
994                                 offset, end - start);
995         fput(f);
996         mmap_read_lock(mm);
997         return error;
998 }
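
/*
 * Illustrative userspace sketch (not part of this kernel source): punching a
 * hole in a shared file mapping with MADV_REMOVE.  The filesystem must
 * support fallocate() hole punching (tmpfs does); the helper name and sizes
 * are placeholders, and the caller is assumed to have opened fd read-write.
 *
 *	#include <sys/mman.h>
 *
 *	static int punch_hole(int fd, size_t len, size_t hole_off, size_t hole_len)
 *	{
 *		char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			       MAP_SHARED, fd, 0);
 *		int ret;
 *
 *		if (p == MAP_FAILED)
 *			return -1;
 *		// Frees both the pages and the backing blocks of the file.
 *		ret = madvise(p + hole_off, hole_len, MADV_REMOVE);
 *		munmap(p, len);
 *		return ret;
 *	}
 */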
999
1000 /*
1001  * Apply an madvise behavior to a region of a vma.  madvise_update_vma
1002  * Apply a madvise behavior to a region of a vma.  madvise_update_vma
1003  * behavior.
1004  */
1005 static int madvise_vma_behavior(struct vm_area_struct *vma,
1006                                 struct vm_area_struct **prev,
1007                                 unsigned long start, unsigned long end,
1008                                 unsigned long behavior)
1009 {
1010         int error;
1011         struct anon_vma_name *anon_name;
1012         unsigned long new_flags = vma->vm_flags;
1013
1014         switch (behavior) {
1015         case MADV_REMOVE:
1016                 return madvise_remove(vma, prev, start, end);
1017         case MADV_WILLNEED:
1018                 return madvise_willneed(vma, prev, start, end);
1019         case MADV_COLD:
1020                 return madvise_cold(vma, prev, start, end);
1021         case MADV_PAGEOUT:
1022                 return madvise_pageout(vma, prev, start, end);
1023         case MADV_FREE:
1024         case MADV_DONTNEED:
1025         case MADV_DONTNEED_LOCKED:
1026                 return madvise_dontneed_free(vma, prev, start, end, behavior);
1027         case MADV_POPULATE_READ:
1028         case MADV_POPULATE_WRITE:
1029                 return madvise_populate(vma, prev, start, end, behavior);
1030         case MADV_NORMAL:
1031                 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
1032                 break;
1033         case MADV_SEQUENTIAL:
1034                 new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ;
1035                 break;
1036         case MADV_RANDOM:
1037                 new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ;
1038                 break;
1039         case MADV_DONTFORK:
1040                 new_flags |= VM_DONTCOPY;
1041                 break;
1042         case MADV_DOFORK:
1043                 if (vma->vm_flags & VM_IO)
1044                         return -EINVAL;
1045                 new_flags &= ~VM_DONTCOPY;
1046                 break;
1047         case MADV_WIPEONFORK:
1048                 /* MADV_WIPEONFORK is only supported on anonymous memory. */
1049                 if (vma->vm_file || vma->vm_flags & VM_SHARED)
1050                         return -EINVAL;
1051                 new_flags |= VM_WIPEONFORK;
1052                 break;
1053         case MADV_KEEPONFORK:
1054                 new_flags &= ~VM_WIPEONFORK;
1055                 break;
1056         case MADV_DONTDUMP:
1057                 new_flags |= VM_DONTDUMP;
1058                 break;
1059         case MADV_DODUMP:
1060                 if (!is_vm_hugetlb_page(vma) && new_flags & VM_SPECIAL)
1061                         return -EINVAL;
1062                 new_flags &= ~VM_DONTDUMP;
1063                 break;
1064         case MADV_MERGEABLE:
1065         case MADV_UNMERGEABLE:
1066                 error = ksm_madvise(vma, start, end, behavior, &new_flags);
1067                 if (error)
1068                         goto out;
1069                 break;
1070         case MADV_HUGEPAGE:
1071         case MADV_NOHUGEPAGE:
1072                 error = hugepage_madvise(vma, &new_flags, behavior);
1073                 if (error)
1074                         goto out;
1075                 break;
1076         case MADV_COLLAPSE:
1077                 return madvise_collapse(vma, prev, start, end);
1078         }
1079
1080         anon_name = anon_vma_name(vma);
1081         anon_vma_name_get(anon_name);
1082         error = madvise_update_vma(vma, prev, start, end, new_flags,
1083                                    anon_name);
1084         anon_vma_name_put(anon_name);
1085
1086 out:
1087         /*
1088          * madvise() returns EAGAIN if kernel resources, such as
1089          * slab, are temporarily unavailable.
1090          */
1091         if (error == -ENOMEM)
1092                 error = -EAGAIN;
1093         return error;
1094 }
1095
1096 #ifdef CONFIG_MEMORY_FAILURE
1097 /*
1098  * Error injection support for memory error handling.
1099  */
1100 static int madvise_inject_error(int behavior,
1101                 unsigned long start, unsigned long end)
1102 {
1103         unsigned long size;
1104
1105         if (!capable(CAP_SYS_ADMIN))
1106                 return -EPERM;
1107
1108
1109         for (; start < end; start += size) {
1110                 unsigned long pfn;
1111                 struct page *page;
1112                 int ret;
1113
1114                 ret = get_user_pages_fast(start, 1, 0, &page);
1115                 if (ret != 1)
1116                         return ret;
1117                 pfn = page_to_pfn(page);
1118
1119                 /*
1120                  * When soft offlining hugepages, the page is dissolved
1121                  * after being migrated, so on the next loop iteration
1122                  * "page" will no longer be a compound page.
1123                  */
1124                 size = page_size(compound_head(page));
1125
1126                 if (behavior == MADV_SOFT_OFFLINE) {
1127                         pr_info("Soft offlining pfn %#lx at process virtual address %#lx\n",
1128                                  pfn, start);
1129                         ret = soft_offline_page(pfn, MF_COUNT_INCREASED);
1130                 } else {
1131                         pr_info("Injecting memory failure for pfn %#lx at process virtual address %#lx\n",
1132                                  pfn, start);
1133                         ret = memory_failure(pfn, MF_COUNT_INCREASED | MF_SW_SIMULATED);
1134                         if (ret == -EOPNOTSUPP)
1135                                 ret = 0;
1136                 }
1137
1138                 if (ret)
1139                         return ret;
1140         }
1141
1142         return 0;
1143 }
1144 #endif
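
/*
 * Illustrative userspace sketch (not part of this kernel source): injecting a
 * memory error into one page of a mapping, as hardware-poisoning selftests
 * do.  It requires CAP_SYS_ADMIN and CONFIG_MEMORY_FAILURE, and the process
 * should expect a SIGBUS on a later access to the poisoned page.
 *
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	static int poison_one_page(void *addr)
 *	{
 *		long psize = sysconf(_SC_PAGESIZE);
 *
 *		// Treat this page as if the hardware had reported it bad.
 *		return madvise(addr, psize, MADV_HWPOISON);
 *	}
 */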
1145
1146 static bool
1147 madvise_behavior_valid(int behavior)
1148 {
1149         switch (behavior) {
1150         case MADV_DOFORK:
1151         case MADV_DONTFORK:
1152         case MADV_NORMAL:
1153         case MADV_SEQUENTIAL:
1154         case MADV_RANDOM:
1155         case MADV_REMOVE:
1156         case MADV_WILLNEED:
1157         case MADV_DONTNEED:
1158         case MADV_DONTNEED_LOCKED:
1159         case MADV_FREE:
1160         case MADV_COLD:
1161         case MADV_PAGEOUT:
1162         case MADV_POPULATE_READ:
1163         case MADV_POPULATE_WRITE:
1164 #ifdef CONFIG_KSM
1165         case MADV_MERGEABLE:
1166         case MADV_UNMERGEABLE:
1167 #endif
1168 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1169         case MADV_HUGEPAGE:
1170         case MADV_NOHUGEPAGE:
1171         case MADV_COLLAPSE:
1172 #endif
1173         case MADV_DONTDUMP:
1174         case MADV_DODUMP:
1175         case MADV_WIPEONFORK:
1176         case MADV_KEEPONFORK:
1177 #ifdef CONFIG_MEMORY_FAILURE
1178         case MADV_SOFT_OFFLINE:
1179         case MADV_HWPOISON:
1180 #endif
1181                 return true;
1182
1183         default:
1184                 return false;
1185         }
1186 }
1187
1188 static bool process_madvise_behavior_valid(int behavior)
1189 {
1190         switch (behavior) {
1191         case MADV_COLD:
1192         case MADV_PAGEOUT:
1193         case MADV_WILLNEED:
1194         case MADV_COLLAPSE:
1195                 return true;
1196         default:
1197                 return false;
1198         }
1199 }
1200
1201 /*
1202  * Walk the vmas in range [start,end), and call the visit function on each one.
1203  * The visit function will get start and end parameters that cover the overlap
1204  * between the current vma and the original range.  Any unmapped regions in the
1205  * original range will result in this function returning -ENOMEM while still
1206  * calling the visit function on all of the existing vmas in the range.
1207  * Must be called with the mmap_lock held for reading or writing.
1208  */
1209 static
1210 int madvise_walk_vmas(struct mm_struct *mm, unsigned long start,
1211                       unsigned long end, unsigned long arg,
1212                       int (*visit)(struct vm_area_struct *vma,
1213                                    struct vm_area_struct **prev, unsigned long start,
1214                                    unsigned long end, unsigned long arg))
1215 {
1216         struct vm_area_struct *vma;
1217         struct vm_area_struct *prev;
1218         unsigned long tmp;
1219         int unmapped_error = 0;
1220
1221         /*
1222          * If the interval [start,end) covers some unmapped address
1223          * ranges, just ignore them, but return -ENOMEM at the end.
1224          * This differs from how mlock etc. handle unmapped ranges.
1225          */
1226         vma = find_vma_prev(mm, start, &prev);
1227         if (vma && start > vma->vm_start)
1228                 prev = vma;
1229
1230         for (;;) {
1231                 int error;
1232
1233                 /* Still start < end. */
1234                 if (!vma)
1235                         return -ENOMEM;
1236
1237                 /* Here start < (end|vma->vm_end). */
1238                 if (start < vma->vm_start) {
1239                         unmapped_error = -ENOMEM;
1240                         start = vma->vm_start;
1241                         if (start >= end)
1242                                 break;
1243                 }
1244
1245                 /* Here vma->vm_start <= start < (end|vma->vm_end) */
1246                 tmp = vma->vm_end;
1247                 if (end < tmp)
1248                         tmp = end;
1249
1250                 /* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
1251                 error = visit(vma, &prev, start, tmp, arg);
1252                 if (error)
1253                         return error;
1254                 start = tmp;
1255                 if (prev && start < prev->vm_end)
1256                         start = prev->vm_end;
1257                 if (start >= end)
1258                         break;
1259                 if (prev)
1260                         vma = find_vma(mm, prev->vm_end);
1261                 else    /* madvise_remove dropped mmap_lock */
1262                         vma = find_vma(mm, start);
1263         }
1264
1265         return unmapped_error;
1266 }
1267
1268 #ifdef CONFIG_ANON_VMA_NAME
1269 static int madvise_vma_anon_name(struct vm_area_struct *vma,
1270                                  struct vm_area_struct **prev,
1271                                  unsigned long start, unsigned long end,
1272                                  unsigned long anon_name)
1273 {
1274         int error;
1275
1276         /* Only anonymous mappings can be named */
1277         if (vma->vm_file && !vma_is_anon_shmem(vma))
1278                 return -EBADF;
1279
1280         error = madvise_update_vma(vma, prev, start, end, vma->vm_flags,
1281                                    (struct anon_vma_name *)anon_name);
1282
1283         /*
1284          * madvise() returns EAGAIN if kernel resources, such as
1285          * slab, are temporarily unavailable.
1286          */
1287         if (error == -ENOMEM)
1288                 error = -EAGAIN;
1289         return error;
1290 }
1291
1292 int madvise_set_anon_name(struct mm_struct *mm, unsigned long start,
1293                           unsigned long len_in, struct anon_vma_name *anon_name)
1294 {
1295         unsigned long end;
1296         unsigned long len;
1297
1298         if (start & ~PAGE_MASK)
1299                 return -EINVAL;
1300         len = (len_in + ~PAGE_MASK) & PAGE_MASK;
1301
1302         /* Check to see whether len was rounded up from small -ve to zero */
1303         if (len_in && !len)
1304                 return -EINVAL;
1305
1306         end = start + len;
1307         if (end < start)
1308                 return -EINVAL;
1309
1310         if (end == start)
1311                 return 0;
1312
1313         return madvise_walk_vmas(mm, start, end, (unsigned long)anon_name,
1314                                  madvise_vma_anon_name);
1315 }
1316 #endif /* CONFIG_ANON_VMA_NAME */
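
/*
 * Illustrative userspace sketch (not part of this kernel source): naming an
 * anonymous mapping via prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, ...), which
 * reaches madvise_set_anon_name() above when CONFIG_ANON_VMA_NAME is set.
 * The fallback constants are an assumption for older prctl.h headers.
 *
 *	#include <stddef.h>
 *	#include <sys/prctl.h>
 *
 *	#ifndef PR_SET_VMA
 *	#define PR_SET_VMA		0x53564d41
 *	#define PR_SET_VMA_ANON_NAME	0
 *	#endif
 *
 *	static int name_mapping(void *addr, size_t len, const char *name)
 *	{
 *		// The name then shows up as [anon:<name>] in /proc/<pid>/maps.
 *		return prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME,
 *			     (unsigned long)addr, len, (unsigned long)name);
 *	}
 */
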
1317 /*
1318  * The madvise(2) system call.
1319  *
1320  * Applications can use madvise() to advise the kernel how it should
1321  * handle paging I/O in this VM area.  The idea is to help the kernel
1322  * use appropriate read-ahead and caching techniques.  The information
1323  * provided is advisory only, and can be safely disregarded by the
1324  * kernel without affecting the correct operation of the application.
1325  *
1326  * behavior values:
1327  *  MADV_NORMAL - the default behavior is to read clusters.  This
1328  *              results in some read-ahead and read-behind.
1329  *  MADV_RANDOM - the system should read the minimum amount of data
1330  *              on any access, since it is unlikely that the appli-
1331  *              on any access, since it is unlikely that the application
1332  *              will need more than what it asks for.
1333  *              once, so they can be aggressively read ahead, and
1334  *              can be freed soon after they are accessed.
1335  *  MADV_WILLNEED - the application is notifying the system to read
1336  *              some pages ahead.
1337  *  MADV_DONTNEED - the application is finished with the given range,
1338  *              so the kernel can free resources associated with it.
1339  *  MADV_FREE - the application marks pages in the given range as lazy free,
1340  *              where actual purges are postponed until memory pressure happens.
1341  *  MADV_REMOVE - the application wants to free up the given range of
1342  *              pages and associated backing store.
1343  *  MADV_DONTFORK - omit this area from child's address space when forking:
1344  *              typically, to avoid COWing pages pinned by get_user_pages().
1345  *  MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking.
1346  *  MADV_WIPEONFORK - present the child process with zero-filled memory in this
1347  *              range after a fork.
1348  *  MADV_KEEPONFORK - undo the effect of MADV_WIPEONFORK
1349  *  MADV_HWPOISON - trigger memory error handler as if the given memory range
1350  *              were corrupted by unrecoverable hardware memory failure.
1351  *  MADV_SOFT_OFFLINE - try to soft-offline the given range of memory.
1352  *  MADV_MERGEABLE - the application recommends that KSM try to merge pages in
1353  *              this area with pages of identical content from other such areas.
1354  *  MADV_UNMERGEABLE- cancel MADV_MERGEABLE: no longer merge pages with others.
1355  *  MADV_HUGEPAGE - the application wants to back the given range by transparent
1356  *              huge pages in the future. Existing pages might be coalesced and
1357  *              new pages might be allocated as THP.
1358  *  MADV_NOHUGEPAGE - mark the given range as not worth being backed by
1359  *              transparent huge pages so the existing pages will not be
1360  *              coalesced into THP and new pages will not be allocated as THP.
1361  *  MADV_COLLAPSE - synchronously coalesce pages into new THP.
1362  *  MADV_DONTDUMP - the application wants to prevent pages in the given range
1363  *              from being included in its core dump.
1364  *  MADV_DODUMP - cancel MADV_DONTDUMP: no longer exclude from core dump.
1365  *  MADV_COLD - the application is not expected to use this memory soon,
1366  *              deactivate pages in this range so that they can be reclaimed
1367  *              easily if memory pressure happens.
1368  *  MADV_PAGEOUT - the application is not expected to use this memory soon,
1369  *              page out the pages in this range immediately.
1370  *  MADV_POPULATE_READ - populate (prefault) page tables readable by
1371  *              triggering read faults if required
1372  *  MADV_POPULATE_WRITE - populate (prefault) page tables writable by
1373  *              triggering write faults if required
1374  *
1375  * return values:
1376  *  zero    - success
1377  *  -EINVAL - start + len < 0, start is not page-aligned,
1378  *              "behavior" is not a valid value, or the application
1379  *              is attempting to release locked or shared pages,
1380  *              or the specified address range includes file, Huge TLB,
1381  *              MAP_SHARED or VM_PFNMAP ranges.
1382  *  -ENOMEM - addresses in the specified range are not currently
1383  *              mapped, or are outside the AS of the process.
1384  *  -EIO    - an I/O error occurred while paging in data.
1385  *  -EBADF  - map exists, but area maps something that isn't a file.
1386  *  -EAGAIN - a kernel resource was temporarily unavailable.
1387  */
1388 int do_madvise(struct mm_struct *mm, unsigned long start, size_t len_in, int behavior)
1389 {
1390         unsigned long end;
1391         int error;
1392         int write;
1393         size_t len;
1394         struct blk_plug plug;
1395
1396         if (!madvise_behavior_valid(behavior))
1397                 return -EINVAL;
1398
1399         if (!PAGE_ALIGNED(start))
1400                 return -EINVAL;
1401         len = PAGE_ALIGN(len_in);
1402
1403         /* Check to see whether len was rounded up from small -ve to zero */
1404         if (len_in && !len)
1405                 return -EINVAL;
1406
1407         end = start + len;
1408         if (end < start)
1409                 return -EINVAL;
1410
1411         if (end == start)
1412                 return 0;
1413
1414 #ifdef CONFIG_MEMORY_FAILURE
1415         if (behavior == MADV_HWPOISON || behavior == MADV_SOFT_OFFLINE)
1416                 return madvise_inject_error(behavior, start, start + len_in);
1417 #endif
1418
1419         write = madvise_need_mmap_write(behavior);
1420         if (write) {
1421                 if (mmap_write_lock_killable(mm))
1422                         return -EINTR;
1423         } else {
1424                 mmap_read_lock(mm);
1425         }
1426
1427         start = untagged_addr_remote(mm, start);
1428         end = start + len;
1429
1430         blk_start_plug(&plug);
1431         error = madvise_walk_vmas(mm, start, end, behavior,
1432                         madvise_vma_behavior);
1433         blk_finish_plug(&plug);
1434         if (write)
1435                 mmap_write_unlock(mm);
1436         else
1437                 mmap_read_unlock(mm);
1438
1439         return error;
1440 }
1441
1442 SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
1443 {
1444         return do_madvise(current->mm, start, len_in, behavior);
1445 }
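
/*
 * Illustrative userspace sketch (not part of this kernel source): advising the
 * kernel about the access pattern of a file mapping, using the behavior
 * values documented above.  The helper name and file path handling are
 * assumptions for the example.
 *
 *	#include <fcntl.h>
 *	#include <sys/mman.h>
 *	#include <sys/stat.h>
 *	#include <unistd.h>
 *
 *	static void *map_for_sequential_scan(const char *path, size_t *lenp)
 *	{
 *		struct stat st;
 *		void *p;
 *		int fd = open(path, O_RDONLY);
 *
 *		if (fd < 0)
 *			return NULL;
 *		if (fstat(fd, &st) < 0 || st.st_size == 0) {
 *			close(fd);
 *			return NULL;
 *		}
 *		p = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
 *		close(fd);
 *		if (p == MAP_FAILED)
 *			return NULL;
 *		// Hint aggressive readahead for a one-pass scan and start
 *		// asynchronous read-in of the whole range now.
 *		madvise(p, st.st_size, MADV_SEQUENTIAL);
 *		madvise(p, st.st_size, MADV_WILLNEED);
 *		*lenp = st.st_size;
 *		return p;
 *	}
 */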
1446
1447 SYSCALL_DEFINE5(process_madvise, int, pidfd, const struct iovec __user *, vec,
1448                 size_t, vlen, int, behavior, unsigned int, flags)
1449 {
1450         ssize_t ret;
1451         struct iovec iovstack[UIO_FASTIOV];
1452         struct iovec *iov = iovstack;
1453         struct iov_iter iter;
1454         struct task_struct *task;
1455         struct mm_struct *mm;
1456         size_t total_len;
1457         unsigned int f_flags;
1458
1459         if (flags != 0) {
1460                 ret = -EINVAL;
1461                 goto out;
1462         }
1463
1464         ret = import_iovec(ITER_DEST, vec, vlen, ARRAY_SIZE(iovstack), &iov, &iter);
1465         if (ret < 0)
1466                 goto out;
1467
1468         task = pidfd_get_task(pidfd, &f_flags);
1469         if (IS_ERR(task)) {
1470                 ret = PTR_ERR(task);
1471                 goto free_iov;
1472         }
1473
1474         if (!process_madvise_behavior_valid(behavior)) {
1475                 ret = -EINVAL;
1476                 goto release_task;
1477         }
1478
1479         /* Require PTRACE_MODE_READ to avoid leaking ASLR metadata. */
1480         mm = mm_access(task, PTRACE_MODE_READ_FSCREDS);
1481         if (IS_ERR_OR_NULL(mm)) {
1482                 ret = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
1483                 goto release_task;
1484         }
1485
1486         /*
1487          * Require CAP_SYS_NICE for influencing process performance. Note that
1488          * only non-destructive hints are currently supported.
1489          */
1490         if (!capable(CAP_SYS_NICE)) {
1491                 ret = -EPERM;
1492                 goto release_mm;
1493         }
1494
1495         total_len = iov_iter_count(&iter);
1496
1497         while (iov_iter_count(&iter)) {
1498                 ret = do_madvise(mm, (unsigned long)iter_iov_addr(&iter),
1499                                         iter_iov_len(&iter), behavior);
1500                 if (ret < 0)
1501                         break;
1502                 iov_iter_advance(&iter, iter_iov_len(&iter));
1503         }
1504
1505         ret = (total_len - iov_iter_count(&iter)) ? : ret;
1506
1507 release_mm:
1508         mmput(mm);
1509 release_task:
1510         put_task_struct(task);
1511 free_iov:
1512         kfree(iov);
1513 out:
1514         return ret;
1515 }
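
/*
 * Illustrative userspace sketch (not part of this kernel source): asking the
 * kernel to page out a range of another process's memory with
 * process_madvise().  Raw syscall numbers are used because libc wrappers may
 * be missing; the pid and address range are placeholders, and the caller is
 * assumed to have CAP_SYS_NICE plus PTRACE_MODE_READ access to the target.
 *
 *	#include <sys/mman.h>
 *	#include <sys/syscall.h>
 *	#include <sys/uio.h>
 *	#include <unistd.h>
 *
 *	static long pageout_remote(pid_t pid, void *addr, size_t len)
 *	{
 *		struct iovec iov = { .iov_base = addr, .iov_len = len };
 *		int pidfd = syscall(SYS_pidfd_open, pid, 0);
 *		long ret;
 *
 *		if (pidfd < 0)
 *			return -1;
 *		// vlen == 1; flags must be 0 (see the flags check above).
 *		ret = syscall(SYS_process_madvise, pidfd, &iov, 1,
 *			      MADV_PAGEOUT, 0);
 *		close(pidfd);
 *		return ret;
 *	}
 */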