GNU Linux-libre 4.14.251-gnu1
mm/page_vma_mapped.c
// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/rmap.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"

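/* Wind up the walk (unmap the pte, drop the page table lock) and report "not mapped" */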
static inline bool not_found(struct page_vma_mapped_walk *pvmw)
{
	page_vma_mapped_walk_done(pvmw);
	return false;
}

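/*
 * map_pte - map the page table and take the pte lock for @pvmw->address.
 *
 * Unless PVMW_SYNC is set, ptes that cannot possibly map @pvmw->page are
 * rejected up front: when looking for a migration entry only swap ptes are
 * accepted; otherwise only present ptes and device-private swap entries
 * (un-addressable ZONE_DEVICE memory) are accepted.
 *
 * Returns true with pvmw->ptl held so that check_pte() can look closer;
 * returns false, with the pte still mapped but the lock not taken, so the
 * caller can keep scanning.
 */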
static bool map_pte(struct page_vma_mapped_walk *pvmw)
{
	pvmw->pte = pte_offset_map(pvmw->pmd, pvmw->address);
	if (!(pvmw->flags & PVMW_SYNC)) {
		if (pvmw->flags & PVMW_MIGRATION) {
			if (!is_swap_pte(*pvmw->pte))
				return false;
		} else {
			/*
			 * We get here when we are trying to unmap a private
			 * device page from the process address space.  Such a
			 * page is not CPU accessible and is therefore mapped
			 * as a special swap entry; nonetheless it still counts
			 * as a valid regular mapping of the page (and is
			 * accounted as such in the page's map count).
			 *
			 * So handle this special case as if it were a normal
			 * page mapping, i.e. lock the CPU page table and
			 * return true.
			 *
			 * For more details on device private memory see HMM
			 * (include/linux/hmm.h or mm/hmm.c).
			 */
			if (is_swap_pte(*pvmw->pte)) {
				swp_entry_t entry;

				/* Handle un-addressable ZONE_DEVICE memory */
				entry = pte_to_swp_entry(*pvmw->pte);
				if (!is_device_private_entry(entry))
					return false;
			} else if (!pte_present(*pvmw->pte))
				return false;
		}
	}
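	/* The pte passed the quick checks (or PVMW_SYNC): lock it for the caller */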
	pvmw->ptl = pte_lockptr(pvmw->vma->vm_mm, pvmw->pmd);
	spin_lock(pvmw->ptl);
	return true;
}

/**
 * check_pte - check if @pvmw->page is mapped at @pvmw->pte
 *
 * page_vma_mapped_walk() found a place where @pvmw->page is *potentially*
 * mapped. check_pte() has to validate this.
 *
 * @pvmw->pte may point to an empty PTE, a swap PTE or a PTE pointing to an
 * arbitrary page.
 *
 * If the PVMW_MIGRATION flag is set, returns true if @pvmw->pte contains a
 * migration entry that points to @pvmw->page, or to any subpage in case of
 * THP.
 *
 * If the PVMW_MIGRATION flag is not set, returns true if @pvmw->pte points
 * to @pvmw->page, or to any subpage in case of THP.
 *
 * Otherwise, returns false.
 */
static bool check_pte(struct page_vma_mapped_walk *pvmw)
{
	unsigned long pfn;

	if (pvmw->flags & PVMW_MIGRATION) {
		swp_entry_t entry;
		if (!is_swap_pte(*pvmw->pte))
			return false;
		entry = pte_to_swp_entry(*pvmw->pte);

		if (!is_migration_entry(entry))
			return false;

		pfn = migration_entry_to_pfn(entry);
	} else if (is_swap_pte(*pvmw->pte)) {
		swp_entry_t entry;

		/* Handle un-addressable ZONE_DEVICE memory */
		entry = pte_to_swp_entry(*pvmw->pte);
		if (!is_device_private_entry(entry))
			return false;

		pfn = device_private_entry_to_pfn(entry);
	} else {
		if (!pte_present(*pvmw->pte))
			return false;

		pfn = pte_pfn(*pvmw->pte);
	}

	if (pfn < page_to_pfn(pvmw->page))
		return false;

	/* THP can be referenced by any subpage */
	if (pfn - page_to_pfn(pvmw->page) >= hpage_nr_pages(pvmw->page))
		return false;

	return true;
}

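/*
 * step_forward - advance @pvmw->address to the next @size-aligned boundary.
 *
 * Used to skip the whole range covered by a non-present page table entry.
 * If the addition wraps past the end of the address space, park the address
 * at ULONG_MAX so the "pvmw->address < end" check ends the walk.
 */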
static void step_forward(struct page_vma_mapped_walk *pvmw, unsigned long size)
{
	pvmw->address = (pvmw->address + size) & ~(size - 1);
	if (!pvmw->address)
		pvmw->address = ULONG_MAX;
}

/**
 * page_vma_mapped_walk - check if @pvmw->page is mapped in @pvmw->vma at
 * @pvmw->address
 * @pvmw: pointer to struct page_vma_mapped_walk. page, vma, address and flags
 * must be set. pmd, pte and ptl must be NULL.
 *
 * Returns true if the page is mapped in the vma. @pvmw->pmd and @pvmw->pte
 * point to the relevant page table entries. @pvmw->ptl is locked.
 * @pvmw->address is adjusted if needed (for PTE-mapped THPs).
 *
 * If @pvmw->pmd is set but @pvmw->pte is not, you have found a PMD-mapped
 * page (usually a THP). For a PTE-mapped THP, you should run
 * page_vma_mapped_walk() in a loop to find all PTEs that map the THP.
 *
 * For HugeTLB pages, @pvmw->pte is set to the relevant page table entry
 * regardless of which page table level the page is mapped at; @pvmw->pmd is
 * NULL.
 *
 * Returns false if there are no more page table entries for the page in
 * the vma. @pvmw->ptl is unlocked and @pvmw->pte is unmapped.
 *
 * If you need to stop the walk before page_vma_mapped_walk() returns false,
 * use page_vma_mapped_walk_done(). It will do the housekeeping.
 */
bool page_vma_mapped_walk(struct page_vma_mapped_walk *pvmw)
{
	struct mm_struct *mm = pvmw->vma->vm_mm;
	struct page *page = pvmw->page;
	unsigned long end;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t pmde;

	/* The only possible pmd mapping has been handled on the last iteration */
	if (pvmw->pmd && !pvmw->pte)
		return not_found(pvmw);

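	/*
	 * A hugetlb page is mapped by a single huge page table entry,
	 * so there is only one entry to look up and check.
	 */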
	if (unlikely(PageHuge(page))) {
		/* The only possible mapping was handled on the last iteration */
		if (pvmw->pte)
			return not_found(pvmw);

		/* when the pud is not present, the pte will be NULL */
		pvmw->pte = huge_pte_offset(mm, pvmw->address,
					    PAGE_SIZE << compound_order(page));
		if (!pvmw->pte)
			return false;

		pvmw->ptl = huge_pte_lockptr(page_hstate(page), mm, pvmw->pte);
		spin_lock(pvmw->ptl);
		if (!check_pte(pvmw))
			return not_found(pvmw);
		return true;
	}

	/*
	 * Seeking to the next pte only makes sense for THP.
	 * But more important than that optimization is to filter out
	 * any PageKsm page, whose page->index misleads vma_address()
	 * and vma_address_end() to disaster.
	 */
	end = PageTransCompound(page) ?
		vma_address_end(page, pvmw->vma) :
		pvmw->address + PAGE_SIZE;
	if (pvmw->pte)
		goto next_pte;
restart:
	do {
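		/*
		 * Walk down from the pgd; a level that is not present lets
		 * step_forward() skip the whole range it would have covered.
		 */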
		pgd = pgd_offset(mm, pvmw->address);
		if (!pgd_present(*pgd)) {
			step_forward(pvmw, PGDIR_SIZE);
			continue;
		}
		p4d = p4d_offset(pgd, pvmw->address);
		if (!p4d_present(*p4d)) {
			step_forward(pvmw, P4D_SIZE);
			continue;
		}
		pud = pud_offset(p4d, pvmw->address);
		if (!pud_present(*pud)) {
			step_forward(pvmw, PUD_SIZE);
			continue;
		}

		pvmw->pmd = pmd_offset(pud, pvmw->address);
		/*
		 * Make sure the pmd value isn't cached in a register by the
		 * compiler and used as a stale value after we've observed a
		 * subsequent update.
		 */
		pmde = READ_ONCE(*pvmw->pmd);

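		/*
		 * A huge pmd either maps the whole THP directly or holds a
		 * THP migration entry: take the pmd lock and re-check, since
		 * the pmd may have been split or changed in the meantime.
		 */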
		if (pmd_trans_huge(pmde) || is_pmd_migration_entry(pmde)) {
			pvmw->ptl = pmd_lock(mm, pvmw->pmd);
			pmde = *pvmw->pmd;
			if (likely(pmd_trans_huge(pmde))) {
				if (pvmw->flags & PVMW_MIGRATION)
					return not_found(pvmw);
				if (pmd_page(pmde) != page)
					return not_found(pvmw);
				return true;
			}
			if (!pmd_present(pmde)) {
				swp_entry_t entry;

				if (!thp_migration_supported() ||
				    !(pvmw->flags & PVMW_MIGRATION))
					return not_found(pvmw);
				entry = pmd_to_swp_entry(pmde);
				if (!is_migration_entry(entry) ||
				    migration_entry_to_page(entry) != page)
					return not_found(pvmw);
				return true;
			}
			/* THP pmd was split under us: handle on pte level */
			spin_unlock(pvmw->ptl);
			pvmw->ptl = NULL;
		} else if (!pmd_present(pmde)) {
			/*
			 * If PVMW_SYNC, take and drop THP pmd lock so that we
			 * cannot return prematurely, while zap_huge_pmd() has
			 * cleared *pmd but not decremented compound_mapcount().
			 */
			if ((pvmw->flags & PVMW_SYNC) &&
			    PageTransCompound(page)) {
				spinlock_t *ptl = pmd_lock(mm, pvmw->pmd);

				spin_unlock(ptl);
			}
			step_forward(pvmw, PMD_SIZE);
			continue;
		}
		if (!map_pte(pvmw))
			goto next_pte;
this_pte:
		if (check_pte(pvmw))
			return true;
next_pte:
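		/*
		 * Advance to the next pte that is not none; restart the
		 * page table walk when a pmd boundary is crossed, and stop
		 * once the end of the mapping range is reached.
		 */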
		do {
			pvmw->address += PAGE_SIZE;
			if (pvmw->address >= end)
				return not_found(pvmw);
			/* Did we cross page table boundary? */
			if ((pvmw->address & (PMD_SIZE - PAGE_SIZE)) == 0) {
				if (pvmw->ptl) {
					spin_unlock(pvmw->ptl);
					pvmw->ptl = NULL;
				}
				pte_unmap(pvmw->pte);
				pvmw->pte = NULL;
				goto restart;
			}
			pvmw->pte++;
			if ((pvmw->flags & PVMW_SYNC) && !pvmw->ptl) {
				pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
				spin_lock(pvmw->ptl);
			}
		} while (pte_none(*pvmw->pte));

		if (!pvmw->ptl) {
			pvmw->ptl = pte_lockptr(mm, pvmw->pmd);
			spin_lock(pvmw->ptl);
		}
		goto this_pte;
	} while (pvmw->address < end);

	return false;
}
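
/*
 * Typical usage of page_vma_mapped_walk() (a sketch, not a caller in this
 * file; the rmap walkers in mm/rmap.c follow this pattern, and
 * must_stop_early below is a stand-in for whatever early-exit condition the
 * caller has).  Inside the loop body pvmw.pte (or pvmw.pmd for a PMD
 * mapping) is valid and pvmw.ptl is held:
 *
 *	struct page_vma_mapped_walk pvmw = {
 *		.page = page,
 *		.vma = vma,
 *		.address = address,
 *	};
 *
 *	while (page_vma_mapped_walk(&pvmw)) {
 *		if (must_stop_early) {
 *			page_vma_mapped_walk_done(&pvmw);
 *			break;
 *		}
 *		... examine or update the entry here ...
 *	}
 */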

/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Returns 1 if the page is mapped into the page tables of the VMA, 0
 * if the page is not mapped into the page tables of this VMA.  Only
 * valid for normal file or anonymous VMAs.
 */
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
	struct page_vma_mapped_walk pvmw = {
		.page = page,
		.vma = vma,
		.flags = PVMW_SYNC,
	};

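	/* vma_address() reports -EFAULT when the page falls outside the vma */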
	pvmw.address = vma_address(page, vma);
	if (pvmw.address == -EFAULT)
		return 0;
	if (!page_vma_mapped_walk(&pvmw))
		return 0;
	page_vma_mapped_walk_done(&pvmw);
	return 1;
}