// SPDX-License-Identifier: GPL-2.0
/*
 *  Lockless get_user_pages_fast for s390
 *
 *  Copyright IBM Corp. 2010
 *  Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/vmstat.h>
#include <linux/pagemap.h>
#include <linux/rwsem.h>
#include <asm/pgtable.h>

/*
 * The performance critical leaf functions are made noinline otherwise gcc
 * inlines everything into a single function which results in too much
 * register pressure.
 */
static inline int gup_pte_range(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	struct page *head, *page;
	unsigned long mask;
	pte_t *ptep, pte;

	mask = (write ? _PAGE_PROTECT : 0) | _PAGE_INVALID | _PAGE_SPECIAL;

	ptep = ((pte_t *) pmd_deref(pmd)) + pte_index(addr);
	do {
		pte = *ptep;
		barrier();
		/* Similar to the PMD case, NUMA hinting must take slow path */
		if (pte_protnone(pte))
			return 0;
		if ((pte_val(pte) & mask) != 0)
			return 0;
		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
		page = pte_page(pte);
		head = compound_head(page);
		/* Take a speculative reference, then recheck the pte */
		if (unlikely(WARN_ON_ONCE(page_ref_count(head) < 0)
		    || !page_cache_get_speculative(head)))
			return 0;
		if (unlikely(pte_val(pte) != pte_val(*ptep))) {
			put_page(head);
			return 0;
		}
		VM_BUG_ON_PAGE(compound_head(page) != head, page);
		pages[*nr] = page;
		(*nr)++;

	} while (ptep++, addr += PAGE_SIZE, addr != end);

	return 1;
}

static inline int gup_huge_pmd(pmd_t *pmdp, pmd_t pmd, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	struct page *head, *page;
	unsigned long mask;
	int refs;

	mask = (write ? _SEGMENT_ENTRY_PROTECT : 0) | _SEGMENT_ENTRY_INVALID;
	if ((pmd_val(pmd) & mask) != 0)
		return 0;
	VM_BUG_ON(!pfn_valid(pmd_val(pmd) >> PAGE_SHIFT));

	refs = 0;
	head = pmd_page(pmd);
	page = head + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
	do {
		VM_BUG_ON(compound_head(page) != head);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (unlikely(WARN_ON_ONCE(page_ref_count(head) < 0)
	    || !page_cache_add_speculative(head, refs))) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pmd_val(pmd) != pmd_val(*pmdp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	return 1;
}

static inline int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long next;
	pmd_t *pmdp, pmd;

	pmdp = (pmd_t *) pudp;
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pmdp = (pmd_t *) pud_deref(pud);
	pmdp += pmd_index(addr);
	do {
		pmd = *pmdp;
		barrier();
		next = pmd_addr_end(addr, end);
		if (pmd_none(pmd))
			return 0;
		if (unlikely(pmd_large(pmd))) {
			/*
			 * NUMA hinting faults need to be handled in the GUP
			 * slowpath for accounting purposes and so that they
			 * can be serialised against THP migration.
			 */
			if (pmd_protnone(pmd))
				return 0;
			if (!gup_huge_pmd(pmdp, pmd, addr, next,
					  write, pages, nr))
				return 0;
		} else if (!gup_pte_range(pmdp, pmd, addr, next,
					  write, pages, nr))
			return 0;
	} while (pmdp++, addr = next, addr != end);

	return 1;
}

static int gup_huge_pud(pud_t *pudp, pud_t pud, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	struct page *head, *page;
	unsigned long mask;
	int refs;

	mask = (write ? _REGION_ENTRY_PROTECT : 0) | _REGION_ENTRY_INVALID;
	if ((pud_val(pud) & mask) != 0)
		return 0;
	VM_BUG_ON(!pfn_valid(pud_pfn(pud)));

	refs = 0;
	head = pud_page(pud);
	page = head + ((addr & ~PUD_MASK) >> PAGE_SHIFT);
	do {
		VM_BUG_ON_PAGE(compound_head(page) != head, page);
		pages[*nr] = page;
		(*nr)++;
		page++;
		refs++;
	} while (addr += PAGE_SIZE, addr != end);

	if (unlikely(WARN_ON_ONCE(page_ref_count(head) < 0)
	    || !page_cache_add_speculative(head, refs))) {
		*nr -= refs;
		return 0;
	}

	if (unlikely(pud_val(pud) != pud_val(*pudp))) {
		*nr -= refs;
		while (refs--)
			put_page(head);
		return 0;
	}

	return 1;
}

static inline int gup_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long next;
	pud_t *pudp, pud;

	pudp = (pud_t *) p4dp;
	if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pudp = (pud_t *) p4d_deref(p4d);
	pudp += pud_index(addr);
	do {
		pud = *pudp;
		barrier();
		next = pud_addr_end(addr, end);
		if (pud_none(pud))
			return 0;
		if (unlikely(pud_large(pud))) {
			if (!gup_huge_pud(pudp, pud, addr, next, write, pages,
					  nr))
				return 0;
		} else if (!gup_pmd_range(pudp, pud, addr, next, write, pages,
					  nr))
			return 0;
	} while (pudp++, addr = next, addr != end);

	return 1;
}

static inline int gup_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr,
		unsigned long end, int write, struct page **pages, int *nr)
{
	unsigned long next;
	p4d_t *p4dp, p4d;

	p4dp = (p4d_t *) pgdp;
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
		p4dp = (p4d_t *) pgd_deref(pgd);
	p4dp += p4d_index(addr);
	do {
		p4d = *p4dp;
		barrier();
		next = p4d_addr_end(addr, end);
		if (p4d_none(p4d))
			return 0;
		if (!gup_pud_range(p4dp, p4d, addr, next, write, pages, nr))
			return 0;
	} while (p4dp++, addr = next, addr != end);

	return 1;
}

/*
 * Like get_user_pages_fast() except it's IRQ-safe in that it won't fall
 * back to the regular GUP.
 */
int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
			  struct page **pages)
{
	struct mm_struct *mm = current->mm;
	unsigned long addr, len, end;
	unsigned long next, flags;
	pgd_t *pgdp, pgd;
	int nr = 0;

	start &= PAGE_MASK;
	addr = start;
	len = (unsigned long) nr_pages << PAGE_SHIFT;
	end = start + len;
	if ((end <= start) || (end > mm->context.asce_limit))
		return 0;
	/*
	 * local_irq_save() doesn't prevent pagetable teardown, but does
	 * prevent the pagetables from being freed on s390.
	 *
	 * So long as we atomically load page table pointers versus teardown,
	 * we can follow the address down to the page and take a ref on it.
	 */
	local_irq_save(flags);
	pgdp = pgd_offset(mm, addr);
	do {
		pgd = *pgdp;
		barrier();
		next = pgd_addr_end(addr, end);
		if (pgd_none(pgd))
			break;
		if (!gup_p4d_range(pgdp, pgd, addr, next, write, pages, &nr))
			break;
	} while (pgdp++, addr = next, addr != end);
	local_irq_restore(flags);

	return nr;
}

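/*
 * Illustrative sketch, not part of the original file: because
 * __get_user_pages_fast() never sleeps and never falls back to the regular
 * GUP slow path, it may be used from atomic context. A hypothetical caller
 * holding a spinlock could pin a single page roughly like this (the variable
 * names below are made up for illustration only):
 *
 *	struct page *page;
 *
 *	if (__get_user_pages_fast(uaddr, 1, 1, &page) != 1)
 *		return -EFAULT;
 *	...
 *	put_page(page);
 */
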
/**
 * get_user_pages_fast() - pin user pages in memory
 * @start:	starting user address
 * @nr_pages:	number of pages from start to pin
 * @write:	whether pages will be written to
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_pages long.
 *
 * Attempt to pin user pages in memory without taking mm->mmap_sem.
 * If not successful, it will fall back to taking the lock and
 * calling get_user_pages().
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_pages is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno.
 */
int get_user_pages_fast(unsigned long start, int nr_pages, int write,
			struct page **pages)
{
	int nr, ret;

	might_sleep();
	start &= PAGE_MASK;
	/*
	 * The FAST_GUP case requires FOLL_WRITE even for pure reads,
	 * because get_user_pages() may need to cause an early COW in
	 * order to avoid confusing the normal COW routines. So only
	 * targets that are already writable are safe to do by just
	 * looking at the page tables.
	 */
	nr = __get_user_pages_fast(start, nr_pages, 1, pages);
	if (nr == nr_pages)
		return nr;

	/* Try to get the remaining pages with get_user_pages */
	start += (unsigned long) nr << PAGE_SHIFT;
	pages += nr;
	ret = get_user_pages_unlocked(start, nr_pages - nr, pages,
				      write ? FOLL_WRITE : 0);
	/* Have to be a bit careful with return values */
	if (nr > 0)
		ret = (ret < 0) ? nr : ret + nr;
	return ret;
}
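
/*
 * Illustrative usage sketch, not part of the original file: a caller that
 * wants to pin a user buffer, operate on the pages, and then release them
 * would typically follow the pattern below. The names are hypothetical and
 * only meant to show the calling convention of this interface:
 *
 *	int i, pinned;
 *
 *	pinned = get_user_pages_fast(user_addr, nr_pages, 1, pages);
 *	if (pinned < 0)
 *		return pinned;
 *	... access the pinned pages; pinned may be fewer than nr_pages ...
 *	for (i = 0; i < pinned; i++)
 *		put_page(pages[i]);
 */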