/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_NOHASH_PGTABLE_H
#define _ASM_POWERPC_NOHASH_PGTABLE_H

#ifndef __ASSEMBLY__
static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
				     unsigned long clr, unsigned long set, int huge);
#endif

#if defined(CONFIG_PPC64)
#include <asm/nohash/64/pgtable.h>
#else
#include <asm/nohash/32/pgtable.h>
#endif

/*
 * _PAGE_CHG_MASK masks the bits that are to be preserved across
 * pgprot changes.
 */
#define _PAGE_CHG_MASK	(PTE_RPN_MASK | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SPECIAL)

/* Permission masks used for kernel mappings */
#define PAGE_KERNEL	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RW)
#define PAGE_KERNEL_NC	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE)
#define PAGE_KERNEL_NCG	__pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE | _PAGE_GUARDED)
#define PAGE_KERNEL_X	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_BASE | _PAGE_KERNEL_RO)
#define PAGE_KERNEL_ROX	__pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX)
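
/*
 * Usage sketch (illustrative, not from a specific caller): device MMIO is
 * normally mapped non-cached and guarded, e.g.
 *
 *	map_kernel_page(va, pa, PAGE_KERNEL_NCG);
 *
 * using map_kernel_page() as declared at the bottom of this header.
 */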

#ifndef __ASSEMBLY__

extern int icache_44x_need_flush;

/*
 * PTE updates. This function is called whenever an existing
 * valid PTE is updated. This does -not- include set_pte_at()
 * which nowadays only sets a new PTE.
 *
 * Depending on the type of MMU, we may need to use atomic updates
 * and the PTE may be either 32 or 64 bit wide. In the latter case,
 * when using atomic updates, only the low part of the PTE is
 * accessed atomically.
 *
 * In addition, on 44x, we also maintain a global flag indicating
 * that an executable user mapping was modified, which is needed
 * to properly flush the virtually tagged instruction cache of
 * those implementations.
 */
#ifndef pte_update
static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p,
				     unsigned long clr, unsigned long set, int huge)
{
	pte_basic_t old = pte_val(*p);
	pte_basic_t new = (old & ~(pte_basic_t)clr) | set;

	if (new == old)
		return old;

	*p = __pte(new);

	if (IS_ENABLED(CONFIG_44x) && !is_kernel_addr(addr) && (old & _PAGE_EXEC))
		icache_44x_need_flush = 1;

	/* huge pages use the old page table lock */
	if (!huge)
		assert_pte_locked(mm, addr);

	return old;
}
#endif
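
/*
 * Usage sketch (illustrative): callers express a transition as a (clr, set)
 * pair, so making a PTE clean and old in one atomic step would be:
 *
 *	old = pte_update(mm, addr, ptep, _PAGE_DIRTY | _PAGE_ACCESSED, 0, 0);
 *
 * The helpers below are thin wrappers around this pattern.
 */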

static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	unsigned long old;

	old = pte_update(vma->vm_mm, addr, ptep, _PAGE_ACCESSED, 0, 0);

	return (old & _PAGE_ACCESSED) != 0;
}
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG

#ifndef ptep_set_wrprotect
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
				      pte_t *ptep)
{
	pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0);
}
#endif
#define __HAVE_ARCH_PTEP_SET_WRPROTECT

static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
				       pte_t *ptep)
{
	return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 0));
}
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_update(mm, addr, ptep, ~0UL, 0, 0);
}

/* Set the dirty and/or accessed bits atomically in a linux PTE */
#ifndef __ptep_set_access_flags
static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
					   pte_t *ptep, pte_t entry,
					   unsigned long address,
					   int psize)
{
	unsigned long set = pte_val(entry) &
			    (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
	int huge = psize > mmu_virtual_psize ? 1 : 0;

	pte_update(vma->vm_mm, address, ptep, 0, set, huge);

	flush_tlb_page(vma, address);
}
#endif
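
/*
 * Usage sketch (illustrative): the page fault path reaches this helper via
 * the generic ptep_set_access_flags() wrapper, e.g. to mark a base-page PTE
 * dirty and young after a write fault:
 *
 *	__ptep_set_access_flags(vma, ptep, entry, address, mmu_virtual_psize);
 */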

/* Generic accessors to PTE bits */
#ifndef pte_mkwrite_novma
static inline pte_t pte_mkwrite_novma(pte_t pte)
{
	/*
	 * write implies read, hence set both
	 */
	return __pte(pte_val(pte) | _PAGE_RW);
}
#endif

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_ACCESSED);
}

#ifndef pte_wrprotect
static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_WRITE);
}
#endif

#ifndef pte_mkexec
static inline pte_t pte_mkexec(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_EXEC);
}
#endif

#ifndef pte_write
static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & _PAGE_WRITE;
}
#endif
static inline int pte_dirty(pte_t pte)		{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_special(pte_t pte)	{ return pte_val(pte) & _PAGE_SPECIAL; }
static inline int pte_none(pte_t pte)		{ return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; }
static inline bool pte_hashpte(pte_t pte)	{ return false; }
static inline bool pte_ci(pte_t pte)		{ return pte_val(pte) & _PAGE_NO_CACHE; }
static inline bool pte_exec(pte_t pte)		{ return pte_val(pte) & _PAGE_EXEC; }

static inline int pte_present(pte_t pte)
{
	return pte_val(pte) & _PAGE_PRESENT;
}

static inline bool pte_hw_valid(pte_t pte)
{
	return pte_val(pte) & _PAGE_PRESENT;
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & _PAGE_ACCESSED;
}

/*
 * Don't just check for any non zero bits in __PAGE_READ, since for book3e
 * and PTE_64BIT, PAGE_KERNEL_X contains _PAGE_BAP_SR which is also in
 * _PAGE_READ. Need to explicitly match _PAGE_BAP_UR bit in that case too.
 */
#ifndef pte_read
static inline bool pte_read(pte_t pte)
{
	return (pte_val(pte) & _PAGE_READ) == _PAGE_READ;
}
#endif

/*
 * We only find the page table entry in the last level,
 * hence there is no need for other accessors.
 */
#define pte_access_permitted pte_access_permitted
static inline bool pte_access_permitted(pte_t pte, bool write)
{
	/*
	 * A read-only access is controlled by the _PAGE_READ bit.
	 * We have _PAGE_READ set for WRITE too (write implies read).
	 */
	if (!pte_present(pte) || !pte_read(pte))
		return false;

	if (write && !pte_write(pte))
		return false;

	return true;
}
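
/*
 * Usage sketch (illustrative): fast GUP-style walkers check permissions
 * before touching the page; "write" below is the caller's intent flag:
 *
 *	if (!pte_access_permitted(pte, write))
 *		return -EFAULT;
 */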

/* Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 *
 * Even if PTEs can be unsigned long long, a PFN is always an unsigned
 * long for now.
 */
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot) {
	return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) |
		     pgprot_val(pgprot)); }
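
/*
 * Example (sketch, not from a specific caller): building a cacheable
 * kernel RW entry for a given struct page:
 *
 *	pte_t pte = pfn_pte(page_to_pfn(page), PAGE_KERNEL);
 */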

/* Generic modifiers for PTE bits */
static inline pte_t pte_exprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_EXEC);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_ACCESSED);
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SPECIAL);
}

#ifndef pte_mkhuge
static inline pte_t pte_mkhuge(pte_t pte)
{
	return __pte(pte_val(pte));
}
#endif

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
}
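
/*
 * Usage sketch (illustrative): an mprotect()-style change keeps only the
 * bits in _PAGE_CHG_MASK and takes everything else from the new protection:
 *
 *	pte = pte_modify(pte, vm_get_page_prot(vma->vm_flags));
 */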

static inline int pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	return __pte(pte_val(pte) | _PAGE_SWP_EXCLUSIVE);
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	return __pte(pte_val(pte) & ~_PAGE_SWP_EXCLUSIVE);
}

/* This low level function performs the actual PTE insertion.
 * Setting the PTE depends on the MMU type and other factors. It's
 * a horrible mess that I'm not going to try to clean up now but
 * I'm keeping it in one place rather than spread around.
 */
static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep, pte_t pte, int percpu)
{
	/* Second case is 32-bit with 64-bit PTE. In this case, we
	 * can just store as long as we do the two halves in the right order
	 * with a barrier in between.
	 * In the percpu case, we also fall back to the simple update.
	 */
	if (IS_ENABLED(CONFIG_PPC32) && IS_ENABLED(CONFIG_PTE_64BIT) && !percpu) {
		__asm__ __volatile__("\
			stw%X0 %2,%0\n\
			mbar\n\
			stw%X1 %L2,%1"
		: "=m" (*ptep), "=m" (*((unsigned char *)ptep+4))
		: "r" (pte) : "memory");
		return;
	}
	/* Anything else just stores the PTE normally. That covers all 64-bit
	 * cases, and 32-bit non-hash with 32-bit PTEs.
	 */
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES)
	ptep->pte3 = ptep->pte2 = ptep->pte1 = ptep->pte = pte_val(pte);
#else
	*ptep = pte;
#endif

	/*
	 * With hardware tablewalk, a sync is needed to ensure that
	 * subsequent accesses see the PTE we just wrote. Unlike userspace
	 * mappings, we can't tolerate spurious faults, so make sure
	 * the new PTE will be seen the first time.
	 */
	if (IS_ENABLED(CONFIG_PPC_BOOK3E_64) && is_kernel_addr(addr))
		mb();
}
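
/*
 * Note (sketch): the arch's set_pte_at() is the usual entry point and is
 * expected to funnel here with percpu == 0:
 *
 *	__set_pte_at(mm, addr, ptep, pte, 0);
 */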

/*
 * Macro to mark a page protection value as "uncacheable".
 */

#define _PAGE_CACHE_CTL	(_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \
			 _PAGE_WRITETHRU)

#define pgprot_noncached(prot)	  (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_NO_CACHE | _PAGE_GUARDED))

#define pgprot_noncached_wc(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_NO_CACHE))

#define pgprot_cached(prot)	  (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_COHERENT))

#if _PAGE_WRITETHRU != 0
#define pgprot_cached_wthru(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \
					    _PAGE_COHERENT | _PAGE_WRITETHRU))
#else
#define pgprot_cached_wthru(prot)	pgprot_noncached(prot)
#endif

#define pgprot_cached_noncoherent(prot) \
		(__pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL))

#define pgprot_writecombine pgprot_noncached_wc
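
/*
 * Usage sketch (illustrative): write-combining is the usual choice for
 * framebuffer-style memory:
 *
 *	prot = pgprot_writecombine(PAGE_KERNEL);
 */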

#ifdef CONFIG_HUGETLB_PAGE
static inline int hugepd_ok(hugepd_t hpd)
{
#ifdef CONFIG_PPC_8xx
	return ((hpd_val(hpd) & _PMD_PAGE_MASK) == _PMD_PAGE_8M);
#else
	/* We clear the top bit to indicate hugepd */
	return (hpd_val(hpd) && (hpd_val(hpd) & PD_HUGE) == 0);
#endif
}

static inline int pmd_huge(pmd_t pmd)
{
	return 0;
}

static inline int pud_huge(pud_t pud)
{
	return 0;
}

#define is_hugepd(hpd)		(hugepd_ok(hpd))
#endif

int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot);
void unmap_kernel_page(unsigned long va);

#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_NOHASH_PGTABLE_H */