/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_PAGE_H
#define _ASM_POWERPC_PAGE_H

/*
 * Copyright (C) 2001,2005 IBM Corporation.
 */

#ifndef __ASSEMBLY__
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#else
#include <asm/types.h>
#endif
#include <asm/asm-const.h>
/*
 * On regular PPC32 the page size is 4K (but we support 4K/16K/64K/256K pages
 * on PPC44x and 4K/16K on 8xx). For PPC64 we support either 4K or 64K software
 * page size. When using 64K pages however, whether we are really supporting
 * 64K pages in HW or not is irrelevant to those definitions.
 */
#define PAGE_SHIFT		CONFIG_PPC_PAGE_SHIFT
#define PAGE_SIZE		(ASM_CONST(1) << PAGE_SHIFT)
#ifndef __ASSEMBLY__
#ifndef CONFIG_HUGETLB_PAGE
#define HPAGE_SHIFT PAGE_SHIFT
#elif defined(CONFIG_PPC_BOOK3S_64)
extern unsigned int hpage_shift;
#define HPAGE_SHIFT hpage_shift
#elif defined(CONFIG_PPC_8xx)
#define HPAGE_SHIFT		19	/* 512k pages */
#elif defined(CONFIG_PPC_E500)
#define HPAGE_SHIFT		22	/* 4M pages */
#endif
#define HPAGE_SIZE		((1UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
#define HUGE_MAX_HSTATE		(MMU_PAGE_COUNT-1)
#endif
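
/*
 * Illustrative arithmetic (assuming a 4K base page, PAGE_SHIFT = 12):
 * on e500 the definitions above give HPAGE_SIZE = 1UL << 22 = 4M,
 * HPAGE_MASK = 0xffc00000 (on 32-bit) and
 * HUGETLB_PAGE_ORDER = 22 - 12 = 10.
 */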
/*
 * Subtle: (1 << PAGE_SHIFT) is an int, not an unsigned long. So if we
 * assign PAGE_MASK to a larger type it gets extended the way we want
 * (i.e. with 1s in the high bits).
 */
#define PAGE_MASK      (~((1 << PAGE_SHIFT) - 1))
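
/*
 * For example, with PAGE_SHIFT = 12: (1 << 12) - 1 is the int 0xfff, so
 * PAGE_MASK is the int 0xfffff000 (i.e. -4096). Assigning it to a u64
 * sign-extends to 0xfffffffffffff000, whereas an unsigned constant
 * would have produced 0x00000000fffff000.
 */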
/*
 * KERNELBASE is the virtual address of the start of the kernel; it's often
 * the same as PAGE_OFFSET, but _might not be_.
 *
 * The kdump dump kernel is one example where KERNELBASE != PAGE_OFFSET.
 *
 * PAGE_OFFSET is the virtual address of the start of lowmem.
 *
 * PHYSICAL_START is the physical address of the start of the kernel.
 *
 * MEMORY_START is the physical address of the start of lowmem.
 *
 * KERNELBASE, PAGE_OFFSET, and PHYSICAL_START are all configurable on
 * ppc32, and based on how they are set we determine MEMORY_START.
 *
 * For the linear mapping the following equation should be true:
 *
 *	KERNELBASE - PAGE_OFFSET = PHYSICAL_START - MEMORY_START
 *
 * Also, KERNELBASE >= PAGE_OFFSET and PHYSICAL_START >= MEMORY_START.
 *
 * There are two ways to determine a virtual address from a physical one:
 *
 *	va = pa + PAGE_OFFSET - MEMORY_START
 *	va = pa + KERNELBASE - PHYSICAL_START
 *
 * If you want to know something's offset from the start of the kernel you
 * should subtract KERNELBASE.
 *
 * If you want to test if something's a kernel address, use is_kernel_addr().
 */

#define KERNELBASE	ASM_CONST(CONFIG_KERNEL_START)
#define PAGE_OFFSET	ASM_CONST(CONFIG_PAGE_OFFSET)
#define LOAD_OFFSET	ASM_CONST((CONFIG_KERNEL_START-CONFIG_PHYSICAL_START))
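
/*
 * Worked example (illustrative values only): a kdump kernel built with
 * KERNELBASE = 0xc2000000, PAGE_OFFSET = 0xc0000000,
 * PHYSICAL_START = 0x2000000 and MEMORY_START = 0 satisfies the linear
 * mapping equation above: 0xc2000000 - 0xc0000000 == 0x2000000 - 0.
 */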
#if defined(CONFIG_NONSTATIC_KERNEL)
#ifndef __ASSEMBLY__

extern phys_addr_t memstart_addr;
extern phys_addr_t kernstart_addr;

#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC32)
extern long long virt_phys_offset;
#endif

#endif /* __ASSEMBLY__ */
#define PHYSICAL_START	kernstart_addr

#else	/* !CONFIG_NONSTATIC_KERNEL */
#define PHYSICAL_START	ASM_CONST(CONFIG_PHYSICAL_START)
#endif
/* See the description of VIRT_PHYS_OFFSET below */
#if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE)
#ifdef CONFIG_RELOCATABLE
#define VIRT_PHYS_OFFSET virt_phys_offset
#else
#define VIRT_PHYS_OFFSET (KERNELBASE - PHYSICAL_START)
#endif
#endif

#ifdef CONFIG_PPC64
#define MEMORY_START	0UL
#elif defined(CONFIG_NONSTATIC_KERNEL)
#define MEMORY_START	memstart_addr
#else
#define MEMORY_START	(PHYSICAL_START + PAGE_OFFSET - KERNELBASE)
#endif

#ifdef CONFIG_FLATMEM
#define ARCH_PFN_OFFSET		((unsigned long)(MEMORY_START >> PAGE_SHIFT))
#endif
/*
 * On Book-E parts we need __va to parse the device tree and we can't
 * determine MEMORY_START until then.  However we can determine PHYSICAL_START
 * from information at hand (program counter, TLB lookup).
 *
 * On BookE with RELOCATABLE && PPC32
 *
 * With RELOCATABLE && PPC32, we support loading the kernel at any physical
 * address without any restriction on the page alignment.
 *
 * We find the runtime address of _stext and relocate ourselves based on
 * the following calculation:
 *
 *	virtual_base = ALIGN_DOWN(KERNELBASE, 256M) +
 *				MODULO(_stext.run, 256M)
 *
 * and create the following mapping:
 *
 *	ALIGN_DOWN(_stext.run, 256M) => ALIGN_DOWN(KERNELBASE, 256M)
 *
 * When we process relocations, we cannot depend on the
 * existing equation for the __va()/__pa() translations:
 *
 *	__va(x) = (x) - PHYSICAL_START + KERNELBASE
 *
 * where:
 *	PHYSICAL_START = kernstart_addr = physical address of _stext
 *	KERNELBASE = compiled virtual address of _stext.
 *
 * This formula holds true iff the kernel load address is TLB page aligned.
 *
 * In our case, we need to also account for the shift in the kernel's
 * virtual address.
 *
 * E.g.,
 *
 * Let the kernel be loaded at 64MB and KERNELBASE be 0xc0000000 (same as
 * PAGE_OFFSET). In this case, we would be mapping 0 to 0xc0000000, and
 * kernstart_addr = 64M.
 *
 * Now __va(1MB) = (0x100000) - (0x4000000) + 0xc0000000
 *		 = 0xbc100000, which is wrong.
 *
 * Rather, it should be 0xc0000000 + 0x100000 = 0xc0100000
 * according to our mapping.
 *
 * Hence we use the following formula to get the translations right:
 *
 *	__va(x) = (x) - [ PHYSICAL_START - Effective KERNELBASE ]
 *
 * where:
 *	PHYSICAL_START = dynamic load address (the kernstart_addr variable)
 *	Effective KERNELBASE = virtual_base
 *			     = ALIGN_DOWN(KERNELBASE, 256M) +
 *					MODULO(PHYSICAL_START, 256M)
 *
 * To make __va() / __pa() lighter weight, we introduce a new variable,
 * virt_phys_offset, which holds:
 *
 *	virt_phys_offset = Effective KERNELBASE - PHYSICAL_START
 *			 = ALIGN_DOWN(KERNELBASE, 256M) -
 *				ALIGN_DOWN(PHYSICAL_START, 256M)
 *
 * Hence:
 *
 *	__va(x) = x - PHYSICAL_START + Effective KERNELBASE
 *		= x + virt_phys_offset
 *
 * and
 *
 *	__pa(x) = x + PHYSICAL_START - Effective KERNELBASE
 *		= x - virt_phys_offset
 *
 * On non-Book-E PPC64, PAGE_OFFSET and MEMORY_START are constants, so use
 * the other definitions for __va & __pa.
 */
#if defined(CONFIG_PPC32) && defined(CONFIG_BOOKE)
#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + VIRT_PHYS_OFFSET))
#define __pa(x) ((phys_addr_t)(unsigned long)(x) - VIRT_PHYS_OFFSET)
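
/*
 * Plugging the 64MB example from the comment above into these macros:
 * virt_phys_offset = Effective KERNELBASE - PHYSICAL_START
 *		    = 0xc4000000 - 0x4000000 = 0xc0000000, so
 * __va(0x100000) = 0x100000 + 0xc0000000 = 0xc0100000, which matches
 * the mapping described there.
 */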
#else
#ifdef CONFIG_PPC64

#define VIRTUAL_WARN_ON(x)	WARN_ON(IS_ENABLED(CONFIG_DEBUG_VIRTUAL) && (x))

/*
 * gcc miscompiles (unsigned long)(&static_var) - PAGE_OFFSET
 * with -mcmodel=medium, so we use & and | instead of - and + on 64-bit.
 * This also results in better code generation.
 */
#define __va(x)								\
({									\
	VIRTUAL_WARN_ON((unsigned long)(x) >= PAGE_OFFSET);		\
	(void *)(unsigned long)((phys_addr_t)(x) | PAGE_OFFSET);	\
})

#define __pa(x)								\
({									\
	VIRTUAL_WARN_ON((unsigned long)(x) < PAGE_OFFSET);		\
	(unsigned long)(x) & 0x0fffffffffffffffUL;			\
})
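
/*
 * The | and & forms are equivalent to + and - here because, on Book3S
 * 64 for instance, PAGE_OFFSET = 0xc000000000000000 has bits set only
 * in the top nibble while linear-map physical addresses lie below 2^60,
 * so the operands never have overlapping bits. E.g.
 * __va(0x100000) = 0x100000 | 0xc000000000000000 = 0xc000000000100000.
 */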
#else /* 32-bit, non book E */
#define __va(x) ((void *)(unsigned long)((phys_addr_t)(x) + PAGE_OFFSET - MEMORY_START))
#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET + MEMORY_START)
#endif
#endif
#ifndef __ASSEMBLY__
static inline unsigned long virt_to_pfn(const void *kaddr)
{
	return __pa(kaddr) >> PAGE_SHIFT;
}

static inline const void *pfn_to_kaddr(unsigned long pfn)
{
	return __va(pfn << PAGE_SHIFT);
}
#endif
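
/*
 * Illustrative round trip (assuming 4K pages and MEMORY_START = 0):
 * virt_to_pfn(__va(0x3000)) == 3 and pfn_to_kaddr(3) == __va(0x3000),
 * so pfn_to_kaddr(virt_to_pfn(kaddr)) == kaddr for any page-aligned
 * linear-map address.
 */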
#define virt_to_page(kaddr)	pfn_to_page(virt_to_pfn(kaddr))
#define virt_addr_valid(vaddr)	({					\
	unsigned long _addr = (unsigned long)vaddr;			\
	_addr >= PAGE_OFFSET && _addr < (unsigned long)high_memory &&	\
	pfn_valid(virt_to_pfn((void *)_addr));				\
})
/*
 * Unfortunately the PLT is in the BSS in the PPC32 ELF ABI,
 * and needs to be executable.  This means the whole heap ends
 * up being executable.
 */
#define VM_DATA_DEFAULT_FLAGS32	VM_DATA_FLAGS_TSK_EXEC
#define VM_DATA_DEFAULT_FLAGS64	VM_DATA_FLAGS_NON_EXEC
#ifdef CONFIG_PPC64
#include <asm/page_64.h>
#else
#include <asm/page_32.h>
#endif
/*
 * Don't compare things with KERNELBASE or PAGE_OFFSET to test for
 * "kernelness"; use is_kernel_addr() - it should do what you want.
 */
#ifdef CONFIG_PPC_BOOK3E_64
#define is_kernel_addr(x)	((x) >= 0x8000000000000000ul)
#elif defined(CONFIG_PPC_BOOK3S_64)
#define is_kernel_addr(x)	((x) >= PAGE_OFFSET)
#else
#define is_kernel_addr(x)	((x) >= TASK_SIZE)
#endif
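
/*
 * E.g. on Book3S 64, where PAGE_OFFSET is 0xc000000000000000,
 * is_kernel_addr(0xc000000000000000ul) is true while a typical user
 * address such as 0x10000000 is not; 32-bit compares against TASK_SIZE
 * because the kernel lives above the user address space there.
 */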
#ifndef CONFIG_PPC_BOOK3S_64
/*
 * Use the top bit of the higher-level page table entries to indicate whether
 * the entries we point to contain hugepages.  This works because we know that
 * the page tables live in kernel space.  If we ever decide to support having
 * page tables at arbitrary addresses, this breaks and will have to change.
 */
#ifdef CONFIG_PPC64
#define PD_HUGE 0x8000000000000000UL
#else
#define PD_HUGE 0x80000000
#endif
#else	/* CONFIG_PPC_BOOK3S_64 */
/*
 * Book3S 64 stores real addresses in the hugepd entries to
 * avoid overlaps with _PAGE_PRESENT and _PAGE_PTE.
 */
#define HUGEPD_ADDR_MASK	(0x0ffffffffffffffful & ~HUGEPD_SHIFT_MASK)
#endif /* CONFIG_PPC_BOOK3S_64 */
/*
 * Some number of bits at the level of the page table that points to
 * a hugepte are used to encode the size.  This masks those bits.
 * On 8xx, HW assistance requires 4k alignment for the hugepte.
 */
#ifdef CONFIG_PPC_8xx
#define HUGEPD_SHIFT_MASK     0xfff
#else
#define HUGEPD_SHIFT_MASK     0x3f
#endif
#ifndef __ASSEMBLY__

#ifdef CONFIG_PPC_BOOK3S_64
#include <asm/pgtable-be-types.h>
#else
#include <asm/pgtable-types.h>
#endif
struct page;
extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
extern void copy_user_page(void *to, void *from, unsigned long vaddr,
		struct page *p);
extern int devmem_is_allowed(unsigned long pfn);

#ifdef CONFIG_PPC_SMLPAR
void arch_free_page(struct page *page, int order);
#define HAVE_ARCH_FREE_PAGE
#endif
struct vm_area_struct;

extern unsigned long kernstart_virt_addr;

static inline unsigned long kaslr_offset(void)
{
	return kernstart_virt_addr - KERNELBASE;
}
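
/*
 * Illustration: if KASLR moved the kernel so that
 * kernstart_virt_addr = KERNELBASE + 0x2000000, kaslr_offset() returns
 * 0x2000000; it is 0 when the kernel runs at its linked address.
 */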
#include <asm-generic/memory_model.h>
#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_PAGE_H */