// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/nommu.c
 *
 *  ARM uCLinux supporting functions.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/kernel.h>

#include <asm/cacheflush.h>
#include <asm/cp15.h>
#include <asm/sections.h>
#include <asm/page.h>
#include <asm/setup.h>
#include <asm/traps.h>
#include <asm/mach/arch.h>
#include <asm/cputype.h>
#include <asm/mpu.h>
#include <asm/procinfo.h>

#include "mm.h"

unsigned long vectors_base;

/*
 * empty_zero_page is a special page that is used for
 * zero-initialized data and COW.
 */
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);

#ifdef CONFIG_ARM_MPU
struct mpu_rgn_info mpu_rgn_info;
#endif /* CONFIG_ARM_MPU */

#ifdef CONFIG_CPU_CP15
#ifdef CONFIG_CPU_HIGH_VECTOR
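/*
 * With CONFIG_CPU_HIGH_VECTOR the exception vectors live at the high
 * vectors address, 0xffff0000; setting the V bit in the control register
 * selects that base.
 */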
unsigned long setup_vectors_base(void)
{
	unsigned long reg = get_cr();

	set_cr(reg | CR_V);
	return 0xffff0000;
}
#else /* CONFIG_CPU_HIGH_VECTOR */
/* Write exception base address to VBAR */
static inline void set_vbar(unsigned long val)
{
	asm("mcr p15, 0, %0, c12, c0, 0" : : "r" (val) : "cc");
}

/*
 * Security extensions, bits[7:4], permitted values,
 * 0b0000 - not implemented, 0b0001/0b0010 - implemented
 */
static inline bool security_extensions_enabled(void)
{
	/* Check CPUID Identification Scheme before ID_PFR1 read */
	if ((read_cpuid_id() & 0x000f0000) == 0x000f0000)
		return cpuid_feature_extract(CPUID_EXT_PFR1, 4) ||
			cpuid_feature_extract(CPUID_EXT_PFR1, 20);
	return 0;
}
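
/*
 * Without high vectors the base is normally 0. If the security extensions
 * are implemented, VBAR can relocate the vectors, which
 * CONFIG_REMAP_VECTORS_TO_RAM uses to place them at CONFIG_DRAM_BASE.
 */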
unsigned long setup_vectors_base(void)
{
	unsigned long base = 0, reg = get_cr();

	set_cr(reg & ~CR_V);
	if (security_extensions_enabled()) {
		if (IS_ENABLED(CONFIG_REMAP_VECTORS_TO_RAM))
			base = CONFIG_DRAM_BASE;
		set_vbar(base);
	} else if (IS_ENABLED(CONFIG_REMAP_VECTORS_TO_RAM)) {
		if (CONFIG_DRAM_BASE != 0)
			pr_err("Security extensions not enabled, vectors cannot be remapped to RAM, vectors base will be 0x00000000\n");
	}

	return base;
}
#endif /* CONFIG_CPU_HIGH_VECTOR */
#endif /* CONFIG_CPU_CP15 */
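
/*
 * Reserve the memory that must never be handed out by the allocator: the
 * exception vector pages (where they exist) and physical address 0.
 */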
void __init arm_mm_memblock_reserve(void)
{
#ifndef CONFIG_CPU_V7M
	vectors_base = IS_ENABLED(CONFIG_CPU_CP15) ? setup_vectors_base() : 0;
	/*
	 * Register the exception vector page.
	 * On some architectures the exception vectors live in DRAM; if
	 * alloc_page() handed that page out, the returned address would be
	 * 0, which is not NULL but still breaks callers that treat it as an
	 * error.
	 */
	memblock_reserve(vectors_base, 2 * PAGE_SIZE);
#else /* ifndef CONFIG_CPU_V7M */
	/*
	 * There is no dedicated vector page on V7-M. So nothing needs to be
	 * reserved here.
	 */
#endif
	/*
	 * In any case, always ensure address 0 is never used as many things
	 * get very confused if 0 is returned as a legitimate address.
	 */
	memblock_reserve(0, 1);
}
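
/*
 * The MPU helpers below dispatch on the PMSA field of ID_MMFR0 to pick the
 * PMSAv7 or PMSAv8 implementation.
 */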
static void __init adjust_lowmem_bounds_mpu(void)
{
	unsigned long pmsa = read_cpuid_ext(CPUID_EXT_MMFR0) & MMFR0_PMSA;

	switch (pmsa) {
	case MMFR0_PMSAv7:
		pmsav7_adjust_lowmem_bounds();
		break;
	case MMFR0_PMSAv8:
		pmsav8_adjust_lowmem_bounds();
		break;
	default:
		break;
	}
}

static void __init mpu_setup(void)
{
	unsigned long pmsa = read_cpuid_ext(CPUID_EXT_MMFR0) & MMFR0_PMSA;

	switch (pmsa) {
	case MMFR0_PMSAv7:
		pmsav7_setup();
		break;
	case MMFR0_PMSAv8:
		pmsav8_setup();
		break;
	default:
		break;
	}
}
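
/*
 * Clamp the memblock allocation limit to the end of DRAM and derive
 * high_memory from it; on !MMU all memory is lowmem.
 */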
void __init adjust_lowmem_bounds(void)
{
	phys_addr_t end;
	adjust_lowmem_bounds_mpu();
	end = memblock_end_of_DRAM();
	high_memory = __va(end - 1) + 1;
	memblock_set_current_limit(end);
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps, and sets up the zero page, bad page and bad page tables.
 */
void __init paging_init(const struct machine_desc *mdesc)
{
	void *zero_page;

	early_trap_init((void *)vectors_base);
	mpu_setup();

	/* allocate the zero page. */
	zero_page = (void *)memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!zero_page)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);

	bootmem_init();

	empty_zero_page = virt_to_page(zero_page);
	flush_dcache_page(empty_zero_page);
}

/*
 * We don't need to do anything here for nommu machines.
 */
void setup_mm_for_reboot(void)
{
}

void flush_dcache_page(struct page *page)
{
	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}
EXPORT_SYMBOL(flush_dcache_page);

void flush_kernel_dcache_page(struct page *page)
{
	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}
EXPORT_SYMBOL(flush_kernel_dcache_page);

void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long uaddr, void *dst, const void *src,
		       unsigned long len)
{
	memcpy(dst, src, len);
	if (vma->vm_flags & VM_EXEC)
		__cpuc_coherent_user_range(uaddr, uaddr + len);
}
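
/*
 * Without an MMU there is nothing to remap: the I/O "mappings" below simply
 * hand back the physical address as the cookie, and unmapping is a no-op.
 */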
void __iomem *__arm_ioremap_pfn(unsigned long pfn, unsigned long offset,
				size_t size, unsigned int mtype)
{
	/* PFNs at or above 4GiB cannot be expressed as a 32-bit address. */
	if (pfn >= (0x100000000ULL >> PAGE_SHIFT))
		return NULL;
	return (void __iomem *) (offset + (pfn << PAGE_SHIFT));
}
EXPORT_SYMBOL(__arm_ioremap_pfn);

void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size,
				   unsigned int mtype, void *caller)
{
	return (void __iomem *)phys_addr;
}

void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t, unsigned int, void *);

void __iomem *ioremap(resource_size_t res_cookie, size_t size)
{
	return __arm_ioremap_caller(res_cookie, size, MT_DEVICE,
				    __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap);
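
/*
 * ioremap_cache() is an alias of ioremap_cached(): both names resolve to
 * the same symbol, so the cached variant below serves either entry point.
 */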
void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
	__alias(ioremap_cached);

void __iomem *ioremap_cached(resource_size_t res_cookie, size_t size)
{
	return __arm_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
				    __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);
EXPORT_SYMBOL(ioremap_cached);

void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
{
	return __arm_ioremap_caller(res_cookie, size, MT_DEVICE_WC,
				    __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_wc);

#ifdef CONFIG_PCI

#include <asm/mach/map.h>

void __iomem *pci_remap_cfgspace(resource_size_t res_cookie, size_t size)
{
	return arch_ioremap_caller(res_cookie, size, MT_UNCACHED,
				   __builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(pci_remap_cfgspace);
#endif /* CONFIG_PCI */
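
/*
 * arch_memremap_wb() backs memremap(..., MEMREMAP_WB); with no MMU the
 * physical address can be used directly.
 */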
void *arch_memremap_wb(phys_addr_t phys_addr, size_t size)
{
	return (void *)phys_addr;
}
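
/* Nothing was mapped above, so there is nothing to tear down here. */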
void __iounmap(volatile void __iomem *addr)
{
}
EXPORT_SYMBOL(__iounmap);

void (*arch_iounmap)(volatile void __iomem *);

void iounmap(volatile void __iomem *addr)
{
}
EXPORT_SYMBOL(iounmap);