// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/mm/nommu.c
 *
 *  ARM uCLinux supporting functions.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/kernel.h>

#include <asm/cacheflush.h>
#include <asm/cp15.h>
#include <asm/sections.h>
#include <asm/page.h>
#include <asm/setup.h>
#include <asm/traps.h>
#include <asm/mach/arch.h>
#include <asm/cputype.h>
#include <asm/mpu.h>
#include <asm/procinfo.h>

#include "mm.h"
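/* Base of the exception vector page, set up in arm_mm_memblock_reserve(). */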
unsigned long vectors_base;

/*
 * empty_zero_page is a special page that is used for
 * zero-initialized data and COW.
 */
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);

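/* MPU region state, populated by the PMSA setup code. */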
#ifdef CONFIG_ARM_MPU
struct mpu_rgn_info mpu_rgn_info;
#endif

#ifdef CONFIG_CPU_CP15
#ifdef CONFIG_CPU_HIGH_VECTOR
unsigned long setup_vectors_base(void)
{
	unsigned long reg = get_cr();

	set_cr(reg | CR_V);
	return 0xffff0000;
}
#else /* CONFIG_CPU_HIGH_VECTOR */
/* Write exception base address to VBAR */
static inline void set_vbar(unsigned long val)
{
	asm("mcr p15, 0, %0, c12, c0, 0" : : "r" (val) : "cc");
}

/*
 * Security extensions, bits[7:4], permitted values:
 * 0b0000 - not implemented, 0b0001/0b0010 - implemented
 */
static inline bool security_extensions_enabled(void)
{
	/* Check CPUID Identification Scheme before ID_PFR1 read */
	if ((read_cpuid_id() & 0x000f0000) == 0x000f0000)
		return cpuid_feature_extract(CPUID_EXT_PFR1, 4) ||
		       cpuid_feature_extract(CPUID_EXT_PFR1, 20);
	return 0;
}

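/*
 * Clear SCTLR.V to use the low vectors and, when the Security Extensions
 * provide VBAR, optionally point the vectors at RAM.
 */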
unsigned long setup_vectors_base(void)
{
	unsigned long base = 0, reg = get_cr();

	set_cr(reg & ~CR_V);
	if (security_extensions_enabled()) {
		if (IS_ENABLED(CONFIG_REMAP_VECTORS_TO_RAM))
			base = CONFIG_DRAM_BASE;
		set_vbar(base);
	} else if (IS_ENABLED(CONFIG_REMAP_VECTORS_TO_RAM)) {
		if (CONFIG_DRAM_BASE != 0)
			pr_err("Security extensions not enabled, vectors cannot be remapped to RAM, vectors base will be 0x00000000\n");
	}

	return base;
}
#endif /* CONFIG_CPU_HIGH_VECTOR */
#endif /* CONFIG_CPU_CP15 */

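/*
 * Reserve the pages holding the exception vectors so the early allocator
 * never hands them out.
 */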
void __init arm_mm_memblock_reserve(void)
{
#ifndef CONFIG_CPU_V7M
	vectors_base = IS_ENABLED(CONFIG_CPU_CP15) ? setup_vectors_base() : 0;
	/*
	 * Register the exception vector page.
	 * On some platforms DRAM starts at the exception vector address,
	 * where alloc_page() would return page 0, a valid page that
	 * nonetheless looks like an allocation failure.
	 */
	memblock_reserve(vectors_base, 2 * PAGE_SIZE);
#else /* ifndef CONFIG_CPU_V7M */
	/*
	 * There is no dedicated vector page on V7-M. So nothing needs to be
	 * reserved here.
	 */
#endif
	/*
	 * In any case, always ensure address 0 is never used as many things
	 * get very confused if 0 is returned as a legitimate address.
	 */
	memblock_reserve(0, 1);
}

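/*
 * Probe ID_MMFR0 for the PMSA flavour and hand off to the matching
 * PMSAv7 or PMSAv8 implementation.
 */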
static void __init adjust_lowmem_bounds_mpu(void)
{
	unsigned long pmsa = read_cpuid_ext(CPUID_EXT_MMFR0) & MMFR0_PMSA;

	switch (pmsa) {
	case MMFR0_PMSAv7:
		pmsav7_adjust_lowmem_bounds();
		break;
	case MMFR0_PMSAv8:
		pmsav8_adjust_lowmem_bounds();
		break;
	default:
		break;
	}
}

static void __init mpu_setup(void)
{
	unsigned long pmsa = read_cpuid_ext(CPUID_EXT_MMFR0) & MMFR0_PMSA;

	switch (pmsa) {
	case MMFR0_PMSAv7:
		pmsav7_setup();
		break;
	case MMFR0_PMSAv8:
		pmsav8_setup();
		break;
	default:
		break;
	}
}

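/*
 * Without an MMU, lowmem covers all of DRAM: clamp high_memory and the
 * memblock allocation limit to its end.
 */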
void __init adjust_lowmem_bounds(void)
{
	phys_addr_t end;
	adjust_lowmem_bounds_mpu();
	end = memblock_end_of_DRAM();
	high_memory = __va(end - 1) + 1;
	memblock_set_current_limit(end);
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps, and sets up the zero page, bad page and bad page tables.
 */
void __init paging_init(const struct machine_desc *mdesc)
{
	void *zero_page;

	early_trap_init((void *)vectors_base);
	mpu_setup();

	/* allocate the zero page. */
	zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!zero_page)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);

	bootmem_init();

	empty_zero_page = virt_to_page(zero_page);
	flush_dcache_page(empty_zero_page);
}

/*
 * We don't need to do anything here for nommu machines.
 */
void setup_mm_for_reboot(void)
{
}

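/*
 * Cache maintenance: with a single identity mapping there are no alias
 * concerns, so flushing the page at its kernel address is sufficient.
 */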
void flush_dcache_page(struct page *page)
{
	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}
EXPORT_SYMBOL(flush_dcache_page);

void flush_kernel_dcache_page(struct page *page)
{
	__cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}
EXPORT_SYMBOL(flush_kernel_dcache_page);

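/*
 * Used by ptrace and friends to write into another task's pages: after
 * copying, executable regions need the I- and D-caches made coherent.
 */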
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
		       unsigned long uaddr, void *dst, const void *src,
		       unsigned long len)
{
	memcpy(dst, src, len);
	if (vma->vm_flags & VM_EXEC)
		__cpuc_coherent_user_range(uaddr, uaddr + len);
}

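/*
 * I/O "mappings" on nommu return the physical address as the cookie.
 * A PFN at or above 4GiB cannot be represented in a 32-bit pointer, so
 * such requests are refused.
 */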
void __iomem *__arm_ioremap_pfn(unsigned long pfn, unsigned long offset,
				size_t size, unsigned int mtype)
{
	if (pfn >= (0x100000000ULL >> PAGE_SHIFT))
		return NULL;
	return (void __iomem *)(offset + (pfn << PAGE_SHIFT));
}
EXPORT_SYMBOL(__arm_ioremap_pfn);

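/* Every ioremap() variant below collapses to this identity mapping. */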
void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size,
				   unsigned int mtype, void *caller)
{
	return (void __iomem *)phys_addr;
}

void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t, unsigned int, void *);

void __iomem *ioremap(resource_size_t res_cookie, size_t size)
{
	return __arm_ioremap_caller(res_cookie, size, MT_DEVICE,
				    __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap);

void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
	__alias(ioremap_cached);

void __iomem *ioremap_cached(resource_size_t res_cookie, size_t size)
{
	return __arm_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
				    __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);
EXPORT_SYMBOL(ioremap_cached);

void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
{
	return __arm_ioremap_caller(res_cookie, size, MT_DEVICE_WC,
				    __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_wc);

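/*
 * The PCI specification disallows posted write transactions to config
 * space, so it is mapped with a strongly-ordered memory type here.
 */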
#ifdef CONFIG_PCI

#include <asm/mach/map.h>

void __iomem *pci_remap_cfgspace(resource_size_t res_cookie, size_t size)
{
	return arch_ioremap_caller(res_cookie, size, MT_UNCACHED,
				   __builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(pci_remap_cfgspace);
#endif

void *arch_memremap_wb(phys_addr_t phys_addr, size_t size)
{
	return (void *)phys_addr;
}

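/* ioremap() created no mapping, so unmapping is a no-op here. */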
void __iounmap(volatile void __iomem *addr)
{
}
EXPORT_SYMBOL(__iounmap);

void (*arch_iounmap)(volatile void __iomem *);

void iounmap(volatile void __iomem *addr)
{
}
EXPORT_SYMBOL(iounmap);