/*
 *  linux/arch/arm/mm/nommu.c
 *
 *  ARM uCLinux supporting functions.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/kernel.h>

#include <asm/cacheflush.h>
#include <asm/cp15.h>
#include <asm/sections.h>
#include <asm/page.h>
#include <asm/setup.h>
#include <asm/traps.h>
#include <asm/mach/arch.h>
#include <asm/cputype.h>
#include <asm/mpu.h>
#include <asm/procinfo.h>

#include "mm.h"
unsigned long vectors_base;

/*
 * empty_zero_page is a special page that is used for
 * zero-initialized data and COW.
 */
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);
#ifdef CONFIG_ARM_MPU
struct mpu_rgn_info mpu_rgn_info;
/* Region number register */
static void rgnr_write(u32 v)
{
        asm("mcr p15, 0, %0, c6, c2, 0" : : "r" (v));
}
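/*
 * Note: per the PMSAv7 programming model, RGNR selects which MPU region
 * the DRBAR/DRSR/DRACR (and I-side IRBAR/IRSR/IRACR) accessors below
 * operate on, so every region is programmed by writing its number here
 * first (see mpu_setup_region()).
 */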
/* Data-side / unified region attributes */

/* Region access control register */
static void dracr_write(u32 v)
{
        asm("mcr p15, 0, %0, c6, c1, 4" : : "r" (v));
}

/* Region size register */
static void drsr_write(u32 v)
{
        asm("mcr p15, 0, %0, c6, c1, 2" : : "r" (v));
}

/* Region base address register */
static void drbar_write(u32 v)
{
        asm("mcr p15, 0, %0, c6, c1, 0" : : "r" (v));
}

static u32 drbar_read(void)
{
        u32 v;

        asm("mrc p15, 0, %0, c6, c1, 0" : "=r" (v));
        return v;
}
/* Optional instruction-side region attributes */

/* I-side Region access control register */
static void iracr_write(u32 v)
{
        asm("mcr p15, 0, %0, c6, c1, 5" : : "r" (v));
}

/* I-side Region size register */
static void irsr_write(u32 v)
{
        asm("mcr p15, 0, %0, c6, c1, 3" : : "r" (v));
}

/* I-side Region base address register */
static void irbar_write(u32 v)
{
        asm("mcr p15, 0, %0, c6, c1, 1" : : "r" (v));
}

static unsigned long irbar_read(void)
{
        unsigned long v;

        asm("mrc p15, 0, %0, c6, c1, 1" : "=r" (v));
        return v;
}
/* MPU initialisation functions */
void __init adjust_lowmem_bounds_mpu(void)
{
        phys_addr_t phys_offset = PHYS_OFFSET;
        phys_addr_t aligned_region_size, specified_mem_size, rounded_mem_size;
        struct memblock_region *reg;
        bool first = true;
        phys_addr_t mem_start;
        phys_addr_t mem_end;

        for_each_memblock(memory, reg) {
                if (first) {
                        /*
                         * Initially only use memory contiguous from
                         * PHYS_OFFSET.
                         */
                        if (reg->base != phys_offset)
                                panic("First memory bank must be contiguous from PHYS_OFFSET");

                        mem_start = reg->base;
                        mem_end = reg->base + reg->size;
                        specified_mem_size = reg->size;
                        first = false;
                } else {
                        /*
                         * memblock auto merges contiguous blocks, remove
                         * all blocks afterwards in one go (we can't remove
                         * blocks separately while iterating).
                         */
                        pr_notice("Ignoring RAM after %pa, memory at %pa ignored\n",
                                  &mem_end, &reg->base);
                        memblock_remove(reg->base, 0 - reg->base);
                        break;
                }
        }
        /*
         * MPU has curious alignment requirements: size must be a power of 2,
         * and the region start must be aligned to the region size.
         */
        if (phys_offset != 0)
                pr_info("PHYS_OFFSET != 0 => MPU Region size constrained by alignment requirements\n");

        /*
         * Maximum aligned region might overflow phys_addr_t if phys_offset is
         * 0. Hence we keep everything below 4G until we take the smaller of
         * the aligned_region_size and rounded_mem_size, one of which is
         * guaranteed to be smaller than the maximum physical address.
         */
        aligned_region_size = (phys_offset - 1) ^ (phys_offset);
        /* Find the max power-of-two sized region that fits inside our bank */
        rounded_mem_size = (1 << __fls(specified_mem_size)) - 1;

        /* The actual region size is the smaller of the two */
        aligned_region_size = aligned_region_size < rounded_mem_size
                                ? aligned_region_size + 1
                                : rounded_mem_size + 1;
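        /*
         * Worked example with hypothetical values: for phys_offset =
         * 0x10000000 and a 96 MiB bank, (phys_offset - 1) ^ phys_offset is
         * 0x1fffffff, while the largest power of two fitting the bank is
         * 64 MiB (rounded_mem_size = 0x03ffffff), so the region becomes
         * 64 MiB and the remaining 32 MiB are trimmed below.
         */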
        if (aligned_region_size != specified_mem_size) {
                pr_warn("Truncating memory from %pa to %pa (MPU region constraints)\n",
                        &specified_mem_size, &aligned_region_size);
                memblock_remove(mem_start + aligned_region_size,
                                specified_mem_size - aligned_region_size);

                mem_end = mem_start + aligned_region_size;
        }

        pr_debug("MPU Region from %pa size %pa (end %pa)\n",
                 &phys_offset, &aligned_region_size, &mem_end);
}
static int mpu_present(void)
{
        return ((read_cpuid_ext(CPUID_EXT_MMFR0) & MMFR0_PMSA) == MMFR0_PMSAv7);
}
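/*
 * Note: MMFR0[3:0] is the PMSA support field; a value of PMSAv7 indicates
 * an MPU programmed through the CP15 c6 interface used in this file.
 */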
static int mpu_max_regions(void)
{
        /*
         * We don't support a different number of I/D side regions so if we
         * have separate instruction and data memory maps then return
         * whichever side has a smaller number of supported regions.
         */
        u32 dregions, iregions, mpuir;

        mpuir = read_cpuid(CPUID_MPUIR);

        dregions = iregions = (mpuir & MPUIR_DREGION_SZMASK) >> MPUIR_DREGION;

        /* Check for separate d-side and i-side memory maps */
        if (mpuir & MPUIR_nU)
                iregions = (mpuir & MPUIR_IREGION_SZMASK) >> MPUIR_IREGION;

        /* Use the smallest of the two maxima */
        return min(dregions, iregions);
}
static int mpu_iside_independent(void)
{
        /* MPUIR.nU specifies whether there is *not* a unified memory map */
        return read_cpuid(CPUID_MPUIR) & MPUIR_nU;
}
static int mpu_min_region_order(void)
{
        u32 drbar_result, irbar_result;

        /* We've kept a region free for this probing */
        rgnr_write(MPU_PROBE_REGION);
        isb();
        /*
         * As per the ARM ARM, write 0xFFFFFFFC to DRBAR to find the minimum
         * region order.
         */
        drbar_write(0xFFFFFFFC);
        drbar_result = irbar_result = drbar_read();
        drbar_write(0x0);
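        /*
         * For illustration: if the 0xFFFFFFFC written above reads back as
         * 0xFFFFFFC0, address bits [5:0] are not implemented, i.e. the
         * smallest supported region is 2^6 = 64 bytes, and __ffs() below
         * yields a minimum region order of 6.
         */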
        /* If the MPU is non-unified, we use the larger of the two minima */
        if (mpu_iside_independent()) {
                irbar_write(0xFFFFFFFC);
                irbar_result = irbar_read();
                irbar_write(0x0);
        }
        isb(); /* Ensure that MPU region operations have completed */
        /* Return whichever result is larger */
        return __ffs(max(drbar_result, irbar_result));
}
static int mpu_setup_region(unsigned int number, phys_addr_t start,
                            unsigned int size_order, unsigned int properties)
{
        u32 size_data;

        /* We kept a region free for probing resolution of MPU regions */
        if (number > mpu_max_regions() || number == MPU_PROBE_REGION)
                return -ENOENT;

        if (size_order > 32)
                return -ENOMEM;

        if (size_order < mpu_min_region_order())
                return -ENOMEM;

        /* Writing N to bits 5:1 (RSR_SZ) specifies region size 2^(N + 1) */
        size_data = ((size_order - 1) << MPU_RSR_SZ) | 1 << MPU_RSR_EN;
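        /*
         * Example: a 1 MiB region has size_order 20, so 19 is written to the
         * RSR size field and decoded by the hardware as 2^(19 + 1) bytes,
         * with the enable bit set in the same write.
         */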
        dsb(); /* Ensure all previous data accesses occur with old mappings */
        rgnr_write(number);
        isb();
        drbar_write(start);
        dracr_write(properties);
        isb(); /* Propagate properties before enabling region */
        drsr_write(size_data);

        /* Check for independent I-side registers */
        if (mpu_iside_independent()) {
                irbar_write(start);
                iracr_write(properties);
                isb();
                irsr_write(size_data);
        }

        isb();

        /* Store region info (we treat i/d side the same, so only store d) */
        mpu_rgn_info.rgns[number].dracr = properties;
        mpu_rgn_info.rgns[number].drbar = start;
        mpu_rgn_info.rgns[number].drsr = size_data;
        return 0;
}
/* Set up default MPU regions, doing nothing if there is no MPU */
void __init mpu_setup(void)
{
        int region_err;

        if (!mpu_present())
                return;

        region_err = mpu_setup_region(MPU_RAM_REGION, PHYS_OFFSET,
                                      ilog2(memblock.memory.regions[0].size),
                                      MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL);
        if (region_err) {
                panic("MPU region initialization failure! %d", region_err);
        } else {
                pr_info("Using ARMv7 PMSA Compliant MPU. Region independence: %s, Max regions: %d\n",
                        mpu_iside_independent() ? "Yes" : "No",
                        mpu_max_regions());
        }
}
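/*
 * Note: the single RAM region programmed above assumes that
 * adjust_lowmem_bounds_mpu() has already trimmed the first memblock
 * region to a power-of-two size meeting the MPU alignment rules, so
 * ilog2() of its size is an exact region order.
 */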
#else
static void adjust_lowmem_bounds_mpu(void) {}
static void __init mpu_setup(void) {}
#endif /* CONFIG_ARM_MPU */
#ifdef CONFIG_CPU_CP15
#ifdef CONFIG_CPU_HIGH_VECTOR
static unsigned long __init setup_vectors_base(void)
{
        unsigned long reg = get_cr();

        /* Keep the hardware vectors at the traditional high address */
        set_cr(reg | CR_V);
        return 0xffff0000;
}
#else /* CONFIG_CPU_HIGH_VECTOR */
/* Write exception base address to VBAR */
static inline void set_vbar(unsigned long val)
{
        asm("mcr p15, 0, %0, c12, c0, 0" : : "r" (val) : "cc");
}

/*
 * Security extensions, bits[7:4], permitted values:
 * 0b0000 - not implemented, 0b0001/0b0010 - implemented
 */
static inline bool security_extensions_enabled(void)
{
        /* Check CPUID Identification Scheme before ID_PFR1 read */
        if ((read_cpuid_id() & 0x000f0000) == 0x000f0000)
                return !!cpuid_feature_extract(CPUID_EXT_PFR1, 4);
        return false;
}
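/*
 * For example, ID_PFR1[7:4] reading 0b0001 or 0b0010 reports the Security
 * Extensions as implemented, in which case setup_vectors_base() below may
 * move the vector base via VBAR; otherwise the vectors remain at
 * 0x00000000.
 */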
static unsigned long __init setup_vectors_base(void)
{
        unsigned long base = 0, reg = get_cr();

        /* Low vectors: clear SCTLR.V and point VBAR at the chosen base */
        set_cr(reg & ~CR_V);
        if (security_extensions_enabled()) {
                if (IS_ENABLED(CONFIG_REMAP_VECTORS_TO_RAM))
                        base = CONFIG_DRAM_BASE;
                set_vbar(base);
        } else if (IS_ENABLED(CONFIG_REMAP_VECTORS_TO_RAM)) {
                if (CONFIG_DRAM_BASE != 0)
                        pr_err("Security extensions not enabled, vectors cannot be remapped to RAM, vectors base will be 0x00000000\n");
        }

        return base;
}
#endif /* CONFIG_CPU_HIGH_VECTOR */
#endif /* CONFIG_CPU_CP15 */
void __init arm_mm_memblock_reserve(void)
{
#ifndef CONFIG_CPU_V7M
        vectors_base = IS_ENABLED(CONFIG_CPU_CP15) ? setup_vectors_base() : 0;
        /*
         * Register the exception vector page.
         * On some platforms DRAM starts at the exception vector base, so
         * without this reservation the allocator could hand out the page at
         * address 0, which callers would mistake for an allocation failure.
         */
        memblock_reserve(vectors_base, 2 * PAGE_SIZE);
#else /* ifndef CONFIG_CPU_V7M */
        /*
         * There is no dedicated vector page on V7-M, so nothing needs to be
         * reserved here.
         */
#endif
        /*
         * In any case, always ensure address 0 is never used as many things
         * get very confused if 0 is returned as a legitimate address.
         */
        memblock_reserve(0, 1);
}
void __init adjust_lowmem_bounds(void)
{
        phys_addr_t end;

        adjust_lowmem_bounds_mpu();
        end = memblock_end_of_DRAM();
        high_memory = __va(end - 1) + 1;
        memblock_set_current_limit(end);
}
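/*
 * Note: high_memory above is derived from the last byte of DRAM,
 * __va(end - 1) + 1, rather than from 'end' itself, so the conversion is
 * never applied to an address one past the end of memory, which could lie
 * outside the 32-bit physical range.
 */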
/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps, and sets up the zero page, bad page and bad page tables.
 */
void __init paging_init(const struct machine_desc *mdesc)
{
        void *zero_page;

        early_trap_init((void *)vectors_base);
        mpu_setup();

        /* Allocate the zero page. */
        zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
        if (!zero_page)
                panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
                      __func__, PAGE_SIZE, PAGE_SIZE);

        bootmem_init();

        empty_zero_page = virt_to_page(zero_page);
        flush_dcache_page(empty_zero_page);
}
/*
 * We don't need to do anything here for nommu machines.
 */
void setup_mm_for_reboot(void)
{
}
void flush_dcache_page(struct page *page)
{
        __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}
EXPORT_SYMBOL(flush_dcache_page);

void flush_kernel_dcache_page(struct page *page)
{
        __cpuc_flush_dcache_area(page_address(page), PAGE_SIZE);
}
EXPORT_SYMBOL(flush_kernel_dcache_page);
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
                       unsigned long uaddr, void *dst, const void *src,
                       unsigned long len)
{
        memcpy(dst, src, len);
        if (vma->vm_flags & VM_EXEC)
                __cpuc_coherent_user_range(uaddr, uaddr + len);
}
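/*
 * Note: for executable mappings the instruction cache must be brought in
 * sync with the data just copied, which is what
 * __cpuc_coherent_user_range() above does.
 */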
void __iomem *__arm_ioremap_pfn(unsigned long pfn, unsigned long offset,
                                size_t size, unsigned int mtype)
{
        if (pfn >= (0x100000000ULL >> PAGE_SHIFT))
                return NULL;
        return (void __iomem *) (offset + (pfn << PAGE_SHIFT));
}
EXPORT_SYMBOL(__arm_ioremap_pfn);
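/*
 * Without an MMU there is nothing to map: the ioremap variants below
 * simply hand back the physical address as an __iomem cookie, and the
 * memory type argument is accepted only for API compatibility.
 */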
void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size,
                                   unsigned int mtype, void *caller)
{
        return (void __iomem *)phys_addr;
}

void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t, unsigned int, void *);

void __iomem *ioremap(resource_size_t res_cookie, size_t size)
{
        return __arm_ioremap_caller(res_cookie, size, MT_DEVICE,
                                    __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap);
void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
        __alias(ioremap_cached);

void __iomem *ioremap_cached(resource_size_t res_cookie, size_t size)
{
        return __arm_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
                                    __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);
EXPORT_SYMBOL(ioremap_cached);

void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
{
        return __arm_ioremap_caller(res_cookie, size, MT_DEVICE_WC,
                                    __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_wc);
#ifdef CONFIG_PCI

#include <asm/mach/map.h>

void __iomem *pci_remap_cfgspace(resource_size_t res_cookie, size_t size)
{
        return arch_ioremap_caller(res_cookie, size, MT_UNCACHED,
                                   __builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(pci_remap_cfgspace);
#endif
void *arch_memremap_wb(phys_addr_t phys_addr, size_t size)
{
        return (void *)phys_addr;
}

void __iounmap(volatile void __iomem *addr)
{
}
EXPORT_SYMBOL(__iounmap);

void (*arch_iounmap)(volatile void __iomem *);

void iounmap(volatile void __iomem *addr)
{
}
EXPORT_SYMBOL(iounmap);