/*
 * Based on arch/arm/mm/init.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/cache.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/sort.h>
#include <linux/of_fdt.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/efi.h>
#include <linux/swiotlb.h>
#include <linux/vmalloc.h>

#include <asm/boot.h>
#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/kernel-pgtable.h>
#include <asm/memory.h>
#include <asm/numa.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/alternative.h>

/*
 * We need to be able to catch inadvertent references to memstart_addr
 * that occur (potentially in generic code) before arm64_memblock_init()
 * executes, which assigns it its actual value. So use a default value
 * that cannot be mistaken for a real physical address.
 */
s64 memstart_addr __ro_after_init = -1;
phys_addr_t arm64_dma_phys_limit __ro_after_init;
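
/*
 * Parse the "initrd=<start>,<size>" early parameter: a physical start
 * address followed by a size, both in the usual memparse() suffixed
 * forms (e.g. "initrd=0x82000000,16M" -- an illustrative address, not a
 * required one). The range is sanity-checked against the linear mapping
 * and reserved later, in arm64_memblock_init().
 */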
#ifdef CONFIG_BLK_DEV_INITRD
static int __init early_initrd(char *p)
{
	unsigned long start, size;
	char *endp;

	start = memparse(p, &endp);
	if (*endp == ',') {
		size = memparse(endp + 1, NULL);

		initrd_start = start;
		initrd_end = start + size;
	}
	return 0;
}
early_param("initrd", early_initrd);
#endif

/*
 * Return the maximum physical address for ZONE_DMA (DMA_BIT_MASK(32)). It
 * currently assumes that for memory starting above 4G, 32-bit devices will
 * use a DMA offset.
 */
static phys_addr_t __init max_zone_dma_phys(void)
{
	phys_addr_t offset = memblock_start_of_DRAM() & GENMASK_ULL(63, 32);
	return min(offset + (1ULL << 32), memblock_end_of_DRAM());
}

#ifdef CONFIG_NUMA

static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES] = {0};

	if (IS_ENABLED(CONFIG_ZONE_DMA))
		max_zone_pfns[ZONE_DMA] = PFN_DOWN(max_zone_dma_phys());
	max_zone_pfns[ZONE_NORMAL] = max;

	free_area_init_nodes(max_zone_pfns);
}

#else

static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
	struct memblock_region *reg;
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
	unsigned long max_dma = min;

	memset(zone_size, 0, sizeof(zone_size));

	/* 4GB maximum for 32-bit only capable devices */
#ifdef CONFIG_ZONE_DMA
	max_dma = PFN_DOWN(arm64_dma_phys_limit);
	zone_size[ZONE_DMA] = max_dma - min;
#endif
	zone_size[ZONE_NORMAL] = max - max_dma;

	memcpy(zhole_size, zone_size, sizeof(zhole_size));

	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (start >= max)
			continue;

#ifdef CONFIG_ZONE_DMA
		if (start < max_dma) {
			unsigned long dma_end = min(end, max_dma);
			zhole_size[ZONE_DMA] -= dma_end - start;
		}
#endif
		if (end > max_dma) {
			unsigned long normal_end = min(end, max);
			unsigned long normal_start = max(start, max_dma);
			zhole_size[ZONE_NORMAL] -= normal_end - normal_start;
		}
	}

	free_area_init_node(0, zone_size, min, zhole_size);
}

#endif /* CONFIG_NUMA */
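
/*
 * pfn_valid() reports whether a PFN has a backing struct page. The
 * shift/unshift round trip rejects PFNs whose physical address would
 * overflow phys_addr_t, and memblock_is_map_memory() additionally
 * excludes regions that are not in the linear mapping (MEMBLOCK_NOMAP).
 */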
#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
	phys_addr_t addr = pfn << PAGE_SHIFT;

	if ((addr >> PAGE_SHIFT) != pfn)
		return 0;
	return memblock_is_map_memory(addr);
}
EXPORT_SYMBOL(pfn_valid);
#endif
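
/*
 * With !CONFIG_SPARSEMEM there are no section maps to populate, so the
 * empty stub below lets bootmem_init() call arm64_memory_present()
 * unconditionally.
 */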
#ifndef CONFIG_SPARSEMEM
static void __init arm64_memory_present(void)
{
}
#else
static void __init arm64_memory_present(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		int nid = memblock_get_region_node(reg);

		memory_present(nid, memblock_region_memory_base_pfn(reg),
			       memblock_region_memory_end_pfn(reg));
	}
}
#endif

static phys_addr_t memory_limit = (phys_addr_t)ULLONG_MAX;

/*
 * Limit the memory size that was specified via FDT.
 */
static int __init early_mem(char *p)
{
	if (!p)
		return 1;

	memory_limit = memparse(p, &p) & PAGE_MASK;
	pr_notice("Memory limited to %lldMB\n", memory_limit >> 20);

	return 0;
}
early_param("mem", early_mem);
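
/*
 * arm64_memblock_init() establishes the physical memory view that the
 * linear mapping can cover: it selects memstart_addr, clips memblock to
 * the addressable window, applies the "mem=" limit, and reserves the
 * kernel image and initrd before the first early allocations happen.
 */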
void __init arm64_memblock_init(void)
{
	const s64 linear_region_size = -(s64)PAGE_OFFSET;

	/*
	 * Ensure that the linear region takes up exactly half of the kernel
	 * virtual address space. This way, we can distinguish a linear address
	 * from a kernel/module/vmalloc address by testing a single bit.
	 */
	BUILD_BUG_ON(linear_region_size != BIT(VA_BITS - 1));

	/*
	 * Select a suitable value for the base of physical memory.
	 */
	memstart_addr = round_down(memblock_start_of_DRAM(),
				   ARM64_MEMSTART_ALIGN);

	/*
	 * Remove the memory that we will not be able to cover with the
	 * linear mapping. Take care not to clip the kernel which may be
	 * high in memory.
	 */
	memblock_remove(max_t(u64, memstart_addr + linear_region_size,
			__pa(_end)), ULLONG_MAX);
	if (memstart_addr + linear_region_size < memblock_end_of_DRAM()) {
		/* ensure that memstart_addr remains sufficiently aligned */
		memstart_addr = round_up(memblock_end_of_DRAM() - linear_region_size,
					 ARM64_MEMSTART_ALIGN);
		memblock_remove(0, memstart_addr);
	}

	/*
	 * Apply the memory limit if it was set. Since the kernel may be loaded
	 * high up in memory, add back the kernel region that must be accessible
	 * via the linear mapping.
	 */
	if (memory_limit != (phys_addr_t)ULLONG_MAX) {
		memblock_mem_limit_remove_map(memory_limit);
		memblock_add(__pa(_text), (u64)(_end - _text));
	}

	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && initrd_start) {
		/*
		 * Add back the memory we just removed if it results in the
		 * initrd becoming inaccessible via the linear mapping.
		 * Otherwise, this is a no-op.
		 */
		u64 base = initrd_start & PAGE_MASK;
		u64 size = PAGE_ALIGN(initrd_end) - base;

		/*
		 * We can only add back the initrd memory if we don't end up
		 * with more memory than we can address via the linear mapping.
		 * It is up to the bootloader to position the kernel and the
		 * initrd reasonably close to each other (i.e., within 32 GB of
		 * each other) so that all granule/#levels combinations can
		 * always access both.
		 */
		if (WARN(base < memblock_start_of_DRAM() ||
			 base + size > memblock_start_of_DRAM() +
				       linear_region_size,
			"initrd not fully accessible via the linear mapping -- please check your bootloader ...\n")) {
			initrd_start = 0;
		} else {
			memblock_remove(base, size); /* clear MEMBLOCK_ flags */
			memblock_add(base, size);
			memblock_reserve(base, size);
		}
	}

	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
		extern u16 memstart_offset_seed;
		u64 range = linear_region_size -
			    (memblock_end_of_DRAM() - memblock_start_of_DRAM());

		/*
		 * If the size of the linear region exceeds, by a sufficient
		 * margin, the size of the region that the available physical
		 * memory spans, randomize the linear region as well.
		 */
		if (memstart_offset_seed > 0 && range >= ARM64_MEMSTART_ALIGN) {
			range /= ARM64_MEMSTART_ALIGN;
			memstart_addr -= ARM64_MEMSTART_ALIGN *
					 ((range * memstart_offset_seed) >> 16);
		}
	}

	/*
	 * Register the kernel text, kernel data, initrd, and initial
	 * pagetables with memblock.
	 */
	memblock_reserve(__pa(_text), _end - _text);
#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start) {
		memblock_reserve(initrd_start, initrd_end - initrd_start);

		/* the generic initrd code expects virtual addresses */
		initrd_start = __phys_to_virt(initrd_start);
		initrd_end = __phys_to_virt(initrd_end);
	}
#endif

	early_init_fdt_scan_reserved_mem();

	/* 4GB maximum for 32-bit only capable devices */
	if (IS_ENABLED(CONFIG_ZONE_DMA))
		arm64_dma_phys_limit = max_zone_dma_phys();
	else
		arm64_dma_phys_limit = PHYS_MASK + 1;
	high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
	dma_contiguous_reserve(arm64_dma_phys_limit);

	memblock_allow_resize();
}
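
/*
 * bootmem_init() runs once memblock is final: it seeds NUMA and the
 * sparse memory model from the memblock regions and then sizes the
 * DMA/normal zones via zone_sizes_init().
 */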
void __init bootmem_init(void)
{
	unsigned long min, max;

	min = PFN_UP(memblock_start_of_DRAM());
	max = PFN_DOWN(memblock_end_of_DRAM());

	early_memtest(min << PAGE_SHIFT, max << PAGE_SHIFT);

	max_pfn = max_low_pfn = max;

	arm64_numa_init();
	/*
	 * Sparsemem tries to allocate bootmem in memory_present(), so must be
	 * done after the fixed reservations.
	 */
	arm64_memory_present();

	sparse_init();
	zone_sizes_init(min, max);

	memblock_dump_all();
}

#ifndef CONFIG_SPARSEMEM_VMEMMAP
static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	unsigned long pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn - 1) + 1;

	/*
	 * Convert to physical addresses, and round start upwards and end
	 * downwards.
	 */
	pg = (unsigned long)PAGE_ALIGN(__pa(start_pg));
	pgend = (unsigned long)__pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these, free the section of the
	 * memmap array.
	 */
	if (pg < pgend)
		free_bootmem(pg, pgend - pg);
}

/*
 * The mem_map array can get very big. Free the unused area of the memory map.
 */
static void __init free_unused_memmap(void)
{
	unsigned long start, prev_end = 0;
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		start = __phys_to_pfn(reg->base);

#ifdef CONFIG_SPARSEMEM
		/*
		 * Take care not to free memmap entries that don't exist due
		 * to SPARSEMEM sections which aren't present.
		 */
		start = min(start, ALIGN(prev_end, PAGES_PER_SECTION));
#endif
		/*
		 * If we had a previous bank, and there is a space between the
		 * current bank and the previous, free it.
		 */
		if (prev_end && prev_end < start)
			free_memmap(prev_end, start);

		/*
		 * Align up here since the VM subsystem insists that the
		 * memmap entries are valid from the bank end aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		prev_end = ALIGN(__phys_to_pfn(reg->base + reg->size),
				 MAX_ORDER_NR_PAGES);
	}

#ifdef CONFIG_SPARSEMEM
	if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
		free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION));
#endif
}
#endif	/* !CONFIG_SPARSEMEM_VMEMMAP */

/*
 * mem_init() marks the free areas in the mem_map and tells us how much memory
 * is free.  This is done after various parts of the system have claimed their
 * memory after the kernel image.
 */
void __init mem_init(void)
{
	if (swiotlb_force == SWIOTLB_FORCE ||
	    max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
		swiotlb_init(1);
	else
		swiotlb_force = SWIOTLB_NO_FORCE;

	set_max_mapnr(pfn_to_page(max_pfn) - mem_map);

#ifndef CONFIG_SPARSEMEM_VMEMMAP
	free_unused_memmap();
#endif
	/* this will put all unused low memory onto the freelists */
	free_all_bootmem();

	mem_init_print_info(NULL);
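
	/*
	 * Helper macros for the layout dump below: each expands to the base,
	 * the top, and the size of a region scaled to KB, MB or GB.
	 */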
#define MLK(b, t) b, t, ((t) - (b)) >> 10
#define MLM(b, t) b, t, ((t) - (b)) >> 20
#define MLG(b, t) b, t, ((t) - (b)) >> 30
#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)

	pr_notice("Virtual kernel memory layout:\n");
#ifdef CONFIG_KASAN
	pr_notice("    kasan   : 0x%16lx - 0x%16lx   (%6ld GB)\n",
		MLG(KASAN_SHADOW_START, KASAN_SHADOW_END));
#endif
	pr_notice("    modules : 0x%16lx - 0x%16lx   (%6ld MB)\n",
		MLM(MODULES_VADDR, MODULES_END));
	pr_notice("    vmalloc : 0x%16lx - 0x%16lx   (%6ld GB)\n",
		MLG(VMALLOC_START, VMALLOC_END));
	pr_notice("      .text : 0x%p" " - 0x%p" "   (%6ld KB)\n",
		MLK_ROUNDUP(_text, _etext));
	pr_notice("    .rodata : 0x%p" " - 0x%p" "   (%6ld KB)\n",
		MLK_ROUNDUP(__start_rodata, __init_begin));
	pr_notice("      .init : 0x%p" " - 0x%p" "   (%6ld KB)\n",
		MLK_ROUNDUP(__init_begin, __init_end));
	pr_notice("      .data : 0x%p" " - 0x%p" "   (%6ld KB)\n",
		MLK_ROUNDUP(_sdata, _edata));
	pr_notice("       .bss : 0x%p" " - 0x%p" "   (%6ld KB)\n",
		MLK_ROUNDUP(__bss_start, __bss_stop));
	pr_notice("    fixed   : 0x%16lx - 0x%16lx   (%6ld KB)\n",
		MLK(FIXADDR_START, FIXADDR_TOP));
	pr_notice("    PCI I/O : 0x%16lx - 0x%16lx   (%6ld MB)\n",
		MLM(PCI_IO_START, PCI_IO_END));
#ifdef CONFIG_SPARSEMEM_VMEMMAP
	pr_notice("    vmemmap : 0x%16lx - 0x%16lx   (%6ld GB maximum)\n",
		MLG(VMEMMAP_START, VMEMMAP_START + VMEMMAP_SIZE));
	pr_notice("              0x%16lx - 0x%16lx   (%6ld MB actual)\n",
		MLM((unsigned long)phys_to_page(memblock_start_of_DRAM()),
		    (unsigned long)virt_to_page(high_memory)));
#endif
	pr_notice("    memory  : 0x%16lx - 0x%16lx   (%6ld MB)\n",
		MLM(__phys_to_virt(memblock_start_of_DRAM()),
		    (unsigned long)high_memory));

#undef MLK
#undef MLM
#undef MLK_ROUNDUP

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can be
	 * detected at build time already.
	 */
#ifdef CONFIG_COMPAT
	BUILD_BUG_ON(TASK_SIZE_32 > TASK_SIZE_64);
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
	/*
	 * Make sure we chose the upper bound of sizeof(struct page)
	 * correctly when sizing the VMEMMAP array.
	 */
	BUILD_BUG_ON(sizeof(struct page) > (1 << STRUCT_PAGE_MAX_SHIFT));
#endif

	if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
		extern int sysctl_overcommit_memory;
		/*
		 * On a machine this small we won't get anywhere without
		 * overcommit, so turn it on by default.
		 */
		sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
	}
}

void free_initmem(void)
{
	free_reserved_area(__va(__pa(__init_begin)), __va(__pa(__init_end)),
			   0, "unused kernel");
	/*
	 * Unmap the __init region but leave the VM area in place. This
	 * prevents the region from being reused for kernel modules, which
	 * is not supported by kallsyms.
	 */
	unmap_kernel_range((u64)__init_begin, (u64)(__init_end - __init_begin));
}

#ifdef CONFIG_BLK_DEV_INITRD

static int keep_initrd __initdata;

void __init free_initrd_mem(unsigned long start, unsigned long end)
{
	if (!keep_initrd)
		free_reserved_area((void *)start, (void *)end, 0, "initrd");
}
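
/*
 * Passing "keepinitrd" on the kernel command line prevents the initrd
 * memory from being released after boot.
 */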
static int __init keepinitrd_setup(char *__unused)
{
	keep_initrd = 1;
	return 1;
}
__setup("keepinitrd", keepinitrd_setup);
#endif

/*
 * Dump out memory limit information on panic.
 */
static int dump_mem_limit(struct notifier_block *self, unsigned long v, void *p)
{
	if (memory_limit != (phys_addr_t)ULLONG_MAX) {
		pr_emerg("Memory Limit: %llu MB\n", memory_limit >> 20);
	} else {
		pr_emerg("Memory Limit: none\n");
	}
	return 0;
}

static struct notifier_block mem_limit_notifier = {
	.notifier_call = dump_mem_limit,
};

static int __init register_mem_limit_dumper(void)
{
	atomic_notifier_chain_register(&panic_notifier_list,
				       &mem_limit_notifier);
	return 0;
}
__initcall(register_mem_limit_dumper);