// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *  Copyright (C) 1996 Paul Mackerras
 *  PPC44x/36-bit changes by Matt Porter (mporter@mvista.com)
 *
 *  Derived from "arch/i386/mm/init.c":
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */
#include <linux/memblock.h>
#include <linux/highmem.h>
#include <linux/suspend.h>
#include <linux/dma-direct.h>

#include <asm/swiotlb.h>
#include <asm/machdep.h>
#include <asm/rtas.h>
#include <asm/kasan.h>
#include <asm/svm.h>
#include <asm/mmzone.h>
#include <asm/ftrace.h>
#include <asm/code-patching.h>
#include <asm/setup.h>
#include <asm/fixmap.h>

#include <mm/mmu_decl.h>
unsigned long long memory_limit;

unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);
/* Choose the pgprot for a user mapping of the physical range at @pfn. */
pgprot_t __phys_mem_access_prot(unsigned long pfn, unsigned long size,
				pgprot_t vma_prot)
{
	if (ppc_md.phys_mem_access_prot)
		return ppc_md.phys_mem_access_prot(pfn, size, vma_prot);

	if (!page_is_ram(pfn))
		vma_prot = pgprot_noncached(vma_prot);

	return vma_prot;
}
EXPORT_SYMBOL(__phys_mem_access_prot);
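/*
 * Usage sketch (illustrative, not part of the original file): a driver
 * mapping physical memory into userspace would typically apply this in
 * its mmap handler, e.g.
 *
 *	vma->vm_page_prot = __phys_mem_access_prot(pfn, size,
 *						   vma->vm_page_prot);
 *
 * so that MMIO and other non-RAM pages end up mapped non-cacheable.
 */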
#ifdef CONFIG_MEMORY_HOTPLUG
static DEFINE_MUTEX(linear_mapping_mutex);

#ifdef CONFIG_NUMA
int memory_add_physaddr_to_nid(u64 start)
{
	return hot_add_scn_to_nid(start);
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif
int __weak create_section_mapping(unsigned long start, unsigned long end,
				  int nid, pgprot_t prot)
{
	return -ENODEV;
}

int __weak remove_section_mapping(unsigned long start, unsigned long end)
{
	return -ENODEV;
}
int __ref arch_create_linear_mapping(int nid, u64 start, u64 size,
				     struct mhp_params *params)
{
	int rc;

	start = (unsigned long)__va(start);
	mutex_lock(&linear_mapping_mutex);
	rc = create_section_mapping(start, start + size, nid,
				    params->pgprot);
	mutex_unlock(&linear_mapping_mutex);
	if (rc) {
		pr_warn("Unable to create linear mapping for 0x%llx..0x%llx: %d\n",
			start, start + size, rc);
		return -EFAULT;
	}
	return 0;
}
void __ref arch_remove_linear_mapping(u64 start, u64 size)
{
	int ret;

	/* Remove htab bolted mappings for this section of memory */
	start = (unsigned long)__va(start);

	mutex_lock(&linear_mapping_mutex);
	ret = remove_section_mapping(start, start + size);
	mutex_unlock(&linear_mapping_mutex);
	if (ret)
		pr_warn("Unable to remove linear mapping for 0x%llx..0x%llx: %d\n",
			start, start + size, ret);

	/* Ensure all vmalloc mappings are flushed in case they also
	 * hit that section of memory
	 */
	vm_unmap_aliases();
}
/*
 * After memory hotplug the variables max_pfn, max_low_pfn and high_memory need
 * to be updated.
 */
static void update_end_of_memory_vars(u64 start, u64 size)
{
	unsigned long end_pfn = PFN_UP(start + size);

	if (end_pfn > max_pfn) {
		max_pfn = end_pfn;
		max_low_pfn = end_pfn;
		high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
	}
}
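/*
 * Worked example (illustrative): with 4K pages, hot-adding 1 GiB at the
 * 4 GiB boundary gives end_pfn = PFN_UP(0x140000000) = 0x140000, so
 * max_pfn, max_low_pfn and high_memory all move up to cover the new range.
 */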
int __ref add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
		    struct mhp_params *params)
{
	int ret;

	ret = __add_pages(nid, start_pfn, nr_pages, params);
	if (ret)
		return ret;

	/* update max_pfn, max_low_pfn and high_memory */
	update_end_of_memory_vars(start_pfn << PAGE_SHIFT,
				  nr_pages << PAGE_SHIFT);

	return ret;
}
int __ref arch_add_memory(int nid, u64 start, u64 size,
			  struct mhp_params *params)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int rc;

	rc = arch_create_linear_mapping(nid, start, size, params);
	if (rc)
		return rc;
	rc = add_pages(nid, start_pfn, nr_pages, params);
	if (rc)
		arch_remove_linear_mapping(start, size);
	return rc;
}
void __ref arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	__remove_pages(start_pfn, nr_pages, altmap);
	arch_remove_linear_mapping(start, size);
}
#endif /* CONFIG_MEMORY_HOTPLUG */
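/*
 * Ordering sketch (illustrative): hot-add first extends the linear
 * mapping, then onlines the pages, rolling the mapping back on failure;
 * hot-remove does the mirror image, so pages and mapping never get out
 * of sync:
 *
 *	arch_add_memory()    = arch_create_linear_mapping() + add_pages()
 *	arch_remove_memory() = __remove_pages() + arch_remove_linear_mapping()
 */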
#ifndef CONFIG_NUMA
void __init mem_topology_setup(void)
{
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = MEMORY_START >> PAGE_SHIFT;
#ifdef CONFIG_HIGHMEM
	max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
#endif

	/* Place all memblock_regions in the same node and merge contiguous
	 * memblock_regions
	 */
	memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
}
void __init initmem_init(void)
{
	sparse_init();
}
/* mark pages that don't exist as nosave */
static int __init mark_nonram_nosave(void)
{
	unsigned long spfn, epfn, prev = 0;
	int i;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &spfn, &epfn, NULL) {
		if (prev && prev < spfn)
			register_nosave_region(prev, spfn);

		prev = epfn;
	}

	return 0;
}
#else /* CONFIG_NUMA */
static int __init mark_nonram_nosave(void)
{
	return 0;
}
#endif
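/*
 * Worked example (illustrative): with RAM at pfns [0, 0x40000) and
 * [0x80000, 0xc0000), the loop above registers [0x40000, 0x80000) as a
 * nosave region, so hibernation skips the hole between the two ranges.
 */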
/*
 * Zones usage:
 *
 * We setup ZONE_DMA to be 31-bits on all platforms and ZONE_NORMAL to be
 * everything else. GFP_DMA32 page allocations automatically fall back to
 * ZONE_DMA.
 *
 * By using 31-bit unconditionally, we can exploit zone_dma_bits to inform the
 * generic DMA mapping code. 32-bit only devices (if not handled by an IOMMU
 * anyway) will take a first dip into ZONE_NORMAL and get otherwise served by
 * ZONE_DMA.
 */
static unsigned long max_zone_pfns[MAX_NR_ZONES];
/*
 * paging_init() sets up the page tables - in fact we've already done this.
 */
void __init paging_init(void)
{
	unsigned long long total_ram = memblock_phys_mem_size();
	phys_addr_t top_of_ram = memblock_end_of_DRAM();

#ifdef CONFIG_HIGHMEM
	unsigned long v = __fix_to_virt(FIX_KMAP_END);
	unsigned long end = __fix_to_virt(FIX_KMAP_BEGIN);

	for (; v < end; v += PAGE_SIZE)
		map_kernel_page(v, 0, __pgprot(0)); /* XXX gross */

	map_kernel_page(PKMAP_BASE, 0, __pgprot(0)); /* XXX gross */
	pkmap_page_table = virt_to_kpte(PKMAP_BASE);
#endif /* CONFIG_HIGHMEM */

	printk(KERN_DEBUG "Top of RAM: 0x%llx, Total RAM: 0x%llx\n",
	       (unsigned long long)top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (long int)((top_of_ram - total_ram) >> 20));

	/*
	 * Allow 30-bit DMA for very limited Broadcom wifi chips on many
	 * powerbooks.
	 */
	if (IS_ENABLED(CONFIG_PPC32))
		zone_dma_bits = 30;
	else
		zone_dma_bits = 31;

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = min(max_low_pfn,
				      1UL << (zone_dma_bits - PAGE_SHIFT));
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = max_pfn;
#endif

	free_area_init(max_zone_pfns);

	mark_nonram_nosave();
}
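/*
 * Worked example (illustrative): on a 64-bit platform with 4K pages,
 * zone_dma_bits = 31 caps ZONE_DMA at 1UL << (31 - 12) = 0x80000 pfns,
 * i.e. the first 2 GiB of RAM; everything from there up to max_low_pfn
 * lands in ZONE_NORMAL.
 */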
void __init mem_init(void)
{
	/*
	 * book3s is limited to 16 page sizes due to encoding this in
	 * a 4-bit field for slices.
	 */
	BUILD_BUG_ON(MMU_PAGE_COUNT > 16);

#ifdef CONFIG_SWIOTLB
	/*
	 * Some platforms (e.g. 85xx) limit DMA-able memory way below
	 * 4G. We force memblock to bottom-up mode to ensure that the
	 * memory allocated in swiotlb_init() is DMA-able.
	 * As it's the last memblock allocation, no need to reset it
	 * back to top-down.
	 */
	memblock_set_bottom_up(true);
	swiotlb_init(ppc_swiotlb_enable, ppc_swiotlb_flags);
#endif

	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

	kasan_late_init();

	memblock_free_all();

#ifdef CONFIG_HIGHMEM
	{
		unsigned long pfn, highmem_mapnr;

		highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
		for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
			phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT;
			struct page *page = pfn_to_page(pfn);

			if (memblock_is_memory(paddr) && !memblock_is_reserved(paddr))
				free_highmem_page(page);
		}
	}
#endif /* CONFIG_HIGHMEM */

#if defined(CONFIG_PPC_E500) && !defined(CONFIG_SMP)
	/*
	 * If smp is enabled, next_tlbcam_idx is initialized in the cpu up
	 * functions.... do it here for the non-smp case.
	 */
	per_cpu(next_tlbcam_idx, smp_processor_id()) =
		(mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif

#ifdef CONFIG_PPC32
	pr_info("Kernel virtual memory layout:\n");
#ifdef CONFIG_KASAN
	pr_info("  * 0x%08lx..0x%08lx  : kasan shadow mem\n",
		KASAN_SHADOW_START, KASAN_SHADOW_END);
#endif
	pr_info("  * 0x%08lx..0x%08lx  : fixmap\n", FIXADDR_START, FIXADDR_TOP);
#ifdef CONFIG_HIGHMEM
	pr_info("  * 0x%08lx..0x%08lx  : highmem PTEs\n",
		PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP));
#endif /* CONFIG_HIGHMEM */
	if (ioremap_bot != IOREMAP_TOP)
		pr_info("  * 0x%08lx..0x%08lx  : early ioremap\n",
			ioremap_bot, IOREMAP_TOP);
	pr_info("  * 0x%08lx..0x%08lx  : vmalloc & ioremap\n",
		VMALLOC_START, VMALLOC_END);
#ifdef MODULES_VADDR
	pr_info("  * 0x%08lx..0x%08lx  : modules\n",
		MODULES_VADDR, MODULES_END);
#endif
#endif /* CONFIG_PPC32 */
}
void free_initmem(void)
{
	ppc_md.progress = ppc_printk_progress;
	mark_initmem_nx();
	free_initmem_default(POISON_FREE_INITMEM);
	ftrace_free_init_tramp();
}
/*
 * System memory should not be in /proc/iomem but various tools expect it
 * (eg kdump).
 */
static int __init add_system_ram_resources(void)
{
	phys_addr_t start, end;
	u64 i;

	for_each_mem_range(i, &start, &end) {
		struct resource *res;

		res = kzalloc(sizeof(struct resource), GFP_KERNEL);
		WARN_ON(!res);

		if (res) {
			res->name = "System RAM";
			res->start = start;
			/*
			 * In memblock, end points to the first byte after
			 * the range while in resources, end points to the
			 * last byte in the range.
			 */
			res->end = end - 1;
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
			WARN_ON(request_resource(&iomem_resource, res) < 0);
		}
	}

	return 0;
}
subsys_initcall(add_system_ram_resources);
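/*
 * Illustrative result (addresses made up): after this initcall each
 * memblock range shows up as a busy /proc/iomem entry such as
 *
 *	00000000-7fffffff : System RAM
 */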
#ifdef CONFIG_STRICT_DEVMEM
/*
 * devmem_is_allowed(): check to see if /dev/mem access to a certain address
 * is valid. The argument is a physical page number.
 *
 * Access has to be given to non-kernel-ram areas as well, these contain the
 * PCI mmio resources as well as potential bios/acpi data regions.
 */
int devmem_is_allowed(unsigned long pfn)
{
	if (page_is_rtas_user_buf(pfn))
		return 1;
	if (iomem_is_exclusive(PFN_PHYS(pfn)))
		return 0;
	if (!page_is_ram(pfn))
		return 1;
	return 0;
}
#endif /* CONFIG_STRICT_DEVMEM */
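/*
 * Policy summary (illustrative): under CONFIG_STRICT_DEVMEM, /dev/mem may
 * touch RTAS user buffers and non-RAM pages such as PCI MMIO, but never
 * ordinary kernel RAM or regions claimed exclusive in the iomem tree.
 */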
/*
 * This is defined in kernel/resource.c but only powerpc needs to export it, for
 * the EHEA driver. Drop this when drivers/net/ethernet/ibm/ehea is removed.
 */
EXPORT_SYMBOL_GPL(walk_system_ram_range);