// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
#include <linux/memremap.h>
#include <linux/uaccess.h>

#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/machdep.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/iommu.h>

#include <mm/mmu_decl.h>

phys_addr_t memstart_addr = ~0;
EXPORT_SYMBOL_GPL(memstart_addr);
phys_addr_t kernstart_addr;
EXPORT_SYMBOL_GPL(kernstart_addr);
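/*
 * memstart_addr is the physical address at which system RAM begins and
 * kernstart_addr is the physical address the kernel image was loaded at;
 * both are filled in during early boot from the device tree and early
 * relocation code.
 */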

#ifdef CONFIG_SPARSEMEM_VMEMMAP

/*
 * Given an address within the vmemmap, determine the pfn of the page that
 * represents the start of the section it is within.  Note that we have to
 * do this by hand as the proffered address may not be correctly aligned.
 * Subtraction of non-aligned pointers produces undefined results.
 */
static unsigned long __meminit vmemmap_section_start(unsigned long page)
{
	unsigned long offset = page - ((unsigned long)(vmemmap));

	/* Return the pfn of the start of the section. */
	return (offset / sizeof(struct page)) & PAGE_SECTION_MASK;
}
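/*
 * Worked example (illustrative values only: assume sizeof(struct page) is
 * 64 bytes and PAGES_PER_SECTION is 0x10000): for an address at offset
 * 0x1234567 into the vmemmap, offset / 64 gives pfn 0x48d15, and masking
 * with PAGE_SECTION_MASK (~0xffff) yields 0x40000, the first pfn of the
 * section containing that struct page.
 */
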
/*
 * Check if this vmemmap page is already initialised.  If any section
 * which overlaps this vmemmap page is initialised then this page is
 * initialised already.
 */
static int __meminit vmemmap_populated(unsigned long start, int page_size)
{
	unsigned long end = start + page_size;

	start = (unsigned long)(pfn_to_page(vmemmap_section_start(start)));

	for (; start < end; start += (PAGES_PER_SECTION * sizeof(struct page)))
		if (pfn_valid(page_to_pfn((struct page *)start)))
			return 1;

	return 0;
}
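/*
 * Each iteration of the loop above advances by the vmemmap footprint of
 * one section (PAGES_PER_SECTION * sizeof(struct page) bytes), so every
 * section overlapping [start, end) is probed exactly once.
 */
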
/*
 * vmemmap virtual address space management does not have a traditional page
 * table to track which virtual struct pages are backed by a physical mapping.
 * The virtual to physical mappings are tracked in a simple linked list
 * format.  'vmemmap_list' maintains the entire vmemmap physical mapping at
 * all times, whereas the 'next' list maintains the available
 * vmemmap_backing structures which have been deleted from 'vmemmap_list'
 * during system runtime (memory hotplug remove operation).  The freed
 * 'vmemmap_backing' structures are reused later when new requests come in
 * without allocating fresh memory.  This pointer also tracks the allocated
 * 'vmemmap_backing' structures as we allocate one full page of memory at a
 * time when we don't have any.
 */
struct vmemmap_backing *vmemmap_list;
static struct vmemmap_backing *next;

/*
 * The same pointer 'next' tracks individual chunks inside the allocated
 * full page during boot, and then tracks the freed nodes at runtime.
 * This is racy in principle, but the two uses never overlap as they are
 * separated by the boot process; it would be a problem if a memory
 * hotplug operation somehow happened during boot!
 */
static int num_left;
static int num_freed;
static __meminit struct vmemmap_backing * vmemmap_list_alloc(int node)
{
	struct vmemmap_backing *vmem_back;
	/* get from freed entries first */
	if (num_freed) {
		num_freed--;
		vmem_back = next;
		next = next->list;

		return vmem_back;
	}

	/* allocate a page when required and hand out chunks */
	if (!num_left) {
		next = vmemmap_alloc_block(PAGE_SIZE, node);
		if (unlikely(!next)) {
			WARN_ON(1);
			return NULL;
		}
		num_left = PAGE_SIZE / sizeof(struct vmemmap_backing);
	}

	num_left--;

	return next++;
}
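/*
 * Design note: this allocator hands out vmemmap_backing nodes by carving
 * chunks out of whole pages from vmemmap_alloc_block() (this path can run
 * before the slab allocator is available), and prefers recycling nodes
 * that memory hot-remove returned to the 'next' free list.
 */
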
static __meminit void vmemmap_list_populate(unsigned long phys,
					    unsigned long start,
					    int node)
{
	struct vmemmap_backing *vmem_back;

	vmem_back = vmemmap_list_alloc(node);
	if (unlikely(!vmem_back)) {
		WARN_ON(1);
		return;
	}

	vmem_back->phys = phys;
	vmem_back->virt_addr = start;
	vmem_back->list = vmemmap_list;

	vmemmap_list = vmem_back;
}
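/*
 * The push above makes vmemmap_list a simple LIFO: the most recently
 * populated region is always at the head, e.g. after populating A then B
 * the list reads B -> A -> NULL.
 */
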
static bool altmap_cross_boundary(struct vmem_altmap *altmap, unsigned long start,
				  unsigned long page_size)
{
	unsigned long nr_pfn = page_size / sizeof(struct page);
	unsigned long start_pfn = page_to_pfn((struct page *)start);

	if ((start_pfn + nr_pfn - 1) > altmap->end_pfn)
		return true;

	if (start_pfn < altmap->base_pfn)
		return true;

	return false;
}
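/*
 * Illustration (assumed values): with a 16MB vmemmap page size and a
 * 64-byte struct page, one mapping covers nr_pfn = 16MB / 64 = 0x40000
 * pfns' worth of struct pages.  If [start_pfn, start_pfn + nr_pfn - 1]
 * pokes outside [altmap->base_pfn, altmap->end_pfn], the altmap cannot
 * back the whole mapping and the caller must fall back to system memory.
 */
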
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
			       struct vmem_altmap *altmap)
{
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

	/* Align to the page size of the linear mapping. */
	start = _ALIGN_DOWN(start, page_size);

	pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node);

	for (; start < end; start += page_size) {
		void *p = NULL;
		int rc;

		if (vmemmap_populated(start, page_size))
			continue;

		/*
		 * Allocate from the altmap first if we have one.  This may
		 * fail due to alignment issues when using 16MB hugepages, so
		 * fall back to system memory if the altmap allocation fails.
		 */
		if (altmap && !altmap_cross_boundary(altmap, start, page_size)) {
			p = altmap_alloc_block_buf(page_size, altmap);
			if (!p)
				pr_debug("altmap block allocation failed, falling back to system memory");
		}
		if (!p)
			p = vmemmap_alloc_block_buf(page_size, node);
		if (!p)
			return -ENOMEM;

		vmemmap_list_populate(__pa(p), start, node);

		pr_debug("      * %016lx..%016lx allocated at %p\n",
			 start, start + page_size, p);

		rc = vmemmap_create_mapping(start, page_size, __pa(p));
		if (rc < 0) {
			pr_warn("%s: Unable to create vmemmap mapping: %d\n",
				__func__, rc);
			return -EFAULT;
		}
	}

	return 0;
}
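/*
 * Flow summary: for each page_size-aligned chunk of the vmemmap that is
 * not already backed, allocate backing memory (altmap first, then system
 * memory), record the virt->phys pair in vmemmap_list, and install the
 * kernel mapping via vmemmap_create_mapping().
 */
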
#ifdef CONFIG_MEMORY_HOTPLUG
static unsigned long vmemmap_list_free(unsigned long start)
{
	struct vmemmap_backing *vmem_back, *vmem_back_prev;

	vmem_back_prev = vmem_back = vmemmap_list;

	/* look for it with prev pointer recorded */
	for (; vmem_back; vmem_back = vmem_back->list) {
		if (vmem_back->virt_addr == start)
			break;
		vmem_back_prev = vmem_back;
	}

	if (unlikely(!vmem_back)) {
		WARN_ON(1);
		return 0;
	}

	/* remove it from vmemmap_list */
	if (vmem_back == vmemmap_list) /* remove head */
		vmemmap_list = vmem_back->list;
	else
		vmem_back_prev->list = vmem_back->list;

	/* make 'next' point to this freed entry */
	vmem_back->list = next;
	next = vmem_back;
	num_freed++;

	return vmem_back->phys;
}
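/*
 * Note: a return value of 0 above means "not found"; the list is walked
 * with a trailing prev pointer so the node can be unlinked in O(1) once
 * located, and the caller treats 0 as "nothing to free" and skips the
 * chunk.
 */
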
void __ref vmemmap_free(unsigned long start, unsigned long end,
			struct vmem_altmap *altmap)
{
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
	unsigned long page_order = get_order(page_size);
	unsigned long alt_start = ~0, alt_end = ~0;
	unsigned long base_pfn;

	start = _ALIGN_DOWN(start, page_size);
	if (altmap) {
		alt_start = altmap->base_pfn;
		alt_end = altmap->base_pfn + altmap->reserve + altmap->free;
	}

	pr_debug("vmemmap_free %lx...%lx\n", start, end);

	for (; start < end; start += page_size) {
		unsigned long nr_pages, addr;
		struct page *page;

		/*
		 * The section has already been marked as invalid, so if
		 * vmemmap_populated() returns true, some other section
		 * still maps into this page; skip it.
		 */
		if (vmemmap_populated(start, page_size))
			continue;

		addr = vmemmap_list_free(start);
		if (!addr)
			continue;

		page = pfn_to_page(addr >> PAGE_SHIFT);
		nr_pages = 1 << page_order;
		base_pfn = PHYS_PFN(addr);

		if (base_pfn >= alt_start && base_pfn < alt_end) {
			vmem_altmap_free(altmap, nr_pages);
		} else if (PageReserved(page)) {
			/* allocated from bootmem */
			if (page_size < PAGE_SIZE) {
				/*
				 * this shouldn't happen, but if it is
				 * the case, leave the memory there
				 */
				WARN_ON_ONCE(1);
			} else {
				while (nr_pages--)
					free_reserved_page(page++);
			}
		} else {
			free_pages((unsigned long)(__va(addr)), page_order);
		}

		vmemmap_remove_mapping(start, page_size);
	}
}
#endif
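/*
 * This is a no-op on powerpc: the vmemmap_list above already tracks the
 * backing pages for each populated region, so there is no extra bootmem
 * info to record per section here.
 */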
void register_page_bootmem_memmap(unsigned long section_nr,
				  struct page *start_page, unsigned long size)
{
}

#endif /* CONFIG_SPARSEMEM_VMEMMAP */
#ifdef CONFIG_PPC_BOOK3S_64
static bool disable_radix = !IS_ENABLED(CONFIG_PPC_RADIX_MMU_DEFAULT);

static int __init parse_disable_radix(char *p)
{
	bool val;

	if (!p)
		val = true;
	else if (kstrtobool(p, &val))
		return -EINVAL;

	disable_radix = val;

	return 0;
}
early_param("disable_radix", parse_disable_radix);
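/*
 * Usage: booting with "disable_radix" (or "disable_radix=on") on the
 * kernel command line forces the hash MMU, while "disable_radix=off"
 * keeps radix eligible; kstrtobool() accepts the usual on/off/0/1/y/n
 * spellings, and a bare "disable_radix" counts as true.
 */
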
/*
 * If we're running under a hypervisor, we need to check the contents of
 * /chosen/ibm,architecture-vec-5 to see if the hypervisor is willing to do
 * radix.  If not, we clear the radix feature bit so we fall back to hash.
 */
static void __init early_check_vec5(void)
{
	unsigned long root, chosen;
	int size;
	const u8 *vec5;
	u8 mmu_supported;

	root = of_get_flat_dt_root();
	chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
	if (chosen == -FDT_ERR_NOTFOUND) {
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
		return;
	}
	vec5 = of_get_flat_dt_prop(chosen, "ibm,architecture-vec-5", &size);
	if (!vec5) {
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
		return;
	}
	if (size <= OV5_INDX(OV5_MMU_SUPPORT)) {
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
		return;
	}

	/* Check for supported configuration */
	mmu_supported = vec5[OV5_INDX(OV5_MMU_SUPPORT)] &
			OV5_FEAT(OV5_MMU_SUPPORT);
	if (mmu_supported == OV5_FEAT(OV5_MMU_RADIX)) {
		/* Hypervisor only supports radix - check enabled && GTSE */
		if (!early_radix_enabled()) {
			pr_warn("WARNING: Ignoring cmdline option disable_radix\n");
		}
		if (!(vec5[OV5_INDX(OV5_RADIX_GTSE)] &
		      OV5_FEAT(OV5_RADIX_GTSE))) {
			pr_warn("WARNING: Hypervisor doesn't support RADIX with GTSE\n");
		}
		/* Do radix anyway - the hypervisor said we had to */
		cur_cpu_spec->mmu_features |= MMU_FTR_TYPE_RADIX;
	} else if (mmu_supported == OV5_FEAT(OV5_MMU_HASH)) {
		/* Hypervisor only supports hash - disable radix */
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
	}
}
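/*
 * The OV5 helpers above index the ibm,architecture-vec-5 byte vector:
 * OV5_INDX() selects the byte and OV5_FEAT() the bit within it, matching
 * the option vector layout the guest negotiated with the hypervisor at
 * client-architecture-support (CAS) time.
 */
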
void __init mmu_early_init_devtree(void)
{
	/* Disable radix mode based on kernel command line. */
	if (disable_radix)
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;

	/*
	 * Check /chosen/ibm,architecture-vec-5 if running as a guest.
	 * When running bare-metal, we can use radix if we like
	 * even though the ibm,architecture-vec-5 property created by
	 * skiboot doesn't have the necessary bits set.
	 */
	if (!(mfmsr() & MSR_HV))
		early_check_vec5();

	if (early_radix_enabled()) {
		radix__early_init_devtree();
		/*
		 * We have finalized the translation we are going to use by now.
		 * Radix mode is not limited by RMA / VRMA addressing.
		 * Hence don't limit memblock allocations.
		 */
		ppc64_rma_size = ULONG_MAX;
		memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
	} else
		hash__early_init_devtree();
}
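/*
 * The radix branch above lifts the memblock limit because radix guests
 * are not confined to the RMA for early allocations; on the hash path,
 * ppc64_rma_size is left to be sized by the hash MMU setup code instead.
 */
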
#endif /* CONFIG_PPC_BOOK3S_64 */