// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 */

#undef DEBUG

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
#include <linux/memremap.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>
#include <linux/uaccess.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/tlb.h>
#include <asm/eeh.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/iommu.h>
#include <asm/vdso.h>
#include <asm/hugetlb.h>

#include <mm/mmu_decl.h>

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Given an address within the vmemmap, determine the page that
 * represents the start of the subsection it is within. Note that we have to
 * do this by hand as the proffered address may not be correctly aligned.
 * Subtraction of non-aligned pointers produces undefined results.
 */
static struct page * __meminit vmemmap_subsection_start(unsigned long vmemmap_addr)
{
	unsigned long start_pfn;
	unsigned long offset = vmemmap_addr - ((unsigned long)(vmemmap));

	/* Return the pfn of the start of the subsection. */
	start_pfn = (offset / sizeof(struct page)) & PAGE_SUBSECTION_MASK;
	return pfn_to_page(start_pfn);
}
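
/*
 * Worked example, assuming 64K base pages and a 64-byte struct page: an
 * address 100 struct pages into the vmemmap gives
 * offset / sizeof(struct page) = pfn 100, which PAGE_SUBSECTION_MASK
 * rounds down to pfn 96, the start of the enclosing 32-pfn (2MB)
 * subsection.
 */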

/*
 * Since memory is added in sub-section chunks, before creating a new vmemmap
 * mapping, the kernel should check whether there is an existing memmap mapping
 * covering the new subsection added. This is needed because the kernel can map
 * the vmemmap area using 16MB pages, which cover a memory range of 16GB. Such
 * a range covers multiple subsections (2MB each).
 *
 * If any subsection in the 16GB range mapped by vmemmap is valid, we consider
 * the vmemmap populated (there is a page table entry already present). We
 * can't do a page table lookup here because with hash translation we don't
 * keep vmemmap details in the Linux page table.
 */
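
/*
 * The arithmetic, end to end (again assuming 64K base pages and a 64-byte
 * struct page): one 16MB vmemmap block holds 16MB / 64B = 256K struct
 * pages, which describe 256K * 64KB = 16GB of memory, i.e. 8192 of the
 * 2MB subsections that memory hotplug operates on.
 */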
static int __meminit vmemmap_populated(unsigned long vmemmap_addr, int vmemmap_map_size)
{
	struct page *start;
	unsigned long vmemmap_end = vmemmap_addr + vmemmap_map_size;
	start = vmemmap_subsection_start(vmemmap_addr);

	for (; (unsigned long)start < vmemmap_end; start += PAGES_PER_SUBSECTION)
		/*
		 * The pfn_valid() check here is intended to really check
		 * whether we have any subsection already initialized
		 * in this range.
		 */
		if (pfn_valid(page_to_pfn(start)))
			return 1;

	return 0;
}

/*
 * vmemmap virtual address space management does not have a traditional page
 * table to track which virtual struct pages are backed by a physical mapping.
 * The virtual to physical mappings are instead tracked in a simple linked
 * list format. 'vmemmap_list' maintains the entire vmemmap physical mapping
 * at all times, whereas the 'next' list maintains the available
 * vmemmap_backing structures which have been deleted from the
 * 'vmemmap_list' during system runtime (memory hotplug remove operation).
 * The freed 'vmemmap_backing' structures are reused later when new requests
 * come in without allocating fresh memory. This pointer also tracks the
 * allocated 'vmemmap_backing' structures as we allocate one full page of
 * memory at a time when we don't have any.
 */
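
/*
 * Each entry is a 'struct vmemmap_backing' (see asm/pgalloc.h): a 'list'
 * link, the backing physical address 'phys', and the mapped virtual
 * address 'virt_addr'.
 */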
struct vmemmap_backing *vmemmap_list;
static struct vmemmap_backing *next;

/*
 * The same pointer 'next' tracks individual chunks inside the allocated
 * full page during boot, and again tracks the freed nodes during runtime.
 * This is racy, but the two uses never overlap because they are separated
 * by the boot process. It would create a problem if we somehow had a
 * memory hotplug operation during boot!
 */
static int num_left;
static int num_freed;
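
/*
 * Hand out one tracking structure, reusing entries that hot-remove has
 * returned to the free list before carving fresh ones out of a
 * page-sized block.
 */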
static __meminit struct vmemmap_backing * vmemmap_list_alloc(int node)
{
	struct vmemmap_backing *vmem_back;
	/* get from freed entries first */
	if (num_freed) {
		num_freed--;
		vmem_back = next;
		next = next->list;

		return vmem_back;
	}

	/* allocate a page when required and hand out chunks */
	if (!num_left) {
		next = vmemmap_alloc_block(PAGE_SIZE, node);
		if (unlikely(!next)) {
			WARN_ON(1);
			return NULL;
		}
		num_left = PAGE_SIZE / sizeof(struct vmemmap_backing);
	}

	num_left--;

	return next++;
}
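
/*
 * Record a virtual -> physical vmemmap mapping by pushing a new tracking
 * entry onto the head of 'vmemmap_list'.
 */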
static __meminit int vmemmap_list_populate(unsigned long phys,
					   unsigned long start,
					   int node)
{
	struct vmemmap_backing *vmem_back;

	vmem_back = vmemmap_list_alloc(node);
	if (unlikely(!vmem_back)) {
		pr_debug("vmemmap list allocation failed\n");
		return -ENOMEM;
	}

	vmem_back->phys = phys;
	vmem_back->virt_addr = start;
	vmem_back->list = vmemmap_list;

	vmemmap_list = vmem_back;
	return 0;
}
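
/*
 * Check whether all of the pfns described by this vmemmap block lie
 * inside the range the altmap device memory can serve; allocating from
 * the altmap is only safe when they do.
 */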
static bool altmap_cross_boundary(struct vmem_altmap *altmap, unsigned long start,
				  unsigned long page_size)
{
	unsigned long nr_pfn = page_size / sizeof(struct page);
	unsigned long start_pfn = page_to_pfn((struct page *)start);

	if ((start_pfn + nr_pfn - 1) > altmap->end_pfn)
		return true;

	if (start_pfn < altmap->base_pfn)
		return true;

	return false;
}
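
/*
 * Map the vmemmap range [start, end), preferring altmap-backed blocks
 * and recording each mapping in 'vmemmap_list' so that it can be torn
 * down again on hot-remove.
 */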
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
		struct vmem_altmap *altmap)
{
	bool altmap_alloc;
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

	/* Align to the page size of the linear mapping. */
	start = ALIGN_DOWN(start, page_size);

	pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node);

	for (; start < end; start += page_size) {
		void *p = NULL;
		int rc;

		/*
		 * This vmemmap range is backing different subsections. If any
		 * of those subsections is marked valid, that means we already
		 * have initialized a page table covering this range and hence
		 * the vmemmap range is populated.
		 */
		if (vmemmap_populated(start, page_size))
			continue;

		/*
		 * Allocate from the altmap first if we have one. This may
		 * fail due to alignment issues when using 16MB hugepages, so
		 * fall back to system memory if the altmap allocation fails.
		 */
		if (altmap && !altmap_cross_boundary(altmap, start, page_size)) {
			p = vmemmap_alloc_block_buf(page_size, node, altmap);
			if (!p)
				pr_debug("altmap block allocation failed, falling back to system memory\n");
			else
				altmap_alloc = true;
		}
		if (!p) {
			p = vmemmap_alloc_block_buf(page_size, node, NULL);
			altmap_alloc = false;
		}
		if (!p)
			return -ENOMEM;

		if (vmemmap_list_populate(__pa(p), start, node)) {
			/*
			 * If we don't populate the vmemmap list, we don't have
			 * the ability to free the allocated vmemmap
			 * pages in section_deactivate. Hence free them
			 * here.
			 */
			int nr_pfns = page_size >> PAGE_SHIFT;
			unsigned long page_order = get_order(page_size);

			if (altmap_alloc)
				vmem_altmap_free(altmap, nr_pfns);
			else
				free_pages((unsigned long)p, page_order);
			return -ENOMEM;
		}

		pr_debug("      * %016lx..%016lx allocated at %p\n",
			 start, start + page_size, p);

		rc = vmemmap_create_mapping(start, page_size, __pa(p));
		if (rc < 0) {
			pr_warn("%s: Unable to create vmemmap mapping: %d\n",
				__func__, rc);
			return -EFAULT;
		}
	}

	return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
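
/*
 * Unlink the tracking entry for virtual address 'start' from
 * 'vmemmap_list', push it onto the free list and return the physical
 * address it recorded, or 0 if no entry matches.
 */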
static unsigned long vmemmap_list_free(unsigned long start)
{
	struct vmemmap_backing *vmem_back, *vmem_back_prev;

	vmem_back_prev = vmem_back = vmemmap_list;

	/* look for it with prev pointer recorded */
	for (; vmem_back; vmem_back = vmem_back->list) {
		if (vmem_back->virt_addr == start)
			break;
		vmem_back_prev = vmem_back;
	}

	if (unlikely(!vmem_back))
		return 0;

	/* remove it from vmemmap_list */
	if (vmem_back == vmemmap_list) /* remove head */
		vmemmap_list = vmem_back->list;
	else
		vmem_back_prev->list = vmem_back->list;

	/* next points to this freed entry */
	vmem_back->list = next;
	next = vmem_back;
	num_freed++;

	return vmem_back->phys;
}

void __ref vmemmap_free(unsigned long start, unsigned long end,
		struct vmem_altmap *altmap)
{
	unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
	unsigned long page_order = get_order(page_size);
	unsigned long alt_start = ~0, alt_end = ~0;
	unsigned long base_pfn;

	start = ALIGN_DOWN(start, page_size);
	if (altmap) {
		alt_start = altmap->base_pfn;
		alt_end = altmap->base_pfn + altmap->reserve + altmap->free;
	}

	pr_debug("vmemmap_free %lx...%lx\n", start, end);

	for (; start < end; start += page_size) {
		unsigned long nr_pages, addr;
		struct page *page;

		/*
		 * We have already marked the subsection we are trying to
		 * remove invalid. So if we want to remove this vmemmap range,
		 * we need to make sure there is no other subsection marked
		 * valid in this range.
		 */
		if (vmemmap_populated(start, page_size))
			continue;

		addr = vmemmap_list_free(start);
		if (!addr)
			continue;

		page = pfn_to_page(addr >> PAGE_SHIFT);
		nr_pages = 1 << page_order;
		base_pfn = PHYS_PFN(addr);

		if (base_pfn >= alt_start && base_pfn < alt_end) {
			vmem_altmap_free(altmap, nr_pages);
		} else if (PageReserved(page)) {
			/* allocated from bootmem */
			if (page_size < PAGE_SIZE) {
				/*
				 * this shouldn't happen, but if it is
				 * the case, leave the memory there
				 */
				WARN_ON_ONCE(1);
			} else {
				while (nr_pages--)
					free_reserved_page(page++);
			}
		} else {
			free_pages((unsigned long)(__va(addr)), page_order);
		}

		vmemmap_remove_mapping(start, page_size);
	}
}
#endif
void register_page_bootmem_memmap(unsigned long section_nr,
				  struct page *start_page, unsigned long size)
{
}

#endif /* CONFIG_SPARSEMEM_VMEMMAP */

#ifdef CONFIG_PPC_BOOK3S_64
unsigned int mmu_lpid_bits;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
EXPORT_SYMBOL_GPL(mmu_lpid_bits);
#endif
unsigned int mmu_pid_bits;

static bool disable_radix = !IS_ENABLED(CONFIG_PPC_RADIX_MMU_DEFAULT);
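
/*
 * "disable_radix" on its own, or with a true-ish argument, forces hash
 * MMU mode; "disable_radix=no" leaves radix enabled.
 */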
static int __init parse_disable_radix(char *p)
{
	bool val;

	if (!p)
		val = true;
	else if (kstrtobool(p, &val))
		return -EINVAL;

	disable_radix = val;

	return 0;
}
early_param("disable_radix", parse_disable_radix);

/*
 * If we're running under a hypervisor, we need to check the contents of
 * /chosen/ibm,architecture-vec-5 to see if the hypervisor is willing to do
 * radix. If not, we clear the radix feature bit so we fall back to hash.
 */
static void __init early_check_vec5(void)
{
	unsigned long root, chosen;
	int size;
	const u8 *vec5;
	u8 mmu_supported;

	root = of_get_flat_dt_root();
	chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
	if (chosen == -FDT_ERR_NOTFOUND) {
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
		return;
	}
	vec5 = of_get_flat_dt_prop(chosen, "ibm,architecture-vec-5", &size);
	if (!vec5) {
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
		return;
	}
	if (size <= OV5_INDX(OV5_MMU_SUPPORT)) {
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
		return;
	}

	/* Check for supported configuration */
	mmu_supported = vec5[OV5_INDX(OV5_MMU_SUPPORT)] &
			OV5_FEAT(OV5_MMU_SUPPORT);
	if (mmu_supported == OV5_FEAT(OV5_MMU_RADIX)) {
		/* Hypervisor only supports radix - check enabled && GTSE */
		if (!early_radix_enabled()) {
			pr_warn("WARNING: Ignoring cmdline option disable_radix\n");
		}
		if (!(vec5[OV5_INDX(OV5_RADIX_GTSE)] &
					OV5_FEAT(OV5_RADIX_GTSE))) {
			cur_cpu_spec->mmu_features &= ~MMU_FTR_GTSE;
		} else
			cur_cpu_spec->mmu_features |= MMU_FTR_GTSE;
		/* Do radix anyway - the hypervisor said we had to */
		cur_cpu_spec->mmu_features |= MMU_FTR_TYPE_RADIX;
	} else if (mmu_supported == OV5_FEAT(OV5_MMU_HASH)) {
		/* Hypervisor only supports hash - disable radix */
		cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
		cur_cpu_spec->mmu_features &= ~MMU_FTR_GTSE;
	}
}
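
/*
 * of_scan_flat_dt() callback: pick up the PID/LPID register widths from
 * the first "cpu" node that advertises them.
 */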
static int __init dt_scan_mmu_pid_width(unsigned long node,
					const char *uname, int depth,
					void *data)
{
	int size = 0;
	const __be32 *prop;
	const char *type = of_get_flat_dt_prop(node, "device_type", NULL);

	/* We are scanning "cpu" nodes only */
	if (type == NULL || strcmp(type, "cpu") != 0)
		return 0;

	/* Find MMU LPID, PID register size */
	prop = of_get_flat_dt_prop(node, "ibm,mmu-lpid-bits", &size);
	if (prop && size == 4)
		mmu_lpid_bits = be32_to_cpup(prop);

	prop = of_get_flat_dt_prop(node, "ibm,mmu-pid-bits", &size);
	if (prop && size == 4)
		mmu_pid_bits = be32_to_cpup(prop);

	if (!mmu_pid_bits && !mmu_lpid_bits)
		return 0;

	return 1;
}
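
/*
 * Decide, from the command line, the device tree and the CPU features,
 * whether this kernel will use the radix or the hash MMU, and finish the
 * early setup for the chosen mode.
 */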
void __init mmu_early_init_devtree(void)
{
	bool hvmode = !!(mfmsr() & MSR_HV);

	/* Disable radix mode based on kernel command line. */
	if (disable_radix) {
		if (IS_ENABLED(CONFIG_PPC_64S_HASH_MMU))
			cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
		else
			pr_warn("WARNING: Ignoring cmdline option disable_radix\n");
	}

	of_scan_flat_dt(dt_scan_mmu_pid_width, NULL);
	if (hvmode && !mmu_lpid_bits) {
		if (early_cpu_has_feature(CPU_FTR_ARCH_207S))
			mmu_lpid_bits = 12; /* POWER8-10 */
		else
			mmu_lpid_bits = 10; /* POWER7 */
	}
	if (!mmu_pid_bits) {
		if (early_cpu_has_feature(CPU_FTR_ARCH_300))
			mmu_pid_bits = 20; /* POWER9-10 */
	}

	/*
	 * Check /chosen/ibm,architecture-vec-5 if running as a guest.
	 * When running bare-metal, we can use radix if we like
	 * even though the ibm,architecture-vec-5 property created by
	 * skiboot doesn't have the necessary bits set.
	 */
	if (!hvmode)
		early_check_vec5();

	if (early_radix_enabled()) {
		radix__early_init_devtree();

		/*
		 * We have finalized the translation we are going to use by now.
		 * Radix mode is not limited by RMA / VRMA addressing.
		 * Hence don't limit memblock allocations.
		 */
		ppc64_rma_size = ULONG_MAX;
		memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
	} else
		hash__early_init_devtree();

	if (IS_ENABLED(CONFIG_HUGETLB_PAGE_SIZE_VARIABLE))
		hugetlbpage_init_defaultsize();

	if (!(cur_cpu_spec->mmu_features & MMU_FTR_HPTE_TABLE) &&
	    !(cur_cpu_spec->mmu_features & MMU_FTR_TYPE_RADIX))
		panic("kernel does not support any MMU type offered by platform");
}
#endif /* CONFIG_PPC_BOOK3S_64 */