/*
 *  Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
 *  and Cort Dougan (PReP) (cort@cs.nmt.edu)
 *    Copyright (C) 1996 Paul Mackerras
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 *
 *  Dave Engebretsen <engebret@us.ibm.com>
 *      Rework for PPC64 port.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/highmem.h>
#include <linux/idr.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <linux/poison.h>
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>

#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/machdep.h>
#include <asm/processor.h>
#include <asm/mmzone.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/iommu.h>
#ifdef CONFIG_PPC_STD_MMU_64
#if H_PGTABLE_RANGE > USER_VSID_RANGE
#warning Limited user VSID range means pagetable space is wasted
#endif

#if (TASK_SIZE_USER64 < H_PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
#warning TASK_SIZE is smaller than it needs to be.
#endif
#endif /* CONFIG_PPC_STD_MMU_64 */
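
/*
 * Physical address of the start of system memory and of the kernel image;
 * both are established during early boot.
 */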
phys_addr_t memstart_addr = ~0;
EXPORT_SYMBOL_GPL(memstart_addr);
phys_addr_t kernstart_addr;
EXPORT_SYMBOL_GPL(kernstart_addr);
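
/*
 * Constructors for the pagetable kmem_caches below: every newly
 * allocated table starts out fully cleared.
 */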
static void pgd_ctor(void *addr)
{
        memset(addr, 0, PGD_TABLE_SIZE);
}

static void pud_ctor(void *addr)
{
        memset(addr, 0, PUD_TABLE_SIZE);
}

static void pmd_ctor(void *addr)
{
        memset(addr, 0, PMD_TABLE_SIZE);
}

struct kmem_cache *pgtable_cache[MAX_PGTABLE_INDEX_SIZE];

/*
 * Create a kmem_cache() for pagetables.  This is not used for PTE
 * pages - they're linked to struct page, come from the normal free
 * pages pool and have a different entry size (see real_pte_t) to
 * everything else.  Caches created by this function are used for all
 * the higher level pagetables, and for hugepage pagetables.
 */
void pgtable_cache_add(unsigned shift, void (*ctor)(void *))
{
        char *name;
        unsigned long table_size = sizeof(void *) << shift;
        unsigned long align = table_size;

        /* When batching pgtable pointers for RCU freeing, we store
         * the index size in the low bits.  Table alignment must be
         * big enough to fit it.
         *
         * Likewise, hugepage pagetable pointers contain a (different)
         * shift value in the low bits.  All tables must be aligned so
         * as to leave enough 0 bits in the address to contain it. */
        unsigned long minalign = max(MAX_PGTABLE_INDEX_SIZE + 1,
                                     HUGEPD_SHIFT_MASK + 1);
        struct kmem_cache *new;

        /* It would be nice if this was a BUILD_BUG_ON(), but at the
         * moment, gcc doesn't seem to recognize is_power_of_2 as a
         * constant expression, so, so much for that. */
        BUG_ON(!is_power_of_2(minalign));
        BUG_ON((shift < 1) || (shift > MAX_PGTABLE_INDEX_SIZE));

        if (PGT_CACHE(shift))
                return; /* Already have a cache of this size */

        align = max_t(unsigned long, align, minalign);
        name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift);
        new = kmem_cache_create(name, table_size, align, 0, ctor);
        kfree(name);
        pgtable_cache[shift - 1] = new;
        pr_debug("Allocated pgtable cache for order %d\n", shift);
}
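
/*
 * Consumers look the matching cache up with PGT_CACHE(shift) and allocate
 * tables from it.  A minimal sketch of such a consumer (the GFP flags and
 * error handling here are illustrative, not the exact allocator code):
 *
 *      pgd_t *pgd = kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), GFP_KERNEL);
 *      if (pgd)
 *              kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
 */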
void pgtable_cache_init(void)
{
        pgtable_cache_add(PGD_INDEX_SIZE, pgd_ctor);
        pgtable_cache_add(PMD_CACHE_INDEX, pmd_ctor);
        /*
         * In all current configs, when the PUD index exists it's the
         * same size as either the pgd or pmd index, except with THP enabled
         * on book3s 64.
         */
        if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE))
                pgtable_cache_add(PUD_INDEX_SIZE, pud_ctor);

        if (!PGT_CACHE(PGD_INDEX_SIZE) || !PGT_CACHE(PMD_CACHE_INDEX))
                panic("Couldn't allocate pgtable caches");
        if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE))
                panic("Couldn't allocate pud pgtable caches");
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Given an address within the vmemmap, determine the pfn of the page that
 * represents the start of the section it is within.  Note that we have to
 * do this by hand as the proffered address may not be correctly aligned.
 * Subtraction of non-aligned pointers produces undefined results.
 */
static unsigned long __meminit vmemmap_section_start(unsigned long page)
{
        unsigned long offset = page - ((unsigned long)(vmemmap));

        /* Return the pfn of the start of the section. */
        return (offset / sizeof(struct page)) & PAGE_SECTION_MASK;
}

/*
 * Check if this vmemmap page is already initialised.  If any section
 * which overlaps this vmemmap page is initialised then this page is
 * initialised already.
 */
static int __meminit vmemmap_populated(unsigned long start, int page_size)
{
        unsigned long end = start + page_size;
        start = (unsigned long)(pfn_to_page(vmemmap_section_start(start)));

        for (; start < end; start += (PAGES_PER_SECTION * sizeof(struct page)))
                if (pfn_valid(page_to_pfn((struct page *)start)))
                        return 1;

        return 0;
}
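
/*
 * Each physical block that backs part of the vmemmap is recorded on a
 * simple linked list, so that memory hot-remove and realmode_pfn_to_page()
 * below can translate a vmemmap virtual address back to its backing
 * physical address.
 */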
struct vmemmap_backing *vmemmap_list;
static struct vmemmap_backing *next;
static int num_left;
static int num_freed;

static __meminit struct vmemmap_backing * vmemmap_list_alloc(int node)
{
        struct vmemmap_backing *vmem_back;

        /* get from freed entries first */
        if (num_freed) {
                num_freed--;
                vmem_back = next;
                next = next->list;
                return vmem_back;
        }

        /* allocate a page when required and hand out chunks */
        if (!num_left) {
                next = vmemmap_alloc_block(PAGE_SIZE, node);
                if (unlikely(!next)) {
                        WARN_ON(1);
                        return NULL;
                }
                num_left = PAGE_SIZE / sizeof(struct vmemmap_backing);
        }

        num_left--;
        return next++;
}

static __meminit void vmemmap_list_populate(unsigned long phys,
                                            unsigned long start,
                                            int node)
{
        struct vmemmap_backing *vmem_back;

        vmem_back = vmemmap_list_alloc(node);
        if (unlikely(!vmem_back)) {
                WARN_ON(1);
                return;
        }

        vmem_back->phys = phys;
        vmem_back->virt_addr = start;
        vmem_back->list = vmemmap_list;

        vmemmap_list = vmem_back;
}
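
/*
 * Populate the vmemmap for [start, end): allocate backing blocks of the
 * linear mapping page size, record them on vmemmap_list and map them.
 */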
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
        unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

        /* Align to the page size of the linear mapping. */
        start = _ALIGN_DOWN(start, page_size);

        pr_debug("vmemmap_populate %lx..%lx, node %d\n", start, end, node);

        for (; start < end; start += page_size) {
                void *p;
                int rc;

                if (vmemmap_populated(start, page_size))
                        continue;

                p = vmemmap_alloc_block(page_size, node);
                if (!p)
                        return -ENOMEM;

                vmemmap_list_populate(__pa(p), start, node);

                pr_debug("      * %016lx..%016lx allocated at %p\n",
                         start, start + page_size, p);

                rc = vmemmap_create_mapping(start, page_size, __pa(p));
                if (rc < 0) {
                        pr_warning(
                                "vmemmap_populate: Unable to create vmemmap mapping: %d\n",
                                rc);
                        return -EFAULT;
                }
        }

        return 0;
}

#ifdef CONFIG_MEMORY_HOTPLUG
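/*
 * Unlink the vmemmap_list entry for the block starting at @start and
 * return its backing physical address (0 if no entry is found).  The
 * entry itself is recycled via the allocator's free list.
 */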
static unsigned long vmemmap_list_free(unsigned long start)
{
        struct vmemmap_backing *vmem_back, *vmem_back_prev;

        vmem_back_prev = vmem_back = vmemmap_list;

        /* look for it with prev pointer recorded */
        for (; vmem_back; vmem_back = vmem_back->list) {
                if (vmem_back->virt_addr == start)
                        break;
                vmem_back_prev = vmem_back;
        }

        if (unlikely(!vmem_back)) {
                WARN_ON(1);
                return 0;
        }

        /* remove it from vmemmap_list */
        if (vmem_back == vmemmap_list) /* remove head */
                vmemmap_list = vmem_back->list;
        else
                vmem_back_prev->list = vmem_back->list;

        /* make next point to this freed entry */
        vmem_back->list = next;
        next = vmem_back;
        num_freed++;

        return vmem_back->phys;
}
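
/*
 * Tear down the vmemmap for [start, end): for each backing block that no
 * longer overlaps an initialised section, free the backing memory and
 * remove the kernel mapping.
 */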
void __ref vmemmap_free(unsigned long start, unsigned long end)
{
        unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;

        start = _ALIGN_DOWN(start, page_size);

        pr_debug("vmemmap_free %lx...%lx\n", start, end);

        for (; start < end; start += page_size) {
                unsigned long addr;

                /*
                 * The section has already been marked as invalid, so if
                 * vmemmap_populated() returns true some other section still
                 * uses this page; skip it in that case.
                 */
                if (vmemmap_populated(start, page_size))
                        continue;

                addr = vmemmap_list_free(start);
                if (addr) {
                        struct page *page = pfn_to_page(addr >> PAGE_SHIFT);

                        if (PageReserved(page)) {
                                /* allocated from bootmem */
                                if (page_size < PAGE_SIZE) {
                                        /*
                                         * this shouldn't happen, but if it is
                                         * the case, leave the memory there
                                         */
                                        WARN_ON_ONCE(1);
                                } else {
                                        unsigned int nr_pages =
                                                1 << get_order(page_size);
                                        while (nr_pages--)
                                                free_reserved_page(page++);
                                }
                        } else {
                                free_pages((unsigned long)(__va(addr)),
                                           get_order(page_size));
                        }

                        vmemmap_remove_mapping(start, page_size);
                }
        }
}
#endif /* CONFIG_MEMORY_HOTPLUG */
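
/* Currently a no-op on powerpc. */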
void register_page_bootmem_memmap(unsigned long section_nr,
                                  struct page *start_page, unsigned long size)
{
}

/*
 * We do not have access to the sparsemem vmemmap, so we fall back to
 * walking the list of sparsemem blocks which we already maintain for
 * the sake of crashdump. In the long run, we might want to maintain
 * a tree if performance of that linear walk becomes a problem.
 *
 * realmode_pfn_to_page functions can fail due to:
 * 1) Real sparsemem blocks do not lie contiguously in RAM (they are
 * contiguous only in virtual address space, which is not available in
 * real mode), so the requested page struct can be split between blocks
 * and get_page/put_page may fail.
 * 2) When huge pages are used, the get_page/put_page API will fail
 * in real mode as the linked addresses in the page struct are virtual
 * too.
 */
struct page *realmode_pfn_to_page(unsigned long pfn)
{
        struct vmemmap_backing *vmem_back;
        struct page *page;
        unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
        unsigned long pg_va = (unsigned long) pfn_to_page(pfn);

        for (vmem_back = vmemmap_list; vmem_back; vmem_back = vmem_back->list) {
                if (pg_va < vmem_back->virt_addr)
                        continue;

                /* Entries may have been freed and reused, so check them all */
                if ((pg_va + sizeof(struct page)) <=
                                (vmem_back->virt_addr + page_size)) {
                        page = (struct page *) (vmem_back->phys + pg_va -
                                vmem_back->virt_addr);
                        return page;
                }
        }

        /* Probably that page struct is split between real pages */
        return NULL;
}
EXPORT_SYMBOL_GPL(realmode_pfn_to_page);

#elif defined(CONFIG_FLATMEM)

struct page *realmode_pfn_to_page(unsigned long pfn)
{
        struct page *page = pfn_to_page(pfn);
        return page;
}
EXPORT_SYMBOL_GPL(realmode_pfn_to_page);

#endif /* CONFIG_SPARSEMEM_VMEMMAP/CONFIG_FLATMEM */

#ifdef CONFIG_PPC_STD_MMU_64
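/*
 * "disable_radix" on the kernel command line forces use of the hash MMU
 * even on radix-capable CPUs.
 */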
static bool disable_radix;

static int __init parse_disable_radix(char *p)
{
        disable_radix = true;

        return 0;
}
early_param("disable_radix", parse_disable_radix);

/*
 * If we're running under a hypervisor, we currently can't do radix
 * since we don't have the code to do the H_REGISTER_PROC_TBL hcall.
 * We can tell that we're running under a hypervisor by looking for the
 * /chosen/ibm,architecture-vec-5 property.
 */
static void early_check_vec5(void)
{
        unsigned long root, chosen;
        int size;
        const u8 *vec5;

        root = of_get_flat_dt_root();
        chosen = of_get_flat_dt_subnode_by_name(root, "chosen");
        if (chosen == -FDT_ERR_NOTFOUND)
                return;
        vec5 = of_get_flat_dt_prop(chosen, "ibm,architecture-vec-5", &size);
        if (!vec5)
                return;
        cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;
}
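
/*
 * Called early in boot to choose between the hash and radix MMU and run
 * the corresponding early setup.
 */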
void __init mmu_early_init_devtree(void)
{
        /* Disable radix mode based on kernel command line. */
        if (disable_radix)
                cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX;

        /*
         * Check /chosen/ibm,architecture-vec-5 if running as a guest.
         * When running bare-metal, we can use radix if we like
         * even though the ibm,architecture-vec-5 property created by
         * skiboot doesn't have the necessary bits set.
         */
        if (early_radix_enabled() && !(mfmsr() & MSR_HV))
                early_check_vec5();

        if (early_radix_enabled())
                radix__early_init_devtree();
        else
                hash__early_init_devtree();
}
#endif /* CONFIG_PPC_STD_MMU_64 */