// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2006
 */

#include <linux/memory_hotplug.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <asm/page-states.h>
#include <asm/cacheflush.h>
#include <asm/nospec-branch.h>
#include <asm/ctlreg.h>
#include <asm/pgalloc.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/set_memory.h>

static DEFINE_MUTEX(vmem_mutex);
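/*
 * Page allocation helper: use the buddy allocator once the slab/page
 * allocator is up, fall back to memblock during early boot.
 */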
static void __ref *vmem_alloc_pages(unsigned int order)
{
	unsigned long size = PAGE_SIZE << order;

	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return memblock_alloc(size, size);
}
static void vmem_free_pages(unsigned long addr, int order)
{
	/* We don't expect boot memory to be removed ever. */
	if (!slab_is_available() ||
	    WARN_ON_ONCE(PageReserved(virt_to_page((void *)addr))))
		return;
	free_pages(addr, order);
}
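/*
 * Allocate and initialize a crst table (region or segment table): set all
 * entries to @val and mark the table pages for DAT use via
 * __arch_set_page_dat().
 */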
void *vmem_crst_alloc(unsigned long val)
{
	unsigned long *table;

	table = vmem_alloc_pages(CRST_ALLOC_ORDER);
	if (!table)
		return NULL;
	crst_table_init(table, val);
	__arch_set_page_dat(table, 1UL << CRST_ALLOC_ORDER);
	return table;
}
pte_t __ref *vmem_pte_alloc(void)
{
	unsigned long size = PTRS_PER_PTE * sizeof(pte_t);
	pte_t *pte;

	if (slab_is_available())
		pte = (pte_t *) page_table_alloc(&init_mm);
	else
		pte = (pte_t *) memblock_alloc(size, size);
	if (!pte)
		return NULL;
	memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
	__arch_set_page_dat(pte, 1);
	return pte;
}
static void vmem_pte_free(unsigned long *table)
{
	/* We don't expect boot memory to be removed ever. */
	if (!slab_is_available() ||
	    WARN_ON_ONCE(PageReserved(virt_to_page(table))))
		return;
	page_table_free(&init_mm, table);
}
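/*
 * vmemmap memory is added and removed in sub-PMD granularity. A PMD-mapped
 * vmemmap page may therefore be only partially in use; bytes that do not
 * back any struct page are filled with the PAGE_UNUSED pattern so that a
 * completely unused PMD can be detected and freed again later.
 */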
#define PAGE_UNUSED 0xFD

/*
 * The unused vmemmap range, which was not yet memset(PAGE_UNUSED), ranges
 * from unused_sub_pmd_start to the next PMD_SIZE boundary.
 */
static unsigned long unused_sub_pmd_start;
static void vmemmap_flush_unused_sub_pmd(void)
{
	if (!unused_sub_pmd_start)
		return;
	memset((void *)unused_sub_pmd_start, PAGE_UNUSED,
	       ALIGN(unused_sub_pmd_start, PMD_SIZE) - unused_sub_pmd_start);
	unused_sub_pmd_start = 0;
}
static void vmemmap_mark_sub_pmd_used(unsigned long start, unsigned long end)
{
	/*
	 * As we expect to add in the same granularity as we remove, it's
	 * sufficient to mark only some piece used to block the memmap page from
	 * getting removed (just in case the memmap never gets initialized,
	 * e.g., because the memory block never gets onlined).
	 */
	memset((void *)start, 0, sizeof(struct page));
}
static void vmemmap_use_sub_pmd(unsigned long start, unsigned long end)
{
	/*
	 * We only optimize if the new used range directly follows the
	 * previously unused range (esp., when populating consecutive sections).
	 */
	if (unused_sub_pmd_start == start) {
		unused_sub_pmd_start = end;
		if (likely(IS_ALIGNED(unused_sub_pmd_start, PMD_SIZE)))
			unused_sub_pmd_start = 0;
		return;
	}
	vmemmap_flush_unused_sub_pmd();
	vmemmap_mark_sub_pmd_used(start, end);
}
static void vmemmap_use_new_sub_pmd(unsigned long start, unsigned long end)
{
	unsigned long page = ALIGN_DOWN(start, PMD_SIZE);

	vmemmap_flush_unused_sub_pmd();

	/* Could be our memmap page is filled with PAGE_UNUSED already ... */
	vmemmap_mark_sub_pmd_used(start, end);

	/* Mark the unused parts of the new memmap page PAGE_UNUSED. */
	if (!IS_ALIGNED(start, PMD_SIZE))
		memset((void *)page, PAGE_UNUSED, start - page);
	/*
	 * We want to avoid memset(PAGE_UNUSED) when populating the vmemmap of
	 * consecutive sections. Remember for the last added PMD the last
	 * unused range in the populated PMD.
	 */
	if (!IS_ALIGNED(end, PMD_SIZE))
		unused_sub_pmd_start = end;
}
/* Returns true if the PMD is completely unused and can be freed. */
static bool vmemmap_unuse_sub_pmd(unsigned long start, unsigned long end)
{
	unsigned long page = ALIGN_DOWN(start, PMD_SIZE);

	vmemmap_flush_unused_sub_pmd();
	memset((void *)start, PAGE_UNUSED, end - start);
	return !memchr_inv((void *)page, PAGE_UNUSED, PMD_SIZE);
}
/* __ref: we'll only call vmemmap_alloc_block() via vmemmap_populate() */
static int __ref modify_pte_table(pmd_t *pmd, unsigned long addr,
				  unsigned long end, bool add, bool direct)
{
	unsigned long prot, pages = 0;
	int ret = -ENOMEM;
	pte_t *pte;

	prot = pgprot_val(PAGE_KERNEL);
	if (!MACHINE_HAS_NX)
		prot &= ~_PAGE_NOEXEC;

	pte = pte_offset_kernel(pmd, addr);
	for (; addr < end; addr += PAGE_SIZE, pte++) {
		if (!add) {
			if (pte_none(*pte))
				continue;
			if (!direct)
				vmem_free_pages((unsigned long) pfn_to_virt(pte_pfn(*pte)), 0);
			pte_clear(&init_mm, addr, pte);
		} else if (pte_none(*pte)) {
			if (!direct) {
				void *new_page = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);

				if (!new_page)
					goto out;
				set_pte(pte, __pte(__pa(new_page) | prot));
			} else {
				set_pte(pte, __pte(__pa(addr) | prot));
			}
		} else {
			continue;
		}
		pages++;
	}
	ret = 0;
out:
	if (direct)
		update_page_count(PG_DIRECT_MAP_4K, add ? pages : -pages);
	return ret;
}
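/*
 * Free the PTE table referenced by @pmd if none of its entries is in use
 * anymore, and clear the PMD entry.
 */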
static void try_free_pte_table(pmd_t *pmd, unsigned long start)
{
	pte_t *pte;
	int i;

	/* We can safely assume this is fully in 1:1 mapping & vmemmap area */
	pte = pte_offset_kernel(pmd, start);
	for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
		if (!pte_none(*pte))
			return;
	}
	vmem_pte_free((unsigned long *) pmd_deref(*pmd));
	pmd_clear(pmd);
}
/* __ref: we'll only call vmemmap_alloc_block() via vmemmap_populate() */
static int __ref modify_pmd_table(pud_t *pud, unsigned long addr,
				  unsigned long end, bool add, bool direct)
{
	unsigned long next, prot, pages = 0;
	int ret = -ENOMEM;
	pmd_t *pmd;
	pte_t *pte;

	prot = pgprot_val(SEGMENT_KERNEL);
	if (!MACHINE_HAS_NX)
		prot &= ~_SEGMENT_ENTRY_NOEXEC;

	pmd = pmd_offset(pud, addr);
	for (; addr < end; addr = next, pmd++) {
		next = pmd_addr_end(addr, end);
		if (!add) {
			if (pmd_none(*pmd))
				continue;
			if (pmd_large(*pmd)) {
				if (IS_ALIGNED(addr, PMD_SIZE) &&
				    IS_ALIGNED(next, PMD_SIZE)) {
					if (!direct)
						vmem_free_pages(pmd_deref(*pmd), get_order(PMD_SIZE));
					pmd_clear(pmd);
					pages++;
				} else if (!direct && vmemmap_unuse_sub_pmd(addr, next)) {
					vmem_free_pages(pmd_deref(*pmd), get_order(PMD_SIZE));
					pmd_clear(pmd);
				}
				continue;
			}
		} else if (pmd_none(*pmd)) {
			if (IS_ALIGNED(addr, PMD_SIZE) &&
			    IS_ALIGNED(next, PMD_SIZE) &&
			    MACHINE_HAS_EDAT1 && direct &&
			    !debug_pagealloc_enabled()) {
				set_pmd(pmd, __pmd(__pa(addr) | prot));
				pages++;
				continue;
			} else if (!direct && MACHINE_HAS_EDAT1) {
				void *new_page;

				/*
				 * Use 1MB frames for vmemmap if available. We
				 * always use large frames even if they are only
				 * partially used. Otherwise we would also end up
				 * with page tables since vmemmap_populate gets
				 * called for each section separately.
				 */
				new_page = vmemmap_alloc_block(PMD_SIZE, NUMA_NO_NODE);
				if (new_page) {
					set_pmd(pmd, __pmd(__pa(new_page) | prot));
					if (!IS_ALIGNED(addr, PMD_SIZE) ||
					    !IS_ALIGNED(next, PMD_SIZE)) {
						vmemmap_use_new_sub_pmd(addr, next);
					}
					continue;
				}
			}
			pte = vmem_pte_alloc();
			if (!pte)
				goto out;
			pmd_populate(&init_mm, pmd, pte);
		} else if (pmd_large(*pmd)) {
			if (!direct)
				vmemmap_use_sub_pmd(addr, next);
			continue;
		}
		ret = modify_pte_table(pmd, addr, next, add, direct);
		if (ret)
			goto out;
		if (!add)
			try_free_pte_table(pmd, addr & PMD_MASK);
	}
	ret = 0;
out:
	if (direct)
		update_page_count(PG_DIRECT_MAP_1M, add ? pages : -pages);
	return ret;
}
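/* Free the PMD table referenced by @pud if it is completely empty. */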
static void try_free_pmd_table(pud_t *pud, unsigned long start)
{
	pmd_t *pmd;
	int i;

	pmd = pmd_offset(pud, start);
	for (i = 0; i < PTRS_PER_PMD; i++, pmd++)
		if (!pmd_none(*pmd))
			return;
	vmem_free_pages(pud_deref(*pud), CRST_ALLOC_ORDER);
	pud_clear(pud);
}
static int modify_pud_table(p4d_t *p4d, unsigned long addr, unsigned long end,
			    bool add, bool direct)
{
	unsigned long next, prot, pages = 0;
	int ret = -ENOMEM;
	pud_t *pud;
	pmd_t *pmd;

	prot = pgprot_val(REGION3_KERNEL);
	if (!MACHINE_HAS_NX)
		prot &= ~_REGION_ENTRY_NOEXEC;
	pud = pud_offset(p4d, addr);
	for (; addr < end; addr = next, pud++) {
		next = pud_addr_end(addr, end);
		if (!add) {
			if (pud_none(*pud))
				continue;
			if (pud_large(*pud)) {
				if (IS_ALIGNED(addr, PUD_SIZE) &&
				    IS_ALIGNED(next, PUD_SIZE)) {
					pud_clear(pud);
					pages++;
				}
				continue;
			}
		} else if (pud_none(*pud)) {
			if (IS_ALIGNED(addr, PUD_SIZE) &&
			    IS_ALIGNED(next, PUD_SIZE) &&
			    MACHINE_HAS_EDAT2 && direct &&
			    !debug_pagealloc_enabled()) {
				set_pud(pud, __pud(__pa(addr) | prot));
				pages++;
				continue;
			}
			pmd = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			if (!pmd)
				goto out;
			pud_populate(&init_mm, pud, pmd);
		} else if (pud_large(*pud)) {
			continue;
		}
		ret = modify_pmd_table(pud, addr, next, add, direct);
		if (ret)
			goto out;
		if (!add)
			try_free_pmd_table(pud, addr & PUD_MASK);
	}
	ret = 0;
out:
	if (direct)
		update_page_count(PG_DIRECT_MAP_2G, add ? pages : -pages);
	return ret;
}
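/* Free the PUD table referenced by @p4d if it is completely empty. */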
static void try_free_pud_table(p4d_t *p4d, unsigned long start)
{
	pud_t *pud;
	int i;

	pud = pud_offset(p4d, start);
	for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
		if (!pud_none(*pud))
			return;
	}
	vmem_free_pages(p4d_deref(*p4d), CRST_ALLOC_ORDER);
	p4d_clear(p4d);
}
static int modify_p4d_table(pgd_t *pgd, unsigned long addr, unsigned long end,
			    bool add, bool direct)
{
	unsigned long next;
	int ret = -ENOMEM;
	p4d_t *p4d;
	pud_t *pud;

	p4d = p4d_offset(pgd, addr);
	for (; addr < end; addr = next, p4d++) {
		next = p4d_addr_end(addr, end);
		if (!add) {
			if (p4d_none(*p4d))
				continue;
		} else if (p4d_none(*p4d)) {
			pud = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
			if (!pud)
				goto out;
			p4d_populate(&init_mm, p4d, pud);
		}
		ret = modify_pud_table(p4d, addr, next, add, direct);
		if (ret)
			goto out;
		if (!add)
			try_free_pud_table(p4d, addr & P4D_MASK);
	}
	ret = 0;
out:
	return ret;
}
static void try_free_p4d_table(pgd_t *pgd, unsigned long start)
{
	p4d_t *p4d;
	int i;

	p4d = p4d_offset(pgd, start);
	for (i = 0; i < PTRS_PER_P4D; i++, p4d++) {
		if (!p4d_none(*p4d))
			return;
	}
	vmem_free_pages(pgd_deref(*pgd), CRST_ALLOC_ORDER);
	pgd_clear(pgd);
}
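/*
 * Walk the kernel page tables between @start and @end and either populate
 * (@add) or unmap the range. @direct selects the 1:1 kernel mapping, where
 * virtual addresses map to the same physical addresses; otherwise backing
 * pages for the virtual memmap are allocated on demand.
 */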
static int modify_pagetable(unsigned long start, unsigned long end, bool add,
			    bool direct)
{
	unsigned long addr, next;
	int ret = -ENOMEM;
	pgd_t *pgd;
	p4d_t *p4d;

	if (WARN_ON_ONCE(!PAGE_ALIGNED(start | end)))
		return -EINVAL;
	/* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */
	if (WARN_ON_ONCE(end > VMALLOC_START))
		return -EINVAL;
	for (addr = start; addr < end; addr = next) {
		next = pgd_addr_end(addr, end);
		pgd = pgd_offset_k(addr);

		if (!add) {
			if (pgd_none(*pgd))
				continue;
		} else if (pgd_none(*pgd)) {
			p4d = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
			if (!p4d)
				goto out;
			pgd_populate(&init_mm, pgd, p4d);
		}
		ret = modify_p4d_table(pgd, addr, next, add, direct);
		if (ret)
			goto out;
		if (!add)
			try_free_p4d_table(pgd, addr & PGDIR_MASK);
	}
	ret = 0;
out:
	if (!add)
		flush_tlb_kernel_range(start, end);
	return ret;
}
static int add_pagetable(unsigned long start, unsigned long end, bool direct)
{
	return modify_pagetable(start, end, true, direct);
}

static int remove_pagetable(unsigned long start, unsigned long end, bool direct)
{
	return modify_pagetable(start, end, false, direct);
}
/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_range(unsigned long start, unsigned long size)
{
	start = (unsigned long)__va(start);
	return add_pagetable(start, start + size, true);
}
/*
 * Remove a physical memory range from the 1:1 mapping.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	start = (unsigned long)__va(start);
	remove_pagetable(start, start + size, true);
}
/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
			       struct vmem_altmap *altmap)
{
	int ret;

	mutex_lock(&vmem_mutex);
	/* We don't care about the node, just use NUMA_NO_NODE on allocations */
	ret = add_pagetable(start, end, false);
	if (ret)
		remove_pagetable(start, end, false);
	mutex_unlock(&vmem_mutex);
	return ret;
}
#ifdef CONFIG_MEMORY_HOTPLUG

void vmemmap_free(unsigned long start, unsigned long end,
		  struct vmem_altmap *altmap)
{
	mutex_lock(&vmem_mutex);
	remove_pagetable(start, end, false);
	mutex_unlock(&vmem_mutex);
}

#endif /* CONFIG_MEMORY_HOTPLUG */
void vmem_remove_mapping(unsigned long start, unsigned long size)
{
	mutex_lock(&vmem_mutex);
	vmem_remove_range(start, size);
	mutex_unlock(&vmem_mutex);
}
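/*
 * Report the physical address range that can be used for memory hotplug:
 * everything from 0 up to (but not including) max_mappable.
 */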
struct range arch_get_mappable_range(void)
{
	struct range mhp_range;

	mhp_range.start = 0;
	mhp_range.end = max_mappable - 1;
	return mhp_range;
}
int vmem_add_mapping(unsigned long start, unsigned long size)
{
	struct range range = arch_get_mappable_range();
	int ret;

	if (start < range.start ||
	    start + size > range.end + 1 ||
	    start + size < start)
		return -ERANGE;

	mutex_lock(&vmem_mutex);
	ret = vmem_add_range(start, size);
	if (ret)
		vmem_remove_range(start, size);
	mutex_unlock(&vmem_mutex);
	return ret;
}
/*
 * Allocate new or return existing page-table entry, but do not map it
 * to any physical address. If missing, allocate segment- and region-
 * table entries along. Meeting a large segment- or region-table entry
 * while traversing is an error, since the function is expected to be
 * called against virtual regions reserved for 4KB mappings only.
 */
pte_t *vmem_get_alloc_pte(unsigned long addr, bool alloc)
{
	pte_t *ptep = NULL;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		if (!alloc)
			goto out;
		p4d = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
		if (!p4d)
			goto out;
		pgd_populate(&init_mm, pgd, p4d);
	}
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d)) {
		if (!alloc)
			goto out;
		pud = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
		if (!pud)
			goto out;
		p4d_populate(&init_mm, p4d, pud);
	}
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud)) {
		if (!alloc)
			goto out;
		pmd = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
		if (!pmd)
			goto out;
		pud_populate(&init_mm, pud, pmd);
	} else if (WARN_ON_ONCE(pud_large(*pud))) {
		goto out;
	}
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		if (!alloc)
			goto out;
		pte = vmem_pte_alloc();
		if (!pte)
			goto out;
		pmd_populate(&init_mm, pmd, pte);
	} else if (WARN_ON_ONCE(pmd_large(*pmd))) {
		goto out;
	}
	ptep = pte_offset_kernel(pmd, addr);
out:
	return ptep;
}
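/*
 * Map a single 4K page at @addr to physical address @phys with protection
 * @prot, allocating intermediate page tables if @alloc is set.
 * vmem_map_4k_page() below is the variant that takes vmem_mutex.
 */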
int __vmem_map_4k_page(unsigned long addr, unsigned long phys, pgprot_t prot, bool alloc)
{
	pte_t *ptep, pte;

	if (!IS_ALIGNED(addr, PAGE_SIZE))
		return -EINVAL;
	ptep = vmem_get_alloc_pte(addr, alloc);
	if (!ptep)
		return -ENOMEM;
	__ptep_ipte(addr, ptep, 0, 0, IPTE_GLOBAL);
	pte = mk_pte_phys(phys, prot);
	set_pte(ptep, pte);
	return 0;
}
int vmem_map_4k_page(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	int rc;

	mutex_lock(&vmem_mutex);
	rc = __vmem_map_4k_page(addr, phys, prot, true);
	mutex_unlock(&vmem_mutex);
	return rc;
}
void vmem_unmap_4k_page(unsigned long addr)
{
	pte_t *ptep;

	mutex_lock(&vmem_mutex);
	ptep = virt_to_kpte(addr);
	__ptep_ipte(addr, ptep, 0, 0, IPTE_GLOBAL);
	pte_clear(&init_mm, addr, ptep);
	mutex_unlock(&vmem_mutex);
}
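/*
 * Apply the final protections to the kernel image: text becomes read-only
 * and executable, read-only data becomes read-only, and instruction
 * execution protection is enabled if the machine supports it.
 */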
void __init vmem_map_init(void)
{
	__set_memory_rox(_stext, _etext);
	__set_memory_ro(_etext, __end_rodata);
	__set_memory_rox(_sinittext, _einittext);
	__set_memory_rox(__stext_amode31, __etext_amode31);

	/*
	 * If the BEAR-enhancement facility is not installed the first
	 * prefix page is used to return to the previous context with
	 * an LPSWE instruction and therefore must be executable.
	 */
	if (!static_key_enabled(&cpu_has_bear))
		set_memory_x(0, 1);
	if (debug_pagealloc_enabled()) {
		/*
		 * Use RELOC_HIDE() as long as __va(0) translates to NULL,
		 * since performing pointer arithmetic on a NULL pointer
		 * has undefined behavior and generates compiler warnings.
		 */
		__set_memory_4k(__va(0), RELOC_HIDE(__va(0), ident_map_size));
	}
	if (MACHINE_HAS_NX)
		system_ctl_set_bit(0, CR0_INSTRUCTION_EXEC_PROTECTION_BIT);
	pr_info("Write protected kernel read-only data: %luk\n",
		(unsigned long)(__end_rodata - _stext) >> 10);
}