// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2006
 */

#include <linux/memory_hotplug.h>
#include <linux/memblock.h>
#include <linux/pfn.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <asm/page-states.h>
#include <asm/cacheflush.h>
#include <asm/nospec-branch.h>
#include <asm/pgalloc.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/set_memory.h>
static DEFINE_MUTEX(vmem_mutex);
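/*
 * The vmem_* allocation helpers below work both before and after the buddy
 * allocator is available: early callers are served from memblock, later ones
 * from the regular page allocator (or the kernel page-table allocator for
 * PTE tables).
 */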
static void __ref *vmem_alloc_pages(unsigned int order)
{
	unsigned long size = PAGE_SIZE << order;

	if (slab_is_available())
		return (void *)__get_free_pages(GFP_KERNEL, order);
	return memblock_alloc(size, size);
}
static void vmem_free_pages(unsigned long addr, int order)
{
	/* We don't expect boot memory to be removed ever. */
	if (!slab_is_available() ||
	    WARN_ON_ONCE(PageReserved(virt_to_page(addr))))
		return;
	free_pages(addr, order);
}
void *vmem_crst_alloc(unsigned long val)
{
	unsigned long *table;

	table = vmem_alloc_pages(CRST_ALLOC_ORDER);
	if (!table)
		return NULL;
	crst_table_init(table, val);
	if (slab_is_available())
		arch_set_page_dat(virt_to_page(table), CRST_ALLOC_ORDER);
	return table;
}
pte_t __ref *vmem_pte_alloc(void)
{
	unsigned long size = PTRS_PER_PTE * sizeof(pte_t);
	pte_t *pte;

	if (slab_is_available())
		pte = (pte_t *) page_table_alloc(&init_mm);
	else
		pte = (pte_t *) memblock_alloc(size, size);
	if (!pte)
		return NULL;
	memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
	return pte;
}
static void vmem_pte_free(unsigned long *table)
{
	/* We don't expect boot memory to be removed ever. */
	if (!slab_is_available() ||
	    WARN_ON_ONCE(PageReserved(virt_to_page(table))))
		return;
	page_table_free(&init_mm, table);
}
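/*
 * vmemmap pages can be backed by PMD-sized (1 MB) frames even though a
 * single memory section only needs part of such a frame. Unused parts of a
 * vmemmap frame are filled with the PAGE_UNUSED marker below, so that a
 * frame can be freed again once all of it has been marked unused.
 */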
#define PAGE_UNUSED 0xFD
/*
 * The unused vmemmap range, which was not yet memset(PAGE_UNUSED), ranges
 * from unused_sub_pmd_start to the next PMD_SIZE boundary.
 */
static unsigned long unused_sub_pmd_start;
static void vmemmap_flush_unused_sub_pmd(void)
{
	if (!unused_sub_pmd_start)
		return;
	memset((void *)unused_sub_pmd_start, PAGE_UNUSED,
	       ALIGN(unused_sub_pmd_start, PMD_SIZE) - unused_sub_pmd_start);
	unused_sub_pmd_start = 0;
}
static void vmemmap_mark_sub_pmd_used(unsigned long start, unsigned long end)
{
	/*
	 * As we expect to add in the same granularity as we remove, it's
	 * sufficient to mark only some piece used to block the memmap page from
	 * getting removed (just in case the memmap never gets initialized,
	 * e.g., because the memory block never gets onlined).
	 */
	memset((void *)start, 0, sizeof(struct page));
}
static void vmemmap_use_sub_pmd(unsigned long start, unsigned long end)
{
	/*
	 * We only optimize if the new used range directly follows the
	 * previously unused range (esp., when populating consecutive sections).
	 */
	if (unused_sub_pmd_start == start) {
		unused_sub_pmd_start = end;
		if (likely(IS_ALIGNED(unused_sub_pmd_start, PMD_SIZE)))
			unused_sub_pmd_start = 0;
		return;
	}
	vmemmap_flush_unused_sub_pmd();
	vmemmap_mark_sub_pmd_used(start, end);
}
static void vmemmap_use_new_sub_pmd(unsigned long start, unsigned long end)
{
	unsigned long page = ALIGN_DOWN(start, PMD_SIZE);

	vmemmap_flush_unused_sub_pmd();

	/* Could be our memmap page is filled with PAGE_UNUSED already ... */
	vmemmap_mark_sub_pmd_used(start, end);

	/* Mark the unused parts of the new memmap page PAGE_UNUSED. */
	if (!IS_ALIGNED(start, PMD_SIZE))
		memset((void *)page, PAGE_UNUSED, start - page);
	/*
	 * We want to avoid memset(PAGE_UNUSED) when populating the vmemmap of
	 * consecutive sections. Remember for the last added PMD the last
	 * unused range in the populated PMD.
	 */
	if (!IS_ALIGNED(end, PMD_SIZE))
		unused_sub_pmd_start = end;
}
/* Returns true if the PMD is completely unused and can be freed. */
static bool vmemmap_unuse_sub_pmd(unsigned long start, unsigned long end)
{
	unsigned long page = ALIGN_DOWN(start, PMD_SIZE);

	vmemmap_flush_unused_sub_pmd();
	memset((void *)start, PAGE_UNUSED, end - start);
	return !memchr_inv((void *)page, PAGE_UNUSED, PMD_SIZE);
}
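/*
 * The modify_*_table() helpers below each walk one level of the page table
 * hierarchy for the range [addr, end). "add" selects between mapping and
 * unmapping the range, "direct" distinguishes the 1:1 direct mapping (backed
 * by the physical address itself) from the vmemmap (backed by freshly
 * allocated pages).
 */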
/* __ref: we'll only call vmemmap_alloc_block() via vmemmap_populate() */
static int __ref modify_pte_table(pmd_t *pmd, unsigned long addr,
				  unsigned long end, bool add, bool direct)
{
	unsigned long prot, pages = 0;
	int ret = -ENOMEM;
	pte_t *pte;

	prot = pgprot_val(PAGE_KERNEL);
	if (!MACHINE_HAS_NX)
		prot &= ~_PAGE_NOEXEC;

	pte = pte_offset_kernel(pmd, addr);
	for (; addr < end; addr += PAGE_SIZE, pte++) {
		if (!add) {
			if (pte_none(*pte))
				continue;
			if (!direct)
				vmem_free_pages((unsigned long) pfn_to_virt(pte_pfn(*pte)), 0);
			pte_clear(&init_mm, addr, pte);
		} else if (pte_none(*pte)) {
			if (!direct) {
				void *new_page = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);

				if (!new_page)
					goto out;
				set_pte(pte, __pte(__pa(new_page) | prot));
			} else {
				set_pte(pte, __pte(__pa(addr) | prot));
			}
		} else {
			continue;
		}
		pages++;
	}
	ret = 0;
out:
	if (direct)
		update_page_count(PG_DIRECT_MAP_4K, add ? pages : -pages);
	return ret;
}
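/*
 * Free the PTE table referenced by a PMD entry once all of its entries are
 * empty (unmap path only).
 */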
static void try_free_pte_table(pmd_t *pmd, unsigned long start)
{
	pte_t *pte;
	int i;

	/* We can safely assume this is fully in 1:1 mapping & vmemmap area */
	pte = pte_offset_kernel(pmd, start);
	for (i = 0; i < PTRS_PER_PTE; i++, pte++) {
		if (!pte_none(*pte))
			return;
	}
	vmem_pte_free((unsigned long *) pmd_deref(*pmd));
	pmd_clear(pmd);
}
/* __ref: we'll only call vmemmap_alloc_block() via vmemmap_populate() */
static int __ref modify_pmd_table(pud_t *pud, unsigned long addr,
				  unsigned long end, bool add, bool direct)
{
	unsigned long next, prot, pages = 0;
	int ret = -ENOMEM;
	pmd_t *pmd;
	pte_t *pte;

	prot = pgprot_val(SEGMENT_KERNEL);
	if (!MACHINE_HAS_NX)
		prot &= ~_SEGMENT_ENTRY_NOEXEC;

	pmd = pmd_offset(pud, addr);
	for (; addr < end; addr = next, pmd++) {
		next = pmd_addr_end(addr, end);
		if (!add) {
			if (pmd_none(*pmd))
				continue;
			if (pmd_large(*pmd)) {
				if (IS_ALIGNED(addr, PMD_SIZE) &&
				    IS_ALIGNED(next, PMD_SIZE)) {
					if (!direct)
						vmem_free_pages(pmd_deref(*pmd), get_order(PMD_SIZE));
					pmd_clear(pmd);
					pages++;
				} else if (!direct && vmemmap_unuse_sub_pmd(addr, next)) {
					vmem_free_pages(pmd_deref(*pmd), get_order(PMD_SIZE));
					pmd_clear(pmd);
				}
				continue;
			}
		} else if (pmd_none(*pmd)) {
			if (IS_ALIGNED(addr, PMD_SIZE) &&
			    IS_ALIGNED(next, PMD_SIZE) &&
			    MACHINE_HAS_EDAT1 && direct &&
			    !debug_pagealloc_enabled()) {
				set_pmd(pmd, __pmd(__pa(addr) | prot));
				pages++;
				continue;
			} else if (!direct && MACHINE_HAS_EDAT1) {
				void *new_page;

				/*
				 * Use 1MB frames for vmemmap if available. We
				 * always use large frames even if they are only
				 * partially used. Otherwise we would have also
				 * page tables since vmemmap_populate gets
				 * called for each section separately.
				 */
				new_page = vmemmap_alloc_block(PMD_SIZE, NUMA_NO_NODE);
				if (new_page) {
					set_pmd(pmd, __pmd(__pa(new_page) | prot));
					if (!IS_ALIGNED(addr, PMD_SIZE) ||
					    !IS_ALIGNED(next, PMD_SIZE)) {
						vmemmap_use_new_sub_pmd(addr, next);
					}
					continue;
				}
			}
			pte = vmem_pte_alloc();
			if (!pte)
				goto out;
			pmd_populate(&init_mm, pmd, pte);
		} else if (pmd_large(*pmd)) {
			if (!direct)
				vmemmap_use_sub_pmd(addr, next);
			continue;
		}
		ret = modify_pte_table(pmd, addr, next, add, direct);
		if (ret)
			goto out;
		if (!add)
			try_free_pte_table(pmd, addr & PMD_MASK);
	}
	ret = 0;
out:
	if (direct)
		update_page_count(PG_DIRECT_MAP_1M, add ? pages : -pages);
	return ret;
}
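/*
 * Free a PMD table once all of its entries are empty. Tables that are not
 * fully contained in the 1:1 mapping / vmemmap area (vmalloc, KASAN shadow)
 * are never freed here.
 */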
static void try_free_pmd_table(pud_t *pud, unsigned long start)
{
	const unsigned long end = start + PUD_SIZE;
	pmd_t *pmd;
	int i;

	/* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */
	if (end > VMALLOC_START)
		return;
#ifdef CONFIG_KASAN
	if (start < KASAN_SHADOW_END && end > KASAN_SHADOW_START)
		return;
#endif
	pmd = pmd_offset(pud, start);
	for (i = 0; i < PTRS_PER_PMD; i++, pmd++)
		if (!pmd_none(*pmd))
			return;
	vmem_free_pages(pud_deref(*pud), CRST_ALLOC_ORDER);
	pud_clear(pud);
}
static int modify_pud_table(p4d_t *p4d, unsigned long addr, unsigned long end,
			    bool add, bool direct)
{
	unsigned long next, prot, pages = 0;
	int ret = -ENOMEM;
	pud_t *pud;
	pmd_t *pmd;

	prot = pgprot_val(REGION3_KERNEL);
	if (!MACHINE_HAS_NX)
		prot &= ~_REGION_ENTRY_NOEXEC;
	pud = pud_offset(p4d, addr);
	for (; addr < end; addr = next, pud++) {
		next = pud_addr_end(addr, end);
		if (!add) {
			if (pud_none(*pud))
				continue;
			if (pud_large(*pud)) {
				if (IS_ALIGNED(addr, PUD_SIZE) &&
				    IS_ALIGNED(next, PUD_SIZE)) {
					pud_clear(pud);
					pages++;
				}
				continue;
			}
		} else if (pud_none(*pud)) {
			if (IS_ALIGNED(addr, PUD_SIZE) &&
			    IS_ALIGNED(next, PUD_SIZE) &&
			    MACHINE_HAS_EDAT2 && direct &&
			    !debug_pagealloc_enabled()) {
				set_pud(pud, __pud(__pa(addr) | prot));
				pages++;
				continue;
			}
			pmd = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			if (!pmd)
				goto out;
			pud_populate(&init_mm, pud, pmd);
		} else if (pud_large(*pud)) {
			continue;
		}
		ret = modify_pmd_table(pud, addr, next, add, direct);
		if (ret)
			goto out;
		if (!add)
			try_free_pmd_table(pud, addr & PUD_MASK);
	}
	ret = 0;
out:
	if (direct)
		update_page_count(PG_DIRECT_MAP_2G, add ? pages : -pages);
	return ret;
}
static void try_free_pud_table(p4d_t *p4d, unsigned long start)
{
	const unsigned long end = start + P4D_SIZE;
	pud_t *pud;
	int i;

	/* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */
	if (end > VMALLOC_START)
		return;
#ifdef CONFIG_KASAN
	if (start < KASAN_SHADOW_END && end > KASAN_SHADOW_START)
		return;
#endif

	pud = pud_offset(p4d, start);
	for (i = 0; i < PTRS_PER_PUD; i++, pud++) {
		if (!pud_none(*pud))
			return;
	}
	vmem_free_pages(p4d_deref(*p4d), CRST_ALLOC_ORDER);
	p4d_clear(p4d);
}
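/*
 * The p4d level has no large mappings: on the add path only missing pud
 * tables are allocated before descending, on the remove path empty pud
 * tables are reclaimed via try_free_pud_table().
 */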
static int modify_p4d_table(pgd_t *pgd, unsigned long addr, unsigned long end,
			    bool add, bool direct)
{
	unsigned long next;
	int ret = -ENOMEM;
	p4d_t *p4d;
	pud_t *pud;

	p4d = p4d_offset(pgd, addr);
	for (; addr < end; addr = next, p4d++) {
		next = p4d_addr_end(addr, end);
		if (!add) {
			if (p4d_none(*p4d))
				continue;
		} else if (p4d_none(*p4d)) {
			pud = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
			if (!pud)
				goto out;
			p4d_populate(&init_mm, p4d, pud);
		}
		ret = modify_pud_table(p4d, addr, next, add, direct);
		if (ret)
			goto out;
		if (!add)
			try_free_pud_table(p4d, addr & P4D_MASK);
	}
	ret = 0;
out:
	return ret;
}
static void try_free_p4d_table(pgd_t *pgd, unsigned long start)
{
	const unsigned long end = start + PGDIR_SIZE;
	p4d_t *p4d;
	int i;

	/* Don't mess with any tables not fully in 1:1 mapping & vmemmap area */
	if (end > VMALLOC_START)
		return;
#ifdef CONFIG_KASAN
	if (start < KASAN_SHADOW_END && end > KASAN_SHADOW_START)
		return;
#endif

	p4d = p4d_offset(pgd, start);
	for (i = 0; i < PTRS_PER_P4D; i++, p4d++) {
		if (!p4d_none(*p4d))
			return;
	}
	vmem_free_pages(pgd_deref(*pgd), CRST_ALLOC_ORDER);
	pgd_clear(pgd);
}
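/*
 * Common entry point for mapping (add) and unmapping (!add) a page-aligned
 * range, used for both the 1:1 direct mapping (direct) and the vmemmap
 * (!direct). The TLB is only flushed when unmapping.
 */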
static int modify_pagetable(unsigned long start, unsigned long end, bool add,
			    bool direct)
{
	unsigned long addr, next;
	int ret = -ENOMEM;
	pgd_t *pgd;
	p4d_t *p4d;

	if (WARN_ON_ONCE(!PAGE_ALIGNED(start | end)))
		return -EINVAL;
	for (addr = start; addr < end; addr = next) {
		next = pgd_addr_end(addr, end);
		pgd = pgd_offset_k(addr);

		if (!add) {
			if (pgd_none(*pgd))
				continue;
		} else if (pgd_none(*pgd)) {
			p4d = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
			if (!p4d)
				goto out;
			pgd_populate(&init_mm, pgd, p4d);
		}
		ret = modify_p4d_table(pgd, addr, next, add, direct);
		if (ret)
			goto out;
		if (!add)
			try_free_p4d_table(pgd, addr & PGDIR_MASK);
	}
	ret = 0;
out:
	if (!add)
		flush_tlb_kernel_range(start, end);
	return ret;
}
static int add_pagetable(unsigned long start, unsigned long end, bool direct)
{
	return modify_pagetable(start, end, true, direct);
}

static int remove_pagetable(unsigned long start, unsigned long end, bool direct)
{
	return modify_pagetable(start, end, false, direct);
}
/*
 * Add a physical memory range to the 1:1 mapping.
 */
static int vmem_add_range(unsigned long start, unsigned long size)
{
	return add_pagetable(start, start + size, true);
}
/*
 * Remove a physical memory range from the 1:1 mapping.
 */
static void vmem_remove_range(unsigned long start, unsigned long size)
{
	remove_pagetable(start, start + size, true);
}
/*
 * Add a backed mem_map array to the virtual mem_map array.
 */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
			       struct vmem_altmap *altmap)
{
	int ret;

	mutex_lock(&vmem_mutex);
	/* We don't care about the node, just use NUMA_NO_NODE on allocations */
	ret = add_pagetable(start, end, false);
	if (ret)
		remove_pagetable(start, end, false);
	mutex_unlock(&vmem_mutex);
	return ret;
}
void vmemmap_free(unsigned long start, unsigned long end,
		  struct vmem_altmap *altmap)
{
	mutex_lock(&vmem_mutex);
	remove_pagetable(start, end, false);
	mutex_unlock(&vmem_mutex);
}
void vmem_remove_mapping(unsigned long start, unsigned long size)
{
	mutex_lock(&vmem_mutex);
	vmem_remove_range(start, size);
	mutex_unlock(&vmem_mutex);
}
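/*
 * Report the physical address range which memory hotplug is allowed to add:
 * 0 up to and including VMEM_MAX_PHYS - 1.
 */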
struct range arch_get_mappable_range(void)
{
	struct range mhp_range;

	mhp_range.start = 0;
	mhp_range.end = VMEM_MAX_PHYS - 1;
	return mhp_range;
}
int vmem_add_mapping(unsigned long start, unsigned long size)
{
	struct range range = arch_get_mappable_range();
	int ret;

	if (start < range.start ||
	    start + size > range.end + 1 ||
	    start + size < start)
		return -ERANGE;

	mutex_lock(&vmem_mutex);
	ret = vmem_add_range(start, size);
	if (ret)
		vmem_remove_range(start, size);
	mutex_unlock(&vmem_mutex);
	return ret;
}
/*
 * Allocate new or return existing page-table entry, but do not map it
 * to any physical address. If missing, allocate segment- and region-
 * table entries along. Meeting a large segment- or region-table entry
 * while traversing is an error, since the function is expected to be
 * called against virtual regions reserved for 4KB mappings only.
 */
pte_t *vmem_get_alloc_pte(unsigned long addr, bool alloc)
{
	pte_t *ptep = NULL;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd)) {
		if (!alloc)
			goto out;
		p4d = vmem_crst_alloc(_REGION2_ENTRY_EMPTY);
		if (!p4d)
			goto out;
		pgd_populate(&init_mm, pgd, p4d);
	}
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d)) {
		if (!alloc)
			goto out;
		pud = vmem_crst_alloc(_REGION3_ENTRY_EMPTY);
		if (!pud)
			goto out;
		p4d_populate(&init_mm, p4d, pud);
	}
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud)) {
		if (!alloc)
			goto out;
		pmd = vmem_crst_alloc(_SEGMENT_ENTRY_EMPTY);
		if (!pmd)
			goto out;
		pud_populate(&init_mm, pud, pmd);
	} else if (WARN_ON_ONCE(pud_large(*pud))) {
		goto out;
	}
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		if (!alloc)
			goto out;
		pte = vmem_pte_alloc();
		if (!pte)
			goto out;
		pmd_populate(&init_mm, pmd, pte);
	} else if (WARN_ON_ONCE(pmd_large(*pmd))) {
		goto out;
	}
	ptep = pte_offset_kernel(pmd, addr);
out:
	return ptep;
}
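/*
 * Install a 4K mapping at a page-aligned virtual address. Any existing
 * translation is invalidated with IPTE before the new PTE is set. The
 * vmem_map_4k_page() wrapper below takes vmem_mutex around this.
 */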
int __vmem_map_4k_page(unsigned long addr, unsigned long phys, pgprot_t prot, bool alloc)
{
	pte_t *ptep, pte;

	if (!IS_ALIGNED(addr, PAGE_SIZE))
		return -EINVAL;
	ptep = vmem_get_alloc_pte(addr, alloc);
	if (!ptep)
		return -ENOMEM;
	__ptep_ipte(addr, ptep, 0, 0, IPTE_GLOBAL);
	pte = mk_pte_phys(phys, prot);
	set_pte(ptep, pte);
	return 0;
}
int vmem_map_4k_page(unsigned long addr, unsigned long phys, pgprot_t prot)
{
	int rc;

	mutex_lock(&vmem_mutex);
	rc = __vmem_map_4k_page(addr, phys, prot, true);
	mutex_unlock(&vmem_mutex);
	return rc;
}
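/*
 * Tear down a mapping installed with vmem_map_4k_page(): invalidate the
 * translation with IPTE and clear the PTE, under vmem_mutex.
 */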
void vmem_unmap_4k_page(unsigned long addr)
{
	pte_t *ptep;

	mutex_lock(&vmem_mutex);
	ptep = virt_to_kpte(addr);
	__ptep_ipte(addr, ptep, 0, 0, IPTE_GLOBAL);
	pte_clear(&init_mm, addr, ptep);
	mutex_unlock(&vmem_mutex);
}
/*
 * Map the whole physical memory into virtual memory (identity mapping).
 * We reserve enough space in the vmalloc area for vmemmap to hotplug
 * additional memory segments.
 */
void __init vmem_map_init(void)
{
	phys_addr_t base, end;
	u64 i;

	for_each_mem_range(i, &base, &end)
		vmem_add_range(base, end - base);
	__set_memory((unsigned long)_stext,
		     (unsigned long)(_etext - _stext) >> PAGE_SHIFT,
		     SET_MEMORY_RO | SET_MEMORY_X);
	__set_memory((unsigned long)_etext,
		     (unsigned long)(__end_rodata - _etext) >> PAGE_SHIFT,
		     SET_MEMORY_RO);
	__set_memory((unsigned long)_sinittext,
		     (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT,
		     SET_MEMORY_RO | SET_MEMORY_X);
	__set_memory(__stext_amode31, (__etext_amode31 - __stext_amode31) >> PAGE_SHIFT,
		     SET_MEMORY_RO | SET_MEMORY_X);

	/* lowcore requires 4k mapping for real addresses / prefixing */
	set_memory_4k(0, LC_PAGES);

	/* lowcore must be executable for LPSWE */
	if (!static_key_enabled(&cpu_has_bear))
		set_memory_x(0, 1);

	pr_info("Write protected kernel read-only data: %luk\n",
		(unsigned long)(__end_rodata - _stext) >> 10);
}