1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Page table handling routines for radix page table.
5 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
8 #define pr_fmt(fmt) "radix-mmu: " fmt
11 #include <linux/kernel.h>
12 #include <linux/sched/mm.h>
13 #include <linux/memblock.h>
15 #include <linux/of_fdt.h>
17 #include <linux/hugetlb.h>
18 #include <linux/string_helpers.h>
19 #include <linux/memory.h>
21 #include <asm/pgalloc.h>
22 #include <asm/mmu_context.h>
24 #include <asm/machdep.h>
26 #include <asm/firmware.h>
27 #include <asm/powernv.h>
28 #include <asm/sections.h>
30 #include <asm/trace.h>
31 #include <asm/uaccess.h>
32 #include <asm/ultravisor.h>
33 #include <asm/set_memory.h>
35 #include <trace/events/thp.h>
37 #include <mm/mmu_decl.h>
39 unsigned int mmu_base_pid;
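/*
 * Boot-time page table allocator. Grabs a naturally aligned block from
 * memblock, optionally constrained to [region_start, region_end) and to
 * node @nid; panics on failure, since boot cannot continue without page
 * tables.
 */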
41 static __ref void *early_alloc_pgtable(unsigned long size, int nid,
42 unsigned long region_start, unsigned long region_end)
44 phys_addr_t min_addr = MEMBLOCK_LOW_LIMIT;
45 phys_addr_t max_addr = MEMBLOCK_ALLOC_ANYWHERE;
49 min_addr = region_start;
51 max_addr = region_end;
53 ptr = memblock_alloc_try_nid(size, size, min_addr, max_addr, nid);
56 panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%pa max_addr=%pa\n",
57 __func__, size, size, nid, &min_addr, &max_addr);
63 * When allocating pud or pmd pointers, we allocate a complete page
64 * of PAGE_SIZE rather than PUD_TABLE_SIZE or PMD_TABLE_SIZE. This
65 * is to ensure that the page obtained from the memblock allocator
66 * can be completely used as a page table page and can be freed
67 * correctly when the page table entries are removed.
69 static int early_map_kernel_page(unsigned long ea, unsigned long pa,
71 unsigned int map_page_size,
73 unsigned long region_start, unsigned long region_end)
75 unsigned long pfn = pa >> PAGE_SHIFT;
82 pgdp = pgd_offset_k(ea);
83 p4dp = p4d_offset(pgdp, ea);
84 if (p4d_none(*p4dp)) {
85 pudp = early_alloc_pgtable(PAGE_SIZE, nid,
86 region_start, region_end);
87 p4d_populate(&init_mm, p4dp, pudp);
89 pudp = pud_offset(p4dp, ea);
90 if (map_page_size == PUD_SIZE) {
94 if (pud_none(*pudp)) {
95 pmdp = early_alloc_pgtable(PAGE_SIZE, nid, region_start,
97 pud_populate(&init_mm, pudp, pmdp);
99 pmdp = pmd_offset(pudp, ea);
100 if (map_page_size == PMD_SIZE) {
101 ptep = pmdp_ptep(pmdp);
104 if (!pmd_present(*pmdp)) {
105 ptep = early_alloc_pgtable(PAGE_SIZE, nid,
106 region_start, region_end);
107 pmd_populate_kernel(&init_mm, pmdp, ptep);
109 ptep = pte_offset_kernel(pmdp, ea);
112 set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
113 asm volatile("ptesync": : :"memory");
118 * nid, region_start, and region_end are hints to try to place the page
119 * table memory in the same node or region.
121 static int __map_kernel_page(unsigned long ea, unsigned long pa,
123 unsigned int map_page_size,
125 unsigned long region_start, unsigned long region_end)
127 unsigned long pfn = pa >> PAGE_SHIFT;
134 * Make sure task size is correct as per the max addr
136 BUILD_BUG_ON(TASK_SIZE_USER64 > RADIX_PGTABLE_RANGE);
138 #ifdef CONFIG_PPC_64K_PAGES
139 BUILD_BUG_ON(RADIX_KERN_MAP_SIZE != (1UL << MAX_EA_BITS_PER_CONTEXT));
142 if (unlikely(!slab_is_available()))
143 return early_map_kernel_page(ea, pa, flags, map_page_size,
144 nid, region_start, region_end);
147 * Should make page table allocation functions be able to take a
148 * node, so we can place kernel page tables on the right nodes after
151 pgdp = pgd_offset_k(ea);
152 p4dp = p4d_offset(pgdp, ea);
153 pudp = pud_alloc(&init_mm, p4dp, ea);
156 if (map_page_size == PUD_SIZE) {
157 ptep = (pte_t *)pudp;
160 pmdp = pmd_alloc(&init_mm, pudp, ea);
163 if (map_page_size == PMD_SIZE) {
164 ptep = pmdp_ptep(pmdp);
167 ptep = pte_alloc_kernel(pmdp, ea);
172 set_pte_at(&init_mm, ea, ptep, pfn_pte(pfn, flags));
173 asm volatile("ptesync": : :"memory");
177 int radix__map_kernel_page(unsigned long ea, unsigned long pa,
179 unsigned int map_page_size)
181 return __map_kernel_page(ea, pa, flags, map_page_size, -1, 0, 0);
184 #ifdef CONFIG_STRICT_KERNEL_RWX
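/*
 * Clear the given PTE bits on every existing mapping in [start, end) and
 * flush the kernel TLB for the range. Used below to strip _PAGE_WRITE from
 * text/rodata and _PAGE_EXEC from initmem.
 */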
185 static void radix__change_memory_range(unsigned long start, unsigned long end,
195 start = ALIGN_DOWN(start, PAGE_SIZE);
196 end = PAGE_ALIGN(end); // aligns up
198 pr_debug("Changing flags on range %lx-%lx removing 0x%lx\n",
201 for (idx = start; idx < end; idx += PAGE_SIZE) {
202 pgdp = pgd_offset_k(idx);
203 p4dp = p4d_offset(pgdp, idx);
204 pudp = pud_alloc(&init_mm, p4dp, idx);
207 if (pud_is_leaf(*pudp)) {
208 ptep = (pte_t *)pudp;
211 pmdp = pmd_alloc(&init_mm, pudp, idx);
214 if (pmd_is_leaf(*pmdp)) {
215 ptep = pmdp_ptep(pmdp);
218 ptep = pte_alloc_kernel(pmdp, idx);
222 radix__pte_update(&init_mm, idx, ptep, clear, 0, 0);
225 radix__flush_tlb_kernel_range(start, end);
228 void radix__mark_rodata_ro(void)
230 unsigned long start, end;
232 start = (unsigned long)_stext;
233 end = (unsigned long)__end_rodata;
235 radix__change_memory_range(start, end, _PAGE_WRITE);
237 for (start = PAGE_OFFSET; start < (unsigned long)_stext; start += PAGE_SIZE) {
238 end = start + PAGE_SIZE;
239 if (overlaps_interrupt_vector_text(start, end))
240 radix__change_memory_range(start, end, _PAGE_WRITE);
246 void radix__mark_initmem_nx(void)
248 unsigned long start = (unsigned long)__init_begin;
249 unsigned long end = (unsigned long)__init_end;
251 radix__change_memory_range(start, end, _PAGE_EXEC);
253 #endif /* CONFIG_STRICT_KERNEL_RWX */
255 static inline void __meminit
256 print_mapping(unsigned long start, unsigned long end, unsigned long size, bool exec)
263 string_get_size(size, 1, STRING_UNITS_2, buf, sizeof(buf));
265 pr_info("Mapped 0x%016lx-0x%016lx with %s pages%s\n", start, end, buf,
266 exec ? " (exec)" : "");
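/*
 * Return the next address at which the linear-map protections may change
 * (kernel text/rodata boundaries), so that create_physical_mapping() never
 * spans such a boundary with a single large page.
 */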
269 static unsigned long next_boundary(unsigned long addr, unsigned long end)
271 #ifdef CONFIG_STRICT_KERNEL_RWX
272 unsigned long stext_phys;
274 stext_phys = __pa_symbol(_stext);
276 // Relocatable kernel running at non-zero real address
277 if (stext_phys != 0) {
278 // The end of interrupts code at zero is a rodata boundary
279 unsigned long end_intr = __pa_symbol(__end_interrupts) - stext_phys;
283 // Start of relocated kernel text is a rodata boundary
284 if (addr < stext_phys)
288 if (addr < __pa_symbol(__srwx_boundary))
289 return __pa_symbol(__srwx_boundary);
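/*
 * Map the linear ("direct") mapping for [start, end), using the largest
 * page size (1G, 2M, then base pages) that alignment, the remaining gap
 * and the supported MMU page sizes allow.
 */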
294 static int __meminit create_physical_mapping(unsigned long start,
296 int nid, pgprot_t _prot)
298 unsigned long vaddr, addr, mapping_size = 0;
299 bool prev_exec, exec = false;
302 unsigned long max_mapping_size = memory_block_size;
304 if (debug_pagealloc_enabled_or_kfence())
305 max_mapping_size = PAGE_SIZE;
307 start = ALIGN(start, PAGE_SIZE);
308 end = ALIGN_DOWN(end, PAGE_SIZE);
309 for (addr = start; addr < end; addr += mapping_size) {
310 unsigned long gap, previous_size;
313 gap = next_boundary(addr, end) - addr;
314 if (gap > max_mapping_size)
315 gap = max_mapping_size;
316 previous_size = mapping_size;
319 if (IS_ALIGNED(addr, PUD_SIZE) && gap >= PUD_SIZE &&
320 mmu_psize_defs[MMU_PAGE_1G].shift) {
321 mapping_size = PUD_SIZE;
323 } else if (IS_ALIGNED(addr, PMD_SIZE) && gap >= PMD_SIZE &&
324 mmu_psize_defs[MMU_PAGE_2M].shift) {
325 mapping_size = PMD_SIZE;
328 mapping_size = PAGE_SIZE;
329 psize = mmu_virtual_psize;
332 vaddr = (unsigned long)__va(addr);
334 if (overlaps_kernel_text(vaddr, vaddr + mapping_size) ||
335 overlaps_interrupt_vector_text(vaddr, vaddr + mapping_size)) {
336 prot = PAGE_KERNEL_X;
343 if (mapping_size != previous_size || exec != prev_exec) {
344 print_mapping(start, addr, previous_size, prev_exec);
348 rc = __map_kernel_page(vaddr, addr, prot, mapping_size, nid, start, end);
352 update_page_count(psize, 1);
355 print_mapping(start, addr, mapping_size, exec);
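/*
 * Build the kernel radix tree at boot: create the linear mapping for every
 * memblock range and allocate the process table.
 */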
359 static void __init radix_init_pgtable(void)
361 unsigned long rts_field;
362 phys_addr_t start, end;
365 /* We don't support slb for radix */
369 * Create the linear mapping
371 for_each_mem_range(i, &start, &end) {
373 * The memblock allocator is up at this point, so the
374 * page tables will be allocated within the range. No
375 * need for a node (which we don't have yet).
378 if (end >= RADIX_VMALLOC_START) {
379 pr_warn("Outside the supported range\n");
383 WARN_ON(create_physical_mapping(start, end,
387 if (!cpu_has_feature(CPU_FTR_HVMODE) &&
388 cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) {
390 * Older versions of KVM on these machines prefer if the
391 * guest only uses the low 19 PID bits.
398 * Allocate Partition table and process table for the
401 BUG_ON(PRTB_SIZE_SHIFT > 36);
402 process_tb = early_alloc_pgtable(1UL << PRTB_SIZE_SHIFT, -1, 0, 0);
404 * Fill in the process table.
406 rts_field = radix__get_tree_size();
407 process_tb->prtb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE);
410 * The init_mm context is given the first available (non-zero) PID,
411 * which is the "guard PID" and contains no page table. PIDR should
412 * never be set to zero because that duplicates the kernel address
413 * space at the 0x0... offset (quadrant 0)!
415 * An arbitrary PID that may later be allocated by the PID allocator
416 * for userspace processes must not be used either, because that
417 * would cause stale user mappings for that PID on CPUs outside of
418 * the TLB invalidation scheme (because it won't be in mm_cpumask).
420 * So permanently carve out one PID for the purpose of a guard PID.
422 init_mm.context.id = mmu_base_pid;
426 static void __init radix_init_partition_table(void)
428 unsigned long rts_field, dw0, dw1;
430 mmu_partition_table_init();
431 rts_field = radix__get_tree_size();
432 dw0 = rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE | PATB_HR;
433 dw1 = __pa(process_tb) | (PRTB_SIZE_SHIFT - 12) | PATB_GR;
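/*
 * Partition 0 entry: dw0 points at the host radix root (PATB_HR set),
 * dw1 at the process table with its encoded size (PATB_GR set).
 */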
434 mmu_partition_table_set_entry(0, dw0, dw1, false);
436 pr_info("Initializing Radix MMU\n");
439 static int __init get_idx_from_shift(unsigned int shift)
460 static int __init radix_dt_scan_page_sizes(unsigned long node,
461 const char *uname, int depth,
468 const char *type = of_get_flat_dt_prop(node, "device_type", NULL);
470 /* We are scanning "cpu" nodes only */
471 if (type == NULL || strcmp(type, "cpu") != 0)
474 /* Grab page size encodings */
475 prop = of_get_flat_dt_prop(node, "ibm,processor-radix-AP-encodings", &size);
479 pr_info("Page sizes from device-tree:\n");
480 for (; size >= 4; size -= 4, ++prop) {
482 struct mmu_psize_def *def;
484 /* top 3 bits are the AP encoding */
485 shift = be32_to_cpu(prop[0]) & ~(0xe << 28);
486 ap = be32_to_cpu(prop[0]) >> 29;
487 pr_info("Page size shift = %d AP=0x%x\n", shift, ap);
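/*
 * Illustrative example (not taken from any particular device tree): an
 * entry of 0x20000015 would decode to shift = 21 (2M pages) with AP = 0x1.
 */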
489 idx = get_idx_from_shift(shift);
493 def = &mmu_psize_defs[idx];
496 def->h_rpt_pgsize = psize_to_rpti_pgsize(idx);
500 cur_cpu_spec->mmu_features &= ~MMU_FTR_NO_SLBIE_B;
504 void __init radix__early_init_devtree(void)
509 * Try to find the available page sizes in the device-tree
511 rc = of_scan_flat_dt(radix_dt_scan_page_sizes, NULL);
514 * No page size details found in device tree.
515 * Let's assume we have 4k and 64k page support
517 mmu_psize_defs[MMU_PAGE_4K].shift = 12;
518 mmu_psize_defs[MMU_PAGE_4K].ap = 0x0;
519 mmu_psize_defs[MMU_PAGE_4K].h_rpt_pgsize =
520 psize_to_rpti_pgsize(MMU_PAGE_4K);
522 mmu_psize_defs[MMU_PAGE_64K].shift = 16;
523 mmu_psize_defs[MMU_PAGE_64K].ap = 0x5;
524 mmu_psize_defs[MMU_PAGE_64K].h_rpt_pgsize =
525 psize_to_rpti_pgsize(MMU_PAGE_64K);
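/*
 * Boot-CPU MMU setup: pick the base page size, fill in the radix page table
 * geometry and kernel virtual layout, build the kernel page tables, and
 * (when running bare-metal) the partition table.
 */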
530 void __init radix__early_init_mmu(void)
534 #ifdef CONFIG_PPC_64S_HASH_MMU
535 #ifdef CONFIG_PPC_64K_PAGES
536 /* PAGE_SIZE mappings */
537 mmu_virtual_psize = MMU_PAGE_64K;
539 mmu_virtual_psize = MMU_PAGE_4K;
543 * initialize page table size
545 __pte_index_size = RADIX_PTE_INDEX_SIZE;
546 __pmd_index_size = RADIX_PMD_INDEX_SIZE;
547 __pud_index_size = RADIX_PUD_INDEX_SIZE;
548 __pgd_index_size = RADIX_PGD_INDEX_SIZE;
549 __pud_cache_index = RADIX_PUD_INDEX_SIZE;
550 __pte_table_size = RADIX_PTE_TABLE_SIZE;
551 __pmd_table_size = RADIX_PMD_TABLE_SIZE;
552 __pud_table_size = RADIX_PUD_TABLE_SIZE;
553 __pgd_table_size = RADIX_PGD_TABLE_SIZE;
555 __pmd_val_bits = RADIX_PMD_VAL_BITS;
556 __pud_val_bits = RADIX_PUD_VAL_BITS;
557 __pgd_val_bits = RADIX_PGD_VAL_BITS;
559 __kernel_virt_start = RADIX_KERN_VIRT_START;
560 __vmalloc_start = RADIX_VMALLOC_START;
561 __vmalloc_end = RADIX_VMALLOC_END;
562 __kernel_io_start = RADIX_KERN_IO_START;
563 __kernel_io_end = RADIX_KERN_IO_END;
564 vmemmap = (struct page *)RADIX_VMEMMAP_START;
565 ioremap_bot = IOREMAP_BASE;
568 pci_io_base = ISA_IO_BASE;
570 __pte_frag_nr = RADIX_PTE_FRAG_NR;
571 __pte_frag_size_shift = RADIX_PTE_FRAG_SIZE_SHIFT;
572 __pmd_frag_nr = RADIX_PMD_FRAG_NR;
573 __pmd_frag_size_shift = RADIX_PMD_FRAG_SIZE_SHIFT;
575 radix_init_pgtable();
577 if (!firmware_has_feature(FW_FEATURE_LPAR)) {
578 lpcr = mfspr(SPRN_LPCR);
579 mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
580 radix_init_partition_table();
582 radix_init_pseries();
585 memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
587 /* Switch to the guard PID before turning on MMU */
588 radix__switch_mmu_context(NULL, &init_mm);
592 void radix__early_init_mmu_secondary(void)
596 * update partition table control register and UPRT
598 if (!firmware_has_feature(FW_FEATURE_LPAR)) {
599 lpcr = mfspr(SPRN_LPCR);
600 mtspr(SPRN_LPCR, lpcr | LPCR_UPRT | LPCR_HR);
602 set_ptcr_when_no_uv(__pa(partition_tb) |
603 (PATB_SIZE_SHIFT - 12));
606 radix__switch_mmu_context(NULL, &init_mm);
609 /* Make sure userspace can't change the AMR */
610 mtspr(SPRN_UAMOR, 0);
613 /* Called during kexec sequence with MMU off */
614 notrace void radix__mmu_cleanup_all(void)
618 if (!firmware_has_feature(FW_FEATURE_LPAR)) {
619 lpcr = mfspr(SPRN_LPCR);
620 mtspr(SPRN_LPCR, lpcr & ~LPCR_UPRT);
621 set_ptcr_when_no_uv(0);
622 powernv_set_nmmu_ptcr(0);
623 radix__flush_tlb_all();
627 #ifdef CONFIG_MEMORY_HOTPLUG
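/*
 * Memory hot-unplug helpers: free a page table page once every entry in it
 * has been cleared.
 */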
628 static void free_pte_table(pte_t *pte_start, pmd_t *pmd)
633 for (i = 0; i < PTRS_PER_PTE; i++) {
639 pte_free_kernel(&init_mm, pte_start);
643 static void free_pmd_table(pmd_t *pmd_start, pud_t *pud)
648 for (i = 0; i < PTRS_PER_PMD; i++) {
654 pmd_free(&init_mm, pmd_start);
658 static void free_pud_table(pud_t *pud_start, p4d_t *p4d)
663 for (i = 0; i < PTRS_PER_PUD; i++) {
669 pud_free(&init_mm, pud_start);
673 #ifdef CONFIG_SPARSEMEM_VMEMMAP
674 static bool __meminit vmemmap_pmd_is_unused(unsigned long addr, unsigned long end)
676 unsigned long start = ALIGN_DOWN(addr, PMD_SIZE);
678 return !vmemmap_populated(start, PMD_SIZE);
681 static bool __meminit vmemmap_page_is_unused(unsigned long addr, unsigned long end)
683 unsigned long start = ALIGN_DOWN(addr, PAGE_SIZE);
685 return !vmemmap_populated(start, PAGE_SIZE);
690 static void __meminit free_vmemmap_pages(struct page *page,
691 struct vmem_altmap *altmap,
694 unsigned int nr_pages = 1 << order;
697 unsigned long alt_start, alt_end;
698 unsigned long base_pfn = page_to_pfn(page);
701 * with 2M vmemmap mapping we can have things set up
702 * such that even though altmap is specified we never
705 alt_start = altmap->base_pfn;
706 alt_end = altmap->base_pfn + altmap->reserve + altmap->free;
708 if (base_pfn >= alt_start && base_pfn < alt_end) {
709 vmem_altmap_free(altmap, nr_pages);
714 if (PageReserved(page)) {
715 /* allocated from memblock */
717 free_reserved_page(page++);
719 free_pages((unsigned long)page_address(page), order);
722 static void __meminit remove_pte_table(pte_t *pte_start, unsigned long addr,
723 unsigned long end, bool direct,
724 struct vmem_altmap *altmap)
726 unsigned long next, pages = 0;
729 pte = pte_start + pte_index(addr);
730 for (; addr < end; addr = next, pte++) {
731 next = (addr + PAGE_SIZE) & PAGE_MASK;
735 if (!pte_present(*pte))
738 if (PAGE_ALIGNED(addr) && PAGE_ALIGNED(next)) {
740 free_vmemmap_pages(pte_page(*pte), altmap, 0);
741 pte_clear(&init_mm, addr, pte);
744 #ifdef CONFIG_SPARSEMEM_VMEMMAP
745 else if (!direct && vmemmap_page_is_unused(addr, next)) {
746 free_vmemmap_pages(pte_page(*pte), altmap, 0);
747 pte_clear(&init_mm, addr, pte);
752 update_page_count(mmu_virtual_psize, -pages);
755 static void __meminit remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
756 unsigned long end, bool direct,
757 struct vmem_altmap *altmap)
759 unsigned long next, pages = 0;
763 pmd = pmd_start + pmd_index(addr);
764 for (; addr < end; addr = next, pmd++) {
765 next = pmd_addr_end(addr, end);
767 if (!pmd_present(*pmd))
770 if (pmd_is_leaf(*pmd)) {
771 if (IS_ALIGNED(addr, PMD_SIZE) &&
772 IS_ALIGNED(next, PMD_SIZE)) {
774 free_vmemmap_pages(pmd_page(*pmd), altmap, get_order(PMD_SIZE));
775 pte_clear(&init_mm, addr, (pte_t *)pmd);
778 #ifdef CONFIG_SPARSEMEM_VMEMMAP
779 else if (!direct && vmemmap_pmd_is_unused(addr, next)) {
780 free_vmemmap_pages(pmd_page(*pmd), altmap, get_order(PMD_SIZE));
781 pte_clear(&init_mm, addr, (pte_t *)pmd);
787 pte_base = (pte_t *)pmd_page_vaddr(*pmd);
788 remove_pte_table(pte_base, addr, next, direct, altmap);
789 free_pte_table(pte_base, pmd);
792 update_page_count(MMU_PAGE_2M, -pages);
795 static void __meminit remove_pud_table(pud_t *pud_start, unsigned long addr,
796 unsigned long end, bool direct,
797 struct vmem_altmap *altmap)
799 unsigned long next, pages = 0;
803 pud = pud_start + pud_index(addr);
804 for (; addr < end; addr = next, pud++) {
805 next = pud_addr_end(addr, end);
807 if (!pud_present(*pud))
810 if (pud_is_leaf(*pud)) {
811 if (!IS_ALIGNED(addr, PUD_SIZE) ||
812 !IS_ALIGNED(next, PUD_SIZE)) {
813 WARN_ONCE(1, "%s: unaligned range\n", __func__);
816 pte_clear(&init_mm, addr, (pte_t *)pud);
821 pmd_base = pud_pgtable(*pud);
822 remove_pmd_table(pmd_base, addr, next, direct, altmap);
823 free_pmd_table(pmd_base, pud);
826 update_page_count(MMU_PAGE_1G, -pages);
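/*
 * Tear down kernel mappings for [start, end): walk the tree top down, clear
 * leaf entries, free page table pages that become empty, then flush the
 * kernel TLB for the range.
 */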
829 static void __meminit
830 remove_pagetable(unsigned long start, unsigned long end, bool direct,
831 struct vmem_altmap *altmap)
833 unsigned long addr, next;
838 spin_lock(&init_mm.page_table_lock);
840 for (addr = start; addr < end; addr = next) {
841 next = pgd_addr_end(addr, end);
843 pgd = pgd_offset_k(addr);
844 p4d = p4d_offset(pgd, addr);
845 if (!p4d_present(*p4d))
848 if (p4d_is_leaf(*p4d)) {
849 if (!IS_ALIGNED(addr, P4D_SIZE) ||
850 !IS_ALIGNED(next, P4D_SIZE)) {
851 WARN_ONCE(1, "%s: unaligned range\n", __func__);
855 pte_clear(&init_mm, addr, (pte_t *)pgd);
859 pud_base = p4d_pgtable(*p4d);
860 remove_pud_table(pud_base, addr, next, direct, altmap);
861 free_pud_table(pud_base, p4d);
864 spin_unlock(&init_mm.page_table_lock);
865 radix__flush_tlb_kernel_range(start, end);
868 int __meminit radix__create_section_mapping(unsigned long start,
869 unsigned long end, int nid,
872 if (end >= RADIX_VMALLOC_START) {
873 pr_warn("Outside the supported range\n");
877 return create_physical_mapping(__pa(start), __pa(end),
881 int __meminit radix__remove_section_mapping(unsigned long start, unsigned long end)
883 remove_pagetable(start, end, true, NULL);
886 #endif /* CONFIG_MEMORY_HOTPLUG */
888 #ifdef CONFIG_SPARSEMEM_VMEMMAP
889 static int __map_kernel_page_nid(unsigned long ea, unsigned long pa,
890 pgprot_t flags, unsigned int map_page_size,
893 return __map_kernel_page(ea, pa, flags, map_page_size, nid, 0, 0);
896 int __meminit radix__vmemmap_create_mapping(unsigned long start,
897 unsigned long page_size,
900 /* Create a PTE encoding */
901 int nid = early_pfn_to_nid(phys >> PAGE_SHIFT);
904 if ((start + page_size) >= RADIX_VMEMMAP_END) {
905 pr_warn("Outside the supported range\n");
909 ret = __map_kernel_page_nid(start, phys, PAGE_KERNEL, page_size, nid);
916 bool vmemmap_can_optimize(struct vmem_altmap *altmap, struct dev_pagemap *pgmap)
919 return __vmemmap_can_optimize(altmap, pgmap);
924 int __meminit vmemmap_check_pmd(pmd_t *pmdp, int node,
925 unsigned long addr, unsigned long next)
927 int large = pmd_large(*pmdp);
930 vmemmap_verify(pmdp_ptep(pmdp), node, addr, next);
935 void __meminit vmemmap_set_pmd(pmd_t *pmdp, void *p, int node,
936 unsigned long addr, unsigned long next)
939 pte_t *ptep = pmdp_ptep(pmdp);
941 VM_BUG_ON(!IS_ALIGNED(addr, PMD_SIZE));
942 entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
943 set_pte_at(&init_mm, addr, ptep, entry);
944 asm volatile("ptesync": : :"memory");
946 vmemmap_verify(ptep, node, addr, next);
949 static pte_t * __meminit radix__vmemmap_pte_populate(pmd_t *pmdp, unsigned long addr,
951 struct vmem_altmap *altmap,
954 pte_t *pte = pte_offset_kernel(pmdp, addr);
956 if (pte_none(*pte)) {
962 * make sure we don't create altmap mappings
963 * covering things outside the device.
965 if (altmap && altmap_cross_boundary(altmap, addr, PAGE_SIZE))
968 p = vmemmap_alloc_block_buf(PAGE_SIZE, node, altmap);
970 p = vmemmap_alloc_block_buf(PAGE_SIZE, node, NULL);
973 pr_debug("PAGE_SIZE vmemmap mapping\n");
976 * When a PTE/PMD entry is freed from the init_mm
977 * there's a free_pages() call to this page allocated
978 * above. Thus this get_page() is paired with the
979 * put_page_testzero() on the freeing path.
980 * This can only be called by certain ZONE_DEVICE paths,
981 * and through vmemmap_populate_compound_pages() when
985 p = page_to_virt(reuse);
986 pr_debug("Tail page reuse vmemmap mapping\n");
989 VM_BUG_ON(!PAGE_ALIGNED(addr));
990 entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
991 set_pte_at(&init_mm, addr, pte, entry);
992 asm volatile("ptesync": : :"memory");
997 static inline pud_t *vmemmap_pud_alloc(p4d_t *p4dp, int node,
998 unsigned long address)
1002 /* To keep it simple, all early vmemmap mappings are done at PAGE_SIZE */
1003 if (unlikely(p4d_none(*p4dp))) {
1004 if (unlikely(!slab_is_available())) {
1005 pud = early_alloc_pgtable(PAGE_SIZE, node, 0, 0);
1006 p4d_populate(&init_mm, p4dp, pud);
1007 /* go to the pud_offset */
1009 return pud_alloc(&init_mm, p4dp, address);
1011 return pud_offset(p4dp, address);
1014 static inline pmd_t *vmemmap_pmd_alloc(pud_t *pudp, int node,
1015 unsigned long address)
1019 /* To keep it simple, all early vmemmap mappings are done at PAGE_SIZE */
1020 if (unlikely(pud_none(*pudp))) {
1021 if (unlikely(!slab_is_available())) {
1022 pmd = early_alloc_pgtable(PAGE_SIZE, node, 0, 0);
1023 pud_populate(&init_mm, pudp, pmd);
1025 return pmd_alloc(&init_mm, pudp, address);
1027 return pmd_offset(pudp, address);
1030 static inline pte_t *vmemmap_pte_alloc(pmd_t *pmdp, int node,
1031 unsigned long address)
1035 /* To keep it simple, all early vmemmap mappings are done at PAGE_SIZE */
1036 if (unlikely(pmd_none(*pmdp))) {
1037 if (unlikely(!slab_is_available())) {
1038 pte = early_alloc_pgtable(PAGE_SIZE, node, 0, 0);
1039 pmd_populate(&init_mm, pmdp, pte);
1041 return pte_alloc_kernel(pmdp, address);
1043 return pte_offset_kernel(pmdp, address);
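/*
 * Populate vmemmap for [start, end): use PMD_SIZE leaf mappings where
 * alignment and the altmap allow, falling back to PAGE_SIZE PTEs otherwise.
 */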
1048 int __meminit radix__vmemmap_populate(unsigned long start, unsigned long end, int node,
1049 struct vmem_altmap *altmap)
1059 for (addr = start; addr < end; addr = next) {
1060 next = pmd_addr_end(addr, end);
1062 pgd = pgd_offset_k(addr);
1063 p4d = p4d_offset(pgd, addr);
1064 pud = vmemmap_pud_alloc(p4d, node, addr);
1067 pmd = vmemmap_pmd_alloc(pud, node, addr);
1071 if (pmd_none(READ_ONCE(*pmd))) {
1075 * keep it simple by checking addr PMD_SIZE alignment
1076 * and verifying the device boundary condition.
1077 * For us to use a pmd mapping, both addr and pfn should
1078 * be aligned. We skip if addr is not aligned and for
1079 * pfn we hope we have extra area in the altmap that
1080 * can help to find an aligned block. This can result
1081 * in altmap block allocation failures, in which case
1082 * we fall back to RAM for vmemmap allocation.
1084 if (altmap && (!IS_ALIGNED(addr, PMD_SIZE) ||
1085 altmap_cross_boundary(altmap, addr, PMD_SIZE))) {
1087 * make sure we don't create altmap mappings
1088 * covering things outside the device.
1093 p = vmemmap_alloc_block_buf(PMD_SIZE, node, altmap);
1095 vmemmap_set_pmd(pmd, p, node, addr, next);
1096 pr_debug("PMD_SIZE vmemmap mapping\n");
1098 } else if (altmap) {
1100 * A vmemmap block allocation can fail due to
1101 * alignment requirements and our trying to align
1102 * things aggressively, thereby running out of
1103 * space. Try base mapping on failure.
1107 } else if (vmemmap_check_pmd(pmd, node, addr, next)) {
1109 * If a huge mapping exists due to an early call to
1110 * vmemmap_populate, let's try to use that.
1116 * Not able to allocate higher-order memory to back the memmap,
1117 * or we found a pointer to a pte page. Allocate base page
1120 pte = vmemmap_pte_alloc(pmd, node, addr);
1124 pte = radix__vmemmap_pte_populate(pmd, addr, node, altmap, NULL);
1128 vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
1129 next = addr + PAGE_SIZE;
1134 static pte_t * __meminit radix__vmemmap_populate_address(unsigned long addr, int node,
1135 struct vmem_altmap *altmap,
1144 pgd = pgd_offset_k(addr);
1145 p4d = p4d_offset(pgd, addr);
1146 pud = vmemmap_pud_alloc(p4d, node, addr);
1149 pmd = vmemmap_pmd_alloc(pud, node, addr);
1154 * The second page is mapped as a hugepage due to a nearby request.
1155 * Force our mapping to page size without deduplication
1158 pte = vmemmap_pte_alloc(pmd, node, addr);
1161 radix__vmemmap_pte_populate(pmd, addr, node, NULL, NULL);
1162 vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
1167 static pte_t * __meminit vmemmap_compound_tail_page(unsigned long addr,
1168 unsigned long pfn_offset, int node)
1175 unsigned long map_addr;
1177 /* the second vmemmap page which we use for duplication */
1178 map_addr = addr - pfn_offset * sizeof(struct page) + PAGE_SIZE;
1179 pgd = pgd_offset_k(map_addr);
1180 p4d = p4d_offset(pgd, map_addr);
1181 pud = vmemmap_pud_alloc(p4d, node, map_addr);
1184 pmd = vmemmap_pmd_alloc(pud, node, map_addr);
1189 * The second page is mapped as a hugepage due to a nearby request.
1190 * Force our mapping to page size without deduplication
1193 pte = vmemmap_pte_alloc(pmd, node, map_addr);
1197 * Check if there exists a mapping to the left
1199 if (pte_none(*pte)) {
1201 * Populate the head page vmemmap page.
1202 * It can fall in different pmd, hence
1203 * vmemmap_populate_address()
1205 pte = radix__vmemmap_populate_address(map_addr - PAGE_SIZE, node, NULL, NULL);
1209 * Populate the tail pages vmemmap page
1211 pte = radix__vmemmap_pte_populate(pmd, map_addr, node, NULL, NULL);
1214 vmemmap_verify(pte, node, map_addr, map_addr + PAGE_SIZE);
1220 int __meminit vmemmap_populate_compound_pages(unsigned long start_pfn,
1221 unsigned long start,
1222 unsigned long end, int node,
1223 struct dev_pagemap *pgmap)
1226 * we want to map things as base page size mappings so that
1227 * we can save space in vmemmap. We could have a huge mapping
1228 * covering both edges.
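 * Roughly: the head page and the first tail page get real backing pages,
 * and the remaining tail-page vmemmap PTEs are pointed at that shared tail
 * page (see vmemmap_compound_tail_page() above).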
1231 unsigned long addr_pfn = start_pfn;
1239 for (addr = start; addr < end; addr = next) {
1241 pgd = pgd_offset_k(addr);
1242 p4d = p4d_offset(pgd, addr);
1243 pud = vmemmap_pud_alloc(p4d, node, addr);
1246 pmd = vmemmap_pmd_alloc(pud, node, addr);
1250 if (pmd_leaf(READ_ONCE(*pmd))) {
1251 /* existing huge mapping. Skip the range */
1252 addr_pfn += (PMD_SIZE >> PAGE_SHIFT);
1253 next = pmd_addr_end(addr, end);
1256 pte = vmemmap_pte_alloc(pmd, node, addr);
1259 if (!pte_none(*pte)) {
1261 * This could be because we already have a compound
1262 * page whose VMEMMAP_RESERVE_NR pages were mapped and
1263 * this request falls within those pages.
1266 next = addr + PAGE_SIZE;
1269 unsigned long nr_pages = pgmap_vmemmap_nr(pgmap);
1270 unsigned long pfn_offset = addr_pfn - ALIGN_DOWN(addr_pfn, nr_pages);
1271 pte_t *tail_page_pte;
1274 * if the address is aligned to huge page size it is the head page mapping.
1277 if (pfn_offset == 0) {
1278 /* Populate the head page vmemmap page */
1279 pte = radix__vmemmap_pte_populate(pmd, addr, node, NULL, NULL);
1282 vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
1285 * Populate the tail pages vmemmap page
1286 * It can fall in different pmd, hence
1287 * vmemmap_populate_address()
1289 pte = radix__vmemmap_populate_address(addr + PAGE_SIZE, node, NULL, NULL);
1294 next = addr + 2 * PAGE_SIZE;
1298 * Get the 2nd mapping's details;
1299 * also create it if it doesn't exist
1301 tail_page_pte = vmemmap_compound_tail_page(addr, pfn_offset, node);
1302 if (!tail_page_pte) {
1304 pte = radix__vmemmap_pte_populate(pmd, addr, node, NULL, NULL);
1307 vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
1310 next = addr + PAGE_SIZE;
1314 pte = radix__vmemmap_pte_populate(pmd, addr, node, NULL, pte_page(*tail_page_pte));
1317 vmemmap_verify(pte, node, addr, addr + PAGE_SIZE);
1320 next = addr + PAGE_SIZE;
1328 #ifdef CONFIG_MEMORY_HOTPLUG
1329 void __meminit radix__vmemmap_remove_mapping(unsigned long start, unsigned long page_size)
1331 remove_pagetable(start, start + page_size, true, NULL);
1334 void __ref radix__vmemmap_free(unsigned long start, unsigned long end,
1335 struct vmem_altmap *altmap)
1337 remove_pagetable(start, end, false, altmap);
1342 #if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
1343 void radix__kernel_map_pages(struct page *page, int numpages, int enable)
1347 addr = (unsigned long)page_address(page);
1350 set_memory_p(addr, numpages);
1352 set_memory_np(addr, numpages);
1356 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1358 unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
1359 pmd_t *pmdp, unsigned long clr,
1364 #ifdef CONFIG_DEBUG_VM
1365 WARN_ON(!radix__pmd_trans_huge(*pmdp) && !pmd_devmap(*pmdp));
1366 assert_spin_locked(pmd_lockptr(mm, pmdp));
1369 old = radix__pte_update(mm, addr, pmdp_ptep(pmdp), clr, set, 1);
1370 trace_hugepage_update_pmd(addr, old, clr, set);
1375 unsigned long radix__pud_hugepage_update(struct mm_struct *mm, unsigned long addr,
1376 pud_t *pudp, unsigned long clr,
1381 #ifdef CONFIG_DEBUG_VM
1382 WARN_ON(!pud_devmap(*pudp));
1383 assert_spin_locked(pud_lockptr(mm, pudp));
1386 old = radix__pte_update(mm, addr, pudp_ptep(pudp), clr, set, 1);
1387 trace_hugepage_update_pud(addr, old, clr, set);
1392 pmd_t radix__pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
1398 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1399 VM_BUG_ON(radix__pmd_trans_huge(*pmdp));
1400 VM_BUG_ON(pmd_devmap(*pmdp));
1402 * khugepaged calls this for normal pmd
1407 radix__flush_tlb_collapsed_pmd(vma->vm_mm, address);
1413 * For us pgtable_t is pte_t *. In order to save the deposited
1414 * page table, we consider the allocated page table as a list
1415 * head. On withdraw we need to make sure we zero out the used
1416 * list_head memory area.
1418 void radix__pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
1421 struct list_head *lh = (struct list_head *) pgtable;
1423 assert_spin_locked(pmd_lockptr(mm, pmdp));
1426 if (!pmd_huge_pte(mm, pmdp))
1429 list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
1430 pmd_huge_pte(mm, pmdp) = pgtable;
1433 pgtable_t radix__pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
1437 struct list_head *lh;
1439 assert_spin_locked(pmd_lockptr(mm, pmdp));
1442 pgtable = pmd_huge_pte(mm, pmdp);
1443 lh = (struct list_head *) pgtable;
1445 pmd_huge_pte(mm, pmdp) = NULL;
1447 pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
1450 ptep = (pte_t *) pgtable;
1457 pmd_t radix__pmdp_huge_get_and_clear(struct mm_struct *mm,
1458 unsigned long addr, pmd_t *pmdp)
1463 old = radix__pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
1464 old_pmd = __pmd(old);
1468 pud_t radix__pudp_huge_get_and_clear(struct mm_struct *mm,
1469 unsigned long addr, pud_t *pudp)
1474 old = radix__pud_hugepage_update(mm, addr, pudp, ~0UL, 0);
1475 old_pud = __pud(old);
1479 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1481 void radix__ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep,
1482 pte_t entry, unsigned long address, int psize)
1484 struct mm_struct *mm = vma->vm_mm;
1485 unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_SOFT_DIRTY |
1486 _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC);
1488 unsigned long change = pte_val(entry) ^ pte_val(*ptep);
1490 * On POWER9, the NMMU is not able to relax PTE access permissions
1491 * for a translation with a TLB. The PTE must be invalidated and the
1492 * TLB flushed before the new PTE is installed.
1494 * This only needs to be done for radix, because hash translation does
1495 * flush when updating the linux pte (and we don't support NMMU
1496 * accelerators on HPT on POWER9 anyway XXX: do we?).
1498 * POWER10 (and P9P) NMMU does behave as per ISA.
1500 if (!cpu_has_feature(CPU_FTR_ARCH_31) && (change & _PAGE_RW) &&
1501 atomic_read(&mm->context.copros) > 0) {
1502 unsigned long old_pte, new_pte;
1504 old_pte = __radix_pte_update(ptep, _PAGE_PRESENT, _PAGE_INVALID);
1505 new_pte = old_pte | set;
1506 radix__flush_tlb_page_psize(mm, address, psize);
1507 __radix_pte_update(ptep, _PAGE_INVALID, new_pte);
1509 __radix_pte_update(ptep, 0, set);
1511 * Book3S does not require a TLB flush when relaxing access
1512 * restrictions on an address space (modulo the POWER9 nest
1513 * MMU issue above), because the MMU will reload the PTE after
1514 * taking an access fault, as defined by the architecture. See
1515 * "Setting a Reference or Change Bit or Upgrading Access
1516 * Authority (PTE Subject to Atomic Hardware Updates)" in
1517 * Power ISA Version 3.1B.
1520 /* See ptesync comment in radix__set_pte_at */
1523 void radix__ptep_modify_prot_commit(struct vm_area_struct *vma,
1524 unsigned long addr, pte_t *ptep,
1525 pte_t old_pte, pte_t pte)
1527 struct mm_struct *mm = vma->vm_mm;
1530 * POWER9 NMMU must flush the TLB after clearing the PTE before
1531 * installing a PTE with more relaxed access permissions, see
1532 * radix__ptep_set_access_flags.
1534 if (!cpu_has_feature(CPU_FTR_ARCH_31) &&
1535 is_pte_rw_upgrade(pte_val(old_pte), pte_val(pte)) &&
1536 (atomic_read(&mm->context.copros) > 0))
1537 radix__flush_tlb_page(vma, addr);
1539 set_pte_at(mm, addr, ptep, pte);
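/*
 * Huge ioremap helpers: install or tear down leaf PUD/PMD entries in the
 * kernel page tables.
 */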
1542 int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
1544 pte_t *ptep = (pte_t *)pud;
1545 pte_t new_pud = pfn_pte(__phys_to_pfn(addr), prot);
1547 if (!radix_enabled())
1550 set_pte_at(&init_mm, 0 /* radix unused */, ptep, new_pud);
1555 int pud_clear_huge(pud_t *pud)
1557 if (pud_is_leaf(*pud)) {
1565 int pud_free_pmd_page(pud_t *pud, unsigned long addr)
1570 pmd = pud_pgtable(*pud);
1573 flush_tlb_kernel_range(addr, addr + PUD_SIZE);
1575 for (i = 0; i < PTRS_PER_PMD; i++) {
1576 if (!pmd_none(pmd[i])) {
1578 pte = (pte_t *)pmd_page_vaddr(pmd[i]);
1580 pte_free_kernel(&init_mm, pte);
1584 pmd_free(&init_mm, pmd);
1589 int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
1591 pte_t *ptep = (pte_t *)pmd;
1592 pte_t new_pmd = pfn_pte(__phys_to_pfn(addr), prot);
1594 if (!radix_enabled())
1597 set_pte_at(&init_mm, 0 /* radix unused */, ptep, new_pmd);
1602 int pmd_clear_huge(pmd_t *pmd)
1604 if (pmd_is_leaf(*pmd)) {
1612 int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
1616 pte = (pte_t *)pmd_page_vaddr(*pmd);
1619 flush_tlb_kernel_range(addr, addr + PMD_SIZE);
1621 pte_free_kernel(&init_mm, pte);