// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/memblock.h>
#include <linux/export.h>
#include <linux/highmem.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>

/*
 * HIGHMEM API:
 *
 * kmap() API provides sleep semantics, hence referred to as "permanent maps".
 * It allows mapping LAST_PKMAP pages, using @last_pkmap_nr as the cursor
 * for book-keeping.
 *
 * kmap_atomic() can't sleep (calls pagefault_disable()), thus it provides
 * short-lived, a la "temporary mappings", which historically were implemented
 * as fixmaps (compile time addr etc). Their book-keeping is done per cpu.
 *
 * Both these facts combined (preemption disabled and per-cpu allocation)
 * mean the total number of concurrent fixmaps is limited to the max number
 * of such allocations in a single control path. Thus KM_TYPE_NR (another
 * historic relic) is a smallish number which caps the max per-cpu fixmaps.
 *
 * ARC HIGHMEM Details
 *
 * - the kernel vaddr space from 0x7z to 0x8z (i.e. 0x7000_0000 to 0x8000_0000,
 *   currently used by vmalloc/module) is now shared between vmalloc and kmap
 *   (non overlapping though)
 *
 * - Both fixmap/pkmap use a dedicated page table each, hooked up to swapper
 *   PGD. This means each only has 1 PGDIR_SIZE worth of kvaddr mappings, i.e.
 *   2M of kvaddr space for a typical config (8K page and 11:8:13 traversal
 *   split: 256 PTE slots x 8K page = 2M).
 *
 * - fixmap anyhow needs a limited number of mappings. So 2M kvaddr == 256 PTE
 *   slots across NR_CPUS is more than sufficient (generic code defines
 *   KM_TYPE_NR as 20).
 *
 * - pkmap being preemptible, in theory could do with more than 256 concurrent
 *   mappings. However the generic pkmap code, map_new_virtual(), doesn't
 *   traverse the PGD and only works with a single page table,
 *   @pkmap_page_table, hence all the simplicity of pkmap.
 */

extern pte_t * pkmap_page_table;
static pte_t * fixmap_page_table;

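/*
 * Example usage (illustrative sketch, not part of this file): a caller that
 * may be handed a highmem page picks the atomic or the sleepable variant.
 * The helpers below and their arguments are made up for illustration; only
 * the kmap()/kunmap()/kmap_atomic()/kunmap_atomic() calls are the real API
 * (kunmap_atomic() is the generic wrapper that calls __kunmap_atomic() below).
 *
 *	#include <linux/highmem.h>
 *	#include <linux/string.h>
 *
 *	static void copy_out(struct page *page, void *dst, size_t len)
 *	{
 *		void *src = kmap_atomic(page);	// can't sleep: per-cpu fixmap slot
 *
 *		memcpy(dst, src, len);		// len assumed <= PAGE_SIZE
 *		kunmap_atomic(src);
 *	}
 *
 *	static void copy_out_sleepable(struct page *page, void *dst, size_t len)
 *	{
 *		void *src = kmap(page);		// may sleep: pkmap window
 *
 *		memcpy(dst, src, len);
 *		kunmap(page);
 *	}
 */
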
void *kmap(struct page *page)
{
	BUG_ON(in_interrupt());
	if (!PageHighMem(page))
		return page_address(page);

	return kmap_high(page);
}
EXPORT_SYMBOL(kmap);

void *kmap_atomic(struct page *page)
{
	int idx, cpu_idx;
	unsigned long vaddr;

	preempt_disable();
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	cpu_idx = kmap_atomic_idx_push();
	idx = cpu_idx + KM_TYPE_NR * smp_processor_id();
	vaddr = FIXMAP_ADDR(idx);

	set_pte_at(&init_mm, vaddr, fixmap_page_table + idx,
		   mk_pte(page, kmap_prot));
	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic);

void __kunmap_atomic(void *kv)
{
	unsigned long kvaddr = (unsigned long)kv;

	if (kvaddr >= FIXMAP_BASE && kvaddr < (FIXMAP_BASE + FIXMAP_SIZE)) {

		/*
		 * Because preemption is disabled, this vaddr can be associated
		 * with the current allocated index.
		 * But in case of multiple live kmap_atomic(), it still relies
		 * on callers to unmap in the right order.
		 */
		int cpu_idx = kmap_atomic_idx();
		int idx = cpu_idx + KM_TYPE_NR * smp_processor_id();

		WARN_ON(kvaddr != FIXMAP_ADDR(idx));

		pte_clear(&init_mm, kvaddr, fixmap_page_table + idx);
		local_flush_tlb_kernel_range(kvaddr, kvaddr + PAGE_SIZE);

		kmap_atomic_idx_pop();
	}

	pagefault_enable();
	preempt_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);

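/*
 * Nesting note (illustrative sketch): slot allocation is a small per-cpu
 * stack (kmap_atomic_idx_push()/kmap_atomic_idx_pop()), so multiple live
 * kmap_atomic() mappings on the same cpu must be released in reverse (LIFO)
 * order, otherwise the WARN_ON() in __kunmap_atomic() above fires:
 *
 *	void *a = kmap_atomic(page1);	// takes fixmap slot idx
 *	void *b = kmap_atomic(page2);	// takes fixmap slot idx + 1
 *
 *	// ... use a and b ...
 *
 *	kunmap_atomic(b);		// release idx + 1 first
 *	kunmap_atomic(a);		// then idx
 */
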
static noinline pte_t * __init alloc_kmap_pgtable(unsigned long kvaddr)
{
	pgd_t *pgd_k;
	pud_t *pud_k;
	pmd_t *pmd_k;
	pte_t *pte_k;

	pgd_k = pgd_offset_k(kvaddr);
	pud_k = pud_offset(pgd_k, kvaddr);
	pmd_k = pmd_offset(pud_k, kvaddr);

	pte_k = (pte_t *)memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
	if (!pte_k)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);

	pmd_populate_kernel(&init_mm, pmd_k, pte_k);
	return pte_k;
}

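/*
 * Sizing arithmetic (numbers from the typical config described at the top of
 * this file, stated as an illustration rather than a guarantee): each window
 * set up below is a single page table, i.e. 256 PTE slots x 8K pages = 2M of
 * kvaddr, so fixmap + pkmap together need 4M of room between VMALLOC_END and
 * PAGE_OFFSET, which is what the first BUILD_BUG_ON() below asserts.
 */
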
void __init kmap_init(void)
{
	/* Due to recursive include hell, we can't do this in processor.h */
	BUILD_BUG_ON(PAGE_OFFSET < (VMALLOC_END + FIXMAP_SIZE + PKMAP_SIZE));

	BUILD_BUG_ON(KM_TYPE_NR > PTRS_PER_PTE);
	pkmap_page_table = alloc_kmap_pgtable(PKMAP_BASE);

	BUILD_BUG_ON(LAST_PKMAP > PTRS_PER_PTE);
	fixmap_page_table = alloc_kmap_pgtable(FIXMAP_BASE);
}