// SPDX-License-Identifier: GPL-2.0-only
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/swap.h> /* for totalram_pages */
#include <linux/memblock.h>
void *kmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}
EXPORT_SYMBOL(kmap);
void kunmap(struct page *page)
{
	if (in_interrupt())
		BUG();
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);
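
/*
 * Usage sketch (an illustration added here, not part of the kernel source):
 * kmap() may sleep, so it is only valid in process context, and every
 * kmap() must be paired with a kunmap() on the same page. The helper name
 * below is hypothetical.
 */
#if 0	/* example only, excluded from the build */
static void example_clear_page_sleeping(struct page *page)
{
	void *vaddr = kmap(page);	/* may sleep waiting for a kmap slot */

	memset(vaddr, 0, PAGE_SIZE);
	kunmap(page);			/* note: kunmap() takes the page */
}
#endif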
/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	unsigned long vaddr;
	int idx, type;

	preempt_disable();
	pagefault_disable();

	if (!PageHighMem(page))
		return page_address(page);

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	BUG_ON(!pte_none(*(kmap_pte-idx)));
	set_pte(kmap_pte-idx, mk_pte(page, prot));
	arch_flush_lazy_mmu_mode();

	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic_prot);
void *kmap_atomic(struct page *page)
{
	return kmap_atomic_prot(page, kmap_prot);
}
EXPORT_SYMBOL(kmap_atomic);
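
/*
 * Usage sketch (an illustration added here, not part of the kernel source):
 * an atomic kmap disables pagefaults and preemption, so the mapped window
 * must only be used for short, non-sleeping operations. The helper name is
 * hypothetical; kunmap_atomic() takes the kernel virtual address.
 */
#if 0	/* example only, excluded from the build */
static void example_zero_page_atomic(struct page *page)
{
	void *vaddr = kmap_atomic(page);

	memset(vaddr, 0, PAGE_SIZE);	/* no sleeping between map/unmap */
	kunmap_atomic(vaddr);
}
#endif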
/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn)
{
	return kmap_atomic_prot_pfn(pfn, kmap_prot);
}
EXPORT_SYMBOL_GPL(kmap_atomic_pfn);
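
/*
 * Usage sketch (an illustration added here, not part of the kernel source):
 * kmap_atomic_pfn() maps a raw page frame number, e.g. memory without a
 * struct page such as a device aperture. The helper name and offset are
 * hypothetical.
 */
#if 0	/* example only, excluded from the build */
static u32 example_read_u32_at_pfn(unsigned long pfn, unsigned int offset)
{
	void *vaddr = kmap_atomic_pfn(pfn);
	u32 val = *(u32 *)(vaddr + offset);

	kunmap_atomic(vaddr);
	return val;
}
#endif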
void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;

	if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
		int idx, type;

		type = kmap_atomic_idx();
		idx = type + KM_TYPE_NR * smp_processor_id();

#ifdef CONFIG_DEBUG_HIGHMEM
		WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
#endif
		/*
		 * Force other mappings to Oops if they'll try to access this
		 * pte without first remapping it. Keeping stale mappings
		 * around is also a bad idea, in case the page changes
		 * cacheability attributes or becomes a protected page in a
		 * hypervisor.
		 */
		kpte_clear_flush(kmap_pte-idx, vaddr);
		kmap_atomic_idx_pop();
		arch_flush_lazy_mmu_mode();
	}
#ifdef CONFIG_DEBUG_HIGHMEM
	else {
		BUG_ON(vaddr < PAGE_OFFSET);
		BUG_ON(vaddr >= (unsigned long)high_memory);
	}
#endif

	pagefault_enable();
	preempt_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
void __init set_highmem_pages_init(void)
{
	struct zone *zone;
	int nid;

	/*
	 * Explicitly reset zone->managed_pages because set_highmem_pages_init()
	 * is invoked before memblock_free_all()
	 */
	reset_all_zones_managed_pages();
	for_each_zone(zone) {
		unsigned long zone_start_pfn, zone_end_pfn;

		if (!is_highmem(zone))
			continue;

		zone_start_pfn = zone->zone_start_pfn;
		zone_end_pfn = zone_start_pfn + zone->spanned_pages;

		nid = zone_to_nid(zone);
		printk(KERN_INFO "Initializing %s for node %d (%08lx:%08lx)\n",
			zone->name, nid, zone_start_pfn, zone_end_pfn);

		add_highpages_with_active_regions(nid, zone_start_pfn,
			zone_end_pfn);
	}
}