// SPDX-License-Identifier: GPL-2.0-only
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>
#include <linux/cc_platform.h>
#include <linux/efi.h>
#include <linux/pgtable.h>
#include <linux/kmsan.h>
#include <asm/set_memory.h>
#include <asm/e820/api.h>
#include <asm/efi.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/memtype.h>
#include <asm/setup.h>

#include "physaddr.h"
/*
 * Descriptor controlling ioremap() behavior.
 */
struct ioremap_desc {
	unsigned int flags;
};

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			enum page_cache_mode pcm)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WT:
		err = _set_memory_wt(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}
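/*
 * Illustrative only: converting the direct mapping of two pages to WC
 * ('vaddr' being a page-aligned direct-map address is assumed):
 *
 *	ioremap_change_attr(vaddr, 2 * PAGE_SIZE, _PAGE_CACHE_MODE_WC);
 */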
/* Does the range (or a subset of) contain normal RAM? */
static unsigned int __ioremap_check_ram(struct resource *res)
{
	unsigned long start_pfn, stop_pfn;
	unsigned long i;

	if ((res->flags & IORESOURCE_SYSTEM_RAM) != IORESOURCE_SYSTEM_RAM)
		return 0;

	start_pfn = (res->start + PAGE_SIZE - 1) >> PAGE_SHIFT;
	stop_pfn = (res->end + 1) >> PAGE_SHIFT;
	if (stop_pfn > start_pfn) {
		for (i = 0; i < (stop_pfn - start_pfn); ++i)
			if (pfn_valid(start_pfn + i) &&
			    !PageReserved(pfn_to_page(start_pfn + i)))
				return IORES_MAP_SYSTEM_RAM;
	}

	return 0;
}
/*
 * In a SEV guest, NONE and RESERVED should not be mapped encrypted because
 * there the whole memory is already encrypted.
 */
static unsigned int __ioremap_check_encrypted(struct resource *res)
{
	if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
		return 0;

	switch (res->desc) {
	case IORES_DESC_NONE:
	case IORES_DESC_RESERVED:
		break;
	default:
		return IORES_MAP_ENCRYPTED;
	}

	return 0;
}
/*
 * The EFI runtime services data area is not covered by walk_mem_res(), but must
 * be mapped encrypted when SEV is active.
 */
static void __ioremap_check_other(resource_size_t addr, struct ioremap_desc *desc)
{
	if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
		return;

	if (!IS_ENABLED(CONFIG_EFI))
		return;

	if (efi_mem_type(addr) == EFI_RUNTIME_SERVICES_DATA ||
	    (efi_mem_type(addr) == EFI_BOOT_SERVICES_DATA &&
	     efi_mem_attributes(addr) & EFI_MEMORY_RUNTIME))
		desc->flags |= IORES_MAP_ENCRYPTED;
}
static int __ioremap_collect_map_flags(struct resource *res, void *arg)
{
	struct ioremap_desc *desc = arg;

	if (!(desc->flags & IORES_MAP_SYSTEM_RAM))
		desc->flags |= __ioremap_check_ram(res);

	if (!(desc->flags & IORES_MAP_ENCRYPTED))
		desc->flags |= __ioremap_check_encrypted(res);

	return ((desc->flags & (IORES_MAP_SYSTEM_RAM | IORES_MAP_ENCRYPTED)) ==
		(IORES_MAP_SYSTEM_RAM | IORES_MAP_ENCRYPTED));
}
/*
 * To avoid multiple resource walks, this function walks resources marked as
 * IORESOURCE_MEM and IORESOURCE_BUSY, looking for system RAM and/or a
 * resource not described as IORES_DESC_NONE (e.g. IORES_DESC_ACPI_TABLES).
 *
 * After that, deal with misc other ranges in __ioremap_check_other() which do
 * not fall into the above category.
 */
static void __ioremap_check_mem(resource_size_t addr, unsigned long size,
				struct ioremap_desc *desc)
{
	u64 start, end;

	start = (u64)addr;
	end = start + size - 1;
	memset(desc, 0, sizeof(struct ioremap_desc));

	walk_mem_res(start, end, desc, __ioremap_collect_map_flags);

	__ioremap_check_other(addr, desc);
}
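/*
 * Illustrative sketch of how the collected flags are consumed; this simply
 * mirrors the checks done in __ioremap_caller() below:
 *
 *	struct ioremap_desc io_desc;
 *
 *	__ioremap_check_mem(phys_addr, size, &io_desc);
 *	if (io_desc.flags & IORES_MAP_SYSTEM_RAM)
 *		return NULL;			(refuse to remap normal RAM)
 *	if (io_desc.flags & IORES_MAP_ENCRYPTED)
 *		prot = pgprot_encrypted(prot);	(keep SEV guest data encrypted)
 */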
/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. It transparently creates kernel huge I/O mappings when
 * the physical address is aligned to a huge page size (1GB or 2MB) and
 * the requested size is at least the huge page size.
 *
 * NOTE: MTRRs can override PAT memory types with 4KB granularity. Therefore,
 * the mapping code falls back to smaller pages, toward 4KB, when a mapping
 * range is covered by non-WB type MTRRs.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *
__ioremap_caller(resource_size_t phys_addr, unsigned long size,
		 enum page_cache_mode pcm, void *caller, bool encrypted)
{
	unsigned long offset, vaddr;
	resource_size_t last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct ioremap_desc io_desc;
	struct vm_struct *area;
	enum page_cache_mode new_pcm;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;
	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}
	__ioremap_check_mem(phys_addr, size, &io_desc);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (io_desc.flags & IORES_MAP_SYSTEM_RAM) {
		WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
			  &phys_addr, &last_addr);
		return NULL;
	}
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	/*
	 * Mask out any bits not part of the actual physical
	 * address, like memory encryption bits.
	 */
	phys_addr &= PHYSICAL_PAGE_MASK;
	retval = memtype_reserve(phys_addr, (u64)phys_addr + size,
				 pcm, &new_pcm);
	if (retval) {
		printk(KERN_ERR "ioremap memtype_reserve failed %d\n", retval);
		return NULL;
	}

	if (pcm != new_pcm) {
		if (!is_new_memtype_allowed(phys_addr, size, pcm, new_pcm)) {
			printk(KERN_ERR
		"ioremap error for 0x%llx-0x%llx, requested 0x%x, got 0x%x\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				pcm, new_pcm);
			goto err_free_memtype;
		}
		pcm = new_pcm;
	}
	/*
	 * If the page being mapped is in memory and SEV is active then
	 * make sure the memory encryption attribute is enabled in the
	 * resulting mapping.
	 * In TDX guests, memory is marked private by default. If encryption
	 * is not requested (using encrypted), explicitly set decrypt
	 * attribute in all IOREMAPPED memory.
	 */
	prot = PAGE_KERNEL_IO;
	if ((io_desc.flags & IORES_MAP_ENCRYPTED) || encrypted)
		prot = pgprot_encrypted(prot);
	else
		prot = pgprot_decrypted(prot);
	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC));
		break;
	case _PAGE_CACHE_MODE_UC_MINUS:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));
		break;
	case _PAGE_CACHE_MODE_WC:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WC));
		break;
	case _PAGE_CACHE_MODE_WT:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WT));
		break;
	case _PAGE_CACHE_MODE_WB:
		break;
	}
	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		goto err_free_memtype;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;

	if (memtype_kernel_map_sync(phys_addr, size, pcm))
		goto err_free_area;

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
		goto err_free_area;

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	/*
	 * Check if the request spans more than any BAR in the iomem resource
	 * tree.
	 */
	if (iomem_map_sanity_check(unaligned_phys_addr, unaligned_size))
		pr_warn("caller %pS mapping multiple BARs\n", caller);

	return ret_addr;
err_free_area:
	free_vm_area(area);
err_free_memtype:
	memtype_free(phys_addr, phys_addr + size);
	return NULL;
}
/**
 * ioremap - map bus memory into CPU space
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * ioremap performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled() ? _PAGE_CACHE_MODE_UC : _PAGE_CACHE_MODE_UC_MINUS;
	 *
	 * Until we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS. Drivers that are certain they need or can already
	 * be converted over to strong UC can use ioremap_uc().
	 */
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap);
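/*
 * Usage sketch (illustrative only; 'pdev' and the MY_REG_CTRL offset are
 * hypothetical and not part of this file):
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap(pci_resource_start(pdev, 0),
 *		       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + MY_REG_CTRL);
 *	...
 *	iounmap(regs);
 */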
/**
 * ioremap_uc - map bus memory into CPU space as strongly uncachable
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * ioremap_uc performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked with a strong
 * preference as completely uncachable on the CPU when possible. For non-PAT
 * systems this ends up setting page-attribute flags PCD=1, PWT=1. For PAT
 * systems this will set the PAT entry for the pages as strong UC. This call
 * will honor existing caching rules from things like the PCI bus. Note that
 * there are other caches and buffers on many busses. In particular driver
 * authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_uc(resource_size_t phys_addr, unsigned long size)
{
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL_GPL(ioremap_uc);
/**
 * ioremap_wc - map memory into CPU space write combined
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WC,
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_wc);
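/*
 * Usage sketch (illustrative only): WC is the typical choice for
 * framebuffer-style apertures. 'fb_base' and 'fb_size' are hypothetical:
 *
 *	void __iomem *fb = ioremap_wc(fb_base, fb_size);
 *
 *	if (fb)
 *		memset_io(fb, 0, fb_size);
 */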
/**
 * ioremap_wt - map memory into CPU space write through
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write through.
 * Write through stores data into memory while keeping the cache up-to-date.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wt(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WT,
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_wt);
void __iomem *ioremap_encrypted(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
				__builtin_return_address(0), true);
}
EXPORT_SYMBOL(ioremap_encrypted);

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_cache);

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
			   unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size,
				pgprot2cachemode(__pgprot(prot_val)),
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_prot);
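/*
 * Usage sketch (illustrative only): ioremap_prot() takes raw page
 * protection bits and derives the cache mode from them, e.g. an
 * uncached mapping via pgprot_noncached():
 *
 *	void __iomem *p = ioremap_prot(phys_addr, size,
 *			pgprot_val(pgprot_noncached(PAGE_KERNEL)));
 */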
/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * The PCI/ISA range special-casing was removed from __ioremap()
	 * so this check, in theory, can be removed. However, there are
	 * cases where iounmap() is called for addresses not obtained via
	 * ioremap() (vga16fb for example). Add a warning so that these
	 * cases can be caught and fixed.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS)) {
		WARN(1, "iounmap() called for ISA range not obtained using ioremap()\n");
		return;
	}

	mmiotrace_iounmap(addr);

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	/*
	 * Use the vm area unlocked, assuming the caller ensures there
	 * isn't another iounmap for the same address in parallel. Reuse
	 * of the virtual address is prevented by leaving it in the global
	 * lists until we're done with it. cpa takes care of the direct
	 * mappings.
	 */
	p = find_vm_area((void __force *)addr);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	kmsan_iounmap_page_range((unsigned long)addr,
				 (unsigned long)addr + get_vm_area_size(p));
	memtype_free(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);
/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(phys_addr_t phys)
{
	unsigned long start  = phys &  PAGE_MASK;
	unsigned long offset = phys & ~PAGE_MASK;
	void *vaddr;

	/* memremap() maps if RAM, otherwise falls back to ioremap() */
	vaddr = memremap(start, PAGE_SIZE, MEMREMAP_WB);

	/* Only add the offset on success and return NULL if memremap() failed */
	if (vaddr)
		vaddr += offset;

	return vaddr;
}

void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
	memunmap((void *)((unsigned long)addr & PAGE_MASK));
}
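/*
 * Illustrative pairing, as used by the /dev/mem read path in
 * drivers/char/mem.c ('buf' and 'count' are hypothetical here):
 *
 *	void *ptr = xlate_dev_mem_ptr(p);
 *	if (!ptr)
 *		return -EFAULT;
 *	remaining = copy_to_user(buf, ptr, count);
 *	unxlate_dev_mem_ptr(p, ptr);
 */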
#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
 * Examine the physical address to determine if it is an area of memory
 * that should be mapped decrypted. If the memory is not part of the
 * kernel usable area it was accessed and created decrypted, so these
 * areas should be mapped decrypted. And since the encryption key can
 * change across reboots, persistent memory should also be mapped
 * decrypted.
 *
 * If SEV is active, that implies that BIOS/UEFI also ran encrypted so
 * only persistent memory should be mapped decrypted.
 */
static bool memremap_should_map_decrypted(resource_size_t phys_addr,
					  unsigned long size)
{
	int is_pmem;

	/*
	 * Check if the address is part of a persistent memory region.
	 * This check covers areas added by E820, EFI and ACPI.
	 */
	is_pmem = region_intersects(phys_addr, size, IORESOURCE_MEM,
				    IORES_DESC_PERSISTENT_MEMORY);
	if (is_pmem != REGION_DISJOINT)
		return true;

	/*
	 * Check if the non-volatile attribute is set for an EFI
	 * reserved area.
	 */
	if (efi_enabled(EFI_BOOT)) {
		switch (efi_mem_type(phys_addr)) {
		case EFI_RESERVED_TYPE:
			if (efi_mem_attributes(phys_addr) & EFI_MEMORY_NV)
				return true;
			break;
		default:
			break;
		}
	}

	/* Check if the address is outside kernel usable area */
	switch (e820__get_entry_type(phys_addr, phys_addr + size - 1)) {
	case E820_TYPE_RESERVED:
	case E820_TYPE_ACPI:
	case E820_TYPE_NVS:
	case E820_TYPE_UNUSABLE:
		/* For SEV, these areas are encrypted */
		if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
			break;
		fallthrough;

	case E820_TYPE_PRAM:
		return true;

	default:
		break;
	}

	return false;
}
/*
 * Examine the physical address to determine if it is EFI data. Check
 * it against the boot params structure and EFI tables and memory types.
 */
static bool memremap_is_efi_data(resource_size_t phys_addr,
				 unsigned long size)
{
	u64 paddr;

	/* Check if the address is part of EFI boot/runtime data */
	if (!efi_enabled(EFI_BOOT))
		return false;

	paddr = boot_params.efi_info.efi_memmap_hi;
	paddr <<= 32;
	paddr |= boot_params.efi_info.efi_memmap;
	if (phys_addr == paddr)
		return true;

	paddr = boot_params.efi_info.efi_systab_hi;
	paddr <<= 32;
	paddr |= boot_params.efi_info.efi_systab;
	if (phys_addr == paddr)
		return true;

	if (efi_is_table_address(phys_addr))
		return true;

	switch (efi_mem_type(phys_addr)) {
	case EFI_BOOT_SERVICES_DATA:
	case EFI_RUNTIME_SERVICES_DATA:
		return true;
	default:
		break;
	}

	return false;
}
/*
 * Examine the physical address to determine if it is boot data by checking
 * it against the boot params setup_data chain.
 */
static bool memremap_is_setup_data(resource_size_t phys_addr,
				   unsigned long size)
{
	struct setup_indirect *indirect;
	struct setup_data *data;
	u64 paddr, paddr_next;

	paddr = boot_params.hdr.setup_data;
	while (paddr) {
		unsigned int len;

		if (phys_addr == paddr)
			return true;

		data = memremap(paddr, sizeof(*data),
				MEMREMAP_WB | MEMREMAP_DEC);
		if (!data) {
			pr_warn("failed to memremap setup_data entry\n");
			return false;
		}

		paddr_next = data->next;
		len = data->len;

		if ((phys_addr > paddr) && (phys_addr < (paddr + len))) {
			memunmap(data);
			return true;
		}

		if (data->type == SETUP_INDIRECT) {
			memunmap(data);
			data = memremap(paddr, sizeof(*data) + len,
					MEMREMAP_WB | MEMREMAP_DEC);
			if (!data) {
				pr_warn("failed to memremap indirect setup_data\n");
				return false;
			}

			indirect = (struct setup_indirect *)data->data;

			if (indirect->type != SETUP_INDIRECT) {
				paddr = indirect->addr;
				len = indirect->len;
			}
		}

		memunmap(data);

		if ((phys_addr > paddr) && (phys_addr < (paddr + len)))
			return true;

		paddr = paddr_next;
	}

	return false;
}
/*
 * Examine the physical address to determine if it is boot data by checking
 * it against the boot params setup_data chain (early boot version).
 */
static bool __init early_memremap_is_setup_data(resource_size_t phys_addr,
						unsigned long size)
{
	struct setup_indirect *indirect;
	struct setup_data *data;
	u64 paddr, paddr_next;

	paddr = boot_params.hdr.setup_data;
	while (paddr) {
		unsigned int len, size;

		if (phys_addr == paddr)
			return true;

		data = early_memremap_decrypted(paddr, sizeof(*data));
		if (!data) {
			pr_warn("failed to early memremap setup_data entry\n");
			return false;
		}

		size = sizeof(*data);

		paddr_next = data->next;
		len = data->len;

		if ((phys_addr > paddr) && (phys_addr < (paddr + len))) {
			early_memunmap(data, sizeof(*data));
			return true;
		}

		if (data->type == SETUP_INDIRECT) {
			size += len;
			early_memunmap(data, sizeof(*data));
			data = early_memremap_decrypted(paddr, size);
			if (!data) {
				pr_warn("failed to early memremap indirect setup_data\n");
				return false;
			}

			indirect = (struct setup_indirect *)data->data;

			if (indirect->type != SETUP_INDIRECT) {
				paddr = indirect->addr;
				len = indirect->len;
			}
		}

		early_memunmap(data, size);

		if ((phys_addr > paddr) && (phys_addr < (paddr + len)))
			return true;

		paddr = paddr_next;
	}

	return false;
}
/*
 * Architecture function to determine if RAM remap is allowed. By default, a
 * RAM remap will map the data as encrypted. Determine if a RAM remap should
 * not be done so that the data will be mapped decrypted.
 */
bool arch_memremap_can_ram_remap(resource_size_t phys_addr, unsigned long size,
				 unsigned long flags)
{
	if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))
		return true;

	if (flags & MEMREMAP_ENC)
		return true;

	if (flags & MEMREMAP_DEC)
		return false;

	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) {
		if (memremap_is_setup_data(phys_addr, size) ||
		    memremap_is_efi_data(phys_addr, size))
			return false;
	}

	return !memremap_should_map_decrypted(phys_addr, size);
}
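/*
 * Illustrative only: a memremap() caller can force the outcome with the
 * MEMREMAP_ENC/MEMREMAP_DEC flags checked above, as this file itself does
 * for setup_data:
 *
 *	void *p = memremap(phys, size, MEMREMAP_WB | MEMREMAP_DEC);
 */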
/*
 * Architecture override of __weak function to adjust the protection attributes
 * used when remapping memory. By default, early_memremap() will map the data
 * as encrypted. Determine if an encrypted mapping should not be done and set
 * the appropriate protection attributes.
 */
pgprot_t __init early_memremap_pgprot_adjust(resource_size_t phys_addr,
					     unsigned long size,
					     pgprot_t prot)
{
	bool encrypted_prot;

	if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))
		return prot;

	encrypted_prot = true;

	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) {
		if (early_memremap_is_setup_data(phys_addr, size) ||
		    memremap_is_efi_data(phys_addr, size))
			encrypted_prot = false;
	}

	if (encrypted_prot && memremap_should_map_decrypted(phys_addr, size))
		encrypted_prot = false;

	return encrypted_prot ? pgprot_encrypted(prot)
			      : pgprot_decrypted(prot);
}

bool phys_mem_access_encrypted(unsigned long phys_addr, unsigned long size)
{
	return arch_memremap_can_ram_remap(phys_addr, size, 0);
}
/* Remap memory with encryption */
void __init *early_memremap_encrypted(resource_size_t phys_addr,
				      unsigned long size)
{
	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_ENC);
}

/*
 * Remap memory with encryption and write-protected - cannot be called
 * before pat_init() is called
 */
void __init *early_memremap_encrypted_wp(resource_size_t phys_addr,
					 unsigned long size)
{
	if (!x86_has_pat_wp())
		return NULL;
	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_ENC_WP);
}

/* Remap memory without encryption */
void __init *early_memremap_decrypted(resource_size_t phys_addr,
				      unsigned long size)
{
	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_NOENC);
}

/*
 * Remap memory without encryption and write-protected - cannot be called
 * before pat_init() is called
 */
void __init *early_memremap_decrypted_wp(resource_size_t phys_addr,
					 unsigned long size)
{
	if (!x86_has_pat_wp())
		return NULL;
	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_NOENC_WP);
}

#endif	/* CONFIG_AMD_MEM_ENCRYPT */
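/*
 * Illustrative only: early boot code pairs these helpers with
 * early_memunmap(), as early_memremap_is_setup_data() does above:
 *
 *	data = early_memremap_decrypted(paddr, sizeof(*data));
 *	if (data) {
 *		(... inspect *data ...)
 *		early_memunmap(data, sizeof(*data));
 *	}
 */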
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3_pa());
	pgd_t *pgd = &base[pgd_index(addr)];
	p4d_t *p4d = p4d_offset(pgd, addr);
	pud_t *pud = pud_offset(p4d, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

bool __init is_early_ioremap_ptep(pte_t *ptep)
{
	return ptep >= &bm_pte[0] && ptep < &bm_pte[PAGE_SIZE/sizeof(pte_t)];
}
void __init early_ioremap_init(void)
{
	pmd_t *pmd;

#ifdef CONFIG_X86_64
	BUILD_BUG_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#else
	WARN_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#endif

	early_ioremap_setup();

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
#undef __FIXADDR_TOP
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
		       fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
		       fix_to_virt(FIX_BTMAP_END));
		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
		       FIX_BTMAP_BEGIN);
	}
}
void __init __early_set_fixmap(enum fixed_addresses idx,
			       phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	/* Sanitize 'prot' against any unsupported bits: */
	pgprot_val(flags) &= __supported_pte_mask;

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	flush_tlb_one_kernel(addr);
}
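/*
 * Illustrative only ('idx' and 'phys' are hypothetical): a zero pgprot
 * clears the slot again, which is how early fixmap mappings are torn down:
 *
 *	__early_set_fixmap(idx, phys, FIXMAP_PAGE_IO);
 *	...
 *	__early_set_fixmap(idx, 0, __pgprot(0));
 */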