// SPDX-License-Identifier: GPL-2.0-only
/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>
#include <linux/cc_platform.h>
#include <linux/efi.h>
#include <linux/pgtable.h>
#include <linux/kmsan.h>

#include <asm/set_memory.h>
#include <asm/e820/api.h>
#include <asm/efi.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/memtype.h>
#include <asm/setup.h>

#include "physaddr.h"

/*
 * Descriptor controlling ioremap() behavior.
 */
struct ioremap_desc {
	unsigned int flags;
};

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			enum page_cache_mode pcm)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WT:
		err = _set_memory_wt(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}

/* Does the range (or a subset of) contain normal RAM? */
static unsigned int __ioremap_check_ram(struct resource *res)
{
	unsigned long start_pfn, stop_pfn;
	unsigned long i;

	if ((res->flags & IORESOURCE_SYSTEM_RAM) != IORESOURCE_SYSTEM_RAM)
		return 0;

	start_pfn = (res->start + PAGE_SIZE - 1) >> PAGE_SHIFT;
	stop_pfn = (res->end + 1) >> PAGE_SHIFT;
	if (stop_pfn > start_pfn) {
		for (i = 0; i < (stop_pfn - start_pfn); ++i)
			if (pfn_valid(start_pfn + i) &&
			    !PageReserved(pfn_to_page(start_pfn + i)))
				return IORES_MAP_SYSTEM_RAM;
	}

	return 0;
}

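/*
 * Worked example of the PFN rounding above (illustrative numbers only): for
 * a resource spanning 0x1234..0x5fff, start_pfn rounds up to 2 and stop_pfn
 * is 6, so only the fully covered pages (PFNs 2..5) are checked for being
 * valid, non-reserved RAM.
 */
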
/*
 * In a SEV guest, NONE and RESERVED should not be mapped encrypted because
 * there the whole memory is already encrypted.
 */
static unsigned int __ioremap_check_encrypted(struct resource *res)
{
	if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
		return 0;

	switch (res->desc) {
	case IORES_DESC_NONE:
	case IORES_DESC_RESERVED:
		break;
	default:
		return IORES_MAP_ENCRYPTED;
	}

	return 0;
}

/*
 * The EFI runtime services data area is not covered by walk_mem_res(), but must
 * be mapped encrypted when SEV is active.
 */
static void __ioremap_check_other(resource_size_t addr, struct ioremap_desc *desc)
{
	if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
		return;

	if (x86_platform.hyper.is_private_mmio(addr)) {
		desc->flags |= IORES_MAP_ENCRYPTED;
		return;
	}

	if (!IS_ENABLED(CONFIG_EFI))
		return;

	if (efi_mem_type(addr) == EFI_RUNTIME_SERVICES_DATA ||
	    (efi_mem_type(addr) == EFI_BOOT_SERVICES_DATA &&
	     efi_mem_attributes(addr) & EFI_MEMORY_RUNTIME))
		desc->flags |= IORES_MAP_ENCRYPTED;
}

static int __ioremap_collect_map_flags(struct resource *res, void *arg)
{
	struct ioremap_desc *desc = arg;

	if (!(desc->flags & IORES_MAP_SYSTEM_RAM))
		desc->flags |= __ioremap_check_ram(res);

	if (!(desc->flags & IORES_MAP_ENCRYPTED))
		desc->flags |= __ioremap_check_encrypted(res);

	return ((desc->flags & (IORES_MAP_SYSTEM_RAM | IORES_MAP_ENCRYPTED)) ==
			       (IORES_MAP_SYSTEM_RAM | IORES_MAP_ENCRYPTED));
}

/*
 * To avoid multiple resource walks, this function walks resources marked as
 * IORESOURCE_MEM and IORESOURCE_BUSY, looking for system RAM and/or a
 * resource not described as IORES_DESC_NONE (e.g. IORES_DESC_ACPI_TABLES).
 *
 * After that, deal with misc other ranges in __ioremap_check_other() which do
 * not fall into the above category.
 */
static void __ioremap_check_mem(resource_size_t addr, unsigned long size,
				struct ioremap_desc *desc)
{
	u64 start, end;

	start = (u64)addr;
	end = start + size - 1;
	memset(desc, 0, sizeof(struct ioremap_desc));

	walk_mem_res(start, end, desc, __ioremap_collect_map_flags);

	__ioremap_check_other(addr, desc);
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. It transparently creates kernel huge I/O mappings when
 * the physical address is aligned to a huge page size (1GB or 2MB) and
 * the requested size is at least the huge page size.
 *
 * NOTE: MTRRs can override PAT memory types with a 4KB granularity.
 * Therefore, the mapping code falls back to using smaller pages, down to
 * 4KB, when a mapping range is covered by a non-WB type of MTRR.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
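/*
 * Illustrative example of the offset handling (made-up numbers): a request
 * for phys_addr 0xfed40005 with size 0x10 is mapped page-aligned starting at
 * 0xfed40000, and the returned cookie points 0x5 bytes into that mapping, so
 * the caller never sees the alignment fixup.
 */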
static void __iomem *
__ioremap_caller(resource_size_t phys_addr, unsigned long size,
		 enum page_cache_mode pcm, void *caller, bool encrypted)
{
	unsigned long offset, vaddr;
	resource_size_t last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct ioremap_desc io_desc;
	struct vm_struct *area;
	enum page_cache_mode new_pcm;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	__ioremap_check_mem(phys_addr, size, &io_desc);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (io_desc.flags & IORES_MAP_SYSTEM_RAM) {
		WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
			  &phys_addr, &last_addr);
		return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	/*
	 * Mask out any bits not part of the actual physical
	 * address, like memory encryption bits.
	 */
	phys_addr &= PHYSICAL_PAGE_MASK;

	retval = memtype_reserve(phys_addr, (u64)phys_addr + size,
						pcm, &new_pcm);
	if (retval) {
		printk(KERN_ERR "ioremap memtype_reserve failed %d\n", retval);
		return NULL;
	}

	if (pcm != new_pcm) {
		if (!is_new_memtype_allowed(phys_addr, size, pcm, new_pcm)) {
			printk(KERN_ERR
		"ioremap error for 0x%llx-0x%llx, requested 0x%x, got 0x%x\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				pcm, new_pcm);
			goto err_free_memtype;
		}
		pcm = new_pcm;
	}

	/*
	 * If the page being mapped is in memory and SEV is active then
	 * make sure the memory encryption attribute is enabled in the
	 * resulting mapping.
	 * In TDX guests, memory is marked private by default. If encryption
	 * is not requested (using encrypted), explicitly set decrypt
	 * attribute in all IOREMAPPED memory.
	 */
	prot = PAGE_KERNEL_IO;
	if ((io_desc.flags & IORES_MAP_ENCRYPTED) || encrypted)
		prot = pgprot_encrypted(prot);
	else
		prot = pgprot_decrypted(prot);

	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC));
		break;
	case _PAGE_CACHE_MODE_UC_MINUS:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));
		break;
	case _PAGE_CACHE_MODE_WC:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WC));
		break;
	case _PAGE_CACHE_MODE_WT:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WT));
		break;
	case _PAGE_CACHE_MODE_WB:
		break;
	}

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		goto err_free_memtype;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;

	if (memtype_kernel_map_sync(phys_addr, size, pcm))
		goto err_free_area;

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
		goto err_free_area;

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	/*
	 * Check if the request spans more than any BAR in the iomem resource
	 * tree.
	 */
	if (iomem_map_sanity_check(unaligned_phys_addr, unaligned_size))
		pr_warn("caller %pS mapping multiple BARs\n", caller);

	return ret_addr;
err_free_area:
	free_vm_area(area);
err_free_memtype:
	memtype_free(phys_addr, phys_addr + size);
	return NULL;
}

/**
 * ioremap - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses; in particular, driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled() ? _PAGE_CACHE_MODE_UC : _PAGE_CACHE_MODE_UC_MINUS;
	 *
	 * Till we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS. Drivers that are certain they need or can already
	 * be converted over to strong UC can use ioremap_uc().
	 */
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap);

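/*
 * Typical usage (illustrative sketch only; the BAR index and the register
 * offset below are made up): a PCI driver maps a BAR, talks to the device
 * through the mmio helpers and tears the mapping down again:
 *
 *	void __iomem *regs;
 *	u32 status;
 *
 *	regs = ioremap(pci_resource_start(pdev, 0),
 *		       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *
 *	status = readl(regs + 0x10);		// hypothetical status register
 *	writel(status | 0x1, regs + 0x10);
 *
 *	iounmap(regs);
 */
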
/**
 * ioremap_uc - map bus memory into CPU space as strongly uncachable
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_uc performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked with a strong
 * preference as completely uncachable on the CPU when possible. For non-PAT
 * systems this ends up setting page-attribute flags PCD=1, PWT=1. For PAT
 * systems this will set the PAT entry for the pages as strong UC. This call
 * will honor existing caching rules from things like the PCI bus. Note that
 * there are other caches and buffers on many busses; in particular, driver
 * authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_uc(resource_size_t phys_addr, unsigned long size)
{
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL_GPL(ioremap_uc);

/**
 * ioremap_wc - map memory into CPU space write combined
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WC,
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_wc);

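/*
 * Illustrative sketch (fb_phys_base and fb_size are made-up names): write
 * combining is typically used for large, write-mostly apertures such as a
 * framebuffer, where batched writes matter more than strict ordering:
 *
 *	void __iomem *fb = ioremap_wc(fb_phys_base, fb_size);
 *
 *	if (fb) {
 *		memset_io(fb, 0, fb_size);	// fast streaming writes
 *		iounmap(fb);
 *	}
 */
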
/**
 * ioremap_wt - map memory into CPU space write through
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write through.
 * Write through stores data into memory while keeping the cache up-to-date.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wt(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WT,
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_wt);

void __iomem *ioremap_encrypted(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
				__builtin_return_address(0), true);
}
EXPORT_SYMBOL(ioremap_encrypted);

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_cache);

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
			   unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size,
				pgprot2cachemode(__pgprot(prot_val)),
				__builtin_return_address(0), false);
}
EXPORT_SYMBOL(ioremap_prot);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * The PCI/ISA range special-casing was removed from __ioremap()
	 * so this check, in theory, can be removed. However, there are
	 * cases where iounmap() is called for addresses not obtained via
	 * ioremap() (vga16fb for example). Add a warning so that these
	 * cases can be caught and fixed.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS)) {
		WARN(1, "iounmap() called for ISA range not obtained using ioremap()\n");
		return;
	}

	mmiotrace_iounmap(addr);

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	/* Use the vm area unlocked, assuming the caller
	   ensures there isn't another iounmap for the same address
	   in parallel. Reuse of the virtual address is prevented by
	   leaving it in the global lists until we're done with it.
	   cpa takes care of the direct mappings. */
	p = find_vm_area((void __force *)addr);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	kmsan_iounmap_page_range((unsigned long)addr,
		(unsigned long)addr + get_vm_area_size(p));
	memtype_free(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access.
 */
void *xlate_dev_mem_ptr(phys_addr_t phys)
{
	unsigned long start  = phys &  PAGE_MASK;
	unsigned long offset = phys & ~PAGE_MASK;
	void *vaddr;

	/* memremap() maps if RAM, otherwise falls back to ioremap() */
	vaddr = memremap(start, PAGE_SIZE, MEMREMAP_WB);

	/* Only add the offset on success and return NULL if memremap() failed */
	if (vaddr)
		vaddr += offset;

	return vaddr;
}

void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
	memunmap((void *)((unsigned long)addr & PAGE_MASK));
}

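/*
 * Illustrative example (made-up address): xlate_dev_mem_ptr(0x1234) maps the
 * whole page at 0x1000 write-back and returns that mapping plus the 0x234
 * in-page offset; unxlate_dev_mem_ptr() masks the offset back off before
 * calling memunmap(), so the caller can simply pass back the offset pointer
 * it was given.
 */
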
#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
 * Examine the physical address to determine if it is an area of memory
 * that should be mapped decrypted. If the memory is not part of the
 * kernel usable area, it was accessed and created decrypted, so these
 * areas should be mapped decrypted. And since the encryption key can
 * change across reboots, persistent memory should also be mapped
 * decrypted.
 *
 * If SEV is active, that implies that BIOS/UEFI also ran encrypted so
 * only persistent memory should be mapped decrypted.
 */
static bool memremap_should_map_decrypted(resource_size_t phys_addr,
					   unsigned long size)
{
	int is_pmem;

	/*
	 * Check if the address is part of a persistent memory region.
	 * This check covers areas added by E820, EFI and ACPI.
	 */
	is_pmem = region_intersects(phys_addr, size, IORESOURCE_MEM,
				    IORES_DESC_PERSISTENT_MEMORY);
	if (is_pmem != REGION_DISJOINT)
		return true;

	/*
	 * Check if the non-volatile attribute is set for an EFI
	 * reserved area.
	 */
	if (efi_enabled(EFI_BOOT)) {
		switch (efi_mem_type(phys_addr)) {
		case EFI_RESERVED_TYPE:
			if (efi_mem_attributes(phys_addr) & EFI_MEMORY_NV)
				return true;
			break;
		default:
			break;
		}
	}

	/* Check if the address is outside kernel usable area */
	switch (e820__get_entry_type(phys_addr, phys_addr + size - 1)) {
	case E820_TYPE_RESERVED:
	case E820_TYPE_ACPI:
	case E820_TYPE_NVS:
	case E820_TYPE_UNUSABLE:
		/* For SEV, these areas are encrypted */
		if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
			break;
		fallthrough;

	case E820_TYPE_PRAM:
		return true;

	default:
		break;
	}

	return false;
}

/*
 * Examine the physical address to determine if it is EFI data. Check
 * it against the boot params structure and EFI tables and memory types.
 */
static bool memremap_is_efi_data(resource_size_t phys_addr,
				 unsigned long size)
{
	u64 paddr;

	/* Check if the address is part of EFI boot/runtime data */
	if (!efi_enabled(EFI_BOOT))
		return false;

	paddr = boot_params.efi_info.efi_memmap_hi;
	paddr <<= 32;
	paddr |= boot_params.efi_info.efi_memmap;
	if (phys_addr == paddr)
		return true;

	paddr = boot_params.efi_info.efi_systab_hi;
	paddr <<= 32;
	paddr |= boot_params.efi_info.efi_systab;
	if (phys_addr == paddr)
		return true;

	if (efi_is_table_address(phys_addr))
		return true;

	switch (efi_mem_type(phys_addr)) {
	case EFI_BOOT_SERVICES_DATA:
	case EFI_RUNTIME_SERVICES_DATA:
		return true;
	default:
		break;
	}

	return false;
}

/*
 * Examine the physical address to determine if it is boot data by checking
 * it against the boot params setup_data chain.
 */
static bool memremap_is_setup_data(resource_size_t phys_addr,
				   unsigned long size)
{
	struct setup_indirect *indirect;
	struct setup_data *data;
	u64 paddr, paddr_next;

	paddr = boot_params.hdr.setup_data;
	while (paddr) {
		unsigned int len;

		if (phys_addr == paddr)
			return true;

		data = memremap(paddr, sizeof(*data),
				MEMREMAP_WB | MEMREMAP_DEC);
		if (!data) {
			pr_warn("failed to memremap setup_data entry\n");
			return false;
		}

		paddr_next = data->next;
		len = data->len;

		if ((phys_addr > paddr) && (phys_addr < (paddr + len))) {
			memunmap(data);
			return true;
		}

		if (data->type == SETUP_INDIRECT) {
			memunmap(data);
			data = memremap(paddr, sizeof(*data) + len,
					MEMREMAP_WB | MEMREMAP_DEC);
			if (!data) {
				pr_warn("failed to memremap indirect setup_data\n");
				return false;
			}

			indirect = (struct setup_indirect *)data->data;

			if (indirect->type != SETUP_INDIRECT) {
				paddr = indirect->addr;
				len = indirect->len;
			}
		}

		memunmap(data);

		if ((phys_addr > paddr) && (phys_addr < (paddr + len)))
			return true;

		paddr = paddr_next;
	}

	return false;
}

/*
 * Examine the physical address to determine if it is boot data by checking
 * it against the boot params setup_data chain (early boot version).
 */
static bool __init early_memremap_is_setup_data(resource_size_t phys_addr,
						unsigned long size)
{
	struct setup_indirect *indirect;
	struct setup_data *data;
	u64 paddr, paddr_next;

	paddr = boot_params.hdr.setup_data;
	while (paddr) {
		unsigned int len, size;

		if (phys_addr == paddr)
			return true;

		data = early_memremap_decrypted(paddr, sizeof(*data));
		if (!data) {
			pr_warn("failed to early memremap setup_data entry\n");
			return false;
		}

		size = sizeof(*data);

		paddr_next = data->next;
		len = data->len;

		if ((phys_addr > paddr) && (phys_addr < (paddr + len))) {
			early_memunmap(data, sizeof(*data));
			return true;
		}

		if (data->type == SETUP_INDIRECT) {
			size += len;
			early_memunmap(data, sizeof(*data));
			data = early_memremap_decrypted(paddr, size);
			if (!data) {
				pr_warn("failed to early memremap indirect setup_data\n");
				return false;
			}

			indirect = (struct setup_indirect *)data->data;

			if (indirect->type != SETUP_INDIRECT) {
				paddr = indirect->addr;
				len = indirect->len;
			}
		}

		early_memunmap(data, size);

		if ((phys_addr > paddr) && (phys_addr < (paddr + len)))
			return true;

		paddr = paddr_next;
	}

	return false;
}

/*
 * Architecture function to determine if RAM remap is allowed. By default, a
 * RAM remap will map the data as encrypted. Determine if a RAM remap should
 * not be done so that the data will be mapped decrypted.
 */
bool arch_memremap_can_ram_remap(resource_size_t phys_addr, unsigned long size,
				 unsigned long flags)
{
	if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))
		return true;

	if (flags & MEMREMAP_ENC)
		return true;

	if (flags & MEMREMAP_DEC)
		return false;

	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) {
		if (memremap_is_setup_data(phys_addr, size) ||
		    memremap_is_efi_data(phys_addr, size))
			return false;
	}

	return !memremap_should_map_decrypted(phys_addr, size);
}

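/*
 * Illustrative sketch (fw_phys_addr and fw_size are hypothetical): a caller
 * that knows a firmware-shared buffer was written unencrypted can force a
 * decrypted mapping and bypass the heuristics above:
 *
 *	void *buf = memremap(fw_phys_addr, fw_size,
 *			     MEMREMAP_WB | MEMREMAP_DEC);
 *	if (buf) {
 *		// ... consume the data ...
 *		memunmap(buf);
 *	}
 */
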
/*
 * Architecture override of __weak function to adjust the protection attributes
 * used when remapping memory. By default, early_memremap() will map the data
 * as encrypted. Determine if an encrypted mapping should not be done and set
 * the appropriate protection attributes.
 */
pgprot_t __init early_memremap_pgprot_adjust(resource_size_t phys_addr,
					     unsigned long size,
					     pgprot_t prot)
{
	bool encrypted_prot;

	if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))
		return prot;

	encrypted_prot = true;

	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) {
		if (early_memremap_is_setup_data(phys_addr, size) ||
		    memremap_is_efi_data(phys_addr, size))
			encrypted_prot = false;
	}

	if (encrypted_prot && memremap_should_map_decrypted(phys_addr, size))
		encrypted_prot = false;

	return encrypted_prot ? pgprot_encrypted(prot)
			      : pgprot_decrypted(prot);
}

bool phys_mem_access_encrypted(unsigned long phys_addr, unsigned long size)
{
	return arch_memremap_can_ram_remap(phys_addr, size, 0);
}

/* Remap memory with encryption */
void __init *early_memremap_encrypted(resource_size_t phys_addr,
				      unsigned long size)
{
	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_ENC);
}

/*
 * Remap memory with encryption and write-protected - cannot be called
 * before pat_init() has run.
 */
void __init *early_memremap_encrypted_wp(resource_size_t phys_addr,
					 unsigned long size)
{
	if (!x86_has_pat_wp())
		return NULL;
	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_ENC_WP);
}

/* Remap memory without encryption */
void __init *early_memremap_decrypted(resource_size_t phys_addr,
				      unsigned long size)
{
	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_NOENC);
}

/*
 * Remap memory without encryption and write-protected - cannot be called
 * before pat_init() has run.
 */
void __init *early_memremap_decrypted_wp(resource_size_t phys_addr,
					 unsigned long size)
{
	if (!x86_has_pat_wp())
		return NULL;
	return early_memremap_prot(phys_addr, size, __PAGE_KERNEL_NOENC_WP);
}
#endif	/* CONFIG_AMD_MEM_ENCRYPT */

static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3_pa());
	pgd_t *pgd = &base[pgd_index(addr)];
	p4d_t *p4d = p4d_offset(pgd, addr);
	pud_t *pud = pud_offset(p4d, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

bool __init is_early_ioremap_ptep(pte_t *ptep)
{
	return ptep >= &bm_pte[0] && ptep < &bm_pte[PAGE_SIZE/sizeof(pte_t)];
}

void __init early_ioremap_init(void)
{
	pmd_t *pmd;

#ifdef CONFIG_X86_64
	BUILD_BUG_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#else
	WARN_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#endif

	early_ioremap_setup();

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
#undef __FIXADDR_TOP
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
		       fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
		       fix_to_virt(FIX_BTMAP_END));
		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

void __init __early_set_fixmap(enum fixed_addresses idx,
			       phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	/* Sanitize 'prot' against any unsupported bits: */
	pgprot_val(flags) &= __supported_pte_mask;

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	flush_tlb_one_kernel(addr);
}