1 // SPDX-License-Identifier: GPL-2.0
3 * Common EFI (Extensible Firmware Interface) support functions
4 * Based on Extensible Firmware Interface Specification version 1.0
6 * Copyright (C) 1999 VA Linux Systems
7 * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
8 * Copyright (C) 1999-2002 Hewlett-Packard Co.
9 * David Mosberger-Tang <davidm@hpl.hp.com>
10 * Stephane Eranian <eranian@hpl.hp.com>
11 * Copyright (C) 2005-2008 Intel Co.
12 * Fenghua Yu <fenghua.yu@intel.com>
13 * Bibo Mao <bibo.mao@intel.com>
14 * Chandramouli Narayanan <mouli@linux.intel.com>
15 * Huang Ying <ying.huang@intel.com>
16 * Copyright (C) 2013 SuSE Labs
17 * Borislav Petkov <bp@suse.de> - runtime services VA mapping
19 * Copied from efi_32.c to eliminate the duplicated code between EFI
20 * 32/64 support code. --ying 2007-10-26
22 * Not all EFI Runtime Services are implemented yet, as EFI only
23 * supports physical mode addressing on SoftSDV. This is to be fixed
24 * in a future version. --drummond 1999-07-20
26 * Implemented EFI runtime services and virtual mode calls. --davidm
28 * Goutham Rao: <goutham.rao@intel.com>
29 * Skip non-WB memory and ignore empty memory ranges.
32 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
34 #include <linux/kernel.h>
35 #include <linux/init.h>
36 #include <linux/efi.h>
37 #include <linux/efi-bgrt.h>
38 #include <linux/export.h>
39 #include <linux/memblock.h>
40 #include <linux/slab.h>
41 #include <linux/spinlock.h>
42 #include <linux/uaccess.h>
43 #include <linux/time.h>
45 #include <linux/reboot.h>
46 #include <linux/bcd.h>
48 #include <asm/setup.h>
50 #include <asm/e820/api.h>
52 #include <asm/set_memory.h>
53 #include <asm/tlbflush.h>
54 #include <asm/x86_init.h>
55 #include <asm/uv/uv.h>
/*
 * Physical-mode copies of EFI state captured during early boot, before
 * SetVirtualAddressMap() is called.  Both are __initdata and discarded
 * once boot is complete.
 */
57 static struct efi efi_phys __initdata;
58 static efi_system_table_t efi_systab __initdata;
/*
 * x86-specific EFI configuration tables to match while parsing the
 * firmware's config-table array; list is NULL_GUID-terminated.
 */
60 static efi_config_table_type_t arch_tables[] __initdata = {
62 {UV_SYSTEM_TABLE_GUID, "UVsystab", &uv_systab_phys},
64 {NULL_GUID, NULL, NULL},
/*
 * Physical addresses of the known EFI tables, used by
 * efi_is_table_address() to decide whether a given address belongs to
 * firmware-owned data.  (Entries elided in this view.)
 */
67 static const unsigned long * const efi_tables[] = {
83 &efi.properties_table,
85 #ifdef CONFIG_EFI_RCI2_TABLE
/* Physical address of the kexec-provided EFI setup_data blob (0 if none). */
92 u64 efi_setup; /* efi setup_data physical address */
/* Set by the "add_efi_memmap" kernel parameter; see do_add_efi_memmap(). */
94 static int add_efi_memmap __initdata;
/* Early-param handler: enable merging the EFI memmap into the e820 map. */
95 static int __init setup_add_efi_memmap(char *arg)
100 early_param("add_efi_memmap", setup_add_efi_memmap);
/*
 * Call the firmware's SetVirtualAddressMap() while still in physical
 * mode.  Wraps the call with the arch prolog/epilog that installs the
 * 1:1 page tables the firmware needs, and masks interrupts for the
 * duration of the firmware call.
 */
102 static efi_status_t __init phys_efi_set_virtual_address_map(
103 unsigned long memory_map_size,
104 unsigned long descriptor_size,
105 u32 descriptor_version,
106 efi_memory_desc_t *virtual_map)
/* Switch to page tables the firmware can run under. */
112 save_pgd = efi_call_phys_prolog();
116 /* Disable interrupts around EFI calls: */
117 local_irq_save(flags);
118 status = efi_call_phys(efi_phys.set_virtual_address_map,
119 memory_map_size, descriptor_size,
120 descriptor_version, virtual_map);
121 local_irq_restore(flags);
/* Restore the kernel's own page tables. */
123 efi_call_phys_epilog(save_pgd);
/*
 * Scan the EFI memory map for regions flagged EFI_MEMORY_MORE_RELIABLE
 * (address-mirrored RAM) and tell memblock about them, then report the
 * mirrored/total ratio.
 */
128 void __init efi_find_mirror(void)
130 efi_memory_desc_t *md;
131 u64 mirror_size = 0, total_size = 0;
133 for_each_efi_memory_desc(md) {
134 unsigned long long start = md->phys_addr;
/* EFI descriptors count in EFI_PAGE_SIZE (4K) pages. */
135 unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
138 if (md->attribute & EFI_MEMORY_MORE_RELIABLE) {
139 memblock_mark_mirror(start, size);
144 pr_info("Memory: %lldM/%lldM mirrored memory\n",
145 mirror_size>>20, total_size>>20);
149 * Tell the kernel about the EFI memory map. This might include
150 * more than the max 128 entries that can fit in the e820 legacy
151 * (zeropage) memory map.
/*
 * Translate each EFI memory descriptor into an e820 entry type and add
 * it to the kernel's e820 table.  Only WB-cacheable conventional/boot
 * memory becomes E820_TYPE_RAM; everything unrecognized is reserved.
 */
154 static void __init do_add_efi_memmap(void)
156 efi_memory_desc_t *md;
158 for_each_efi_memory_desc(md) {
159 unsigned long long start = md->phys_addr;
160 unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
164 case EFI_LOADER_CODE:
165 case EFI_LOADER_DATA:
166 case EFI_BOOT_SERVICES_CODE:
167 case EFI_BOOT_SERVICES_DATA:
168 case EFI_CONVENTIONAL_MEMORY:
/* Usable only if write-back cacheable; otherwise keep it reserved. */
169 if (md->attribute & EFI_MEMORY_WB)
170 e820_type = E820_TYPE_RAM;
172 e820_type = E820_TYPE_RESERVED;
174 case EFI_ACPI_RECLAIM_MEMORY:
175 e820_type = E820_TYPE_ACPI;
177 case EFI_ACPI_MEMORY_NVS:
178 e820_type = E820_TYPE_NVS;
180 case EFI_UNUSABLE_MEMORY:
181 e820_type = E820_TYPE_UNUSABLE;
183 case EFI_PERSISTENT_MEMORY:
184 e820_type = E820_TYPE_PMEM;
/* All remaining EFI types map to reserved: */
188 * EFI_RESERVED_TYPE EFI_RUNTIME_SERVICES_CODE
189 * EFI_RUNTIME_SERVICES_DATA EFI_MEMORY_MAPPED_IO
190 * EFI_MEMORY_MAPPED_IO_PORT_SPACE EFI_PAL_CODE
192 e820_type = E820_TYPE_RESERVED;
195 e820__range_add(start, size, e820_type);
/* Re-sanitize the table after all additions. */
197 e820__update_table(e820_table);
/*
 * Reserve the firmware-provided EFI memory map (passed via boot_params)
 * in memblock so early allocations don't clobber it, and initialize the
 * early efi.memmap view of it.  Returns 0 on success, negative on error.
 */
200 int __init efi_memblock_x86_reserve_range(void)
202 struct efi_info *e = &boot_params.efi_info;
203 struct efi_memory_map_data data;
/* Paravirt (e.g. Xen dom0) provides its own memmap handling. */
207 if (efi_enabled(EFI_PARAVIRT))
211 /* Can't handle data above 4GB at this time */
212 if (e->efi_memmap_hi) {
213 pr_err("Memory map is above 4GB, disabling EFI.\n");
216 pmap = e->efi_memmap;
/* 64-bit: combine the low and high halves of the physical address. */
218 pmap = (e->efi_memmap | ((__u64)e->efi_memmap_hi << 32));
220 data.phys_map = pmap;
221 data.size = e->efi_memmap_size;
222 data.desc_size = e->efi_memdesc_size;
223 data.desc_version = e->efi_memdesc_version;
225 rv = efi_memmap_init_early(&data);
/* Only descriptor version 1 is expected; warn but continue otherwise. */
232 WARN(efi.memmap.desc_version != 1,
233 "Unexpected EFI_MEMORY_DESCRIPTOR version %ld",
234 efi.memmap.desc_version);
236 memblock_reserve(pmap, efi.memmap.nr_map * efi.memmap.desc_size);
/* Helpers for detecting 64-bit overflow when converting pages to bytes. */
241 #define OVERFLOW_ADDR_SHIFT (64 - EFI_PAGE_SHIFT)
242 #define OVERFLOW_ADDR_MASK (U64_MAX << OVERFLOW_ADDR_SHIFT)
243 #define U64_HIGH_BIT (~(U64_MAX >> 1))
/*
 * Sanity-check one EFI memory descriptor (index @i, for logging):
 * reject zero-sized entries and entries whose page count or end address
 * overflows 64 bits, warning once with FW_BUG for invalid maps.
 */
245 static bool __init efi_memmap_entry_valid(const efi_memory_desc_t *md, int i)
247 u64 end = (md->num_pages << EFI_PAGE_SHIFT) + md->phys_addr - 1;
251 if (md->num_pages == 0) {
253 } else if (md->num_pages > EFI_PAGES_MAX ||
254 EFI_PAGES_MAX - md->num_pages <
255 (md->phys_addr >> EFI_PAGE_SHIFT)) {
/* Capture the bits shifted out of the 64-bit end address. */
256 end_hi = (md->num_pages & OVERFLOW_ADDR_MASK)
257 >> OVERFLOW_ADDR_SHIFT;
/* End wrapped past 2^64 relative to the start address. */
259 if ((md->phys_addr & U64_HIGH_BIT) && !(end & U64_HIGH_BIT))
265 pr_warn_once(FW_BUG "Invalid EFI memory map entries:\n");
268 pr_warn("mem%02u: %s range=[0x%016llx-0x%llx%016llx] (invalid)\n",
269 i, efi_md_typeattr_format(buf, sizeof(buf), md),
270 md->phys_addr, end_hi, end)
272 pr_warn("mem%02u: %s range=[0x%016llx-0x%016llx] (invalid)\n",
273 i, efi_md_typeattr_format(buf, sizeof(buf), md),
/*
 * Compact the EFI memory map in place, dropping descriptors that fail
 * efi_memmap_entry_valid(), and reinstall the shrunken map if anything
 * was removed.
 */
279 static void __init efi_clean_memmap(void)
281 efi_memory_desc_t *out = efi.memmap.map;
282 const efi_memory_desc_t *in = out;
283 const efi_memory_desc_t *end = efi.memmap.map_end;
286 for (i = n_removal = 0; in < end; i++) {
287 if (efi_memmap_entry_valid(in, i)) {
/* Valid entry: slide it down over any removed ones. */
289 memcpy(out, in, efi.memmap.desc_size);
290 out = (void *)out + efi.memmap.desc_size;
294 in = (void *)in + efi.memmap.desc_size;
/* New entry count after removals. */
298 u64 size = efi.memmap.nr_map - n_removal;
300 pr_warn("Removing %d invalid memory map entries.\n", n_removal);
301 efi_memmap_install(efi.memmap.phys_map, size);
/* Dump every EFI memory descriptor (type, attributes, range, size in MB). */
305 void __init efi_print_memmap(void)
307 efi_memory_desc_t *md;
310 for_each_efi_memory_desc(md) {
313 pr_info("mem%02u: %s range=[0x%016llx-0x%016llx] (%lluMB)\n",
314 i++, efi_md_typeattr_format(buf, sizeof(buf), md),
316 md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1,
/* Pages (EFI_PAGE_SHIFT each) converted to megabytes. */
317 (md->num_pages >> (20 - EFI_PAGE_SHIFT)));
/*
 * Map the firmware EFI system table at physical address @phys and copy
 * its fields into the kernel-local efi_systab.  On 64-bit, kexec boots
 * may override several fields from the efi_setup setup_data blob; @tmp
 * accumulates 64-bit values so that >4GB addresses can be rejected on
 * 32-bit kernels.  Verifies the table signature/revision afterwards.
 * Returns 0 on success, negative on failure.
 */
321 static int __init efi_systab_init(void *phys)
323 if (efi_enabled(EFI_64BIT)) {
324 efi_system_table_64_t *systab64;
325 struct efi_setup_data *data = NULL;
/* kexec boot: map the saved setup data from the first kernel. */
329 data = early_memremap(efi_setup, sizeof(*data));
333 systab64 = early_memremap((unsigned long)phys,
335 if (systab64 == NULL) {
336 pr_err("Couldn't map the system table!\n");
338 early_memunmap(data, sizeof(*data));
342 efi_systab.hdr = systab64->hdr;
/* Prefer kexec-provided values when setup data is present. */
343 efi_systab.fw_vendor = data ? (unsigned long)data->fw_vendor :
345 tmp |= data ? data->fw_vendor : systab64->fw_vendor;
346 efi_systab.fw_revision = systab64->fw_revision;
347 efi_systab.con_in_handle = systab64->con_in_handle;
348 tmp |= systab64->con_in_handle;
349 efi_systab.con_in = systab64->con_in;
350 tmp |= systab64->con_in;
351 efi_systab.con_out_handle = systab64->con_out_handle;
352 tmp |= systab64->con_out_handle;
353 efi_systab.con_out = systab64->con_out;
354 tmp |= systab64->con_out;
355 efi_systab.stderr_handle = systab64->stderr_handle;
356 tmp |= systab64->stderr_handle;
357 efi_systab.stderr = systab64->stderr;
358 tmp |= systab64->stderr;
359 efi_systab.runtime = data ?
360 (void *)(unsigned long)data->runtime :
361 (void *)(unsigned long)systab64->runtime;
362 tmp |= data ? data->runtime : systab64->runtime;
363 efi_systab.boottime = (void *)(unsigned long)systab64->boottime;
364 tmp |= systab64->boottime;
365 efi_systab.nr_tables = systab64->nr_tables;
366 efi_systab.tables = data ? (unsigned long)data->tables :
368 tmp |= data ? data->tables : systab64->tables;
370 early_memunmap(systab64, sizeof(*systab64));
372 early_memunmap(data, sizeof(*data));
/* 32-bit kernel cannot address firmware data above 4GB. */
375 pr_err("EFI data located above 4GB, disabling EFI.\n");
380 efi_system_table_32_t *systab32;
382 systab32 = early_memremap((unsigned long)phys,
384 if (systab32 == NULL) {
385 pr_err("Couldn't map the system table!\n");
/* 32-bit table: straight field-by-field copy, no overflow concerns. */
389 efi_systab.hdr = systab32->hdr;
390 efi_systab.fw_vendor = systab32->fw_vendor;
391 efi_systab.fw_revision = systab32->fw_revision;
392 efi_systab.con_in_handle = systab32->con_in_handle;
393 efi_systab.con_in = systab32->con_in;
394 efi_systab.con_out_handle = systab32->con_out_handle;
395 efi_systab.con_out = systab32->con_out;
396 efi_systab.stderr_handle = systab32->stderr_handle;
397 efi_systab.stderr = systab32->stderr;
398 efi_systab.runtime = (void *)(unsigned long)systab32->runtime;
399 efi_systab.boottime = (void *)(unsigned long)systab32->boottime;
400 efi_systab.nr_tables = systab32->nr_tables;
401 efi_systab.tables = systab32->tables;
403 early_memunmap(systab32, sizeof(*systab32));
/* Publish the local copy as the canonical system table. */
406 efi.systab = &efi_systab;
409 * Verify the EFI Table
411 if (efi.systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) {
412 pr_err("System table signature incorrect!\n");
415 if ((efi.systab->hdr.revision >> 16) == 0)
416 pr_err("Warning: System table version %d.%02d, expected 1.00 or greater!\n",
417 efi.systab->hdr.revision >> 16,
418 efi.systab->hdr.revision & 0xffff);
/*
 * Map the 32-bit runtime services table just long enough to record the
 * physical address of SetVirtualAddressMap().  Returns 0 on success.
 */
423 static int __init efi_runtime_init32(void)
425 efi_runtime_services_32_t *runtime;
427 runtime = early_memremap((unsigned long)efi.systab->runtime,
428 sizeof(efi_runtime_services_32_t));
430 pr_err("Could not map the runtime service table!\n");
435 * We will only need *early* access to the SetVirtualAddressMap
436 * EFI runtime service. All other runtime services will be called
437 * via the virtual mapping.
439 efi_phys.set_virtual_address_map =
440 (efi_set_virtual_address_map_t *)
441 (unsigned long)runtime->set_virtual_address_map;
442 early_memunmap(runtime, sizeof(efi_runtime_services_32_t));
/*
 * 64-bit counterpart of efi_runtime_init32(): record the physical
 * address of SetVirtualAddressMap() from the 64-bit runtime table.
 */
447 static int __init efi_runtime_init64(void)
449 efi_runtime_services_64_t *runtime;
451 runtime = early_memremap((unsigned long)efi.systab->runtime,
452 sizeof(efi_runtime_services_64_t));
454 pr_err("Could not map the runtime service table!\n");
459 * We will only need *early* access to the SetVirtualAddressMap
460 * EFI runtime service. All other runtime services will be called
461 * via the virtual mapping.
463 efi_phys.set_virtual_address_map =
464 (efi_set_virtual_address_map_t *)
465 (unsigned long)runtime->set_virtual_address_map;
466 early_memunmap(runtime, sizeof(efi_runtime_services_64_t));
/*
 * Dispatch to the bitness-appropriate runtime-table probe and, on
 * success, mark runtime services as available in efi.flags.
 */
471 static int __init efi_runtime_init(void)
476 * Check out the runtime services table. We need to map
477 * the runtime services table so that we can grab the physical
478 * address of several of the EFI runtime functions, needed to
479 * set the firmware into virtual mode.
481 * When EFI_PARAVIRT is in force then we could not map runtime
482 * service memory region because we do not have direct access to it.
483 * However, runtime services are available through proxy functions
484 * (e.g. in case of Xen dom0 EFI implementation they call special
485 * hypercall which executes relevant EFI functions) and that is why
486 * they are always enabled.
489 if (!efi_enabled(EFI_PARAVIRT)) {
490 if (efi_enabled(EFI_64BIT))
491 rv = efi_runtime_init64();
493 rv = efi_runtime_init32();
499 set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
/*
 * Main early EFI initialization: locate and validate the system table,
 * print the firmware vendor/version, parse the configuration tables,
 * and probe runtime-services support.
 */
504 void __init efi_init(void)
507 char vendor[100] = "unknown";
/* 32-bit kernels cannot reach firmware structures above 4GB. */
511 if (boot_params.efi_info.efi_systab_hi ||
512 boot_params.efi_info.efi_memmap_hi) {
513 pr_info("Table located above 4GB, disabling EFI.\n");
516 efi_phys.systab = (efi_system_table_t *)boot_params.efi_info.efi_systab;
518 efi_phys.systab = (efi_system_table_t *)
519 (boot_params.efi_info.efi_systab |
520 ((__u64)boot_params.efi_info.efi_systab_hi<<32));
523 if (efi_systab_init(efi_phys.systab))
526 efi.config_table = (unsigned long)efi.systab->tables;
527 efi.fw_vendor = (unsigned long)efi.systab->fw_vendor;
528 efi.runtime = (unsigned long)efi.systab->runtime;
531 * Show what we know for posterity
/* fw_vendor is UCS-2; copy up to sizeof(vendor)-1 chars, NUL-padded. */
533 c16 = early_memremap_ro(efi.systab->fw_vendor,
534 sizeof(vendor) * sizeof(efi_char16_t));
536 for (i = 0; i < sizeof(vendor) - 1 && c16[i]; ++i)
539 early_memunmap(c16, sizeof(vendor) * sizeof(efi_char16_t));
541 pr_err("Could not map the firmware vendor!\n");
544 pr_info("EFI v%u.%.02u by %s\n",
545 efi.systab->hdr.revision >> 16,
546 efi.systab->hdr.revision & 0xffff, vendor);
548 if (efi_reuse_config(efi.systab->tables, efi.systab->nr_tables))
551 if (efi_config_init(arch_tables))
555 * Note: We currently don't support runtime services on an EFI
556 * that doesn't match the kernel 32/64-bit mode.
559 if (!efi_runtime_supported())
560 pr_info("No EFI runtime due to 32/64-bit mismatch with kernel\n")
562 if (efi_runtime_disabled() || efi_runtime_init()) {
570 if (efi_enabled(EFI_DBG))
/*
 * Toggle the NX bit on the virtual mapping of an EFI region:
 * executable when @executable is true, non-executable otherwise.
 */
574 void __init efi_set_executable(efi_memory_desc_t *md, bool executable)
578 addr = md->virt_addr;
579 npages = md->num_pages;
/* Convert EFI page units to native kernel pages. */
581 memrange_efi_to_native(&addr, &npages);
584 set_memory_x(addr, npages);
586 set_memory_nx(addr, npages);
/* Mark all EFI_RUNTIME_SERVICES_CODE regions executable. */
589 void __init runtime_code_page_mkexec(void)
591 efi_memory_desc_t *md;
593 /* Make EFI runtime service code area executable */
594 for_each_efi_memory_desc(md) {
595 if (md->type != EFI_RUNTIME_SERVICES_CODE)
598 efi_set_executable(md, true);
/*
 * Set the caching attribute of @size bytes at @addr to uncached (UC),
 * rounding the size up to whole EFI pages first.
 */
602 void __init efi_memory_uc(u64 addr, unsigned long size)
604 unsigned long page_shift = 1UL << EFI_PAGE_SHIFT;
607 npages = round_up(size, page_shift) / page_shift;
608 memrange_efi_to_native(&addr, &npages);
609 set_memory_uc(addr, npages);
/*
 * Legacy (efi=old_map) mapping of a single EFI region: reuse the
 * kernel direct mapping when the range is already mapped, otherwise
 * ioremap it; store the resulting virtual address in md->virt_addr.
 */
612 void __init old_map_region(efi_memory_desc_t *md)
614 u64 start_pfn, end_pfn, end;
618 start_pfn = PFN_DOWN(md->phys_addr);
/* NOTE(review): uses PAGE_SHIFT here, not EFI_PAGE_SHIFT — verify intent. */
619 size = md->num_pages << PAGE_SHIFT;
620 end = md->phys_addr + size;
621 end_pfn = PFN_UP(end);
623 if (pfn_range_is_mapped(start_pfn, end_pfn)) {
624 va = __va(md->phys_addr);
/* Direct map is WB; force UC if the region isn't WB-capable. */
626 if (!(md->attribute & EFI_MEMORY_WB))
627 efi_memory_uc((u64)(unsigned long)va, size);
629 va = efi_ioremap(md->phys_addr, size,
630 md->type, md->attribute);
632 md->virt_addr = (u64) (unsigned long) va;
634 pr_err("ioremap of 0x%llX failed!\n",
635 (unsigned long long)md->phys_addr);
638 /* Merge contiguous regions of the same type and attribute */
/*
 * Coalesce physically adjacent descriptors with identical type and
 * attributes into the previous entry; the absorbed descriptor is
 * neutralized by retyping it EFI_RESERVED_TYPE.
 */
639 static void __init efi_merge_regions(void)
641 efi_memory_desc_t *md, *prev_md = NULL;
643 for_each_efi_memory_desc(md) {
/* Different type or attributes: cannot merge with the previous one. */
651 if (prev_md->type != md->type ||
652 prev_md->attribute != md->attribute) {
657 prev_size = prev_md->num_pages << EFI_PAGE_SHIFT;
/* Physically contiguous: fold this entry into the previous one. */
659 if (md->phys_addr == (prev_md->phys_addr + prev_size)) {
660 prev_md->num_pages += md->num_pages;
661 md->type = EFI_RESERVED_TYPE;
/*
 * If the system table's physical address falls inside @md, update
 * efi.systab to the corresponding address in the region's new virtual
 * mapping (phys -> virt offset of this descriptor).
 */
669 static void __init get_systab_virt_addr(efi_memory_desc_t *md)
674 size = md->num_pages << EFI_PAGE_SHIFT;
675 end = md->phys_addr + size;
676 systab = (u64)(unsigned long)efi_phys.systab;
677 if (md->phys_addr <= systab && systab < end) {
678 systab += md->virt_addr - md->phys_addr;
679 efi.systab = (efi_system_table_t *)(unsigned long)systab;
/*
 * Double the page allocation backing the new memmap: allocate
 * 2^(old_shift+1) pages, copy the old contents (if any), free the old
 * buffer.  Presumably returns the new buffer — tail not visible here.
 */
683 static void *realloc_pages(void *old_memmap, int old_shift)
687 ret = (void *)__get_free_pages(GFP_KERNEL, old_shift + 1);
692 * A first-time allocation doesn't have anything to copy.
697 memcpy(ret, old_memmap, PAGE_SIZE << old_shift);
700 free_pages((unsigned long)old_memmap, old_shift);
705 * Iterate the EFI memory map in reverse order because the regions
706 * will be mapped top-down. The end result is the same as if we had
707 * mapped things forward, but doesn't require us to change the
708 * existing implementation of efi_map_region().
710 static inline void *efi_map_next_entry_reverse(void *entry)
/* Starting iteration (entry == NULL): begin at the last descriptor. */
714 return efi.memmap.map_end - efi.memmap.desc_size;
716 entry -= efi.memmap.desc_size;
/* Walked past the first descriptor: iteration is done. */
717 if (entry < efi.memmap.map)
724 * efi_map_next_entry - Return the next EFI memory map descriptor
725 * @entry: Previous EFI memory map descriptor
727 * This is a helper function to iterate over the EFI memory map, which
728 * we do in different orders depending on the current configuration.
730 * To begin traversing the memory map @entry must be %NULL.
732 * Returns %NULL when we reach the end of the memory map.
734 static void *efi_map_next_entry(void *entry)
/* 64-bit with the new memmap scheme: iterate in reverse (see below). */
736 if (!efi_enabled(EFI_OLD_MEMMAP) && efi_enabled(EFI_64BIT)) {
738 * Starting in UEFI v2.5 the EFI_PROPERTIES_TABLE
739 * config table feature requires us to map all entries
740 * in the same order as they appear in the EFI memory
741 * map. That is to say, entry N must have a lower
742 * virtual address than entry N+1. This is because the
743 * firmware toolchain leaves relative references in
744 * the code/data sections, which are split and become
745 * separate EFI memory regions. Mapping things
746 * out-of-order leads to the firmware accessing
747 * unmapped addresses.
749 * Since we need to map things this way whether or not
750 * the kernel actually makes use of
751 * EFI_PROPERTIES_TABLE, let's just switch to this
752 * scheme by default for 64-bit.
754 return efi_map_next_entry_reverse(entry);
/* Forward iteration: NULL starts at the first descriptor. */
759 return efi.memmap.map;
761 entry += efi.memmap.desc_size;
762 if (entry >= efi.memmap.map_end)
/*
 * Decide whether an EFI memory descriptor needs a runtime virtual
 * mapping: runtime regions always; plus RAM-like regions in mixed
 * mode and boot-services regions as a firmware-bug workaround.
 */
768 static bool should_map_region(efi_memory_desc_t *md)
771 * Runtime regions always require runtime mappings (obviously).
773 if (md->attribute & EFI_MEMORY_RUNTIME)
777 * 32-bit EFI doesn't suffer from the bug that requires us to
778 * reserve boot services regions, and mixed mode support
779 * doesn't exist for 32-bit kernels.
781 if (IS_ENABLED(CONFIG_X86_32))
785 * Map all of RAM so that we can access arguments in the 1:1
786 * mapping when making EFI runtime calls.
788 if (IS_ENABLED(CONFIG_EFI_MIXED) && !efi_is_native()) {
789 if (md->type == EFI_CONVENTIONAL_MEMORY ||
790 md->type == EFI_LOADER_DATA ||
791 md->type == EFI_LOADER_CODE)
796 * Map boot services regions as a workaround for buggy
797 * firmware that accesses them even when they shouldn't.
799 * See efi_{reserve,free}_boot_services().
801 if (md->type == EFI_BOOT_SERVICES_CODE ||
802 md->type == EFI_BOOT_SERVICES_DATA)
809 * Map the efi memory ranges of the runtime services and update new_mmap with
/*
 * Walk the memory map (in the configured order), map each region that
 * should_map_region() approves, and build a packed copy of the mapped
 * descriptors in a page-backed buffer that is grown on demand.
 * @count and @pg_shift report the number of entries and the buffer's
 * page-order back to the caller.
 */
812 static void * __init efi_map_regions(int *count, int *pg_shift)
814 void *p, *new_memmap = NULL;
815 unsigned long left = 0;
816 unsigned long desc_size;
817 efi_memory_desc_t *md;
819 desc_size = efi.memmap.desc_size;
822 while ((p = efi_map_next_entry(p))) {
825 if (!should_map_region(md))
/* Track where the system table lands in the new virtual layout. */
829 get_systab_virt_addr(md);
/* Out of room in the output buffer: double it. */
831 if (left < desc_size) {
832 new_memmap = realloc_pages(new_memmap, *pg_shift);
836 left += PAGE_SIZE << *pg_shift;
840 memcpy(new_memmap + (*count * desc_size), md, desc_size);
/*
 * Virtual-mode setup for a kexec'd kernel: the firmware was already
 * switched to virtual mode by the first kernel, so instead of calling
 * SetVirtualAddressMap() again we re-create the exact same fixed
 * virtual mappings (passed via setup_data) and wire up the runtime
 * service wrappers.  On any failure, runtime services are disabled.
 */
849 static void __init kexec_enter_virtual_mode(void)
851 #ifdef CONFIG_KEXEC_CORE
852 efi_memory_desc_t *md;
853 unsigned int num_pages;
858 * We don't do virtual mode, since we don't do runtime services, on
859 * non-native EFI. With efi=old_map, we don't do runtime services in
860 * kexec kernel because in the initial boot something else might
861 * have been mapped at these virtual addresses.
863 if (!efi_is_native() || efi_enabled(EFI_OLD_MEMMAP)) {
865 clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
869 if (efi_alloc_page_tables()) {
870 pr_err("Failed to allocate EFI page tables\n");
871 clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
876 * Map efi regions which were passed via setup_data. The virt_addr is a
877 * fixed addr which was used in first kernel of a kexec boot.
879 for_each_efi_memory_desc(md) {
880 efi_map_region_fixed(md); /* FIXME: add error handling */
881 get_systab_virt_addr(md);
885 * Unregister the early EFI memmap from efi_init() and install
886 * the new EFI memory map.
890 if (efi_memmap_init_late(efi.memmap.phys_map,
891 efi.memmap.desc_size * efi.memmap.nr_map)) {
892 pr_err("Failed to remap late EFI memory map\n");
893 clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
/* Pages needed to cover the memmap, rounded up to a page boundary. */
899 num_pages = ALIGN(efi.memmap.nr_map * efi.memmap.desc_size, PAGE_SIZE);
900 num_pages >>= PAGE_SHIFT;
902 if (efi_setup_page_tables(efi.memmap.phys_map, num_pages)) {
903 clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
907 efi_sync_low_kernel_mappings();
910 * Now that EFI is in virtual mode, update the function
911 * pointers in the runtime service table to the new virtual addresses.
913 * Call EFI services through wrapper functions.
915 efi.runtime_version = efi_systab.hdr.revision;
917 efi_native_runtime_setup();
/* SetVirtualAddressMap() may only ever be called once per boot chain. */
919 efi.set_virtual_address_map = NULL;
921 if (efi_enabled(EFI_OLD_MEMMAP) && (__supported_pte_mask & _PAGE_NX))
922 runtime_code_page_mkexec();
927 * This function will switch the EFI runtime services to virtual mode.
928 * Essentially, we look through the EFI memmap and map every region that
929 * has the runtime attribute bit set in its memory descriptor into the
930 * efi_pgd page table.
932 * The old method which used to update that memory descriptor with the
933 * virtual address obtained from ioremap() is still supported when the
934 * kernel is booted with efi=old_map on its command line. Same old
935 * method enabled the runtime services to be called without having to
936 * thunk back into physical mode for every invocation.
938 * The new method does a pagetable switch in a preemption-safe manner
939 * so that we're in a different address space when calling a runtime
940 * function. For function arguments passing we do copy the PUDs of the
941 * kernel page table into efi_pgd prior to each call.
943 * Specially for kexec boot, efi runtime maps in previous kernel should
944 * be passed in via setup_data. In that case runtime ranges will be mapped
945 * to the same virtual addresses as the first kernel, see
946 * kexec_enter_virtual_mode().
948 static void __init __efi_enter_virtual_mode(void)
950 int count = 0, pg_shift = 0;
951 void *new_memmap = NULL;
957 if (efi_alloc_page_tables()) {
958 pr_err("Failed to allocate EFI page tables\n");
/* Build the trimmed memmap that will be handed to the firmware. */
963 new_memmap = efi_map_regions(&count, &pg_shift);
965 pr_err("Error reallocating memory, EFI runtime non-functional!\n");
969 pa = __pa(new_memmap);
972 * Unregister the early EFI memmap from efi_init() and install
973 * the new EFI memory map that we are about to pass to the
974 * firmware via SetVirtualAddressMap().
978 if (efi_memmap_init_late(pa, efi.memmap.desc_size * count)) {
979 pr_err("Failed to remap late EFI memory map\n");
983 if (efi_enabled(EFI_DBG)) {
984 pr_info("EFI runtime memory map:\n");
988 if (WARN_ON(!efi.systab))
991 if (efi_setup_page_tables(pa, 1 << pg_shift))
994 efi_sync_low_kernel_mappings();
/* Native: call SVAM directly; mixed mode: go through the thunk. */
996 if (efi_is_native()) {
997 status = phys_efi_set_virtual_address_map(
998 efi.memmap.desc_size * count,
999 efi.memmap.desc_size,
1000 efi.memmap.desc_version,
1001 (efi_memory_desc_t *)pa);
1003 status = efi_thunk_set_virtual_address_map(
1004 efi_phys.set_virtual_address_map,
1005 efi.memmap.desc_size * count,
1006 efi.memmap.desc_size,
1007 efi.memmap.desc_version,
1008 (efi_memory_desc_t *)pa);
1011 if (status != EFI_SUCCESS) {
1012 pr_err("Unable to switch EFI into virtual mode (status=%lx)!\n",
/* Boot-services regions are no longer needed after SVAM succeeds. */
1017 efi_free_boot_services();
1020 * Now that EFI is in virtual mode, update the function
1021 * pointers in the runtime service table to the new virtual addresses.
1023 * Call EFI services through wrapper functions.
1025 efi.runtime_version = efi_systab.hdr.revision;
1027 if (efi_is_native())
1028 efi_native_runtime_setup();
1030 efi_thunk_runtime_setup();
/* SetVirtualAddressMap() must never be called a second time. */
1032 efi.set_virtual_address_map = NULL;
1035 * Apply more restrictive page table mapping attributes now that
1036 * SVAM() has been called and the firmware has performed all
1037 * necessary relocation fixups for the new virtual addresses.
1039 efi_runtime_update_mappings();
1041 /* clean DUMMY object */
1042 efi_delete_dummy_variable();
/* Error path (label elided in this view): disable runtime services. */
1046 clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
/*
 * Public entry point: skip on paravirt, take the kexec path when a
 * prior kernel already switched the firmware, otherwise do the full
 * SetVirtualAddressMap() dance; dump the EFI page tables afterwards.
 */
1049 void __init efi_enter_virtual_mode(void)
1051 if (efi_enabled(EFI_PARAVIRT))
1055 kexec_enter_virtual_mode();
1057 __efi_enter_virtual_mode();
1059 efi_dump_pagetable();
/*
 * Parse the arch-specific "efi=" command-line option; currently only
 * "old_map" is handled here, selecting the legacy ioremap-based scheme.
 */
1062 static int __init arch_parse_efi_cmdline(char *str)
1065 pr_warn("need at least one option\n");
1069 if (parse_option_str(str, "old_map"))
1070 set_bit(EFI_OLD_MEMMAP, &efi.flags);
1074 early_param("efi", arch_parse_efi_cmdline);
1076 bool efi_is_table_address(unsigned long phys_addr)
1080 if (phys_addr == EFI_INVALID_TABLE_ADDR)
1083 for (i = 0; i < ARRAY_SIZE(efi_tables); i++)
1084 if (*(efi_tables[i]) == phys_addr)