/*
 * Copyright (C) 1995 Linus Torvalds
 *
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *
 * Memory region support
 *	David Parsons <orc@pell.chi.il.us>, July-August 1999
 *
 * Added E820 sanitization routine (removes overlapping memory regions);
 *	Brian Moyle <bmoyle@mvista.com>, February 2001
 *
 * Moved CPU detection code to cpu/${cpu}.c
 *	Patrick Mochel <mochel@osdl.org>, March 2002
 *
 * Provisions for empty E820 memory regions (reported by certain BIOSes).
 *	Alex Achenbach <xela@slit.de>, December 2002.
 */

/*
 * This file handles the architecture-dependent parts of initialization.
 */

#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/screen_info.h>
#include <linux/ioport.h>
#include <linux/acpi.h>
#include <linux/sfi.h>
#include <linux/apm_bios.h>
#include <linux/initrd.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/seq_file.h>
#include <linux/console.h>
#include <linux/root_dev.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/efi.h>
#include <linux/init.h>
#include <linux/edd.h>
#include <linux/iscsi_ibft.h>
#include <linux/nodemask.h>
#include <linux/kexec.h>
#include <linux/dmi.h>
#include <linux/pfn.h>
#include <linux/pci.h>
#include <asm/pci-direct.h>
#include <linux/init_ohci1394_dma.h>
#include <linux/kvm_para.h>
#include <linux/dma-contiguous.h>

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/delay.h>

#include <linux/kallsyms.h>
#include <linux/cpufreq.h>
#include <linux/dma-mapping.h>
#include <linux/ctype.h>
#include <linux/uaccess.h>

#include <linux/percpu.h>
#include <linux/crash_dump.h>
#include <linux/tboot.h>
#include <linux/jiffies.h>

#include <video/edid.h>

#include <asm/realmode.h>
#include <asm/mpspec.h>
#include <asm/setup.h>
#include <asm/timer.h>
#include <asm/i8259.h>
#include <asm/sections.h>
#include <asm/io_apic.h>
#include <asm/setup_arch.h>
#include <asm/bios_ebda.h>
#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/kasan.h>
#include <asm/vsyscall.h>
#include <asm/iommu.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/paravirt.h>
#include <asm/hypervisor.h>
#include <asm/olpc_ofw.h>
#include <asm/percpu.h>
#include <asm/topology.h>
#include <asm/apicdef.h>
#include <asm/amd_nb.h>
#include <asm/alternative.h>
#include <asm/prom.h>
#include <asm/microcode.h>
#include <asm/kaslr.h>
#include <asm/kaiser.h>

/*
 * max_low_pfn_mapped: highest direct mapped pfn under 4GB
 * max_pfn_mapped:     highest direct mapped pfn over 4GB
 *
 * The direct mapping only covers E820_RAM regions, so the ranges and gaps are
 * represented by pfn_mapped.
 */
unsigned long max_low_pfn_mapped;
unsigned long max_pfn_mapped;

#ifdef CONFIG_DMI
RESERVE_BRK(dmi_alloc, 65536);
#endif

static __initdata unsigned long _brk_start = (unsigned long)__brk_base;
unsigned long _brk_end = (unsigned long)__brk_base;

#ifdef CONFIG_X86_64
int default_cpu_present_to_apicid(int mps_cpu)
{
	return __default_cpu_present_to_apicid(mps_cpu);
}

int default_check_phys_apicid_present(int phys_apicid)
{
	return __default_check_phys_apicid_present(phys_apicid);
}
#endif

struct boot_params boot_params;

static struct resource data_resource = {
	.name	= "Kernel data",
	.start	= 0,
	.end	= 0,
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};

static struct resource code_resource = {
	.name	= "Kernel code",
	.start	= 0,
	.end	= 0,
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};

static struct resource bss_resource = {
	.name	= "Kernel bss",
	.start	= 0,
	.end	= 0,
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM
};

#ifdef CONFIG_X86_32
/* cpu data as detected by the assembly code in head.S */
struct cpuinfo_x86 new_cpu_data = {
	.wp_works_ok = -1,
};

/* common cpu data for all cpus */
struct cpuinfo_x86 boot_cpu_data __read_mostly = {
	.wp_works_ok = -1,
};
EXPORT_SYMBOL(boot_cpu_data);

unsigned int def_to_bigsmp;

/* for MCA, but anyone else can use it if they want */
unsigned int machine_id;
unsigned int machine_submodel_id;
unsigned int BIOS_revision;

struct apm_info apm_info;
EXPORT_SYMBOL(apm_info);

#if defined(CONFIG_X86_SPEEDSTEP_SMI) || \
	defined(CONFIG_X86_SPEEDSTEP_SMI_MODULE)
struct ist_info ist_info;
EXPORT_SYMBOL(ist_info);
#else
struct ist_info ist_info;
#endif

#else
struct cpuinfo_x86 boot_cpu_data __read_mostly = {
	.x86_phys_bits = MAX_PHYSMEM_BITS,
};
EXPORT_SYMBOL(boot_cpu_data);
#endif

#if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
__visible unsigned long mmu_cr4_features __ro_after_init;
#else
__visible unsigned long mmu_cr4_features __ro_after_init = X86_CR4_PAE;
#endif

/* Boot loader ID and version as integers, for the benefit of proc_dointvec */
int bootloader_type, bootloader_version;

/*
 * Setup options
 */
struct screen_info screen_info;
EXPORT_SYMBOL(screen_info);
struct edid_info edid_info;
EXPORT_SYMBOL_GPL(edid_info);

extern int root_mountflags;

unsigned long saved_video_mode;

#define RAMDISK_IMAGE_START_MASK	0x07FF
#define RAMDISK_PROMPT_FLAG		0x8000
#define RAMDISK_LOAD_FLAG		0x4000
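/*
 * For illustration: these masks decode the legacy ram_size field of the
 * setup header. The low bits give the ramdisk start, bit 14 requests
 * loading the ramdisk and bit 15 asks for a prompt; setup_arch() below
 * feeds them into rd_image_start, rd_doload and rd_prompt.
 */
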
static char __initdata command_line[COMMAND_LINE_SIZE];
#ifdef CONFIG_CMDLINE_BOOL
static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
#endif

#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
struct edd edd;
#ifdef CONFIG_EDD_MODULE
EXPORT_SYMBOL(edd);
#endif
/**
 * copy_edd() - Copy the BIOS EDD information
 *              from boot_params into a safe place.
 */
static inline void __init copy_edd(void)
{
	memcpy(edd.mbr_signature, boot_params.edd_mbr_sig_buffer,
	       sizeof(edd.mbr_signature));
	memcpy(edd.edd_info, boot_params.eddbuf, sizeof(edd.edd_info));
	edd.mbr_signature_nr = boot_params.edd_mbr_sig_buf_entries;
	edd.edd_info_nr = boot_params.eddbuf_entries;
}
#else
static inline void __init copy_edd(void)
{
}
#endif

void * __init extend_brk(size_t size, size_t align)
{
	size_t mask = align - 1;
	void *ret;

	BUG_ON(_brk_start == 0);
	BUG_ON(align & mask);	/* align must be a power of two */

	_brk_end = (_brk_end + mask) & ~mask;
	BUG_ON((char *)(_brk_end + size) > __brk_limit);

	ret = (void *)_brk_end;
	_brk_end += size;

	memset(ret, 0, size);

	return ret;
}

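#if 0
/*
 * Illustrative sketch, not part of the build: boot-time code claims
 * scratch memory from the brk like this. With _brk_end == 0x1003 and
 * align == 16, mask is 0xf and (0x1003 + 0xf) & ~0xf rounds the
 * allocation start up to 0x1010.
 */
static void __init example_brk_user(void)
{
	/* 64 zeroed bytes, 16-byte aligned, carved out of the brk */
	void *scratch = extend_brk(64, 16);

	(void)scratch;
}
#endif
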
#ifdef CONFIG_X86_32
static void __init cleanup_highmap(void)
{
}
#endif

static void __init reserve_brk(void)
{
	if (_brk_end > _brk_start)
		memblock_reserve(__pa_symbol(_brk_start),
				 _brk_end - _brk_start);

	/*
	 * Mark brk area as locked down and no longer taking any
	 * new allocations.
	 */
	_brk_start = 0;
}

u64 relocated_ramdisk;

#ifdef CONFIG_BLK_DEV_INITRD

static u64 __init get_ramdisk_image(void)
{
	u64 ramdisk_image = boot_params.hdr.ramdisk_image;

	ramdisk_image |= (u64)boot_params.ext_ramdisk_image << 32;

	return ramdisk_image;
}
static u64 __init get_ramdisk_size(void)
{
	u64 ramdisk_size = boot_params.hdr.ramdisk_size;

	ramdisk_size |= (u64)boot_params.ext_ramdisk_size << 32;

	return ramdisk_size;
}

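/*
 * For illustration: ramdisk_image and ramdisk_size in the setup header
 * are only 32 bits wide; boot protocol 2.12 added ext_ramdisk_image and
 * ext_ramdisk_size to struct boot_params for the upper halves, which is
 * why the helpers above splice two 32-bit pieces into a u64.
 */
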
static void __init relocate_initrd(void)
{
	/* Assume only end is not page aligned */
	u64 ramdisk_image = get_ramdisk_image();
	u64 ramdisk_size  = get_ramdisk_size();
	u64 area_size     = PAGE_ALIGN(ramdisk_size);

	/* We need to move the initrd down into directly mapped mem */
	relocated_ramdisk = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),
						   area_size, PAGE_SIZE);

	if (!relocated_ramdisk)
		panic("Cannot find place for new RAMDISK of size %lld\n",
		      ramdisk_size);

	/*
	 * Note: this includes all the mem currently occupied by
	 * the initrd, we rely on that fact to keep the data intact.
	 */
	memblock_reserve(relocated_ramdisk, area_size);
	initrd_start = relocated_ramdisk + PAGE_OFFSET;
	initrd_end   = initrd_start + ramdisk_size;
	printk(KERN_INFO "Allocated new RAMDISK: [mem %#010llx-%#010llx]\n",
	       relocated_ramdisk, relocated_ramdisk + ramdisk_size - 1);

	copy_from_early_mem((void *)initrd_start, ramdisk_image, ramdisk_size);

	printk(KERN_INFO "Move RAMDISK from [mem %#010llx-%#010llx] to"
		" [mem %#010llx-%#010llx]\n",
		ramdisk_image, ramdisk_image + ramdisk_size - 1,
		relocated_ramdisk, relocated_ramdisk + ramdisk_size - 1);
}

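/*
 * For illustration: the source image may live in memory that is not yet
 * in the direct mapping, so copy_from_early_mem() copies it in chunks
 * through temporary early_memremap() mappings rather than via a plain
 * memcpy() of the physical range.
 */
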
static void __init early_reserve_initrd(void)
{
	/* Assume only end is not page aligned */
	u64 ramdisk_image = get_ramdisk_image();
	u64 ramdisk_size  = get_ramdisk_size();
	u64 ramdisk_end   = PAGE_ALIGN(ramdisk_image + ramdisk_size);

	if (!boot_params.hdr.type_of_loader ||
	    !ramdisk_image || !ramdisk_size)
		return;		/* No initrd provided by bootloader */

	memblock_reserve(ramdisk_image, ramdisk_end - ramdisk_image);
}

static void __init reserve_initrd(void)
{
	/* Assume only end is not page aligned */
	u64 ramdisk_image = get_ramdisk_image();
	u64 ramdisk_size  = get_ramdisk_size();
	u64 ramdisk_end   = PAGE_ALIGN(ramdisk_image + ramdisk_size);
	u64 mapped_size;

	if (!boot_params.hdr.type_of_loader ||
	    !ramdisk_image || !ramdisk_size)
		return;		/* No initrd provided by bootloader */

	initrd_start = 0;

	mapped_size = memblock_mem_size(max_pfn_mapped);
	if (ramdisk_size >= (mapped_size>>1))
		panic("initrd too large to handle, "
		      "disabling initrd (%lld needed, %lld available)\n",
		      ramdisk_size, mapped_size>>1);

	printk(KERN_INFO "RAMDISK: [mem %#010llx-%#010llx]\n", ramdisk_image,
	       ramdisk_end - 1);

	if (pfn_range_is_mapped(PFN_DOWN(ramdisk_image),
				PFN_DOWN(ramdisk_end))) {
		/* All are mapped, easy case */
		initrd_start = ramdisk_image + PAGE_OFFSET;
		initrd_end = initrd_start + ramdisk_size;
		return;
	}

	relocate_initrd();

	memblock_free(ramdisk_image, ramdisk_end - ramdisk_image);
}
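
/*
 * For illustration: the "half of mapped memory" ceiling above leaves
 * relocate_initrd() room to place a full copy of the image in
 * direct-mapped memory while the original is still reserved.
 */
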
#else
static void __init early_reserve_initrd(void)
{
}
static void __init reserve_initrd(void)
{
}
#endif /* CONFIG_BLK_DEV_INITRD */

static void __init parse_setup_data(void)
{
	struct setup_data *data;
	u64 pa_data, pa_next;

	pa_data = boot_params.hdr.setup_data;
	while (pa_data) {
		u32 data_len, data_type;

		data = early_memremap(pa_data, sizeof(*data));
		data_len = data->len + sizeof(struct setup_data);
		data_type = data->type;
		pa_next = data->next;
		early_memunmap(data, sizeof(*data));

		switch (data_type) {
		case SETUP_E820_EXT:
			parse_e820_ext(pa_data, data_len);
			break;
		case SETUP_DTB:
			add_dtb(pa_data);
			break;
		case SETUP_EFI:
			parse_efi_setup(pa_data, data_len);
			break;
		default:
			break;
		}
		pa_data = pa_next;
	}
}

static void __init e820_reserve_setup_data(void)
{
	struct setup_data *data;
	u64 pa_data;

	pa_data = boot_params.hdr.setup_data;
	if (!pa_data)
		return;

	while (pa_data) {
		data = early_memremap(pa_data, sizeof(*data));
		e820_update_range(pa_data, sizeof(*data)+data->len,
				  E820_RAM, E820_RESERVED_KERN);
		pa_data = data->next;
		early_memunmap(data, sizeof(*data));
	}

	sanitize_e820_map(e820->map, ARRAY_SIZE(e820->map), &e820->nr_map);
	memcpy(e820_saved, e820, sizeof(struct e820map));
	printk(KERN_INFO "extended physical RAM map:\n");
	e820_print_map("reserve setup_data");
}

static void __init memblock_x86_reserve_range_setup_data(void)
{
	struct setup_data *data;
	u64 pa_data;

	pa_data = boot_params.hdr.setup_data;
	while (pa_data) {
		data = early_memremap(pa_data, sizeof(*data));
		memblock_reserve(pa_data, sizeof(*data) + data->len);
		pa_data = data->next;
		early_memunmap(data, sizeof(*data));
	}
}
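
/*
 * For illustration: all three helpers above walk the same singly linked
 * setup_data list handed over by the boot loader. The list nodes live at
 * physical addresses outside the kernel image, so each iteration maps
 * just the struct setup_data header with early_memremap() for long
 * enough to read len/type/next, then unmaps it again.
 */
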
/*
 * --------- Crashkernel reservation ------------------------------
 */

#ifdef CONFIG_KEXEC_CORE

/* 16M alignment for crash kernel regions */
#define CRASH_ALIGN		(16 << 20)

/*
 * Keep the crash kernel below this limit. On 32-bit, earlier kernels
 * would limit the crash kernel to the low 512 MiB due to mapping
 * restrictions. On 64-bit, old kexec-tools need the crash kernel to be
 * below 896 MiB.
 */
#ifdef CONFIG_X86_32
# define CRASH_ADDR_LOW_MAX	(512 << 20)
# define CRASH_ADDR_HIGH_MAX	(512 << 20)
#else
# define CRASH_ADDR_LOW_MAX	(896UL << 20)
# define CRASH_ADDR_HIGH_MAX	MAXMEM
#endif

static int __init reserve_crashkernel_low(void)
{
#ifdef CONFIG_X86_64
	unsigned long long base, low_base = 0, low_size = 0;
	unsigned long total_low_mem;
	int ret;

	total_low_mem = memblock_mem_size(1UL << (32 - PAGE_SHIFT));

	/* crashkernel=Y,low */
	ret = parse_crashkernel_low(boot_command_line, total_low_mem, &low_size, &base);
	if (ret) {
		/*
		 * two parts from lib/swiotlb.c:
		 * -swiotlb size: user-specified with swiotlb= or default.
		 *
		 * -swiotlb overflow buffer: now hardcoded to 32k. We round it
		 *  to 8M for other buffers that may need to stay low too. Also
		 *  make sure we allocate enough extra low memory so that we
		 *  don't run out of DMA buffers for 32-bit devices.
		 */
		low_size = max(swiotlb_size_or_default() + (8UL << 20), 256UL << 20);
	} else {
		/* passed with crashkernel=0,low ? */
		if (!low_size)
			return 0;
	}

	low_base = memblock_find_in_range(low_size, 1ULL << 32, low_size, CRASH_ALIGN);
	if (!low_base) {
		pr_err("Cannot reserve %ldMB crashkernel low memory, please try smaller size.\n",
		       (unsigned long)(low_size >> 20));
		return -ENOMEM;
	}

	ret = memblock_reserve(low_base, low_size);
	if (ret) {
		pr_err("%s: Error reserving crashkernel low memblock.\n", __func__);
		return ret;
	}

	pr_info("Reserving %ldMB of low memory at %ldMB for crashkernel (System low RAM: %ldMB)\n",
		(unsigned long)(low_size >> 20),
		(unsigned long)(low_base >> 20),
		(unsigned long)(total_low_mem >> 20));

	crashk_low_res.start = low_base;
	crashk_low_res.end   = low_base + low_size - 1;
	insert_resource(&iomem_resource, &crashk_low_res);
#endif
	return 0;
}
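
/*
 * Worked example for the fallback sizing above: with the default 64 MiB
 * swiotlb, max(64M + 8M, 256M) resolves to 256 MiB of low memory, i.e.
 * the 256M floor dominates unless a much larger swiotlb= is requested
 * on the command line.
 */
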
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_size, crash_base, total_mem;
	bool high = false;
	int ret;

	total_mem = memblock_phys_mem_size();

	/* crashkernel=XM */
	ret = parse_crashkernel(boot_command_line, total_mem, &crash_size, &crash_base);
	if (ret != 0 || crash_size <= 0) {
		/* crashkernel=X,high */
		ret = parse_crashkernel_high(boot_command_line, total_mem,
					     &crash_size, &crash_base);
		if (ret != 0 || crash_size <= 0)
			return;
		high = true;
	}

	/* 0 means: find the address automatically */
	if (crash_base <= 0) {
		/*
		 * kexec wants the bzImage to sit below the applicable
		 * CRASH_ADDR_*_MAX limit.
		 */
		crash_base = memblock_find_in_range(CRASH_ALIGN,
						    high ? CRASH_ADDR_HIGH_MAX
							 : CRASH_ADDR_LOW_MAX,
						    crash_size, CRASH_ALIGN);
		if (!crash_base) {
			pr_info("crashkernel reservation failed - No suitable area found.\n");
			return;
		}
	} else {
		unsigned long long start;

		start = memblock_find_in_range(crash_base,
					       crash_base + crash_size,
					       crash_size, 1 << 20);
		if (start != crash_base) {
			pr_info("crashkernel reservation failed - memory is in use.\n");
			return;
		}
	}

	ret = memblock_reserve(crash_base, crash_size);
	if (ret) {
		pr_err("%s: Error reserving crashkernel memblock.\n", __func__);
		return;
	}

	if (crash_base >= (1ULL << 32) && reserve_crashkernel_low()) {
		memblock_free(crash_base, crash_size);
		return;
	}

	pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
		(unsigned long)(crash_size >> 20),
		(unsigned long)(crash_base >> 20),
		(unsigned long)(total_mem >> 20));

	crashk_res.start = crash_base;
	crashk_res.end   = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);
}
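
/*
 * Usage sketch: "crashkernel=256M" picks a base automatically below
 * CRASH_ADDR_LOW_MAX, "crashkernel=256M,high" allows the reservation
 * above 4 GiB (in which case reserve_crashkernel_low() also sets aside
 * DMA-capable low memory), and "crashkernel=256M@16M" requests a fixed
 * base that must still be free.
 */
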
#else
static void __init reserve_crashkernel(void)
{
}
#endif

static struct resource standard_io_resources[] = {
	{ .name = "dma1", .start = 0x00, .end = 0x1f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic1", .start = 0x20, .end = 0x21,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer0", .start = 0x40, .end = 0x43,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer1", .start = 0x50, .end = 0x53,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "keyboard", .start = 0x60, .end = 0x60,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "keyboard", .start = 0x64, .end = 0x64,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma page reg", .start = 0x80, .end = 0x8f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic2", .start = 0xa0, .end = 0xa1,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma2", .start = 0xc0, .end = 0xdf,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "fpu", .start = 0xf0, .end = 0xff,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO }
};

void __init reserve_standard_io_resources(void)
{
	int i;

	/* request I/O space for devices used on all i[345]86 PCs */
	for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
		request_resource(&ioport_resource, &standard_io_resources[i]);
}

static __init void reserve_ibft_region(void)
{
	unsigned long addr, size = 0;

	addr = find_ibft_region(&size);

	if (size)
		memblock_reserve(addr, size);
}

static bool __init snb_gfx_workaround_needed(void)
{
#ifdef CONFIG_PCI
	int i;
	u16 vendor, devid;
	static const __initconst u16 snb_ids[] = {
		0x0102,
		0x0112,
		0x0122,
		0x0106,
		0x0116,
		0x0126,
		0x010a,
	};

	/* Assume no if something weird is going on with PCI */
	if (!early_pci_allowed())
		return false;

	vendor = read_pci_config_16(0, 2, 0, PCI_VENDOR_ID);
	if (vendor != 0x8086)
		return false;

	devid = read_pci_config_16(0, 2, 0, PCI_DEVICE_ID);
	for (i = 0; i < ARRAY_SIZE(snb_ids); i++)
		if (devid == snb_ids[i])
			return true;
#endif

	return false;
}

/*
 * Sandy Bridge graphics has trouble with certain ranges, exclude
 * them from allocation.
 */
static void __init trim_snb_memory(void)
{
	static const __initconst unsigned long bad_pages[] = {
		0x20050000,
		0x20110000,
		0x20130000,
		0x20138000,
		0x40004000,
	};
	int i;

	if (!snb_gfx_workaround_needed())
		return;

	printk(KERN_DEBUG "reserving inaccessible SNB gfx pages\n");

	/*
	 * Reserve all memory below the 1 MB mark that has not
	 * already been reserved.
	 */
	memblock_reserve(0, 1<<20);

	for (i = 0; i < ARRAY_SIZE(bad_pages); i++) {
		if (memblock_reserve(bad_pages[i], PAGE_SIZE))
			printk(KERN_WARNING "failed to reserve 0x%08lx\n",
			       bad_pages[i]);
	}
}

/*
 * Here we put platform-specific memory range workarounds, i.e.
 * memory known to be corrupt or otherwise in need of being reserved on
 * specific platforms.
 *
 * If this gets used more widely it could use a real dispatch mechanism.
 */
static void __init trim_platform_memory_ranges(void)
{
	trim_snb_memory();
}

static void __init trim_bios_range(void)
{
	/*
	 * A special case is the first 4 KiB of memory: this is BIOS-owned,
	 * not kernel RAM, but generally not listed as such in the E820
	 * table.
	 *
	 * This typically reserves additional memory (64KiB by default)
	 * since some BIOSes are known to corrupt low memory. See the
	 * Kconfig help text for X86_RESERVE_LOW.
	 */
	e820_update_range(0, PAGE_SIZE, E820_RAM, E820_RESERVED);

	/*
	 * special case: Some BIOSes report the PC BIOS area
	 * (640 KiB to 1 MiB) as RAM even though it is not.
	 * Take it out.
	 */
	e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);

	sanitize_e820_map(e820->map, ARRAY_SIZE(e820->map), &e820->nr_map);
}

/* called before trim_bios_range() to spare extra sanitize */
static void __init e820_add_kernel_range(void)
{
	u64 start = __pa_symbol(_text);
	u64 size = __pa_symbol(_end) - start;

	/*
	 * Complain if .text .data and .bss are not marked as E820_RAM and
	 * attempt to fix it by adding the range. We may have a confused BIOS,
	 * or the user may have used memmap=exactmap or memmap=xxM$yyM to
	 * exclude the kernel range. If we really are running on top of
	 * non-RAM, we will crash later anyways.
	 */
	if (e820_all_mapped(start, start + size, E820_RAM))
		return;

	pr_warn(".text .data .bss are not marked as E820_RAM!\n");
	e820_remove_range(start, size, E820_RAM, 0);
	e820_add_region(start, size, E820_RAM);
}

static unsigned reserve_low = CONFIG_X86_RESERVE_LOW << 10;

static int __init parse_reservelow(char *p)
{
	unsigned long long size;

	if (!p)
		return -EINVAL;

	size = memparse(p, &p);

	if (size < 4096)
		size = 4096;

	if (size > 640*1024)
		size = 640*1024;

	reserve_low = size;

	return 0;
}

early_param("reservelow", parse_reservelow);

static void __init trim_low_memory_range(void)
{
	memblock_reserve(0, ALIGN(reserve_low, PAGE_SIZE));
}
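
/*
 * Usage sketch: "reservelow=64k" on the command line makes
 * trim_low_memory_range() reserve the first 64 KiB of physical memory;
 * memparse() accepts the usual K/M/G suffixes, and parse_reservelow()
 * clamps the result to the 4 KiB - 640 KiB range.
 */
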
/*
 * Dump out kernel offset information on panic.
 */
static int
dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
{
	if (kaslr_enabled()) {
		pr_emerg("Kernel Offset: 0x%lx from 0x%lx (relocation range: 0x%lx-0x%lx)\n",
			 kaslr_offset(),
			 __START_KERNEL,
			 __START_KERNEL_map,
			 MODULES_VADDR-1);
	} else {
		pr_emerg("Kernel Offset: disabled\n");
	}

	return 0;
}

/*
 * Determine if we were loaded by an EFI loader. If so, then we have also been
 * passed the efi memmap, systab, etc., so we should use these data structures
 * for initialization. Note, the efi init code path is determined by the
 * global efi_enabled. This allows the same kernel image to be used on existing
 * systems (with a traditional BIOS) as well as on EFI systems.
 */
/*
 * setup_arch - architecture-specific boot-time initializations
 *
 * Note: On x86_64, fixmaps are ready for use even before this is called.
 */

void __init setup_arch(char **cmdline_p)
{
	memblock_reserve(__pa_symbol(_text),
			 (unsigned long)__bss_stop - (unsigned long)_text);

	/*
	 * Make sure page 0 is always reserved because on systems with
	 * L1TF its contents can be leaked to user processes.
	 */
	memblock_reserve(0, PAGE_SIZE);

	early_reserve_initrd();

	/*
	 * At this point everything still needed from the boot loader
	 * or BIOS or kernel text should be early reserved or marked not
	 * RAM in e820. All other memory is free game.
	 */

#ifdef CONFIG_X86_32
	memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));

	/*
	 * copy kernel address range established so far and switch
	 * to the proper swapper page table
	 */
	clone_pgd_range(swapper_pg_dir     + KERNEL_PGD_BOUNDARY,
			initial_page_table + KERNEL_PGD_BOUNDARY,
			KERNEL_PGD_PTRS);

	load_cr3(swapper_pg_dir);
	/*
	 * Note: Quark X1000 CPUs advertise PGE incorrectly and require
	 * a cr3 based tlb flush, so the following __flush_tlb_all()
	 * will not flush anything because the cpu quirk which clears
	 * X86_FEATURE_PGE has not been invoked yet. Though due to the
	 * load_cr3() above the TLB has been flushed already. The
	 * quirk is invoked before subsequent calls to __flush_tlb_all()
	 * so proper operation is guaranteed.
	 */
	__flush_tlb_all();
#else
	printk(KERN_INFO "Command line: %s\n", boot_command_line);
#endif

	/*
	 * If we have OLPC OFW, we might end up relocating the fixmap due to
	 * reserve_top(), so do this before touching the ioremap area.
	 */
	olpc_ofw_detect();

	early_trap_init();
	early_cpu_init();
	early_ioremap_init();

	setup_olpc_ofw_pgd();

	ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev);
	screen_info = boot_params.screen_info;
	edid_info = boot_params.edid_info;
#ifdef CONFIG_X86_32
	apm_info.bios = boot_params.apm_bios_info;
	ist_info = boot_params.ist_info;
#endif
	saved_video_mode = boot_params.hdr.vid_mode;
	bootloader_type = boot_params.hdr.type_of_loader;
	if ((bootloader_type >> 4) == 0xe) {
		bootloader_type &= 0xf;
		bootloader_type |= (boot_params.hdr.ext_loader_type+0x10) << 4;
	}
	bootloader_version  = bootloader_type & 0xf;
	bootloader_version |= boot_params.hdr.ext_loader_ver << 4;
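	/*
	 * Worked example for the decoding above: type_of_loader == 0xe4
	 * with ext_loader_type == 0x1c yields loader ID 0x10 + 0x1c = 0x2c
	 * in the high bits of bootloader_type and version nibble 4 in the
	 * low bits; ext_loader_ver then supplies the upper version bits.
	 */
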
#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = boot_params.hdr.ram_size & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((boot_params.hdr.ram_size & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((boot_params.hdr.ram_size & RAMDISK_LOAD_FLAG) != 0);
#endif
#ifdef CONFIG_EFI
	if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
		     EFI32_LOADER_SIGNATURE, 4)) {
		set_bit(EFI_BOOT, &efi.flags);
	} else if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
		     EFI64_LOADER_SIGNATURE, 4)) {
		set_bit(EFI_BOOT, &efi.flags);
		set_bit(EFI_64BIT, &efi.flags);
	}

	if (efi_enabled(EFI_BOOT))
		efi_memblock_x86_reserve_range();
#endif

	x86_init.oem.arch_setup();

	iomem_resource.end = (1ULL << boot_cpu_data.x86_phys_bits) - 1;
	setup_memory_map();
	parse_setup_data();

	copy_edd();

	if (!boot_params.hdr.root_flags)
		root_mountflags &= ~MS_RDONLY;
	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = _brk_end;

	mpx_mm_init(&init_mm);

	code_resource.start = __pa_symbol(_text);
	code_resource.end = __pa_symbol(_etext)-1;
	data_resource.start = __pa_symbol(_etext);
	data_resource.end = __pa_symbol(_edata)-1;
	bss_resource.start = __pa_symbol(__bss_start);
	bss_resource.end = __pa_symbol(__bss_stop)-1;

#ifdef CONFIG_CMDLINE_BOOL
#ifdef CONFIG_CMDLINE_OVERRIDE
	strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
#else
	if (builtin_cmdline[0]) {
		/* append boot loader cmdline to builtin */
		strlcat(builtin_cmdline, " ", COMMAND_LINE_SIZE);
		strlcat(builtin_cmdline, boot_command_line, COMMAND_LINE_SIZE);
		strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
	}
#endif
#endif

	strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = command_line;
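	/*
	 * For illustration: with CONFIG_CMDLINE="console=ttyS0" and a boot
	 * loader command line of "root=/dev/sda1", the merged line above
	 * becomes "console=ttyS0 root=/dev/sda1"; with
	 * CONFIG_CMDLINE_OVERRIDE the boot loader's line is discarded
	 * entirely.
	 */
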
	/*
	 * x86_configure_nx() is called before parse_early_param() to detect
	 * whether hardware doesn't support NX (so that the early EHCI debug
	 * console setup can safely call set_fixmap()). It may then be called
	 * again from within noexec_setup() during parsing early parameters
	 * to honor the respective command line option.
	 */
	x86_configure_nx();

	parse_early_param();

	x86_report_nx();

	/* after early param, so could get panic from serial */
	memblock_x86_reserve_range_setup_data();

	if (acpi_mps_check()) {
#ifdef CONFIG_X86_LOCAL_APIC
		disable_apic = 1;
#endif
		setup_clear_cpu_cap(X86_FEATURE_APIC);
	}

#ifdef CONFIG_PCI
	if (pci_early_dump_regs)
		early_dump_pci_devices();
#endif

	/* update the e820_saved too */
	e820_reserve_setup_data();
	finish_e820_parsing();

	if (efi_enabled(EFI_BOOT))
		efi_init();

	dmi_scan_machine();
	dmi_memdev_walk();
	dmi_set_dump_stack_arch_desc();

	/*
	 * VMware detection requires dmi to be available, so this
	 * needs to be done after dmi_scan_machine, for the BP.
	 */
	init_hypervisor_platform();

	/*
	 * This needs to happen right after XENPV is set on xen and
	 * kaiser_enabled is checked below in cleanup_highmap().
	 */
	kaiser_check_boottime_disable();

	x86_init.resources.probe_roms();

	/* after parse_early_param, so could debug it */
	insert_resource(&iomem_resource, &code_resource);
	insert_resource(&iomem_resource, &data_resource);
	insert_resource(&iomem_resource, &bss_resource);

	e820_add_kernel_range();
	trim_bios_range();
#ifdef CONFIG_X86_32
	if (ppro_with_ram_bug()) {
		e820_update_range(0x70000000ULL, 0x40000ULL, E820_RAM,
				  E820_RESERVED);
		sanitize_e820_map(e820->map, ARRAY_SIZE(e820->map), &e820->nr_map);
		printk(KERN_INFO "fixed physical RAM map:\n");
		e820_print_map("bad_ppro");
	}
#else
	early_gart_iommu_check();
#endif

	/*
	 * partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	max_pfn = e820_end_of_ram_pfn();

	/* update e820 for memory not covered by WB MTRRs */
	mtrr_bp_init();
	if (mtrr_trim_uncached_memory(max_pfn))
		max_pfn = e820_end_of_ram_pfn();

	max_possible_pfn = max_pfn;

	/*
	 * This call is required when the CPU does not support PAT. If
	 * mtrr_bp_init() invoked it already via pat_init() the call has no
	 * effect.
	 */
	init_cache_modes();

	/*
	 * Define random base addresses for memory sections after max_pfn is
	 * defined and before each memory section base is used.
	 */
	kernel_randomize_memory();

#ifdef CONFIG_X86_32
	/* max_low_pfn gets updated here */
	find_low_pfn_range();
#else
	check_x2apic();

	/* How many end-of-memory variables you have, grandma! */
	/* need this before calling reserve_initrd */
	if (max_pfn > (1UL<<(32 - PAGE_SHIFT)))
		max_low_pfn = e820_end_of_low_ram_pfn();
	else
		max_low_pfn = max_pfn;

	high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
#endif

	/*
	 * Find and reserve possible boot-time SMP configuration:
	 */
	find_smp_config();

	reserve_ibft_region();

	early_alloc_pgt_buf();

	/*
	 * Need to conclude brk before memblock_x86_fill(): it could use
	 * memblock_find_in_range(), and that could overlap with the brk
	 * area.
	 */
	reserve_brk();

	cleanup_highmap();

	memblock_set_current_limit(ISA_END_ADDRESS);
	memblock_x86_fill();

	reserve_bios_regions();

	if (efi_enabled(EFI_MEMMAP)) {
		efi_fake_memmap();
		efi_find_mirror();
		efi_esrt_init();

		/*
		 * The EFI specification says that boot service code won't be
		 * called after ExitBootServices(). This is, in fact, a lie.
		 */
		efi_reserve_boot_services();
	}

	/* preallocate 4k for mptable mpc */
	early_reserve_e820_mpc_new();

#ifdef CONFIG_X86_CHECK_BIOS_CORRUPTION
	setup_bios_corruption_check();
#endif

#ifdef CONFIG_X86_32
	printk(KERN_DEBUG "initial memory mapped: [mem 0x00000000-%#010lx]\n",
	       (max_pfn_mapped<<PAGE_SHIFT) - 1);
#endif

	reserve_real_mode();

	trim_platform_memory_ranges();
	trim_low_memory_range();

	init_mem_mapping();

	early_trap_pf_init();

	/*
	 * Update mmu_cr4_features (and, indirectly, trampoline_cr4_features)
	 * with the current CR4 value. This may not be necessary, but
	 * auditing all the early-boot CR4 manipulation would be needed to
	 * rule it out.
	 */
	mmu_cr4_features = __read_cr4();

	memblock_set_current_limit(get_max_mapped());

	/*
	 * NOTE: On x86-32, only from this point on, fixmaps are ready for use.
	 */

#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
	if (init_ohci1394_dma_early)
		init_ohci1394_dma_on_all_controllers();
#endif
	/* Allocate bigger log buffer */
	setup_log_buf(1);

	reserve_initrd();

	acpi_table_upgrade();

	vsmp_init();

	io_delay_init();

	/*
	 * Parse the ACPI tables for possible boot-time SMP configuration.
	 */
	acpi_boot_table_init();

	early_acpi_boot_init();

	initmem_init();

	dma_contiguous_reserve(max_pfn_mapped << PAGE_SHIFT);

	/*
	 * Reserve memory for crash kernel after SRAT is parsed so that it
	 * won't consume hotpluggable memory.
	 */
	reserve_crashkernel();

	memblock_find_dma_reserve();

#ifdef CONFIG_KVM_GUEST
	kvmclock_init();
#endif

	x86_init.paging.pagetable_init();

	kasan_init();

#ifdef CONFIG_X86_32
	/* sync back kernel address range */
	clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
			swapper_pg_dir     + KERNEL_PGD_BOUNDARY,
			KERNEL_PGD_PTRS);

	/*
	 * sync back low identity map too. It is used for example
	 * in the 32-bit EFI stub.
	 */
	clone_pgd_range(initial_page_table,
			swapper_pg_dir     + KERNEL_PGD_BOUNDARY,
			min(KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
#endif

	tboot_probe();

	map_vsyscall();

	generic_apic_probe();

	early_quirks();

	/*
	 * Read APIC and some other early information from ACPI tables.
	 */
	acpi_boot_init();
	sfi_init();
	x86_dtb_init();

	/*
	 * get boot-time SMP configuration:
	 */
	if (smp_found_config)
		get_smp_config();

	/*
	 * Systems without ACPI or mptables might not have the local
	 * APIC mapped yet, but prefill_possible_map() might need to
	 * access it.
	 */
	init_apic_mappings();

	prefill_possible_map();

	init_cpu_to_node();

	io_apic_init_mappings();

	kvm_guest_init();

	e820_reserve_resources();
	e820_mark_nosave_regions(max_low_pfn);

	x86_init.resources.reserve_resources();

	e820_setup_gap();

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	if (!efi_enabled(EFI_BOOT) ||
	    (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
		conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

	x86_init.oem.banner();

	x86_init.timers.wallclock_init();

	mcheck_init();

	arch_init_ideal_nops();

	register_refined_jiffies(CLOCK_TICK_RATE);

#ifdef CONFIG_EFI
	if (efi_enabled(EFI_BOOT))
		efi_apply_memmap_quirks();
#endif
}

#ifdef CONFIG_X86_32

static struct resource video_ram_resource = {
	.name	= "Video RAM area",
	.start	= 0xa0000,
	.end	= 0xbffff,
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM
};

void __init i386_reserve_resources(void)
{
	request_resource(&iomem_resource, &video_ram_resource);
	reserve_standard_io_resources();
}

#endif /* CONFIG_X86_32 */

static struct notifier_block kernel_offset_notifier = {
	.notifier_call = dump_kernel_offset
};

static int __init register_kernel_offset_dumper(void)
{
	atomic_notifier_chain_register(&panic_notifier_list,
					&kernel_offset_notifier);
	return 0;
}
__initcall(register_kernel_offset_dumper);

void arch_show_smap(struct seq_file *m, struct vm_area_struct *vma)
{
	if (!boot_cpu_has(X86_FEATURE_OSPKE))
		return;

	seq_printf(m, "ProtectionKey: %8u\n", vma_pkey(vma));
}
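
/*
 * For illustration: the hook above extends /proc/<pid>/smaps.
 * vma_pkey() extracts the protection-key bits from the VMA flags, and
 * the ProtectionKey: line is simply omitted on CPUs without OSPKE.
 */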