/*
 * Copyright (C) 1995 Linus Torvalds
 *
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 *
 * Memory region support
 *	David Parsons <orc@pell.chi.il.us>, July-August 1999
 *
 * Added E820 sanitization routine (removes overlapping memory regions);
 *	Brian Moyle <bmoyle@mvista.com>, February 2001
 *
 * Moved CPU detection code to cpu/${cpu}.c
 *	Patrick Mochel <mochel@osdl.org>, March 2002
 *
 * Provisions for empty E820 memory regions (reported by certain BIOSes).
 *	Alex Achenbach <xela@slit.de>, December 2002.
 *
 * This file handles the architecture-dependent parts of initialization.
 */
#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/screen_info.h>
#include <linux/ioport.h>
#include <linux/acpi.h>
#include <linux/sfi.h>
#include <linux/apm_bios.h>
#include <linux/initrd.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/seq_file.h>
#include <linux/console.h>
#include <linux/root_dev.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/efi.h>
#include <linux/init.h>
#include <linux/edd.h>
#include <linux/iscsi_ibft.h>
#include <linux/nodemask.h>
#include <linux/kexec.h>
#include <linux/dmi.h>
#include <linux/pfn.h>
#include <linux/pci.h>
#include <asm/pci-direct.h>
#include <linux/init_ohci1394_dma.h>
#include <linux/kvm_para.h>
#include <linux/dma-contiguous.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/delay.h>
#include <linux/kallsyms.h>
#include <linux/cpufreq.h>
#include <linux/dma-mapping.h>
#include <linux/ctype.h>
#include <linux/uaccess.h>
#include <linux/percpu.h>
#include <linux/crash_dump.h>
#include <linux/tboot.h>
#include <linux/jiffies.h>
#include <video/edid.h>
#include <asm/realmode.h>
#include <asm/mpspec.h>
#include <asm/setup.h>
#include <asm/timer.h>
#include <asm/i8259.h>
#include <asm/sections.h>
#include <asm/io_apic.h>
#include <asm/setup_arch.h>
#include <asm/bios_ebda.h>
#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/kasan.h>
#include <asm/vsyscall.h>
#include <asm/iommu.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/paravirt.h>
#include <asm/hypervisor.h>
#include <asm/olpc_ofw.h>
#include <asm/percpu.h>
#include <asm/topology.h>
#include <asm/apicdef.h>
#include <asm/amd_nb.h>
#include <asm/alternative.h>
#include <asm/prom.h>
#include <asm/microcode.h>
#include <asm/kaiser.h>
/*
 * max_low_pfn_mapped: highest direct mapped pfn under 4GB
 * max_pfn_mapped:     highest direct mapped pfn over 4GB
 *
 * The direct mapping only covers E820_RAM regions, so the ranges and gaps are
 * represented by pfn_mapped.
 */
unsigned long max_low_pfn_mapped;
unsigned long max_pfn_mapped;
RESERVE_BRK(dmi_alloc, 65536);

static __initdata unsigned long _brk_start = (unsigned long)__brk_base;
unsigned long _brk_end = (unsigned long)__brk_base;

int default_cpu_present_to_apicid(int mps_cpu)
{
        return __default_cpu_present_to_apicid(mps_cpu);
}

int default_check_phys_apicid_present(int phys_apicid)
{
        return __default_check_phys_apicid_present(phys_apicid);
}
struct boot_params boot_params;

static struct resource data_resource = {
        .name  = "Kernel data",
        .flags = IORESOURCE_BUSY | IORESOURCE_MEM
};

static struct resource code_resource = {
        .name  = "Kernel code",
        .flags = IORESOURCE_BUSY | IORESOURCE_MEM
};

static struct resource bss_resource = {
        .name  = "Kernel bss",
        .flags = IORESOURCE_BUSY | IORESOURCE_MEM
};
#ifdef CONFIG_X86_32
/* cpu data as detected by the assembly code in head.S */
struct cpuinfo_x86 new_cpu_data = {
};

/* common cpu data for all cpus */
struct cpuinfo_x86 boot_cpu_data __read_mostly = {
};
EXPORT_SYMBOL(boot_cpu_data);

unsigned int def_to_bigsmp;

/* for MCA, but anyone else can use it if they want */
unsigned int machine_id;
unsigned int machine_submodel_id;
unsigned int BIOS_revision;

struct apm_info apm_info;
EXPORT_SYMBOL(apm_info);

#if defined(CONFIG_X86_SPEEDSTEP_SMI) || \
        defined(CONFIG_X86_SPEEDSTEP_SMI_MODULE)
struct ist_info ist_info;
EXPORT_SYMBOL(ist_info);
#else
struct ist_info ist_info;
#endif

#else
struct cpuinfo_x86 boot_cpu_data __read_mostly = {
        .x86_phys_bits = MAX_PHYSMEM_BITS,
};
EXPORT_SYMBOL(boot_cpu_data);
#endif

#if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
__visible unsigned long mmu_cr4_features;
#else
__visible unsigned long mmu_cr4_features = X86_CR4_PAE;
#endif
/* Boot loader ID and version as integers, for the benefit of proc_dointvec */
int bootloader_type, bootloader_version;

struct screen_info screen_info;
EXPORT_SYMBOL(screen_info);
struct edid_info edid_info;
EXPORT_SYMBOL_GPL(edid_info);

extern int root_mountflags;

unsigned long saved_video_mode;

#define RAMDISK_IMAGE_START_MASK        0x07FF
#define RAMDISK_PROMPT_FLAG             0x8000
#define RAMDISK_LOAD_FLAG               0x4000

static char __initdata command_line[COMMAND_LINE_SIZE];
#ifdef CONFIG_CMDLINE_BOOL
static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
#endif
#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
struct edd edd;
#ifdef CONFIG_EDD_MODULE
EXPORT_SYMBOL(edd);
#endif
/**
 * copy_edd() - Copy the BIOS EDD information
 *              from boot_params into a safe place.
 */
static inline void __init copy_edd(void)
{
        memcpy(edd.mbr_signature, boot_params.edd_mbr_sig_buffer,
               sizeof(edd.mbr_signature));
        memcpy(edd.edd_info, boot_params.eddbuf, sizeof(edd.edd_info));
        edd.mbr_signature_nr = boot_params.edd_mbr_sig_buf_entries;
        edd.edd_info_nr = boot_params.eddbuf_entries;
}
#else
static inline void __init copy_edd(void)
{
}
#endif
void * __init extend_brk(size_t size, size_t align)
{
        size_t mask = align - 1;
        void *ret;

        BUG_ON(_brk_start == 0);
        BUG_ON(align & mask);

        _brk_end = (_brk_end + mask) & ~mask;
        BUG_ON((char *)(_brk_end + size) > __brk_limit);

        ret = (void *)_brk_end;
        _brk_end += size;
        memset(ret, 0, size);
        return ret;
}
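/*
 * Illustrative note (not from the original source): extend_brk() hands out
 * zeroed, aligned chunks from the early brk area. For example, with
 * _brk_end == 0x10000005, a hypothetical call such as
 *
 *	ptr = extend_brk(256, 64);
 *
 * computes mask = 63, rounds _brk_end up to 0x10000040, returns that address
 * (already memset to zero) and advances _brk_end by 256 bytes, hitting the
 * BUG_ON() if __brk_limit would be exceeded.
 */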
static void __init cleanup_highmap(void)
{
}

static void __init reserve_brk(void)
{
        if (_brk_end > _brk_start)
                memblock_reserve(__pa_symbol(_brk_start),
                                 _brk_end - _brk_start);

        /* Mark brk area as locked down and no longer taking any new allocations */
        _brk_start = 0;
}

u64 relocated_ramdisk;
#ifdef CONFIG_BLK_DEV_INITRD

static u64 __init get_ramdisk_image(void)
{
        u64 ramdisk_image = boot_params.hdr.ramdisk_image;

        ramdisk_image |= (u64)boot_params.ext_ramdisk_image << 32;
        return ramdisk_image;
}

static u64 __init get_ramdisk_size(void)
{
        u64 ramdisk_size = boot_params.hdr.ramdisk_size;

        ramdisk_size |= (u64)boot_params.ext_ramdisk_size << 32;
        return ramdisk_size;
}
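/*
 * Illustrative example (not in the original file): a boot loader that placed
 * the initrd at physical address 0x123456000 stores 0x23456000 in
 * boot_params.hdr.ramdisk_image and 0x1 in boot_params.ext_ramdisk_image;
 * the helpers above simply splice the two halves back together:
 *
 *	u64 addr = get_ramdisk_image();	// 0x23456000 | (0x1ULL << 32)
 */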
static void __init relocate_initrd(void)
{
        /* Assume only end is not page aligned */
        u64 ramdisk_image = get_ramdisk_image();
        u64 ramdisk_size  = get_ramdisk_size();
        u64 area_size     = PAGE_ALIGN(ramdisk_size);

        /* We need to move the initrd down into directly mapped mem */
        relocated_ramdisk = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),
                                                   area_size, PAGE_SIZE);

        if (!relocated_ramdisk)
                panic("Cannot find place for new RAMDISK of size %lld\n",
                      ramdisk_size);

        /* Note: this includes all the mem currently occupied by
           the initrd, we rely on that fact to keep the data intact. */
        memblock_reserve(relocated_ramdisk, area_size);
        initrd_start = relocated_ramdisk + PAGE_OFFSET;
        initrd_end   = initrd_start + ramdisk_size;
        printk(KERN_INFO "Allocated new RAMDISK: [mem %#010llx-%#010llx]\n",
               relocated_ramdisk, relocated_ramdisk + ramdisk_size - 1);

        copy_from_early_mem((void *)initrd_start, ramdisk_image, ramdisk_size);

        printk(KERN_INFO "Move RAMDISK from [mem %#010llx-%#010llx] to"
               " [mem %#010llx-%#010llx]\n",
               ramdisk_image, ramdisk_image + ramdisk_size - 1,
               relocated_ramdisk, relocated_ramdisk + ramdisk_size - 1);
}
static void __init early_reserve_initrd(void)
{
        /* Assume only end is not page aligned */
        u64 ramdisk_image = get_ramdisk_image();
        u64 ramdisk_size  = get_ramdisk_size();
        u64 ramdisk_end   = PAGE_ALIGN(ramdisk_image + ramdisk_size);

        if (!boot_params.hdr.type_of_loader ||
            !ramdisk_image || !ramdisk_size)
                return;         /* No initrd provided by bootloader */

        memblock_reserve(ramdisk_image, ramdisk_end - ramdisk_image);
}
static void __init reserve_initrd(void)
{
        /* Assume only end is not page aligned */
        u64 ramdisk_image = get_ramdisk_image();
        u64 ramdisk_size  = get_ramdisk_size();
        u64 ramdisk_end   = PAGE_ALIGN(ramdisk_image + ramdisk_size);
        u64 mapped_size;

        if (!boot_params.hdr.type_of_loader ||
            !ramdisk_image || !ramdisk_size)
                return;         /* No initrd provided by bootloader */

        mapped_size = memblock_mem_size(max_pfn_mapped);
        if (ramdisk_size >= (mapped_size>>1))
                panic("initrd too large to handle, "
                      "disabling initrd (%lld needed, %lld available)\n",
                      ramdisk_size, mapped_size>>1);

        printk(KERN_INFO "RAMDISK: [mem %#010llx-%#010llx]\n", ramdisk_image,
               ramdisk_end - 1);

        if (pfn_range_is_mapped(PFN_DOWN(ramdisk_image),
                                PFN_DOWN(ramdisk_end))) {
                /* All are mapped, easy case */
                initrd_start = ramdisk_image + PAGE_OFFSET;
                initrd_end = initrd_start + ramdisk_size;
                return;
        }

        relocate_initrd();

        memblock_free(ramdisk_image, ramdisk_end - ramdisk_image);
}
#else
static void __init early_reserve_initrd(void)
{
}
static void __init reserve_initrd(void)
{
}
#endif /* CONFIG_BLK_DEV_INITRD */
static void __init parse_setup_data(void)
{
        struct setup_data *data;
        u64 pa_data, pa_next;

        pa_data = boot_params.hdr.setup_data;
        while (pa_data) {
                u32 data_len, data_type;

                data = early_memremap(pa_data, sizeof(*data));
                data_len = data->len + sizeof(struct setup_data);
                data_type = data->type;
                pa_next = data->next;
                early_memunmap(data, sizeof(*data));

                switch (data_type) {
                case SETUP_E820_EXT:
                        parse_e820_ext(pa_data, data_len);
                        break;
                case SETUP_EFI:
                        parse_efi_setup(pa_data, data_len);
                        break;
                }
                pa_data = pa_next;
        }
}
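/*
 * Editor's sketch (not from the original source): setup_data entries form a
 * singly linked list in physical memory, chained through data->next and
 * terminated by 0, so a boot loader passing two blobs hands the kernel
 * something like:
 *
 *	boot_params.hdr.setup_data --> { type, len, next } --> { type, len, 0 }
 *
 * Each node is mapped only long enough to read its header, then unmapped
 * again before the payload is handed to the type-specific parser.
 */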
static void __init e820_reserve_setup_data(void)
{
        struct setup_data *data;
        u64 pa_data;

        pa_data = boot_params.hdr.setup_data;
        while (pa_data) {
                data = early_memremap(pa_data, sizeof(*data));
                e820_update_range(pa_data, sizeof(*data)+data->len,
                                  E820_RAM, E820_RESERVED_KERN);
                pa_data = data->next;
                early_memunmap(data, sizeof(*data));
        }

        sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
        memcpy(&e820_saved, &e820, sizeof(struct e820map));
        printk(KERN_INFO "extended physical RAM map:\n");
        e820_print_map("reserve setup_data");
}
static void __init memblock_x86_reserve_range_setup_data(void)
{
        struct setup_data *data;
        u64 pa_data;

        pa_data = boot_params.hdr.setup_data;
        while (pa_data) {
                data = early_memremap(pa_data, sizeof(*data));
                memblock_reserve(pa_data, sizeof(*data) + data->len);
                pa_data = data->next;
                early_memunmap(data, sizeof(*data));
        }
}
/*
 * --------- Crashkernel reservation ------------------------------
 */

#ifdef CONFIG_KEXEC_CORE

/* 16M alignment for crash kernel regions */
#define CRASH_ALIGN             (16 << 20)

/*
 * Keep the crash kernel below this limit. On 32-bit, earlier kernels would
 * limit the crash kernel to the low 512 MiB due to mapping restrictions.
 * On 64-bit, old kexec-tools need the crash kernel to be below 896 MiB.
 */
#ifdef CONFIG_X86_32
# define CRASH_ADDR_LOW_MAX     (512 << 20)
# define CRASH_ADDR_HIGH_MAX    (512 << 20)
#else
# define CRASH_ADDR_LOW_MAX     (896UL << 20)
# define CRASH_ADDR_HIGH_MAX    MAXMEM
#endif
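/*
 * Editor's note (illustrative, not from the original source): the options
 * parsed below come from the kernel command line, e.g.
 *
 *	crashkernel=256M            reserve 256 MiB, kernel picks the base
 *	crashkernel=512M,high       allow the reservation above 4 GiB
 *	crashkernel=512M,high crashkernel=128M,low
 *	                            ...plus 128 MiB below 4 GiB for DMA/swiotlb
 *	crashkernel=128M@16M        reserve 128 MiB at a fixed 16 MiB base
 */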
static int __init reserve_crashkernel_low(void)
{
        unsigned long long base, low_base = 0, low_size = 0;
        unsigned long total_low_mem;
        int ret;

        total_low_mem = memblock_mem_size(1UL << (32 - PAGE_SHIFT));

        /* crashkernel=Y,low */
        ret = parse_crashkernel_low(boot_command_line, total_low_mem, &low_size, &base);
        if (ret) {
                /*
                 * two parts from lib/swiotlb.c:
                 * -swiotlb size: user-specified with swiotlb= or default.
                 *
                 * -swiotlb overflow buffer: now hardcoded to 32k. We round it
                 * to 8M for other buffers that may need to stay low too. Also
                 * make sure we allocate enough extra low memory so that we
                 * don't run out of DMA buffers for 32-bit devices.
                 */
                low_size = max(swiotlb_size_or_default() + (8UL << 20), 256UL << 20);
        } else {
                /* passed with crashkernel=0,low ? */
                if (!low_size)
                        return 0;
        }

        low_base = memblock_find_in_range(low_size, 1ULL << 32, low_size, CRASH_ALIGN);
        if (!low_base) {
                pr_err("Cannot reserve %ldMB crashkernel low memory, please try smaller size.\n",
                       (unsigned long)(low_size >> 20));
                return -ENOMEM;
        }

        ret = memblock_reserve(low_base, low_size);
        if (ret) {
                pr_err("%s: Error reserving crashkernel low memblock.\n", __func__);
                return ret;
        }

        pr_info("Reserving %ldMB of low memory at %ldMB for crashkernel (System low RAM: %ldMB)\n",
                (unsigned long)(low_size >> 20),
                (unsigned long)(low_base >> 20),
                (unsigned long)(total_low_mem >> 20));

        crashk_low_res.start = low_base;
        crashk_low_res.end   = low_base + low_size - 1;
        insert_resource(&iomem_resource, &crashk_low_res);

        return 0;
}
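/*
 * Editor's note (illustrative): with the defaults above and no
 * crashkernel=Y,low option, low_size works out to
 * max(64 MiB swiotlb + 8 MiB, 256 MiB) = 256 MiB of extra low memory,
 * assuming the stock swiotlb default size.
 */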
static void __init reserve_crashkernel(void)
{
        unsigned long long crash_size, crash_base, total_mem;
        bool high = false;
        int ret;

        total_mem = memblock_phys_mem_size();

        ret = parse_crashkernel(boot_command_line, total_mem, &crash_size, &crash_base);
        if (ret != 0 || crash_size <= 0) {
                /* crashkernel=X,high */
                ret = parse_crashkernel_high(boot_command_line, total_mem,
                                             &crash_size, &crash_base);
                if (ret != 0 || crash_size <= 0)
                        return;
                high = true;
        }

        /* 0 means: find the address automatically */
        if (crash_base <= 0) {
                /*
                 * kexec wants the bzImage to be below CRASH_KERNEL_ADDR_MAX
                 */
                crash_base = memblock_find_in_range(CRASH_ALIGN,
                                                    high ? CRASH_ADDR_HIGH_MAX
                                                         : CRASH_ADDR_LOW_MAX,
                                                    crash_size, CRASH_ALIGN);
                if (!crash_base) {
                        pr_info("crashkernel reservation failed - No suitable area found.\n");
                        return;
                }
        } else {
                unsigned long long start;

                start = memblock_find_in_range(crash_base,
                                               crash_base + crash_size,
                                               crash_size, 1 << 20);
                if (start != crash_base) {
                        pr_info("crashkernel reservation failed - memory is in use.\n");
                        return;
                }
        }

        ret = memblock_reserve(crash_base, crash_size);
        if (ret) {
                pr_err("%s: Error reserving crashkernel memblock.\n", __func__);
                return;
        }

        if (crash_base >= (1ULL << 32) && reserve_crashkernel_low()) {
                memblock_free(crash_base, crash_size);
                return;
        }

        pr_info("Reserving %ldMB of memory at %ldMB for crashkernel (System RAM: %ldMB)\n",
                (unsigned long)(crash_size >> 20),
                (unsigned long)(crash_base >> 20),
                (unsigned long)(total_mem >> 20));

        crashk_res.start = crash_base;
        crashk_res.end   = crash_base + crash_size - 1;
        insert_resource(&iomem_resource, &crashk_res);
}
#else
static void __init reserve_crashkernel(void)
{
}
#endif /* CONFIG_KEXEC_CORE */
static struct resource standard_io_resources[] = {
        { .name = "dma1", .start = 0x00, .end = 0x1f,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "pic1", .start = 0x20, .end = 0x21,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "timer0", .start = 0x40, .end = 0x43,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "timer1", .start = 0x50, .end = 0x53,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "keyboard", .start = 0x60, .end = 0x60,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "keyboard", .start = 0x64, .end = 0x64,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "dma page reg", .start = 0x80, .end = 0x8f,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "pic2", .start = 0xa0, .end = 0xa1,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "dma2", .start = 0xc0, .end = 0xdf,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO },
        { .name = "fpu", .start = 0xf0, .end = 0xff,
                .flags = IORESOURCE_BUSY | IORESOURCE_IO }
};
void __init reserve_standard_io_resources(void)
{
        int i;

        /* request I/O space for devices used on all i[345]86 PCs */
        for (i = 0; i < ARRAY_SIZE(standard_io_resources); i++)
                request_resource(&ioport_resource, &standard_io_resources[i]);
}
static __init void reserve_ibft_region(void)
{
        unsigned long addr, size = 0;

        addr = find_ibft_region(&size);
        if (addr)
                memblock_reserve(addr, size);
}
static bool __init snb_gfx_workaround_needed(void)
{
        int i;
        u16 vendor, devid;
        static const __initconst u16 snb_ids[] = {
        };

        /* Assume no if something weird is going on with PCI */
        if (!early_pci_allowed())
                return false;

        vendor = read_pci_config_16(0, 2, 0, PCI_VENDOR_ID);
        if (vendor != 0x8086)
                return false;

        devid = read_pci_config_16(0, 2, 0, PCI_DEVICE_ID);
        for (i = 0; i < ARRAY_SIZE(snb_ids); i++)
                if (devid == snb_ids[i])
                        return true;

        return false;
}
/*
 * Sandy Bridge graphics has trouble with certain ranges, exclude
 * them from allocation.
 */
static void __init trim_snb_memory(void)
{
        static const __initconst unsigned long bad_pages[] = {
        };
        int i;

        if (!snb_gfx_workaround_needed())
                return;

        printk(KERN_DEBUG "reserving inaccessible SNB gfx pages\n");

        /*
         * Reserve all memory below the 1 MB mark that has not
         * already been reserved.
         */
        memblock_reserve(0, 1<<20);

        for (i = 0; i < ARRAY_SIZE(bad_pages); i++) {
                if (memblock_reserve(bad_pages[i], PAGE_SIZE))
                        printk(KERN_WARNING "failed to reserve 0x%08lx\n",
                               bad_pages[i]);
        }
}
/*
 * Here we put platform-specific memory range workarounds, i.e.
 * memory known to be corrupt or otherwise in need of being reserved on
 * specific platforms.
 *
 * If this gets used more widely it could use a real dispatch mechanism.
 */
static void __init trim_platform_memory_ranges(void)
{
        trim_snb_memory();
}
static void __init trim_bios_range(void)
{
        /*
         * A special case is the first 4Kb of memory;
         * This is a BIOS owned area, not kernel ram, but generally
         * not listed as such in the E820 table.
         *
         * This typically reserves additional memory (64KiB by default)
         * since some BIOSes are known to corrupt low memory. See the
         * Kconfig help text for X86_RESERVE_LOW.
         */
        e820_update_range(0, PAGE_SIZE, E820_RAM, E820_RESERVED);

        /*
         * special case: Some BIOSes report the PC BIOS
         * area (640K -> 1Mb) as RAM even though it is not.
         */
        e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);

        sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
}
/* called before trim_bios_range() to spare extra sanitize */
static void __init e820_add_kernel_range(void)
{
        u64 start = __pa_symbol(_text);
        u64 size = __pa_symbol(_end) - start;

        /*
         * Complain if .text .data and .bss are not marked as E820_RAM and
         * attempt to fix it by adding the range. We may have a confused BIOS,
         * or the user may have used memmap=exactmap or memmap=xxM$yyM to
         * exclude the kernel range. If we really are running on top of
         * non-RAM, we will crash later anyway.
         */
        if (e820_all_mapped(start, start + size, E820_RAM))
                return;

        pr_warn(".text .data .bss are not marked as E820_RAM!\n");
        e820_remove_range(start, size, E820_RAM, 0);
        e820_add_region(start, size, E820_RAM);
}
static unsigned reserve_low = CONFIG_X86_RESERVE_LOW << 10;

static int __init parse_reservelow(char *p)
{
        unsigned long long size;

        size = memparse(p, &p);
        reserve_low = size;
        return 0;
}
early_param("reservelow", parse_reservelow);

static void __init trim_low_memory_range(void)
{
        memblock_reserve(0, ALIGN(reserve_low, PAGE_SIZE));
}
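/*
 * Editor's note (illustrative): with the default CONFIG_X86_RESERVE_LOW=64,
 * reserve_low is 64 << 10 = 65536 bytes, so trim_low_memory_range() ends up
 * reserving the first 64 KiB of physical memory; the "reservelow=" command
 * line option overrides that amount.
 */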
/*
 * Dump out kernel offset information on panic.
 */
static int
dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
{
        if (kaslr_enabled()) {
                pr_emerg("Kernel Offset: 0x%lx from 0x%lx (relocation range: 0x%lx-0x%lx)\n",
                         kaslr_offset(), __START_KERNEL,
                         __START_KERNEL_map, MODULES_VADDR-1);
        } else {
                pr_emerg("Kernel Offset: disabled\n");
        }

        return 0;
}
/*
 * Determine if we were loaded by an EFI loader. If so, then we have also been
 * passed the efi memmap, systab, etc., so we should use these data structures
 * for initialization. Note, the efi init code path is determined by the
 * global efi_enabled. This allows the same kernel image to be used on existing
 * systems (with a traditional BIOS) as well as on EFI systems.
 */
/*
 * setup_arch - architecture-specific boot-time initializations
 *
 * Note: On x86_64, fixmaps are ready for use even before this is called.
 */
void __init setup_arch(char **cmdline_p)
{
        memblock_reserve(__pa_symbol(_text),
                         (unsigned long)__bss_stop - (unsigned long)_text);

        /*
         * Make sure page 0 is always reserved because on systems with
         * L1TF its contents can be leaked to user processes.
         */
        memblock_reserve(0, PAGE_SIZE);

        early_reserve_initrd();

        /*
         * At this point everything still needed from the boot loader
         * or BIOS or kernel text should be early reserved or marked not
         * RAM in e820. All other memory is free game.
         */

        memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));

        /*
         * copy kernel address range established so far and switch
         * to the proper swapper page table
         */
        clone_pgd_range(swapper_pg_dir     + KERNEL_PGD_BOUNDARY,
                        initial_page_table + KERNEL_PGD_BOUNDARY,
                        KERNEL_PGD_PTRS);
        load_cr3(swapper_pg_dir);
        /*
         * Note: Quark X1000 CPUs advertise PGE incorrectly and require
         * a cr3 based tlb flush, so the following __flush_tlb_all()
         * will not flush anything because the cpu quirk which clears
         * X86_FEATURE_PGE has not been invoked yet. Though due to the
         * load_cr3() above the TLB has been flushed already. The
         * quirk is invoked before subsequent calls to __flush_tlb_all()
         * so proper operation is guaranteed.
         */
        __flush_tlb_all();

        printk(KERN_INFO "Command line: %s\n", boot_command_line);
        /*
         * If we have OLPC OFW, we might end up relocating the fixmap due to
         * reserve_top(), so do this before touching the ioremap area.
         */
        olpc_ofw_detect();

        early_ioremap_init();

        setup_olpc_ofw_pgd();

        ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev);
        screen_info = boot_params.screen_info;
        edid_info = boot_params.edid_info;

        apm_info.bios = boot_params.apm_bios_info;
        ist_info = boot_params.ist_info;

        saved_video_mode = boot_params.hdr.vid_mode;
        bootloader_type = boot_params.hdr.type_of_loader;
        if ((bootloader_type >> 4) == 0xe) {
                bootloader_type &= 0xf;
                bootloader_type |= (boot_params.hdr.ext_loader_type+0x10) << 4;
        }
        bootloader_version  = bootloader_type & 0xf;
        bootloader_version |= boot_params.hdr.ext_loader_ver << 4;
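        /*
         * Editor's worked example (not from the original source): a loader
         * reporting type_of_loader = 0xE4, ext_loader_type = 0x15 and
         * ext_loader_ver = 0x02 ends up with
         *
         *	bootloader_type    = ((0x15 + 0x10) << 4) | 0x4 = 0x254
         *	bootloader_version = (0x02 << 4) | 0x4          = 0x24
         *
         * which is what the kernel.bootloader_type and
         * kernel.bootloader_version sysctls then report as decimal integers.
         */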
#ifdef CONFIG_BLK_DEV_RAM
        rd_image_start = boot_params.hdr.ram_size & RAMDISK_IMAGE_START_MASK;
        rd_prompt = ((boot_params.hdr.ram_size & RAMDISK_PROMPT_FLAG) != 0);
        rd_doload = ((boot_params.hdr.ram_size & RAMDISK_LOAD_FLAG) != 0);
#endif
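        /*
         * Editor's note (illustrative): hdr.ram_size packs the legacy
         * ramdisk fields into one 16-bit word, per the masks defined above:
         * bits 0-10 hold the image start, bit 14 is the "load" flag and
         * bit 15 the "prompt" flag. E.g. ram_size = 0xC001 yields
         * rd_image_start = 1, rd_doload = 1, rd_prompt = 1.
         */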
#ifdef CONFIG_EFI
        if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
                     EFI32_LOADER_SIGNATURE, 4)) {
                set_bit(EFI_BOOT, &efi.flags);
        } else if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
                     EFI64_LOADER_SIGNATURE, 4)) {
                set_bit(EFI_BOOT, &efi.flags);
                set_bit(EFI_64BIT, &efi.flags);
        }

        if (efi_enabled(EFI_BOOT))
                efi_memblock_x86_reserve_range();
#endif
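        /*
         * Editor's note (illustrative): the loader signature is a 4-byte
         * string stashed in boot_params by the EFI stub/loader; assuming the
         * usual definitions, EFI32_LOADER_SIGNATURE is "EL32" and
         * EFI64_LOADER_SIGNATURE is "EL64", which is how a 64-bit EFI boot
         * ends up with both EFI_BOOT and EFI_64BIT set here.
         */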
        x86_init.oem.arch_setup();

        iomem_resource.end = (1ULL << boot_cpu_data.x86_phys_bits) - 1;

        if (!boot_params.hdr.root_flags)
                root_mountflags &= ~MS_RDONLY;
        init_mm.start_code = (unsigned long) _text;
        init_mm.end_code = (unsigned long) _etext;
        init_mm.end_data = (unsigned long) _edata;
        init_mm.brk = _brk_end;

        mpx_mm_init(&init_mm);

        code_resource.start = __pa_symbol(_text);
        code_resource.end = __pa_symbol(_etext)-1;
        data_resource.start = __pa_symbol(_etext);
        data_resource.end = __pa_symbol(_edata)-1;
        bss_resource.start = __pa_symbol(__bss_start);
        bss_resource.end = __pa_symbol(__bss_stop)-1;
#ifdef CONFIG_CMDLINE_BOOL
#ifdef CONFIG_CMDLINE_OVERRIDE
        strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
#else
        if (builtin_cmdline[0]) {
                /* append boot loader cmdline to builtin */
                strlcat(builtin_cmdline, " ", COMMAND_LINE_SIZE);
                strlcat(builtin_cmdline, boot_command_line, COMMAND_LINE_SIZE);
                strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
        }
#endif
#endif

        strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
        *cmdline_p = command_line;
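        /*
         * Editor's note (illustrative): with CONFIG_CMDLINE="console=ttyS0"
         * built in and a boot loader passing "root=/dev/sda1", the kernel
         * ends up parsing "console=ttyS0 root=/dev/sda1"; with
         * CONFIG_CMDLINE_OVERRIDE the boot loader string would be ignored
         * and only "console=ttyS0" would be used.
         */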
        /*
         * x86_configure_nx() is called before parse_early_param() to detect
         * whether hardware doesn't support NX (so that the early EHCI debug
         * console setup can safely call set_fixmap()). It may then be called
         * again from within noexec_setup() during parsing early parameters
         * to honor the respective command line option.
         */
        x86_configure_nx();

        parse_early_param();

        /* after early param, so could get panic from serial */
        memblock_x86_reserve_range_setup_data();

        if (acpi_mps_check()) {
#ifdef CONFIG_X86_LOCAL_APIC
                disable_apic = 1;
#endif
                setup_clear_cpu_cap(X86_FEATURE_APIC);
        }

#ifdef CONFIG_PCI
        if (pci_early_dump_regs)
                early_dump_pci_devices();
#endif
        /* update the e820_saved too */
        e820_reserve_setup_data();
        finish_e820_parsing();

        if (efi_enabled(EFI_BOOT))
                efi_init();

        dmi_scan_machine();
        dmi_set_dump_stack_arch_desc();

        /*
         * VMware detection requires dmi to be available, so this
         * needs to be done after dmi_scan_machine, for the BP.
         */
        init_hypervisor_platform();

        /*
         * This needs to happen right after XENPV is set on xen and
         * kaiser_enabled is checked below in cleanup_highmap().
         */
        kaiser_check_boottime_disable();
        x86_init.resources.probe_roms();

        /* after parse_early_param, so could debug it */
        insert_resource(&iomem_resource, &code_resource);
        insert_resource(&iomem_resource, &data_resource);
        insert_resource(&iomem_resource, &bss_resource);

        e820_add_kernel_range();
        trim_bios_range();
#ifdef CONFIG_X86_32
        if (ppro_with_ram_bug()) {
                e820_update_range(0x70000000ULL, 0x40000ULL, E820_RAM,
                                  E820_RESERVED);
                sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
                printk(KERN_INFO "fixed physical RAM map:\n");
                e820_print_map("bad_ppro");
        }
#else
        early_gart_iommu_check();
#endif
        /*
         * partially used pages are not usable - thus
         * we are rounding upwards:
         */
        max_pfn = e820_end_of_ram_pfn();

        /* update e820 for memory not covered by WB MTRRs */
        mtrr_bp_init();
        if (mtrr_trim_uncached_memory(max_pfn))
                max_pfn = e820_end_of_ram_pfn();

        /*
         * This call is required when the CPU does not support PAT. If
         * mtrr_bp_init() invoked it already via pat_init() the call has no
         * effect.
         */

#ifdef CONFIG_X86_32
        /* max_low_pfn gets updated here */
        find_low_pfn_range();
#else
        /* How many end-of-memory variables you have, grandma! */
        /* need this before calling reserve_initrd */
        if (max_pfn > (1UL<<(32 - PAGE_SHIFT)))
                max_low_pfn = e820_end_of_low_ram_pfn();
        else
                max_low_pfn = max_pfn;

        high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
#endif
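        /*
         * Editor's note (illustrative): on a 64-bit box with roughly 8 GiB
         * of RAM, max_pfn ends up around 0x200000 (8 GiB / 4 KiB pages)
         * while max_low_pfn is capped at the end of RAM below 4 GiB; the
         * exact values depend on the e820 layout and the PCI hole.
         */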
        /* Find and reserve possible boot-time SMP configuration: */
        find_smp_config();

        reserve_ibft_region();
        early_alloc_pgt_buf();

        /*
         * Need to conclude brk, before memblock_x86_fill() it could use
         * memblock_find_in_range, could overlap with brk area.
         */
        reserve_brk();

        memblock_set_current_limit(ISA_END_ADDRESS);
        memblock_x86_fill();
        if (efi_enabled(EFI_BOOT)) {

        /*
         * The EFI specification says that boot service code won't be called
         * after ExitBootServices(). This is, in fact, a lie.
         */
        if (efi_enabled(EFI_MEMMAP))
                efi_reserve_boot_services();

        /* preallocate 4k for mptable mpc */
        early_reserve_e820_mpc_new();
#ifdef CONFIG_X86_CHECK_BIOS_CORRUPTION
        setup_bios_corruption_check();
#endif

#ifdef CONFIG_X86_32
        printk(KERN_DEBUG "initial memory mapped: [mem 0x00000000-%#010lx]\n",
               (max_pfn_mapped<<PAGE_SHIFT) - 1);
#endif

        reserve_real_mode();

        trim_platform_memory_ranges();
        trim_low_memory_range();
        early_trap_pf_init();

        memblock_set_current_limit(get_max_mapped());

        /*
         * NOTE: On x86-32, only from this point on, fixmaps are ready for use.
         */

#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
        if (init_ohci1394_dma_early)
                init_ohci1394_dma_on_all_controllers();
#endif
        /* Allocate bigger log buffer */
        setup_log_buf(1);

#if defined(CONFIG_ACPI) && defined(CONFIG_BLK_DEV_INITRD)
        acpi_initrd_override((void *)initrd_start, initrd_end - initrd_start);
#endif

        /*
         * Parse the ACPI tables for possible boot-time SMP configuration.
         */
        acpi_boot_table_init();

        early_acpi_boot_init();
        dma_contiguous_reserve(max_pfn_mapped << PAGE_SHIFT);
        /*
         * Reserve memory for crash kernel after SRAT is parsed so that it
         * won't consume hotpluggable memory.
         */
        reserve_crashkernel();

        memblock_find_dma_reserve();

#ifdef CONFIG_KVM_GUEST
        kvmclock_init();
#endif

        x86_init.paging.pagetable_init();

        if (boot_cpu_data.cpuid_level >= 0) {
                /* A CPU has %cr4 if and only if it has CPUID */
                mmu_cr4_features = __read_cr4();
                if (trampoline_cr4_features)
                        *trampoline_cr4_features = mmu_cr4_features;
        }
#ifdef CONFIG_X86_32
        /* sync back kernel address range */
        clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
                        swapper_pg_dir     + KERNEL_PGD_BOUNDARY,
                        KERNEL_PGD_PTRS);

        /*
         * sync back low identity map too. It is used for example
         * in the 32-bit EFI stub.
         */
        clone_pgd_range(initial_page_table,
                        swapper_pg_dir     + KERNEL_PGD_BOUNDARY,
                        min(KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
#endif
        generic_apic_probe();

        /*
         * Read APIC and some other early information from ACPI tables.
         */
        acpi_boot_init();

        /*
         * get boot-time SMP configuration:
         */
        if (smp_found_config)
                get_smp_config();

        prefill_possible_map();

        init_apic_mappings();
        io_apic_init_mappings();

        e820_reserve_resources();
        e820_mark_nosave_regions(max_low_pfn);

        x86_init.resources.reserve_resources();
#if defined(CONFIG_VGA_CONSOLE)
        if (!efi_enabled(EFI_BOOT) || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
                conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
        conswitchp = &dummy_con;
#endif

        x86_init.oem.banner();

        x86_init.timers.wallclock_init();

        arch_init_ideal_nops();

        register_refined_jiffies(CLOCK_TICK_RATE);

        if (efi_enabled(EFI_BOOT))
                efi_apply_memmap_quirks();
}
#ifdef CONFIG_X86_32

static struct resource video_ram_resource = {
        .name  = "Video RAM area",
        .flags = IORESOURCE_BUSY | IORESOURCE_MEM
};

void __init i386_reserve_resources(void)
{
        request_resource(&iomem_resource, &video_ram_resource);
        reserve_standard_io_resources();
}

#endif /* CONFIG_X86_32 */
static struct notifier_block kernel_offset_notifier = {
        .notifier_call = dump_kernel_offset
};

static int __init register_kernel_offset_dumper(void)
{
        atomic_notifier_chain_register(&panic_notifier_list,
                                       &kernel_offset_notifier);
        return 0;
}
__initcall(register_kernel_offset_dumper);