/*
 *  linux/arch/x86/kernel/head_64.S -- start in 32bit and switch to 64bit
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
 *  Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
 *  Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com>
 */

#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <asm/segment.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/msr.h>
#include <asm/cache.h>
#include <asm/processor-flags.h>
#include <asm/percpu.h>
#include "../entry/calling.h"
#include <asm/export.h>
#include <asm/nospec-branch.h>

#ifdef CONFIG_PARAVIRT
#include <asm/asm-offsets.h>
#include <asm/paravirt.h>
#define GET_CR2_INTO(reg) GET_CR2_INTO_RAX ; movq %rax, reg
#else
#define GET_CR2_INTO(reg) movq %cr2, reg
#define INTERRUPT_RETURN iretq
#endif

/* We are not able to switch in one step to the final KERNEL ADDRESS SPACE
 * because we need identity-mapped pages.
 */

#define pud_index(x)	(((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))

L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET_BASE)
L4_START_KERNEL = pgd_index(__START_KERNEL_map)
L3_START_KERNEL = pud_index(__START_KERNEL_map)
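
/*
 * Illustrative arithmetic: with 4-level paging, pgd_index(x) is
 * (x >> 39) & 511 and pud_index(x) is (x >> 30) & 511.  For
 * __START_KERNEL_map = 0xffffffff80000000 this gives L4_START_KERNEL = 511
 * and L3_START_KERNEL = 510, which is why the fixup code in startup_64
 * below patches PGD slot 511 and level3 slots 510/511.
 */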

	.text
	__HEAD
	.code64
	.globl startup_64
startup_64:
	/*
	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
	 * and someone has loaded an identity mapped page table
	 * for us.  These identity mapped page tables map all of the
	 * kernel pages and possibly all of memory.
	 *
	 * %rsi holds a physical pointer to real_mode_data.
	 *
	 * We come here either directly from a 64bit bootloader, or from
	 * arch/x86/boot/compressed/head_64.S.
	 *
	 * We only come here initially at boot, nothing else comes here.
	 *
	 * Since we may be loaded at an address different from what we were
	 * compiled to run at, we first fixup the physical addresses in our
	 * page tables and then reload them.
	 */

	/*
	 * Setup stack for verify_cpu(). "-8" because initial_stack is defined
	 * this way, see below. Our best guess is a NULL ptr for stack
	 * termination heuristics and we don't want to break anything which
	 * might depend on it (kgdb, ...).
	 */
	leaq	(__end_init_task - 8)(%rip), %rsp

	/* Sanitize CPU configuration */
	call	verify_cpu

	/*
	 * Compute the delta between the address I am compiled to run at and
	 * the address I am actually running at.
	 */
	leaq	_text(%rip), %rbp
	subq	$_text - __START_KERNEL_map, %rbp
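
	/*
	 * Illustrative example: if the kernel was linked to run at physical
	 * address 0x1000000 but the bootloader placed it at 0x5000000, the
	 * leaq above yields 0x5000000 and %rbp ends up holding the load
	 * delta 0x4000000, which is added to every physical address below.
	 */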

	/* Is the address not 2M aligned? */
	testl	$~PMD_PAGE_MASK, %ebp
	jnz	bad_address

	/*
	 * Is the address too large?
	 */
	leaq	_text(%rip), %rax
	shrq	$MAX_PHYSMEM_BITS, %rax
	jnz	bad_address

	/*
	 * Fixup the physical addresses in the page table
	 */
	addq	%rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip)

	addq	%rbp, level3_kernel_pgt + (510*8)(%rip)
	addq	%rbp, level3_kernel_pgt + (511*8)(%rip)

	addq	%rbp, level2_fixmap_pgt + (506*8)(%rip)
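
	/*
	 * Note: each slot patched above was filled at build time with a
	 * link-time physical address (see the page table data at the end of
	 * this file); adding the load delta in %rbp rebases those pointers
	 * to wherever the kernel actually landed in memory.
	 */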

	/*
	 * Set up the identity mapping for the switchover.  These
	 * entries should *NOT* have the global bit set!  This also
	 * creates a bunch of nonsense entries but that is fine --
	 * it avoids problems around wraparound.
	 */
	leaq	_text(%rip), %rdi
	leaq	early_level4_pgt(%rip), %rbx

	movq	%rdi, %rax
	shrq	$PGDIR_SHIFT, %rax

	leaq	(4096 + _KERNPG_TABLE)(%rbx), %rdx
	movq	%rdx, 0(%rbx,%rax,8)
	movq	%rdx, 8(%rbx,%rax,8)
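
	/*
	 * %rdx now carries _KERNPG_TABLE plus the physical address of the
	 * page right after the PGD; installing it in two consecutive PGD
	 * slots keeps the identity range mapped even if the image spills
	 * over into the next 512 GB PGD entry.
	 */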

	addq	$4096, %rdx
	movq	%rdi, %rax
	shrq	$PUD_SHIFT, %rax
	andl	$(PTRS_PER_PUD-1), %eax
	movq	%rdx, 4096(%rbx,%rax,8)
	incl	%eax
	andl	$(PTRS_PER_PUD-1), %eax
	movq	%rdx, 4096(%rbx,%rax,8)

	addq	$8192, %rbx
	movq	%rdi, %rax
	shrq	$PMD_SHIFT, %rdi
	addq	$(__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL), %rax
	leaq	(_end - 1)(%rip), %rcx
	shrq	$PMD_SHIFT, %rcx
	subq	%rdi, %rcx
	incl	%ecx

1:
	andq	$(PTRS_PER_PMD - 1), %rdi
	movq	%rax, (%rbx,%rdi,8)
	incq	%rdi
	addq	$PMD_SIZE, %rax
	decl	%ecx
	jnz	1b
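
	/*
	 * The loop above emits one 2 MB PSE entry per PMD slot, starting at
	 * the physical address of _text and stopping once _end is covered.
	 * Masking _PAGE_GLOBAL out of __PAGE_KERNEL_LARGE_EXEC ensures these
	 * throwaway identity mappings are flushed from the TLB once the
	 * kernel switches page tables.
	 */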

	/*
	 * Fixup the kernel text+data virtual addresses. Note that
	 * we might write invalid pmds, when the kernel is relocated
	 * cleanup_highmap() fixes this up along with the mappings
	 * beyond _end.
	 */
	leaq	level2_kernel_pgt(%rip), %rdi
	leaq	4096(%rdi), %r8
	/* See if it is a valid page table entry */
1:	testb	$1, 0(%rdi)
	jz	2f
	addq	%rbp, 0(%rdi)
	/* Go to the next page */
2:	addq	$8, %rdi
	cmp	%r8, %rdi
	jne	1b

	/* Fixup phys_base */
	addq	%rbp, phys_base(%rip)
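
	/*
	 * phys_base is 0 at build time; after this fixup it holds the load
	 * delta, which the physical/virtual address conversions (and the
	 * cr3 setup in secondary_startup_64) rely on from here on.
	 */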

	movq	$(early_level4_pgt - __START_KERNEL_map), %rax
	jmp	1f
ENTRY(secondary_startup_64)
	/*
	 * At this point the CPU runs in 64bit mode CS.L = 1 CS.D = 0,
	 * and someone has loaded a mapped page table.
	 *
	 * %rsi holds a physical pointer to real_mode_data.
	 *
	 * We come here either from startup_64 (using physical addresses)
	 * or from trampoline.S (using virtual addresses).
	 *
	 * Using virtual addresses from trampoline.S removes the need
	 * to have any identity mapped pages in the kernel page table
	 * after the boot processor executes this code.
	 */

	/* Sanitize CPU configuration */
	call	verify_cpu

	movq	$(init_level4_pgt - __START_KERNEL_map), %rax
1:

	/* Enable PAE and PSE, but defer PGE until kaiser_enabled is decided */
	movl	$(X86_CR4_PAE | X86_CR4_PSE), %ecx
	movq	%rcx, %cr4
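
	/*
	 * Note: with KAISER, kernel mappings must not be TLB-global, since
	 * global entries survive the CR3 switches that isolate the user and
	 * kernel page tables.  X86_CR4_PGE is therefore only set later,
	 * once kaiser_enabled has been determined.
	 */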

	/* Setup early boot stage 4 level pagetables. */
	addq	phys_base(%rip), %rax
	movq	%rax, %cr3

	/* Ensure I am executing from virtual addresses */
	movq	$1f, %rax
	ANNOTATE_RETPOLINE_SAFE
	jmp	*%rax
1:

	/* Check if nx is implemented */
	movl	$0x80000001, %eax
	cpuid
	movl	%edx,%edi

	/* Setup EFER (Extended Feature Enable Register) */
	movl	$MSR_EFER, %ecx
	rdmsr
	btsl	$_EFER_SCE, %eax	/* Enable System Call */
	btl	$20,%edi		/* No Execute supported? */
	jnc	1f
	btsl	$_EFER_NX, %eax
	btsq	$_PAGE_BIT_NX,early_pmd_flags(%rip)
1:	wrmsr				/* Make changes effective */
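
	/*
	 * rdmsr/wrmsr move MSR_EFER through %edx:%eax.  _EFER_SCE turns on
	 * syscall/sysret; EFER.NX is set only if CPUID 0x80000001 reported
	 * NX support in %edx bit 20, in which case early_pmd_flags also
	 * gains _PAGE_NX so the early mappings honor it.
	 */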

	/* Setup cr0 */
#define CR0_STATE	(X86_CR0_PE | X86_CR0_MP | X86_CR0_ET | \
			 X86_CR0_NE | X86_CR0_WP | X86_CR0_AM | \
			 X86_CR0_PG)
	movl	$CR0_STATE, %eax
	/* Make changes effective */
	movq	%rax, %cr0

	/* Setup a boot time stack */
	movq	initial_stack(%rip), %rsp

	/* zero EFLAGS after setting rsp */
	pushq	$0
	popfq

	/*
	 * We must switch to a new descriptor in kernel space for the GDT
	 * because soon the kernel won't have access anymore to the userspace
	 * addresses where we're currently running on. We have to do that here
	 * because in 32bit we couldn't load a 64bit linear address.
	 */
	lgdt	early_gdt_descr(%rip)

	/* set up data segments */
	xorl	%eax,%eax
	movl	%eax,%ds
	movl	%eax,%ss
	movl	%eax,%es

	/*
	 * We don't really need to load %fs or %gs, but load them anyway
	 * to kill any stale realmode selectors.  This allows execution
	 * under VT hardware.
	 */
	movl	%eax,%fs
	movl	%eax,%gs

	/* Set up %gs.
	 *
	 * The base of %gs always points to the bottom of the irqstack
	 * union.  If the stack protector canary is enabled, it is
	 * located at %gs:40.  Note that, on SMP, the boot cpu uses
	 * init data section till per cpu areas are set up.
	 */
	movl	$MSR_GS_BASE,%ecx
	movl	initial_gs(%rip),%eax
	movl	initial_gs+4(%rip),%edx
	wrmsr
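
	/*
	 * wrmsr takes the MSR index in %ecx and the 64-bit value split as
	 * %edx:%eax, which is why initial_gs is loaded as two 32-bit halves.
	 */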

	/* rsi is pointer to real mode structure with interesting info.
	   pass it to C */
	movq	%rsi, %rdi

	/* Finally jump to run C code and to be on real kernel address
	 * Since we are running on identity-mapped space we have to jump
	 * to the full 64bit address, this is only possible as indirect
	 * jump.  In addition we need to ensure %cs is set so we make this
	 * a far return.
	 *
	 * Note: do not change to far jump indirect with 64bit offset.
	 *
	 * AMD does not support far jump indirect with 64bit offset.
	 * AMD64 Architecture Programmer's Manual, Volume 3: states only
	 *	JMP FAR mem16:16 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *	JMP FAR mem16:32 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *
	 * Intel64 does support 64bit offset.
	 * Software Developer Manual Vol 2: states:
	 *	FF /5 JMP m16:16 Jump far, absolute indirect,
	 *		address given in m16:16
	 *	FF /5 JMP m16:32 Jump far, absolute indirect,
	 *		address given in m16:32.
	 *	REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
	 *		address given in m16:64.
	 */
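
	/*
	 * The sequence below therefore builds an iret-style frame by hand:
	 * after the three pushes the stack holds, top down, the target
	 * %rip, __KERNEL_CS, and a zero "return address", so lretq pops
	 * %rip and %cs and acts as a far jump with a full 64bit target on
	 * both Intel and AMD.
	 */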
	movq	initial_code(%rip),%rax
	pushq	$0		# fake return address to stop unwinder
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq
ENDPROC(secondary_startup_64)

#include "verify_cpu.S"

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Boot CPU0 entry point. It's called from play_dead(). Everything has been set
 * up already except stack. We just set up stack here. Then call
 * start_secondary().
 */
ENTRY(start_cpu0)
	movq	initial_stack(%rip),%rsp
	movq	initial_code(%rip),%rax
	pushq	$0		# fake return address to stop unwinder
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq
ENDPROC(start_cpu0)
#endif

	/* Both SMP bootup and ACPI suspend change these variables */
	__REFDATA
	.balign	8
	GLOBAL(initial_code)
	.quad	x86_64_start_kernel
	GLOBAL(initial_gs)
	.quad	INIT_PER_CPU_VAR(irq_stack_union)
	GLOBAL(initial_stack)
	.quad	init_thread_union+THREAD_SIZE-8
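
	/*
	 * Note: THREAD_SIZE-8 leaves the top quadword of the stack free for
	 * a NULL stack-termination word; this is the "-8" that the
	 * verify_cpu() stack setup at the top of this file mirrors.
	 */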
	__FINITDATA

bad_address:
	jmp	bad_address

	__INIT
ENTRY(early_idt_handler_array)
	# 80(%rsp) error code
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	.ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1
	pushq $0		# Dummy error code, to make stack frame uniform
	.endif
	pushq $i		# 72(%rsp) Vector number
	jmp early_idt_handler_common
	i = i + 1
	.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
	.endr
ENDPROC(early_idt_handler_array)
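
/*
 * Each vector's stub above is padded with 0xcc (int3) out to
 * EARLY_IDT_HANDLER_SIZE bytes, so the early IDT can be populated by
 * pointing vector i at early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE.
 */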

early_idt_handler_common:
	/*
	 * The stack is the hardware frame, an error code or zero, and the
	 * vector number.
	 */
	cld

	incl early_recursion_flag(%rip)

	/* The vector number is currently in the pt_regs->di slot. */
	pushq %rsi				/* pt_regs->si */
	movq 8(%rsp), %rsi			/* RSI = vector number */
	movq %rdi, 8(%rsp)			/* pt_regs->di = RDI */
	pushq %rdx				/* pt_regs->dx */
	pushq %rcx				/* pt_regs->cx */
	pushq %rax				/* pt_regs->ax */
	pushq %r8				/* pt_regs->r8 */
	pushq %r9				/* pt_regs->r9 */
	pushq %r10				/* pt_regs->r10 */
	pushq %r11				/* pt_regs->r11 */
	pushq %rbx				/* pt_regs->bx */
	pushq %rbp				/* pt_regs->bp */
	pushq %r12				/* pt_regs->r12 */
	pushq %r13				/* pt_regs->r13 */
	pushq %r14				/* pt_regs->r14 */
	pushq %r15				/* pt_regs->r15 */
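
	/*
	 * Together with the hardware frame, the error code, and the vector
	 * number already on the stack, the pushes above complete a struct
	 * pt_regs layout, so %rsp can be handed to C as a pt_regs pointer.
	 */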

	cmpq $14,%rsi		/* Page fault? */
	jnz 10f
	GET_CR2_INTO(%rdi)	/* Can clobber any volatile register if pv */
	call early_make_pgtable
	andl %eax,%eax
	jz 20f			/* All good */

10:
	movq %rsp,%rdi		/* RDI = pt_regs; RSI is already trapnr */
	call early_fixup_exception

20:
	decl early_recursion_flag(%rip)
	jmp restore_regs_and_iret
ENDPROC(early_idt_handler_common)

	__INITDATA

	.balign 4
GLOBAL(early_recursion_flag)
	.long 0

#define NEXT_PAGE(name) \
	.balign	PAGE_SIZE; \
GLOBAL(name)

#ifdef CONFIG_PAGE_TABLE_ISOLATION
/*
 * Each PGD needs to be 8k long and 8k aligned.  We do not
 * ever go out to userspace with these, so we do not
 * strictly *need* the second page, but this allows us to
 * have a single set_pgd() implementation that does not
 * need to worry about whether it has 4k or 8k to work
 * with.
 *
 * This ensures PGDs are 8k long:
 */
#define KAISER_USER_PGD_FILL	512
/* This ensures they are 8k-aligned: */
#define NEXT_PGD_PAGE(name) \
	.balign 2 * PAGE_SIZE; \
GLOBAL(name)
#else
#define NEXT_PGD_PAGE(name) NEXT_PAGE(name)
#define KAISER_USER_PGD_FILL	0
#endif
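
/*
 * KAISER_USER_PGD_FILL is the number of 8-byte entries in one 4k page:
 * emitting 512 extra zero quadwords after a kernel PGD reserves the
 * adjacent page that holds the shadow (user) copy of that PGD.
 */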

/* Automate the creation of 1 to 1 mapping pmd entries */
#define PMDS(START, PERM, COUNT)			\
	i = 0 ;						\
	.rept (COUNT) ;					\
	.quad	(START) + (i << PMD_SHIFT) + (PERM) ;	\
	i = i + 1 ;					\
	.endr
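
/*
 * Illustrative expansion: PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC,
 * PTRS_PER_PMD), as used for level2_ident_pgt below, emits 512 quadwords
 * of the form i*2MB + flags, i.e. 2 MB large-page entries covering the
 * first 1 GB of physical memory.
 */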

	__INITDATA
NEXT_PGD_PAGE(early_level4_pgt)
	.fill	511,8,0
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
	.fill	KAISER_USER_PGD_FILL,8,0

NEXT_PAGE(early_dynamic_pgts)
	.fill	512*EARLY_DYNAMIC_PAGE_TABLES,8,0

	.data

#ifndef CONFIG_XEN
NEXT_PGD_PAGE(init_level4_pgt)
	.fill	512,8,0
	.fill	KAISER_USER_PGD_FILL,8,0
#else
NEXT_PGD_PAGE(init_level4_pgt)
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.org	init_level4_pgt + L4_PAGE_OFFSET*8, 0
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.org	init_level4_pgt + L4_START_KERNEL*8, 0
	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
	.fill	KAISER_USER_PGD_FILL,8,0
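
/*
 * The .org directives above zero-pad the table so the level3_ident_pgt
 * and level3_kernel_pgt pointers land exactly in PGD slots L4_PAGE_OFFSET
 * and L4_START_KERNEL (511), matching the direct-map and kernel-text
 * regions.
 */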

NEXT_PAGE(level3_ident_pgt)
	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.fill	511, 8, 0
NEXT_PAGE(level2_ident_pgt)
	/* Since I easily can, map the first 1G.
	 * Don't set NX because code runs from these pages.
	 */
	PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
#endif
	.fill	KAISER_USER_PGD_FILL,8,0

NEXT_PAGE(level3_kernel_pgt)
	.fill	L3_START_KERNEL,8,0
	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
	.quad	level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
	.quad	level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE

NEXT_PAGE(level2_kernel_pgt)
	/*
	 * 512 MB kernel mapping. We spend a full page on this pagetable
	 * anyway.
	 *
	 * The kernel code+data+bss must not be bigger than that.
	 *
	 * (NOTE: at +512MB starts the module area, see MODULES_VADDR.
	 *  If you want to increase this then increase MODULES_VADDR
	 *  too.)
	 */
	PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
		KERNEL_IMAGE_SIZE/PMD_SIZE)
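
/*
 * Arithmetic note: KERNEL_IMAGE_SIZE/PMD_SIZE is 512MB/2MB = 256 entries
 * (double that if KERNEL_IMAGE_SIZE is raised to 1GB for KASLR), so the
 * mapping never outgrows the 512 slots of this single page.
 */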

NEXT_PAGE(level2_fixmap_pgt)
	.fill	506,8,0
	.quad	level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
	/* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
	.fill	5,8,0

NEXT_PAGE(level1_fixmap_pgt)
	.fill	512,8,0

#undef PMDS

	.data
	.align 16
	.globl early_gdt_descr
early_gdt_descr:
	.word	GDT_ENTRIES*8-1
early_gdt_descr_base:
	.quad	INIT_PER_CPU_VAR(gdt_page)

ENTRY(phys_base)
	/* This must match the first entry in level2_kernel_pgt */
	.quad	0x0000000000000000
EXPORT_SYMBOL(phys_base)

#include "../../x86/xen/xen-head.S"

	__PAGE_ALIGNED_BSS
NEXT_PAGE(empty_zero_page)
	.skip PAGE_SIZE
EXPORT_SYMBOL(empty_zero_page)