/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  linux/arch/x86/kernel/head_64.S -- start in 32bit and switch to 64bit
 *
 *  Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
 *  Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
 *  Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
 *  Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
 *  Copyright (C) 2005 Eric Biederman <ebiederm@xmission.com>
 */
#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <linux/pgtable.h>
#include <asm/segment.h>
#include <asm/cache.h>
#include <asm/processor-flags.h>
#include <asm/percpu.h>
#include "../entry/calling.h"
#include <asm/export.h>
#include <asm/nospec-branch.h>
#include <asm/fixmap.h>
/*
 * We are not able to switch in one step to the final KERNEL ADDRESS SPACE
 * because we need identity-mapped pages.
 */
#define l4_index(x)	(((x) >> 39) & 511)
#define pud_index(x)	(((x) >> PUD_SHIFT) & (PTRS_PER_PUD-1))

L4_PAGE_OFFSET	= l4_index(__PAGE_OFFSET_BASE_L4)
L4_START_KERNEL	= l4_index(__START_KERNEL_map)

L3_START_KERNEL	= pud_index(__START_KERNEL_map)
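/*
 * Worked example, assuming the default (non-relocated) __START_KERNEL_map
 * of 0xffffffff80000000: bits 47:39 of that address are all ones, so
 * L4_START_KERNEL = l4_index(__START_KERNEL_map) = 511, and with
 * PUD_SHIFT == 30, L3_START_KERNEL = pud_index(__START_KERNEL_map) = 510.
 * These match the "= 511" and "= 510" notes next to the page-table
 * entries further down.
 */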
SYM_CODE_START_NOALIGN(startup_64)
	/*
	 * At this point the CPU runs in 64bit mode with CS.L = 1 and
	 * CS.D = 0, and someone has loaded an identity mapped page table
	 * for us. These identity mapped page tables map all of the
	 * kernel pages and possibly all of memory.
	 *
	 * %rsi holds a physical pointer to real_mode_data.
	 *
	 * We come here either directly from a 64bit bootloader, or from
	 * arch/x86/boot/compressed/head_64.S.
	 *
	 * We only come here initially at boot; nothing else comes here.
	 *
	 * Since we may be loaded at an address different from what we were
	 * compiled to run at, we first fix up the physical addresses in our
	 * page tables and then reload them.
	 */
	/* Set up the stack for verify_cpu(), similar to initial_stack below */
	leaq	(__end_init_task - FRAME_SIZE)(%rip), %rsp

	leaq	_text(%rip), %rdi
	/*
	 * initial_gs points to initial fixed_percpu_data struct with storage for
	 * the stack protector canary. Global pointer fixups are needed at this
	 * stage, so apply them as is done in fixup_pointer(), and initialize %gs
	 * such that the canary can be accessed at %gs:40 for subsequent C calls.
	 */
	movl	$MSR_GS_BASE, %ecx
	movq	initial_gs(%rip), %rax
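	/*
	 * A minimal sketch of the WRMSR idiom used to load such a base:
	 * WRMSR takes the MSR index in %ecx and the 64-bit value split
	 * across %edx:%eax, so a value held in %rax must be split first:
	 *
	 *	movq	%rax, %rdx
	 *	shrq	$32, %rdx
	 *	wrmsr
	 */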
	call	startup_64_setup_env
#ifdef CONFIG_AMD_MEM_ENCRYPT
	/*
	 * Activate SEV/SME memory encryption if supported/enabled. This needs to
	 * be done now, since this also includes setup of the SEV-SNP CPUID table,
	 * which needs to be done before any CPUID instructions are executed in
	 * subsequent code.
	 */
#endif
	/* Now switch to __KERNEL_CS so IRET works reliably */
	leaq	.Lon_kernel_cs(%rip), %rax
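	/*
	 * A minimal sketch of the %cs reload idiom used for this switch
	 * (assuming the usual push/LRETQ pattern): LRETQ pops a return
	 * address and a %cs selector together, so pushing the new selector
	 * and the target address reloads %cs in a single step:
	 *
	 *	pushq	$__KERNEL_CS
	 *	pushq	%rax			# .Lon_kernel_cs
	 *	lretq
	 */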
	/* Sanitize CPU configuration */
	/*
	 * Perform pagetable fixups. Additionally, if SME is active, encrypt
	 * the kernel and retrieve the modifier (SME encryption mask if SME
	 * is active) to be added to the initial pgdir entry that will be
	 * programmed into CR3.
	 */
	leaq	_text(%rip), %rdi
	/* Form the CR3 value being sure to include the CR3 modifier */
	addq	$(early_top_pgt - __START_KERNEL_map), %rax

SYM_CODE_END(startup_64)
SYM_CODE_START(secondary_startup_64)
	/*
	 * At this point the CPU runs in 64bit mode with CS.L = 1 and
	 * CS.D = 0, and someone has loaded a mapped page table.
	 *
	 * %rsi holds a physical pointer to real_mode_data.
	 *
	 * We come here either from startup_64 (using physical addresses)
	 * or from trampoline.S (using virtual addresses).
	 *
	 * Using virtual addresses from trampoline.S removes the need
	 * to have any identity mapped pages in the kernel page table
	 * after the boot processor executes this code.
	 */
	/* Sanitize CPU configuration */
	/*
	 * The secondary_startup_64_no_verify entry point is only used by
	 * SEV-ES guests. In those guests the call to verify_cpu() would cause
	 * #VC exceptions which cannot be handled at this stage of secondary
	 * CPU boot.
	 *
	 * All non SEV-ES systems, especially Intel systems, need to execute
	 * verify_cpu() above to make sure NX is enabled.
	 */
SYM_INNER_LABEL(secondary_startup_64_no_verify, SYM_L_GLOBAL)
	/*
	 * Retrieve the modifier (SME encryption mask if SME is active) to be
	 * added to the initial pgdir entry that will be programmed into CR3.
	 */
#ifdef CONFIG_AMD_MEM_ENCRYPT
	movq	sme_me_mask, %rax
#else
	xorq	%rax, %rax
#endif
	/* Form the CR3 value being sure to include the CR3 modifier */
	addq	$(init_top_pgt - __START_KERNEL_map), %rax
#ifdef CONFIG_X86_MCE
	/*
	 * Preserve CR4.MCE if the kernel will enable #MC support.
	 * Clearing MCE may fault in some environments (that also force #MC
	 * support). Any machine check that occurs before #MC support is fully
	 * configured will crash the system regardless of the CR4.MCE value
	 * set here.
	 */
	andl	$X86_CR4_MCE, %ecx
#else
	movl	$0, %ecx
#endif
	/* Enable PAE mode, PGE and LA57 */
	orl	$(X86_CR4_PAE | X86_CR4_PGE), %ecx
#ifdef CONFIG_X86_5LEVEL
	testl	$1, __pgtable_l5_enabled(%rip)
	jz	1f
	orl	$X86_CR4_LA57, %ecx
1:
#endif
	/* Setup early boot stage 4-/5-level pagetables. */
	addq	phys_base(%rip), %rax
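	/*
	 * Worked example of the fixup above: the pgdir symbol had
	 * __START_KERNEL_map subtracted earlier, leaving its byte offset
	 * into the kernel image; adding the runtime phys_base turns that
	 * offset into the physical address that CR3 requires.
	 */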
	/*
	 * For SEV guests: Verify that the C-bit is correct. A malicious
	 * hypervisor could lie about the C-bit position to perform a ROP
	 * attack on the guest by writing to the unencrypted stack and
	 * waiting for the next RET instruction.
	 * %rsi carries pointer to realmode data and is callee-clobbered.
	 * Save and restore it.
	 */
	/*
	 * Switch to new page-table
	 *
	 * For the boot CPU this switches to early_top_pgt which still has the
	 * identity mappings present. The secondary CPUs will switch to the
	 * init_top_pgt here, away from the trampoline_pgd and unmap the
	 * identity mapped ranges.
	 */
	/*
	 * Do a global TLB flush after the CR3 switch to make sure the TLB
	 * entries from the identity mapping are flushed.
	 */
	xorq	$X86_CR4_PGE, %rcx
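	/*
	 * A minimal sketch of the flush idiom around this XOR (the
	 * surrounding CR4 reads/writes are assumed): writing CR4 with PGE
	 * toggled off and then restoring the original value flushes all
	 * TLB entries, including global ones:
	 *
	 *	movq	%cr4, %rax		# save original CR4
	 *	movq	%rax, %rcx
	 *	xorq	$X86_CR4_PGE, %rcx
	 *	movq	%rcx, %cr4		# PGE 1->0: global TLB flush
	 *	movq	%rax, %cr4		# restore PGE
	 */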
	/* Ensure I am executing from virtual addresses */
	movq	$1f, %rax
	ANNOTATE_RETPOLINE_SAFE
	jmp	*%rax
1:
	ANNOTATE_NOENDBR // above
	/*
	 * We must switch to a new descriptor in kernel space for the GDT
	 * because soon the kernel won't have access anymore to the userspace
	 * addresses where we're currently running. We have to do that here
	 * because in 32bit we couldn't load a 64bit linear address.
	 */
	lgdt	early_gdt_descr(%rip)
	/* set up data segments */
	/*
	 * We don't really need to load %fs or %gs, but load them anyway
	 * to kill any stale realmode selectors. This allows execution
	 * under VT hardware.
	 */
	/*
	 * The base of %gs always points to fixed_percpu_data. If the
	 * stack protector canary is enabled, it is located at %gs:40.
	 * Note that, on SMP, the boot cpu uses init data section until
	 * the per cpu areas are set up.
	 */
	movl	$MSR_GS_BASE,%ecx
	movl	initial_gs(%rip),%eax
	movl	initial_gs+4(%rip),%edx
	/*
	 * Setup a boot time stack - Any secondary CPU will have lost its stack
	 * by now because the cr3-switch above unmaps the real-mode stack.
	 */
	movq	initial_stack(%rip), %rsp
	/* Setup and Load IDT */

	/* Check if nx is implemented */
	movl	$0x80000001, %eax
	/* Setup EFER (Extended Feature Enable Register) */

	/*
	 * Preserve current value of EFER for comparison and to skip
	 * EFER writes if no change was made (for TDX guest)
	 */
	btsl	$_EFER_SCE, %eax	/* Enable System Call */
	btl	$20,%edi		/* No Execute supported? */
	btsq	$_PAGE_BIT_NX,early_pmd_flags(%rip)
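	/*
	 * A sketch of the conditional flow assumed around the BT tests
	 * above (the branch itself is shorthand, not the code as-is):
	 * CPUID leaf 0x80000001 reports NX support in EDX bit 20, and only
	 * when that bit is set are EFER.NX and the early PMD NX flag
	 * enabled:
	 *
	 *	btl	$20, %edi		# No Execute supported?
	 *	jnc	1f			# no: leave EFER.NX clear
	 *	btsl	$_EFER_NX, %eax
	 *	btsq	$_PAGE_BIT_NX, early_pmd_flags(%rip)
	 * 1:
	 */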
	/* Avoid writing EFER if no change was made (for TDX guest) */
	wrmsr				/* Make changes effective */

	movl	$CR0_STATE, %eax
	/* Make changes effective */

	/* zero EFLAGS after setting rsp */
	/* rsi is pointer to real mode structure with interesting info.
	   pass it to C */
	/*
	 * Jump to run C code and to be on a real kernel address.
	 * Since we are running on identity-mapped space we have to jump
	 * to the full 64bit address; this is only possible with an indirect
	 * jump. In addition we need to ensure %cs is set, so we make this
	 * a far return.
	 *
	 * Note: do not change to far jump indirect with 64bit offset.
	 *
	 * AMD does not support far jump indirect with 64bit offset.
	 * AMD64 Architecture Programmer's Manual, Volume 3: states only
	 *	JMP FAR mem16:16 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *	JMP FAR mem16:32 FF /5 Far jump indirect,
	 *		with the target specified by a far pointer in memory.
	 *
	 * Intel64 does support 64bit offset.
	 * Software Developer Manual Vol 2: states:
	 *	FF /5 JMP m16:16 Jump far, absolute indirect,
	 *		address given in m16:16
	 *	FF /5 JMP m16:32 Jump far, absolute indirect,
	 *		address given in m16:32.
	 *	REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
	 *		address given in m16:64.
	 */
	pushq	$.Lafter_lret	# put return address on stack for unwinder
	xorl	%ebp, %ebp	# clear frame pointer
	movq	initial_code(%rip), %rax
	pushq	$__KERNEL_CS	# set correct cs
	pushq	%rax		# target address in negative space
	lretq
.Lafter_lret:
	ANNOTATE_NOENDBR
SYM_CODE_END(secondary_startup_64)

#include "verify_cpu.S"
#include "sev_verify_cbit.S"
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Boot CPU0 entry point. It's called from play_dead(). Everything has been set
 * up already except stack. We just set up stack here. Then call
 * start_secondary() via .Ljump_to_C_code.
 */
SYM_CODE_START(start_cpu0)
	movq	initial_stack(%rip), %rsp
SYM_CODE_END(start_cpu0)
#endif
#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
 * VC Exception handler used during early boot when running on kernel
 * addresses, but before the switch to the idt_table can be made.
 * The early_idt_handler_array can't be used here because it calls into a lot
 * of __init code and this handler is also used during CPU offlining/onlining.
 * Therefore this handler ends up in the .text section so that it stays around
 * when .init.text is freed.
 */
SYM_CODE_START_NOALIGN(vc_boot_ghcb)
	UNWIND_HINT_IRET_REGS offset=8

	movq	ORIG_RAX(%rsp), %rsi
	movq	initial_vc_handler(%rip), %rax
	ANNOTATE_RETPOLINE_SAFE
	call	*%rax

	/* Remove Error Code */

	iretq
SYM_CODE_END(vc_boot_ghcb)
#endif
	/* Both SMP bootup and ACPI suspend change these variables */
SYM_DATA(initial_code,	.quad x86_64_start_kernel)
SYM_DATA(initial_gs,	.quad INIT_PER_CPU_VAR(fixed_percpu_data))
#ifdef CONFIG_AMD_MEM_ENCRYPT
SYM_DATA(initial_vc_handler,	.quad handle_vc_boot_ghcb)
#endif

/*
 * The FRAME_SIZE gap is a convention which helps the in-kernel unwinder
 * reliably detect the end of the stack.
 */
SYM_DATA(initial_stack, .quad init_thread_union + THREAD_SIZE - FRAME_SIZE)
SYM_CODE_START(early_idt_handler_array)
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	.if ((EXCEPTION_ERRCODE_MASK >> i) & 1) == 0
		UNWIND_HINT_IRET_REGS
		pushq $0	# Dummy error code, to make stack frame uniform
	.else
		UNWIND_HINT_IRET_REGS offset=8
	.endif
	pushq $i		# 72(%rsp) Vector number
	jmp early_idt_handler_common
	UNWIND_HINT_IRET_REGS
	i = i + 1
	.fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
	.endr
SYM_CODE_END(early_idt_handler_array)
	ANNOTATE_NOENDBR // early_idt_handler_array[NUM_EXCEPTION_VECTORS]
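/*
 * Layout note with a worked example: the .fill directive above pads each
 * stub with 0xcc (INT3) up to the next multiple of EARLY_IDT_HANDLER_SIZE,
 * so stub i always starts at
 *	early_idt_handler_array + i * EARLY_IDT_HANDLER_SIZE
 * and the early IDT can be populated with a simple multiply-and-add.
 */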
SYM_CODE_START_LOCAL(early_idt_handler_common)
	UNWIND_HINT_IRET_REGS offset=16
	/*
	 * The stack is the hardware frame, an error code or zero, and the
	 * vector number.
	 */
	incl	early_recursion_flag(%rip)
	/* The vector number is currently in the pt_regs->di slot. */
	pushq %rsi				/* pt_regs->si */
	movq 8(%rsp), %rsi			/* RSI = vector number */
	movq %rdi, 8(%rsp)			/* pt_regs->di = RDI */
	pushq %rdx				/* pt_regs->dx */
	pushq %rcx				/* pt_regs->cx */
	pushq %rax				/* pt_regs->ax */
	pushq %r8				/* pt_regs->r8 */
	pushq %r9				/* pt_regs->r9 */
	pushq %r10				/* pt_regs->r10 */
	pushq %r11				/* pt_regs->r11 */
	pushq %rbx				/* pt_regs->bx */
	pushq %rbp				/* pt_regs->bp */
	pushq %r12				/* pt_regs->r12 */
	pushq %r13				/* pt_regs->r13 */
	pushq %r14				/* pt_regs->r14 */
	pushq %r15				/* pt_regs->r15 */
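	/*
	 * Descriptive note: the pushes above, together with the hardware
	 * exception frame and the error code/vector slots, lay out a
	 * complete struct pt_regs on the stack, which is why %rsp itself
	 * can be handed to C as the pt_regs pointer just below.
	 */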
	movq %rsp,%rdi		/* RDI = pt_regs; RSI is already trapnr */
	call do_early_exception

	decl early_recursion_flag(%rip)
	jmp restore_regs_and_return_to_kernel
SYM_CODE_END(early_idt_handler_common)
#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
 * VC Exception handler used during very early boot. The
 * early_idt_handler_array can't be used because it returns via the
 * paravirtualized INTERRUPT_RETURN and pv-ops don't work that early.
 *
 * XXX it does, fix this.
 *
 * This handler will end up in the .init.text section and not be
 * available to boot secondary CPUs.
 */
SYM_CODE_START_NOALIGN(vc_no_ghcb)
	UNWIND_HINT_IRET_REGS offset=8

	movq	ORIG_RAX(%rsp), %rsi

	/* Remove Error Code */

	/* Pure iret required here - don't use INTERRUPT_RETURN */
	iretq
SYM_CODE_END(vc_no_ghcb)
#endif
#define SYM_DATA_START_PAGE_ALIGNED(name)			\
	SYM_START(name, SYM_L_GLOBAL, .balign PAGE_SIZE)
#ifdef CONFIG_PAGE_TABLE_ISOLATION
/*
 * Each PGD needs to be 8k long and 8k aligned. We do not
 * ever go out to userspace with these, so we do not
 * strictly *need* the second page, but this allows us to
 * have a single set_pgd() implementation that does not
 * need to worry about whether it has 4k or 8k to work
 * with.
 *
 * This ensures PGDs are 8k long:
 */
#define PTI_USER_PGD_FILL	512
/* This ensures they are 8k-aligned: */
#define SYM_DATA_START_PTI_ALIGNED(name) \
	SYM_START(name, SYM_L_GLOBAL, .balign 2 * PAGE_SIZE)
#else
#define SYM_DATA_START_PTI_ALIGNED(name) \
	SYM_DATA_START_PAGE_ALIGNED(name)
#define PTI_USER_PGD_FILL	0
#endif
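/*
 * Worked example (PTI enabled): PTI_USER_PGD_FILL of 512 quadwords is
 * 512 * 8 = 4096 bytes, exactly one extra 4k page, and the
 * .balign 2 * PAGE_SIZE keeps the pair 8k-aligned, so the kernel and
 * user halves of a PGD differ in a single address bit.
 */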
/* Automate the creation of 1 to 1 mapping pmd entries */
#define PMDS(START, PERM, COUNT)	i = 0 ; .rept (COUNT) ;	\
	.quad	(START) + (i << PMD_SHIFT) + (PERM) ;		\
	i = i + 1 ; .endr
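/*
 * Worked example, assuming 4k pages with PMD_SHIFT == 21 and
 * PTRS_PER_PMD == 512: PMDS(0, perm, PTRS_PER_PMD) expands to 512 .quad
 * entries spaced 1 << 21 = 2 MiB apart, i.e. one 2 MiB large-page
 * mapping per entry, covering the first 1 GiB.
 */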
SYM_DATA_START_PTI_ALIGNED(early_top_pgt)
	.fill	512,8,0
	.fill	PTI_USER_PGD_FILL,8,0
SYM_DATA_END(early_top_pgt)

SYM_DATA_START_PAGE_ALIGNED(early_dynamic_pgts)
	.fill	512*EARLY_DYNAMIC_PAGE_TABLES,8,0
SYM_DATA_END(early_dynamic_pgts)

SYM_DATA(early_recursion_flag, .long 0)
#if defined(CONFIG_XEN_PV) || defined(CONFIG_PVH)
SYM_DATA_START_PTI_ALIGNED(init_top_pgt)
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.org	init_top_pgt + L4_PAGE_OFFSET*8, 0
	.quad	level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.org	init_top_pgt + L4_START_KERNEL*8, 0
	/* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
	.fill	PTI_USER_PGD_FILL,8,0
SYM_DATA_END(init_top_pgt)
SYM_DATA_START_PAGE_ALIGNED(level3_ident_pgt)
	.quad	level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.fill	511, 8, 0
SYM_DATA_END(level3_ident_pgt)
SYM_DATA_START_PAGE_ALIGNED(level2_ident_pgt)
	/*
	 * Since I easily can, map the first 1G.
	 * Don't set NX because code runs from these pages.
	 *
	 * Note: This sets _PAGE_GLOBAL regardless of whether
	 * the CPU supports it or it is enabled. But,
	 * the CPU should ignore the bit.
	 */
	PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
SYM_DATA_END(level2_ident_pgt)
#else
SYM_DATA_START_PTI_ALIGNED(init_top_pgt)
	.fill	512,8,0
	.fill	PTI_USER_PGD_FILL,8,0
SYM_DATA_END(init_top_pgt)
#endif
#ifdef CONFIG_X86_5LEVEL
SYM_DATA_START_PAGE_ALIGNED(level4_kernel_pgt)
	.fill	511,8,0
	.quad	level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
SYM_DATA_END(level4_kernel_pgt)
#endif
SYM_DATA_START_PAGE_ALIGNED(level3_kernel_pgt)
	.fill	L3_START_KERNEL,8,0
	/* (2^48-(2*1024*1024*1024)-((2^39)*511))/(2^30) = 510 */
	.quad	level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE_NOENC
	.quad	level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE_NOENC
SYM_DATA_END(level3_kernel_pgt)
SYM_DATA_START_PAGE_ALIGNED(level2_kernel_pgt)
	/*
	 * Kernel high mapping.
	 *
	 * The kernel code+data+bss must be located below KERNEL_IMAGE_SIZE in
	 * virtual address space, which is 1 GiB if RANDOMIZE_BASE is enabled,
	 * 512 MiB otherwise.
	 *
	 * (NOTE: after that starts the module area, see MODULES_VADDR.)
	 *
	 * This table is eventually used by the kernel during normal runtime.
	 * Care must be taken to clear out undesired bits later, like _PAGE_RW
	 * or _PAGE_GLOBAL in some cases.
	 */
	PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
SYM_DATA_END(level2_kernel_pgt)
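/*
 * Worked example for the PMDS() use above, assuming the 1 GiB
 * KERNEL_IMAGE_SIZE quoted in the comment: 1 GiB / 2 MiB PMD_SIZE
 * = 512 entries, i.e. exactly one full page table page.
 */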
SYM_DATA_START_PAGE_ALIGNED(level2_fixmap_pgt)
	.fill	(512 - 4 - FIXMAP_PMD_NUM),8,0
	pgtno = 0
	.rept (FIXMAP_PMD_NUM)
	.quad level1_fixmap_pgt + (pgtno << PAGE_SHIFT) - __START_KERNEL_map \
		+ _PAGE_TABLE_NOENC;
	pgtno = pgtno + 1
	.endr
	/* 6 MB reserved space + a 2MB hole */
	.fill	4,8,0
SYM_DATA_END(level2_fixmap_pgt)
SYM_DATA_START_PAGE_ALIGNED(level1_fixmap_pgt)
	.rept (FIXMAP_PMD_NUM)
	.fill	512,8,0
	.endr
SYM_DATA_END(level1_fixmap_pgt)
SYM_DATA(early_gdt_descr,		.word GDT_ENTRIES*8-1)
SYM_DATA_LOCAL(early_gdt_descr_base,	.quad INIT_PER_CPU_VAR(gdt_page))
	/* This must match the first entry in level2_kernel_pgt */
SYM_DATA(phys_base, .quad 0x0)
EXPORT_SYMBOL(phys_base)
#include "../../x86/xen/xen-head.S"
SYM_DATA_START_PAGE_ALIGNED(empty_zero_page)
	.skip PAGE_SIZE
SYM_DATA_END(empty_zero_page)
EXPORT_SYMBOL(empty_zero_page)