/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 1991, 1992, 1993 Linus Torvalds
 *
 * head.S contains the 32-bit startup code.
 *
 * NOTE!!! Startup happens at absolute address 0x00001000, which is also where
 * the page directory will exist. The startup code will be overwritten by
 * the page directory. [According to comments etc elsewhere on a compressed
 * kernel it will end up at 0x1000 + 1Mb I hope so as I assume this. - AC]
 *
 * Page 0 is deliberately kept safe, since System Management Mode code in
 * laptops may need to access the BIOS data stored there. This is also
 * useful for future device drivers that either access the BIOS via VM86
 * mode.
 *
 * High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996
 */
#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/segment.h>
#include <asm/processor-flags.h>
#include <asm/asm-offsets.h>
#include <asm/bootparam.h>
#include <asm/desc_defs.h>
#include <asm/trapnr.h>
/*
 * Locally defined symbols should be marked hidden:
 */
/*
 * This macro gives the relative virtual address of X, i.e. the offset of X
 * from startup_32. This is the same as the link-time virtual address of X,
 * since startup_32 is at 0, but defining it this way tells the
 * assembler/linker that we do not want the actual run-time address of X. This
 * prevents the linker from trying to create unwanted run-time relocation
 * entries for the reference when the compressed kernel is linked as PIE.
 *
 * A reference X(%reg) will result in the link-time VA of X being stored with
 * the instruction, and a run-time R_X86_64_RELATIVE relocation entry that
 * adds the 64-bit base address where the kernel is loaded.
 *
 * Replacing it with (X-startup_32)(%reg) results in the offset being stored,
 * and no run-time relocation.
 *
 * The macro should be used as a displacement with a base register containing
 * the run-time address of startup_32 [i.e. rva(X)(%reg)], or as an immediate
 * [i.e. $ rva(X)].
 *
 * This macro can only be used from within the .head.text section, since the
 * expression requires startup_32 to be in the same section as the code being
 * assembled.
 */
#define rva(X) ((X) - startup_32)
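
/*
 * For illustration: with %ebp holding the run-time address of startup_32,
 * a symbol's run-time address is formed as base + offset, e.g.
 *
 *	leal	rva(gdt)(%ebp), %eax	# %eax = run-time address of gdt
 *
 * which assembles with no relocation on the gdt reference.
 */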
SYM_FUNC_START(startup_32)
	/*
	 * 32bit entry is 0 and it is ABI so immutable!
	 * If we come here directly from a bootloader,
	 * kernel(text+data+bss+brk), ramdisk, zero_page, command line
	 * all need to be under the 4G limit.
	 */
	/*
	 * Calculate the delta between where we were compiled to run
	 * at and where we were actually loaded at. This can only be done
	 * with a short local call on x86. Nothing else will tell us what
	 * address we are running at. The reserved chunk of the real-mode
	 * data at 0x1e4 (defined as a scratch field) is used as the stack
	 * for this calculation. Only 4 bytes are needed.
	 */
	leal	(BP_scratch+4)(%esi), %esp
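	/*
	 * The delta is recovered with the classic call/pop idiom: the call
	 * pushes the run-time address of the local label, and subtracting
	 * the label's link-time offset leaves the load address. In essence:
	 *
	 *	call	1f
	 * 1:	popl	%ebp
	 *	subl	$ rva(1b), %ebp
	 */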
	/* Load new GDT with the 64bit segments using 32bit descriptor */
	leal	rva(gdt)(%ebp), %eax

	/* Load segment registers with our descriptors */
	movl	$__BOOT_DS, %eax

	/* Setup a stack and load CS from current GDT */
	leal	rva(boot_stack_end)(%ebp), %esp

	leal	rva(1f)(%ebp), %eax

	/* Setup Exception handling for SEV-ES */
	call	startup32_load_idt

	/* Make sure cpu supports long mode. */
	/*
	 * Compute the delta between where we were compiled to run at
	 * and where the code will actually run at.
	 *
	 * %ebp contains the address we are loaded at by the boot loader and %ebx
	 * contains the address where we should move the kernel image temporarily
	 * for safe in-place decompression.
	 */

#ifdef CONFIG_RELOCATABLE
#ifdef CONFIG_EFI_STUB
/*
 * If we were loaded via the EFI LoadImage service, startup_32 will be at an
 * offset to the start of the space allocated for the image. efi_pe_entry will
 * set up image_offset to tell us where the image actually starts, so that we
 * can use the full available buffer.
 *	image_offset = startup_32 - image_base
 * Otherwise image_offset will be zero and has no effect on the calculations.
 */
	subl	rva(image_offset)(%ebp), %ebx
	movl	BP_kernel_alignment(%esi), %eax

	cmpl	$LOAD_PHYSICAL_ADDR, %ebx

	movl	$LOAD_PHYSICAL_ADDR, %ebx

	/* Target address to relocate to for decompression */
	addl	BP_init_size(%esi), %ebx
	subl	$ rva(_end), %ebx
	/*
	 * Prepare for entering 64 bit mode
	 */

	/* Enable PAE mode */
	orl	$X86_CR4_PAE, %eax
	/*
	 * Build early 4G boot pagetable
	 */

	/*
	 * If SEV is active then set the encryption mask in the page tables.
	 * This will ensure that when the kernel is copied and decompressed
	 * it will be done so encrypted.
	 */
	call	get_sev_encryption_bit
#ifdef CONFIG_AMD_MEM_ENCRYPT
	subl	$32, %eax	/* Encryption bit is always above bit 31 */
	bts	%eax, %edx	/* Set encryption mask for page tables */
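	/*
	 * For example, with the C-bit at position 47 (as on many EPYC parts),
	 * %eax ends up as 47 - 32 = 15, so bit 15 of %edx gets set and %edx
	 * can serve as the upper 32 bits of each 64-bit page table entry.
	 */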
	/*
	 * Set MSR_AMD64_SEV_ENABLED_BIT in sev_status so that
	 * startup32_check_sev_cbit() will do a check. sev_enable() will
	 * initialize sev_status with all the bits reported by
	 * MSR_AMD64_SEV later, but only MSR_AMD64_SEV_ENABLED_BIT
	 * needs to be set for now.
	 */
	movl	$1, rva(sev_status)(%ebp)
	/* Initialize Page tables to 0 */
	leal	rva(pgtable)(%ebx), %edi
	movl	$(BOOT_INIT_PGT_SIZE/4), %ecx
	/* Build Level 4 */
	leal	rva(pgtable + 0)(%ebx), %edi
	leal	0x1007(%edi), %eax

	/* Build Level 3 */
	leal	rva(pgtable + 0x1000)(%ebx), %edi
	leal	0x1007(%edi), %eax
1:	movl	%eax, 0x00(%edi)
	addl	%edx, 0x04(%edi)
	addl	$0x00001000, %eax
	/* Build Level 2 */
	leal	rva(pgtable + 0x2000)(%ebx), %edi
	movl	$0x00000183, %eax
1:	movl	%eax, 0(%edi)
	addl	$0x00200000, %eax
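	/*
	 * 0x00000183 = PRESENT | RW | PSE | GLOBAL, i.e. a 2M large page.
	 * Each iteration advances the physical address by 2M (0x00200000),
	 * so 2048 entries across the four Level 2 tables map the full 4G.
	 */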
	/* Enable the boot page tables */
	leal	rva(pgtable)(%ebx), %eax

	/* Enable Long mode in EFER (Extended Feature Enable Register) */
	btsl	$_EFER_LME, %eax

	/* After gdt is loaded */
	movl	$__BOOT_TSS, %eax
	/*
	 * Setup for the jump to 64bit mode
	 *
	 * When the jump is performed we will be in long mode but
	 * in 32bit compatibility mode with EFER.LME = 1, CS.L = 0, CS.D = 1
	 * (and in turn EFER.LMA = 1). To jump into 64bit mode we use
	 * the new gdt/idt that has __KERNEL_CS with CS.L = 1.
	 * We place all of the values on our mini stack so lret can
	 * be used to perform that far jump.
	 */
	leal	rva(startup_64)(%ebp), %eax
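	/*
	 * In essence, the far jump below amounts to:
	 *
	 *	pushl	$__KERNEL_CS
	 *	pushl	%eax		# 64-bit entry point
	 *	lret
	 */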
#ifdef CONFIG_EFI_MIXED
	movl	rva(efi32_boot_args)(%ebp), %edi
	leal	rva(efi64_stub_entry)(%ebp), %eax
	movl	rva(efi32_boot_args+4)(%ebp), %esi
	movl	rva(efi32_boot_args+8)(%ebp), %edx	// saved bootparams pointer
	/*
	 * efi_pe_entry uses MS calling convention, which requires 32 bytes of
	 * shadow space on the stack even if all arguments are passed in
	 * registers. We also need an additional 8 bytes for the space that
	 * would be occupied by the return address, and this also results in
	 * the correct stack alignment for entry.
	 */
	leal	rva(efi_pe_entry)(%ebp), %eax
	movl	%edi, %ecx			// MS calling convention
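	/*
	 * That is 32 + 8 = 40 bytes reserved below the stack pointer,
	 * mirroring what the callee would see after a native CALL, which
	 * also restores the 16-byte alignment the MS ABI expects at entry.
	 */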
	/* Check if the C-bit position is correct when SEV is active */
	call	startup32_check_sev_cbit

	/* Enter paged protected Mode, activating Long Mode */
	movl	$CR0_STATE, %eax

	/* Jump from 32bit compatibility mode into 64bit mode. */
SYM_FUNC_END(startup_32)
#ifdef CONFIG_EFI_MIXED
SYM_FUNC_START(efi32_stub_entry)
	add	$0x4, %esp		/* Discard return address */

	movl	%esi, rva(efi32_boot_args+8)(%ebp)
SYM_INNER_LABEL(efi32_pe_stub_entry, SYM_L_LOCAL)
	movl	%ecx, rva(efi32_boot_args)(%ebp)
	movl	%edx, rva(efi32_boot_args+4)(%ebp)
	movb	$0, rva(efi_is64)(%ebp)
	/* Save firmware GDTR and code/data selectors */
	sgdtl	rva(efi32_boot_gdt)(%ebp)
	movw	%cs, rva(efi32_boot_cs)(%ebp)
	movw	%ds, rva(efi32_boot_ds)(%ebp)

	/* Store firmware IDT descriptor */
	sidtl	rva(efi32_boot_idt)(%ebp)
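	/*
	 * Note: in 32-bit mode sgdtl/sidtl store a 6-byte pseudo-descriptor,
	 * a 16-bit limit followed by a 32-bit base, so the save areas above
	 * need to be at least 6 bytes each.
	 */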
	/* Disable paging */
	btrl	$X86_CR0_PG_BIT, %eax
SYM_FUNC_END(efi32_stub_entry)
SYM_CODE_START(startup_64)
	/*
	 * 64bit entry is 0x200 and it is ABI so immutable!
	 * We come here either from startup_32 or directly from a
	 * 64bit bootloader.
	 *
	 * If we come here from a bootloader, kernel(text+data+bss+brk),
	 * ramdisk, zero_page, command line could be above 4G.
	 * We depend on an identity mapped page table being provided
	 * that maps our entire kernel(text+data+bss+brk), zero page
	 * and command line.
	 */

	/* Setup data segments. */
	/*
	 * Compute the decompressed kernel start address. It is where
	 * we were loaded at aligned to a 2M boundary. %rbp contains the
	 * decompressed kernel start address.
	 *
	 * If it is a relocatable kernel then decompress and run the kernel
	 * from the load address aligned to a 2MB boundary, otherwise
	 * decompress and run the kernel from LOAD_PHYSICAL_ADDR.
	 *
	 * We cannot rely on the calculation done in 32-bit mode, since we
	 * may have been invoked via the 64-bit entry point.
	 */

	/* Start with the delta to where the kernel will run at. */
#ifdef CONFIG_RELOCATABLE
	leaq	startup_32(%rip) /* - $startup_32 */, %rbp
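	/*
	 * The RIP-relative lea yields the run-time address of startup_32;
	 * since its link-time address is 0 (hence the "- $startup_32" note),
	 * %rbp ends up holding the load address directly.
	 */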
#ifdef CONFIG_EFI_STUB
/*
 * If we were loaded via the EFI LoadImage service, startup_32 will be at an
 * offset to the start of the space allocated for the image. efi_pe_entry will
 * set up image_offset to tell us where the image actually starts, so that we
 * can use the full available buffer.
 *	image_offset = startup_32 - image_base
 * Otherwise image_offset will be zero and has no effect on the calculations.
 */
	movl	image_offset(%rip), %eax
	movl	BP_kernel_alignment(%rsi), %eax

	cmpq	$LOAD_PHYSICAL_ADDR, %rbp

	movq	$LOAD_PHYSICAL_ADDR, %rbp

	/* Target address to relocate to for decompression */
	movl	BP_init_size(%rsi), %ebx
	subl	$ rva(_end), %ebx

	/* Set up the stack */
	leaq	rva(boot_stack_end)(%rbx), %rsp
	/*
	 * At this point we are in long mode with 4-level paging enabled,
	 * but we might want to enable 5-level paging or vice versa.
	 *
	 * The problem is that we cannot do it directly. Setting or clearing
	 * CR4.LA57 in long mode would trigger #GP. So we need to switch off
	 * long mode and paging first.
	 *
	 * We also need a trampoline in lower memory to switch over from
	 * 4- to 5-level paging for cases when the bootloader puts the kernel
	 * above 4G, but didn't enable 5-level paging for us.
	 *
	 * The same trampoline can be used to switch from 5- to 4-level paging
	 * mode, such as when starting a 4-level paging kernel via kexec()
	 * while the original kernel ran in 5-level paging mode.
	 *
	 * For the trampoline, we need the top page table to reside in lower
	 * memory as we don't have a way to load 64-bit values into CR3 in
	 * 32-bit mode.
	 *
	 * We go through the trampoline even if we don't have to: if we're
	 * already in a desired paging mode. This way the trampoline code gets
	 * tested on every boot.
	 */
	/* Make sure we have GDT with 32-bit code segment */
	leaq	gdt64(%rip), %rax

	/* Reload CS so IRET returns to a CS actually in the GDT */
	leaq	.Lon_kernel_cs(%rip), %rax
#ifdef CONFIG_AMD_MEM_ENCRYPT
	/*
	 * Now that the stage1 interrupt handlers are set up, #VC exceptions from
	 * CPUID instructions can be properly handled for SEV-ES guests.
	 *
	 * For SEV-SNP, the CPUID table also needs to be set up in advance of any
	 * CPUID instructions being issued, so go ahead and do that now via
	 * sev_enable(), which will also handle the rest of the SEV-related
	 * detection/setup to ensure that has been done in advance of any dependent
	 * code.
	 */
	movq	%rsi, %rdi		/* real mode address */
	call	sev_enable
#endif
	/*
	 * paging_prepare() sets up the trampoline and checks if we need to
	 * enable 5-level paging.
	 *
	 * paging_prepare() returns a two-quadword structure which lands
	 * into RDX:RAX:
	 * - Address of the trampoline is returned in RAX.
	 * - Non zero RDX means trampoline needs to enable 5-level
	 *   paging.
	 *
	 * RSI holds real mode data and needs to be preserved across
	 * this function call.
	 */
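	/*
	 * In C terms this is roughly (per the SysV ABI, a 16-byte struct is
	 * returned in RAX:RDX):
	 *
	 *	struct paging_config {
	 *		unsigned long trampoline_start;
	 *		unsigned long l5_required;
	 *	};
	 */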
	movq	%rsi, %rdi		/* real mode address */
	call	paging_prepare
	/* Save the trampoline address in RCX */
	movq	%rax, %rcx

	/*
	 * Load the address of trampoline_return() into RDI.
	 * It will be used by the trampoline to return to the main code.
	 */
	leaq	trampoline_return(%rip), %rdi
	/* Switch to compatibility mode (CS.L = 0 CS.D = 1) via far return */
	leaq	TRAMPOLINE_32BIT_CODE_OFFSET(%rax), %rax

trampoline_return:
	/* Restore the stack, the 32-bit trampoline uses its own stack */
	leaq	rva(boot_stack_end)(%rbx), %rsp
	/*
	 * cleanup_trampoline() would restore trampoline memory.
	 *
	 * RDI is address of the page table to use instead of page table
	 * in trampoline memory (if required).
	 *
	 * RSI holds real mode data and needs to be preserved across
	 * this function call.
	 */
	leaq	rva(top_pgtable)(%rbx), %rdi
	call	cleanup_trampoline
	/*
	 * Copy the compressed kernel to the end of our buffer
	 * where decompression in place becomes safe.
	 */
	leaq	(_bss-8)(%rip), %rsi
	leaq	rva(_bss-8)(%rbx), %rdi
	movl	$(_bss - startup_32), %ecx
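	/*
	 * The copy proceeds backwards, highest quadword first (hence the -8
	 * bias on both pointers), so it stays safe even though the source
	 * and destination buffers may overlap; in essence:
	 *
	 *	shrq	$3, %rcx
	 *	std
	 *	rep	movsq
	 *	cld
	 */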
	/*
	 * The GDT may get overwritten either during the copy we just did or
	 * during extract_kernel below. To avoid any issues, repoint the GDTR
	 * to the new copy of the GDT.
	 */
	leaq	rva(gdt64)(%rbx), %rax
	leaq	rva(gdt)(%rbx), %rdx
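	/*
	 * The relocated gdt address in %rdx is patched into the base field
	 * of the gdt64 pseudo-descriptor (at offset 2, past the 16-bit
	 * limit) before handing it to lgdt, roughly:
	 *
	 *	movq	%rdx, 2(%rax)
	 *	lgdt	(%rax)
	 */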
	/*
	 * Jump to the relocated address.
	 */
	leaq	rva(.Lrelocated)(%rbx), %rax
	jmp	*%rax
SYM_CODE_END(startup_64)
#ifdef CONFIG_EFI_STUB
SYM_FUNC_START(efi64_stub_entry)
	and	$~0xf, %rsp			/* realign the stack */
	movq	%rdx, %rbx			/* save boot_params pointer */
	leaq	rva(startup_64)(%rax), %rax
SYM_FUNC_END(efi64_stub_entry)
SYM_FUNC_ALIAS(efi_stub_entry, efi64_stub_entry)
SYM_FUNC_START_LOCAL_NOALIGN(.Lrelocated)

	/*
	 * Clear BSS (stack is currently empty)
	 */
	leaq	_bss(%rip), %rdi
	leaq	_ebss(%rip), %rcx

	/* Pass boot_params to initialize_identity_maps() */
	call	initialize_identity_maps
	/*
	 * Do the extraction, and jump to the new kernel.
	 */
	pushq	%rsi			/* Save the real mode argument */
	movq	%rsi, %rdi		/* real mode address */
	leaq	boot_heap(%rip), %rsi	/* malloc area for decompression */
	leaq	input_data(%rip), %rdx	/* input_data */
	movl	input_len(%rip), %ecx	/* input_len */
	movq	%rbp, %r8		/* output target address */
	movl	output_len(%rip), %r9d	/* decompressed length, end of relocs */
	call	extract_kernel		/* returns kernel location in %rax */
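	/*
	 * The registers above follow the SysV ABI argument order for what
	 * is, roughly, the C prototype (see misc.c):
	 *
	 *	void *extract_kernel(void *rmode, void *heap,
	 *			     unsigned char *input_data,
	 *			     unsigned long input_len,
	 *			     unsigned char *output,
	 *			     unsigned long output_len);
	 */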
	/*
	 * Jump to the decompressed kernel.
	 */
	jmp	*%rax
SYM_FUNC_END(.Lrelocated)
/*
 * This is the 32-bit trampoline that will be copied over to low memory.
 *
 * RDI contains the return address (might be above 4G).
 * ECX contains the base address of the trampoline memory.
 * Non zero RDX means trampoline needs to enable 5-level paging.
 */
SYM_CODE_START(trampoline_32bit_src)
	/* Set up data and stack segments */
	movl	$__KERNEL_DS, %eax

	/* Set up new stack */
	leal	TRAMPOLINE_32BIT_STACK_END(%ecx), %esp
	/* Disable paging */
	btrl	$X86_CR0_PG_BIT, %eax

	/* Check what paging mode we want to be in after the trampoline */

	/* We want 5-level paging: don't touch CR3 if it already points to 5-level page tables */
	testl	$X86_CR4_LA57, %eax

	/* We want 4-level paging: don't touch CR3 if it already points to 4-level page tables */
	testl	$X86_CR4_LA57, %eax

	/* Point CR3 to the trampoline's new top level page table */
	leal	TRAMPOLINE_32BIT_PGTABLE_OFFSET(%ecx), %eax
	/* Set EFER.LME = 1 as a precaution in case the hypervisor pulls the rug */
	btsl	$_EFER_LME, %eax
	/* Avoid writing EFER if no change was made (for TDX guest) */
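	/*
	 * btsl conveniently sets the carry flag when the bit was already
	 * set, so a jc around the wrmsr skips the write when EFER would
	 * be unchanged.
	 */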
#ifdef CONFIG_X86_MCE
	/*
	 * Preserve CR4.MCE if the kernel will enable #MC support.
	 * Clearing MCE may fault in some environments (that also force #MC
	 * support). Any machine check that occurs before #MC support is fully
	 * configured will crash the system regardless of the CR4.MCE value set
	 * here.
	 */
	andl	$X86_CR4_MCE, %eax
	/* Enable PAE and LA57 (if required) paging modes */
	orl	$X86_CR4_PAE, %eax
	orl	$X86_CR4_LA57, %eax
	/* Calculate address of paging_enabled() once we are executing in the trampoline */
	leal	.Lpaging_enabled - trampoline_32bit_src + TRAMPOLINE_32BIT_CODE_OFFSET(%ecx), %eax
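	/*
	 * The expression rebases the label: (.Lpaging_enabled -
	 * trampoline_32bit_src) is its offset within the trampoline source,
	 * and the copied code lives at %ecx + TRAMPOLINE_32BIT_CODE_OFFSET,
	 * so the sum is the label's address inside the low-memory copy.
	 */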
	/* Prepare the stack for far return to Long Mode */

	/* Enable paging again. */
	btsl	$X86_CR0_PG_BIT, %eax

SYM_CODE_END(trampoline_32bit_src)
SYM_FUNC_START_LOCAL_NOALIGN(.Lpaging_enabled)
	/* Return from the trampoline */
	jmp	*%rdi
SYM_FUNC_END(.Lpaging_enabled)
	/*
	 * The trampoline code has a size limit.
	 * Make sure we fail to compile if the trampoline code grows
	 * beyond TRAMPOLINE_32BIT_CODE_SIZE bytes.
	 */
	.org	trampoline_32bit_src + TRAMPOLINE_32BIT_CODE_SIZE
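	/*
	 * This works because .org can only move the location counter
	 * forward: if the code above ever exceeds
	 * TRAMPOLINE_32BIT_CODE_SIZE, the directive would have to move it
	 * backwards and assembly fails.
	 */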
SYM_FUNC_START_LOCAL_NOALIGN(.Lno_longmode)
	/* This isn't an x86-64 CPU, so hang intentionally, we cannot continue */
1:	hlt
	jmp	1b
SYM_FUNC_END(.Lno_longmode)
#include "../../kernel/verify_cpu.S"
SYM_DATA_START_LOCAL(gdt64)
	.word	gdt_end - gdt - 1
	.quad	gdt - gdt64
SYM_DATA_END(gdt64)

SYM_DATA_START_LOCAL(gdt)
	.word	gdt_end - gdt - 1
	.quad	0x00cf9a000000ffff	/* __KERNEL32_CS */
	.quad	0x00af9a000000ffff	/* __KERNEL_CS */
	.quad	0x00cf92000000ffff	/* __KERNEL_DS */
	.quad	0x0080890000000000	/* TS descriptor */
	.quad	0x0000000000000000	/* TS continued */
SYM_DATA_END_LABEL(gdt, SYM_L_LOCAL, gdt_end)
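/*
 * Decoding __KERNEL_CS (0x00af9a000000ffff) as an example: base 0,
 * limit 0xfffff with 4K granularity, access byte 0x9a (present, DPL 0,
 * executable/readable code), flags nibble 0xa (G = 1, L = 1, D = 0),
 * i.e. a 64-bit code segment. __KERNEL32_CS differs only in its flags
 * nibble (0xc: G = 1, D = 1, L = 0), making it 32-bit code.
 */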
SYM_DATA_START(boot_idt_desc)
	.word	boot_idt_end - boot_idt - 1
SYM_DATA_END(boot_idt_desc)

SYM_DATA_START(boot_idt)
	.rept	BOOT_IDT_ENTRIES
	.quad	0
	.quad	0
	.endr
SYM_DATA_END_LABEL(boot_idt, SYM_L_GLOBAL, boot_idt_end)
#ifdef CONFIG_AMD_MEM_ENCRYPT
SYM_DATA_START(boot32_idt_desc)
	.word	boot32_idt_end - boot32_idt - 1
SYM_DATA_END(boot32_idt_desc)

SYM_DATA_START(boot32_idt)
	.rept	32
	.quad	0
	.endr
SYM_DATA_END_LABEL(boot32_idt, SYM_L_GLOBAL, boot32_idt_end)
#endif
#ifdef CONFIG_EFI_STUB
SYM_DATA(image_offset, .long 0)
#endif
#ifdef CONFIG_EFI_MIXED
SYM_DATA_LOCAL(efi32_boot_args, .long 0, 0, 0)
SYM_DATA(efi_is64, .byte 1)
#define ST32_boottime		60 // offsetof(efi_system_table_32_t, boottime)
#define BS32_handle_protocol	88 // offsetof(efi_boot_services_32_t, handle_protocol)
#define LI32_image_base		32 // offsetof(efi_loaded_image_32_t, image_base)
SYM_FUNC_START(efi32_pe_entry)
/*
 * efi_status_t efi32_pe_entry(efi_handle_t image_handle,
 *			       efi_system_table_32_t *sys_table)
 */
	pushl	%eax				// dummy push to allocate loaded_image

	pushl	%ebx				// save callee-save registers

	call	verify_cpu			// check for long mode support
	movl	$0x80000003, %eax		// EFI_UNSUPPORTED
	/* Get the loaded image protocol pointer from the image handle */
	leal	-4(%ebp), %eax
	pushl	%eax				// &loaded_image
	leal	rva(loaded_image_proto)(%ebx), %eax
	pushl	%eax				// pass the GUID address
	pushl	8(%ebp)				// pass the image handle
	/*
	 * Note the alignment of the stack frame.
	 *   handle		<-- 16-byte aligned on entry by ABI
	 *   return address
	 *   frame pointer
	 *   loaded_image	<-- local variable
	 *   saved %ebx		<-- 16-byte aligned here
	 *   saved %edi
	 *   &loaded_image
	 *   &loaded_image_proto
	 *   handle		<-- 16-byte aligned for call to handle_protocol
	 */
	movl	12(%ebp), %eax			// sys_table
	movl	ST32_boottime(%eax), %eax	// sys_table->boottime
	call	*BS32_handle_protocol(%eax)	// sys_table->boottime->handle_protocol
	addl	$12, %esp			// restore argument space
	movl	8(%ebp), %ecx			// image_handle
	movl	12(%ebp), %edx			// sys_table
	movl	-4(%ebp), %esi			// loaded_image
	movl	LI32_image_base(%esi), %esi	// loaded_image->image_base
	movl	%ebx, %ebp			// startup_32 for efi32_pe_stub_entry
	/*
	 * We need to set the image_offset variable here since startup_32() will
	 * use it before we get to the 64-bit efi_pe_entry() in C code.
	 */
	subl	%esi, %ebx			// image_offset = startup_32 - image_base
	movl	%ebx, rva(image_offset)(%ebp)	// save image_offset
	jmp	efi32_pe_stub_entry
2:	popl	%edi				// restore callee-save registers
SYM_FUNC_END(efi32_pe_entry)
	/* EFI loaded image protocol GUID */
SYM_DATA_START_LOCAL(loaded_image_proto)
	.long	0x5b1b31a1
	.word	0x9562, 0x11d2
	.byte	0x8e, 0x3f, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b
SYM_DATA_END(loaded_image_proto)
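/*
 * This encodes EFI_LOADED_IMAGE_PROTOCOL_GUID
 * (5b1b31a1-9562-11d2-8e3f-00a0c969723b): per the EFI spec the first
 * three groups are stored little-endian, the final eight bytes verbatim.
 */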
#ifdef CONFIG_AMD_MEM_ENCRYPT
/*
 * Write an IDT entry into boot32_idt
 *
 * %eax:	Handler address
 * %edx:	Vector number
 *
 * Physical offset is expected in %ebp
 */
SYM_FUNC_START(startup32_set_idt_entry)
	/* IDT entry address to %ebx */
	leal	rva(boot32_idt)(%ebp), %ebx
	/* Build IDT entry, lower 4 bytes */
	andl	$0x0000ffff, %edx	# Target code segment offset [15:0]
	movl	$__KERNEL32_CS, %ecx	# Target code segment selector

	/* Store lower 4 bytes to IDT */

	/* Build IDT entry, upper 4 bytes */
	andl	$0xffff0000, %edx	# Target code segment offset [31:16]
	orl	$0x00008e00, %edx	# Present, Type 32-bit Interrupt Gate

	/* Store upper 4 bytes to IDT */
SYM_FUNC_END(startup32_set_idt_entry)
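/*
 * The resulting 8-byte gate, for a hypothetical handler at 0x12345678,
 * would be: low dword (__KERNEL32_CS << 16) | 0x5678, high dword
 * 0x12340000 | 0x00008e00, i.e. selector:offset plus the present bit
 * and gate type.
 */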
SYM_FUNC_START(startup32_load_idt)
#ifdef CONFIG_AMD_MEM_ENCRYPT
	leal	rva(startup32_vc_handler)(%ebp), %eax
	movl	$X86_TRAP_VC, %edx
	call	startup32_set_idt_entry

	leal	rva(boot32_idt)(%ebp), %eax
	movl	%eax, rva(boot32_idt_desc+2)(%ebp)
	lidt	rva(boot32_idt_desc)(%ebp)
#endif
SYM_FUNC_END(startup32_load_idt)
/*
 * Check for the correct C-bit position when the startup_32 boot-path is used.
 *
 * The check makes use of the fact that all memory is encrypted when paging is
 * disabled. The function creates 64 bits of random data using the RDRAND
 * instruction. RDRAND is mandatory for SEV guests, so it is always available.
 * If the hypervisor violates that, the kernel will crash right here.
 *
 * The 64 bits of random data are stored to a memory location and at the same
 * time kept in the %eax and %ebx registers. Since encryption is always active
 * when paging is off, the random data will be stored encrypted in main memory.
 *
 * Then paging is enabled. When the C-bit position is correct all memory is
 * still mapped encrypted and comparing the register values with memory will
 * succeed. An incorrect C-bit position will map all memory unencrypted, so that
 * the compare will use the encrypted random data and fail.
 */
SYM_FUNC_START(startup32_check_sev_cbit)
#ifdef CONFIG_AMD_MEM_ENCRYPT
	/* Check for non-zero sev_status */
	movl	rva(sev_status)(%ebp), %eax
	/*
	 * Get two 32-bit random values - Don't bail out if RDRAND fails
	 * because it is better to prevent forward progress if no random value
	 * can be gathered.
	 */
	/* Store to memory and keep it in the registers */
	movl	%eax, rva(sev_check_data)(%ebp)
	movl	%ebx, rva(sev_check_data+4)(%ebp)

	/* Enable paging to see if encryption is active */
	movl	%cr0, %edx				/* Backup %cr0 in %edx */
	movl	$(X86_CR0_PG | X86_CR0_PE), %ecx	/* Enable Paging and Protected mode */
	movl	%ecx, %cr0

	cmpl	%eax, rva(sev_check_data)(%ebp)
	jne	3f
	cmpl	%ebx, rva(sev_check_data+4)(%ebp)
	jne	3f

	movl	%edx, %cr0	/* Restore previous %cr0 */
3:	/* Check failed - hlt the machine */
	hlt
	jmp	3b
#endif
SYM_FUNC_END(startup32_check_sev_cbit)
/*
 * Stack and heap for decompression
 */
SYM_DATA_LOCAL(boot_heap,	.fill BOOT_HEAP_SIZE, 1, 0)

SYM_DATA_START_LOCAL(boot_stack)
	.fill BOOT_STACK_SIZE, 1, 0
SYM_DATA_END_LABEL(boot_stack, SYM_L_LOCAL, boot_stack_end)
/*
 * Space for page tables (not in .bss so not zeroed)
 */
	.section ".pgtable","aw",@nobits
SYM_DATA_LOCAL(pgtable,		.fill BOOT_PGT_SIZE, 1, 0)

/*
 * The page table is going to be used instead of page table in the trampoline
 * memory.
 */
SYM_DATA_LOCAL(top_pgtable,	.fill PAGE_SIZE, 1, 0)