1 /* SPDX-License-Identifier: GPL-2.0-only */
3 * relocate_kernel.S - put the kernel image in place to boot
4 * Copyright (C) 2002-2005 Eric Biederman <ebiederm@xmission.com>
7 #include <linux/linkage.h>
8 #include <asm/page_types.h>
10 #include <asm/processor-flags.h>
11 #include <asm/pgtable_types.h>
12 #include <asm/nospec-branch.h>
13 #include <asm/unwind_hints.h>
16 * Must be relocatable PIC code callable as a C function, in particular
17 * there must be a plain RET and not a jump to the return thunk.
20 #define PTR(x) (x << 3)	/* page_list[] holds 8-byte slots; PTR(i) = byte offset of slot i */
21 #define PAGE_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)	/* PTE flags for mapping entries */
24 * control_page + KEXEC_CONTROL_CODE_MAX_SIZE
25 * ~ control_page + PAGE_SIZE are used as data storage and stack for
28 #define DATA(offset) (KEXEC_CONTROL_CODE_MAX_SIZE+(offset))	/* offset of a slot in the control page's data area */
30 /* Minimal CPU state */
33 #define CR3 DATA(0x10)	/* slot for saved %cr3 (per name; the save itself is not visible in this excerpt) */
34 #define CR4 DATA(0x18)	/* slot for saved %cr4 (see "Save CR4" below) */
37 #define CP_PA_TABLE_PAGE DATA(0x20)	/* phys addr of the identity-mapped page table */
38 #define CP_PA_SWAP_PAGE DATA(0x28)	/* phys addr of the swap page */
39 #define CP_PA_BACKUP_PAGES_MAP DATA(0x30)	/* phys addr of the indirection page list */
44 SYM_CODE_START_NOALIGN(relocate_kernel)
	/*
	 * C-callable entry point (SysV AMD64 ABI): captures the physical
	 * addresses it will need, switches to an identity-mapped stack in
	 * the control page, and jumps to the identity-mapped copy of the
	 * code.  NOTE(review): many original lines (45-46, 48-62, 64-110,
	 * ...) are elided from this excerpt; only surviving instructions
	 * are annotated.  %rsi appears to index page_list via PTR() —
	 * its argument-doc line is not visible here; confirm upstream.
	 */
47 * %rdi indirection_page
50 * %rcx preserve_context
54 /* Save the CPU context, used for jumping back */
63 movq PTR(VA_CONTROL_PAGE)(%rsi), %r11	/* %r11 = virtual address of the control page */
72 /* Save CR4. Required to enable the right paging mode later. */
75 /* zero out flags, and disable interrupts */
79 /* Save SME active flag */
83 * get physical address of control page now
84 * this is impossible after page table switch
86 movq PTR(PA_CONTROL_PAGE)(%rsi), %r8	/* %r8 = phys addr of the control page */
88 /* get physical address of page table now too */
89 movq PTR(PA_TABLE_PAGE)(%rsi), %r9	/* %r9 = phys addr of the identity page table */
91 /* get physical address of swap page now */
92 movq PTR(PA_SWAP_PAGE)(%rsi), %r10	/* %r10 = phys addr of the swap page */
94 /* save some information for jumping back */
95 movq %r9, CP_PA_TABLE_PAGE(%r11)	/* stash in the control page's data area ... */
96 movq %r10, CP_PA_SWAP_PAGE(%r11)	/* ... for the preserve-context return path */
97 movq %rdi, CP_PA_BACKUP_PAGES_MAP(%r11)	/* indirection page list */
99 /* Switch to the identity mapped page tables */
102 /* setup a new stack at the end of the physical control page */
103 lea PAGE_SIZE(%r8), %rsp	/* stack grows down from control_page + PAGE_SIZE */
105 /* jump to identity mapped page */
106 addq $(identity_mapped - relocate_kernel), %r8	/* %r8 = phys addr of identity_mapped */
111 SYM_CODE_END(relocate_kernel)
113 SYM_CODE_START_LOCAL_NOALIGN(identity_mapped)
	/*
	 * Executes from the identity-mapped copy inside the control page:
	 * forces %cr0/%cr4 into a known state, flushes TLB/caches, copies
	 * the new kernel via swap_pages, then either jumps to the new
	 * kernel or (preserve_context) returns to the old one.
	 * NOTE(review): many original lines are elided from this excerpt;
	 * only surviving instructions are annotated.
	 */
115 /* set return address to 0 if not preserving context */
117 /* store the start address on the stack */
121 * Set cr0 to a known state:
123 * - Alignment check disabled
124 * - Write protect disabled
126 * - Don't do FP software emulation.
127 * - Protected mode enabled
130 andq $~(X86_CR0_AM | X86_CR0_WP | X86_CR0_TS | X86_CR0_EM), %rax	/* clear AM, WP, TS, EM */
131 orl $(X86_CR0_PG | X86_CR0_PE), %eax	/* ensure paging + protected mode stay set */
135 * Set cr4 to a known state:
136 * - physical address extension enabled
137 * - 5-level paging, if it was enabled before
139 movl $X86_CR4_PAE, %eax
140 testq $X86_CR4_LA57, %r13	/* %r13 presumably holds the saved %cr4 — the save is not visible here; confirm */
142 orl $X86_CR4_LA57, %eax	/* carry 5-level paging over if it was on */
149 /* Flush the TLB (needed?) */
153 * If SME is active, there could be old encrypted cache line
154 * entries that will conflict with the now unencrypted memory
155 * used by kexec. Flush the caches before copying the kernel.
166 * To be certain of avoiding problems with self-modifying code
167 * I need to execute a serializing instruction here.
168 * So I flush the TLB by reloading %cr3 here, it's handy,
169 * and not processor dependent.
175 * set all of the registers to known values
203 leaq PAGE_SIZE(%r10), %rsp	/* stack at top of the swap page (%r10 = its phys addr) */
204 ANNOTATE_RETPOLINE_SAFE
207 /* get the re-entry point of the peer system */
209 leaq relocate_kernel(%rip), %r8
210 movq CP_PA_SWAP_PAGE(%r8), %r10	/* reload the values stashed by relocate_kernel */
211 movq CP_PA_BACKUP_PAGES_MAP(%r8), %rdi
212 movq CP_PA_TABLE_PAGE(%r8), %rax
214 lea PAGE_SIZE(%r8), %rsp	/* fresh stack at the end of the control page */
216 movq $virtual_mapped, %rax	/* absolute address: target runs with the original mappings */
221 SYM_CODE_END(identity_mapped)
223 SYM_CODE_START_LOCAL_NOALIGN(virtual_mapped)
	/* NOTE(review): the entire body (original lines 224-243) is elided
	 * from this excerpt; nothing here can be documented beyond the
	 * symbol itself. */
244 SYM_CODE_END(virtual_mapped)
247 SYM_CODE_START_LOCAL_NOALIGN(swap_pages)
	/*
	 * Walk the kexec indirection page list (%rdi) and copy each source
	 * page to its destination.  Each 64-bit entry is a page address
	 * with flag bits in the low 12 bits, tested below:
	 *   0x1 destination, 0x2 indirection, 0x4 done, 0x8 source.
	 * NOTE(review): several original lines are elided from this
	 * excerpt (e.g. the loads feeding %rcx and the copy loop body).
	 */
249 movq %rdi, %rcx /* Put the page_list in %rcx */
254 0: /* top, read another word for the indirection page */
259 testb $0x1, %cl /* is it a destination page? */
262 andq $0xfffffffffffff000, %rdi	/* strip flag bits -> destination page address */
265 testb $0x2, %cl /* is it an indirection page? */
268 andq $0xfffffffffffff000, %rbx	/* -> next indirection page address */
271 testb $0x4, %cl /* is it the done indicator? */
275 testb $0x8, %cl /* is it the source indicator? */
276 jz 0b /* Ignore it otherwise */
277 movq %rcx, %rsi /* For every source page do a copy */
278 andq $0xfffffffffffff000, %rsi	/* -> source page address */
297 lea PAGE_SIZE(%rax), %rsi
303 SYM_CODE_END(swap_pages)
/* Exported size of the relocatable control-code blob, measured from
 * relocate_kernel to here; consumers can bounds-check against
 * KEXEC_CONTROL_CODE_MAX_SIZE. */
305 .globl kexec_control_code_size
306 .set kexec_control_code_size, . - relocate_kernel