2 * relocate_kernel.S - put the kernel image in place to boot
3 * Copyright (C) 2002-2005 Eric Biederman <ebiederm@xmission.com>
5 * This source code is licensed under the GNU General Public License,
6 * Version 2. See the file COPYING for more details.
9 #include <linux/linkage.h>
10 #include <asm/page_types.h>
11 #include <asm/kexec.h>
12 #include <asm/processor-flags.h>
13 #include <asm/pgtable_types.h>
16 * Must be relocatable PIC code callable as a C function
19 #define PTR(x) (x << 3)
20 #define PAGE_ATTR (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
23 * control_page + KEXEC_CONTROL_CODE_MAX_SIZE
24 * ~ control_page + PAGE_SIZE are used as data storage and stack for
27 #define DATA(offset) (KEXEC_CONTROL_CODE_MAX_SIZE+(offset))
29 /* Minimal CPU state */
32 #define CR3 DATA(0x10)
33 #define CR4 DATA(0x18)
36 #define CP_PA_TABLE_PAGE DATA(0x20)
37 #define CP_PA_SWAP_PAGE DATA(0x28)
38 #define CP_PA_BACKUP_PAGES_MAP DATA(0x30)
43 .globl relocate_kernel
/*
 * relocate_kernel() — kexec trampoline entry, called from C.
 * Register arguments visible in this excerpt (SysV AMD64):
 *   %rdi  indirection_page (physical address of the page list)
 *   %rsi  page_list — table of PTR(...) slots read below (VA/PA of the
 *         control page, page table, swap page); presumably set up by the
 *         kexec C code — the full argument list is outside this excerpt.
 *   %rcx  preserve_context flag
 */
46 * %rdi indirection_page
49 * %rcx preserve_context
53 /* Save the CPU context, used for jumping back */
/* %r11 = virtual address of the control page; the CP_* / DATA() slots live there */
62 movq PTR(VA_CONTROL_PAGE)(%rsi), %r11
71 /* Save CR4. Required to enable the right paging mode later. */
74 /* zero out flags, and disable interrupts */
78 /* Save SME active flag */
/*
 * Capture the physical addresses while the current page tables are still
 * live; after the switch to the identity map the %rsi-relative virtual
 * addresses of the page_list would no longer be valid.
 */
82 * get physical address of control page now
83 * this is impossible after page table switch
85 movq PTR(PA_CONTROL_PAGE)(%rsi), %r8
87 /* get physical address of page table now too */
88 movq PTR(PA_TABLE_PAGE)(%rsi), %r9
90 /* get physical address of swap page now */
91 movq PTR(PA_SWAP_PAGE)(%rsi), %r10
93 /* save some information for jumping back */
/* Stash the physical addresses inside the control page itself so the
 * return path (which only knows the control page) can recover them. */
94 movq %r9, CP_PA_TABLE_PAGE(%r11)
95 movq %r10, CP_PA_SWAP_PAGE(%r11)
96 movq %rdi, CP_PA_BACKUP_PAGES_MAP(%r11)
98 /* Switch to the identity mapped page tables */
101 /* setup a new stack at the end of the physical control page */
/* %r8 = PA of control page; stack grows down from its top, so code and
 * data at the bottom of the page are not clobbered. */
102 lea PAGE_SIZE(%r8), %rsp
104 /* jump to identity mapped page */
/* identity_mapped was copied into the control page along with this code,
 * so its physical address is control-page base + its offset from
 * relocate_kernel. */
105 addq $(identity_mapped - relocate_kernel), %r8
110 /* set return address to 0 if not preserving context */
112 /* store the start address on the stack */
/*
 * Force CR0 into the state the new kernel expects; the current value is
 * whatever the old kernel left behind.
 */
116 * Set cr0 to a known state:
118 * - Alignment check disabled
119 * - Write protect disabled
121 * - Don't do FP software emulation.
122 * - Protected mode enabled
125 andq $~(X86_CR0_AM | X86_CR0_WP | X86_CR0_TS | X86_CR0_EM), %rax
126 orl $(X86_CR0_PG | X86_CR0_PE), %eax
130 * Set cr4 to a known state:
131 * - physical address extension enabled
132 * - 5-level paging, if it was enabled before
/* %r13 presumably holds the CR4 value saved earlier (see the "Save CR4"
 * comment above) — the actual save instruction is outside this excerpt.
 * LA57 must match the paging mode the page tables were built for. */
134 movl $X86_CR4_PAE, %eax
135 testq $X86_CR4_LA57, %r13
137 orl $X86_CR4_LA57, %eax
144 /* Flush the TLB (needed?) */
148 * If SME is active, there could be old encrypted cache line
149 * entries that will conflict with the now unencrypted memory
150 * used by kexec. Flush the caches before copying the kernel.
/*
 * The copy above may have overwritten the very code we are executing
 * (this trampoline is relocatable), so a serializing instruction is
 * required before continuing; a CR3 reload serializes on all x86 CPUs.
 */
161 * To be certain of avoiding problems with self-modifying code
162 * I need to execute a serializing instruction here.
163 * So I flush the TLB by reloading %cr3 here, it's handy,
164 * and not processor dependent.
170 * set all of the registers to known values
/* preserve_context return path: stack at the top of the swap page (%r10) */
196 leaq PAGE_SIZE(%r10), %rsp
199 /* get the re-entry point of the peer system */
/* Recover the control-page base from the 1: label's runtime address, then
 * reload the physical addresses stashed there by relocate_kernel. */
204 subq $(1b - relocate_kernel), %r8
205 movq CP_PA_SWAP_PAGE(%r8), %r10
206 movq CP_PA_BACKUP_PAGES_MAP(%r8), %rdi
207 movq CP_PA_TABLE_PAGE(%r8), %rax
209 lea PAGE_SIZE(%r8), %rsp
/* Absolute (link-time) address of virtual_mapped: after switching back to
 * the original page tables we can jump to virtual addresses again. */
211 movq $virtual_mapped, %rax
/*
 * swap_pages fragment — walks the kexec indirection page list.
 * Each 64-bit entry is a page address (low 12 bits masked off below)
 * tagged in its low bits, as tested here:
 *   bit 0 (0x1)  destination page   bit 1 (0x2)  indirection page
 *   bit 2 (0x4)  done indicator     bit 3 (0x8)  source page
 * These match the kernel's IND_DESTINATION/INDIRECTION/DONE/SOURCE flags
 * — TODO confirm against asm/kexec.h, which is outside this excerpt.
 */
236 movq %rdi, %rcx /* Put the page_list in %rcx */
241 0: /* top, read another word for the indirection page */
246 testb $0x1, %cl /* is it a destination page? */
/* strip flag bits: entries are 4 KiB-page aligned addresses */
249 andq $0xfffffffffffff000, %rdi
252 testb $0x2, %cl /* is it an indirection page? */
255 andq $0xfffffffffffff000, %rbx
258 testb $0x4, %cl /* is it the done indicator? */
262 testb $0x8, %cl /* is it the source indicator? */
263 jz 0b /* Ignore it otherwise */
264 movq %rcx, %rsi /* For every source page do a copy */
265 andq $0xfffffffffffff000, %rsi
284 lea PAGE_SIZE(%rax), %rsi
/* Size of the relocatable trampoline, exported so the kexec C code can
 * check it fits in KEXEC_CONTROL_CODE_MAX_SIZE within the control page. */
289 .globl kexec_control_code_size
290 .set kexec_control_code_size, . - relocate_kernel