/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_KEXEC_H
#define _ASM_X86_KEXEC_H

#ifdef CONFIG_X86_32
# define PA_CONTROL_PAGE	0
# define VA_CONTROL_PAGE	1
# define PA_PGD			2
# define PA_SWAP_PAGE		3
# define PAGES_NR		4
#else
# define PA_CONTROL_PAGE	0
# define VA_CONTROL_PAGE	1
# define PA_TABLE_PAGE		2
# define PA_SWAP_PAGE		3
# define PAGES_NR		4
#endif

# define KEXEC_CONTROL_CODE_MAX_SIZE	2048
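
/*
 * Illustrative sketch (not part of this header): the control code is the
 * relocate_kernel() stub, which machine_kexec() copies into the control
 * page, so the stub must fit within this bound. The copy is roughly:
 *
 *	memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
 */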

#ifndef __ASSEMBLY__

#include <linux/string.h>
#include <linux/kernel.h>

#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/bootparam.h>

struct kimage;

/*
 * KEXEC_SOURCE_MEMORY_LIMIT is the maximum page get_free_page can return,
 * i.e. the highest page that is mapped directly into kernel memory, so
 * that kmap is not required.
 *
 * So far x86_64 is limited to 40 physical address bits.
 */
#ifdef CONFIG_X86_32
/* Maximum physical address we can use pages from */
# define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
/* Maximum address we can reach in physical address mode */
# define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
/* Maximum address we can use for the control code buffer */
# define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE

# define KEXEC_CONTROL_PAGE_SIZE	4096

/* The native architecture */
# define KEXEC_ARCH KEXEC_ARCH_386

/* We can also handle crash dumps from 64 bit kernel. */
# define vmcore_elf_check_arch_cross(x) ((x)->e_machine == EM_X86_64)
#else
/* Maximum physical address we can use pages from */
# define KEXEC_SOURCE_MEMORY_LIMIT      (MAXMEM-1)
/* Maximum address we can reach in physical address mode */
# define KEXEC_DESTINATION_MEMORY_LIMIT (MAXMEM-1)
/* Maximum address we can use for the control pages */
# define KEXEC_CONTROL_MEMORY_LIMIT     (MAXMEM-1)

/* Allocate one page for the pdp and the second for the code */
# define KEXEC_CONTROL_PAGE_SIZE  (4096UL + 4096UL)

/* The native architecture */
# define KEXEC_ARCH KEXEC_ARCH_X86_64
#endif

/* Memory to back up during crash kdump */
#define KEXEC_BACKUP_SRC_START	(0UL)
#define KEXEC_BACKUP_SRC_END	(640 * 1024UL - 1)	/* 640K */
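
/*
 * Illustrative sketch: the crash loader records this region in
 * struct kimage_arch (defined below) along these lines; treat this as
 * a sketch of the intent, not a copy of the exact loader code:
 *
 *	image->arch.backup_src_start = KEXEC_BACKUP_SRC_START;
 *	image->arch.backup_src_sz = KEXEC_BACKUP_SRC_END -
 *				    KEXEC_BACKUP_SRC_START + 1;
 */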

/*
 * This function is responsible for capturing register states if coming
 * via panic; otherwise it just fixes up ss and sp if coming via a kernel
 * mode exception.
 */
static inline void crash_setup_regs(struct pt_regs *newregs,
				    struct pt_regs *oldregs)
{
	if (oldregs) {
		memcpy(newregs, oldregs, sizeof(*newregs));
	} else {
#ifdef CONFIG_X86_32
		asm volatile("movl %%ebx,%0" : "=m"(newregs->bx));
		asm volatile("movl %%ecx,%0" : "=m"(newregs->cx));
		asm volatile("movl %%edx,%0" : "=m"(newregs->dx));
		asm volatile("movl %%esi,%0" : "=m"(newregs->si));
		asm volatile("movl %%edi,%0" : "=m"(newregs->di));
		asm volatile("movl %%ebp,%0" : "=m"(newregs->bp));
		asm volatile("movl %%eax,%0" : "=m"(newregs->ax));
		asm volatile("movl %%esp,%0" : "=m"(newregs->sp));
		asm volatile("movl %%ss, %%eax;" : "=a"(newregs->ss));
		asm volatile("movl %%cs, %%eax;" : "=a"(newregs->cs));
		asm volatile("movl %%ds, %%eax;" : "=a"(newregs->ds));
		asm volatile("movl %%es, %%eax;" : "=a"(newregs->es));
		asm volatile("pushfl; popl %0" : "=m"(newregs->flags));
#else
		asm volatile("movq %%rbx,%0" : "=m"(newregs->bx));
		asm volatile("movq %%rcx,%0" : "=m"(newregs->cx));
		asm volatile("movq %%rdx,%0" : "=m"(newregs->dx));
		asm volatile("movq %%rsi,%0" : "=m"(newregs->si));
		asm volatile("movq %%rdi,%0" : "=m"(newregs->di));
		asm volatile("movq %%rbp,%0" : "=m"(newregs->bp));
		asm volatile("movq %%rax,%0" : "=m"(newregs->ax));
		asm volatile("movq %%rsp,%0" : "=m"(newregs->sp));
		asm volatile("movq %%r8,%0" : "=m"(newregs->r8));
		asm volatile("movq %%r9,%0" : "=m"(newregs->r9));
		asm volatile("movq %%r10,%0" : "=m"(newregs->r10));
		asm volatile("movq %%r11,%0" : "=m"(newregs->r11));
		asm volatile("movq %%r12,%0" : "=m"(newregs->r12));
		asm volatile("movq %%r13,%0" : "=m"(newregs->r13));
		asm volatile("movq %%r14,%0" : "=m"(newregs->r14));
		asm volatile("movq %%r15,%0" : "=m"(newregs->r15));
		asm volatile("movl %%ss, %%eax;" : "=a"(newregs->ss));
		asm volatile("movl %%cs, %%eax;" : "=a"(newregs->cs));
		asm volatile("pushfq; popq %0" : "=m"(newregs->flags));
#endif
		newregs->ip = _THIS_IP_;
	}
}
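
/*
 * Illustrative sketch: the generic crash path captures registers with
 * this helper before shutting down, roughly as follows (a sketch
 * modeled on the __crash_kexec() flow in kernel/kexec_core.c):
 *
 *	struct pt_regs fixed_regs;
 *
 *	crash_setup_regs(&fixed_regs, regs);
 *	crash_save_vmcoreinfo();
 *	machine_crash_shutdown(&fixed_regs);
 *	machine_kexec(kexec_crash_image);
 */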

#ifdef CONFIG_X86_32
asmlinkage unsigned long
relocate_kernel(unsigned long indirection_page,
		unsigned long control_page,
		unsigned long start_address,
		unsigned int has_pae,
		unsigned int preserve_context);
#else
unsigned long
relocate_kernel(unsigned long indirection_page,
		unsigned long page_list,
		unsigned long start_address,
		unsigned int preserve_context,
		unsigned int sme_active);
#endif
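
/*
 * Illustrative sketch: on 64-bit, machine_kexec() hands control to the
 * copied stub roughly like this (a sketch of the call shape; see
 * arch/x86/kernel/machine_kexec_64.c for the real call site):
 *
 *	image->start = relocate_kernel((unsigned long)image->head,
 *				       (unsigned long)page_list,
 *				       image->start,
 *				       image->preserve_context,
 *				       sme_active());
 */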

#define ARCH_HAS_KIMAGE_ARCH

#ifdef CONFIG_X86_32
struct kimage_arch {
	pgd_t *pgd;
#ifdef CONFIG_X86_PAE
	pmd_t *pmd0;
	pmd_t *pmd1;
#endif
	pte_t *pte0;
	pte_t *pte1;
};
#else
struct kimage_arch {
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	/* Details of backup region */
	unsigned long backup_src_start;
	unsigned long backup_src_sz;

	/* Physical address of backup segment */
	unsigned long backup_load_addr;

	/* Core ELF header buffer */
	void *elf_headers;
	unsigned long elf_headers_sz;
	unsigned long elf_load_addr;
};
#endif /* CONFIG_X86_32 */

/*
 * The number and order of elements in this structure must match the ones
 * in arch/x86/purgatory/entry64.S. If you make a change here, make the
 * corresponding change in purgatory too.
 */
struct kexec_entry64_regs {
	uint64_t rax;
	uint64_t rcx;
	uint64_t rdx;
	uint64_t rbx;
	uint64_t rsp;
	uint64_t rbp;
	uint64_t rsi;
	uint64_t rdi;
	uint64_t rip;
	uint64_t r8;
	uint64_t r9;
	uint64_t r10;
	uint64_t r11;
	uint64_t r12;
	uint64_t r13;
	uint64_t r14;
	uint64_t r15;
};
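
/*
 * Hypothetical compile-time guard (an assumption, not present in this
 * header): since purgatory reads these fields at fixed offsets, the
 * layout could be pinned with checks such as:
 *
 *	BUILD_BUG_ON(offsetof(struct kexec_entry64_regs, rax) != 0x00);
 *	BUILD_BUG_ON(offsetof(struct kexec_entry64_regs, rip) != 0x40);
 */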

extern int arch_kexec_post_alloc_pages(void *vaddr, unsigned int pages,
				       gfp_t gfp);
#define arch_kexec_post_alloc_pages arch_kexec_post_alloc_pages

extern void arch_kexec_pre_free_pages(void *vaddr, unsigned int pages);
#define arch_kexec_pre_free_pages arch_kexec_pre_free_pages
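
/*
 * Illustrative pairing (sketch): the generic allocator in
 * kernel/kexec_core.c wraps control-page lifetime with these hooks,
 * approximately:
 *
 *	if (arch_kexec_post_alloc_pages(page_address(pages), count, gfp))
 *		return NULL;
 *	...
 *	arch_kexec_pre_free_pages(page_address(page), count);
 */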

#ifdef CONFIG_KEXEC_FILE
struct purgatory_info;
int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
				     Elf_Shdr *section,
				     const Elf_Shdr *relsec,
				     const Elf_Shdr *symtab);
#define arch_kexec_apply_relocations_add arch_kexec_apply_relocations_add
#endif

typedef void crash_vmclear_fn(void);
extern crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss;
extern void kdump_nmi_shootdown_cpus(void);

#endif /* __ASSEMBLY__ */

#endif /* _ASM_X86_KEXEC_H */