/*
 * Architecture specific (i386/x86_64) functions for kexec based crash dumps.
 *
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *
 * Copyright (C) IBM Corporation, 2004. All rights reserved.
 * Copyright (C) Red Hat Inc., 2014. All rights reserved.
 * Authors:
 *	Vivek Goyal <vgoyal@redhat.com>
 */

#define pr_fmt(fmt)	"kexec: " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/smp.h>
#include <linux/reboot.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/overflow.h>

#include <asm/processor.h>
#include <asm/hardirq.h>
#include <asm/nmi.h>
#include <asm/hw_irq.h>
#include <asm/apic.h>
#include <asm/e820/types.h>
#include <asm/io_apic.h>
#include <asm/hpet.h>
#include <linux/kdebug.h>
#include <asm/cpu.h>
#include <asm/reboot.h>
#include <asm/intel_pt.h>

/* Alignment required for elf header segment */
#define ELF_CORE_HEADER_ALIGN	4096

/* This primarily represents number of split ranges due to exclusion */
#define CRASH_MAX_RANGES	16

struct crash_mem_range {
	u64 start, end;
};

struct crash_mem {
	unsigned int nr_ranges;
	struct crash_mem_range ranges[CRASH_MAX_RANGES];
};

/* Misc data about ram ranges needed to prepare elf headers */
struct crash_elf_data {
	struct kimage *image;
	/*
	 * Total number of ram ranges we have after various adjustments for
	 * crash reserved region, etc.
	 */
	unsigned int max_nr_ranges;

	/* Pointer to elf header */
	void *ehdr;
	/* Pointer to next phdr */
	void *bufp;
	struct crash_mem mem;
};

/* Used while preparing memory map entries for second kernel */
struct crash_memmap_data {
	struct boot_params *params;
	/* Type of memory */
	unsigned int type;
};

/*
 * This is used to VMCLEAR all VMCSs loaded on the
 * processor. And when loading kvm_intel module, the
 * callback function pointer will be assigned.
 *
 * protected by rcu.
 */
crash_vmclear_fn __rcu *crash_vmclear_loaded_vmcss = NULL;
EXPORT_SYMBOL_GPL(crash_vmclear_loaded_vmcss);
unsigned long crash_zero_bytes;

static inline void cpu_crash_vmclear_loaded_vmcss(void)
{
	crash_vmclear_fn *do_vmclear_operation = NULL;

	rcu_read_lock();
	do_vmclear_operation = rcu_dereference(crash_vmclear_loaded_vmcss);
	if (do_vmclear_operation)
		do_vmclear_operation();
	rcu_read_unlock();
}
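
/*
 * Illustrative sketch (not part of this file): a VMX host module such as
 * kvm_intel is expected to publish its VMCLEAR handler before the callback
 * above can do anything. The handler name below is made up for the example;
 * only crash_vmclear_loaded_vmcss itself is defined here.
 *
 *	rcu_assign_pointer(crash_vmclear_loaded_vmcss,
 *			   my_vmclear_loaded_vmcss);
 *	...
 *	RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL);
 *	synchronize_rcu();
 */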

#if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)

static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	struct pt_regs fixed_regs;

	if (!user_mode(regs)) {
		crash_fixup_ss_esp(&fixed_regs, regs);
		regs = &fixed_regs;
	}
#endif
	crash_save_cpu(regs, cpu);

	/*
	 * VMCLEAR VMCSs loaded on all cpus if needed.
	 */
	cpu_crash_vmclear_loaded_vmcss();

	/*
	 * Disable Intel PT to stop its logging
	 */
	cpu_emergency_stop_pt();

	disable_local_APIC();
}

void kdump_nmi_shootdown_cpus(void)
{
	nmi_shootdown_cpus(kdump_nmi_callback);

	disable_local_APIC();
}

/* Override the weak function in kernel/panic.c */
void crash_smp_send_stop(void)
{
	static int cpus_stopped;

	if (cpus_stopped)
		return;

	if (smp_ops.crash_stop_other_cpus)
		smp_ops.crash_stop_other_cpus();
	else
		smp_send_stop();

	cpus_stopped = 1;
}

#else
void crash_smp_send_stop(void)
{
	/* There are no cpus to shootdown */
}
#endif

void native_machine_crash_shutdown(struct pt_regs *regs)
{
	/* This function is only called after the system
	 * has panicked or is otherwise in a critical state.
	 * The minimum amount of code to allow a kexec'd kernel
	 * to run successfully needs to happen here.
	 *
	 * In practice this means shooting down the other cpus in
	 * an SMP system.
	 */
	/* The kernel is broken so disable interrupts */
	local_irq_disable();

	crash_smp_send_stop();

	/*
	 * VMCLEAR VMCSs loaded on this cpu if needed.
	 */
	cpu_crash_vmclear_loaded_vmcss();

	cpu_emergency_disable_virtualization();

	/*
	 * Disable Intel PT to stop its logging
	 */
	cpu_emergency_stop_pt();

#ifdef CONFIG_X86_IO_APIC
	/* Prevent crash_kexec() from deadlocking on ioapic_lock. */
	ioapic_zap_locks();
	clear_IO_APIC();
#endif
	lapic_shutdown();
#ifdef CONFIG_HPET_TIMER
	hpet_disable();
#endif
	crash_save_cpu(regs, safe_smp_processor_id());
}

#ifdef CONFIG_KEXEC_FILE
static int get_nr_ram_ranges_callback(u64 start, u64 end, void *arg)
{
	unsigned int *nr_ranges = arg;

	(*nr_ranges)++;
	return 0;
}

/* Gather all the required information to prepare elf headers for ram regions */
static void fill_up_crash_elf_data(struct crash_elf_data *ced,
				   struct kimage *image)
{
	unsigned int nr_ranges = 0;

	ced->image = image;

	walk_system_ram_res(0, -1, &nr_ranges,
			    get_nr_ram_ranges_callback);

	ced->max_nr_ranges = nr_ranges;

	/* Exclusion of crash region could split memory ranges */
	ced->max_nr_ranges++;

	/* If crashk_low_res is not 0, another range split possible */
	if (crashk_low_res.end)
		ced->max_nr_ranges++;
}
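
/*
 * Worked example (counts assumed for illustration): if the
 * walk_system_ram_res() pass above finds 4 System RAM ranges and both
 * crashk_res and crashk_low_res are reserved, max_nr_ranges ends up as
 * 4 + 1 + 1 = 6, since each exclusion can split at most one range in two.
 */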

static int exclude_mem_range(struct crash_mem *mem,
		unsigned long long mstart, unsigned long long mend)
{
	int i, j;
	unsigned long long start, end;
	struct crash_mem_range temp_range = {0, 0};

	for (i = 0; i < mem->nr_ranges; i++) {
		start = mem->ranges[i].start;
		end = mem->ranges[i].end;

		if (mstart > end || mend < start)
			continue;

		/* Truncate any area outside of range */
		if (mstart < start)
			mstart = start;
		if (mend > end)
			mend = end;

		/* Found completely overlapping range */
		if (mstart == start && mend == end) {
			mem->ranges[i].start = 0;
			mem->ranges[i].end = 0;
			if (i < mem->nr_ranges - 1) {
				/* Shift rest of the ranges to left */
				for (j = i; j < mem->nr_ranges - 1; j++) {
					mem->ranges[j].start =
						mem->ranges[j+1].start;
					mem->ranges[j].end =
						mem->ranges[j+1].end;
				}
			}
			mem->nr_ranges--;
			return 0;
		}

		if (mstart > start && mend < end) {
			/* Split original range */
			mem->ranges[i].end = mstart - 1;
			temp_range.start = mend + 1;
			temp_range.end = end;
		} else if (mstart != start)
			mem->ranges[i].end = mstart - 1;
		else
			mem->ranges[i].start = mend + 1;
		break;
	}

	/* If a split happened, add the split to array */
	if (!temp_range.end)
		return 0;

	/* Split happened */
	if (i == CRASH_MAX_RANGES - 1) {
		pr_err("Too many crash ranges after split\n");
		return -ENOSPC;
	}

	/* Location where new range should go */
	j = i + 1;
	if (j < mem->nr_ranges) {
		/* Move over all ranges one slot towards the end */
		for (i = mem->nr_ranges - 1; i >= j; i--)
			mem->ranges[i + 1] = mem->ranges[i];
	}

	mem->ranges[j].start = temp_range.start;
	mem->ranges[j].end = temp_range.end;
	mem->nr_ranges++;
	return 0;
}
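
/*
 * Worked example (addresses assumed for illustration): excluding
 * [0x200000, 0x2fffff] from the single range [0x100000, 0x3fffff] trims
 * the original entry to [0x100000, 0x1fffff] and inserts the remainder
 * [0x300000, 0x3fffff] right after it, so nr_ranges goes from 1 to 2.
 */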

/*
 * Look for any unwanted ranges between mstart, mend and remove them. This
 * might lead to split and split ranges are put in ced->mem.ranges[] array
 */
static int elf_header_exclude_ranges(struct crash_elf_data *ced,
		unsigned long long mstart, unsigned long long mend)
{
	struct crash_mem *cmem = &ced->mem;
	int ret = 0;

	memset(cmem->ranges, 0, sizeof(cmem->ranges));

	cmem->ranges[0].start = mstart;
	cmem->ranges[0].end = mend;
	cmem->nr_ranges = 1;

	/* Exclude crashkernel region */
	ret = exclude_mem_range(cmem, crashk_res.start, crashk_res.end);
	if (ret)
		return ret;

	if (crashk_low_res.end) {
		ret = exclude_mem_range(cmem, crashk_low_res.start,
					crashk_low_res.end);
		if (ret)
			return ret;
	}

	return ret;
}

static int prepare_elf64_ram_headers_callback(u64 start, u64 end, void *arg)
{
	struct crash_elf_data *ced = arg;
	Elf64_Ehdr *ehdr;
	Elf64_Phdr *phdr;
	unsigned long mstart, mend;
	struct kimage *image = ced->image;
	struct crash_mem *cmem;
	int ret, i;

	ehdr = ced->ehdr;

	/* Exclude unwanted mem ranges */
	ret = elf_header_exclude_ranges(ced, start, end);
	if (ret)
		return ret;

	/* Go through all the ranges in ced->mem.ranges[] and prepare phdr */
	cmem = &ced->mem;

	for (i = 0; i < cmem->nr_ranges; i++) {
		mstart = cmem->ranges[i].start;
		mend = cmem->ranges[i].end;
		phdr = ced->bufp;
		ced->bufp += sizeof(Elf64_Phdr);
		phdr->p_type = PT_LOAD;
		phdr->p_flags = PF_R|PF_W|PF_X;
		phdr->p_offset = mstart;

		/*
		 * If a range matches backup region, adjust offset to backup
		 * segment.
		 */
		if (mstart == image->arch.backup_src_start &&
		    (mend - mstart + 1) == image->arch.backup_src_sz)
			phdr->p_offset = image->arch.backup_load_addr;

		phdr->p_paddr = mstart;
		phdr->p_vaddr = (unsigned long long) __va(mstart);
		phdr->p_filesz = phdr->p_memsz = mend - mstart + 1;
		phdr->p_align = 0;
		ehdr->e_phnum++;
		pr_debug("Crash PT_LOAD elf header. phdr=%p vaddr=0x%llx, paddr=0x%llx, sz=0x%llx e_phnum=%d p_offset=0x%llx\n",
			 phdr, phdr->p_vaddr, phdr->p_paddr, phdr->p_filesz,
			 ehdr->e_phnum, phdr->p_offset);
	}

	return ret;
}
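
/*
 * Illustrative example (addresses assumed): if the first 640K
 * [0, 0x9ffff] is preserved at backup_load_addr 0x1000000, its PT_LOAD
 * header keeps p_paddr = 0 but gets p_offset = 0x1000000, so a
 * /proc/vmcore read of that region returns the preserved copy instead of
 * memory the crash kernel may have scribbled on.
 */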

static int prepare_elf64_headers(struct crash_elf_data *ced,
		void **addr, unsigned long *sz)
{
	Elf64_Ehdr *ehdr;
	Elf64_Phdr *phdr;
	unsigned long nr_cpus = num_possible_cpus(), nr_phdr, elf_sz;
	unsigned char *buf, *bufp;
	unsigned int cpu;
	unsigned long long notes_addr;
	int ret;

	/* extra phdr for vmcoreinfo elf note */
	nr_phdr = nr_cpus + 1;
	nr_phdr += ced->max_nr_ranges;

	/*
	 * kexec-tools creates an extra PT_LOAD phdr for kernel text mapping
	 * area on x86_64 (ffffffff80000000 - ffffffffa0000000).
	 * I think this is required by tools like gdb. So same physical
	 * memory will be mapped in two elf headers. One will contain kernel
	 * text virtual addresses and other will have __va(physical) addresses.
	 */
	nr_phdr++;
	elf_sz = sizeof(Elf64_Ehdr) + nr_phdr * sizeof(Elf64_Phdr);
	elf_sz = ALIGN(elf_sz, ELF_CORE_HEADER_ALIGN);

	buf = vzalloc(elf_sz);
	if (!buf)
		return -ENOMEM;

	bufp = buf;
	ehdr = (Elf64_Ehdr *)bufp;
	bufp += sizeof(Elf64_Ehdr);
	memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
	ehdr->e_ident[EI_CLASS] = ELFCLASS64;
	ehdr->e_ident[EI_DATA] = ELFDATA2LSB;
	ehdr->e_ident[EI_VERSION] = EV_CURRENT;
	ehdr->e_ident[EI_OSABI] = ELF_OSABI;
	memset(ehdr->e_ident + EI_PAD, 0, EI_NIDENT - EI_PAD);
	ehdr->e_type = ET_CORE;
	ehdr->e_machine = ELF_ARCH;
	ehdr->e_version = EV_CURRENT;
	ehdr->e_phoff = sizeof(Elf64_Ehdr);
	ehdr->e_ehsize = sizeof(Elf64_Ehdr);
	ehdr->e_phentsize = sizeof(Elf64_Phdr);

	/* Prepare one phdr of type PT_NOTE for each present cpu */
	for_each_present_cpu(cpu) {
		phdr = (Elf64_Phdr *)bufp;
		bufp += sizeof(Elf64_Phdr);
		phdr->p_type = PT_NOTE;
		notes_addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpu));
		phdr->p_offset = phdr->p_paddr = notes_addr;
		phdr->p_filesz = phdr->p_memsz = sizeof(note_buf_t);
		(ehdr->e_phnum)++;
	}

	/* Prepare one PT_NOTE header for vmcoreinfo */
	phdr = (Elf64_Phdr *)bufp;
	bufp += sizeof(Elf64_Phdr);
	phdr->p_type = PT_NOTE;
	phdr->p_offset = phdr->p_paddr = paddr_vmcoreinfo_note();
	phdr->p_filesz = phdr->p_memsz = VMCOREINFO_NOTE_SIZE;
	(ehdr->e_phnum)++;

#ifdef CONFIG_X86_64
	/* Prepare PT_LOAD type program header for kernel text region */
	phdr = (Elf64_Phdr *)bufp;
	bufp += sizeof(Elf64_Phdr);
	phdr->p_type = PT_LOAD;
	phdr->p_flags = PF_R|PF_W|PF_X;
	phdr->p_vaddr = (Elf64_Addr)_text;
	phdr->p_filesz = phdr->p_memsz = _end - _text;
	phdr->p_offset = phdr->p_paddr = __pa_symbol(_text);
	(ehdr->e_phnum)++;
#endif

	/* Prepare PT_LOAD headers for system ram chunks. */
	ced->ehdr = ehdr;
	ced->bufp = bufp;
	ret = walk_system_ram_res(0, -1, ced,
				  prepare_elf64_ram_headers_callback);
	if (ret < 0) {
		/* Free the header buffer on error so it is not leaked */
		vfree(buf);
		return ret;
	}

	*addr = buf;
	*sz = elf_sz;
	return 0;
}
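
/*
 * Sizing sketch (numbers assumed for illustration): with 8 possible CPUs
 * and max_nr_ranges = 6, nr_phdr = 8 + 1 + 6 + 1 = 16, giving
 * elf_sz = 64 + 16 * 56 = 960 bytes, which ALIGN() rounds up to a single
 * 4096-byte ELF_CORE_HEADER_ALIGN chunk.
 */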

/* Prepare elf headers. Return addr and size */
static int prepare_elf_headers(struct kimage *image, void **addr,
			       unsigned long *sz)
{
	struct crash_elf_data *ced;
	int ret;

	ced = kzalloc(sizeof(*ced), GFP_KERNEL);
	if (!ced)
		return -ENOMEM;

	fill_up_crash_elf_data(ced, image);

	/* By default prepare 64bit headers */
	ret = prepare_elf64_headers(ced, addr, sz);
	kfree(ced);
	return ret;
}

static int add_e820_entry(struct boot_params *params, struct e820_entry *entry)
{
	unsigned int nr_e820_entries;

	nr_e820_entries = params->e820_entries;
	if (nr_e820_entries >= E820_MAX_ENTRIES_ZEROPAGE)
		return 1;

	memcpy(&params->e820_table[nr_e820_entries], entry,
	       sizeof(struct e820_entry));
	params->e820_entries++;
	return 0;
}

static int memmap_entry_callback(u64 start, u64 end, void *arg)
{
	struct crash_memmap_data *cmd = arg;
	struct boot_params *params = cmd->params;
	struct e820_entry ei;

	ei.addr = start;
	ei.size = end - start + 1;
	ei.type = cmd->type;
	add_e820_entry(params, &ei);

	return 0;
}

static int memmap_exclude_ranges(struct kimage *image, struct crash_mem *cmem,
				 unsigned long long mstart,
				 unsigned long long mend)
{
	unsigned long start, end;
	int ret;

	cmem->ranges[0].start = mstart;
	cmem->ranges[0].end = mend;
	cmem->nr_ranges = 1;

	/* Exclude Backup region */
	start = image->arch.backup_load_addr;
	end = start + image->arch.backup_src_sz - 1;
	ret = exclude_mem_range(cmem, start, end);
	if (ret)
		return ret;

	/* Exclude elf header region */
	start = image->arch.elf_load_addr;
	end = start + image->arch.elf_headers_sz - 1;
	return exclude_mem_range(cmem, start, end);
}

/* Prepare memory map for crash dump kernel */
int crash_setup_memmap_entries(struct kimage *image, struct boot_params *params)
{
	int i, ret = 0;
	unsigned long flags;
	struct e820_entry ei;
	struct crash_memmap_data cmd;
	struct crash_mem *cmem;

	cmem = vzalloc(struct_size(cmem, ranges, 1));
	if (!cmem)
		return -ENOMEM;

	memset(&cmd, 0, sizeof(struct crash_memmap_data));
	cmd.params = params;

	/* Add first 640K segment */
	ei.addr = image->arch.backup_src_start;
	ei.size = image->arch.backup_src_sz;
	ei.type = E820_TYPE_RAM;
	add_e820_entry(params, &ei);

	/* Add ACPI tables */
	cmd.type = E820_TYPE_ACPI;
	flags = IORESOURCE_MEM | IORESOURCE_BUSY;
	walk_iomem_res_desc(IORES_DESC_ACPI_TABLES, flags, 0, -1, &cmd,
			    memmap_entry_callback);

	/* Add ACPI Non-volatile Storage */
	cmd.type = E820_TYPE_NVS;
	walk_iomem_res_desc(IORES_DESC_ACPI_NV_STORAGE, flags, 0, -1, &cmd,
			    memmap_entry_callback);

	/* Add crashk_low_res region */
	if (crashk_low_res.end) {
		ei.addr = crashk_low_res.start;
		ei.size = crashk_low_res.end - crashk_low_res.start + 1;
		ei.type = E820_TYPE_RAM;
		add_e820_entry(params, &ei);
	}

	/* Exclude some ranges from crashk_res and add rest to memmap */
	ret = memmap_exclude_ranges(image, cmem, crashk_res.start,
				    crashk_res.end);
	if (ret)
		goto out;

	for (i = 0; i < cmem->nr_ranges; i++) {
		ei.size = cmem->ranges[i].end - cmem->ranges[i].start + 1;

		/* If entry is less than a page, skip it */
		if (ei.size < PAGE_SIZE)
			continue;
		ei.addr = cmem->ranges[i].start;
		ei.type = E820_TYPE_RAM;
		add_e820_entry(params, &ei);
	}

out:
	vfree(cmem);
	return ret;
}
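
/*
 * Resulting map sketch (addresses assumed): with crashk_res at, say,
 * [0x6f000000, 0x7effffff], the kdump kernel typically sees RAM entries
 * for the backed-up first 640K, crashk_low_res if present, and whatever
 * remains of crashk_res once the backup and ELF header segments are
 * carved out, alongside the ACPI table/NVS entries added above.
 */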

static int determine_backup_region(u64 start, u64 end, void *arg)
{
	struct kimage *image = arg;

	image->arch.backup_src_start = start;
	image->arch.backup_src_sz = end - start + 1;

	/* Expecting only one range for backup region */
	return 1;
}

int crash_load_segments(struct kimage *image)
{
	int ret;
	struct kexec_buf kbuf = { .image = image, .buf_min = 0,
				  .buf_max = ULONG_MAX, .top_down = false };

	/*
	 * Determine and load a segment for backup area. First 640K RAM
	 * region is backup source
	 */
	ret = walk_system_ram_res(KEXEC_BACKUP_SRC_START, KEXEC_BACKUP_SRC_END,
				  image, determine_backup_region);

	/* Zero or positive return values are ok */
	if (ret < 0)
		return ret;

	/* Add backup segment. */
	if (image->arch.backup_src_sz) {
		kbuf.buffer = &crash_zero_bytes;
		kbuf.bufsz = sizeof(crash_zero_bytes);
		kbuf.memsz = image->arch.backup_src_sz;
		kbuf.buf_align = PAGE_SIZE;
		/*
		 * Ideally there is no source for backup segment. This is
		 * copied in purgatory after crash. Just add a zero filled
		 * segment for now to make sure checksum logic works fine.
		 */
		ret = kexec_add_buffer(&kbuf);
		if (ret)
			return ret;
		image->arch.backup_load_addr = kbuf.mem;
		pr_debug("Loaded backup region at 0x%lx backup_start=0x%lx memsz=0x%lx\n",
			 image->arch.backup_load_addr,
			 image->arch.backup_src_start, kbuf.memsz);
	}

	/* Prepare elf headers and add a segment */
	ret = prepare_elf_headers(image, &kbuf.buffer, &kbuf.bufsz);
	if (ret)
		return ret;

	image->arch.elf_headers = kbuf.buffer;
	image->arch.elf_headers_sz = kbuf.bufsz;

	kbuf.memsz = kbuf.bufsz;
	kbuf.buf_align = ELF_CORE_HEADER_ALIGN;
	ret = kexec_add_buffer(&kbuf);
	if (ret) {
		vfree((void *)image->arch.elf_headers);
		return ret;
	}
	image->arch.elf_load_addr = kbuf.mem;
	pr_debug("Loaded ELF headers at 0x%lx bufsz=0x%lx memsz=0x%lx\n",
		 image->arch.elf_load_addr, kbuf.bufsz, kbuf.memsz);

	return ret;
}
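
/*
 * Flow sketch (informational): on the kexec_file_load() path for a crash
 * kernel, the x86 bzImage loader is expected to call crash_load_segments()
 * to stage the backup and ELF header segments, and later
 * crash_setup_memmap_entries() to build the e820 table in boot_params that
 * the kdump kernel boots with.
 */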

#endif /* CONFIG_KEXEC_FILE */