// SPDX-License-Identifier: GPL-2.0-only
/*
 * crash.c - kernel crash support code.
 * Copyright (C) 2002-2004 Eric Biederman <ebiederm@xmission.com>
 */

#include <linux/buildid.h>
#include <linux/init.h>
#include <linux/utsname.h>
#include <linux/vmalloc.h>
#include <linux/sizes.h>
#include <linux/kexec.h>
#include <linux/memory.h>
#include <linux/cpuhotplug.h>
#include <linux/memblock.h>
#include <linux/kmemleak.h>

#include <asm/sections.h>

#include <crypto/sha1.h>

#include "kallsyms_internal.h"
#include "kexec_internal.h"
/* Per cpu memory for storing cpu states in case of system crash. */
note_buf_t __percpu *crash_notes;

/* vmcoreinfo stuff */
unsigned char *vmcoreinfo_data;
size_t vmcoreinfo_size;
u32 *vmcoreinfo_note;

/* trusted vmcoreinfo, e.g. we can make a copy in the crash memory */
static unsigned char *vmcoreinfo_data_safecopy;
/* Location of the reserved area for the crash kernel */
struct resource crashk_res = {
	.name  = "Crash kernel",
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
	.desc  = IORES_DESC_CRASH_KERNEL
};
struct resource crashk_low_res = {
	.name  = "Crash kernel",
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
	.desc  = IORES_DESC_CRASH_KERNEL
};
/*
 * parsing the "crashkernel" commandline
 *
 * this code is intended to be called from architecture specific code
 */

/*
 * This function parses command lines in the format
 *
 *	crashkernel=ramsize-range:size[,...][@offset]
 *
 * The function returns 0 on success and -EINVAL on failure.
 */
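/*
 * Illustrative example (not part of the original source): with
 *
 *	crashkernel=512M-2G:64M,2G-:128M@16M
 *
 * 64M is reserved when system RAM falls in [512M, 2G), 128M when it is
 * 2G or more, and the optional @16M requests that base address.
 */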
static int __init parse_crashkernel_mem(char *cmdline,
					unsigned long long system_ram,
					unsigned long long *crash_size,
					unsigned long long *crash_base)
{
	char *cur = cmdline, *tmp;
	unsigned long long total_mem = system_ram;

	/*
	 * Firmware sometimes reserves some memory regions for its own use,
	 * so the system memory size is less than the actual physical memory
	 * size. Work around this by rounding up the total size to 128M,
	 * which is enough for most test cases.
	 */
	total_mem = roundup(total_mem, SZ_128M);
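	/*
	 * Illustrative example (not part of the original source): firmware
	 * reporting 4072M of usable RAM is treated as
	 * roundup(4072M, SZ_128M) = 4096M by the range matching below.
	 */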
	/* for each entry of the comma-separated list */
	do {
		unsigned long long start, end = ULLONG_MAX, size;

		/* get the start of the range */
		start = memparse(cur, &tmp);
		if (cur == tmp) {
			pr_warn("crashkernel: Memory value expected\n");
			return -EINVAL;
		}
		cur = tmp;
		if (*cur != '-') {
			pr_warn("crashkernel: '-' expected\n");
			return -EINVAL;
		}
		cur++;

		/* if no ':' is here, then we read the end */
		if (*cur != ':') {
			end = memparse(cur, &tmp);
			if (cur == tmp) {
				pr_warn("crashkernel: Memory value expected\n");
				return -EINVAL;
			}
			cur = tmp;
			if (end <= start) {
				pr_warn("crashkernel: end <= start\n");
				return -EINVAL;
			}
		}

		if (*cur != ':') {
			pr_warn("crashkernel: ':' expected\n");
			return -EINVAL;
		}
		cur++;

		size = memparse(cur, &tmp);
		if (cur == tmp) {
			pr_warn("Memory value expected\n");
			return -EINVAL;
		}
		cur = tmp;
		if (size >= total_mem) {
			pr_warn("crashkernel: invalid size\n");
			return -EINVAL;
		}

		/* match ? */
		if (total_mem >= start && total_mem < end) {
			*crash_size = size;
			break;
		}
	} while (*cur++ == ',');

	if (*crash_size > 0) {
		while (*cur && *cur != ' ' && *cur != '@')
			cur++;
		if (*cur == '@') {
			cur++;
			*crash_base = memparse(cur, &tmp);
			if (cur == tmp) {
				pr_warn("Memory value expected after '@'\n");
				return -EINVAL;
			}
		}
	} else
		pr_info("crashkernel size resulted in zero bytes\n");

	return 0;
}
/*
 * This function parses "simple" (old) crashkernel command lines like
 *
 *	crashkernel=size[@offset]
 *
 * It returns 0 on success and -EINVAL on failure.
 */
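/*
 * Illustrative example (not part of the original source):
 * "crashkernel=256M@16M" asks for a 256M reservation starting at
 * physical address 16M; without the @offset part the kernel chooses
 * the base itself.
 */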
static int __init parse_crashkernel_simple(char *cmdline,
					   unsigned long long *crash_size,
					   unsigned long long *crash_base)
{
	char *cur = cmdline;

	*crash_size = memparse(cmdline, &cur);
	if (cmdline == cur) {
		pr_warn("crashkernel: memory value expected\n");
		return -EINVAL;
	}

	if (*cur == '@')
		*crash_base = memparse(cur+1, &cur);
	else if (*cur != ' ' && *cur != '\0') {
		pr_warn("crashkernel: unrecognized char: %c\n", *cur);
		return -EINVAL;
	}

	return 0;
}
#define SUFFIX_HIGH 0
#define SUFFIX_LOW  1
#define SUFFIX_NULL 2
static __initdata char *suffix_tbl[] = {
	[SUFFIX_HIGH] = ",high",
	[SUFFIX_LOW] = ",low",
	[SUFFIX_NULL] = NULL,
};
/*
 * This function parses "suffix" crashkernel command lines like
 *
 *	crashkernel=size,[high|low]
 *
 * It returns 0 on success and -EINVAL on failure.
 */
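/*
 * Illustrative example (not part of the original source):
 * "crashkernel=512M,high" requests 512M that may sit above
 * CRASH_ADDR_LOW_MAX (typically 4G), while "crashkernel=256M,low"
 * sizes the companion low-memory reservation used for devices that
 * need 32-bit addressable DMA buffers.
 */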
static int __init parse_crashkernel_suffix(char *cmdline,
					   unsigned long long *crash_size,
					   const char *suffix)
{
	char *cur = cmdline;

	*crash_size = memparse(cmdline, &cur);
	if (cmdline == cur) {
		pr_warn("crashkernel: memory value expected\n");
		return -EINVAL;
	}

	/* check with suffix */
	if (strncmp(cur, suffix, strlen(suffix))) {
		pr_warn("crashkernel: unrecognized char: %c\n", *cur);
		return -EINVAL;
	}
	cur += strlen(suffix);
	if (*cur != ' ' && *cur != '\0') {
		pr_warn("crashkernel: unrecognized char: %c\n", *cur);
		return -EINVAL;
	}

	return 0;
}
static __init char *get_last_crashkernel(char *cmdline,
					 const char *name,
					 const char *suffix)
{
	char *p = cmdline, *ck_cmdline = NULL;

	/* find crashkernel and use the last one if there are more */
	p = strstr(p, name);
	while (p) {
		char *end_p = strchr(p, ' ');
		char *q;

		if (!end_p)
			end_p = p + strlen(p);

		if (!suffix) {
			int i;

			/* skip the one with any known suffix */
			for (i = 0; suffix_tbl[i]; i++) {
				q = end_p - strlen(suffix_tbl[i]);
				if (!strncmp(q, suffix_tbl[i],
					     strlen(suffix_tbl[i])))
					goto next;
			}
			ck_cmdline = p;
		} else {
			q = end_p - strlen(suffix);
			if (!strncmp(q, suffix, strlen(suffix)))
				ck_cmdline = p;
		}
next:
		p = strstr(p+1, name);
	}

	return ck_cmdline;
}
static int __init __parse_crashkernel(char *cmdline,
				      unsigned long long system_ram,
				      unsigned long long *crash_size,
				      unsigned long long *crash_base,
				      const char *suffix)
{
	char *first_colon, *first_space;
	char *ck_cmdline;
	char *name = "crashkernel=";

	BUG_ON(!crash_size || !crash_base);
	*crash_size = 0;
	*crash_base = 0;

	ck_cmdline = get_last_crashkernel(cmdline, name, suffix);
	if (!ck_cmdline)
		return -ENOENT;

	ck_cmdline += strlen(name);

	if (suffix)
		return parse_crashkernel_suffix(ck_cmdline, crash_size,
				suffix);
	/*
	 * if the commandline contains a ':', then that's the extended
	 * syntax -- if not, it must be the classic syntax
	 */
	first_colon = strchr(ck_cmdline, ':');
	first_space = strchr(ck_cmdline, ' ');
	if (first_colon && (!first_space || first_colon < first_space))
		return parse_crashkernel_mem(ck_cmdline, system_ram,
				crash_size, crash_base);

	return parse_crashkernel_simple(ck_cmdline, crash_size, crash_base);
}
/*
 * This function is the entry point for command line parsing and should be
 * called from the arch-specific code.
 *
 * If crashkernel=,high|low is supported on the architecture, non-NULL values
 * should be passed to parameters 'low_size' and 'high'.
 */
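/*
 * Minimal usage sketch (illustrative only, not part of this file); an
 * architecture's early reservation path commonly does something like:
 *
 *	unsigned long long crash_size, crash_base, low_size = 0;
 *	bool high = false;
 *	int ret;
 *
 *	ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
 *				&crash_size, &crash_base, &low_size, &high);
 *	if (ret)
 *		return;
 *	reserve_crashkernel_generic(boot_command_line, crash_size,
 *				    crash_base, low_size, high);
 */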
int __init parse_crashkernel(char *cmdline,
			     unsigned long long system_ram,
			     unsigned long long *crash_size,
			     unsigned long long *crash_base,
			     unsigned long long *low_size,
			     bool *high)
{
	int ret;

	/* crashkernel=X[@offset] */
	ret = __parse_crashkernel(cmdline, system_ram, crash_size,
				crash_base, NULL);
#ifdef CONFIG_ARCH_HAS_GENERIC_CRASHKERNEL_RESERVATION
	/*
	 * If non-NULL 'high' passed in and no normal crashkernel
	 * setting detected, try parsing crashkernel=,high|low.
	 */
	if (high && ret == -ENOENT) {
		ret = __parse_crashkernel(cmdline, 0, crash_size,
				crash_base, suffix_tbl[SUFFIX_HIGH]);
		if (ret || !*crash_size)
			return -EINVAL;

		/*
		 * crashkernel=Y,low can be specified or not, but invalid value
		 * is not allowed.
		 */
		ret = __parse_crashkernel(cmdline, 0, low_size,
				crash_base, suffix_tbl[SUFFIX_LOW]);
		if (ret == -ENOENT) {
			*low_size = DEFAULT_CRASH_KERNEL_LOW_SIZE;
			ret = 0;
		} else if (ret) {
			return ret;
		}

		*high = true;
	}
#endif
	if (!*crash_size)
		ret = -EINVAL;

	return ret;
}

/*
 * Add a dummy early_param handler to mark crashkernel= as a known command line
 * parameter and suppress incorrect warnings in init/main.c.
 */
static int __init parse_crashkernel_dummy(char *arg)
{
	return 0;
}
early_param("crashkernel", parse_crashkernel_dummy);
#ifdef CONFIG_ARCH_HAS_GENERIC_CRASHKERNEL_RESERVATION
static int __init reserve_crashkernel_low(unsigned long long low_size)
{
	unsigned long long low_base;

	low_base = memblock_phys_alloc_range(low_size, CRASH_ALIGN, 0, CRASH_ADDR_LOW_MAX);
	if (!low_base) {
		pr_err("cannot allocate crashkernel low memory (size:0x%llx).\n", low_size);
		return -ENOMEM;
	}

	pr_info("crashkernel low memory reserved: 0x%08llx - 0x%08llx (%lld MB)\n",
		low_base, low_base + low_size, low_size >> 20);

	crashk_low_res.start = low_base;
	crashk_low_res.end   = low_base + low_size - 1;
#ifdef HAVE_ARCH_ADD_CRASH_RES_TO_IOMEM_EARLY
	insert_resource(&iomem_resource, &crashk_low_res);
#endif
	return 0;
}
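/*
 * Illustrative example (not part of the original source): with
 * "crashkernel=1G,high crashkernel=256M,low" the generic helper below
 * tries to place 1G between CRASH_ADDR_LOW_MAX and CRASH_ADDR_HIGH_MAX
 * and, when the final base ends up at or above CRASH_ADDR_LOW_MAX, also
 * reserves 256M of low memory via reserve_crashkernel_low().
 */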
void __init reserve_crashkernel_generic(char *cmdline,
					unsigned long long crash_size,
					unsigned long long crash_base,
					unsigned long long crash_low_size,
					bool high)
{
	unsigned long long search_end = CRASH_ADDR_LOW_MAX, search_base = 0;
	bool fixed_base = false;

	/* User specifies base address explicitly. */
	if (crash_base) {
		fixed_base = true;
		search_base = crash_base;
		search_end = crash_base + crash_size;
	} else if (high) {
		search_base = CRASH_ADDR_LOW_MAX;
		search_end = CRASH_ADDR_HIGH_MAX;
	}

retry:
	crash_base = memblock_phys_alloc_range(crash_size, CRASH_ALIGN,
					       search_base, search_end);
	if (!crash_base) {
		/*
		 * For crashkernel=size[KMG]@offset[KMG], print out a failure
		 * message if the specified region can't be reserved.
		 */
		if (fixed_base) {
			pr_warn("crashkernel reservation failed - memory is in use.\n");
			return;
		}

		/*
		 * For crashkernel=size[KMG], if the first attempt was for
		 * low memory, fall back to high memory, the minimum required
		 * low memory will be reserved later.
		 */
		if (!high && search_end == CRASH_ADDR_LOW_MAX) {
			search_end = CRASH_ADDR_HIGH_MAX;
			search_base = CRASH_ADDR_LOW_MAX;
			crash_low_size = DEFAULT_CRASH_KERNEL_LOW_SIZE;
			goto retry;
		}

		/*
		 * For crashkernel=size[KMG],high, if the first attempt was
		 * for high memory, fall back to low memory.
		 */
		if (high && search_end == CRASH_ADDR_HIGH_MAX) {
			search_end = CRASH_ADDR_LOW_MAX;
			search_base = 0;
			goto retry;
		}
		pr_warn("cannot allocate crashkernel (size:0x%llx)\n",
			crash_size);
		return;
	}

	if ((crash_base >= CRASH_ADDR_LOW_MAX) &&
	    crash_low_size && reserve_crashkernel_low(crash_low_size)) {
		memblock_phys_free(crash_base, crash_size);
		return;
	}

	pr_info("crashkernel reserved: 0x%016llx - 0x%016llx (%lld MB)\n",
		crash_base, crash_base + crash_size, crash_size >> 20);

	/*
	 * The crashkernel memory will be removed from the kernel linear
	 * map. Inform kmemleak so that it won't try to access it.
	 */
	kmemleak_ignore_phys(crash_base);
	if (crashk_low_res.end)
		kmemleak_ignore_phys(crashk_low_res.start);

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
#ifdef HAVE_ARCH_ADD_CRASH_RES_TO_IOMEM_EARLY
	insert_resource(&iomem_resource, &crashk_res);
#endif
}
#ifndef HAVE_ARCH_ADD_CRASH_RES_TO_IOMEM_EARLY
static __init int insert_crashkernel_resources(void)
{
	if (crashk_res.start < crashk_res.end)
		insert_resource(&iomem_resource, &crashk_res);

	if (crashk_low_res.start < crashk_low_res.end)
		insert_resource(&iomem_resource, &crashk_low_res);

	return 0;
}
early_initcall(insert_crashkernel_resources);
#endif
#endif
int crash_prepare_elf64_headers(struct crash_mem *mem, int need_kernel_map,
				void **addr, unsigned long *sz)
{
	Elf64_Ehdr *ehdr;
	Elf64_Phdr *phdr;
	unsigned long nr_cpus = num_possible_cpus(), nr_phdr, elf_sz;
	unsigned char *buf;
	unsigned int cpu, i;
	unsigned long long notes_addr;
	unsigned long mstart, mend;

	/* extra phdr for vmcoreinfo ELF note */
	nr_phdr = nr_cpus + 1;
	nr_phdr += mem->nr_ranges;

	/*
	 * kexec-tools creates an extra PT_LOAD phdr for kernel text mapping
	 * area (for example, ffffffff80000000 - ffffffffa0000000 on x86_64).
	 * I think this is required by tools like gdb. So the same physical
	 * memory will be mapped in two ELF headers. One will contain kernel
	 * text virtual addresses and the other will have __va(physical)
	 * addresses.
	 */
	nr_phdr++;
	elf_sz = sizeof(Elf64_Ehdr) + nr_phdr * sizeof(Elf64_Phdr);
	elf_sz = ALIGN(elf_sz, ELF_CORE_HEADER_ALIGN);
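	/*
	 * Illustrative sizing example (not part of the original source):
	 * with 8 possible CPUs and 10 memory ranges, nr_phdr is
	 * 8 + 1 (vmcoreinfo) + 10 + 1 (kernel text) = 20, so elf_sz is
	 * 64 + 20 * 56 = 1184 bytes before being aligned up to
	 * ELF_CORE_HEADER_ALIGN.
	 */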
	buf = vzalloc(elf_sz);
	if (!buf)
		return -ENOMEM;

	ehdr = (Elf64_Ehdr *)buf;
	phdr = (Elf64_Phdr *)(ehdr + 1);
	memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
	ehdr->e_ident[EI_CLASS] = ELFCLASS64;
	ehdr->e_ident[EI_DATA] = ELFDATA2LSB;
	ehdr->e_ident[EI_VERSION] = EV_CURRENT;
	ehdr->e_ident[EI_OSABI] = ELF_OSABI;
	memset(ehdr->e_ident + EI_PAD, 0, EI_NIDENT - EI_PAD);
	ehdr->e_type = ET_CORE;
	ehdr->e_machine = ELF_ARCH;
	ehdr->e_version = EV_CURRENT;
	ehdr->e_phoff = sizeof(Elf64_Ehdr);
	ehdr->e_ehsize = sizeof(Elf64_Ehdr);
	ehdr->e_phentsize = sizeof(Elf64_Phdr);
	/* Prepare one phdr of type PT_NOTE for each possible CPU */
	for_each_possible_cpu(cpu) {
		phdr->p_type = PT_NOTE;
		notes_addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpu));
		phdr->p_offset = phdr->p_paddr = notes_addr;
		phdr->p_filesz = phdr->p_memsz = sizeof(note_buf_t);
		(ehdr->e_phnum)++;
		phdr++;
	}

	/* Prepare one PT_NOTE header for vmcoreinfo */
	phdr->p_type = PT_NOTE;
	phdr->p_offset = phdr->p_paddr = paddr_vmcoreinfo_note();
	phdr->p_filesz = phdr->p_memsz = VMCOREINFO_NOTE_SIZE;
	(ehdr->e_phnum)++;
	phdr++;

	/* Prepare PT_LOAD type program header for kernel text region */
	if (need_kernel_map) {
		phdr->p_type = PT_LOAD;
		phdr->p_flags = PF_R|PF_W|PF_X;
		phdr->p_vaddr = (unsigned long) _text;
		phdr->p_filesz = phdr->p_memsz = _end - _text;
		phdr->p_offset = phdr->p_paddr = __pa_symbol(_text);
		(ehdr->e_phnum)++;
		phdr++;
	}

	/* Go through all the ranges in mem->ranges[] and prepare phdr */
	for (i = 0; i < mem->nr_ranges; i++) {
		mstart = mem->ranges[i].start;
		mend = mem->ranges[i].end;

		phdr->p_type = PT_LOAD;
		phdr->p_flags = PF_R|PF_W|PF_X;
		phdr->p_offset = mstart;

		phdr->p_paddr = mstart;
		phdr->p_vaddr = (unsigned long) __va(mstart);
		phdr->p_filesz = phdr->p_memsz = mend - mstart + 1;
		ehdr->e_phnum++;
#ifdef CONFIG_KEXEC_FILE
		kexec_dprintk("Crash PT_LOAD ELF header. phdr=%p vaddr=0x%llx, paddr=0x%llx, sz=0x%llx e_phnum=%d p_offset=0x%llx\n",
			      phdr, phdr->p_vaddr, phdr->p_paddr, phdr->p_filesz,
			      ehdr->e_phnum, phdr->p_offset);
#endif
		phdr++;
	}

	*addr = buf;
	*sz = elf_sz;
	return 0;
}
int crash_exclude_mem_range(struct crash_mem *mem,
			    unsigned long long mstart, unsigned long long mend)
{
	int i;
	unsigned long long start, end, p_start, p_end;

	for (i = 0; i < mem->nr_ranges; i++) {
		start = mem->ranges[i].start;
		end = mem->ranges[i].end;
		p_start = mstart;
		p_end = mend;

		if (p_start > end)
			continue;

		/*
		 * Because the memory ranges in mem->ranges are stored in
		 * ascending order, when we detect `p_end < start`, we can
		 * immediately exit the for loop, as the subsequent memory
		 * ranges will definitely be outside the range we are looking
		 * for.
		 */
		if (p_end < start)
			break;

		/* Truncate any area outside of range */
		if (p_start < start)
			p_start = start;
		if (p_end > end)
			p_end = end;

		/* Found completely overlapping range */
		if (p_start == start && p_end == end) {
			memmove(&mem->ranges[i], &mem->ranges[i + 1],
				(mem->nr_ranges - (i + 1)) * sizeof(mem->ranges[i]));
			i--;
			mem->nr_ranges--;
		} else if (p_start > start && p_end < end) {
			/* Split original range */
			if (mem->nr_ranges >= mem->max_nr_ranges)
				return -ENOMEM;

			memmove(&mem->ranges[i + 2], &mem->ranges[i + 1],
				(mem->nr_ranges - (i + 1)) * sizeof(mem->ranges[i]));

			mem->ranges[i].end = p_start - 1;
			mem->ranges[i + 1].start = p_end + 1;
			mem->ranges[i + 1].end = end;

			i++;
			mem->nr_ranges++;
		} else if (p_start != start)
			mem->ranges[i].end = p_start - 1;
		else
			mem->ranges[i].start = p_end + 1;
	}

	return 0;
}
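/*
 * Illustrative example (not part of the original source): excluding
 * [0x200000, 0x2fffff] from a crash_mem holding the single range
 * [0x100000, 0x3fffff] splits it into [0x100000, 0x1fffff] and
 * [0x300000, 0x3fffff], consuming one extra slot in mem->ranges[].
 */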
Elf_Word *append_elf_note(Elf_Word *buf, char *name, unsigned int type,
			  void *data, size_t data_len)
{
	struct elf_note *note = (struct elf_note *)buf;

	note->n_namesz = strlen(name) + 1;
	note->n_descsz = data_len;
	note->n_type   = type;
	buf += DIV_ROUND_UP(sizeof(*note), sizeof(Elf_Word));
	memcpy(buf, name, note->n_namesz);
	buf += DIV_ROUND_UP(note->n_namesz, sizeof(Elf_Word));
	memcpy(buf, data, data_len);
	buf += DIV_ROUND_UP(data_len, sizeof(Elf_Word));

	return buf;
}
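/*
 * Illustrative layout sketch (not part of the original source): for a
 * name of "CORE" and an 8-byte payload, the three 32-bit header words
 * (n_namesz = 5, n_descsz = 8, n_type) are followed by "CORE\0" padded
 * to 8 bytes and then the 8 payload bytes; buf is returned pointing
 * just past that word-aligned total.
 */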
void final_note(Elf_Word *buf)
{
	memset(buf, 0, sizeof(struct elf_note));
}

static void update_vmcoreinfo_note(void)
{
	u32 *buf = vmcoreinfo_note;

	if (!vmcoreinfo_size)
		return;
	buf = append_elf_note(buf, VMCOREINFO_NOTE_NAME, 0, vmcoreinfo_data,
			      vmcoreinfo_size);
	final_note(buf);
}

void crash_update_vmcoreinfo_safecopy(void *ptr)
{
	if (ptr)
		memcpy(ptr, vmcoreinfo_data, vmcoreinfo_size);

	vmcoreinfo_data_safecopy = ptr;
}
void crash_save_vmcoreinfo(void)
{
	if (!vmcoreinfo_note)
		return;

	/* Use the safe copy to generate the vmcoreinfo note if we have one */
	if (vmcoreinfo_data_safecopy)
		vmcoreinfo_data = vmcoreinfo_data_safecopy;

	vmcoreinfo_append_str("CRASHTIME=%lld\n", ktime_get_real_seconds());
	update_vmcoreinfo_note();
}

void vmcoreinfo_append_str(const char *fmt, ...)
{
	va_list args;
	char buf[0x50];
	size_t r;

	va_start(args, fmt);
	r = vscnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);

	r = min(r, (size_t)VMCOREINFO_BYTES - vmcoreinfo_size);

	memcpy(&vmcoreinfo_data[vmcoreinfo_size], buf, r);

	vmcoreinfo_size += r;

	WARN_ONCE(vmcoreinfo_size == VMCOREINFO_BYTES,
		  "vmcoreinfo data exceeds allocated size, truncating");
}
/*
 * provide an empty default implementation here -- architecture
 * code may override this
 */
void __weak arch_crash_save_vmcoreinfo(void)
{}

phys_addr_t __weak paddr_vmcoreinfo_note(void)
{
	return __pa(vmcoreinfo_note);
}
EXPORT_SYMBOL(paddr_vmcoreinfo_note);
static int __init crash_save_vmcoreinfo_init(void)
{
	vmcoreinfo_data = (unsigned char *)get_zeroed_page(GFP_KERNEL);
	if (!vmcoreinfo_data) {
		pr_warn("Memory allocation for vmcoreinfo_data failed\n");
		return -ENOMEM;
	}

	vmcoreinfo_note = alloc_pages_exact(VMCOREINFO_NOTE_SIZE,
					    GFP_KERNEL | __GFP_ZERO);
	if (!vmcoreinfo_note) {
		free_page((unsigned long)vmcoreinfo_data);
		vmcoreinfo_data = NULL;
		pr_warn("Memory allocation for vmcoreinfo_note failed\n");
		return -ENOMEM;
	}
	VMCOREINFO_OSRELEASE(init_uts_ns.name.release);
	VMCOREINFO_BUILD_ID();
	VMCOREINFO_PAGESIZE(PAGE_SIZE);

	VMCOREINFO_SYMBOL(init_uts_ns);
	VMCOREINFO_OFFSET(uts_namespace, name);
	VMCOREINFO_SYMBOL(node_online_map);
#ifdef CONFIG_MMU
	VMCOREINFO_SYMBOL_ARRAY(swapper_pg_dir);
#endif
	VMCOREINFO_SYMBOL(_stext);
	VMCOREINFO_SYMBOL(vmap_area_list);

#ifdef CONFIG_FLATMEM
	VMCOREINFO_SYMBOL(mem_map);
	VMCOREINFO_SYMBOL(contig_page_data);
#endif
#ifdef CONFIG_SPARSEMEM
	VMCOREINFO_SYMBOL_ARRAY(mem_section);
	VMCOREINFO_LENGTH(mem_section, NR_SECTION_ROOTS);
	VMCOREINFO_STRUCT_SIZE(mem_section);
	VMCOREINFO_OFFSET(mem_section, section_mem_map);
	VMCOREINFO_NUMBER(SECTION_SIZE_BITS);
	VMCOREINFO_NUMBER(MAX_PHYSMEM_BITS);
#endif
	VMCOREINFO_STRUCT_SIZE(page);
	VMCOREINFO_STRUCT_SIZE(pglist_data);
	VMCOREINFO_STRUCT_SIZE(zone);
	VMCOREINFO_STRUCT_SIZE(free_area);
	VMCOREINFO_STRUCT_SIZE(list_head);
	VMCOREINFO_SIZE(nodemask_t);
	VMCOREINFO_OFFSET(page, flags);
	VMCOREINFO_OFFSET(page, _refcount);
	VMCOREINFO_OFFSET(page, mapping);
	VMCOREINFO_OFFSET(page, lru);
	VMCOREINFO_OFFSET(page, _mapcount);
	VMCOREINFO_OFFSET(page, private);
	VMCOREINFO_OFFSET(page, compound_head);
	VMCOREINFO_OFFSET(pglist_data, node_zones);
	VMCOREINFO_OFFSET(pglist_data, nr_zones);
#ifdef CONFIG_FLATMEM
	VMCOREINFO_OFFSET(pglist_data, node_mem_map);
#endif
	VMCOREINFO_OFFSET(pglist_data, node_start_pfn);
	VMCOREINFO_OFFSET(pglist_data, node_spanned_pages);
	VMCOREINFO_OFFSET(pglist_data, node_id);
	VMCOREINFO_OFFSET(zone, free_area);
	VMCOREINFO_OFFSET(zone, vm_stat);
	VMCOREINFO_OFFSET(zone, spanned_pages);
	VMCOREINFO_OFFSET(free_area, free_list);
	VMCOREINFO_OFFSET(list_head, next);
	VMCOREINFO_OFFSET(list_head, prev);
	VMCOREINFO_OFFSET(vmap_area, va_start);
	VMCOREINFO_OFFSET(vmap_area, list);
	VMCOREINFO_LENGTH(zone.free_area, NR_PAGE_ORDERS);
	log_buf_vmcoreinfo_setup();
	VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES);
	VMCOREINFO_NUMBER(NR_FREE_PAGES);
	VMCOREINFO_NUMBER(PG_lru);
	VMCOREINFO_NUMBER(PG_private);
	VMCOREINFO_NUMBER(PG_swapcache);
	VMCOREINFO_NUMBER(PG_swapbacked);
	VMCOREINFO_NUMBER(PG_slab);
#ifdef CONFIG_MEMORY_FAILURE
	VMCOREINFO_NUMBER(PG_hwpoison);
#endif
	VMCOREINFO_NUMBER(PG_head_mask);
#define PAGE_BUDDY_MAPCOUNT_VALUE	(~PG_buddy)
	VMCOREINFO_NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE);
#define PAGE_HUGETLB_MAPCOUNT_VALUE	(~PG_hugetlb)
	VMCOREINFO_NUMBER(PAGE_HUGETLB_MAPCOUNT_VALUE);
#define PAGE_OFFLINE_MAPCOUNT_VALUE	(~PG_offline)
	VMCOREINFO_NUMBER(PAGE_OFFLINE_MAPCOUNT_VALUE);

#ifdef CONFIG_KALLSYMS
	VMCOREINFO_SYMBOL(kallsyms_names);
	VMCOREINFO_SYMBOL(kallsyms_num_syms);
	VMCOREINFO_SYMBOL(kallsyms_token_table);
	VMCOREINFO_SYMBOL(kallsyms_token_index);
#ifdef CONFIG_KALLSYMS_BASE_RELATIVE
	VMCOREINFO_SYMBOL(kallsyms_offsets);
	VMCOREINFO_SYMBOL(kallsyms_relative_base);
#else
	VMCOREINFO_SYMBOL(kallsyms_addresses);
#endif /* CONFIG_KALLSYMS_BASE_RELATIVE */
#endif /* CONFIG_KALLSYMS */

	arch_crash_save_vmcoreinfo();
	update_vmcoreinfo_note();

	return 0;
}

subsys_initcall(crash_save_vmcoreinfo_init);
static int __init crash_notes_memory_init(void)
{
	/* Allocate memory for saving cpu registers. */
	size_t size, align;

	/*
	 * crash_notes could be allocated across 2 vmalloc pages when percpu
	 * is vmalloc based. vmalloc doesn't guarantee that 2 contiguous
	 * vmalloc pages are also on 2 contiguous physical pages. In that case
	 * the 2nd part of crash_notes in the 2nd page could be lost, since
	 * only the starting address and size of crash_notes are exported
	 * through sysfs. Here round up the size of crash_notes to the nearest
	 * power of two and pass it to __alloc_percpu as the align value. This
	 * makes sure crash_notes is allocated inside one physical page.
	 */
	size = sizeof(note_buf_t);
	align = min(roundup_pow_of_two(sizeof(note_buf_t)), PAGE_SIZE);
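	/*
	 * Illustrative example (not part of the original source): if
	 * sizeof(note_buf_t) were, say, 1704 bytes, the alignment would be
	 * roundup_pow_of_two(1704) = 2048, so the per-cpu buffer cannot
	 * straddle a page boundary on 4K-page systems.
	 */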
	/*
	 * Break the compile if size is bigger than PAGE_SIZE, since
	 * crash_notes would then definitely span 2 pages.
	 */
	BUILD_BUG_ON(size > PAGE_SIZE);

	crash_notes = __alloc_percpu(size, align);
	if (!crash_notes) {
		pr_warn("Memory allocation for saving cpu register states failed\n");
		return -ENOMEM;
	}
	return 0;
}
subsys_initcall(crash_notes_memory_init);
#ifdef CONFIG_CRASH_HOTPLUG
#undef pr_fmt
#define pr_fmt(fmt) "crash hp: " fmt

/*
 * Unlike kexec/kdump loading/unloading/jumping/shrinking, which rarely
 * happen, many crash hotplug events may be notified during one short
 * period, e.g. when one memory board is hot added and its memory regions
 * come online. So the mutex __crash_hotplug_lock is used specifically to
 * serialize the crash hotplug handling.
 */
static DEFINE_MUTEX(__crash_hotplug_lock);
#define crash_hotplug_lock() mutex_lock(&__crash_hotplug_lock)
#define crash_hotplug_unlock() mutex_unlock(&__crash_hotplug_lock)
/*
 * This routine is utilized when the crash_hotplug sysfs node is read.
 * It reflects the kernel's ability/permission to update the crash
 * elfcorehdr directly.
 */
int crash_check_update_elfcorehdr(void)
{
	int rc = 0;

	crash_hotplug_lock();
	/* Obtain lock while reading crash information */
	if (!kexec_trylock()) {
		pr_info("kexec_trylock() failed, elfcorehdr may be inaccurate\n");
		crash_hotplug_unlock();
		return 0;
	}
	if (kexec_crash_image) {
		if (kexec_crash_image->file_mode)
			rc = 1;
		else
			rc = kexec_crash_image->update_elfcorehdr;
	}
	/* Release lock now that update complete */
	kexec_unlock();
	crash_hotplug_unlock();

	return rc;
}
/*
 * To accurately reflect hot un/plug changes of cpu and memory resources
 * (including onlining and offlining of those resources), the elfcorehdr
 * (which is passed to the crash kernel via the elfcorehdr= parameter)
 * must be updated with the new list of CPUs and memories.
 *
 * In order to make changes to elfcorehdr, two conditions are needed:
 * First, the segment containing the elfcorehdr must be large enough
 * to permit a growing number of resources; the elfcorehdr memory size
 * is based on NR_CPUS_DEFAULT and CRASH_MAX_MEMORY_RANGES.
 * Second, purgatory must explicitly exclude the elfcorehdr from the
 * list of segments it checks (since the elfcorehdr changes and thus
 * would require an update to purgatory itself to update the digest).
 */
static void crash_handle_hotplug_event(unsigned int hp_action, unsigned int cpu)
{
	struct kimage *image;

	crash_hotplug_lock();
	/* Obtain lock while changing crash information */
	if (!kexec_trylock()) {
		pr_info("kexec_trylock() failed, elfcorehdr may be inaccurate\n");
		crash_hotplug_unlock();
		return;
	}

	/* Check kdump is not loaded */
	if (!kexec_crash_image)
		goto out;

	image = kexec_crash_image;

	/* Check that updating elfcorehdr is permitted */
	if (!(image->file_mode || image->update_elfcorehdr))
		goto out;

	if (hp_action == KEXEC_CRASH_HP_ADD_CPU ||
	    hp_action == KEXEC_CRASH_HP_REMOVE_CPU)
		pr_debug("hp_action %u, cpu %u\n", hp_action, cpu);
	else
		pr_debug("hp_action %u\n", hp_action);

	/*
	 * The elfcorehdr_index is set to -1 when the struct kimage
	 * is allocated. Find the segment containing the elfcorehdr,
	 * if not already found.
	 */
	if (image->elfcorehdr_index < 0) {
		unsigned long mem;
		unsigned char *ptr;
		unsigned int n;

		for (n = 0; n < image->nr_segments; n++) {
			mem = image->segment[n].mem;
			ptr = kmap_local_page(pfn_to_page(mem >> PAGE_SHIFT));
			if (ptr) {
				/* The segment containing elfcorehdr */
				if (memcmp(ptr, ELFMAG, SELFMAG) == 0)
					image->elfcorehdr_index = (int)n;
				kunmap_local(ptr);
			}
		}
	}

	if (image->elfcorehdr_index < 0) {
		pr_err("unable to locate elfcorehdr segment");
		goto out;
	}

	/* Needed in order for the segments to be updated */
	arch_kexec_unprotect_crashkres();

	/* Differentiate between normal load and hotplug update */
	image->hp_action = hp_action;

	/* Now invoke arch-specific update handler */
	arch_crash_handle_hotplug_event(image);

	/* No longer handling a hotplug event */
	image->hp_action = KEXEC_CRASH_HP_NONE;
	image->elfcorehdr_updated = true;

	/* Change back to read-only */
	arch_kexec_protect_crashkres();

	/* Errors in the callback are not a reason to roll back state */
out:
	/* Release lock now that update complete */
	kexec_unlock();
	crash_hotplug_unlock();
}
static int crash_memhp_notifier(struct notifier_block *nb, unsigned long val, void *v)
{
	switch (val) {
	case MEM_ONLINE:
		crash_handle_hotplug_event(KEXEC_CRASH_HP_ADD_MEMORY,
			KEXEC_CRASH_HP_INVALID_CPU);
		break;

	case MEM_OFFLINE:
		crash_handle_hotplug_event(KEXEC_CRASH_HP_REMOVE_MEMORY,
			KEXEC_CRASH_HP_INVALID_CPU);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block crash_memhp_nb = {
	.notifier_call = crash_memhp_notifier,
	.priority = 0
};
static int crash_cpuhp_online(unsigned int cpu)
{
	crash_handle_hotplug_event(KEXEC_CRASH_HP_ADD_CPU, cpu);
	return 0;
}

static int crash_cpuhp_offline(unsigned int cpu)
{
	crash_handle_hotplug_event(KEXEC_CRASH_HP_REMOVE_CPU, cpu);
	return 0;
}

static int __init crash_hotplug_init(void)
{
	int result = 0;

	if (IS_ENABLED(CONFIG_MEMORY_HOTPLUG))
		register_memory_notifier(&crash_memhp_nb);

	if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) {
		result = cpuhp_setup_state_nocalls(CPUHP_BP_PREPARE_DYN,
			"crash/cpuhp", crash_cpuhp_online, crash_cpuhp_offline);
	}

	return result;
}

subsys_initcall(crash_hotplug_init);
#endif