/*
 * fs/proc/vmcore.c Interface for accessing the crash
 * dump from the system's previous life.
 * Heavily borrowed from fs/proc/kcore.c
 * Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 * Copyright (C) IBM Corporation, 2004. All rights reserved
 */

#include <linux/mm.h>
#include <linux/kcore.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/printk.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/crash_dump.h>
#include <linux/list.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include "internal.h"

/* List representing chunks of contiguous memory areas and their offsets in
 * vmcore file.
 */
static LIST_HEAD(vmcore_list);

/* Stores the pointer to the buffer containing kernel elf core headers. */
static char *elfcorebuf;
static size_t elfcorebuf_sz;
static size_t elfcorebuf_sz_orig;

static char *elfnotes_buf;
static size_t elfnotes_sz;

/* Total size of vmcore file. */
static u64 vmcore_size;

static struct proc_dir_entry *proc_vmcore;

/*
 * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error
 * The called function has to take care of module refcounting.
 */
static int (*oldmem_pfn_is_ram)(unsigned long pfn);

int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn))
{
	if (oldmem_pfn_is_ram)
		return -EBUSY;
	oldmem_pfn_is_ram = fn;
	return 0;
}
EXPORT_SYMBOL_GPL(register_oldmem_pfn_is_ram);

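/*
 * A minimal usage sketch (hypothetical driver code, not part of this
 * file): a balloon driver can register its own page-type check so that
 * dumping skips pages that contain no data. Xen registers a similar
 * pfn check for ballooned pages.
 *
 *	static int my_oldmem_pfn_is_ram(unsigned long pfn)
 *	{
 *		return my_pfn_is_backed_by_ram(pfn) ? 1 : 0;
 *	}
 *	...
 *	rc = register_oldmem_pfn_is_ram(&my_oldmem_pfn_is_ram);
 *
 * register_oldmem_pfn_is_ram() returns -EBUSY if a callback is already
 * installed.
 */
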
void unregister_oldmem_pfn_is_ram(void)
{
	oldmem_pfn_is_ram = NULL;
	wmb();
}
EXPORT_SYMBOL_GPL(unregister_oldmem_pfn_is_ram);

static int pfn_is_ram(unsigned long pfn)
{
	int (*fn)(unsigned long pfn);
	/* pfn is ram unless fn() checks pagetype */
	int ret = 1;

	/*
	 * Ask hypervisor if the pfn is really ram.
	 * A ballooned page contains no data and reading from such a page
	 * will cause high load in the hypervisor.
	 */
	fn = oldmem_pfn_is_ram;
	if (fn)
		ret = fn(pfn);

	return ret;
}

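/*
 * pfn_is_ram() lets read_from_oldmem() below substitute zeros for pages
 * the registered callback reports as non-RAM (e.g. ballooned pages),
 * producing a sparse dump instead of causing needless load in the
 * hypervisor.
 */
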
/* Reads a page from the oldmem device from given offset. */
static ssize_t read_from_oldmem(char *buf, size_t count,
				u64 *ppos, int userbuf)
{
	unsigned long pfn, offset;
	size_t nr_bytes;
	ssize_t read = 0, tmp;

	if (!count)
		return 0;

	offset = (unsigned long)(*ppos % PAGE_SIZE);
	pfn = (unsigned long)(*ppos / PAGE_SIZE);

	do {
		if (count > (PAGE_SIZE - offset))
			nr_bytes = PAGE_SIZE - offset;
		else
			nr_bytes = count;

		/* If pfn is not ram, return zeros for sparse dump files */
		if (pfn_is_ram(pfn) == 0)
			memset(buf, 0, nr_bytes);
		else {
			tmp = copy_oldmem_page(pfn, buf, nr_bytes,
					       offset, userbuf);
			if (tmp < 0)
				return tmp;
		}
		*ppos += nr_bytes;
		count -= nr_bytes;
		buf += nr_bytes;
		read += nr_bytes;
		++pfn;
		offset = 0;
	} while (count);

	return read;
}

/*
 * Architectures may override this function to allocate ELF header in 2nd kernel
 */
int __weak elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size)
{
	return 0;
}

/*
 * Architectures may override this function to free header
 */
void __weak elfcorehdr_free(unsigned long long addr)
{}

/*
 * Architectures may override this function to read from ELF header
 */
ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos)
{
	return read_from_oldmem(buf, count, ppos, 0);
}

/*
 * Architectures may override this function to read from notes sections
 */
ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos)
{
	return read_from_oldmem(buf, count, ppos, 0);
}

/*
 * Architectures may override this function to map oldmem
 */
int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma,
				  unsigned long from, unsigned long pfn,
				  unsigned long size, pgprot_t prot)
{
	return remap_pfn_range(vma, from, pfn, size, prot);
}

/*
 * Architectures which support memory encryption override this.
 */
ssize_t __weak
copy_oldmem_page_encrypted(unsigned long pfn, char *buf, size_t csize,
			   unsigned long offset, int userbuf)
{
	return copy_oldmem_page(pfn, buf, csize, offset, userbuf);
}

/*
 * Copy to either kernel or user space
 */
static int copy_to(void *target, void *src, size_t size, int userbuf)
{
	if (userbuf) {
		if (copy_to_user((char __user *) target, src, size))
			return -EFAULT;
	} else {
		memcpy(target, src, size);
	}
	return 0;
}

/* Read from the ELF header and then the crash dump. On error, a negative
 * value is returned; otherwise the number of bytes read is returned.
 */
static ssize_t __read_vmcore(char *buffer, size_t buflen, loff_t *fpos,
			     int userbuf)
{
	ssize_t acc = 0, tmp;
	size_t tsz;
	u64 start;
	struct vmcore *m = NULL;

	if (buflen == 0 || *fpos >= vmcore_size)
		return 0;

	/* trim buflen to not go beyond EOF */
	if (buflen > vmcore_size - *fpos)
		buflen = vmcore_size - *fpos;

	/* Read ELF core header */
	if (*fpos < elfcorebuf_sz) {
		tsz = min(elfcorebuf_sz - (size_t)*fpos, buflen);
		if (copy_to(buffer, elfcorebuf + *fpos, tsz, userbuf))
			return -EFAULT;
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (buflen == 0)
			return acc;
	}

	/* Read Elf note segment */
	if (*fpos < elfcorebuf_sz + elfnotes_sz) {
		void *kaddr;

		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)*fpos, buflen);
		kaddr = elfnotes_buf + *fpos - elfcorebuf_sz;
		if (copy_to(buffer, kaddr, tsz, userbuf))
			return -EFAULT;
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (buflen == 0)
			return acc;
	}

	list_for_each_entry(m, &vmcore_list, list) {
		if (*fpos < m->offset + m->size) {
			tsz = min_t(size_t, m->offset + m->size - *fpos, buflen);
			start = m->paddr + *fpos - m->offset;
			tmp = read_from_oldmem(buffer, tsz, &start, userbuf);
			if (tmp < 0)
				return tmp;
			buflen -= tsz;
			*fpos += tsz;
			buffer += tsz;
			acc += tsz;

			/* leave now if filled buffer already */
			if (buflen == 0)
				return acc;
		}
	}

	return acc;
}

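/* read() file operation: user-space entry point wrapping __read_vmcore(). */
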
static ssize_t read_vmcore(struct file *file, char __user *buffer,
			   size_t buflen, loff_t *fpos)
{
	return __read_vmcore((__force char *) buffer, buflen, fpos, 1);
}

/*
 * The vmcore fault handler uses the page cache and fills data using the
 * standard __read_vmcore() function.
 *
 * On s390 the fault handler is used for memory regions that can't be mapped
 * directly with remap_pfn_range().
 */
static int mmap_vmcore_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
#ifdef CONFIG_S390
	struct address_space *mapping = vma->vm_file->f_mapping;
	pgoff_t index = vmf->pgoff;
	struct page *page;
	loff_t offset;
	char *buf;
	int rc;

	page = find_or_create_page(mapping, index, GFP_KERNEL);
	if (!page)
		return VM_FAULT_OOM;
	if (!PageUptodate(page)) {
		offset = (loff_t) index << PAGE_CACHE_SHIFT;
		buf = __va((page_to_pfn(page) << PAGE_SHIFT));
		rc = __read_vmcore(buf, PAGE_SIZE, &offset, 0);
		if (rc < 0) {
			unlock_page(page);
			page_cache_release(page);
			return (rc == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
		}
		SetPageUptodate(page);
	}
	unlock_page(page);
	vmf->page = page;
	return 0;
#else
	return VM_FAULT_SIGBUS;
#endif
}

static const struct vm_operations_struct vmcore_mmap_ops = {
	.fault = mmap_vmcore_fault,
};

/**
 * alloc_elfnotes_buf - allocate buffer for ELF note segment in
 *                      vmalloc memory
 *
 * @notes_sz: size of buffer
 *
 * If CONFIG_MMU is defined, use vmalloc_user() to allow users to mmap
 * the buffer to user-space by means of remap_vmalloc_range().
 *
 * If CONFIG_MMU is not defined, use vzalloc() since mmap_vmcore() is
 * disabled and there's no need to allow users to mmap the buffer.
 */
static inline char *alloc_elfnotes_buf(size_t notes_sz)
{
#ifdef CONFIG_MMU
	return vmalloc_user(notes_sz);
#else
	return vzalloc(notes_sz);
#endif
}

/*
 * Disable mmap_vmcore() if CONFIG_MMU is not defined. MMU is
 * essential for mmap_vmcore() in order to map physically
 * non-contiguous objects (ELF header, ELF note segment and memory
 * regions in the 1st kernel pointed to by PT_LOAD entries) into
 * virtually contiguous user-space in ELF layout.
 */
#ifdef CONFIG_MMU

/*
 * remap_oldmem_pfn_checked - do remap_oldmem_pfn_range replacing all pages
 * reported as not being ram with the zero page.
 *
 * @vma: vm_area_struct describing requested mapping
 * @from: start remapping from
 * @pfn: page frame number to start remapping to
 * @size: remapping size
 * @prot: protection bits
 *
 * Returns zero on success, -EAGAIN on failure.
 */
static int remap_oldmem_pfn_checked(struct vm_area_struct *vma,
				    unsigned long from, unsigned long pfn,
				    unsigned long size, pgprot_t prot)
{
	unsigned long map_size;
	unsigned long pos_start, pos_end, pos;
	unsigned long zeropage_pfn = my_zero_pfn(0);
	size_t len = 0;

	pos_start = pfn;
	pos_end = pfn + (size >> PAGE_SHIFT);

	for (pos = pos_start; pos < pos_end; ++pos) {
		if (!pfn_is_ram(pos)) {
			/*
			 * We hit a page which is not ram. Remap the continuous
			 * region between pos_start and pos-1 and replace
			 * the non-ram page at pos with the zero page.
			 */
			if (pos > pos_start) {
				/* Remap continuous region */
				map_size = (pos - pos_start) << PAGE_SHIFT;
				if (remap_oldmem_pfn_range(vma, from + len,
							   pos_start, map_size,
							   prot))
					goto fail;
				len += map_size;
			}
			/* Remap the zero page */
			if (remap_oldmem_pfn_range(vma, from + len,
						   zeropage_pfn,
						   PAGE_SIZE, prot))
				goto fail;
			len += PAGE_SIZE;
			pos_start = pos + 1;
		}
	}
	if (pos > pos_start) {
		/* Remap the rest */
		map_size = (pos - pos_start) << PAGE_SHIFT;
		if (remap_oldmem_pfn_range(vma, from + len, pos_start,
					   map_size, prot))
			goto fail;
	}
	return 0;
fail:
	do_munmap(vma->vm_mm, from, len);
	return -EAGAIN;
}

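/* Wrapper used by mmap_vmcore() to pick the cheapest correct remap path. */
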
static int vmcore_remap_oldmem_pfn(struct vm_area_struct *vma,
				   unsigned long from, unsigned long pfn,
				   unsigned long size, pgprot_t prot)
{
	/*
	 * Check if oldmem_pfn_is_ram was registered to avoid
	 * looping over all pages without a reason.
	 */
	if (oldmem_pfn_is_ram)
		return remap_oldmem_pfn_checked(vma, from, pfn, size, prot);
	else
		return remap_oldmem_pfn_range(vma, from, pfn, size, prot);
}

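/*
 * mmap_vmcore() maps the file in the order the offsets were laid out:
 * first the ELF header buffer, then the merged note segment, and
 * finally the old kernel's memory chunks from vmcore_list. Partial
 * failures are unwound with do_munmap().
 */
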
static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
	size_t size = vma->vm_end - vma->vm_start;
	u64 start, end, len, tsz;
	struct vmcore *m;

	start = (u64)vma->vm_pgoff << PAGE_SHIFT;
	end = start + size;

	if (size > vmcore_size || end > vmcore_size)
		return -EINVAL;

	if (vma->vm_flags & (VM_WRITE | VM_EXEC))
		return -EPERM;

	vma->vm_flags &= ~(VM_MAYWRITE | VM_MAYEXEC);
	vma->vm_flags |= VM_MIXEDMAP;
	vma->vm_ops = &vmcore_mmap_ops;

	len = 0;

	if (start < elfcorebuf_sz) {
		u64 pfn;

		tsz = min(elfcorebuf_sz - (size_t)start, size);
		pfn = __pa(elfcorebuf + start) >> PAGE_SHIFT;
		if (remap_pfn_range(vma, vma->vm_start, pfn, tsz,
				    vma->vm_page_prot))
			return -EAGAIN;
		size -= tsz;
		start += tsz;
		len += tsz;

		if (size == 0)
			return 0;
	}

	if (start < elfcorebuf_sz + elfnotes_sz) {
		void *kaddr;

		tsz = min(elfcorebuf_sz + elfnotes_sz - (size_t)start, size);
		kaddr = elfnotes_buf + start - elfcorebuf_sz;
		if (remap_vmalloc_range_partial(vma, vma->vm_start + len,
						kaddr, tsz))
			goto fail;
		size -= tsz;
		start += tsz;
		len += tsz;

		if (size == 0)
			return 0;
	}

	list_for_each_entry(m, &vmcore_list, list) {
		if (start < m->offset + m->size) {
			u64 paddr = 0;

			tsz = min_t(size_t, m->offset + m->size - start, size);
			paddr = m->paddr + start - m->offset;
			if (vmcore_remap_oldmem_pfn(vma, vma->vm_start + len,
						    paddr >> PAGE_SHIFT, tsz,
						    vma->vm_page_prot))
				goto fail;
			size -= tsz;
			start += tsz;
			len += tsz;

			if (size == 0)
				return 0;
		}
	}

	return 0;
fail:
	do_munmap(vma->vm_mm, vma->vm_start, len);
	return -EAGAIN;
}

#else
static int mmap_vmcore(struct file *file, struct vm_area_struct *vma)
{
	return -ENOSYS;
}
#endif

static const struct file_operations proc_vmcore_operations = {
	.read		= read_vmcore,
	.llseek		= default_llseek,
	.mmap		= mmap_vmcore,
};

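/* Allocate one zeroed element for vmcore_list. */
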
static struct vmcore* __init get_new_element(void)
{
	return kzalloc(sizeof(struct vmcore), GFP_KERNEL);
}

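/*
 * Total file size = ELF header buffer + merged note segment + the sum
 * of all memory chunk sizes on vc_list; all three are page-aligned by
 * the time this runs.
 */
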
static u64 __init get_vmcore_size(size_t elfsz, size_t elfnotesegsz,
				  struct list_head *vc_list)
{
	u64 size;
	struct vmcore *m;

	size = elfsz + elfnotesegsz;
	list_for_each_entry(m, vc_list, list) {
		size += m->size;
	}
	return size;
}

/**
 * update_note_header_size_elf64 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates the p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to the real size of the
 * ELF note segment.
 */
static int __init update_note_header_size_elf64(const Elf64_Ehdr *ehdr_ptr)
{
	int i, rc = 0;
	Elf64_Phdr *phdr_ptr;
	Elf64_Nhdr *nhdr_ptr;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		void *notes_section;
		u64 offset, max_sz, sz, real_sz = 0;

		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		while (nhdr_ptr->n_namesz != 0) {
			sz = sizeof(Elf64_Nhdr) +
				(((u64)nhdr_ptr->n_namesz + 3) & ~3) +
				(((u64)nhdr_ptr->n_descsz + 3) & ~3);
			if ((real_sz + sz) > max_sz) {
				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
				break;
			}
			real_sz += sz;
			nhdr_ptr = (Elf64_Nhdr *)((char *)nhdr_ptr + sz);
		}
		kfree(notes_section);
		phdr_ptr->p_memsz = real_sz;
		if (real_sz == 0)
			pr_warn("Warning: Zero PT_NOTE entries found\n");
	}

	return 0;
}

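/*
 * Worked example of the sizing above: a "CORE" note with n_namesz == 5
 * and n_descsz == 0x150 occupies sizeof(Elf64_Nhdr) + 8 + 0x150
 * = 12 + 8 + 336 = 356 bytes, since name and desc are each padded to a
 * 4-byte boundary.
 */
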
/**
 * get_note_number_and_size_elf64 - get the number of PT_NOTE program
 * headers and sum of real size of their ELF note segment headers and
 * data.
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a unique single one. The resulting unique entry will have
 * @sz_ptnote in its phdr->p_memsz.
 *
 * It is assumed that the program headers with PT_NOTE type pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf64
 * and that each PT_NOTE program header has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init get_note_number_and_size_elf64(const Elf64_Ehdr *ehdr_ptr,
						 int *nr_ptnote, u64 *sz_ptnote)
{
	int i;
	Elf64_Phdr *phdr_ptr;

	*nr_ptnote = *sz_ptnote = 0;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		*nr_ptnote += 1;
		*sz_ptnote += phdr_ptr->p_memsz;
	}

	return 0;
}

/**
 * copy_notes_elf64 - copy ELF note segments in a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function is used to copy the ELF note segment in the 1st kernel
 * into the buffer @notes_buf in the 2nd kernel. It is assumed that the
 * size of the buffer @notes_buf is equal to or larger than the sum of the
 * real ELF note segment headers and data.
 *
 * It is assumed that the program headers with PT_NOTE type pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf64
 * and that each PT_NOTE program header has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init copy_notes_elf64(const Elf64_Ehdr *ehdr_ptr, char *notes_buf)
{
	int i, rc = 0;
	Elf64_Phdr *phdr_ptr;

	phdr_ptr = (Elf64_Phdr *)(ehdr_ptr + 1);

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 offset;

		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		offset = phdr_ptr->p_offset;
		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
					   &offset);
		if (rc < 0)
			return rc;
		notes_buf += phdr_ptr->p_memsz;
	}

	return 0;
}

/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz,
					   char **notes_buf, size_t *notes_sz)
{
	int i, nr_ptnote = 0, rc = 0;
	char *tmp;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr phdr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;

	rc = update_note_header_size_elf64(ehdr_ptr);
	if (rc < 0)
		return rc;

	rc = get_note_number_and_size_elf64(ehdr_ptr, &nr_ptnote, &phdr_sz);
	if (rc < 0)
		return rc;

	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
	*notes_buf = alloc_elfnotes_buf(*notes_sz);
	if (!*notes_buf)
		return -ENOMEM;

	rc = copy_notes_elf64(ehdr_ptr, *notes_buf);
	if (rc < 0)
		return rc;

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type   = PT_NOTE;
	phdr.p_flags  = 0;
	note_off = sizeof(Elf64_Ehdr) +
		(ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf64_Phdr);
	phdr.p_offset = roundup(note_off, PAGE_SIZE);
	phdr.p_vaddr  = phdr.p_paddr = 0;
	phdr.p_filesz = phdr.p_memsz = phdr_sz;
	phdr.p_align  = 0;

	/* Add merged PT_NOTE program header */
	tmp = elfptr + sizeof(Elf64_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf64_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf64_Ehdr)-sizeof(Elf64_Phdr)));
	memset(elfptr + *elfsz, 0, i);
	*elfsz = roundup(*elfsz, PAGE_SIZE);

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	return 0;
}

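/*
 * After merging, the exported layout is: the ELF header with the single
 * merged PT_NOTE entry plus the remaining PT_LOAD entries (padded out
 * to a page), then the note segment, then the dumped memory regions.
 */
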
/**
 * update_note_header_size_elf32 - update p_memsz member of each PT_NOTE entry
 *
 * @ehdr_ptr: ELF header
 *
 * This function updates the p_memsz member of each PT_NOTE entry in the
 * program header table pointed to by @ehdr_ptr to the real size of the
 * ELF note segment.
 */
static int __init update_note_header_size_elf32(const Elf32_Ehdr *ehdr_ptr)
{
	int i, rc = 0;
	Elf32_Phdr *phdr_ptr;
	Elf32_Nhdr *nhdr_ptr;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		void *notes_section;
		u64 offset, max_sz, sz, real_sz = 0;

		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = elfcorehdr_read_notes(notes_section, max_sz, &offset);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		while (nhdr_ptr->n_namesz != 0) {
			sz = sizeof(Elf32_Nhdr) +
				(((u64)nhdr_ptr->n_namesz + 3) & ~3) +
				(((u64)nhdr_ptr->n_descsz + 3) & ~3);
			if ((real_sz + sz) > max_sz) {
				pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
					nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
				break;
			}
			real_sz += sz;
			nhdr_ptr = (Elf32_Nhdr *)((char *)nhdr_ptr + sz);
		}
		kfree(notes_section);
		phdr_ptr->p_memsz = real_sz;
		if (real_sz == 0)
			pr_warn("Warning: Zero PT_NOTE entries found\n");
	}

	return 0;
}

/**
 * get_note_number_and_size_elf32 - get the number of PT_NOTE program
 * headers and sum of real size of their ELF note segment headers and
 * data.
 *
 * @ehdr_ptr: ELF header
 * @nr_ptnote: buffer for the number of PT_NOTE program headers
 * @sz_ptnote: buffer for size of unique PT_NOTE program header
 *
 * This function is used to merge multiple PT_NOTE program headers
 * into a unique single one. The resulting unique entry will have
 * @sz_ptnote in its phdr->p_memsz.
 *
 * It is assumed that the program headers with PT_NOTE type pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf32
 * and that each PT_NOTE program header has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init get_note_number_and_size_elf32(const Elf32_Ehdr *ehdr_ptr,
						 int *nr_ptnote, u64 *sz_ptnote)
{
	int i;
	Elf32_Phdr *phdr_ptr;

	*nr_ptnote = *sz_ptnote = 0;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		*nr_ptnote += 1;
		*sz_ptnote += phdr_ptr->p_memsz;
	}

	return 0;
}

/**
 * copy_notes_elf32 - copy ELF note segments in a given buffer
 *
 * @ehdr_ptr: ELF header
 * @notes_buf: buffer into which ELF note segments are copied
 *
 * This function is used to copy the ELF note segment in the 1st kernel
 * into the buffer @notes_buf in the 2nd kernel. It is assumed that the
 * size of the buffer @notes_buf is equal to or larger than the sum of the
 * real ELF note segment headers and data.
 *
 * It is assumed that the program headers with PT_NOTE type pointed to by
 * @ehdr_ptr have already been updated by update_note_header_size_elf32
 * and that each PT_NOTE program header has the actual ELF note segment
 * size in its p_memsz member.
 */
static int __init copy_notes_elf32(const Elf32_Ehdr *ehdr_ptr, char *notes_buf)
{
	int i, rc = 0;
	Elf32_Phdr *phdr_ptr;

	phdr_ptr = (Elf32_Phdr *)(ehdr_ptr + 1);

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 offset;

		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		offset = phdr_ptr->p_offset;
		rc = elfcorehdr_read_notes(notes_buf, phdr_ptr->p_memsz,
					   &offset);
		if (rc < 0)
			return rc;
		notes_buf += phdr_ptr->p_memsz;
	}

	return 0;
}

/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz,
					   char **notes_buf, size_t *notes_sz)
{
	int i, nr_ptnote = 0, rc = 0;
	char *tmp;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr phdr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;

	rc = update_note_header_size_elf32(ehdr_ptr);
	if (rc < 0)
		return rc;

	rc = get_note_number_and_size_elf32(ehdr_ptr, &nr_ptnote, &phdr_sz);
	if (rc < 0)
		return rc;

	*notes_sz = roundup(phdr_sz, PAGE_SIZE);
	*notes_buf = alloc_elfnotes_buf(*notes_sz);
	if (!*notes_buf)
		return -ENOMEM;

	rc = copy_notes_elf32(ehdr_ptr, *notes_buf);
	if (rc < 0)
		return rc;

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type   = PT_NOTE;
	phdr.p_flags  = 0;
	note_off = sizeof(Elf32_Ehdr) +
		(ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf32_Phdr);
	phdr.p_offset = roundup(note_off, PAGE_SIZE);
	phdr.p_vaddr  = phdr.p_paddr = 0;
	phdr.p_filesz = phdr.p_memsz = phdr_sz;
	phdr.p_align  = 0;

	/* Add merged PT_NOTE program header */
	tmp = elfptr + sizeof(Elf32_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf32_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp+i, ((*elfsz)-sizeof(Elf32_Ehdr)-sizeof(Elf32_Phdr)));
	memset(elfptr + *elfsz, 0, i);
	*elfsz = roundup(*elfsz, PAGE_SIZE);

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	return 0;
}

/* Add memory chunks represented by program headers to vmcore list. Also update
 * the new offset fields of exported program headers. */
static int __init process_ptload_program_headers_elf64(char *elfptr,
						size_t elfsz,
						size_t elfnotes_sz,
						struct list_head *vc_list)
{
	int i;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;
	phdr_ptr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr)); /* PT_NOTE hdr */

	/* Skip Elf header, program headers and Elf note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 paddr, start, end, size;

		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		paddr = phdr_ptr->p_offset;
		start = rounddown(paddr, PAGE_SIZE);
		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
		size = end - start;

		/* Add this contiguous chunk of memory to vmcore list.*/
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = start;
		new->size = size;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset. */
		phdr_ptr->p_offset = vmcore_off + (paddr - start);
		vmcore_off = vmcore_off + size;
	}
	return 0;
}

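/* 32-bit variant: same logic as the Elf64 version above. */
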
static int __init process_ptload_program_headers_elf32(char *elfptr,
						size_t elfsz,
						size_t elfnotes_sz,
						struct list_head *vc_list)
{
	int i;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;
	phdr_ptr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr)); /* PT_NOTE hdr */

	/* Skip Elf header, program headers and Elf note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		u64 paddr, start, end, size;

		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		paddr = phdr_ptr->p_offset;
		start = rounddown(paddr, PAGE_SIZE);
		end = roundup(paddr + phdr_ptr->p_memsz, PAGE_SIZE);
		size = end - start;

		/* Add this contiguous chunk of memory to vmcore list.*/
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = start;
		new->size = size;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset */
		phdr_ptr->p_offset = vmcore_off + (paddr - start);
		vmcore_off = vmcore_off + size;
	}
	return 0;
}

/* Sets offset fields of vmcore elements. */
static void __init set_vmcore_list_offsets(size_t elfsz, size_t elfnotes_sz,
					   struct list_head *vc_list)
{
	loff_t vmcore_off;
	struct vmcore *m;

	/* Skip Elf header, program headers and Elf note segment. */
	vmcore_off = elfsz + elfnotes_sz;

	list_for_each_entry(m, vc_list, list) {
		m->offset = vmcore_off;
		vmcore_off += m->size;
	}
}

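/* Release the ELF header pages and the vmalloc'd note buffer. */
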
static void free_elfcorebuf(void)
{
	free_pages((unsigned long)elfcorebuf, get_order(elfcorebuf_sz_orig));
	elfcorebuf = NULL;
	vfree(elfnotes_buf);
	elfnotes_buf = NULL;
}

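/*
 * Read and sanity-check the 64-bit ELF header from the old kernel, copy
 * in all program headers, merge the PT_NOTE entries, and build
 * vmcore_list from the PT_LOAD entries.
 */
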
static int __init parse_crash_elf64_headers(void)
{
	int rc = 0;
	Elf64_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read Elf header */
	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf64_Ehdr), &addr);
	if (rc < 0)
		return rc;

	/* Do some basic verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
		(ehdr.e_type != ET_CORE) ||
		!vmcore_elf64_check_arch(&ehdr) ||
		ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
		ehdr.e_version != EV_CURRENT ||
		ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
		ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
		ehdr.e_phnum == 0) {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Read in all elf headers. */
	elfcorebuf_sz_orig = sizeof(Elf64_Ehdr) +
				ehdr.e_phnum * sizeof(Elf64_Phdr);
	elfcorebuf_sz = elfcorebuf_sz_orig;
	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					      get_order(elfcorebuf_sz_orig));
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
	if (rc < 0)
		goto fail;

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz,
				      &elfnotes_buf, &elfnotes_sz);
	if (rc)
		goto fail;
	rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
						  elfnotes_sz, &vmcore_list);
	if (rc)
		goto fail;
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
	return 0;
fail:
	free_elfcorebuf();
	return rc;
}

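/* Same sequence as parse_crash_elf64_headers(), for 32-bit ELF cores. */
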
static int __init parse_crash_elf32_headers(void)
{
	int rc = 0;
	Elf32_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read Elf header */
	rc = elfcorehdr_read((char *)&ehdr, sizeof(Elf32_Ehdr), &addr);
	if (rc < 0)
		return rc;

	/* Do some basic verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
		(ehdr.e_type != ET_CORE) ||
		!elf_check_arch(&ehdr) ||
		ehdr.e_ident[EI_CLASS] != ELFCLASS32 ||
		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
		ehdr.e_version != EV_CURRENT ||
		ehdr.e_ehsize != sizeof(Elf32_Ehdr) ||
		ehdr.e_phentsize != sizeof(Elf32_Phdr) ||
		ehdr.e_phnum == 0) {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Read in all elf headers. */
	elfcorebuf_sz_orig = sizeof(Elf32_Ehdr) +
			     ehdr.e_phnum * sizeof(Elf32_Phdr);
	elfcorebuf_sz = elfcorebuf_sz_orig;
	elfcorebuf = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					      get_order(elfcorebuf_sz_orig));
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(elfcorebuf, elfcorebuf_sz_orig, &addr);
	if (rc < 0)
		goto fail;

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz,
				      &elfnotes_buf, &elfnotes_sz);
	if (rc)
		goto fail;
	rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz,
						  elfnotes_sz, &vmcore_list);
	if (rc)
		goto fail;
	set_vmcore_list_offsets(elfcorebuf_sz, elfnotes_sz, &vmcore_list);
	return 0;
fail:
	free_elfcorebuf();
	return rc;
}

static int __init parse_crash_elf_headers(void)
{
	unsigned char e_ident[EI_NIDENT];
	u64 addr;
	int rc = 0;

	addr = elfcorehdr_addr;
	rc = elfcorehdr_read(e_ident, EI_NIDENT, &addr);
	if (rc < 0)
		return rc;
	if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) {
		pr_warn("Warning: Core image elf header not found\n");
		return -EINVAL;
	}

	if (e_ident[EI_CLASS] == ELFCLASS64) {
		rc = parse_crash_elf64_headers();
		if (rc)
			return rc;
	} else if (e_ident[EI_CLASS] == ELFCLASS32) {
		rc = parse_crash_elf32_headers();
		if (rc)
			return rc;
	} else {
		pr_warn("Warning: Core image elf header is not sane\n");
		return -EINVAL;
	}

	/* Determine vmcore size. */
	vmcore_size = get_vmcore_size(elfcorebuf_sz, elfnotes_sz,
				      &vmcore_list);

	return 0;
}

/* Init function for vmcore module. */
static int __init vmcore_init(void)
{
	int rc = 0;

	/* Allow architectures to allocate ELF header in 2nd kernel */
	rc = elfcorehdr_alloc(&elfcorehdr_addr, &elfcorehdr_size);
	if (rc)
		return rc;
	/*
	 * If elfcorehdr= has been passed in cmdline or created in 2nd kernel,
	 * then capture the dump.
	 */
	if (!(is_vmcore_usable()))
		return rc;
	rc = parse_crash_elf_headers();
	if (rc) {
		pr_warn("Kdump: vmcore not initialized\n");
		return rc;
	}
	elfcorehdr_free(elfcorehdr_addr);
	elfcorehdr_addr = ELFCORE_ADDR_ERR;

	proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &proc_vmcore_operations);
	if (proc_vmcore)
		proc_vmcore->size = vmcore_size;
	return 0;
}
fs_initcall(vmcore_init);

/* Cleanup function for vmcore module. */
void vmcore_cleanup(void)
{
	struct list_head *pos, *next;

	proc_remove(proc_vmcore);
	proc_vmcore = NULL;

	/* clear the vmcore list. */
	list_for_each_safe(pos, next, &vmcore_list) {
		struct vmcore *m;

		m = list_entry(pos, struct vmcore, list);
		list_del(&m->list);
		kfree(m);
	}
	free_elfcorebuf();
}