/*
 * kexec: kexec_file_load system call
 *
 * Copyright (C) 2014 Red Hat Inc.
 * Authors:
 *      Vivek Goyal <vgoyal@redhat.com>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/capability.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/kexec.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/ima.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/vmalloc.h>
#include "kexec_internal.h"
static int kexec_calculate_store_digests(struct kimage *image);
/*
 * Currently this is the only default function that is exported as some
 * architectures need it to do additional handling.
 * In the future, other default functions may be exported too if required.
 */
int kexec_image_probe_default(struct kimage *image, void *buf,
                              unsigned long buf_len)
{
        const struct kexec_file_ops * const *fops;
        int ret = -ENOEXEC;

        for (fops = &kexec_file_loaders[0]; *fops && (*fops)->probe; ++fops) {
                ret = (*fops)->probe(buf, buf_len);
                if (!ret) {
                        image->fops = *fops;
                        return ret;
                }
        }

        return ret;
}
/* Architectures can provide this probe function */
int __weak arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
                                         unsigned long buf_len)
{
        return kexec_image_probe_default(image, buf, buf_len);
}
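/*
 * Illustrative sketch (not from this file): an architecture plugs into the
 * probe/load flow above by filling a kexec_file_ops and listing it in its
 * kexec_file_loaders[] table. The my_* names are hypothetical; only the
 * struct layout and the NULL-terminated table are assumed from this API.
 *
 *      static const struct kexec_file_ops my_image_ops = {
 *              .probe = my_image_probe,     // return -ENOEXEC if format unknown
 *              .load = my_image_load,       // return loader data or ERR_PTR()
 *              .cleanup = my_image_cleanup, // free loader data
 *      };
 *
 *      const struct kexec_file_ops * const kexec_file_loaders[] = {
 *              &my_image_ops,
 *              NULL
 *      };
 */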
static void *kexec_image_load_default(struct kimage *image)
{
        if (!image->fops || !image->fops->load)
                return ERR_PTR(-ENOEXEC);

        return image->fops->load(image, image->kernel_buf,
                                 image->kernel_buf_len, image->initrd_buf,
                                 image->initrd_buf_len, image->cmdline_buf,
                                 image->cmdline_buf_len);
}

void * __weak arch_kexec_kernel_image_load(struct kimage *image)
{
        return kexec_image_load_default(image);
}
static int kexec_image_post_load_cleanup_default(struct kimage *image)
{
        if (!image->fops || !image->fops->cleanup)
                return 0;

        return image->fops->cleanup(image->image_loader_data);
}

int __weak arch_kimage_file_post_load_cleanup(struct kimage *image)
{
        return kexec_image_post_load_cleanup_default(image);
}
#ifdef CONFIG_KEXEC_VERIFY_SIG
static int kexec_image_verify_sig_default(struct kimage *image, void *buf,
                                          unsigned long buf_len)
{
        if (!image->fops || !image->fops->verify_sig) {
                pr_debug("kernel loader does not support signature verification.\n");
                return -EKEYREJECTED;
        }

        return image->fops->verify_sig(buf, buf_len);
}

int __weak arch_kexec_kernel_verify_sig(struct kimage *image, void *buf,
                                        unsigned long buf_len)
{
        return kexec_image_verify_sig_default(image, buf, buf_len);
}
#endif
/**
 * arch_kexec_apply_relocations_add - apply relocations of type RELA
 * @pi:         Purgatory to be relocated.
 * @section:    Section to which the relocations apply.
 * @relsec:     Section containing RELAs.
 * @symtab:     Corresponding symtab.
 *
 * Return: 0 on success, negative errno on error.
 */
int __weak
arch_kexec_apply_relocations_add(struct purgatory_info *pi, Elf_Shdr *section,
                                 const Elf_Shdr *relsec, const Elf_Shdr *symtab)
{
        pr_err("RELA relocation unsupported.\n");
        return -ENOEXEC;
}
/**
 * arch_kexec_apply_relocations - apply relocations of type REL
 * @pi:         Purgatory to be relocated.
 * @section:    Section to which the relocations apply.
 * @relsec:     Section containing RELs.
 * @symtab:     Corresponding symtab.
 *
 * Return: 0 on success, negative errno on error.
 */
int __weak
arch_kexec_apply_relocations(struct purgatory_info *pi, Elf_Shdr *section,
                             const Elf_Shdr *relsec, const Elf_Shdr *symtab)
{
        pr_err("REL relocation unsupported.\n");
        return -ENOEXEC;
}
/*
 * Free up the memory used by the kernel, initrd, and command line. These are
 * temporary allocations that are no longer needed once the buffers have been
 * loaded into separate segments and copied elsewhere.
 */
void kimage_file_post_load_cleanup(struct kimage *image)
{
        struct purgatory_info *pi = &image->purgatory_info;

        vfree(image->kernel_buf);
        image->kernel_buf = NULL;

        vfree(image->initrd_buf);
        image->initrd_buf = NULL;

        kfree(image->cmdline_buf);
        image->cmdline_buf = NULL;

        vfree(pi->purgatory_buf);
        pi->purgatory_buf = NULL;

        vfree(pi->sechdrs);
        pi->sechdrs = NULL;

#ifdef CONFIG_IMA_KEXEC
        vfree(image->ima_buffer);
        image->ima_buffer = NULL;
#endif /* CONFIG_IMA_KEXEC */

        /* See if architecture has anything to cleanup post load */
        arch_kimage_file_post_load_cleanup(image);

        /*
         * The above call should have let the image loader free any data
         * stored in kimage->image_loader_data. It should be OK now to
         * free it.
         */
        kfree(image->image_loader_data);
        image->image_loader_data = NULL;
}
/*
 * In file mode, the list of segments is prepared by the kernel. Copy the
 * relevant data from user space, do error checking, and prepare the
 * segment list.
 */
static int
kimage_file_prepare_segments(struct kimage *image, int kernel_fd, int initrd_fd,
                             const char __user *cmdline_ptr,
                             unsigned long cmdline_len, unsigned flags)
{
        int ret = 0;
        void *ldata;
        loff_t size;

        ret = kernel_read_file_from_fd(kernel_fd, &image->kernel_buf,
                                       &size, INT_MAX, READING_KEXEC_IMAGE);
        if (ret)
                return ret;
        image->kernel_buf_len = size;

        /* IMA needs to pass the measurement list to the next kernel. */
        ima_add_kexec_buffer(image);

        /* Call arch image probe handlers */
        ret = arch_kexec_kernel_image_probe(image, image->kernel_buf,
                                            image->kernel_buf_len);
        if (ret)
                goto out;

#ifdef CONFIG_KEXEC_VERIFY_SIG
        ret = arch_kexec_kernel_verify_sig(image, image->kernel_buf,
                                           image->kernel_buf_len);
        if (ret) {
                pr_debug("kernel signature verification failed.\n");
                goto out;
        }
        pr_debug("kernel signature verification successful.\n");
#endif
        /* It is possible that no initramfs is being loaded */
        if (!(flags & KEXEC_FILE_NO_INITRAMFS)) {
                ret = kernel_read_file_from_fd(initrd_fd, &image->initrd_buf,
                                               &size, INT_MAX,
                                               READING_KEXEC_INITRAMFS);
                if (ret)
                        goto out;
                image->initrd_buf_len = size;
        }

        if (cmdline_len) {
                image->cmdline_buf = memdup_user(cmdline_ptr, cmdline_len);
                if (IS_ERR(image->cmdline_buf)) {
                        ret = PTR_ERR(image->cmdline_buf);
                        image->cmdline_buf = NULL;
                        goto out;
                }

                image->cmdline_buf_len = cmdline_len;

                /* The command line should be a NUL-terminated string */
                if (image->cmdline_buf[cmdline_len - 1] != '\0') {
                        ret = -EINVAL;
                        goto out;
                }
        }

        /* Call arch image load handlers */
        ldata = arch_kexec_kernel_image_load(image);

        if (IS_ERR(ldata)) {
                ret = PTR_ERR(ldata);
                goto out;
        }

        image->image_loader_data = ldata;
out:
        /* In case of error, free up all allocated memory in this function */
        if (ret)
                kimage_file_post_load_cleanup(image);
        return ret;
}
static int
kimage_file_alloc_init(struct kimage **rimage, int kernel_fd,
                       int initrd_fd, const char __user *cmdline_ptr,
                       unsigned long cmdline_len, unsigned long flags)
{
        int ret;
        struct kimage *image;
        bool kexec_on_panic = flags & KEXEC_FILE_ON_CRASH;

        image = do_kimage_alloc_init();
        if (!image)
                return -ENOMEM;

        image->file_mode = 1;

        if (kexec_on_panic) {
                /* Enable special crash kernel control page alloc policy. */
                image->control_page = crashk_res.start;
                image->type = KEXEC_TYPE_CRASH;
        }

        ret = kimage_file_prepare_segments(image, kernel_fd, initrd_fd,
                                           cmdline_ptr, cmdline_len, flags);
        if (ret)
                goto out_free_image;

        ret = sanity_check_segment_list(image);
        if (ret)
                goto out_free_post_load_bufs;

        ret = -ENOMEM;
        image->control_code_page = kimage_alloc_control_pages(image,
                                           get_order(KEXEC_CONTROL_PAGE_SIZE));
        if (!image->control_code_page) {
                pr_err("Could not allocate control_code_buffer\n");
                goto out_free_post_load_bufs;
        }

        if (!kexec_on_panic) {
                image->swap_page = kimage_alloc_control_pages(image, 0);
                if (!image->swap_page) {
                        pr_err("Could not allocate swap buffer\n");
                        goto out_free_control_pages;
                }
        }

        *rimage = image;
        return 0;
out_free_control_pages:
        kimage_free_page_list(&image->control_pages);
out_free_post_load_bufs:
        kimage_file_post_load_cleanup(image);
out_free_image:
        kfree(image);
        return ret;
}
SYSCALL_DEFINE5(kexec_file_load, int, kernel_fd, int, initrd_fd,
                unsigned long, cmdline_len, const char __user *, cmdline_ptr,
                unsigned long, flags)
{
        int ret = 0, i;
        struct kimage **dest_image, *image;

        /* We only trust the superuser with rebooting the system. */
        if (!capable(CAP_SYS_BOOT) || kexec_load_disabled)
                return -EPERM;

        /* Make sure we have a legal set of flags */
        if (flags != (flags & KEXEC_FILE_FLAGS))
                return -EINVAL;

        image = NULL;
        ret = 0;

        if (!mutex_trylock(&kexec_mutex))
                return -EBUSY;

        dest_image = &kexec_image;
        if (flags & KEXEC_FILE_ON_CRASH) {
                dest_image = &kexec_crash_image;
                if (kexec_crash_image)
                        arch_kexec_unprotect_crashkres();
        }

        if (flags & KEXEC_FILE_UNLOAD)
                goto exchange;

        /*
         * In case of crash, the new kernel gets loaded in the reserved
         * region. This is the same memory where an old crash kernel might
         * already be loaded. Free any current crash dump kernel before we
         * corrupt it.
         */
        if (flags & KEXEC_FILE_ON_CRASH)
                kimage_free(xchg(&kexec_crash_image, NULL));

        ret = kimage_file_alloc_init(&image, kernel_fd, initrd_fd, cmdline_ptr,
                                     cmdline_len, flags);
        if (ret)
                goto out;

        ret = machine_kexec_prepare(image);
        if (ret)
                goto out;

        /*
         * Some architectures (like s390) may touch the crash memory before
         * machine_kexec_prepare(), so we must copy the vmcoreinfo data
         * after it.
         */
        ret = kimage_crash_copy_vmcoreinfo(image);
        if (ret)
                goto out;

        ret = kexec_calculate_store_digests(image);
        if (ret)
                goto out;

        for (i = 0; i < image->nr_segments; i++) {
                struct kexec_segment *ksegment;

                ksegment = &image->segment[i];
                pr_debug("Loading segment %d: buf=0x%p bufsz=0x%zx mem=0x%lx memsz=0x%zx\n",
                         i, ksegment->buf, ksegment->bufsz, ksegment->mem,
                         ksegment->memsz);

                ret = kimage_load_segment(image, &image->segment[i]);
                if (ret)
                        goto out;
        }

        kimage_terminate(image);

        /*
         * Free up any temporary buffers allocated that are not needed
         * after the image has been loaded.
         */
        kimage_file_post_load_cleanup(image);
exchange:
        image = xchg(dest_image, image);
out:
        if ((flags & KEXEC_FILE_ON_CRASH) && kexec_crash_image)
                arch_kexec_protect_crashkres();

        mutex_unlock(&kexec_mutex);
        kimage_free(image);
        return ret;
}
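/*
 * Illustrative userspace caller (not part of this file): glibc provides no
 * wrapper for this syscall, so it is invoked via syscall(2). The file paths
 * are examples and error handling is elided.
 *
 *      #include <fcntl.h>
 *      #include <string.h>
 *      #include <unistd.h>
 *      #include <sys/syscall.h>
 *      #include <linux/kexec.h>
 *
 *      int load_next_kernel(void)
 *      {
 *              int kernel_fd = open("/boot/vmlinuz", O_RDONLY);
 *              int initrd_fd = open("/boot/initrd.img", O_RDONLY);
 *              const char *cmdline = "console=ttyS0 root=/dev/sda1";
 *
 *              // cmdline_len must count the trailing NUL (checked above)
 *              return syscall(__NR_kexec_file_load, kernel_fd, initrd_fd,
 *                             strlen(cmdline) + 1, cmdline, 0UL);
 *      }
 */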
static int locate_mem_hole_top_down(unsigned long start, unsigned long end,
                                    struct kexec_buf *kbuf)
{
        struct kimage *image = kbuf->image;
        unsigned long temp_start, temp_end;

        temp_end = min(end, kbuf->buf_max);
        temp_start = temp_end - kbuf->memsz;

        do {
                /* align down start */
                temp_start = temp_start & (~(kbuf->buf_align - 1));

                if (temp_start < start || temp_start < kbuf->buf_min)
                        return 0;

                temp_end = temp_start + kbuf->memsz - 1;

                /*
                 * Make sure this does not conflict with any existing
                 * destination ranges.
                 */
                if (kimage_is_destination_range(image, temp_start, temp_end)) {
                        temp_start = temp_start - PAGE_SIZE;
                        continue;
                }

                /* We found a suitable memory range */
                break;
        } while (1);

        /* If we are here, we found a suitable memory range */
        kbuf->mem = temp_start;

        /* Success, stop navigating through remaining System RAM ranges */
        return 1;
}
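/*
 * Worked example for the top-down search (illustrative numbers): with
 * end = 0x3fffffff, buf_max = ULONG_MAX, memsz = 0x3000 and
 * buf_align = 0x1000, temp_start begins at 0x3fffcfff and aligns down to
 * 0x3fffc000, giving the window [0x3fffc000, 0x3fffefff]. If that window
 * overlaps an existing destination range, it slides down one page and the
 * checks repeat.
 */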
static int locate_mem_hole_bottom_up(unsigned long start, unsigned long end,
                                     struct kexec_buf *kbuf)
{
        struct kimage *image = kbuf->image;
        unsigned long temp_start, temp_end;

        temp_start = max(start, kbuf->buf_min);

        do {
                temp_start = ALIGN(temp_start, kbuf->buf_align);
                temp_end = temp_start + kbuf->memsz - 1;

                if (temp_end > end || temp_end > kbuf->buf_max)
                        return 0;

                /*
                 * Make sure this does not conflict with any existing
                 * destination ranges.
                 */
                if (kimage_is_destination_range(image, temp_start, temp_end)) {
                        temp_start = temp_start + PAGE_SIZE;
                        continue;
                }

                /* We found a suitable memory range */
                break;
        } while (1);

        /* If we are here, we found a suitable memory range */
        kbuf->mem = temp_start;

        /* Success, stop navigating through remaining System RAM ranges */
        return 1;
}
static int locate_mem_hole_callback(struct resource *res, void *arg)
{
        struct kexec_buf *kbuf = (struct kexec_buf *)arg;
        u64 start = res->start, end = res->end;
        unsigned long sz = end - start + 1;

        /* Returning 0 moves on to the next memory range */
        if (sz < kbuf->memsz)
                return 0;

        if (end < kbuf->buf_min || start > kbuf->buf_max)
                return 0;

        /*
         * Allocate memory top down within the RAM range, otherwise
         * allocate bottom up.
         */
        if (kbuf->top_down)
                return locate_mem_hole_top_down(start, end, kbuf);
        return locate_mem_hole_bottom_up(start, end, kbuf);
}
/**
 * arch_kexec_walk_mem - call func(data) on free memory regions
 * @kbuf:       Context info for the search. Also passed to @func.
 * @func:       Function to call for each memory region.
 *
 * Return: The memory walk will stop when func returns a non-zero value
 * and that value will be returned. If all free regions are visited without
 * func returning non-zero, then zero will be returned.
 */
int __weak arch_kexec_walk_mem(struct kexec_buf *kbuf,
                               int (*func)(struct resource *, void *))
{
        if (kbuf->image->type == KEXEC_TYPE_CRASH)
                return walk_iomem_res_desc(crashk_res.desc,
                                           IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY,
                                           crashk_res.start, crashk_res.end,
                                           kbuf, func);
        else
                return walk_system_ram_res(0, ULONG_MAX, kbuf, func);
}
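/*
 * Sketch of an override (assumption, not from this file): an architecture
 * whose usable ranges are not well described by the resource tree can
 * replace the weak hook above, synthesizing a struct resource per usable
 * range and stopping as soon as func() returns non-zero. powerpc's
 * kexec_file support takes this approach by walking memblock instead of
 * the System RAM resources.
 */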
/**
 * kexec_locate_mem_hole - find free memory for the purgatory or the next kernel
 * @kbuf:       Parameters for the memory search.
 *
 * On success, kbuf->mem will have the start address of the memory region found.
 *
 * Return: 0 on success, negative errno on error.
 */
int kexec_locate_mem_hole(struct kexec_buf *kbuf)
{
        int ret;

        ret = arch_kexec_walk_mem(kbuf, locate_mem_hole_callback);

        return ret == 1 ? 0 : -EADDRNOTAVAIL;
}
/**
 * kexec_add_buffer - place a buffer in a kexec segment
 * @kbuf:       Buffer contents and memory parameters.
 *
 * This function assumes that kexec_mutex is held.
 * On successful return, @kbuf->mem will have the physical address of
 * the buffer in memory.
 *
 * Return: 0 on success, negative errno on error.
 */
int kexec_add_buffer(struct kexec_buf *kbuf)
{
        struct kexec_segment *ksegment;
        int ret;

        /* Currently, adding a segment this way is allowed only in file mode */
        if (!kbuf->image->file_mode)
                return -EINVAL;

        if (kbuf->image->nr_segments >= KEXEC_SEGMENT_MAX)
                return -EINVAL;

        /*
         * Make sure we are not trying to add a buffer after allocating
         * control pages. All segments need to be placed before any control
         * pages are allocated, since the control page allocation logic goes
         * through the list of segments to make sure there are no
         * destination overlaps.
         */
        if (!list_empty(&kbuf->image->control_pages)) {
                WARN_ON(1);
                return -EINVAL;
        }

        /* Ensure minimum alignment needed for segments. */
        kbuf->memsz = ALIGN(kbuf->memsz, PAGE_SIZE);
        kbuf->buf_align = max(kbuf->buf_align, PAGE_SIZE);

        /* Walk the RAM ranges and allocate a suitable range for the buffer */
        ret = kexec_locate_mem_hole(kbuf);
        if (ret)
                return ret;

        /* Found a suitable memory range */
        ksegment = &kbuf->image->segment[kbuf->image->nr_segments];
        ksegment->kbuf = kbuf->buffer;
        ksegment->bufsz = kbuf->bufsz;
        ksegment->mem = kbuf->mem;
        ksegment->memsz = kbuf->memsz;
        kbuf->image->nr_segments++;
        return 0;
}
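/*
 * Illustrative caller (hypothetical names): an arch image loader typically
 * stages a blob as a segment like this, letting kexec_locate_mem_hole()
 * pick the physical destination:
 *
 *      struct kexec_buf kbuf = { .image = image, .buf_min = 0,
 *                                .buf_max = ULONG_MAX, .top_down = true,
 *                                .buf_align = PAGE_SIZE };
 *
 *      kbuf.buffer = blob;
 *      kbuf.bufsz = kbuf.memsz = blob_len;
 *      ret = kexec_add_buffer(&kbuf);
 *      if (!ret)
 *              load_addr = kbuf.mem;   // physical address chosen above
 */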
/* Calculate and store the digest of segments */
static int kexec_calculate_store_digests(struct kimage *image)
{
        struct crypto_shash *tfm;
        struct shash_desc *desc;
        int ret = 0, i, j, zero_buf_sz, sha_region_sz;
        size_t desc_size, nullsz;
        char *digest;
        void *zero_buf;
        struct kexec_sha_region *sha_regions;
        struct purgatory_info *pi = &image->purgatory_info;

        if (!IS_ENABLED(CONFIG_ARCH_HAS_KEXEC_PURGATORY))
                return 0;

        zero_buf = __va(page_to_pfn(ZERO_PAGE(0)) << PAGE_SHIFT);
        zero_buf_sz = PAGE_SIZE;

        tfm = crypto_alloc_shash("sha256", 0, 0);
        if (IS_ERR(tfm)) {
                ret = PTR_ERR(tfm);
                goto out;
        }

        desc_size = crypto_shash_descsize(tfm) + sizeof(*desc);
        desc = kzalloc(desc_size, GFP_KERNEL);
        if (!desc) {
                ret = -ENOMEM;
                goto out_free_tfm;
        }

        sha_region_sz = KEXEC_SEGMENT_MAX * sizeof(struct kexec_sha_region);
        sha_regions = vzalloc(sha_region_sz);
        if (!sha_regions) {
                ret = -ENOMEM;
                goto out_free_desc;
        }

        desc->tfm = tfm;
        desc->flags = 0;

        ret = crypto_shash_init(desc);
        if (ret < 0)
                goto out_free_sha_regions;

        digest = kzalloc(SHA256_DIGEST_SIZE, GFP_KERNEL);
        if (!digest) {
                ret = -ENOMEM;
                goto out_free_sha_regions;
        }

        for (j = i = 0; i < image->nr_segments; i++) {
                struct kexec_segment *ksegment;

                ksegment = &image->segment[i];
                /*
                 * Skip purgatory as it will be modified once we put digest
                 * info in purgatory.
                 */
                if (ksegment->kbuf == pi->purgatory_buf)
                        continue;

                ret = crypto_shash_update(desc, ksegment->kbuf,
                                          ksegment->bufsz);
                if (ret)
                        break;

                /*
                 * Assume the rest of the buffer is filled with zeroes and
                 * update the digest accordingly.
                 */
                nullsz = ksegment->memsz - ksegment->bufsz;
                while (nullsz) {
                        unsigned long bytes = nullsz;

                        if (bytes > zero_buf_sz)
                                bytes = zero_buf_sz;
                        ret = crypto_shash_update(desc, zero_buf, bytes);
                        if (ret)
                                break;
                        nullsz -= bytes;
                }

                if (ret)
                        break;

                sha_regions[j].start = ksegment->mem;
                sha_regions[j].len = ksegment->memsz;
                j++;
        }

        if (ret)
                goto out_free_digest;

        ret = crypto_shash_final(desc, digest);
        if (ret)
                goto out_free_digest;

        ret = kexec_purgatory_get_set_symbol(image, "purgatory_sha_regions",
                                             sha_regions, sha_region_sz, 0);
        if (ret)
                goto out_free_digest;

        ret = kexec_purgatory_get_set_symbol(image, "purgatory_sha256_digest",
                                             digest, SHA256_DIGEST_SIZE, 0);

out_free_digest:
        kfree(digest);
out_free_sha_regions:
        vfree(sha_regions);
out_free_desc:
        kfree(desc);
out_free_tfm:
        crypto_free_shash(tfm);
out:
        return ret;
}
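/*
 * For reference, the consumer side (sketch modeled on the x86 purgatory,
 * not code in this file): purgatory recomputes the hash over the recorded
 * regions before jumping to the new kernel, conceptually:
 *
 *      struct sha256_state sctx;
 *      u8 digest[SHA256_DIGEST_SIZE];
 *
 *      sha256_init(&sctx);
 *      for (i = 0; i < ARRAY_SIZE(purgatory_sha_regions); i++)
 *              sha256_update(&sctx, (u8 *)purgatory_sha_regions[i].start,
 *                            purgatory_sha_regions[i].len);
 *      sha256_final(&sctx, digest);
 *      if (memcmp(digest, purgatory_sha256_digest, sizeof(digest)))
 *              return 1;       // refuse to boot the loaded image
 */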
#ifdef CONFIG_ARCH_HAS_KEXEC_PURGATORY
/**
 * kexec_purgatory_setup_kbuf - prepare buffer to load purgatory.
 * @pi:         Purgatory to be loaded.
 * @kbuf:       Buffer to setup.
 *
 * Allocates the memory needed for the buffer. The caller is responsible for
 * freeing the memory after use.
 *
 * Return: 0 on success, negative errno on error.
 */
static int kexec_purgatory_setup_kbuf(struct purgatory_info *pi,
                                      struct kexec_buf *kbuf)
{
        const Elf_Shdr *sechdrs;
        unsigned long bss_align;
        unsigned long bss_sz;
        unsigned long align;
        int i, ret;

        sechdrs = (void *)pi->ehdr + pi->ehdr->e_shoff;
        kbuf->buf_align = bss_align = 1;
        kbuf->bufsz = bss_sz = 0;

        for (i = 0; i < pi->ehdr->e_shnum; i++) {
                if (!(sechdrs[i].sh_flags & SHF_ALLOC))
                        continue;

                align = sechdrs[i].sh_addralign;
                if (sechdrs[i].sh_type != SHT_NOBITS) {
                        if (kbuf->buf_align < align)
                                kbuf->buf_align = align;
                        kbuf->bufsz = ALIGN(kbuf->bufsz, align);
                        kbuf->bufsz += sechdrs[i].sh_size;
                } else {
                        if (bss_align < align)
                                bss_align = align;
                        bss_sz = ALIGN(bss_sz, align);
                        bss_sz += sechdrs[i].sh_size;
                }
        }
        kbuf->bufsz = ALIGN(kbuf->bufsz, bss_align);
        kbuf->memsz = kbuf->bufsz + bss_sz;
        if (kbuf->buf_align < bss_align)
                kbuf->buf_align = bss_align;

        kbuf->buffer = vzalloc(kbuf->bufsz);
        if (!kbuf->buffer)
                return -ENOMEM;
        pi->purgatory_buf = kbuf->buffer;

        ret = kexec_add_buffer(kbuf);
        if (ret)
                goto out;

        return 0;
out:
        vfree(pi->purgatory_buf);
        pi->purgatory_buf = NULL;
        return ret;
}
/**
 * kexec_purgatory_setup_sechdrs - prepares the pi->sechdrs buffer.
 * @pi:         Purgatory to be loaded.
 * @kbuf:       Buffer prepared to store purgatory.
 *
 * Allocates the memory needed for the buffer. The caller is responsible for
 * freeing the memory after use.
 *
 * Return: 0 on success, negative errno on error.
 */
static int kexec_purgatory_setup_sechdrs(struct purgatory_info *pi,
                                         struct kexec_buf *kbuf)
{
        unsigned long bss_addr;
        unsigned long offset;
        Elf_Shdr *sechdrs;
        int i;

        /*
         * The section headers in kexec_purgatory are read-only. In order to
         * have them modifiable make a temporary copy.
         */
        sechdrs = vzalloc(array_size(sizeof(Elf_Shdr), pi->ehdr->e_shnum));
        if (!sechdrs)
                return -ENOMEM;
        memcpy(sechdrs, (void *)pi->ehdr + pi->ehdr->e_shoff,
               pi->ehdr->e_shnum * sizeof(Elf_Shdr));
        pi->sechdrs = sechdrs;

        offset = 0;
        bss_addr = kbuf->mem + kbuf->bufsz;
        kbuf->image->start = pi->ehdr->e_entry;

        for (i = 0; i < pi->ehdr->e_shnum; i++) {
                unsigned long align;
                void *src, *dst;

                if (!(sechdrs[i].sh_flags & SHF_ALLOC))
                        continue;

                align = sechdrs[i].sh_addralign;
                if (sechdrs[i].sh_type == SHT_NOBITS) {
                        bss_addr = ALIGN(bss_addr, align);
                        sechdrs[i].sh_addr = bss_addr;
                        bss_addr += sechdrs[i].sh_size;
                        continue;
                }

                offset = ALIGN(offset, align);
                if (sechdrs[i].sh_flags & SHF_EXECINSTR &&
                    pi->ehdr->e_entry >= sechdrs[i].sh_addr &&
                    pi->ehdr->e_entry < (sechdrs[i].sh_addr
                                         + sechdrs[i].sh_size)) {
                        kbuf->image->start -= sechdrs[i].sh_addr;
                        kbuf->image->start += kbuf->mem + offset;
                }

                src = (void *)pi->ehdr + sechdrs[i].sh_offset;
                dst = pi->purgatory_buf + offset;
                memcpy(dst, src, sechdrs[i].sh_size);

                sechdrs[i].sh_addr = kbuf->mem + offset;
                sechdrs[i].sh_offset = offset;
                offset += sechdrs[i].sh_size;
        }

        return 0;
}
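/*
 * Entry rebasing above, with illustrative numbers: if e_entry = 0x100 and
 * the executable section containing it has sh_addr = 0x0 while being
 * copied to kbuf->mem + offset = 0x40000000, then image->start becomes
 * 0x100 - 0x0 + 0x40000000 = 0x40000100.
 */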
static int kexec_apply_relocations(struct kimage *image)
{
        int i, ret;
        struct purgatory_info *pi = &image->purgatory_info;
        const Elf_Shdr *sechdrs;

        sechdrs = (void *)pi->ehdr + pi->ehdr->e_shoff;

        for (i = 0; i < pi->ehdr->e_shnum; i++) {
                const Elf_Shdr *relsec;
                const Elf_Shdr *symtab;
                Elf_Shdr *section;

                relsec = sechdrs + i;

                if (relsec->sh_type != SHT_RELA &&
                    relsec->sh_type != SHT_REL)
                        continue;

                /*
                 * For sections of type SHT_RELA/SHT_REL, ->sh_link contains
                 * the section header index of the associated symbol table,
                 * and ->sh_info contains the section header index of the
                 * section to which the relocations apply.
                 */
                if (relsec->sh_info >= pi->ehdr->e_shnum ||
                    relsec->sh_link >= pi->ehdr->e_shnum)
                        return -ENOEXEC;

                section = pi->sechdrs + relsec->sh_info;
                symtab = sechdrs + relsec->sh_link;

                if (!(section->sh_flags & SHF_ALLOC))
                        continue;

                /*
                 * symtab->sh_link contains the section header index of the
                 * associated string table.
                 */
                if (symtab->sh_link >= pi->ehdr->e_shnum)
                        /* Invalid section number? */
                        continue;

                /*
                 * The respective architecture must provide support for
                 * applying relocations of type SHT_RELA/SHT_REL.
                 */
                if (relsec->sh_type == SHT_RELA)
                        ret = arch_kexec_apply_relocations_add(pi, section,
                                                               relsec, symtab);
                else if (relsec->sh_type == SHT_REL)
                        ret = arch_kexec_apply_relocations(pi, section,
                                                           relsec, symtab);
                if (ret)
                        return ret;
        }

        return 0;
}
/**
 * kexec_load_purgatory - Load and relocate the purgatory object.
 * @image:      Image to add the purgatory to.
 * @kbuf:       Memory parameters to use.
 *
 * Allocates the memory needed for image->purgatory_info.sechdrs and
 * image->purgatory_info.purgatory_buf/kbuf->buffer. The caller is
 * responsible for freeing the memory after use.
 *
 * Return: 0 on success, negative errno on error.
 */
int kexec_load_purgatory(struct kimage *image, struct kexec_buf *kbuf)
{
        struct purgatory_info *pi = &image->purgatory_info;
        int ret;

        if (kexec_purgatory_size <= 0)
                return -EINVAL;

        pi->ehdr = (const Elf_Ehdr *)kexec_purgatory;

        ret = kexec_purgatory_setup_kbuf(pi, kbuf);
        if (ret)
                return ret;

        ret = kexec_purgatory_setup_sechdrs(pi, kbuf);
        if (ret)
                goto out_free_kbuf;

        ret = kexec_apply_relocations(image);
        if (ret)
                goto out;

        return 0;
out:
        vfree(pi->sechdrs);
        pi->sechdrs = NULL;
out_free_kbuf:
        vfree(pi->purgatory_buf);
        pi->purgatory_buf = NULL;
        return ret;
}
/**
 * kexec_purgatory_find_symbol - find a symbol in the purgatory
 * @pi:         Purgatory to search in.
 * @name:       Name of the symbol.
 *
 * Return: pointer to symbol in read-only symtab on success, NULL on error.
 */
static const Elf_Sym *kexec_purgatory_find_symbol(struct purgatory_info *pi,
                                                  const char *name)
{
        const Elf_Shdr *sechdrs;
        const Elf_Ehdr *ehdr;
        const Elf_Sym *syms;
        const char *strtab;
        int i, k;

        if (!pi->ehdr)
                return NULL;

        ehdr = pi->ehdr;
        sechdrs = (void *)ehdr + ehdr->e_shoff;

        for (i = 0; i < ehdr->e_shnum; i++) {
                if (sechdrs[i].sh_type != SHT_SYMTAB)
                        continue;

                if (sechdrs[i].sh_link >= ehdr->e_shnum)
                        /* Invalid strtab section number */
                        continue;
                strtab = (void *)ehdr + sechdrs[sechdrs[i].sh_link].sh_offset;
                syms = (void *)ehdr + sechdrs[i].sh_offset;

                /* Go through symbols for a match */
                for (k = 0; k < sechdrs[i].sh_size/sizeof(Elf_Sym); k++) {
                        if (ELF_ST_BIND(syms[k].st_info) != STB_GLOBAL)
                                continue;

                        if (strcmp(strtab + syms[k].st_name, name) != 0)
                                continue;

                        if (syms[k].st_shndx == SHN_UNDEF ||
                            syms[k].st_shndx >= ehdr->e_shnum) {
                                pr_debug("Symbol: %s has bad section index %d.\n",
                                         name, syms[k].st_shndx);
                                return NULL;
                        }

                        /* Found the symbol we are looking for */
                        return &syms[k];
                }
        }

        return NULL;
}
void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name)
{
        struct purgatory_info *pi = &image->purgatory_info;
        const Elf_Sym *sym;
        Elf_Shdr *sechdr;

        sym = kexec_purgatory_find_symbol(pi, name);
        if (!sym)
                return ERR_PTR(-EINVAL);

        sechdr = &pi->sechdrs[sym->st_shndx];

        /*
         * Return the address where the symbol will finally be loaded after
         * kimage_load_segment().
         */
        return (void *)(sechdr->sh_addr + sym->st_value);
}
/*
 * Get or set the value of a symbol. If "get_value" is true, the symbol value
 * is returned in buf; otherwise the symbol value is set based on the value
 * in buf.
 */
int kexec_purgatory_get_set_symbol(struct kimage *image, const char *name,
                                   void *buf, unsigned int size, bool get_value)
{
        struct purgatory_info *pi = &image->purgatory_info;
        const Elf_Sym *sym;
        Elf_Shdr *sec;
        char *sym_buf;

        sym = kexec_purgatory_find_symbol(pi, name);
        if (!sym)
                return -EINVAL;

        if (sym->st_size != size) {
                pr_err("symbol %s size mismatch: expected %lu actual %u\n",
                       name, (unsigned long)sym->st_size, size);
                return -EINVAL;
        }

        sec = pi->sechdrs + sym->st_shndx;

        if (sec->sh_type == SHT_NOBITS) {
                pr_err("symbol %s is in a bss section. Cannot %s\n", name,
                       get_value ? "get" : "set");
                return -EINVAL;
        }

        sym_buf = (char *)pi->purgatory_buf + sec->sh_offset + sym->st_value;

        if (get_value)
                memcpy((void *)buf, sym_buf, size);
        else
                memcpy((void *)sym_buf, buf, size);

        return 0;
}
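/*
 * Usage note: besides the digest symbols set by
 * kexec_calculate_store_digests() above, arch loaders use this to hand
 * state to purgatory; e.g. the x86 bzImage loader does roughly
 *
 *      ret = kexec_purgatory_get_set_symbol(image, "entry64_regs",
 *                                           &regs64, sizeof(regs64), 0);
 *
 * ("entry64_regs" is an x86 purgatory symbol, not something defined here.)
 */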
#endif /* CONFIG_ARCH_HAS_KEXEC_PURGATORY */
int crash_exclude_mem_range(struct crash_mem *mem,
                            unsigned long long mstart, unsigned long long mend)
{
        int i, j;
        unsigned long long start, end;
        struct crash_mem_range temp_range = {0, 0};

        for (i = 0; i < mem->nr_ranges; i++) {
                start = mem->ranges[i].start;
                end = mem->ranges[i].end;

                if (mstart > end || mend < start)
                        continue;

                /* Truncate any area outside of the range */
                if (mstart < start)
                        mstart = start;
                if (mend > end)
                        mend = end;

                /* Found completely overlapping range */
                if (mstart == start && mend == end) {
                        mem->ranges[i].start = 0;
                        mem->ranges[i].end = 0;
                        if (i < mem->nr_ranges - 1) {
                                /* Shift the rest of the ranges to the left */
                                for (j = i; j < mem->nr_ranges - 1; j++) {
                                        mem->ranges[j].start =
                                                mem->ranges[j+1].start;
                                        mem->ranges[j].end =
                                                mem->ranges[j+1].end;
                                }
                        }
                        mem->nr_ranges--;
                        return 0;
                }

                if (mstart > start && mend < end) {
                        /* Split original range */
                        mem->ranges[i].end = mstart - 1;
                        temp_range.start = mend + 1;
                        temp_range.end = end;
                } else if (mstart != start)
                        mem->ranges[i].end = mstart - 1;
                else
                        mem->ranges[i].start = mend + 1;
                break;
        }

        /* If a split happened, add the split range to the array */
        if (!temp_range.end)
                return 0;

        /* Split happened */
        if (i == mem->max_nr_ranges - 1)
                return -ENOMEM;

        /* Location where new range should go */
        j = i + 1;
        if (j < mem->nr_ranges) {
                /* Move over all ranges one slot towards the end */
                for (i = mem->nr_ranges - 1; i >= j; i--)
                        mem->ranges[i + 1] = mem->ranges[i];
        }

        mem->ranges[j].start = temp_range.start;
        mem->ranges[j].end = temp_range.end;
        mem->nr_ranges++;
        return 0;
}
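/*
 * Worked example: excluding [0x5000, 0x8fff] from the single range
 * [0x0, 0xffff] splits it into [0x0, 0x4fff] and [0x9000, 0xffff] and
 * grows nr_ranges by one, so callers must size max_nr_ranges to allow up
 * to one extra range per exclusion.
 */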
int crash_prepare_elf64_headers(struct crash_mem *mem, int kernel_map,
                                void **addr, unsigned long *sz)
{
        Elf64_Ehdr *ehdr;
        Elf64_Phdr *phdr;
        unsigned long nr_cpus = num_possible_cpus(), nr_phdr, elf_sz;
        unsigned char *buf;
        unsigned int cpu, i;
        unsigned long long notes_addr;
        unsigned long mstart, mend;

        /* extra phdr for vmcoreinfo ELF note */
        nr_phdr = nr_cpus + 1;
        nr_phdr += mem->nr_ranges;

        /*
         * kexec-tools creates an extra PT_LOAD phdr for the kernel text
         * mapping area (for example, ffffffff80000000 - ffffffffa0000000 on
         * x86_64), which tools like gdb expect. So the same physical memory
         * is mapped in two ELF headers: one with kernel text virtual
         * addresses and the other with __va(physical) addresses.
         */
        nr_phdr++;
        elf_sz = sizeof(Elf64_Ehdr) + nr_phdr * sizeof(Elf64_Phdr);
        elf_sz = ALIGN(elf_sz, ELF_CORE_HEADER_ALIGN);

        buf = vzalloc(elf_sz);
        if (!buf)
                return -ENOMEM;

        ehdr = (Elf64_Ehdr *)buf;
        phdr = (Elf64_Phdr *)(ehdr + 1);
        memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
        ehdr->e_ident[EI_CLASS] = ELFCLASS64;
        ehdr->e_ident[EI_DATA] = ELFDATA2LSB;
        ehdr->e_ident[EI_VERSION] = EV_CURRENT;
        ehdr->e_ident[EI_OSABI] = ELF_OSABI;
        memset(ehdr->e_ident + EI_PAD, 0, EI_NIDENT - EI_PAD);
        ehdr->e_type = ET_CORE;
        ehdr->e_machine = ELF_ARCH;
        ehdr->e_version = EV_CURRENT;
        ehdr->e_phoff = sizeof(Elf64_Ehdr);
        ehdr->e_ehsize = sizeof(Elf64_Ehdr);
        ehdr->e_phentsize = sizeof(Elf64_Phdr);

        /* Prepare one phdr of type PT_NOTE for each present CPU */
        for_each_present_cpu(cpu) {
                phdr->p_type = PT_NOTE;
                notes_addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpu));
                phdr->p_offset = phdr->p_paddr = notes_addr;
                phdr->p_filesz = phdr->p_memsz = sizeof(note_buf_t);
                (ehdr->e_phnum)++;
                phdr++;
        }

        /* Prepare one PT_NOTE header for vmcoreinfo */
        phdr->p_type = PT_NOTE;
        phdr->p_offset = phdr->p_paddr = paddr_vmcoreinfo_note();
        phdr->p_filesz = phdr->p_memsz = VMCOREINFO_NOTE_SIZE;
        (ehdr->e_phnum)++;
        phdr++;

        /* Prepare PT_LOAD type program header for kernel text region */
        if (kernel_map) {
                phdr->p_type = PT_LOAD;
                phdr->p_flags = PF_R|PF_W|PF_X;
                phdr->p_vaddr = (Elf64_Addr)_text;
                phdr->p_filesz = phdr->p_memsz = _end - _text;
                phdr->p_offset = phdr->p_paddr = __pa_symbol(_text);
                (ehdr->e_phnum)++;
                phdr++;
        }

        /* Go through all the ranges in mem->ranges[] and prepare phdr */
        for (i = 0; i < mem->nr_ranges; i++) {
                mstart = mem->ranges[i].start;
                mend = mem->ranges[i].end;

                phdr->p_type = PT_LOAD;
                phdr->p_flags = PF_R|PF_W|PF_X;
                phdr->p_offset = mstart;

                phdr->p_paddr = mstart;
                phdr->p_vaddr = (unsigned long long) __va(mstart);
                phdr->p_filesz = phdr->p_memsz = mend - mstart + 1;
                (ehdr->e_phnum)++;

                pr_debug("Crash PT_LOAD elf header. phdr=%p vaddr=0x%llx, paddr=0x%llx, sz=0x%llx e_phnum=%d p_offset=0x%llx\n",
                         phdr, phdr->p_vaddr, phdr->p_paddr, phdr->p_filesz,
                         ehdr->e_phnum, phdr->p_offset);
                phdr++;
        }

        *addr = buf;
        *sz = elf_sz;
        return 0;
}