// SPDX-License-Identifier: GPL-2.0-only
/*
 * kexec: kexec_file_load system call
 *
 * Copyright (C) 2014 Red Hat Inc.
 * Authors:
 *      Vivek Goyal <vgoyal@redhat.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/kexec.h>
#include <linux/memblock.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/fs.h>
#include <linux/ima.h>
#include <crypto/hash.h>
#include <crypto/sha.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/vmalloc.h>
#include "kexec_internal.h"
static int kexec_calculate_store_digests(struct kimage *image);
/*
 * Currently this is the only default function that is exported as some
 * architectures need it to do additional handling.
 * In the future, other default functions may be exported too if required.
 */
int kexec_image_probe_default(struct kimage *image, void *buf,
			      unsigned long buf_len)
{
	const struct kexec_file_ops * const *fops;
	int ret = -ENOEXEC;

	for (fops = &kexec_file_loaders[0]; *fops && (*fops)->probe; ++fops) {
		ret = (*fops)->probe(buf, buf_len);
		if (!ret) {
			image->fops = *fops;
			return ret;
		}
	}

	return ret;
}
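
/*
 * Illustrative sketch (not part of this file): the probe loop above walks
 * the arch-provided kexec_file_loaders[] table. A loader for a hypothetical
 * image format would be wired up roughly like this; foo_probe, foo_load,
 * foo_cleanup and the "FOOIMAGE" magic are made-up names for illustration.
 */
#if 0
static int foo_probe(const char *buf, unsigned long buf_len)
{
	/* Return 0 if the buffer looks like our format, -ENOEXEC otherwise */
	if (buf_len < 8 || memcmp(buf, "FOOIMAGE", 8))
		return -ENOEXEC;
	return 0;
}

static const struct kexec_file_ops foo_kexec_ops = {
	.probe = foo_probe,
	.load = foo_load,	/* builds segments, returns loader data */
	.cleanup = foo_cleanup,	/* frees loader data on unload */
};

/* Listed in the architecture's loader table, e.g.: */
const struct kexec_file_ops * const kexec_file_loaders[] = {
	&foo_kexec_ops,
	NULL
};
#endif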
/* Architectures can provide this probe function */
int __weak arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
					 unsigned long buf_len)
{
	return kexec_image_probe_default(image, buf, buf_len);
}
static void *kexec_image_load_default(struct kimage *image)
{
	if (!image->fops || !image->fops->load)
		return ERR_PTR(-ENOEXEC);

	return image->fops->load(image, image->kernel_buf,
				 image->kernel_buf_len, image->initrd_buf,
				 image->initrd_buf_len, image->cmdline_buf,
				 image->cmdline_buf_len);
}

void * __weak arch_kexec_kernel_image_load(struct kimage *image)
{
	return kexec_image_load_default(image);
}
int kexec_image_post_load_cleanup_default(struct kimage *image)
{
	if (!image->fops || !image->fops->cleanup)
		return 0;

	return image->fops->cleanup(image->image_loader_data);
}

int __weak arch_kimage_file_post_load_cleanup(struct kimage *image)
{
	return kexec_image_post_load_cleanup_default(image);
}
#ifdef CONFIG_KEXEC_SIG
static int kexec_image_verify_sig_default(struct kimage *image, void *buf,
					  unsigned long buf_len)
{
	if (!image->fops || !image->fops->verify_sig) {
		pr_debug("kernel loader does not support signature verification.\n");
		return -EKEYREJECTED;
	}

	return image->fops->verify_sig(buf, buf_len);
}

int __weak arch_kexec_kernel_verify_sig(struct kimage *image, void *buf,
					unsigned long buf_len)
{
	return kexec_image_verify_sig_default(image, buf, buf_len);
}
#endif
/*
 * Free up memory used by the kernel, initrd, and command line. These are
 * temporary allocations that are no longer needed once the buffers have
 * been loaded into separate segments and copied elsewhere.
 */
void kimage_file_post_load_cleanup(struct kimage *image)
{
	struct purgatory_info *pi = &image->purgatory_info;

	vfree(image->kernel_buf);
	image->kernel_buf = NULL;

	vfree(image->initrd_buf);
	image->initrd_buf = NULL;

	kfree(image->cmdline_buf);
	image->cmdline_buf = NULL;

	vfree(pi->purgatory_buf);
	pi->purgatory_buf = NULL;

	vfree(pi->sechdrs);
	pi->sechdrs = NULL;

#ifdef CONFIG_IMA_KEXEC
	vfree(image->ima_buffer);
	image->ima_buffer = NULL;
#endif /* CONFIG_IMA_KEXEC */

	/* See if the architecture has anything to clean up post load */
	arch_kimage_file_post_load_cleanup(image);

	/*
	 * The above call should have reached the image loader to free up
	 * any data stored in kimage->image_loader_data. It should
	 * be ok now to free it up.
	 */
	kfree(image->image_loader_data);
	image->image_loader_data = NULL;
}
#ifdef CONFIG_KEXEC_SIG
static int
kimage_validate_signature(struct kimage *image)
{
	const char *reason;
	int ret;

	ret = arch_kexec_kernel_verify_sig(image, image->kernel_buf,
					   image->kernel_buf_len);
	switch (ret) {
	case 0:
		break;

		/* Certain verification errors are non-fatal if we're not
		 * checking errors, provided we aren't mandating that there
		 * must be a valid signature.
		 */
	case -ENODATA:
		reason = "kexec of unsigned image";
		goto decide;
	case -ENOPKG:
		reason = "kexec of image with unsupported crypto";
		goto decide;
	case -ENOKEY:
		reason = "kexec of image with unavailable key";
	decide:
		if (IS_ENABLED(CONFIG_KEXEC_SIG_FORCE)) {
			pr_notice("%s rejected\n", reason);
			return ret;
		}

		/* If IMA is guaranteed to appraise a signature on the kexec
		 * image, permit it even if the kernel is otherwise locked
		 * down.
		 */
		if (!ima_appraise_signature(READING_KEXEC_IMAGE) &&
		    security_locked_down(LOCKDOWN_KEXEC))
			return -EPERM;

		return 0;

		/* All other errors are fatal, including nomem, unparseable
		 * signatures and signature check failures - even if signatures
		 * aren't required.
		 */
	default:
		pr_notice("kernel signature verification failed (%d).\n", ret);
	}

	return ret;
}
#endif
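
/*
 * Summary of the policy implemented above (descriptive comment, not
 * normative):
 *   - verify_sig returned 0:       always allowed.
 *   - -ENODATA/-ENOPKG/-ENOKEY:    rejected if CONFIG_KEXEC_SIG_FORCE;
 *                                  otherwise allowed, unless the kernel is
 *                                  locked down and IMA will not appraise
 *                                  the image (then -EPERM).
 *   - any other error:             always fatal.
 */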
/*
 * In file mode, the list of segments is prepared by the kernel. Copy
 * relevant data from user space, do error checking, and prepare the
 * segment list.
 */
static int
kimage_file_prepare_segments(struct kimage *image, int kernel_fd, int initrd_fd,
			     const char __user *cmdline_ptr,
			     unsigned long cmdline_len, unsigned flags)
{
	int ret;
	void *ldata;
	loff_t size;

	ret = kernel_read_file_from_fd(kernel_fd, &image->kernel_buf,
				       &size, INT_MAX, READING_KEXEC_IMAGE);
	if (ret)
		return ret;
	image->kernel_buf_len = size;

	/* Call arch image probe handlers */
	ret = arch_kexec_kernel_image_probe(image, image->kernel_buf,
					    image->kernel_buf_len);
	if (ret)
		goto out;

#ifdef CONFIG_KEXEC_SIG
	ret = kimage_validate_signature(image);

	if (ret)
		goto out;
#endif
	/* It is possible that no initramfs is being loaded */
	if (!(flags & KEXEC_FILE_NO_INITRAMFS)) {
		ret = kernel_read_file_from_fd(initrd_fd, &image->initrd_buf,
					       &size, INT_MAX,
					       READING_KEXEC_INITRAMFS);
		if (ret)
			goto out;
		image->initrd_buf_len = size;
	}

	if (cmdline_len) {
		image->cmdline_buf = memdup_user(cmdline_ptr, cmdline_len);
		if (IS_ERR(image->cmdline_buf)) {
			ret = PTR_ERR(image->cmdline_buf);
			image->cmdline_buf = NULL;
			goto out;
		}

		image->cmdline_buf_len = cmdline_len;

		/* command line should be a string with last byte null */
		if (image->cmdline_buf[cmdline_len - 1] != '\0') {
			ret = -EINVAL;
			goto out;
		}

		ima_kexec_cmdline(image->cmdline_buf,
				  image->cmdline_buf_len - 1);
	}

	/* IMA needs to pass the measurement list to the next kernel. */
	ima_add_kexec_buffer(image);

	/* Call arch image load handlers */
	ldata = arch_kexec_kernel_image_load(image);

	if (IS_ERR(ldata)) {
		ret = PTR_ERR(ldata);
		goto out;
	}

	image->image_loader_data = ldata;
out:
	/* In case of error, free up all allocated memory in this function */
	if (ret)
		kimage_file_post_load_cleanup(image);
	return ret;
}
static int
kimage_file_alloc_init(struct kimage **rimage, int kernel_fd,
		       int initrd_fd, const char __user *cmdline_ptr,
		       unsigned long cmdline_len, unsigned long flags)
{
	int ret;
	struct kimage *image;
	bool kexec_on_panic = flags & KEXEC_FILE_ON_CRASH;

	image = do_kimage_alloc_init();
	if (!image)
		return -ENOMEM;

	image->file_mode = 1;

	if (kexec_on_panic) {
		/* Enable special crash kernel control page alloc policy. */
		image->control_page = crashk_res.start;
		image->type = KEXEC_TYPE_CRASH;
	}

	ret = kimage_file_prepare_segments(image, kernel_fd, initrd_fd,
					   cmdline_ptr, cmdline_len, flags);
	if (ret)
		goto out_free_image;

	ret = sanity_check_segment_list(image);
	if (ret)
		goto out_free_post_load_bufs;

	ret = -ENOMEM;
	image->control_code_page = kimage_alloc_control_pages(image,
					   get_order(KEXEC_CONTROL_PAGE_SIZE));
	if (!image->control_code_page) {
		pr_err("Could not allocate control_code_buffer\n");
		goto out_free_post_load_bufs;
	}

	if (!kexec_on_panic) {
		image->swap_page = kimage_alloc_control_pages(image, 0);
		if (!image->swap_page) {
			pr_err("Could not allocate swap buffer\n");
			goto out_free_control_pages;
		}
	}

	*rimage = image;
	return 0;
out_free_control_pages:
	kimage_free_page_list(&image->control_pages);
out_free_post_load_bufs:
	kimage_file_post_load_cleanup(image);
out_free_image:
	kfree(image);
	return ret;
}
SYSCALL_DEFINE5(kexec_file_load, int, kernel_fd, int, initrd_fd,
		unsigned long, cmdline_len, const char __user *, cmdline_ptr,
		unsigned long, flags)
{
	int ret = 0, i;
	struct kimage **dest_image, *image;

	/* We only trust the superuser with rebooting the system. */
	if (!capable(CAP_SYS_BOOT) || kexec_load_disabled)
		return -EPERM;

	/* Make sure we have a legal set of flags */
	if (flags != (flags & KEXEC_FILE_FLAGS))
		return -EINVAL;

	image = NULL;

	if (!mutex_trylock(&kexec_mutex))
		return -EBUSY;

	dest_image = &kexec_image;
	if (flags & KEXEC_FILE_ON_CRASH) {
		dest_image = &kexec_crash_image;
		if (kexec_crash_image)
			arch_kexec_unprotect_crashkres();
	}

	if (flags & KEXEC_FILE_UNLOAD)
		goto exchange;

	/*
	 * In case of crash, the new kernel gets loaded in the reserved
	 * region. It is the same memory where an old crash kernel might
	 * already be loaded. Free any current crash dump kernel before we
	 * corrupt it.
	 */
	if (flags & KEXEC_FILE_ON_CRASH)
		kimage_free(xchg(&kexec_crash_image, NULL));

	ret = kimage_file_alloc_init(&image, kernel_fd, initrd_fd, cmdline_ptr,
				     cmdline_len, flags);
	if (ret)
		goto out;

	ret = machine_kexec_prepare(image);
	if (ret)
		goto out;

	/*
	 * Some architectures (like s390) may touch the crash memory before
	 * machine_kexec_prepare(), so we must copy vmcoreinfo data after it.
	 */
	ret = kimage_crash_copy_vmcoreinfo(image);
	if (ret)
		goto out;

	ret = kexec_calculate_store_digests(image);
	if (ret)
		goto out;

	for (i = 0; i < image->nr_segments; i++) {
		struct kexec_segment *ksegment;

		ksegment = &image->segment[i];
		pr_debug("Loading segment %d: buf=0x%p bufsz=0x%zx mem=0x%lx memsz=0x%zx\n",
			 i, ksegment->buf, ksegment->bufsz, ksegment->mem,
			 ksegment->memsz);

		ret = kimage_load_segment(image, &image->segment[i]);
		if (ret)
			goto out;
	}

	kimage_terminate(image);

	/*
	 * Free up any temporary buffers allocated which are not needed
	 * after the image has been loaded.
	 */
	kimage_file_post_load_cleanup(image);
exchange:
	image = xchg(dest_image, image);
out:
	if ((flags & KEXEC_FILE_ON_CRASH) && kexec_crash_image)
		arch_kexec_protect_crashkres();

	mutex_unlock(&kexec_mutex);
	kimage_free(image);
	return ret;
}
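
/*
 * Illustrative sketch (not part of this file): from user space the system
 * call above is typically reached via syscall(2), since glibc provides no
 * wrapper. The file paths below are assumptions for the example.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	int kernel_fd = open("/boot/vmlinuz", O_RDONLY);
	int initrd_fd = open("/boot/initrd.img", O_RDONLY);
	const char *cmdline = "root=/dev/sda1 console=ttyS0";
	/* cmdline_len must count the terminating NUL byte */
	unsigned long cmdline_len = strlen(cmdline) + 1;

	if (kernel_fd < 0 || initrd_fd < 0)
		return 1;

	if (syscall(SYS_kexec_file_load, kernel_fd, initrd_fd,
		    cmdline_len, cmdline, 0UL) < 0) {
		perror("kexec_file_load");
		return 1;
	}
	/* A later reboot(LINUX_REBOOT_CMD_KEXEC) boots the loaded kernel. */
	return 0;
}
#endif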
static int locate_mem_hole_top_down(unsigned long start, unsigned long end,
				    struct kexec_buf *kbuf)
{
	struct kimage *image = kbuf->image;
	unsigned long temp_start, temp_end;

	temp_end = min(end, kbuf->buf_max);
	temp_start = temp_end - kbuf->memsz;

	do {
		/* align down start */
		temp_start = temp_start & (~(kbuf->buf_align - 1));

		if (temp_start < start || temp_start < kbuf->buf_min)
			return 0;

		temp_end = temp_start + kbuf->memsz - 1;

		/*
		 * Make sure this does not conflict with any of existing
		 * ranges
		 */
		if (kimage_is_destination_range(image, temp_start, temp_end)) {
			temp_start = temp_start - PAGE_SIZE;
			continue;
		}

		/* We found a suitable memory range */
		break;
	} while (1);

	/* If we are here, we found a suitable memory range */
	kbuf->mem = temp_start;

	/* Success, stop navigating through remaining System RAM ranges */
	return 1;
}
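
/*
 * Worked example (values are assumptions for illustration): searching
 * top-down in a range [0x100000, 0x1fffff] for memsz = 0x2000 with
 * buf_align = 0x1000 and buf_max = ULONG_MAX: temp_end starts at 0x1fffff,
 * temp_start at 0x1fdfff, which aligns down to 0x1fd000, giving a candidate
 * block [0x1fd000, 0x1fefff]. If that block collides with an existing
 * destination range, the probe slides down one page at a time until it
 * either fits or falls below start/buf_min.
 */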
static int locate_mem_hole_bottom_up(unsigned long start, unsigned long end,
				     struct kexec_buf *kbuf)
{
	struct kimage *image = kbuf->image;
	unsigned long temp_start, temp_end;

	temp_start = max(start, kbuf->buf_min);

	do {
		temp_start = ALIGN(temp_start, kbuf->buf_align);
		temp_end = temp_start + kbuf->memsz - 1;

		if (temp_end > end || temp_end > kbuf->buf_max)
			return 0;
		/*
		 * Make sure this does not conflict with any of existing
		 * ranges
		 */
		if (kimage_is_destination_range(image, temp_start, temp_end)) {
			temp_start = temp_start + PAGE_SIZE;
			continue;
		}

		/* We found a suitable memory range */
		break;
	} while (1);

	/* If we are here, we found a suitable memory range */
	kbuf->mem = temp_start;

	/* Success, stop navigating through remaining System RAM ranges */
	return 1;
}
static int locate_mem_hole_callback(struct resource *res, void *arg)
{
	struct kexec_buf *kbuf = (struct kexec_buf *)arg;
	u64 start = res->start, end = res->end;
	unsigned long sz = end - start + 1;

	/* Returning 0 will take us to the next memory range */
	if (sz < kbuf->memsz)
		return 0;

	if (end < kbuf->buf_min || start > kbuf->buf_max)
		return 0;

	/*
	 * Allocate memory top down within the RAM range. Otherwise, do a
	 * bottom-up allocation.
	 */
	if (kbuf->top_down)
		return locate_mem_hole_top_down(start, end, kbuf);
	return locate_mem_hole_bottom_up(start, end, kbuf);
}
#ifdef CONFIG_ARCH_KEEP_MEMBLOCK
static int kexec_walk_memblock(struct kexec_buf *kbuf,
			       int (*func)(struct resource *, void *))
{
	int ret = 0;
	u64 i;
	phys_addr_t mstart, mend;
	struct resource res = { };

	if (kbuf->image->type == KEXEC_TYPE_CRASH)
		return func(&crashk_res, kbuf);

	if (kbuf->top_down) {
		for_each_free_mem_range_reverse(i, NUMA_NO_NODE, MEMBLOCK_NONE,
						&mstart, &mend, NULL) {
			/*
			 * In memblock, end points to the first byte after the
			 * range while in kexec, end points to the last byte
			 * of the range.
			 */
			res.start = mstart;
			res.end = mend - 1;
			ret = func(&res, kbuf);
			if (ret)
				break;
		}
	} else {
		for_each_free_mem_range(i, NUMA_NO_NODE, MEMBLOCK_NONE,
					&mstart, &mend, NULL) {
			/*
			 * In memblock, end points to the first byte after the
			 * range while in kexec, end points to the last byte
			 * of the range.
			 */
			res.start = mstart;
			res.end = mend - 1;
			ret = func(&res, kbuf);
			if (ret)
				break;
		}
	}

	return ret;
}
#else
static int kexec_walk_memblock(struct kexec_buf *kbuf,
			       int (*func)(struct resource *, void *))
{
	return 0;
}
#endif
/**
 * kexec_walk_resources - call func(data) on free memory regions
 * @kbuf:	Context info for the search. Also passed to @func.
 * @func:	Function to call for each memory region.
 *
 * Return: The memory walk will stop when func returns a non-zero value
 * and that value will be returned. If all free regions are visited without
 * func returning non-zero, then zero will be returned.
 */
static int kexec_walk_resources(struct kexec_buf *kbuf,
				int (*func)(struct resource *, void *))
{
	if (kbuf->image->type == KEXEC_TYPE_CRASH)
		return walk_iomem_res_desc(crashk_res.desc,
					   IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY,
					   crashk_res.start, crashk_res.end,
					   kbuf, func);
	else
		return walk_system_ram_res(0, ULONG_MAX, kbuf, func);
}
/**
 * kexec_locate_mem_hole - find free memory for the purgatory or the next kernel
 * @kbuf:	Parameters for the memory search.
 *
 * On success, kbuf->mem will have the start address of the memory region found.
 *
 * Return: 0 on success, negative errno on error.
 */
int kexec_locate_mem_hole(struct kexec_buf *kbuf)
{
	int ret;

	/* Arch knows where to place */
	if (kbuf->mem != KEXEC_BUF_MEM_UNKNOWN)
		return 0;

	if (!IS_ENABLED(CONFIG_ARCH_KEEP_MEMBLOCK))
		ret = kexec_walk_resources(kbuf, locate_mem_hole_callback);
	else
		ret = kexec_walk_memblock(kbuf, locate_mem_hole_callback);

	return ret == 1 ? 0 : -EADDRNOTAVAIL;
}
/**
 * kexec_add_buffer - place a buffer in a kexec segment
 * @kbuf:	Buffer contents and memory parameters.
 *
 * This function assumes that kexec_mutex is held.
 * On successful return, @kbuf->mem will have the physical address of
 * the buffer in memory.
 *
 * Return: 0 on success, negative errno on error.
 */
int kexec_add_buffer(struct kexec_buf *kbuf)
{
	struct kexec_segment *ksegment;
	int ret;

	/* Currently adding segment this way is allowed only in file mode */
	if (!kbuf->image->file_mode)
		return -EINVAL;

	if (kbuf->image->nr_segments >= KEXEC_SEGMENT_MAX)
		return -EINVAL;

	/*
	 * Make sure we are not trying to add a buffer after allocating
	 * control pages. All segments need to be placed before any control
	 * pages are allocated, as the control page allocation logic goes
	 * through the list of segments to make sure there are no
	 * destination overlaps.
	 */
	if (!list_empty(&kbuf->image->control_pages)) {
		WARN_ON(1);
		return -EINVAL;
	}

	/* Ensure minimum alignment needed for segments. */
	kbuf->memsz = ALIGN(kbuf->memsz, PAGE_SIZE);
	kbuf->buf_align = max(kbuf->buf_align, PAGE_SIZE);

	/* Walk the RAM ranges and allocate a suitable range for the buffer */
	ret = kexec_locate_mem_hole(kbuf);
	if (ret)
		return ret;

	/* Found a suitable memory range */
	ksegment = &kbuf->image->segment[kbuf->image->nr_segments];
	ksegment->kbuf = kbuf->buffer;
	ksegment->bufsz = kbuf->bufsz;
	ksegment->mem = kbuf->mem;
	ksegment->memsz = kbuf->memsz;
	kbuf->image->nr_segments++;
	return 0;
}
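
/*
 * Illustrative sketch (not part of this file): an architecture's
 * kexec_file_ops->load handler typically fills a kexec_buf on the stack and
 * lets kexec_add_buffer() pick the destination. The concrete values below
 * are assumptions for the example.
 */
#if 0
	struct kexec_buf kbuf = { .image = image, .buf_min = 0,
				  .buf_max = ULONG_MAX, .top_down = true };

	kbuf.buffer = image->initrd_buf;	/* what to copy */
	kbuf.bufsz = image->initrd_buf_len;	/* bytes backed by the buffer */
	kbuf.memsz = image->initrd_buf_len;	/* total size of the segment */
	kbuf.buf_align = PAGE_SIZE;
	kbuf.mem = KEXEC_BUF_MEM_UNKNOWN;	/* let the walk find a hole */

	ret = kexec_add_buffer(&kbuf);
	if (ret)
		return ERR_PTR(ret);
	/* kbuf.mem now holds the physical load address of the initrd. */
#endif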
/* Calculate and store the digest of segments */
static int kexec_calculate_store_digests(struct kimage *image)
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	int ret = 0, i, j, zero_buf_sz, sha_region_sz;
	size_t desc_size, nullsz;
	char *digest;
	void *zero_buf;
	struct kexec_sha_region *sha_regions;
	struct purgatory_info *pi = &image->purgatory_info;

	if (!IS_ENABLED(CONFIG_ARCH_HAS_KEXEC_PURGATORY))
		return 0;

	zero_buf = __va(page_to_pfn(ZERO_PAGE(0)) << PAGE_SHIFT);
	zero_buf_sz = PAGE_SIZE;

	tfm = crypto_alloc_shash("sha256", 0, 0);
	if (IS_ERR(tfm)) {
		ret = PTR_ERR(tfm);
		goto out;
	}

	desc_size = crypto_shash_descsize(tfm) + sizeof(*desc);
	desc = kzalloc(desc_size, GFP_KERNEL);
	if (!desc) {
		ret = -ENOMEM;
		goto out_free_tfm;
	}

	sha_region_sz = KEXEC_SEGMENT_MAX * sizeof(struct kexec_sha_region);
	sha_regions = vzalloc(sha_region_sz);
	if (!sha_regions) {
		ret = -ENOMEM;
		goto out_free_desc;
	}

	desc->tfm = tfm;

	ret = crypto_shash_init(desc);
	if (ret < 0)
		goto out_free_sha_regions;

	digest = kzalloc(SHA256_DIGEST_SIZE, GFP_KERNEL);
	if (!digest) {
		ret = -ENOMEM;
		goto out_free_sha_regions;
	}

	for (j = i = 0; i < image->nr_segments; i++) {
		struct kexec_segment *ksegment;

		ksegment = &image->segment[i];
		/*
		 * Skip purgatory as it will be modified once we put digest
		 * info in purgatory.
		 */
		if (ksegment->kbuf == pi->purgatory_buf)
			continue;

		ret = crypto_shash_update(desc, ksegment->kbuf,
					  ksegment->bufsz);
		if (ret)
			break;

		/*
		 * Assume the rest of the buffer is filled with zeroes and
		 * update the digest accordingly.
		 */
		nullsz = ksegment->memsz - ksegment->bufsz;
		while (nullsz) {
			unsigned long bytes = nullsz;

			if (bytes > zero_buf_sz)
				bytes = zero_buf_sz;
			ret = crypto_shash_update(desc, zero_buf, bytes);
			if (ret)
				break;
			nullsz -= bytes;
		}

		if (ret)
			break;

		sha_regions[j].start = ksegment->mem;
		sha_regions[j].len = ksegment->memsz;
		j++;
	}

	if (!ret) {
		ret = crypto_shash_final(desc, digest);
		if (ret)
			goto out_free_digest;
		ret = kexec_purgatory_get_set_symbol(image, "purgatory_sha_regions",
						     sha_regions, sha_region_sz, 0);
		if (ret)
			goto out_free_digest;

		ret = kexec_purgatory_get_set_symbol(image, "purgatory_sha256_digest",
						     digest, SHA256_DIGEST_SIZE, 0);
		if (ret)
			goto out_free_digest;
	}

out_free_digest:
	kfree(digest);
out_free_sha_regions:
	vfree(sha_regions);
out_free_desc:
	kfree(desc);
out_free_tfm:
	kfree(tfm);
out:
	return ret;
}
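
/*
 * Illustrative sketch (not part of this file): at boot, purgatory walks the
 * same region list and recomputes the digest before jumping to the next
 * kernel. A simplified check, loosely modeled on the x86 purgatory's
 * verify_sha256_digest(); the sha256_*() helper names are assumptions here.
 */
#if 0
static int verify_sha256_digest(void)
{
	struct kexec_sha_region *ptr, *end;
	u8 digest[SHA256_DIGEST_SIZE];
	struct sha256_state sctx;

	sha256_init(&sctx);
	end = purgatory_sha_regions + ARRAY_SIZE(purgatory_sha_regions);
	for (ptr = purgatory_sha_regions; ptr < end; ptr++)
		sha256_update(&sctx, (uint8_t *)(ptr->start), ptr->len);
	sha256_final(&sctx, digest);

	/* Refuse to boot a corrupted image */
	if (memcmp(digest, purgatory_sha256_digest, sizeof(digest)) != 0)
		return 1;
	return 0;
}
#endif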
#ifdef CONFIG_ARCH_HAS_KEXEC_PURGATORY
/**
 * kexec_purgatory_setup_kbuf - prepare buffer to load purgatory.
 * @pi:		Purgatory to be loaded.
 * @kbuf:	Buffer to setup.
 *
 * Allocates the memory needed for the buffer. Caller is responsible to free
 * the memory after use.
 *
 * Return: 0 on success, negative errno on error.
 */
static int kexec_purgatory_setup_kbuf(struct purgatory_info *pi,
				      struct kexec_buf *kbuf)
{
	const Elf_Shdr *sechdrs;
	unsigned long bss_align;
	unsigned long bss_sz;
	unsigned long align;
	int i, ret;

	sechdrs = (void *)pi->ehdr + pi->ehdr->e_shoff;
	kbuf->buf_align = bss_align = 1;
	kbuf->bufsz = bss_sz = 0;

	for (i = 0; i < pi->ehdr->e_shnum; i++) {
		if (!(sechdrs[i].sh_flags & SHF_ALLOC))
			continue;

		align = sechdrs[i].sh_addralign;
		if (sechdrs[i].sh_type != SHT_NOBITS) {
			if (kbuf->buf_align < align)
				kbuf->buf_align = align;
			kbuf->bufsz = ALIGN(kbuf->bufsz, align);
			kbuf->bufsz += sechdrs[i].sh_size;
		} else {
			if (bss_align < align)
				bss_align = align;
			bss_sz = ALIGN(bss_sz, align);
			bss_sz += sechdrs[i].sh_size;
		}
	}
	kbuf->bufsz = ALIGN(kbuf->bufsz, bss_align);
	kbuf->memsz = kbuf->bufsz + bss_sz;
	if (kbuf->buf_align < bss_align)
		kbuf->buf_align = bss_align;

	kbuf->buffer = vzalloc(kbuf->bufsz);
	if (!kbuf->buffer)
		return -ENOMEM;
	pi->purgatory_buf = kbuf->buffer;

	ret = kexec_add_buffer(kbuf);
	if (ret)
		goto out;

	return 0;
out:
	vfree(pi->purgatory_buf);
	pi->purgatory_buf = NULL;
	return ret;
}
/**
 * kexec_purgatory_setup_sechdrs - prepares the pi->sechdrs buffer.
 * @pi:		Purgatory to be loaded.
 * @kbuf:	Buffer prepared to store purgatory.
 *
 * Allocates the memory needed for the buffer. Caller is responsible to free
 * the memory after use.
 *
 * Return: 0 on success, negative errno on error.
 */
static int kexec_purgatory_setup_sechdrs(struct purgatory_info *pi,
					 struct kexec_buf *kbuf)
{
	unsigned long bss_addr;
	unsigned long offset;
	Elf_Shdr *sechdrs;
	int i;

	/*
	 * The section headers in kexec_purgatory are read-only. In order to
	 * have them modifiable make a temporary copy.
	 */
	sechdrs = vzalloc(array_size(sizeof(Elf_Shdr), pi->ehdr->e_shnum));
	if (!sechdrs)
		return -ENOMEM;
	memcpy(sechdrs, (void *)pi->ehdr + pi->ehdr->e_shoff,
	       pi->ehdr->e_shnum * sizeof(Elf_Shdr));
	pi->sechdrs = sechdrs;

	offset = 0;
	bss_addr = kbuf->mem + kbuf->bufsz;
	kbuf->image->start = pi->ehdr->e_entry;

	for (i = 0; i < pi->ehdr->e_shnum; i++) {
		unsigned long align;
		void *src, *dst;

		if (!(sechdrs[i].sh_flags & SHF_ALLOC))
			continue;

		align = sechdrs[i].sh_addralign;
		if (sechdrs[i].sh_type == SHT_NOBITS) {
			bss_addr = ALIGN(bss_addr, align);
			sechdrs[i].sh_addr = bss_addr;
			bss_addr += sechdrs[i].sh_size;
			continue;
		}

		offset = ALIGN(offset, align);

		/*
		 * Check if the segment contains the entry point, if so,
		 * calculate the value of image->start based on it.
		 * If the compiler has produced more than one .text section
		 * (Eg: .text.hot), they are generally after the main .text
		 * section, and they shall not be used to calculate
		 * image->start. So do not re-calculate image->start if it
		 * is not set to the initial value, and warn the user so they
		 * have a chance to fix their purgatory's linker script.
		 */
		if (sechdrs[i].sh_flags & SHF_EXECINSTR &&
		    pi->ehdr->e_entry >= sechdrs[i].sh_addr &&
		    pi->ehdr->e_entry < (sechdrs[i].sh_addr
					 + sechdrs[i].sh_size) &&
		    !WARN_ON(kbuf->image->start != pi->ehdr->e_entry)) {
			kbuf->image->start -= sechdrs[i].sh_addr;
			kbuf->image->start += kbuf->mem + offset;
		}

		src = (void *)pi->ehdr + sechdrs[i].sh_offset;
		dst = pi->purgatory_buf + offset;
		memcpy(dst, src, sechdrs[i].sh_size);

		sechdrs[i].sh_addr = kbuf->mem + offset;
		sechdrs[i].sh_offset = offset;
		offset += sechdrs[i].sh_size;
	}

	return 0;
}
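
/*
 * Worked example (values are assumptions for illustration): suppose
 * e_entry = 0x100, the executable section has sh_addr = 0x0 and
 * sh_size = 0x1000, and the purgatory segment was placed at
 * kbuf->mem = 0x1000000 with this section at offset 0x0. The entry-point
 * check above fires, and image->start becomes
 * 0x100 - 0x0 + 0x1000000 + 0x0 = 0x1000100, i.e. the entry's offset within
 * its section rebased onto the section's final load address.
 */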
static int kexec_apply_relocations(struct kimage *image)
{
	int i, ret;
	struct purgatory_info *pi = &image->purgatory_info;
	const Elf_Shdr *sechdrs;

	sechdrs = (void *)pi->ehdr + pi->ehdr->e_shoff;

	for (i = 0; i < pi->ehdr->e_shnum; i++) {
		const Elf_Shdr *relsec;
		const Elf_Shdr *symtab;
		Elf_Shdr *section;

		relsec = sechdrs + i;

		if (relsec->sh_type != SHT_RELA &&
		    relsec->sh_type != SHT_REL)
			continue;

		/*
		 * For sections of type SHT_RELA/SHT_REL, ->sh_link contains
		 * the section header index of the associated symbol table,
		 * and ->sh_info contains the section header index of the
		 * section to which the relocations apply.
		 */
		if (relsec->sh_info >= pi->ehdr->e_shnum ||
		    relsec->sh_link >= pi->ehdr->e_shnum)
			return -ENOEXEC;

		section = pi->sechdrs + relsec->sh_info;
		symtab = sechdrs + relsec->sh_link;

		if (!(section->sh_flags & SHF_ALLOC))
			continue;

		/*
		 * symtab->sh_link contains the section header index of the
		 * associated string table.
		 */
		if (symtab->sh_link >= pi->ehdr->e_shnum)
			/* Invalid section number? */
			continue;

		/*
		 * The respective architecture needs to provide support for
		 * applying relocations of type SHT_RELA/SHT_REL.
		 */
		if (relsec->sh_type == SHT_RELA)
			ret = arch_kexec_apply_relocations_add(pi, section,
							       relsec, symtab);
		else if (relsec->sh_type == SHT_REL)
			ret = arch_kexec_apply_relocations(pi, section,
							   relsec, symtab);
		if (ret)
			return ret;
	}

	return 0;
}
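
/*
 * Illustrative sketch (not part of this file): an architecture's
 * arch_kexec_apply_relocations_add() walks the RELA entries and patches the
 * staged section inside purgatory_buf. A greatly simplified handler for one
 * 64-bit absolute relocation; R_FOO_64 is a stand-in name, and SHN_ABS
 * handling and range checks are omitted.
 */
#if 0
int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
				     Elf_Shdr *section,
				     const Elf_Shdr *relsec,
				     const Elf_Shdr *symtab)
{
	Elf64_Rela *rel = (void *)pi->ehdr + relsec->sh_offset;
	int i;

	for (i = 0; i < relsec->sh_size / sizeof(*rel); i++) {
		/* Location to patch, inside the staged copy of the section */
		void *loc = pi->purgatory_buf + section->sh_offset +
			    rel[i].r_offset;
		Elf64_Sym *sym = (void *)pi->ehdr + symtab->sh_offset;
		unsigned long val;

		sym += ELF64_R_SYM(rel[i].r_info);
		/* Resolved symbol value, rebased on its section, plus addend */
		val = pi->sechdrs[sym->st_shndx].sh_addr + sym->st_value +
		      rel[i].r_addend;

		switch (ELF64_R_TYPE(rel[i].r_info)) {
		case R_FOO_64:		/* stand-in relocation type */
			*(u64 *)loc = val;
			break;
		default:
			return -ENOEXEC;
		}
	}
	return 0;
}
#endif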
/**
 * kexec_load_purgatory - Load and relocate the purgatory object.
 * @image:	Image to add the purgatory to.
 * @kbuf:	Memory parameters to use.
 *
 * Allocates the memory needed for image->purgatory_info.sechdrs and
 * image->purgatory_info.purgatory_buf/kbuf->buffer. Caller is responsible
 * to free the memory after use.
 *
 * Return: 0 on success, negative errno on error.
 */
int kexec_load_purgatory(struct kimage *image, struct kexec_buf *kbuf)
{
	struct purgatory_info *pi = &image->purgatory_info;
	int ret;

	if (kexec_purgatory_size <= 0)
		return -EINVAL;

	pi->ehdr = (const Elf_Ehdr *)kexec_purgatory;

	ret = kexec_purgatory_setup_kbuf(pi, kbuf);
	if (ret)
		return ret;

	ret = kexec_purgatory_setup_sechdrs(pi, kbuf);
	if (ret)
		goto out_free_kbuf;

	ret = kexec_apply_relocations(image);
	if (ret)
		goto out;

	return 0;
out:
	vfree(pi->sechdrs);
	pi->sechdrs = NULL;
out_free_kbuf:
	vfree(pi->purgatory_buf);
	pi->purgatory_buf = NULL;
	return ret;
}
/**
 * kexec_purgatory_find_symbol - find a symbol in the purgatory
 * @pi:		Purgatory to search in.
 * @name:	Name of the symbol.
 *
 * Return: pointer to symbol in read-only symtab on success, NULL on error.
 */
static const Elf_Sym *kexec_purgatory_find_symbol(struct purgatory_info *pi,
						  const char *name)
{
	const Elf_Shdr *sechdrs;
	const Elf_Ehdr *ehdr;
	const Elf_Sym *syms;
	const char *strtab;
	int i, k;

	if (!pi->ehdr)
		return NULL;

	ehdr = pi->ehdr;
	sechdrs = (void *)ehdr + ehdr->e_shoff;

	for (i = 0; i < ehdr->e_shnum; i++) {
		if (sechdrs[i].sh_type != SHT_SYMTAB)
			continue;

		if (sechdrs[i].sh_link >= ehdr->e_shnum)
			/* Invalid strtab section number */
			continue;
		strtab = (void *)ehdr + sechdrs[sechdrs[i].sh_link].sh_offset;
		syms = (void *)ehdr + sechdrs[i].sh_offset;

		/* Go through symbols for a match */
		for (k = 0; k < sechdrs[i].sh_size/sizeof(Elf_Sym); k++) {
			if (ELF_ST_BIND(syms[k].st_info) != STB_GLOBAL)
				continue;

			if (strcmp(strtab + syms[k].st_name, name) != 0)
				continue;

			if (syms[k].st_shndx == SHN_UNDEF ||
			    syms[k].st_shndx >= ehdr->e_shnum) {
				pr_debug("Symbol: %s has bad section index %d.\n",
					 name, syms[k].st_shndx);
				return NULL;
			}

			/* Found the symbol we are looking for */
			return &syms[k];
		}
	}

	return NULL;
}
void *kexec_purgatory_get_symbol_addr(struct kimage *image, const char *name)
{
	struct purgatory_info *pi = &image->purgatory_info;
	const Elf_Sym *sym;
	Elf_Shdr *sechdr;

	sym = kexec_purgatory_find_symbol(pi, name);
	if (!sym)
		return ERR_PTR(-EINVAL);

	sechdr = &pi->sechdrs[sym->st_shndx];

	/*
	 * Returns the address where the symbol will finally be loaded after
	 * kexec_load_segment()
	 */
	return (void *)(sechdr->sh_addr + sym->st_value);
}
/*
 * Get or set the value of a symbol. If "get_value" is true, the symbol value
 * is returned in buf; otherwise the symbol value is set based on the value
 * in buf.
 */
int kexec_purgatory_get_set_symbol(struct kimage *image, const char *name,
				   void *buf, unsigned int size, bool get_value)
{
	struct purgatory_info *pi = &image->purgatory_info;
	const Elf_Sym *sym;
	Elf_Shdr *sec;
	char *sym_buf;

	sym = kexec_purgatory_find_symbol(pi, name);
	if (!sym)
		return -EINVAL;

	if (sym->st_size != size) {
		pr_err("symbol %s size mismatch: expected %lu actual %u\n",
		       name, (unsigned long)sym->st_size, size);
		return -EINVAL;
	}

	sec = pi->sechdrs + sym->st_shndx;

	if (sec->sh_type == SHT_NOBITS) {
		pr_err("symbol %s is in a bss section. Cannot %s\n", name,
		       get_value ? "get" : "set");
		return -EINVAL;
	}

	sym_buf = (char *)pi->purgatory_buf + sec->sh_offset + sym->st_value;

	if (get_value)
		memcpy((void *)buf, sym_buf, size);
	else
		memcpy((void *)sym_buf, buf, size);

	return 0;
}
#endif /* CONFIG_ARCH_HAS_KEXEC_PURGATORY */
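
/*
 * Illustrative sketch (not part of this file): arch loaders use the helper
 * above to patch parameters into the purgatory blob before it is copied to
 * its segment. The symbol name "entry64_addr" and the surrounding variables
 * are assumptions for the example.
 */
#if 0
	unsigned long entry_addr = image->start;

	/* Write an unsigned long into the purgatory variable 'entry64_addr' */
	ret = kexec_purgatory_get_set_symbol(image, "entry64_addr",
					     &entry_addr,
					     sizeof(entry_addr), false);

	/* Read it back (get_value = true) to verify the store */
	ret = kexec_purgatory_get_set_symbol(image, "entry64_addr",
					     &entry_addr,
					     sizeof(entry_addr), true);
#endif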
int crash_exclude_mem_range(struct crash_mem *mem,
			    unsigned long long mstart, unsigned long long mend)
{
	int i, j;
	unsigned long long start, end;
	struct crash_mem_range temp_range = {0, 0};

	for (i = 0; i < mem->nr_ranges; i++) {
		start = mem->ranges[i].start;
		end = mem->ranges[i].end;

		if (mstart > end || mend < start)
			continue;

		/* Truncate any area outside of range */
		if (mstart < start)
			mstart = start;
		if (mend > end)
			mend = end;

		/* Found completely overlapping range */
		if (mstart == start && mend == end) {
			mem->ranges[i].start = 0;
			mem->ranges[i].end = 0;
			if (i < mem->nr_ranges - 1) {
				/* Shift rest of the ranges to left */
				for (j = i; j < mem->nr_ranges - 1; j++) {
					mem->ranges[j].start =
						mem->ranges[j+1].start;
					mem->ranges[j].end =
						mem->ranges[j+1].end;
				}
			}
			mem->nr_ranges--;
			return 0;
		}

		if (mstart > start && mend < end) {
			/* Split original range */
			mem->ranges[i].end = mstart - 1;
			temp_range.start = mend + 1;
			temp_range.end = end;
		} else if (mstart != start)
			mem->ranges[i].end = mstart - 1;
		else
			mem->ranges[i].start = mend + 1;
		break;
	}

	/* If a split happened, add the split to array */
	if (!temp_range.end)
		return 0;

	/* Split happened */
	if (i == mem->max_nr_ranges - 1)
		return -ENOMEM;

	/* Location where new range should go */
	j = i + 1;
	if (j < mem->nr_ranges) {
		/* Move over all ranges one slot towards the end */
		for (i = mem->nr_ranges - 1; i >= j; i--)
			mem->ranges[i + 1] = mem->ranges[i];
	}

	mem->ranges[j].start = temp_range.start;
	mem->ranges[j].end = temp_range.end;
	mem->nr_ranges++;
	return 0;
}
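
/*
 * Worked example (values are assumptions for illustration): with a single
 * range [0x1000, 0x8fff] in mem->ranges[], excluding [0x3000, 0x4fff] takes
 * the split branch above: the original entry is truncated to
 * [0x1000, 0x2fff] and a new entry [0x5000, 0x8fff] is inserted right after
 * it, growing nr_ranges to 2. Excluding exactly [0x1000, 0x8fff] instead
 * deletes the entry and shifts the remaining ranges left.
 */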
int crash_prepare_elf64_headers(struct crash_mem *mem, int kernel_map,
				void **addr, unsigned long *sz)
{
	Elf64_Ehdr *ehdr;
	Elf64_Phdr *phdr;
	unsigned long nr_cpus = num_possible_cpus(), nr_phdr, elf_sz;
	unsigned char *buf;
	unsigned int cpu, i;
	unsigned long long notes_addr;
	unsigned long mstart, mend;

	/* extra phdr for vmcoreinfo ELF note */
	nr_phdr = nr_cpus + 1;
	nr_phdr += mem->nr_ranges;

	/*
	 * kexec-tools creates an extra PT_LOAD phdr for the kernel text
	 * mapping area (for example, ffffffff80000000 - ffffffffa0000000 on
	 * x86_64). I think this is required by tools like gdb. So the same
	 * physical memory will be mapped in two ELF headers: one will contain
	 * kernel text virtual addresses and the other will have
	 * __va(physical) addresses.
	 */

	nr_phdr++;
	elf_sz = sizeof(Elf64_Ehdr) + nr_phdr * sizeof(Elf64_Phdr);
	elf_sz = ALIGN(elf_sz, ELF_CORE_HEADER_ALIGN);

	buf = vzalloc(elf_sz);
	if (!buf)
		return -ENOMEM;

	ehdr = (Elf64_Ehdr *)buf;
	phdr = (Elf64_Phdr *)(ehdr + 1);
	memcpy(ehdr->e_ident, ELFMAG, SELFMAG);
	ehdr->e_ident[EI_CLASS] = ELFCLASS64;
	ehdr->e_ident[EI_DATA] = ELFDATA2LSB;
	ehdr->e_ident[EI_VERSION] = EV_CURRENT;
	ehdr->e_ident[EI_OSABI] = ELF_OSABI;
	memset(ehdr->e_ident + EI_PAD, 0, EI_NIDENT - EI_PAD);
	ehdr->e_type = ET_CORE;
	ehdr->e_machine = ELF_ARCH;
	ehdr->e_version = EV_CURRENT;
	ehdr->e_phoff = sizeof(Elf64_Ehdr);
	ehdr->e_ehsize = sizeof(Elf64_Ehdr);
	ehdr->e_phentsize = sizeof(Elf64_Phdr);

	/* Prepare one phdr of type PT_NOTE for each present CPU */
	for_each_present_cpu(cpu) {
		phdr->p_type = PT_NOTE;
		notes_addr = per_cpu_ptr_to_phys(per_cpu_ptr(crash_notes, cpu));
		phdr->p_offset = phdr->p_paddr = notes_addr;
		phdr->p_filesz = phdr->p_memsz = sizeof(note_buf_t);
		(ehdr->e_phnum)++;
		phdr++;
	}

	/* Prepare one PT_NOTE header for vmcoreinfo */
	phdr->p_type = PT_NOTE;
	phdr->p_offset = phdr->p_paddr = paddr_vmcoreinfo_note();
	phdr->p_filesz = phdr->p_memsz = VMCOREINFO_NOTE_SIZE;
	(ehdr->e_phnum)++;
	phdr++;

	/* Prepare PT_LOAD type program header for kernel text region */
	if (kernel_map) {
		phdr->p_type = PT_LOAD;
		phdr->p_flags = PF_R|PF_W|PF_X;
		phdr->p_vaddr = (Elf64_Addr)_text;
		phdr->p_filesz = phdr->p_memsz = _end - _text;
		phdr->p_offset = phdr->p_paddr = __pa_symbol(_text);
		ehdr->e_phnum++;
		phdr++;
	}

	/* Go through all the ranges in mem->ranges[] and prepare phdr */
	for (i = 0; i < mem->nr_ranges; i++) {
		mstart = mem->ranges[i].start;
		mend = mem->ranges[i].end;

		phdr->p_type = PT_LOAD;
		phdr->p_flags = PF_R|PF_W|PF_X;
		phdr->p_offset = mstart;

		phdr->p_paddr = mstart;
		phdr->p_vaddr = (unsigned long long) __va(mstart);
		phdr->p_filesz = phdr->p_memsz = mend - mstart + 1;
		phdr->p_align = 0;
		ehdr->e_phnum++;
		phdr++;
		pr_debug("Crash PT_LOAD elf header. phdr=%p vaddr=0x%llx, paddr=0x%llx, sz=0x%llx e_phnum=%d p_offset=0x%llx\n",
			 phdr, phdr->p_vaddr, phdr->p_paddr, phdr->p_filesz,
			 ehdr->e_phnum, phdr->p_offset);