/*
 * linux/fs/binfmt_elf.c
 *
 * These are the functions used to load ELF format executables as used
 * on SVr4 machines.  Information on the format may be found in the book
 * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support
 * Tools".
 *
 * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com).
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/binfmts.h>
#include <linux/string.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/personality.h>
#include <linux/elfcore.h>
#include <linux/init.h>
#include <linux/highuid.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <linux/elf-randomize.h>
#include <linux/utsname.h>
#include <linux/coredump.h>
#include <linux/sched.h>
#include <linux/sched/coredump.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/cred.h>
#include <linux/dax.h>
#include <linux/uaccess.h>
#include <asm/param.h>
#include <asm/page.h>
#ifndef user_long_t
#define user_long_t long
#endif
#ifndef user_siginfo_t
#define user_siginfo_t siginfo_t
#endif
static int load_elf_binary(struct linux_binprm *bprm);
static unsigned long elf_map(struct file *, unsigned long, struct elf_phdr *,
				int, int, unsigned long);

#ifdef CONFIG_USELIB
static int load_elf_library(struct file *);
#else
#define load_elf_library NULL
#endif
/*
 * If we don't support core dumping, then supply a NULL so we
 * don't even try.
 */
#ifdef CONFIG_ELF_CORE
static int elf_core_dump(struct coredump_params *cprm);
#else
#define elf_core_dump	NULL
#endif
#if ELF_EXEC_PAGESIZE > PAGE_SIZE
#define ELF_MIN_ALIGN	ELF_EXEC_PAGESIZE
#else
#define ELF_MIN_ALIGN	PAGE_SIZE
#endif

#ifndef ELF_CORE_EFLAGS
#define ELF_CORE_EFLAGS	0
#endif
#define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
#define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
#define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))
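/*
 * Illustrative example (editor's note, not part of the original source):
 * assuming ELF_MIN_ALIGN == 0x1000 (4 KiB pages),
 *
 *	ELF_PAGESTART(0x400123)  == 0x400000	(round down to page start)
 *	ELF_PAGEOFFSET(0x400123) == 0x123	(offset within the page)
 *	ELF_PAGEALIGN(0x400123)  == 0x401000	(round up to the next page)
 */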
static struct linux_binfmt elf_format = {
	.module		= THIS_MODULE,
	.load_binary	= load_elf_binary,
	.load_shlib	= load_elf_library,
	.core_dump	= elf_core_dump,
	.min_coredump	= ELF_EXEC_PAGESIZE,
};

#define BAD_ADDR(x) ((unsigned long)(x) >= TASK_SIZE)
static int set_brk(unsigned long start, unsigned long end, int prot)
{
	start = ELF_PAGEALIGN(start);
	end = ELF_PAGEALIGN(end);
	if (end > start) {
		/*
		 * Map the last of the bss segment.
		 * If the header is requesting these pages to be
		 * executable, honour that (ppc32 needs this).
		 */
		int error = vm_brk_flags(start, end - start,
				prot & PROT_EXEC ? VM_EXEC : 0);
		if (error)
			return error;
	}
	current->mm->start_brk = current->mm->brk = end;
	return 0;
}
/* We need to explicitly zero any fractional pages
   after the data section (i.e. bss).  This would
   contain the junk from the file that should not
   be in memory
 */
static int padzero(unsigned long elf_bss)
{
	unsigned long nbyte;

	nbyte = ELF_PAGEOFFSET(elf_bss);
	if (nbyte) {
		nbyte = ELF_MIN_ALIGN - nbyte;
		if (clear_user((void __user *) elf_bss, nbyte))
			return -EFAULT;
	}
	return 0;
}
/* Let's use some macros to make this stack manipulation a little clearer */
#ifdef CONFIG_STACK_GROWSUP
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) + (items))
#define STACK_ROUND(sp, items) \
	((15 + (unsigned long) ((sp) + (items))) &~ 15UL)
#define STACK_ALLOC(sp, len) ({ \
	elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; \
	old_sp; })
#else
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) - (items))
#define STACK_ROUND(sp, items) \
	(((unsigned long) (sp - items)) &~ 15UL)
#define STACK_ALLOC(sp, len) ({ sp -= len ; sp; })
#endif
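/*
 * Illustrative example (editor's note, not part of the original source):
 * on a grows-down stack with sp == 0x7fff0000, STACK_ALLOC(sp, 16)
 * moves sp to 0x7ffefff0 and yields that address.  STACK_ROUND(sp, n)
 * then rounds the start of a block of n pointer-sized slots down to a
 * 16-byte boundary, as most psABIs require for the initial stack.
 */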
#ifndef ELF_BASE_PLATFORM
/*
 * AT_BASE_PLATFORM indicates the "real" hardware/microarchitecture.
 * If the arch defines ELF_BASE_PLATFORM (in asm/elf.h), the value
 * will be copied to the user stack in the same manner as AT_PLATFORM.
 */
#define ELF_BASE_PLATFORM NULL
#endif
static int
create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
		unsigned long load_addr, unsigned long interp_load_addr)
{
	unsigned long p = bprm->p;
	int argc = bprm->argc;
	int envc = bprm->envc;
	elf_addr_t __user *sp;
	elf_addr_t __user *u_platform;
	elf_addr_t __user *u_base_platform;
	elf_addr_t __user *u_rand_bytes;
	const char *k_platform = ELF_PLATFORM;
	const char *k_base_platform = ELF_BASE_PLATFORM;
	unsigned char k_rand_bytes[16];
	int items;
	elf_addr_t *elf_info;
	int ei_index = 0;
	const struct cred *cred = current_cred();
	struct vm_area_struct *vma;

	/*
	 * In some cases (e.g. Hyper-Threading), we want to avoid L1
	 * evictions by the processes running on the same package. One
	 * thing we can do is to shuffle the initial stack for them.
	 */

	p = arch_align_stack(p);

	/*
	 * If this architecture has a platform capability string, copy it
	 * to userspace.  In some cases (Sparc), this info is impossible
	 * for userspace to get any other way, in others (i386) it is
	 * merely difficult.
	 */
	u_platform = NULL;
	if (k_platform) {
		size_t len = strlen(k_platform) + 1;

		u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
		if (__copy_to_user(u_platform, k_platform, len))
			return -EFAULT;
	}

	/*
	 * If this architecture has a "base" platform capability
	 * string, copy it to userspace.
	 */
	u_base_platform = NULL;
	if (k_base_platform) {
		size_t len = strlen(k_base_platform) + 1;

		u_base_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
		if (__copy_to_user(u_base_platform, k_base_platform, len))
			return -EFAULT;
	}

	/*
	 * Generate 16 random bytes for userspace PRNG seeding.
	 */
	get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
	u_rand_bytes = (elf_addr_t __user *)
		       STACK_ALLOC(p, sizeof(k_rand_bytes));
	if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
		return -EFAULT;

	/* Create the ELF interpreter info */
	elf_info = (elf_addr_t *)current->mm->saved_auxv;
	/* update AT_VECTOR_SIZE_BASE if the number of NEW_AUX_ENT() changes */
#define NEW_AUX_ENT(id, val) \
	do { \
		elf_info[ei_index++] = id; \
		elf_info[ei_index++] = val; \
	} while (0)

#ifdef ARCH_DLINFO
	/*
	 * ARCH_DLINFO must come first so PPC can do its special alignment of
	 * AUXV.
	 * update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT() in
	 * ARCH_DLINFO changes
	 */
	ARCH_DLINFO;
#endif
	NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
	NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
	NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
	NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff);
	NEW_AUX_ENT(AT_PHENT, sizeof(struct elf_phdr));
	NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
	NEW_AUX_ENT(AT_BASE, interp_load_addr);
	NEW_AUX_ENT(AT_FLAGS, 0);
	NEW_AUX_ENT(AT_ENTRY, exec->e_entry);
	NEW_AUX_ENT(AT_UID, from_kuid_munged(cred->user_ns, cred->uid));
	NEW_AUX_ENT(AT_EUID, from_kuid_munged(cred->user_ns, cred->euid));
	NEW_AUX_ENT(AT_GID, from_kgid_munged(cred->user_ns, cred->gid));
	NEW_AUX_ENT(AT_EGID, from_kgid_munged(cred->user_ns, cred->egid));
	NEW_AUX_ENT(AT_SECURE, bprm->secureexec);
	NEW_AUX_ENT(AT_RANDOM, (elf_addr_t)(unsigned long)u_rand_bytes);
#ifdef ELF_HWCAP2
	NEW_AUX_ENT(AT_HWCAP2, ELF_HWCAP2);
#endif
	NEW_AUX_ENT(AT_EXECFN, bprm->exec);
	if (k_platform) {
		NEW_AUX_ENT(AT_PLATFORM,
			    (elf_addr_t)(unsigned long)u_platform);
	}
	if (k_base_platform) {
		NEW_AUX_ENT(AT_BASE_PLATFORM,
			    (elf_addr_t)(unsigned long)u_base_platform);
	}
	if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) {
		NEW_AUX_ENT(AT_EXECFD, bprm->interp_data);
	}
#undef NEW_AUX_ENT
	/* AT_NULL is zero; clear the rest too */
	memset(&elf_info[ei_index], 0,
	       sizeof current->mm->saved_auxv - ei_index * sizeof elf_info[0]);

	/* And advance past the AT_NULL entry. */
	ei_index += 2;

	sp = STACK_ADD(p, ei_index);

	items = (argc + 1) + (envc + 1) + 1;
	bprm->p = STACK_ROUND(sp, items);

	/* Point sp at the lowest address on the stack */
#ifdef CONFIG_STACK_GROWSUP
	sp = (elf_addr_t __user *)bprm->p - items - ei_index;
	bprm->exec = (unsigned long)sp; /* XXX: PARISC HACK */
#else
	sp = (elf_addr_t __user *)bprm->p;
#endif

	/*
	 * Grow the stack manually; some architectures have a limit on how
	 * far ahead a user-space access may be in order to grow the stack.
	 */
	vma = find_extend_vma(current->mm, bprm->p);
	if (!vma)
		return -EFAULT;

	/* Now, let's put argc (and argv, envp if appropriate) on the stack */
	if (__put_user(argc, sp++))
		return -EFAULT;

	/* Populate list of argv pointers back to argv strings. */
	p = current->mm->arg_end = current->mm->arg_start;
	while (argc-- > 0) {
		size_t len;
		if (__put_user((elf_addr_t)p, sp++))
			return -EFAULT;
		len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
		if (!len || len > MAX_ARG_STRLEN)
			return -EINVAL;
		p += len;
	}
	if (__put_user(0, sp++))
		return -EFAULT;
	current->mm->arg_end = p;

	/* Populate list of envp pointers back to envp strings. */
	current->mm->env_end = current->mm->env_start = p;
	while (envc-- > 0) {
		size_t len;
		if (__put_user((elf_addr_t)p, sp++))
			return -EFAULT;
		len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
		if (!len || len > MAX_ARG_STRLEN)
			return -EINVAL;
		p += len;
	}
	if (__put_user(0, sp++))
		return -EFAULT;
	current->mm->env_end = p;

	/* Put the elf_info on the stack in the right place.  */
	if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
		return -EFAULT;
	return 0;
}
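/*
 * Editor's sketch of the initial stack that the code above builds
 * (grows-down case, low addresses first); a summary, not authoritative
 * documentation:
 *
 *	argc
 *	argv[0] ... argv[argc-1], NULL
 *	envp[0] ... envp[envc-1], NULL
 *	auxv[]  (id/value pairs, terminated by AT_NULL)
 *	...
 *	argument and environment strings, the platform strings and the
 *	AT_RANDOM bytes allocated earlier via STACK_ALLOC
 */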
#ifndef elf_map

static unsigned long elf_map(struct file *filep, unsigned long addr,
		struct elf_phdr *eppnt, int prot, int type,
		unsigned long total_size)
{
	unsigned long map_addr;
	unsigned long size = eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr);
	unsigned long off = eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr);
	addr = ELF_PAGESTART(addr);
	size = ELF_PAGEALIGN(size);

	/* mmap() will return -EINVAL if given a zero size, but a
	 * segment with zero filesize is perfectly valid */
	if (!size)
		return addr;

	/*
	 * total_size is the size of the ELF (interpreter) image.
	 * The _first_ mmap needs to know the full size, otherwise
	 * randomization might put this image into an overlapping
	 * position with the ELF binary image. (since size < total_size)
	 * So we first map the 'big' image - and unmap the remainder at
	 * the end. (which unmap is needed for ELF images with holes.)
	 */
	if (total_size) {
		total_size = ELF_PAGEALIGN(total_size);
		map_addr = vm_mmap(filep, addr, total_size, prot, type, off);
		if (!BAD_ADDR(map_addr))
			vm_munmap(map_addr+size, total_size-size);
	} else
		map_addr = vm_mmap(filep, addr, size, prot, type, off);

	return map_addr;
}

#endif /* !elf_map */
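/*
 * Worked example (editor's note): for an interpreter whose first PT_LOAD
 * covers 0x1000 bytes of file data but whose whole image spans 0x5000
 * bytes (total_size), the first call maps 0x5000 bytes so the randomized
 * base reserves the full range, then the trailing 0x4000 bytes are
 * unmapped and later re-mapped segment by segment.
 */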
static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
{
	int i, first_idx = -1, last_idx = -1;

	for (i = 0; i < nr; i++) {
		if (cmds[i].p_type == PT_LOAD) {
			last_idx = i;
			if (first_idx == -1)
				first_idx = i;
		}
	}
	if (first_idx == -1)
		return 0;

	return cmds[last_idx].p_vaddr + cmds[last_idx].p_memsz -
				ELF_PAGESTART(cmds[first_idx].p_vaddr);
}
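/*
 * Worked example (editor's note): with PT_LOAD segments at p_vaddr
 * 0x400000 (p_memsz 0x1000) and 0x601000 (p_memsz 0x2000), the span is
 * 0x601000 + 0x2000 - ELF_PAGESTART(0x400000) == 0x203000 bytes.
 */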
/**
 * load_elf_phdrs() - load ELF program headers
 * @elf_ex:   ELF header of the binary whose program headers should be loaded
 * @elf_file: the opened ELF binary file
 *
 * Loads ELF program headers from the binary file elf_file, which has the ELF
 * header pointed to by elf_ex, into a newly allocated array. The caller is
 * responsible for freeing the allocated data. Returns NULL upon failure.
 */
static struct elf_phdr *load_elf_phdrs(struct elfhdr *elf_ex,
				       struct file *elf_file)
{
	struct elf_phdr *elf_phdata = NULL;
	int retval, size, err = -1;
	loff_t pos = elf_ex->e_phoff;

	/*
	 * If the size of this structure has changed, then punt, since
	 * we will be doing the wrong thing.
	 */
	if (elf_ex->e_phentsize != sizeof(struct elf_phdr))
		goto out;

	/* Sanity check the number of program headers... */
	if (elf_ex->e_phnum < 1 ||
		elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr))
		goto out;

	/* ...and their total size. */
	size = sizeof(struct elf_phdr) * elf_ex->e_phnum;
	if (size > ELF_MIN_ALIGN)
		goto out;

	elf_phdata = kmalloc(size, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	/* Read in the program headers */
	retval = kernel_read(elf_file, elf_phdata, size, &pos);
	if (retval != size) {
		err = (retval < 0) ? retval : -EIO;
		goto out;
	}

	/* Success! */
	err = 0;
out:
	if (err) {
		kfree(elf_phdata);
		elf_phdata = NULL;
	}
	return elf_phdata;
}
#ifndef CONFIG_ARCH_BINFMT_ELF_STATE

/**
 * struct arch_elf_state - arch-specific ELF loading state
 *
 * This structure is used to preserve architecture specific data during
 * the loading of an ELF file, throughout the checking of architecture
 * specific ELF headers & through to the point where the ELF load is
 * known to be proceeding (ie. SET_PERSONALITY).
 *
 * This implementation is a dummy for architectures which require no
 * specific state.
 */
struct arch_elf_state {
};

#define INIT_ARCH_ELF_STATE {}

/**
 * arch_elf_pt_proc() - check a PT_LOPROC..PT_HIPROC ELF program header
 * @ehdr:	The main ELF header
 * @phdr:	The program header to check
 * @elf:	The open ELF file
 * @is_interp:	True if the phdr is from the interpreter of the ELF being
 *		loaded, else false.
 * @state:	Architecture-specific state preserved throughout the process
 *		of loading the ELF.
 *
 * Inspects the program header phdr to validate its correctness and/or
 * suitability for the system. Called once per ELF program header in the
 * range PT_LOPROC to PT_HIPROC, for both the ELF being loaded and its
 * interpreter.
 *
 * Return: Zero to proceed with the ELF load, non-zero to fail the ELF load
 *         with that return code.
 */
static inline int arch_elf_pt_proc(struct elfhdr *ehdr,
				   struct elf_phdr *phdr,
				   struct file *elf, bool is_interp,
				   struct arch_elf_state *state)
{
	/* Dummy implementation, always proceed */
	return 0;
}

/**
 * arch_check_elf() - check an ELF executable
 * @ehdr:	The main ELF header
 * @has_interp:	True if the ELF has an interpreter, else false.
 * @interp_ehdr: The interpreter's ELF header
 * @state:	Architecture-specific state preserved throughout the process
 *		of loading the ELF.
 *
 * Provides a final opportunity for architecture code to reject the loading
 * of the ELF & cause an exec syscall to return an error. This is called after
 * all program headers to be checked by arch_elf_pt_proc have been.
 *
 * Return: Zero to proceed with the ELF load, non-zero to fail the ELF load
 *         with that return code.
 */
static inline int arch_check_elf(struct elfhdr *ehdr, bool has_interp,
				 struct elfhdr *interp_ehdr,
				 struct arch_elf_state *state)
{
	/* Dummy implementation, always proceed */
	return 0;
}

#endif /* !CONFIG_ARCH_BINFMT_ELF_STATE */
/* This is much more generalized than the library routine read function,
   so we keep this separate.  Technically the library read function
   is only provided so that we can read a.out libraries that have
   an ELF header */

static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
		struct file *interpreter, unsigned long *interp_map_addr,
		unsigned long no_base, struct elf_phdr *interp_elf_phdata)
{
	struct elf_phdr *eppnt;
	unsigned long load_addr = 0;
	int load_addr_set = 0;
	unsigned long last_bss = 0, elf_bss = 0;
	int bss_prot = 0;
	unsigned long error = ~0UL;
	unsigned long total_size;
	int i;

	/* First of all, some simple consistency checks */
	if (interp_elf_ex->e_type != ET_EXEC &&
	    interp_elf_ex->e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(interp_elf_ex))
		goto out;
	if (!interpreter->f_op->mmap)
		goto out;

	total_size = total_mapping_size(interp_elf_phdata,
					interp_elf_ex->e_phnum);
	if (!total_size) {
		error = -EINVAL;
		goto out;
	}

	eppnt = interp_elf_phdata;
	for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
		if (eppnt->p_type == PT_LOAD) {
			int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
			int elf_prot = 0;
			unsigned long vaddr = 0;
			unsigned long k, map_addr;

			if (eppnt->p_flags & PF_R)
				elf_prot = PROT_READ;
			if (eppnt->p_flags & PF_W)
				elf_prot |= PROT_WRITE;
			if (eppnt->p_flags & PF_X)
				elf_prot |= PROT_EXEC;
			vaddr = eppnt->p_vaddr;
			if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
				elf_type |= MAP_FIXED;
			else if (no_base && interp_elf_ex->e_type == ET_DYN)
				load_addr = -vaddr;

			map_addr = elf_map(interpreter, load_addr + vaddr,
					eppnt, elf_prot, elf_type, total_size);
			total_size = 0;
			if (!*interp_map_addr)
				*interp_map_addr = map_addr;
			error = map_addr;
			if (BAD_ADDR(map_addr))
				goto out;

			if (!load_addr_set &&
			    interp_elf_ex->e_type == ET_DYN) {
				load_addr = map_addr - ELF_PAGESTART(vaddr);
				load_addr_set = 1;
			}

			/*
			 * Check to see if the section's size will overflow the
			 * allowed task size. Note that p_filesz must always be
			 * <= p_memsz so it's only necessary to check p_memsz.
			 */
			k = load_addr + eppnt->p_vaddr;
			if (BAD_ADDR(k) ||
			    eppnt->p_filesz > eppnt->p_memsz ||
			    eppnt->p_memsz > TASK_SIZE ||
			    TASK_SIZE - eppnt->p_memsz < k) {
				error = -ENOMEM;
				goto out;
			}

			/*
			 * Find the end of the file mapping for this phdr, and
			 * keep track of the largest address we see for this.
			 */
			k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
			if (k > elf_bss)
				elf_bss = k;

			/*
			 * Do the same thing for the memory mapping - between
			 * elf_bss and last_bss is the bss section.
			 */
			k = load_addr + eppnt->p_vaddr + eppnt->p_memsz;
			if (k > last_bss) {
				last_bss = k;
				bss_prot = elf_prot;
			}
		}
	}

	/*
	 * Now fill out the bss section: first pad the last page from
	 * the file up to the page boundary, and zero it from elf_bss
	 * up to the end of the page.
	 */
	if (padzero(elf_bss)) {
		error = -EFAULT;
		goto out;
	}
	/*
	 * Next, align both the file and mem bss up to the page size,
	 * since this is where elf_bss was just zeroed up to, and where
	 * last_bss will end after the vm_brk_flags() below.
	 */
	elf_bss = ELF_PAGEALIGN(elf_bss);
	last_bss = ELF_PAGEALIGN(last_bss);
	/* Finally, if there is still more bss to allocate, do it. */
	if (last_bss > elf_bss) {
		error = vm_brk_flags(elf_bss, last_bss - elf_bss,
				bss_prot & PROT_EXEC ? VM_EXEC : 0);
		if (error)
			goto out;
	}

	error = load_addr;
out:
	return error;
}
/*
 * These are the functions used to load ELF style executables and shared
 * libraries.  There is no binary dependent code anywhere else.
 */

#ifndef STACK_RND_MASK
#define STACK_RND_MASK (0x7ff >> (PAGE_SHIFT - 12))	/* 8MB of VA */
#endif

static unsigned long randomize_stack_top(unsigned long stack_top)
{
	unsigned long random_variable = 0;

	if (current->flags & PF_RANDOMIZE) {
		random_variable = get_random_long();
		random_variable &= STACK_RND_MASK;
		random_variable <<= PAGE_SHIFT;
	}
#ifdef CONFIG_STACK_GROWSUP
	return PAGE_ALIGN(stack_top) + random_variable;
#else
	return PAGE_ALIGN(stack_top) - random_variable;
#endif
}
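/*
 * Worked example (editor's note): with 4 KiB pages (PAGE_SHIFT == 12)
 * the default STACK_RND_MASK is 0x7ff, so the stack top is lowered by
 * up to 0x7ff << 12 bytes, i.e. 8 MiB - 4 KiB of page-aligned entropy.
 */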
static int load_elf_binary(struct linux_binprm *bprm)
{
	struct file *interpreter = NULL; /* to shut gcc up */
	unsigned long load_addr = 0, load_bias = 0;
	int load_addr_set = 0;
	char * elf_interpreter = NULL;
	unsigned long error;
	struct elf_phdr *elf_ppnt, *elf_phdata, *interp_elf_phdata = NULL;
	unsigned long elf_bss, elf_brk;
	int bss_prot = 0;
	int retval, i;
	unsigned long elf_entry;
	unsigned long interp_load_addr = 0;
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long reloc_func_desc __maybe_unused = 0;
	int executable_stack = EXSTACK_DEFAULT;
	struct pt_regs *regs = current_pt_regs();
	struct {
		struct elfhdr elf_ex;
		struct elfhdr interp_elf_ex;
	} *loc;
	struct arch_elf_state arch_state = INIT_ARCH_ELF_STATE;
	loff_t pos;

	loc = kmalloc(sizeof(*loc), GFP_KERNEL);
	if (!loc) {
		retval = -ENOMEM;
		goto out_ret;
	}

	/* Get the exec-header */
	loc->elf_ex = *((struct elfhdr *)bprm->buf);

	retval = -ENOEXEC;
	/* First of all, some simple consistency checks */
	if (memcmp(loc->elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	if (loc->elf_ex.e_type != ET_EXEC && loc->elf_ex.e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(&loc->elf_ex))
		goto out;
	if (!bprm->file->f_op->mmap)
		goto out;

	elf_phdata = load_elf_phdrs(&loc->elf_ex, bprm->file);
	if (!elf_phdata)
		goto out;

	elf_ppnt = elf_phdata;
	elf_bss = 0;
	elf_brk = 0;

	start_code = ~0UL;
	end_code = 0;
	start_data = 0;
	end_data = 0;

	for (i = 0; i < loc->elf_ex.e_phnum; i++) {
		if (elf_ppnt->p_type == PT_INTERP) {
			/* This is the program interpreter used for
			 * shared libraries - for now assume that this
			 * is an a.out format binary
			 */
			retval = -ENOEXEC;
			if (elf_ppnt->p_filesz > PATH_MAX ||
			    elf_ppnt->p_filesz < 2)
				goto out_free_ph;

			retval = -ENOMEM;
			elf_interpreter = kmalloc(elf_ppnt->p_filesz,
						  GFP_KERNEL);
			if (!elf_interpreter)
				goto out_free_ph;

			pos = elf_ppnt->p_offset;
			retval = kernel_read(bprm->file, elf_interpreter,
					     elf_ppnt->p_filesz, &pos);
			if (retval != elf_ppnt->p_filesz) {
				if (retval >= 0)
					retval = -EIO;
				goto out_free_interp;
			}
			/* make sure path is NULL terminated */
			retval = -ENOEXEC;
			if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
				goto out_free_interp;

			interpreter = open_exec(elf_interpreter);
			retval = PTR_ERR(interpreter);
			if (IS_ERR(interpreter))
				goto out_free_interp;

			/*
			 * If the binary is not readable then enforce
			 * mm->dumpable = 0 regardless of the interpreter's
			 * permissions.
			 */
			would_dump(bprm, interpreter);

			/* Get the exec headers */
			pos = 0;
			retval = kernel_read(interpreter, &loc->interp_elf_ex,
					     sizeof(loc->interp_elf_ex), &pos);
			if (retval != sizeof(loc->interp_elf_ex)) {
				if (retval >= 0)
					retval = -EIO;
				goto out_free_dentry;
			}

			break;
		}
		elf_ppnt++;
	}
	elf_ppnt = elf_phdata;
	for (i = 0; i < loc->elf_ex.e_phnum; i++, elf_ppnt++)
		switch (elf_ppnt->p_type) {
		case PT_GNU_STACK:
			if (elf_ppnt->p_flags & PF_X)
				executable_stack = EXSTACK_ENABLE_X;
			else
				executable_stack = EXSTACK_DISABLE_X;
			break;

		case PT_LOPROC ... PT_HIPROC:
			retval = arch_elf_pt_proc(&loc->elf_ex, elf_ppnt,
						  bprm->file, false,
						  &arch_state);
			if (retval)
				goto out_free_dentry;
			break;
		}

	/* Some simple consistency checks for the interpreter */
	if (elf_interpreter) {
		retval = -ELIBBAD;
		/* Not an ELF interpreter */
		if (memcmp(loc->interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
			goto out_free_dentry;
		/* Verify the interpreter has a valid arch */
		if (!elf_check_arch(&loc->interp_elf_ex))
			goto out_free_dentry;

		/* Load the interpreter program headers */
		interp_elf_phdata = load_elf_phdrs(&loc->interp_elf_ex,
						   interpreter);
		if (!interp_elf_phdata)
			goto out_free_dentry;

		/* Pass PT_LOPROC..PT_HIPROC headers to arch code */
		elf_ppnt = interp_elf_phdata;
		for (i = 0; i < loc->interp_elf_ex.e_phnum; i++, elf_ppnt++)
			switch (elf_ppnt->p_type) {
			case PT_LOPROC ... PT_HIPROC:
				retval = arch_elf_pt_proc(&loc->interp_elf_ex,
							  elf_ppnt, interpreter,
							  true, &arch_state);
				if (retval)
					goto out_free_dentry;
				break;
			}
	}

	/*
	 * Allow arch code to reject the ELF at this point, whilst it's
	 * still possible to return an error to the code that invoked
	 * the exec syscall.
	 */
	retval = arch_check_elf(&loc->elf_ex,
				!!interpreter, &loc->interp_elf_ex,
				&arch_state);
	if (retval)
		goto out_free_dentry;

	/* Flush all traces of the currently running executable */
	retval = flush_old_exec(bprm);
	if (retval)
		goto out_free_dentry;

	/* Do this immediately, since STACK_TOP as used in setup_arg_pages
	   may depend on the personality.  */
	SET_PERSONALITY2(loc->elf_ex, &arch_state);
	if (elf_read_implies_exec(loc->elf_ex, executable_stack))
		current->personality |= READ_IMPLIES_EXEC;

	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		current->flags |= PF_RANDOMIZE;

	setup_new_exec(bprm);
	install_exec_creds(bprm);

	/* Do this so that we can load the interpreter, if need be.  We will
	   change some of these later */
	retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
				 executable_stack);
	if (retval < 0)
		goto out_free_dentry;

	current->mm->start_stack = bprm->p;
	/* Now we do a little grungy work by mmapping the ELF image into
	   the correct location in memory. */
	for(i = 0, elf_ppnt = elf_phdata;
	    i < loc->elf_ex.e_phnum; i++, elf_ppnt++) {
		int elf_prot = 0, elf_flags;
		unsigned long k, vaddr;
		unsigned long total_size = 0;

		if (elf_ppnt->p_type != PT_LOAD)
			continue;

		if (unlikely (elf_brk > elf_bss)) {
			unsigned long nbyte;

			/* There was a PT_LOAD segment with p_memsz > p_filesz
			   before this one. Map anonymous pages, if needed,
			   and clear the area.  */
			retval = set_brk(elf_bss + load_bias,
					 elf_brk + load_bias,
					 bss_prot);
			if (retval)
				goto out_free_dentry;
			nbyte = ELF_PAGEOFFSET(elf_bss);
			if (nbyte) {
				nbyte = ELF_MIN_ALIGN - nbyte;
				if (nbyte > elf_brk - elf_bss)
					nbyte = elf_brk - elf_bss;
				if (clear_user((void __user *)elf_bss +
							load_bias, nbyte)) {
					/*
					 * This bss-zeroing can fail if the ELF
					 * file specifies odd protections. So
					 * we don't check the return value
					 */
				}
			}
		}

		if (elf_ppnt->p_flags & PF_R)
			elf_prot |= PROT_READ;
		if (elf_ppnt->p_flags & PF_W)
			elf_prot |= PROT_WRITE;
		if (elf_ppnt->p_flags & PF_X)
			elf_prot |= PROT_EXEC;

		elf_flags = MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE;

		vaddr = elf_ppnt->p_vaddr;
		/*
		 * If we are loading ET_EXEC or we have already performed
		 * the ET_DYN load_addr calculations, proceed normally.
		 */
		if (loc->elf_ex.e_type == ET_EXEC || load_addr_set) {
			elf_flags |= MAP_FIXED;
		} else if (loc->elf_ex.e_type == ET_DYN) {
			/*
			 * This logic is run once for the first LOAD Program
			 * Header for ET_DYN binaries to calculate the
			 * randomization (load_bias) for all the LOAD
			 * Program Headers, and to calculate the entire
			 * size of the ELF mapping (total_size). (Note that
			 * load_addr_set is set to true later once the
			 * initial mapping is performed.)
			 *
			 * There are effectively two types of ET_DYN
			 * binaries: programs (i.e. PIE: ET_DYN with INTERP)
			 * and loaders (ET_DYN without INTERP, since they
			 * _are_ the ELF interpreter). The loaders must
			 * be loaded away from programs since the program
			 * may otherwise collide with the loader (especially
			 * for ET_EXEC which does not have a randomized
			 * position). For example to handle invocations of
			 * "./ld.so someprog" to test out a new version of
			 * the loader, the subsequent program that the
			 * loader loads must avoid the loader itself, so
			 * they cannot share the same load range. Sufficient
			 * room for the brk must be allocated with the
			 * loader as well, since brk must be available with
			 * the loader.
			 *
			 * Therefore, programs are loaded offset from
			 * ELF_ET_DYN_BASE and loaders are loaded into the
			 * independently randomized mmap region (0 load_bias
			 * without MAP_FIXED).
			 */
			if (elf_interpreter) {
				load_bias = ELF_ET_DYN_BASE;
				if (current->flags & PF_RANDOMIZE)
					load_bias += arch_mmap_rnd();
				elf_flags |= MAP_FIXED;
			} else
				load_bias = 0;

			/*
			 * Since load_bias is used for all subsequent loading
			 * calculations, we must lower it by the first vaddr
			 * so that the remaining calculations based on the
			 * ELF vaddrs will be correctly offset. The result
			 * is then page aligned.
			 */
			load_bias = ELF_PAGESTART(load_bias - vaddr);

			total_size = total_mapping_size(elf_phdata,
							loc->elf_ex.e_phnum);
			if (!total_size) {
				retval = -EINVAL;
				goto out_free_dentry;
			}
		}

		error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
				elf_prot, elf_flags, total_size);
		if (BAD_ADDR(error)) {
			retval = IS_ERR((void *)error) ?
				PTR_ERR((void*)error) : -EINVAL;
			goto out_free_dentry;
		}

		if (!load_addr_set) {
			load_addr_set = 1;
			load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
			if (loc->elf_ex.e_type == ET_DYN) {
				load_bias += error -
				             ELF_PAGESTART(load_bias + vaddr);
				load_addr += load_bias;
				reloc_func_desc = load_bias;
			}
		}
		k = elf_ppnt->p_vaddr;
		if (k < start_code)
			start_code = k;
		if (start_data < k)
			start_data = k;

		/*
		 * Check to see if the section's size will overflow the
		 * allowed task size. Note that p_filesz must always be
		 * <= p_memsz so it is only necessary to check p_memsz.
		 */
		if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
		    elf_ppnt->p_memsz > TASK_SIZE ||
		    TASK_SIZE - elf_ppnt->p_memsz < k) {
			/* set_brk can never work. Avoid overflows. */
			retval = -EINVAL;
			goto out_free_dentry;
		}

		k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;

		if (k > elf_bss)
			elf_bss = k;
		if ((elf_ppnt->p_flags & PF_X) && end_code < k)
			end_code = k;
		if (end_data < k)
			end_data = k;
		k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
		if (k > elf_brk) {
			bss_prot = elf_prot;
			elf_brk = k;
		}
	}

	loc->elf_ex.e_entry += load_bias;
	elf_bss += load_bias;
	elf_brk += load_bias;
	start_code += load_bias;
	end_code += load_bias;
	start_data += load_bias;
	end_data += load_bias;

	/* Calling set_brk effectively mmaps the pages that we need
	 * for the bss and break sections.  We must do this before
	 * mapping in the interpreter, to make sure it doesn't wind
	 * up getting placed where the bss needs to go.
	 */
	retval = set_brk(elf_bss, elf_brk, bss_prot);
	if (retval)
		goto out_free_dentry;
	if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
		retval = -EFAULT; /* Nobody gets to see this, but.. */
		goto out_free_dentry;
	}
	if (elf_interpreter) {
		unsigned long interp_map_addr = 0;

		elf_entry = load_elf_interp(&loc->interp_elf_ex,
					    interpreter,
					    &interp_map_addr,
					    load_bias, interp_elf_phdata);
		if (!IS_ERR((void *)elf_entry)) {
			/*
			 * load_elf_interp() returns relocation
			 * adjustment
			 */
			interp_load_addr = elf_entry;
			elf_entry += loc->interp_elf_ex.e_entry;
		}
		if (BAD_ADDR(elf_entry)) {
			retval = IS_ERR((void *)elf_entry) ?
					(int)elf_entry : -EINVAL;
			goto out_free_dentry;
		}
		reloc_func_desc = interp_load_addr;

		allow_write_access(interpreter);
		fput(interpreter);
		kfree(elf_interpreter);
	} else {
		elf_entry = loc->elf_ex.e_entry;
		if (BAD_ADDR(elf_entry)) {
			retval = -EINVAL;
			goto out_free_dentry;
		}
	}

	kfree(interp_elf_phdata);
	kfree(elf_phdata);

	set_binfmt(&elf_format);

#ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES
	retval = arch_setup_additional_pages(bprm, !!elf_interpreter);
	if (retval < 0)
		goto out;
#endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */

	retval = create_elf_tables(bprm, &loc->elf_ex,
			  load_addr, interp_load_addr);
	if (retval < 0)
		goto out;
	/* N.B. passed_fileno might not be initialized? */
	current->mm->end_code = end_code;
	current->mm->start_code = start_code;
	current->mm->start_data = start_data;
	current->mm->end_data = end_data;
	current->mm->start_stack = bprm->p;

	if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1)) {
		/*
		 * For architectures with ELF randomization, when executing
		 * a loader directly (i.e. no interpreter listed in ELF
		 * headers), move the brk area out of the mmap region
		 * (since it grows up, and may collide early with the stack
		 * growing down), and into the unused ELF_ET_DYN_BASE region.
		 */
		if (IS_ENABLED(CONFIG_ARCH_HAS_ELF_RANDOMIZE) &&
		    loc->elf_ex.e_type == ET_DYN && !interpreter)
			current->mm->brk = current->mm->start_brk =
				ELF_ET_DYN_BASE;

		current->mm->brk = current->mm->start_brk =
			arch_randomize_brk(current->mm);
#ifdef compat_brk_randomized
		current->brk_randomized = 1;
#endif
	}

	if (current->personality & MMAP_PAGE_ZERO) {
		/* Why this, you ask???  Well SVr4 maps page 0 as read-only,
		   and some applications "depend" upon this behavior.
		   Since we do not have the power to recompile these, we
		   emulate the SVr4 behavior. Sigh. */
		error = vm_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
				MAP_FIXED | MAP_PRIVATE, 0);
	}

#ifdef ELF_PLAT_INIT
	/*
	 * The ABI may specify that certain registers be set up in special
	 * ways (on i386 %edx is the address of a DT_FINI function, for
	 * example.  In addition, it may also specify (eg, PowerPC64 ELF)
	 * that the e_entry field is the address of the function descriptor
	 * for the startup routine, rather than the address of the startup
	 * routine itself.  This macro performs whatever initialization to
	 * the regs structure is required as well as any relocations to the
	 * function descriptor entries when executing dynamically linked apps.
	 */
	ELF_PLAT_INIT(regs, reloc_func_desc);
#endif

	start_thread(regs, elf_entry, bprm->p);
	retval = 0;
out:
	kfree(loc);
out_ret:
	return retval;

	/* error cleanup */
out_free_dentry:
	kfree(interp_elf_phdata);
	allow_write_access(interpreter);
	if (interpreter)
		fput(interpreter);
out_free_interp:
	kfree(elf_interpreter);
out_free_ph:
	kfree(elf_phdata);
	goto out;
}
#ifdef CONFIG_USELIB
/* This is really simpleminded and specialized - we are loading an
   a.out library that is given an ELF header. */
static int load_elf_library(struct file *file)
{
	struct elf_phdr *elf_phdata;
	struct elf_phdr *eppnt;
	unsigned long elf_bss, bss, len;
	int retval, error, i, j;
	struct elfhdr elf_ex;
	loff_t pos = 0;

	error = -ENOEXEC;
	retval = kernel_read(file, &elf_ex, sizeof(elf_ex), &pos);
	if (retval != sizeof(elf_ex))
		goto out;

	if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	/* First of all, some simple consistency checks */
	if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
	    !elf_check_arch(&elf_ex) || !file->f_op->mmap)
		goto out;

	/* Now read in all of the header information */

	j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
	/* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */

	error = -ENOMEM;
	elf_phdata = kmalloc(j, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	eppnt = elf_phdata;
	error = -ENOEXEC;
	pos = elf_ex.e_phoff;
	retval = kernel_read(file, eppnt, j, &pos);
	if (retval != j)
		goto out_free_ph;

	for (j = 0, i = 0; i<elf_ex.e_phnum; i++)
		if ((eppnt + i)->p_type == PT_LOAD)
			j++;
	if (j != 1)
		goto out_free_ph;

	while (eppnt->p_type != PT_LOAD)
		eppnt++;

	/* Now use mmap to map the library into memory. */
	error = vm_mmap(file,
			ELF_PAGESTART(eppnt->p_vaddr),
			(eppnt->p_filesz +
			 ELF_PAGEOFFSET(eppnt->p_vaddr)),
			PROT_READ | PROT_WRITE | PROT_EXEC,
			MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE,
			(eppnt->p_offset -
			 ELF_PAGEOFFSET(eppnt->p_vaddr)));
	if (error != ELF_PAGESTART(eppnt->p_vaddr))
		goto out_free_ph;

	elf_bss = eppnt->p_vaddr + eppnt->p_filesz;
	if (padzero(elf_bss)) {
		error = -EFAULT;
		goto out_free_ph;
	}

	len = ELF_PAGEALIGN(eppnt->p_filesz + eppnt->p_vaddr);
	bss = ELF_PAGEALIGN(eppnt->p_memsz + eppnt->p_vaddr);
	if (bss > len) {
		error = vm_brk(len, bss - len);
		if (error)
			goto out_free_ph;
	}
	error = 0;

out_free_ph:
	kfree(elf_phdata);
out:
	return error;
}
#endif /* #ifdef CONFIG_USELIB */
#ifdef CONFIG_ELF_CORE
/*
 * ELF core dumper
 *
 * Modelled on fs/exec.c:aout_core_dump()
 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
 */

/*
 * The purpose of always_dump_vma() is to make sure that special kernel mappings
 * that are useful for post-mortem analysis are included in every core dump.
 * In that way we ensure that the core dump is fully interpretable later
 * without matching up the same kernel and hardware config to see what PC values
 * meant. These special mappings include - vDSO, vsyscall, and other
 * architecture specific mappings
 */
static bool always_dump_vma(struct vm_area_struct *vma)
{
	/* Any vsyscall mappings? */
	if (vma == get_gate_vma(vma->vm_mm))
		return true;

	/*
	 * Assume that all vmas with a .name op should always be dumped.
	 * If this changes, a new vm_ops field can easily be added.
	 */
	if (vma->vm_ops && vma->vm_ops->name && vma->vm_ops->name(vma))
		return true;

	/*
	 * arch_vma_name() returns non-NULL for special architecture mappings,
	 * such as vDSO sections.
	 */
	if (arch_vma_name(vma))
		return true;

	return false;
}
/*
 * Decide what to dump of a segment, part, all or none.
 */
static unsigned long vma_dump_size(struct vm_area_struct *vma,
				   unsigned long mm_flags)
{
#define FILTER(type)	(mm_flags & (1UL << MMF_DUMP_##type))

	/* always dump the vdso and vsyscall sections */
	if (always_dump_vma(vma))
		goto whole;

	if (vma->vm_flags & VM_DONTDUMP)
		return 0;

	/* support for DAX */
	if (vma_is_dax(vma)) {
		if ((vma->vm_flags & VM_SHARED) && FILTER(DAX_SHARED))
			goto whole;
		if (!(vma->vm_flags & VM_SHARED) && FILTER(DAX_PRIVATE))
			goto whole;
		return 0;
	}

	/* Hugetlb memory check */
	if (vma->vm_flags & VM_HUGETLB) {
		if ((vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_SHARED))
			goto whole;
		if (!(vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_PRIVATE))
			goto whole;
		return 0;
	}

	/* Do not dump I/O mapped devices or special mappings */
	if (vma->vm_flags & VM_IO)
		return 0;

	/* By default, dump shared memory if mapped from an anonymous file. */
	if (vma->vm_flags & VM_SHARED) {
		if (file_inode(vma->vm_file)->i_nlink == 0 ?
		    FILTER(ANON_SHARED) : FILTER(MAPPED_SHARED))
			goto whole;
		return 0;
	}

	/* Dump segments that have been written to.  */
	if (vma->anon_vma && FILTER(ANON_PRIVATE))
		goto whole;
	if (vma->vm_file == NULL)
		return 0;

	if (FILTER(MAPPED_PRIVATE))
		goto whole;

	/*
	 * If this looks like the beginning of a DSO or executable mapping,
	 * check for an ELF header. If we find one, dump the first page to
	 * aid in determining what was mapped here.
	 */
	if (FILTER(ELF_HEADERS) &&
	    vma->vm_pgoff == 0 && (vma->vm_flags & VM_READ)) {
		u32 __user *header = (u32 __user *) vma->vm_start;
		u32 word;
		mm_segment_t fs = get_fs();
		/*
		 * Doing it this way gets the constant folded by GCC.
		 */
		union {
			u32 cmp;
			char elfmag[SELFMAG];
		} magic;
		BUILD_BUG_ON(SELFMAG != sizeof word);
		magic.elfmag[EI_MAG0] = ELFMAG0;
		magic.elfmag[EI_MAG1] = ELFMAG1;
		magic.elfmag[EI_MAG2] = ELFMAG2;
		magic.elfmag[EI_MAG3] = ELFMAG3;
		/*
		 * Switch to the user "segment" for get_user(),
		 * then put back what elf_core_dump() had in place.
		 */
		set_fs(USER_DS);
		if (unlikely(get_user(word, header)))
			word = 0;
		set_fs(fs);
		if (word == magic.cmp)
			return PAGE_SIZE;
	}

#undef FILTER

	return 0;

whole:
	return vma->vm_end - vma->vm_start;
}
/* An ELF note in memory */
struct memelfnote
{
	const char *name;
	int type;
	unsigned int datasz;
	void *data;
};

static int notesize(struct memelfnote *en)
{
	int sz;

	sz = sizeof(struct elf_note);
	sz += roundup(strlen(en->name) + 1, 4);
	sz += roundup(en->datasz, 4);

	return sz;
}

static int writenote(struct memelfnote *men, struct coredump_params *cprm)
{
	struct elf_note en;
	en.n_namesz = strlen(men->name) + 1;
	en.n_descsz = men->datasz;
	en.n_type = men->type;

	return dump_emit(cprm, &en, sizeof(en)) &&
	    dump_emit(cprm, men->name, en.n_namesz) && dump_align(cprm, 4) &&
	    dump_emit(cprm, men->data, men->datasz) && dump_align(cprm, 4);
}
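/*
 * Editor's sketch of the on-disk layout writenote() emits, following the
 * standard ELF note format (a summary, not a quote from the source):
 *
 *	struct elf_note	{ n_namesz, n_descsz, n_type }
 *	name  ("CORE\0" or "LINUX\0"), padded to a 4-byte boundary
 *	desc  (datasz bytes of payload), padded to a 4-byte boundary
 */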
static void fill_elf_header(struct elfhdr *elf, int segs,
			    u16 machine, u32 flags)
{
	memset(elf, 0, sizeof(*elf));

	memcpy(elf->e_ident, ELFMAG, SELFMAG);
	elf->e_ident[EI_CLASS] = ELF_CLASS;
	elf->e_ident[EI_DATA] = ELF_DATA;
	elf->e_ident[EI_VERSION] = EV_CURRENT;
	elf->e_ident[EI_OSABI] = ELF_OSABI;

	elf->e_type = ET_CORE;
	elf->e_machine = machine;
	elf->e_version = EV_CURRENT;
	elf->e_phoff = sizeof(struct elfhdr);
	elf->e_flags = flags;
	elf->e_ehsize = sizeof(struct elfhdr);
	elf->e_phentsize = sizeof(struct elf_phdr);
	elf->e_phnum = segs;

	return;
}
static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, loff_t offset)
{
	phdr->p_type = PT_NOTE;
	phdr->p_offset = offset;
	phdr->p_vaddr = 0;
	phdr->p_paddr = 0;
	phdr->p_filesz = sz;
	phdr->p_memsz = 0;
	phdr->p_flags = 0;
	phdr->p_align = 0;
	return;
}

static void fill_note(struct memelfnote *note, const char *name, int type,
		unsigned int sz, void *data)
{
	note->name = name;
	note->type = type;
	note->datasz = sz;
	note->data = data;
	return;
}
/*
 * fill up all the fields in prstatus from the given task struct, except
 * registers which need to be filled up separately.
 */
static void fill_prstatus(struct elf_prstatus *prstatus,
		struct task_struct *p, long signr)
{
	prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
	prstatus->pr_sigpend = p->pending.signal.sig[0];
	prstatus->pr_sighold = p->blocked.sig[0];
	rcu_read_lock();
	prstatus->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent));
	rcu_read_unlock();
	prstatus->pr_pid = task_pid_vnr(p);
	prstatus->pr_pgrp = task_pgrp_vnr(p);
	prstatus->pr_sid = task_session_vnr(p);
	if (thread_group_leader(p)) {
		struct task_cputime cputime;

		/*
		 * This is the record for the group leader.  It shows the
		 * group-wide total, not its individual thread total.
		 */
		thread_group_cputime(p, &cputime);
		prstatus->pr_utime = ns_to_timeval(cputime.utime);
		prstatus->pr_stime = ns_to_timeval(cputime.stime);
	} else {
		u64 utime, stime;

		task_cputime(p, &utime, &stime);
		prstatus->pr_utime = ns_to_timeval(utime);
		prstatus->pr_stime = ns_to_timeval(stime);
	}

	prstatus->pr_cutime = ns_to_timeval(p->signal->cutime);
	prstatus->pr_cstime = ns_to_timeval(p->signal->cstime);
}
static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
		       struct mm_struct *mm)
{
	const struct cred *cred;
	unsigned int i, len;

	/* first copy the parameters from user space */
	memset(psinfo, 0, sizeof(struct elf_prpsinfo));

	len = mm->arg_end - mm->arg_start;
	if (len >= ELF_PRARGSZ)
		len = ELF_PRARGSZ-1;
	if (copy_from_user(&psinfo->pr_psargs,
		           (const char __user *)mm->arg_start, len))
		return -EFAULT;
	for(i = 0; i < len; i++)
		if (psinfo->pr_psargs[i] == 0)
			psinfo->pr_psargs[i] = ' ';
	psinfo->pr_psargs[len] = 0;

	rcu_read_lock();
	psinfo->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent));
	rcu_read_unlock();
	psinfo->pr_pid = task_pid_vnr(p);
	psinfo->pr_pgrp = task_pgrp_vnr(p);
	psinfo->pr_sid = task_session_vnr(p);

	i = p->state ? ffz(~p->state) + 1 : 0;
	psinfo->pr_state = i;
	psinfo->pr_sname = (i > 5) ? '.' : "RSDTZW"[i];
	psinfo->pr_zomb = psinfo->pr_sname == 'Z';
	psinfo->pr_nice = task_nice(p);
	psinfo->pr_flag = p->flags;
	rcu_read_lock();
	cred = __task_cred(p);
	SET_UID(psinfo->pr_uid, from_kuid_munged(cred->user_ns, cred->uid));
	SET_GID(psinfo->pr_gid, from_kgid_munged(cred->user_ns, cred->gid));
	rcu_read_unlock();
	strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));

	return 0;
}
static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
{
	elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
	int i = 0;
	do
		i += 2;
	while (auxv[i - 2] != AT_NULL);
	fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
}
static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
		const siginfo_t *siginfo)
{
	mm_segment_t old_fs = get_fs();
	set_fs(KERNEL_DS);
	copy_siginfo_to_user((user_siginfo_t __user *) csigdata, siginfo);
	set_fs(old_fs);
	fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
}
#define MAX_FILE_NOTE_SIZE (4*1024*1024)
/*
 * Format of NT_FILE note:
 *
 * long count     -- how many files are mapped
 * long page_size -- units for file_ofs
 * array of [COUNT] elements of
 *   long start
 *   long end
 *   long file_ofs
 * followed by COUNT filenames in ASCII: "FILE1" NUL "FILE2" NUL...
 */
static int fill_files_note(struct memelfnote *note)
{
	struct vm_area_struct *vma;
	unsigned count, size, names_ofs, remaining, n;
	user_long_t *data;
	user_long_t *start_end_ofs;
	char *name_base, *name_curpos;

	/* *Estimated* file count and total data size needed */
	count = current->mm->map_count;
	if (count > UINT_MAX / 64)
		return -EINVAL;
	size = count * 64;

	names_ofs = (2 + 3 * count) * sizeof(data[0]);
 alloc:
	if (size >= MAX_FILE_NOTE_SIZE) /* paranoia check */
		return -EINVAL;
	size = round_up(size, PAGE_SIZE);
	data = vmalloc(size);
	if (ZERO_OR_NULL_PTR(data))
		return -ENOMEM;

	start_end_ofs = data + 2;
	name_base = name_curpos = ((char *)data) + names_ofs;
	remaining = size - names_ofs;
	count = 0;
	for (vma = current->mm->mmap; vma != NULL; vma = vma->vm_next) {
		struct file *file;
		const char *filename;

		file = vma->vm_file;
		if (!file)
			continue;
		filename = file_path(file, name_curpos, remaining);
		if (IS_ERR(filename)) {
			if (PTR_ERR(filename) == -ENAMETOOLONG) {
				vfree(data);
				size = size * 5 / 4;
				goto alloc;
			}
			continue;
		}

		/* file_path() fills at the end, move name down */
		/* n = strlen(filename) + 1: */
		n = (name_curpos + remaining) - filename;
		remaining = filename - name_curpos;
		memmove(name_curpos, filename, n);
		name_curpos += n;

		*start_end_ofs++ = vma->vm_start;
		*start_end_ofs++ = vma->vm_end;
		*start_end_ofs++ = vma->vm_pgoff;
		count++;
	}

	/* Now we know exact count of files, can store it */
	data[0] = count;
	data[1] = PAGE_SIZE;
	/*
	 * Count usually is less than current->mm->map_count,
	 * we need to move filenames down.
	 */
	n = current->mm->map_count - count;
	if (n != 0) {
		unsigned shift_bytes = n * 3 * sizeof(data[0]);
		memmove(name_base - shift_bytes, name_base,
			name_curpos - name_base);
		name_curpos -= shift_bytes;
	}

	size = name_curpos - (char *)data;
	fill_note(note, "CORE", NT_FILE, size, data);
	return 0;
}
#ifdef CORE_DUMP_USE_REGSET
#include <linux/regset.h>

struct elf_thread_core_info {
	struct elf_thread_core_info *next;
	struct task_struct *task;
	struct elf_prstatus prstatus;
	struct memelfnote notes[0];
};

struct elf_note_info {
	struct elf_thread_core_info *thread;
	struct memelfnote psinfo;
	struct memelfnote signote;
	struct memelfnote auxv;
	struct memelfnote files;
	user_siginfo_t csigdata;
	size_t size;
	int thread_notes;
};

/*
 * When a regset has a writeback hook, we call it on each thread before
 * dumping user memory. On register window machines, this makes sure the
 * user memory backing the register data is up to date before we read it.
 */
static void do_thread_regset_writeback(struct task_struct *task,
				       const struct user_regset *regset)
{
	if (regset->writeback)
		regset->writeback(task, regset, 1);
}

#ifndef PRSTATUS_SIZE
#define PRSTATUS_SIZE(S, R) sizeof(S)
#endif

#ifndef SET_PR_FPVALID
#define SET_PR_FPVALID(S, V, R) ((S)->pr_fpvalid = (V))
#endif

static int fill_thread_core_info(struct elf_thread_core_info *t,
				 const struct user_regset_view *view,
				 long signr, size_t *total)
{
	unsigned int i;
	unsigned int regset_size = view->regsets[0].n * view->regsets[0].size;

	/*
	 * NT_PRSTATUS is the one special case, because the regset data
	 * goes into the pr_reg field inside the note contents, rather
	 * than being the whole note contents.  We fill the rest in here.
	 * We assume that regset 0 is NT_PRSTATUS.
	 */
	fill_prstatus(&t->prstatus, t->task, signr);
	(void) view->regsets[0].get(t->task, &view->regsets[0], 0, regset_size,
				    &t->prstatus.pr_reg, NULL);

	fill_note(&t->notes[0], "CORE", NT_PRSTATUS,
		  PRSTATUS_SIZE(t->prstatus, regset_size), &t->prstatus);
	*total += notesize(&t->notes[0]);

	do_thread_regset_writeback(t->task, &view->regsets[0]);

	/*
	 * Each other regset might generate a note too.  For each regset
	 * that has no core_note_type or is inactive, we leave t->notes[i]
	 * all zero and we'll know to skip writing it later.
	 */
	for (i = 1; i < view->n; ++i) {
		const struct user_regset *regset = &view->regsets[i];
		do_thread_regset_writeback(t->task, regset);
		if (regset->core_note_type && regset->get &&
		    (!regset->active || regset->active(t->task, regset) > 0)) {
			int ret;
			size_t size = regset->n * regset->size;
			void *data = kzalloc(size, GFP_KERNEL);
			if (unlikely(!data))
				return 0;
			ret = regset->get(t->task, regset,
					  0, size, data, NULL);
			if (unlikely(ret))
				kfree(data);
			else {
				if (regset->core_note_type != NT_PRFPREG)
					fill_note(&t->notes[i], "LINUX",
						  regset->core_note_type,
						  size, data);
				else {
					SET_PR_FPVALID(&t->prstatus,
							1, regset_size);
					fill_note(&t->notes[i], "CORE",
						  NT_PRFPREG, size, data);
				}
				*total += notesize(&t->notes[i]);
			}
		}
	}

	return 1;
}
static int fill_note_info(struct elfhdr *elf, int phdrs,
			  struct elf_note_info *info,
			  const siginfo_t *siginfo, struct pt_regs *regs)
{
	struct task_struct *dump_task = current;
	const struct user_regset_view *view = task_user_regset_view(dump_task);
	struct elf_thread_core_info *t;
	struct elf_prpsinfo *psinfo;
	struct core_thread *ct;
	unsigned int i;

	info->size = 0;
	info->thread = NULL;

	psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
	if (psinfo == NULL) {
		info->psinfo.data = NULL; /* So we don't free this wrongly */
		return 0;
	}

	fill_note(&info->psinfo, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);

	/*
	 * Figure out how many notes we're going to need for each thread.
	 */
	info->thread_notes = 0;
	for (i = 0; i < view->n; ++i)
		if (view->regsets[i].core_note_type != 0)
			++info->thread_notes;

	/*
	 * Sanity check.  We rely on regset 0 being in NT_PRSTATUS,
	 * since it is our one special case.
	 */
	if (unlikely(info->thread_notes == 0) ||
	    unlikely(view->regsets[0].core_note_type != NT_PRSTATUS)) {
		WARN_ON(1);
		return 0;
	}

	/*
	 * Initialize the ELF file header.
	 */
	fill_elf_header(elf, phdrs,
			view->e_machine, view->e_flags);

	/*
	 * Allocate a structure for each thread.
	 */
	for (ct = &dump_task->mm->core_state->dumper; ct; ct = ct->next) {
		t = kzalloc(offsetof(struct elf_thread_core_info,
				     notes[info->thread_notes]),
			    GFP_KERNEL);
		if (unlikely(!t))
			return 0;

		t->task = ct->task;
		if (ct->task == dump_task || !info->thread) {
			t->next = info->thread;
			info->thread = t;
		} else {
			/*
			 * Make sure to keep the original task at
			 * the head of the list.
			 */
			t->next = info->thread->next;
			info->thread->next = t;
		}
	}

	/*
	 * Now fill in each thread's information.
	 */
	for (t = info->thread; t != NULL; t = t->next)
		if (!fill_thread_core_info(t, view, siginfo->si_signo, &info->size))
			return 0;

	/*
	 * Fill in the two process-wide notes.
	 */
	fill_psinfo(psinfo, dump_task->group_leader, dump_task->mm);
	info->size += notesize(&info->psinfo);

	fill_siginfo_note(&info->signote, &info->csigdata, siginfo);
	info->size += notesize(&info->signote);

	fill_auxv_note(&info->auxv, current->mm);
	info->size += notesize(&info->auxv);

	if (fill_files_note(&info->files) == 0)
		info->size += notesize(&info->files);

	return 1;
}
static size_t get_note_info_size(struct elf_note_info *info)
{
	return info->size;
}

/*
 * Write all the notes for each thread.  When writing the first thread, the
 * process-wide notes are interleaved after the first thread-specific note.
 */
static int write_note_info(struct elf_note_info *info,
			   struct coredump_params *cprm)
{
	bool first = true;
	struct elf_thread_core_info *t = info->thread;

	do {
		int i;

		if (!writenote(&t->notes[0], cprm))
			return 0;

		if (first && !writenote(&info->psinfo, cprm))
			return 0;
		if (first && !writenote(&info->signote, cprm))
			return 0;
		if (first && !writenote(&info->auxv, cprm))
			return 0;
		if (first && info->files.data &&
				!writenote(&info->files, cprm))
			return 0;

		for (i = 1; i < info->thread_notes; ++i)
			if (t->notes[i].data &&
			    !writenote(&t->notes[i], cprm))
				return 0;

		first = false;
		t = t->next;
	} while (t);

	return 1;
}

static void free_note_info(struct elf_note_info *info)
{
	struct elf_thread_core_info *threads = info->thread;
	while (threads) {
		unsigned int i;
		struct elf_thread_core_info *t = threads;
		threads = t->next;
		WARN_ON(t->notes[0].data && t->notes[0].data != &t->prstatus);
		for (i = 1; i < info->thread_notes; ++i)
			kfree(t->notes[i].data);
		kfree(t);
	}
	kfree(info->psinfo.data);
	vfree(info->files.data);
}
#else

/* Here is the structure in which status of each thread is captured. */
struct elf_thread_status
{
	struct list_head list;
	struct elf_prstatus prstatus;	/* NT_PRSTATUS */
	elf_fpregset_t fpu;		/* NT_PRFPREG */
	struct task_struct *thread;
#ifdef ELF_CORE_COPY_XFPREGS
	elf_fpxregset_t xfpu;		/* ELF_CORE_XFPREG_TYPE */
#endif
	struct memelfnote notes[3];
	int num_notes;
};
1940 * In order to add the specific thread information for the elf file format,
1941 * we need to keep a linked list of every threads pr_status and then create
1942 * a single section for them in the final core file.
1944 static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
1947 struct task_struct *p = t->thread;
1950 fill_prstatus(&t->prstatus, p, signr);
1951 elf_core_copy_task_regs(p, &t->prstatus.pr_reg);
1953 fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus),
1956 sz += notesize(&t->notes[0]);
1958 if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL,
1960 fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu),
1963 sz += notesize(&t->notes[1]);
1966 #ifdef ELF_CORE_COPY_XFPREGS
1967 if (elf_core_copy_task_xfpregs(p, &t->xfpu)) {
1968 fill_note(&t->notes[2], "LINUX", ELF_CORE_XFPREG_TYPE,
1969 sizeof(t->xfpu), &t->xfpu);
1971 sz += notesize(&t->notes[2]);
struct elf_note_info {
	struct memelfnote *notes;
	struct memelfnote *notes_files;
	struct elf_prstatus *prstatus;	/* NT_PRSTATUS */
	struct elf_prpsinfo *psinfo;	/* NT_PRPSINFO */
	struct list_head thread_list;
	elf_fpregset_t *fpu;
#ifdef ELF_CORE_COPY_XFPREGS
	elf_fpxregset_t *xfpu;
#endif
	user_siginfo_t csigdata;
	int thread_status_size;
	int numnote;
};

static int elf_note_info_init(struct elf_note_info *info)
{
	memset(info, 0, sizeof(*info));
	INIT_LIST_HEAD(&info->thread_list);

	/* Allocate space for ELF notes */
	info->notes = kmalloc(8 * sizeof(struct memelfnote), GFP_KERNEL);
	if (!info->notes)
		return 0;
	info->psinfo = kmalloc(sizeof(*info->psinfo), GFP_KERNEL);
	if (!info->psinfo)
		return 0;
	info->prstatus = kmalloc(sizeof(*info->prstatus), GFP_KERNEL);
	if (!info->prstatus)
		return 0;
	info->fpu = kmalloc(sizeof(*info->fpu), GFP_KERNEL);
	if (!info->fpu)
		return 0;
#ifdef ELF_CORE_COPY_XFPREGS
	info->xfpu = kmalloc(sizeof(*info->xfpu), GFP_KERNEL);
	if (!info->xfpu)
		return 0;
#endif
	return 1;
}
static int fill_note_info(struct elfhdr *elf, int phdrs,
			  struct elf_note_info *info,
			  const siginfo_t *siginfo, struct pt_regs *regs)
{
	struct list_head *t;
	struct core_thread *ct;
	struct elf_thread_status *ets;

	if (!elf_note_info_init(info))
		return 0;

	for (ct = current->mm->core_state->dumper.next;
					ct; ct = ct->next) {
		ets = kzalloc(sizeof(*ets), GFP_KERNEL);
		if (!ets)
			return 0;

		ets->thread = ct->task;
		list_add(&ets->list, &info->thread_list);
	}

	list_for_each(t, &info->thread_list) {
		int sz;

		ets = list_entry(t, struct elf_thread_status, list);
		sz = elf_dump_thread_status(siginfo->si_signo, ets);
		info->thread_status_size += sz;
	}
	/* now collect the dump for the current */
	memset(info->prstatus, 0, sizeof(*info->prstatus));
	fill_prstatus(info->prstatus, current, siginfo->si_signo);
	elf_core_copy_regs(&info->prstatus->pr_reg, regs);

	/* Set up header */
	fill_elf_header(elf, phdrs, ELF_ARCH, ELF_CORE_EFLAGS);

	/*
	 * Set up the notes in similar form to SVR4 core dumps made
	 * with info from their /proc.
	 */

	fill_note(info->notes + 0, "CORE", NT_PRSTATUS,
		  sizeof(*info->prstatus), info->prstatus);
	fill_psinfo(info->psinfo, current->group_leader, current->mm);
	fill_note(info->notes + 1, "CORE", NT_PRPSINFO,
		  sizeof(*info->psinfo), info->psinfo);

	fill_siginfo_note(info->notes + 2, &info->csigdata, siginfo);
	fill_auxv_note(info->notes + 3, current->mm);
	info->numnote = 4;

	if (fill_files_note(info->notes + info->numnote) == 0) {
		info->notes_files = info->notes + info->numnote;
		info->numnote++;
	}

	/* Try to dump the FPU. */
	info->prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs,
							       info->fpu);
	if (info->prstatus->pr_fpvalid)
		fill_note(info->notes + info->numnote++,
			  "CORE", NT_PRFPREG, sizeof(*info->fpu), info->fpu);
#ifdef ELF_CORE_COPY_XFPREGS
	if (elf_core_copy_task_xfpregs(current, info->xfpu))
		fill_note(info->notes + info->numnote++,
			  "LINUX", ELF_CORE_XFPREG_TYPE,
			  sizeof(*info->xfpu), info->xfpu);
#endif

	return 1;
}
static size_t get_note_info_size(struct elf_note_info *info)
{
	int sz = 0;
	int i;

	for (i = 0; i < info->numnote; i++)
		sz += notesize(info->notes + i);

	sz += info->thread_status_size;

	return sz;
}

static int write_note_info(struct elf_note_info *info,
			   struct coredump_params *cprm)
{
	int i;
	struct list_head *t;

	for (i = 0; i < info->numnote; i++)
		if (!writenote(info->notes + i, cprm))
			return 0;

	/* write out the thread status notes section */
	list_for_each(t, &info->thread_list) {
		struct elf_thread_status *tmp =
				list_entry(t, struct elf_thread_status, list);

		for (i = 0; i < tmp->num_notes; i++)
			if (!writenote(&tmp->notes[i], cprm))
				return 0;
	}

	return 1;
}

static void free_note_info(struct elf_note_info *info)
{
	while (!list_empty(&info->thread_list)) {
		struct list_head *tmp = info->thread_list.next;
		list_del(tmp);
		kfree(list_entry(tmp, struct elf_thread_status, list));
	}

	/* Free data possibly allocated by fill_files_note(): */
	if (info->notes_files)
		vfree(info->notes_files->data);

	kfree(info->prstatus);
	kfree(info->psinfo);
	kfree(info->notes);
	kfree(info->fpu);
#ifdef ELF_CORE_COPY_XFPREGS
	kfree(info->xfpu);
#endif
}

#endif
static struct vm_area_struct *first_vma(struct task_struct *tsk,
					struct vm_area_struct *gate_vma)
{
	struct vm_area_struct *ret = tsk->mm->mmap;

	if (ret)
		return ret;
	return gate_vma;
}

/*
 * Helper function for iterating across a vma list.  It ensures that the caller
 * will visit `gate_vma' prior to terminating the search.
 */
static struct vm_area_struct *next_vma(struct vm_area_struct *this_vma,
					struct vm_area_struct *gate_vma)
{
	struct vm_area_struct *ret;

	ret = this_vma->vm_next;
	if (ret)
		return ret;
	if (this_vma == gate_vma)
		return NULL;
	return gate_vma;
}

static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
			     elf_addr_t e_shoff, int segs)
{
	elf->e_shoff = e_shoff;
	elf->e_shentsize = sizeof(*shdr4extnum);
	elf->e_shnum = 1;
	elf->e_shstrndx = SHN_UNDEF;

	memset(shdr4extnum, 0, sizeof(*shdr4extnum));

	shdr4extnum->sh_type = SHT_NULL;
	shdr4extnum->sh_size = elf->e_shnum;
	shdr4extnum->sh_link = elf->e_shstrndx;
	shdr4extnum->sh_info = segs;
}
/*
 * Actual dumper
 *
 * This is a two-pass process; first we find the offsets of the bits,
 * and then they are actually written out.  If we run out of core limit
 * we just truncate.
 */
static int elf_core_dump(struct coredump_params *cprm)
{
	int has_dumped = 0;
	mm_segment_t fs;
	int segs, i;
	size_t vma_data_size = 0;
	struct vm_area_struct *vma, *gate_vma;
	struct elfhdr *elf = NULL;
	loff_t offset = 0, dataoff;
	struct elf_note_info info = { };
	struct elf_phdr *phdr4note = NULL;
	struct elf_shdr *shdr4extnum = NULL;
	Elf_Half e_phnum;
	elf_addr_t e_shoff;
	elf_addr_t *vma_filesz = NULL;

	/*
	 * We no longer stop all VM operations.
	 *
	 * This is because those processes that could possibly change map_count
	 * or the mmap / vma pages are now blocked in do_exit on current
	 * finishing this core dump.
	 *
	 * Only ptrace can touch these memory addresses, but it doesn't change
	 * the map_count or the pages allocated. So no possibility of crashing
	 * exists while dumping the mm->vm_next areas to the core file.
	 */

	/* alloc memory for large data structures: too large to be on stack */
	elf = kmalloc(sizeof(*elf), GFP_KERNEL);
	if (!elf)
		goto out;
	/*
	 * The number of segs is recorded into the ELF header as a 16bit value.
	 * Please check DEFAULT_MAX_MAP_COUNT definition when you modify here.
	 */
	segs = current->mm->map_count;
	segs += elf_core_extra_phdrs();

	gate_vma = get_gate_vma(current->mm);
	if (gate_vma != NULL)
		segs++;

	/* for notes section */
	segs++;

	/* If segs > PN_XNUM(0xffff), then e_phnum overflows. To avoid
	 * this, kernel supports extended numbering. Have a look at
	 * include/linux/elf.h for further information. */
	e_phnum = segs > PN_XNUM ? PN_XNUM : segs;

	/*
	 * Collect all the non-memory information about the process for the
	 * notes.  This also sets up the file header.
	 */
	if (!fill_note_info(elf, e_phnum, &info, cprm->siginfo, cprm->regs))
		goto cleanup;

	has_dumped = 1;

	fs = get_fs();
	set_fs(KERNEL_DS);

	offset += sizeof(*elf);				/* Elf header */
	offset += segs * sizeof(struct elf_phdr);	/* Program headers */
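	/*
	 * Editor's sketch of the core file layout whose offsets are being
	 * computed here (assuming no extra arch phdrs; a summary only):
	 *
	 *	ELF header
	 *	program headers (one PT_NOTE + one PT_LOAD per vma)
	 *	note data
	 *	<padding up to ELF_EXEC_PAGESIZE>
	 *	per-vma memory contents
	 *	extended section header (only if e_phnum == PN_XNUM)
	 */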
	/* Write notes phdr entry */
	{
		size_t sz = get_note_info_size(&info);

		sz += elf_coredump_extra_notes_size();

		phdr4note = kmalloc(sizeof(*phdr4note), GFP_KERNEL);
		if (!phdr4note)
			goto end_coredump;

		fill_elf_note_phdr(phdr4note, sz, offset);
		offset += sz;
	}

	dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);

	if (segs - 1 > ULONG_MAX / sizeof(*vma_filesz))
		goto end_coredump;
	vma_filesz = vmalloc((segs - 1) * sizeof(*vma_filesz));
	if (!vma_filesz)
		goto end_coredump;

	for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
			vma = next_vma(vma, gate_vma)) {
		unsigned long dump_size;

		dump_size = vma_dump_size(vma, cprm->mm_flags);
		vma_filesz[i++] = dump_size;
		vma_data_size += dump_size;
	}

	offset += vma_data_size;
	offset += elf_core_extra_data_size();
	e_shoff = offset;

	if (e_phnum == PN_XNUM) {
		shdr4extnum = kmalloc(sizeof(*shdr4extnum), GFP_KERNEL);
		if (!shdr4extnum)
			goto end_coredump;
		fill_extnum_info(elf, shdr4extnum, e_shoff, segs);
	}

	offset = dataoff;

	if (!dump_emit(cprm, elf, sizeof(*elf)))
		goto end_coredump;

	if (!dump_emit(cprm, phdr4note, sizeof(*phdr4note)))
		goto end_coredump;

	/* Write program headers for segments dump */
	for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
			vma = next_vma(vma, gate_vma)) {
		struct elf_phdr phdr;

		phdr.p_type = PT_LOAD;
		phdr.p_offset = offset;
		phdr.p_vaddr = vma->vm_start;
		phdr.p_paddr = 0;
		phdr.p_filesz = vma_filesz[i++];
		phdr.p_memsz = vma->vm_end - vma->vm_start;
		offset += phdr.p_filesz;
		phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
		if (vma->vm_flags & VM_WRITE)
			phdr.p_flags |= PF_W;
		if (vma->vm_flags & VM_EXEC)
			phdr.p_flags |= PF_X;
		phdr.p_align = ELF_EXEC_PAGESIZE;

		if (!dump_emit(cprm, &phdr, sizeof(phdr)))
			goto end_coredump;
	}

	if (!elf_core_write_extra_phdrs(cprm, offset))
		goto end_coredump;

	/* write out the notes section */
	if (!write_note_info(&info, cprm))
		goto end_coredump;

	if (elf_coredump_extra_notes_write(cprm))
		goto end_coredump;

	/* Align to page */
	if (!dump_skip(cprm, dataoff - cprm->pos))
		goto end_coredump;

	for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
			vma = next_vma(vma, gate_vma)) {
		unsigned long addr;
		unsigned long end;

		end = vma->vm_start + vma_filesz[i++];

		for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
			struct page *page;
			int stop;

			page = get_dump_page(addr);
			if (page) {
				void *kaddr = kmap(page);
				stop = !dump_emit(cprm, kaddr, PAGE_SIZE);
				kunmap(page);
				put_page(page);
			} else
				stop = !dump_skip(cprm, PAGE_SIZE);
			if (stop)
				goto end_coredump;
		}
	}
	dump_truncate(cprm);

	if (!elf_core_write_extra_data(cprm))
		goto end_coredump;

	if (e_phnum == PN_XNUM) {
		if (!dump_emit(cprm, shdr4extnum, sizeof(*shdr4extnum)))
			goto end_coredump;
	}

end_coredump:
	set_fs(fs);

cleanup:
	free_note_info(&info);
	kfree(shdr4extnum);
	vfree(vma_filesz);
	kfree(phdr4note);
	kfree(elf);
out:
	return has_dumped;
}

#endif		/* CONFIG_ELF_CORE */
static int __init init_elf_binfmt(void)
{
	register_binfmt(&elf_format);
	return 0;
}

static void __exit exit_elf_binfmt(void)
{
	/* Remove the ELF loader. */
	unregister_binfmt(&elf_format);
}

core_initcall(init_elf_binfmt);
module_exit(exit_elf_binfmt);
MODULE_LICENSE("GPL");