// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/binfmt_elf.c
 *
 * These are the functions used to load ELF format executables as used
 * on SVr4 machines. Information on the format may be found in the book
 * "UNIX SYSTEM V RELEASE 4 Programmers Guide: Ansi C and Programming Support
 * Tools".
 *
 * Copyright 1993, 1994: Eric Youngdale (ericy@cais.com).
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/binfmts.h>
#include <linux/string.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/personality.h>
#include <linux/elfcore.h>
#include <linux/init.h>
#include <linux/highuid.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/random.h>
#include <linux/elf.h>
#include <linux/elf-randomize.h>
#include <linux/utsname.h>
#include <linux/coredump.h>
#include <linux/sched.h>
#include <linux/sched/coredump.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/cputime.h>
#include <linux/cred.h>
#include <linux/dax.h>
#include <linux/uaccess.h>
#include <asm/param.h>
#include <asm/page.h>
#ifndef user_long_t
#define user_long_t long
#endif
#ifndef user_siginfo_t
#define user_siginfo_t siginfo_t
#endif

/* That's for binfmt_elf_fdpic to deal with */
#ifndef elf_check_fdpic
#define elf_check_fdpic(ex) false
#endif
static int load_elf_binary(struct linux_binprm *bprm);

#ifdef CONFIG_USELIB
static int load_elf_library(struct file *);
#else
#define load_elf_library NULL
#endif
/*
 * If we don't support core dumping, then supply a NULL so we
 * don't even try.
 */
#ifdef CONFIG_ELF_CORE
static int elf_core_dump(struct coredump_params *cprm);
#else
#define elf_core_dump	NULL
#endif
#if ELF_EXEC_PAGESIZE > PAGE_SIZE
#define ELF_MIN_ALIGN	ELF_EXEC_PAGESIZE
#else
#define ELF_MIN_ALIGN	PAGE_SIZE
#endif

#ifndef ELF_CORE_EFLAGS
#define ELF_CORE_EFLAGS	0
#endif
#define ELF_PAGESTART(_v) ((_v) & ~(unsigned long)(ELF_MIN_ALIGN-1))
#define ELF_PAGEOFFSET(_v) ((_v) & (ELF_MIN_ALIGN-1))
#define ELF_PAGEALIGN(_v) (((_v) + ELF_MIN_ALIGN - 1) & ~(ELF_MIN_ALIGN - 1))
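/*
 * Worked example, assuming ELF_MIN_ALIGN == 4096 (0x1000): for
 * _v == 0x2345, ELF_PAGESTART(_v) == 0x2000 (round down to the page),
 * ELF_PAGEOFFSET(_v) == 0x345 (offset within the page), and
 * ELF_PAGEALIGN(_v) == 0x3000 (round up to the next page boundary).
 */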
static struct linux_binfmt elf_format = {
	.module		= THIS_MODULE,
	.load_binary	= load_elf_binary,
	.load_shlib	= load_elf_library,
	.core_dump	= elf_core_dump,
	.min_coredump	= ELF_EXEC_PAGESIZE,
};
#define BAD_ADDR(x) (unlikely((unsigned long)(x) >= TASK_SIZE))
static int set_brk(unsigned long start, unsigned long end, int prot)
{
	start = ELF_PAGEALIGN(start);
	end = ELF_PAGEALIGN(end);
	if (end > start) {
		/*
		 * Map the last of the bss segment.
		 * If the header is requesting these pages to be
		 * executable, honour that (ppc32 needs this).
		 */
		int error = vm_brk_flags(start, end - start,
				prot & PROT_EXEC ? VM_EXEC : 0);
		if (error)
			return error;
	}
	current->mm->start_brk = current->mm->brk = end;
	return 0;
}
/* We need to explicitly zero any fractional pages
   after the data section (i.e. bss).  This would
   contain the junk from the file that should not
   be in memory
 */
static int padzero(unsigned long elf_bss)
{
	unsigned long nbyte;

	nbyte = ELF_PAGEOFFSET(elf_bss);
	if (nbyte) {
		nbyte = ELF_MIN_ALIGN - nbyte;
		if (clear_user((void __user *) elf_bss, nbyte))
			return -EFAULT;
	}
	return 0;
}
/* Let's use some macros to make this stack manipulation a little clearer */
#ifdef CONFIG_STACK_GROWSUP
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) + (items))
#define STACK_ROUND(sp, items) \
	((15 + (unsigned long) ((sp) + (items))) &~ 15UL)
#define STACK_ALLOC(sp, len) ({ \
	elf_addr_t __user *old_sp = (elf_addr_t __user *)sp; sp += len; \
	old_sp; })
#else
#define STACK_ADD(sp, items) ((elf_addr_t __user *)(sp) - (items))
#define STACK_ROUND(sp, items) \
	(((unsigned long) (sp - items)) &~ 15UL)
#define STACK_ALLOC(sp, len) ({ sp -= len ; sp; })
#endif
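/*
 * Note the asymmetry above: when the stack grows up, STACK_ALLOC
 * evaluates to the old sp (the base of the freshly reserved region,
 * below the advanced sp); when the stack grows down, the decremented
 * sp itself is the base of the region. Either way the caller gets a
 * pointer to len usable bytes.
 */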
#ifndef ELF_BASE_PLATFORM
/*
 * AT_BASE_PLATFORM indicates the "real" hardware/microarchitecture.
 * If the arch defines ELF_BASE_PLATFORM (in asm/elf.h), the value
 * will be copied to the user stack in the same manner as AT_PLATFORM.
 */
#define ELF_BASE_PLATFORM NULL
#endif
static int
create_elf_tables(struct linux_binprm *bprm, const struct elfhdr *exec,
		unsigned long load_addr, unsigned long interp_load_addr,
		unsigned long e_entry)
{
	struct mm_struct *mm = current->mm;
	unsigned long p = bprm->p;
	int argc = bprm->argc;
	int envc = bprm->envc;
	elf_addr_t __user *sp;
	elf_addr_t __user *u_platform;
	elf_addr_t __user *u_base_platform;
	elf_addr_t __user *u_rand_bytes;
	const char *k_platform = ELF_PLATFORM;
	const char *k_base_platform = ELF_BASE_PLATFORM;
	unsigned char k_rand_bytes[16];
	int items;
	elf_addr_t *elf_info;
	int ei_index;
	const struct cred *cred = current_cred();
	struct vm_area_struct *vma;
	/*
	 * In some cases (e.g. Hyper-Threading), we want to avoid L1
	 * evictions by the processes running on the same package. One
	 * thing we can do is to shuffle the initial stack for them.
	 */

	p = arch_align_stack(p);
	/*
	 * If this architecture has a platform capability string, copy it
	 * to userspace.  In some cases (Sparc), this info is impossible
	 * for userspace to get any other way, in others (i386) it is
	 * merely difficult.
	 */
	u_platform = NULL;
	if (k_platform) {
		size_t len = strlen(k_platform) + 1;

		u_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
		if (__copy_to_user(u_platform, k_platform, len))
			return -EFAULT;
	}
	/*
	 * If this architecture has a "base" platform capability
	 * string, copy it to userspace.
	 */
	u_base_platform = NULL;
	if (k_base_platform) {
		size_t len = strlen(k_base_platform) + 1;

		u_base_platform = (elf_addr_t __user *)STACK_ALLOC(p, len);
		if (__copy_to_user(u_base_platform, k_base_platform, len))
			return -EFAULT;
	}
	/*
	 * Generate 16 random bytes for userspace PRNG seeding.
	 */
	get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
	u_rand_bytes = (elf_addr_t __user *)
		       STACK_ALLOC(p, sizeof(k_rand_bytes));
	if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
		return -EFAULT;
	/* Create the ELF interpreter info */
	elf_info = (elf_addr_t *)mm->saved_auxv;
	/* update AT_VECTOR_SIZE_BASE if the number of NEW_AUX_ENT() changes */
#define NEW_AUX_ENT(id, val) \
	do { \
		*elf_info++ = id; \
		*elf_info++ = val; \
	} while (0)

#ifdef ARCH_DLINFO
	/*
	 * ARCH_DLINFO must come first so PPC can do its special alignment of
	 * AUXV.
	 * update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT() in
	 * ARCH_DLINFO changes
	 */
	ARCH_DLINFO;
#endif
	NEW_AUX_ENT(AT_HWCAP, ELF_HWCAP);
	NEW_AUX_ENT(AT_PAGESZ, ELF_EXEC_PAGESIZE);
	NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
	NEW_AUX_ENT(AT_PHDR, load_addr + exec->e_phoff);
	NEW_AUX_ENT(AT_PHENT, sizeof(struct elf_phdr));
	NEW_AUX_ENT(AT_PHNUM, exec->e_phnum);
	NEW_AUX_ENT(AT_BASE, interp_load_addr);
	NEW_AUX_ENT(AT_FLAGS, 0);
	NEW_AUX_ENT(AT_ENTRY, e_entry);
	NEW_AUX_ENT(AT_UID, from_kuid_munged(cred->user_ns, cred->uid));
	NEW_AUX_ENT(AT_EUID, from_kuid_munged(cred->user_ns, cred->euid));
	NEW_AUX_ENT(AT_GID, from_kgid_munged(cred->user_ns, cred->gid));
	NEW_AUX_ENT(AT_EGID, from_kgid_munged(cred->user_ns, cred->egid));
	NEW_AUX_ENT(AT_SECURE, bprm->secureexec);
	NEW_AUX_ENT(AT_RANDOM, (elf_addr_t)(unsigned long)u_rand_bytes);
#ifdef ELF_HWCAP2
	NEW_AUX_ENT(AT_HWCAP2, ELF_HWCAP2);
#endif
	NEW_AUX_ENT(AT_EXECFN, bprm->exec);
	if (k_platform) {
		NEW_AUX_ENT(AT_PLATFORM,
			    (elf_addr_t)(unsigned long)u_platform);
	}
	if (k_base_platform) {
		NEW_AUX_ENT(AT_BASE_PLATFORM,
			    (elf_addr_t)(unsigned long)u_base_platform);
	}
	if (bprm->interp_flags & BINPRM_FLAGS_EXECFD) {
		NEW_AUX_ENT(AT_EXECFD, bprm->interp_data);
	}
#undef NEW_AUX_ENT
	/* AT_NULL is zero; clear the rest too */
	memset(elf_info, 0, (char *)mm->saved_auxv +
			sizeof(mm->saved_auxv) - (char *)elf_info);
	/* And advance past the AT_NULL entry. */
	elf_info += 2;

	ei_index = elf_info - (elf_addr_t *)mm->saved_auxv;
	sp = STACK_ADD(p, ei_index);

	items = (argc + 1) + (envc + 1) + 1;
	bprm->p = STACK_ROUND(sp, items);
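	/*
	 * bprm->p now points at the lowest address the initial stack
	 * will use. From low to high the finished stack reads:
	 *
	 *   argc
	 *   argv[0] ... argv[argc-1], NULL
	 *   envp[0] ... envp[envc-1], NULL
	 *   auxv pairs ... terminated by an AT_NULL pair
	 *   (higher up: random bytes, platform strings, arg/env strings)
	 */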
	/* Point sp at the lowest address on the stack */
#ifdef CONFIG_STACK_GROWSUP
	sp = (elf_addr_t __user *)bprm->p - items - ei_index;
	bprm->exec = (unsigned long)sp; /* XXX: PARISC HACK */
#else
	sp = (elf_addr_t __user *)bprm->p;
#endif
	/*
	 * Grow the stack manually; some architectures have a limit on how
	 * far ahead a user-space access may be in order to grow the stack.
	 */
	vma = find_extend_vma(mm, bprm->p);
	if (!vma)
		return -EFAULT;
	/* Now, let's put argc (and argv, envp if appropriate) on the stack */
	if (__put_user(argc, sp++))
		return -EFAULT;

	/* Populate list of argv pointers back to argv strings. */
	p = mm->arg_end = mm->arg_start;
	while (argc-- > 0) {
		size_t len;
		if (__put_user((elf_addr_t)p, sp++))
			return -EFAULT;
		len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
		if (!len || len > MAX_ARG_STRLEN)
			return -EINVAL;
		p += len;
	}
	if (__put_user(0, sp++))
		return -EFAULT;
	mm->arg_end = p;
	/* Populate list of envp pointers back to envp strings. */
	mm->env_end = mm->env_start = p;
	while (envc-- > 0) {
		size_t len;
		if (__put_user((elf_addr_t)p, sp++))
			return -EFAULT;
		len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
		if (!len || len > MAX_ARG_STRLEN)
			return -EINVAL;
		p += len;
	}
	if (__put_user(0, sp++))
		return -EFAULT;
	mm->env_end = p;
	/* Put the elf_info on the stack in the right place.  */
	if (copy_to_user(sp, mm->saved_auxv, ei_index * sizeof(elf_addr_t)))
		return -EFAULT;
	return 0;
}
#ifndef elf_map

static unsigned long elf_map(struct file *filep, unsigned long addr,
		const struct elf_phdr *eppnt, int prot, int type,
		unsigned long total_size)
{
	unsigned long map_addr;
	unsigned long size = eppnt->p_filesz + ELF_PAGEOFFSET(eppnt->p_vaddr);
	unsigned long off = eppnt->p_offset - ELF_PAGEOFFSET(eppnt->p_vaddr);
	addr = ELF_PAGESTART(addr);
	size = ELF_PAGEALIGN(size);
	/* mmap() will return -EINVAL if given a zero size, but a
	 * segment with zero filesize is perfectly valid */
	if (!size)
		return addr;

	/*
	 * total_size is the size of the ELF (interpreter) image.
	 * The _first_ mmap needs to know the full size, otherwise
	 * randomization might put this image into an overlapping
	 * position with the ELF binary image. (since size < total_size)
	 * So we first map the 'big' image - and unmap the remainder at
	 * the end. (which unmap is needed for ELF images with holes.)
	 */
	if (total_size) {
		total_size = ELF_PAGEALIGN(total_size);
		map_addr = vm_mmap(filep, addr, total_size, prot, type, off);
		if (!BAD_ADDR(map_addr))
			vm_munmap(map_addr+size, total_size-size);
	} else
		map_addr = vm_mmap(filep, addr, size, prot, type, off);

	if ((type & MAP_FIXED_NOREPLACE) &&
	    PTR_ERR((void *)map_addr) == -EEXIST)
		pr_info("%d (%s): Uhuuh, elf segment at %px requested but the memory is mapped already\n",
			task_pid_nr(current), current->comm, (void *)addr);

	return(map_addr);
}
#endif /* !elf_map */
static unsigned long total_mapping_size(const struct elf_phdr *cmds, int nr)
{
	int i, first_idx = -1, last_idx = -1;

	for (i = 0; i < nr; i++) {
		if (cmds[i].p_type == PT_LOAD) {
			last_idx = i;
			if (first_idx == -1)
				first_idx = i;
		}
	}
	if (first_idx == -1)
		return 0;

	return cmds[last_idx].p_vaddr + cmds[last_idx].p_memsz -
				ELF_PAGESTART(cmds[first_idx].p_vaddr);
}
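/*
 * Example: with two PT_LOAD entries, one at p_vaddr 0x400000 (p_memsz
 * 0x1000) and one at p_vaddr 0x600000 (p_memsz 0x2000), this returns
 * 0x602000 - 0x400000 = 0x202000: the span a single mapping must cover
 * so that both segments land at their expected relative offsets.
 */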
static int elf_read(struct file *file, void *buf, size_t len, loff_t pos)
{
	ssize_t rv;

	rv = kernel_read(file, buf, len, &pos);
	if (unlikely(rv != len)) {
		return (rv < 0) ? rv : -EIO;
	}
	return 0;
}
/**
 * load_elf_phdrs() - load ELF program headers
 * @elf_ex:   ELF header of the binary whose program headers should be loaded
 * @elf_file: the opened ELF binary file
 *
 * Loads ELF program headers from the binary file elf_file, which has the ELF
 * header pointed to by elf_ex, into a newly allocated array. The caller is
 * responsible for freeing the allocated data. Returns an ERR_PTR upon failure.
 */
static struct elf_phdr *load_elf_phdrs(const struct elfhdr *elf_ex,
				       struct file *elf_file)
{
	struct elf_phdr *elf_phdata = NULL;
	int retval, err = -1;
	unsigned int size;

	/*
	 * If the size of this structure has changed, then punt, since
	 * we will be doing the wrong thing.
	 */
	if (elf_ex->e_phentsize != sizeof(struct elf_phdr))
		goto out;

	/* Sanity check the number of program headers... */
	/* ...and their total size. */
	size = sizeof(struct elf_phdr) * elf_ex->e_phnum;
	if (size == 0 || size > 65536 || size > ELF_MIN_ALIGN)
		goto out;

	elf_phdata = kmalloc(size, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	/* Read in the program headers */
	retval = elf_read(elf_file, elf_phdata, size, elf_ex->e_phoff);
	if (retval < 0) {
		err = retval;
		goto out;
	}

	/* Success! */
	err = 0;
out:
	if (err) {
		kfree(elf_phdata);
		elf_phdata = NULL;
	}
	return elf_phdata;
}
#ifndef CONFIG_ARCH_BINFMT_ELF_STATE

/**
 * struct arch_elf_state - arch-specific ELF loading state
 *
 * This structure is used to preserve architecture specific data during
 * the loading of an ELF file, throughout the checking of architecture
 * specific ELF headers & through to the point where the ELF load is
 * known to be proceeding (ie. SET_PERSONALITY).
 *
 * This implementation is a dummy for architectures which require no
 * specific state.
 */
struct arch_elf_state {
};

#define INIT_ARCH_ELF_STATE {}
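/*
 * For illustration only (not from this file): an architecture that
 * selects CONFIG_ARCH_BINFMT_ELF_STATE supplies its own definitions in
 * asm/elf.h instead, roughly along these lines:
 *
 *	struct arch_elf_state {
 *		int fp_abi;	// hypothetical field name
 *	};
 *	#define INIT_ARCH_ELF_STATE { .fp_abi = -1, }
 *
 * MIPS uses this mechanism to track the FP ABI requested by the ELF
 * headers before committing to the load.
 */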
/**
 * arch_elf_pt_proc() - check a PT_LOPROC..PT_HIPROC ELF program header
 * @ehdr:	The main ELF header
 * @phdr:	The program header to check
 * @elf:	The open ELF file
 * @is_interp:	True if the phdr is from the interpreter of the ELF being
 *		loaded, else false.
 * @state:	Architecture-specific state preserved throughout the process
 *		of loading the ELF.
 *
 * Inspects the program header phdr to validate its correctness and/or
 * suitability for the system. Called once per ELF program header in the
 * range PT_LOPROC to PT_HIPROC, for both the ELF being loaded and its
 * interpreter if any.
 *
 * Return: Zero to proceed with the ELF load, non-zero to fail the ELF load
 *         with that return code.
 */
static inline int arch_elf_pt_proc(struct elfhdr *ehdr,
				   struct elf_phdr *phdr,
				   struct file *elf, bool is_interp,
				   struct arch_elf_state *state)
{
	/* Dummy implementation, always proceed */
	return 0;
}
/**
 * arch_check_elf() - check an ELF executable
 * @ehdr:	The main ELF header
 * @has_interp:	True if the ELF has an interpreter, else false.
 * @interp_ehdr: The interpreter's ELF header
 * @state:	Architecture-specific state preserved throughout the process
 *		of loading the ELF.
 *
 * Provides a final opportunity for architecture code to reject the loading
 * of the ELF & cause an exec syscall to return an error. This is called after
 * all program headers to be checked by arch_elf_pt_proc have been.
 *
 * Return: Zero to proceed with the ELF load, non-zero to fail the ELF load
 *         with that return code.
 */
static inline int arch_check_elf(struct elfhdr *ehdr, bool has_interp,
				 struct elfhdr *interp_ehdr,
				 struct arch_elf_state *state)
{
	/* Dummy implementation, always proceed */
	return 0;
}

#endif /* !CONFIG_ARCH_BINFMT_ELF_STATE */
static inline int make_prot(u32 p_flags)
{
	int prot = 0;

	if (p_flags & PF_R)
		prot |= PROT_READ;
	if (p_flags & PF_W)
		prot |= PROT_WRITE;
	if (p_flags & PF_X)
		prot |= PROT_EXEC;
	return prot;
}
/* This is much more generalized than the library routine read function,
   so we keep this separate.  Technically the library read function
   is only provided so that we can read a.out libraries that have
   an ELF header */
static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
		struct file *interpreter,
		unsigned long no_base, struct elf_phdr *interp_elf_phdata)
{
	struct elf_phdr *eppnt;
	unsigned long load_addr = 0;
	int load_addr_set = 0;
	unsigned long last_bss = 0, elf_bss = 0;
	int bss_prot = 0;
	unsigned long error = ~0UL;
	unsigned long total_size;
	int i;
	/* First of all, some simple consistency checks */
	if (interp_elf_ex->e_type != ET_EXEC &&
	    interp_elf_ex->e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(interp_elf_ex) ||
	    elf_check_fdpic(interp_elf_ex))
		goto out;
	if (!interpreter->f_op->mmap)
		goto out;

	total_size = total_mapping_size(interp_elf_phdata,
					interp_elf_ex->e_phnum);
	if (!total_size) {
		error = -EINVAL;
		goto out;
	}
	eppnt = interp_elf_phdata;
	for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
		if (eppnt->p_type == PT_LOAD) {
			int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
			int elf_prot = make_prot(eppnt->p_flags);
			unsigned long vaddr = 0;
			unsigned long k, map_addr;

			vaddr = eppnt->p_vaddr;
			if (interp_elf_ex->e_type == ET_EXEC || load_addr_set)
				elf_type |= MAP_FIXED_NOREPLACE;
			else if (no_base && interp_elf_ex->e_type == ET_DYN)
				load_addr = -vaddr;

			map_addr = elf_map(interpreter, load_addr + vaddr,
					eppnt, elf_prot, elf_type, total_size);
			total_size = 0;
			error = map_addr;
			if (BAD_ADDR(map_addr))
				goto out;

			if (!load_addr_set &&
			    interp_elf_ex->e_type == ET_DYN) {
				load_addr = map_addr - ELF_PAGESTART(vaddr);
				load_addr_set = 1;
			}

			/*
			 * Check to see if the section's size will overflow the
			 * allowed task size. Note that p_filesz must always be
			 * <= p_memsz so it's only necessary to check p_memsz.
			 */
			k = load_addr + eppnt->p_vaddr;
			if (BAD_ADDR(k) ||
			    eppnt->p_filesz > eppnt->p_memsz ||
			    eppnt->p_memsz > TASK_SIZE ||
			    TASK_SIZE - eppnt->p_memsz < k) {
				error = -ENOMEM;
				goto out;
			}
			/*
			 * Find the end of the file mapping for this phdr, and
			 * keep track of the largest address we see for this.
			 */
			k = load_addr + eppnt->p_vaddr + eppnt->p_filesz;
			if (k > elf_bss)
				elf_bss = k;

			/*
			 * Do the same thing for the memory mapping - between
			 * elf_bss and last_bss is the bss section.
			 */
			k = load_addr + eppnt->p_vaddr + eppnt->p_memsz;
			if (k > last_bss) {
				last_bss = k;
				bss_prot = elf_prot;
			}
		}
	}
	/*
	 * Now fill out the bss section: first pad the last page from
	 * the file up to the page boundary, and zero it from elf_bss
	 * up to the end of the page.
	 */
	if (padzero(elf_bss)) {
		error = -EFAULT;
		goto out;
	}
	/*
	 * Next, align both the file and mem bss up to the page size,
	 * since this is where elf_bss was just zeroed up to, and where
	 * last_bss will end after the vm_brk_flags() below.
	 */
	elf_bss = ELF_PAGEALIGN(elf_bss);
	last_bss = ELF_PAGEALIGN(last_bss);
	/* Finally, if there is still more bss to allocate, do it. */
	if (last_bss > elf_bss) {
		error = vm_brk_flags(elf_bss, last_bss - elf_bss,
				bss_prot & PROT_EXEC ? VM_EXEC : 0);
		if (error)
			goto out;
	}

	error = load_addr;
out:
	return error;
}
/*
 * These are the functions used to load ELF style executables and shared
 * libraries.  There is no binary dependent code anywhere else.
 */
static int load_elf_binary(struct linux_binprm *bprm)
{
	struct file *interpreter = NULL; /* to shut gcc up */
	unsigned long load_addr = 0, load_bias = 0;
	int load_addr_set = 0;
	unsigned long error;
	struct elf_phdr *elf_ppnt, *elf_phdata, *interp_elf_phdata = NULL;
	unsigned long elf_bss, elf_brk;
	int bss_prot = 0;
	int retval, i;
	unsigned long elf_entry;
	unsigned long e_entry;
	unsigned long interp_load_addr = 0;
	unsigned long start_code, end_code, start_data, end_data;
	unsigned long reloc_func_desc __maybe_unused = 0;
	int executable_stack = EXSTACK_DEFAULT;
	struct elfhdr *elf_ex = (struct elfhdr *)bprm->buf;
	struct {
		struct elfhdr interp_elf_ex;
	} *loc;
	struct arch_elf_state arch_state = INIT_ARCH_ELF_STATE;
	struct mm_struct *mm;
	struct pt_regs *regs;
	loc = kmalloc(sizeof(*loc), GFP_KERNEL);
	if (!loc) {
		retval = -ENOMEM;
		goto out_ret;
	}

	retval = -ENOEXEC;
	/* First of all, some simple consistency checks */
	if (memcmp(elf_ex->e_ident, ELFMAG, SELFMAG) != 0)
		goto out;

	if (elf_ex->e_type != ET_EXEC && elf_ex->e_type != ET_DYN)
		goto out;
	if (!elf_check_arch(elf_ex))
		goto out;
	if (elf_check_fdpic(elf_ex))
		goto out;
	if (!bprm->file->f_op->mmap)
		goto out;

	elf_phdata = load_elf_phdrs(elf_ex, bprm->file);
	if (!elf_phdata)
		goto out;
	elf_ppnt = elf_phdata;
	for (i = 0; i < elf_ex->e_phnum; i++, elf_ppnt++) {
		char *elf_interpreter;

		if (elf_ppnt->p_type != PT_INTERP)
			continue;

		/*
		 * This is the program interpreter used for shared libraries -
		 * for now assume that this is an a.out format binary.
		 */
		retval = -ENOEXEC;
		if (elf_ppnt->p_filesz > PATH_MAX || elf_ppnt->p_filesz < 2)
			goto out_free_ph;

		retval = -ENOMEM;
		elf_interpreter = kmalloc(elf_ppnt->p_filesz, GFP_KERNEL);
		if (!elf_interpreter)
			goto out_free_ph;

		retval = elf_read(bprm->file, elf_interpreter, elf_ppnt->p_filesz,
				  elf_ppnt->p_offset);
		if (retval < 0)
			goto out_free_interp;
		/* make sure path is NULL terminated */
		retval = -ENOEXEC;
		if (elf_interpreter[elf_ppnt->p_filesz - 1] != '\0')
			goto out_free_interp;

		interpreter = open_exec(elf_interpreter);
		kfree(elf_interpreter);
		retval = PTR_ERR(interpreter);
		if (IS_ERR(interpreter))
			goto out_free_ph;

		/*
		 * If the binary is not readable then enforce mm->dumpable = 0
		 * regardless of the interpreter's permissions.
		 */
		would_dump(bprm, interpreter);

		/* Get the exec headers */
		retval = elf_read(interpreter, &loc->interp_elf_ex,
				  sizeof(loc->interp_elf_ex), 0);
		if (retval < 0)
			goto out_free_dentry;

		break;

out_free_interp:
		kfree(elf_interpreter);
		goto out_free_ph;
	}
	elf_ppnt = elf_phdata;
	for (i = 0; i < elf_ex->e_phnum; i++, elf_ppnt++)
		switch (elf_ppnt->p_type) {
		case PT_GNU_STACK:
			if (elf_ppnt->p_flags & PF_X)
				executable_stack = EXSTACK_ENABLE_X;
			else
				executable_stack = EXSTACK_DISABLE_X;
			break;

		case PT_LOPROC ... PT_HIPROC:
			retval = arch_elf_pt_proc(elf_ex, elf_ppnt,
						  bprm->file, false,
						  &arch_state);
			if (retval)
				goto out_free_dentry;
			break;
		}
	/* Some simple consistency checks for the interpreter */
	if (interpreter) {
		retval = -ELIBBAD;
		/* Not an ELF interpreter */
		if (memcmp(loc->interp_elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
			goto out_free_dentry;
		/* Verify the interpreter has a valid arch */
		if (!elf_check_arch(&loc->interp_elf_ex) ||
		    elf_check_fdpic(&loc->interp_elf_ex))
			goto out_free_dentry;

		/* Load the interpreter program headers */
		interp_elf_phdata = load_elf_phdrs(&loc->interp_elf_ex,
						   interpreter);
		if (!interp_elf_phdata)
			goto out_free_dentry;
		/* Pass PT_LOPROC..PT_HIPROC headers to arch code */
		elf_ppnt = interp_elf_phdata;
		for (i = 0; i < loc->interp_elf_ex.e_phnum; i++, elf_ppnt++)
			switch (elf_ppnt->p_type) {
			case PT_LOPROC ... PT_HIPROC:
				retval = arch_elf_pt_proc(&loc->interp_elf_ex,
							  elf_ppnt, interpreter,
							  true, &arch_state);
				if (retval)
					goto out_free_dentry;
				break;
			}
	}
	/*
	 * Allow arch code to reject the ELF at this point, whilst it's
	 * still possible to return an error to the code that invoked
	 * the exec syscall.
	 */
	retval = arch_check_elf(elf_ex,
				!!interpreter, &loc->interp_elf_ex,
				&arch_state);
	if (retval)
		goto out_free_dentry;
	/* Flush all traces of the currently running executable */
	retval = flush_old_exec(bprm);
	if (retval)
		goto out_free_dentry;

	/* Do this immediately, since STACK_TOP as used in setup_arg_pages
	   may depend on the personality.  */
	SET_PERSONALITY2(*elf_ex, &arch_state);
	if (elf_read_implies_exec(*elf_ex, executable_stack))
		current->personality |= READ_IMPLIES_EXEC;

	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		current->flags |= PF_RANDOMIZE;

	setup_new_exec(bprm);
	install_exec_creds(bprm);

	/* Do this so that we can load the interpreter, if need be.  We will
	   change some of these later */
	retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
				 executable_stack);
	if (retval < 0)
		goto out_free_dentry;
	elf_bss = 0;
	elf_brk = 0;

	start_code = ~0UL;
	end_code = 0;
	start_data = 0;
	end_data = 0;

	/* Now we do a little grungy work by mmapping the ELF image into
	   the correct location in memory. */
	for(i = 0, elf_ppnt = elf_phdata;
	    i < elf_ex->e_phnum; i++, elf_ppnt++) {
		int elf_prot, elf_flags;
		unsigned long k, vaddr;
		unsigned long total_size = 0;

		if (elf_ppnt->p_type != PT_LOAD)
			continue;

		if (unlikely (elf_brk > elf_bss)) {
			unsigned long nbyte;

			/* There was a PT_LOAD segment with p_memsz > p_filesz
			   before this one. Map anonymous pages, if needed,
			   and clear the area.  */
			retval = set_brk(elf_bss + load_bias,
					 elf_brk + load_bias,
					 bss_prot);
			if (retval)
				goto out_free_dentry;
			nbyte = ELF_PAGEOFFSET(elf_bss);
			if (nbyte) {
				nbyte = ELF_MIN_ALIGN - nbyte;
				if (nbyte > elf_brk - elf_bss)
					nbyte = elf_brk - elf_bss;
				if (clear_user((void __user *)elf_bss +
							load_bias, nbyte)) {
					/*
					 * This bss-zeroing can fail if the ELF
					 * file specifies odd protections. So
					 * we don't check the return value
					 */
				}
			}
		}
		elf_prot = make_prot(elf_ppnt->p_flags);

		elf_flags = MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE;

		vaddr = elf_ppnt->p_vaddr;
		/*
		 * If we are loading ET_EXEC or we have already performed
		 * the ET_DYN load_addr calculations, proceed normally.
		 */
		if (elf_ex->e_type == ET_EXEC || load_addr_set) {
			elf_flags |= MAP_FIXED;
		} else if (elf_ex->e_type == ET_DYN) {
			/*
			 * This logic is run once for the first LOAD Program
			 * Header for ET_DYN binaries to calculate the
			 * randomization (load_bias) for all the LOAD
			 * Program Headers, and to calculate the entire
			 * size of the ELF mapping (total_size). (Note that
			 * load_addr_set is set to true later once the
			 * initial mapping is performed.)
			 *
			 * There are effectively two types of ET_DYN
			 * binaries: programs (i.e. PIE: ET_DYN with INTERP)
			 * and loaders (ET_DYN without INTERP, since they
			 * _are_ the ELF interpreter). The loaders must
			 * be loaded away from programs since the program
			 * may otherwise collide with the loader (especially
			 * for ET_EXEC which does not have a randomized
			 * position). For example to handle invocations of
			 * "./ld.so someprog" to test out a new version of
			 * the loader, the subsequent program that the
			 * loader loads must avoid the loader itself, so
			 * they cannot share the same load range. Sufficient
			 * room for the brk must be allocated with the
			 * loader as well, since brk must be available with
			 * the loader.
			 *
			 * Therefore, programs are loaded offset from
			 * ELF_ET_DYN_BASE and loaders are loaded into the
			 * independently randomized mmap region (0 load_bias
			 * without MAP_FIXED).
			 */
			if (interpreter) {
				load_bias = ELF_ET_DYN_BASE;
				if (current->flags & PF_RANDOMIZE)
					load_bias += arch_mmap_rnd();
				elf_flags |= MAP_FIXED;
			} else
				load_bias = 0;
			/*
			 * Since load_bias is used for all subsequent loading
			 * calculations, we must lower it by the first vaddr
			 * so that the remaining calculations based on the
			 * ELF vaddrs will be correctly offset. The result
			 * is then page aligned.
			 */
			load_bias = ELF_PAGESTART(load_bias - vaddr);

			total_size = total_mapping_size(elf_phdata,
							elf_ex->e_phnum);
			if (!total_size) {
				retval = -EINVAL;
				goto out_free_dentry;
			}
		}
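		/*
		 * Worked example for the load_bias computation above
		 * (addresses illustrative): if the first PT_LOAD has
		 * p_vaddr == 0x10000 and the randomized target is
		 * 0x7f1234568000, load_bias becomes
		 * ELF_PAGESTART(0x7f1234568000 - 0x10000) ==
		 * 0x7f1234558000, so mapping the segment at
		 * load_bias + p_vaddr lands it back on 0x7f1234568000.
		 */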
		error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
				elf_prot, elf_flags, total_size);
		if (BAD_ADDR(error)) {
			retval = IS_ERR((void *)error) ?
				PTR_ERR((void*)error) : -EINVAL;
			goto out_free_dentry;
		}
		if (!load_addr_set) {
			load_addr_set = 1;
			load_addr = (elf_ppnt->p_vaddr - elf_ppnt->p_offset);
			if (elf_ex->e_type == ET_DYN) {
				load_bias += error -
					     ELF_PAGESTART(load_bias + vaddr);
				load_addr += load_bias;
				reloc_func_desc = load_bias;
			}
		}
		k = elf_ppnt->p_vaddr;
		if ((elf_ppnt->p_flags & PF_X) && k < start_code)
			start_code = k;
		if (start_data < k)
			start_data = k;

		/*
		 * Check to see if the section's size will overflow the
		 * allowed task size. Note that p_filesz must always be
		 * <= p_memsz so it is only necessary to check p_memsz.
		 */
		if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
		    elf_ppnt->p_memsz > TASK_SIZE ||
		    TASK_SIZE - elf_ppnt->p_memsz < k) {
			/* set_brk can never work. Avoid overflows. */
			retval = -EINVAL;
			goto out_free_dentry;
		}

		k = elf_ppnt->p_vaddr + elf_ppnt->p_filesz;

		if (k > elf_bss)
			elf_bss = k;
		if ((elf_ppnt->p_flags & PF_X) && end_code < k)
			end_code = k;
		if (end_data < k)
			end_data = k;
		k = elf_ppnt->p_vaddr + elf_ppnt->p_memsz;
		if (k > elf_brk) {
			bss_prot = elf_prot;
			elf_brk = k;
		}
	}
	e_entry = elf_ex->e_entry + load_bias;
	elf_bss += load_bias;
	elf_brk += load_bias;
	start_code += load_bias;
	end_code += load_bias;
	start_data += load_bias;
	end_data += load_bias;
	/* Calling set_brk effectively mmaps the pages that we need
	 * for the bss and break sections.  We must do this before
	 * mapping in the interpreter, to make sure it doesn't wind
	 * up getting placed where the bss needs to go.
	 */
	retval = set_brk(elf_bss, elf_brk, bss_prot);
	if (retval)
		goto out_free_dentry;
	if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
		retval = -EFAULT; /* Nobody gets to see this, but.. */
		goto out_free_dentry;
	}
	if (interpreter) {
		elf_entry = load_elf_interp(&loc->interp_elf_ex,
					    interpreter,
					    load_bias, interp_elf_phdata);
		if (!IS_ERR((void *)elf_entry)) {
			/*
			 * load_elf_interp() returns relocation
			 * adjustment
			 */
			interp_load_addr = elf_entry;
			elf_entry += loc->interp_elf_ex.e_entry;
		}
		if (BAD_ADDR(elf_entry)) {
			retval = IS_ERR((void *)elf_entry) ?
					(int)elf_entry : -EINVAL;
			goto out_free_dentry;
		}
		reloc_func_desc = interp_load_addr;

		allow_write_access(interpreter);
		fput(interpreter);
	} else {
		elf_entry = e_entry;
		if (BAD_ADDR(elf_entry)) {
			retval = -EINVAL;
			goto out_free_dentry;
		}
	}
	kfree(interp_elf_phdata);
	kfree(elf_phdata);

	set_binfmt(&elf_format);
#ifdef ARCH_HAS_SETUP_ADDITIONAL_PAGES
	retval = arch_setup_additional_pages(bprm, !!interpreter);
	if (retval < 0)
		goto out;
#endif /* ARCH_HAS_SETUP_ADDITIONAL_PAGES */
	retval = create_elf_tables(bprm, elf_ex,
			  load_addr, interp_load_addr, e_entry);
	if (retval < 0)
		goto out;

	mm = current->mm;
	mm->end_code = end_code;
	mm->start_code = start_code;
	mm->start_data = start_data;
	mm->end_data = end_data;
	mm->start_stack = bprm->p;
	if ((current->flags & PF_RANDOMIZE) && (randomize_va_space > 1)) {
		/*
		 * For architectures with ELF randomization, when executing
		 * a loader directly (i.e. no interpreter listed in ELF
		 * headers), move the brk area out of the mmap region
		 * (since it grows up, and may collide early with the stack
		 * growing down), and into the unused ELF_ET_DYN_BASE region.
		 */
		if (IS_ENABLED(CONFIG_ARCH_HAS_ELF_RANDOMIZE) &&
		    elf_ex->e_type == ET_DYN && !interpreter) {
			mm->brk = mm->start_brk = ELF_ET_DYN_BASE;
		}

		mm->brk = mm->start_brk = arch_randomize_brk(mm);
#ifdef compat_brk_randomized
		current->brk_randomized = 1;
#endif
	}
	if (current->personality & MMAP_PAGE_ZERO) {
		/* Why this, you ask???  Well SVr4 maps page 0 as read-only,
		   and some applications "depend" upon this behavior.
		   Since we do not have the power to recompile these, we
		   emulate the SVr4 behavior. Sigh. */
		error = vm_mmap(NULL, 0, PAGE_SIZE, PROT_READ | PROT_EXEC,
				MAP_FIXED | MAP_PRIVATE, 0);
	}
	regs = current_pt_regs();
#ifdef ELF_PLAT_INIT
	/*
	 * The ABI may specify that certain registers be set up in special
	 * ways (on i386 %edx is the address of a DT_FINI function, for
	 * example.  In addition, it may also specify (eg, PowerPC64 ELF)
	 * that the e_entry field is the address of the function descriptor
	 * for the startup routine, rather than the address of the startup
	 * routine itself.  This macro performs whatever initialization to
	 * the regs structure is required as well as any relocations to the
	 * function descriptor entries when executing dynamically linked apps.
	 */
	ELF_PLAT_INIT(regs, reloc_func_desc);
#endif
	finalize_exec(bprm);
	start_thread(regs, elf_entry, bprm->p);
	retval = 0;
out:
	kfree(loc);
out_ret:
	return retval;

	/* error cleanup */
out_free_dentry:
	kfree(interp_elf_phdata);
	allow_write_access(interpreter);
	if (interpreter)
		fput(interpreter);
out_free_ph:
	kfree(elf_phdata);
	goto out;
}
#ifdef CONFIG_USELIB
/* This is really simpleminded and specialized - we are loading an
   a.out library that is given an ELF header. */
static int load_elf_library(struct file *file)
{
	struct elf_phdr *elf_phdata;
	struct elf_phdr *eppnt;
	unsigned long elf_bss, bss, len;
	int retval, error, i, j;
	struct elfhdr elf_ex;

	error = -ENOEXEC;
	retval = elf_read(file, &elf_ex, sizeof(elf_ex), 0);
	if (retval < 0)
		goto out;

	if (memcmp(elf_ex.e_ident, ELFMAG, SELFMAG) != 0)
		goto out;
	/* First of all, some simple consistency checks */
	if (elf_ex.e_type != ET_EXEC || elf_ex.e_phnum > 2 ||
	    !elf_check_arch(&elf_ex) || !file->f_op->mmap)
		goto out;
	if (elf_check_fdpic(&elf_ex))
		goto out;
	/* Now read in all of the header information */

	j = sizeof(struct elf_phdr) * elf_ex.e_phnum;
	/* j < ELF_MIN_ALIGN because elf_ex.e_phnum <= 2 */

	error = -ENOMEM;
	elf_phdata = kmalloc(j, GFP_KERNEL);
	if (!elf_phdata)
		goto out;

	eppnt = elf_phdata;
	error = -ENOEXEC;
	retval = elf_read(file, eppnt, j, elf_ex.e_phoff);
	if (retval < 0)
		goto out_free_ph;

	for (j = 0, i = 0; i < elf_ex.e_phnum; i++)
		if ((eppnt + i)->p_type == PT_LOAD)
			j++;
	if (j != 1)
		goto out_free_ph;

	while (eppnt->p_type != PT_LOAD)
		eppnt++;
	/* Now use mmap to map the library into memory. */
	error = vm_mmap(file,
			ELF_PAGESTART(eppnt->p_vaddr),
			(eppnt->p_filesz +
			 ELF_PAGEOFFSET(eppnt->p_vaddr)),
			PROT_READ | PROT_WRITE | PROT_EXEC,
			MAP_FIXED_NOREPLACE | MAP_PRIVATE | MAP_DENYWRITE,
			(eppnt->p_offset -
			 ELF_PAGEOFFSET(eppnt->p_vaddr)));
	if (error != ELF_PAGESTART(eppnt->p_vaddr))
		goto out_free_ph;
	elf_bss = eppnt->p_vaddr + eppnt->p_filesz;
	if (padzero(elf_bss)) {
		error = -EFAULT;
		goto out_free_ph;
	}

	len = ELF_PAGEALIGN(eppnt->p_filesz + eppnt->p_vaddr);
	bss = ELF_PAGEALIGN(eppnt->p_memsz + eppnt->p_vaddr);
	if (bss > len) {
		error = vm_brk(len, bss - len);
		if (error)
			goto out_free_ph;
	}
	error = 0;

out_free_ph:
	kfree(elf_phdata);
out:
	return error;
}
#endif /* #ifdef CONFIG_USELIB */
#ifdef CONFIG_ELF_CORE
/*
 * ELF core dumper
 *
 * Modelled on fs/exec.c:aout_core_dump()
 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
 */

/*
 * The purpose of always_dump_vma() is to make sure that special kernel mappings
 * that are useful for post-mortem analysis are included in every core dump.
 * In that way we ensure that the core dump is fully interpretable later
 * without matching up the same kernel and hardware config to see what PC values
 * meant. These special mappings include - vDSO, vsyscall, and other
 * architecture specific mappings
 */
static bool always_dump_vma(struct vm_area_struct *vma)
{
	/* Any vsyscall mappings? */
	if (vma == get_gate_vma(vma->vm_mm))
		return true;

	/*
	 * Assume that all vmas with a .name op should always be dumped.
	 * If this changes, a new vm_ops field can easily be added.
	 */
	if (vma->vm_ops && vma->vm_ops->name && vma->vm_ops->name(vma))
		return true;

	/*
	 * arch_vma_name() returns non-NULL for special architecture mappings,
	 * such as vDSO sections.
	 */
	if (arch_vma_name(vma))
		return true;

	return false;
}
/*
 * Decide what to dump of a segment, part, all or none.
 */
static unsigned long vma_dump_size(struct vm_area_struct *vma,
				   unsigned long mm_flags)
{
#define FILTER(type)	(mm_flags & (1UL << MMF_DUMP_##type))

	/* always dump the vdso and vsyscall sections */
	if (always_dump_vma(vma))
		goto whole;
	if (vma->vm_flags & VM_DONTDUMP)
		return 0;

	/* support for DAX */
	if (vma_is_dax(vma)) {
		if ((vma->vm_flags & VM_SHARED) && FILTER(DAX_SHARED))
			goto whole;
		if (!(vma->vm_flags & VM_SHARED) && FILTER(DAX_PRIVATE))
			goto whole;
		return 0;
	}

	/* Hugetlb memory check */
	if (vma->vm_flags & VM_HUGETLB) {
		if ((vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_SHARED))
			goto whole;
		if (!(vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_PRIVATE))
			goto whole;
		return 0;
	}

	/* Do not dump I/O mapped devices or special mappings */
	if (vma->vm_flags & VM_IO)
		return 0;
	/* By default, dump shared memory if mapped from an anonymous file. */
	if (vma->vm_flags & VM_SHARED) {
		if (file_inode(vma->vm_file)->i_nlink == 0 ?
		    FILTER(ANON_SHARED) : FILTER(MAPPED_SHARED))
			goto whole;
		return 0;
	}

	/* Dump segments that have been written to.  */
	if (vma->anon_vma && FILTER(ANON_PRIVATE))
		goto whole;
	if (vma->vm_file == NULL)
		return 0;

	if (FILTER(MAPPED_PRIVATE))
		goto whole;
	/*
	 * If this looks like the beginning of a DSO or executable mapping,
	 * check for an ELF header.  If we find one, dump the first page to
	 * aid in determining what was mapped here.
	 */
	if (FILTER(ELF_HEADERS) &&
	    vma->vm_pgoff == 0 && (vma->vm_flags & VM_READ)) {
		u32 __user *header = (u32 __user *) vma->vm_start;
		u32 word;
		mm_segment_t fs = get_fs();
		/*
		 * Doing it this way gets the constant folded by GCC.
		 */
		union {
			u32 cmp;
			char elfmag[SELFMAG];
		} magic;
		BUILD_BUG_ON(SELFMAG != sizeof word);
		magic.elfmag[EI_MAG0] = ELFMAG0;
		magic.elfmag[EI_MAG1] = ELFMAG1;
		magic.elfmag[EI_MAG2] = ELFMAG2;
		magic.elfmag[EI_MAG3] = ELFMAG3;
		/*
		 * Switch to the user "segment" for get_user(),
		 * then put back what elf_core_dump() had in place.
		 */
		set_fs(USER_DS);
		if (unlikely(get_user(word, header)))
			word = 0;
		set_fs(fs);
		if (word == magic.cmp)
			return PAGE_SIZE;
	}

#undef	FILTER

	return 0;

whole:
	return vma->vm_end - vma->vm_start;
}
/* An ELF note in memory */
struct memelfnote
{
	const char *name;
	int type;
	unsigned int datasz;
	void *data;
};
static int notesize(struct memelfnote *en)
{
	int sz;

	sz = sizeof(struct elf_note);
	sz += roundup(strlen(en->name) + 1, 4);
	sz += roundup(en->datasz, 4);

	return sz;
}
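/*
 * Example: a "CORE"/NT_PRSTATUS note with a 336-byte descriptor (the
 * elf_prstatus size on x86-64) costs sizeof(struct elf_note) (12) +
 * roundup(5, 4) (8) + roundup(336, 4) (336) = 356 bytes in the note
 * segment; both name and descriptor are padded to 4-byte boundaries.
 */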
static int writenote(struct memelfnote *men, struct coredump_params *cprm)
{
	struct elf_note en;
	en.n_namesz = strlen(men->name) + 1;
	en.n_descsz = men->datasz;
	en.n_type = men->type;

	return dump_emit(cprm, &en, sizeof(en)) &&
	    dump_emit(cprm, men->name, en.n_namesz) && dump_align(cprm, 4) &&
	    dump_emit(cprm, men->data, men->datasz) && dump_align(cprm, 4);
}
static void fill_elf_header(struct elfhdr *elf, int segs,
			    u16 machine, u32 flags)
{
	memset(elf, 0, sizeof(*elf));

	memcpy(elf->e_ident, ELFMAG, SELFMAG);
	elf->e_ident[EI_CLASS] = ELF_CLASS;
	elf->e_ident[EI_DATA] = ELF_DATA;
	elf->e_ident[EI_VERSION] = EV_CURRENT;
	elf->e_ident[EI_OSABI] = ELF_OSABI;

	elf->e_type = ET_CORE;
	elf->e_machine = machine;
	elf->e_version = EV_CURRENT;
	elf->e_phoff = sizeof(struct elfhdr);
	elf->e_flags = flags;
	elf->e_ehsize = sizeof(struct elfhdr);
	elf->e_phentsize = sizeof(struct elf_phdr);
	elf->e_phnum = segs;
}
static void fill_elf_note_phdr(struct elf_phdr *phdr, int sz, loff_t offset)
{
	phdr->p_type = PT_NOTE;
	phdr->p_offset = offset;
	phdr->p_vaddr = 0;
	phdr->p_paddr = 0;
	phdr->p_filesz = sz;
	phdr->p_memsz = 0;
	phdr->p_flags = 0;
	phdr->p_align = 0;
}
static void fill_note(struct memelfnote *note, const char *name, int type,
		unsigned int sz, void *data)
{
	note->name = name;
	note->type = type;
	note->datasz = sz;
	note->data = data;
}
/*
 * fill up all the fields in prstatus from the given task struct, except
 * registers which need to be filled up separately.
 */
static void fill_prstatus(struct elf_prstatus *prstatus,
		struct task_struct *p, long signr)
{
	prstatus->pr_info.si_signo = prstatus->pr_cursig = signr;
	prstatus->pr_sigpend = p->pending.signal.sig[0];
	prstatus->pr_sighold = p->blocked.sig[0];
	rcu_read_lock();
	prstatus->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent));
	rcu_read_unlock();
	prstatus->pr_pid = task_pid_vnr(p);
	prstatus->pr_pgrp = task_pgrp_vnr(p);
	prstatus->pr_sid = task_session_vnr(p);
	if (thread_group_leader(p)) {
		struct task_cputime cputime;

		/*
		 * This is the record for the group leader.  It shows the
		 * group-wide total, not its individual thread total.
		 */
		thread_group_cputime(p, &cputime);
		prstatus->pr_utime = ns_to_kernel_old_timeval(cputime.utime);
		prstatus->pr_stime = ns_to_kernel_old_timeval(cputime.stime);
	} else {
		u64 utime, stime;

		task_cputime(p, &utime, &stime);
		prstatus->pr_utime = ns_to_kernel_old_timeval(utime);
		prstatus->pr_stime = ns_to_kernel_old_timeval(stime);
	}

	prstatus->pr_cutime = ns_to_kernel_old_timeval(p->signal->cutime);
	prstatus->pr_cstime = ns_to_kernel_old_timeval(p->signal->cstime);
}
static int fill_psinfo(struct elf_prpsinfo *psinfo, struct task_struct *p,
		       struct mm_struct *mm)
{
	const struct cred *cred;
	unsigned int i, len;

	/* first copy the parameters from user space */
	memset(psinfo, 0, sizeof(struct elf_prpsinfo));

	len = mm->arg_end - mm->arg_start;
	if (len >= ELF_PRARGSZ)
		len = ELF_PRARGSZ-1;
	if (copy_from_user(&psinfo->pr_psargs,
		           (const char __user *)mm->arg_start, len))
		return -EFAULT;
	for(i = 0; i < len; i++)
		if (psinfo->pr_psargs[i] == 0)
			psinfo->pr_psargs[i] = ' ';
	psinfo->pr_psargs[len] = 0;

	rcu_read_lock();
	psinfo->pr_ppid = task_pid_vnr(rcu_dereference(p->real_parent));
	rcu_read_unlock();
	psinfo->pr_pid = task_pid_vnr(p);
	psinfo->pr_pgrp = task_pgrp_vnr(p);
	psinfo->pr_sid = task_session_vnr(p);

	i = p->state ? ffz(~p->state) + 1 : 0;
	psinfo->pr_state = i;
	psinfo->pr_sname = (i > 5) ? '.' : "RSDTZW"[i];
	psinfo->pr_zomb = psinfo->pr_sname == 'Z';
	psinfo->pr_nice = task_nice(p);
	psinfo->pr_flag = p->flags;
	rcu_read_lock();
	cred = __task_cred(p);
	SET_UID(psinfo->pr_uid, from_kuid_munged(cred->user_ns, cred->uid));
	SET_GID(psinfo->pr_gid, from_kgid_munged(cred->user_ns, cred->gid));
	rcu_read_unlock();
	strncpy(psinfo->pr_fname, p->comm, sizeof(psinfo->pr_fname));

	return 0;
}
static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
{
	elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
	int i = 0;
	do
		i += 2;
	while (auxv[i - 2] != AT_NULL);
	fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
}
static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
		const kernel_siginfo_t *siginfo)
{
	mm_segment_t old_fs = get_fs();
	set_fs(KERNEL_DS);
	copy_siginfo_to_user((user_siginfo_t __user *) csigdata, siginfo);
	set_fs(old_fs);
	fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
}
#define MAX_FILE_NOTE_SIZE (4*1024*1024)
/*
 * Format of NT_FILE note:
 *
 * long count     -- how many files are mapped
 * long page_size -- units for file_ofs
 * array of [COUNT] elements of
 *   long start
 *   long end
 *   long file_ofs
 * followed by COUNT filenames in ASCII: "FILE1" NUL "FILE2" NUL...
 */
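/*
 * For example, a note describing two mappings of /lib/libc.so would
 * carry: count = 2, page_size = PAGE_SIZE, two (start, end, file_ofs)
 * triples, then "/lib/libc.so\0/lib/libc.so\0". file_ofs is expressed
 * in page_size units, matching the vma->vm_pgoff stored below.
 */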
static int fill_files_note(struct memelfnote *note)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned count, size, names_ofs, remaining, n;
	user_long_t *data;
	user_long_t *start_end_ofs;
	char *name_base, *name_curpos;

	/* *Estimated* file count and total data size needed */
	count = mm->map_count;
	if (count > UINT_MAX / 64)
		return -EINVAL;
	size = count * 64;

	names_ofs = (2 + 3 * count) * sizeof(data[0]);
 alloc:
	if (size >= MAX_FILE_NOTE_SIZE) /* paranoia check */
		return -EINVAL;
	size = round_up(size, PAGE_SIZE);
	data = kvmalloc(size, GFP_KERNEL);
	if (ZERO_OR_NULL_PTR(data))
		return -ENOMEM;

	start_end_ofs = data + 2;
	name_base = name_curpos = ((char *)data) + names_ofs;
	remaining = size - names_ofs;
	count = 0;

	for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
		struct file *file;
		const char *filename;

		file = vma->vm_file;
		if (!file)
			continue;
		filename = file_path(file, name_curpos, remaining);
		if (IS_ERR(filename)) {
			if (PTR_ERR(filename) == -ENAMETOOLONG) {
				kvfree(data);
				size = size * 5 / 4;
				goto alloc;
			}
			continue;
		}

		/* file_path() fills at the end, move name down */
		/* n = strlen(filename) + 1: */
		n = (name_curpos + remaining) - filename;
		remaining = filename - name_curpos;
		memmove(name_curpos, filename, n);
		name_curpos += n;

		*start_end_ofs++ = vma->vm_start;
		*start_end_ofs++ = vma->vm_end;
		*start_end_ofs++ = vma->vm_pgoff;
		count++;
	}

	/* Now we know exact count of files, can store it */
	data[0] = count;
	data[1] = PAGE_SIZE;
	/*
	 * Count usually is less than mm->map_count,
	 * we need to move filenames down.
	 */
	n = mm->map_count - count;
	if (n != 0) {
		unsigned shift_bytes = n * 3 * sizeof(data[0]);
		memmove(name_base - shift_bytes, name_base,
			name_curpos - name_base);
		name_curpos -= shift_bytes;
	}

	size = name_curpos - (char *)data;
	fill_note(note, "CORE", NT_FILE, size, data);
	return 0;
}
#ifdef CORE_DUMP_USE_REGSET
#include <linux/regset.h>
struct elf_thread_core_info {
	struct elf_thread_core_info *next;
	struct task_struct *task;
	struct elf_prstatus prstatus;
	struct memelfnote notes[0];
};
struct elf_note_info {
	struct elf_thread_core_info *thread;
	struct memelfnote psinfo;
	struct memelfnote signote;
	struct memelfnote auxv;
	struct memelfnote files;
	user_siginfo_t csigdata;
	size_t size;
	int thread_notes;
};
/*
 * When a regset has a writeback hook, we call it on each thread before
 * dumping user memory. On register window machines, this makes sure the
 * user memory backing the register data is up to date before we read it.
 */
static void do_thread_regset_writeback(struct task_struct *task,
				       const struct user_regset *regset)
{
	if (regset->writeback)
		regset->writeback(task, regset, 1);
}
#ifndef PRSTATUS_SIZE
#define PRSTATUS_SIZE(S, R) sizeof(S)
#endif

#ifndef SET_PR_FPVALID
#define SET_PR_FPVALID(S, V, R) ((S)->pr_fpvalid = (V))
#endif
static int fill_thread_core_info(struct elf_thread_core_info *t,
				 const struct user_regset_view *view,
				 long signr, size_t *total)
{
	unsigned int i;
	unsigned int regset0_size = regset_size(t->task, &view->regsets[0]);

	/*
	 * NT_PRSTATUS is the one special case, because the regset data
	 * goes into the pr_reg field inside the note contents, rather
	 * than being the whole note contents. We fill the rest in here.
	 * We assume that regset 0 is NT_PRSTATUS.
	 */
	fill_prstatus(&t->prstatus, t->task, signr);
	(void) view->regsets[0].get(t->task, &view->regsets[0], 0, regset0_size,
				    &t->prstatus.pr_reg, NULL);

	fill_note(&t->notes[0], "CORE", NT_PRSTATUS,
		  PRSTATUS_SIZE(t->prstatus, regset0_size), &t->prstatus);
	*total += notesize(&t->notes[0]);

	do_thread_regset_writeback(t->task, &view->regsets[0]);

	/*
	 * Each other regset might generate a note too.  For each regset
	 * that has no core_note_type or is inactive, we leave t->notes[i]
	 * all zero and we'll know to skip writing it later.
	 */
	for (i = 1; i < view->n; ++i) {
		const struct user_regset *regset = &view->regsets[i];
		do_thread_regset_writeback(t->task, regset);
		if (regset->core_note_type && regset->get &&
		    (!regset->active || regset->active(t->task, regset) > 0)) {
			int ret;
			size_t size = regset_size(t->task, regset);
			void *data = kmalloc(size, GFP_KERNEL);
			if (unlikely(!data))
				return 0;
			ret = regset->get(t->task, regset,
					  0, size, data, NULL);
			if (unlikely(ret))
				kfree(data);
			else {
				if (regset->core_note_type != NT_PRFPREG)
					fill_note(&t->notes[i], "LINUX",
						  regset->core_note_type,
						  size, data);
				else {
					SET_PR_FPVALID(&t->prstatus,
							1, regset0_size);
					fill_note(&t->notes[i], "CORE",
						  NT_PRFPREG, size, data);
				}
				*total += notesize(&t->notes[i]);
			}
		}
	}

	return 1;
}
static int fill_note_info(struct elfhdr *elf, int phdrs,
			  struct elf_note_info *info,
			  const kernel_siginfo_t *siginfo, struct pt_regs *regs)
{
	struct task_struct *dump_task = current;
	const struct user_regset_view *view = task_user_regset_view(dump_task);
	struct elf_thread_core_info *t;
	struct elf_prpsinfo *psinfo;
	struct core_thread *ct;
	unsigned int i;

	info->size = 0;
	info->thread = NULL;

	psinfo = kmalloc(sizeof(*psinfo), GFP_KERNEL);
	if (psinfo == NULL) {
		info->psinfo.data = NULL; /* So we don't free this wrongly */
		return 0;
	}

	fill_note(&info->psinfo, "CORE", NT_PRPSINFO, sizeof(*psinfo), psinfo);

	/*
	 * Figure out how many notes we're going to need for each thread.
	 */
	info->thread_notes = 0;
	for (i = 0; i < view->n; ++i)
		if (view->regsets[i].core_note_type != 0)
			++info->thread_notes;

	/*
	 * Sanity check.  We rely on regset 0 being in NT_PRSTATUS,
	 * since it is our one special case.
	 */
	if (unlikely(info->thread_notes == 0) ||
	    unlikely(view->regsets[0].core_note_type != NT_PRSTATUS)) {
		WARN_ON(1);
		return 0;
	}

	/*
	 * Initialize the ELF file header.
	 */
	fill_elf_header(elf, phdrs,
			view->e_machine, view->e_flags);

	/*
	 * Allocate a structure for each thread.
	 */
	for (ct = &dump_task->mm->core_state->dumper; ct; ct = ct->next) {
		t = kzalloc(offsetof(struct elf_thread_core_info,
				     notes[info->thread_notes]),
			    GFP_KERNEL);
		if (unlikely(!t))
			return 0;

		t->task = ct->task;
		if (ct->task == dump_task || !info->thread) {
			t->next = info->thread;
			info->thread = t;
		} else {
			/*
			 * Make sure to keep the original task at
			 * the head of the list.
			 */
			t->next = info->thread->next;
			info->thread->next = t;
		}
	}

	/*
	 * Now fill in each thread's information.
	 */
	for (t = info->thread; t != NULL; t = t->next)
		if (!fill_thread_core_info(t, view, siginfo->si_signo, &info->size))
			return 0;

	/*
	 * Fill in the two process-wide notes.
	 */
	fill_psinfo(psinfo, dump_task->group_leader, dump_task->mm);
	info->size += notesize(&info->psinfo);

	fill_siginfo_note(&info->signote, &info->csigdata, siginfo);
	info->size += notesize(&info->signote);

	fill_auxv_note(&info->auxv, current->mm);
	info->size += notesize(&info->auxv);

	if (fill_files_note(&info->files) == 0)
		info->size += notesize(&info->files);

	return 1;
}
static size_t get_note_info_size(struct elf_note_info *info)
{
	return info->size;
}
/*
 * Write all the notes for each thread.  When writing the first thread, the
 * process-wide notes are interleaved after the first thread-specific note.
 */
static int write_note_info(struct elf_note_info *info,
			   struct coredump_params *cprm)
{
	bool first = true;
	struct elf_thread_core_info *t = info->thread;

	do {
		int i;

		if (!writenote(&t->notes[0], cprm))
			return 0;

		if (first && !writenote(&info->psinfo, cprm))
			return 0;
		if (first && !writenote(&info->signote, cprm))
			return 0;
		if (first && !writenote(&info->auxv, cprm))
			return 0;
		if (first && info->files.data &&
				!writenote(&info->files, cprm))
			return 0;

		for (i = 1; i < info->thread_notes; ++i)
			if (t->notes[i].data &&
			    !writenote(&t->notes[i], cprm))
				return 0;

		first = false;
		t = t->next;
	} while (t);

	return 1;
}
static void free_note_info(struct elf_note_info *info)
{
	struct elf_thread_core_info *threads = info->thread;
	while (threads) {
		unsigned int i;
		struct elf_thread_core_info *t = threads;
		threads = t->next;
		WARN_ON(t->notes[0].data && t->notes[0].data != &t->prstatus);
		for (i = 1; i < info->thread_notes; ++i)
			kfree(t->notes[i].data);
		kfree(t);
	}
	kfree(info->psinfo.data);
	kvfree(info->files.data);
}

#else
/* Here is the structure in which status of each thread is captured. */
struct elf_thread_status
{
	struct list_head list;
	struct elf_prstatus prstatus;	/* NT_PRSTATUS */
	elf_fpregset_t fpu;		/* NT_PRFPREG */
	struct task_struct *thread;
#ifdef ELF_CORE_COPY_XFPREGS
	elf_fpxregset_t xfpu;		/* ELF_CORE_XFPREG_TYPE */
#endif
	struct memelfnote notes[3];
	int num_notes;
};
/*
 * In order to add the specific thread information for the elf file format,
 * we need to keep a linked list of every thread's pr_status and then create
 * a single section for them in the final core file.
 */
static int elf_dump_thread_status(long signr, struct elf_thread_status *t)
{
	int sz = 0;
	struct task_struct *p = t->thread;
	t->num_notes = 0;

	fill_prstatus(&t->prstatus, p, signr);
	elf_core_copy_task_regs(p, &t->prstatus.pr_reg);

	fill_note(&t->notes[0], "CORE", NT_PRSTATUS, sizeof(t->prstatus),
		  &(t->prstatus));
	t->num_notes++;
	sz += notesize(&t->notes[0]);

	if ((t->prstatus.pr_fpvalid = elf_core_copy_task_fpregs(p, NULL,
								&t->fpu))) {
		fill_note(&t->notes[1], "CORE", NT_PRFPREG, sizeof(t->fpu),
			  &(t->fpu));
		t->num_notes++;
		sz += notesize(&t->notes[1]);
	}

#ifdef ELF_CORE_COPY_XFPREGS
	if (elf_core_copy_task_xfpregs(p, &t->xfpu)) {
		fill_note(&t->notes[2], "LINUX", ELF_CORE_XFPREG_TYPE,
			  sizeof(t->xfpu), &t->xfpu);
		t->num_notes++;
		sz += notesize(&t->notes[2]);
	}
#endif
	return sz;
}
struct elf_note_info {
	struct memelfnote *notes;
	struct memelfnote *notes_files;
	struct elf_prstatus *prstatus;	/* NT_PRSTATUS */
	struct elf_prpsinfo *psinfo;	/* NT_PRPSINFO */
	struct list_head thread_list;
	elf_fpregset_t *fpu;
#ifdef ELF_CORE_COPY_XFPREGS
	elf_fpxregset_t *xfpu;
#endif
	user_siginfo_t csigdata;
	int thread_status_size;
	int numnote;
};
static int elf_note_info_init(struct elf_note_info *info)
{
	memset(info, 0, sizeof(*info));
	INIT_LIST_HEAD(&info->thread_list);

	/* Allocate space for ELF notes */
	info->notes = kmalloc_array(8, sizeof(struct memelfnote), GFP_KERNEL);
	if (!info->notes)
		return 0;
	info->psinfo = kmalloc(sizeof(*info->psinfo), GFP_KERNEL);
	if (!info->psinfo)
		return 0;
	info->prstatus = kmalloc(sizeof(*info->prstatus), GFP_KERNEL);
	if (!info->prstatus)
		return 0;
	info->fpu = kmalloc(sizeof(*info->fpu), GFP_KERNEL);
	if (!info->fpu)
		return 0;
#ifdef ELF_CORE_COPY_XFPREGS
	info->xfpu = kmalloc(sizeof(*info->xfpu), GFP_KERNEL);
	if (!info->xfpu)
		return 0;
#endif
	return 1;
}
static int fill_note_info(struct elfhdr *elf, int phdrs,
			  struct elf_note_info *info,
			  const kernel_siginfo_t *siginfo, struct pt_regs *regs)
{
	struct core_thread *ct;
	struct elf_thread_status *ets;

	if (!elf_note_info_init(info))
		return 0;

	for (ct = current->mm->core_state->dumper.next;
					ct; ct = ct->next) {
		ets = kzalloc(sizeof(*ets), GFP_KERNEL);
		if (!ets)
			return 0;

		ets->thread = ct->task;
		list_add(&ets->list, &info->thread_list);
	}

	list_for_each_entry(ets, &info->thread_list, list) {
		int sz;

		sz = elf_dump_thread_status(siginfo->si_signo, ets);
		info->thread_status_size += sz;
	}
	/* now collect the dump for the current */
	memset(info->prstatus, 0, sizeof(*info->prstatus));
	fill_prstatus(info->prstatus, current, siginfo->si_signo);
	elf_core_copy_regs(&info->prstatus->pr_reg, regs);

	/* Set up header */
	fill_elf_header(elf, phdrs, ELF_ARCH, ELF_CORE_EFLAGS);

	/*
	 * Set up the notes in similar form to SVR4 core dumps made
	 * with info from their /proc.
	 */

	fill_note(info->notes + 0, "CORE", NT_PRSTATUS,
		  sizeof(*info->prstatus), info->prstatus);
	fill_psinfo(info->psinfo, current->group_leader, current->mm);
	fill_note(info->notes + 1, "CORE", NT_PRPSINFO,
		  sizeof(*info->psinfo), info->psinfo);

	fill_siginfo_note(info->notes + 2, &info->csigdata, siginfo);
	fill_auxv_note(info->notes + 3, current->mm);
	info->numnote = 4;

	if (fill_files_note(info->notes + info->numnote) == 0) {
		info->notes_files = info->notes + info->numnote;
		info->numnote++;
	}

	/* Try to dump the FPU. */
	info->prstatus->pr_fpvalid = elf_core_copy_task_fpregs(current, regs,
							       info->fpu);
	if (info->prstatus->pr_fpvalid)
		fill_note(info->notes + info->numnote++,
			  "CORE", NT_PRFPREG, sizeof(*info->fpu), info->fpu);
#ifdef ELF_CORE_COPY_XFPREGS
	if (elf_core_copy_task_xfpregs(current, info->xfpu))
		fill_note(info->notes + info->numnote++,
			  "LINUX", ELF_CORE_XFPREG_TYPE,
			  sizeof(*info->xfpu), info->xfpu);
#endif

	return 1;
}
static size_t get_note_info_size(struct elf_note_info *info)
{
	int sz = 0;
	int i;

	for (i = 0; i < info->numnote; i++)
		sz += notesize(info->notes + i);

	sz += info->thread_status_size;

	return sz;
}
static int write_note_info(struct elf_note_info *info,
			   struct coredump_params *cprm)
{
	struct elf_thread_status *ets;
	int i;

	for (i = 0; i < info->numnote; i++)
		if (!writenote(info->notes + i, cprm))
			return 0;

	/* write out the thread status notes section */
	list_for_each_entry(ets, &info->thread_list, list) {
		for (i = 0; i < ets->num_notes; i++)
			if (!writenote(&ets->notes[i], cprm))
				return 0;
	}

	return 1;
}
static void free_note_info(struct elf_note_info *info)
{
	while (!list_empty(&info->thread_list)) {
		struct list_head *tmp = info->thread_list.next;
		list_del(tmp);
		kfree(list_entry(tmp, struct elf_thread_status, list));
	}

	/* Free data possibly allocated by fill_files_note(): */
	if (info->notes_files)
		kvfree(info->notes_files->data);

	kfree(info->prstatus);
	kfree(info->psinfo);
	kfree(info->notes);
	kfree(info->fpu);
#ifdef ELF_CORE_COPY_XFPREGS
	kfree(info->xfpu);
#endif
}

#endif
static struct vm_area_struct *first_vma(struct task_struct *tsk,
					struct vm_area_struct *gate_vma)
{
	struct vm_area_struct *ret = tsk->mm->mmap;

	if (ret)
		return ret;
	return gate_vma;
}
/*
 * Helper function for iterating across a vma list.  It ensures that the caller
 * will visit `gate_vma' prior to terminating the search.
 */
static struct vm_area_struct *next_vma(struct vm_area_struct *this_vma,
				       struct vm_area_struct *gate_vma)
{
	struct vm_area_struct *ret;

	ret = this_vma->vm_next;
	if (ret)
		return ret;
	if (this_vma == gate_vma)
		return NULL;
	return gate_vma;
}
static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
			     elf_addr_t e_shoff, int segs)
{
	elf->e_shoff = e_shoff;
	elf->e_shentsize = sizeof(*shdr4extnum);
	elf->e_shnum = 1;
	elf->e_shstrndx = SHN_UNDEF;

	memset(shdr4extnum, 0, sizeof(*shdr4extnum));

	shdr4extnum->sh_type = SHT_NULL;
	shdr4extnum->sh_size = elf->e_shnum;
	shdr4extnum->sh_link = elf->e_shstrndx;
	shdr4extnum->sh_info = segs;
}
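/*
 * This implements the ELF extended-numbering scheme: when the real
 * segment count does not fit in the 16-bit e_phnum, e_phnum is set to
 * PN_XNUM and the true count is stashed in sh_info of section header
 * 0, the only section header this core writer emits.
 */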
/*
 * Actual dumper
 *
 * This is a two-pass process; first we find the offsets of the bits,
 * and then they are actually written out.  If we run out of core limit
 * we just truncate.
 */
static int elf_core_dump(struct coredump_params *cprm)
{
	int has_dumped = 0;
	mm_segment_t fs;
	int segs, i;
	size_t vma_data_size = 0;
	struct vm_area_struct *vma, *gate_vma;
	struct elfhdr elf;
	loff_t offset = 0, dataoff;
	struct elf_note_info info = { };
	struct elf_phdr *phdr4note = NULL;
	struct elf_shdr *shdr4extnum = NULL;
	Elf_Half e_phnum;
	elf_addr_t e_shoff;
	elf_addr_t *vma_filesz = NULL;
	/*
	 * We no longer stop all VM operations.
	 *
	 * This is because those processes that could possibly change map_count
	 * or the mmap / vma pages are now blocked in do_exit on current
	 * finishing this core dump.
	 *
	 * Only ptrace can touch these memory addresses, but it doesn't change
	 * the map_count or the pages allocated. So no possibility of crashing
	 * exists while dumping the mm->vm_next areas to the core file.
	 */
	/*
	 * The number of segs is recorded into the ELF header as a 16bit value.
	 * Please check DEFAULT_MAX_MAP_COUNT definition when you modify here.
	 */
	segs = current->mm->map_count;
	segs += elf_core_extra_phdrs();

	gate_vma = get_gate_vma(current->mm);
	if (gate_vma != NULL)
		segs++;

	/* for notes section */
	segs++;

	/* If segs > PN_XNUM(0xffff), then e_phnum overflows. To avoid
	 * this, kernel supports extended numbering. Have a look at
	 * include/linux/elf.h for further information. */
	e_phnum = segs > PN_XNUM ? PN_XNUM : segs;
	/*
	 * Collect all the non-memory information about the process for the
	 * notes.  This also sets up the file header.
	 */
	if (!fill_note_info(&elf, e_phnum, &info, cprm->siginfo, cprm->regs))
		goto cleanup;

	has_dumped = 1;

	fs = get_fs();
	set_fs(KERNEL_DS);

	offset += sizeof(elf);				/* Elf header */
	offset += segs * sizeof(struct elf_phdr);	/* Program headers */
	/* Write notes phdr entry */
	{
		size_t sz = get_note_info_size(&info);

		sz += elf_coredump_extra_notes_size();

		phdr4note = kmalloc(sizeof(*phdr4note), GFP_KERNEL);
		if (!phdr4note)
			goto end_coredump;

		fill_elf_note_phdr(phdr4note, sz, offset);
		offset += sz;
	}
	dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);

	vma_filesz = kvmalloc(array_size(sizeof(*vma_filesz), (segs - 1)),
			      GFP_KERNEL);
	if (ZERO_OR_NULL_PTR(vma_filesz))
		goto end_coredump;
	for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
			vma = next_vma(vma, gate_vma)) {
		unsigned long dump_size;

		dump_size = vma_dump_size(vma, cprm->mm_flags);
		vma_filesz[i++] = dump_size;
		vma_data_size += dump_size;
	}

	offset += vma_data_size;
	offset += elf_core_extra_data_size();
	e_shoff = offset;
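	/*
	 * At this point the first pass is done and "offset" describes the
	 * finished file: ELF header, program headers, the note segment,
	 * padding up to dataoff (page aligned), each vma's data in phdr
	 * order, any arch extra data, plus the single extended section
	 * header when e_phnum == PN_XNUM. The second pass below emits
	 * exactly that.
	 */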
	if (e_phnum == PN_XNUM) {
		shdr4extnum = kmalloc(sizeof(*shdr4extnum), GFP_KERNEL);
		if (!shdr4extnum)
			goto end_coredump;
		fill_extnum_info(&elf, shdr4extnum, e_shoff, segs);
	}

	offset = dataoff;
	if (!dump_emit(cprm, &elf, sizeof(elf)))
		goto end_coredump;

	if (!dump_emit(cprm, phdr4note, sizeof(*phdr4note)))
		goto end_coredump;
	/* Write program headers for segments dump */
	for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
			vma = next_vma(vma, gate_vma)) {
		struct elf_phdr phdr;

		phdr.p_type = PT_LOAD;
		phdr.p_offset = offset;
		phdr.p_vaddr = vma->vm_start;
		phdr.p_paddr = 0;
		phdr.p_filesz = vma_filesz[i++];
		phdr.p_memsz = vma->vm_end - vma->vm_start;
		offset += phdr.p_filesz;
		phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
		if (vma->vm_flags & VM_WRITE)
			phdr.p_flags |= PF_W;
		if (vma->vm_flags & VM_EXEC)
			phdr.p_flags |= PF_X;
		phdr.p_align = ELF_EXEC_PAGESIZE;

		if (!dump_emit(cprm, &phdr, sizeof(phdr)))
			goto end_coredump;
	}

	if (!elf_core_write_extra_phdrs(cprm, offset))
		goto end_coredump;
	/* write out the notes section */
	if (!write_note_info(&info, cprm))
		goto end_coredump;

	if (elf_coredump_extra_notes_write(cprm))
		goto end_coredump;

	/* Align to page */
	if (!dump_skip(cprm, dataoff - cprm->pos))
		goto end_coredump;
	for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
			vma = next_vma(vma, gate_vma)) {
		unsigned long addr;
		unsigned long end;

		end = vma->vm_start + vma_filesz[i++];

		for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
			struct page *page;
			int stop;

			page = get_dump_page(addr);
			if (page) {
				void *kaddr = kmap(page);
				stop = !dump_emit(cprm, kaddr, PAGE_SIZE);
				kunmap(page);
				put_page(page);
			} else
				stop = !dump_skip(cprm, PAGE_SIZE);
			if (stop)
				goto end_coredump;
		}
	}
	dump_truncate(cprm);
	if (!elf_core_write_extra_data(cprm))
		goto end_coredump;

	if (e_phnum == PN_XNUM) {
		if (!dump_emit(cprm, shdr4extnum, sizeof(*shdr4extnum)))
			goto end_coredump;
	}

end_coredump:
	set_fs(fs);

cleanup:
	free_note_info(&info);
	kfree(shdr4extnum);
	kvfree(vma_filesz);
	kfree(phdr4note);
	return has_dumped;
}

#endif		/* CONFIG_ELF_CORE */
static int __init init_elf_binfmt(void)
{
	register_binfmt(&elf_format);
	return 0;
}

static void __exit exit_elf_binfmt(void)
{
	/* Remove the COFF and ELF loaders. */
	unregister_binfmt(&elf_format);
}

core_initcall(init_elf_binfmt);
module_exit(exit_elf_binfmt);
MODULE_LICENSE("GPL");