// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/fs/exec.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 */

/*
 * #!-checking implemented by tytso.
 */
/*
 * Demand-loading implemented 01.12.91 - no need to read anything but
 * the header into memory. The inode of the executable is put into
 * "current->executable", and page faults do the actual loading. Clean.
 *
 * Once more I can proudly say that linux stood up to being changed: it
 * was less than 2 hours work to get demand-loading completely implemented.
 *
 * Demand loading changed July 1993 by Eric Youngdale.  Use mmap instead,
 * current->executable is only used by the procfs.  This allows a dispatch
 * table to check for several different types of binary formats.  We keep
 * trying until we recognize the file or we run out of supported binary
 * formats.
 */

#include <linux/kernel_read_file.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/mm.h>
#include <linux/vmacache.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/swap.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/coredump.h>
#include <linux/sched/signal.h>
#include <linux/sched/numa_balancing.h>
#include <linux/sched/task.h>
#include <linux/pagemap.h>
#include <linux/perf_event.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/key.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <linux/utsname.h>
#include <linux/pid_namespace.h>
#include <linux/module.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/audit.h>
#include <linux/tracehook.h>
#include <linux/kmod.h>
#include <linux/fsnotify.h>
#include <linux/fs_struct.h>
#include <linux/oom.h>
#include <linux/compat.h>
#include <linux/vmalloc.h>

#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>

#include <trace/events/task.h>
#include "internal.h"

#include <trace/events/sched.h>

static int bprm_creds_from_file(struct linux_binprm *bprm);

int suid_dumpable = 0;

static LIST_HEAD(formats);
static DEFINE_RWLOCK(binfmt_lock);

void __register_binfmt(struct linux_binfmt * fmt, int insert)
{
        BUG_ON(!fmt);
        if (WARN_ON(!fmt->load_binary))
                return;
        write_lock(&binfmt_lock);
        insert ? list_add(&fmt->lh, &formats) :
                 list_add_tail(&fmt->lh, &formats);
        write_unlock(&binfmt_lock);
}

EXPORT_SYMBOL(__register_binfmt);

void unregister_binfmt(struct linux_binfmt * fmt)
{
        write_lock(&binfmt_lock);
        list_del(&fmt->lh);
        write_unlock(&binfmt_lock);
}

EXPORT_SYMBOL(unregister_binfmt);
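
/*
 * Illustrative sketch (not part of this file): a format handler is
 * normally registered from its module init code through the
 * register_binfmt() wrapper in <linux/binfmts.h>, which appends at the
 * tail of "formats". "example_load_binary" and the names below are
 * hypothetical.
 *
 *      static int example_load_binary(struct linux_binprm *bprm);
 *
 *      static struct linux_binfmt example_format = {
 *              .module      = THIS_MODULE,
 *              .load_binary = example_load_binary,
 *      };
 *
 *      static int __init example_init(void)
 *      {
 *              register_binfmt(&example_format);
 *              return 0;
 *      }
 *
 *      static void __exit example_exit(void)
 *      {
 *              unregister_binfmt(&example_format);
 *      }
 */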

static inline void put_binfmt(struct linux_binfmt * fmt)
{
        module_put(fmt->module);
}

bool path_noexec(const struct path *path)
{
        return (path->mnt->mnt_flags & MNT_NOEXEC) ||
               (path->mnt->mnt_sb->s_iflags & SB_I_NOEXEC);
}
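
/*
 * For example, execve() of a file on a filesystem mounted with "-o noexec"
 * is rejected: do_open_execat() below fails with -EACCES once
 * path_noexec() reports true for the file's path.
 */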

#ifdef CONFIG_USELIB
/*
 * Note that a shared library must be both readable and executable for
 * security reasons.
 *
 * Also note that the address to load from is taken from the file itself.
 */
SYSCALL_DEFINE1(uselib, const char __user *, library)
{
        struct linux_binfmt *fmt;
        struct file *file;
        struct filename *tmp = getname(library);
        int error = PTR_ERR(tmp);
        static const struct open_flags uselib_flags = {
                .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
                .acc_mode = MAY_READ | MAY_EXEC,
                .intent = LOOKUP_OPEN,
                .lookup_flags = LOOKUP_FOLLOW,
        };

        if (IS_ERR(tmp))
                goto out;

        file = do_filp_open(AT_FDCWD, tmp, &uselib_flags);
        putname(tmp);
        error = PTR_ERR(file);
        if (IS_ERR(file))
                goto out;

        /*
         * may_open() has already checked for this, so it should be
         * impossible to trip now. But we need to be extra cautious
         * and check again at the very end too.
         */
        error = -EACCES;
        if (WARN_ON_ONCE(!S_ISREG(file_inode(file)->i_mode) ||
                         path_noexec(&file->f_path)))
                goto exit;

        fsnotify_open(file);

        error = -ENOEXEC;

        read_lock(&binfmt_lock);
        list_for_each_entry(fmt, &formats, lh) {
                if (!fmt->load_shlib)
                        continue;
                if (!try_module_get(fmt->module))
                        continue;
                read_unlock(&binfmt_lock);
                error = fmt->load_shlib(file);
                read_lock(&binfmt_lock);
                put_binfmt(fmt);
                if (error != -ENOEXEC)
                        break;
        }
        read_unlock(&binfmt_lock);
exit:
        fput(file);
out:
        return error;
}
#endif /* #ifdef CONFIG_USELIB */

#ifdef CONFIG_MMU
/*
 * The nascent bprm->mm is not visible until exec_mmap(), but it can
 * use a lot of memory, so account these pages in current->mm temporarily
 * for oom_badness()->get_mm_rss(). Once exec succeeds or fails, we
 * change the counter back via acct_arg_size(0).
 */
static void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
{
        struct mm_struct *mm = current->mm;
        long diff = (long)(pages - bprm->vma_pages);

        if (!mm || !diff)
                return;

        bprm->vma_pages = pages;
        add_mm_counter(mm, MM_ANONPAGES, diff);
}

static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
                int write)
{
        struct page *page;
        int ret;
        unsigned int gup_flags = FOLL_FORCE;

#ifdef CONFIG_STACK_GROWSUP
        if (write) {
                ret = expand_downwards(bprm->vma, pos);
                if (ret < 0)
                        return NULL;
        }
#endif

        if (write)
                gup_flags |= FOLL_WRITE;

        /*
         * We are doing an exec().  'current' is the process
         * doing the exec and bprm->mm is the new process's mm.
         */
        ret = get_user_pages_remote(bprm->mm, pos, 1, gup_flags,
                        &page, NULL, NULL);
        if (ret <= 0)
                return NULL;

        if (write)
                acct_arg_size(bprm, vma_pages(bprm->vma));

        return page;
}

static void put_arg_page(struct page *page)
{
        put_page(page);
}

static void free_arg_pages(struct linux_binprm *bprm)
{
}

static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
                struct page *page)
{
        flush_cache_page(bprm->vma, pos, page_to_pfn(page));
}

static int __bprm_mm_init(struct linux_binprm *bprm)
{
        int err;
        struct vm_area_struct *vma = NULL;
        struct mm_struct *mm = bprm->mm;

        bprm->vma = vma = vm_area_alloc(mm);
        if (!vma)
                return -ENOMEM;
        vma_set_anonymous(vma);

        if (mmap_write_lock_killable(mm)) {
                err = -EINTR;
                goto err_free;
        }

        /*
         * Place the stack at the largest stack address the architecture
         * supports. Later, we'll move this to an appropriate place. We don't
         * use STACK_TOP because that can depend on attributes which aren't
         * configured yet.
         */
        BUILD_BUG_ON(VM_STACK_FLAGS & VM_STACK_INCOMPLETE_SETUP);
        vma->vm_end = STACK_TOP_MAX;
        vma->vm_start = vma->vm_end - PAGE_SIZE;
        vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
        vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);

        err = insert_vm_struct(mm, vma);
        if (err)
                goto err;

        mm->stack_vm = mm->total_vm = 1;
        mmap_write_unlock(mm);
        bprm->p = vma->vm_end - sizeof(void *);
        return 0;
err:
        mmap_write_unlock(mm);
err_free:
        bprm->vma = NULL;
        vm_area_free(vma);
        return err;
}

static bool valid_arg_len(struct linux_binprm *bprm, long len)
{
        return len <= MAX_ARG_STRLEN;
}

#else

static inline void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
{
}

static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
                int write)
{
        struct page *page;

        page = bprm->page[pos / PAGE_SIZE];
        if (!page && write) {
                page = alloc_page(GFP_HIGHUSER|__GFP_ZERO);
                if (!page)
                        return NULL;
                bprm->page[pos / PAGE_SIZE] = page;
        }

        return page;
}

static void put_arg_page(struct page *page)
{
}

static void free_arg_page(struct linux_binprm *bprm, int i)
{
        if (bprm->page[i]) {
                __free_page(bprm->page[i]);
                bprm->page[i] = NULL;
        }
}

static void free_arg_pages(struct linux_binprm *bprm)
{
        int i;

        for (i = 0; i < MAX_ARG_PAGES; i++)
                free_arg_page(bprm, i);
}

static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
                struct page *page)
{
}

static int __bprm_mm_init(struct linux_binprm *bprm)
{
        bprm->p = PAGE_SIZE * MAX_ARG_PAGES - sizeof(void *);
        return 0;
}

static bool valid_arg_len(struct linux_binprm *bprm, long len)
{
        return len <= bprm->p;
}

#endif /* CONFIG_MMU */

/*
 * Create a new mm_struct and populate it with a temporary stack
 * vm_area_struct.  We don't have enough context at this point to set the stack
 * flags, permissions, and offset, so we use temporary values.  We'll update
 * them later in setup_arg_pages().
 */
static int bprm_mm_init(struct linux_binprm *bprm)
{
        int err;
        struct mm_struct *mm = NULL;

        bprm->mm = mm = mm_alloc();
        err = -ENOMEM;
        if (!mm)
                goto err;

        /* Save current stack limit for all calculations made during exec. */
        task_lock(current->group_leader);
        bprm->rlim_stack = current->signal->rlim[RLIMIT_STACK];
        task_unlock(current->group_leader);

        err = __bprm_mm_init(bprm);
        if (err)
                goto err;

        return 0;

err:
        if (mm) {
                bprm->mm = NULL;
                mmdrop(mm);
        }

        return err;
}

struct user_arg_ptr {
#ifdef CONFIG_COMPAT
        bool is_compat;
#endif
        union {
                const char __user *const __user *native;
#ifdef CONFIG_COMPAT
                const compat_uptr_t __user *compat;
#endif
        } ptr;
};
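
/*
 * Illustrative sketch: callers wrap the raw argv/envp pointers in a
 * user_arg_ptr before handing them to count()/copy_strings(). The native
 * case looks roughly like this (cf. do_execve() later in this file, not
 * shown in this excerpt):
 *
 *      struct user_arg_ptr argv = { .ptr.native = __argv };
 *
 * while the compat path sets .is_compat = true and fills .ptr.compat
 * with the 32-bit pointer array instead.
 */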

static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
{
        const char __user *native;

#ifdef CONFIG_COMPAT
        if (unlikely(argv.is_compat)) {
                compat_uptr_t compat;

                if (get_user(compat, argv.ptr.compat + nr))
                        return ERR_PTR(-EFAULT);

                return compat_ptr(compat);
        }
#endif

        if (get_user(native, argv.ptr.native + nr))
                return ERR_PTR(-EFAULT);

        return native;
}

/*
 * count() counts the number of strings in array ARGV.
 */
static int count(struct user_arg_ptr argv, int max)
{
        int i = 0;

        if (argv.ptr.native != NULL) {
                for (;;) {
                        const char __user *p = get_user_arg_ptr(argv, i);

                        if (!p)
                                break;

                        if (IS_ERR(p))
                                return -EFAULT;

                        if (i >= max)
                                return -E2BIG;
                        ++i;

                        if (fatal_signal_pending(current))
                                return -ERESTARTNOHAND;
                        cond_resched();
                }
        }
        return i;
}

static int count_strings_kernel(const char *const *argv)
{
        int i;

        if (!argv)
                return 0;

        for (i = 0; argv[i]; ++i) {
                if (i >= MAX_ARG_STRINGS)
                        return -E2BIG;
                if (fatal_signal_pending(current))
                        return -ERESTARTNOHAND;
                cond_resched();
        }
        return i;
}

static int bprm_stack_limits(struct linux_binprm *bprm)
{
        unsigned long limit, ptr_size;

        /*
         * Limit to 1/4 of the max stack size or 3/4 of _STK_LIM
         * (whichever is smaller) for the argv+env strings.
         * This ensures that:
         *  - the remaining binfmt code will not run out of stack space,
         *  - the program will have a reasonable amount of stack left
         *    to work from.
         */
        limit = _STK_LIM / 4 * 3;
        limit = min(limit, bprm->rlim_stack.rlim_cur / 4);
        /*
         * We've historically supported up to 32 pages (ARG_MAX)
         * of argument strings even with small stacks
         */
        limit = max_t(unsigned long, limit, ARG_MAX);
        /*
         * We must account for the size of all the argv and envp pointers to
         * the argv and envp strings, since they will also take up space in
         * the stack. They aren't stored until much later when we can't
         * signal to the parent that the child has run out of stack space.
         * Instead, calculate it here so it's possible to fail gracefully.
         */
        ptr_size = (bprm->argc + bprm->envc) * sizeof(void *);
        if (limit <= ptr_size)
                return -E2BIG;
        limit -= ptr_size;

        bprm->argmin = bprm->p - limit;
        return 0;
}
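
/*
 * Worked example (illustrative, assuming _STK_LIM = 8 MiB and a default
 * RLIMIT_STACK of 8 MiB): limit = min(6 MiB, 2 MiB) = 2 MiB, which is
 * already above ARG_MAX (128 KiB), so the argv+env strings may occupy up
 * to 2 MiB minus (argc + envc) * sizeof(void *) for the pointer arrays.
 */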

/*
 * 'copy_strings()' copies argument/environment strings from the old
 * process's memory to the new process's stack.  The call to get_user_pages()
 * ensures the destination page is created and not swapped out.
 */
static int copy_strings(int argc, struct user_arg_ptr argv,
                        struct linux_binprm *bprm)
{
        struct page *kmapped_page = NULL;
        char *kaddr = NULL;
        unsigned long kpos = 0;
        int ret;

        while (argc-- > 0) {
                const char __user *str;
                int len;
                unsigned long pos;

                ret = -EFAULT;
                str = get_user_arg_ptr(argv, argc);
                if (IS_ERR(str))
                        goto out;

                len = strnlen_user(str, MAX_ARG_STRLEN);
                if (!len)
                        goto out;

                ret = -E2BIG;
                if (!valid_arg_len(bprm, len))
                        goto out;

                /* We're going to work our way backwards. */
                pos = bprm->p;
                str += len;
                bprm->p -= len;
#ifdef CONFIG_MMU
                if (bprm->p < bprm->argmin)
                        goto out;
#endif

                while (len > 0) {
                        int offset, bytes_to_copy;

                        if (fatal_signal_pending(current)) {
                                ret = -ERESTARTNOHAND;
                                goto out;
                        }
                        cond_resched();

                        offset = pos % PAGE_SIZE;
                        if (offset == 0)
                                offset = PAGE_SIZE;

                        bytes_to_copy = offset;
                        if (bytes_to_copy > len)
                                bytes_to_copy = len;

                        offset -= bytes_to_copy;
                        pos -= bytes_to_copy;
                        str -= bytes_to_copy;
                        len -= bytes_to_copy;

                        if (!kmapped_page || kpos != (pos & PAGE_MASK)) {
                                struct page *page;

                                page = get_arg_page(bprm, pos, 1);
                                if (!page) {
                                        ret = -E2BIG;
                                        goto out;
                                }

                                if (kmapped_page) {
                                        flush_kernel_dcache_page(kmapped_page);
                                        kunmap(kmapped_page);
                                        put_arg_page(kmapped_page);
                                }
                                kmapped_page = page;
                                kaddr = kmap(kmapped_page);
                                kpos = pos & PAGE_MASK;
                                flush_arg_page(bprm, kpos, kmapped_page);
                        }
                        if (copy_from_user(kaddr+offset, str, bytes_to_copy)) {
                                ret = -EFAULT;
                                goto out;
                        }
                }
        }
        ret = 0;
out:
        if (kmapped_page) {
                flush_kernel_dcache_page(kmapped_page);
                kunmap(kmapped_page);
                put_arg_page(kmapped_page);
        }
        return ret;
}

/*
 * Copy an argument/environment string from the kernel to the process's stack.
 */
int copy_string_kernel(const char *arg, struct linux_binprm *bprm)
{
        int len = strnlen(arg, MAX_ARG_STRLEN) + 1 /* terminating NUL */;
        unsigned long pos = bprm->p;

        if (len == 0)
                return -EFAULT;
        if (!valid_arg_len(bprm, len))
                return -E2BIG;

        /* We're going to work our way backwards. */
        arg += len;
        bprm->p -= len;
        if (IS_ENABLED(CONFIG_MMU) && bprm->p < bprm->argmin)
                return -E2BIG;

        while (len > 0) {
                unsigned int bytes_to_copy = min_t(unsigned int, len,
                                min_not_zero(offset_in_page(pos), PAGE_SIZE));
                struct page *page;
                char *kaddr;

                pos -= bytes_to_copy;
                arg -= bytes_to_copy;
                len -= bytes_to_copy;

                page = get_arg_page(bprm, pos, 1);
                if (!page)
                        return -E2BIG;
                kaddr = kmap_atomic(page);
                flush_arg_page(bprm, pos & PAGE_MASK, page);
                memcpy(kaddr + offset_in_page(pos), arg, bytes_to_copy);
                flush_kernel_dcache_page(page);
                kunmap_atomic(kaddr);
                put_arg_page(page);
        }

        return 0;
}
EXPORT_SYMBOL(copy_string_kernel);

static int copy_strings_kernel(int argc, const char *const *argv,
                               struct linux_binprm *bprm)
{
        while (argc-- > 0) {
                int ret = copy_string_kernel(argv[argc], bprm);
                if (ret < 0)
                        return ret;
                if (fatal_signal_pending(current))
                        return -ERESTARTNOHAND;
                cond_resched();
        }
        return 0;
}

#ifdef CONFIG_MMU

/*
 * During bprm_mm_init(), we create a temporary stack at STACK_TOP_MAX.  Once
 * the binfmt code determines where the new stack should reside, we shift it to
 * its final location.  The process proceeds as follows:
 *
 * 1) Use shift to calculate the new vma endpoints.
 * 2) Extend vma to cover both the old and new ranges.  This ensures the
 *    arguments passed to subsequent functions are consistent.
 * 3) Move vma's page tables to the new range.
 * 4) Free up any cleared pgd range.
 * 5) Shrink the vma to cover only the new range.
 */
static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long old_start = vma->vm_start;
        unsigned long old_end = vma->vm_end;
        unsigned long length = old_end - old_start;
        unsigned long new_start = old_start - shift;
        unsigned long new_end = old_end - shift;
        struct mmu_gather tlb;

        BUG_ON(new_start > new_end);

        /*
         * ensure there are no vmas between where we want to go
         * and where we are
         */
        if (vma != find_vma(mm, new_start))
                return -EFAULT;

        /*
         * cover the whole range: [new_start, old_end)
         */
        if (vma_adjust(vma, new_start, old_end, vma->vm_pgoff, NULL))
                return -ENOMEM;

        /*
         * move the page tables downwards, on failure we rely on
         * process cleanup to remove whatever mess we made.
         */
        if (length != move_page_tables(vma, old_start,
                                       vma, new_start, length, false))
                return -ENOMEM;

        lru_add_drain();
        tlb_gather_mmu(&tlb, mm, old_start, old_end);
        if (new_end > old_start) {
                /*
                 * when the old and new regions overlap clear from new_end.
                 */
                free_pgd_range(&tlb, new_end, old_end, new_end,
                        vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
        } else {
                /*
                 * otherwise, clean from old_start; this is done to not touch
                 * the address space in [new_end, old_start) because some
                 * architectures have constraints on va-space that make this
                 * illegal (IA64) - for the others it's just a little faster.
                 */
                free_pgd_range(&tlb, old_start, old_end, new_end,
                        vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
        }
        tlb_finish_mmu(&tlb, old_start, old_end);

        /*
         * Shrink the vma to just the new range.  Always succeeds.
         */
        vma_adjust(vma, new_start, new_end, vma->vm_pgoff, NULL);

        return 0;
}

/*
 * Finalizes the stack vm_area_struct. The flags and permissions are updated,
 * the stack is optionally relocated, and some extra space is added.
 */
int setup_arg_pages(struct linux_binprm *bprm,
                    unsigned long stack_top,
                    int executable_stack)
{
        unsigned long ret;
        unsigned long stack_shift;
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma = bprm->vma;
        struct vm_area_struct *prev = NULL;
        unsigned long vm_flags;
        unsigned long stack_base;
        unsigned long stack_size;
        unsigned long stack_expand;
        unsigned long rlim_stack;

#ifdef CONFIG_STACK_GROWSUP
        /* Limit stack size */
        stack_base = bprm->rlim_stack.rlim_max;
        if (stack_base > STACK_SIZE_MAX)
                stack_base = STACK_SIZE_MAX;

        /* Add space for stack randomization. */
        stack_base += (STACK_RND_MASK << PAGE_SHIFT);

        /* Make sure we didn't let the argument array grow too large. */
        if (vma->vm_end - vma->vm_start > stack_base)
                return -ENOMEM;

        stack_base = PAGE_ALIGN(stack_top - stack_base);

        stack_shift = vma->vm_start - stack_base;
        mm->arg_start = bprm->p - stack_shift;
        bprm->p = vma->vm_end - stack_shift;
#else
        stack_top = arch_align_stack(stack_top);
        stack_top = PAGE_ALIGN(stack_top);

        if (unlikely(stack_top < mmap_min_addr) ||
            unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
                return -ENOMEM;

        stack_shift = vma->vm_end - stack_top;

        bprm->p -= stack_shift;
        mm->arg_start = bprm->p;
#endif

        if (bprm->loader)
                bprm->loader -= stack_shift;
        bprm->exec -= stack_shift;

        if (mmap_write_lock_killable(mm))
                return -EINTR;

        vm_flags = VM_STACK_FLAGS;

        /*
         * Adjust stack execute permissions; explicitly enable for
         * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
         * (arch default) otherwise.
         */
        if (unlikely(executable_stack == EXSTACK_ENABLE_X))
                vm_flags |= VM_EXEC;
        else if (executable_stack == EXSTACK_DISABLE_X)
                vm_flags &= ~VM_EXEC;
        vm_flags |= mm->def_flags;
        vm_flags |= VM_STACK_INCOMPLETE_SETUP;

        ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
                        vm_flags);
        if (ret)
                goto out_unlock;
        BUG_ON(prev != vma);

        if (unlikely(vm_flags & VM_EXEC)) {
                pr_warn_once("process '%pD4' started with executable stack\n",
                             bprm->file);
        }

        /* Move stack pages down in memory. */
        if (stack_shift) {
                ret = shift_arg_pages(vma, stack_shift);
                if (ret)
                        goto out_unlock;
        }

        /* mprotect_fixup is overkill to remove the temporary stack flags */
        vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;

        stack_expand = 131072UL; /* randomly 32*4k (or 2*64k) pages */
        stack_size = vma->vm_end - vma->vm_start;
        /*
         * Align this down to a page boundary as expand_stack
         * will align it up.
         */
        rlim_stack = bprm->rlim_stack.rlim_cur & PAGE_MASK;
#ifdef CONFIG_STACK_GROWSUP
        if (stack_size + stack_expand > rlim_stack)
                stack_base = vma->vm_start + rlim_stack;
        else
                stack_base = vma->vm_end + stack_expand;
#else
        if (stack_size + stack_expand > rlim_stack)
                stack_base = vma->vm_end - rlim_stack;
        else
                stack_base = vma->vm_start - stack_expand;
#endif
        current->mm->start_stack = bprm->p;
        ret = expand_stack(vma, stack_base);
        if (ret)
                ret = -EFAULT;

out_unlock:
        mmap_write_unlock(mm);
        return ret;
}
EXPORT_SYMBOL(setup_arg_pages);
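
/*
 * Illustrative caller sketch (cf. binfmt_elf, not part of this file):
 * a loader picks the final stack top and then calls
 *
 *      retval = setup_arg_pages(bprm, randomize_stack_top(STACK_TOP),
 *                               executable_stack);
 *
 * after which bprm->p is the userspace stack pointer to build argv on.
 */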

#else

/*
 * Transfer the program arguments and environment from the holding pages
 * onto the stack. The provided stack pointer is adjusted accordingly.
 */
int transfer_args_to_stack(struct linux_binprm *bprm,
                           unsigned long *sp_location)
{
        unsigned long index, stop, sp;
        int ret = 0;

        stop = bprm->p >> PAGE_SHIFT;
        sp = *sp_location;

        for (index = MAX_ARG_PAGES - 1; index >= stop; index--) {
                unsigned int offset = index == stop ? bprm->p & ~PAGE_MASK : 0;
                char *src = kmap(bprm->page[index]) + offset;
                sp -= PAGE_SIZE - offset;
                if (copy_to_user((void *) sp, src, PAGE_SIZE - offset) != 0)
                        ret = -EFAULT;
                kunmap(bprm->page[index]);
                if (ret)
                        goto out;
        }

        *sp_location = sp;

out:
        return ret;
}
EXPORT_SYMBOL(transfer_args_to_stack);
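
/*
 * Illustrative no-MMU caller sketch (cf. binfmt_flat; "stack_top" is a
 * hypothetical local): with the stack region already allocated, a loader
 * copies the argument pages out and takes back the adjusted pointer:
 *
 *      unsigned long sp = stack_top;
 *
 *      res = transfer_args_to_stack(bprm, &sp);
 *      // on success, build the argv[]/envp[] pointer arrays below sp
 */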

#endif /* CONFIG_MMU */

static struct file *do_open_execat(int fd, struct filename *name, int flags)
{
        struct file *file;
        int err;
        struct open_flags open_exec_flags = {
                .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
                .acc_mode = MAY_EXEC,
                .intent = LOOKUP_OPEN,
                .lookup_flags = LOOKUP_FOLLOW,
        };

        if ((flags & ~(AT_SYMLINK_NOFOLLOW | AT_EMPTY_PATH)) != 0)
                return ERR_PTR(-EINVAL);
        if (flags & AT_SYMLINK_NOFOLLOW)
                open_exec_flags.lookup_flags &= ~LOOKUP_FOLLOW;
        if (flags & AT_EMPTY_PATH)
                open_exec_flags.lookup_flags |= LOOKUP_EMPTY;

        file = do_filp_open(fd, name, &open_exec_flags);
        if (IS_ERR(file))
                goto out;

        /*
         * may_open() has already checked for this, so it should be
         * impossible to trip now. But we need to be extra cautious
         * and check again at the very end too.
         */
        err = -EACCES;
        if (WARN_ON_ONCE(!S_ISREG(file_inode(file)->i_mode) ||
                         path_noexec(&file->f_path)))
                goto exit;

        err = deny_write_access(file);
        if (err)
                goto exit;

        if (name->name[0] != '\0')
                fsnotify_open(file);

out:
        return file;

exit:
        fput(file);
        return ERR_PTR(err);
}

struct file *open_exec(const char *name)
{
        struct filename *filename = getname_kernel(name);
        struct file *f = ERR_CAST(filename);

        if (!IS_ERR(filename)) {
                f = do_open_execat(AT_FDCWD, filename, 0);
                putname(filename);
        }
        return f;
}
EXPORT_SYMBOL(open_exec);

#if defined(CONFIG_HAVE_AOUT) || defined(CONFIG_BINFMT_FLAT) || \
    defined(CONFIG_BINFMT_ELF_FDPIC)
ssize_t read_code(struct file *file, unsigned long addr, loff_t pos, size_t len)
{
        ssize_t res = vfs_read(file, (void __user *)addr, len, &pos);
        if (res > 0)
                flush_icache_user_range(addr, addr + len);
        return res;
}
EXPORT_SYMBOL(read_code);
#endif

/*
 * Maps the mm_struct mm into the current task struct.
 * On success, this function returns with the mutex
 * exec_update_mutex locked.
 */
static int exec_mmap(struct mm_struct *mm)
{
        struct task_struct *tsk;
        struct mm_struct *old_mm, *active_mm;
        int ret;

        /* Notify parent that we're no longer interested in the old VM */
        tsk = current;
        old_mm = current->mm;
        exec_mm_release(tsk, old_mm);
        if (old_mm)
                sync_mm_rss(old_mm);

        ret = mutex_lock_killable(&tsk->signal->exec_update_mutex);
        if (ret)
                return ret;

        if (old_mm) {
                /*
                 * Make sure that if there is a core dump in progress
                 * for the old mm, we get out and die instead of going
                 * through with the exec.  We must hold mmap_lock around
                 * checking core_state and changing tsk->mm.
                 */
                mmap_read_lock(old_mm);
                if (unlikely(old_mm->core_state)) {
                        mmap_read_unlock(old_mm);
                        mutex_unlock(&tsk->signal->exec_update_mutex);
                        return -EINTR;
                }
        }

        task_lock(tsk);
        active_mm = tsk->active_mm;
        membarrier_exec_mmap(mm);
        tsk->mm = mm;
        tsk->active_mm = mm;
        activate_mm(active_mm, mm);
        tsk->mm->vmacache_seqnum = 0;
        vmacache_flush(tsk);
        task_unlock(tsk);
        if (old_mm) {
                mmap_read_unlock(old_mm);
                BUG_ON(active_mm != old_mm);
                setmax_mm_hiwater_rss(&tsk->signal->maxrss, old_mm);
                mm_update_next_owner(old_mm);
                mmput(old_mm);
                return 0;
        }
        mmdrop(active_mm);
        return 0;
}

static int de_thread(struct task_struct *tsk)
{
        struct signal_struct *sig = tsk->signal;
        struct sighand_struct *oldsighand = tsk->sighand;
        spinlock_t *lock = &oldsighand->siglock;

        if (thread_group_empty(tsk))
                goto no_thread_group;

        /*
         * Kill all other threads in the thread group.
         */
        spin_lock_irq(lock);
        if (signal_group_exit(sig)) {
                /*
                 * Another group action in progress, just
                 * return so that the signal is processed.
                 */
                spin_unlock_irq(lock);
                return -EAGAIN;
        }

        sig->group_exit_task = tsk;
        sig->notify_count = zap_other_threads(tsk);
        if (!thread_group_leader(tsk))
                sig->notify_count--;

        while (sig->notify_count) {
                __set_current_state(TASK_KILLABLE);
                spin_unlock_irq(lock);
                schedule();
                if (__fatal_signal_pending(tsk))
                        goto killed;
                spin_lock_irq(lock);
        }
        spin_unlock_irq(lock);

        /*
         * At this point all other threads have exited, all we have to
         * do is to wait for the thread group leader to become inactive,
         * and to assume its PID:
         */
        if (!thread_group_leader(tsk)) {
                struct task_struct *leader = tsk->group_leader;

                for (;;) {
                        cgroup_threadgroup_change_begin(tsk);
                        write_lock_irq(&tasklist_lock);
                        /*
                         * Do this under tasklist_lock to ensure that
                         * exit_notify() can't miss ->group_exit_task
                         */
                        sig->notify_count = -1;
                        if (likely(leader->exit_state))
                                break;
                        __set_current_state(TASK_KILLABLE);
                        write_unlock_irq(&tasklist_lock);
                        cgroup_threadgroup_change_end(tsk);
                        schedule();
                        if (__fatal_signal_pending(tsk))
                                goto killed;
                }

                /*
                 * The only record we have of the real-time age of a
                 * process, regardless of execs it's done, is start_time.
                 * All the past CPU time is accumulated in signal_struct
                 * from sister threads now dead.  But in this non-leader
                 * exec, nothing survives from the original leader thread,
                 * whose birth marks the true age of this process now.
                 * When we take on its identity by switching to its PID, we
                 * also take its birthdate (always earlier than our own).
                 */
                tsk->start_time = leader->start_time;
                tsk->start_boottime = leader->start_boottime;

                BUG_ON(!same_thread_group(leader, tsk));
                /*
                 * An exec() starts a new thread group with the
                 * TGID of the previous thread group. Rehash the
                 * two threads with a switched PID, and release
                 * the former thread group leader:
                 */

                /* Become a process group leader with the old leader's pid.
                 * The old leader becomes a thread of this thread group.
                 */
                exchange_tids(tsk, leader);
                transfer_pid(leader, tsk, PIDTYPE_TGID);
                transfer_pid(leader, tsk, PIDTYPE_PGID);
                transfer_pid(leader, tsk, PIDTYPE_SID);

                list_replace_rcu(&leader->tasks, &tsk->tasks);
                list_replace_init(&leader->sibling, &tsk->sibling);

                tsk->group_leader = tsk;
                leader->group_leader = tsk;

                tsk->exit_signal = SIGCHLD;
                leader->exit_signal = -1;

                BUG_ON(leader->exit_state != EXIT_ZOMBIE);
                leader->exit_state = EXIT_DEAD;

                /*
                 * We are going to release_task()->ptrace_unlink() silently,
                 * the tracer can sleep in do_wait(). EXIT_DEAD guarantees
                 * the tracer won't block again waiting for this thread.
                 */
                if (unlikely(leader->ptrace))
                        __wake_up_parent(leader, leader->parent);
                write_unlock_irq(&tasklist_lock);
                cgroup_threadgroup_change_end(tsk);

                release_task(leader);
        }

        sig->group_exit_task = NULL;
        sig->notify_count = 0;

no_thread_group:
        /* we have changed execution domain */
        tsk->exit_signal = SIGCHLD;

        BUG_ON(!thread_group_leader(tsk));
        return 0;

killed:
        /* protects against exit_notify() and __exit_signal() */
        read_lock(&tasklist_lock);
        sig->group_exit_task = NULL;
        sig->notify_count = 0;
        read_unlock(&tasklist_lock);
        return -EAGAIN;
}


/*
 * This function makes sure the current process has its own signal table,
 * so that flush_signal_handlers can later reset the handlers without
 * disturbing other processes.  (Other processes might share the signal
 * table via the CLONE_SIGHAND option to clone().)
 */
static int unshare_sighand(struct task_struct *me)
{
        struct sighand_struct *oldsighand = me->sighand;

        if (refcount_read(&oldsighand->count) != 1) {
                struct sighand_struct *newsighand;
                /*
                 * This ->sighand is shared with the CLONE_SIGHAND
                 * but not CLONE_THREAD task, switch to the new one.
                 */
                newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
                if (!newsighand)
                        return -ENOMEM;

                refcount_set(&newsighand->count, 1);
                memcpy(newsighand->action, oldsighand->action,
                       sizeof(newsighand->action));

                write_lock_irq(&tasklist_lock);
                spin_lock(&oldsighand->siglock);
                rcu_assign_pointer(me->sighand, newsighand);
                spin_unlock(&oldsighand->siglock);
                write_unlock_irq(&tasklist_lock);

                __cleanup_sighand(oldsighand);
        }
        return 0;
}

char *__get_task_comm(char *buf, size_t buf_size, struct task_struct *tsk)
{
        task_lock(tsk);
        strncpy(buf, tsk->comm, buf_size);
        task_unlock(tsk);
        return buf;
}
EXPORT_SYMBOL_GPL(__get_task_comm);
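
/*
 * Callers normally use the get_task_comm() wrapper from <linux/sched.h>,
 * which adds a compile-time check that the buffer is TASK_COMM_LEN bytes:
 *
 *      char comm[TASK_COMM_LEN];
 *
 *      get_task_comm(comm, tsk);
 */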

/*
 * These functions flush out all traces of the currently running executable
 * so that a new one can be started.
 */

void __set_task_comm(struct task_struct *tsk, const char *buf, bool exec)
{
        task_lock(tsk);
        trace_task_rename(tsk, buf);
        strlcpy(tsk->comm, buf, sizeof(tsk->comm));
        task_unlock(tsk);
        perf_event_comm(tsk, exec);
}

/*
 * Calling this is the point of no return. None of the failures will be
 * seen by userspace since either the process is already taking a fatal
 * signal (via de_thread() or coredump), or will have SEGV raised
 * (after exec_mmap()) by search_binary_handler (see below).
 */
int begin_new_exec(struct linux_binprm * bprm)
{
        struct task_struct *me = current;
        int retval;

        /* Once we are committed compute the creds */
        retval = bprm_creds_from_file(bprm);
        if (retval)
                return retval;

        /*
         * Ensure all future errors are fatal.
         */
        bprm->point_of_no_return = true;

        /*
         * Make this the only thread in the thread group.
         */
        retval = de_thread(me);
        if (retval)
                goto out;

        /*
         * Must be called _before_ exec_mmap() as bprm->mm is
         * not visible until then. This also enables the update
         * to be lockless.
         */
        set_mm_exe_file(bprm->mm, bprm->file);

        /* If the binary is not readable then enforce mm->dumpable=0 */
        would_dump(bprm, bprm->file);
        if (bprm->have_execfd)
                would_dump(bprm, bprm->executable);

        /*
         * Release all of the old mmap stuff
         */
        acct_arg_size(bprm, 0);
        retval = exec_mmap(bprm->mm);
        if (retval)
                goto out;

        bprm->mm = NULL;

#ifdef CONFIG_POSIX_TIMERS
        exit_itimers(me->signal);
        flush_itimer_signals();
#endif

        /*
         * Make the signal table private.
         */
        retval = unshare_sighand(me);
        if (retval)
                goto out_unlock;

        /*
         * Ensure that the uaccess routines can actually operate on userspace
         * pointers:
         */
        force_uaccess_begin();

        me->flags &= ~(PF_RANDOMIZE | PF_FORKNOEXEC | PF_KTHREAD |
                                        PF_NOFREEZE | PF_NO_SETAFFINITY);
        flush_thread();
        me->personality &= ~bprm->per_clear;

        /*
         * We have to apply CLOEXEC before we change whether the process is
         * dumpable (in setup_new_exec) to avoid a race with a process in userspace
         * trying to access the should-be-closed file descriptors of a process
         * undergoing exec(2).
         */
        do_close_on_exec(me->files);

        if (bprm->secureexec) {
                /* Make sure parent cannot signal privileged process. */
                me->pdeath_signal = 0;

                /*
                 * For secureexec, reset the stack limit to sane default to
                 * avoid bad behavior from the prior rlimits. This has to
                 * happen before arch_pick_mmap_layout(), which examines
                 * RLIMIT_STACK, but after the point of no return to avoid
                 * needing to clean up the change on failure.
                 */
                if (bprm->rlim_stack.rlim_cur > _STK_LIM)
                        bprm->rlim_stack.rlim_cur = _STK_LIM;
        }

        me->sas_ss_sp = me->sas_ss_size = 0;

        /*
         * Figure out dumpability. Note that checking only current here is
         * wrong, but userspace depends on it. This should be testing
         * bprm->secureexec instead.
         */
        if (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP ||
            !(uid_eq(current_euid(), current_uid()) &&
              gid_eq(current_egid(), current_gid())))
                set_dumpable(current->mm, suid_dumpable);
        else
                set_dumpable(current->mm, SUID_DUMP_USER);

        perf_event_exec();
        __set_task_comm(me, kbasename(bprm->filename), true);

        /* An exec changes our domain. We are no longer part of the thread
           group */
        WRITE_ONCE(me->self_exec_id, me->self_exec_id + 1);
        flush_signal_handlers(me, 0);

        /*
         * install the new credentials for this executable
         */
        security_bprm_committing_creds(bprm);

        commit_creds(bprm->cred);
        bprm->cred = NULL;

        /*
         * Disable monitoring for regular users
         * when executing setuid binaries. Must
         * wait until new credentials are committed
         * by commit_creds() above
         */
        if (get_dumpable(me->mm) != SUID_DUMP_USER)
                perf_event_exit_task(me);
        /*
         * cred_guard_mutex must be held at least to this point to prevent
         * ptrace_attach() from altering our determination of the task's
         * credentials; any time after this it may be unlocked.
         */
        security_bprm_committed_creds(bprm);

        /* Pass the opened binary to the interpreter. */
        if (bprm->have_execfd) {
                retval = get_unused_fd_flags(0);
                if (retval < 0)
                        goto out_unlock;
                fd_install(retval, bprm->executable);
                bprm->executable = NULL;
                bprm->execfd = retval;
        }
        return 0;

out_unlock:
        mutex_unlock(&me->signal->exec_update_mutex);
out:
        return retval;
}
EXPORT_SYMBOL(begin_new_exec);

void would_dump(struct linux_binprm *bprm, struct file *file)
{
        struct inode *inode = file_inode(file);
        if (inode_permission(inode, MAY_READ) < 0) {
                struct user_namespace *old, *user_ns;
                bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP;

                /* Ensure mm->user_ns contains the executable */
                user_ns = old = bprm->mm->user_ns;
                while ((user_ns != &init_user_ns) &&
                       !privileged_wrt_inode_uidgid(user_ns, inode))
                        user_ns = user_ns->parent;

                if (old != user_ns) {
                        bprm->mm->user_ns = get_user_ns(user_ns);
                        put_user_ns(old);
                }
        }
}
EXPORT_SYMBOL(would_dump);

void setup_new_exec(struct linux_binprm * bprm)
{
        /* Setup things that can depend upon the personality */
        struct task_struct *me = current;

        arch_pick_mmap_layout(me->mm, &bprm->rlim_stack);

        arch_setup_new_exec();

        /* Set the new mm task size. We have to do that late because it may
         * depend on TIF_32BIT which is only updated in flush_thread() on
         * some architectures like powerpc
         */
        me->mm->task_size = TASK_SIZE;
        mutex_unlock(&me->signal->exec_update_mutex);
        mutex_unlock(&me->signal->cred_guard_mutex);
}
EXPORT_SYMBOL(setup_new_exec);

/* Runs immediately before start_thread() takes over. */
void finalize_exec(struct linux_binprm *bprm)
{
        /* Store any stack rlimit changes before starting thread. */
        task_lock(current->group_leader);
        current->signal->rlim[RLIMIT_STACK] = bprm->rlim_stack;
        task_unlock(current->group_leader);
}
EXPORT_SYMBOL(finalize_exec);
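
/*
 * Illustrative ordering in a binfmt loader (sketch, cf. load_elf_binary;
 * error handling elided):
 *
 *      retval = begin_new_exec(bprm);          // point of no return
 *      setup_new_exec(bprm);                   // personality-dependent setup
 *      retval = setup_arg_pages(bprm, ...);    // place the final stack
 *      ...                                     // map the binary itself
 *      finalize_exec(bprm);                    // commit rlimit changes
 *      start_thread(regs, elf_entry, bprm->p); // hand off to userspace
 */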

/*
 * Prepare credentials and lock ->cred_guard_mutex.
 * setup_new_exec() commits the new creds and drops the lock.
 * Or, if exec fails before that, free_bprm() should release ->cred
 * and unlock.
 */
static int prepare_bprm_creds(struct linux_binprm *bprm)
{
        if (mutex_lock_interruptible(&current->signal->cred_guard_mutex))
                return -ERESTARTNOINTR;

        bprm->cred = prepare_exec_creds();
        if (likely(bprm->cred))
                return 0;

        mutex_unlock(&current->signal->cred_guard_mutex);
        return -ENOMEM;
}

static void free_bprm(struct linux_binprm *bprm)
{
        if (bprm->mm) {
                acct_arg_size(bprm, 0);
                mmput(bprm->mm);
        }
        free_arg_pages(bprm);
        if (bprm->cred) {
                mutex_unlock(&current->signal->cred_guard_mutex);
                abort_creds(bprm->cred);
        }
        if (bprm->file) {
                allow_write_access(bprm->file);
                fput(bprm->file);
        }
        if (bprm->executable)
                fput(bprm->executable);
        /* If a binfmt changed the interp, free it. */
        if (bprm->interp != bprm->filename)
                kfree(bprm->interp);
        kfree(bprm->fdpath);
        kfree(bprm);
}

static struct linux_binprm *alloc_bprm(int fd, struct filename *filename)
{
        struct linux_binprm *bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
        int retval = -ENOMEM;
        if (!bprm)
                goto out;

        if (fd == AT_FDCWD || filename->name[0] == '/') {
                bprm->filename = filename->name;
        } else {
                if (filename->name[0] == '\0')
                        bprm->fdpath = kasprintf(GFP_KERNEL, "/dev/fd/%d", fd);
                else
                        bprm->fdpath = kasprintf(GFP_KERNEL, "/dev/fd/%d/%s",
                                                  fd, filename->name);
                if (!bprm->fdpath)
                        goto out_free;

                bprm->filename = bprm->fdpath;
        }
        bprm->interp = bprm->filename;

        retval = bprm_mm_init(bprm);
        if (retval)
                goto out_free;
        return bprm;

out_free:
        free_bprm(bprm);
out:
        return ERR_PTR(retval);
}

int bprm_change_interp(const char *interp, struct linux_binprm *bprm)
{
        /* If a binfmt changed the interp, free it first. */
        if (bprm->interp != bprm->filename)
                kfree(bprm->interp);
        bprm->interp = kstrdup(interp, GFP_KERNEL);
        if (!bprm->interp)
                return -ENOMEM;
        return 0;
}
EXPORT_SYMBOL(bprm_change_interp);

/*
 * determine how safe it is to execute the proposed program
 * - the caller must hold ->cred_guard_mutex to protect against
 *   PTRACE_ATTACH or seccomp thread-sync
 */
static void check_unsafe_exec(struct linux_binprm *bprm)
{
        struct task_struct *p = current, *t;
        unsigned n_fs;

        if (p->ptrace)
                bprm->unsafe |= LSM_UNSAFE_PTRACE;

        /*
         * This isn't strictly necessary, but it makes it harder for LSMs to
         * mess up.
         */
        if (task_no_new_privs(current))
                bprm->unsafe |= LSM_UNSAFE_NO_NEW_PRIVS;

        t = p;
        n_fs = 1;
        spin_lock(&p->fs->lock);
        rcu_read_lock();
        while_each_thread(p, t) {
                if (t->fs == p->fs)
                        n_fs++;
        }
        rcu_read_unlock();

        if (p->fs->users > n_fs)
                bprm->unsafe |= LSM_UNSAFE_SHARE;
        else
                p->fs->in_exec = 1;
        spin_unlock(&p->fs->lock);
}

static void bprm_fill_uid(struct linux_binprm *bprm, struct file *file)
{
        /* Handle suid and sgid on files */
        struct inode *inode;
        unsigned int mode;
        kuid_t uid;
        kgid_t gid;

        if (!mnt_may_suid(file->f_path.mnt))
                return;

        if (task_no_new_privs(current))
                return;

        inode = file->f_path.dentry->d_inode;
        mode = READ_ONCE(inode->i_mode);
        if (!(mode & (S_ISUID|S_ISGID)))
                return;

        /* Be careful if suid/sgid is set */
        inode_lock(inode);

        /* reload atomically mode/uid/gid now that lock held */
        mode = inode->i_mode;
        uid = inode->i_uid;
        gid = inode->i_gid;
        inode_unlock(inode);

        /* We ignore suid/sgid if there are no mappings for them in the ns */
        if (!kuid_has_mapping(bprm->cred->user_ns, uid) ||
                 !kgid_has_mapping(bprm->cred->user_ns, gid))
                return;

        if (mode & S_ISUID) {
                bprm->per_clear |= PER_CLEAR_ON_SETID;
                bprm->cred->euid = uid;
        }

        if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
                bprm->per_clear |= PER_CLEAR_ON_SETID;
                bprm->cred->egid = gid;
        }
}

/*
 * Compute bprm->cred based upon the final binary.
 */
static int bprm_creds_from_file(struct linux_binprm *bprm)
{
        /* Compute creds based on which file? */
        struct file *file = bprm->execfd_creds ? bprm->executable : bprm->file;

        bprm_fill_uid(bprm, file);
        return security_bprm_creds_from_file(bprm, file);
}

/*
 * Fill the binprm structure from the inode.
 * Read the first BINPRM_BUF_SIZE bytes
 *
 * This may be called multiple times for binary chains (scripts for example).
 */
static int prepare_binprm(struct linux_binprm *bprm)
{
        loff_t pos = 0;

        memset(bprm->buf, 0, BINPRM_BUF_SIZE);
        return kernel_read(bprm->file, bprm->buf, BINPRM_BUF_SIZE, &pos);
}
1621
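/*
 * The bytes read above are all a handler may inspect before deciding
 * whether it recognizes the image: for example, binfmt_elf checks for
 * "\177ELF" and binfmt_script for "#!" at the start of bprm->buf.
 */
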
/*
 * Arguments are '\0' separated strings found at the location bprm->p
 * points to; chop off the first by relocating bprm->p to right after
 * the first '\0' encountered.
 */
int remove_arg_zero(struct linux_binprm *bprm)
{
        int ret = 0;
        unsigned long offset;
        char *kaddr;
        struct page *page;

        if (!bprm->argc)
                return 0;

        do {
                offset = bprm->p & ~PAGE_MASK;
                page = get_arg_page(bprm, bprm->p, 0);
                if (!page) {
                        ret = -EFAULT;
                        goto out;
                }
                kaddr = kmap_atomic(page);

                for (; offset < PAGE_SIZE && kaddr[offset];
                                offset++, bprm->p++)
                        ;

                kunmap_atomic(kaddr);
                put_arg_page(page);
        } while (offset == PAGE_SIZE);

        bprm->p++;
        bprm->argc--;
        ret = 0;

out:
        return ret;
}
EXPORT_SYMBOL(remove_arg_zero);

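/*
 * Illustrative caller (not part of this file): binfmt_script uses
 * remove_arg_zero() to drop the script's own argv[0] before pushing the
 * interpreter path (and its optional "#!" argument) in its place.
 */
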
#define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e))
/*
 * cycle through the list of binary format handlers until one recognizes
 * the image
 */
static int search_binary_handler(struct linux_binprm *bprm)
{
        bool need_retry = IS_ENABLED(CONFIG_MODULES);
        struct linux_binfmt *fmt;
        int retval;

        retval = prepare_binprm(bprm);
        if (retval < 0)
                return retval;

        retval = security_bprm_check(bprm);
        if (retval)
                return retval;

        retval = -ENOENT;
 retry:
        read_lock(&binfmt_lock);
        list_for_each_entry(fmt, &formats, lh) {
                if (!try_module_get(fmt->module))
                        continue;
                read_unlock(&binfmt_lock);

                retval = fmt->load_binary(bprm);

                read_lock(&binfmt_lock);
                put_binfmt(fmt);
                if (bprm->point_of_no_return || (retval != -ENOEXEC)) {
                        read_unlock(&binfmt_lock);
                        return retval;
                }
        }
        read_unlock(&binfmt_lock);

        if (need_retry) {
                if (printable(bprm->buf[0]) && printable(bprm->buf[1]) &&
                    printable(bprm->buf[2]) && printable(bprm->buf[3]))
                        return retval;
                if (request_module("binfmt-%04x", *(ushort *)(bprm->buf + 2)) < 0)
                        return retval;
                need_retry = false;
                goto retry;
        }

        return retval;
}

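/*
 * Note on the retry above: if no handler matched and the header does not
 * look like printable text, the two bytes at offset 2 (the a.out
 * machine-type field) are used to request a "binfmt-%04x" module alias,
 * and the format list is then scanned exactly once more.
 */
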
static int exec_binprm(struct linux_binprm *bprm)
{
        pid_t old_pid, old_vpid;
        int ret, depth;

        /* Need to fetch pid before load_binary changes it */
        old_pid = current->pid;
        rcu_read_lock();
        old_vpid = task_pid_nr_ns(current, task_active_pid_ns(current->parent));
        rcu_read_unlock();

        /* This allows up to 5 levels of binfmt rewrites before failing hard. */
        for (depth = 0;; depth++) {
                struct file *exec;
                if (depth > 5)
                        return -ELOOP;

                ret = search_binary_handler(bprm);
                if (ret < 0)
                        return ret;
                if (!bprm->interpreter)
                        break;

                exec = bprm->file;
                bprm->file = bprm->interpreter;
                bprm->interpreter = NULL;

                allow_write_access(exec);
                if (unlikely(bprm->have_execfd)) {
                        if (bprm->executable) {
                                fput(exec);
                                return -ENOEXEC;
                        }
                        bprm->executable = exec;
                } else
                        fput(exec);
        }

        audit_bprm(bprm);
        trace_sched_process_exec(current, old_pid, bprm);
        ptrace_event(PTRACE_EVENT_EXEC, old_vpid);
        proc_exec_connector(current);
        return 0;
}

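/*
 * Example of the rewrite loop above (illustrative): executing a file
 * that begins with "#!/bin/sh" makes binfmt_script set
 * bprm->interpreter to an opened /bin/sh, which the next iteration
 * feeds to binfmt_elf; each such hop consumes one level of depth.
 */
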
/*
 * sys_execve() executes a new program.
 */
static int bprm_execve(struct linux_binprm *bprm,
                       int fd, struct filename *filename, int flags)
{
        struct file *file;
        struct files_struct *displaced;
        int retval;

        retval = unshare_files(&displaced);
        if (retval)
                return retval;

        retval = prepare_bprm_creds(bprm);
        if (retval)
                goto out_files;

        check_unsafe_exec(bprm);
        current->in_execve = 1;

        file = do_open_execat(fd, filename, flags);
        retval = PTR_ERR(file);
        if (IS_ERR(file))
                goto out_unmark;

        sched_exec();

        bprm->file = file;
        /*
         * Record that a name derived from an O_CLOEXEC fd will be
         * inaccessible after exec.  Relies on having exclusive access to
         * current->files (due to unshare_files above).
         */
        if (bprm->fdpath &&
            close_on_exec(fd, rcu_dereference_raw(current->files->fdt)))
                bprm->interp_flags |= BINPRM_FLAGS_PATH_INACCESSIBLE;

        /* Set the unchanging part of bprm->cred */
        retval = security_bprm_creds_for_exec(bprm);
        if (retval)
                goto out;

        retval = exec_binprm(bprm);
        if (retval < 0)
                goto out;

        /* execve succeeded */
        current->fs->in_exec = 0;
        current->in_execve = 0;
        rseq_execve(current);
        acct_update_integrals(current);
        task_numa_free(current, false);
        if (displaced)
                put_files_struct(displaced);
        return retval;

out:
        /*
         * If past the point of no return, ensure the code never
         * returns to the userspace process.  Use an existing fatal
         * signal if present, otherwise terminate the process with
         * SIGSEGV.
         */
        if (bprm->point_of_no_return && !fatal_signal_pending(current))
                force_sigsegv(SIGSEGV);

out_unmark:
        current->fs->in_exec = 0;
        current->in_execve = 0;

out_files:
        if (displaced)
                reset_files_struct(displaced);

        return retval;
}

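/*
 * Note on the error path above: bprm->point_of_no_return is set once
 * begin_new_exec() has begun tearing down the old mm, after which the
 * old program no longer exists to return an error to; hence the forced
 * SIGSEGV on late failure.
 */
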
static int do_execveat_common(int fd, struct filename *filename,
                              struct user_arg_ptr argv,
                              struct user_arg_ptr envp,
                              int flags)
{
        struct linux_binprm *bprm;
        int retval;

        if (IS_ERR(filename))
                return PTR_ERR(filename);

        /*
         * We move the actual failure in case of RLIMIT_NPROC excess from
         * set*uid() to execve() because too many poorly written programs
         * don't check setuid() return code.  Here we additionally recheck
         * whether NPROC limit is still exceeded.
         */
        if ((current->flags & PF_NPROC_EXCEEDED) &&
            atomic_read(&current_user()->processes) > rlimit(RLIMIT_NPROC)) {
                retval = -EAGAIN;
                goto out_ret;
        }

        /* We're below the limit (still or again), so we don't want to make
         * further execve() calls fail. */
        current->flags &= ~PF_NPROC_EXCEEDED;

        bprm = alloc_bprm(fd, filename);
        if (IS_ERR(bprm)) {
                retval = PTR_ERR(bprm);
                goto out_ret;
        }

        retval = count(argv, MAX_ARG_STRINGS);
        if (retval < 0)
                goto out_free;
        bprm->argc = retval;

        retval = count(envp, MAX_ARG_STRINGS);
        if (retval < 0)
                goto out_free;
        bprm->envc = retval;

        retval = bprm_stack_limits(bprm);
        if (retval < 0)
                goto out_free;

        retval = copy_string_kernel(bprm->filename, bprm);
        if (retval < 0)
                goto out_free;
        bprm->exec = bprm->p;

        retval = copy_strings(bprm->envc, envp, bprm);
        if (retval < 0)
                goto out_free;

        retval = copy_strings(bprm->argc, argv, bprm);
        if (retval < 0)
                goto out_free;

        retval = bprm_execve(bprm, fd, filename, flags);
out_free:
        free_bprm(bprm);

out_ret:
        putname(filename);
        return retval;
}

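/*
 * The copies above land on the bprm argument pages from the top down:
 * the filename first (its location recorded in bprm->exec), then the
 * environment, then the argument strings, leaving bprm->p at the start
 * of argv[0] by the time a binfmt handler runs.
 */
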
int kernel_execve(const char *kernel_filename,
                  const char *const *argv, const char *const *envp)
{
        struct filename *filename;
        struct linux_binprm *bprm;
        int fd = AT_FDCWD;
        int retval;

        filename = getname_kernel(kernel_filename);
        if (IS_ERR(filename))
                return PTR_ERR(filename);

        bprm = alloc_bprm(fd, filename);
        if (IS_ERR(bprm)) {
                retval = PTR_ERR(bprm);
                goto out_ret;
        }

        retval = count_strings_kernel(argv);
        if (retval < 0)
                goto out_free;
        bprm->argc = retval;

        retval = count_strings_kernel(envp);
        if (retval < 0)
                goto out_free;
        bprm->envc = retval;

        retval = bprm_stack_limits(bprm);
        if (retval < 0)
                goto out_free;

        retval = copy_string_kernel(bprm->filename, bprm);
        if (retval < 0)
                goto out_free;
        bprm->exec = bprm->p;

        retval = copy_strings_kernel(bprm->envc, envp, bprm);
        if (retval < 0)
                goto out_free;

        retval = copy_strings_kernel(bprm->argc, argv, bprm);
        if (retval < 0)
                goto out_free;

        retval = bprm_execve(bprm, fd, filename, 0);
out_free:
        free_bprm(bprm);
out_ret:
        putname(filename);
        return retval;
}

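/*
 * kernel_execve() is for in-kernel callers with kernel-space argument
 * arrays, e.g. run_init_process() launching init and the usermode
 * helper machinery; userspace enters through the syscalls below.
 */
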
static int do_execve(struct filename *filename,
        const char __user *const __user *__argv,
        const char __user *const __user *__envp)
{
        struct user_arg_ptr argv = { .ptr.native = __argv };
        struct user_arg_ptr envp = { .ptr.native = __envp };
        return do_execveat_common(AT_FDCWD, filename, argv, envp, 0);
}

static int do_execveat(int fd, struct filename *filename,
                const char __user *const __user *__argv,
                const char __user *const __user *__envp,
                int flags)
{
        struct user_arg_ptr argv = { .ptr.native = __argv };
        struct user_arg_ptr envp = { .ptr.native = __envp };

        return do_execveat_common(fd, filename, argv, envp, flags);
}

#ifdef CONFIG_COMPAT
static int compat_do_execve(struct filename *filename,
        const compat_uptr_t __user *__argv,
        const compat_uptr_t __user *__envp)
{
        struct user_arg_ptr argv = {
                .is_compat = true,
                .ptr.compat = __argv,
        };
        struct user_arg_ptr envp = {
                .is_compat = true,
                .ptr.compat = __envp,
        };
        return do_execveat_common(AT_FDCWD, filename, argv, envp, 0);
}

static int compat_do_execveat(int fd, struct filename *filename,
                              const compat_uptr_t __user *__argv,
                              const compat_uptr_t __user *__envp,
                              int flags)
{
        struct user_arg_ptr argv = {
                .is_compat = true,
                .ptr.compat = __argv,
        };
        struct user_arg_ptr envp = {
                .is_compat = true,
                .ptr.compat = __envp,
        };
        return do_execveat_common(fd, filename, argv, envp, flags);
}
#endif

void set_binfmt(struct linux_binfmt *new)
{
        struct mm_struct *mm = current->mm;

        if (mm->binfmt)
                module_put(mm->binfmt->module);

        mm->binfmt = new;
        if (new)
                __module_get(new->module);
}
EXPORT_SYMBOL(set_binfmt);

/*
 * set_dumpable stores the three-value SUID_DUMP_* setting into mm->flags.
 */
void set_dumpable(struct mm_struct *mm, int value)
{
        if (WARN_ON((unsigned)value > SUID_DUMP_ROOT))
                return;

        set_mask_bits(&mm->flags, MMF_DUMPABLE_MASK, value);
}

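/*
 * The three values (from <linux/sched/coredump.h>): SUID_DUMP_DISABLE
 * produces no core dump at all, SUID_DUMP_USER dumps as the process's
 * user, and SUID_DUMP_ROOT produces a dump readable only by root.
 */
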
SYSCALL_DEFINE3(execve,
                const char __user *, filename,
                const char __user *const __user *, argv,
                const char __user *const __user *, envp)
{
        return do_execve(getname(filename), argv, envp);
}

SYSCALL_DEFINE5(execveat,
                int, fd, const char __user *, filename,
                const char __user *const __user *, argv,
                const char __user *const __user *, envp,
                int, flags)
{
        int lookup_flags = (flags & AT_EMPTY_PATH) ? LOOKUP_EMPTY : 0;

        return do_execveat(fd,
                           getname_flags(filename, lookup_flags, NULL),
                           argv, envp, flags);
}

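/*
 * Illustrative userspace use of execveat() (not part of this file);
 * this is the kernel side of glibc's fexecve(3):
 *
 *	int fd = open("/usr/bin/env", O_PATH | O_CLOEXEC);
 *	syscall(__NR_execveat, fd, "", argv, envp, AT_EMPTY_PATH);
 */
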
#ifdef CONFIG_COMPAT
COMPAT_SYSCALL_DEFINE3(execve, const char __user *, filename,
        const compat_uptr_t __user *, argv,
        const compat_uptr_t __user *, envp)
{
        return compat_do_execve(getname(filename), argv, envp);
}

COMPAT_SYSCALL_DEFINE5(execveat, int, fd,
                       const char __user *, filename,
                       const compat_uptr_t __user *, argv,
                       const compat_uptr_t __user *, envp,
                       int, flags)
{
        int lookup_flags = (flags & AT_EMPTY_PATH) ? LOOKUP_EMPTY : 0;

        return compat_do_execveat(fd,
                                  getname_flags(filename, lookup_flags, NULL),
                                  argv, envp, flags);
}
#endif