Merge tag 'x86_urgent_for_v5.14_rc6' of git://git.kernel.org/pub/scm/linux/kernel...
[linux-2.6-microblaze.git] / fs / exec.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/fs/exec.c
4  *
5  *  Copyright (C) 1991, 1992  Linus Torvalds
6  */
7
8 /*
9  * #!-checking implemented by tytso.
10  */
11 /*
12  * Demand-loading implemented 01.12.91 - no need to read anything but
13  * the header into memory. The inode of the executable is put into
14  * "current->executable", and page faults do the actual loading. Clean.
15  *
16  * Once more I can proudly say that linux stood up to being changed: it
17  * was less than 2 hours work to get demand-loading completely implemented.
18  *
19  * Demand loading changed July 1993 by Eric Youngdale.   Use mmap instead,
20  * current->executable is only used by the procfs.  This allows a dispatch
21  * table to check for several different types  of binary formats.  We keep
22  * trying until we recognize the file or we run out of supported binary
23  * formats.
24  */
25
26 #include <linux/kernel_read_file.h>
27 #include <linux/slab.h>
28 #include <linux/file.h>
29 #include <linux/fdtable.h>
30 #include <linux/mm.h>
31 #include <linux/vmacache.h>
32 #include <linux/stat.h>
33 #include <linux/fcntl.h>
34 #include <linux/swap.h>
35 #include <linux/string.h>
36 #include <linux/init.h>
37 #include <linux/sched/mm.h>
38 #include <linux/sched/coredump.h>
39 #include <linux/sched/signal.h>
40 #include <linux/sched/numa_balancing.h>
41 #include <linux/sched/task.h>
42 #include <linux/pagemap.h>
43 #include <linux/perf_event.h>
44 #include <linux/highmem.h>
45 #include <linux/spinlock.h>
46 #include <linux/key.h>
47 #include <linux/personality.h>
48 #include <linux/binfmts.h>
49 #include <linux/utsname.h>
50 #include <linux/pid_namespace.h>
51 #include <linux/module.h>
52 #include <linux/namei.h>
53 #include <linux/mount.h>
54 #include <linux/security.h>
55 #include <linux/syscalls.h>
56 #include <linux/tsacct_kern.h>
57 #include <linux/cn_proc.h>
58 #include <linux/audit.h>
59 #include <linux/tracehook.h>
60 #include <linux/kmod.h>
61 #include <linux/fsnotify.h>
62 #include <linux/fs_struct.h>
63 #include <linux/oom.h>
64 #include <linux/compat.h>
65 #include <linux/vmalloc.h>
66 #include <linux/io_uring.h>
67 #include <linux/syscall_user_dispatch.h>
68
69 #include <linux/uaccess.h>
70 #include <asm/mmu_context.h>
71 #include <asm/tlb.h>
72
73 #include <trace/events/task.h>
74 #include "internal.h"
75
76 #include <trace/events/sched.h>
77
78 static int bprm_creds_from_file(struct linux_binprm *bprm);
79
80 int suid_dumpable = 0;
81
82 static LIST_HEAD(formats);
83 static DEFINE_RWLOCK(binfmt_lock);
84
85 void __register_binfmt(struct linux_binfmt * fmt, int insert)
86 {
87         write_lock(&binfmt_lock);
88         insert ? list_add(&fmt->lh, &formats) :
89                  list_add_tail(&fmt->lh, &formats);
90         write_unlock(&binfmt_lock);
91 }
92
93 EXPORT_SYMBOL(__register_binfmt);
94
95 void unregister_binfmt(struct linux_binfmt * fmt)
96 {
97         write_lock(&binfmt_lock);
98         list_del(&fmt->lh);
99         write_unlock(&binfmt_lock);
100 }
101
102 EXPORT_SYMBOL(unregister_binfmt);
103
104 static inline void put_binfmt(struct linux_binfmt * fmt)
105 {
106         module_put(fmt->module);
107 }
108
109 bool path_noexec(const struct path *path)
110 {
111         return (path->mnt->mnt_flags & MNT_NOEXEC) ||
112                (path->mnt->mnt_sb->s_iflags & SB_I_NOEXEC);
113 }
114
115 #ifdef CONFIG_USELIB
116 /*
117  * Note that a shared library must be both readable and executable due to
118  * security reasons.
119  *
120  * Also note that we take the address to load from from the file itself.
121  */
122 SYSCALL_DEFINE1(uselib, const char __user *, library)
123 {
124         struct linux_binfmt *fmt;
125         struct file *file;
126         struct filename *tmp = getname(library);
127         int error = PTR_ERR(tmp);
128         static const struct open_flags uselib_flags = {
129                 .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
130                 .acc_mode = MAY_READ | MAY_EXEC,
131                 .intent = LOOKUP_OPEN,
132                 .lookup_flags = LOOKUP_FOLLOW,
133         };
134
135         if (IS_ERR(tmp))
136                 goto out;
137
138         file = do_filp_open(AT_FDCWD, tmp, &uselib_flags);
139         putname(tmp);
140         error = PTR_ERR(file);
141         if (IS_ERR(file))
142                 goto out;
143
144         /*
145          * may_open() has already checked for this, so it should be
146          * impossible to trip now. But we need to be extra cautious
147          * and check again at the very end too.
148          */
149         error = -EACCES;
150         if (WARN_ON_ONCE(!S_ISREG(file_inode(file)->i_mode) ||
151                          path_noexec(&file->f_path)))
152                 goto exit;
153
154         fsnotify_open(file);
155
156         error = -ENOEXEC;
157
158         read_lock(&binfmt_lock);
159         list_for_each_entry(fmt, &formats, lh) {
160                 if (!fmt->load_shlib)
161                         continue;
162                 if (!try_module_get(fmt->module))
163                         continue;
164                 read_unlock(&binfmt_lock);
165                 error = fmt->load_shlib(file);
166                 read_lock(&binfmt_lock);
167                 put_binfmt(fmt);
168                 if (error != -ENOEXEC)
169                         break;
170         }
171         read_unlock(&binfmt_lock);
172 exit:
173         fput(file);
174 out:
175         return error;
176 }
177 #endif /* #ifdef CONFIG_USELIB */
178
179 #ifdef CONFIG_MMU
180 /*
181  * The nascent bprm->mm is not visible until exec_mmap() but it can
182  * use a lot of memory, account these pages in current->mm temporary
183  * for oom_badness()->get_mm_rss(). Once exec succeeds or fails, we
184  * change the counter back via acct_arg_size(0).
185  */
186 static void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
187 {
188         struct mm_struct *mm = current->mm;
189         long diff = (long)(pages - bprm->vma_pages);
190
191         if (!mm || !diff)
192                 return;
193
194         bprm->vma_pages = pages;
195         add_mm_counter(mm, MM_ANONPAGES, diff);
196 }
197
198 static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
199                 int write)
200 {
201         struct page *page;
202         int ret;
203         unsigned int gup_flags = FOLL_FORCE;
204
205 #ifdef CONFIG_STACK_GROWSUP
206         if (write) {
207                 ret = expand_downwards(bprm->vma, pos);
208                 if (ret < 0)
209                         return NULL;
210         }
211 #endif
212
213         if (write)
214                 gup_flags |= FOLL_WRITE;
215
216         /*
217          * We are doing an exec().  'current' is the process
218          * doing the exec and bprm->mm is the new process's mm.
219          */
220         ret = get_user_pages_remote(bprm->mm, pos, 1, gup_flags,
221                         &page, NULL, NULL);
222         if (ret <= 0)
223                 return NULL;
224
225         if (write)
226                 acct_arg_size(bprm, vma_pages(bprm->vma));
227
228         return page;
229 }
230
231 static void put_arg_page(struct page *page)
232 {
233         put_page(page);
234 }
235
236 static void free_arg_pages(struct linux_binprm *bprm)
237 {
238 }
239
240 static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
241                 struct page *page)
242 {
243         flush_cache_page(bprm->vma, pos, page_to_pfn(page));
244 }
245
246 static int __bprm_mm_init(struct linux_binprm *bprm)
247 {
248         int err;
249         struct vm_area_struct *vma = NULL;
250         struct mm_struct *mm = bprm->mm;
251
252         bprm->vma = vma = vm_area_alloc(mm);
253         if (!vma)
254                 return -ENOMEM;
255         vma_set_anonymous(vma);
256
257         if (mmap_write_lock_killable(mm)) {
258                 err = -EINTR;
259                 goto err_free;
260         }
261
262         /*
263          * Place the stack at the largest stack address the architecture
264          * supports. Later, we'll move this to an appropriate place. We don't
265          * use STACK_TOP because that can depend on attributes which aren't
266          * configured yet.
267          */
268         BUILD_BUG_ON(VM_STACK_FLAGS & VM_STACK_INCOMPLETE_SETUP);
269         vma->vm_end = STACK_TOP_MAX;
270         vma->vm_start = vma->vm_end - PAGE_SIZE;
271         vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
272         vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
273
274         err = insert_vm_struct(mm, vma);
275         if (err)
276                 goto err;
277
278         mm->stack_vm = mm->total_vm = 1;
279         mmap_write_unlock(mm);
280         bprm->p = vma->vm_end - sizeof(void *);
281         return 0;
282 err:
283         mmap_write_unlock(mm);
284 err_free:
285         bprm->vma = NULL;
286         vm_area_free(vma);
287         return err;
288 }
289
290 static bool valid_arg_len(struct linux_binprm *bprm, long len)
291 {
292         return len <= MAX_ARG_STRLEN;
293 }
294
295 #else
296
297 static inline void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
298 {
299 }
300
301 static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
302                 int write)
303 {
304         struct page *page;
305
306         page = bprm->page[pos / PAGE_SIZE];
307         if (!page && write) {
308                 page = alloc_page(GFP_HIGHUSER|__GFP_ZERO);
309                 if (!page)
310                         return NULL;
311                 bprm->page[pos / PAGE_SIZE] = page;
312         }
313
314         return page;
315 }
316
317 static void put_arg_page(struct page *page)
318 {
319 }
320
321 static void free_arg_page(struct linux_binprm *bprm, int i)
322 {
323         if (bprm->page[i]) {
324                 __free_page(bprm->page[i]);
325                 bprm->page[i] = NULL;
326         }
327 }
328
329 static void free_arg_pages(struct linux_binprm *bprm)
330 {
331         int i;
332
333         for (i = 0; i < MAX_ARG_PAGES; i++)
334                 free_arg_page(bprm, i);
335 }
336
337 static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
338                 struct page *page)
339 {
340 }
341
342 static int __bprm_mm_init(struct linux_binprm *bprm)
343 {
344         bprm->p = PAGE_SIZE * MAX_ARG_PAGES - sizeof(void *);
345         return 0;
346 }
347
348 static bool valid_arg_len(struct linux_binprm *bprm, long len)
349 {
350         return len <= bprm->p;
351 }
352
353 #endif /* CONFIG_MMU */
354
355 /*
356  * Create a new mm_struct and populate it with a temporary stack
357  * vm_area_struct.  We don't have enough context at this point to set the stack
358  * flags, permissions, and offset, so we use temporary values.  We'll update
359  * them later in setup_arg_pages().
360  */
361 static int bprm_mm_init(struct linux_binprm *bprm)
362 {
363         int err;
364         struct mm_struct *mm = NULL;
365
366         bprm->mm = mm = mm_alloc();
367         err = -ENOMEM;
368         if (!mm)
369                 goto err;
370
371         /* Save current stack limit for all calculations made during exec. */
372         task_lock(current->group_leader);
373         bprm->rlim_stack = current->signal->rlim[RLIMIT_STACK];
374         task_unlock(current->group_leader);
375
376         err = __bprm_mm_init(bprm);
377         if (err)
378                 goto err;
379
380         return 0;
381
382 err:
383         if (mm) {
384                 bprm->mm = NULL;
385                 mmdrop(mm);
386         }
387
388         return err;
389 }
390
391 struct user_arg_ptr {
392 #ifdef CONFIG_COMPAT
393         bool is_compat;
394 #endif
395         union {
396                 const char __user *const __user *native;
397 #ifdef CONFIG_COMPAT
398                 const compat_uptr_t __user *compat;
399 #endif
400         } ptr;
401 };
402
403 static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
404 {
405         const char __user *native;
406
407 #ifdef CONFIG_COMPAT
408         if (unlikely(argv.is_compat)) {
409                 compat_uptr_t compat;
410
411                 if (get_user(compat, argv.ptr.compat + nr))
412                         return ERR_PTR(-EFAULT);
413
414                 return compat_ptr(compat);
415         }
416 #endif
417
418         if (get_user(native, argv.ptr.native + nr))
419                 return ERR_PTR(-EFAULT);
420
421         return native;
422 }
423
424 /*
425  * count() counts the number of strings in array ARGV.
426  */
427 static int count(struct user_arg_ptr argv, int max)
428 {
429         int i = 0;
430
431         if (argv.ptr.native != NULL) {
432                 for (;;) {
433                         const char __user *p = get_user_arg_ptr(argv, i);
434
435                         if (!p)
436                                 break;
437
438                         if (IS_ERR(p))
439                                 return -EFAULT;
440
441                         if (i >= max)
442                                 return -E2BIG;
443                         ++i;
444
445                         if (fatal_signal_pending(current))
446                                 return -ERESTARTNOHAND;
447                         cond_resched();
448                 }
449         }
450         return i;
451 }
452
453 static int count_strings_kernel(const char *const *argv)
454 {
455         int i;
456
457         if (!argv)
458                 return 0;
459
460         for (i = 0; argv[i]; ++i) {
461                 if (i >= MAX_ARG_STRINGS)
462                         return -E2BIG;
463                 if (fatal_signal_pending(current))
464                         return -ERESTARTNOHAND;
465                 cond_resched();
466         }
467         return i;
468 }
469
470 static int bprm_stack_limits(struct linux_binprm *bprm)
471 {
472         unsigned long limit, ptr_size;
473
474         /*
475          * Limit to 1/4 of the max stack size or 3/4 of _STK_LIM
476          * (whichever is smaller) for the argv+env strings.
477          * This ensures that:
478          *  - the remaining binfmt code will not run out of stack space,
479          *  - the program will have a reasonable amount of stack left
480          *    to work from.
481          */
482         limit = _STK_LIM / 4 * 3;
483         limit = min(limit, bprm->rlim_stack.rlim_cur / 4);
484         /*
485          * We've historically supported up to 32 pages (ARG_MAX)
486          * of argument strings even with small stacks
487          */
488         limit = max_t(unsigned long, limit, ARG_MAX);
489         /*
490          * We must account for the size of all the argv and envp pointers to
491          * the argv and envp strings, since they will also take up space in
492          * the stack. They aren't stored until much later when we can't
493          * signal to the parent that the child has run out of stack space.
494          * Instead, calculate it here so it's possible to fail gracefully.
495          */
496         ptr_size = (bprm->argc + bprm->envc) * sizeof(void *);
497         if (limit <= ptr_size)
498                 return -E2BIG;
499         limit -= ptr_size;
500
501         bprm->argmin = bprm->p - limit;
502         return 0;
503 }
504
505 /*
506  * 'copy_strings()' copies argument/environment strings from the old
507  * processes's memory to the new process's stack.  The call to get_user_pages()
508  * ensures the destination page is created and not swapped out.
509  */
510 static int copy_strings(int argc, struct user_arg_ptr argv,
511                         struct linux_binprm *bprm)
512 {
513         struct page *kmapped_page = NULL;
514         char *kaddr = NULL;
515         unsigned long kpos = 0;
516         int ret;
517
518         while (argc-- > 0) {
519                 const char __user *str;
520                 int len;
521                 unsigned long pos;
522
523                 ret = -EFAULT;
524                 str = get_user_arg_ptr(argv, argc);
525                 if (IS_ERR(str))
526                         goto out;
527
528                 len = strnlen_user(str, MAX_ARG_STRLEN);
529                 if (!len)
530                         goto out;
531
532                 ret = -E2BIG;
533                 if (!valid_arg_len(bprm, len))
534                         goto out;
535
536                 /* We're going to work our way backwords. */
537                 pos = bprm->p;
538                 str += len;
539                 bprm->p -= len;
540 #ifdef CONFIG_MMU
541                 if (bprm->p < bprm->argmin)
542                         goto out;
543 #endif
544
545                 while (len > 0) {
546                         int offset, bytes_to_copy;
547
548                         if (fatal_signal_pending(current)) {
549                                 ret = -ERESTARTNOHAND;
550                                 goto out;
551                         }
552                         cond_resched();
553
554                         offset = pos % PAGE_SIZE;
555                         if (offset == 0)
556                                 offset = PAGE_SIZE;
557
558                         bytes_to_copy = offset;
559                         if (bytes_to_copy > len)
560                                 bytes_to_copy = len;
561
562                         offset -= bytes_to_copy;
563                         pos -= bytes_to_copy;
564                         str -= bytes_to_copy;
565                         len -= bytes_to_copy;
566
567                         if (!kmapped_page || kpos != (pos & PAGE_MASK)) {
568                                 struct page *page;
569
570                                 page = get_arg_page(bprm, pos, 1);
571                                 if (!page) {
572                                         ret = -E2BIG;
573                                         goto out;
574                                 }
575
576                                 if (kmapped_page) {
577                                         flush_kernel_dcache_page(kmapped_page);
578                                         kunmap(kmapped_page);
579                                         put_arg_page(kmapped_page);
580                                 }
581                                 kmapped_page = page;
582                                 kaddr = kmap(kmapped_page);
583                                 kpos = pos & PAGE_MASK;
584                                 flush_arg_page(bprm, kpos, kmapped_page);
585                         }
586                         if (copy_from_user(kaddr+offset, str, bytes_to_copy)) {
587                                 ret = -EFAULT;
588                                 goto out;
589                         }
590                 }
591         }
592         ret = 0;
593 out:
594         if (kmapped_page) {
595                 flush_kernel_dcache_page(kmapped_page);
596                 kunmap(kmapped_page);
597                 put_arg_page(kmapped_page);
598         }
599         return ret;
600 }
601
602 /*
603  * Copy and argument/environment string from the kernel to the processes stack.
604  */
605 int copy_string_kernel(const char *arg, struct linux_binprm *bprm)
606 {
607         int len = strnlen(arg, MAX_ARG_STRLEN) + 1 /* terminating NUL */;
608         unsigned long pos = bprm->p;
609
610         if (len == 0)
611                 return -EFAULT;
612         if (!valid_arg_len(bprm, len))
613                 return -E2BIG;
614
615         /* We're going to work our way backwards. */
616         arg += len;
617         bprm->p -= len;
618         if (IS_ENABLED(CONFIG_MMU) && bprm->p < bprm->argmin)
619                 return -E2BIG;
620
621         while (len > 0) {
622                 unsigned int bytes_to_copy = min_t(unsigned int, len,
623                                 min_not_zero(offset_in_page(pos), PAGE_SIZE));
624                 struct page *page;
625                 char *kaddr;
626
627                 pos -= bytes_to_copy;
628                 arg -= bytes_to_copy;
629                 len -= bytes_to_copy;
630
631                 page = get_arg_page(bprm, pos, 1);
632                 if (!page)
633                         return -E2BIG;
634                 kaddr = kmap_atomic(page);
635                 flush_arg_page(bprm, pos & PAGE_MASK, page);
636                 memcpy(kaddr + offset_in_page(pos), arg, bytes_to_copy);
637                 flush_kernel_dcache_page(page);
638                 kunmap_atomic(kaddr);
639                 put_arg_page(page);
640         }
641
642         return 0;
643 }
644 EXPORT_SYMBOL(copy_string_kernel);
645
646 static int copy_strings_kernel(int argc, const char *const *argv,
647                                struct linux_binprm *bprm)
648 {
649         while (argc-- > 0) {
650                 int ret = copy_string_kernel(argv[argc], bprm);
651                 if (ret < 0)
652                         return ret;
653                 if (fatal_signal_pending(current))
654                         return -ERESTARTNOHAND;
655                 cond_resched();
656         }
657         return 0;
658 }
659
660 #ifdef CONFIG_MMU
661
662 /*
663  * During bprm_mm_init(), we create a temporary stack at STACK_TOP_MAX.  Once
664  * the binfmt code determines where the new stack should reside, we shift it to
665  * its final location.  The process proceeds as follows:
666  *
667  * 1) Use shift to calculate the new vma endpoints.
668  * 2) Extend vma to cover both the old and new ranges.  This ensures the
669  *    arguments passed to subsequent functions are consistent.
670  * 3) Move vma's page tables to the new range.
671  * 4) Free up any cleared pgd range.
672  * 5) Shrink the vma to cover only the new range.
673  */
674 static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
675 {
676         struct mm_struct *mm = vma->vm_mm;
677         unsigned long old_start = vma->vm_start;
678         unsigned long old_end = vma->vm_end;
679         unsigned long length = old_end - old_start;
680         unsigned long new_start = old_start - shift;
681         unsigned long new_end = old_end - shift;
682         struct mmu_gather tlb;
683
684         BUG_ON(new_start > new_end);
685
686         /*
687          * ensure there are no vmas between where we want to go
688          * and where we are
689          */
690         if (vma != find_vma(mm, new_start))
691                 return -EFAULT;
692
693         /*
694          * cover the whole range: [new_start, old_end)
695          */
696         if (vma_adjust(vma, new_start, old_end, vma->vm_pgoff, NULL))
697                 return -ENOMEM;
698
699         /*
700          * move the page tables downwards, on failure we rely on
701          * process cleanup to remove whatever mess we made.
702          */
703         if (length != move_page_tables(vma, old_start,
704                                        vma, new_start, length, false))
705                 return -ENOMEM;
706
707         lru_add_drain();
708         tlb_gather_mmu(&tlb, mm);
709         if (new_end > old_start) {
710                 /*
711                  * when the old and new regions overlap clear from new_end.
712                  */
713                 free_pgd_range(&tlb, new_end, old_end, new_end,
714                         vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
715         } else {
716                 /*
717                  * otherwise, clean from old_start; this is done to not touch
718                  * the address space in [new_end, old_start) some architectures
719                  * have constraints on va-space that make this illegal (IA64) -
720                  * for the others its just a little faster.
721                  */
722                 free_pgd_range(&tlb, old_start, old_end, new_end,
723                         vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
724         }
725         tlb_finish_mmu(&tlb);
726
727         /*
728          * Shrink the vma to just the new range.  Always succeeds.
729          */
730         vma_adjust(vma, new_start, new_end, vma->vm_pgoff, NULL);
731
732         return 0;
733 }
734
735 /*
736  * Finalizes the stack vm_area_struct. The flags and permissions are updated,
737  * the stack is optionally relocated, and some extra space is added.
738  */
739 int setup_arg_pages(struct linux_binprm *bprm,
740                     unsigned long stack_top,
741                     int executable_stack)
742 {
743         unsigned long ret;
744         unsigned long stack_shift;
745         struct mm_struct *mm = current->mm;
746         struct vm_area_struct *vma = bprm->vma;
747         struct vm_area_struct *prev = NULL;
748         unsigned long vm_flags;
749         unsigned long stack_base;
750         unsigned long stack_size;
751         unsigned long stack_expand;
752         unsigned long rlim_stack;
753
754 #ifdef CONFIG_STACK_GROWSUP
755         /* Limit stack size */
756         stack_base = bprm->rlim_stack.rlim_max;
757
758         stack_base = calc_max_stack_size(stack_base);
759
760         /* Add space for stack randomization. */
761         stack_base += (STACK_RND_MASK << PAGE_SHIFT);
762
763         /* Make sure we didn't let the argument array grow too large. */
764         if (vma->vm_end - vma->vm_start > stack_base)
765                 return -ENOMEM;
766
767         stack_base = PAGE_ALIGN(stack_top - stack_base);
768
769         stack_shift = vma->vm_start - stack_base;
770         mm->arg_start = bprm->p - stack_shift;
771         bprm->p = vma->vm_end - stack_shift;
772 #else
773         stack_top = arch_align_stack(stack_top);
774         stack_top = PAGE_ALIGN(stack_top);
775
776         if (unlikely(stack_top < mmap_min_addr) ||
777             unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
778                 return -ENOMEM;
779
780         stack_shift = vma->vm_end - stack_top;
781
782         bprm->p -= stack_shift;
783         mm->arg_start = bprm->p;
784 #endif
785
786         if (bprm->loader)
787                 bprm->loader -= stack_shift;
788         bprm->exec -= stack_shift;
789
790         if (mmap_write_lock_killable(mm))
791                 return -EINTR;
792
793         vm_flags = VM_STACK_FLAGS;
794
795         /*
796          * Adjust stack execute permissions; explicitly enable for
797          * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
798          * (arch default) otherwise.
799          */
800         if (unlikely(executable_stack == EXSTACK_ENABLE_X))
801                 vm_flags |= VM_EXEC;
802         else if (executable_stack == EXSTACK_DISABLE_X)
803                 vm_flags &= ~VM_EXEC;
804         vm_flags |= mm->def_flags;
805         vm_flags |= VM_STACK_INCOMPLETE_SETUP;
806
807         ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
808                         vm_flags);
809         if (ret)
810                 goto out_unlock;
811         BUG_ON(prev != vma);
812
813         if (unlikely(vm_flags & VM_EXEC)) {
814                 pr_warn_once("process '%pD4' started with executable stack\n",
815                              bprm->file);
816         }
817
818         /* Move stack pages down in memory. */
819         if (stack_shift) {
820                 ret = shift_arg_pages(vma, stack_shift);
821                 if (ret)
822                         goto out_unlock;
823         }
824
825         /* mprotect_fixup is overkill to remove the temporary stack flags */
826         vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
827
828         stack_expand = 131072UL; /* randomly 32*4k (or 2*64k) pages */
829         stack_size = vma->vm_end - vma->vm_start;
830         /*
831          * Align this down to a page boundary as expand_stack
832          * will align it up.
833          */
834         rlim_stack = bprm->rlim_stack.rlim_cur & PAGE_MASK;
835 #ifdef CONFIG_STACK_GROWSUP
836         if (stack_size + stack_expand > rlim_stack)
837                 stack_base = vma->vm_start + rlim_stack;
838         else
839                 stack_base = vma->vm_end + stack_expand;
840 #else
841         if (stack_size + stack_expand > rlim_stack)
842                 stack_base = vma->vm_end - rlim_stack;
843         else
844                 stack_base = vma->vm_start - stack_expand;
845 #endif
846         current->mm->start_stack = bprm->p;
847         ret = expand_stack(vma, stack_base);
848         if (ret)
849                 ret = -EFAULT;
850
851 out_unlock:
852         mmap_write_unlock(mm);
853         return ret;
854 }
855 EXPORT_SYMBOL(setup_arg_pages);
856
857 #else
858
859 /*
860  * Transfer the program arguments and environment from the holding pages
861  * onto the stack. The provided stack pointer is adjusted accordingly.
862  */
863 int transfer_args_to_stack(struct linux_binprm *bprm,
864                            unsigned long *sp_location)
865 {
866         unsigned long index, stop, sp;
867         int ret = 0;
868
869         stop = bprm->p >> PAGE_SHIFT;
870         sp = *sp_location;
871
872         for (index = MAX_ARG_PAGES - 1; index >= stop; index--) {
873                 unsigned int offset = index == stop ? bprm->p & ~PAGE_MASK : 0;
874                 char *src = kmap(bprm->page[index]) + offset;
875                 sp -= PAGE_SIZE - offset;
876                 if (copy_to_user((void *) sp, src, PAGE_SIZE - offset) != 0)
877                         ret = -EFAULT;
878                 kunmap(bprm->page[index]);
879                 if (ret)
880                         goto out;
881         }
882
883         *sp_location = sp;
884
885 out:
886         return ret;
887 }
888 EXPORT_SYMBOL(transfer_args_to_stack);
889
890 #endif /* CONFIG_MMU */
891
892 static struct file *do_open_execat(int fd, struct filename *name, int flags)
893 {
894         struct file *file;
895         int err;
896         struct open_flags open_exec_flags = {
897                 .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
898                 .acc_mode = MAY_EXEC,
899                 .intent = LOOKUP_OPEN,
900                 .lookup_flags = LOOKUP_FOLLOW,
901         };
902
903         if ((flags & ~(AT_SYMLINK_NOFOLLOW | AT_EMPTY_PATH)) != 0)
904                 return ERR_PTR(-EINVAL);
905         if (flags & AT_SYMLINK_NOFOLLOW)
906                 open_exec_flags.lookup_flags &= ~LOOKUP_FOLLOW;
907         if (flags & AT_EMPTY_PATH)
908                 open_exec_flags.lookup_flags |= LOOKUP_EMPTY;
909
910         file = do_filp_open(fd, name, &open_exec_flags);
911         if (IS_ERR(file))
912                 goto out;
913
914         /*
915          * may_open() has already checked for this, so it should be
916          * impossible to trip now. But we need to be extra cautious
917          * and check again at the very end too.
918          */
919         err = -EACCES;
920         if (WARN_ON_ONCE(!S_ISREG(file_inode(file)->i_mode) ||
921                          path_noexec(&file->f_path)))
922                 goto exit;
923
924         err = deny_write_access(file);
925         if (err)
926                 goto exit;
927
928         if (name->name[0] != '\0')
929                 fsnotify_open(file);
930
931 out:
932         return file;
933
934 exit:
935         fput(file);
936         return ERR_PTR(err);
937 }
938
939 struct file *open_exec(const char *name)
940 {
941         struct filename *filename = getname_kernel(name);
942         struct file *f = ERR_CAST(filename);
943
944         if (!IS_ERR(filename)) {
945                 f = do_open_execat(AT_FDCWD, filename, 0);
946                 putname(filename);
947         }
948         return f;
949 }
950 EXPORT_SYMBOL(open_exec);
951
952 #if defined(CONFIG_HAVE_AOUT) || defined(CONFIG_BINFMT_FLAT) || \
953     defined(CONFIG_BINFMT_ELF_FDPIC)
954 ssize_t read_code(struct file *file, unsigned long addr, loff_t pos, size_t len)
955 {
956         ssize_t res = vfs_read(file, (void __user *)addr, len, &pos);
957         if (res > 0)
958                 flush_icache_user_range(addr, addr + len);
959         return res;
960 }
961 EXPORT_SYMBOL(read_code);
962 #endif
963
964 /*
965  * Maps the mm_struct mm into the current task struct.
966  * On success, this function returns with exec_update_lock
967  * held for writing.
968  */
969 static int exec_mmap(struct mm_struct *mm)
970 {
971         struct task_struct *tsk;
972         struct mm_struct *old_mm, *active_mm;
973         int ret;
974
975         /* Notify parent that we're no longer interested in the old VM */
976         tsk = current;
977         old_mm = current->mm;
978         exec_mm_release(tsk, old_mm);
979         if (old_mm)
980                 sync_mm_rss(old_mm);
981
982         ret = down_write_killable(&tsk->signal->exec_update_lock);
983         if (ret)
984                 return ret;
985
986         if (old_mm) {
987                 /*
988                  * Make sure that if there is a core dump in progress
989                  * for the old mm, we get out and die instead of going
990                  * through with the exec.  We must hold mmap_lock around
991                  * checking core_state and changing tsk->mm.
992                  */
993                 mmap_read_lock(old_mm);
994                 if (unlikely(old_mm->core_state)) {
995                         mmap_read_unlock(old_mm);
996                         up_write(&tsk->signal->exec_update_lock);
997                         return -EINTR;
998                 }
999         }
1000
1001         task_lock(tsk);
1002         membarrier_exec_mmap(mm);
1003
1004         local_irq_disable();
1005         active_mm = tsk->active_mm;
1006         tsk->active_mm = mm;
1007         tsk->mm = mm;
1008         /*
1009          * This prevents preemption while active_mm is being loaded and
1010          * it and mm are being updated, which could cause problems for
1011          * lazy tlb mm refcounting when these are updated by context
1012          * switches. Not all architectures can handle irqs off over
1013          * activate_mm yet.
1014          */
1015         if (!IS_ENABLED(CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM))
1016                 local_irq_enable();
1017         activate_mm(active_mm, mm);
1018         if (IS_ENABLED(CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM))
1019                 local_irq_enable();
1020         tsk->mm->vmacache_seqnum = 0;
1021         vmacache_flush(tsk);
1022         task_unlock(tsk);
1023         if (old_mm) {
1024                 mmap_read_unlock(old_mm);
1025                 BUG_ON(active_mm != old_mm);
1026                 setmax_mm_hiwater_rss(&tsk->signal->maxrss, old_mm);
1027                 mm_update_next_owner(old_mm);
1028                 mmput(old_mm);
1029                 return 0;
1030         }
1031         mmdrop(active_mm);
1032         return 0;
1033 }
1034
1035 static int de_thread(struct task_struct *tsk)
1036 {
1037         struct signal_struct *sig = tsk->signal;
1038         struct sighand_struct *oldsighand = tsk->sighand;
1039         spinlock_t *lock = &oldsighand->siglock;
1040
1041         if (thread_group_empty(tsk))
1042                 goto no_thread_group;
1043
1044         /*
1045          * Kill all other threads in the thread group.
1046          */
1047         spin_lock_irq(lock);
1048         if (signal_group_exit(sig)) {
1049                 /*
1050                  * Another group action in progress, just
1051                  * return so that the signal is processed.
1052                  */
1053                 spin_unlock_irq(lock);
1054                 return -EAGAIN;
1055         }
1056
1057         sig->group_exit_task = tsk;
1058         sig->notify_count = zap_other_threads(tsk);
1059         if (!thread_group_leader(tsk))
1060                 sig->notify_count--;
1061
1062         while (sig->notify_count) {
1063                 __set_current_state(TASK_KILLABLE);
1064                 spin_unlock_irq(lock);
1065                 schedule();
1066                 if (__fatal_signal_pending(tsk))
1067                         goto killed;
1068                 spin_lock_irq(lock);
1069         }
1070         spin_unlock_irq(lock);
1071
1072         /*
1073          * At this point all other threads have exited, all we have to
1074          * do is to wait for the thread group leader to become inactive,
1075          * and to assume its PID:
1076          */
1077         if (!thread_group_leader(tsk)) {
1078                 struct task_struct *leader = tsk->group_leader;
1079
1080                 for (;;) {
1081                         cgroup_threadgroup_change_begin(tsk);
1082                         write_lock_irq(&tasklist_lock);
1083                         /*
1084                          * Do this under tasklist_lock to ensure that
1085                          * exit_notify() can't miss ->group_exit_task
1086                          */
1087                         sig->notify_count = -1;
1088                         if (likely(leader->exit_state))
1089                                 break;
1090                         __set_current_state(TASK_KILLABLE);
1091                         write_unlock_irq(&tasklist_lock);
1092                         cgroup_threadgroup_change_end(tsk);
1093                         schedule();
1094                         if (__fatal_signal_pending(tsk))
1095                                 goto killed;
1096                 }
1097
1098                 /*
1099                  * The only record we have of the real-time age of a
1100                  * process, regardless of execs it's done, is start_time.
1101                  * All the past CPU time is accumulated in signal_struct
1102                  * from sister threads now dead.  But in this non-leader
1103                  * exec, nothing survives from the original leader thread,
1104                  * whose birth marks the true age of this process now.
1105                  * When we take on its identity by switching to its PID, we
1106                  * also take its birthdate (always earlier than our own).
1107                  */
1108                 tsk->start_time = leader->start_time;
1109                 tsk->start_boottime = leader->start_boottime;
1110
1111                 BUG_ON(!same_thread_group(leader, tsk));
1112                 /*
1113                  * An exec() starts a new thread group with the
1114                  * TGID of the previous thread group. Rehash the
1115                  * two threads with a switched PID, and release
1116                  * the former thread group leader:
1117                  */
1118
1119                 /* Become a process group leader with the old leader's pid.
1120                  * The old leader becomes a thread of the this thread group.
1121                  */
1122                 exchange_tids(tsk, leader);
1123                 transfer_pid(leader, tsk, PIDTYPE_TGID);
1124                 transfer_pid(leader, tsk, PIDTYPE_PGID);
1125                 transfer_pid(leader, tsk, PIDTYPE_SID);
1126
1127                 list_replace_rcu(&leader->tasks, &tsk->tasks);
1128                 list_replace_init(&leader->sibling, &tsk->sibling);
1129
1130                 tsk->group_leader = tsk;
1131                 leader->group_leader = tsk;
1132
1133                 tsk->exit_signal = SIGCHLD;
1134                 leader->exit_signal = -1;
1135
1136                 BUG_ON(leader->exit_state != EXIT_ZOMBIE);
1137                 leader->exit_state = EXIT_DEAD;
1138
1139                 /*
1140                  * We are going to release_task()->ptrace_unlink() silently,
1141                  * the tracer can sleep in do_wait(). EXIT_DEAD guarantees
1142                  * the tracer wont't block again waiting for this thread.
1143                  */
1144                 if (unlikely(leader->ptrace))
1145                         __wake_up_parent(leader, leader->parent);
1146                 write_unlock_irq(&tasklist_lock);
1147                 cgroup_threadgroup_change_end(tsk);
1148
1149                 release_task(leader);
1150         }
1151
1152         sig->group_exit_task = NULL;
1153         sig->notify_count = 0;
1154
1155 no_thread_group:
1156         /* we have changed execution domain */
1157         tsk->exit_signal = SIGCHLD;
1158
1159         BUG_ON(!thread_group_leader(tsk));
1160         return 0;
1161
1162 killed:
1163         /* protects against exit_notify() and __exit_signal() */
1164         read_lock(&tasklist_lock);
1165         sig->group_exit_task = NULL;
1166         sig->notify_count = 0;
1167         read_unlock(&tasklist_lock);
1168         return -EAGAIN;
1169 }
1170
1171
1172 /*
1173  * This function makes sure the current process has its own signal table,
1174  * so that flush_signal_handlers can later reset the handlers without
1175  * disturbing other processes.  (Other processes might share the signal
1176  * table via the CLONE_SIGHAND option to clone().)
1177  */
1178 static int unshare_sighand(struct task_struct *me)
1179 {
1180         struct sighand_struct *oldsighand = me->sighand;
1181
1182         if (refcount_read(&oldsighand->count) != 1) {
1183                 struct sighand_struct *newsighand;
1184                 /*
1185                  * This ->sighand is shared with the CLONE_SIGHAND
1186                  * but not CLONE_THREAD task, switch to the new one.
1187                  */
1188                 newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
1189                 if (!newsighand)
1190                         return -ENOMEM;
1191
1192                 refcount_set(&newsighand->count, 1);
1193                 memcpy(newsighand->action, oldsighand->action,
1194                        sizeof(newsighand->action));
1195
1196                 write_lock_irq(&tasklist_lock);
1197                 spin_lock(&oldsighand->siglock);
1198                 rcu_assign_pointer(me->sighand, newsighand);
1199                 spin_unlock(&oldsighand->siglock);
1200                 write_unlock_irq(&tasklist_lock);
1201
1202                 __cleanup_sighand(oldsighand);
1203         }
1204         return 0;
1205 }
1206
1207 char *__get_task_comm(char *buf, size_t buf_size, struct task_struct *tsk)
1208 {
1209         task_lock(tsk);
1210         strncpy(buf, tsk->comm, buf_size);
1211         task_unlock(tsk);
1212         return buf;
1213 }
1214 EXPORT_SYMBOL_GPL(__get_task_comm);
1215
1216 /*
1217  * These functions flushes out all traces of the currently running executable
1218  * so that a new one can be started
1219  */
1220
1221 void __set_task_comm(struct task_struct *tsk, const char *buf, bool exec)
1222 {
1223         task_lock(tsk);
1224         trace_task_rename(tsk, buf);
1225         strlcpy(tsk->comm, buf, sizeof(tsk->comm));
1226         task_unlock(tsk);
1227         perf_event_comm(tsk, exec);
1228 }
1229
1230 /*
1231  * Calling this is the point of no return. None of the failures will be
1232  * seen by userspace since either the process is already taking a fatal
1233  * signal (via de_thread() or coredump), or will have SEGV raised
1234  * (after exec_mmap()) by search_binary_handler (see below).
1235  */
1236 int begin_new_exec(struct linux_binprm * bprm)
1237 {
1238         struct task_struct *me = current;
1239         int retval;
1240
1241         /* Once we are committed compute the creds */
1242         retval = bprm_creds_from_file(bprm);
1243         if (retval)
1244                 return retval;
1245
1246         /*
1247          * Ensure all future errors are fatal.
1248          */
1249         bprm->point_of_no_return = true;
1250
1251         /*
1252          * Make this the only thread in the thread group.
1253          */
1254         retval = de_thread(me);
1255         if (retval)
1256                 goto out;
1257
1258         /*
1259          * Cancel any io_uring activity across execve
1260          */
1261         io_uring_task_cancel();
1262
1263         /* Ensure the files table is not shared. */
1264         retval = unshare_files();
1265         if (retval)
1266                 goto out;
1267
1268         /*
1269          * Must be called _before_ exec_mmap() as bprm->mm is
1270          * not visibile until then. This also enables the update
1271          * to be lockless.
1272          */
1273         set_mm_exe_file(bprm->mm, bprm->file);
1274
1275         /* If the binary is not readable then enforce mm->dumpable=0 */
1276         would_dump(bprm, bprm->file);
1277         if (bprm->have_execfd)
1278                 would_dump(bprm, bprm->executable);
1279
1280         /*
1281          * Release all of the old mmap stuff
1282          */
1283         acct_arg_size(bprm, 0);
1284         retval = exec_mmap(bprm->mm);
1285         if (retval)
1286                 goto out;
1287
1288         bprm->mm = NULL;
1289
1290 #ifdef CONFIG_POSIX_TIMERS
1291         exit_itimers(me->signal);
1292         flush_itimer_signals();
1293 #endif
1294
1295         /*
1296          * Make the signal table private.
1297          */
1298         retval = unshare_sighand(me);
1299         if (retval)
1300                 goto out_unlock;
1301
1302         /*
1303          * Ensure that the uaccess routines can actually operate on userspace
1304          * pointers:
1305          */
1306         force_uaccess_begin();
1307
1308         me->flags &= ~(PF_RANDOMIZE | PF_FORKNOEXEC | PF_KTHREAD |
1309                                         PF_NOFREEZE | PF_NO_SETAFFINITY);
1310         flush_thread();
1311         me->personality &= ~bprm->per_clear;
1312
1313         clear_syscall_work_syscall_user_dispatch(me);
1314
1315         /*
1316          * We have to apply CLOEXEC before we change whether the process is
1317          * dumpable (in setup_new_exec) to avoid a race with a process in userspace
1318          * trying to access the should-be-closed file descriptors of a process
1319          * undergoing exec(2).
1320          */
1321         do_close_on_exec(me->files);
1322
1323         if (bprm->secureexec) {
1324                 /* Make sure parent cannot signal privileged process. */
1325                 me->pdeath_signal = 0;
1326
1327                 /*
1328                  * For secureexec, reset the stack limit to sane default to
1329                  * avoid bad behavior from the prior rlimits. This has to
1330                  * happen before arch_pick_mmap_layout(), which examines
1331                  * RLIMIT_STACK, but after the point of no return to avoid
1332                  * needing to clean up the change on failure.
1333                  */
1334                 if (bprm->rlim_stack.rlim_cur > _STK_LIM)
1335                         bprm->rlim_stack.rlim_cur = _STK_LIM;
1336         }
1337
1338         me->sas_ss_sp = me->sas_ss_size = 0;
1339
1340         /*
1341          * Figure out dumpability. Note that this checking only of current
1342          * is wrong, but userspace depends on it. This should be testing
1343          * bprm->secureexec instead.
1344          */
1345         if (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP ||
1346             !(uid_eq(current_euid(), current_uid()) &&
1347               gid_eq(current_egid(), current_gid())))
1348                 set_dumpable(current->mm, suid_dumpable);
1349         else
1350                 set_dumpable(current->mm, SUID_DUMP_USER);
1351
1352         perf_event_exec();
1353         __set_task_comm(me, kbasename(bprm->filename), true);
1354
1355         /* An exec changes our domain. We are no longer part of the thread
1356            group */
1357         WRITE_ONCE(me->self_exec_id, me->self_exec_id + 1);
1358         flush_signal_handlers(me, 0);
1359
1360         retval = set_cred_ucounts(bprm->cred);
1361         if (retval < 0)
1362                 goto out_unlock;
1363
1364         /*
1365          * install the new credentials for this executable
1366          */
1367         security_bprm_committing_creds(bprm);
1368
1369         commit_creds(bprm->cred);
1370         bprm->cred = NULL;
1371
1372         /*
1373          * Disable monitoring for regular users
1374          * when executing setuid binaries. Must
1375          * wait until new credentials are committed
1376          * by commit_creds() above
1377          */
1378         if (get_dumpable(me->mm) != SUID_DUMP_USER)
1379                 perf_event_exit_task(me);
1380         /*
1381          * cred_guard_mutex must be held at least to this point to prevent
1382          * ptrace_attach() from altering our determination of the task's
1383          * credentials; any time after this it may be unlocked.
1384          */
1385         security_bprm_committed_creds(bprm);
1386
1387         /* Pass the opened binary to the interpreter. */
1388         if (bprm->have_execfd) {
1389                 retval = get_unused_fd_flags(0);
1390                 if (retval < 0)
1391                         goto out_unlock;
1392                 fd_install(retval, bprm->executable);
1393                 bprm->executable = NULL;
1394                 bprm->execfd = retval;
1395         }
1396         return 0;
1397
1398 out_unlock:
1399         up_write(&me->signal->exec_update_lock);
1400 out:
1401         return retval;
1402 }
1403 EXPORT_SYMBOL(begin_new_exec);
1404
1405 void would_dump(struct linux_binprm *bprm, struct file *file)
1406 {
1407         struct inode *inode = file_inode(file);
1408         struct user_namespace *mnt_userns = file_mnt_user_ns(file);
1409         if (inode_permission(mnt_userns, inode, MAY_READ) < 0) {
1410                 struct user_namespace *old, *user_ns;
1411                 bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP;
1412
1413                 /* Ensure mm->user_ns contains the executable */
1414                 user_ns = old = bprm->mm->user_ns;
1415                 while ((user_ns != &init_user_ns) &&
1416                        !privileged_wrt_inode_uidgid(user_ns, mnt_userns, inode))
1417                         user_ns = user_ns->parent;
1418
1419                 if (old != user_ns) {
1420                         bprm->mm->user_ns = get_user_ns(user_ns);
1421                         put_user_ns(old);
1422                 }
1423         }
1424 }
1425 EXPORT_SYMBOL(would_dump);
1426
1427 void setup_new_exec(struct linux_binprm * bprm)
1428 {
1429         /* Setup things that can depend upon the personality */
1430         struct task_struct *me = current;
1431
1432         arch_pick_mmap_layout(me->mm, &bprm->rlim_stack);
1433
1434         arch_setup_new_exec();
1435
1436         /* Set the new mm task size. We have to do that late because it may
1437          * depend on TIF_32BIT which is only updated in flush_thread() on
1438          * some architectures like powerpc
1439          */
1440         me->mm->task_size = TASK_SIZE;
1441         up_write(&me->signal->exec_update_lock);
1442         mutex_unlock(&me->signal->cred_guard_mutex);
1443 }
1444 EXPORT_SYMBOL(setup_new_exec);
1445
1446 /* Runs immediately before start_thread() takes over. */
1447 void finalize_exec(struct linux_binprm *bprm)
1448 {
1449         /* Store any stack rlimit changes before starting thread. */
1450         task_lock(current->group_leader);
1451         current->signal->rlim[RLIMIT_STACK] = bprm->rlim_stack;
1452         task_unlock(current->group_leader);
1453 }
1454 EXPORT_SYMBOL(finalize_exec);
1455
1456 /*
1457  * Prepare credentials and lock ->cred_guard_mutex.
1458  * setup_new_exec() commits the new creds and drops the lock.
1459  * Or, if exec fails before, free_bprm() should release ->cred
1460  * and unlock.
1461  */
1462 static int prepare_bprm_creds(struct linux_binprm *bprm)
1463 {
1464         if (mutex_lock_interruptible(&current->signal->cred_guard_mutex))
1465                 return -ERESTARTNOINTR;
1466
1467         bprm->cred = prepare_exec_creds();
1468         if (likely(bprm->cred))
1469                 return 0;
1470
1471         mutex_unlock(&current->signal->cred_guard_mutex);
1472         return -ENOMEM;
1473 }
1474
1475 static void free_bprm(struct linux_binprm *bprm)
1476 {
1477         if (bprm->mm) {
1478                 acct_arg_size(bprm, 0);
1479                 mmput(bprm->mm);
1480         }
1481         free_arg_pages(bprm);
1482         if (bprm->cred) {
1483                 mutex_unlock(&current->signal->cred_guard_mutex);
1484                 abort_creds(bprm->cred);
1485         }
1486         if (bprm->file) {
1487                 allow_write_access(bprm->file);
1488                 fput(bprm->file);
1489         }
1490         if (bprm->executable)
1491                 fput(bprm->executable);
1492         /* If a binfmt changed the interp, free it. */
1493         if (bprm->interp != bprm->filename)
1494                 kfree(bprm->interp);
1495         kfree(bprm->fdpath);
1496         kfree(bprm);
1497 }
1498
1499 static struct linux_binprm *alloc_bprm(int fd, struct filename *filename)
1500 {
1501         struct linux_binprm *bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
1502         int retval = -ENOMEM;
1503         if (!bprm)
1504                 goto out;
1505
1506         if (fd == AT_FDCWD || filename->name[0] == '/') {
1507                 bprm->filename = filename->name;
1508         } else {
1509                 if (filename->name[0] == '\0')
1510                         bprm->fdpath = kasprintf(GFP_KERNEL, "/dev/fd/%d", fd);
1511                 else
1512                         bprm->fdpath = kasprintf(GFP_KERNEL, "/dev/fd/%d/%s",
1513                                                   fd, filename->name);
1514                 if (!bprm->fdpath)
1515                         goto out_free;
1516
1517                 bprm->filename = bprm->fdpath;
1518         }
1519         bprm->interp = bprm->filename;
1520
1521         retval = bprm_mm_init(bprm);
1522         if (retval)
1523                 goto out_free;
1524         return bprm;
1525
1526 out_free:
1527         free_bprm(bprm);
1528 out:
1529         return ERR_PTR(retval);
1530 }
1531
1532 int bprm_change_interp(const char *interp, struct linux_binprm *bprm)
1533 {
1534         /* If a binfmt changed the interp, free it first. */
1535         if (bprm->interp != bprm->filename)
1536                 kfree(bprm->interp);
1537         bprm->interp = kstrdup(interp, GFP_KERNEL);
1538         if (!bprm->interp)
1539                 return -ENOMEM;
1540         return 0;
1541 }
1542 EXPORT_SYMBOL(bprm_change_interp);
1543
1544 /*
1545  * determine how safe it is to execute the proposed program
1546  * - the caller must hold ->cred_guard_mutex to protect against
1547  *   PTRACE_ATTACH or seccomp thread-sync
1548  */
1549 static void check_unsafe_exec(struct linux_binprm *bprm)
1550 {
1551         struct task_struct *p = current, *t;
1552         unsigned n_fs;
1553
1554         if (p->ptrace)
1555                 bprm->unsafe |= LSM_UNSAFE_PTRACE;
1556
1557         /*
1558          * This isn't strictly necessary, but it makes it harder for LSMs to
1559          * mess up.
1560          */
1561         if (task_no_new_privs(current))
1562                 bprm->unsafe |= LSM_UNSAFE_NO_NEW_PRIVS;
1563
1564         t = p;
1565         n_fs = 1;
1566         spin_lock(&p->fs->lock);
1567         rcu_read_lock();
1568         while_each_thread(p, t) {
1569                 if (t->fs == p->fs)
1570                         n_fs++;
1571         }
1572         rcu_read_unlock();
1573
1574         if (p->fs->users > n_fs)
1575                 bprm->unsafe |= LSM_UNSAFE_SHARE;
1576         else
1577                 p->fs->in_exec = 1;
1578         spin_unlock(&p->fs->lock);
1579 }
1580
1581 static void bprm_fill_uid(struct linux_binprm *bprm, struct file *file)
1582 {
1583         /* Handle suid and sgid on files */
1584         struct user_namespace *mnt_userns;
1585         struct inode *inode;
1586         unsigned int mode;
1587         kuid_t uid;
1588         kgid_t gid;
1589
1590         if (!mnt_may_suid(file->f_path.mnt))
1591                 return;
1592
1593         if (task_no_new_privs(current))
1594                 return;
1595
1596         inode = file->f_path.dentry->d_inode;
1597         mode = READ_ONCE(inode->i_mode);
1598         if (!(mode & (S_ISUID|S_ISGID)))
1599                 return;
1600
1601         mnt_userns = file_mnt_user_ns(file);
1602
1603         /* Be careful if suid/sgid is set */
1604         inode_lock(inode);
1605
1606         /* reload atomically mode/uid/gid now that lock held */
1607         mode = inode->i_mode;
1608         uid = i_uid_into_mnt(mnt_userns, inode);
1609         gid = i_gid_into_mnt(mnt_userns, inode);
1610         inode_unlock(inode);
1611
1612         /* We ignore suid/sgid if there are no mappings for them in the ns */
1613         if (!kuid_has_mapping(bprm->cred->user_ns, uid) ||
1614                  !kgid_has_mapping(bprm->cred->user_ns, gid))
1615                 return;
1616
1617         if (mode & S_ISUID) {
1618                 bprm->per_clear |= PER_CLEAR_ON_SETID;
1619                 bprm->cred->euid = uid;
1620         }
1621
1622         if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
1623                 bprm->per_clear |= PER_CLEAR_ON_SETID;
1624                 bprm->cred->egid = gid;
1625         }
1626 }
1627
1628 /*
1629  * Compute brpm->cred based upon the final binary.
1630  */
1631 static int bprm_creds_from_file(struct linux_binprm *bprm)
1632 {
1633         /* Compute creds based on which file? */
1634         struct file *file = bprm->execfd_creds ? bprm->executable : bprm->file;
1635
1636         bprm_fill_uid(bprm, file);
1637         return security_bprm_creds_from_file(bprm, file);
1638 }
1639
1640 /*
1641  * Fill the binprm structure from the inode.
1642  * Read the first BINPRM_BUF_SIZE bytes
1643  *
1644  * This may be called multiple times for binary chains (scripts for example).
1645  */
1646 static int prepare_binprm(struct linux_binprm *bprm)
1647 {
1648         loff_t pos = 0;
1649
1650         memset(bprm->buf, 0, BINPRM_BUF_SIZE);
1651         return kernel_read(bprm->file, bprm->buf, BINPRM_BUF_SIZE, &pos);
1652 }
1653
1654 /*
1655  * Arguments are '\0' separated strings found at the location bprm->p
1656  * points to; chop off the first by relocating brpm->p to right after
1657  * the first '\0' encountered.
1658  */
1659 int remove_arg_zero(struct linux_binprm *bprm)
1660 {
1661         int ret = 0;
1662         unsigned long offset;
1663         char *kaddr;
1664         struct page *page;
1665
1666         if (!bprm->argc)
1667                 return 0;
1668
1669         do {
1670                 offset = bprm->p & ~PAGE_MASK;
1671                 page = get_arg_page(bprm, bprm->p, 0);
1672                 if (!page) {
1673                         ret = -EFAULT;
1674                         goto out;
1675                 }
1676                 kaddr = kmap_atomic(page);
1677
1678                 for (; offset < PAGE_SIZE && kaddr[offset];
1679                                 offset++, bprm->p++)
1680                         ;
1681
1682                 kunmap_atomic(kaddr);
1683                 put_arg_page(page);
1684         } while (offset == PAGE_SIZE);
1685
1686         bprm->p++;
1687         bprm->argc--;
1688         ret = 0;
1689
1690 out:
1691         return ret;
1692 }
1693 EXPORT_SYMBOL(remove_arg_zero);
1694
1695 #define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e))
1696 /*
1697  * cycle the list of binary formats handler, until one recognizes the image
1698  */
1699 static int search_binary_handler(struct linux_binprm *bprm)
1700 {
1701         bool need_retry = IS_ENABLED(CONFIG_MODULES);
1702         struct linux_binfmt *fmt;
1703         int retval;
1704
1705         retval = prepare_binprm(bprm);
1706         if (retval < 0)
1707                 return retval;
1708
1709         retval = security_bprm_check(bprm);
1710         if (retval)
1711                 return retval;
1712
1713         retval = -ENOENT;
1714  retry:
1715         read_lock(&binfmt_lock);
1716         list_for_each_entry(fmt, &formats, lh) {
1717                 if (!try_module_get(fmt->module))
1718                         continue;
1719                 read_unlock(&binfmt_lock);
1720
1721                 retval = fmt->load_binary(bprm);
1722
1723                 read_lock(&binfmt_lock);
1724                 put_binfmt(fmt);
1725                 if (bprm->point_of_no_return || (retval != -ENOEXEC)) {
1726                         read_unlock(&binfmt_lock);
1727                         return retval;
1728                 }
1729         }
1730         read_unlock(&binfmt_lock);
1731
1732         if (need_retry) {
1733                 if (printable(bprm->buf[0]) && printable(bprm->buf[1]) &&
1734                     printable(bprm->buf[2]) && printable(bprm->buf[3]))
1735                         return retval;
1736                 if (request_module("binfmt-%04x", *(ushort *)(bprm->buf + 2)) < 0)
1737                         return retval;
1738                 need_retry = false;
1739                 goto retry;
1740         }
1741
1742         return retval;
1743 }
1744
1745 static int exec_binprm(struct linux_binprm *bprm)
1746 {
1747         pid_t old_pid, old_vpid;
1748         int ret, depth;
1749
1750         /* Need to fetch pid before load_binary changes it */
1751         old_pid = current->pid;
1752         rcu_read_lock();
1753         old_vpid = task_pid_nr_ns(current, task_active_pid_ns(current->parent));
1754         rcu_read_unlock();
1755
1756         /* This allows 4 levels of binfmt rewrites before failing hard. */
1757         for (depth = 0;; depth++) {
1758                 struct file *exec;
1759                 if (depth > 5)
1760                         return -ELOOP;
1761
1762                 ret = search_binary_handler(bprm);
1763                 if (ret < 0)
1764                         return ret;
1765                 if (!bprm->interpreter)
1766                         break;
1767
1768                 exec = bprm->file;
1769                 bprm->file = bprm->interpreter;
1770                 bprm->interpreter = NULL;
1771
1772                 allow_write_access(exec);
1773                 if (unlikely(bprm->have_execfd)) {
1774                         if (bprm->executable) {
1775                                 fput(exec);
1776                                 return -ENOEXEC;
1777                         }
1778                         bprm->executable = exec;
1779                 } else
1780                         fput(exec);
1781         }
1782
1783         audit_bprm(bprm);
1784         trace_sched_process_exec(current, old_pid, bprm);
1785         ptrace_event(PTRACE_EVENT_EXEC, old_vpid);
1786         proc_exec_connector(current);
1787         return 0;
1788 }
1789
1790 /*
1791  * sys_execve() executes a new program.
1792  */
1793 static int bprm_execve(struct linux_binprm *bprm,
1794                        int fd, struct filename *filename, int flags)
1795 {
1796         struct file *file;
1797         int retval;
1798
1799         retval = prepare_bprm_creds(bprm);
1800         if (retval)
1801                 return retval;
1802
1803         check_unsafe_exec(bprm);
1804         current->in_execve = 1;
1805
1806         file = do_open_execat(fd, filename, flags);
1807         retval = PTR_ERR(file);
1808         if (IS_ERR(file))
1809                 goto out_unmark;
1810
1811         sched_exec();
1812
1813         bprm->file = file;
1814         /*
1815          * Record that a name derived from an O_CLOEXEC fd will be
1816          * inaccessible after exec.  This allows the code in exec to
1817          * choose to fail when the executable is not mmaped into the
1818          * interpreter and an open file descriptor is not passed to
1819          * the interpreter.  This makes for a better user experience
1820          * than having the interpreter start and then immediately fail
1821          * when it finds the executable is inaccessible.
1822          */
1823         if (bprm->fdpath && get_close_on_exec(fd))
1824                 bprm->interp_flags |= BINPRM_FLAGS_PATH_INACCESSIBLE;
1825
1826         /* Set the unchanging part of bprm->cred */
1827         retval = security_bprm_creds_for_exec(bprm);
1828         if (retval)
1829                 goto out;
1830
1831         retval = exec_binprm(bprm);
1832         if (retval < 0)
1833                 goto out;
1834
1835         /* execve succeeded */
1836         current->fs->in_exec = 0;
1837         current->in_execve = 0;
1838         rseq_execve(current);
1839         acct_update_integrals(current);
1840         task_numa_free(current, false);
1841         return retval;
1842
1843 out:
1844         /*
1845          * If past the point of no return ensure the code never
1846          * returns to the userspace process.  Use an existing fatal
1847          * signal if present otherwise terminate the process with
1848          * SIGSEGV.
1849          */
1850         if (bprm->point_of_no_return && !fatal_signal_pending(current))
1851                 force_sigsegv(SIGSEGV);
1852
1853 out_unmark:
1854         current->fs->in_exec = 0;
1855         current->in_execve = 0;
1856
1857         return retval;
1858 }
1859
1860 static int do_execveat_common(int fd, struct filename *filename,
1861                               struct user_arg_ptr argv,
1862                               struct user_arg_ptr envp,
1863                               int flags)
1864 {
1865         struct linux_binprm *bprm;
1866         int retval;
1867
1868         if (IS_ERR(filename))
1869                 return PTR_ERR(filename);
1870
1871         /*
1872          * We move the actual failure in case of RLIMIT_NPROC excess from
1873          * set*uid() to execve() because too many poorly written programs
1874          * don't check setuid() return code.  Here we additionally recheck
1875          * whether NPROC limit is still exceeded.
1876          */
1877         if ((current->flags & PF_NPROC_EXCEEDED) &&
1878             is_ucounts_overlimit(current_ucounts(), UCOUNT_RLIMIT_NPROC, rlimit(RLIMIT_NPROC))) {
1879                 retval = -EAGAIN;
1880                 goto out_ret;
1881         }
1882
1883         /* We're below the limit (still or again), so we don't want to make
1884          * further execve() calls fail. */
1885         current->flags &= ~PF_NPROC_EXCEEDED;
1886
1887         bprm = alloc_bprm(fd, filename);
1888         if (IS_ERR(bprm)) {
1889                 retval = PTR_ERR(bprm);
1890                 goto out_ret;
1891         }
1892
1893         retval = count(argv, MAX_ARG_STRINGS);
1894         if (retval < 0)
1895                 goto out_free;
1896         bprm->argc = retval;
1897
1898         retval = count(envp, MAX_ARG_STRINGS);
1899         if (retval < 0)
1900                 goto out_free;
1901         bprm->envc = retval;
1902
1903         retval = bprm_stack_limits(bprm);
1904         if (retval < 0)
1905                 goto out_free;
1906
1907         retval = copy_string_kernel(bprm->filename, bprm);
1908         if (retval < 0)
1909                 goto out_free;
1910         bprm->exec = bprm->p;
1911
1912         retval = copy_strings(bprm->envc, envp, bprm);
1913         if (retval < 0)
1914                 goto out_free;
1915
1916         retval = copy_strings(bprm->argc, argv, bprm);
1917         if (retval < 0)
1918                 goto out_free;
1919
1920         retval = bprm_execve(bprm, fd, filename, flags);
1921 out_free:
1922         free_bprm(bprm);
1923
1924 out_ret:
1925         putname(filename);
1926         return retval;
1927 }
1928
1929 int kernel_execve(const char *kernel_filename,
1930                   const char *const *argv, const char *const *envp)
1931 {
1932         struct filename *filename;
1933         struct linux_binprm *bprm;
1934         int fd = AT_FDCWD;
1935         int retval;
1936
1937         filename = getname_kernel(kernel_filename);
1938         if (IS_ERR(filename))
1939                 return PTR_ERR(filename);
1940
1941         bprm = alloc_bprm(fd, filename);
1942         if (IS_ERR(bprm)) {
1943                 retval = PTR_ERR(bprm);
1944                 goto out_ret;
1945         }
1946
1947         retval = count_strings_kernel(argv);
1948         if (retval < 0)
1949                 goto out_free;
1950         bprm->argc = retval;
1951
1952         retval = count_strings_kernel(envp);
1953         if (retval < 0)
1954                 goto out_free;
1955         bprm->envc = retval;
1956
1957         retval = bprm_stack_limits(bprm);
1958         if (retval < 0)
1959                 goto out_free;
1960
1961         retval = copy_string_kernel(bprm->filename, bprm);
1962         if (retval < 0)
1963                 goto out_free;
1964         bprm->exec = bprm->p;
1965
1966         retval = copy_strings_kernel(bprm->envc, envp, bprm);
1967         if (retval < 0)
1968                 goto out_free;
1969
1970         retval = copy_strings_kernel(bprm->argc, argv, bprm);
1971         if (retval < 0)
1972                 goto out_free;
1973
1974         retval = bprm_execve(bprm, fd, filename, 0);
1975 out_free:
1976         free_bprm(bprm);
1977 out_ret:
1978         putname(filename);
1979         return retval;
1980 }
1981
1982 static int do_execve(struct filename *filename,
1983         const char __user *const __user *__argv,
1984         const char __user *const __user *__envp)
1985 {
1986         struct user_arg_ptr argv = { .ptr.native = __argv };
1987         struct user_arg_ptr envp = { .ptr.native = __envp };
1988         return do_execveat_common(AT_FDCWD, filename, argv, envp, 0);
1989 }
1990
1991 static int do_execveat(int fd, struct filename *filename,
1992                 const char __user *const __user *__argv,
1993                 const char __user *const __user *__envp,
1994                 int flags)
1995 {
1996         struct user_arg_ptr argv = { .ptr.native = __argv };
1997         struct user_arg_ptr envp = { .ptr.native = __envp };
1998
1999         return do_execveat_common(fd, filename, argv, envp, flags);
2000 }
2001
2002 #ifdef CONFIG_COMPAT
2003 static int compat_do_execve(struct filename *filename,
2004         const compat_uptr_t __user *__argv,
2005         const compat_uptr_t __user *__envp)
2006 {
2007         struct user_arg_ptr argv = {
2008                 .is_compat = true,
2009                 .ptr.compat = __argv,
2010         };
2011         struct user_arg_ptr envp = {
2012                 .is_compat = true,
2013                 .ptr.compat = __envp,
2014         };
2015         return do_execveat_common(AT_FDCWD, filename, argv, envp, 0);
2016 }
2017
2018 static int compat_do_execveat(int fd, struct filename *filename,
2019                               const compat_uptr_t __user *__argv,
2020                               const compat_uptr_t __user *__envp,
2021                               int flags)
2022 {
2023         struct user_arg_ptr argv = {
2024                 .is_compat = true,
2025                 .ptr.compat = __argv,
2026         };
2027         struct user_arg_ptr envp = {
2028                 .is_compat = true,
2029                 .ptr.compat = __envp,
2030         };
2031         return do_execveat_common(fd, filename, argv, envp, flags);
2032 }
2033 #endif
2034
2035 void set_binfmt(struct linux_binfmt *new)
2036 {
2037         struct mm_struct *mm = current->mm;
2038
2039         if (mm->binfmt)
2040                 module_put(mm->binfmt->module);
2041
2042         mm->binfmt = new;
2043         if (new)
2044                 __module_get(new->module);
2045 }
2046 EXPORT_SYMBOL(set_binfmt);
2047
2048 /*
2049  * set_dumpable stores three-value SUID_DUMP_* into mm->flags.
2050  */
2051 void set_dumpable(struct mm_struct *mm, int value)
2052 {
2053         if (WARN_ON((unsigned)value > SUID_DUMP_ROOT))
2054                 return;
2055
2056         set_mask_bits(&mm->flags, MMF_DUMPABLE_MASK, value);
2057 }
2058
2059 SYSCALL_DEFINE3(execve,
2060                 const char __user *, filename,
2061                 const char __user *const __user *, argv,
2062                 const char __user *const __user *, envp)
2063 {
2064         return do_execve(getname(filename), argv, envp);
2065 }
2066
2067 SYSCALL_DEFINE5(execveat,
2068                 int, fd, const char __user *, filename,
2069                 const char __user *const __user *, argv,
2070                 const char __user *const __user *, envp,
2071                 int, flags)
2072 {
2073         int lookup_flags = (flags & AT_EMPTY_PATH) ? LOOKUP_EMPTY : 0;
2074
2075         return do_execveat(fd,
2076                            getname_flags(filename, lookup_flags, NULL),
2077                            argv, envp, flags);
2078 }
2079
2080 #ifdef CONFIG_COMPAT
2081 COMPAT_SYSCALL_DEFINE3(execve, const char __user *, filename,
2082         const compat_uptr_t __user *, argv,
2083         const compat_uptr_t __user *, envp)
2084 {
2085         return compat_do_execve(getname(filename), argv, envp);
2086 }
2087
2088 COMPAT_SYSCALL_DEFINE5(execveat, int, fd,
2089                        const char __user *, filename,
2090                        const compat_uptr_t __user *, argv,
2091                        const compat_uptr_t __user *, envp,
2092                        int,  flags)
2093 {
2094         int lookup_flags = (flags & AT_EMPTY_PATH) ? LOOKUP_EMPTY : 0;
2095
2096         return compat_do_execveat(fd,
2097                                   getname_flags(filename, lookup_flags, NULL),
2098                                   argv, envp, flags);
2099 }
2100 #endif