1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  *  linux/fs/exec.c
4  *
5  *  Copyright (C) 1991, 1992  Linus Torvalds
6  */
7
8 /*
9  * #!-checking implemented by tytso.
10  */
11 /*
12  * Demand-loading implemented 01.12.91 - no need to read anything but
13  * the header into memory. The inode of the executable is put into
14  * "current->executable", and page faults do the actual loading. Clean.
15  *
16  * Once more I can proudly say that linux stood up to being changed: it
17  * was less than 2 hours work to get demand-loading completely implemented.
18  *
19  * Demand loading changed July 1993 by Eric Youngdale.   Use mmap instead,
20  * current->executable is only used by the procfs.  This allows a dispatch
21  * table to check for several different types  of binary formats.  We keep
22  * trying until we recognize the file or we run out of supported binary
23  * formats.
24  */
25
26 #include <linux/slab.h>
27 #include <linux/file.h>
28 #include <linux/fdtable.h>
29 #include <linux/mm.h>
30 #include <linux/vmacache.h>
31 #include <linux/stat.h>
32 #include <linux/fcntl.h>
33 #include <linux/swap.h>
34 #include <linux/string.h>
35 #include <linux/init.h>
36 #include <linux/sched/mm.h>
37 #include <linux/sched/coredump.h>
38 #include <linux/sched/signal.h>
39 #include <linux/sched/numa_balancing.h>
40 #include <linux/sched/task.h>
41 #include <linux/pagemap.h>
42 #include <linux/perf_event.h>
43 #include <linux/highmem.h>
44 #include <linux/spinlock.h>
45 #include <linux/key.h>
46 #include <linux/personality.h>
47 #include <linux/binfmts.h>
48 #include <linux/utsname.h>
49 #include <linux/pid_namespace.h>
50 #include <linux/module.h>
51 #include <linux/namei.h>
52 #include <linux/mount.h>
53 #include <linux/security.h>
54 #include <linux/syscalls.h>
55 #include <linux/tsacct_kern.h>
56 #include <linux/cn_proc.h>
57 #include <linux/audit.h>
58 #include <linux/tracehook.h>
59 #include <linux/kmod.h>
60 #include <linux/fsnotify.h>
61 #include <linux/fs_struct.h>
62 #include <linux/oom.h>
63 #include <linux/compat.h>
64 #include <linux/vmalloc.h>
65
66 #include <linux/uaccess.h>
67 #include <asm/mmu_context.h>
68 #include <asm/tlb.h>
69
70 #include <trace/events/task.h>
71 #include "internal.h"
72
73 #include <trace/events/sched.h>
74
75 static int bprm_creds_from_file(struct linux_binprm *bprm);
76
77 int suid_dumpable = 0;
78
79 static LIST_HEAD(formats);
80 static DEFINE_RWLOCK(binfmt_lock);
81
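/*
 * Register a binary format handler on the global formats list, at the
 * head when @insert is set and at the tail otherwise.  Formats without
 * a load_binary handler are rejected.
 */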
82 void __register_binfmt(struct linux_binfmt * fmt, int insert)
83 {
84         BUG_ON(!fmt);
85         if (WARN_ON(!fmt->load_binary))
86                 return;
87         write_lock(&binfmt_lock);
88         insert ? list_add(&fmt->lh, &formats) :
89                  list_add_tail(&fmt->lh, &formats);
90         write_unlock(&binfmt_lock);
91 }
92
93 EXPORT_SYMBOL(__register_binfmt);
94
95 void unregister_binfmt(struct linux_binfmt * fmt)
96 {
97         write_lock(&binfmt_lock);
98         list_del(&fmt->lh);
99         write_unlock(&binfmt_lock);
100 }
101
102 EXPORT_SYMBOL(unregister_binfmt);
103
104 static inline void put_binfmt(struct linux_binfmt * fmt)
105 {
106         module_put(fmt->module);
107 }
108
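/*
 * Return true if the file's mount or filesystem forbids execution
 * (MNT_NOEXEC on the mount or SB_I_NOEXEC on the superblock).
 */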
109 bool path_noexec(const struct path *path)
110 {
111         return (path->mnt->mnt_flags & MNT_NOEXEC) ||
112                (path->mnt->mnt_sb->s_iflags & SB_I_NOEXEC);
113 }
114
115 #ifdef CONFIG_USELIB
116 /*
117  * Note that a shared library must be both readable and executable for
118  * security reasons.
119  *
120  * Also note that we take the address to load from the file itself.
121  */
122 SYSCALL_DEFINE1(uselib, const char __user *, library)
123 {
124         struct linux_binfmt *fmt;
125         struct file *file;
126         struct filename *tmp = getname(library);
127         int error = PTR_ERR(tmp);
128         static const struct open_flags uselib_flags = {
129                 .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
130                 .acc_mode = MAY_READ | MAY_EXEC,
131                 .intent = LOOKUP_OPEN,
132                 .lookup_flags = LOOKUP_FOLLOW,
133         };
134
135         if (IS_ERR(tmp))
136                 goto out;
137
138         file = do_filp_open(AT_FDCWD, tmp, &uselib_flags);
139         putname(tmp);
140         error = PTR_ERR(file);
141         if (IS_ERR(file))
142                 goto out;
143
144         /*
145          * may_open() has already checked for this, so it should be
146          * impossible to trip now. But we need to be extra cautious
147          * and check again at the very end too.
148          */
149         error = -EACCES;
150         if (WARN_ON_ONCE(!S_ISREG(file_inode(file)->i_mode) ||
151                          path_noexec(&file->f_path)))
152                 goto exit;
153
154         fsnotify_open(file);
155
156         error = -ENOEXEC;
157
158         read_lock(&binfmt_lock);
159         list_for_each_entry(fmt, &formats, lh) {
160                 if (!fmt->load_shlib)
161                         continue;
162                 if (!try_module_get(fmt->module))
163                         continue;
164                 read_unlock(&binfmt_lock);
165                 error = fmt->load_shlib(file);
166                 read_lock(&binfmt_lock);
167                 put_binfmt(fmt);
168                 if (error != -ENOEXEC)
169                         break;
170         }
171         read_unlock(&binfmt_lock);
172 exit:
173         fput(file);
174 out:
175         return error;
176 }
177 #endif /* #ifdef CONFIG_USELIB */
178
179 #ifdef CONFIG_MMU
180 /*
181  * The nascent bprm->mm is not visible until exec_mmap(), but it can
182  * use a lot of memory, so account these pages in current->mm temporarily
183  * for oom_badness()->get_mm_rss(). Once exec succeeds or fails, we
184  * change the counter back via acct_arg_size(0).
185  */
186 static void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
187 {
188         struct mm_struct *mm = current->mm;
189         long diff = (long)(pages - bprm->vma_pages);
190
191         if (!mm || !diff)
192                 return;
193
194         bprm->vma_pages = pages;
195         add_mm_counter(mm, MM_ANONPAGES, diff);
196 }
197
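/*
 * Pin the argument page at @pos in the new mm (bprm->mm) via
 * get_user_pages_remote(); for writes, the argument-size accounting is
 * updated as well.
 */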
198 static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
199                 int write)
200 {
201         struct page *page;
202         int ret;
203         unsigned int gup_flags = FOLL_FORCE;
204
205 #ifdef CONFIG_STACK_GROWSUP
206         if (write) {
207                 ret = expand_downwards(bprm->vma, pos);
208                 if (ret < 0)
209                         return NULL;
210         }
211 #endif
212
213         if (write)
214                 gup_flags |= FOLL_WRITE;
215
216         /*
217          * We are doing an exec().  'current' is the process
218          * doing the exec and bprm->mm is the new process's mm.
219          */
220         ret = get_user_pages_remote(bprm->mm, pos, 1, gup_flags,
221                         &page, NULL, NULL);
222         if (ret <= 0)
223                 return NULL;
224
225         if (write)
226                 acct_arg_size(bprm, vma_pages(bprm->vma));
227
228         return page;
229 }
230
231 static void put_arg_page(struct page *page)
232 {
233         put_page(page);
234 }
235
236 static void free_arg_pages(struct linux_binprm *bprm)
237 {
238 }
239
240 static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
241                 struct page *page)
242 {
243         flush_cache_page(bprm->vma, pos, page_to_pfn(page));
244 }
245
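/*
 * Allocate the temporary single-page stack vma at STACK_TOP_MAX, insert
 * it into the new mm, and point bprm->p just below the top of the stack.
 */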
246 static int __bprm_mm_init(struct linux_binprm *bprm)
247 {
248         int err;
249         struct vm_area_struct *vma = NULL;
250         struct mm_struct *mm = bprm->mm;
251
252         bprm->vma = vma = vm_area_alloc(mm);
253         if (!vma)
254                 return -ENOMEM;
255         vma_set_anonymous(vma);
256
257         if (mmap_write_lock_killable(mm)) {
258                 err = -EINTR;
259                 goto err_free;
260         }
261
262         /*
263          * Place the stack at the largest stack address the architecture
264          * supports. Later, we'll move this to an appropriate place. We don't
265          * use STACK_TOP because that can depend on attributes which aren't
266          * configured yet.
267          */
268         BUILD_BUG_ON(VM_STACK_FLAGS & VM_STACK_INCOMPLETE_SETUP);
269         vma->vm_end = STACK_TOP_MAX;
270         vma->vm_start = vma->vm_end - PAGE_SIZE;
271         vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
272         vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
273
274         err = insert_vm_struct(mm, vma);
275         if (err)
276                 goto err;
277
278         mm->stack_vm = mm->total_vm = 1;
279         mmap_write_unlock(mm);
280         bprm->p = vma->vm_end - sizeof(void *);
281         return 0;
282 err:
283         mmap_write_unlock(mm);
284 err_free:
285         bprm->vma = NULL;
286         vm_area_free(vma);
287         return err;
288 }
289
290 static bool valid_arg_len(struct linux_binprm *bprm, long len)
291 {
292         return len <= MAX_ARG_STRLEN;
293 }
294
295 #else
296
297 static inline void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
298 {
299 }
300
301 static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
302                 int write)
303 {
304         struct page *page;
305
306         page = bprm->page[pos / PAGE_SIZE];
307         if (!page && write) {
308                 page = alloc_page(GFP_HIGHUSER|__GFP_ZERO);
309                 if (!page)
310                         return NULL;
311                 bprm->page[pos / PAGE_SIZE] = page;
312         }
313
314         return page;
315 }
316
317 static void put_arg_page(struct page *page)
318 {
319 }
320
321 static void free_arg_page(struct linux_binprm *bprm, int i)
322 {
323         if (bprm->page[i]) {
324                 __free_page(bprm->page[i]);
325                 bprm->page[i] = NULL;
326         }
327 }
328
329 static void free_arg_pages(struct linux_binprm *bprm)
330 {
331         int i;
332
333         for (i = 0; i < MAX_ARG_PAGES; i++)
334                 free_arg_page(bprm, i);
335 }
336
337 static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
338                 struct page *page)
339 {
340 }
341
342 static int __bprm_mm_init(struct linux_binprm *bprm)
343 {
344         bprm->p = PAGE_SIZE * MAX_ARG_PAGES - sizeof(void *);
345         return 0;
346 }
347
348 static bool valid_arg_len(struct linux_binprm *bprm, long len)
349 {
350         return len <= bprm->p;
351 }
352
353 #endif /* CONFIG_MMU */
354
355 /*
356  * Create a new mm_struct and populate it with a temporary stack
357  * vm_area_struct.  We don't have enough context at this point to set the stack
358  * flags, permissions, and offset, so we use temporary values.  We'll update
359  * them later in setup_arg_pages().
360  */
361 static int bprm_mm_init(struct linux_binprm *bprm)
362 {
363         int err;
364         struct mm_struct *mm = NULL;
365
366         bprm->mm = mm = mm_alloc();
367         err = -ENOMEM;
368         if (!mm)
369                 goto err;
370
371         /* Save current stack limit for all calculations made during exec. */
372         task_lock(current->group_leader);
373         bprm->rlim_stack = current->signal->rlim[RLIMIT_STACK];
374         task_unlock(current->group_leader);
375
376         err = __bprm_mm_init(bprm);
377         if (err)
378                 goto err;
379
380         return 0;
381
382 err:
383         if (mm) {
384                 bprm->mm = NULL;
385                 mmdrop(mm);
386         }
387
388         return err;
389 }
390
391 struct user_arg_ptr {
392 #ifdef CONFIG_COMPAT
393         bool is_compat;
394 #endif
395         union {
396                 const char __user *const __user *native;
397 #ifdef CONFIG_COMPAT
398                 const compat_uptr_t __user *compat;
399 #endif
400         } ptr;
401 };
402
403 static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
404 {
405         const char __user *native;
406
407 #ifdef CONFIG_COMPAT
408         if (unlikely(argv.is_compat)) {
409                 compat_uptr_t compat;
410
411                 if (get_user(compat, argv.ptr.compat + nr))
412                         return ERR_PTR(-EFAULT);
413
414                 return compat_ptr(compat);
415         }
416 #endif
417
418         if (get_user(native, argv.ptr.native + nr))
419                 return ERR_PTR(-EFAULT);
420
421         return native;
422 }
423
424 /*
425  * count() counts the number of strings in array ARGV.
426  */
427 static int count(struct user_arg_ptr argv, int max)
428 {
429         int i = 0;
430
431         if (argv.ptr.native != NULL) {
432                 for (;;) {
433                         const char __user *p = get_user_arg_ptr(argv, i);
434
435                         if (!p)
436                                 break;
437
438                         if (IS_ERR(p))
439                                 return -EFAULT;
440
441                         if (i >= max)
442                                 return -E2BIG;
443                         ++i;
444
445                         if (fatal_signal_pending(current))
446                                 return -ERESTARTNOHAND;
447                         cond_resched();
448                 }
449         }
450         return i;
451 }
452
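/*
 * Count the strings in a NULL-terminated kernel argv array, limited to
 * MAX_ARG_STRINGS entries.
 */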
453 static int count_strings_kernel(const char *const *argv)
454 {
455         int i;
456
457         if (!argv)
458                 return 0;
459
460         for (i = 0; argv[i]; ++i) {
461                 if (i >= MAX_ARG_STRINGS)
462                         return -E2BIG;
463                 if (fatal_signal_pending(current))
464                         return -ERESTARTNOHAND;
465                 cond_resched();
466         }
467         return i;
468 }
469
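/*
 * Work out how much stack space the argv/envp strings and pointers may
 * consume and record the lowest allowed address in bprm->argmin.
 */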
470 static int bprm_stack_limits(struct linux_binprm *bprm)
471 {
472         unsigned long limit, ptr_size;
473
474         /*
475          * Limit to 1/4 of the max stack size or 3/4 of _STK_LIM
476          * (whichever is smaller) for the argv+env strings.
477          * This ensures that:
478          *  - the remaining binfmt code will not run out of stack space,
479          *  - the program will have a reasonable amount of stack left
480          *    to work from.
481          */
482         limit = _STK_LIM / 4 * 3;
483         limit = min(limit, bprm->rlim_stack.rlim_cur / 4);
484         /*
485          * We've historically supported up to 32 pages (ARG_MAX)
486          * of argument strings even with small stacks
487          */
488         limit = max_t(unsigned long, limit, ARG_MAX);
489         /*
490          * We must account for the size of all the argv and envp pointers to
491          * the argv and envp strings, since they will also take up space in
492          * the stack. They aren't stored until much later when we can't
493          * signal to the parent that the child has run out of stack space.
494          * Instead, calculate it here so it's possible to fail gracefully.
495          */
496         ptr_size = (bprm->argc + bprm->envc) * sizeof(void *);
497         if (limit <= ptr_size)
498                 return -E2BIG;
499         limit -= ptr_size;
500
501         bprm->argmin = bprm->p - limit;
502         return 0;
503 }
504
505 /*
506  * 'copy_strings()' copies argument/environment strings from the old
507  * process's memory to the new process's stack.  The call to get_user_pages()
508  * ensures the destination page is created and not swapped out.
509  */
510 static int copy_strings(int argc, struct user_arg_ptr argv,
511                         struct linux_binprm *bprm)
512 {
513         struct page *kmapped_page = NULL;
514         char *kaddr = NULL;
515         unsigned long kpos = 0;
516         int ret;
517
518         while (argc-- > 0) {
519                 const char __user *str;
520                 int len;
521                 unsigned long pos;
522
523                 ret = -EFAULT;
524                 str = get_user_arg_ptr(argv, argc);
525                 if (IS_ERR(str))
526                         goto out;
527
528                 len = strnlen_user(str, MAX_ARG_STRLEN);
529                 if (!len)
530                         goto out;
531
532                 ret = -E2BIG;
533                 if (!valid_arg_len(bprm, len))
534                         goto out;
535
536                 /* We're going to work our way backwards. */
537                 pos = bprm->p;
538                 str += len;
539                 bprm->p -= len;
540 #ifdef CONFIG_MMU
541                 if (bprm->p < bprm->argmin)
542                         goto out;
543 #endif
544
545                 while (len > 0) {
546                         int offset, bytes_to_copy;
547
548                         if (fatal_signal_pending(current)) {
549                                 ret = -ERESTARTNOHAND;
550                                 goto out;
551                         }
552                         cond_resched();
553
554                         offset = pos % PAGE_SIZE;
555                         if (offset == 0)
556                                 offset = PAGE_SIZE;
557
558                         bytes_to_copy = offset;
559                         if (bytes_to_copy > len)
560                                 bytes_to_copy = len;
561
562                         offset -= bytes_to_copy;
563                         pos -= bytes_to_copy;
564                         str -= bytes_to_copy;
565                         len -= bytes_to_copy;
566
567                         if (!kmapped_page || kpos != (pos & PAGE_MASK)) {
568                                 struct page *page;
569
570                                 page = get_arg_page(bprm, pos, 1);
571                                 if (!page) {
572                                         ret = -E2BIG;
573                                         goto out;
574                                 }
575
576                                 if (kmapped_page) {
577                                         flush_kernel_dcache_page(kmapped_page);
578                                         kunmap(kmapped_page);
579                                         put_arg_page(kmapped_page);
580                                 }
581                                 kmapped_page = page;
582                                 kaddr = kmap(kmapped_page);
583                                 kpos = pos & PAGE_MASK;
584                                 flush_arg_page(bprm, kpos, kmapped_page);
585                         }
586                         if (copy_from_user(kaddr+offset, str, bytes_to_copy)) {
587                                 ret = -EFAULT;
588                                 goto out;
589                         }
590                 }
591         }
592         ret = 0;
593 out:
594         if (kmapped_page) {
595                 flush_kernel_dcache_page(kmapped_page);
596                 kunmap(kmapped_page);
597                 put_arg_page(kmapped_page);
598         }
599         return ret;
600 }
601
602 /*
603  * Copy an argument/environment string from the kernel to the process's stack.
604  */
605 int copy_string_kernel(const char *arg, struct linux_binprm *bprm)
606 {
607         int len = strnlen(arg, MAX_ARG_STRLEN) + 1 /* terminating NUL */;
608         unsigned long pos = bprm->p;
609
610         if (len == 0)
611                 return -EFAULT;
612         if (!valid_arg_len(bprm, len))
613                 return -E2BIG;
614
615         /* We're going to work our way backwards. */
616         arg += len;
617         bprm->p -= len;
618         if (IS_ENABLED(CONFIG_MMU) && bprm->p < bprm->argmin)
619                 return -E2BIG;
620
621         while (len > 0) {
622                 unsigned int bytes_to_copy = min_t(unsigned int, len,
623                                 min_not_zero(offset_in_page(pos), PAGE_SIZE));
624                 struct page *page;
625                 char *kaddr;
626
627                 pos -= bytes_to_copy;
628                 arg -= bytes_to_copy;
629                 len -= bytes_to_copy;
630
631                 page = get_arg_page(bprm, pos, 1);
632                 if (!page)
633                         return -E2BIG;
634                 kaddr = kmap_atomic(page);
635                 flush_arg_page(bprm, pos & PAGE_MASK, page);
636                 memcpy(kaddr + offset_in_page(pos), arg, bytes_to_copy);
637                 flush_kernel_dcache_page(page);
638                 kunmap_atomic(kaddr);
639                 put_arg_page(page);
640         }
641
642         return 0;
643 }
644 EXPORT_SYMBOL(copy_string_kernel);
645
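/*
 * Copy an array of kernel strings onto the new process's stack, one
 * string at a time via copy_string_kernel().
 */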
646 static int copy_strings_kernel(int argc, const char *const *argv,
647                                struct linux_binprm *bprm)
648 {
649         while (argc-- > 0) {
650                 int ret = copy_string_kernel(argv[argc], bprm);
651                 if (ret < 0)
652                         return ret;
653                 if (fatal_signal_pending(current))
654                         return -ERESTARTNOHAND;
655                 cond_resched();
656         }
657         return 0;
658 }
659
660 #ifdef CONFIG_MMU
661
662 /*
663  * During bprm_mm_init(), we create a temporary stack at STACK_TOP_MAX.  Once
664  * the binfmt code determines where the new stack should reside, we shift it to
665  * its final location.  The process proceeds as follows:
666  *
667  * 1) Use shift to calculate the new vma endpoints.
668  * 2) Extend vma to cover both the old and new ranges.  This ensures the
669  *    arguments passed to subsequent functions are consistent.
670  * 3) Move vma's page tables to the new range.
671  * 4) Free up any cleared pgd range.
672  * 5) Shrink the vma to cover only the new range.
673  */
674 static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
675 {
676         struct mm_struct *mm = vma->vm_mm;
677         unsigned long old_start = vma->vm_start;
678         unsigned long old_end = vma->vm_end;
679         unsigned long length = old_end - old_start;
680         unsigned long new_start = old_start - shift;
681         unsigned long new_end = old_end - shift;
682         struct mmu_gather tlb;
683
684         BUG_ON(new_start > new_end);
685
686         /*
687          * ensure there are no vmas between where we want to go
688          * and where we are
689          */
690         if (vma != find_vma(mm, new_start))
691                 return -EFAULT;
692
693         /*
694          * cover the whole range: [new_start, old_end)
695          */
696         if (vma_adjust(vma, new_start, old_end, vma->vm_pgoff, NULL))
697                 return -ENOMEM;
698
699         /*
700          * move the page tables downwards; on failure we rely on
701          * process cleanup to remove whatever mess we made.
702          */
703         if (length != move_page_tables(vma, old_start,
704                                        vma, new_start, length, false))
705                 return -ENOMEM;
706
707         lru_add_drain();
708         tlb_gather_mmu(&tlb, mm, old_start, old_end);
709         if (new_end > old_start) {
710                 /*
711                  * when the old and new regions overlap clear from new_end.
712                  */
713                 free_pgd_range(&tlb, new_end, old_end, new_end,
714                         vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
715         } else {
716                 /*
717                  * otherwise, clean from old_start; this is done to not touch
718                  * the address space in [new_end, old_start) some architectures
719                  * have constraints on va-space that make this illegal (IA64) -
720                  * for the others its just a little faster.
721                  */
722                 free_pgd_range(&tlb, old_start, old_end, new_end,
723                         vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
724         }
725         tlb_finish_mmu(&tlb, old_start, old_end);
726
727         /*
728          * Shrink the vma to just the new range.  Always succeeds.
729          */
730         vma_adjust(vma, new_start, new_end, vma->vm_pgoff, NULL);
731
732         return 0;
733 }
734
735 /*
736  * Finalizes the stack vm_area_struct. The flags and permissions are updated,
737  * the stack is optionally relocated, and some extra space is added.
738  */
739 int setup_arg_pages(struct linux_binprm *bprm,
740                     unsigned long stack_top,
741                     int executable_stack)
742 {
743         unsigned long ret;
744         unsigned long stack_shift;
745         struct mm_struct *mm = current->mm;
746         struct vm_area_struct *vma = bprm->vma;
747         struct vm_area_struct *prev = NULL;
748         unsigned long vm_flags;
749         unsigned long stack_base;
750         unsigned long stack_size;
751         unsigned long stack_expand;
752         unsigned long rlim_stack;
753
754 #ifdef CONFIG_STACK_GROWSUP
755         /* Limit stack size */
756         stack_base = bprm->rlim_stack.rlim_max;
757         if (stack_base > STACK_SIZE_MAX)
758                 stack_base = STACK_SIZE_MAX;
759
760         /* Add space for stack randomization. */
761         stack_base += (STACK_RND_MASK << PAGE_SHIFT);
762
763         /* Make sure we didn't let the argument array grow too large. */
764         if (vma->vm_end - vma->vm_start > stack_base)
765                 return -ENOMEM;
766
767         stack_base = PAGE_ALIGN(stack_top - stack_base);
768
769         stack_shift = vma->vm_start - stack_base;
770         mm->arg_start = bprm->p - stack_shift;
771         bprm->p = vma->vm_end - stack_shift;
772 #else
773         stack_top = arch_align_stack(stack_top);
774         stack_top = PAGE_ALIGN(stack_top);
775
776         if (unlikely(stack_top < mmap_min_addr) ||
777             unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
778                 return -ENOMEM;
779
780         stack_shift = vma->vm_end - stack_top;
781
782         bprm->p -= stack_shift;
783         mm->arg_start = bprm->p;
784 #endif
785
786         if (bprm->loader)
787                 bprm->loader -= stack_shift;
788         bprm->exec -= stack_shift;
789
790         if (mmap_write_lock_killable(mm))
791                 return -EINTR;
792
793         vm_flags = VM_STACK_FLAGS;
794
795         /*
796          * Adjust stack execute permissions; explicitly enable for
797          * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
798          * (arch default) otherwise.
799          */
800         if (unlikely(executable_stack == EXSTACK_ENABLE_X))
801                 vm_flags |= VM_EXEC;
802         else if (executable_stack == EXSTACK_DISABLE_X)
803                 vm_flags &= ~VM_EXEC;
804         vm_flags |= mm->def_flags;
805         vm_flags |= VM_STACK_INCOMPLETE_SETUP;
806
807         ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
808                         vm_flags);
809         if (ret)
810                 goto out_unlock;
811         BUG_ON(prev != vma);
812
813         if (unlikely(vm_flags & VM_EXEC)) {
814                 pr_warn_once("process '%pD4' started with executable stack\n",
815                              bprm->file);
816         }
817
818         /* Move stack pages down in memory. */
819         if (stack_shift) {
820                 ret = shift_arg_pages(vma, stack_shift);
821                 if (ret)
822                         goto out_unlock;
823         }
824
825         /* mprotect_fixup is overkill to remove the temporary stack flags */
826         vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
827
828         stack_expand = 131072UL; /* randomly 32*4k (or 2*64k) pages */
829         stack_size = vma->vm_end - vma->vm_start;
830         /*
831          * Align this down to a page boundary as expand_stack
832          * will align it up.
833          */
834         rlim_stack = bprm->rlim_stack.rlim_cur & PAGE_MASK;
835 #ifdef CONFIG_STACK_GROWSUP
836         if (stack_size + stack_expand > rlim_stack)
837                 stack_base = vma->vm_start + rlim_stack;
838         else
839                 stack_base = vma->vm_end + stack_expand;
840 #else
841         if (stack_size + stack_expand > rlim_stack)
842                 stack_base = vma->vm_end - rlim_stack;
843         else
844                 stack_base = vma->vm_start - stack_expand;
845 #endif
846         current->mm->start_stack = bprm->p;
847         ret = expand_stack(vma, stack_base);
848         if (ret)
849                 ret = -EFAULT;
850
851 out_unlock:
852         mmap_write_unlock(mm);
853         return ret;
854 }
855 EXPORT_SYMBOL(setup_arg_pages);
856
857 #else
858
859 /*
860  * Transfer the program arguments and environment from the holding pages
861  * onto the stack. The provided stack pointer is adjusted accordingly.
862  */
863 int transfer_args_to_stack(struct linux_binprm *bprm,
864                            unsigned long *sp_location)
865 {
866         unsigned long index, stop, sp;
867         int ret = 0;
868
869         stop = bprm->p >> PAGE_SHIFT;
870         sp = *sp_location;
871
872         for (index = MAX_ARG_PAGES - 1; index >= stop; index--) {
873                 unsigned int offset = index == stop ? bprm->p & ~PAGE_MASK : 0;
874                 char *src = kmap(bprm->page[index]) + offset;
875                 sp -= PAGE_SIZE - offset;
876                 if (copy_to_user((void *) sp, src, PAGE_SIZE - offset) != 0)
877                         ret = -EFAULT;
878                 kunmap(bprm->page[index]);
879                 if (ret)
880                         goto out;
881         }
882
883         *sp_location = sp;
884
885 out:
886         return ret;
887 }
888 EXPORT_SYMBOL(transfer_args_to_stack);
889
890 #endif /* CONFIG_MMU */
891
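/*
 * Open the file to be executed, honouring AT_SYMLINK_NOFOLLOW and
 * AT_EMPTY_PATH, check that it is a regular file on an exec-capable
 * mount, and deny write access to it for the duration of the exec.
 */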
892 static struct file *do_open_execat(int fd, struct filename *name, int flags)
893 {
894         struct file *file;
895         int err;
896         struct open_flags open_exec_flags = {
897                 .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
898                 .acc_mode = MAY_EXEC,
899                 .intent = LOOKUP_OPEN,
900                 .lookup_flags = LOOKUP_FOLLOW,
901         };
902
903         if ((flags & ~(AT_SYMLINK_NOFOLLOW | AT_EMPTY_PATH)) != 0)
904                 return ERR_PTR(-EINVAL);
905         if (flags & AT_SYMLINK_NOFOLLOW)
906                 open_exec_flags.lookup_flags &= ~LOOKUP_FOLLOW;
907         if (flags & AT_EMPTY_PATH)
908                 open_exec_flags.lookup_flags |= LOOKUP_EMPTY;
909
910         file = do_filp_open(fd, name, &open_exec_flags);
911         if (IS_ERR(file))
912                 goto out;
913
914         /*
915          * may_open() has already checked for this, so it should be
916          * impossible to trip now. But we need to be extra cautious
917          * and check again at the very end too.
918          */
919         err = -EACCES;
920         if (WARN_ON_ONCE(!S_ISREG(file_inode(file)->i_mode) ||
921                          path_noexec(&file->f_path)))
922                 goto exit;
923
924         err = deny_write_access(file);
925         if (err)
926                 goto exit;
927
928         if (name->name[0] != '\0')
929                 fsnotify_open(file);
930
931 out:
932         return file;
933
934 exit:
935         fput(file);
936         return ERR_PTR(err);
937 }
938
939 struct file *open_exec(const char *name)
940 {
941         struct filename *filename = getname_kernel(name);
942         struct file *f = ERR_CAST(filename);
943
944         if (!IS_ERR(filename)) {
945                 f = do_open_execat(AT_FDCWD, filename, 0);
946                 putname(filename);
947         }
948         return f;
949 }
950 EXPORT_SYMBOL(open_exec);
951
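/*
 * Read a whole file into a vmalloc()ed buffer (or into a preallocated
 * firmware buffer), with LSM checks before and after the read and with
 * write access to the file blocked while it is being read.
 */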
952 int kernel_read_file(struct file *file, void **buf, loff_t *size,
953                      loff_t max_size, enum kernel_read_file_id id)
954 {
955         loff_t i_size, pos;
956         ssize_t bytes = 0;
957         int ret;
958
959         if (!S_ISREG(file_inode(file)->i_mode) || max_size < 0)
960                 return -EINVAL;
961
962         ret = deny_write_access(file);
963         if (ret)
964                 return ret;
965
966         ret = security_kernel_read_file(file, id);
967         if (ret)
968                 goto out;
969
970         i_size = i_size_read(file_inode(file));
971         if (i_size <= 0) {
972                 ret = -EINVAL;
973                 goto out;
974         }
975         if (i_size > SIZE_MAX || (max_size > 0 && i_size > max_size)) {
976                 ret = -EFBIG;
977                 goto out;
978         }
979
980         if (id != READING_FIRMWARE_PREALLOC_BUFFER)
981                 *buf = vmalloc(i_size);
982         if (!*buf) {
983                 ret = -ENOMEM;
984                 goto out;
985         }
986
987         pos = 0;
988         while (pos < i_size) {
989                 bytes = kernel_read(file, *buf + pos, i_size - pos, &pos);
990                 if (bytes < 0) {
991                         ret = bytes;
992                         goto out_free;
993                 }
994
995                 if (bytes == 0)
996                         break;
997         }
998
999         if (pos != i_size) {
1000                 ret = -EIO;
1001                 goto out_free;
1002         }
1003
1004         ret = security_kernel_post_read_file(file, *buf, i_size, id);
1005         if (!ret)
1006                 *size = pos;
1007
1008 out_free:
1009         if (ret < 0) {
1010                 if (id != READING_FIRMWARE_PREALLOC_BUFFER) {
1011                         vfree(*buf);
1012                         *buf = NULL;
1013                 }
1014         }
1015
1016 out:
1017         allow_write_access(file);
1018         return ret;
1019 }
1020 EXPORT_SYMBOL_GPL(kernel_read_file);
1021
1022 int kernel_read_file_from_path(const char *path, void **buf, loff_t *size,
1023                                loff_t max_size, enum kernel_read_file_id id)
1024 {
1025         struct file *file;
1026         int ret;
1027
1028         if (!path || !*path)
1029                 return -EINVAL;
1030
1031         file = filp_open(path, O_RDONLY, 0);
1032         if (IS_ERR(file))
1033                 return PTR_ERR(file);
1034
1035         ret = kernel_read_file(file, buf, size, max_size, id);
1036         fput(file);
1037         return ret;
1038 }
1039 EXPORT_SYMBOL_GPL(kernel_read_file_from_path);
1040
1041 int kernel_read_file_from_path_initns(const char *path, void **buf,
1042                                       loff_t *size, loff_t max_size,
1043                                       enum kernel_read_file_id id)
1044 {
1045         struct file *file;
1046         struct path root;
1047         int ret;
1048
1049         if (!path || !*path)
1050                 return -EINVAL;
1051
1052         task_lock(&init_task);
1053         get_fs_root(init_task.fs, &root);
1054         task_unlock(&init_task);
1055
1056         file = file_open_root(root.dentry, root.mnt, path, O_RDONLY, 0);
1057         path_put(&root);
1058         if (IS_ERR(file))
1059                 return PTR_ERR(file);
1060
1061         ret = kernel_read_file(file, buf, size, max_size, id);
1062         fput(file);
1063         return ret;
1064 }
1065 EXPORT_SYMBOL_GPL(kernel_read_file_from_path_initns);
1066
1067 int kernel_read_file_from_fd(int fd, void **buf, loff_t *size, loff_t max_size,
1068                              enum kernel_read_file_id id)
1069 {
1070         struct fd f = fdget(fd);
1071         int ret = -EBADF;
1072
1073         if (!f.file)
1074                 goto out;
1075
1076         ret = kernel_read_file(f.file, buf, size, max_size, id);
1077 out:
1078         fdput(f);
1079         return ret;
1080 }
1081 EXPORT_SYMBOL_GPL(kernel_read_file_from_fd);
1082
1083 #if defined(CONFIG_HAVE_AOUT) || defined(CONFIG_BINFMT_FLAT) || \
1084     defined(CONFIG_BINFMT_ELF_FDPIC)
1085 ssize_t read_code(struct file *file, unsigned long addr, loff_t pos, size_t len)
1086 {
1087         ssize_t res = vfs_read(file, (void __user *)addr, len, &pos);
1088         if (res > 0)
1089                 flush_icache_user_range(addr, addr + len);
1090         return res;
1091 }
1092 EXPORT_SYMBOL(read_code);
1093 #endif
1094
1095 /*
1096  * Maps the mm_struct mm into the current task struct.
1097  * On success, this function returns with the mutex
1098  * exec_update_mutex locked.
1099  */
1100 static int exec_mmap(struct mm_struct *mm)
1101 {
1102         struct task_struct *tsk;
1103         struct mm_struct *old_mm, *active_mm;
1104         int ret;
1105
1106         /* Notify parent that we're no longer interested in the old VM */
1107         tsk = current;
1108         old_mm = current->mm;
1109         exec_mm_release(tsk, old_mm);
1110         if (old_mm)
1111                 sync_mm_rss(old_mm);
1112
1113         ret = mutex_lock_killable(&tsk->signal->exec_update_mutex);
1114         if (ret)
1115                 return ret;
1116
1117         if (old_mm) {
1118                 /*
1119                  * Make sure that if there is a core dump in progress
1120                  * for the old mm, we get out and die instead of going
1121                  * through with the exec.  We must hold mmap_lock around
1122                  * checking core_state and changing tsk->mm.
1123                  */
1124                 mmap_read_lock(old_mm);
1125                 if (unlikely(old_mm->core_state)) {
1126                         mmap_read_unlock(old_mm);
1127                         mutex_unlock(&tsk->signal->exec_update_mutex);
1128                         return -EINTR;
1129                 }
1130         }
1131
1132         task_lock(tsk);
1133         active_mm = tsk->active_mm;
1134         membarrier_exec_mmap(mm);
1135         tsk->mm = mm;
1136         tsk->active_mm = mm;
1137         activate_mm(active_mm, mm);
1138         tsk->mm->vmacache_seqnum = 0;
1139         vmacache_flush(tsk);
1140         task_unlock(tsk);
1141         if (old_mm) {
1142                 mmap_read_unlock(old_mm);
1143                 BUG_ON(active_mm != old_mm);
1144                 setmax_mm_hiwater_rss(&tsk->signal->maxrss, old_mm);
1145                 mm_update_next_owner(old_mm);
1146                 mmput(old_mm);
1147                 return 0;
1148         }
1149         mmdrop(active_mm);
1150         return 0;
1151 }
1152
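/*
 * Kill all other threads in the thread group and wait for them to exit.
 * If the caller was not the thread group leader, it takes over the old
 * leader's PID and start time and releases the old leader.
 */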
1153 static int de_thread(struct task_struct *tsk)
1154 {
1155         struct signal_struct *sig = tsk->signal;
1156         struct sighand_struct *oldsighand = tsk->sighand;
1157         spinlock_t *lock = &oldsighand->siglock;
1158
1159         if (thread_group_empty(tsk))
1160                 goto no_thread_group;
1161
1162         /*
1163          * Kill all other threads in the thread group.
1164          */
1165         spin_lock_irq(lock);
1166         if (signal_group_exit(sig)) {
1167                 /*
1168                  * Another group action in progress, just
1169                  * return so that the signal is processed.
1170                  */
1171                 spin_unlock_irq(lock);
1172                 return -EAGAIN;
1173         }
1174
1175         sig->group_exit_task = tsk;
1176         sig->notify_count = zap_other_threads(tsk);
1177         if (!thread_group_leader(tsk))
1178                 sig->notify_count--;
1179
1180         while (sig->notify_count) {
1181                 __set_current_state(TASK_KILLABLE);
1182                 spin_unlock_irq(lock);
1183                 schedule();
1184                 if (__fatal_signal_pending(tsk))
1185                         goto killed;
1186                 spin_lock_irq(lock);
1187         }
1188         spin_unlock_irq(lock);
1189
1190         /*
1191          * At this point all other threads have exited; all we have to
1192          * do is wait for the thread group leader to become inactive,
1193          * and to assume its PID:
1194          */
1195         if (!thread_group_leader(tsk)) {
1196                 struct task_struct *leader = tsk->group_leader;
1197
1198                 for (;;) {
1199                         cgroup_threadgroup_change_begin(tsk);
1200                         write_lock_irq(&tasklist_lock);
1201                         /*
1202                          * Do this under tasklist_lock to ensure that
1203                          * exit_notify() can't miss ->group_exit_task
1204                          */
1205                         sig->notify_count = -1;
1206                         if (likely(leader->exit_state))
1207                                 break;
1208                         __set_current_state(TASK_KILLABLE);
1209                         write_unlock_irq(&tasklist_lock);
1210                         cgroup_threadgroup_change_end(tsk);
1211                         schedule();
1212                         if (__fatal_signal_pending(tsk))
1213                                 goto killed;
1214                 }
1215
1216                 /*
1217                  * The only record we have of the real-time age of a
1218                  * process, regardless of execs it's done, is start_time.
1219                  * All the past CPU time is accumulated in signal_struct
1220                  * from sister threads now dead.  But in this non-leader
1221                  * exec, nothing survives from the original leader thread,
1222                  * whose birth marks the true age of this process now.
1223                  * When we take on its identity by switching to its PID, we
1224                  * also take its birthdate (always earlier than our own).
1225                  */
1226                 tsk->start_time = leader->start_time;
1227                 tsk->start_boottime = leader->start_boottime;
1228
1229                 BUG_ON(!same_thread_group(leader, tsk));
1230                 /*
1231                  * An exec() starts a new thread group with the
1232                  * TGID of the previous thread group. Rehash the
1233                  * two threads with a switched PID, and release
1234                  * the former thread group leader:
1235                  */
1236
1237                 /* Become a process group leader with the old leader's pid.
1238                  * The old leader becomes a thread of this thread group.
1239                  */
1240                 exchange_tids(tsk, leader);
1241                 transfer_pid(leader, tsk, PIDTYPE_TGID);
1242                 transfer_pid(leader, tsk, PIDTYPE_PGID);
1243                 transfer_pid(leader, tsk, PIDTYPE_SID);
1244
1245                 list_replace_rcu(&leader->tasks, &tsk->tasks);
1246                 list_replace_init(&leader->sibling, &tsk->sibling);
1247
1248                 tsk->group_leader = tsk;
1249                 leader->group_leader = tsk;
1250
1251                 tsk->exit_signal = SIGCHLD;
1252                 leader->exit_signal = -1;
1253
1254                 BUG_ON(leader->exit_state != EXIT_ZOMBIE);
1255                 leader->exit_state = EXIT_DEAD;
1256
1257                 /*
1258                  * We are going to release_task()->ptrace_unlink() silently;
1259                  * the tracer can sleep in do_wait(). EXIT_DEAD guarantees
1260                  * the tracer won't block again waiting for this thread.
1261                  */
1262                 if (unlikely(leader->ptrace))
1263                         __wake_up_parent(leader, leader->parent);
1264                 write_unlock_irq(&tasklist_lock);
1265                 cgroup_threadgroup_change_end(tsk);
1266
1267                 release_task(leader);
1268         }
1269
1270         sig->group_exit_task = NULL;
1271         sig->notify_count = 0;
1272
1273 no_thread_group:
1274         /* we have changed execution domain */
1275         tsk->exit_signal = SIGCHLD;
1276
1277         BUG_ON(!thread_group_leader(tsk));
1278         return 0;
1279
1280 killed:
1281         /* protects against exit_notify() and __exit_signal() */
1282         read_lock(&tasklist_lock);
1283         sig->group_exit_task = NULL;
1284         sig->notify_count = 0;
1285         read_unlock(&tasklist_lock);
1286         return -EAGAIN;
1287 }
1288
1289
1290 /*
1291  * This function makes sure the current process has its own signal table,
1292  * so that flush_signal_handlers can later reset the handlers without
1293  * disturbing other processes.  (Other processes might share the signal
1294  * table via the CLONE_SIGHAND option to clone().)
1295  */
1296 static int unshare_sighand(struct task_struct *me)
1297 {
1298         struct sighand_struct *oldsighand = me->sighand;
1299
1300         if (refcount_read(&oldsighand->count) != 1) {
1301                 struct sighand_struct *newsighand;
1302                 /*
1303                  * This ->sighand is shared with a CLONE_SIGHAND
1304                  * but not CLONE_THREAD task; switch to a new one.
1305                  */
1306                 newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
1307                 if (!newsighand)
1308                         return -ENOMEM;
1309
1310                 refcount_set(&newsighand->count, 1);
1311                 memcpy(newsighand->action, oldsighand->action,
1312                        sizeof(newsighand->action));
1313
1314                 write_lock_irq(&tasklist_lock);
1315                 spin_lock(&oldsighand->siglock);
1316                 rcu_assign_pointer(me->sighand, newsighand);
1317                 spin_unlock(&oldsighand->siglock);
1318                 write_unlock_irq(&tasklist_lock);
1319
1320                 __cleanup_sighand(oldsighand);
1321         }
1322         return 0;
1323 }
1324
1325 char *__get_task_comm(char *buf, size_t buf_size, struct task_struct *tsk)
1326 {
1327         task_lock(tsk);
1328         strncpy(buf, tsk->comm, buf_size);
1329         task_unlock(tsk);
1330         return buf;
1331 }
1332 EXPORT_SYMBOL_GPL(__get_task_comm);
1333
1334 /*
1335  * These functions flush out all traces of the currently running executable
1336  * so that a new one can be started.
1337  */
1338
1339 void __set_task_comm(struct task_struct *tsk, const char *buf, bool exec)
1340 {
1341         task_lock(tsk);
1342         trace_task_rename(tsk, buf);
1343         strlcpy(tsk->comm, buf, sizeof(tsk->comm));
1344         task_unlock(tsk);
1345         perf_event_comm(tsk, exec);
1346 }
1347
1348 /*
1349  * Calling this is the point of no return. None of the failures will be
1350  * seen by userspace since either the process is already taking a fatal
1351  * signal (via de_thread() or coredump), or will have SEGV raised
1352  * (after exec_mmap()) by search_binary_handler (see below).
1353  */
1354 int begin_new_exec(struct linux_binprm * bprm)
1355 {
1356         struct task_struct *me = current;
1357         int retval;
1358
1359         /* Once we are committed compute the creds */
1360         retval = bprm_creds_from_file(bprm);
1361         if (retval)
1362                 return retval;
1363
1364         /*
1365          * Ensure all future errors are fatal.
1366          */
1367         bprm->point_of_no_return = true;
1368
1369         /*
1370          * Make this the only thread in the thread group.
1371          */
1372         retval = de_thread(me);
1373         if (retval)
1374                 goto out;
1375
1376         /*
1377          * Must be called _before_ exec_mmap() as bprm->mm is
1378          * not visible until then. This also enables the update
1379          * to be lockless.
1380          */
1381         set_mm_exe_file(bprm->mm, bprm->file);
1382
1383         /* If the binary is not readable then enforce mm->dumpable=0 */
1384         would_dump(bprm, bprm->file);
1385         if (bprm->have_execfd)
1386                 would_dump(bprm, bprm->executable);
1387
1388         /*
1389          * Release all of the old mmap stuff
1390          */
1391         acct_arg_size(bprm, 0);
1392         retval = exec_mmap(bprm->mm);
1393         if (retval)
1394                 goto out;
1395
1396         bprm->mm = NULL;
1397
1398 #ifdef CONFIG_POSIX_TIMERS
1399         exit_itimers(me->signal);
1400         flush_itimer_signals();
1401 #endif
1402
1403         /*
1404          * Make the signal table private.
1405          */
1406         retval = unshare_sighand(me);
1407         if (retval)
1408                 goto out_unlock;
1409
1410         /*
1411          * Ensure that the uaccess routines can actually operate on userspace
1412          * pointers:
1413          */
1414         force_uaccess_begin();
1415
1416         me->flags &= ~(PF_RANDOMIZE | PF_FORKNOEXEC | PF_KTHREAD |
1417                                         PF_NOFREEZE | PF_NO_SETAFFINITY);
1418         flush_thread();
1419         me->personality &= ~bprm->per_clear;
1420
1421         /*
1422          * We have to apply CLOEXEC before we change whether the process is
1423          * dumpable (below) to avoid a race with a process in userspace
1424          * trying to access the should-be-closed file descriptors of a process
1425          * undergoing exec(2).
1426          */
1427         do_close_on_exec(me->files);
1428
1429         if (bprm->secureexec) {
1430                 /* Make sure parent cannot signal privileged process. */
1431                 me->pdeath_signal = 0;
1432
1433                 /*
1434                  * For secureexec, reset the stack limit to sane default to
1435                  * avoid bad behavior from the prior rlimits. This has to
1436                  * happen before arch_pick_mmap_layout(), which examines
1437                  * RLIMIT_STACK, but after the point of no return to avoid
1438                  * needing to clean up the change on failure.
1439                  */
1440                 if (bprm->rlim_stack.rlim_cur > _STK_LIM)
1441                         bprm->rlim_stack.rlim_cur = _STK_LIM;
1442         }
1443
1444         me->sas_ss_sp = me->sas_ss_size = 0;
1445
1446         /*
1447          * Figure out dumpability. Note that checking only current here
1448          * is wrong, but userspace depends on it. This should be testing
1449          * bprm->secureexec instead.
1450          */
1451         if (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP ||
1452             !(uid_eq(current_euid(), current_uid()) &&
1453               gid_eq(current_egid(), current_gid())))
1454                 set_dumpable(current->mm, suid_dumpable);
1455         else
1456                 set_dumpable(current->mm, SUID_DUMP_USER);
1457
1458         perf_event_exec();
1459         __set_task_comm(me, kbasename(bprm->filename), true);
1460
1461         /* An exec changes our domain. We are no longer part of the thread
1462            group */
1463         WRITE_ONCE(me->self_exec_id, me->self_exec_id + 1);
1464         flush_signal_handlers(me, 0);
1465
1466         /*
1467          * install the new credentials for this executable
1468          */
1469         security_bprm_committing_creds(bprm);
1470
1471         commit_creds(bprm->cred);
1472         bprm->cred = NULL;
1473
1474         /*
1475          * Disable monitoring for regular users
1476          * when executing setuid binaries. Must
1477          * wait until new credentials are committed
1478          * by commit_creds() above
1479          */
1480         if (get_dumpable(me->mm) != SUID_DUMP_USER)
1481                 perf_event_exit_task(me);
1482         /*
1483          * cred_guard_mutex must be held at least to this point to prevent
1484          * ptrace_attach() from altering our determination of the task's
1485          * credentials; any time after this it may be unlocked.
1486          */
1487         security_bprm_committed_creds(bprm);
1488
1489         /* Pass the opened binary to the interpreter. */
1490         if (bprm->have_execfd) {
1491                 retval = get_unused_fd_flags(0);
1492                 if (retval < 0)
1493                         goto out_unlock;
1494                 fd_install(retval, bprm->executable);
1495                 bprm->executable = NULL;
1496                 bprm->execfd = retval;
1497         }
1498         return 0;
1499
1500 out_unlock:
1501         mutex_unlock(&me->signal->exec_update_mutex);
1502 out:
1503         return retval;
1504 }
1505 EXPORT_SYMBOL(begin_new_exec);
1506
1507 void would_dump(struct linux_binprm *bprm, struct file *file)
1508 {
1509         struct inode *inode = file_inode(file);
1510         if (inode_permission(inode, MAY_READ) < 0) {
1511                 struct user_namespace *old, *user_ns;
1512                 bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP;
1513
1514                 /* Ensure mm->user_ns contains the executable */
1515                 user_ns = old = bprm->mm->user_ns;
1516                 while ((user_ns != &init_user_ns) &&
1517                        !privileged_wrt_inode_uidgid(user_ns, inode))
1518                         user_ns = user_ns->parent;
1519
1520                 if (old != user_ns) {
1521                         bprm->mm->user_ns = get_user_ns(user_ns);
1522                         put_user_ns(old);
1523                 }
1524         }
1525 }
1526 EXPORT_SYMBOL(would_dump);
1527
1528 void setup_new_exec(struct linux_binprm * bprm)
1529 {
1530         /* Setup things that can depend upon the personality */
1531         struct task_struct *me = current;
1532
1533         arch_pick_mmap_layout(me->mm, &bprm->rlim_stack);
1534
1535         arch_setup_new_exec();
1536
1537         /* Set the new mm task size. We have to do that late because it may
1538          * depend on TIF_32BIT which is only updated in flush_thread() on
1539          * some architectures like powerpc
1540          */
1541         me->mm->task_size = TASK_SIZE;
1542         mutex_unlock(&me->signal->exec_update_mutex);
1543         mutex_unlock(&me->signal->cred_guard_mutex);
1544 }
1545 EXPORT_SYMBOL(setup_new_exec);
1546
1547 /* Runs immediately before start_thread() takes over. */
1548 void finalize_exec(struct linux_binprm *bprm)
1549 {
1550         /* Store any stack rlimit changes before starting thread. */
1551         task_lock(current->group_leader);
1552         current->signal->rlim[RLIMIT_STACK] = bprm->rlim_stack;
1553         task_unlock(current->group_leader);
1554 }
1555 EXPORT_SYMBOL(finalize_exec);
1556
1557 /*
1558  * Prepare credentials and lock ->cred_guard_mutex.
1559  * setup_new_exec() commits the new creds and drops the lock.
1560  * Or, if exec fails before that, free_bprm() should release ->cred
1561  * and unlock.
1562  */
1563 static int prepare_bprm_creds(struct linux_binprm *bprm)
1564 {
1565         if (mutex_lock_interruptible(&current->signal->cred_guard_mutex))
1566                 return -ERESTARTNOINTR;
1567
1568         bprm->cred = prepare_exec_creds();
1569         if (likely(bprm->cred))
1570                 return 0;
1571
1572         mutex_unlock(&current->signal->cred_guard_mutex);
1573         return -ENOMEM;
1574 }
1575
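/*
 * Release everything held by the bprm: the half-set-up mm and argument
 * pages, any unused credentials (also dropping cred_guard_mutex), the
 * opened files, and the interpreter/fdpath strings.
 */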
1576 static void free_bprm(struct linux_binprm *bprm)
1577 {
1578         if (bprm->mm) {
1579                 acct_arg_size(bprm, 0);
1580                 mmput(bprm->mm);
1581         }
1582         free_arg_pages(bprm);
1583         if (bprm->cred) {
1584                 mutex_unlock(&current->signal->cred_guard_mutex);
1585                 abort_creds(bprm->cred);
1586         }
1587         if (bprm->file) {
1588                 allow_write_access(bprm->file);
1589                 fput(bprm->file);
1590         }
1591         if (bprm->executable)
1592                 fput(bprm->executable);
1593         /* If a binfmt changed the interp, free it. */
1594         if (bprm->interp != bprm->filename)
1595                 kfree(bprm->interp);
1596         kfree(bprm->fdpath);
1597         kfree(bprm);
1598 }
1599
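/*
 * Allocate and initialise a linux_binprm: pick the filename (building a
 * /dev/fd based path for execveat() callers) and set up the temporary
 * mm and stack via bprm_mm_init().
 */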
1600 static struct linux_binprm *alloc_bprm(int fd, struct filename *filename)
1601 {
1602         struct linux_binprm *bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
1603         int retval = -ENOMEM;
1604         if (!bprm)
1605                 goto out;
1606
1607         if (fd == AT_FDCWD || filename->name[0] == '/') {
1608                 bprm->filename = filename->name;
1609         } else {
1610                 if (filename->name[0] == '\0')
1611                         bprm->fdpath = kasprintf(GFP_KERNEL, "/dev/fd/%d", fd);
1612                 else
1613                         bprm->fdpath = kasprintf(GFP_KERNEL, "/dev/fd/%d/%s",
1614                                                   fd, filename->name);
1615                 if (!bprm->fdpath)
1616                         goto out_free;
1617
1618                 bprm->filename = bprm->fdpath;
1619         }
1620         bprm->interp = bprm->filename;
1621
1622         retval = bprm_mm_init(bprm);
1623         if (retval)
1624                 goto out_free;
1625         return bprm;
1626
1627 out_free:
1628         free_bprm(bprm);
1629 out:
1630         return ERR_PTR(retval);
1631 }
1632
1633 int bprm_change_interp(const char *interp, struct linux_binprm *bprm)
1634 {
1635         /* If a binfmt changed the interp, free it first. */
1636         if (bprm->interp != bprm->filename)
1637                 kfree(bprm->interp);
1638         bprm->interp = kstrdup(interp, GFP_KERNEL);
1639         if (!bprm->interp)
1640                 return -ENOMEM;
1641         return 0;
1642 }
1643 EXPORT_SYMBOL(bprm_change_interp);
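
/*
 * Illustrative sketch (not part of this file): an interpreter-style handler,
 * modeled loosely on binfmt_script, records the interpreter path it parsed
 * and hands the opened interpreter back to exec_binprm() for another pass.
 * The handler name and the hard-coded "/bin/sh" below are hypothetical.
 *
 *	static int load_script_like(struct linux_binprm *bprm)
 *	{
 *		const char *i_name = "/bin/sh";
 *		struct file *file;
 *		int retval;
 *
 *		retval = bprm_change_interp(i_name, bprm);
 *		if (retval < 0)
 *			return retval;
 *
 *		file = open_exec(i_name);
 *		if (IS_ERR(file))
 *			return PTR_ERR(file);
 *		bprm->interpreter = file;
 *		return 0;
 *	}
 */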
1644
1645 /*
1646  * determine how safe it is to execute the proposed program
1647  * - the caller must hold ->cred_guard_mutex to protect against
1648  *   PTRACE_ATTACH or seccomp thread-sync
1649  */
1650 static void check_unsafe_exec(struct linux_binprm *bprm)
1651 {
1652         struct task_struct *p = current, *t;
1653         unsigned n_fs;
1654
1655         if (p->ptrace)
1656                 bprm->unsafe |= LSM_UNSAFE_PTRACE;
1657
1658         /*
1659          * This isn't strictly necessary, but it makes it harder for LSMs to
1660          * mess up.
1661          */
1662         if (task_no_new_privs(current))
1663                 bprm->unsafe |= LSM_UNSAFE_NO_NEW_PRIVS;
1664
1665         t = p;
1666         n_fs = 1;
1667         spin_lock(&p->fs->lock);
1668         rcu_read_lock();
1669         while_each_thread(p, t) {
1670                 if (t->fs == p->fs)
1671                         n_fs++;
1672         }
1673         rcu_read_unlock();
1674
1675         if (p->fs->users > n_fs)
1676                 bprm->unsafe |= LSM_UNSAFE_SHARE;
1677         else
1678                 p->fs->in_exec = 1;
1679         spin_unlock(&p->fs->lock);
1680 }
1681
1682 static void bprm_fill_uid(struct linux_binprm *bprm, struct file *file)
1683 {
1684         /* Handle suid and sgid on files */
1685         struct inode *inode;
1686         unsigned int mode;
1687         kuid_t uid;
1688         kgid_t gid;
1689
1690         if (!mnt_may_suid(file->f_path.mnt))
1691                 return;
1692
1693         if (task_no_new_privs(current))
1694                 return;
1695
1696         inode = file->f_path.dentry->d_inode;
1697         mode = READ_ONCE(inode->i_mode);
1698         if (!(mode & (S_ISUID|S_ISGID)))
1699                 return;
1700
1701         /* Be careful if suid/sgid is set */
1702         inode_lock(inode);
1703
1704         /* Reload mode/uid/gid atomically now that the lock is held */
1705         mode = inode->i_mode;
1706         uid = inode->i_uid;
1707         gid = inode->i_gid;
1708         inode_unlock(inode);
1709
1710         /* We ignore suid/sgid if there are no mappings for them in the ns */
1711         if (!kuid_has_mapping(bprm->cred->user_ns, uid) ||
1712                  !kgid_has_mapping(bprm->cred->user_ns, gid))
1713                 return;
1714
1715         if (mode & S_ISUID) {
1716                 bprm->per_clear |= PER_CLEAR_ON_SETID;
1717                 bprm->cred->euid = uid;
1718         }
1719
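        /*
         * S_ISGID without S_IXGRP has historically marked a file for
         * mandatory locking rather than setgid execution, hence both
         * bits are required here.
         */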
1720         if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
1721                 bprm->per_clear |= PER_CLEAR_ON_SETID;
1722                 bprm->cred->egid = gid;
1723         }
1724 }
1725
1726 /*
1727  * Compute bprm->cred based upon the final binary.
1728  */
1729 static int bprm_creds_from_file(struct linux_binprm *bprm)
1730 {
1731         /* Compute creds based on which file? */
1732         struct file *file = bprm->execfd_creds ? bprm->executable : bprm->file;
1733
1734         bprm_fill_uid(bprm, file);
1735         return security_bprm_creds_from_file(bprm, file);
1736 }
1737
1738 /*
1739  * Fill the binprm structure from the inode.
1740  * Read the first BINPRM_BUF_SIZE bytes
1741  *
1742  * This may be called multiple times for binary chains (scripts for example).
1743  */
1744 static int prepare_binprm(struct linux_binprm *bprm)
1745 {
1746         loff_t pos = 0;
1747
1748         memset(bprm->buf, 0, BINPRM_BUF_SIZE);
1749         return kernel_read(bprm->file, bprm->buf, BINPRM_BUF_SIZE, &pos);
1750 }
1751
1752 /*
1753  * Arguments are '\0' separated strings found at the location bprm->p
1754  * points to; chop off the first by relocating bprm->p to right after
1755  * the first '\0' encountered.
1756  */
1757 int remove_arg_zero(struct linux_binprm *bprm)
1758 {
1759         int ret = 0;
1760         unsigned long offset;
1761         char *kaddr;
1762         struct page *page;
1763
1764         if (!bprm->argc)
1765                 return 0;
1766
1767         do {
1768                 offset = bprm->p & ~PAGE_MASK;
1769                 page = get_arg_page(bprm, bprm->p, 0);
1770                 if (!page) {
1771                         ret = -EFAULT;
1772                         goto out;
1773                 }
1774                 kaddr = kmap_atomic(page);
1775
1776                 for (; offset < PAGE_SIZE && kaddr[offset];
1777                                 offset++, bprm->p++)
1778                         ;
1779
1780                 kunmap_atomic(kaddr);
1781                 put_arg_page(page);
1782         } while (offset == PAGE_SIZE);
1783
1784         bprm->p++;
1785         bprm->argc--;
1786         ret = 0;
1787
1788 out:
1789         return ret;
1790 }
1791 EXPORT_SYMBOL(remove_arg_zero);
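
/*
 * Example: if the argument area at bprm->p currently holds
 * "script.sh\0arg1\0arg2\0", remove_arg_zero() advances bprm->p just past
 * "script.sh\0" and decrements bprm->argc; a script handler can then push
 * replacement argv[0] strings (interpreter name, script path) back with
 * copy_string_kernel() before restarting the binfmt search.
 */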
1792
1793 #define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e))
1794 /*
1795  * cycle through the list of binary format handlers until one recognizes the image
1796  */
1797 static int search_binary_handler(struct linux_binprm *bprm)
1798 {
1799         bool need_retry = IS_ENABLED(CONFIG_MODULES);
1800         struct linux_binfmt *fmt;
1801         int retval;
1802
1803         retval = prepare_binprm(bprm);
1804         if (retval < 0)
1805                 return retval;
1806
1807         retval = security_bprm_check(bprm);
1808         if (retval)
1809                 return retval;
1810
1811         retval = -ENOENT;
1812  retry:
1813         read_lock(&binfmt_lock);
1814         list_for_each_entry(fmt, &formats, lh) {
1815                 if (!try_module_get(fmt->module))
1816                         continue;
1817                 read_unlock(&binfmt_lock);
1818
1819                 retval = fmt->load_binary(bprm);
1820
1821                 read_lock(&binfmt_lock);
1822                 put_binfmt(fmt);
1823                 if (bprm->point_of_no_return || (retval != -ENOEXEC)) {
1824                         read_unlock(&binfmt_lock);
1825                         return retval;
1826                 }
1827         }
1828         read_unlock(&binfmt_lock);
1829
1830         if (need_retry) {
1831                 if (printable(bprm->buf[0]) && printable(bprm->buf[1]) &&
1832                     printable(bprm->buf[2]) && printable(bprm->buf[3]))
1833                         return retval;
1834                 if (request_module("binfmt-%04x", *(ushort *)(bprm->buf + 2)) < 0)
1835                         return retval;
1836                 need_retry = false;
1837                 goto retry;
1838         }
1839
1840         return retval;
1841 }
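
/*
 * Worked example of the request_module() fallback above: an ELF image starts
 * with "\177ELF", so the two bytes at offset 2 are 'L' (0x4c) and 'F' (0x46);
 * read as a little-endian ushort that is 0x464c, and the request becomes
 * "binfmt-464c".  A modular handler can make itself demand-loadable by
 * declaring a matching alias, e.g. MODULE_ALIAS("binfmt-464c") (illustrative;
 * whether a given handler actually does so is up to that module).
 */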
1842
1843 static int exec_binprm(struct linux_binprm *bprm)
1844 {
1845         pid_t old_pid, old_vpid;
1846         int ret, depth;
1847
1848         /* Need to fetch pid before load_binary changes it */
1849         old_pid = current->pid;
1850         rcu_read_lock();
1851         old_vpid = task_pid_nr_ns(current, task_active_pid_ns(current->parent));
1852         rcu_read_unlock();
1853
1854         /* This allows up to 5 levels of binfmt rewrites before failing hard. */
1855         for (depth = 0;; depth++) {
1856                 struct file *exec;
1857                 if (depth > 5)
1858                         return -ELOOP;
1859
1860                 ret = search_binary_handler(bprm);
1861                 if (ret < 0)
1862                         return ret;
1863                 if (!bprm->interpreter)
1864                         break;
1865
1866                 exec = bprm->file;
1867                 bprm->file = bprm->interpreter;
1868                 bprm->interpreter = NULL;
1869
1870                 allow_write_access(exec);
1871                 if (unlikely(bprm->have_execfd)) {
1872                         if (bprm->executable) {
1873                                 fput(exec);
1874                                 return -ENOEXEC;
1875                         }
1876                         bprm->executable = exec;
1877                 } else
1878                         fput(exec);
1879         }
1880
1881         audit_bprm(bprm);
1882         trace_sched_process_exec(current, old_pid, bprm);
1883         ptrace_event(PTRACE_EVENT_EXEC, old_vpid);
1884         proc_exec_connector(current);
1885         return 0;
1886 }
1887
1888 /*
1889  * Execute a new program; the common tail shared by the execve() syscalls and kernel_execve().
1890  */
1891 static int bprm_execve(struct linux_binprm *bprm,
1892                        int fd, struct filename *filename, int flags)
1893 {
1894         struct file *file;
1895         struct files_struct *displaced;
1896         int retval;
1897
1898         retval = unshare_files(&displaced);
1899         if (retval)
1900                 return retval;
1901
1902         retval = prepare_bprm_creds(bprm);
1903         if (retval)
1904                 goto out_files;
1905
1906         check_unsafe_exec(bprm);
1907         current->in_execve = 1;
1908
1909         file = do_open_execat(fd, filename, flags);
1910         retval = PTR_ERR(file);
1911         if (IS_ERR(file))
1912                 goto out_unmark;
1913
1914         sched_exec();
1915
1916         bprm->file = file;
1917         /*
1918          * Record that a name derived from an O_CLOEXEC fd will be
1919          * inaccessible after exec. Relies on having exclusive access to
1920          * current->files (due to unshare_files above).
1921          */
1922         if (bprm->fdpath &&
1923             close_on_exec(fd, rcu_dereference_raw(current->files->fdt)))
1924                 bprm->interp_flags |= BINPRM_FLAGS_PATH_INACCESSIBLE;
1925
1926         /* Set the unchanging part of bprm->cred */
1927         retval = security_bprm_creds_for_exec(bprm);
1928         if (retval)
1929                 goto out;
1930
1931         retval = exec_binprm(bprm);
1932         if (retval < 0)
1933                 goto out;
1934
1935         /* execve succeeded */
1936         current->fs->in_exec = 0;
1937         current->in_execve = 0;
1938         rseq_execve(current);
1939         acct_update_integrals(current);
1940         task_numa_free(current, false);
1941         if (displaced)
1942                 put_files_struct(displaced);
1943         return retval;
1944
1945 out:
1946         /*
1947          * If past the point of no return, ensure the code never
1948          * returns to the userspace process.  Use an existing fatal
1949          * signal if present, otherwise terminate the process with
1950          * SIGSEGV.
1951          */
1952         if (bprm->point_of_no_return && !fatal_signal_pending(current))
1953                 force_sigsegv(SIGSEGV);
1954
1955 out_unmark:
1956         current->fs->in_exec = 0;
1957         current->in_execve = 0;
1958
1959 out_files:
1960         if (displaced)
1961                 reset_files_struct(displaced);
1962
1963         return retval;
1964 }
1965
1966 static int do_execveat_common(int fd, struct filename *filename,
1967                               struct user_arg_ptr argv,
1968                               struct user_arg_ptr envp,
1969                               int flags)
1970 {
1971         struct linux_binprm *bprm;
1972         int retval;
1973
1974         if (IS_ERR(filename))
1975                 return PTR_ERR(filename);
1976
1977         /*
1978          * We move the actual failure in case of RLIMIT_NPROC excess from
1979          * set*uid() to execve() because too many poorly written programs
1980          * don't check setuid() return code.  Here we additionally recheck
1981          * whether NPROC limit is still exceeded.
1982          */
1983         if ((current->flags & PF_NPROC_EXCEEDED) &&
1984             atomic_read(&current_user()->processes) > rlimit(RLIMIT_NPROC)) {
1985                 retval = -EAGAIN;
1986                 goto out_ret;
1987         }
1988
1989         /* We're below the limit (still or again), so we don't want to make
1990          * further execve() calls fail. */
1991         current->flags &= ~PF_NPROC_EXCEEDED;
1992
1993         bprm = alloc_bprm(fd, filename);
1994         if (IS_ERR(bprm)) {
1995                 retval = PTR_ERR(bprm);
1996                 goto out_ret;
1997         }
1998
1999         retval = count(argv, MAX_ARG_STRINGS);
2000         if (retval < 0)
2001                 goto out_free;
2002         bprm->argc = retval;
2003
2004         retval = count(envp, MAX_ARG_STRINGS);
2005         if (retval < 0)
2006                 goto out_free;
2007         bprm->envc = retval;
2008
2009         retval = bprm_stack_limits(bprm);
2010         if (retval < 0)
2011                 goto out_free;
2012
2013         retval = copy_string_kernel(bprm->filename, bprm);
2014         if (retval < 0)
2015                 goto out_free;
2016         bprm->exec = bprm->p;
2017
2018         retval = copy_strings(bprm->envc, envp, bprm);
2019         if (retval < 0)
2020                 goto out_free;
2021
2022         retval = copy_strings(bprm->argc, argv, bprm);
2023         if (retval < 0)
2024                 goto out_free;
2025
2026         retval = bprm_execve(bprm, fd, filename, flags);
2027 out_free:
2028         free_bprm(bprm);
2029
2030 out_ret:
2031         putname(filename);
2032         return retval;
2033 }
2034
2035 int kernel_execve(const char *kernel_filename,
2036                   const char *const *argv, const char *const *envp)
2037 {
2038         struct filename *filename;
2039         struct linux_binprm *bprm;
2040         int fd = AT_FDCWD;
2041         int retval;
2042
2043         filename = getname_kernel(kernel_filename);
2044         if (IS_ERR(filename))
2045                 return PTR_ERR(filename);
2046
2047         bprm = alloc_bprm(fd, filename);
2048         if (IS_ERR(bprm)) {
2049                 retval = PTR_ERR(bprm);
2050                 goto out_ret;
2051         }
2052
2053         retval = count_strings_kernel(argv);
2054         if (retval < 0)
2055                 goto out_free;
2056         bprm->argc = retval;
2057
2058         retval = count_strings_kernel(envp);
2059         if (retval < 0)
2060                 goto out_free;
2061         bprm->envc = retval;
2062
2063         retval = bprm_stack_limits(bprm);
2064         if (retval < 0)
2065                 goto out_free;
2066
2067         retval = copy_string_kernel(bprm->filename, bprm);
2068         if (retval < 0)
2069                 goto out_free;
2070         bprm->exec = bprm->p;
2071
2072         retval = copy_strings_kernel(bprm->envc, envp, bprm);
2073         if (retval < 0)
2074                 goto out_free;
2075
2076         retval = copy_strings_kernel(bprm->argc, argv, bprm);
2077         if (retval < 0)
2078                 goto out_free;
2079
2080         retval = bprm_execve(bprm, fd, filename, 0);
2081 out_free:
2082         free_bprm(bprm);
2083 out_ret:
2084         putname(filename);
2085         return retval;
2086 }
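
/*
 * kernel_execve() is for in-kernel callers that are already running in a
 * kernel thread with no userspace to return to (the usermode-helper path is
 * the typical user).  A minimal sketch, assuming a hypothetical helper
 * thread function:
 *
 *	static int run_helper_thread(void *unused)
 *	{
 *		static const char *argv[] = { "/sbin/helper", NULL };
 *		static const char *envp[] = {
 *			"HOME=/", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL
 *		};
 *
 *		return kernel_execve(argv[0], argv, envp);
 *	}
 */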
2087
2088 static int do_execve(struct filename *filename,
2089         const char __user *const __user *__argv,
2090         const char __user *const __user *__envp)
2091 {
2092         struct user_arg_ptr argv = { .ptr.native = __argv };
2093         struct user_arg_ptr envp = { .ptr.native = __envp };
2094         return do_execveat_common(AT_FDCWD, filename, argv, envp, 0);
2095 }
2096
2097 static int do_execveat(int fd, struct filename *filename,
2098                 const char __user *const __user *__argv,
2099                 const char __user *const __user *__envp,
2100                 int flags)
2101 {
2102         struct user_arg_ptr argv = { .ptr.native = __argv };
2103         struct user_arg_ptr envp = { .ptr.native = __envp };
2104
2105         return do_execveat_common(fd, filename, argv, envp, flags);
2106 }
2107
2108 #ifdef CONFIG_COMPAT
2109 static int compat_do_execve(struct filename *filename,
2110         const compat_uptr_t __user *__argv,
2111         const compat_uptr_t __user *__envp)
2112 {
2113         struct user_arg_ptr argv = {
2114                 .is_compat = true,
2115                 .ptr.compat = __argv,
2116         };
2117         struct user_arg_ptr envp = {
2118                 .is_compat = true,
2119                 .ptr.compat = __envp,
2120         };
2121         return do_execveat_common(AT_FDCWD, filename, argv, envp, 0);
2122 }
2123
2124 static int compat_do_execveat(int fd, struct filename *filename,
2125                               const compat_uptr_t __user *__argv,
2126                               const compat_uptr_t __user *__envp,
2127                               int flags)
2128 {
2129         struct user_arg_ptr argv = {
2130                 .is_compat = true,
2131                 .ptr.compat = __argv,
2132         };
2133         struct user_arg_ptr envp = {
2134                 .is_compat = true,
2135                 .ptr.compat = __envp,
2136         };
2137         return do_execveat_common(fd, filename, argv, envp, flags);
2138 }
2139 #endif
2140
2141 void set_binfmt(struct linux_binfmt *new)
2142 {
2143         struct mm_struct *mm = current->mm;
2144
2145         if (mm->binfmt)
2146                 module_put(mm->binfmt->module);
2147
2148         mm->binfmt = new;
2149         if (new)
2150                 __module_get(new->module);
2151 }
2152 EXPORT_SYMBOL(set_binfmt);
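
/*
 * Example: ELF's load_elf_binary() calls set_binfmt(&elf_format) once it is
 * committed to the new image, so mm->binfmt pins the handler module for the
 * life of the process and later core dumps can reach mm->binfmt->core_dump.
 */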
2153
2154 /*
2155  * set_dumpable stores the three-value SUID_DUMP_* setting into mm->flags.
2156  */
2157 void set_dumpable(struct mm_struct *mm, int value)
2158 {
2159         if (WARN_ON((unsigned)value > SUID_DUMP_ROOT))
2160                 return;
2161
2162         set_mask_bits(&mm->flags, MMF_DUMPABLE_MASK, value);
2163 }
2164
2165 SYSCALL_DEFINE3(execve,
2166                 const char __user *, filename,
2167                 const char __user *const __user *, argv,
2168                 const char __user *const __user *, envp)
2169 {
2170         return do_execve(getname(filename), argv, envp);
2171 }
2172
2173 SYSCALL_DEFINE5(execveat,
2174                 int, fd, const char __user *, filename,
2175                 const char __user *const __user *, argv,
2176                 const char __user *const __user *, envp,
2177                 int, flags)
2178 {
2179         int lookup_flags = (flags & AT_EMPTY_PATH) ? LOOKUP_EMPTY : 0;
2180
2181         return do_execveat(fd,
2182                            getname_flags(filename, lookup_flags, NULL),
2183                            argv, envp, flags);
2184 }
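
/*
 * Userspace view of execveat(): with AT_EMPTY_PATH and an empty pathname the
 * program referred to by fd itself is executed, which is what allows libc to
 * implement fexecve() on top of this syscall.  Illustrative (userspace, not
 * kernel code):
 *
 *	int fd = open("/usr/bin/true", O_PATH | O_CLOEXEC);
 *	char *const argv[] = { "true", NULL };
 *	char *const envp[] = { NULL };
 *
 *	syscall(__NR_execveat, fd, "", argv, envp, AT_EMPTY_PATH);
 */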
2185
2186 #ifdef CONFIG_COMPAT
2187 COMPAT_SYSCALL_DEFINE3(execve, const char __user *, filename,
2188         const compat_uptr_t __user *, argv,
2189         const compat_uptr_t __user *, envp)
2190 {
2191         return compat_do_execve(getname(filename), argv, envp);
2192 }
2193
2194 COMPAT_SYSCALL_DEFINE5(execveat, int, fd,
2195                        const char __user *, filename,
2196                        const compat_uptr_t __user *, argv,
2197                        const compat_uptr_t __user *, envp,
2198                        int,  flags)
2199 {
2200         int lookup_flags = (flags & AT_EMPTY_PATH) ? LOOKUP_EMPTY : 0;
2201
2202         return compat_do_execveat(fd,
2203                                   getname_flags(filename, lookup_flags, NULL),
2204                                   argv, envp, flags);
2205 }
2206 #endif