1 /*
2  *  linux/fs/exec.c
3  *
4  *  Copyright (C) 1991, 1992  Linus Torvalds
5  */
6
7 /*
8  * #!-checking implemented by tytso.
9  */
10 /*
11  * Demand-loading implemented 01.12.91 - no need to read anything but
12  * the header into memory. The inode of the executable is put into
13  * "current->executable", and page faults do the actual loading. Clean.
14  *
15  * Once more I can proudly say that linux stood up to being changed: it
16  * was less than 2 hours work to get demand-loading completely implemented.
17  *
18  * Demand loading changed July 1993 by Eric Youngdale.   Use mmap instead,
19  * current->executable is only used by the procfs.  This allows a dispatch
20  * table to check for several different types  of binary formats.  We keep
21  * trying until we recognize the file or we run out of supported binary
22  * formats.
23  */
24
25 #include <linux/slab.h>
26 #include <linux/file.h>
27 #include <linux/fdtable.h>
28 #include <linux/mm.h>
29 #include <linux/vmacache.h>
30 #include <linux/stat.h>
31 #include <linux/fcntl.h>
32 #include <linux/swap.h>
33 #include <linux/string.h>
34 #include <linux/init.h>
35 #include <linux/sched/mm.h>
36 #include <linux/sched/coredump.h>
37 #include <linux/sched/signal.h>
38 #include <linux/sched/numa_balancing.h>
39 #include <linux/sched/task.h>
40 #include <linux/pagemap.h>
41 #include <linux/perf_event.h>
42 #include <linux/highmem.h>
43 #include <linux/spinlock.h>
44 #include <linux/key.h>
45 #include <linux/personality.h>
46 #include <linux/binfmts.h>
47 #include <linux/utsname.h>
48 #include <linux/pid_namespace.h>
49 #include <linux/module.h>
50 #include <linux/namei.h>
51 #include <linux/mount.h>
52 #include <linux/security.h>
53 #include <linux/syscalls.h>
54 #include <linux/tsacct_kern.h>
55 #include <linux/cn_proc.h>
56 #include <linux/audit.h>
57 #include <linux/tracehook.h>
58 #include <linux/kmod.h>
59 #include <linux/fsnotify.h>
60 #include <linux/fs_struct.h>
61 #include <linux/pipe_fs_i.h>
62 #include <linux/oom.h>
63 #include <linux/compat.h>
64 #include <linux/vmalloc.h>
65
66 #include <linux/uaccess.h>
67 #include <asm/mmu_context.h>
68 #include <asm/tlb.h>
69
70 #include <trace/events/task.h>
71 #include "internal.h"
72
73 #include <trace/events/sched.h>
74
75 int suid_dumpable = 0;
76
77 static LIST_HEAD(formats);
78 static DEFINE_RWLOCK(binfmt_lock);
79
80 void __register_binfmt(struct linux_binfmt * fmt, int insert)
81 {
82         BUG_ON(!fmt);
83         if (WARN_ON(!fmt->load_binary))
84                 return;
85         write_lock(&binfmt_lock);
86         insert ? list_add(&fmt->lh, &formats) :
87                  list_add_tail(&fmt->lh, &formats);
88         write_unlock(&binfmt_lock);
89 }
90
91 EXPORT_SYMBOL(__register_binfmt);
92
93 void unregister_binfmt(struct linux_binfmt * fmt)
94 {
95         write_lock(&binfmt_lock);
96         list_del(&fmt->lh);
97         write_unlock(&binfmt_lock);
98 }
99
100 EXPORT_SYMBOL(unregister_binfmt);
101
102 static inline void put_binfmt(struct linux_binfmt * fmt)
103 {
104         module_put(fmt->module);
105 }
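/*
 * Editorial sketch, not part of exec.c: how a binary-format handler
 * typically hooks itself into the "formats" list managed above.  The
 * example_* names are hypothetical; real handlers (binfmt_elf,
 * binfmt_script, binfmt_misc, ...) follow the same shape and would live
 * in their own module.  register_binfmt()/insert_binfmt() are the
 * <linux/binfmts.h> wrappers around __register_binfmt() with insert set
 * to 0 (tail of the list) and 1 (head) respectively.
 */
#include <linux/binfmts.h>
#include <linux/module.h>

static int example_load_binary(struct linux_binprm *bprm)
{
        /* Not our format: let the next handler on "formats" try. */
        return -ENOEXEC;
}

static struct linux_binfmt example_format = {
        .module      = THIS_MODULE,
        .load_binary = example_load_binary,
};

static int __init example_binfmt_init(void)
{
        register_binfmt(&example_format);       /* list_add_tail() path */
        return 0;
}

static void __exit example_binfmt_exit(void)
{
        unregister_binfmt(&example_format);
}

module_init(example_binfmt_init);
module_exit(example_binfmt_exit);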
106
107 bool path_noexec(const struct path *path)
108 {
109         return (path->mnt->mnt_flags & MNT_NOEXEC) ||
110                (path->mnt->mnt_sb->s_iflags & SB_I_NOEXEC);
111 }
112
113 #ifdef CONFIG_USELIB
114 /*
115  * Note that a shared library must be both readable and executable for
116  * security reasons.
117  *
118  * Also note that the address to load at is taken from the file itself.
119  */
120 SYSCALL_DEFINE1(uselib, const char __user *, library)
121 {
122         struct linux_binfmt *fmt;
123         struct file *file;
124         struct filename *tmp = getname(library);
125         int error = PTR_ERR(tmp);
126         static const struct open_flags uselib_flags = {
127                 .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
128                 .acc_mode = MAY_READ | MAY_EXEC,
129                 .intent = LOOKUP_OPEN,
130                 .lookup_flags = LOOKUP_FOLLOW,
131         };
132
133         if (IS_ERR(tmp))
134                 goto out;
135
136         file = do_filp_open(AT_FDCWD, tmp, &uselib_flags);
137         putname(tmp);
138         error = PTR_ERR(file);
139         if (IS_ERR(file))
140                 goto out;
141
142         error = -EINVAL;
143         if (!S_ISREG(file_inode(file)->i_mode))
144                 goto exit;
145
146         error = -EACCES;
147         if (path_noexec(&file->f_path))
148                 goto exit;
149
150         fsnotify_open(file);
151
152         error = -ENOEXEC;
153
154         read_lock(&binfmt_lock);
155         list_for_each_entry(fmt, &formats, lh) {
156                 if (!fmt->load_shlib)
157                         continue;
158                 if (!try_module_get(fmt->module))
159                         continue;
160                 read_unlock(&binfmt_lock);
161                 error = fmt->load_shlib(file);
162                 read_lock(&binfmt_lock);
163                 put_binfmt(fmt);
164                 if (error != -ENOEXEC)
165                         break;
166         }
167         read_unlock(&binfmt_lock);
168 exit:
169         fput(file);
170 out:
171         return error;
172 }
173 #endif /* #ifdef CONFIG_USELIB */
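/*
 * Editorial sketch, userspace, not part of exec.c: uselib() has no modern
 * libc wrapper, so a caller would typically go through syscall(2).  The
 * library path below is made up, and the call only does anything useful on
 * kernels built with CONFIG_USELIB and with a binfmt that implements
 * load_shlib (historically a.out shared libraries).
 */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
        if (syscall(SYS_uselib, "/lib/libexample.so") != 0)
                perror("uselib");
        return 0;
}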
174
175 #ifdef CONFIG_MMU
176 /*
177  * The nascent bprm->mm is not visible until exec_mmap(), but it can
178  * use a lot of memory, so account these pages in current->mm temporarily
179  * for oom_badness()->get_mm_rss(). Once exec succeeds or fails, we
180  * change the counter back via acct_arg_size(0).
181  */
182 static void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
183 {
184         struct mm_struct *mm = current->mm;
185         long diff = (long)(pages - bprm->vma_pages);
186
187         if (!mm || !diff)
188                 return;
189
190         bprm->vma_pages = pages;
191         add_mm_counter(mm, MM_ANONPAGES, diff);
192 }
193
194 static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
195                 int write)
196 {
197         struct page *page;
198         int ret;
199         unsigned int gup_flags = FOLL_FORCE;
200
201 #ifdef CONFIG_STACK_GROWSUP
202         if (write) {
203                 ret = expand_downwards(bprm->vma, pos);
204                 if (ret < 0)
205                         return NULL;
206         }
207 #endif
208
209         if (write)
210                 gup_flags |= FOLL_WRITE;
211
212         /*
213          * We are doing an exec().  'current' is the process
214          * doing the exec and bprm->mm is the new process's mm.
215          */
216         ret = get_user_pages_remote(current, bprm->mm, pos, 1, gup_flags,
217                         &page, NULL, NULL);
218         if (ret <= 0)
219                 return NULL;
220
221         if (write) {
222                 unsigned long size = bprm->vma->vm_end - bprm->vma->vm_start;
223                 unsigned long ptr_size, limit;
224
225                 /*
226                  * Since the stack will hold pointers to the strings, we
227                  * must account for them as well.
228                  *
229                  * The size calculation is the entire vma while each arg page is
230                  * built, so each time we get here it's calculating how far along
231                  * we are currently (rather than just the newly
232                  * added size from the arg page).  As a result, we need to
233                  * always add the entire size of the pointers, so that on the
234                  * last call to get_arg_page() we'll actually have the entire
235                  * correct size.
236                  */
237                 ptr_size = (bprm->argc + bprm->envc) * sizeof(void *);
238                 if (ptr_size > ULONG_MAX - size)
239                         goto fail;
240                 size += ptr_size;
241
242                 acct_arg_size(bprm, size / PAGE_SIZE);
243
244                 /*
245                  * We've historically supported up to 32 pages (ARG_MAX)
246                  * of argument strings even with small stacks
247                  */
248                 if (size <= ARG_MAX)
249                         return page;
250
251                 /*
252                  * Limit to 1/4 of the max stack size or 3/4 of _STK_LIM
253                  * (whichever is smaller) for the argv+env strings.
254                  * This ensures that:
255                  *  - the remaining binfmt code will not run out of stack space,
256                  *  - the program will have a reasonable amount of stack left
257                  *    to work from.
258                  */
259                 limit = _STK_LIM / 4 * 3;
260                 limit = min(limit, bprm->rlim_stack.rlim_cur / 4);
261                 if (size > limit)
262                         goto fail;
263         }
264
265         return page;
266
267 fail:
268         put_page(page);
269         return NULL;
270 }
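/*
 * Editorial sketch, userspace, not part of exec.c: a worked version of the
 * limit above.  Anything up to ARG_MAX (128 KiB) is always accepted; past
 * that, the cap is min(3/4 * _STK_LIM, RLIMIT_STACK / 4), which for the
 * common 8 MiB soft limit is min(6 MiB, 2 MiB) = 2 MiB of argv + envp
 * strings and pointers.  The program below redoes that arithmetic against
 * the caller's own rlimit; the 8 MiB constant stands in for _STK_LIM,
 * its generic definition.
 */
#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
        const unsigned long stk_lim = 8UL << 20;        /* generic _STK_LIM */
        struct rlimit rl;
        unsigned long limit;

        if (getrlimit(RLIMIT_STACK, &rl) != 0) {
                perror("getrlimit");
                return 1;
        }

        limit = stk_lim / 4 * 3;                /* 3/4 of _STK_LIM */
        if (rl.rlim_cur / 4 < limit)            /* 1/4 of the soft limit */
                limit = rl.rlim_cur / 4;

        printf("argv+envp capped at roughly %lu bytes\n", limit);
        return 0;
}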
271
272 static void put_arg_page(struct page *page)
273 {
274         put_page(page);
275 }
276
277 static void free_arg_pages(struct linux_binprm *bprm)
278 {
279 }
280
281 static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
282                 struct page *page)
283 {
284         flush_cache_page(bprm->vma, pos, page_to_pfn(page));
285 }
286
287 static int __bprm_mm_init(struct linux_binprm *bprm)
288 {
289         int err;
290         struct vm_area_struct *vma = NULL;
291         struct mm_struct *mm = bprm->mm;
292
293         bprm->vma = vma = vm_area_alloc(mm);
294         if (!vma)
295                 return -ENOMEM;
296
297         if (down_write_killable(&mm->mmap_sem)) {
298                 err = -EINTR;
299                 goto err_free;
300         }
301
302         /*
303          * Place the stack at the largest stack address the architecture
304          * supports. Later, we'll move this to an appropriate place. We don't
305          * use STACK_TOP because that can depend on attributes which aren't
306          * configured yet.
307          */
308         BUILD_BUG_ON(VM_STACK_FLAGS & VM_STACK_INCOMPLETE_SETUP);
309         vma->vm_end = STACK_TOP_MAX;
310         vma->vm_start = vma->vm_end - PAGE_SIZE;
311         vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
312         vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
313
314         err = insert_vm_struct(mm, vma);
315         if (err)
316                 goto err;
317
318         mm->stack_vm = mm->total_vm = 1;
319         arch_bprm_mm_init(mm, vma);
320         up_write(&mm->mmap_sem);
321         bprm->p = vma->vm_end - sizeof(void *);
322         return 0;
323 err:
324         up_write(&mm->mmap_sem);
325 err_free:
326         bprm->vma = NULL;
327         vm_area_free(vma);
328         return err;
329 }
330
331 static bool valid_arg_len(struct linux_binprm *bprm, long len)
332 {
333         return len <= MAX_ARG_STRLEN;
334 }
335
336 #else
337
338 static inline void acct_arg_size(struct linux_binprm *bprm, unsigned long pages)
339 {
340 }
341
342 static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
343                 int write)
344 {
345         struct page *page;
346
347         page = bprm->page[pos / PAGE_SIZE];
348         if (!page && write) {
349                 page = alloc_page(GFP_HIGHUSER|__GFP_ZERO);
350                 if (!page)
351                         return NULL;
352                 bprm->page[pos / PAGE_SIZE] = page;
353         }
354
355         return page;
356 }
357
358 static void put_arg_page(struct page *page)
359 {
360 }
361
362 static void free_arg_page(struct linux_binprm *bprm, int i)
363 {
364         if (bprm->page[i]) {
365                 __free_page(bprm->page[i]);
366                 bprm->page[i] = NULL;
367         }
368 }
369
370 static void free_arg_pages(struct linux_binprm *bprm)
371 {
372         int i;
373
374         for (i = 0; i < MAX_ARG_PAGES; i++)
375                 free_arg_page(bprm, i);
376 }
377
378 static void flush_arg_page(struct linux_binprm *bprm, unsigned long pos,
379                 struct page *page)
380 {
381 }
382
383 static int __bprm_mm_init(struct linux_binprm *bprm)
384 {
385         bprm->p = PAGE_SIZE * MAX_ARG_PAGES - sizeof(void *);
386         return 0;
387 }
388
389 static bool valid_arg_len(struct linux_binprm *bprm, long len)
390 {
391         return len <= bprm->p;
392 }
393
394 #endif /* CONFIG_MMU */
395
396 /*
397  * Create a new mm_struct and populate it with a temporary stack
398  * vm_area_struct.  We don't have enough context at this point to set the stack
399  * flags, permissions, and offset, so we use temporary values.  We'll update
400  * them later in setup_arg_pages().
401  */
402 static int bprm_mm_init(struct linux_binprm *bprm)
403 {
404         int err;
405         struct mm_struct *mm = NULL;
406
407         bprm->mm = mm = mm_alloc();
408         err = -ENOMEM;
409         if (!mm)
410                 goto err;
411
412         /* Save current stack limit for all calculations made during exec. */
413         task_lock(current->group_leader);
414         bprm->rlim_stack = current->signal->rlim[RLIMIT_STACK];
415         task_unlock(current->group_leader);
416
417         err = __bprm_mm_init(bprm);
418         if (err)
419                 goto err;
420
421         return 0;
422
423 err:
424         if (mm) {
425                 bprm->mm = NULL;
426                 mmdrop(mm);
427         }
428
429         return err;
430 }
431
432 struct user_arg_ptr {
433 #ifdef CONFIG_COMPAT
434         bool is_compat;
435 #endif
436         union {
437                 const char __user *const __user *native;
438 #ifdef CONFIG_COMPAT
439                 const compat_uptr_t __user *compat;
440 #endif
441         } ptr;
442 };
443
444 static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
445 {
446         const char __user *native;
447
448 #ifdef CONFIG_COMPAT
449         if (unlikely(argv.is_compat)) {
450                 compat_uptr_t compat;
451
452                 if (get_user(compat, argv.ptr.compat + nr))
453                         return ERR_PTR(-EFAULT);
454
455                 return compat_ptr(compat);
456         }
457 #endif
458
459         if (get_user(native, argv.ptr.native + nr))
460                 return ERR_PTR(-EFAULT);
461
462         return native;
463 }
464
465 /*
466  * count() counts the number of strings in array ARGV.
467  */
468 static int count(struct user_arg_ptr argv, int max)
469 {
470         int i = 0;
471
472         if (argv.ptr.native != NULL) {
473                 for (;;) {
474                         const char __user *p = get_user_arg_ptr(argv, i);
475
476                         if (!p)
477                                 break;
478
479                         if (IS_ERR(p))
480                                 return -EFAULT;
481
482                         if (i >= max)
483                                 return -E2BIG;
484                         ++i;
485
486                         if (fatal_signal_pending(current))
487                                 return -ERESTARTNOHAND;
488                         cond_resched();
489                 }
490         }
491         return i;
492 }
493
494 /*
495  * 'copy_strings()' copies argument/environment strings from the old
496  * process's memory to the new process's stack.  The call to get_user_pages()
497  * ensures the destination page is created and not swapped out.
498  */
499 static int copy_strings(int argc, struct user_arg_ptr argv,
500                         struct linux_binprm *bprm)
501 {
502         struct page *kmapped_page = NULL;
503         char *kaddr = NULL;
504         unsigned long kpos = 0;
505         int ret;
506
507         while (argc-- > 0) {
508                 const char __user *str;
509                 int len;
510                 unsigned long pos;
511
512                 ret = -EFAULT;
513                 str = get_user_arg_ptr(argv, argc);
514                 if (IS_ERR(str))
515                         goto out;
516
517                 len = strnlen_user(str, MAX_ARG_STRLEN);
518                 if (!len)
519                         goto out;
520
521                 ret = -E2BIG;
522                 if (!valid_arg_len(bprm, len))
523                         goto out;
524
525                 /* We're going to work our way backwards. */
526                 pos = bprm->p;
527                 str += len;
528                 bprm->p -= len;
529
530                 while (len > 0) {
531                         int offset, bytes_to_copy;
532
533                         if (fatal_signal_pending(current)) {
534                                 ret = -ERESTARTNOHAND;
535                                 goto out;
536                         }
537                         cond_resched();
538
539                         offset = pos % PAGE_SIZE;
540                         if (offset == 0)
541                                 offset = PAGE_SIZE;
542
543                         bytes_to_copy = offset;
544                         if (bytes_to_copy > len)
545                                 bytes_to_copy = len;
546
547                         offset -= bytes_to_copy;
548                         pos -= bytes_to_copy;
549                         str -= bytes_to_copy;
550                         len -= bytes_to_copy;
551
552                         if (!kmapped_page || kpos != (pos & PAGE_MASK)) {
553                                 struct page *page;
554
555                                 page = get_arg_page(bprm, pos, 1);
556                                 if (!page) {
557                                         ret = -E2BIG;
558                                         goto out;
559                                 }
560
561                                 if (kmapped_page) {
562                                         flush_kernel_dcache_page(kmapped_page);
563                                         kunmap(kmapped_page);
564                                         put_arg_page(kmapped_page);
565                                 }
566                                 kmapped_page = page;
567                                 kaddr = kmap(kmapped_page);
568                                 kpos = pos & PAGE_MASK;
569                                 flush_arg_page(bprm, kpos, kmapped_page);
570                         }
571                         if (copy_from_user(kaddr+offset, str, bytes_to_copy)) {
572                                 ret = -EFAULT;
573                                 goto out;
574                         }
575                 }
576         }
577         ret = 0;
578 out:
579         if (kmapped_page) {
580                 flush_kernel_dcache_page(kmapped_page);
581                 kunmap(kmapped_page);
582                 put_arg_page(kmapped_page);
583         }
584         return ret;
585 }
586
587 /*
588  * Like copy_strings, but get argv and its values from kernel memory.
589  */
590 int copy_strings_kernel(int argc, const char *const *__argv,
591                         struct linux_binprm *bprm)
592 {
593         int r;
594         mm_segment_t oldfs = get_fs();
595         struct user_arg_ptr argv = {
596                 .ptr.native = (const char __user *const  __user *)__argv,
597         };
598
599         set_fs(KERNEL_DS);
600         r = copy_strings(argc, argv, bprm);
601         set_fs(oldfs);
602
603         return r;
604 }
605 EXPORT_SYMBOL(copy_strings_kernel);
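/*
 * Editorial sketch, not part of exec.c: the main caller of the two helpers
 * above is the exec path itself, which pushes the strings bottom-up, the
 * filename first via copy_strings_kernel(), then the environment, then the
 * arguments, so that bprm->p ends up just below argv[0].  A condensed,
 * hypothetical version of that sequence:
 */
static int example_push_strings(struct linux_binprm *bprm,
                                struct user_arg_ptr argv,
                                struct user_arg_ptr envp)
{
        int retval;

        retval = copy_strings_kernel(1, &bprm->filename, bprm);
        if (retval < 0)
                return retval;
        bprm->exec = bprm->p;           /* remember where the filename sits */

        retval = copy_strings(bprm->envc, envp, bprm);
        if (retval < 0)
                return retval;

        return copy_strings(bprm->argc, argv, bprm);
}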
606
607 #ifdef CONFIG_MMU
608
609 /*
610  * During bprm_mm_init(), we create a temporary stack at STACK_TOP_MAX.  Once
611  * the binfmt code determines where the new stack should reside, we shift it to
612  * its final location.  The process proceeds as follows:
613  *
614  * 1) Use shift to calculate the new vma endpoints.
615  * 2) Extend vma to cover both the old and new ranges.  This ensures the
616  *    arguments passed to subsequent functions are consistent.
617  * 3) Move vma's page tables to the new range.
618  * 4) Free up any cleared pgd range.
619  * 5) Shrink the vma to cover only the new range.
620  */
621 static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
622 {
623         struct mm_struct *mm = vma->vm_mm;
624         unsigned long old_start = vma->vm_start;
625         unsigned long old_end = vma->vm_end;
626         unsigned long length = old_end - old_start;
627         unsigned long new_start = old_start - shift;
628         unsigned long new_end = old_end - shift;
629         struct mmu_gather tlb;
630
631         BUG_ON(new_start > new_end);
632
633         /*
634          * ensure there are no vmas between where we want to go
635          * and where we are
636          */
637         if (vma != find_vma(mm, new_start))
638                 return -EFAULT;
639
640         /*
641          * cover the whole range: [new_start, old_end)
642          */
643         if (vma_adjust(vma, new_start, old_end, vma->vm_pgoff, NULL))
644                 return -ENOMEM;
645
646         /*
647          * move the page tables downwards; on failure we rely on
648          * process cleanup to remove whatever mess we made.
649          */
650         if (length != move_page_tables(vma, old_start,
651                                        vma, new_start, length, false))
652                 return -ENOMEM;
653
654         lru_add_drain();
655         tlb_gather_mmu(&tlb, mm, old_start, old_end);
656         if (new_end > old_start) {
657                 /*
658                  * when the old and new regions overlap, clear from new_end.
659                  */
660                 free_pgd_range(&tlb, new_end, old_end, new_end,
661                         vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
662         } else {
663                 /*
664                  * otherwise, clean from old_start; this is done to avoid touching
665                  * the address space in [new_end, old_start), since some architectures
666                  * have constraints on va-space that make this illegal (IA64) -
667                  * for the others it's just a little faster.
668                  */
669                 free_pgd_range(&tlb, old_start, old_end, new_end,
670                         vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
671         }
672         tlb_finish_mmu(&tlb, old_start, old_end);
673
674         /*
675          * Shrink the vma to just the new range.  Always succeeds.
676          */
677         vma_adjust(vma, new_start, new_end, vma->vm_pgoff, NULL);
678
679         return 0;
680 }
681
682 /*
683  * Finalizes the stack vm_area_struct. The flags and permissions are updated,
684  * the stack is optionally relocated, and some extra space is added.
685  */
686 int setup_arg_pages(struct linux_binprm *bprm,
687                     unsigned long stack_top,
688                     int executable_stack)
689 {
690         unsigned long ret;
691         unsigned long stack_shift;
692         struct mm_struct *mm = current->mm;
693         struct vm_area_struct *vma = bprm->vma;
694         struct vm_area_struct *prev = NULL;
695         unsigned long vm_flags;
696         unsigned long stack_base;
697         unsigned long stack_size;
698         unsigned long stack_expand;
699         unsigned long rlim_stack;
700
701 #ifdef CONFIG_STACK_GROWSUP
702         /* Limit stack size */
703         stack_base = bprm->rlim_stack.rlim_max;
704         if (stack_base > STACK_SIZE_MAX)
705                 stack_base = STACK_SIZE_MAX;
706
707         /* Add space for stack randomization. */
708         stack_base += (STACK_RND_MASK << PAGE_SHIFT);
709
710         /* Make sure we didn't let the argument array grow too large. */
711         if (vma->vm_end - vma->vm_start > stack_base)
712                 return -ENOMEM;
713
714         stack_base = PAGE_ALIGN(stack_top - stack_base);
715
716         stack_shift = vma->vm_start - stack_base;
717         mm->arg_start = bprm->p - stack_shift;
718         bprm->p = vma->vm_end - stack_shift;
719 #else
720         stack_top = arch_align_stack(stack_top);
721         stack_top = PAGE_ALIGN(stack_top);
722
723         if (unlikely(stack_top < mmap_min_addr) ||
724             unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
725                 return -ENOMEM;
726
727         stack_shift = vma->vm_end - stack_top;
728
729         bprm->p -= stack_shift;
730         mm->arg_start = bprm->p;
731 #endif
732
733         if (bprm->loader)
734                 bprm->loader -= stack_shift;
735         bprm->exec -= stack_shift;
736
737         if (down_write_killable(&mm->mmap_sem))
738                 return -EINTR;
739
740         vm_flags = VM_STACK_FLAGS;
741
742         /*
743          * Adjust stack execute permissions; explicitly enable for
744          * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
745          * (arch default) otherwise.
746          */
747         if (unlikely(executable_stack == EXSTACK_ENABLE_X))
748                 vm_flags |= VM_EXEC;
749         else if (executable_stack == EXSTACK_DISABLE_X)
750                 vm_flags &= ~VM_EXEC;
751         vm_flags |= mm->def_flags;
752         vm_flags |= VM_STACK_INCOMPLETE_SETUP;
753
754         ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
755                         vm_flags);
756         if (ret)
757                 goto out_unlock;
758         BUG_ON(prev != vma);
759
760         /* Move stack pages down in memory. */
761         if (stack_shift) {
762                 ret = shift_arg_pages(vma, stack_shift);
763                 if (ret)
764                         goto out_unlock;
765         }
766
767         /* mprotect_fixup is overkill to remove the temporary stack flags */
768         vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
769
770         stack_expand = 131072UL; /* randomly 32*4k (or 2*64k) pages */
771         stack_size = vma->vm_end - vma->vm_start;
772         /*
773          * Align this down to a page boundary as expand_stack
774          * will align it up.
775          */
776         rlim_stack = bprm->rlim_stack.rlim_cur & PAGE_MASK;
777 #ifdef CONFIG_STACK_GROWSUP
778         if (stack_size + stack_expand > rlim_stack)
779                 stack_base = vma->vm_start + rlim_stack;
780         else
781                 stack_base = vma->vm_end + stack_expand;
782 #else
783         if (stack_size + stack_expand > rlim_stack)
784                 stack_base = vma->vm_end - rlim_stack;
785         else
786                 stack_base = vma->vm_start - stack_expand;
787 #endif
788         current->mm->start_stack = bprm->p;
789         ret = expand_stack(vma, stack_base);
790         if (ret)
791                 ret = -EFAULT;
792
793 out_unlock:
794         up_write(&mm->mmap_sem);
795         return ret;
796 }
797 EXPORT_SYMBOL(setup_arg_pages);
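/*
 * Editorial sketch, not part of exec.c: a binfmt loader calls
 * setup_arg_pages() once it knows where the stack should live and whether
 * it has to be executable.  binfmt_elf, for example, passes a randomized
 * stack top and the EXSTACK_* value derived from PT_GNU_STACK; the bare
 * STACK_TOP below is a simplification and the helper itself is
 * hypothetical.
 */
static int example_place_stack(struct linux_binprm *bprm, int executable_stack)
{
        int retval;

        retval = setup_arg_pages(bprm, STACK_TOP, executable_stack);
        if (retval < 0)
                return retval;

        /* bprm->p now holds the final user stack pointer for the strings. */
        return 0;
}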
798
799 #else
800
801 /*
802  * Transfer the program arguments and environment from the holding pages
803  * onto the stack. The provided stack pointer is adjusted accordingly.
804  */
805 int transfer_args_to_stack(struct linux_binprm *bprm,
806                            unsigned long *sp_location)
807 {
808         unsigned long index, stop, sp;
809         int ret = 0;
810
811         stop = bprm->p >> PAGE_SHIFT;
812         sp = *sp_location;
813
814         for (index = MAX_ARG_PAGES - 1; index >= stop; index--) {
815                 unsigned int offset = index == stop ? bprm->p & ~PAGE_MASK : 0;
816                 char *src = kmap(bprm->page[index]) + offset;
817                 sp -= PAGE_SIZE - offset;
818                 if (copy_to_user((void *) sp, src, PAGE_SIZE - offset) != 0)
819                         ret = -EFAULT;
820                 kunmap(bprm->page[index]);
821                 if (ret)
822                         goto out;
823         }
824
825         *sp_location = sp;
826
827 out:
828         return ret;
829 }
830 EXPORT_SYMBOL(transfer_args_to_stack);
831
832 #endif /* CONFIG_MMU */
833
834 static struct file *do_open_execat(int fd, struct filename *name, int flags)
835 {
836         struct file *file;
837         int err;
838         struct open_flags open_exec_flags = {
839                 .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
840                 .acc_mode = MAY_EXEC,
841                 .intent = LOOKUP_OPEN,
842                 .lookup_flags = LOOKUP_FOLLOW,
843         };
844
845         if ((flags & ~(AT_SYMLINK_NOFOLLOW | AT_EMPTY_PATH)) != 0)
846                 return ERR_PTR(-EINVAL);
847         if (flags & AT_SYMLINK_NOFOLLOW)
848                 open_exec_flags.lookup_flags &= ~LOOKUP_FOLLOW;
849         if (flags & AT_EMPTY_PATH)
850                 open_exec_flags.lookup_flags |= LOOKUP_EMPTY;
851
852         file = do_filp_open(fd, name, &open_exec_flags);
853         if (IS_ERR(file))
854                 goto out;
855
856         err = -EACCES;
857         if (!S_ISREG(file_inode(file)->i_mode))
858                 goto exit;
859
860         if (path_noexec(&file->f_path))
861                 goto exit;
862
863         err = deny_write_access(file);
864         if (err)
865                 goto exit;
866
867         if (name->name[0] != '\0')
868                 fsnotify_open(file);
869
870 out:
871         return file;
872
873 exit:
874         fput(file);
875         return ERR_PTR(err);
876 }
877
878 struct file *open_exec(const char *name)
879 {
880         struct filename *filename = getname_kernel(name);
881         struct file *f = ERR_CAST(filename);
882
883         if (!IS_ERR(filename)) {
884                 f = do_open_execat(AT_FDCWD, filename, 0);
885                 putname(filename);
886         }
887         return f;
888 }
889 EXPORT_SYMBOL(open_exec);
890
891 int kernel_read_file(struct file *file, void **buf, loff_t *size,
892                      loff_t max_size, enum kernel_read_file_id id)
893 {
894         loff_t i_size, pos;
895         ssize_t bytes = 0;
896         int ret;
897
898         if (!S_ISREG(file_inode(file)->i_mode) || max_size < 0)
899                 return -EINVAL;
900
901         ret = deny_write_access(file);
902         if (ret)
903                 return ret;
904
905         ret = security_kernel_read_file(file, id);
906         if (ret)
907                 goto out;
908
909         i_size = i_size_read(file_inode(file));
910         if (max_size > 0 && i_size > max_size) {
911                 ret = -EFBIG;
912                 goto out;
913         }
914         if (i_size <= 0) {
915                 ret = -EINVAL;
916                 goto out;
917         }
918
919         if (id != READING_FIRMWARE_PREALLOC_BUFFER)
920                 *buf = vmalloc(i_size);
921         if (!*buf) {
922                 ret = -ENOMEM;
923                 goto out;
924         }
925
926         pos = 0;
927         while (pos < i_size) {
928                 bytes = kernel_read(file, *buf + pos, i_size - pos, &pos);
929                 if (bytes < 0) {
930                         ret = bytes;
931                         goto out;
932                 }
933
934                 if (bytes == 0)
935                         break;
936         }
937
938         if (pos != i_size) {
939                 ret = -EIO;
940                 goto out_free;
941         }
942
943         ret = security_kernel_post_read_file(file, *buf, i_size, id);
944         if (!ret)
945                 *size = pos;
946
947 out_free:
948         if (ret < 0) {
949                 if (id != READING_FIRMWARE_PREALLOC_BUFFER) {
950                         vfree(*buf);
951                         *buf = NULL;
952                 }
953         }
954
955 out:
956         allow_write_access(file);
957         return ret;
958 }
959 EXPORT_SYMBOL_GPL(kernel_read_file);
960
961 int kernel_read_file_from_path(const char *path, void **buf, loff_t *size,
962                                loff_t max_size, enum kernel_read_file_id id)
963 {
964         struct file *file;
965         int ret;
966
967         if (!path || !*path)
968                 return -EINVAL;
969
970         file = filp_open(path, O_RDONLY, 0);
971         if (IS_ERR(file))
972                 return PTR_ERR(file);
973
974         ret = kernel_read_file(file, buf, size, max_size, id);
975         fput(file);
976         return ret;
977 }
978 EXPORT_SYMBOL_GPL(kernel_read_file_from_path);
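/*
 * Editorial sketch, not part of exec.c: kernel_read_file_from_path() hands
 * back a vmalloc()ed copy of the whole file, so the caller owns the buffer
 * and releases it with vfree().  The READING_FIRMWARE id is only an
 * example (the firmware loader is one real user of this helper), and the
 * function below is hypothetical.
 */
static int example_read_whole_file(const char *path)
{
        void *data = NULL;
        loff_t size = 0;
        int ret;

        ret = kernel_read_file_from_path(path, &data, &size,
                                         INT_MAX, READING_FIRMWARE);
        if (ret < 0)
                return ret;

        /* ... consume "size" bytes at "data" ... */

        vfree(data);
        return 0;
}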
979
980 int kernel_read_file_from_fd(int fd, void **buf, loff_t *size, loff_t max_size,
981                              enum kernel_read_file_id id)
982 {
983         struct fd f = fdget(fd);
984         int ret = -EBADF;
985
986         if (!f.file)
987                 goto out;
988
989         ret = kernel_read_file(f.file, buf, size, max_size, id);
990 out:
991         fdput(f);
992         return ret;
993 }
994 EXPORT_SYMBOL_GPL(kernel_read_file_from_fd);
995
996 ssize_t read_code(struct file *file, unsigned long addr, loff_t pos, size_t len)
997 {
998         ssize_t res = vfs_read(file, (void __user *)addr, len, &pos);
999         if (res > 0)
1000                 flush_icache_range(addr, addr + len);
1001         return res;
1002 }
1003 EXPORT_SYMBOL(read_code);
1004
1005 static int exec_mmap(struct mm_struct *mm)
1006 {
1007         struct task_struct *tsk;
1008         struct mm_struct *old_mm, *active_mm;
1009
1010         /* Notify parent that we're no longer interested in the old VM */
1011         tsk = current;
1012         old_mm = current->mm;
1013         mm_release(tsk, old_mm);
1014
1015         if (old_mm) {
1016                 sync_mm_rss(old_mm);
1017                 /*
1018                  * Make sure that if there is a core dump in progress
1019                  * for the old mm, we get out and die instead of going
1020                  * through with the exec.  We must hold mmap_sem around
1021                  * checking core_state and changing tsk->mm.
1022                  */
1023                 down_read(&old_mm->mmap_sem);
1024                 if (unlikely(old_mm->core_state)) {
1025                         up_read(&old_mm->mmap_sem);
1026                         return -EINTR;
1027                 }
1028         }
1029         task_lock(tsk);
1030         active_mm = tsk->active_mm;
1031         tsk->mm = mm;
1032         tsk->active_mm = mm;
1033         activate_mm(active_mm, mm);
1034         tsk->mm->vmacache_seqnum = 0;
1035         vmacache_flush(tsk);
1036         task_unlock(tsk);
1037         if (old_mm) {
1038                 up_read(&old_mm->mmap_sem);
1039                 BUG_ON(active_mm != old_mm);
1040                 setmax_mm_hiwater_rss(&tsk->signal->maxrss, old_mm);
1041                 mm_update_next_owner(old_mm);
1042                 mmput(old_mm);
1043                 return 0;
1044         }
1045         mmdrop(active_mm);
1046         return 0;
1047 }
1048
1049 /*
1050  * This function makes sure the current process has its own signal table,
1051  * so that flush_signal_handlers can later reset the handlers without
1052  * disturbing other processes.  (Other processes might share the signal
1053  * table via the CLONE_SIGHAND option to clone().)
1054  */
1055 static int de_thread(struct task_struct *tsk)
1056 {
1057         struct signal_struct *sig = tsk->signal;
1058         struct sighand_struct *oldsighand = tsk->sighand;
1059         spinlock_t *lock = &oldsighand->siglock;
1060
1061         if (thread_group_empty(tsk))
1062                 goto no_thread_group;
1063
1064         /*
1065          * Kill all other threads in the thread group.
1066          */
1067         spin_lock_irq(lock);
1068         if (signal_group_exit(sig)) {
1069                 /*
1070                  * Another group action in progress, just
1071                  * return so that the signal is processed.
1072                  */
1073                 spin_unlock_irq(lock);
1074                 return -EAGAIN;
1075         }
1076
1077         sig->group_exit_task = tsk;
1078         sig->notify_count = zap_other_threads(tsk);
1079         if (!thread_group_leader(tsk))
1080                 sig->notify_count--;
1081
1082         while (sig->notify_count) {
1083                 __set_current_state(TASK_KILLABLE);
1084                 spin_unlock_irq(lock);
1085                 schedule();
1086                 if (unlikely(__fatal_signal_pending(tsk)))
1087                         goto killed;
1088                 spin_lock_irq(lock);
1089         }
1090         spin_unlock_irq(lock);
1091
1092         /*
1093          * At this point all other threads have exited, all we have to
1094          * do is to wait for the thread group leader to become inactive,
1095          * and to assume its PID:
1096          */
1097         if (!thread_group_leader(tsk)) {
1098                 struct task_struct *leader = tsk->group_leader;
1099
1100                 for (;;) {
1101                         cgroup_threadgroup_change_begin(tsk);
1102                         write_lock_irq(&tasklist_lock);
1103                         /*
1104                          * Do this under tasklist_lock to ensure that
1105                          * exit_notify() can't miss ->group_exit_task
1106                          */
1107                         sig->notify_count = -1;
1108                         if (likely(leader->exit_state))
1109                                 break;
1110                         __set_current_state(TASK_KILLABLE);
1111                         write_unlock_irq(&tasklist_lock);
1112                         cgroup_threadgroup_change_end(tsk);
1113                         schedule();
1114                         if (unlikely(__fatal_signal_pending(tsk)))
1115                                 goto killed;
1116                 }
1117
1118                 /*
1119                  * The only record we have of the real-time age of a
1120                  * process, regardless of execs it's done, is start_time.
1121                  * All the past CPU time is accumulated in signal_struct
1122                  * from sister threads now dead.  But in this non-leader
1123                  * exec, nothing survives from the original leader thread,
1124                  * whose birth marks the true age of this process now.
1125                  * When we take on its identity by switching to its PID, we
1126                  * also take its birthdate (always earlier than our own).
1127                  */
1128                 tsk->start_time = leader->start_time;
1129                 tsk->real_start_time = leader->real_start_time;
1130
1131                 BUG_ON(!same_thread_group(leader, tsk));
1132                 BUG_ON(has_group_leader_pid(tsk));
1133                 /*
1134                  * An exec() starts a new thread group with the
1135                  * TGID of the previous thread group. Rehash the
1136                  * two threads with a switched PID, and release
1137                  * the former thread group leader:
1138                  */
1139
1140                 /* Become a process group leader with the old leader's pid.
1141                  * The old leader becomes a thread of this thread group.
1142                  * Note: The old leader also uses this pid until release_task
1143                  *       is called.  Odd but simple and correct.
1144                  */
1145                 tsk->pid = leader->pid;
1146                 change_pid(tsk, PIDTYPE_PID, task_pid(leader));
1147                 transfer_pid(leader, tsk, PIDTYPE_PGID);
1148                 transfer_pid(leader, tsk, PIDTYPE_SID);
1149
1150                 list_replace_rcu(&leader->tasks, &tsk->tasks);
1151                 list_replace_init(&leader->sibling, &tsk->sibling);
1152
1153                 tsk->group_leader = tsk;
1154                 leader->group_leader = tsk;
1155
1156                 tsk->exit_signal = SIGCHLD;
1157                 leader->exit_signal = -1;
1158
1159                 BUG_ON(leader->exit_state != EXIT_ZOMBIE);
1160                 leader->exit_state = EXIT_DEAD;
1161
1162                 /*
1163                  * We are going to release_task()->ptrace_unlink() silently; the
1164                  * tracer can sleep in do_wait(). EXIT_DEAD guarantees that the
1165                  * tracer won't block again waiting for this thread.
1166                  */
1167                 if (unlikely(leader->ptrace))
1168                         __wake_up_parent(leader, leader->parent);
1169                 write_unlock_irq(&tasklist_lock);
1170                 cgroup_threadgroup_change_end(tsk);
1171
1172                 release_task(leader);
1173         }
1174
1175         sig->group_exit_task = NULL;
1176         sig->notify_count = 0;
1177
1178 no_thread_group:
1179         /* we have changed execution domain */
1180         tsk->exit_signal = SIGCHLD;
1181
1182 #ifdef CONFIG_POSIX_TIMERS
1183         exit_itimers(sig);
1184         flush_itimer_signals();
1185 #endif
1186
1187         if (atomic_read(&oldsighand->count) != 1) {
1188                 struct sighand_struct *newsighand;
1189                 /*
1190                  * This ->sighand is shared with the CLONE_SIGHAND
1191                  * but not CLONE_THREAD task, switch to the new one.
1192                  */
1193                 newsighand = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
1194                 if (!newsighand)
1195                         return -ENOMEM;
1196
1197                 atomic_set(&newsighand->count, 1);
1198                 memcpy(newsighand->action, oldsighand->action,
1199                        sizeof(newsighand->action));
1200
1201                 write_lock_irq(&tasklist_lock);
1202                 spin_lock(&oldsighand->siglock);
1203                 rcu_assign_pointer(tsk->sighand, newsighand);
1204                 spin_unlock(&oldsighand->siglock);
1205                 write_unlock_irq(&tasklist_lock);
1206
1207                 __cleanup_sighand(oldsighand);
1208         }
1209
1210         BUG_ON(!thread_group_leader(tsk));
1211         return 0;
1212
1213 killed:
1214         /* protects against exit_notify() and __exit_signal() */
1215         read_lock(&tasklist_lock);
1216         sig->group_exit_task = NULL;
1217         sig->notify_count = 0;
1218         read_unlock(&tasklist_lock);
1219         return -EAGAIN;
1220 }
1221
1222 char *__get_task_comm(char *buf, size_t buf_size, struct task_struct *tsk)
1223 {
1224         task_lock(tsk);
1225         strncpy(buf, tsk->comm, buf_size);
1226         task_unlock(tsk);
1227         return buf;
1228 }
1229 EXPORT_SYMBOL_GPL(__get_task_comm);
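/*
 * Editorial sketch, not part of exec.c: callers normally use the
 * get_task_comm() wrapper, which checks at build time that the buffer is
 * exactly TASK_COMM_LEN bytes and then calls __get_task_comm() above.
 * The function below is hypothetical.
 */
static void example_print_comm(struct task_struct *tsk)
{
        char comm[TASK_COMM_LEN];

        get_task_comm(comm, tsk);
        pr_info("task %d is running \"%s\"\n", task_pid_nr(tsk), comm);
}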
1230
1231 /*
1232  * These functions flush out all traces of the currently running executable
1233  * so that a new one can be started.
1234  */
1235
1236 void __set_task_comm(struct task_struct *tsk, const char *buf, bool exec)
1237 {
1238         task_lock(tsk);
1239         trace_task_rename(tsk, buf);
1240         strlcpy(tsk->comm, buf, sizeof(tsk->comm));
1241         task_unlock(tsk);
1242         perf_event_comm(tsk, exec);
1243 }
1244
1245 /*
1246  * Calling this is the point of no return. None of the failures will be
1247  * seen by userspace since either the process is already taking a fatal
1248  * signal (via de_thread() or coredump), or will have SEGV raised
1249  * (after exec_mmap()) by search_binary_handler() (see below).
1250  */
1251 int flush_old_exec(struct linux_binprm * bprm)
1252 {
1253         int retval;
1254
1255         /*
1256          * Make sure we have a private signal table and that
1257          * we are unassociated from the previous thread group.
1258          */
1259         retval = de_thread(current);
1260         if (retval)
1261                 goto out;
1262
1263         /*
1264          * Must be called _before_ exec_mmap() as bprm->mm is
1265  * not visible until then. This also enables the update
1266          * to be lockless.
1267          */
1268         set_mm_exe_file(bprm->mm, bprm->file);
1269
1270         /*
1271          * Release all of the old mmap stuff
1272          */
1273         acct_arg_size(bprm, 0);
1274         retval = exec_mmap(bprm->mm);
1275         if (retval)
1276                 goto out;
1277
1278         /*
1279          * After clearing bprm->mm (to mark that current is using the
1280          * prepared mm now), we have nothing left of the original
1281          * process. If anything from here on returns an error, the check
1282          * in search_binary_handler() will SEGV current.
1283          */
1284         bprm->mm = NULL;
1285
1286         set_fs(USER_DS);
1287         current->flags &= ~(PF_RANDOMIZE | PF_FORKNOEXEC | PF_KTHREAD |
1288                                         PF_NOFREEZE | PF_NO_SETAFFINITY);
1289         flush_thread();
1290         current->personality &= ~bprm->per_clear;
1291
1292         /*
1293          * We have to apply CLOEXEC before we change whether the process is
1294          * dumpable (in setup_new_exec) to avoid a race with a process in userspace
1295          * trying to access the should-be-closed file descriptors of a process
1296          * undergoing exec(2).
1297          */
1298         do_close_on_exec(current->files);
1299         return 0;
1300
1301 out:
1302         return retval;
1303 }
1304 EXPORT_SYMBOL(flush_old_exec);
1305
1306 void would_dump(struct linux_binprm *bprm, struct file *file)
1307 {
1308         struct inode *inode = file_inode(file);
1309         if (inode_permission(inode, MAY_READ) < 0) {
1310                 struct user_namespace *old, *user_ns;
1311                 bprm->interp_flags |= BINPRM_FLAGS_ENFORCE_NONDUMP;
1312
1313                 /* Ensure mm->user_ns contains the executable */
1314                 user_ns = old = bprm->mm->user_ns;
1315                 while ((user_ns != &init_user_ns) &&
1316                        !privileged_wrt_inode_uidgid(user_ns, inode))
1317                         user_ns = user_ns->parent;
1318
1319                 if (old != user_ns) {
1320                         bprm->mm->user_ns = get_user_ns(user_ns);
1321                         put_user_ns(old);
1322                 }
1323         }
1324 }
1325 EXPORT_SYMBOL(would_dump);
1326
1327 void setup_new_exec(struct linux_binprm * bprm)
1328 {
1329         /*
1330  * Once here, prepare_binprm() will not be called any more, so
1331          * the final state of setuid/setgid/fscaps can be merged into the
1332          * secureexec flag.
1333          */
1334         bprm->secureexec |= bprm->cap_elevated;
1335
1336         if (bprm->secureexec) {
1337                 /* Make sure parent cannot signal privileged process. */
1338                 current->pdeath_signal = 0;
1339
1340                 /*
1341                  * For secureexec, reset the stack limit to sane default to
1342                  * avoid bad behavior from the prior rlimits. This has to
1343                  * happen before arch_pick_mmap_layout(), which examines
1344                  * RLIMIT_STACK, but after the point of no return to avoid
1345                  * needing to clean up the change on failure.
1346                  */
1347                 if (bprm->rlim_stack.rlim_cur > _STK_LIM)
1348                         bprm->rlim_stack.rlim_cur = _STK_LIM;
1349         }
1350
1351         arch_pick_mmap_layout(current->mm, &bprm->rlim_stack);
1352
1353         current->sas_ss_sp = current->sas_ss_size = 0;
1354
1355         /*
1356  * Figure out dumpability. Note that checking only current here
1357  * is wrong, but userspace depends on it. This should be testing
1358          * bprm->secureexec instead.
1359          */
1360         if (bprm->interp_flags & BINPRM_FLAGS_ENFORCE_NONDUMP ||
1361             !(uid_eq(current_euid(), current_uid()) &&
1362               gid_eq(current_egid(), current_gid())))
1363                 set_dumpable(current->mm, suid_dumpable);
1364         else
1365                 set_dumpable(current->mm, SUID_DUMP_USER);
1366
1367         arch_setup_new_exec();
1368         perf_event_exec();
1369         __set_task_comm(current, kbasename(bprm->filename), true);
1370
1371         /* Set the new mm task size. We have to do that late because it may
1372          * depend on TIF_32BIT which is only updated in flush_thread() on
1373          * some architectures like powerpc
1374          */
1375         current->mm->task_size = TASK_SIZE;
1376
1377         /* An exec changes our domain. We are no longer part of the thread
1378            group */
1379         current->self_exec_id++;
1380         flush_signal_handlers(current, 0);
1381 }
1382 EXPORT_SYMBOL(setup_new_exec);
1383
1384 /* Runs immediately before start_thread() takes over. */
1385 void finalize_exec(struct linux_binprm *bprm)
1386 {
1387         /* Store any stack rlimit changes before starting thread. */
1388         task_lock(current->group_leader);
1389         current->signal->rlim[RLIMIT_STACK] = bprm->rlim_stack;
1390         task_unlock(current->group_leader);
1391 }
1392 EXPORT_SYMBOL(finalize_exec);
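/*
 * Editorial sketch, not part of exec.c: the helpers above are meant to be
 * called from a binfmt's load_binary() in a fixed order.  Everything before
 * flush_old_exec() may still fail back to the caller; everything after it
 * is past the point of no return.  The entry point below is hypothetical
 * and the header parsing / mapping of the image is elided.
 */
static int example_load_binary_skeleton(struct linux_binprm *bprm)
{
        unsigned long entry = 0;        /* hypothetical entry point */
        int retval;

        /* Parse bprm->buf / bprm->file here; returning -ENOEXEC is still safe. */

        retval = flush_old_exec(bprm);          /* point of no return */
        if (retval)
                return retval;

        setup_new_exec(bprm);

        retval = setup_arg_pages(bprm, STACK_TOP, EXSTACK_DEFAULT);
        if (retval < 0)
                return retval;

        /* ... map the executable, pick "entry", set up mm->brk, etc ... */

        install_exec_creds(bprm);
        finalize_exec(bprm);
        start_thread(current_pt_regs(), entry, bprm->p);
        return 0;
}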
1393
1394 /*
1395  * Prepare credentials and lock ->cred_guard_mutex.
1396  * install_exec_creds() commits the new creds and drops the lock.
1397  * Or, if exec fails before that, free_bprm() should release ->cred
1398  * and unlock.
1399  */
1400 int prepare_bprm_creds(struct linux_binprm *bprm)
1401 {
1402         if (mutex_lock_interruptible(&current->signal->cred_guard_mutex))
1403                 return -ERESTARTNOINTR;
1404
1405         bprm->cred = prepare_exec_creds();
1406         if (likely(bprm->cred))
1407                 return 0;
1408
1409         mutex_unlock(&current->signal->cred_guard_mutex);
1410         return -ENOMEM;
1411 }
1412
1413 static void free_bprm(struct linux_binprm *bprm)
1414 {
1415         free_arg_pages(bprm);
1416         if (bprm->cred) {
1417                 mutex_unlock(&current->signal->cred_guard_mutex);
1418                 abort_creds(bprm->cred);
1419         }
1420         if (bprm->file) {
1421                 allow_write_access(bprm->file);
1422                 fput(bprm->file);
1423         }
1424         /* If a binfmt changed the interp, free it. */
1425         if (bprm->interp != bprm->filename)
1426                 kfree(bprm->interp);
1427         kfree(bprm);
1428 }
1429
1430 int bprm_change_interp(const char *interp, struct linux_binprm *bprm)
1431 {
1432         /* If a binfmt changed the interp, free it first. */
1433         if (bprm->interp != bprm->filename)
1434                 kfree(bprm->interp);
1435         bprm->interp = kstrdup(interp, GFP_KERNEL);
1436         if (!bprm->interp)
1437                 return -ENOMEM;
1438         return 0;
1439 }
1440 EXPORT_SYMBOL(bprm_change_interp);
1441
1442 /*
1443  * install the new credentials for this executable
1444  */
1445 void install_exec_creds(struct linux_binprm *bprm)
1446 {
1447         security_bprm_committing_creds(bprm);
1448
1449         commit_creds(bprm->cred);
1450         bprm->cred = NULL;
1451
1452         /*
1453          * Disable monitoring for regular users
1454          * when executing setuid binaries. Must
1455          * wait until new credentials are committed
1456          * by commit_creds() above
1457          */
1458         if (get_dumpable(current->mm) != SUID_DUMP_USER)
1459                 perf_event_exit_task(current);
1460         /*
1461          * cred_guard_mutex must be held at least to this point to prevent
1462          * ptrace_attach() from altering our determination of the task's
1463          * credentials; any time after this it may be unlocked.
1464          */
1465         security_bprm_committed_creds(bprm);
1466         mutex_unlock(&current->signal->cred_guard_mutex);
1467 }
1468 EXPORT_SYMBOL(install_exec_creds);
1469
1470 /*
1471  * determine how safe it is to execute the proposed program
1472  * - the caller must hold ->cred_guard_mutex to protect against
1473  *   PTRACE_ATTACH or seccomp thread-sync
1474  */
1475 static void check_unsafe_exec(struct linux_binprm *bprm)
1476 {
1477         struct task_struct *p = current, *t;
1478         unsigned n_fs;
1479
1480         if (p->ptrace)
1481                 bprm->unsafe |= LSM_UNSAFE_PTRACE;
1482
1483         /*
1484          * This isn't strictly necessary, but it makes it harder for LSMs to
1485          * mess up.
1486          */
1487         if (task_no_new_privs(current))
1488                 bprm->unsafe |= LSM_UNSAFE_NO_NEW_PRIVS;
1489
1490         t = p;
1491         n_fs = 1;
1492         spin_lock(&p->fs->lock);
1493         rcu_read_lock();
1494         while_each_thread(p, t) {
1495                 if (t->fs == p->fs)
1496                         n_fs++;
1497         }
1498         rcu_read_unlock();
1499
1500         if (p->fs->users > n_fs)
1501                 bprm->unsafe |= LSM_UNSAFE_SHARE;
1502         else
1503                 p->fs->in_exec = 1;
1504         spin_unlock(&p->fs->lock);
1505 }
1506
1507 static void bprm_fill_uid(struct linux_binprm *bprm)
1508 {
1509         struct inode *inode;
1510         unsigned int mode;
1511         kuid_t uid;
1512         kgid_t gid;
1513
1514         /*
1515          * Since this can be called multiple times (via prepare_binprm),
1516          * we must clear any previous work done when setting set[ug]id
1517          * bits from any earlier bprm->file uses (for example when run
1518          * first for a setuid script then again for its interpreter).
1519          */
1520         bprm->cred->euid = current_euid();
1521         bprm->cred->egid = current_egid();
1522
1523         if (!mnt_may_suid(bprm->file->f_path.mnt))
1524                 return;
1525
1526         if (task_no_new_privs(current))
1527                 return;
1528
1529         inode = bprm->file->f_path.dentry->d_inode;
1530         mode = READ_ONCE(inode->i_mode);
1531         if (!(mode & (S_ISUID|S_ISGID)))
1532                 return;
1533
1534         /* Be careful if suid/sgid is set */
1535         inode_lock(inode);
1536
1537         /* reload atomically mode/uid/gid now that lock held */
1538         mode = inode->i_mode;
1539         uid = inode->i_uid;
1540         gid = inode->i_gid;
1541         inode_unlock(inode);
1542
1543         /* We ignore suid/sgid if there are no mappings for them in the ns */
1544         if (!kuid_has_mapping(bprm->cred->user_ns, uid) ||
1545                  !kgid_has_mapping(bprm->cred->user_ns, gid))
1546                 return;
1547
1548         if (mode & S_ISUID) {
1549                 bprm->per_clear |= PER_CLEAR_ON_SETID;
1550                 bprm->cred->euid = uid;
1551         }
1552
1553         if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) {
1554                 bprm->per_clear |= PER_CLEAR_ON_SETID;
1555                 bprm->cred->egid = gid;
1556         }
1557 }
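/*
 * Editorial worked example for the logic above, not in the original: a
 * root-owned file with mode 04755 run by uid 1000 has S_ISUID set, so
 * bprm->cred->euid becomes 0 while the real uid stays 1000.  Mode 02755
 * (setgid plus group execute) likewise switches egid to the file's gid,
 * whereas mode 02644 (setgid without group execute, i.e. mandatory
 * locking) is deliberately left alone by the S_ISGID | S_IXGRP test.
 * Both effects are skipped on nosuid mounts, under no_new_privs, and when
 * the file's ids have no mapping in the current user namespace.
 */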
1558
1559 /*
1560  * Fill the binprm structure from the inode.
1561  * Check permissions, then read the first 128 (BINPRM_BUF_SIZE) bytes
1562  *
1563  * This may be called multiple times for binary chains (scripts for example).
1564  */
1565 int prepare_binprm(struct linux_binprm *bprm)
1566 {
1567         int retval;
1568         loff_t pos = 0;
1569
1570         bprm_fill_uid(bprm);
1571
1572         /* fill in binprm security blob */
1573         retval = security_bprm_set_creds(bprm);
1574         if (retval)
1575                 return retval;
1576         bprm->called_set_creds = 1;
1577
1578         memset(bprm->buf, 0, BINPRM_BUF_SIZE);
1579         return kernel_read(bprm->file, bprm->buf, BINPRM_BUF_SIZE, &pos);
1580 }
1581
1582 EXPORT_SYMBOL(prepare_binprm);
1583
1584 /*
1585  * Arguments are '\0' separated strings found at the location bprm->p
1586  * points to; chop off the first by relocating bprm->p to right after
1587  * the first '\0' encountered.
1588  */
1589 int remove_arg_zero(struct linux_binprm *bprm)
1590 {
1591         int ret = 0;
1592         unsigned long offset;
1593         char *kaddr;
1594         struct page *page;
1595
1596         if (!bprm->argc)
1597                 return 0;
1598
1599         do {
1600                 offset = bprm->p & ~PAGE_MASK;
1601                 page = get_arg_page(bprm, bprm->p, 0);
1602                 if (!page) {
1603                         ret = -EFAULT;
1604                         goto out;
1605                 }
1606                 kaddr = kmap_atomic(page);
1607
1608                 for (; offset < PAGE_SIZE && kaddr[offset];
1609                                 offset++, bprm->p++)
1610                         ;
1611
1612                 kunmap_atomic(kaddr);
1613                 put_arg_page(page);
1614         } while (offset == PAGE_SIZE);
1615
1616         bprm->p++;
1617         bprm->argc--;
1618         ret = 0;
1619
1620 out:
1621         return ret;
1622 }
1623 EXPORT_SYMBOL(remove_arg_zero);
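
/*
 * Editor's note: an illustrative sketch, loosely modelled on how
 * fs/binfmt_script.c uses remove_arg_zero(); it is not part of exec.c.
 * "interp_name" is a hypothetical argument standing in for an interpreter
 * path parsed from a #! line.
 */
static int example_replace_argv0(struct linux_binprm *bprm,
				 const char *interp_name)
{
	int retval;

	/* Drop the script's own name from the front of the argument area... */
	retval = remove_arg_zero(bprm);
	if (retval)
		return retval;

	/*
	 * ...and push the interpreter name in its place.  Strings grow down
	 * from bprm->p, so the most recently copied string becomes argv[0].
	 */
	retval = copy_strings_kernel(1, &interp_name, bprm);
	if (retval < 0)
		return retval;
	bprm->argc++;

	return 0;
}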
1624
1625 #define printable(c) (((c)=='\t') || ((c)=='\n') || (0x20<=(c) && (c)<=0x7e))
1626 /*
1627  * cycle through the list of binary format handlers until one recognizes the image
1628  */
1629 int search_binary_handler(struct linux_binprm *bprm)
1630 {
1631         bool need_retry = IS_ENABLED(CONFIG_MODULES);
1632         struct linux_binfmt *fmt;
1633         int retval;
1634
1635         /* This allows up to 5 levels of binfmt rewrites before failing hard. */
1636         if (bprm->recursion_depth > 5)
1637                 return -ELOOP;
1638
1639         retval = security_bprm_check(bprm);
1640         if (retval)
1641                 return retval;
1642
1643         retval = -ENOENT;
1644  retry:
1645         read_lock(&binfmt_lock);
1646         list_for_each_entry(fmt, &formats, lh) {
1647                 if (!try_module_get(fmt->module))
1648                         continue;
1649                 read_unlock(&binfmt_lock);
1650                 bprm->recursion_depth++;
1651                 retval = fmt->load_binary(bprm);
1652                 read_lock(&binfmt_lock);
1653                 put_binfmt(fmt);
1654                 bprm->recursion_depth--;
1655                 if (retval < 0 && !bprm->mm) {
1656                         /* we got to flush_old_exec() and failed after it */
1657                         read_unlock(&binfmt_lock);
1658                         force_sigsegv(SIGSEGV, current);
1659                         return retval;
1660                 }
1661                 if (retval != -ENOEXEC || !bprm->file) {
1662                         read_unlock(&binfmt_lock);
1663                         return retval;
1664                 }
1665         }
1666         read_unlock(&binfmt_lock);
1667
1668         if (need_retry) {
1669                 if (printable(bprm->buf[0]) && printable(bprm->buf[1]) &&
1670                     printable(bprm->buf[2]) && printable(bprm->buf[3]))
1671                         return retval;
1672                 if (request_module("binfmt-%04x", *(ushort *)(bprm->buf + 2)) < 0)
1673                         return retval;
1674                 need_retry = false;
1675                 goto retry;
1676         }
1677
1678         return retval;
1679 }
1680 EXPORT_SYMBOL(search_binary_handler);
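
/*
 * Editor's note: an illustrative sketch, not part of exec.c, showing how a
 * format ends up on the list that search_binary_handler() walks.  The
 * example_format/example_load_binary names are hypothetical (see the sketch
 * after prepare_binprm() above); register_binfmt(), insert_binfmt() and
 * unregister_binfmt() are the real interface from <linux/binfmts.h>.
 */
static struct linux_binfmt example_format = {
	.module      = THIS_MODULE,
	.load_binary = example_load_binary,
};

static int __init example_format_init(void)
{
	/*
	 * register_binfmt() appends to the tail of the formats list;
	 * insert_binfmt() would put it at the head so it is tried first.
	 */
	register_binfmt(&example_format);
	return 0;
}

static void __exit example_format_exit(void)
{
	unregister_binfmt(&example_format);
}

module_init(example_format_init);
module_exit(example_format_exit);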
1681
1682 static int exec_binprm(struct linux_binprm *bprm)
1683 {
1684         pid_t old_pid, old_vpid;
1685         int ret;
1686
1687         /* Need to fetch pid before load_binary changes it */
1688         old_pid = current->pid;
1689         rcu_read_lock();
1690         old_vpid = task_pid_nr_ns(current, task_active_pid_ns(current->parent));
1691         rcu_read_unlock();
1692
1693         ret = search_binary_handler(bprm);
1694         if (ret >= 0) {
1695                 audit_bprm(bprm);
1696                 trace_sched_process_exec(current, old_pid, bprm);
1697                 ptrace_event(PTRACE_EVENT_EXEC, old_vpid);
1698                 proc_exec_connector(current);
1699         }
1700
1701         return ret;
1702 }
1703
1704 /*
1705  * sys_execve() executes a new program.
1706  */
1707 static int __do_execve_file(int fd, struct filename *filename,
1708                             struct user_arg_ptr argv,
1709                             struct user_arg_ptr envp,
1710                             int flags, struct file *file)
1711 {
1712         char *pathbuf = NULL;
1713         struct linux_binprm *bprm;
1714         struct files_struct *displaced;
1715         int retval;
1716
1717         if (IS_ERR(filename))
1718                 return PTR_ERR(filename);
1719
1720         /*
1721          * We move the actual failure in case of RLIMIT_NPROC excess from
1722          * set*uid() to execve() because too many poorly written programs
1723          * don't check setuid() return code.  Here we additionally recheck
1724          * whether NPROC limit is still exceeded.
1725          */
1726         if ((current->flags & PF_NPROC_EXCEEDED) &&
1727             atomic_read(&current_user()->processes) > rlimit(RLIMIT_NPROC)) {
1728                 retval = -EAGAIN;
1729                 goto out_ret;
1730         }
1731
1732         /* We're below the limit (still or again), so we don't want to make
1733          * further execve() calls fail. */
1734         current->flags &= ~PF_NPROC_EXCEEDED;
1735
1736         retval = unshare_files(&displaced);
1737         if (retval)
1738                 goto out_ret;
1739
1740         retval = -ENOMEM;
1741         bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
1742         if (!bprm)
1743                 goto out_files;
1744
1745         retval = prepare_bprm_creds(bprm);
1746         if (retval)
1747                 goto out_free;
1748
1749         check_unsafe_exec(bprm);
1750         current->in_execve = 1;
1751
1752         if (!file)
1753                 file = do_open_execat(fd, filename, flags);
1754         retval = PTR_ERR(file);
1755         if (IS_ERR(file))
1756                 goto out_unmark;
1757
1758         sched_exec();
1759
1760         bprm->file = file;
1761         if (!filename) {
1762                 bprm->filename = "none";
1763         } else if (fd == AT_FDCWD || filename->name[0] == '/') {
1764                 bprm->filename = filename->name;
1765         } else {
1766                 if (filename->name[0] == '\0')
1767                         pathbuf = kasprintf(GFP_KERNEL, "/dev/fd/%d", fd);
1768                 else
1769                         pathbuf = kasprintf(GFP_KERNEL, "/dev/fd/%d/%s",
1770                                             fd, filename->name);
1771                 if (!pathbuf) {
1772                         retval = -ENOMEM;
1773                         goto out_unmark;
1774                 }
1775                 /*
1776                  * Record that a name derived from an O_CLOEXEC fd will be
1777                  * inaccessible after exec. Relies on having exclusive access to
1778                  * current->files (due to unshare_files above).
1779                  */
1780                 if (close_on_exec(fd, rcu_dereference_raw(current->files->fdt)))
1781                         bprm->interp_flags |= BINPRM_FLAGS_PATH_INACCESSIBLE;
1782                 bprm->filename = pathbuf;
1783         }
1784         bprm->interp = bprm->filename;
1785
1786         retval = bprm_mm_init(bprm);
1787         if (retval)
1788                 goto out_unmark;
1789
1790         bprm->argc = count(argv, MAX_ARG_STRINGS);
1791         if ((retval = bprm->argc) < 0)
1792                 goto out;
1793
1794         bprm->envc = count(envp, MAX_ARG_STRINGS);
1795         if ((retval = bprm->envc) < 0)
1796                 goto out;
1797
1798         retval = prepare_binprm(bprm);
1799         if (retval < 0)
1800                 goto out;
1801
1802         retval = copy_strings_kernel(1, &bprm->filename, bprm);
1803         if (retval < 0)
1804                 goto out;
1805
1806         bprm->exec = bprm->p;
1807         retval = copy_strings(bprm->envc, envp, bprm);
1808         if (retval < 0)
1809                 goto out;
1810
1811         retval = copy_strings(bprm->argc, argv, bprm);
1812         if (retval < 0)
1813                 goto out;
1814
1815         would_dump(bprm, bprm->file);
1816
1817         retval = exec_binprm(bprm);
1818         if (retval < 0)
1819                 goto out;
1820
1821         /* execve succeeded */
1822         current->fs->in_exec = 0;
1823         current->in_execve = 0;
1824         membarrier_execve(current);
1825         rseq_execve(current);
1826         acct_update_integrals(current);
1827         task_numa_free(current);
1828         free_bprm(bprm);
1829         kfree(pathbuf);
1830         if (filename)
1831                 putname(filename);
1832         if (displaced)
1833                 put_files_struct(displaced);
1834         return retval;
1835
1836 out:
1837         if (bprm->mm) {
1838                 acct_arg_size(bprm, 0);
1839                 mmput(bprm->mm);
1840         }
1841
1842 out_unmark:
1843         current->fs->in_exec = 0;
1844         current->in_execve = 0;
1845
1846 out_free:
1847         free_bprm(bprm);
1848         kfree(pathbuf);
1849
1850 out_files:
1851         if (displaced)
1852                 reset_files_struct(displaced);
1853 out_ret:
1854         if (filename)
1855                 putname(filename);
1856         return retval;
1857 }
1858
1859 static int do_execveat_common(int fd, struct filename *filename,
1860                               struct user_arg_ptr argv,
1861                               struct user_arg_ptr envp,
1862                               int flags)
1863 {
1864         return __do_execve_file(fd, filename, argv, envp, flags, NULL);
1865 }
1866
1867 int do_execve_file(struct file *file, void *__argv, void *__envp)
1868 {
1869         struct user_arg_ptr argv = { .ptr.native = __argv };
1870         struct user_arg_ptr envp = { .ptr.native = __envp };
1871
1872         return __do_execve_file(AT_FDCWD, NULL, argv, envp, 0, file);
1873 }
1874
1875 int do_execve(struct filename *filename,
1876         const char __user *const __user *__argv,
1877         const char __user *const __user *__envp)
1878 {
1879         struct user_arg_ptr argv = { .ptr.native = __argv };
1880         struct user_arg_ptr envp = { .ptr.native = __envp };
1881         return do_execveat_common(AT_FDCWD, filename, argv, envp, 0);
1882 }
1883
1884 int do_execveat(int fd, struct filename *filename,
1885                 const char __user *const __user *__argv,
1886                 const char __user *const __user *__envp,
1887                 int flags)
1888 {
1889         struct user_arg_ptr argv = { .ptr.native = __argv };
1890         struct user_arg_ptr envp = { .ptr.native = __envp };
1891
1892         return do_execveat_common(fd, filename, argv, envp, flags);
1893 }
1894
1895 #ifdef CONFIG_COMPAT
1896 static int compat_do_execve(struct filename *filename,
1897         const compat_uptr_t __user *__argv,
1898         const compat_uptr_t __user *__envp)
1899 {
1900         struct user_arg_ptr argv = {
1901                 .is_compat = true,
1902                 .ptr.compat = __argv,
1903         };
1904         struct user_arg_ptr envp = {
1905                 .is_compat = true,
1906                 .ptr.compat = __envp,
1907         };
1908         return do_execveat_common(AT_FDCWD, filename, argv, envp, 0);
1909 }
1910
1911 static int compat_do_execveat(int fd, struct filename *filename,
1912                               const compat_uptr_t __user *__argv,
1913                               const compat_uptr_t __user *__envp,
1914                               int flags)
1915 {
1916         struct user_arg_ptr argv = {
1917                 .is_compat = true,
1918                 .ptr.compat = __argv,
1919         };
1920         struct user_arg_ptr envp = {
1921                 .is_compat = true,
1922                 .ptr.compat = __envp,
1923         };
1924         return do_execveat_common(fd, filename, argv, envp, flags);
1925 }
1926 #endif
1927
1928 void set_binfmt(struct linux_binfmt *new)
1929 {
1930         struct mm_struct *mm = current->mm;
1931
1932         if (mm->binfmt)
1933                 module_put(mm->binfmt->module);
1934
1935         mm->binfmt = new;
1936         if (new)
1937                 __module_get(new->module);
1938 }
1939 EXPORT_SYMBOL(set_binfmt);
1940
1941 /*
1942  * set_dumpable stores the three-value SUID_DUMP_* state into mm->flags.
1943  */
1944 void set_dumpable(struct mm_struct *mm, int value)
1945 {
1946         unsigned long old, new;
1947
1948         if (WARN_ON((unsigned)value > SUID_DUMP_ROOT))
1949                 return;
1950
1951         do {
1952                 old = READ_ONCE(mm->flags);
1953                 new = (old & ~MMF_DUMPABLE_MASK) | value;
1954         } while (cmpxchg(&mm->flags, old, new) != old);
1955 }
1956
1957 SYSCALL_DEFINE3(execve,
1958                 const char __user *, filename,
1959                 const char __user *const __user *, argv,
1960                 const char __user *const __user *, envp)
1961 {
1962         return do_execve(getname(filename), argv, envp);
1963 }
1964
1965 SYSCALL_DEFINE5(execveat,
1966                 int, fd, const char __user *, filename,
1967                 const char __user *const __user *, argv,
1968                 const char __user *const __user *, envp,
1969                 int, flags)
1970 {
1971         int lookup_flags = (flags & AT_EMPTY_PATH) ? LOOKUP_EMPTY : 0;
1972
1973         return do_execveat(fd,
1974                            getname_flags(filename, lookup_flags, NULL),
1975                            argv, envp, flags);
1976 }
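
/*
 * Editor's note: a standalone userspace sketch, not kernel code, exercising
 * the execveat() entry point above with AT_EMPTY_PATH so the opened fd
 * itself is executed.  It assumes kernel/libc headers that define
 * SYS_execveat and AT_EMPTY_PATH; "/bin/true" is only an illustrative path.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	char *argv[] = { "true", NULL };
	char *envp[] = { NULL };
	int fd = open("/bin/true", O_RDONLY | O_CLOEXEC);

	if (fd < 0)
		return 1;
	/*
	 * With AT_EMPTY_PATH and an empty pathname the fd is executed
	 * directly; __do_execve_file() names it "/dev/fd/<fd>".  Because the
	 * fd is O_CLOEXEC, a #! script run this way would be flagged
	 * BINPRM_FLAGS_PATH_INACCESSIBLE, since that name disappears once
	 * the exec closes the descriptor.
	 */
	syscall(SYS_execveat, fd, "", argv, envp, AT_EMPTY_PATH);
	return 1;	/* reached only if execveat() failed */
}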
1977
1978 #ifdef CONFIG_COMPAT
1979 COMPAT_SYSCALL_DEFINE3(execve, const char __user *, filename,
1980         const compat_uptr_t __user *, argv,
1981         const compat_uptr_t __user *, envp)
1982 {
1983         return compat_do_execve(getname(filename), argv, envp);
1984 }
1985
1986 COMPAT_SYSCALL_DEFINE5(execveat, int, fd,
1987                        const char __user *, filename,
1988                        const compat_uptr_t __user *, argv,
1989                        const compat_uptr_t __user *, envp,
1990                        int,  flags)
1991 {
1992         int lookup_flags = (flags & AT_EMPTY_PATH) ? LOOKUP_EMPTY : 0;
1993
1994         return compat_do_execveat(fd,
1995                                   getname_flags(filename, lookup_flags, NULL),
1996                                   argv, envp, flags);
1997 }
1998 #endif