1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * mm/mmap.c
4  *
5  * Written by obz.
6  *
7  * Address space accounting code        <alan@lxorguk.ukuu.org.uk>
8  */
9
10 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
11
12 #include <linux/kernel.h>
13 #include <linux/slab.h>
14 #include <linux/backing-dev.h>
15 #include <linux/mm.h>
16 #include <linux/vmacache.h>
17 #include <linux/shm.h>
18 #include <linux/mman.h>
19 #include <linux/pagemap.h>
20 #include <linux/swap.h>
21 #include <linux/syscalls.h>
22 #include <linux/capability.h>
23 #include <linux/init.h>
24 #include <linux/file.h>
25 #include <linux/fs.h>
26 #include <linux/personality.h>
27 #include <linux/security.h>
28 #include <linux/hugetlb.h>
29 #include <linux/shmem_fs.h>
30 #include <linux/profile.h>
31 #include <linux/export.h>
32 #include <linux/mount.h>
33 #include <linux/mempolicy.h>
34 #include <linux/rmap.h>
35 #include <linux/mmu_notifier.h>
36 #include <linux/mmdebug.h>
37 #include <linux/perf_event.h>
38 #include <linux/audit.h>
39 #include <linux/khugepaged.h>
40 #include <linux/uprobes.h>
41 #include <linux/rbtree_augmented.h>
42 #include <linux/notifier.h>
43 #include <linux/memory.h>
44 #include <linux/printk.h>
45 #include <linux/userfaultfd_k.h>
46 #include <linux/moduleparam.h>
47 #include <linux/pkeys.h>
48 #include <linux/oom.h>
49 #include <linux/sched/mm.h>
50
51 #include <linux/uaccess.h>
52 #include <asm/cacheflush.h>
53 #include <asm/tlb.h>
54 #include <asm/mmu_context.h>
55
56 #include "internal.h"
57
58 #ifndef arch_mmap_check
59 #define arch_mmap_check(addr, len, flags)       (0)
60 #endif
61
62 #ifdef CONFIG_HAVE_ARCH_MMAP_RND_BITS
63 const int mmap_rnd_bits_min = CONFIG_ARCH_MMAP_RND_BITS_MIN;
64 const int mmap_rnd_bits_max = CONFIG_ARCH_MMAP_RND_BITS_MAX;
65 int mmap_rnd_bits __read_mostly = CONFIG_ARCH_MMAP_RND_BITS;
66 #endif
67 #ifdef CONFIG_HAVE_ARCH_MMAP_RND_COMPAT_BITS
68 const int mmap_rnd_compat_bits_min = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MIN;
69 const int mmap_rnd_compat_bits_max = CONFIG_ARCH_MMAP_RND_COMPAT_BITS_MAX;
70 int mmap_rnd_compat_bits __read_mostly = CONFIG_ARCH_MMAP_RND_COMPAT_BITS;
71 #endif
72
73 static bool ignore_rlimit_data;
74 core_param(ignore_rlimit_data, ignore_rlimit_data, bool, 0644);
75
76 static void unmap_region(struct mm_struct *mm,
77                 struct vm_area_struct *vma, struct vm_area_struct *prev,
78                 unsigned long start, unsigned long end);
79
80 /* description of effects of mapping type and prot in current implementation.
81  * this is due to the limited x86 page protection hardware.  The expected
82  * behavior is in parens:
83  *
84  * map_type     prot
85  *              PROT_NONE       PROT_READ       PROT_WRITE      PROT_EXEC
86  * MAP_SHARED   r: (no) no      r: (yes) yes    r: (no) yes     r: (no) yes
87  *              w: (no) no      w: (no) no      w: (yes) yes    w: (no) no
88  *              x: (no) no      x: (no) yes     x: (no) yes     x: (yes) yes
89  *
90  * MAP_PRIVATE  r: (no) no      r: (yes) yes    r: (no) yes     r: (no) yes
91  *              w: (no) no      w: (no) no      w: (copy) copy  w: (no) no
92  *              x: (no) no      x: (no) yes     x: (no) yes     x: (yes) yes
93  *
94  * On arm64, PROT_EXEC has the following behaviour for both MAP_SHARED and
95  * MAP_PRIVATE:
96  *                                                              r: (no) no
97  *                                                              w: (no) no
98  *                                                              x: (yes) yes
99  */
100 pgprot_t protection_map[16] __ro_after_init = {
101         __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
102         __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
103 };
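/*
 * The index into protection_map[] above is vm_flags & (VM_READ|VM_WRITE|
 * VM_EXEC|VM_SHARED), see vm_get_page_prot() below.  For example, a
 * MAP_PRIVATE PROT_READ|PROT_WRITE mapping has VM_READ|VM_WRITE set and
 * selects entry 3 (__P011, copy-on-write protections), while the MAP_SHARED
 * variant also has VM_SHARED set and selects entry 11 (__S011, shared
 * writable).
 */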
104
105 #ifndef CONFIG_ARCH_HAS_FILTER_PGPROT
106 static inline pgprot_t arch_filter_pgprot(pgprot_t prot)
107 {
108         return prot;
109 }
110 #endif
111
112 pgprot_t vm_get_page_prot(unsigned long vm_flags)
113 {
114         pgprot_t ret = __pgprot(pgprot_val(protection_map[vm_flags &
115                                 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
116                         pgprot_val(arch_vm_get_page_prot(vm_flags)));
117
118         return arch_filter_pgprot(ret);
119 }
120 EXPORT_SYMBOL(vm_get_page_prot);
121
122 static pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags)
123 {
124         return pgprot_modify(oldprot, vm_get_page_prot(vm_flags));
125 }
126
127 /* Update vma->vm_page_prot to reflect vma->vm_flags. */
128 void vma_set_page_prot(struct vm_area_struct *vma)
129 {
130         unsigned long vm_flags = vma->vm_flags;
131         pgprot_t vm_page_prot;
132
133         vm_page_prot = vm_pgprot_modify(vma->vm_page_prot, vm_flags);
134         if (vma_wants_writenotify(vma, vm_page_prot)) {
135                 vm_flags &= ~VM_SHARED;
136                 vm_page_prot = vm_pgprot_modify(vm_page_prot, vm_flags);
137         }
138         /* remove_protection_ptes reads vma->vm_page_prot without mmap_sem */
139         WRITE_ONCE(vma->vm_page_prot, vm_page_prot);
140 }
141
142 /*
143  * Requires inode->i_mapping->i_mmap_rwsem
144  */
145 static void __remove_shared_vm_struct(struct vm_area_struct *vma,
146                 struct file *file, struct address_space *mapping)
147 {
148         if (vma->vm_flags & VM_DENYWRITE)
149                 atomic_inc(&file_inode(file)->i_writecount);
150         if (vma->vm_flags & VM_SHARED)
151                 mapping_unmap_writable(mapping);
152
153         flush_dcache_mmap_lock(mapping);
154         vma_interval_tree_remove(vma, &mapping->i_mmap);
155         flush_dcache_mmap_unlock(mapping);
156 }
157
158 /*
159  * Unlink a file-based vm structure from its interval tree, to hide
160  * vma from rmap and vmtruncate before freeing its page tables.
161  */
162 void unlink_file_vma(struct vm_area_struct *vma)
163 {
164         struct file *file = vma->vm_file;
165
166         if (file) {
167                 struct address_space *mapping = file->f_mapping;
168                 i_mmap_lock_write(mapping);
169                 __remove_shared_vm_struct(vma, file, mapping);
170                 i_mmap_unlock_write(mapping);
171         }
172 }
173
174 /*
175  * Close a vm structure and free it, returning the next.
176  */
177 static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
178 {
179         struct vm_area_struct *next = vma->vm_next;
180
181         might_sleep();
182         if (vma->vm_ops && vma->vm_ops->close)
183                 vma->vm_ops->close(vma);
184         if (vma->vm_file)
185                 fput(vma->vm_file);
186         mpol_put(vma_policy(vma));
187         vm_area_free(vma);
188         return next;
189 }
190
191 static int do_brk_flags(unsigned long addr, unsigned long request, unsigned long flags,
192                 struct list_head *uf);
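/*
 * brk() grows or shrinks the data segment.  As implemented below it returns
 * the new break on success and the previous break on most failures; -EINTR
 * is returned if a fatal signal is pending while waiting for the mmap_sem
 * write lock.  When the break shrinks, __do_munmap() may downgrade mmap_sem
 * to a read lock for the unmapping itself.
 */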
193 SYSCALL_DEFINE1(brk, unsigned long, brk)
194 {
195         unsigned long retval;
196         unsigned long newbrk, oldbrk, origbrk;
197         struct mm_struct *mm = current->mm;
198         struct vm_area_struct *next;
199         unsigned long min_brk;
200         bool populate;
201         bool downgraded = false;
202         LIST_HEAD(uf);
203
204         if (down_write_killable(&mm->mmap_sem))
205                 return -EINTR;
206
207         origbrk = mm->brk;
208
209 #ifdef CONFIG_COMPAT_BRK
210         /*
211          * CONFIG_COMPAT_BRK can still be overridden by setting
212          * randomize_va_space to 2, which will still cause mm->start_brk
213          * to be arbitrarily shifted
214          */
215         if (current->brk_randomized)
216                 min_brk = mm->start_brk;
217         else
218                 min_brk = mm->end_data;
219 #else
220         min_brk = mm->start_brk;
221 #endif
222         if (brk < min_brk)
223                 goto out;
224
225         /*
226          * Check against rlimit here. If this check is done later after the test
227          * of oldbrk with newbrk then it can escape the test and let the data
228          * segment grow beyond its set limit in the case where the limit is
229          * not page aligned -Ram Gupta
230          */
231         if (check_data_rlimit(rlimit(RLIMIT_DATA), brk, mm->start_brk,
232                               mm->end_data, mm->start_data))
233                 goto out;
234
235         newbrk = PAGE_ALIGN(brk);
236         oldbrk = PAGE_ALIGN(mm->brk);
237         if (oldbrk == newbrk) {
238                 mm->brk = brk;
239                 goto success;
240         }
241
242         /*
243          * Always allow shrinking brk.
244          * __do_munmap() may downgrade mmap_sem to read.
245          */
246         if (brk <= mm->brk) {
247                 int ret;
248
249                 /*
250                  * mm->brk must be protected by write mmap_sem so update it
251                  * before downgrading mmap_sem. When __do_munmap() fails,
252                  * mm->brk will be restored from origbrk.
253                  */
254                 mm->brk = brk;
255                 ret = __do_munmap(mm, newbrk, oldbrk-newbrk, &uf, true);
256                 if (ret < 0) {
257                         mm->brk = origbrk;
258                         goto out;
259                 } else if (ret == 1) {
260                         downgraded = true;
261                 }
262                 goto success;
263         }
264
265         /* Check against existing mmap mappings. */
266         next = find_vma(mm, oldbrk);
267         if (next && newbrk + PAGE_SIZE > vm_start_gap(next))
268                 goto out;
269
270         /* Ok, looks good - let it rip. */
271         if (do_brk_flags(oldbrk, newbrk-oldbrk, 0, &uf) < 0)
272                 goto out;
273         mm->brk = brk;
274
275 success:
276         populate = newbrk > oldbrk && (mm->def_flags & VM_LOCKED) != 0;
277         if (downgraded)
278                 up_read(&mm->mmap_sem);
279         else
280                 up_write(&mm->mmap_sem);
281         userfaultfd_unmap_complete(mm, &uf);
282         if (populate)
283                 mm_populate(oldbrk, newbrk - oldbrk);
284         return brk;
285
286 out:
287         retval = origbrk;
288         up_write(&mm->mmap_sem);
289         return retval;
290 }
291
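/*
 * Return the largest free gap relevant to the subtree rooted at @vma: the
 * gap between @vma and its predecessor (both ends adjusted for stack guard
 * gaps via vm_start_gap()/vm_end_gap()), or the largest rb_subtree_gap
 * recorded in either child subtree, whichever is bigger.
 */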
292 static long vma_compute_subtree_gap(struct vm_area_struct *vma)
293 {
294         unsigned long max, prev_end, subtree_gap;
295
296         /*
297          * Note: in the rare case of a VM_GROWSDOWN above a VM_GROWSUP, we
298          * allow two stack_guard_gaps between them here, and when choosing
299          * an unmapped area; whereas when expanding we only require one.
300          * That's a little inconsistent, but keeps the code here simpler.
301          */
302         max = vm_start_gap(vma);
303         if (vma->vm_prev) {
304                 prev_end = vm_end_gap(vma->vm_prev);
305                 if (max > prev_end)
306                         max -= prev_end;
307                 else
308                         max = 0;
309         }
310         if (vma->vm_rb.rb_left) {
311                 subtree_gap = rb_entry(vma->vm_rb.rb_left,
312                                 struct vm_area_struct, vm_rb)->rb_subtree_gap;
313                 if (subtree_gap > max)
314                         max = subtree_gap;
315         }
316         if (vma->vm_rb.rb_right) {
317                 subtree_gap = rb_entry(vma->vm_rb.rb_right,
318                                 struct vm_area_struct, vm_rb)->rb_subtree_gap;
319                 if (subtree_gap > max)
320                         max = subtree_gap;
321         }
322         return max;
323 }
324
325 #ifdef CONFIG_DEBUG_VM_RB
326 static int browse_rb(struct mm_struct *mm)
327 {
328         struct rb_root *root = &mm->mm_rb;
329         int i = 0, j, bug = 0;
330         struct rb_node *nd, *pn = NULL;
331         unsigned long prev = 0, pend = 0;
332
333         for (nd = rb_first(root); nd; nd = rb_next(nd)) {
334                 struct vm_area_struct *vma;
335                 vma = rb_entry(nd, struct vm_area_struct, vm_rb);
336                 if (vma->vm_start < prev) {
337                         pr_emerg("vm_start %lx < prev %lx\n",
338                                   vma->vm_start, prev);
339                         bug = 1;
340                 }
341                 if (vma->vm_start < pend) {
342                         pr_emerg("vm_start %lx < pend %lx\n",
343                                   vma->vm_start, pend);
344                         bug = 1;
345                 }
346                 if (vma->vm_start > vma->vm_end) {
347                         pr_emerg("vm_start %lx > vm_end %lx\n",
348                                   vma->vm_start, vma->vm_end);
349                         bug = 1;
350                 }
351                 spin_lock(&mm->page_table_lock);
352                 if (vma->rb_subtree_gap != vma_compute_subtree_gap(vma)) {
353                         pr_emerg("free gap %lx, correct %lx\n",
354                                vma->rb_subtree_gap,
355                                vma_compute_subtree_gap(vma));
356                         bug = 1;
357                 }
358                 spin_unlock(&mm->page_table_lock);
359                 i++;
360                 pn = nd;
361                 prev = vma->vm_start;
362                 pend = vma->vm_end;
363         }
364         j = 0;
365         for (nd = pn; nd; nd = rb_prev(nd))
366                 j++;
367         if (i != j) {
368                 pr_emerg("backwards %d, forwards %d\n", j, i);
369                 bug = 1;
370         }
371         return bug ? -1 : i;
372 }
373
374 static void validate_mm_rb(struct rb_root *root, struct vm_area_struct *ignore)
375 {
376         struct rb_node *nd;
377
378         for (nd = rb_first(root); nd; nd = rb_next(nd)) {
379                 struct vm_area_struct *vma;
380                 vma = rb_entry(nd, struct vm_area_struct, vm_rb);
381                 VM_BUG_ON_VMA(vma != ignore &&
382                         vma->rb_subtree_gap != vma_compute_subtree_gap(vma),
383                         vma);
384         }
385 }
386
387 static void validate_mm(struct mm_struct *mm)
388 {
389         int bug = 0;
390         int i = 0;
391         unsigned long highest_address = 0;
392         struct vm_area_struct *vma = mm->mmap;
393
394         while (vma) {
395                 struct anon_vma *anon_vma = vma->anon_vma;
396                 struct anon_vma_chain *avc;
397
398                 if (anon_vma) {
399                         anon_vma_lock_read(anon_vma);
400                         list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
401                                 anon_vma_interval_tree_verify(avc);
402                         anon_vma_unlock_read(anon_vma);
403                 }
404
405                 highest_address = vm_end_gap(vma);
406                 vma = vma->vm_next;
407                 i++;
408         }
409         if (i != mm->map_count) {
410                 pr_emerg("map_count %d vm_next %d\n", mm->map_count, i);
411                 bug = 1;
412         }
413         if (highest_address != mm->highest_vm_end) {
414                 pr_emerg("mm->highest_vm_end %lx, found %lx\n",
415                           mm->highest_vm_end, highest_address);
416                 bug = 1;
417         }
418         i = browse_rb(mm);
419         if (i != mm->map_count) {
420                 if (i != -1)
421                         pr_emerg("map_count %d rb %d\n", mm->map_count, i);
422                 bug = 1;
423         }
424         VM_BUG_ON_MM(bug, mm);
425 }
426 #else
427 #define validate_mm_rb(root, ignore) do { } while (0)
428 #define validate_mm(mm) do { } while (0)
429 #endif
430
431 RB_DECLARE_CALLBACKS(static, vma_gap_callbacks, struct vm_area_struct, vm_rb,
432                      unsigned long, rb_subtree_gap, vma_compute_subtree_gap)
433
434 /*
435  * Update augmented rbtree rb_subtree_gap values after vma->vm_start or
436  * vma->vm_prev->vm_end values changed, without modifying the vma's position
437  * in the rbtree.
438  */
439 static void vma_gap_update(struct vm_area_struct *vma)
440 {
441         /*
442          * As it turns out, RB_DECLARE_CALLBACKS() already created a callback
443          * function that does exactly what we want.
444          */
445         vma_gap_callbacks_propagate(&vma->vm_rb, NULL);
446 }
447
448 static inline void vma_rb_insert(struct vm_area_struct *vma,
449                                  struct rb_root *root)
450 {
451         /* All rb_subtree_gap values must be consistent prior to insertion */
452         validate_mm_rb(root, NULL);
453
454         rb_insert_augmented(&vma->vm_rb, root, &vma_gap_callbacks);
455 }
456
457 static void __vma_rb_erase(struct vm_area_struct *vma, struct rb_root *root)
458 {
459         /*
460          * Note rb_erase_augmented is a fairly large inline function,
461          * so make sure we instantiate it only once with our desired
462          * augmented rbtree callbacks.
463          */
464         rb_erase_augmented(&vma->vm_rb, root, &vma_gap_callbacks);
465 }
466
467 static __always_inline void vma_rb_erase_ignore(struct vm_area_struct *vma,
468                                                 struct rb_root *root,
469                                                 struct vm_area_struct *ignore)
470 {
471         /*
472          * All rb_subtree_gap values must be consistent prior to erase,
473          * with the possible exception of the "next" vma being erased if
474          * next->vm_start was reduced.
475          */
476         validate_mm_rb(root, ignore);
477
478         __vma_rb_erase(vma, root);
479 }
480
481 static __always_inline void vma_rb_erase(struct vm_area_struct *vma,
482                                          struct rb_root *root)
483 {
484         /*
485          * All rb_subtree_gap values must be consistent prior to erase,
486          * with the possible exception of the vma being erased.
487          */
488         validate_mm_rb(root, vma);
489
490         __vma_rb_erase(vma, root);
491 }
492
493 /*
494  * vma has some anon_vma assigned, and is already inserted on that
495  * anon_vma's interval trees.
496  *
497  * Before updating the vma's vm_start / vm_end / vm_pgoff fields, the
498  * vma must be removed from the anon_vma's interval trees using
499  * anon_vma_interval_tree_pre_update_vma().
500  *
501  * After the update, the vma will be reinserted using
502  * anon_vma_interval_tree_post_update_vma().
503  *
504  * The entire update must be protected by exclusive mmap_sem and by
505  * the root anon_vma's mutex.
506  */
507 static inline void
508 anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma)
509 {
510         struct anon_vma_chain *avc;
511
512         list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
513                 anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root);
514 }
515
516 static inline void
517 anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma)
518 {
519         struct anon_vma_chain *avc;
520
521         list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
522                 anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root);
523 }
524
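/*
 * Walk the rbtree looking for the insertion point of a new vma spanning
 * [addr, end).  On success *pprev, *rb_link and *rb_parent describe where
 * the vma should be linked; -ENOMEM is returned if an existing vma already
 * overlaps the range.
 */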
525 static int find_vma_links(struct mm_struct *mm, unsigned long addr,
526                 unsigned long end, struct vm_area_struct **pprev,
527                 struct rb_node ***rb_link, struct rb_node **rb_parent)
528 {
529         struct rb_node **__rb_link, *__rb_parent, *rb_prev;
530
531         __rb_link = &mm->mm_rb.rb_node;
532         rb_prev = __rb_parent = NULL;
533
534         while (*__rb_link) {
535                 struct vm_area_struct *vma_tmp;
536
537                 __rb_parent = *__rb_link;
538                 vma_tmp = rb_entry(__rb_parent, struct vm_area_struct, vm_rb);
539
540                 if (vma_tmp->vm_end > addr) {
541                         /* Fail if an existing vma overlaps the area */
542                         if (vma_tmp->vm_start < end)
543                                 return -ENOMEM;
544                         __rb_link = &__rb_parent->rb_left;
545                 } else {
546                         rb_prev = __rb_parent;
547                         __rb_link = &__rb_parent->rb_right;
548                 }
549         }
550
551         *pprev = NULL;
552         if (rb_prev)
553                 *pprev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
554         *rb_link = __rb_link;
555         *rb_parent = __rb_parent;
556         return 0;
557 }
558
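/* Count how many pages in [addr, end) are already covered by existing vmas. */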
559 static unsigned long count_vma_pages_range(struct mm_struct *mm,
560                 unsigned long addr, unsigned long end)
561 {
562         unsigned long nr_pages = 0;
563         struct vm_area_struct *vma;
564
565         /* Find the first overlapping mapping */
566         vma = find_vma_intersection(mm, addr, end);
567         if (!vma)
568                 return 0;
569
570         nr_pages = (min(end, vma->vm_end) -
571                 max(addr, vma->vm_start)) >> PAGE_SHIFT;
572
573         /* Iterate over the rest of the overlaps */
574         for (vma = vma->vm_next; vma; vma = vma->vm_next) {
575                 unsigned long overlap_len;
576
577                 if (vma->vm_start > end)
578                         break;
579
580                 overlap_len = min(end, vma->vm_end) - vma->vm_start;
581                 nr_pages += overlap_len >> PAGE_SHIFT;
582         }
583
584         return nr_pages;
585 }
586
587 void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
588                 struct rb_node **rb_link, struct rb_node *rb_parent)
589 {
590         /* Update tracking information for the gap following the new vma. */
591         if (vma->vm_next)
592                 vma_gap_update(vma->vm_next);
593         else
594                 mm->highest_vm_end = vm_end_gap(vma);
595
596         /*
597          * vma->vm_prev wasn't known when we followed the rbtree to find the
598          * correct insertion point for that vma. As a result, we could not
599          * update the vma's vm_rb parents' rb_subtree_gap values on the way down.
600          * So, we first insert the vma with a zero rb_subtree_gap value
601          * (to be consistent with what we did on the way down), and then
602          * immediately update the gap to the correct value. Finally we
603          * rebalance the rbtree after all augmented values have been set.
604          */
605         rb_link_node(&vma->vm_rb, rb_parent, rb_link);
606         vma->rb_subtree_gap = 0;
607         vma_gap_update(vma);
608         vma_rb_insert(vma, &mm->mm_rb);
609 }
610
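/*
 * Link a file-backed vma into its address_space: adjust i_writecount for
 * VM_DENYWRITE, bump i_mmap_writable for shared mappings, and insert the
 * vma into the mapping's i_mmap interval tree.  Callers hold i_mmap_rwsem
 * for writing when a file is present.
 */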
611 static void __vma_link_file(struct vm_area_struct *vma)
612 {
613         struct file *file;
614
615         file = vma->vm_file;
616         if (file) {
617                 struct address_space *mapping = file->f_mapping;
618
619                 if (vma->vm_flags & VM_DENYWRITE)
620                         atomic_dec(&file_inode(file)->i_writecount);
621                 if (vma->vm_flags & VM_SHARED)
622                         atomic_inc(&mapping->i_mmap_writable);
623
624                 flush_dcache_mmap_lock(mapping);
625                 vma_interval_tree_insert(vma, &mapping->i_mmap);
626                 flush_dcache_mmap_unlock(mapping);
627         }
628 }
629
630 static void
631 __vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
632         struct vm_area_struct *prev, struct rb_node **rb_link,
633         struct rb_node *rb_parent)
634 {
635         __vma_link_list(mm, vma, prev, rb_parent);
636         __vma_link_rb(mm, vma, rb_link, rb_parent);
637 }
638
639 static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
640                         struct vm_area_struct *prev, struct rb_node **rb_link,
641                         struct rb_node *rb_parent)
642 {
643         struct address_space *mapping = NULL;
644
645         if (vma->vm_file) {
646                 mapping = vma->vm_file->f_mapping;
647                 i_mmap_lock_write(mapping);
648         }
649
650         __vma_link(mm, vma, prev, rb_link, rb_parent);
651         __vma_link_file(vma);
652
653         if (mapping)
654                 i_mmap_unlock_write(mapping);
655
656         mm->map_count++;
657         validate_mm(mm);
658 }
659
660 /*
661  * Helper for vma_adjust() in the split_vma insert case: insert a vma into the
662  * mm's list and rbtree.  It has already been inserted into the interval tree.
663  */
664 static void __insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
665 {
666         struct vm_area_struct *prev;
667         struct rb_node **rb_link, *rb_parent;
668
669         if (find_vma_links(mm, vma->vm_start, vma->vm_end,
670                            &prev, &rb_link, &rb_parent))
671                 BUG();
672         __vma_link(mm, vma, prev, rb_link, rb_parent);
673         mm->map_count++;
674 }
675
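/*
 * Unlink @vma from the rbtree and from the mm's vma list, then invalidate
 * the per-thread vmacache.  @ignore is the vma whose rb_subtree_gap may be
 * stale during the rbtree validation (see vma_rb_erase_ignore()); @prev is
 * used only when @has_prev is true.
 */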
676 static __always_inline void __vma_unlink_common(struct mm_struct *mm,
677                                                 struct vm_area_struct *vma,
678                                                 struct vm_area_struct *prev,
679                                                 bool has_prev,
680                                                 struct vm_area_struct *ignore)
681 {
682         struct vm_area_struct *next;
683
684         vma_rb_erase_ignore(vma, &mm->mm_rb, ignore);
685         next = vma->vm_next;
686         if (has_prev)
687                 prev->vm_next = next;
688         else {
689                 prev = vma->vm_prev;
690                 if (prev)
691                         prev->vm_next = next;
692                 else
693                         mm->mmap = next;
694         }
695         if (next)
696                 next->vm_prev = prev;
697
698         /* Kill the cache */
699         vmacache_invalidate(mm);
700 }
701
702 static inline void __vma_unlink_prev(struct mm_struct *mm,
703                                      struct vm_area_struct *vma,
704                                      struct vm_area_struct *prev)
705 {
706         __vma_unlink_common(mm, vma, prev, true, vma);
707 }
708
709 /*
710  * We cannot adjust vm_start, vm_end, vm_pgoff fields of a vma that
711  * is already present in an i_mmap tree without adjusting the tree.
712  * The following helper function should be used when such adjustments
713  * are necessary.  The "insert" vma (if any) is to be inserted
714  * before we drop the necessary locks.
715  */
716 int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
717         unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert,
718         struct vm_area_struct *expand)
719 {
720         struct mm_struct *mm = vma->vm_mm;
721         struct vm_area_struct *next = vma->vm_next, *orig_vma = vma;
722         struct address_space *mapping = NULL;
723         struct rb_root_cached *root = NULL;
724         struct anon_vma *anon_vma = NULL;
725         struct file *file = vma->vm_file;
726         bool start_changed = false, end_changed = false;
727         long adjust_next = 0;
728         int remove_next = 0;
729
730         if (next && !insert) {
731                 struct vm_area_struct *exporter = NULL, *importer = NULL;
732
733                 if (end >= next->vm_end) {
734                         /*
735                          * vma expands, overlapping all the next, and
736                          * perhaps the one after too (mprotect case 6).
737                          * The only other cases that get here are
738                          * case 1, case 7 and case 8.
739                          */
740                         if (next == expand) {
741                                 /*
742                                  * The only case where we don't expand "vma"
743                                  * and we expand "next" instead is case 8.
744                                  */
745                                 VM_WARN_ON(end != next->vm_end);
746                                 /*
747                                  * remove_next == 3 means we're
748                                  * removing "vma" and that to do so we
749                                  * swapped "vma" and "next".
750                                  */
751                                 remove_next = 3;
752                                 VM_WARN_ON(file != next->vm_file);
753                                 swap(vma, next);
754                         } else {
755                                 VM_WARN_ON(expand != vma);
756                                 /*
757                                  * case 1, 6, 7, remove_next == 2 is case 6,
758                                  * remove_next == 1 is case 1 or 7.
759                                  */
760                                 remove_next = 1 + (end > next->vm_end);
761                                 VM_WARN_ON(remove_next == 2 &&
762                                            end != next->vm_next->vm_end);
763                                 VM_WARN_ON(remove_next == 1 &&
764                                            end != next->vm_end);
765                                 /* trim end to next, for case 6 first pass */
766                                 end = next->vm_end;
767                         }
768
769                         exporter = next;
770                         importer = vma;
771
772                         /*
773                          * If next doesn't have anon_vma, import from vma after
774                          * next, if the vma overlaps with it.
775                          */
776                         if (remove_next == 2 && !next->anon_vma)
777                                 exporter = next->vm_next;
778
779                 } else if (end > next->vm_start) {
780                         /*
781                          * vma expands, overlapping part of the next:
782                          * mprotect case 5 shifting the boundary up.
783                          */
784                         adjust_next = (end - next->vm_start) >> PAGE_SHIFT;
785                         exporter = next;
786                         importer = vma;
787                         VM_WARN_ON(expand != importer);
788                 } else if (end < vma->vm_end) {
789                         /*
790                          * vma shrinks, and !insert tells it's not
791                          * split_vma inserting another: so it must be
792                          * mprotect case 4 shifting the boundary down.
793                          */
794                         adjust_next = -((vma->vm_end - end) >> PAGE_SHIFT);
795                         exporter = vma;
796                         importer = next;
797                         VM_WARN_ON(expand != importer);
798                 }
799
800                 /*
801                  * Easily overlooked: when mprotect shifts the boundary,
802                  * make sure the expanding vma has anon_vma set if the
803                  * shrinking vma had, to cover any anon pages imported.
804                  */
805                 if (exporter && exporter->anon_vma && !importer->anon_vma) {
806                         int error;
807
808                         importer->anon_vma = exporter->anon_vma;
809                         error = anon_vma_clone(importer, exporter);
810                         if (error)
811                                 return error;
812                 }
813         }
814 again:
815         vma_adjust_trans_huge(orig_vma, start, end, adjust_next);
816
817         if (file) {
818                 mapping = file->f_mapping;
819                 root = &mapping->i_mmap;
820                 uprobe_munmap(vma, vma->vm_start, vma->vm_end);
821
822                 if (adjust_next)
823                         uprobe_munmap(next, next->vm_start, next->vm_end);
824
825                 i_mmap_lock_write(mapping);
826                 if (insert) {
827                         /*
828                          * Put into interval tree now, so instantiated pages
829                          * are visible to arm/parisc __flush_dcache_page
830                          * throughout; but we cannot insert into address
831                          * space until vma start or end is updated.
832                          */
833                         __vma_link_file(insert);
834                 }
835         }
836
837         anon_vma = vma->anon_vma;
838         if (!anon_vma && adjust_next)
839                 anon_vma = next->anon_vma;
840         if (anon_vma) {
841                 VM_WARN_ON(adjust_next && next->anon_vma &&
842                            anon_vma != next->anon_vma);
843                 anon_vma_lock_write(anon_vma);
844                 anon_vma_interval_tree_pre_update_vma(vma);
845                 if (adjust_next)
846                         anon_vma_interval_tree_pre_update_vma(next);
847         }
848
849         if (root) {
850                 flush_dcache_mmap_lock(mapping);
851                 vma_interval_tree_remove(vma, root);
852                 if (adjust_next)
853                         vma_interval_tree_remove(next, root);
854         }
855
856         if (start != vma->vm_start) {
857                 vma->vm_start = start;
858                 start_changed = true;
859         }
860         if (end != vma->vm_end) {
861                 vma->vm_end = end;
862                 end_changed = true;
863         }
864         vma->vm_pgoff = pgoff;
865         if (adjust_next) {
866                 next->vm_start += adjust_next << PAGE_SHIFT;
867                 next->vm_pgoff += adjust_next;
868         }
869
870         if (root) {
871                 if (adjust_next)
872                         vma_interval_tree_insert(next, root);
873                 vma_interval_tree_insert(vma, root);
874                 flush_dcache_mmap_unlock(mapping);
875         }
876
877         if (remove_next) {
878                 /*
879                  * vma_merge has merged next into vma, and needs
880                  * us to remove next before dropping the locks.
881                  */
882                 if (remove_next != 3)
883                         __vma_unlink_prev(mm, next, vma);
884                 else
885                         /*
886                          * vma is not before next if they've been
887                          * swapped.
888                          *
889                          * pre-swap() next->vm_start was reduced so
890                          * tell validate_mm_rb to ignore pre-swap()
891                          * "next" (which is stored in post-swap()
892                          * "vma").
893                          */
894                         __vma_unlink_common(mm, next, NULL, false, vma);
895                 if (file)
896                         __remove_shared_vm_struct(next, file, mapping);
897         } else if (insert) {
898                 /*
899                  * split_vma has split insert from vma, and needs
900                  * us to insert it before dropping the locks
901                  * (it may either follow vma or precede it).
902                  */
903                 __insert_vm_struct(mm, insert);
904         } else {
905                 if (start_changed)
906                         vma_gap_update(vma);
907                 if (end_changed) {
908                         if (!next)
909                                 mm->highest_vm_end = vm_end_gap(vma);
910                         else if (!adjust_next)
911                                 vma_gap_update(next);
912                 }
913         }
914
915         if (anon_vma) {
916                 anon_vma_interval_tree_post_update_vma(vma);
917                 if (adjust_next)
918                         anon_vma_interval_tree_post_update_vma(next);
919                 anon_vma_unlock_write(anon_vma);
920         }
921         if (mapping)
922                 i_mmap_unlock_write(mapping);
923
924         if (root) {
925                 uprobe_mmap(vma);
926
927                 if (adjust_next)
928                         uprobe_mmap(next);
929         }
930
931         if (remove_next) {
932                 if (file) {
933                         uprobe_munmap(next, next->vm_start, next->vm_end);
934                         fput(file);
935                 }
936                 if (next->anon_vma)
937                         anon_vma_merge(vma, next);
938                 mm->map_count--;
939                 mpol_put(vma_policy(next));
940                 vm_area_free(next);
941                 /*
942                  * In mprotect's case 6 (see comments on vma_merge),
943                  * we must remove another next too. It would clutter
944                  * up the code too much to do both in one go.
945                  */
946                 if (remove_next != 3) {
947                         /*
948                          * If "next" was removed and vma->vm_end was
949                          * expanded (up) over it, in turn
950                          * "next->vm_prev->vm_end" changed and the
951                          * "vma->vm_next" gap must be updated.
952                          */
953                         next = vma->vm_next;
954                 } else {
955                         /*
956                          * For the scope of the comment "next" and
957                          * "vma" considered pre-swap(): if "vma" was
958                          * removed, next->vm_start was expanded (down)
959                          * over it and the "next" gap must be updated.
960                          * Because of the swap() the post-swap() "vma"
961                          * actually points to pre-swap() "next"
962                          * (post-swap() "next" as opposed is now a
963                          * dangling pointer).
964                          */
965                         next = vma;
966                 }
967                 if (remove_next == 2) {
968                         remove_next = 1;
969                         end = next->vm_end;
970                         goto again;
971                 }
972                 else if (next)
973                         vma_gap_update(next);
974                 else {
975                         /*
976                          * If remove_next == 2 we obviously can't
977                          * reach this path.
978                          *
979                          * If remove_next == 3 we can't reach this
980                          * path because pre-swap() next is always not
981                          * NULL. pre-swap() "next" is not being
982                          * removed and its next->vm_end is not altered
983                          * (and furthermore "end" already matches
984                          * next->vm_end in remove_next == 3).
985                          *
986                          * We reach this only in the remove_next == 1
987                          * case if the "next" vma that was removed was
988                          * the highest vma of the mm. However in such
989                          * case next->vm_end == "end" and the extended
990                          * "vma" has vma->vm_end == next->vm_end so
991                          * mm->highest_vm_end doesn't need any update
992                          * in remove_next == 1 case.
993                          */
994                         VM_WARN_ON(mm->highest_vm_end != vm_end_gap(vma));
995                 }
996         }
997         if (insert && file)
998                 uprobe_mmap(insert);
999
1000         validate_mm(mm);
1001
1002         return 0;
1003 }
1004
1005 /*
1006  * If the vma has a ->close operation then the driver probably needs to release
1007  * per-vma resources, so we don't attempt to merge those.
1008  */
1009 static inline int is_mergeable_vma(struct vm_area_struct *vma,
1010                                 struct file *file, unsigned long vm_flags,
1011                                 struct vm_userfaultfd_ctx vm_userfaultfd_ctx)
1012 {
1013         /*
1014          * VM_SOFTDIRTY should not prevent VMA merging: if the flags match
1015          * except for the dirty bit, the caller should mark the merged VMA
1016          * as dirty. If the dirty bit were not excluded from the comparison,
1017          * we would increase pressure on the memory system, forcing the
1018          * kernel to generate new VMAs when old ones could be extended
1019          * instead.
1020          */
1021         if ((vma->vm_flags ^ vm_flags) & ~VM_SOFTDIRTY)
1022                 return 0;
1023         if (vma->vm_file != file)
1024                 return 0;
1025         if (vma->vm_ops && vma->vm_ops->close)
1026                 return 0;
1027         if (!is_mergeable_vm_userfaultfd_ctx(vma, vm_userfaultfd_ctx))
1028                 return 0;
1029         return 1;
1030 }
1031
1032 static inline int is_mergeable_anon_vma(struct anon_vma *anon_vma1,
1033                                         struct anon_vma *anon_vma2,
1034                                         struct vm_area_struct *vma)
1035 {
1036         /*
1037          * The list_is_singular() test is to avoid merging VMAs cloned from
1038          * parents. This improves scalability by reducing anon_vma lock contention.
1039          */
1040         if ((!anon_vma1 || !anon_vma2) && (!vma ||
1041                 list_is_singular(&vma->anon_vma_chain)))
1042                 return 1;
1043         return anon_vma1 == anon_vma2;
1044 }
1045
1046 /*
1047  * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
1048  * in front of (at a lower virtual address and file offset than) the vma.
1049  *
1050  * We cannot merge two vmas if they have differently assigned (non-NULL)
1051  * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
1052  *
1053  * We don't check here for the merged mmap wrapping around the end of pagecache
1054  * indices (16TB on ia32) because do_mmap_pgoff() does not permit mmaps which
1055  * wrap, nor mmaps which cover the final page at index -1UL.
1056  */
1057 static int
1058 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
1059                      struct anon_vma *anon_vma, struct file *file,
1060                      pgoff_t vm_pgoff,
1061                      struct vm_userfaultfd_ctx vm_userfaultfd_ctx)
1062 {
1063         if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx) &&
1064             is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
1065                 if (vma->vm_pgoff == vm_pgoff)
1066                         return 1;
1067         }
1068         return 0;
1069 }
1070
1071 /*
1072  * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
1073  * beyond (at a higher virtual address and file offset than) the vma.
1074  *
1075  * We cannot merge two vmas if they have differently assigned (non-NULL)
1076  * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
1077  */
1078 static int
1079 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
1080                     struct anon_vma *anon_vma, struct file *file,
1081                     pgoff_t vm_pgoff,
1082                     struct vm_userfaultfd_ctx vm_userfaultfd_ctx)
1083 {
1084         if (is_mergeable_vma(vma, file, vm_flags, vm_userfaultfd_ctx) &&
1085             is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
1086                 pgoff_t vm_pglen;
1087                 vm_pglen = vma_pages(vma);
1088                 if (vma->vm_pgoff + vm_pglen == vm_pgoff)
1089                         return 1;
1090         }
1091         return 0;
1092 }
1093
1094 /*
1095  * Given a mapping request (addr,end,vm_flags,file,pgoff), figure out
1096  * whether that can be merged with its predecessor or its successor.
1097  * Or both (it neatly fills a hole).
1098  *
1099  * In most cases - when called for mmap, brk or mremap - [addr,end) is
1100  * certain not to be mapped by the time vma_merge is called; but when
1101  * called for mprotect, it is certain to be already mapped (either at
1102  * an offset within prev, or at the start of next), and the flags of
1103  * this area are about to be changed to vm_flags - and the no-change
1104  * case has already been eliminated.
1105  *
1106  * The following mprotect cases have to be considered, where AAAA is
1107  * the area passed down from mprotect_fixup, never extending beyond one
1108  * vma, PPPPPP is the prev vma specified, and NNNNNN the next vma after:
1109  *
1110  *     AAAA             AAAA                AAAA          AAAA
1111  *    PPPPPPNNNNNN    PPPPPPNNNNNN    PPPPPPNNNNNN    PPPPNNNNXXXX
1112  *    cannot merge    might become    might become    might become
1113  *                    PPNNNNNNNNNN    PPPPPPPPPPNN    PPPPPPPPPPPP 6 or
1114  *    mmap, brk or    case 4 below    case 5 below    PPPPPPPPXXXX 7 or
1115  *    mremap move:                                    PPPPXXXXXXXX 8
1116  *        AAAA
1117  *    PPPP    NNNN    PPPPPPPPPPPP    PPPPPPPPNNNN    PPPPNNNNNNNN
1118  *    might become    case 1 below    case 2 below    case 3 below
1119  *
1120  * It is important for case 8 that the vma NNNN overlapping the
1121  * region AAAA is never going to be extended over XXXX. Instead XXXX must
1122  * be extended in region AAAA and NNNN must be removed. This way in
1123  * all cases where vma_merge succeeds, the moment vma_adjust drops the
1124  * rmap_locks, the properties of the merged vma will be already
1125  * correct for the whole merged range. Some of those properties like
1126  * vm_page_prot/vm_flags may be accessed by rmap_walks and they must
1127  * be correct for the whole merged range immediately after the
1128  * rmap_locks are released. Otherwise if XXXX would be removed and
1129  * NNNN would be extended over the XXXX range, remove_migration_ptes
1130  * or other rmap walkers (if working on addresses beyond the "end"
1131  * parameter) may establish ptes with the wrong permissions of NNNN
1132  * instead of the right permissions of XXXX.
1133  */
1134 struct vm_area_struct *vma_merge(struct mm_struct *mm,
1135                         struct vm_area_struct *prev, unsigned long addr,
1136                         unsigned long end, unsigned long vm_flags,
1137                         struct anon_vma *anon_vma, struct file *file,
1138                         pgoff_t pgoff, struct mempolicy *policy,
1139                         struct vm_userfaultfd_ctx vm_userfaultfd_ctx)
1140 {
1141         pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
1142         struct vm_area_struct *area, *next;
1143         int err;
1144
1145         /*
1146          * We later require that vma->vm_flags == vm_flags,
1147          * so this tests vma->vm_flags & VM_SPECIAL, too.
1148          */
1149         if (vm_flags & VM_SPECIAL)
1150                 return NULL;
1151
1152         if (prev)
1153                 next = prev->vm_next;
1154         else
1155                 next = mm->mmap;
1156         area = next;
1157         if (area && area->vm_end == end)                /* cases 6, 7, 8 */
1158                 next = next->vm_next;
1159
1160         /* verify some invariants that must be enforced by the caller */
1161         VM_WARN_ON(prev && addr <= prev->vm_start);
1162         VM_WARN_ON(area && end > area->vm_end);
1163         VM_WARN_ON(addr >= end);
1164
1165         /*
1166          * Can it merge with the predecessor?
1167          */
1168         if (prev && prev->vm_end == addr &&
1169                         mpol_equal(vma_policy(prev), policy) &&
1170                         can_vma_merge_after(prev, vm_flags,
1171                                             anon_vma, file, pgoff,
1172                                             vm_userfaultfd_ctx)) {
1173                 /*
1174                  * OK, it can.  Can we now merge in the successor as well?
1175                  */
1176                 if (next && end == next->vm_start &&
1177                                 mpol_equal(policy, vma_policy(next)) &&
1178                                 can_vma_merge_before(next, vm_flags,
1179                                                      anon_vma, file,
1180                                                      pgoff+pglen,
1181                                                      vm_userfaultfd_ctx) &&
1182                                 is_mergeable_anon_vma(prev->anon_vma,
1183                                                       next->anon_vma, NULL)) {
1184                                                         /* cases 1, 6 */
1185                         err = __vma_adjust(prev, prev->vm_start,
1186                                          next->vm_end, prev->vm_pgoff, NULL,
1187                                          prev);
1188                 } else                                  /* cases 2, 5, 7 */
1189                         err = __vma_adjust(prev, prev->vm_start,
1190                                          end, prev->vm_pgoff, NULL, prev);
1191                 if (err)
1192                         return NULL;
1193                 khugepaged_enter_vma_merge(prev, vm_flags);
1194                 return prev;
1195         }
1196
1197         /*
1198          * Can this new request be merged in front of next?
1199          */
1200         if (next && end == next->vm_start &&
1201                         mpol_equal(policy, vma_policy(next)) &&
1202                         can_vma_merge_before(next, vm_flags,
1203                                              anon_vma, file, pgoff+pglen,
1204                                              vm_userfaultfd_ctx)) {
1205                 if (prev && addr < prev->vm_end)        /* case 4 */
1206                         err = __vma_adjust(prev, prev->vm_start,
1207                                          addr, prev->vm_pgoff, NULL, next);
1208                 else {                                  /* cases 3, 8 */
1209                         err = __vma_adjust(area, addr, next->vm_end,
1210                                          next->vm_pgoff - pglen, NULL, next);
1211                         /*
1212                          * In case 3 area is already equal to next and
1213                          * this is a noop, but in case 8 "area" has
1214                          * been removed and next was expanded over it.
1215                          */
1216                         area = next;
1217                 }
1218                 if (err)
1219                         return NULL;
1220                 khugepaged_enter_vma_merge(area, vm_flags);
1221                 return area;
1222         }
1223
1224         return NULL;
1225 }
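/*
 * Concrete example of case 1 above: a new anonymous mapping that exactly
 * fills the hole between prev and next with compatible flags and policy ends
 * up in the "cases 1, 6" branch, so __vma_adjust(prev, prev->vm_start,
 * next->vm_end, ...) extends prev over the new range and removes next,
 * leaving a single merged vma.
 */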
1226
1227 /*
1228  * Rough compatibility check to quickly see if it's even worth looking
1229  * at sharing an anon_vma.
1230  *
1231  * They need to have the same vm_file, and the flags can only differ
1232  * in things that mprotect may change.
1233  *
1234  * NOTE! The fact that we share an anon_vma doesn't _have_ to mean that
1235  * we can merge the two vma's. For example, we refuse to merge a vma if
1236  * there is a vm_ops->close() function, because that indicates that the
1237  * driver is doing some kind of reference counting. But that doesn't
1238  * really matter for the anon_vma sharing case.
1239  */
1240 static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b)
1241 {
1242         return a->vm_end == b->vm_start &&
1243                 mpol_equal(vma_policy(a), vma_policy(b)) &&
1244                 a->vm_file == b->vm_file &&
1245                 !((a->vm_flags ^ b->vm_flags) & ~(VM_READ|VM_WRITE|VM_EXEC|VM_SOFTDIRTY)) &&
1246                 b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT);
1247 }
1248
1249 /*
1250  * Do some basic sanity checking to see if we can re-use the anon_vma
1251  * from 'old'. The 'a'/'b' vma's are in VM order - one of them will be
1252  * the same as 'old', the other will be the new one that is trying
1253  * to share the anon_vma.
1254  *
1255  * NOTE! This runs with mm_sem held for reading, so it is possible that
1256  * the anon_vma of 'old' is concurrently in the process of being set up
1257  * by another page fault trying to merge _that_. But that's ok: if it
1258  * is being set up, that automatically means that it will be a singleton
1259  * acceptable for merging, so we can do all of this optimistically. But
1260  * we do that READ_ONCE() to make sure that we never re-load the pointer.
1261  *
1262  * IOW: that the "list_is_singular()" test on the anon_vma_chain only
1263  * matters for the 'stable anon_vma' case (ie the thing we want to avoid
1264  * is to return an anon_vma that is "complex" due to having gone through
1265  * a fork).
1266  *
1267  * We also make sure that the two vma's are compatible (adjacent,
1268  * and with the same memory policies). That's all stable, even with just
1269  * a read lock on the mm_sem.
1270  */
1271 static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_area_struct *a, struct vm_area_struct *b)
1272 {
1273         if (anon_vma_compatible(a, b)) {
1274                 struct anon_vma *anon_vma = READ_ONCE(old->anon_vma);
1275
1276                 if (anon_vma && list_is_singular(&old->anon_vma_chain))
1277                         return anon_vma;
1278         }
1279         return NULL;
1280 }
1281
1282 /*
1283  * find_mergeable_anon_vma is used by anon_vma_prepare, to check
1284  * neighbouring vmas for a suitable anon_vma, before it goes off
1285  * to allocate a new anon_vma.  It checks because a repetitive
1286  * sequence of mprotects and faults may otherwise lead to distinct
1287  * anon_vmas being allocated, preventing vma merge in subsequent
1288  * mprotect.
1289  */
1290 struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
1291 {
1292         struct anon_vma *anon_vma;
1293         struct vm_area_struct *near;
1294
1295         near = vma->vm_next;
1296         if (!near)
1297                 goto try_prev;
1298
1299         anon_vma = reusable_anon_vma(near, vma, near);
1300         if (anon_vma)
1301                 return anon_vma;
1302 try_prev:
1303         near = vma->vm_prev;
1304         if (!near)
1305                 goto none;
1306
1307         anon_vma = reusable_anon_vma(near, near, vma);
1308         if (anon_vma)
1309                 return anon_vma;
1310 none:
1311         /*
1312          * There's no absolute need to look only at touching neighbours:
1313          * we could search further afield for "compatible" anon_vmas.
1314          * But it would probably just be a waste of time searching,
1315          * or lead to too many vmas hanging off the same anon_vma.
1316          * We're trying to allow mprotect remerging later on,
1317          * not trying to minimize memory used for anon_vmas.
1318          */
1319         return NULL;
1320 }
1321
1322 /*
1323  * If a hint addr is less than mmap_min_addr, change the hint to be as
1324  * low as possible but still greater than mmap_min_addr.
1325  */
1326 static inline unsigned long round_hint_to_min(unsigned long hint)
1327 {
1328         hint &= PAGE_MASK;
1329         if (((void *)hint != NULL) &&
1330             (hint < mmap_min_addr))
1331                 return PAGE_ALIGN(mmap_min_addr);
1332         return hint;
1333 }
1334
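/*
 * Check whether mlocking a further @len bytes would exceed RLIMIT_MEMLOCK.
 * Only relevant when VM_LOCKED is requested; CAP_IPC_LOCK bypasses the
 * limit.
 */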
1335 static inline int mlock_future_check(struct mm_struct *mm,
1336                                      unsigned long flags,
1337                                      unsigned long len)
1338 {
1339         unsigned long locked, lock_limit;
1340
1341         /*  mlock MCL_FUTURE? */
1342         if (flags & VM_LOCKED) {
1343                 locked = len >> PAGE_SHIFT;
1344                 locked += mm->locked_vm;
1345                 lock_limit = rlimit(RLIMIT_MEMLOCK);
1346                 lock_limit >>= PAGE_SHIFT;
1347                 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
1348                         return -EAGAIN;
1349         }
1350         return 0;
1351 }
1352
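/*
 * Largest size, in bytes, that may be mapped from this file: regular files
 * and block devices allow up to MAX_LFS_FILESIZE, files supporting
 * FMODE_UNSIGNED_OFFSET return 0 (so the length check in file_mmap_ok()
 * below is skipped), and everything else is capped at ULONG_MAX.
 */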
1353 static inline u64 file_mmap_size_max(struct file *file, struct inode *inode)
1354 {
1355         if (S_ISREG(inode->i_mode))
1356                 return MAX_LFS_FILESIZE;
1357
1358         if (S_ISBLK(inode->i_mode))
1359                 return MAX_LFS_FILESIZE;
1360
1361         /* Special "we do even unsigned file positions" case */
1362         if (file->f_mode & FMODE_UNSIGNED_OFFSET)
1363                 return 0;
1364
1365         /* Yes, random drivers might want more. But I'm tired of buggy drivers */
1366         return ULONG_MAX;
1367 }
1368
1369 static inline bool file_mmap_ok(struct file *file, struct inode *inode,
1370                                 unsigned long pgoff, unsigned long len)
1371 {
1372         u64 maxsize = file_mmap_size_max(file, inode);
1373
1374         if (maxsize && len > maxsize)
1375                 return false;
1376         maxsize -= len;
1377         if (pgoff > maxsize >> PAGE_SHIFT)
1378                 return false;
1379         return true;
1380 }
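/*
 * Worked example (illustrative only; the real maxsize is MAX_LFS_FILESIZE
 * or ULONG_MAX, small numbers are used here for readability): with 4 KiB
 * pages, a hypothetical maxsize of 1 MiB and len of 64 KiB, the mapping is
 * rejected if len alone exceeds 1 MiB, and otherwise if pgoff is greater
 * than (1 MiB - 64 KiB) >> PAGE_SHIFT == 240, i.e. if the mapping would
 * reach past the maximum supported file offset.
 */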
1381
1382 /*
1383  * The caller must hold down_write(&current->mm->mmap_sem).
1384  */
1385 unsigned long do_mmap(struct file *file, unsigned long addr,
1386                         unsigned long len, unsigned long prot,
1387                         unsigned long flags, vm_flags_t vm_flags,
1388                         unsigned long pgoff, unsigned long *populate,
1389                         struct list_head *uf)
1390 {
1391         struct mm_struct *mm = current->mm;
1392         int pkey = 0;
1393
1394         *populate = 0;
1395
1396         if (!len)
1397                 return -EINVAL;
1398
1399         /*
1400          * Does the application expect PROT_READ to imply PROT_EXEC?
1401          *
1402          * (the exception is when the underlying filesystem is noexec
1403          *  mounted, in which case we don't add PROT_EXEC.)
1404          */
1405         if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
1406                 if (!(file && path_noexec(&file->f_path)))
1407                         prot |= PROT_EXEC;
1408
1409         /* force arch specific MAP_FIXED handling in get_unmapped_area */
1410         if (flags & MAP_FIXED_NOREPLACE)
1411                 flags |= MAP_FIXED;
1412
1413         if (!(flags & MAP_FIXED))
1414                 addr = round_hint_to_min(addr);
1415
1416         /* Careful about overflows.. */
1417         len = PAGE_ALIGN(len);
1418         if (!len)
1419                 return -ENOMEM;
1420
1421         /* offset overflow? */
1422         if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
1423                 return -EOVERFLOW;
1424
1425         /* Too many mappings? */
1426         if (mm->map_count > sysctl_max_map_count)
1427                 return -ENOMEM;
1428
1429         /* Obtain the address to map to. We verify (or select) it and ensure
1430          * that it represents a valid section of the address space.
1431          */
1432         addr = get_unmapped_area(file, addr, len, pgoff, flags);
1433         if (offset_in_page(addr))
1434                 return addr;
1435
1436         if (flags & MAP_FIXED_NOREPLACE) {
1437                 struct vm_area_struct *vma = find_vma(mm, addr);
1438
1439                 if (vma && vma->vm_start < addr + len)
1440                         return -EEXIST;
1441         }
1442
1443         if (prot == PROT_EXEC) {
1444                 pkey = execute_only_pkey(mm);
1445                 if (pkey < 0)
1446                         pkey = 0;
1447         }
1448
1449         /* Do simple checking here so the lower-level routines won't have
1450          * to. We assume access permissions have been handled by the open
1451          * of the memory object, so we don't do any here.
1452          */
1453         vm_flags |= calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(flags) |
1454                         mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
1455
1456         if (flags & MAP_LOCKED)
1457                 if (!can_do_mlock())
1458                         return -EPERM;
1459
1460         if (mlock_future_check(mm, vm_flags, len))
1461                 return -EAGAIN;
1462
1463         if (file) {
1464                 struct inode *inode = file_inode(file);
1465                 unsigned long flags_mask;
1466
1467                 if (!file_mmap_ok(file, inode, pgoff, len))
1468                         return -EOVERFLOW;
1469
1470                 flags_mask = LEGACY_MAP_MASK | file->f_op->mmap_supported_flags;
1471
1472                 switch (flags & MAP_TYPE) {
1473                 case MAP_SHARED:
1474                         /*
1475                          * Force use of MAP_SHARED_VALIDATE with non-legacy
1476                          * flags. E.g. MAP_SYNC is dangerous to use with
1477                          * MAP_SHARED as you don't know which consistency model
1478                          * you will get. We silently ignore unsupported flags
1479                          * with MAP_SHARED to preserve backward compatibility.
1480                          */
1481                         flags &= LEGACY_MAP_MASK;
1482                         /* fall through */
1483                 case MAP_SHARED_VALIDATE:
1484                         if (flags & ~flags_mask)
1485                                 return -EOPNOTSUPP;
1486                         if (prot & PROT_WRITE) {
1487                                 if (!(file->f_mode & FMODE_WRITE))
1488                                         return -EACCES;
1489                                 if (IS_SWAPFILE(file->f_mapping->host))
1490                                         return -ETXTBSY;
1491                         }
1492
1493                         /*
1494                          * Make sure we don't allow writing to an append-only
1495                          * file..
1496                          */
1497                         if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE))
1498                                 return -EACCES;
1499
1500                         /*
1501                          * Make sure there are no mandatory locks on the file.
1502                          */
1503                         if (locks_verify_locked(file))
1504                                 return -EAGAIN;
1505
1506                         vm_flags |= VM_SHARED | VM_MAYSHARE;
1507                         if (!(file->f_mode & FMODE_WRITE))
1508                                 vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
1509
1510                         /* fall through */
1511                 case MAP_PRIVATE:
1512                         if (!(file->f_mode & FMODE_READ))
1513                                 return -EACCES;
1514                         if (path_noexec(&file->f_path)) {
1515                                 if (vm_flags & VM_EXEC)
1516                                         return -EPERM;
1517                                 vm_flags &= ~VM_MAYEXEC;
1518                         }
1519
1520                         if (!file->f_op->mmap)
1521                                 return -ENODEV;
1522                         if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
1523                                 return -EINVAL;
1524                         break;
1525
1526                 default:
1527                         return -EINVAL;
1528                 }
1529         } else {
1530                 switch (flags & MAP_TYPE) {
1531                 case MAP_SHARED:
1532                         if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
1533                                 return -EINVAL;
1534                         /*
1535                          * Ignore pgoff.
1536                          */
1537                         pgoff = 0;
1538                         vm_flags |= VM_SHARED | VM_MAYSHARE;
1539                         break;
1540                 case MAP_PRIVATE:
1541                         /*
1542                          * Set pgoff according to addr for anon_vma.
1543                          */
1544                         pgoff = addr >> PAGE_SHIFT;
1545                         break;
1546                 default:
1547                         return -EINVAL;
1548                 }
1549         }
1550
1551         /*
1552          * Set 'VM_NORESERVE' if we should not account for the
1553          * memory use of this mapping.
1554          */
1555         if (flags & MAP_NORESERVE) {
1556                 /* We honor MAP_NORESERVE if allowed to overcommit */
1557                 if (sysctl_overcommit_memory != OVERCOMMIT_NEVER)
1558                         vm_flags |= VM_NORESERVE;
1559
1560                 /* hugetlb applies strict overcommit unless MAP_NORESERVE */
1561                 if (file && is_file_hugepages(file))
1562                         vm_flags |= VM_NORESERVE;
1563         }
1564
1565         addr = mmap_region(file, addr, len, vm_flags, pgoff, uf);
1566         if (!IS_ERR_VALUE(addr) &&
1567             ((vm_flags & VM_LOCKED) ||
1568              (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE))
1569                 *populate = len;
1570         return addr;
1571 }
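/*
 * Illustrative example of the *populate contract (assumes the standard
 * userspace mmap() wrapper): a request such as
 * mmap(NULL, len, PROT_READ, MAP_PRIVATE | MAP_POPULATE, fd, 0) with
 * MAP_NONBLOCK clear, or any mapping that ends up VM_LOCKED, causes
 * *populate to be set to the page-aligned length so that the caller can
 * pre-fault the whole range after do_mmap() returns.
 */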
1572
1573 unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
1574                               unsigned long prot, unsigned long flags,
1575                               unsigned long fd, unsigned long pgoff)
1576 {
1577         struct file *file = NULL;
1578         unsigned long retval;
1579
1580         if (!(flags & MAP_ANONYMOUS)) {
1581                 audit_mmap_fd(fd, flags);
1582                 file = fget(fd);
1583                 if (!file)
1584                         return -EBADF;
1585                 if (is_file_hugepages(file))
1586                         len = ALIGN(len, huge_page_size(hstate_file(file)));
1587                 retval = -EINVAL;
1588                 if (unlikely(flags & MAP_HUGETLB && !is_file_hugepages(file)))
1589                         goto out_fput;
1590         } else if (flags & MAP_HUGETLB) {
1591                 struct user_struct *user = NULL;
1592                 struct hstate *hs;
1593
1594                 hs = hstate_sizelog((flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
1595                 if (!hs)
1596                         return -EINVAL;
1597
1598                 len = ALIGN(len, huge_page_size(hs));
1599                 /*
1600                  * VM_NORESERVE is used because the reservations will be
1601                  * taken when vm_ops->mmap() is called.
1602                  * A dummy user value is used because we are not locking
1603                  * memory, so no accounting is necessary.
1604                  */
1605                 file = hugetlb_file_setup(HUGETLB_ANON_FILE, len,
1606                                 VM_NORESERVE,
1607                                 &user, HUGETLB_ANONHUGE_INODE,
1608                                 (flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
1609                 if (IS_ERR(file))
1610                         return PTR_ERR(file);
1611         }
1612
1613         flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
1614
1615         retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
1616 out_fput:
1617         if (file)
1618                 fput(file);
1619         return retval;
1620 }
1621
1622 SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
1623                 unsigned long, prot, unsigned long, flags,
1624                 unsigned long, fd, unsigned long, pgoff)
1625 {
1626         return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
1627 }
1628
1629 #ifdef __ARCH_WANT_SYS_OLD_MMAP
1630 struct mmap_arg_struct {
1631         unsigned long addr;
1632         unsigned long len;
1633         unsigned long prot;
1634         unsigned long flags;
1635         unsigned long fd;
1636         unsigned long offset;
1637 };
1638
1639 SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
1640 {
1641         struct mmap_arg_struct a;
1642
1643         if (copy_from_user(&a, arg, sizeof(a)))
1644                 return -EFAULT;
1645         if (offset_in_page(a.offset))
1646                 return -EINVAL;
1647
1648         return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
1649                                a.offset >> PAGE_SHIFT);
1650 }
1651 #endif /* __ARCH_WANT_SYS_OLD_MMAP */
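/*
 * Worked example (illustrative only; assumes 4 KiB pages): the legacy
 * old_mmap() interface takes a byte offset, so an offset of 0x3000 passes
 * the offset_in_page() check and becomes pgoff == 3, while an unaligned
 * offset such as 0x1234 fails with -EINVAL; mmap_pgoff() instead takes the
 * offset already expressed in pages.
 */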
1652
1653 /*
1654  * Some shared mappings will want the pages marked read-only
1655  * to track write events. If so, we'll downgrade vm_page_prot
1656  * to the private version (using protection_map[] without the
1657  * VM_SHARED bit).
1658  */
1659 int vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot)
1660 {
1661         vm_flags_t vm_flags = vma->vm_flags;
1662         const struct vm_operations_struct *vm_ops = vma->vm_ops;
1663
1664         /* If it was private or non-writable, the write bit is already clear */
1665         if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
1666                 return 0;
1667
1668         /* The backer wishes to know when pages are first written to? */
1669         if (vm_ops && (vm_ops->page_mkwrite || vm_ops->pfn_mkwrite))
1670                 return 1;
1671
1672         /* The open routine did something to the protections that pgprot_modify
1673          * won't preserve? */
1674         if (pgprot_val(vm_page_prot) !=
1675             pgprot_val(vm_pgprot_modify(vm_page_prot, vm_flags)))
1676                 return 0;
1677
1678         /* Do we need to track softdirty? */
1679         if (IS_ENABLED(CONFIG_MEM_SOFT_DIRTY) && !(vm_flags & VM_SOFTDIRTY))
1680                 return 1;
1681
1682         /* Specialty mapping? */
1683         if (vm_flags & VM_PFNMAP)
1684                 return 0;
1685
1686         /* Can the mapping track the dirty pages? */
1687         return vma->vm_file && vma->vm_file->f_mapping &&
1688                 mapping_cap_account_dirty(vma->vm_file->f_mapping);
1689 }
1690
1691 /*
1692  * We account for memory if it's a private writable mapping that is
1693  * not hugepages and for which VM_NORESERVE wasn't set.
1694  */
1695 static inline int accountable_mapping(struct file *file, vm_flags_t vm_flags)
1696 {
1697         /*
1698          * hugetlb has its own accounting separate from the core VM.
1699          * VM_HUGETLB may not be set yet, so we cannot check for that flag.
1700          */
1701         if (file && is_file_hugepages(file))
1702                 return 0;
1703
1704         return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
1705 }
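/*
 * Worked example (illustrative only): an anonymous MAP_PRIVATE mapping
 * created with PROT_WRITE has VM_WRITE set and both VM_SHARED and
 * VM_NORESERVE clear, so it is charged against the commit accounting;
 * a MAP_SHARED mapping, a read-only mapping, or one with VM_NORESERVE
 * set is not charged here.
 */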
1706
1707 unsigned long mmap_region(struct file *file, unsigned long addr,
1708                 unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
1709                 struct list_head *uf)
1710 {
1711         struct mm_struct *mm = current->mm;
1712         struct vm_area_struct *vma, *prev;
1713         int error;
1714         struct rb_node **rb_link, *rb_parent;
1715         unsigned long charged = 0;
1716
1717         /* Check against address space limit. */
1718         if (!may_expand_vm(mm, vm_flags, len >> PAGE_SHIFT)) {
1719                 unsigned long nr_pages;
1720
1721                 /*
1722                  * MAP_FIXED may remove pages of mappings that intersect with
1723                  * the requested mapping. Account for the pages it would unmap.
1724                  */
1725                 nr_pages = count_vma_pages_range(mm, addr, addr + len);
1726
1727                 if (!may_expand_vm(mm, vm_flags,
1728                                         (len >> PAGE_SHIFT) - nr_pages))
1729                         return -ENOMEM;
1730         }
1731
1732         /* Clear old maps */
1733         while (find_vma_links(mm, addr, addr + len, &prev, &rb_link,
1734                               &rb_parent)) {
1735                 if (do_munmap(mm, addr, len, uf))
1736                         return -ENOMEM;
1737         }
1738
1739         /*
1740          * Private writable mapping: check memory availability
1741          */
1742         if (accountable_mapping(file, vm_flags)) {
1743                 charged = len >> PAGE_SHIFT;
1744                 if (security_vm_enough_memory_mm(mm, charged))
1745                         return -ENOMEM;
1746                 vm_flags |= VM_ACCOUNT;
1747         }
1748
1749         /*
1750          * Can we just expand an old mapping?
1751          */
1752         vma = vma_merge(mm, prev, addr, addr + len, vm_flags,
1753                         NULL, file, pgoff, NULL, NULL_VM_UFFD_CTX);
1754         if (vma)
1755                 goto out;
1756
1757         /*
1758          * Determine the object being mapped and call the appropriate
1759          * specific mapper. The address has already been validated, though
1760          * not unmapped; the old maps have been removed from the list.
1761          */
1762         vma = vm_area_alloc(mm);
1763         if (!vma) {
1764                 error = -ENOMEM;
1765                 goto unacct_error;
1766         }
1767
1768         vma->vm_start = addr;
1769         vma->vm_end = addr + len;
1770         vma->vm_flags = vm_flags;
1771         vma->vm_page_prot = vm_get_page_prot(vm_flags);
1772         vma->vm_pgoff = pgoff;
1773
1774         if (file) {
1775                 if (vm_flags & VM_DENYWRITE) {
1776                         error = deny_write_access(file);
1777                         if (error)
1778                                 goto free_vma;
1779                 }
1780                 if (vm_flags & VM_SHARED) {
1781                         error = mapping_map_writable(file->f_mapping);
1782                         if (error)
1783                                 goto allow_write_and_free_vma;
1784                 }
1785
1786                 /* ->mmap() can change vma->vm_file, but must guarantee that
1787                  * vma_link() below can deny write-access if VM_DENYWRITE is set
1788                  * and map writably if VM_SHARED is set. This usually means the
1789                  * new file must not have been exposed to user-space, yet.
1790                  */
1791                 vma->vm_file = get_file(file);
1792                 error = call_mmap(file, vma);
1793                 if (error)
1794                         goto unmap_and_free_vma;
1795
1796                 /* Can addr have changed??
1797                  *
1798                  * Answer: Yes, several device drivers can do it in their
1799                  *         f_op->mmap method. -DaveM
1800                  * Bug: If addr is changed, prev, rb_link, rb_parent should
1801                  *      be updated for vma_link()
1802                  */
1803                 WARN_ON_ONCE(addr != vma->vm_start);
1804
1805                 addr = vma->vm_start;
1806                 vm_flags = vma->vm_flags;
1807         } else if (vm_flags & VM_SHARED) {
1808                 error = shmem_zero_setup(vma);
1809                 if (error)
1810                         goto free_vma;
1811         } else {
1812                 vma_set_anonymous(vma);
1813         }
1814
1815         vma_link(mm, vma, prev, rb_link, rb_parent);
1816         /* Once vma denies write, undo our temporary denial count */
1817         if (file) {
1818                 if (vm_flags & VM_SHARED)
1819                         mapping_unmap_writable(file->f_mapping);
1820                 if (vm_flags & VM_DENYWRITE)
1821                         allow_write_access(file);
1822         }
1823         file = vma->vm_file;
1824 out:
1825         perf_event_mmap(vma);
1826
1827         vm_stat_account(mm, vm_flags, len >> PAGE_SHIFT);
1828         if (vm_flags & VM_LOCKED) {
1829                 if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) ||
1830                                         is_vm_hugetlb_page(vma) ||
1831                                         vma == get_gate_vma(current->mm))
1832                         vma->vm_flags &= VM_LOCKED_CLEAR_MASK;
1833                 else
1834                         mm->locked_vm += (len >> PAGE_SHIFT);
1835         }
1836
1837         if (file)
1838                 uprobe_mmap(vma);
1839
1840         /*
1841          * A new (or expanded) vma always gets soft-dirty status.
1842          * Otherwise the user-space soft-dirty page tracker would not
1843          * be able to tell that a vma area was unmapped and then mapped
1844          * again in place (which must be treated as a completely new
1845          * data area).
1846          */
1847         vma->vm_flags |= VM_SOFTDIRTY;
1848
1849         vma_set_page_prot(vma);
1850
1851         return addr;
1852
1853 unmap_and_free_vma:
1854         vma->vm_file = NULL;
1855         fput(file);
1856
1857         /* Undo any partial mapping done by a device driver. */
1858         unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
1859         charged = 0;
1860         if (vm_flags & VM_SHARED)
1861                 mapping_unmap_writable(file->f_mapping);
1862 allow_write_and_free_vma:
1863         if (vm_flags & VM_DENYWRITE)
1864                 allow_write_access(file);
1865 free_vma:
1866         vm_area_free(vma);
1867 unacct_error:
1868         if (charged)
1869                 vm_unacct_memory(charged);
1870         return error;
1871 }
1872
1873 unsigned long unmapped_area(struct vm_unmapped_area_info *info)
1874 {
1875         /*
1876          * We implement the search by looking for an rbtree node that
1877          * immediately follows a suitable gap. That is,
1878          * - gap_start = vma->vm_prev->vm_end <= info->high_limit - length;
1879          * - gap_end   = vma->vm_start        >= info->low_limit  + length;
1880          * - gap_end - gap_start >= length
1881          */
1882
1883         struct mm_struct *mm = current->mm;
1884         struct vm_area_struct *vma;
1885         unsigned long length, low_limit, high_limit, gap_start, gap_end;
1886
1887         /* Adjust search length to account for worst case alignment overhead */
1888         length = info->length + info->align_mask;
1889         if (length < info->length)
1890                 return -ENOMEM;
1891
1892         /* Adjust search limits by the desired length */
1893         if (info->high_limit < length)
1894                 return -ENOMEM;
1895         high_limit = info->high_limit - length;
1896
1897         if (info->low_limit > high_limit)
1898                 return -ENOMEM;
1899         low_limit = info->low_limit + length;
1900
1901         /* Check if rbtree root looks promising */
1902         if (RB_EMPTY_ROOT(&mm->mm_rb))
1903                 goto check_highest;
1904         vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb);
1905         if (vma->rb_subtree_gap < length)
1906                 goto check_highest;
1907
1908         while (true) {
1909                 /* Visit left subtree if it looks promising */
1910                 gap_end = vm_start_gap(vma);
1911                 if (gap_end >= low_limit && vma->vm_rb.rb_left) {
1912                         struct vm_area_struct *left =
1913                                 rb_entry(vma->vm_rb.rb_left,
1914                                          struct vm_area_struct, vm_rb);
1915                         if (left->rb_subtree_gap >= length) {
1916                                 vma = left;
1917                                 continue;
1918                         }
1919                 }
1920
1921                 gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0;
1922 check_current:
1923                 /* Check if current node has a suitable gap */
1924                 if (gap_start > high_limit)
1925                         return -ENOMEM;
1926                 if (gap_end >= low_limit &&
1927                     gap_end > gap_start && gap_end - gap_start >= length)
1928                         goto found;
1929
1930                 /* Visit right subtree if it looks promising */
1931                 if (vma->vm_rb.rb_right) {
1932                         struct vm_area_struct *right =
1933                                 rb_entry(vma->vm_rb.rb_right,
1934                                          struct vm_area_struct, vm_rb);
1935                         if (right->rb_subtree_gap >= length) {
1936                                 vma = right;
1937                                 continue;
1938                         }
1939                 }
1940
1941                 /* Go back up the rbtree to find next candidate node */
1942                 while (true) {
1943                         struct rb_node *prev = &vma->vm_rb;
1944                         if (!rb_parent(prev))
1945                                 goto check_highest;
1946                         vma = rb_entry(rb_parent(prev),
1947                                        struct vm_area_struct, vm_rb);
1948                         if (prev == vma->vm_rb.rb_left) {
1949                                 gap_start = vm_end_gap(vma->vm_prev);
1950                                 gap_end = vm_start_gap(vma);
1951                                 goto check_current;
1952                         }
1953                 }
1954         }
1955
1956 check_highest:
1957         /* Check highest gap, which does not precede any rbtree node */
1958         gap_start = mm->highest_vm_end;
1959         gap_end = ULONG_MAX;  /* Only for VM_BUG_ON below */
1960         if (gap_start > high_limit)
1961                 return -ENOMEM;
1962
1963 found:
1964         /* We found a suitable gap. Clip it with the original low_limit. */
1965         if (gap_start < info->low_limit)
1966                 gap_start = info->low_limit;
1967
1968         /* Adjust gap address to the desired alignment */
1969         gap_start += (info->align_offset - gap_start) & info->align_mask;
1970
1971         VM_BUG_ON(gap_start + info->length > info->high_limit);
1972         VM_BUG_ON(gap_start + info->length > gap_end);
1973         return gap_start;
1974 }
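/*
 * Worked example of the final alignment step (illustrative only; assumes
 * align_mask == 0xffff for 64 KiB alignment and align_offset == 0): a
 * suitable gap starting at 0x12000 is bumped by
 * (0 - 0x12000) & 0xffff == 0xe000 up to 0x20000, the next 64 KiB
 * boundary, and that address is returned.
 */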
1975
1976 unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
1977 {
1978         struct mm_struct *mm = current->mm;
1979         struct vm_area_struct *vma;
1980         unsigned long length, low_limit, high_limit, gap_start, gap_end;
1981
1982         /* Adjust search length to account for worst case alignment overhead */
1983         length = info->length + info->align_mask;
1984         if (length < info->length)
1985                 return -ENOMEM;
1986
1987         /*
1988          * Adjust search limits by the desired length.
1989          * See implementation comment at top of unmapped_area().
1990          */
1991         gap_end = info->high_limit;
1992         if (gap_end < length)
1993                 return -ENOMEM;
1994         high_limit = gap_end - length;
1995
1996         if (info->low_limit > high_limit)
1997                 return -ENOMEM;
1998         low_limit = info->low_limit + length;
1999
2000         /* Check highest gap, which does not precede any rbtree node */
2001         gap_start = mm->highest_vm_end;
2002         if (gap_start <= high_limit)
2003                 goto found_highest;
2004
2005         /* Check if rbtree root looks promising */
2006         if (RB_EMPTY_ROOT(&mm->mm_rb))
2007                 return -ENOMEM;
2008         vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb);
2009         if (vma->rb_subtree_gap < length)
2010                 return -ENOMEM;
2011
2012         while (true) {
2013                 /* Visit right subtree if it looks promising */
2014                 gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0;
2015                 if (gap_start <= high_limit && vma->vm_rb.rb_right) {
2016                         struct vm_area_struct *right =
2017                                 rb_entry(vma->vm_rb.rb_right,
2018                                          struct vm_area_struct, vm_rb);
2019                         if (right->rb_subtree_gap >= length) {
2020                                 vma = right;
2021                                 continue;
2022                         }
2023                 }
2024
2025 check_current:
2026                 /* Check if current node has a suitable gap */
2027                 gap_end = vm_start_gap(vma);
2028                 if (gap_end < low_limit)
2029                         return -ENOMEM;
2030                 if (gap_start <= high_limit &&
2031                     gap_end > gap_start && gap_end - gap_start >= length)
2032                         goto found;
2033
2034                 /* Visit left subtree if it looks promising */
2035                 if (vma->vm_rb.rb_left) {
2036                         struct vm_area_struct *left =
2037                                 rb_entry(vma->vm_rb.rb_left,
2038                                          struct vm_area_struct, vm_rb);
2039                         if (left->rb_subtree_gap >= length) {
2040                                 vma = left;
2041                                 continue;
2042                         }
2043                 }
2044
2045                 /* Go back up the rbtree to find next candidate node */
2046                 while (true) {
2047                         struct rb_node *prev = &vma->vm_rb;
2048                         if (!rb_parent(prev))
2049                                 return -ENOMEM;
2050                         vma = rb_entry(rb_parent(prev),
2051                                        struct vm_area_struct, vm_rb);
2052                         if (prev == vma->vm_rb.rb_right) {
2053                                 gap_start = vma->vm_prev ?
2054                                         vm_end_gap(vma->vm_prev) : 0;
2055                                 goto check_current;
2056                         }
2057                 }
2058         }
2059
2060 found:
2061         /* We found a suitable gap. Clip it with the original high_limit. */
2062         if (gap_end > info->high_limit)
2063                 gap_end = info->high_limit;
2064
2065 found_highest:
2066         /* Compute highest gap address at the desired alignment */
2067         gap_end -= info->length;
2068         gap_end -= (gap_end - info->align_offset) & info->align_mask;
2069
2070         VM_BUG_ON(gap_end < info->low_limit);
2071         VM_BUG_ON(gap_end < gap_start);
2072         return gap_end;
2073 }
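/*
 * Worked example of the topdown placement (illustrative only; assumes
 * align_mask == 0xffff, align_offset == 0 and info->length == 0x2000):
 * with gap_end == 0x33000 the code first subtracts the length, giving
 * 0x31000, then subtracts (0x31000 - 0) & 0xffff == 0x1000, so the
 * mapping is placed at 0x30000, the highest 64 KiB-aligned address that
 * still fits below the gap end.
 */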
2074
2075
2076 #ifndef arch_get_mmap_end
2077 #define arch_get_mmap_end(addr) (TASK_SIZE)
2078 #endif
2079
2080 #ifndef arch_get_mmap_base
2081 #define arch_get_mmap_base(addr, base) (base)
2082 #endif
2083
2084 /* Get an address range which is currently unmapped.
2085  * For shmat() with addr=0.
2086  *
2087  * Ugly calling convention alert:
2088  * Return value with the low bits set means error value,
2089  * ie
2090  *      if (ret & ~PAGE_MASK)
2091  *              error = ret;
2092  *
2093  * This function "knows" that -ENOMEM has the bits set.
2094  */
2095 #ifndef HAVE_ARCH_UNMAPPED_AREA
2096 unsigned long
2097 arch_get_unmapped_area(struct file *filp, unsigned long addr,
2098                 unsigned long len, unsigned long pgoff, unsigned long flags)
2099 {
2100         struct mm_struct *mm = current->mm;
2101         struct vm_area_struct *vma, *prev;
2102         struct vm_unmapped_area_info info;
2103         const unsigned long mmap_end = arch_get_mmap_end(addr);
2104
2105         if (len > mmap_end - mmap_min_addr)
2106                 return -ENOMEM;
2107
2108         if (flags & MAP_FIXED)
2109                 return addr;
2110
2111         if (addr) {
2112                 addr = PAGE_ALIGN(addr);
2113                 vma = find_vma_prev(mm, addr, &prev);
2114                 if (mmap_end - len >= addr && addr >= mmap_min_addr &&
2115                     (!vma || addr + len <= vm_start_gap(vma)) &&
2116                     (!prev || addr >= vm_end_gap(prev)))
2117                         return addr;
2118         }
2119
2120         info.flags = 0;
2121         info.length = len;
2122         info.low_limit = mm->mmap_base;
2123         info.high_limit = mmap_end;
2124         info.align_mask = 0;
2125         return vm_unmapped_area(&info);
2126 }
2127 #endif
2128
2129 /*
2130  * This mmap-allocator allocates new areas top-down from below the
2131  * stack's low limit (the base):
2132  */
2133 #ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
2134 unsigned long
2135 arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
2136                           unsigned long len, unsigned long pgoff,
2137                           unsigned long flags)
2138 {
2139         struct vm_area_struct *vma, *prev;
2140         struct mm_struct *mm = current->mm;
2141         struct vm_unmapped_area_info info;
2142         const unsigned long mmap_end = arch_get_mmap_end(addr);
2143
2144         /* requested length too big for entire address space */
2145         if (len > mmap_end - mmap_min_addr)
2146                 return -ENOMEM;
2147
2148         if (flags & MAP_FIXED)
2149                 return addr;
2150
2151         /* requesting a specific address */
2152         if (addr) {
2153                 addr = PAGE_ALIGN(addr);
2154                 vma = find_vma_prev(mm, addr, &prev);
2155                 if (mmap_end - len >= addr && addr >= mmap_min_addr &&
2156                                 (!vma || addr + len <= vm_start_gap(vma)) &&
2157                                 (!prev || addr >= vm_end_gap(prev)))
2158                         return addr;
2159         }
2160
2161         info.flags = VM_UNMAPPED_AREA_TOPDOWN;
2162         info.length = len;
2163         info.low_limit = max(PAGE_SIZE, mmap_min_addr);
2164         info.high_limit = arch_get_mmap_base(addr, mm->mmap_base);
2165         info.align_mask = 0;
2166         addr = vm_unmapped_area(&info);
2167
2168         /*
2169          * A failed mmap() very likely causes application failure,
2170          * so fall back to the bottom-up function here. This scenario
2171          * can happen with large stack limits and large mmap()
2172          * allocations.
2173          */
2174         if (offset_in_page(addr)) {
2175                 VM_BUG_ON(addr != -ENOMEM);
2176                 info.flags = 0;
2177                 info.low_limit = TASK_UNMAPPED_BASE;
2178                 info.high_limit = mmap_end;
2179                 addr = vm_unmapped_area(&info);
2180         }
2181
2182         return addr;
2183 }
2184 #endif
2185
2186 unsigned long
2187 get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
2188                 unsigned long pgoff, unsigned long flags)
2189 {
2190         unsigned long (*get_area)(struct file *, unsigned long,
2191                                   unsigned long, unsigned long, unsigned long);
2192
2193         unsigned long error = arch_mmap_check(addr, len, flags);
2194         if (error)
2195                 return error;
2196
2197         /* Careful about overflows.. */
2198         if (len > TASK_SIZE)
2199                 return -ENOMEM;
2200
2201         get_area = current->mm->get_unmapped_area;
2202         if (file) {
2203                 if (file->f_op->get_unmapped_area)
2204                         get_area = file->f_op->get_unmapped_area;
2205         } else if (flags & MAP_SHARED) {
2206                 /*
2207                  * mmap_region() will call shmem_zero_setup() to create a file,
2208                  * so use shmem's get_unmapped_area in case it can be huge.
2209                  * do_mmap_pgoff() will clear pgoff, so match alignment.
2210                  */
2211                 pgoff = 0;
2212                 get_area = shmem_get_unmapped_area;
2213         }
2214
2215         addr = get_area(file, addr, len, pgoff, flags);
2216         if (IS_ERR_VALUE(addr))
2217                 return addr;
2218
2219         if (addr > TASK_SIZE - len)
2220                 return -ENOMEM;
2221         if (offset_in_page(addr))
2222                 return -EINVAL;
2223
2224         error = security_mmap_addr(addr);
2225         return error ? error : addr;
2226 }
2227
2228 EXPORT_SYMBOL(get_unmapped_area);
2229
2230 /* Look up the first VMA which satisfies  addr < vm_end,  NULL if none. */
2231 struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
2232 {
2233         struct rb_node *rb_node;
2234         struct vm_area_struct *vma;
2235
2236         /* Check the cache first. */
2237         vma = vmacache_find(mm, addr);
2238         if (likely(vma))
2239                 return vma;
2240
2241         rb_node = mm->mm_rb.rb_node;
2242
2243         while (rb_node) {
2244                 struct vm_area_struct *tmp;
2245
2246                 tmp = rb_entry(rb_node, struct vm_area_struct, vm_rb);
2247
2248                 if (tmp->vm_end > addr) {
2249                         vma = tmp;
2250                         if (tmp->vm_start <= addr)
2251                                 break;
2252                         rb_node = rb_node->rb_left;
2253                 } else
2254                         rb_node = rb_node->rb_right;
2255         }
2256
2257         if (vma)
2258                 vmacache_update(addr, vma);
2259         return vma;
2260 }
2261
2262 EXPORT_SYMBOL(find_vma);
2263
2264 /*
2265  * Same as find_vma, but also return a pointer to the previous VMA in *pprev.
2266  */
2267 struct vm_area_struct *
2268 find_vma_prev(struct mm_struct *mm, unsigned long addr,
2269                         struct vm_area_struct **pprev)
2270 {
2271         struct vm_area_struct *vma;
2272
2273         vma = find_vma(mm, addr);
2274         if (vma) {
2275                 *pprev = vma->vm_prev;
2276         } else {
2277                 struct rb_node *rb_node = mm->mm_rb.rb_node;
2278                 *pprev = NULL;
2279                 while (rb_node) {
2280                         *pprev = rb_entry(rb_node, struct vm_area_struct, vm_rb);
2281                         rb_node = rb_node->rb_right;
2282                 }
2283         }
2284         return vma;
2285 }
2286
2287 /*
2288  * Verify that the stack growth is acceptable and
2289  * update accounting. This is shared with both the
2290  * grow-up and grow-down cases.
2291  */
2292 static int acct_stack_growth(struct vm_area_struct *vma,
2293                              unsigned long size, unsigned long grow)
2294 {
2295         struct mm_struct *mm = vma->vm_mm;
2296         unsigned long new_start;
2297
2298         /* address space limit tests */
2299         if (!may_expand_vm(mm, vma->vm_flags, grow))
2300                 return -ENOMEM;
2301
2302         /* Stack limit test */
2303         if (size > rlimit(RLIMIT_STACK))
2304                 return -ENOMEM;
2305
2306         /* mlock limit tests */
2307         if (vma->vm_flags & VM_LOCKED) {
2308                 unsigned long locked;
2309                 unsigned long limit;
2310                 locked = mm->locked_vm + grow;
2311                 limit = rlimit(RLIMIT_MEMLOCK);
2312                 limit >>= PAGE_SHIFT;
2313                 if (locked > limit && !capable(CAP_IPC_LOCK))
2314                         return -ENOMEM;
2315         }
2316
2317         /* Check to ensure the stack will not grow into a hugetlb-only region */
2318         new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start :
2319                         vma->vm_end - size;
2320         if (is_hugepage_only_range(vma->vm_mm, new_start, size))
2321                 return -EFAULT;
2322
2323         /*
2324          * Overcommit..  This must be the final test, as it will
2325          * update security statistics.
2326          */
2327         if (security_vm_enough_memory_mm(mm, grow))
2328                 return -ENOMEM;
2329
2330         return 0;
2331 }
2332
2333 #if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
2334 /*
2335  * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
2336  * vma is the last one with address > vma->vm_end.  Have to extend vma.
2337  */
2338 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
2339 {
2340         struct mm_struct *mm = vma->vm_mm;
2341         struct vm_area_struct *next;
2342         unsigned long gap_addr;
2343         int error = 0;
2344
2345         if (!(vma->vm_flags & VM_GROWSUP))
2346                 return -EFAULT;
2347
2348         /* Guard against exceeding limits of the address space. */
2349         address &= PAGE_MASK;
2350         if (address >= (TASK_SIZE & PAGE_MASK))
2351                 return -ENOMEM;
2352         address += PAGE_SIZE;
2353
2354         /* Enforce stack_guard_gap */
2355         gap_addr = address + stack_guard_gap;
2356
2357         /* Guard against overflow */
2358         if (gap_addr < address || gap_addr > TASK_SIZE)
2359                 gap_addr = TASK_SIZE;
2360
2361         next = vma->vm_next;
2362         if (next && next->vm_start < gap_addr &&
2363                         (next->vm_flags & (VM_WRITE|VM_READ|VM_EXEC))) {
2364                 if (!(next->vm_flags & VM_GROWSUP))
2365                         return -ENOMEM;
2366                 /* Check that both stack segments have the same anon_vma? */
2367         }
2368
2369         /* We must make sure the anon_vma is allocated. */
2370         if (unlikely(anon_vma_prepare(vma)))
2371                 return -ENOMEM;
2372
2373         /*
2374          * vma->vm_start/vm_end cannot change under us because the caller
2375          * is required to hold the mmap_sem in read mode.  We need the
2376          * anon_vma lock to serialize against concurrent expand_stacks.
2377          */
2378         anon_vma_lock_write(vma->anon_vma);
2379
2380         /* Somebody else might have raced and expanded it already */
2381         if (address > vma->vm_end) {
2382                 unsigned long size, grow;
2383
2384                 size = address - vma->vm_start;
2385                 grow = (address - vma->vm_end) >> PAGE_SHIFT;
2386
2387                 error = -ENOMEM;
2388                 if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) {
2389                         error = acct_stack_growth(vma, size, grow);
2390                         if (!error) {
2391                                 /*
2392                                  * vma_gap_update() doesn't support concurrent
2393                                  * updates, but we only hold a shared mmap_sem
2394                                  * lock here, so we need to protect against
2395                                  * concurrent vma expansions.
2396                                  * anon_vma_lock_write() doesn't help here, as
2397                                  * we don't guarantee that all growable vmas
2398                                  * in a mm share the same root anon vma.
2399                                  * So, we reuse mm->page_table_lock to guard
2400                                  * against concurrent vma expansions.
2401                                  */
2402                                 spin_lock(&mm->page_table_lock);
2403                                 if (vma->vm_flags & VM_LOCKED)
2404                                         mm->locked_vm += grow;
2405                                 vm_stat_account(mm, vma->vm_flags, grow);
2406                                 anon_vma_interval_tree_pre_update_vma(vma);
2407                                 vma->vm_end = address;
2408                                 anon_vma_interval_tree_post_update_vma(vma);
2409                                 if (vma->vm_next)
2410                                         vma_gap_update(vma->vm_next);
2411                                 else
2412                                         mm->highest_vm_end = vm_end_gap(vma);
2413                                 spin_unlock(&mm->page_table_lock);
2414
2415                                 perf_event_mmap(vma);
2416                         }
2417                 }
2418         }
2419         anon_vma_unlock_write(vma->anon_vma);
2420         khugepaged_enter_vma_merge(vma, vma->vm_flags);
2421         validate_mm(mm);
2422         return error;
2423 }
2424 #endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */
2425
2426 /*
2427  * vma is the first one with address < vma->vm_start.  Have to extend vma.
2428  */
2429 int expand_downwards(struct vm_area_struct *vma,
2430                                    unsigned long address)
2431 {
2432         struct mm_struct *mm = vma->vm_mm;
2433         struct vm_area_struct *prev;
2434         int error = 0;
2435
2436         address &= PAGE_MASK;
2437         if (address < mmap_min_addr)
2438                 return -EPERM;
2439
2440         /* Enforce stack_guard_gap */
2441         prev = vma->vm_prev;
2442         /* Check that both stack segments have the same anon_vma? */
2443         if (prev && !(prev->vm_flags & VM_GROWSDOWN) &&
2444                         (prev->vm_flags & (VM_WRITE|VM_READ|VM_EXEC))) {
2445                 if (address - prev->vm_end < stack_guard_gap)
2446                         return -ENOMEM;
2447         }
2448
2449         /* We must make sure the anon_vma is allocated. */
2450         if (unlikely(anon_vma_prepare(vma)))
2451                 return -ENOMEM;
2452
2453         /*
2454          * vma->vm_start/vm_end cannot change under us because the caller
2455          * is required to hold the mmap_sem in read mode.  We need the
2456          * anon_vma lock to serialize against concurrent expand_stacks.
2457          */
2458         anon_vma_lock_write(vma->anon_vma);
2459
2460         /* Somebody else might have raced and expanded it already */
2461         if (address < vma->vm_start) {
2462                 unsigned long size, grow;
2463
2464                 size = vma->vm_end - address;
2465                 grow = (vma->vm_start - address) >> PAGE_SHIFT;
2466
2467                 error = -ENOMEM;
2468                 if (grow <= vma->vm_pgoff) {
2469                         error = acct_stack_growth(vma, size, grow);
2470                         if (!error) {
2471                                 /*
2472                                  * vma_gap_update() doesn't support concurrent
2473                                  * updates, but we only hold a shared mmap_sem
2474                                  * lock here, so we need to protect against
2475                                  * concurrent vma expansions.
2476                                  * anon_vma_lock_write() doesn't help here, as
2477                                  * we don't guarantee that all growable vmas
2478                                  * in a mm share the same root anon vma.
2479                                  * So, we reuse mm->page_table_lock to guard
2480                                  * against concurrent vma expansions.
2481                                  */
2482                                 spin_lock(&mm->page_table_lock);
2483                                 if (vma->vm_flags & VM_LOCKED)
2484                                         mm->locked_vm += grow;
2485                                 vm_stat_account(mm, vma->vm_flags, grow);
2486                                 anon_vma_interval_tree_pre_update_vma(vma);
2487                                 vma->vm_start = address;
2488                                 vma->vm_pgoff -= grow;
2489                                 anon_vma_interval_tree_post_update_vma(vma);
2490                                 vma_gap_update(vma);
2491                                 spin_unlock(&mm->page_table_lock);
2492
2493                                 perf_event_mmap(vma);
2494                         }
2495                 }
2496         }
2497         anon_vma_unlock_write(vma->anon_vma);
2498         khugepaged_enter_vma_merge(vma, vma->vm_flags);
2499         validate_mm(mm);
2500         return error;
2501 }
2502
2503 /* enforced gap between the expanding stack and other mappings. */
2504 unsigned long stack_guard_gap = 256UL<<PAGE_SHIFT;
2505
2506 static int __init cmdline_parse_stack_guard_gap(char *p)
2507 {
2508         unsigned long val;
2509         char *endptr;
2510
2511         val = simple_strtoul(p, &endptr, 10);
2512         if (!*endptr)
2513                 stack_guard_gap = val << PAGE_SHIFT;
2514
2515         return 0;
2516 }
2517 __setup("stack_guard_gap=", cmdline_parse_stack_guard_gap);
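/*
 * Worked example (illustrative only): the value is parsed in pages, so the
 * default of 256 corresponds to 1 MiB with 4 KiB pages; booting with
 * "stack_guard_gap=512" on the kernel command line doubles the gap to
 * 2 MiB, while "stack_guard_gap=1" shrinks it to a single page.
 */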
2518
2519 #ifdef CONFIG_STACK_GROWSUP
2520 int expand_stack(struct vm_area_struct *vma, unsigned long address)
2521 {
2522         return expand_upwards(vma, address);
2523 }
2524
2525 struct vm_area_struct *
2526 find_extend_vma(struct mm_struct *mm, unsigned long addr)
2527 {
2528         struct vm_area_struct *vma, *prev;
2529
2530         addr &= PAGE_MASK;
2531         vma = find_vma_prev(mm, addr, &prev);
2532         if (vma && (vma->vm_start <= addr))
2533                 return vma;
2534         /* don't alter vm_end if the coredump is running */
2535         if (!prev || !mmget_still_valid(mm) || expand_stack(prev, addr))
2536                 return NULL;
2537         if (prev->vm_flags & VM_LOCKED)
2538                 populate_vma_page_range(prev, addr, prev->vm_end, NULL);
2539         return prev;
2540 }
2541 #else
2542 int expand_stack(struct vm_area_struct *vma, unsigned long address)
2543 {
2544         return expand_downwards(vma, address);
2545 }
2546
2547 struct vm_area_struct *
2548 find_extend_vma(struct mm_struct *mm, unsigned long addr)
2549 {
2550         struct vm_area_struct *vma;
2551         unsigned long start;
2552
2553         addr &= PAGE_MASK;
2554         vma = find_vma(mm, addr);
2555         if (!vma)
2556                 return NULL;
2557         if (vma->vm_start <= addr)
2558                 return vma;
2559         if (!(vma->vm_flags & VM_GROWSDOWN))
2560                 return NULL;
2561         /* don't alter vm_start if the coredump is running */
2562         if (!mmget_still_valid(mm))
2563                 return NULL;
2564         start = vma->vm_start;
2565         if (expand_stack(vma, addr))
2566                 return NULL;
2567         if (vma->vm_flags & VM_LOCKED)
2568                 populate_vma_page_range(vma, addr, start, NULL);
2569         return vma;
2570 }
2571 #endif
2572
2573 EXPORT_SYMBOL_GPL(find_extend_vma);
2574
2575 /*
2576  * Ok - we have the memory areas we should free on the vma list,
2577  * so release them, and do the vma updates.
2578  *
2579  * Called with the mm semaphore held.
2580  */
2581 static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
2582 {
2583         unsigned long nr_accounted = 0;
2584
2585         /* Update high watermark before we lower total_vm */
2586         update_hiwater_vm(mm);
2587         do {
2588                 long nrpages = vma_pages(vma);
2589
2590                 if (vma->vm_flags & VM_ACCOUNT)
2591                         nr_accounted += nrpages;
2592                 vm_stat_account(mm, vma->vm_flags, -nrpages);
2593                 vma = remove_vma(vma);
2594         } while (vma);
2595         vm_unacct_memory(nr_accounted);
2596         validate_mm(mm);
2597 }
2598
2599 /*
2600  * Get rid of page table information in the indicated region.
2601  *
2602  * Called with the mm semaphore held.
2603  */
2604 static void unmap_region(struct mm_struct *mm,
2605                 struct vm_area_struct *vma, struct vm_area_struct *prev,
2606                 unsigned long start, unsigned long end)
2607 {
2608         struct vm_area_struct *next = prev ? prev->vm_next : mm->mmap;
2609         struct mmu_gather tlb;
2610
2611         lru_add_drain();
2612         tlb_gather_mmu(&tlb, mm, start, end);
2613         update_hiwater_rss(mm);
2614         unmap_vmas(&tlb, vma, start, end);
2615         free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
2616                                  next ? next->vm_start : USER_PGTABLES_CEILING);
2617         tlb_finish_mmu(&tlb, start, end);
2618 }
2619
2620 /*
2621  * Create a list of vmas touched by the unmap, removing them from the mm's
2622  * vma list as we go.
2623  */
2624 static void
2625 detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
2626         struct vm_area_struct *prev, unsigned long end)
2627 {
2628         struct vm_area_struct **insertion_point;
2629         struct vm_area_struct *tail_vma = NULL;
2630
2631         insertion_point = (prev ? &prev->vm_next : &mm->mmap);
2632         vma->vm_prev = NULL;
2633         do {
2634                 vma_rb_erase(vma, &mm->mm_rb);
2635                 mm->map_count--;
2636                 tail_vma = vma;
2637                 vma = vma->vm_next;
2638         } while (vma && vma->vm_start < end);
2639         *insertion_point = vma;
2640         if (vma) {
2641                 vma->vm_prev = prev;
2642                 vma_gap_update(vma);
2643         } else
2644                 mm->highest_vm_end = prev ? vm_end_gap(prev) : 0;
2645         tail_vma->vm_next = NULL;
2646
2647         /* Kill the cache */
2648         vmacache_invalidate(mm);
2649 }
2650
2651 /*
2652  * __split_vma() bypasses sysctl_max_map_count checking.  We use this where it
2653  * has already been checked or doesn't make sense to fail.
2654  */
2655 int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
2656                 unsigned long addr, int new_below)
2657 {
2658         struct vm_area_struct *new;
2659         int err;
2660
2661         if (vma->vm_ops && vma->vm_ops->split) {
2662                 err = vma->vm_ops->split(vma, addr);
2663                 if (err)
2664                         return err;
2665         }
2666
2667         new = vm_area_dup(vma);
2668         if (!new)
2669                 return -ENOMEM;
2670
2671         if (new_below)
2672                 new->vm_end = addr;
2673         else {
2674                 new->vm_start = addr;
2675                 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
2676         }
2677
2678         err = vma_dup_policy(vma, new);
2679         if (err)
2680                 goto out_free_vma;
2681
2682         err = anon_vma_clone(new, vma);
2683         if (err)
2684                 goto out_free_mpol;
2685
2686         if (new->vm_file)
2687                 get_file(new->vm_file);
2688
2689         if (new->vm_ops && new->vm_ops->open)
2690                 new->vm_ops->open(new);
2691
2692         if (new_below)
2693                 err = vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff +
2694                         ((addr - new->vm_start) >> PAGE_SHIFT), new);
2695         else
2696                 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
2697
2698         /* Success. */
2699         if (!err)
2700                 return 0;
2701
2702         /* Clean everything up if vma_adjust failed. */
2703         if (new->vm_ops && new->vm_ops->close)
2704                 new->vm_ops->close(new);
2705         if (new->vm_file)
2706                 fput(new->vm_file);
2707         unlink_anon_vmas(new);
2708  out_free_mpol:
2709         mpol_put(vma_policy(new));
2710  out_free_vma:
2711         vm_area_free(new);
2712         return err;
2713 }
2714
2715 /*
2716  * Split a vma into two pieces at address 'addr'; a new vma is allocated
2717  * either for the first part or the tail.
2718  */
2719 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
2720               unsigned long addr, int new_below)
2721 {
2722         if (mm->map_count >= sysctl_max_map_count)
2723                 return -ENOMEM;
2724
2725         return __split_vma(mm, vma, addr, new_below);
2726 }
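/*
 * Worked example (illustrative only; assumes 4 KiB pages): splitting a vma
 * covering [0x10000, 0x30000) at addr == 0x20000 with new_below == 0 leaves
 * the original vma as [0x10000, 0x20000) and creates a new vma for
 * [0x20000, 0x30000) whose vm_pgoff is advanced by
 * (0x20000 - 0x10000) >> PAGE_SHIFT == 16 pages; with new_below == 1 the
 * new vma instead covers the lower half.
 */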
2727
2728 /* Munmap is split into 2 main parts -- this part which finds
2729  * what needs doing, and the areas themselves, which do the
2730  * work.  This now handles partial unmappings.
2731  * Jeremy Fitzhardinge <jeremy@goop.org>
2732  */
2733 int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
2734                 struct list_head *uf, bool downgrade)
2735 {
2736         unsigned long end;
2737         struct vm_area_struct *vma, *prev, *last;
2738
2739         if ((offset_in_page(start)) || start > TASK_SIZE || len > TASK_SIZE-start)
2740                 return -EINVAL;
2741
2742         len = PAGE_ALIGN(len);
2743         end = start + len;
2744         if (len == 0)
2745                 return -EINVAL;
2746
2747         /*
2748          * arch_unmap() might do unmaps itself.  It must be called, and
2749          * must finish any rbtree manipulation of its own, before this code
2750          * runs and starts to manipulate the rbtree itself.
2751          */
2752         arch_unmap(mm, start, end);
2753
2754         /* Find the first overlapping VMA */
2755         vma = find_vma(mm, start);
2756         if (!vma)
2757                 return 0;
2758         prev = vma->vm_prev;
2759         /* we have  start < vma->vm_end  */
2760
2761         /* if it doesn't overlap, we have nothing.. */
2762         if (vma->vm_start >= end)
2763                 return 0;
2764
2765         /*
2766          * If we need to split any vma, do it now to save pain later.
2767          *
2768          * Note: mremap's move_vma VM_ACCOUNT handling assumes a partially
2769          * unmapped vm_area_struct will remain in use: so lower split_vma
2770          * places tmp vma above, and higher split_vma places tmp vma below.
2771          */
2772         if (start > vma->vm_start) {
2773                 int error;
2774
2775                 /*
2776                  * Make sure that map_count on return from munmap() will
2777                  * not exceed its limit; but let map_count go just above
2778                  * its limit temporarily, to help free resources as expected.
2779                  */
2780                 if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count)
2781                         return -ENOMEM;
2782
2783                 error = __split_vma(mm, vma, start, 0);
2784                 if (error)
2785                         return error;
2786                 prev = vma;
2787         }
2788
2789         /* Does it split the last one? */
2790         last = find_vma(mm, end);
2791         if (last && end > last->vm_start) {
2792                 int error = __split_vma(mm, last, end, 1);
2793                 if (error)
2794                         return error;
2795         }
2796         vma = prev ? prev->vm_next : mm->mmap;
2797
2798         if (unlikely(uf)) {
2799                 /*
2800                  * If userfaultfd_unmap_prep returns an error, the vmas
2801                  * will remain split, but userland will get a highly
2802                  * unexpected error anyway. This is no different from
2803                  * the case where the first of the two __split_vma calls
2804                  * fails but we don't undo the first split, even though
2805                  * we could. This failure is unlikely enough that it's
2806                  * not worth optimizing for.
2807                  */
2808                 int error = userfaultfd_unmap_prep(vma, start, end, uf);
2809                 if (error)
2810                         return error;
2811         }
2812
2813         /*
2814          * unlock any mlock()ed ranges before detaching vmas
2815          */
2816         if (mm->locked_vm) {
2817                 struct vm_area_struct *tmp = vma;
2818                 while (tmp && tmp->vm_start < end) {
2819                         if (tmp->vm_flags & VM_LOCKED) {
2820                                 mm->locked_vm -= vma_pages(tmp);
2821                                 munlock_vma_pages_all(tmp);
2822                         }
2823
2824                         tmp = tmp->vm_next;
2825                 }
2826         }
2827
2828         /* Detach vmas from rbtree */
2829         detach_vmas_to_be_unmapped(mm, vma, prev, end);
2830
2831         if (downgrade)
2832                 downgrade_write(&mm->mmap_sem);
2833
2834         unmap_region(mm, vma, prev, start, end);
2835
2836         /* Fix up all other VM information */
2837         remove_vma_list(mm, vma);
2838
2839         return downgrade ? 1 : 0;
2840 }
2841
2842 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
2843               struct list_head *uf)
2844 {
2845         return __do_munmap(mm, start, len, uf, false);
2846 }
2847
2848 static int __vm_munmap(unsigned long start, size_t len, bool downgrade)
2849 {
2850         int ret;
2851         struct mm_struct *mm = current->mm;
2852         LIST_HEAD(uf);
2853
2854         if (down_write_killable(&mm->mmap_sem))
2855                 return -EINTR;
2856
2857         ret = __do_munmap(mm, start, len, &uf, downgrade);
2858         /*
2859          * Returning 1 indicates mmap_sem is downgraded.
2860          * But 1 is not a legal return value for vm_munmap() or munmap();
2861          * reset it to 0 before returning.
2862          */
2863         if (ret == 1) {
2864                 up_read(&mm->mmap_sem);
2865                 ret = 0;
2866         } else
2867                 up_write(&mm->mmap_sem);
2868
2869         userfaultfd_unmap_complete(mm, &uf);
2870         return ret;
2871 }
2872
2873 int vm_munmap(unsigned long start, size_t len)
2874 {
2875         return __vm_munmap(start, len, false);
2876 }
2877 EXPORT_SYMBOL(vm_munmap);
2878
2879 SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
2880 {
2881         profile_munmap(addr);
2882         return __vm_munmap(addr, len, true);
2883 }
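
/*
 * Illustrative userspace example (not part of this file): the checks at
 * the top of __do_munmap() are directly visible to userspace -- munmap()
 * of an address that is not page-aligned fails with EINVAL, while the
 * length is rounded up to a whole page, so unmapping one byte releases
 * the entire page containing it.
 */
#include <errno.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, 2 * page, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;

	/* Unaligned start address: rejected before any unmapping is done. */
	if (munmap(p + 1, page) == -1 && errno == EINVAL)
		puts("unaligned munmap -> EINVAL, as expected");

	/* len is PAGE_ALIGN()ed up: this drops the whole second page. */
	if (munmap(p + page, 1) == 0)
		puts("1-byte munmap released the full page");

	return munmap(p, page) ? 1 : 0;
}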
2884
2885
2886 /*
2887  * Emulation of deprecated remap_file_pages() syscall.
2888  */
2889 SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
2890                 unsigned long, prot, unsigned long, pgoff, unsigned long, flags)
2891 {
2892
2893         struct mm_struct *mm = current->mm;
2894         struct vm_area_struct *vma;
2895         unsigned long populate = 0;
2896         unsigned long ret = -EINVAL;
2897         struct file *file;
2898
2899         pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. See Documentation/vm/remap_file_pages.rst.\n",
2900                      current->comm, current->pid);
2901
2902         if (prot)
2903                 return ret;
2904         start = start & PAGE_MASK;
2905         size = size & PAGE_MASK;
2906
2907         if (start + size <= start)
2908                 return ret;
2909
2910         /* Does pgoff wrap? */
2911         if (pgoff + (size >> PAGE_SHIFT) < pgoff)
2912                 return ret;
2913
2914         if (down_write_killable(&mm->mmap_sem))
2915                 return -EINTR;
2916
2917         vma = find_vma(mm, start);
2918
2919         if (!vma || !(vma->vm_flags & VM_SHARED))
2920                 goto out;
2921
2922         if (start < vma->vm_start)
2923                 goto out;
2924
2925         if (start + size > vma->vm_end) {
2926                 struct vm_area_struct *next;
2927
2928                 for (next = vma->vm_next; next; next = next->vm_next) {
2929                         /* hole between vmas ? */
2930                         if (next->vm_start != next->vm_prev->vm_end)
2931                                 goto out;
2932
2933                         if (next->vm_file != vma->vm_file)
2934                                 goto out;
2935
2936                         if (next->vm_flags != vma->vm_flags)
2937                                 goto out;
2938
2939                         if (start + size <= next->vm_end)
2940                                 break;
2941                 }
2942
2943                 if (!next)
2944                         goto out;
2945         }
2946
2947         prot |= vma->vm_flags & VM_READ ? PROT_READ : 0;
2948         prot |= vma->vm_flags & VM_WRITE ? PROT_WRITE : 0;
2949         prot |= vma->vm_flags & VM_EXEC ? PROT_EXEC : 0;
2950
2951         flags &= MAP_NONBLOCK;
2952         flags |= MAP_SHARED | MAP_FIXED | MAP_POPULATE;
2953         if (vma->vm_flags & VM_LOCKED) {
2954                 struct vm_area_struct *tmp;
2955                 flags |= MAP_LOCKED;
2956
2957                 /* drop PG_mlocked flag for the over-mapped range */
2958                 for (tmp = vma; tmp && tmp->vm_start < start + size;
2959                                 tmp = tmp->vm_next) {
2960                         /*
2961                          * Split pmd and munlock page on the border
2962                          * of the range.
2963                          */
2964                         vma_adjust_trans_huge(tmp, start, start + size, 0);
2965
2966                         munlock_vma_pages_range(tmp,
2967                                         max(tmp->vm_start, start),
2968                                         min(tmp->vm_end, start + size));
2969                 }
2970         }
2971
2972         file = get_file(vma->vm_file);
2973         ret = do_mmap_pgoff(vma->vm_file, start, size,
2974                         prot, flags, pgoff, &populate, NULL);
2975         fput(file);
2976 out:
2977         up_write(&mm->mmap_sem);
2978         if (populate)
2979                 mm_populate(ret, populate);
2980         if (!IS_ERR_VALUE(ret))
2981                 ret = 0;
2982         return ret;
2983 }
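
/*
 * Illustrative userspace example (not part of this file) of the deprecated
 * interface emulated above: rewire the first page of a shared file mapping
 * so that it shows the second page of the file.  The emulation turns this
 * into the MAP_SHARED | MAP_FIXED | MAP_POPULATE mmap built above; prot
 * must be 0 or the call fails with EINVAL.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	FILE *f = tmpfile();
	char *p;

	if (!f || ftruncate(fileno(f), 2 * page))
		return 1;

	p = mmap(NULL, 2 * page, PROT_READ | PROT_WRITE, MAP_SHARED,
		 fileno(f), 0);
	if (p == MAP_FAILED)
		return 1;

	strcpy(p, "page0");
	strcpy(p + page, "page1");

	/* Show file page 1 at the start of the window (non-linear view). */
	if (remap_file_pages(p, page, 0, 1, 0))
		return 1;

	printf("first page of the window now reads: %s\n", p);	/* "page1" */
	return 0;
}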
2984
2985 /*
2986  *  this is really a simplified "do_mmap".  it only handles
2987  *  anonymous maps.  eventually we may be able to do some
2988  *  brk-specific accounting here.
2989  */
2990 static int do_brk_flags(unsigned long addr, unsigned long len, unsigned long flags, struct list_head *uf)
2991 {
2992         struct mm_struct *mm = current->mm;
2993         struct vm_area_struct *vma, *prev;
2994         struct rb_node **rb_link, *rb_parent;
2995         pgoff_t pgoff = addr >> PAGE_SHIFT;
2996         int error;
2997
2998         /* Until we need other flags, refuse anything except VM_EXEC. */
2999         if ((flags & (~VM_EXEC)) != 0)
3000                 return -EINVAL;
3001         flags |= VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
3002
3003         error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
3004         if (offset_in_page(error))
3005                 return error;
3006
3007         error = mlock_future_check(mm, mm->def_flags, len);
3008         if (error)
3009                 return error;
3010
3011         /*
3012          * Clear old maps.  this also does some error checking for us
3013          */
3014         while (find_vma_links(mm, addr, addr + len, &prev, &rb_link,
3015                               &rb_parent)) {
3016                 if (do_munmap(mm, addr, len, uf))
3017                         return -ENOMEM;
3018         }
3019
3020         /* Check against address space limits *after* clearing old maps... */
3021         if (!may_expand_vm(mm, flags, len >> PAGE_SHIFT))
3022                 return -ENOMEM;
3023
3024         if (mm->map_count > sysctl_max_map_count)
3025                 return -ENOMEM;
3026
3027         if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
3028                 return -ENOMEM;
3029
3030         /* Can we just expand an old private anonymous mapping? */
3031         vma = vma_merge(mm, prev, addr, addr + len, flags,
3032                         NULL, NULL, pgoff, NULL, NULL_VM_UFFD_CTX);
3033         if (vma)
3034                 goto out;
3035
3036         /*
3037          * create a vma struct for an anonymous mapping
3038          */
3039         vma = vm_area_alloc(mm);
3040         if (!vma) {
3041                 vm_unacct_memory(len >> PAGE_SHIFT);
3042                 return -ENOMEM;
3043         }
3044
3045         vma_set_anonymous(vma);
3046         vma->vm_start = addr;
3047         vma->vm_end = addr + len;
3048         vma->vm_pgoff = pgoff;
3049         vma->vm_flags = flags;
3050         vma->vm_page_prot = vm_get_page_prot(flags);
3051         vma_link(mm, vma, prev, rb_link, rb_parent);
3052 out:
3053         perf_event_mmap(vma);
3054         mm->total_vm += len >> PAGE_SHIFT;
3055         mm->data_vm += len >> PAGE_SHIFT;
3056         if (flags & VM_LOCKED)
3057                 mm->locked_vm += (len >> PAGE_SHIFT);
3058         vma->vm_flags |= VM_SOFTDIRTY;
3059         return 0;
3060 }
3061
3062 int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags)
3063 {
3064         struct mm_struct *mm = current->mm;
3065         unsigned long len;
3066         int ret;
3067         bool populate;
3068         LIST_HEAD(uf);
3069
3070         len = PAGE_ALIGN(request);
3071         if (len < request)
3072                 return -ENOMEM;
3073         if (!len)
3074                 return 0;
3075
3076         if (down_write_killable(&mm->mmap_sem))
3077                 return -EINTR;
3078
3079         ret = do_brk_flags(addr, len, flags, &uf);
3080         populate = ((mm->def_flags & VM_LOCKED) != 0);
3081         up_write(&mm->mmap_sem);
3082         userfaultfd_unmap_complete(mm, &uf);
3083         if (populate && !ret)
3084                 mm_populate(addr, len);
3085         return ret;
3086 }
3087 EXPORT_SYMBOL(vm_brk_flags);
3088
3089 int vm_brk(unsigned long addr, unsigned long len)
3090 {
3091         return vm_brk_flags(addr, len, 0);
3092 }
3093 EXPORT_SYMBOL(vm_brk);
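
/*
 * Minimal in-kernel sketch (hypothetical caller, for illustration only):
 * binary-format loaders use vm_brk()/vm_brk_flags() to create the
 * zero-filled region that extends past the last file-backed segment of an
 * executable; compare the bss handling in fs/binfmt_elf.c.
 */
static int example_map_bss(unsigned long last_mapped_end, unsigned long bss_end)
{
	unsigned long start = PAGE_ALIGN(last_mapped_end);

	if (bss_end <= start)
		return 0;	/* bss fits in the last already-mapped page */

	/* Anonymous, zero-filled, and accounted like any other data mapping. */
	return vm_brk_flags(start, bss_end - start, 0);
}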
3094
3095 /* Release all mmaps. */
3096 void exit_mmap(struct mm_struct *mm)
3097 {
3098         struct mmu_gather tlb;
3099         struct vm_area_struct *vma;
3100         unsigned long nr_accounted = 0;
3101
3102         /* mm's last user has gone, and it's about to be pulled down */
3103         mmu_notifier_release(mm);
3104
3105         if (unlikely(mm_is_oom_victim(mm))) {
3106                 /*
3107                  * Manually reap the mm to free as much memory as possible.
3108                  * Then, as the oom reaper does, set MMF_OOM_SKIP to disregard
3109                  * this mm from further consideration.  Taking mm->mmap_sem for
3110                  * write after setting MMF_OOM_SKIP will guarantee that the oom
3111                  * reaper will not run on this mm again after mmap_sem is
3112                  * dropped.
3113                  *
3114                  * Nothing can be holding mm->mmap_sem here and the above call
3115                  * to mmu_notifier_release(mm) ensures mmu notifier callbacks in
3116                  * __oom_reap_task_mm() will not block.
3117                  *
3118                  * This needs to be done before calling munlock_vma_pages_all(),
3119                  * which clears VM_LOCKED, otherwise the oom reaper cannot
3120                  * reliably test it.
3121                  */
3122                 (void)__oom_reap_task_mm(mm);
3123
3124                 set_bit(MMF_OOM_SKIP, &mm->flags);
3125                 down_write(&mm->mmap_sem);
3126                 up_write(&mm->mmap_sem);
3127         }
3128
3129         if (mm->locked_vm) {
3130                 vma = mm->mmap;
3131                 while (vma) {
3132                         if (vma->vm_flags & VM_LOCKED)
3133                                 munlock_vma_pages_all(vma);
3134                         vma = vma->vm_next;
3135                 }
3136         }
3137
3138         arch_exit_mmap(mm);
3139
3140         vma = mm->mmap;
3141         if (!vma)       /* Can happen if dup_mmap() received an OOM */
3142                 return;
3143
3144         lru_add_drain();
3145         flush_cache_mm(mm);
3146         tlb_gather_mmu(&tlb, mm, 0, -1);
3147         /* update_hiwater_rss(mm) here? but nobody should be looking */
3148         /* Use -1 here to ensure all VMAs in the mm are unmapped */
3149         unmap_vmas(&tlb, vma, 0, -1);
3150         free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);
3151         tlb_finish_mmu(&tlb, 0, -1);
3152
3153         /*
3154          * Walk the list again, actually closing and freeing it,
3155          * with preemption enabled, without holding any MM locks.
3156          */
3157         while (vma) {
3158                 if (vma->vm_flags & VM_ACCOUNT)
3159                         nr_accounted += vma_pages(vma);
3160                 vma = remove_vma(vma);
3161         }
3162         vm_unacct_memory(nr_accounted);
3163 }
3164
3165 /* Insert vm structure into process list sorted by address
3166  * and into the inode's i_mmap tree.  If vm_file is non-NULL
3167  * then i_mmap_rwsem is taken here.
3168  */
3169 int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
3170 {
3171         struct vm_area_struct *prev;
3172         struct rb_node **rb_link, *rb_parent;
3173
3174         if (find_vma_links(mm, vma->vm_start, vma->vm_end,
3175                            &prev, &rb_link, &rb_parent))
3176                 return -ENOMEM;
3177         if ((vma->vm_flags & VM_ACCOUNT) &&
3178              security_vm_enough_memory_mm(mm, vma_pages(vma)))
3179                 return -ENOMEM;
3180
3181         /*
3182          * The vm_pgoff of a purely anonymous vma should be irrelevant
3183          * until its first write fault, when page's anon_vma and index
3184          * are set.  But now set the vm_pgoff it will almost certainly
3185          * end up with (unless mremap moves it elsewhere before that
3186          * first write fault), so /proc/pid/maps tells a consistent story.
3187          *
3188          * By setting it to reflect the virtual start address of the
3189          * vma, merges and splits can happen in a seamless way, just
3190          * using the existing file pgoff checks and manipulations.
3191          * Similarly in do_mmap_pgoff and in do_brk.
3192          */
3193         if (vma_is_anonymous(vma)) {
3194                 BUG_ON(vma->anon_vma);
3195                 vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
3196         }
3197
3198         vma_link(mm, vma, prev, rb_link, rb_parent);
3199         return 0;
3200 }
3201
3202 /*
3203  * Copy the vma structure to a new location in the same mm,
3204  * prior to moving page table entries, to effect an mremap move.
3205  */
3206 struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
3207         unsigned long addr, unsigned long len, pgoff_t pgoff,
3208         bool *need_rmap_locks)
3209 {
3210         struct vm_area_struct *vma = *vmap;
3211         unsigned long vma_start = vma->vm_start;
3212         struct mm_struct *mm = vma->vm_mm;
3213         struct vm_area_struct *new_vma, *prev;
3214         struct rb_node **rb_link, *rb_parent;
3215         bool faulted_in_anon_vma = true;
3216
3217         /*
3218          * If anonymous vma has not yet been faulted, update new pgoff
3219          * to match new location, to increase its chance of merging.
3220          */
3221         if (unlikely(vma_is_anonymous(vma) && !vma->anon_vma)) {
3222                 pgoff = addr >> PAGE_SHIFT;
3223                 faulted_in_anon_vma = false;
3224         }
3225
3226         if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent))
3227                 return NULL;    /* should never get here */
3228         new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags,
3229                             vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
3230                             vma->vm_userfaultfd_ctx);
3231         if (new_vma) {
3232                 /*
3233                  * Source vma may have been merged into new_vma
3234                  */
3235                 if (unlikely(vma_start >= new_vma->vm_start &&
3236                              vma_start < new_vma->vm_end)) {
3237                         /*
3238                          * The only way we can get a vma_merge with
3239                          * self during an mremap is if the vma hasn't
3240                          * been faulted in yet and we were allowed to
3241                          * reset the dst vma->vm_pgoff to the
3242                          * destination address of the mremap to allow
3243                          * the merge to happen. mremap must change the
3244                          * vm_pgoff linearity between src and dst vmas
3245                          * (in turn preventing a vma_merge) to be
3246                          * safe. It is only safe to keep the vm_pgoff
3247                          * linear if there are no pages mapped yet.
3248                          */
3249                         VM_BUG_ON_VMA(faulted_in_anon_vma, new_vma);
3250                         *vmap = vma = new_vma;
3251                 }
3252                 *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
3253         } else {
3254                 new_vma = vm_area_dup(vma);
3255                 if (!new_vma)
3256                         goto out;
3257                 new_vma->vm_start = addr;
3258                 new_vma->vm_end = addr + len;
3259                 new_vma->vm_pgoff = pgoff;
3260                 if (vma_dup_policy(vma, new_vma))
3261                         goto out_free_vma;
3262                 if (anon_vma_clone(new_vma, vma))
3263                         goto out_free_mempol;
3264                 if (new_vma->vm_file)
3265                         get_file(new_vma->vm_file);
3266                 if (new_vma->vm_ops && new_vma->vm_ops->open)
3267                         new_vma->vm_ops->open(new_vma);
3268                 vma_link(mm, new_vma, prev, rb_link, rb_parent);
3269                 *need_rmap_locks = false;
3270         }
3271         return new_vma;
3272
3273 out_free_mempol:
3274         mpol_put(vma_policy(new_vma));
3275 out_free_vma:
3276         vm_area_free(new_vma);
3277 out:
3278         return NULL;
3279 }
3280
3281 /*
3282  * Return true if the calling process may expand its vm space by the passed
3283  * number of pages
3284  */
3285 bool may_expand_vm(struct mm_struct *mm, vm_flags_t flags, unsigned long npages)
3286 {
3287         if (mm->total_vm + npages > rlimit(RLIMIT_AS) >> PAGE_SHIFT)
3288                 return false;
3289
3290         if (is_data_mapping(flags) &&
3291             mm->data_vm + npages > rlimit(RLIMIT_DATA) >> PAGE_SHIFT) {
3292                 /* Workaround for Valgrind */
3293                 if (rlimit(RLIMIT_DATA) == 0 &&
3294                     mm->data_vm + npages <= rlimit_max(RLIMIT_DATA) >> PAGE_SHIFT)
3295                         return true;
3296
3297                 pr_warn_once("%s (%d): VmData %lu exceeds data ulimit %lu. Update limits%s.\n",
3298                              current->comm, current->pid,
3299                              (mm->data_vm + npages) << PAGE_SHIFT,
3300                              rlimit(RLIMIT_DATA),
3301                              ignore_rlimit_data ? "" : " or use boot option ignore_rlimit_data");
3302
3303                 if (!ignore_rlimit_data)
3304                         return false;
3305         }
3306
3307         return true;
3308 }
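
/*
 * Illustrative userspace example (not part of this file): may_expand_vm()
 * is why a private, writable, anonymous mapping -- a "data mapping" -- can
 * fail with ENOMEM once RLIMIT_DATA is exhausted (assuming the
 * ignore_rlimit_data boot parameter is left at its default).
 */
#include <errno.h>
#include <stdio.h>
#include <sys/mman.h>
#include <sys/resource.h>

int main(void)
{
	struct rlimit rl = { .rlim_cur = 1 << 20, .rlim_max = 1 << 20 };
	void *p;

	if (setrlimit(RLIMIT_DATA, &rl))	/* cap data mappings at 1 MiB */
		return 1;

	p = mmap(NULL, 16 << 20, PROT_READ | PROT_WRITE,
		 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED && errno == ENOMEM)
		puts("16 MiB data mapping rejected by RLIMIT_DATA");

	return 0;
}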
3309
3310 void vm_stat_account(struct mm_struct *mm, vm_flags_t flags, long npages)
3311 {
3312         mm->total_vm += npages;
3313
3314         if (is_exec_mapping(flags))
3315                 mm->exec_vm += npages;
3316         else if (is_stack_mapping(flags))
3317                 mm->stack_vm += npages;
3318         else if (is_data_mapping(flags))
3319                 mm->data_vm += npages;
3320 }
3321
3322 static vm_fault_t special_mapping_fault(struct vm_fault *vmf);
3323
3324 /*
3325  * Having a close hook prevents vma merging regardless of flags.
3326  */
3327 static void special_mapping_close(struct vm_area_struct *vma)
3328 {
3329 }
3330
3331 static const char *special_mapping_name(struct vm_area_struct *vma)
3332 {
3333         return ((struct vm_special_mapping *)vma->vm_private_data)->name;
3334 }
3335
3336 static int special_mapping_mremap(struct vm_area_struct *new_vma)
3337 {
3338         struct vm_special_mapping *sm = new_vma->vm_private_data;
3339
3340         if (WARN_ON_ONCE(current->mm != new_vma->vm_mm))
3341                 return -EFAULT;
3342
3343         if (sm->mremap)
3344                 return sm->mremap(sm, new_vma);
3345
3346         return 0;
3347 }
3348
3349 static const struct vm_operations_struct special_mapping_vmops = {
3350         .close = special_mapping_close,
3351         .fault = special_mapping_fault,
3352         .mremap = special_mapping_mremap,
3353         .name = special_mapping_name,
3354 };
3355
3356 static const struct vm_operations_struct legacy_special_mapping_vmops = {
3357         .close = special_mapping_close,
3358         .fault = special_mapping_fault,
3359 };
3360
3361 static vm_fault_t special_mapping_fault(struct vm_fault *vmf)
3362 {
3363         struct vm_area_struct *vma = vmf->vma;
3364         pgoff_t pgoff;
3365         struct page **pages;
3366
3367         if (vma->vm_ops == &legacy_special_mapping_vmops) {
3368                 pages = vma->vm_private_data;
3369         } else {
3370                 struct vm_special_mapping *sm = vma->vm_private_data;
3371
3372                 if (sm->fault)
3373                         return sm->fault(sm, vmf->vma, vmf);
3374
3375                 pages = sm->pages;
3376         }
3377
3378         for (pgoff = vmf->pgoff; pgoff && *pages; ++pages)
3379                 pgoff--;
3380
3381         if (*pages) {
3382                 struct page *page = *pages;
3383                 get_page(page);
3384                 vmf->page = page;
3385                 return 0;
3386         }
3387
3388         return VM_FAULT_SIGBUS;
3389 }
3390
3391 static struct vm_area_struct *__install_special_mapping(
3392         struct mm_struct *mm,
3393         unsigned long addr, unsigned long len,
3394         unsigned long vm_flags, void *priv,
3395         const struct vm_operations_struct *ops)
3396 {
3397         int ret;
3398         struct vm_area_struct *vma;
3399
3400         vma = vm_area_alloc(mm);
3401         if (unlikely(vma == NULL))
3402                 return ERR_PTR(-ENOMEM);
3403
3404         vma->vm_start = addr;
3405         vma->vm_end = addr + len;
3406
3407         vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY;
3408         vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
3409
3410         vma->vm_ops = ops;
3411         vma->vm_private_data = priv;
3412
3413         ret = insert_vm_struct(mm, vma);
3414         if (ret)
3415                 goto out;
3416
3417         vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT);
3418
3419         perf_event_mmap(vma);
3420
3421         return vma;
3422
3423 out:
3424         vm_area_free(vma);
3425         return ERR_PTR(ret);
3426 }
3427
3428 bool vma_is_special_mapping(const struct vm_area_struct *vma,
3429         const struct vm_special_mapping *sm)
3430 {
3431         return vma->vm_private_data == sm &&
3432                 (vma->vm_ops == &special_mapping_vmops ||
3433                  vma->vm_ops == &legacy_special_mapping_vmops);
3434 }
3435
3436 /*
3437  * Called with mm->mmap_sem held for writing.
3438  * Insert a new vma covering the given region, with the given flags.
3439  * Its pages are supplied by the given array of struct page *.
3440  * The array can be shorter than len >> PAGE_SHIFT if it's null-terminated.
3441  * The region past the last page supplied will always produce SIGBUS.
3442  * The array pointer and the pages it points to are assumed to stay alive
3443  * for as long as this mapping might exist.
3444  */
3445 struct vm_area_struct *_install_special_mapping(
3446         struct mm_struct *mm,
3447         unsigned long addr, unsigned long len,
3448         unsigned long vm_flags, const struct vm_special_mapping *spec)
3449 {
3450         return __install_special_mapping(mm, addr, len, vm_flags, (void *)spec,
3451                                         &special_mapping_vmops);
3452 }
3453
3454 int install_special_mapping(struct mm_struct *mm,
3455                             unsigned long addr, unsigned long len,
3456                             unsigned long vm_flags, struct page **pages)
3457 {
3458         struct vm_area_struct *vma = __install_special_mapping(
3459                 mm, addr, len, vm_flags, (void *)pages,
3460                 &legacy_special_mapping_vmops);
3461
3462         return PTR_ERR_OR_ZERO(vma);
3463 }
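
/*
 * Minimal in-kernel sketch (hypothetical names, for illustration only) of
 * the modern interface above, modelled on how architectures install the
 * vDSO: one static vm_special_mapping descriptor, then
 * _install_special_mapping() with mmap_sem held for writing.  The pages
 * array is NULL-terminated, so faults past the last supplied page raise
 * SIGBUS as described above.
 */
static struct page *example_pages[2];	/* [0] assumed to be allocated elsewhere */

static const struct vm_special_mapping example_spec = {
	.name	= "[example]",
	.pages	= example_pages,
};

static int example_install(struct mm_struct *mm, unsigned long addr)
{
	struct vm_area_struct *vma;

	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;
	vma = _install_special_mapping(mm, addr, PAGE_SIZE,
				       VM_READ | VM_MAYREAD | VM_DONTDUMP,
				       &example_spec);
	up_write(&mm->mmap_sem);

	return PTR_ERR_OR_ZERO(vma);
}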
3464
3465 static DEFINE_MUTEX(mm_all_locks_mutex);
3466
3467 static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
3468 {
3469         if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
3470                 /*
3471                  * The LSB of head.next can't change from under us
3472                  * because we hold the mm_all_locks_mutex.
3473                  */
3474                 down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_sem);
3475                 /*
3476                  * We can safely modify head.next after taking the
3477                  * anon_vma->root->rwsem. If some other vma in this mm shares
3478                  * the same anon_vma we won't take it again.
3479                  *
3480                  * No need of atomic instructions here, head.next
3481                  * can't change from under us thanks to the
3482                  * anon_vma->root->rwsem.
3483                  */
3484                 if (__test_and_set_bit(0, (unsigned long *)
3485                                        &anon_vma->root->rb_root.rb_root.rb_node))
3486                         BUG();
3487         }
3488 }
3489
3490 static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
3491 {
3492         if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
3493                 /*
3494                  * AS_MM_ALL_LOCKS can't change from under us because
3495                  * we hold the mm_all_locks_mutex.
3496                  *
3497                  * Operations on ->flags have to be atomic because
3498                  * even if AS_MM_ALL_LOCKS is stable thanks to the
3499                  * mm_all_locks_mutex, there may be other cpus
3500                  * changing other bitflags in parallel to us.
3501                  */
3502                 if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
3503                         BUG();
3504                 down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_sem);
3505         }
3506 }
3507
3508 /*
3509  * This operation locks against the VM for all pte/vma/mm related
3510  * operations that could ever happen on a certain mm. This includes
3511  * vmtruncate, try_to_unmap, and all page faults.
3512  *
3513  * The caller must take the mmap_sem in write mode before calling
3514  * mm_take_all_locks(). The caller isn't allowed to release the
3515  * mmap_sem until mm_drop_all_locks() returns.
3516  *
3517  * mmap_sem in write mode is required in order to block all operations
3518  * that could modify pagetables and free pages without need of
3519  * altering the vma layout. It's also needed in write mode to prevent new
3520  * anon_vmas from being associated with existing vmas.
3521  *
3522  * A single task can't take more than one mm_take_all_locks() in a row
3523  * or it would deadlock.
3524  *
3525  * The LSB in anon_vma->rb_root.rb_node and the AS_MM_ALL_LOCKS bitflag in
3526  * mapping->flags avoid taking the same lock twice if more than one
3527  * vma in this mm is backed by the same anon_vma or address_space.
3528  *
3529  * We take locks in the following order, according to the comment at the
3530  * beginning of mm/rmap.c:
3531  *   - all hugetlbfs_i_mmap_rwsem_key locks (aka mapping->i_mmap_rwsem for
3532  *     hugetlb mapping);
3533  *   - all i_mmap_rwsem locks;
3534  *   - all anon_vma->rwsem locks.
3535  *
3536  * We can take the locks within each type in any order, because the VM code
3537  * doesn't nest them and we are protected from parallel mm_take_all_locks() by
3538  * mm_all_locks_mutex.
3539  *
3540  * mm_take_all_locks() and mm_drop_all_locks() are expensive operations
3541  * that may have to take thousands of locks.
3542  *
3543  * mm_take_all_locks() can fail if it's interrupted by signals.
3544  */
3545 int mm_take_all_locks(struct mm_struct *mm)
3546 {
3547         struct vm_area_struct *vma;
3548         struct anon_vma_chain *avc;
3549
3550         BUG_ON(down_read_trylock(&mm->mmap_sem));
3551
3552         mutex_lock(&mm_all_locks_mutex);
3553
3554         for (vma = mm->mmap; vma; vma = vma->vm_next) {
3555                 if (signal_pending(current))
3556                         goto out_unlock;
3557                 if (vma->vm_file && vma->vm_file->f_mapping &&
3558                                 is_vm_hugetlb_page(vma))
3559                         vm_lock_mapping(mm, vma->vm_file->f_mapping);
3560         }
3561
3562         for (vma = mm->mmap; vma; vma = vma->vm_next) {
3563                 if (signal_pending(current))
3564                         goto out_unlock;
3565                 if (vma->vm_file && vma->vm_file->f_mapping &&
3566                                 !is_vm_hugetlb_page(vma))
3567                         vm_lock_mapping(mm, vma->vm_file->f_mapping);
3568         }
3569
3570         for (vma = mm->mmap; vma; vma = vma->vm_next) {
3571                 if (signal_pending(current))
3572                         goto out_unlock;
3573                 if (vma->anon_vma)
3574                         list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
3575                                 vm_lock_anon_vma(mm, avc->anon_vma);
3576         }
3577
3578         return 0;
3579
3580 out_unlock:
3581         mm_drop_all_locks(mm);
3582         return -EINTR;
3583 }
3584
3585 static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
3586 {
3587         if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_root.rb_node)) {
3588                 /*
3589                  * The LSB of head.next can't change to 0 from under
3590                  * us because we hold the mm_all_locks_mutex.
3591                  *
3592                  * We must however clear the bitflag before unlocking
3593                  * the vma so the users using the anon_vma->rb_root will
3594                  * never see our bitflag.
3595                  *
3596                  * No need of atomic instructions here, head.next
3597                  * can't change from under us until we release the
3598                  * anon_vma->root->rwsem.
3599                  */
3600                 if (!__test_and_clear_bit(0, (unsigned long *)
3601                                           &anon_vma->root->rb_root.rb_root.rb_node))
3602                         BUG();
3603                 anon_vma_unlock_write(anon_vma);
3604         }
3605 }
3606
3607 static void vm_unlock_mapping(struct address_space *mapping)
3608 {
3609         if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
3610                 /*
3611                  * AS_MM_ALL_LOCKS can't change to 0 from under us
3612                  * because we hold the mm_all_locks_mutex.
3613                  */
3614                 i_mmap_unlock_write(mapping);
3615                 if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
3616                                         &mapping->flags))
3617                         BUG();
3618         }
3619 }
3620
3621 /*
3622  * The mmap_sem cannot be released by the caller until
3623  * mm_drop_all_locks() returns.
3624  */
3625 void mm_drop_all_locks(struct mm_struct *mm)
3626 {
3627         struct vm_area_struct *vma;
3628         struct anon_vma_chain *avc;
3629
3630         BUG_ON(down_read_trylock(&mm->mmap_sem));
3631         BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));
3632
3633         for (vma = mm->mmap; vma; vma = vma->vm_next) {
3634                 if (vma->anon_vma)
3635                         list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
3636                                 vm_unlock_anon_vma(avc->anon_vma);
3637                 if (vma->vm_file && vma->vm_file->f_mapping)
3638                         vm_unlock_mapping(vma->vm_file->f_mapping);
3639         }
3640
3641         mutex_unlock(&mm_all_locks_mutex);
3642 }
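
/*
 * Minimal sketch of the expected calling pattern (hypothetical caller,
 * for illustration only; mmu notifier registration does essentially
 * this): mmap_sem is held for writing across the whole take/drop window,
 * and the take step can fail if a signal is pending.
 */
static int example_with_all_locks(struct mm_struct *mm)
{
	int ret;

	down_write(&mm->mmap_sem);
	ret = mm_take_all_locks(mm);	/* -EINTR if interrupted by a signal */
	if (!ret) {
		/* every pte/vma/mm path in this mm is now excluded */
		mm_drop_all_locks(mm);
	}
	up_write(&mm->mmap_sem);

	return ret;
}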
3643
3644 /*
3645  * initialise the percpu counter for VM
3646  */
3647 void __init mmap_init(void)
3648 {
3649         int ret;
3650
3651         ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);
3652         VM_BUG_ON(ret);
3653 }
3654
3655 /*
3656  * Initialise sysctl_user_reserve_kbytes.
3657  *
3658  * This is intended to prevent a user from starting a single memory hogging
3659  * process, such that they cannot recover (kill the hog) in OVERCOMMIT_NEVER
3660  * mode.
3661  *
3662  * The default value is min(3% of free memory, 128MB).
3663  * 128MB is enough to recover with sshd/login, bash, and top/kill.
3664  */
3665 static int init_user_reserve(void)
3666 {
3667         unsigned long free_kbytes;
3668
3669         free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
3670
3671         sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17);
3672         return 0;
3673 }
3674 subsys_initcall(init_user_reserve);
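
/*
 * Worked example for the formula above (illustrative numbers): with 8 GiB
 * free, free_kbytes = 8388608 and free_kbytes / 32 = 262144 kB (~3%), so
 * the reserve is capped at 1UL << 17 = 131072 kB = 128 MiB.  With only
 * 1 GiB free, 1048576 / 32 = 32768 kB = 32 MiB wins instead.
 */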
3675
3676 /*
3677  * Initialise sysctl_admin_reserve_kbytes.
3678  *
3679  * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin
3680  * to log in and kill a memory hogging process.
3681  *
3682  * Systems with more than 256MB will reserve 8MB, enough to recover
3683  * with sshd, bash, and top in OVERCOMMIT_GUESS. Smaller systems will
3684  * only reserve 3% of free pages by default.
3685  */
3686 static int init_admin_reserve(void)
3687 {
3688         unsigned long free_kbytes;
3689
3690         free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
3691
3692         sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13);
3693         return 0;
3694 }
3695 subsys_initcall(init_admin_reserve);
3696
3697 /*
3698  * Reinitialise user and admin reserves if memory is added or removed.
3699  *
3700  * The default user reserve max is 128MB, and the default max for the
3701  * admin reserve is 8MB. These are usually, but not always, enough to
3702  * enable recovery from a memory hogging process using login/sshd, a shell,
3703  * and tools like top. It may make sense to increase or even disable the
3704  * reserve depending on the existence of swap or variations in the recovery
3705  * tools. So, the admin may have changed them.
3706  *
3707  * If memory is added and the reserves have been eliminated or increased above
3708  * the default max, then we'll trust the admin.
3709  *
3710  * If memory is removed and there isn't enough free memory, then we
3711  * need to reset the reserves.
3712  *
3713  * Otherwise keep the reserve set by the admin.
3714  */
3715 static int reserve_mem_notifier(struct notifier_block *nb,
3716                              unsigned long action, void *data)
3717 {
3718         unsigned long tmp, free_kbytes;
3719
3720         switch (action) {
3721         case MEM_ONLINE:
3722                 /* Default max is 128MB. Leave alone if modified by operator. */
3723                 tmp = sysctl_user_reserve_kbytes;
3724                 if (0 < tmp && tmp < (1UL << 17))
3725                         init_user_reserve();
3726
3727                 /* Default max is 8MB.  Leave alone if modified by operator. */
3728                 tmp = sysctl_admin_reserve_kbytes;
3729                 if (0 < tmp && tmp < (1UL << 13))
3730                         init_admin_reserve();
3731
3732                 break;
3733         case MEM_OFFLINE:
3734                 free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
3735
3736                 if (sysctl_user_reserve_kbytes > free_kbytes) {
3737                         init_user_reserve();
3738                         pr_info("vm.user_reserve_kbytes reset to %lu\n",
3739                                 sysctl_user_reserve_kbytes);
3740                 }
3741
3742                 if (sysctl_admin_reserve_kbytes > free_kbytes) {
3743                         init_admin_reserve();
3744                         pr_info("vm.admin_reserve_kbytes reset to %lu\n",
3745                                 sysctl_admin_reserve_kbytes);
3746                 }
3747                 break;
3748         default:
3749                 break;
3750         }
3751         return NOTIFY_OK;
3752 }
3753
3754 static struct notifier_block reserve_mem_nb = {
3755         .notifier_call = reserve_mem_notifier,
3756 };
3757
3758 static int __meminit init_reserve_notifier(void)
3759 {
3760         if (register_hotmemory_notifier(&reserve_mem_nb))
3761                 pr_err("Failed registering memory add/remove notifier for admin reserve\n");
3762
3763         return 0;
3764 }
3765 subsys_initcall(init_reserve_notifier);