// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/nommu.c
 *
 *  Replacement code for mm functions to support CPUs that don't
 *  have any form of memory management unit (thus no virtual memory).
 *
 *  See Documentation/admin-guide/mm/nommu-mmap.rst
 *
 *  Copyright (c) 2004-2008 David Howells <dhowells@redhat.com>
 *  Copyright (c) 2000-2003 David McCullough <davidm@snapgear.com>
 *  Copyright (c) 2000-2001 D Jeff Dionne <jeff@uClinux.org>
 *  Copyright (c) 2002      Greg Ungerer <gerg@snapgear.com>
 *  Copyright (c) 2007-2010 Paul Mundt <lethal@linux-sh.org>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/file.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/backing-dev.h>
#include <linux/compiler.h>
#include <linux/mount.h>
#include <linux/personality.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/printk.h>

#include <linux/uaccess.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include "internal.h"

void *high_memory;
EXPORT_SYMBOL(high_memory);
struct page *mem_map;
unsigned long max_mapnr;
EXPORT_SYMBOL(max_mapnr);
unsigned long highest_memmap_pfn;
int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
int heap_stack_gap = 0;

atomic_long_t mmap_pages_allocated;

EXPORT_SYMBOL(mem_map);

/* list of mapped, potentially shareable regions */
static struct kmem_cache *vm_region_jar;
struct rb_root nommu_region_tree = RB_ROOT;
DECLARE_RWSEM(nommu_region_sem);

const struct vm_operations_struct generic_file_vm_ops = {
};

/*
 * Return the total memory allocated for this pointer, not
 * just what the caller asked for.
 *
 * Doesn't have to be accurate, i.e. may have races.
 */
unsigned int kobjsize(const void *objp)
{
	struct page *page;

	/*
	 * If the object we have should not have ksize performed on it,
	 * return size of 0
	 */
	if (!objp || !virt_addr_valid(objp))
		return 0;

	page = virt_to_head_page(objp);

	/*
	 * If the allocator sets PageSlab, we know the pointer came from
	 * kmalloc().
	 */
	if (PageSlab(page))
		return ksize(objp);

	/*
	 * If it's not a compound page, see if we have a matching VMA
	 * region. This test is intentionally done in reverse order,
	 * so if there's no VMA, we still fall through and hand back
	 * PAGE_SIZE for 0-order pages.
	 */
	if (!PageCompound(page)) {
		struct vm_area_struct *vma;

		vma = find_vma(current->mm, (unsigned long)objp);
		if (vma)
			return vma->vm_end - vma->vm_start;
	}

	/*
	 * The ksize() function is only guaranteed to work for pointers
	 * returned by kmalloc(). So handle arbitrary pointers here.
	 */
	return page_size(page);
}

/**
 * follow_pfn - look up PFN at a user virtual address
 * @vma: memory mapping
 * @address: user virtual address
 * @pfn: location to store found PFN
 *
 * Only IO mappings and raw PFN mappings are allowed.
 *
 * Returns zero and the pfn at @pfn on success, -ve otherwise.
 */
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
	unsigned long *pfn)
{
	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
		return -EINVAL;

	*pfn = address >> PAGE_SHIFT;
	return 0;
}
EXPORT_SYMBOL(follow_pfn);
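
/*
 * Illustrative use (hypothetical caller, not from this file): because
 * there is no address translation on !MMU, the PFN handed back above is
 * just the virtual address shifted down.
 *
 *	unsigned long pfn;
 *
 *	if (follow_pfn(vma, addr, &pfn) == 0)
 *		pr_debug("addr %lx -> pfn %lx\n", addr, pfn);
 */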

LIST_HEAD(vmap_area_list);

void vfree(const void *addr)
{
	kfree(addr);
}
EXPORT_SYMBOL(vfree);

void *__vmalloc(unsigned long size, gfp_t gfp_mask)
{
	/*
	 * You can't specify __GFP_HIGHMEM with kmalloc() since kmalloc()
	 * returns only a logical address.
	 */
	return kmalloc(size, (gfp_mask | __GFP_COMP) & ~__GFP_HIGHMEM);
}
EXPORT_SYMBOL(__vmalloc);
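
/*
 * Sketch of the usual pairing (illustrative only): on !MMU, vmalloc memory
 * comes straight from kmalloc(), so it is physically contiguous and vfree()
 * above simply forwards to kfree().
 *
 *	void *p = __vmalloc(8192, GFP_KERNEL);
 *
 *	if (p) {
 *		memset(p, 0, 8192);
 *		vfree(p);
 *	}
 */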

void *__vmalloc_node_range(unsigned long size, unsigned long align,
		unsigned long start, unsigned long end, gfp_t gfp_mask,
		pgprot_t prot, unsigned long vm_flags, int node,
		const void *caller)
{
	return __vmalloc(size, gfp_mask);
}

void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask,
		int node, const void *caller)
{
	return __vmalloc(size, gfp_mask);
}

static void *__vmalloc_user_flags(unsigned long size, gfp_t flags)
{
	void *ret;

	ret = __vmalloc(size, flags);
	if (ret) {
		struct vm_area_struct *vma;

		mmap_write_lock(current->mm);
		vma = find_vma(current->mm, (unsigned long)ret);
		if (vma)
			vm_flags_set(vma, VM_USERMAP);
		mmap_write_unlock(current->mm);
	}

	return ret;
}

void *vmalloc_user(unsigned long size)
{
	return __vmalloc_user_flags(size, GFP_KERNEL | __GFP_ZERO);
}
EXPORT_SYMBOL(vmalloc_user);
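
/*
 * Illustrative driver-side sketch (hypothetical buffer): vmalloc_user()
 * tags the backing VMA with VM_USERMAP, which is what later allows the
 * buffer to be handed to userspace via remap_vmalloc_range().
 *
 *	void *buf = vmalloc_user(4 * PAGE_SIZE);  // zeroed, VM_USERMAP set
 */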

struct page *vmalloc_to_page(const void *addr)
{
	return virt_to_page(addr);
}
EXPORT_SYMBOL(vmalloc_to_page);

unsigned long vmalloc_to_pfn(const void *addr)
{
	return page_to_pfn(virt_to_page(addr));
}
EXPORT_SYMBOL(vmalloc_to_pfn);

long vread(char *buf, char *addr, unsigned long count)
{
	/* Don't allow overflow */
	if ((unsigned long) buf + count < count)
		count = -(unsigned long) buf;

	memcpy(buf, addr, count);
	return count;
}

/*
 *	vmalloc  -  allocate virtually contiguous memory
 *
 *	@size:		allocation size
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vmalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL);
}
EXPORT_SYMBOL(vmalloc);

void *vmalloc_huge(unsigned long size, gfp_t gfp_mask) __weak __alias(__vmalloc);

/*
 *	vzalloc - allocate virtually contiguous memory with zero fill
 *
 *	@size:		allocation size
 *
 *	Allocate enough pages to cover @size from the page level
 *	allocator and map them into contiguous kernel virtual space.
 *	The memory allocated is set to zero.
 *
 *	For tight control over page level allocator and protection flags
 *	use __vmalloc() instead.
 */
void *vzalloc(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL | __GFP_ZERO);
}
EXPORT_SYMBOL(vzalloc);

/**
 * vmalloc_node - allocate memory on a specific node
 * @size:	allocation size
 * @node:	numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vmalloc_node(unsigned long size, int node)
{
	return vmalloc(size);
}
EXPORT_SYMBOL(vmalloc_node);

/**
 * vzalloc_node - allocate memory on a specific node with zero fill
 * @size:	allocation size
 * @node:	numa node
 *
 * Allocate enough pages to cover @size from the page level
 * allocator and map them into contiguous kernel virtual space.
 * The memory allocated is set to zero.
 *
 * For tight control over page level allocator and protection flags
 * use __vmalloc() instead.
 */
void *vzalloc_node(unsigned long size, int node)
{
	return vzalloc(size);
}
EXPORT_SYMBOL(vzalloc_node);

/**
 * vmalloc_32  -  allocate virtually contiguous memory (32bit addressable)
 *	@size:		allocation size
 *
 *	Allocate enough 32bit PA addressable pages to cover @size from the
 *	page level allocator and map them into contiguous kernel virtual space.
 */
void *vmalloc_32(unsigned long size)
{
	return __vmalloc(size, GFP_KERNEL);
}
EXPORT_SYMBOL(vmalloc_32);

/**
 * vmalloc_32_user - allocate zeroed virtually contiguous 32bit memory
 *	@size:		allocation size
 *
 * The resulting memory area is 32bit addressable and zeroed so it can be
 * mapped to userspace without leaking data.
 *
 * VM_USERMAP is set on the corresponding VMA so that subsequent calls to
 * remap_vmalloc_range() are permissible.
 */
void *vmalloc_32_user(unsigned long size)
{
	/*
	 * We'll have to sort out the ZONE_DMA bits for 64-bit,
	 * but for now this can simply use vmalloc_user() directly.
	 */
	return vmalloc_user(size);
}
EXPORT_SYMBOL(vmalloc_32_user);

void *vmap(struct page **pages, unsigned int count, unsigned long flags, pgprot_t prot)
{
	BUG();
	return NULL;
}
EXPORT_SYMBOL(vmap);

void vunmap(const void *addr)
{
	BUG();
}
EXPORT_SYMBOL(vunmap);

void *vm_map_ram(struct page **pages, unsigned int count, int node)
{
	BUG();
	return NULL;
}
EXPORT_SYMBOL(vm_map_ram);

void vm_unmap_ram(const void *mem, unsigned int count)
{
	BUG();
}
EXPORT_SYMBOL(vm_unmap_ram);

void vm_unmap_aliases(void)
{
}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);

void free_vm_area(struct vm_struct *area)
{
	BUG();
}
EXPORT_SYMBOL_GPL(free_vm_area);

int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
		   struct page *page)
{
	return -EINVAL;
}
EXPORT_SYMBOL(vm_insert_page);

int vm_map_pages(struct vm_area_struct *vma, struct page **pages,
			unsigned long num)
{
	return -EINVAL;
}
EXPORT_SYMBOL(vm_map_pages);

int vm_map_pages_zero(struct vm_area_struct *vma, struct page **pages,
				unsigned long num)
{
	return -EINVAL;
}
EXPORT_SYMBOL(vm_map_pages_zero);

/*
 *  sys_brk() for the most part doesn't need the global kernel
 *  lock, except when an application is doing something nasty
 *  like trying to un-brk an area that has already been mapped
 *  to a regular file.  in this case, the unmapping will need
 *  to invoke file system routines that need the global lock.
 */
SYSCALL_DEFINE1(brk, unsigned long, brk)
{
	struct mm_struct *mm = current->mm;

	if (brk < mm->start_brk || brk > mm->context.end_brk)
		return mm->brk;

	if (mm->brk == brk)
		return mm->brk;

	/*
	 * Always allow shrinking brk
	 */
	if (brk <= mm->brk) {
		mm->brk = brk;
		return brk;
	}

	/*
	 * Ok, looks good - let it rip.
	 */
	flush_icache_user_range(mm->brk, brk);
	return mm->brk = brk;
}
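
/*
 * Worked example of the semantics above (hypothetical values): with
 * start_brk at 0x100000 and context.end_brk at 0x110000, brk(0x108000)
 * moves the break and returns 0x108000, while brk(0x120000) is out of
 * range and just returns the current break - the heap ceiling is fixed
 * at setup time and nothing is allocated on demand here.
 */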

/*
 * initialise the percpu counter for VM and region record slabs
 */
void __init mmap_init(void)
{
	int ret;

	ret = percpu_counter_init(&vm_committed_as, 0, GFP_KERNEL);
	VM_BUG_ON(ret);
	vm_region_jar = KMEM_CACHE(vm_region, SLAB_PANIC|SLAB_ACCOUNT);
}

/*
 * validate the region tree
 * - the caller must hold the region lock
 */
#ifdef CONFIG_DEBUG_NOMMU_REGIONS
static noinline void validate_nommu_regions(void)
{
	struct vm_region *region, *last;
	struct rb_node *p, *lastp;

	lastp = rb_first(&nommu_region_tree);
	if (!lastp)
		return;

	last = rb_entry(lastp, struct vm_region, vm_rb);
	BUG_ON(last->vm_end <= last->vm_start);
	BUG_ON(last->vm_top < last->vm_end);

	while ((p = rb_next(lastp))) {
		region = rb_entry(p, struct vm_region, vm_rb);
		last = rb_entry(lastp, struct vm_region, vm_rb);

		BUG_ON(region->vm_end <= region->vm_start);
		BUG_ON(region->vm_top < region->vm_end);
		BUG_ON(region->vm_start < last->vm_top);

		lastp = p;
	}
}
#else
static void validate_nommu_regions(void)
{
}
#endif

/*
 * add a region into the global tree
 */
static void add_nommu_region(struct vm_region *region)
{
	struct vm_region *pregion;
	struct rb_node **p, *parent;

	validate_nommu_regions();

	parent = NULL;
	p = &nommu_region_tree.rb_node;
	while (*p) {
		parent = *p;
		pregion = rb_entry(parent, struct vm_region, vm_rb);
		if (region->vm_start < pregion->vm_start)
			p = &(*p)->rb_left;
		else if (region->vm_start > pregion->vm_start)
			p = &(*p)->rb_right;
		else if (pregion == region)
			return;
		else
			BUG();
	}

	rb_link_node(&region->vm_rb, parent, p);
	rb_insert_color(&region->vm_rb, &nommu_region_tree);

	validate_nommu_regions();
}

/*
 * delete a region from the global tree
 */
static void delete_nommu_region(struct vm_region *region)
{
	BUG_ON(!nommu_region_tree.rb_node);

	validate_nommu_regions();
	rb_erase(&region->vm_rb, &nommu_region_tree);
	validate_nommu_regions();
}

/*
 * free a contiguous series of pages
 */
static void free_page_series(unsigned long from, unsigned long to)
{
	for (; from < to; from += PAGE_SIZE) {
		struct page *page = virt_to_page((void *)from);

		atomic_long_dec(&mmap_pages_allocated);
		put_page(page);
	}
}

/*
 * release a reference to a region
 * - the caller must hold the region semaphore for writing, which this releases
 * - the region may not have been added to the tree yet, in which case vm_top
 *   will equal vm_start
 */
static void __put_nommu_region(struct vm_region *region)
	__releases(nommu_region_sem)
{
	BUG_ON(!nommu_region_tree.rb_node);

	if (--region->vm_usage == 0) {
		if (region->vm_top > region->vm_start)
			delete_nommu_region(region);
		up_write(&nommu_region_sem);

		if (region->vm_file)
			fput(region->vm_file);

		/* IO memory and memory shared directly out of the pagecache
		 * from ramfs/tmpfs mustn't be released here */
		if (region->vm_flags & VM_MAPPED_COPY)
			free_page_series(region->vm_start, region->vm_top);
		kmem_cache_free(vm_region_jar, region);
	} else {
		up_write(&nommu_region_sem);
	}
}

/*
 * release a reference to a region
 */
static void put_nommu_region(struct vm_region *region)
{
	down_write(&nommu_region_sem);
	__put_nommu_region(region);
}

static void setup_vma_to_mm(struct vm_area_struct *vma, struct mm_struct *mm)
{
	vma->vm_mm = mm;

	/* add the VMA to the mapping */
	if (vma->vm_file) {
		struct address_space *mapping = vma->vm_file->f_mapping;

		i_mmap_lock_write(mapping);
		flush_dcache_mmap_lock(mapping);
		vma_interval_tree_insert(vma, &mapping->i_mmap);
		flush_dcache_mmap_unlock(mapping);
		i_mmap_unlock_write(mapping);
	}
}

static void cleanup_vma_from_mm(struct vm_area_struct *vma)
{
	vma->vm_mm->map_count--;
	/* remove the VMA from the mapping */
	if (vma->vm_file) {
		struct address_space *mapping;
		mapping = vma->vm_file->f_mapping;

		i_mmap_lock_write(mapping);
		flush_dcache_mmap_lock(mapping);
		vma_interval_tree_remove(vma, &mapping->i_mmap);
		flush_dcache_mmap_unlock(mapping);
		i_mmap_unlock_write(mapping);
	}
}

/*
 * delete a VMA from its owning mm_struct and address space
 */
static int delete_vma_from_mm(struct vm_area_struct *vma)
{
	VMA_ITERATOR(vmi, vma->vm_mm, vma->vm_start);

	if (vma_iter_prealloc(&vmi)) {
		pr_warn("Allocation of vma tree for process %d failed\n",
		       current->pid);
		return -ENOMEM;
	}
	cleanup_vma_from_mm(vma);

	/* remove from the MM's tree and list */
	vma_iter_clear(&vmi, vma->vm_start, vma->vm_end);
	return 0;
}

/*
 * destroy a VMA record
 */
static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
{
	if (vma->vm_ops && vma->vm_ops->close)
		vma->vm_ops->close(vma);
	if (vma->vm_file)
		fput(vma->vm_file);
	put_nommu_region(vma->vm_region);
	vm_area_free(vma);
}

struct vm_area_struct *find_vma_intersection(struct mm_struct *mm,
					     unsigned long start_addr,
					     unsigned long end_addr)
{
	unsigned long index = start_addr;

	mmap_assert_locked(mm);
	return mt_find(&mm->mm_mt, &index, end_addr - 1);
}
EXPORT_SYMBOL(find_vma_intersection);

/*
 * look up the first VMA in which addr resides, NULL if none
 * - should be called with mm->mmap_lock at least held readlocked
 */
struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
{
	VMA_ITERATOR(vmi, mm, addr);

	return vma_iter_load(&vmi);
}
EXPORT_SYMBOL(find_vma);

/*
 * find a VMA
 * - we don't extend stack VMAs under NOMMU conditions
 */
struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
{
	return find_vma(mm, addr);
}

/*
 * expand a stack to a given address
 * - not supported under NOMMU conditions
 */
int expand_stack(struct vm_area_struct *vma, unsigned long address)
{
	return -ENOMEM;
}

/*
 * look up the VMA that exactly matches addr
 * - should be called with mm->mmap_lock at least held readlocked
 */
static struct vm_area_struct *find_vma_exact(struct mm_struct *mm,
					     unsigned long addr,
					     unsigned long len)
{
	struct vm_area_struct *vma;
	unsigned long end = addr + len;
	VMA_ITERATOR(vmi, mm, addr);

	vma = vma_iter_load(&vmi);
	if (!vma)
		return NULL;
	if (vma->vm_start != addr)
		return NULL;
	if (vma->vm_end != end)
		return NULL;

	return vma;
}

/*
 * determine whether a mapping should be permitted and, if so, what sort of
 * mapping we're capable of supporting
 */
static int validate_mmap_request(struct file *file,
				 unsigned long addr,
				 unsigned long len,
				 unsigned long prot,
				 unsigned long flags,
				 unsigned long pgoff,
				 unsigned long *_capabilities)
{
	unsigned long capabilities, rlen;
	int ret;

	/* do the simple checks first */
	if (flags & MAP_FIXED)
		return -EINVAL;

	if ((flags & MAP_TYPE) != MAP_PRIVATE &&
	    (flags & MAP_TYPE) != MAP_SHARED)
		return -EINVAL;

	if (!len)
		return -EINVAL;

	/* Careful about overflows.. */
	rlen = PAGE_ALIGN(len);
	if (!rlen || rlen > TASK_SIZE)
		return -ENOMEM;

	/* offset overflow? */
	if ((pgoff + (rlen >> PAGE_SHIFT)) < pgoff)
		return -EOVERFLOW;

	if (file) {
		/* files must support mmap */
		if (!file->f_op->mmap)
			return -ENODEV;

		/* work out if what we've got could possibly be shared
		 * - we support chardevs that provide their own "memory"
		 * - we support files/blockdevs that are memory backed
		 */
		if (file->f_op->mmap_capabilities) {
			capabilities = file->f_op->mmap_capabilities(file);
		} else {
			/* no explicit capabilities set, so assume some
			 * defaults */
			switch (file_inode(file)->i_mode & S_IFMT) {
			case S_IFREG:
			case S_IFBLK:
				capabilities = NOMMU_MAP_COPY;
				break;

			case S_IFCHR:
				capabilities =
					NOMMU_MAP_DIRECT |
					NOMMU_MAP_READ |
					NOMMU_MAP_WRITE;
				break;

			default:
				return -EINVAL;
			}
		}

		/* eliminate any capabilities that we can't support on this
		 * device */
		if (!file->f_op->get_unmapped_area)
			capabilities &= ~NOMMU_MAP_DIRECT;
		if (!(file->f_mode & FMODE_CAN_READ))
			capabilities &= ~NOMMU_MAP_COPY;

		/* The file shall have been opened with read permission. */
		if (!(file->f_mode & FMODE_READ))
			return -EACCES;

		if (flags & MAP_SHARED) {
			/* do checks for writing, appending and locking */
			if ((prot & PROT_WRITE) &&
			    !(file->f_mode & FMODE_WRITE))
				return -EACCES;

			if (IS_APPEND(file_inode(file)) &&
			    (file->f_mode & FMODE_WRITE))
				return -EACCES;

			if (!(capabilities & NOMMU_MAP_DIRECT))
				return -ENODEV;

			/* we mustn't privatise shared mappings */
			capabilities &= ~NOMMU_MAP_COPY;
		} else {
			/* we're going to read the file into private memory we
			 * allocate */
			if (!(capabilities & NOMMU_MAP_COPY))
				return -ENODEV;

			/* we don't permit a private writable mapping to be
			 * shared with the backing device */
			if (prot & PROT_WRITE)
				capabilities &= ~NOMMU_MAP_DIRECT;
		}

		if (capabilities & NOMMU_MAP_DIRECT) {
			if (((prot & PROT_READ)  && !(capabilities & NOMMU_MAP_READ))  ||
			    ((prot & PROT_WRITE) && !(capabilities & NOMMU_MAP_WRITE)) ||
			    ((prot & PROT_EXEC)  && !(capabilities & NOMMU_MAP_EXEC))
			    ) {
				capabilities &= ~NOMMU_MAP_DIRECT;
				if (flags & MAP_SHARED) {
					pr_warn("MAP_SHARED not completely supported on !MMU\n");
					return -EINVAL;
				}
			}
		}

		/* handle executable mappings and implied executable
		 * mappings */
		if (path_noexec(&file->f_path)) {
			if (prot & PROT_EXEC)
				return -EPERM;
		} else if ((prot & PROT_READ) && !(prot & PROT_EXEC)) {
			/* handle implication of PROT_EXEC by PROT_READ */
			if (current->personality & READ_IMPLIES_EXEC) {
				if (capabilities & NOMMU_MAP_EXEC)
					prot |= PROT_EXEC;
			}
		} else if ((prot & PROT_READ) &&
			 (prot & PROT_EXEC) &&
			 !(capabilities & NOMMU_MAP_EXEC)
			 ) {
			/* backing file is not executable, try to copy */
			capabilities &= ~NOMMU_MAP_DIRECT;
		}
	} else {
		/* anonymous mappings are always memory backed and can be
		 * privately mapped
		 */
		capabilities = NOMMU_MAP_COPY;

		/* handle PROT_EXEC implication by PROT_READ */
		if ((prot & PROT_READ) &&
		    (current->personality & READ_IMPLIES_EXEC))
			prot |= PROT_EXEC;
	}

	/* allow the security API to have its say */
	ret = security_mmap_addr(addr);
	if (ret < 0)
		return ret;

	/* looks okay */
	*_capabilities = capabilities;
	return 0;
}
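
/*
 * Example of the rules above (illustrative): an anonymous
 * mmap(NULL, len, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0)
 * is always a candidate (NOMMU_MAP_COPY), whereas any request carrying
 * MAP_FIXED fails with -EINVAL up front, since a mapping cannot be placed
 * at a caller-chosen address without an MMU.
 */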

/*
 * we've determined that we can make the mapping, now translate what we
 * now know into VMA flags
 */
static unsigned long determine_vm_flags(struct file *file,
					unsigned long prot,
					unsigned long flags,
					unsigned long capabilities)
{
	unsigned long vm_flags;

	vm_flags = calc_vm_prot_bits(prot, 0) | calc_vm_flag_bits(flags);

	if (!file) {
		/*
		 * MAP_ANONYMOUS. MAP_SHARED is mapped to MAP_PRIVATE, because
		 * there is no fork().
		 */
		vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
	} else if (flags & MAP_PRIVATE) {
		/* MAP_PRIVATE file mapping */
		if (capabilities & NOMMU_MAP_DIRECT)
			vm_flags |= (capabilities & NOMMU_VMFLAGS);
		else
			vm_flags |= VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;

		if (!(prot & PROT_WRITE) && !current->ptrace)
			/*
			 * R/O private file mapping which cannot be used to
			 * modify memory, especially also not via active ptrace
			 * (e.g., set breakpoints) or later by upgrading
			 * permissions (no mprotect()). We can try overlaying
			 * the file mapping, which will work e.g., on chardevs,
			 * ramfs/tmpfs/shmfs and romfs/cramfs.
			 */
			vm_flags |= VM_MAYOVERLAY;
	} else {
		/* MAP_SHARED file mapping: NOMMU_MAP_DIRECT is set. */
		vm_flags |= VM_SHARED | VM_MAYSHARE |
			    (capabilities & NOMMU_VMFLAGS);
	}

	return vm_flags;
}

/*
 * set up a shared mapping on a file (the driver or filesystem provides and
 * pins the storage)
 */
static int do_mmap_shared_file(struct vm_area_struct *vma)
{
	int ret;

	ret = call_mmap(vma->vm_file, vma);
	if (ret == 0) {
		vma->vm_region->vm_top = vma->vm_region->vm_end;
		return 0;
	}
	if (ret != -ENOSYS)
		return ret;

	/* getting -ENOSYS indicates that direct mmap isn't possible (as
	 * opposed to tried but failed) so we can only give a suitable error as
	 * it's not possible to make a private copy if MAP_SHARED was given */
	return -ENODEV;
}

/*
 * set up a private mapping or an anonymous shared mapping
 */
static int do_mmap_private(struct vm_area_struct *vma,
			   struct vm_region *region,
			   unsigned long len,
			   unsigned long capabilities)
{
	unsigned long total, point;
	void *base;
	int ret, order;

	/*
	 * Invoke the file's mapping function so that it can keep track of
	 * shared mappings on devices or memory. VM_MAYOVERLAY will be set if
	 * it may attempt to share, which will make is_nommu_shared_mapping()
	 * happy.
	 */
	if (capabilities & NOMMU_MAP_DIRECT) {
		ret = call_mmap(vma->vm_file, vma);
		/* shouldn't return success if we're not sharing */
		if (WARN_ON_ONCE(!is_nommu_shared_mapping(vma->vm_flags)))
			ret = -ENOSYS;
		if (ret == 0) {
			vma->vm_region->vm_top = vma->vm_region->vm_end;
			return 0;
		}
		if (ret != -ENOSYS)
			return ret;

		/* getting an ENOSYS error indicates that direct mmap isn't
		 * possible (as opposed to tried but failed) so we'll try to
		 * make a private copy of the data and map that instead */
	}

	/* allocate some memory to hold the mapping
	 * - note that this may not return a page-aligned address if the object
	 *   we're allocating is smaller than a page
	 */
	order = get_order(len);
	total = 1 << order;
	point = len >> PAGE_SHIFT;

	/* we don't want to allocate a power-of-2 sized page set */
	if (sysctl_nr_trim_pages && total - point >= sysctl_nr_trim_pages)
		total = point;

	base = alloc_pages_exact(total << PAGE_SHIFT, GFP_KERNEL);
	if (!base)
		goto enomem;

	atomic_long_add(total, &mmap_pages_allocated);

	vm_flags_set(vma, VM_MAPPED_COPY);
	region->vm_flags = vma->vm_flags;
	region->vm_start = (unsigned long) base;
	region->vm_end   = region->vm_start + len;
	region->vm_top   = region->vm_start + (total << PAGE_SHIFT);

	vma->vm_start = region->vm_start;
	vma->vm_end   = region->vm_start + len;

	if (vma->vm_file) {
		/* read the contents of a file into the copy */
		loff_t fpos;

		fpos = vma->vm_pgoff;
		fpos <<= PAGE_SHIFT;

		ret = kernel_read(vma->vm_file, base, len, &fpos);
		if (ret < 0)
			goto error_free;

		/* clear the last little bit */
		if (ret < len)
			memset(base + ret, 0, len - ret);

	} else {
		vma_set_anonymous(vma);
	}

	return 0;

error_free:
	free_page_series(region->vm_start, region->vm_top);
	region->vm_start = vma->vm_start = 0;
	region->vm_end   = vma->vm_end = 0;
	region->vm_top   = 0;
	return ret;

enomem:
	pr_err("Allocation of length %lu from process %d (%s) failed\n",
	       len, current->pid, current->comm);
	show_free_areas(0, NULL);
	return -ENOMEM;
}
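
/*
 * Worked example of the trimming above (assuming 4KiB pages and the
 * default sysctl_nr_trim_pages of 1): for a 3-page len, get_order() rounds
 * the request up to total = 4 pages; total - point is then 1 >=
 * sysctl_nr_trim_pages, so total is cut back to 3 pages before
 * alloc_pages_exact() and the power-of-2 excess page is never allocated.
 */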

/*
 * handle mapping creation for uClinux
 */
unsigned long do_mmap(struct file *file,
			unsigned long addr,
			unsigned long len,
			unsigned long prot,
			unsigned long flags,
			unsigned long pgoff,
			unsigned long *populate,
			struct list_head *uf)
{
	struct vm_area_struct *vma;
	struct vm_region *region;
	struct rb_node *rb;
	vm_flags_t vm_flags;
	unsigned long capabilities, result;
	int ret;
	VMA_ITERATOR(vmi, current->mm, 0);

	*populate = 0;

	/* decide whether we should attempt the mapping, and if so what sort of
	 * mapping */
	ret = validate_mmap_request(file, addr, len, prot, flags, pgoff,
				    &capabilities);
	if (ret < 0)
		return ret;

	/* we ignore the address hint */
	addr = 0;
	len = PAGE_ALIGN(len);

	/* we've determined that we can make the mapping, now translate what we
	 * now know into VMA flags */
	vm_flags = determine_vm_flags(file, prot, flags, capabilities);

	/* we're going to need to record the mapping */
	region = kmem_cache_zalloc(vm_region_jar, GFP_KERNEL);
	if (!region)
		goto error_getting_region;

	vma = vm_area_alloc(current->mm);
	if (!vma)
		goto error_getting_vma;

	if (vma_iter_prealloc(&vmi))
		goto error_vma_iter_prealloc;

	region->vm_usage = 1;
	region->vm_flags = vm_flags;
	region->vm_pgoff = pgoff;

	vm_flags_init(vma, vm_flags);
	vma->vm_pgoff = pgoff;

	if (file) {
		region->vm_file = get_file(file);
		vma->vm_file = get_file(file);
	}

	down_write(&nommu_region_sem);

	/* if we want to share, we need to check for regions created by other
	 * mmap() calls that overlap with our proposed mapping
	 * - we can only share with a superset match on most regular files
	 * - shared mappings on character devices and memory backed files are
	 *   permitted to overlap inexactly as far as we are concerned, for in
	 *   these cases sharing is handled in the driver or filesystem rather
	 *   than here
	 */
	if (is_nommu_shared_mapping(vm_flags)) {
		struct vm_region *pregion;
		unsigned long pglen, rpglen, pgend, rpgend, start;

		pglen = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
		pgend = pgoff + pglen;

		for (rb = rb_first(&nommu_region_tree); rb; rb = rb_next(rb)) {
			pregion = rb_entry(rb, struct vm_region, vm_rb);

			if (!is_nommu_shared_mapping(pregion->vm_flags))
				continue;

			/* search for overlapping mappings on the same file */
			if (file_inode(pregion->vm_file) !=
			    file_inode(file))
				continue;

			if (pregion->vm_pgoff >= pgend)
				continue;

			rpglen = pregion->vm_end - pregion->vm_start;
			rpglen = (rpglen + PAGE_SIZE - 1) >> PAGE_SHIFT;
			rpgend = pregion->vm_pgoff + rpglen;
			if (pgoff >= rpgend)
				continue;

			/* handle inexactly overlapping matches between
			 * mappings */
			if ((pregion->vm_pgoff != pgoff || rpglen != pglen) &&
			    !(pgoff >= pregion->vm_pgoff && pgend <= rpgend)) {
				/* new mapping is not a subset of the region */
				if (!(capabilities & NOMMU_MAP_DIRECT))
					goto sharing_violation;
				continue;
			}

			/* we've found a region we can share */
			pregion->vm_usage++;
			vma->vm_region = pregion;
			start = pregion->vm_start;
			start += (pgoff - pregion->vm_pgoff) << PAGE_SHIFT;
			vma->vm_start = start;
			vma->vm_end = start + len;

			if (pregion->vm_flags & VM_MAPPED_COPY)
				vm_flags_set(vma, VM_MAPPED_COPY);
			else {
				ret = do_mmap_shared_file(vma);
				if (ret < 0) {
					vma->vm_region = NULL;
					vma->vm_start = 0;
					vma->vm_end = 0;
					pregion->vm_usage--;
					pregion = NULL;
					goto error_just_free;
				}
			}
			fput(region->vm_file);
			kmem_cache_free(vm_region_jar, region);
			region = pregion;
			result = start;
			goto share;
		}

		/* obtain the address at which to make a shared mapping
		 * - this is the hook for quasi-memory character devices to
		 *   tell us the location of a shared mapping
		 */
		if (capabilities & NOMMU_MAP_DIRECT) {
			addr = file->f_op->get_unmapped_area(file, addr, len,
							     pgoff, flags);
			if (IS_ERR_VALUE(addr)) {
				ret = addr;
				if (ret != -ENOSYS)
					goto error_just_free;

				/* the driver refused to tell us where to site
				 * the mapping so we'll have to attempt to copy
				 * it */
				ret = -ENODEV;
				if (!(capabilities & NOMMU_MAP_COPY))
					goto error_just_free;

				capabilities &= ~NOMMU_MAP_DIRECT;
			} else {
				vma->vm_start = region->vm_start = addr;
				vma->vm_end = region->vm_end = addr + len;
			}
		}
	}

	vma->vm_region = region;

	/* set up the mapping
	 * - the region is filled in if NOMMU_MAP_DIRECT is still set
	 */
	if (file && vma->vm_flags & VM_SHARED)
		ret = do_mmap_shared_file(vma);
	else
		ret = do_mmap_private(vma, region, len, capabilities);
	if (ret < 0)
		goto error_just_free;
	add_nommu_region(region);

	/* clear anonymous mappings that don't ask for uninitialized data */
	if (!vma->vm_file &&
	    (!IS_ENABLED(CONFIG_MMAP_ALLOW_UNINITIALIZED) ||
	     !(flags & MAP_UNINITIALIZED)))
		memset((void *)region->vm_start, 0,
		       region->vm_end - region->vm_start);

	/* okay... we have a mapping; now we have to register it */
	result = vma->vm_start;

	current->mm->total_vm += len >> PAGE_SHIFT;

share:
	BUG_ON(!vma->vm_region);
	setup_vma_to_mm(vma, current->mm);
	current->mm->map_count++;
	/* add the VMA to the tree */
	vma_iter_store(&vmi, vma);

	/* we flush the region from the icache only when the first executable
	 * mapping of it is made */
	if (vma->vm_flags & VM_EXEC && !region->vm_icache_flushed) {
		flush_icache_user_range(region->vm_start, region->vm_end);
		region->vm_icache_flushed = true;
	}

	up_write(&nommu_region_sem);

	return result;

error_just_free:
	up_write(&nommu_region_sem);
error:
	vma_iter_free(&vmi);
	if (region->vm_file)
		fput(region->vm_file);
	kmem_cache_free(vm_region_jar, region);
	if (vma->vm_file)
		fput(vma->vm_file);
	vm_area_free(vma);
	return ret;

sharing_violation:
	up_write(&nommu_region_sem);
	pr_warn("Attempt to share mismatched mappings\n");
	ret = -EINVAL;
	goto error;

error_getting_vma:
	kmem_cache_free(vm_region_jar, region);
	pr_warn("Allocation of vma for %lu byte allocation from process %d failed\n",
			len, current->pid);
	show_free_areas(0, NULL);
	return -ENOMEM;

error_getting_region:
	pr_warn("Allocation of vm region for %lu byte allocation from process %d failed\n",
			len, current->pid);
	show_free_areas(0, NULL);
	return -ENOMEM;

error_vma_iter_prealloc:
	kmem_cache_free(vm_region_jar, region);
	vm_area_free(vma);
	pr_warn("Allocation of vma tree for process %d failed\n", current->pid);
	show_free_areas(0, NULL);
	return -ENOMEM;
}
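
/*
 * Illustrative in-kernel caller (hypothetical sizes): callers typically
 * reach this function through the vm_mmap() wrapper, e.g. a loader
 * mapping a file region:
 *
 *	addr = vm_mmap(file, 0, len, PROT_READ | PROT_EXEC,
 *		       MAP_PRIVATE, pgoff << PAGE_SHIFT);
 *	if (IS_ERR_VALUE(addr))
 *		return addr;
 *
 * Note that the address hint is ignored above, so the returned address is
 * wherever the allocator or driver placed the mapping.
 */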

unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
			      unsigned long prot, unsigned long flags,
			      unsigned long fd, unsigned long pgoff)
{
	struct file *file = NULL;
	unsigned long retval = -EBADF;

	audit_mmap_fd(fd, flags);
	if (!(flags & MAP_ANONYMOUS)) {
		file = fget(fd);
		if (!file)
			goto out;
	}

	retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);

	if (file)
		fput(file);
out:
	return retval;
}

SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags,
		unsigned long, fd, unsigned long, pgoff)
{
	return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
}

#ifdef __ARCH_WANT_SYS_OLD_MMAP
struct mmap_arg_struct {
	unsigned long addr;
	unsigned long len;
	unsigned long prot;
	unsigned long flags;
	unsigned long fd;
	unsigned long offset;
};

SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
{
	struct mmap_arg_struct a;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;
	if (offset_in_page(a.offset))
		return -EINVAL;

	return ksys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
			       a.offset >> PAGE_SHIFT);
}
#endif /* __ARCH_WANT_SYS_OLD_MMAP */

/*
 * split a vma into two pieces at address 'addr', a new vma is allocated either
 * for the first part or the tail.
 */
int split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
	      unsigned long addr, int new_below)
{
	struct vm_area_struct *new;
	struct vm_region *region;
	unsigned long npages;
	struct mm_struct *mm;

	/* we're only permitted to split anonymous regions (these should have
	 * only a single usage on the region) */
	if (vma->vm_file)
		return -ENOMEM;

	mm = vma->vm_mm;
	if (mm->map_count >= sysctl_max_map_count)
		return -ENOMEM;

	region = kmem_cache_alloc(vm_region_jar, GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	new = vm_area_dup(vma);
	if (!new)
		goto err_vma_dup;

	if (vma_iter_prealloc(vmi)) {
		pr_warn("Allocation of vma tree for process %d failed\n",
			current->pid);
		goto err_vmi_preallocate;
	}

	/* most fields are the same, copy all, and then fixup */
	*region = *vma->vm_region;
	new->vm_region = region;

	npages = (addr - vma->vm_start) >> PAGE_SHIFT;

	if (new_below) {
		region->vm_top = region->vm_end = new->vm_end = addr;
	} else {
		region->vm_start = new->vm_start = addr;
		region->vm_pgoff = new->vm_pgoff += npages;
	}

	if (new->vm_ops && new->vm_ops->open)
		new->vm_ops->open(new);

	down_write(&nommu_region_sem);
	delete_nommu_region(vma->vm_region);
	if (new_below) {
		vma->vm_region->vm_start = vma->vm_start = addr;
		vma->vm_region->vm_pgoff = vma->vm_pgoff += npages;
	} else {
		vma->vm_region->vm_end = vma->vm_end = addr;
		vma->vm_region->vm_top = addr;
	}
	add_nommu_region(vma->vm_region);
	add_nommu_region(new->vm_region);
	up_write(&nommu_region_sem);

	setup_vma_to_mm(vma, mm);
	setup_vma_to_mm(new, mm);
	vma_iter_store(vmi, new);
	mm->map_count++;
	return 0;

err_vmi_preallocate:
	vm_area_free(new);
err_vma_dup:
	kmem_cache_free(vm_region_jar, region);
	return -ENOMEM;
}

/*
 * shrink a VMA by removing the specified chunk from either the beginning or
 * the end
 */
static int vmi_shrink_vma(struct vma_iterator *vmi,
		      struct vm_area_struct *vma,
		      unsigned long from, unsigned long to)
{
	struct vm_region *region;

	/* adjust the VMA's pointers, which may reposition it in the MM's tree
	 * and list */
	if (vma_iter_prealloc(vmi)) {
		pr_warn("Allocation of vma tree for process %d failed\n",
		       current->pid);
		return -ENOMEM;
	}

	if (from > vma->vm_start) {
		vma_iter_clear(vmi, from, vma->vm_end);
		vma->vm_end = from;
	} else {
		vma_iter_clear(vmi, vma->vm_start, to);
		vma->vm_start = to;
	}

	/* cut the backing region down to size */
	region = vma->vm_region;
	BUG_ON(region->vm_usage != 1);

	down_write(&nommu_region_sem);
	delete_nommu_region(region);
	if (from > region->vm_start) {
		to = region->vm_top;
		region->vm_top = region->vm_end = from;
	} else {
		region->vm_start = to;
	}
	add_nommu_region(region);
	up_write(&nommu_region_sem);

	free_page_series(from, to);
	return 0;
}

/*
 * release a mapping
 * - under NOMMU conditions the chunk to be unmapped must be backed by a single
 *   VMA, though it need not cover the whole VMA
 */
int do_munmap(struct mm_struct *mm, unsigned long start, size_t len, struct list_head *uf)
{
	VMA_ITERATOR(vmi, mm, start);
	struct vm_area_struct *vma;
	unsigned long end;
	int ret = 0;

	len = PAGE_ALIGN(len);
	if (len == 0)
		return -EINVAL;

	end = start + len;

	/* find the first potentially overlapping VMA */
	vma = vma_find(&vmi, end);
	if (!vma) {
		static int limit;
		if (limit < 5) {
			pr_warn("munmap of memory not mmapped by process %d (%s): 0x%lx-0x%lx\n",
					current->pid, current->comm,
					start, start + len - 1);
			limit++;
		}
		return -EINVAL;
	}

	/* we're allowed to split an anonymous VMA but not a file-backed one */
	if (vma->vm_file) {
		do {
			if (start > vma->vm_start)
				return -EINVAL;
			if (end == vma->vm_end)
				goto erase_whole_vma;
			vma = vma_find(&vmi, end);
		} while (vma);
		return -EINVAL;
	} else {
		/* the chunk must be a subset of the VMA found */
		if (start == vma->vm_start && end == vma->vm_end)
			goto erase_whole_vma;
		if (start < vma->vm_start || end > vma->vm_end)
			return -EINVAL;
		if (offset_in_page(start))
			return -EINVAL;
		if (end != vma->vm_end && offset_in_page(end))
			return -EINVAL;
		if (start != vma->vm_start && end != vma->vm_end) {
			ret = split_vma(&vmi, vma, start, 1);
			if (ret < 0)
				return ret;
		}
		return vmi_shrink_vma(&vmi, vma, start, end);
	}

erase_whole_vma:
	if (delete_vma_from_mm(vma))
		ret = -ENOMEM;
	else
		delete_vma(mm, vma);
	return ret;
}
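
/*
 * Example of the constraint above (illustrative): given one anonymous
 * 4-page mapping at addr, munmap(addr, 4 * PAGE_SIZE) deletes the whole
 * VMA, munmap(addr + PAGE_SIZE, 2 * PAGE_SIZE) splits and then shrinks
 * it, but a range that straddles two separate VMAs, or any partial unmap
 * of a file-backed VMA, fails with -EINVAL.
 */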

int vm_munmap(unsigned long addr, size_t len)
{
	struct mm_struct *mm = current->mm;
	int ret;

	mmap_write_lock(mm);
	ret = do_munmap(mm, addr, len, NULL);
	mmap_write_unlock(mm);
	return ret;
}
EXPORT_SYMBOL(vm_munmap);

SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
{
	return vm_munmap(addr, len);
}

/*
 * release all the mappings made in a process's VM space
 */
void exit_mmap(struct mm_struct *mm)
{
	VMA_ITERATOR(vmi, mm, 0);
	struct vm_area_struct *vma;

	if (!mm)
		return;

	mm->total_vm = 0;

	/*
	 * Lock the mm to avoid assert complaining even though this is the only
	 * user of the mm
	 */
	mmap_write_lock(mm);
	for_each_vma(vmi, vma) {
		cleanup_vma_from_mm(vma);
		delete_vma(mm, vma);
		cond_resched();
	}
	__mt_destroy(&mm->mm_mt);
	mmap_write_unlock(mm);
}

int vm_brk(unsigned long addr, unsigned long len)
{
	return -ENOMEM;
}

/*
 * expand (or shrink) an existing mapping, potentially moving it at the same
 * time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * under NOMMU conditions, we only permit changing a mapping's size, and only
 * as long as it stays within the region allocated by do_mmap_private() and the
 * block is not shareable
 *
 * MREMAP_FIXED is not supported under NOMMU conditions
 */
static unsigned long do_mremap(unsigned long addr,
			unsigned long old_len, unsigned long new_len,
			unsigned long flags, unsigned long new_addr)
{
	struct vm_area_struct *vma;

	/* insanity checks first */
	old_len = PAGE_ALIGN(old_len);
	new_len = PAGE_ALIGN(new_len);
	if (old_len == 0 || new_len == 0)
		return (unsigned long) -EINVAL;

	if (offset_in_page(addr))
		return -EINVAL;

	if (flags & MREMAP_FIXED && new_addr != addr)
		return (unsigned long) -EINVAL;

	vma = find_vma_exact(current->mm, addr, old_len);
	if (!vma)
		return (unsigned long) -EINVAL;

	if (vma->vm_end != vma->vm_start + old_len)
		return (unsigned long) -EFAULT;

	if (is_nommu_shared_mapping(vma->vm_flags))
		return (unsigned long) -EPERM;

	if (new_len > vma->vm_region->vm_end - vma->vm_region->vm_start)
		return (unsigned long) -ENOMEM;

	/* all checks complete - do it */
	vma->vm_end = vma->vm_start + new_len;
	return vma->vm_start;
}
1580
1581 SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
1582                 unsigned long, new_len, unsigned long, flags,
1583                 unsigned long, new_addr)
1584 {
1585         unsigned long ret;
1586
1587         mmap_write_lock(current->mm);
1588         ret = do_mremap(addr, old_len, new_len, flags, new_addr);
1589         mmap_write_unlock(current->mm);
1590         return ret;
1591 }
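
/*
 * Hedged userspace sketch (not from the original source) of the NOMMU
 * mremap() rules above: a private mapping may only be resized in place,
 * growth succeeds only while the new size still fits in the vm_region
 * that do_mmap_private() originally allocated, and MREMAP_MAYMOVE never
 * actually moves anything here.
 *
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	void *q = mremap(p, len, len / 2, 0);	// shrink in place: OK
 *	void *r = mremap(p, len / 2, len * 8,
 *			 MREMAP_MAYMOVE);	// likely -ENOMEM: exceeds region
 */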
1592
1593 struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
1594                          unsigned int foll_flags)
1595 {
1596         return NULL;
1597 }
1598
1599 int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
1600                 unsigned long pfn, unsigned long size, pgprot_t prot)
1601 {
1602         if (addr != (pfn << PAGE_SHIFT))
1603                 return -EINVAL;
1604
1605         vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
1606         return 0;
1607 }
1608 EXPORT_SYMBOL(remap_pfn_range);
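
/*
 * Hedged driver-side sketch (not part of the original file): on NOMMU a
 * character device ->mmap() handler typically hands the VMA straight to
 * remap_pfn_range().  Because user addresses are physical addresses
 * here, the identity check above means vm_start must itself be the
 * mapped physical address; "foo_mmap" is a made-up handler name.
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return remap_pfn_range(vma, vma->vm_start,
 *				       vma->vm_start >> PAGE_SHIFT,
 *				       vma->vm_end - vma->vm_start,
 *				       vma->vm_page_prot);
 *	}
 */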
1609
1610 int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len)
1611 {
1612         unsigned long pfn = start >> PAGE_SHIFT;
1613         unsigned long vm_len = vma->vm_end - vma->vm_start;
1614
1615         pfn += vma->vm_pgoff;
1616         return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot);
1617 }
1618 EXPORT_SYMBOL(vm_iomap_memory);
1619
1620 int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
1621                         unsigned long pgoff)
1622 {
1623         unsigned int size = vma->vm_end - vma->vm_start;
1624
1625         if (!(vma->vm_flags & VM_USERMAP))
1626                 return -EINVAL;
1627
1628         vma->vm_start = (unsigned long)(addr + (pgoff << PAGE_SHIFT));
1629         vma->vm_end = vma->vm_start + size;
1630
1631         return 0;
1632 }
1633 EXPORT_SYMBOL(remap_vmalloc_range);
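
/*
 * Hedged driver-side sketch (not part of the original file): to expose
 * a vmalloc'd buffer via mmap() on NOMMU, the VMA must carry VM_USERMAP
 * (vmalloc_user() arranges this for its own allocation; the sketch sets
 * it explicitly), after which remap_vmalloc_range() points the VMA
 * directly at the buffer.  "foo_buf" is a made-up buffer for the
 * example.
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		vm_flags_set(vma, VM_USERMAP);
 *		return remap_vmalloc_range(vma, foo_buf, vma->vm_pgoff);
 *	}
 */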
1634
1635 vm_fault_t filemap_fault(struct vm_fault *vmf)
1636 {
1637         BUG();
1638         return 0;
1639 }
1640 EXPORT_SYMBOL(filemap_fault);
1641
1642 vm_fault_t filemap_map_pages(struct vm_fault *vmf,
1643                 pgoff_t start_pgoff, pgoff_t end_pgoff)
1644 {
1645         BUG();
1646         return 0;
1647 }
1648 EXPORT_SYMBOL(filemap_map_pages);
1649
1650 int __access_remote_vm(struct mm_struct *mm, unsigned long addr, void *buf,
1651                        int len, unsigned int gup_flags)
1652 {
1653         struct vm_area_struct *vma;
1654         int write = gup_flags & FOLL_WRITE;
1655
1656         if (mmap_read_lock_killable(mm))
1657                 return 0;
1658
1659         /* the access must start within one of the target process's mappings */
1660         vma = find_vma(mm, addr);
1661         if (vma) {
1662                 /* don't overrun this mapping */
1663                 if (addr + len >= vma->vm_end)
1664                         len = vma->vm_end - addr;
1665
1666                 /* only read or write mappings where it is permitted */
1667                 if (write && vma->vm_flags & VM_MAYWRITE)
1668                         copy_to_user_page(vma, NULL, addr,
1669                                          (void *) addr, buf, len);
1670                 else if (!write && vma->vm_flags & VM_MAYREAD)
1671                         copy_from_user_page(vma, NULL, addr,
1672                                             buf, (void *) addr, len);
1673                 else
1674                         len = 0;
1675         } else {
1676                 len = 0;
1677         }
1678
1679         mmap_read_unlock(mm);
1680
1681         return len;
1682 }
1683
1684 /**
1685  * access_remote_vm - access another process' address space
1686  * @mm:         the mm_struct of the target address space
1687  * @addr:       start address to access
1688  * @buf:        source or destination buffer
1689  * @len:        number of bytes to transfer
1690  * @gup_flags:  flags modifying lookup behaviour
1691  *
1692  * The caller must hold a reference on @mm.
1693  */
1694 int access_remote_vm(struct mm_struct *mm, unsigned long addr,
1695                 void *buf, int len, unsigned int gup_flags)
1696 {
1697         return __access_remote_vm(mm, addr, buf, len, gup_flags);
1698 }
1699
1700 /*
1701  * Access another process' address space.
1702  * - the source/target buffer must be in kernel space
1703  */
1704 int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len,
1705                 unsigned int gup_flags)
1706 {
1707         struct mm_struct *mm;
1708
1709         if (addr + len < addr)
1710                 return 0;
1711
1712         mm = get_task_mm(tsk);
1713         if (!mm)
1714                 return 0;
1715
1716         len = __access_remote_vm(mm, addr, buf, len, gup_flags);
1717
1718         mmput(mm);
1719         return len;
1720 }
1721 EXPORT_SYMBOL_GPL(access_process_vm);
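
/*
 * Hedged usage sketch (not part of the original file), in the style of
 * ptrace's PEEKDATA handling: read one word from another task's address
 * space.  "tsk" and "addr" are assumed inputs for the example.
 *
 *	unsigned long word;
 *
 *	if (access_process_vm(tsk, addr, &word, sizeof(word),
 *			      FOLL_FORCE) != sizeof(word))
 *		return -EIO;
 */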
1722
1723 /**
1724  * nommu_shrink_inode_mappings - Shrink the shared mappings on an inode
1725  * @inode: The inode to check
1726  * @size: The current filesize of the inode
1727  * @newsize: The proposed filesize of the inode
1728  *
1729  * Check the shared mappings on an inode on behalf of a shrinking truncate to
1730  * make sure that any outstanding VMAs aren't broken, and then shrink any
1731  * vm_regions that extend beyond the new size so that do_mmap() doesn't
1732  * automatically grant mappings that are too large.
1733  */
1734 int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
1735                                 size_t newsize)
1736 {
1737         struct vm_area_struct *vma;
1738         struct vm_region *region;
1739         pgoff_t low, high;
1740         size_t r_size, r_top;
1741
1742         low = newsize >> PAGE_SHIFT;
1743         high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1744
1745         down_write(&nommu_region_sem);
1746         i_mmap_lock_read(inode->i_mapping);
1747
1748         /* search for VMAs that fall within the dead zone */
1749         vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, low, high) {
1750                 /* found one - only interested if it's shared out of the page
1751                  * cache */
1752                 if (vma->vm_flags & VM_SHARED) {
1753                         i_mmap_unlock_read(inode->i_mapping);
1754                         up_write(&nommu_region_sem);
1755                         return -ETXTBSY; /* not quite true, but near enough */
1756                 }
1757         }
1758
1759         /* reduce any regions that overlap the dead zone - if any exist, they
1760          * will be pointed to by VMAs that don't overlap the dead zone
1761          *
1762          * we don't check for any regions that start beyond the EOF as there
1763          * shouldn't be any
1764          */
1765         vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, 0, ULONG_MAX) {
1766                 if (!(vma->vm_flags & VM_SHARED))
1767                         continue;
1768
1769                 region = vma->vm_region;
1770                 r_size = region->vm_top - region->vm_start;
1771                 r_top = (region->vm_pgoff << PAGE_SHIFT) + r_size;
1772
1773                 if (r_top > newsize) {
1774                         region->vm_top -= r_top - newsize;
1775                         if (region->vm_end > region->vm_top)
1776                                 region->vm_end = region->vm_top;
1777                 }
1778         }
1779
1780         i_mmap_unlock_read(inode->i_mapping);
1781         up_write(&nommu_region_sem);
1782         return 0;
1783 }
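
/*
 * Hedged usage sketch (not part of the original file): a NOMMU-aware
 * filesystem (ramfs does this) calls the helper from its shrinking
 * truncate path before actually dropping the pages:
 *
 *	if (newsize < inode->i_size) {
 *		ret = nommu_shrink_inode_mappings(inode, inode->i_size,
 *						  newsize);
 *		if (ret)
 *			return ret;
 *	}
 *	truncate_setsize(inode, newsize);
 */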
1784
1785 /*
1786  * Initialise sysctl_user_reserve_kbytes.
1787  *
1788  * This is intended to prevent a user from starting a single memory hogging
1789  * process, such that they cannot recover (kill the hog) in OVERCOMMIT_NEVER
1790  * mode.
1791  *
1792  * The default value is min(3% of free memory, 128MB).
1793  * 128MB is enough to recover with sshd/login, bash, and top/kill.
1794  */
1795 static int __meminit init_user_reserve(void)
1796 {
1797         unsigned long free_kbytes;
1798
1799         free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
1800
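        /*
         * free_kbytes / 32 is roughly 3% of free memory, and 1UL << 17
         * kilobytes is 128 MiB, implementing the min(3% of free memory,
         * 128MB) policy described above.
         */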
1801         sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17);
1802         return 0;
1803 }
1804 subsys_initcall(init_user_reserve);
1805
1806 /*
1807  * Initialise sysctl_admin_reserve_kbytes.
1808  *
1809  * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin
1810  * to log in and kill a memory hogging process.
1811  *
1812  * Systems with more than 256MB will reserve 8MB, enough to recover
1813  * with sshd, bash, and top in OVERCOMMIT_GUESS mode. Smaller systems
1814  * will only reserve 3% of free pages by default.
1815  */
1816 static int __meminit init_admin_reserve(void)
1817 {
1818         unsigned long free_kbytes;
1819
1820         free_kbytes = global_zone_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
1821
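        /*
         * free_kbytes / 32 is roughly 3% of free memory, and 1UL << 13
         * kilobytes is 8 MiB, matching the policy described above.
         */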
1822         sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13);
1823         return 0;
1824 }
1825 subsys_initcall(init_admin_reserve);