diff --git a/mm/mmap.c b/mm/mmap.c
index e71d2d4..d91ecb0 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -143,7 +143,7 @@ static void __remove_shared_vm_struct(struct vm_area_struct *vma,
                struct file *file, struct address_space *mapping)
 {
        if (vma->vm_flags & VM_DENYWRITE)
-               atomic_inc(&file_inode(file)->i_writecount);
+               allow_write_access(file);
        if (vma->vm_flags & VM_SHARED)
                mapping_unmap_writable(mapping);
 
@@ -474,8 +474,12 @@ static __always_inline void vma_rb_erase_ignore(struct vm_area_struct *vma,
 {
        /*
         * All rb_subtree_gap values must be consistent prior to erase,
-        * with the possible exception of the "next" vma being erased if
-        * next->vm_start was reduced.
+        * with the possible exception of
+        *
+        * a. the "next" vma being erased if next->vm_start was reduced in
+        *    __vma_adjust() -> __vma_unlink()
+        * b. the vma being erased in detach_vmas_to_be_unmapped() ->
+        *    vma_rb_erase()
         */
        validate_mm_rb(root, ignore);
 
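For context: rb_subtree_gap caches, at each rbtree node, the largest free gap in front of any VMA in that node's subtree, which is what lets get_unmapped_area() skip whole subtrees when searching for space. Below is a minimal user-space model of the invariant validate_mm_rb() checks; struct vma, vma_gap() and subtree_gap() are hypothetical stand-ins, and the kernel's vma_compute_gap() additionally accounts for stack guard pages:

        struct vma {
                unsigned long vm_start, vm_end;
                struct vma *vm_prev;            /* previous VMA in address order */
                struct vma *left, *right;       /* rbtree children */
                unsigned long rb_subtree_gap;   /* cached max gap in this subtree */
        };

        static unsigned long vma_gap(const struct vma *v)
        {
                /* Free space between the previous VMA's end and this start. */
                return v->vm_start - (v->vm_prev ? v->vm_prev->vm_end : 0);
        }

        /* "Consistent" means v->rb_subtree_gap equals this for every node. */
        static unsigned long subtree_gap(const struct vma *v)
        {
                unsigned long max = vma_gap(v);

                if (v->left && v->left->rb_subtree_gap > max)
                        max = v->left->rb_subtree_gap;
                if (v->right && v->right->rb_subtree_gap > max)
                        max = v->right->rb_subtree_gap;
                return max;
        }

Exceptions (a) and (b) in the comment are exactly the nodes whose cached value may be momentarily stale while they are being unlinked.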
@@ -485,13 +489,7 @@ static __always_inline void vma_rb_erase_ignore(struct vm_area_struct *vma,
 static __always_inline void vma_rb_erase(struct vm_area_struct *vma,
                                         struct rb_root *root)
 {
-       /*
-        * All rb_subtree_gap values must be consistent prior to erase,
-        * with the possible exception of the vma being erased.
-        */
-       validate_mm_rb(root, vma);
-
-       __vma_rb_erase(vma, root);
+       vma_rb_erase_ignore(vma, root, vma);
 }
 
 /*
@@ -560,6 +558,51 @@ static int find_vma_links(struct mm_struct *mm, unsigned long addr,
        return 0;
 }
 
+/*
+ * vma_next() - Get the next VMA.
+ * @mm: The mm_struct.
+ * @vma: The current vma.
+ *
+ * If @vma is NULL, return the first vma in the mm.
+ *
+ * Returns: The next VMA after @vma.
+ */
+static inline struct vm_area_struct *vma_next(struct mm_struct *mm,
+                                        struct vm_area_struct *vma)
+{
+       if (!vma)
+               return mm->mmap;
+
+       return vma->vm_next;
+}
+
+/*
+ * munmap_vma_range() - munmap VMAs that overlap a range.
+ * @mm: The mm struct
+ * @start: The start of the range.
+ * @len: The length of the range.
+ * @pprev: pointer to the pointer that will be set to previous vm_area_struct
+ * @link: the rb_node insertion point for a new vma
+ * @parent: the parent rb_node of @link
+ * @uf: the list to track userfaultfd unmap events
+ *
+ * Find all the vm_area_structs that overlap the range @start to @start + @len
+ * and munmap them.  Set @pprev to the previous vm_area_struct.
+ *
+ * Returns: -ENOMEM on munmap failure or 0 on success.
+ */
+static inline int
+munmap_vma_range(struct mm_struct *mm, unsigned long start, unsigned long len,
+                struct vm_area_struct **pprev, struct rb_node ***link,
+                struct rb_node **parent, struct list_head *uf)
+{
+       while (find_vma_links(mm, start, start + len, pprev, link, parent))
+               if (do_munmap(mm, start, len, uf))
+                       return -ENOMEM;
+
+       return 0;
+}
+
 static unsigned long count_vma_pages_range(struct mm_struct *mm,
                unsigned long addr, unsigned long end)
 {
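vma_next() simply names the `prev ? prev->vm_next : mm->mmap` idiom that vma_merge(), unmap_region() and __do_munmap() below used to open-code. A self-contained sketch of that idiom; struct vma, struct mm and count_overlaps() are hypothetical pared-down types for illustration, not the kernel's:

        struct vma { unsigned long start, end; struct vma *next; };
        struct mm { struct vma *mmap; };        /* list sorted by start */

        static struct vma *vma_next(struct mm *mm, struct vma *vma)
        {
                return vma ? vma->next : mm->mmap;      /* NULL means "start at head" */
        }

        /* Count VMAs intersecting [start, end), walking from after @prev. */
        static int count_overlaps(struct mm *mm, struct vma *prev,
                                  unsigned long start, unsigned long end)
        {
                struct vma *v;
                int n = 0;

                for (v = vma_next(mm, prev); v && v->start < end; v = v->next)
                        if (v->end > start)
                                n++;
                return n;
        }

munmap_vma_range() likewise folds the find_vma_links()/do_munmap() retry loop that mmap_region() and do_brk_flags() both duplicated (see the later hunks) into one helper.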
@@ -621,9 +663,9 @@ static void __vma_link_file(struct vm_area_struct *vma)
                struct address_space *mapping = file->f_mapping;
 
                if (vma->vm_flags & VM_DENYWRITE)
-                       atomic_dec(&file_inode(file)->i_writecount);
+                       put_write_access(file_inode(file));
                if (vma->vm_flags & VM_SHARED)
-                       atomic_inc(&mapping->i_mmap_writable);
+                       mapping_allow_writable(mapping);
 
                flush_dcache_mmap_lock(mapping);
                vma_interval_tree_insert(vma, &mapping->i_mmap);
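These substitutions, like the one in __remove_shared_vm_struct() in the first hunk, are behavior-preserving: the helpers are thin inlines from include/linux/fs.h wrapping the same atomics, roughly as below. allow_write_access() also tolerates a NULL file, which is moot on these paths since they only run for file-backed VMAs:

        static inline void allow_write_access(struct file *file)
        {
                if (file)
                        atomic_inc(&file_inode(file)->i_writecount);
        }

        static inline void put_write_access(struct inode *inode)
        {
                atomic_dec(&inode->i_writecount);
        }

        static inline void mapping_allow_writable(struct address_space *mapping)
        {
                atomic_inc(&mapping->i_mmap_writable);
        }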
@@ -677,7 +719,7 @@ static void __insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
        mm->map_count++;
 }
 
-static __always_inline void __vma_unlink_common(struct mm_struct *mm,
+static __always_inline void __vma_unlink(struct mm_struct *mm,
                                                struct vm_area_struct *vma,
                                                struct vm_area_struct *ignore)
 {
@@ -760,7 +802,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
                         * vma expands, overlapping part of the next:
                         * mprotect case 5 shifting the boundary up.
                         */
-                       adjust_next = (end - next->vm_start) >> PAGE_SHIFT;
+                       adjust_next = (end - next->vm_start);
                        exporter = next;
                        importer = vma;
                        VM_WARN_ON(expand != importer);
@@ -770,7 +812,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
                         * split_vma inserting another: so it must be
                         * mprotect case 4 shifting the boundary down.
                         */
-                       adjust_next = -((vma->vm_end - end) >> PAGE_SHIFT);
+                       adjust_next = -(vma->vm_end - end);
                        exporter = vma;
                        importer = next;
                        VM_WARN_ON(expand != importer);
@@ -825,7 +867,7 @@ again:
                        anon_vma_interval_tree_pre_update_vma(next);
        }
 
-       if (root) {
+       if (file) {
                flush_dcache_mmap_lock(mapping);
                vma_interval_tree_remove(vma, root);
                if (adjust_next)
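This and the later `if (root)` to `if (file)` conversions are equivalence rewrites, not logic changes: near the top of __vma_adjust(), mapping and root are derived from file and are non-NULL exactly when file is. Roughly, paraphrasing the variable setup earlier in the function (not part of this hunk):

        struct file *file = vma->vm_file;
        struct address_space *mapping = NULL;
        struct rb_root_cached *root = NULL;

        if (file) {
                mapping = file->f_mapping;
                root = &mapping->i_mmap;        /* root != NULL iff file != NULL */
        }

Testing file directly makes these blocks visibly symmetric with the `if (file)` lock and unlock sections around them.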
@@ -842,11 +884,11 @@ again:
        }
        vma->vm_pgoff = pgoff;
        if (adjust_next) {
-               next->vm_start += adjust_next << PAGE_SHIFT;
-               next->vm_pgoff += adjust_next;
+               next->vm_start += adjust_next;
+               next->vm_pgoff += adjust_next >> PAGE_SHIFT;
        }
 
-       if (root) {
+       if (file) {
                if (adjust_next)
                        vma_interval_tree_insert(next, root);
                vma_interval_tree_insert(vma, root);
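With the two hunks above, adjust_next now carries bytes instead of pages, so the shift happens once here (for the page-granular vm_pgoff) rather than at every reader. A worked example, assuming 4 KiB pages (PAGE_SHIFT == 12); the addresses are made up:

        #include <assert.h>

        #define PAGE_SHIFT 12

        int main(void)
        {
                unsigned long vm_start = 0x201000, vm_pgoff = 0x201;
                unsigned long end = 0x203000;   /* mprotect case 5: boundary moves up */

                long old_adjust = (end - vm_start) >> PAGE_SHIFT;       /* 2 pages */
                long new_adjust = end - vm_start;                       /* 0x2000 bytes */

                /* Both encodings move vm_start and vm_pgoff by the same amount. */
                assert(vm_start + (old_adjust << PAGE_SHIFT) == vm_start + new_adjust);
                assert(vm_pgoff + old_adjust == vm_pgoff + (new_adjust >> PAGE_SHIFT));
                return 0;
        }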
@@ -859,7 +901,7 @@ again:
                 * us to remove next before dropping the locks.
                 */
                if (remove_next != 3)
-                       __vma_unlink_common(mm, next, next);
+                       __vma_unlink(mm, next, next);
                else
                        /*
                         * vma is not before next if they've been
@@ -870,7 +912,7 @@ again:
                         * "next" (which is stored in post-swap()
                         * "vma").
                         */
-                       __vma_unlink_common(mm, next, vma);
+                       __vma_unlink(mm, next, vma);
                if (file)
                        __remove_shared_vm_struct(next, file, mapping);
        } else if (insert) {
@@ -897,10 +939,9 @@ again:
                        anon_vma_interval_tree_post_update_vma(next);
                anon_vma_unlock_write(anon_vma);
        }
-       if (mapping)
-               i_mmap_unlock_write(mapping);
 
-       if (root) {
+       if (file) {
+               i_mmap_unlock_write(mapping);
                uprobe_mmap(vma);
 
                if (adjust_next)
@@ -1131,10 +1172,7 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
        if (vm_flags & VM_SPECIAL)
                return NULL;
 
-       if (prev)
-               next = prev->vm_next;
-       else
-               next = mm->mmap;
+       next = vma_next(mm, prev);
        area = next;
        if (area && area->vm_end == end)                /* cases 6, 7, 8 */
                next = next->vm_next;
@@ -1710,13 +1748,9 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
                        return -ENOMEM;
        }
 
-       /* Clear old maps */
-       while (find_vma_links(mm, addr, addr + len, &prev, &rb_link,
-                             &rb_parent)) {
-               if (do_munmap(mm, addr, len, uf))
-                       return -ENOMEM;
-       }
-
+       /* Clear old maps, set up prev, rb_link, rb_parent, and uf */
+       if (munmap_vma_range(mm, addr, len, &prev, &rb_link, &rb_parent, uf))
+               return -ENOMEM;
        /*
         * Private writable mapping: check memory availability
         */
@@ -2565,7 +2599,6 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
        if (vma && (vma->vm_start <= addr))
                return vma;
-       /* don't alter vm_end if the coredump is running */
-       if (!prev || !mmget_still_valid(mm) || expand_stack(prev, addr))
+       if (!prev || expand_stack(prev, addr))
                return NULL;
        if (prev->vm_flags & VM_LOCKED)
                populate_vma_page_range(prev, addr, prev->vm_end, NULL);
@@ -2591,9 +2625,6 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
                return vma;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                return NULL;
-       /* don't alter vm_start if the coredump is running */
-       if (!mmget_still_valid(mm))
-               return NULL;
        start = vma->vm_start;
        if (expand_stack(vma, addr))
                return NULL;
@@ -2638,7 +2669,7 @@ static void unmap_region(struct mm_struct *mm,
                struct vm_area_struct *vma, struct vm_area_struct *prev,
                unsigned long start, unsigned long end)
 {
-       struct vm_area_struct *next = prev ? prev->vm_next : mm->mmap;
+       struct vm_area_struct *next = vma_next(mm, prev);
        struct mmu_gather tlb;
 
        lru_add_drain();
@@ -2837,7 +2868,7 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
                if (error)
                        return error;
        }
-       vma = prev ? prev->vm_next : mm->mmap;
+       vma = vma_next(mm, prev);
 
        if (unlikely(uf)) {
                /*
@@ -3055,14 +3086,9 @@ static int do_brk_flags(unsigned long addr, unsigned long len, unsigned long fla
        if (error)
                return error;
 
-       /*
-        * Clear old maps.  this also does some error checking for us
-        */
-       while (find_vma_links(mm, addr, addr + len, &prev, &rb_link,
-                             &rb_parent)) {
-               if (do_munmap(mm, addr, len, uf))
-                       return -ENOMEM;
-       }
+       /* Clear old maps, set up prev, rb_link, rb_parent, and uf */
+       if (munmap_vma_range(mm, addr, len, &prev, &rb_link, &rb_parent, uf))
+               return -ENOMEM;
 
        /* Check against address space limits *after* clearing old maps... */
        if (!may_expand_vm(mm, flags, len >> PAGE_SHIFT))
@@ -3236,7 +3262,7 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
         * By setting it to reflect the virtual start address of the
         * vma, merges and splits can happen in a seamless way, just
         * using the existing file pgoff checks and manipulations.
-        * Similarly in do_mmap and in do_brk.
+        * Similarly in do_mmap and in do_brk_flags.
         */
        if (vma_is_anonymous(vma)) {
                BUG_ON(vma->anon_vma);
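The rule this comment states is applied just below the context shown: an anonymous VMA mirrors its virtual start into the offset field, so vm_pgoff arithmetic during split and merge works the same as for file mappings. In this kernel that amounts to, roughly:

        if (vma_is_anonymous(vma)) {
                BUG_ON(vma->anon_vma);
                /* Anonymous mapping: vm_pgoff mirrors the virtual start. */
                vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
        }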