Merge branch 'akpm' (patches from Andrew)
index bc88d16..aa9de98 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1457,9 +1457,7 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
                return addr;
 
        if (flags & MAP_FIXED_NOREPLACE) {
-               struct vm_area_struct *vma = find_vma(mm, addr);
-
-               if (vma && vma->vm_start < addr + len)
+               if (find_vma_intersection(mm, addr, addr + len))
                        return -EEXIST;
        }
 
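
For reference, find_vma_intersection() gives the same answer as the open-coded check it replaces: the first VMA ending above addr that also starts below addr + len. A minimal sketch of those semantics, paraphrased from the removed lines rather than quoted from include/linux/mm.h:

static inline struct vm_area_struct *
find_vma_intersection(struct mm_struct *mm, unsigned long start_addr,
		      unsigned long end_addr)
{
	/* find_vma() returns the lowest VMA with vm_end > start_addr */
	struct vm_area_struct *vma = find_vma(mm, start_addr);

	/* it only intersects [start_addr, end_addr) if it also begins
	 * below end_addr */
	if (vma && end_addr <= vma->vm_start)
		vma = NULL;
	return vma;
}

So the MAP_FIXED_NOREPLACE path still returns -EEXIST exactly when an existing mapping overlaps [addr, addr + len).
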
@@ -1633,7 +1631,7 @@ unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
                        return PTR_ERR(file);
        }
 
-       flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+       flags &= ~MAP_DENYWRITE;
 
        retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
 out_fput:
@@ -2802,6 +2800,22 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
        return __split_vma(mm, vma, addr, new_below);
 }
 
+static inline void
+unlock_range(struct vm_area_struct *start, unsigned long limit)
+{
+       struct mm_struct *mm = start->vm_mm;
+       struct vm_area_struct *tmp = start;
+
+       while (tmp && tmp->vm_start < limit) {
+               if (tmp->vm_flags & VM_LOCKED) {
+                       mm->locked_vm -= vma_pages(tmp);
+                       munlock_vma_pages_all(tmp);
+               }
+
+               tmp = tmp->vm_next;
+       }
+}
+
 /* Munmap is split into 2 main parts -- this part which finds
  * what needs doing, and the areas themselves, which do the
  * work.  This now handles partial unmappings.
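
The new unlock_range() helper consolidates the two open-coded munlock loops removed in the hunks below. The page count subtracted from mm->locked_vm comes from vma_pages(); a one-line sketch of that helper, assuming the usual definition in include/linux/mm.h (PAGE_SHIFT converts the byte span of the VMA to pages):

static inline unsigned long vma_pages(struct vm_area_struct *vma)
{
	/* number of pages spanned by the VMA */
	return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
}
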
@@ -2828,16 +2842,11 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
         */
        arch_unmap(mm, start, end);
 
-       /* Find the first overlapping VMA */
-       vma = find_vma(mm, start);
+       /* Find the first overlapping VMA where start < vma->vm_end */
+       vma = find_vma_intersection(mm, start, end);
        if (!vma)
                return 0;
        prev = vma->vm_prev;
-       /* we have  start < vma->vm_end  */
-
-       /* if it doesn't overlap, we have nothing.. */
-       if (vma->vm_start >= end)
-               return 0;
 
        /*
         * If we need to split any vma, do it now to save pain later.
@@ -2890,17 +2899,8 @@ int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
        /*
         * unlock any mlock()ed ranges before detaching vmas
         */
-       if (mm->locked_vm) {
-               struct vm_area_struct *tmp = vma;
-               while (tmp && tmp->vm_start < end) {
-                       if (tmp->vm_flags & VM_LOCKED) {
-                               mm->locked_vm -= vma_pages(tmp);
-                               munlock_vma_pages_all(tmp);
-                       }
-
-                       tmp = tmp->vm_next;
-               }
-       }
+       if (mm->locked_vm)
+               unlock_range(vma, end);
 
        /* Detach vmas from rbtree */
        if (!detach_vmas_to_be_unmapped(mm, vma, prev, end))
@@ -3185,14 +3185,8 @@ void exit_mmap(struct mm_struct *mm)
                mmap_write_unlock(mm);
        }
 
-       if (mm->locked_vm) {
-               vma = mm->mmap;
-               while (vma) {
-                       if (vma->vm_flags & VM_LOCKED)
-                               munlock_vma_pages_all(vma);
-                       vma = vma->vm_next;
-               }
-       }
+       if (mm->locked_vm)
+               unlock_range(mm->mmap, ULONG_MAX);
 
        arch_exit_mmap(mm);
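
Taken together, the two call sites cover both ways the new helper is used; reconstructed from the hunks in this patch:

/* __do_munmap(): munlock only the VMAs overlapping the range being unmapped */
if (mm->locked_vm)
	unlock_range(vma, end);

/* exit_mmap(): with ULONG_MAX as the limit, the vm_start < limit test
 * never ends the walk early, so every VM_LOCKED VMA in the exiting mm
 * is munlocked, just as the removed loop did */
if (mm->locked_vm)
	unlock_range(mm->mmap, ULONG_MAX);
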