diff --git a/mm/mmap.c b/mm/mmap.c
index f7cd9cb..6c04292 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -191,16 +191,19 @@ static int do_brk_flags(unsigned long addr, unsigned long request, unsigned long
 SYSCALL_DEFINE1(brk, unsigned long, brk)
 {
        unsigned long retval;
-       unsigned long newbrk, oldbrk;
+       unsigned long newbrk, oldbrk, origbrk;
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *next;
        unsigned long min_brk;
        bool populate;
+       bool downgraded = false;
        LIST_HEAD(uf);
 
        if (down_write_killable(&mm->mmap_sem))
                return -EINTR;
 
+       origbrk = mm->brk;
+
 #ifdef CONFIG_COMPAT_BRK
        /*
         * CONFIG_COMPAT_BRK can still be overridden by setting
@@ -229,14 +232,32 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
 
        newbrk = PAGE_ALIGN(brk);
        oldbrk = PAGE_ALIGN(mm->brk);
-       if (oldbrk == newbrk)
-               goto set_brk;
+       if (oldbrk == newbrk) {
+               mm->brk = brk;
+               goto success;
+       }
 
-       /* Always allow shrinking brk. */
+       /*
+        * Always allow shrinking brk.
+        * __do_munmap() may downgrade mmap_sem to read.
+        */
        if (brk <= mm->brk) {
-               if (!do_munmap(mm, newbrk, oldbrk-newbrk, &uf))
-                       goto set_brk;
-               goto out;
+               int ret;
+
+               /*
+        * mm->brk must be protected by write mmap_sem, so update it
+                * before downgrading mmap_sem. When __do_munmap() fails,
+                * mm->brk will be restored from origbrk.
+                */
+               mm->brk = brk;
+               ret = __do_munmap(mm, newbrk, oldbrk-newbrk, &uf, true);
+               if (ret < 0) {
+                       mm->brk = origbrk;
+                       goto out;
+               } else if (ret == 1) {
+                       downgraded = true;
+               }
+               goto success;
        }
 
        /* Check against existing mmap mappings. */
@@ -247,18 +268,21 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
        /* Ok, looks good - let it rip. */
        if (do_brk_flags(oldbrk, newbrk-oldbrk, 0, &uf) < 0)
                goto out;
-
-set_brk:
        mm->brk = brk;
+
+success:
        populate = newbrk > oldbrk && (mm->def_flags & VM_LOCKED) != 0;
-       up_write(&mm->mmap_sem);
+       if (downgraded)
+               up_read(&mm->mmap_sem);
+       else
+               up_write(&mm->mmap_sem);
        userfaultfd_unmap_complete(mm, &uf);
        if (populate)
                mm_populate(oldbrk, newbrk - oldbrk);
        return brk;
 
 out:
-       retval = mm->brk;
+       retval = origbrk;
        up_write(&mm->mmap_sem);
        return retval;
 }
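
The brk path above is the textbook write-to-read downgrade: every VMA-tree mutation (detaching vmas, publishing the new mm->brk) happens under the write lock, and downgrade_write() then turns the holder into a reader so the long page-zapping phase no longer blocks concurrent page faults. Below is a minimal sketch of that pattern, not part of the patch; demo_shrink() and the phase comments are hypothetical, while down_write_killable(), downgrade_write() and up_read() are the real rw_semaphore API.

/*
 * Illustrative only: the mmap_sem write-to-read downgrade pattern
 * used by the brk/munmap changes above.
 */
#include <linux/errno.h>
#include <linux/mm_types.h>
#include <linux/rwsem.h>

static int demo_shrink(struct mm_struct *mm)
{
	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;	/* killed while sleeping on the lock */

	/* Phase 1: mutate shared state; needs exclusive (write) access. */
	/* e.g. detach vmas from the rbtree, publish the new mm->brk */

	/* Atomically trade write ownership for read ownership. */
	downgrade_write(&mm->mmap_sem);

	/* Phase 2: long-running teardown that only needs to exclude writers. */
	/* e.g. zap page tables and free the pages */

	up_read(&mm->mmap_sem);	/* we now hold the read side */
	return 0;
}

Note that downgrade_write() is atomic: no other writer can slip in between the two phases, which is why mm->brk can safely be updated before the downgrade.
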
@@ -2687,8 +2711,8 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
  * work.  This now handles partial unmappings.
  * Jeremy Fitzhardinge <jeremy@goop.org>
  */
-int do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
-             struct list_head *uf)
+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
+               struct list_head *uf, bool downgrade)
 {
        unsigned long end;
        struct vm_area_struct *vma, *prev, *last;
@@ -2770,25 +2794,38 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
                                mm->locked_vm -= vma_pages(tmp);
                                munlock_vma_pages_all(tmp);
                        }
+
                        tmp = tmp->vm_next;
                }
        }
 
-       /*
-        * Remove the vma's, and unmap the actual pages
-        */
+       /* Detach vmas from rbtree */
        detach_vmas_to_be_unmapped(mm, vma, prev, end);
-       unmap_region(mm, vma, prev, start, end);
 
+       /*
+        * mpx unmap needs to be called with mmap_sem held for write.
+        * It is safe to call it before unmap_region().
+        */
        arch_unmap(mm, vma, start, end);
 
+       if (downgrade)
+               downgrade_write(&mm->mmap_sem);
+
+       unmap_region(mm, vma, prev, start, end);
+
        /* Fix up all other VM information */
        remove_vma_list(mm, vma);
 
-       return 0;
+       return downgrade ? 1 : 0;
 }
 
-int vm_munmap(unsigned long start, size_t len)
+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
+             struct list_head *uf)
+{
+       return __do_munmap(mm, start, len, uf, false);
+}
+
+static int __vm_munmap(unsigned long start, size_t len, bool downgrade)
 {
        int ret;
        struct mm_struct *mm = current->mm;
@@ -2797,17 +2834,32 @@ int vm_munmap(unsigned long start, size_t len)
        if (down_write_killable(&mm->mmap_sem))
                return -EINTR;
 
-       ret = do_munmap(mm, start, len, &uf);
-       up_write(&mm->mmap_sem);
+       ret = __do_munmap(mm, start, len, &uf, downgrade);
+       /*
+        * Returning 1 indicates mmap_sem is downgraded.
+        * But 1 is not a legal return value of vm_munmap() or munmap();
+        * reset it to 0 before returning.
+        */
+       if (ret == 1) {
+               up_read(&mm->mmap_sem);
+               ret = 0;
+       } else
+               up_write(&mm->mmap_sem);
+
        userfaultfd_unmap_complete(mm, &uf);
        return ret;
 }
+
+int vm_munmap(unsigned long start, size_t len)
+{
+       return __vm_munmap(start, len, false);
+}
 EXPORT_SYMBOL(vm_munmap);
 
 SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
 {
        profile_munmap(addr);
-       return vm_munmap(addr, len);
+       return __vm_munmap(addr, len, true);
 }
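
From userspace the change is invisible except in latency: munmap(2) on a large mapping now zaps pages with mmap_sem held only for read, so other threads of the process can fault in parallel with the teardown. A hypothetical micro-exercise of that path (plain POSIX, not part of the patch; the 1 GiB size is arbitrary):

/* Map, touch and unmap a large anonymous region so munmap() has
 * real page-zapping work to do under the downgraded lock. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 1UL << 30;	/* 1 GiB anonymous mapping */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return EXIT_FAILURE;
	}
	memset(p, 0xa5, len);	/* fault the pages in */

	/* With this patch, the bulk of the work below runs with
	 * mmap_sem held for read rather than write. */
	if (munmap(p, len)) {
		perror("munmap");
		return EXIT_FAILURE;
	}
	return EXIT_SUCCESS;
}
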