diff --git a/mm/mremap.c b/mm/mremap.c
index 975a14c..c5590af 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -493,7 +493,7 @@ static unsigned long move_vma(struct vm_area_struct *vma,
        unsigned long excess = 0;
        unsigned long hiwater_vm;
        int split = 0;
-       int err;
+       int err = 0;
        bool need_rmap_locks;
 
        /*
@@ -503,6 +503,15 @@ static unsigned long move_vma(struct vm_area_struct *vma,
        if (mm->map_count >= sysctl_max_map_count - 3)
                return -ENOMEM;
 
+       if (vma->vm_ops && vma->vm_ops->may_split) {
+               if (vma->vm_start != old_addr)
+                       err = vma->vm_ops->may_split(vma, old_addr);
+               if (!err && vma->vm_end != old_addr + old_len)
+                       err = vma->vm_ops->may_split(vma, old_addr + old_len);
+               if (err)
+                       return err;
+       }
+
        /*
         * Advise KSM to break any KSM pages in the area to be moved:
         * it would be confusing if they were to turn up at the new
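The may_split hook added above (together with the err = 0 initialization in the first hunk, which keeps err defined when only one of the two boundary checks runs) lets the backing driver veto the operation before move_vma() commits to splitting the source VMA at either end. A minimal kernel-side sketch of a ->may_split() implementation that refuses any split; the demo_* names are illustrative, not part of this patch:

	#include <linux/mm.h>

	/* Hypothetical hook: refuse partial moves/unmaps of this mapping. */
	static int demo_may_split(struct vm_area_struct *vma, unsigned long addr)
	{
		return -EINVAL;
	}

	static const struct vm_operations_struct demo_vm_ops = {
		.may_split = demo_may_split,
	};

Running the check this early matters: a refusal returns before ksm_madvise() and copy_vma() run, so there is nothing to back out.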
@@ -515,18 +524,26 @@ static unsigned long move_vma(struct vm_area_struct *vma,
        if (err)
                return err;
 
+       if (unlikely(flags & MREMAP_DONTUNMAP && vm_flags & VM_ACCOUNT)) {
+               if (security_vm_enough_memory_mm(mm, new_len >> PAGE_SHIFT))
+                       return -ENOMEM;
+       }
+
        new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
        new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff,
                           &need_rmap_locks);
-       if (!new_vma)
+       if (!new_vma) {
+               if (unlikely(flags & MREMAP_DONTUNMAP && vm_flags & VM_ACCOUNT))
+                       vm_unacct_memory(new_len >> PAGE_SHIFT);
                return -ENOMEM;
+       }
 
        moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
                                     need_rmap_locks);
        if (moved_len < old_len) {
                err = -ENOMEM;
        } else if (vma->vm_ops && vma->vm_ops->mremap) {
-               err = vma->vm_ops->mremap(new_vma);
+               err = vma->vm_ops->mremap(new_vma, flags);
        }
 
        if (unlikely(err)) {
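The hunk above makes two changes: a MREMAP_DONTUNMAP move of a VM_ACCOUNT mapping is now charged for the new range upfront (and unaccounted again if copy_vma() fails), and the ->mremap() callback receives the mremap flags, so a driver can reject MREMAP_DONTUNMAP when keeping two live mappings of its region would be unsafe. A sketch of such a callback, with demo_mremap as an illustrative name rather than anything from this patch:

	#include <linux/mm.h>
	#include <linux/mman.h>

	static int demo_mremap(struct vm_area_struct *new_vma, unsigned long flags)
	{
		if (flags & MREMAP_DONTUNMAP)
			return -EINVAL;	/* the old mapping would stay live too */
		return 0;
	}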
@@ -548,7 +565,7 @@ static unsigned long move_vma(struct vm_area_struct *vma,
        }
 
        /* Conceal VM_ACCOUNT so old reservation is not undone */
-       if (vm_flags & VM_ACCOUNT) {
+       if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP)) {
                vma->vm_flags &= ~VM_ACCOUNT;
                excess = vma->vm_end - vma->vm_start - old_len;
                if (old_addr > vma->vm_start &&
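With MREMAP_DONTUNMAP the old range stays mapped, so the usual trick of concealing VM_ACCOUNT (so that the later do_munmap() does not give back the old reservation) must be skipped, and the new range needs its own charge, which the security_vm_enough_memory_mm() call in the previous hunk takes before the move. An illustrative ledger, assuming old_len == new_len == N pages on a VM_ACCOUNT mapping:

	/*
	 * entry:                                      N pages committed
	 * security_vm_enough_memory_mm(mm, N):       2N committed
	 * copy_vma() fails -> vm_unacct_memory(N):   back to N, -ENOMEM
	 * success: old vma keeps N, new vma holds N: 2N remain committed
	 */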
@@ -573,34 +590,16 @@ static unsigned long move_vma(struct vm_area_struct *vma,
                untrack_pfn_moved(vma);
 
        if (unlikely(!err && (flags & MREMAP_DONTUNMAP))) {
-               if (vm_flags & VM_ACCOUNT) {
-                       /* Always put back VM_ACCOUNT since we won't unmap */
-                       vma->vm_flags |= VM_ACCOUNT;
-
-                       vm_acct_memory(new_len >> PAGE_SHIFT);
-               }
-
-               /*
-                * VMAs can actually be merged back together in copy_vma
-                * calling merge_vma. This can happen with anonymous vmas
-                * which have not yet been faulted, so if we were to consider
-                * this VMA split we'll end up adding VM_ACCOUNT on the
-                * next VMA, which is completely unrelated if this VMA
-                * was re-merged.
-                */
-               if (split && new_vma == vma)
-                       split = 0;
-
                /* We always clear VM_LOCKED[ONFAULT] on the old vma */
                vma->vm_flags &= VM_LOCKED_CLEAR_MASK;
 
                /* Because we won't unmap we don't need to touch locked_vm */
-               goto out;
+               return new_addr;
        }
 
        if (do_munmap(mm, old_addr, old_len, uf_unmap) < 0) {
                /* OOM: unable to split vma, just get accounts right */
-               if (vm_flags & VM_ACCOUNT)
+               if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP))
                        vm_acct_memory(new_len >> PAGE_SHIFT);
                excess = 0;
        }
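The block deleted above had to restore VM_ACCOUNT and call vm_acct_memory() after the move, and to special-case VMAs that copy_vma() had re-merged; with the charge now taken before copy_vma(), the MREMAP_DONTUNMAP path only clears VM_LOCKED on the old VMA and returns new_addr directly. From userspace the result looks like this; a minimal demo, assuming Linux 5.7+ (where MREMAP_DONTUNMAP first appeared, limited to private anonymous mappings):

	#define _GNU_SOURCE
	#include <stdio.h>
	#include <string.h>
	#include <sys/mman.h>

	#ifndef MREMAP_DONTUNMAP
	#define MREMAP_DONTUNMAP 4	/* uapi value, if libc headers lack it */
	#endif

	int main(void)
	{
		size_t len = 4 * 4096;
		char *old = mmap(NULL, len, PROT_READ | PROT_WRITE,
				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		if (old == MAP_FAILED)
			return 1;
		memset(old, 'x', len);

		/*
		 * Move the pages but leave the old range mapped (now empty).
		 * MREMAP_DONTUNMAP requires MREMAP_MAYMOVE and equal sizes.
		 */
		char *new = mremap(old, len, len,
				   MREMAP_MAYMOVE | MREMAP_DONTUNMAP);
		if (new == MAP_FAILED)
			return 1;

		/* Data followed the move; the old range reads back as zeroes. */
		printf("new[0]=%c old[0]=%d\n", new[0], old[0]);
		return 0;
	}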
@@ -609,7 +608,7 @@ static unsigned long move_vma(struct vm_area_struct *vma,
                mm->locked_vm += new_len >> PAGE_SHIFT;
                *locked = true;
        }
-out:
+
        mm->hiwater_vm = hiwater_vm;
 
        /* Restore VM_ACCOUNT if one or two pieces of vma left */