mm/mmap.c
index 313b57d..61e6135 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -102,26 +102,31 @@ static void unmap_region(struct mm_struct *mm,
  *                                                             x: (yes) yes
  */
 pgprot_t protection_map[16] __ro_after_init = {
-       __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
-       __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
+       [VM_NONE]                                       = __P000,
+       [VM_READ]                                       = __P001,
+       [VM_WRITE]                                      = __P010,
+       [VM_WRITE | VM_READ]                            = __P011,
+       [VM_EXEC]                                       = __P100,
+       [VM_EXEC | VM_READ]                             = __P101,
+       [VM_EXEC | VM_WRITE]                            = __P110,
+       [VM_EXEC | VM_WRITE | VM_READ]                  = __P111,
+       [VM_SHARED]                                     = __S000,
+       [VM_SHARED | VM_READ]                           = __S001,
+       [VM_SHARED | VM_WRITE]                          = __S010,
+       [VM_SHARED | VM_WRITE | VM_READ]                = __S011,
+       [VM_SHARED | VM_EXEC]                           = __S100,
+       [VM_SHARED | VM_EXEC | VM_READ]                 = __S101,
+       [VM_SHARED | VM_EXEC | VM_WRITE]                = __S110,
+       [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]      = __S111
 };
 
-#ifndef CONFIG_ARCH_HAS_FILTER_PGPROT
-static inline pgprot_t arch_filter_pgprot(pgprot_t prot)
-{
-       return prot;
-}
-#endif
-
+#ifndef CONFIG_ARCH_HAS_VM_GET_PAGE_PROT
 pgprot_t vm_get_page_prot(unsigned long vm_flags)
 {
-       pgprot_t ret = __pgprot(pgprot_val(protection_map[vm_flags &
-                               (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
-                       pgprot_val(arch_vm_get_page_prot(vm_flags)));
-
-       return arch_filter_pgprot(ret);
+       return protection_map[vm_flags & (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)];
 }
 EXPORT_SYMBOL(vm_get_page_prot);
+#endif /* CONFIG_ARCH_HAS_VM_GET_PAGE_PROT */
 
 static pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags)
 {
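
The designated initializers above make the flag-to-protection mapping explicit: the table index is just the combination of VM_READ, VM_WRITE, VM_EXEC and VM_SHARED bits. A minimal sketch of how the generic vm_get_page_prot() resolves a lookup (the caller shown is hypothetical):

        /* Hypothetical caller: a private, read-write mapping. Only the
         * four flags above take part in the protection_map lookup.
         */
        pgprot_t prot = vm_get_page_prot(VM_READ | VM_WRITE);
        /* Equivalent to protection_map[VM_WRITE | VM_READ], i.e. __P011. */
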
@@ -1218,7 +1223,7 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
                                         end, prev->vm_pgoff, NULL, prev);
                if (err)
                        return NULL;
-               khugepaged_enter_vma_merge(prev, vm_flags);
+               khugepaged_enter_vma(prev, vm_flags);
                return prev;
        }
 
@@ -1245,7 +1250,7 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
                }
                if (err)
                        return NULL;
-               khugepaged_enter_vma_merge(area, vm_flags);
+               khugepaged_enter_vma(area, vm_flags);
                return area;
        }
 
@@ -1280,7 +1285,7 @@ static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *
  * the same as 'old', the other will be the new one that is trying
  * to share the anon_vma.
  *
- * NOTE! This runs with mm_sem held for reading, so it is possible that
+ * NOTE! This runs with mmap_lock held for reading, so it is possible that
  * the anon_vma of 'old' is concurrently in the process of being set up
  * by another page fault trying to merge _that_. But that's ok: if it
  * is being set up, that automatically means that it will be a singleton
@@ -1294,7 +1299,7 @@ static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *
  *
  * We also make sure that the two vma's are compatible (adjacent,
  * and with the same memory policies). That's all stable, even with just
- * a read lock on the mm_sem.
+ * a read lock on the mmap_lock.
  */
 static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_area_struct *a, struct vm_area_struct *b)
 {
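
The corrected comments reflect the mmap_sem to mmap_lock rename. The read-mode pattern the comment relies on, using the standard helpers from <linux/mmap_lock.h>, looks roughly like this (sketch; error handling omitted):

        mmap_read_lock(mm);             /* shared mode: concurrent faults allowed */
        vma = find_vma(mm, addr);       /* VMA layout is stable while held */
        /* ... inspect the VMA or handle the fault ... */
        mmap_read_unlock(mm);
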
@@ -1842,6 +1847,13 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
        }
 
        vma_link(mm, vma, prev, rb_link, rb_parent);
+
+       /*
+        * vma_merge() calls khugepaged_enter_vma() too; the call below
+        * covers the non-merge case.
+        */
+       khugepaged_enter_vma(vma, vma->vm_flags);
+
        /* Once vma denies write, undo our temporary denial count */
 unmap_writable:
        if (file && vm_flags & VM_SHARED)
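
The added khugepaged_enter_vma() call mirrors what vma_merge() already does on the merge path, so every freshly mapped VMA gets considered for collapse. The overall shape of mmap_region(), condensed as a sketch from the surrounding code (arguments elided):

        /* Merge path: vma_merge() registers the result with khugepaged. */
        vma = vma_merge(mm, prev, addr, addr + len, vm_flags, ...);
        if (!vma) {
                /* No merge possible: allocate and link a fresh VMA ... */
                vma = vm_area_alloc(mm);
                /* ... set it up, then ... */
                vma_link(mm, vma, prev, rb_link, rb_parent);
                /* ... register it with khugepaged explicitly. */
                khugepaged_enter_vma(vma, vma->vm_flags);
        }
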
@@ -2128,15 +2140,15 @@ unsigned long vm_unmapped_area(struct vm_unmapped_area_info *info)
  *
  * This function "knows" that -ENOMEM has the bits set.
  */
-#ifndef HAVE_ARCH_UNMAPPED_AREA
 unsigned long
-arch_get_unmapped_area(struct file *filp, unsigned long addr,
-               unsigned long len, unsigned long pgoff, unsigned long flags)
+generic_get_unmapped_area(struct file *filp, unsigned long addr,
+                         unsigned long len, unsigned long pgoff,
+                         unsigned long flags)
 {
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma, *prev;
        struct vm_unmapped_area_info info;
-       const unsigned long mmap_end = arch_get_mmap_end(addr);
+       const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);
 
        if (len > mmap_end - mmap_min_addr)
                return -ENOMEM;
@@ -2161,22 +2173,30 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
        info.align_offset = 0;
        return vm_unmapped_area(&info);
 }
+
+#ifndef HAVE_ARCH_UNMAPPED_AREA
+unsigned long
+arch_get_unmapped_area(struct file *filp, unsigned long addr,
+                      unsigned long len, unsigned long pgoff,
+                      unsigned long flags)
+{
+       return generic_get_unmapped_area(filp, addr, len, pgoff, flags);
+}
 #endif
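
Splitting the body into generic_get_unmapped_area() lets an architecture that defines HAVE_ARCH_UNMAPPED_AREA reuse the common logic instead of duplicating it. A hypothetical arch-side override might look like this (the alignment predicate is made up for illustration):

unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
                       unsigned long len, unsigned long pgoff,
                       unsigned long flags)
{
        /* Hypothetical arch quirk: force 2MiB alignment for some files
         * before deferring to the common implementation.
         */
        if (filp && arch_wants_big_alignment(filp))     /* made-up helper */
                addr = ALIGN(addr, SZ_2M);

        return generic_get_unmapped_area(filp, addr, len, pgoff, flags);
}
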
 
 /*
  * This mmap-allocator allocates new areas top-down from below the
  * stack's low limit (the base):
  */
-#ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
 unsigned long
-arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
-                         unsigned long len, unsigned long pgoff,
-                         unsigned long flags)
+generic_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
+                                 unsigned long len, unsigned long pgoff,
+                                 unsigned long flags)
 {
        struct vm_area_struct *vma, *prev;
        struct mm_struct *mm = current->mm;
        struct vm_unmapped_area_info info;
-       const unsigned long mmap_end = arch_get_mmap_end(addr);
+       const unsigned long mmap_end = arch_get_mmap_end(addr, len, flags);
 
        /* requested length too big for entire address space */
        if (len > mmap_end - mmap_min_addr)
@@ -2219,6 +2239,15 @@ arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
 
        return addr;
 }
+
+#ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
+unsigned long
+arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
+                              unsigned long len, unsigned long pgoff,
+                              unsigned long flags)
+{
+       return generic_get_unmapped_area_topdown(filp, addr, len, pgoff, flags);
+}
 #endif
 
 unsigned long
@@ -2340,15 +2369,8 @@ static int acct_stack_growth(struct vm_area_struct *vma,
                return -ENOMEM;
 
        /* mlock limit tests */
-       if (vma->vm_flags & VM_LOCKED) {
-               unsigned long locked;
-               unsigned long limit;
-               locked = mm->locked_vm + grow;
-               limit = rlimit(RLIMIT_MEMLOCK);
-               limit >>= PAGE_SHIFT;
-               if (locked > limit && !capable(CAP_IPC_LOCK))
-                       return -ENOMEM;
-       }
+       if (mlock_future_check(mm, vma->vm_flags, grow << PAGE_SHIFT))
+               return -ENOMEM;
 
        /* Check to ensure the stack will not grow into a hugetlb-only region */
        new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start :
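
The open-coded RLIMIT_MEMLOCK test is replaced by mlock_future_check(), which takes the growth in bytes (hence grow << PAGE_SHIFT, since grow is in pages here). Reconstructed from the removed lines, the helper's check is roughly equivalent to the sketch below; the real helper's exact return value may differ, and the caller maps any failure to -ENOMEM:

static inline int mlock_future_check(struct mm_struct *mm,
                                     unsigned long flags, unsigned long bytes)
{
        unsigned long locked, limit;

        if (flags & VM_LOCKED) {
                locked = mm->locked_vm + (bytes >> PAGE_SHIFT);
                limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
                if (locked > limit && !capable(CAP_IPC_LOCK))
                        return -ENOMEM;         /* over the mlock limit */
        }
        return 0;
}
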
@@ -2452,7 +2474,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
                }
        }
        anon_vma_unlock_write(vma->anon_vma);
-       khugepaged_enter_vma_merge(vma, vma->vm_flags);
+       khugepaged_enter_vma(vma, vma->vm_flags);
        validate_mm(mm);
        return error;
 }
@@ -2530,7 +2552,7 @@ int expand_downwards(struct vm_area_struct *vma,
                }
        }
        anon_vma_unlock_write(vma->anon_vma);
-       khugepaged_enter_vma_merge(vma, vma->vm_flags);
+       khugepaged_enter_vma(vma, vma->vm_flags);
        validate_mm(mm);
        return error;
 }
@@ -3553,7 +3575,7 @@ int mm_take_all_locks(struct mm_struct *mm)
        struct vm_area_struct *vma;
        struct anon_vma_chain *avc;
 
-       BUG_ON(mmap_read_trylock(mm));
+       mmap_assert_write_locked(mm);
 
        mutex_lock(&mm_all_locks_mutex);
 
@@ -3633,7 +3655,7 @@ void mm_drop_all_locks(struct mm_struct *mm)
        struct vm_area_struct *vma;
        struct anon_vma_chain *avc;
 
-       BUG_ON(mmap_read_trylock(mm));
+       mmap_assert_write_locked(mm);
        BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));
 
        for (vma = mm->mmap; vma; vma = vma->vm_next) {
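
BUG_ON(mmap_read_trylock(mm)) was a roundabout assertion: it fires only when the read trylock succeeds, i.e. when no writer holds mmap_lock, and on the way it briefly acquires the very lock it is checking. mmap_assert_write_locked() states the precondition directly; its generic form in <linux/mmap_lock.h> is roughly:

static inline void mmap_assert_write_locked(struct mm_struct *mm)
{
        /* With lockdep: verify the current task holds the lock for writing. */
        lockdep_assert_held_write(&mm->mmap_lock);
        /* Without lockdep: at least verify the rwsem is held at all. */
        VM_BUG_ON_MM(!rwsem_is_locked(&mm->mmap_lock), mm);
}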