diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 87a1779..27d1354 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -489,14 +489,33 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
        struct page *page;
        struct queue_pages *qp = walk->private;
        unsigned long flags = qp->flags;
-       int nid;
+       int nid, ret;
        pte_t *pte;
        spinlock_t *ptl;
 
-       split_huge_page_pmd(vma, addr, pmd);
-       if (pmd_trans_unstable(pmd))
-               return 0;
+       if (pmd_trans_huge(*pmd)) {
+               ptl = pmd_lock(walk->mm, pmd);
+               if (pmd_trans_huge(*pmd)) {
+                       page = pmd_page(*pmd);
+                       if (is_huge_zero_page(page)) {
+                               spin_unlock(ptl);
+                               split_huge_pmd(vma, pmd, addr);
+                       } else {
+                               get_page(page);
+                               spin_unlock(ptl);
+                               lock_page(page);
+                               ret = split_huge_page(page);
+                               unlock_page(page);
+                               put_page(page);
+                               if (ret)
+                                       return 0;
+                       }
+               } else {
+                       spin_unlock(ptl);
+               }
+       }
 
+retry:
        pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
        for (; addr != end; pte++, addr += PAGE_SIZE) {
                if (!pte_present(*pte))
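
The hunk above replaces the old unconditional split_huge_page_pmd() call with a check-lock-recheck sequence: pmd_trans_huge() is tested once without the lock as a fast path, then re-tested under pmd_lock(), because another thread may split the pmd in the window between the two tests. The huge zero page is split in place via split_huge_pmd(); a real THP is pinned with get_page() and split only after the spinlock is dropped, since lock_page() may sleep. A minimal userspace sketch of that double-checked shape, with a pthread mutex standing in for the pmd lock and all names invented for illustration:

#include <pthread.h>
#include <stdio.h>

enum { STATE_HUGE, STATE_SPLIT };

static int state = STATE_HUGE;
static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;

static void handle(void)
{
        if (state == STATE_HUGE) {              /* unlocked fast-path test */
                pthread_mutex_lock(&state_lock);
                if (state == STATE_HUGE)        /* recheck under the lock */
                        state = STATE_SPLIT;    /* "split" it ourselves */
                pthread_mutex_unlock(&state_lock);
        }
}

int main(void)
{
        handle();
        printf("state=%d\n", state);
        return 0;
}
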
@@ -513,6 +532,21 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
                nid = page_to_nid(page);
                if (node_isset(nid, *qp->nmask) == !!(flags & MPOL_MF_INVERT))
                        continue;
+               if (PageTail(page) && PageAnon(page)) {
+                       get_page(page);
+                       pte_unmap_unlock(pte, ptl);
+                       lock_page(page);
+                       ret = split_huge_page(page);
+                       unlock_page(page);
+                       put_page(page);
+                       /* Failed to split -- skip. */
+                       if (ret) {
+                               pte = pte_offset_map_lock(walk->mm, pmd,
+                                               addr, &ptl);
+                               continue;
+                       }
+                       goto retry;
+               }
 
                if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
                        migrate_page_add(page, qp->pagelist, flags);
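
This second hunk covers THPs still visible as pte-mapped tail pages: the walk pins the page, drops the pte spinlock (again because lock_page() may sleep), tries split_huge_page(), and on success jumps back to the retry label to re-map the page table and re-examine the same address; on failure it re-takes the lock and simply skips the page. A rough userspace analogue of that drop-lock/retry loop, with every name invented:

#include <pthread.h>
#include <stdbool.h>

#define N 16

static int items[N];
static pthread_mutex_t scan_lock = PTHREAD_MUTEX_INITIALIZER;

static bool needs_split(int v) { return v < 0; }
static int do_split(int *v) { *v = -*v; return 0; }     /* may "sleep" */

static void scan(void)
{
        int i = 0;
retry:
        pthread_mutex_lock(&scan_lock);
        for (; i < N; i++) {
                if (!needs_split(items[i]))
                        continue;
                /* do_split() may sleep, so drop the lock first */
                pthread_mutex_unlock(&scan_lock);
                if (do_split(&items[i])) {
                        /* split failed: re-take the lock and skip */
                        pthread_mutex_lock(&scan_lock);
                        continue;
                }
                goto retry;     /* re-lock and re-examine items[i] */
        }
        pthread_mutex_unlock(&scan_lock);
}

int main(void)
{
        items[3] = -3;
        scan();
        return 0;
}
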
@@ -610,7 +644,8 @@ static int queue_pages_test_walk(unsigned long start, unsigned long end,
 
        if (flags & MPOL_MF_LAZY) {
                /* Similar to task_numa_work, skip inaccessible VMAs */
-               if (vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))
+               if (vma_migratable(vma) &&
+                       vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE))
                        change_prot_numa(vma, start, endvma);
                return 1;
        }
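
The MPOL_MF_LAZY hunk adds a vma_migratable() guard so change_prot_numa() is not applied to VMAs whose pages could never be migrated anyway. The unparenthesized condition is still correct: '&' binds tighter than '&&', so it parses as vma_migratable(vma) && (vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)). A tiny illustration of that precedence, using made-up flag values rather than the kernel's:

#include <assert.h>

#define F_READ  0x1UL
#define F_WRITE 0x2UL

int main(void)
{
        int migratable = 1;
        unsigned long vm_flags = F_WRITE;

        /* parses as: migratable && (vm_flags & (F_READ | F_WRITE)) */
        assert(migratable && vm_flags & (F_READ | F_WRITE));
        return 0;
}
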
@@ -2142,12 +2177,14 @@ bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
  *
  * Remember policies even when nobody has shared memory mapped.
  * The policies are kept in Red-Black tree linked from the inode.
- * They are protected by the sp->lock spinlock, which should be held
+ * They are protected by the sp->lock rwlock, which should be held
  * for any accesses to the tree.
  */
 
-/* lookup first element intersecting start-end */
-/* Caller holds sp->lock */
+/*
+ * Lookup first element intersecting start-end.  Caller holds sp->lock for
+ * reading or for writing.
+ */
 static struct sp_node *
 sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
 {
@@ -2178,8 +2215,10 @@ sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
        return rb_entry(n, struct sp_node, nd);
 }
 
-/* Insert a new shared policy into the list. */
-/* Caller holds sp->lock */
+/*
+ * Insert a new shared policy into the tree.  Caller holds sp->lock for
+ * writing.
+ */
 static void sp_insert(struct shared_policy *sp, struct sp_node *new)
 {
        struct rb_node **p = &sp->root.rb_node;
@@ -2211,13 +2250,13 @@ mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
 
        if (!sp->root.rb_node)
                return NULL;
-       spin_lock(&sp->lock);
+       read_lock(&sp->lock);
        sn = sp_lookup(sp, idx, idx+1);
        if (sn) {
                mpol_get(sn->policy);
                pol = sn->policy;
        }
-       spin_unlock(&sp->lock);
+       read_unlock(&sp->lock);
        return pol;
 }
 
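
mpol_shared_policy_lookup() is the hot, read-mostly path that motivates the conversion: it only walks the tree and takes a reference on the policy it finds, so with read_lock() concurrent lookups no longer serialize against each other. A userspace analogue of that read-side pattern, with pthread_rwlock_t standing in for the kernel rwlock_t and every other name invented:

#include <pthread.h>
#include <stdatomic.h>
#include <stddef.h>

struct policy { atomic_int refcnt; };

static pthread_rwlock_t tree_lock = PTHREAD_RWLOCK_INITIALIZER;
static struct policy the_policy = { 1 };

static struct policy *tree_lookup(unsigned long idx)
{
        return idx == 0 ? &the_policy : NULL;   /* stand-in for sp_lookup() */
}

static struct policy *lookup_and_get(unsigned long idx)
{
        struct policy *p;

        pthread_rwlock_rdlock(&tree_lock);      /* readers run in parallel */
        p = tree_lookup(idx);
        if (p)
                atomic_fetch_add(&p->refcnt, 1); /* pin before dropping lock */
        pthread_rwlock_unlock(&tree_lock);
        return p;
}

int main(void)
{
        return lookup_and_get(0) ? 0 : 1;
}
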
@@ -2360,7 +2399,7 @@ static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
        int ret = 0;
 
 restart:
-       spin_lock(&sp->lock);
+       write_lock(&sp->lock);
        n = sp_lookup(sp, start, end);
        /* Take care of old policies in the same range. */
        while (n && n->start < end) {
@@ -2393,7 +2432,7 @@ restart:
        }
        if (new)
                sp_insert(sp, new);
-       spin_unlock(&sp->lock);
+       write_unlock(&sp->lock);
        ret = 0;
 
 err_out:
@@ -2405,7 +2444,7 @@ err_out:
        return ret;
 
 alloc_new:
-       spin_unlock(&sp->lock);
+       write_unlock(&sp->lock);
        ret = -ENOMEM;
        n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
        if (!n_new)
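
shared_policy_replace() is the interesting writer: kmem_cache_alloc(GFP_KERNEL) may sleep, so when it discovers mid-update that it needs a new node it must drop the write lock, allocate, and jump back to restart, because the tree may have been modified while the lock was released. A compressed sketch of that alloc_new/restart dance, with pthreads and malloc standing in for the kernel primitives and all names invented:

#include <pthread.h>
#include <stdlib.h>

static pthread_rwlock_t tree_lock = PTHREAD_RWLOCK_INITIALIZER;
static void *spare;     /* node preallocated for the next pass */

static int replace_range(void)
{
restart:
        pthread_rwlock_wrlock(&tree_lock);
        if (!spare) {
                /* Need a node but must not allocate under the lock. */
                pthread_rwlock_unlock(&tree_lock);
                spare = malloc(64);     /* may "sleep", like GFP_KERNEL */
                if (!spare)
                        return -1;      /* -ENOMEM analogue */
                goto restart;           /* tree may have changed meanwhile */
        }
        /* ... modify the tree, consuming 'spare' ... */
        free(spare);                    /* the real code inserts it instead */
        spare = NULL;
        pthread_rwlock_unlock(&tree_lock);
        return 0;
}

int main(void)
{
        return replace_range();
}
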
@@ -2431,7 +2470,7 @@ void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
        int ret;
 
        sp->root = RB_ROOT;             /* empty tree == default mempolicy */
-       spin_lock_init(&sp->lock);
+       rwlock_init(&sp->lock);
 
        if (mpol) {
                struct vm_area_struct pvma;
@@ -2497,14 +2536,14 @@ void mpol_free_shared_policy(struct shared_policy *p)
 
        if (!p->root.rb_node)
                return;
-       spin_lock(&p->lock);
+       write_lock(&p->lock);
        next = rb_first(&p->root);
        while (next) {
                n = rb_entry(next, struct sp_node, nd);
                next = rb_next(&n->nd);
                sp_delete(p, n);
        }
-       spin_unlock(&p->lock);
+       write_unlock(&p->lock);
 }
 
 #ifdef CONFIG_NUMA_BALANCING
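
Finally, mpol_free_shared_policy() tears the whole tree down under the write lock, fetching rb_next() before sp_delete() removes the current node. The same fetch-the-successor-first idiom, shown on a singly linked list for brevity (all names invented):

#include <stdlib.h>

struct node { struct node *next; };

static void free_all(struct node *head)
{
        struct node *n = head, *next;

        while (n) {
                next = n->next; /* grab the successor first... */
                free(n);        /* ...then the current node may go away */
                n = next;
        }
}

int main(void)
{
        struct node *a = calloc(1, sizeof(*a));
        struct node *b = calloc(1, sizeof(*b));

        if (!a || !b)
                return 1;
        a->next = b;
        free_all(a);
        return 0;
}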