mm: memcontrol: replace the loop with a list_for_each_entry()
[linux-2.6-microblaze.git]
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 9237976..86a3015 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1439,7 +1439,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
                if (!get_page_unless_zero(page))
                        goto out_unlock;
                spin_unlock(vmf->ptl);
-               put_and_wait_on_page_locked(page);
+               put_and_wait_on_page_locked(page, TASK_UNINTERRUPTIBLE);
                goto out;
        }
 
@@ -1475,7 +1475,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t pmd)
                if (!get_page_unless_zero(page))
                        goto out_unlock;
                spin_unlock(vmf->ptl);
-               put_and_wait_on_page_locked(page);
+               put_and_wait_on_page_locked(page, TASK_UNINTERRUPTIBLE);
                goto out;
        }
 
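Both hunks above make the same change at the two migration-wait call sites in do_huge_pmd_numa_page(): put_and_wait_on_page_locked() now takes the sleep state as an explicit argument, and the NUMA hinting fault path sleeps uninterruptibly while the THP finishes migrating. Below is a minimal sketch of the calling pattern, assuming the signature int put_and_wait_on_page_locked(struct page *page, int state) from this era of mm/filemap.c; the wrapper function name is hypothetical, for illustration only:

        #include <linux/mm.h>
        #include <linux/pagemap.h>
        #include <linux/sched.h>
        #include <linux/spinlock.h>

        /* Hypothetical helper mirroring the pattern in the hunks above. */
        static void example_wait_for_thp_migration(struct vm_fault *vmf,
                                                   struct page *page)
        {
                if (!get_page_unless_zero(page))
                        return;                 /* page is already being freed */
                spin_unlock(vmf->ptl);          /* never sleep under the ptl */
                /*
                 * Drops the reference taken above before sleeping, so our
                 * refcount does not hold up migration; then waits for
                 * PG_locked to clear.  TASK_UNINTERRUPTIBLE because this
                 * fault path cannot back out on a signal.
                 */
                put_and_wait_on_page_locked(page, TASK_UNINTERRUPTIBLE);
        }
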
@@ -2176,7 +2176,8 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
                lock_page_memcg(page);
                if (atomic_add_negative(-1, compound_mapcount_ptr(page))) {
                        /* Last compound_mapcount is gone. */
-                       __dec_lruvec_page_state(page, NR_ANON_THPS);
+                       __mod_lruvec_page_state(page, NR_ANON_THPS,
+                                               -HPAGE_PMD_NR);
                        if (TestClearPageDoubleMap(page)) {
                                /* No need in mapcount reference anymore */
                                for (i = 0; i < HPAGE_PMD_NR; i++)
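This hunk is part of converting the NR_ANON_THPS counter from "number of THPs" to "number of base pages". The old one-liner was simply a -1 delta; paraphrasing the helper from include/linux/memcontrol.h in this tree (a sketch, not verbatim):

        /* __dec_lruvec_page_state() has always been shorthand for -1: */
        static inline void __dec_lruvec_page_state(struct page *page,
                                                   enum node_stat_item idx)
        {
                __mod_lruvec_page_state(page, idx, -1);
        }

With the counter now kept in base pages, dropping the last compound mapcount of one PMD-mapped THP must subtract HPAGE_PMD_NR (512 with 4 KiB base pages and 2 MiB THPs) rather than 1.
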
@@ -2202,7 +2203,7 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 {
        spinlock_t *ptl;
        struct mmu_notifier_range range;
-       bool was_locked = false;
+       bool do_unlock_page = false;
        pmd_t _pmd;
 
        mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
@@ -2218,7 +2219,6 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
        VM_BUG_ON(freeze && !page);
        if (page) {
                VM_WARN_ON_ONCE(!PageLocked(page));
-               was_locked = true;
                if (page != pmd_page(*pmd))
                        goto out;
        }
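These two hunks rename was_locked to do_unlock_page, flipping the flag's meaning from "the caller passed in a locked page" to "this function took the page lock itself". The distinction matters once locking becomes conditional on PageAnon() in the next hunk: "the caller did not lock it" no longer implies "we locked it here". A sketch of the old versus new exit logic (the comment is illustrative, not from the source):

        /*
         * Old exit, wrong once locking is conditional:
         *      if (!was_locked && page)
         *              unlock_page(page);  // may unlock a page never locked here
         * New exit pairs the unlock with exactly the path that locked:
         */
        if (do_unlock_page)
                unlock_page(page);
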
@@ -2227,19 +2227,29 @@ repeat:
        if (pmd_trans_huge(*pmd)) {
                if (!page) {
                        page = pmd_page(*pmd);
-                       if (unlikely(!trylock_page(page))) {
-                               get_page(page);
-                               _pmd = *pmd;
-                               spin_unlock(ptl);
-                               lock_page(page);
-                               spin_lock(ptl);
-                               if (unlikely(!pmd_same(*pmd, _pmd))) {
-                                       unlock_page(page);
+                       /*
+                        * An anonymous page must be locked, to ensure that a
+                        * concurrent reuse_swap_page() sees stable mapcount;
+                        * but reuse_swap_page() is not used on shmem or file,
+                        * and page lock must not be taken when zap_pmd_range()
+                        * calls __split_huge_pmd() while i_mmap_lock is held.
+                        */
+                       if (PageAnon(page)) {
+                               if (unlikely(!trylock_page(page))) {
+                                       get_page(page);
+                                       _pmd = *pmd;
+                                       spin_unlock(ptl);
+                                       lock_page(page);
+                                       spin_lock(ptl);
+                                       if (unlikely(!pmd_same(*pmd, _pmd))) {
+                                               unlock_page(page);
+                                               put_page(page);
+                                               page = NULL;
+                                               goto repeat;
+                                       }
                                        put_page(page);
-                                       page = NULL;
-                                       goto repeat;
                                }
-                               put_page(page);
+                               do_unlock_page = true;
                        }
                }
                if (PageMlocked(page))
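The block comment in the hunk carries the rationale: only anonymous THPs need the page lock here, so that a concurrent reuse_swap_page() sees a stable mapcount; shmem and file THPs must not take it, because zap_pmd_range() can call __split_huge_pmd() with i_mmap_lock already held (the madvise(MADV_REMOVE) hole-punch path), while other paths take the two locks in the opposite order. The retained trylock/lock_page/pmd_same dance is the usual drop-spinlock, take-sleeping-lock, revalidate pattern: after re-taking ptl, *pmd may have changed, so a pmd_same() failure restarts from repeat:. A sketch of the lock inversion, reconstructed from the hunk's comment plus the usual rmap locking rules (the exact reported call chains are an assumption):

        /*
         * MADV_REMOVE on shmem (this path):
         *      i_mmap_lock                     (B)
         *        zap_pmd_range()
         *          __split_huge_pmd()
         *            lock_page(page)           (A)  if we tried to take it
         *
         * rmap walk on a locked file page (e.g. during reclaim):
         *      lock_page(page)                 (A)
         *        rmap_walk_file()
         *          i_mmap_lock                 (B)
         *
         * A->B versus B->A: taking the page lock here could deadlock,
         * so it is now taken for PageAnon() pages only.
         */
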
@@ -2249,7 +2259,7 @@ repeat:
        __split_huge_pmd_locked(vma, pmd, range.start, freeze);
 out:
        spin_unlock(ptl);
-       if (!was_locked && page)
+       if (do_unlock_page)
                unlock_page(page);
        /*
         * No need to double call mmu_notifier->invalidate_range() callback.
@@ -2742,10 +2752,14 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
                }
                spin_unlock(&ds_queue->split_queue_lock);
                if (mapping) {
+                       int nr = thp_nr_pages(head);
+
                        if (PageSwapBacked(head))
-                               __dec_lruvec_page_state(head, NR_SHMEM_THPS);
+                               __mod_lruvec_page_state(head, NR_SHMEM_THPS,
+                                                       -nr);
                        else
-                               __dec_lruvec_page_state(head, NR_FILE_THPS);
+                               __mod_lruvec_page_state(head, NR_FILE_THPS,
+                                                       -nr);
                }
 
                __split_huge_page(page, list, end);
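
This is the same THP-to-base-pages counter conversion, applied to NR_SHMEM_THPS and NR_FILE_THPS when a huge page is split, with thp_nr_pages() supplying the delta. Paraphrasing the helper from include/linux/huge_mm.h in this tree (a sketch; the !CONFIG_TRANSPARENT_HUGEPAGE stub simply returns 1):

        static inline int thp_nr_pages(struct page *page)
        {
                VM_BUG_ON_PGFLAGS(PageTail(page), page);
                if (PageHead(page))
                        return HPAGE_PMD_NR;
                return 1;
        }

Here head is the THP's head page, so nr is HPAGE_PMD_NR, matching the -HPAGE_PMD_NR delta used for NR_ANON_THPS in the earlier hunk.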