diff --git a/mm/mlock.c b/mm/mlock.c
index f873613..93ca2bf 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -61,8 +61,7 @@ void clear_page_mlock(struct page *page)
        if (!TestClearPageMlocked(page))
                return;
 
-       mod_zone_page_state(page_zone(page), NR_MLOCK,
-                           -hpage_nr_pages(page));
+       mod_zone_page_state(page_zone(page), NR_MLOCK, -thp_nr_pages(page));
        count_vm_event(UNEVICTABLE_PGCLEARED);
        /*
         * The previous TestClearPageMlocked() corresponds to the smp_mb()
@@ -95,7 +94,7 @@ void mlock_vma_page(struct page *page)
 
        if (!TestSetPageMlocked(page)) {
                mod_zone_page_state(page_zone(page), NR_MLOCK,
-                                   hpage_nr_pages(page));
+                                   thp_nr_pages(page));
                count_vm_event(UNEVICTABLE_PGMLOCKED);
                if (!isolate_lru_page(page))
                        putback_lru_page(page);
@@ -192,7 +191,7 @@ unsigned int munlock_vma_page(struct page *page)
        /*
         * Serialize with any parallel __split_huge_page_refcount() which
         * might otherwise copy PageMlocked to part of the tail pages before
-        * we clear it in the head page. It also stabilizes hpage_nr_pages().
+        * we clear it in the head page. It also stabilizes thp_nr_pages().
         */
        spin_lock_irq(&pgdat->lru_lock);
 
@@ -202,7 +201,7 @@ unsigned int munlock_vma_page(struct page *page)
                goto unlock_out;
        }
 
-       nr_pages = hpage_nr_pages(page);
+       nr_pages = thp_nr_pages(page);
        __mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
 
        if (__munlock_isolate_lru_page(page, true)) {
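
For context on the accounting change above: thp_nr_pages(), like the hpage_nr_pages() it replaces, returns the number of base pages backing a page, so NR_MLOCK is adjusted by the full size of a transparent huge page rather than by 1. Below is a minimal sketch of that behaviour, assuming a CONFIG_TRANSPARENT_HUGEPAGE build; it is an approximation for illustration, not the verbatim upstream definition.

#include <linux/mm.h>		/* struct page, PageHead() */
#include <linux/huge_mm.h>	/* HPAGE_PMD_NR */

/*
 * Sketch: a THP head page accounts for HPAGE_PMD_NR base pages
 * (512 with 4KiB pages and 2MiB PMDs on x86-64); any other page
 * accounts for exactly one.
 */
static inline int thp_nr_pages(struct page *page)
{
	if (PageHead(page))
		return HPAGE_PMD_NR;
	return 1;
}

The hunks above rely on this value being used symmetrically: mlock_vma_page() adds the same count that clear_page_mlock() and munlock_vma_page() later subtract, so the NR_MLOCK zone counter stays balanced for huge pages.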