mm: use SWP_SYNCHRONOUS_IO more intelligently
diff --git a/mm/swap.c b/mm/swap.c
index 9285e60..d16d65d 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -241,7 +241,7 @@ static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec,
                del_page_from_lru_list(page, lruvec, page_lru(page));
                ClearPageActive(page);
                add_page_to_lru_list_tail(page, lruvec, page_lru(page));
-               (*pgmoved) += hpage_nr_pages(page);
+               (*pgmoved) += thp_nr_pages(page);
        }
 }
 
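Every hunk of this form is the same mechanical rename: hpage_nr_pages() becomes thp_nr_pages(). For reference, a sketch of the renamed helper as defined in include/linux/huge_mm.h with CONFIG_TRANSPARENT_HUGEPAGE enabled (without THP it is hardwired to 1); it returns the number of base pages backing a possibly-huge page:

static inline int thp_nr_pages(struct page *page)
{
	VM_BUG_ON_PGFLAGS(PageTail(page), page);
	if (PageHead(page))
		return HPAGE_PMD_NR;	/* base pages in a PMD-sized THP */
	return 1;			/* ordinary order-0 page */
}
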
@@ -312,7 +312,7 @@ void lru_note_cost(struct lruvec *lruvec, bool file, unsigned int nr_pages)
 void lru_note_cost_page(struct page *page)
 {
        lru_note_cost(mem_cgroup_page_lruvec(page, page_pgdat(page)),
-                     page_is_file_lru(page), hpage_nr_pages(page));
+                     page_is_file_lru(page), thp_nr_pages(page));
 }
 
 static void __activate_page(struct page *page, struct lruvec *lruvec,
@@ -320,7 +320,7 @@ static void __activate_page(struct page *page, struct lruvec *lruvec,
 {
        if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
                int lru = page_lru_base_type(page);
-               int nr_pages = hpage_nr_pages(page);
+               int nr_pages = thp_nr_pages(page);
 
                del_page_from_lru_list(page, lruvec, lru);
                SetPageActive(page);
@@ -500,7 +500,7 @@ void lru_cache_add_inactive_or_unevictable(struct page *page,
                 * lock is held(spinlock), which implies preemption disabled.
                 */
                __mod_zone_page_state(page_zone(page), NR_MLOCK,
-                                   hpage_nr_pages(page));
+                                   thp_nr_pages(page));
                count_vm_event(UNEVICTABLE_PGMLOCKED);
        }
        lru_cache_add(page);
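The comment in the hunk above justifies the irq-unsafe __mod_zone_page_state(): the counter is never modified from interrupt context, and the pte spinlock is held, so preemption is already disabled. As a sketch of why the distinction matters, the irq-safe variant in mm/vmstat.c is essentially the unlocked one wrapped in interrupt disabling:

void mod_zone_page_state(struct zone *zone, enum zone_stat_item item,
			 long delta)
{
	unsigned long flags;

	local_irq_save(flags);
	__mod_zone_page_state(zone, item, delta);
	local_irq_restore(flags);
}

Skipping that wrapping is safe exactly when the caller, as here, already excludes concurrent updates on this CPU.
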
@@ -532,7 +532,7 @@ static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
 {
        int lru;
        bool active;
-       int nr_pages = hpage_nr_pages(page);
+       int nr_pages = thp_nr_pages(page);
 
        if (!PageLRU(page))
                return;
@@ -580,7 +580,7 @@ static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
 {
        if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
                int lru = page_lru_base_type(page);
-               int nr_pages = hpage_nr_pages(page);
+               int nr_pages = thp_nr_pages(page);
 
                del_page_from_lru_list(page, lruvec, lru + LRU_ACTIVE);
                ClearPageActive(page);
@@ -599,7 +599,7 @@ static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
        if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
            !PageSwapCache(page) && !PageUnevictable(page)) {
                bool active = PageActive(page);
-               int nr_pages = hpage_nr_pages(page);
+               int nr_pages = thp_nr_pages(page);
 
                del_page_from_lru_list(page, lruvec,
                                       LRU_INACTIVE_ANON + active);
@@ -632,7 +632,8 @@ void lru_add_drain_cpu(int cpu)
                __pagevec_lru_add(pvec);
 
        pvec = &per_cpu(lru_rotate.pvec, cpu);
-       if (pagevec_count(pvec)) {
+       /* Disabling interrupts below acts as a compiler barrier. */
+       if (data_race(pagevec_count(pvec))) {
                unsigned long flags;
 
                /* No harm done if a racing interrupt already did this */
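The data_race() annotations here and in lru_add_drain_all() below mark the unlocked pagevec_count() reads as intentional: a racing update can at worst cause a harmless extra or skipped drain pass. A simplified sketch of the annotation (the real macro in include/linux/compiler.h uses __unqual_scalar_typeof() instead of __auto_type): it evaluates the expression with KCSAN data-race reporting suppressed:

#define data_race(expr)						\
({								\
	__kcsan_disable_current();	/* no KCSAN report */	\
	__auto_type __v = (expr);	/* plain racy read */	\
	__kcsan_enable_current();				\
	__v;							\
})
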
@@ -793,7 +794,7 @@ void lru_add_drain_all(void)
                struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);
 
                if (pagevec_count(&per_cpu(lru_pvecs.lru_add, cpu)) ||
-                   pagevec_count(&per_cpu(lru_rotate.pvec, cpu)) ||
+                   data_race(pagevec_count(&per_cpu(lru_rotate.pvec, cpu))) ||
                    pagevec_count(&per_cpu(lru_pvecs.lru_deactivate_file, cpu)) ||
                    pagevec_count(&per_cpu(lru_pvecs.lru_deactivate, cpu)) ||
                    pagevec_count(&per_cpu(lru_pvecs.lru_lazyfree, cpu)) ||
@@ -972,7 +973,7 @@ static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
 {
        enum lru_list lru;
        int was_unevictable = TestClearPageUnevictable(page);
-       int nr_pages = hpage_nr_pages(page);
+       int nr_pages = thp_nr_pages(page);
 
        VM_BUG_ON_PAGE(PageLRU(page), page);