Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
diff --git a/mm/vmscan.c b/mm/vmscan.c
index e84c4dd..99e1796 100644
@@ -854,6 +854,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
 {
        unsigned long flags;
        int refcount;
+       void *shadow = NULL;
 
        BUG_ON(!PageLocked(page));
        BUG_ON(mapping != page_mapping(page));
@@ -896,13 +897,13 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
        if (PageSwapCache(page)) {
                swp_entry_t swap = { .val = page_private(page) };
                mem_cgroup_swapout(page, swap);
-               __delete_from_swap_cache(page, swap, NULL);
+               if (reclaimed && !mapping_exiting(mapping))
+                       shadow = workingset_eviction(page, target_memcg);
+               __delete_from_swap_cache(page, swap, shadow);
                xa_unlock_irqrestore(&mapping->i_pages, flags);
                put_swap_page(page, swap);
-               workingset_eviction(page, target_memcg);
        } else {
                void (*freepage)(struct page *);
-               void *shadow = NULL;
 
                freepage = mapping->a_ops->freepage;
                /*
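
The first two hunks bring the workingset shadow handling for swap-cache pages in line with the file-cache branch below: the shadow entry is computed before the page is deleted, and __delete_from_swap_cache() stores it in the swap-cache XArray under the same i_pages lock, instead of workingset_eviction() running after the slots have already been cleared. The shadow is only recorded when the page is genuinely being reclaimed (not just migrated) and the mapping is not being torn down. A rough sketch of the store side follows; this is not the exact upstream body, and accounting and page-flag updates are omitted:

	void __delete_from_swap_cache(struct page *page,
				      swp_entry_t entry, void *shadow)
	{
		struct address_space *address_space = swap_address_space(entry);
		int i, nr = thp_nr_pages(page);
		XA_STATE(xas, &address_space->i_pages, swp_offset(entry));

		for (i = 0; i < nr; i++) {
			/* leave the shadow behind; NULL if none was recorded */
			xas_store(&xas, shadow);
			xas_next(&xas);
		}
	}
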
@@ -1353,7 +1354,7 @@ static unsigned int shrink_page_list(struct list_head *page_list,
                        case PAGE_ACTIVATE:
                                goto activate_locked;
                        case PAGE_SUCCESS:
-                               stat->nr_pageout += hpage_nr_pages(page);
+                               stat->nr_pageout += thp_nr_pages(page);
 
                                if (PageWriteback(page))
                                        goto keep;
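
This hunk and the two that follow are the same mechanical rename: hpage_nr_pages() became thp_nr_pages(). Both return the number of base pages backing a (possibly transparent-huge) page; roughly, assuming the usual definition in <linux/huge_mm.h>:

	static inline int thp_nr_pages(struct page *page)
	{
		if (PageHead(page))
			return HPAGE_PMD_NR;	/* 512 with 4K base pages on x86-64 */
		return 1;	/* ordinary page */
	}
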
@@ -1861,7 +1862,7 @@ static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
                SetPageLRU(page);
                lru = page_lru(page);
 
-               nr_pages = hpage_nr_pages(page);
+               nr_pages = thp_nr_pages(page);
                update_lru_size(lruvec, lru, page_zonenum(page), nr_pages);
                list_move(&page->lru, &lruvec->lists[lru]);
 
@@ -2064,7 +2065,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
                         * so we ignore them here.
                         */
                        if ((vm_flags & VM_EXEC) && page_is_file_lru(page)) {
-                               nr_rotated += hpage_nr_pages(page);
+                               nr_rotated += thp_nr_pages(page);
                                list_add(&page->lru, &l_active);
                                continue;
                        }
@@ -2206,7 +2207,7 @@ static bool inactive_is_low(struct lruvec *lruvec, enum lru_list inactive_lru)
        active = lruvec_page_state(lruvec, NR_LRU_BASE + active_lru);
 
        gb = (inactive + active) >> (30 - PAGE_SHIFT);
-       if (gb && is_file_lru(inactive_lru))
+       if (gb)
                inactive_ratio = int_sqrt(10 * gb);
        else
                inactive_ratio = 1;
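
Dropping the is_file_lru() check means the square-root scaling of the inactive:active ratio now applies to the anonymous LRU as well, where anon was previously pinned at a fixed 1:1 ratio. A worked example, assuming 4K pages (PAGE_SHIFT == 12):

	/*
	 * inactive + active = 10 GiB of pages
	 *   gb             = (10 GiB in pages) >> (30 - 12) = 10
	 *   inactive_ratio = int_sqrt(10 * 10) = 10
	 *
	 * inactive_is_low() then compares inactive * inactive_ratio
	 * against active, so it reports "low" once the inactive list
	 * falls below roughly 1/11th of the combined LRU.
	 */
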
@@ -2797,7 +2798,7 @@ again:
                        set_bit(PGDAT_DIRTY, &pgdat->flags);
 
                /*
-                * If kswapd scans pages marked marked for immediate
+                * If kswapd scans pages marked for immediate
                 * reclaim and under writeback (nr_immediate), it
                 * implies that pages are cycling through the LRU
                 * faster than they are written so also forcibly stall.
@@ -3372,7 +3373,7 @@ static bool pgdat_watermark_boosted(pg_data_t *pgdat, int highest_zoneidx)
        /*
         * Check for watermark boosts top-down as the higher zones
         * are more likely to be boosted. Both watermarks and boosts
-        * should not be checked at the time time as reclaim would
+        * should not be checked at the same time as reclaim would
         * start prematurely when there is no boosting and a lower
         * zone is balanced.
         */