Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 72da290..99e1796 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -854,6 +854,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
 {
        unsigned long flags;
        int refcount;
+       void *shadow = NULL;
 
        BUG_ON(!PageLocked(page));
        BUG_ON(mapping != page_mapping(page));
@@ -896,13 +897,13 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
        if (PageSwapCache(page)) {
                swp_entry_t swap = { .val = page_private(page) };
                mem_cgroup_swapout(page, swap);
-               __delete_from_swap_cache(page, swap);
+               if (reclaimed && !mapping_exiting(mapping))
+                       shadow = workingset_eviction(page, target_memcg);
+               __delete_from_swap_cache(page, swap, shadow);
                xa_unlock_irqrestore(&mapping->i_pages, flags);
                put_swap_page(page, swap);
-               workingset_eviction(page, target_memcg);
        } else {
                void (*freepage)(struct page *);
-               void *shadow = NULL;
 
                freepage = mapping->a_ops->freepage;
                /*
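
Context for the two hunks above: on reclaim of an anon page, __remove_mapping() now obtains the shadow entry from workingset_eviction() first (only when actually reclaiming, and not when the mapping is being torn down) and hands it to __delete_from_swap_cache(), which stores it in the freed swap-cache slot; a later refault can then read the shadow back and compute its refault distance. The following is a minimal userspace sketch of that pack/unpack idea, with an invented one-bit tag layout rather than the kernel's real shadow encoding:

/*
 * Illustrative model of a shadow entry: on eviction, pack the LRU
 * eviction "clock" into a tagged word stored where the page pointer
 * used to be; on refault, unpack it to compute the refault distance.
 * The tag layout here is invented for the example.
 */
#include <assert.h>
#include <stdio.h>

#define SHADOW_TAG 1UL  /* low bit set marks a shadow, not a page pointer */

static unsigned long pack_shadow(unsigned long eviction)
{
	return (eviction << 1) | SHADOW_TAG;
}

static unsigned long unpack_shadow(unsigned long shadow)
{
	assert(shadow & SHADOW_TAG);
	return shadow >> 1;
}

int main(void)
{
	unsigned long lru_clock = 1000;          /* evictions so far */
	unsigned long shadow = pack_shadow(lru_clock);

	lru_clock += 250;                        /* more evictions pass */

	/* refault distance: evictions between eviction and refault */
	printf("refault distance: %lu\n", lru_clock - unpack_shadow(shadow));
	return 0;
}
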
@@ -998,8 +999,6 @@ static enum page_references page_check_references(struct page *page,
                return PAGEREF_RECLAIM;
 
        if (referenced_ptes) {
-               if (PageSwapBacked(page))
-                       return PAGEREF_ACTIVATE;
                /*
                 * All mapped pages start out with page table
                 * references from the instantiating fault, so we need
@@ -1022,7 +1021,7 @@ static enum page_references page_check_references(struct page *page,
                /*
                 * Activate file-backed executable pages after first usage.
                 */
-               if (vm_flags & VM_EXEC)
+               if ((vm_flags & VM_EXEC) && !PageSwapBacked(page))
                        return PAGEREF_ACTIVATE;
 
                return PAGEREF_KEEP;
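
This hunk and the one before it stop activating every referenced swap-backed page on sight: anon pages now go through the same second-reference test as file pages, and the first-use activation shortcut is limited to file-backed executables. A standalone model of the resulting decision is sketched below; the helper signature is invented (in the kernel the inputs come from page_referenced() and the PG_referenced flag):

#include <stdbool.h>

enum page_references {
	PAGEREF_RECLAIM,
	PAGEREF_KEEP,
	PAGEREF_ACTIVATE,
};

static enum page_references
check_references_model(int referenced_ptes, bool referenced_page,
		       bool swap_backed, bool vm_exec)
{
	if (!referenced_ptes)
		return PAGEREF_RECLAIM;

	/* Seen twice (flag already set, or a shared mapping): hot. */
	if (referenced_page || referenced_ptes > 1)
		return PAGEREF_ACTIVATE;

	/* First-use activation now applies to file-backed executables only. */
	if (vm_exec && !swap_backed)
		return PAGEREF_ACTIVATE;

	/* Referenced anon pages stay inactive instead of activating;
	 * refault information decides whether they are genuinely hot. */
	return PAGEREF_KEEP;
}
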
@@ -1355,7 +1354,7 @@ static unsigned int shrink_page_list(struct list_head *page_list,
                        case PAGE_ACTIVATE:
                                goto activate_locked;
                        case PAGE_SUCCESS:
-                               stat->nr_pageout += hpage_nr_pages(page);
+                               stat->nr_pageout += thp_nr_pages(page);
 
                                if (PageWriteback(page))
                                        goto keep;
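
This hunk and the two below it are a mechanical rename of hpage_nr_pages() to thp_nr_pages(), which returns how many base pages a (possibly compound) page spans, so counters such as nr_pageout charge a 2MB THP as 512 pages instead of one. A toy model of the arithmetic, using an invented struct in place of struct page:

#include <stdio.h>

/* Toy stand-in for struct page; order 0 = base page, 9 = 2MB THP on x86-64. */
struct page_model { unsigned int compound_order; };

/* Models thp_nr_pages(): number of base pages the page spans. */
static unsigned long thp_nr_pages_model(const struct page_model *page)
{
	return 1UL << page->compound_order;
}

int main(void)
{
	struct page_model base = { 0 }, thp = { 9 };
	unsigned long nr_pageout = 0;

	nr_pageout += thp_nr_pages_model(&base);  /* +1   */
	nr_pageout += thp_nr_pages_model(&thp);   /* +512 */
	printf("nr_pageout = %lu\n", nr_pageout); /* 513  */
	return 0;
}
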
@@ -1863,7 +1862,7 @@ static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
                SetPageLRU(page);
                lru = page_lru(page);
 
-               nr_pages = hpage_nr_pages(page);
+               nr_pages = thp_nr_pages(page);
                update_lru_size(lruvec, lru, page_zonenum(page), nr_pages);
                list_move(&page->lru, &lruvec->lists[lru]);
 
@@ -2066,7 +2065,7 @@ static void shrink_active_list(unsigned long nr_to_scan,
                         * so we ignore them here.
                         */
                        if ((vm_flags & VM_EXEC) && page_is_file_lru(page)) {
-                               nr_rotated += hpage_nr_pages(page);
+                               nr_rotated += thp_nr_pages(page);
                                list_add(&page->lru, &l_active);
                                continue;
                        }
@@ -2685,7 +2684,10 @@ again:
        if (!sc->force_deactivate) {
                unsigned long refaults;
 
-               if (inactive_is_low(target_lruvec, LRU_INACTIVE_ANON))
+               refaults = lruvec_page_state(target_lruvec,
+                               WORKINGSET_ACTIVATE_ANON);
+               if (refaults != target_lruvec->refaults[0] ||
+                       inactive_is_low(target_lruvec, LRU_INACTIVE_ANON))
                        sc->may_deactivate |= DEACTIVATE_ANON;
                else
                        sc->may_deactivate &= ~DEACTIVATE_ANON;
@@ -2696,8 +2698,8 @@ again:
                 * rid of any stale active pages quickly.
                 */
                refaults = lruvec_page_state(target_lruvec,
-                                            WORKINGSET_ACTIVATE);
-               if (refaults != target_lruvec->refaults ||
+                               WORKINGSET_ACTIVATE_FILE);
+               if (refaults != target_lruvec->refaults[1] ||
                    inactive_is_low(target_lruvec, LRU_INACTIVE_FILE))
                        sc->may_deactivate |= DEACTIVATE_FILE;
                else
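
The two hunks above split the refault-driven deactivation check by LRU type: anon and file each compare their own WORKINGSET_ACTIVATE_* counter against a per-type snapshot (refaults[0] for anon, refaults[1] for file), so anon lists can now be deactivated when anon refaults are observed, not only when inactive_is_low() says so. A userspace sketch of the comparison, assuming a simplified stand-in for the lruvec fields:

#include <stdbool.h>

/* Simplified stand-in for the lruvec fields this diff touches. */
struct lruvec_model {
	unsigned long workingset_activate_anon; /* running counter */
	unsigned long workingset_activate_file; /* running counter */
	unsigned long refaults[2];              /* snapshot: [0]=anon, [1]=file */
};

/* Deactivate anon if activations happened since the last snapshot,
 * or the inactive anon list has grown too small. */
static bool may_deactivate_anon(const struct lruvec_model *lv,
				bool inactive_anon_low)
{
	return lv->workingset_activate_anon != lv->refaults[0] ||
	       inactive_anon_low;
}

static bool may_deactivate_file(const struct lruvec_model *lv,
				bool inactive_file_low)
{
	return lv->workingset_activate_file != lv->refaults[1] ||
	       inactive_file_low;
}
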
@@ -2796,7 +2798,7 @@ again:
                        set_bit(PGDAT_DIRTY, &pgdat->flags);
 
                /*
-                * If kswapd scans pages marked marked for immediate
+                * If kswapd scans pages marked for immediate
                 * reclaim and under writeback (nr_immediate), it
                 * implies that pages are cycling through the LRU
                 * faster than they are written so also forcibly stall.
@@ -2974,8 +2976,10 @@ static void snapshot_refaults(struct mem_cgroup *target_memcg, pg_data_t *pgdat)
        unsigned long refaults;
 
        target_lruvec = mem_cgroup_lruvec(target_memcg, pgdat);
-       refaults = lruvec_page_state(target_lruvec, WORKINGSET_ACTIVATE);
-       target_lruvec->refaults = refaults;
+       refaults = lruvec_page_state(target_lruvec, WORKINGSET_ACTIVATE_ANON);
+       target_lruvec->refaults[0] = refaults;
+       refaults = lruvec_page_state(target_lruvec, WORKINGSET_ACTIVATE_FILE);
+       target_lruvec->refaults[1] = refaults;
 }
 
 /*
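
snapshot_refaults() above is the other half of that mechanism: at the end of a reclaim cycle it records both running counters, so the next cycle's comparison only sees activations that happened in between. Continuing the same invented lruvec_model from the sketch above:

/* Companion to the sketch above: take the per-cycle snapshot. */
static void snapshot_refaults_model(struct lruvec_model *lv)
{
	lv->refaults[0] = lv->workingset_activate_anon;
	lv->refaults[1] = lv->workingset_activate_file;
}
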
@@ -3369,7 +3373,7 @@ static bool pgdat_watermark_boosted(pg_data_t *pgdat, int highest_zoneidx)
        /*
         * Check for watermark boosts top-down as the higher zones
         * are more likely to be boosted. Both watermarks and boosts
-        * should not be checked at the time time as reclaim would
+        * should not be checked at the same time as reclaim would
         * start prematurely when there is no boosting and a lower
         * zone is balanced.
         */