diff --git a/mm/vmscan.c b/mm/vmscan.c
index b1b574a..562e87c 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -310,7 +310,8 @@ unsigned long zone_reclaimable_pages(struct zone *zone)
  * @lru: lru to use
  * @zone_idx: zones to consider (use MAX_NR_ZONES for the whole LRU list)
  */
-unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru, int zone_idx)
+static unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru,
+                                    int zone_idx)
 {
        unsigned long size = 0;
        int zid;
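
The kernel-doc above spells out the zone_idx convention. A minimal caller
sketch, assuming a lruvec already in hand as in the callers later in this
file: a whole-node query for one list passes MAX_NR_ZONES.

        /* Sketch: total pages on this node's inactive-file list. */
        unsigned long nr = lruvec_lru_size(lruvec, LRU_INACTIVE_FILE,
                                           MAX_NR_ZONES);
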
@@ -1539,19 +1540,17 @@ unsigned int reclaim_clean_pages_from_list(struct zone *zone,
  * page:       page to consider
  * mode:       one of the LRU isolation modes defined above
  *
- * returns 0 on success, -ve errno on failure.
+ * returns true on success, false on failure.
  */
-int __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode)
+bool __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode)
 {
-       int ret = -EBUSY;
-
        /* Only take pages on the LRU. */
        if (!PageLRU(page))
-               return ret;
+               return false;
 
        /* Compaction should not handle unevictable pages but CMA can do so */
        if (PageUnevictable(page) && !(mode & ISOLATE_UNEVICTABLE))
-               return ret;
+               return false;
 
        /*
         * To minimise LRU disruption, the caller can indicate that it only
@@ -1564,7 +1563,7 @@ int __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode)
        if (mode & ISOLATE_ASYNC_MIGRATE) {
                /* All the caller can do on PageWriteback is block */
                if (PageWriteback(page))
-                       return ret;
+                       return false;
 
                if (PageDirty(page)) {
                        struct address_space *mapping;
@@ -1580,20 +1579,20 @@ int __isolate_lru_page_prepare(struct page *page, isolate_mode_t mode)
                         * from the page cache.
                         */
                        if (!trylock_page(page))
-                               return ret;
+                               return false;
 
                        mapping = page_mapping(page);
                        migrate_dirty = !mapping || mapping->a_ops->migratepage;
                        unlock_page(page);
                        if (!migrate_dirty)
-                               return ret;
+                               return false;
                }
        }
 
        if ((mode & ISOLATE_UNMAPPED) && page_mapped(page))
-               return ret;
+               return false;
 
-       return 0;
+       return true;
 }
 
 /*
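
With the errno return gone, call sites reduce to a plain boolean test. A
minimal caller sketch; the failure label is hypothetical, not taken from
this diff:

        if (!__isolate_lru_page_prepare(page, mode))
                goto isolate_fail;      /* hypothetical label */
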
@@ -1677,35 +1676,31 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
                 * only when the page is being freed somewhere else.
                 */
                scan += nr_pages;
-               switch (__isolate_lru_page_prepare(page, mode)) {
-               case 0:
-                       /*
-                        * Be careful not to clear PageLRU until after we're
-                        * sure the page is not being freed elsewhere -- the
-                        * page release code relies on it.
-                        */
-                       if (unlikely(!get_page_unless_zero(page)))
-                               goto busy;
-
-                       if (!TestClearPageLRU(page)) {
-                               /*
-                                * This page may in other isolation path,
-                                * but we still hold lru_lock.
-                                */
-                               put_page(page);
-                               goto busy;
-                       }
-
-                       nr_taken += nr_pages;
-                       nr_zone_taken[page_zonenum(page)] += nr_pages;
-                       list_move(&page->lru, dst);
-                       break;
+               if (!__isolate_lru_page_prepare(page, mode)) {
+                       /* It is being freed elsewhere */
+                       list_move(&page->lru, src);
+                       continue;
+               }
+               /*
+                * Be careful not to clear PageLRU until after we're
+                * sure the page is not being freed elsewhere -- the
+                * page release code relies on it.
+                */
+               if (unlikely(!get_page_unless_zero(page))) {
+                       list_move(&page->lru, src);
+                       continue;
+               }
 
-               default:
-busy:
-                       /* else it is being freed elsewhere */
+               if (!TestClearPageLRU(page)) {
+                       /* Another thread is already isolating this page */
+                       put_page(page);
                        list_move(&page->lru, src);
+                       continue;
                }
+
+               nr_taken += nr_pages;
+               nr_zone_taken[page_zonenum(page)] += nr_pages;
+               list_move(&page->lru, dst);
        }
 
        /*
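
Read together, the added and context lines above turn the old switch into
straight-line code: every failure path puts the page back on src and
continues, and only a fully isolated page moves to dst. The post-patch
loop body, reconstructed from this hunk:

        if (!__isolate_lru_page_prepare(page, mode)) {
                /* It is being freed elsewhere */
                list_move(&page->lru, src);
                continue;
        }
        /*
         * Be careful not to clear PageLRU until after we're sure the
         * page is not being freed elsewhere -- the page release code
         * relies on it.
         */
        if (unlikely(!get_page_unless_zero(page))) {
                list_move(&page->lru, src);
                continue;
        }
        if (!TestClearPageLRU(page)) {
                /* Another thread is already isolating this page */
                put_page(page);
                list_move(&page->lru, src);
                continue;
        }
        nr_taken += nr_pages;
        nr_zone_taken[page_zonenum(page)] += nr_pages;
        list_move(&page->lru, dst);
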
@@ -1772,7 +1767,7 @@ int isolate_lru_page(struct page *page)
 
                get_page(page);
                lruvec = lock_page_lruvec_irq(page);
-               del_page_from_lru_list(page, lruvec, page_lru(page));
+               del_page_from_lru_list(page, lruvec);
                unlock_page_lruvec_irq(lruvec);
                ret = 0;
        }
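
The dropped third argument means the helper now derives the list itself
from the page. A sketch of the shape del_page_from_lru_list() plausibly
takes after this change; the real body lives in mm_inline.h, so treat this
as an assumption rather than the committed code:

static __always_inline void del_page_from_lru_list(struct page *page,
                                                   struct lruvec *lruvec)
{
        list_del(&page->lru);
        update_lru_size(lruvec, page_lru(page), page_zonenum(page),
                        -thp_nr_pages(page));
}
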
@@ -1829,7 +1824,6 @@ static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
        int nr_pages, nr_moved = 0;
        LIST_HEAD(pages_to_free);
        struct page *page;
-       enum lru_list lru;
 
        while (!list_empty(list)) {
                page = lru_to_page(list);
@@ -1856,8 +1850,7 @@ static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
                SetPageLRU(page);
 
                if (unlikely(put_page_testzero(page))) {
-                       __ClearPageLRU(page);
-                       __ClearPageActive(page);
+                       __clear_page_lru_flags(page);
 
                        if (unlikely(PageCompound(page))) {
                                spin_unlock_irq(&lruvec->lru_lock);
@@ -1874,11 +1867,8 @@ static unsigned noinline_for_stack move_pages_to_lru(struct lruvec *lruvec,
                 * inhibits memcg migration).
                 */
                VM_BUG_ON_PAGE(!lruvec_holds_page_lru_lock(page, lruvec), page);
-               lru = page_lru(page);
+               add_page_to_lru_list(page, lruvec);
                nr_pages = thp_nr_pages(page);
-
-               update_lru_size(lruvec, lru, page_zonenum(page), nr_pages);
-               list_add(&page->lru, &lruvec->lists[lru]);
                nr_moved += nr_pages;
                if (PageActive(page))
                        workingset_age_nonresident(lruvec, nr_pages);
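
The three removed lines above are what the one-argument
add_page_to_lru_list() now does internally, and __clear_page_lru_flags()
bundles the flag clears seen in the earlier hunk. Plausible shapes,
inferred from the removed lines; the real bodies live in mm_inline.h, and
the Unevictable clear in particular is an assumption:

static __always_inline void add_page_to_lru_list(struct page *page,
                                                 struct lruvec *lruvec)
{
        enum lru_list lru = page_lru(page);

        update_lru_size(lruvec, lru, page_zonenum(page), thp_nr_pages(page));
        list_add(&page->lru, &lruvec->lists[lru]);
}

static __always_inline void __clear_page_lru_flags(struct page *page)
{
        __ClearPageLRU(page);
        __ClearPageActive(page);
        __ClearPageUnevictable(page);   /* assumed, see note above */
}
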
@@ -4095,8 +4085,13 @@ module_init(kswapd_init)
  */
 int node_reclaim_mode __read_mostly;
 
-#define RECLAIM_WRITE (1<<0)   /* Writeout pages during reclaim */
-#define RECLAIM_UNMAP (1<<1)   /* Unmap pages during reclaim */
+/*
+ * These bit locations are exposed in the vm.zone_reclaim_mode sysctl
+ * ABI.  New bits are OK, but existing bits can never change.
+ */
+#define RECLAIM_ZONE  (1<<0)   /* Run shrink_inactive_list on the zone */
+#define RECLAIM_WRITE (1<<1)   /* Writeout pages during reclaim */
+#define RECLAIM_UNMAP (1<<2)   /* Unmap pages during reclaim */
 
 /*
  * Priority for NODE_RECLAIM. This determines the fraction of pages
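
Because these bits are sysctl ABI, the value written to
vm.zone_reclaim_mode is a bitwise OR of them: 1 enables zone reclaim, 3
adds writeout, 5 adds unmapping. A hypothetical helper showing a test
against the new layout (the function name is illustrative, not part of
this diff):

static bool reclaim_may_writeout(void)
{
        /* e.g. vm.zone_reclaim_mode = 3 sets RECLAIM_ZONE | RECLAIM_WRITE */
        return node_reclaim_mode & RECLAIM_WRITE;
}
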
@@ -4292,12 +4287,9 @@ void check_move_unevictable_pages(struct pagevec *pvec)
 
                lruvec = relock_page_lruvec_irq(page, lruvec);
                if (page_evictable(page) && PageUnevictable(page)) {
-                       enum lru_list lru = page_lru_base_type(page);
-
-                       VM_BUG_ON_PAGE(PageActive(page), page);
+                       del_page_from_lru_list(page, lruvec);
                        ClearPageUnevictable(page);
-                       del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE);
-                       add_page_to_lru_list(page, lruvec, lru);
+                       add_page_to_lru_list(page, lruvec);
                        pgrescued += nr_pages;
                }
                SetPageLRU(page);
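
Note the ordering the one-argument helpers force here: the page must leave
the LRU while PageUnevictable is still set, so del_page_from_lru_list()
computes the unevictable list, and it is re-added only after the flag is
cleared, so add_page_to_lru_list() picks an evictable list. The resulting
block, annotated:

        if (page_evictable(page) && PageUnevictable(page)) {
                /* flag still set: helper removes from the unevictable list */
                del_page_from_lru_list(page, lruvec);
                ClearPageUnevictable(page);
                /* flag cleared: helper re-adds to an evictable list */
                add_page_to_lru_list(page, lruvec);
                pgrescued += nr_pages;
        }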