*/
        unsigned long           recent_rotated[2];
        unsigned long           recent_scanned[2];
+
+       /*
+        * scan requests accumulated here until they add up to a
+        * batch worth acting on (see nr_scan_try_batch())
+        */
+       unsigned long           nr_saved_scan[NR_LRU_LISTS];
 };
 
 struct zone {
        spinlock_t              lru_lock;       
        struct zone_lru {
                struct list_head list;
-               unsigned long nr_saved_scan;    /* accumulated for batching */
        } lru[NR_LRU_LISTS];
 
        struct zone_reclaim_stat reclaim_stat;
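
For reference, the reclaim_stat that both call sites below index is looked up
with get_reclaim_stat(), so global reclaim keeps using the statistics embedded
in struct zone while memcg reclaim gets its per-cgroup copy; that is what lets
the scanning_global_lru() special case go away. A sketch of that helper as it
reads in mm/vmscan.c around this series (quoted from memory, not part of this
diff):

static struct zone_reclaim_stat *get_reclaim_stat(struct zone *zone,
						  struct scan_control *sc)
{
	/* global reclaim: statistics embedded in struct zone */
	if (scanning_global_lru(sc))
		return &zone->reclaim_stat;

	/* memcg reclaim: per-cgroup, per-zone statistics */
	return mem_cgroup_get_reclaim_stat(sc->mem_cgroup, zone);
}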
 
        enum lru_list l;
        unsigned long nr_reclaimed = sc->nr_reclaimed;
        unsigned long swap_cluster_max = sc->swap_cluster_max;
+       struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
        int noswap = 0;
 
        /* If we have no swap space, do not bother scanning anon pages. */
                        scan >>= priority;
                        scan = (scan * percent[file]) / 100;
                }
-               if (scanning_global_lru(sc))
-                       nr[l] = nr_scan_try_batch(scan,
-                                                 &zone->lru[l].nr_saved_scan,
-                                                 swap_cluster_max);
-               else
-                       nr[l] = scan;
+               nr[l] = nr_scan_try_batch(scan,
+                                         &reclaim_stat->nr_saved_scan[l],
+                                         swap_cluster_max);
        }
 
        while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
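
With the counter moved into zone_reclaim_stat, the memcg path now gets the same
batching behaviour as global reclaim instead of passing raw scan counts through.
For readers without the file open, nr_scan_try_batch() looks roughly like this
(paraphrased from mm/vmscan.c of the same period, not part of this diff): small
scan requests are saved up and only reported back once they reach
swap_cluster_max.

static unsigned long nr_scan_try_batch(unsigned long nr_to_scan,
				       unsigned long *nr_saved_scan,
				       unsigned long swap_cluster_max)
{
	unsigned long nr;

	*nr_saved_scan += nr_to_scan;
	nr = *nr_saved_scan;

	/* only report a scan once enough work has accumulated */
	if (nr >= swap_cluster_max)
		*nr_saved_scan = 0;
	else
		nr = 0;

	return nr;
}
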
 {
        struct zone *zone;
        unsigned long nr_reclaimed = 0;
+       struct zone_reclaim_stat *reclaim_stat;
 
        for_each_populated_zone(zone) {
                enum lru_list l;
                                                l == LRU_ACTIVE_FILE))
                                continue;
 
-                       zone->lru[l].nr_saved_scan += (lru_pages >> prio) + 1;
-                       if (zone->lru[l].nr_saved_scan >= nr_pages || pass > 3) {
+                       reclaim_stat = get_reclaim_stat(zone, sc);
+                       reclaim_stat->nr_saved_scan[l] +=
+                                               (lru_pages >> prio) + 1;
+                       if (reclaim_stat->nr_saved_scan[l]
+                                               >= nr_pages || pass > 3) {
                                unsigned long nr_to_scan;
 
-                               zone->lru[l].nr_saved_scan = 0;
+                               reclaim_stat->nr_saved_scan[l] = 0;
                                nr_to_scan = min(nr_pages, lru_pages);
                                nr_reclaimed += shrink_list(l, nr_to_scan, zone,
                                                                sc, prio);