mm/page_alloc: allow high-order pages to be stored on the per-cpu lists
index a75a826..6c11db7 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -95,7 +95,7 @@ static void __put_single_page(struct page *page)
 {
        __page_cache_release(page);
        mem_cgroup_uncharge(page);
-       free_unref_page(page);
+       free_unref_page(page, 0);
 }
 
 static void __put_compound_page(struct page *page)
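The hunk above follows the free_unref_page() signature change made by this
series: the page order is now passed explicitly so that high-order pages can
be kept on the per-cpu lists. A minimal sketch of the new calling convention
(illustrative only; example_free() is a hypothetical caller, not part of the
patch):

	/* A base page is order 0, which is why __put_single_page() passes 0;
	 * a generic caller holding a possibly-compound page would pass the
	 * order reported by compound_order() instead. */
	static void example_free(struct page *page)
	{
		free_unref_page(page, compound_order(page));
	}
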
@@ -313,7 +313,7 @@ void lru_note_cost(struct lruvec *lruvec, bool file, unsigned int nr_pages)
 
 void lru_note_cost_page(struct page *page)
 {
-       lru_note_cost(mem_cgroup_page_lruvec(page, page_pgdat(page)),
+       lru_note_cost(mem_cgroup_page_lruvec(page),
                      page_is_file_lru(page), thp_nr_pages(page));
 }
 
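The pgdat argument is dropped above because mem_cgroup_page_lruvec() can
derive the node data from the page itself. A hedged sketch of what the
simplified helper does internally (names follow the memcg API of this kernel;
the exact body may differ):

	static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page)
	{
		/* derived from the page, no longer passed in by the caller */
		pg_data_t *pgdat = page_pgdat(page);

		return mem_cgroup_lruvec(page_memcg(page), pgdat);
	}
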
@@ -496,7 +496,7 @@ void lru_cache_add_inactive_or_unevictable(struct page *page,
        if (unlikely(unevictable) && !TestSetPageMlocked(page)) {
                int nr_pages = thp_nr_pages(page);
                /*
-                * We use the irq-unsafe __mod_zone_page_stat because this
+                * We use the irq-unsafe __mod_zone_page_state because this
                 * counter is not modified from interrupt context, and the pte
                 * lock is held(spinlock), which implies preemption disabled.
                 */
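The comment fixed above refers to the vmstat accessor pair: the
leading-underscore variant skips interrupt protection and relies on the
caller's context. A sketch of the distinction (the NR_MLOCK update mirrors
the surrounding function; the irq-safe line is shown only for contrast):

	/* irq-unsafe: legal here because NR_MLOCK is never modified from
	 * interrupt context and the pte spinlock disables preemption. */
	__mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages);

	/* irq-safe variant: wraps the update in local_irq_save/restore and
	 * must be used when the caller cannot make those guarantees. */
	mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages);
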
@@ -808,7 +808,7 @@ inline void __lru_add_drain_all(bool force_all_cpus)
         * below which drains the page vectors.
         *
         * Let x, y, and z represent some system CPU numbers, where x < y < z.
-        * Assume CPU #z is is in the middle of the for_each_online_cpu loop
+        * Assume CPU #z is in the middle of the for_each_online_cpu loop
         * below and has already reached CPU #y's per-cpu data. CPU #x comes
         * along, adds some pages to its per-cpu vectors, then calls
         * lru_add_drain_all().
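The comment above (typo aside) describes the race that the generation counter
in __lru_add_drain_all() closes: CPU #x's newly added pages are already
covered by CPU #z's in-flight drain. A condensed sketch of that idiom,
assuming the lru_drain_gen scheme used by this function (abridged;
drain_all_sketch() is a hypothetical stand-in and the draining loop is
elided):

	static unsigned int lru_drain_gen;
	static DEFINE_MUTEX(lock);

	static void drain_all_sketch(bool force_all_cpus)
	{
		/* Pairs with the smp_mb() below: a caller that observes the
		 * bumped generation also observes the pages that the drain
		 * in progress is about to flush. */
		unsigned int this_gen = smp_load_acquire(&lru_drain_gen);

		mutex_lock(&lock);
		if (this_gen != lru_drain_gen && !force_all_cpus)
			goto done;	/* concurrent drain already covers our pages */

		WRITE_ONCE(lru_drain_gen, lru_drain_gen + 1);
		smp_mb();	/* order the bump against the per-cpu vector reads */

		/* ... for_each_online_cpu() draining loop runs here ... */
	done:
		mutex_unlock(&lock);
	}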