mm/memcontrol.c: clean up some useless code
[linux-2.6-microblaze.git] / mm / memcontrol.c
index 6c83cf4..6f6dc87 100644 (file)
@@ -5340,14 +5340,6 @@ static int mem_cgroup_move_account(struct page *page,
                __mod_lruvec_state(to_vec, NR_WRITEBACK, nr_pages);
        }
 
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-       if (compound && !list_empty(page_deferred_list(page))) {
-               spin_lock(&from->deferred_split_queue.split_queue_lock);
-               list_del_init(page_deferred_list(page));
-               from->deferred_split_queue.split_queue_len--;
-               spin_unlock(&from->deferred_split_queue.split_queue_lock);
-       }
-#endif
        /*
         * It is safe to change page->mem_cgroup here because the page
         * is referenced, charged, and isolated - we can't race with
@@ -5357,16 +5349,6 @@ static int mem_cgroup_move_account(struct page *page,
        /* caller should have done css_get */
        page->mem_cgroup = to;
 
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-       if (compound && list_empty(page_deferred_list(page))) {
-               spin_lock(&to->deferred_split_queue.split_queue_lock);
-               list_add_tail(page_deferred_list(page),
-                             &to->deferred_split_queue.split_queue);
-               to->deferred_split_queue.split_queue_len++;
-               spin_unlock(&to->deferred_split_queue.split_queue_lock);
-       }
-#endif
-
        spin_unlock_irqrestore(&from->move_lock, flags);
 
        ret = 0;
@@ -6651,7 +6633,6 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
 {
        struct mem_cgroup *memcg;
        unsigned int nr_pages;
-       bool compound;
        unsigned long flags;
 
        VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage);
@@ -6673,8 +6654,7 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
                return;
 
        /* Force-charge the new page. The old one will be freed soon */
-       compound = PageTransHuge(newpage);
-       nr_pages = compound ? hpage_nr_pages(newpage) : 1;
+       nr_pages = hpage_nr_pages(newpage);
 
        page_counter_charge(&memcg->memory, nr_pages);
        if (do_memsw_account())
@@ -6684,7 +6664,8 @@ void mem_cgroup_migrate(struct page *oldpage, struct page *newpage)
        commit_charge(newpage, memcg, false);
 
        local_irq_save(flags);
-       mem_cgroup_charge_statistics(memcg, newpage, compound, nr_pages);
+       mem_cgroup_charge_statistics(memcg, newpage, PageTransHuge(newpage),
+                       nr_pages);
        memcg_check_events(memcg, newpage);
        local_irq_restore(flags);
 }