[linux-2.6-microblaze.git] / mm / page_alloc.c
index 909adce..3b13d39 100644
@@ -43,6 +43,7 @@
 #include <linux/mempolicy.h>
 #include <linux/memremap.h>
 #include <linux/stop_machine.h>
+#include <linux/random.h>
 #include <linux/sort.h>
 #include <linux/pfn.h>
 #include <linux/backing-dev.h>
@@ -72,6 +73,7 @@
 #include <asm/tlbflush.h>
 #include <asm/div64.h>
 #include "internal.h"
+#include "shuffle.h"
 
 /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
 static DEFINE_MUTEX(pcp_batch_high_lock);
@@ -755,12 +757,6 @@ static inline void set_page_order(struct page *page, unsigned int order)
        __SetPageBuddy(page);
 }
 
-static inline void rmv_page_order(struct page *page)
-{
-       __ClearPageBuddy(page);
-       set_page_private(page, 0);
-}
-
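The removed rmv_page_order() is not simply dropped: its __ClearPageBuddy()/set_page_private() body, together with the open-coded list_del()/list_add()/nr_free accounting scattered through this file, moves into a small set of free_area helpers (del_page_from_free_area(), add_to_free_area(), add_to_free_area_tail(), move_to_free_area(), get_page_from_free_area(), free_area_empty()) that the rest of this diff switches to. A sketch of their shape, reconstructed from the call sites below rather than quoted from the header, so the exact upstream definitions may differ:

    static inline void add_to_free_area(struct page *page,
                                        struct free_area *area, int migratetype)
    {
            list_add(&page->lru, &area->free_list[migratetype]);
            area->nr_free++;
    }

    static inline void add_to_free_area_tail(struct page *page,
                                             struct free_area *area, int migratetype)
    {
            list_add_tail(&page->lru, &area->free_list[migratetype]);
            area->nr_free++;
    }

    static inline void del_page_from_free_area(struct page *page,
                                               struct free_area *area)
    {
            list_del(&page->lru);
            __ClearPageBuddy(page);         /* what rmv_page_order() used to do */
            set_page_private(page, 0);
            area->nr_free--;
    }

    static inline bool free_area_empty(struct free_area *area, int migratetype)
    {
            return list_empty(&area->free_list[migratetype]);
    }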
 /*
  * This function checks whether a page is free && is the buddy
  * we can coalesce a page and its buddy if
@@ -918,13 +914,10 @@ continue_merging:
                 * Our buddy is free or it is CONFIG_DEBUG_PAGEALLOC guard page,
                 * merge with it and move up one order.
                 */
-               if (page_is_guard(buddy)) {
+               if (page_is_guard(buddy))
                        clear_page_guard(zone, buddy, order, migratetype);
-               } else {
-                       list_del(&buddy->lru);
-                       zone->free_area[order].nr_free--;
-                       rmv_page_order(buddy);
-               }
+               else
+                       del_page_from_free_area(buddy, &zone->free_area[order]);
                combined_pfn = buddy_pfn & pfn;
                page = page + (combined_pfn - pfn);
                pfn = combined_pfn;
@@ -966,7 +959,8 @@ done_merging:
         * so it's less likely to be used soon and more likely to be merged
         * as a higher order page
         */
-       if ((order < MAX_ORDER-2) && pfn_valid_within(buddy_pfn)) {
+       if ((order < MAX_ORDER-2) && pfn_valid_within(buddy_pfn)
+                       && !is_shuffle_order(order)) {
                struct page *higher_page, *higher_buddy;
                combined_pfn = buddy_pfn & pfn;
                higher_page = page + (combined_pfn - pfn);
@@ -974,15 +968,18 @@ done_merging:
                higher_buddy = higher_page + (buddy_pfn - combined_pfn);
                if (pfn_valid_within(buddy_pfn) &&
                    page_is_buddy(higher_page, higher_buddy, order + 1)) {
-                       list_add_tail(&page->lru,
-                               &zone->free_area[order].free_list[migratetype]);
-                       goto out;
+                       add_to_free_area_tail(page, &zone->free_area[order],
+                                             migratetype);
+                       return;
                }
        }
 
-       list_add(&page->lru, &zone->free_area[order].free_list[migratetype]);
-out:
-       zone->free_area[order].nr_free++;
+       if (is_shuffle_order(order))
+               add_to_free_area_random(page, &zone->free_area[order],
+                               migratetype);
+       else
+               add_to_free_area(page, &zone->free_area[order], migratetype);
+
 }
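This is the core of the free-page shuffling that the new <linux/random.h> and "shuffle.h" includes at the top of the file support: a page freed at the shuffle order is queued at a random end of the free list instead of always at the head, and the higher-order merge heuristic above is skipped for that order so it cannot undo the randomization. A minimal sketch of what the shuffle.h/shuffle.c side plausibly looks like; the helper names match the call sites here, but the internals are illustrative only:

    /* mm/shuffle.h (sketch): shuffling is only done for max-order blocks */
    #define SHUFFLE_ORDER   (MAX_ORDER - 1)

    static inline bool is_shuffle_order(int order)
    {
            /* upstream additionally gates this behind a static key, enabled
             * when a memory-side cache is detected or requested */
            return order >= SHUFFLE_ORDER;
    }

    /* mm/shuffle.c (sketch): place the page at a random end of the list */
    void add_to_free_area_random(struct page *page, struct free_area *area,
                                 int migratetype)
    {
            /* the real code batches 64 random bits per RNG call rather than
             * asking for fresh randomness on every freed page */
            if (get_random_u32() & 1)
                    add_to_free_area(page, area, migratetype);
            else
                    add_to_free_area_tail(page, area, migratetype);
    }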
 
 /*
@@ -1416,36 +1413,22 @@ int __meminit early_pfn_to_nid(unsigned long pfn)
 #endif
 
 #ifdef CONFIG_NODES_SPAN_OTHER_NODES
-static inline bool __meminit __maybe_unused
-meminit_pfn_in_nid(unsigned long pfn, int node,
-                  struct mminit_pfnnid_cache *state)
+/* Only safe to use early in boot when initialisation is single-threaded */
+static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
 {
        int nid;
 
-       nid = __early_pfn_to_nid(pfn, state);
+       nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
        if (nid >= 0 && nid != node)
                return false;
        return true;
 }
 
-/* Only safe to use early in boot when initialisation is single-threaded */
-static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
-{
-       return meminit_pfn_in_nid(pfn, node, &early_pfnnid_cache);
-}
-
 #else
-
 static inline bool __meminit early_pfn_in_nid(unsigned long pfn, int node)
 {
        return true;
 }
-static inline bool __meminit  __maybe_unused
-meminit_pfn_in_nid(unsigned long pfn, int node,
-                  struct mminit_pfnnid_cache *state)
-{
-       return true;
-}
 #endif
 
 
@@ -1574,21 +1557,13 @@ static inline void __init pgdat_init_report_one_done(void)
  *
  * Then, we check if a current large page is valid by only checking the validity
  * of the head pfn.
- *
- * Finally, meminit_pfn_in_nid is checked on systems where pfns can interleave
- * within a node: a pfn is between start and end of a node, but does not belong
- * to this memory node.
  */
-static inline bool __init
-deferred_pfn_valid(int nid, unsigned long pfn,
-                  struct mminit_pfnnid_cache *nid_init_state)
+static inline bool __init deferred_pfn_valid(unsigned long pfn)
 {
        if (!pfn_valid_within(pfn))
                return false;
        if (!(pfn & (pageblock_nr_pages - 1)) && !pfn_valid(pfn))
                return false;
-       if (!meminit_pfn_in_nid(pfn, nid, nid_init_state))
-               return false;
        return true;
 }
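With the node check gone, validity is decided purely at pageblock granularity: for example, with pageblock_nr_pages == 512, a full pfn_valid() lookup happens only at pfns 0x80000, 0x80200, 0x80400, and so on, while the 511 pfns inside each block are accepted on the strength of that single head check (modulo pfn_valid_within() on configurations that allow holes inside a MAX_ORDER block).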
 
@@ -1596,15 +1571,14 @@ deferred_pfn_valid(int nid, unsigned long pfn,
  * Free pages to buddy allocator. Try to free aligned pages in
  * pageblock_nr_pages sizes.
  */
-static void __init deferred_free_pages(int nid, int zid, unsigned long pfn,
+static void __init deferred_free_pages(unsigned long pfn,
                                       unsigned long end_pfn)
 {
-       struct mminit_pfnnid_cache nid_init_state = { };
        unsigned long nr_pgmask = pageblock_nr_pages - 1;
        unsigned long nr_free = 0;
 
        for (; pfn < end_pfn; pfn++) {
-               if (!deferred_pfn_valid(nid, pfn, &nid_init_state)) {
+               if (!deferred_pfn_valid(pfn)) {
                        deferred_free_range(pfn - nr_free, nr_free);
                        nr_free = 0;
                } else if (!(pfn & nr_pgmask)) {
@@ -1624,17 +1598,18 @@ static void __init deferred_free_pages(int nid, int zid, unsigned long pfn,
  * by performing it only once every pageblock_nr_pages.
  * Return number of pages initialized.
  */
-static unsigned long  __init deferred_init_pages(int nid, int zid,
+static unsigned long  __init deferred_init_pages(struct zone *zone,
                                                 unsigned long pfn,
                                                 unsigned long end_pfn)
 {
-       struct mminit_pfnnid_cache nid_init_state = { };
        unsigned long nr_pgmask = pageblock_nr_pages - 1;
+       int nid = zone_to_nid(zone);
        unsigned long nr_pages = 0;
+       int zid = zone_idx(zone);
        struct page *page = NULL;
 
        for (; pfn < end_pfn; pfn++) {
-               if (!deferred_pfn_valid(nid, pfn, &nid_init_state)) {
+               if (!deferred_pfn_valid(pfn)) {
                        page = NULL;
                        continue;
                } else if (!page || !(pfn & nr_pgmask)) {
@@ -1649,18 +1624,100 @@ static unsigned long  __init deferred_init_pages(int nid, int zid,
        return (nr_pages);
 }
 
+/*
+ * This function is meant to pre-load the iterator for the zone init.
+ * Specifically, it walks through the ranges until we have caught up to the
+ * first_init_pfn value and exits there. If we never reach that value, we
+ * return false, indicating there are no valid ranges left.
+ */
+static bool __init
+deferred_init_mem_pfn_range_in_zone(u64 *i, struct zone *zone,
+                                   unsigned long *spfn, unsigned long *epfn,
+                                   unsigned long first_init_pfn)
+{
+       u64 j;
+
+       /*
+        * Start out by walking through the ranges in this zone that have
+        * already been initialized. We don't need to do anything with them,
+        * so we simply skip past them in the iterator.
+        */
+       for_each_free_mem_pfn_range_in_zone(j, zone, spfn, epfn) {
+               if (*epfn <= first_init_pfn)
+                       continue;
+               if (*spfn < first_init_pfn)
+                       *spfn = first_init_pfn;
+               *i = j;
+               return true;
+       }
+
+       return false;
+}
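In other words, the helper fast-forwards the for_each_free_mem_pfn_range_in_zone() iterator to the first range that still contains uninitialised pfns, clamps *spfn up to first_init_pfn when that range straddles it, and returns the iterator position through *i. Callers then drain the rest of the zone in MAX_ORDER-sized chunks; condensed from the deferred_init_memmap() hunk further down:

    u64 i;
    unsigned long spfn, epfn, nr_pages = 0;

    /* pre-load the iterator, then walk the remaining ranges */
    if (deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
                                            first_init_pfn)) {
            while (spfn < epfn)
                    nr_pages += deferred_init_maxorder(&i, zone,
                                                       &spfn, &epfn);
    }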
+
+/*
+ * Initialize and free pages. We do it in two loops: first we initialize
+ * struct page, then we free the pages to the buddy allocator, because
+ * while freeing pages we can access pages that are ahead (computing the
+ * buddy page in __free_one_page()).
+ *
+ * To try and keep some memory in the cache, the loop is broken along
+ * max page order boundaries. This way we will not cause any issues with
+ * the buddy page computation.
+ */
+static unsigned long __init
+deferred_init_maxorder(u64 *i, struct zone *zone, unsigned long *start_pfn,
+                      unsigned long *end_pfn)
+{
+       unsigned long mo_pfn = ALIGN(*start_pfn + 1, MAX_ORDER_NR_PAGES);
+       unsigned long spfn = *start_pfn, epfn = *end_pfn;
+       unsigned long nr_pages = 0;
+       u64 j = *i;
+
+       /* First we loop through and initialize the page values */
+       for_each_free_mem_pfn_range_in_zone_from(j, zone, start_pfn, end_pfn) {
+               unsigned long t;
+
+               if (mo_pfn <= *start_pfn)
+                       break;
+
+               t = min(mo_pfn, *end_pfn);
+               nr_pages += deferred_init_pages(zone, *start_pfn, t);
+
+               if (mo_pfn < *end_pfn) {
+                       *start_pfn = mo_pfn;
+                       break;
+               }
+       }
+
+       /* Reset values and now loop through freeing pages as needed */
+       swap(j, *i);
+
+       for_each_free_mem_pfn_range_in_zone_from(j, zone, &spfn, &epfn) {
+               unsigned long t;
+
+               if (mo_pfn <= spfn)
+                       break;
+
+               t = min(mo_pfn, epfn);
+               deferred_free_pages(spfn, t);
+
+               if (mo_pfn <= epfn)
+                       break;
+       }
+
+       return nr_pages;
+}
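The chunking pivots on mo_pfn = ALIGN(*start_pfn + 1, MAX_ORDER_NR_PAGES): the +1 guarantees forward progress, so a start pfn already sitting on a max-order boundary still yields one full block rather than an empty chunk. A standalone illustration (user-space C; the value 1024 is only an example, matching MAX_ORDER 11 with 4K pages):

    #include <assert.h>

    #define MO_NR_PAGES     1024UL                          /* example value */
    #define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))  /* a: power of two */

    int main(void)
    {
            /* mid-block start: init/free stop at the next boundary, 1024 */
            assert(ALIGN_UP(1000 + 1, MO_NR_PAGES) == 1024);
            /* already-aligned start: still advance one full block, to 2048 */
            assert(ALIGN_UP(1024 + 1, MO_NR_PAGES) == 2048);
            return 0;
    }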
+
 /* Initialise remaining memory on a node */
 static int __init deferred_init_memmap(void *data)
 {
        pg_data_t *pgdat = data;
-       int nid = pgdat->node_id;
+       const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
+       unsigned long spfn = 0, epfn = 0, nr_pages = 0;
+       unsigned long first_init_pfn, flags;
        unsigned long start = jiffies;
-       unsigned long nr_pages = 0;
-       unsigned long spfn, epfn, first_init_pfn, flags;
-       phys_addr_t spa, epa;
-       int zid;
        struct zone *zone;
-       const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
+       int zid;
        u64 i;
 
        /* Bind memory initialisation thread to a local node if possible */
@@ -1686,31 +1743,27 @@ static int __init deferred_init_memmap(void *data)
                if (first_init_pfn < zone_end_pfn(zone))
                        break;
        }
-       first_init_pfn = max(zone->zone_start_pfn, first_init_pfn);
+
+       /* If the zone is empty, somebody else may have cleared it out */
+       if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
+                                                first_init_pfn))
+               goto zone_empty;
 
        /*
-        * Initialize and free pages. We do it in two loops: first we initialize
-        * struct page, than free to buddy allocator, because while we are
-        * freeing pages we can access pages that are ahead (computing buddy
-        * page in __free_one_page()).
+        * Initialize and free pages in MAX_ORDER sized increments so
+        * that we can avoid introducing any issues with the buddy
+        * allocator.
         */
-       for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &spa, &epa, NULL) {
-               spfn = max_t(unsigned long, first_init_pfn, PFN_UP(spa));
-               epfn = min_t(unsigned long, zone_end_pfn(zone), PFN_DOWN(epa));
-               nr_pages += deferred_init_pages(nid, zid, spfn, epfn);
-       }
-       for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &spa, &epa, NULL) {
-               spfn = max_t(unsigned long, first_init_pfn, PFN_UP(spa));
-               epfn = min_t(unsigned long, zone_end_pfn(zone), PFN_DOWN(epa));
-               deferred_free_pages(nid, zid, spfn, epfn);
-       }
+       while (spfn < epfn)
+               nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn);
+zone_empty:
        pgdat_resize_unlock(pgdat, &flags);
 
        /* Sanity check that the next zone really is unpopulated */
        WARN_ON(++zid < MAX_NR_ZONES && populated_zone(++zone));
 
-       pr_info("node %d initialised, %lu pages in %ums\n", nid, nr_pages,
-                                       jiffies_to_msecs(jiffies - start));
+       pr_info("node %d initialised, %lu pages in %ums\n",
+               pgdat->node_id, nr_pages, jiffies_to_msecs(jiffies - start));
 
        pgdat_init_report_one_done();
        return 0;
@@ -1734,14 +1787,11 @@ static int __init deferred_init_memmap(void *data)
 static noinline bool __init
 deferred_grow_zone(struct zone *zone, unsigned int order)
 {
-       int zid = zone_idx(zone);
-       int nid = zone_to_nid(zone);
-       pg_data_t *pgdat = NODE_DATA(nid);
        unsigned long nr_pages_needed = ALIGN(1 << order, PAGES_PER_SECTION);
-       unsigned long nr_pages = 0;
-       unsigned long first_init_pfn, spfn, epfn, t, flags;
+       pg_data_t *pgdat = zone->zone_pgdat;
        unsigned long first_deferred_pfn = pgdat->first_deferred_pfn;
-       phys_addr_t spa, epa;
+       unsigned long spfn, epfn, flags;
+       unsigned long nr_pages = 0;
        u64 i;
 
        /* Only the last zone may have deferred pages */
@@ -1770,38 +1820,35 @@ deferred_grow_zone(struct zone *zone, unsigned int order)
                return true;
        }
 
-       first_init_pfn = max(zone->zone_start_pfn, first_deferred_pfn);
-
-       if (first_init_pfn >= pgdat_end_pfn(pgdat)) {
+       /* If the zone is empty, somebody else may have cleared it out */
+       if (!deferred_init_mem_pfn_range_in_zone(&i, zone, &spfn, &epfn,
+                                                first_deferred_pfn)) {
+               pgdat->first_deferred_pfn = ULONG_MAX;
                pgdat_resize_unlock(pgdat, &flags);
-               return false;
+               return true;
        }
 
-       for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &spa, &epa, NULL) {
-               spfn = max_t(unsigned long, first_init_pfn, PFN_UP(spa));
-               epfn = min_t(unsigned long, zone_end_pfn(zone), PFN_DOWN(epa));
+       /*
+        * Initialize and free pages in MAX_ORDER sized increments so
+        * that we can avoid introducing any issues with the buddy
+        * allocator.
+        */
+       while (spfn < epfn) {
+               /* update our first deferred PFN for this section */
+               first_deferred_pfn = spfn;
 
-               while (spfn < epfn && nr_pages < nr_pages_needed) {
-                       t = ALIGN(spfn + PAGES_PER_SECTION, PAGES_PER_SECTION);
-                       first_deferred_pfn = min(t, epfn);
-                       nr_pages += deferred_init_pages(nid, zid, spfn,
-                                                       first_deferred_pfn);
-                       spfn = first_deferred_pfn;
-               }
+               nr_pages += deferred_init_maxorder(&i, zone, &spfn, &epfn);
+
+               /* We should only stop along section boundaries */
+               if ((first_deferred_pfn ^ spfn) < PAGES_PER_SECTION)
+                       continue;
 
+               /* If our quota has been met we can stop here */
                if (nr_pages >= nr_pages_needed)
                        break;
        }
 
-       for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &spa, &epa, NULL) {
-               spfn = max_t(unsigned long, first_init_pfn, PFN_UP(spa));
-               epfn = min_t(unsigned long, first_deferred_pfn, PFN_DOWN(epa));
-               deferred_free_pages(nid, zid, spfn, epfn);
-
-               if (first_deferred_pfn == epfn)
-                       break;
-       }
-       pgdat->first_deferred_pfn = first_deferred_pfn;
+       pgdat->first_deferred_pfn = spfn;
        pgdat_resize_unlock(pgdat, &flags);
 
        return nr_pages > 0;
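The '(first_deferred_pfn ^ spfn) < PAGES_PER_SECTION' check is a compact test for "do these two pfns still lie in the same memory section": since PAGES_PER_SECTION is a power of two, two pfns fall in the same aligned section exactly when they differ only in the low bits, i.e. when their XOR is below PAGES_PER_SECTION. A minimal standalone sketch:

    #include <assert.h>
    #include <stdbool.h>

    /* true iff a and b lie in the same 'size'-aligned block of pfns,
     * assuming size is a power of two (as PAGES_PER_SECTION is) */
    static bool same_aligned_block(unsigned long a, unsigned long b,
                                   unsigned long size)
    {
            return (a ^ b) < size;
    }

    int main(void)
    {
            assert(same_aligned_block(0x10000, 0x10fff, 0x8000));   /* same block     */
            assert(!same_aligned_block(0x17fff, 0x18000, 0x8000));  /* adjacent blocks */
            return 0;
    }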
@@ -1824,9 +1871,9 @@ _deferred_grow_zone(struct zone *zone, unsigned int order)
 void __init page_alloc_init_late(void)
 {
        struct zone *zone;
+       int nid;
 
 #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT
-       int nid;
 
        /* There will be num_node_state(N_MEMORY) threads */
        atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
@@ -1846,10 +1893,12 @@ void __init page_alloc_init_late(void)
        /* Reinit limits that are based on free pages after the kernel is up */
        files_maxfiles_init();
 #endif
-#ifdef CONFIG_ARCH_DISCARD_MEMBLOCK
+
        /* Discard memblock private memory */
        memblock_discard();
-#endif
+
+       for_each_node_state(nid, N_MEMORY)
+               shuffle_free_memory(NODE_DATA(nid));
 
        for_each_populated_zone(zone)
                set_zone_contiguous(zone);
@@ -1921,8 +1970,7 @@ static inline void expand(struct zone *zone, struct page *page,
                if (set_page_guard(zone, &page[size], high, migratetype))
                        continue;
 
-               list_add(&page[size].lru, &area->free_list[migratetype]);
-               area->nr_free++;
+               add_to_free_area(&page[size], area, migratetype);
                set_page_order(&page[size], high);
        }
 }
@@ -1937,7 +1985,7 @@ static void check_new_page_bad(struct page *page)
        if (unlikely(page->mapping != NULL))
                bad_reason = "non-NULL mapping";
        if (unlikely(page_ref_count(page) != 0))
-               bad_reason = "nonzero _count";
+               bad_reason = "nonzero _refcount";
        if (unlikely(page->flags & __PG_HWPOISON)) {
                bad_reason = "HWPoisoned (hardware-corrupted)";
                bad_flags = __PG_HWPOISON;
@@ -2064,13 +2112,10 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
        /* Find a page of the appropriate size in the preferred list */
        for (current_order = order; current_order < MAX_ORDER; ++current_order) {
                area = &(zone->free_area[current_order]);
-               page = list_first_entry_or_null(&area->free_list[migratetype],
-                                                       struct page, lru);
+               page = get_page_from_free_area(area, migratetype);
                if (!page)
                        continue;
-               list_del(&page->lru);
-               rmv_page_order(page);
-               area->nr_free--;
+               del_page_from_free_area(page, area);
                expand(zone, page, order, current_order, area, migratetype);
                set_pcppage_migratetype(page, migratetype);
                return page;
@@ -2156,8 +2201,7 @@ static int move_freepages(struct zone *zone,
                }
 
                order = page_order(page);
-               list_move(&page->lru,
-                         &zone->free_area[order].free_list[migratetype]);
+               move_to_free_area(page, &zone->free_area[order], migratetype);
                page += 1 << order;
                pages_moved += 1 << order;
        }
@@ -2345,7 +2389,7 @@ static void steal_suitable_fallback(struct zone *zone, struct page *page,
 
 single_page:
        area = &zone->free_area[current_order];
-       list_move(&page->lru, &area->free_list[start_type]);
+       move_to_free_area(page, area, start_type);
 }
 
 /*
@@ -2369,7 +2413,7 @@ int find_suitable_fallback(struct free_area *area, unsigned int order,
                if (fallback_mt == MIGRATE_TYPES)
                        break;
 
-               if (list_empty(&area->free_list[fallback_mt]))
+               if (free_area_empty(area, fallback_mt))
                        continue;
 
                if (can_steal_fallback(order, migratetype))
@@ -2456,9 +2500,7 @@ static bool unreserve_highatomic_pageblock(const struct alloc_context *ac,
                for (order = 0; order < MAX_ORDER; order++) {
                        struct free_area *area = &(zone->free_area[order]);
 
-                       page = list_first_entry_or_null(
-                                       &area->free_list[MIGRATE_HIGHATOMIC],
-                                       struct page, lru);
+                       page = get_page_from_free_area(area, MIGRATE_HIGHATOMIC);
                        if (!page)
                                continue;
 
@@ -2581,8 +2623,7 @@ find_smallest:
        VM_BUG_ON(current_order == MAX_ORDER);
 
 do_steal:
-       page = list_first_entry(&area->free_list[fallback_mt],
-                                                       struct page, lru);
+       page = get_page_from_free_area(area, fallback_mt);
 
        steal_suitable_fallback(zone, page, alloc_flags, start_migratetype,
                                                                can_steal);
@@ -3019,6 +3060,7 @@ EXPORT_SYMBOL_GPL(split_page);
 
 int __isolate_free_page(struct page *page, unsigned int order)
 {
+       struct free_area *area = &page_zone(page)->free_area[order];
        unsigned long watermark;
        struct zone *zone;
        int mt;
@@ -3043,9 +3085,8 @@ int __isolate_free_page(struct page *page, unsigned int order)
        }
 
        /* Remove page from free list */
-       list_del(&page->lru);
-       zone->free_area[order].nr_free--;
-       rmv_page_order(page);
+
+       del_page_from_free_area(page, area);
 
        /*
         * Set the pageblock if the isolated page is at least half of a
@@ -3120,9 +3161,8 @@ static struct page *__rmqueue_pcplist(struct zone *zone, int migratetype,
 
 /* Lock and remove page from the per-cpu list */
 static struct page *rmqueue_pcplist(struct zone *preferred_zone,
-                       struct zone *zone, unsigned int order,
-                       gfp_t gfp_flags, int migratetype,
-                       unsigned int alloc_flags)
+                       struct zone *zone, gfp_t gfp_flags,
+                       int migratetype, unsigned int alloc_flags)
 {
        struct per_cpu_pages *pcp;
        struct list_head *list;
@@ -3134,7 +3174,7 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
        list = &pcp->lists[migratetype];
        page = __rmqueue_pcplist(zone,  migratetype, alloc_flags, pcp, list);
        if (page) {
-               __count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
+               __count_zid_vm_events(PGALLOC, page_zonenum(page), 1);
                zone_statistics(preferred_zone, zone);
        }
        local_irq_restore(flags);
@@ -3154,8 +3194,8 @@ struct page *rmqueue(struct zone *preferred_zone,
        struct page *page;
 
        if (likely(order == 0)) {
-               page = rmqueue_pcplist(preferred_zone, zone, order,
-                               gfp_flags, migratetype, alloc_flags);
+               page = rmqueue_pcplist(preferred_zone, zone, gfp_flags,
+                                       migratetype, alloc_flags);
                goto out;
        }
 
@@ -3343,13 +3383,13 @@ bool __zone_watermark_ok(struct zone *z, unsigned int order, unsigned long mark,
                        continue;
 
                for (mt = 0; mt < MIGRATE_PCPTYPES; mt++) {
-                       if (!list_empty(&area->free_list[mt]))
+                       if (!free_area_empty(area, mt))
                                return true;
                }
 
 #ifdef CONFIG_CMA
                if ((alloc_flags & ALLOC_CMA) &&
-                   !list_empty(&area->free_list[MIGRATE_CMA])) {
+                   !free_area_empty(area, MIGRATE_CMA)) {
                        return true;
                }
 #endif
@@ -5276,7 +5316,7 @@ void show_free_areas(unsigned int filter, nodemask_t *nodemask)
 
                        types[order] = 0;
                        for (type = 0; type < MIGRATE_TYPES; type++) {
-                               if (!list_empty(&area->free_list[type]))
+                               if (!free_area_empty(area, type))
                                        types[order] |= 1 << type;
                        }
                }
@@ -8403,7 +8443,7 @@ void zone_pcp_reset(struct zone *zone)
  * All pages in the range must be in a single zone and isolated
  * before calling this.
  */
-void
+unsigned long
 __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
 {
        struct page *page;
@@ -8411,12 +8451,15 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
        unsigned int order, i;
        unsigned long pfn;
        unsigned long flags;
+       unsigned long offlined_pages = 0;
+
        /* find the first valid pfn */
        for (pfn = start_pfn; pfn < end_pfn; pfn++)
                if (pfn_valid(pfn))
                        break;
        if (pfn == end_pfn)
-               return;
+               return offlined_pages;
+
        offline_mem_sections(pfn, end_pfn);
        zone = page_zone(pfn_to_page(pfn));
        spin_lock_irqsave(&zone->lock, flags);
@@ -8434,24 +8477,26 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn)
                if (unlikely(!PageBuddy(page) && PageHWPoison(page))) {
                        pfn++;
                        SetPageReserved(page);
+                       offlined_pages++;
                        continue;
                }
 
                BUG_ON(page_count(page));
                BUG_ON(!PageBuddy(page));
                order = page_order(page);
+               offlined_pages += 1 << order;
 #ifdef CONFIG_DEBUG_VM
                pr_info("remove from free list %lx %d %lx\n",
                        pfn, 1 << order, end_pfn);
 #endif
-               list_del(&page->lru);
-               rmv_page_order(page);
-               zone->free_area[order].nr_free--;
+               del_page_from_free_area(page, &zone->free_area[order]);
                for (i = 0; i < (1 << order); i++)
                        SetPageReserved((page+i));
                pfn += (1 << order);
        }
        spin_unlock_irqrestore(&zone->lock, flags);
+
+       return offlined_pages;
 }
 #endif
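Returning the count lets the memory-hotplug caller account for what was pulled off the free lists without walking the range a second time: each HWPoisoned singleton contributes 1 and each buddy block contributes 1 << order. A hypothetical caller sketch (the matching mm/memory_hotplug.c change is not part of this diff, so the exact call site may differ):

    /* hypothetical: offline path consuming the new return value */
    unsigned long offlined;

    offlined = __offline_isolated_pages(start_pfn, end_pfn);
    adjust_managed_page_count(pfn_to_page(start_pfn), -offlined);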