diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 36d8091..0a54ffa 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -355,7 +355,7 @@ static unsigned long find_smallest_section_pfn(int nid, struct zone *zone,
                if (unlikely(pfn_to_nid(start_pfn) != nid))
                        continue;
 
-               if (zone && zone != page_zone(pfn_to_page(start_pfn)))
+               if (zone != page_zone(pfn_to_page(start_pfn)))
                        continue;
 
                return start_pfn;
@@ -380,7 +380,7 @@ static unsigned long find_biggest_section_pfn(int nid, struct zone *zone,
                if (unlikely(pfn_to_nid(pfn) != nid))
                        continue;
 
-               if (zone && zone != page_zone(pfn_to_page(pfn)))
+               if (zone != page_zone(pfn_to_page(pfn)))
                        continue;
 
                return pfn;
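
Both hunks drop the same defensive test. As the shrink_zone_span() rewrite below shows, these lookup helpers now have exactly one caller each, and it always passes the zone it is shrinking, so zone can no longer be NULL here. A sketch of the surviving call pattern, with the names used in the hunks below:

        /* zone is the zone being shrunk and is never NULL, so the
         * helpers only need the page_zone() comparison itself. */
        pfn = find_smallest_section_pfn(nid, zone, end_pfn, zone_end_pfn(zone));
        pfn = find_biggest_section_pfn(nid, zone, zone->zone_start_pfn, start_pfn);
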
@@ -392,14 +392,11 @@ static unsigned long find_biggest_section_pfn(int nid, struct zone *zone,
 static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
                             unsigned long end_pfn)
 {
-       unsigned long zone_start_pfn = zone->zone_start_pfn;
-       unsigned long z = zone_end_pfn(zone); /* zone_end_pfn namespace clash */
-       unsigned long zone_end_pfn = z;
        unsigned long pfn;
        int nid = zone_to_nid(zone);
 
        zone_span_writelock(zone);
-       if (zone_start_pfn == start_pfn) {
+       if (zone->zone_start_pfn == start_pfn) {
                /*
                 * If the section is the smallest section in the zone, we need
                 * to shrink zone->zone_start_pfn and zone->spanned_pages.
@@ -407,50 +404,30 @@ static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,
                 * for shrinking zone.
                 */
                pfn = find_smallest_section_pfn(nid, zone, end_pfn,
-                                               zone_end_pfn);
+                                               zone_end_pfn(zone));
                if (pfn) {
+                       zone->spanned_pages = zone_end_pfn(zone) - pfn;
                        zone->zone_start_pfn = pfn;
-                       zone->spanned_pages = zone_end_pfn - pfn;
+               } else {
+                       zone->zone_start_pfn = 0;
+                       zone->spanned_pages = 0;
                }
-       } else if (zone_end_pfn == end_pfn) {
+       } else if (zone_end_pfn(zone) == end_pfn) {
                /*
                 * If the section is the biggest section in the zone, we need
                 * to shrink zone->spanned_pages.
                 * In this case, we find second biggest valid mem_section for
                 * shrinking zone.
                 */
-               pfn = find_biggest_section_pfn(nid, zone, zone_start_pfn,
+               pfn = find_biggest_section_pfn(nid, zone, zone->zone_start_pfn,
                                               start_pfn);
                if (pfn)
-                       zone->spanned_pages = pfn - zone_start_pfn + 1;
-       }
-
-       /*
-        * The section is not biggest or smallest mem_section in the zone, it
-        * only creates a hole in the zone. So in this case, we need not
-        * change the zone. But perhaps, the zone has only hole data. Thus
-        * it check the zone has only hole or not.
-        */
-       pfn = zone_start_pfn;
-       for (; pfn < zone_end_pfn; pfn += PAGES_PER_SUBSECTION) {
-               if (unlikely(!pfn_to_online_page(pfn)))
-                       continue;
-
-               if (page_zone(pfn_to_page(pfn)) != zone)
-                       continue;
-
-               /* Skip range to be removed */
-               if (pfn >= start_pfn && pfn < end_pfn)
-                       continue;
-
-               /* If we find valid section, we have nothing to do */
-               zone_span_writeunlock(zone);
-               return;
+                       zone->spanned_pages = pfn - zone->zone_start_pfn + 1;
+               else {
+                       zone->zone_start_pfn = 0;
+                       zone->spanned_pages = 0;
+               }
        }
-
-       /* The zone has no valid section */
-       zone->zone_start_pfn = 0;
-       zone->spanned_pages = 0;
        zone_span_writeunlock(zone);
 }
 
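One ordering subtlety in the rewritten branches above: zone_end_pfn(zone) expands to zone->zone_start_pfn + zone->spanned_pages, so the new code has to derive spanned_pages from it before overwriting zone_start_pfn, which is why the two added assignments appear in that order. A self-contained userspace model with made-up pfn values (the struct and helper mirror just enough of the kernel definitions to run):

        #include <assert.h>

        struct zone { unsigned long zone_start_pfn, spanned_pages; };

        static unsigned long zone_end_pfn(const struct zone *z)
        {
                return z->zone_start_pfn + z->spanned_pages; /* as in mmzone.h */
        }

        int main(void)
        {
                /* Zone spans [0x8000, 0x18000); [0x8000, 0x10000) is removed
                 * and 0x10000 is the smallest remaining online pfn. */
                struct zone z = { .zone_start_pfn = 0x8000, .spanned_pages = 0x10000 };
                unsigned long pfn = 0x10000;

                z.spanned_pages = zone_end_pfn(&z) - pfn; /* 0x8000 pages left */
                z.zone_start_pfn = pfn;                   /* ...starting here  */

                assert(z.zone_start_pfn == 0x10000 && z.spanned_pages == 0x8000);
                return 0;
        }
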
@@ -490,6 +467,9 @@ void __ref remove_pfn_range_from_zone(struct zone *zone,
        struct pglist_data *pgdat = zone->zone_pgdat;
        unsigned long flags;
 
+       /* Poison struct pages because they are now uninitialized again. */
+       page_init_poison(pfn_to_page(start_pfn), sizeof(struct page) * nr_pages);
+
 #ifdef CONFIG_ZONE_DEVICE
        /*
         * Zone shrinking code cannot properly deal with ZONE_DEVICE. So
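
The page_init_poison() call added above fills the struct pages backing the removed range with the PAGE_POISON pattern when page-init poisoning is active (a CONFIG_DEBUG_VM debug facility), so a later touch of the stale memmap shows up as an obvious poisoned access instead of silently reading leftover state. A conceptual model only, not the mm/debug.c implementation:

        /* Assumption: 0xaa mirrors PAGE_POISON from include/linux/poison.h;
         * the real helper is a no-op unless poisoning is enabled. */
        static void poison_memmap_model(void *memmap, size_t size)
        {
                memset(memmap, 0xaa, size);
        }
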
@@ -536,25 +516,20 @@ static void __remove_section(unsigned long pfn, unsigned long nr_pages,
 void __remove_pages(unsigned long pfn, unsigned long nr_pages,
                    struct vmem_altmap *altmap)
 {
+       const unsigned long end_pfn = pfn + nr_pages;
+       unsigned long cur_nr_pages;
        unsigned long map_offset = 0;
-       unsigned long nr, start_sec, end_sec;
 
        map_offset = vmem_altmap_offset(altmap);
 
        if (check_pfn_span(pfn, nr_pages, "remove"))
                return;
 
-       start_sec = pfn_to_section_nr(pfn);
-       end_sec = pfn_to_section_nr(pfn + nr_pages - 1);
-       for (nr = start_sec; nr <= end_sec; nr++) {
-               unsigned long pfns;
-
+       for (; pfn < end_pfn; pfn += cur_nr_pages) {
                cond_resched();
-               pfns = min(nr_pages, PAGES_PER_SECTION
-                               - (pfn & ~PAGE_SECTION_MASK));
-               __remove_section(pfn, pfns, map_offset, altmap);
-               pfn += pfns;
-               nr_pages -= pfns;
+               /* Select all remaining pages up to the next section boundary */
+               cur_nr_pages = min(end_pfn - pfn, -(pfn | PAGE_SECTION_MASK));
+               __remove_section(pfn, cur_nr_pages, map_offset, altmap);
                map_offset = 0;
        }
 }
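
The replacement loop folds the explicit section bookkeeping into one expression: PAGE_SECTION_MASK is ~(PAGES_PER_SECTION - 1), so pfn | PAGE_SECTION_MASK sets every bit above the in-section offset, and negating that in unsigned arithmetic yields exactly the number of pfns up to the next section boundary (a full PAGES_PER_SECTION when pfn is already aligned). A standalone demo, assuming the x86-64 SPARSEMEM value of PAGES_PER_SECTION (1 << 15); the kernel derives it from SECTION_SIZE_BITS:

        #include <stdio.h>

        #define PAGES_PER_SECTION (1UL << 15)
        #define PAGE_SECTION_MASK (~(PAGES_PER_SECTION - 1))

        int main(void)
        {
                unsigned long pfn = 0x9000;
                unsigned long to_boundary = -(pfn | PAGE_SECTION_MASK);

                /* 0x9000 -> 0x7000 pfns to the 0x10000 boundary;
                 * an aligned pfn (e.g. 0x8000) yields 0x8000. */
                printf("0x%lx\n", to_boundary);
                return 0;
        }
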
@@ -1197,14 +1172,13 @@ bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
 }
 
 /*
- * Confirm all pages in a range [start, end) belong to the same zone.
- * When true, return its valid [start, end).
+ * Confirm all pages in a range [start, end) belong to the same zone (skipping
+ * memory holes). When true, return the zone.
  */
-int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
-                        unsigned long *valid_start, unsigned long *valid_end)
+struct zone *test_pages_in_a_zone(unsigned long start_pfn,
+                                 unsigned long end_pfn)
 {
        unsigned long pfn, sec_end_pfn;
-       unsigned long start, end;
        struct zone *zone = NULL;
        struct page *page;
        int i;
@@ -1225,24 +1199,15 @@ int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
                                continue;
                        /* Check if we got outside of the zone */
                        if (zone && !zone_spans_pfn(zone, pfn + i))
-                               return 0;
+                               return NULL;
                        page = pfn_to_page(pfn + i);
                        if (zone && page_zone(page) != zone)
-                               return 0;
-                       if (!zone)
-                               start = pfn + i;
+                               return NULL;
                        zone = page_zone(page);
-                       end = pfn + MAX_ORDER_NR_PAGES;
                }
        }
 
-       if (zone) {
-               *valid_start = start;
-               *valid_end = min(end, end_pfn);
-               return 1;
-       } else {
-               return 0;
-       }
+       return zone;
 }
 
 /*
@@ -1487,7 +1452,6 @@ static int __ref __offline_pages(unsigned long start_pfn,
        unsigned long offlined_pages = 0;
        int ret, node, nr_isolate_pageblock;
        unsigned long flags;
-       unsigned long valid_start, valid_end;
        struct zone *zone;
        struct memory_notify arg;
        char *reason;
@@ -1512,14 +1476,12 @@ static int __ref __offline_pages(unsigned long start_pfn,
 
        /* This makes hotplug much easier... and readable.
           We assume this for now. */
-       if (!test_pages_in_a_zone(start_pfn, end_pfn, &valid_start,
-                                 &valid_end)) {
+       zone = test_pages_in_a_zone(start_pfn, end_pfn);
+       if (!zone) {
                ret = -EINVAL;
                reason = "multizone range";
                goto failed_removal;
        }
-
-       zone = page_zone(pfn_to_page(valid_start));
        node = zone_to_nid(zone);
 
        /* set above range as isolated */
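
Taken together, the last two hunks replace the out-parameter dance (valid_start/valid_end plus a separate page_zone() lookup) with a single pointer-returning call. Condensed from the hunk above, the caller-side contract is now:

        zone = test_pages_in_a_zone(start_pfn, end_pfn);
        if (!zone)
                return -EINVAL;         /* "multizone range" */
        node = zone_to_nid(zone);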