mm: memmap_init: iterate over memblock regions rather than check each PFN
[linux-2.6-microblaze.git] mm/page_alloc.c
index 644a59d..40587d7 100644
@@ -5951,23 +5951,6 @@ overlap_memmap_init(unsigned long zone, unsigned long *pfn)
        return false;
 }
 
-#ifdef CONFIG_SPARSEMEM
-/* Skip PFNs that belong to non-present sections */
-static inline __meminit unsigned long next_pfn(unsigned long pfn)
-{
-       const unsigned long section_nr = pfn_to_section_nr(++pfn);
-
-       if (present_section_nr(section_nr))
-               return pfn;
-       return section_nr_to_pfn(next_present_section_nr(section_nr));
-}
-#else
-static inline __meminit unsigned long next_pfn(unsigned long pfn)
-{
-       return pfn++;
-}
-#endif
-
 /*
  * Initially all pages are reserved - free ones are freed
  * up by memblock_free_all() once the early boot process is
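Note: the deleted next_pfn() only made hole-skipping faster under SPARSEMEM; it did not remove the per-PFN checks themselves. A minimal user-space model of its SPARSEMEM branch, using a toy section size and presence bitmap in place of the kernel's real pfn_to_section_nr()/present_section_nr() machinery (all names below are local stand-ins, not the kernel implementation):

	#include <stdbool.h>
	#include <stdio.h>

	#define SECTION_SHIFT	4	/* toy value; the real shift is arch-specific */
	#define NR_SECTIONS	8

	/* Toy stand-in for present_section_nr(). */
	static bool present[NR_SECTIONS] = {
		true, false, false, true, true, false, true, true
	};

	static unsigned long pfn_to_section_nr(unsigned long pfn)
	{
		return pfn >> SECTION_SHIFT;
	}

	static unsigned long section_nr_to_pfn(unsigned long sec)
	{
		return sec << SECTION_SHIFT;
	}

	static unsigned long next_present_section_nr(unsigned long sec)
	{
		do {
			sec++;
		} while (sec < NR_SECTIONS && !present[sec]);
		return sec;
	}

	/*
	 * Mirrors the removed helper: advance one pfn, and if it lands in
	 * a non-present section, jump over the hole a section at a time.
	 */
	static unsigned long next_pfn(unsigned long pfn)
	{
		unsigned long sec = pfn_to_section_nr(++pfn);

		if (sec < NR_SECTIONS && present[sec])
			return pfn;
		return section_nr_to_pfn(next_present_section_nr(sec));
	}

	int main(void)
	{
		/* Walking past pfn 15 skips non-present sections 1-2 in one call. */
		printf("next_pfn(15) = %lu\n", next_pfn(15));	/* prints 48 */
		return 0;
	}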
@@ -6007,14 +5990,6 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
                 * function.  They do not exist on hotplugged memory.
                 */
                if (context == MEMMAP_EARLY) {
-                       if (!early_pfn_valid(pfn)) {
-                               pfn = next_pfn(pfn);
-                               continue;
-                       }
-                       if (!early_pfn_in_nid(pfn, nid)) {
-                               pfn++;
-                               continue;
-                       }
                        if (overlap_memmap_init(zone, &pfn))
                                continue;
                        if (defer_init(nid, pfn, end_pfn))
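Note: the early_pfn_valid()/early_pfn_in_nid() tests become dead weight once the caller only passes ranges that memblock reports for this node; in particular, early_pfn_in_nid() had to resolve the owning node for every single pfn, which on configurations where nodes interleave meant a memblock range lookup per pfn. A rough user-space cost model of that shift (hypothetical regions and names, not kernel code):

	#include <stdio.h>

	struct region { unsigned long start, end; int nid; };

	static const struct region regions[] = {
		{ 0x000, 0x100, 0 },
		{ 0x200, 0x300, 1 },
		{ 0x300, 0x400, 0 },
	};
	#define NR_REGIONS (sizeof(regions) / sizeof(regions[0]))

	/* Old shape: resolve the owning node for each pfn individually. */
	static int pfn_to_nid(unsigned long pfn)
	{
		for (unsigned int i = 0; i < NR_REGIONS; i++)
			if (pfn >= regions[i].start && pfn < regions[i].end)
				return regions[i].nid;
		return -1;	/* hole */
	}

	int main(void)
	{
		unsigned long pfn, inited = 0, lookups = 0;

		/* Old shape: one lookup per pfn across the whole span. */
		for (pfn = 0; pfn < 0x400; pfn++, lookups++)
			if (pfn_to_nid(pfn) == 0)
				inited++;
		printf("per-pfn:    %lu pfns inited, %lu lookups\n", inited, lookups);

		/* New shape: visit each region once; no per-pfn lookups. */
		inited = 0;
		for (unsigned int i = 0; i < NR_REGIONS; i++)
			if (regions[i].nid == 0)
				inited += regions[i].end - regions[i].start;
		printf("per-region: %lu pfns inited, %u lookups\n",
		       inited, (unsigned int)NR_REGIONS);
		return 0;
	}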
@@ -6130,9 +6105,23 @@ static void __meminit zone_init_free_lists(struct zone *zone)
 }
 
 void __meminit __weak memmap_init(unsigned long size, int nid,
-                                 unsigned long zone, unsigned long start_pfn)
+                                 unsigned long zone,
+                                 unsigned long range_start_pfn)
 {
-       memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY, NULL);
+       unsigned long start_pfn, end_pfn;
+       unsigned long range_end_pfn = range_start_pfn + size;
+       int i;
+
+       for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
+               start_pfn = clamp(start_pfn, range_start_pfn, range_end_pfn);
+               end_pfn = clamp(end_pfn, range_start_pfn, range_end_pfn);
+
+               if (end_pfn > start_pfn) {
+                       size = end_pfn - start_pfn;
+                       memmap_init_zone(size, nid, zone, start_pfn,
+                                        MEMMAP_EARLY, NULL);
+               }
+       }
 }
 
 static int zone_batchsize(struct zone *zone)
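Note: the clamp() pair in the new memmap_init() is plain interval intersection: each memblock range is trimmed to the zone's [range_start_pfn, range_end_pfn) span, and a range disjoint from the zone collapses to an empty interval that the end_pfn > start_pfn test filters out. A minimal sketch with a simplified local clamp macro standing in for the kernel's (same argument order, no type checking):

	#include <stdio.h>

	#define clamp(val, lo, hi) \
		((val) < (lo) ? (lo) : (val) > (hi) ? (hi) : (val))

	static void intersect(unsigned long start, unsigned long end,
			      unsigned long zone_start, unsigned long zone_end)
	{
		start = clamp(start, zone_start, zone_end);
		end = clamp(end, zone_start, zone_end);

		if (end > start)
			printf("init [%#lx, %#lx), %lu pages\n",
			       start, end, end - start);
		else
			printf("range disjoint from zone, skipped\n");
	}

	int main(void)
	{
		/* Zone covers pfns [0x100, 0x500). */
		intersect(0x080, 0x200, 0x100, 0x500);	/* trimmed to [0x100, 0x200) */
		intersect(0x300, 0x400, 0x100, 0x500);	/* fully inside, untouched */
		intersect(0x600, 0x700, 0x100, 0x500);	/* both ends clamp to 0x500 */
		return 0;
	}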