Merge tag 'riscv-for-linus-5.11-rc6' of git://git.kernel.org/pub/scm/linux/kernel...
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 7a2c89b..519a60d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -423,6 +423,8 @@ defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
        if (end_pfn < pgdat_end_pfn(NODE_DATA(nid)))
                return false;
 
+       if (NODE_DATA(nid)->first_deferred_pfn != ULONG_MAX)
+               return true;
        /*
         * We start only with one section of pages, more pages are added as
         * needed until the rest of deferred pages are initialized.
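
A rough userspace model of the deferral decision above may help; it is a
sketch only, and the struct, the constant and toy_defer_init() are stand-ins
rather than kernel code. The two added lines mean that once a first deferred
PFN has been recorded for the node, every later PFN is deferred immediately
instead of re-counting initialized sections:

	#include <stdbool.h>
	#include <limits.h>

	#define TOY_PAGES_PER_SECTION 4096UL	/* PAGES_PER_SECTION stand-in */

	struct toy_node {
		unsigned long node_end_pfn;		/* pgdat_end_pfn() stand-in */
		unsigned long first_deferred_pfn;	/* ULONG_MAX: nothing deferred yet */
	};

	bool toy_defer_init(struct toy_node *node, unsigned long pfn,
			    unsigned long end_pfn)
	{
		static unsigned long nr_initialized;

		/* Ranges that end below the node end are always populated. */
		if (end_pfn < node->node_end_pfn)
			return false;

		/* The new early return: deferral has already started. */
		if (node->first_deferred_pfn != ULONG_MAX)
			return true;

		/* Otherwise initialize about one section, then start deferring. */
		nr_initialized++;
		if (nr_initialized > TOY_PAGES_PER_SECTION &&
		    (pfn & (TOY_PAGES_PER_SECTION - 1)) == 0) {
			node->first_deferred_pfn = pfn;
			return true;
		}
		return false;
	}
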
@@ -1205,8 +1207,10 @@ static void kernel_init_free_pages(struct page *page, int numpages)
        /* s390's use of memset() could override KASAN redzones. */
        kasan_disable_current();
        for (i = 0; i < numpages; i++) {
+               u8 tag = page_kasan_tag(page + i);
                page_kasan_tag_reset(page + i);
                clear_highpage(page + i);
+               page_kasan_tag_set(page + i, tag);
        }
        kasan_enable_current();
 }
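
The same four statements restated with the intent spelled out; the comments
are my reading of the change, not text from the patch. The page tag is saved
before page_kasan_tag_reset() and put back afterwards, presumably so the tag
assigned at allocation is not lost across the clear:

	u8 tag = page_kasan_tag(page + i);	/* save the page's KASAN tag        */
	page_kasan_tag_reset(page + i);		/* drop it while the page is zeroed */
	clear_highpage(page + i);
	page_kasan_tag_set(page + i, tag);	/* restore it after the clear       */
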
@@ -2860,20 +2864,20 @@ __rmqueue(struct zone *zone, unsigned int order, int migratetype,
 {
        struct page *page;
 
-#ifdef CONFIG_CMA
-       /*
-        * Balance movable allocations between regular and CMA areas by
-        * allocating from CMA when over half of the zone's free memory
-        * is in the CMA area.
-        */
-       if (alloc_flags & ALLOC_CMA &&
-           zone_page_state(zone, NR_FREE_CMA_PAGES) >
-           zone_page_state(zone, NR_FREE_PAGES) / 2) {
-               page = __rmqueue_cma_fallback(zone, order);
-               if (page)
-                       return page;
+       if (IS_ENABLED(CONFIG_CMA)) {
+               /*
+                * Balance movable allocations between regular and CMA areas by
+                * allocating from CMA when over half of the zone's free memory
+                * is in the CMA area.
+                */
+               if (alloc_flags & ALLOC_CMA &&
+                   zone_page_state(zone, NR_FREE_CMA_PAGES) >
+                   zone_page_state(zone, NR_FREE_PAGES) / 2) {
+                       page = __rmqueue_cma_fallback(zone, order);
+                       if (page)
+                               goto out;
+               }
        }
-#endif
 retry:
        page = __rmqueue_smallest(zone, order, migratetype);
        if (unlikely(!page)) {
@@ -2884,8 +2888,9 @@ retry:
                                                                alloc_flags))
                        goto retry;
        }
-
-       trace_mm_page_alloc_zone_locked(page, order, migratetype);
+out:
+       if (page)
+               trace_mm_page_alloc_zone_locked(page, order, migratetype);
        return page;
 }
 
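
Two things change in the __rmqueue() hunks above: the #ifdef CONFIG_CMA block
becomes an IS_ENABLED() branch, and the CMA path now reaches the tracepoint
through the new out: label, with the trace call guarded so it only fires when
a page was actually obtained. The IS_ENABLED() form keeps the branch visible
to the compiler in every configuration: it is still parsed and type-checked,
then folded away as dead code when the option is off. A minimal standalone
illustration of that pattern (the IS_ENABLED() definition below is a
simplified stand-in for the kernel macro, which also copes with undefined
CONFIG_* symbols):

	#include <stdio.h>

	#define CONFIG_TOY_FEATURE 0	/* flip to 1 to enable the branch */
	#define IS_ENABLED(option) (option)

	int feature_path(void) { return 42; }

	int main(void)
	{
		int val = 0;

		if (IS_ENABLED(CONFIG_TOY_FEATURE)) {
			/* Compiled and type-checked even when disabled,
			 * then eliminated by the optimizer. */
			val = feature_path();
		}
		printf("%d\n", val);
		return 0;
	}
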
@@ -6116,7 +6121,7 @@ overlap_memmap_init(unsigned long zone, unsigned long *pfn)
  * zone stats (e.g., nr_isolate_pageblock) are touched.
  */
 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
-               unsigned long start_pfn,
+               unsigned long start_pfn, unsigned long zone_end_pfn,
                enum meminit_context context,
                struct vmem_altmap *altmap, int migratetype)
 {
@@ -6152,7 +6157,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
                if (context == MEMINIT_EARLY) {
                        if (overlap_memmap_init(zone, &pfn))
                                continue;
-                       if (defer_init(nid, pfn, end_pfn))
+                       if (defer_init(nid, pfn, zone_end_pfn))
                                break;
                }
 
@@ -6266,7 +6271,7 @@ void __meminit __weak memmap_init(unsigned long size, int nid,
 
                if (end_pfn > start_pfn) {
                        size = end_pfn - start_pfn;
-                       memmap_init_zone(size, nid, zone, start_pfn,
+                       memmap_init_zone(size, nid, zone, start_pfn, range_end_pfn,
                                         MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
                }
        }