[linux-2.6-microblaze.git] / mm / page_alloc.c
index fab5e97..780c8f0 100644
@@ -69,6 +69,7 @@
 #include <linux/nmi.h>
 #include <linux/psi.h>
 #include <linux/padata.h>
+#include <linux/khugepaged.h>
 
 #include <asm/sections.h>
 #include <asm/tlbflush.h>
@@ -3367,9 +3368,16 @@ struct page *rmqueue(struct zone *preferred_zone,
        struct page *page;
 
        if (likely(order == 0)) {
-               page = rmqueue_pcplist(preferred_zone, zone, gfp_flags,
+               /*
+                * The MIGRATE_MOVABLE pcplist can hold pages from the CMA
+                * area, so it must be skipped when CMA pages aren't allowed.
+                */
+               if (!IS_ENABLED(CONFIG_CMA) || alloc_flags & ALLOC_CMA ||
+                               migratetype != MIGRATE_MOVABLE) {
+                       page = rmqueue_pcplist(preferred_zone, zone, gfp_flags,
                                        migratetype, alloc_flags);
-               goto out;
+                       goto out;
+               }
        }
 
        /*
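For context, ALLOC_CMA is only set when the allocation is allowed to use CMA pageblocks; in this tree that decision is derived per allocation from the memalloc_nocma scope. A sketch of that helper, modelled on the upstream current_alloc_flags() (names and placement are assumptions and may differ here):

static inline unsigned int current_alloc_flags(gfp_t gfp_mask,
					unsigned int alloc_flags)
{
#ifdef CONFIG_CMA
	unsigned int pflags = current->flags;

	/* Movable allocations may use CMA unless a nocma scope is active. */
	if (!(pflags & PF_MEMALLOC_NOCMA) &&
			gfp_migratetype(gfp_mask) == MIGRATE_MOVABLE)
		alloc_flags |= ALLOC_CMA;
#endif
	return alloc_flags;
}

With ALLOC_CMA cleared, the new check above bypasses the pcplist for MIGRATE_MOVABLE order-0 requests and sends them down the buddy path below, where CMA pages sit on separate MIGRATE_CMA freelists and are only used when ALLOC_CMA is set.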
@@ -3381,7 +3389,13 @@ struct page *rmqueue(struct zone *preferred_zone,
 
        do {
                page = NULL;
-               if (alloc_flags & ALLOC_HARDER) {
+               /*
+                * An order-0 request can reach here when the pcplist was
+                * skipped due to a non-CMA allocation context. The HIGHATOMIC
+                * area is reserved for high-order atomic allocations, so an
+                * order-0 request must skip it.
+                */
+               if (order > 0 && alloc_flags & ALLOC_HARDER) {
                        page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC);
                        if (page)
                                trace_mm_page_alloc_zone_locked(page, order, migratetype);
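For reference, the remainder of this retry loop (context not shown in the hunk) falls back to the regular freelists, so an order-0 request that bypassed the pcplist is still served without touching the highatomic reserve; quoted from memory of this function and therefore approximate:

		if (!page)
			page = __rmqueue(zone, order, migratetype, alloc_flags);
	} while (page && check_new_pages(page, order));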
@@ -5975,7 +5989,7 @@ overlap_memmap_init(unsigned long zone, unsigned long *pfn)
  * done. Non-atomic initialization, single-pass.
  */
 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
-               unsigned long start_pfn, enum memmap_context context,
+               unsigned long start_pfn, enum meminit_context context,
                struct vmem_altmap *altmap)
 {
        unsigned long pfn, end_pfn = start_pfn + size;
@@ -6007,7 +6021,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
                 * There can be holes in boot-time mem_map[]s handed to this
                 * function.  They do not exist on hotplugged memory.
                 */
-               if (context == MEMMAP_EARLY) {
+               if (context == MEMINIT_EARLY) {
                        if (overlap_memmap_init(zone, &pfn))
                                continue;
                        if (defer_init(nid, pfn, end_pfn))
@@ -6016,7 +6030,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 
                page = pfn_to_page(pfn);
                __init_single_page(page, pfn, zone, nid);
-               if (context == MEMMAP_HOTPLUG)
+               if (context == MEMINIT_HOTPLUG)
                        __SetPageReserved(page);
 
                /*
@@ -6099,7 +6113,7 @@ void __ref memmap_init_zone_device(struct zone *zone,
                 * check here not to call set_pageblock_migratetype() against
                 * pfn out of zone.
                 *
-                * Please note that MEMMAP_HOTPLUG path doesn't clear memmap
+                * Please note that MEMINIT_HOTPLUG path doesn't clear memmap
                 * because this is done early in section_activate()
                 */
                if (!(pfn & (pageblock_nr_pages - 1))) {
@@ -6137,7 +6151,7 @@ void __meminit __weak memmap_init(unsigned long size, int nid,
                if (end_pfn > start_pfn) {
                        size = end_pfn - start_pfn;
                        memmap_init_zone(size, nid, zone, start_pfn,
-                                        MEMMAP_EARLY, NULL);
+                                        MEMINIT_EARLY, NULL);
                }
        }
 }
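The MEMMAP_* to MEMINIT_* changes above follow a rename of the initialization-context enum from memmap_context to meminit_context. Upstream the renamed enum looks like the following (reproduced for reference; the exact wording and its location in include/linux/mmzone.h may differ in this tree):

/*
 * Memory initialization context, used to distinguish memory added at
 * boot time from memory added later through the hotplug interface.
 */
enum meminit_context {
	MEMINIT_EARLY,
	MEMINIT_HOTPLUG,
};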
@@ -7891,6 +7905,8 @@ int __meminit init_per_zone_wmark_min(void)
        setup_min_slab_ratio();
 #endif
 
+       khugepaged_min_free_kbytes_update();
+
        return 0;
 }
 postcore_initcall(init_per_zone_wmark_min)
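Together with the linux/khugepaged.h include added at the top of the file, the new call lets khugepaged recalculate its min_free_kbytes-based reservation whenever the zone watermarks are (re)initialized. A sketch of the matching declaration, assuming the usual CONFIG_TRANSPARENT_HUGEPAGE stub pattern:

/* include/linux/khugepaged.h (sketch) */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
extern void khugepaged_min_free_kbytes_update(void);
#else
static inline void khugepaged_min_free_kbytes_update(void)
{
}
#endif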