mm, page_alloc: restructure direct compaction handling in slowpath
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a42fa09..ae721a7 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3479,7 +3479,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
        struct page *page = NULL;
        unsigned int alloc_flags;
        unsigned long did_some_progress;
-       enum migrate_mode migration_mode = MIGRATE_ASYNC;
+       enum migrate_mode migration_mode = MIGRATE_SYNC_LIGHT;
        enum compact_result compact_result;
        int compaction_retries = 0;
        int no_progress_loops = 0;
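
The default can change because the first, asynchronous compaction attempt is now
issued explicitly before the retry loop (see the next hunk), so migration_mode
only governs the attempts made inside the loop, which can afford at least light
sync migration. For reference, the three modes come from
include/linux/migrate_mode.h, roughly as follows (paraphrased here for context,
not part of this patch):

	enum migrate_mode {
		MIGRATE_ASYNC,		/* never block during migration */
		MIGRATE_SYNC_LIGHT,	/* may block on most operations, but not ->writepage */
		MIGRATE_SYNC,		/* may block, including waiting for writeback */
	};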
@@ -3521,6 +3521,52 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
        if (page)
                goto got_pg;
 
+       /*
+        * For costly allocations, try direct compaction first, as it's likely
+        * that we have enough base pages and don't need to reclaim. Don't try
+        * that for allocations that are allowed to ignore watermarks, as the
+        * ALLOC_NO_WATERMARKS attempt didn't yet happen.
+        */
+       if (can_direct_reclaim && order > PAGE_ALLOC_COSTLY_ORDER &&
+               !gfp_pfmemalloc_allowed(gfp_mask)) {
+               page = __alloc_pages_direct_compact(gfp_mask, order,
+                                               alloc_flags, ac,
+                                               MIGRATE_ASYNC,
+                                               &compact_result);
+               if (page)
+                       goto got_pg;
+
+               /* Checks for THP-specific high-order allocations */
+               if (is_thp_gfp_mask(gfp_mask)) {
+                       /*
+                        * If compaction is deferred for high-order allocations,
+                        * it is because sync compaction recently failed. If
+                        * this is the case and the caller requested a THP
+                        * allocation, we do not want to heavily disrupt the
+                        * system, so we fail the allocation instead of entering
+                        * direct reclaim.
+                        */
+                       if (compact_result == COMPACT_DEFERRED)
+                               goto nopage;
+
+                       /*
+                        * Compaction is contended, so back off rather than cause
+                        * excessive stalls.
+                        */
+                       if (compact_result == COMPACT_CONTENDED)
+                               goto nopage;
+
+                       /*
+                        * It can become very expensive to allocate transparent
+                        * hugepages at fault, so use asynchronous memory
+                        * compaction for THP unless it is khugepaged trying to
+                        * collapse. All other requests should tolerate at
+                        * least light sync migration.
+                        */
+                       if (!(current->flags & PF_KTHREAD))
+                               migration_mode = MIGRATE_ASYNC;
+               }
+       }
 
 retry:
        /* Ensure kswapd doesn't accidentally go to sleep as long as we loop */
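
The block added above is the core of the restructuring: a costly request
(order > PAGE_ALLOC_COSTLY_ORDER) that may enter direct reclaim and is not
allowed to ignore watermarks gets one asynchronous compaction attempt before
the retry loop. A THP request additionally gives up when that attempt reports
deferred or contended compaction, and a THP page fault (i.e. not a kernel
thread such as khugepaged) keeps MIGRATE_ASYNC for its in-loop retries. A
standalone userspace sketch of that decision, with the kernel predicates
reduced to plain booleans (struct request, compact_first() and the simplified
result codes are illustrative stand-ins, not kernel API):

#include <stdbool.h>
#include <stdio.h>

#define PAGE_ALLOC_COSTLY_ORDER 3

enum migrate_mode { MIGRATE_ASYNC, MIGRATE_SYNC_LIGHT };
enum compact_result { COMPACT_DEFERRED, COMPACT_CONTENDED, COMPACT_PROGRESS };

/* Stand-ins for the gfp/task state the new block looks at */
struct request {
	unsigned int order;
	bool can_direct_reclaim;	/* __GFP_DIRECT_RECLAIM */
	bool pfmemalloc_allowed;	/* gfp_pfmemalloc_allowed() */
	bool is_thp;			/* is_thp_gfp_mask() */
	bool is_kthread;		/* current->flags & PF_KTHREAD */
};

/*
 * Returns false when the allocation should fail immediately (the THP
 * "goto nopage" cases); otherwise sets the migration mode the retry loop
 * will use.  The async compaction attempt itself is elided and represented
 * only by its result code.
 */
static bool compact_first(const struct request *rq,
			  enum compact_result async_result,
			  enum migrate_mode *retry_mode)
{
	*retry_mode = MIGRATE_SYNC_LIGHT;	/* the new default */

	if (!rq->can_direct_reclaim || rq->order <= PAGE_ALLOC_COSTLY_ORDER ||
	    rq->pfmemalloc_allowed)
		return true;			/* no up-front attempt */

	/* ... one MIGRATE_ASYNC compaction attempt happens here ... */

	if (rq->is_thp) {
		/* don't heavily disrupt the system for a hugepage */
		if (async_result == COMPACT_DEFERRED ||
		    async_result == COMPACT_CONTENDED)
			return false;		/* -> nopage */
		/* THP faults stay cheap; khugepaged tolerates sync-light */
		if (!rq->is_kthread)
			*retry_mode = MIGRATE_ASYNC;
	}
	return true;
}

int main(void)
{
	struct request thp_fault = {
		.order = 9, .can_direct_reclaim = true,
		.pfmemalloc_allowed = false, .is_thp = true, .is_kthread = false,
	};
	enum migrate_mode mode;
	bool proceed;

	printf("deferred: proceed=%d\n",
	       compact_first(&thp_fault, COMPACT_DEFERRED, &mode));
	proceed = compact_first(&thp_fault, COMPACT_PROGRESS, &mode);
	printf("progress: proceed=%d retry_mode=%s\n", proceed,
	       mode == MIGRATE_ASYNC ? "MIGRATE_ASYNC" : "MIGRATE_SYNC_LIGHT");
	return 0;
}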
@@ -3575,55 +3621,33 @@ retry:
        if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
                goto nopage;
 
-       /*
-        * Try direct compaction. The first pass is asynchronous. Subsequent
-        * attempts after direct reclaim are synchronous
-        */
+
+       /* Try direct reclaim and then allocating */
+       page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
+                                                       &did_some_progress);
+       if (page)
+               goto got_pg;
+
+       /* Try direct compaction and then allocating */
        page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
                                        migration_mode,
                                        &compact_result);
        if (page)
                goto got_pg;
 
-       /* Checks for THP-specific high-order allocations */
-       if (is_thp_gfp_mask(gfp_mask)) {
-               /*
-                * If compaction is deferred for high-order allocations, it is
-                * because sync compaction recently failed. If this is the case
-                * and the caller requested a THP allocation, we do not want
-                * to heavily disrupt the system, so we fail the allocation
-                * instead of entering direct reclaim.
-                */
-               if (compact_result == COMPACT_DEFERRED)
-                       goto nopage;
-
-               /*
-                * Compaction is contended so rather back off than cause
-                * excessive stalls.
-                */
-               if(compact_result == COMPACT_CONTENDED)
-                       goto nopage;
-       }
-
        if (order && compaction_made_progress(compact_result))
                compaction_retries++;
 
-       /* Try direct reclaim and then allocating */
-       page = __alloc_pages_direct_reclaim(gfp_mask, order, alloc_flags, ac,
-                                                       &did_some_progress);
-       if (page)
-               goto got_pg;
-
        /* Do not loop if specifically requested */
        if (gfp_mask & __GFP_NORETRY)
-               goto noretry;
+               goto nopage;
 
        /*
         * Do not retry costly high order allocations unless they are
         * __GFP_REPEAT
         */
        if (order > PAGE_ALLOC_COSTLY_ORDER && !(gfp_mask & __GFP_REPEAT))
-               goto noretry;
+               goto nopage;
 
        /*
         * Costly allocations might have made a progress but this doesn't mean
@@ -3662,25 +3686,6 @@ retry:
                goto retry;
        }
 
-noretry:
-       /*
-        * High-order allocations do not necessarily loop after direct reclaim
-        * and reclaim/compaction depends on compaction being called after
-        * reclaim so call directly if necessary.
-        * It can become very expensive to allocate transparent hugepages at
-        * fault, so use asynchronous memory compaction for THP unless it is
-        * khugepaged trying to collapse. All other requests should tolerate
-        * at least light sync migration.
-        */
-       if (is_thp_gfp_mask(gfp_mask) && !(current->flags & PF_KTHREAD))
-               migration_mode = MIGRATE_ASYNC;
-       else
-               migration_mode = MIGRATE_SYNC_LIGHT;
-       page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags,
-                                           ac, migration_mode,
-                                           &compact_result);
-       if (page)
-               goto got_pg;
 nopage:
        warn_alloc_failed(gfp_mask, order, NULL);
 got_pg:
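
Taken together, the last two hunks move the remaining compaction attempt behind
direct reclaim (reclaim frees the base pages that compaction then merges) and
delete the noretry fallback: with compaction now tried both before the loop and
after reclaim on every iteration, the dedicated attempt for allocations that do
not loop is redundant, and __GFP_NORETRY requests as well as costly orders
without __GFP_REPEAT jump straight to nopage. A minimal model of one pass
through the reordered loop body, with each kernel step reduced to a boolean
outcome supplied by the caller (none of the identifiers below are kernel
symbols):

#include <stdbool.h>
#include <stdio.h>

#define PAGE_ALLOC_COSTLY_ORDER 3

enum pass_result { GOT_PAGE, NO_PAGE, RETRY_LOOP };

/*
 * One pass through the retry loop in the new layout.  The kswapd wakeup,
 * watermark adjustment and OOM handling around it are left out.
 */
static enum pass_result retry_pass(unsigned int order,
				   bool gfp_noretry, bool gfp_repeat,
				   bool reclaim_alloc_ok, bool compact_alloc_ok,
				   bool retry_criteria_met)
{
	/* 1. direct reclaim first, then try the freelists */
	if (reclaim_alloc_ok)
		return GOT_PAGE;

	/* 2. direct compaction second, with migration_mode (sync-light default) */
	if (compact_alloc_ok)
		return GOT_PAGE;

	/* 3. the old noretry label is gone: bail out to nopage directly */
	if (gfp_noretry)
		return NO_PAGE;			/* __GFP_NORETRY */
	if (order > PAGE_ALLOC_COSTLY_ORDER && !gfp_repeat)
		return NO_PAGE;			/* costly without __GFP_REPEAT */

	/* 4. otherwise the reclaim/compaction retry checks decide */
	return retry_criteria_met ? RETRY_LOOP : NO_PAGE;
}

int main(void)
{
	/* costly __GFP_NORETRY request that made no progress: straight to nopage */
	printf("%d\n", retry_pass(9, true, false, false, false, false));
	/* order-0 request whose retry checks still see progress: loop again */
	printf("%d\n", retry_pass(0, false, false, false, false, true));
	return 0;
}

As far as these hunks show, a costly allocation that is allowed to direct-reclaim
still sees the same sequence of attempts as before (asynchronous compaction,
direct reclaim, then a sync-light compaction pass); the patch mainly changes
where each step lives and puts compaction after reclaim inside the loop.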