mm: compaction: remove unnecessary is_via_compact_memory() checks
author: Johannes Weiner <hannes@cmpxchg.org>
Fri, 19 May 2023 12:39:58 +0000 (14:39 +0200)
committer: Andrew Morton <akpm@linux-foundation.org>
Fri, 9 Jun 2023 23:25:36 +0000 (16:25 -0700)
Remove from all paths not reachable via /proc/sys/vm/compact_memory.

Link: https://lkml.kernel.org/r/20230519123959.77335-5-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/compaction.c
mm/vmscan.c

index bb9b762..bc1f389 100644 (file)
@@ -2280,9 +2280,6 @@ bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
                unsigned long available;
                unsigned long watermark;
 
-               if (is_via_compact_memory(order))
-                       return true;
-
                /* Allocation can already succeed, nothing to do */
                watermark = wmark_pages(zone, alloc_flags & ALLOC_WMARK_MASK);
                if (zone_watermark_ok(zone, order, watermark,
@@ -2848,9 +2845,6 @@ static bool kcompactd_node_suitable(pg_data_t *pgdat)
                if (!populated_zone(zone))
                        continue;
 
-               if (is_via_compact_memory(pgdat->kcompactd_max_order))
-                       return true;
-
                /* Allocation can already succeed, check other zones */
                if (zone_watermark_ok(zone, pgdat->kcompactd_max_order,
                                      min_wmark_pages(zone),
@@ -2895,9 +2889,6 @@ static void kcompactd_do_work(pg_data_t *pgdat)
                if (compaction_deferred(zone, cc.order))
                        continue;
 
-               if (is_via_compact_memory(cc.order))
-                       goto compact;
-
                /* Allocation can already succeed, nothing to do */
                if (zone_watermark_ok(zone, cc.order,
                                      min_wmark_pages(zone), zoneid, 0))
@@ -2906,7 +2897,7 @@ static void kcompactd_do_work(pg_data_t *pgdat)
                if (compaction_suitable(zone, cc.order,
                                        zoneid) != COMPACT_CONTINUE)
                        continue;
-compact:
+
                if (kthread_should_stop())
                        return;
 
index 9f8bfd1..99e4ae4 100644 (file)
@@ -6399,9 +6399,6 @@ static inline bool should_continue_reclaim(struct pglist_data *pgdat,
                if (!managed_zone(zone))
                        continue;
 
-               if (sc->order == -1) /* is_via_compact_memory() */
-                       return false;
-
                /* Allocation can already succeed, nothing to do */
                if (zone_watermark_ok(zone, sc->order, min_wmark_pages(zone),
                                      sc->reclaim_idx, 0))
@@ -6598,9 +6595,6 @@ static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
 {
        unsigned long watermark;
 
-       if (sc->order == -1) /* is_via_compact_memory() */
-               goto suitable;
-
        /* Allocation can already succeed, nothing to do */
        if (zone_watermark_ok(zone, sc->order, min_wmark_pages(zone),
                              sc->reclaim_idx, 0))
@@ -6610,7 +6604,7 @@ static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
        if (compaction_suitable(zone, sc->order,
                                sc->reclaim_idx) == COMPACT_SKIPPED)
                return false;
-suitable:
+
        /*
         * Compaction is already possible, but it takes time to run and there
         * are potentially other callers using the pages just freed. So proceed