mm/zsmalloc: remove set_zspage_mapping()
author Chengming Zhou <zhouchengming@bytedance.com>
Tue, 20 Feb 2024 06:53:00 +0000 (06:53 +0000)
committer Andrew Morton <akpm@linux-foundation.org>
Sat, 24 Feb 2024 01:48:32 +0000 (17:48 -0800)
Patch series "mm/zsmalloc: some cleanup for get/set_zspage_mapping()".

The discussion[1] with Sergey shows there is some cleanup work to do
in get/set_zspage_mapping() (both helpers are sketched after the list
below):

- the fullness returned from get_zspage_mapping() is not stable outside
  pool->lock; this usage pattern is confusing, but should be OK in the
  free_zspage path.

- the class_idx returned from get_zspage_mapping() is seldom used; only
  the free_zspage path uses it to look up the zspage's class.

- set_zspage_mapping() always sets zspage->class, even though the class
  never changes after the zspage is allocated.

[1] https://lore.kernel.org/all/a6c22e30-cf10-4122-91bc-ceb9fb57a5d6@bytedance.com/
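
For context, here is the helper pair in question as it looks before
this series. set_zspage_mapping() is quoted from the removal hunk
below; get_zspage_mapping() is reproduced as a sketch and may differ
slightly from the tree:

	static void get_zspage_mapping(struct zspage *zspage,
				       unsigned int *class_idx,
				       int *fullness)
	{
		/* only meaningful while pool->lock is held */
		*fullness = zspage->fullness;
		*class_idx = zspage->class;
	}

	static void set_zspage_mapping(struct zspage *zspage,
				       unsigned int class_idx,
				       int fullness)
	{
		zspage->class = class_idx;
		zspage->fullness = fullness;
	}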

This patch (of 3):

We only need to update zspage->fullness in insert_zspage(), since
zspage->class never changes after the zspage is allocated.
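
Condensing the hunks below, the two remaining assignments end up in
exactly two places. This is a sketch pieced together from the diff,
not a verbatim quote (alloc_zspage()'s signature and the elided setup
are assumptions about the surrounding file):

	static void insert_zspage(struct size_class *class,
				  struct zspage *zspage,
				  int fullness)
	{
		class_stat_inc(class, fullness, 1);
		list_add(&zspage->list, &class->fullness_list[fullness]);
		zspage->fullness = fullness;	/* now the only writer of ->fullness */
	}

	static struct zspage *alloc_zspage(struct zs_pool *pool,
					   struct size_class *class,
					   gfp_t gfp)
	{
		/* ... zspage and page allocation elided ... */
		create_page_chain(class, zspage, pages);
		init_zspage(class, zspage);
		zspage->pool = pool;
		zspage->class = class->index;	/* set once, never changed later */

		return zspage;
	}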

Link: https://lkml.kernel.org/r/20240220-b4-zsmalloc-cleanup-v1-0-5c5ee4ccdd87@bytedance.com
Link: https://lkml.kernel.org/r/20240220-b4-zsmalloc-cleanup-v1-1-5c5ee4ccdd87@bytedance.com
Signed-off-by: Chengming Zhou <zhouchengming@bytedance.com>
Reviewed-by: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Nhat Pham <nphamcs@gmail.com>
Cc: Yosry Ahmed <yosryahmed@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/zsmalloc.c

index a48f465..a665391 100644
@@ -486,14 +486,6 @@ static struct size_class *zspage_class(struct zs_pool *pool,
        return pool->size_class[zspage->class];
 }
 
-static void set_zspage_mapping(struct zspage *zspage,
-                              unsigned int class_idx,
-                              int fullness)
-{
-       zspage->class = class_idx;
-       zspage->fullness = fullness;
-}
-
 /*
  * zsmalloc divides the pool into various size classes where each
  * class maintains a list of zspages where each zspage is divided
@@ -688,6 +680,7 @@ static void insert_zspage(struct size_class *class,
 {
        class_stat_inc(class, fullness, 1);
        list_add(&zspage->list, &class->fullness_list[fullness]);
+       zspage->fullness = fullness;
 }
 
 /*
@@ -725,7 +718,6 @@ static int fix_fullness_group(struct size_class *class, struct zspage *zspage)
 
        remove_zspage(class, zspage, currfg);
        insert_zspage(class, zspage, newfg);
-       set_zspage_mapping(zspage, class_idx, newfg);
 out:
        return newfg;
 }
@@ -1005,6 +997,7 @@ static struct zspage *alloc_zspage(struct zs_pool *pool,
        create_page_chain(class, zspage, pages);
        init_zspage(class, zspage);
        zspage->pool = pool;
+       zspage->class = class->index;
 
        return zspage;
 }
@@ -1397,7 +1390,6 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
        obj = obj_malloc(pool, zspage, handle);
        newfg = get_fullness_group(class, zspage);
        insert_zspage(class, zspage, newfg);
-       set_zspage_mapping(zspage, class->index, newfg);
        record_obj(handle, obj);
        atomic_long_add(class->pages_per_zspage, &pool->pages_allocated);
        class_stat_inc(class, ZS_OBJS_ALLOCATED, class->objs_per_zspage);
@@ -1655,7 +1647,6 @@ static int putback_zspage(struct size_class *class, struct zspage *zspage)
 
        fullness = get_fullness_group(class, zspage);
        insert_zspage(class, zspage, fullness);
-       set_zspage_mapping(zspage, class->index, fullness);
 
        return fullness;
 }