mm: zswap: function ordering: per-cpu compression infra
author Johannes Weiner <hannes@cmpxchg.org>
Tue, 30 Jan 2024 01:36:54 +0000 (20:36 -0500)
committer Andrew Morton <akpm@linux-foundation.org>
Thu, 22 Feb 2024 18:24:44 +0000 (10:24 -0800)
The per-cpu compression init/exit callbacks are awkwardly in the
middle of the shrinker code. Move them up to the compression section.

Link: https://lkml.kernel.org/r/20240130014208.565554-19-hannes@cmpxchg.org
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Nhat Pham <nphamcs@gmail.com>
Cc: Chengming Zhou <zhouchengming@bytedance.com>
Cc: Yosry Ahmed <yosryahmed@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
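
[Editorial note, not part of the commit] For context on where these per-CPU callbacks plug in: zswap registers them with the CPU hotplug state machine as a multi-instance state, and each pool adds its hlist node as an instance so the prepare/dead callbacks run per CPU. The sketch below is a simplified illustration based on mm/zswap.c around this time; the wrapper names zswap_cpuhp_sketch() and zswap_pool_cpuhp_sketch() are invented for the example, and error handling is trimmed.

#include <linux/cpuhotplug.h>

/* Sketch: one multi-instance hotplug state, set up once at zswap init. */
static int __init zswap_cpuhp_sketch(void)
{
	return cpuhp_setup_state_multi(CPUHP_MM_ZSWP_POOL_PREPARE,
				       "mm/zswap_pool:prepare",
				       zswap_cpu_comp_prepare,
				       zswap_cpu_comp_dead);
}

/* Sketch: each pool registers/unregisters itself as an instance. */
static void zswap_pool_cpuhp_sketch(struct zswap_pool *pool, bool add)
{
	if (add)
		/* Runs zswap_cpu_comp_prepare() on every online CPU now,
		 * and again whenever a CPU comes online later. */
		cpuhp_state_add_instance(CPUHP_MM_ZSWP_POOL_PREPARE,
					 &pool->node);
	else
		/* Runs zswap_cpu_comp_dead() on every online CPU. */
		cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE,
					    &pool->node);
}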
mm/zswap.c

index 6d1e584..680e5a4 100644
@@ -995,6 +995,72 @@ static void zswap_invalidate_entry(struct zswap_tree *tree,
 /*********************************
 * compressed storage functions
 **********************************/
+static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
+{
+       struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
+       struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
+       struct crypto_acomp *acomp;
+       struct acomp_req *req;
+       int ret;
+
+       mutex_init(&acomp_ctx->mutex);
+
+       acomp_ctx->buffer = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
+       if (!acomp_ctx->buffer)
+               return -ENOMEM;
+
+       acomp = crypto_alloc_acomp_node(pool->tfm_name, 0, 0, cpu_to_node(cpu));
+       if (IS_ERR(acomp)) {
+               pr_err("could not alloc crypto acomp %s : %ld\n",
+                               pool->tfm_name, PTR_ERR(acomp));
+               ret = PTR_ERR(acomp);
+               goto acomp_fail;
+       }
+       acomp_ctx->acomp = acomp;
+
+       req = acomp_request_alloc(acomp_ctx->acomp);
+       if (!req) {
+               pr_err("could not alloc crypto acomp_request %s\n",
+                      pool->tfm_name);
+               ret = -ENOMEM;
+               goto req_fail;
+       }
+       acomp_ctx->req = req;
+
+       crypto_init_wait(&acomp_ctx->wait);
+       /*
+        * if the backend of acomp is async zip, crypto_req_done() will wakeup
+        * crypto_wait_req(); if the backend of acomp is scomp, the callback
+        * won't be called, crypto_wait_req() will return without blocking.
+        */
+       acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+                                  crypto_req_done, &acomp_ctx->wait);
+
+       return 0;
+
+req_fail:
+       crypto_free_acomp(acomp_ctx->acomp);
+acomp_fail:
+       kfree(acomp_ctx->buffer);
+       return ret;
+}
+
+static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
+{
+       struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
+       struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
+
+       if (!IS_ERR_OR_NULL(acomp_ctx)) {
+               if (!IS_ERR_OR_NULL(acomp_ctx->req))
+                       acomp_request_free(acomp_ctx->req);
+               if (!IS_ERR_OR_NULL(acomp_ctx->acomp))
+                       crypto_free_acomp(acomp_ctx->acomp);
+               kfree(acomp_ctx->buffer);
+       }
+
+       return 0;
+}
+
 static bool zswap_compress(struct folio *folio, struct zswap_entry *entry)
 {
        struct crypto_acomp_ctx *acomp_ctx;
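
[Editorial note, not part of the patch] To make the crypto_wait_req() comment above concrete: the per-CPU context initialized in zswap_cpu_comp_prepare() is consumed in the compression path roughly as sketched here. This is a trimmed illustration of the caller pattern, assuming folio and entry as in zswap_compress() above, not the exact upstream body.

	struct crypto_acomp_ctx *acomp_ctx;
	struct scatterlist input, output;
	unsigned int dlen = PAGE_SIZE;
	int ret;

	/* Per-CPU state; the mutex serializes use of this CPU's context. */
	acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
	mutex_lock(&acomp_ctx->mutex);

	sg_init_table(&input, 1);
	sg_set_page(&input, &folio->page, PAGE_SIZE, 0);
	/* Destination is the 2 * PAGE_SIZE buffer allocated in prepare(). */
	sg_init_one(&output, acomp_ctx->buffer, PAGE_SIZE * 2);
	acomp_request_set_params(acomp_ctx->req, &input, &output,
				 PAGE_SIZE, dlen);

	/*
	 * Async backends complete via crypto_req_done() waking this wait;
	 * sync (scomp) backends finish inline and the wait returns at once.
	 */
	ret = crypto_wait_req(crypto_acomp_compress(acomp_ctx->req),
			      &acomp_ctx->wait);
	dlen = acomp_ctx->req->dlen;
	mutex_unlock(&acomp_ctx->mutex);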
@@ -1201,75 +1267,6 @@ static void zswap_alloc_shrinker(struct zswap_pool *pool)
        pool->shrinker->seeks = DEFAULT_SEEKS;
 }
 
-/*********************************
-* per-cpu code
-**********************************/
-static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
-{
-       struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
-       struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
-       struct crypto_acomp *acomp;
-       struct acomp_req *req;
-       int ret;
-
-       mutex_init(&acomp_ctx->mutex);
-
-       acomp_ctx->buffer = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
-       if (!acomp_ctx->buffer)
-               return -ENOMEM;
-
-       acomp = crypto_alloc_acomp_node(pool->tfm_name, 0, 0, cpu_to_node(cpu));
-       if (IS_ERR(acomp)) {
-               pr_err("could not alloc crypto acomp %s : %ld\n",
-                               pool->tfm_name, PTR_ERR(acomp));
-               ret = PTR_ERR(acomp);
-               goto acomp_fail;
-       }
-       acomp_ctx->acomp = acomp;
-
-       req = acomp_request_alloc(acomp_ctx->acomp);
-       if (!req) {
-               pr_err("could not alloc crypto acomp_request %s\n",
-                      pool->tfm_name);
-               ret = -ENOMEM;
-               goto req_fail;
-       }
-       acomp_ctx->req = req;
-
-       crypto_init_wait(&acomp_ctx->wait);
-       /*
-        * if the backend of acomp is async zip, crypto_req_done() will wakeup
-        * crypto_wait_req(); if the backend of acomp is scomp, the callback
-        * won't be called, crypto_wait_req() will return without blocking.
-        */
-       acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-                                  crypto_req_done, &acomp_ctx->wait);
-
-       return 0;
-
-req_fail:
-       crypto_free_acomp(acomp_ctx->acomp);
-acomp_fail:
-       kfree(acomp_ctx->buffer);
-       return ret;
-}
-
-static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
-{
-       struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
-       struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
-
-       if (!IS_ERR_OR_NULL(acomp_ctx)) {
-               if (!IS_ERR_OR_NULL(acomp_ctx->req))
-                       acomp_request_free(acomp_ctx->req);
-               if (!IS_ERR_OR_NULL(acomp_ctx->acomp))
-                       crypto_free_acomp(acomp_ctx->acomp);
-               kfree(acomp_ctx->buffer);
-       }
-
-       return 0;
-}
-
 static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_one *l,
                                       spinlock_t *lock, void *arg)
 {