Linux 6.9-rc1
diff --git a/mm/zswap.c b/mm/zswap.c
index 104835b..9dec853 100644
@@ -2,7 +2,7 @@
 /*
  * zswap.c - zswap driver file
  *
- * zswap is a backend for frontswap that takes pages that are in the process
+ * zswap is a cache that takes pages that are in the process
  * of being swapped out and attempts to compress and store them in a
  * RAM-based memory pool.  This can result in a significant I/O reduction on
  * the swap device and, in the case where decompressing from RAM is faster
 #include <linux/spinlock.h>
 #include <linux/types.h>
 #include <linux/atomic.h>
-#include <linux/frontswap.h>
 #include <linux/rbtree.h>
 #include <linux/swap.h>
 #include <linux/crypto.h>
 #include <linux/scatterlist.h>
+#include <linux/mempolicy.h>
 #include <linux/mempool.h>
 #include <linux/zpool.h>
 #include <crypto/acompress.h>
-
+#include <linux/zswap.h>
 #include <linux/mm_types.h>
 #include <linux/page-flags.h>
 #include <linux/swapops.h>
 #include <linux/writeback.h>
 #include <linux/pagemap.h>
 #include <linux/workqueue.h>
+#include <linux/list_lru.h>
 
 #include "swap.h"
+#include "internal.h"
 
 /*********************************
 * statistics
@@ -61,14 +63,14 @@ static u64 zswap_pool_limit_hit;
 static u64 zswap_written_back_pages;
 /* Store failed due to a reclaim failure after pool limit was reached */
 static u64 zswap_reject_reclaim_fail;
+/* Store failed due to compression algorithm failure */
+static u64 zswap_reject_compress_fail;
 /* Compressed page was too big for the allocator to (optimally) store */
 static u64 zswap_reject_compress_poor;
 /* Store failed because underlying allocator could not get memory */
 static u64 zswap_reject_alloc_fail;
 /* Store failed because the entry metadata could not be allocated (rare) */
 static u64 zswap_reject_kmemcache_fail;
-/* Duplicate store was encountered (rare) */
-static u64 zswap_duplicate_entry;
 
 /* Shrinker work queue */
 static struct workqueue_struct *shrink_wq;
@@ -81,6 +83,8 @@ static bool zswap_pool_reached_full;
 
 #define ZSWAP_PARAM_UNSET ""
 
+static int zswap_setup(void);
+
 /* Enable/disable zswap */
 static bool zswap_enabled = IS_ENABLED(CONFIG_ZSWAP_DEFAULT_ON);
 static int zswap_enabled_param_set(const char *,
@@ -135,6 +139,19 @@ static bool zswap_non_same_filled_pages_enabled = true;
 module_param_named(non_same_filled_pages_enabled, zswap_non_same_filled_pages_enabled,
                   bool, 0644);
 
+/* Number of zpools in zswap_pool (empirically determined for scalability) */
+#define ZSWAP_NR_ZPOOLS 32
+
+/* Enable/disable memory pressure-based shrinker. */
+static bool zswap_shrinker_enabled = IS_ENABLED(
+               CONFIG_ZSWAP_SHRINKER_DEFAULT_ON);
+module_param_named(shrinker_enabled, zswap_shrinker_enabled, bool, 0644);
+
+bool is_zswap_enabled(void)
+{
+       return zswap_enabled;
+}
+
 /*********************************
 * data structures
 **********************************/
@@ -143,21 +160,38 @@ struct crypto_acomp_ctx {
        struct crypto_acomp *acomp;
        struct acomp_req *req;
        struct crypto_wait wait;
-       u8 *dstmem;
-       struct mutex *mutex;
+       u8 *buffer;
+       struct mutex mutex;
+       bool is_sleepable;
 };
 
+/*
+ * The lock ordering is zswap_tree.lock -> zswap_pool.lru_lock.
+ * The only case where lru_lock is not acquired while holding tree.lock is
+ * when a zswap_entry is taken off the lru for writeback; in that case, it
+ * must be verified that the entry is still valid in the tree.
+ */
 struct zswap_pool {
-       struct zpool *zpool;
+       struct zpool *zpools[ZSWAP_NR_ZPOOLS];
        struct crypto_acomp_ctx __percpu *acomp_ctx;
-       struct kref kref;
+       struct percpu_ref ref;
        struct list_head list;
        struct work_struct release_work;
-       struct work_struct shrink_work;
        struct hlist_node node;
        char tfm_name[CRYPTO_MAX_ALG_NAME];
 };
 
+/* Global LRU lists shared by all zswap pools. */
+static struct list_lru zswap_list_lru;
+/* counter of pages stored in all zswap pools. */
+static atomic_t zswap_nr_stored = ATOMIC_INIT(0);
+
+/* The lock protects zswap_next_shrink updates. */
+static DEFINE_SPINLOCK(zswap_shrink_lock);
+static struct mem_cgroup *zswap_next_shrink;
+static struct work_struct zswap_shrink_work;
+static struct shrinker *zswap_shrinker;
+
 /*
  * struct zswap_entry
  *
@@ -165,23 +199,19 @@ struct zswap_pool {
  * page within zswap.
  *
  * rbnode - links the entry into red-black tree for the appropriate swap type
- * offset - the swap offset for the entry.  Index into the red-black tree.
- * refcount - the number of outstanding reference to the entry. This is needed
- *            to protect against premature freeing of the entry by code
- *            concurrent calls to load, invalidate, and writeback.  The lock
- *            for the zswap_tree structure that contains the entry must
- *            be held while changing the refcount.  Since the lock must
- *            be held, there is no reason to also make refcount atomic.
+ * swpentry - associated swap entry, the offset indexes into the red-black tree
  * length - the length in bytes of the compressed page data.  Needed during
- *          decompression. For a same value filled page length is 0.
+ *          decompression. For a same-value filled page, length is 0, and
+ *          both pool and lru are invalid and must be ignored.
  * pool - the zswap_pool the entry's data is in
  * handle - zpool allocation handle that stores the compressed page data
  * value - value of the same-value filled pages which have same content
+ * objcg - the obj_cgroup that the compressed memory is charged to
+ * lru - handle to the pool's lru used to evict pages.
  */
 struct zswap_entry {
        struct rb_node rbnode;
-       pgoff_t offset;
-       int refcount;
+       swp_entry_t swpentry;
        unsigned int length;
        struct zswap_pool *pool;
        union {
@@ -189,23 +219,16 @@ struct zswap_entry {
                unsigned long value;
        };
        struct obj_cgroup *objcg;
+       struct list_head lru;
 };
 
-struct zswap_header {
-       swp_entry_t swpentry;
-};
-
-/*
- * The tree lock in the zswap_tree struct protects a few things:
- * - the rbtree
- * - the refcount field of each entry in the tree
- */
 struct zswap_tree {
        struct rb_root rbroot;
        spinlock_t lock;
 };
 
 static struct zswap_tree *zswap_trees[MAX_SWAPFILES];
+static unsigned int nr_zswap_trees[MAX_SWAPFILES];
 
 /* RCU-protected iteration */
 static LIST_HEAD(zswap_pools);
@@ -214,11 +237,16 @@ static DEFINE_SPINLOCK(zswap_pools_lock);
 /* pool counter to provide unique names to zpool */
 static atomic_t zswap_pools_count = ATOMIC_INIT(0);
 
-/* used by param callback function */
-static bool zswap_init_started;
+enum zswap_init_type {
+       ZSWAP_UNINIT,
+       ZSWAP_INIT_SUCCEED,
+       ZSWAP_INIT_FAILED
+};
+
+static enum zswap_init_type zswap_init_state;
 
-/* fatal error during init */
-static bool zswap_init_failed;
+/* used to ensure the integrity of initialization */
+static DEFINE_MUTEX(zswap_init_lock);
 
 /* init completed, but couldn't create the initial pool */
 static bool zswap_has_pool;
@@ -227,17 +255,15 @@ static bool zswap_has_pool;
 * helpers and fwd declarations
 **********************************/
 
+static inline struct zswap_tree *swap_zswap_tree(swp_entry_t swp)
+{
+       return &zswap_trees[swp_type(swp)][swp_offset(swp)
+               >> SWAP_ADDRESS_SPACE_SHIFT];
+}
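swap_zswap_tree() above shards each swap type's offset range across several trees, using the upper bits of the swap offset (offset >> SWAP_ADDRESS_SPACE_SHIFT) as the tree index, which reduces contention on any single tree lock. The following is a minimal userspace sketch of that sharding idea; the DEMO_* names and the shift value of 6 are assumptions of this example, not the kernel's actual SWAP_ADDRESS_SPACE_SHIFT or swp_entry_t plumbing.

#include <stdio.h>
#include <stdlib.h>

/* Assumed values for illustration only; the kernel derives these from
 * swp_entry_t and SWAP_ADDRESS_SPACE_SHIFT. */
#define DEMO_ADDRESS_SPACE_SHIFT 6
#define DEMO_NR_TREES(nr_pages) \
	(((nr_pages) + (1UL << DEMO_ADDRESS_SPACE_SHIFT) - 1) >> DEMO_ADDRESS_SPACE_SHIFT)

struct demo_tree {
	/* stands in for struct zswap_tree (rbroot + lock) */
	unsigned long nr_entries;
};

/* Map a swap offset to the tree that covers it, mirroring swap_zswap_tree(). */
static struct demo_tree *demo_tree_for(struct demo_tree *trees, unsigned long offset)
{
	return &trees[offset >> DEMO_ADDRESS_SPACE_SHIFT];
}

int main(void)
{
	unsigned long nr_pages = 1000;
	unsigned long nr_trees = DEMO_NR_TREES(nr_pages);
	struct demo_tree *trees = calloc(nr_trees, sizeof(*trees));
	unsigned long offset;

	if (!trees)
		return 1;

	for (offset = 0; offset < nr_pages; offset++)
		demo_tree_for(trees, offset)->nr_entries++;

	printf("%lu offsets sharded over %lu trees; tree 0 holds %lu\n",
	       nr_pages, nr_trees, trees[0].nr_entries);
	free(trees);
	return 0;
}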
+
 #define zswap_pool_debug(msg, p)                               \
        pr_debug("%s pool %s/%s\n", msg, (p)->tfm_name,         \
-                zpool_get_type((p)->zpool))
-
-static int zswap_writeback_entry(struct zpool *pool, unsigned long handle);
-static int zswap_pool_get(struct zswap_pool *pool);
-static void zswap_pool_put(struct zswap_pool *pool);
-
-static const struct zpool_ops zswap_zpool_ops = {
-       .evict = zswap_writeback_entry
-};
+                zpool_get_type((p)->zpools[0]))
 
 static bool zswap_is_full(void)
 {
@@ -252,6 +278,17 @@ static bool zswap_can_accept(void)
                        DIV_ROUND_UP(zswap_pool_total_size, PAGE_SIZE);
 }
 
+static u64 get_zswap_pool_size(struct zswap_pool *pool)
+{
+       u64 pool_size = 0;
+       int i;
+
+       for (i = 0; i < ZSWAP_NR_ZPOOLS; i++)
+               pool_size += zpool_get_total_size(pool->zpools[i]);
+
+       return pool_size;
+}
+
 static void zswap_update_total_size(void)
 {
        struct zswap_pool *pool;
@@ -260,7 +297,7 @@ static void zswap_update_total_size(void)
        rcu_read_lock();
 
        list_for_each_entry_rcu(pool, &zswap_pools, list)
-               total += zpool_get_total_size(pool->zpool);
+               total += get_zswap_pool_size(pool);
 
        rcu_read_unlock();
 
@@ -268,263 +305,197 @@ static void zswap_update_total_size(void)
 }
 
 /*********************************
-* zswap entry functions
+* pool functions
 **********************************/
-static struct kmem_cache *zswap_entry_cache;
+static void __zswap_pool_empty(struct percpu_ref *ref);
 
-static int __init zswap_entry_cache_create(void)
+static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
 {
-       zswap_entry_cache = KMEM_CACHE(zswap_entry, 0);
-       return zswap_entry_cache == NULL;
-}
+       int i;
+       struct zswap_pool *pool;
+       char name[38]; /* 'zswap' + 32 char (max) num + \0 */
+       gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
+       int ret;
 
-static void __init zswap_entry_cache_destroy(void)
-{
-       kmem_cache_destroy(zswap_entry_cache);
-}
+       if (!zswap_has_pool) {
+               /* if either are unset, pool initialization failed, and we
+                * need both params to be set correctly before trying to
+                * create a pool.
+                */
+               if (!strcmp(type, ZSWAP_PARAM_UNSET))
+                       return NULL;
+               if (!strcmp(compressor, ZSWAP_PARAM_UNSET))
+                       return NULL;
+       }
 
-static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp)
-{
-       struct zswap_entry *entry;
-       entry = kmem_cache_alloc(zswap_entry_cache, gfp);
-       if (!entry)
+       pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+       if (!pool)
                return NULL;
-       entry->refcount = 1;
-       RB_CLEAR_NODE(&entry->rbnode);
-       return entry;
-}
 
-static void zswap_entry_cache_free(struct zswap_entry *entry)
-{
-       kmem_cache_free(zswap_entry_cache, entry);
-}
+       for (i = 0; i < ZSWAP_NR_ZPOOLS; i++) {
+               /* unique name for each pool specifically required by zsmalloc */
+               snprintf(name, 38, "zswap%x",
+                        atomic_inc_return(&zswap_pools_count));
 
-/*********************************
-* rbtree functions
-**********************************/
-static struct zswap_entry *zswap_rb_search(struct rb_root *root, pgoff_t offset)
-{
-       struct rb_node *node = root->rb_node;
-       struct zswap_entry *entry;
+               pool->zpools[i] = zpool_create_pool(type, name, gfp);
+               if (!pool->zpools[i]) {
+                       pr_err("%s zpool not available\n", type);
+                       goto error;
+               }
+       }
+       pr_debug("using %s zpool\n", zpool_get_type(pool->zpools[0]));
 
-       while (node) {
-               entry = rb_entry(node, struct zswap_entry, rbnode);
-               if (entry->offset > offset)
-                       node = node->rb_left;
-               else if (entry->offset < offset)
-                       node = node->rb_right;
-               else
-                       return entry;
+       strscpy(pool->tfm_name, compressor, sizeof(pool->tfm_name));
+
+       pool->acomp_ctx = alloc_percpu(*pool->acomp_ctx);
+       if (!pool->acomp_ctx) {
+               pr_err("percpu alloc failed\n");
+               goto error;
        }
+
+       ret = cpuhp_state_add_instance(CPUHP_MM_ZSWP_POOL_PREPARE,
+                                      &pool->node);
+       if (ret)
+               goto error;
+
+       /* being the current pool takes 1 ref; this func expects the
+        * caller to always add the new pool as the current pool
+        */
+       ret = percpu_ref_init(&pool->ref, __zswap_pool_empty,
+                             PERCPU_REF_ALLOW_REINIT, GFP_KERNEL);
+       if (ret)
+               goto ref_fail;
+       INIT_LIST_HEAD(&pool->list);
+
+       zswap_pool_debug("created", pool);
+
+       return pool;
+
+ref_fail:
+       cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
+error:
+       if (pool->acomp_ctx)
+               free_percpu(pool->acomp_ctx);
+       while (i--)
+               zpool_destroy_pool(pool->zpools[i]);
+       kfree(pool);
        return NULL;
 }
 
-/*
- * In the case that a entry with the same offset is found, a pointer to
- * the existing entry is stored in dupentry and the function returns -EEXIST
- */
-static int zswap_rb_insert(struct rb_root *root, struct zswap_entry *entry,
-                       struct zswap_entry **dupentry)
+static struct zswap_pool *__zswap_pool_create_fallback(void)
 {
-       struct rb_node **link = &root->rb_node, *parent = NULL;
-       struct zswap_entry *myentry;
+       bool has_comp, has_zpool;
 
-       while (*link) {
-               parent = *link;
-               myentry = rb_entry(parent, struct zswap_entry, rbnode);
-               if (myentry->offset > entry->offset)
-                       link = &(*link)->rb_left;
-               else if (myentry->offset < entry->offset)
-                       link = &(*link)->rb_right;
-               else {
-                       *dupentry = myentry;
-                       return -EEXIST;
-               }
+       has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
+       if (!has_comp && strcmp(zswap_compressor,
+                               CONFIG_ZSWAP_COMPRESSOR_DEFAULT)) {
+               pr_err("compressor %s not available, using default %s\n",
+                      zswap_compressor, CONFIG_ZSWAP_COMPRESSOR_DEFAULT);
+               param_free_charp(&zswap_compressor);
+               zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
+               has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
        }
-       rb_link_node(&entry->rbnode, parent, link);
-       rb_insert_color(&entry->rbnode, root);
-       return 0;
-}
-
-static void zswap_rb_erase(struct rb_root *root, struct zswap_entry *entry)
-{
-       if (!RB_EMPTY_NODE(&entry->rbnode)) {
-               rb_erase(&entry->rbnode, root);
-               RB_CLEAR_NODE(&entry->rbnode);
+       if (!has_comp) {
+               pr_err("default compressor %s not available\n",
+                      zswap_compressor);
+               param_free_charp(&zswap_compressor);
+               zswap_compressor = ZSWAP_PARAM_UNSET;
        }
-}
 
-/*
- * Carries out the common pattern of freeing and entry's zpool allocation,
- * freeing the entry itself, and decrementing the number of stored pages.
- */
-static void zswap_free_entry(struct zswap_entry *entry)
-{
-       if (entry->objcg) {
-               obj_cgroup_uncharge_zswap(entry->objcg, entry->length);
-               obj_cgroup_put(entry->objcg);
+       has_zpool = zpool_has_pool(zswap_zpool_type);
+       if (!has_zpool && strcmp(zswap_zpool_type,
+                                CONFIG_ZSWAP_ZPOOL_DEFAULT)) {
+               pr_err("zpool %s not available, using default %s\n",
+                      zswap_zpool_type, CONFIG_ZSWAP_ZPOOL_DEFAULT);
+               param_free_charp(&zswap_zpool_type);
+               zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
+               has_zpool = zpool_has_pool(zswap_zpool_type);
        }
-       if (!entry->length)
-               atomic_dec(&zswap_same_filled_pages);
-       else {
-               zpool_free(entry->pool->zpool, entry->handle);
-               zswap_pool_put(entry->pool);
+       if (!has_zpool) {
+               pr_err("default zpool %s not available\n",
+                      zswap_zpool_type);
+               param_free_charp(&zswap_zpool_type);
+               zswap_zpool_type = ZSWAP_PARAM_UNSET;
        }
-       zswap_entry_cache_free(entry);
-       atomic_dec(&zswap_stored_pages);
-       zswap_update_total_size();
-}
 
-/* caller must hold the tree lock */
-static void zswap_entry_get(struct zswap_entry *entry)
-{
-       entry->refcount++;
+       if (!has_comp || !has_zpool)
+               return NULL;
+
+       return zswap_pool_create(zswap_zpool_type, zswap_compressor);
 }
 
-/* caller must hold the tree lock
-* remove from the tree and free it, if nobody reference the entry
-*/
-static void zswap_entry_put(struct zswap_tree *tree,
-                       struct zswap_entry *entry)
+static void zswap_pool_destroy(struct zswap_pool *pool)
 {
-       int refcount = --entry->refcount;
+       int i;
 
-       BUG_ON(refcount < 0);
-       if (refcount == 0) {
-               zswap_rb_erase(&tree->rbroot, entry);
-               zswap_free_entry(entry);
-       }
+       zswap_pool_debug("destroying", pool);
+
+       cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
+       free_percpu(pool->acomp_ctx);
+
+       for (i = 0; i < ZSWAP_NR_ZPOOLS; i++)
+               zpool_destroy_pool(pool->zpools[i]);
+       kfree(pool);
 }
 
-/* caller must hold the tree lock */
-static struct zswap_entry *zswap_entry_find_get(struct rb_root *root,
-                               pgoff_t offset)
+static void __zswap_pool_release(struct work_struct *work)
 {
-       struct zswap_entry *entry;
+       struct zswap_pool *pool = container_of(work, typeof(*pool),
+                                               release_work);
 
-       entry = zswap_rb_search(root, offset);
-       if (entry)
-               zswap_entry_get(entry);
+       synchronize_rcu();
 
-       return entry;
+       /* nobody should have been able to get a ref... */
+       WARN_ON(!percpu_ref_is_zero(&pool->ref));
+       percpu_ref_exit(&pool->ref);
+
+       /* pool is now off zswap_pools list and has no references. */
+       zswap_pool_destroy(pool);
 }
 
-/*********************************
-* per-cpu code
-**********************************/
-static DEFINE_PER_CPU(u8 *, zswap_dstmem);
-/*
- * If users dynamically change the zpool type and compressor at runtime, i.e.
- * zswap is running, zswap can have more than one zpool on one cpu, but they
- * are sharing dtsmem. So we need this mutex to be per-cpu.
- */
-static DEFINE_PER_CPU(struct mutex *, zswap_mutex);
+static struct zswap_pool *zswap_pool_current(void);
 
-static int zswap_dstmem_prepare(unsigned int cpu)
+static void __zswap_pool_empty(struct percpu_ref *ref)
 {
-       struct mutex *mutex;
-       u8 *dst;
+       struct zswap_pool *pool;
 
-       dst = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
-       if (!dst)
-               return -ENOMEM;
+       pool = container_of(ref, typeof(*pool), ref);
 
-       mutex = kmalloc_node(sizeof(*mutex), GFP_KERNEL, cpu_to_node(cpu));
-       if (!mutex) {
-               kfree(dst);
-               return -ENOMEM;
-       }
+       spin_lock_bh(&zswap_pools_lock);
 
-       mutex_init(mutex);
-       per_cpu(zswap_dstmem, cpu) = dst;
-       per_cpu(zswap_mutex, cpu) = mutex;
-       return 0;
+       WARN_ON(pool == zswap_pool_current());
+
+       list_del_rcu(&pool->list);
+
+       INIT_WORK(&pool->release_work, __zswap_pool_release);
+       schedule_work(&pool->release_work);
+
+       spin_unlock_bh(&zswap_pools_lock);
 }
 
-static int zswap_dstmem_dead(unsigned int cpu)
+static int __must_check zswap_pool_get(struct zswap_pool *pool)
 {
-       struct mutex *mutex;
-       u8 *dst;
-
-       mutex = per_cpu(zswap_mutex, cpu);
-       kfree(mutex);
-       per_cpu(zswap_mutex, cpu) = NULL;
+       if (!pool)
+               return 0;
 
-       dst = per_cpu(zswap_dstmem, cpu);
-       kfree(dst);
-       per_cpu(zswap_dstmem, cpu) = NULL;
+       return percpu_ref_tryget(&pool->ref);
+}
 
-       return 0;
+static void zswap_pool_put(struct zswap_pool *pool)
+{
+       percpu_ref_put(&pool->ref);
 }
 
-static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
+static struct zswap_pool *__zswap_pool_current(void)
 {
-       struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
-       struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
-       struct crypto_acomp *acomp;
-       struct acomp_req *req;
+       struct zswap_pool *pool;
 
-       acomp = crypto_alloc_acomp_node(pool->tfm_name, 0, 0, cpu_to_node(cpu));
-       if (IS_ERR(acomp)) {
-               pr_err("could not alloc crypto acomp %s : %ld\n",
-                               pool->tfm_name, PTR_ERR(acomp));
-               return PTR_ERR(acomp);
-       }
-       acomp_ctx->acomp = acomp;
+       pool = list_first_or_null_rcu(&zswap_pools, typeof(*pool), list);
+       WARN_ONCE(!pool && zswap_has_pool,
+                 "%s: no page storage pool!\n", __func__);
 
-       req = acomp_request_alloc(acomp_ctx->acomp);
-       if (!req) {
-               pr_err("could not alloc crypto acomp_request %s\n",
-                      pool->tfm_name);
-               crypto_free_acomp(acomp_ctx->acomp);
-               return -ENOMEM;
-       }
-       acomp_ctx->req = req;
-
-       crypto_init_wait(&acomp_ctx->wait);
-       /*
-        * if the backend of acomp is async zip, crypto_req_done() will wakeup
-        * crypto_wait_req(); if the backend of acomp is scomp, the callback
-        * won't be called, crypto_wait_req() will return without blocking.
-        */
-       acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
-                                  crypto_req_done, &acomp_ctx->wait);
-
-       acomp_ctx->mutex = per_cpu(zswap_mutex, cpu);
-       acomp_ctx->dstmem = per_cpu(zswap_dstmem, cpu);
-
-       return 0;
-}
-
-static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
-{
-       struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
-       struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
-
-       if (!IS_ERR_OR_NULL(acomp_ctx)) {
-               if (!IS_ERR_OR_NULL(acomp_ctx->req))
-                       acomp_request_free(acomp_ctx->req);
-               if (!IS_ERR_OR_NULL(acomp_ctx->acomp))
-                       crypto_free_acomp(acomp_ctx->acomp);
-       }
-
-       return 0;
-}
-
-/*********************************
-* pool functions
-**********************************/
-
-static struct zswap_pool *__zswap_pool_current(void)
-{
-       struct zswap_pool *pool;
-
-       pool = list_first_or_null_rcu(&zswap_pools, typeof(*pool), list);
-       WARN_ONCE(!pool && zswap_has_pool,
-                 "%s: no page storage pool!\n", __func__);
-
-       return pool;
-}
+       return pool;
+}
 
 static struct zswap_pool *zswap_pool_current(void)
 {
@@ -548,24 +519,6 @@ static struct zswap_pool *zswap_pool_current_get(void)
        return pool;
 }
 
-static struct zswap_pool *zswap_pool_last_get(void)
-{
-       struct zswap_pool *pool, *last = NULL;
-
-       rcu_read_lock();
-
-       list_for_each_entry_rcu(pool, &zswap_pools, list)
-               last = pool;
-       WARN_ONCE(!last && zswap_has_pool,
-                 "%s: no page storage pool!\n", __func__);
-       if (!zswap_pool_get(last))
-               last = NULL;
-
-       rcu_read_unlock();
-
-       return last;
-}
-
 /* type and compressor must be null-terminated */
 static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
 {
@@ -576,7 +529,8 @@ static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
        list_for_each_entry_rcu(pool, &zswap_pools, list) {
                if (strcmp(pool->tfm_name, compressor))
                        continue;
-               if (strcmp(zpool_get_type(pool->zpool), type))
+               /* all zpools share the same type */
+               if (strcmp(zpool_get_type(pool->zpools[0]), type))
                        continue;
                /* if we can't get it, it's about to be destroyed */
                if (!zswap_pool_get(pool))
@@ -587,205 +541,47 @@ static struct zswap_pool *zswap_pool_find_get(char *type, char *compressor)
        return NULL;
 }
 
-static void shrink_worker(struct work_struct *w)
-{
-       struct zswap_pool *pool = container_of(w, typeof(*pool),
-                                               shrink_work);
-
-       if (zpool_shrink(pool->zpool, 1, NULL))
-               zswap_reject_reclaim_fail++;
-       zswap_pool_put(pool);
-}
-
-static struct zswap_pool *zswap_pool_create(char *type, char *compressor)
-{
-       struct zswap_pool *pool;
-       char name[38]; /* 'zswap' + 32 char (max) num + \0 */
-       gfp_t gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
-       int ret;
-
-       if (!zswap_has_pool) {
-               /* if either are unset, pool initialization failed, and we
-                * need both params to be set correctly before trying to
-                * create a pool.
-                */
-               if (!strcmp(type, ZSWAP_PARAM_UNSET))
-                       return NULL;
-               if (!strcmp(compressor, ZSWAP_PARAM_UNSET))
-                       return NULL;
-       }
-
-       pool = kzalloc(sizeof(*pool), GFP_KERNEL);
-       if (!pool)
-               return NULL;
-
-       /* unique name for each pool specifically required by zsmalloc */
-       snprintf(name, 38, "zswap%x", atomic_inc_return(&zswap_pools_count));
-
-       pool->zpool = zpool_create_pool(type, name, gfp, &zswap_zpool_ops);
-       if (!pool->zpool) {
-               pr_err("%s zpool not available\n", type);
-               goto error;
-       }
-       pr_debug("using %s zpool\n", zpool_get_type(pool->zpool));
-
-       strscpy(pool->tfm_name, compressor, sizeof(pool->tfm_name));
-
-       pool->acomp_ctx = alloc_percpu(*pool->acomp_ctx);
-       if (!pool->acomp_ctx) {
-               pr_err("percpu alloc failed\n");
-               goto error;
-       }
-
-       ret = cpuhp_state_add_instance(CPUHP_MM_ZSWP_POOL_PREPARE,
-                                      &pool->node);
-       if (ret)
-               goto error;
-       pr_debug("using %s compressor\n", pool->tfm_name);
-
-       /* being the current pool takes 1 ref; this func expects the
-        * caller to always add the new pool as the current pool
-        */
-       kref_init(&pool->kref);
-       INIT_LIST_HEAD(&pool->list);
-       INIT_WORK(&pool->shrink_work, shrink_worker);
-
-       zswap_pool_debug("created", pool);
-
-       return pool;
-
-error:
-       if (pool->acomp_ctx)
-               free_percpu(pool->acomp_ctx);
-       if (pool->zpool)
-               zpool_destroy_pool(pool->zpool);
-       kfree(pool);
-       return NULL;
-}
-
-static __init struct zswap_pool *__zswap_pool_create_fallback(void)
-{
-       bool has_comp, has_zpool;
-
-       has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
-       if (!has_comp && strcmp(zswap_compressor,
-                               CONFIG_ZSWAP_COMPRESSOR_DEFAULT)) {
-               pr_err("compressor %s not available, using default %s\n",
-                      zswap_compressor, CONFIG_ZSWAP_COMPRESSOR_DEFAULT);
-               param_free_charp(&zswap_compressor);
-               zswap_compressor = CONFIG_ZSWAP_COMPRESSOR_DEFAULT;
-               has_comp = crypto_has_acomp(zswap_compressor, 0, 0);
-       }
-       if (!has_comp) {
-               pr_err("default compressor %s not available\n",
-                      zswap_compressor);
-               param_free_charp(&zswap_compressor);
-               zswap_compressor = ZSWAP_PARAM_UNSET;
-       }
-
-       has_zpool = zpool_has_pool(zswap_zpool_type);
-       if (!has_zpool && strcmp(zswap_zpool_type,
-                                CONFIG_ZSWAP_ZPOOL_DEFAULT)) {
-               pr_err("zpool %s not available, using default %s\n",
-                      zswap_zpool_type, CONFIG_ZSWAP_ZPOOL_DEFAULT);
-               param_free_charp(&zswap_zpool_type);
-               zswap_zpool_type = CONFIG_ZSWAP_ZPOOL_DEFAULT;
-               has_zpool = zpool_has_pool(zswap_zpool_type);
-       }
-       if (!has_zpool) {
-               pr_err("default zpool %s not available\n",
-                      zswap_zpool_type);
-               param_free_charp(&zswap_zpool_type);
-               zswap_zpool_type = ZSWAP_PARAM_UNSET;
-       }
-
-       if (!has_comp || !has_zpool)
-               return NULL;
-
-       return zswap_pool_create(zswap_zpool_type, zswap_compressor);
-}
-
-static void zswap_pool_destroy(struct zswap_pool *pool)
-{
-       zswap_pool_debug("destroying", pool);
-
-       cpuhp_state_remove_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);
-       free_percpu(pool->acomp_ctx);
-       zpool_destroy_pool(pool->zpool);
-       kfree(pool);
-}
-
-static int __must_check zswap_pool_get(struct zswap_pool *pool)
-{
-       if (!pool)
-               return 0;
-
-       return kref_get_unless_zero(&pool->kref);
-}
-
-static void __zswap_pool_release(struct work_struct *work)
-{
-       struct zswap_pool *pool = container_of(work, typeof(*pool),
-                                               release_work);
-
-       synchronize_rcu();
-
-       /* nobody should have been able to get a kref... */
-       WARN_ON(kref_get_unless_zero(&pool->kref));
-
-       /* pool is now off zswap_pools list and has no references. */
-       zswap_pool_destroy(pool);
-}
-
-static void __zswap_pool_empty(struct kref *kref)
-{
-       struct zswap_pool *pool;
-
-       pool = container_of(kref, typeof(*pool), kref);
-
-       spin_lock(&zswap_pools_lock);
-
-       WARN_ON(pool == zswap_pool_current());
-
-       list_del_rcu(&pool->list);
-
-       INIT_WORK(&pool->release_work, __zswap_pool_release);
-       schedule_work(&pool->release_work);
-
-       spin_unlock(&zswap_pools_lock);
-}
-
-static void zswap_pool_put(struct zswap_pool *pool)
-{
-       kref_put(&pool->kref, __zswap_pool_empty);
-}
-
 /*********************************
 * param callbacks
 **********************************/
 
+static bool zswap_pool_changed(const char *s, const struct kernel_param *kp)
+{
+       /* no change required */
+       if (!strcmp(s, *(char **)kp->arg) && zswap_has_pool)
+               return false;
+       return true;
+}
+
 /* val must be a null-terminated string */
 static int __zswap_param_set(const char *val, const struct kernel_param *kp,
                             char *type, char *compressor)
 {
        struct zswap_pool *pool, *put_pool = NULL;
        char *s = strstrip((char *)val);
-       int ret;
-
-       if (zswap_init_failed) {
+       int ret = 0;
+       bool new_pool = false;
+
+       mutex_lock(&zswap_init_lock);
+       switch (zswap_init_state) {
+       case ZSWAP_UNINIT:
+               /* if this is load-time (pre-init) param setting,
+                * don't create a pool; that's done during init.
+                */
+               ret = param_set_charp(s, kp);
+               break;
+       case ZSWAP_INIT_SUCCEED:
+               new_pool = zswap_pool_changed(s, kp);
+               break;
+       case ZSWAP_INIT_FAILED:
                pr_err("can't set param, initialization failed\n");
-               return -ENODEV;
+               ret = -ENODEV;
        }
+       mutex_unlock(&zswap_init_lock);
 
-       /* no change required */
-       if (!strcmp(s, *(char **)kp->arg) && zswap_has_pool)
-               return 0;
-
-       /* if this is load-time (pre-init) param setting,
-        * don't create a pool; that's done during init.
-        */
-       if (!zswap_init_started)
-               return param_set_charp(s, kp);
+       /* no need to create a new pool, return directly */
+       if (!new_pool)
+               return ret;
 
        if (!type) {
                if (!zpool_has_pool(s)) {
@@ -804,7 +600,7 @@ static int __zswap_param_set(const char *val, const struct kernel_param *kp,
                return -EINVAL;
        }
 
-       spin_lock(&zswap_pools_lock);
+       spin_lock_bh(&zswap_pools_lock);
 
        pool = zswap_pool_find_get(type, compressor);
        if (pool) {
@@ -813,17 +609,28 @@ static int __zswap_param_set(const char *val, const struct kernel_param *kp,
                list_del_rcu(&pool->list);
        }
 
-       spin_unlock(&zswap_pools_lock);
+       spin_unlock_bh(&zswap_pools_lock);
 
        if (!pool)
                pool = zswap_pool_create(type, compressor);
+       else {
+               /*
+                * Restore the initial ref dropped by percpu_ref_kill()
+                * when the pool was decommissioned and switch it again
+                * to percpu mode.
+                */
+               percpu_ref_resurrect(&pool->ref);
+
+               /* Drop the ref from zswap_pool_find_get(). */
+               zswap_pool_put(pool);
+       }
 
        if (pool)
                ret = param_set_charp(s, kp);
        else
                ret = -EINVAL;
 
-       spin_lock(&zswap_pools_lock);
+       spin_lock_bh(&zswap_pools_lock);
 
        if (!ret) {
                put_pool = zswap_pool_current();
@@ -838,247 +645,820 @@ static int __zswap_param_set(const char *val, const struct kernel_param *kp,
                put_pool = pool;
        }
 
-       spin_unlock(&zswap_pools_lock);
+       spin_unlock_bh(&zswap_pools_lock);
+
+       if (!zswap_has_pool && !pool) {
+               /* if initial pool creation failed, and this pool creation also
+                * failed, maybe both compressor and zpool params were bad.
+                * Allow changing this param, so pool creation will succeed
+                * when the other param is changed. We already verified this
+                * param is ok in the zpool_has_pool() or crypto_has_acomp()
+                * checks above.
+                */
+               ret = param_set_charp(s, kp);
+       }
+
+       /* drop the ref from either the old current pool,
+        * or the new pool we failed to add
+        */
+       if (put_pool)
+               percpu_ref_kill(&put_pool->ref);
+
+       return ret;
+}
+
+static int zswap_compressor_param_set(const char *val,
+                                     const struct kernel_param *kp)
+{
+       return __zswap_param_set(val, kp, zswap_zpool_type, NULL);
+}
+
+static int zswap_zpool_param_set(const char *val,
+                                const struct kernel_param *kp)
+{
+       return __zswap_param_set(val, kp, NULL, zswap_compressor);
+}
+
+static int zswap_enabled_param_set(const char *val,
+                                  const struct kernel_param *kp)
+{
+       int ret = -ENODEV;
+
+       /* if this is load-time (pre-init) param setting, only set param. */
+       if (system_state != SYSTEM_RUNNING)
+               return param_set_bool(val, kp);
+
+       mutex_lock(&zswap_init_lock);
+       switch (zswap_init_state) {
+       case ZSWAP_UNINIT:
+               if (zswap_setup())
+                       break;
+               fallthrough;
+       case ZSWAP_INIT_SUCCEED:
+               if (!zswap_has_pool)
+                       pr_err("can't enable, no pool configured\n");
+               else
+                       ret = param_set_bool(val, kp);
+               break;
+       case ZSWAP_INIT_FAILED:
+               pr_err("can't enable, initialization failed\n");
+       }
+       mutex_unlock(&zswap_init_lock);
+
+       return ret;
+}
+
+/*********************************
+* lru functions
+**********************************/
+
+/* should be called under RCU */
+#ifdef CONFIG_MEMCG
+static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
+{
+       return entry->objcg ? obj_cgroup_memcg(entry->objcg) : NULL;
+}
+#else
+static inline struct mem_cgroup *mem_cgroup_from_entry(struct zswap_entry *entry)
+{
+       return NULL;
+}
+#endif
+
+static inline int entry_to_nid(struct zswap_entry *entry)
+{
+       return page_to_nid(virt_to_page(entry));
+}
+
+static void zswap_lru_add(struct list_lru *list_lru, struct zswap_entry *entry)
+{
+       atomic_long_t *nr_zswap_protected;
+       unsigned long lru_size, old, new;
+       int nid = entry_to_nid(entry);
+       struct mem_cgroup *memcg;
+       struct lruvec *lruvec;
+
+       /*
+        * Note that it is safe to use rcu_read_lock() here, even in the face of
+        * concurrent memcg offlining. Thanks to the memcg->kmemcg_id indirection
+        * used in list_lru lookup, only two scenarios are possible:
+        *
+        * 1. list_lru_add() is called before memcg->kmemcg_id is updated. The
+        *    new entry will be reparented to memcg's parent's list_lru.
+        * 2. list_lru_add() is called after memcg->kmemcg_id is updated. The
+        *    new entry will be added directly to memcg's parent's list_lru.
+        *
+        * Similar reasoning holds for list_lru_del().
+        */
+       rcu_read_lock();
+       memcg = mem_cgroup_from_entry(entry);
+       /* will always succeed */
+       list_lru_add(list_lru, &entry->lru, nid, memcg);
+
+       /* Update the protection area */
+       lru_size = list_lru_count_one(list_lru, nid, memcg);
+       lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));
+       nr_zswap_protected = &lruvec->zswap_lruvec_state.nr_zswap_protected;
+       old = atomic_long_inc_return(nr_zswap_protected);
+       /*
+        * Decay to avoid overflow and adapt to changing workloads.
+        * This is based on LRU reclaim cost decaying heuristics.
+        */
+       do {
+               new = old > lru_size / 4 ? old / 2 : old;
+       } while (!atomic_long_try_cmpxchg(nr_zswap_protected, &old, new));
+       rcu_read_unlock();
+}
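The cmpxchg loop above implements the protection heuristic: each add bumps nr_zswap_protected, but once the counter exceeds a quarter of the LRU size it is halved, so the protected region decays instead of growing without bound. Below is a self-contained sketch of that decay rule using C11 atomics; the plain counters stand in for the kernel's per-lruvec state and are not the real interfaces.

#include <stdatomic.h>
#include <stdio.h>

/*
 * Mirrors the decay in zswap_lru_add(): increment the protected count,
 * but if it has grown past a quarter of the LRU size, halve it.
 * These are demo counters, not the kernel's lruvec state.
 */
static void protect_one(atomic_long *nr_protected, long lru_size)
{
	long old = atomic_fetch_add(nr_protected, 1) + 1;
	long new;

	do {
		new = old > lru_size / 4 ? old / 2 : old;
	} while (!atomic_compare_exchange_weak(nr_protected, &old, new));
}

int main(void)
{
	atomic_long nr_protected = 0;
	long lru_size = 100;

	for (int i = 0; i < 1000; i++)
		protect_one(&nr_protected, lru_size);

	/* stays bounded around lru_size / 4 instead of reaching 1000 */
	printf("protected after 1000 adds: %ld\n", atomic_load(&nr_protected));
	return 0;
}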
+
+static void zswap_lru_del(struct list_lru *list_lru, struct zswap_entry *entry)
+{
+       int nid = entry_to_nid(entry);
+       struct mem_cgroup *memcg;
+
+       rcu_read_lock();
+       memcg = mem_cgroup_from_entry(entry);
+       /* will always succeed */
+       list_lru_del(list_lru, &entry->lru, nid, memcg);
+       rcu_read_unlock();
+}
+
+void zswap_lruvec_state_init(struct lruvec *lruvec)
+{
+       atomic_long_set(&lruvec->zswap_lruvec_state.nr_zswap_protected, 0);
+}
+
+void zswap_folio_swapin(struct folio *folio)
+{
+       struct lruvec *lruvec;
+
+       if (folio) {
+               lruvec = folio_lruvec(folio);
+               atomic_long_inc(&lruvec->zswap_lruvec_state.nr_zswap_protected);
+       }
+}
+
+void zswap_memcg_offline_cleanup(struct mem_cgroup *memcg)
+{
+       /* lock out zswap shrinker walking memcg tree */
+       spin_lock(&zswap_shrink_lock);
+       if (zswap_next_shrink == memcg)
+               zswap_next_shrink = mem_cgroup_iter(NULL, zswap_next_shrink, NULL);
+       spin_unlock(&zswap_shrink_lock);
+}
+
+/*********************************
+* rbtree functions
+**********************************/
+static struct zswap_entry *zswap_rb_search(struct rb_root *root, pgoff_t offset)
+{
+       struct rb_node *node = root->rb_node;
+       struct zswap_entry *entry;
+       pgoff_t entry_offset;
+
+       while (node) {
+               entry = rb_entry(node, struct zswap_entry, rbnode);
+               entry_offset = swp_offset(entry->swpentry);
+               if (entry_offset > offset)
+                       node = node->rb_left;
+               else if (entry_offset < offset)
+                       node = node->rb_right;
+               else
+                       return entry;
+       }
+       return NULL;
+}
+
+/*
+ * In the case that an entry with the same offset is found, a pointer to
+ * the existing entry is stored in dupentry and the function returns -EEXIST.
+ */
+static int zswap_rb_insert(struct rb_root *root, struct zswap_entry *entry,
+                       struct zswap_entry **dupentry)
+{
+       struct rb_node **link = &root->rb_node, *parent = NULL;
+       struct zswap_entry *myentry;
+       pgoff_t myentry_offset, entry_offset = swp_offset(entry->swpentry);
+
+       while (*link) {
+               parent = *link;
+               myentry = rb_entry(parent, struct zswap_entry, rbnode);
+               myentry_offset = swp_offset(myentry->swpentry);
+               if (myentry_offset > entry_offset)
+                       link = &(*link)->rb_left;
+               else if (myentry_offset < entry_offset)
+                       link = &(*link)->rb_right;
+               else {
+                       *dupentry = myentry;
+                       return -EEXIST;
+               }
+       }
+       rb_link_node(&entry->rbnode, parent, link);
+       rb_insert_color(&entry->rbnode, root);
+       return 0;
+}
+
+static void zswap_rb_erase(struct rb_root *root, struct zswap_entry *entry)
+{
+       rb_erase(&entry->rbnode, root);
+       RB_CLEAR_NODE(&entry->rbnode);
+}
+
+/*********************************
+* zswap entry functions
+**********************************/
+static struct kmem_cache *zswap_entry_cache;
+
+static struct zswap_entry *zswap_entry_cache_alloc(gfp_t gfp, int nid)
+{
+       struct zswap_entry *entry;
+       entry = kmem_cache_alloc_node(zswap_entry_cache, gfp, nid);
+       if (!entry)
+               return NULL;
+       RB_CLEAR_NODE(&entry->rbnode);
+       return entry;
+}
+
+static void zswap_entry_cache_free(struct zswap_entry *entry)
+{
+       kmem_cache_free(zswap_entry_cache, entry);
+}
+
+static struct zpool *zswap_find_zpool(struct zswap_entry *entry)
+{
+       int i = 0;
+
+       if (ZSWAP_NR_ZPOOLS > 1)
+               i = hash_ptr(entry, ilog2(ZSWAP_NR_ZPOOLS));
+
+       return entry->pool->zpools[i];
+}
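zswap_find_zpool() picks one of the pool's ZSWAP_NR_ZPOOLS backends by hashing the entry pointer, spreading allocations across zpools to reduce contention on any single one. Below is a userspace sketch of that selection scheme; demo_hash_ptr() is a simplified stand-in for the kernel's hash_ptr() and is an assumption of this example.

#include <stdio.h>
#include <stdint.h>

#define DEMO_NR_ZPOOLS 32	/* matches ZSWAP_NR_ZPOOLS */

struct demo_entry { int dummy; };

/* Simplified stand-in for hash_ptr(): multiply by a 64-bit odd constant
 * and keep the top log2(DEMO_NR_ZPOOLS) bits. */
static unsigned int demo_hash_ptr(const void *ptr, unsigned int bits)
{
	uint64_t val = (uint64_t)(uintptr_t)ptr * 0x61C8864680B583EBULL;

	return (unsigned int)(val >> (64 - bits));
}

static unsigned int demo_find_zpool(const struct demo_entry *entry)
{
	return demo_hash_ptr(entry, 5);	/* ilog2(32) == 5 */
}

int main(void)
{
	struct demo_entry entries[8];
	unsigned int i;

	for (i = 0; i < 8; i++)
		printf("entry %u -> zpool %u\n", i, demo_find_zpool(&entries[i]));
	return 0;
}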
+
+/*
+ * Carries out the common pattern of freeing an entry's zpool allocation,
+ * freeing the entry itself, and decrementing the number of stored pages.
+ */
+static void zswap_entry_free(struct zswap_entry *entry)
+{
+       if (!entry->length)
+               atomic_dec(&zswap_same_filled_pages);
+       else {
+               zswap_lru_del(&zswap_list_lru, entry);
+               zpool_free(zswap_find_zpool(entry), entry->handle);
+               atomic_dec(&zswap_nr_stored);
+               zswap_pool_put(entry->pool);
+       }
+       if (entry->objcg) {
+               obj_cgroup_uncharge_zswap(entry->objcg, entry->length);
+               obj_cgroup_put(entry->objcg);
+       }
+       zswap_entry_cache_free(entry);
+       atomic_dec(&zswap_stored_pages);
+       zswap_update_total_size();
+}
+
+/*
+ * The caller holds the tree lock and has looked the entry up in the tree,
+ * so it must be on the tree; remove it from the tree and free it.
+ */
+static void zswap_invalidate_entry(struct zswap_tree *tree,
+                                  struct zswap_entry *entry)
+{
+       zswap_rb_erase(&tree->rbroot, entry);
+       zswap_entry_free(entry);
+}
+
+/*********************************
+* compressed storage functions
+**********************************/
+static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
+{
+       struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
+       struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
+       struct crypto_acomp *acomp;
+       struct acomp_req *req;
+       int ret;
+
+       mutex_init(&acomp_ctx->mutex);
+
+       acomp_ctx->buffer = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
+       if (!acomp_ctx->buffer)
+               return -ENOMEM;
+
+       acomp = crypto_alloc_acomp_node(pool->tfm_name, 0, 0, cpu_to_node(cpu));
+       if (IS_ERR(acomp)) {
+               pr_err("could not alloc crypto acomp %s : %ld\n",
+                               pool->tfm_name, PTR_ERR(acomp));
+               ret = PTR_ERR(acomp);
+               goto acomp_fail;
+       }
+       acomp_ctx->acomp = acomp;
+       acomp_ctx->is_sleepable = acomp_is_async(acomp);
+
+       req = acomp_request_alloc(acomp_ctx->acomp);
+       if (!req) {
+               pr_err("could not alloc crypto acomp_request %s\n",
+                      pool->tfm_name);
+               ret = -ENOMEM;
+               goto req_fail;
+       }
+       acomp_ctx->req = req;
+
+       crypto_init_wait(&acomp_ctx->wait);
+       /*
+        * if the backend of acomp is async zip, crypto_req_done() will wakeup
+        * crypto_wait_req(); if the backend of acomp is scomp, the callback
+        * won't be called, crypto_wait_req() will return without blocking.
+        */
+       acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+                                  crypto_req_done, &acomp_ctx->wait);
+
+       return 0;
+
+req_fail:
+       crypto_free_acomp(acomp_ctx->acomp);
+acomp_fail:
+       kfree(acomp_ctx->buffer);
+       return ret;
+}
+
+static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
+{
+       struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
+       struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
+
+       if (!IS_ERR_OR_NULL(acomp_ctx)) {
+               if (!IS_ERR_OR_NULL(acomp_ctx->req))
+                       acomp_request_free(acomp_ctx->req);
+               if (!IS_ERR_OR_NULL(acomp_ctx->acomp))
+                       crypto_free_acomp(acomp_ctx->acomp);
+               kfree(acomp_ctx->buffer);
+       }
+
+       return 0;
+}
+
+static bool zswap_compress(struct folio *folio, struct zswap_entry *entry)
+{
+       struct crypto_acomp_ctx *acomp_ctx;
+       struct scatterlist input, output;
+       int comp_ret = 0, alloc_ret = 0;
+       unsigned int dlen = PAGE_SIZE;
+       unsigned long handle;
+       struct zpool *zpool;
+       char *buf;
+       gfp_t gfp;
+       u8 *dst;
+
+       acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
+
+       mutex_lock(&acomp_ctx->mutex);
+
+       dst = acomp_ctx->buffer;
+       sg_init_table(&input, 1);
+       sg_set_page(&input, &folio->page, PAGE_SIZE, 0);
+
+       /*
+        * We need PAGE_SIZE * 2 here since there may be an over-compression
+        * case, and hardware accelerators may not check the dst buffer size,
+        * so give the dst buffer enough length to avoid a buffer overflow.
+        */
+       sg_init_one(&output, dst, PAGE_SIZE * 2);
+       acomp_request_set_params(acomp_ctx->req, &input, &output, PAGE_SIZE, dlen);
+
+       /*
+        * It may look a little silly that we send an asynchronous request and
+        * then wait for its completion synchronously; this makes the process
+        * synchronous in practice.
+        * Theoretically, acomp lets users queue multiple requests on one acomp
+        * instance and have them completed simultaneously. But in this case
+        * zswap stores and loads page by page, and there is no way to send the
+        * second page before the first page is done within one thread doing
+        * zswap.
+        * However, different threads running on different CPUs use different
+        * acomp instances, so multiple threads can do (de)compression in parallel.
+        */
+       comp_ret = crypto_wait_req(crypto_acomp_compress(acomp_ctx->req), &acomp_ctx->wait);
+       dlen = acomp_ctx->req->dlen;
+       if (comp_ret)
+               goto unlock;
+
+       zpool = zswap_find_zpool(entry);
+       gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
+       if (zpool_malloc_support_movable(zpool))
+               gfp |= __GFP_HIGHMEM | __GFP_MOVABLE;
+       alloc_ret = zpool_malloc(zpool, dlen, gfp, &handle);
+       if (alloc_ret)
+               goto unlock;
+
+       buf = zpool_map_handle(zpool, handle, ZPOOL_MM_WO);
+       memcpy(buf, dst, dlen);
+       zpool_unmap_handle(zpool, handle);
+
+       entry->handle = handle;
+       entry->length = dlen;
+
+unlock:
+       if (comp_ret == -ENOSPC || alloc_ret == -ENOSPC)
+               zswap_reject_compress_poor++;
+       else if (comp_ret)
+               zswap_reject_compress_fail++;
+       else if (alloc_ret)
+               zswap_reject_alloc_fail++;
+
+       mutex_unlock(&acomp_ctx->mutex);
+       return comp_ret == 0 && alloc_ret == 0;
+}
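zswap_compress() above compresses into a 2 * PAGE_SIZE per-CPU buffer, copies the result into a zpool allocation, and sorts failures into compress_fail, compress_poor (keyed off -ENOSPC) and alloc_fail. The sketch below models only the accept/reject decision in userspace; zlib's compress2() stands in for the crypto acomp API, and treating any output at or above page size as "poor" is a simplification of the kernel's -ENOSPC checks, so none of this reflects the actual zswap compressors.

#include <stdio.h>
#include <string.h>
#include <zlib.h>	/* link with -lz */

#define DEMO_PAGE_SIZE 4096UL

/*
 * Returns 1 and sets *dlen when the page is worth storing compressed,
 * 0 otherwise, roughly mirroring zswap_compress()'s accept/reject logic.
 */
static int demo_compress_page(const unsigned char *page, unsigned char *dst,
			      unsigned long *dlen)
{
	/* 2 * PAGE_SIZE destination, like acomp_ctx->buffer, so an
	 * incompressible page cannot overflow the buffer. */
	uLongf out_len = DEMO_PAGE_SIZE * 2;

	if (compress2(dst, &out_len, page, DEMO_PAGE_SIZE,
		      Z_DEFAULT_COMPRESSION) != Z_OK)
		return 0;	/* would count as compress_fail */

	if (out_len >= DEMO_PAGE_SIZE)
		return 0;	/* would count as compress_poor; the kernel keys
				 * this off -ENOSPC instead of an explicit size check */

	*dlen = out_len;	/* caller would now do zpool_malloc() + memcpy() */
	return 1;
}

int main(void)
{
	static unsigned char page[DEMO_PAGE_SIZE];
	static unsigned char dst[DEMO_PAGE_SIZE * 2];
	unsigned long dlen;

	memset(page, 'A', sizeof(page));	/* highly compressible */
	if (demo_compress_page(page, dst, &dlen))
		printf("stored %lu of %lu bytes\n", dlen, DEMO_PAGE_SIZE);
	else
		printf("rejected\n");
	return 0;
}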
+
+static void zswap_decompress(struct zswap_entry *entry, struct page *page)
+{
+       struct zpool *zpool = zswap_find_zpool(entry);
+       struct scatterlist input, output;
+       struct crypto_acomp_ctx *acomp_ctx;
+       u8 *src;
+
+       acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
+       mutex_lock(&acomp_ctx->mutex);
+
+       src = zpool_map_handle(zpool, entry->handle, ZPOOL_MM_RO);
+       if (acomp_ctx->is_sleepable && !zpool_can_sleep_mapped(zpool)) {
+               memcpy(acomp_ctx->buffer, src, entry->length);
+               src = acomp_ctx->buffer;
+               zpool_unmap_handle(zpool, entry->handle);
+       }
+
+       sg_init_one(&input, src, entry->length);
+       sg_init_table(&output, 1);
+       sg_set_page(&output, page, PAGE_SIZE, 0);
+       acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, PAGE_SIZE);
+       BUG_ON(crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait));
+       BUG_ON(acomp_ctx->req->dlen != PAGE_SIZE);
+       mutex_unlock(&acomp_ctx->mutex);
+
+       if (!acomp_ctx->is_sleepable || zpool_can_sleep_mapped(zpool))
+               zpool_unmap_handle(zpool, entry->handle);
+}
+
+/*********************************
+* writeback code
+**********************************/
+/*
+ * Attempts to free an entry by adding a folio to the swap cache,
+ * decompressing the entry data into the folio, and issuing a
+ * bio write to write the folio back to the swap device.
+ *
+ * This can be thought of as a "resumed writeback" of the folio
+ * to the swap device.  We are basically resuming the same swap
+ * writeback path that was intercepted with the zswap_store()
+ * in the first place.  After the folio has been decompressed into
+ * the swap cache, the compressed version stored by zswap can be
+ * freed.
+ */
+static int zswap_writeback_entry(struct zswap_entry *entry,
+                                swp_entry_t swpentry)
+{
+       struct zswap_tree *tree;
+       struct folio *folio;
+       struct mempolicy *mpol;
+       bool folio_was_allocated;
+       struct writeback_control wbc = {
+               .sync_mode = WB_SYNC_NONE,
+       };
+
+       /* try to allocate swap cache folio */
+       mpol = get_task_policy(current);
+       folio = __read_swap_cache_async(swpentry, GFP_KERNEL, mpol,
+                               NO_INTERLEAVE_INDEX, &folio_was_allocated, true);
+       if (!folio)
+               return -ENOMEM;
+
+       /*
+        * Found an existing folio, we raced with swapin or concurrent
+        * shrinker. We generally writeback cold folios from zswap, and
+        * swapin means the folio just became hot, so skip this folio.
+        * For unlikely concurrent shrinker case, it will be unlinked
+        * and freed when invalidated by the concurrent shrinker anyway.
+        */
+       if (!folio_was_allocated) {
+               folio_put(folio);
+               return -EEXIST;
+       }
+
+       /*
+        * folio is locked, and the swapcache is now secured against
+        * concurrent swapping to and from the slot, and concurrent
+        * swapoff so we can safely dereference the zswap tree here.
+        * Verify that the swap entry hasn't been invalidated and recycled
+        * behind our backs, to avoid overwriting a new swap folio with
+        * old compressed data. Only when this is successful can the entry
+        * be dereferenced.
+        */
+       tree = swap_zswap_tree(swpentry);
+       spin_lock(&tree->lock);
+       if (zswap_rb_search(&tree->rbroot, swp_offset(swpentry)) != entry) {
+               spin_unlock(&tree->lock);
+               delete_from_swap_cache(folio);
+               folio_unlock(folio);
+               folio_put(folio);
+               return -ENOMEM;
+       }
+
+       /* Safe to deref entry after the entry is verified above. */
+       zswap_rb_erase(&tree->rbroot, entry);
+       spin_unlock(&tree->lock);
+
+       zswap_decompress(entry, &folio->page);
+
+       count_vm_event(ZSWPWB);
+       if (entry->objcg)
+               count_objcg_event(entry->objcg, ZSWPWB);
+
+       zswap_entry_free(entry);
+
+       /* folio is up to date */
+       folio_mark_uptodate(folio);
+
+       /* move it to the tail of the inactive list after end_writeback */
+       folio_set_reclaim(folio);
+
+       /* start writeback */
+       __swap_writepage(folio, &wbc);
+       folio_put(folio);
+
+       return 0;
+}
+
+/*********************************
+* shrinker functions
+**********************************/
+static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_one *l,
+                                      spinlock_t *lock, void *arg)
+{
+       struct zswap_entry *entry = container_of(item, struct zswap_entry, lru);
+       bool *encountered_page_in_swapcache = (bool *)arg;
+       swp_entry_t swpentry;
+       enum lru_status ret = LRU_REMOVED_RETRY;
+       int writeback_result;
+
+       /*
+        * As soon as we drop the LRU lock, the entry can be freed by
+        * a concurrent invalidation. This means the following:
+        *
+        * 1. We extract the swp_entry_t to the stack, allowing
+        *    zswap_writeback_entry() to pin the swap entry and
+        *    then validate the zswap entry against that swap entry's
+        *    tree using pointer value comparison. Only when that
+        *    is successful can the entry be dereferenced.
+        *
+        * 2. Usually, objects are taken off the LRU for reclaim. In
+        *    this case this isn't possible, because if reclaim fails
+        *    for whatever reason, we have no means of knowing if the
+        *    entry is alive to put it back on the LRU.
+        *
+        *    So rotate it before dropping the lock. If the entry is
+        *    written back or invalidated, the free path will unlink
+        *    it. For failures, rotation is the right thing as well.
+        *
+        *    Temporary failures, where the same entry should be tried
+        *    again immediately, almost never happen for this shrinker.
+        *    We don't do any trylocking; -ENOMEM comes closest,
+        *    but that's extremely rare and doesn't happen spuriously
+        *    either. Don't bother distinguishing this case.
+        */
+       list_move_tail(item, &l->list);
+
+       /*
+        * Once the lru lock is dropped, the entry might get freed. The
+        * swpentry is copied to the stack, and entry isn't deref'd again
+        * until the entry is verified to still be alive in the tree.
+        */
+       swpentry = entry->swpentry;
+
+       /*
+        * It's safe to drop the lock here because we return either
+        * LRU_REMOVED_RETRY or LRU_RETRY.
+        */
+       spin_unlock(lock);
+
+       writeback_result = zswap_writeback_entry(entry, swpentry);
+
+       if (writeback_result) {
+               zswap_reject_reclaim_fail++;
+               ret = LRU_RETRY;
+
+               /*
+                * Encountering a page already in swap cache is a sign that we are shrinking
+                * into the warmer region. We should terminate shrinking (if we're in the dynamic
+                * shrinker context).
+                */
+               if (writeback_result == -EEXIST && encountered_page_in_swapcache) {
+                       ret = LRU_STOP;
+                       *encountered_page_in_swapcache = true;
+               }
+       } else {
+               zswap_written_back_pages++;
+       }
+
+       spin_lock(lock);
+       return ret;
+}
 
-       if (!zswap_has_pool && !pool) {
-               /* if initial pool creation failed, and this pool creation also
-                * failed, maybe both compressor and zpool params were bad.
-                * Allow changing this param, so pool creation will succeed
-                * when the other param is changed. We already verified this
-                * param is ok in the zpool_has_pool() or crypto_has_acomp()
-                * checks above.
-                */
-               ret = param_set_charp(s, kp);
+static unsigned long zswap_shrinker_scan(struct shrinker *shrinker,
+               struct shrink_control *sc)
+{
+       struct lruvec *lruvec = mem_cgroup_lruvec(sc->memcg, NODE_DATA(sc->nid));
+       unsigned long shrink_ret, nr_protected, lru_size;
+       bool encountered_page_in_swapcache = false;
+
+       if (!zswap_shrinker_enabled ||
+                       !mem_cgroup_zswap_writeback_enabled(sc->memcg)) {
+               sc->nr_scanned = 0;
+               return SHRINK_STOP;
        }
 
-       /* drop the ref from either the old current pool,
-        * or the new pool we failed to add
+       nr_protected =
+               atomic_long_read(&lruvec->zswap_lruvec_state.nr_zswap_protected);
+       lru_size = list_lru_shrink_count(&zswap_list_lru, sc);
+
+       /*
+        * Abort if we are shrinking into the protected region.
+        *
+        * This short-circuiting is necessary because if we have too many
+        * concurrent reclaimers getting the freeable zswap object counts at
+        * the same time (before any of them has made reasonable progress),
+        * the total number of reclaimed objects might be more than the
+        * number of unprotected objects (i.e. the reclaimers will reclaim
+        * into the protected area of the zswap LRU).
         */
-       if (put_pool)
-               zswap_pool_put(put_pool);
+       if (nr_protected >= lru_size - sc->nr_to_scan) {
+               sc->nr_scanned = 0;
+               return SHRINK_STOP;
+       }
 
-       return ret;
-}
+       shrink_ret = list_lru_shrink_walk(&zswap_list_lru, sc, &shrink_memcg_cb,
+               &encountered_page_in_swapcache);
 
-static int zswap_compressor_param_set(const char *val,
-                                     const struct kernel_param *kp)
-{
-       return __zswap_param_set(val, kp, zswap_zpool_type, NULL);
-}
+       if (encountered_page_in_swapcache)
+               return SHRINK_STOP;
 
-static int zswap_zpool_param_set(const char *val,
-                                const struct kernel_param *kp)
-{
-       return __zswap_param_set(val, kp, NULL, zswap_compressor);
+       return shrink_ret ? shrink_ret : SHRINK_STOP;
 }
 
-static int zswap_enabled_param_set(const char *val,
-                                  const struct kernel_param *kp)
+static unsigned long zswap_shrinker_count(struct shrinker *shrinker,
+               struct shrink_control *sc)
 {
-       if (zswap_init_failed) {
-               pr_err("can't enable, initialization failed\n");
-               return -ENODEV;
-       }
-       if (!zswap_has_pool && zswap_init_started) {
-               pr_err("can't enable, no pool configured\n");
-               return -ENODEV;
-       }
+       struct mem_cgroup *memcg = sc->memcg;
+       struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(sc->nid));
+       unsigned long nr_backing, nr_stored, nr_freeable, nr_protected;
 
-       return param_set_bool(val, kp);
-}
+       if (!zswap_shrinker_enabled || !mem_cgroup_zswap_writeback_enabled(memcg))
+               return 0;
 
-/*********************************
-* writeback code
-**********************************/
-/* return enum for zswap_get_swap_cache_page */
-enum zswap_get_swap_ret {
-       ZSWAP_SWAPCACHE_NEW,
-       ZSWAP_SWAPCACHE_EXIST,
-       ZSWAP_SWAPCACHE_FAIL,
-};
+#ifdef CONFIG_MEMCG_KMEM
+       mem_cgroup_flush_stats(memcg);
+       nr_backing = memcg_page_state(memcg, MEMCG_ZSWAP_B) >> PAGE_SHIFT;
+       nr_stored = memcg_page_state(memcg, MEMCG_ZSWAPPED);
+#else
+       /* use pool stats instead of memcg stats */
+       nr_backing = zswap_pool_total_size >> PAGE_SHIFT;
+       nr_stored = atomic_read(&zswap_nr_stored);
+#endif
 
-/*
- * zswap_get_swap_cache_page
- *
- * This is an adaption of read_swap_cache_async()
- *
- * This function tries to find a page with the given swap entry
- * in the swapper_space address space (the swap cache).  If the page
- * is found, it is returned in retpage.  Otherwise, a page is allocated,
- * added to the swap cache, and returned in retpage.
- *
- * If success, the swap cache page is returned in retpage
- * Returns ZSWAP_SWAPCACHE_EXIST if page was already in the swap cache
- * Returns ZSWAP_SWAPCACHE_NEW if the new page needs to be populated,
- *     the new page is added to swapcache and locked
- * Returns ZSWAP_SWAPCACHE_FAIL on error
- */
-static int zswap_get_swap_cache_page(swp_entry_t entry,
-                               struct page **retpage)
-{
-       bool page_was_allocated;
-
-       *retpage = __read_swap_cache_async(entry, GFP_KERNEL,
-                       NULL, 0, &page_was_allocated);
-       if (page_was_allocated)
-               return ZSWAP_SWAPCACHE_NEW;
-       if (!*retpage)
-               return ZSWAP_SWAPCACHE_FAIL;
-       return ZSWAP_SWAPCACHE_EXIST;
+       if (!nr_stored)
+               return 0;
+
+       nr_protected =
+               atomic_long_read(&lruvec->zswap_lruvec_state.nr_zswap_protected);
+       nr_freeable = list_lru_shrink_count(&zswap_list_lru, sc);
+       /*
+        * Subtract from the lru size an estimate of the number of pages
+        * that should be protected.
+        */
+       nr_freeable = nr_freeable > nr_protected ? nr_freeable - nr_protected : 0;
+
+       /*
+        * Scale the number of freeable pages by the memory saving factor.
+        * This ensures that the better zswap compresses memory, the fewer
+        * pages we will evict to swap, since evicting well-compressed pages
+        * would incur IO for relatively little memory saving.
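+        *
+        * For example (hypothetical numbers): at a 3:1 compression ratio,
+        * nr_backing is roughly a third of nr_stored, so only about a
+        * third of the freeable objects is reported to the shrinker core.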
+        */
+       return mult_frac(nr_freeable, nr_backing, nr_stored);
 }
 
-/*
- * Attempts to free an entry by adding a page to the swap cache,
- * decompressing the entry data into the page, and issuing a
- * bio write to write the page back to the swap device.
- *
- * This can be thought of as a "resumed writeback" of the page
- * to the swap device.  We are basically resuming the same swap
- * writeback path that was intercepted with the frontswap_store()
- * in the first place.  After the page has been decompressed into
- * the swap cache, the compressed version stored by zswap can be
- * freed.
- */
-static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
+static struct shrinker *zswap_alloc_shrinker(void)
 {
-       struct zswap_header *zhdr;
-       swp_entry_t swpentry;
-       struct zswap_tree *tree;
-       pgoff_t offset;
-       struct zswap_entry *entry;
-       struct page *page;
-       struct scatterlist input, output;
-       struct crypto_acomp_ctx *acomp_ctx;
+       struct shrinker *shrinker;
 
-       u8 *src, *tmp = NULL;
-       unsigned int dlen;
-       int ret;
-       struct writeback_control wbc = {
-               .sync_mode = WB_SYNC_NONE,
-       };
+       shrinker =
+               shrinker_alloc(SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE, "mm-zswap");
+       if (!shrinker)
+               return NULL;
 
-       if (!zpool_can_sleep_mapped(pool)) {
-               tmp = kmalloc(PAGE_SIZE, GFP_ATOMIC);
-               if (!tmp)
-                       return -ENOMEM;
-       }
+       shrinker->scan_objects = zswap_shrinker_scan;
+       shrinker->count_objects = zswap_shrinker_count;
+       shrinker->batch = 0;
+       shrinker->seeks = DEFAULT_SEEKS;
+       return shrinker;
+}
 
-       /* extract swpentry from data */
-       zhdr = zpool_map_handle(pool, handle, ZPOOL_MM_RO);
-       swpentry = zhdr->swpentry; /* here */
-       tree = zswap_trees[swp_type(swpentry)];
-       offset = swp_offset(swpentry);
+static int shrink_memcg(struct mem_cgroup *memcg)
+{
+       int nid, shrunk = 0;
 
-       /* find and ref zswap entry */
-       spin_lock(&tree->lock);
-       entry = zswap_entry_find_get(&tree->rbroot, offset);
-       if (!entry) {
-               /* entry was invalidated */
-               spin_unlock(&tree->lock);
-               zpool_unmap_handle(pool, handle);
-               kfree(tmp);
-               return 0;
-       }
-       spin_unlock(&tree->lock);
-       BUG_ON(offset != entry->offset);
+       if (!mem_cgroup_zswap_writeback_enabled(memcg))
+               return -EINVAL;
 
-       src = (u8 *)zhdr + sizeof(struct zswap_header);
-       if (!zpool_can_sleep_mapped(pool)) {
-               memcpy(tmp, src, entry->length);
-               src = tmp;
-               zpool_unmap_handle(pool, handle);
-       }
+       /*
+        * Skip zombies because their LRUs are reparented and we would be
+        * reclaiming from the parent instead of the dead memcg.
+        */
+       if (memcg && !mem_cgroup_online(memcg))
+               return -ENOENT;
 
-       /* try to allocate swap cache page */
-       switch (zswap_get_swap_cache_page(swpentry, &page)) {
-       case ZSWAP_SWAPCACHE_FAIL: /* no memory or invalidate happened */
-               ret = -ENOMEM;
-               goto fail;
-
-       case ZSWAP_SWAPCACHE_EXIST:
-               /* page is already in the swap cache, ignore for now */
-               put_page(page);
-               ret = -EEXIST;
-               goto fail;
-
-       case ZSWAP_SWAPCACHE_NEW: /* page is locked */
-               /* decompress */
-               acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
-               dlen = PAGE_SIZE;
-
-               mutex_lock(acomp_ctx->mutex);
-               sg_init_one(&input, src, entry->length);
-               sg_init_table(&output, 1);
-               sg_set_page(&output, page, PAGE_SIZE, 0);
-               acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, dlen);
-               ret = crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait);
-               dlen = acomp_ctx->req->dlen;
-               mutex_unlock(acomp_ctx->mutex);
-
-               BUG_ON(ret);
-               BUG_ON(dlen != PAGE_SIZE);
-
-               /* page is up to date */
-               SetPageUptodate(page);
+       for_each_node_state(nid, N_NORMAL_MEMORY) {
+               unsigned long nr_to_walk = 1;
+
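+               /*
+                * Walk (and attempt to write back) at most one entry from
+                * this node's LRU per invocation, so a single call makes
+                * bounded progress on every node.
+                */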
+               shrunk += list_lru_walk_one(&zswap_list_lru, nid, memcg,
+                                           &shrink_memcg_cb, NULL, &nr_to_walk);
        }
+       return shrunk ? 0 : -EAGAIN;
+}
 
-       /* move it to the tail of the inactive list after end_writeback */
-       SetPageReclaim(page);
+static void shrink_worker(struct work_struct *w)
+{
+       struct mem_cgroup *memcg;
+       int ret, failures = 0;
 
-       /* start writeback */
-       __swap_writepage(page, &wbc, end_swap_bio_write);
-       put_page(page);
-       zswap_written_back_pages++;
+       /* global reclaim will select cgroups in a round-robin fashion. */
+       do {
+               spin_lock(&zswap_shrink_lock);
+               zswap_next_shrink = mem_cgroup_iter(NULL, zswap_next_shrink, NULL);
+               memcg = zswap_next_shrink;
+
+               /*
+                * We need to retry if we have gone through a full round
+                * trip, or if we got an offline memcg (or else we risk
+                * undoing the effect of the zswap memcg offlining cleanup
+                * callback). This is not catastrophic per se, but it will
+                * keep the now offlined memcg hostage for a while.
+                *
+                * Note that if we got an online memcg, we will keep the
+                * extra reference in case the original reference obtained
+                * by mem_cgroup_iter is dropped by the zswap memcg
+                * offlining callback, ensuring that the memcg is not
+                * killed when we are reclaiming.
+                */
+               if (!memcg) {
+                       spin_unlock(&zswap_shrink_lock);
+                       if (++failures == MAX_RECLAIM_RETRIES)
+                               break;
 
-       spin_lock(&tree->lock);
-       /* drop local reference */
-       zswap_entry_put(tree, entry);
+                       goto resched;
+               }
 
-       /*
-       * There are two possible situations for entry here:
-       * (1) refcount is 1(normal case),  entry is valid and on the tree
-       * (2) refcount is 0, entry is freed and not on the tree
-       *     because invalidate happened during writeback
-       *  search the tree and free the entry if find entry
-       */
-       if (entry == zswap_rb_search(&tree->rbroot, offset))
-               zswap_entry_put(tree, entry);
-       spin_unlock(&tree->lock);
+               if (!mem_cgroup_tryget_online(memcg)) {
+                       /* drop the reference from mem_cgroup_iter() */
+                       mem_cgroup_iter_break(NULL, memcg);
+                       zswap_next_shrink = NULL;
+                       spin_unlock(&zswap_shrink_lock);
 
-       goto end;
+                       if (++failures == MAX_RECLAIM_RETRIES)
+                               break;
 
-       /*
-       * if we get here due to ZSWAP_SWAPCACHE_EXIST
-       * a load may be happening concurrently.
-       * it is safe and okay to not free the entry.
-       * if we free the entry in the following put
-       * it is also okay to return !0
-       */
-fail:
-       spin_lock(&tree->lock);
-       zswap_entry_put(tree, entry);
-       spin_unlock(&tree->lock);
+                       goto resched;
+               }
+               spin_unlock(&zswap_shrink_lock);
 
-end:
-       if (zpool_can_sleep_mapped(pool))
-               zpool_unmap_handle(pool, handle);
-       else
-               kfree(tmp);
+               ret = shrink_memcg(memcg);
+               /* drop the extra reference */
+               mem_cgroup_put(memcg);
 
-       return ret;
+               if (ret == -EINVAL)
+                       break;
+               if (ret && ++failures == MAX_RECLAIM_RETRIES)
+                       break;
+
+resched:
+               cond_resched();
+       } while (!zswap_can_accept());
 }
 
 static int zswap_is_page_same_filled(void *ptr, unsigned long *value)
 {
-       unsigned int pos;
        unsigned long *page;
+       unsigned long val;
+       unsigned int pos, last_pos = PAGE_SIZE / sizeof(*page) - 1;
 
        page = (unsigned long *)ptr;
-       for (pos = 1; pos < PAGE_SIZE / sizeof(*page); pos++) {
-               if (page[pos] != page[0])
+       val = page[0];
+
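+       /*
+        * Compare the first and last words up front: if they differ, the
+        * page cannot be same-filled and we avoid scanning it in full.
+        */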
+       if (val != page[last_pos])
+               return 0;
+
+       for (pos = 1; pos < last_pos; pos++) {
+               if (val != page[pos])
                        return 0;
        }
-       *value = page[0];
+
+       *value = val;
+
        return 1;
 }
 
@@ -1090,41 +1470,34 @@ static void zswap_fill_page(void *ptr, unsigned long value)
        memset_l(page, value, PAGE_SIZE / sizeof(unsigned long));
 }
 
-/*********************************
-* frontswap hooks
-**********************************/
-/* attempts to compress and store an single page */
-static int zswap_frontswap_store(unsigned type, pgoff_t offset,
-                               struct page *page)
+bool zswap_store(struct folio *folio)
 {
-       struct zswap_tree *tree = zswap_trees[type];
+       swp_entry_t swp = folio->swap;
+       pgoff_t offset = swp_offset(swp);
+       struct zswap_tree *tree = swap_zswap_tree(swp);
        struct zswap_entry *entry, *dupentry;
-       struct scatterlist input, output;
-       struct crypto_acomp_ctx *acomp_ctx;
        struct obj_cgroup *objcg = NULL;
-       struct zswap_pool *pool;
-       int ret;
-       unsigned int hlen, dlen = PAGE_SIZE;
-       unsigned long handle, value;
-       char *buf;
-       u8 *src, *dst;
-       struct zswap_header zhdr = { .swpentry = swp_entry(type, offset) };
-       gfp_t gfp;
+       struct mem_cgroup *memcg = NULL;
 
-       /* THP isn't supported */
-       if (PageTransHuge(page)) {
-               ret = -EINVAL;
-               goto reject;
-       }
+       VM_WARN_ON_ONCE(!folio_test_locked(folio));
+       VM_WARN_ON_ONCE(!folio_test_swapcache(folio));
 
-       if (!zswap_enabled || !tree) {
-               ret = -ENODEV;
-               goto reject;
-       }
+       /* Large folios aren't supported */
+       if (folio_test_large(folio))
+               return false;
 
-       objcg = get_obj_cgroup_from_page(page);
-       if (objcg && !obj_cgroup_may_zswap(objcg))
-               goto shrink;
+       if (!zswap_enabled)
+               goto check_old;
+
+       objcg = get_obj_cgroup_from_folio(folio);
+       if (objcg && !obj_cgroup_may_zswap(objcg)) {
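+               /*
+                * The cgroup is over its zswap limit: try to write back
+                * some of its entries to make room, and reject the store
+                * if that fails.
+                */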
+               memcg = get_mem_cgroup_from_objcg(objcg);
+               if (shrink_memcg(memcg)) {
+                       mem_cgroup_put(memcg);
+                       goto reject;
+               }
+               mem_cgroup_put(memcg);
+       }
 
        /* reclaim space if needed */
        if (zswap_is_full()) {
@@ -1134,104 +1507,56 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
        }
 
        if (zswap_pool_reached_full) {
-              if (!zswap_can_accept()) {
-                       ret = -ENOMEM;
-                       goto reject;
-               } else
+               if (!zswap_can_accept())
+                       goto shrink;
+               else
                        zswap_pool_reached_full = false;
        }
 
        /* allocate entry */
-       entry = zswap_entry_cache_alloc(GFP_KERNEL);
+       entry = zswap_entry_cache_alloc(GFP_KERNEL, folio_nid(folio));
        if (!entry) {
                zswap_reject_kmemcache_fail++;
-               ret = -ENOMEM;
                goto reject;
        }
 
        if (zswap_same_filled_pages_enabled) {
-               src = kmap_atomic(page);
+               unsigned long value;
+               u8 *src;
+
+               src = kmap_local_folio(folio, 0);
                if (zswap_is_page_same_filled(src, &value)) {
-                       kunmap_atomic(src);
-                       entry->offset = offset;
+                       kunmap_local(src);
                        entry->length = 0;
                        entry->value = value;
                        atomic_inc(&zswap_same_filled_pages);
                        goto insert_entry;
                }
-               kunmap_atomic(src);
+               kunmap_local(src);
        }
 
-       if (!zswap_non_same_filled_pages_enabled) {
-               ret = -EINVAL;
+       if (!zswap_non_same_filled_pages_enabled)
                goto freepage;
-       }
 
        /* if entry is successfully added, it keeps the reference */
        entry->pool = zswap_pool_current_get();
-       if (!entry->pool) {
-               ret = -EINVAL;
+       if (!entry->pool)
                goto freepage;
-       }
-
-       /* compress */
-       acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
-
-       mutex_lock(acomp_ctx->mutex);
-
-       dst = acomp_ctx->dstmem;
-       sg_init_table(&input, 1);
-       sg_set_page(&input, page, PAGE_SIZE, 0);
-
-       /* zswap_dstmem is of size (PAGE_SIZE * 2). Reflect same in sg_list */
-       sg_init_one(&output, dst, PAGE_SIZE * 2);
-       acomp_request_set_params(acomp_ctx->req, &input, &output, PAGE_SIZE, dlen);
-       /*
-        * it maybe looks a little bit silly that we send an asynchronous request,
-        * then wait for its completion synchronously. This makes the process look
-        * synchronous in fact.
-        * Theoretically, acomp supports users send multiple acomp requests in one
-        * acomp instance, then get those requests done simultaneously. but in this
-        * case, frontswap actually does store and load page by page, there is no
-        * existing method to send the second page before the first page is done
-        * in one thread doing frontswap.
-        * but in different threads running on different cpu, we have different
-        * acomp instance, so multiple threads can do (de)compression in parallel.
-        */
-       ret = crypto_wait_req(crypto_acomp_compress(acomp_ctx->req), &acomp_ctx->wait);
-       dlen = acomp_ctx->req->dlen;
 
-       if (ret) {
-               ret = -EINVAL;
-               goto put_dstmem;
+       if (objcg) {
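+               /*
+                * Make sure the per-memcg list_lru structures exist before
+                * the entry is added to the zswap LRU below.
+                */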
+               memcg = get_mem_cgroup_from_objcg(objcg);
+               if (memcg_list_lru_alloc(memcg, &zswap_list_lru, GFP_KERNEL)) {
+                       mem_cgroup_put(memcg);
+                       goto put_pool;
+               }
+               mem_cgroup_put(memcg);
        }
 
-       /* store */
-       hlen = zpool_evictable(entry->pool->zpool) ? sizeof(zhdr) : 0;
-       gfp = __GFP_NORETRY | __GFP_NOWARN | __GFP_KSWAPD_RECLAIM;
-       if (zpool_malloc_support_movable(entry->pool->zpool))
-               gfp |= __GFP_HIGHMEM | __GFP_MOVABLE;
-       ret = zpool_malloc(entry->pool->zpool, hlen + dlen, gfp, &handle);
-       if (ret == -ENOSPC) {
-               zswap_reject_compress_poor++;
-               goto put_dstmem;
-       }
-       if (ret) {
-               zswap_reject_alloc_fail++;
-               goto put_dstmem;
-       }
-       buf = zpool_map_handle(entry->pool->zpool, handle, ZPOOL_MM_WO);
-       memcpy(buf, &zhdr, hlen);
-       memcpy(buf + hlen, dst, dlen);
-       zpool_unmap_handle(entry->pool->zpool, handle);
-       mutex_unlock(acomp_ctx->mutex);
-
-       /* populate entry */
-       entry->offset = offset;
-       entry->handle = handle;
-       entry->length = dlen;
+       if (!zswap_compress(folio, entry))
+               goto put_pool;
 
 insert_entry:
+       entry->swpentry = swp;
        entry->objcg = objcg;
        if (objcg) {
                obj_cgroup_charge_zswap(objcg, entry->length);
@@ -1241,15 +1566,19 @@ insert_entry:
 
        /* map */
        spin_lock(&tree->lock);
-       do {
-               ret = zswap_rb_insert(&tree->rbroot, entry, &dupentry);
-               if (ret == -EEXIST) {
-                       zswap_duplicate_entry++;
-                       /* remove from rbtree */
-                       zswap_rb_erase(&tree->rbroot, dupentry);
-                       zswap_entry_put(tree, dupentry);
-               }
-       } while (ret == -EEXIST);
+       /*
+        * The folio may have been dirtied again, so invalidate the
+        * possibly stale entry before inserting the new entry.
+        */
+       if (zswap_rb_insert(&tree->rbroot, entry, &dupentry) == -EEXIST) {
+               zswap_invalidate_entry(tree, dupentry);
+               WARN_ON(zswap_rb_insert(&tree->rbroot, entry, &dupentry));
+       }
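+       /*
+        * Only entries with compressed data go on the writeback LRU;
+        * same-filled entries (length == 0) have nothing to write back.
+        */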
+       if (entry->length) {
+               INIT_LIST_HEAD(&entry->lru);
+               zswap_lru_add(&zswap_list_lru, entry);
+               atomic_inc(&zswap_nr_stored);
+       }
        spin_unlock(&tree->lock);
 
        /* update stats */
@@ -1257,171 +1586,124 @@ insert_entry:
        zswap_update_total_size();
        count_vm_event(ZSWPOUT);
 
-       return 0;
+       return true;
 
-put_dstmem:
-       mutex_unlock(acomp_ctx->mutex);
+put_pool:
        zswap_pool_put(entry->pool);
 freepage:
        zswap_entry_cache_free(entry);
 reject:
        if (objcg)
                obj_cgroup_put(objcg);
-       return ret;
+check_old:
+       /*
+        * If the zswap store fails or zswap is disabled, we must invalidate the
+        * possibly stale entry which was previously stored at this offset.
+        * Otherwise, writeback could overwrite the new data in the swapfile.
+        */
+       spin_lock(&tree->lock);
+       entry = zswap_rb_search(&tree->rbroot, offset);
+       if (entry)
+               zswap_invalidate_entry(tree, entry);
+       spin_unlock(&tree->lock);
+       return false;
 
 shrink:
-       pool = zswap_pool_last_get();
-       if (pool)
-               queue_work(shrink_wq, &pool->shrink_work);
-       ret = -ENOMEM;
+       queue_work(shrink_wq, &zswap_shrink_work);
        goto reject;
 }
 
-/*
- * returns 0 if the page was successfully decompressed
- * return -1 on entry not found or error
-*/
-static int zswap_frontswap_load(unsigned type, pgoff_t offset,
-                               struct page *page)
+bool zswap_load(struct folio *folio)
 {
-       struct zswap_tree *tree = zswap_trees[type];
+       swp_entry_t swp = folio->swap;
+       pgoff_t offset = swp_offset(swp);
+       struct page *page = &folio->page;
+       struct zswap_tree *tree = swap_zswap_tree(swp);
        struct zswap_entry *entry;
-       struct scatterlist input, output;
-       struct crypto_acomp_ctx *acomp_ctx;
-       u8 *src, *dst, *tmp;
-       unsigned int dlen;
-       int ret;
+       u8 *dst;
+
+       VM_WARN_ON_ONCE(!folio_test_locked(folio));
 
-       /* find */
        spin_lock(&tree->lock);
-       entry = zswap_entry_find_get(&tree->rbroot, offset);
+       entry = zswap_rb_search(&tree->rbroot, offset);
        if (!entry) {
-               /* entry was written back */
                spin_unlock(&tree->lock);
-               return -1;
+               return false;
        }
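+       /*
+        * Loads are exclusive: take the entry out of the tree now and free
+        * it once its contents have been copied into the folio.
+        */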
+       zswap_rb_erase(&tree->rbroot, entry);
        spin_unlock(&tree->lock);
 
-       if (!entry->length) {
-               dst = kmap_atomic(page);
+       if (entry->length)
+               zswap_decompress(entry, page);
+       else {
+               dst = kmap_local_page(page);
                zswap_fill_page(dst, entry->value);
-               kunmap_atomic(dst);
-               ret = 0;
-               goto stats;
-       }
-
-       if (!zpool_can_sleep_mapped(entry->pool->zpool)) {
-               tmp = kmalloc(entry->length, GFP_ATOMIC);
-               if (!tmp) {
-                       ret = -ENOMEM;
-                       goto freeentry;
-               }
-       }
-
-       /* decompress */
-       dlen = PAGE_SIZE;
-       src = zpool_map_handle(entry->pool->zpool, entry->handle, ZPOOL_MM_RO);
-       if (zpool_evictable(entry->pool->zpool))
-               src += sizeof(struct zswap_header);
-
-       if (!zpool_can_sleep_mapped(entry->pool->zpool)) {
-               memcpy(tmp, src, entry->length);
-               src = tmp;
-               zpool_unmap_handle(entry->pool->zpool, entry->handle);
+               kunmap_local(dst);
        }
 
-       acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
-       mutex_lock(acomp_ctx->mutex);
-       sg_init_one(&input, src, entry->length);
-       sg_init_table(&output, 1);
-       sg_set_page(&output, page, PAGE_SIZE, 0);
-       acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, dlen);
-       ret = crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait);
-       mutex_unlock(acomp_ctx->mutex);
-
-       if (zpool_can_sleep_mapped(entry->pool->zpool))
-               zpool_unmap_handle(entry->pool->zpool, entry->handle);
-       else
-               kfree(tmp);
-
-       BUG_ON(ret);
-stats:
        count_vm_event(ZSWPIN);
        if (entry->objcg)
                count_objcg_event(entry->objcg, ZSWPIN);
-freeentry:
-       spin_lock(&tree->lock);
-       zswap_entry_put(tree, entry);
-       spin_unlock(&tree->lock);
 
-       return ret;
+       zswap_entry_free(entry);
+
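+       /*
+        * The compressed copy is gone, so mark the folio dirty to make
+        * sure it is written out again if it gets reclaimed later.
+        */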
+       folio_mark_dirty(folio);
+
+       return true;
 }
 
-/* frees an entry in zswap */
-static void zswap_frontswap_invalidate_page(unsigned type, pgoff_t offset)
+void zswap_invalidate(swp_entry_t swp)
 {
-       struct zswap_tree *tree = zswap_trees[type];
+       pgoff_t offset = swp_offset(swp);
+       struct zswap_tree *tree = swap_zswap_tree(swp);
        struct zswap_entry *entry;
 
-       /* find */
        spin_lock(&tree->lock);
        entry = zswap_rb_search(&tree->rbroot, offset);
-       if (!entry) {
-               /* entry was written back */
-               spin_unlock(&tree->lock);
-               return;
-       }
-
-       /* remove from rbtree */
-       zswap_rb_erase(&tree->rbroot, entry);
-
-       /* drop the initial reference from entry creation */
-       zswap_entry_put(tree, entry);
-
+       if (entry)
+               zswap_invalidate_entry(tree, entry);
        spin_unlock(&tree->lock);
 }
 
-/* frees all zswap entries for the given swap type */
-static void zswap_frontswap_invalidate_area(unsigned type)
+int zswap_swapon(int type, unsigned long nr_pages)
 {
-       struct zswap_tree *tree = zswap_trees[type];
-       struct zswap_entry *entry, *n;
+       struct zswap_tree *trees, *tree;
+       unsigned int nr, i;
 
-       if (!tree)
-               return;
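+       /*
+        * Use one rb-tree per SWAP_ADDRESS_SPACE_PAGES worth of swap slots
+        * so that tree lock contention is spread across multiple locks.
+        */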
+       nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
+       trees = kvcalloc(nr, sizeof(*tree), GFP_KERNEL);
+       if (!trees) {
+               pr_err("alloc failed, zswap disabled for swap type %d\n", type);
+               return -ENOMEM;
+       }
 
-       /* walk the tree and free everything */
-       spin_lock(&tree->lock);
-       rbtree_postorder_for_each_entry_safe(entry, n, &tree->rbroot, rbnode)
-               zswap_free_entry(entry);
-       tree->rbroot = RB_ROOT;
-       spin_unlock(&tree->lock);
-       kfree(tree);
-       zswap_trees[type] = NULL;
+       for (i = 0; i < nr; i++) {
+               tree = trees + i;
+               tree->rbroot = RB_ROOT;
+               spin_lock_init(&tree->lock);
+       }
+
+       nr_zswap_trees[type] = nr;
+       zswap_trees[type] = trees;
+       return 0;
 }
 
-static void zswap_frontswap_init(unsigned type)
+void zswap_swapoff(int type)
 {
-       struct zswap_tree *tree;
+       struct zswap_tree *trees = zswap_trees[type];
+       unsigned int i;
 
-       tree = kzalloc(sizeof(*tree), GFP_KERNEL);
-       if (!tree) {
-               pr_err("alloc failed, zswap disabled for swap type %d\n", type);
+       if (!trees)
                return;
-       }
 
-       tree->rbroot = RB_ROOT;
-       spin_lock_init(&tree->lock);
-       zswap_trees[type] = tree;
-}
+       /* try_to_unuse() invalidated all the entries already */
+       for (i = 0; i < nr_zswap_trees[type]; i++)
+               WARN_ON_ONCE(!RB_EMPTY_ROOT(&trees[i].rbroot));
 
-static const struct frontswap_ops zswap_frontswap_ops = {
-       .store = zswap_frontswap_store,
-       .load = zswap_frontswap_load,
-       .invalidate_page = zswap_frontswap_invalidate_page,
-       .invalidate_area = zswap_frontswap_invalidate_area,
-       .init = zswap_frontswap_init
-};
+       kvfree(trees);
+       nr_zswap_trees[type] = 0;
+       zswap_trees[type] = NULL;
+}
 
 /*********************************
 * debugfs functions
@@ -1431,7 +1713,7 @@ static const struct frontswap_ops zswap_frontswap_ops = {
 
 static struct dentry *zswap_debugfs_root;
 
-static int __init zswap_debugfs_init(void)
+static int zswap_debugfs_init(void)
 {
        if (!debugfs_initialized())
                return -ENODEV;
@@ -1446,12 +1728,12 @@ static int __init zswap_debugfs_init(void)
                           zswap_debugfs_root, &zswap_reject_alloc_fail);
        debugfs_create_u64("reject_kmemcache_fail", 0444,
                           zswap_debugfs_root, &zswap_reject_kmemcache_fail);
+       debugfs_create_u64("reject_compress_fail", 0444,
+                          zswap_debugfs_root, &zswap_reject_compress_fail);
        debugfs_create_u64("reject_compress_poor", 0444,
                           zswap_debugfs_root, &zswap_reject_compress_poor);
        debugfs_create_u64("written_back_pages", 0444,
                           zswap_debugfs_root, &zswap_written_back_pages);
-       debugfs_create_u64("duplicate_entry", 0444,
-                          zswap_debugfs_root, &zswap_duplicate_entry);
        debugfs_create_u64("pool_total_size", 0444,
                           zswap_debugfs_root, &zswap_pool_total_size);
        debugfs_create_atomic_t("stored_pages", 0444,
@@ -1462,7 +1744,7 @@ static int __init zswap_debugfs_init(void)
        return 0;
 }
 #else
-static int __init zswap_debugfs_init(void)
+static int zswap_debugfs_init(void)
 {
        return 0;
 }
@@ -1471,25 +1753,17 @@ static int __init zswap_debugfs_init(void)
 /*********************************
 * module init and exit
 **********************************/
-static int __init init_zswap(void)
+static int zswap_setup(void)
 {
        struct zswap_pool *pool;
        int ret;
 
-       zswap_init_started = true;
-
-       if (zswap_entry_cache_create()) {
+       zswap_entry_cache = KMEM_CACHE(zswap_entry, 0);
+       if (!zswap_entry_cache) {
                pr_err("entry cache creation failed\n");
                goto cache_fail;
        }
 
-       ret = cpuhp_setup_state(CPUHP_MM_ZSWP_MEM_PREPARE, "mm/zswap:prepare",
-                               zswap_dstmem_prepare, zswap_dstmem_dead);
-       if (ret) {
-               pr_err("dstmem alloc failed\n");
-               goto dstmem_fail;
-       }
-
        ret = cpuhp_setup_state_multi(CPUHP_MM_ZSWP_POOL_PREPARE,
                                      "mm/zswap_pool:prepare",
                                      zswap_cpu_comp_prepare,
@@ -1497,10 +1771,24 @@ static int __init init_zswap(void)
        if (ret)
                goto hp_fail;
 
+       shrink_wq = alloc_workqueue("zswap-shrink",
+                       WQ_UNBOUND|WQ_MEM_RECLAIM, 1);
+       if (!shrink_wq)
+               goto shrink_wq_fail;
+
+       zswap_shrinker = zswap_alloc_shrinker();
+       if (!zswap_shrinker)
+               goto shrinker_fail;
+       if (list_lru_init_memcg(&zswap_list_lru, zswap_shrinker))
+               goto lru_fail;
+       shrinker_register(zswap_shrinker);
+
+       INIT_WORK(&zswap_shrink_work, shrink_worker);
+
        pool = __zswap_pool_create_fallback();
        if (pool) {
                pr_info("loaded using pool %s/%s\n", pool->tfm_name,
-                       zpool_get_type(pool->zpool));
+                       zpool_get_type(pool->zpools[0]));
                list_add(&pool->list, &zswap_pools);
                zswap_has_pool = true;
        } else {
@@ -1508,35 +1796,34 @@ static int __init init_zswap(void)
                zswap_enabled = false;
        }
 
-       shrink_wq = create_workqueue("zswap-shrink");
-       if (!shrink_wq)
-               goto fallback_fail;
-
-       ret = frontswap_register_ops(&zswap_frontswap_ops);
-       if (ret)
-               goto destroy_wq;
        if (zswap_debugfs_init())
                pr_warn("debugfs initialization failed\n");
+       zswap_init_state = ZSWAP_INIT_SUCCEED;
        return 0;
 
-destroy_wq:
+lru_fail:
+       shrinker_free(zswap_shrinker);
+shrinker_fail:
        destroy_workqueue(shrink_wq);
-fallback_fail:
-       if (pool)
-               zswap_pool_destroy(pool);
+shrink_wq_fail:
+       cpuhp_remove_multi_state(CPUHP_MM_ZSWP_POOL_PREPARE);
 hp_fail:
-       cpuhp_remove_state(CPUHP_MM_ZSWP_MEM_PREPARE);
-dstmem_fail:
-       zswap_entry_cache_destroy();
+       kmem_cache_destroy(zswap_entry_cache);
 cache_fail:
        /* if built-in, we aren't unloaded on failure; don't allow use */
-       zswap_init_failed = true;
+       zswap_init_state = ZSWAP_INIT_FAILED;
        zswap_enabled = false;
        return -ENOMEM;
 }
+
+static int __init zswap_init(void)
+{
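+       /*
+        * If zswap is disabled at boot, skip the setup here; it is done
+        * later if zswap is enabled at runtime.
+        */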
+       if (!zswap_enabled)
+               return 0;
+       return zswap_setup();
+}
 /* must be late so crypto has time to come up */
-late_initcall(init_zswap);
+late_initcall(zswap_init);
 
-MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Seth Jennings <sjennings@variantweb.net>");
 MODULE_DESCRIPTION("Compressed cache for swap pages");