mm/list_lru: simplify the list_lru walk callback function
author		Kairui Song <kasong@tencent.com>
		Mon, 4 Nov 2024 17:52:57 +0000 (01:52 +0800)
committer	Andrew Morton <akpm@linux-foundation.org>
		Tue, 12 Nov 2024 01:22:26 +0000 (17:22 -0800)
Now that isolation no longer takes the list_lru global node lock and only
uses the per-cgroup lock instead, and since that lock lives inside the
list_lru_one being walked, there is no longer any need to pass the lock to
the walk callback explicitly.
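
For illustration, a walk callback under the new API reaches the per-cgroup
lock through the list_lru_one it is handed.  A minimal sketch (not part of
this patch; the callback name and dispose-list argument are hypothetical):

	static enum lru_status example_isolate(struct list_head *item,
					       struct list_lru_one *lru,
					       void *cb_arg)
	{
		struct list_head *dispose = cb_arg;

		/* lru->lock is held on entry; a callback that drops it
		 * must return LRU_RETRY or LRU_REMOVED_RETRY so the
		 * walker can revalidate its position. */
		list_lru_isolate_move(lru, item, dispose);
		return LRU_REMOVED;
	}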

Link: https://lkml.kernel.org/r/20241104175257.60853-7-ryncsn@gmail.com
Signed-off-by: Kairui Song <kasong@tencent.com>
Cc: Chengming Zhou <zhouchengming@bytedance.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Qi Zheng <zhengqi.arch@bytedance.com>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Shakeel Butt <shakeel.butt@linux.dev>
Cc: Waiman Long <longman@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
13 files changed:
drivers/android/binder_alloc.c
drivers/android/binder_alloc.h
fs/dcache.c
fs/gfs2/quota.c
fs/inode.c
fs/nfs/nfs42xattr.c
fs/nfsd/filecache.c
fs/xfs/xfs_buf.c
fs/xfs/xfs_qm.c
include/linux/list_lru.h
mm/list_lru.c
mm/workingset.c
mm/zswap.c

drivers/android/binder_alloc.c
index 86bbe40..a738e77 100644
@@ -1047,7 +1047,7 @@ void binder_alloc_vma_close(struct binder_alloc *alloc)
 /**
  * binder_alloc_free_page() - shrinker callback to free pages
  * @item:   item to free
- * @lock:   lock protecting the item
+ * @lru:    list_lru instance of the item
  * @cb_arg: callback argument
  *
  * Called from list_lru_walk() in binder_shrink_scan() to free
@@ -1055,9 +1055,8 @@ void binder_alloc_vma_close(struct binder_alloc *alloc)
  */
 enum lru_status binder_alloc_free_page(struct list_head *item,
                                       struct list_lru_one *lru,
-                                      spinlock_t *lock,
                                       void *cb_arg)
-       __must_hold(lock)
+       __must_hold(&lru->lock)
 {
        struct binder_lru_page *page = container_of(item, typeof(*page), lru);
        struct binder_alloc *alloc = page->alloc;
@@ -1092,7 +1091,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 
        list_lru_isolate(lru, item);
        spin_unlock(&alloc->lock);
-       spin_unlock(lock);
+       spin_unlock(&lru->lock);
 
        if (vma) {
                trace_binder_unmap_user_start(alloc, index);
drivers/android/binder_alloc.h
index 7038723..c02c8eb 100644
@@ -118,7 +118,7 @@ static inline void binder_selftest_alloc(struct binder_alloc *alloc) {}
 #endif
 enum lru_status binder_alloc_free_page(struct list_head *item,
                                       struct list_lru_one *lru,
-                                      spinlock_t *lock, void *cb_arg);
+                                      void *cb_arg);
 struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc,
                                           size_t data_size,
                                           size_t offsets_size,
fs/dcache.c
index 0f6b16b..d7f6866 100644
@@ -1089,7 +1089,7 @@ void shrink_dentry_list(struct list_head *list)
 }
 
 static enum lru_status dentry_lru_isolate(struct list_head *item,
-               struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
+               struct list_lru_one *lru, void *arg)
 {
        struct list_head *freeable = arg;
        struct dentry   *dentry = container_of(item, struct dentry, d_lru);
@@ -1170,7 +1170,7 @@ long prune_dcache_sb(struct super_block *sb, struct shrink_control *sc)
 }
 
 static enum lru_status dentry_lru_isolate_shrink(struct list_head *item,
-               struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
+               struct list_lru_one *lru, void *arg)
 {
        struct list_head *freeable = arg;
        struct dentry   *dentry = container_of(item, struct dentry, d_lru);
fs/gfs2/quota.c
index 2e6bc77..72b48f6 100644
@@ -149,7 +149,7 @@ static void gfs2_qd_list_dispose(struct list_head *list)
 
 
 static enum lru_status gfs2_qd_isolate(struct list_head *item,
-               struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
+               struct list_lru_one *lru, void *arg)
 {
        struct list_head *dispose = arg;
        struct gfs2_quota_data *qd =
fs/inode.c
index 442cb4f..46fbd5b 100644
@@ -881,7 +881,7 @@ again:
  * with this flag set because they are the inodes that are out of order.
  */
 static enum lru_status inode_lru_isolate(struct list_head *item,
-               struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
+               struct list_lru_one *lru, void *arg)
 {
        struct list_head *freeable = arg;
        struct inode    *inode = container_of(item, struct inode, i_lru);
@@ -923,7 +923,7 @@ static enum lru_status inode_lru_isolate(struct list_head *item,
        if (inode_has_buffers(inode) || !mapping_empty(&inode->i_data)) {
                inode_pin_lru_isolating(inode);
                spin_unlock(&inode->i_lock);
-               spin_unlock(lru_lock);
+               spin_unlock(&lru->lock);
                if (remove_inode_buffers(inode)) {
                        unsigned long reap;
                        reap = invalidate_mapping_pages(&inode->i_data, 0, -1);
fs/nfs/nfs42xattr.c
index b6e3d8f..37d7940 100644
@@ -802,7 +802,7 @@ static struct shrinker *nfs4_xattr_large_entry_shrinker;
 
 static enum lru_status
 cache_lru_isolate(struct list_head *item,
-       struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
+       struct list_lru_one *lru, void *arg)
 {
        struct list_head *dispose = arg;
        struct inode *inode;
@@ -867,7 +867,7 @@ nfs4_xattr_cache_count(struct shrinker *shrink, struct shrink_control *sc)
 
 static enum lru_status
 entry_lru_isolate(struct list_head *item,
-       struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
+       struct list_lru_one *lru, void *arg)
 {
        struct list_head *dispose = arg;
        struct nfs4_xattr_bucket *bucket;
fs/nfsd/filecache.c
index 2e6783f..09c444e 100644
@@ -487,7 +487,6 @@ void nfsd_file_net_dispose(struct nfsd_net *nn)
  * nfsd_file_lru_cb - Examine an entry on the LRU list
  * @item: LRU entry to examine
  * @lru: controlling LRU
- * @lock: LRU list lock (unused)
  * @arg: dispose list
  *
  * Return values:
@@ -497,9 +496,7 @@ void nfsd_file_net_dispose(struct nfsd_net *nn)
  */
 static enum lru_status
 nfsd_file_lru_cb(struct list_head *item, struct list_lru_one *lru,
-                spinlock_t *lock, void *arg)
-       __releases(lock)
-       __acquires(lock)
+                void *arg)
 {
        struct list_head *head = arg;
        struct nfsd_file *nf = list_entry(item, struct nfsd_file, nf_lru);
fs/xfs/xfs_buf.c
index aa4dbda..43b914c 100644
@@ -1857,7 +1857,6 @@ static enum lru_status
 xfs_buftarg_drain_rele(
        struct list_head        *item,
        struct list_lru_one     *lru,
-       spinlock_t              *lru_lock,
        void                    *arg)
 
 {
@@ -1956,7 +1955,6 @@ static enum lru_status
 xfs_buftarg_isolate(
        struct list_head        *item,
        struct list_lru_one     *lru,
-       spinlock_t              *lru_lock,
        void                    *arg)
 {
        struct xfs_buf          *bp = container_of(item, struct xfs_buf, b_lru);
fs/xfs/xfs_qm.c
index 665d269..8413ac3 100644
@@ -412,9 +412,8 @@ static enum lru_status
 xfs_qm_dquot_isolate(
        struct list_head        *item,
        struct list_lru_one     *lru,
-       spinlock_t              *lru_lock,
        void                    *arg)
-               __releases(lru_lock) __acquires(lru_lock)
+               __releases(&lru->lock) __acquires(&lru->lock)
 {
        struct xfs_dquot        *dqp = container_of(item,
                                                struct xfs_dquot, q_lru);
@@ -460,7 +459,7 @@ xfs_qm_dquot_isolate(
                trace_xfs_dqreclaim_dirty(dqp);
 
                /* we have to drop the LRU lock to flush the dquot */
-               spin_unlock(lru_lock);
+               spin_unlock(&lru->lock);
 
                error = xfs_qm_dqflush(dqp, &bp);
                if (error)
include/linux/list_lru.h
index 10ba9a5..05c1668 100644
@@ -184,7 +184,7 @@ void list_lru_isolate_move(struct list_lru_one *list, struct list_head *item,
                           struct list_head *head);
 
 typedef enum lru_status (*list_lru_walk_cb)(struct list_head *item,
-               struct list_lru_one *list, spinlock_t *lock, void *cb_arg);
+               struct list_lru_one *list, void *cb_arg);
 
 /**
  * list_lru_walk_one: walk a @lru, isolating and disposing freeable items.
mm/list_lru.c
index c139202..f93ada6 100644
@@ -298,7 +298,7 @@ restart:
                        break;
                --*nr_to_walk;
 
-               ret = isolate(item, l, &l->lock, cb_arg);
+               ret = isolate(item, l, cb_arg);
                switch (ret) {
                /*
                 * LRU_RETRY, LRU_REMOVED_RETRY and LRU_STOP will drop the lru
mm/workingset.c
index c187d4a..a4705e1 100644
@@ -702,8 +702,7 @@ static unsigned long count_shadow_nodes(struct shrinker *shrinker,
 
 static enum lru_status shadow_lru_isolate(struct list_head *item,
                                          struct list_lru_one *lru,
-                                         spinlock_t *lru_lock,
-                                         void *arg) __must_hold(lru_lock)
+                                         void *arg) __must_hold(lru->lock)
 {
        struct xa_node *node = container_of(item, struct xa_node, private_list);
        struct address_space *mapping;
@@ -712,20 +711,20 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
        /*
         * Page cache insertions and deletions synchronously maintain
         * the shadow node LRU under the i_pages lock and the
-        * lru_lock.  Because the page cache tree is emptied before
-        * the inode can be destroyed, holding the lru_lock pins any
+        * &lru->lock. Because the page cache tree is emptied before
+        * the inode can be destroyed, holding the &lru->lock pins any
         * address_space that has nodes on the LRU.
         *
         * We can then safely transition to the i_pages lock to
         * pin only the address_space of the particular node we want
-        * to reclaim, take the node off-LRU, and drop the lru_lock.
+        * to reclaim, take the node off-LRU, and drop the &lru->lock.
         */
 
        mapping = container_of(node->array, struct address_space, i_pages);
 
        /* Coming from the list, invert the lock order */
        if (!xa_trylock(&mapping->i_pages)) {
-               spin_unlock_irq(lru_lock);
+               spin_unlock_irq(&lru->lock);
                ret = LRU_RETRY;
                goto out;
        }
@@ -734,7 +733,7 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
        if (mapping->host != NULL) {
                if (!spin_trylock(&mapping->host->i_lock)) {
                        xa_unlock(&mapping->i_pages);
-                       spin_unlock_irq(lru_lock);
+                       spin_unlock_irq(&lru->lock);
                        ret = LRU_RETRY;
                        goto out;
                }
@@ -743,7 +742,7 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
        list_lru_isolate(lru, item);
        __dec_node_page_state(virt_to_page(node), WORKINGSET_NODES);
 
-       spin_unlock(lru_lock);
+       spin_unlock(&lru->lock);
 
        /*
         * The nodes should only contain one or more shadow entries,
mm/zswap.c
index ba35e45..f6316b6 100644
@@ -1102,7 +1102,7 @@ static int zswap_writeback_entry(struct zswap_entry *entry,
  *    for reclaim by this ratio.
  */
 static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_one *l,
-                                      spinlock_t *lock, void *arg)
+                                      void *arg)
 {
        struct zswap_entry *entry = container_of(item, struct zswap_entry, lru);
        bool *encountered_page_in_swapcache = (bool *)arg;
@@ -1158,7 +1158,7 @@ static enum lru_status shrink_memcg_cb(struct list_head *item, struct list_lru_o
         * It's safe to drop the lock here because we return either
         * LRU_REMOVED_RETRY or LRU_RETRY.
         */
-       spin_unlock(lock);
+       spin_unlock(&l->lock);
 
        writeback_result = zswap_writeback_entry(entry, swpentry);