RDMA/mlx5: Remove not-used cache disable flag
author	Leon Romanovsky <leonro@nvidia.com>
Thu, 28 Sep 2023 17:20:47 +0000 (20:20 +0300)
committer	Leon Romanovsky <leon@kernel.org>
Mon, 2 Oct 2023 11:32:44 +0000 (14:32 +0300)
During execution of mlx5_mkey_cache_cleanup(), there is a guarantee
that MRs are not being registered and/or destroyed. It means that we
don't need the newly introduced cache disable flag.
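
As a minimal sketch of the ordering this relies on (illustrative only;
quiesce_mr_paths() is a hypothetical helper, not a function in the driver):

	/*
	 * Illustrative sketch: by the time mlx5_mkey_cache_cleanup() runs,
	 * MR registration and destruction have already stopped, so nothing
	 * can reach cache_ent_find_and_store() concurrently with the
	 * rb-tree teardown and the disable flag has no work to do.
	 */
	static void teardown_sketch(struct mlx5_ib_dev *dev)
	{
		quiesce_mr_paths(dev);		/* hypothetical: no more MR reg/dereg */
		mlx5_mkey_cache_cleanup(dev);	/* safe without cache->disable */
	}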

Fixes: 374012b00457 ("RDMA/mlx5: Fix mkey cache possible deadlock on cleanup")
Link: https://lore.kernel.org/r/c7e9c9f98c8ae4a7413d97d9349b29f5b0a23dbe.1695921626.git.leon@kernel.org
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
drivers/infiniband/hw/mlx5/mlx5_ib.h
drivers/infiniband/hw/mlx5/mr.c

diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index d1ff98a..16713ba 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -798,7 +798,6 @@ struct mlx5_mkey_cache {
        struct dentry           *fs_root;
        unsigned long           last_add;
        struct delayed_work     remove_ent_dwork;
-       u8                      disable: 1;
 };
 
 struct mlx5_ib_port_resources {
diff --git a/drivers/infiniband/hw/mlx5/mr.c b/drivers/infiniband/hw/mlx5/mr.c
index 433f964..8a3762d 100644
--- a/drivers/infiniband/hw/mlx5/mr.c
+++ b/drivers/infiniband/hw/mlx5/mr.c
@@ -1026,7 +1026,6 @@ void mlx5_mkey_cache_cleanup(struct mlx5_ib_dev *dev)
                return;
 
        mutex_lock(&dev->cache.rb_lock);
-       dev->cache.disable = true;
        for (node = rb_first(root); node; node = rb_next(node)) {
                ent = rb_entry(node, struct mlx5_cache_ent, node);
                xa_lock_irq(&ent->mkeys);
@@ -1830,10 +1829,6 @@ static int cache_ent_find_and_store(struct mlx5_ib_dev *dev,
        }
 
        mutex_lock(&cache->rb_lock);
-       if (cache->disable) {
-               mutex_unlock(&cache->rb_lock);
-               return 0;
-       }
        ent = mkey_cache_ent_from_rb_key(dev, mr->mmkey.rb_key);
        if (ent) {
                if (ent->rb_key.ndescs == mr->mmkey.rb_key.ndescs) {