rhashtable: Avoid calculating hash again to unlock
Author:     Thomas Graf <tgraf@suug.ch>
AuthorDate: Mon, 16 Mar 2015 09:42:26 +0000 (10:42 +0100)
Committer:  David S. Miller <davem@davemloft.net>
CommitDate: Mon, 16 Mar 2015 21:14:34 +0000 (17:14 -0400)
Caching the lock pointer avoids having to hash on the object
again to unlock the bucket locks.

Signed-off-by: Thomas Graf <tgraf@suug.ch>
Signed-off-by: David S. Miller <davem@davemloft.net>
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index eae26a6..09a7ada 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -384,14 +384,16 @@ static bool __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
        struct rhash_head *head;
        bool no_resize_running;
        unsigned hash;
+       spinlock_t *old_lock;
        bool success = true;
 
        rcu_read_lock();
 
        old_tbl = rht_dereference_rcu(ht->tbl, ht);
        hash = head_hashfn(ht, old_tbl, obj);
+       old_lock = bucket_lock(old_tbl, hash);
 
-       spin_lock_bh(bucket_lock(old_tbl, hash));
+       spin_lock_bh(old_lock);
 
        /* Because we have already taken the bucket lock in old_tbl,
         * if we find that future_tbl is not yet visible then that
@@ -428,13 +430,10 @@ static bool __rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
                schedule_work(&ht->run_work);
 
 exit:
-       if (tbl != old_tbl) {
-               hash = head_hashfn(ht, tbl, obj);
+       if (tbl != old_tbl)
                spin_unlock(bucket_lock(tbl, hash));
-       }
 
-       hash = head_hashfn(ht, old_tbl, obj);
-       spin_unlock_bh(bucket_lock(old_tbl, hash));
+       spin_unlock_bh(old_lock);
 
        rcu_read_unlock();
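
The diff applies a small, general pattern: hash the object once, derive the bucket lock pointer from that hash, and keep the pointer around so the unlock path does not need to hash again. Below is a minimal user-space sketch of the same idea, using pthread mutexes in place of the kernel's spinlocks; struct toy_table, toy_hash() and toy_bucket_lock() are hypothetical stand-ins for the rhashtable internals, not the actual kernel API.

#include <pthread.h>
#include <stdint.h>

#define NR_BUCKET_LOCKS 64

struct toy_table {
	pthread_mutex_t locks[NR_BUCKET_LOCKS];	/* one lock protects a group of buckets */
	/* ... bucket heads ... */
};

/* Hypothetical hash function, standing in for head_hashfn(). */
static unsigned toy_hash(const void *obj)
{
	return (unsigned)(((uintptr_t)obj >> 4) * 2654435761u);
}

/* Map a hash to its bucket lock, standing in for bucket_lock(). */
static pthread_mutex_t *toy_bucket_lock(struct toy_table *tbl, unsigned hash)
{
	return &tbl->locks[hash % NR_BUCKET_LOCKS];
}

static void toy_insert(struct toy_table *tbl, void *obj)
{
	unsigned hash = toy_hash(obj);
	/* Hash once and cache the lock pointer ... */
	pthread_mutex_t *lock = toy_bucket_lock(tbl, hash);

	pthread_mutex_lock(lock);
	/* ... link obj into the bucket selected by hash ... */
	pthread_mutex_unlock(lock);	/* ... and unlock without re-hashing. */
}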