This is just some type safety cleanup: the btree locking helpers (btree_node_lock(), btree_node_lock_increment(), bch2_btree_node_lock_counts(), etc.) now take a struct btree_bkey_cached_common * instead of a struct btree *, so they can operate on cached btree keys as well as btree nodes.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
                if (btree_node_read_locked(path, level + 1))
                        btree_node_unlock(trans, path, level + 1);
 
-               ret = btree_node_lock(trans, path, b, k->k.p, level, lock_type,
+               ret = btree_node_lock(trans, path, &b->c, k->k.p, level, lock_type,
                                      lock_node_check_fn, (void *) k, trace_ip);
                if (unlikely(ret)) {
                        if (bch2_err_matches(ret, BCH_ERR_lock_fail_node_reused))
 
                }
 
                lock_type = __btree_lock_want(path, path->level);
-               ret = btree_node_lock(trans, path, b, SPOS_MAX,
+               ret = btree_node_lock(trans, path, &b->c, SPOS_MAX,
                                      path->level, lock_type,
                                      lock_root_check_fn, rootp,
                                      trace_ip);
 
 
 struct six_lock_count bch2_btree_node_lock_counts(struct btree_trans *trans,
                                                  struct btree_path *skip,
-                                                 struct btree *b,
+                                                 struct btree_bkey_cached_common *b,
                                                  unsigned level)
 {
        struct btree_path *path;
                return ret;
 
        trans_for_each_path(trans, path)
-               if (path != skip && path->l[level].b == b) {
+               if (path != skip && &path->l[level].b->c == b) {
                        int t = btree_node_locked_type(path, level);
 
                        if (t != BTREE_NODE_UNLOCKED)
 
 void __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree *b)
 {
-       int readers = bch2_btree_node_lock_counts(trans, NULL, b, b->c.level).n[SIX_LOCK_read];
+       int readers = bch2_btree_node_lock_counts(trans, NULL, &b->c, b->c.level).n[SIX_LOCK_read];
 
        /*
         * Must drop our read locks before calling six_lock_write() -
 /* Slowpath: */
 int __bch2_btree_node_lock(struct btree_trans *trans,
                           struct btree_path *path,
-                          struct btree *b,
+                          struct btree_bkey_cached_common *b,
                           struct bpos pos, unsigned level,
                           enum six_lock_type type,
                           six_lock_should_sleep_fn should_sleep_fn, void *p,
 
                /* Must lock btree nodes in key order: */
                if (btree_node_locked(linked, level) &&
-                   bpos_cmp(pos, btree_node_pos((void *) linked->l[level].b,
+                   bpos_cmp(pos, btree_node_pos(&linked->l[level].b->c,
                                                 linked->cached)) <= 0) {
                        reason = 7;
                        goto deadlock;
 
        if (six_relock_type(&b->c.lock, want, path->l[level].lock_seq) ||
            (btree_node_lock_seq_matches(path, b, level) &&
-            btree_node_lock_increment(trans, b, level, want))) {
+            btree_node_lock_increment(trans, &b->c, level, want))) {
                mark_btree_node_locked(trans, path, level, want);
                return true;
        }
                goto success;
 
        if (btree_node_lock_seq_matches(path, b, level) &&
-           btree_node_lock_increment(trans, b, level, BTREE_NODE_INTENT_LOCKED)) {
+           btree_node_lock_increment(trans, &b->c, level, BTREE_NODE_INTENT_LOCKED)) {
                btree_node_unlock(trans, path, level);
                goto success;
        }
 
 
 static inline int btree_node_lock_type(struct btree_trans *trans,
                                       struct btree_path *path,
-                                      struct btree *b,
+                                      struct btree_bkey_cached_common *b,
                                       struct bpos pos, unsigned level,
                                       enum six_lock_type type,
                                       six_lock_should_sleep_fn should_sleep_fn, void *p)
        u64 start_time;
        int ret;
 
-       if (six_trylock_type(&b->c.lock, type))
+       if (six_trylock_type(&b->lock, type))
                return 0;
 
        start_time = local_clock();
        trans->locking_btree_id = path->btree_id;
        trans->locking_level    = level;
        trans->locking_lock_type = type;
-       trans->locking          = &b->c;
-       ret = six_lock_type(&b->c.lock, type, should_sleep_fn, p);
+       trans->locking          = b;
+       ret = six_lock_type(&b->lock, type, should_sleep_fn, p);
        trans->locking = NULL;
 
        if (ret)
  * iterators:
  */
 static inline bool btree_node_lock_increment(struct btree_trans *trans,
-                                            struct btree *b, unsigned level,
+                                            struct btree_bkey_cached_common *b,
+                                            unsigned level,
                                             enum btree_node_locked_type want)
 {
        struct btree_path *path;
 
        trans_for_each_path(trans, path)
-               if (path->l[level].b == b &&
+               if (&path->l[level].b->c == b &&
                    btree_node_locked_type(path, level) >= want) {
-                       six_lock_increment(&b->c.lock, want);
+                       six_lock_increment(&b->lock, want);
                        return true;
                }
 
 }
 
 int __bch2_btree_node_lock(struct btree_trans *, struct btree_path *,
-                          struct btree *, struct bpos, unsigned,
+                          struct btree_bkey_cached_common *,
+                          struct bpos, unsigned,
                           enum six_lock_type,
                           six_lock_should_sleep_fn, void *,
                           unsigned long);
 
 static inline int btree_node_lock(struct btree_trans *trans,
                        struct btree_path *path,
-                       struct btree *b, struct bpos pos, unsigned level,
+                       struct btree_bkey_cached_common *b,
+                       struct bpos pos, unsigned level,
                        enum six_lock_type type,
                        six_lock_should_sleep_fn should_sleep_fn, void *p,
                        unsigned long ip)
        EBUG_ON(level >= BTREE_MAX_DEPTH);
        EBUG_ON(!(trans->paths_allocated & (1ULL << path->idx)));
 
-       if (likely(six_trylock_type(&b->c.lock, type)) ||
+       if (likely(six_trylock_type(&b->lock, type)) ||
            btree_node_lock_increment(trans, b, level, type) ||
            !(ret = __bch2_btree_node_lock(trans, path, b, pos, level, type,
                                           should_sleep_fn, p, ip))) {
 #ifdef CONFIG_BCACHEFS_LOCK_TIME_STATS
-               path->l[b->c.level].lock_taken_time = ktime_get_ns();
+               path->l[b->level].lock_taken_time = ktime_get_ns();
 #endif
        }
 
 /* debug */
 
 struct six_lock_count bch2_btree_node_lock_counts(struct btree_trans *,
-                               struct btree_path *, struct btree *, unsigned);
+                               struct btree_path *,
+                               struct btree_bkey_cached_common *b,
+                               unsigned);
 
 
 #ifdef CONFIG_BCACHEFS_DEBUG
 
                                goto fail;
 
                        ret = btree_node_lock_type(trans, i->path,
-                                            insert_l(i)->b,
+                                            &insert_l(i)->b->c,
                                             i->path->pos, i->level,
                                             SIX_LOCK_write, NULL, NULL);
                        BUG_ON(ret);
 
                TRACE_BPOS_assign(pos, path->pos);
                __entry->locked                 = btree_node_locked(path, level);
 
-               c = bch2_btree_node_lock_counts(trans, NULL, path->l[level].b, level),
+               c = bch2_btree_node_lock_counts(trans, NULL, &path->l[level].b->c, level),
                __entry->self_read_count        = c.n[SIX_LOCK_read];
                __entry->self_intent_count      = c.n[SIX_LOCK_intent];
                c = six_lock_counts(&path->l[level].b->c.lock);