}
}
- ret = bch2_btree_node_lock_write(trans, ck_path, ck_path->l[0].b);
+ ret = bch2_btree_node_lock_write(trans, ck_path, &ck_path->l[0].b->c);
if (ret) {
	kfree(new_k);
	goto err;
}
/* lock */
-void __bch2_btree_node_lock_write(struct btree_trans *trans, struct btree *b)
+void __bch2_btree_node_lock_write(struct btree_trans *trans,
+ struct btree_bkey_cached_common *b)
{
- int readers = bch2_btree_node_lock_counts(trans, NULL, &b->c, b->c.level).n[SIX_LOCK_read];
+ int readers = bch2_btree_node_lock_counts(trans, NULL, b, b->level).n[SIX_LOCK_read];
/*
 * Must drop our read locks before calling six_lock_write() -
 * six_unlock() won't do wakeups until the reader count
 * goes to 0, and it's safe because we have the node intent
 * locked:
 */
- six_lock_readers_add(&b->c.lock, -readers);
- btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_write);
- six_lock_readers_add(&b->c.lock, readers);
+ six_lock_readers_add(&b->lock, -readers);
+ btree_node_lock_nopath_nofail(trans, b, SIX_LOCK_write);
+ six_lock_readers_add(&b->lock, readers);
}
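
For readers outside the bcachefs tree, here is a minimal standalone sketch of the read-lock juggling above (not part of the patch). The rwcount type and its helpers are hypothetical stand-ins for the six-lock API, not the real implementation; the point is that the thread subtracts its own read holds before acquiring the write lock so it cannot deadlock against itself, which is safe only because the node is already intent locked and no new readers can race in.

#include <assert.h>
#include <stdio.h>

/* Hypothetical stand-in for a six lock, for illustration only. */
struct rwcount {
	int readers;		/* read holds, some possibly our own */
	int write_locked;
};

/* The write lock can only be taken once the reader count hits zero. */
static void rwcount_write_lock(struct rwcount *l)
{
	assert(l->readers == 0);	/* would spin/sleep in real life */
	l->write_locked = 1;
}

/*
 * Mirrors __bch2_btree_node_lock_write(): shed our own read holds,
 * take the write lock, then restore the reader count.
 */
static void node_lock_write(struct rwcount *l, int our_readers)
{
	l->readers -= our_readers;
	rwcount_write_lock(l);
	l->readers += our_readers;
}

int main(void)
{
	struct rwcount lock = { .readers = 2 };

	/* Both read holds are ours; keeping them would self-deadlock. */
	node_lock_write(&lock, 2);
	printf("write locked, %d read holds restored\n", lock.readers);
	return 0;
}
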
static inline bool path_has_read_locks(struct btree_path *path)
return ret;
}
-void __bch2_btree_node_lock_write(struct btree_trans *, struct btree *);
+void __bch2_btree_node_lock_write(struct btree_trans *, struct btree_bkey_cached_common *);
static inline void bch2_btree_node_lock_write_nofail(struct btree_trans *trans,
struct btree_path *path,
- struct btree *b)
+ struct btree_bkey_cached_common *b)
{
- EBUG_ON(path->l[b->c.level].b != b);
- EBUG_ON(path->l[b->c.level].lock_seq != b->c.lock.state.seq);
- EBUG_ON(!btree_node_intent_locked(path, b->c.level));
+ EBUG_ON(&path->l[b->level].b->c != b);
+ EBUG_ON(path->l[b->level].lock_seq != b->lock.state.seq);
+ EBUG_ON(!btree_node_intent_locked(path, b->level));
/*
* six locks are unfair, and read locks block while a thread wants a
* write lock: thus, we need to tell the cycle detector we have a write
* lock _before_ taking the lock:
*/
- mark_btree_node_locked_noreset(path, b->c.level, SIX_LOCK_write);
+ mark_btree_node_locked_noreset(path, b->level, SIX_LOCK_write);
- if (unlikely(!six_trylock_write(&b->c.lock)))
+ if (unlikely(!six_trylock_write(&b->lock)))
__bch2_btree_node_lock_write(trans, b);
}
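
The comment above is the subtle part of this function: six locks are unfair, so a blocked writer also blocks incoming readers, and any deadlock cycle through this node must be visible to the cycle detector before we block. The toy skeleton below (again, not part of the patch) sketches only that ordering; mark_locked(), trylock_write() and lock_write_slowpath() are hypothetical names standing in for the roles of mark_btree_node_locked_noreset(), six_trylock_write() and the slowpath, not their real signatures.

#include <stdbool.h>
#include <stdio.h>

enum lock_type { LOCK_UNLOCKED, LOCK_READ, LOCK_INTENT, LOCK_WRITE };

/* Hypothetical per-path lock table that a cycle detector would scan. */
static enum lock_type held[4];

static void mark_locked(unsigned level, enum lock_type t)
{
	held[level] = t;	/* published before we can block below */
}

static bool trylock_write(void)
{
	return false;		/* pretend the lock is contended */
}

static void lock_write_slowpath(void)
{
	/*
	 * Would block here; the detector already sees LOCK_WRITE in
	 * held[], so a cycle through this lock can be found and broken.
	 */
}

static void node_lock_write(unsigned level)
{
	mark_locked(level, LOCK_WRITE);
	if (!trylock_write())
		lock_write_slowpath();
}

int main(void)
{
	node_lock_write(0);
	printf("lock state at level 0: %d\n", held[0]);
	return 0;
}
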
static inline int __must_check
bch2_btree_node_lock_write(struct btree_trans *trans,
struct btree_path *path,
- struct btree *b)
+ struct btree_bkey_cached_common *b)
{
bch2_btree_node_lock_write_nofail(trans, path, b);
return 0;
* Ensure no one is using the old root while we switch to the
* new root:
*/
- bch2_btree_node_lock_write_nofail(trans, path, old);
+ bch2_btree_node_lock_write_nofail(trans, path, &old->c);
bch2_btree_set_root_inmem(c, b);
if (ret)
goto err;
- bch2_btree_node_lock_write_nofail(trans, iter->path, b);
+ bch2_btree_node_lock_write_nofail(trans, iter->path, &b->c);
if (new_hash) {
mutex_lock(&c->btree_cache.lock);
struct btree_path *path,
struct btree *b)
{
- bch2_btree_node_lock_write_nofail(trans, path, b);
+ bch2_btree_node_lock_write_nofail(trans, path, &b->c);
bch2_btree_node_prep_for_write(trans, path, b);
}
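
All the call-site changes above follow one pattern: pass &b->c (or &old->c) rather than the node itself. That works because both lockable types, btree nodes and cached keys, embed a struct btree_bkey_cached_common carrying the lock and level, so &b->c yields the common view the locking code needs. A compilable sketch of that relationship, with field lists elided and a placeholder six_lock so it builds standalone:

#include <stdint.h>
#include <stdio.h>

/* Placeholder so this sketch compiles outside the kernel tree. */
struct six_lock { int state; };

/* Common header shared by everything the locking code can lock. */
struct btree_bkey_cached_common {
	struct six_lock	lock;
	uint8_t		level;
};

struct btree {
	struct btree_bkey_cached_common c;	/* embedded common header */
	/* ... node fields elided ... */
};

struct bkey_cached {
	struct btree_bkey_cached_common c;	/* embedded common header */
	/* ... cached-key fields elided ... */
};

/* One helper now serves both types, which is the point of the patch. */
static void lock_common(struct btree_bkey_cached_common *b)
{
	printf("locking at level %u\n", (unsigned) b->level);
}

int main(void)
{
	struct btree b = { .c.level = 1 };
	struct bkey_cached ck = { .c.level = 0 };

	lock_common(&b.c);
	lock_common(&ck.c);
	return 0;
}
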