static inline void btree_path_list_add(struct btree_trans *, struct btree_path *,
struct btree_path *);
+/*
+ * Return the instruction pointer recorded when @iter was initialized
+ * (set from @ip in bch2_trans_iter_init_common() under
+ * CONFIG_BCACHEFS_DEBUG); returns 0 in non-debug builds.
+ */
+static inline unsigned long btree_iter_ip_allocated(struct btree_iter *iter)
+{
+#ifdef CONFIG_BCACHEFS_DEBUG
+ return iter->ip_allocated;
+#else
+ return 0;
+#endif
+}
+
static struct btree_path *btree_path_alloc(struct btree_trans *, struct btree_path *);
/*
__flatten
struct btree_path *__bch2_btree_path_make_mut(struct btree_trans *trans,
- struct btree_path *path, bool intent)
+ struct btree_path *path, bool intent,
+ unsigned long ip)
{
__btree_path_put(path, intent);
path = btree_path_clone(trans, path, intent);
struct btree_path * __must_check
__bch2_btree_path_set_pos(struct btree_trans *trans,
- struct btree_path *path, struct bpos new_pos,
- bool intent, int cmp)
+ struct btree_path *path, struct bpos new_pos,
+ bool intent, unsigned long ip, int cmp)
{
unsigned level = path->level;
EBUG_ON(trans->restarted);
EBUG_ON(!path->ref);
- path = bch2_btree_path_make_mut(trans, path, intent);
+ path = bch2_btree_path_make_mut(trans, path, intent, ip);
path->pos = new_pos;
trans->paths_sorted = false;
struct btree_path *bch2_path_get(struct btree_trans *trans,
enum btree_id btree_id, struct bpos pos,
unsigned locks_want, unsigned level,
- unsigned flags)
+ unsigned flags, unsigned long ip)
{
struct btree_path *path, *path_pos = NULL;
bool cached = flags & BTREE_ITER_CACHED;
path_pos->btree_id == btree_id &&
path_pos->level == level) {
__btree_path_get(path_pos, intent);
- path = bch2_btree_path_set_pos(trans, path_pos, pos, intent);
+ path = bch2_btree_path_set_pos(trans, path_pos, pos, intent, ip);
} else {
path = btree_path_alloc(trans, path_pos);
path_pos = NULL;
for (i = 0; i < ARRAY_SIZE(path->l); i++)
path->l[i].b = ERR_PTR(-BCH_ERR_no_btree_node_init);
#ifdef CONFIG_BCACHEFS_DEBUG
- path->ip_allocated = _RET_IP_;
+ path->ip_allocated = ip;
#endif
trans->paths_sorted = false;
}
iter->path = bch2_btree_path_set_pos(iter->trans, iter->path,
btree_iter_search_key(iter),
- iter->flags & BTREE_ITER_INTENT);
+ iter->flags & BTREE_ITER_INTENT,
+ btree_iter_ip_allocated(iter));
ret = bch2_btree_path_traverse(iter->trans, iter->path, iter->flags);
if (ret)
iter->k.p = iter->pos = b->key.k.p;
iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
- iter->flags & BTREE_ITER_INTENT);
+ iter->flags & BTREE_ITER_INTENT,
+ btree_iter_ip_allocated(iter));
btree_path_set_should_be_locked(iter->path);
out:
bch2_btree_iter_verify_entry_exit(iter);
*/
path = iter->path =
bch2_btree_path_set_pos(trans, path, bpos_successor(iter->pos),
- iter->flags & BTREE_ITER_INTENT);
+ iter->flags & BTREE_ITER_INTENT,
+ btree_iter_ip_allocated(iter));
btree_path_set_level_down(trans, path, iter->min_depth);
iter->k.p = iter->pos = b->key.k.p;
iter->path = bch2_btree_path_set_pos(trans, iter->path, b->key.k.p,
- iter->flags & BTREE_ITER_INTENT);
+ iter->flags & BTREE_ITER_INTENT,
+ btree_iter_ip_allocated(iter));
btree_path_set_should_be_locked(iter->path);
BUG_ON(iter->path->uptodate);
out:
iter->key_cache_path = bch2_path_get(trans, iter->btree_id, pos,
iter->flags & BTREE_ITER_INTENT, 0,
iter->flags|BTREE_ITER_CACHED|
- BTREE_ITER_CACHED_NOFILL);
+ BTREE_ITER_CACHED_NOFILL,
+ _THIS_IP_);
iter->key_cache_path = bch2_btree_path_set_pos(trans, iter->key_cache_path, pos,
- iter->flags & BTREE_ITER_INTENT);
+ iter->flags & BTREE_ITER_INTENT,
+ btree_iter_ip_allocated(iter));
ret = bch2_btree_path_traverse(trans, iter->key_cache_path,
iter->flags|BTREE_ITER_CACHED) ?:
struct btree_path_level *l;
iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
- iter->flags & BTREE_ITER_INTENT);
+ iter->flags & BTREE_ITER_INTENT,
+ btree_iter_ip_allocated(iter));
ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
if (unlikely(ret)) {
iter->update_path = bch2_btree_path_set_pos(trans,
iter->update_path, pos,
- iter->flags & BTREE_ITER_INTENT);
+ iter->flags & BTREE_ITER_INTENT,
+ _THIS_IP_);
ret = bch2_btree_path_traverse(trans, iter->update_path, iter->flags);
if (unlikely(ret)) {
k = bkey_s_c_err(ret);
iter->pos = iter_pos;
iter->path = bch2_btree_path_set_pos(trans, iter->path, k.k->p,
- iter->flags & BTREE_ITER_INTENT);
+ iter->flags & BTREE_ITER_INTENT,
+ btree_iter_ip_allocated(iter));
btree_path_set_should_be_locked(iter->path);
out_no_locked:
while (1) {
iter->path = bch2_btree_path_set_pos(trans, iter->path, iter->pos,
- iter->flags & BTREE_ITER_INTENT);
+ iter->flags & BTREE_ITER_INTENT,
+ btree_iter_ip_allocated(iter));
ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
if (unlikely(ret)) {
while (1) {
iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
- iter->flags & BTREE_ITER_INTENT);
+ iter->flags & BTREE_ITER_INTENT,
+ btree_iter_ip_allocated(iter));
ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
if (unlikely(ret)) {
search_key = btree_iter_search_key(iter);
iter->path = bch2_btree_path_set_pos(trans, iter->path, search_key,
- iter->flags & BTREE_ITER_INTENT);
+ iter->flags & BTREE_ITER_INTENT,
+ btree_iter_ip_allocated(iter));
ret = bch2_btree_path_traverse(trans, iter->path, iter->flags);
if (unlikely(ret)) {
unsigned flags)
{
bch2_trans_iter_init_common(trans, iter, btree_id, pos, 0, 0,
- bch2_btree_iter_flags(trans, btree_id, flags));
+ bch2_btree_iter_flags(trans, btree_id, flags),
+ _RET_IP_);
}
void bch2_trans_iter_init_outlined(struct btree_trans *trans,
unsigned flags)
{
bch2_trans_iter_init_common(trans, iter, btree_id, pos, 0, 0,
- bch2_btree_iter_flags(trans, btree_id, flags));
+ bch2_btree_iter_flags(trans, btree_id, flags),
+ _RET_IP_);
}
void bch2_trans_node_iter_init(struct btree_trans *trans,
flags |= BTREE_ITER_ALL_SNAPSHOTS;
bch2_trans_iter_init_common(trans, iter, btree_id, pos, locks_want, depth,
- __bch2_btree_iter_flags(trans, btree_id, flags));
+ __bch2_btree_iter_flags(trans, btree_id, flags),
+ _RET_IP_);
iter->min_depth = depth;
_path = __trans_next_path_with_node((_trans), (_b), \
(_path)->idx + 1))
-struct btree_path *__bch2_btree_path_make_mut(struct btree_trans *,
- struct btree_path *, bool);
+struct btree_path *__bch2_btree_path_make_mut(struct btree_trans *, struct btree_path *,
+ bool, unsigned long);
+/*
+ * Get a btree_path we're allowed to modify: clones @path (via
+ * __bch2_btree_path_make_mut()) when it is shared (ref > 1) or marked
+ * preserve.  @ip is the caller's instruction pointer, threaded through
+ * for the debug-only ip_allocated bookkeeping.
+ */
static inline struct btree_path * __must_check
bch2_btree_path_make_mut(struct btree_trans *trans,
- struct btree_path *path, bool intent)
+ struct btree_path *path, bool intent,
+ unsigned long ip)
{
if (path->ref > 1 || path->preserve)
- path = __bch2_btree_path_make_mut(trans, path, intent);
+ path = __bch2_btree_path_make_mut(trans, path, intent, ip);
path->should_be_locked = false;
return path;
}
struct btree_path * __must_check
__bch2_btree_path_set_pos(struct btree_trans *, struct btree_path *,
- struct bpos, bool, int);
+ struct bpos, bool, unsigned long, int);
+/*
+ * Reposition @path to @new_pos; fast-path no-op when the position is
+ * unchanged, otherwise defers to __bch2_btree_path_set_pos() (which may
+ * clone the path).  @ip is passed through for debug ip_allocated
+ * tracking.
+ */
static inline struct btree_path * __must_check
bch2_btree_path_set_pos(struct btree_trans *trans,
struct btree_path *path, struct bpos new_pos,
- bool intent)
+ bool intent, unsigned long ip)
{
int cmp = bpos_cmp(new_pos, path->pos);
return cmp
- ? __bch2_btree_path_set_pos(trans, path, new_pos, intent, ip, cmp)
+ ? __bch2_btree_path_set_pos(trans, path, new_pos, intent, ip, cmp)
: path;
}
int __must_check bch2_btree_path_traverse(struct btree_trans *,
struct btree_path *, unsigned);
struct btree_path *bch2_path_get(struct btree_trans *, enum btree_id, struct bpos,
- unsigned, unsigned, unsigned);
+ unsigned, unsigned, unsigned, unsigned long);
struct bkey_s_c bch2_btree_path_peek_slot(struct btree_path *, struct bkey *);
struct bkey_i *bch2_btree_journal_peek_slot(struct btree_trans *,
unsigned btree_id, struct bpos pos,
unsigned locks_want,
unsigned depth,
- unsigned flags)
+ unsigned flags,
+ unsigned long ip)
{
memset(iter, 0, sizeof(*iter));
iter->trans = trans;
iter->pos = pos;
iter->k.p = pos;
+#ifdef CONFIG_BCACHEFS_DEBUG
+ iter->ip_allocated = ip;
+#endif
iter->path = bch2_path_get(trans, btree_id, iter->pos,
- locks_want, depth, flags);
+ locks_want, depth, flags, ip);
}
void bch2_trans_iter_init_outlined(struct btree_trans *, struct btree_iter *,
if (__builtin_constant_p(btree_id) &&
__builtin_constant_p(flags))
bch2_trans_iter_init_common(trans, iter, btree_id, pos, 0, 0,
- bch2_btree_iter_flags(trans, btree_id, flags));
+ bch2_btree_iter_flags(trans, btree_id, flags),
+ _THIS_IP_);
else
bch2_trans_iter_init_outlined(trans, iter, btree_id, pos, flags);
}