bch2_write_inode_trans(&trans, inode, &inode_u,
inode_update_for_set_acl_fn,
(void *)(unsigned long) mode) ?:
- bch2_trans_commit(&trans, NULL, NULL,
+ bch2_trans_commit(&trans, NULL,
&inode->ei_journal_seq,
BTREE_INSERT_ATOMIC|
BTREE_INSERT_NOUNLOCK);
bch2_btree_iter_set_pos(iter, a->k.p);
- return bch2_btree_insert_at(c, NULL, NULL, journal_seq,
+ return bch2_btree_insert_at(c, NULL, journal_seq,
BTREE_INSERT_NOFAIL|
BTREE_INSERT_USE_RESERVE|
BTREE_INSERT_USE_ALLOC_RESERVE|
if (__bch2_btree_iter_relock(iter))
return 0;
- iter->flags &= ~BTREE_ITER_AT_END_OF_LEAF;
-
/*
* XXX: correctly using BTREE_ITER_UPTODATE should make using check_pos
* here unnecessary
iter->flags & BTREE_ITER_IS_EXTENTS))
__btree_iter_advance(l);
- if (!k && btree_iter_pos_after_node(iter, l->b)) {
+ if (!k && btree_iter_pos_after_node(iter, l->b))
btree_iter_set_dirty(iter, BTREE_ITER_NEED_TRAVERSE);
- iter->flags |= BTREE_ITER_AT_END_OF_LEAF;
- }
}
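With the AT_END_OF_LEAF flag gone, hitting the end of a leaf only marks the iterator dirty; a minimal sketch (not part of this patch, names taken from the surrounding code) of how a caller then revalidates:

	/* sketch: a dirty iterator is revalidated by the next traverse */
	if (iter->uptodate >= BTREE_ITER_NEED_TRAVERSE) {
		int ret = bch2_btree_iter_traverse(iter); /* re-walks to the leaf */

		if (ret)
			return ret;
	}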
void bch2_btree_iter_set_pos(struct btree_iter *iter, struct bpos new_pos)
* @pos or the first key strictly greater than @pos
*/
#define BTREE_ITER_IS_EXTENTS (1 << 4)
-/*
- * indicates we need to call bch2_btree_iter_traverse() to revalidate iterator:
- */
-#define BTREE_ITER_AT_END_OF_LEAF (1 << 5)
-#define BTREE_ITER_ERROR (1 << 6)
+#define BTREE_ITER_ERROR (1 << 5)
enum btree_iter_uptodate {
BTREE_ITER_UPTODATE = 0,
struct btree_insert_entry {
struct btree_iter *iter;
struct bkey_i *k;
- unsigned extra_res;
- /*
- * true if entire key was inserted - can only be false for
- * extents
- */
- bool done;
};
struct btree_trans {
BTREE_INSERT_NEED_GC_LOCK,
};
-struct extent_insert_hook {
- enum btree_insert_ret
- (*fn)(struct extent_insert_hook *, struct bpos, struct bpos,
- struct bkey_s_c, const struct bkey_i *);
-};
-
enum btree_gc_coalesce_fail_reason {
BTREE_GC_COALESCE_FAIL_RESERVE_GET,
BTREE_GC_COALESCE_FAIL_KEYLIST_REALLOC,
struct disk_reservation *disk_res;
struct journal_res journal_res;
u64 *journal_seq;
- struct extent_insert_hook *hook;
unsigned flags;
bool did_work;
((struct btree_insert_entry) { \
.iter = (_iter), \
.k = (_k), \
- .done = false, \
- })
-
-#define BTREE_INSERT_ENTRY_EXTRA_RES(_iter, _k, _extra) \
- ((struct btree_insert_entry) { \
- .iter = (_iter), \
- .k = (_k), \
- .extra_res = (_extra), \
- .done = false, \
})
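A hedged usage sketch of the one surviving entry initializer (the iterator and key locals here are hypothetical):

	/* sketch: an insert entry now carries only an iterator and a key */
	struct btree_insert_entry entry =
		BTREE_INSERT_ENTRY(&iter, &new_key.k_i);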
/**
* -EROFS: filesystem read only
* -EIO: journal or btree node IO error
*/
-#define bch2_btree_insert_at(_c, _disk_res, _hook, \
- _journal_seq, _flags, ...) \
+#define bch2_btree_insert_at(_c, _disk_res, _journal_seq, _flags, ...) \
__bch2_btree_insert_at(&(struct btree_insert) { \
.c = (_c), \
.disk_res = (_disk_res), \
.journal_seq = (_journal_seq), \
- .hook = (_hook), \
.flags = (_flags), \
.nr = COUNT_ARGS(__VA_ARGS__), \
.entries = (struct btree_insert_entry[]) { \
int bch2_btree_delete_at(struct btree_iter *, unsigned);
int bch2_btree_insert_list_at(struct btree_iter *, struct keylist *,
- struct disk_reservation *,
- struct extent_insert_hook *, u64 *, unsigned);
+ struct disk_reservation *, u64 *, unsigned);
int bch2_btree_insert(struct bch_fs *, enum btree_id, struct bkey_i *,
- struct disk_reservation *,
- struct extent_insert_hook *, u64 *, int flags);
+ struct disk_reservation *, u64 *, int flags);
int bch2_btree_delete_range(struct bch_fs *, enum btree_id,
- struct bpos, struct bpos, struct bversion,
- struct disk_reservation *,
- struct extent_insert_hook *, u64 *);
+ struct bpos, struct bpos, u64 *);
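A hedged call sketch against the slimmed-down prototype, mirroring the updated callers below (inum is a hypothetical inode number):

	/* sketch: drop every key belonging to one inode in a btree */
	ret = bch2_btree_delete_range(c, BTREE_ID_XATTRS,
				      POS(inum, 0), POS(inum + 1, 0),
				      NULL /* journal_seq */);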
int bch2_btree_node_rewrite(struct bch_fs *c, struct btree_iter *,
__le64, unsigned);
int bch2_trans_commit(struct btree_trans *,
struct disk_reservation *,
- struct extent_insert_hook *,
u64 *, unsigned);
#define bch2_trans_do(_c, _journal_seq, _flags, _do) \
do { \
bch2_trans_begin(&trans); \
\
- _ret = (_do) ?: bch2_trans_commit(&trans, NULL, NULL, \
+ _ret = (_do) ?: bch2_trans_commit(&trans, NULL, \
(_journal_seq), (_flags)); \
} while (_ret == -EINTR); \
\
{
struct btree *b;
- /*
- * iterators are inconsistent when they hit end of leaf, until
- * traversed again
- *
- * XXX inconsistent how?
- */
- if (iter->flags & BTREE_ITER_AT_END_OF_LEAF)
- return;
-
if (iter->uptodate >= BTREE_ITER_NEED_TRAVERSE)
return;
unsigned u64s;
int ret;
- trans_for_each_entry(trans, i) {
- BUG_ON(i->done);
+ trans_for_each_entry(trans, i)
BUG_ON(i->iter->uptodate >= BTREE_ITER_NEED_RELOCK);
- }
u64s = 0;
trans_for_each_entry(trans, i)
- u64s += jset_u64s(i->k->k.u64s + i->extra_res);
+ u64s += jset_u64s(i->k->k.u64s);
memset(&trans->journal_res, 0, sizeof(trans->journal_res));
if (!same_leaf_as_prev(trans, i))
u64s = 0;
- u64s += i->k->k.u64s + i->extra_res;
+ u64s += i->k->k.u64s;
switch (btree_key_can_insert(trans, i, &u64s)) {
case BTREE_INSERT_OK:
break;
trans_for_each_entry(trans, i) {
switch (btree_insert_key_leaf(trans, i)) {
case BTREE_INSERT_OK:
- i->done = true;
break;
case BTREE_INSERT_NEED_TRAVERSE:
+ BUG_ON((trans->flags & BTREE_INSERT_ATOMIC));
ret = -EINTR;
- break;
- case BTREE_INSERT_BTREE_NODE_FULL:
- ret = -EINTR;
- *split = i->iter;
- break;
- case BTREE_INSERT_ENOSPC:
- ret = -ENOSPC;
- break;
+ goto out;
default:
BUG();
}
-
- /*
- * If we did some work (i.e. inserted part of an extent),
- * we have to do all the other updates as well:
- */
- if (!trans->did_work && (ret || *split))
- break;
}
out:
multi_unlock_write(trans);
trans->did_work &&
!btree_node_locked(linked, 0));
}
-
- /* make sure we didn't lose an error: */
- if (!ret)
- trans_for_each_entry(trans, i)
- BUG_ON(!i->done);
}
BUG_ON(!(trans->flags & BTREE_INSERT_ATOMIC) && ret == -EINTR);
int bch2_trans_commit(struct btree_trans *trans,
struct disk_reservation *disk_res,
- struct extent_insert_hook *hook,
u64 *journal_seq,
unsigned flags)
{
bkey_init(&k.k);
k.k.p = iter->pos;
- return bch2_btree_insert_at(iter->c, NULL, NULL, NULL,
+ return bch2_btree_insert_at(iter->c, NULL, NULL,
BTREE_INSERT_NOFAIL|
BTREE_INSERT_USE_RESERVE|flags,
BTREE_INSERT_ENTRY(iter, &k));
int bch2_btree_insert_list_at(struct btree_iter *iter,
struct keylist *keys,
struct disk_reservation *disk_res,
- struct extent_insert_hook *hook,
u64 *journal_seq, unsigned flags)
{
BUG_ON(flags & BTREE_INSERT_ATOMIC);
bch2_verify_keylist_sorted(keys);
while (!bch2_keylist_empty(keys)) {
- int ret = bch2_btree_insert_at(iter->c, disk_res, hook,
+ int ret = bch2_btree_insert_at(iter->c, disk_res,
journal_seq, flags,
BTREE_INSERT_ENTRY(iter, bch2_keylist_front(keys)));
if (ret)
int bch2_btree_insert(struct bch_fs *c, enum btree_id id,
struct bkey_i *k,
struct disk_reservation *disk_res,
- struct extent_insert_hook *hook,
u64 *journal_seq, int flags)
{
struct btree_iter iter;
bch2_btree_iter_init(&iter, c, id, bkey_start_pos(&k->k),
BTREE_ITER_INTENT);
- ret = bch2_btree_insert_at(c, disk_res, hook, journal_seq, flags,
+ ret = bch2_btree_insert_at(c, disk_res, journal_seq, flags,
BTREE_INSERT_ENTRY(&iter, k));
bch2_btree_iter_unlock(&iter);
* Range is a half open interval - [start, end)
*/
int bch2_btree_delete_range(struct bch_fs *c, enum btree_id id,
- struct bpos start,
- struct bpos end,
- struct bversion version,
- struct disk_reservation *disk_res,
- struct extent_insert_hook *hook,
- u64 *journal_seq)
+ struct bpos start, struct bpos end,
+ u64 *journal_seq)
{
struct btree_iter iter;
struct bkey_s_c k;
BTREE_ITER_INTENT);
while ((k = bch2_btree_iter_peek(&iter)).k &&
- !(ret = btree_iter_err(k))) {
+ !(ret = btree_iter_err(k)) &&
+ bkey_cmp(iter.pos, end) < 0) {
unsigned max_sectors = KEY_SIZE_MAX & (~0 << c->block_bits);
/* really shouldn't be using a bare, unpadded bkey_i */
struct bkey_i delete;
- if (bkey_cmp(iter.pos, end) >= 0)
- break;
-
bkey_init(&delete.k);
/*
* bkey_start_pos(k.k)).
*/
delete.k.p = iter.pos;
- delete.k.version = version;
if (iter.flags & BTREE_ITER_IS_EXTENTS) {
/* create the biggest key we can */
bch2_cut_back(end, &delete.k);
}
- ret = bch2_btree_insert_at(c, disk_res, hook, journal_seq,
+ ret = bch2_btree_insert_at(c, NULL, journal_seq,
BTREE_INSERT_NOFAIL,
BTREE_INSERT_ENTRY(&iter, &delete));
if (ret)
s->trans->did_work = true;
}
-static enum btree_insert_ret
-__extent_insert_advance_pos(struct extent_insert_state *s,
- struct bpos next_pos,
- struct bkey_s_c k)
-{
- struct extent_insert_hook *hook = s->trans->hook;
- enum btree_insert_ret ret;
-
- if (hook)
- ret = hook->fn(hook, s->committed, next_pos, k, s->insert->k);
- else
- ret = BTREE_INSERT_OK;
-
- if (ret == BTREE_INSERT_OK)
- s->committed = next_pos;
-
- return ret;
-}
-
-/*
- * Update iter->pos, marking how much of @insert we've processed, and call hook
- * fn:
- */
-static enum btree_insert_ret
-extent_insert_advance_pos(struct extent_insert_state *s, struct bkey_s_c k)
-{
- struct btree *b = s->insert->iter->l[0].b;
- struct bpos next_pos = bpos_min(s->insert->k->k.p,
- k.k ? k.k->p : b->key.k.p);
- enum btree_insert_ret ret;
-
- /* hole? */
- if (k.k && bkey_cmp(s->committed, bkey_start_pos(k.k)) < 0) {
- ret = __extent_insert_advance_pos(s, bkey_start_pos(k.k),
- bkey_s_c_null);
- if (ret != BTREE_INSERT_OK)
- return ret;
- }
-
- /* avoid redundant calls to hook fn: */
- if (!bkey_cmp(s->committed, next_pos))
- return BTREE_INSERT_OK;
-
- return __extent_insert_advance_pos(s, next_pos, k);
-}
-
void bch2_extent_trim_atomic(struct bkey_i *k, struct btree_iter *iter)
{
struct btree *b = iter->l[0].b;
}
}
-static enum btree_insert_ret
-__bch2_insert_fixup_extent(struct extent_insert_state *s)
+static void __bch2_insert_fixup_extent(struct extent_insert_state *s)
{
struct btree_iter *iter = s->insert->iter;
struct btree_iter_level *l = &iter->l[0];
struct bkey_packed *_k;
struct bkey unpacked;
struct bkey_i *insert = s->insert->k;
- enum btree_insert_ret ret = BTREE_INSERT_OK;
while (bkey_cmp(s->committed, insert->k.p) < 0 &&
(_k = bch2_btree_node_iter_peek_filter(&l->iter, b,
if (bkey_cmp(bkey_start_pos(k.k), insert->k.p) >= 0)
break;
- ret = extent_insert_advance_pos(s, k.s_c);
- if (ret)
- break;
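+ /* mark how much of @insert we've processed: */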
+ s->committed = bpos_min(s->insert->k->k.p, k.k->p);
if (!bkey_whiteout(k.k))
s->update_journal = true;
break;
}
- if (ret == BTREE_INSERT_OK &&
- bkey_cmp(s->committed, insert->k.p) < 0)
- ret = extent_insert_advance_pos(s, bkey_s_c_null);
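+ /* no more overlapping keys: advance to the end of the insert or the leaf: */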
+ if (bkey_cmp(s->committed, insert->k.p) < 0)
+ s->committed = bpos_min(s->insert->k->k.p, b->key.k.p);
/*
* may have skipped past some deleted extents greater than the insert
bkey_cmp_left_packed(b, _k, &s->committed) > 0)
l->iter = node_iter;
}
-
- return ret;
}
/**
bch2_insert_fixup_extent(struct btree_insert *trans,
struct btree_insert_entry *insert)
{
- struct bch_fs *c = trans->c;
- struct btree_iter *iter = insert->iter;
- struct btree_iter_level *l = &iter->l[0];
- struct btree *b = l->b;
- enum btree_insert_ret ret = BTREE_INSERT_OK;
-
+ struct bch_fs *c = trans->c;
+ struct btree_iter *iter = insert->iter;
+ struct btree *b = iter->l[0].b;
struct extent_insert_state s = {
.trans = trans,
.insert = insert,
- .committed = insert->iter->pos,
+ .committed = iter->pos,
.whiteout = *insert->k,
.update_journal = !bkey_whiteout(&insert->k->k),
bkey_start_offset(&insert->k->k),
insert->k->k.size);
- ret = __bch2_insert_fixup_extent(&s);
+ __bch2_insert_fixup_extent(&s);
extent_insert_committed(&s);
EBUG_ON(bkey_cmp(iter->pos, bkey_start_pos(&insert->k->k)));
EBUG_ON(bkey_cmp(iter->pos, s.committed));
- EBUG_ON((bkey_cmp(iter->pos, b->key.k.p) == 0) !=
- !!(iter->flags & BTREE_ITER_AT_END_OF_LEAF));
-
- if (insert->k->k.size && (iter->flags & BTREE_ITER_AT_END_OF_LEAF))
- ret = BTREE_INSERT_NEED_TRAVERSE;
- WARN_ONCE((ret == BTREE_INSERT_OK) != (insert->k->k.size == 0),
- "ret %u insert->k.size %u", ret, insert->k->k.size);
+ if (insert->k->k.size) {
+ /* got to the end of this leaf node */
+ BUG_ON(bkey_cmp(iter->pos, b->key.k.p));
+ return BTREE_INSERT_NEED_TRAVERSE;
+ }
- return ret;
+ return BTREE_INSERT_OK;
}
const char *bch2_extent_invalid(const struct bch_fs *c, struct bkey_s_c k)
struct btree_node_iter_large;
struct btree_insert;
struct btree_insert_entry;
-struct extent_insert_hook;
struct bch_devs_mask;
union bch_extent_crc;
BTREE_INSERT_ENTRY(inode_iter, &inode_p.inode.k_i));
}
- ret = bch2_trans_commit(trans, disk_res, NULL,
+ ret = bch2_trans_commit(trans, disk_res,
&inode->ei_journal_seq,
BTREE_INSERT_NOFAIL|
BTREE_INSERT_ATOMIC|
bch2_trans_begin(&trans);
ret = bch2_write_inode_trans(&trans, inode, &inode_u, set, p) ?:
- bch2_trans_commit(&trans, NULL, NULL,
+ bch2_trans_commit(&trans, NULL,
&inode->ei_journal_seq,
BTREE_INSERT_ATOMIC|
BTREE_INSERT_NOUNLOCK|
inode_update_for_create_fn,
&inode_u)
: 0) ?:
- bch2_trans_commit(&trans, NULL, NULL,
+ bch2_trans_commit(&trans, NULL,
&journal_seq,
BTREE_INSERT_ATOMIC|
BTREE_INSERT_NOUNLOCK);
bch2_write_inode_trans(&trans, inode, &inode_u,
inode_update_for_link_fn,
NULL) ?:
- bch2_trans_commit(&trans, NULL, NULL,
+ bch2_trans_commit(&trans, NULL,
&inode->ei_journal_seq,
BTREE_INSERT_ATOMIC|
BTREE_INSERT_NOUNLOCK);
bch2_write_inode_trans(&trans, inode, &inode_u,
inode_update_for_unlink_fn,
NULL) ?:
- bch2_trans_commit(&trans, NULL, NULL,
+ bch2_trans_commit(&trans, NULL,
&dir->ei_journal_seq,
BTREE_INSERT_ATOMIC|
BTREE_INSERT_NOUNLOCK|
? bch2_write_inode_trans(&trans, i.dst_inode, &dst_inode_u,
inode_update_for_rename_fn, &i)
: 0 ) ?:
- bch2_trans_commit(&trans, NULL, NULL,
+ bch2_trans_commit(&trans, NULL,
&journal_seq,
BTREE_INSERT_ATOMIC|
BTREE_INSERT_NOUNLOCK);
(iattr->ia_valid & ATTR_MODE
? bch2_acl_chmod(&trans, inode, iattr->ia_mode, &acl)
: 0) ?:
- bch2_trans_commit(&trans, NULL, NULL,
+ bch2_trans_commit(&trans, NULL,
&inode->ei_journal_seq,
BTREE_INSERT_ATOMIC|
BTREE_INSERT_NOUNLOCK|
bch2_inode_pack(&packed, lostfound_inode);
ret = bch2_btree_insert(c, BTREE_ID_INODES, &packed.inode.k_i,
- NULL, NULL, NULL,
- BTREE_INSERT_NOFAIL);
+ NULL, NULL, BTREE_INSERT_NOFAIL);
if (ret) {
bch_err(c, "error %i reattaching inode %llu while updating lost+found",
ret, inum);
}
ret = bch2_hash_delete_at(&trans, desc, info, iter) ?:
- bch2_trans_commit(&trans, NULL, NULL, NULL,
+ bch2_trans_commit(&trans, NULL, NULL,
BTREE_INSERT_ATOMIC|
BTREE_INSERT_NOFAIL);
err:
return ret;
}
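+/*
+ * Delete all extent keys for @inode_nr from @new_size (rounded up to the
+ * filesystem block size) through the end of the inode:
+ */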
+static int bch2_inode_truncate(struct bch_fs *c, u64 inode_nr, u64 new_size)
+{
+ return bch2_btree_delete_range(c, BTREE_ID_EXTENTS,
+ POS(inode_nr, round_up(new_size, block_bytes(c)) >> 9),
+ POS(inode_nr + 1, 0), NULL);
+}
+
/*
* Walk extents: verify that extents have a corresponding S_ISREG inode, and
* that i_size and i_sectors are consistent
k.k->type, k.k->p.inode, w.inode.bi_mode)) {
bch2_btree_iter_unlock(&iter);
- ret = bch2_inode_truncate(c, k.k->p.inode, 0, NULL, NULL);
+ ret = bch2_inode_truncate(c, k.k->p.inode, 0);
if (ret)
goto err;
continue;
bch2_inode_pack(&p, &w.inode);
ret = bch2_btree_insert(c, BTREE_ID_INODES,
- &p.inode.k_i,
- NULL,
- NULL,
- NULL,
+ &p.inode.k_i, NULL, NULL,
BTREE_INSERT_NOFAIL);
if (ret) {
bch_err(c, "error in fs gc: error %i "
bch2_btree_iter_unlock(&iter);
ret = bch2_inode_truncate(c, k.k->p.inode,
- round_up(w.inode.bi_size, PAGE_SIZE) >> 9,
- NULL, NULL);
+ w.inode.bi_size);
if (ret)
goto err;
continue;
bkey_reassemble(&n->k_i, d.s_c);
n->v.d_type = mode_to_type(target.bi_mode);
- ret = bch2_btree_insert_at(c, NULL, NULL, NULL,
+ ret = bch2_btree_insert_at(c, NULL, NULL,
BTREE_INSERT_NOFAIL,
BTREE_INSERT_ENTRY(iter, &n->k_i));
kfree(n);
bch2_inode_pack(&packed, root_inode);
return bch2_btree_insert(c, BTREE_ID_INODES, &packed.inode.k_i,
- NULL, NULL, NULL, BTREE_INSERT_NOFAIL);
+ NULL, NULL, BTREE_INSERT_NOFAIL);
}
/* Get lost+found, create if it doesn't exist: */
bch2_inode_pack(&packed, root_inode);
ret = bch2_btree_insert(c, BTREE_ID_INODES, &packed.inode.k_i,
- NULL, NULL, NULL, BTREE_INSERT_NOFAIL);
+ NULL, NULL, BTREE_INSERT_NOFAIL);
if (ret)
return ret;
* just switch units to bytes and that issue goes away
*/
- ret = bch2_inode_truncate(c, u.bi_inum,
- round_up(u.bi_size, PAGE_SIZE) >> 9,
- NULL, NULL);
+ ret = bch2_inode_truncate(c, u.bi_inum, u.bi_size);
if (ret) {
bch_err(c, "error in fs gc: error %i "
"truncating inode", ret);
bch2_inode_pack(&p, &u);
- ret = bch2_btree_insert_at(c, NULL, NULL, NULL,
+ ret = bch2_btree_insert_at(c, NULL, NULL,
BTREE_INSERT_NOFAIL,
BTREE_INSERT_ENTRY(iter, &p.inode.k_i));
if (ret && ret != -EINTR)
__bch2_inode_create(&trans, inode_u, min, max, hint));
}
-int bch2_inode_truncate(struct bch_fs *c, u64 inode_nr, u64 new_size,
- struct extent_insert_hook *hook, u64 *journal_seq)
-{
- return bch2_btree_delete_range(c, BTREE_ID_EXTENTS,
- POS(inode_nr, new_size),
- POS(inode_nr + 1, 0),
- ZERO_VERSION, NULL, hook,
- journal_seq);
-}
-
int bch2_inode_rm(struct bch_fs *c, u64 inode_nr)
{
struct btree_iter iter;
struct bkey_i_inode_generation delete;
+ struct bpos start = POS(inode_nr, 0);
+ struct bpos end = POS(inode_nr + 1, 0);
int ret;
- ret = bch2_inode_truncate(c, inode_nr, 0, NULL, NULL);
- if (ret < 0)
- return ret;
-
- ret = bch2_btree_delete_range(c, BTREE_ID_XATTRS,
- POS(inode_nr, 0),
- POS(inode_nr + 1, 0),
- ZERO_VERSION, NULL, NULL, NULL);
- if (ret < 0)
- return ret;
-
/*
* If this was a directory, there shouldn't be any real dirents left -
* but there could be whiteouts (from hash collisions) that we should
* XXX: the dirent code could ideally delete whiteouts when they're no
* longer needed
*/
- ret = bch2_btree_delete_range(c, BTREE_ID_DIRENTS,
- POS(inode_nr, 0),
- POS(inode_nr + 1, 0),
- ZERO_VERSION, NULL, NULL, NULL);
- if (ret < 0)
+ ret = bch2_btree_delete_range(c, BTREE_ID_EXTENTS,
+ start, end, NULL) ?:
+ bch2_btree_delete_range(c, BTREE_ID_XATTRS,
+ start, end, NULL) ?:
+ bch2_btree_delete_range(c, BTREE_ID_DIRENTS,
+ start, end, NULL);
+ if (ret)
return ret;
bch2_btree_iter_init(&iter, c, BTREE_ID_INODES, POS(inode_nr, 0),
delete.v.bi_generation = cpu_to_le32(bi_generation);
}
- ret = bch2_btree_insert_at(c, NULL, NULL, NULL,
+ ret = bch2_btree_insert_at(c, NULL, NULL,
BTREE_INSERT_ATOMIC|
BTREE_INSERT_NOFAIL,
BTREE_INSERT_ENTRY(&iter, &delete.k_i));
int bch2_inode_create(struct bch_fs *, struct bch_inode_unpacked *,
u64, u64, u64 *);
-int bch2_inode_truncate(struct bch_fs *, u64, u64,
- struct extent_insert_hook *, u64 *);
int bch2_inode_rm(struct bch_fs *, u64);
int bch2_inode_find_by_inum(struct bch_fs *, u64,
BTREE_ITER_INTENT);
ret = bch2_btree_insert_list_at(&iter, keys, &op->res,
- NULL, op_journal_seq(op),
+ op_journal_seq(op),
BTREE_INSERT_NOFAIL|
BTREE_INSERT_USE_RESERVE);
bch2_btree_iter_unlock(&iter);
if (!bch2_extent_narrow_crcs(e, new_crc))
goto out;
- ret = bch2_btree_insert_at(c, NULL, NULL, NULL,
+ ret = bch2_btree_insert_at(c, NULL, NULL,
BTREE_INSERT_ATOMIC|
BTREE_INSERT_NOFAIL|
BTREE_INSERT_NOWAIT,
bch2_disk_reservation_init(c, 0);
ret = bch2_btree_insert(c, entry->btree_id, k,
- &disk_res, NULL, NULL,
+ &disk_res, NULL,
BTREE_INSERT_NOFAIL|
BTREE_INSERT_JOURNAL_REPLAY);
}
iter.pos = bkey_start_pos(&tmp.key.k);
- ret = bch2_btree_insert_at(c, NULL, NULL, NULL,
+ ret = bch2_btree_insert_at(c, NULL, NULL,
BTREE_INSERT_ATOMIC|
BTREE_INSERT_NOFAIL,
BTREE_INSERT_ENTRY(&iter, &tmp.key));
break;
ret = bch2_btree_insert_at(c, &op->res,
- NULL, op_journal_seq(op),
+ op_journal_seq(op),
BTREE_INSERT_ATOMIC|
BTREE_INSERT_NOFAIL|
BTREE_INSERT_USE_RESERVE|
ret = bch2_btree_delete_range(c, BTREE_ID_QUOTAS,
POS(QTYP_USR, 0),
POS(QTYP_USR + 1, 0),
- ZERO_VERSION, NULL, NULL, NULL);
+ NULL);
if (ret)
return ret;
}
ret = bch2_btree_delete_range(c, BTREE_ID_QUOTAS,
POS(QTYP_GRP, 0),
POS(QTYP_GRP + 1, 0),
- ZERO_VERSION, NULL, NULL, NULL);
+ NULL);
if (ret)
return ret;
}
ret = bch2_btree_delete_range(c, BTREE_ID_QUOTAS,
POS(QTYP_PRJ, 0),
POS(QTYP_PRJ + 1, 0),
- ZERO_VERSION, NULL, NULL, NULL);
+ NULL);
if (ret)
return ret;
}
if (qdq->d_fieldmask & QC_INO_HARD)
new_quota.v.c[Q_INO].hardlimit = cpu_to_le64(qdq->d_ino_hardlimit);
- ret = bch2_btree_insert_at(c, NULL, NULL, NULL, 0,
+ ret = bch2_btree_insert_at(c, NULL, NULL, 0,
BTREE_INSERT_ENTRY(&iter, &new_quota.k_i));
bch2_btree_iter_unlock(&iter);
err = "error creating root directory";
ret = bch2_btree_insert(c, BTREE_ID_INODES,
&packed_inode.inode.k_i,
- NULL, NULL, NULL, 0);
+ NULL, NULL, 0);
if (ret)
goto err;
err = "error creating lost+found";
ret = bch2_btree_insert(c, BTREE_ID_INODES,
&packed_inode.inode.k_i,
- NULL, NULL, NULL, 0);
+ NULL, NULL, 0);
if (ret)
goto err;
ret = bch2_btree_delete_range(c, BTREE_ID_ALLOC,
POS(ca->dev_idx, 0),
POS(ca->dev_idx + 1, 0),
- ZERO_VERSION,
- NULL, NULL, NULL);
+ NULL);
if (ret) {
bch_err(ca, "Remove failed, error deleting alloc info");
goto err;
ret = bch2_btree_delete_range(c, BTREE_ID_EXTENTS,
POS(0, 0), POS(0, U64_MAX),
- ZERO_VERSION, NULL, NULL, NULL);
+ NULL);
BUG_ON(ret);
ret = bch2_btree_delete_range(c, BTREE_ID_DIRENTS,
POS(0, 0), POS(0, U64_MAX),
- ZERO_VERSION, NULL, NULL, NULL);
+ NULL);
BUG_ON(ret);
}
ret = bch2_btree_iter_traverse(&iter);
BUG_ON(ret);
- ret = bch2_btree_insert_at(c, NULL, NULL, NULL, 0,
+ ret = bch2_btree_insert_at(c, NULL, NULL, 0,
BTREE_INSERT_ENTRY(&iter, &k.k_i));
BUG_ON(ret);
ret = bch2_btree_iter_traverse(&iter);
BUG_ON(ret);
- ret = bch2_btree_insert_at(c, NULL, NULL, NULL, 0,
+ ret = bch2_btree_insert_at(c, NULL, NULL, 0,
BTREE_INSERT_ENTRY(&iter, &k.k_i));
BUG_ON(ret);
k.k.p.offset = i;
ret = bch2_btree_insert(c, BTREE_ID_DIRENTS, &k.k_i,
- NULL, NULL, NULL, 0);
+ NULL, NULL, 0);
BUG_ON(ret);
}
k.k.size = 8;
ret = bch2_btree_insert(c, BTREE_ID_EXTENTS, &k.k_i,
- NULL, NULL, NULL, 0);
+ NULL, NULL, 0);
BUG_ON(ret);
}
k.k.p.offset = i * 2;
ret = bch2_btree_insert(c, BTREE_ID_DIRENTS, &k.k_i,
- NULL, NULL, NULL, 0);
+ NULL, NULL, 0);
BUG_ON(ret);
}
k.k.size = 8;
ret = bch2_btree_insert(c, BTREE_ID_EXTENTS, &k.k_i,
- NULL, NULL, NULL, 0);
+ NULL, NULL, 0);
BUG_ON(ret);
}
k.k_i.k.version.lo = test_version++;
ret = bch2_btree_insert(c, BTREE_ID_EXTENTS, &k.k_i,
- NULL, NULL, NULL, 0);
+ NULL, NULL, 0);
BUG_ON(ret);
}
k.k.p.offset = test_rand();
ret = bch2_btree_insert(c, BTREE_ID_DIRENTS, &k.k_i,
- NULL, NULL, NULL, 0);
+ NULL, NULL, 0);
BUG_ON(ret);
}
}
bkey_cookie_init(&k.k_i);
k.k.p = iter.pos;
- ret = bch2_btree_insert_at(c, NULL, NULL, NULL, 0,
+ ret = bch2_btree_insert_at(c, NULL, NULL, 0,
BTREE_INSERT_ENTRY(&iter, &k.k_i));
BUG_ON(ret);
}
k.k.p.offset = test_rand();
ret = bch2_btree_insert(c, BTREE_ID_DIRENTS, &k,
- NULL, NULL, NULL, 0);
+ NULL, NULL, 0);
BUG_ON(ret);
}
}
BTREE_ITER_SLOTS|BTREE_ITER_INTENT, k) {
insert.k.p = iter.pos;
- ret = bch2_btree_insert_at(c, NULL, NULL, NULL, 0,
+ ret = bch2_btree_insert_at(c, NULL, NULL, 0,
BTREE_INSERT_ENTRY(&iter, &insert.k_i));
BUG_ON(ret);
bkey_reassemble(&u.k_i, k);
- ret = bch2_btree_insert_at(c, NULL, NULL, NULL, 0,
+ ret = bch2_btree_insert_at(c, NULL, NULL, 0,
BTREE_INSERT_ENTRY(&iter, &u.k_i));
BUG_ON(ret);
}
ret = bch2_btree_delete_range(c, BTREE_ID_DIRENTS,
POS(0, 0), POS(0, U64_MAX),
- ZERO_VERSION, NULL, NULL, NULL);
+ NULL);
BUG_ON(ret);
}