#undef x
};
+const struct bkey_ops bch2_bkey_null_ops = {
+ .min_val_size = U8_MAX,
+};
+
int bch2_bkey_val_invalid(struct bch_fs *c, struct bkey_s_c k,
unsigned flags, struct printbuf *err)
{
- const struct bkey_ops *ops;
-
- if (k.k->type >= KEY_TYPE_MAX) {
- prt_printf(err, "invalid type (%u >= %u)", k.k->type, KEY_TYPE_MAX);
- return -BCH_ERR_invalid_bkey;
- }
-
- ops = &bch2_bkey_ops[k.k->type];
+ const struct bkey_ops *ops = bch2_bkey_type_ops(k.k->type);
if (bkey_val_bytes(k.k) < ops->min_val_size) {
		prt_printf(err, "bad val size (%zu < %u)",
			   bkey_val_bytes(k.k), ops->min_val_size);
		return -BCH_ERR_invalid_bkey;
}
+ if (!ops->key_invalid)
+ return 0;
+
return ops->key_invalid(c, k, flags, err);
}
void bch2_val_to_text(struct printbuf *out, struct bch_fs *c,
struct bkey_s_c k)
{
- if (k.k->type < KEY_TYPE_MAX) {
- const struct bkey_ops *ops = &bch2_bkey_ops[k.k->type];
+ const struct bkey_ops *ops = bch2_bkey_type_ops(k.k->type);
- if (likely(ops->val_to_text))
- ops->val_to_text(out, c, k);
- } else {
- prt_printf(out, "(invalid type %u)", k.k->type);
- }
+ if (likely(ops->val_to_text))
+ ops->val_to_text(out, c, k);
}
void bch2_bkey_val_to_text(struct printbuf *out, struct bch_fs *c,
void bch2_bkey_swab_val(struct bkey_s k)
{
- const struct bkey_ops *ops = &bch2_bkey_ops[k.k->type];
+ const struct bkey_ops *ops = bch2_bkey_type_ops(k.k->type);
if (ops->swab)
ops->swab(k);
bool bch2_bkey_normalize(struct bch_fs *c, struct bkey_s k)
{
- const struct bkey_ops *ops = &bch2_bkey_ops[k.k->type];
+ const struct bkey_ops *ops = bch2_bkey_type_ops(k.k->type);
return ops->key_normalize
? ops->key_normalize(c, k)
bool bch2_bkey_merge(struct bch_fs *c, struct bkey_s l, struct bkey_s_c r)
{
- const struct bkey_ops *ops = &bch2_bkey_ops[l.k->type];
+ const struct bkey_ops *ops = bch2_bkey_type_ops(l.k->type);
- return bch2_bkey_maybe_mergable(l.k, r.k) &&
+ return ops->key_merge &&
+ bch2_bkey_maybe_mergable(l.k, r.k) &&
(u64) l.k->size + r.k->size <= KEY_SIZE_MAX &&
- bch2_bkey_ops[l.k->type].key_merge &&
!bch2_key_merging_disabled &&
ops->key_merge(c, l, r);
}
if (big_endian != CPU_BIG_ENDIAN)
bch2_bkey_swab_val(u);
- ops = &bch2_bkey_ops[k->type];
+ ops = bch2_bkey_type_ops(k->type);
if (ops->compat)
ops->compat(btree_id, version, big_endian, write, u);
enum btree_node_type;
extern const char * const bch2_bkey_types[];
+extern const struct bkey_ops bch2_bkey_null_ops;
/*
* key_invalid: checks validity of @k, returns 0 if good or -EINVAL if bad. If
extern const struct bkey_ops bch2_bkey_ops[];
+static inline const struct bkey_ops *bch2_bkey_type_ops(enum bch_bkey_type type)
+{
+ return likely(type < KEY_TYPE_MAX)
+ ? &bch2_bkey_ops[type]
+ : &bch2_bkey_null_ops;
+}
+
#define BKEY_INVALID_FROM_JOURNAL (1 << 1)
int bch2_bkey_val_invalid(struct bch_fs *, struct bkey_s_c, unsigned, struct printbuf *);
struct bkey_s_c old, struct bkey_s_c new,
unsigned flags)
{
- const struct bkey_ops *ops = &bch2_bkey_ops[old.k->type ?: new.k->type];
+ const struct bkey_ops *ops = bch2_bkey_type_ops(old.k->type ?: new.k->type);
return ops->atomic_trigger
? ops->atomic_trigger(trans, btree, level, old, new, flags)
struct bkey_s_c old, struct bkey_i *new,
unsigned flags)
{
- const struct bkey_ops *ops = &bch2_bkey_ops[old.k->type ?: new->k.type];
+ const struct bkey_ops *ops = bch2_bkey_type_ops(old.k->type ?: new->k.type);
return ops->trans_trigger
? ops->trans_trigger(trans, btree_id, level, old, new, flags)
{
struct bkey_s_c old = { &i->old_k, i->old_v };
struct bkey_i *new = i->k;
+ const struct bkey_ops *old_ops = bch2_bkey_type_ops(old.k->type);
+ const struct bkey_ops *new_ops = bch2_bkey_type_ops(i->k->k.type);
int ret;
verify_update_old_key(trans, i);
if (!btree_node_type_needs_gc(i->btree_id))
return 0;
- if (bch2_bkey_ops[old.k->type].atomic_trigger ==
- bch2_bkey_ops[i->k->k.type].atomic_trigger &&
+ if (old_ops->atomic_trigger == new_ops->atomic_trigger &&
((1U << old.k->type) & BTREE_TRIGGER_WANTS_OLD_AND_NEW)) {
ret = bch2_mark_key(trans, i->btree_id, i->level,
old, bkey_i_to_s_c(new),
*/
struct bkey old_k = i->old_k;
struct bkey_s_c old = { &old_k, i->old_v };
+ const struct bkey_ops *old_ops = bch2_bkey_type_ops(old.k->type);
+ const struct bkey_ops *new_ops = bch2_bkey_type_ops(i->k->k.type);
verify_update_old_key(trans, i);
if (!i->insert_trigger_run &&
!i->overwrite_trigger_run &&
- bch2_bkey_ops[old.k->type].trans_trigger ==
- bch2_bkey_ops[i->k->k.type].trans_trigger &&
+ old_ops->trans_trigger == new_ops->trans_trigger &&
((1U << old.k->type) & BTREE_TRIGGER_WANTS_OLD_AND_NEW)) {
i->overwrite_trigger_run = true;
i->insert_trigger_run = true;