seqcount_t usage_lock;
struct bch_fs_usage *usage_base;
- struct bch_fs_usage __percpu *usage[2];
+ struct bch_fs_usage __percpu *usage[JOURNAL_BUF_NR];
struct bch_fs_usage __percpu *usage_gc;
u64 __percpu *online_reserved;
struct genradix_iter dst_iter = genradix_iter_init(&c->stripes[0], 0);
struct genradix_iter src_iter = genradix_iter_init(&c->stripes[1], 0);
struct stripe *dst, *src;
- unsigned i;
c->ec_stripes_heap.used = 0;
{
	/*
	 * GC keeps its own copy of the usage stats; otherwise updates at
	 * @journal_seq go into the per-cpu delta array for that journal
	 * buffer (journal_seq & JOURNAL_BUF_MASK selects the buffer).
	 */
	return this_cpu_ptr(gc
			    ? c->usage_gc
			    : c->usage[journal_seq & JOURNAL_BUF_MASK]);
}
/*
 * Read one usage counter: @v points into c->usage_base, and the value at
 * the same offset in every per-journal-buffer per-cpu delta array is added
 * to it. The seqcount retry loop guarantees we don't observe a concurrent
 * bch2_fs_usage_acc_to_base() halfway through folding deltas into the base.
 */
u64 bch2_fs_usage_read_one(struct bch_fs *c, u64 *v)
{
	ssize_t offset = v - (u64 *) c->usage_base;
	unsigned i, seq;
	u64 ret;

	BUG_ON(offset < 0 || offset >= fs_usage_u64s(c));

	do {
		seq = read_seqcount_begin(&c->usage_lock);
		ret = *v;

		for (i = 0; i < ARRAY_SIZE(c->usage); i++)
			ret += percpu_u64_get((u64 __percpu *) c->usage[i] + offset);
	} while (read_seqcount_retry(&c->usage_lock, seq));

	return ret;
}
struct bch_fs_usage_online *bch2_fs_usage_read(struct bch_fs *c)
{
struct bch_fs_usage_online *ret;
- unsigned seq, i, u64s;
+ unsigned seq, i, v, u64s = fs_usage_u64s(c);
+retry:
+ ret = kmalloc(u64s * sizeof(u64), GFP_NOFS);
+ if (unlikely(!ret))
+ return NULL;
percpu_down_read(&c->mark_lock);
- ret = kmalloc(sizeof(struct bch_fs_usage_online) +
- sizeof(u64) + c->replicas.nr, GFP_NOFS);
- if (unlikely(!ret)) {
+ v = fs_usage_u64s(c);
+ if (unlikely(u64s != v)) {
+ u64s = v;
percpu_up_read(&c->mark_lock);
- return NULL;
+ kfree(ret);
+ goto retry;
}
ret->online_reserved = percpu_u64_get(c->online_reserved);
u64s = fs_usage_u64s(c);
do {
seq = read_seqcount_begin(&c->usage_lock);
- memcpy(&ret->u, c->usage_base, u64s * sizeof(u64));
+ memcpy(ret, c->usage_base, u64s * sizeof(u64));
for (i = 0; i < ARRAY_SIZE(c->usage); i++)
acc_u64s_percpu((u64 *) &ret->u, (u64 __percpu *) c->usage[i], u64s);
} while (read_seqcount_retry(&c->usage_lock, seq));
struct journal_res;
+#define JOURNAL_BUF_BITS 1
+#define JOURNAL_BUF_NR (1U << JOURNAL_BUF_BITS)
+#define JOURNAL_BUF_MASK (JOURNAL_BUF_NR - 1)
+
/*
 * We put JOURNAL_BUF_NR of these in struct journal; we use them for writes
 * to the journal that are being staged or in flight.
static int replicas_table_update(struct bch_fs *c,
struct bch_replicas_cpu *new_r)
{
- struct bch_fs_usage __percpu *new_usage[2];
+ struct bch_fs_usage __percpu *new_usage[JOURNAL_BUF_NR];
struct bch_fs_usage_online *new_scratch = NULL;
struct bch_fs_usage __percpu *new_gc = NULL;
struct bch_fs_usage *new_base = NULL;
sizeof(u64) * new_r->nr;
unsigned scratch_bytes = sizeof(struct bch_fs_usage_online) +
sizeof(u64) * new_r->nr;
- int ret = -ENOMEM;
+ int ret = 0;
+
+ memset(new_usage, 0, sizeof(new_usage));
+
+ for (i = 0; i < ARRAY_SIZE(new_usage); i++)
+ if (!(new_usage[i] = __alloc_percpu_gfp(bytes,
+ sizeof(u64), GFP_NOIO)))
+ goto err;
memset(new_usage, 0, sizeof(new_usage));
if (!(new_base = kzalloc(bytes, GFP_NOIO)) ||
!(new_scratch = kmalloc(scratch_bytes, GFP_NOIO)) ||
(c->usage_gc &&
- !(new_gc = __alloc_percpu_gfp(bytes, sizeof(u64), GFP_NOIO)))) {
- bch_err(c, "error updating replicas table: memory allocation failure");
+ !(new_gc = __alloc_percpu_gfp(bytes, sizeof(u64), GFP_NOIO))))
goto err;
- }
for (i = 0; i < ARRAY_SIZE(new_usage); i++)
if (c->usage[i])
swap(c->usage_scratch, new_scratch);
swap(c->usage_gc, new_gc);
swap(c->replicas, *new_r);
- ret = 0;
-err:
+out:
free_percpu(new_gc);
kfree(new_scratch);
free_percpu(new_usage[1]);
free_percpu(new_usage[0]);
kfree(new_base);
return ret;
+err:
+ bch_err(c, "error updating replicas table: memory allocation failure");
+ ret = -ENOMEM;
+ goto out;
}
static unsigned reserve_journal_replicas(struct bch_fs *c,
struct bch_replicas_cpu n;
if (!__replicas_has_entry(&c->replicas_gc, e) &&
- (c->usage_base->replicas[i] ||
- percpu_u64_get(&c->usage[0]->replicas[i]) ||
- percpu_u64_get(&c->usage[1]->replicas[i]))) {
+ bch2_fs_usage_read_one(c, &c->usage_base->replicas[i])) {
n = cpu_replicas_add_entry(&c->replicas_gc, e);
if (!n.entries) {
ret = -ENOSPC;
cpu_replicas_entry(&c->replicas, i);
if (e->data_type == BCH_DATA_journal ||
- c->usage_base->replicas[i] ||
- percpu_u64_get(&c->usage[0]->replicas[i]) ||
- percpu_u64_get(&c->usage[1]->replicas[i]))
+ bch2_fs_usage_read_one(c, &c->usage_base->replicas[i]))
memcpy(cpu_replicas_entry(&new, new.nr++),
e, new.entry_size);
}
for (i = 0; i < ARRAY_SIZE(c->usage); i++)
bch2_fs_usage_acc_to_base(c, i);
} else {
- bch2_fs_usage_acc_to_base(c, journal_seq & 1);
+ bch2_fs_usage_acc_to_base(c, journal_seq & JOURNAL_BUF_MASK);
}
{
percpu_free_rwsem(&c->mark_lock);
free_percpu(c->online_reserved);
kfree(c->usage_scratch);
- free_percpu(c->usage[1]);
- free_percpu(c->usage[0]);
+ for (i = 0; i < ARRAY_SIZE(c->usage); i++)
+ free_percpu(c->usage[i]);
kfree(c->usage_base);
if (c->btree_iters_bufs)