static bool allocator_thread_running(struct bch_dev *ca)
{
	unsigned state = ca->mi.state == BCH_MEMBER_STATE_rw &&
-		test_bit(BCH_FS_ALLOCATOR_RUNNING, &ca->fs->flags)
+		test_bit(BCH_FS_ALLOCATOR_RUNNING, &ca->fs->flags) &&
+		test_bit(BCH_FS_ALLOC_REPLAY_DONE, &ca->fs->flags)
		? ALLOCATOR_running
		: ALLOCATOR_stopped;
	alloc_thread_set_state(ca, state);
	return state == ALLOCATOR_running;
}
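For context, this predicate is polled from the per-device allocator thread, which parks itself until woken. A minimal sketch of that consuming side, using the standard kthread parking idiom; alloc_thread_sketch is a hypothetical name, and the real bch2_allocator_thread does considerably more per iteration:

#include <linux/kthread.h>
#include <linux/sched.h>

static int alloc_thread_sketch(void *arg)
{
	struct bch_dev *ca = arg;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);

		if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);
			return 0;
		}

		if (allocator_thread_running(ca)) {
			__set_current_state(TASK_RUNNING);
			/* ... fill freelists, invalidate buckets, ... */
			continue;
		}

		/* Parked here until bch2_wake_allocator() runs: */
		schedule();
	}
}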
	BCH_FS_INITIAL_GC_DONE,
	BCH_FS_INITIAL_GC_UNFIXED,
	BCH_FS_TOPOLOGY_REPAIR_DONE,
+	BCH_FS_ALLOC_REPLAY_DONE,
	BCH_FS_BTREE_INTERIOR_REPLAY_DONE,
	BCH_FS_FSCK_DONE,
	BCH_FS_STARTED,
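For reference, not part of the patch: these enumerators are bit numbers into the atomic flags word of struct bch_fs, driven with the kernel's set_bit()/test_bit(). A minimal illustration of the gate this patch establishes; fs_alloc_may_run() is a hypothetical helper, not bcachefs API:

#include <linux/bitops.h>

/* Hypothetical helper illustrating the new gate: */
static bool fs_alloc_may_run(struct bch_fs *c)
{
	/* Both bits must be set before allocator threads may run: */
	return test_bit(BCH_FS_ALLOCATOR_RUNNING, &c->flags) &&
	       test_bit(BCH_FS_ALLOC_REPLAY_DONE, &c->flags);
}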
static int bch2_journal_replay(struct bch_fs *c,
			       struct journal_keys keys)
{
	struct journal *j = &c->journal;
+	struct bch_dev *ca;
	struct journal_key *i;
	u64 seq;
-	int ret;
+	int ret, idx;

	sort(keys.d, keys.nr, sizeof(keys.d[0]), journal_sort_seq_cmp, NULL);
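The comparator itself is outside this hunk; its job is to order keys by the journal sequence number they were read from, so replay happens in original commit order. A sketch under that assumption; journal_sort_seq_cmp_sketch is illustrative only, and the upstream comparator may use additional tiebreakers:

/* cmp_int() is the (a > b) - (a < b) helper from bcachefs util.h. */
static int journal_sort_seq_cmp_sketch(const void *_l, const void *_r)
{
	const struct journal_key *l = _l, *r = _r;

	return cmp_int(l->journal_seq,    r->journal_seq) ?:
	       cmp_int(l->journal_offset, r->journal_offset);
}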
		}
	}

+	/* Now we can start the allocator threads: */
+	set_bit(BCH_FS_ALLOC_REPLAY_DONE, &c->flags);
+	for_each_member_device(ca, c, idx)
+		bch2_wake_allocator(ca);
+
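bch2_wake_allocator() isn't shown in this patch; from memory it is roughly an RCU-protected wake_up_process() on the per-device allocator task, which pairs with the parking idiom sketched earlier:

/* Approximate shape of the helper (see alloc_background.h upstream): */
static inline void bch2_wake_allocator(struct bch_dev *ca)
{
	struct task_struct *p;

	rcu_read_lock();
	p = rcu_dereference(ca->alloc_thread);
	if (p)
		wake_up_process(p);	/* no-op if already runnable */
	rcu_read_unlock();
}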
	/*
	 * Next, replay updates to interior btree nodes:
	 */
	for (i = 0; i < BTREE_ID_NR; i++)
		bch2_btree_root_alloc(c, i);

+	set_bit(BCH_FS_ALLOC_REPLAY_DONE, &c->flags);
	set_bit(BCH_FS_BTREE_INTERIOR_REPLAY_DONE, &c->flags);
	set_bit(JOURNAL_RECLAIM_STARTED, &c->journal.flags);