 static unsigned bucket_sort_key(struct bucket *g, struct bucket_mark m,
 				u64 now, u64 last_seq_ondisk)
 {
-	unsigned used = bucket_sectors_used(m);
+	unsigned used = m.cached_sectors;
 
 	if (used) {
 		/*
 static inline int bucket_sectors_fragmented(struct bch_dev *ca,
 					    struct bucket_mark m)
 {
-	return bucket_sectors_used(m)
-		? max(0, (int) ca->mi.bucket_size - (int) bucket_sectors_used(m))
+	return m.dirty_sectors
+		? max(0, (int) ca->mi.bucket_size - (int) m.dirty_sectors)
 		: 0;
 }
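
Note on the two hunks above: bucket_sectors_used() counts both dirty and cached
sectors, so open-coding the field each caller actually needs is a behavioural
change wherever cached data is present. A rough sketch of the helper being
replaced, as it reads in buckets.h (reconstructed from memory, not quoted from
this patch):

	/* total live sectors in a bucket, dirty or cached: */
	static inline unsigned bucket_sectors_used(struct bucket_mark mark)
	{
		return mark.dirty_sectors + mark.cached_sectors;
	}

In bucket_sort_key() a bucket being considered for allocation can presumably
only still hold cached data, so used becomes m.cached_sectors directly; in
bucket_sectors_fragmented() only dirty data would have to be rewritten by
copygc, so cached sectors no longer count toward the fragmentation estimate.
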
 			.dev	= p.ptr.dev,
 			.offset	= p.ptr.offset,
 		};
+		ssize_t i;
-		ssize_t i = eytzinger0_find_le(h->data, h->used,
-					       sizeof(h->data[0]),
-					       bucket_offset_cmp, &search);
+		if (p.ptr.cached)
+			continue;
+
+		i = eytzinger0_find_le(h->data, h->used,
+				       sizeof(h->data[0]),
+				       bucket_offset_cmp, &search);
 #if 0
 		/* eytzinger search verify code: */
 		ssize_t j = -1, k;
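
The new early continue keeps cached pointers away from the heap lookup
entirely: copygc only has to rewrite dirty copies, while a cached copy can
simply be dropped when its bucket is reused. For context, the search key above
only fills in .dev and .offset because the heap entries are (presumably)
sorted into eytzinger order on exactly those two fields before the move
starts; a sketch of what bucket_offset_cmp looks like (reconstructed, not
quoted from this patch; cmp_int() is bcachefs's three-way integer compare):

	/* order copygc heap entries by device, then by bucket start offset */
	static int bucket_offset_cmp(const void *_l, const void *_r, size_t size)
	{
		const struct copygc_heap_entry *l = _l;
		const struct copygc_heap_entry *r = _r;

		return  cmp_int(l->dev,    r->dev) ?:
			cmp_int(l->offset, r->offset);
	}

eytzinger0_find_le() then returns the index of the greatest entry ordered at
or before the search key, i.e. the nearest candidate bucket starting at or
before this pointer; the caller still has to check that the pointer actually
falls inside that bucket.
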
 		if (m.owned_by_allocator ||
 		    m.data_type != BCH_DATA_user ||
-		    !bucket_sectors_used(m) ||
-		    bucket_sectors_used(m) >= ca->mi.bucket_size)
+		    m.dirty_sectors >= ca->mi.bucket_size)
 			continue;
 		WARN_ON(m.stripe && !g->stripe_redundancy);
 
 		e = (struct copygc_heap_entry) {
 			.dev		= dev_idx,
 			.gen		= m.gen,
 			.replicas	= 1 + g->stripe_redundancy,
-			.fragmentation	= bucket_sectors_used(m) * (1U << 15)
+			.fragmentation	= m.dirty_sectors * (1U << 15)
 				/ ca->mi.bucket_size,
-			.sectors	= bucket_sectors_used(m),
+			.sectors	= m.dirty_sectors,
 			.offset		= bucket_to_sector(ca, b),
 		};
 		heap_add_or_replace(h, e, -fragmentation_cmp, NULL);
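
.fragmentation above is the bucket's dirty-sector count rescaled to a 15-bit
fixed-point fraction of the bucket size, presumably so that buckets from
devices with different bucket sizes compare on an equal footing in one heap.
A worked example with made-up numbers:

	/*
	 * Illustrative only, not from the patch: a 512-sector bucket
	 * holding 128 dirty sectors gets
	 *
	 *	.fragmentation	= 128 * (1U << 15) / 512 = 8192	(25% of full scale)
	 *	.sectors	= 128
	 */

A bucket holding only cached data now contributes zero to both fields, where
previously its cached contents were counted as data copygc would have to move.
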
 		m = READ_ONCE(buckets->b[b].mark);
 		if (i->gen == m.gen &&
-		    bucket_sectors_used(m)) {
-			sectors_not_moved += bucket_sectors_used(m);
+		    m.dirty_sectors) {
+			sectors_not_moved += m.dirty_sectors;
 			buckets_not_moved++;
 		}
 	}