Remove the PTR_CACHE inline and replace it with a direct dereference
of c->cache.
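
The helper ignores its bkey and pointer arguments and unconditionally
returns the set's only cache, so every caller can dereference c->cache
directly. As an illustrative sketch of the call-site change (everything
here is taken from the diff below, nothing new is assumed):

    /* removed inline: 'k' and 'ptr' are unused */
    static inline struct cache *PTR_CACHE(struct cache_set *c,
                                          const struct bkey *k,
                                          unsigned int ptr)
    {
            return c->cache;
    }

    /* before: indirection through the helper */
    struct cache *ca = PTR_CACHE(c, k, i);
    /* after: the equivalent direct dereference */
    struct cache *ca = c->cache;
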
(Coly Li: fix the typo from PTR_BUCKET to PTR_CACHE in commit log)
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Coly Li <colyli@suse.de>
Link: https://lore.kernel.org/r/20210411134316.80274-3-colyli@suse.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
unsigned int i;
for (i = 0; i < KEY_PTRS(k); i++)
- __bch_bucket_free(PTR_CACHE(c, k, i),
- PTR_BUCKET(c, k, i));
+ __bch_bucket_free(c->cache, PTR_BUCKET(c, k, i));
}
int __bch_bucket_alloc_set(struct cache_set *c, unsigned int reserve,
SET_PTR_OFFSET(&b->key, i, PTR_OFFSET(&b->key, i) + sectors);
atomic_long_add(sectors,
- &PTR_CACHE(c, &b->key, i)->sectors_written);
+ &c->cache->sectors_written);
}
if (b->sectors_free < c->cache->sb.block_size)
return s & (c->cache->sb.bucket_size - 1);
}
-static inline struct cache *PTR_CACHE(struct cache_set *c,
- const struct bkey *k,
- unsigned int ptr)
-{
- return c->cache;
-}
-
static inline size_t PTR_BUCKET_NR(struct cache_set *c,
const struct bkey *k,
unsigned int ptr)
static inline struct bucket *PTR_BUCKET(struct cache_set *c,
const struct bkey *k,
unsigned int ptr)
{
- return PTR_CACHE(c, k, ptr)->buckets + PTR_BUCKET_NR(c, k, ptr);
+ return c->cache->buckets + PTR_BUCKET_NR(c, k, ptr);
}
static inline uint8_t gen_after(uint8_t a, uint8_t b)
static inline bool ptr_available(struct cache_set *c, const struct bkey *k,
unsigned int i)
{
- return (PTR_DEV(k, i) < MAX_CACHES_PER_SET) && PTR_CACHE(c, k, i);
+ return (PTR_DEV(k, i) < MAX_CACHES_PER_SET) && c->cache;
do_btree_node_write(b);
atomic_long_add(set_blocks(i, block_bytes(b->c->cache)) * b->c->cache->sb.block_size,
- &PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written);
+ &b->c->cache->btree_sectors_written);
b->written += set_blocks(i, block_bytes(b->c->cache));
}
for (i = 0; i < KEY_PTRS(k); i++)
SET_PTR_GEN(k, i,
- bch_inc_gen(PTR_CACHE(b->c, &b->key, i),
+ bch_inc_gen(b->c->cache,
PTR_BUCKET(b->c, &b->key, i)));
mutex_unlock(&b->c->bucket_lock);
v->keys.ops = b->keys.ops;
bio = bch_bbio_alloc(b->c);
- bio_set_dev(bio, PTR_CACHE(b->c, &b->key, 0)->bdev);
+ bio_set_dev(bio, b->c->cache->bdev);
bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0);
bio->bi_iter.bi_size = KEY_SIZE(&v->key) << 9;
bio->bi_opf = REQ_OP_READ | REQ_META;
for (i = 0; i < KEY_PTRS(k); i++)
if (ptr_available(c, k, i)) {
- struct cache *ca = PTR_CACHE(c, k, i);
+ struct cache *ca = c->cache;
size_t bucket = PTR_BUCKET_NR(c, k, i);
size_t r = bucket_remainder(c, PTR_OFFSET(k, i));
for (i = 0; i < KEY_PTRS(k); i++)
if (ptr_available(c, k, i)) {
- struct cache *ca = PTR_CACHE(c, k, i);
+ struct cache *ca = c->cache;
size_t bucket = PTR_BUCKET_NR(c, k, i);
size_t r = bucket_remainder(c, PTR_OFFSET(k, i));
struct bbio *b = container_of(bio, struct bbio, bio);
bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0);
- bio_set_dev(bio, PTR_CACHE(c, &b->key, 0)->bdev);
+ bio_set_dev(bio, c->cache->bdev);
b->submit_time_us = local_clock_us();
closure_bio_submit(c, bio, bio->bi_private);
blk_status_t error, const char *m)
{
struct bbio *b = container_of(bio, struct bbio, bio);
- struct cache *ca = PTR_CACHE(c, &b->key, 0);
+ struct cache *ca = c->cache;
int is_read = (bio_data_dir(bio) == READ ? 1 : 0);
unsigned int threshold = op_is_write(bio_op(bio))
w->data->csum = csum_set(w->data);
for (i = 0; i < KEY_PTRS(k); i++) {
- ca = PTR_CACHE(c, k, i);
+ ca = c->cache;
bio = &ca->journal.bio;
atomic_long_add(sectors, &ca->meta_sectors_written);
struct dirty_io *io = w->private;
/* is_read = 1 */
- bch_count_io_errors(PTR_CACHE(io->dc->disk.c, &w->key, 0),
+ bch_count_io_errors(io->dc->disk.c->cache,
bio->bi_status, 1,
"reading dirty data from cache");
dirty_init(w);
bio_set_op_attrs(&io->bio, REQ_OP_READ, 0);
io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
- bio_set_dev(&io->bio,
- PTR_CACHE(dc->disk.c, &w->key, 0)->bdev);
+ bio_set_dev(&io->bio, dc->disk.c->cache->bdev);
io->bio.bi_end_io = read_dirty_endio;
if (bch_bio_alloc_pages(&io->bio, GFP_KERNEL))