1 // SPDX-License-Identifier: GPL-2.0
3 * Main bcache entry point - handle a read or a write request and decide what to
4 * do with it; the make_request functions are called by the block layer.
6 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
7 * Copyright 2012 Google, Inc.
14 #include "writeback.h"
16 #include <linux/module.h>
17 #include <linux/hash.h>
18 #include <linux/random.h>
19 #include <linux/backing-dev.h>
21 #include <trace/events/bcache.h>
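/*
 * Tuning cutoffs, in percent of cache buckets in use (see the
 * gc_stats.in_use checks below): above CUTOFF_CACHE_ADD new data is no
 * longer added to the cache (I/O bypasses it), and above CUTOFF_CACHE_READA
 * cache misses stop doing readahead into the cache.
 */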
23 #define CUTOFF_CACHE_ADD 95
24 #define CUTOFF_CACHE_READA 90
26 struct kmem_cache *bch_search_cache;
28 static void bch_data_insert_start(struct closure *);
30 static unsigned cache_mode(struct cached_dev *dc)
32 return BDEV_CACHE_MODE(&dc->sb);
35 static bool verify(struct cached_dev *dc)
40 static void bio_csum(struct bio *bio, struct bkey *k)
43 struct bvec_iter iter;
46 bio_for_each_segment(bv, bio, iter) {
47 void *d = kmap(bv.bv_page) + bv.bv_offset;
48 csum = bch_crc64_update(csum, d, bv.bv_len);
52 k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
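/*
 * bio_csum() above computes a 64-bit CRC over the bio's data one segment at
 * a time and stashes the result in the key's spare pointer slot
 * (k->ptr[KEY_PTRS(k)]); the mask keeps only the low 63 bits of the CRC.
 */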
55 /* Insert data into cache */
57 static void bch_data_insert_keys(struct closure *cl)
59 struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
60 atomic_t *journal_ref = NULL;
61 struct bkey *replace_key = op->replace ? &op->replace_key : NULL;
65 * If we're looping, we might already be waiting on
66 * another journal write - can't wait on more than one journal write at a time.

69 * XXX: this looks wrong
72 while (atomic_read(&s->cl.remaining) & CLOSURE_WAITING)
77 journal_ref = bch_journal(op->c, &op->insert_keys,
78 op->flush_journal ? cl : NULL);
80 ret = bch_btree_insert(op->c, &op->insert_keys,
81 journal_ref, replace_key);
83 op->replace_collision = true;
85 op->status = BLK_STS_RESOURCE;
86 op->insert_data_done = true;
90 atomic_dec_bug(journal_ref);
92 if (!op->insert_data_done) {
93 continue_at(cl, bch_data_insert_start, op->wq);
97 bch_keylist_free(&op->insert_keys);
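/*
 * Note on the journal reference above: bch_journal() returns a ref that pins
 * the journal entry holding these keys; it is held across the btree insert
 * and dropped with atomic_dec_bug() once the keys have been (or have failed
 * to be) inserted.
 */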
101 static int bch_keylist_realloc(struct keylist *l, unsigned u64s,
104 size_t oldsize = bch_keylist_nkeys(l);
105 size_t newsize = oldsize + u64s;
108 * The journalling code doesn't handle the case where the keys to insert
109 * are bigger than an empty write: if we just return -ENOMEM here,
110 * bch_data_insert_keys() will insert the keys created so far
111 * and finish the rest when the keylist is empty.
113 if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset))
116 return __bch_keylist_realloc(l, u64s);
119 static void bch_data_invalidate(struct closure *cl)
121 struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
122 struct bio *bio = op->bio;
124 pr_debug("invalidating %i sectors from %llu",
125 bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);
127 while (bio_sectors(bio)) {
128 unsigned sectors = min(bio_sectors(bio),
129 1U << (KEY_SIZE_BITS - 1));
131 if (bch_keylist_realloc(&op->insert_keys, 2, op->c))
134 bio->bi_iter.bi_sector += sectors;
135 bio->bi_iter.bi_size -= sectors << 9;
137 bch_keylist_add(&op->insert_keys,
138 &KEY(op->inode, bio->bi_iter.bi_sector, sectors));
141 op->insert_data_done = true;
142 /* pairs with the bio_get() in bch_data_insert() */
145 continue_at(cl, bch_data_insert_keys, op->wq);
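/*
 * The keys added above carry no pointers; inserting a key with zero pointers
 * over a range is how bcache invalidates any cached data covering that range
 * (the same trick bch_data_insert_error() uses below).
 */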
148 static void bch_data_insert_error(struct closure *cl)
150 struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
153 * Our data write just errored, which means we've got a bunch of keys to
154 * insert that point to data that wasn't successfully written.
156 * We don't have to insert those keys but we still have to invalidate
157 * that region of the cache - so, if we just strip off all the pointers
158 * from the keys we'll accomplish just that.
161 struct bkey *src = op->insert_keys.keys, *dst = op->insert_keys.keys;
163 while (src != op->insert_keys.top) {
164 struct bkey *n = bkey_next(src);
166 SET_KEY_PTRS(src, 0);
167 memmove(dst, src, bkey_bytes(src));
169 dst = bkey_next(dst);
173 op->insert_keys.top = dst;
175 bch_data_insert_keys(cl);
178 static void bch_data_insert_endio(struct bio *bio)
180 struct closure *cl = bio->bi_private;
181 struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
183 if (bio->bi_status) {
184 /* TODO: We could try to recover from this. */
186 op->status = bio->bi_status;
187 else if (!op->replace)
188 set_closure_fn(cl, bch_data_insert_error, op->wq);
190 set_closure_fn(cl, NULL, NULL);
193 bch_bbio_endio(op->c, bio, bio->bi_status, "writing data to cache");
196 static void bch_data_insert_start(struct closure *cl)
198 struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
199 struct bio *bio = op->bio, *n;
202 return bch_data_invalidate(cl);
204 if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
208 * Journal writes are marked REQ_PREFLUSH; if the original write was a
209 * flush, it'll wait on the journal write.
211 bio->bi_opf &= ~(REQ_PREFLUSH|REQ_FUA);
216 struct bio_set *split = &op->c->bio_split;
218 /* 1 for the device pointer and 1 for the checksum */
219 if (bch_keylist_realloc(&op->insert_keys,
220 3 + (op->csum ? 1 : 0),
222 continue_at(cl, bch_data_insert_keys, op->wq);
226 k = op->insert_keys.top;
228 SET_KEY_INODE(k, op->inode);
229 SET_KEY_OFFSET(k, bio->bi_iter.bi_sector);
231 if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
232 op->write_point, op->write_prio,
236 n = bio_next_split(bio, KEY_SIZE(k), GFP_NOIO, split);
238 n->bi_end_io = bch_data_insert_endio;
242 SET_KEY_DIRTY(k, true);
244 for (i = 0; i < KEY_PTRS(k); i++)
245 SET_GC_MARK(PTR_BUCKET(op->c, k, i),
249 SET_KEY_CSUM(k, op->csum);
253 trace_bcache_cache_insert(k);
254 bch_keylist_push(&op->insert_keys);
256 bio_set_op_attrs(n, REQ_OP_WRITE, 0);
257 bch_submit_bbio(n, op->c, k, 0);
260 op->insert_data_done = true;
261 continue_at(cl, bch_data_insert_keys, op->wq);
264 /* bch_alloc_sectors() blocks if op->writeback is true */
265 BUG_ON(op->writeback);
268 * But if it's not a writeback write we'd rather just bail out if
269 * there aren't any buckets ready to write to - it might take a while and
270 * we might be starving btree writes for gc or something.
275 * Writethrough write: We can't complete the write until we've
276 * updated the index. But we don't want to delay the write while
277 * we wait for buckets to be freed up, so just invalidate the rest of the write.
281 return bch_data_invalidate(cl);
284 * From a cache miss, we can just insert the keys for the data
285 * we have written or bail out if we didn't do anything.
287 op->insert_data_done = true;
290 if (!bch_keylist_empty(&op->insert_keys))
291 continue_at(cl, bch_data_insert_keys, op->wq);
298 * bch_data_insert - stick some data in the cache
299 * @cl: closure pointer.
301 * This is the starting point for any data to end up in a cache device; it could
302 * be from a normal write, or a writeback write, or a write to a flash only
303 * volume - it's also used by the moving garbage collector to compact data in
304 * mostly empty buckets.
306 * It first writes the data to the cache, creating a list of keys to be inserted
307 * (if the data had to be fragmented there will be multiple keys); after the
308 * data is written it calls bch_journal, and after the keys have been added to
309 * the next journal write they're inserted into the btree.
311 * It inserts the data in op->bio; bi_sector is used for the key offset,
312 * and op->inode is used for the key inode.
314 * If op->bypass is true, instead of inserting the data it invalidates the
315 * region of the cache represented by op->bio and op->inode.
317 void bch_data_insert(struct closure *cl)
319 struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
321 trace_bcache_write(op->c, op->inode, op->bio,
322 op->writeback, op->bypass);
324 bch_keylist_init(&op->insert_keys);
326 bch_data_insert_start(cl);
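/*
 * Typical use, as seen later in this file: the caller fills in a struct
 * data_insert_op embedded in its own state (cache set, bio, inode, flags)
 * and kicks off the insert as a closure, roughly:
 *
 *	s->iop.bio   = cache_bio;	// data to insert
 *	s->iop.inode = d->id;		// key inode
 *	closure_call(&s->iop.cl, bch_data_insert, NULL, &s->cl);
 *
 * This is only an illustrative sketch of the pattern used by
 * cached_dev_read_done()/cached_dev_write(); the field setup details differ.
 */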
331 unsigned bch_get_congested(struct cache_set *c)
336 if (!c->congested_read_threshold_us &&
337 !c->congested_write_threshold_us)
340 i = (local_clock_us() - c->congested_last_us) / 1024;
344 i += atomic_read(&c->congested);
351 i = fract_exp_two(i, 6);
353 rand = get_random_int();
354 i -= bitmap_weight(&rand, BITS_PER_LONG);
356 return i > 0 ? i : 1;
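/*
 * The value returned above is used by check_should_bypass() as a per-bio
 * sector threshold: 0 means "not congested, never bypass on congestion";
 * otherwise the value shrinks as the cache gets more congested, so larger
 * I/Os are bypassed first. Subtracting the popcount of a random word adds a
 * little randomness so the cutoff isn't a hard edge.
 */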
359 static void add_sequential(struct task_struct *t)
361 ewma_add(t->sequential_io_avg,
362 t->sequential_io, 8, 0);
364 t->sequential_io = 0;
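/*
 * add_sequential() folds the just-finished sequential run into a per-task
 * exponentially weighted moving average (weight 8) and resets the running
 * counter; check_should_bypass() compares the larger of the current run and
 * this average against dc->sequential_cutoff.
 */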
367 static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
369 return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
372 static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
374 struct cache_set *c = dc->disk.c;
375 unsigned mode = cache_mode(dc);
376 unsigned sectors, congested = bch_get_congested(c);
377 struct task_struct *task = current;
380 if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
381 c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
382 (bio_op(bio) == REQ_OP_DISCARD))
385 if (mode == CACHE_MODE_NONE ||
386 (mode == CACHE_MODE_WRITEAROUND &&
387 op_is_write(bio_op(bio))))
391 * Flag for bypass if the IO is for read-ahead or background,
392 * unless the read-ahead request is for metadata (e.g. for gfs2).
394 if (bio->bi_opf & (REQ_RAHEAD|REQ_BACKGROUND) &&
395 !(bio->bi_opf & REQ_META))
398 if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
399 bio_sectors(bio) & (c->sb.block_size - 1)) {
400 pr_debug("skipping unaligned io");
404 if (bypass_torture_test(dc)) {
405 if ((get_random_int() & 3) == 3)
411 if (!congested && !dc->sequential_cutoff)
414 spin_lock(&dc->io_lock);
416 hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash)
417 if (i->last == bio->bi_iter.bi_sector &&
418 time_before(jiffies, i->jiffies))
421 i = list_first_entry(&dc->io_lru, struct io, lru);
423 add_sequential(task);
426 if (i->sequential + bio->bi_iter.bi_size > i->sequential)
427 i->sequential += bio->bi_iter.bi_size;
429 i->last = bio_end_sector(bio);
430 i->jiffies = jiffies + msecs_to_jiffies(5000);
431 task->sequential_io = i->sequential;
434 hlist_add_head(&i->hash, iohash(dc, i->last));
435 list_move_tail(&i->lru, &dc->io_lru);
437 spin_unlock(&dc->io_lock);
439 sectors = max(task->sequential_io,
440 task->sequential_io_avg) >> 9;
442 if (dc->sequential_cutoff &&
443 sectors >= dc->sequential_cutoff >> 9) {
444 trace_bcache_bypass_sequential(bio);
448 if (congested && sectors >= congested) {
449 trace_bcache_bypass_congested(bio);
454 bch_rescale_priorities(c, bio_sectors(bio));
457 bch_mark_sectors_bypassed(c, dc, bio_sectors(bio));
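/*
 * Summary of the bypass policy above: a bio skips the cache when the device
 * is detaching, the cache is past CUTOFF_CACHE_ADD, it is a discard, caching
 * is off (or writearound for writes), it is plain readahead/background I/O,
 * it is unaligned to the cache block size, the task's I/O looks sequential
 * beyond dc->sequential_cutoff, or the cache is congested beyond the
 * bch_get_congested() threshold.
 */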
464 /* Stack frame for bio_complete */
468 struct bio *orig_bio;
469 struct bio *cache_miss;
470 struct bcache_device *d;
472 unsigned insert_bio_sectors;
473 unsigned recoverable:1;
475 unsigned read_dirty_data:1;
476 unsigned cache_missed:1;
478 unsigned long start_time;
481 struct data_insert_op iop;
484 static void bch_cache_read_endio(struct bio *bio)
486 struct bbio *b = container_of(bio, struct bbio, bio);
487 struct closure *cl = bio->bi_private;
488 struct search *s = container_of(cl, struct search, cl);
491 * If the bucket was reused while our bio was in flight, we might have
492 * read the wrong data. Set s->iop.status, but don't count it as an error
493 * against the cache device; we'll still reread the data
494 * from the backing device.
498 s->iop.status = bio->bi_status;
499 else if (!KEY_DIRTY(&b->key) &&
500 ptr_stale(s->iop.c, &b->key, 0)) {
501 atomic_long_inc(&s->iop.c->cache_read_races);
502 s->iop.status = BLK_STS_IOERR;
505 bch_bbio_endio(s->iop.c, bio, bio->bi_status, "reading from cache");
509 * Read from a single key, handling the initial cache miss if the key starts in
510 * the middle of the bio
512 static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
514 struct search *s = container_of(op, struct search, op);
515 struct bio *n, *bio = &s->bio.bio;
516 struct bkey *bio_key;
519 if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)
522 if (KEY_INODE(k) != s->iop.inode ||
523 KEY_START(k) > bio->bi_iter.bi_sector) {
524 unsigned bio_sectors = bio_sectors(bio);
525 unsigned sectors = KEY_INODE(k) == s->iop.inode
526 ? min_t(uint64_t, INT_MAX,
527 KEY_START(k) - bio->bi_iter.bi_sector)
530 int ret = s->d->cache_miss(b, s, bio, sectors);
531 if (ret != MAP_CONTINUE)
534 /* if this was a complete miss we shouldn't get here */
535 BUG_ON(bio_sectors <= sectors);
541 /* XXX: figure out best pointer - for multiple cache devices */
544 PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO;
547 s->read_dirty_data = true;
549 n = bio_next_split(bio, min_t(uint64_t, INT_MAX,
550 KEY_OFFSET(k) - bio->bi_iter.bi_sector),
551 GFP_NOIO, &s->d->bio_split);
553 bio_key = &container_of(n, struct bbio, bio)->key;
554 bch_bkey_copy_single_ptr(bio_key, k, ptr);
556 bch_cut_front(&KEY(s->iop.inode, n->bi_iter.bi_sector, 0), bio_key);
557 bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key);
559 n->bi_end_io = bch_cache_read_endio;
560 n->bi_private = &s->cl;
563 * The bucket we're reading from might be reused while our bio
564 * is in flight, and we could then end up reading the wrong
567 * We guard against this by checking (in bch_cache_read_endio()) if
568 * the pointer is stale again; if so, we treat it as an error
569 * and reread from the backing device (but we don't pass that
570 * error up anywhere).
573 __bch_submit_bbio(n, b->c);
574 return n == bio ? MAP_DONE : MAP_CONTINUE;
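/*
 * Return convention used above (and by the cache_miss handlers it calls):
 * MAP_DONE means the remainder of the bio has been dealt with and the btree
 * walk can stop, while MAP_CONTINUE asks bch_btree_map_keys() to keep
 * iterating to the next key.
 */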
577 static void cache_lookup(struct closure *cl)
579 struct search *s = container_of(cl, struct search, iop.cl);
580 struct bio *bio = &s->bio.bio;
581 struct cached_dev *dc;
584 bch_btree_op_init(&s->op, -1);
586 ret = bch_btree_map_keys(&s->op, s->iop.c,
587 &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
588 cache_lookup_fn, MAP_END_KEY);
589 if (ret == -EAGAIN) {
590 continue_at(cl, cache_lookup, bcache_wq);
595 * We might encounter an error while searching the btree. If that happens,
596 * ret will be negative; in this scenario we should not recover data from
597 * the backing device (when the cache device is dirty) because we don't know
598 * whether the bkeys covered by the read request are all clean.
600 * And when that happens, s->iop.status still has its initial value
601 * from before we submitted s->bio.bio.
604 BUG_ON(ret == -EINTR);
605 if (s->d && s->d->c &&
606 !UUID_FLASH_ONLY(&s->d->c->uuids[s->d->id])) {
607 dc = container_of(s->d, struct cached_dev, disk);
608 if (dc && atomic_read(&dc->has_dirty))
609 s->recoverable = false;
612 s->iop.status = BLK_STS_IOERR;
618 /* Common code for the make_request functions */
620 static void request_endio(struct bio *bio)
622 struct closure *cl = bio->bi_private;
624 if (bio->bi_status) {
625 struct search *s = container_of(cl, struct search, cl);
626 s->iop.status = bio->bi_status;
627 /* Only cache read errors are recoverable */
628 s->recoverable = false;
635 static void backing_request_endio(struct bio *bio)
637 struct closure *cl = bio->bi_private;
639 if (bio->bi_status) {
640 struct search *s = container_of(cl, struct search, cl);
641 struct cached_dev *dc = container_of(s->d,
642 struct cached_dev, disk);
644 * If a bio has REQ_PREFLUSH for writeback mode, it was
645 * specially assembled in cached_dev_write() for a non-zero
646 * write request which has REQ_PREFLUSH. We don't set
647 * s->iop.status for this failure; the status will be decided
648 * by the result of the bch_data_insert() operation.
650 if (unlikely(s->iop.writeback &&
651 bio->bi_opf & REQ_PREFLUSH)) {
652 pr_err("Can't flush %s: returned bi_status %i",
653 dc->backing_dev_name, bio->bi_status);
655 /* set to orig_bio->bi_status in bio_complete() */
656 s->iop.status = bio->bi_status;
658 s->recoverable = false;
659 /* should count I/O error for backing device here */
660 bch_count_backing_io_errors(dc, bio);
667 static void bio_complete(struct search *s)
670 generic_end_io_acct(s->d->disk->queue, bio_op(s->orig_bio),
671 &s->d->disk->part0, s->start_time);
673 trace_bcache_request_end(s->d, s->orig_bio);
674 s->orig_bio->bi_status = s->iop.status;
675 bio_endio(s->orig_bio);
680 static void do_bio_hook(struct search *s,
681 struct bio *orig_bio,
682 bio_end_io_t *end_io_fn)
684 struct bio *bio = &s->bio.bio;
686 bio_init(bio, NULL, 0);
687 __bio_clone_fast(bio, orig_bio);
689 * bi_end_io can be set separately somewhere else, e.g.:
691 * - cache_bio->bi_end_io from cached_dev_cache_miss()
692 * - n->bi_end_io from cache_lookup_fn()
694 bio->bi_end_io = end_io_fn;
695 bio->bi_private = &s->cl;
700 static void search_free(struct closure *cl)
702 struct search *s = container_of(cl, struct search, cl);
704 atomic_dec(&s->d->c->search_inflight);
710 closure_debug_destroy(cl);
711 mempool_free(s, &s->d->c->search);
714 static inline struct search *search_alloc(struct bio *bio,
715 struct bcache_device *d)
719 s = mempool_alloc(&d->c->search, GFP_NOIO);
721 closure_init(&s->cl, NULL);
722 do_bio_hook(s, bio, request_endio);
723 atomic_inc(&d->c->search_inflight);
726 s->cache_miss = NULL;
730 s->write = op_is_write(bio_op(bio));
731 s->read_dirty_data = 0;
732 s->start_time = jiffies;
736 s->iop.inode = d->id;
737 s->iop.write_point = hash_long((unsigned long) current, 16);
738 s->iop.write_prio = 0;
741 s->iop.flush_journal = op_is_flush(bio->bi_opf);
742 s->iop.wq = bcache_wq;
749 static void cached_dev_bio_complete(struct closure *cl)
751 struct search *s = container_of(cl, struct search, cl);
752 struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
760 static void cached_dev_cache_miss_done(struct closure *cl)
762 struct search *s = container_of(cl, struct search, cl);
764 if (s->iop.replace_collision)
765 bch_mark_cache_miss_collision(s->iop.c, s->d);
768 bio_free_pages(s->iop.bio);
770 cached_dev_bio_complete(cl);
773 static void cached_dev_read_error(struct closure *cl)
775 struct search *s = container_of(cl, struct search, cl);
776 struct bio *bio = &s->bio.bio;
779 * If the read request hit dirty data (s->read_dirty_data is true),
780 * then retrying the failed read from the backing device may
781 * return stale data. So read failure recovery is only
782 * permitted when the read request hit clean data in the cache device,
783 * or when a cache read race happened.
785 if (s->recoverable && !s->read_dirty_data) {
786 /* Retry from the backing device: */
787 trace_bcache_read_retry(s->orig_bio);
790 do_bio_hook(s, s->orig_bio, backing_request_endio);
792 /* XXX: invalidate cache */
794 /* I/O request sent to backing device */
795 closure_bio_submit(s->iop.c, bio, cl);
798 continue_at(cl, cached_dev_cache_miss_done, NULL);
801 static void cached_dev_read_done(struct closure *cl)
803 struct search *s = container_of(cl, struct search, cl);
804 struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
807 * We had a cache miss; cache_bio now contains data ready to be inserted into the cache.
810 * First, we copy the data we just read from cache_bio's bounce buffers
811 * to the buffers the original bio pointed to:
815 bio_reset(s->iop.bio);
816 s->iop.bio->bi_iter.bi_sector = s->cache_miss->bi_iter.bi_sector;
817 bio_copy_dev(s->iop.bio, s->cache_miss);
818 s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
819 bch_bio_map(s->iop.bio, NULL);
821 bio_copy_data(s->cache_miss, s->iop.bio);
823 bio_put(s->cache_miss);
824 s->cache_miss = NULL;
827 if (verify(dc) && s->recoverable && !s->read_dirty_data)
828 bch_data_verify(dc, s->orig_bio);
833 !test_bit(CACHE_SET_STOPPING, &s->iop.c->flags)) {
834 BUG_ON(!s->iop.replace);
835 closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
838 continue_at(cl, cached_dev_cache_miss_done, NULL);
841 static void cached_dev_read_done_bh(struct closure *cl)
843 struct search *s = container_of(cl, struct search, cl);
844 struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
846 bch_mark_cache_accounting(s->iop.c, s->d,
847 !s->cache_missed, s->iop.bypass);
848 trace_bcache_read(s->orig_bio, !s->cache_miss, s->iop.bypass);
851 continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
852 else if (s->iop.bio || verify(dc))
853 continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);
855 continue_at_nobarrier(cl, cached_dev_bio_complete, NULL);
858 static int cached_dev_cache_miss(struct btree *b, struct search *s,
859 struct bio *bio, unsigned sectors)
861 int ret = MAP_CONTINUE;
863 struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
864 struct bio *miss, *cache_bio;
868 if (s->cache_miss || s->iop.bypass) {
869 miss = bio_next_split(bio, sectors, GFP_NOIO, &s->d->bio_split);
870 ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
874 if (!(bio->bi_opf & REQ_RAHEAD) &&
875 !(bio->bi_opf & REQ_META) &&
876 s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
877 reada = min_t(sector_t, dc->readahead >> 9,
878 get_capacity(bio->bi_disk) - bio_end_sector(bio));
880 s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);
882 s->iop.replace_key = KEY(s->iop.inode,
883 bio->bi_iter.bi_sector + s->insert_bio_sectors,
884 s->insert_bio_sectors);
886 ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key);
890 s->iop.replace = true;
892 miss = bio_next_split(bio, sectors, GFP_NOIO, &s->d->bio_split);
894 /* btree_search_recurse()'s btree iterator is no good anymore */
895 ret = miss == bio ? MAP_DONE : -EINTR;
897 cache_bio = bio_alloc_bioset(GFP_NOWAIT,
898 DIV_ROUND_UP(s->insert_bio_sectors, PAGE_SECTORS),
899 &dc->disk.bio_split);
903 cache_bio->bi_iter.bi_sector = miss->bi_iter.bi_sector;
904 bio_copy_dev(cache_bio, miss);
905 cache_bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
907 cache_bio->bi_end_io = backing_request_endio;
908 cache_bio->bi_private = &s->cl;
910 bch_bio_map(cache_bio, NULL);
911 if (bch_bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
915 bch_mark_cache_readahead(s->iop.c, s->d);
917 s->cache_miss = miss;
918 s->iop.bio = cache_bio;
920 /* I/O request sent to backing device */
921 closure_bio_submit(s->iop.c, cache_bio, &s->cl);
927 miss->bi_end_io = backing_request_endio;
928 miss->bi_private = &s->cl;
929 /* I/O request sent to backing device */
930 closure_bio_submit(s->iop.c, miss, &s->cl);
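/*
 * In short, a cacheable miss above does three things: it reserves the range
 * with a check key (s->iop.replace_key) so a racing write can be detected as
 * a replace collision, reads the missing data (plus any readahead) from the
 * backing device into cache_bio, and leaves cache_bio in s->iop.bio so
 * cached_dev_read_done() can copy it to the original bio and insert it into
 * the cache.
 */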
934 static void cached_dev_read(struct cached_dev *dc, struct search *s)
936 struct closure *cl = &s->cl;
938 closure_call(&s->iop.cl, cache_lookup, NULL, cl);
939 continue_at(cl, cached_dev_read_done_bh, NULL);
944 static void cached_dev_write_complete(struct closure *cl)
946 struct search *s = container_of(cl, struct search, cl);
947 struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
949 up_read_non_owner(&dc->writeback_lock);
950 cached_dev_bio_complete(cl);
953 static void cached_dev_write(struct cached_dev *dc, struct search *s)
955 struct closure *cl = &s->cl;
956 struct bio *bio = &s->bio.bio;
957 struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0);
958 struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);
960 bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end);
962 down_read_non_owner(&dc->writeback_lock);
963 if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) {
965 * We overlap with some dirty data undergoing background
966 * writeback, so force this write to writeback.
968 s->iop.bypass = false;
969 s->iop.writeback = true;
973 * Discards aren't _required_ to do anything, so skipping if
974 * check_overlapping returned true is ok.
976 * But check_overlapping drops dirty keys for which I/O hasn't started,
977 * so we still want to call it.
979 if (bio_op(bio) == REQ_OP_DISCARD)
980 s->iop.bypass = true;
982 if (should_writeback(dc, s->orig_bio,
985 s->iop.bypass = false;
986 s->iop.writeback = true;
990 s->iop.bio = s->orig_bio;
993 if (bio_op(bio) == REQ_OP_DISCARD &&
994 !blk_queue_discard(bdev_get_queue(dc->bdev)))
997 /* I/O request sent to backing device */
998 bio->bi_end_io = backing_request_endio;
999 closure_bio_submit(s->iop.c, bio, cl);
1001 } else if (s->iop.writeback) {
1002 bch_writeback_add(dc);
1005 if (bio->bi_opf & REQ_PREFLUSH) {
1007 * Also need to send a flush to the backing device.
1012 flush = bio_alloc_bioset(GFP_NOIO, 0,
1013 &dc->disk.bio_split);
1015 s->iop.status = BLK_STS_RESOURCE;
1018 bio_copy_dev(flush, bio);
1019 flush->bi_end_io = backing_request_endio;
1020 flush->bi_private = cl;
1021 flush->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;
1022 /* I/O request sent to backing device */
1023 closure_bio_submit(s->iop.c, flush, cl);
1026 s->iop.bio = bio_clone_fast(bio, GFP_NOIO, &dc->disk.bio_split);
1027 /* I/O request sent to backing device */
1028 bio->bi_end_io = backing_request_endio;
1029 closure_bio_submit(s->iop.c, bio, cl);
1033 closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
1034 continue_at(cl, cached_dev_write_complete, NULL);
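/*
 * The write paths above, in brief: bypass sends the original bio straight to
 * the backing device and only invalidates the overlapping cache range;
 * writeback writes the data to the cache alone, marks it dirty, and lets the
 * writeback thread copy it back later; writethrough (the default branch)
 * clones the bio so the data goes to the backing device and the cache in
 * parallel, completing only when both finish.
 */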
1037 static void cached_dev_nodata(struct closure *cl)
1039 struct search *s = container_of(cl, struct search, cl);
1040 struct bio *bio = &s->bio.bio;
1042 if (s->iop.flush_journal)
1043 bch_journal_meta(s->iop.c, cl);
1045 /* If it's a flush, we send the flush to the backing device too */
1046 bio->bi_end_io = backing_request_endio;
1047 closure_bio_submit(s->iop.c, bio, cl);
1049 continue_at(cl, cached_dev_bio_complete, NULL);
1052 struct detached_dev_io_private {
1053 struct bcache_device *d;
1054 unsigned long start_time;
1055 bio_end_io_t *bi_end_io;
1059 static void detached_dev_end_io(struct bio *bio)
1061 struct detached_dev_io_private *ddip;
1063 ddip = bio->bi_private;
1064 bio->bi_end_io = ddip->bi_end_io;
1065 bio->bi_private = ddip->bi_private;
1067 generic_end_io_acct(ddip->d->disk->queue, bio_op(bio),
1068 &ddip->d->disk->part0, ddip->start_time);
1070 if (bio->bi_status) {
1071 struct cached_dev *dc = container_of(ddip->d,
1072 struct cached_dev, disk);
1073 /* should count I/O error for backing device here */
1074 bch_count_backing_io_errors(dc, bio);
1078 bio->bi_end_io(bio);
1081 static void detached_dev_do_request(struct bcache_device *d, struct bio *bio)
1083 struct detached_dev_io_private *ddip;
1084 struct cached_dev *dc = container_of(d, struct cached_dev, disk);
1087 * no need to call closure_get(&dc->disk.cl) here,
1088 * because the upper layer has already opened the bcache device,
1089 * which already called closure_get(&dc->disk.cl).
1091 ddip = kzalloc(sizeof(struct detached_dev_io_private), GFP_NOIO);
1093 ddip->start_time = jiffies;
1094 ddip->bi_end_io = bio->bi_end_io;
1095 ddip->bi_private = bio->bi_private;
1096 bio->bi_end_io = detached_dev_end_io;
1097 bio->bi_private = ddip;
1099 if ((bio_op(bio) == REQ_OP_DISCARD) &&
1100 !blk_queue_discard(bdev_get_queue(dc->bdev)))
1101 bio->bi_end_io(bio);
1103 generic_make_request(bio);
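/*
 * The detached path above wraps the bio's completion: ddip saves the
 * original bi_end_io/bi_private, and detached_dev_end_io() restores them,
 * finishes the I/O accounting, counts any backing-device error, and then
 * calls the original completion handler.
 */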
1106 static void quit_max_writeback_rate(struct cache_set *c,
1107 struct cached_dev *this_dc)
1110 struct bcache_device *d;
1111 struct cached_dev *dc;
1114 * The mutex bch_register_lock may be contended by other parallel requesters,
1115 * or by attach/detach operations on other backing devices. Waiting for
1116 * the mutex lock may increase I/O request latency for seconds or more.
1117 * To avoid such a situation, if mutex_trylock() fails, only the writeback
1118 * rate of the current cached device is set to 1, and update_writeback_rate()
1119 * will decide the writeback rate of the other cached devices (remember that
1120 * c->idle_counter is already 0).
1122 if (mutex_trylock(&bch_register_lock)) {
1123 for (i = 0; i < c->devices_max_used; i++) {
1127 if (UUID_FLASH_ONLY(&c->uuids[i]))
1131 dc = container_of(d, struct cached_dev, disk);
1133 * set writeback rate to the default minimum value,
1134 * then let update_writeback_rate() decide the actual writeback rate.
1137 atomic_long_set(&dc->writeback_rate.rate, 1);
1139 mutex_unlock(&bch_register_lock);
1141 atomic_long_set(&this_dc->writeback_rate.rate, 1);
1144 /* Cached devices - read & write stuff */
1146 static blk_qc_t cached_dev_make_request(struct request_queue *q,
1150 struct bcache_device *d = bio->bi_disk->private_data;
1151 struct cached_dev *dc = container_of(d, struct cached_dev, disk);
1152 int rw = bio_data_dir(bio);
1154 if (unlikely((d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags)) ||
1156 bio->bi_status = BLK_STS_IOERR;
1158 return BLK_QC_T_NONE;
1162 if (atomic_read(&d->c->idle_counter))
1163 atomic_set(&d->c->idle_counter, 0);
1165 * If at_max_writeback_rate of the cache set is true and new I/O
1166 * comes in, quit the max writeback rate of all cached devices
1167 * attached to this cache set, and set at_max_writeback_rate
1170 if (unlikely(atomic_read(&d->c->at_max_writeback_rate) == 1)) {
1171 atomic_set(&d->c->at_max_writeback_rate, 0);
1172 quit_max_writeback_rate(d->c, dc);
1176 generic_start_io_acct(q,
1181 bio_set_dev(bio, dc->bdev);
1182 bio->bi_iter.bi_sector += dc->sb.data_offset;
1184 if (cached_dev_get(dc)) {
1185 s = search_alloc(bio, d);
1186 trace_bcache_request_start(s->d, bio);
1188 if (!bio->bi_iter.bi_size) {
1190 * can't call bch_journal_meta from under
1191 * generic_make_request
1193 continue_at_nobarrier(&s->cl,
1197 s->iop.bypass = check_should_bypass(dc, bio);
1200 cached_dev_write(dc, s);
1202 cached_dev_read(dc, s);
1205 /* I/O request sent to backing device */
1206 detached_dev_do_request(d, bio);
1208 return BLK_QC_T_NONE;
1211 static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
1212 unsigned int cmd, unsigned long arg)
1214 struct cached_dev *dc = container_of(d, struct cached_dev, disk);
1215 return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
1218 static int cached_dev_congested(void *data, int bits)
1220 struct bcache_device *d = data;
1221 struct cached_dev *dc = container_of(d, struct cached_dev, disk);
1222 struct request_queue *q = bdev_get_queue(dc->bdev);
1225 if (bdi_congested(q->backing_dev_info, bits))
1228 if (cached_dev_get(dc)) {
1232 for_each_cache(ca, d->c, i) {
1233 q = bdev_get_queue(ca->bdev);
1234 ret |= bdi_congested(q->backing_dev_info, bits);
1243 void bch_cached_dev_request_init(struct cached_dev *dc)
1245 struct gendisk *g = dc->disk.disk;
1247 g->queue->make_request_fn = cached_dev_make_request;
1248 g->queue->backing_dev_info->congested_fn = cached_dev_congested;
1249 dc->disk.cache_miss = cached_dev_cache_miss;
1250 dc->disk.ioctl = cached_dev_ioctl;
1253 /* Flash backed devices */
1255 static int flash_dev_cache_miss(struct btree *b, struct search *s,
1256 struct bio *bio, unsigned sectors)
1258 unsigned bytes = min(sectors, bio_sectors(bio)) << 9;
1260 swap(bio->bi_iter.bi_size, bytes);
1262 swap(bio->bi_iter.bi_size, bytes);
1264 bio_advance(bio, bytes);
1266 if (!bio->bi_iter.bi_size)
1269 return MAP_CONTINUE;
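/*
 * For a flash-only volume a cache miss is simply a hole in the keyspace: the
 * bi_size/bytes swap above temporarily restricts the bio to just the missed
 * range so it can be handled in place, then the bio is advanced past it.
 * When nothing remains, MAP_DONE ends the lookup.
 */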
1272 static void flash_dev_nodata(struct closure *cl)
1274 struct search *s = container_of(cl, struct search, cl);
1276 if (s->iop.flush_journal)
1277 bch_journal_meta(s->iop.c, cl);
1279 continue_at(cl, search_free, NULL);
1282 static blk_qc_t flash_dev_make_request(struct request_queue *q,
1287 struct bcache_device *d = bio->bi_disk->private_data;
1289 if (unlikely(d->c && test_bit(CACHE_SET_IO_DISABLE, &d->c->flags))) {
1290 bio->bi_status = BLK_STS_IOERR;
1292 return BLK_QC_T_NONE;
1295 generic_start_io_acct(q, bio_op(bio), bio_sectors(bio), &d->disk->part0);
1297 s = search_alloc(bio, d);
1301 trace_bcache_request_start(s->d, bio);
1303 if (!bio->bi_iter.bi_size) {
1305 * can't call bch_journal_meta from under
1306 * generic_make_request
1308 continue_at_nobarrier(&s->cl,
1311 return BLK_QC_T_NONE;
1312 } else if (bio_data_dir(bio)) {
1313 bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
1314 &KEY(d->id, bio->bi_iter.bi_sector, 0),
1315 &KEY(d->id, bio_end_sector(bio), 0));
1317 s->iop.bypass = (bio_op(bio) == REQ_OP_DISCARD) != 0;
1318 s->iop.writeback = true;
1321 closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
1323 closure_call(&s->iop.cl, cache_lookup, NULL, cl);
1326 continue_at(cl, search_free, NULL);
1327 return BLK_QC_T_NONE;
1330 static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,
1331 unsigned int cmd, unsigned long arg)
1336 static int flash_dev_congested(void *data, int bits)
1338 struct bcache_device *d = data;
1339 struct request_queue *q;
1344 for_each_cache(ca, d->c, i) {
1345 q = bdev_get_queue(ca->bdev);
1346 ret |= bdi_congested(q->backing_dev_info, bits);
1352 void bch_flash_dev_request_init(struct bcache_device *d)
1354 struct gendisk *g = d->disk;
1356 g->queue->make_request_fn = flash_dev_make_request;
1357 g->queue->backing_dev_info->congested_fn = flash_dev_congested;
1358 d->cache_miss = flash_dev_cache_miss;
1359 d->ioctl = flash_dev_ioctl;
1362 void bch_request_exit(void)
1364 if (bch_search_cache)
1365 kmem_cache_destroy(bch_search_cache);
1368 int __init bch_request_init(void)
1370 bch_search_cache = KMEM_CACHE(search, 0);
1371 if (!bch_search_cache)