// SPDX-License-Identifier: GPL-2.0
/*
 * Main bcache entry point - handle a read or a write request and decide what to
 * do with it; the make_request functions are called by the block layer.
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "request.h"
#include "writeback.h"

#include <linux/module.h>
#include <linux/hash.h>
#include <linux/random.h>
#include <linux/backing-dev.h>

#include <trace/events/bcache.h>

#define CUTOFF_CACHE_ADD        95
#define CUTOFF_CACHE_READA      90

struct kmem_cache *bch_search_cache;

static void bch_data_insert_start(struct closure *);

static unsigned cache_mode(struct cached_dev *dc)
{
        return BDEV_CACHE_MODE(&dc->sb);
}

static bool verify(struct cached_dev *dc)
{
        return dc->verify;
}

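/*
 * Compute a 64-bit CRC over the bio's data and stash it in the pointer
 * slot just past the key's last valid pointer; the top bit is masked
 * off, which appears to be reserved by the on-disk key format.
 */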
static void bio_csum(struct bio *bio, struct bkey *k)
{
        struct bio_vec bv;
        struct bvec_iter iter;
        uint64_t csum = 0;

        bio_for_each_segment(bv, bio, iter) {
                void *d = kmap(bv.bv_page) + bv.bv_offset;
                csum = bch_crc64_update(csum, d, bv.bv_len);
                kunmap(bv.bv_page);
        }

        k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
}

/* Insert data into cache */

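/*
 * Journal the keys accumulated so far and insert them into the btree.
 * Cache-miss replaces (op->replace) skip the journal; -ESRCH from
 * bch_btree_insert() means the replace raced with another write and is
 * recorded as a collision rather than an error.
 */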
static void bch_data_insert_keys(struct closure *cl)
{
        struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
        atomic_t *journal_ref = NULL;
        struct bkey *replace_key = op->replace ? &op->replace_key : NULL;
        int ret;

        /*
         * If we're looping, we might already be waiting on another journal
         * write - we can't wait on more than one journal write at a time.
         *
         * XXX: this looks wrong
         */
#if 0
        while (atomic_read(&s->cl.remaining) & CLOSURE_WAITING)
                closure_sync(&s->cl);
#endif

        if (!op->replace)
                journal_ref = bch_journal(op->c, &op->insert_keys,
                                          op->flush_journal ? cl : NULL);

        ret = bch_btree_insert(op->c, &op->insert_keys,
                               journal_ref, replace_key);
        if (ret == -ESRCH) {
                op->replace_collision = true;
        } else if (ret) {
                op->status              = BLK_STS_RESOURCE;
                op->insert_data_done    = true;
        }

        if (journal_ref)
                atomic_dec_bug(journal_ref);

        if (!op->insert_data_done) {
                continue_at(cl, bch_data_insert_start, op->wq);
                return;
        }

        bch_keylist_free(&op->insert_keys);
        closure_return(cl);
}

static int bch_keylist_realloc(struct keylist *l, unsigned u64s,
                               struct cache_set *c)
{
        size_t oldsize = bch_keylist_nkeys(l);
        size_t newsize = oldsize + u64s;

        /*
         * The journalling code doesn't handle the case where the keys to
         * insert are bigger than an empty write: if we just return -ENOMEM
         * here, bch_data_insert_start() and bch_data_invalidate() will insert
         * the keys created so far and finish the rest when the keylist is
         * empty.
         */
        if (newsize * sizeof(uint64_t) > block_bytes(c) - sizeof(struct jset))
                return -ENOMEM;

        return __bch_keylist_realloc(l, u64s);
}

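/*
 * For bypassed writes we don't cache the data, but we still have to
 * invalidate anything the write overlaps: emit pointerless keys covering
 * the bio's range (chunked to the maximum key size) and hand them to
 * bch_data_insert_keys().
 */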
static void bch_data_invalidate(struct closure *cl)
{
        struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
        struct bio *bio = op->bio;

        pr_debug("invalidating %u sectors from %llu",
                 bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);

        while (bio_sectors(bio)) {
                unsigned sectors = min(bio_sectors(bio),
                                       1U << (KEY_SIZE_BITS - 1));

                if (bch_keylist_realloc(&op->insert_keys, 2, op->c))
                        goto out;

                bio->bi_iter.bi_sector  += sectors;
                bio->bi_iter.bi_size    -= sectors << 9;

                bch_keylist_add(&op->insert_keys,
                                &KEY(op->inode, bio->bi_iter.bi_sector, sectors));
        }

        op->insert_data_done = true;
        bio_put(bio);
out:
        continue_at(cl, bch_data_insert_keys, op->wq);
}

static void bch_data_insert_error(struct closure *cl)
{
        struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

        /*
         * Our data write just errored, which means we've got a bunch of keys to
         * insert that point to data that wasn't successfully written.
         *
         * We don't have to insert those keys but we still have to invalidate
         * that region of the cache - so, if we just strip off all the pointers
         * from the keys we'll accomplish just that.
         */

        struct bkey *src = op->insert_keys.keys, *dst = op->insert_keys.keys;

        while (src != op->insert_keys.top) {
                struct bkey *n = bkey_next(src);

                SET_KEY_PTRS(src, 0);
                memmove(dst, src, bkey_bytes(src));

                dst = bkey_next(dst);
                src = n;
        }

        op->insert_keys.top = dst;

        bch_data_insert_keys(cl);
}

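/*
 * Completion for the cache-device data write: a writeback error is fatal
 * and propagates to op->status; for writethrough/writearound writes we
 * fall back to bch_data_insert_error() to strip the pointers, and for
 * replaces (cache miss fills) the insert is simply abandoned.
 */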
static void bch_data_insert_endio(struct bio *bio)
{
        struct closure *cl = bio->bi_private;
        struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

        if (bio->bi_status) {
                /* TODO: We could try to recover from this. */
                if (op->writeback)
                        op->status = bio->bi_status;
                else if (!op->replace)
                        set_closure_fn(cl, bch_data_insert_error, op->wq);
                else
                        set_closure_fn(cl, NULL, NULL);
        }

        bch_bbio_endio(op->c, bio, bio->bi_status, "writing data to cache");
}

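/*
 * The main insert loop: allocate sectors on the cache device, split the
 * bio to fit what was allocated, build a key for each fragment and
 * submit the data writes. When the keylist fills up, flush it via
 * bch_data_insert_keys() and resume here.
 */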
static void bch_data_insert_start(struct closure *cl)
{
        struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
        struct bio *bio = op->bio, *n;

        if (op->bypass)
                return bch_data_invalidate(cl);

        if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0)
                wake_up_gc(op->c);

        /*
         * Journal writes are marked REQ_PREFLUSH; if the original write was a
         * flush, it'll wait on the journal write.
         */
        bio->bi_opf &= ~(REQ_PREFLUSH|REQ_FUA);

        do {
                unsigned i;
                struct bkey *k;
                struct bio_set *split = op->c->bio_split;

                /* 1 for the device pointer and 1 for the checksum */
                if (bch_keylist_realloc(&op->insert_keys,
                                        3 + (op->csum ? 1 : 0),
                                        op->c)) {
                        continue_at(cl, bch_data_insert_keys, op->wq);
                        return;
                }

                k = op->insert_keys.top;
                bkey_init(k);
                SET_KEY_INODE(k, op->inode);
                SET_KEY_OFFSET(k, bio->bi_iter.bi_sector);

                if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
                                       op->write_point, op->write_prio,
                                       op->writeback))
                        goto err;

                n = bio_next_split(bio, KEY_SIZE(k), GFP_NOIO, split);

                n->bi_end_io    = bch_data_insert_endio;
                n->bi_private   = cl;

                if (op->writeback) {
                        SET_KEY_DIRTY(k, true);

                        for (i = 0; i < KEY_PTRS(k); i++)
                                SET_GC_MARK(PTR_BUCKET(op->c, k, i),
                                            GC_MARK_DIRTY);
                }

                SET_KEY_CSUM(k, op->csum);
                if (KEY_CSUM(k))
                        bio_csum(n, k);

                trace_bcache_cache_insert(k);
                bch_keylist_push(&op->insert_keys);

                bio_set_op_attrs(n, REQ_OP_WRITE, 0);
                bch_submit_bbio(n, op->c, k, 0);
        } while (n != bio);

        op->insert_data_done = true;
        continue_at(cl, bch_data_insert_keys, op->wq);
        return;
err:
        /* bch_alloc_sectors() blocks if op->writeback is true */
        BUG_ON(op->writeback);

        /*
         * But if it's not a writeback write we'd rather just bail out if
         * there aren't any buckets ready to write to - it might take a while
         * and we might be starving btree writes for gc or something.
         */

        if (!op->replace) {
                /*
                 * Writethrough write: We can't complete the write until we've
                 * updated the index. But we don't want to delay the write while
                 * we wait for buckets to be freed up, so just invalidate the
                 * rest of the write.
                 */
                op->bypass = true;
                return bch_data_invalidate(cl);
        } else {
                /*
                 * From a cache miss, we can just insert the keys for the data
                 * we have written or bail out if we didn't do anything.
                 */
                op->insert_data_done = true;
                bio_put(bio);

                if (!bch_keylist_empty(&op->insert_keys))
                        continue_at(cl, bch_data_insert_keys, op->wq);
                else
                        closure_return(cl);
        }
}

/**
 * bch_data_insert - stick some data in the cache
 * @cl: closure pointer
 *
 * This is the starting point for any data to end up in a cache device; it could
 * be from a normal write, or a writeback write, or a write to a flash only
 * volume - it's also used by the moving garbage collector to compact data in
 * mostly empty buckets.
 *
 * It first writes the data to the cache, creating a list of keys to be inserted
 * (if the data had to be fragmented there will be multiple keys); after the
 * data is written it calls bch_journal, and after the keys have been added to
 * the next journal write they're inserted into the btree.
 *
 * It inserts the data in op->bio; bi_sector is used for the key offset,
 * and op->inode is used for the key inode.
 *
 * If op->bypass is true, instead of inserting the data it invalidates the
 * region of the cache represented by op->bio and op->inode.
 */
void bch_data_insert(struct closure *cl)
{
        struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);

        trace_bcache_write(op->c, op->inode, op->bio,
                           op->writeback, op->bypass);

        bch_keylist_init(&op->insert_keys);
        bio_get(op->bio);
        bch_data_insert_start(cl);
}

/* Congested? */

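/*
 * Returns 0 if congestion tracking is disabled or the cache set isn't
 * currently congested; otherwise returns a sector-count threshold that
 * check_should_bypass() compares against a task's recent IO - the more
 * congested the cache, the smaller the threshold and the more IO gets
 * bypassed. Subtracting the popcount of a random word at the end appears
 * to be deliberate jitter so bypass decisions don't synchronize.
 */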
unsigned bch_get_congested(struct cache_set *c)
{
        int i;
        long rand;

        if (!c->congested_read_threshold_us &&
            !c->congested_write_threshold_us)
                return 0;

        i = (local_clock_us() - c->congested_last_us) / 1024;
        if (i < 0)
                return 0;

        i += atomic_read(&c->congested);
        if (i >= 0)
                return 0;

        i += CONGESTED_MAX;

        if (i > 0)
                i = fract_exp_two(i, 6);

        rand = get_random_int();
        i -= bitmap_weight(&rand, BITS_PER_LONG);

        return i > 0 ? i : 1;
}

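/*
 * Fold the task's just-finished run of sequential IO into its
 * exponentially-weighted moving average, then reset the running counter.
 */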
static void add_sequential(struct task_struct *t)
{
        ewma_add(t->sequential_io_avg,
                 t->sequential_io, 8, 0);

        t->sequential_io = 0;
}

static struct hlist_head *iohash(struct cached_dev *dc, uint64_t k)
{
        return &dc->io_hash[hash_64(k, RECENT_IO_BITS)];
}

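/*
 * Decide whether this IO should skip the cache entirely. Detached
 * devices, a nearly-full cache, discards, unaligned IO and writes in
 * writearound mode always bypass; otherwise recent IO is tracked in a
 * small hash/LRU of struct io, and we bypass when a task's IO looks too
 * sequential or the cache is congested.
 */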
static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
{
        struct cache_set *c = dc->disk.c;
        unsigned mode = cache_mode(dc);
        unsigned sectors, congested = bch_get_congested(c);
        struct task_struct *task = current;
        struct io *i;

        if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
            c->gc_stats.in_use > CUTOFF_CACHE_ADD ||
            (bio_op(bio) == REQ_OP_DISCARD))
                goto skip;

        if (mode == CACHE_MODE_NONE ||
            (mode == CACHE_MODE_WRITEAROUND &&
             op_is_write(bio_op(bio))))
                goto skip;

        /*
         * Flag for bypass if the IO is for read-ahead or background,
         * unless the read-ahead request is for metadata (e.g., for gfs2).
         */
        if (bio->bi_opf & (REQ_RAHEAD|REQ_BACKGROUND) &&
            !(bio->bi_opf & REQ_META))
                goto skip;

        if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
            bio_sectors(bio) & (c->sb.block_size - 1)) {
                pr_debug("skipping unaligned io");
                goto skip;
        }

        if (bypass_torture_test(dc)) {
                if ((get_random_int() & 3) == 3)
                        goto skip;
                else
                        goto rescale;
        }

        if (!congested && !dc->sequential_cutoff)
                goto rescale;

        spin_lock(&dc->io_lock);

        hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash)
                if (i->last == bio->bi_iter.bi_sector &&
                    time_before(jiffies, i->jiffies))
                        goto found;

        i = list_first_entry(&dc->io_lru, struct io, lru);

        add_sequential(task);
        i->sequential = 0;
found:
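        /* only accumulate if i->sequential wouldn't overflow */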
        if (i->sequential + bio->bi_iter.bi_size > i->sequential)
                i->sequential   += bio->bi_iter.bi_size;

        i->last                  = bio_end_sector(bio);
        i->jiffies               = jiffies + msecs_to_jiffies(5000);
        task->sequential_io      = i->sequential;

        hlist_del(&i->hash);
        hlist_add_head(&i->hash, iohash(dc, i->last));
        list_move_tail(&i->lru, &dc->io_lru);

        spin_unlock(&dc->io_lock);

        sectors = max(task->sequential_io,
                      task->sequential_io_avg) >> 9;

        if (dc->sequential_cutoff &&
            sectors >= dc->sequential_cutoff >> 9) {
                trace_bcache_bypass_sequential(bio);
                goto skip;
        }

        if (congested && sectors >= congested) {
                trace_bcache_bypass_congested(bio);
                goto skip;
        }

rescale:
        bch_rescale_priorities(c, bio_sectors(bio));
        return false;
skip:
        bch_mark_sectors_bypassed(c, dc, bio_sectors(bio));
        return true;
}

/* Cache lookup */

struct search {
        /* Stack frame for bio_complete */
        struct closure          cl;

        struct bbio             bio;
        struct bio              *orig_bio;
        struct bio              *cache_miss;
        struct bcache_device    *d;

        unsigned                insert_bio_sectors;
        unsigned                recoverable:1;
        unsigned                write:1;
        unsigned                read_dirty_data:1;
        unsigned                cache_missed:1;

        unsigned long           start_time;

        struct btree_op         op;
        struct data_insert_op   iop;
};

static void bch_cache_read_endio(struct bio *bio)
{
        struct bbio *b = container_of(bio, struct bbio, bio);
        struct closure *cl = bio->bi_private;
        struct search *s = container_of(cl, struct search, cl);

        /*
         * If the bucket was reused while our bio was in flight, we might have
         * read the wrong data. Set s->iop.status but don't pass the error to
         * bch_bbio_endio() so it doesn't get counted against the cache device;
         * we'll still reread the data from the backing device.
         */

        if (bio->bi_status)
                s->iop.status = bio->bi_status;
        else if (!KEY_DIRTY(&b->key) &&
                 ptr_stale(s->iop.c, &b->key, 0)) {
                atomic_long_inc(&s->iop.c->cache_read_races);
                s->iop.status = BLK_STS_IOERR;
        }

        bch_bbio_endio(s->iop.c, bio, bio->bi_status, "reading from cache");
}

/*
 * Read from a single key, handling the initial cache miss if the key starts in
 * the middle of the bio
 */
static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
{
        struct search *s = container_of(op, struct search, op);
        struct bio *n, *bio = &s->bio.bio;
        struct bkey *bio_key;
        unsigned ptr;

        if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)
                return MAP_CONTINUE;

        if (KEY_INODE(k) != s->iop.inode ||
            KEY_START(k) > bio->bi_iter.bi_sector) {
                unsigned bio_sectors = bio_sectors(bio);
                unsigned sectors = KEY_INODE(k) == s->iop.inode
                        ? min_t(uint64_t, INT_MAX,
                                KEY_START(k) - bio->bi_iter.bi_sector)
                        : INT_MAX;

                int ret = s->d->cache_miss(b, s, bio, sectors);
                if (ret != MAP_CONTINUE)
                        return ret;

                /* if this was a complete miss we shouldn't get here */
                BUG_ON(bio_sectors <= sectors);
        }

        if (!KEY_SIZE(k))
                return MAP_CONTINUE;

        /* XXX: figure out best pointer - for multiple cache devices */
        ptr = 0;

        PTR_BUCKET(b->c, k, ptr)->prio = INITIAL_PRIO;

        if (KEY_DIRTY(k))
                s->read_dirty_data = true;

        n = bio_next_split(bio, min_t(uint64_t, INT_MAX,
                                      KEY_OFFSET(k) - bio->bi_iter.bi_sector),
                           GFP_NOIO, s->d->bio_split);

        bio_key = &container_of(n, struct bbio, bio)->key;
        bch_bkey_copy_single_ptr(bio_key, k, ptr);

        bch_cut_front(&KEY(s->iop.inode, n->bi_iter.bi_sector, 0), bio_key);
        bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key);

        n->bi_end_io    = bch_cache_read_endio;
        n->bi_private   = &s->cl;

        /*
         * The bucket we're reading from might be reused while our bio
         * is in flight, and we could then end up reading the wrong
         * data.
         *
         * We guard against this by checking (in bch_cache_read_endio()) if
         * the pointer is stale again; if so, we treat it as an error
         * and reread from the backing device (but we don't pass that
         * error up anywhere).
         */

        __bch_submit_bbio(n, b->c);
        return n == bio ? MAP_DONE : MAP_CONTINUE;
}

static void cache_lookup(struct closure *cl)
{
        struct search *s = container_of(cl, struct search, iop.cl);
        struct bio *bio = &s->bio.bio;
        int ret;

        bch_btree_op_init(&s->op, -1);

        ret = bch_btree_map_keys(&s->op, s->iop.c,
                                 &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
                                 cache_lookup_fn, MAP_END_KEY);
        if (ret == -EAGAIN) {
                continue_at(cl, cache_lookup, bcache_wq);
                return;
        }

        closure_return(cl);
}

/* Common code for the make_request functions */

static void request_endio(struct bio *bio)
{
        struct closure *cl = bio->bi_private;

        if (bio->bi_status) {
                struct search *s = container_of(cl, struct search, cl);
                s->iop.status = bio->bi_status;
                /* Only cache read errors are recoverable */
                s->recoverable = false;
        }

        bio_put(bio);
        closure_put(cl);
}

static void bio_complete(struct search *s)
{
        if (s->orig_bio) {
                struct request_queue *q = s->orig_bio->bi_disk->queue;
                generic_end_io_acct(q, bio_data_dir(s->orig_bio),
                                    &s->d->disk->part0, s->start_time);

                trace_bcache_request_end(s->d, s->orig_bio);
                s->orig_bio->bi_status = s->iop.status;
                bio_endio(s->orig_bio);
                s->orig_bio = NULL;
        }
}

static void do_bio_hook(struct search *s, struct bio *orig_bio)
{
        struct bio *bio = &s->bio.bio;

        bio_init(bio, NULL, 0);
        __bio_clone_fast(bio, orig_bio);
        bio->bi_end_io          = request_endio;
        bio->bi_private         = &s->cl;

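        /*
         * The clone may be submitted more than once (e.g. after being
         * split in the lookup and miss paths), and each completion drops
         * a reference in request_endio(); starting the count at 3
         * presumably covers the worst case.
         */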
        bio_cnt_set(bio, 3);
}

static void search_free(struct closure *cl)
{
        struct search *s = container_of(cl, struct search, cl);
        bio_complete(s);

        if (s->iop.bio)
                bio_put(s->iop.bio);

        closure_debug_destroy(cl);
        mempool_free(s, s->d->c->search);
}

static inline struct search *search_alloc(struct bio *bio,
                                          struct bcache_device *d)
{
        struct search *s;

        s = mempool_alloc(d->c->search, GFP_NOIO);

        closure_init(&s->cl, NULL);
        do_bio_hook(s, bio);

        s->orig_bio             = bio;
        s->cache_miss           = NULL;
        s->cache_missed         = 0;
        s->d                    = d;
        s->recoverable          = 1;
        s->write                = op_is_write(bio_op(bio));
        s->read_dirty_data      = 0;
        s->start_time           = jiffies;

        s->iop.c                = d->c;
        s->iop.bio              = NULL;
        s->iop.inode            = d->id;
        s->iop.write_point      = hash_long((unsigned long) current, 16);
        s->iop.write_prio       = 0;
        s->iop.status           = 0;
        s->iop.flags            = 0;
        s->iop.flush_journal    = op_is_flush(bio->bi_opf);
        s->iop.wq               = bcache_wq;

        return s;
}

/* Cached devices */

static void cached_dev_bio_complete(struct closure *cl)
{
        struct search *s = container_of(cl, struct search, cl);
        struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

        search_free(cl);
        cached_dev_put(dc);
}

/* Process reads */

static void cached_dev_cache_miss_done(struct closure *cl)
{
        struct search *s = container_of(cl, struct search, cl);

        if (s->iop.replace_collision)
                bch_mark_cache_miss_collision(s->iop.c, s->d);

        if (s->iop.bio)
                bio_free_pages(s->iop.bio);

        cached_dev_bio_complete(cl);
}

static void cached_dev_read_error(struct closure *cl)
{
        struct search *s = container_of(cl, struct search, cl);
        struct bio *bio = &s->bio.bio;
        struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

        /*
         * If the cache device is dirty (dc->has_dirty is non-zero), then
         * recovering a failed read request from the cache device may return
         * stale data. So read failure recovery is only permitted when the
         * cache device is clean.
         */
        if (s->recoverable &&
            (dc && !atomic_read(&dc->has_dirty))) {
                /* Retry from the backing device: */
                trace_bcache_read_retry(s->orig_bio);

                s->iop.status = 0;
                do_bio_hook(s, s->orig_bio);

                /* XXX: invalidate cache */

                closure_bio_submit(bio, cl);
        }

        continue_at(cl, cached_dev_cache_miss_done, NULL);
}

static void cached_dev_read_done(struct closure *cl)
{
        struct search *s = container_of(cl, struct search, cl);
        struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

        /*
         * We had a cache miss; cache_bio now contains data ready to be inserted
         * into the cache.
         *
         * First, we copy the data we just read from cache_bio's bounce buffers
         * to the buffers the original bio pointed to:
         */

        if (s->iop.bio) {
                bio_reset(s->iop.bio);
                s->iop.bio->bi_iter.bi_sector = s->cache_miss->bi_iter.bi_sector;
                bio_copy_dev(s->iop.bio, s->cache_miss);
                s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
                bch_bio_map(s->iop.bio, NULL);

                bio_copy_data(s->cache_miss, s->iop.bio);

                bio_put(s->cache_miss);
                s->cache_miss = NULL;
        }

        if (verify(dc) && s->recoverable && !s->read_dirty_data)
                bch_data_verify(dc, s->orig_bio);

        bio_complete(s);

        if (s->iop.bio &&
            !test_bit(CACHE_SET_STOPPING, &s->iop.c->flags)) {
                BUG_ON(!s->iop.replace);
                closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
        }

        continue_at(cl, cached_dev_cache_miss_done, NULL);
}

static void cached_dev_read_done_bh(struct closure *cl)
{
        struct search *s = container_of(cl, struct search, cl);
        struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

        bch_mark_cache_accounting(s->iop.c, s->d,
                                  !s->cache_missed, s->iop.bypass);
        trace_bcache_read(s->orig_bio, !s->cache_miss, s->iop.bypass);

        if (s->iop.status)
                continue_at_nobarrier(cl, cached_dev_read_error, bcache_wq);
        else if (s->iop.bio || verify(dc))
                continue_at_nobarrier(cl, cached_dev_read_done, bcache_wq);
        else
                continue_at_nobarrier(cl, cached_dev_bio_complete, NULL);
}

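/*
 * Handle the part of a read the cache couldn't satisfy: send the miss to
 * the backing device and, unless we're bypassing, reserve the range with
 * a check key, optionally extend the read by up to dc->readahead sectors,
 * and allocate a second bio (cache_bio) whose completion will populate
 * the cache via bch_data_insert().
 */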
static int cached_dev_cache_miss(struct btree *b, struct search *s,
                                 struct bio *bio, unsigned sectors)
{
        int ret = MAP_CONTINUE;
        unsigned reada = 0;
        struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);
        struct bio *miss, *cache_bio;

        s->cache_missed = 1;

        if (s->cache_miss || s->iop.bypass) {
                miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);
                ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
                goto out_submit;
        }

        if (!(bio->bi_opf & REQ_RAHEAD) &&
            !(bio->bi_opf & REQ_META) &&
            s->iop.c->gc_stats.in_use < CUTOFF_CACHE_READA)
                reada = min_t(sector_t, dc->readahead >> 9,
                              get_capacity(bio->bi_disk) - bio_end_sector(bio));

        s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);

        s->iop.replace_key = KEY(s->iop.inode,
                                 bio->bi_iter.bi_sector + s->insert_bio_sectors,
                                 s->insert_bio_sectors);

        ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key);
        if (ret)
                return ret;

        s->iop.replace = true;

        miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);

        /* btree_search_recurse()'s btree iterator is no good anymore */
        ret = miss == bio ? MAP_DONE : -EINTR;

        cache_bio = bio_alloc_bioset(GFP_NOWAIT,
                        DIV_ROUND_UP(s->insert_bio_sectors, PAGE_SECTORS),
                        dc->disk.bio_split);
        if (!cache_bio)
                goto out_submit;

        cache_bio->bi_iter.bi_sector    = miss->bi_iter.bi_sector;
        bio_copy_dev(cache_bio, miss);
        cache_bio->bi_iter.bi_size      = s->insert_bio_sectors << 9;

        cache_bio->bi_end_io    = request_endio;
        cache_bio->bi_private   = &s->cl;

        bch_bio_map(cache_bio, NULL);
        if (bio_alloc_pages(cache_bio, __GFP_NOWARN|GFP_NOIO))
                goto out_put;

        if (reada)
                bch_mark_cache_readahead(s->iop.c, s->d);

        s->cache_miss   = miss;
        s->iop.bio      = cache_bio;
        bio_get(cache_bio);
        closure_bio_submit(cache_bio, &s->cl);

        return ret;
out_put:
        bio_put(cache_bio);
out_submit:
        miss->bi_end_io         = request_endio;
        miss->bi_private        = &s->cl;
        closure_bio_submit(miss, &s->cl);
        return ret;
}

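/*
 * Kick off the btree lookup for a read; when the lookup's closure
 * finishes, cached_dev_read_done_bh() picks the error, cache-miss-fill
 * or fast completion path.
 */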
static void cached_dev_read(struct cached_dev *dc, struct search *s)
{
        struct closure *cl = &s->cl;

        closure_call(&s->iop.cl, cache_lookup, NULL, cl);
        continue_at(cl, cached_dev_read_done_bh, NULL);
}

/* Process writes */

static void cached_dev_write_complete(struct closure *cl)
{
        struct search *s = container_of(cl, struct search, cl);
        struct cached_dev *dc = container_of(s->d, struct cached_dev, disk);

        up_read_non_owner(&dc->writeback_lock);
        cached_dev_bio_complete(cl);
}

static void cached_dev_write(struct cached_dev *dc, struct search *s)
{
        struct closure *cl = &s->cl;
        struct bio *bio = &s->bio.bio;
        struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0);
        struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);

        bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end);

        down_read_non_owner(&dc->writeback_lock);
        if (bch_keybuf_check_overlapping(&dc->writeback_keys, &start, &end)) {
                /*
                 * We overlap with some dirty data undergoing background
                 * writeback, force this write to writeback
                 */
                s->iop.bypass = false;
                s->iop.writeback = true;
        }

        /*
         * Discards aren't _required_ to do anything, so skipping if
         * check_overlapping returned true is ok
         *
         * But check_overlapping drops dirty keys for which io hasn't started,
         * so we still want to call it.
         */
        if (bio_op(bio) == REQ_OP_DISCARD)
                s->iop.bypass = true;

        if (should_writeback(dc, s->orig_bio,
                             cache_mode(dc),
                             s->iop.bypass)) {
                s->iop.bypass = false;
                s->iop.writeback = true;
        }

        if (s->iop.bypass) {
                s->iop.bio = s->orig_bio;
                bio_get(s->iop.bio);

                if ((bio_op(bio) != REQ_OP_DISCARD) ||
                    blk_queue_discard(bdev_get_queue(dc->bdev)))
                        closure_bio_submit(bio, cl);
        } else if (s->iop.writeback) {
                bch_writeback_add(dc);
                s->iop.bio = bio;

                if (bio->bi_opf & REQ_PREFLUSH) {
                        /* Also need to send a flush to the backing device */
                        struct bio *flush = bio_alloc_bioset(GFP_NOIO, 0,
                                                             dc->disk.bio_split);

                        bio_copy_dev(flush, bio);
                        flush->bi_end_io = request_endio;
                        flush->bi_private = cl;
                        flush->bi_opf = REQ_OP_WRITE | REQ_PREFLUSH;

                        closure_bio_submit(flush, cl);
                }
        } else {
                s->iop.bio = bio_clone_fast(bio, GFP_NOIO, dc->disk.bio_split);

                closure_bio_submit(bio, cl);
        }

        closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
        continue_at(cl, cached_dev_write_complete, NULL);
}

static void cached_dev_nodata(struct closure *cl)
{
        struct search *s = container_of(cl, struct search, cl);
        struct bio *bio = &s->bio.bio;

        if (s->iop.flush_journal)
                bch_journal_meta(s->iop.c, cl);

        /* If it's a flush, we send the flush to the backing device too */
        closure_bio_submit(bio, cl);

        continue_at(cl, cached_dev_bio_complete, NULL);
}

/* Cached devices - read & write stuff */

static blk_qc_t cached_dev_make_request(struct request_queue *q,
                                        struct bio *bio)
{
        struct search *s;
        struct bcache_device *d = bio->bi_disk->private_data;
        struct cached_dev *dc = container_of(d, struct cached_dev, disk);
        int rw = bio_data_dir(bio);

        generic_start_io_acct(q, rw, bio_sectors(bio), &d->disk->part0);

        bio_set_dev(bio, dc->bdev);
        bio->bi_iter.bi_sector += dc->sb.data_offset;

        if (cached_dev_get(dc)) {
                s = search_alloc(bio, d);
                trace_bcache_request_start(s->d, bio);

                if (!bio->bi_iter.bi_size) {
                        /*
                         * can't call bch_journal_meta from under
                         * generic_make_request
                         */
                        continue_at_nobarrier(&s->cl,
                                              cached_dev_nodata,
                                              bcache_wq);
                } else {
                        s->iop.bypass = check_should_bypass(dc, bio);

                        if (rw)
                                cached_dev_write(dc, s);
                        else
                                cached_dev_read(dc, s);
                }
        } else {
                if ((bio_op(bio) == REQ_OP_DISCARD) &&
                    !blk_queue_discard(bdev_get_queue(dc->bdev)))
                        bio_endio(bio);
                else
                        generic_make_request(bio);
        }

        return BLK_QC_T_NONE;
}

static int cached_dev_ioctl(struct bcache_device *d, fmode_t mode,
                            unsigned int cmd, unsigned long arg)
{
        struct cached_dev *dc = container_of(d, struct cached_dev, disk);
        return __blkdev_driver_ioctl(dc->bdev, mode, cmd, arg);
}

static int cached_dev_congested(void *data, int bits)
{
        struct bcache_device *d = data;
        struct cached_dev *dc = container_of(d, struct cached_dev, disk);
        struct request_queue *q = bdev_get_queue(dc->bdev);
        int ret = 0;

        if (bdi_congested(q->backing_dev_info, bits))
                return 1;

        if (cached_dev_get(dc)) {
                unsigned i;
                struct cache *ca;

                for_each_cache(ca, d->c, i) {
                        q = bdev_get_queue(ca->bdev);
                        ret |= bdi_congested(q->backing_dev_info, bits);
                }

                cached_dev_put(dc);
        }

        return ret;
}

void bch_cached_dev_request_init(struct cached_dev *dc)
{
        struct gendisk *g = dc->disk.disk;

        g->queue->make_request_fn               = cached_dev_make_request;
        g->queue->backing_dev_info->congested_fn = cached_dev_congested;
        dc->disk.cache_miss                     = cached_dev_cache_miss;
        dc->disk.ioctl                          = cached_dev_ioctl;
}

/* Flash backed devices */

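/*
 * Flash-only volumes have no backing device, so a cache miss is just a
 * hole in the data: zero-fill the unmapped part of the bio and advance
 * past it.
 */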
static int flash_dev_cache_miss(struct btree *b, struct search *s,
                                struct bio *bio, unsigned sectors)
{
        unsigned bytes = min(sectors, bio_sectors(bio)) << 9;

        swap(bio->bi_iter.bi_size, bytes);
        zero_fill_bio(bio);
        swap(bio->bi_iter.bi_size, bytes);

        bio_advance(bio, bytes);

        if (!bio->bi_iter.bi_size)
                return MAP_DONE;

        return MAP_CONTINUE;
}

static void flash_dev_nodata(struct closure *cl)
{
        struct search *s = container_of(cl, struct search, cl);

        if (s->iop.flush_journal)
                bch_journal_meta(s->iop.c, cl);

        continue_at(cl, search_free, NULL);
}

static blk_qc_t flash_dev_make_request(struct request_queue *q,
                                             struct bio *bio)
{
        struct search *s;
        struct closure *cl;
        struct bcache_device *d = bio->bi_disk->private_data;
        int rw = bio_data_dir(bio);

        generic_start_io_acct(q, rw, bio_sectors(bio), &d->disk->part0);

        s = search_alloc(bio, d);
        cl = &s->cl;
        bio = &s->bio.bio;

        trace_bcache_request_start(s->d, bio);

        if (!bio->bi_iter.bi_size) {
                /*
                 * can't call bch_journal_meta from under
                 * generic_make_request
                 */
                continue_at_nobarrier(&s->cl,
                                      flash_dev_nodata,
                                      bcache_wq);
                return BLK_QC_T_NONE;
        } else if (rw) {
                bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
                                        &KEY(d->id, bio->bi_iter.bi_sector, 0),
                                        &KEY(d->id, bio_end_sector(bio), 0));

                s->iop.bypass           = (bio_op(bio) == REQ_OP_DISCARD) != 0;
                s->iop.writeback        = true;
                s->iop.bio              = bio;

                closure_call(&s->iop.cl, bch_data_insert, NULL, cl);
        } else {
                closure_call(&s->iop.cl, cache_lookup, NULL, cl);
        }

        continue_at(cl, search_free, NULL);
        return BLK_QC_T_NONE;
}

static int flash_dev_ioctl(struct bcache_device *d, fmode_t mode,
                           unsigned int cmd, unsigned long arg)
{
        return -ENOTTY;
}

static int flash_dev_congested(void *data, int bits)
{
        struct bcache_device *d = data;
        struct request_queue *q;
        struct cache *ca;
        unsigned i;
        int ret = 0;

        for_each_cache(ca, d->c, i) {
                q = bdev_get_queue(ca->bdev);
                ret |= bdi_congested(q->backing_dev_info, bits);
        }

        return ret;
}

void bch_flash_dev_request_init(struct bcache_device *d)
{
        struct gendisk *g = d->disk;

        g->queue->make_request_fn               = flash_dev_make_request;
        g->queue->backing_dev_info->congested_fn = flash_dev_congested;
        d->cache_miss                           = flash_dev_cache_miss;
        d->ioctl                                = flash_dev_ioctl;
}

void bch_request_exit(void)
{
        if (bch_search_cache)
                kmem_cache_destroy(bch_search_cache);
}

int __init bch_request_init(void)
{
        bch_search_cache = KMEM_CACHE(search, 0);
        if (!bch_search_cache)
                return -ENOMEM;

        return 0;
}