1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
4  *
5  * Uses a block device as cache for other block devices; optimized for SSDs.
6  * All allocation is done in buckets, which should match the erase block size
7  * of the device.
8  *
9  * Buckets containing cached data are kept on a heap sorted by priority;
10  * bucket priority is increased on cache hit, and periodically all the buckets
11  * on the heap have their priority scaled down. This currently is just used as
12  * an LRU but in the future should allow for more intelligent heuristics.
13  *
14  * Buckets have an 8 bit counter; freeing is accomplished by incrementing the
15  * counter. Garbage collection is used to remove stale pointers.
16  *
17  * Indexing is done via a btree; nodes are not necessarily fully sorted, rather
18  * as keys are inserted we only sort the pages that have not yet been written.
19  * When garbage collection is run, we resort the entire node.
20  *
21  * All configuration is done via sysfs; see Documentation/admin-guide/bcache.rst.
22  */
23
24 #include "bcache.h"
25 #include "btree.h"
26 #include "debug.h"
27 #include "extents.h"
28
29 #include <linux/slab.h>
30 #include <linux/bitops.h>
31 #include <linux/hash.h>
32 #include <linux/kthread.h>
33 #include <linux/prefetch.h>
34 #include <linux/random.h>
35 #include <linux/rcupdate.h>
36 #include <linux/sched/clock.h>
37 #include <linux/rculist.h>
38 #include <linux/delay.h>
39 #include <trace/events/bcache.h>
40
41 /*
42  * Todo:
43  * register_bcache: Return errors out to userspace correctly
44  *
45  * Writeback: don't undirty key until after a cache flush
46  *
47  * Create an iterator for key pointers
48  *
49  * On btree write error, mark bucket such that it won't be freed from the cache
50  *
51  * Journalling:
52  *   Check for bad keys in replay
53  *   Propagate barriers
54  *   Refcount journal entries in journal_replay
55  *
56  * Garbage collection:
57  *   Finish incremental gc
58  *   Gc should free old UUIDs, data for invalid UUIDs
59  *
60  * Provide a way to list backing device UUIDs we have data cached for, and
61  * probably how long it's been since we've seen them, and a way to invalidate
62  * dirty data for devices that will never be attached again
63  *
64  * Keep 1 min/5 min/15 min statistics of how busy a block device has been, so
65  * that based on that and how much dirty data we have we can keep writeback
66  * from being starved
67  *
68  * Add a tracepoint or somesuch to watch for writeback starvation
69  *
70  * When btree depth > 1 and splitting an interior node, we have to make sure
71  * alloc_bucket() cannot fail. This should be true but is not completely
72  * obvious.
73  *
74  * Plugging?
75  *
76  * If data write is less than hard sector size of ssd, round up offset in open
77  * bucket to the next whole sector
78  *
79  * Superblock needs to be fleshed out for multiple cache devices
80  *
81  * Add a sysfs tunable for the number of writeback IOs in flight
82  *
83  * Add a sysfs tunable for the number of open data buckets
84  *
85  * IO tracking: Can we track when one process is doing io on behalf of another?
86  * IO tracking: Don't use just an average, weigh more recent stuff higher
87  *
88  * Test module load/unload
89  */
90
91 #define MAX_NEED_GC             64
92 #define MAX_SAVE_PRIO           72
93 #define MAX_GC_TIMES            100
94 #define MIN_GC_NODES            100
95 #define GC_SLEEP_MS             100
96
97 #define PTR_DIRTY_BIT           (((uint64_t) 1 << 36))
98
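/*
 * Identifies a btree node by its on-disk location: the raw value of its
 * first pointer (device/offset bits) combined with that pointer's
 * generation. Used as the lookup key for the in-memory btree node cache
 * (see mca_hash()/mca_find() below).
 */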
99 #define PTR_HASH(c, k)                                                  \
100         (((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))
101
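/*
 * Whether an insert needs a write lock on this node: true when the
 * node's level is at or below the level the operation write-locks
 * (op->lock).
 */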
102 #define insert_lock(s, b)       ((b)->level <= (s)->lock)
103
104
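/* First block in the node that has not yet been written to disk. */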
105 static inline struct bset *write_block(struct btree *b)
106 {
107         return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c);
108 }
109
110 static void bch_btree_init_next(struct btree *b)
111 {
112         /* If not a leaf node, always sort */
113         if (b->level && b->keys.nsets)
114                 bch_btree_sort(&b->keys, &b->c->sort);
115         else
116                 bch_btree_sort_lazy(&b->keys, &b->c->sort);
117
118         if (b->written < btree_blocks(b))
119                 bch_bset_init_next(&b->keys, write_block(b),
120                                    bset_magic(&b->c->sb));
121
122 }
123
124 /* Btree key manipulation */
125
126 void bkey_put(struct cache_set *c, struct bkey *k)
127 {
128         unsigned int i;
129
130         for (i = 0; i < KEY_PTRS(k); i++)
131                 if (ptr_available(c, k, i))
132                         atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
133 }
134
135 /* Btree IO */
136
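/*
 * Checksum of a bset: crc64 over everything after the 64 bit csum field
 * itself, seeded with the node's first pointer.
 */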
137 static uint64_t btree_csum_set(struct btree *b, struct bset *i)
138 {
139         uint64_t crc = b->key.ptr[0];
140         void *data = (void *) i + 8, *end = bset_bkey_last(i);
141
142         crc = bch_crc64_update(crc, data, end - data);
143         return crc ^ 0xffffffffffffffffULL;
144 }
145
146 void bch_btree_node_read_done(struct btree *b)
147 {
148         const char *err = "bad btree header";
149         struct bset *i = btree_bset_first(b);
150         struct btree_iter *iter;
151
152         /*
153          * c->fill_iter can allocate an iterator with more memory space
154          * than static MAX_BSETS.
155          * See the comment around cache_set->fill_iter.
156          */
157         iter = mempool_alloc(&b->c->fill_iter, GFP_NOIO);
158         iter->size = b->c->sb.bucket_size / b->c->sb.block_size;
159         iter->used = 0;
160
161 #ifdef CONFIG_BCACHE_DEBUG
162         iter->b = &b->keys;
163 #endif
164
165         if (!i->seq)
166                 goto err;
167
168         for (;
169              b->written < btree_blocks(b) && i->seq == b->keys.set[0].data->seq;
170              i = write_block(b)) {
171                 err = "unsupported bset version";
172                 if (i->version > BCACHE_BSET_VERSION)
173                         goto err;
174
175                 err = "bad btree header";
176                 if (b->written + set_blocks(i, block_bytes(b->c)) >
177                     btree_blocks(b))
178                         goto err;
179
180                 err = "bad magic";
181                 if (i->magic != bset_magic(&b->c->sb))
182                         goto err;
183
184                 err = "bad checksum";
185                 switch (i->version) {
186                 case 0:
187                         if (i->csum != csum_set(i))
188                                 goto err;
189                         break;
190                 case BCACHE_BSET_VERSION:
191                         if (i->csum != btree_csum_set(b, i))
192                                 goto err;
193                         break;
194                 }
195
196                 err = "empty set";
197                 if (i != b->keys.set[0].data && !i->keys)
198                         goto err;
199
200                 bch_btree_iter_push(iter, i->start, bset_bkey_last(i));
201
202                 b->written += set_blocks(i, block_bytes(b->c));
203         }
204
205         err = "corrupted btree";
206         for (i = write_block(b);
207              bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key);
208              i = ((void *) i) + block_bytes(b->c))
209                 if (i->seq == b->keys.set[0].data->seq)
210                         goto err;
211
212         bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort);
213
214         i = b->keys.set[0].data;
215         err = "short btree key";
216         if (b->keys.set[0].size &&
217             bkey_cmp(&b->key, &b->keys.set[0].end) < 0)
218                 goto err;
219
220         if (b->written < btree_blocks(b))
221                 bch_bset_init_next(&b->keys, write_block(b),
222                                    bset_magic(&b->c->sb));
223 out:
224         mempool_free(iter, &b->c->fill_iter);
225         return;
226 err:
227         set_btree_node_io_error(b);
228         bch_cache_set_error(b->c, "%s at bucket %zu, block %u, %u keys",
229                             err, PTR_BUCKET_NR(b->c, &b->key, 0),
230                             bset_block_offset(b, i), i->keys);
231         goto out;
232 }
233
234 static void btree_node_read_endio(struct bio *bio)
235 {
236         struct closure *cl = bio->bi_private;
237
238         closure_put(cl);
239 }
240
241 static void bch_btree_node_read(struct btree *b)
242 {
243         uint64_t start_time = local_clock();
244         struct closure cl;
245         struct bio *bio;
246
247         trace_bcache_btree_read(b);
248
249         closure_init_stack(&cl);
250
251         bio = bch_bbio_alloc(b->c);
252         bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
253         bio->bi_end_io  = btree_node_read_endio;
254         bio->bi_private = &cl;
255         bio->bi_opf = REQ_OP_READ | REQ_META;
256
257         bch_bio_map(bio, b->keys.set[0].data);
258
259         bch_submit_bbio(bio, b->c, &b->key, 0);
260         closure_sync(&cl);
261
262         if (bio->bi_status)
263                 set_btree_node_io_error(b);
264
265         bch_bbio_free(bio, b->c);
266
267         if (btree_node_io_error(b))
268                 goto err;
269
270         bch_btree_node_read_done(b);
271         bch_time_stats_update(&b->c->btree_read_time, start_time);
272
273         return;
274 err:
275         bch_cache_set_error(b->c, "io error reading bucket %zu",
276                             PTR_BUCKET_NR(b->c, &b->key, 0));
277 }
278
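/*
 * Release the resources a completed write was pinning: allocator
 * prio_blocked references and the journal pin taken for this bset.
 */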
279 static void btree_complete_write(struct btree *b, struct btree_write *w)
280 {
281         if (w->prio_blocked &&
282             !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked))
283                 wake_up_allocators(b->c);
284
285         if (w->journal) {
286                 atomic_dec_bug(w->journal);
287                 __closure_wake_up(&b->c->journal.wait);
288         }
289
290         w->prio_blocked = 0;
291         w->journal      = NULL;
292 }
293
294 static void btree_node_write_unlock(struct closure *cl)
295 {
296         struct btree *b = container_of(cl, struct btree, io);
297
298         up(&b->io_mutex);
299 }
300
301 static void __btree_node_write_done(struct closure *cl)
302 {
303         struct btree *b = container_of(cl, struct btree, io);
304         struct btree_write *w = btree_prev_write(b);
305
306         bch_bbio_free(b->bio, b->c);
307         b->bio = NULL;
308         btree_complete_write(b, w);
309
310         if (btree_node_dirty(b))
311                 schedule_delayed_work(&b->work, 30 * HZ);
312
313         closure_return_with_destructor(cl, btree_node_write_unlock);
314 }
315
316 static void btree_node_write_done(struct closure *cl)
317 {
318         struct btree *b = container_of(cl, struct btree, io);
319
320         bio_free_pages(b->bio);
321         __btree_node_write_done(cl);
322 }
323
324 static void btree_node_write_endio(struct bio *bio)
325 {
326         struct closure *cl = bio->bi_private;
327         struct btree *b = container_of(cl, struct btree, io);
328
329         if (bio->bi_status)
330                 set_btree_node_io_error(b);
331
332         bch_bbio_count_io_errors(b->c, bio, bio->bi_status, "writing btree");
333         closure_put(cl);
334 }
335
336 static void do_btree_node_write(struct btree *b)
337 {
338         struct closure *cl = &b->io;
339         struct bset *i = btree_bset_last(b);
340         BKEY_PADDED(key) k;
341
342         i->version      = BCACHE_BSET_VERSION;
343         i->csum         = btree_csum_set(b, i);
344
345         BUG_ON(b->bio);
346         b->bio = bch_bbio_alloc(b->c);
347
348         b->bio->bi_end_io       = btree_node_write_endio;
349         b->bio->bi_private      = cl;
350         b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c));
351         b->bio->bi_opf          = REQ_OP_WRITE | REQ_META | REQ_FUA;
352         bch_bio_map(b->bio, i);
353
354         /*
355          * If we're appending to a leaf node, we don't technically need FUA -
356          * this write just needs to be persisted before the next journal write,
357          * which will be marked FLUSH|FUA.
358          *
359          * Similarly if we're writing a new btree root - the pointer is going to
360          * be in the next journal entry.
361          *
362          * But if we're writing a new btree node (that isn't a root) or
363          * appending to a non leaf btree node, we need either FUA or a flush
364          * when we write the parent with the new pointer. FUA is cheaper than a
365          * flush, and writes appending to leaf nodes aren't blocking anything so
366          * just make all btree node writes FUA to keep things sane.
367          */
368
369         bkey_copy(&k.key, &b->key);
370         SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) +
371                        bset_sector_offset(&b->keys, i));
372
373         if (!bch_bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) {
374                 struct bio_vec *bv;
375                 void *addr = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
376                 struct bvec_iter_all iter_all;
377
378                 bio_for_each_segment_all(bv, b->bio, iter_all) {
379                         memcpy(page_address(bv->bv_page), addr, PAGE_SIZE);
380                         addr += PAGE_SIZE;
381                 }
382
383                 bch_submit_bbio(b->bio, b->c, &k.key, 0);
384
385                 continue_at(cl, btree_node_write_done, NULL);
386         } else {
387                 /*
388                  * No problem for multipage bvec since the bio is
389                  * just allocated
390                  */
391                 b->bio->bi_vcnt = 0;
392                 bch_bio_map(b->bio, i);
393
394                 bch_submit_bbio(b->bio, b->c, &k.key, 0);
395
396                 closure_sync(cl);
397                 continue_at_nobarrier(cl, __btree_node_write_done, NULL);
398         }
399 }
400
401 void __bch_btree_node_write(struct btree *b, struct closure *parent)
402 {
403         struct bset *i = btree_bset_last(b);
404
405         lockdep_assert_held(&b->write_lock);
406
407         trace_bcache_btree_write(b);
408
409         BUG_ON(current->bio_list);
410         BUG_ON(b->written >= btree_blocks(b));
411         BUG_ON(b->written && !i->keys);
412         BUG_ON(btree_bset_first(b)->seq != i->seq);
413         bch_check_keys(&b->keys, "writing");
414
415         cancel_delayed_work(&b->work);
416
417         /* If caller isn't waiting for write, parent refcount is cache set */
418         down(&b->io_mutex);
419         closure_init(&b->io, parent ?: &b->c->cl);
420
421         clear_bit(BTREE_NODE_dirty,      &b->flags);
422         change_bit(BTREE_NODE_write_idx, &b->flags);
423
424         do_btree_node_write(b);
425
426         atomic_long_add(set_blocks(i, block_bytes(b->c)) * b->c->sb.block_size,
427                         &PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written);
428
429         b->written += set_blocks(i, block_bytes(b->c));
430 }
431
432 void bch_btree_node_write(struct btree *b, struct closure *parent)
433 {
434         unsigned int nsets = b->keys.nsets;
435
436         lockdep_assert_held(&b->lock);
437
438         __bch_btree_node_write(b, parent);
439
440         /*
441          * do verify if there was more than one set initially (i.e. we did a
442          * sort) and we sorted down to a single set:
443          */
444         if (nsets && !b->keys.nsets)
445                 bch_btree_verify(b);
446
447         bch_btree_init_next(b);
448 }
449
450 static void bch_btree_node_write_sync(struct btree *b)
451 {
452         struct closure cl;
453
454         closure_init_stack(&cl);
455
456         mutex_lock(&b->write_lock);
457         bch_btree_node_write(b, &cl);
458         mutex_unlock(&b->write_lock);
459
460         closure_sync(&cl);
461 }
462
463 static void btree_node_write_work(struct work_struct *w)
464 {
465         struct btree *b = container_of(to_delayed_work(w), struct btree, work);
466
467         mutex_lock(&b->write_lock);
468         if (btree_node_dirty(b))
469                 __bch_btree_node_write(b, NULL);
470         mutex_unlock(&b->write_lock);
471 }
472
473 static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
474 {
475         struct bset *i = btree_bset_last(b);
476         struct btree_write *w = btree_current_write(b);
477
478         lockdep_assert_held(&b->write_lock);
479
480         BUG_ON(!b->written);
481         BUG_ON(!i->keys);
482
483         if (!btree_node_dirty(b))
484                 schedule_delayed_work(&b->work, 30 * HZ);
485
486         set_btree_node_dirty(b);
487
488         /*
489          * w->journal is always the oldest journal pin of all bkeys
490          * in the leaf node, to make sure the oldest jset seq won't
491          * be increased before this btree node is flushed.
492          */
493         if (journal_ref) {
494                 if (w->journal &&
495                     journal_pin_cmp(b->c, w->journal, journal_ref)) {
496                         atomic_dec_bug(w->journal);
497                         w->journal = NULL;
498                 }
499
500                 if (!w->journal) {
501                         w->journal = journal_ref;
502                         atomic_inc(w->journal);
503                 }
504         }
505
506         /* Force write if set is too big */
507         if (set_bytes(i) > PAGE_SIZE - 48 &&
508             !current->bio_list)
509                 bch_btree_node_write(b, NULL);
510 }
511
512 /*
513  * Btree in memory cache - allocation/freeing
514  * mca -> memory cache
515  */
516
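/*
 * Number of in-memory btree nodes kept in reserve, scaling with btree
 * depth; the shrinker never frees below this, so allocating a node for a
 * btree update can always make forward progress.
 */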
517 #define mca_reserve(c)  (((c->root && c->root->level)           \
518                           ? c->root->level : 1) * 8 + 16)
519 #define mca_can_free(c)                                         \
520         max_t(int, 0, c->btree_cache_used - mca_reserve(c))
521
522 static void mca_data_free(struct btree *b)
523 {
524         BUG_ON(b->io_mutex.count != 1);
525
526         bch_btree_keys_free(&b->keys);
527
528         b->c->btree_cache_used--;
529         list_move(&b->list, &b->c->btree_cache_freed);
530 }
531
532 static void mca_bucket_free(struct btree *b)
533 {
534         BUG_ON(btree_node_dirty(b));
535
536         b->key.ptr[0] = 0;
537         hlist_del_init_rcu(&b->hash);
538         list_move(&b->list, &b->c->btree_cache_freeable);
539 }
540
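/* Page allocation order needed to hold a node of this key's size. */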
541 static unsigned int btree_order(struct bkey *k)
542 {
543         return ilog2(KEY_SIZE(k) / PAGE_SECTORS ?: 1);
544 }
545
546 static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
547 {
548         if (!bch_btree_keys_alloc(&b->keys,
549                                   max_t(unsigned int,
550                                         ilog2(b->c->btree_pages),
551                                         btree_order(k)),
552                                   gfp)) {
553                 b->c->btree_cache_used++;
554                 list_move(&b->list, &b->c->btree_cache);
555         } else {
556                 list_move(&b->list, &b->c->btree_cache_freed);
557         }
558 }
559
560 static struct btree *mca_bucket_alloc(struct cache_set *c,
561                                       struct bkey *k, gfp_t gfp)
562 {
563         /*
564          * kzalloc() is necessary here for initialization,
565          * see code comments in bch_btree_keys_init().
566          */
567         struct btree *b = kzalloc(sizeof(struct btree), gfp);
568
569         if (!b)
570                 return NULL;
571
572         init_rwsem(&b->lock);
573         lockdep_set_novalidate_class(&b->lock);
574         mutex_init(&b->write_lock);
575         lockdep_set_novalidate_class(&b->write_lock);
576         INIT_LIST_HEAD(&b->list);
577         INIT_DELAYED_WORK(&b->work, btree_node_write_work);
578         b->c = c;
579         sema_init(&b->io_mutex, 1);
580
581         mca_data_alloc(b, k, gfp);
582         return b;
583 }
584
585 static int mca_reap(struct btree *b, unsigned int min_order, bool flush)
586 {
587         struct closure cl;
588
589         closure_init_stack(&cl);
590         lockdep_assert_held(&b->c->bucket_lock);
591
592         if (!down_write_trylock(&b->lock))
593                 return -ENOMEM;
594
595         BUG_ON(btree_node_dirty(b) && !b->keys.set[0].data);
596
597         if (b->keys.page_order < min_order)
598                 goto out_unlock;
599
600         if (!flush) {
601                 if (btree_node_dirty(b))
602                         goto out_unlock;
603
604                 if (down_trylock(&b->io_mutex))
605                         goto out_unlock;
606                 up(&b->io_mutex);
607         }
608
609 retry:
610         /*
611          * BTREE_NODE_dirty might be cleared in btree_flush_write() by
612          * __bch_btree_node_write(). To avoid an extra flush, acquire
613          * b->write_lock before checking the BTREE_NODE_dirty bit.
614          */
615         mutex_lock(&b->write_lock);
616         /*
617          * If this btree node has been selected by the journal code in
618          * btree_flush_write(), delay and retry until the journal code has
619          * flushed the node and cleared the BTREE_NODE_journal_flush bit.
620          */
621         if (btree_node_journal_flush(b)) {
622                 pr_debug("bnode %p is flushing by journal, retry\n", b);
623                 mutex_unlock(&b->write_lock);
624                 udelay(1);
625                 goto retry;
626         }
627
628         if (btree_node_dirty(b))
629                 __bch_btree_node_write(b, &cl);
630         mutex_unlock(&b->write_lock);
631
632         closure_sync(&cl);
633
634         /* wait for any in flight btree write */
635         down(&b->io_mutex);
636         up(&b->io_mutex);
637
638         return 0;
639 out_unlock:
640         rw_unlock(true, b);
641         return -ENOMEM;
642 }
643
644 static unsigned long bch_mca_scan(struct shrinker *shrink,
645                                   struct shrink_control *sc)
646 {
647         struct cache_set *c = container_of(shrink, struct cache_set, shrink);
648         struct btree *b, *t;
649         unsigned long i, nr = sc->nr_to_scan;
650         unsigned long freed = 0;
651         unsigned int btree_cache_used;
652
653         if (c->shrinker_disabled)
654                 return SHRINK_STOP;
655
656         if (c->btree_cache_alloc_lock)
657                 return SHRINK_STOP;
658
659         /* Return -1 if we can't do anything right now */
660         if (sc->gfp_mask & __GFP_IO)
661                 mutex_lock(&c->bucket_lock);
662         else if (!mutex_trylock(&c->bucket_lock))
663                 return -1;
664
665         /*
666          * It's _really_ critical that we don't free too many btree nodes - we
667          * have to always leave ourselves a reserve. The reserve is how we
668          * guarantee that allocating memory for a new btree node can always
669          * succeed, so that inserting keys into the btree can always succeed and
670          * IO can always make forward progress:
671          */
672         nr /= c->btree_pages;
673         if (nr == 0)
674                 nr = 1;
675         nr = min_t(unsigned long, nr, mca_can_free(c));
676
677         i = 0;
678         btree_cache_used = c->btree_cache_used;
679         list_for_each_entry_safe_reverse(b, t, &c->btree_cache_freeable, list) {
680                 if (nr <= 0)
681                         goto out;
682
683                 if (!mca_reap(b, 0, false)) {
684                         mca_data_free(b);
685                         rw_unlock(true, b);
686                         freed++;
687                 }
688                 nr--;
689                 i++;
690         }
691
692         list_for_each_entry_safe_reverse(b, t, &c->btree_cache, list) {
693                 if (nr <= 0 || i >= btree_cache_used)
694                         goto out;
695
696                 if (!mca_reap(b, 0, false)) {
697                         mca_bucket_free(b);
698                         mca_data_free(b);
699                         rw_unlock(true, b);
700                         freed++;
701                 }
702
703                 nr--;
704                 i++;
705         }
706 out:
707         mutex_unlock(&c->bucket_lock);
708         return freed * c->btree_pages;
709 }
710
711 static unsigned long bch_mca_count(struct shrinker *shrink,
712                                    struct shrink_control *sc)
713 {
714         struct cache_set *c = container_of(shrink, struct cache_set, shrink);
715
716         if (c->shrinker_disabled)
717                 return 0;
718
719         if (c->btree_cache_alloc_lock)
720                 return 0;
721
722         return mca_can_free(c) * c->btree_pages;
723 }
724
725 void bch_btree_cache_free(struct cache_set *c)
726 {
727         struct btree *b;
728         struct closure cl;
729
730         closure_init_stack(&cl);
731
732         if (c->shrink.list.next)
733                 unregister_shrinker(&c->shrink);
734
735         mutex_lock(&c->bucket_lock);
736
737 #ifdef CONFIG_BCACHE_DEBUG
738         if (c->verify_data)
739                 list_move(&c->verify_data->list, &c->btree_cache);
740
741         free_pages((unsigned long) c->verify_ondisk, ilog2(bucket_pages(c)));
742 #endif
743
744         list_splice(&c->btree_cache_freeable,
745                     &c->btree_cache);
746
747         while (!list_empty(&c->btree_cache)) {
748                 b = list_first_entry(&c->btree_cache, struct btree, list);
749
750                 /*
751          * This function is called by cache_set_free(); there is no I/O
752          * on the cache at this point, so it is no longer necessary to
753          * acquire b->write_lock before clearing BTREE_NODE_dirty.
754                  */
755                 if (btree_node_dirty(b)) {
756                         btree_complete_write(b, btree_current_write(b));
757                         clear_bit(BTREE_NODE_dirty, &b->flags);
758                 }
759                 mca_data_free(b);
760         }
761
762         while (!list_empty(&c->btree_cache_freed)) {
763                 b = list_first_entry(&c->btree_cache_freed,
764                                      struct btree, list);
765                 list_del(&b->list);
766                 cancel_delayed_work_sync(&b->work);
767                 kfree(b);
768         }
769
770         mutex_unlock(&c->bucket_lock);
771 }
772
773 int bch_btree_cache_alloc(struct cache_set *c)
774 {
775         unsigned int i;
776
777         for (i = 0; i < mca_reserve(c); i++)
778                 if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL))
779                         return -ENOMEM;
780
781         list_splice_init(&c->btree_cache,
782                          &c->btree_cache_freeable);
783
784 #ifdef CONFIG_BCACHE_DEBUG
785         mutex_init(&c->verify_lock);
786
787         c->verify_ondisk = (void *)
788                 __get_free_pages(GFP_KERNEL, ilog2(bucket_pages(c)));
789
790         c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL);
791
792         if (c->verify_data &&
793             c->verify_data->keys.set->data)
794                 list_del_init(&c->verify_data->list);
795         else
796                 c->verify_data = NULL;
797 #endif
798
799         c->shrink.count_objects = bch_mca_count;
800         c->shrink.scan_objects = bch_mca_scan;
801         c->shrink.seeks = 4;
802         c->shrink.batch = c->btree_pages * 2;
803
804         if (register_shrinker(&c->shrink))
805                 pr_warn("bcache: %s: could not register shrinker\n",
806                                 __func__);
807
808         return 0;
809 }
810
811 /* Btree in memory cache - hash table */
812
813 static struct hlist_head *mca_hash(struct cache_set *c, struct bkey *k)
814 {
815         return &c->bucket_hash[hash_32(PTR_HASH(c, k), BUCKET_HASH_BITS)];
816 }
817
818 static struct btree *mca_find(struct cache_set *c, struct bkey *k)
819 {
820         struct btree *b;
821
822         rcu_read_lock();
823         hlist_for_each_entry_rcu(b, mca_hash(c, k), hash)
824                 if (PTR_HASH(c, &b->key) == PTR_HASH(c, k))
825                         goto out;
826         b = NULL;
827 out:
828         rcu_read_unlock();
829         return b;
830 }
831
832 static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op)
833 {
834         spin_lock(&c->btree_cannibalize_lock);
835         if (likely(c->btree_cache_alloc_lock == NULL)) {
836                 c->btree_cache_alloc_lock = current;
837         } else if (c->btree_cache_alloc_lock != current) {
838                 if (op)
839                         prepare_to_wait(&c->btree_cache_wait, &op->wait,
840                                         TASK_UNINTERRUPTIBLE);
841                 spin_unlock(&c->btree_cannibalize_lock);
842                 return -EINTR;
843         }
844         spin_unlock(&c->btree_cannibalize_lock);
845
846         return 0;
847 }
848
849 static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op,
850                                      struct bkey *k)
851 {
852         struct btree *b;
853
854         trace_bcache_btree_cache_cannibalize(c);
855
856         if (mca_cannibalize_lock(c, op))
857                 return ERR_PTR(-EINTR);
858
859         list_for_each_entry_reverse(b, &c->btree_cache, list)
860                 if (!mca_reap(b, btree_order(k), false))
861                         return b;
862
863         list_for_each_entry_reverse(b, &c->btree_cache, list)
864                 if (!mca_reap(b, btree_order(k), true))
865                         return b;
866
867         WARN(1, "btree cache cannibalize failed\n");
868         return ERR_PTR(-ENOMEM);
869 }
870
871 /*
872  * We can only have one thread cannibalizing other cached btree nodes at a time,
873  * or we'll deadlock. We use an open coded mutex to ensure that, taken in
874  * mca_cannibalize_lock(). This means every time we unlock the root of
875  * the btree, we need to release this lock if we have it held.
876  */
877 static void bch_cannibalize_unlock(struct cache_set *c)
878 {
879         spin_lock(&c->btree_cannibalize_lock);
880         if (c->btree_cache_alloc_lock == current) {
881                 c->btree_cache_alloc_lock = NULL;
882                 wake_up(&c->btree_cache_wait);
883         }
884         spin_unlock(&c->btree_cannibalize_lock);
885 }
886
887 static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op,
888                                struct bkey *k, int level)
889 {
890         struct btree *b;
891
892         BUG_ON(current->bio_list);
893
894         lockdep_assert_held(&c->bucket_lock);
895
896         if (mca_find(c, k))
897                 return NULL;
898
899         /* btree_node_free() doesn't free memory; it sticks the node on the end
900          * of the list. Check if there are any freed nodes there:
901          */
902         list_for_each_entry(b, &c->btree_cache_freeable, list)
903                 if (!mca_reap(b, btree_order(k), false))
904                         goto out;
905
906         /* We never free struct btree itself, just the memory that holds the on
907          * disk node. Check the freed list before allocating a new one:
908          */
909         list_for_each_entry(b, &c->btree_cache_freed, list)
910                 if (!mca_reap(b, 0, false)) {
911                         mca_data_alloc(b, k, __GFP_NOWARN|GFP_NOIO);
912                         if (!b->keys.set[0].data)
913                                 goto err;
914                         else
915                                 goto out;
916                 }
917
918         b = mca_bucket_alloc(c, k, __GFP_NOWARN|GFP_NOIO);
919         if (!b)
920                 goto err;
921
922         BUG_ON(!down_write_trylock(&b->lock));
923         if (!b->keys.set->data)
924                 goto err;
925 out:
926         BUG_ON(b->io_mutex.count != 1);
927
928         bkey_copy(&b->key, k);
929         list_move(&b->list, &c->btree_cache);
930         hlist_del_init_rcu(&b->hash);
931         hlist_add_head_rcu(&b->hash, mca_hash(c, k));
932
933         lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_);
934         b->parent       = (void *) ~0UL;
935         b->flags        = 0;
936         b->written      = 0;
937         b->level        = level;
938
939         if (!b->level)
940                 bch_btree_keys_init(&b->keys, &bch_extent_keys_ops,
941                                     &b->c->expensive_debug_checks);
942         else
943                 bch_btree_keys_init(&b->keys, &bch_btree_keys_ops,
944                                     &b->c->expensive_debug_checks);
945
946         return b;
947 err:
948         if (b)
949                 rw_unlock(true, b);
950
951         b = mca_cannibalize(c, op, k);
952         if (!IS_ERR(b))
953                 goto out;
954
955         return b;
956 }
957
958 /*
959  * bch_btree_node_get - find a btree node in the cache and lock it, reading it
960  * in from disk if necessary.
961  *
962  * If IO is necessary and running under generic_make_request, returns -EAGAIN.
963  *
964  * The btree node will have either a read or a write lock held, depending on
965  * level and op->lock.
966  */
967 struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op,
968                                  struct bkey *k, int level, bool write,
969                                  struct btree *parent)
970 {
971         int i = 0;
972         struct btree *b;
973
974         BUG_ON(level < 0);
975 retry:
976         b = mca_find(c, k);
977
978         if (!b) {
979                 if (current->bio_list)
980                         return ERR_PTR(-EAGAIN);
981
982                 mutex_lock(&c->bucket_lock);
983                 b = mca_alloc(c, op, k, level);
984                 mutex_unlock(&c->bucket_lock);
985
986                 if (!b)
987                         goto retry;
988                 if (IS_ERR(b))
989                         return b;
990
991                 bch_btree_node_read(b);
992
993                 if (!write)
994                         downgrade_write(&b->lock);
995         } else {
996                 rw_lock(write, b, level);
997                 if (PTR_HASH(c, &b->key) != PTR_HASH(c, k)) {
998                         rw_unlock(write, b);
999                         goto retry;
1000                 }
1001                 BUG_ON(b->level != level);
1002         }
1003
1004         if (btree_node_io_error(b)) {
1005                 rw_unlock(write, b);
1006                 return ERR_PTR(-EIO);
1007         }
1008
1009         BUG_ON(!b->written);
1010
1011         b->parent = parent;
1012
1013         for (; i <= b->keys.nsets && b->keys.set[i].size; i++) {
1014                 prefetch(b->keys.set[i].tree);
1015                 prefetch(b->keys.set[i].data);
1016         }
1017
1018         for (; i <= b->keys.nsets; i++)
1019                 prefetch(b->keys.set[i].data);
1020
1021         return b;
1022 }
1023
1024 static void btree_node_prefetch(struct btree *parent, struct bkey *k)
1025 {
1026         struct btree *b;
1027
1028         mutex_lock(&parent->c->bucket_lock);
1029         b = mca_alloc(parent->c, NULL, k, parent->level - 1);
1030         mutex_unlock(&parent->c->bucket_lock);
1031
1032         if (!IS_ERR_OR_NULL(b)) {
1033                 b->parent = parent;
1034                 bch_btree_node_read(b);
1035                 rw_unlock(true, b);
1036         }
1037 }
1038
1039 /* Btree alloc */
1040
1041 static void btree_node_free(struct btree *b)
1042 {
1043         trace_bcache_btree_node_free(b);
1044
1045         BUG_ON(b == b->c->root);
1046
1047 retry:
1048         mutex_lock(&b->write_lock);
1049         /*
1050          * If the btree node is selected and being flushed in btree_flush_write(),
1051          * delay and retry until the BTREE_NODE_journal_flush bit is cleared;
1052          * only then is it safe to free the btree node here. Otherwise freeing
1053          * it would race with the journal flush.
1054          */
1055         if (btree_node_journal_flush(b)) {
1056                 mutex_unlock(&b->write_lock);
1057                 pr_debug("bnode %p journal_flush set, retry\n", b);
1058                 udelay(1);
1059                 goto retry;
1060         }
1061
1062         if (btree_node_dirty(b)) {
1063                 btree_complete_write(b, btree_current_write(b));
1064                 clear_bit(BTREE_NODE_dirty, &b->flags);
1065         }
1066
1067         mutex_unlock(&b->write_lock);
1068
1069         cancel_delayed_work(&b->work);
1070
1071         mutex_lock(&b->c->bucket_lock);
1072         bch_bucket_free(b->c, &b->key);
1073         mca_bucket_free(b);
1074         mutex_unlock(&b->c->bucket_lock);
1075 }
1076
1077 struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
1078                                      int level, bool wait,
1079                                      struct btree *parent)
1080 {
1081         BKEY_PADDED(key) k;
1082         struct btree *b = ERR_PTR(-EAGAIN);
1083
1084         mutex_lock(&c->bucket_lock);
1085 retry:
1086         if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, wait))
1087                 goto err;
1088
1089         bkey_put(c, &k.key);
1090         SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS);
1091
1092         b = mca_alloc(c, op, &k.key, level);
1093         if (IS_ERR(b))
1094                 goto err_free;
1095
1096         if (!b) {
1097                 cache_bug(c,
1098                         "Tried to allocate bucket that was in btree cache");
1099                 goto retry;
1100         }
1101
1102         b->parent = parent;
1103         bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->sb));
1104
1105         mutex_unlock(&c->bucket_lock);
1106
1107         trace_bcache_btree_node_alloc(b);
1108         return b;
1109 err_free:
1110         bch_bucket_free(c, &k.key);
1111 err:
1112         mutex_unlock(&c->bucket_lock);
1113
1114         trace_bcache_btree_node_alloc_fail(c);
1115         return b;
1116 }
1117
1118 static struct btree *bch_btree_node_alloc(struct cache_set *c,
1119                                           struct btree_op *op, int level,
1120                                           struct btree *parent)
1121 {
1122         return __bch_btree_node_alloc(c, op, level, op != NULL, parent);
1123 }
1124
1125 static struct btree *btree_node_alloc_replacement(struct btree *b,
1126                                                   struct btree_op *op)
1127 {
1128         struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent);
1129
1130         if (!IS_ERR_OR_NULL(n)) {
1131                 mutex_lock(&n->write_lock);
1132                 bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort);
1133                 bkey_copy_key(&n->key, &b->key);
1134                 mutex_unlock(&n->write_lock);
1135         }
1136
1137         return n;
1138 }
1139
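/*
 * Build the key that marks a btree node as freed: copy the node's
 * pointers, set the key itself to ZERO_KEY, and bump each bucket's
 * generation so the old pointers become stale.
 */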
1140 static void make_btree_freeing_key(struct btree *b, struct bkey *k)
1141 {
1142         unsigned int i;
1143
1144         mutex_lock(&b->c->bucket_lock);
1145
1146         atomic_inc(&b->c->prio_blocked);
1147
1148         bkey_copy(k, &b->key);
1149         bkey_copy_key(k, &ZERO_KEY);
1150
1151         for (i = 0; i < KEY_PTRS(k); i++)
1152                 SET_PTR_GEN(k, i,
1153                             bch_inc_gen(PTR_CACHE(b->c, &b->key, i),
1154                                         PTR_BUCKET(b->c, &b->key, i)));
1155
1156         mutex_unlock(&b->c->bucket_lock);
1157 }
1158
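/*
 * Check that every cache has enough buckets in its btree reserve to split
 * nodes all the way from this level up to the root; if not, prepare op to
 * wait and return -EINTR.
 */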
1159 static int btree_check_reserve(struct btree *b, struct btree_op *op)
1160 {
1161         struct cache_set *c = b->c;
1162         struct cache *ca;
1163         unsigned int i, reserve = (c->root->level - b->level) * 2 + 1;
1164
1165         mutex_lock(&c->bucket_lock);
1166
1167         for_each_cache(ca, c, i)
1168                 if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) {
1169                         if (op)
1170                                 prepare_to_wait(&c->btree_cache_wait, &op->wait,
1171                                                 TASK_UNINTERRUPTIBLE);
1172                         mutex_unlock(&c->bucket_lock);
1173                         return -EINTR;
1174                 }
1175
1176         mutex_unlock(&c->bucket_lock);
1177
1178         return mca_cannibalize_lock(b->c, op);
1179 }
1180
1181 /* Garbage collection */
1182
1183 static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
1184                                     struct bkey *k)
1185 {
1186         uint8_t stale = 0;
1187         unsigned int i;
1188         struct bucket *g;
1189
1190         /*
1191          * ptr_invalid() can't return true for the keys that mark btree nodes as
1192          * freed, but since ptr_bad() returns true we'll never actually use them
1193          * for anything, and thus we don't want to mark their pointers here
1194          */
1195         if (!bkey_cmp(k, &ZERO_KEY))
1196                 return stale;
1197
1198         for (i = 0; i < KEY_PTRS(k); i++) {
1199                 if (!ptr_available(c, k, i))
1200                         continue;
1201
1202                 g = PTR_BUCKET(c, k, i);
1203
1204                 if (gen_after(g->last_gc, PTR_GEN(k, i)))
1205                         g->last_gc = PTR_GEN(k, i);
1206
1207                 if (ptr_stale(c, k, i)) {
1208                         stale = max(stale, ptr_stale(c, k, i));
1209                         continue;
1210                 }
1211
1212                 cache_bug_on(GC_MARK(g) &&
1213                              (GC_MARK(g) == GC_MARK_METADATA) != (level != 0),
1214                              c, "inconsistent ptrs: mark = %llu, level = %i",
1215                              GC_MARK(g), level);
1216
1217                 if (level)
1218                         SET_GC_MARK(g, GC_MARK_METADATA);
1219                 else if (KEY_DIRTY(k))
1220                         SET_GC_MARK(g, GC_MARK_DIRTY);
1221                 else if (!GC_MARK(g))
1222                         SET_GC_MARK(g, GC_MARK_RECLAIMABLE);
1223
1224                 /* guard against overflow */
1225                 SET_GC_SECTORS_USED(g, min_t(unsigned int,
1226                                              GC_SECTORS_USED(g) + KEY_SIZE(k),
1227                                              MAX_GC_SECTORS_USED));
1228
1229                 BUG_ON(!GC_SECTORS_USED(g));
1230         }
1231
1232         return stale;
1233 }
1234
1235 #define btree_mark_key(b, k)    __bch_btree_mark_key(b->c, b->level, k)
1236
1237 void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k)
1238 {
1239         unsigned int i;
1240
1241         for (i = 0; i < KEY_PTRS(k); i++)
1242                 if (ptr_available(c, k, i) &&
1243                     !ptr_stale(c, k, i)) {
1244                         struct bucket *b = PTR_BUCKET(c, k, i);
1245
1246                         b->gen = PTR_GEN(k, i);
1247
1248                         if (level && bkey_cmp(k, &ZERO_KEY))
1249                                 b->prio = BTREE_PRIO;
1250                         else if (!level && b->prio == BTREE_PRIO)
1251                                 b->prio = INITIAL_PRIO;
1252                 }
1253
1254         __bch_btree_mark_key(c, level, k);
1255 }
1256
1257 void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats)
1258 {
1259         stats->in_use = (c->nbuckets - c->avail_nbuckets) * 100 / c->nbuckets;
1260 }
1261
1262 static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
1263 {
1264         uint8_t stale = 0;
1265         unsigned int keys = 0, good_keys = 0;
1266         struct bkey *k;
1267         struct btree_iter iter;
1268         struct bset_tree *t;
1269
1270         gc->nodes++;
1271
1272         for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) {
1273                 stale = max(stale, btree_mark_key(b, k));
1274                 keys++;
1275
1276                 if (bch_ptr_bad(&b->keys, k))
1277                         continue;
1278
1279                 gc->key_bytes += bkey_u64s(k);
1280                 gc->nkeys++;
1281                 good_keys++;
1282
1283                 gc->data += KEY_SIZE(k);
1284         }
1285
1286         for (t = b->keys.set; t <= &b->keys.set[b->keys.nsets]; t++)
1287                 btree_bug_on(t->size &&
1288                              bset_written(&b->keys, t) &&
1289                              bkey_cmp(&b->key, &t->end) < 0,
1290                              b, "found short btree key in gc");
1291
1292         if (b->c->gc_always_rewrite)
1293                 return true;
1294
1295         if (stale > 10)
1296                 return true;
1297
1298         if ((keys - good_keys) * 2 > keys)
1299                 return true;
1300
1301         return false;
1302 }
1303
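/*
 * Sliding window of the last GC_MERGE_NODES nodes visited at one level of
 * the btree, used by btree_gc_coalesce() to merge adjacent mostly-empty
 * nodes.
 */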
1304 #define GC_MERGE_NODES  4U
1305
1306 struct gc_merge_info {
1307         struct btree    *b;
1308         unsigned int    keys;
1309 };
1310
1311 static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
1312                                  struct keylist *insert_keys,
1313                                  atomic_t *journal_ref,
1314                                  struct bkey *replace_key);
1315
1316 static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
1317                              struct gc_stat *gc, struct gc_merge_info *r)
1318 {
1319         unsigned int i, nodes = 0, keys = 0, blocks;
1320         struct btree *new_nodes[GC_MERGE_NODES];
1321         struct keylist keylist;
1322         struct closure cl;
1323         struct bkey *k;
1324
1325         bch_keylist_init(&keylist);
1326
1327         if (btree_check_reserve(b, NULL))
1328                 return 0;
1329
1330         memset(new_nodes, 0, sizeof(new_nodes));
1331         closure_init_stack(&cl);
1332
1333         while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b))
1334                 keys += r[nodes++].keys;
1335
1336         blocks = btree_default_blocks(b->c) * 2 / 3;
1337
1338         if (nodes < 2 ||
1339             __set_blocks(b->keys.set[0].data, keys,
1340                          block_bytes(b->c)) > blocks * (nodes - 1))
1341                 return 0;
1342
1343         for (i = 0; i < nodes; i++) {
1344                 new_nodes[i] = btree_node_alloc_replacement(r[i].b, NULL);
1345                 if (IS_ERR_OR_NULL(new_nodes[i]))
1346                         goto out_nocoalesce;
1347         }
1348
1349         /*
1350          * We have to check the reserve here, after we've allocated our new
1351          * nodes, to make sure the insert below will succeed - we also check
1352          * before as an optimization to potentially avoid a bunch of expensive
1353          * allocs/sorts
1354          */
1355         if (btree_check_reserve(b, NULL))
1356                 goto out_nocoalesce;
1357
1358         for (i = 0; i < nodes; i++)
1359                 mutex_lock(&new_nodes[i]->write_lock);
1360
1361         for (i = nodes - 1; i > 0; --i) {
1362                 struct bset *n1 = btree_bset_first(new_nodes[i]);
1363                 struct bset *n2 = btree_bset_first(new_nodes[i - 1]);
1364                 struct bkey *k, *last = NULL;
1365
1366                 keys = 0;
1367
1368                 if (i > 1) {
1369                         for (k = n2->start;
1370                              k < bset_bkey_last(n2);
1371                              k = bkey_next(k)) {
1372                                 if (__set_blocks(n1, n1->keys + keys +
1373                                                  bkey_u64s(k),
1374                                                  block_bytes(b->c)) > blocks)
1375                                         break;
1376
1377                                 last = k;
1378                                 keys += bkey_u64s(k);
1379                         }
1380                 } else {
1381                         /*
1382                          * Last node we're not getting rid of - we're getting
1383                          * rid of the node at r[0]. Have to try and fit all of
1384                          * the remaining keys into this node; we can't ensure
1385                          * they will always fit due to rounding and variable
1386                          * length keys (shouldn't be possible in practice,
1387                          * though)
1388                          */
1389                         if (__set_blocks(n1, n1->keys + n2->keys,
1390                                          block_bytes(b->c)) >
1391                             btree_blocks(new_nodes[i]))
1392                                 goto out_unlock_nocoalesce;
1393
1394                         keys = n2->keys;
1395                         /* Take the key of the node we're getting rid of */
1396                         last = &r->b->key;
1397                 }
1398
1399                 BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c)) >
1400                        btree_blocks(new_nodes[i]));
1401
1402                 if (last)
1403                         bkey_copy_key(&new_nodes[i]->key, last);
1404
1405                 memcpy(bset_bkey_last(n1),
1406                        n2->start,
1407                        (void *) bset_bkey_idx(n2, keys) - (void *) n2->start);
1408
1409                 n1->keys += keys;
1410                 r[i].keys = n1->keys;
1411
1412                 memmove(n2->start,
1413                         bset_bkey_idx(n2, keys),
1414                         (void *) bset_bkey_last(n2) -
1415                         (void *) bset_bkey_idx(n2, keys));
1416
1417                 n2->keys -= keys;
1418
1419                 if (__bch_keylist_realloc(&keylist,
1420                                           bkey_u64s(&new_nodes[i]->key)))
1421                         goto out_unlock_nocoalesce;
1422
1423                 bch_btree_node_write(new_nodes[i], &cl);
1424                 bch_keylist_add(&keylist, &new_nodes[i]->key);
1425         }
1426
1427         for (i = 0; i < nodes; i++)
1428                 mutex_unlock(&new_nodes[i]->write_lock);
1429
1430         closure_sync(&cl);
1431
1432         /* We emptied out this node */
1433         BUG_ON(btree_bset_first(new_nodes[0])->keys);
1434         btree_node_free(new_nodes[0]);
1435         rw_unlock(true, new_nodes[0]);
1436         new_nodes[0] = NULL;
1437
1438         for (i = 0; i < nodes; i++) {
1439                 if (__bch_keylist_realloc(&keylist, bkey_u64s(&r[i].b->key)))
1440                         goto out_nocoalesce;
1441
1442                 make_btree_freeing_key(r[i].b, keylist.top);
1443                 bch_keylist_push(&keylist);
1444         }
1445
1446         bch_btree_insert_node(b, op, &keylist, NULL, NULL);
1447         BUG_ON(!bch_keylist_empty(&keylist));
1448
1449         for (i = 0; i < nodes; i++) {
1450                 btree_node_free(r[i].b);
1451                 rw_unlock(true, r[i].b);
1452
1453                 r[i].b = new_nodes[i];
1454         }
1455
1456         memmove(r, r + 1, sizeof(r[0]) * (nodes - 1));
1457         r[nodes - 1].b = ERR_PTR(-EINTR);
1458
1459         trace_bcache_btree_gc_coalesce(nodes);
1460         gc->nodes--;
1461
1462         bch_keylist_free(&keylist);
1463
1464         /* Invalidated our iterator */
1465         return -EINTR;
1466
1467 out_unlock_nocoalesce:
1468         for (i = 0; i < nodes; i++)
1469                 mutex_unlock(&new_nodes[i]->write_lock);
1470
1471 out_nocoalesce:
1472         closure_sync(&cl);
1473
1474         while ((k = bch_keylist_pop(&keylist)))
1475                 if (!bkey_cmp(k, &ZERO_KEY))
1476                         atomic_dec(&b->c->prio_blocked);
1477         bch_keylist_free(&keylist);
1478
1479         for (i = 0; i < nodes; i++)
1480                 if (!IS_ERR_OR_NULL(new_nodes[i])) {
1481                         btree_node_free(new_nodes[i]);
1482                         rw_unlock(true, new_nodes[i]);
1483                 }
1484         return 0;
1485 }
1486
1487 static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op,
1488                                  struct btree *replace)
1489 {
1490         struct keylist keys;
1491         struct btree *n;
1492
1493         if (btree_check_reserve(b, NULL))
1494                 return 0;
1495
1496         n = btree_node_alloc_replacement(replace, NULL);
1497
1498         /* recheck reserve after allocating replacement node */
1499         if (btree_check_reserve(b, NULL)) {
1500                 btree_node_free(n);
1501                 rw_unlock(true, n);
1502                 return 0;
1503         }
1504
1505         bch_btree_node_write_sync(n);
1506
1507         bch_keylist_init(&keys);
1508         bch_keylist_add(&keys, &n->key);
1509
1510         make_btree_freeing_key(replace, keys.top);
1511         bch_keylist_push(&keys);
1512
1513         bch_btree_insert_node(b, op, &keys, NULL, NULL);
1514         BUG_ON(!bch_keylist_empty(&keys));
1515
1516         btree_node_free(replace);
1517         rw_unlock(true, n);
1518
1519         /* Invalidated our iterator */
1520         return -EINTR;
1521 }
1522
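/*
 * Total size, in u64s, of the good keys in a node; used to decide whether
 * neighbouring nodes are small enough to coalesce.
 */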
1523 static unsigned int btree_gc_count_keys(struct btree *b)
1524 {
1525         struct bkey *k;
1526         struct btree_iter iter;
1527         unsigned int ret = 0;
1528
1529         for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
1530                 ret += bkey_u64s(k);
1531
1532         return ret;
1533 }
1534
1535 static size_t btree_gc_min_nodes(struct cache_set *c)
1536 {
1537         size_t min_nodes;
1538
1539         /*
1540          * Incremental GC pauses for 100ms whenever front-end I/O
1541          * arrives. If GC only processed a constant number (100) of
1542          * nodes each time, GC of a large btree would take a long
1543          * time, and the front-end I/Os could run out of buckets
1544          * (since no new bucket can be allocated during GC) and be
1545          * blocked again.
1546          * So instead of processing a constant number of nodes, scale
1547          * the batch with the total number of btree nodes by dividing
1548          * GC into a constant number (MAX_GC_TIMES) of passes: when
1549          * there are many btree nodes, GC processes more nodes each
1550          * time; otherwise it processes fewer nodes each time (but
1551          * never less than MIN_GC_NODES).
1552          */
1553         min_nodes = c->gc_stats.nodes / MAX_GC_TIMES;
1554         if (min_nodes < MIN_GC_NODES)
1555                 min_nodes = MIN_GC_NODES;
1556
1557         return min_nodes;
1558 }
1559
1560
1561 static int btree_gc_recurse(struct btree *b, struct btree_op *op,
1562                             struct closure *writes, struct gc_stat *gc)
1563 {
1564         int ret = 0;
1565         bool should_rewrite;
1566         struct bkey *k;
1567         struct btree_iter iter;
1568         struct gc_merge_info r[GC_MERGE_NODES];
1569         struct gc_merge_info *i, *last = r + ARRAY_SIZE(r) - 1;
1570
1571         bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done);
1572
1573         for (i = r; i < r + ARRAY_SIZE(r); i++)
1574                 i->b = ERR_PTR(-EINTR);
1575
1576         while (1) {
1577                 k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad);
1578                 if (k) {
1579                         r->b = bch_btree_node_get(b->c, op, k, b->level - 1,
1580                                                   true, b);
1581                         if (IS_ERR(r->b)) {
1582                                 ret = PTR_ERR(r->b);
1583                                 break;
1584                         }
1585
1586                         r->keys = btree_gc_count_keys(r->b);
1587
1588                         ret = btree_gc_coalesce(b, op, gc, r);
1589                         if (ret)
1590                                 break;
1591                 }
1592
1593                 if (!last->b)
1594                         break;
1595
1596                 if (!IS_ERR(last->b)) {
1597                         should_rewrite = btree_gc_mark_node(last->b, gc);
1598                         if (should_rewrite) {
1599                                 ret = btree_gc_rewrite_node(b, op, last->b);
1600                                 if (ret)
1601                                         break;
1602                         }
1603
1604                         if (last->b->level) {
1605                                 ret = btree_gc_recurse(last->b, op, writes, gc);
1606                                 if (ret)
1607                                         break;
1608                         }
1609
1610                         bkey_copy_key(&b->c->gc_done, &last->b->key);
1611
1612                         /*
1613                          * Must flush leaf nodes before gc ends, since replace
1614                          * operations aren't journalled
1615                          */
1616                         mutex_lock(&last->b->write_lock);
1617                         if (btree_node_dirty(last->b))
1618                                 bch_btree_node_write(last->b, writes);
1619                         mutex_unlock(&last->b->write_lock);
1620                         rw_unlock(true, last->b);
1621                 }
1622
1623                 memmove(r + 1, r, sizeof(r[0]) * (GC_MERGE_NODES - 1));
1624                 r->b = NULL;
1625
1626                 if (atomic_read(&b->c->search_inflight) &&
1627                     gc->nodes >= gc->nodes_pre + btree_gc_min_nodes(b->c)) {
1628                         gc->nodes_pre = gc->nodes;
1629                         ret = -EAGAIN;
1630                         break;
1631                 }
1632
1633                 if (need_resched()) {
1634                         ret = -EAGAIN;
1635                         break;
1636                 }
1637         }
1638
1639         for (i = r; i < r + ARRAY_SIZE(r); i++)
1640                 if (!IS_ERR_OR_NULL(i->b)) {
1641                         mutex_lock(&i->b->write_lock);
1642                         if (btree_node_dirty(i->b))
1643                                 bch_btree_node_write(i->b, writes);
1644                         mutex_unlock(&i->b->write_lock);
1645                         rw_unlock(true, i->b);
1646                 }
1647
1648         return ret;
1649 }
1650
1651 static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
1652                              struct closure *writes, struct gc_stat *gc)
1653 {
1654         struct btree *n = NULL;
1655         int ret = 0;
1656         bool should_rewrite;
1657
1658         should_rewrite = btree_gc_mark_node(b, gc);
1659         if (should_rewrite) {
1660                 n = btree_node_alloc_replacement(b, NULL);
1661
1662                 if (!IS_ERR_OR_NULL(n)) {
1663                         bch_btree_node_write_sync(n);
1664
1665                         bch_btree_set_root(n);
1666                         btree_node_free(b);
1667                         rw_unlock(true, n);
1668
1669                         return -EINTR;
1670                 }
1671         }
1672
1673         __bch_btree_mark_key(b->c, b->level + 1, &b->key);
1674
1675         if (b->level) {
1676                 ret = btree_gc_recurse(b, op, writes, gc);
1677                 if (ret)
1678                         return ret;
1679         }
1680
1681         bkey_copy_key(&b->c->gc_done, &b->key);
1682
1683         return ret;
1684 }
1685
1686 static void btree_gc_start(struct cache_set *c)
1687 {
1688         struct cache *ca;
1689         struct bucket *b;
1690         unsigned int i;
1691
1692         if (!c->gc_mark_valid)
1693                 return;
1694
1695         mutex_lock(&c->bucket_lock);
1696
1697         c->gc_mark_valid = 0;
1698         c->gc_done = ZERO_KEY;
1699
1700         for_each_cache(ca, c, i)
1701                 for_each_bucket(b, ca) {
1702                         b->last_gc = b->gen;
1703                         if (!atomic_read(&b->pin)) {
1704                                 SET_GC_MARK(b, 0);
1705                                 SET_GC_SECTORS_USED(b, 0);
1706                         }
1707                 }
1708
1709         mutex_unlock(&c->bucket_lock);
1710 }
1711
1712 static void bch_btree_gc_finish(struct cache_set *c)
1713 {
1714         struct bucket *b;
1715         struct cache *ca;
1716         unsigned int i;
1717
1718         mutex_lock(&c->bucket_lock);
1719
1720         set_gc_sectors(c);
1721         c->gc_mark_valid = 1;
1722         c->need_gc      = 0;
1723
1724         for (i = 0; i < KEY_PTRS(&c->uuid_bucket); i++)
1725                 SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i),
1726                             GC_MARK_METADATA);
1727
1728         /* don't reclaim buckets to which writeback keys point */
1729         rcu_read_lock();
1730         for (i = 0; i < c->devices_max_used; i++) {
1731                 struct bcache_device *d = c->devices[i];
1732                 struct cached_dev *dc;
1733                 struct keybuf_key *w, *n;
1734                 unsigned int j;
1735
1736                 if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
1737                         continue;
1738                 dc = container_of(d, struct cached_dev, disk);
1739
1740                 spin_lock(&dc->writeback_keys.lock);
1741                 rbtree_postorder_for_each_entry_safe(w, n,
1742                                         &dc->writeback_keys.keys, node)
1743                         for (j = 0; j < KEY_PTRS(&w->key); j++)
1744                                 SET_GC_MARK(PTR_BUCKET(c, &w->key, j),
1745                                             GC_MARK_DIRTY);
1746                 spin_unlock(&dc->writeback_keys.lock);
1747         }
1748         rcu_read_unlock();
1749
1750         c->avail_nbuckets = 0;
1751         for_each_cache(ca, c, i) {
1752                 uint64_t *i;
1753
1754                 ca->invalidate_needs_gc = 0;
1755
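                /*
                 * Buckets referenced by the superblock's journal bucket
                 * list (sb.d) and by the prio bucket arrays hold
                 * metadata and must never be reclaimed.
                 */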
1756                 for (i = ca->sb.d; i < ca->sb.d + ca->sb.keys; i++)
1757                         SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);
1758
1759                 for (i = ca->prio_buckets;
1760                      i < ca->prio_buckets + prio_buckets(ca) * 2; i++)
1761                         SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);
1762
1763                 for_each_bucket(b, ca) {
1764                         c->need_gc      = max(c->need_gc, bucket_gc_gen(b));
1765
1766                         if (atomic_read(&b->pin))
1767                                 continue;
1768
1769                         BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b));
1770
1771                         if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE)
1772                                 c->avail_nbuckets++;
1773                 }
1774         }
1775
1776         mutex_unlock(&c->bucket_lock);
1777 }
1778
1779 static void bch_btree_gc(struct cache_set *c)
1780 {
1781         int ret;
1782         struct gc_stat stats;
1783         struct closure writes;
1784         struct btree_op op;
1785         uint64_t start_time = local_clock();
1786
1787         trace_bcache_gc_start(c);
1788
1789         memset(&stats, 0, sizeof(struct gc_stat));
1790         closure_init_stack(&writes);
1791         bch_btree_op_init(&op, SHRT_MAX);
1792
1793         btree_gc_start(c);
1794
1795         /* if CACHE_SET_IO_DISABLE is set, the gc thread should stop too */
1796         do {
1797                 ret = bcache_btree_root(gc_root, c, &op, &writes, &stats);
1798                 closure_sync(&writes);
1799                 cond_resched();
1800
1801                 if (ret == -EAGAIN)
1802                         schedule_timeout_interruptible(msecs_to_jiffies
1803                                                        (GC_SLEEP_MS));
1804                 else if (ret)
1805                         pr_warn("gc failed!\n");
1806         } while (ret && !test_bit(CACHE_SET_IO_DISABLE, &c->flags));
1807
1808         bch_btree_gc_finish(c);
1809         wake_up_allocators(c);
1810
1811         bch_time_stats_update(&c->btree_gc_time, start_time);
1812
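        /*
         * key_bytes was accumulated in u64s and data in sectors;
         * convert both to bytes before publishing the stats.
         */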
1813         stats.key_bytes *= sizeof(uint64_t);
1814         stats.data      <<= 9;
1815         bch_update_bucket_in_use(c, &stats);
1816         memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat));
1817
1818         trace_bcache_gc_end(c);
1819
1820         bch_moving_gc(c);
1821 }
1822
1823 static bool gc_should_run(struct cache_set *c)
1824 {
1825         struct cache *ca;
1826         unsigned int i;
1827
1828         for_each_cache(ca, c, i)
1829                 if (ca->invalidate_needs_gc)
1830                         return true;
1831
1832         if (atomic_read(&c->sectors_to_gc) < 0)
1833                 return true;
1834
1835         return false;
1836 }
1837
1838 static int bch_gc_thread(void *arg)
1839 {
1840         struct cache_set *c = arg;
1841
1842         while (1) {
1843                 wait_event_interruptible(c->gc_wait,
1844                            kthread_should_stop() ||
1845                            test_bit(CACHE_SET_IO_DISABLE, &c->flags) ||
1846                            gc_should_run(c));
1847
1848                 if (kthread_should_stop() ||
1849                     test_bit(CACHE_SET_IO_DISABLE, &c->flags))
1850                         break;
1851
1852                 set_gc_sectors(c);
1853                 bch_btree_gc(c);
1854         }
1855
1856         wait_for_kthread_stop();
1857         return 0;
1858 }
1859
1860 int bch_gc_thread_start(struct cache_set *c)
1861 {
1862         c->gc_thread = kthread_run(bch_gc_thread, c, "bcache_gc");
1863         return PTR_ERR_OR_ZERO(c->gc_thread);
1864 }
1865
1866 /* Initial partial gc */
1867
1868 static int bch_btree_check_recurse(struct btree *b, struct btree_op *op)
1869 {
1870         int ret = 0;
1871         struct bkey *k, *p = NULL;
1872         struct btree_iter iter;
1873
1874         for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid)
1875                 bch_initial_mark_key(b->c, b->level, k);
1876
1877         bch_initial_mark_key(b->c, b->level + 1, &b->key);
1878
1879         if (b->level) {
1880                 bch_btree_iter_init(&b->keys, &iter, NULL);
1881
1882                 do {
1883                         k = bch_btree_iter_next_filter(&iter, &b->keys,
1884                                                        bch_ptr_bad);
1885                         if (k) {
1886                                 btree_node_prefetch(b, k);
1887                                 /*
1888                                  * initialize c->gc_stats.nodes
1889                                  * for incremental GC
1890                                  */
1891                                 b->c->gc_stats.nodes++;
1892                         }
1893
1894                         if (p)
1895                                 ret = bcache_btree(check_recurse, p, b, op);
1896
1897                         p = k;
1898                 } while (p && !ret);
1899         }
1900
1901         return ret;
1902 }
1903
1904
1905 static int bch_btree_check_thread(void *arg)
1906 {
1907         int ret;
1908         struct btree_check_info *info = arg;
1909         struct btree_check_state *check_state = info->state;
1910         struct cache_set *c = check_state->c;
1911         struct btree_iter iter;
1912         struct bkey *k, *p;
1913         int cur_idx, prev_idx, skip_nr;
1914
1915         k = p = NULL;
1916         cur_idx = prev_idx = 0;
1917         ret = 0;
1918
1919         /* root node keys are checked before the check threads are created */
1920         bch_btree_iter_init(&c->root->keys, &iter, NULL);
1921         k = bch_btree_iter_next_filter(&iter, &c->root->keys, bch_ptr_bad);
1922         BUG_ON(!k);
1923
1924         p = k;
1925         while (k) {
1926                 /*
1927                  * Fetch a root node key index, skip the keys that
1928                  * other threads will fetch, then check the sub-tree
1929                  * indexed by the fetched key.
1930                  */
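                /*
                 * key_idx is a shared cursor: each thread atomically
                 * claims the next unclaimed root key index, so work is
                 * distributed dynamically and a thread that finishes
                 * its sub-tree early simply claims the next index.
                 */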
1931                 spin_lock(&check_state->idx_lock);
1932                 cur_idx = check_state->key_idx;
1933                 check_state->key_idx++;
1934                 spin_unlock(&check_state->idx_lock);
1935
1936                 skip_nr = cur_idx - prev_idx;
1937
1938                 while (skip_nr) {
1939                         k = bch_btree_iter_next_filter(&iter,
1940                                                        &c->root->keys,
1941                                                        bch_ptr_bad);
1942                         if (k)
1943                                 p = k;
1944                         else {
1945                                 /*
1946                                  * No more keys to check in the root node;
1947                                  * the running check threads are enough,
1948                                  * stop creating more.
1949                                  */
1950                                 atomic_set(&check_state->enough, 1);
1951                                 /* make check_state->enough visible to other threads */
1952                                 smp_mb__after_atomic();
1953                                 goto out;
1954                         }
1955                         skip_nr--;
1956                         cond_resched();
1957                 }
1958
1959                 if (p) {
1960                         struct btree_op op;
1961
1962                         btree_node_prefetch(c->root, p);
1963                         c->gc_stats.nodes++;
1964                         bch_btree_op_init(&op, 0);
1965                         ret = bcache_btree(check_recurse, p, c->root, &op);
1966                         if (ret)
1967                                 goto out;
1968                 }
1969                 p = NULL;
1970                 prev_idx = cur_idx;
1971                 cond_resched();
1972         }
1973
1974 out:
1975         info->result = ret;
1976         /* make info->result visible before check_state->started is decremented */
1977         smp_mb__before_atomic();
1978         if (atomic_dec_and_test(&check_state->started))
1979                 wake_up(&check_state->wait);
1980
1981         return ret;
1982 }
1983
1984
1985
1986 static int bch_btree_chkthread_nr(void)
1987 {
1988         int n = num_online_cpus()/2;
1989
1990         if (n == 0)
1991                 n = 1;
1992         else if (n > BCH_BTR_CHKTHREAD_MAX)
1993                 n = BCH_BTR_CHKTHREAD_MAX;
1994
1995         return n;
1996 }
1997
1998 int bch_btree_check(struct cache_set *c)
1999 {
2000         int ret = 0;
2001         int i;
2002         struct bkey *k = NULL;
2003         struct btree_iter iter;
2004         struct btree_check_state *check_state;
2005         char name[32];
2006
2007         /* check and mark root node keys */
2008         for_each_key_filter(&c->root->keys, k, &iter, bch_ptr_invalid)
2009                 bch_initial_mark_key(c, c->root->level, k);
2010
2011         bch_initial_mark_key(c, c->root->level + 1, &c->root->key);
2012
2013         if (c->root->level == 0)
2014                 return 0;
2015
2016         check_state = kzalloc(sizeof(struct btree_check_state), GFP_KERNEL);
2017         if (!check_state)
2018                 return -ENOMEM;
2019
2020         check_state->c = c;
2021         check_state->total_threads = bch_btree_chkthread_nr();
2022         check_state->key_idx = 0;
2023         spin_lock_init(&check_state->idx_lock);
2024         atomic_set(&check_state->started, 0);
2025         atomic_set(&check_state->enough, 0);
2026         init_waitqueue_head(&check_state->wait);
2027
2028         /*
2029          * Run multiple threads to check btree nodes in parallel.
2030          * If check_state->enough is non-zero, the currently running
2031          * check threads are enough and it is unnecessary to create
2032          * more.
2033          */
2034         for (i = 0; i < check_state->total_threads; i++) {
2035                 /* fetch latest check_state->enough earlier */
2036                 smp_mb__before_atomic();
2037                 if (atomic_read(&check_state->enough))
2038                         break;
2039
2040                 check_state->infos[i].result = 0;
2041                 check_state->infos[i].state = check_state;
2042                 snprintf(name, sizeof(name), "bch_btrchk[%d]", i);
2043                 atomic_inc(&check_state->started);
2044
2045                 check_state->infos[i].thread =
2046                         kthread_run(bch_btree_check_thread,
2047                                     &check_state->infos[i],
2048                                     name);
2049                 if (IS_ERR(check_state->infos[i].thread)) {
2050                         pr_err("fails to run thread bch_btrchk[%d]\n", i);
2051                         for (--i; i >= 0; i--)
2052                                 kthread_stop(check_state->infos[i].thread);
2053                         ret = -ENOMEM;
2054                         goto out;
2055                 }
2056         }
2057
2058         wait_event_interruptible(check_state->wait,
2059                                  atomic_read(&check_state->started) == 0 ||
2060                                   test_bit(CACHE_SET_IO_DISABLE, &c->flags));
2061
2062         for (i = 0; i < check_state->total_threads; i++) {
2063                 if (check_state->infos[i].result) {
2064                         ret = check_state->infos[i].result;
2065                         goto out;
2066                 }
2067         }
2068
2069 out:
2070         kfree(check_state);
2071         return ret;
2072 }
2073
2074 void bch_initial_gc_finish(struct cache_set *c)
2075 {
2076         struct cache *ca;
2077         struct bucket *b;
2078         unsigned int i;
2079
2080         bch_btree_gc_finish(c);
2081
2082         mutex_lock(&c->bucket_lock);
2083
2084         /*
2085          * We need to put some unused buckets directly on the prio freelist in
2086          * order to get the allocator thread started - it needs freed buckets in
2087          * order to rewrite the prios and gens, and it needs to rewrite prios
2088          * and gens in order to free buckets.
2089          *
2090          * This is only safe for buckets that have no live data in them, which
2091          * there should always be some of.
2092          */
2093         for_each_cache(ca, c, i) {
2094                 for_each_bucket(b, ca) {
2095                         if (fifo_full(&ca->free[RESERVE_PRIO]) &&
2096                             fifo_full(&ca->free[RESERVE_BTREE]))
2097                                 break;
2098
2099                         if (bch_can_invalidate_bucket(ca, b) &&
2100                             !GC_MARK(b)) {
2101                                 __bch_invalidate_one_bucket(ca, b);
2102                                 if (!fifo_push(&ca->free[RESERVE_PRIO],
2103                                    b - ca->buckets))
2104                                         fifo_push(&ca->free[RESERVE_BTREE],
2105                                                   b - ca->buckets);
2106                         }
2107                 }
2108         }
2109
2110         mutex_unlock(&c->bucket_lock);
2111 }
2112
2113 /* Btree insertion */
2114
2115 static bool btree_insert_key(struct btree *b, struct bkey *k,
2116                              struct bkey *replace_key)
2117 {
2118         unsigned int status;
2119
2120         BUG_ON(bkey_cmp(k, &b->key) > 0);
2121
2122         status = bch_btree_insert_key(&b->keys, k, replace_key);
2123         if (status != BTREE_INSERT_STATUS_NO_INSERT) {
2124                 bch_check_keys(&b->keys, "%u for %s", status,
2125                                replace_key ? "replace" : "insert");
2126
2127                 trace_bcache_btree_insert_key(b, k, replace_key != NULL,
2128                                               status);
2129                 return true;
2130         } else
2131                 return false;
2132 }
2133
2134 static size_t insert_u64s_remaining(struct btree *b)
2135 {
2136         long ret = bch_btree_keys_u64s_remaining(&b->keys);
2137
2138         /*
2139          * Might land in the middle of an existing extent and have to split it
2140          */
2141         if (b->keys.ops->is_extents)
2142                 ret -= KEY_MAX_U64S;
2143
2144         return max(ret, 0L);
2145 }
2146
2147 static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
2148                                   struct keylist *insert_keys,
2149                                   struct bkey *replace_key)
2150 {
2151         bool ret = false;
2152         int oldsize = bch_count_data(&b->keys);
2153
2154         while (!bch_keylist_empty(insert_keys)) {
2155                 struct bkey *k = insert_keys->keys;
2156
2157                 if (bkey_u64s(k) > insert_u64s_remaining(b))
2158                         break;
2159
2160                 if (bkey_cmp(k, &b->key) <= 0) {
2161                         if (!b->level)
2162                                 bkey_put(b->c, k);
2163
2164                         ret |= btree_insert_key(b, k, replace_key);
2165                         bch_keylist_pop_front(insert_keys);
2166                 } else if (bkey_cmp(&START_KEY(k), &b->key) < 0) {
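                        /*
                         * The key straddles the end of this node:
                         * insert the front part (cut back to b->key)
                         * here and leave the remainder on the keylist
                         * for the next node.
                         */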
2167                         BKEY_PADDED(key) temp;
2168                         bkey_copy(&temp.key, insert_keys->keys);
2169
2170                         bch_cut_back(&b->key, &temp.key);
2171                         bch_cut_front(&b->key, insert_keys->keys);
2172
2173                         ret |= btree_insert_key(b, &temp.key, replace_key);
2174                         break;
2175                 } else {
2176                         break;
2177                 }
2178         }
2179
2180         if (!ret)
2181                 op->insert_collision = true;
2182
2183         BUG_ON(!bch_keylist_empty(insert_keys) && b->level);
2184
2185         BUG_ON(bch_count_data(&b->keys) < oldsize);
2186         return ret;
2187 }
2188
2189 static int btree_split(struct btree *b, struct btree_op *op,
2190                        struct keylist *insert_keys,
2191                        struct bkey *replace_key)
2192 {
2193         bool split;
2194         struct btree *n1, *n2 = NULL, *n3 = NULL;
2195         uint64_t start_time = local_clock();
2196         struct closure cl;
2197         struct keylist parent_keys;
2198
2199         closure_init_stack(&cl);
2200         bch_keylist_init(&parent_keys);
2201
2202         if (btree_check_reserve(b, op)) {
2203                 if (!b->level)
2204                         return -EINTR;
2205                 else
2206                         WARN(1, "insufficient reserve for split\n");
2207         }
2208
2209         n1 = btree_node_alloc_replacement(b, op);
2210         if (IS_ERR(n1))
2211                 goto err;
2212
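        /*
         * If b's existing keys alone already fill more than ~80% of a
         * node (4/5 of its blocks), split into two nodes (allocating a
         * new root n3 as well if b was the root); otherwise just insert
         * into the single compacted replacement node n1.
         */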
2213         split = set_blocks(btree_bset_first(n1),
2214                            block_bytes(n1->c)) > (btree_blocks(b) * 4) / 5;
2215
2216         if (split) {
2217                 unsigned int keys = 0;
2218
2219                 trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys);
2220
2221                 n2 = bch_btree_node_alloc(b->c, op, b->level, b->parent);
2222                 if (IS_ERR(n2))
2223                         goto err_free1;
2224
2225                 if (!b->parent) {
2226                         n3 = bch_btree_node_alloc(b->c, op, b->level + 1, NULL);
2227                         if (IS_ERR(n3))
2228                                 goto err_free2;
2229                 }
2230
2231                 mutex_lock(&n1->write_lock);
2232                 mutex_lock(&n2->write_lock);
2233
2234                 bch_btree_insert_keys(n1, op, insert_keys, replace_key);
2235
2236                 /*
2237                  * Has to be a linear search because we don't have an auxiliary
2238                  * search tree yet
2239                  */
2240
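                /*
                 * Walk forward until roughly 3/5 of n1's keys have been
                 * counted; those stay in n1, the rest are copied into
                 * n2 below.
                 */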
2241                 while (keys < (btree_bset_first(n1)->keys * 3) / 5)
2242                         keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1),
2243                                                         keys));
2244
2245                 bkey_copy_key(&n1->key,
2246                               bset_bkey_idx(btree_bset_first(n1), keys));
2247                 keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1), keys));
2248
2249                 btree_bset_first(n2)->keys = btree_bset_first(n1)->keys - keys;
2250                 btree_bset_first(n1)->keys = keys;
2251
2252                 memcpy(btree_bset_first(n2)->start,
2253                        bset_bkey_last(btree_bset_first(n1)),
2254                        btree_bset_first(n2)->keys * sizeof(uint64_t));
2255
2256                 bkey_copy_key(&n2->key, &b->key);
2257
2258                 bch_keylist_add(&parent_keys, &n2->key);
2259                 bch_btree_node_write(n2, &cl);
2260                 mutex_unlock(&n2->write_lock);
2261                 rw_unlock(true, n2);
2262         } else {
2263                 trace_bcache_btree_node_compact(b, btree_bset_first(n1)->keys);
2264
2265                 mutex_lock(&n1->write_lock);
2266                 bch_btree_insert_keys(n1, op, insert_keys, replace_key);
2267         }
2268
2269         bch_keylist_add(&parent_keys, &n1->key);
2270         bch_btree_node_write(n1, &cl);
2271         mutex_unlock(&n1->write_lock);
2272
2273         if (n3) {
2274                 /* Depth increases, make a new root */
2275                 mutex_lock(&n3->write_lock);
2276                 bkey_copy_key(&n3->key, &MAX_KEY);
2277                 bch_btree_insert_keys(n3, op, &parent_keys, NULL);
2278                 bch_btree_node_write(n3, &cl);
2279                 mutex_unlock(&n3->write_lock);
2280
2281                 closure_sync(&cl);
2282                 bch_btree_set_root(n3);
2283                 rw_unlock(true, n3);
2284         } else if (!b->parent) {
2285                 /* Root filled up but didn't need to be split */
2286                 closure_sync(&cl);
2287                 bch_btree_set_root(n1);
2288         } else {
2289                 /* Split a non-root node */
2290                 closure_sync(&cl);
2291                 make_btree_freeing_key(b, parent_keys.top);
2292                 bch_keylist_push(&parent_keys);
2293
2294                 bch_btree_insert_node(b->parent, op, &parent_keys, NULL, NULL);
2295                 BUG_ON(!bch_keylist_empty(&parent_keys));
2296         }
2297
2298         btree_node_free(b);
2299         rw_unlock(true, n1);
2300
2301         bch_time_stats_update(&b->c->btree_split_time, start_time);
2302
2303         return 0;
2304 err_free2:
2305         bkey_put(b->c, &n2->key);
2306         btree_node_free(n2);
2307         rw_unlock(true, n2);
2308 err_free1:
2309         bkey_put(b->c, &n1->key);
2310         btree_node_free(n1);
2311         rw_unlock(true, n1);
2312 err:
2313         WARN(1, "bcache: btree split failed (level %u)\n", b->level);
2314
2315         if (n3 == ERR_PTR(-EAGAIN) ||
2316             n2 == ERR_PTR(-EAGAIN) ||
2317             n1 == ERR_PTR(-EAGAIN))
2318                 return -EAGAIN;
2319
2320         return -ENOMEM;
2321 }
2322
2323 static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
2324                                  struct keylist *insert_keys,
2325                                  atomic_t *journal_ref,
2326                                  struct bkey *replace_key)
2327 {
2328         struct closure cl;
2329
2330         BUG_ON(b->level && replace_key);
2331
2332         closure_init_stack(&cl);
2333
2334         mutex_lock(&b->write_lock);
2335
2336         if (write_block(b) != btree_bset_last(b) &&
2337             b->keys.last_set_unwritten)
2338                 bch_btree_init_next(b); /* just wrote a set */
2339
2340         if (bch_keylist_nkeys(insert_keys) > insert_u64s_remaining(b)) {
2341                 mutex_unlock(&b->write_lock);
2342                 goto split;
2343         }
2344
2345         BUG_ON(write_block(b) != btree_bset_last(b));
2346
2347         if (bch_btree_insert_keys(b, op, insert_keys, replace_key)) {
2348                 if (!b->level)
2349                         bch_btree_leaf_dirty(b, journal_ref);
2350                 else
2351                         bch_btree_node_write(b, &cl);
2352         }
2353
2354         mutex_unlock(&b->write_lock);
2355
2356         /* wait for btree node write if necessary, after unlock */
2357         closure_sync(&cl);
2358
2359         return 0;
2360 split:
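        /*
         * We need a split but can't do it from here: in a bio
         * submission path (current->bio_list set) we must not block on
         * allocation, so return -EAGAIN and let the caller retry from a
         * context that can sleep; if we didn't take write locks high
         * enough up the tree, bump op->lock and return -EINTR so the
         * traversal is restarted with the right locks; otherwise do the
         * split here.
         */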
2361         if (current->bio_list) {
2362                 op->lock = b->c->root->level + 1;
2363                 return -EAGAIN;
2364         } else if (op->lock <= b->c->root->level) {
2365                 op->lock = b->c->root->level + 1;
2366                 return -EINTR;
2367         } else {
2368                 /* Invalidated all iterators */
2369                 int ret = btree_split(b, op, insert_keys, replace_key);
2370
2371                 if (bch_keylist_empty(insert_keys))
2372                         return 0;
2373                 else if (!ret)
2374                         return -EINTR;
2375                 return ret;
2376         }
2377 }
2378
2379 int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
2380                                struct bkey *check_key)
2381 {
2382         int ret = -EINTR;
2383         uint64_t btree_ptr = b->key.ptr[0];
2384         unsigned long seq = b->seq;
2385         struct keylist insert;
2386         bool upgrade = op->lock == -1;
2387
2388         bch_keylist_init(&insert);
2389
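        /*
         * op->lock == -1 means the caller holds only a read lock on b:
         * drop it and take the write lock. If the node's pointer or
         * sequence number changed while the lock was dropped, someone
         * else modified or freed it; bail out and let the caller retry
         * with op->lock set to this level.
         */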
2390         if (upgrade) {
2391                 rw_unlock(false, b);
2392                 rw_lock(true, b, b->level);
2393
2394                 if (b->key.ptr[0] != btree_ptr ||
2395                     b->seq != seq + 1) {
2396                         op->lock = b->level;
2397                         goto out;
2398                 }
2399         }
2400
2401         SET_KEY_PTRS(check_key, 1);
2402         get_random_bytes(&check_key->ptr[0], sizeof(uint64_t));
2403
2404         SET_PTR_DEV(check_key, 0, PTR_CHECK_DEV);
2405
2406         bch_keylist_add(&insert, check_key);
2407
2408         ret = bch_btree_insert_node(b, op, &insert, NULL, NULL);
2409
2410         BUG_ON(!ret && !bch_keylist_empty(&insert));
2411 out:
2412         if (upgrade)
2413                 downgrade_write(&b->lock);
2414         return ret;
2415 }
2416
2417 struct btree_insert_op {
2418         struct btree_op op;
2419         struct keylist  *keys;
2420         atomic_t        *journal_ref;
2421         struct bkey     *replace_key;
2422 };
2423
2424 static int btree_insert_fn(struct btree_op *b_op, struct btree *b)
2425 {
2426         struct btree_insert_op *op = container_of(b_op,
2427                                         struct btree_insert_op, op);
2428
2429         int ret = bch_btree_insert_node(b, &op->op, op->keys,
2430                                         op->journal_ref, op->replace_key);
2431         if (ret && !bch_keylist_empty(op->keys))
2432                 return ret;
2433         else
2434                 return MAP_DONE;
2435 }
2436
2437 int bch_btree_insert(struct cache_set *c, struct keylist *keys,
2438                      atomic_t *journal_ref, struct bkey *replace_key)
2439 {
2440         struct btree_insert_op op;
2441         int ret = 0;
2442
2443         BUG_ON(current->bio_list);
2444         BUG_ON(bch_keylist_empty(keys));
2445
2446         bch_btree_op_init(&op.op, 0);
2447         op.keys         = keys;
2448         op.journal_ref  = journal_ref;
2449         op.replace_key  = replace_key;
2450
2451         while (!ret && !bch_keylist_empty(keys)) {
2452                 op.op.lock = 0;
2453                 ret = bch_btree_map_leaf_nodes(&op.op, c,
2454                                                &START_KEY(keys->keys),
2455                                                btree_insert_fn);
2456         }
2457
2458         if (ret) {
2459                 struct bkey *k;
2460
2461                 pr_err("error %i\n", ret);
2462
2463                 while ((k = bch_keylist_pop(keys)))
2464                         bkey_put(c, k);
2465         } else if (op.op.insert_collision)
2466                 ret = -ESRCH;
2467
2468         return ret;
2469 }
2470
2471 void bch_btree_set_root(struct btree *b)
2472 {
2473         unsigned int i;
2474         struct closure cl;
2475
2476         closure_init_stack(&cl);
2477
2478         trace_bcache_btree_set_root(b);
2479
2480         BUG_ON(!b->written);
2481
2482         for (i = 0; i < KEY_PTRS(&b->key); i++)
2483                 BUG_ON(PTR_BUCKET(b->c, &b->key, i)->prio != BTREE_PRIO);
2484
2485         mutex_lock(&b->c->bucket_lock);
2486         list_del_init(&b->list);
2487         mutex_unlock(&b->c->bucket_lock);
2488
2489         b->c->root = b;
2490
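        /* journal the change so the new root is found again after a crash */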
2491         bch_journal_meta(b->c, &cl);
2492         closure_sync(&cl);
2493 }
2494
2495 /* Map across nodes or keys */
2496
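/*
 * The map helpers walk the btree starting from @from and call @fn on
 * each node (or key). @fn returns MAP_CONTINUE to keep walking; any
 * other value (MAP_DONE or an error) stops the walk and is returned to
 * the caller.
 */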
2497 static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
2498                                        struct bkey *from,
2499                                        btree_map_nodes_fn *fn, int flags)
2500 {
2501         int ret = MAP_CONTINUE;
2502
2503         if (b->level) {
2504                 struct bkey *k;
2505                 struct btree_iter iter;
2506
2507                 bch_btree_iter_init(&b->keys, &iter, from);
2508
2509                 while ((k = bch_btree_iter_next_filter(&iter, &b->keys,
2510                                                        bch_ptr_bad))) {
2511                         ret = bcache_btree(map_nodes_recurse, k, b,
2512                                     op, from, fn, flags);
2513                         from = NULL;
2514
2515                         if (ret != MAP_CONTINUE)
2516                                 return ret;
2517                 }
2518         }
2519
2520         if (!b->level || flags == MAP_ALL_NODES)
2521                 ret = fn(op, b);
2522
2523         return ret;
2524 }
2525
2526 int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
2527                           struct bkey *from, btree_map_nodes_fn *fn, int flags)
2528 {
2529         return bcache_btree_root(map_nodes_recurse, c, op, from, fn, flags);
2530 }
2531
2532 int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
2533                                       struct bkey *from, btree_map_keys_fn *fn,
2534                                       int flags)
2535 {
2536         int ret = MAP_CONTINUE;
2537         struct bkey *k;
2538         struct btree_iter iter;
2539
2540         bch_btree_iter_init(&b->keys, &iter, from);
2541
2542         while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) {
2543                 ret = !b->level
2544                         ? fn(op, b, k)
2545                         : bcache_btree(map_keys_recurse, k,
2546                                        b, op, from, fn, flags);
2547                 from = NULL;
2548
2549                 if (ret != MAP_CONTINUE)
2550                         return ret;
2551         }
2552
2553         if (!b->level && (flags & MAP_END_KEY))
2554                 ret = fn(op, b, &KEY(KEY_INODE(&b->key),
2555                                      KEY_OFFSET(&b->key), 0));
2556
2557         return ret;
2558 }
2559
2560 int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
2561                        struct bkey *from, btree_map_keys_fn *fn, int flags)
2562 {
2563         return bcache_btree_root(map_keys_recurse, c, op, from, fn, flags);
2564 }
2565
2566 /* Keybuf code */
2567
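/*
 * A keybuf is a fixed-size buffer of keys kept in an rbtree sorted by
 * key; consumers such as writeback and moving gc refill it by scanning
 * the btree for keys matching a predicate, and mark entries in use via
 * w->private.
 */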
2568 static inline int keybuf_cmp(struct keybuf_key *l, struct keybuf_key *r)
2569 {
2570         /* Overlapping keys compare equal */
2571         if (bkey_cmp(&l->key, &START_KEY(&r->key)) <= 0)
2572                 return -1;
2573         if (bkey_cmp(&START_KEY(&l->key), &r->key) >= 0)
2574                 return 1;
2575         return 0;
2576 }
2577
2578 static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l,
2579                                             struct keybuf_key *r)
2580 {
2581         return clamp_t(int64_t, bkey_cmp(&l->key, &r->key), -1, 1);
2582 }
2583
2584 struct refill {
2585         struct btree_op op;
2586         unsigned int    nr_found;
2587         struct keybuf   *buf;
2588         struct bkey     *end;
2589         keybuf_pred_fn  *pred;
2590 };
2591
2592 static int refill_keybuf_fn(struct btree_op *op, struct btree *b,
2593                             struct bkey *k)
2594 {
2595         struct refill *refill = container_of(op, struct refill, op);
2596         struct keybuf *buf = refill->buf;
2597         int ret = MAP_CONTINUE;
2598
2599         if (bkey_cmp(k, refill->end) > 0) {
2600                 ret = MAP_DONE;
2601                 goto out;
2602         }
2603
2604         if (!KEY_SIZE(k)) /* end key */
2605                 goto out;
2606
2607         if (refill->pred(buf, k)) {
2608                 struct keybuf_key *w;
2609
2610                 spin_lock(&buf->lock);
2611
2612                 w = array_alloc(&buf->freelist);
2613                 if (!w) {
2614                         spin_unlock(&buf->lock);
2615                         return MAP_DONE;
2616                 }
2617
2618                 w->private = NULL;
2619                 bkey_copy(&w->key, k);
2620
2621                 if (RB_INSERT(&buf->keys, w, node, keybuf_cmp))
2622                         array_free(&buf->freelist, w);
2623                 else
2624                         refill->nr_found++;
2625
2626                 if (array_freelist_empty(&buf->freelist))
2627                         ret = MAP_DONE;
2628
2629                 spin_unlock(&buf->lock);
2630         }
2631 out:
2632         buf->last_scanned = *k;
2633         return ret;
2634 }
2635
2636 void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
2637                        struct bkey *end, keybuf_pred_fn *pred)
2638 {
2639         struct bkey start = buf->last_scanned;
2640         struct refill refill;
2641
2642         cond_resched();
2643
2644         bch_btree_op_init(&refill.op, -1);
2645         refill.nr_found = 0;
2646         refill.buf      = buf;
2647         refill.end      = end;
2648         refill.pred     = pred;
2649
2650         bch_btree_map_keys(&refill.op, c, &buf->last_scanned,
2651                            refill_keybuf_fn, MAP_END_KEY);
2652
2653         trace_bcache_keyscan(refill.nr_found,
2654                              KEY_INODE(&start), KEY_OFFSET(&start),
2655                              KEY_INODE(&buf->last_scanned),
2656                              KEY_OFFSET(&buf->last_scanned));
2657
2658         spin_lock(&buf->lock);
2659
2660         if (!RB_EMPTY_ROOT(&buf->keys)) {
2661                 struct keybuf_key *w;
2662
2663                 w = RB_FIRST(&buf->keys, struct keybuf_key, node);
2664                 buf->start      = START_KEY(&w->key);
2665
2666                 w = RB_LAST(&buf->keys, struct keybuf_key, node);
2667                 buf->end        = w->key;
2668         } else {
2669                 buf->start      = MAX_KEY;
2670                 buf->end        = MAX_KEY;
2671         }
2672
2673         spin_unlock(&buf->lock);
2674 }
2675
2676 static void __bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
2677 {
2678         rb_erase(&w->node, &buf->keys);
2679         array_free(&buf->freelist, w);
2680 }
2681
2682 void bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
2683 {
2684         spin_lock(&buf->lock);
2685         __bch_keybuf_del(buf, w);
2686         spin_unlock(&buf->lock);
2687 }
2688
2689 bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
2690                                   struct bkey *end)
2691 {
2692         bool ret = false;
2693         struct keybuf_key *p, *w, s;
2694
2695         s.key = *start;
2696
2697         if (bkey_cmp(end, &buf->start) <= 0 ||
2698             bkey_cmp(start, &buf->end) >= 0)
2699                 return false;
2700
2701         spin_lock(&buf->lock);
2702         w = RB_GREATER(&buf->keys, s, node, keybuf_nonoverlapping_cmp);
2703
2704         while (w && bkey_cmp(&START_KEY(&w->key), end) < 0) {
2705                 p = w;
2706                 w = RB_NEXT(w, node);
2707
2708                 if (p->private)
2709                         ret = true;
2710                 else
2711                         __bch_keybuf_del(buf, p);
2712         }
2713
2714         spin_unlock(&buf->lock);
2715         return ret;
2716 }
2717
2718 struct keybuf_key *bch_keybuf_next(struct keybuf *buf)
2719 {
2720         struct keybuf_key *w;
2721
2722         spin_lock(&buf->lock);
2723
2724         w = RB_FIRST(&buf->keys, struct keybuf_key, node);
2725
2726         while (w && w->private)
2727                 w = RB_NEXT(w, node);
2728
2729         if (w)
2730                 w->private = ERR_PTR(-EINTR);
2731
2732         spin_unlock(&buf->lock);
2733         return w;
2734 }
2735
2736 struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
2737                                           struct keybuf *buf,
2738                                           struct bkey *end,
2739                                           keybuf_pred_fn *pred)
2740 {
2741         struct keybuf_key *ret;
2742
2743         while (1) {
2744                 ret = bch_keybuf_next(buf);
2745                 if (ret)
2746                         break;
2747
2748                 if (bkey_cmp(&buf->last_scanned, end) >= 0) {
2749                         pr_debug("scan finished\n");
2750                         break;
2751                 }
2752
2753                 bch_refill_keybuf(c, buf, end, pred);
2754         }
2755
2756         return ret;
2757 }
2758
2759 void bch_keybuf_init(struct keybuf *buf)
2760 {
2761         buf->last_scanned       = MAX_KEY;
2762         buf->keys               = RB_ROOT;
2763
2764         spin_lock_init(&buf->lock);
2765         array_allocator_init(&buf->freelist);
2766 }