1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
4  *
5  * Uses a block device as cache for other block devices; optimized for SSDs.
6  * All allocation is done in buckets, which should match the erase block size
7  * of the device.
8  *
9  * Buckets containing cached data are kept on a heap sorted by priority;
10  * bucket priority is increased on cache hit, and periodically all the buckets
11  * on the heap have their priority scaled down. This currently is just used as
12  * an LRU but in the future should allow for more intelligent heuristics.
13  *
14  * Buckets have an 8 bit counter; freeing is accomplished by incrementing the
15  * counter. Garbage collection is used to remove stale pointers.
16  *
17  * Indexing is done via a btree; nodes are not necessarily fully sorted, rather
18  * as keys are inserted we only sort the pages that have not yet been written.
19  * When garbage collection is run, we resort the entire node.
20  *
21  * All configuration is done via sysfs; see Documentation/admin-guide/bcache.rst.
22  */
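
/*
 * Illustrative sketch of the "8 bit counter" mentioned above (commentary
 * only, not part of the driver): the counter is the bucket generation. Every
 * pointer records the generation of the bucket it points into, so a bucket is
 * "freed" simply by bumping its gen; keys still carrying the old gen then
 * compare as stale and are skipped and garbage collected. Roughly, in terms
 * of the helpers from bcache.h:
 *
 *	uint8_t key_gen    = PTR_GEN(k, i);		// gen stored in the key
 *	uint8_t bucket_gen = PTR_BUCKET(c, k, i)->gen;	// current bucket gen
 *
 *	// ptr_stale() boils down to a wrapping (mod 256) comparison:
 *	stale = (int8_t) (bucket_gen - key_gen) > 0;
 */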
23
24 #include "bcache.h"
25 #include "btree.h"
26 #include "debug.h"
27 #include "extents.h"
28
29 #include <linux/slab.h>
30 #include <linux/bitops.h>
31 #include <linux/hash.h>
32 #include <linux/kthread.h>
33 #include <linux/prefetch.h>
34 #include <linux/random.h>
35 #include <linux/rcupdate.h>
36 #include <linux/sched/clock.h>
37 #include <linux/sched/signal.h>
38 #include <linux/rculist.h>
39 #include <linux/delay.h>
40 #include <trace/events/bcache.h>
41
42 /*
43  * Todo:
44  * register_bcache: Return errors out to userspace correctly
45  *
46  * Writeback: don't undirty key until after a cache flush
47  *
48  * Create an iterator for key pointers
49  *
50  * On btree write error, mark bucket such that it won't be freed from the cache
51  *
52  * Journalling:
53  *   Check for bad keys in replay
54  *   Propagate barriers
55  *   Refcount journal entries in journal_replay
56  *
57  * Garbage collection:
58  *   Finish incremental gc
59  *   Gc should free old UUIDs, data for invalid UUIDs
60  *
61  * Provide a way to list backing device UUIDs we have data cached for, and
62  * probably how long it's been since we've seen them, and a way to invalidate
63  * dirty data for devices that will never be attached again
64  *
65  * Keep 1 min/5 min/15 min statistics of how busy a block device has been, so
66  * that based on that and how much dirty data we have we can keep writeback
67  * from being starved
68  *
69  * Add a tracepoint or somesuch to watch for writeback starvation
70  *
71  * When btree depth > 1 and splitting an interior node, we have to make sure
72  * alloc_bucket() cannot fail. This should be true but is not completely
73  * obvious.
74  *
75  * Plugging?
76  *
77  * If data write is less than hard sector size of ssd, round up offset in open
78  * bucket to the next whole sector
79  *
80  * Superblock needs to be fleshed out for multiple cache devices
81  *
82  * Add a sysfs tunable for the number of writeback IOs in flight
83  *
84  * Add a sysfs tunable for the number of open data buckets
85  *
86  * IO tracking: Can we track when one process is doing io on behalf of another?
87  * IO tracking: Don't use just an average, weigh more recent stuff higher
88  *
89  * Test module load/unload
90  */
91
92 #define MAX_NEED_GC             64
93 #define MAX_SAVE_PRIO           72
94 #define MAX_GC_TIMES            100
95 #define MIN_GC_NODES            100
96 #define GC_SLEEP_MS             100
97
98 #define PTR_DIRTY_BIT           (((uint64_t) 1 << 36))
99
100 #define PTR_HASH(c, k)                                                  \
101         (((k)->ptr[0] >> c->bucket_bits) | PTR_GEN(k, 0))
102
103 #define insert_lock(s, b)       ((b)->level <= (s)->lock)
104
105 /*
106  * These macros are for recursing down the btree - they handle the details of
107  * locking and looking up nodes in the cache for you. They're best treated as
108  * mere syntax when reading code that uses them.
109  *
110  * op->lock determines whether we take a read or a write lock at a given depth.
111  * If you've got a read lock and find that you need a write lock (i.e. you're
112  * going to have to split), set op->lock and return -EINTR; btree_root() will
113  * call you again and you'll have the correct lock.
114  */
115
116 /**
117  * btree - recurse down the btree on a specified key
118  * @fn:         function to call, which will be passed the child node
119  * @key:        key to recurse on
120  * @b:          parent btree node
121  * @op:         pointer to struct btree_op
122  */
123 #define btree(fn, key, b, op, ...)                                      \
124 ({                                                                      \
125         int _r, l = (b)->level - 1;                                     \
126         bool _w = l <= (op)->lock;                                      \
127         struct btree *_child = bch_btree_node_get((b)->c, op, key, l,   \
128                                                   _w, b);               \
129         if (!IS_ERR(_child)) {                                          \
130                 _r = bch_btree_ ## fn(_child, op, ##__VA_ARGS__);       \
131                 rw_unlock(_w, _child);                                  \
132         } else                                                          \
133                 _r = PTR_ERR(_child);                                   \
134         _r;                                                             \
135 })
136
137 /**
138  * btree_root - call a function on the root of the btree
139  * @fn:         function to call, which will be passed the child node
140  * @c:          cache set
141  * @op:         pointer to struct btree_op
142  */
143 #define btree_root(fn, c, op, ...)                                      \
144 ({                                                                      \
145         int _r = -EINTR;                                                \
146         do {                                                            \
147                 struct btree *_b = (c)->root;                           \
148                 bool _w = insert_lock(op, _b);                          \
149                 rw_lock(_w, _b, _b->level);                             \
150                 if (_b == (c)->root &&                                  \
151                     _w == insert_lock(op, _b)) {                        \
152                         _r = bch_btree_ ## fn(_b, op, ##__VA_ARGS__);   \
153                 }                                                       \
154                 rw_unlock(_w, _b);                                      \
155                 bch_cannibalize_unlock(c);                              \
156                 if (_r == -EINTR)                                       \
157                         schedule();                                     \
158         } while (_r == -EINTR);                                         \
159                                                                         \
160         finish_wait(&(c)->btree_cache_wait, &(op)->wait);               \
161         _r;                                                             \
162 })
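
/*
 * Illustrative sketch of how these macros are meant to be used (commentary
 * only, not compiled; the names bch_btree_frob, some_key and need_write_lock
 * are made up for illustration). A recursive helper follows the convention
 * described above: recurse via btree(), and if a read lock turns out to be
 * insufficient, bump op->lock and return -EINTR so that btree_root() retries
 * the whole traversal with the correct locks:
 *
 *	static int bch_btree_frob(struct btree *b, struct btree_op *op)
 *	{
 *		if (b->level)
 *			return btree(frob, &some_key, b, op);
 *
 *		if (need_write_lock) {
 *			op->lock = b->level;	// or SHRT_MAX for the whole path
 *			return -EINTR;
 *		}
 *		// ... do the actual work on the leaf ...
 *		return 0;
 *	}
 *
 *	ret = btree_root(frob, c, op);
 */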
163
164 static inline struct bset *write_block(struct btree *b)
165 {
166         return ((void *) btree_bset_first(b)) + b->written * block_bytes(b->c);
167 }
168
169 static void bch_btree_init_next(struct btree *b)
170 {
171         /* If not a leaf node, always sort */
172         if (b->level && b->keys.nsets)
173                 bch_btree_sort(&b->keys, &b->c->sort);
174         else
175                 bch_btree_sort_lazy(&b->keys, &b->c->sort);
176
177         if (b->written < btree_blocks(b))
178                 bch_bset_init_next(&b->keys, write_block(b),
179                                    bset_magic(&b->c->sb));
181 }
182
183 /* Btree key manipulation */
184
185 void bkey_put(struct cache_set *c, struct bkey *k)
186 {
187         unsigned int i;
188
189         for (i = 0; i < KEY_PTRS(k); i++)
190                 if (ptr_available(c, k, i))
191                         atomic_dec_bug(&PTR_BUCKET(c, k, i)->pin);
192 }
193
194 /* Btree IO */
195
196 static uint64_t btree_csum_set(struct btree *b, struct bset *i)
197 {
198         uint64_t crc = b->key.ptr[0];
199         void *data = (void *) i + 8, *end = bset_bkey_last(i);
200
201         crc = bch_crc64_update(crc, data, end - data);
202         return crc ^ 0xffffffffffffffffULL;
203 }
204
205 void bch_btree_node_read_done(struct btree *b)
206 {
207         const char *err = "bad btree header";
208         struct bset *i = btree_bset_first(b);
209         struct btree_iter *iter;
210
211         /*
212          * c->fill_iter can allocate an iterator with more memory space
213          * than the static MAX_BSETS allows.
214          * See the comment around cache_set->fill_iter.
215          */
216         iter = mempool_alloc(&b->c->fill_iter, GFP_NOIO);
217         iter->size = b->c->sb.bucket_size / b->c->sb.block_size;
218         iter->used = 0;
219
220 #ifdef CONFIG_BCACHE_DEBUG
221         iter->b = &b->keys;
222 #endif
223
224         if (!i->seq)
225                 goto err;
226
227         for (;
228              b->written < btree_blocks(b) && i->seq == b->keys.set[0].data->seq;
229              i = write_block(b)) {
230                 err = "unsupported bset version";
231                 if (i->version > BCACHE_BSET_VERSION)
232                         goto err;
233
234                 err = "bad btree header";
235                 if (b->written + set_blocks(i, block_bytes(b->c)) >
236                     btree_blocks(b))
237                         goto err;
238
239                 err = "bad magic";
240                 if (i->magic != bset_magic(&b->c->sb))
241                         goto err;
242
243                 err = "bad checksum";
244                 switch (i->version) {
245                 case 0:
246                         if (i->csum != csum_set(i))
247                                 goto err;
248                         break;
249                 case BCACHE_BSET_VERSION:
250                         if (i->csum != btree_csum_set(b, i))
251                                 goto err;
252                         break;
253                 }
254
255                 err = "empty set";
256                 if (i != b->keys.set[0].data && !i->keys)
257                         goto err;
258
259                 bch_btree_iter_push(iter, i->start, bset_bkey_last(i));
260
261                 b->written += set_blocks(i, block_bytes(b->c));
262         }
263
264         err = "corrupted btree";
265         for (i = write_block(b);
266              bset_sector_offset(&b->keys, i) < KEY_SIZE(&b->key);
267              i = ((void *) i) + block_bytes(b->c))
268                 if (i->seq == b->keys.set[0].data->seq)
269                         goto err;
270
271         bch_btree_sort_and_fix_extents(&b->keys, iter, &b->c->sort);
272
273         i = b->keys.set[0].data;
274         err = "short btree key";
275         if (b->keys.set[0].size &&
276             bkey_cmp(&b->key, &b->keys.set[0].end) < 0)
277                 goto err;
278
279         if (b->written < btree_blocks(b))
280                 bch_bset_init_next(&b->keys, write_block(b),
281                                    bset_magic(&b->c->sb));
282 out:
283         mempool_free(iter, &b->c->fill_iter);
284         return;
285 err:
286         set_btree_node_io_error(b);
287         bch_cache_set_error(b->c, "%s at bucket %zu, block %u, %u keys",
288                             err, PTR_BUCKET_NR(b->c, &b->key, 0),
289                             bset_block_offset(b, i), i->keys);
290         goto out;
291 }
292
293 static void btree_node_read_endio(struct bio *bio)
294 {
295         struct closure *cl = bio->bi_private;
296
297         closure_put(cl);
298 }
299
300 static void bch_btree_node_read(struct btree *b)
301 {
302         uint64_t start_time = local_clock();
303         struct closure cl;
304         struct bio *bio;
305
306         trace_bcache_btree_read(b);
307
308         closure_init_stack(&cl);
309
310         bio = bch_bbio_alloc(b->c);
311         bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
312         bio->bi_end_io  = btree_node_read_endio;
313         bio->bi_private = &cl;
314         bio->bi_opf = REQ_OP_READ | REQ_META;
315
316         bch_bio_map(bio, b->keys.set[0].data);
317
318         bch_submit_bbio(bio, b->c, &b->key, 0);
319         closure_sync(&cl);
320
321         if (bio->bi_status)
322                 set_btree_node_io_error(b);
323
324         bch_bbio_free(bio, b->c);
325
326         if (btree_node_io_error(b))
327                 goto err;
328
329         bch_btree_node_read_done(b);
330         bch_time_stats_update(&b->c->btree_read_time, start_time);
331
332         return;
333 err:
334         bch_cache_set_error(b->c, "io error reading bucket %zu",
335                             PTR_BUCKET_NR(b->c, &b->key, 0));
336 }
337
338 static void btree_complete_write(struct btree *b, struct btree_write *w)
339 {
340         if (w->prio_blocked &&
341             !atomic_sub_return(w->prio_blocked, &b->c->prio_blocked))
342                 wake_up_allocators(b->c);
343
344         if (w->journal) {
345                 atomic_dec_bug(w->journal);
346                 __closure_wake_up(&b->c->journal.wait);
347         }
348
349         w->prio_blocked = 0;
350         w->journal      = NULL;
351 }
352
353 static void btree_node_write_unlock(struct closure *cl)
354 {
355         struct btree *b = container_of(cl, struct btree, io);
356
357         up(&b->io_mutex);
358 }
359
360 static void __btree_node_write_done(struct closure *cl)
361 {
362         struct btree *b = container_of(cl, struct btree, io);
363         struct btree_write *w = btree_prev_write(b);
364
365         bch_bbio_free(b->bio, b->c);
366         b->bio = NULL;
367         btree_complete_write(b, w);
368
369         if (btree_node_dirty(b))
370                 schedule_delayed_work(&b->work, 30 * HZ);
371
372         closure_return_with_destructor(cl, btree_node_write_unlock);
373 }
374
375 static void btree_node_write_done(struct closure *cl)
376 {
377         struct btree *b = container_of(cl, struct btree, io);
378
379         bio_free_pages(b->bio);
380         __btree_node_write_done(cl);
381 }
382
383 static void btree_node_write_endio(struct bio *bio)
384 {
385         struct closure *cl = bio->bi_private;
386         struct btree *b = container_of(cl, struct btree, io);
387
388         if (bio->bi_status)
389                 set_btree_node_io_error(b);
390
391         bch_bbio_count_io_errors(b->c, bio, bio->bi_status, "writing btree");
392         closure_put(cl);
393 }
394
395 static void do_btree_node_write(struct btree *b)
396 {
397         struct closure *cl = &b->io;
398         struct bset *i = btree_bset_last(b);
399         BKEY_PADDED(key) k;
400
401         i->version      = BCACHE_BSET_VERSION;
402         i->csum         = btree_csum_set(b, i);
403
404         BUG_ON(b->bio);
405         b->bio = bch_bbio_alloc(b->c);
406
407         b->bio->bi_end_io       = btree_node_write_endio;
408         b->bio->bi_private      = cl;
409         b->bio->bi_iter.bi_size = roundup(set_bytes(i), block_bytes(b->c));
410         b->bio->bi_opf          = REQ_OP_WRITE | REQ_META | REQ_FUA;
411         bch_bio_map(b->bio, i);
412
413         /*
414          * If we're appending to a leaf node, we don't technically need FUA -
415          * this write just needs to be persisted before the next journal write,
416          * which will be marked FLUSH|FUA.
417          *
418          * Similarly if we're writing a new btree root - the pointer is going to
419          * be in the next journal entry.
420          *
421          * But if we're writing a new btree node (that isn't a root) or
422          * appending to a non leaf btree node, we need either FUA or a flush
423          * when we write the parent with the new pointer. FUA is cheaper than a
424          * flush, and writes appending to leaf nodes aren't blocking anything so
425          * just make all btree node writes FUA to keep things sane.
426          */
427
428         bkey_copy(&k.key, &b->key);
429         SET_PTR_OFFSET(&k.key, 0, PTR_OFFSET(&k.key, 0) +
430                        bset_sector_offset(&b->keys, i));
431
432         if (!bch_bio_alloc_pages(b->bio, __GFP_NOWARN|GFP_NOWAIT)) {
433                 struct bio_vec *bv;
434                 void *addr = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
435                 struct bvec_iter_all iter_all;
436
437                 bio_for_each_segment_all(bv, b->bio, iter_all) {
438                         memcpy(page_address(bv->bv_page), addr, PAGE_SIZE);
439                         addr += PAGE_SIZE;
440                 }
441
442                 bch_submit_bbio(b->bio, b->c, &k.key, 0);
443
444                 continue_at(cl, btree_node_write_done, NULL);
445         } else {
446                 /*
447                  * No problem for multipage bvec since the bio is
448                  * just allocated
449                  */
450                 b->bio->bi_vcnt = 0;
451                 bch_bio_map(b->bio, i);
452
453                 bch_submit_bbio(b->bio, b->c, &k.key, 0);
454
455                 closure_sync(cl);
456                 continue_at_nobarrier(cl, __btree_node_write_done, NULL);
457         }
458 }
459
460 void __bch_btree_node_write(struct btree *b, struct closure *parent)
461 {
462         struct bset *i = btree_bset_last(b);
463
464         lockdep_assert_held(&b->write_lock);
465
466         trace_bcache_btree_write(b);
467
468         BUG_ON(current->bio_list);
469         BUG_ON(b->written >= btree_blocks(b));
470         BUG_ON(b->written && !i->keys);
471         BUG_ON(btree_bset_first(b)->seq != i->seq);
472         bch_check_keys(&b->keys, "writing");
473
474         cancel_delayed_work(&b->work);
475
476         /* If caller isn't waiting for write, parent refcount is cache set */
477         down(&b->io_mutex);
478         closure_init(&b->io, parent ?: &b->c->cl);
479
480         clear_bit(BTREE_NODE_dirty,      &b->flags);
481         change_bit(BTREE_NODE_write_idx, &b->flags);
482
483         do_btree_node_write(b);
484
485         atomic_long_add(set_blocks(i, block_bytes(b->c)) * b->c->sb.block_size,
486                         &PTR_CACHE(b->c, &b->key, 0)->btree_sectors_written);
487
488         b->written += set_blocks(i, block_bytes(b->c));
489 }
490
491 void bch_btree_node_write(struct btree *b, struct closure *parent)
492 {
493         unsigned int nsets = b->keys.nsets;
494
495         lockdep_assert_held(&b->lock);
496
497         __bch_btree_node_write(b, parent);
498
499         /*
500          * do verify if there was more than one set initially (i.e. we did a
501          * sort) and we sorted down to a single set:
502          */
503         if (nsets && !b->keys.nsets)
504                 bch_btree_verify(b);
505
506         bch_btree_init_next(b);
507 }
508
509 static void bch_btree_node_write_sync(struct btree *b)
510 {
511         struct closure cl;
512
513         closure_init_stack(&cl);
514
515         mutex_lock(&b->write_lock);
516         bch_btree_node_write(b, &cl);
517         mutex_unlock(&b->write_lock);
518
519         closure_sync(&cl);
520 }
521
522 static void btree_node_write_work(struct work_struct *w)
523 {
524         struct btree *b = container_of(to_delayed_work(w), struct btree, work);
525
526         mutex_lock(&b->write_lock);
527         if (btree_node_dirty(b))
528                 __bch_btree_node_write(b, NULL);
529         mutex_unlock(&b->write_lock);
530 }
531
532 static void bch_btree_leaf_dirty(struct btree *b, atomic_t *journal_ref)
533 {
534         struct bset *i = btree_bset_last(b);
535         struct btree_write *w = btree_current_write(b);
536
537         lockdep_assert_held(&b->write_lock);
538
539         BUG_ON(!b->written);
540         BUG_ON(!i->keys);
541
542         if (!btree_node_dirty(b))
543                 schedule_delayed_work(&b->work, 30 * HZ);
544
545         set_btree_node_dirty(b);
546
547         /*
548          * w->journal is always the oldest journal pin of all bkeys
549          * in the leaf node, to make sure the oldest jset seq won't
550          * be increased before this btree node is flushed.
551          */
552         if (journal_ref) {
553                 if (w->journal &&
554                     journal_pin_cmp(b->c, w->journal, journal_ref)) {
555                         atomic_dec_bug(w->journal);
556                         w->journal = NULL;
557                 }
558
559                 if (!w->journal) {
560                         w->journal = journal_ref;
561                         atomic_inc(w->journal);
562                 }
563         }
564
565         /* Force write if set is too big */
566         if (set_bytes(i) > PAGE_SIZE - 48 &&
567             !current->bio_list)
568                 bch_btree_node_write(b, NULL);
569 }
570
571 /*
572  * Btree in memory cache - allocation/freeing
573  * mca -> memory cache
574  */
575
576 #define mca_reserve(c)  (((c->root && c->root->level)           \
577                           ? c->root->level : 1) * 8 + 16)
578 #define mca_can_free(c)                                         \
579         max_t(int, 0, c->btree_cache_used - mca_reserve(c))
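
/*
 * Worked example of the reserve arithmetic above (commentary only): with a
 * three-level tree (c->root->level == 2) the cache must hold back
 * 2 * 8 + 16 = 32 nodes; with a single-node tree the "?: 1" fallback gives
 * 1 * 8 + 16 = 24. mca_can_free() is then just however many cached nodes
 * exceed that floor, clamped at zero:
 *
 *	btree_cache_used = 100, reserve = 32  ->  mca_can_free(c) == 68
 *	btree_cache_used = 20,  reserve = 32  ->  mca_can_free(c) == 0
 */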
580
581 static void mca_data_free(struct btree *b)
582 {
583         BUG_ON(b->io_mutex.count != 1);
584
585         bch_btree_keys_free(&b->keys);
586
587         b->c->btree_cache_used--;
588         list_move(&b->list, &b->c->btree_cache_freed);
589 }
590
591 static void mca_bucket_free(struct btree *b)
592 {
593         BUG_ON(btree_node_dirty(b));
594
595         b->key.ptr[0] = 0;
596         hlist_del_init_rcu(&b->hash);
597         list_move(&b->list, &b->c->btree_cache_freeable);
598 }
599
600 static unsigned int btree_order(struct bkey *k)
601 {
602         return ilog2(KEY_SIZE(k) / PAGE_SECTORS ?: 1);
603 }
604
605 static void mca_data_alloc(struct btree *b, struct bkey *k, gfp_t gfp)
606 {
607         if (!bch_btree_keys_alloc(&b->keys,
608                                   max_t(unsigned int,
609                                         ilog2(b->c->btree_pages),
610                                         btree_order(k)),
611                                   gfp)) {
612                 b->c->btree_cache_used++;
613                 list_move(&b->list, &b->c->btree_cache);
614         } else {
615                 list_move(&b->list, &b->c->btree_cache_freed);
616         }
617 }
618
619 static struct btree *mca_bucket_alloc(struct cache_set *c,
620                                       struct bkey *k, gfp_t gfp)
621 {
622         /*
623          * kzalloc() is necessary here for initialization,
624          * see code comments in bch_btree_keys_init().
625          */
626         struct btree *b = kzalloc(sizeof(struct btree), gfp);
627
628         if (!b)
629                 return NULL;
630
631         init_rwsem(&b->lock);
632         lockdep_set_novalidate_class(&b->lock);
633         mutex_init(&b->write_lock);
634         lockdep_set_novalidate_class(&b->write_lock);
635         INIT_LIST_HEAD(&b->list);
636         INIT_DELAYED_WORK(&b->work, btree_node_write_work);
637         b->c = c;
638         sema_init(&b->io_mutex, 1);
639
640         mca_data_alloc(b, k, gfp);
641         return b;
642 }
643
644 static int mca_reap(struct btree *b, unsigned int min_order, bool flush)
645 {
646         struct closure cl;
647
648         closure_init_stack(&cl);
649         lockdep_assert_held(&b->c->bucket_lock);
650
651         if (!down_write_trylock(&b->lock))
652                 return -ENOMEM;
653
654         BUG_ON(btree_node_dirty(b) && !b->keys.set[0].data);
655
656         if (b->keys.page_order < min_order)
657                 goto out_unlock;
658
659         if (!flush) {
660                 if (btree_node_dirty(b))
661                         goto out_unlock;
662
663                 if (down_trylock(&b->io_mutex))
664                         goto out_unlock;
665                 up(&b->io_mutex);
666         }
667
668 retry:
669         /*
670          * BTREE_NODE_dirty might be cleared in btree_flush_write() by
671          * __bch_btree_node_write(). To avoid an extra flush, acquire
672          * b->write_lock before checking the BTREE_NODE_dirty bit.
673          */
674         mutex_lock(&b->write_lock);
675         /*
676          * If this btree node has been selected for flushing in btree_flush_write()
677          * by the journal code, delay and retry until the journal code has flushed
678          * the node and cleared its BTREE_NODE_journal_flush bit.
679          */
680         if (btree_node_journal_flush(b)) {
681                 pr_debug("bnode %p is being flushed by journal code, retry", b);
682                 mutex_unlock(&b->write_lock);
683                 udelay(1);
684                 goto retry;
685         }
686
687         if (btree_node_dirty(b))
688                 __bch_btree_node_write(b, &cl);
689         mutex_unlock(&b->write_lock);
690
691         closure_sync(&cl);
692
693         /* wait for any in flight btree write */
694         down(&b->io_mutex);
695         up(&b->io_mutex);
696
697         return 0;
698 out_unlock:
699         rw_unlock(true, b);
700         return -ENOMEM;
701 }
702
703 static unsigned long bch_mca_scan(struct shrinker *shrink,
704                                   struct shrink_control *sc)
705 {
706         struct cache_set *c = container_of(shrink, struct cache_set, shrink);
707         struct btree *b, *t;
708         unsigned long i, nr = sc->nr_to_scan;
709         unsigned long freed = 0;
710         unsigned int btree_cache_used;
711
712         if (c->shrinker_disabled)
713                 return SHRINK_STOP;
714
715         if (c->btree_cache_alloc_lock)
716                 return SHRINK_STOP;
717
718         /* Return -1 if we can't do anything right now */
719         if (sc->gfp_mask & __GFP_IO)
720                 mutex_lock(&c->bucket_lock);
721         else if (!mutex_trylock(&c->bucket_lock))
722                 return -1;
723
724         /*
725          * It's _really_ critical that we don't free too many btree nodes - we
726          * have to always leave ourselves a reserve. The reserve is how we
727          * guarantee that allocating memory for a new btree node can always
728          * succeed, so that inserting keys into the btree can always succeed and
729          * IO can always make forward progress:
730          */
731         nr /= c->btree_pages;
732         if (nr == 0)
733                 nr = 1;
734         nr = min_t(unsigned long, nr, mca_can_free(c));
735
736         i = 0;
737         btree_cache_used = c->btree_cache_used;
738         list_for_each_entry_safe_reverse(b, t, &c->btree_cache_freeable, list) {
739                 if (nr <= 0)
740                         goto out;
741
742                 if (!mca_reap(b, 0, false)) {
743                         mca_data_free(b);
744                         rw_unlock(true, b);
745                         freed++;
746                 }
747                 nr--;
748                 i++;
749         }
750
751         list_for_each_entry_safe_reverse(b, t, &c->btree_cache, list) {
752                 if (nr <= 0 || i >= btree_cache_used)
753                         goto out;
754
755                 if (!mca_reap(b, 0, false)) {
756                         mca_bucket_free(b);
757                         mca_data_free(b);
758                         rw_unlock(true, b);
759                         freed++;
760                 }
761
762                 nr--;
763                 i++;
764         }
765 out:
766         mutex_unlock(&c->bucket_lock);
767         return freed * c->btree_pages;
768 }
769
770 static unsigned long bch_mca_count(struct shrinker *shrink,
771                                    struct shrink_control *sc)
772 {
773         struct cache_set *c = container_of(shrink, struct cache_set, shrink);
774
775         if (c->shrinker_disabled)
776                 return 0;
777
778         if (c->btree_cache_alloc_lock)
779                 return 0;
780
781         return mca_can_free(c) * c->btree_pages;
782 }
783
784 void bch_btree_cache_free(struct cache_set *c)
785 {
786         struct btree *b;
787         struct closure cl;
788
789         closure_init_stack(&cl);
790
791         if (c->shrink.list.next)
792                 unregister_shrinker(&c->shrink);
793
794         mutex_lock(&c->bucket_lock);
795
796 #ifdef CONFIG_BCACHE_DEBUG
797         if (c->verify_data)
798                 list_move(&c->verify_data->list, &c->btree_cache);
799
800         free_pages((unsigned long) c->verify_ondisk, ilog2(bucket_pages(c)));
801 #endif
802
803         list_splice(&c->btree_cache_freeable,
804                     &c->btree_cache);
805
806         while (!list_empty(&c->btree_cache)) {
807                 b = list_first_entry(&c->btree_cache, struct btree, list);
808
809                 /*
810                  * This function is called by cache_set_free(), no I/O
811                  * request on cache now, it is unnecessary to acquire
812                  * b->write_lock before clearing BTREE_NODE_dirty anymore.
813                  */
814                 if (btree_node_dirty(b)) {
815                         btree_complete_write(b, btree_current_write(b));
816                         clear_bit(BTREE_NODE_dirty, &b->flags);
817                 }
818                 mca_data_free(b);
819         }
820
821         while (!list_empty(&c->btree_cache_freed)) {
822                 b = list_first_entry(&c->btree_cache_freed,
823                                      struct btree, list);
824                 list_del(&b->list);
825                 cancel_delayed_work_sync(&b->work);
826                 kfree(b);
827         }
828
829         mutex_unlock(&c->bucket_lock);
830 }
831
832 int bch_btree_cache_alloc(struct cache_set *c)
833 {
834         unsigned int i;
835
836         for (i = 0; i < mca_reserve(c); i++)
837                 if (!mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL))
838                         return -ENOMEM;
839
840         list_splice_init(&c->btree_cache,
841                          &c->btree_cache_freeable);
842
843 #ifdef CONFIG_BCACHE_DEBUG
844         mutex_init(&c->verify_lock);
845
846         c->verify_ondisk = (void *)
847                 __get_free_pages(GFP_KERNEL, ilog2(bucket_pages(c)));
848
849         c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL);
850
851         if (c->verify_data &&
852             c->verify_data->keys.set->data)
853                 list_del_init(&c->verify_data->list);
854         else
855                 c->verify_data = NULL;
856 #endif
857
858         c->shrink.count_objects = bch_mca_count;
859         c->shrink.scan_objects = bch_mca_scan;
860         c->shrink.seeks = 4;
861         c->shrink.batch = c->btree_pages * 2;
862
863         if (register_shrinker(&c->shrink))
864                 pr_warn("bcache: %s: could not register shrinker",
865                                 __func__);
866
867         return 0;
868 }
869
870 /* Btree in memory cache - hash table */
871
872 static struct hlist_head *mca_hash(struct cache_set *c, struct bkey *k)
873 {
874         return &c->bucket_hash[hash_32(PTR_HASH(c, k), BUCKET_HASH_BITS)];
875 }
876
877 static struct btree *mca_find(struct cache_set *c, struct bkey *k)
878 {
879         struct btree *b;
880
881         rcu_read_lock();
882         hlist_for_each_entry_rcu(b, mca_hash(c, k), hash)
883                 if (PTR_HASH(c, &b->key) == PTR_HASH(c, k))
884                         goto out;
885         b = NULL;
886 out:
887         rcu_read_unlock();
888         return b;
889 }
890
891 static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op)
892 {
893         spin_lock(&c->btree_cannibalize_lock);
894         if (likely(c->btree_cache_alloc_lock == NULL)) {
895                 c->btree_cache_alloc_lock = current;
896         } else if (c->btree_cache_alloc_lock != current) {
897                 if (op)
898                         prepare_to_wait(&c->btree_cache_wait, &op->wait,
899                                         TASK_UNINTERRUPTIBLE);
900                 spin_unlock(&c->btree_cannibalize_lock);
901                 return -EINTR;
902         }
903         spin_unlock(&c->btree_cannibalize_lock);
904
905         return 0;
906 }
907
908 static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op,
909                                      struct bkey *k)
910 {
911         struct btree *b;
912
913         trace_bcache_btree_cache_cannibalize(c);
914
915         if (mca_cannibalize_lock(c, op))
916                 return ERR_PTR(-EINTR);
917
918         list_for_each_entry_reverse(b, &c->btree_cache, list)
919                 if (!mca_reap(b, btree_order(k), false))
920                         return b;
921
922         list_for_each_entry_reverse(b, &c->btree_cache, list)
923                 if (!mca_reap(b, btree_order(k), true))
924                         return b;
925
926         WARN(1, "btree cache cannibalize failed\n");
927         return ERR_PTR(-ENOMEM);
928 }
929
930 /*
931  * We can only have one thread cannibalizing other cached btree nodes at a time,
932  * or we'll deadlock. We use an open coded mutex to ensure that, which
933  * mca_cannibalize_lock() will take. This means every time we unlock the root of
934  * the btree, we need to release this lock if we have it held.
935  */
936 static void bch_cannibalize_unlock(struct cache_set *c)
937 {
938         spin_lock(&c->btree_cannibalize_lock);
939         if (c->btree_cache_alloc_lock == current) {
940                 c->btree_cache_alloc_lock = NULL;
941                 wake_up(&c->btree_cache_wait);
942         }
943         spin_unlock(&c->btree_cannibalize_lock);
944 }
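
/*
 * Illustrative sketch of the locking protocol above (commentary only): the
 * pseudo-mutex is taken in mca_cannibalize_lock(), reached via
 * mca_cannibalize() or btree_check_reserve(), and must be dropped whenever
 * the btree root is unlocked, which is why btree_root() ends every iteration
 * with:
 *
 *	rw_lock(_w, _b, _b->level);
 *	_r = bch_btree_ ## fn(_b, op, ...);	// may cannibalize, may -EINTR
 *	rw_unlock(_w, _b);
 *	bch_cannibalize_unlock(c);		// let other threads cannibalize
 */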
945
946 static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op,
947                                struct bkey *k, int level)
948 {
949         struct btree *b;
950
951         BUG_ON(current->bio_list);
952
953         lockdep_assert_held(&c->bucket_lock);
954
955         if (mca_find(c, k))
956                 return NULL;
957
958         /* btree_node_free() doesn't free memory; it sticks the node on the end of
959          * the list. Check if there are any freed nodes there:
960          */
961         list_for_each_entry(b, &c->btree_cache_freeable, list)
962                 if (!mca_reap(b, btree_order(k), false))
963                         goto out;
964
965         /* We never free struct btree itself, just the memory that holds the on
966          * disk node. Check the freed list before allocating a new one:
967          */
968         list_for_each_entry(b, &c->btree_cache_freed, list)
969                 if (!mca_reap(b, 0, false)) {
970                         mca_data_alloc(b, k, __GFP_NOWARN|GFP_NOIO);
971                         if (!b->keys.set[0].data)
972                                 goto err;
973                         else
974                                 goto out;
975                 }
976
977         b = mca_bucket_alloc(c, k, __GFP_NOWARN|GFP_NOIO);
978         if (!b)
979                 goto err;
980
981         BUG_ON(!down_write_trylock(&b->lock));
982         if (!b->keys.set->data)
983                 goto err;
984 out:
985         BUG_ON(b->io_mutex.count != 1);
986
987         bkey_copy(&b->key, k);
988         list_move(&b->list, &c->btree_cache);
989         hlist_del_init_rcu(&b->hash);
990         hlist_add_head_rcu(&b->hash, mca_hash(c, k));
991
992         lock_set_subclass(&b->lock.dep_map, level + 1, _THIS_IP_);
993         b->parent       = (void *) ~0UL;
994         b->flags        = 0;
995         b->written      = 0;
996         b->level        = level;
997
998         if (!b->level)
999                 bch_btree_keys_init(&b->keys, &bch_extent_keys_ops,
1000                                     &b->c->expensive_debug_checks);
1001         else
1002                 bch_btree_keys_init(&b->keys, &bch_btree_keys_ops,
1003                                     &b->c->expensive_debug_checks);
1004
1005         return b;
1006 err:
1007         if (b)
1008                 rw_unlock(true, b);
1009
1010         b = mca_cannibalize(c, op, k);
1011         if (!IS_ERR(b))
1012                 goto out;
1013
1014         return b;
1015 }
1016
1017 /*
1018  * bch_btree_node_get - find a btree node in the cache and lock it, reading it
1019  * in from disk if necessary.
1020  *
1021  * If IO is necessary and running under generic_make_request, returns -EAGAIN.
1022  *
1023  * The btree node will have either a read or a write lock held, depending on
1024  * level and op->lock.
1025  */
1026 struct btree *bch_btree_node_get(struct cache_set *c, struct btree_op *op,
1027                                  struct bkey *k, int level, bool write,
1028                                  struct btree *parent)
1029 {
1030         int i = 0;
1031         struct btree *b;
1032
1033         BUG_ON(level < 0);
1034 retry:
1035         b = mca_find(c, k);
1036
1037         if (!b) {
1038                 if (current->bio_list)
1039                         return ERR_PTR(-EAGAIN);
1040
1041                 mutex_lock(&c->bucket_lock);
1042                 b = mca_alloc(c, op, k, level);
1043                 mutex_unlock(&c->bucket_lock);
1044
1045                 if (!b)
1046                         goto retry;
1047                 if (IS_ERR(b))
1048                         return b;
1049
1050                 bch_btree_node_read(b);
1051
1052                 if (!write)
1053                         downgrade_write(&b->lock);
1054         } else {
1055                 rw_lock(write, b, level);
1056                 if (PTR_HASH(c, &b->key) != PTR_HASH(c, k)) {
1057                         rw_unlock(write, b);
1058                         goto retry;
1059                 }
1060                 BUG_ON(b->level != level);
1061         }
1062
1063         if (btree_node_io_error(b)) {
1064                 rw_unlock(write, b);
1065                 return ERR_PTR(-EIO);
1066         }
1067
1068         BUG_ON(!b->written);
1069
1070         b->parent = parent;
1071
1072         for (; i <= b->keys.nsets && b->keys.set[i].size; i++) {
1073                 prefetch(b->keys.set[i].tree);
1074                 prefetch(b->keys.set[i].data);
1075         }
1076
1077         for (; i <= b->keys.nsets; i++)
1078                 prefetch(b->keys.set[i].data);
1079
1080         return b;
1081 }
1082
1083 static void btree_node_prefetch(struct btree *parent, struct bkey *k)
1084 {
1085         struct btree *b;
1086
1087         mutex_lock(&parent->c->bucket_lock);
1088         b = mca_alloc(parent->c, NULL, k, parent->level - 1);
1089         mutex_unlock(&parent->c->bucket_lock);
1090
1091         if (!IS_ERR_OR_NULL(b)) {
1092                 b->parent = parent;
1093                 bch_btree_node_read(b);
1094                 rw_unlock(true, b);
1095         }
1096 }
1097
1098 /* Btree alloc */
1099
1100 static void btree_node_free(struct btree *b)
1101 {
1102         trace_bcache_btree_node_free(b);
1103
1104         BUG_ON(b == b->c->root);
1105
1106 retry:
1107         mutex_lock(&b->write_lock);
1108         /*
1109          * If the btree node has been selected for flushing in btree_flush_write(),
1110          * delay and retry until the BTREE_NODE_journal_flush bit is cleared;
1111          * only then is it safe to free the btree node here. Otherwise this
1112          * btree node would race with the journal flush.
1113          */
1114         if (btree_node_journal_flush(b)) {
1115                 mutex_unlock(&b->write_lock);
1116                 pr_debug("bnode %p journal_flush set, retry", b);
1117                 udelay(1);
1118                 goto retry;
1119         }
1120
1121         if (btree_node_dirty(b)) {
1122                 btree_complete_write(b, btree_current_write(b));
1123                 clear_bit(BTREE_NODE_dirty, &b->flags);
1124         }
1125
1126         mutex_unlock(&b->write_lock);
1127
1128         cancel_delayed_work(&b->work);
1129
1130         mutex_lock(&b->c->bucket_lock);
1131         bch_bucket_free(b->c, &b->key);
1132         mca_bucket_free(b);
1133         mutex_unlock(&b->c->bucket_lock);
1134 }
1135
1136 struct btree *__bch_btree_node_alloc(struct cache_set *c, struct btree_op *op,
1137                                      int level, bool wait,
1138                                      struct btree *parent)
1139 {
1140         BKEY_PADDED(key) k;
1141         struct btree *b = ERR_PTR(-EAGAIN);
1142
1143         mutex_lock(&c->bucket_lock);
1144 retry:
1145         if (__bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, wait))
1146                 goto err;
1147
1148         bkey_put(c, &k.key);
1149         SET_KEY_SIZE(&k.key, c->btree_pages * PAGE_SECTORS);
1150
1151         b = mca_alloc(c, op, &k.key, level);
1152         if (IS_ERR(b))
1153                 goto err_free;
1154
1155         if (!b) {
1156                 cache_bug(c,
1157                         "Tried to allocate bucket that was in btree cache");
1158                 goto retry;
1159         }
1160
1161         b->parent = parent;
1162         bch_bset_init_next(&b->keys, b->keys.set->data, bset_magic(&b->c->sb));
1163
1164         mutex_unlock(&c->bucket_lock);
1165
1166         trace_bcache_btree_node_alloc(b);
1167         return b;
1168 err_free:
1169         bch_bucket_free(c, &k.key);
1170 err:
1171         mutex_unlock(&c->bucket_lock);
1172
1173         trace_bcache_btree_node_alloc_fail(c);
1174         return b;
1175 }
1176
1177 static struct btree *bch_btree_node_alloc(struct cache_set *c,
1178                                           struct btree_op *op, int level,
1179                                           struct btree *parent)
1180 {
1181         return __bch_btree_node_alloc(c, op, level, op != NULL, parent);
1182 }
1183
1184 static struct btree *btree_node_alloc_replacement(struct btree *b,
1185                                                   struct btree_op *op)
1186 {
1187         struct btree *n = bch_btree_node_alloc(b->c, op, b->level, b->parent);
1188
1189         if (!IS_ERR_OR_NULL(n)) {
1190                 mutex_lock(&n->write_lock);
1191                 bch_btree_sort_into(&b->keys, &n->keys, &b->c->sort);
1192                 bkey_copy_key(&n->key, &b->key);
1193                 mutex_unlock(&n->write_lock);
1194         }
1195
1196         return n;
1197 }
1198
1199 static void make_btree_freeing_key(struct btree *b, struct bkey *k)
1200 {
1201         unsigned int i;
1202
1203         mutex_lock(&b->c->bucket_lock);
1204
1205         atomic_inc(&b->c->prio_blocked);
1206
1207         bkey_copy(k, &b->key);
1208         bkey_copy_key(k, &ZERO_KEY);
1209
1210         for (i = 0; i < KEY_PTRS(k); i++)
1211                 SET_PTR_GEN(k, i,
1212                             bch_inc_gen(PTR_CACHE(b->c, &b->key, i),
1213                                         PTR_BUCKET(b->c, &b->key, i)));
1214
1215         mutex_unlock(&b->c->bucket_lock);
1216 }
1217
1218 static int btree_check_reserve(struct btree *b, struct btree_op *op)
1219 {
1220         struct cache_set *c = b->c;
1221         struct cache *ca;
1222         unsigned int i, reserve = (c->root->level - b->level) * 2 + 1;
1223
1224         mutex_lock(&c->bucket_lock);
1225
1226         for_each_cache(ca, c, i)
1227                 if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) {
1228                         if (op)
1229                                 prepare_to_wait(&c->btree_cache_wait, &op->wait,
1230                                                 TASK_UNINTERRUPTIBLE);
1231                         mutex_unlock(&c->bucket_lock);
1232                         return -EINTR;
1233                 }
1234
1235         mutex_unlock(&c->bucket_lock);
1236
1237         return mca_cannibalize_lock(b->c, op);
1238 }
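
/*
 * Worked example (commentary only): for an insert into a leaf (b->level == 0)
 * under a root at level 2, the reserve above is (2 - 0) * 2 + 1 = 5 free
 * buckets per cache - roughly two nodes for a split at each level on the way
 * back up, plus one for a possible new root - before the insert is allowed
 * to proceed.
 */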
1239
1240 /* Garbage collection */
1241
1242 static uint8_t __bch_btree_mark_key(struct cache_set *c, int level,
1243                                     struct bkey *k)
1244 {
1245         uint8_t stale = 0;
1246         unsigned int i;
1247         struct bucket *g;
1248
1249         /*
1250          * ptr_invalid() can't return true for the keys that mark btree nodes as
1251          * freed, but since ptr_bad() returns true we'll never actually use them
1252          * for anything and thus we don't want to mark their pointers here
1253          */
1254         if (!bkey_cmp(k, &ZERO_KEY))
1255                 return stale;
1256
1257         for (i = 0; i < KEY_PTRS(k); i++) {
1258                 if (!ptr_available(c, k, i))
1259                         continue;
1260
1261                 g = PTR_BUCKET(c, k, i);
1262
1263                 if (gen_after(g->last_gc, PTR_GEN(k, i)))
1264                         g->last_gc = PTR_GEN(k, i);
1265
1266                 if (ptr_stale(c, k, i)) {
1267                         stale = max(stale, ptr_stale(c, k, i));
1268                         continue;
1269                 }
1270
1271                 cache_bug_on(GC_MARK(g) &&
1272                              (GC_MARK(g) == GC_MARK_METADATA) != (level != 0),
1273                              c, "inconsistent ptrs: mark = %llu, level = %i",
1274                              GC_MARK(g), level);
1275
1276                 if (level)
1277                         SET_GC_MARK(g, GC_MARK_METADATA);
1278                 else if (KEY_DIRTY(k))
1279                         SET_GC_MARK(g, GC_MARK_DIRTY);
1280                 else if (!GC_MARK(g))
1281                         SET_GC_MARK(g, GC_MARK_RECLAIMABLE);
1282
1283                 /* guard against overflow */
1284                 SET_GC_SECTORS_USED(g, min_t(unsigned int,
1285                                              GC_SECTORS_USED(g) + KEY_SIZE(k),
1286                                              MAX_GC_SECTORS_USED));
1287
1288                 BUG_ON(!GC_SECTORS_USED(g));
1289         }
1290
1291         return stale;
1292 }
1293
1294 #define btree_mark_key(b, k)    __bch_btree_mark_key(b->c, b->level, k)
1295
1296 void bch_initial_mark_key(struct cache_set *c, int level, struct bkey *k)
1297 {
1298         unsigned int i;
1299
1300         for (i = 0; i < KEY_PTRS(k); i++)
1301                 if (ptr_available(c, k, i) &&
1302                     !ptr_stale(c, k, i)) {
1303                         struct bucket *b = PTR_BUCKET(c, k, i);
1304
1305                         b->gen = PTR_GEN(k, i);
1306
1307                         if (level && bkey_cmp(k, &ZERO_KEY))
1308                                 b->prio = BTREE_PRIO;
1309                         else if (!level && b->prio == BTREE_PRIO)
1310                                 b->prio = INITIAL_PRIO;
1311                 }
1312
1313         __bch_btree_mark_key(c, level, k);
1314 }
1315
1316 void bch_update_bucket_in_use(struct cache_set *c, struct gc_stat *stats)
1317 {
1318         stats->in_use = (c->nbuckets - c->avail_nbuckets) * 100 / c->nbuckets;
1319 }
1320
1321 static bool btree_gc_mark_node(struct btree *b, struct gc_stat *gc)
1322 {
1323         uint8_t stale = 0;
1324         unsigned int keys = 0, good_keys = 0;
1325         struct bkey *k;
1326         struct btree_iter iter;
1327         struct bset_tree *t;
1328
1329         gc->nodes++;
1330
1331         for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid) {
1332                 stale = max(stale, btree_mark_key(b, k));
1333                 keys++;
1334
1335                 if (bch_ptr_bad(&b->keys, k))
1336                         continue;
1337
1338                 gc->key_bytes += bkey_u64s(k);
1339                 gc->nkeys++;
1340                 good_keys++;
1341
1342                 gc->data += KEY_SIZE(k);
1343         }
1344
1345         for (t = b->keys.set; t <= &b->keys.set[b->keys.nsets]; t++)
1346                 btree_bug_on(t->size &&
1347                              bset_written(&b->keys, t) &&
1348                              bkey_cmp(&b->key, &t->end) < 0,
1349                              b, "found short btree key in gc");
1350
1351         if (b->c->gc_always_rewrite)
1352                 return true;
1353
1354         if (stale > 10)
1355                 return true;
1356
1357         if ((keys - good_keys) * 2 > keys)
1358                 return true;
1359
1360         return false;
1361 }
1362
1363 #define GC_MERGE_NODES  4U
1364
1365 struct gc_merge_info {
1366         struct btree    *b;
1367         unsigned int    keys;
1368 };
1369
1370 static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
1371                                  struct keylist *insert_keys,
1372                                  atomic_t *journal_ref,
1373                                  struct bkey *replace_key);
1374
1375 static int btree_gc_coalesce(struct btree *b, struct btree_op *op,
1376                              struct gc_stat *gc, struct gc_merge_info *r)
1377 {
1378         unsigned int i, nodes = 0, keys = 0, blocks;
1379         struct btree *new_nodes[GC_MERGE_NODES];
1380         struct keylist keylist;
1381         struct closure cl;
1382         struct bkey *k;
1383
1384         bch_keylist_init(&keylist);
1385
1386         if (btree_check_reserve(b, NULL))
1387                 return 0;
1388
1389         memset(new_nodes, 0, sizeof(new_nodes));
1390         closure_init_stack(&cl);
1391
1392         while (nodes < GC_MERGE_NODES && !IS_ERR_OR_NULL(r[nodes].b))
1393                 keys += r[nodes++].keys;
1394
1395         blocks = btree_default_blocks(b->c) * 2 / 3;
1396
1397         if (nodes < 2 ||
1398             __set_blocks(b->keys.set[0].data, keys,
1399                          block_bytes(b->c)) > blocks * (nodes - 1))
1400                 return 0;
1401
1402         for (i = 0; i < nodes; i++) {
1403                 new_nodes[i] = btree_node_alloc_replacement(r[i].b, NULL);
1404                 if (IS_ERR_OR_NULL(new_nodes[i]))
1405                         goto out_nocoalesce;
1406         }
1407
1408         /*
1409          * We have to check the reserve here, after we've allocated our new
1410          * nodes, to make sure the insert below will succeed - we also check
1411          * before as an optimization to potentially avoid a bunch of expensive
1412          * allocs/sorts
1413          */
1414         if (btree_check_reserve(b, NULL))
1415                 goto out_nocoalesce;
1416
1417         for (i = 0; i < nodes; i++)
1418                 mutex_lock(&new_nodes[i]->write_lock);
1419
1420         for (i = nodes - 1; i > 0; --i) {
1421                 struct bset *n1 = btree_bset_first(new_nodes[i]);
1422                 struct bset *n2 = btree_bset_first(new_nodes[i - 1]);
1423                 struct bkey *k, *last = NULL;
1424
1425                 keys = 0;
1426
1427                 if (i > 1) {
1428                         for (k = n2->start;
1429                              k < bset_bkey_last(n2);
1430                              k = bkey_next(k)) {
1431                                 if (__set_blocks(n1, n1->keys + keys +
1432                                                  bkey_u64s(k),
1433                                                  block_bytes(b->c)) > blocks)
1434                                         break;
1435
1436                                 last = k;
1437                                 keys += bkey_u64s(k);
1438                         }
1439                 } else {
1440                         /*
1441                          * Last node we're not getting rid of - we're getting
1442                          * rid of the node at r[0]. Have to try and fit all of
1443                          * the remaining keys into this node; we can't ensure
1444                          * they will always fit due to rounding and variable
1445                          * length keys (shouldn't be possible in practice,
1446                          * though)
1447                          */
1448                         if (__set_blocks(n1, n1->keys + n2->keys,
1449                                          block_bytes(b->c)) >
1450                             btree_blocks(new_nodes[i]))
1451                                 goto out_nocoalesce;
1452
1453                         keys = n2->keys;
1454                         /* Take the key of the node we're getting rid of */
1455                         last = &r->b->key;
1456                 }
1457
1458                 BUG_ON(__set_blocks(n1, n1->keys + keys, block_bytes(b->c)) >
1459                        btree_blocks(new_nodes[i]));
1460
1461                 if (last)
1462                         bkey_copy_key(&new_nodes[i]->key, last);
1463
1464                 memcpy(bset_bkey_last(n1),
1465                        n2->start,
1466                        (void *) bset_bkey_idx(n2, keys) - (void *) n2->start);
1467
1468                 n1->keys += keys;
1469                 r[i].keys = n1->keys;
1470
1471                 memmove(n2->start,
1472                         bset_bkey_idx(n2, keys),
1473                         (void *) bset_bkey_last(n2) -
1474                         (void *) bset_bkey_idx(n2, keys));
1475
1476                 n2->keys -= keys;
1477
1478                 if (__bch_keylist_realloc(&keylist,
1479                                           bkey_u64s(&new_nodes[i]->key)))
1480                         goto out_nocoalesce;
1481
1482                 bch_btree_node_write(new_nodes[i], &cl);
1483                 bch_keylist_add(&keylist, &new_nodes[i]->key);
1484         }
1485
1486         for (i = 0; i < nodes; i++)
1487                 mutex_unlock(&new_nodes[i]->write_lock);
1488
1489         closure_sync(&cl);
1490
1491         /* We emptied out this node */
1492         BUG_ON(btree_bset_first(new_nodes[0])->keys);
1493         btree_node_free(new_nodes[0]);
1494         rw_unlock(true, new_nodes[0]);
1495         new_nodes[0] = NULL;
1496
1497         for (i = 0; i < nodes; i++) {
1498                 if (__bch_keylist_realloc(&keylist, bkey_u64s(&r[i].b->key)))
1499                         goto out_nocoalesce;
1500
1501                 make_btree_freeing_key(r[i].b, keylist.top);
1502                 bch_keylist_push(&keylist);
1503         }
1504
1505         bch_btree_insert_node(b, op, &keylist, NULL, NULL);
1506         BUG_ON(!bch_keylist_empty(&keylist));
1507
1508         for (i = 0; i < nodes; i++) {
1509                 btree_node_free(r[i].b);
1510                 rw_unlock(true, r[i].b);
1511
1512                 r[i].b = new_nodes[i];
1513         }
1514
1515         memmove(r, r + 1, sizeof(r[0]) * (nodes - 1));
1516         r[nodes - 1].b = ERR_PTR(-EINTR);
1517
1518         trace_bcache_btree_gc_coalesce(nodes);
1519         gc->nodes--;
1520
1521         bch_keylist_free(&keylist);
1522
1523         /* Invalidated our iterator */
1524         return -EINTR;
1525
1526 out_nocoalesce:
1527         closure_sync(&cl);
1528
1529         while ((k = bch_keylist_pop(&keylist)))
1530                 if (!bkey_cmp(k, &ZERO_KEY))
1531                         atomic_dec(&b->c->prio_blocked);
1532         bch_keylist_free(&keylist);
1533
1534         for (i = 0; i < nodes; i++)
1535                 if (!IS_ERR_OR_NULL(new_nodes[i])) {
1536                         btree_node_free(new_nodes[i]);
1537                         rw_unlock(true, new_nodes[i]);
1538                 }
1539         return 0;
1540 }
1541
1542 static int btree_gc_rewrite_node(struct btree *b, struct btree_op *op,
1543                                  struct btree *replace)
1544 {
1545         struct keylist keys;
1546         struct btree *n;
1547
1548         if (btree_check_reserve(b, NULL))
1549                 return 0;
1550
1551         n = btree_node_alloc_replacement(replace, NULL);
1552
1553         /* recheck reserve after allocating replacement node */
1554         if (btree_check_reserve(b, NULL)) {
1555                 btree_node_free(n);
1556                 rw_unlock(true, n);
1557                 return 0;
1558         }
1559
1560         bch_btree_node_write_sync(n);
1561
1562         bch_keylist_init(&keys);
1563         bch_keylist_add(&keys, &n->key);
1564
1565         make_btree_freeing_key(replace, keys.top);
1566         bch_keylist_push(&keys);
1567
1568         bch_btree_insert_node(b, op, &keys, NULL, NULL);
1569         BUG_ON(!bch_keylist_empty(&keys));
1570
1571         btree_node_free(replace);
1572         rw_unlock(true, n);
1573
1574         /* Invalidated our iterator */
1575         return -EINTR;
1576 }
1577
1578 static unsigned int btree_gc_count_keys(struct btree *b)
1579 {
1580         struct bkey *k;
1581         struct btree_iter iter;
1582         unsigned int ret = 0;
1583
1584         for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
1585                 ret += bkey_u64s(k);
1586
1587         return ret;
1588 }
1589
1590 static size_t btree_gc_min_nodes(struct cache_set *c)
1591 {
1592         size_t min_nodes;
1593
1594         /*
1595           * Incremental GC pauses for 100ms whenever front-end
1596           * I/O arrives, so if each pass only handled a constant
1597           * number (100) of nodes, GC on a btree with many nodes
1598           * would take a very long time; meanwhile the front-end
1599           * I/Os would exhaust the free buckets (no new bucket
1600           * can be allocated while GC runs) and stall again.
1601           * So instead of using a constant batch size, the batch
1602           * is scaled with the total number of btree nodes: GC is
1603           * divided into a constant number (MAX_GC_TIMES, i.e.
1604           * 100) of passes, so a btree with many nodes gets more
1605           * nodes processed per pass and a smaller one gets
1606           * fewer, but never fewer than MIN_GC_NODES per pass.
1607          */
1608         min_nodes = c->gc_stats.nodes / MAX_GC_TIMES;
1609         if (min_nodes < MIN_GC_NODES)
1610                 min_nodes = MIN_GC_NODES;
1611
1612         return min_nodes;
1613 }
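
/*
 * A minimal, standalone userspace sketch of the batch-size calculation
 * above: each GC pass handles at least total_nodes / MAX_GC_TIMES nodes,
 * floored at MIN_GC_NODES, so even a very large btree is covered in a
 * bounded number of passes.  The constants and names below are assumptions
 * made for the sketch only; it is not built into bcache (guarded by #if 0).
 */
#if 0
#include <stdio.h>

#define SKETCH_MAX_GC_TIMES     100     /* assumed to mirror MAX_GC_TIMES */
#define SKETCH_MIN_GC_NODES     100     /* assumed to mirror MIN_GC_NODES */

static size_t sketch_gc_min_nodes(size_t total_nodes)
{
        size_t min_nodes = total_nodes / SKETCH_MAX_GC_TIMES;

        /* small trees still get a useful batch size */
        return min_nodes < SKETCH_MIN_GC_NODES ? SKETCH_MIN_GC_NODES : min_nodes;
}

int main(void)
{
        size_t totals[] = { 50, 10000, 1000000 };
        size_t i;

        for (i = 0; i < sizeof(totals) / sizeof(totals[0]); i++)
                printf("%zu nodes -> %zu nodes per GC pass\n",
                       totals[i], sketch_gc_min_nodes(totals[i]));
        return 0;
}
#endif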
1614
1615
1616 static int btree_gc_recurse(struct btree *b, struct btree_op *op,
1617                             struct closure *writes, struct gc_stat *gc)
1618 {
1619         int ret = 0;
1620         bool should_rewrite;
1621         struct bkey *k;
1622         struct btree_iter iter;
1623         struct gc_merge_info r[GC_MERGE_NODES];
1624         struct gc_merge_info *i, *last = r + ARRAY_SIZE(r) - 1;
1625
1626         bch_btree_iter_init(&b->keys, &iter, &b->c->gc_done);
1627
1628         for (i = r; i < r + ARRAY_SIZE(r); i++)
1629                 i->b = ERR_PTR(-EINTR);
1630
1631         while (1) {
1632                 k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad);
1633                 if (k) {
1634                         r->b = bch_btree_node_get(b->c, op, k, b->level - 1,
1635                                                   true, b);
1636                         if (IS_ERR(r->b)) {
1637                                 ret = PTR_ERR(r->b);
1638                                 break;
1639                         }
1640
1641                         r->keys = btree_gc_count_keys(r->b);
1642
1643                         ret = btree_gc_coalesce(b, op, gc, r);
1644                         if (ret)
1645                                 break;
1646                 }
1647
1648                 if (!last->b)
1649                         break;
1650
1651                 if (!IS_ERR(last->b)) {
1652                         should_rewrite = btree_gc_mark_node(last->b, gc);
1653                         if (should_rewrite) {
1654                                 ret = btree_gc_rewrite_node(b, op, last->b);
1655                                 if (ret)
1656                                         break;
1657                         }
1658
1659                         if (last->b->level) {
1660                                 ret = btree_gc_recurse(last->b, op, writes, gc);
1661                                 if (ret)
1662                                         break;
1663                         }
1664
1665                         bkey_copy_key(&b->c->gc_done, &last->b->key);
1666
1667                         /*
1668                          * Must flush leaf nodes before gc ends, since replace
1669                          * operations aren't journalled
1670                          */
1671                         mutex_lock(&last->b->write_lock);
1672                         if (btree_node_dirty(last->b))
1673                                 bch_btree_node_write(last->b, writes);
1674                         mutex_unlock(&last->b->write_lock);
1675                         rw_unlock(true, last->b);
1676                 }
1677
1678                 memmove(r + 1, r, sizeof(r[0]) * (GC_MERGE_NODES - 1));
1679                 r->b = NULL;
1680
1681                 if (atomic_read(&b->c->search_inflight) &&
1682                     gc->nodes >= gc->nodes_pre + btree_gc_min_nodes(b->c)) {
1683                         gc->nodes_pre =  gc->nodes;
1684                         ret = -EAGAIN;
1685                         break;
1686                 }
1687
1688                 if (need_resched()) {
1689                         ret = -EAGAIN;
1690                         break;
1691                 }
1692         }
1693
1694         for (i = r; i < r + ARRAY_SIZE(r); i++)
1695                 if (!IS_ERR_OR_NULL(i->b)) {
1696                         mutex_lock(&i->b->write_lock);
1697                         if (btree_node_dirty(i->b))
1698                                 bch_btree_node_write(i->b, writes);
1699                         mutex_unlock(&i->b->write_lock);
1700                         rw_unlock(true, i->b);
1701                 }
1702
1703         return ret;
1704 }
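
/*
 * The loop above keeps the most recently visited children in a small sliding
 * window (r[], GC_MERGE_NODES entries): the newest node is read into r[0],
 * the rest shift toward the end, and the node falling off the far end (last)
 * is the one that gets marked, recursed into and written back.  Below is a
 * minimal standalone sketch of that window; the window size and node ids are
 * assumptions for illustration only, and the code is not built (#if 0).
 */
#if 0
#include <stdio.h>

#define SKETCH_MERGE_NODES 4    /* assumed to mirror GC_MERGE_NODES */

int main(void)
{
        int window[SKETCH_MERGE_NODES] = { -1, -1, -1, -1 };   /* -1 == empty */
        int node, i;

        for (node = 0; node < 8; node++) {
                int oldest = window[SKETCH_MERGE_NODES - 1];

                if (oldest != -1)
                        printf("process node %d (leaving the window)\n", oldest);

                /* shift right, newest node goes in slot 0 (like r[0].b) */
                for (i = SKETCH_MERGE_NODES - 1; i > 0; i--)
                        window[i] = window[i - 1];
                window[0] = node;
        }
        return 0;
}
#endif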
1705
1706 static int bch_btree_gc_root(struct btree *b, struct btree_op *op,
1707                              struct closure *writes, struct gc_stat *gc)
1708 {
1709         struct btree *n = NULL;
1710         int ret = 0;
1711         bool should_rewrite;
1712
1713         should_rewrite = btree_gc_mark_node(b, gc);
1714         if (should_rewrite) {
1715                 n = btree_node_alloc_replacement(b, NULL);
1716
1717                 if (!IS_ERR_OR_NULL(n)) {
1718                         bch_btree_node_write_sync(n);
1719
1720                         bch_btree_set_root(n);
1721                         btree_node_free(b);
1722                         rw_unlock(true, n);
1723
1724                         return -EINTR;
1725                 }
1726         }
1727
1728         __bch_btree_mark_key(b->c, b->level + 1, &b->key);
1729
1730         if (b->level) {
1731                 ret = btree_gc_recurse(b, op, writes, gc);
1732                 if (ret)
1733                         return ret;
1734         }
1735
1736         bkey_copy_key(&b->c->gc_done, &b->key);
1737
1738         return ret;
1739 }
1740
1741 static void btree_gc_start(struct cache_set *c)
1742 {
1743         struct cache *ca;
1744         struct bucket *b;
1745         unsigned int i;
1746
1747         if (!c->gc_mark_valid)
1748                 return;
1749
1750         mutex_lock(&c->bucket_lock);
1751
1752         c->gc_mark_valid = 0;
1753         c->gc_done = ZERO_KEY;
1754
1755         for_each_cache(ca, c, i)
1756                 for_each_bucket(b, ca) {
1757                         b->last_gc = b->gen;
1758                         if (!atomic_read(&b->pin)) {
1759                                 SET_GC_MARK(b, 0);
1760                                 SET_GC_SECTORS_USED(b, 0);
1761                         }
1762                 }
1763
1764         mutex_unlock(&c->bucket_lock);
1765 }
1766
1767 static void bch_btree_gc_finish(struct cache_set *c)
1768 {
1769         struct bucket *b;
1770         struct cache *ca;
1771         unsigned int i;
1772
1773         mutex_lock(&c->bucket_lock);
1774
1775         set_gc_sectors(c);
1776         c->gc_mark_valid = 1;
1777         c->need_gc      = 0;
1778
1779         for (i = 0; i < KEY_PTRS(&c->uuid_bucket); i++)
1780                 SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i),
1781                             GC_MARK_METADATA);
1782
1783         /* don't reclaim buckets to which writeback keys point */
1784         rcu_read_lock();
1785         for (i = 0; i < c->devices_max_used; i++) {
1786                 struct bcache_device *d = c->devices[i];
1787                 struct cached_dev *dc;
1788                 struct keybuf_key *w, *n;
1789                 unsigned int j;
1790
1791                 if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
1792                         continue;
1793                 dc = container_of(d, struct cached_dev, disk);
1794
1795                 spin_lock(&dc->writeback_keys.lock);
1796                 rbtree_postorder_for_each_entry_safe(w, n,
1797                                         &dc->writeback_keys.keys, node)
1798                         for (j = 0; j < KEY_PTRS(&w->key); j++)
1799                                 SET_GC_MARK(PTR_BUCKET(c, &w->key, j),
1800                                             GC_MARK_DIRTY);
1801                 spin_unlock(&dc->writeback_keys.lock);
1802         }
1803         rcu_read_unlock();
1804
1805         c->avail_nbuckets = 0;
1806         for_each_cache(ca, c, i) {
1807                 uint64_t *i;
1808
1809                 ca->invalidate_needs_gc = 0;
1810
1811                 for (i = ca->sb.d; i < ca->sb.d + ca->sb.keys; i++)
1812                         SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);
1813
1814                 for (i = ca->prio_buckets;
1815                      i < ca->prio_buckets + prio_buckets(ca) * 2; i++)
1816                         SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);
1817
1818                 for_each_bucket(b, ca) {
1819                         c->need_gc      = max(c->need_gc, bucket_gc_gen(b));
1820
1821                         if (atomic_read(&b->pin))
1822                                 continue;
1823
1824                         BUG_ON(!GC_MARK(b) && GC_SECTORS_USED(b));
1825
1826                         if (!GC_MARK(b) || GC_MARK(b) == GC_MARK_RECLAIMABLE)
1827                                 c->avail_nbuckets++;
1828                 }
1829         }
1830
1831         mutex_unlock(&c->bucket_lock);
1832 }
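
/*
 * A minimal standalone sketch of the bucket_gc_gen() bookkeeping used in the
 * loop above: bucket generations are 8-bit counters, so "how far has this
 * bucket's gen moved since the last GC" is an unsigned 8-bit subtraction that
 * wraps naturally, and c->need_gc tracks the worst case over all buckets.
 * The gen/last_gc pairs below are made up; the sketch is illustrative only
 * and is not built (#if 0).
 */
#if 0
#include <stdio.h>
#include <stdint.h>

/* assumed to mirror the gen - last_gc calculation behind bucket_gc_gen() */
static unsigned int sketch_bucket_gc_gen(unsigned int gen, unsigned int last_gc)
{
        return (uint8_t)(gen - last_gc);        /* wraps modulo 256 */
}

int main(void)
{
        unsigned int pairs[][2] = { { 10, 3 }, { 200, 190 }, { 5, 250 } };
        unsigned int need_gc = 0, g, i;

        for (i = 0; i < 3; i++) {
                g = sketch_bucket_gc_gen(pairs[i][0], pairs[i][1]);
                if (g > need_gc)
                        need_gc = g;
                printf("gen %u, last_gc %u -> gc gen %u\n",
                       pairs[i][0], pairs[i][1], g);
        }
        printf("need_gc = %u\n", need_gc);
        return 0;
}
#endif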
1833
1834 static void bch_btree_gc(struct cache_set *c)
1835 {
1836         int ret;
1837         struct gc_stat stats;
1838         struct closure writes;
1839         struct btree_op op;
1840         uint64_t start_time = local_clock();
1841
1842         trace_bcache_gc_start(c);
1843
1844         memset(&stats, 0, sizeof(struct gc_stat));
1845         closure_init_stack(&writes);
1846         bch_btree_op_init(&op, SHRT_MAX);
1847
1848         btree_gc_start(c);
1849
1850         /* if CACHE_SET_IO_DISABLE is set, the gc thread should stop too */
1851         do {
1852                 ret = btree_root(gc_root, c, &op, &writes, &stats);
1853                 closure_sync(&writes);
1854                 cond_resched();
1855
1856                 if (ret == -EAGAIN)
1857                         schedule_timeout_interruptible(msecs_to_jiffies
1858                                                        (GC_SLEEP_MS));
1859                 else if (ret)
1860                         pr_warn("gc failed!");
1861         } while (ret && !test_bit(CACHE_SET_IO_DISABLE, &c->flags));
1862
1863         bch_btree_gc_finish(c);
1864         wake_up_allocators(c);
1865
1866         bch_time_stats_update(&c->btree_gc_time, start_time);
1867
1868         stats.key_bytes *= sizeof(uint64_t);
1869         stats.data      <<= 9;
1870         bch_update_bucket_in_use(c, &stats);
1871         memcpy(&c->gc_stats, &stats, sizeof(struct gc_stat));
1872
1873         trace_bcache_gc_end(c);
1874
1875         bch_moving_gc(c);
1876 }
1877
1878 static bool gc_should_run(struct cache_set *c)
1879 {
1880         struct cache *ca;
1881         unsigned int i;
1882
1883         for_each_cache(ca, c, i)
1884                 if (ca->invalidate_needs_gc)
1885                         return true;
1886
1887         if (atomic_read(&c->sectors_to_gc) < 0)
1888                 return true;
1889
1890         return false;
1891 }
1892
1893 static int bch_gc_thread(void *arg)
1894 {
1895         struct cache_set *c = arg;
1896
1897         while (1) {
1898                 wait_event_interruptible(c->gc_wait,
1899                            kthread_should_stop() ||
1900                            test_bit(CACHE_SET_IO_DISABLE, &c->flags) ||
1901                            gc_should_run(c));
1902
1903                 if (kthread_should_stop() ||
1904                     test_bit(CACHE_SET_IO_DISABLE, &c->flags))
1905                         break;
1906
1907                 set_gc_sectors(c);
1908                 bch_btree_gc(c);
1909         }
1910
1911         wait_for_kthread_stop();
1912         return 0;
1913 }
1914
1915 int bch_gc_thread_start(struct cache_set *c)
1916 {
1917         /*
1918          * The preceding btree check operation may use so much
1919          * system memory for the bcache btree node cache that the
1920          * registering process gets selected by the OOM killer.
1921          * Ignore any pending SIGKILL sent by the OOM killer here,
1922          * so that kthread_run() does not fail because of pending
1923          * signals; the bcache registering process will exit on
1924          * its own once registration is done.
1925          */
1926         if (signal_pending(current))
1927                 flush_signals(current);
1928
1929         c->gc_thread = kthread_run(bch_gc_thread, c, "bcache_gc");
1930         return PTR_ERR_OR_ZERO(c->gc_thread);
1931 }
1932
1933 /* Initial partial gc */
1934
1935 static int bch_btree_check_recurse(struct btree *b, struct btree_op *op)
1936 {
1937         int ret = 0;
1938         struct bkey *k, *p = NULL;
1939         struct btree_iter iter;
1940
1941         for_each_key_filter(&b->keys, k, &iter, bch_ptr_invalid)
1942                 bch_initial_mark_key(b->c, b->level, k);
1943
1944         bch_initial_mark_key(b->c, b->level + 1, &b->key);
1945
1946         if (b->level) {
1947                 bch_btree_iter_init(&b->keys, &iter, NULL);
1948
1949                 do {
1950                         k = bch_btree_iter_next_filter(&iter, &b->keys,
1951                                                        bch_ptr_bad);
1952                         if (k) {
1953                                 btree_node_prefetch(b, k);
1954                                 /*
1955                                  * initialize c->gc_stats.nodes
1956                                  * for incremental GC
1957                                  */
1958                                 b->c->gc_stats.nodes++;
1959                         }
1960
1961                         if (p)
1962                                 ret = btree(check_recurse, p, b, op);
1963
1964                         p = k;
1965                 } while (p && !ret);
1966         }
1967
1968         return ret;
1969 }
1970
1971 int bch_btree_check(struct cache_set *c)
1972 {
1973         struct btree_op op;
1974
1975         bch_btree_op_init(&op, SHRT_MAX);
1976
1977         return btree_root(check_recurse, c, &op);
1978 }
1979
1980 void bch_initial_gc_finish(struct cache_set *c)
1981 {
1982         struct cache *ca;
1983         struct bucket *b;
1984         unsigned int i;
1985
1986         bch_btree_gc_finish(c);
1987
1988         mutex_lock(&c->bucket_lock);
1989
1990         /*
1991          * We need to put some unused buckets directly on the prio freelist in
1992          * order to get the allocator thread started - it needs freed buckets in
1993          * order to rewrite the prios and gens, and it needs to rewrite prios
1994          * and gens in order to free buckets.
1995          *
1996          * This is only safe for buckets that have no live data in them,
1997          * of which there should always be some.
1998          */
1999         for_each_cache(ca, c, i) {
2000                 for_each_bucket(b, ca) {
2001                         if (fifo_full(&ca->free[RESERVE_PRIO]) &&
2002                             fifo_full(&ca->free[RESERVE_BTREE]))
2003                                 break;
2004
2005                         if (bch_can_invalidate_bucket(ca, b) &&
2006                             !GC_MARK(b)) {
2007                                 __bch_invalidate_one_bucket(ca, b);
2008                                 if (!fifo_push(&ca->free[RESERVE_PRIO],
2009                                    b - ca->buckets))
2010                                         fifo_push(&ca->free[RESERVE_BTREE],
2011                                                   b - ca->buckets);
2012                         }
2013                 }
2014         }
2015
2016         mutex_unlock(&c->bucket_lock);
2017 }
2018
2019 /* Btree insertion */
2020
2021 static bool btree_insert_key(struct btree *b, struct bkey *k,
2022                              struct bkey *replace_key)
2023 {
2024         unsigned int status;
2025
2026         BUG_ON(bkey_cmp(k, &b->key) > 0);
2027
2028         status = bch_btree_insert_key(&b->keys, k, replace_key);
2029         if (status != BTREE_INSERT_STATUS_NO_INSERT) {
2030                 bch_check_keys(&b->keys, "%u for %s", status,
2031                                replace_key ? "replace" : "insert");
2032
2033                 trace_bcache_btree_insert_key(b, k, replace_key != NULL,
2034                                               status);
2035                 return true;
2036         } else
2037                 return false;
2038 }
2039
2040 static size_t insert_u64s_remaining(struct btree *b)
2041 {
2042         long ret = bch_btree_keys_u64s_remaining(&b->keys);
2043
2044         /*
2045          * Might land in the middle of an existing extent and have to split it
2046          */
2047         if (b->keys.ops->is_extents)
2048                 ret -= KEY_MAX_U64S;
2049
2050         return max(ret, 0L);
2051 }
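
/*
 * A minimal standalone sketch of the space check above: free space in the
 * node's unwritten bset is measured in u64s, and extent btrees hold one full
 * key's worth (KEY_MAX_U64S) in reserve because an insert that lands in the
 * middle of an existing extent splits it into two, emitting an extra key.
 * The reserve value below is an assumed stand-in, not the real KEY_MAX_U64S;
 * the sketch is illustrative only and is not built (#if 0).
 */
#if 0
#include <stdio.h>

#define SKETCH_KEY_MAX_U64S 8   /* assumed stand-in for KEY_MAX_U64S */

static long sketch_insert_u64s_remaining(long free_u64s, int is_extents)
{
        long ret = free_u64s;

        if (is_extents)
                ret -= SKETCH_KEY_MAX_U64S;     /* room for a possible split */

        return ret > 0 ? ret : 0;
}

int main(void)
{
        printf("extent leaf,   20 u64s free -> %ld usable\n",
               sketch_insert_u64s_remaining(20, 1));
        printf("interior node, 20 u64s free -> %ld usable\n",
               sketch_insert_u64s_remaining(20, 0));
        return 0;
}
#endif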
2052
2053 static bool bch_btree_insert_keys(struct btree *b, struct btree_op *op,
2054                                   struct keylist *insert_keys,
2055                                   struct bkey *replace_key)
2056 {
2057         bool ret = false;
2058         int oldsize = bch_count_data(&b->keys);
2059
2060         while (!bch_keylist_empty(insert_keys)) {
2061                 struct bkey *k = insert_keys->keys;
2062
2063                 if (bkey_u64s(k) > insert_u64s_remaining(b))
2064                         break;
2065
2066                 if (bkey_cmp(k, &b->key) <= 0) {
2067                         if (!b->level)
2068                                 bkey_put(b->c, k);
2069
2070                         ret |= btree_insert_key(b, k, replace_key);
2071                         bch_keylist_pop_front(insert_keys);
2072                 } else if (bkey_cmp(&START_KEY(k), &b->key) < 0) {
2073                         BKEY_PADDED(key) temp;
2074                         bkey_copy(&temp.key, insert_keys->keys);
2075
2076                         bch_cut_back(&b->key, &temp.key);
2077                         bch_cut_front(&b->key, insert_keys->keys);
2078
2079                         ret |= btree_insert_key(b, &temp.key, replace_key);
2080                         break;
2081                 } else {
2082                         break;
2083                 }
2084         }
2085
2086         if (!ret)
2087                 op->insert_collision = true;
2088
2089         BUG_ON(!bch_keylist_empty(insert_keys) && b->level);
2090
2091         BUG_ON(bch_count_data(&b->keys) < oldsize);
2092         return ret;
2093 }
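
/*
 * A minimal standalone sketch of the straddling-key case handled above: when
 * an extent on the keylist crosses the end of this node (b->key), the copy
 * inserted here is cut back to the node boundary (bch_cut_back) and the key
 * left on the keylist is cut forward from it (bch_cut_front), so the caller
 * inserts the remainder into the next node.  The offsets below are made up;
 * the sketch is illustrative only and is not built (#if 0).
 */
#if 0
#include <stdio.h>

struct extent { long start, end; };     /* models (START_KEY(k), KEY_OFFSET(k)] */

int main(void)
{
        struct extent k = { 90, 130 };  /* hypothetical extent on the keylist */
        long node_end = 100;            /* hypothetical end offset of this node */

        struct extent here = { k.start, node_end };     /* like bch_cut_back()  */
        struct extent rest = { node_end, k.end };       /* like bch_cut_front() */

        printf("insert [%ld, %ld) into this node, keep [%ld, %ld) on the keylist\n",
               here.start, here.end, rest.start, rest.end);
        return 0;
}
#endif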
2094
2095 static int btree_split(struct btree *b, struct btree_op *op,
2096                        struct keylist *insert_keys,
2097                        struct bkey *replace_key)
2098 {
2099         bool split;
2100         struct btree *n1, *n2 = NULL, *n3 = NULL;
2101         uint64_t start_time = local_clock();
2102         struct closure cl;
2103         struct keylist parent_keys;
2104
2105         closure_init_stack(&cl);
2106         bch_keylist_init(&parent_keys);
2107
2108         if (btree_check_reserve(b, op)) {
2109                 if (!b->level)
2110                         return -EINTR;
2111                 else
2112                         WARN(1, "insufficient reserve for split\n");
2113         }
2114
2115         n1 = btree_node_alloc_replacement(b, op);
2116         if (IS_ERR(n1))
2117                 goto err;
2118
2119         split = set_blocks(btree_bset_first(n1),
2120                            block_bytes(n1->c)) > (btree_blocks(b) * 4) / 5;
2121
2122         if (split) {
2123                 unsigned int keys = 0;
2124
2125                 trace_bcache_btree_node_split(b, btree_bset_first(n1)->keys);
2126
2127                 n2 = bch_btree_node_alloc(b->c, op, b->level, b->parent);
2128                 if (IS_ERR(n2))
2129                         goto err_free1;
2130
2131                 if (!b->parent) {
2132                         n3 = bch_btree_node_alloc(b->c, op, b->level + 1, NULL);
2133                         if (IS_ERR(n3))
2134                                 goto err_free2;
2135                 }
2136
2137                 mutex_lock(&n1->write_lock);
2138                 mutex_lock(&n2->write_lock);
2139
2140                 bch_btree_insert_keys(n1, op, insert_keys, replace_key);
2141
2142                 /*
2143                  * Has to be a linear search because we don't have an auxiliary
2144                  * search tree yet
2145                  */
2146
2147                 while (keys < (btree_bset_first(n1)->keys * 3) / 5)
2148                         keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1),
2149                                                         keys));
2150
2151                 bkey_copy_key(&n1->key,
2152                               bset_bkey_idx(btree_bset_first(n1), keys));
2153                 keys += bkey_u64s(bset_bkey_idx(btree_bset_first(n1), keys));
2154
2155                 btree_bset_first(n2)->keys = btree_bset_first(n1)->keys - keys;
2156                 btree_bset_first(n1)->keys = keys;
2157
2158                 memcpy(btree_bset_first(n2)->start,
2159                        bset_bkey_last(btree_bset_first(n1)),
2160                        btree_bset_first(n2)->keys * sizeof(uint64_t));
2161
2162                 bkey_copy_key(&n2->key, &b->key);
2163
2164                 bch_keylist_add(&parent_keys, &n2->key);
2165                 bch_btree_node_write(n2, &cl);
2166                 mutex_unlock(&n2->write_lock);
2167                 rw_unlock(true, n2);
2168         } else {
2169                 trace_bcache_btree_node_compact(b, btree_bset_first(n1)->keys);
2170
2171                 mutex_lock(&n1->write_lock);
2172                 bch_btree_insert_keys(n1, op, insert_keys, replace_key);
2173         }
2174
2175         bch_keylist_add(&parent_keys, &n1->key);
2176         bch_btree_node_write(n1, &cl);
2177         mutex_unlock(&n1->write_lock);
2178
2179         if (n3) {
2180                 /* Depth increases, make a new root */
2181                 mutex_lock(&n3->write_lock);
2182                 bkey_copy_key(&n3->key, &MAX_KEY);
2183                 bch_btree_insert_keys(n3, op, &parent_keys, NULL);
2184                 bch_btree_node_write(n3, &cl);
2185                 mutex_unlock(&n3->write_lock);
2186
2187                 closure_sync(&cl);
2188                 bch_btree_set_root(n3);
2189                 rw_unlock(true, n3);
2190         } else if (!b->parent) {
2191                 /* Root filled up but didn't need to be split */
2192                 closure_sync(&cl);
2193                 bch_btree_set_root(n1);
2194         } else {
2195                 /* Split a non-root node */
2196                 closure_sync(&cl);
2197                 make_btree_freeing_key(b, parent_keys.top);
2198                 bch_keylist_push(&parent_keys);
2199
2200                 bch_btree_insert_node(b->parent, op, &parent_keys, NULL, NULL);
2201                 BUG_ON(!bch_keylist_empty(&parent_keys));
2202         }
2203
2204         btree_node_free(b);
2205         rw_unlock(true, n1);
2206
2207         bch_time_stats_update(&b->c->btree_split_time, start_time);
2208
2209         return 0;
2210 err_free2:
2211         bkey_put(b->c, &n2->key);
2212         btree_node_free(n2);
2213         rw_unlock(true, n2);
2214 err_free1:
2215         bkey_put(b->c, &n1->key);
2216         btree_node_free(n1);
2217         rw_unlock(true, n1);
2218 err:
2219         WARN(1, "bcache: btree split failed (level %u)", b->level);
2220
2221         if (n3 == ERR_PTR(-EAGAIN) ||
2222             n2 == ERR_PTR(-EAGAIN) ||
2223             n1 == ERR_PTR(-EAGAIN))
2224                 return -EAGAIN;
2225
2226         return -ENOMEM;
2227 }
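
/*
 * A minimal standalone sketch of the split-point search in btree_split():
 * keys are variable-sized, so without an auxiliary search tree the code walks
 * them linearly, accumulating their size in u64s until roughly 3/5 of the set
 * is on the left, and splits there.  The per-key sizes below are made up; the
 * sketch is illustrative only and is not built (#if 0).
 */
#if 0
#include <stdio.h>

int main(void)
{
        /* hypothetical per-key sizes, in u64s, in key order */
        unsigned int sizes[] = { 3, 5, 2, 4, 6, 3, 2, 5 };
        unsigned int n = sizeof(sizes) / sizeof(sizes[0]);
        unsigned int total = 0, covered = 0, i;

        for (i = 0; i < n; i++)
                total += sizes[i];

        /* walk keys until ~3/5 of the u64s fall on the left side */
        for (i = 0; i < n && covered < total * 3 / 5; i++)
                covered += sizes[i];

        printf("split after key %u: %u of %u u64s stay in the first node\n",
               i, covered, total);
        return 0;
}
#endif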
2228
2229 static int bch_btree_insert_node(struct btree *b, struct btree_op *op,
2230                                  struct keylist *insert_keys,
2231                                  atomic_t *journal_ref,
2232                                  struct bkey *replace_key)
2233 {
2234         struct closure cl;
2235
2236         BUG_ON(b->level && replace_key);
2237
2238         closure_init_stack(&cl);
2239
2240         mutex_lock(&b->write_lock);
2241
2242         if (write_block(b) != btree_bset_last(b) &&
2243             b->keys.last_set_unwritten)
2244                 bch_btree_init_next(b); /* just wrote a set */
2245
2246         if (bch_keylist_nkeys(insert_keys) > insert_u64s_remaining(b)) {
2247                 mutex_unlock(&b->write_lock);
2248                 goto split;
2249         }
2250
2251         BUG_ON(write_block(b) != btree_bset_last(b));
2252
2253         if (bch_btree_insert_keys(b, op, insert_keys, replace_key)) {
2254                 if (!b->level)
2255                         bch_btree_leaf_dirty(b, journal_ref);
2256                 else
2257                         bch_btree_node_write(b, &cl);
2258         }
2259
2260         mutex_unlock(&b->write_lock);
2261
2262         /* wait for btree node write if necessary, after unlock */
2263         closure_sync(&cl);
2264
2265         return 0;
2266 split:
2267         if (current->bio_list) {
2268                 op->lock = b->c->root->level + 1;
2269                 return -EAGAIN;
2270         } else if (op->lock <= b->c->root->level) {
2271                 op->lock = b->c->root->level + 1;
2272                 return -EINTR;
2273         } else {
2274                 /* Invalidated all iterators */
2275                 int ret = btree_split(b, op, insert_keys, replace_key);
2276
2277                 if (bch_keylist_empty(insert_keys))
2278                         return 0;
2279                 else if (!ret)
2280                         return -EINTR;
2281                 return ret;
2282         }
2283 }
2284
2285 int bch_btree_insert_check_key(struct btree *b, struct btree_op *op,
2286                                struct bkey *check_key)
2287 {
2288         int ret = -EINTR;
2289         uint64_t btree_ptr = b->key.ptr[0];
2290         unsigned long seq = b->seq;
2291         struct keylist insert;
2292         bool upgrade = op->lock == -1;
2293
2294         bch_keylist_init(&insert);
2295
2296         if (upgrade) {
2297                 rw_unlock(false, b);
2298                 rw_lock(true, b, b->level);
2299
2300                 if (b->key.ptr[0] != btree_ptr ||
2301                     b->seq != seq + 1) {
2302                         op->lock = b->level;
2303                         goto out;
2304                 }
2305         }
2306
2307         SET_KEY_PTRS(check_key, 1);
2308         get_random_bytes(&check_key->ptr[0], sizeof(uint64_t));
2309
2310         SET_PTR_DEV(check_key, 0, PTR_CHECK_DEV);
2311
2312         bch_keylist_add(&insert, check_key);
2313
2314         ret = bch_btree_insert_node(b, op, &insert, NULL, NULL);
2315
2316         BUG_ON(!ret && !bch_keylist_empty(&insert));
2317 out:
2318         if (upgrade)
2319                 downgrade_write(&b->lock);
2320         return ret;
2321 }
2322
2323 struct btree_insert_op {
2324         struct btree_op op;
2325         struct keylist  *keys;
2326         atomic_t        *journal_ref;
2327         struct bkey     *replace_key;
2328 };
2329
2330 static int btree_insert_fn(struct btree_op *b_op, struct btree *b)
2331 {
2332         struct btree_insert_op *op = container_of(b_op,
2333                                         struct btree_insert_op, op);
2334
2335         int ret = bch_btree_insert_node(b, &op->op, op->keys,
2336                                         op->journal_ref, op->replace_key);
2337         if (ret && !bch_keylist_empty(op->keys))
2338                 return ret;
2339         else
2340                 return MAP_DONE;
2341 }
2342
2343 int bch_btree_insert(struct cache_set *c, struct keylist *keys,
2344                      atomic_t *journal_ref, struct bkey *replace_key)
2345 {
2346         struct btree_insert_op op;
2347         int ret = 0;
2348
2349         BUG_ON(current->bio_list);
2350         BUG_ON(bch_keylist_empty(keys));
2351
2352         bch_btree_op_init(&op.op, 0);
2353         op.keys         = keys;
2354         op.journal_ref  = journal_ref;
2355         op.replace_key  = replace_key;
2356
2357         while (!ret && !bch_keylist_empty(keys)) {
2358                 op.op.lock = 0;
2359                 ret = bch_btree_map_leaf_nodes(&op.op, c,
2360                                                &START_KEY(keys->keys),
2361                                                btree_insert_fn);
2362         }
2363
2364         if (ret) {
2365                 struct bkey *k;
2366
2367                 pr_err("error %i", ret);
2368
2369                 while ((k = bch_keylist_pop(keys)))
2370                         bkey_put(c, k);
2371         } else if (op.op.insert_collision)
2372                 ret = -ESRCH;
2373
2374         return ret;
2375 }
2376
2377 void bch_btree_set_root(struct btree *b)
2378 {
2379         unsigned int i;
2380         struct closure cl;
2381
2382         closure_init_stack(&cl);
2383
2384         trace_bcache_btree_set_root(b);
2385
2386         BUG_ON(!b->written);
2387
2388         for (i = 0; i < KEY_PTRS(&b->key); i++)
2389                 BUG_ON(PTR_BUCKET(b->c, &b->key, i)->prio != BTREE_PRIO);
2390
2391         mutex_lock(&b->c->bucket_lock);
2392         list_del_init(&b->list);
2393         mutex_unlock(&b->c->bucket_lock);
2394
2395         b->c->root = b;
2396
2397         bch_journal_meta(b->c, &cl);
2398         closure_sync(&cl);
2399 }
2400
2401 /* Map across nodes or keys */
2402
2403 static int bch_btree_map_nodes_recurse(struct btree *b, struct btree_op *op,
2404                                        struct bkey *from,
2405                                        btree_map_nodes_fn *fn, int flags)
2406 {
2407         int ret = MAP_CONTINUE;
2408
2409         if (b->level) {
2410                 struct bkey *k;
2411                 struct btree_iter iter;
2412
2413                 bch_btree_iter_init(&b->keys, &iter, from);
2414
2415                 while ((k = bch_btree_iter_next_filter(&iter, &b->keys,
2416                                                        bch_ptr_bad))) {
2417                         ret = btree(map_nodes_recurse, k, b,
2418                                     op, from, fn, flags);
2419                         from = NULL;
2420
2421                         if (ret != MAP_CONTINUE)
2422                                 return ret;
2423                 }
2424         }
2425
2426         if (!b->level || flags == MAP_ALL_NODES)
2427                 ret = fn(op, b);
2428
2429         return ret;
2430 }
2431
2432 int __bch_btree_map_nodes(struct btree_op *op, struct cache_set *c,
2433                           struct bkey *from, btree_map_nodes_fn *fn, int flags)
2434 {
2435         return btree_root(map_nodes_recurse, c, op, from, fn, flags);
2436 }
2437
2438 static int bch_btree_map_keys_recurse(struct btree *b, struct btree_op *op,
2439                                       struct bkey *from, btree_map_keys_fn *fn,
2440                                       int flags)
2441 {
2442         int ret = MAP_CONTINUE;
2443         struct bkey *k;
2444         struct btree_iter iter;
2445
2446         bch_btree_iter_init(&b->keys, &iter, from);
2447
2448         while ((k = bch_btree_iter_next_filter(&iter, &b->keys, bch_ptr_bad))) {
2449                 ret = !b->level
2450                         ? fn(op, b, k)
2451                         : btree(map_keys_recurse, k, b, op, from, fn, flags);
2452                 from = NULL;
2453
2454                 if (ret != MAP_CONTINUE)
2455                         return ret;
2456         }
2457
2458         if (!b->level && (flags & MAP_END_KEY))
2459                 ret = fn(op, b, &KEY(KEY_INODE(&b->key),
2460                                      KEY_OFFSET(&b->key), 0));
2461
2462         return ret;
2463 }
2464
2465 int bch_btree_map_keys(struct btree_op *op, struct cache_set *c,
2466                        struct bkey *from, btree_map_keys_fn *fn, int flags)
2467 {
2468         return btree_root(map_keys_recurse, c, op, from, fn, flags);
2469 }
2470
2471 /* Keybuf code */
2472
2473 static inline int keybuf_cmp(struct keybuf_key *l, struct keybuf_key *r)
2474 {
2475         /* Overlapping keys compare equal */
2476         if (bkey_cmp(&l->key, &START_KEY(&r->key)) <= 0)
2477                 return -1;
2478         if (bkey_cmp(&START_KEY(&l->key), &r->key) >= 0)
2479                 return 1;
2480         return 0;
2481 }
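
/*
 * A minimal standalone sketch of the comparator semantics above: a bcache key
 * covers the half-open range (START_KEY(k), k], so two keys compare equal
 * exactly when their ranges overlap, which lets an rbtree lookup find any
 * buffered key intersecting a query range.  The offsets below are made up;
 * the sketch is illustrative only and is not built (#if 0).
 */
#if 0
#include <stdio.h>

struct range { long start, end; };      /* models (START_KEY(k), KEY_OFFSET(k)] */

static int sketch_range_cmp(struct range l, struct range r)
{
        if (l.end <= r.start)
                return -1;      /* l lies entirely before r */
        if (l.start >= r.end)
                return 1;       /* l lies entirely after r */
        return 0;               /* overlapping ranges compare equal */
}

int main(void)
{
        struct range a = { 0, 10 }, b = { 5, 15 }, c = { 20, 30 };

        printf("a vs b: %d (overlap)\n", sketch_range_cmp(a, b));
        printf("a vs c: %d (disjoint)\n", sketch_range_cmp(a, c));
        return 0;
}
#endif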
2482
2483 static inline int keybuf_nonoverlapping_cmp(struct keybuf_key *l,
2484                                             struct keybuf_key *r)
2485 {
2486         return clamp_t(int64_t, bkey_cmp(&l->key, &r->key), -1, 1);
2487 }
2488
2489 struct refill {
2490         struct btree_op op;
2491         unsigned int    nr_found;
2492         struct keybuf   *buf;
2493         struct bkey     *end;
2494         keybuf_pred_fn  *pred;
2495 };
2496
2497 static int refill_keybuf_fn(struct btree_op *op, struct btree *b,
2498                             struct bkey *k)
2499 {
2500         struct refill *refill = container_of(op, struct refill, op);
2501         struct keybuf *buf = refill->buf;
2502         int ret = MAP_CONTINUE;
2503
2504         if (bkey_cmp(k, refill->end) > 0) {
2505                 ret = MAP_DONE;
2506                 goto out;
2507         }
2508
2509         if (!KEY_SIZE(k)) /* end key */
2510                 goto out;
2511
2512         if (refill->pred(buf, k)) {
2513                 struct keybuf_key *w;
2514
2515                 spin_lock(&buf->lock);
2516
2517                 w = array_alloc(&buf->freelist);
2518                 if (!w) {
2519                         spin_unlock(&buf->lock);
2520                         return MAP_DONE;
2521                 }
2522
2523                 w->private = NULL;
2524                 bkey_copy(&w->key, k);
2525
2526                 if (RB_INSERT(&buf->keys, w, node, keybuf_cmp))
2527                         array_free(&buf->freelist, w);
2528                 else
2529                         refill->nr_found++;
2530
2531                 if (array_freelist_empty(&buf->freelist))
2532                         ret = MAP_DONE;
2533
2534                 spin_unlock(&buf->lock);
2535         }
2536 out:
2537         buf->last_scanned = *k;
2538         return ret;
2539 }
2540
2541 void bch_refill_keybuf(struct cache_set *c, struct keybuf *buf,
2542                        struct bkey *end, keybuf_pred_fn *pred)
2543 {
2544         struct bkey start = buf->last_scanned;
2545         struct refill refill;
2546
2547         cond_resched();
2548
2549         bch_btree_op_init(&refill.op, -1);
2550         refill.nr_found = 0;
2551         refill.buf      = buf;
2552         refill.end      = end;
2553         refill.pred     = pred;
2554
2555         bch_btree_map_keys(&refill.op, c, &buf->last_scanned,
2556                            refill_keybuf_fn, MAP_END_KEY);
2557
2558         trace_bcache_keyscan(refill.nr_found,
2559                              KEY_INODE(&start), KEY_OFFSET(&start),
2560                              KEY_INODE(&buf->last_scanned),
2561                              KEY_OFFSET(&buf->last_scanned));
2562
2563         spin_lock(&buf->lock);
2564
2565         if (!RB_EMPTY_ROOT(&buf->keys)) {
2566                 struct keybuf_key *w;
2567
2568                 w = RB_FIRST(&buf->keys, struct keybuf_key, node);
2569                 buf->start      = START_KEY(&w->key);
2570
2571                 w = RB_LAST(&buf->keys, struct keybuf_key, node);
2572                 buf->end        = w->key;
2573         } else {
2574                 buf->start      = MAX_KEY;
2575                 buf->end        = MAX_KEY;
2576         }
2577
2578         spin_unlock(&buf->lock);
2579 }
2580
2581 static void __bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
2582 {
2583         rb_erase(&w->node, &buf->keys);
2584         array_free(&buf->freelist, w);
2585 }
2586
2587 void bch_keybuf_del(struct keybuf *buf, struct keybuf_key *w)
2588 {
2589         spin_lock(&buf->lock);
2590         __bch_keybuf_del(buf, w);
2591         spin_unlock(&buf->lock);
2592 }
2593
2594 bool bch_keybuf_check_overlapping(struct keybuf *buf, struct bkey *start,
2595                                   struct bkey *end)
2596 {
2597         bool ret = false;
2598         struct keybuf_key *p, *w, s;
2599
2600         s.key = *start;
2601
2602         if (bkey_cmp(end, &buf->start) <= 0 ||
2603             bkey_cmp(start, &buf->end) >= 0)
2604                 return false;
2605
2606         spin_lock(&buf->lock);
2607         w = RB_GREATER(&buf->keys, s, node, keybuf_nonoverlapping_cmp);
2608
2609         while (w && bkey_cmp(&START_KEY(&w->key), end) < 0) {
2610                 p = w;
2611                 w = RB_NEXT(w, node);
2612
2613                 if (p->private)
2614                         ret = true;
2615                 else
2616                         __bch_keybuf_del(buf, p);
2617         }
2618
2619         spin_unlock(&buf->lock);
2620         return ret;
2621 }
2622
2623 struct keybuf_key *bch_keybuf_next(struct keybuf *buf)
2624 {
2625         struct keybuf_key *w;
2626
2627         spin_lock(&buf->lock);
2628
2629         w = RB_FIRST(&buf->keys, struct keybuf_key, node);
2630
2631         while (w && w->private)
2632                 w = RB_NEXT(w, node);
2633
2634         if (w)
2635                 w->private = ERR_PTR(-EINTR);
2636
2637         spin_unlock(&buf->lock);
2638         return w;
2639 }
2640
2641 struct keybuf_key *bch_keybuf_next_rescan(struct cache_set *c,
2642                                           struct keybuf *buf,
2643                                           struct bkey *end,
2644                                           keybuf_pred_fn *pred)
2645 {
2646         struct keybuf_key *ret;
2647
2648         while (1) {
2649                 ret = bch_keybuf_next(buf);
2650                 if (ret)
2651                         break;
2652
2653                 if (bkey_cmp(&buf->last_scanned, end) >= 0) {
2654                         pr_debug("scan finished");
2655                         break;
2656                 }
2657
2658                 bch_refill_keybuf(c, buf, end, pred);
2659         }
2660
2661         return ret;
2662 }
2663
2664 void bch_keybuf_init(struct keybuf *buf)
2665 {
2666         buf->last_scanned       = MAX_KEY;
2667         buf->keys               = RB_ROOT;
2668
2669         spin_lock_init(&buf->lock);
2670         array_allocator_init(&buf->freelist);
2671 }