// SPDX-License-Identifier: GPL-2.0
/*
 * bcache setup/teardown code, and some metadata io - read a superblock and
 * figure out what to do with it.
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "btree.h"
#include "debug.h"
#include "extents.h"
#include "request.h"
#include "writeback.h"
#include "features.h"

#include <linux/blkdev.h>
#include <linux/debugfs.h>
#include <linux/genhd.h>
#include <linux/idr.h>
#include <linux/kthread.h>
#include <linux/workqueue.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/sysfs.h>

unsigned int bch_cutoff_writeback;
unsigned int bch_cutoff_writeback_sync;

static const char bcache_magic[] = {
        0xc6, 0x85, 0x73, 0xf6, 0x4e, 0x1a, 0x45, 0xca,
        0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81
};

static const char invalid_uuid[] = {
        0xa0, 0x3e, 0xf8, 0xed, 0x3e, 0xe1, 0xb8, 0x78,
        0xc8, 0x50, 0xfc, 0x5e, 0xcb, 0x16, 0xcd, 0x99
};

static struct kobject *bcache_kobj;
struct mutex bch_register_lock;
bool bcache_is_reboot;
LIST_HEAD(bch_cache_sets);
static LIST_HEAD(uncached_devices);

static int bcache_major;
static DEFINE_IDA(bcache_device_idx);
static wait_queue_head_t unregister_wait;
struct workqueue_struct *bcache_wq;
struct workqueue_struct *bch_journal_wq;

#define BTREE_MAX_PAGES         (256 * 1024 / PAGE_SIZE)
/* limit on the number of partitions per bcache device */
#define BCACHE_MINORS           128
/* limit on the number of bcache devices per system */
#define BCACHE_DEVICE_IDX_MAX   ((1U << MINORBITS)/BCACHE_MINORS)

/* Superblock */

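/*
 * Note on the large-bucket encoding: with the "large_bucket" feature,
 * the bucket size is split across two little-endian 16 bit fields on
 * disk; bucket_size holds the low 16 bits and bucket_size_hi the high
 * 16 bits. For example, bucket_size_hi == 0x0001 with bucket_size ==
 * 0x0000 decodes to a bucket size of 0x10000 (65536) sectors.
 */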
static unsigned int get_bucket_size(struct cache_sb *sb, struct cache_sb_disk *s)
{
        unsigned int bucket_size = le16_to_cpu(s->bucket_size);

        if (sb->version >= BCACHE_SB_VERSION_CDEV_WITH_FEATURES &&
            bch_has_feature_large_bucket(sb))
                bucket_size |= le16_to_cpu(s->bucket_size_hi) << 16;

        return bucket_size;
}

static const char *read_super_common(struct cache_sb *sb, struct block_device *bdev,
                                     struct cache_sb_disk *s)
{
        const char *err;
        unsigned int i;

        sb->first_bucket = le16_to_cpu(s->first_bucket);
        sb->nbuckets    = le64_to_cpu(s->nbuckets);
        sb->bucket_size = get_bucket_size(sb, s);

        sb->nr_in_set   = le16_to_cpu(s->nr_in_set);
        sb->nr_this_dev = le16_to_cpu(s->nr_this_dev);

        err = "Too many journal buckets";
        if (sb->keys > SB_JOURNAL_BUCKETS)
                goto err;

        err = "Too many buckets";
        if (sb->nbuckets > LONG_MAX)
                goto err;

        err = "Not enough buckets";
        if (sb->nbuckets < 1 << 7)
                goto err;

        err = "Bad block size (not power of 2)";
        if (!is_power_of_2(sb->block_size))
                goto err;

        err = "Bad block size (larger than page size)";
        if (sb->block_size > PAGE_SECTORS)
                goto err;

        err = "Bad bucket size (not power of 2)";
        if (!is_power_of_2(sb->bucket_size))
                goto err;

        err = "Bad bucket size (smaller than page size)";
        if (sb->bucket_size < PAGE_SECTORS)
                goto err;

        err = "Invalid superblock: device too small";
        if (get_capacity(bdev->bd_disk) <
            sb->bucket_size * sb->nbuckets)
                goto err;

        err = "Bad UUID";
        if (bch_is_zero(sb->set_uuid, 16))
                goto err;

        err = "Bad cache device number in set";
        if (!sb->nr_in_set ||
            sb->nr_in_set <= sb->nr_this_dev ||
            sb->nr_in_set > MAX_CACHES_PER_SET)
                goto err;

        err = "Journal buckets not sequential";
        for (i = 0; i < sb->keys; i++)
                if (sb->d[i] != sb->first_bucket + i)
                        goto err;

        err = "Too many journal buckets";
        if (sb->first_bucket + sb->keys > sb->nbuckets)
                goto err;

        err = "Invalid superblock: first bucket comes before end of super";
        if (sb->first_bucket * sb->bucket_size < 16)
                goto err;

        err = NULL;
err:
        return err;
}

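/*
 * Read and validate an on-disk superblock: the raw page is fetched
 * through the bdev page cache, the fields are converted from little
 * endian into the in-memory struct cache_sb, and version-specific
 * checks are applied. On success the page is kept (returned via @res
 * and later reused for superblock writes); on failure it is released
 * and an error string is returned.
 */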
static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
                              struct cache_sb_disk **res)
{
        const char *err;
        struct cache_sb_disk *s;
        struct page *page;
        unsigned int i;

        page = read_cache_page_gfp(bdev->bd_inode->i_mapping,
                                   SB_OFFSET >> PAGE_SHIFT, GFP_KERNEL);
        if (IS_ERR(page))
                return "IO error";
        s = page_address(page) + offset_in_page(SB_OFFSET);

        sb->offset              = le64_to_cpu(s->offset);
        sb->version             = le64_to_cpu(s->version);

        memcpy(sb->magic,       s->magic, 16);
        memcpy(sb->uuid,        s->uuid, 16);
        memcpy(sb->set_uuid,    s->set_uuid, 16);
        memcpy(sb->label,       s->label, SB_LABEL_SIZE);

        sb->flags               = le64_to_cpu(s->flags);
        sb->seq                 = le64_to_cpu(s->seq);
        sb->last_mount          = le32_to_cpu(s->last_mount);
        sb->keys                = le16_to_cpu(s->keys);

        for (i = 0; i < SB_JOURNAL_BUCKETS; i++)
                sb->d[i] = le64_to_cpu(s->d[i]);

        pr_debug("read sb version %llu, flags %llu, seq %llu, journal size %u\n",
                 sb->version, sb->flags, sb->seq, sb->keys);

        err = "Not a bcache superblock (bad offset)";
        if (sb->offset != SB_SECTOR)
                goto err;

        err = "Not a bcache superblock (bad magic)";
        if (memcmp(sb->magic, bcache_magic, 16))
                goto err;

        err = "Bad checksum";
        if (s->csum != csum_set(s))
                goto err;

        err = "Bad UUID";
        if (bch_is_zero(sb->uuid, 16))
                goto err;

        sb->block_size  = le16_to_cpu(s->block_size);

        err = "Superblock block size smaller than device block size";
        if (sb->block_size << 9 < bdev_logical_block_size(bdev))
                goto err;

        switch (sb->version) {
        case BCACHE_SB_VERSION_BDEV:
                sb->data_offset = BDEV_DATA_START_DEFAULT;
                break;
        case BCACHE_SB_VERSION_BDEV_WITH_OFFSET:
        case BCACHE_SB_VERSION_BDEV_WITH_FEATURES:
                sb->data_offset = le64_to_cpu(s->data_offset);

                err = "Bad data offset";
                if (sb->data_offset < BDEV_DATA_START_DEFAULT)
                        goto err;

                break;
        case BCACHE_SB_VERSION_CDEV:
        case BCACHE_SB_VERSION_CDEV_WITH_UUID:
                err = read_super_common(sb, bdev, s);
                if (err)
                        goto err;
                break;
        case BCACHE_SB_VERSION_CDEV_WITH_FEATURES:
                /*
                 * Feature bits are needed by read_super_common(), so
                 * convert them first.
                 */
                sb->feature_compat = le64_to_cpu(s->feature_compat);
                sb->feature_incompat = le64_to_cpu(s->feature_incompat);
                sb->feature_ro_compat = le64_to_cpu(s->feature_ro_compat);
                err = read_super_common(sb, bdev, s);
                if (err)
                        goto err;
                break;
        default:
                err = "Unsupported superblock version";
                goto err;
        }

        sb->last_mount = (u32)ktime_get_real_seconds();
        *res = s;
        return NULL;
err:
        put_page(page);
        return err;
}

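/*
 * Superblock writes are serialized with a semaphore and tracked with a
 * closure: the submitted bio holds a closure ref that is dropped in its
 * endio handler, and the closure destructor releases the semaphore.
 */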
static void write_bdev_super_endio(struct bio *bio)
{
        struct cached_dev *dc = bio->bi_private;

        if (bio->bi_status)
                bch_count_backing_io_errors(dc, bio);

        closure_put(&dc->sb_write);
}

static void __write_super(struct cache_sb *sb, struct cache_sb_disk *out,
                struct bio *bio)
{
        unsigned int i;

        bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_META;
        bio->bi_iter.bi_sector  = SB_SECTOR;
        __bio_add_page(bio, virt_to_page(out), SB_SIZE,
                        offset_in_page(out));

        out->offset             = cpu_to_le64(sb->offset);

        memcpy(out->uuid,       sb->uuid, 16);
        memcpy(out->set_uuid,   sb->set_uuid, 16);
        memcpy(out->label,      sb->label, SB_LABEL_SIZE);

        out->flags              = cpu_to_le64(sb->flags);
        out->seq                = cpu_to_le64(sb->seq);

        out->last_mount         = cpu_to_le32(sb->last_mount);
        out->first_bucket       = cpu_to_le16(sb->first_bucket);
        out->keys               = cpu_to_le16(sb->keys);

        for (i = 0; i < sb->keys; i++)
                out->d[i] = cpu_to_le64(sb->d[i]);

        if (sb->version >= BCACHE_SB_VERSION_CDEV_WITH_FEATURES) {
                out->feature_compat    = cpu_to_le64(sb->feature_compat);
                out->feature_incompat  = cpu_to_le64(sb->feature_incompat);
                out->feature_ro_compat = cpu_to_le64(sb->feature_ro_compat);
        }

        out->version            = cpu_to_le64(sb->version);
        out->csum = csum_set(out);

        pr_debug("ver %llu, flags %llu, seq %llu\n",
                 sb->version, sb->flags, sb->seq);

        submit_bio(bio);
}

static void bch_write_bdev_super_unlock(struct closure *cl)
{
        struct cached_dev *dc = container_of(cl, struct cached_dev, sb_write);

        up(&dc->sb_write_mutex);
}

void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)
{
        struct closure *cl = &dc->sb_write;
        struct bio *bio = &dc->sb_bio;

        down(&dc->sb_write_mutex);
        closure_init(cl, parent);

        bio_init(bio, dc->sb_bv, 1);
        bio_set_dev(bio, dc->bdev);
        bio->bi_end_io  = write_bdev_super_endio;
        bio->bi_private = dc;

        closure_get(cl);
        /* I/O request sent to backing device */
        __write_super(&dc->sb, dc->sb_disk, bio);

        closure_return_with_destructor(cl, bch_write_bdev_super_unlock);
}

static void write_super_endio(struct bio *bio)
{
        struct cache *ca = bio->bi_private;

        /* is_read = 0 */
        bch_count_io_errors(ca, bio->bi_status, 0,
                            "writing superblock");
        closure_put(&ca->set->sb_write);
}

static void bcache_write_super_unlock(struct closure *cl)
{
        struct cache_set *c = container_of(cl, struct cache_set, sb_write);

        up(&c->sb_write_mutex);
}

void bcache_write_super(struct cache_set *c)
{
        struct closure *cl = &c->sb_write;
        struct cache *ca = c->cache;
        struct bio *bio = &ca->sb_bio;
        unsigned int version = BCACHE_SB_VERSION_CDEV_WITH_UUID;

        down(&c->sb_write_mutex);
        closure_init(cl, &c->cl);

        ca->sb.seq++;

        if (ca->sb.version < version)
                ca->sb.version = version;

        bio_init(bio, ca->sb_bv, 1);
        bio_set_dev(bio, ca->bdev);
        bio->bi_end_io  = write_super_endio;
        bio->bi_private = ca;

        closure_get(cl);
        __write_super(&ca->sb, ca->sb_disk, bio);

        closure_return_with_destructor(cl, bcache_write_super_unlock);
}

/* UUID io */

static void uuid_endio(struct bio *bio)
{
        struct closure *cl = bio->bi_private;
        struct cache_set *c = container_of(cl, struct cache_set, uuid_write);

        cache_set_err_on(bio->bi_status, c, "accessing uuids");
        bch_bbio_free(bio, c);
        closure_put(cl);
}

static void uuid_io_unlock(struct closure *cl)
{
        struct cache_set *c = container_of(cl, struct cache_set, uuid_write);

        up(&c->uuid_write_mutex);
}

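/*
 * Read or write the uuid_entry array. For writes, one bio is submitted
 * per pointer in the key so every replica gets updated; for reads the
 * loop breaks after the first pointer, since one good copy suffices.
 */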
static void uuid_io(struct cache_set *c, int op, unsigned long op_flags,
                    struct bkey *k, struct closure *parent)
{
        struct closure *cl = &c->uuid_write;
        struct uuid_entry *u;
        unsigned int i;
        char buf[80];

        BUG_ON(!parent);
        down(&c->uuid_write_mutex);
        closure_init(cl, parent);

        for (i = 0; i < KEY_PTRS(k); i++) {
                struct bio *bio = bch_bbio_alloc(c);

                bio->bi_opf = REQ_SYNC | REQ_META | op_flags;
                bio->bi_iter.bi_size = KEY_SIZE(k) << 9;

                bio->bi_end_io  = uuid_endio;
                bio->bi_private = cl;
                bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags);
                bch_bio_map(bio, c->uuids);

                bch_submit_bbio(bio, c, k, i);

                if (op != REQ_OP_WRITE)
                        break;
        }

        bch_extent_to_text(buf, sizeof(buf), k);
        pr_debug("%s UUIDs at %s\n", op == REQ_OP_WRITE ? "wrote" : "read", buf);

        for (u = c->uuids; u < c->uuids + c->nr_uuids; u++)
                if (!bch_is_zero(u->uuid, 16))
                        pr_debug("Slot %zi: %pU: %s: 1st: %u last: %u inv: %u\n",
                                 u - c->uuids, u->uuid, u->label,
                                 u->first_reg, u->last_reg, u->invalidated);

        closure_return_with_destructor(cl, uuid_io_unlock);
}

static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl)
{
        struct bkey *k = &j->uuid_bucket;

        if (__bch_btree_ptr_invalid(c, k))
                return "bad uuid pointer";

        bkey_copy(&c->uuid_bucket, k);
        uuid_io(c, REQ_OP_READ, 0, k, cl);

        if (j->version < BCACHE_JSET_VERSION_UUIDv1) {
                struct uuid_entry_v0    *u0 = (void *) c->uuids;
                struct uuid_entry       *u1 = (void *) c->uuids;
                int i;

                closure_sync(cl);

                /*
                 * Since the new uuid entry is bigger than the old, we have to
                 * convert starting at the highest memory address and work down
                 * in order to do it in place
                 */

                for (i = c->nr_uuids - 1; i >= 0; --i) {
                        memcpy(u1[i].uuid,      u0[i].uuid, 16);
                        memcpy(u1[i].label,     u0[i].label, 32);

                        u1[i].first_reg         = u0[i].first_reg;
                        u1[i].last_reg          = u0[i].last_reg;
                        u1[i].invalidated       = u0[i].invalidated;

                        u1[i].flags     = 0;
                        u1[i].sectors   = 0;
                }
        }

        return NULL;
}

static int __uuid_write(struct cache_set *c)
{
        BKEY_PADDED(key) k;
        struct closure cl;
        struct cache *ca = c->cache;
        unsigned int size;

        closure_init_stack(&cl);
        lockdep_assert_held(&bch_register_lock);

        if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, true))
                return 1;

        size = meta_bucket_pages(&ca->sb) * PAGE_SECTORS;
        SET_KEY_SIZE(&k.key, size);
        uuid_io(c, REQ_OP_WRITE, 0, &k.key, &cl);
        closure_sync(&cl);

        /* Only one bucket used for uuid write */
        atomic_long_add(ca->sb.bucket_size, &ca->meta_sectors_written);

        bkey_copy(&c->uuid_bucket, &k.key);
        bkey_put(c, &k.key);
        return 0;
}

int bch_uuid_write(struct cache_set *c)
{
        int ret = __uuid_write(c);

        if (!ret)
                bch_journal_meta(c, NULL);

        return ret;
}

static struct uuid_entry *uuid_find(struct cache_set *c, const char *uuid)
{
        struct uuid_entry *u;

        for (u = c->uuids; u < c->uuids + c->nr_uuids; u++)
                if (!memcmp(u->uuid, uuid, 16))
                        return u;

        return NULL;
}

static struct uuid_entry *uuid_find_empty(struct cache_set *c)
{
        static const char zero_uuid[16] = "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0";

        return uuid_find(c, zero_uuid);
}

/*
 * Bucket priorities/gens:
 *
 * For each bucket, we store on disk its
 *   8 bit gen
 *  16 bit priority
 *
 * See alloc.c for an explanation of the gen. The priority is used to implement
 * an LRU (and in the future other) cache replacement policies; for most
 * purposes it's just an opaque integer.
 *
 * The gens and the priorities don't have a whole lot to do with each other, and
 * it's actually the gens that must be written out at specific times - it's no
 * big deal if the priorities don't get written, if we lose them we just reuse
 * buckets in suboptimal order.
 *
 * On disk they're stored in a packed array, and in as many buckets as are
 * required to fit them all. The buckets we use to store them form a list; the
 * journal header points to the first bucket, the first bucket points to the
 * second bucket, et cetera.
 *
 * This code is used by the allocation code; periodically (whenever it runs out
 * of buckets to allocate from) the allocation code will invalidate some
 * buckets, but it can't use those buckets until their new gens are safely on
 * disk.
 */

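/*
 * Each on-disk entry is a struct bucket_disk: a 16 bit little-endian
 * priority followed by an 8 bit gen, so one prio bucket holds roughly
 * (bucket bytes - header) / 3 entries; see prios_per_bucket().
 */
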
static void prio_endio(struct bio *bio)
{
        struct cache *ca = bio->bi_private;

        cache_set_err_on(bio->bi_status, ca->set, "accessing priorities");
        bch_bbio_free(bio, ca->set);
        closure_put(&ca->prio);
}

static void prio_io(struct cache *ca, uint64_t bucket, int op,
                    unsigned long op_flags)
{
        struct closure *cl = &ca->prio;
        struct bio *bio = bch_bbio_alloc(ca->set);

        closure_init_stack(cl);

        bio->bi_iter.bi_sector  = bucket * ca->sb.bucket_size;
        bio_set_dev(bio, ca->bdev);
        bio->bi_iter.bi_size    = meta_bucket_bytes(&ca->sb);

        bio->bi_end_io  = prio_endio;
        bio->bi_private = ca;
        bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags);
        bch_bio_map(bio, ca->disk_buckets);

        closure_bio_submit(ca->set, bio, &ca->prio);
        closure_sync(cl);
}

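/*
 * Write out the current prio/gen arrays. bucket_lock must be held on
 * entry; it is dropped around each prio_io() call and around the
 * journal write, since those block, then retaken. The previous set of
 * prio buckets is only freed once the new ones are written and
 * journalled, so a crash in between still finds valid priorities.
 */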
int bch_prio_write(struct cache *ca, bool wait)
{
        int i;
        struct bucket *b;
        struct closure cl;

        pr_debug("free_prio=%zu, free_none=%zu, free_inc=%zu\n",
                 fifo_used(&ca->free[RESERVE_PRIO]),
                 fifo_used(&ca->free[RESERVE_NONE]),
                 fifo_used(&ca->free_inc));

        /*
         * Pre-check if there are enough free buckets. In the non-blocking
         * scenario it's better to fail early rather than starting to allocate
         * buckets and do a cleanup later in case of failure.
         */
        if (!wait) {
                size_t avail = fifo_used(&ca->free[RESERVE_PRIO]) +
                               fifo_used(&ca->free[RESERVE_NONE]);
                if (prio_buckets(ca) > avail)
                        return -ENOMEM;
        }

        closure_init_stack(&cl);

        lockdep_assert_held(&ca->set->bucket_lock);

        ca->disk_buckets->seq++;

        atomic_long_add(ca->sb.bucket_size * prio_buckets(ca),
                        &ca->meta_sectors_written);

        for (i = prio_buckets(ca) - 1; i >= 0; --i) {
                long bucket;
                struct prio_set *p = ca->disk_buckets;
                struct bucket_disk *d = p->data;
                struct bucket_disk *end = d + prios_per_bucket(ca);

                for (b = ca->buckets + i * prios_per_bucket(ca);
                     b < ca->buckets + ca->sb.nbuckets && d < end;
                     b++, d++) {
                        d->prio = cpu_to_le16(b->prio);
                        d->gen = b->gen;
                }

                p->next_bucket  = ca->prio_buckets[i + 1];
                p->magic        = pset_magic(&ca->sb);
                p->csum         = bch_crc64(&p->magic, meta_bucket_bytes(&ca->sb) - 8);

                bucket = bch_bucket_alloc(ca, RESERVE_PRIO, wait);
                BUG_ON(bucket == -1);

                mutex_unlock(&ca->set->bucket_lock);
                prio_io(ca, bucket, REQ_OP_WRITE, 0);
                mutex_lock(&ca->set->bucket_lock);

                ca->prio_buckets[i] = bucket;
                atomic_dec_bug(&ca->buckets[bucket].pin);
        }

        mutex_unlock(&ca->set->bucket_lock);

        bch_journal_meta(ca->set, &cl);
        closure_sync(&cl);

        mutex_lock(&ca->set->bucket_lock);

        /*
         * Don't want the old priorities to get garbage collected until after we
         * finish writing the new ones, and they're journalled
         */
        for (i = 0; i < prio_buckets(ca); i++) {
                if (ca->prio_last_buckets[i])
                        __bch_bucket_free(ca,
                                &ca->buckets[ca->prio_last_buckets[i]]);

                ca->prio_last_buckets[i] = ca->prio_buckets[i];
        }
        return 0;
}

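/*
 * Counterpart to bch_prio_write(): walk the chained prio buckets
 * starting at @bucket, verifying each one's checksum and magic, and
 * restore the in-memory prio and gen of every bucket.
 */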
static int prio_read(struct cache *ca, uint64_t bucket)
{
        struct prio_set *p = ca->disk_buckets;
        struct bucket_disk *d = p->data + prios_per_bucket(ca), *end = d;
        struct bucket *b;
        unsigned int bucket_nr = 0;
        int ret = -EIO;

        for (b = ca->buckets;
             b < ca->buckets + ca->sb.nbuckets;
             b++, d++) {
                if (d == end) {
                        ca->prio_buckets[bucket_nr] = bucket;
                        ca->prio_last_buckets[bucket_nr] = bucket;
                        bucket_nr++;

                        prio_io(ca, bucket, REQ_OP_READ, 0);

                        if (p->csum !=
                            bch_crc64(&p->magic, meta_bucket_bytes(&ca->sb) - 8)) {
                                pr_warn("bad csum reading priorities\n");
                                goto out;
                        }

                        if (p->magic != pset_magic(&ca->sb)) {
                                pr_warn("bad magic reading priorities\n");
                                goto out;
                        }

                        bucket = p->next_bucket;
                        d = p->data;
                }

                b->prio = le16_to_cpu(d->prio);
                b->gen = b->last_gc = d->gen;
        }

        ret = 0;
out:
        return ret;
}

/* Bcache device */

static int open_dev(struct block_device *b, fmode_t mode)
{
        struct bcache_device *d = b->bd_disk->private_data;

        if (test_bit(BCACHE_DEV_CLOSING, &d->flags))
                return -ENXIO;

        closure_get(&d->cl);
        return 0;
}

static void release_dev(struct gendisk *b, fmode_t mode)
{
        struct bcache_device *d = b->private_data;

        closure_put(&d->cl);
}

static int ioctl_dev(struct block_device *b, fmode_t mode,
                     unsigned int cmd, unsigned long arg)
{
        struct bcache_device *d = b->bd_disk->private_data;

        return d->ioctl(d, mode, cmd, arg);
}

static const struct block_device_operations bcache_cached_ops = {
        .submit_bio     = cached_dev_submit_bio,
        .open           = open_dev,
        .release        = release_dev,
        .ioctl          = ioctl_dev,
        .owner          = THIS_MODULE,
};

static const struct block_device_operations bcache_flash_ops = {
        .submit_bio     = flash_dev_submit_bio,
        .open           = open_dev,
        .release        = release_dev,
        .ioctl          = ioctl_dev,
        .owner          = THIS_MODULE,
};

void bcache_device_stop(struct bcache_device *d)
{
        if (!test_and_set_bit(BCACHE_DEV_CLOSING, &d->flags))
                /*
                 * closure_fn set to
                 * - cached device: cached_dev_flush()
                 * - flash dev: flash_dev_flush()
                 */
                closure_queue(&d->cl);
}

static void bcache_device_unlink(struct bcache_device *d)
{
        lockdep_assert_held(&bch_register_lock);

        if (d->c && !test_and_set_bit(BCACHE_DEV_UNLINK_DONE, &d->flags)) {
                struct cache *ca = d->c->cache;

                sysfs_remove_link(&d->c->kobj, d->name);
                sysfs_remove_link(&d->kobj, "cache");

                bd_unlink_disk_holder(ca->bdev, d->disk);
        }
}

static void bcache_device_link(struct bcache_device *d, struct cache_set *c,
                               const char *name)
{
        struct cache *ca = c->cache;
        int ret;

        bd_link_disk_holder(ca->bdev, d->disk);

        snprintf(d->name, BCACHEDEVNAME_SIZE,
                 "%s%u", name, d->id);

        ret = sysfs_create_link(&d->kobj, &c->kobj, "cache");
        if (ret < 0)
                pr_err("Couldn't create device -> cache set symlink\n");

        ret = sysfs_create_link(&c->kobj, &d->kobj, d->name);
        if (ret < 0)
                pr_err("Couldn't create cache set -> device symlink\n");

        clear_bit(BCACHE_DEV_UNLINK_DONE, &d->flags);
}

static void bcache_device_detach(struct bcache_device *d)
{
        lockdep_assert_held(&bch_register_lock);

        atomic_dec(&d->c->attached_dev_nr);

        if (test_bit(BCACHE_DEV_DETACHING, &d->flags)) {
                struct uuid_entry *u = d->c->uuids + d->id;

                SET_UUID_FLASH_ONLY(u, 0);
                memcpy(u->uuid, invalid_uuid, 16);
                u->invalidated = cpu_to_le32((u32)ktime_get_real_seconds());
                bch_uuid_write(d->c);
        }

        bcache_device_unlink(d);

        d->c->devices[d->id] = NULL;
        closure_put(&d->c->caching);
        d->c = NULL;
}

static void bcache_device_attach(struct bcache_device *d, struct cache_set *c,
                                 unsigned int id)
{
        d->id = id;
        d->c = c;
        c->devices[id] = d;

        if (id >= c->devices_max_used)
                c->devices_max_used = id + 1;

        closure_get(&c->caching);
}

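/*
 * Each bcache device reserves BCACHE_MINORS (128) minor numbers, so
 * device index and first minor are related by a factor of 128: e.g.
 * idx 2 maps to first_minor 256, and vice versa.
 */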
static inline int first_minor_to_idx(int first_minor)
{
        return (first_minor / BCACHE_MINORS);
}

static inline int idx_to_first_minor(int idx)
{
        return (idx * BCACHE_MINORS);
}

static void bcache_device_free(struct bcache_device *d)
{
        struct gendisk *disk = d->disk;

        lockdep_assert_held(&bch_register_lock);

        if (disk)
                pr_info("%s stopped\n", disk->disk_name);
        else
                pr_err("bcache device (NULL gendisk) stopped\n");

        if (d->c)
                bcache_device_detach(d);

        if (disk) {
                bool disk_added = (disk->flags & GENHD_FL_UP) != 0;

                if (disk_added)
                        del_gendisk(disk);

                if (disk->queue)
                        blk_cleanup_queue(disk->queue);

                ida_simple_remove(&bcache_device_idx,
                                  first_minor_to_idx(disk->first_minor));
                if (disk_added)
                        put_disk(disk);
        }

        bioset_exit(&d->bio_split);
        kvfree(d->full_dirty_stripes);
        kvfree(d->stripe_sectors_dirty);

        closure_debug_destroy(&d->cl);
}

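/*
 * Common initialization for cached devices and flash-only volumes:
 * sizes the dirty-stripe bookkeeping arrays, reserves a minor-number
 * range, sets up the bio split pool, and allocates the gendisk and
 * request queue, falling back to the backing device's logical block
 * size if the superblock's block size exceeds the page size.
 */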
static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
                sector_t sectors, struct block_device *cached_bdev,
                const struct block_device_operations *ops)
{
        struct request_queue *q;
        const size_t max_stripes = min_t(size_t, INT_MAX,
                                         SIZE_MAX / sizeof(atomic_t));
        uint64_t n;
        int idx;

        if (!d->stripe_size)
                d->stripe_size = 1 << 31;

        n = DIV_ROUND_UP_ULL(sectors, d->stripe_size);
        if (!n || n > max_stripes) {
                pr_err("nr_stripes too large or invalid: %llu (start sector beyond end of disk?)\n",
                        n);
                return -ENOMEM;
        }
        d->nr_stripes = n;

        n = d->nr_stripes * sizeof(atomic_t);
        d->stripe_sectors_dirty = kvzalloc(n, GFP_KERNEL);
        if (!d->stripe_sectors_dirty)
                return -ENOMEM;

        n = BITS_TO_LONGS(d->nr_stripes) * sizeof(unsigned long);
        d->full_dirty_stripes = kvzalloc(n, GFP_KERNEL);
        if (!d->full_dirty_stripes)
                return -ENOMEM;

        idx = ida_simple_get(&bcache_device_idx, 0,
                                BCACHE_DEVICE_IDX_MAX, GFP_KERNEL);
        if (idx < 0)
                return idx;

        if (bioset_init(&d->bio_split, 4, offsetof(struct bbio, bio),
                        BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER))
                goto err;

        d->disk = alloc_disk(BCACHE_MINORS);
        if (!d->disk)
                goto err;

        set_capacity(d->disk, sectors);
        snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", idx);

        d->disk->major          = bcache_major;
        d->disk->first_minor    = idx_to_first_minor(idx);
        d->disk->fops           = ops;
        d->disk->private_data   = d;

        q = blk_alloc_queue(NUMA_NO_NODE);
        if (!q)
                return -ENOMEM;

        d->disk->queue                  = q;
        q->limits.max_hw_sectors        = UINT_MAX;
        q->limits.max_sectors           = UINT_MAX;
        q->limits.max_segment_size      = UINT_MAX;
        q->limits.max_segments          = BIO_MAX_PAGES;
        blk_queue_max_discard_sectors(q, UINT_MAX);
        q->limits.discard_granularity   = 512;
        q->limits.io_min                = block_size;
        q->limits.logical_block_size    = block_size;
        q->limits.physical_block_size   = block_size;

        if (q->limits.logical_block_size > PAGE_SIZE && cached_bdev) {
                /*
                 * This should only happen with BCACHE_SB_VERSION_BDEV.
                 * Block/page size is checked for BCACHE_SB_VERSION_CDEV.
                 */
                pr_info("%s: sb/logical block size (%u) greater than page size (%lu) falling back to device logical block size (%u)\n",
                        d->disk->disk_name, q->limits.logical_block_size,
                        PAGE_SIZE, bdev_logical_block_size(cached_bdev));

                /* This also adjusts physical block size/min io size if needed */
                blk_queue_logical_block_size(q, bdev_logical_block_size(cached_bdev));
        }

        blk_queue_flag_set(QUEUE_FLAG_NONROT, d->disk->queue);
        blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, d->disk->queue);
        blk_queue_flag_set(QUEUE_FLAG_DISCARD, d->disk->queue);

        blk_queue_write_cache(q, true, true);

        return 0;

err:
        ida_simple_remove(&bcache_device_idx, idx);
        return -ENOMEM;
}

/* Cached device */

static void calc_cached_dev_sectors(struct cache_set *c)
{
        uint64_t sectors = 0;
        struct cached_dev *dc;

        list_for_each_entry(dc, &c->cached_devs, list)
                sectors += bdev_sectors(dc->bdev);

        c->cached_dev_sectors = sectors;
}

#define BACKING_DEV_OFFLINE_TIMEOUT 5
static int cached_dev_status_update(void *arg)
{
        struct cached_dev *dc = arg;
        struct request_queue *q;

        /*
         * If this kthread is being stopped from elsewhere, quit right
         * away. dc->io_disable may also be set via the sysfs interface,
         * so check it here too.
         */
        while (!kthread_should_stop() && !dc->io_disable) {
                q = bdev_get_queue(dc->bdev);
                if (blk_queue_dying(q))
                        dc->offline_seconds++;
                else
                        dc->offline_seconds = 0;

                if (dc->offline_seconds >= BACKING_DEV_OFFLINE_TIMEOUT) {
                        pr_err("%s: device offline for %d seconds\n",
                               dc->backing_dev_name,
                               BACKING_DEV_OFFLINE_TIMEOUT);
                        pr_err("%s: disable I/O request due to backing device offline\n",
                               dc->disk.name);
                        dc->io_disable = true;
                        /* let others know earlier that io_disable is true */
                        smp_mb();
                        bcache_device_stop(&dc->disk);
                        break;
                }
                schedule_timeout_interruptible(HZ);
        }

        wait_for_kthread_stop();
        return 0;
}

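/*
 * Make the bcache device visible to userspace: add the gendisk, emit a
 * CHANGE uevent carrying the cached device's UUID and label, create the
 * sysfs links, and start the backing-device status monitor. Safe to
 * call only once; a second call returns -EBUSY via dc->running.
 */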
int bch_cached_dev_run(struct cached_dev *dc)
{
        struct bcache_device *d = &dc->disk;
        char *buf = kmemdup_nul(dc->sb.label, SB_LABEL_SIZE, GFP_KERNEL);
        char *env[] = {
                "DRIVER=bcache",
                kasprintf(GFP_KERNEL, "CACHED_UUID=%pU", dc->sb.uuid),
                kasprintf(GFP_KERNEL, "CACHED_LABEL=%s", buf ? : ""),
                NULL,
        };

        if (dc->io_disable) {
                pr_err("I/O disabled on cached dev %s\n",
                       dc->backing_dev_name);
                kfree(env[1]);
                kfree(env[2]);
                kfree(buf);
                return -EIO;
        }

        if (atomic_xchg(&dc->running, 1)) {
                kfree(env[1]);
                kfree(env[2]);
                kfree(buf);
                pr_info("cached dev %s is running already\n",
                       dc->backing_dev_name);
                return -EBUSY;
        }

        if (!d->c &&
            BDEV_STATE(&dc->sb) != BDEV_STATE_NONE) {
                struct closure cl;

                closure_init_stack(&cl);

                SET_BDEV_STATE(&dc->sb, BDEV_STATE_STALE);
                bch_write_bdev_super(dc, &cl);
                closure_sync(&cl);
        }

        add_disk(d->disk);
        bd_link_disk_holder(dc->bdev, dc->disk.disk);
        /*
         * These env variables won't show up in the uevent file; use
         * "udevadm monitor -e" to see them, as only class/kset
         * properties are persistent.
         */
        kobject_uevent_env(&disk_to_dev(d->disk)->kobj, KOBJ_CHANGE, env);
        kfree(env[1]);
        kfree(env[2]);
        kfree(buf);

        if (sysfs_create_link(&d->kobj, &disk_to_dev(d->disk)->kobj, "dev") ||
            sysfs_create_link(&disk_to_dev(d->disk)->kobj,
                              &d->kobj, "bcache")) {
                pr_err("Couldn't create bcache dev <-> disk sysfs symlinks\n");
                return -ENOMEM;
        }

        dc->status_update_thread = kthread_run(cached_dev_status_update,
                                               dc, "bcache_status_update");
        if (IS_ERR(dc->status_update_thread))
                pr_warn("failed to create bcache_status_update kthread, continue to run without monitoring backing device status\n");

        return 0;
}

/*
 * If BCACHE_DEV_RATE_DW_RUNNING is set, the routine of the delayed
 * work dc->writeback_rate_update is running. Wait until the routine
 * quits (BCACHE_DEV_RATE_DW_RUNNING is cleared), then go on to cancel
 * it. If BCACHE_DEV_RATE_DW_RUNNING is still set after the timeout,
 * give up waiting and cancel it anyway.
 */
static void cancel_writeback_rate_update_dwork(struct cached_dev *dc)
{
        int time_out = WRITEBACK_RATE_UPDATE_SECS_MAX * HZ;

        do {
                if (!test_bit(BCACHE_DEV_RATE_DW_RUNNING,
                              &dc->disk.flags))
                        break;
                time_out--;
                schedule_timeout_interruptible(1);
        } while (time_out > 0);

        if (time_out == 0)
                pr_warn("give up waiting for dc->writeback_write_update to quit\n");

        cancel_delayed_work_sync(&dc->writeback_rate_update);
}

static void cached_dev_detach_finish(struct work_struct *w)
{
        struct cached_dev *dc = container_of(w, struct cached_dev, detach);
        struct closure cl;

        closure_init_stack(&cl);

        BUG_ON(!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags));
        BUG_ON(refcount_read(&dc->count));

        if (test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))
                cancel_writeback_rate_update_dwork(dc);

        if (!IS_ERR_OR_NULL(dc->writeback_thread)) {
                kthread_stop(dc->writeback_thread);
                dc->writeback_thread = NULL;
        }

        memset(&dc->sb.set_uuid, 0, 16);
        SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE);

        bch_write_bdev_super(dc, &cl);
        closure_sync(&cl);

        mutex_lock(&bch_register_lock);

        calc_cached_dev_sectors(dc->disk.c);
        bcache_device_detach(&dc->disk);
        list_move(&dc->list, &uncached_devices);

        clear_bit(BCACHE_DEV_DETACHING, &dc->disk.flags);
        clear_bit(BCACHE_DEV_UNLINK_DONE, &dc->disk.flags);

        mutex_unlock(&bch_register_lock);

        pr_info("Caching disabled for %s\n", dc->backing_dev_name);

        /* Drop ref we took in cached_dev_detach() */
        closure_put(&dc->disk.cl);
}

void bch_cached_dev_detach(struct cached_dev *dc)
{
        lockdep_assert_held(&bch_register_lock);

        if (test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags))
                return;

        if (test_and_set_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
                return;

        /*
         * Block the device from being closed and freed until we're finished
         * detaching
         */
        closure_get(&dc->disk.cl);

        bch_writeback_queue(dc);

        cached_dev_put(dc);
}

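/*
 * Attach a backing device to a cache set: the set UUIDs must match and
 * both devices must be alive; a uuid_entry is then found or allocated
 * for the backing device, the superblocks are updated, writeback is
 * started (queued immediately if the device was dirty), and the device
 * goes live via bch_cached_dev_run().
 */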
int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
                          uint8_t *set_uuid)
{
        uint32_t rtime = cpu_to_le32((u32)ktime_get_real_seconds());
        struct uuid_entry *u;
        struct cached_dev *exist_dc, *t;
        int ret = 0;

        if ((set_uuid && memcmp(set_uuid, c->set_uuid, 16)) ||
            (!set_uuid && memcmp(dc->sb.set_uuid, c->set_uuid, 16)))
                return -ENOENT;

        if (dc->disk.c) {
                pr_err("Can't attach %s: already attached\n",
                       dc->backing_dev_name);
                return -EINVAL;
        }

        if (test_bit(CACHE_SET_STOPPING, &c->flags)) {
                pr_err("Can't attach %s: shutting down\n",
                       dc->backing_dev_name);
                return -EINVAL;
        }

        if (dc->sb.block_size < c->cache->sb.block_size) {
                /* Will die */
                pr_err("Couldn't attach %s: block size less than set's block size\n",
                       dc->backing_dev_name);
                return -EINVAL;
        }

        /* Check whether already attached */
        list_for_each_entry_safe(exist_dc, t, &c->cached_devs, list) {
                if (!memcmp(dc->sb.uuid, exist_dc->sb.uuid, 16)) {
                        pr_err("Tried to attach %s but duplicate UUID already attached\n",
                                dc->backing_dev_name);

                        return -EINVAL;
                }
        }

        u = uuid_find(c, dc->sb.uuid);

        if (u &&
            (BDEV_STATE(&dc->sb) == BDEV_STATE_STALE ||
             BDEV_STATE(&dc->sb) == BDEV_STATE_NONE)) {
                memcpy(u->uuid, invalid_uuid, 16);
                u->invalidated = cpu_to_le32((u32)ktime_get_real_seconds());
                u = NULL;
        }

        if (!u) {
                if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
                        pr_err("Couldn't find uuid for %s in set\n",
                               dc->backing_dev_name);
                        return -ENOENT;
                }

                u = uuid_find_empty(c);
                if (!u) {
                        pr_err("Not caching %s, no room for UUID\n",
                               dc->backing_dev_name);
                        return -EINVAL;
                }
        }

        /*
         * Deadlocks since we're called via sysfs...
         * sysfs_remove_file(&dc->kobj, &sysfs_attach);
         */

        if (bch_is_zero(u->uuid, 16)) {
                struct closure cl;

                closure_init_stack(&cl);

                memcpy(u->uuid, dc->sb.uuid, 16);
                memcpy(u->label, dc->sb.label, SB_LABEL_SIZE);
                u->first_reg = u->last_reg = rtime;
                bch_uuid_write(c);

                memcpy(dc->sb.set_uuid, c->set_uuid, 16);
                SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);

                bch_write_bdev_super(dc, &cl);
                closure_sync(&cl);
        } else {
                u->last_reg = rtime;
                bch_uuid_write(c);
        }

        bcache_device_attach(&dc->disk, c, u - c->uuids);
        list_move(&dc->list, &c->cached_devs);
        calc_cached_dev_sectors(c);

        /*
         * dc->c must be set before dc->count != 0 - paired with the mb in
         * cached_dev_get()
         */
        smp_wmb();
        refcount_set(&dc->count, 1);

        /* Block writeback thread, but spawn it */
        down_write(&dc->writeback_lock);
        if (bch_cached_dev_writeback_start(dc)) {
                up_write(&dc->writeback_lock);
                pr_err("Couldn't start writeback facilities for %s\n",
                       dc->disk.disk->disk_name);
                return -ENOMEM;
        }

        if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
                atomic_set(&dc->has_dirty, 1);
                bch_writeback_queue(dc);
        }

        bch_sectors_dirty_init(&dc->disk);

        ret = bch_cached_dev_run(dc);
        if (ret && (ret != -EBUSY)) {
                up_write(&dc->writeback_lock);
                /*
                 * bch_register_lock is held, so bcache_device_stop()
                 * cannot be called directly here. The kthread and
                 * kworker created earlier in
                 * bch_cached_dev_writeback_start() have to be stopped
                 * manually here.
                 */
                kthread_stop(dc->writeback_thread);
                cancel_writeback_rate_update_dwork(dc);
                pr_err("Couldn't run cached device %s\n",
                       dc->backing_dev_name);
                return ret;
        }

        bcache_device_link(&dc->disk, c, "bdev");
        atomic_inc(&c->attached_dev_nr);

        /* Allow the writeback thread to proceed */
        up_write(&dc->writeback_lock);

        pr_info("Caching %s as %s on set %pU\n",
                dc->backing_dev_name,
                dc->disk.disk->disk_name,
                dc->disk.c->set_uuid);
        return 0;
}

/* when dc->disk.kobj released */
void bch_cached_dev_release(struct kobject *kobj)
{
        struct cached_dev *dc = container_of(kobj, struct cached_dev,
                                             disk.kobj);
        kfree(dc);
        module_put(THIS_MODULE);
}

static void cached_dev_free(struct closure *cl)
{
        struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);

        if (test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))
                cancel_writeback_rate_update_dwork(dc);

        if (!IS_ERR_OR_NULL(dc->writeback_thread))
                kthread_stop(dc->writeback_thread);
        if (!IS_ERR_OR_NULL(dc->status_update_thread))
                kthread_stop(dc->status_update_thread);

        mutex_lock(&bch_register_lock);

        if (atomic_read(&dc->running))
                bd_unlink_disk_holder(dc->bdev, dc->disk.disk);
        bcache_device_free(&dc->disk);
        list_del(&dc->list);

        mutex_unlock(&bch_register_lock);

        if (dc->sb_disk)
                put_page(virt_to_page(dc->sb_disk));

        if (!IS_ERR_OR_NULL(dc->bdev))
                blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);

        wake_up(&unregister_wait);

        kobject_put(&dc->disk.kobj);
}

static void cached_dev_flush(struct closure *cl)
{
        struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
        struct bcache_device *d = &dc->disk;

        mutex_lock(&bch_register_lock);
        bcache_device_unlink(d);
        mutex_unlock(&bch_register_lock);

        bch_cache_accounting_destroy(&dc->accounting);
        kobject_del(&d->kobj);

        continue_at(cl, cached_dev_free, system_wq);
}

static int cached_dev_init(struct cached_dev *dc, unsigned int block_size)
{
        int ret;
        struct io *io;
        struct request_queue *q = bdev_get_queue(dc->bdev);

        __module_get(THIS_MODULE);
        INIT_LIST_HEAD(&dc->list);
        closure_init(&dc->disk.cl, NULL);
        set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq);
        kobject_init(&dc->disk.kobj, &bch_cached_dev_ktype);
        INIT_WORK(&dc->detach, cached_dev_detach_finish);
        sema_init(&dc->sb_write_mutex, 1);
        INIT_LIST_HEAD(&dc->io_lru);
        spin_lock_init(&dc->io_lock);
        bch_cache_accounting_init(&dc->accounting, &dc->disk.cl);

        dc->sequential_cutoff           = 4 << 20;

        for (io = dc->io; io < dc->io + RECENT_IO; io++) {
                list_add(&io->lru, &dc->io_lru);
                hlist_add_head(&io->hash, dc->io_hash + RECENT_IO);
        }

        dc->disk.stripe_size = q->limits.io_opt >> 9;

        if (dc->disk.stripe_size)
                dc->partial_stripes_expensive =
                        q->limits.raid_partial_stripes_expensive;

        ret = bcache_device_init(&dc->disk, block_size,
                         dc->bdev->bd_part->nr_sects - dc->sb.data_offset,
                         dc->bdev, &bcache_cached_ops);
        if (ret)
                return ret;

        blk_queue_io_opt(dc->disk.disk->queue,
                max(queue_io_opt(dc->disk.disk->queue), queue_io_opt(q)));

        atomic_set(&dc->io_errors, 0);
        dc->io_disable = false;
        dc->error_limit = DEFAULT_CACHED_DEV_ERROR_LIMIT;
        /* default to auto */
        dc->stop_when_cache_set_failed = BCH_CACHED_DEV_STOP_AUTO;

        bch_cached_dev_request_init(dc);
        bch_cached_dev_writeback_init(dc);
        return 0;
}

/* Cached device - bcache superblock */

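/*
 * Register a backing device from its superblock: initialize the
 * cached_dev, add its kobjects, and try to attach it to every existing
 * cache set. Devices in the none or stale state are started right
 * away; a dirty one is only started once it attaches to its cache set.
 */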
static int register_bdev(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
                                 struct block_device *bdev,
                                 struct cached_dev *dc)
{
        const char *err = "cannot allocate memory";
        struct cache_set *c;
        int ret = -ENOMEM;

        bdevname(bdev, dc->backing_dev_name);
        memcpy(&dc->sb, sb, sizeof(struct cache_sb));
        dc->bdev = bdev;
        dc->bdev->bd_holder = dc;
        dc->sb_disk = sb_disk;

        if (cached_dev_init(dc, sb->block_size << 9))
                goto err;

        err = "error creating kobject";
        if (kobject_add(&dc->disk.kobj, &part_to_dev(bdev->bd_part)->kobj,
                        "bcache"))
                goto err;
        if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj))
                goto err;

        pr_info("registered backing device %s\n", dc->backing_dev_name);

        list_add(&dc->list, &uncached_devices);
        /* attach to a matched cache set if it exists */
        list_for_each_entry(c, &bch_cache_sets, list)
                bch_cached_dev_attach(dc, c, NULL);

        if (BDEV_STATE(&dc->sb) == BDEV_STATE_NONE ||
            BDEV_STATE(&dc->sb) == BDEV_STATE_STALE) {
                err = "failed to run cached device";
                ret = bch_cached_dev_run(dc);
                if (ret)
                        goto err;
        }

        return 0;
err:
        pr_notice("error %s: %s\n", dc->backing_dev_name, err);
        bcache_device_stop(&dc->disk);
        return ret;
}

/* Flash only volumes */

/* When d->kobj released */
void bch_flash_dev_release(struct kobject *kobj)
{
        struct bcache_device *d = container_of(kobj, struct bcache_device,
                                               kobj);
        kfree(d);
}

static void flash_dev_free(struct closure *cl)
{
        struct bcache_device *d = container_of(cl, struct bcache_device, cl);

        mutex_lock(&bch_register_lock);
        atomic_long_sub(bcache_dev_sectors_dirty(d),
                        &d->c->flash_dev_dirty_sectors);
        bcache_device_free(d);
        mutex_unlock(&bch_register_lock);
        kobject_put(&d->kobj);
}

static void flash_dev_flush(struct closure *cl)
{
        struct bcache_device *d = container_of(cl, struct bcache_device, cl);

        mutex_lock(&bch_register_lock);
        bcache_device_unlink(d);
        mutex_unlock(&bch_register_lock);
        kobject_del(&d->kobj);
        continue_at(cl, flash_dev_free, system_wq);
}

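/*
 * Create and start a flash-only volume: a bcache device backed purely
 * by the cache set's storage (no backing device), described by a
 * uuid_entry with the FLASH_ONLY flag and a size in sectors.
 */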
static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
{
        struct bcache_device *d = kzalloc(sizeof(struct bcache_device),
                                          GFP_KERNEL);
        if (!d)
                return -ENOMEM;

        closure_init(&d->cl, NULL);
        set_closure_fn(&d->cl, flash_dev_flush, system_wq);

        kobject_init(&d->kobj, &bch_flash_dev_ktype);

        if (bcache_device_init(d, block_bytes(c->cache), u->sectors,
                        NULL, &bcache_flash_ops))
                goto err;

        bcache_device_attach(d, c, u - c->uuids);
        bch_sectors_dirty_init(d);
        bch_flash_dev_request_init(d);
        add_disk(d->disk);

        if (kobject_add(&d->kobj, &disk_to_dev(d->disk)->kobj, "bcache"))
                goto err;

        bcache_device_link(d, c, "volume");

        return 0;
err:
        kobject_put(&d->kobj);
        return -ENOMEM;
}

static int flash_devs_run(struct cache_set *c)
{
        int ret = 0;
        struct uuid_entry *u;

        for (u = c->uuids; u < c->uuids + c->nr_uuids && !ret; u++)
                if (UUID_FLASH_ONLY(u))
                        ret = flash_dev_run(c, u);

        return ret;
}

int bch_flash_dev_create(struct cache_set *c, uint64_t size)
{
        struct uuid_entry *u;

        if (test_bit(CACHE_SET_STOPPING, &c->flags))
                return -EINTR;

        if (!test_bit(CACHE_SET_RUNNING, &c->flags))
                return -EPERM;

        u = uuid_find_empty(c);
        if (!u) {
                pr_err("Can't create volume, no room for UUID\n");
                return -EINVAL;
        }

        get_random_bytes(u->uuid, 16);
        memset(u->label, 0, 32);
        u->first_reg = u->last_reg = cpu_to_le32((u32)ktime_get_real_seconds());

        SET_UUID_FLASH_ONLY(u, 1);
        u->sectors = size >> 9;

        bch_uuid_write(c);

        return flash_dev_run(c, u);
}

bool bch_cached_dev_error(struct cached_dev *dc)
{
        if (!dc || test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags))
                return false;

        dc->io_disable = true;
1591         /* Make other CPUs see io_disable == true as early as possible */
1592         smp_mb();
1593
1594         pr_err("stop %s: too many IO errors on backing device %s\n",
1595                dc->disk.disk->disk_name, dc->backing_dev_name);
1596
1597         bcache_device_stop(&dc->disk);
1598         return true;
1599 }
1600
1601 /* Cache set */
1602
1603 __printf(2, 3)
1604 bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...)
1605 {
1606         struct va_format vaf;
1607         va_list args;
1608
1609         if (c->on_error != ON_ERROR_PANIC &&
1610             test_bit(CACHE_SET_STOPPING, &c->flags))
1611                 return false;
1612
1613         if (test_and_set_bit(CACHE_SET_IO_DISABLE, &c->flags))
1614                 pr_info("CACHE_SET_IO_DISABLE already set\n");
1615
1616         /*
1617          * XXX: we can be called from atomic context
1618          * acquire_console_sem();
1619          */
1620
1621         va_start(args, fmt);
1622
1623         vaf.fmt = fmt;
1624         vaf.va = &args;
1625
1626         pr_err("error on %pU: %pV, disabling caching\n",
1627                c->set_uuid, &vaf);
1628
1629         va_end(args);
1630
1631         if (c->on_error == ON_ERROR_PANIC)
1632                 panic("panic forced after error\n");
1633
1634         bch_cache_set_unregister(c);
1635         return true;
1636 }
1637
1638 /* Called when c->kobj is released */
1639 void bch_cache_set_release(struct kobject *kobj)
1640 {
1641         struct cache_set *c = container_of(kobj, struct cache_set, kobj);
1642
1643         kfree(c);
1644         module_put(THIS_MODULE);
1645 }
1646
1647 static void cache_set_free(struct closure *cl)
1648 {
1649         struct cache_set *c = container_of(cl, struct cache_set, cl);
1650         struct cache *ca;
1651
1652         debugfs_remove(c->debug);
1653
1654         bch_open_buckets_free(c);
1655         bch_btree_cache_free(c);
1656         bch_journal_free(c);
1657
1658         mutex_lock(&bch_register_lock);
1659         bch_bset_sort_state_free(&c->sort);
1660         free_pages((unsigned long) c->uuids, ilog2(meta_bucket_pages(&c->cache->sb)));
1661
1662         ca = c->cache;
1663         if (ca) {
1664                 ca->set = NULL;
1665                 c->cache = NULL;
1666                 kobject_put(&ca->kobj);
1667         }
1668
1670         if (c->moving_gc_wq)
1671                 destroy_workqueue(c->moving_gc_wq);
1672         bioset_exit(&c->bio_split);
1673         mempool_exit(&c->fill_iter);
1674         mempool_exit(&c->bio_meta);
1675         mempool_exit(&c->search);
1676         kfree(c->devices);
1677
1678         list_del(&c->list);
1679         mutex_unlock(&bch_register_lock);
1680
1681         pr_info("Cache set %pU unregistered\n", c->set_uuid);
1682         wake_up(&unregister_wait);
1683
1684         closure_debug_destroy(&c->cl);
1685         kobject_put(&c->kobj);
1686 }
1687
1688 static void cache_set_flush(struct closure *cl)
1689 {
1690         struct cache_set *c = container_of(cl, struct cache_set, caching);
1691         struct cache *ca = c->cache;
1692         struct btree *b;
1693
1694         bch_cache_accounting_destroy(&c->accounting);
1695
1696         kobject_put(&c->internal);
1697         kobject_del(&c->kobj);
1698
1699         if (!IS_ERR_OR_NULL(c->gc_thread))
1700                 kthread_stop(c->gc_thread);
1701
1702         if (!IS_ERR_OR_NULL(c->root))
1703                 list_add(&c->root->list, &c->btree_cache);
1704
1705         /*
1706          * Avoid flushing cached nodes if cache set is retiring
1707          * due to too many I/O errors detected.
1708          */
1709         if (!test_bit(CACHE_SET_IO_DISABLE, &c->flags))
1710                 list_for_each_entry(b, &c->btree_cache, list) {
1711                         mutex_lock(&b->write_lock);
1712                         if (btree_node_dirty(b))
1713                                 __bch_btree_node_write(b, NULL);
1714                         mutex_unlock(&b->write_lock);
1715                 }
1716
1717         if (ca->alloc_thread)
1718                 kthread_stop(ca->alloc_thread);
1719
1720         if (c->journal.cur) {
1721                 cancel_delayed_work_sync(&c->journal.work);
1722                 /* flush last journal entry if needed */
1723                 c->journal.work.work.func(&c->journal.work.work);
1724         }
1725
1726         closure_return(cl);
1727 }
1728
1729 /*
1730  * This function is only called when CACHE_SET_IO_DISABLE is set, which means
1731  * the cache set is unregistering due to too many I/O errors. In this
1732  * condition, whether the bcache device is stopped depends on the
1733  * stop_when_cache_set_failed value and on whether the broken cache has dirty data:
1734  *
1735  * dc->stop_when_cache_set_failed    dc->has_dirty   stop bcache device
1736  *  BCH_CACHED_DEV_STOP_AUTO           0               NO
1737  *  BCH_CACHED_DEV_STOP_AUTO           1               YES
1738  *  BCH_CACHED_DEV_STOP_ALWAYS         0               YES
1739  *  BCH_CACHED_DEV_STOP_ALWAYS         1               YES
1740  *
1741  * The expected behavior is: if stop_when_cache_set_failed is configured
1742  * to "auto" via the sysfs interface, the bcache device will not be stopped
1743  * as long as the backing device is clean on the broken cache device.
1744  */
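/*
 * A hedged sysfs sketch (per-device path assumed, attribute name taken
 * from the table above): the policy is configured per backing device, e.g.
 *
 *   echo always > /sys/block/bcache0/bcache/stop_when_cache_set_failed
 *   echo auto   > /sys/block/bcache0/bcache/stop_when_cache_set_failed
 */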
1745 static void conditional_stop_bcache_device(struct cache_set *c,
1746                                            struct bcache_device *d,
1747                                            struct cached_dev *dc)
1748 {
1749         if (dc->stop_when_cache_set_failed == BCH_CACHED_DEV_STOP_ALWAYS) {
1750                 pr_warn("stop_when_cache_set_failed of %s is \"always\", stop it for failed cache set %pU.\n",
1751                         d->disk->disk_name, c->set_uuid);
1752                 bcache_device_stop(d);
1753         } else if (atomic_read(&dc->has_dirty)) {
1754                 /*
1755                  * dc->stop_when_cache_set_failed == BCH_CACHED_DEV_STOP_AUTO
1756                  * and dc->has_dirty == 1
1757                  */
1758                 pr_warn("stop_when_cache_set_failed of %s is \"auto\" and cache is dirty, stop it to avoid potential data corruption.\n",
1759                         d->disk->disk_name);
1760                 /*
1761                  * There may be a small window in which the cache set is
1762                  * already released but the bcache device is not. During
1763                  * this window, regular I/O requests go directly to the
1764                  * backing device, because no cache set is attached any
1765                  * more. In writeback mode with dirty data, this can
1766                  * leave inconsistent data on the backing device.
1767                  * Therefore, before calling bcache_device_stop() due
1768                  * to a broken cache device, dc->io_disable should be
1769                  * explicitly set to true.
1770                  */
1771                 dc->io_disable = true;
1772                 /* Make other CPUs see io_disable == true as early as possible */
1773                 smp_mb();
1774                 bcache_device_stop(d);
1775         } else {
1776                 /*
1777                  * dc->stop_when_cache_set_failed == BCH_CACHED_DEV_STOP_AUTO
1778                  * and dc->has_dirty == 0
1779                  */
1780                 pr_warn("stop_when_cache_set_failed of %s is \"auto\" and cache is clean, keep it alive.\n",
1781                         d->disk->disk_name);
1782         }
1783 }
1784
1785 static void __cache_set_unregister(struct closure *cl)
1786 {
1787         struct cache_set *c = container_of(cl, struct cache_set, caching);
1788         struct cached_dev *dc;
1789         struct bcache_device *d;
1790         size_t i;
1791
1792         mutex_lock(&bch_register_lock);
1793
1794         for (i = 0; i < c->devices_max_used; i++) {
1795                 d = c->devices[i];
1796                 if (!d)
1797                         continue;
1798
1799                 if (!UUID_FLASH_ONLY(&c->uuids[i]) &&
1800                     test_bit(CACHE_SET_UNREGISTERING, &c->flags)) {
1801                         dc = container_of(d, struct cached_dev, disk);
1802                         bch_cached_dev_detach(dc);
1803                         if (test_bit(CACHE_SET_IO_DISABLE, &c->flags))
1804                                 conditional_stop_bcache_device(c, d, dc);
1805                 } else {
1806                         bcache_device_stop(d);
1807                 }
1808         }
1809
1810         mutex_unlock(&bch_register_lock);
1811
1812         continue_at(cl, cache_set_flush, system_wq);
1813 }
1814
1815 void bch_cache_set_stop(struct cache_set *c)
1816 {
1817         if (!test_and_set_bit(CACHE_SET_STOPPING, &c->flags))
1818                 /* closure_fn set to __cache_set_unregister() */
1819                 closure_queue(&c->caching);
1820 }
1821
1822 void bch_cache_set_unregister(struct cache_set *c)
1823 {
1824         set_bit(CACHE_SET_UNREGISTERING, &c->flags);
1825         bch_cache_set_stop(c);
1826 }
1827
1828 #define alloc_meta_bucket_pages(gfp, sb)                \
1829         ((void *) __get_free_pages(__GFP_ZERO|__GFP_COMP|gfp, ilog2(meta_bucket_pages(sb))))
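/*
 * Pages obtained with alloc_meta_bucket_pages() are released with a
 * free_pages() call of the same order, as cache_set_free() and
 * bch_cache_release() do for c->uuids and ca->disk_buckets:
 *
 *   free_pages((unsigned long)ptr, ilog2(meta_bucket_pages(sb)));
 */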
1830
1831 struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
1832 {
1833         int iter_size;
1834         struct cache *ca = container_of(sb, struct cache, sb);
1835         struct cache_set *c = kzalloc(sizeof(struct cache_set), GFP_KERNEL);
1836
1837         if (!c)
1838                 return NULL;
1839
1840         __module_get(THIS_MODULE);
1841         closure_init(&c->cl, NULL);
1842         set_closure_fn(&c->cl, cache_set_free, system_wq);
1843
1844         closure_init(&c->caching, &c->cl);
1845         set_closure_fn(&c->caching, __cache_set_unregister, system_wq);
1846
1847         /* Maybe create continue_at_noreturn() and use it here? */
1848         closure_set_stopped(&c->cl);
1849         closure_put(&c->cl);
1850
1851         kobject_init(&c->kobj, &bch_cache_set_ktype);
1852         kobject_init(&c->internal, &bch_cache_set_internal_ktype);
1853
1854         bch_cache_accounting_init(&c->accounting, &c->cl);
1855
1856         memcpy(c->set_uuid, sb->set_uuid, 16);
1857
1858         c->cache                = ca;
1859         c->cache->set           = c;
1860         c->bucket_bits          = ilog2(sb->bucket_size);
1861         c->block_bits           = ilog2(sb->block_size);
1862         c->nr_uuids             = meta_bucket_bytes(sb) / sizeof(struct uuid_entry);
1863         c->devices_max_used     = 0;
1864         atomic_set(&c->attached_dev_nr, 0);
1865         c->btree_pages          = meta_bucket_pages(sb);
1866         if (c->btree_pages > BTREE_MAX_PAGES)
1867                 c->btree_pages = max_t(int, c->btree_pages / 4,
1868                                        BTREE_MAX_PAGES);
1869
1870         sema_init(&c->sb_write_mutex, 1);
1871         mutex_init(&c->bucket_lock);
1872         init_waitqueue_head(&c->btree_cache_wait);
1873         spin_lock_init(&c->btree_cannibalize_lock);
1874         init_waitqueue_head(&c->bucket_wait);
1875         init_waitqueue_head(&c->gc_wait);
1876         sema_init(&c->uuid_write_mutex, 1);
1877
1878         spin_lock_init(&c->btree_gc_time.lock);
1879         spin_lock_init(&c->btree_split_time.lock);
1880         spin_lock_init(&c->btree_read_time.lock);
1881
1882         bch_moving_init_cache_set(c);
1883
1884         INIT_LIST_HEAD(&c->list);
1885         INIT_LIST_HEAD(&c->cached_devs);
1886         INIT_LIST_HEAD(&c->btree_cache);
1887         INIT_LIST_HEAD(&c->btree_cache_freeable);
1888         INIT_LIST_HEAD(&c->btree_cache_freed);
1889         INIT_LIST_HEAD(&c->data_buckets);
1890
1891         iter_size = ((meta_bucket_pages(sb) * PAGE_SECTORS) / sb->block_size + 1) *
1892                 sizeof(struct btree_iter_set);
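        /*
         * Worked example with illustrative numbers: on 4 KiB pages,
         * PAGE_SECTORS == 8, so meta_bucket_pages(sb) == 1 gives 8 sectors
         * of metadata per bucket; with sb->block_size == 8 sectors (4 KiB
         * blocks) this sizes each iterator for 8 / 8 + 1 == 2 entries of
         * struct btree_iter_set.
         */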
1893
1894         c->devices = kcalloc(c->nr_uuids, sizeof(void *), GFP_KERNEL);
1895         if (!c->devices)
1896                 goto err;
1897
1898         if (mempool_init_slab_pool(&c->search, 32, bch_search_cache))
1899                 goto err;
1900
1901         if (mempool_init_kmalloc_pool(&c->bio_meta, 2,
1902                         sizeof(struct bbio) +
1903                         sizeof(struct bio_vec) * meta_bucket_pages(sb)))
1904                 goto err;
1905
1906         if (mempool_init_kmalloc_pool(&c->fill_iter, 1, iter_size))
1907                 goto err;
1908
1909         if (bioset_init(&c->bio_split, 4, offsetof(struct bbio, bio),
1910                         BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER))
1911                 goto err;
1912
1913         c->uuids = alloc_meta_bucket_pages(GFP_KERNEL, sb);
1914         if (!c->uuids)
1915                 goto err;
1916
1917         c->moving_gc_wq = alloc_workqueue("bcache_gc", WQ_MEM_RECLAIM, 0);
1918         if (!c->moving_gc_wq)
1919                 goto err;
1920
1921         if (bch_journal_alloc(c))
1922                 goto err;
1923
1924         if (bch_btree_cache_alloc(c))
1925                 goto err;
1926
1927         if (bch_open_buckets_alloc(c))
1928                 goto err;
1929
1930         if (bch_bset_sort_state_init(&c->sort, ilog2(c->btree_pages)))
1931                 goto err;
1932
1933         c->congested_read_threshold_us  = 2000;
1934         c->congested_write_threshold_us = 20000;
1935         c->error_limit  = DEFAULT_IO_ERROR_LIMIT;
1936         c->idle_max_writeback_rate_enabled = 1;
1937         WARN_ON(test_and_clear_bit(CACHE_SET_IO_DISABLE, &c->flags));
1938
1939         return c;
1940 err:
1941         bch_cache_set_unregister(c);
1942         return NULL;
1943 }
1944
1945 static int run_cache_set(struct cache_set *c)
1946 {
1947         const char *err = "cannot allocate memory";
1948         struct cached_dev *dc, *t;
1949         struct cache *ca = c->cache;
1950         struct closure cl;
1951         LIST_HEAD(journal);
1952         struct journal_replay *l;
1953
1954         closure_init_stack(&cl);
1955
1956         c->nbuckets = ca->sb.nbuckets;
1957         set_gc_sectors(c);
1958
1959         if (CACHE_SYNC(&c->cache->sb)) {
1960                 struct bkey *k;
1961                 struct jset *j;
1962
1963                 err = "cannot allocate memory for journal";
1964                 if (bch_journal_read(c, &journal))
1965                         goto err;
1966
1967                 pr_debug("btree_journal_read() done\n");
1968
1969                 err = "no journal entries found";
1970                 if (list_empty(&journal))
1971                         goto err;
1972
1973                 j = &list_entry(journal.prev, struct journal_replay, list)->j;
1974
1975                 err = "IO error reading priorities";
1976                 if (prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev]))
1977                         goto err;
1978
1979                 /*
1980                  * If prio_read() fails it'll call cache_set_error and we'll
1981                  * tear everything down right away, but if we perhaps checked
1982                  * sooner we could avoid journal replay.
1983                  */
1984
1985                 k = &j->btree_root;
1986
1987                 err = "bad btree root";
1988                 if (__bch_btree_ptr_invalid(c, k))
1989                         goto err;
1990
1991                 err = "error reading btree root";
1992                 c->root = bch_btree_node_get(c, NULL, k,
1993                                              j->btree_level,
1994                                              true, NULL);
1995                 if (IS_ERR_OR_NULL(c->root))
1996                         goto err;
1997
1998                 list_del_init(&c->root->list);
1999                 rw_unlock(true, c->root);
2000
2001                 err = uuid_read(c, j, &cl);
2002                 if (err)
2003                         goto err;
2004
2005                 err = "error in recovery";
2006                 if (bch_btree_check(c))
2007                         goto err;
2008
2009                 bch_journal_mark(c, &journal);
2010                 bch_initial_gc_finish(c);
2011                 pr_debug("btree_check() done\n");
2012
2013                 /*
2014                  * bcache_journal_next() can't happen sooner, or
2015                  * btree_gc_finish() will give spurious errors about last_gc >
2016                  * gc_gen - this is a hack but oh well.
2017                  */
2018                 bch_journal_next(&c->journal);
2019
2020                 err = "error starting allocator thread";
2021                 if (bch_cache_allocator_start(ca))
2022                         goto err;
2023
2024                 /*
2025                  * First place it's safe to allocate: btree_check() and
2026                  * btree_gc_finish() have to run before we have buckets to
2027                  * allocate, and bch_bucket_alloc_set() might cause a journal
2028                  * entry to be written so bcache_journal_next() has to be called
2029                  * first.
2030                  *
2031                  * If the uuids were in the old format we have to rewrite them
2032                  * before the next journal entry is written:
2033                  */
2034                 if (j->version < BCACHE_JSET_VERSION_UUID)
2035                         __uuid_write(c);
2036
2037                 err = "bcache: replay journal failed";
2038                 if (bch_journal_replay(c, &journal))
2039                         goto err;
2040         } else {
2041                 unsigned int j;
2042
2043                 pr_notice("invalidating existing data\n");
2044                 ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7,
2045                                         2, SB_JOURNAL_BUCKETS);
2046
2047                 for (j = 0; j < ca->sb.keys; j++)
2048                         ca->sb.d[j] = ca->sb.first_bucket + j;
2049
2050                 bch_initial_gc_finish(c);
2051
2052                 err = "error starting allocator thread";
2053                 if (bch_cache_allocator_start(ca))
2054                         goto err;
2055
2056                 mutex_lock(&c->bucket_lock);
2057                 bch_prio_write(ca, true);
2058                 mutex_unlock(&c->bucket_lock);
2059
2060                 err = "cannot allocate new UUID bucket";
2061                 if (__uuid_write(c))
2062                         goto err;
2063
2064                 err = "cannot allocate new btree root";
2065                 c->root = __bch_btree_node_alloc(c, NULL, 0, true, NULL);
2066                 if (IS_ERR_OR_NULL(c->root))
2067                         goto err;
2068
2069                 mutex_lock(&c->root->write_lock);
2070                 bkey_copy_key(&c->root->key, &MAX_KEY);
2071                 bch_btree_node_write(c->root, &cl);
2072                 mutex_unlock(&c->root->write_lock);
2073
2074                 bch_btree_set_root(c->root);
2075                 rw_unlock(true, c->root);
2076
2077                 /*
2078                  * We don't want to write the first journal entry until
2079                  * everything is set up - fortunately journal entries won't be
2080                  * written until the SET_CACHE_SYNC() here:
2081                  */
2082                 SET_CACHE_SYNC(&c->cache->sb, true);
2083
2084                 bch_journal_next(&c->journal);
2085                 bch_journal_meta(c, &cl);
2086         }
2087
2088         err = "error starting gc thread";
2089         if (bch_gc_thread_start(c))
2090                 goto err;
2091
2092         closure_sync(&cl);
2093         c->cache->sb.last_mount = (u32)ktime_get_real_seconds();
2094         bcache_write_super(c);
2095
2096         list_for_each_entry_safe(dc, t, &uncached_devices, list)
2097                 bch_cached_dev_attach(dc, c, NULL);
2098
2099         flash_devs_run(c);
2100
2101         set_bit(CACHE_SET_RUNNING, &c->flags);
2102         return 0;
2103 err:
2104         while (!list_empty(&journal)) {
2105                 l = list_first_entry(&journal, struct journal_replay, list);
2106                 list_del(&l->list);
2107                 kfree(l);
2108         }
2109
2110         closure_sync(&cl);
2111
2112         bch_cache_set_error(c, "%s", err);
2113
2114         return -EIO;
2115 }
2116
2117 static const char *register_cache_set(struct cache *ca)
2118 {
2119         char buf[12];
2120         const char *err = "cannot allocate memory";
2121         struct cache_set *c;
2122
2123         list_for_each_entry(c, &bch_cache_sets, list)
2124                 if (!memcmp(c->set_uuid, ca->sb.set_uuid, 16)) {
2125                         if (c->cache)
2126                                 return "duplicate cache set member";
2127
2128                         goto found;
2129                 }
2130
2131         c = bch_cache_set_alloc(&ca->sb);
2132         if (!c)
2133                 return err;
2134
2135         err = "error creating kobject";
2136         if (kobject_add(&c->kobj, bcache_kobj, "%pU", c->set_uuid) ||
2137             kobject_add(&c->internal, &c->kobj, "internal"))
2138                 goto err;
2139
2140         if (bch_cache_accounting_add_kobjs(&c->accounting, &c->kobj))
2141                 goto err;
2142
2143         bch_debug_init_cache_set(c);
2144
2145         list_add(&c->list, &bch_cache_sets);
2146 found:
2147         sprintf(buf, "cache%i", ca->sb.nr_this_dev);
2148         if (sysfs_create_link(&ca->kobj, &c->kobj, "set") ||
2149             sysfs_create_link(&c->kobj, &ca->kobj, buf))
2150                 goto err;
2151
2152         kobject_get(&ca->kobj);
2153         ca->set = c;
2154         ca->set->cache = ca;
2155
2156         err = "failed to run cache set";
2157         if (run_cache_set(c) < 0)
2158                 goto err;
2159
2160         return NULL;
2161 err:
2162         bch_cache_set_unregister(c);
2163         return err;
2164 }
2165
2166 /* Cache device */
2167
2168 /* Called when ca->kobj is released */
2169 void bch_cache_release(struct kobject *kobj)
2170 {
2171         struct cache *ca = container_of(kobj, struct cache, kobj);
2172         unsigned int i;
2173
2174         if (ca->set) {
2175                 BUG_ON(ca->set->cache != ca);
2176                 ca->set->cache = NULL;
2177         }
2178
2179         free_pages((unsigned long) ca->disk_buckets, ilog2(meta_bucket_pages(&ca->sb)));
2180         kfree(ca->prio_buckets);
2181         vfree(ca->buckets);
2182
2183         free_heap(&ca->heap);
2184         free_fifo(&ca->free_inc);
2185
2186         for (i = 0; i < RESERVE_NR; i++)
2187                 free_fifo(&ca->free[i]);
2188
2189         if (ca->sb_disk)
2190                 put_page(virt_to_page(ca->sb_disk));
2191
2192         if (!IS_ERR_OR_NULL(ca->bdev))
2193                 blkdev_put(ca->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
2194
2195         kfree(ca);
2196         module_put(THIS_MODULE);
2197 }
2198
2199 static int cache_alloc(struct cache *ca)
2200 {
2201         size_t free;
2202         size_t btree_buckets;
2203         struct bucket *b;
2204         int ret = -ENOMEM;
2205         const char *err = NULL;
2206
2207         __module_get(THIS_MODULE);
2208         kobject_init(&ca->kobj, &bch_cache_ktype);
2209
2210         bio_init(&ca->journal.bio, ca->journal.bio.bi_inline_vecs, 8);
2211
2212         /*
2213          * When ca->sb.njournal_buckets is not zero, a journal exists,
2214          * and btree nodes may split during bch_journal_replay(), so
2215          * buckets of type RESERVE_BTREE are needed. In the worst case,
2216          * every journal bucket holds valid journal entries and all of
2217          * the keys must be replayed, so the number of RESERVE_BTREE
2218          * buckets should be at least as large as the number of journal
2219          * buckets.
2220          */
2221         btree_buckets = ca->sb.njournal_buckets ?: 8;
2222         free = roundup_pow_of_two(ca->sb.nbuckets) >> 10;
2223         if (!free) {
2224                 ret = -EPERM;
2225                 err = "ca->sb.nbuckets is too small";
2226                 goto err_free;
2227         }
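        /*
         * Worked example with illustrative numbers: for
         * ca->sb.nbuckets == 100000, roundup_pow_of_two() yields 131072
         * (2^17) and free == 128, so the RESERVE_MOVINGGC and RESERVE_NONE
         * fifos below hold 128 entries each, free_inc holds 512
         * (free << 2) and the heap 1024 (free << 3).
         */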
2228
2229         if (!init_fifo(&ca->free[RESERVE_BTREE], btree_buckets,
2230                                                 GFP_KERNEL)) {
2231                 err = "ca->free[RESERVE_BTREE] alloc failed";
2232                 goto err_btree_alloc;
2233         }
2234
2235         if (!init_fifo_exact(&ca->free[RESERVE_PRIO], prio_buckets(ca),
2236                                                         GFP_KERNEL)) {
2237                 err = "ca->free[RESERVE_PRIO] alloc failed";
2238                 goto err_prio_alloc;
2239         }
2240
2241         if (!init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL)) {
2242                 err = "ca->free[RESERVE_MOVINGGC] alloc failed";
2243                 goto err_movinggc_alloc;
2244         }
2245
2246         if (!init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL)) {
2247                 err = "ca->free[RESERVE_NONE] alloc failed";
2248                 goto err_none_alloc;
2249         }
2250
2251         if (!init_fifo(&ca->free_inc, free << 2, GFP_KERNEL)) {
2252                 err = "ca->free_inc alloc failed";
2253                 goto err_free_inc_alloc;
2254         }
2255
2256         if (!init_heap(&ca->heap, free << 3, GFP_KERNEL)) {
2257                 err = "ca->heap alloc failed";
2258                 goto err_heap_alloc;
2259         }
2260
2261         ca->buckets = vzalloc(array_size(sizeof(struct bucket),
2262                               ca->sb.nbuckets));
2263         if (!ca->buckets) {
2264                 err = "ca->buckets alloc failed";
2265                 goto err_buckets_alloc;
2266         }
2267
2268         ca->prio_buckets = kzalloc(array3_size(sizeof(uint64_t),
2269                                    prio_buckets(ca), 2),
2270                                    GFP_KERNEL);
2271         if (!ca->prio_buckets) {
2272                 err = "ca->prio_buckets alloc failed";
2273                 goto err_prio_buckets_alloc;
2274         }
2275
2276         ca->disk_buckets = alloc_meta_bucket_pages(GFP_KERNEL, &ca->sb);
2277         if (!ca->disk_buckets) {
2278                 err = "ca->disk_buckets alloc failed";
2279                 goto err_disk_buckets_alloc;
2280         }
2281
2282         ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca);
2283
2284         for_each_bucket(b, ca)
2285                 atomic_set(&b->pin, 0);
2286         return 0;
2287
2288 err_disk_buckets_alloc:
2289         kfree(ca->prio_buckets);
2290 err_prio_buckets_alloc:
2291         vfree(ca->buckets);
2292 err_buckets_alloc:
2293         free_heap(&ca->heap);
2294 err_heap_alloc:
2295         free_fifo(&ca->free_inc);
2296 err_free_inc_alloc:
2297         free_fifo(&ca->free[RESERVE_NONE]);
2298 err_none_alloc:
2299         free_fifo(&ca->free[RESERVE_MOVINGGC]);
2300 err_movinggc_alloc:
2301         free_fifo(&ca->free[RESERVE_PRIO]);
2302 err_prio_alloc:
2303         free_fifo(&ca->free[RESERVE_BTREE]);
2304 err_btree_alloc:
2305 err_free:
2306         module_put(THIS_MODULE);
2307         if (err)
2308                 pr_notice("error %s: %s\n", ca->cache_dev_name, err);
2309         return ret;
2310 }
2311
2312 static int register_cache(struct cache_sb *sb, struct cache_sb_disk *sb_disk,
2313                                 struct block_device *bdev, struct cache *ca)
2314 {
2315         const char *err = NULL; /* must be set for any error case */
2316         int ret = 0;
2317
2318         bdevname(bdev, ca->cache_dev_name);
2319         memcpy(&ca->sb, sb, sizeof(struct cache_sb));
2320         ca->bdev = bdev;
2321         ca->bdev->bd_holder = ca;
2322         ca->sb_disk = sb_disk;
2323
2324         if (blk_queue_discard(bdev_get_queue(bdev)))
2325                 ca->discard = CACHE_DISCARD(&ca->sb);
2326
2327         ret = cache_alloc(ca);
2328         if (ret != 0) {
2329                 /*
2330                  * If we fail here, ca->kobj is not initialized yet, so
2331                  * kobject_put() will never be called and bch_cache_release()
2332                  * gets no chance to call blkdev_put() on bdev. So call
2333                  * blkdev_put() explicitly here.
2334                  */
2335                 blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
2336                 if (ret == -ENOMEM)
2337                         err = "cache_alloc(): -ENOMEM";
2338                 else if (ret == -EPERM)
2339                         err = "cache_alloc(): cache device is too small";
2340                 else
2341                         err = "cache_alloc(): unknown error";
2342                 goto err;
2343         }
2344
2345         if (kobject_add(&ca->kobj,
2346                         &part_to_dev(bdev->bd_part)->kobj,
2347                         "bcache")) {
2348                 err = "error calling kobject_add";
2349                 ret = -ENOMEM;
2350                 goto out;
2351         }
2352
2353         mutex_lock(&bch_register_lock);
2354         err = register_cache_set(ca);
2355         mutex_unlock(&bch_register_lock);
2356
2357         if (err) {
2358                 ret = -ENODEV;
2359                 goto out;
2360         }
2361
2362         pr_info("registered cache device %s\n", ca->cache_dev_name);
2363
2364 out:
2365         kobject_put(&ca->kobj);
2366
2367 err:
2368         if (err)
2369                 pr_notice("error %s: %s\n", ca->cache_dev_name, err);
2370
2371         return ret;
2372 }
2373
2374 /* Global interfaces/init */
2375
2376 static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
2377                                const char *buffer, size_t size);
2378 static ssize_t bch_pending_bdevs_cleanup(struct kobject *k,
2379                                          struct kobj_attribute *attr,
2380                                          const char *buffer, size_t size);
2381
2382 kobj_attribute_write(register,          register_bcache);
2383 kobj_attribute_write(register_quiet,    register_bcache);
2384 kobj_attribute_write(pendings_cleanup,  bch_pending_bdevs_cleanup);
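/*
 * A hedged usage sketch (paths follow from the "bcache" kobject created
 * under fs_kobj in bcache_init() below): devices are registered by writing
 * their path to these attributes, e.g.
 *
 *   echo /dev/sdc > /sys/fs/bcache/register
 *   echo /dev/sdc > /sys/fs/bcache/register_quiet
 *
 * register_quiet differs only in staying silent when the device is busy.
 */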
2385
2386 static bool bch_is_open_backing(struct block_device *bdev)
2387 {
2388         struct cache_set *c, *tc;
2389         struct cached_dev *dc, *t;
2390
2391         list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
2392                 list_for_each_entry_safe(dc, t, &c->cached_devs, list)
2393                         if (dc->bdev == bdev)
2394                                 return true;
2395         list_for_each_entry_safe(dc, t, &uncached_devices, list)
2396                 if (dc->bdev == bdev)
2397                         return true;
2398         return false;
2399 }
2400
2401 static bool bch_is_open_cache(struct block_device *bdev)
2402 {
2403         struct cache_set *c, *tc;
2404
2405         list_for_each_entry_safe(c, tc, &bch_cache_sets, list) {
2406                 struct cache *ca = c->cache;
2407
2408                 if (ca->bdev == bdev)
2409                         return true;
2410         }
2411
2412         return false;
2413 }
2414
2415 static bool bch_is_open(struct block_device *bdev)
2416 {
2417         return bch_is_open_cache(bdev) || bch_is_open_backing(bdev);
2418 }
2419
2420 struct async_reg_args {
2421         struct delayed_work reg_work;
2422         char *path;
2423         struct cache_sb *sb;
2424         struct cache_sb_disk *sb_disk;
2425         struct block_device *bdev;
2426 };
2427
2428 static void register_bdev_worker(struct work_struct *work)
2429 {
2430         bool fail = false;
2431         struct async_reg_args *args =
2432                 container_of(work, struct async_reg_args, reg_work.work);
2433         struct cached_dev *dc;
2434
2435         dc = kzalloc(sizeof(*dc), GFP_KERNEL);
2436         if (!dc) {
2437                 fail = true;
2438                 put_page(virt_to_page(args->sb_disk));
2439                 blkdev_put(args->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
2440                 goto out;
2441         }
2442
2443         mutex_lock(&bch_register_lock);
2444         if (register_bdev(args->sb, args->sb_disk, args->bdev, dc) < 0)
2445                 fail = true;
2446         mutex_unlock(&bch_register_lock);
2447
2448 out:
2449         if (fail)
2450                 pr_info("error %s: failed to register backing device\n",
2451                         args->path);
2452         kfree(args->sb);
2453         kfree(args->path);
2454         kfree(args);
2455         module_put(THIS_MODULE);
2456 }
2457
2458 static void register_cache_worker(struct work_struct *work)
2459 {
2460         bool fail = false;
2461         struct async_reg_args *args =
2462                 container_of(work, struct async_reg_args, reg_work.work);
2463         struct cache *ca;
2464
2465         ca = kzalloc(sizeof(*ca), GFP_KERNEL);
2466         if (!ca) {
2467                 fail = true;
2468                 put_page(virt_to_page(args->sb_disk));
2469                 blkdev_put(args->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
2470                 goto out;
2471         }
2472
2473         /* blkdev_put() will be called in bch_cache_release() */
2474         if (register_cache(args->sb, args->sb_disk, args->bdev, ca) != 0)
2475                 fail = true;
2476
2477 out:
2478         if (fail)
2479                 pr_info("error %s: failed to register cache device\n",
2480                         args->path);
2481         kfree(args->sb);
2482         kfree(args->path);
2483         kfree(args);
2484         module_put(THIS_MODULE);
2485 }
2486
2487 static void register_device_async(struct async_reg_args *args)
2488 {
2489         if (SB_IS_BDEV(args->sb))
2490                 INIT_DELAYED_WORK(&args->reg_work, register_bdev_worker);
2491         else
2492                 INIT_DELAYED_WORK(&args->reg_work, register_cache_worker);
2493
2494         /* a 10 jiffy delay is enough here */
2495         queue_delayed_work(system_wq, &args->reg_work, 10);
2496 }
2497
2498 static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
2499                                const char *buffer, size_t size)
2500 {
2501         const char *err;
2502         char *path = NULL;
2503         struct cache_sb *sb;
2504         struct cache_sb_disk *sb_disk;
2505         struct block_device *bdev;
2506         ssize_t ret;
2507         bool async_registration = false;
2508
2509 #ifdef CONFIG_BCACHE_ASYNC_REGISTRATION
2510         async_registration = true;
2511 #endif
2512
2513         ret = -EBUSY;
2514         err = "failed to reference bcache module";
2515         if (!try_module_get(THIS_MODULE))
2516                 goto out;
2517
2518         /* For latest state of bcache_is_reboot */
2519         smp_mb();
2520         err = "bcache is in reboot";
2521         if (bcache_is_reboot)
2522                 goto out_module_put;
2523
2524         ret = -ENOMEM;
2525         err = "cannot allocate memory";
2526         path = kstrndup(buffer, size, GFP_KERNEL);
2527         if (!path)
2528                 goto out_module_put;
2529
2530         sb = kmalloc(sizeof(struct cache_sb), GFP_KERNEL);
2531         if (!sb)
2532                 goto out_free_path;
2533
2534         ret = -EINVAL;
2535         err = "failed to open device";
2536         bdev = blkdev_get_by_path(strim(path),
2537                                   FMODE_READ|FMODE_WRITE|FMODE_EXCL,
2538                                   sb);
2539         if (IS_ERR(bdev)) {
2540                 if (bdev == ERR_PTR(-EBUSY)) {
2541                         bdev = lookup_bdev(strim(path));
2542                         mutex_lock(&bch_register_lock);
2543                         if (!IS_ERR(bdev) && bch_is_open(bdev))
2544                                 err = "device already registered";
2545                         else
2546                                 err = "device busy";
2547                         mutex_unlock(&bch_register_lock);
2548                         if (!IS_ERR(bdev))
2549                                 bdput(bdev);
2550                         if (attr == &ksysfs_register_quiet)
2551                                 goto done;
2552                 }
2553                 goto out_free_sb;
2554         }
2555
2556         err = "failed to set blocksize";
2557         if (set_blocksize(bdev, 4096))
2558                 goto out_blkdev_put;
2559
2560         err = read_super(sb, bdev, &sb_disk);
2561         if (err)
2562                 goto out_blkdev_put;
2563
2564         err = "failed to register device";
2565
2566         if (async_registration) {
2567                 /* register asynchronously */
2568                 struct async_reg_args *args =
2569                         kzalloc(sizeof(struct async_reg_args), GFP_KERNEL);
2570
2571                 if (!args) {
2572                         ret = -ENOMEM;
2573                         err = "cannot allocate memory";
2574                         goto out_put_sb_page;
2575                 }
2576
2577                 args->path      = path;
2578                 args->sb        = sb;
2579                 args->sb_disk   = sb_disk;
2580                 args->bdev      = bdev;
2581                 register_device_async(args);
2582                 /* Do not wait; return to user space immediately */
2583                 goto async_done;
2584         }
2585
2586         if (SB_IS_BDEV(sb)) {
2587                 struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
2588
2589                 if (!dc)
2590                         goto out_put_sb_page;
2591
2592                 mutex_lock(&bch_register_lock);
2593                 ret = register_bdev(sb, sb_disk, bdev, dc);
2594                 mutex_unlock(&bch_register_lock);
2595                 /* blkdev_put() will be called in cached_dev_free() */
2596                 if (ret < 0)
2597                         goto out_free_sb;
2598         } else {
2599                 struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
2600
2601                 if (!ca)
2602                         goto out_put_sb_page;
2603
2604                 /* blkdev_put() will be called in bch_cache_release() */
2605                 if (register_cache(sb, sb_disk, bdev, ca) != 0)
2606                         goto out_free_sb;
2607         }
2608
2609 done:
2610         kfree(sb);
2611         kfree(path);
2612         module_put(THIS_MODULE);
2613 async_done:
2614         return size;
2615
2616 out_put_sb_page:
2617         put_page(virt_to_page(sb_disk));
2618 out_blkdev_put:
2619         blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
2620 out_free_sb:
2621         kfree(sb);
2622 out_free_path:
2623         kfree(path);
2624         path = NULL;
2625 out_module_put:
2626         module_put(THIS_MODULE);
2627 out:
2628         pr_info("error %s: %s\n", path ? path : "", err);
2629         return ret;
2630 }
2631
2633 struct pdev {
2634         struct list_head list;
2635         struct cached_dev *dc;
2636 };
2637
2638 static ssize_t bch_pending_bdevs_cleanup(struct kobject *k,
2639                                          struct kobj_attribute *attr,
2640                                          const char *buffer,
2641                                          size_t size)
2642 {
2643         LIST_HEAD(pending_devs);
2644         ssize_t ret = size;
2645         struct cached_dev *dc, *tdc;
2646         struct pdev *pdev, *tpdev;
2647         struct cache_set *c, *tc;
2648
2649         mutex_lock(&bch_register_lock);
2650         list_for_each_entry_safe(dc, tdc, &uncached_devices, list) {
2651                 pdev = kmalloc(sizeof(struct pdev), GFP_KERNEL);
2652                 if (!pdev)
2653                         break;
2654                 pdev->dc = dc;
2655                 list_add(&pdev->list, &pending_devs);
2656         }
2657
2658         list_for_each_entry_safe(pdev, tpdev, &pending_devs, list) {
2659                 list_for_each_entry_safe(c, tc, &bch_cache_sets, list) {
2660                         char *pdev_set_uuid = pdev->dc->sb.set_uuid;
2661                         char *set_uuid = c->set_uuid;
2662
2663                         if (!memcmp(pdev_set_uuid, set_uuid, 16)) {
2664                                 list_del(&pdev->list);
2665                                 kfree(pdev);
2666                                 break;
2667                         }
2668                 }
2669         }
2670         mutex_unlock(&bch_register_lock);
2671
2672         list_for_each_entry_safe(pdev, tpdev, &pending_devs, list) {
2673                 pr_info("delete pdev %p\n", pdev);
2674                 list_del(&pdev->list);
2675                 bcache_device_stop(&pdev->dc->disk);
2676                 kfree(pdev);
2677         }
2678
2679         return ret;
2680 }
2681
2682 static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x)
2683 {
2684         if (bcache_is_reboot)
2685                 return NOTIFY_DONE;
2686
2687         if (code == SYS_DOWN ||
2688             code == SYS_HALT ||
2689             code == SYS_POWER_OFF) {
2690                 DEFINE_WAIT(wait);
2691                 unsigned long start = jiffies;
2692                 bool stopped = false;
2693
2694                 struct cache_set *c, *tc;
2695                 struct cached_dev *dc, *tdc;
2696
2697                 mutex_lock(&bch_register_lock);
2698
2699                 if (bcache_is_reboot)
2700                         goto out;
2701
2702                 /* New registrations are rejected from now on */
2703                 bcache_is_reboot = true;
2704                 /*
2705                  * Make a registering caller (if any) on another CPU
2706                  * core see bcache_is_reboot == true as early as possible
2707                  */
2708                 smp_mb();
2709
2710                 if (list_empty(&bch_cache_sets) &&
2711                     list_empty(&uncached_devices))
2712                         goto out;
2713
2714                 mutex_unlock(&bch_register_lock);
2715
2716                 pr_info("Stopping all devices:\n");
2717
2718                 /*
2719                  * bch_register_lock is not held while calling
2720                  * bch_cache_set_stop() and bcache_device_stop(), to
2721                  * avoid a potential deadlock during reboot: the cache
2722                  * set and bcache device stopping paths acquire
2723                  * bch_register_lock too.
2724                  *
2725                  * We are safe here because bcache_is_reboot is already
2726                  * set to true, so register_bcache() will reject new
2727                  * registrations from now on. bcache_is_reboot also makes
2728                  * sure bcache_reboot() won't be re-entered by another
2729                  * thread, so there is no race in the following list
2730                  * iteration by list_for_each_entry_safe().
2731                  */
2732                 list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
2733                         bch_cache_set_stop(c);
2734
2735                 list_for_each_entry_safe(dc, tdc, &uncached_devices, list)
2736                         bcache_device_stop(&dc->disk);
2737
2739                 /*
2740                  * Give an early chance for other kthreads and
2741                  * kworkers to stop themselves
2742                  */
2743                 schedule();
2744
2745                 /* What's a condition variable? */
2746                 while (1) {
2747                         long timeout = start + 10 * HZ - jiffies;
2748
2749                         mutex_lock(&bch_register_lock);
2750                         stopped = list_empty(&bch_cache_sets) &&
2751                                 list_empty(&uncached_devices);
2752
2753                         if (timeout < 0 || stopped)
2754                                 break;
2755
2756                         prepare_to_wait(&unregister_wait, &wait,
2757                                         TASK_UNINTERRUPTIBLE);
2758
2759                         mutex_unlock(&bch_register_lock);
2760                         schedule_timeout(timeout);
2761                 }
2762
2763                 finish_wait(&unregister_wait, &wait);
2764
2765                 if (stopped)
2766                         pr_info("All devices stopped\n");
2767                 else
2768                         pr_notice("Timeout waiting for devices to be closed\n");
2769 out:
2770                 mutex_unlock(&bch_register_lock);
2771         }
2772
2773         return NOTIFY_DONE;
2774 }
2775
2776 static struct notifier_block reboot = {
2777         .notifier_call  = bcache_reboot,
2778         .priority       = INT_MAX, /* before any real devices */
2779 };
2780
2781 static void bcache_exit(void)
2782 {
2783         bch_debug_exit();
2784         bch_request_exit();
2785         if (bcache_kobj)
2786                 kobject_put(bcache_kobj);
2787         if (bcache_wq)
2788                 destroy_workqueue(bcache_wq);
2789         if (bch_journal_wq)
2790                 destroy_workqueue(bch_journal_wq);
2791
2792         if (bcache_major)
2793                 unregister_blkdev(bcache_major, "bcache");
2794         unregister_reboot_notifier(&reboot);
2795         mutex_destroy(&bch_register_lock);
2796 }
2797
2798 /* Check and fixup module parameters */
2799 static void check_module_parameters(void)
2800 {
2801         if (bch_cutoff_writeback_sync == 0)
2802                 bch_cutoff_writeback_sync = CUTOFF_WRITEBACK_SYNC;
2803         else if (bch_cutoff_writeback_sync > CUTOFF_WRITEBACK_SYNC_MAX) {
2804                 pr_warn("set bch_cutoff_writeback_sync (%u) to max value %u\n",
2805                         bch_cutoff_writeback_sync, CUTOFF_WRITEBACK_SYNC_MAX);
2806                 bch_cutoff_writeback_sync = CUTOFF_WRITEBACK_SYNC_MAX;
2807         }
2808
2809         if (bch_cutoff_writeback == 0)
2810                 bch_cutoff_writeback = CUTOFF_WRITEBACK;
2811         else if (bch_cutoff_writeback > CUTOFF_WRITEBACK_MAX) {
2812                 pr_warn("set bch_cutoff_writeback (%u) to max value %u\n",
2813                         bch_cutoff_writeback, CUTOFF_WRITEBACK_MAX);
2814                 bch_cutoff_writeback = CUTOFF_WRITEBACK_MAX;
2815         }
2816
2817         if (bch_cutoff_writeback > bch_cutoff_writeback_sync) {
2818                 pr_warn("set bch_cutoff_writeback (%u) to %u\n",
2819                         bch_cutoff_writeback, bch_cutoff_writeback_sync);
2820                 bch_cutoff_writeback = bch_cutoff_writeback_sync;
2821         }
2822 }
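/*
 * A hedged usage sketch: both cutoffs are module parameters (declared at
 * the end of this file), so values passed at load time, e.g.
 *
 *   modprobe bcache bch_cutoff_writeback=40 bch_cutoff_writeback_sync=70
 *
 * are checked and clamped by check_module_parameters() before any device
 * can be registered.
 */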
2823
2824 static int __init bcache_init(void)
2825 {
2826         static const struct attribute *files[] = {
2827                 &ksysfs_register.attr,
2828                 &ksysfs_register_quiet.attr,
2829                 &ksysfs_pendings_cleanup.attr,
2830                 NULL
2831         };
2832
2833         check_module_parameters();
2834
2835         mutex_init(&bch_register_lock);
2836         init_waitqueue_head(&unregister_wait);
2837         register_reboot_notifier(&reboot);
2838
2839         bcache_major = register_blkdev(0, "bcache");
2840         if (bcache_major < 0) {
2841                 unregister_reboot_notifier(&reboot);
2842                 mutex_destroy(&bch_register_lock);
2843                 return bcache_major;
2844         }
2845
2846         bcache_wq = alloc_workqueue("bcache", WQ_MEM_RECLAIM, 0);
2847         if (!bcache_wq)
2848                 goto err;
2849
2850         bch_journal_wq = alloc_workqueue("bch_journal", WQ_MEM_RECLAIM, 0);
2851         if (!bch_journal_wq)
2852                 goto err;
2853
2854         bcache_kobj = kobject_create_and_add("bcache", fs_kobj);
2855         if (!bcache_kobj)
2856                 goto err;
2857
2858         if (bch_request_init() ||
2859             sysfs_create_files(bcache_kobj, files))
2860                 goto err;
2861
2862         bch_debug_init();
2863         closure_debug_init();
2864
2865         bcache_is_reboot = false;
2866
2867         return 0;
2868 err:
2869         bcache_exit();
2870         return -ENOMEM;
2871 }
2872
2873 /*
2874  * Module hooks
2875  */
2876 module_exit(bcache_exit);
2877 module_init(bcache_init);
2878
2879 module_param(bch_cutoff_writeback, uint, 0);
2880 MODULE_PARM_DESC(bch_cutoff_writeback, "threshold to cutoff writeback");
2881
2882 module_param(bch_cutoff_writeback_sync, uint, 0);
2883 MODULE_PARM_DESC(bch_cutoff_writeback_sync, "hard threshold to cutoff writeback");
2884
2885 MODULE_DESCRIPTION("Bcache: a Linux block layer cache");
2886 MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>");
2887 MODULE_LICENSE("GPL");