2 * bcache setup/teardown code, and some metadata io - read a superblock and
3 * figure out what to do with it.
5 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
6 * Copyright 2012 Google, Inc.
14 #include "writeback.h"
16 #include <linux/blkdev.h>
17 #include <linux/buffer_head.h>
18 #include <linux/debugfs.h>
19 #include <linux/genhd.h>
20 #include <linux/idr.h>
21 #include <linux/kthread.h>
22 #include <linux/module.h>
23 #include <linux/random.h>
24 #include <linux/reboot.h>
25 #include <linux/sysfs.h>
27 MODULE_LICENSE("GPL");
28 MODULE_AUTHOR("Kent Overstreet <kent.overstreet@gmail.com>");
30 static const char bcache_magic[] = {
31 0xc6, 0x85, 0x73, 0xf6, 0x4e, 0x1a, 0x45, 0xca,
32 0x82, 0x65, 0xf5, 0x7f, 0x48, 0xba, 0x6d, 0x81
35 static const char invalid_uuid[] = {
36 0xa0, 0x3e, 0xf8, 0xed, 0x3e, 0xe1, 0xb8, 0x78,
37 0xc8, 0x50, 0xfc, 0x5e, 0xcb, 0x16, 0xcd, 0x99
40 static struct kobject *bcache_kobj;
41 struct mutex bch_register_lock;
42 LIST_HEAD(bch_cache_sets);
43 static LIST_HEAD(uncached_devices);
45 static int bcache_major;
46 static DEFINE_IDA(bcache_device_idx);
47 static wait_queue_head_t unregister_wait;
48 struct workqueue_struct *bcache_wq;
50 #define BTREE_MAX_PAGES (256 * 1024 / PAGE_SIZE)
51 /* maximum number of partitions on a single bcache device */
52 #define BCACHE_MINORS 128
53 /* maximum number of bcache devices on a single system */
54 #define BCACHE_DEVICE_IDX_MAX ((1U << MINORBITS)/BCACHE_MINORS)
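/*
 * Editor's note (illustrative, not driver code): the two macros above carve
 * the minor number space into fixed-size slots, BCACHE_MINORS minors per
 * bcache device, so a device index and its first minor convert by a simple
 * multiply/divide. Assuming MINORBITS is 20 as in mainline Linux,
 * BCACHE_DEVICE_IDX_MAX works out to (1 << 20) / 128 = 8192 devices. A
 * minimal userspace sketch of that arithmetic (all example_* names are
 * hypothetical):
 */
#if 0
#include <assert.h>

#define EXAMPLE_MINORBITS	20	/* assumed mainline value */
#define EXAMPLE_BCACHE_MINORS	128

static int example_idx_to_first_minor(int idx)
{
	return idx * EXAMPLE_BCACHE_MINORS;
}

static int example_first_minor_to_idx(int first_minor)
{
	return first_minor / EXAMPLE_BCACHE_MINORS;
}

static void example_minor_space(void)
{
	int max_idx = (1 << EXAMPLE_MINORBITS) / EXAMPLE_BCACHE_MINORS;

	assert(max_idx == 8192);
	assert(example_first_minor_to_idx(example_idx_to_first_minor(42)) == 42);
}
#endif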
58 static const char *read_super(struct cache_sb *sb, struct block_device *bdev,
63 struct buffer_head *bh = __bread(bdev, 1, SB_SIZE);
69 s = (struct cache_sb *) bh->b_data;
71 sb->offset = le64_to_cpu(s->offset);
72 sb->version = le64_to_cpu(s->version);
74 memcpy(sb->magic, s->magic, 16);
75 memcpy(sb->uuid, s->uuid, 16);
76 memcpy(sb->set_uuid, s->set_uuid, 16);
77 memcpy(sb->label, s->label, SB_LABEL_SIZE);
79 sb->flags = le64_to_cpu(s->flags);
80 sb->seq = le64_to_cpu(s->seq);
81 sb->last_mount = le32_to_cpu(s->last_mount);
82 sb->first_bucket = le16_to_cpu(s->first_bucket);
83 sb->keys = le16_to_cpu(s->keys);
85 for (i = 0; i < SB_JOURNAL_BUCKETS; i++)
86 sb->d[i] = le64_to_cpu(s->d[i]);
88 pr_debug("read sb version %llu, flags %llu, seq %llu, journal size %u",
89 sb->version, sb->flags, sb->seq, sb->keys);
91 err = "Not a bcache superblock";
92 if (sb->offset != SB_SECTOR)
95 if (memcmp(sb->magic, bcache_magic, 16))
98 err = "Too many journal buckets";
99 if (sb->keys > SB_JOURNAL_BUCKETS)
102 err = "Bad checksum";
103 if (s->csum != csum_set(s))
107 if (bch_is_zero(sb->uuid, 16))
110 sb->block_size = le16_to_cpu(s->block_size);
112 err = "Superblock block size smaller than device block size";
113 if (sb->block_size << 9 < bdev_logical_block_size(bdev))
116 switch (sb->version) {
117 case BCACHE_SB_VERSION_BDEV:
118 sb->data_offset = BDEV_DATA_START_DEFAULT;
120 case BCACHE_SB_VERSION_BDEV_WITH_OFFSET:
121 sb->data_offset = le64_to_cpu(s->data_offset);
123 err = "Bad data offset";
124 if (sb->data_offset < BDEV_DATA_START_DEFAULT)
128 case BCACHE_SB_VERSION_CDEV:
129 case BCACHE_SB_VERSION_CDEV_WITH_UUID:
130 sb->nbuckets = le64_to_cpu(s->nbuckets);
131 sb->bucket_size = le16_to_cpu(s->bucket_size);
133 sb->nr_in_set = le16_to_cpu(s->nr_in_set);
134 sb->nr_this_dev = le16_to_cpu(s->nr_this_dev);
136 err = "Too many buckets";
137 if (sb->nbuckets > LONG_MAX)
140 err = "Not enough buckets";
141 if (sb->nbuckets < 1 << 7)
144 err = "Bad block/bucket size";
145 if (!is_power_of_2(sb->block_size) ||
146 sb->block_size > PAGE_SECTORS ||
147 !is_power_of_2(sb->bucket_size) ||
148 sb->bucket_size < PAGE_SECTORS)
151 err = "Invalid superblock: device too small";
152 if (get_capacity(bdev->bd_disk) <
153 sb->bucket_size * sb->nbuckets)
157 if (bch_is_zero(sb->set_uuid, 16))
160 err = "Bad cache device number in set";
161 if (!sb->nr_in_set ||
162 sb->nr_in_set <= sb->nr_this_dev ||
163 sb->nr_in_set > MAX_CACHES_PER_SET)
166 err = "Journal buckets not sequential";
167 for (i = 0; i < sb->keys; i++)
168 if (sb->d[i] != sb->first_bucket + i)
171 err = "Too many journal buckets";
172 if (sb->first_bucket + sb->keys > sb->nbuckets)
175 err = "Invalid superblock: first bucket comes before end of super";
176 if (sb->first_bucket * sb->bucket_size < 16)
181 err = "Unsupported superblock version";
185 sb->last_mount = (u32)ktime_get_real_seconds();
188 get_page(bh->b_page);
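/*
 * Editor's note: read_super() above runs every on-disk field through
 * le16/le32/le64_to_cpu because the superblock is stored little-endian
 * regardless of the host CPU. A minimal userspace sketch of the same idea,
 * using glibc's le64toh()/le16toh(); the struct and field names here are
 * hypothetical, not the real bcache layout.
 */
#if 0
#include <endian.h>
#include <stdint.h>

struct example_disk_sb {
	uint64_t offset;	/* little-endian on disk */
	uint16_t block_size;
};

struct example_cpu_sb {
	uint64_t offset;	/* native byte order in memory */
	uint16_t block_size;
};

static void example_read_sb(const struct example_disk_sb *d,
			    struct example_cpu_sb *s)
{
	s->offset = le64toh(d->offset);
	s->block_size = le16toh(d->block_size);
}
#endif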
195 static void write_bdev_super_endio(struct bio *bio)
197 struct cached_dev *dc = bio->bi_private;
198 /* XXX: error checking */
200 closure_put(&dc->sb_write);
203 static void __write_super(struct cache_sb *sb, struct bio *bio)
205 struct cache_sb *out = page_address(bio_first_page_all(bio));
208 bio->bi_iter.bi_sector = SB_SECTOR;
209 bio->bi_iter.bi_size = SB_SIZE;
210 bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_SYNC|REQ_META);
211 bch_bio_map(bio, NULL);
213 out->offset = cpu_to_le64(sb->offset);
214 out->version = cpu_to_le64(sb->version);
216 memcpy(out->uuid, sb->uuid, 16);
217 memcpy(out->set_uuid, sb->set_uuid, 16);
218 memcpy(out->label, sb->label, SB_LABEL_SIZE);
220 out->flags = cpu_to_le64(sb->flags);
221 out->seq = cpu_to_le64(sb->seq);
223 out->last_mount = cpu_to_le32(sb->last_mount);
224 out->first_bucket = cpu_to_le16(sb->first_bucket);
225 out->keys = cpu_to_le16(sb->keys);
227 for (i = 0; i < sb->keys; i++)
228 out->d[i] = cpu_to_le64(sb->d[i]);
230 out->csum = csum_set(out);
232 pr_debug("ver %llu, flags %llu, seq %llu",
233 sb->version, sb->flags, sb->seq);
238 static void bch_write_bdev_super_unlock(struct closure *cl)
240 struct cached_dev *dc = container_of(cl, struct cached_dev, sb_write);
242 up(&dc->sb_write_mutex);
245 void bch_write_bdev_super(struct cached_dev *dc, struct closure *parent)
247 struct closure *cl = &dc->sb_write;
248 struct bio *bio = &dc->sb_bio;
250 down(&dc->sb_write_mutex);
251 closure_init(cl, parent);
254 bio_set_dev(bio, dc->bdev);
255 bio->bi_end_io = write_bdev_super_endio;
256 bio->bi_private = dc;
259 /* I/O request sent to backing device */
260 __write_super(&dc->sb, bio);
262 closure_return_with_destructor(cl, bch_write_bdev_super_unlock);
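/*
 * Editor's note: bch_write_bdev_super() above serializes superblock writers
 * with dc->sb_write_mutex and only releases it from
 * bch_write_bdev_super_unlock(), which the closure runs as its destructor
 * once the asynchronous bio completes and the last reference is dropped.
 * A rough refcount-with-destructor model of that idea (hypothetical names,
 * C11 atomics; not the real closure implementation):
 */
#if 0
#include <stdatomic.h>

struct example_closure {
	atomic_int remaining;
	void (*destructor)(struct example_closure *cl);
};

static void example_closure_get(struct example_closure *cl)
{
	atomic_fetch_add(&cl->remaining, 1);
}

static void example_closure_put(struct example_closure *cl)
{
	/* Last reference gone: run the cleanup, e.g. release the lock. */
	if (atomic_fetch_sub(&cl->remaining, 1) == 1)
		cl->destructor(cl);
}
#endif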
265 static void write_super_endio(struct bio *bio)
267 struct cache *ca = bio->bi_private;
270 bch_count_io_errors(ca, bio->bi_status, 0,
271 "writing superblock");
272 closure_put(&ca->set->sb_write);
275 static void bcache_write_super_unlock(struct closure *cl)
277 struct cache_set *c = container_of(cl, struct cache_set, sb_write);
279 up(&c->sb_write_mutex);
282 void bcache_write_super(struct cache_set *c)
284 struct closure *cl = &c->sb_write;
288 down(&c->sb_write_mutex);
289 closure_init(cl, &c->cl);
293 for_each_cache(ca, c, i) {
294 struct bio *bio = &ca->sb_bio;
296 ca->sb.version = BCACHE_SB_VERSION_CDEV_WITH_UUID;
297 ca->sb.seq = c->sb.seq;
298 ca->sb.last_mount = c->sb.last_mount;
300 SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb));
303 bio_set_dev(bio, ca->bdev);
304 bio->bi_end_io = write_super_endio;
305 bio->bi_private = ca;
308 __write_super(&ca->sb, bio);
311 closure_return_with_destructor(cl, bcache_write_super_unlock);
316 static void uuid_endio(struct bio *bio)
318 struct closure *cl = bio->bi_private;
319 struct cache_set *c = container_of(cl, struct cache_set, uuid_write);
321 cache_set_err_on(bio->bi_status, c, "accessing uuids");
322 bch_bbio_free(bio, c);
326 static void uuid_io_unlock(struct closure *cl)
328 struct cache_set *c = container_of(cl, struct cache_set, uuid_write);
330 up(&c->uuid_write_mutex);
333 static void uuid_io(struct cache_set *c, int op, unsigned long op_flags,
334 struct bkey *k, struct closure *parent)
336 struct closure *cl = &c->uuid_write;
337 struct uuid_entry *u;
342 down(&c->uuid_write_mutex);
343 closure_init(cl, parent);
345 for (i = 0; i < KEY_PTRS(k); i++) {
346 struct bio *bio = bch_bbio_alloc(c);
348 bio->bi_opf = REQ_SYNC | REQ_META | op_flags;
349 bio->bi_iter.bi_size = KEY_SIZE(k) << 9;
351 bio->bi_end_io = uuid_endio;
352 bio->bi_private = cl;
353 bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags);
354 bch_bio_map(bio, c->uuids);
356 bch_submit_bbio(bio, c, k, i);
358 if (op != REQ_OP_WRITE)
362 bch_extent_to_text(buf, sizeof(buf), k);
363 pr_debug("%s UUIDs at %s", op == REQ_OP_WRITE ? "wrote" : "read", buf);
365 for (u = c->uuids; u < c->uuids + c->nr_uuids; u++)
366 if (!bch_is_zero(u->uuid, 16))
367 pr_debug("Slot %zi: %pU: %s: 1st: %u last: %u inv: %u",
368 u - c->uuids, u->uuid, u->label,
369 u->first_reg, u->last_reg, u->invalidated);
371 closure_return_with_destructor(cl, uuid_io_unlock);
374 static char *uuid_read(struct cache_set *c, struct jset *j, struct closure *cl)
376 struct bkey *k = &j->uuid_bucket;
378 if (__bch_btree_ptr_invalid(c, k))
379 return "bad uuid pointer";
381 bkey_copy(&c->uuid_bucket, k);
382 uuid_io(c, REQ_OP_READ, 0, k, cl);
384 if (j->version < BCACHE_JSET_VERSION_UUIDv1) {
385 struct uuid_entry_v0 *u0 = (void *) c->uuids;
386 struct uuid_entry *u1 = (void *) c->uuids;
392 * Since the new uuid entry is bigger than the old, we have to
393 * convert starting at the highest memory address and work down
394 * in order to do it in place
397 for (i = c->nr_uuids - 1;
400 memcpy(u1[i].uuid, u0[i].uuid, 16);
401 memcpy(u1[i].label, u0[i].label, 32);
403 u1[i].first_reg = u0[i].first_reg;
404 u1[i].last_reg = u0[i].last_reg;
405 u1[i].invalidated = u0[i].invalidated;
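/*
 * Editor's illustration of the in-place conversion above: because the new
 * uuid_entry is wider than uuid_entry_v0 and both share the same buffer,
 * the copy must walk from the last element down to the first so that no
 * source entry is overwritten before it has been read. Userspace sketch
 * with hypothetical record types:
 */
#if 0
#include <assert.h>
#include <stdint.h>
#include <string.h>

struct example_old { uint8_t a; };
struct example_new { uint8_t a; uint8_t pad[3]; };

static void example_widen_in_place(void *buf, int n)
{
	struct example_old *o = buf;
	struct example_new *w = buf;
	int i;

	for (i = n - 1; i >= 0; --i) {
		uint8_t a = o[i].a;	/* read before the slot is clobbered */

		memset(&w[i], 0, sizeof(w[i]));
		w[i].a = a;
	}
}

static void example(void)
{
	uint8_t buf[4 * sizeof(struct example_new)] = { 1, 2, 3, 4 };

	example_widen_in_place(buf, 4);
	assert(((struct example_new *)buf)[0].a == 1);
	assert(((struct example_new *)buf)[3].a == 4);
}
#endif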
415 static int __uuid_write(struct cache_set *c)
420 closure_init_stack(&cl);
421 lockdep_assert_held(&bch_register_lock);
423 if (bch_bucket_alloc_set(c, RESERVE_BTREE, &k.key, 1, true))
426 SET_KEY_SIZE(&k.key, c->sb.bucket_size);
427 uuid_io(c, REQ_OP_WRITE, 0, &k.key, &cl);
430 bkey_copy(&c->uuid_bucket, &k.key);
435 int bch_uuid_write(struct cache_set *c)
437 int ret = __uuid_write(c);
440 bch_journal_meta(c, NULL);
445 static struct uuid_entry *uuid_find(struct cache_set *c, const char *uuid)
447 struct uuid_entry *u;
450 u < c->uuids + c->nr_uuids; u++)
451 if (!memcmp(u->uuid, uuid, 16))
457 static struct uuid_entry *uuid_find_empty(struct cache_set *c)
459 static const char zero_uuid[16] = "\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0";
461 return uuid_find(c, zero_uuid);
465 * Bucket priorities/gens:
467 * For each bucket, we store on disk its
471 * See alloc.c for an explanation of the gen. The priority is used to implement
472 * lru (and in the future other) cache replacement policies; for most purposes
473 * it's just an opaque integer.
475 * The gens and the priorities don't have a whole lot to do with each other, and
476 * it's actually the gens that must be written out at specific times - it's no
477 * big deal if the priorities don't get written, if we lose them we just reuse
478 * buckets in suboptimal order.
480 * On disk they're stored in a packed array, in as many buckets as are required
481 * to fit them all. The buckets we use to store them form a list; the journal
482 * header points to the first bucket, the first bucket points to the second
485 * This code is used by the allocation code; periodically (whenever it runs out
486 * of buckets to allocate from) the allocation code will invalidate some
487 * buckets, but it can't use those buckets until their new gens are safely on
491 static void prio_endio(struct bio *bio)
493 struct cache *ca = bio->bi_private;
495 cache_set_err_on(bio->bi_status, ca->set, "accessing priorities");
496 bch_bbio_free(bio, ca->set);
497 closure_put(&ca->prio);
500 static void prio_io(struct cache *ca, uint64_t bucket, int op,
501 unsigned long op_flags)
503 struct closure *cl = &ca->prio;
504 struct bio *bio = bch_bbio_alloc(ca->set);
506 closure_init_stack(cl);
508 bio->bi_iter.bi_sector = bucket * ca->sb.bucket_size;
509 bio_set_dev(bio, ca->bdev);
510 bio->bi_iter.bi_size = bucket_bytes(ca);
512 bio->bi_end_io = prio_endio;
513 bio->bi_private = ca;
514 bio_set_op_attrs(bio, op, REQ_SYNC|REQ_META|op_flags);
515 bch_bio_map(bio, ca->disk_buckets);
517 closure_bio_submit(ca->set, bio, &ca->prio);
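/*
 * Editor's sketch of the packed prio/gen layout described in the comment
 * above, which prio_io() reads and writes one bucket at a time: each prio
 * bucket carries a small header whose next_bucket field chains to the next
 * bucket in the list, followed by as many {prio, gen} pairs as fit. The
 * field names and sizes below are an approximation of struct prio_set /
 * struct bucket_disk, not the authoritative on-disk format.
 */
#if 0
#include <stdint.h>

struct example_bucket_disk {
	uint16_t prio;
	uint8_t  gen;
} __attribute__((packed));

struct example_prio_set {
	uint64_t csum;		/* checksum over everything after this field */
	uint64_t magic;
	uint64_t seq;
	uint32_t version;
	uint32_t pad;
	uint64_t next_bucket;	/* bucket number of the next prio bucket */
	struct example_bucket_disk data[];
};

/* How many {prio, gen} pairs fit in one bucket of bucket_bytes bytes. */
static unsigned int example_prios_per_bucket(unsigned int bucket_bytes)
{
	return (bucket_bytes - sizeof(struct example_prio_set)) /
	       sizeof(struct example_bucket_disk);
}
#endif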
521 void bch_prio_write(struct cache *ca)
527 closure_init_stack(&cl);
529 lockdep_assert_held(&ca->set->bucket_lock);
531 ca->disk_buckets->seq++;
533 atomic_long_add(ca->sb.bucket_size * prio_buckets(ca),
534 &ca->meta_sectors_written);
536 //pr_debug("free %zu, free_inc %zu, unused %zu", fifo_used(&ca->free),
537 // fifo_used(&ca->free_inc), fifo_used(&ca->unused));
539 for (i = prio_buckets(ca) - 1; i >= 0; --i) {
541 struct prio_set *p = ca->disk_buckets;
542 struct bucket_disk *d = p->data;
543 struct bucket_disk *end = d + prios_per_bucket(ca);
545 for (b = ca->buckets + i * prios_per_bucket(ca);
546 b < ca->buckets + ca->sb.nbuckets && d < end;
548 d->prio = cpu_to_le16(b->prio);
552 p->next_bucket = ca->prio_buckets[i + 1];
553 p->magic = pset_magic(&ca->sb);
554 p->csum = bch_crc64(&p->magic, bucket_bytes(ca) - 8);
556 bucket = bch_bucket_alloc(ca, RESERVE_PRIO, true);
557 BUG_ON(bucket == -1);
559 mutex_unlock(&ca->set->bucket_lock);
560 prio_io(ca, bucket, REQ_OP_WRITE, 0);
561 mutex_lock(&ca->set->bucket_lock);
563 ca->prio_buckets[i] = bucket;
564 atomic_dec_bug(&ca->buckets[bucket].pin);
567 mutex_unlock(&ca->set->bucket_lock);
569 bch_journal_meta(ca->set, &cl);
572 mutex_lock(&ca->set->bucket_lock);
575 * We don't want the old priorities to be garbage collected until after we've
576 * finished writing the new ones and they've been journalled
578 for (i = 0; i < prio_buckets(ca); i++) {
579 if (ca->prio_last_buckets[i])
580 __bch_bucket_free(ca,
581 &ca->buckets[ca->prio_last_buckets[i]]);
583 ca->prio_last_buckets[i] = ca->prio_buckets[i];
587 static void prio_read(struct cache *ca, uint64_t bucket)
589 struct prio_set *p = ca->disk_buckets;
590 struct bucket_disk *d = p->data + prios_per_bucket(ca), *end = d;
592 unsigned int bucket_nr = 0;
594 for (b = ca->buckets;
595 b < ca->buckets + ca->sb.nbuckets;
598 ca->prio_buckets[bucket_nr] = bucket;
599 ca->prio_last_buckets[bucket_nr] = bucket;
602 prio_io(ca, bucket, REQ_OP_READ, 0);
605 bch_crc64(&p->magic, bucket_bytes(ca) - 8))
606 pr_warn("bad csum reading priorities");
608 if (p->magic != pset_magic(&ca->sb))
609 pr_warn("bad magic reading priorities");
611 bucket = p->next_bucket;
615 b->prio = le16_to_cpu(d->prio);
616 b->gen = b->last_gc = d->gen;
622 static int open_dev(struct block_device *b, fmode_t mode)
624 struct bcache_device *d = b->bd_disk->private_data;
626 if (test_bit(BCACHE_DEV_CLOSING, &d->flags))
633 static void release_dev(struct gendisk *b, fmode_t mode)
635 struct bcache_device *d = b->private_data;
640 static int ioctl_dev(struct block_device *b, fmode_t mode,
641 unsigned int cmd, unsigned long arg)
643 struct bcache_device *d = b->bd_disk->private_data;
644 struct cached_dev *dc = container_of(d, struct cached_dev, disk);
649 return d->ioctl(d, mode, cmd, arg);
652 static const struct block_device_operations bcache_ops = {
654 .release = release_dev,
656 .owner = THIS_MODULE,
659 void bcache_device_stop(struct bcache_device *d)
661 if (!test_and_set_bit(BCACHE_DEV_CLOSING, &d->flags))
662 closure_queue(&d->cl);
665 static void bcache_device_unlink(struct bcache_device *d)
667 lockdep_assert_held(&bch_register_lock);
669 if (d->c && !test_and_set_bit(BCACHE_DEV_UNLINK_DONE, &d->flags)) {
673 sysfs_remove_link(&d->c->kobj, d->name);
674 sysfs_remove_link(&d->kobj, "cache");
676 for_each_cache(ca, d->c, i)
677 bd_unlink_disk_holder(ca->bdev, d->disk);
681 static void bcache_device_link(struct bcache_device *d, struct cache_set *c,
687 for_each_cache(ca, d->c, i)
688 bd_link_disk_holder(ca->bdev, d->disk);
690 snprintf(d->name, BCACHEDEVNAME_SIZE,
691 "%s%u", name, d->id);
693 WARN(sysfs_create_link(&d->kobj, &c->kobj, "cache") ||
694 sysfs_create_link(&c->kobj, &d->kobj, d->name),
695 "Couldn't create device <-> cache set symlinks");
697 clear_bit(BCACHE_DEV_UNLINK_DONE, &d->flags);
700 static void bcache_device_detach(struct bcache_device *d)
702 lockdep_assert_held(&bch_register_lock);
704 atomic_dec(&d->c->attached_dev_nr);
706 if (test_bit(BCACHE_DEV_DETACHING, &d->flags)) {
707 struct uuid_entry *u = d->c->uuids + d->id;
709 SET_UUID_FLASH_ONLY(u, 0);
710 memcpy(u->uuid, invalid_uuid, 16);
711 u->invalidated = cpu_to_le32((u32)ktime_get_real_seconds());
712 bch_uuid_write(d->c);
715 bcache_device_unlink(d);
717 d->c->devices[d->id] = NULL;
718 closure_put(&d->c->caching);
722 static void bcache_device_attach(struct bcache_device *d, struct cache_set *c,
729 if (id >= c->devices_max_used)
730 c->devices_max_used = id + 1;
732 closure_get(&c->caching);
735 static inline int first_minor_to_idx(int first_minor)
737 return (first_minor/BCACHE_MINORS);
740 static inline int idx_to_first_minor(int idx)
742 return (idx * BCACHE_MINORS);
745 static void bcache_device_free(struct bcache_device *d)
747 lockdep_assert_held(&bch_register_lock);
749 pr_info("%s stopped", d->disk->disk_name);
752 bcache_device_detach(d);
753 if (d->disk && d->disk->flags & GENHD_FL_UP)
754 del_gendisk(d->disk);
755 if (d->disk && d->disk->queue)
756 blk_cleanup_queue(d->disk->queue);
758 ida_simple_remove(&bcache_device_idx,
759 first_minor_to_idx(d->disk->first_minor));
763 bioset_exit(&d->bio_split);
764 kvfree(d->full_dirty_stripes);
765 kvfree(d->stripe_sectors_dirty);
767 closure_debug_destroy(&d->cl);
770 static int bcache_device_init(struct bcache_device *d, unsigned int block_size,
773 struct request_queue *q;
774 const size_t max_stripes = min_t(size_t, INT_MAX,
775 SIZE_MAX / sizeof(atomic_t));
780 d->stripe_size = 1 << 31;
782 d->nr_stripes = DIV_ROUND_UP_ULL(sectors, d->stripe_size);
784 if (!d->nr_stripes || d->nr_stripes > max_stripes) {
785 pr_err("nr_stripes too large or invalid: %u (start sector beyond end of disk?)",
786 (unsigned int)d->nr_stripes);
790 n = d->nr_stripes * sizeof(atomic_t);
791 d->stripe_sectors_dirty = kvzalloc(n, GFP_KERNEL);
792 if (!d->stripe_sectors_dirty)
795 n = BITS_TO_LONGS(d->nr_stripes) * sizeof(unsigned long);
796 d->full_dirty_stripes = kvzalloc(n, GFP_KERNEL);
797 if (!d->full_dirty_stripes)
800 idx = ida_simple_get(&bcache_device_idx, 0,
801 BCACHE_DEVICE_IDX_MAX, GFP_KERNEL);
805 if (bioset_init(&d->bio_split, 4, offsetof(struct bbio, bio),
806 BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER))
809 d->disk = alloc_disk(BCACHE_MINORS);
813 set_capacity(d->disk, sectors);
814 snprintf(d->disk->disk_name, DISK_NAME_LEN, "bcache%i", idx);
816 d->disk->major = bcache_major;
817 d->disk->first_minor = idx_to_first_minor(idx);
818 d->disk->fops = &bcache_ops;
819 d->disk->private_data = d;
821 q = blk_alloc_queue(GFP_KERNEL);
825 blk_queue_make_request(q, NULL);
828 q->backing_dev_info->congested_data = d;
829 q->limits.max_hw_sectors = UINT_MAX;
830 q->limits.max_sectors = UINT_MAX;
831 q->limits.max_segment_size = UINT_MAX;
832 q->limits.max_segments = BIO_MAX_PAGES;
833 blk_queue_max_discard_sectors(q, UINT_MAX);
834 q->limits.discard_granularity = 512;
835 q->limits.io_min = block_size;
836 q->limits.logical_block_size = block_size;
837 q->limits.physical_block_size = block_size;
838 blk_queue_flag_set(QUEUE_FLAG_NONROT, d->disk->queue);
839 blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, d->disk->queue);
840 blk_queue_flag_set(QUEUE_FLAG_DISCARD, d->disk->queue);
842 blk_queue_write_cache(q, true, true);
847 ida_simple_remove(&bcache_device_idx, idx);
854 static void calc_cached_dev_sectors(struct cache_set *c)
856 uint64_t sectors = 0;
857 struct cached_dev *dc;
859 list_for_each_entry(dc, &c->cached_devs, list)
860 sectors += bdev_sectors(dc->bdev);
862 c->cached_dev_sectors = sectors;
865 #define BACKING_DEV_OFFLINE_TIMEOUT 5
866 static int cached_dev_status_update(void *arg)
868 struct cached_dev *dc = arg;
869 struct request_queue *q;
872 * If this status update kthread is being stopped from outside, quit directly.
873 * dc->io_disable might be set via the sysfs interface, so check it
876 while (!kthread_should_stop() && !dc->io_disable) {
877 q = bdev_get_queue(dc->bdev);
878 if (blk_queue_dying(q))
879 dc->offline_seconds++;
881 dc->offline_seconds = 0;
883 if (dc->offline_seconds >= BACKING_DEV_OFFLINE_TIMEOUT) {
884 pr_err("%s: device offline for %d seconds",
885 dc->backing_dev_name,
886 BACKING_DEV_OFFLINE_TIMEOUT);
887 pr_err("%s: disable I/O request due to backing "
888 "device offline", dc->disk.name);
889 dc->io_disable = true;
890 /* let others know earlier that io_disable is true */
892 bcache_device_stop(&dc->disk);
895 schedule_timeout_interruptible(HZ);
898 wait_for_kthread_stop();
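/*
 * Editor's note: cached_dev_status_update() above is the usual polling
 * kthread pattern - started with kthread_run(), polling once per second,
 * and exiting when kthread_stop() is called (or, here, when io_disable is
 * set). A stripped-down sketch of that pattern using only generic kernel
 * APIs; example_* names are hypothetical.
 */
#if 0
#include <linux/kthread.h>
#include <linux/sched.h>

static int example_poll_thread(void *arg)
{
	while (!kthread_should_stop()) {
		/* ... check the monitored condition here ... */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}

/* The caller keeps the task_struct and later calls kthread_stop() on it. */
static struct task_struct *example_start_poller(void *data)
{
	return kthread_run(example_poll_thread, data, "example_poller");
}
#endif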
903 void bch_cached_dev_run(struct cached_dev *dc)
905 struct bcache_device *d = &dc->disk;
906 char buf[SB_LABEL_SIZE + 1];
909 kasprintf(GFP_KERNEL, "CACHED_UUID=%pU", dc->sb.uuid),
914 memcpy(buf, dc->sb.label, SB_LABEL_SIZE);
915 buf[SB_LABEL_SIZE] = '\0';
916 env[2] = kasprintf(GFP_KERNEL, "CACHED_LABEL=%s", buf);
918 if (atomic_xchg(&dc->running, 1)) {
925 BDEV_STATE(&dc->sb) != BDEV_STATE_NONE) {
928 closure_init_stack(&cl);
930 SET_BDEV_STATE(&dc->sb, BDEV_STATE_STALE);
931 bch_write_bdev_super(dc, &cl);
936 bd_link_disk_holder(dc->bdev, dc->disk.disk);
937 /* Won't show up in the uevent file; use udevadm monitor -e instead.
938  * Only class/kset properties are persistent. */
939 kobject_uevent_env(&disk_to_dev(d->disk)->kobj, KOBJ_CHANGE, env);
943 if (sysfs_create_link(&d->kobj, &disk_to_dev(d->disk)->kobj, "dev") ||
944 sysfs_create_link(&disk_to_dev(d->disk)->kobj, &d->kobj, "bcache"))
945 pr_debug("error creating sysfs link");
947 dc->status_update_thread = kthread_run(cached_dev_status_update,
948 dc, "bcache_status_update");
949 if (IS_ERR(dc->status_update_thread)) {
950 pr_warn("failed to create bcache_status_update kthread, "
951 "continue to run without monitoring backing "
957 * If BCACHE_DEV_RATE_DW_RUNNING is set, it means routine of the delayed
958 * work dc->writeback_rate_update is running. Wait until the routine
959 * quits (BCACHE_DEV_RATE_DW_RUNNING is clear), then continue to
960 * cancel it. If BCACHE_DEV_RATE_DW_RUNNING is not clear after time_out
961 * seconds, give up waiting here and continue to cancel it too.
963 static void cancel_writeback_rate_update_dwork(struct cached_dev *dc)
965 int time_out = WRITEBACK_RATE_UPDATE_SECS_MAX * HZ;
968 if (!test_bit(BCACHE_DEV_RATE_DW_RUNNING,
972 schedule_timeout_interruptible(1);
973 } while (time_out > 0);
976 pr_warn("give up waiting for dc->writeback_write_update to quit");
978 cancel_delayed_work_sync(&dc->writeback_rate_update);
981 static void cached_dev_detach_finish(struct work_struct *w)
983 struct cached_dev *dc = container_of(w, struct cached_dev, detach);
986 closure_init_stack(&cl);
988 BUG_ON(!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags));
989 BUG_ON(refcount_read(&dc->count));
991 mutex_lock(&bch_register_lock);
993 if (test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))
994 cancel_writeback_rate_update_dwork(dc);
996 if (!IS_ERR_OR_NULL(dc->writeback_thread)) {
997 kthread_stop(dc->writeback_thread);
998 dc->writeback_thread = NULL;
1001 memset(&dc->sb.set_uuid, 0, 16);
1002 SET_BDEV_STATE(&dc->sb, BDEV_STATE_NONE);
1004 bch_write_bdev_super(dc, &cl);
1007 bcache_device_detach(&dc->disk);
1008 list_move(&dc->list, &uncached_devices);
1010 clear_bit(BCACHE_DEV_DETACHING, &dc->disk.flags);
1011 clear_bit(BCACHE_DEV_UNLINK_DONE, &dc->disk.flags);
1013 mutex_unlock(&bch_register_lock);
1015 pr_info("Caching disabled for %s", dc->backing_dev_name);
1017 /* Drop ref we took in cached_dev_detach() */
1018 closure_put(&dc->disk.cl);
1021 void bch_cached_dev_detach(struct cached_dev *dc)
1023 lockdep_assert_held(&bch_register_lock);
1025 if (test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags))
1028 if (test_and_set_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
1032 * Block the device from being closed and freed until we're finished
1035 closure_get(&dc->disk.cl);
1037 bch_writeback_queue(dc);
1042 int bch_cached_dev_attach(struct cached_dev *dc, struct cache_set *c,
1045 uint32_t rtime = cpu_to_le32((u32)ktime_get_real_seconds());
1046 struct uuid_entry *u;
1047 struct cached_dev *exist_dc, *t;
1049 if ((set_uuid && memcmp(set_uuid, c->sb.set_uuid, 16)) ||
1050 (!set_uuid && memcmp(dc->sb.set_uuid, c->sb.set_uuid, 16)))
1054 pr_err("Can't attach %s: already attached",
1055 dc->backing_dev_name);
1059 if (test_bit(CACHE_SET_STOPPING, &c->flags)) {
1060 pr_err("Can't attach %s: shutting down",
1061 dc->backing_dev_name);
1065 if (dc->sb.block_size < c->sb.block_size) {
1067 pr_err("Couldn't attach %s: block size less than set's block size",
1068 dc->backing_dev_name);
1072 /* Check whether already attached */
1073 list_for_each_entry_safe(exist_dc, t, &c->cached_devs, list) {
1074 if (!memcmp(dc->sb.uuid, exist_dc->sb.uuid, 16)) {
1075 pr_err("Tried to attach %s but duplicate UUID already attached",
1076 dc->backing_dev_name);
1082 u = uuid_find(c, dc->sb.uuid);
1085 (BDEV_STATE(&dc->sb) == BDEV_STATE_STALE ||
1086 BDEV_STATE(&dc->sb) == BDEV_STATE_NONE)) {
1087 memcpy(u->uuid, invalid_uuid, 16);
1088 u->invalidated = cpu_to_le32((u32)ktime_get_real_seconds());
1093 if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
1094 pr_err("Couldn't find uuid for %s in set",
1095 dc->backing_dev_name);
1099 u = uuid_find_empty(c);
1101 pr_err("Not caching %s, no room for UUID",
1102 dc->backing_dev_name);
1107 /* Deadlocks since we're called via sysfs...
1108 sysfs_remove_file(&dc->kobj, &sysfs_attach);
1111 if (bch_is_zero(u->uuid, 16)) {
1114 closure_init_stack(&cl);
1116 memcpy(u->uuid, dc->sb.uuid, 16);
1117 memcpy(u->label, dc->sb.label, SB_LABEL_SIZE);
1118 u->first_reg = u->last_reg = rtime;
1121 memcpy(dc->sb.set_uuid, c->sb.set_uuid, 16);
1122 SET_BDEV_STATE(&dc->sb, BDEV_STATE_CLEAN);
1124 bch_write_bdev_super(dc, &cl);
1127 u->last_reg = rtime;
1131 bcache_device_attach(&dc->disk, c, u - c->uuids);
1132 list_move(&dc->list, &c->cached_devs);
1133 calc_cached_dev_sectors(c);
1137 * dc->c must be set before dc->count != 0 - paired with the mb in
1140 refcount_set(&dc->count, 1);
1142 /* Block writeback thread, but spawn it */
1143 down_write(&dc->writeback_lock);
1144 if (bch_cached_dev_writeback_start(dc)) {
1145 up_write(&dc->writeback_lock);
1149 if (BDEV_STATE(&dc->sb) == BDEV_STATE_DIRTY) {
1150 bch_sectors_dirty_init(&dc->disk);
1151 atomic_set(&dc->has_dirty, 1);
1152 bch_writeback_queue(dc);
1155 bch_cached_dev_run(dc);
1156 bcache_device_link(&dc->disk, c, "bdev");
1157 atomic_inc(&c->attached_dev_nr);
1159 /* Allow the writeback thread to proceed */
1160 up_write(&dc->writeback_lock);
1162 pr_info("Caching %s as %s on set %pU",
1163 dc->backing_dev_name,
1164 dc->disk.disk->disk_name,
1165 dc->disk.c->sb.set_uuid);
1169 void bch_cached_dev_release(struct kobject *kobj)
1171 struct cached_dev *dc = container_of(kobj, struct cached_dev,
1174 module_put(THIS_MODULE);
1177 static void cached_dev_free(struct closure *cl)
1179 struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
1181 mutex_lock(&bch_register_lock);
1183 if (test_and_clear_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))
1184 cancel_writeback_rate_update_dwork(dc);
1186 if (!IS_ERR_OR_NULL(dc->writeback_thread))
1187 kthread_stop(dc->writeback_thread);
1188 if (dc->writeback_write_wq)
1189 destroy_workqueue(dc->writeback_write_wq);
1190 if (!IS_ERR_OR_NULL(dc->status_update_thread))
1191 kthread_stop(dc->status_update_thread);
1193 if (atomic_read(&dc->running))
1194 bd_unlink_disk_holder(dc->bdev, dc->disk.disk);
1195 bcache_device_free(&dc->disk);
1196 list_del(&dc->list);
1198 mutex_unlock(&bch_register_lock);
1200 if (!IS_ERR_OR_NULL(dc->bdev))
1201 blkdev_put(dc->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
1203 wake_up(&unregister_wait);
1205 kobject_put(&dc->disk.kobj);
1208 static void cached_dev_flush(struct closure *cl)
1210 struct cached_dev *dc = container_of(cl, struct cached_dev, disk.cl);
1211 struct bcache_device *d = &dc->disk;
1213 mutex_lock(&bch_register_lock);
1214 bcache_device_unlink(d);
1215 mutex_unlock(&bch_register_lock);
1217 bch_cache_accounting_destroy(&dc->accounting);
1218 kobject_del(&d->kobj);
1220 continue_at(cl, cached_dev_free, system_wq);
1223 static int cached_dev_init(struct cached_dev *dc, unsigned int block_size)
1227 struct request_queue *q = bdev_get_queue(dc->bdev);
1229 __module_get(THIS_MODULE);
1230 INIT_LIST_HEAD(&dc->list);
1231 closure_init(&dc->disk.cl, NULL);
1232 set_closure_fn(&dc->disk.cl, cached_dev_flush, system_wq);
1233 kobject_init(&dc->disk.kobj, &bch_cached_dev_ktype);
1234 INIT_WORK(&dc->detach, cached_dev_detach_finish);
1235 sema_init(&dc->sb_write_mutex, 1);
1236 INIT_LIST_HEAD(&dc->io_lru);
1237 spin_lock_init(&dc->io_lock);
1238 bch_cache_accounting_init(&dc->accounting, &dc->disk.cl);
1240 dc->sequential_cutoff = 4 << 20;
1242 for (io = dc->io; io < dc->io + RECENT_IO; io++) {
1243 list_add(&io->lru, &dc->io_lru);
1244 hlist_add_head(&io->hash, dc->io_hash + RECENT_IO);
1247 dc->disk.stripe_size = q->limits.io_opt >> 9;
1249 if (dc->disk.stripe_size)
1250 dc->partial_stripes_expensive =
1251 q->limits.raid_partial_stripes_expensive;
1253 ret = bcache_device_init(&dc->disk, block_size,
1254 dc->bdev->bd_part->nr_sects - dc->sb.data_offset);
1258 dc->disk.disk->queue->backing_dev_info->ra_pages =
1259 max(dc->disk.disk->queue->backing_dev_info->ra_pages,
1260 q->backing_dev_info->ra_pages);
1262 atomic_set(&dc->io_errors, 0);
1263 dc->io_disable = false;
1264 dc->error_limit = DEFAULT_CACHED_DEV_ERROR_LIMIT;
1265 /* default to auto */
1266 dc->stop_when_cache_set_failed = BCH_CACHED_DEV_STOP_AUTO;
1268 bch_cached_dev_request_init(dc);
1269 bch_cached_dev_writeback_init(dc);
1273 /* Cached device - bcache superblock */
1275 static void register_bdev(struct cache_sb *sb, struct page *sb_page,
1276 struct block_device *bdev,
1277 struct cached_dev *dc)
1279 const char *err = "cannot allocate memory";
1280 struct cache_set *c;
1282 bdevname(bdev, dc->backing_dev_name);
1283 memcpy(&dc->sb, sb, sizeof(struct cache_sb));
1285 dc->bdev->bd_holder = dc;
1287 bio_init(&dc->sb_bio, dc->sb_bio.bi_inline_vecs, 1);
1288 bio_first_bvec_all(&dc->sb_bio)->bv_page = sb_page;
1292 if (cached_dev_init(dc, sb->block_size << 9))
1295 err = "error creating kobject";
1296 if (kobject_add(&dc->disk.kobj, &part_to_dev(bdev->bd_part)->kobj,
1299 if (bch_cache_accounting_add_kobjs(&dc->accounting, &dc->disk.kobj))
1302 pr_info("registered backing device %s", dc->backing_dev_name);
1304 list_add(&dc->list, &uncached_devices);
1305 /* attach to a matched cache set if it exists */
1306 list_for_each_entry(c, &bch_cache_sets, list)
1307 bch_cached_dev_attach(dc, c, NULL);
1309 if (BDEV_STATE(&dc->sb) == BDEV_STATE_NONE ||
1310 BDEV_STATE(&dc->sb) == BDEV_STATE_STALE)
1311 bch_cached_dev_run(dc);
1315 pr_notice("error %s: %s", dc->backing_dev_name, err);
1316 bcache_device_stop(&dc->disk);
1319 /* Flash only volumes */
1321 void bch_flash_dev_release(struct kobject *kobj)
1323 struct bcache_device *d = container_of(kobj, struct bcache_device,
1328 static void flash_dev_free(struct closure *cl)
1330 struct bcache_device *d = container_of(cl, struct bcache_device, cl);
1332 mutex_lock(&bch_register_lock);
1333 atomic_long_sub(bcache_dev_sectors_dirty(d),
1334 &d->c->flash_dev_dirty_sectors);
1335 bcache_device_free(d);
1336 mutex_unlock(&bch_register_lock);
1337 kobject_put(&d->kobj);
1340 static void flash_dev_flush(struct closure *cl)
1342 struct bcache_device *d = container_of(cl, struct bcache_device, cl);
1344 mutex_lock(&bch_register_lock);
1345 bcache_device_unlink(d);
1346 mutex_unlock(&bch_register_lock);
1347 kobject_del(&d->kobj);
1348 continue_at(cl, flash_dev_free, system_wq);
1351 static int flash_dev_run(struct cache_set *c, struct uuid_entry *u)
1353 struct bcache_device *d = kzalloc(sizeof(struct bcache_device),
1358 closure_init(&d->cl, NULL);
1359 set_closure_fn(&d->cl, flash_dev_flush, system_wq);
1361 kobject_init(&d->kobj, &bch_flash_dev_ktype);
1363 if (bcache_device_init(d, block_bytes(c), u->sectors))
1366 bcache_device_attach(d, c, u - c->uuids);
1367 bch_sectors_dirty_init(d);
1368 bch_flash_dev_request_init(d);
1371 if (kobject_add(&d->kobj, &disk_to_dev(d->disk)->kobj, "bcache"))
1374 bcache_device_link(d, c, "volume");
1378 kobject_put(&d->kobj);
1382 static int flash_devs_run(struct cache_set *c)
1385 struct uuid_entry *u;
1388 u < c->uuids + c->nr_uuids && !ret;
1390 if (UUID_FLASH_ONLY(u))
1391 ret = flash_dev_run(c, u);
1396 int bch_flash_dev_create(struct cache_set *c, uint64_t size)
1398 struct uuid_entry *u;
1400 if (test_bit(CACHE_SET_STOPPING, &c->flags))
1403 if (!test_bit(CACHE_SET_RUNNING, &c->flags))
1406 u = uuid_find_empty(c);
1408 pr_err("Can't create volume, no room for UUID");
1412 get_random_bytes(u->uuid, 16);
1413 memset(u->label, 0, 32);
1414 u->first_reg = u->last_reg = cpu_to_le32((u32)ktime_get_real_seconds());
1416 SET_UUID_FLASH_ONLY(u, 1);
1417 u->sectors = size >> 9;
1421 return flash_dev_run(c, u);
1424 bool bch_cached_dev_error(struct cached_dev *dc)
1426 struct cache_set *c;
1428 if (!dc || test_bit(BCACHE_DEV_CLOSING, &dc->disk.flags))
1431 dc->io_disable = true;
1432 /* make others know io_disable is true earlier */
1435 pr_err("stop %s: too many IO errors on backing device %s\n",
1436 dc->disk.disk->disk_name, dc->backing_dev_name);
1439 * If the cached device is still attached to a cache set, then
1440 * even though dc->io_disable is true and no more I/O requests
1441 * are accepted, internal I/O on the cache device (writeback scan
1442 * or garbage collection) may still prevent the bcache device from
1443 * being stopped. So here CACHE_SET_IO_DISABLE should also be
1444 * set in c->flags, so that internal I/O to the cache
1445 * device is rejected and stopped immediately.
1446 * If c is NULL, the bcache device is not attached to any cache
1447 * set, and there is no CACHE_SET_IO_DISABLE bit to set.
1450 if (c && test_and_set_bit(CACHE_SET_IO_DISABLE, &c->flags))
1451 pr_info("CACHE_SET_IO_DISABLE already set");
1453 bcache_device_stop(&dc->disk);
1460 bool bch_cache_set_error(struct cache_set *c, const char *fmt, ...)
1464 if (c->on_error != ON_ERROR_PANIC &&
1465 test_bit(CACHE_SET_STOPPING, &c->flags))
1468 if (test_and_set_bit(CACHE_SET_IO_DISABLE, &c->flags))
1469 pr_info("CACHE_SET_IO_DISABLE already set");
1471 /* XXX: we can be called from atomic context
1472 acquire_console_sem();
1475 printk(KERN_ERR "bcache: error on %pU: ", c->sb.set_uuid);
1477 va_start(args, fmt);
1481 printk(", disabling caching\n");
1483 if (c->on_error == ON_ERROR_PANIC)
1484 panic("panic forced after error\n");
1486 bch_cache_set_unregister(c);
1490 void bch_cache_set_release(struct kobject *kobj)
1492 struct cache_set *c = container_of(kobj, struct cache_set, kobj);
1495 module_put(THIS_MODULE);
1498 static void cache_set_free(struct closure *cl)
1500 struct cache_set *c = container_of(cl, struct cache_set, cl);
1504 if (!IS_ERR_OR_NULL(c->debug))
1505 debugfs_remove(c->debug);
1507 bch_open_buckets_free(c);
1508 bch_btree_cache_free(c);
1509 bch_journal_free(c);
1511 for_each_cache(ca, c, i)
1514 c->cache[ca->sb.nr_this_dev] = NULL;
1515 kobject_put(&ca->kobj);
1518 bch_bset_sort_state_free(&c->sort);
1519 free_pages((unsigned long) c->uuids, ilog2(bucket_pages(c)));
1521 if (c->moving_gc_wq)
1522 destroy_workqueue(c->moving_gc_wq);
1523 bioset_exit(&c->bio_split);
1524 mempool_exit(&c->fill_iter);
1525 mempool_exit(&c->bio_meta);
1526 mempool_exit(&c->search);
1529 mutex_lock(&bch_register_lock);
1531 mutex_unlock(&bch_register_lock);
1533 pr_info("Cache set %pU unregistered", c->sb.set_uuid);
1534 wake_up(&unregister_wait);
1536 closure_debug_destroy(&c->cl);
1537 kobject_put(&c->kobj);
1540 static void cache_set_flush(struct closure *cl)
1542 struct cache_set *c = container_of(cl, struct cache_set, caching);
1547 bch_cache_accounting_destroy(&c->accounting);
1549 kobject_put(&c->internal);
1550 kobject_del(&c->kobj);
1553 kthread_stop(c->gc_thread);
1555 if (!IS_ERR_OR_NULL(c->root))
1556 list_add(&c->root->list, &c->btree_cache);
1558 /* Should skip this if we're unregistering because of an error */
1559 list_for_each_entry(b, &c->btree_cache, list) {
1560 mutex_lock(&b->write_lock);
1561 if (btree_node_dirty(b))
1562 __bch_btree_node_write(b, NULL);
1563 mutex_unlock(&b->write_lock);
1566 for_each_cache(ca, c, i)
1567 if (ca->alloc_thread)
1568 kthread_stop(ca->alloc_thread);
1570 if (c->journal.cur) {
1571 cancel_delayed_work_sync(&c->journal.work);
1572 /* flush last journal entry if needed */
1573 c->journal.work.work.func(&c->journal.work.work);
1580 * This function is only called when CACHE_SET_IO_DISABLE is set, which means
1581 * the cache set is unregistering due to too many I/O errors. In this condition,
1582 * the bcache device might be stopped; that depends on the stop_when_cache_set_failed
1583 * value and on whether the broken cache holds dirty data:
1585 * dc->stop_when_cache_set_failed    dc->has_dirty    stop bcache device
1586 *  BCH_CACHED_DEV_STOP_AUTO         0                NO
1587 *  BCH_CACHED_DEV_STOP_AUTO         1                YES
1588 *  BCH_CACHED_DEV_STOP_ALWAYS       0                YES
1589 *  BCH_CACHED_DEV_STOP_ALWAYS       1                YES
1591 * The expected behavior is: if stop_when_cache_set_failed is configured to
1592 * "auto" via the sysfs interface, the bcache device will not be stopped if
1593 * the backing device has no dirty data on the broken cache device.
1595 static void conditional_stop_bcache_device(struct cache_set *c,
1596 struct bcache_device *d,
1597 struct cached_dev *dc)
1599 if (dc->stop_when_cache_set_failed == BCH_CACHED_DEV_STOP_ALWAYS) {
1600 pr_warn("stop_when_cache_set_failed of %s is \"always\", stop it for failed cache set %pU.",
1601 d->disk->disk_name, c->sb.set_uuid);
1602 bcache_device_stop(d);
1603 } else if (atomic_read(&dc->has_dirty)) {
1605 * dc->stop_when_cache_set_failed == BCH_CACHED_DEV_STOP_AUTO
1606 * and dc->has_dirty == 1
1608 pr_warn("stop_when_cache_set_failed of %s is \"auto\" and cache is dirty, stop it to avoid potential data corruption.",
1609 d->disk->disk_name);
1611 * There might be a small time gap during which the cache set
1612 * is released but the bcache device is not. Inside this time
1613 * gap, regular I/O requests will go directly to the backing
1614 * device, since no cache set is attached. In writeback mode
1615 * with a dirty cache, this may also introduce inconsistent
1616 * data.
1617 * Therefore before calling bcache_device_stop() due
1618 * to a broken cache device, dc->io_disable should be
1619 * explicitly set to true.
1621 dc->io_disable = true;
1622 /* make others know io_disable is true earlier */
1624 bcache_device_stop(d);
1627 * dc->stop_when_cache_set_failed == BCH_CACHED_DEV_STOP_AUTO
1628 * and dc->has_dirty == 0
1630 pr_warn("stop_when_cache_set_failed of %s is \"auto\" and cache is clean, keep it alive.",
1631 d->disk->disk_name);
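/*
 * Editor's check of the decision table documented above
 * conditional_stop_bcache_device(): the bcache device is stopped whenever
 * the policy is "always", or when the policy is "auto" and the cache holds
 * dirty data. Hypothetical userspace encoding of that table:
 */
#if 0
#include <assert.h>
#include <stdbool.h>

enum example_stop_policy { EXAMPLE_STOP_AUTO, EXAMPLE_STOP_ALWAYS };

static bool example_should_stop(enum example_stop_policy policy, bool has_dirty)
{
	return policy == EXAMPLE_STOP_ALWAYS || has_dirty;
}

static void example_check_table(void)
{
	assert(!example_should_stop(EXAMPLE_STOP_AUTO, false));	/* auto, clean: keep */
	assert(example_should_stop(EXAMPLE_STOP_AUTO, true));		/* auto, dirty: stop */
	assert(example_should_stop(EXAMPLE_STOP_ALWAYS, false));	/* always: stop */
	assert(example_should_stop(EXAMPLE_STOP_ALWAYS, true));
}
#endif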
1635 static void __cache_set_unregister(struct closure *cl)
1637 struct cache_set *c = container_of(cl, struct cache_set, caching);
1638 struct cached_dev *dc;
1639 struct bcache_device *d;
1642 mutex_lock(&bch_register_lock);
1644 for (i = 0; i < c->devices_max_used; i++) {
1649 if (!UUID_FLASH_ONLY(&c->uuids[i]) &&
1650 test_bit(CACHE_SET_UNREGISTERING, &c->flags)) {
1651 dc = container_of(d, struct cached_dev, disk);
1652 bch_cached_dev_detach(dc);
1653 if (test_bit(CACHE_SET_IO_DISABLE, &c->flags))
1654 conditional_stop_bcache_device(c, d, dc);
1656 bcache_device_stop(d);
1660 mutex_unlock(&bch_register_lock);
1662 continue_at(cl, cache_set_flush, system_wq);
1665 void bch_cache_set_stop(struct cache_set *c)
1667 if (!test_and_set_bit(CACHE_SET_STOPPING, &c->flags))
1668 closure_queue(&c->caching);
1671 void bch_cache_set_unregister(struct cache_set *c)
1673 set_bit(CACHE_SET_UNREGISTERING, &c->flags);
1674 bch_cache_set_stop(c);
1677 #define alloc_bucket_pages(gfp, c) \
1678 ((void *) __get_free_pages(__GFP_ZERO|gfp, ilog2(bucket_pages(c))))
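/*
 * Editor's worked example for alloc_bucket_pages() above, assuming 4 KiB
 * pages and bucket_pages() == bucket_size / PAGE_SECTORS (an assumption
 * stated here, not visible in this file): a bucket of 1024 sectors
 * (512 KiB) spans 128 pages, so ilog2() gives order 7 and
 * __get_free_pages() returns one order-7 allocation per bucket.
 */
#if 0
#include <assert.h>

static void example_bucket_order(void)
{
	unsigned int bucket_size = 1024;		/* sectors, i.e. 512 KiB */
	unsigned int page_sectors = 4096 / 512;		/* 8 sectors per 4 KiB page */
	unsigned int bucket_pages = bucket_size / page_sectors;
	unsigned int order = 7;				/* ilog2(128) */

	assert(bucket_pages == 128);
	assert((1u << order) == bucket_pages);
}
#endif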
1680 struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
1683 struct cache_set *c = kzalloc(sizeof(struct cache_set), GFP_KERNEL);
1688 __module_get(THIS_MODULE);
1689 closure_init(&c->cl, NULL);
1690 set_closure_fn(&c->cl, cache_set_free, system_wq);
1692 closure_init(&c->caching, &c->cl);
1693 set_closure_fn(&c->caching, __cache_set_unregister, system_wq);
1695 /* Maybe create continue_at_noreturn() and use it here? */
1696 closure_set_stopped(&c->cl);
1697 closure_put(&c->cl);
1699 kobject_init(&c->kobj, &bch_cache_set_ktype);
1700 kobject_init(&c->internal, &bch_cache_set_internal_ktype);
1702 bch_cache_accounting_init(&c->accounting, &c->cl);
1704 memcpy(c->sb.set_uuid, sb->set_uuid, 16);
1705 c->sb.block_size = sb->block_size;
1706 c->sb.bucket_size = sb->bucket_size;
1707 c->sb.nr_in_set = sb->nr_in_set;
1708 c->sb.last_mount = sb->last_mount;
1709 c->bucket_bits = ilog2(sb->bucket_size);
1710 c->block_bits = ilog2(sb->block_size);
1711 c->nr_uuids = bucket_bytes(c) / sizeof(struct uuid_entry);
1712 c->devices_max_used = 0;
1713 atomic_set(&c->attached_dev_nr, 0);
1714 c->btree_pages = bucket_pages(c);
1715 if (c->btree_pages > BTREE_MAX_PAGES)
1716 c->btree_pages = max_t(int, c->btree_pages / 4,
1719 sema_init(&c->sb_write_mutex, 1);
1720 mutex_init(&c->bucket_lock);
1721 init_waitqueue_head(&c->btree_cache_wait);
1722 init_waitqueue_head(&c->bucket_wait);
1723 init_waitqueue_head(&c->gc_wait);
1724 sema_init(&c->uuid_write_mutex, 1);
1726 spin_lock_init(&c->btree_gc_time.lock);
1727 spin_lock_init(&c->btree_split_time.lock);
1728 spin_lock_init(&c->btree_read_time.lock);
1730 bch_moving_init_cache_set(c);
1732 INIT_LIST_HEAD(&c->list);
1733 INIT_LIST_HEAD(&c->cached_devs);
1734 INIT_LIST_HEAD(&c->btree_cache);
1735 INIT_LIST_HEAD(&c->btree_cache_freeable);
1736 INIT_LIST_HEAD(&c->btree_cache_freed);
1737 INIT_LIST_HEAD(&c->data_buckets);
1739 iter_size = (sb->bucket_size / sb->block_size + 1) *
1740 sizeof(struct btree_iter_set);
1742 if (!(c->devices = kcalloc(c->nr_uuids, sizeof(void *), GFP_KERNEL)) ||
1743 mempool_init_slab_pool(&c->search, 32, bch_search_cache) ||
1744 mempool_init_kmalloc_pool(&c->bio_meta, 2,
1745 sizeof(struct bbio) + sizeof(struct bio_vec) *
1747 mempool_init_kmalloc_pool(&c->fill_iter, 1, iter_size) ||
1748 bioset_init(&c->bio_split, 4, offsetof(struct bbio, bio),
1749 BIOSET_NEED_BVECS|BIOSET_NEED_RESCUER) ||
1750 !(c->uuids = alloc_bucket_pages(GFP_KERNEL, c)) ||
1751 !(c->moving_gc_wq = alloc_workqueue("bcache_gc",
1752 WQ_MEM_RECLAIM, 0)) ||
1753 bch_journal_alloc(c) ||
1754 bch_btree_cache_alloc(c) ||
1755 bch_open_buckets_alloc(c) ||
1756 bch_bset_sort_state_init(&c->sort, ilog2(c->btree_pages)))
1759 c->congested_read_threshold_us = 2000;
1760 c->congested_write_threshold_us = 20000;
1761 c->error_limit = DEFAULT_IO_ERROR_LIMIT;
1762 WARN_ON(test_and_clear_bit(CACHE_SET_IO_DISABLE, &c->flags));
1766 bch_cache_set_unregister(c);
1770 static void run_cache_set(struct cache_set *c)
1772 const char *err = "cannot allocate memory";
1773 struct cached_dev *dc, *t;
1778 closure_init_stack(&cl);
1780 for_each_cache(ca, c, i)
1781 c->nbuckets += ca->sb.nbuckets;
1784 if (CACHE_SYNC(&c->sb)) {
1789 err = "cannot allocate memory for journal";
1790 if (bch_journal_read(c, &journal))
1793 pr_debug("btree_journal_read() done");
1795 err = "no journal entries found";
1796 if (list_empty(&journal))
1799 j = &list_entry(journal.prev, struct journal_replay, list)->j;
1801 err = "IO error reading priorities";
1802 for_each_cache(ca, c, i)
1803 prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev]);
1806 * If prio_read() fails it'll call cache_set_error and we'll
1807 * tear everything down right away, but if we perhaps checked
1808 * sooner we could avoid journal replay.
1813 err = "bad btree root";
1814 if (__bch_btree_ptr_invalid(c, k))
1817 err = "error reading btree root";
1818 c->root = bch_btree_node_get(c, NULL, k,
1821 if (IS_ERR_OR_NULL(c->root))
1824 list_del_init(&c->root->list);
1825 rw_unlock(true, c->root);
1827 err = uuid_read(c, j, &cl);
1831 err = "error in recovery";
1832 if (bch_btree_check(c))
1835 bch_journal_mark(c, &journal);
1836 bch_initial_gc_finish(c);
1837 pr_debug("btree_check() done");
1840 * bcache_journal_next() can't happen sooner, or
1841 * btree_gc_finish() will give spurious errors about last_gc >
1842 * gc_gen - this is a hack but oh well.
1844 bch_journal_next(&c->journal);
1846 err = "error starting allocator thread";
1847 for_each_cache(ca, c, i)
1848 if (bch_cache_allocator_start(ca))
1852 * First place it's safe to allocate: btree_check() and
1853 * btree_gc_finish() have to run before we have buckets to
1854 * allocate, and bch_bucket_alloc_set() might cause a journal
1855 * entry to be written so bcache_journal_next() has to be called
1858 * If the uuids were in the old format we have to rewrite them
1859 * before the next journal entry is written:
1861 if (j->version < BCACHE_JSET_VERSION_UUID)
1864 bch_journal_replay(c, &journal);
1866 pr_notice("invalidating existing data");
1868 for_each_cache(ca, c, i) {
1871 ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7,
1872 2, SB_JOURNAL_BUCKETS);
1874 for (j = 0; j < ca->sb.keys; j++)
1875 ca->sb.d[j] = ca->sb.first_bucket + j;
1878 bch_initial_gc_finish(c);
1880 err = "error starting allocator thread";
1881 for_each_cache(ca, c, i)
1882 if (bch_cache_allocator_start(ca))
1885 mutex_lock(&c->bucket_lock);
1886 for_each_cache(ca, c, i)
1888 mutex_unlock(&c->bucket_lock);
1890 err = "cannot allocate new UUID bucket";
1891 if (__uuid_write(c))
1894 err = "cannot allocate new btree root";
1895 c->root = __bch_btree_node_alloc(c, NULL, 0, true, NULL);
1896 if (IS_ERR_OR_NULL(c->root))
1899 mutex_lock(&c->root->write_lock);
1900 bkey_copy_key(&c->root->key, &MAX_KEY);
1901 bch_btree_node_write(c->root, &cl);
1902 mutex_unlock(&c->root->write_lock);
1904 bch_btree_set_root(c->root);
1905 rw_unlock(true, c->root);
1908 * We don't want to write the first journal entry until
1909 * everything is set up - fortunately journal entries won't be
1910 * written until the SET_CACHE_SYNC() here:
1912 SET_CACHE_SYNC(&c->sb, true);
1914 bch_journal_next(&c->journal);
1915 bch_journal_meta(c, &cl);
1918 err = "error starting gc thread";
1919 if (bch_gc_thread_start(c))
1923 c->sb.last_mount = (u32)ktime_get_real_seconds();
1924 bcache_write_super(c);
1926 list_for_each_entry_safe(dc, t, &uncached_devices, list)
1927 bch_cached_dev_attach(dc, c, NULL);
1931 set_bit(CACHE_SET_RUNNING, &c->flags);
1935 /* XXX: test this, it's broken */
1936 bch_cache_set_error(c, "%s", err);
1939 static bool can_attach_cache(struct cache *ca, struct cache_set *c)
1941 return ca->sb.block_size == c->sb.block_size &&
1942 ca->sb.bucket_size == c->sb.bucket_size &&
1943 ca->sb.nr_in_set == c->sb.nr_in_set;
1946 static const char *register_cache_set(struct cache *ca)
1949 const char *err = "cannot allocate memory";
1950 struct cache_set *c;
1952 list_for_each_entry(c, &bch_cache_sets, list)
1953 if (!memcmp(c->sb.set_uuid, ca->sb.set_uuid, 16)) {
1954 if (c->cache[ca->sb.nr_this_dev])
1955 return "duplicate cache set member";
1957 if (!can_attach_cache(ca, c))
1958 return "cache sb does not match set";
1960 if (!CACHE_SYNC(&ca->sb))
1961 SET_CACHE_SYNC(&c->sb, false);
1966 c = bch_cache_set_alloc(&ca->sb);
1970 err = "error creating kobject";
1971 if (kobject_add(&c->kobj, bcache_kobj, "%pU", c->sb.set_uuid) ||
1972 kobject_add(&c->internal, &c->kobj, "internal"))
1975 if (bch_cache_accounting_add_kobjs(&c->accounting, &c->kobj))
1978 bch_debug_init_cache_set(c);
1980 list_add(&c->list, &bch_cache_sets);
1982 sprintf(buf, "cache%i", ca->sb.nr_this_dev);
1983 if (sysfs_create_link(&ca->kobj, &c->kobj, "set") ||
1984 sysfs_create_link(&c->kobj, &ca->kobj, buf))
1987 if (ca->sb.seq > c->sb.seq) {
1988 c->sb.version = ca->sb.version;
1989 memcpy(c->sb.set_uuid, ca->sb.set_uuid, 16);
1990 c->sb.flags = ca->sb.flags;
1991 c->sb.seq = ca->sb.seq;
1992 pr_debug("set version = %llu", c->sb.version);
1995 kobject_get(&ca->kobj);
1997 ca->set->cache[ca->sb.nr_this_dev] = ca;
1998 c->cache_by_alloc[c->caches_loaded++] = ca;
2000 if (c->caches_loaded == c->sb.nr_in_set)
2005 bch_cache_set_unregister(c);
2011 void bch_cache_release(struct kobject *kobj)
2013 struct cache *ca = container_of(kobj, struct cache, kobj);
2017 BUG_ON(ca->set->cache[ca->sb.nr_this_dev] != ca);
2018 ca->set->cache[ca->sb.nr_this_dev] = NULL;
2021 free_pages((unsigned long) ca->disk_buckets, ilog2(bucket_pages(ca)));
2022 kfree(ca->prio_buckets);
2025 free_heap(&ca->heap);
2026 free_fifo(&ca->free_inc);
2028 for (i = 0; i < RESERVE_NR; i++)
2029 free_fifo(&ca->free[i]);
2031 if (ca->sb_bio.bi_inline_vecs[0].bv_page)
2032 put_page(bio_first_page_all(&ca->sb_bio));
2034 if (!IS_ERR_OR_NULL(ca->bdev))
2035 blkdev_put(ca->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
2038 module_put(THIS_MODULE);
2041 static int cache_alloc(struct cache *ca)
2044 size_t btree_buckets;
2047 __module_get(THIS_MODULE);
2048 kobject_init(&ca->kobj, &bch_cache_ktype);
2050 bio_init(&ca->journal.bio, ca->journal.bio.bi_inline_vecs, 8);
2053 * When ca->sb.njournal_buckets is not zero, a journal exists,
2054 * and in bch_journal_replay() btree nodes may split,
2055 * so buckets of type RESERVE_BTREE are needed.
2056 * In the worst case every journal bucket holds valid journal
2057 * entries and all of the keys need to be replayed,
2058 * so the number of RESERVE_BTREE buckets should be at least
2059 * the number of journal buckets.
2061 btree_buckets = ca->sb.njournal_buckets ?: 8;
2062 free = roundup_pow_of_two(ca->sb.nbuckets) >> 10;
2064 if (!init_fifo(&ca->free[RESERVE_BTREE], btree_buckets, GFP_KERNEL) ||
2065 !init_fifo_exact(&ca->free[RESERVE_PRIO], prio_buckets(ca), GFP_KERNEL) ||
2066 !init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL) ||
2067 !init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL) ||
2068 !init_fifo(&ca->free_inc, free << 2, GFP_KERNEL) ||
2069 !init_heap(&ca->heap, free << 3, GFP_KERNEL) ||
2070 !(ca->buckets = vzalloc(array_size(sizeof(struct bucket),
2071 ca->sb.nbuckets))) ||
2072 !(ca->prio_buckets = kzalloc(array3_size(sizeof(uint64_t),
2073 prio_buckets(ca), 2),
2075 !(ca->disk_buckets = alloc_bucket_pages(GFP_KERNEL, ca)))
2078 ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca);
2080 for_each_bucket(b, ca)
2081 atomic_set(&b->pin, 0);
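/*
 * Editor's worked example of the reserve sizing in cache_alloc() above:
 * btree_buckets falls back to 8 via GCC's "a ?: b" extension when there is
 * no journal, and the movinggc/none reserves are sized from
 * roundup_pow_of_two(nbuckets) >> 10 (with free_inc at free << 2 and the
 * heap at free << 3, as in the init calls above).
 */
#if 0
#include <assert.h>

static void example_reserve_sizing(void)
{
	unsigned long njournal_buckets = 0;
	unsigned long btree_buckets = njournal_buckets ?: 8;
	unsigned long free = (1UL << 16) >> 10;	/* roundup_pow_of_two(1 << 16) >> 10 */

	assert(btree_buckets == 8);
	assert(free == 64);
}
#endif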
2086 static int register_cache(struct cache_sb *sb, struct page *sb_page,
2087 struct block_device *bdev, struct cache *ca)
2089 const char *err = NULL; /* must be set for any error case */
2092 bdevname(bdev, ca->cache_dev_name);
2093 memcpy(&ca->sb, sb, sizeof(struct cache_sb));
2095 ca->bdev->bd_holder = ca;
2097 bio_init(&ca->sb_bio, ca->sb_bio.bi_inline_vecs, 1);
2098 bio_first_bvec_all(&ca->sb_bio)->bv_page = sb_page;
2101 if (blk_queue_discard(bdev_get_queue(bdev)))
2102 ca->discard = CACHE_DISCARD(&ca->sb);
2104 ret = cache_alloc(ca);
2106 blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
2108 err = "cache_alloc(): -ENOMEM";
2110 err = "cache_alloc(): unknown error";
2114 if (kobject_add(&ca->kobj,
2115 &part_to_dev(bdev->bd_part)->kobj,
2117 err = "error calling kobject_add";
2122 mutex_lock(&bch_register_lock);
2123 err = register_cache_set(ca);
2124 mutex_unlock(&bch_register_lock);
2131 pr_info("registered cache device %s", ca->cache_dev_name);
2134 kobject_put(&ca->kobj);
2138 pr_notice("error %s: %s", ca->cache_dev_name, err);
2143 /* Global interfaces/init */
2145 static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
2146 const char *buffer, size_t size);
2148 kobj_attribute_write(register, register_bcache);
2149 kobj_attribute_write(register_quiet, register_bcache);
2151 static bool bch_is_open_backing(struct block_device *bdev) {
2152 struct cache_set *c, *tc;
2153 struct cached_dev *dc, *t;
2155 list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
2156 list_for_each_entry_safe(dc, t, &c->cached_devs, list)
2157 if (dc->bdev == bdev)
2159 list_for_each_entry_safe(dc, t, &uncached_devices, list)
2160 if (dc->bdev == bdev)
2165 static bool bch_is_open_cache(struct block_device *bdev) {
2166 struct cache_set *c, *tc;
2170 list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
2171 for_each_cache(ca, c, i)
2172 if (ca->bdev == bdev)
2177 static bool bch_is_open(struct block_device *bdev) {
2178 return bch_is_open_cache(bdev) || bch_is_open_backing(bdev);
2181 static ssize_t register_bcache(struct kobject *k, struct kobj_attribute *attr,
2182 const char *buffer, size_t size)
2185 const char *err = "cannot allocate memory";
2187 struct cache_sb *sb = NULL;
2188 struct block_device *bdev = NULL;
2189 struct page *sb_page = NULL;
2191 if (!try_module_get(THIS_MODULE))
2194 path = kstrndup(buffer, size, GFP_KERNEL);
2198 sb = kmalloc(sizeof(struct cache_sb), GFP_KERNEL);
2202 err = "failed to open device";
2203 bdev = blkdev_get_by_path(strim(path),
2204 FMODE_READ|FMODE_WRITE|FMODE_EXCL,
2207 if (bdev == ERR_PTR(-EBUSY)) {
2208 bdev = lookup_bdev(strim(path));
2209 mutex_lock(&bch_register_lock);
2210 if (!IS_ERR(bdev) && bch_is_open(bdev))
2211 err = "device already registered";
2213 err = "device busy";
2214 mutex_unlock(&bch_register_lock);
2217 if (attr == &ksysfs_register_quiet)
2223 err = "failed to set blocksize";
2224 if (set_blocksize(bdev, 4096))
2227 err = read_super(sb, bdev, &sb_page);
2231 err = "failed to register device";
2232 if (SB_IS_BDEV(sb)) {
2233 struct cached_dev *dc = kzalloc(sizeof(*dc), GFP_KERNEL);
2238 mutex_lock(&bch_register_lock);
2239 register_bdev(sb, sb_page, bdev, dc);
2240 mutex_unlock(&bch_register_lock);
2242 struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
2247 if (register_cache(sb, sb_page, bdev, ca) != 0)
2255 module_put(THIS_MODULE);
2259 blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
2261 pr_info("error %s: %s", path, err);
2266 static int bcache_reboot(struct notifier_block *n, unsigned long code, void *x)
2268 if (code == SYS_DOWN ||
2270 code == SYS_POWER_OFF) {
2272 unsigned long start = jiffies;
2273 bool stopped = false;
2275 struct cache_set *c, *tc;
2276 struct cached_dev *dc, *tdc;
2278 mutex_lock(&bch_register_lock);
2280 if (list_empty(&bch_cache_sets) &&
2281 list_empty(&uncached_devices))
2284 pr_info("Stopping all devices:");
2286 list_for_each_entry_safe(c, tc, &bch_cache_sets, list)
2287 bch_cache_set_stop(c);
2289 list_for_each_entry_safe(dc, tdc, &uncached_devices, list)
2290 bcache_device_stop(&dc->disk);
2292 /* What's a condition variable? */
2294 long timeout = start + 2 * HZ - jiffies;
2296 stopped = list_empty(&bch_cache_sets) &&
2297 list_empty(&uncached_devices);
2299 if (timeout < 0 || stopped)
2302 prepare_to_wait(&unregister_wait, &wait,
2303 TASK_UNINTERRUPTIBLE);
2305 mutex_unlock(&bch_register_lock);
2306 schedule_timeout(timeout);
2307 mutex_lock(&bch_register_lock);
2310 finish_wait(&unregister_wait, &wait);
2313 pr_info("All devices stopped");
2315 pr_notice("Timeout waiting for devices to be closed");
2317 mutex_unlock(&bch_register_lock);
2323 static struct notifier_block reboot = {
2324 .notifier_call = bcache_reboot,
2325 .priority = INT_MAX, /* before any real devices */
2328 static void bcache_exit(void)
2333 kobject_put(bcache_kobj);
2335 destroy_workqueue(bcache_wq);
2337 unregister_blkdev(bcache_major, "bcache");
2338 unregister_reboot_notifier(&reboot);
2339 mutex_destroy(&bch_register_lock);
2342 static int __init bcache_init(void)
2344 static const struct attribute *files[] = {
2345 &ksysfs_register.attr,
2346 &ksysfs_register_quiet.attr,
2350 mutex_init(&bch_register_lock);
2351 init_waitqueue_head(&unregister_wait);
2352 register_reboot_notifier(&reboot);
2354 bcache_major = register_blkdev(0, "bcache");
2355 if (bcache_major < 0) {
2356 unregister_reboot_notifier(&reboot);
2357 mutex_destroy(&bch_register_lock);
2358 return bcache_major;
2361 bcache_wq = alloc_workqueue("bcache", WQ_MEM_RECLAIM, 0);
2365 bcache_kobj = kobject_create_and_add("bcache", fs_kobj);
2369 if (bch_request_init() ||
2370 sysfs_create_files(bcache_kobj, files))
2373 bch_debug_init(bcache_kobj);
2374 closure_debug_init();
2382 module_exit(bcache_exit);
2383 module_init(bcache_init);