// SPDX-License-Identifier: GPL-2.0-only
#include "dm.h"
#include "persistent-data/dm-transaction-manager.h"
#include "persistent-data/dm-bitset.h"
#include "persistent-data/dm-space-map.h"

#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#define DM_MSG_PREFIX "era"

#define SUPERBLOCK_LOCATION 0
#define SUPERBLOCK_MAGIC 2126579579
#define SUPERBLOCK_CSUM_XOR 146538381
#define MIN_ERA_VERSION 1
#define MAX_ERA_VERSION 1
#define INVALID_WRITESET_ROOT SUPERBLOCK_LOCATION
#define MIN_BLOCK_SIZE 8

/*----------------------------------------------------------------
 * Writeset
 *--------------------------------------------------------------*/
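/*
 * A writeset is a bitset, with one bit per block of the origin device,
 * recording which blocks have been written to during the current era.
 */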
struct writeset_metadata {
        uint32_t nr_bits;
        dm_block_t root;
};

struct writeset {
        struct writeset_metadata md;

        /*
         * An in-core copy of the bits, to avoid constantly doing lookups
         * on disk.
         */
        unsigned long *bits;
};

/*
 * This does not free the on-disk bitset; that is normally done after the
 * writeset has been digested into the era array.
 */
static void writeset_free(struct writeset *ws)
{
        vfree(ws->bits);
        ws->bits = NULL;
}

static int setup_on_disk_bitset(struct dm_disk_bitset *info,
                                unsigned nr_bits, dm_block_t *root)
{
        int r;

        r = dm_bitset_empty(info, root);
        if (r)
                return r;

        return dm_bitset_resize(info, *root, 0, nr_bits, false, root);
}

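/* e.g. on a 64-bit host, 1000 bits -> dm_div_up(1000, 64) = 16 longs = 128 bytes. */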
static size_t bitset_size(unsigned nr_bits)
{
        return sizeof(unsigned long) * dm_div_up(nr_bits, BITS_PER_LONG);
}

/*
 * Allocates memory for the in-core bitset.
 */
static int writeset_alloc(struct writeset *ws, dm_block_t nr_blocks)
{
        ws->bits = vzalloc(bitset_size(nr_blocks));
        if (!ws->bits) {
                DMERR("%s: couldn't allocate in memory bitset", __func__);
                return -ENOMEM;
        }

        return 0;
}

/*
 * Wipes the in-core bitset, and creates a new on-disk bitset.
 */
static int writeset_init(struct dm_disk_bitset *info, struct writeset *ws,
                         dm_block_t nr_blocks)
{
        int r;

        memset(ws->bits, 0, bitset_size(nr_blocks));

        ws->md.nr_bits = nr_blocks;
        r = setup_on_disk_bitset(info, ws->md.nr_bits, &ws->md.root);
        if (r) {
                DMERR("%s: setup_on_disk_bitset failed", __func__);
                return r;
        }

        return 0;
}

static bool writeset_marked(struct writeset *ws, dm_block_t block)
{
        return test_bit(block, ws->bits);
}

static int writeset_marked_on_disk(struct dm_disk_bitset *info,
                                   struct writeset_metadata *m, dm_block_t block,
                                   bool *result)
{
        dm_block_t old = m->root;

        /*
         * The bitset was flushed when it was archived, so we know there'll
         * be no change to the root.
         */
        int r = dm_bitset_test_bit(info, m->root, block, &m->root, result);
        if (r) {
                DMERR("%s: dm_bitset_test_bit failed", __func__);
                return r;
        }

        BUG_ON(m->root != old);

        return r;
}

/*
 * Returns < 0 on error, 0 if the bit wasn't previously set, 1 if it was.
 */
static int writeset_test_and_set(struct dm_disk_bitset *info,
                                 struct writeset *ws, uint32_t block)
{
        int r;

        if (!test_bit(block, ws->bits)) {
                r = dm_bitset_set_bit(info, ws->md.root, block, &ws->md.root);
                if (r) {
                        /* FIXME: fail mode */
                        return r;
                }

                return 0;
        }

        return 1;
}

/*----------------------------------------------------------------
 * On disk metadata layout
 *--------------------------------------------------------------*/
#define SPACE_MAP_ROOT_SIZE 128
#define UUID_LEN 16

struct writeset_disk {
        __le32 nr_bits;
        __le64 root;
} __packed;

struct superblock_disk {
        __le32 csum;
        __le32 flags;
        __le64 blocknr;

        __u8 uuid[UUID_LEN];
        __le64 magic;
        __le32 version;

        __u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];

        __le32 data_block_size;
        __le32 metadata_block_size;
        __le32 nr_blocks;

        __le32 current_era;
        struct writeset_disk current_writeset;

        /*
         * Only these two fields are valid within the metadata snapshot.
         */
        __le64 writeset_tree_root;
        __le64 era_array_root;

        __le64 metadata_snap;
} __packed;

/*----------------------------------------------------------------
 * Superblock validation
 *--------------------------------------------------------------*/
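/*
 * The checksum covers everything after the csum field itself, i.e. from
 * 'flags' to the end of the metadata block.
 */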
static void sb_prepare_for_write(struct dm_block_validator *v,
                                 struct dm_block *b,
                                 size_t sb_block_size)
{
        struct superblock_disk *disk = dm_block_data(b);

        disk->blocknr = cpu_to_le64(dm_block_location(b));
        disk->csum = cpu_to_le32(dm_bm_checksum(&disk->flags,
                                                sb_block_size - sizeof(__le32),
                                                SUPERBLOCK_CSUM_XOR));
}

static int check_metadata_version(struct superblock_disk *disk)
{
        uint32_t metadata_version = le32_to_cpu(disk->version);

        if (metadata_version < MIN_ERA_VERSION || metadata_version > MAX_ERA_VERSION) {
                DMERR("Era metadata version %u found, but only versions between %u and %u supported.",
                      metadata_version, MIN_ERA_VERSION, MAX_ERA_VERSION);
                return -EINVAL;
        }

        return 0;
}

static int sb_check(struct dm_block_validator *v,
                    struct dm_block *b,
                    size_t sb_block_size)
{
        struct superblock_disk *disk = dm_block_data(b);
        __le32 csum_le;

        if (dm_block_location(b) != le64_to_cpu(disk->blocknr)) {
                DMERR("sb_check failed: blocknr %llu: wanted %llu",
                      le64_to_cpu(disk->blocknr),
                      (unsigned long long)dm_block_location(b));
                return -ENOTBLK;
        }

        if (le64_to_cpu(disk->magic) != SUPERBLOCK_MAGIC) {
                DMERR("sb_check failed: magic %llu: wanted %llu",
                      le64_to_cpu(disk->magic),
                      (unsigned long long) SUPERBLOCK_MAGIC);
                return -EILSEQ;
        }

        csum_le = cpu_to_le32(dm_bm_checksum(&disk->flags,
                                             sb_block_size - sizeof(__le32),
                                             SUPERBLOCK_CSUM_XOR));
        if (csum_le != disk->csum) {
                DMERR("sb_check failed: csum %u: wanted %u",
                      le32_to_cpu(csum_le), le32_to_cpu(disk->csum));
                return -EILSEQ;
        }

        return check_metadata_version(disk);
}

static struct dm_block_validator sb_validator = {
        .name = "superblock",
        .prepare_for_write = sb_prepare_for_write,
        .check = sb_check
};

/*----------------------------------------------------------------
 * Low level metadata handling
 *--------------------------------------------------------------*/
#define DM_ERA_METADATA_BLOCK_SIZE 4096
#define ERA_MAX_CONCURRENT_LOCKS 5

struct era_metadata {
        struct block_device *bdev;
        struct dm_block_manager *bm;
        struct dm_space_map *sm;
        struct dm_transaction_manager *tm;

        dm_block_t block_size;
        uint32_t nr_blocks;

        uint32_t current_era;

        /*
         * We preallocate 2 writesets.  When an era rolls over we
         * switch between them. This means the allocation is done at
         * preresume time, rather than on the io path.
         */
        struct writeset writesets[2];
        struct writeset *current_writeset;

        dm_block_t writeset_tree_root;
        dm_block_t era_array_root;

        struct dm_disk_bitset bitset_info;
        struct dm_btree_info writeset_tree_info;
        struct dm_array_info era_array_info;

        dm_block_t metadata_snap;

        /*
         * A flag that is set whenever a writeset has been archived.
         */
        bool archived_writesets;

        /*
         * Reading the space map root can fail, so we read it into this
         * buffer before the superblock is locked and updated.
         */
        __u8 metadata_space_map_root[SPACE_MAP_ROOT_SIZE];
};

static int superblock_read_lock(struct era_metadata *md,
                                struct dm_block **sblock)
{
        return dm_bm_read_lock(md->bm, SUPERBLOCK_LOCATION,
                               &sb_validator, sblock);
}

static int superblock_lock_zero(struct era_metadata *md,
                                struct dm_block **sblock)
{
        return dm_bm_write_lock_zero(md->bm, SUPERBLOCK_LOCATION,
                                     &sb_validator, sblock);
}

static int superblock_lock(struct era_metadata *md,
                           struct dm_block **sblock)
{
        return dm_bm_write_lock(md->bm, SUPERBLOCK_LOCATION,
                                &sb_validator, sblock);
}

/* FIXME: duplication with cache and thin */
static int superblock_all_zeroes(struct dm_block_manager *bm, bool *result)
{
        int r;
        unsigned i;
        struct dm_block *b;
        __le64 *data_le, zero = cpu_to_le64(0);
        unsigned sb_block_size = dm_bm_block_size(bm) / sizeof(__le64);

        /*
         * We can't use a validator here - it may be all zeroes.
         */
        r = dm_bm_read_lock(bm, SUPERBLOCK_LOCATION, NULL, &b);
        if (r)
                return r;

        data_le = dm_block_data(b);
        *result = true;
        for (i = 0; i < sb_block_size; i++) {
                if (data_le[i] != zero) {
                        *result = false;
                        break;
                }
        }

        dm_bm_unlock(b);

        return 0;
}

/*----------------------------------------------------------------*/

static void ws_pack(const struct writeset_metadata *core, struct writeset_disk *disk)
{
        disk->nr_bits = cpu_to_le32(core->nr_bits);
        disk->root = cpu_to_le64(core->root);
}

static void ws_unpack(const struct writeset_disk *disk, struct writeset_metadata *core)
{
        core->nr_bits = le32_to_cpu(disk->nr_bits);
        core->root = le64_to_cpu(disk->root);
}

static void ws_inc(void *context, const void *value)
{
        struct era_metadata *md = context;
        struct writeset_disk ws_d;
        dm_block_t b;

        memcpy(&ws_d, value, sizeof(ws_d));
        b = le64_to_cpu(ws_d.root);

        dm_tm_inc(md->tm, b);
}

static void ws_dec(void *context, const void *value)
{
        struct era_metadata *md = context;
        struct writeset_disk ws_d;
        dm_block_t b;

        memcpy(&ws_d, value, sizeof(ws_d));
        b = le64_to_cpu(ws_d.root);

        dm_bitset_del(&md->bitset_info, b);
}

static int ws_eq(void *context, const void *value1, const void *value2)
{
        return !memcmp(value1, value2, sizeof(struct writeset_disk));
}

/*----------------------------------------------------------------*/

static void setup_writeset_tree_info(struct era_metadata *md)
{
        struct dm_btree_value_type *vt = &md->writeset_tree_info.value_type;

        md->writeset_tree_info.tm = md->tm;
        md->writeset_tree_info.levels = 1;
        vt->context = md;
        vt->size = sizeof(struct writeset_disk);
        vt->inc = ws_inc;
        vt->dec = ws_dec;
        vt->equal = ws_eq;
}

static void setup_era_array_info(struct era_metadata *md)
{
        struct dm_btree_value_type vt;

        vt.context = NULL;
        vt.size = sizeof(__le32);
        vt.inc = NULL;
        vt.dec = NULL;
        vt.equal = NULL;

        dm_array_info_init(&md->era_array_info, md->tm, &vt);
}

static void setup_infos(struct era_metadata *md)
{
        dm_disk_bitset_init(md->tm, &md->bitset_info);
        setup_writeset_tree_info(md);
        setup_era_array_info(md);
}

/*----------------------------------------------------------------*/

static int create_fresh_metadata(struct era_metadata *md)
{
        int r;

        r = dm_tm_create_with_sm(md->bm, SUPERBLOCK_LOCATION,
                                 &md->tm, &md->sm);
        if (r < 0) {
                DMERR("dm_tm_create_with_sm failed");
                return r;
        }

        setup_infos(md);

        r = dm_btree_empty(&md->writeset_tree_info, &md->writeset_tree_root);
        if (r) {
                DMERR("couldn't create new writeset tree");
                goto bad;
        }

        r = dm_array_empty(&md->era_array_info, &md->era_array_root);
        if (r) {
                DMERR("couldn't create era array");
                goto bad;
        }

        return 0;

bad:
        dm_sm_destroy(md->sm);
        dm_tm_destroy(md->tm);

        return r;
}

static int save_sm_root(struct era_metadata *md)
{
        int r;
        size_t metadata_len;

        r = dm_sm_root_size(md->sm, &metadata_len);
        if (r < 0)
                return r;

        return dm_sm_copy_root(md->sm, &md->metadata_space_map_root,
                               metadata_len);
}

static void copy_sm_root(struct era_metadata *md, struct superblock_disk *disk)
{
        memcpy(&disk->metadata_space_map_root,
               &md->metadata_space_map_root,
               sizeof(md->metadata_space_map_root));
}

/*
 * Writes a superblock, including the static fields that don't get updated
 * with every commit (possible optimisation here).  'md' should be fully
 * constructed when this is called.
 */
static void prepare_superblock(struct era_metadata *md, struct superblock_disk *disk)
{
        disk->magic = cpu_to_le64(SUPERBLOCK_MAGIC);
        disk->flags = cpu_to_le32(0ul);

        /* FIXME: can't keep blanking the uuid (uuid is currently unused though) */
        memset(disk->uuid, 0, sizeof(disk->uuid));
        disk->version = cpu_to_le32(MAX_ERA_VERSION);

        copy_sm_root(md, disk);

        disk->data_block_size = cpu_to_le32(md->block_size);
        disk->metadata_block_size = cpu_to_le32(DM_ERA_METADATA_BLOCK_SIZE >> SECTOR_SHIFT);
        disk->nr_blocks = cpu_to_le32(md->nr_blocks);
        disk->current_era = cpu_to_le32(md->current_era);

        ws_pack(&md->current_writeset->md, &disk->current_writeset);
        disk->writeset_tree_root = cpu_to_le64(md->writeset_tree_root);
        disk->era_array_root = cpu_to_le64(md->era_array_root);
        disk->metadata_snap = cpu_to_le64(md->metadata_snap);
}

static int write_superblock(struct era_metadata *md)
{
        int r;
        struct dm_block *sblock;
        struct superblock_disk *disk;

        r = save_sm_root(md);
        if (r) {
                DMERR("%s: save_sm_root failed", __func__);
                return r;
        }

        r = superblock_lock_zero(md, &sblock);
        if (r)
                return r;

        disk = dm_block_data(sblock);
        prepare_superblock(md, disk);

        return dm_tm_commit(md->tm, sblock);
}

/*
 * Assumes block_size and the infos are set.
 */
static int format_metadata(struct era_metadata *md)
{
        int r;

        r = create_fresh_metadata(md);
        if (r)
                return r;

        r = write_superblock(md);
        if (r) {
                dm_sm_destroy(md->sm);
                dm_tm_destroy(md->tm);
                return r;
        }

        return 0;
}

static int open_metadata(struct era_metadata *md)
{
        int r;
        struct dm_block *sblock;
        struct superblock_disk *disk;

        r = superblock_read_lock(md, &sblock);
        if (r) {
                DMERR("couldn't read_lock superblock");
                return r;
        }

        disk = dm_block_data(sblock);

        /* Verify the data block size hasn't changed */
        if (le32_to_cpu(disk->data_block_size) != md->block_size) {
                DMERR("changing the data block size (from %u to %llu) is not supported",
                      le32_to_cpu(disk->data_block_size), md->block_size);
                r = -EINVAL;
                goto bad;
        }

        r = dm_tm_open_with_sm(md->bm, SUPERBLOCK_LOCATION,
                               disk->metadata_space_map_root,
                               sizeof(disk->metadata_space_map_root),
                               &md->tm, &md->sm);
        if (r) {
                DMERR("dm_tm_open_with_sm failed");
                goto bad;
        }

        setup_infos(md);

        md->nr_blocks = le32_to_cpu(disk->nr_blocks);
        md->current_era = le32_to_cpu(disk->current_era);

        ws_unpack(&disk->current_writeset, &md->current_writeset->md);
        md->writeset_tree_root = le64_to_cpu(disk->writeset_tree_root);
        md->era_array_root = le64_to_cpu(disk->era_array_root);
        md->metadata_snap = le64_to_cpu(disk->metadata_snap);
        md->archived_writesets = true;

        dm_bm_unlock(sblock);

        return 0;

bad:
        dm_bm_unlock(sblock);
        return r;
}

static int open_or_format_metadata(struct era_metadata *md,
                                   bool may_format)
{
        int r;
        bool unformatted = false;

        r = superblock_all_zeroes(md->bm, &unformatted);
        if (r)
                return r;

        if (unformatted)
                return may_format ? format_metadata(md) : -EPERM;

        return open_metadata(md);
}

static int create_persistent_data_objects(struct era_metadata *md,
                                          bool may_format)
{
        int r;

        md->bm = dm_block_manager_create(md->bdev, DM_ERA_METADATA_BLOCK_SIZE,
                                         ERA_MAX_CONCURRENT_LOCKS);
        if (IS_ERR(md->bm)) {
                DMERR("could not create block manager");
                return PTR_ERR(md->bm);
        }

        r = open_or_format_metadata(md, may_format);
        if (r)
                dm_block_manager_destroy(md->bm);

        return r;
}

static void destroy_persistent_data_objects(struct era_metadata *md)
{
        dm_sm_destroy(md->sm);
        dm_tm_destroy(md->tm);
        dm_block_manager_destroy(md->bm);
}

/*
 * This waits until all era_map threads have picked up the new filter.
 */
static void swap_writeset(struct era_metadata *md, struct writeset *new_writeset)
{
        rcu_assign_pointer(md->current_writeset, new_writeset);
        synchronize_rcu();
}

/*----------------------------------------------------------------
 * Writesets get 'digested' into the main era array.
 *
 * We're using a coroutine here so the worker thread can do the digestion,
 * thus avoiding synchronisation of the metadata.  Digesting a whole
 * writeset in one go would cause too much latency.
 *--------------------------------------------------------------*/
struct digest {
        uint32_t era;
        unsigned nr_bits, current_bit;
        struct writeset_metadata writeset;
        __le32 value;
        struct dm_disk_bitset info;

        int (*step)(struct era_metadata *, struct digest *);
};
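/*
 * The 'step' pointer drives a small state machine:
 *
 *   lookup_writeset -> transcribe_writeset (a bounded number of bits per
 *   step) -> remove_writeset -> lookup_writeset -> ...
 *
 * until no archived writesets remain, at which point 'step' is cleared.
 */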
static int metadata_digest_lookup_writeset(struct era_metadata *md,
                                           struct digest *d);

static int metadata_digest_remove_writeset(struct era_metadata *md,
                                           struct digest *d)
{
        int r;
        uint64_t key = d->era;

        r = dm_btree_remove(&md->writeset_tree_info, md->writeset_tree_root,
                            &key, &md->writeset_tree_root);
        if (r) {
                DMERR("%s: dm_btree_remove failed", __func__);
                return r;
        }

        d->step = metadata_digest_lookup_writeset;
        return 0;
}

#define INSERTS_PER_STEP 100

static int metadata_digest_transcribe_writeset(struct era_metadata *md,
                                               struct digest *d)
{
        int r;
        bool marked;
        unsigned b, e = min(d->current_bit + INSERTS_PER_STEP, d->nr_bits);

        for (b = d->current_bit; b < e; b++) {
                r = writeset_marked_on_disk(&d->info, &d->writeset, b, &marked);
                if (r) {
                        DMERR("%s: writeset_marked_on_disk failed", __func__);
                        return r;
                }

                if (!marked)
                        continue;

                __dm_bless_for_disk(&d->value);
                r = dm_array_set_value(&md->era_array_info, md->era_array_root,
                                       b, &d->value, &md->era_array_root);
                if (r) {
                        DMERR("%s: dm_array_set_value failed", __func__);
                        return r;
                }
        }

        if (b == d->nr_bits)
                d->step = metadata_digest_remove_writeset;
        else
                d->current_bit = b;

        return 0;
}

static int metadata_digest_lookup_writeset(struct era_metadata *md,
                                           struct digest *d)
{
        int r;
        uint64_t key;
        struct writeset_disk disk;

        r = dm_btree_find_lowest_key(&md->writeset_tree_info,
                                     md->writeset_tree_root, &key);
        if (r < 0)
                return r;

        d->era = key;

        r = dm_btree_lookup(&md->writeset_tree_info,
                            md->writeset_tree_root, &key, &disk);
        if (r) {
                if (r == -ENODATA) {
                        d->step = NULL;
                        return 0;
                }

                DMERR("%s: dm_btree_lookup failed", __func__);
                return r;
        }

        ws_unpack(&disk, &d->writeset);
        d->value = cpu_to_le32(key);

        /*
         * We initialise another bitset info to avoid any caching side effects
         * with the previous one.
         */
        dm_disk_bitset_init(md->tm, &d->info);

        d->nr_bits = min(d->writeset.nr_bits, md->nr_blocks);
        d->current_bit = 0;
        d->step = metadata_digest_transcribe_writeset;

        return 0;
}

static int metadata_digest_start(struct era_metadata *md, struct digest *d)
{
        if (d->step)
                return 0;

        memset(d, 0, sizeof(*d));
        d->step = metadata_digest_lookup_writeset;

        return 0;
}

/*----------------------------------------------------------------
 * High level metadata interface.  Target methods should use these, and not
 * the lower level ones.
 *--------------------------------------------------------------*/
static struct era_metadata *metadata_open(struct block_device *bdev,
                                          sector_t block_size,
                                          bool may_format)
{
        int r;
        struct era_metadata *md = kzalloc(sizeof(*md), GFP_KERNEL);

        if (!md)
                return ERR_PTR(-ENOMEM);

        md->bdev = bdev;
        md->block_size = block_size;

        md->writesets[0].md.root = INVALID_WRITESET_ROOT;
        md->writesets[1].md.root = INVALID_WRITESET_ROOT;
        md->current_writeset = &md->writesets[0];

        r = create_persistent_data_objects(md, may_format);
        if (r) {
                kfree(md);
                return ERR_PTR(r);
        }

        return md;
}

static void metadata_close(struct era_metadata *md)
{
        writeset_free(&md->writesets[0]);
        writeset_free(&md->writesets[1]);
        destroy_persistent_data_objects(md);
        kfree(md);
}

static bool valid_nr_blocks(dm_block_t n)
{
        /*
         * dm_bitset restricts us to 2^32.  test_bit & co. restrict us
         * further to 2^31 - 1
         */
        return n < (1ull << 31);
}

static int metadata_resize(struct era_metadata *md, void *arg)
{
        int r;
        dm_block_t *new_size = arg;
        __le32 value;

        if (!valid_nr_blocks(*new_size)) {
                DMERR("Invalid number of origin blocks %llu",
                      (unsigned long long) *new_size);
                return -EINVAL;
        }

        writeset_free(&md->writesets[0]);
        writeset_free(&md->writesets[1]);

        r = writeset_alloc(&md->writesets[0], *new_size);
        if (r) {
                DMERR("%s: writeset_alloc failed for writeset 0", __func__);
                return r;
        }

        r = writeset_alloc(&md->writesets[1], *new_size);
        if (r) {
                DMERR("%s: writeset_alloc failed for writeset 1", __func__);
                writeset_free(&md->writesets[0]);
                return r;
        }

        value = cpu_to_le32(0u);
        __dm_bless_for_disk(&value);
        r = dm_array_resize(&md->era_array_info, md->era_array_root,
                            md->nr_blocks, *new_size,
                            &value, &md->era_array_root);
        if (r) {
                DMERR("%s: dm_array_resize failed", __func__);
                writeset_free(&md->writesets[0]);
                writeset_free(&md->writesets[1]);
                return r;
        }

        md->nr_blocks = *new_size;
        return 0;
}

static int metadata_era_archive(struct era_metadata *md)
{
        int r;
        uint64_t keys[1];
        struct writeset_disk value;

        r = dm_bitset_flush(&md->bitset_info, md->current_writeset->md.root,
                            &md->current_writeset->md.root);
        if (r) {
                DMERR("%s: dm_bitset_flush failed", __func__);
                return r;
        }

        ws_pack(&md->current_writeset->md, &value);

        keys[0] = md->current_era;
        __dm_bless_for_disk(&value);
        r = dm_btree_insert(&md->writeset_tree_info, md->writeset_tree_root,
                            keys, &value, &md->writeset_tree_root);
        if (r) {
                DMERR("%s: couldn't insert writeset into btree", __func__);
                /* FIXME: fail mode */
                return r;
        }

        md->current_writeset->md.root = INVALID_WRITESET_ROOT;
        md->archived_writesets = true;

        return 0;
}

static struct writeset *next_writeset(struct era_metadata *md)
{
        return (md->current_writeset == &md->writesets[0]) ?
                &md->writesets[1] : &md->writesets[0];
}

static int metadata_new_era(struct era_metadata *md)
{
        int r;
        struct writeset *new_writeset = next_writeset(md);

        r = writeset_init(&md->bitset_info, new_writeset, md->nr_blocks);
        if (r) {
                DMERR("%s: writeset_init failed", __func__);
                return r;
        }

        swap_writeset(md, new_writeset);
        md->current_era++;

        return 0;
}

static int metadata_era_rollover(struct era_metadata *md)
{
        int r;

        if (md->current_writeset->md.root != INVALID_WRITESET_ROOT) {
                r = metadata_era_archive(md);
                if (r) {
                        DMERR("%s: metadata_era_archive failed", __func__);
                        /* FIXME: fail mode? */
                        return r;
                }
        }

        r = metadata_new_era(md);
        if (r) {
                DMERR("%s: new era failed", __func__);
                /* FIXME: fail mode */
                return r;
        }

        return 0;
}

static bool metadata_current_marked(struct era_metadata *md, dm_block_t block)
{
        bool r;
        struct writeset *ws;

        rcu_read_lock();
        ws = rcu_dereference(md->current_writeset);
        r = writeset_marked(ws, block);
        rcu_read_unlock();

        return r;
}

static int metadata_commit(struct era_metadata *md)
{
        int r;
        struct dm_block *sblock;

        if (md->current_writeset->md.root != INVALID_WRITESET_ROOT) {
                r = dm_bitset_flush(&md->bitset_info, md->current_writeset->md.root,
                                    &md->current_writeset->md.root);
                if (r) {
                        DMERR("%s: bitset flush failed", __func__);
                        return r;
                }
        }

        r = dm_tm_pre_commit(md->tm);
        if (r) {
                DMERR("%s: pre commit failed", __func__);
                return r;
        }

        r = save_sm_root(md);
        if (r) {
                DMERR("%s: save_sm_root failed", __func__);
                return r;
        }

        r = superblock_lock(md, &sblock);
        if (r) {
                DMERR("%s: superblock lock failed", __func__);
                return r;
        }

        prepare_superblock(md, dm_block_data(sblock));

        return dm_tm_commit(md->tm, sblock);
}

static int metadata_checkpoint(struct era_metadata *md)
{
        /*
         * For now we just roll over, but later I want to put a check in to
         * avoid this if the filter is still pretty fresh.
         */
        return metadata_era_rollover(md);
}

/*
 * Metadata snapshots allow userland to access era data.
 */
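/*
 * Taking a snapshot: roll over to a fresh era, commit, then shadow the
 * superblock and bump the reference counts on the writeset tree and era
 * array so those copies remain stable while userland reads them.  The
 * block number of the superblock clone is published via the superblock's
 * metadata_snap field.
 */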
static int metadata_take_snap(struct era_metadata *md)
{
        int r, inc;
        struct dm_block *clone;

        if (md->metadata_snap != SUPERBLOCK_LOCATION) {
                DMERR("%s: metadata snapshot already exists", __func__);
                return -EINVAL;
        }

        r = metadata_era_rollover(md);
        if (r) {
                DMERR("%s: era rollover failed", __func__);
                return r;
        }

        r = metadata_commit(md);
        if (r) {
                DMERR("%s: commit failed", __func__);
                return r;
        }

        r = dm_sm_inc_block(md->sm, SUPERBLOCK_LOCATION);
        if (r) {
                DMERR("%s: couldn't increment superblock", __func__);
                return r;
        }

        r = dm_tm_shadow_block(md->tm, SUPERBLOCK_LOCATION,
                               &sb_validator, &clone, &inc);
        if (r) {
                DMERR("%s: couldn't shadow superblock", __func__);
                dm_sm_dec_block(md->sm, SUPERBLOCK_LOCATION);
                return r;
        }
        BUG_ON(!inc);

        r = dm_sm_inc_block(md->sm, md->writeset_tree_root);
        if (r) {
                DMERR("%s: couldn't inc writeset tree root", __func__);
                dm_tm_unlock(md->tm, clone);
                return r;
        }

        r = dm_sm_inc_block(md->sm, md->era_array_root);
        if (r) {
                DMERR("%s: couldn't inc era tree root", __func__);
                dm_sm_dec_block(md->sm, md->writeset_tree_root);
                dm_tm_unlock(md->tm, clone);
                return r;
        }

        md->metadata_snap = dm_block_location(clone);

        dm_tm_unlock(md->tm, clone);

        return 0;
}

static int metadata_drop_snap(struct era_metadata *md)
{
        int r;
        dm_block_t location;
        struct dm_block *clone;
        struct superblock_disk *disk;

        if (md->metadata_snap == SUPERBLOCK_LOCATION) {
                DMERR("%s: no snap to drop", __func__);
                return -EINVAL;
        }

        r = dm_tm_read_lock(md->tm, md->metadata_snap, &sb_validator, &clone);
        if (r) {
                DMERR("%s: couldn't read lock superblock clone", __func__);
                return r;
        }

        /*
         * Whatever happens now we'll commit with no record of the metadata
         * snap.
         */
        md->metadata_snap = SUPERBLOCK_LOCATION;

        disk = dm_block_data(clone);
        r = dm_btree_del(&md->writeset_tree_info,
                         le64_to_cpu(disk->writeset_tree_root));
        if (r) {
                DMERR("%s: error deleting writeset tree clone", __func__);
                dm_tm_unlock(md->tm, clone);
                return r;
        }

        r = dm_array_del(&md->era_array_info, le64_to_cpu(disk->era_array_root));
        if (r) {
                DMERR("%s: error deleting era array clone", __func__);
                dm_tm_unlock(md->tm, clone);
                return r;
        }

        location = dm_block_location(clone);
        dm_tm_unlock(md->tm, clone);

        return dm_sm_dec_block(md->sm, location);
}

struct metadata_stats {
        dm_block_t used;
        dm_block_t total;
        dm_block_t snap;
        uint32_t era;
};

static int metadata_get_stats(struct era_metadata *md, void *ptr)
{
        int r;
        struct metadata_stats *s = ptr;
        dm_block_t nr_free, nr_total;

        r = dm_sm_get_nr_free(md->sm, &nr_free);
        if (r) {
                DMERR("dm_sm_get_nr_free returned %d", r);
                return r;
        }

        r = dm_sm_get_nr_blocks(md->sm, &nr_total);
        if (r) {
                DMERR("dm_sm_get_nr_blocks returned %d", r);
                return r;
        }

        s->used = nr_total - nr_free;
        s->total = nr_total;
        s->snap = md->metadata_snap;
        s->era = md->current_era;

        return 0;
}

/*----------------------------------------------------------------*/

struct era {
        struct dm_target *ti;

        struct dm_dev *metadata_dev;
        struct dm_dev *origin_dev;

        dm_block_t nr_blocks;
        uint32_t sectors_per_block;
        int sectors_per_block_shift;
        struct era_metadata *md;

        struct workqueue_struct *wq;
        struct work_struct worker;

        spinlock_t deferred_lock;
        struct bio_list deferred_bios;

        spinlock_t rpc_lock;
        struct list_head rpc_calls;

        struct digest digest;
        atomic_t suspended;
};

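/*
 * Metadata operations are marshalled onto the worker thread as 'rpc'
 * calls: the caller queues an rpc and sleeps on the completion; the
 * worker runs fn0 or fn1 against the metadata, commits, and then
 * completes it.
 */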
struct rpc {
        struct list_head list;

        int (*fn0)(struct era_metadata *);
        int (*fn1)(struct era_metadata *, void *);
        void *arg;
        int result;

        struct completion complete;
};

/*----------------------------------------------------------------
 * Remapping.
 *---------------------------------------------------------------*/
static bool block_size_is_power_of_two(struct era *era)
{
        return era->sectors_per_block_shift >= 0;
}

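/*
 * e.g. with 256-sector blocks (shift == 8), sector 1000 maps to block 3.
 * Non power-of-two block sizes fall back to sector_div().
 */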
static dm_block_t get_block(struct era *era, struct bio *bio)
{
        sector_t block_nr = bio->bi_iter.bi_sector;

        if (!block_size_is_power_of_two(era))
                (void) sector_div(block_nr, era->sectors_per_block);
        else
                block_nr >>= era->sectors_per_block_shift;

        return block_nr;
}

static void remap_to_origin(struct era *era, struct bio *bio)
{
        bio_set_dev(bio, era->origin_dev->bdev);
}

/*----------------------------------------------------------------
 * Worker thread
 *--------------------------------------------------------------*/
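/*
 * Each pass of the worker makes a bounded amount of digestion progress,
 * then services any deferred bios and pending rpc calls (see do_work()).
 */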
static void wake_worker(struct era *era)
{
        if (!atomic_read(&era->suspended))
                queue_work(era->wq, &era->worker);
}

static void process_old_eras(struct era *era)
{
        int r;

        if (!era->digest.step)
                return;

        r = era->digest.step(era->md, &era->digest);
        if (r < 0) {
                DMERR("%s: digest step failed, stopping digestion", __func__);
                era->digest.step = NULL;

        } else if (era->digest.step)
                wake_worker(era);
}

static void process_deferred_bios(struct era *era)
{
        int r;
        struct bio_list deferred_bios, marked_bios;
        struct bio *bio;
        struct blk_plug plug;
        bool commit_needed = false;
        bool failed = false;
        struct writeset *ws = era->md->current_writeset;

        bio_list_init(&deferred_bios);
        bio_list_init(&marked_bios);

        spin_lock(&era->deferred_lock);
        bio_list_merge(&deferred_bios, &era->deferred_bios);
        bio_list_init(&era->deferred_bios);
        spin_unlock(&era->deferred_lock);

        if (bio_list_empty(&deferred_bios))
                return;

        while ((bio = bio_list_pop(&deferred_bios))) {
                r = writeset_test_and_set(&era->md->bitset_info, ws,
                                          get_block(era, bio));
                if (r < 0) {
                        /*
                         * This is bad news; we need to roll back.
                         * FIXME: finish.
                         */
                        failed = true;
                } else if (r == 0)
                        commit_needed = true;

                bio_list_add(&marked_bios, bio);
        }

        if (commit_needed) {
                r = metadata_commit(era->md);
                if (r)
                        failed = true;
        }

        if (failed)
                while ((bio = bio_list_pop(&marked_bios)))
                        bio_io_error(bio);
        else {
                blk_start_plug(&plug);
                while ((bio = bio_list_pop(&marked_bios))) {
                        /*
                         * Only update the in-core writeset if the on-disk one
                         * was updated too.
                         */
                        if (commit_needed)
                                set_bit(get_block(era, bio), ws->bits);
                        submit_bio_noacct(bio);
                }
                blk_finish_plug(&plug);
        }
}

static void process_rpc_calls(struct era *era)
{
        int r;
        bool need_commit = false;
        struct list_head calls;
        struct rpc *rpc, *tmp;

        INIT_LIST_HEAD(&calls);
        spin_lock(&era->rpc_lock);
        list_splice_init(&era->rpc_calls, &calls);
        spin_unlock(&era->rpc_lock);

        list_for_each_entry_safe(rpc, tmp, &calls, list) {
                rpc->result = rpc->fn0 ? rpc->fn0(era->md) : rpc->fn1(era->md, rpc->arg);
                need_commit = true;
        }

        if (need_commit) {
                r = metadata_commit(era->md);
                if (r)
                        list_for_each_entry_safe(rpc, tmp, &calls, list)
                                rpc->result = r;
        }

        list_for_each_entry_safe(rpc, tmp, &calls, list)
                complete(&rpc->complete);
}

static void kick_off_digest(struct era *era)
{
        if (era->md->archived_writesets) {
                era->md->archived_writesets = false;
                metadata_digest_start(era->md, &era->digest);
        }
}

static void do_work(struct work_struct *ws)
{
        struct era *era = container_of(ws, struct era, worker);

        kick_off_digest(era);
        process_old_eras(era);
        process_deferred_bios(era);
        process_rpc_calls(era);
}

static void defer_bio(struct era *era, struct bio *bio)
{
        spin_lock(&era->deferred_lock);
        bio_list_add(&era->deferred_bios, bio);
        spin_unlock(&era->deferred_lock);

        wake_worker(era);
}

/*
 * Make an rpc call to the worker to change the metadata.
 */
static int perform_rpc(struct era *era, struct rpc *rpc)
{
        rpc->result = 0;
        init_completion(&rpc->complete);

        spin_lock(&era->rpc_lock);
        list_add(&rpc->list, &era->rpc_calls);
        spin_unlock(&era->rpc_lock);

        wake_worker(era);
        wait_for_completion(&rpc->complete);

        return rpc->result;
}

static int in_worker0(struct era *era, int (*fn)(struct era_metadata *))
{
        struct rpc rpc;

        rpc.fn0 = fn;
        rpc.fn1 = NULL;

        return perform_rpc(era, &rpc);
}

static int in_worker1(struct era *era,
                      int (*fn)(struct era_metadata *, void *), void *arg)
{
        struct rpc rpc;

        rpc.fn0 = NULL;
        rpc.fn1 = fn;
        rpc.arg = arg;

        return perform_rpc(era, &rpc);
}

static void start_worker(struct era *era)
{
        atomic_set(&era->suspended, 0);
}

static void stop_worker(struct era *era)
{
        atomic_set(&era->suspended, 1);
        flush_workqueue(era->wq);
}

/*----------------------------------------------------------------
 * Target methods
 *--------------------------------------------------------------*/
static void era_destroy(struct era *era)
{
        if (era->md)
                metadata_close(era->md);

        if (era->wq)
                destroy_workqueue(era->wq);

        if (era->origin_dev)
                dm_put_device(era->ti, era->origin_dev);

        if (era->metadata_dev)
                dm_put_device(era->ti, era->metadata_dev);

        kfree(era);
}

static dm_block_t calc_nr_blocks(struct era *era)
{
        return dm_sector_div_up(era->ti->len, era->sectors_per_block);
}

static bool valid_block_size(dm_block_t block_size)
{
        bool greater_than_zero = block_size > 0;
        bool multiple_of_min_block_size = (block_size & (MIN_BLOCK_SIZE - 1)) == 0;

        return greater_than_zero && multiple_of_min_block_size;
}

/*
 * <metadata dev> <data dev> <data block size (sectors)>
 */
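/*
 * For example (hypothetical devices and sizes):
 *
 *   dmsetup create my_era --table "0 409600 era /dev/sdb /dev/sdc 256"
 *
 * tracks writes to the first 409600 sectors of /dev/sdc in 256-sector
 * (128KiB) blocks, keeping the era metadata on /dev/sdb.
 */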
static int era_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
        int r;
        char dummy;
        struct era *era;
        struct era_metadata *md;

        if (argc != 3) {
                ti->error = "Invalid argument count";
                return -EINVAL;
        }

        era = kzalloc(sizeof(*era), GFP_KERNEL);
        if (!era) {
                ti->error = "Error allocating era structure";
                return -ENOMEM;
        }

        era->ti = ti;

        r = dm_get_device(ti, argv[0], FMODE_READ | FMODE_WRITE, &era->metadata_dev);
        if (r) {
                ti->error = "Error opening metadata device";
                era_destroy(era);
                return -EINVAL;
        }

        r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &era->origin_dev);
        if (r) {
                ti->error = "Error opening data device";
                era_destroy(era);
                return -EINVAL;
        }

        r = sscanf(argv[2], "%u%c", &era->sectors_per_block, &dummy);
        if (r != 1) {
                ti->error = "Error parsing block size";
                era_destroy(era);
                return -EINVAL;
        }

        r = dm_set_target_max_io_len(ti, era->sectors_per_block);
        if (r) {
                ti->error = "could not set max io len";
                era_destroy(era);
                return -EINVAL;
        }

        if (!valid_block_size(era->sectors_per_block)) {
                ti->error = "Invalid block size";
                era_destroy(era);
                return -EINVAL;
        }

        if (era->sectors_per_block & (era->sectors_per_block - 1))
                era->sectors_per_block_shift = -1;
        else
                era->sectors_per_block_shift = __ffs(era->sectors_per_block);

        md = metadata_open(era->metadata_dev->bdev, era->sectors_per_block, true);
        if (IS_ERR(md)) {
                ti->error = "Error reading metadata";
                era_destroy(era);
                return PTR_ERR(md);
        }
        era->md = md;

        era->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
        if (!era->wq) {
                ti->error = "could not create workqueue for metadata object";
                era_destroy(era);
                return -ENOMEM;
        }
        INIT_WORK(&era->worker, do_work);

        spin_lock_init(&era->deferred_lock);
        bio_list_init(&era->deferred_bios);

        spin_lock_init(&era->rpc_lock);
        INIT_LIST_HEAD(&era->rpc_calls);

        ti->private = era;
        ti->num_flush_bios = 1;
        ti->flush_supported = true;

        ti->num_discard_bios = 1;

        return 0;
}

static void era_dtr(struct dm_target *ti)
{
        era_destroy(ti->private);
}

static int era_map(struct dm_target *ti, struct bio *bio)
{
        struct era *era = ti->private;
        dm_block_t block = get_block(era, bio);

        /*
         * All bios get remapped to the origin device.  We do this now, but
         * it may not get issued until later, depending on whether the
         * block is marked in this era.
         */
        remap_to_origin(era, bio);

        /*
         * REQ_PREFLUSH bios carry no data, so we're not interested in them.
         */
        if (!(bio->bi_opf & REQ_PREFLUSH) &&
            (bio_data_dir(bio) == WRITE) &&
            !metadata_current_marked(era->md, block)) {
                defer_bio(era, bio);
                return DM_MAPIO_SUBMITTED;
        }

        return DM_MAPIO_REMAPPED;
}

static void era_postsuspend(struct dm_target *ti)
{
        int r;
        struct era *era = ti->private;

        r = in_worker0(era, metadata_era_archive);
        if (r) {
                DMERR("%s: couldn't archive current era", __func__);
                /* FIXME: fail mode */
        }

        stop_worker(era);
}

static int era_preresume(struct dm_target *ti)
{
        int r;
        struct era *era = ti->private;
        dm_block_t new_size = calc_nr_blocks(era);

        if (era->nr_blocks != new_size) {
                r = metadata_resize(era->md, &new_size);
                if (r) {
                        DMERR("%s: metadata_resize failed", __func__);
                        return r;
                }

                r = metadata_commit(era->md);
                if (r) {
                        DMERR("%s: metadata_commit failed", __func__);
                        return r;
                }

                era->nr_blocks = new_size;
        }

        start_worker(era);

        r = in_worker0(era, metadata_era_rollover);
        if (r) {
                DMERR("%s: metadata_era_rollover failed", __func__);
                return r;
        }

        return 0;
}

/*
 * Status format:
 *
 * <metadata block size> <#used metadata blocks>/<#total metadata blocks>
 * <current era> <held metadata root | '-'>
 */
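/*
 * e.g. "8 72/1024 5 -" means 8-sector (4KiB) metadata blocks, 72 of 1024
 * metadata blocks used, currently in era 5, with no metadata snapshot
 * held (illustrative values only).
 */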
static void era_status(struct dm_target *ti, status_type_t type,
                       unsigned status_flags, char *result, unsigned maxlen)
{
        int r;
        struct era *era = ti->private;
        ssize_t sz = 0;
        struct metadata_stats stats;
        char buf[BDEVNAME_SIZE];

        switch (type) {
        case STATUSTYPE_INFO:
                r = in_worker1(era, metadata_get_stats, &stats);
                if (r)
                        goto err;

                DMEMIT("%u %llu/%llu %u",
                       (unsigned) (DM_ERA_METADATA_BLOCK_SIZE >> SECTOR_SHIFT),
                       (unsigned long long) stats.used,
                       (unsigned long long) stats.total,
                       (unsigned) stats.era);

                if (stats.snap != SUPERBLOCK_LOCATION)
                        DMEMIT(" %llu", stats.snap);
                else
                        DMEMIT(" -");
                break;

        case STATUSTYPE_TABLE:
                format_dev_t(buf, era->metadata_dev->bdev->bd_dev);
                DMEMIT("%s ", buf);
                format_dev_t(buf, era->origin_dev->bdev->bd_dev);
                DMEMIT("%s %u", buf, era->sectors_per_block);
                break;
        }

        return;

err:
        DMEMIT("Error");
}

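/*
 * Supported messages are "checkpoint", "take_metadata_snap" and
 * "drop_metadata_snap", e.g. (with a hypothetical device name):
 *
 *   dmsetup message my_era 0 take_metadata_snap
 */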
static int era_message(struct dm_target *ti, unsigned argc, char **argv,
                       char *result, unsigned maxlen)
{
        struct era *era = ti->private;

        if (argc != 1) {
                DMERR("incorrect number of message arguments");
                return -EINVAL;
        }

        if (!strcasecmp(argv[0], "checkpoint"))
                return in_worker0(era, metadata_checkpoint);

        if (!strcasecmp(argv[0], "take_metadata_snap"))
                return in_worker0(era, metadata_take_snap);

        if (!strcasecmp(argv[0], "drop_metadata_snap"))
                return in_worker0(era, metadata_drop_snap);

        DMERR("unsupported message '%s'", argv[0]);
        return -EINVAL;
}

static sector_t get_dev_size(struct dm_dev *dev)
{
        return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
}

static int era_iterate_devices(struct dm_target *ti,
                               iterate_devices_callout_fn fn, void *data)
{
        struct era *era = ti->private;

        return fn(ti, era->origin_dev, 0, get_dev_size(era->origin_dev), data);
}

static void era_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
        struct era *era = ti->private;
        uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;

        /*
         * If the system-determined stacked limits are compatible with the
         * era device's blocksize (io_opt is a factor) do not override them.
         */
        if (io_opt_sectors < era->sectors_per_block ||
            do_div(io_opt_sectors, era->sectors_per_block)) {
                blk_limits_io_min(limits, 0);
                blk_limits_io_opt(limits, era->sectors_per_block << SECTOR_SHIFT);
        }
}

/*----------------------------------------------------------------*/

static struct target_type era_target = {
        .name = "era",
        .version = {1, 0, 0},
        .module = THIS_MODULE,
        .ctr = era_ctr,
        .dtr = era_dtr,
        .map = era_map,
        .postsuspend = era_postsuspend,
        .preresume = era_preresume,
        .status = era_status,
        .message = era_message,
        .iterate_devices = era_iterate_devices,
        .io_hints = era_io_hints
};

static int __init dm_era_init(void)
{
        int r;

        r = dm_register_target(&era_target);
        if (r) {
                DMERR("era target registration failed: %d", r);
                return r;
        }

        return 0;
}

static void __exit dm_era_exit(void)
{
        dm_unregister_target(&era_target);
}

module_init(dm_era_init);
module_exit(dm_era_exit);

MODULE_DESCRIPTION(DM_NAME " era target");
MODULE_AUTHOR("Joe Thornber <ejt@redhat.com>");
MODULE_LICENSE("GPL");