/* drivers/md/dm-integrity.c */
/*
 * Copyright (C) 2016-2017 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2016-2017 Milan Broz
 * Copyright (C) 2016-2017 Mikulas Patocka
 *
 * This file is released under the GPL.
 */

#include "dm-bio-record.h"

#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/vmalloc.h>
#include <linux/sort.h>
#include <linux/rbtree.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <linux/async_tx.h>
#include <linux/dm-bufio.h>

#include "dm-audit.h"

#define DM_MSG_PREFIX "integrity"

#define DEFAULT_INTERLEAVE_SECTORS      32768
#define DEFAULT_JOURNAL_SIZE_FACTOR     7
#define DEFAULT_SECTORS_PER_BITMAP_BIT  32768
#define DEFAULT_BUFFER_SECTORS          128
#define DEFAULT_JOURNAL_WATERMARK       50
#define DEFAULT_SYNC_MSEC               10000
#define DEFAULT_MAX_JOURNAL_SECTORS     131072
#define MIN_LOG2_INTERLEAVE_SECTORS     3
#define MAX_LOG2_INTERLEAVE_SECTORS     31
#define METADATA_WORKQUEUE_MAX_ACTIVE   16
#define RECALC_SECTORS                  32768
#define RECALC_WRITE_SUPER              16
#define BITMAP_BLOCK_SIZE               4096    /* don't change it */
#define BITMAP_FLUSH_INTERVAL           (10 * HZ)
#define DISCARD_FILLER                  0xf6
#define SALT_SIZE                       16

/*
 * Warning - DEBUG_PRINT prints security-sensitive data to the log,
 * so it should not be enabled in the official kernel.
 */
//#define DEBUG_PRINT
//#define INTERNAL_VERIFY

/*
 * On-disk structures
 */

#define SB_MAGIC                        "integrt"
#define SB_VERSION_1                    1
#define SB_VERSION_2                    2
#define SB_VERSION_3                    3
#define SB_VERSION_4                    4
#define SB_VERSION_5                    5
#define SB_SECTORS                      8
#define MAX_SECTORS_PER_BLOCK           8

struct superblock {
        __u8 magic[8];
        __u8 version;
        __u8 log2_interleave_sectors;
        __le16 integrity_tag_size;
        __le32 journal_sections;
        __le64 provided_data_sectors;   /* userspace uses this value */
        __le32 flags;
        __u8 log2_sectors_per_block;
        __u8 log2_blocks_per_bitmap_bit;
        __u8 pad[2];
        __le64 recalc_sector;
        __u8 pad2[8];
        __u8 salt[SALT_SIZE];
};

#define SB_FLAG_HAVE_JOURNAL_MAC        0x1
#define SB_FLAG_RECALCULATING           0x2
#define SB_FLAG_DIRTY_BITMAP            0x4
#define SB_FLAG_FIXED_PADDING           0x8
#define SB_FLAG_FIXED_HMAC              0x10

#define JOURNAL_ENTRY_ROUNDUP           8

typedef __le64 commit_id_t;
#define JOURNAL_MAC_PER_SECTOR          8

struct journal_entry {
        union {
                struct {
                        __le32 sector_lo;
                        __le32 sector_hi;
                } s;
                __le64 sector;
        } u;
        commit_id_t last_bytes[];
        /* __u8 tag[0]; */
};

#define journal_entry_tag(ic, je)               ((__u8 *)&(je)->last_bytes[(ic)->sectors_per_block])

#if BITS_PER_LONG == 64
#define journal_entry_set_sector(je, x)         do { smp_wmb(); WRITE_ONCE((je)->u.sector, cpu_to_le64(x)); } while (0)
#else
#define journal_entry_set_sector(je, x)         do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); WRITE_ONCE((je)->u.s.sector_hi, cpu_to_le32((x) >> 32)); } while (0)
#endif
#define journal_entry_get_sector(je)            le64_to_cpu((je)->u.sector)
#define journal_entry_is_unused(je)             ((je)->u.s.sector_hi == cpu_to_le32(-1))
#define journal_entry_set_unused(je)            do { ((je)->u.s.sector_hi = cpu_to_le32(-1)); } while (0)
#define journal_entry_is_inprogress(je)         ((je)->u.s.sector_hi == cpu_to_le32(-2))
#define journal_entry_set_inprogress(je)        do { ((je)->u.s.sector_hi = cpu_to_le32(-2)); } while (0)

#define JOURNAL_BLOCK_SECTORS           8
#define JOURNAL_SECTOR_DATA             ((1 << SECTOR_SHIFT) - sizeof(commit_id_t))
#define JOURNAL_MAC_SIZE                (JOURNAL_MAC_PER_SECTOR * JOURNAL_BLOCK_SECTORS)

struct journal_sector {
        __u8 entries[JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR];
        __u8 mac[JOURNAL_MAC_PER_SECTOR];
        commit_id_t commit_id;
};

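/*
 * The largest tag size that still lets a single journal entry (sector
 * number, last_bytes[] for the maximum block size and the tag itself)
 * fit into the per-sector entry area of a journal sector, i.e.
 * JOURNAL_SECTOR_DATA minus the per-sector MAC bytes.
 */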
#define MAX_TAG_SIZE                    (JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR - offsetof(struct journal_entry, last_bytes[MAX_SECTORS_PER_BLOCK]))

#define METADATA_PADDING_SECTORS        8

#define N_COMMIT_IDS                    4

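/*
 * The journal cycles through N_COMMIT_IDS (4) commit IDs, so the
 * sequence number walks 0 -> 1 -> 2 -> 3 -> 0 -> ...; e.g.
 * prev_commit_seq(0) == 3 and next_commit_seq(3) == 0.
 */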
static unsigned char prev_commit_seq(unsigned char seq)
{
        return (seq + N_COMMIT_IDS - 1) % N_COMMIT_IDS;
}

static unsigned char next_commit_seq(unsigned char seq)
{
        return (seq + 1) % N_COMMIT_IDS;
}

/*
 * In-memory structures
 */

struct journal_node {
        struct rb_node node;
        sector_t sector;
};

struct alg_spec {
        char *alg_string;
        char *key_string;
        __u8 *key;
        unsigned key_size;
};

struct dm_integrity_c {
        struct dm_dev *dev;
        struct dm_dev *meta_dev;
        unsigned tag_size;
        __s8 log2_tag_size;
        sector_t start;
        mempool_t journal_io_mempool;
        struct dm_io_client *io;
        struct dm_bufio_client *bufio;
        struct workqueue_struct *metadata_wq;
        struct superblock *sb;
        unsigned journal_pages;
        unsigned n_bitmap_blocks;

        struct page_list *journal;
        struct page_list *journal_io;
        struct page_list *journal_xor;
        struct page_list *recalc_bitmap;
        struct page_list *may_write_bitmap;
        struct bitmap_block_status *bbs;
        unsigned bitmap_flush_interval;
        int synchronous_mode;
        struct bio_list synchronous_bios;
        struct delayed_work bitmap_flush_work;

        struct crypto_skcipher *journal_crypt;
        struct scatterlist **journal_scatterlist;
        struct scatterlist **journal_io_scatterlist;
        struct skcipher_request **sk_requests;

        struct crypto_shash *journal_mac;

        struct journal_node *journal_tree;
        struct rb_root journal_tree_root;

        sector_t provided_data_sectors;

        unsigned short journal_entry_size;
        unsigned char journal_entries_per_sector;
        unsigned char journal_section_entries;
        unsigned short journal_section_sectors;
        unsigned journal_sections;
        unsigned journal_entries;
        sector_t data_device_sectors;
        sector_t meta_device_sectors;
        unsigned initial_sectors;
        unsigned metadata_run;
        __s8 log2_metadata_run;
        __u8 log2_buffer_sectors;
        __u8 sectors_per_block;
        __u8 log2_blocks_per_bitmap_bit;

        unsigned char mode;

        int failed;

        struct crypto_shash *internal_hash;

        struct dm_target *ti;

        /* these variables are locked with endio_wait.lock */
        struct rb_root in_progress;
        struct list_head wait_list;
        wait_queue_head_t endio_wait;
        struct workqueue_struct *wait_wq;
        struct workqueue_struct *offload_wq;

        unsigned char commit_seq;
        commit_id_t commit_ids[N_COMMIT_IDS];

        unsigned committed_section;
        unsigned n_committed_sections;

        unsigned uncommitted_section;
        unsigned n_uncommitted_sections;

        unsigned free_section;
        unsigned char free_section_entry;
        unsigned free_sectors;

        unsigned free_sectors_threshold;

        struct workqueue_struct *commit_wq;
        struct work_struct commit_work;

        struct workqueue_struct *writer_wq;
        struct work_struct writer_work;

        struct workqueue_struct *recalc_wq;
        struct work_struct recalc_work;
        u8 *recalc_buffer;
        u8 *recalc_tags;

        struct bio_list flush_bio_list;

        unsigned long autocommit_jiffies;
        struct timer_list autocommit_timer;
        unsigned autocommit_msec;

        wait_queue_head_t copy_to_journal_wait;

        struct completion crypto_backoff;

        bool journal_uptodate;
        bool just_formatted;
        bool recalculate_flag;
        bool reset_recalculate_flag;
        bool discard;
        bool fix_padding;
        bool fix_hmac;
        bool legacy_recalculate;

        struct alg_spec internal_hash_alg;
        struct alg_spec journal_crypt_alg;
        struct alg_spec journal_mac_alg;

        atomic64_t number_of_mismatches;

        struct notifier_block reboot_notifier;
};

struct dm_integrity_range {
        sector_t logical_sector;
        sector_t n_sectors;
        bool waiting;
        union {
                struct rb_node node;
                struct {
                        struct task_struct *task;
                        struct list_head wait_entry;
                };
        };
};

struct dm_integrity_io {
        struct work_struct work;

        struct dm_integrity_c *ic;
        enum req_opf op;
        bool fua;

        struct dm_integrity_range range;

        sector_t metadata_block;
        unsigned metadata_offset;

        atomic_t in_flight;
        blk_status_t bi_status;

        struct completion *completion;

        struct dm_bio_details bio_details;
};

struct journal_completion {
        struct dm_integrity_c *ic;
        atomic_t in_flight;
        struct completion comp;
};

struct journal_io {
        struct dm_integrity_range range;
        struct journal_completion *comp;
};

struct bitmap_block_status {
        struct work_struct work;
        struct dm_integrity_c *ic;
        unsigned idx;
        unsigned long *bitmap;
        struct bio_list bio_queue;
        spinlock_t bio_queue_lock;
};

static struct kmem_cache *journal_io_cache;

#define JOURNAL_IO_MEMPOOL      32

#ifdef DEBUG_PRINT
#define DEBUG_print(x, ...)     printk(KERN_DEBUG x, ##__VA_ARGS__)
static void __DEBUG_bytes(__u8 *bytes, size_t len, const char *msg, ...)
{
        va_list args;
        va_start(args, msg);
        vprintk(msg, args);
        va_end(args);
        if (len)
                pr_cont(":");
        while (len) {
                pr_cont(" %02x", *bytes);
                bytes++;
                len--;
        }
        pr_cont("\n");
}
#define DEBUG_bytes(bytes, len, msg, ...)       __DEBUG_bytes(bytes, len, KERN_DEBUG msg, ##__VA_ARGS__)
#else
#define DEBUG_print(x, ...)                     do { } while (0)
#define DEBUG_bytes(bytes, len, msg, ...)       do { } while (0)
#endif

static void dm_integrity_prepare(struct request *rq)
{
}

static void dm_integrity_complete(struct request *rq, unsigned int nr_bytes)
{
}

/*
 * DM integrity profile; protection is performed by the layer above (dm-crypt).
 */
static const struct blk_integrity_profile dm_integrity_profile = {
        .name                   = "DM-DIF-EXT-TAG",
        .generate_fn            = NULL,
        .verify_fn              = NULL,
        .prepare_fn             = dm_integrity_prepare,
        .complete_fn            = dm_integrity_complete,
};

static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map);
static void integrity_bio_wait(struct work_struct *w);
static void dm_integrity_dtr(struct dm_target *ti);

static void dm_integrity_io_error(struct dm_integrity_c *ic, const char *msg, int err)
{
        if (err == -EILSEQ)
                atomic64_inc(&ic->number_of_mismatches);
        if (!cmpxchg(&ic->failed, 0, err))
                DMERR("Error on %s: %d", msg, err);
}

static int dm_integrity_failed(struct dm_integrity_c *ic)
{
        return READ_ONCE(ic->failed);
}

static bool dm_integrity_disable_recalculate(struct dm_integrity_c *ic)
{
        if (ic->legacy_recalculate)
                return false;
        if (!(ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) ?
            ic->internal_hash_alg.key || ic->journal_mac_alg.key :
            ic->internal_hash_alg.key && !ic->journal_mac_alg.key)
                return true;
        return false;
}

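/*
 * Illustrative example: with commit_ids[seq] == 0, the commit ID for
 * section i == 1, sector j == 2 is cpu_to_le64((1ULL << 32) ^ 2), so a
 * journal block that ends up at the wrong section or sector fails the
 * commit ID check on replay.
 */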
static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned i,
                                          unsigned j, unsigned char seq)
{
        /*
         * XOR the seed with the section and sector numbers, so that if a
         * piece of the journal is written at the wrong place, it is detected.
         */
        return ic->commit_ids[seq] ^ cpu_to_le64(((__u64)i << 32) ^ j);
}

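/*
 * With interleaved metadata (no meta_dev), the data device is split into
 * areas of 2^log2_interleave_sectors sectors; e.g. with the default
 * 32768 (2^15), data sector 100000 maps to area 3, offset 1696.
 */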
static void get_area_and_offset(struct dm_integrity_c *ic, sector_t data_sector,
                                sector_t *area, sector_t *offset)
{
        if (!ic->meta_dev) {
                __u8 log2_interleave_sectors = ic->sb->log2_interleave_sectors;
                *area = data_sector >> log2_interleave_sectors;
                *offset = (unsigned)data_sector & ((1U << log2_interleave_sectors) - 1);
        } else {
                *area = 0;
                *offset = data_sector;
        }
}

#define sector_to_block(ic, n)                                          \
do {                                                                    \
        BUG_ON((n) & (unsigned)((ic)->sectors_per_block - 1));          \
        (n) >>= (ic)->sb->log2_sectors_per_block;                       \
} while (0)

static __u64 get_metadata_sector_and_offset(struct dm_integrity_c *ic, sector_t area,
                                            sector_t offset, unsigned *metadata_offset)
{
        __u64 ms;
        unsigned mo;

        ms = area << ic->sb->log2_interleave_sectors;
        if (likely(ic->log2_metadata_run >= 0))
                ms += area << ic->log2_metadata_run;
        else
                ms += area * ic->metadata_run;
        ms >>= ic->log2_buffer_sectors;

        sector_to_block(ic, offset);

        if (likely(ic->log2_tag_size >= 0)) {
                ms += offset >> (SECTOR_SHIFT + ic->log2_buffer_sectors - ic->log2_tag_size);
                mo = (offset << ic->log2_tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1);
        } else {
                ms += (__u64)offset * ic->tag_size >> (SECTOR_SHIFT + ic->log2_buffer_sectors);
                mo = (offset * ic->tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1);
        }
        *metadata_offset = mo;
        return ms;
}

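/*
 * Interleaved layout on the data device:
 *   | sb + journal (initial_sectors) | meta run 0 | data area 0 |
 *   | meta run 1 | data area 1 | ...
 * so the data sector for (area, offset) sits after area + 1 metadata
 * runs and area data areas.
 */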
static sector_t get_data_sector(struct dm_integrity_c *ic, sector_t area, sector_t offset)
{
        sector_t result;

        if (ic->meta_dev)
                return offset;

        result = area << ic->sb->log2_interleave_sectors;
        if (likely(ic->log2_metadata_run >= 0))
                result += (area + 1) << ic->log2_metadata_run;
        else
                result += (area + 1) * ic->metadata_run;

        result += (sector_t)ic->initial_sectors + offset;
        result += ic->start;

        return result;
}

static void wraparound_section(struct dm_integrity_c *ic, unsigned *sec_ptr)
{
        if (unlikely(*sec_ptr >= ic->journal_sections))
                *sec_ptr -= ic->journal_sections;
}

static void sb_set_version(struct dm_integrity_c *ic)
{
        if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC))
                ic->sb->version = SB_VERSION_5;
        else if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING))
                ic->sb->version = SB_VERSION_4;
        else if (ic->mode == 'B' || ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP))
                ic->sb->version = SB_VERSION_3;
        else if (ic->meta_dev || ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
                ic->sb->version = SB_VERSION_2;
        else
                ic->sb->version = SB_VERSION_1;
}

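/*
 * The superblock MAC lives in the last digest-size bytes of the first
 * 512-byte sector: wr == true computes and stores it, wr == false
 * recomputes it and compares it with what is on disk.
 */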
static int sb_mac(struct dm_integrity_c *ic, bool wr)
{
        SHASH_DESC_ON_STACK(desc, ic->journal_mac);
        int r;
        unsigned size = crypto_shash_digestsize(ic->journal_mac);

        if (sizeof(struct superblock) + size > 1 << SECTOR_SHIFT) {
                dm_integrity_io_error(ic, "digest is too long", -EINVAL);
                return -EINVAL;
        }

        desc->tfm = ic->journal_mac;

        r = crypto_shash_init(desc);
        if (unlikely(r < 0)) {
                dm_integrity_io_error(ic, "crypto_shash_init", r);
                return r;
        }

        r = crypto_shash_update(desc, (__u8 *)ic->sb, (1 << SECTOR_SHIFT) - size);
        if (unlikely(r < 0)) {
                dm_integrity_io_error(ic, "crypto_shash_update", r);
                return r;
        }

        if (likely(wr)) {
                r = crypto_shash_final(desc, (__u8 *)ic->sb + (1 << SECTOR_SHIFT) - size);
                if (unlikely(r < 0)) {
                        dm_integrity_io_error(ic, "crypto_shash_final", r);
                        return r;
                }
        } else {
                __u8 result[HASH_MAX_DIGESTSIZE];
                r = crypto_shash_final(desc, result);
                if (unlikely(r < 0)) {
                        dm_integrity_io_error(ic, "crypto_shash_final", r);
                        return r;
                }
                if (memcmp((__u8 *)ic->sb + (1 << SECTOR_SHIFT) - size, result, size)) {
                        dm_integrity_io_error(ic, "superblock mac", -EILSEQ);
                        dm_audit_log_target(DM_MSG_PREFIX, "mac-superblock", ic->ti, 0);
                        return -EILSEQ;
                }
        }

        return 0;
}

static int sync_rw_sb(struct dm_integrity_c *ic, int op, int op_flags)
{
        struct dm_io_request io_req;
        struct dm_io_region io_loc;
        int r;

        io_req.bi_op = op;
        io_req.bi_op_flags = op_flags;
        io_req.mem.type = DM_IO_KMEM;
        io_req.mem.ptr.addr = ic->sb;
        io_req.notify.fn = NULL;
        io_req.client = ic->io;
        io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev;
        io_loc.sector = ic->start;
        io_loc.count = SB_SECTORS;

        if (op == REQ_OP_WRITE) {
                sb_set_version(ic);
                if (ic->journal_mac && ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) {
                        r = sb_mac(ic, true);
                        if (unlikely(r))
                                return r;
                }
        }

        r = dm_io(&io_req, 1, &io_loc, NULL);
        if (unlikely(r))
                return r;

        if (op == REQ_OP_READ) {
                if (ic->mode != 'R' && ic->journal_mac && ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) {
                        r = sb_mac(ic, false);
                        if (unlikely(r))
                                return r;
                }
        }

        return 0;
}

#define BITMAP_OP_TEST_ALL_SET          0
#define BITMAP_OP_TEST_ALL_CLEAR        1
#define BITMAP_OP_SET                   2
#define BITMAP_OP_CLEAR                 3

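/*
 * One bitmap bit covers 2^(log2_sectors_per_block +
 * log2_blocks_per_bitmap_bit) sectors; e.g. with 4KiB blocks (8 sectors)
 * and log2_blocks_per_bitmap_bit == 12, a bit covers the default 32768
 * sectors (16 MiB).
 */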
static bool block_bitmap_op(struct dm_integrity_c *ic, struct page_list *bitmap,
                            sector_t sector, sector_t n_sectors, int mode)
{
        unsigned long bit, end_bit, this_end_bit, page, end_page;
        unsigned long *data;

        if (unlikely(((sector | n_sectors) & ((1 << ic->sb->log2_sectors_per_block) - 1)) != 0)) {
                DMCRIT("invalid bitmap access (%llx,%llx,%d,%d,%d)",
                        sector,
                        n_sectors,
                        ic->sb->log2_sectors_per_block,
                        ic->log2_blocks_per_bitmap_bit,
                        mode);
                BUG();
        }

        if (unlikely(!n_sectors))
                return true;

        bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
        end_bit = (sector + n_sectors - 1) >>
                (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);

        page = bit / (PAGE_SIZE * 8);
        bit %= PAGE_SIZE * 8;

        end_page = end_bit / (PAGE_SIZE * 8);
        end_bit %= PAGE_SIZE * 8;

repeat:
        if (page < end_page)
                this_end_bit = PAGE_SIZE * 8 - 1;
        else
                this_end_bit = end_bit;

        data = lowmem_page_address(bitmap[page].page);

        if (mode == BITMAP_OP_TEST_ALL_SET) {
                while (bit <= this_end_bit) {
                        if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
                                do {
                                        if (data[bit / BITS_PER_LONG] != -1)
                                                return false;
                                        bit += BITS_PER_LONG;
                                } while (this_end_bit >= bit + BITS_PER_LONG - 1);
                                continue;
                        }
                        if (!test_bit(bit, data))
                                return false;
                        bit++;
                }
        } else if (mode == BITMAP_OP_TEST_ALL_CLEAR) {
                while (bit <= this_end_bit) {
                        if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
                                do {
                                        if (data[bit / BITS_PER_LONG] != 0)
                                                return false;
                                        bit += BITS_PER_LONG;
                                } while (this_end_bit >= bit + BITS_PER_LONG - 1);
                                continue;
                        }
                        if (test_bit(bit, data))
                                return false;
                        bit++;
                }
        } else if (mode == BITMAP_OP_SET) {
                while (bit <= this_end_bit) {
                        if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
                                do {
                                        data[bit / BITS_PER_LONG] = -1;
                                        bit += BITS_PER_LONG;
                                } while (this_end_bit >= bit + BITS_PER_LONG - 1);
                                continue;
                        }
                        __set_bit(bit, data);
                        bit++;
                }
        } else if (mode == BITMAP_OP_CLEAR) {
                if (!bit && this_end_bit == PAGE_SIZE * 8 - 1)
                        clear_page(data);
                else while (bit <= this_end_bit) {
                        if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
                                do {
                                        data[bit / BITS_PER_LONG] = 0;
                                        bit += BITS_PER_LONG;
                                } while (this_end_bit >= bit + BITS_PER_LONG - 1);
                                continue;
                        }
                        __clear_bit(bit, data);
                        bit++;
                }
        } else {
                BUG();
        }

        if (unlikely(page < end_page)) {
                bit = 0;
                page++;
                goto repeat;
        }

        return true;
}

static void block_bitmap_copy(struct dm_integrity_c *ic, struct page_list *dst, struct page_list *src)
{
        unsigned n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);
        unsigned i;

        for (i = 0; i < n_bitmap_pages; i++) {
                unsigned long *dst_data = lowmem_page_address(dst[i].page);
                unsigned long *src_data = lowmem_page_address(src[i].page);
                copy_page(dst_data, src_data);
        }
}

static struct bitmap_block_status *sector_to_bitmap_block(struct dm_integrity_c *ic, sector_t sector)
{
        unsigned bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
        unsigned bitmap_block = bit / (BITMAP_BLOCK_SIZE * 8);

        BUG_ON(bitmap_block >= ic->n_bitmap_blocks);
        return &ic->bbs[bitmap_block];
}

static void access_journal_check(struct dm_integrity_c *ic, unsigned section, unsigned offset,
                                 bool e, const char *function)
{
#if defined(CONFIG_DM_DEBUG) || defined(INTERNAL_VERIFY)
        unsigned limit = e ? ic->journal_section_entries : ic->journal_section_sectors;

        if (unlikely(section >= ic->journal_sections) ||
            unlikely(offset >= limit)) {
                DMCRIT("%s: invalid access at (%u,%u), limit (%u,%u)",
                       function, section, offset, ic->journal_sections, limit);
                BUG();
        }
#endif
}

static void page_list_location(struct dm_integrity_c *ic, unsigned section, unsigned offset,
                               unsigned *pl_index, unsigned *pl_offset)
{
        unsigned sector;

        access_journal_check(ic, section, offset, false, "page_list_location");

        sector = section * ic->journal_section_sectors + offset;

        *pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
        *pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
}

static struct journal_sector *access_page_list(struct dm_integrity_c *ic, struct page_list *pl,
                                               unsigned section, unsigned offset, unsigned *n_sectors)
{
        unsigned pl_index, pl_offset;
        char *va;

        page_list_location(ic, section, offset, &pl_index, &pl_offset);

        if (n_sectors)
                *n_sectors = (PAGE_SIZE - pl_offset) >> SECTOR_SHIFT;

        va = lowmem_page_address(pl[pl_index].page);

        return (struct journal_sector *)(va + pl_offset);
}

static struct journal_sector *access_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset)
{
        return access_page_list(ic, ic->journal, section, offset, NULL);
}

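/*
 * Journal entries are striped across the first JOURNAL_BLOCK_SECTORS (8)
 * sectors of a section: entry n lives in sector n % 8 at slot n / 8.
 * The journal data blocks for the entries follow those 8 sectors.
 */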
static struct journal_entry *access_journal_entry(struct dm_integrity_c *ic, unsigned section, unsigned n)
{
        unsigned rel_sector, offset;
        struct journal_sector *js;

        access_journal_check(ic, section, n, true, "access_journal_entry");

        rel_sector = n % JOURNAL_BLOCK_SECTORS;
        offset = n / JOURNAL_BLOCK_SECTORS;

        js = access_journal(ic, section, rel_sector);
        return (struct journal_entry *)((char *)js + offset * ic->journal_entry_size);
}

static struct journal_sector *access_journal_data(struct dm_integrity_c *ic, unsigned section, unsigned n)
{
        n <<= ic->sb->log2_sectors_per_block;

        n += JOURNAL_BLOCK_SECTORS;

        access_journal_check(ic, section, n, false, "access_journal_data");

        return access_journal(ic, section, n);
}

static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result[JOURNAL_MAC_SIZE])
{
        SHASH_DESC_ON_STACK(desc, ic->journal_mac);
        int r;
        unsigned j, size;

        desc->tfm = ic->journal_mac;

        r = crypto_shash_init(desc);
        if (unlikely(r < 0)) {
                dm_integrity_io_error(ic, "crypto_shash_init", r);
                goto err;
        }

        if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) {
                __le64 section_le;

                r = crypto_shash_update(desc, (__u8 *)&ic->sb->salt, SALT_SIZE);
                if (unlikely(r < 0)) {
                        dm_integrity_io_error(ic, "crypto_shash_update", r);
                        goto err;
                }

                section_le = cpu_to_le64(section);
                r = crypto_shash_update(desc, (__u8 *)&section_le, sizeof section_le);
                if (unlikely(r < 0)) {
                        dm_integrity_io_error(ic, "crypto_shash_update", r);
                        goto err;
                }
        }

        for (j = 0; j < ic->journal_section_entries; j++) {
                struct journal_entry *je = access_journal_entry(ic, section, j);
                r = crypto_shash_update(desc, (__u8 *)&je->u.sector, sizeof je->u.sector);
                if (unlikely(r < 0)) {
                        dm_integrity_io_error(ic, "crypto_shash_update", r);
                        goto err;
                }
        }

        size = crypto_shash_digestsize(ic->journal_mac);

        if (likely(size <= JOURNAL_MAC_SIZE)) {
                r = crypto_shash_final(desc, result);
                if (unlikely(r < 0)) {
                        dm_integrity_io_error(ic, "crypto_shash_final", r);
                        goto err;
                }
                memset(result + size, 0, JOURNAL_MAC_SIZE - size);
        } else {
                __u8 digest[HASH_MAX_DIGESTSIZE];

                if (WARN_ON(size > sizeof(digest))) {
                        dm_integrity_io_error(ic, "digest_size", -EINVAL);
                        goto err;
                }
                r = crypto_shash_final(desc, digest);
                if (unlikely(r < 0)) {
                        dm_integrity_io_error(ic, "crypto_shash_final", r);
                        goto err;
                }
                memcpy(result, digest, JOURNAL_MAC_SIZE);
        }

        return;
err:
        memset(result, 0, JOURNAL_MAC_SIZE);
}

static void rw_section_mac(struct dm_integrity_c *ic, unsigned section, bool wr)
{
        __u8 result[JOURNAL_MAC_SIZE];
        unsigned j;

        if (!ic->journal_mac)
                return;

        section_mac(ic, section, result);

        for (j = 0; j < JOURNAL_BLOCK_SECTORS; j++) {
                struct journal_sector *js = access_journal(ic, section, j);

                if (likely(wr))
                        memcpy(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR);
                else {
                        if (memcmp(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR)) {
                                dm_integrity_io_error(ic, "journal mac", -EILSEQ);
                                dm_audit_log_target(DM_MSG_PREFIX, "mac-journal", ic->ti, 0);
                        }
                }
        }
}

static void complete_journal_op(void *context)
{
        struct journal_completion *comp = context;
        BUG_ON(!atomic_read(&comp->in_flight));
        if (likely(atomic_dec_and_test(&comp->in_flight)))
                complete(&comp->comp);
}

static void xor_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
                        unsigned n_sections, struct journal_completion *comp)
{
        struct async_submit_ctl submit;
        size_t n_bytes = (size_t)(n_sections * ic->journal_section_sectors) << SECTOR_SHIFT;
        unsigned pl_index, pl_offset, section_index;
        struct page_list *source_pl, *target_pl;

        if (likely(encrypt)) {
                source_pl = ic->journal;
                target_pl = ic->journal_io;
        } else {
                source_pl = ic->journal_io;
                target_pl = ic->journal;
        }

        page_list_location(ic, section, 0, &pl_index, &pl_offset);

        atomic_add(roundup(pl_offset + n_bytes, PAGE_SIZE) >> PAGE_SHIFT, &comp->in_flight);

        init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL, complete_journal_op, comp, NULL);

        section_index = pl_index;

        do {
                size_t this_step;
                struct page *src_pages[2];
                struct page *dst_page;

                while (unlikely(pl_index == section_index)) {
                        unsigned dummy;
                        if (likely(encrypt))
                                rw_section_mac(ic, section, true);
                        section++;
                        n_sections--;
                        if (!n_sections)
                                break;
                        page_list_location(ic, section, 0, &section_index, &dummy);
                }

                this_step = min(n_bytes, (size_t)PAGE_SIZE - pl_offset);
                dst_page = target_pl[pl_index].page;
                src_pages[0] = source_pl[pl_index].page;
                src_pages[1] = ic->journal_xor[pl_index].page;

                async_xor(dst_page, src_pages, pl_offset, 2, this_step, &submit);

                pl_index++;
                pl_offset = 0;
                n_bytes -= this_step;
        } while (n_bytes);

        BUG_ON(n_sections);

        async_tx_issue_pending_all();
}

static void complete_journal_encrypt(struct crypto_async_request *req, int err)
{
        struct journal_completion *comp = req->data;
        if (unlikely(err)) {
                if (likely(err == -EINPROGRESS)) {
                        complete(&comp->ic->crypto_backoff);
                        return;
                }
                dm_integrity_io_error(comp->ic, "asynchronous encrypt", err);
        }
        complete_journal_op(comp);
}

static bool do_crypt(bool encrypt, struct skcipher_request *req, struct journal_completion *comp)
{
        int r;
        skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                      complete_journal_encrypt, comp);
        if (likely(encrypt))
                r = crypto_skcipher_encrypt(req);
        else
                r = crypto_skcipher_decrypt(req);
        if (likely(!r))
                return false;
        if (likely(r == -EINPROGRESS))
                return true;
        if (likely(r == -EBUSY)) {
                wait_for_completion(&comp->ic->crypto_backoff);
                reinit_completion(&comp->ic->crypto_backoff);
                return true;
        }
        dm_integrity_io_error(comp->ic, "encrypt", r);
        return false;
}

static void crypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
                          unsigned n_sections, struct journal_completion *comp)
{
        struct scatterlist **source_sg;
        struct scatterlist **target_sg;

        atomic_add(2, &comp->in_flight);

        if (likely(encrypt)) {
                source_sg = ic->journal_scatterlist;
                target_sg = ic->journal_io_scatterlist;
        } else {
                source_sg = ic->journal_io_scatterlist;
                target_sg = ic->journal_scatterlist;
        }

        do {
                struct skcipher_request *req;
                unsigned ivsize;
                char *iv;

                if (likely(encrypt))
                        rw_section_mac(ic, section, true);

                req = ic->sk_requests[section];
                ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
                iv = req->iv;

                memcpy(iv, iv + ivsize, ivsize);

                req->src = source_sg[section];
                req->dst = target_sg[section];

                if (unlikely(do_crypt(encrypt, req, comp)))
                        atomic_inc(&comp->in_flight);

                section++;
                n_sections--;
        } while (n_sections);

        atomic_dec(&comp->in_flight);
        complete_journal_op(comp);
}

static void encrypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
                            unsigned n_sections, struct journal_completion *comp)
{
        if (ic->journal_xor)
                return xor_journal(ic, encrypt, section, n_sections, comp);
        else
                return crypt_journal(ic, encrypt, section, n_sections, comp);
}

static void complete_journal_io(unsigned long error, void *context)
{
        struct journal_completion *comp = context;
        if (unlikely(error != 0))
                dm_integrity_io_error(comp->ic, "writing journal", -EIO);
        complete_journal_op(comp);
}

static void rw_journal_sectors(struct dm_integrity_c *ic, int op, int op_flags,
                               unsigned sector, unsigned n_sectors, struct journal_completion *comp)
{
        struct dm_io_request io_req;
        struct dm_io_region io_loc;
        unsigned pl_index, pl_offset;
        int r;

        if (unlikely(dm_integrity_failed(ic))) {
                if (comp)
                        complete_journal_io(-1UL, comp);
                return;
        }

        pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
        pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);

        io_req.bi_op = op;
        io_req.bi_op_flags = op_flags;
        io_req.mem.type = DM_IO_PAGE_LIST;
        if (ic->journal_io)
                io_req.mem.ptr.pl = &ic->journal_io[pl_index];
        else
                io_req.mem.ptr.pl = &ic->journal[pl_index];
        io_req.mem.offset = pl_offset;
        if (likely(comp != NULL)) {
                io_req.notify.fn = complete_journal_io;
                io_req.notify.context = comp;
        } else {
                io_req.notify.fn = NULL;
        }
        io_req.client = ic->io;
        io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev;
        io_loc.sector = ic->start + SB_SECTORS + sector;
        io_loc.count = n_sectors;

        r = dm_io(&io_req, 1, &io_loc, NULL);
        if (unlikely(r)) {
                dm_integrity_io_error(ic, op == REQ_OP_READ ? "reading journal" : "writing journal", r);
                if (comp) {
                        WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
                        complete_journal_io(-1UL, comp);
                }
        }
}

static void rw_journal(struct dm_integrity_c *ic, int op, int op_flags, unsigned section,
                       unsigned n_sections, struct journal_completion *comp)
{
        unsigned sector, n_sectors;

        sector = section * ic->journal_section_sectors;
        n_sectors = n_sections * ic->journal_section_sectors;

        rw_journal_sectors(ic, op, op_flags, sector, n_sectors, comp);
}

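/*
 * Write out (and, when journal encryption is configured, encrypt/MAC)
 * a run of committed journal sections. If the run wraps past the end
 * of the journal, it is issued as two writes, so io_comp.in_flight is
 * primed with 2 instead of 1.
 */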
static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsigned commit_sections)
{
        struct journal_completion io_comp;
        struct journal_completion crypt_comp_1;
        struct journal_completion crypt_comp_2;
        unsigned i;

        io_comp.ic = ic;
        init_completion(&io_comp.comp);

        if (commit_start + commit_sections <= ic->journal_sections) {
                io_comp.in_flight = (atomic_t)ATOMIC_INIT(1);
                if (ic->journal_io) {
                        crypt_comp_1.ic = ic;
                        init_completion(&crypt_comp_1.comp);
                        crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
                        encrypt_journal(ic, true, commit_start, commit_sections, &crypt_comp_1);
                        wait_for_completion_io(&crypt_comp_1.comp);
                } else {
                        for (i = 0; i < commit_sections; i++)
                                rw_section_mac(ic, commit_start + i, true);
                }
                rw_journal(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, commit_start,
                           commit_sections, &io_comp);
        } else {
                unsigned to_end;
                io_comp.in_flight = (atomic_t)ATOMIC_INIT(2);
                to_end = ic->journal_sections - commit_start;
                if (ic->journal_io) {
                        crypt_comp_1.ic = ic;
                        init_completion(&crypt_comp_1.comp);
                        crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
                        encrypt_journal(ic, true, commit_start, to_end, &crypt_comp_1);
                        if (try_wait_for_completion(&crypt_comp_1.comp)) {
                                rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
                                reinit_completion(&crypt_comp_1.comp);
                                crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
                                encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_1);
                                wait_for_completion_io(&crypt_comp_1.comp);
                        } else {
                                crypt_comp_2.ic = ic;
                                init_completion(&crypt_comp_2.comp);
                                crypt_comp_2.in_flight = (atomic_t)ATOMIC_INIT(0);
                                encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_2);
                                wait_for_completion_io(&crypt_comp_1.comp);
                                rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
                                wait_for_completion_io(&crypt_comp_2.comp);
                        }
                } else {
                        for (i = 0; i < to_end; i++)
                                rw_section_mac(ic, commit_start + i, true);
                        rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
                        for (i = 0; i < commit_sections - to_end; i++)
                                rw_section_mac(ic, i, true);
                }
                rw_journal(ic, REQ_OP_WRITE, REQ_FUA, 0, commit_sections - to_end, &io_comp);
        }

        wait_for_completion_io(&io_comp.comp);
}

static void copy_from_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset,
                              unsigned n_sectors, sector_t target, io_notify_fn fn, void *data)
{
        struct dm_io_request io_req;
        struct dm_io_region io_loc;
        int r;
        unsigned sector, pl_index, pl_offset;

        BUG_ON((target | n_sectors | offset) & (unsigned)(ic->sectors_per_block - 1));

        if (unlikely(dm_integrity_failed(ic))) {
                fn(-1UL, data);
                return;
        }

        sector = section * ic->journal_section_sectors + JOURNAL_BLOCK_SECTORS + offset;

        pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
        pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);

        io_req.bi_op = REQ_OP_WRITE;
        io_req.bi_op_flags = 0;
        io_req.mem.type = DM_IO_PAGE_LIST;
        io_req.mem.ptr.pl = &ic->journal[pl_index];
        io_req.mem.offset = pl_offset;
        io_req.notify.fn = fn;
        io_req.notify.context = data;
        io_req.client = ic->io;
        io_loc.bdev = ic->dev->bdev;
        io_loc.sector = target;
        io_loc.count = n_sectors;

        r = dm_io(&io_req, 1, &io_loc, NULL);
        if (unlikely(r)) {
                WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
                fn(-1UL, data);
        }
}

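/*
 * Half-open interval overlap test: [a, a + n) and [b, b + m) overlap
 * iff a < b + m && a + n > b.
 */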
static bool ranges_overlap(struct dm_integrity_range *range1, struct dm_integrity_range *range2)
{
        return range1->logical_sector < range2->logical_sector + range2->n_sectors &&
               range1->logical_sector + range1->n_sectors > range2->logical_sector;
}

static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range, bool check_waiting)
{
        struct rb_node **n = &ic->in_progress.rb_node;
        struct rb_node *parent;

        BUG_ON((new_range->logical_sector | new_range->n_sectors) & (unsigned)(ic->sectors_per_block - 1));

        if (likely(check_waiting)) {
                struct dm_integrity_range *range;
                list_for_each_entry(range, &ic->wait_list, wait_entry) {
                        if (unlikely(ranges_overlap(range, new_range)))
                                return false;
                }
        }

        parent = NULL;

        while (*n) {
                struct dm_integrity_range *range = container_of(*n, struct dm_integrity_range, node);

                parent = *n;
                if (new_range->logical_sector + new_range->n_sectors <= range->logical_sector)
                        n = &range->node.rb_left;
                else if (new_range->logical_sector >= range->logical_sector + range->n_sectors)
                        n = &range->node.rb_right;
                else
                        return false;
        }

        rb_link_node(&new_range->node, parent, n);
        rb_insert_color(&new_range->node, &ic->in_progress);

        return true;
}

static void remove_range_unlocked(struct dm_integrity_c *ic, struct dm_integrity_range *range)
{
        rb_erase(&range->node, &ic->in_progress);
        while (unlikely(!list_empty(&ic->wait_list))) {
                struct dm_integrity_range *last_range =
                        list_first_entry(&ic->wait_list, struct dm_integrity_range, wait_entry);
                struct task_struct *last_range_task;
                last_range_task = last_range->task;
                list_del(&last_range->wait_entry);
                if (!add_new_range(ic, last_range, false)) {
                        last_range->task = last_range_task;
                        list_add(&last_range->wait_entry, &ic->wait_list);
                        break;
                }
                last_range->waiting = false;
                wake_up_process(last_range_task);
        }
}

static void remove_range(struct dm_integrity_c *ic, struct dm_integrity_range *range)
{
        unsigned long flags;

        spin_lock_irqsave(&ic->endio_wait.lock, flags);
        remove_range_unlocked(ic, range);
        spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
}

static void wait_and_add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range)
{
        new_range->waiting = true;
        list_add_tail(&new_range->wait_entry, &ic->wait_list);
        new_range->task = current;
        do {
                __set_current_state(TASK_UNINTERRUPTIBLE);
                spin_unlock_irq(&ic->endio_wait.lock);
                io_schedule();
                spin_lock_irq(&ic->endio_wait.lock);
        } while (unlikely(new_range->waiting));
}

static void add_new_range_and_wait(struct dm_integrity_c *ic, struct dm_integrity_range *new_range)
{
        if (unlikely(!add_new_range(ic, new_range, true)))
                wait_and_add_new_range(ic, new_range);
}

static void init_journal_node(struct journal_node *node)
{
        RB_CLEAR_NODE(&node->node);
        node->sector = (sector_t)-1;
}

static void add_journal_node(struct dm_integrity_c *ic, struct journal_node *node, sector_t sector)
{
        struct rb_node **link;
        struct rb_node *parent;

        node->sector = sector;
        BUG_ON(!RB_EMPTY_NODE(&node->node));

        link = &ic->journal_tree_root.rb_node;
        parent = NULL;

        while (*link) {
                struct journal_node *j;
                parent = *link;
                j = container_of(parent, struct journal_node, node);
                if (sector < j->sector)
                        link = &j->node.rb_left;
                else
                        link = &j->node.rb_right;
        }

        rb_link_node(&node->node, parent, link);
        rb_insert_color(&node->node, &ic->journal_tree_root);
}

static void remove_journal_node(struct dm_integrity_c *ic, struct journal_node *node)
{
        BUG_ON(RB_EMPTY_NODE(&node->node));
        rb_erase(&node->node, &ic->journal_tree_root);
        init_journal_node(node);
}

#define NOT_FOUND       (-1U)

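/*
 * Nodes with equal sectors are inserted to the right in
 * add_journal_node(), so this walk ends on the rightmost (most
 * recently added) matching node; 'found' therefore refers to the
 * newest journal entry for the sector.
 */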
static unsigned find_journal_node(struct dm_integrity_c *ic, sector_t sector, sector_t *next_sector)
{
        struct rb_node *n = ic->journal_tree_root.rb_node;
        unsigned found = NOT_FOUND;
        *next_sector = (sector_t)-1;
        while (n) {
                struct journal_node *j = container_of(n, struct journal_node, node);
                if (sector == j->sector)
                        found = j - ic->journal_tree;
                if (sector < j->sector) {
                        *next_sector = j->sector;
                        n = j->node.rb_left;
                } else {
                        n = j->node.rb_right;
                }
        }

        return found;
}

static bool test_journal_node(struct dm_integrity_c *ic, unsigned pos, sector_t sector)
{
        struct journal_node *node, *next_node;
        struct rb_node *next;

        if (unlikely(pos >= ic->journal_entries))
                return false;
        node = &ic->journal_tree[pos];
        if (unlikely(RB_EMPTY_NODE(&node->node)))
                return false;
        if (unlikely(node->sector != sector))
                return false;

        next = rb_next(&node->node);
        if (unlikely(!next))
                return true;

        next_node = container_of(next, struct journal_node, node);
        return next_node->sector != sector;
}

static bool find_newer_committed_node(struct dm_integrity_c *ic, struct journal_node *node)
{
        struct rb_node *next;
        struct journal_node *next_node;
        unsigned next_section;

        BUG_ON(RB_EMPTY_NODE(&node->node));

        next = rb_next(&node->node);
        if (unlikely(!next))
                return false;

        next_node = container_of(next, struct journal_node, node);

        if (next_node->sector != node->sector)
                return false;

        next_section = (unsigned)(next_node - ic->journal_tree) / ic->journal_section_entries;
        if (next_section >= ic->committed_section &&
            next_section < ic->committed_section + ic->n_committed_sections)
                return true;
        if (next_section + ic->journal_sections < ic->committed_section + ic->n_committed_sections)
                return true;

        return false;
}

#define TAG_READ        0
#define TAG_WRITE       1
#define TAG_CMP         2

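/*
 * Read, write or compare a run of metadata tags. For TAG_CMP, 0 means
 * all tags matched; a non-zero return is the number of bytes remaining
 * at the first definite mismatch. The MAY_BE_* flags track whether a
 * mismatching region could still be a valid hash or the DISCARD_FILLER
 * pattern left by a discard.
 */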
static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, sector_t *metadata_block,
                               unsigned *metadata_offset, unsigned total_size, int op)
{
#define MAY_BE_FILLER           1
#define MAY_BE_HASH             2
        unsigned hash_offset = 0;
        unsigned may_be = MAY_BE_HASH | (ic->discard ? MAY_BE_FILLER : 0);

        do {
                unsigned char *data, *dp;
                struct dm_buffer *b;
                unsigned to_copy;
                int r;

                r = dm_integrity_failed(ic);
                if (unlikely(r))
                        return r;

                data = dm_bufio_read(ic->bufio, *metadata_block, &b);
                if (IS_ERR(data))
                        return PTR_ERR(data);

                to_copy = min((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - *metadata_offset, total_size);
                dp = data + *metadata_offset;
                if (op == TAG_READ) {
                        memcpy(tag, dp, to_copy);
                } else if (op == TAG_WRITE) {
                        if (memcmp(dp, tag, to_copy)) {
                                memcpy(dp, tag, to_copy);
                                dm_bufio_mark_partial_buffer_dirty(b, *metadata_offset, *metadata_offset + to_copy);
                        }
                } else {
                        /* op == TAG_CMP */

                        if (likely(is_power_of_2(ic->tag_size))) {
                                if (unlikely(memcmp(dp, tag, to_copy)))
                                        if (unlikely(!ic->discard) ||
                                            unlikely(memchr_inv(dp, DISCARD_FILLER, to_copy) != NULL))
                                                goto thorough_test;
                        } else {
                                unsigned i, ts;
thorough_test:
                                ts = total_size;

                                for (i = 0; i < to_copy; i++, ts--) {
                                        if (unlikely(dp[i] != tag[i]))
                                                may_be &= ~MAY_BE_HASH;
                                        if (likely(dp[i] != DISCARD_FILLER))
                                                may_be &= ~MAY_BE_FILLER;
                                        hash_offset++;
                                        if (unlikely(hash_offset == ic->tag_size)) {
                                                if (unlikely(!may_be)) {
                                                        dm_bufio_release(b);
                                                        return ts;
                                                }
                                                hash_offset = 0;
                                                may_be = MAY_BE_HASH | (ic->discard ? MAY_BE_FILLER : 0);
                                        }
                                }
                        }
                }
                dm_bufio_release(b);

                tag += to_copy;
                *metadata_offset += to_copy;
                if (unlikely(*metadata_offset == 1U << SECTOR_SHIFT << ic->log2_buffer_sectors)) {
                        (*metadata_block)++;
                        *metadata_offset = 0;
                }

                if (unlikely(!is_power_of_2(ic->tag_size)))
                        hash_offset = (hash_offset + to_copy) % ic->tag_size;

                total_size -= to_copy;
        } while (unlikely(total_size));

        return 0;
#undef MAY_BE_FILLER
#undef MAY_BE_HASH
}

1493 struct flush_request {
1494         struct dm_io_request io_req;
1495         struct dm_io_region io_reg;
1496         struct dm_integrity_c *ic;
1497         struct completion comp;
1498 };
1499
1500 static void flush_notify(unsigned long error, void *fr_)
1501 {
1502         struct flush_request *fr = fr_;
1503         if (unlikely(error != 0))
1504                 dm_integrity_io_error(fr->ic, "flushing disk cache", -EIO);
1505         complete(&fr->comp);
1506 }
1507
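/*
 * Write out all dirty bufio (tag area) buffers.  When a separate metadata
 * device is used and flush_data is set, an empty REQ_PREFLUSH request is
 * additionally issued to the data device, concurrently with the buffer
 * writes, and is waited for only after the dirty buffers are written.
 */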
1508 static void dm_integrity_flush_buffers(struct dm_integrity_c *ic, bool flush_data)
1509 {
1510         int r;
1511         struct flush_request fr;
1513
1514         if (!ic->meta_dev)
1515                 flush_data = false;
1516         if (flush_data) {
1517                 fr.io_req.bi_op = REQ_OP_WRITE;
1518                 fr.io_req.bi_op_flags = REQ_PREFLUSH | REQ_SYNC;
1519                 fr.io_req.mem.type = DM_IO_KMEM;
1520                 fr.io_req.mem.ptr.addr = NULL;
1521                 fr.io_req.notify.fn = flush_notify;
1522                 fr.io_req.notify.context = &fr;
1523                 fr.io_req.client = dm_bufio_get_dm_io_client(ic->bufio);
1524                 fr.io_reg.bdev = ic->dev->bdev;
1525                 fr.io_reg.sector = 0;
1526                 fr.io_reg.count = 0;
1527                 fr.ic = ic;
1528                 init_completion(&fr.comp);
1529                 r = dm_io(&fr.io_req, 1, &fr.io_reg, NULL);
1530                 BUG_ON(r);
1531         }
1532
1533         r = dm_bufio_write_dirty_buffers(ic->bufio);
1534         if (unlikely(r))
1535                 dm_integrity_io_error(ic, "writing tags", r);
1536
1537         if (flush_data)
1538                 wait_for_completion(&fr.comp);
1539 }
1540
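/*
 * Wait for an endio event.  Must be called with ic->endio_wait.lock held;
 * the lock is dropped across io_schedule() and re-acquired before return.
 */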
1541 static void sleep_on_endio_wait(struct dm_integrity_c *ic)
1542 {
1543         DECLARE_WAITQUEUE(wait, current);
1544         __add_wait_queue(&ic->endio_wait, &wait);
1545         __set_current_state(TASK_UNINTERRUPTIBLE);
1546         spin_unlock_irq(&ic->endio_wait.lock);
1547         io_schedule();
1548         spin_lock_irq(&ic->endio_wait.lock);
1549         __remove_wait_queue(&ic->endio_wait, &wait);
1550 }
1551
1552 static void autocommit_fn(struct timer_list *t)
1553 {
1554         struct dm_integrity_c *ic = from_timer(ic, t, autocommit_timer);
1555
1556         if (likely(!dm_integrity_failed(ic)))
1557                 queue_work(ic->commit_wq, &ic->commit_work);
1558 }
1559
1560 static void schedule_autocommit(struct dm_integrity_c *ic)
1561 {
1562         if (!timer_pending(&ic->autocommit_timer))
1563                 mod_timer(&ic->autocommit_timer, jiffies + ic->autocommit_jiffies);
1564 }
1565
1566 static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
1567 {
1568         struct bio *bio;
1569         unsigned long flags;
1570
1571         spin_lock_irqsave(&ic->endio_wait.lock, flags);
1572         bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
1573         bio_list_add(&ic->flush_bio_list, bio);
1574         spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
1575
1576         queue_work(ic->commit_wq, &ic->commit_work);
1577 }
1578
1579 static void do_endio(struct dm_integrity_c *ic, struct bio *bio)
1580 {
1581         int r = dm_integrity_failed(ic);
1582         if (unlikely(r) && !bio->bi_status)
1583                 bio->bi_status = errno_to_blk_status(r);
1584         if (unlikely(ic->synchronous_mode) && bio_op(bio) == REQ_OP_WRITE) {
1585                 unsigned long flags;
1586                 spin_lock_irqsave(&ic->endio_wait.lock, flags);
1587                 bio_list_add(&ic->synchronous_bios, bio);
1588                 queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
1589                 spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
1590                 return;
1591         }
1592         bio_endio(bio);
1593 }
1594
1595 static void do_endio_flush(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
1596 {
1597         struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
1598
1599         if (unlikely(dio->fua) && likely(!bio->bi_status) && likely(!dm_integrity_failed(ic)))
1600                 submit_flush_bio(ic, dio);
1601         else
1602                 do_endio(ic, bio);
1603 }
1604
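/*
 * Drop one reference on an in-flight I/O.  The last reference releases the
 * range; if the bio was only partially processed (the locked range was
 * smaller than the bio), the remainder is re-queued to the offload
 * workqueue, otherwise the bio is completed.
 */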
1605 static void dec_in_flight(struct dm_integrity_io *dio)
1606 {
1607         if (atomic_dec_and_test(&dio->in_flight)) {
1608                 struct dm_integrity_c *ic = dio->ic;
1609                 struct bio *bio;
1610
1611                 remove_range(ic, &dio->range);
1612
1613                 if (dio->op == REQ_OP_WRITE || unlikely(dio->op == REQ_OP_DISCARD))
1614                         schedule_autocommit(ic);
1615
1616                 bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
1617
1618                 if (unlikely(dio->bi_status) && !bio->bi_status)
1619                         bio->bi_status = dio->bi_status;
1620                 if (likely(!bio->bi_status) && unlikely(bio_sectors(bio) != dio->range.n_sectors)) {
1621                         dio->range.logical_sector += dio->range.n_sectors;
1622                         bio_advance(bio, dio->range.n_sectors << SECTOR_SHIFT);
1623                         INIT_WORK(&dio->work, integrity_bio_wait);
1624                         queue_work(ic->offload_wq, &dio->work);
1625                         return;
1626                 }
1627                 do_endio_flush(ic, dio);
1628         }
1629 }
1630
1631 static void integrity_end_io(struct bio *bio)
1632 {
1633         struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
1634
1635         dm_bio_restore(&dio->bio_details, bio);
1636         if (bio->bi_integrity)
1637                 bio->bi_opf |= REQ_INTEGRITY;
1638
1639         if (dio->completion)
1640                 complete(dio->completion);
1641
1642         dec_in_flight(dio);
1643 }
1644
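/*
 * Compute the tag for one block:
 *
 *	result = H(salt || le64(sector) || data)	(with SB_FLAG_FIXED_HMAC)
 *	result = H(le64(sector) || data)		(otherwise)
 *
 * A digest shorter than the tag size is zero-padded.  If the hash
 * unexpectedly fails, the result is filled with random bytes so that a
 * later comparison cannot falsely succeed.
 */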
1645 static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector,
1646                                       const char *data, char *result)
1647 {
1648         __le64 sector_le = cpu_to_le64(sector);
1649         SHASH_DESC_ON_STACK(req, ic->internal_hash);
1650         int r;
1651         unsigned digest_size;
1652
1653         req->tfm = ic->internal_hash;
1654
1655         r = crypto_shash_init(req);
1656         if (unlikely(r < 0)) {
1657                 dm_integrity_io_error(ic, "crypto_shash_init", r);
1658                 goto failed;
1659         }
1660
1661         if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) {
1662                 r = crypto_shash_update(req, (__u8 *)&ic->sb->salt, SALT_SIZE);
1663                 if (unlikely(r < 0)) {
1664                         dm_integrity_io_error(ic, "crypto_shash_update", r);
1665                         goto failed;
1666                 }
1667         }
1668
1669         r = crypto_shash_update(req, (const __u8 *)&sector_le, sizeof sector_le);
1670         if (unlikely(r < 0)) {
1671                 dm_integrity_io_error(ic, "crypto_shash_update", r);
1672                 goto failed;
1673         }
1674
1675         r = crypto_shash_update(req, data, ic->sectors_per_block << SECTOR_SHIFT);
1676         if (unlikely(r < 0)) {
1677                 dm_integrity_io_error(ic, "crypto_shash_update", r);
1678                 goto failed;
1679         }
1680
1681         r = crypto_shash_final(req, result);
1682         if (unlikely(r < 0)) {
1683                 dm_integrity_io_error(ic, "crypto_shash_final", r);
1684                 goto failed;
1685         }
1686
1687         digest_size = crypto_shash_digestsize(ic->internal_hash);
1688         if (unlikely(digest_size < ic->tag_size))
1689                 memset(result + digest_size, 0, ic->tag_size - digest_size);
1690
1691         return;
1692
1693 failed:
1694         /* this shouldn't happen; the hash functions have no reason to fail */
1695         get_random_bytes(result, ic->tag_size);
1696 }
1697
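/*
 * Work function that processes the tag area for one bio.  With an internal
 * hash, checksums are computed for every block and written (writes) or
 * compared against the stored tags (reads); discards overwrite the covered
 * tags with DISCARD_FILLER.  Without an internal hash, tags are copied
 * between the bio integrity payload and the tag area.
 */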
1698 static void integrity_metadata(struct work_struct *w)
1699 {
1700         struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
1701         struct dm_integrity_c *ic = dio->ic;
1702         int r;
1704
1705         if (ic->internal_hash) {
1706                 struct bvec_iter iter;
1707                 struct bio_vec bv;
1708                 unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
1709                 struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
1710                 char *checksums;
1711                 unsigned extra_space = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0;
1712                 char checksums_onstack[max((size_t)HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
1713                 sector_t sector;
1714                 unsigned sectors_to_process;
1715
1716                 if (unlikely(ic->mode == 'R'))
1717                         goto skip_io;
1718
1719                 if (likely(dio->op != REQ_OP_DISCARD))
1720                         checksums = kmalloc((PAGE_SIZE >> SECTOR_SHIFT >> ic->sb->log2_sectors_per_block) * ic->tag_size + extra_space,
1721                                             GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);
1722                 else
1723                         checksums = kmalloc(PAGE_SIZE, GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);
1724                 if (!checksums) {
1725                         checksums = checksums_onstack;
1726                         if (WARN_ON(extra_space &&
1727                                     digest_size > sizeof(checksums_onstack))) {
1728                                 r = -EINVAL;
1729                                 goto error;
1730                         }
1731                 }
1732
1733                 if (unlikely(dio->op == REQ_OP_DISCARD)) {
1734                         sector_t bi_sector = dio->bio_details.bi_iter.bi_sector;
1735                         unsigned bi_size = dio->bio_details.bi_iter.bi_size;
1736                         unsigned max_size = likely(checksums != checksums_onstack) ? PAGE_SIZE : HASH_MAX_DIGESTSIZE;
1737                         unsigned max_blocks = max_size / ic->tag_size;
1738                         memset(checksums, DISCARD_FILLER, max_size);
1739
1740                         while (bi_size) {
1741                                 unsigned this_step_blocks = bi_size >> (SECTOR_SHIFT + ic->sb->log2_sectors_per_block);
1742                                 this_step_blocks = min(this_step_blocks, max_blocks);
1743                                 r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
1744                                                         this_step_blocks * ic->tag_size, TAG_WRITE);
1745                                 if (unlikely(r)) {
1746                                         if (likely(checksums != checksums_onstack))
1747                                                 kfree(checksums);
1748                                         goto error;
1749                                 }
1750
1756                                 bi_size -= this_step_blocks << (SECTOR_SHIFT + ic->sb->log2_sectors_per_block);
1757                                 bi_sector += this_step_blocks << ic->sb->log2_sectors_per_block;
1758                         }
1759
1760                         if (likely(checksums != checksums_onstack))
1761                                 kfree(checksums);
1762                         goto skip_io;
1763                 }
1764
1765                 sector = dio->range.logical_sector;
1766                 sectors_to_process = dio->range.n_sectors;
1767
1768                 __bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
1769                         unsigned pos;
1770                         char *mem, *checksums_ptr;
1771
1772 again:
1773                         mem = bvec_kmap_local(&bv);
1774                         pos = 0;
1775                         checksums_ptr = checksums;
1776                         do {
1777                                 integrity_sector_checksum(ic, sector, mem + pos, checksums_ptr);
1778                                 checksums_ptr += ic->tag_size;
1779                                 sectors_to_process -= ic->sectors_per_block;
1780                                 pos += ic->sectors_per_block << SECTOR_SHIFT;
1781                                 sector += ic->sectors_per_block;
1782                         } while (pos < bv.bv_len && sectors_to_process && checksums != checksums_onstack);
1783                         kunmap_local(mem);
1784
1785                         r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
1786                                                 checksums_ptr - checksums, dio->op == REQ_OP_READ ? TAG_CMP : TAG_WRITE);
1787                         if (unlikely(r)) {
1788                                 if (r > 0) {
1789                                         char b[BDEVNAME_SIZE];
1790                                         sector_t s;
1791
1792                                         s = sector - ((r + ic->tag_size - 1) / ic->tag_size);
1793                                         DMERR_LIMIT("%s: Checksum failed at sector 0x%llx",
1794                                                     bio_devname(bio, b), s);
1795                                         r = -EILSEQ;
1796                                         atomic64_inc(&ic->number_of_mismatches);
1797                                         dm_audit_log_bio(DM_MSG_PREFIX, "integrity-checksum",
1798                                                          bio, s, 0);
1799                                 }
1800                                 if (likely(checksums != checksums_onstack))
1801                                         kfree(checksums);
1802                                 goto error;
1803                         }
1804
1805                         if (!sectors_to_process)
1806                                 break;
1807
1808                         if (unlikely(pos < bv.bv_len)) {
1809                                 bv.bv_offset += pos;
1810                                 bv.bv_len -= pos;
1811                                 goto again;
1812                         }
1813                 }
1814
1815                 if (likely(checksums != checksums_onstack))
1816                         kfree(checksums);
1817         } else {
1818                 struct bio_integrity_payload *bip = dio->bio_details.bi_integrity;
1819
1820                 if (bip) {
1821                         struct bio_vec biv;
1822                         struct bvec_iter iter;
1823                         unsigned data_to_process = dio->range.n_sectors;
1824                         sector_to_block(ic, data_to_process);
1825                         data_to_process *= ic->tag_size;
1826
1827                         bip_for_each_vec(biv, bip, iter) {
1828                                 unsigned char *tag;
1829                                 unsigned this_len;
1830
1831                                 BUG_ON(PageHighMem(biv.bv_page));
1832                                 tag = bvec_virt(&biv);
1833                                 this_len = min(biv.bv_len, data_to_process);
1834                                 r = dm_integrity_rw_tag(ic, tag, &dio->metadata_block, &dio->metadata_offset,
1835                                                         this_len, dio->op == REQ_OP_READ ? TAG_READ : TAG_WRITE);
1836                                 if (unlikely(r))
1837                                         goto error;
1838                                 data_to_process -= this_len;
1839                                 if (!data_to_process)
1840                                         break;
1841                         }
1842                 }
1843         }
1844 skip_io:
1845         dec_in_flight(dio);
1846         return;
1847 error:
1848         dio->bi_status = errno_to_blk_status(r);
1849         dec_in_flight(dio);
1850 }
1851
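/*
 * Map function: split discards on max_io_len boundaries, route empty
 * flushes to the commit thread, strip REQ_FUA (a cache flush is issued
 * instead), check bounds, block-size alignment and the size of any
 * attached integrity payload, then remap the bio to the interleaved
 * data/metadata layout and continue in dm_integrity_map_continue().
 */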
1852 static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
1853 {
1854         struct dm_integrity_c *ic = ti->private;
1855         struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
1856         struct bio_integrity_payload *bip;
1857
1858         sector_t area, offset;
1859
1860         dio->ic = ic;
1861         dio->bi_status = 0;
1862         dio->op = bio_op(bio);
1863
1864         if (unlikely(dio->op == REQ_OP_DISCARD)) {
1865                 if (ti->max_io_len) {
1866                         sector_t sec = dm_target_offset(ti, bio->bi_iter.bi_sector);
1867                         unsigned log2_max_io_len = __fls(ti->max_io_len);
1868                         sector_t start_boundary = sec >> log2_max_io_len;
1869                         sector_t end_boundary = (sec + bio_sectors(bio) - 1) >> log2_max_io_len;
1870                         if (start_boundary < end_boundary) {
1871                                 sector_t len = ti->max_io_len - (sec & (ti->max_io_len - 1));
1872                                 dm_accept_partial_bio(bio, len);
1873                         }
1874                 }
1875         }
1876
1877         if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
1878                 submit_flush_bio(ic, dio);
1879                 return DM_MAPIO_SUBMITTED;
1880         }
1881
1882         dio->range.logical_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
1883         dio->fua = dio->op == REQ_OP_WRITE && bio->bi_opf & REQ_FUA;
1884         if (unlikely(dio->fua)) {
1885                 /*
1886                  * Don't pass down the FUA flag because we have to flush the
1887                  * disk cache anyway.
1888                  */
1889                 bio->bi_opf &= ~REQ_FUA;
1890         }
1891         if (unlikely(dio->range.logical_sector + bio_sectors(bio) > ic->provided_data_sectors)) {
1892                 DMERR("Sector number out of range: 0x%llx + 0x%x > 0x%llx",
1893                       dio->range.logical_sector, bio_sectors(bio),
1894                       ic->provided_data_sectors);
1895                 return DM_MAPIO_KILL;
1896         }
1897         if (unlikely((dio->range.logical_sector | bio_sectors(bio)) & (unsigned)(ic->sectors_per_block - 1))) {
1898                 DMERR("Bio not aligned on %u sectors: 0x%llx, 0x%x",
1899                       ic->sectors_per_block,
1900                       dio->range.logical_sector, bio_sectors(bio));
1901                 return DM_MAPIO_KILL;
1902         }
1903
1904         if (ic->sectors_per_block > 1 && likely(dio->op != REQ_OP_DISCARD)) {
1905                 struct bvec_iter iter;
1906                 struct bio_vec bv;
1907                 bio_for_each_segment(bv, bio, iter) {
1908                         if (unlikely(bv.bv_len & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) {
1909                                 DMERR("Bio vector (%u,%u) is not aligned on %u-sector boundary",
1910                                         bv.bv_offset, bv.bv_len, ic->sectors_per_block);
1911                                 return DM_MAPIO_KILL;
1912                         }
1913                 }
1914         }
1915
1916         bip = bio_integrity(bio);
1917         if (!ic->internal_hash) {
1918                 if (bip) {
1919                         unsigned wanted_tag_size = bio_sectors(bio) >> ic->sb->log2_sectors_per_block;
1920                         if (ic->log2_tag_size >= 0)
1921                                 wanted_tag_size <<= ic->log2_tag_size;
1922                         else
1923                                 wanted_tag_size *= ic->tag_size;
1924                         if (unlikely(wanted_tag_size != bip->bip_iter.bi_size)) {
1925                                 DMERR("Invalid integrity data size %u, expected %u",
1926                                       bip->bip_iter.bi_size, wanted_tag_size);
1927                                 return DM_MAPIO_KILL;
1928                         }
1929                 }
1930         } else {
1931                 if (unlikely(bip != NULL)) {
1932                         DMERR("Unexpected integrity data when using internal hash");
1933                         return DM_MAPIO_KILL;
1934                 }
1935         }
1936
1937         if (unlikely(ic->mode == 'R') && unlikely(dio->op != REQ_OP_READ))
1938                 return DM_MAPIO_KILL;
1939
1940         get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
1941         dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
1942         bio->bi_iter.bi_sector = get_data_sector(ic, area, offset);
1943
1944         dm_integrity_map_continue(dio, true);
1945         return DM_MAPIO_SUBMITTED;
1946 }
1947
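/*
 * Copy bio data to (writes) or from (reads) the in-memory journal entries
 * reserved for this I/O, updating the per-sector tags and commit ids.
 * Returns true if the bio is larger than the reserved range, in which case
 * the caller must retry with the remainder.
 */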
1948 static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio,
1949                                  unsigned journal_section, unsigned journal_entry)
1950 {
1951         struct dm_integrity_c *ic = dio->ic;
1952         sector_t logical_sector;
1953         unsigned n_sectors;
1954
1955         logical_sector = dio->range.logical_sector;
1956         n_sectors = dio->range.n_sectors;
1957         do {
1958                 struct bio_vec bv = bio_iovec(bio);
1959                 char *mem;
1960
1961                 if (unlikely(bv.bv_len >> SECTOR_SHIFT > n_sectors))
1962                         bv.bv_len = n_sectors << SECTOR_SHIFT;
1963                 n_sectors -= bv.bv_len >> SECTOR_SHIFT;
1964                 bio_advance_iter(bio, &bio->bi_iter, bv.bv_len);
1965 retry_kmap:
1966                 mem = kmap_local_page(bv.bv_page);
1967                 if (likely(dio->op == REQ_OP_WRITE))
1968                         flush_dcache_page(bv.bv_page);
1969
1970                 do {
1971                         struct journal_entry *je = access_journal_entry(ic, journal_section, journal_entry);
1972
1973                         if (unlikely(dio->op == REQ_OP_READ)) {
1974                                 struct journal_sector *js;
1975                                 char *mem_ptr;
1976                                 unsigned s;
1977
1978                                 if (unlikely(journal_entry_is_inprogress(je))) {
1979                                         flush_dcache_page(bv.bv_page);
1980                                         kunmap_local(mem);
1981
1982                                         __io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
1983                                         goto retry_kmap;
1984                                 }
1985                                 smp_rmb();
1986                                 BUG_ON(journal_entry_get_sector(je) != logical_sector);
1987                                 js = access_journal_data(ic, journal_section, journal_entry);
1988                                 mem_ptr = mem + bv.bv_offset;
1989                                 s = 0;
1990                                 do {
1991                                         memcpy(mem_ptr, js, JOURNAL_SECTOR_DATA);
1992                                         *(commit_id_t *)(mem_ptr + JOURNAL_SECTOR_DATA) = je->last_bytes[s];
1993                                         js++;
1994                                         mem_ptr += 1 << SECTOR_SHIFT;
1995                                 } while (++s < ic->sectors_per_block);
1996 #ifdef INTERNAL_VERIFY
1997                                 if (ic->internal_hash) {
1998                                         char checksums_onstack[max((size_t)HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
1999
2000                                         integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack);
2001                                         if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) {
2002                                                 DMERR_LIMIT("Checksum failed when reading from journal, at sector 0x%llx",
2003                                                             logical_sector);
2004                                                 dm_audit_log_bio(DM_MSG_PREFIX, "journal-checksum",
2005                                                                  bio, logical_sector, 0);
2006                                         }
2007                                 }
2008 #endif
2009                         }
2010
2011                         if (!ic->internal_hash) {
2012                                 struct bio_integrity_payload *bip = bio_integrity(bio);
2013                                 unsigned tag_todo = ic->tag_size;
2014                                 char *tag_ptr = journal_entry_tag(ic, je);
2015
2016                                 if (bip)
2017                                         do {
2018                                                 struct bio_vec biv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
2019                                                 unsigned tag_now = min(biv.bv_len, tag_todo);
2020                                                 char *tag_addr;
2021                                                 BUG_ON(PageHighMem(biv.bv_page));
2022                                                 tag_addr = bvec_virt(&biv);
2023                                                 if (likely(dio->op == REQ_OP_WRITE))
2024                                                         memcpy(tag_ptr, tag_addr, tag_now);
2025                                                 else
2026                                                         memcpy(tag_addr, tag_ptr, tag_now);
2027                                                 bvec_iter_advance(bip->bip_vec, &bip->bip_iter, tag_now);
2028                                                 tag_ptr += tag_now;
2029                                                 tag_todo -= tag_now;
2030                                         } while (unlikely(tag_todo));
2031                                 else if (likely(dio->op == REQ_OP_WRITE))
2032                                         memset(tag_ptr, 0, tag_todo);
2033                         }
2034
2035                         if (likely(dio->op == REQ_OP_WRITE)) {
2036                                 struct journal_sector *js;
2037                                 unsigned s;
2038
2039                                 js = access_journal_data(ic, journal_section, journal_entry);
2040                                 memcpy(js, mem + bv.bv_offset, ic->sectors_per_block << SECTOR_SHIFT);
2041
2042                                 s = 0;
2043                                 do {
2044                                         je->last_bytes[s] = js[s].commit_id;
2045                                 } while (++s < ic->sectors_per_block);
2046
2047                                 if (ic->internal_hash) {
2048                                         unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
2049                                         if (unlikely(digest_size > ic->tag_size)) {
2050                                                 char checksums_onstack[HASH_MAX_DIGESTSIZE];
2051                                                 integrity_sector_checksum(ic, logical_sector, (char *)js, checksums_onstack);
2052                                                 memcpy(journal_entry_tag(ic, je), checksums_onstack, ic->tag_size);
2053                                         } else
2054                                                 integrity_sector_checksum(ic, logical_sector, (char *)js, journal_entry_tag(ic, je));
2055                                 }
2056
2057                                 journal_entry_set_sector(je, logical_sector);
2058                         }
2059                         logical_sector += ic->sectors_per_block;
2060
2061                         journal_entry++;
2062                         if (unlikely(journal_entry == ic->journal_section_entries)) {
2063                                 journal_entry = 0;
2064                                 journal_section++;
2065                                 wraparound_section(ic, &journal_section);
2066                         }
2067
2068                         bv.bv_offset += ic->sectors_per_block << SECTOR_SHIFT;
2069                 } while (bv.bv_len -= ic->sectors_per_block << SECTOR_SHIFT);
2070
2071                 if (unlikely(dio->op == REQ_OP_READ))
2072                         flush_dcache_page(bv.bv_page);
2073                 kunmap_local(mem);
2074         } while (n_sectors);
2075
2076         if (likely(dio->op == REQ_OP_WRITE)) {
2077                 smp_mb();
2078                 if (unlikely(waitqueue_active(&ic->copy_to_journal_wait)))
2079                         wake_up(&ic->copy_to_journal_wait);
2080                 if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold) {
2081                         queue_work(ic->commit_wq, &ic->commit_work);
2082                 } else {
2083                         schedule_autocommit(ic);
2084                 }
2085         } else {
2086                 remove_range(ic, &dio->range);
2087         }
2088
2089         if (unlikely(bio->bi_iter.bi_size)) {
2090                 sector_t area, offset;
2091
2092                 dio->range.logical_sector = logical_sector;
2093                 get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
2094                 dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
2095                 return true;
2096         }
2097
2098         return false;
2099 }
2100
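/*
 * The main I/O path.  In journal mode ('J'), writes reserve free journal
 * entries under endio_wait.lock and are copied into the journal by
 * __journal_read_write(); one entry covers one block, so, for example,
 * with 4KiB blocks (sectors_per_block == 8) a 32-sector write consumes
 * 32 >> 3 == 4 entries.  Reads that hit journalled data are served from
 * the journal.  All other I/O takes a range lock and is remapped to the
 * data device, with tags handled by integrity_metadata().  If from_map is
 * true, we are on the dm map path and must not sleep, so any blocking
 * case is offloaded to a workqueue.
 */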
2101 static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map)
2102 {
2103         struct dm_integrity_c *ic = dio->ic;
2104         struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
2105         unsigned journal_section, journal_entry;
2106         unsigned journal_read_pos;
2107         struct completion read_comp;
2108         bool discard_retried = false;
2109         bool need_sync_io = ic->internal_hash && dio->op == REQ_OP_READ;
2110         if (unlikely(dio->op == REQ_OP_DISCARD) && ic->mode != 'D')
2111                 need_sync_io = true;
2112
2113         if (need_sync_io && from_map) {
2114                 INIT_WORK(&dio->work, integrity_bio_wait);
2115                 queue_work(ic->offload_wq, &dio->work);
2116                 return;
2117         }
2118
2119 lock_retry:
2120         spin_lock_irq(&ic->endio_wait.lock);
2121 retry:
2122         if (unlikely(dm_integrity_failed(ic))) {
2123                 spin_unlock_irq(&ic->endio_wait.lock);
2124                 do_endio(ic, bio);
2125                 return;
2126         }
2127         dio->range.n_sectors = bio_sectors(bio);
2128         journal_read_pos = NOT_FOUND;
2129         if (ic->mode == 'J' && likely(dio->op != REQ_OP_DISCARD)) {
2130                 if (dio->op == REQ_OP_WRITE) {
2131                         unsigned next_entry, i, pos;
2132                         unsigned ws, we, range_sectors;
2133
2134                         dio->range.n_sectors = min(dio->range.n_sectors,
2135                                                    (sector_t)ic->free_sectors << ic->sb->log2_sectors_per_block);
2136                         if (unlikely(!dio->range.n_sectors)) {
2137                                 if (from_map)
2138                                         goto offload_to_thread;
2139                                 sleep_on_endio_wait(ic);
2140                                 goto retry;
2141                         }
2142                         range_sectors = dio->range.n_sectors >> ic->sb->log2_sectors_per_block;
2143                         ic->free_sectors -= range_sectors;
2144                         journal_section = ic->free_section;
2145                         journal_entry = ic->free_section_entry;
2146
2147                         next_entry = ic->free_section_entry + range_sectors;
2148                         ic->free_section_entry = next_entry % ic->journal_section_entries;
2149                         ic->free_section += next_entry / ic->journal_section_entries;
2150                         ic->n_uncommitted_sections += next_entry / ic->journal_section_entries;
2151                         wraparound_section(ic, &ic->free_section);
2152
2153                         pos = journal_section * ic->journal_section_entries + journal_entry;
2154                         ws = journal_section;
2155                         we = journal_entry;
2156                         i = 0;
2157                         do {
2158                                 struct journal_entry *je;
2159
2160                                 add_journal_node(ic, &ic->journal_tree[pos], dio->range.logical_sector + i);
2161                                 pos++;
2162                                 if (unlikely(pos >= ic->journal_entries))
2163                                         pos = 0;
2164
2165                                 je = access_journal_entry(ic, ws, we);
2166                                 BUG_ON(!journal_entry_is_unused(je));
2167                                 journal_entry_set_inprogress(je);
2168                                 we++;
2169                                 if (unlikely(we == ic->journal_section_entries)) {
2170                                         we = 0;
2171                                         ws++;
2172                                         wraparound_section(ic, &ws);
2173                                 }
2174                         } while ((i += ic->sectors_per_block) < dio->range.n_sectors);
2175
2176                         spin_unlock_irq(&ic->endio_wait.lock);
2177                         goto journal_read_write;
2178                 } else {
2179                         sector_t next_sector;
2180                         journal_read_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
2181                         if (likely(journal_read_pos == NOT_FOUND)) {
2182                                 if (unlikely(dio->range.n_sectors > next_sector - dio->range.logical_sector))
2183                                         dio->range.n_sectors = next_sector - dio->range.logical_sector;
2184                         } else {
2185                                 unsigned i;
2186                                 unsigned jp = journal_read_pos + 1;
2187                                 for (i = ic->sectors_per_block; i < dio->range.n_sectors; i += ic->sectors_per_block, jp++) {
2188                                         if (!test_journal_node(ic, jp, dio->range.logical_sector + i))
2189                                                 break;
2190                                 }
2191                                 dio->range.n_sectors = i;
2192                         }
2193                 }
2194         }
2195         if (unlikely(!add_new_range(ic, &dio->range, true))) {
2196                 /*
2197                  * We must not sleep in the request routine because it could
2198                  * stall bios on current->bio_list.
2199                  * So, we offload the bio to a workqueue if we have to sleep.
2200                  */
2201                 if (from_map) {
2202 offload_to_thread:
2203                         spin_unlock_irq(&ic->endio_wait.lock);
2204                         INIT_WORK(&dio->work, integrity_bio_wait);
2205                         queue_work(ic->wait_wq, &dio->work);
2206                         return;
2207                 }
2208                 if (journal_read_pos != NOT_FOUND)
2209                         dio->range.n_sectors = ic->sectors_per_block;
2210                 wait_and_add_new_range(ic, &dio->range);
2211                 /*
2212                  * wait_and_add_new_range drops the spinlock, so the journal
2213                  * may have been changed arbitrarily. We need to recheck.
2214                  * To simplify the code, we restrict I/O size to just one block.
2215                  */
2216                 if (journal_read_pos != NOT_FOUND) {
2217                         sector_t next_sector;
2218                         unsigned new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
2219                         if (unlikely(new_pos != journal_read_pos)) {
2220                                 remove_range_unlocked(ic, &dio->range);
2221                                 goto retry;
2222                         }
2223                 }
2224         }
2225         if (ic->mode == 'J' && likely(dio->op == REQ_OP_DISCARD) && !discard_retried) {
2226                 sector_t next_sector;
2227                 unsigned new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
2228                 if (unlikely(new_pos != NOT_FOUND) ||
2229                     unlikely(next_sector < dio->range.logical_sector - dio->range.n_sectors)) {
2230                         remove_range_unlocked(ic, &dio->range);
2231                         spin_unlock_irq(&ic->endio_wait.lock);
2232                         queue_work(ic->commit_wq, &ic->commit_work);
2233                         flush_workqueue(ic->commit_wq);
2234                         queue_work(ic->writer_wq, &ic->writer_work);
2235                         flush_workqueue(ic->writer_wq);
2236                         discard_retried = true;
2237                         goto lock_retry;
2238                 }
2239         }
2240         spin_unlock_irq(&ic->endio_wait.lock);
2241
2242         if (unlikely(journal_read_pos != NOT_FOUND)) {
2243                 journal_section = journal_read_pos / ic->journal_section_entries;
2244                 journal_entry = journal_read_pos % ic->journal_section_entries;
2245                 goto journal_read_write;
2246         }
2247
2248         if (ic->mode == 'B' && (dio->op == REQ_OP_WRITE || unlikely(dio->op == REQ_OP_DISCARD))) {
2249                 if (!block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
2250                                      dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) {
2251                         struct bitmap_block_status *bbs;
2252
2253                         bbs = sector_to_bitmap_block(ic, dio->range.logical_sector);
2254                         spin_lock(&bbs->bio_queue_lock);
2255                         bio_list_add(&bbs->bio_queue, bio);
2256                         spin_unlock(&bbs->bio_queue_lock);
2257                         queue_work(ic->writer_wq, &bbs->work);
2258                         return;
2259                 }
2260         }
2261
2262         dio->in_flight = (atomic_t)ATOMIC_INIT(2);
2263
2264         if (need_sync_io) {
2265                 init_completion(&read_comp);
2266                 dio->completion = &read_comp;
2267         } else
2268                 dio->completion = NULL;
2269
2270         dm_bio_record(&dio->bio_details, bio);
2271         bio_set_dev(bio, ic->dev->bdev);
2272         bio->bi_integrity = NULL;
2273         bio->bi_opf &= ~REQ_INTEGRITY;
2274         bio->bi_end_io = integrity_end_io;
2275         bio->bi_iter.bi_size = dio->range.n_sectors << SECTOR_SHIFT;
2276
2277         if (unlikely(dio->op == REQ_OP_DISCARD) && likely(ic->mode != 'D')) {
2278                 integrity_metadata(&dio->work);
2279                 dm_integrity_flush_buffers(ic, false);
2280
2281                 dio->in_flight = (atomic_t)ATOMIC_INIT(1);
2282                 dio->completion = NULL;
2283
2284                 submit_bio_noacct(bio);
2285
2286                 return;
2287         }
2288
2289         submit_bio_noacct(bio);
2290
2291         if (need_sync_io) {
2292                 wait_for_completion_io(&read_comp);
2293                 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
2294                     dio->range.logical_sector + dio->range.n_sectors > le64_to_cpu(ic->sb->recalc_sector))
2295                         goto skip_check;
2296                 if (ic->mode == 'B') {
2297                         if (!block_bitmap_op(ic, ic->recalc_bitmap, dio->range.logical_sector,
2298                                              dio->range.n_sectors, BITMAP_OP_TEST_ALL_CLEAR))
2299                                 goto skip_check;
2300                 }
2301
2302                 if (likely(!bio->bi_status))
2303                         integrity_metadata(&dio->work);
2304                 else
2305 skip_check:
2306                         dec_in_flight(dio);
2307
2308         } else {
2309                 INIT_WORK(&dio->work, integrity_metadata);
2310                 queue_work(ic->metadata_wq, &dio->work);
2311         }
2312
2313         return;
2314
2315 journal_read_write:
2316         if (unlikely(__journal_read_write(dio, bio, journal_section, journal_entry)))
2317                 goto lock_retry;
2318
2319         do_endio_flush(ic, dio);
2320 }
2321
2323 static void integrity_bio_wait(struct work_struct *w)
2324 {
2325         struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
2326
2327         dm_integrity_map_continue(dio, false);
2328 }
2329
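/*
 * Skip the rest of a partially filled section so that commits always
 * operate on whole sections; the skipped entries are subtracted from
 * free_sectors.  The WARN_ON checks the invariant that every journal entry
 * is accounted as either free, uncommitted or committed.
 */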
2330 static void pad_uncommitted(struct dm_integrity_c *ic)
2331 {
2332         if (ic->free_section_entry) {
2333                 ic->free_sectors -= ic->journal_section_entries - ic->free_section_entry;
2334                 ic->free_section_entry = 0;
2335                 ic->free_section++;
2336                 wraparound_section(ic, &ic->free_section);
2337                 ic->n_uncommitted_sections++;
2338         }
2339         if (WARN_ON(ic->journal_sections * ic->journal_section_entries !=
2340                     (ic->n_uncommitted_sections + ic->n_committed_sections) *
2341                     ic->journal_section_entries + ic->free_sectors)) {
2342                 DMCRIT("journal_sections %u, journal_section_entries %u, "
2343                        "n_uncommitted_sections %u, n_committed_sections %u, "
2344                        "free_sectors %u",
2345                        ic->journal_sections, ic->journal_section_entries,
2346                        ic->n_uncommitted_sections, ic->n_committed_sections,
2347                        ic->free_sectors);
2348         }
2349 }
2350
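/*
 * Commit work: wait until in-progress copies into the uncommitted sections
 * finish, stamp every journal sector with the current commit id, write the
 * sections to the on-disk journal and move them to the committed state.
 * Queued flush bios are completed afterwards.
 */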
2351 static void integrity_commit(struct work_struct *w)
2352 {
2353         struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, commit_work);
2354         unsigned commit_start, commit_sections;
2355         unsigned i, j, n;
2356         struct bio *flushes;
2357
2358         del_timer(&ic->autocommit_timer);
2359
2360         spin_lock_irq(&ic->endio_wait.lock);
2361         flushes = bio_list_get(&ic->flush_bio_list);
2362         if (unlikely(ic->mode != 'J')) {
2363                 spin_unlock_irq(&ic->endio_wait.lock);
2364                 dm_integrity_flush_buffers(ic, true);
2365                 goto release_flush_bios;
2366         }
2367
2368         pad_uncommitted(ic);
2369         commit_start = ic->uncommitted_section;
2370         commit_sections = ic->n_uncommitted_sections;
2371         spin_unlock_irq(&ic->endio_wait.lock);
2372
2373         if (!commit_sections)
2374                 goto release_flush_bios;
2375
2376         i = commit_start;
2377         for (n = 0; n < commit_sections; n++) {
2378                 for (j = 0; j < ic->journal_section_entries; j++) {
2379                         struct journal_entry *je;
2380                         je = access_journal_entry(ic, i, j);
2381                         io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
2382                 }
2383                 for (j = 0; j < ic->journal_section_sectors; j++) {
2384                         struct journal_sector *js;
2385                         js = access_journal(ic, i, j);
2386                         js->commit_id = dm_integrity_commit_id(ic, i, j, ic->commit_seq);
2387                 }
2388                 i++;
2389                 if (unlikely(i >= ic->journal_sections))
2390                         ic->commit_seq = next_commit_seq(ic->commit_seq);
2391                 wraparound_section(ic, &i);
2392         }
2393         smp_rmb();
2394
2395         write_journal(ic, commit_start, commit_sections);
2396
2397         spin_lock_irq(&ic->endio_wait.lock);
2398         ic->uncommitted_section += commit_sections;
2399         wraparound_section(ic, &ic->uncommitted_section);
2400         ic->n_uncommitted_sections -= commit_sections;
2401         ic->n_committed_sections += commit_sections;
2402         spin_unlock_irq(&ic->endio_wait.lock);
2403
2404         if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold)
2405                 queue_work(ic->writer_wq, &ic->writer_work);
2406
2407 release_flush_bios:
2408         while (flushes) {
2409                 struct bio *next = flushes->bi_next;
2410                 flushes->bi_next = NULL;
2411                 do_endio(ic, flushes);
2412                 flushes = next;
2413         }
2414 }
2415
2416 static void complete_copy_from_journal(unsigned long error, void *context)
2417 {
2418         struct journal_io *io = context;
2419         struct journal_completion *comp = io->comp;
2420         struct dm_integrity_c *ic = comp->ic;
2421         remove_range(ic, &io->range);
2422         mempool_free(io, &ic->journal_io_mempool);
2423         if (unlikely(error != 0))
2424                 dm_integrity_io_error(ic, "copying from journal", -EIO);
2425         complete_journal_op(comp);
2426 }
2427
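/*
 * The last 8 bytes of each journalled data sector are displaced by the
 * per-sector commit id and saved in the journal entry; put them back
 * before the data is copied out of the journal.
 */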
2428 static void restore_last_bytes(struct dm_integrity_c *ic, struct journal_sector *js,
2429                                struct journal_entry *je)
2430 {
2431         unsigned s = 0;
2432         do {
2433                 js->commit_id = je->last_bytes[s];
2434                 js++;
2435         } while (++s < ic->sectors_per_block);
2436 }
2437
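/*
 * Write committed journal sections back to the device.  Runs of
 * consecutive sectors are coalesced into a single copy, entries superseded
 * by a newer committed entry for the same sector are dropped, and the tags
 * stored in the journal are written to the metadata area (and, when
 * replaying after a crash, verified against recomputed checksums).
 */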
2438 static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
2439                              unsigned write_sections, bool from_replay)
2440 {
2441         unsigned i, j, n;
2442         struct journal_completion comp;
2443         struct blk_plug plug;
2444
2445         blk_start_plug(&plug);
2446
2447         comp.ic = ic;
2448         comp.in_flight = (atomic_t)ATOMIC_INIT(1);
2449         init_completion(&comp.comp);
2450
2451         i = write_start;
2452         for (n = 0; n < write_sections; n++, i++, wraparound_section(ic, &i)) {
2453 #ifndef INTERNAL_VERIFY
2454                 if (unlikely(from_replay))
2455 #endif
2456                         rw_section_mac(ic, i, false);
2457                 for (j = 0; j < ic->journal_section_entries; j++) {
2458                         struct journal_entry *je = access_journal_entry(ic, i, j);
2459                         sector_t sec, area, offset;
2460                         unsigned k, l, next_loop;
2461                         sector_t metadata_block;
2462                         unsigned metadata_offset;
2463                         struct journal_io *io;
2464
2465                         if (journal_entry_is_unused(je))
2466                                 continue;
2467                         BUG_ON(unlikely(journal_entry_is_inprogress(je)) && !from_replay);
2468                         sec = journal_entry_get_sector(je);
2469                         if (unlikely(from_replay)) {
2470                                 if (unlikely(sec & (unsigned)(ic->sectors_per_block - 1))) {
2471                                         dm_integrity_io_error(ic, "invalid sector in journal", -EIO);
2472                                         sec &= ~(sector_t)(ic->sectors_per_block - 1);
2473                                 }
2474                         }
2475                         if (unlikely(sec >= ic->provided_data_sectors))
2476                                 continue;
2477                         get_area_and_offset(ic, sec, &area, &offset);
2478                         restore_last_bytes(ic, access_journal_data(ic, i, j), je);
2479                         for (k = j + 1; k < ic->journal_section_entries; k++) {
2480                                 struct journal_entry *je2 = access_journal_entry(ic, i, k);
2481                                 sector_t sec2, area2, offset2;
2482                                 if (journal_entry_is_unused(je2))
2483                                         break;
2484                                 BUG_ON(unlikely(journal_entry_is_inprogress(je2)) && !from_replay);
2485                                 sec2 = journal_entry_get_sector(je2);
2486                                 if (unlikely(sec2 >= ic->provided_data_sectors))
2487                                         break;
2488                                 get_area_and_offset(ic, sec2, &area2, &offset2);
2489                                 if (area2 != area || offset2 != offset + ((k - j) << ic->sb->log2_sectors_per_block))
2490                                         break;
2491                                 restore_last_bytes(ic, access_journal_data(ic, i, k), je2);
2492                         }
2493                         next_loop = k - 1;
2494
2495                         io = mempool_alloc(&ic->journal_io_mempool, GFP_NOIO);
2496                         io->comp = &comp;
2497                         io->range.logical_sector = sec;
2498                         io->range.n_sectors = (k - j) << ic->sb->log2_sectors_per_block;
2499
2500                         spin_lock_irq(&ic->endio_wait.lock);
2501                         add_new_range_and_wait(ic, &io->range);
2502
2503                         if (likely(!from_replay)) {
2504                                 struct journal_node *section_node = &ic->journal_tree[i * ic->journal_section_entries];
2505
2506                                 /* don't write if there is a newer committed sector */
2507                                 while (j < k && find_newer_committed_node(ic, &section_node[j])) {
2508                                         struct journal_entry *je2 = access_journal_entry(ic, i, j);
2509
2510                                         journal_entry_set_unused(je2);
2511                                         remove_journal_node(ic, &section_node[j]);
2512                                         j++;
2513                                         sec += ic->sectors_per_block;
2514                                         offset += ic->sectors_per_block;
2515                                 }
2516                                 while (j < k && find_newer_committed_node(ic, &section_node[k - 1])) {
2517                                         struct journal_entry *je2 = access_journal_entry(ic, i, k - 1);
2518
2519                                         journal_entry_set_unused(je2);
2520                                         remove_journal_node(ic, &section_node[k - 1]);
2521                                         k--;
2522                                 }
2523                                 if (j == k) {
2524                                         remove_range_unlocked(ic, &io->range);
2525                                         spin_unlock_irq(&ic->endio_wait.lock);
2526                                         mempool_free(io, &ic->journal_io_mempool);
2527                                         goto skip_io;
2528                                 }
2529                                 for (l = j; l < k; l++) {
2530                                         remove_journal_node(ic, &section_node[l]);
2531                                 }
2532                         }
2533                         spin_unlock_irq(&ic->endio_wait.lock);
2534
2535                         metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);
2536                         for (l = j; l < k; l++) {
2537                                 int r;
2538                                 struct journal_entry *je2 = access_journal_entry(ic, i, l);
2539
2540 #ifndef INTERNAL_VERIFY
2541                                 if (unlikely(from_replay) && ic->internal_hash) {
2542 #else
2543                                 if (ic->internal_hash) {
2544 #endif
2545                                         char test_tag[max_t(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
2546
2547                                         integrity_sector_checksum(ic, sec + ((l - j) << ic->sb->log2_sectors_per_block),
2548                                                                   (char *)access_journal_data(ic, i, l), test_tag);
2549                                         if (unlikely(memcmp(test_tag, journal_entry_tag(ic, je2), ic->tag_size))) {
2550                                                 dm_integrity_io_error(ic, "tag mismatch when replaying journal", -EILSEQ);
2551                                                 dm_audit_log_target(DM_MSG_PREFIX, "integrity-replay-journal", ic->ti, 0);
2552                                         }
2553                                 }
2554
2555                                 journal_entry_set_unused(je2);
2556                                 r = dm_integrity_rw_tag(ic, journal_entry_tag(ic, je2), &metadata_block, &metadata_offset,
2557                                                         ic->tag_size, TAG_WRITE);
2558                                 if (unlikely(r)) {
2559                                         dm_integrity_io_error(ic, "writing tags", r);
2560                                 }
2561                         }
2562
2563                         atomic_inc(&comp.in_flight);
2564                         copy_from_journal(ic, i, j << ic->sb->log2_sectors_per_block,
2565                                           (k - j) << ic->sb->log2_sectors_per_block,
2566                                           get_data_sector(ic, area, offset),
2567                                           complete_copy_from_journal, io);
2568 skip_io:
2569                         j = next_loop;
2570                 }
2571         }
2572
2573         dm_bufio_write_dirty_buffers_async(ic->bufio);
2574
2575         blk_finish_plug(&plug);
2576
2577         complete_journal_op(&comp);
2578         wait_for_completion_io(&comp.comp);
2579
2580         dm_integrity_flush_buffers(ic, true);
2581 }
2582
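/*
 * Writer work: write the committed sections back to the device via
 * do_journal_write() and return them to the free pool, waking any writers
 * that were blocked on a full journal.
 */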
2583 static void integrity_writer(struct work_struct *w)
2584 {
2585         struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, writer_work);
2586         unsigned write_start, write_sections;
2588         unsigned prev_free_sectors;
2589
2590         /* this test is not needed for correctness, but returning early here exercises the journal replay code */
2591         if (unlikely(dm_post_suspending(ic->ti)) && !ic->meta_dev)
2592                 return;
2593
2594         spin_lock_irq(&ic->endio_wait.lock);
2595         write_start = ic->committed_section;
2596         write_sections = ic->n_committed_sections;
2597         spin_unlock_irq(&ic->endio_wait.lock);
2598
2599         if (!write_sections)
2600                 return;
2601
2602         do_journal_write(ic, write_start, write_sections, false);
2603
2604         spin_lock_irq(&ic->endio_wait.lock);
2605
2606         ic->committed_section += write_sections;
2607         wraparound_section(ic, &ic->committed_section);
2608         ic->n_committed_sections -= write_sections;
2609
2610         prev_free_sectors = ic->free_sectors;
2611         ic->free_sectors += write_sections * ic->journal_section_entries;
2612         if (unlikely(!prev_free_sectors))
2613                 wake_up_locked(&ic->endio_wait);
2614
2615         spin_unlock_irq(&ic->endio_wait.lock);
2616 }
2617
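/*
 * Persist recalculation progress: dirty metadata buffers are flushed first,
 * so the on-disk recalc_sector never points past tags that have not yet
 * reached the device.
 */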
2618 static void recalc_write_super(struct dm_integrity_c *ic)
2619 {
2620         int r;
2621
2622         dm_integrity_flush_buffers(ic, false);
2623         if (dm_integrity_failed(ic))
2624                 return;
2625
2626         r = sync_rw_sb(ic, REQ_OP_WRITE, 0);
2627         if (unlikely(r))
2628                 dm_integrity_io_error(ic, "writing superblock", r);
2629 }
2630
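/*
 * Background recalculation worker: walk the device from sb->recalc_sector in
 * chunks of up to RECALC_SECTORS, read the data, compute the checksums and
 * write them to the tag area. Progress is written to the superblock every
 * RECALC_WRITE_SUPER chunks.
 */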
2631 static void integrity_recalc(struct work_struct *w)
2632 {
2633         struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, recalc_work);
2634         struct dm_integrity_range range;
2635         struct dm_io_request io_req;
2636         struct dm_io_region io_loc;
2637         sector_t area, offset;
2638         sector_t metadata_block;
2639         unsigned metadata_offset;
2640         sector_t logical_sector, n_sectors;
2641         __u8 *t;
2642         unsigned i;
2643         int r;
2644         unsigned super_counter = 0;
2645
2646         DEBUG_print("start recalculation... (position %llx)\n", le64_to_cpu(ic->sb->recalc_sector));
2647
2648         spin_lock_irq(&ic->endio_wait.lock);
2649
2650 next_chunk:
2651
2652         if (unlikely(dm_post_suspending(ic->ti)))
2653                 goto unlock_ret;
2654
2655         range.logical_sector = le64_to_cpu(ic->sb->recalc_sector);
2656         if (unlikely(range.logical_sector >= ic->provided_data_sectors)) {
2657                 if (ic->mode == 'B') {
2658                         block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
2659                         DEBUG_print("queue_delayed_work: bitmap_flush_work\n");
2660                         queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
2661                 }
2662                 goto unlock_ret;
2663         }
2664
2665         get_area_and_offset(ic, range.logical_sector, &area, &offset);
2666         range.n_sectors = min((sector_t)RECALC_SECTORS, ic->provided_data_sectors - range.logical_sector);
2667         if (!ic->meta_dev)
2668                 range.n_sectors = min(range.n_sectors, ((sector_t)1U << ic->sb->log2_interleave_sectors) - (unsigned)offset);
2669
2670         add_new_range_and_wait(ic, &range);
2671         spin_unlock_irq(&ic->endio_wait.lock);
2672         logical_sector = range.logical_sector;
2673         n_sectors = range.n_sectors;
2674
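        /*
         * In bitmap mode only blocks with a set bit need recalculating:
         * skip the chunk entirely if all bits are clear, otherwise trim
         * all-clear runs from both ends of the range.
         */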
2675         if (ic->mode == 'B') {
2676                 if (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector, n_sectors, BITMAP_OP_TEST_ALL_CLEAR)) {
2677                         goto advance_and_next;
2678                 }
2679                 while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector,
2680                                        ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) {
2681                         logical_sector += ic->sectors_per_block;
2682                         n_sectors -= ic->sectors_per_block;
2683                         cond_resched();
2684                 }
2685                 while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector + n_sectors - ic->sectors_per_block,
2686                                        ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) {
2687                         n_sectors -= ic->sectors_per_block;
2688                         cond_resched();
2689                 }
2690                 get_area_and_offset(ic, logical_sector, &area, &offset);
2691         }
2692
2693         DEBUG_print("recalculating: %llx, %llx\n", logical_sector, n_sectors);
2694
2695         if (unlikely(++super_counter == RECALC_WRITE_SUPER)) {
2696                 recalc_write_super(ic);
2697                 if (ic->mode == 'B') {
2698                         queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval);
2699                 }
2700                 super_counter = 0;
2701         }
2702
2703         if (unlikely(dm_integrity_failed(ic)))
2704                 goto err;
2705
2706         io_req.bi_op = REQ_OP_READ;
2707         io_req.bi_op_flags = 0;
2708         io_req.mem.type = DM_IO_VMA;
2709         io_req.mem.ptr.addr = ic->recalc_buffer;
2710         io_req.notify.fn = NULL;
2711         io_req.client = ic->io;
2712         io_loc.bdev = ic->dev->bdev;
2713         io_loc.sector = get_data_sector(ic, area, offset);
2714         io_loc.count = n_sectors;
2715
2716         r = dm_io(&io_req, 1, &io_loc, NULL);
2717         if (unlikely(r)) {
2718                 dm_integrity_io_error(ic, "reading data", r);
2719                 goto err;
2720         }
2721
2722         t = ic->recalc_tags;
2723         for (i = 0; i < n_sectors; i += ic->sectors_per_block) {
2724                 integrity_sector_checksum(ic, logical_sector + i, ic->recalc_buffer + (i << SECTOR_SHIFT), t);
2725                 t += ic->tag_size;
2726         }
2727
2728         metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);
2729
2730         r = dm_integrity_rw_tag(ic, ic->recalc_tags, &metadata_block, &metadata_offset, t - ic->recalc_tags, TAG_WRITE);
2731         if (unlikely(r)) {
2732                 dm_integrity_io_error(ic, "writing tags", r);
2733                 goto err;
2734         }
2735
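        /*
         * Clear recalc_bitmap at bitmap-bit granularity. Rounding the start
         * down is safe because everything below this range has already been
         * recalculated; rounding the end down keeps a partially covered
         * trailing bit set.
         */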
2736         if (ic->mode == 'B') {
2737                 sector_t start, end;
2738                 start = (range.logical_sector >>
2739                          (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)) <<
2740                         (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
2741                 end = ((range.logical_sector + range.n_sectors) >>
2742                        (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)) <<
2743                         (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
2744                 block_bitmap_op(ic, ic->recalc_bitmap, start, end - start, BITMAP_OP_CLEAR);
2745         }
2746
2747 advance_and_next:
2748         cond_resched();
2749
2750         spin_lock_irq(&ic->endio_wait.lock);
2751         remove_range_unlocked(ic, &range);
2752         ic->sb->recalc_sector = cpu_to_le64(range.logical_sector + range.n_sectors);
2753         goto next_chunk;
2754
2755 err:
2756         remove_range(ic, &range);
2757         return;
2758
2759 unlock_ret:
2760         spin_unlock_irq(&ic->endio_wait.lock);
2761
2762         recalc_write_super(ic);
2763 }
2764
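/*
 * Per-bitmap-block worker: bios whose range is already marked in
 * may_write_bitmap can proceed immediately; the rest get their bits set in
 * the in-core bitmap and wait until the bitmap block is written out with
 * REQ_FUA before they are resubmitted.
 */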
2765 static void bitmap_block_work(struct work_struct *w)
2766 {
2767         struct bitmap_block_status *bbs = container_of(w, struct bitmap_block_status, work);
2768         struct dm_integrity_c *ic = bbs->ic;
2769         struct bio *bio;
2770         struct bio_list bio_queue;
2771         struct bio_list waiting;
2772
2773         bio_list_init(&waiting);
2774
2775         spin_lock(&bbs->bio_queue_lock);
2776         bio_queue = bbs->bio_queue;
2777         bio_list_init(&bbs->bio_queue);
2778         spin_unlock(&bbs->bio_queue_lock);
2779
2780         while ((bio = bio_list_pop(&bio_queue))) {
2781                 struct dm_integrity_io *dio;
2782
2783                 dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
2784
2785                 if (block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
2786                                     dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) {
2787                         remove_range(ic, &dio->range);
2788                         INIT_WORK(&dio->work, integrity_bio_wait);
2789                         queue_work(ic->offload_wq, &dio->work);
2790                 } else {
2791                         block_bitmap_op(ic, ic->journal, dio->range.logical_sector,
2792                                         dio->range.n_sectors, BITMAP_OP_SET);
2793                         bio_list_add(&waiting, bio);
2794                 }
2795         }
2796
2797         if (bio_list_empty(&waiting))
2798                 return;
2799
2800         rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC,
2801                            bbs->idx * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT),
2802                            BITMAP_BLOCK_SIZE >> SECTOR_SHIFT, NULL);
2803
2804         while ((bio = bio_list_pop(&waiting))) {
2805                 struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
2806
2807                 block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
2808                                 dio->range.n_sectors, BITMAP_OP_SET);
2809
2810                 remove_range(ic, &dio->range);
2811                 INIT_WORK(&dio->work, integrity_bio_wait);
2812                 queue_work(ic->offload_wq, &dio->work);
2813         }
2814
2815         queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval);
2816 }
2817
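/*
 * Periodic bitmap flush: take a range covering the whole device to lock out
 * new writes, flush dirty buffers, clear the bitmaps up to the recalculated
 * limit and write them back with REQ_FUA.
 */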
2818 static void bitmap_flush_work(struct work_struct *work)
2819 {
2820         struct dm_integrity_c *ic = container_of(work, struct dm_integrity_c, bitmap_flush_work.work);
2821         struct dm_integrity_range range;
2822         unsigned long limit;
2823         struct bio *bio;
2824
2825         dm_integrity_flush_buffers(ic, false);
2826
2827         range.logical_sector = 0;
2828         range.n_sectors = ic->provided_data_sectors;
2829
2830         spin_lock_irq(&ic->endio_wait.lock);
2831         add_new_range_and_wait(ic, &range);
2832         spin_unlock_irq(&ic->endio_wait.lock);
2833
2834         dm_integrity_flush_buffers(ic, true);
2835
2836         limit = ic->provided_data_sectors;
2837         if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
2838                 limit = le64_to_cpu(ic->sb->recalc_sector)
2839                         >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)
2840                         << (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
2841         }
2842         /*DEBUG_print("zeroing journal\n");*/
2843         block_bitmap_op(ic, ic->journal, 0, limit, BITMAP_OP_CLEAR);
2844         block_bitmap_op(ic, ic->may_write_bitmap, 0, limit, BITMAP_OP_CLEAR);
2845
2846         rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
2847                            ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
2848
2849         spin_lock_irq(&ic->endio_wait.lock);
2850         remove_range_unlocked(ic, &range);
2851         while (unlikely((bio = bio_list_pop(&ic->synchronous_bios)) != NULL)) {
2852                 bio_endio(bio);
2853                 spin_unlock_irq(&ic->endio_wait.lock);
2854                 spin_lock_irq(&ic->endio_wait.lock);
2855         }
2856         spin_unlock_irq(&ic->endio_wait.lock);
2857 }
2858
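/*
 * Format journal sections: zero the data, stamp every sector with the commit
 * id for the given sequence number and mark all entries unused.
 */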
2860 static void init_journal(struct dm_integrity_c *ic, unsigned start_section,
2861                          unsigned n_sections, unsigned char commit_seq)
2862 {
2863         unsigned i, j, n;
2864
2865         if (!n_sections)
2866                 return;
2867
2868         for (n = 0; n < n_sections; n++) {
2869                 i = start_section + n;
2870                 wraparound_section(ic, &i);
2871                 for (j = 0; j < ic->journal_section_sectors; j++) {
2872                         struct journal_sector *js = access_journal(ic, i, j);
2873                         memset(&js->entries, 0, JOURNAL_SECTOR_DATA);
2874                         js->commit_id = dm_integrity_commit_id(ic, i, j, commit_seq);
2875                 }
2876                 for (j = 0; j < ic->journal_section_entries; j++) {
2877                         struct journal_entry *je = access_journal_entry(ic, i, j);
2878                         journal_entry_set_unused(je);
2879                 }
2880         }
2881
2882         write_journal(ic, start_section, n_sections);
2883 }
2884
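/*
 * Map an on-disk commit id back to its sequence number (0 .. N_COMMIT_IDS - 1);
 * an id that matches no sequence number means the journal is corrupted (or
 * cannot be decrypted).
 */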
2885 static int find_commit_seq(struct dm_integrity_c *ic, unsigned i, unsigned j, commit_id_t id)
2886 {
2887         unsigned char k;
2888         for (k = 0; k < N_COMMIT_IDS; k++) {
2889                 if (dm_integrity_commit_id(ic, i, j, k) == id)
2890                         return k;
2891         }
2892         dm_integrity_io_error(ic, "journal commit id", -EIO);
2893         return -EIO;
2894 }
2895
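/*
 * Journal replay on resume: read (and decrypt) the journal, reconstruct the
 * last committed sequence number from the per-sector commit ids, replay the
 * sections that are consistent and reinitialize the rest. If the commit ids
 * cannot be made sense of, the whole journal is erased.
 */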
2896 static void replay_journal(struct dm_integrity_c *ic)
2897 {
2898         unsigned i, j;
2899         bool used_commit_ids[N_COMMIT_IDS];
2900         unsigned max_commit_id_sections[N_COMMIT_IDS];
2901         unsigned write_start, write_sections;
2902         unsigned continue_section;
2903         bool journal_empty;
2904         unsigned char unused, last_used, want_commit_seq;
2905
2906         if (ic->mode == 'R')
2907                 return;
2908
2909         if (ic->journal_uptodate)
2910                 return;
2911
2912         last_used = 0;
2913         write_start = 0;
2914
2915         if (!ic->just_formatted) {
2916                 DEBUG_print("reading journal\n");
2917                 rw_journal(ic, REQ_OP_READ, 0, 0, ic->journal_sections, NULL);
2918                 if (ic->journal_io)
2919                         DEBUG_bytes(lowmem_page_address(ic->journal_io[0].page), 64, "read journal");
2920                 if (ic->journal_io) {
2921                         struct journal_completion crypt_comp;
2922                         crypt_comp.ic = ic;
2923                         init_completion(&crypt_comp.comp);
2924                         crypt_comp.in_flight = (atomic_t)ATOMIC_INIT(0);
2925                         encrypt_journal(ic, false, 0, ic->journal_sections, &crypt_comp);
2926                         wait_for_completion(&crypt_comp.comp);
2927                 }
2928                 DEBUG_bytes(lowmem_page_address(ic->journal[0].page), 64, "decrypted journal");
2929         }
2930
2931         if (dm_integrity_failed(ic))
2932                 goto clear_journal;
2933
2934         journal_empty = true;
2935         memset(used_commit_ids, 0, sizeof used_commit_ids);
2936         memset(max_commit_id_sections, 0, sizeof max_commit_id_sections);
2937         for (i = 0; i < ic->journal_sections; i++) {
2938                 for (j = 0; j < ic->journal_section_sectors; j++) {
2939                         int k;
2940                         struct journal_sector *js = access_journal(ic, i, j);
2941                         k = find_commit_seq(ic, i, j, js->commit_id);
2942                         if (k < 0)
2943                                 goto clear_journal;
2944                         used_commit_ids[k] = true;
2945                         max_commit_id_sections[k] = i;
2946                 }
2947                 if (journal_empty) {
2948                         for (j = 0; j < ic->journal_section_entries; j++) {
2949                                 struct journal_entry *je = access_journal_entry(ic, i, j);
2950                                 if (!journal_entry_is_unused(je)) {
2951                                         journal_empty = false;
2952                                         break;
2953                                 }
2954                         }
2955                 }
2956         }
2957
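        /*
         * Pick the first unused commit seq. The N_COMMIT_IDS ids are used
         * cyclically, so if the highest one is unused, walk back to the start
         * of that unused run; otherwise scan forward for the first gap.
         */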
2958         if (!used_commit_ids[N_COMMIT_IDS - 1]) {
2959                 unused = N_COMMIT_IDS - 1;
2960                 while (unused && !used_commit_ids[unused - 1])
2961                         unused--;
2962         } else {
2963                 for (unused = 0; unused < N_COMMIT_IDS; unused++)
2964                         if (!used_commit_ids[unused])
2965                                 break;
2966                 if (unused == N_COMMIT_IDS) {
2967                         dm_integrity_io_error(ic, "journal commit ids", -EIO);
2968                         goto clear_journal;
2969                 }
2970         }
2971         DEBUG_print("first unused commit seq %d [%d,%d,%d,%d]\n",
2972                     unused, used_commit_ids[0], used_commit_ids[1],
2973                     used_commit_ids[2], used_commit_ids[3]);
2974
2975         last_used = prev_commit_seq(unused);
2976         want_commit_seq = prev_commit_seq(last_used);
2977
2978         if (!used_commit_ids[want_commit_seq] && used_commit_ids[prev_commit_seq(want_commit_seq)])
2979                 journal_empty = true;
2980
2981         write_start = max_commit_id_sections[last_used] + 1;
2982         if (unlikely(write_start >= ic->journal_sections))
2983                 want_commit_seq = next_commit_seq(want_commit_seq);
2984         wraparound_section(ic, &write_start);
2985
2986         i = write_start;
2987         for (write_sections = 0; write_sections < ic->journal_sections; write_sections++) {
2988                 for (j = 0; j < ic->journal_section_sectors; j++) {
2989                         struct journal_sector *js = access_journal(ic, i, j);
2990
2991                         if (js->commit_id != dm_integrity_commit_id(ic, i, j, want_commit_seq)) {
2992                                 /*
2993                                  * This could be caused by a crash during writing.
2994                                  * We won't replay the inconsistent part of the
2995                                  * journal.
2996                                  */
2997                                 DEBUG_print("commit id mismatch at position (%u, %u): %d != %d\n",
2998                                             i, j, find_commit_seq(ic, i, j, js->commit_id), want_commit_seq);
2999                                 goto brk;
3000                         }
3001                 }
3002                 i++;
3003                 if (unlikely(i >= ic->journal_sections))
3004                         want_commit_seq = next_commit_seq(want_commit_seq);
3005                 wraparound_section(ic, &i);
3006         }
3007 brk:
3008
3009         if (!journal_empty) {
3010                 DEBUG_print("replaying %u sections, starting at %u, commit seq %d\n",
3011                             write_sections, write_start, want_commit_seq);
3012                 do_journal_write(ic, write_start, write_sections, true);
3013         }
3014
3015         if (write_sections == ic->journal_sections && (ic->mode == 'J' || journal_empty)) {
3016                 continue_section = write_start;
3017                 ic->commit_seq = want_commit_seq;
3018                 DEBUG_print("continuing from section %u, commit seq %d\n", write_start, ic->commit_seq);
3019         } else {
3020                 unsigned s;
3021                 unsigned char erase_seq;
3022 clear_journal:
3023                 DEBUG_print("clearing journal\n");
3024
3025                 erase_seq = prev_commit_seq(prev_commit_seq(last_used));
3026                 s = write_start;
3027                 init_journal(ic, s, 1, erase_seq);
3028                 s++;
3029                 wraparound_section(ic, &s);
3030                 if (ic->journal_sections >= 2) {
3031                         init_journal(ic, s, ic->journal_sections - 2, erase_seq);
3032                         s += ic->journal_sections - 2;
3033                         wraparound_section(ic, &s);
3034                         init_journal(ic, s, 1, erase_seq);
3035                 }
3036
3037                 continue_section = 0;
3038                 ic->commit_seq = next_commit_seq(erase_seq);
3039         }
3040
3041         ic->committed_section = continue_section;
3042         ic->n_committed_sections = 0;
3043
3044         ic->uncommitted_section = continue_section;
3045         ic->n_uncommitted_sections = 0;
3046
3047         ic->free_section = continue_section;
3048         ic->free_section_entry = 0;
3049         ic->free_sectors = ic->journal_entries;
3050
3051         ic->journal_tree_root = RB_ROOT;
3052         for (i = 0; i < ic->journal_entries; i++)
3053                 init_journal_node(&ic->journal_tree[i]);
3054 }
3055
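/*
 * Switch bitmap mode to synchronous operation (used on reboot notification):
 * shrink the flush interval to a few milliseconds and flush immediately, so
 * that as little dirty state as possible remains when the machine goes down.
 */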
3056 static void dm_integrity_enter_synchronous_mode(struct dm_integrity_c *ic)
3057 {
3058         DEBUG_print("dm_integrity_enter_synchronous_mode\n");
3059
3060         if (ic->mode == 'B') {
3061                 ic->bitmap_flush_interval = msecs_to_jiffies(10) + 1;
3062                 ic->synchronous_mode = 1;
3063
3064                 cancel_delayed_work_sync(&ic->bitmap_flush_work);
3065                 queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
3066                 flush_workqueue(ic->commit_wq);
3067         }
3068 }
3069
3070 static int dm_integrity_reboot(struct notifier_block *n, unsigned long code, void *x)
3071 {
3072         struct dm_integrity_c *ic = container_of(n, struct dm_integrity_c, reboot_notifier);
3073
3074         DEBUG_print("dm_integrity_reboot\n");
3075
3076         dm_integrity_enter_synchronous_mode(ic);
3077
3078         return NOTIFY_DONE;
3079 }
3080
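/*
 * Post-suspend: stop the autocommit timer, drain all work queues and flush
 * everything out; in bitmap mode the journal area is reinitialized and the
 * dirty-bitmap flag cleared, so a clean resume needs no bitmap recovery.
 */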
3081 static void dm_integrity_postsuspend(struct dm_target *ti)
3082 {
3083         struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
3084         int r;
3085
3086         WARN_ON(unregister_reboot_notifier(&ic->reboot_notifier));
3087
3088         del_timer_sync(&ic->autocommit_timer);
3089
3090         if (ic->recalc_wq)
3091                 drain_workqueue(ic->recalc_wq);
3092
3093         if (ic->mode == 'B')
3094                 cancel_delayed_work_sync(&ic->bitmap_flush_work);
3095
3096         queue_work(ic->commit_wq, &ic->commit_work);
3097         drain_workqueue(ic->commit_wq);
3098
3099         if (ic->mode == 'J') {
3100                 if (ic->meta_dev)
3101                         queue_work(ic->writer_wq, &ic->writer_work);
3102                 drain_workqueue(ic->writer_wq);
3103                 dm_integrity_flush_buffers(ic, true);
3104         }
3105
3106         if (ic->mode == 'B') {
3107                 dm_integrity_flush_buffers(ic, true);
3108 #if 1
3109                 /* set to 0 to test bitmap replay code */
3110                 init_journal(ic, 0, ic->journal_sections, 0);
3111                 ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
3112                 r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
3113                 if (unlikely(r))
3114                         dm_integrity_io_error(ic, "writing superblock", r);
3115 #endif
3116         }
3117
3118         BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
3119
3120         ic->journal_uptodate = true;
3121 }
3122
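/*
 * Resume: extend the bitmap if the device has grown, recover from a dirty
 * bitmap (scheduling recalculation of regions that may have been written),
 * replay the journal and restart any interrupted recalculation.
 */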
3123 static void dm_integrity_resume(struct dm_target *ti)
3124 {
3125         struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
3126         __u64 old_provided_data_sectors = le64_to_cpu(ic->sb->provided_data_sectors);
3127         int r;
3128
3129         DEBUG_print("resume\n");
3130
3131         if (ic->provided_data_sectors != old_provided_data_sectors) {
3132                 if (ic->provided_data_sectors > old_provided_data_sectors &&
3133                     ic->mode == 'B' &&
3134                     ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit) {
3135                         rw_journal_sectors(ic, REQ_OP_READ, 0, 0,
3136                                            ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
3137                         block_bitmap_op(ic, ic->journal, old_provided_data_sectors,
3138                                         ic->provided_data_sectors - old_provided_data_sectors, BITMAP_OP_SET);
3139                         rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
3140                                            ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
3141                 }
3142
3143                 ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors);
3144                 r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
3145                 if (unlikely(r))
3146                         dm_integrity_io_error(ic, "writing superblock", r);
3147         }
3148
3149         if (ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP)) {
3150                 DEBUG_print("resume dirty_bitmap\n");
3151                 rw_journal_sectors(ic, REQ_OP_READ, 0, 0,
3152                                    ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
3153                 if (ic->mode == 'B') {
3154                         if (ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit &&
3155                             !ic->reset_recalculate_flag) {
3156                                 block_bitmap_copy(ic, ic->recalc_bitmap, ic->journal);
3157                                 block_bitmap_copy(ic, ic->may_write_bitmap, ic->journal);
3158                                 if (!block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors,
3159                                                      BITMAP_OP_TEST_ALL_CLEAR)) {
3160                                         ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
3161                                         ic->sb->recalc_sector = cpu_to_le64(0);
3162                                 }
3163                         } else {
3164                                 DEBUG_print("non-matching blocks_per_bitmap_bit: %u, %u\n",
3165                                             ic->sb->log2_blocks_per_bitmap_bit, ic->log2_blocks_per_bitmap_bit);
3166                                 ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit;
3167                                 block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET);
3168                                 block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET);
3169                                 block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_SET);
3170                                 rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
3171                                                    ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
3172                                 ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
3173                                 ic->sb->recalc_sector = cpu_to_le64(0);
3174                         }
3175                 } else {
3176                         if (!(ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit &&
3177                               block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_TEST_ALL_CLEAR)) ||
3178                             ic->reset_recalculate_flag) {
3179                                 ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
3180                                 ic->sb->recalc_sector = cpu_to_le64(0);
3181                         }
3182                         init_journal(ic, 0, ic->journal_sections, 0);
3183                         replay_journal(ic);
3184                         ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
3185                 }
3186                 r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
3187                 if (unlikely(r))
3188                         dm_integrity_io_error(ic, "writing superblock", r);
3189         } else {
3190                 replay_journal(ic);
3191                 if (ic->reset_recalculate_flag) {
3192                         ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
3193                         ic->sb->recalc_sector = cpu_to_le64(0);
3194                 }
3195                 if (ic->mode == 'B') {
3196                         ic->sb->flags |= cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
3197                         ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit;
3198                         r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
3199                         if (unlikely(r))
3200                                 dm_integrity_io_error(ic, "writing superblock", r);
3201
3202                         block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
3203                         block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
3204                         block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
3205                         if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
3206                             le64_to_cpu(ic->sb->recalc_sector) < ic->provided_data_sectors) {
3207                                 block_bitmap_op(ic, ic->journal, le64_to_cpu(ic->sb->recalc_sector),
3208                                                 ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
3209                                 block_bitmap_op(ic, ic->recalc_bitmap, le64_to_cpu(ic->sb->recalc_sector),
3210                                                 ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
3211                                 block_bitmap_op(ic, ic->may_write_bitmap, le64_to_cpu(ic->sb->recalc_sector),
3212                                                 ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
3213                         }
3214                         rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
3215                                            ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
3216                 }
3217         }
3218
3219         DEBUG_print("testing recalc: %x\n", ic->sb->flags);
3220         if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
3221                 __u64 recalc_pos = le64_to_cpu(ic->sb->recalc_sector);
3222                 DEBUG_print("recalc pos: %llx / %llx\n", recalc_pos, ic->provided_data_sectors);
3223                 if (recalc_pos < ic->provided_data_sectors) {
3224                         queue_work(ic->recalc_wq, &ic->recalc_work);
3225                 } else if (recalc_pos > ic->provided_data_sectors) {
3226                         ic->sb->recalc_sector = cpu_to_le64(ic->provided_data_sectors);
3227                         recalc_write_super(ic);
3228                 }
3229         }
3230
3231         ic->reboot_notifier.notifier_call = dm_integrity_reboot;
3232         ic->reboot_notifier.next = NULL;
3233         ic->reboot_notifier.priority = INT_MAX - 1;     /* be notified after md and before hardware drivers */
3234         WARN_ON(register_reboot_notifier(&ic->reboot_notifier));
3235
3236 #if 0
3237         /* set to 1 to stress test synchronous mode */
3238         dm_integrity_enter_synchronous_mode(ic);
3239 #endif
3240 }
3241
3242 static void dm_integrity_status(struct dm_target *ti, status_type_t type,
3243                                 unsigned status_flags, char *result, unsigned maxlen)
3244 {
3245         struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
3246         unsigned arg_count;
3247         size_t sz = 0;
3248
3249         switch (type) {
3250         case STATUSTYPE_INFO:
3251                 DMEMIT("%llu %llu",
3252                         (unsigned long long)atomic64_read(&ic->number_of_mismatches),
3253                         ic->provided_data_sectors);
3254                 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
3255                         DMEMIT(" %llu", le64_to_cpu(ic->sb->recalc_sector));
3256                 else
3257                         DMEMIT(" -");
3258                 break;
3259
3260         case STATUSTYPE_TABLE: {
3261                 __u64 watermark_percentage = (__u64)(ic->journal_entries - ic->free_sectors_threshold) * 100;
3262                 watermark_percentage += ic->journal_entries / 2;
3263                 do_div(watermark_percentage, ic->journal_entries);
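                /*
                 * Count the optional table arguments; 'J' mode contributes
                 * two (journal_watermark, commit_time) and 'B' mode two
                 * (sectors_per_bit, bitmap_flush_interval), hence the
                 * duplicated mode tests below.
                 */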
3264                 arg_count = 3;
3265                 arg_count += !!ic->meta_dev;
3266                 arg_count += ic->sectors_per_block != 1;
3267                 arg_count += !!(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING));
3268                 arg_count += ic->reset_recalculate_flag;
3269                 arg_count += ic->discard;
3270                 arg_count += ic->mode == 'J';
3271                 arg_count += ic->mode == 'J';
3272                 arg_count += ic->mode == 'B';
3273                 arg_count += ic->mode == 'B';
3274                 arg_count += !!ic->internal_hash_alg.alg_string;
3275                 arg_count += !!ic->journal_crypt_alg.alg_string;
3276                 arg_count += !!ic->journal_mac_alg.alg_string;
3277                 arg_count += (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0;
3278                 arg_count += (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) != 0;
3279                 arg_count += ic->legacy_recalculate;
3280                 DMEMIT("%s %llu %u %c %u", ic->dev->name, ic->start,
3281                        ic->tag_size, ic->mode, arg_count);
3282                 if (ic->meta_dev)
3283                         DMEMIT(" meta_device:%s", ic->meta_dev->name);
3284                 if (ic->sectors_per_block != 1)
3285                         DMEMIT(" block_size:%u", ic->sectors_per_block << SECTOR_SHIFT);
3286                 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
3287                         DMEMIT(" recalculate");
3288                 if (ic->reset_recalculate_flag)
3289                         DMEMIT(" reset_recalculate");
3290                 if (ic->discard)
3291                         DMEMIT(" allow_discards");
3292                 DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS);
3293                 DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors);
3294                 DMEMIT(" buffer_sectors:%u", 1U << ic->log2_buffer_sectors);
3295                 if (ic->mode == 'J') {
3296                         DMEMIT(" journal_watermark:%u", (unsigned)watermark_percentage);
3297                         DMEMIT(" commit_time:%u", ic->autocommit_msec);
3298                 }
3299                 if (ic->mode == 'B') {
3300                         DMEMIT(" sectors_per_bit:%llu", (sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit);
3301                         DMEMIT(" bitmap_flush_interval:%u", jiffies_to_msecs(ic->bitmap_flush_interval));
3302                 }
3303                 if ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0)
3304                         DMEMIT(" fix_padding");
3305                 if ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) != 0)
3306                         DMEMIT(" fix_hmac");
3307                 if (ic->legacy_recalculate)
3308                         DMEMIT(" legacy_recalculate");
3309
3310 #define EMIT_ALG(a, n)                                                  \
3311                 do {                                                    \
3312                         if (ic->a.alg_string) {                         \
3313                                 DMEMIT(" %s:%s", n, ic->a.alg_string);  \
3314                                 if (ic->a.key_string)                   \
3315                                         DMEMIT(":%s", ic->a.key_string);\
3316                         }                                               \
3317                 } while (0)
3318                 EMIT_ALG(internal_hash_alg, "internal_hash");
3319                 EMIT_ALG(journal_crypt_alg, "journal_crypt");
3320                 EMIT_ALG(journal_mac_alg, "journal_mac");
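                /*
                 * Illustrative example (made-up values, not taken from a real
                 * device) of a resulting table line for a journaled target:
                 *   8:0 0 4 J 5 journal_sectors:1024 interleave_sectors:32768
                 *   buffer_sectors:128 journal_watermark:50 commit_time:10000
                 */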
3321                 break;
3322         }
3323         case STATUSTYPE_IMA:
3324                 DMEMIT_TARGET_NAME_VERSION(ti->type);
3325                 DMEMIT(",dev_name=%s,start=%llu,tag_size=%u,mode=%c",
3326                         ic->dev->name, ic->start, ic->tag_size, ic->mode);
3327
3328                 if (ic->meta_dev)
3329                         DMEMIT(",meta_device=%s", ic->meta_dev->name);
3330                 if (ic->sectors_per_block != 1)
3331                         DMEMIT(",block_size=%u", ic->sectors_per_block << SECTOR_SHIFT);
3332
3333                 DMEMIT(",recalculate=%c", (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) ?
3334                        'y' : 'n');
3335                 DMEMIT(",allow_discards=%c", ic->discard ? 'y' : 'n');
3336                 DMEMIT(",fix_padding=%c",
3337                        ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0) ? 'y' : 'n');
3338                 DMEMIT(",fix_hmac=%c",
3339                        ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) != 0) ? 'y' : 'n');
3340                 DMEMIT(",legacy_recalculate=%c", ic->legacy_recalculate ? 'y' : 'n');
3341
3342                 DMEMIT(",journal_sectors=%u", ic->initial_sectors - SB_SECTORS);
3343                 DMEMIT(",interleave_sectors=%u", 1U << ic->sb->log2_interleave_sectors);
3344                 DMEMIT(",buffer_sectors=%u", 1U << ic->log2_buffer_sectors);
3345                 DMEMIT(";");
3346                 break;
3347         }
3348 }
3349
3350 static int dm_integrity_iterate_devices(struct dm_target *ti,
3351                                         iterate_devices_callout_fn fn, void *data)
3352 {
3353         struct dm_integrity_c *ic = ti->private;
3354
3355         if (!ic->meta_dev)
3356                 return fn(ti, ic->dev, ic->start + ic->initial_sectors + ic->metadata_run, ti->len, data);
3357         else
3358                 return fn(ti, ic->dev, 0, ti->len, data);
3359 }
3360
3361 static void dm_integrity_io_hints(struct dm_target *ti, struct queue_limits *limits)
3362 {
3363         struct dm_integrity_c *ic = ti->private;
3364
3365         if (ic->sectors_per_block > 1) {
3366                 limits->logical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
3367                 limits->physical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
3368                 blk_limits_io_min(limits, ic->sectors_per_block << SECTOR_SHIFT);
3369         }
3370 }
3371
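/*
 * A journal section consists of JOURNAL_BLOCK_SECTORS sectors holding the
 * journal entries, followed by the data sectors those entries describe; the
 * entry size depends on the block size and on the tag size.
 */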
3372 static void calculate_journal_section_size(struct dm_integrity_c *ic)
3373 {
3374         unsigned sector_space = JOURNAL_SECTOR_DATA;
3375
3376         ic->journal_sections = le32_to_cpu(ic->sb->journal_sections);
3377         ic->journal_entry_size = roundup(offsetof(struct journal_entry, last_bytes[ic->sectors_per_block]) + ic->tag_size,
3378                                          JOURNAL_ENTRY_ROUNDUP);
3379
3380         if (ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC))
3381                 sector_space -= JOURNAL_MAC_PER_SECTOR;
3382         ic->journal_entries_per_sector = sector_space / ic->journal_entry_size;
3383         ic->journal_section_entries = ic->journal_entries_per_sector * JOURNAL_BLOCK_SECTORS;
3384         ic->journal_section_sectors = (ic->journal_section_entries << ic->sb->log2_sectors_per_block) + JOURNAL_BLOCK_SECTORS;
3385         ic->journal_entries = ic->journal_section_entries * ic->journal_sections;
3386 }
3387
3388 static int calculate_device_limits(struct dm_integrity_c *ic)
3389 {
3390         __u64 initial_sectors;
3391
3392         calculate_journal_section_size(ic);
3393         initial_sectors = SB_SECTORS + (__u64)ic->journal_section_sectors * ic->journal_sections;
3394         if (initial_sectors + METADATA_PADDING_SECTORS >= ic->meta_device_sectors || initial_sectors > UINT_MAX)
3395                 return -EINVAL;
3396         ic->initial_sectors = initial_sectors;
3397
3398         if (!ic->meta_dev) {
3399                 sector_t last_sector, last_area, last_offset;
3400
3401                 /* we have to maintain excessive padding for compatibility with existing volumes */
3402                 __u64 metadata_run_padding =
3403                         ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING) ?
3404                         (__u64)(METADATA_PADDING_SECTORS << SECTOR_SHIFT) :
3405                         (__u64)(1 << SECTOR_SHIFT << METADATA_PADDING_SECTORS);
3406
3407                 ic->metadata_run = round_up((__u64)ic->tag_size << (ic->sb->log2_interleave_sectors - ic->sb->log2_sectors_per_block),
3408                                             metadata_run_padding) >> SECTOR_SHIFT;
3409                 if (!(ic->metadata_run & (ic->metadata_run - 1)))
3410                         ic->log2_metadata_run = __ffs(ic->metadata_run);
3411                 else
3412                         ic->log2_metadata_run = -1;
3413
3414                 get_area_and_offset(ic, ic->provided_data_sectors - 1, &last_area, &last_offset);
3415                 last_sector = get_data_sector(ic, last_area, last_offset);
3416                 if (last_sector < ic->start || last_sector >= ic->meta_device_sectors)
3417                         return -EINVAL;
3418         } else {
3419                 __u64 meta_size = (ic->provided_data_sectors >> ic->sb->log2_sectors_per_block) * ic->tag_size;
3420                 meta_size = (meta_size + ((1U << (ic->log2_buffer_sectors + SECTOR_SHIFT)) - 1))
3421                                 >> (ic->log2_buffer_sectors + SECTOR_SHIFT);
3422                 meta_size <<= ic->log2_buffer_sectors;
3423                 if (ic->initial_sectors + meta_size < ic->initial_sectors ||
3424                     ic->initial_sectors + meta_size > ic->meta_device_sectors)
3425                         return -EINVAL;
3426                 ic->metadata_run = 1;
3427                 ic->log2_metadata_run = 0;
3428         }
3429
3430         return 0;
3431 }
3432
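/*
 * For the interleaved layout the usable size cannot be computed directly, so
 * build provided_data_sectors bit by bit from the top: tentatively set each
 * bit and keep it only if the resulting layout still fits on the device.
 */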
3433 static void get_provided_data_sectors(struct dm_integrity_c *ic)
3434 {
3435         if (!ic->meta_dev) {
3436                 int test_bit;
3437                 ic->provided_data_sectors = 0;
3438                 for (test_bit = fls64(ic->meta_device_sectors) - 1; test_bit >= 3; test_bit--) {
3439                         __u64 prev_data_sectors = ic->provided_data_sectors;
3440
3441                         ic->provided_data_sectors |= (sector_t)1 << test_bit;
3442                         if (calculate_device_limits(ic))
3443                                 ic->provided_data_sectors = prev_data_sectors;
3444                 }
3445         } else {
3446                 ic->provided_data_sectors = ic->data_device_sectors;
3447                 ic->provided_data_sectors &= ~(sector_t)(ic->sectors_per_block - 1);
3448         }
3449 }
3450
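/*
 * Format a brand new superblock. With a separate metadata device the journal
 * size is fitted the same way as provided_data_sectors, bit by bit from the
 * top; if no journal fits at all, the buffer size is reduced and the fit is
 * retried.
 */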
3451 static int initialize_superblock(struct dm_integrity_c *ic, unsigned journal_sectors, unsigned interleave_sectors)
3452 {
3453         unsigned journal_sections;
3454         int test_bit;
3455
3456         memset(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT);
3457         memcpy(ic->sb->magic, SB_MAGIC, 8);
3458         ic->sb->integrity_tag_size = cpu_to_le16(ic->tag_size);
3459         ic->sb->log2_sectors_per_block = __ffs(ic->sectors_per_block);
3460         if (ic->journal_mac_alg.alg_string)
3461                 ic->sb->flags |= cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC);
3462
3463         calculate_journal_section_size(ic);
3464         journal_sections = journal_sectors / ic->journal_section_sectors;
3465         if (!journal_sections)
3466                 journal_sections = 1;
3467
3468         if (ic->fix_hmac && (ic->internal_hash_alg.alg_string || ic->journal_mac_alg.alg_string)) {
3469                 ic->sb->flags |= cpu_to_le32(SB_FLAG_FIXED_HMAC);
3470                 get_random_bytes(ic->sb->salt, SALT_SIZE);
3471         }
3472
3473         if (!ic->meta_dev) {
3474                 if (ic->fix_padding)
3475                         ic->sb->flags |= cpu_to_le32(SB_FLAG_FIXED_PADDING);
3476                 ic->sb->journal_sections = cpu_to_le32(journal_sections);
3477                 if (!interleave_sectors)
3478                         interleave_sectors = DEFAULT_INTERLEAVE_SECTORS;
3479                 ic->sb->log2_interleave_sectors = __fls(interleave_sectors);
3480                 ic->sb->log2_interleave_sectors = max((__u8)MIN_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
3481                 ic->sb->log2_interleave_sectors = min((__u8)MAX_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
3482
3483                 get_provided_data_sectors(ic);
3484                 if (!ic->provided_data_sectors)
3485                         return -EINVAL;
3486         } else {
3487                 ic->sb->log2_interleave_sectors = 0;
3488
3489                 get_provided_data_sectors(ic);
3490                 if (!ic->provided_data_sectors)
3491                         return -EINVAL;
3492
3493 try_smaller_buffer:
3494                 ic->sb->journal_sections = cpu_to_le32(0);
3495                 for (test_bit = fls(journal_sections) - 1; test_bit >= 0; test_bit--) {
3496                         __u32 prev_journal_sections = le32_to_cpu(ic->sb->journal_sections);
3497                         __u32 test_journal_sections = prev_journal_sections | (1U << test_bit);
3498                         if (test_journal_sections > journal_sections)
3499                                 continue;
3500                         ic->sb->journal_sections = cpu_to_le32(test_journal_sections);
3501                         if (calculate_device_limits(ic))
3502                                 ic->sb->journal_sections = cpu_to_le32(prev_journal_sections);
3503
3504                 }
3505                 if (!le32_to_cpu(ic->sb->journal_sections)) {
3506                         if (ic->log2_buffer_sectors > 3) {
3507                                 ic->log2_buffer_sectors--;
3508                                 goto try_smaller_buffer;
3509                         }
3510                         return -EINVAL;
3511                 }
3512         }
3513
3514         ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors);
3515
3516         sb_set_version(ic);
3517
3518         return 0;
3519 }
3520
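/*
 * Register the blk-integrity profile, so that bios arriving at this target
 * carry an integrity payload of ic->tag_size bytes per block.
 */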
3521 static void dm_integrity_set(struct dm_target *ti, struct dm_integrity_c *ic)
3522 {
3523         struct gendisk *disk = dm_disk(dm_table_get_md(ti->table));
3524         struct blk_integrity bi;
3525
3526         memset(&bi, 0, sizeof(bi));
3527         bi.profile = &dm_integrity_profile;
3528         bi.tuple_size = ic->tag_size;
3529         bi.tag_size = bi.tuple_size;
3530         bi.interval_exp = ic->sb->log2_sectors_per_block + SECTOR_SHIFT;
3531
3532         blk_integrity_register(disk, &bi);
3533         blk_queue_max_integrity_segments(disk->queue, UINT_MAX);
3534 }
3535
3536 static void dm_integrity_free_page_list(struct page_list *pl)
3537 {
3538         unsigned i;
3539
3540         if (!pl)
3541                 return;
3542         for (i = 0; pl[i].page; i++)
3543                 __free_page(pl[i].page);
3544         kvfree(pl);
3545 }
3546
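/*
 * Allocate a page_list of n_pages pages; the array is terminated by a NULL
 * page entry and the pages are chained through ->next for dm-io.
 */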
3547 static struct page_list *dm_integrity_alloc_page_list(unsigned n_pages)
3548 {
3549         struct page_list *pl;
3550         unsigned i;
3551
3552         pl = kvmalloc_array(n_pages + 1, sizeof(struct page_list), GFP_KERNEL | __GFP_ZERO);
3553         if (!pl)
3554                 return NULL;
3555
3556         for (i = 0; i < n_pages; i++) {
3557                 pl[i].page = alloc_page(GFP_KERNEL);
3558                 if (!pl[i].page) {
3559                         dm_integrity_free_page_list(pl);
3560                         return NULL;
3561                 }
3562                 if (i)
3563                         pl[i - 1].next = &pl[i];
3564         }
3565         pl[i].page = NULL;
3566         pl[i].next = NULL;
3567
3568         return pl;
3569 }
3570
3571 static void dm_integrity_free_journal_scatterlist(struct dm_integrity_c *ic, struct scatterlist **sl)
3572 {
3573         unsigned i;
3574         for (i = 0; i < ic->journal_sections; i++)
3575                 kvfree(sl[i]);
3576         kvfree(sl);
3577 }
3578
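/*
 * Build one scatterlist per journal section, spanning the pages that hold
 * that section's sectors, for use by the journal encryption code.
 */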
3579 static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_integrity_c *ic,
3580                                                                    struct page_list *pl)
3581 {
3582         struct scatterlist **sl;
3583         unsigned i;
3584
3585         sl = kvmalloc_array(ic->journal_sections,
3586                             sizeof(struct scatterlist *),
3587                             GFP_KERNEL | __GFP_ZERO);
3588         if (!sl)
3589                 return NULL;
3590
3591         for (i = 0; i < ic->journal_sections; i++) {
3592                 struct scatterlist *s;
3593                 unsigned start_index, start_offset;
3594                 unsigned end_index, end_offset;
3595                 unsigned n_pages;
3596                 unsigned idx;
3597
3598                 page_list_location(ic, i, 0, &start_index, &start_offset);
3599                 page_list_location(ic, i, ic->journal_section_sectors - 1,
3600                                    &end_index, &end_offset);
3601
3602                 n_pages = (end_index - start_index + 1);
3603
3604                 s = kvmalloc_array(n_pages, sizeof(struct scatterlist),
3605                                    GFP_KERNEL);
3606                 if (!s) {
3607                         dm_integrity_free_journal_scatterlist(ic, sl);
3608                         return NULL;
3609                 }
3610
3611                 sg_init_table(s, n_pages);
3612                 for (idx = start_index; idx <= end_index; idx++) {
3613                         char *va = lowmem_page_address(pl[idx].page);
3614                         unsigned start = 0, end = PAGE_SIZE;
3615                         if (idx == start_index)
3616                                 start = start_offset;
3617                         if (idx == end_index)
3618                                 end = end_offset + (1 << SECTOR_SHIFT);
3619                         sg_set_buf(&s[idx - start_index], va + start, end - start);
3620                 }
3621
3622                 sl[i] = s;
3623         }
3624
3625         return sl;
3626 }
3627
3628 static void free_alg(struct alg_spec *a)
3629 {
3630         kfree_sensitive(a->alg_string);
3631         kfree_sensitive(a->key);
3632         memset(a, 0, sizeof *a);
3633 }
3634
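/*
 * Parse an "option:algorithm[:key]" argument, e.g. a hypothetical
 * "internal_hash:hmac(sha256):0123...". The key, if present, is a hex string
 * that is converted to binary.
 */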
3635 static int get_alg_and_key(const char *arg, struct alg_spec *a, char **error, char *error_inval)
3636 {
3637         char *k;
3638
3639         free_alg(a);
3640
3641         a->alg_string = kstrdup(strchr(arg, ':') + 1, GFP_KERNEL);
3642         if (!a->alg_string)
3643                 goto nomem;
3644
3645         k = strchr(a->alg_string, ':');
3646         if (k) {
3647                 *k = 0;
3648                 a->key_string = k + 1;
3649                 if (strlen(a->key_string) & 1)
3650                         goto inval;
3651
3652                 a->key_size = strlen(a->key_string) / 2;
3653                 a->key = kmalloc(a->key_size, GFP_KERNEL);
3654                 if (!a->key)
3655                         goto nomem;
3656                 if (hex2bin(a->key, a->key_string, a->key_size))
3657                         goto inval;
3658         }
3659
3660         return 0;
3661 inval:
3662         *error = error_inval;
3663         return -EINVAL;
3664 nomem:
3665         *error = "Out of memory for an argument";
3666         return -ENOMEM;
3667 }
3668
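/*
 * Allocate the keyed hash used for the journal MAC or the internal hash; a
 * keyed algorithm for which no key was supplied is rejected with -ENOKEY.
 */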
3669 static int get_mac(struct crypto_shash **hash, struct alg_spec *a, char **error,
3670                    char *error_alg, char *error_key)
3671 {
3672         int r;
3673
3674         if (a->alg_string) {
3675                 *hash = crypto_alloc_shash(a->alg_string, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
3676                 if (IS_ERR(*hash)) {
3677                         *error = error_alg;
3678                         r = PTR_ERR(*hash);
3679                         *hash = NULL;
3680                         return r;
3681                 }
3682
3683                 if (a->key) {
3684                         r = crypto_shash_setkey(*hash, a->key, a->key_size);
3685                         if (r) {
3686                                 *error = error_key;
3687                                 return r;
3688                         }
3689                 } else if (crypto_shash_get_flags(*hash) & CRYPTO_TFM_NEED_KEY) {
3690                         *error = error_key;
3691                         return -ENOKEY;
3692                 }
3693         }
3694
3695         return 0;
3696 }
3697
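/*
 * Allocate the in-core journal and, if journal encryption is configured, set
 * up either the XOR keystream (stream ciphers) or the per-section IVs and
 * scatterlists (block ciphers).
 */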
3698 static int create_journal(struct dm_integrity_c *ic, char **error)
3699 {
3700         int r = 0;
3701         unsigned i;
3702         __u64 journal_pages, journal_desc_size, journal_tree_size;
3703         unsigned char *crypt_data = NULL, *crypt_iv = NULL;
3704         struct skcipher_request *req = NULL;
3705
3706         ic->commit_ids[0] = cpu_to_le64(0x1111111111111111ULL);
3707         ic->commit_ids[1] = cpu_to_le64(0x2222222222222222ULL);
3708         ic->commit_ids[2] = cpu_to_le64(0x3333333333333333ULL);
3709         ic->commit_ids[3] = cpu_to_le64(0x4444444444444444ULL);
3710
3711         journal_pages = roundup((__u64)ic->journal_sections * ic->journal_section_sectors,
3712                                 PAGE_SIZE >> SECTOR_SHIFT) >> (PAGE_SHIFT - SECTOR_SHIFT);
3713         journal_desc_size = journal_pages * sizeof(struct page_list);
3714         if (journal_pages >= totalram_pages() - totalhigh_pages() || journal_desc_size > ULONG_MAX) {
3715                 *error = "Journal doesn't fit into memory";
3716                 r = -ENOMEM;
3717                 goto bad;
3718         }
3719         ic->journal_pages = journal_pages;
3720
3721         ic->journal = dm_integrity_alloc_page_list(ic->journal_pages);
3722         if (!ic->journal) {
3723                 *error = "Could not allocate memory for journal";
3724                 r = -ENOMEM;
3725                 goto bad;
3726         }
3727         if (ic->journal_crypt_alg.alg_string) {
3728                 unsigned ivsize, blocksize;
3729                 struct journal_completion comp;
3730
3731                 comp.ic = ic;
3732                 ic->journal_crypt = crypto_alloc_skcipher(ic->journal_crypt_alg.alg_string, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
3733                 if (IS_ERR(ic->journal_crypt)) {
3734                         *error = "Invalid journal cipher";
3735                         r = PTR_ERR(ic->journal_crypt);
3736                         ic->journal_crypt = NULL;
3737                         goto bad;
3738                 }
3739                 ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
3740                 blocksize = crypto_skcipher_blocksize(ic->journal_crypt);
3741
3742                 if (ic->journal_crypt_alg.key) {
3743                         r = crypto_skcipher_setkey(ic->journal_crypt, ic->journal_crypt_alg.key,
3744                                                    ic->journal_crypt_alg.key_size);
3745                         if (r) {
3746                                 *error = "Error setting encryption key";
3747                                 goto bad;
3748                         }
3749                 }
3750                 DEBUG_print("cipher %s, block size %u, iv size %u\n",
3751                             ic->journal_crypt_alg.alg_string, blocksize, ivsize);
3752
3753                 ic->journal_io = dm_integrity_alloc_page_list(ic->journal_pages);
3754                 if (!ic->journal_io) {
3755                         *error = "Could not allocate memory for journal io";
3756                         r = -ENOMEM;
3757                         goto bad;
3758                 }
3759
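                /*
                 * A cipher with block size 1 is a stream cipher: encrypt an
                 * all-zero journal image once to precompute the keystream
                 * (journal_xor) and the encrypted commit ids; after that the
                 * cipher itself is no longer needed and is freed.
                 */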
3760                 if (blocksize == 1) {
3761                         struct scatterlist *sg;
3762
3763                         req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
3764                         if (!req) {
3765                                 *error = "Could not allocate crypt request";
3766                                 r = -ENOMEM;
3767                                 goto bad;
3768                         }
3769
3770                         crypt_iv = kzalloc(ivsize, GFP_KERNEL);
3771                         if (!crypt_iv) {
3772                                 *error = "Could not allocate iv";
3773                                 r = -ENOMEM;
3774                                 goto bad;
3775                         }
3776
3777                         ic->journal_xor = dm_integrity_alloc_page_list(ic->journal_pages);
3778                         if (!ic->journal_xor) {
3779                                 *error = "Could not allocate memory for journal xor";
3780                                 r = -ENOMEM;
3781                                 goto bad;
3782                         }
3783
3784                         sg = kvmalloc_array(ic->journal_pages + 1,
3785                                             sizeof(struct scatterlist),
3786                                             GFP_KERNEL);
3787                         if (!sg) {
3788                                 *error = "Unable to allocate sg list";
3789                                 r = -ENOMEM;
3790                                 goto bad;
3791                         }
3792                         sg_init_table(sg, ic->journal_pages + 1);
3793                         for (i = 0; i < ic->journal_pages; i++) {
3794                                 char *va = lowmem_page_address(ic->journal_xor[i].page);
3795                                 clear_page(va);
3796                                 sg_set_buf(&sg[i], va, PAGE_SIZE);
3797                         }
3798                         sg_set_buf(&sg[i], &ic->commit_ids, sizeof(ic->commit_ids));
3799
3800                         skcipher_request_set_crypt(req, sg, sg,
3801                                                    PAGE_SIZE * ic->journal_pages + sizeof(ic->commit_ids), crypt_iv);
3802                         init_completion(&comp.comp);
3803                         comp.in_flight = (atomic_t)ATOMIC_INIT(1);
3804                         if (do_crypt(true, req, &comp))
3805                                 wait_for_completion(&comp.comp);
3806                         kvfree(sg);
3807                         r = dm_integrity_failed(ic);
3808                         if (r) {
3809                                 *error = "Unable to encrypt journal";
3810                                 goto bad;
3811                         }
3812                         DEBUG_bytes(lowmem_page_address(ic->journal_xor[0].page), 64, "xor data");
3813
3814                         crypto_free_skcipher(ic->journal_crypt);
3815                         ic->journal_crypt = NULL;
3816                 } else {
3817                         unsigned crypt_len = roundup(ivsize, blocksize);
3818
3819                         req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
3820                         if (!req) {
3821                                 *error = "Could not allocate crypt request";
3822                                 r = -ENOMEM;
3823                                 goto bad;
3824                         }
3825
3826                         crypt_iv = kmalloc(ivsize, GFP_KERNEL);
3827                         if (!crypt_iv) {
3828                                 *error = "Could not allocate iv";
3829                                 r = -ENOMEM;
3830                                 goto bad;
3831                         }
3832
3833                         crypt_data = kmalloc(crypt_len, GFP_KERNEL);
3834                         if (!crypt_data) {
3835                                 *error = "Unable to allocate crypt data";
3836                                 r = -ENOMEM;
3837                                 goto bad;
3838                         }
3839
3840                         ic->journal_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal);
3841                         if (!ic->journal_scatterlist) {
3842                                 *error = "Unable to allocate sg list";
3843                                 r = -ENOMEM;
3844                                 goto bad;
3845                         }
3846                         ic->journal_io_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal_io);
3847                         if (!ic->journal_io_scatterlist) {
3848                                 *error = "Unable to allocate sg list";
3849                                 r = -ENOMEM;
3850                                 goto bad;
3851                         }
3852                         ic->sk_requests = kvmalloc_array(ic->journal_sections,
3853                                                          sizeof(struct skcipher_request *),
3854                                                          GFP_KERNEL | __GFP_ZERO);
3855                         if (!ic->sk_requests) {
3856                                 *error = "Unable to allocate sk requests";
3857                                 r = -ENOMEM;
3858                                 goto bad;
3859                         }
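			/*
			 * Derive a per-section IV (an ESSIV-like scheme):
			 * encrypt the little-endian section number with the
			 * journal key and stash the result in the second half
			 * of the double-sized iv buffer; the first half is
			 * kept as working space, since the cipher may update
			 * the iv in place during later operations.
			 */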
3860                         for (i = 0; i < ic->journal_sections; i++) {
3861                                 struct scatterlist sg;
3862                                 struct skcipher_request *section_req;
3863                                 __le32 section_le = cpu_to_le32(i);
3864
3865                                 memset(crypt_iv, 0x00, ivsize);
3866                                 memset(crypt_data, 0x00, crypt_len);
3867                                 memcpy(crypt_data, &section_le, min((size_t)crypt_len, sizeof(section_le)));
3868
3869                                 sg_init_one(&sg, crypt_data, crypt_len);
3870                                 skcipher_request_set_crypt(req, &sg, &sg, crypt_len, crypt_iv);
3871                                 init_completion(&comp.comp);
3872                                 comp.in_flight = (atomic_t)ATOMIC_INIT(1);
3873                                 if (do_crypt(true, req, &comp))
3874                                         wait_for_completion(&comp.comp);
3875
3876                                 r = dm_integrity_failed(ic);
3877                                 if (r) {
3878                                         *error = "Unable to generate iv";
3879                                         goto bad;
3880                                 }
3881
3882                                 section_req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
3883                                 if (!section_req) {
3884                                         *error = "Unable to allocate crypt request";
3885                                         r = -ENOMEM;
3886                                         goto bad;
3887                                 }
3888                                 section_req->iv = kmalloc_array(ivsize, 2,
3889                                                                 GFP_KERNEL);
3890                                 if (!section_req->iv) {
3891                                         skcipher_request_free(section_req);
3892                                         *error = "Unable to allocate iv";
3893                                         r = -ENOMEM;
3894                                         goto bad;
3895                                 }
3896                                 memcpy(section_req->iv + ivsize, crypt_data, ivsize);
3897                                 section_req->cryptlen = (size_t)ic->journal_section_sectors << SECTOR_SHIFT;
3898                                 ic->sk_requests[i] = section_req;
3899                                 DEBUG_bytes(crypt_data, ivsize, "iv(%u)", i);
3900                         }
3901                 }
3902         }
3903
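	/*
	 * In the stream-cipher path the commit ids were encrypted in
	 * place along with the journal_xor pages, so they are no longer
	 * guaranteed to differ; bump any duplicates until all
	 * N_COMMIT_IDS are distinct.
	 */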
3904         for (i = 0; i < N_COMMIT_IDS; i++) {
3905                 unsigned j;
3906 retest_commit_id:
3907                 for (j = 0; j < i; j++) {
3908                         if (ic->commit_ids[j] == ic->commit_ids[i]) {
3909                                 ic->commit_ids[i] = cpu_to_le64(le64_to_cpu(ic->commit_ids[i]) + 1);
3910                                 goto retest_commit_id;
3911                         }
3912                 }
3913                 DEBUG_print("commit id %u: %016llx\n", i, le64_to_cpu(ic->commit_ids[i]));
3914         }
3915
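	/*
	 * One rb-tree node is kept per journal entry so that sectors
	 * still sitting in the journal can be found quickly on lookup.
	 */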
3916         journal_tree_size = (__u64)ic->journal_entries * sizeof(struct journal_node);
3917         if (journal_tree_size > ULONG_MAX) {
3918                 *error = "Journal doesn't fit into memory";
3919                 r = -ENOMEM;
3920                 goto bad;
3921         }
3922         ic->journal_tree = kvmalloc(journal_tree_size, GFP_KERNEL);
3923         if (!ic->journal_tree) {
3924                 *error = "Could not allocate memory for journal tree";
3925                 r = -ENOMEM;
3926         }
3927 bad:
3928         kfree(crypt_data);
3929         kfree(crypt_iv);
3930         skcipher_request_free(req);
3931
3932         return r;
3933 }
3934
3935 /*
3936  * Construct an integrity mapping
3937  *
3938  * Arguments:
3939  *      device
3940  *      offset from the start of the device
3941  *      tag size
3942  *      D - direct writes, J - journal writes, B - bitmap mode, R - recovery mode
3943  *      number of optional arguments
3944  *      optional arguments:
3945  *              journal_sectors
3946  *              interleave_sectors
3947  *              buffer_sectors
3948  *              journal_watermark
3949  *              commit_time
3950  *              meta_device
3951  *              block_size
3952  *              sectors_per_bit
3953  *              bitmap_flush_interval
3954  *              internal_hash
3955  *              journal_crypt
3956  *              journal_mac
3957  *              recalculate
 *              reset_recalculate
 *              allow_discards
 *              fix_padding
 *              fix_hmac
 *              legacy_recalculate
3958  */
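/*
 * Illustrative table line (hypothetical device and sizes): a journaled
 * target over /dev/sdb, tag size 4 matching the crc32c digest:
 *
 *	0 1000000 integrity /dev/sdb 0 4 J 1 internal_hash:crc32c
 */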
3959 static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
3960 {
3961         struct dm_integrity_c *ic;
3962         char dummy;
3963         int r;
3964         unsigned extra_args;
3965         struct dm_arg_set as;
3966         static const struct dm_arg _args[] = {
3967                 {0, 18, "Invalid number of feature args"},
3968         };
3969         unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec;
3970         bool should_write_sb;
3971         __u64 threshold;
3972         unsigned long long start;
3973         __s8 log2_sectors_per_bitmap_bit = -1;
3974         __s8 log2_blocks_per_bitmap_bit;
3975         __u64 bits_in_journal;
3976         __u64 n_bitmap_bits;
3977
3978 #define DIRECT_ARGUMENTS        4
3979
3980         if (argc <= DIRECT_ARGUMENTS) {
3981                 ti->error = "Invalid argument count";
3982                 return -EINVAL;
3983         }
3984
3985         ic = kzalloc(sizeof(struct dm_integrity_c), GFP_KERNEL);
3986         if (!ic) {
3987                 ti->error = "Cannot allocate integrity context";
3988                 return -ENOMEM;
3989         }
3990         ti->private = ic;
3991         ti->per_io_data_size = sizeof(struct dm_integrity_io);
3992         ic->ti = ti;
3993
3994         ic->in_progress = RB_ROOT;
3995         INIT_LIST_HEAD(&ic->wait_list);
3996         init_waitqueue_head(&ic->endio_wait);
3997         bio_list_init(&ic->flush_bio_list);
3998         init_waitqueue_head(&ic->copy_to_journal_wait);
3999         init_completion(&ic->crypto_backoff);
4000         atomic64_set(&ic->number_of_mismatches, 0);
4001         ic->bitmap_flush_interval = BITMAP_FLUSH_INTERVAL;
4002
4003         r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ic->dev);
4004         if (r) {
4005                 ti->error = "Device lookup failed";
4006                 goto bad;
4007         }
4008
4009         if (sscanf(argv[1], "%llu%c", &start, &dummy) != 1 || start != (sector_t)start) {
4010                 ti->error = "Invalid starting offset";
4011                 r = -EINVAL;
4012                 goto bad;
4013         }
4014         ic->start = start;
4015
4016         if (strcmp(argv[2], "-")) {
4017                 if (sscanf(argv[2], "%u%c", &ic->tag_size, &dummy) != 1 || !ic->tag_size) {
4018                         ti->error = "Invalid tag size";
4019                         r = -EINVAL;
4020                         goto bad;
4021                 }
4022         }
4023
4024         if (!strcmp(argv[3], "J") || !strcmp(argv[3], "B") ||
4025             !strcmp(argv[3], "D") || !strcmp(argv[3], "R")) {
4026                 ic->mode = argv[3][0];
4027         } else {
4028                 ti->error = "Invalid mode (expecting J, B, D, R)";
4029                 r = -EINVAL;
4030                 goto bad;
4031         }
4032
4033         journal_sectors = 0;
4034         interleave_sectors = DEFAULT_INTERLEAVE_SECTORS;
4035         buffer_sectors = DEFAULT_BUFFER_SECTORS;
4036         journal_watermark = DEFAULT_JOURNAL_WATERMARK;
4037         sync_msec = DEFAULT_SYNC_MSEC;
4038         ic->sectors_per_block = 1;
4039
4040         as.argc = argc - DIRECT_ARGUMENTS;
4041         as.argv = argv + DIRECT_ARGUMENTS;
4042         r = dm_read_arg_group(_args, &as, &extra_args, &ti->error);
4043         if (r)
4044                 goto bad;
4045
4046         while (extra_args--) {
4047                 const char *opt_string;
4048                 unsigned val;
4049                 unsigned long long llval;
4050                 opt_string = dm_shift_arg(&as);
4051                 if (!opt_string) {
4052                         r = -EINVAL;
4053                         ti->error = "Not enough feature arguments";
4054                         goto bad;
4055                 }
4056                 if (sscanf(opt_string, "journal_sectors:%u%c", &val, &dummy) == 1)
4057                         journal_sectors = val ? val : 1;
4058                 else if (sscanf(opt_string, "interleave_sectors:%u%c", &val, &dummy) == 1)
4059                         interleave_sectors = val;
4060                 else if (sscanf(opt_string, "buffer_sectors:%u%c", &val, &dummy) == 1)
4061                         buffer_sectors = val;
4062                 else if (sscanf(opt_string, "journal_watermark:%u%c", &val, &dummy) == 1 && val <= 100)
4063                         journal_watermark = val;
4064                 else if (sscanf(opt_string, "commit_time:%u%c", &val, &dummy) == 1)
4065                         sync_msec = val;
4066                 else if (!strncmp(opt_string, "meta_device:", strlen("meta_device:"))) {
4067                         if (ic->meta_dev) {
4068                                 dm_put_device(ti, ic->meta_dev);
4069                                 ic->meta_dev = NULL;
4070                         }
4071                         r = dm_get_device(ti, strchr(opt_string, ':') + 1,
4072                                           dm_table_get_mode(ti->table), &ic->meta_dev);
4073                         if (r) {
4074                                 ti->error = "Device lookup failed";
4075                                 goto bad;
4076                         }
4077                 } else if (sscanf(opt_string, "block_size:%u%c", &val, &dummy) == 1) {
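			/* block_size must be a power of two between 512 and 4096 */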
4078                         if (val < 1 << SECTOR_SHIFT ||
4079                             val > MAX_SECTORS_PER_BLOCK << SECTOR_SHIFT ||
4080                             (val & (val - 1))) {
4081                                 r = -EINVAL;
4082                                 ti->error = "Invalid block_size argument";
4083                                 goto bad;
4084                         }
4085                         ic->sectors_per_block = val >> SECTOR_SHIFT;
4086                 } else if (sscanf(opt_string, "sectors_per_bit:%llu%c", &llval, &dummy) == 1) {
4087                         log2_sectors_per_bitmap_bit = !llval ? 0 : __ilog2_u64(llval);
4088                 } else if (sscanf(opt_string, "bitmap_flush_interval:%u%c", &val, &dummy) == 1) {
4089                         if (val >= (uint64_t)UINT_MAX * 1000 / HZ) {
4090                                 r = -EINVAL;
4091                                 ti->error = "Invalid bitmap_flush_interval argument";
4092                                 goto bad;
4093                         }
4094                         ic->bitmap_flush_interval = msecs_to_jiffies(val);
4095                 } else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
4096                         r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error,
4097                                             "Invalid internal_hash argument");
4098                         if (r)
4099                                 goto bad;
4100                 } else if (!strncmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
4101                         r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error,
4102                                             "Invalid journal_crypt argument");
4103                         if (r)
4104                                 goto bad;
4105                 } else if (!strncmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
4106                         r = get_alg_and_key(opt_string, &ic->journal_mac_alg, &ti->error,
4107                                             "Invalid journal_mac argument");
4108                         if (r)
4109                                 goto bad;
4110                 } else if (!strcmp(opt_string, "recalculate")) {
4111                         ic->recalculate_flag = true;
4112                 } else if (!strcmp(opt_string, "reset_recalculate")) {
4113                         ic->recalculate_flag = true;
4114                         ic->reset_recalculate_flag = true;
4115                 } else if (!strcmp(opt_string, "allow_discards")) {
4116                         ic->discard = true;
4117                 } else if (!strcmp(opt_string, "fix_padding")) {
4118                         ic->fix_padding = true;
4119                 } else if (!strcmp(opt_string, "fix_hmac")) {
4120                         ic->fix_hmac = true;
4121                 } else if (!strcmp(opt_string, "legacy_recalculate")) {
4122                         ic->legacy_recalculate = true;
4123                 } else {
4124                         r = -EINVAL;
4125                         ti->error = "Invalid argument";
4126                         goto bad;
4127                 }
4128         }
4129
4130         ic->data_device_sectors = bdev_nr_sectors(ic->dev->bdev);
4131         if (!ic->meta_dev)
4132                 ic->meta_device_sectors = ic->data_device_sectors;
4133         else
4134                 ic->meta_device_sectors = bdev_nr_sectors(ic->meta_dev->bdev);
4135
4136         if (!journal_sectors) {
4137                 journal_sectors = min((sector_t)DEFAULT_MAX_JOURNAL_SECTORS,
4138                                       ic->data_device_sectors >> DEFAULT_JOURNAL_SIZE_FACTOR);
4139         }
4140
4141         if (!buffer_sectors)
4142                 buffer_sectors = 1;
4143         ic->log2_buffer_sectors = min((int)__fls(buffer_sectors), 31 - SECTOR_SHIFT);
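	/* e.g. the default of 128 buffer sectors gives log2 = 7: 64 KiB bufio buffers */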
4144
4145         r = get_mac(&ic->internal_hash, &ic->internal_hash_alg, &ti->error,
4146                     "Invalid internal hash", "Error setting internal hash key");
4147         if (r)
4148                 goto bad;
4149
4150         r = get_mac(&ic->journal_mac, &ic->journal_mac_alg, &ti->error,
4151                     "Invalid journal mac", "Error setting journal mac key");
4152         if (r)
4153                 goto bad;
4154
4155         if (!ic->tag_size) {
4156                 if (!ic->internal_hash) {
4157                         ti->error = "Unknown tag size";
4158                         r = -EINVAL;
4159                         goto bad;
4160                 }
4161                 ic->tag_size = crypto_shash_digestsize(ic->internal_hash);
4162         }
4163         if (ic->tag_size > MAX_TAG_SIZE) {
4164                 ti->error = "Tag size too big";
4165                 r = -EINVAL;
4166                 goto bad;
4167         }
4168         if (!(ic->tag_size & (ic->tag_size - 1)))
4169                 ic->log2_tag_size = __ffs(ic->tag_size);
4170         else
4171                 ic->log2_tag_size = -1;
4172
4173         if (ic->mode == 'B' && !ic->internal_hash) {
4174                 r = -EINVAL;
4175                 ti->error = "Bitmap mode can only be used with internal hash";
4176                 goto bad;
4177         }
4178
4179         if (ic->discard && !ic->internal_hash) {
4180                 r = -EINVAL;
4181                 ti->error = "Discard can only be used with internal hash";
4182                 goto bad;
4183         }
4184
4185         ic->autocommit_jiffies = msecs_to_jiffies(sync_msec);
4186         ic->autocommit_msec = sync_msec;
4187         timer_setup(&ic->autocommit_timer, autocommit_fn, 0);
4188
4189         ic->io = dm_io_client_create();
4190         if (IS_ERR(ic->io)) {
4191                 r = PTR_ERR(ic->io);
4192                 ic->io = NULL;
4193                 ti->error = "Cannot allocate dm io";
4194                 goto bad;
4195         }
4196
4197         r = mempool_init_slab_pool(&ic->journal_io_mempool, JOURNAL_IO_MEMPOOL, journal_io_cache);
4198         if (r) {
4199                 ti->error = "Cannot allocate mempool";
4200                 goto bad;
4201         }
4202
4203         ic->metadata_wq = alloc_workqueue("dm-integrity-metadata",
4204                                           WQ_MEM_RECLAIM, METADATA_WORKQUEUE_MAX_ACTIVE);
4205         if (!ic->metadata_wq) {
4206                 ti->error = "Cannot allocate workqueue";
4207                 r = -ENOMEM;
4208                 goto bad;
4209         }
4210
4211         /*
4212          * If this workqueue were percpu, it would cause bio reordering
4213          * and reduced performance.
4214          */
4215         ic->wait_wq = alloc_workqueue("dm-integrity-wait", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
4216         if (!ic->wait_wq) {
4217                 ti->error = "Cannot allocate workqueue";
4218                 r = -ENOMEM;
4219                 goto bad;
4220         }
4221
4222         ic->offload_wq = alloc_workqueue("dm-integrity-offload", WQ_MEM_RECLAIM,
4223                                           METADATA_WORKQUEUE_MAX_ACTIVE);
4224         if (!ic->offload_wq) {
4225                 ti->error = "Cannot allocate workqueue";
4226                 r = -ENOMEM;
4227                 goto bad;
4228         }
4229
4230         ic->commit_wq = alloc_workqueue("dm-integrity-commit", WQ_MEM_RECLAIM, 1);
4231         if (!ic->commit_wq) {
4232                 ti->error = "Cannot allocate workqueue";
4233                 r = -ENOMEM;
4234                 goto bad;
4235         }
4236         INIT_WORK(&ic->commit_work, integrity_commit);
4237
4238         if (ic->mode == 'J' || ic->mode == 'B') {
4239                 ic->writer_wq = alloc_workqueue("dm-integrity-writer", WQ_MEM_RECLAIM, 1);
4240                 if (!ic->writer_wq) {
4241                         ti->error = "Cannot allocate workqueue";
4242                         r = -ENOMEM;
4243                         goto bad;
4244                 }
4245                 INIT_WORK(&ic->writer_work, integrity_writer);
4246         }
4247
4248         ic->sb = alloc_pages_exact(SB_SECTORS << SECTOR_SHIFT, GFP_KERNEL);
4249         if (!ic->sb) {
4250                 r = -ENOMEM;
4251                 ti->error = "Cannot allocate superblock area";
4252                 goto bad;
4253         }
4254
4255         r = sync_rw_sb(ic, REQ_OP_READ, 0);
4256         if (r) {
4257                 ti->error = "Error reading superblock";
4258                 goto bad;
4259         }
4260         should_write_sb = false;
4261         if (memcmp(ic->sb->magic, SB_MAGIC, 8)) {
4262                 if (ic->mode != 'R') {
4263                         if (memchr_inv(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT)) {
4264                                 r = -EINVAL;
4265                                 ti->error = "The device is not initialized";
4266                                 goto bad;
4267                         }
4268                 }
4269
4270                 r = initialize_superblock(ic, journal_sectors, interleave_sectors);
4271                 if (r) {
4272                         ti->error = "Could not initialize superblock";
4273                         goto bad;
4274                 }
4275                 if (ic->mode != 'R')
4276                         should_write_sb = true;
4277         }
4278
4279         if (!ic->sb->version || ic->sb->version > SB_VERSION_5) {
4280                 r = -EINVAL;
4281                 ti->error = "Unknown version";
4282                 goto bad;
4283         }
4284         if (le16_to_cpu(ic->sb->integrity_tag_size) != ic->tag_size) {
4285                 r = -EINVAL;
4286                 ti->error = "Tag size doesn't match the information in superblock";
4287                 goto bad;
4288         }
4289         if (ic->sb->log2_sectors_per_block != __ffs(ic->sectors_per_block)) {
4290                 r = -EINVAL;
4291                 ti->error = "Block size doesn't match the information in superblock";
4292                 goto bad;
4293         }
4294         if (!le32_to_cpu(ic->sb->journal_sections)) {
4295                 r = -EINVAL;
4296                 ti->error = "Corrupted superblock, journal_sections is 0";
4297                 goto bad;
4298         }
4299         /* make sure that ti->max_io_len doesn't overflow */
4300         if (!ic->meta_dev) {
4301                 if (ic->sb->log2_interleave_sectors < MIN_LOG2_INTERLEAVE_SECTORS ||
4302                     ic->sb->log2_interleave_sectors > MAX_LOG2_INTERLEAVE_SECTORS) {
4303                         r = -EINVAL;
4304                         ti->error = "Invalid interleave_sectors in the superblock";
4305                         goto bad;
4306                 }
4307         } else {
4308                 if (ic->sb->log2_interleave_sectors) {
4309                         r = -EINVAL;
4310                         ti->error = "Invalid interleave_sectors in the superblock";
4311                         goto bad;
4312                 }
4313         }
4314         if (!!(ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC)) != !!ic->journal_mac_alg.alg_string) {
4315                 r = -EINVAL;
4316                 ti->error = "Journal mac mismatch";
4317                 goto bad;
4318         }
4319
4320         get_provided_data_sectors(ic);
4321         if (!ic->provided_data_sectors) {
4322                 r = -EINVAL;
4323                 ti->error = "The device is too small";
4324                 goto bad;
4325         }
4326
4327 try_smaller_buffer:
4328         r = calculate_device_limits(ic);
4329         if (r) {
4330                 if (ic->meta_dev) {
4331                         if (ic->log2_buffer_sectors > 3) {
4332                                 ic->log2_buffer_sectors--;
4333                                 goto try_smaller_buffer;
4334                         }
4335                 }
4336                 ti->error = "The device is too small";
4337                 goto bad;
4338         }
4339
4340         if (log2_sectors_per_bitmap_bit < 0)
4341                 log2_sectors_per_bitmap_bit = __fls(DEFAULT_SECTORS_PER_BITMAP_BIT);
4342         if (log2_sectors_per_bitmap_bit < ic->sb->log2_sectors_per_block)
4343                 log2_sectors_per_bitmap_bit = ic->sb->log2_sectors_per_block;
4344
4345         bits_in_journal = ((__u64)ic->journal_section_sectors * ic->journal_sections) << (SECTOR_SHIFT + 3);
4346         if (bits_in_journal > UINT_MAX)
4347                 bits_in_journal = UINT_MAX;
4348         while (bits_in_journal < (ic->provided_data_sectors + ((sector_t)1 << log2_sectors_per_bitmap_bit) - 1) >> log2_sectors_per_bitmap_bit)
4349                 log2_sectors_per_bitmap_bit++;
4350
4351         log2_blocks_per_bitmap_bit = log2_sectors_per_bitmap_bit - ic->sb->log2_sectors_per_block;
4352         ic->log2_blocks_per_bitmap_bit = log2_blocks_per_bitmap_bit;
4353         if (should_write_sb) {
4354                 ic->sb->log2_blocks_per_bitmap_bit = log2_blocks_per_bitmap_bit;
4355         }
4356         n_bitmap_bits = ((ic->provided_data_sectors >> ic->sb->log2_sectors_per_block)
4357                                 + (((sector_t)1 << log2_blocks_per_bitmap_bit) - 1)) >> log2_blocks_per_bitmap_bit;
4358         ic->n_bitmap_blocks = DIV_ROUND_UP(n_bitmap_bits, BITMAP_BLOCK_SIZE * 8);
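	/*
	 * Worked example (hypothetical sizes): with the default 32768
	 * sectors per bitmap bit and 512-byte blocks, a 2^30-sector
	 * (512 GiB) device needs 2^30 / 2^15 = 32768 bits, i.e. exactly
	 * one 4096-byte bitmap block (BITMAP_BLOCK_SIZE * 8 bits).
	 */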
4359
4360         if (!ic->meta_dev)
4361                 ic->log2_buffer_sectors = min(ic->log2_buffer_sectors, (__u8)__ffs(ic->metadata_run));
4362
4363         if (ti->len > ic->provided_data_sectors) {
4364                 r = -EINVAL;
4365                 ti->error = "Not enough provided sectors for requested mapping size";
4366                 goto bad;
4367         }
4368
4369
4370         threshold = (__u64)ic->journal_entries * (100 - journal_watermark);
4371         threshold += 50;
4372         do_div(threshold, 100);
4373         ic->free_sectors_threshold = threshold;
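	/*
	 * Example (hypothetical sizes): 4096 journal entries with the
	 * default watermark of 50 give (4096 * 50 + 50) / 100 = 2048,
	 * so flushing is triggered once free journal space drops to half.
	 */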
4374
4375         DEBUG_print("initialized:\n");
4376         DEBUG_print("   integrity_tag_size %u\n", le16_to_cpu(ic->sb->integrity_tag_size));
4377         DEBUG_print("   journal_entry_size %u\n", ic->journal_entry_size);
4378         DEBUG_print("   journal_entries_per_sector %u\n", ic->journal_entries_per_sector);
4379         DEBUG_print("   journal_section_entries %u\n", ic->journal_section_entries);
4380         DEBUG_print("   journal_section_sectors %u\n", ic->journal_section_sectors);
4381         DEBUG_print("   journal_sections %u\n", (unsigned)le32_to_cpu(ic->sb->journal_sections));
4382         DEBUG_print("   journal_entries %u\n", ic->journal_entries);
4383         DEBUG_print("   log2_interleave_sectors %d\n", ic->sb->log2_interleave_sectors);
4384         DEBUG_print("   data_device_sectors 0x%llx\n", bdev_nr_sectors(ic->dev->bdev));
4385         DEBUG_print("   initial_sectors 0x%x\n", ic->initial_sectors);
4386         DEBUG_print("   metadata_run 0x%x\n", ic->metadata_run);
4387         DEBUG_print("   log2_metadata_run %d\n", ic->log2_metadata_run);
4388         DEBUG_print("   provided_data_sectors 0x%llx (%llu)\n", ic->provided_data_sectors, ic->provided_data_sectors);
4389         DEBUG_print("   log2_buffer_sectors %u\n", ic->log2_buffer_sectors);
4390         DEBUG_print("   bits_in_journal %llu\n", bits_in_journal);
4391
4392         if (ic->recalculate_flag && !(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))) {
4393                 ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
4394                 ic->sb->recalc_sector = cpu_to_le64(0);
4395         }
4396
4397         if (ic->internal_hash) {
4398                 ic->recalc_wq = alloc_workqueue("dm-integrity-recalc", WQ_MEM_RECLAIM, 1);
4399                 if (!ic->recalc_wq) {
4400                         ti->error = "Cannot allocate workqueue";
4401                         r = -ENOMEM;
4402                         goto bad;
4403                 }
4404                 INIT_WORK(&ic->recalc_work, integrity_recalc);
4405                 ic->recalc_buffer = vmalloc(RECALC_SECTORS << SECTOR_SHIFT);
4406                 if (!ic->recalc_buffer) {
4407                         ti->error = "Cannot allocate buffer for recalculating";
4408                         r = -ENOMEM;
4409                         goto bad;
4410                 }
4411                 ic->recalc_tags = kvmalloc_array(RECALC_SECTORS >> ic->sb->log2_sectors_per_block,
4412                                                  ic->tag_size, GFP_KERNEL);
4413                 if (!ic->recalc_tags) {
4414                         ti->error = "Cannot allocate tags for recalculating";
4415                         r = -ENOMEM;
4416                         goto bad;
4417                 }
4418         } else {
4419                 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
4420                         ti->error = "Recalculate can only be specified with internal_hash";
4421                         r = -EINVAL;
4422                         goto bad;
4423                 }
4424         }
4425
4426         if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
4427             le64_to_cpu(ic->sb->recalc_sector) < ic->provided_data_sectors &&
4428             dm_integrity_disable_recalculate(ic)) {
4429                 ti->error = "Recalculating with HMAC is disabled for security reasons - if you really need it, use the argument \"legacy_recalculate\"";
4430                 r = -EOPNOTSUPP;
4431                 goto bad;
4432         }
4433
4434         ic->bufio = dm_bufio_client_create(ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev,
4435                         1U << (SECTOR_SHIFT + ic->log2_buffer_sectors), 1, 0, NULL, NULL);
4436         if (IS_ERR(ic->bufio)) {
4437                 r = PTR_ERR(ic->bufio);
4438                 ti->error = "Cannot initialize dm-bufio";
4439                 ic->bufio = NULL;
4440                 goto bad;
4441         }
4442         dm_bufio_set_sector_offset(ic->bufio, ic->start + ic->initial_sectors);
4443
4444         if (ic->mode != 'R') {
4445                 r = create_journal(ic, &ti->error);
4446                 if (r)
4447                         goto bad;
4448
4449         }
4450
4451         if (ic->mode == 'B') {
4452                 unsigned i;
4453                 unsigned n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);
4454
4455                 ic->recalc_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages);
4456                 if (!ic->recalc_bitmap) {
4457                         r = -ENOMEM;
4458                         goto bad;
4459                 }
4460                 ic->may_write_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages);
4461                 if (!ic->may_write_bitmap) {
4462                         r = -ENOMEM;
4463                         goto bad;
4464                 }
4465                 ic->bbs = kvmalloc_array(ic->n_bitmap_blocks, sizeof(struct bitmap_block_status), GFP_KERNEL);
4466                 if (!ic->bbs) {
4467                         r = -ENOMEM;
4468                         goto bad;
4469                 }
4470                 INIT_DELAYED_WORK(&ic->bitmap_flush_work, bitmap_flush_work);
4471                 for (i = 0; i < ic->n_bitmap_blocks; i++) {
4472                         struct bitmap_block_status *bbs = &ic->bbs[i];
4473                         unsigned sector, pl_index, pl_offset;
4474
4475                         INIT_WORK(&bbs->work, bitmap_block_work);
4476                         bbs->ic = ic;
4477                         bbs->idx = i;
4478                         bio_list_init(&bbs->bio_queue);
4479                         spin_lock_init(&bbs->bio_queue_lock);
4480
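			/*
			 * Locate this bitmap block in the journal page list:
			 * each 4096-byte bitmap block spans 8 sectors, so
			 * (assuming 4K pages) pl_index picks the page and
			 * pl_offset the byte offset within it.
			 */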
4481                         sector = i * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT);
4482                         pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
4483                         pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
4484
4485                         bbs->bitmap = lowmem_page_address(ic->journal[pl_index].page) + pl_offset;
4486                 }
4487         }
4488
4489         if (should_write_sb) {
4492                 init_journal(ic, 0, ic->journal_sections, 0);
4493                 r = dm_integrity_failed(ic);
4494                 if (unlikely(r)) {
4495                         ti->error = "Error initializing journal";
4496                         goto bad;
4497                 }
4498                 r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
4499                 if (r) {
4500                         ti->error = "Error initializing superblock";
4501                         goto bad;
4502                 }
4503                 ic->just_formatted = true;
4504         }
4505
4506         if (!ic->meta_dev) {
4507                 r = dm_set_target_max_io_len(ti, 1U << ic->sb->log2_interleave_sectors);
4508                 if (r)
4509                         goto bad;
4510         }
4511         if (ic->mode == 'B') {
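		/*
		 * Cap bio size at the data span covered by one bitmap
		 * block (BITMAP_BLOCK_SIZE * 8 bits), so a split bio's
		 * bits live in a single bitmap block.
		 */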
4512                 unsigned max_io_len = ((sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit) * (BITMAP_BLOCK_SIZE * 8);
4513                 if (!max_io_len)
4514                         max_io_len = 1U << 31;
4515                 DEBUG_print("max_io_len: old %u, new %u\n", ti->max_io_len, max_io_len);
4516                 if (!ti->max_io_len || ti->max_io_len > max_io_len) {
4517                         r = dm_set_target_max_io_len(ti, max_io_len);
4518                         if (r)
4519                                 goto bad;
4520                 }
4521         }
4522
4523         if (!ic->internal_hash)
4524                 dm_integrity_set(ti, ic);
4525
4526         ti->num_flush_bios = 1;
4527         ti->flush_supported = true;
4528         if (ic->discard)
4529                 ti->num_discard_bios = 1;
4530
4531         dm_audit_log_ctr(DM_MSG_PREFIX, ti, 1);
4532         return 0;
4533
4534 bad:
4535         dm_audit_log_ctr(DM_MSG_PREFIX, ti, 0);
4536         dm_integrity_dtr(ti);
4537         return r;
4538 }
4539
4540 static void dm_integrity_dtr(struct dm_target *ti)
4541 {
4542         struct dm_integrity_c *ic = ti->private;
4543
4544         BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
4545         BUG_ON(!list_empty(&ic->wait_list));
4546
4547         if (ic->metadata_wq)
4548                 destroy_workqueue(ic->metadata_wq);
4549         if (ic->wait_wq)
4550                 destroy_workqueue(ic->wait_wq);
4551         if (ic->offload_wq)
4552                 destroy_workqueue(ic->offload_wq);
4553         if (ic->commit_wq)
4554                 destroy_workqueue(ic->commit_wq);
4555         if (ic->writer_wq)
4556                 destroy_workqueue(ic->writer_wq);
4557         if (ic->recalc_wq)
4558                 destroy_workqueue(ic->recalc_wq);
4559         vfree(ic->recalc_buffer);
4560         kvfree(ic->recalc_tags);
4561         kvfree(ic->bbs);
4562         if (ic->bufio)
4563                 dm_bufio_client_destroy(ic->bufio);
4564         mempool_exit(&ic->journal_io_mempool);
4565         if (ic->io)
4566                 dm_io_client_destroy(ic->io);
4567         if (ic->dev)
4568                 dm_put_device(ti, ic->dev);
4569         if (ic->meta_dev)
4570                 dm_put_device(ti, ic->meta_dev);
4571         dm_integrity_free_page_list(ic->journal);
4572         dm_integrity_free_page_list(ic->journal_io);
4573         dm_integrity_free_page_list(ic->journal_xor);
4574         dm_integrity_free_page_list(ic->recalc_bitmap);
4575         dm_integrity_free_page_list(ic->may_write_bitmap);
4576         if (ic->journal_scatterlist)
4577                 dm_integrity_free_journal_scatterlist(ic, ic->journal_scatterlist);
4578         if (ic->journal_io_scatterlist)
4579                 dm_integrity_free_journal_scatterlist(ic, ic->journal_io_scatterlist);
4580         if (ic->sk_requests) {
4581                 unsigned i;
4582
4583                 for (i = 0; i < ic->journal_sections; i++) {
4584                         struct skcipher_request *req = ic->sk_requests[i];
4585                         if (req) {
4586                                 kfree_sensitive(req->iv);
4587                                 skcipher_request_free(req);
4588                         }
4589                 }
4590                 kvfree(ic->sk_requests);
4591         }
4592         kvfree(ic->journal_tree);
4593         if (ic->sb)
4594                 free_pages_exact(ic->sb, SB_SECTORS << SECTOR_SHIFT);
4595
4596         if (ic->internal_hash)
4597                 crypto_free_shash(ic->internal_hash);
4598         free_alg(&ic->internal_hash_alg);
4599
4600         if (ic->journal_crypt)
4601                 crypto_free_skcipher(ic->journal_crypt);
4602         free_alg(&ic->journal_crypt_alg);
4603
4604         if (ic->journal_mac)
4605                 crypto_free_shash(ic->journal_mac);
4606         free_alg(&ic->journal_mac_alg);
4607
4608         kfree(ic);
4609         dm_audit_log_dtr(DM_MSG_PREFIX, ti, 1);
4610 }
4611
4612 static struct target_type integrity_target = {
4613         .name                   = "integrity",
4614         .version                = {1, 10, 0},
4615         .module                 = THIS_MODULE,
4616         .features               = DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY,
4617         .ctr                    = dm_integrity_ctr,
4618         .dtr                    = dm_integrity_dtr,
4619         .map                    = dm_integrity_map,
4620         .postsuspend            = dm_integrity_postsuspend,
4621         .resume                 = dm_integrity_resume,
4622         .status                 = dm_integrity_status,
4623         .iterate_devices        = dm_integrity_iterate_devices,
4624         .io_hints               = dm_integrity_io_hints,
4625 };
4626
4627 static int __init dm_integrity_init(void)
4628 {
4629         int r;
4630
4631         journal_io_cache = kmem_cache_create("integrity_journal_io",
4632                                              sizeof(struct journal_io), 0, 0, NULL);
4633         if (!journal_io_cache) {
4634                 DMERR("can't allocate journal io cache");
4635                 return -ENOMEM;
4636         }
4637
4638         r = dm_register_target(&integrity_target);
4639
4640         if (r < 0)
4641                 DMERR("register failed %d", r);
4642
4643         return r;
4644 }
4645
4646 static void __exit dm_integrity_exit(void)
4647 {
4648         dm_unregister_target(&integrity_target);
4649         kmem_cache_destroy(journal_io_cache);
4650 }
4651
4652 module_init(dm_integrity_init);
4653 module_exit(dm_integrity_exit);
4654
4655 MODULE_AUTHOR("Milan Broz");
4656 MODULE_AUTHOR("Mikulas Patocka");
4657 MODULE_DESCRIPTION(DM_NAME " target for integrity tags extension");
4658 MODULE_LICENSE("GPL");