/*
 * Copyright (C) 2016-2017 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2016-2017 Milan Broz
 * Copyright (C) 2016-2017 Mikulas Patocka
 *
 * This file is released under the GPL.
 */

#include "dm-bio-record.h"

#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/vmalloc.h>
#include <linux/sort.h>
#include <linux/rbtree.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <linux/async_tx.h>
#include <linux/dm-bufio.h>

#define DM_MSG_PREFIX "integrity"

#define DEFAULT_INTERLEAVE_SECTORS 32768
#define DEFAULT_JOURNAL_SIZE_FACTOR 7
#define DEFAULT_SECTORS_PER_BITMAP_BIT 32768
#define DEFAULT_BUFFER_SECTORS 128
#define DEFAULT_JOURNAL_WATERMARK 50
#define DEFAULT_SYNC_MSEC 10000
#define DEFAULT_MAX_JOURNAL_SECTORS 131072
#define MIN_LOG2_INTERLEAVE_SECTORS 3
#define MAX_LOG2_INTERLEAVE_SECTORS 31
#define METADATA_WORKQUEUE_MAX_ACTIVE 16
#define RECALC_SECTORS 8192
#define RECALC_WRITE_SUPER 16
#define BITMAP_BLOCK_SIZE 4096 /* don't change it */
#define BITMAP_FLUSH_INTERVAL (10 * HZ)
#define DISCARD_FILLER 0xf6

/*
 * Warning - DEBUG_PRINT prints security-sensitive data to the log,
 * so it should not be enabled in the official kernel
 */
//#define DEBUG_PRINT
//#define INTERNAL_VERIFY

#define SB_MAGIC "integrt"
#define SB_VERSION_1 1
#define SB_VERSION_2 2
#define SB_VERSION_3 3
#define SB_VERSION_4 4
#define SB_VERSION_5 5
#define SB_SECTORS 8
#define MAX_SECTORS_PER_BLOCK 8

	__u8 log2_interleave_sectors;
	__u16 integrity_tag_size;
	__u32 journal_sections;
	__u64 provided_data_sectors;	/* userspace uses this value */
	__u8 log2_sectors_per_block;
	__u8 log2_blocks_per_bitmap_bit;

#define SB_FLAG_HAVE_JOURNAL_MAC 0x1
#define SB_FLAG_RECALCULATING 0x2
#define SB_FLAG_DIRTY_BITMAP 0x4
#define SB_FLAG_FIXED_PADDING 0x8
#define SB_FLAG_FIXED_HMAC 0x10

#define JOURNAL_ENTRY_ROUNDUP 8

typedef __u64 commit_id_t;
#define JOURNAL_MAC_PER_SECTOR 8

struct journal_entry {
	union {
		struct {
			__le32 sector_lo;
			__le32 sector_hi;
		} s;
		__le64 sector;
	} u;
	commit_id_t last_bytes[];
};

#define journal_entry_tag(ic, je) ((__u8 *)&(je)->last_bytes[(ic)->sectors_per_block])

#if BITS_PER_LONG == 64
#define journal_entry_set_sector(je, x) do { smp_wmb(); WRITE_ONCE((je)->u.sector, cpu_to_le64(x)); } while (0)
#else
#define journal_entry_set_sector(je, x) do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); WRITE_ONCE((je)->u.s.sector_hi, cpu_to_le32((x) >> 32)); } while (0)
#endif
#define journal_entry_get_sector(je) le64_to_cpu((je)->u.sector)
#define journal_entry_is_unused(je) ((je)->u.s.sector_hi == cpu_to_le32(-1))
#define journal_entry_set_unused(je) do { ((je)->u.s.sector_hi = cpu_to_le32(-1)); } while (0)
#define journal_entry_is_inprogress(je) ((je)->u.s.sector_hi == cpu_to_le32(-2))
#define journal_entry_set_inprogress(je) do { ((je)->u.s.sector_hi = cpu_to_le32(-2)); } while (0)
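/*
 * Ordering example for the helpers above: on 64-bit machines the sector is
 * published with a single WRITE_ONCE after an smp_wmb, so the preceding data
 * and tag writes become visible first. On 32-bit machines sector_lo is
 * stored before sector_hi, and sector_hi also carries the "unused" (-1) and
 * "inprogress" (-2) markers, so a reader that observes a valid sector_hi is
 * guaranteed to also observe the matching sector_lo and never sees a torn
 * 64-bit value.
 */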
#define JOURNAL_BLOCK_SECTORS 8
#define JOURNAL_SECTOR_DATA ((1 << SECTOR_SHIFT) - sizeof(commit_id_t))
#define JOURNAL_MAC_SIZE (JOURNAL_MAC_PER_SECTOR * JOURNAL_BLOCK_SECTORS)

struct journal_sector {
	__u8 entries[JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR];
	__u8 mac[JOURNAL_MAC_PER_SECTOR];
	commit_id_t commit_id;
};

#define MAX_TAG_SIZE (JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR - offsetof(struct journal_entry, last_bytes[MAX_SECTORS_PER_BLOCK]))

#define METADATA_PADDING_SECTORS 8

#define N_COMMIT_IDS 4

static unsigned char prev_commit_seq(unsigned char seq)
{
	return (seq + N_COMMIT_IDS - 1) % N_COMMIT_IDS;
}

static unsigned char next_commit_seq(unsigned char seq)
{
	return (seq + 1) % N_COMMIT_IDS;
}
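/*
 * With N_COMMIT_IDS == 4 the commit sequence numbers simply cycle
 * 0 -> 1 -> 2 -> 3 -> 0; e.g. next_commit_seq(3) == 0 and
 * prev_commit_seq(0) == 3.
 */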
/*
 * In-memory structures
 */

struct journal_node {
	struct rb_node node;
	sector_t sector;
};

struct dm_integrity_c {
	struct dm_dev *dev;
	struct dm_dev *meta_dev;
	unsigned tag_size;
	__s8 log2_tag_size;
	sector_t start;
	mempool_t journal_io_mempool;
	struct dm_io_client *io;
	struct dm_bufio_client *bufio;
	struct workqueue_struct *metadata_wq;
	struct superblock *sb;
	unsigned journal_pages;
	unsigned n_bitmap_blocks;

	struct page_list *journal;
	struct page_list *journal_io;
	struct page_list *journal_xor;
	struct page_list *recalc_bitmap;
	struct page_list *may_write_bitmap;
	struct bitmap_block_status *bbs;
	unsigned bitmap_flush_interval;
	int synchronous_mode;
	struct bio_list synchronous_bios;
	struct delayed_work bitmap_flush_work;

	struct crypto_skcipher *journal_crypt;
	struct scatterlist **journal_scatterlist;
	struct scatterlist **journal_io_scatterlist;
	struct skcipher_request **sk_requests;

	struct crypto_shash *journal_mac;

	struct journal_node *journal_tree;
	struct rb_root journal_tree_root;

	sector_t provided_data_sectors;

	unsigned short journal_entry_size;
	unsigned char journal_entries_per_sector;
	unsigned char journal_section_entries;
	unsigned short journal_section_sectors;
	unsigned journal_sections;
	unsigned journal_entries;
	sector_t data_device_sectors;
	sector_t meta_device_sectors;
	unsigned initial_sectors;
	unsigned metadata_run;
	__s8 log2_metadata_run;
	__u8 log2_buffer_sectors;
	__u8 sectors_per_block;
	__u8 log2_blocks_per_bitmap_bit;

	unsigned char mode;

	int failed;

	struct crypto_shash *internal_hash;

	struct dm_target *ti;

	/* these variables are locked with endio_wait.lock */
	struct rb_root in_progress;
	struct list_head wait_list;
	wait_queue_head_t endio_wait;
	struct workqueue_struct *wait_wq;
	struct workqueue_struct *offload_wq;

	unsigned char commit_seq;
	commit_id_t commit_ids[N_COMMIT_IDS];

	unsigned committed_section;
	unsigned n_committed_sections;

	unsigned uncommitted_section;
	unsigned n_uncommitted_sections;

	unsigned free_section;
	unsigned char free_section_entry;
	unsigned free_sectors;

	unsigned free_sectors_threshold;

	struct workqueue_struct *commit_wq;
	struct work_struct commit_work;

	struct workqueue_struct *writer_wq;
	struct work_struct writer_work;

	struct workqueue_struct *recalc_wq;
	struct work_struct recalc_work;

	struct bio_list flush_bio_list;

	unsigned long autocommit_jiffies;
	struct timer_list autocommit_timer;
	unsigned autocommit_msec;

	wait_queue_head_t copy_to_journal_wait;

	struct completion crypto_backoff;

	bool journal_uptodate;

	bool recalculate_flag;
	bool reset_recalculate_flag;
	bool discard;
	bool legacy_recalculate;

	struct alg_spec internal_hash_alg;
	struct alg_spec journal_crypt_alg;
	struct alg_spec journal_mac_alg;

	atomic64_t number_of_mismatches;

	struct notifier_block reboot_notifier;
};

struct dm_integrity_range {
	sector_t logical_sector;
	sector_t n_sectors;
	bool waiting;
	union {
		struct rb_node node;
		struct {
			struct task_struct *task;
			struct list_head wait_entry;
		};
	};
};

struct dm_integrity_io {
	struct work_struct work;

	struct dm_integrity_c *ic;
	bool fua;

	struct dm_integrity_range range;

	sector_t metadata_block;
	unsigned metadata_offset;

	atomic_t in_flight;
	blk_status_t bi_status;

	struct completion *completion;

	struct dm_bio_details bio_details;
};

struct journal_completion {
	struct dm_integrity_c *ic;
	atomic_t in_flight;
	struct completion comp;
};

struct journal_io {
	struct dm_integrity_range range;
	struct journal_completion *comp;
};

struct bitmap_block_status {
	struct work_struct work;
	struct dm_integrity_c *ic;
	unsigned long *bitmap;
	struct bio_list bio_queue;
	spinlock_t bio_queue_lock;
};

static struct kmem_cache *journal_io_cache;

#define JOURNAL_IO_MEMPOOL 32

#ifdef DEBUG_PRINT
#define DEBUG_print(x, ...) printk(KERN_DEBUG x, ##__VA_ARGS__)
static void __DEBUG_bytes(__u8 *bytes, size_t len, const char *msg, ...)
		pr_cont(" %02x", *bytes);
#define DEBUG_bytes(bytes, len, msg, ...) __DEBUG_bytes(bytes, len, KERN_DEBUG msg, ##__VA_ARGS__)
#else
#define DEBUG_print(x, ...) do { } while (0)
#define DEBUG_bytes(bytes, len, msg, ...) do { } while (0)
#endif

static void dm_integrity_prepare(struct request *rq)
{
}

static void dm_integrity_complete(struct request *rq, unsigned int nr_bytes)
{
}

/*
 * DM Integrity profile, protection is performed a layer above (dm-crypt)
 */
static const struct blk_integrity_profile dm_integrity_profile = {
	.name = "DM-DIF-EXT-TAG",
	.prepare_fn = dm_integrity_prepare,
	.complete_fn = dm_integrity_complete,
};

static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map);
static void integrity_bio_wait(struct work_struct *w);
static void dm_integrity_dtr(struct dm_target *ti);

static void dm_integrity_io_error(struct dm_integrity_c *ic, const char *msg, int err)
{
	if (err == -EILSEQ)
		atomic64_inc(&ic->number_of_mismatches);
	if (!cmpxchg(&ic->failed, 0, err))
		DMERR("Error on %s: %d", msg, err);
}

static int dm_integrity_failed(struct dm_integrity_c *ic)
{
	return READ_ONCE(ic->failed);
}

static bool dm_integrity_disable_recalculate(struct dm_integrity_c *ic)
{
	if (ic->legacy_recalculate)
		return false;
	if (!(ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) ?
	    ic->internal_hash_alg.key || ic->journal_mac_alg.key :
	    ic->internal_hash_alg.key && !ic->journal_mac_alg.key)
		return true;
	return false;
}

static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned i,
					  unsigned j, unsigned char seq)
{
	/*
	 * Xor the number with section and sector, so that if a piece of
	 * journal is written at the wrong place, it is detected.
	 */
	return ic->commit_ids[seq] ^ cpu_to_le64(((__u64)i << 32) ^ j);
}
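/*
 * Example: an entry written for (section 5, sector 2) carries
 * commit_ids[seq] ^ le64((5 << 32) ^ 2). If that journal block ends up at
 * (section 5, sector 3), replay recomputes the expected id with j == 3,
 * the stored id cannot match, and the misplaced block is rejected instead
 * of being replayed.
 */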
static void get_area_and_offset(struct dm_integrity_c *ic, sector_t data_sector,
				sector_t *area, sector_t *offset)
{
	if (!ic->meta_dev) {
		__u8 log2_interleave_sectors = ic->sb->log2_interleave_sectors;
		*area = data_sector >> log2_interleave_sectors;
		*offset = (unsigned)data_sector & ((1U << log2_interleave_sectors) - 1);
	} else {
		*area = 0;
		*offset = data_sector;
	}
}
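/*
 * Worked example: with the default log2_interleave_sectors of 15
 * (DEFAULT_INTERLEAVE_SECTORS == 32768), data sector 100000 maps to
 * area = 100000 >> 15 = 3 and offset = 100000 & 32767 = 1696. With a
 * separate metadata device there is only area 0 and the offset is the
 * data sector itself.
 */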
#define sector_to_block(ic, n) \
do { \
	BUG_ON((n) & (unsigned)((ic)->sectors_per_block - 1)); \
	(n) >>= (ic)->sb->log2_sectors_per_block; \
} while (0)

static __u64 get_metadata_sector_and_offset(struct dm_integrity_c *ic, sector_t area,
					    sector_t offset, unsigned *metadata_offset)
{
	__u64 ms;
	unsigned mo;

	ms = area << ic->sb->log2_interleave_sectors;
	if (likely(ic->log2_metadata_run >= 0))
		ms += area << ic->log2_metadata_run;
	else
		ms += area * ic->metadata_run;
	ms >>= ic->log2_buffer_sectors;

	sector_to_block(ic, offset);

	if (likely(ic->log2_tag_size >= 0)) {
		ms += offset >> (SECTOR_SHIFT + ic->log2_buffer_sectors - ic->log2_tag_size);
		mo = (offset << ic->log2_tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1);
	} else {
		ms += (__u64)offset * ic->tag_size >> (SECTOR_SHIFT + ic->log2_buffer_sectors);
		mo = (offset * ic->tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1);
	}
	*metadata_offset = mo;
	return ms;
}

static sector_t get_data_sector(struct dm_integrity_c *ic, sector_t area, sector_t offset)
{
	sector_t result;

	if (ic->meta_dev)
		return offset;

	result = area << ic->sb->log2_interleave_sectors;
	if (likely(ic->log2_metadata_run >= 0))
		result += (area + 1) << ic->log2_metadata_run;
	else
		result += (area + 1) * ic->metadata_run;

	result += (sector_t)ic->initial_sectors + offset;
	result += ic->start;

	return result;
}
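/*
 * Continuing the example above with illustrative values of
 * metadata_run == 64 (log2_metadata_run == 6), initial_sectors == 192 and
 * ic->start == 0: area 3, offset 1696 lands at
 * 3 * 32768 + 4 * 64 + 192 + 1696 = 100448. Each area is preceded by its
 * own metadata run, which is why area N's data begins after N + 1 runs.
 */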
static void wraparound_section(struct dm_integrity_c *ic, unsigned *sec_ptr)
{
	if (unlikely(*sec_ptr >= ic->journal_sections))
		*sec_ptr -= ic->journal_sections;
}

static void sb_set_version(struct dm_integrity_c *ic)
{
	if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC))
		ic->sb->version = SB_VERSION_5;
	else if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING))
		ic->sb->version = SB_VERSION_4;
	else if (ic->mode == 'B' || ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP))
		ic->sb->version = SB_VERSION_3;
	else if (ic->meta_dev || ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
		ic->sb->version = SB_VERSION_2;
	else
		ic->sb->version = SB_VERSION_1;
}

static int sb_mac(struct dm_integrity_c *ic, bool wr)
{
	SHASH_DESC_ON_STACK(desc, ic->journal_mac);
	int r;
	unsigned size = crypto_shash_digestsize(ic->journal_mac);

	if (sizeof(struct superblock) + size > 1 << SECTOR_SHIFT) {
		dm_integrity_io_error(ic, "digest is too long", -EINVAL);
		return -EINVAL;
	}

	desc->tfm = ic->journal_mac;

	r = crypto_shash_init(desc);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_init", r);
		return r;
	}

	r = crypto_shash_update(desc, (__u8 *)ic->sb, (1 << SECTOR_SHIFT) - size);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_update", r);
		return r;
	}

	if (likely(wr)) {
		r = crypto_shash_final(desc, (__u8 *)ic->sb + (1 << SECTOR_SHIFT) - size);
		if (unlikely(r < 0)) {
			dm_integrity_io_error(ic, "crypto_shash_final", r);
			return r;
		}
	} else {
		__u8 result[HASH_MAX_DIGESTSIZE];

		r = crypto_shash_final(desc, result);
		if (unlikely(r < 0)) {
			dm_integrity_io_error(ic, "crypto_shash_final", r);
			return r;
		}
		if (memcmp((__u8 *)ic->sb + (1 << SECTOR_SHIFT) - size, result, size)) {
			dm_integrity_io_error(ic, "superblock mac", -EILSEQ);
			return -EILSEQ;
		}
	}

	return 0;
}

static int sync_rw_sb(struct dm_integrity_c *ic, int op, int op_flags)
{
	struct dm_io_request io_req;
	struct dm_io_region io_loc;
	int r;

	io_req.bi_op = op;
	io_req.bi_op_flags = op_flags;
	io_req.mem.type = DM_IO_KMEM;
	io_req.mem.ptr.addr = ic->sb;
	io_req.notify.fn = NULL;
	io_req.client = ic->io;
	io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev;
	io_loc.sector = ic->start;
	io_loc.count = SB_SECTORS;

	if (op == REQ_OP_WRITE) {
		sb_set_version(ic);
		if (ic->journal_mac && ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) {
			r = sb_mac(ic, true);
			if (unlikely(r))
				return r;
		}
	}

	r = dm_io(&io_req, 1, &io_loc, NULL);
	if (unlikely(r))
		return r;

	if (op == REQ_OP_READ) {
		if (ic->mode != 'R' && ic->journal_mac && ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) {
			r = sb_mac(ic, false);
			if (unlikely(r))
				return r;
		}
	}

	return 0;
}

#define BITMAP_OP_TEST_ALL_SET 0
#define BITMAP_OP_TEST_ALL_CLEAR 1
#define BITMAP_OP_SET 2
#define BITMAP_OP_CLEAR 3

static bool block_bitmap_op(struct dm_integrity_c *ic, struct page_list *bitmap,
			    sector_t sector, sector_t n_sectors, int mode)
{
	unsigned long bit, end_bit, this_end_bit, page, end_page;
	unsigned long *data;

	if (unlikely(((sector | n_sectors) & ((1 << ic->sb->log2_sectors_per_block) - 1)) != 0)) {
		DMCRIT("invalid bitmap access (%llx,%llx,%d,%d,%d)",
		       sector, n_sectors,
		       ic->sb->log2_sectors_per_block,
		       ic->log2_blocks_per_bitmap_bit,
		       mode);
		BUG();
	}

	if (unlikely(!n_sectors))
		return true;

	bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
	end_bit = (sector + n_sectors - 1) >>
		(ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);

	page = bit / (PAGE_SIZE * 8);
	bit %= PAGE_SIZE * 8;

	end_page = end_bit / (PAGE_SIZE * 8);
	end_bit %= PAGE_SIZE * 8;

repeat:
	if (page < end_page) {
		this_end_bit = PAGE_SIZE * 8 - 1;
	} else {
		this_end_bit = end_bit;
	}

	data = lowmem_page_address(bitmap[page].page);

	if (mode == BITMAP_OP_TEST_ALL_SET) {
		while (bit <= this_end_bit) {
			if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
				do {
					if (data[bit / BITS_PER_LONG] != -1)
						return false;
					bit += BITS_PER_LONG;
				} while (this_end_bit >= bit + BITS_PER_LONG - 1);
				continue;
			}
			if (!test_bit(bit, data))
				return false;
			bit++;
		}
	} else if (mode == BITMAP_OP_TEST_ALL_CLEAR) {
		while (bit <= this_end_bit) {
			if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
				do {
					if (data[bit / BITS_PER_LONG] != 0)
						return false;
					bit += BITS_PER_LONG;
				} while (this_end_bit >= bit + BITS_PER_LONG - 1);
				continue;
			}
			if (test_bit(bit, data))
				return false;
			bit++;
		}
	} else if (mode == BITMAP_OP_SET) {
		while (bit <= this_end_bit) {
			if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
				do {
					data[bit / BITS_PER_LONG] = -1;
					bit += BITS_PER_LONG;
				} while (this_end_bit >= bit + BITS_PER_LONG - 1);
				continue;
			}
			__set_bit(bit, data);
			bit++;
		}
	} else if (mode == BITMAP_OP_CLEAR) {
		if (!bit && this_end_bit == PAGE_SIZE * 8 - 1)
			clear_page(data);
		else while (bit <= this_end_bit) {
			if (!(bit % BITS_PER_LONG) && this_end_bit >= bit + BITS_PER_LONG - 1) {
				do {
					data[bit / BITS_PER_LONG] = 0;
					bit += BITS_PER_LONG;
				} while (this_end_bit >= bit + BITS_PER_LONG - 1);
				continue;
			}
			__clear_bit(bit, data);
			bit++;
		}
	} else {
		BUG();
	}

	if (unlikely(page < end_page)) {
		bit = 0;
		page++;
		goto repeat;
	}

	return true;
}

static void block_bitmap_copy(struct dm_integrity_c *ic, struct page_list *dst, struct page_list *src)
{
	unsigned n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);
	unsigned i;

	for (i = 0; i < n_bitmap_pages; i++) {
		unsigned long *dst_data = lowmem_page_address(dst[i].page);
		unsigned long *src_data = lowmem_page_address(src[i].page);

		copy_page(dst_data, src_data);
	}
}

static struct bitmap_block_status *sector_to_bitmap_block(struct dm_integrity_c *ic, sector_t sector)
{
	unsigned bit = sector >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
	unsigned bitmap_block = bit / (BITMAP_BLOCK_SIZE * 8);

	BUG_ON(bitmap_block >= ic->n_bitmap_blocks);
	return &ic->bbs[bitmap_block];
}
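/*
 * Example of the bitmap granularity: with log2_sectors_per_block == 0 and
 * log2_blocks_per_bitmap_bit == 15 (32768 sectors, i.e. 16 MiB per bit),
 * one BITMAP_BLOCK_SIZE block holds 4096 * 8 == 32768 bits and so covers
 * 512 GiB of data. Sector 2^31 maps to bit 2^31 >> 15 == 65536, which is
 * in bitmap block 65536 / 32768 == 2.
 */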
static void access_journal_check(struct dm_integrity_c *ic, unsigned section, unsigned offset,
				 bool e, const char *function)
{
#if defined(CONFIG_DM_DEBUG) || defined(INTERNAL_VERIFY)
	unsigned limit = e ? ic->journal_section_entries : ic->journal_section_sectors;

	if (unlikely(section >= ic->journal_sections) ||
	    unlikely(offset >= limit)) {
		DMCRIT("%s: invalid access at (%u,%u), limit (%u,%u)",
		       function, section, offset, ic->journal_sections, limit);
		BUG();
	}
#endif
}

static void page_list_location(struct dm_integrity_c *ic, unsigned section, unsigned offset,
			       unsigned *pl_index, unsigned *pl_offset)
{
	unsigned sector;

	access_journal_check(ic, section, offset, false, "page_list_location");

	sector = section * ic->journal_section_sectors + offset;

	*pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
	*pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
}
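/*
 * Example: with 4K pages (8 sectors per page), journal sector 13 is found
 * at pl_index = 13 >> 3 = 1 and pl_offset = (13 << 9) & 4095 = 2560, i.e.
 * the sixth 512-byte slot of the second journal page.
 */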
static struct journal_sector *access_page_list(struct dm_integrity_c *ic, struct page_list *pl,
					       unsigned section, unsigned offset, unsigned *n_sectors)
{
	unsigned pl_index, pl_offset;
	char *va;

	page_list_location(ic, section, offset, &pl_index, &pl_offset);

	if (n_sectors)
		*n_sectors = (PAGE_SIZE - pl_offset) >> SECTOR_SHIFT;

	va = lowmem_page_address(pl[pl_index].page);

	return (struct journal_sector *)(va + pl_offset);
}

static struct journal_sector *access_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset)
{
	return access_page_list(ic, ic->journal, section, offset, NULL);
}

static struct journal_entry *access_journal_entry(struct dm_integrity_c *ic, unsigned section, unsigned n)
{
	unsigned rel_sector, offset;
	struct journal_sector *js;

	access_journal_check(ic, section, n, true, "access_journal_entry");

	rel_sector = n % JOURNAL_BLOCK_SECTORS;
	offset = n / JOURNAL_BLOCK_SECTORS;

	js = access_journal(ic, section, rel_sector);
	return (struct journal_entry *)((char *)js + offset * ic->journal_entry_size);
}

static struct journal_sector *access_journal_data(struct dm_integrity_c *ic, unsigned section, unsigned n)
{
	n <<= ic->sb->log2_sectors_per_block;

	n += JOURNAL_BLOCK_SECTORS;

	access_journal_check(ic, section, n, false, "access_journal_data");

	return access_journal(ic, section, n);
}
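/*
 * Journal section layout: the first JOURNAL_BLOCK_SECTORS (8) sectors of a
 * section hold the packed journal entries together with the per-sector MAC
 * bytes and commit id; the remaining sectors hold the data blocks. Entries
 * are striped across those 8 sectors, so entry n lives in sector n % 8 at
 * byte offset (n / 8) * journal_entry_size - e.g. entry 10 is the second
 * entry slot of sector 2 - and its data starts at section-relative sector
 * (n << log2_sectors_per_block) + 8, e.g. sector 18 for entry 10 with
 * one-sector blocks.
 */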
static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result[JOURNAL_MAC_SIZE])
{
	SHASH_DESC_ON_STACK(desc, ic->journal_mac);
	int r;
	unsigned j, size;

	desc->tfm = ic->journal_mac;

	r = crypto_shash_init(desc);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_init", r);
		goto err;
	}

	if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) {
		__le64 section_le;

		r = crypto_shash_update(desc, (__u8 *)&ic->sb->salt, SALT_SIZE);
		if (unlikely(r < 0)) {
			dm_integrity_io_error(ic, "crypto_shash_update", r);
			goto err;
		}

		section_le = cpu_to_le64(section);
		r = crypto_shash_update(desc, (__u8 *)&section_le, sizeof section_le);
		if (unlikely(r < 0)) {
			dm_integrity_io_error(ic, "crypto_shash_update", r);
			goto err;
		}
	}

	for (j = 0; j < ic->journal_section_entries; j++) {
		struct journal_entry *je = access_journal_entry(ic, section, j);

		r = crypto_shash_update(desc, (__u8 *)&je->u.sector, sizeof je->u.sector);
		if (unlikely(r < 0)) {
			dm_integrity_io_error(ic, "crypto_shash_update", r);
			goto err;
		}
	}

	size = crypto_shash_digestsize(ic->journal_mac);

	if (likely(size <= JOURNAL_MAC_SIZE)) {
		r = crypto_shash_final(desc, result);
		if (unlikely(r < 0)) {
			dm_integrity_io_error(ic, "crypto_shash_final", r);
			goto err;
		}
		memset(result + size, 0, JOURNAL_MAC_SIZE - size);
	} else {
		__u8 digest[HASH_MAX_DIGESTSIZE];

		if (WARN_ON(size > sizeof(digest))) {
			dm_integrity_io_error(ic, "digest_size", -EINVAL);
			goto err;
		}
		r = crypto_shash_final(desc, digest);
		if (unlikely(r < 0)) {
			dm_integrity_io_error(ic, "crypto_shash_final", r);
			goto err;
		}
		memcpy(result, digest, JOURNAL_MAC_SIZE);
	}

	return;
err:
	memset(result, 0, JOURNAL_MAC_SIZE);
}

static void rw_section_mac(struct dm_integrity_c *ic, unsigned section, bool wr)
{
	__u8 result[JOURNAL_MAC_SIZE];
	unsigned j;

	if (!ic->journal_mac)
		return;

	section_mac(ic, section, result);

	for (j = 0; j < JOURNAL_BLOCK_SECTORS; j++) {
		struct journal_sector *js = access_journal(ic, section, j);

		if (likely(wr))
			memcpy(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR);
		else {
			if (memcmp(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR))
				dm_integrity_io_error(ic, "journal mac", -EILSEQ);
		}
	}
}

static void complete_journal_op(void *context)
{
	struct journal_completion *comp = context;

	BUG_ON(!atomic_read(&comp->in_flight));
	if (likely(atomic_dec_and_test(&comp->in_flight)))
		complete(&comp->comp);
}

static void xor_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
			unsigned n_sections, struct journal_completion *comp)
{
	struct async_submit_ctl submit;
	size_t n_bytes = (size_t)(n_sections * ic->journal_section_sectors) << SECTOR_SHIFT;
	unsigned pl_index, pl_offset, section_index;
	struct page_list *source_pl, *target_pl;

	if (likely(encrypt)) {
		source_pl = ic->journal;
		target_pl = ic->journal_io;
	} else {
		source_pl = ic->journal_io;
		target_pl = ic->journal;
	}

	page_list_location(ic, section, 0, &pl_index, &pl_offset);

	atomic_add(roundup(pl_offset + n_bytes, PAGE_SIZE) >> PAGE_SHIFT, &comp->in_flight);

	init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL, complete_journal_op, comp, NULL);

	section_index = pl_index;

	do {
		size_t this_step;
		struct page *src_pages[2];
		struct page *dst_page;

		while (unlikely(pl_index == section_index)) {
			unsigned dummy;

			if (likely(encrypt))
				rw_section_mac(ic, section, true);
			section++;
			n_sections--;
			if (!n_sections)
				break;
			page_list_location(ic, section, 0, &section_index, &dummy);
		}

		this_step = min(n_bytes, (size_t)PAGE_SIZE - pl_offset);
		dst_page = target_pl[pl_index].page;
		src_pages[0] = source_pl[pl_index].page;
		src_pages[1] = ic->journal_xor[pl_index].page;

		async_xor(dst_page, src_pages, pl_offset, 2, this_step, &submit);

		pl_index++;
		pl_offset = 0;
		n_bytes -= this_step;
	} while (n_bytes);

	BUG_ON(n_sections);

	async_tx_issue_pending_all();
}

static void complete_journal_encrypt(struct crypto_async_request *req, int err)
{
	struct journal_completion *comp = req->data;

	if (unlikely(err)) {
		if (likely(err == -EINPROGRESS)) {
			complete(&comp->ic->crypto_backoff);
			return;
		}
		dm_integrity_io_error(comp->ic, "asynchronous encrypt", err);
	}
	complete_journal_op(comp);
}

static bool do_crypt(bool encrypt, struct skcipher_request *req, struct journal_completion *comp)
{
	int r;

	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      complete_journal_encrypt, comp);
	if (likely(encrypt))
		r = crypto_skcipher_encrypt(req);
	else
		r = crypto_skcipher_decrypt(req);
	if (likely(!r))
		return false;
	if (likely(r == -EINPROGRESS))
		return true;
	if (likely(r == -EBUSY)) {
		wait_for_completion(&comp->ic->crypto_backoff);
		reinit_completion(&comp->ic->crypto_backoff);
		return true;
	}
	dm_integrity_io_error(comp->ic, "encrypt", r);
	return false;
}

static void crypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
			  unsigned n_sections, struct journal_completion *comp)
{
	struct scatterlist **source_sg;
	struct scatterlist **target_sg;

	atomic_add(2, &comp->in_flight);

	if (likely(encrypt)) {
		source_sg = ic->journal_scatterlist;
		target_sg = ic->journal_io_scatterlist;
	} else {
		source_sg = ic->journal_io_scatterlist;
		target_sg = ic->journal_scatterlist;
	}

	do {
		struct skcipher_request *req;
		unsigned ivsize;
		char *iv;

		if (likely(encrypt))
			rw_section_mac(ic, section, true);

		req = ic->sk_requests[section];
		ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
		iv = req->iv;

		memcpy(iv, iv + ivsize, ivsize);

		req->src = source_sg[section];
		req->dst = target_sg[section];

		if (unlikely(do_crypt(encrypt, req, comp)))
			atomic_inc(&comp->in_flight);

		section++;
		n_sections--;
	} while (n_sections);

	atomic_dec(&comp->in_flight);
	complete_journal_op(comp);
}
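/*
 * The in_flight accounting above: two references are taken up front so
 * complete_journal_op cannot fire while requests are still being submitted;
 * each do_crypt that went asynchronous adds one reference, which the
 * completion callback drops; the final atomic_dec plus complete_journal_op
 * drop the two submission-time references once the loop has finished.
 */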
static void encrypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
			    unsigned n_sections, struct journal_completion *comp)
{
	if (ic->journal_xor)
		return xor_journal(ic, encrypt, section, n_sections, comp);
	else
		return crypt_journal(ic, encrypt, section, n_sections, comp);
}

static void complete_journal_io(unsigned long error, void *context)
{
	struct journal_completion *comp = context;

	if (unlikely(error != 0))
		dm_integrity_io_error(comp->ic, "writing journal", -EIO);
	complete_journal_op(comp);
}

static void rw_journal_sectors(struct dm_integrity_c *ic, int op, int op_flags,
			       unsigned sector, unsigned n_sectors, struct journal_completion *comp)
{
	struct dm_io_request io_req;
	struct dm_io_region io_loc;
	unsigned pl_index, pl_offset;
	int r;

	if (unlikely(dm_integrity_failed(ic))) {
		if (comp)
			complete_journal_io(-1UL, comp);
		return;
	}

	pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
	pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);

	io_req.bi_op = op;
	io_req.bi_op_flags = op_flags;
	io_req.mem.type = DM_IO_PAGE_LIST;
	if (ic->journal_io)
		io_req.mem.ptr.pl = &ic->journal_io[pl_index];
	else
		io_req.mem.ptr.pl = &ic->journal[pl_index];
	io_req.mem.offset = pl_offset;
	if (likely(comp != NULL)) {
		io_req.notify.fn = complete_journal_io;
		io_req.notify.context = comp;
	} else {
		io_req.notify.fn = NULL;
	}
	io_req.client = ic->io;
	io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev;
	io_loc.sector = ic->start + SB_SECTORS + sector;
	io_loc.count = n_sectors;

	r = dm_io(&io_req, 1, &io_loc, NULL);
	if (unlikely(r)) {
		dm_integrity_io_error(ic, op == REQ_OP_READ ? "reading journal" : "writing journal", r);
		if (comp) {
			WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
			complete_journal_io(-1UL, comp);
		}
	}
}

static void rw_journal(struct dm_integrity_c *ic, int op, int op_flags, unsigned section,
		       unsigned n_sections, struct journal_completion *comp)
{
	unsigned sector, n_sectors;

	sector = section * ic->journal_section_sectors;
	n_sectors = n_sections * ic->journal_section_sectors;

	rw_journal_sectors(ic, op, op_flags, sector, n_sectors, comp);
}

static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsigned commit_sections)
{
	struct journal_completion io_comp;
	struct journal_completion crypt_comp_1;
	struct journal_completion crypt_comp_2;
	unsigned i;

	io_comp.ic = ic;
	init_completion(&io_comp.comp);

	if (commit_start + commit_sections <= ic->journal_sections) {
		io_comp.in_flight = (atomic_t)ATOMIC_INIT(1);
		if (ic->journal_io) {
			crypt_comp_1.ic = ic;
			init_completion(&crypt_comp_1.comp);
			crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
			encrypt_journal(ic, true, commit_start, commit_sections, &crypt_comp_1);
			wait_for_completion_io(&crypt_comp_1.comp);
		} else {
			for (i = 0; i < commit_sections; i++)
				rw_section_mac(ic, commit_start + i, true);
		}
		rw_journal(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, commit_start,
			   commit_sections, &io_comp);
	} else {
		unsigned to_end;

		io_comp.in_flight = (atomic_t)ATOMIC_INIT(2);
		to_end = ic->journal_sections - commit_start;
		if (ic->journal_io) {
			crypt_comp_1.ic = ic;
			init_completion(&crypt_comp_1.comp);
			crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
			encrypt_journal(ic, true, commit_start, to_end, &crypt_comp_1);
			if (try_wait_for_completion(&crypt_comp_1.comp)) {
				rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
				reinit_completion(&crypt_comp_1.comp);
				crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
				encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_1);
				wait_for_completion_io(&crypt_comp_1.comp);
			} else {
				crypt_comp_2.ic = ic;
				init_completion(&crypt_comp_2.comp);
				crypt_comp_2.in_flight = (atomic_t)ATOMIC_INIT(0);
				encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_2);
				wait_for_completion_io(&crypt_comp_1.comp);
				rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
				wait_for_completion_io(&crypt_comp_2.comp);
			}
		} else {
			for (i = 0; i < to_end; i++)
				rw_section_mac(ic, commit_start + i, true);
			rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
			for (i = 0; i < commit_sections - to_end; i++)
				rw_section_mac(ic, i, true);
		}
		rw_journal(ic, REQ_OP_WRITE, REQ_FUA, 0, commit_sections - to_end, &io_comp);
	}

	wait_for_completion_io(&io_comp.comp);
}

static void copy_from_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset,
			      unsigned n_sectors, sector_t target, io_notify_fn fn, void *data)
{
	struct dm_io_request io_req;
	struct dm_io_region io_loc;
	int r;
	unsigned sector, pl_index, pl_offset;

	BUG_ON((target | n_sectors | offset) & (unsigned)(ic->sectors_per_block - 1));

	if (unlikely(dm_integrity_failed(ic))) {
		fn(-1UL, data);
		return;
	}

	sector = section * ic->journal_section_sectors + JOURNAL_BLOCK_SECTORS + offset;

	pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
	pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);

	io_req.bi_op = REQ_OP_WRITE;
	io_req.bi_op_flags = 0;
	io_req.mem.type = DM_IO_PAGE_LIST;
	io_req.mem.ptr.pl = &ic->journal[pl_index];
	io_req.mem.offset = pl_offset;
	io_req.notify.fn = fn;
	io_req.notify.context = data;
	io_req.client = ic->io;
	io_loc.bdev = ic->dev->bdev;
	io_loc.sector = target;
	io_loc.count = n_sectors;

	r = dm_io(&io_req, 1, &io_loc, NULL);
	if (unlikely(r)) {
		WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
		fn(-1UL, data);
	}
}

static bool ranges_overlap(struct dm_integrity_range *range1, struct dm_integrity_range *range2)
{
	return range1->logical_sector < range2->logical_sector + range2->n_sectors &&
	       range1->logical_sector + range1->n_sectors > range2->logical_sector;
}
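/*
 * Ranges are half-open intervals [logical_sector, logical_sector +
 * n_sectors): e.g. [100, 200) and [150, 250) overlap, while [100, 200)
 * and [200, 300) do not.
 */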
static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range, bool check_waiting)
{
	struct rb_node **n = &ic->in_progress.rb_node;
	struct rb_node *parent;

	BUG_ON((new_range->logical_sector | new_range->n_sectors) & (unsigned)(ic->sectors_per_block - 1));

	if (likely(check_waiting)) {
		struct dm_integrity_range *range;

		list_for_each_entry(range, &ic->wait_list, wait_entry) {
			if (unlikely(ranges_overlap(range, new_range)))
				return false;
		}
	}

	parent = NULL;

	while (*n) {
		struct dm_integrity_range *range = container_of(*n, struct dm_integrity_range, node);

		parent = *n;
		if (new_range->logical_sector + new_range->n_sectors <= range->logical_sector) {
			n = &range->node.rb_left;
		} else if (new_range->logical_sector >= range->logical_sector + range->n_sectors) {
			n = &range->node.rb_right;
		} else {
			return false;
		}
	}

	rb_link_node(&new_range->node, parent, n);
	rb_insert_color(&new_range->node, &ic->in_progress);

	return true;
}

static void remove_range_unlocked(struct dm_integrity_c *ic, struct dm_integrity_range *range)
{
	rb_erase(&range->node, &ic->in_progress);
	while (unlikely(!list_empty(&ic->wait_list))) {
		struct dm_integrity_range *last_range =
			list_first_entry(&ic->wait_list, struct dm_integrity_range, wait_entry);
		struct task_struct *last_range_task;

		last_range_task = last_range->task;
		list_del(&last_range->wait_entry);
		if (!add_new_range(ic, last_range, false)) {
			last_range->task = last_range_task;
			list_add(&last_range->wait_entry, &ic->wait_list);
			break;
		}
		last_range->waiting = false;
		wake_up_process(last_range_task);
	}
}

static void remove_range(struct dm_integrity_c *ic, struct dm_integrity_range *range)
{
	unsigned long flags;

	spin_lock_irqsave(&ic->endio_wait.lock, flags);
	remove_range_unlocked(ic, range);
	spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
}

static void wait_and_add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range)
{
	new_range->waiting = true;
	list_add_tail(&new_range->wait_entry, &ic->wait_list);
	new_range->task = current;
	do {
		__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&ic->endio_wait.lock);
		io_schedule();
		spin_lock_irq(&ic->endio_wait.lock);
	} while (unlikely(new_range->waiting));
}

static void add_new_range_and_wait(struct dm_integrity_c *ic, struct dm_integrity_range *new_range)
{
	if (unlikely(!add_new_range(ic, new_range, true)))
		wait_and_add_new_range(ic, new_range);
}

static void init_journal_node(struct journal_node *node)
{
	RB_CLEAR_NODE(&node->node);
	node->sector = (sector_t)-1;
}

static void add_journal_node(struct dm_integrity_c *ic, struct journal_node *node, sector_t sector)
{
	struct rb_node **link;
	struct rb_node *parent;

	node->sector = sector;
	BUG_ON(!RB_EMPTY_NODE(&node->node));

	link = &ic->journal_tree_root.rb_node;
	parent = NULL;

	while (*link) {
		struct journal_node *j;

		parent = *link;
		j = container_of(parent, struct journal_node, node);
		if (sector < j->sector)
			link = &j->node.rb_left;
		else
			link = &j->node.rb_right;
	}

	rb_link_node(&node->node, parent, link);
	rb_insert_color(&node->node, &ic->journal_tree_root);
}

static void remove_journal_node(struct dm_integrity_c *ic, struct journal_node *node)
{
	BUG_ON(RB_EMPTY_NODE(&node->node));
	rb_erase(&node->node, &ic->journal_tree_root);
	init_journal_node(node);
}

#define NOT_FOUND (-1U)

static unsigned find_journal_node(struct dm_integrity_c *ic, sector_t sector, sector_t *next_sector)
{
	struct rb_node *n = ic->journal_tree_root.rb_node;
	unsigned found = NOT_FOUND;

	*next_sector = (sector_t)-1;
	while (n) {
		struct journal_node *j = container_of(n, struct journal_node, node);

		if (sector == j->sector) {
			found = j - ic->journal_tree;
		}
		if (sector < j->sector) {
			*next_sector = j->sector;
			n = j->node.rb_left;
		} else {
			n = j->node.rb_right;
		}
	}

	return found;
}

static bool test_journal_node(struct dm_integrity_c *ic, unsigned pos, sector_t sector)
{
	struct journal_node *node, *next_node;
	struct rb_node *next;

	if (unlikely(pos >= ic->journal_entries))
		return false;
	node = &ic->journal_tree[pos];
	if (unlikely(RB_EMPTY_NODE(&node->node)))
		return false;
	if (unlikely(node->sector != sector))
		return false;

	next = rb_next(&node->node);
	if (unlikely(!next))
		return true;

	next_node = container_of(next, struct journal_node, node);
	return next_node->sector != sector;
}
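/*
 * Several journal entries may exist for the same sector; add_journal_node
 * inserts equal keys to the right, so the newest entry for a sector is the
 * last one of its run in tree order. test_journal_node thus returns true
 * only if the node at "pos" matches the sector and no later node for the
 * same sector follows it, i.e. the entry at "pos" is the most recent copy
 * of that block in the journal.
 */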
static bool find_newer_committed_node(struct dm_integrity_c *ic, struct journal_node *node)
{
	struct rb_node *next;
	struct journal_node *next_node;
	unsigned next_section;

	BUG_ON(RB_EMPTY_NODE(&node->node));

	next = rb_next(&node->node);
	if (unlikely(!next))
		return false;

	next_node = container_of(next, struct journal_node, node);

	if (next_node->sector != node->sector)
		return false;

	next_section = (unsigned)(next_node - ic->journal_tree) / ic->journal_section_entries;
	if (next_section >= ic->committed_section &&
	    next_section < ic->committed_section + ic->n_committed_sections)
		return true;
	if (next_section + ic->journal_sections < ic->committed_section + ic->n_committed_sections)
		return true;

	return false;
}

#define TAG_READ 0
#define TAG_WRITE 1
#define TAG_CMP 2

static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, sector_t *metadata_block,
			       unsigned *metadata_offset, unsigned total_size, int op)
{
#define MAY_BE_FILLER 1
#define MAY_BE_HASH 2
	unsigned hash_offset = 0;
	unsigned may_be = MAY_BE_HASH | (ic->discard ? MAY_BE_FILLER : 0);

	do {
		unsigned char *data, *dp;
		struct dm_buffer *b;
		unsigned to_copy;
		int r;

		r = dm_integrity_failed(ic);
		if (unlikely(r))
			return r;

		data = dm_bufio_read(ic->bufio, *metadata_block, &b);
		if (IS_ERR(data))
			return PTR_ERR(data);

		to_copy = min((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - *metadata_offset, total_size);
		dp = data + *metadata_offset;
		if (op == TAG_READ) {
			memcpy(tag, dp, to_copy);
		} else if (op == TAG_WRITE) {
			memcpy(dp, tag, to_copy);
			dm_bufio_mark_partial_buffer_dirty(b, *metadata_offset, *metadata_offset + to_copy);
		} else {
			/* e.g.: op == TAG_CMP */

			if (likely(is_power_of_2(ic->tag_size))) {
				if (unlikely(memcmp(dp, tag, to_copy)))
					if (unlikely(!ic->discard) ||
					    unlikely(memchr_inv(dp, DISCARD_FILLER, to_copy) != NULL)) {
						goto thorough_test;
					}
			} else {
				unsigned i, ts;
thorough_test:
				ts = total_size;

				for (i = 0; i < to_copy; i++, ts--) {
					if (unlikely(dp[i] != tag[i]))
						may_be &= ~MAY_BE_HASH;
					if (likely(dp[i] != DISCARD_FILLER))
						may_be &= ~MAY_BE_FILLER;
					hash_offset++;
					if (unlikely(hash_offset == ic->tag_size)) {
						if (unlikely(!may_be)) {
							dm_bufio_release(b);
							return ts;
						}
						hash_offset = 0;
						may_be = MAY_BE_HASH | (ic->discard ? MAY_BE_FILLER : 0);
					}
				}
			}
		}
		dm_bufio_release(b);

		tag += to_copy;
		*metadata_offset += to_copy;
		if (unlikely(*metadata_offset == 1U << SECTOR_SHIFT << ic->log2_buffer_sectors)) {
			(*metadata_block)++;
			*metadata_offset = 0;
		}

		if (unlikely(!is_power_of_2(ic->tag_size))) {
			hash_offset = (hash_offset + to_copy) % ic->tag_size;
		}

		total_size -= to_copy;
	} while (unlikely(total_size));

	return 0;
#undef MAY_BE_FILLER
#undef MAY_BE_HASH
}
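/*
 * For TAG_CMP with a tag size that is not a power of two, the loop above
 * tracks two hypotheses for each tag: the on-disk bytes may still equal
 * the expected checksum (MAY_BE_HASH), or may still be all DISCARD_FILLER
 * bytes (0xf6) left behind by a discard (MAY_BE_FILLER). Only when a whole
 * tag has contradicted both hypotheses is the block reported as a genuine
 * mismatch.
 */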
struct flush_request {
	struct dm_io_request io_req;
	struct dm_io_region io_reg;
	struct dm_integrity_c *ic;
	struct completion comp;
};

static void flush_notify(unsigned long error, void *fr_)
{
	struct flush_request *fr = fr_;

	if (unlikely(error != 0))
		dm_integrity_io_error(fr->ic, "flushing disk cache", -EIO);
	complete(&fr->comp);
}

static void dm_integrity_flush_buffers(struct dm_integrity_c *ic, bool flush_data)
{
	int r;
	struct flush_request fr;

	if (!ic->meta_dev)
		flush_data = false;
	if (flush_data) {
		fr.io_req.bi_op = REQ_OP_WRITE;
		fr.io_req.bi_op_flags = REQ_PREFLUSH | REQ_SYNC;
		fr.io_req.mem.type = DM_IO_KMEM;
		fr.io_req.mem.ptr.addr = NULL;
		fr.io_req.notify.fn = flush_notify;
		fr.io_req.notify.context = &fr;
		fr.io_req.client = dm_bufio_get_dm_io_client(ic->bufio);
		fr.io_reg.bdev = ic->dev->bdev;
		fr.io_reg.sector = 0;
		fr.io_reg.count = 0;
		fr.ic = ic;
		init_completion(&fr.comp);
		r = dm_io(&fr.io_req, 1, &fr.io_reg, NULL);
		BUG_ON(r);
	}

	r = dm_bufio_write_dirty_buffers(ic->bufio);
	if (unlikely(r))
		dm_integrity_io_error(ic, "writing tags", r);

	if (flush_data)
		wait_for_completion(&fr.comp);
}

static void sleep_on_endio_wait(struct dm_integrity_c *ic)
{
	DECLARE_WAITQUEUE(wait, current);

	__add_wait_queue(&ic->endio_wait, &wait);
	__set_current_state(TASK_UNINTERRUPTIBLE);
	spin_unlock_irq(&ic->endio_wait.lock);
	io_schedule();
	spin_lock_irq(&ic->endio_wait.lock);
	__remove_wait_queue(&ic->endio_wait, &wait);
}

static void autocommit_fn(struct timer_list *t)
{
	struct dm_integrity_c *ic = from_timer(ic, t, autocommit_timer);

	if (likely(!dm_integrity_failed(ic)))
		queue_work(ic->commit_wq, &ic->commit_work);
}

static void schedule_autocommit(struct dm_integrity_c *ic)
{
	if (!timer_pending(&ic->autocommit_timer))
		mod_timer(&ic->autocommit_timer, jiffies + ic->autocommit_jiffies);
}

static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
{
	struct bio *bio;
	unsigned long flags;

	spin_lock_irqsave(&ic->endio_wait.lock, flags);
	bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
	bio_list_add(&ic->flush_bio_list, bio);
	spin_unlock_irqrestore(&ic->endio_wait.lock, flags);

	queue_work(ic->commit_wq, &ic->commit_work);
}

static void do_endio(struct dm_integrity_c *ic, struct bio *bio)
{
	int r = dm_integrity_failed(ic);

	if (unlikely(r) && !bio->bi_status)
		bio->bi_status = errno_to_blk_status(r);
	if (unlikely(ic->synchronous_mode) && bio_op(bio) == REQ_OP_WRITE) {
		unsigned long flags;

		spin_lock_irqsave(&ic->endio_wait.lock, flags);
		bio_list_add(&ic->synchronous_bios, bio);
		queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
		spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
		return;
	}
	bio_endio(bio);
}

static void do_endio_flush(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
{
	struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));

	if (unlikely(dio->fua) && likely(!bio->bi_status) && likely(!dm_integrity_failed(ic)))
		submit_flush_bio(ic, dio);
	else
		do_endio(ic, bio);
}

static void dec_in_flight(struct dm_integrity_io *dio)
{
	if (atomic_dec_and_test(&dio->in_flight)) {
		struct dm_integrity_c *ic = dio->ic;
		struct bio *bio;

		remove_range(ic, &dio->range);

		if (dio->op == REQ_OP_WRITE || unlikely(dio->op == REQ_OP_DISCARD))
			schedule_autocommit(ic);

		bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));

		if (unlikely(dio->bi_status) && !bio->bi_status)
			bio->bi_status = dio->bi_status;
		if (likely(!bio->bi_status) && unlikely(bio_sectors(bio) != dio->range.n_sectors)) {
			dio->range.logical_sector += dio->range.n_sectors;
			bio_advance(bio, dio->range.n_sectors << SECTOR_SHIFT);
			INIT_WORK(&dio->work, integrity_bio_wait);
			queue_work(ic->offload_wq, &dio->work);
			return;
		}
		do_endio_flush(ic, dio);
	}
}

static void integrity_end_io(struct bio *bio)
{
	struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));

	dm_bio_restore(&dio->bio_details, bio);
	if (bio->bi_integrity)
		bio->bi_opf |= REQ_INTEGRITY;

	if (dio->completion)
		complete(dio->completion);

	dec_in_flight(dio);
}

static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector,
				      const char *data, char *result)
{
	__u64 sector_le = cpu_to_le64(sector);
	SHASH_DESC_ON_STACK(req, ic->internal_hash);
	int r;
	unsigned digest_size;

	req->tfm = ic->internal_hash;

	r = crypto_shash_init(req);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_init", r);
		goto failed;
	}

	if (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) {
		r = crypto_shash_update(req, (__u8 *)&ic->sb->salt, SALT_SIZE);
		if (unlikely(r < 0)) {
			dm_integrity_io_error(ic, "crypto_shash_update", r);
			goto failed;
		}
	}

	r = crypto_shash_update(req, (const __u8 *)&sector_le, sizeof sector_le);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_update", r);
		goto failed;
	}

	r = crypto_shash_update(req, data, ic->sectors_per_block << SECTOR_SHIFT);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_update", r);
		goto failed;
	}

	r = crypto_shash_final(req, result);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_final", r);
		goto failed;
	}

	digest_size = crypto_shash_digestsize(ic->internal_hash);
	if (unlikely(digest_size < ic->tag_size))
		memset(result + digest_size, 0, ic->tag_size - digest_size);

	return;

failed:
	/* this shouldn't happen anyway, the hash functions have no reason to fail */
	get_random_bytes(result, ic->tag_size);
}
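/*
 * The per-block tag computed above is hash(salt || le64(sector) || data)
 * when the superblock has SB_FLAG_FIXED_HMAC, and hash(le64(sector) ||
 * data) otherwise, truncated to tag_size (or zero-padded when the digest
 * is shorter). Mixing in the sector number means an otherwise valid block
 * that was copied to a different location fails verification.
 */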
static void integrity_metadata(struct work_struct *w)
{
	struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
	struct dm_integrity_c *ic = dio->ic;
	int r;

	if (ic->internal_hash) {
		struct bvec_iter iter;
		struct bio_vec bv;
		unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
		struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
		char *checksums;
		unsigned extra_space = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0;
		char checksums_onstack[max((size_t)HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
		sector_t sector;
		unsigned sectors_to_process;

		if (unlikely(ic->mode == 'R'))
			goto skip_io;

		if (likely(dio->op != REQ_OP_DISCARD))
			checksums = kmalloc((PAGE_SIZE >> SECTOR_SHIFT >> ic->sb->log2_sectors_per_block) * ic->tag_size + extra_space,
					    GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);
		else
			checksums = kmalloc(PAGE_SIZE, GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);
		if (!checksums) {
			checksums = checksums_onstack;
			if (WARN_ON(extra_space &&
				    digest_size > sizeof(checksums_onstack))) {
				r = -EINVAL;
				goto error;
			}
		}

		if (unlikely(dio->op == REQ_OP_DISCARD)) {
			sector_t bi_sector = dio->bio_details.bi_iter.bi_sector;
			unsigned bi_size = dio->bio_details.bi_iter.bi_size;
			unsigned max_size = likely(checksums != checksums_onstack) ? PAGE_SIZE : HASH_MAX_DIGESTSIZE;
			unsigned max_blocks = max_size / ic->tag_size;

			memset(checksums, DISCARD_FILLER, max_size);

			while (bi_size) {
				unsigned this_step_blocks = bi_size >> (SECTOR_SHIFT + ic->sb->log2_sectors_per_block);

				this_step_blocks = min(this_step_blocks, max_blocks);
				r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
							this_step_blocks * ic->tag_size, TAG_WRITE);
				if (unlikely(r)) {
					if (likely(checksums != checksums_onstack))
						kfree(checksums);
					goto error;
				}

				/*if (bi_size < this_step_blocks << (SECTOR_SHIFT + ic->sb->log2_sectors_per_block)) {
					printk("BUGG: bi_sector: %llx, bi_size: %u\n", bi_sector, bi_size);
					printk("BUGG: this_step_blocks: %u\n", this_step_blocks);
				}*/

				bi_size -= this_step_blocks << (SECTOR_SHIFT + ic->sb->log2_sectors_per_block);
				bi_sector += this_step_blocks << ic->sb->log2_sectors_per_block;
			}

			if (likely(checksums != checksums_onstack))
				kfree(checksums);
			goto skip_io;
		}

		sector = dio->range.logical_sector;
		sectors_to_process = dio->range.n_sectors;

		__bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
			unsigned pos;
			char *mem, *checksums_ptr;

again:
			mem = (char *)kmap_atomic(bv.bv_page) + bv.bv_offset;
			pos = 0;
			checksums_ptr = checksums;
			do {
				integrity_sector_checksum(ic, sector, mem + pos, checksums_ptr);
				checksums_ptr += ic->tag_size;
				sectors_to_process -= ic->sectors_per_block;
				pos += ic->sectors_per_block << SECTOR_SHIFT;
				sector += ic->sectors_per_block;
			} while (pos < bv.bv_len && sectors_to_process && checksums != checksums_onstack);
			kunmap_atomic(mem);

			r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
						checksums_ptr - checksums, dio->op == REQ_OP_READ ? TAG_CMP : TAG_WRITE);
			if (unlikely(r)) {
				if (r > 0) {
					char b[BDEVNAME_SIZE];
					DMERR_LIMIT("%s: Checksum failed at sector 0x%llx", bio_devname(bio, b),
						    (sector - ((r + ic->tag_size - 1) / ic->tag_size)));
					r = -EILSEQ;
					atomic64_inc(&ic->number_of_mismatches);
				}
				if (likely(checksums != checksums_onstack))
					kfree(checksums);
				goto error;
			}

			if (!sectors_to_process)
				break;

			if (unlikely(pos < bv.bv_len)) {
				bv.bv_offset += pos;
				bv.bv_len -= pos;
				goto again;
			}
		}

		if (likely(checksums != checksums_onstack))
			kfree(checksums);
	} else {
		struct bio_integrity_payload *bip = dio->bio_details.bi_integrity;

		if (bip) {
			struct bio_vec biv;
			struct bvec_iter iter;
			unsigned data_to_process = dio->range.n_sectors;

			sector_to_block(ic, data_to_process);
			data_to_process *= ic->tag_size;

			bip_for_each_vec(biv, bip, iter) {
				unsigned char *tag;
				unsigned this_len;

				BUG_ON(PageHighMem(biv.bv_page));
				tag = lowmem_page_address(biv.bv_page) + biv.bv_offset;
				this_len = min(biv.bv_len, data_to_process);
				r = dm_integrity_rw_tag(ic, tag, &dio->metadata_block, &dio->metadata_offset,
							this_len, dio->op == REQ_OP_READ ? TAG_READ : TAG_WRITE);
				if (unlikely(r))
					goto error;
				data_to_process -= this_len;
				if (!data_to_process)
					break;
			}
		}
	}
skip_io:
	dec_in_flight(dio);
	return;
error:
	dio->bi_status = errno_to_blk_status(r);
	dec_in_flight(dio);
}

static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_integrity_c *ic = ti->private;
	struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
	struct bio_integrity_payload *bip;

	sector_t area, offset;

	dio->ic = ic;
	dio->bi_status = 0;
	dio->op = bio_op(bio);

	if (unlikely(dio->op == REQ_OP_DISCARD)) {
		if (ti->max_io_len) {
			sector_t sec = dm_target_offset(ti, bio->bi_iter.bi_sector);
			unsigned log2_max_io_len = __fls(ti->max_io_len);
			sector_t start_boundary = sec >> log2_max_io_len;
			sector_t end_boundary = (sec + bio_sectors(bio) - 1) >> log2_max_io_len;

			if (start_boundary < end_boundary) {
				sector_t len = ti->max_io_len - (sec & (ti->max_io_len - 1));

				dm_accept_partial_bio(bio, len);
			}
		}
	}
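	/*
	 * Example of the discard splitting above: with ti->max_io_len ==
	 * 32768, a discard starting at sector 100000 with 40000 sectors
	 * crosses the boundary at 131072, so it is split after
	 * len = 32768 - (100000 & 32767) = 31072 sectors, which ends the
	 * first piece exactly at the boundary.
	 */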
	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
		submit_flush_bio(ic, dio);
		return DM_MAPIO_SUBMITTED;
	}

	dio->range.logical_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
	dio->fua = dio->op == REQ_OP_WRITE && bio->bi_opf & REQ_FUA;
	if (unlikely(dio->fua)) {
		/*
		 * Don't pass down the FUA flag because we have to flush
		 * disk cache anyway.
		 */
		bio->bi_opf &= ~REQ_FUA;
	}
	if (unlikely(dio->range.logical_sector + bio_sectors(bio) > ic->provided_data_sectors)) {
		DMERR("Too big sector number: 0x%llx + 0x%x > 0x%llx",
		      dio->range.logical_sector, bio_sectors(bio),
		      ic->provided_data_sectors);
		return DM_MAPIO_KILL;
	}
	if (unlikely((dio->range.logical_sector | bio_sectors(bio)) & (unsigned)(ic->sectors_per_block - 1))) {
		DMERR("Bio not aligned on %u sectors: 0x%llx, 0x%x",
		      ic->sectors_per_block,
		      dio->range.logical_sector, bio_sectors(bio));
		return DM_MAPIO_KILL;
	}

	if (ic->sectors_per_block > 1 && likely(dio->op != REQ_OP_DISCARD)) {
		struct bvec_iter iter;
		struct bio_vec bv;

		bio_for_each_segment(bv, bio, iter) {
			if (unlikely(bv.bv_len & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) {
				DMERR("Bio vector (%u,%u) is not aligned on %u-sector boundary",
				      bv.bv_offset, bv.bv_len, ic->sectors_per_block);
				return DM_MAPIO_KILL;
			}
		}
	}

	bip = bio_integrity(bio);
	if (!ic->internal_hash) {
		if (bip) {
			unsigned wanted_tag_size = bio_sectors(bio) >> ic->sb->log2_sectors_per_block;

			if (ic->log2_tag_size >= 0)
				wanted_tag_size <<= ic->log2_tag_size;
			else
				wanted_tag_size *= ic->tag_size;
			if (unlikely(wanted_tag_size != bip->bip_iter.bi_size)) {
				DMERR("Invalid integrity data size %u, expected %u",
				      bip->bip_iter.bi_size, wanted_tag_size);
				return DM_MAPIO_KILL;
			}
		}
	} else {
		if (unlikely(bip != NULL)) {
			DMERR("Unexpected integrity data when using internal hash");
			return DM_MAPIO_KILL;
		}
	}

	if (unlikely(ic->mode == 'R') && unlikely(dio->op != REQ_OP_READ))
		return DM_MAPIO_KILL;

	get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
	dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
	bio->bi_iter.bi_sector = get_data_sector(ic, area, offset);

	dm_integrity_map_continue(dio, true);
	return DM_MAPIO_SUBMITTED;
}

static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio,
				 unsigned journal_section, unsigned journal_entry)
{
	struct dm_integrity_c *ic = dio->ic;
	sector_t logical_sector;
	unsigned n_sectors;

	logical_sector = dio->range.logical_sector;
	n_sectors = dio->range.n_sectors;
	do {
		struct bio_vec bv = bio_iovec(bio);
		char *mem;

		if (unlikely(bv.bv_len >> SECTOR_SHIFT > n_sectors))
			bv.bv_len = n_sectors << SECTOR_SHIFT;
		n_sectors -= bv.bv_len >> SECTOR_SHIFT;
		bio_advance_iter(bio, &bio->bi_iter, bv.bv_len);
retry_kmap:
		mem = kmap_atomic(bv.bv_page);
		if (likely(dio->op == REQ_OP_WRITE))
			flush_dcache_page(bv.bv_page);

		do {
			struct journal_entry *je = access_journal_entry(ic, journal_section, journal_entry);

			if (unlikely(dio->op == REQ_OP_READ)) {
				struct journal_sector *js;
				char *mem_ptr;
				unsigned s;

				if (unlikely(journal_entry_is_inprogress(je))) {
					flush_dcache_page(bv.bv_page);
					kunmap_atomic(mem);

					__io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
					goto retry_kmap;
				}
				smp_rmb();
				BUG_ON(journal_entry_get_sector(je) != logical_sector);
				js = access_journal_data(ic, journal_section, journal_entry);
				mem_ptr = mem + bv.bv_offset;
				s = 0;
				do {
					memcpy(mem_ptr, js, JOURNAL_SECTOR_DATA);
					*(commit_id_t *)(mem_ptr + JOURNAL_SECTOR_DATA) = je->last_bytes[s];
					js++;
					mem_ptr += 1 << SECTOR_SHIFT;
				} while (++s < ic->sectors_per_block);
#ifdef INTERNAL_VERIFY
				if (ic->internal_hash) {
					char checksums_onstack[max((size_t)HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];

					integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack);
					if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) {
						DMERR_LIMIT("Checksum failed when reading from journal, at sector 0x%llx",
							    logical_sector);
					}
				}
#endif
			}

			if (!ic->internal_hash) {
				struct bio_integrity_payload *bip = bio_integrity(bio);
				unsigned tag_todo = ic->tag_size;
				char *tag_ptr = journal_entry_tag(ic, je);

				if (bip) do {
					struct bio_vec biv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
					unsigned tag_now = min(biv.bv_len, tag_todo);
					char *tag_addr;

					BUG_ON(PageHighMem(biv.bv_page));
					tag_addr = lowmem_page_address(biv.bv_page) + biv.bv_offset;
					if (likely(dio->op == REQ_OP_WRITE))
						memcpy(tag_ptr, tag_addr, tag_now);
					else
						memcpy(tag_addr, tag_ptr, tag_now);
					bvec_iter_advance(bip->bip_vec, &bip->bip_iter, tag_now);
					tag_ptr += tag_now;
					tag_todo -= tag_now;
				} while (unlikely(tag_todo)); else {
					if (likely(dio->op == REQ_OP_WRITE))
						memset(tag_ptr, 0, tag_todo);
				}
			}

			if (likely(dio->op == REQ_OP_WRITE)) {
				struct journal_sector *js;
				unsigned s;

				js = access_journal_data(ic, journal_section, journal_entry);
				memcpy(js, mem + bv.bv_offset, ic->sectors_per_block << SECTOR_SHIFT);

				s = 0;
				do {
					je->last_bytes[s] = js[s].commit_id;
				} while (++s < ic->sectors_per_block);

				if (ic->internal_hash) {
					unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);

					if (unlikely(digest_size > ic->tag_size)) {
						char checksums_onstack[HASH_MAX_DIGESTSIZE];

						integrity_sector_checksum(ic, logical_sector, (char *)js, checksums_onstack);
						memcpy(journal_entry_tag(ic, je), checksums_onstack, ic->tag_size);
					} else
						integrity_sector_checksum(ic, logical_sector, (char *)js, journal_entry_tag(ic, je));
				}

				journal_entry_set_sector(je, logical_sector);
			}
			logical_sector += ic->sectors_per_block;

			journal_entry++;
			if (unlikely(journal_entry == ic->journal_section_entries)) {
				journal_entry = 0;
				journal_section++;
				wraparound_section(ic, &journal_section);
			}

			bv.bv_offset += ic->sectors_per_block << SECTOR_SHIFT;
		} while (bv.bv_len -= ic->sectors_per_block << SECTOR_SHIFT);

		if (unlikely(dio->op == REQ_OP_READ))
			flush_dcache_page(bv.bv_page);
		kunmap_atomic(mem);
	} while (n_sectors);

	if (likely(dio->op == REQ_OP_WRITE)) {
		smp_mb();
		if (unlikely(waitqueue_active(&ic->copy_to_journal_wait)))
			wake_up(&ic->copy_to_journal_wait);
		if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold) {
			queue_work(ic->commit_wq, &ic->commit_work);
		} else {
			schedule_autocommit(ic);
		}
	} else {
		remove_range(ic, &dio->range);
	}

	if (unlikely(bio->bi_iter.bi_size)) {
		sector_t area, offset;

		dio->range.logical_sector = logical_sector;
		get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
		dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
		return true;
	}

	return false;
}

static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map)
{
	struct dm_integrity_c *ic = dio->ic;
	struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
	unsigned journal_section, journal_entry;
	unsigned journal_read_pos;
	struct completion read_comp;
	bool discard_retried = false;
	bool need_sync_io = ic->internal_hash && dio->op == REQ_OP_READ;

	if (unlikely(dio->op == REQ_OP_DISCARD) && ic->mode != 'D')
		need_sync_io = true;

	if (need_sync_io && from_map) {
		INIT_WORK(&dio->work, integrity_bio_wait);
		queue_work(ic->offload_wq, &dio->work);
		return;
	}

lock_retry:
	spin_lock_irq(&ic->endio_wait.lock);
retry:
	if (unlikely(dm_integrity_failed(ic))) {
		spin_unlock_irq(&ic->endio_wait.lock);
		do_endio(ic, bio);
		return;
	}
	dio->range.n_sectors = bio_sectors(bio);
	journal_read_pos = NOT_FOUND;
	if (ic->mode == 'J' && likely(dio->op != REQ_OP_DISCARD)) {
		if (dio->op == REQ_OP_WRITE) {
			unsigned next_entry, i, pos;
			unsigned ws, we, range_sectors;

			dio->range.n_sectors = min(dio->range.n_sectors,
						   (sector_t)ic->free_sectors << ic->sb->log2_sectors_per_block);
			if (unlikely(!dio->range.n_sectors)) {
				if (from_map)
					goto offload_to_thread;
				sleep_on_endio_wait(ic);
				goto retry;
			}
			range_sectors = dio->range.n_sectors >> ic->sb->log2_sectors_per_block;
			ic->free_sectors -= range_sectors;
			journal_section = ic->free_section;
			journal_entry = ic->free_section_entry;

			next_entry = ic->free_section_entry + range_sectors;
			ic->free_section_entry = next_entry % ic->journal_section_entries;
			ic->free_section += next_entry / ic->journal_section_entries;
			ic->n_uncommitted_sections += next_entry / ic->journal_section_entries;
			wraparound_section(ic, &ic->free_section);

			pos = journal_section * ic->journal_section_entries + journal_entry;
			ws = journal_section;
			we = journal_entry;
			i = 0;
			do {
				struct journal_entry *je;

				add_journal_node(ic, &ic->journal_tree[pos], dio->range.logical_sector + i);
				pos++;
				if (unlikely(pos >= ic->journal_entries))
					pos = 0;

				je = access_journal_entry(ic, ws, we);
				BUG_ON(!journal_entry_is_unused(je));
				journal_entry_set_inprogress(je);
				we++;
				if (unlikely(we == ic->journal_section_entries)) {
					we = 0;
					ws++;
					wraparound_section(ic, &ws);
				}
			} while ((i += ic->sectors_per_block) < dio->range.n_sectors);

			spin_unlock_irq(&ic->endio_wait.lock);
			goto journal_read_write;
		} else {
			sector_t next_sector;

			journal_read_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
			if (likely(journal_read_pos == NOT_FOUND)) {
				if (unlikely(dio->range.n_sectors > next_sector - dio->range.logical_sector))
					dio->range.n_sectors = next_sector - dio->range.logical_sector;
			} else {
				unsigned i;
				unsigned jp = journal_read_pos + 1;

				for (i = ic->sectors_per_block; i < dio->range.n_sectors; i += ic->sectors_per_block, jp++) {
					if (!test_journal_node(ic, jp, dio->range.logical_sector + i))
						break;
				}
				dio->range.n_sectors = i;
			}
		}
	}
	if (unlikely(!add_new_range(ic, &dio->range, true))) {
		/*
		 * We must not sleep in the request routine because it could
		 * stall bios on current->bio_list.
		 * So, we offload the bio to a workqueue if we have to sleep.
		 */
		if (from_map) {
offload_to_thread:
			spin_unlock_irq(&ic->endio_wait.lock);
			INIT_WORK(&dio->work, integrity_bio_wait);
			queue_work(ic->wait_wq, &dio->work);
			return;
		}
		if (journal_read_pos != NOT_FOUND)
			dio->range.n_sectors = ic->sectors_per_block;
		wait_and_add_new_range(ic, &dio->range);
		/*
		 * wait_and_add_new_range drops the spinlock, so the journal
		 * may have been changed arbitrarily. We need to recheck.
		 * To simplify the code, we restrict I/O size to just one block.
		 */
		if (journal_read_pos != NOT_FOUND) {
			sector_t next_sector;
			unsigned new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);

			if (unlikely(new_pos != journal_read_pos)) {
				remove_range_unlocked(ic, &dio->range);
				goto retry;
			}
		}
	}
	if (ic->mode == 'J' && likely(dio->op == REQ_OP_DISCARD) && !discard_retried) {
		sector_t next_sector;
		unsigned new_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);

		if (unlikely(new_pos != NOT_FOUND) ||
		    unlikely(next_sector < dio->range.logical_sector - dio->range.n_sectors)) {
			remove_range_unlocked(ic, &dio->range);
			spin_unlock_irq(&ic->endio_wait.lock);
			queue_work(ic->commit_wq, &ic->commit_work);
			flush_workqueue(ic->commit_wq);
			queue_work(ic->writer_wq, &ic->writer_work);
			flush_workqueue(ic->writer_wq);
			discard_retried = true;
			goto lock_retry;
		}
	}
	spin_unlock_irq(&ic->endio_wait.lock);

	if (unlikely(journal_read_pos != NOT_FOUND)) {
		journal_section = journal_read_pos / ic->journal_section_entries;
		journal_entry = journal_read_pos % ic->journal_section_entries;
		goto journal_read_write;
	}

	if (ic->mode == 'B' && (dio->op == REQ_OP_WRITE || unlikely(dio->op == REQ_OP_DISCARD))) {
		if (!block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
				     dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) {
			struct bitmap_block_status *bbs;

			bbs = sector_to_bitmap_block(ic, dio->range.logical_sector);
			spin_lock(&bbs->bio_queue_lock);
			bio_list_add(&bbs->bio_queue, bio);
			spin_unlock(&bbs->bio_queue_lock);
			queue_work(ic->writer_wq, &bbs->work);
			return;
		}
	}

	dio->in_flight = (atomic_t)ATOMIC_INIT(2);

	if (need_sync_io) {
		init_completion(&read_comp);
		dio->completion = &read_comp;
	} else
		dio->completion = NULL;

	dm_bio_record(&dio->bio_details, bio);
	bio_set_dev(bio, ic->dev->bdev);
	bio->bi_integrity = NULL;
	bio->bi_opf &= ~REQ_INTEGRITY;
	bio->bi_end_io = integrity_end_io;
	bio->bi_iter.bi_size = dio->range.n_sectors << SECTOR_SHIFT;

	if (unlikely(dio->op == REQ_OP_DISCARD) && likely(ic->mode != 'D')) {
		integrity_metadata(&dio->work);
		dm_integrity_flush_buffers(ic, false);

		dio->in_flight = (atomic_t)ATOMIC_INIT(1);
		dio->completion = NULL;

		submit_bio_noacct(bio);

		return;
	}

	submit_bio_noacct(bio);

	if (need_sync_io) {
		wait_for_completion_io(&read_comp);
		if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
		    dio->range.logical_sector + dio->range.n_sectors > le64_to_cpu(ic->sb->recalc_sector))
			goto skip_check;
		if (ic->mode == 'B') {
			if (!block_bitmap_op(ic, ic->recalc_bitmap, dio->range.logical_sector,
					     dio->range.n_sectors, BITMAP_OP_TEST_ALL_CLEAR))
				goto skip_check;
		}

		if (likely(!bio->bi_status))
			integrity_metadata(&dio->work);
		else
skip_check:
			dec_in_flight(dio);
	} else {
		INIT_WORK(&dio->work, integrity_metadata);
2296 queue_work(ic->metadata_wq, &dio->work);
2302 if (unlikely(__journal_read_write(dio, bio, journal_section, journal_entry)))
2305 do_endio_flush(ic, dio);
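/* Workqueue entry point that resumes bio processing outside the map path. */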
2309 static void integrity_bio_wait(struct work_struct *w)
2311 struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
2313 dm_integrity_map_continue(dio, false);
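/*
 * Close a partially filled journal section before a commit: the unused
 * entries are subtracted from free_sectors and the section is counted as
 * uncommitted, so commits always operate on whole sections.
 */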
2316 static void pad_uncommitted(struct dm_integrity_c *ic)
2318 if (ic->free_section_entry) {
2319 ic->free_sectors -= ic->journal_section_entries - ic->free_section_entry;
2320 ic->free_section_entry = 0;
2322 wraparound_section(ic, &ic->free_section);
2323 ic->n_uncommitted_sections++;
2325 if (WARN_ON(ic->journal_sections * ic->journal_section_entries !=
2326 (ic->n_uncommitted_sections + ic->n_committed_sections) *
2327 ic->journal_section_entries + ic->free_sectors)) {
2328 DMCRIT("journal_sections %u, journal_section_entries %u, "
2329 "n_uncommitted_sections %u, n_committed_sections %u, "
2330 "journal_section_entries %u, free_sectors %u",
2331 ic->journal_sections, ic->journal_section_entries,
2332 ic->n_uncommitted_sections, ic->n_committed_sections,
2333 ic->journal_section_entries, ic->free_sectors);
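/*
 * Commit worker: pad the current section, wait for in-progress entries,
 * stamp commit ids into every journal sector, write the sections out and
 * then release any queued flush bios.
 */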
2337 static void integrity_commit(struct work_struct *w)
2339 struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, commit_work);
2340 unsigned commit_start, commit_sections;
2342 struct bio *flushes;
2344 del_timer(&ic->autocommit_timer);
2346 spin_lock_irq(&ic->endio_wait.lock);
2347 flushes = bio_list_get(&ic->flush_bio_list);
2348 if (unlikely(ic->mode != 'J')) {
2349 spin_unlock_irq(&ic->endio_wait.lock);
2350 dm_integrity_flush_buffers(ic, true);
2351 goto release_flush_bios;
2354 pad_uncommitted(ic);
2355 commit_start = ic->uncommitted_section;
2356 commit_sections = ic->n_uncommitted_sections;
2357 spin_unlock_irq(&ic->endio_wait.lock);
2359 if (!commit_sections)
2360 goto release_flush_bios;
2363 for (n = 0; n < commit_sections; n++) {
2364 for (j = 0; j < ic->journal_section_entries; j++) {
2365 struct journal_entry *je;
2366 je = access_journal_entry(ic, i, j);
2367 io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
2369 for (j = 0; j < ic->journal_section_sectors; j++) {
2370 struct journal_sector *js;
2371 js = access_journal(ic, i, j);
2372 js->commit_id = dm_integrity_commit_id(ic, i, j, ic->commit_seq);
2375 if (unlikely(i >= ic->journal_sections))
2376 ic->commit_seq = next_commit_seq(ic->commit_seq);
2377 wraparound_section(ic, &i);
2381 write_journal(ic, commit_start, commit_sections);
2383 spin_lock_irq(&ic->endio_wait.lock);
2384 ic->uncommitted_section += commit_sections;
2385 wraparound_section(ic, &ic->uncommitted_section);
2386 ic->n_uncommitted_sections -= commit_sections;
2387 ic->n_committed_sections += commit_sections;
2388 spin_unlock_irq(&ic->endio_wait.lock);
2390 if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold)
2391 queue_work(ic->writer_wq, &ic->writer_work);
2395 struct bio *next = flushes->bi_next;
2396 flushes->bi_next = NULL;
2397 do_endio(ic, flushes);
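/*
 * Completion callback for a journal-to-device copy: releases the locked
 * range and reports I/O errors.
 */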
2402 static void complete_copy_from_journal(unsigned long error, void *context)
2404 struct journal_io *io = context;
2405 struct journal_completion *comp = io->comp;
2406 struct dm_integrity_c *ic = comp->ic;
2407 remove_range(ic, &io->range);
2408 mempool_free(io, &ic->journal_io_mempool);
2409 if (unlikely(error != 0))
2410 dm_integrity_io_error(ic, "copying from journal", -EIO);
2411 complete_journal_op(comp);
2414 static void restore_last_bytes(struct dm_integrity_c *ic, struct journal_sector *js,
2415 struct journal_entry *je)
2419 js->commit_id = je->last_bytes[s];
2421 } while (++s < ic->sectors_per_block);
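/*
 * Write committed journal sections to their final location on the data
 * device. Adjacent entries are coalesced into a single copy, entries
 * superseded by a newer committed sector are dropped, and the tags are
 * written to the metadata area. Also called with from_replay set when
 * replaying the journal after a crash.
 */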
2424 static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
2425 unsigned write_sections, bool from_replay)
2428 struct journal_completion comp;
2429 struct blk_plug plug;
2431 blk_start_plug(&plug);
2434 comp.in_flight = (atomic_t)ATOMIC_INIT(1);
2435 init_completion(&comp.comp);
2438 for (n = 0; n < write_sections; n++, i++, wraparound_section(ic, &i)) {
2439 #ifndef INTERNAL_VERIFY
2440 if (unlikely(from_replay))
2442 rw_section_mac(ic, i, false);
2443 for (j = 0; j < ic->journal_section_entries; j++) {
2444 struct journal_entry *je = access_journal_entry(ic, i, j);
2445 sector_t sec, area, offset;
2446 unsigned k, l, next_loop;
2447 sector_t metadata_block;
2448 unsigned metadata_offset;
2449 struct journal_io *io;
2451 if (journal_entry_is_unused(je))
2453 BUG_ON(unlikely(journal_entry_is_inprogress(je)) && !from_replay);
2454 sec = journal_entry_get_sector(je);
2455 if (unlikely(from_replay)) {
2456 if (unlikely(sec & (unsigned)(ic->sectors_per_block - 1))) {
2457 dm_integrity_io_error(ic, "invalid sector in journal", -EIO);
2458 sec &= ~(sector_t)(ic->sectors_per_block - 1);
2461 if (unlikely(sec >= ic->provided_data_sectors))
2463 get_area_and_offset(ic, sec, &area, &offset);
2464 restore_last_bytes(ic, access_journal_data(ic, i, j), je);
2465 for (k = j + 1; k < ic->journal_section_entries; k++) {
2466 struct journal_entry *je2 = access_journal_entry(ic, i, k);
2467 sector_t sec2, area2, offset2;
2468 if (journal_entry_is_unused(je2))
2470 BUG_ON(unlikely(journal_entry_is_inprogress(je2)) && !from_replay);
2471 sec2 = journal_entry_get_sector(je2);
2472 if (unlikely(sec2 >= ic->provided_data_sectors))
2474 get_area_and_offset(ic, sec2, &area2, &offset2);
2475 if (area2 != area || offset2 != offset + ((k - j) << ic->sb->log2_sectors_per_block))
2477 restore_last_bytes(ic, access_journal_data(ic, i, k), je2);
2481 io = mempool_alloc(&ic->journal_io_mempool, GFP_NOIO);
2483 io->range.logical_sector = sec;
2484 io->range.n_sectors = (k - j) << ic->sb->log2_sectors_per_block;
2486 spin_lock_irq(&ic->endio_wait.lock);
2487 add_new_range_and_wait(ic, &io->range);
2489 if (likely(!from_replay)) {
2490 struct journal_node *section_node = &ic->journal_tree[i * ic->journal_section_entries];
2492 /* don't write if there is newer committed sector */
2493 while (j < k && find_newer_committed_node(ic, &section_node[j])) {
2494 struct journal_entry *je2 = access_journal_entry(ic, i, j);
2496 journal_entry_set_unused(je2);
2497 remove_journal_node(ic, &section_node[j]);
2499 sec += ic->sectors_per_block;
2500 offset += ic->sectors_per_block;
2502 while (j < k && find_newer_committed_node(ic, &section_node[k - 1])) {
2503 struct journal_entry *je2 = access_journal_entry(ic, i, k - 1);
2505 journal_entry_set_unused(je2);
2506 remove_journal_node(ic, &section_node[k - 1]);
2510 remove_range_unlocked(ic, &io->range);
2511 spin_unlock_irq(&ic->endio_wait.lock);
2512 mempool_free(io, &ic->journal_io_mempool);
2515 for (l = j; l < k; l++) {
2516 remove_journal_node(ic, &section_node[l]);
2519 spin_unlock_irq(&ic->endio_wait.lock);
2521 metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);
2522 for (l = j; l < k; l++) {
2524 struct journal_entry *je2 = access_journal_entry(ic, i, l);
2527 #ifndef INTERNAL_VERIFY
2528 unlikely(from_replay) &&
2530 ic->internal_hash) {
2531 char test_tag[max_t(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];
2533 integrity_sector_checksum(ic, sec + ((l - j) << ic->sb->log2_sectors_per_block),
2534 (char *)access_journal_data(ic, i, l), test_tag);
2535 if (unlikely(memcmp(test_tag, journal_entry_tag(ic, je2), ic->tag_size)))
2536 dm_integrity_io_error(ic, "tag mismatch when replaying journal", -EILSEQ);
2539 journal_entry_set_unused(je2);
2540 r = dm_integrity_rw_tag(ic, journal_entry_tag(ic, je2), &metadata_block, &metadata_offset,
2541 ic->tag_size, TAG_WRITE);
2543 dm_integrity_io_error(ic, "reading tags", r);
2547 atomic_inc(&comp.in_flight);
2548 copy_from_journal(ic, i, j << ic->sb->log2_sectors_per_block,
2549 (k - j) << ic->sb->log2_sectors_per_block,
2550 get_data_sector(ic, area, offset),
2551 complete_copy_from_journal, io);
2557 dm_bufio_write_dirty_buffers_async(ic->bufio);
2559 blk_finish_plug(&plug);
2561 complete_journal_op(&comp);
2562 wait_for_completion_io(&comp.comp);
2564 dm_integrity_flush_buffers(ic, true);
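/*
 * Writer worker: copy committed journal sections to the device and
 * return the space to the free pool, waking writers that were blocked on
 * a full journal.
 */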
2567 static void integrity_writer(struct work_struct *w)
2569 struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, writer_work);
2570 unsigned write_start, write_sections;
2572 unsigned prev_free_sectors;
2574 /* the following test is not needed, but it tests the replay code */
2575 if (unlikely(dm_post_suspending(ic->ti)) && !ic->meta_dev)
2578 spin_lock_irq(&ic->endio_wait.lock);
2579 write_start = ic->committed_section;
2580 write_sections = ic->n_committed_sections;
2581 spin_unlock_irq(&ic->endio_wait.lock);
2583 if (!write_sections)
2586 do_journal_write(ic, write_start, write_sections, false);
2588 spin_lock_irq(&ic->endio_wait.lock);
2590 ic->committed_section += write_sections;
2591 wraparound_section(ic, &ic->committed_section);
2592 ic->n_committed_sections -= write_sections;
2594 prev_free_sectors = ic->free_sectors;
2595 ic->free_sectors += write_sections * ic->journal_section_entries;
2596 if (unlikely(!prev_free_sectors))
2597 wake_up_locked(&ic->endio_wait);
2599 spin_unlock_irq(&ic->endio_wait.lock);
2602 static void recalc_write_super(struct dm_integrity_c *ic)
2606 dm_integrity_flush_buffers(ic, false);
2607 if (dm_integrity_failed(ic))
2610 r = sync_rw_sb(ic, REQ_OP_WRITE, 0);
2612 dm_integrity_io_error(ic, "writing superblock", r);
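/*
 * Background recalculation worker: read data in RECALC_SECTORS chunks,
 * compute the per-block checksums, write them to the metadata area and
 * periodically persist the progress (recalc_sector) in the superblock.
 */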
2615 static void integrity_recalc(struct work_struct *w)
2617 struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, recalc_work);
2618 struct dm_integrity_range range;
2619 struct dm_io_request io_req;
2620 struct dm_io_region io_loc;
2621 sector_t area, offset;
2622 sector_t metadata_block;
2623 unsigned metadata_offset;
2624 sector_t logical_sector, n_sectors;
2628 unsigned super_counter = 0;
2630 DEBUG_print("start recalculation... (position %llx)\n", le64_to_cpu(ic->sb->recalc_sector));
2632 spin_lock_irq(&ic->endio_wait.lock);
2636 if (unlikely(dm_post_suspending(ic->ti)))
2639 range.logical_sector = le64_to_cpu(ic->sb->recalc_sector);
2640 if (unlikely(range.logical_sector >= ic->provided_data_sectors)) {
2641 if (ic->mode == 'B') {
2642 block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
2643 DEBUG_print("queue_delayed_work: bitmap_flush_work\n");
2644 queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
2649 get_area_and_offset(ic, range.logical_sector, &area, &offset);
2650 range.n_sectors = min((sector_t)RECALC_SECTORS, ic->provided_data_sectors - range.logical_sector);
2652 range.n_sectors = min(range.n_sectors, ((sector_t)1U << ic->sb->log2_interleave_sectors) - (unsigned)offset);
2654 add_new_range_and_wait(ic, &range);
2655 spin_unlock_irq(&ic->endio_wait.lock);
2656 logical_sector = range.logical_sector;
2657 n_sectors = range.n_sectors;
2659 if (ic->mode == 'B') {
2660 if (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector, n_sectors, BITMAP_OP_TEST_ALL_CLEAR)) {
2661 goto advance_and_next;
2663 while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector,
2664 ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) {
2665 logical_sector += ic->sectors_per_block;
2666 n_sectors -= ic->sectors_per_block;
2669 while (block_bitmap_op(ic, ic->recalc_bitmap, logical_sector + n_sectors - ic->sectors_per_block,
2670 ic->sectors_per_block, BITMAP_OP_TEST_ALL_CLEAR)) {
2671 n_sectors -= ic->sectors_per_block;
2674 get_area_and_offset(ic, logical_sector, &area, &offset);
2677 DEBUG_print("recalculating: %llx, %llx\n", logical_sector, n_sectors);
2679 if (unlikely(++super_counter == RECALC_WRITE_SUPER)) {
2680 recalc_write_super(ic);
2681 if (ic->mode == 'B') {
2682 queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval);
2687 if (unlikely(dm_integrity_failed(ic)))
2690 io_req.bi_op = REQ_OP_READ;
2691 io_req.bi_op_flags = 0;
2692 io_req.mem.type = DM_IO_VMA;
2693 io_req.mem.ptr.addr = ic->recalc_buffer;
2694 io_req.notify.fn = NULL;
2695 io_req.client = ic->io;
2696 io_loc.bdev = ic->dev->bdev;
2697 io_loc.sector = get_data_sector(ic, area, offset);
2698 io_loc.count = n_sectors;
2700 r = dm_io(&io_req, 1, &io_loc, NULL);
2702 dm_integrity_io_error(ic, "reading data", r);
2706 t = ic->recalc_tags;
2707 for (i = 0; i < n_sectors; i += ic->sectors_per_block) {
2708 integrity_sector_checksum(ic, logical_sector + i, ic->recalc_buffer + (i << SECTOR_SHIFT), t);
2712 metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);
2714 r = dm_integrity_rw_tag(ic, ic->recalc_tags, &metadata_block, &metadata_offset, t - ic->recalc_tags, TAG_WRITE);
2716 dm_integrity_io_error(ic, "writing tags", r);
2720 if (ic->mode == 'B') {
2721 sector_t start, end;
2722 start = (range.logical_sector >>
2723 (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)) <<
2724 (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
2725 end = ((range.logical_sector + range.n_sectors) >>
2726 (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)) <<
2727 (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
2728 block_bitmap_op(ic, ic->recalc_bitmap, start, end - start, BITMAP_OP_CLEAR);
2734 spin_lock_irq(&ic->endio_wait.lock);
2735 remove_range_unlocked(ic, &range);
2736 ic->sb->recalc_sector = cpu_to_le64(range.logical_sector + range.n_sectors);
2740 remove_range(ic, &range);
2744 spin_unlock_irq(&ic->endio_wait.lock);
2746 recalc_write_super(ic);
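/*
 * Bitmap-mode worker: bios that target regions whose bits are not yet
 * set are queued here; the affected bitmap block is written with REQ_FUA
 * before the bios are resumed.
 */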
2749 static void bitmap_block_work(struct work_struct *w)
2751 struct bitmap_block_status *bbs = container_of(w, struct bitmap_block_status, work);
2752 struct dm_integrity_c *ic = bbs->ic;
2754 struct bio_list bio_queue;
2755 struct bio_list waiting;
2757 bio_list_init(&waiting);
2759 spin_lock(&bbs->bio_queue_lock);
2760 bio_queue = bbs->bio_queue;
2761 bio_list_init(&bbs->bio_queue);
2762 spin_unlock(&bbs->bio_queue_lock);
2764 while ((bio = bio_list_pop(&bio_queue))) {
2765 struct dm_integrity_io *dio;
2767 dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
2769 if (block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
2770 dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) {
2771 remove_range(ic, &dio->range);
2772 INIT_WORK(&dio->work, integrity_bio_wait);
2773 queue_work(ic->offload_wq, &dio->work);
2775 block_bitmap_op(ic, ic->journal, dio->range.logical_sector,
2776 dio->range.n_sectors, BITMAP_OP_SET);
2777 bio_list_add(&waiting, bio);
2781 if (bio_list_empty(&waiting))
2784 rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC,
2785 bbs->idx * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT),
2786 BITMAP_BLOCK_SIZE >> SECTOR_SHIFT, NULL);
2788 while ((bio = bio_list_pop(&waiting))) {
2789 struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
2791 block_bitmap_op(ic, ic->may_write_bitmap, dio->range.logical_sector,
2792 dio->range.n_sectors, BITMAP_OP_SET);
2794 remove_range(ic, &dio->range);
2795 INIT_WORK(&dio->work, integrity_bio_wait);
2796 queue_work(ic->offload_wq, &dio->work);
2799 queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval);
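/*
 * Periodic bitmap flush: quiesce all I/O by locking the whole device
 * range, flush the buffers, clear the bitmap bits that are known to be
 * in sync and write the bitmap back with REQ_FUA.
 */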
2802 static void bitmap_flush_work(struct work_struct *work)
2804 struct dm_integrity_c *ic = container_of(work, struct dm_integrity_c, bitmap_flush_work.work);
2805 struct dm_integrity_range range;
2806 unsigned long limit;
2809 dm_integrity_flush_buffers(ic, false);
2811 range.logical_sector = 0;
2812 range.n_sectors = ic->provided_data_sectors;
2814 spin_lock_irq(&ic->endio_wait.lock);
2815 add_new_range_and_wait(ic, &range);
2816 spin_unlock_irq(&ic->endio_wait.lock);
2818 dm_integrity_flush_buffers(ic, true);
2820 limit = ic->provided_data_sectors;
2821 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
2822 limit = le64_to_cpu(ic->sb->recalc_sector)
2823 >> (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit)
2824 << (ic->sb->log2_sectors_per_block + ic->log2_blocks_per_bitmap_bit);
2826 /*DEBUG_print("zeroing journal\n");*/
2827 block_bitmap_op(ic, ic->journal, 0, limit, BITMAP_OP_CLEAR);
2828 block_bitmap_op(ic, ic->may_write_bitmap, 0, limit, BITMAP_OP_CLEAR);
2830 rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
2831 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
2833 spin_lock_irq(&ic->endio_wait.lock);
2834 remove_range_unlocked(ic, &range);
2835 while (unlikely((bio = bio_list_pop(&ic->synchronous_bios)) != NULL)) {
2837 spin_unlock_irq(&ic->endio_wait.lock);
2838 spin_lock_irq(&ic->endio_wait.lock);
2840 spin_unlock_irq(&ic->endio_wait.lock);
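/*
 * Format a range of journal sections: zero the sector payloads, stamp
 * the given commit sequence into each sector and mark all entries
 * unused.
 */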
2844 static void init_journal(struct dm_integrity_c *ic, unsigned start_section,
2845 unsigned n_sections, unsigned char commit_seq)
2852 for (n = 0; n < n_sections; n++) {
2853 i = start_section + n;
2854 wraparound_section(ic, &i);
2855 for (j = 0; j < ic->journal_section_sectors; j++) {
2856 struct journal_sector *js = access_journal(ic, i, j);
2857 memset(&js->entries, 0, JOURNAL_SECTOR_DATA);
2858 js->commit_id = dm_integrity_commit_id(ic, i, j, commit_seq);
2860 for (j = 0; j < ic->journal_section_entries; j++) {
2861 struct journal_entry *je = access_journal_entry(ic, i, j);
2862 journal_entry_set_unused(je);
2866 write_journal(ic, start_section, n_sections);
2869 static int find_commit_seq(struct dm_integrity_c *ic, unsigned i, unsigned j, commit_id_t id)
2872 for (k = 0; k < N_COMMIT_IDS; k++) {
2873 if (dm_integrity_commit_id(ic, i, j, k) == id)
2876 dm_integrity_io_error(ic, "journal commit id", -EIO);
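/*
 * Journal replay, run on activation: determine the newest fully written
 * commit sequence from the per-sector commit ids, replay any committed
 * sections that were not yet copied to the device, and reinitialize the
 * free-space accounting and the journal tree.
 */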
2880 static void replay_journal(struct dm_integrity_c *ic)
2883 bool used_commit_ids[N_COMMIT_IDS];
2884 unsigned max_commit_id_sections[N_COMMIT_IDS];
2885 unsigned write_start, write_sections;
2886 unsigned continue_section;
2888 unsigned char unused, last_used, want_commit_seq;
2890 if (ic->mode == 'R')
2893 if (ic->journal_uptodate)
2899 if (!ic->just_formatted) {
2900 DEBUG_print("reading journal\n");
2901 rw_journal(ic, REQ_OP_READ, 0, 0, ic->journal_sections, NULL);
2903 DEBUG_bytes(lowmem_page_address(ic->journal_io[0].page), 64, "read journal");
2904 if (ic->journal_io) {
2905 struct journal_completion crypt_comp;
2907 init_completion(&crypt_comp.comp);
2908 crypt_comp.in_flight = (atomic_t)ATOMIC_INIT(0);
2909 encrypt_journal(ic, false, 0, ic->journal_sections, &crypt_comp);
2910 wait_for_completion(&crypt_comp.comp);
2912 DEBUG_bytes(lowmem_page_address(ic->journal[0].page), 64, "decrypted journal");
2915 if (dm_integrity_failed(ic))
2918 journal_empty = true;
2919 memset(used_commit_ids, 0, sizeof used_commit_ids);
2920 memset(max_commit_id_sections, 0, sizeof max_commit_id_sections);
2921 for (i = 0; i < ic->journal_sections; i++) {
2922 for (j = 0; j < ic->journal_section_sectors; j++) {
2924 struct journal_sector *js = access_journal(ic, i, j);
2925 k = find_commit_seq(ic, i, j, js->commit_id);
2928 used_commit_ids[k] = true;
2929 max_commit_id_sections[k] = i;
2931 if (journal_empty) {
2932 for (j = 0; j < ic->journal_section_entries; j++) {
2933 struct journal_entry *je = access_journal_entry(ic, i, j);
2934 if (!journal_entry_is_unused(je)) {
2935 journal_empty = false;
2942 if (!used_commit_ids[N_COMMIT_IDS - 1]) {
2943 unused = N_COMMIT_IDS - 1;
2944 while (unused && !used_commit_ids[unused - 1])
2947 for (unused = 0; unused < N_COMMIT_IDS; unused++)
2948 if (!used_commit_ids[unused])
2950 if (unused == N_COMMIT_IDS) {
2951 dm_integrity_io_error(ic, "journal commit ids", -EIO);
2955 DEBUG_print("first unused commit seq %d [%d,%d,%d,%d]\n",
2956 unused, used_commit_ids[0], used_commit_ids[1],
2957 used_commit_ids[2], used_commit_ids[3]);
2959 last_used = prev_commit_seq(unused);
2960 want_commit_seq = prev_commit_seq(last_used);
2962 if (!used_commit_ids[want_commit_seq] && used_commit_ids[prev_commit_seq(want_commit_seq)])
2963 journal_empty = true;
2965 write_start = max_commit_id_sections[last_used] + 1;
2966 if (unlikely(write_start >= ic->journal_sections))
2967 want_commit_seq = next_commit_seq(want_commit_seq);
2968 wraparound_section(ic, &write_start);
2971 for (write_sections = 0; write_sections < ic->journal_sections; write_sections++) {
2972 for (j = 0; j < ic->journal_section_sectors; j++) {
2973 struct journal_sector *js = access_journal(ic, i, j);
2975 if (js->commit_id != dm_integrity_commit_id(ic, i, j, want_commit_seq)) {
2977 * This could be caused by crash during writing.
2978 * We won't replay the inconsistent part of the
2981 DEBUG_print("commit id mismatch at position (%u, %u): %d != %d\n",
2982 i, j, find_commit_seq(ic, i, j, js->commit_id), want_commit_seq);
2987 if (unlikely(i >= ic->journal_sections))
2988 want_commit_seq = next_commit_seq(want_commit_seq);
2989 wraparound_section(ic, &i);
2993 if (!journal_empty) {
2994 DEBUG_print("replaying %u sections, starting at %u, commit seq %d\n",
2995 write_sections, write_start, want_commit_seq);
2996 do_journal_write(ic, write_start, write_sections, true);
2999 if (write_sections == ic->journal_sections && (ic->mode == 'J' || journal_empty)) {
3000 continue_section = write_start;
3001 ic->commit_seq = want_commit_seq;
3002 DEBUG_print("continuing from section %u, commit seq %d\n", write_start, ic->commit_seq);
3005 unsigned char erase_seq;
3007 DEBUG_print("clearing journal\n");
3009 erase_seq = prev_commit_seq(prev_commit_seq(last_used));
3011 init_journal(ic, s, 1, erase_seq);
3013 wraparound_section(ic, &s);
3014 if (ic->journal_sections >= 2) {
3015 init_journal(ic, s, ic->journal_sections - 2, erase_seq);
3016 s += ic->journal_sections - 2;
3017 wraparound_section(ic, &s);
3018 init_journal(ic, s, 1, erase_seq);
3021 continue_section = 0;
3022 ic->commit_seq = next_commit_seq(erase_seq);
3025 ic->committed_section = continue_section;
3026 ic->n_committed_sections = 0;
3028 ic->uncommitted_section = continue_section;
3029 ic->n_uncommitted_sections = 0;
3031 ic->free_section = continue_section;
3032 ic->free_section_entry = 0;
3033 ic->free_sectors = ic->journal_entries;
3035 ic->journal_tree_root = RB_ROOT;
3036 for (i = 0; i < ic->journal_entries; i++)
3037 init_journal_node(&ic->journal_tree[i]);
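/*
 * Entered from the reboot notifier (and from resume when stress
 * testing): in bitmap mode, shorten the flush interval and force an
 * immediate bitmap flush so the on-disk state stays consistent.
 */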
3040 static void dm_integrity_enter_synchronous_mode(struct dm_integrity_c *ic)
3042 DEBUG_print("dm_integrity_enter_synchronous_mode\n");
3044 if (ic->mode == 'B') {
3045 ic->bitmap_flush_interval = msecs_to_jiffies(10) + 1;
3046 ic->synchronous_mode = 1;
3048 cancel_delayed_work_sync(&ic->bitmap_flush_work);
3049 queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, 0);
3050 flush_workqueue(ic->commit_wq);
3054 static int dm_integrity_reboot(struct notifier_block *n, unsigned long code, void *x)
3056 struct dm_integrity_c *ic = container_of(n, struct dm_integrity_c, reboot_notifier);
3058 DEBUG_print("dm_integrity_reboot\n");
3060 dm_integrity_enter_synchronous_mode(ic);
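/*
 * Suspend: stop the autocommit timer and recalculation, drain the commit
 * and writer queues and, in bitmap mode, write out a clean bitmap and
 * clear the dirty-bitmap flag in the superblock.
 */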
3065 static void dm_integrity_postsuspend(struct dm_target *ti)
3067 struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
3070 WARN_ON(unregister_reboot_notifier(&ic->reboot_notifier));
3072 del_timer_sync(&ic->autocommit_timer);
3075 drain_workqueue(ic->recalc_wq);
3077 if (ic->mode == 'B')
3078 cancel_delayed_work_sync(&ic->bitmap_flush_work);
3080 queue_work(ic->commit_wq, &ic->commit_work);
3081 drain_workqueue(ic->commit_wq);
3083 if (ic->mode == 'J') {
3085 queue_work(ic->writer_wq, &ic->writer_work);
3086 drain_workqueue(ic->writer_wq);
3087 dm_integrity_flush_buffers(ic, true);
3090 if (ic->mode == 'B') {
3091 dm_integrity_flush_buffers(ic, true);
3093 /* set to 0 to test bitmap replay code */
3094 init_journal(ic, 0, ic->journal_sections, 0);
3095 ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
3096 r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
3098 dm_integrity_io_error(ic, "writing superblock", r);
3102 BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
3104 ic->journal_uptodate = true;
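/*
 * Resume: grow the bitmap if the device was extended, replay or reset a
 * dirty bitmap, restart recalculation where it left off and register the
 * reboot notifier.
 */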
3107 static void dm_integrity_resume(struct dm_target *ti)
3109 struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
3110 __u64 old_provided_data_sectors = le64_to_cpu(ic->sb->provided_data_sectors);
3113 DEBUG_print("resume\n");
3115 if (ic->provided_data_sectors != old_provided_data_sectors) {
3116 if (ic->provided_data_sectors > old_provided_data_sectors &&
3118 ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit) {
3119 rw_journal_sectors(ic, REQ_OP_READ, 0, 0,
3120 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
3121 block_bitmap_op(ic, ic->journal, old_provided_data_sectors,
3122 ic->provided_data_sectors - old_provided_data_sectors, BITMAP_OP_SET);
3123 rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
3124 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
3127 ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors);
3128 r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
3130 dm_integrity_io_error(ic, "writing superblock", r);
3133 if (ic->sb->flags & cpu_to_le32(SB_FLAG_DIRTY_BITMAP)) {
3134 DEBUG_print("resume dirty_bitmap\n");
3135 rw_journal_sectors(ic, REQ_OP_READ, 0, 0,
3136 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
3137 if (ic->mode == 'B') {
3138 if (ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit &&
3139 !ic->reset_recalculate_flag) {
3140 block_bitmap_copy(ic, ic->recalc_bitmap, ic->journal);
3141 block_bitmap_copy(ic, ic->may_write_bitmap, ic->journal);
3142 if (!block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors,
3143 BITMAP_OP_TEST_ALL_CLEAR)) {
3144 ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
3145 ic->sb->recalc_sector = cpu_to_le64(0);
3148 DEBUG_print("non-matching blocks_per_bitmap_bit: %u, %u\n",
3149 ic->sb->log2_blocks_per_bitmap_bit, ic->log2_blocks_per_bitmap_bit);
3150 ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit;
3151 block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET);
3152 block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_SET);
3153 block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_SET);
3154 rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
3155 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
3156 ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
3157 ic->sb->recalc_sector = cpu_to_le64(0);
3160 if (!(ic->sb->log2_blocks_per_bitmap_bit == ic->log2_blocks_per_bitmap_bit &&
3161 block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_TEST_ALL_CLEAR)) ||
3162 ic->reset_recalculate_flag) {
3163 ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
3164 ic->sb->recalc_sector = cpu_to_le64(0);
3166 init_journal(ic, 0, ic->journal_sections, 0);
3168 ic->sb->flags &= ~cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
3170 r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
3172 dm_integrity_io_error(ic, "writing superblock", r);
3175 if (ic->reset_recalculate_flag) {
3176 ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
3177 ic->sb->recalc_sector = cpu_to_le64(0);
3179 if (ic->mode == 'B') {
3180 ic->sb->flags |= cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
3181 ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit;
3182 r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
3184 dm_integrity_io_error(ic, "writing superblock", r);
3186 block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
3187 block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
3188 block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
3189 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
3190 le64_to_cpu(ic->sb->recalc_sector) < ic->provided_data_sectors) {
3191 block_bitmap_op(ic, ic->journal, le64_to_cpu(ic->sb->recalc_sector),
3192 ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
3193 block_bitmap_op(ic, ic->recalc_bitmap, le64_to_cpu(ic->sb->recalc_sector),
3194 ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
3195 block_bitmap_op(ic, ic->may_write_bitmap, le64_to_cpu(ic->sb->recalc_sector),
3196 ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
3198 rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
3199 ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
3203 DEBUG_print("testing recalc: %x\n", ic->sb->flags);
3204 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
3205 __u64 recalc_pos = le64_to_cpu(ic->sb->recalc_sector);
3206 DEBUG_print("recalc pos: %llx / %llx\n", recalc_pos, ic->provided_data_sectors);
3207 if (recalc_pos < ic->provided_data_sectors) {
3208 queue_work(ic->recalc_wq, &ic->recalc_work);
3209 } else if (recalc_pos > ic->provided_data_sectors) {
3210 ic->sb->recalc_sector = cpu_to_le64(ic->provided_data_sectors);
3211 recalc_write_super(ic);
3215 ic->reboot_notifier.notifier_call = dm_integrity_reboot;
3216 ic->reboot_notifier.next = NULL;
3217 ic->reboot_notifier.priority = INT_MAX - 1; /* be notified after md and before hardware drivers */
3218 WARN_ON(register_reboot_notifier(&ic->reboot_notifier));
3221 /* set to 1 to stress test synchronous mode */
3222 dm_integrity_enter_synchronous_mode(ic);
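/*
 * STATUSTYPE_INFO reports the mismatch count and recalculation progress;
 * STATUSTYPE_TABLE reconstructs the table line including all optional
 * arguments.
 */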
3226 static void dm_integrity_status(struct dm_target *ti, status_type_t type,
3227 unsigned status_flags, char *result, unsigned maxlen)
3229 struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
3234 case STATUSTYPE_INFO:
3236 (unsigned long long)atomic64_read(&ic->number_of_mismatches),
3237 ic->provided_data_sectors);
3238 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
3239 DMEMIT(" %llu", le64_to_cpu(ic->sb->recalc_sector));
3244 case STATUSTYPE_TABLE: {
3245 __u64 watermark_percentage = (__u64)(ic->journal_entries - ic->free_sectors_threshold) * 100;
3246 watermark_percentage += ic->journal_entries / 2;
3247 do_div(watermark_percentage, ic->journal_entries);
3249 arg_count += !!ic->meta_dev;
3250 arg_count += ic->sectors_per_block != 1;
3251 arg_count += !!(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING));
3252 arg_count += ic->reset_recalculate_flag;
3253 arg_count += ic->discard;
3254 arg_count += ic->mode == 'J';
3255 arg_count += ic->mode == 'J';
3256 arg_count += ic->mode == 'B';
3257 arg_count += ic->mode == 'B';
3258 arg_count += !!ic->internal_hash_alg.alg_string;
3259 arg_count += !!ic->journal_crypt_alg.alg_string;
3260 arg_count += !!ic->journal_mac_alg.alg_string;
3261 arg_count += (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0;
3262 arg_count += (ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) != 0;
3263 arg_count += ic->legacy_recalculate;
3264 DMEMIT("%s %llu %u %c %u", ic->dev->name, ic->start,
3265 ic->tag_size, ic->mode, arg_count);
3267 DMEMIT(" meta_device:%s", ic->meta_dev->name);
3268 if (ic->sectors_per_block != 1)
3269 DMEMIT(" block_size:%u", ic->sectors_per_block << SECTOR_SHIFT);
3270 if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
3271 DMEMIT(" recalculate");
3272 if (ic->reset_recalculate_flag)
3273 DMEMIT(" reset_recalculate");
3275 DMEMIT(" allow_discards");
3276 DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS);
3277 DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors);
3278 DMEMIT(" buffer_sectors:%u", 1U << ic->log2_buffer_sectors);
3279 if (ic->mode == 'J') {
3280 DMEMIT(" journal_watermark:%u", (unsigned)watermark_percentage);
3281 DMEMIT(" commit_time:%u", ic->autocommit_msec);
3283 if (ic->mode == 'B') {
3284 DMEMIT(" sectors_per_bit:%llu", (sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit);
3285 DMEMIT(" bitmap_flush_interval:%u", jiffies_to_msecs(ic->bitmap_flush_interval));
3287 if ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING)) != 0)
3288 DMEMIT(" fix_padding");
3289 if ((ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_HMAC)) != 0)
3290 DMEMIT(" fix_hmac");
3291 if (ic->legacy_recalculate)
3292 DMEMIT(" legacy_recalculate");
3294 #define EMIT_ALG(a, n) \
3296 if (ic->a.alg_string) { \
3297 DMEMIT(" %s:%s", n, ic->a.alg_string); \
3298 if (ic->a.key_string) \
3299 DMEMIT(":%s", ic->a.key_string);\
3302 EMIT_ALG(internal_hash_alg, "internal_hash");
3303 EMIT_ALG(journal_crypt_alg, "journal_crypt");
3304 EMIT_ALG(journal_mac_alg, "journal_mac");
3310 static int dm_integrity_iterate_devices(struct dm_target *ti,
3311 iterate_devices_callout_fn fn, void *data)
3313 struct dm_integrity_c *ic = ti->private;
3316 return fn(ti, ic->dev, ic->start + ic->initial_sectors + ic->metadata_run, ti->len, data);
3318 return fn(ti, ic->dev, 0, ti->len, data);
3321 static void dm_integrity_io_hints(struct dm_target *ti, struct queue_limits *limits)
3323 struct dm_integrity_c *ic = ti->private;
3325 if (ic->sectors_per_block > 1) {
3326 limits->logical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
3327 limits->physical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
3328 blk_limits_io_min(limits, ic->sectors_per_block << SECTOR_SHIFT);
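/*
 * Derive the journal geometry from the superblock: entry size (tag size
 * rounded up to JOURNAL_ENTRY_ROUNDUP), entries per sector and per
 * section, and the total number of journal entries.
 */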
3332 static void calculate_journal_section_size(struct dm_integrity_c *ic)
3334 unsigned sector_space = JOURNAL_SECTOR_DATA;
3336 ic->journal_sections = le32_to_cpu(ic->sb->journal_sections);
3337 ic->journal_entry_size = roundup(offsetof(struct journal_entry, last_bytes[ic->sectors_per_block]) + ic->tag_size,
3338 JOURNAL_ENTRY_ROUNDUP);
3340 if (ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC))
3341 sector_space -= JOURNAL_MAC_PER_SECTOR;
3342 ic->journal_entries_per_sector = sector_space / ic->journal_entry_size;
3343 ic->journal_section_entries = ic->journal_entries_per_sector * JOURNAL_BLOCK_SECTORS;
3344 ic->journal_section_sectors = (ic->journal_section_entries << ic->sb->log2_sectors_per_block) + JOURNAL_BLOCK_SECTORS;
3345 ic->journal_entries = ic->journal_section_entries * ic->journal_sections;
3348 static int calculate_device_limits(struct dm_integrity_c *ic)
3350 __u64 initial_sectors;
3352 calculate_journal_section_size(ic);
3353 initial_sectors = SB_SECTORS + (__u64)ic->journal_section_sectors * ic->journal_sections;
3354 if (initial_sectors + METADATA_PADDING_SECTORS >= ic->meta_device_sectors || initial_sectors > UINT_MAX)
3356 ic->initial_sectors = initial_sectors;
3358 if (!ic->meta_dev) {
3359 sector_t last_sector, last_area, last_offset;
3361 /* we have to maintain excessive padding for compatibility with existing volumes */
3362 __u64 metadata_run_padding =
3363 ic->sb->flags & cpu_to_le32(SB_FLAG_FIXED_PADDING) ?
3364 (__u64)(METADATA_PADDING_SECTORS << SECTOR_SHIFT) :
3365 (__u64)(1 << SECTOR_SHIFT << METADATA_PADDING_SECTORS);
3367 ic->metadata_run = round_up((__u64)ic->tag_size << (ic->sb->log2_interleave_sectors - ic->sb->log2_sectors_per_block),
3368 metadata_run_padding) >> SECTOR_SHIFT;
3369 if (!(ic->metadata_run & (ic->metadata_run - 1)))
3370 ic->log2_metadata_run = __ffs(ic->metadata_run);
3372 ic->log2_metadata_run = -1;
3374 get_area_and_offset(ic, ic->provided_data_sectors - 1, &last_area, &last_offset);
3375 last_sector = get_data_sector(ic, last_area, last_offset);
3376 if (last_sector < ic->start || last_sector >= ic->meta_device_sectors)
3379 __u64 meta_size = (ic->provided_data_sectors >> ic->sb->log2_sectors_per_block) * ic->tag_size;
3380 meta_size = (meta_size + ((1U << (ic->log2_buffer_sectors + SECTOR_SHIFT)) - 1))
3381 >> (ic->log2_buffer_sectors + SECTOR_SHIFT);
3382 meta_size <<= ic->log2_buffer_sectors;
3383 if (ic->initial_sectors + meta_size < ic->initial_sectors ||
3384 ic->initial_sectors + meta_size > ic->meta_device_sectors)
3386 ic->metadata_run = 1;
3387 ic->log2_metadata_run = 0;
3393 static void get_provided_data_sectors(struct dm_integrity_c *ic)
3395 if (!ic->meta_dev) {
3397 ic->provided_data_sectors = 0;
3398 for (test_bit = fls64(ic->meta_device_sectors) - 1; test_bit >= 3; test_bit--) {
3399 __u64 prev_data_sectors = ic->provided_data_sectors;
3401 ic->provided_data_sectors |= (sector_t)1 << test_bit;
3402 if (calculate_device_limits(ic))
3403 ic->provided_data_sectors = prev_data_sectors;
3406 ic->provided_data_sectors = ic->data_device_sectors;
3407 ic->provided_data_sectors &= ~(sector_t)(ic->sectors_per_block - 1);
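/*
 * Format a fresh superblock. Without a metadata device, the interleave
 * parameters are clamped to the supported range; with one, the journal
 * section count is maximized bit by bit while the device limits still
 * hold.
 */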
3411 static int initialize_superblock(struct dm_integrity_c *ic, unsigned journal_sectors, unsigned interleave_sectors)
3413 unsigned journal_sections;
3416 memset(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT);
3417 memcpy(ic->sb->magic, SB_MAGIC, 8);
3418 ic->sb->integrity_tag_size = cpu_to_le16(ic->tag_size);
3419 ic->sb->log2_sectors_per_block = __ffs(ic->sectors_per_block);
3420 if (ic->journal_mac_alg.alg_string)
3421 ic->sb->flags |= cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC);
3423 calculate_journal_section_size(ic);
3424 journal_sections = journal_sectors / ic->journal_section_sectors;
3425 if (!journal_sections)
3426 journal_sections = 1;
3428 if (ic->fix_hmac && (ic->internal_hash_alg.alg_string || ic->journal_mac_alg.alg_string)) {
3429 ic->sb->flags |= cpu_to_le32(SB_FLAG_FIXED_HMAC);
3430 get_random_bytes(ic->sb->salt, SALT_SIZE);
3433 if (!ic->meta_dev) {
3434 if (ic->fix_padding)
3435 ic->sb->flags |= cpu_to_le32(SB_FLAG_FIXED_PADDING);
3436 ic->sb->journal_sections = cpu_to_le32(journal_sections);
3437 if (!interleave_sectors)
3438 interleave_sectors = DEFAULT_INTERLEAVE_SECTORS;
3439 ic->sb->log2_interleave_sectors = __fls(interleave_sectors);
3440 ic->sb->log2_interleave_sectors = max((__u8)MIN_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
3441 ic->sb->log2_interleave_sectors = min((__u8)MAX_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
3443 get_provided_data_sectors(ic);
3444 if (!ic->provided_data_sectors)
3447 ic->sb->log2_interleave_sectors = 0;
3449 get_provided_data_sectors(ic);
3450 if (!ic->provided_data_sectors)
3454 ic->sb->journal_sections = cpu_to_le32(0);
3455 for (test_bit = fls(journal_sections) - 1; test_bit >= 0; test_bit--) {
3456 __u32 prev_journal_sections = le32_to_cpu(ic->sb->journal_sections);
3457 __u32 test_journal_sections = prev_journal_sections | (1U << test_bit);
3458 if (test_journal_sections > journal_sections)
3460 ic->sb->journal_sections = cpu_to_le32(test_journal_sections);
3461 if (calculate_device_limits(ic))
3462 ic->sb->journal_sections = cpu_to_le32(prev_journal_sections);
3465 if (!le32_to_cpu(ic->sb->journal_sections)) {
3466 if (ic->log2_buffer_sectors > 3) {
3467 ic->log2_buffer_sectors--;
3468 goto try_smaller_buffer;
3474 ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors);
3481 static void dm_integrity_set(struct dm_target *ti, struct dm_integrity_c *ic)
3483 struct gendisk *disk = dm_disk(dm_table_get_md(ti->table));
3484 struct blk_integrity bi;
3486 memset(&bi, 0, sizeof(bi));
3487 bi.profile = &dm_integrity_profile;
3488 bi.tuple_size = ic->tag_size;
3489 bi.tag_size = bi.tuple_size;
3490 bi.interval_exp = ic->sb->log2_sectors_per_block + SECTOR_SHIFT;
3492 blk_integrity_register(disk, &bi);
3493 blk_queue_max_integrity_segments(disk->queue, UINT_MAX);
3496 static void dm_integrity_free_page_list(struct page_list *pl)
3502 for (i = 0; pl[i].page; i++)
3503 __free_page(pl[i].page);
3507 static struct page_list *dm_integrity_alloc_page_list(unsigned n_pages)
3509 struct page_list *pl;
3512 pl = kvmalloc_array(n_pages + 1, sizeof(struct page_list), GFP_KERNEL | __GFP_ZERO);
3516 for (i = 0; i < n_pages; i++) {
3517 pl[i].page = alloc_page(GFP_KERNEL);
3519 dm_integrity_free_page_list(pl);
3523 pl[i - 1].next = &pl[i];
3531 static void dm_integrity_free_journal_scatterlist(struct dm_integrity_c *ic, struct scatterlist **sl)
3534 for (i = 0; i < ic->journal_sections; i++)
3539 static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_integrity_c *ic,
3540 struct page_list *pl)
3542 struct scatterlist **sl;
3545 sl = kvmalloc_array(ic->journal_sections,
3546 sizeof(struct scatterlist *),
3547 GFP_KERNEL | __GFP_ZERO);
3551 for (i = 0; i < ic->journal_sections; i++) {
3552 struct scatterlist *s;
3553 unsigned start_index, start_offset;
3554 unsigned end_index, end_offset;
3558 page_list_location(ic, i, 0, &start_index, &start_offset);
3559 page_list_location(ic, i, ic->journal_section_sectors - 1,
3560 &end_index, &end_offset);
3562 n_pages = (end_index - start_index + 1);
3564 s = kvmalloc_array(n_pages, sizeof(struct scatterlist),
3567 dm_integrity_free_journal_scatterlist(ic, sl);
3571 sg_init_table(s, n_pages);
3572 for (idx = start_index; idx <= end_index; idx++) {
3573 char *va = lowmem_page_address(pl[idx].page);
3574 unsigned start = 0, end = PAGE_SIZE;
3575 if (idx == start_index)
3576 start = start_offset;
3577 if (idx == end_index)
3578 end = end_offset + (1 << SECTOR_SHIFT);
3579 sg_set_buf(&s[idx - start_index], va + start, end - start);
3588 static void free_alg(struct alg_spec *a)
3590 kfree_sensitive(a->alg_string);
3591 kfree_sensitive(a->key);
3592 memset(a, 0, sizeof *a);
3595 static int get_alg_and_key(const char *arg, struct alg_spec *a, char **error, char *error_inval)
3601 a->alg_string = kstrdup(strchr(arg, ':') + 1, GFP_KERNEL);
3605 k = strchr(a->alg_string, ':');
3608 a->key_string = k + 1;
3609 if (strlen(a->key_string) & 1)
3612 a->key_size = strlen(a->key_string) / 2;
3613 a->key = kmalloc(a->key_size, GFP_KERNEL);
3616 if (hex2bin(a->key, a->key_string, a->key_size))
3622 *error = error_inval;
3625 *error = "Out of memory for an argument";
3629 static int get_mac(struct crypto_shash **hash, struct alg_spec *a, char **error,
3630 char *error_alg, char *error_key)
3634 if (a->alg_string) {
3635 *hash = crypto_alloc_shash(a->alg_string, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
3636 if (IS_ERR(*hash)) {
3644 r = crypto_shash_setkey(*hash, a->key, a->key_size);
3649 } else if (crypto_shash_get_flags(*hash) & CRYPTO_TFM_NEED_KEY) {
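/*
 * Allocate the in-memory journal. When journal encryption is requested,
 * a cipher with block size 1 apparently gets a precomputed keystream to
 * XOR with (journal_xor); other ciphers get one skcipher request per
 * section, with an IV derived by encrypting the section number.
 */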
3658 static int create_journal(struct dm_integrity_c *ic, char **error)
3662 __u64 journal_pages, journal_desc_size, journal_tree_size;
3663 unsigned char *crypt_data = NULL, *crypt_iv = NULL;
3664 struct skcipher_request *req = NULL;
3666 ic->commit_ids[0] = cpu_to_le64(0x1111111111111111ULL);
3667 ic->commit_ids[1] = cpu_to_le64(0x2222222222222222ULL);
3668 ic->commit_ids[2] = cpu_to_le64(0x3333333333333333ULL);
3669 ic->commit_ids[3] = cpu_to_le64(0x4444444444444444ULL);
3671 journal_pages = roundup((__u64)ic->journal_sections * ic->journal_section_sectors,
3672 PAGE_SIZE >> SECTOR_SHIFT) >> (PAGE_SHIFT - SECTOR_SHIFT);
3673 journal_desc_size = journal_pages * sizeof(struct page_list);
3674 if (journal_pages >= totalram_pages() - totalhigh_pages() || journal_desc_size > ULONG_MAX) {
3675 *error = "Journal doesn't fit into memory";
3679 ic->journal_pages = journal_pages;
3681 ic->journal = dm_integrity_alloc_page_list(ic->journal_pages);
3683 *error = "Could not allocate memory for journal";
3687 if (ic->journal_crypt_alg.alg_string) {
3688 unsigned ivsize, blocksize;
3689 struct journal_completion comp;
3692 ic->journal_crypt = crypto_alloc_skcipher(ic->journal_crypt_alg.alg_string, 0, CRYPTO_ALG_ALLOCATES_MEMORY);
3693 if (IS_ERR(ic->journal_crypt)) {
3694 *error = "Invalid journal cipher";
3695 r = PTR_ERR(ic->journal_crypt);
3696 ic->journal_crypt = NULL;
3699 ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
3700 blocksize = crypto_skcipher_blocksize(ic->journal_crypt);
3702 if (ic->journal_crypt_alg.key) {
3703 r = crypto_skcipher_setkey(ic->journal_crypt, ic->journal_crypt_alg.key,
3704 ic->journal_crypt_alg.key_size);
3706 *error = "Error setting encryption key";
3710 DEBUG_print("cipher %s, block size %u iv size %u\n",
3711 ic->journal_crypt_alg.alg_string, blocksize, ivsize);
3713 ic->journal_io = dm_integrity_alloc_page_list(ic->journal_pages);
3714 if (!ic->journal_io) {
3715 *error = "Could not allocate memory for journal io";
3720 if (blocksize == 1) {
3721 struct scatterlist *sg;
3723 req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
3725 *error = "Could not allocate crypt request";
3730 crypt_iv = kzalloc(ivsize, GFP_KERNEL);
3732 *error = "Could not allocate iv";
3737 ic->journal_xor = dm_integrity_alloc_page_list(ic->journal_pages);
3738 if (!ic->journal_xor) {
3739 *error = "Could not allocate memory for journal xor";
3744 sg = kvmalloc_array(ic->journal_pages + 1,
3745 sizeof(struct scatterlist),
3748 *error = "Unable to allocate sg list";
3752 sg_init_table(sg, ic->journal_pages + 1);
3753 for (i = 0; i < ic->journal_pages; i++) {
3754 char *va = lowmem_page_address(ic->journal_xor[i].page);
3756 sg_set_buf(&sg[i], va, PAGE_SIZE);
3758 sg_set_buf(&sg[i], &ic->commit_ids, sizeof ic->commit_ids);
3760 skcipher_request_set_crypt(req, sg, sg,
3761 PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, crypt_iv);
3762 init_completion(&comp.comp);
3763 comp.in_flight = (atomic_t)ATOMIC_INIT(1);
3764 if (do_crypt(true, req, &comp))
3765 wait_for_completion(&comp.comp);
3767 r = dm_integrity_failed(ic);
3769 *error = "Unable to encrypt journal";
3772 DEBUG_bytes(lowmem_page_address(ic->journal_xor[0].page), 64, "xor data");
3774 crypto_free_skcipher(ic->journal_crypt);
3775 ic->journal_crypt = NULL;
3777 unsigned crypt_len = roundup(ivsize, blocksize);
3779 req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
3781 *error = "Could not allocate crypt request";
3786 crypt_iv = kmalloc(ivsize, GFP_KERNEL);
3788 *error = "Could not allocate iv";
3793 crypt_data = kmalloc(crypt_len, GFP_KERNEL);
3795 *error = "Unable to allocate crypt data";
3800 ic->journal_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal);
3801 if (!ic->journal_scatterlist) {
3802 *error = "Unable to allocate sg list";
3806 ic->journal_io_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal_io);
3807 if (!ic->journal_io_scatterlist) {
3808 *error = "Unable to allocate sg list";
3812 ic->sk_requests = kvmalloc_array(ic->journal_sections,
3813 sizeof(struct skcipher_request *),
3814 GFP_KERNEL | __GFP_ZERO);
3815 if (!ic->sk_requests) {
3816 *error = "Unable to allocate sk requests";
3820 for (i = 0; i < ic->journal_sections; i++) {
3821 struct scatterlist sg;
3822 struct skcipher_request *section_req;
3823 __u32 section_le = cpu_to_le32(i);
3825 memset(crypt_iv, 0x00, ivsize);
3826 memset(crypt_data, 0x00, crypt_len);
3827 memcpy(crypt_data, &section_le, min((size_t)crypt_len, sizeof(section_le)));
3829 sg_init_one(&sg, crypt_data, crypt_len);
3830 skcipher_request_set_crypt(req, &sg, &sg, crypt_len, crypt_iv);
3831 init_completion(&comp.comp);
3832 comp.in_flight = (atomic_t)ATOMIC_INIT(1);
3833 if (do_crypt(true, req, &comp))
3834 wait_for_completion(&comp.comp);
3836 r = dm_integrity_failed(ic);
3838 *error = "Unable to generate iv";
3842 section_req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
3844 *error = "Unable to allocate crypt request";
3848 section_req->iv = kmalloc_array(ivsize, 2,
3850 if (!section_req->iv) {
3851 skcipher_request_free(section_req);
3852 *error = "Unable to allocate iv";
3856 memcpy(section_req->iv + ivsize, crypt_data, ivsize);
3857 section_req->cryptlen = (size_t)ic->journal_section_sectors << SECTOR_SHIFT;
3858 ic->sk_requests[i] = section_req;
3859 DEBUG_bytes(crypt_data, ivsize, "iv(%u)", i);
3864 for (i = 0; i < N_COMMIT_IDS; i++) {
3867 for (j = 0; j < i; j++) {
3868 if (ic->commit_ids[j] == ic->commit_ids[i]) {
3869 ic->commit_ids[i] = cpu_to_le64(le64_to_cpu(ic->commit_ids[i]) + 1);
3870 goto retest_commit_id;
3873 DEBUG_print("commit id %u: %016llx\n", i, ic->commit_ids[i]);
3876 journal_tree_size = (__u64)ic->journal_entries * sizeof(struct journal_node);
3877 if (journal_tree_size > ULONG_MAX) {
3878 *error = "Journal doesn't fit into memory";
3882 ic->journal_tree = kvmalloc(journal_tree_size, GFP_KERNEL);
3883 if (!ic->journal_tree) {
3884 *error = "Could not allocate memory for journal tree";
3890 skcipher_request_free(req);
3896 * Construct an integrity mapping
3900 * offset from the start of the device
3902 * D - direct writes, J - journal writes, B - bitmap mode, R - recovery mode
3903 * number of optional arguments
3904 * optional arguments:
3906 * interleave_sectors
3913 * bitmap_flush_interval
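 *
 * Example table line (illustrative only; the device, tag size and
 * journal size are placeholders):
 *	integrity /dev/sdb 0 32 J 1 journal_sectors:8192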
3919 static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
3921 struct dm_integrity_c *ic;
3924 unsigned extra_args;
3925 struct dm_arg_set as;
3926 static const struct dm_arg _args[] = {
3927 {0, 18, "Invalid number of feature args"},
3929 unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec;
3930 bool should_write_sb;
3932 unsigned long long start;
3933 __s8 log2_sectors_per_bitmap_bit = -1;
3934 __s8 log2_blocks_per_bitmap_bit;
3935 __u64 bits_in_journal;
3936 __u64 n_bitmap_bits;
3938 #define DIRECT_ARGUMENTS 4
3940 if (argc <= DIRECT_ARGUMENTS) {
3941 ti->error = "Invalid argument count";
3945 ic = kzalloc(sizeof(struct dm_integrity_c), GFP_KERNEL);
3947 ti->error = "Cannot allocate integrity context";
3951 ti->per_io_data_size = sizeof(struct dm_integrity_io);
3954 ic->in_progress = RB_ROOT;
3955 INIT_LIST_HEAD(&ic->wait_list);
3956 init_waitqueue_head(&ic->endio_wait);
3957 bio_list_init(&ic->flush_bio_list);
3958 init_waitqueue_head(&ic->copy_to_journal_wait);
3959 init_completion(&ic->crypto_backoff);
3960 atomic64_set(&ic->number_of_mismatches, 0);
3961 ic->bitmap_flush_interval = BITMAP_FLUSH_INTERVAL;
3963 r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ic->dev);
3965 ti->error = "Device lookup failed";
3969 if (sscanf(argv[1], "%llu%c", &start, &dummy) != 1 || start != (sector_t)start) {
3970 ti->error = "Invalid starting offset";
3976 if (strcmp(argv[2], "-")) {
3977 if (sscanf(argv[2], "%u%c", &ic->tag_size, &dummy) != 1 || !ic->tag_size) {
3978 ti->error = "Invalid tag size";
3984 if (!strcmp(argv[3], "J") || !strcmp(argv[3], "B") ||
3985 !strcmp(argv[3], "D") || !strcmp(argv[3], "R")) {
3986 ic->mode = argv[3][0];
3988 ti->error = "Invalid mode (expecting J, B, D, R)";
3993 journal_sectors = 0;
3994 interleave_sectors = DEFAULT_INTERLEAVE_SECTORS;
3995 buffer_sectors = DEFAULT_BUFFER_SECTORS;
3996 journal_watermark = DEFAULT_JOURNAL_WATERMARK;
3997 sync_msec = DEFAULT_SYNC_MSEC;
3998 ic->sectors_per_block = 1;
4000 as.argc = argc - DIRECT_ARGUMENTS;
4001 as.argv = argv + DIRECT_ARGUMENTS;
4002 r = dm_read_arg_group(_args, &as, &extra_args, &ti->error);
4006 while (extra_args--) {
4007 const char *opt_string;
4009 unsigned long long llval;
4010 opt_string = dm_shift_arg(&as);
4013 ti->error = "Not enough feature arguments";
4016 if (sscanf(opt_string, "journal_sectors:%u%c", &val, &dummy) == 1)
4017 journal_sectors = val ? val : 1;
4018 else if (sscanf(opt_string, "interleave_sectors:%u%c", &val, &dummy) == 1)
4019 interleave_sectors = val;
4020 else if (sscanf(opt_string, "buffer_sectors:%u%c", &val, &dummy) == 1)
4021 buffer_sectors = val;
4022 else if (sscanf(opt_string, "journal_watermark:%u%c", &val, &dummy) == 1 && val <= 100)
4023 journal_watermark = val;
4024 else if (sscanf(opt_string, "commit_time:%u%c", &val, &dummy) == 1)
4026 else if (!strncmp(opt_string, "meta_device:", strlen("meta_device:"))) {
4028 dm_put_device(ti, ic->meta_dev);
4029 ic->meta_dev = NULL;
4031 r = dm_get_device(ti, strchr(opt_string, ':') + 1,
4032 dm_table_get_mode(ti->table), &ic->meta_dev);
4034 ti->error = "Device lookup failed";
4037 } else if (sscanf(opt_string, "block_size:%u%c", &val, &dummy) == 1) {
4038 if (val < 1 << SECTOR_SHIFT ||
4039 val > MAX_SECTORS_PER_BLOCK << SECTOR_SHIFT ||
4042 ti->error = "Invalid block_size argument";
4045 ic->sectors_per_block = val >> SECTOR_SHIFT;
4046 } else if (sscanf(opt_string, "sectors_per_bit:%llu%c", &llval, &dummy) == 1) {
4047 log2_sectors_per_bitmap_bit = !llval ? 0 : __ilog2_u64(llval);
4048 } else if (sscanf(opt_string, "bitmap_flush_interval:%u%c", &val, &dummy) == 1) {
4049 if (val >= (uint64_t)UINT_MAX * 1000 / HZ) {
4051 ti->error = "Invalid bitmap_flush_interval argument";
4054 ic->bitmap_flush_interval = msecs_to_jiffies(val);
4055 } else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
4056 r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error,
4057 "Invalid internal_hash argument");
4060 } else if (!strncmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
4061 r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error,
4062 "Invalid journal_crypt argument");
4065 } else if (!strncmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
4066 r = get_alg_and_key(opt_string, &ic->journal_mac_alg, &ti->error,
4067 "Invalid journal_mac argument");
4070 } else if (!strcmp(opt_string, "recalculate")) {
4071 ic->recalculate_flag = true;
4072 } else if (!strcmp(opt_string, "reset_recalculate")) {
4073 ic->recalculate_flag = true;
4074 ic->reset_recalculate_flag = true;
4075 } else if (!strcmp(opt_string, "allow_discards")) {
4077 } else if (!strcmp(opt_string, "fix_padding")) {
4078 ic->fix_padding = true;
4079 } else if (!strcmp(opt_string, "fix_hmac")) {
4080 ic->fix_hmac = true;
4081 } else if (!strcmp(opt_string, "legacy_recalculate")) {
4082 ic->legacy_recalculate = true;
4085 ti->error = "Invalid argument";
4090 ic->data_device_sectors = i_size_read(ic->dev->bdev->bd_inode) >> SECTOR_SHIFT;
4092 ic->meta_device_sectors = ic->data_device_sectors;
4094 ic->meta_device_sectors = i_size_read(ic->meta_dev->bdev->bd_inode) >> SECTOR_SHIFT;
4096 if (!journal_sectors) {
4097 journal_sectors = min((sector_t)DEFAULT_MAX_JOURNAL_SECTORS,
4098 ic->data_device_sectors >> DEFAULT_JOURNAL_SIZE_FACTOR);
4101 if (!buffer_sectors)
4103 ic->log2_buffer_sectors = min((int)__fls(buffer_sectors), 31 - SECTOR_SHIFT);
	r = get_mac(&ic->internal_hash, &ic->internal_hash_alg, &ti->error,
		    "Invalid internal hash", "Error setting internal hash key");
	if (r)
		goto bad;

	r = get_mac(&ic->journal_mac, &ic->journal_mac_alg, &ti->error,
		    "Invalid journal mac", "Error setting journal mac key");
	if (r)
		goto bad;

	if (!ic->tag_size) {
		if (!ic->internal_hash) {
			ti->error = "Unknown tag size";
			r = -EINVAL;
			goto bad;
		}
		ic->tag_size = crypto_shash_digestsize(ic->internal_hash);
	}
	if (ic->tag_size > MAX_TAG_SIZE) {
		ti->error = "Too big tag size";
		r = -EINVAL;
		goto bad;
	}
	if (!(ic->tag_size & (ic->tag_size - 1)))
		ic->log2_tag_size = __ffs(ic->tag_size);
	else
		ic->log2_tag_size = -1;

	if (ic->mode == 'B' && !ic->internal_hash) {
		r = -EINVAL;
		ti->error = "Bitmap mode can be only used with internal hash";
		goto bad;
	}

	if (ic->discard && !ic->internal_hash) {
		r = -EINVAL;
		ti->error = "Discard can be only used with internal hash";
		goto bad;
	}

	ic->autocommit_jiffies = msecs_to_jiffies(sync_msec);
	ic->autocommit_msec = sync_msec;
	timer_setup(&ic->autocommit_timer, autocommit_fn, 0);

	ic->io = dm_io_client_create();
	if (IS_ERR(ic->io)) {
		r = PTR_ERR(ic->io);
		ic->io = NULL;
		ti->error = "Cannot allocate dm io";
		goto bad;
	}

	r = mempool_init_slab_pool(&ic->journal_io_mempool, JOURNAL_IO_MEMPOOL, journal_io_cache);
	if (r) {
		ti->error = "Cannot allocate mempool";
		goto bad;
	}

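	/*
	 * All workqueues are WQ_MEM_RECLAIM because they sit in the I/O path
	 * and must be able to make forward progress under memory pressure.
	 */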
	ic->metadata_wq = alloc_workqueue("dm-integrity-metadata",
					  WQ_MEM_RECLAIM, METADATA_WORKQUEUE_MAX_ACTIVE);
	if (!ic->metadata_wq) {
		ti->error = "Cannot allocate workqueue";
		r = -ENOMEM;
		goto bad;
	}

	/*
	 * If this workqueue were percpu, it would cause bio reordering
	 * and reduced performance.
	 */
	ic->wait_wq = alloc_workqueue("dm-integrity-wait", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
	if (!ic->wait_wq) {
		ti->error = "Cannot allocate workqueue";
		r = -ENOMEM;
		goto bad;
	}

	ic->offload_wq = alloc_workqueue("dm-integrity-offload", WQ_MEM_RECLAIM,
					 METADATA_WORKQUEUE_MAX_ACTIVE);
	if (!ic->offload_wq) {
		ti->error = "Cannot allocate workqueue";
		r = -ENOMEM;
		goto bad;
	}

	ic->commit_wq = alloc_workqueue("dm-integrity-commit", WQ_MEM_RECLAIM, 1);
	if (!ic->commit_wq) {
		ti->error = "Cannot allocate workqueue";
		r = -ENOMEM;
		goto bad;
	}
	INIT_WORK(&ic->commit_work, integrity_commit);

	if (ic->mode == 'J' || ic->mode == 'B') {
		ic->writer_wq = alloc_workqueue("dm-integrity-writer", WQ_MEM_RECLAIM, 1);
		if (!ic->writer_wq) {
			ti->error = "Cannot allocate workqueue";
			r = -ENOMEM;
			goto bad;
		}
		INIT_WORK(&ic->writer_work, integrity_writer);
	}

	ic->sb = alloc_pages_exact(SB_SECTORS << SECTOR_SHIFT, GFP_KERNEL);
	if (!ic->sb) {
		r = -ENOMEM;
		ti->error = "Cannot allocate superblock area";
		goto bad;
	}

	r = sync_rw_sb(ic, REQ_OP_READ, 0);
	if (r) {
		ti->error = "Error reading superblock";
		goto bad;
	}
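	/*
	 * A missing magic on an all-zero superblock area means a freshly
	 * created device that must be formatted; a non-zero area without the
	 * magic is rejected as uninitialized.
	 */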
	should_write_sb = false;
	if (memcmp(ic->sb->magic, SB_MAGIC, 8)) {
		if (ic->mode != 'R') {
			if (memchr_inv(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT)) {
				r = -EINVAL;
				ti->error = "The device is not initialized";
				goto bad;
			}
		}

		r = initialize_superblock(ic, journal_sectors, interleave_sectors);
		if (r) {
			ti->error = "Could not initialize superblock";
			goto bad;
		}
		if (ic->mode != 'R')
			should_write_sb = true;
	}

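	/*
	 * Sanity-check the superblock (whether just formatted or read from
	 * disk) against the parameters given in the table line.
	 */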
	if (!ic->sb->version || ic->sb->version > SB_VERSION_5) {
		r = -EINVAL;
		ti->error = "Unknown version";
		goto bad;
	}
	if (le16_to_cpu(ic->sb->integrity_tag_size) != ic->tag_size) {
		r = -EINVAL;
		ti->error = "Tag size doesn't match the information in superblock";
		goto bad;
	}
	if (ic->sb->log2_sectors_per_block != __ffs(ic->sectors_per_block)) {
		r = -EINVAL;
		ti->error = "Block size doesn't match the information in superblock";
		goto bad;
	}
	if (!le32_to_cpu(ic->sb->journal_sections)) {
		r = -EINVAL;
		ti->error = "Corrupted superblock, journal_sections is 0";
		goto bad;
	}

	/* make sure that ti->max_io_len doesn't overflow */
	if (!ic->meta_dev) {
		if (ic->sb->log2_interleave_sectors < MIN_LOG2_INTERLEAVE_SECTORS ||
		    ic->sb->log2_interleave_sectors > MAX_LOG2_INTERLEAVE_SECTORS) {
			r = -EINVAL;
			ti->error = "Invalid interleave_sectors in the superblock";
			goto bad;
		}
	} else {
		if (ic->sb->log2_interleave_sectors) {
			r = -EINVAL;
			ti->error = "Invalid interleave_sectors in the superblock";
			goto bad;
		}
	}

	if (!!(ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC)) != !!ic->journal_mac_alg.alg_string) {
		r = -EINVAL;
		ti->error = "Journal mac mismatch";
		goto bad;
	}

	get_provided_data_sectors(ic);
	if (!ic->provided_data_sectors) {
		r = -EINVAL;
		ti->error = "The device is too small";
		goto bad;
	}

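	/*
	 * calculate_device_limits() fails when the metadata doesn't fit; on
	 * a separate metadata device, retry with progressively smaller
	 * buffers (down to 2^3 = 8 sectors) before giving up.
	 */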
try_smaller_buffer:
	r = calculate_device_limits(ic);
	if (r) {
		if (ic->meta_dev) {
			if (ic->log2_buffer_sectors > 3) {
				ic->log2_buffer_sectors--;
				goto try_smaller_buffer;
			}
		}
		ti->error = "The device is too small";
		goto bad;
	}

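	/*
	 * In bitmap mode the journal area holds the bitmap, so one bitmap
	 * bit must cover enough data sectors for the whole device to be
	 * describable by the bits available in the journal; grow the per-bit
	 * granularity until it fits.
	 */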
	if (log2_sectors_per_bitmap_bit < 0)
		log2_sectors_per_bitmap_bit = __fls(DEFAULT_SECTORS_PER_BITMAP_BIT);
	if (log2_sectors_per_bitmap_bit < ic->sb->log2_sectors_per_block)
		log2_sectors_per_bitmap_bit = ic->sb->log2_sectors_per_block;

	bits_in_journal = ((__u64)ic->journal_section_sectors * ic->journal_sections) << (SECTOR_SHIFT + 3);
	if (bits_in_journal > UINT_MAX)
		bits_in_journal = UINT_MAX;
	while (bits_in_journal < (ic->provided_data_sectors + ((sector_t)1 << log2_sectors_per_bitmap_bit) - 1) >> log2_sectors_per_bitmap_bit)
		log2_sectors_per_bitmap_bit++;

	log2_blocks_per_bitmap_bit = log2_sectors_per_bitmap_bit - ic->sb->log2_sectors_per_block;
	ic->log2_blocks_per_bitmap_bit = log2_blocks_per_bitmap_bit;
	if (should_write_sb) {
		ic->sb->log2_blocks_per_bitmap_bit = log2_blocks_per_bitmap_bit;
	}
	n_bitmap_bits = ((ic->provided_data_sectors >> ic->sb->log2_sectors_per_block)
				+ (((sector_t)1 << log2_blocks_per_bitmap_bit) - 1)) >> log2_blocks_per_bitmap_bit;
	ic->n_bitmap_blocks = DIV_ROUND_UP(n_bitmap_bits, BITMAP_BLOCK_SIZE * 8);

	if (!ic->meta_dev)
		ic->log2_buffer_sectors = min(ic->log2_buffer_sectors, (__u8)__ffs(ic->metadata_run));

	if (ti->len > ic->provided_data_sectors) {
		r = -EINVAL;
		ti->error = "Not enough provided sectors for requested mapping size";
		goto bad;
	}

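	/*
	 * journal_watermark is the percentage of journal entries that may be
	 * in use before a commit is forced; store the complementary number
	 * of free entries, rounded to the nearest integer.
	 */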
	threshold = (__u64)ic->journal_entries * (100 - journal_watermark);
	threshold += 50;
	do_div(threshold, 100);
	ic->free_sectors_threshold = threshold;

	DEBUG_print("initialized:\n");
	DEBUG_print("	integrity_tag_size %u\n", le16_to_cpu(ic->sb->integrity_tag_size));
	DEBUG_print("	journal_entry_size %u\n", ic->journal_entry_size);
	DEBUG_print("	journal_entries_per_sector %u\n", ic->journal_entries_per_sector);
	DEBUG_print("	journal_section_entries %u\n", ic->journal_section_entries);
	DEBUG_print("	journal_section_sectors %u\n", ic->journal_section_sectors);
	DEBUG_print("	journal_sections %u\n", (unsigned)le32_to_cpu(ic->sb->journal_sections));
	DEBUG_print("	journal_entries %u\n", ic->journal_entries);
	DEBUG_print("	log2_interleave_sectors %d\n", ic->sb->log2_interleave_sectors);
	DEBUG_print("	data_device_sectors 0x%llx\n", i_size_read(ic->dev->bdev->bd_inode) >> SECTOR_SHIFT);
	DEBUG_print("	initial_sectors 0x%x\n", ic->initial_sectors);
	DEBUG_print("	metadata_run 0x%x\n", ic->metadata_run);
	DEBUG_print("	log2_metadata_run %d\n", ic->log2_metadata_run);
	DEBUG_print("	provided_data_sectors 0x%llx (%llu)\n", ic->provided_data_sectors, ic->provided_data_sectors);
	DEBUG_print("	log2_buffer_sectors %u\n", ic->log2_buffer_sectors);
	DEBUG_print("	bits_in_journal %llu\n", bits_in_journal);

	if (ic->recalculate_flag && !(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))) {
		ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
		ic->sb->recalc_sector = cpu_to_le64(0);
	}

	if (ic->internal_hash) {
		ic->recalc_wq = alloc_workqueue("dm-integrity-recalc", WQ_MEM_RECLAIM, 1);
		if (!ic->recalc_wq) {
			ti->error = "Cannot allocate workqueue";
			r = -ENOMEM;
			goto bad;
		}
		INIT_WORK(&ic->recalc_work, integrity_recalc);
		ic->recalc_buffer = vmalloc(RECALC_SECTORS << SECTOR_SHIFT);
		if (!ic->recalc_buffer) {
			ti->error = "Cannot allocate buffer for recalculating";
			r = -ENOMEM;
			goto bad;
		}
		ic->recalc_tags = kvmalloc_array(RECALC_SECTORS >> ic->sb->log2_sectors_per_block,
						 ic->tag_size, GFP_KERNEL);
		if (!ic->recalc_tags) {
			ti->error = "Cannot allocate tags for recalculating";
			r = -ENOMEM;
			goto bad;
		}
	} else {
		if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
			ti->error = "Recalculate can only be specified with internal_hash";
			r = -EINVAL;
			goto bad;
		}
	}

	if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
	    le64_to_cpu(ic->sb->recalc_sector) < ic->provided_data_sectors &&
	    dm_integrity_disable_recalculate(ic)) {
		ti->error = "Recalculating with HMAC is disabled for security reasons - if you really need it, use the argument \"legacy_recalculate\"";
		r = -EOPNOTSUPP;
		goto bad;
	}

	ic->bufio = dm_bufio_client_create(ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev,
					   1U << (SECTOR_SHIFT + ic->log2_buffer_sectors), 1, 0, NULL, NULL);
	if (IS_ERR(ic->bufio)) {
		r = PTR_ERR(ic->bufio);
		ti->error = "Cannot initialize dm-bufio";
		ic->bufio = NULL;
		goto bad;
	}
	dm_bufio_set_sector_offset(ic->bufio, ic->start + ic->initial_sectors);

	if (ic->mode != 'R') {
		r = create_journal(ic, &ti->error);
		if (r)
			goto bad;
	}

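	/*
	 * Bitmap mode reuses the journal pages as the in-memory bitmap:
	 * allocate the auxiliary recalc/may-write bitmaps and one
	 * bitmap_block_status per BITMAP_BLOCK_SIZE block, each with its
	 * bitmap pointer aimed into the journal page list.
	 */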
	if (ic->mode == 'B') {
		unsigned i;
		unsigned n_bitmap_pages = DIV_ROUND_UP(ic->n_bitmap_blocks, PAGE_SIZE / BITMAP_BLOCK_SIZE);

		ic->recalc_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages);
		if (!ic->recalc_bitmap) {
			r = -ENOMEM;
			goto bad;
		}
		ic->may_write_bitmap = dm_integrity_alloc_page_list(n_bitmap_pages);
		if (!ic->may_write_bitmap) {
			r = -ENOMEM;
			goto bad;
		}
		ic->bbs = kvmalloc_array(ic->n_bitmap_blocks, sizeof(struct bitmap_block_status), GFP_KERNEL);
		if (!ic->bbs) {
			r = -ENOMEM;
			goto bad;
		}
		INIT_DELAYED_WORK(&ic->bitmap_flush_work, bitmap_flush_work);
		for (i = 0; i < ic->n_bitmap_blocks; i++) {
			struct bitmap_block_status *bbs = &ic->bbs[i];
			unsigned sector, pl_index, pl_offset;

			INIT_WORK(&bbs->work, bitmap_block_work);
			bbs->ic = ic;
			bbs->idx = i;
			bio_list_init(&bbs->bio_queue);
			spin_lock_init(&bbs->bio_queue_lock);

			sector = i * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT);
			pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
			pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);

			bbs->bitmap = lowmem_page_address(ic->journal[pl_index].page) + pl_offset;
		}
	}

	if (should_write_sb) {
		init_journal(ic, 0, ic->journal_sections, 0);
		r = dm_integrity_failed(ic);
		if (unlikely(r)) {
			ti->error = "Error initializing journal";
			goto bad;
		}
		r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
		if (r) {
			ti->error = "Error initializing superblock";
			goto bad;
		}
		ic->just_formatted = true;
	}

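	/*
	 * Cap bio size: without a separate metadata device a bio must not
	 * cross an interleave unit, and in bitmap mode it must not cover
	 * more data than one bitmap block can describe.
	 */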
	if (!ic->meta_dev) {
		r = dm_set_target_max_io_len(ti, 1U << ic->sb->log2_interleave_sectors);
		if (r)
			goto bad;
	}
	if (ic->mode == 'B') {
		unsigned max_io_len = ((sector_t)ic->sectors_per_block << ic->log2_blocks_per_bitmap_bit) * (BITMAP_BLOCK_SIZE * 8);
		if (!max_io_len)
			max_io_len = 1U << 31;
		DEBUG_print("max_io_len: old %u, new %u\n", ti->max_io_len, max_io_len);
		if (!ti->max_io_len || ti->max_io_len > max_io_len) {
			r = dm_set_target_max_io_len(ti, max_io_len);
			if (r)
				goto bad;
		}
	}

	if (!ic->internal_hash)
		dm_integrity_set(ti, ic);

	ti->num_flush_bios = 1;
	ti->flush_supported = true;
	if (ic->discard)
		ti->num_discard_bios = 1;

	return 0;

bad:
	dm_integrity_dtr(ti);
	return r;
}

static void dm_integrity_dtr(struct dm_target *ti)
{
	struct dm_integrity_c *ic = ti->private;

	BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
	BUG_ON(!list_empty(&ic->wait_list));

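	/*
	 * Release everything in roughly the reverse order of allocation.
	 * Each resource is checked before freeing because dm_integrity_ctr()
	 * calls this function on its error path with a partially constructed
	 * target.
	 */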
	if (ic->metadata_wq)
		destroy_workqueue(ic->metadata_wq);
	if (ic->wait_wq)
		destroy_workqueue(ic->wait_wq);
	if (ic->offload_wq)
		destroy_workqueue(ic->offload_wq);
	if (ic->commit_wq)
		destroy_workqueue(ic->commit_wq);
	if (ic->writer_wq)
		destroy_workqueue(ic->writer_wq);
	if (ic->recalc_wq)
		destroy_workqueue(ic->recalc_wq);
	vfree(ic->recalc_buffer);
	kvfree(ic->recalc_tags);
	kvfree(ic->bbs);
	if (ic->bufio)
		dm_bufio_client_destroy(ic->bufio);
	mempool_exit(&ic->journal_io_mempool);
	if (ic->io)
		dm_io_client_destroy(ic->io);
	if (ic->dev)
		dm_put_device(ti, ic->dev);
	if (ic->meta_dev)
		dm_put_device(ti, ic->meta_dev);
	dm_integrity_free_page_list(ic->journal);
	dm_integrity_free_page_list(ic->journal_io);
	dm_integrity_free_page_list(ic->journal_xor);
	dm_integrity_free_page_list(ic->recalc_bitmap);
	dm_integrity_free_page_list(ic->may_write_bitmap);
	if (ic->journal_scatterlist)
		dm_integrity_free_journal_scatterlist(ic, ic->journal_scatterlist);
	if (ic->journal_io_scatterlist)
		dm_integrity_free_journal_scatterlist(ic, ic->journal_io_scatterlist);
	if (ic->sk_requests) {
		unsigned i;

		for (i = 0; i < ic->journal_sections; i++) {
			struct skcipher_request *req = ic->sk_requests[i];
			if (req) {
				kfree_sensitive(req->iv);
				skcipher_request_free(req);
			}
		}
		kvfree(ic->sk_requests);
	}
	kvfree(ic->journal_tree);

	if (ic->sb)
		free_pages_exact(ic->sb, SB_SECTORS << SECTOR_SHIFT);

	if (ic->internal_hash)
		crypto_free_shash(ic->internal_hash);
	free_alg(&ic->internal_hash_alg);

	if (ic->journal_crypt)
		crypto_free_skcipher(ic->journal_crypt);
	free_alg(&ic->journal_crypt_alg);

	if (ic->journal_mac)
		crypto_free_shash(ic->journal_mac);
	free_alg(&ic->journal_mac_alg);

	kfree(ic);
}

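/*
 * For reference, a table line for this target has the form (illustrative
 * values, not taken from this file):
 *
 *   0 <data_sectors> integrity <data_dev> 0 <tag_size> <J|B|D|R> \
 *     1 internal_hash:crc32c
 *
 * where the trailing count and arguments are the optional feature
 * arguments parsed in dm_integrity_ctr() above.
 */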
static struct target_type integrity_target = {
	.name = "integrity",
	.version = {1, 8, 0},
	.module = THIS_MODULE,
	.features = DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY,
	.ctr = dm_integrity_ctr,
	.dtr = dm_integrity_dtr,
	.map = dm_integrity_map,
	.postsuspend = dm_integrity_postsuspend,
	.resume = dm_integrity_resume,
	.status = dm_integrity_status,
	.iterate_devices = dm_integrity_iterate_devices,
	.io_hints = dm_integrity_io_hints,
};

static int __init dm_integrity_init(void)
{
	int r;

	journal_io_cache = kmem_cache_create("integrity_journal_io",
					     sizeof(struct journal_io), 0, 0, NULL);
	if (!journal_io_cache) {
		DMERR("can't allocate journal io cache");
		return -ENOMEM;
	}

	r = dm_register_target(&integrity_target);

	if (r < 0)
		DMERR("register failed %d", r);

	return r;
}

static void __exit dm_integrity_exit(void)
{
	dm_unregister_target(&integrity_target);
	kmem_cache_destroy(journal_io_cache);
}

module_init(dm_integrity_init);
module_exit(dm_integrity_exit);

MODULE_AUTHOR("Milan Broz");
MODULE_AUTHOR("Mikulas Patocka");
MODULE_DESCRIPTION(DM_NAME " target for integrity tags extension");
MODULE_LICENSE("GPL");