/*
 * Copyright (C) 2016-2017 Red Hat, Inc. All rights reserved.
 * Copyright (C) 2016-2017 Milan Broz
 * Copyright (C) 2016-2017 Mikulas Patocka
 *
 * This file is released under the GPL.
 */

#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/vmalloc.h>
#include <linux/sort.h>
#include <linux/rbtree.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <crypto/hash.h>
#include <crypto/skcipher.h>
#include <linux/async_tx.h>
#include <linux/dm-bufio.h>

#define DM_MSG_PREFIX "integrity"

#define DEFAULT_INTERLEAVE_SECTORS 32768
#define DEFAULT_JOURNAL_SIZE_FACTOR 7
#define DEFAULT_BUFFER_SECTORS 128
#define DEFAULT_JOURNAL_WATERMARK 50
#define DEFAULT_SYNC_MSEC 10000
#define DEFAULT_MAX_JOURNAL_SECTORS 131072
#define MIN_LOG2_INTERLEAVE_SECTORS 3
#define MAX_LOG2_INTERLEAVE_SECTORS 31
#define METADATA_WORKQUEUE_MAX_ACTIVE 16
#define RECALC_SECTORS 8192
#define RECALC_WRITE_SUPER 16

/*
 * Warning - DEBUG_PRINT prints security-sensitive data to the log,
 * so it should not be enabled in the official kernel.
 */
//#define DEBUG_PRINT
//#define INTERNAL_VERIFY

/*
 * On disk structures
 */

#define SB_MAGIC "integrt"
#define SB_VERSION_1 1
#define SB_VERSION_2 2
#define SB_SECTORS 8
#define MAX_SECTORS_PER_BLOCK 8

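/*
 * Rough on-disk layout, as implied by get_data_sector() and
 * get_metadata_sector_and_offset() below (an illustration, not an
 * authoritative spec):
 *
 *   | superblock (SB_SECTORS) | journal | area 0 tags | area 0 data |
 *   | area 1 tags | area 1 data | ...
 *
 * With the "meta_device" argument the tag areas live on a separate
 * metadata device instead and the data device holds only data.
 */
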
struct superblock {
	__u8 magic[8];
	__u8 version;
	__u8 log2_interleave_sectors;
	__u16 integrity_tag_size;
	__u32 journal_sections;
	__u64 provided_data_sectors;	/* userspace uses this value */
	__u32 flags;
	__u8 log2_sectors_per_block;
	__u8 pad[3];
	__u64 recalc_sector;
};

#define SB_FLAG_HAVE_JOURNAL_MAC 0x1
#define SB_FLAG_RECALCULATING 0x2

#define JOURNAL_ENTRY_ROUNDUP 8

typedef __u64 commit_id_t;
#define JOURNAL_MAC_PER_SECTOR 8

struct journal_entry {
	union {
		struct {
			__u32 sector_lo;
			__u32 sector_hi;
		} s;
		__u64 sector;
	} u;
	commit_id_t last_bytes[0];
	/* __u8 tag[0]; */
};

#define journal_entry_tag(ic, je) ((__u8 *)&(je)->last_bytes[(ic)->sectors_per_block])

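/*
 * Example layout of one journal entry for sectors_per_block == 2 and
 * tag_size == 4 (illustration only, derived from the definitions above):
 *
 *   u.sector (8 bytes) | last_bytes[0] (8) | last_bytes[1] (8) | tag (4)
 *
 * journal_entry_tag() points just past last_bytes[sectors_per_block].
 */
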
#if BITS_PER_LONG == 64
#define journal_entry_set_sector(je, x) do { smp_wmb(); WRITE_ONCE((je)->u.sector, cpu_to_le64(x)); } while (0)
#define journal_entry_get_sector(je) le64_to_cpu((je)->u.sector)
#elif defined(CONFIG_LBDAF)
#define journal_entry_set_sector(je, x) do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); WRITE_ONCE((je)->u.s.sector_hi, cpu_to_le32((x) >> 32)); } while (0)
#define journal_entry_get_sector(je) le64_to_cpu((je)->u.sector)
#else
#define journal_entry_set_sector(je, x) do { (je)->u.s.sector_lo = cpu_to_le32(x); smp_wmb(); WRITE_ONCE((je)->u.s.sector_hi, cpu_to_le32(0)); } while (0)
#define journal_entry_get_sector(je) le32_to_cpu((je)->u.s.sector_lo)
#endif
#define journal_entry_is_unused(je) ((je)->u.s.sector_hi == cpu_to_le32(-1))
#define journal_entry_set_unused(je) do { ((je)->u.s.sector_hi = cpu_to_le32(-1)); } while (0)
#define journal_entry_is_inprogress(je) ((je)->u.s.sector_hi == cpu_to_le32(-2))
#define journal_entry_set_inprogress(je) do { ((je)->u.s.sector_hi = cpu_to_le32(-2)); } while (0)

#define JOURNAL_BLOCK_SECTORS 8
#define JOURNAL_SECTOR_DATA ((1 << SECTOR_SHIFT) - sizeof(commit_id_t))
#define JOURNAL_MAC_SIZE (JOURNAL_MAC_PER_SECTOR * JOURNAL_BLOCK_SECTORS)

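/*
 * A journal section is JOURNAL_BLOCK_SECTORS sectors of packed journal
 * entries followed by the data sectors they describe.  Each 512-byte
 * journal sector ends with an 8-byte commit_id and reserves 8 more bytes
 * for a slice of the per-section MAC, leaving 496 bytes for entries
 * (see struct journal_sector below).
 */
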
struct journal_sector {
	__u8 entries[JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR];
	__u8 mac[JOURNAL_MAC_PER_SECTOR];
	commit_id_t commit_id;
};

#define MAX_TAG_SIZE (JOURNAL_SECTOR_DATA - JOURNAL_MAC_PER_SECTOR - offsetof(struct journal_entry, last_bytes[MAX_SECTORS_PER_BLOCK]))

#define METADATA_PADDING_SECTORS 8

#define N_COMMIT_IDS 4

static unsigned char prev_commit_seq(unsigned char seq)
{
	return (seq + N_COMMIT_IDS - 1) % N_COMMIT_IDS;
}

static unsigned char next_commit_seq(unsigned char seq)
{
	return (seq + 1) % N_COMMIT_IDS;
}

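/*
 * Example: with N_COMMIT_IDS == 4 the sequence numbers cycle
 * 0 -> 1 -> 2 -> 3 -> 0, so prev_commit_seq(0) == 3 and
 * next_commit_seq(3) == 0.
 */
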
/*
 * In-memory structures
 */

struct journal_node {
	struct rb_node node;
	sector_t sector;
};

struct alg_spec {
	char *alg_string;
	char *key_string;
	__u8 *key;
	unsigned key_size;
};

struct dm_integrity_c {
	struct dm_dev *dev;
	struct dm_dev *meta_dev;
	unsigned tag_size;
	__s8 log2_tag_size;
	sector_t start;
	mempool_t journal_io_mempool;
	struct dm_io_client *io;
	struct dm_bufio_client *bufio;
	struct workqueue_struct *metadata_wq;
	struct superblock *sb;
	unsigned journal_pages;
	struct page_list *journal;
	struct page_list *journal_io;
	struct page_list *journal_xor;

	struct crypto_skcipher *journal_crypt;
	struct scatterlist **journal_scatterlist;
	struct scatterlist **journal_io_scatterlist;
	struct skcipher_request **sk_requests;

	struct crypto_shash *journal_mac;

	struct journal_node *journal_tree;
	struct rb_root journal_tree_root;

	sector_t provided_data_sectors;

	unsigned short journal_entry_size;
	unsigned char journal_entries_per_sector;
	unsigned char journal_section_entries;
	unsigned short journal_section_sectors;
	unsigned journal_sections;
	unsigned journal_entries;
	sector_t data_device_sectors;
	sector_t meta_device_sectors;
	unsigned initial_sectors;
	unsigned metadata_run;
	__s8 log2_metadata_run;
	__u8 log2_buffer_sectors;
	__u8 sectors_per_block;

	unsigned char mode;
	int suspending;

	int failed;

	struct crypto_shash *internal_hash;

	/* these variables are locked with endio_wait.lock */
	struct rb_root in_progress;
	struct list_head wait_list;
	wait_queue_head_t endio_wait;
	struct workqueue_struct *wait_wq;

	unsigned char commit_seq;
	commit_id_t commit_ids[N_COMMIT_IDS];

	unsigned committed_section;
	unsigned n_committed_sections;

	unsigned uncommitted_section;
	unsigned n_uncommitted_sections;

	unsigned free_section;
	unsigned char free_section_entry;
	unsigned free_sectors;

	unsigned free_sectors_threshold;

	struct workqueue_struct *commit_wq;
	struct work_struct commit_work;

	struct workqueue_struct *writer_wq;
	struct work_struct writer_work;

	struct workqueue_struct *recalc_wq;
	struct work_struct recalc_work;
	u8 *recalc_buffer;
	u8 *recalc_tags;

	struct bio_list flush_bio_list;

	unsigned long autocommit_jiffies;
	struct timer_list autocommit_timer;
	unsigned autocommit_msec;

	wait_queue_head_t copy_to_journal_wait;

	struct completion crypto_backoff;

	bool journal_uptodate;
	bool just_formatted;

	struct alg_spec internal_hash_alg;
	struct alg_spec journal_crypt_alg;
	struct alg_spec journal_mac_alg;

	atomic64_t number_of_mismatches;
};

struct dm_integrity_range {
	sector_t logical_sector;
	unsigned n_sectors;
	bool waiting;
	union {
		struct rb_node node;
		struct {
			struct task_struct *task;
			struct list_head wait_entry;
		};
	};
};

struct dm_integrity_io {
	struct work_struct work;

	struct dm_integrity_c *ic;
	bool write;
	bool fua;

	struct dm_integrity_range range;

	sector_t metadata_block;
	unsigned metadata_offset;

	atomic_t in_flight;
	blk_status_t bi_status;

	struct completion *completion;

	struct gendisk *orig_bi_disk;
	u8 orig_bi_partno;
	bio_end_io_t *orig_bi_end_io;
	struct bio_integrity_payload *orig_bi_integrity;
	struct bvec_iter orig_bi_iter;
};

struct journal_completion {
	struct dm_integrity_c *ic;
	atomic_t in_flight;
	struct completion comp;
};

struct journal_io {
	struct dm_integrity_range range;
	struct journal_completion *comp;
};

static struct kmem_cache *journal_io_cache;

#define JOURNAL_IO_MEMPOOL 32

#ifdef DEBUG_PRINT
#define DEBUG_print(x, ...) printk(KERN_DEBUG x, ##__VA_ARGS__)
static void __DEBUG_bytes(__u8 *bytes, size_t len, const char *msg, ...)
{
	va_list args;
	va_start(args, msg);
	vprintk(msg, args);
	va_end(args);
	if (len)
		pr_cont(":");
	while (len) {
		pr_cont(" %02x", *bytes);
		bytes++;
		len--;
	}
	pr_cont("\n");
}
#define DEBUG_bytes(bytes, len, msg, ...) __DEBUG_bytes(bytes, len, KERN_DEBUG msg, ##__VA_ARGS__)
#else
#define DEBUG_print(x, ...) do { } while (0)
#define DEBUG_bytes(bytes, len, msg, ...) do { } while (0)
#endif

/*
 * DM Integrity profile; tag protection is performed by the layer above (dm-crypt).
 */
static const struct blk_integrity_profile dm_integrity_profile = {
	.name = "DM-DIF-EXT-TAG",
	.generate_fn = NULL,
	.verify_fn = NULL,
};

static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map);
static void integrity_bio_wait(struct work_struct *w);
static void dm_integrity_dtr(struct dm_target *ti);

static void dm_integrity_io_error(struct dm_integrity_c *ic, const char *msg, int err)
{
	if (err == -EILSEQ)
		atomic64_inc(&ic->number_of_mismatches);
	if (!cmpxchg(&ic->failed, 0, err))
		DMERR("Error on %s: %d", msg, err);
}

static int dm_integrity_failed(struct dm_integrity_c *ic)
{
	return READ_ONCE(ic->failed);
}

static commit_id_t dm_integrity_commit_id(struct dm_integrity_c *ic, unsigned i,
					  unsigned j, unsigned char seq)
{
	/*
	 * XOR the number with the section and sector, so that if a piece of
	 * the journal is written in the wrong place, it is detected.
	 */
	return ic->commit_ids[seq] ^ cpu_to_le64(((__u64)i << 32) ^ j);
}

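/*
 * Worked example (illustrative): for section i == 2, sector j == 5 and
 * seq == 1 the result is commit_ids[1] ^ cpu_to_le64((2ULL << 32) ^ 5),
 * so a journal block replayed from the wrong section or sector will not
 * match the expected commit id.
 */
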
static void get_area_and_offset(struct dm_integrity_c *ic, sector_t data_sector,
				sector_t *area, sector_t *offset)
{
	if (!ic->meta_dev) {
		__u8 log2_interleave_sectors = ic->sb->log2_interleave_sectors;
		*area = data_sector >> log2_interleave_sectors;
		*offset = (unsigned)data_sector & ((1U << log2_interleave_sectors) - 1);
	} else {
		*area = 0;
		*offset = data_sector;
	}
}

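/*
 * Example with the default log2_interleave_sectors == 15 (and no
 * meta_dev): data_sector 74565 (0x12345) yields area 2 (74565 >> 15)
 * and offset 9029 (74565 & 0x7fff).
 */
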
#define sector_to_block(ic, n)						\
do {									\
	BUG_ON((n) & (unsigned)((ic)->sectors_per_block - 1));		\
	(n) >>= (ic)->sb->log2_sectors_per_block;			\
} while (0)

static __u64 get_metadata_sector_and_offset(struct dm_integrity_c *ic, sector_t area,
					    sector_t offset, unsigned *metadata_offset)
{
	__u64 ms;
	unsigned mo;

	ms = area << ic->sb->log2_interleave_sectors;
	if (likely(ic->log2_metadata_run >= 0))
		ms += area << ic->log2_metadata_run;
	else
		ms += area * ic->metadata_run;
	ms >>= ic->log2_buffer_sectors;

	sector_to_block(ic, offset);

	if (likely(ic->log2_tag_size >= 0)) {
		ms += offset >> (SECTOR_SHIFT + ic->log2_buffer_sectors - ic->log2_tag_size);
		mo = (offset << ic->log2_tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1);
	} else {
		ms += (__u64)offset * ic->tag_size >> (SECTOR_SHIFT + ic->log2_buffer_sectors);
		mo = (offset * ic->tag_size) & ((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - 1);
	}
	*metadata_offset = mo;
	return ms;
}

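/*
 * The metadata sector is computed in units of buffer blocks
 * (1 << log2_buffer_sectors sectors): first skip the tag areas of the
 * preceding interleave areas, then add the position of this block's tag
 * bytes within the current tag area.  *metadata_offset is the byte
 * offset of the tag inside the returned buffer block.
 */
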
static sector_t get_data_sector(struct dm_integrity_c *ic, sector_t area, sector_t offset)
{
	sector_t result;

	if (ic->meta_dev)
		return offset;

	result = area << ic->sb->log2_interleave_sectors;
	if (likely(ic->log2_metadata_run >= 0))
		result += (area + 1) << ic->log2_metadata_run;
	else
		result += (area + 1) * ic->metadata_run;

	result += (sector_t)ic->initial_sectors + offset;
	result += ic->start;

	return result;
}

static void wraparound_section(struct dm_integrity_c *ic, unsigned *sec_ptr)
{
	if (unlikely(*sec_ptr >= ic->journal_sections))
		*sec_ptr -= ic->journal_sections;
}

static void sb_set_version(struct dm_integrity_c *ic)
{
	if (ic->meta_dev || ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
		ic->sb->version = SB_VERSION_2;
	else
		ic->sb->version = SB_VERSION_1;
}

static int sync_rw_sb(struct dm_integrity_c *ic, int op, int op_flags)
{
	struct dm_io_request io_req;
	struct dm_io_region io_loc;

	io_req.bi_op = op;
	io_req.bi_op_flags = op_flags;
	io_req.mem.type = DM_IO_KMEM;
	io_req.mem.ptr.addr = ic->sb;
	io_req.notify.fn = NULL;
	io_req.client = ic->io;
	io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev;
	io_loc.sector = ic->start;
	io_loc.count = SB_SECTORS;

	return dm_io(&io_req, 1, &io_loc, NULL);
}

static void access_journal_check(struct dm_integrity_c *ic, unsigned section, unsigned offset,
				 bool e, const char *function)
{
#if defined(CONFIG_DM_DEBUG) || defined(INTERNAL_VERIFY)
	unsigned limit = e ? ic->journal_section_entries : ic->journal_section_sectors;

	if (unlikely(section >= ic->journal_sections) ||
	    unlikely(offset >= limit)) {
		printk(KERN_CRIT "%s: invalid access at (%u,%u), limit (%u,%u)\n",
		       function, section, offset, ic->journal_sections, limit);
		BUG();
	}
#endif
}

static void page_list_location(struct dm_integrity_c *ic, unsigned section, unsigned offset,
			       unsigned *pl_index, unsigned *pl_offset)
{
	unsigned sector;

	access_journal_check(ic, section, offset, false, "page_list_location");

	sector = section * ic->journal_section_sectors + offset;

	*pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
	*pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);
}

static struct journal_sector *access_page_list(struct dm_integrity_c *ic, struct page_list *pl,
					       unsigned section, unsigned offset, unsigned *n_sectors)
{
	unsigned pl_index, pl_offset;
	char *va;

	page_list_location(ic, section, offset, &pl_index, &pl_offset);

	if (n_sectors)
		*n_sectors = (PAGE_SIZE - pl_offset) >> SECTOR_SHIFT;

	va = lowmem_page_address(pl[pl_index].page);

	return (struct journal_sector *)(va + pl_offset);
}

static struct journal_sector *access_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset)
{
	return access_page_list(ic, ic->journal, section, offset, NULL);
}

static struct journal_entry *access_journal_entry(struct dm_integrity_c *ic, unsigned section, unsigned n)
{
	unsigned rel_sector, offset;
	struct journal_sector *js;

	access_journal_check(ic, section, n, true, "access_journal_entry");

	rel_sector = n % JOURNAL_BLOCK_SECTORS;
	offset = n / JOURNAL_BLOCK_SECTORS;

	js = access_journal(ic, section, rel_sector);
	return (struct journal_entry *)((char *)js + offset * ic->journal_entry_size);
}

static struct journal_sector *access_journal_data(struct dm_integrity_c *ic, unsigned section, unsigned n)
{
	n <<= ic->sb->log2_sectors_per_block;

	n += JOURNAL_BLOCK_SECTORS;

	access_journal_check(ic, section, n, false, "access_journal_data");

	return access_journal(ic, section, n);
}

static void section_mac(struct dm_integrity_c *ic, unsigned section, __u8 result[JOURNAL_MAC_SIZE])
{
	SHASH_DESC_ON_STACK(desc, ic->journal_mac);
	int r;
	unsigned j, size;

	desc->tfm = ic->journal_mac;

	r = crypto_shash_init(desc);
	if (unlikely(r)) {
		dm_integrity_io_error(ic, "crypto_shash_init", r);
		goto err;
	}

	for (j = 0; j < ic->journal_section_entries; j++) {
		struct journal_entry *je = access_journal_entry(ic, section, j);
		r = crypto_shash_update(desc, (__u8 *)&je->u.sector, sizeof je->u.sector);
		if (unlikely(r)) {
			dm_integrity_io_error(ic, "crypto_shash_update", r);
			goto err;
		}
	}

	size = crypto_shash_digestsize(ic->journal_mac);

	if (likely(size <= JOURNAL_MAC_SIZE)) {
		r = crypto_shash_final(desc, result);
		if (unlikely(r)) {
			dm_integrity_io_error(ic, "crypto_shash_final", r);
			goto err;
		}
		memset(result + size, 0, JOURNAL_MAC_SIZE - size);
	} else {
		__u8 digest[HASH_MAX_DIGESTSIZE];

		if (WARN_ON(size > sizeof(digest))) {
			dm_integrity_io_error(ic, "digest_size", -EINVAL);
			goto err;
		}
		r = crypto_shash_final(desc, digest);
		if (unlikely(r)) {
			dm_integrity_io_error(ic, "crypto_shash_final", r);
			goto err;
		}
		memcpy(result, digest, JOURNAL_MAC_SIZE);
	}

	return;
err:
	memset(result, 0, JOURNAL_MAC_SIZE);
}

static void rw_section_mac(struct dm_integrity_c *ic, unsigned section, bool wr)
{
	__u8 result[JOURNAL_MAC_SIZE];
	unsigned j;

	if (!ic->journal_mac)
		return;

	section_mac(ic, section, result);

	for (j = 0; j < JOURNAL_BLOCK_SECTORS; j++) {
		struct journal_sector *js = access_journal(ic, section, j);

		if (likely(wr))
			memcpy(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR);
		else {
			if (memcmp(&js->mac, result + (j * JOURNAL_MAC_PER_SECTOR), JOURNAL_MAC_PER_SECTOR))
				dm_integrity_io_error(ic, "journal mac", -EILSEQ);
		}
	}
}

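/*
 * The per-section MAC covers the sector numbers of all journal entries
 * in the section; section_mac() computes it and rw_section_mac() either
 * stores or verifies it, JOURNAL_MAC_PER_SECTOR bytes in each of the
 * section's JOURNAL_BLOCK_SECTORS entry sectors.
 */
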
static void complete_journal_op(void *context)
{
	struct journal_completion *comp = context;
	BUG_ON(!atomic_read(&comp->in_flight));
	if (likely(atomic_dec_and_test(&comp->in_flight)))
		complete(&comp->comp);
}

static void xor_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
			unsigned n_sections, struct journal_completion *comp)
{
	struct async_submit_ctl submit;
	size_t n_bytes = (size_t)(n_sections * ic->journal_section_sectors) << SECTOR_SHIFT;
	unsigned pl_index, pl_offset, section_index;
	struct page_list *source_pl, *target_pl;

	if (likely(encrypt)) {
		source_pl = ic->journal;
		target_pl = ic->journal_io;
	} else {
		source_pl = ic->journal_io;
		target_pl = ic->journal;
	}

	page_list_location(ic, section, 0, &pl_index, &pl_offset);

	atomic_add(roundup(pl_offset + n_bytes, PAGE_SIZE) >> PAGE_SHIFT, &comp->in_flight);

	init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL, complete_journal_op, comp, NULL);

	section_index = pl_index;

	do {
		size_t this_step;
		struct page *src_pages[2];
		struct page *dst_page;

		while (unlikely(pl_index == section_index)) {
			unsigned dummy;
			if (likely(encrypt))
				rw_section_mac(ic, section, true);
			section++;
			n_sections--;
			if (!n_sections)
				break;
			page_list_location(ic, section, 0, &section_index, &dummy);
		}

		this_step = min(n_bytes, (size_t)PAGE_SIZE - pl_offset);
		dst_page = target_pl[pl_index].page;
		src_pages[0] = source_pl[pl_index].page;
		src_pages[1] = ic->journal_xor[pl_index].page;

		async_xor(dst_page, src_pages, pl_offset, 2, this_step, &submit);

		pl_index++;
		pl_offset = 0;
		n_bytes -= this_step;
	} while (n_bytes);

	BUG_ON(n_sections);

	async_tx_issue_pending_all();
}

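/*
 * With journal encryption in XOR mode, ic->journal_xor holds a
 * pregenerated keystream: the on-disk journal (ic->journal_io) is the
 * in-memory journal XORed with that keystream, so the same xor_journal()
 * call performs both encryption and decryption.
 */
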
static void complete_journal_encrypt(struct crypto_async_request *req, int err)
{
	struct journal_completion *comp = req->data;
	if (unlikely(err)) {
		if (likely(err == -EINPROGRESS)) {
			complete(&comp->ic->crypto_backoff);
			return;
		}
		dm_integrity_io_error(comp->ic, "asynchronous encrypt", err);
	}
	complete_journal_op(comp);
}

static bool do_crypt(bool encrypt, struct skcipher_request *req, struct journal_completion *comp)
{
	int r;

	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				      complete_journal_encrypt, comp);
	if (likely(encrypt))
		r = crypto_skcipher_encrypt(req);
	else
		r = crypto_skcipher_decrypt(req);
	if (likely(!r))
		return false;
	if (likely(r == -EINPROGRESS))
		return true;
	if (likely(r == -EBUSY)) {
		wait_for_completion(&comp->ic->crypto_backoff);
		reinit_completion(&comp->ic->crypto_backoff);
		return true;
	}
	dm_integrity_io_error(comp->ic, "encrypt", r);
	return false;
}

static void crypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
			  unsigned n_sections, struct journal_completion *comp)
{
	struct scatterlist **source_sg;
	struct scatterlist **target_sg;

	atomic_add(2, &comp->in_flight);

	if (likely(encrypt)) {
		source_sg = ic->journal_scatterlist;
		target_sg = ic->journal_io_scatterlist;
	} else {
		source_sg = ic->journal_io_scatterlist;
		target_sg = ic->journal_scatterlist;
	}

	do {
		struct skcipher_request *req;
		unsigned ivsize;
		char *iv;

		if (likely(encrypt))
			rw_section_mac(ic, section, true);

		req = ic->sk_requests[section];
		ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
		iv = req->iv;

		memcpy(iv, iv + ivsize, ivsize);

		req->src = source_sg[section];
		req->dst = target_sg[section];

		if (unlikely(do_crypt(encrypt, req, comp)))
			atomic_inc(&comp->in_flight);

		section++;
		n_sections--;
	} while (n_sections);

	atomic_dec(&comp->in_flight);
	complete_journal_op(comp);
}

static void encrypt_journal(struct dm_integrity_c *ic, bool encrypt, unsigned section,
			    unsigned n_sections, struct journal_completion *comp)
{
	if (ic->journal_xor)
		return xor_journal(ic, encrypt, section, n_sections, comp);
	else
		return crypt_journal(ic, encrypt, section, n_sections, comp);
}

static void complete_journal_io(unsigned long error, void *context)
{
	struct journal_completion *comp = context;
	if (unlikely(error != 0))
		dm_integrity_io_error(comp->ic, "writing journal", -EIO);
	complete_journal_op(comp);
}

static void rw_journal_sectors(struct dm_integrity_c *ic, int op, int op_flags,
			       unsigned sector, unsigned n_sectors, struct journal_completion *comp)
{
	struct dm_io_request io_req;
	struct dm_io_region io_loc;
	unsigned pl_index, pl_offset;
	int r;

	if (unlikely(dm_integrity_failed(ic))) {
		if (comp)
			complete_journal_io(-1UL, comp);
		return;
	}

	pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
	pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);

	io_req.bi_op = op;
	io_req.bi_op_flags = op_flags;
	io_req.mem.type = DM_IO_PAGE_LIST;
	if (ic->journal_io)
		io_req.mem.ptr.pl = &ic->journal_io[pl_index];
	else
		io_req.mem.ptr.pl = &ic->journal[pl_index];
	io_req.mem.offset = pl_offset;
	if (likely(comp != NULL)) {
		io_req.notify.fn = complete_journal_io;
		io_req.notify.context = comp;
	} else {
		io_req.notify.fn = NULL;
	}
	io_req.client = ic->io;
	io_loc.bdev = ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev;
	io_loc.sector = ic->start + SB_SECTORS + sector;
	io_loc.count = n_sectors;

	r = dm_io(&io_req, 1, &io_loc, NULL);
	if (unlikely(r)) {
		dm_integrity_io_error(ic, op == REQ_OP_READ ? "reading journal" : "writing journal", r);
		if (comp) {
			WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
			complete_journal_io(-1UL, comp);
		}
	}
}

static void rw_journal(struct dm_integrity_c *ic, int op, int op_flags, unsigned section,
		       unsigned n_sections, struct journal_completion *comp)
{
	unsigned sector, n_sectors;

	sector = section * ic->journal_section_sectors;
	n_sectors = n_sections * ic->journal_section_sectors;

	rw_journal_sectors(ic, op, op_flags, sector, n_sectors, comp);
}

static void write_journal(struct dm_integrity_c *ic, unsigned commit_start, unsigned commit_sections)
{
	struct journal_completion io_comp;
	struct journal_completion crypt_comp_1;
	struct journal_completion crypt_comp_2;
	unsigned i;

	io_comp.ic = ic;
	init_completion(&io_comp.comp);

	if (commit_start + commit_sections <= ic->journal_sections) {
		io_comp.in_flight = (atomic_t)ATOMIC_INIT(1);
		if (ic->journal_io) {
			crypt_comp_1.ic = ic;
			init_completion(&crypt_comp_1.comp);
			crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
			encrypt_journal(ic, true, commit_start, commit_sections, &crypt_comp_1);
			wait_for_completion_io(&crypt_comp_1.comp);
		} else {
			for (i = 0; i < commit_sections; i++)
				rw_section_mac(ic, commit_start + i, true);
		}
		rw_journal(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, commit_start,
			   commit_sections, &io_comp);
	} else {
		unsigned to_end;
		io_comp.in_flight = (atomic_t)ATOMIC_INIT(2);
		to_end = ic->journal_sections - commit_start;
		if (ic->journal_io) {
			crypt_comp_1.ic = ic;
			init_completion(&crypt_comp_1.comp);
			crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
			encrypt_journal(ic, true, commit_start, to_end, &crypt_comp_1);
			if (try_wait_for_completion(&crypt_comp_1.comp)) {
				rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
				reinit_completion(&crypt_comp_1.comp);
				crypt_comp_1.in_flight = (atomic_t)ATOMIC_INIT(0);
				encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_1);
				wait_for_completion_io(&crypt_comp_1.comp);
			} else {
				crypt_comp_2.ic = ic;
				init_completion(&crypt_comp_2.comp);
				crypt_comp_2.in_flight = (atomic_t)ATOMIC_INIT(0);
				encrypt_journal(ic, true, 0, commit_sections - to_end, &crypt_comp_2);
				wait_for_completion_io(&crypt_comp_1.comp);
				rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
				wait_for_completion_io(&crypt_comp_2.comp);
			}
		} else {
			for (i = 0; i < to_end; i++)
				rw_section_mac(ic, commit_start + i, true);
			rw_journal(ic, REQ_OP_WRITE, REQ_FUA, commit_start, to_end, &io_comp);
			for (i = 0; i < commit_sections - to_end; i++)
				rw_section_mac(ic, i, true);
		}
		rw_journal(ic, REQ_OP_WRITE, REQ_FUA, 0, commit_sections - to_end, &io_comp);
	}

	wait_for_completion_io(&io_comp.comp);
}

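/*
 * If the committed range wraps around the end of the journal area, the
 * write is issued as two I/Os (commit_start to the end, then sector 0
 * onward); io_comp.in_flight is primed with 2 in that path so the
 * completion fires only after both finish.
 */
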
static void copy_from_journal(struct dm_integrity_c *ic, unsigned section, unsigned offset,
			      unsigned n_sectors, sector_t target, io_notify_fn fn, void *data)
{
	struct dm_io_request io_req;
	struct dm_io_region io_loc;
	int r;
	unsigned sector, pl_index, pl_offset;

	BUG_ON((target | n_sectors | offset) & (unsigned)(ic->sectors_per_block - 1));

	if (unlikely(dm_integrity_failed(ic))) {
		fn(-1UL, data);
		return;
	}

	sector = section * ic->journal_section_sectors + JOURNAL_BLOCK_SECTORS + offset;

	pl_index = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
	pl_offset = (sector << SECTOR_SHIFT) & (PAGE_SIZE - 1);

	io_req.bi_op = REQ_OP_WRITE;
	io_req.bi_op_flags = 0;
	io_req.mem.type = DM_IO_PAGE_LIST;
	io_req.mem.ptr.pl = &ic->journal[pl_index];
	io_req.mem.offset = pl_offset;
	io_req.notify.fn = fn;
	io_req.notify.context = data;
	io_req.client = ic->io;
	io_loc.bdev = ic->dev->bdev;
	io_loc.sector = target;
	io_loc.count = n_sectors;

	r = dm_io(&io_req, 1, &io_loc, NULL);
	if (unlikely(r)) {
		WARN_ONCE(1, "asynchronous dm_io failed: %d", r);
		fn(-1UL, data);
	}
}

static bool ranges_overlap(struct dm_integrity_range *range1, struct dm_integrity_range *range2)
{
	return range1->logical_sector < range2->logical_sector + range2->n_sectors &&
	       range1->logical_sector + range1->n_sectors > range2->logical_sector;
}

static bool add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range, bool check_waiting)
{
	struct rb_node **n = &ic->in_progress.rb_node;
	struct rb_node *parent;

	BUG_ON((new_range->logical_sector | new_range->n_sectors) & (unsigned)(ic->sectors_per_block - 1));

	if (likely(check_waiting)) {
		struct dm_integrity_range *range;
		list_for_each_entry(range, &ic->wait_list, wait_entry) {
			if (unlikely(ranges_overlap(range, new_range)))
				return false;
		}
	}

	parent = NULL;

	while (*n) {
		struct dm_integrity_range *range = container_of(*n, struct dm_integrity_range, node);

		parent = *n;
		if (new_range->logical_sector + new_range->n_sectors <= range->logical_sector) {
			n = &range->node.rb_left;
		} else if (new_range->logical_sector >= range->logical_sector + range->n_sectors) {
			n = &range->node.rb_right;
		} else {
			return false;
		}
	}

	rb_link_node(&new_range->node, parent, n);
	rb_insert_color(&new_range->node, &ic->in_progress);

	return true;
}

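/*
 * In-flight ranges are kept in the ic->in_progress rb-tree keyed by
 * logical sector.  add_new_range() returns false when the new range
 * overlaps either an in-flight range or, if check_waiting is set, a
 * range already queued on ic->wait_list; the caller then decides
 * whether to offload the bio or sleep.
 */
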
static void remove_range_unlocked(struct dm_integrity_c *ic, struct dm_integrity_range *range)
{
	rb_erase(&range->node, &ic->in_progress);
	while (unlikely(!list_empty(&ic->wait_list))) {
		struct dm_integrity_range *last_range =
			list_first_entry(&ic->wait_list, struct dm_integrity_range, wait_entry);
		struct task_struct *last_range_task;
		last_range_task = last_range->task;
		list_del(&last_range->wait_entry);
		if (!add_new_range(ic, last_range, false)) {
			last_range->task = last_range_task;
			list_add(&last_range->wait_entry, &ic->wait_list);
			break;
		}
		last_range->waiting = false;
		wake_up_process(last_range_task);
	}
}

static void remove_range(struct dm_integrity_c *ic, struct dm_integrity_range *range)
{
	unsigned long flags;

	spin_lock_irqsave(&ic->endio_wait.lock, flags);
	remove_range_unlocked(ic, range);
	spin_unlock_irqrestore(&ic->endio_wait.lock, flags);
}

static void wait_and_add_new_range(struct dm_integrity_c *ic, struct dm_integrity_range *new_range)
{
	new_range->waiting = true;
	list_add_tail(&new_range->wait_entry, &ic->wait_list);
	new_range->task = current;
	do {
		__set_current_state(TASK_UNINTERRUPTIBLE);
		spin_unlock_irq(&ic->endio_wait.lock);
		io_schedule();
		spin_lock_irq(&ic->endio_wait.lock);
	} while (unlikely(new_range->waiting));
}

static void add_new_range_and_wait(struct dm_integrity_c *ic, struct dm_integrity_range *new_range)
{
	if (unlikely(!add_new_range(ic, new_range, true)))
		wait_and_add_new_range(ic, new_range);
}

static void init_journal_node(struct journal_node *node)
{
	RB_CLEAR_NODE(&node->node);
	node->sector = (sector_t)-1;
}

static void add_journal_node(struct dm_integrity_c *ic, struct journal_node *node, sector_t sector)
{
	struct rb_node **link;
	struct rb_node *parent;

	node->sector = sector;
	BUG_ON(!RB_EMPTY_NODE(&node->node));

	link = &ic->journal_tree_root.rb_node;
	parent = NULL;

	while (*link) {
		struct journal_node *j;
		parent = *link;
		j = container_of(parent, struct journal_node, node);
		if (sector < j->sector)
			link = &j->node.rb_left;
		else
			link = &j->node.rb_right;
	}

	rb_link_node(&node->node, parent, link);
	rb_insert_color(&node->node, &ic->journal_tree_root);
}

static void remove_journal_node(struct dm_integrity_c *ic, struct journal_node *node)
{
	BUG_ON(RB_EMPTY_NODE(&node->node));
	rb_erase(&node->node, &ic->journal_tree_root);
	init_journal_node(node);
}

#define NOT_FOUND (-1U)

static unsigned find_journal_node(struct dm_integrity_c *ic, sector_t sector, sector_t *next_sector)
{
	struct rb_node *n = ic->journal_tree_root.rb_node;
	unsigned found = NOT_FOUND;
	*next_sector = (sector_t)-1;
	while (n) {
		struct journal_node *j = container_of(n, struct journal_node, node);
		if (sector == j->sector) {
			found = j - ic->journal_tree;
		}
		if (sector < j->sector) {
			*next_sector = j->sector;
			n = j->node.rb_left;
		} else {
			n = j->node.rb_right;
		}
	}

	return found;
}

static bool test_journal_node(struct dm_integrity_c *ic, unsigned pos, sector_t sector)
{
	struct journal_node *node, *next_node;
	struct rb_node *next;

	if (unlikely(pos >= ic->journal_entries))
		return false;
	node = &ic->journal_tree[pos];
	if (unlikely(RB_EMPTY_NODE(&node->node)))
		return false;
	if (unlikely(node->sector != sector))
		return false;

	next = rb_next(&node->node);
	if (unlikely(!next))
		return true;

	next_node = container_of(next, struct journal_node, node);
	return next_node->sector != sector;
}

static bool find_newer_committed_node(struct dm_integrity_c *ic, struct journal_node *node)
{
	struct rb_node *next;
	struct journal_node *next_node;
	unsigned next_section;

	BUG_ON(RB_EMPTY_NODE(&node->node));

	next = rb_next(&node->node);
	if (unlikely(!next))
		return false;

	next_node = container_of(next, struct journal_node, node);

	if (next_node->sector != node->sector)
		return false;

	next_section = (unsigned)(next_node - ic->journal_tree) / ic->journal_section_entries;
	if (next_section >= ic->committed_section &&
	    next_section < ic->committed_section + ic->n_committed_sections)
		return true;
	if (next_section + ic->journal_sections < ic->committed_section + ic->n_committed_sections)
		return true;

	return false;
}

#define TAG_READ	0
#define TAG_WRITE	1
#define TAG_CMP		2

static int dm_integrity_rw_tag(struct dm_integrity_c *ic, unsigned char *tag, sector_t *metadata_block,
			       unsigned *metadata_offset, unsigned total_size, int op)
{
	do {
		unsigned char *data, *dp;
		struct dm_buffer *b;
		unsigned to_copy;
		int r;

		r = dm_integrity_failed(ic);
		if (unlikely(r))
			return r;

		data = dm_bufio_read(ic->bufio, *metadata_block, &b);
		if (IS_ERR(data))
			return PTR_ERR(data);

		to_copy = min((1U << SECTOR_SHIFT << ic->log2_buffer_sectors) - *metadata_offset, total_size);
		dp = data + *metadata_offset;
		if (op == TAG_READ) {
			memcpy(tag, dp, to_copy);
		} else if (op == TAG_WRITE) {
			memcpy(dp, tag, to_copy);
			dm_bufio_mark_partial_buffer_dirty(b, *metadata_offset, *metadata_offset + to_copy);
		} else {
			/* e.g.: op == TAG_CMP */
			if (unlikely(memcmp(dp, tag, to_copy))) {
				unsigned i;

				for (i = 0; i < to_copy; i++) {
					if (dp[i] != tag[i])
						break;
					total_size--;
				}
				dm_bufio_release(b);
				return total_size;
			}
		}
		dm_bufio_release(b);

		tag += to_copy;
		*metadata_offset += to_copy;
		if (unlikely(*metadata_offset == 1U << SECTOR_SHIFT << ic->log2_buffer_sectors)) {
			(*metadata_block)++;
			*metadata_offset = 0;
		}
		total_size -= to_copy;
	} while (unlikely(total_size));

	return 0;
}

static void dm_integrity_flush_buffers(struct dm_integrity_c *ic)
{
	int r;
	r = dm_bufio_write_dirty_buffers(ic->bufio);
	if (unlikely(r))
		dm_integrity_io_error(ic, "writing tags", r);
}

static void sleep_on_endio_wait(struct dm_integrity_c *ic)
{
	DECLARE_WAITQUEUE(wait, current);
	__add_wait_queue(&ic->endio_wait, &wait);
	__set_current_state(TASK_UNINTERRUPTIBLE);
	spin_unlock_irq(&ic->endio_wait.lock);
	io_schedule();
	spin_lock_irq(&ic->endio_wait.lock);
	__remove_wait_queue(&ic->endio_wait, &wait);
}

static void autocommit_fn(struct timer_list *t)
{
	struct dm_integrity_c *ic = from_timer(ic, t, autocommit_timer);

	if (likely(!dm_integrity_failed(ic)))
		queue_work(ic->commit_wq, &ic->commit_work);
}

static void schedule_autocommit(struct dm_integrity_c *ic)
{
	if (!timer_pending(&ic->autocommit_timer))
		mod_timer(&ic->autocommit_timer, jiffies + ic->autocommit_jiffies);
}

static void submit_flush_bio(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
{
	struct bio *bio;
	unsigned long flags;

	spin_lock_irqsave(&ic->endio_wait.lock, flags);
	bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
	bio_list_add(&ic->flush_bio_list, bio);
	spin_unlock_irqrestore(&ic->endio_wait.lock, flags);

	queue_work(ic->commit_wq, &ic->commit_work);
}

static void do_endio(struct dm_integrity_c *ic, struct bio *bio)
{
	int r = dm_integrity_failed(ic);
	if (unlikely(r) && !bio->bi_status)
		bio->bi_status = errno_to_blk_status(r);
	bio_endio(bio);
}

static void do_endio_flush(struct dm_integrity_c *ic, struct dm_integrity_io *dio)
{
	struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));

	if (unlikely(dio->fua) && likely(!bio->bi_status) && likely(!dm_integrity_failed(ic)))
		submit_flush_bio(ic, dio);
	else
		do_endio(ic, bio);
}

static void dec_in_flight(struct dm_integrity_io *dio)
{
	if (atomic_dec_and_test(&dio->in_flight)) {
		struct dm_integrity_c *ic = dio->ic;
		struct bio *bio;

		remove_range(ic, &dio->range);

		if (unlikely(dio->write))
			schedule_autocommit(ic);

		bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));

		if (unlikely(dio->bi_status) && !bio->bi_status)
			bio->bi_status = dio->bi_status;
		if (likely(!bio->bi_status) && unlikely(bio_sectors(bio) != dio->range.n_sectors)) {
			dio->range.logical_sector += dio->range.n_sectors;
			bio_advance(bio, dio->range.n_sectors << SECTOR_SHIFT);
			INIT_WORK(&dio->work, integrity_bio_wait);
			queue_work(ic->wait_wq, &dio->work);
			return;
		}
		do_endio_flush(ic, dio);
	}
}

static void integrity_end_io(struct bio *bio)
{
	struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));

	bio->bi_iter = dio->orig_bi_iter;
	bio->bi_disk = dio->orig_bi_disk;
	bio->bi_partno = dio->orig_bi_partno;
	if (dio->orig_bi_integrity) {
		bio->bi_integrity = dio->orig_bi_integrity;
		bio->bi_opf |= REQ_INTEGRITY;
	}
	bio->bi_end_io = dio->orig_bi_end_io;

	if (dio->completion)
		complete(dio->completion);

	dec_in_flight(dio);
}

static void integrity_sector_checksum(struct dm_integrity_c *ic, sector_t sector,
				      const char *data, char *result)
{
	__u64 sector_le = cpu_to_le64(sector);
	SHASH_DESC_ON_STACK(req, ic->internal_hash);
	int r;
	unsigned digest_size;

	req->tfm = ic->internal_hash;

	r = crypto_shash_init(req);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_init", r);
		goto failed;
	}

	r = crypto_shash_update(req, (const __u8 *)&sector_le, sizeof sector_le);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_update", r);
		goto failed;
	}

	r = crypto_shash_update(req, data, ic->sectors_per_block << SECTOR_SHIFT);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_update", r);
		goto failed;
	}

	r = crypto_shash_final(req, result);
	if (unlikely(r < 0)) {
		dm_integrity_io_error(ic, "crypto_shash_final", r);
		goto failed;
	}

	digest_size = crypto_shash_digestsize(ic->internal_hash);
	if (unlikely(digest_size < ic->tag_size))
		memset(result + digest_size, 0, ic->tag_size - digest_size);

	return;

failed:
	/* this shouldn't happen anyway, the hash functions have no reason to fail */
	get_random_bytes(result, ic->tag_size);
}

static void integrity_metadata(struct work_struct *w)
{
	struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);
	struct dm_integrity_c *ic = dio->ic;

	int r;

	if (ic->internal_hash) {
		struct bvec_iter iter;
		struct bio_vec bv;
		unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
		struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
		char *checksums;
		unsigned extra_space = unlikely(digest_size > ic->tag_size) ? digest_size - ic->tag_size : 0;
		char checksums_onstack[HASH_MAX_DIGESTSIZE];
		unsigned sectors_to_process = dio->range.n_sectors;
		sector_t sector = dio->range.logical_sector;

		if (unlikely(ic->mode == 'R'))
			goto skip_io;

		checksums = kmalloc((PAGE_SIZE >> SECTOR_SHIFT >> ic->sb->log2_sectors_per_block) * ic->tag_size + extra_space,
				    GFP_NOIO | __GFP_NORETRY | __GFP_NOWARN);
		if (!checksums) {
			checksums = checksums_onstack;
			if (WARN_ON(extra_space &&
				    digest_size > sizeof(checksums_onstack))) {
				r = -EINVAL;
				goto error;
			}
		}

		__bio_for_each_segment(bv, bio, iter, dio->orig_bi_iter) {
			unsigned pos;
			char *mem, *checksums_ptr;

again:
			mem = (char *)kmap_atomic(bv.bv_page) + bv.bv_offset;
			pos = 0;
			checksums_ptr = checksums;
			do {
				integrity_sector_checksum(ic, sector, mem + pos, checksums_ptr);
				checksums_ptr += ic->tag_size;
				sectors_to_process -= ic->sectors_per_block;
				pos += ic->sectors_per_block << SECTOR_SHIFT;
				sector += ic->sectors_per_block;
			} while (pos < bv.bv_len && sectors_to_process && checksums != checksums_onstack);
			kunmap_atomic(mem);

			r = dm_integrity_rw_tag(ic, checksums, &dio->metadata_block, &dio->metadata_offset,
						checksums_ptr - checksums, !dio->write ? TAG_CMP : TAG_WRITE);
			if (unlikely(r)) {
				if (r > 0) {
					DMERR_LIMIT("Checksum failed at sector 0x%llx",
						    (unsigned long long)(sector - ((r + ic->tag_size - 1) / ic->tag_size)));
					r = -EILSEQ;
					atomic64_inc(&ic->number_of_mismatches);
				}
				if (likely(checksums != checksums_onstack))
					kfree(checksums);
				goto error;
			}

			if (!sectors_to_process)
				break;

			if (unlikely(pos < bv.bv_len)) {
				bv.bv_offset += pos;
				bv.bv_len -= pos;
				goto again;
			}
		}

		if (likely(checksums != checksums_onstack))
			kfree(checksums);
	} else {
		struct bio_integrity_payload *bip = dio->orig_bi_integrity;

		if (bip) {
			struct bio_vec biv;
			struct bvec_iter iter;
			unsigned data_to_process = dio->range.n_sectors;
			sector_to_block(ic, data_to_process);
			data_to_process *= ic->tag_size;

			bip_for_each_vec(biv, bip, iter) {
				unsigned char *tag;
				unsigned this_len;

				BUG_ON(PageHighMem(biv.bv_page));
				tag = lowmem_page_address(biv.bv_page) + biv.bv_offset;
				this_len = min(biv.bv_len, data_to_process);
				r = dm_integrity_rw_tag(ic, tag, &dio->metadata_block, &dio->metadata_offset,
							this_len, !dio->write ? TAG_READ : TAG_WRITE);
				if (unlikely(r))
					goto error;
				data_to_process -= this_len;
				if (!data_to_process)
					break;
			}
		}
	}
skip_io:
	dec_in_flight(dio);
	return;
error:
	dio->bi_status = errno_to_blk_status(r);
	dec_in_flight(dio);
}

static int dm_integrity_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_integrity_c *ic = ti->private;
	struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
	struct bio_integrity_payload *bip;

	sector_t area, offset;

	dio->ic = ic;
	dio->bi_status = 0;

	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
		submit_flush_bio(ic, dio);
		return DM_MAPIO_SUBMITTED;
	}

	dio->range.logical_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
	dio->write = bio_op(bio) == REQ_OP_WRITE;
	dio->fua = dio->write && bio->bi_opf & REQ_FUA;
	if (unlikely(dio->fua)) {
		/*
		 * Don't pass down the FUA flag because we have to flush
		 * the disk cache anyway.
		 */
		bio->bi_opf &= ~REQ_FUA;
	}
	if (unlikely(dio->range.logical_sector + bio_sectors(bio) > ic->provided_data_sectors)) {
		DMERR("Too big sector number: 0x%llx + 0x%x > 0x%llx",
		      (unsigned long long)dio->range.logical_sector, bio_sectors(bio),
		      (unsigned long long)ic->provided_data_sectors);
		return DM_MAPIO_KILL;
	}
	if (unlikely((dio->range.logical_sector | bio_sectors(bio)) & (unsigned)(ic->sectors_per_block - 1))) {
		DMERR("Bio not aligned on %u sectors: 0x%llx, 0x%x",
		      ic->sectors_per_block,
		      (unsigned long long)dio->range.logical_sector, bio_sectors(bio));
		return DM_MAPIO_KILL;
	}

	if (ic->sectors_per_block > 1) {
		struct bvec_iter iter;
		struct bio_vec bv;
		bio_for_each_segment(bv, bio, iter) {
			if (unlikely(bv.bv_len & ((ic->sectors_per_block << SECTOR_SHIFT) - 1))) {
				DMERR("Bio vector (%u,%u) is not aligned on %u-sector boundary",
					bv.bv_offset, bv.bv_len, ic->sectors_per_block);
				return DM_MAPIO_KILL;
			}
		}
	}

	bip = bio_integrity(bio);
	if (!ic->internal_hash) {
		if (bip) {
			unsigned wanted_tag_size = bio_sectors(bio) >> ic->sb->log2_sectors_per_block;
			if (ic->log2_tag_size >= 0)
				wanted_tag_size <<= ic->log2_tag_size;
			else
				wanted_tag_size *= ic->tag_size;
			if (unlikely(wanted_tag_size != bip->bip_iter.bi_size)) {
				DMERR("Invalid integrity data size %u, expected %u", bip->bip_iter.bi_size, wanted_tag_size);
				return DM_MAPIO_KILL;
			}
		}
	} else {
		if (unlikely(bip != NULL)) {
			DMERR("Unexpected integrity data when using internal hash");
			return DM_MAPIO_KILL;
		}
	}

	if (unlikely(ic->mode == 'R') && unlikely(dio->write))
		return DM_MAPIO_KILL;

	get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
	dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
	bio->bi_iter.bi_sector = get_data_sector(ic, area, offset);

	dm_integrity_map_continue(dio, true);
	return DM_MAPIO_SUBMITTED;
}

static bool __journal_read_write(struct dm_integrity_io *dio, struct bio *bio,
				 unsigned journal_section, unsigned journal_entry)
{
	struct dm_integrity_c *ic = dio->ic;
	sector_t logical_sector;
	unsigned n_sectors;

	logical_sector = dio->range.logical_sector;
	n_sectors = dio->range.n_sectors;
	do {
		struct bio_vec bv = bio_iovec(bio);
		char *mem;

		if (unlikely(bv.bv_len >> SECTOR_SHIFT > n_sectors))
			bv.bv_len = n_sectors << SECTOR_SHIFT;
		n_sectors -= bv.bv_len >> SECTOR_SHIFT;
		bio_advance_iter(bio, &bio->bi_iter, bv.bv_len);
retry_kmap:
		mem = kmap_atomic(bv.bv_page);
		if (likely(dio->write))
			flush_dcache_page(bv.bv_page);

		do {
			struct journal_entry *je = access_journal_entry(ic, journal_section, journal_entry);

			if (unlikely(!dio->write)) {
				struct journal_sector *js;
				char *mem_ptr;
				unsigned s;

				if (unlikely(journal_entry_is_inprogress(je))) {
					flush_dcache_page(bv.bv_page);
					kunmap_atomic(mem);

					__io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
					goto retry_kmap;
				}
				smp_rmb();
				BUG_ON(journal_entry_get_sector(je) != logical_sector);
				js = access_journal_data(ic, journal_section, journal_entry);
				mem_ptr = mem + bv.bv_offset;
				s = 0;
				do {
					memcpy(mem_ptr, js, JOURNAL_SECTOR_DATA);
					*(commit_id_t *)(mem_ptr + JOURNAL_SECTOR_DATA) = je->last_bytes[s];
					js++;
					mem_ptr += 1 << SECTOR_SHIFT;
				} while (++s < ic->sectors_per_block);
#ifdef INTERNAL_VERIFY
				if (ic->internal_hash) {
					char checksums_onstack[max(HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];

					integrity_sector_checksum(ic, logical_sector, mem + bv.bv_offset, checksums_onstack);
					if (unlikely(memcmp(checksums_onstack, journal_entry_tag(ic, je), ic->tag_size))) {
						DMERR_LIMIT("Checksum failed when reading from journal, at sector 0x%llx",
							    (unsigned long long)logical_sector);
					}
				}
#endif
			}

			if (!ic->internal_hash) {
				struct bio_integrity_payload *bip = bio_integrity(bio);
				unsigned tag_todo = ic->tag_size;
				char *tag_ptr = journal_entry_tag(ic, je);

				if (bip) do {
					struct bio_vec biv = bvec_iter_bvec(bip->bip_vec, bip->bip_iter);
					unsigned tag_now = min(biv.bv_len, tag_todo);
					char *tag_addr;
					BUG_ON(PageHighMem(biv.bv_page));
					tag_addr = lowmem_page_address(biv.bv_page) + biv.bv_offset;
					if (likely(dio->write))
						memcpy(tag_ptr, tag_addr, tag_now);
					else
						memcpy(tag_addr, tag_ptr, tag_now);
					bvec_iter_advance(bip->bip_vec, &bip->bip_iter, tag_now);
					tag_ptr += tag_now;
					tag_todo -= tag_now;
				} while (unlikely(tag_todo)); else {
					if (likely(dio->write))
						memset(tag_ptr, 0, tag_todo);
				}
			}

			if (likely(dio->write)) {
				struct journal_sector *js;
				unsigned s;

				js = access_journal_data(ic, journal_section, journal_entry);
				memcpy(js, mem + bv.bv_offset, ic->sectors_per_block << SECTOR_SHIFT);

				s = 0;
				do {
					je->last_bytes[s] = js[s].commit_id;
				} while (++s < ic->sectors_per_block);

				if (ic->internal_hash) {
					unsigned digest_size = crypto_shash_digestsize(ic->internal_hash);
					if (unlikely(digest_size > ic->tag_size)) {
						char checksums_onstack[HASH_MAX_DIGESTSIZE];
						integrity_sector_checksum(ic, logical_sector, (char *)js, checksums_onstack);
						memcpy(journal_entry_tag(ic, je), checksums_onstack, ic->tag_size);
					} else
						integrity_sector_checksum(ic, logical_sector, (char *)js, journal_entry_tag(ic, je));
				}

				journal_entry_set_sector(je, logical_sector);
			}
			logical_sector += ic->sectors_per_block;

			journal_entry++;
			if (unlikely(journal_entry == ic->journal_section_entries)) {
				journal_entry = 0;
				journal_section++;
				wraparound_section(ic, &journal_section);
			}

			bv.bv_offset += ic->sectors_per_block << SECTOR_SHIFT;
		} while (bv.bv_len -= ic->sectors_per_block << SECTOR_SHIFT);

		if (unlikely(!dio->write))
			flush_dcache_page(bv.bv_page);
		kunmap_atomic(mem);
	} while (n_sectors);

	if (likely(dio->write)) {
		smp_mb();
		if (unlikely(waitqueue_active(&ic->copy_to_journal_wait)))
			wake_up(&ic->copy_to_journal_wait);
		if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold) {
			queue_work(ic->commit_wq, &ic->commit_work);
		} else {
			schedule_autocommit(ic);
		}
	} else {
		remove_range(ic, &dio->range);
	}

	if (unlikely(bio->bi_iter.bi_size)) {
		sector_t area, offset;

		dio->range.logical_sector = logical_sector;
		get_area_and_offset(ic, dio->range.logical_sector, &area, &offset);
		dio->metadata_block = get_metadata_sector_and_offset(ic, area, offset, &dio->metadata_offset);
		return true;
	}

	return false;
}

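/*
 * __journal_read_write() returns true when the bio was only partially
 * consumed (it ran past the sectors reserved in the journal); the caller
 * then recomputes the metadata position for the remainder and retakes
 * the lock to allocate another journal range.
 */
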
static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map)
{
	struct dm_integrity_c *ic = dio->ic;
	struct bio *bio = dm_bio_from_per_bio_data(dio, sizeof(struct dm_integrity_io));
	unsigned journal_section, journal_entry;
	unsigned journal_read_pos;
	struct completion read_comp;
	bool need_sync_io = ic->internal_hash && !dio->write;

	if (need_sync_io && from_map) {
		INIT_WORK(&dio->work, integrity_bio_wait);
		queue_work(ic->metadata_wq, &dio->work);
		return;
	}

lock_retry:
	spin_lock_irq(&ic->endio_wait.lock);
retry:
	if (unlikely(dm_integrity_failed(ic))) {
		spin_unlock_irq(&ic->endio_wait.lock);
		do_endio(ic, bio);
		return;
	}
	dio->range.n_sectors = bio_sectors(bio);
	journal_read_pos = NOT_FOUND;
	if (likely(ic->mode == 'J')) {
		if (dio->write) {
			unsigned next_entry, i, pos;
			unsigned ws, we, range_sectors;

			dio->range.n_sectors = min(dio->range.n_sectors,
						   (sector_t)ic->free_sectors << ic->sb->log2_sectors_per_block);
			if (unlikely(!dio->range.n_sectors)) {
				if (from_map)
					goto offload_to_thread;
				sleep_on_endio_wait(ic);
				goto retry;
			}
			range_sectors = dio->range.n_sectors >> ic->sb->log2_sectors_per_block;
			ic->free_sectors -= range_sectors;
			journal_section = ic->free_section;
			journal_entry = ic->free_section_entry;

			next_entry = ic->free_section_entry + range_sectors;
			ic->free_section_entry = next_entry % ic->journal_section_entries;
			ic->free_section += next_entry / ic->journal_section_entries;
			ic->n_uncommitted_sections += next_entry / ic->journal_section_entries;
			wraparound_section(ic, &ic->free_section);

			pos = journal_section * ic->journal_section_entries + journal_entry;
			ws = journal_section;
			we = journal_entry;
			i = 0;
			do {
				struct journal_entry *je;

				add_journal_node(ic, &ic->journal_tree[pos], dio->range.logical_sector + i);
				pos++;
				if (unlikely(pos >= ic->journal_entries))
					pos = 0;

				je = access_journal_entry(ic, ws, we);
				BUG_ON(!journal_entry_is_unused(je));
				journal_entry_set_inprogress(je);
				we++;
				if (unlikely(we == ic->journal_section_entries)) {
					we = 0;
					ws++;
					wraparound_section(ic, &ws);
				}
			} while ((i += ic->sectors_per_block) < dio->range.n_sectors);

			spin_unlock_irq(&ic->endio_wait.lock);
			goto journal_read_write;
		} else {
			sector_t next_sector;
			journal_read_pos = find_journal_node(ic, dio->range.logical_sector, &next_sector);
			if (likely(journal_read_pos == NOT_FOUND)) {
				if (unlikely(dio->range.n_sectors > next_sector - dio->range.logical_sector))
					dio->range.n_sectors = next_sector - dio->range.logical_sector;
			} else {
				unsigned i;
				unsigned jp = journal_read_pos + 1;
				for (i = ic->sectors_per_block; i < dio->range.n_sectors; i += ic->sectors_per_block, jp++) {
					if (!test_journal_node(ic, jp, dio->range.logical_sector + i))
						break;
				}
				dio->range.n_sectors = i;
			}
		}
	}
	if (unlikely(!add_new_range(ic, &dio->range, true))) {
		/*
		 * We must not sleep in the request routine because it could
		 * stall bios on current->bio_list.
		 * So, we offload the bio to a workqueue if we have to sleep.
		 */
		if (from_map) {
offload_to_thread:
			spin_unlock_irq(&ic->endio_wait.lock);
			INIT_WORK(&dio->work, integrity_bio_wait);
			queue_work(ic->wait_wq, &dio->work);
			return;
		}
		wait_and_add_new_range(ic, &dio->range);
	}
	spin_unlock_irq(&ic->endio_wait.lock);

	if (unlikely(journal_read_pos != NOT_FOUND)) {
		journal_section = journal_read_pos / ic->journal_section_entries;
		journal_entry = journal_read_pos % ic->journal_section_entries;
		goto journal_read_write;
	}

	dio->in_flight = (atomic_t)ATOMIC_INIT(2);

	if (need_sync_io) {
		init_completion(&read_comp);
		dio->completion = &read_comp;
	} else
		dio->completion = NULL;

	dio->orig_bi_iter = bio->bi_iter;

	dio->orig_bi_disk = bio->bi_disk;
	dio->orig_bi_partno = bio->bi_partno;
	bio_set_dev(bio, ic->dev->bdev);

	dio->orig_bi_integrity = bio_integrity(bio);
	bio->bi_integrity = NULL;
	bio->bi_opf &= ~REQ_INTEGRITY;

	dio->orig_bi_end_io = bio->bi_end_io;
	bio->bi_end_io = integrity_end_io;

	bio->bi_iter.bi_size = dio->range.n_sectors << SECTOR_SHIFT;
	generic_make_request(bio);

	if (need_sync_io) {
		wait_for_completion_io(&read_comp);
		if (unlikely(ic->recalc_wq != NULL) &&
		    ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
		    dio->range.logical_sector + dio->range.n_sectors > le64_to_cpu(ic->sb->recalc_sector))
			goto skip_check;
		if (likely(!bio->bi_status))
			integrity_metadata(&dio->work);
		else
skip_check:
			dec_in_flight(dio);
	} else {
		INIT_WORK(&dio->work, integrity_metadata);
		queue_work(ic->metadata_wq, &dio->work);
	}

	return;

journal_read_write:
	if (unlikely(__journal_read_write(dio, bio, journal_section, journal_entry)))
		goto lock_retry;

	do_endio_flush(ic, dio);
}

static void integrity_bio_wait(struct work_struct *w)
{
	struct dm_integrity_io *dio = container_of(w, struct dm_integrity_io, work);

	dm_integrity_map_continue(dio, false);
}

static void pad_uncommitted(struct dm_integrity_c *ic)
{
	if (ic->free_section_entry) {
		ic->free_sectors -= ic->journal_section_entries - ic->free_section_entry;
		ic->free_section_entry = 0;
		ic->free_section++;
		wraparound_section(ic, &ic->free_section);
		ic->n_uncommitted_sections++;
	}
	WARN_ON(ic->journal_sections * ic->journal_section_entries !=
		(ic->n_uncommitted_sections + ic->n_committed_sections) * ic->journal_section_entries + ic->free_sectors);
}

static void integrity_commit(struct work_struct *w)
{
	struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, commit_work);
	unsigned commit_start, commit_sections;
	unsigned i, j, n;
	struct bio *flushes;

	del_timer(&ic->autocommit_timer);

	spin_lock_irq(&ic->endio_wait.lock);
	flushes = bio_list_get(&ic->flush_bio_list);
	if (unlikely(ic->mode != 'J')) {
		spin_unlock_irq(&ic->endio_wait.lock);
		dm_integrity_flush_buffers(ic);
		goto release_flush_bios;
	}

	pad_uncommitted(ic);
	commit_start = ic->uncommitted_section;
	commit_sections = ic->n_uncommitted_sections;
	spin_unlock_irq(&ic->endio_wait.lock);

	if (!commit_sections)
		goto release_flush_bios;

	i = commit_start;
	for (n = 0; n < commit_sections; n++) {
		for (j = 0; j < ic->journal_section_entries; j++) {
			struct journal_entry *je;
			je = access_journal_entry(ic, i, j);
			io_wait_event(ic->copy_to_journal_wait, !journal_entry_is_inprogress(je));
		}
		for (j = 0; j < ic->journal_section_sectors; j++) {
			struct journal_sector *js;
			js = access_journal(ic, i, j);
			js->commit_id = dm_integrity_commit_id(ic, i, j, ic->commit_seq);
		}
		i++;
		if (unlikely(i >= ic->journal_sections))
			ic->commit_seq = next_commit_seq(ic->commit_seq);
		wraparound_section(ic, &i);
	}
	smp_rmb();

	write_journal(ic, commit_start, commit_sections);

	spin_lock_irq(&ic->endio_wait.lock);
	ic->uncommitted_section += commit_sections;
	wraparound_section(ic, &ic->uncommitted_section);
	ic->n_uncommitted_sections -= commit_sections;
	ic->n_committed_sections += commit_sections;
	spin_unlock_irq(&ic->endio_wait.lock);

	if (READ_ONCE(ic->free_sectors) <= ic->free_sectors_threshold)
		queue_work(ic->writer_wq, &ic->writer_work);

release_flush_bios:
	while (flushes) {
		struct bio *next = flushes->bi_next;
		flushes->bi_next = NULL;
		do_endio(ic, flushes);
		flushes = next;
	}
}

static void complete_copy_from_journal(unsigned long error, void *context)
{
	struct journal_io *io = context;
	struct journal_completion *comp = io->comp;
	struct dm_integrity_c *ic = comp->ic;
	remove_range(ic, &io->range);
	mempool_free(io, &ic->journal_io_mempool);
	if (unlikely(error != 0))
		dm_integrity_io_error(ic, "copying from journal", -EIO);
	complete_journal_op(comp);
}

static void restore_last_bytes(struct dm_integrity_c *ic, struct journal_sector *js,
			       struct journal_entry *je)
{
	unsigned s = 0;
	do {
		js->commit_id = je->last_bytes[s];
		js++;
	} while (++s < ic->sectors_per_block);
}

static void do_journal_write(struct dm_integrity_c *ic, unsigned write_start,
			     unsigned write_sections, bool from_replay)
{
	unsigned i, j, n;
	struct journal_completion comp;
	struct blk_plug plug;

	blk_start_plug(&plug);

	comp.ic = ic;
	comp.in_flight = (atomic_t)ATOMIC_INIT(1);
	init_completion(&comp.comp);

	i = write_start;
	for (n = 0; n < write_sections; n++, i++, wraparound_section(ic, &i)) {
#ifndef INTERNAL_VERIFY
		if (unlikely(from_replay))
#endif
			rw_section_mac(ic, i, false);
		for (j = 0; j < ic->journal_section_entries; j++) {
			struct journal_entry *je = access_journal_entry(ic, i, j);
			sector_t sec, area, offset;
			unsigned k, l, next_loop;
			sector_t metadata_block;
			unsigned metadata_offset;
			struct journal_io *io;

			if (journal_entry_is_unused(je))
				continue;
			BUG_ON(unlikely(journal_entry_is_inprogress(je)) && !from_replay);
			sec = journal_entry_get_sector(je);
			if (unlikely(from_replay)) {
				if (unlikely(sec & (unsigned)(ic->sectors_per_block - 1))) {
					dm_integrity_io_error(ic, "invalid sector in journal", -EIO);
					sec &= ~(sector_t)(ic->sectors_per_block - 1);
				}
			}
			get_area_and_offset(ic, sec, &area, &offset);
			restore_last_bytes(ic, access_journal_data(ic, i, j), je);
			for (k = j + 1; k < ic->journal_section_entries; k++) {
				struct journal_entry *je2 = access_journal_entry(ic, i, k);
				sector_t sec2, area2, offset2;
				if (journal_entry_is_unused(je2))
					break;
				BUG_ON(unlikely(journal_entry_is_inprogress(je2)) && !from_replay);
				sec2 = journal_entry_get_sector(je2);
				get_area_and_offset(ic, sec2, &area2, &offset2);
				if (area2 != area || offset2 != offset + ((k - j) << ic->sb->log2_sectors_per_block))
					break;
				restore_last_bytes(ic, access_journal_data(ic, i, k), je2);
			}
			next_loop = k - 1;

			io = mempool_alloc(&ic->journal_io_mempool, GFP_NOIO);
			io->comp = &comp;
			io->range.logical_sector = sec;
			io->range.n_sectors = (k - j) << ic->sb->log2_sectors_per_block;

			spin_lock_irq(&ic->endio_wait.lock);
			add_new_range_and_wait(ic, &io->range);

			if (likely(!from_replay)) {
				struct journal_node *section_node = &ic->journal_tree[i * ic->journal_section_entries];

				/* don't write if there is a newer committed sector */
				while (j < k && find_newer_committed_node(ic, &section_node[j])) {
					struct journal_entry *je2 = access_journal_entry(ic, i, j);

					journal_entry_set_unused(je2);
					remove_journal_node(ic, &section_node[j]);
					j++;
					sec += ic->sectors_per_block;
					offset += ic->sectors_per_block;
				}
				while (j < k && find_newer_committed_node(ic, &section_node[k - 1])) {
					struct journal_entry *je2 = access_journal_entry(ic, i, k - 1);

					journal_entry_set_unused(je2);
					remove_journal_node(ic, &section_node[k - 1]);
					k--;
				}
				if (j == k) {
					remove_range_unlocked(ic, &io->range);
					spin_unlock_irq(&ic->endio_wait.lock);
					mempool_free(io, &ic->journal_io_mempool);
					goto skip_io;
				}
				for (l = j; l < k; l++) {
					remove_journal_node(ic, &section_node[l]);
				}
			}
			spin_unlock_irq(&ic->endio_wait.lock);

			metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);
			for (l = j; l < k; l++) {
				int r;
				struct journal_entry *je2 = access_journal_entry(ic, i, l);

				if (
#ifndef INTERNAL_VERIFY
				    unlikely(from_replay) &&
#endif
				    ic->internal_hash) {
					char test_tag[max_t(size_t, HASH_MAX_DIGESTSIZE, MAX_TAG_SIZE)];

					integrity_sector_checksum(ic, sec + ((l - j) << ic->sb->log2_sectors_per_block),
								  (char *)access_journal_data(ic, i, l), test_tag);
					if (unlikely(memcmp(test_tag, journal_entry_tag(ic, je2), ic->tag_size)))
						dm_integrity_io_error(ic, "tag mismatch when replaying journal", -EILSEQ);
				}

				journal_entry_set_unused(je2);
				r = dm_integrity_rw_tag(ic, journal_entry_tag(ic, je2), &metadata_block, &metadata_offset,
							ic->tag_size, TAG_WRITE);
				if (unlikely(r)) {
					dm_integrity_io_error(ic, "reading tags", r);
				}
			}

			atomic_inc(&comp.in_flight);
			copy_from_journal(ic, i, j << ic->sb->log2_sectors_per_block,
					  (k - j) << ic->sb->log2_sectors_per_block,
					  get_data_sector(ic, area, offset),
					  complete_copy_from_journal, io);
skip_io:
			j = next_loop;
		}
	}

	dm_bufio_write_dirty_buffers_async(ic->bufio);

	blk_finish_plug(&plug);

	complete_journal_op(&comp);
	wait_for_completion_io(&comp.comp);

	dm_integrity_flush_buffers(ic);
}

static void integrity_writer(struct work_struct *w)
{
	struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, writer_work);
	unsigned write_start, write_sections;

	unsigned prev_free_sectors;

	/* the following test is not needed, but it tests the replay code */
	if (READ_ONCE(ic->suspending) && !ic->meta_dev)
		return;

	spin_lock_irq(&ic->endio_wait.lock);
	write_start = ic->committed_section;
	write_sections = ic->n_committed_sections;
	spin_unlock_irq(&ic->endio_wait.lock);

	if (!write_sections)
		return;

	do_journal_write(ic, write_start, write_sections, false);

	spin_lock_irq(&ic->endio_wait.lock);

	ic->committed_section += write_sections;
	wraparound_section(ic, &ic->committed_section);
	ic->n_committed_sections -= write_sections;

	prev_free_sectors = ic->free_sectors;
	ic->free_sectors += write_sections * ic->journal_section_entries;
	if (unlikely(!prev_free_sectors))
		wake_up_locked(&ic->endio_wait);

	spin_unlock_irq(&ic->endio_wait.lock);
}

static void recalc_write_super(struct dm_integrity_c *ic)
{
	int r;

	dm_integrity_flush_buffers(ic);
	if (dm_integrity_failed(ic))
		return;

	sb_set_version(ic);
	r = sync_rw_sb(ic, REQ_OP_WRITE, 0);
	if (unlikely(r))
		dm_integrity_io_error(ic, "writing superblock", r);
}

static void integrity_recalc(struct work_struct *w)
{
	struct dm_integrity_c *ic = container_of(w, struct dm_integrity_c, recalc_work);
	struct dm_integrity_range range;
	struct dm_io_request io_req;
	struct dm_io_region io_loc;
	sector_t area, offset;
	sector_t metadata_block;
	unsigned metadata_offset;
	__u8 *t;
	unsigned i;
	int r;
	unsigned super_counter = 0;

	spin_lock_irq(&ic->endio_wait.lock);

next_chunk:

	if (unlikely(READ_ONCE(ic->suspending)))
		goto unlock_ret;

	range.logical_sector = le64_to_cpu(ic->sb->recalc_sector);
	if (unlikely(range.logical_sector >= ic->provided_data_sectors))
		goto unlock_ret;

	get_area_and_offset(ic, range.logical_sector, &area, &offset);
	range.n_sectors = min((sector_t)RECALC_SECTORS, ic->provided_data_sectors - range.logical_sector);
	if (!ic->meta_dev)
		range.n_sectors = min(range.n_sectors, ((sector_t)1U << ic->sb->log2_interleave_sectors) - (unsigned)offset);

	add_new_range_and_wait(ic, &range);

	spin_unlock_irq(&ic->endio_wait.lock);

	if (unlikely(++super_counter == RECALC_WRITE_SUPER)) {
		recalc_write_super(ic);
		super_counter = 0;
	}

	if (unlikely(dm_integrity_failed(ic)))
		goto err;

	io_req.bi_op = REQ_OP_READ;
	io_req.bi_op_flags = 0;
	io_req.mem.type = DM_IO_VMA;
	io_req.mem.ptr.addr = ic->recalc_buffer;
	io_req.notify.fn = NULL;
	io_req.client = ic->io;
	io_loc.bdev = ic->dev->bdev;
	io_loc.sector = get_data_sector(ic, area, offset);
	io_loc.count = range.n_sectors;

	r = dm_io(&io_req, 1, &io_loc, NULL);
	if (unlikely(r)) {
		dm_integrity_io_error(ic, "reading data", r);
		goto err;
	}

	t = ic->recalc_tags;
	for (i = 0; i < range.n_sectors; i += ic->sectors_per_block) {
		integrity_sector_checksum(ic, range.logical_sector + i, ic->recalc_buffer + (i << SECTOR_SHIFT), t);
		t += ic->tag_size;
	}

	metadata_block = get_metadata_sector_and_offset(ic, area, offset, &metadata_offset);

	r = dm_integrity_rw_tag(ic, ic->recalc_tags, &metadata_block, &metadata_offset, t - ic->recalc_tags, TAG_WRITE);
	if (unlikely(r)) {
		dm_integrity_io_error(ic, "writing tags", r);
		goto err;
	}

	spin_lock_irq(&ic->endio_wait.lock);
	remove_range_unlocked(ic, &range);
	ic->sb->recalc_sector = cpu_to_le64(range.logical_sector + range.n_sectors);
	goto next_chunk;

err:
	remove_range(ic, &range);
	return;

unlock_ret:
	spin_unlock_irq(&ic->endio_wait.lock);

	recalc_write_super(ic);
}
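
/*
 * Initialize (or erase) journal sections: zero all journal entries and
 * stamp every sector with the commit id for the given commit sequence.
 */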
static void init_journal(struct dm_integrity_c *ic, unsigned start_section,
			 unsigned n_sections, unsigned char commit_seq)
{
	unsigned i, j, n;

	for (n = 0; n < n_sections; n++) {
		i = start_section + n;
		wraparound_section(ic, &i);
		for (j = 0; j < ic->journal_section_sectors; j++) {
			struct journal_sector *js = access_journal(ic, i, j);
			memset(&js->entries, 0, JOURNAL_SECTOR_DATA);
			js->commit_id = dm_integrity_commit_id(ic, i, j, commit_seq);
		}
		for (j = 0; j < ic->journal_section_entries; j++) {
			struct journal_entry *je = access_journal_entry(ic, i, j);
			journal_entry_set_unused(je);
		}
	}

	write_journal(ic, start_section, n_sections);
}
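
/*
 * Map a commit id read from disk back to its commit sequence number
 * (0 .. N_COMMIT_IDS - 1); report an I/O error for an unknown id.
 */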
static int find_commit_seq(struct dm_integrity_c *ic, unsigned i, unsigned j, commit_id_t id)
{
	unsigned char k;
	for (k = 0; k < N_COMMIT_IDS; k++) {
		if (dm_integrity_commit_id(ic, i, j, k) == id)
			return k;
	}
	dm_integrity_io_error(ic, "journal commit id", -EIO);
	return -1;
}
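
/*
 * Scan the on-disk journal after activation, determine the newest fully
 * committed sequence and replay any sections that were not yet written
 * back to the device; if the journal cannot be trusted, reinitialize
 * ("clear") it instead.
 */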
static void replay_journal(struct dm_integrity_c *ic)
{
	unsigned i, j;
	bool used_commit_ids[N_COMMIT_IDS];
	unsigned max_commit_id_sections[N_COMMIT_IDS];
	unsigned write_start, write_sections;
	unsigned continue_section;
	bool journal_empty;
	unsigned char unused, last_used, want_commit_seq;

	if (ic->mode == 'R')
		return;

	if (ic->journal_uptodate)
		return;

	last_used = 0;
	write_start = 0;

	if (!ic->just_formatted) {
		DEBUG_print("reading journal\n");
		rw_journal(ic, REQ_OP_READ, 0, 0, ic->journal_sections, NULL);
		if (ic->journal_io)
			DEBUG_bytes(lowmem_page_address(ic->journal_io[0].page), 64, "read journal");
		if (ic->journal_io) {
			struct journal_completion crypt_comp;

			crypt_comp.ic = ic;
			init_completion(&crypt_comp.comp);
			crypt_comp.in_flight = (atomic_t)ATOMIC_INIT(0);
			encrypt_journal(ic, false, 0, ic->journal_sections, &crypt_comp);
			wait_for_completion(&crypt_comp.comp);
		}
		DEBUG_bytes(lowmem_page_address(ic->journal[0].page), 64, "decrypted journal");
	}

	if (dm_integrity_failed(ic))
		goto clear_journal;

	journal_empty = true;
	memset(used_commit_ids, 0, sizeof used_commit_ids);
	memset(max_commit_id_sections, 0, sizeof max_commit_id_sections);
	for (i = 0; i < ic->journal_sections; i++) {
		for (j = 0; j < ic->journal_section_sectors; j++) {
			int k;
			struct journal_sector *js = access_journal(ic, i, j);
			k = find_commit_seq(ic, i, j, js->commit_id);
			if (k < 0)
				goto clear_journal;
			used_commit_ids[k] = true;
			max_commit_id_sections[k] = i;
		}
		if (journal_empty) {
			for (j = 0; j < ic->journal_section_entries; j++) {
				struct journal_entry *je = access_journal_entry(ic, i, j);
				if (!journal_entry_is_unused(je)) {
					journal_empty = false;
					break;
				}
			}
		}
	}

	if (!used_commit_ids[N_COMMIT_IDS - 1]) {
		unused = N_COMMIT_IDS - 1;
		while (unused && !used_commit_ids[unused - 1])
			unused--;
	} else {
		for (unused = 0; unused < N_COMMIT_IDS; unused++)
			if (!used_commit_ids[unused])
				break;
		if (unused == N_COMMIT_IDS) {
			dm_integrity_io_error(ic, "journal commit ids", -EIO);
			goto clear_journal;
		}
	}
	DEBUG_print("first unused commit seq %d [%d,%d,%d,%d]\n",
		    unused, used_commit_ids[0], used_commit_ids[1],
		    used_commit_ids[2], used_commit_ids[3]);

	last_used = prev_commit_seq(unused);
	want_commit_seq = prev_commit_seq(last_used);

	if (!used_commit_ids[want_commit_seq] && used_commit_ids[prev_commit_seq(want_commit_seq)])
		journal_empty = true;

	write_start = max_commit_id_sections[last_used] + 1;
	if (unlikely(write_start >= ic->journal_sections))
		want_commit_seq = next_commit_seq(want_commit_seq);
	wraparound_section(ic, &write_start);

	i = write_start;
	for (write_sections = 0; write_sections < ic->journal_sections; write_sections++) {
		for (j = 0; j < ic->journal_section_sectors; j++) {
			struct journal_sector *js = access_journal(ic, i, j);

			if (js->commit_id != dm_integrity_commit_id(ic, i, j, want_commit_seq)) {
				/*
				 * This could be caused by a crash during writing.
				 * We won't replay the inconsistent part of the
				 * journal.
				 */
				DEBUG_print("commit id mismatch at position (%u, %u): %d != %d\n",
					    i, j, find_commit_seq(ic, i, j, js->commit_id), want_commit_seq);
				goto brk;
			}
		}
		i++;
		if (unlikely(i >= ic->journal_sections))
			want_commit_seq = next_commit_seq(want_commit_seq);
		wraparound_section(ic, &i);
	}
brk:

	if (!journal_empty) {
		DEBUG_print("replaying %u sections, starting at %u, commit seq %d\n",
			    write_sections, write_start, want_commit_seq);
		do_journal_write(ic, write_start, write_sections, true);
	}

	if (write_sections == ic->journal_sections && (ic->mode == 'J' || journal_empty)) {
		continue_section = write_start;
		ic->commit_seq = want_commit_seq;
		DEBUG_print("continuing from section %u, commit seq %d\n", write_start, ic->commit_seq);
	} else {
		unsigned s;
		unsigned char erase_seq;
clear_journal:
		DEBUG_print("clearing journal\n");

		erase_seq = prev_commit_seq(prev_commit_seq(last_used));

		s = write_start;
		init_journal(ic, s, 1, erase_seq);
		s++;
		wraparound_section(ic, &s);
		if (ic->journal_sections >= 2) {
			init_journal(ic, s, ic->journal_sections - 2, erase_seq);
			s += ic->journal_sections - 2;
			wraparound_section(ic, &s);
			init_journal(ic, s, 1, erase_seq);
		}

		continue_section = 0;
		ic->commit_seq = next_commit_seq(erase_seq);
	}

	ic->committed_section = continue_section;
	ic->n_committed_sections = 0;

	ic->uncommitted_section = continue_section;
	ic->n_uncommitted_sections = 0;

	ic->free_section = continue_section;
	ic->free_section_entry = 0;
	ic->free_sectors = ic->journal_entries;

	ic->journal_tree_root = RB_ROOT;
	for (i = 0; i < ic->journal_entries; i++)
		init_journal_node(&ic->journal_tree[i]);
}
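
/*
 * Suspend: stop the autocommit timer and drain the work queues so that
 * the journal and metadata are consistent on disk before deactivation.
 */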
static void dm_integrity_postsuspend(struct dm_target *ti)
{
	struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;

	del_timer_sync(&ic->autocommit_timer);

	WRITE_ONCE(ic->suspending, 1);

	if (ic->recalc_wq)
		drain_workqueue(ic->recalc_wq);

	queue_work(ic->commit_wq, &ic->commit_work);
	drain_workqueue(ic->commit_wq);

	if (ic->mode == 'J') {
		if (ic->meta_dev)
			queue_work(ic->writer_wq, &ic->writer_work);
		drain_workqueue(ic->writer_wq);
		dm_integrity_flush_buffers(ic);
	}

	WRITE_ONCE(ic->suspending, 0);

	BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));

	ic->journal_uptodate = true;
}
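
/*
 * Resume: replay the journal, then restart background recalculation if
 * it was interrupted; clamp a recalc position that ran past the end.
 */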
static void dm_integrity_resume(struct dm_target *ti)
{
	struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;

	replay_journal(ic);

	if (ic->recalc_wq && ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
		__u64 recalc_pos = le64_to_cpu(ic->sb->recalc_sector);
		if (recalc_pos < ic->provided_data_sectors) {
			queue_work(ic->recalc_wq, &ic->recalc_work);
		} else if (recalc_pos > ic->provided_data_sectors) {
			ic->sb->recalc_sector = cpu_to_le64(ic->provided_data_sectors);
			recalc_write_super(ic);
		}
	}
}
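
/*
 * Report status: the mismatch count and provided sectors for "info",
 * the constructor arguments (reconstructed from current state) for
 * "table".
 */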
static void dm_integrity_status(struct dm_target *ti, status_type_t type,
				unsigned status_flags, char *result, unsigned maxlen)
{
	struct dm_integrity_c *ic = (struct dm_integrity_c *)ti->private;
	unsigned arg_count;
	size_t sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%llu %llu",
			(unsigned long long)atomic64_read(&ic->number_of_mismatches),
			(unsigned long long)ic->provided_data_sectors);
		if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
			DMEMIT(" %llu", (unsigned long long)le64_to_cpu(ic->sb->recalc_sector));
		else
			DMEMIT(" -");
		break;

	case STATUSTYPE_TABLE: {
		__u64 watermark_percentage = (__u64)(ic->journal_entries - ic->free_sectors_threshold) * 100;
		watermark_percentage += ic->journal_entries / 2;
		do_div(watermark_percentage, ic->journal_entries);
		arg_count = 3;	/* journal_sectors, interleave_sectors, buffer_sectors */
		arg_count += !!ic->meta_dev;
		arg_count += ic->sectors_per_block != 1;
		arg_count += !!(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING));
		arg_count += ic->mode == 'J';	/* journal_watermark */
		arg_count += ic->mode == 'J';	/* commit_time */
		arg_count += !!ic->internal_hash_alg.alg_string;
		arg_count += !!ic->journal_crypt_alg.alg_string;
		arg_count += !!ic->journal_mac_alg.alg_string;
		DMEMIT("%s %llu %u %c %u", ic->dev->name, (unsigned long long)ic->start,
		       ic->tag_size, ic->mode, arg_count);
		if (ic->meta_dev)
			DMEMIT(" meta_device:%s", ic->meta_dev->name);
		if (ic->sectors_per_block != 1)
			DMEMIT(" block_size:%u", ic->sectors_per_block << SECTOR_SHIFT);
		if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
			DMEMIT(" recalculate");
		DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS);
		DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors);
		DMEMIT(" buffer_sectors:%u", 1U << ic->log2_buffer_sectors);
		if (ic->mode == 'J') {
			DMEMIT(" journal_watermark:%u", (unsigned)watermark_percentage);
			DMEMIT(" commit_time:%u", ic->autocommit_msec);
		}

#define EMIT_ALG(a, n)							\
		do {							\
			if (ic->a.alg_string) {				\
				DMEMIT(" %s:%s", n, ic->a.alg_string);	\
				if (ic->a.key_string)			\
					DMEMIT(":%s", ic->a.key_string);\
			}						\
		} while (0)
		EMIT_ALG(internal_hash_alg, "internal_hash");
		EMIT_ALG(journal_crypt_alg, "journal_crypt");
		EMIT_ALG(journal_mac_alg, "journal_mac");
		break;
	}
	}
}
static int dm_integrity_iterate_devices(struct dm_target *ti,
					iterate_devices_callout_fn fn, void *data)
{
	struct dm_integrity_c *ic = ti->private;

	if (!ic->meta_dev)
		return fn(ti, ic->dev, ic->start + ic->initial_sectors + ic->metadata_run, ti->len, data);
	else
		return fn(ti, ic->dev, 0, ti->len, data);
}

static void dm_integrity_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct dm_integrity_c *ic = ti->private;

	if (ic->sectors_per_block > 1) {
		limits->logical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
		limits->physical_block_size = ic->sectors_per_block << SECTOR_SHIFT;
		blk_limits_io_min(limits, ic->sectors_per_block << SECTOR_SHIFT);
	}
}
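
/*
 * Derive the journal geometry (entry size, entries per sector, entries
 * and sectors per section) from the tag size and block size recorded in
 * the superblock.
 */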
static void calculate_journal_section_size(struct dm_integrity_c *ic)
{
	unsigned sector_space = JOURNAL_SECTOR_DATA;

	ic->journal_sections = le32_to_cpu(ic->sb->journal_sections);
	ic->journal_entry_size = roundup(offsetof(struct journal_entry, last_bytes[ic->sectors_per_block]) + ic->tag_size,
					 JOURNAL_ENTRY_ROUNDUP);

	if (ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC))
		sector_space -= JOURNAL_MAC_PER_SECTOR;
	ic->journal_entries_per_sector = sector_space / ic->journal_entry_size;
	ic->journal_section_entries = ic->journal_entries_per_sector * JOURNAL_BLOCK_SECTORS;
	ic->journal_section_sectors = (ic->journal_section_entries << ic->sb->log2_sectors_per_block) + JOURNAL_BLOCK_SECTORS;
	ic->journal_entries = ic->journal_section_entries * ic->journal_sections;
}
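
/*
 * Compute initial_sectors and the metadata layout and check that data,
 * metadata and journal all fit on the underlying device(s).
 */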
static int calculate_device_limits(struct dm_integrity_c *ic)
{
	__u64 initial_sectors;

	calculate_journal_section_size(ic);
	initial_sectors = SB_SECTORS + (__u64)ic->journal_section_sectors * ic->journal_sections;
	if (initial_sectors + METADATA_PADDING_SECTORS >= ic->meta_device_sectors || initial_sectors > UINT_MAX)
		return -EINVAL;
	ic->initial_sectors = initial_sectors;

	if (!ic->meta_dev) {
		sector_t last_sector, last_area, last_offset;

		ic->metadata_run = roundup((__u64)ic->tag_size << (ic->sb->log2_interleave_sectors - ic->sb->log2_sectors_per_block),
					   (__u64)(1 << SECTOR_SHIFT << METADATA_PADDING_SECTORS)) >> SECTOR_SHIFT;
		if (!(ic->metadata_run & (ic->metadata_run - 1)))
			ic->log2_metadata_run = __ffs(ic->metadata_run);
		else
			ic->log2_metadata_run = -1;

		get_area_and_offset(ic, ic->provided_data_sectors - 1, &last_area, &last_offset);
		last_sector = get_data_sector(ic, last_area, last_offset);
		if (last_sector < ic->start || last_sector >= ic->meta_device_sectors)
			return -EINVAL;
	} else {
		__u64 meta_size = (ic->provided_data_sectors >> ic->sb->log2_sectors_per_block) * ic->tag_size;
		meta_size = (meta_size + ((1U << (ic->log2_buffer_sectors + SECTOR_SHIFT)) - 1))
				>> (ic->log2_buffer_sectors + SECTOR_SHIFT);
		meta_size <<= ic->log2_buffer_sectors;
		if (ic->initial_sectors + meta_size < ic->initial_sectors ||
		    ic->initial_sectors + meta_size > ic->meta_device_sectors)
			return -EINVAL;
		ic->metadata_run = 1;
		ic->log2_metadata_run = 0;
	}

	return 0;
}
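
/*
 * Format a fresh superblock: pick the journal size and interleaving,
 * then bit-by-bit grow the largest provided_data_sectors (or, for a
 * separate metadata device, journal_sections) that still passes the
 * limit checks.
 */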
static int initialize_superblock(struct dm_integrity_c *ic, unsigned journal_sectors, unsigned interleave_sectors)
{
	unsigned journal_sections;
	int test_bit;

	memset(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT);
	memcpy(ic->sb->magic, SB_MAGIC, 8);
	ic->sb->integrity_tag_size = cpu_to_le16(ic->tag_size);
	ic->sb->log2_sectors_per_block = __ffs(ic->sectors_per_block);
	if (ic->journal_mac_alg.alg_string)
		ic->sb->flags |= cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC);

	calculate_journal_section_size(ic);
	journal_sections = journal_sectors / ic->journal_section_sectors;
	if (!journal_sections)
		journal_sections = 1;

	if (!ic->meta_dev) {
		ic->sb->journal_sections = cpu_to_le32(journal_sections);
		if (!interleave_sectors)
			interleave_sectors = DEFAULT_INTERLEAVE_SECTORS;
		ic->sb->log2_interleave_sectors = __fls(interleave_sectors);
		ic->sb->log2_interleave_sectors = max((__u8)MIN_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);
		ic->sb->log2_interleave_sectors = min((__u8)MAX_LOG2_INTERLEAVE_SECTORS, ic->sb->log2_interleave_sectors);

		ic->provided_data_sectors = 0;
		for (test_bit = fls64(ic->meta_device_sectors) - 1; test_bit >= 3; test_bit--) {
			__u64 prev_data_sectors = ic->provided_data_sectors;

			ic->provided_data_sectors |= (sector_t)1 << test_bit;
			if (calculate_device_limits(ic))
				ic->provided_data_sectors = prev_data_sectors;
		}
		if (!ic->provided_data_sectors)
			return -EINVAL;
	} else {
		ic->sb->log2_interleave_sectors = 0;
		ic->provided_data_sectors = ic->data_device_sectors;
		ic->provided_data_sectors &= ~(sector_t)(ic->sectors_per_block - 1);

try_smaller_buffer:
		ic->sb->journal_sections = cpu_to_le32(0);
		for (test_bit = fls(journal_sections) - 1; test_bit >= 0; test_bit--) {
			__u32 prev_journal_sections = le32_to_cpu(ic->sb->journal_sections);
			__u32 test_journal_sections = prev_journal_sections | (1U << test_bit);
			if (test_journal_sections > journal_sections)
				continue;
			ic->sb->journal_sections = cpu_to_le32(test_journal_sections);
			if (calculate_device_limits(ic))
				ic->sb->journal_sections = cpu_to_le32(prev_journal_sections);
		}
		if (!le32_to_cpu(ic->sb->journal_sections)) {
			if (ic->log2_buffer_sectors > 3) {
				ic->log2_buffer_sectors--;
				goto try_smaller_buffer;
			}
			return -EINVAL;
		}
	}

	ic->sb->provided_data_sectors = cpu_to_le64(ic->provided_data_sectors);

	sb_set_version(ic);

	return 0;
}
static void dm_integrity_set(struct dm_target *ti, struct dm_integrity_c *ic)
{
	struct gendisk *disk = dm_disk(dm_table_get_md(ti->table));
	struct blk_integrity bi;

	memset(&bi, 0, sizeof(bi));
	bi.profile = &dm_integrity_profile;
	bi.tuple_size = ic->tag_size;
	bi.tag_size = bi.tuple_size;
	bi.interval_exp = ic->sb->log2_sectors_per_block + SECTOR_SHIFT;

	blk_integrity_register(disk, &bi);
	blk_queue_max_integrity_segments(disk->queue, UINT_MAX);
}
static void dm_integrity_free_page_list(struct page_list *pl)
{
	unsigned i;

	if (!pl)
		return;
	for (i = 0; pl[i].page; i++)
		__free_page(pl[i].page);
	kvfree(pl);
}

static struct page_list *dm_integrity_alloc_page_list(unsigned n_pages)
{
	struct page_list *pl;
	unsigned i;

	pl = kvmalloc_array(n_pages + 1, sizeof(struct page_list), GFP_KERNEL | __GFP_ZERO);
	if (!pl)
		return NULL;

	for (i = 0; i < n_pages; i++) {
		pl[i].page = alloc_page(GFP_KERNEL);
		if (!pl[i].page) {
			dm_integrity_free_page_list(pl);
			return NULL;
		}
		if (i)
			pl[i - 1].next = &pl[i];
	}
	pl[i].page = NULL;
	pl[i].next = NULL;

	return pl;
}

static void dm_integrity_free_journal_scatterlist(struct dm_integrity_c *ic, struct scatterlist **sl)
{
	unsigned i;
	for (i = 0; i < ic->journal_sections; i++)
		kvfree(sl[i]);
	kvfree(sl);
}

static struct scatterlist **dm_integrity_alloc_journal_scatterlist(struct dm_integrity_c *ic, struct page_list *pl)
{
	struct scatterlist **sl;
	unsigned i;

	sl = kvmalloc_array(ic->journal_sections,
			    sizeof(struct scatterlist *),
			    GFP_KERNEL | __GFP_ZERO);
	if (!sl)
		return NULL;

	for (i = 0; i < ic->journal_sections; i++) {
		struct scatterlist *s;
		unsigned start_index, start_offset;
		unsigned end_index, end_offset;
		unsigned n_pages;
		unsigned idx;

		page_list_location(ic, i, 0, &start_index, &start_offset);
		page_list_location(ic, i, ic->journal_section_sectors - 1, &end_index, &end_offset);

		n_pages = (end_index - start_index + 1);

		s = kvmalloc_array(n_pages, sizeof(struct scatterlist),
				   GFP_KERNEL);
		if (!s) {
			dm_integrity_free_journal_scatterlist(ic, sl);
			return NULL;
		}

		sg_init_table(s, n_pages);
		for (idx = start_index; idx <= end_index; idx++) {
			char *va = lowmem_page_address(pl[idx].page);
			unsigned start = 0, end = PAGE_SIZE;
			if (idx == start_index)
				start = start_offset;
			if (idx == end_index)
				end = end_offset + (1 << SECTOR_SHIFT);
			sg_set_buf(&s[idx - start_index], va + start, end - start);
		}

		sl[i] = s;
	}

	return sl;
}
static void free_alg(struct alg_spec *a)
{
	kzfree(a->alg_string);
	kzfree(a->key);
	memset(a, 0, sizeof *a);
}

static int get_alg_and_key(const char *arg, struct alg_spec *a, char **error, char *error_inval)
{
	char *k;

	a->alg_string = kstrdup(strchr(arg, ':') + 1, GFP_KERNEL);
	if (!a->alg_string)
		goto nomem;

	k = strchr(a->alg_string, ':');
	if (k) {
		*k = 0;
		a->key_string = k + 1;
		if (strlen(a->key_string) & 1)
			goto inval;

		a->key_size = strlen(a->key_string) / 2;
		a->key = kmalloc(a->key_size, GFP_KERNEL);
		if (!a->key)
			goto nomem;
		if (hex2bin(a->key, a->key_string, a->key_size))
			goto inval;
	}

	return 0;

inval:
	*error = error_inval;
	return -EINVAL;
nomem:
	*error = "Out of memory for an argument";
	return -ENOMEM;
}

static int get_mac(struct crypto_shash **hash, struct alg_spec *a, char **error,
		   char *error_alg, char *error_key)
{
	int r;

	if (a->alg_string) {
		*hash = crypto_alloc_shash(a->alg_string, 0, 0);
		if (IS_ERR(*hash)) {
			*error = error_alg;
			r = PTR_ERR(*hash);
			*hash = NULL;
			return r;
		}

		if (a->key) {
			r = crypto_shash_setkey(*hash, a->key, a->key_size);
			if (r) {
				*error = error_key;
				return r;
			}
		} else if (crypto_shash_get_flags(*hash) & CRYPTO_TFM_NEED_KEY) {
			*error = error_key;
			return -ENOKEY;
		}
	}

	return 0;
}
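
/*
 * Allocate the in-memory journal and, when journal encryption is
 * configured, set up either the XOR pad (stream ciphers, block size 1)
 * or per-section skcipher requests with per-section IVs (block ciphers).
 */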
static int create_journal(struct dm_integrity_c *ic, char **error)
{
	int r = 0;
	unsigned i;
	__u64 journal_pages, journal_desc_size, journal_tree_size;
	unsigned char *crypt_data = NULL, *crypt_iv = NULL;
	struct skcipher_request *req = NULL;

	ic->commit_ids[0] = cpu_to_le64(0x1111111111111111ULL);
	ic->commit_ids[1] = cpu_to_le64(0x2222222222222222ULL);
	ic->commit_ids[2] = cpu_to_le64(0x3333333333333333ULL);
	ic->commit_ids[3] = cpu_to_le64(0x4444444444444444ULL);

	journal_pages = roundup((__u64)ic->journal_sections * ic->journal_section_sectors,
				PAGE_SIZE >> SECTOR_SHIFT) >> (PAGE_SHIFT - SECTOR_SHIFT);
	journal_desc_size = journal_pages * sizeof(struct page_list);
	if (journal_pages >= totalram_pages() - totalhigh_pages() || journal_desc_size > ULONG_MAX) {
		*error = "Journal doesn't fit into memory";
		r = -ENOMEM;
		goto bad;
	}
	ic->journal_pages = journal_pages;

	ic->journal = dm_integrity_alloc_page_list(ic->journal_pages);
	if (!ic->journal) {
		*error = "Could not allocate memory for journal";
		r = -ENOMEM;
		goto bad;
	}
	if (ic->journal_crypt_alg.alg_string) {
		unsigned ivsize, blocksize;
		struct journal_completion comp;

		comp.ic = ic;
		ic->journal_crypt = crypto_alloc_skcipher(ic->journal_crypt_alg.alg_string, 0, 0);
		if (IS_ERR(ic->journal_crypt)) {
			*error = "Invalid journal cipher";
			r = PTR_ERR(ic->journal_crypt);
			ic->journal_crypt = NULL;
			goto bad;
		}
		ivsize = crypto_skcipher_ivsize(ic->journal_crypt);
		blocksize = crypto_skcipher_blocksize(ic->journal_crypt);

		if (ic->journal_crypt_alg.key) {
			r = crypto_skcipher_setkey(ic->journal_crypt, ic->journal_crypt_alg.key,
						   ic->journal_crypt_alg.key_size);
			if (r) {
				*error = "Error setting encryption key";
				goto bad;
			}
		}
		DEBUG_print("cipher %s, block size %u iv size %u\n",
			    ic->journal_crypt_alg.alg_string, blocksize, ivsize);

		ic->journal_io = dm_integrity_alloc_page_list(ic->journal_pages);
		if (!ic->journal_io) {
			*error = "Could not allocate memory for journal io";
			r = -ENOMEM;
			goto bad;
		}

		if (blocksize == 1) {
			struct scatterlist *sg;

			req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
			if (!req) {
				*error = "Could not allocate crypt request";
				r = -ENOMEM;
				goto bad;
			}

			crypt_iv = kmalloc(ivsize, GFP_KERNEL);
			if (!crypt_iv) {
				*error = "Could not allocate iv";
				r = -ENOMEM;
				goto bad;
			}

			ic->journal_xor = dm_integrity_alloc_page_list(ic->journal_pages);
			if (!ic->journal_xor) {
				*error = "Could not allocate memory for journal xor";
				r = -ENOMEM;
				goto bad;
			}

			sg = kvmalloc_array(ic->journal_pages + 1,
					    sizeof(struct scatterlist),
					    GFP_KERNEL);
			if (!sg) {
				*error = "Unable to allocate sg list";
				r = -ENOMEM;
				goto bad;
			}
			sg_init_table(sg, ic->journal_pages + 1);
			for (i = 0; i < ic->journal_pages; i++) {
				char *va = lowmem_page_address(ic->journal_xor[i].page);
				clear_page(va);
				sg_set_buf(&sg[i], va, PAGE_SIZE);
			}
			sg_set_buf(&sg[i], &ic->commit_ids, sizeof ic->commit_ids);
			memset(crypt_iv, 0x00, ivsize);

			skcipher_request_set_crypt(req, sg, sg, PAGE_SIZE * ic->journal_pages + sizeof ic->commit_ids, crypt_iv);
			init_completion(&comp.comp);
			comp.in_flight = (atomic_t)ATOMIC_INIT(1);
			if (do_crypt(true, req, &comp))
				wait_for_completion(&comp.comp);
			kvfree(sg);
			r = dm_integrity_failed(ic);
			if (r) {
				*error = "Unable to encrypt journal";
				goto bad;
			}
			DEBUG_bytes(lowmem_page_address(ic->journal_xor[0].page), 64, "xor data");

			crypto_free_skcipher(ic->journal_crypt);
			ic->journal_crypt = NULL;
		} else {
			unsigned crypt_len = roundup(ivsize, blocksize);

			req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
			if (!req) {
				*error = "Could not allocate crypt request";
				r = -ENOMEM;
				goto bad;
			}

			crypt_iv = kmalloc(ivsize, GFP_KERNEL);
			if (!crypt_iv) {
				*error = "Could not allocate iv";
				r = -ENOMEM;
				goto bad;
			}

			crypt_data = kmalloc(crypt_len, GFP_KERNEL);
			if (!crypt_data) {
				*error = "Unable to allocate crypt data";
				r = -ENOMEM;
				goto bad;
			}

			ic->journal_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal);
			if (!ic->journal_scatterlist) {
				*error = "Unable to allocate sg list";
				r = -ENOMEM;
				goto bad;
			}
			ic->journal_io_scatterlist = dm_integrity_alloc_journal_scatterlist(ic, ic->journal_io);
			if (!ic->journal_io_scatterlist) {
				*error = "Unable to allocate sg list";
				r = -ENOMEM;
				goto bad;
			}
			ic->sk_requests = kvmalloc_array(ic->journal_sections,
							 sizeof(struct skcipher_request *),
							 GFP_KERNEL | __GFP_ZERO);
			if (!ic->sk_requests) {
				*error = "Unable to allocate sk requests";
				r = -ENOMEM;
				goto bad;
			}
			for (i = 0; i < ic->journal_sections; i++) {
				struct scatterlist sg;
				struct skcipher_request *section_req;
				__u32 section_le = cpu_to_le32(i);

				memset(crypt_iv, 0x00, ivsize);
				memset(crypt_data, 0x00, crypt_len);
				memcpy(crypt_data, &section_le, min((size_t)crypt_len, sizeof(section_le)));

				sg_init_one(&sg, crypt_data, crypt_len);
				skcipher_request_set_crypt(req, &sg, &sg, crypt_len, crypt_iv);
				init_completion(&comp.comp);
				comp.in_flight = (atomic_t)ATOMIC_INIT(1);
				if (do_crypt(true, req, &comp))
					wait_for_completion(&comp.comp);

				r = dm_integrity_failed(ic);
				if (r) {
					*error = "Unable to generate iv";
					goto bad;
				}

				section_req = skcipher_request_alloc(ic->journal_crypt, GFP_KERNEL);
				if (!section_req) {
					*error = "Unable to allocate crypt request";
					r = -ENOMEM;
					goto bad;
				}
				section_req->iv = kmalloc_array(ivsize, 2,
								GFP_KERNEL);
				if (!section_req->iv) {
					skcipher_request_free(section_req);
					*error = "Unable to allocate iv";
					r = -ENOMEM;
					goto bad;
				}
				memcpy(section_req->iv + ivsize, crypt_data, ivsize);
				section_req->cryptlen = (size_t)ic->journal_section_sectors << SECTOR_SHIFT;
				ic->sk_requests[i] = section_req;
				DEBUG_bytes(crypt_data, ivsize, "iv(%u)", i);
			}
		}
	}

	for (i = 0; i < N_COMMIT_IDS; i++) {
		unsigned j;
retest_commit_id:
		for (j = 0; j < i; j++) {
			if (ic->commit_ids[j] == ic->commit_ids[i]) {
				ic->commit_ids[i] = cpu_to_le64(le64_to_cpu(ic->commit_ids[i]) + 1);
				goto retest_commit_id;
			}
		}
		DEBUG_print("commit id %u: %016llx\n", i, ic->commit_ids[i]);
	}

	journal_tree_size = (__u64)ic->journal_entries * sizeof(struct journal_node);
	if (journal_tree_size > ULONG_MAX) {
		*error = "Journal doesn't fit into memory";
		r = -ENOMEM;
		goto bad;
	}
	ic->journal_tree = kvmalloc(journal_tree_size, GFP_KERNEL);
	if (!ic->journal_tree) {
		*error = "Could not allocate memory for journal tree";
		r = -ENOMEM;
	}
bad:
	kfree(crypt_data);
	kfree(crypt_iv);
	skcipher_request_free(req);

	return r;
}
/*
 * Construct an integrity mapping
 *
 * Arguments:
 *	device
 *	offset from the start of the device
 *	tag size
 *	D - direct writes, J - journal writes, R - recovery mode
 *	number of optional arguments
 *	optional arguments:
 *		journal_sectors
 *		interleave_sectors
 *		buffer_sectors
 *		journal_watermark
 *		commit_time
 *		meta_device
 *		block_size
 *		internal_hash
 *		journal_crypt
 *		journal_mac
 *		recalculate
 */
static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
	struct dm_integrity_c *ic;
	char dummy;
	int r;
	unsigned extra_args;
	struct dm_arg_set as;
	static const struct dm_arg _args[] = {
		{0, 9, "Invalid number of feature args"},
	};
	unsigned journal_sectors, interleave_sectors, buffer_sectors, journal_watermark, sync_msec;
	bool recalculate;
	bool should_write_sb;
	__u64 threshold;
	unsigned long long start;

#define DIRECT_ARGUMENTS	4

	if (argc <= DIRECT_ARGUMENTS) {
		ti->error = "Invalid argument count";
		return -EINVAL;
	}

	ic = kzalloc(sizeof(struct dm_integrity_c), GFP_KERNEL);
	if (!ic) {
		ti->error = "Cannot allocate integrity context";
		return -ENOMEM;
	}
	ti->private = ic;
	ti->per_io_data_size = sizeof(struct dm_integrity_io);

	ic->in_progress = RB_ROOT;
	INIT_LIST_HEAD(&ic->wait_list);
	init_waitqueue_head(&ic->endio_wait);
	bio_list_init(&ic->flush_bio_list);
	init_waitqueue_head(&ic->copy_to_journal_wait);
	init_completion(&ic->crypto_backoff);
	atomic64_set(&ic->number_of_mismatches, 0);

	r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &ic->dev);
	if (r) {
		ti->error = "Device lookup failed";
		goto bad;
	}

	if (sscanf(argv[1], "%llu%c", &start, &dummy) != 1 || start != (sector_t)start) {
		ti->error = "Invalid starting offset";
		r = -EINVAL;
		goto bad;
	}
	ic->start = start;

	if (strcmp(argv[2], "-")) {
		if (sscanf(argv[2], "%u%c", &ic->tag_size, &dummy) != 1 || !ic->tag_size) {
			ti->error = "Invalid tag size";
			r = -EINVAL;
			goto bad;
		}
	}

	if (!strcmp(argv[3], "J") || !strcmp(argv[3], "D") || !strcmp(argv[3], "R"))
		ic->mode = argv[3][0];
	else {
		ti->error = "Invalid mode (expecting J, D, R)";
		r = -EINVAL;
		goto bad;
	}

	journal_sectors = 0;
	interleave_sectors = DEFAULT_INTERLEAVE_SECTORS;
	buffer_sectors = DEFAULT_BUFFER_SECTORS;
	journal_watermark = DEFAULT_JOURNAL_WATERMARK;
	sync_msec = DEFAULT_SYNC_MSEC;
	recalculate = false;
	ic->sectors_per_block = 1;

	as.argc = argc - DIRECT_ARGUMENTS;
	as.argv = argv + DIRECT_ARGUMENTS;
	r = dm_read_arg_group(_args, &as, &extra_args, &ti->error);
	if (r)
		goto bad;

	while (extra_args--) {
		const char *opt_string;
		unsigned val;

		opt_string = dm_shift_arg(&as);
		if (!opt_string) {
			r = -EINVAL;
			ti->error = "Not enough feature arguments";
			goto bad;
		}
		if (sscanf(opt_string, "journal_sectors:%u%c", &val, &dummy) == 1)
			journal_sectors = val ? val : 1;
		else if (sscanf(opt_string, "interleave_sectors:%u%c", &val, &dummy) == 1)
			interleave_sectors = val;
		else if (sscanf(opt_string, "buffer_sectors:%u%c", &val, &dummy) == 1)
			buffer_sectors = val;
		else if (sscanf(opt_string, "journal_watermark:%u%c", &val, &dummy) == 1 && val <= 100)
			journal_watermark = val;
		else if (sscanf(opt_string, "commit_time:%u%c", &val, &dummy) == 1)
			sync_msec = val;
		else if (!strncmp(opt_string, "meta_device:", strlen("meta_device:"))) {
			if (ic->meta_dev) {
				dm_put_device(ti, ic->meta_dev);
				ic->meta_dev = NULL;
			}
			r = dm_get_device(ti, strchr(opt_string, ':') + 1, dm_table_get_mode(ti->table), &ic->meta_dev);
			if (r) {
				ti->error = "Device lookup failed";
				goto bad;
			}
		} else if (sscanf(opt_string, "block_size:%u%c", &val, &dummy) == 1) {
			if (val < 1 << SECTOR_SHIFT ||
			    val > MAX_SECTORS_PER_BLOCK << SECTOR_SHIFT ||
			    (val & (val - 1))) {
				r = -EINVAL;
				ti->error = "Invalid block_size argument";
				goto bad;
			}
			ic->sectors_per_block = val >> SECTOR_SHIFT;
		} else if (!strncmp(opt_string, "internal_hash:", strlen("internal_hash:"))) {
			r = get_alg_and_key(opt_string, &ic->internal_hash_alg, &ti->error,
					    "Invalid internal_hash argument");
			if (r)
				goto bad;
		} else if (!strncmp(opt_string, "journal_crypt:", strlen("journal_crypt:"))) {
			r = get_alg_and_key(opt_string, &ic->journal_crypt_alg, &ti->error,
					    "Invalid journal_crypt argument");
			if (r)
				goto bad;
		} else if (!strncmp(opt_string, "journal_mac:", strlen("journal_mac:"))) {
			r = get_alg_and_key(opt_string, &ic->journal_mac_alg, &ti->error,
					    "Invalid journal_mac argument");
			if (r)
				goto bad;
		} else if (!strcmp(opt_string, "recalculate")) {
			recalculate = true;
		} else {
			r = -EINVAL;
			ti->error = "Invalid argument";
			goto bad;
		}
	}

	ic->data_device_sectors = i_size_read(ic->dev->bdev->bd_inode) >> SECTOR_SHIFT;
	if (!ic->meta_dev)
		ic->meta_device_sectors = ic->data_device_sectors;
	else
		ic->meta_device_sectors = i_size_read(ic->meta_dev->bdev->bd_inode) >> SECTOR_SHIFT;

	if (!journal_sectors) {
		journal_sectors = min((sector_t)DEFAULT_MAX_JOURNAL_SECTORS,
				      ic->data_device_sectors >> DEFAULT_JOURNAL_SIZE_FACTOR);
	}

	if (!buffer_sectors)
		buffer_sectors = 1;
	ic->log2_buffer_sectors = min((int)__fls(buffer_sectors), 31 - SECTOR_SHIFT);

	r = get_mac(&ic->internal_hash, &ic->internal_hash_alg, &ti->error,
		    "Invalid internal hash", "Error setting internal hash key");
	if (r)
		goto bad;

	r = get_mac(&ic->journal_mac, &ic->journal_mac_alg, &ti->error,
		    "Invalid journal mac", "Error setting journal mac key");
	if (r)
		goto bad;

	if (!ic->tag_size) {
		if (!ic->internal_hash) {
			ti->error = "Unknown tag size";
			r = -EINVAL;
			goto bad;
		}
		ic->tag_size = crypto_shash_digestsize(ic->internal_hash);
	}
	if (ic->tag_size > MAX_TAG_SIZE) {
		ti->error = "Too big tag size";
		r = -EINVAL;
		goto bad;
	}
	if (!(ic->tag_size & (ic->tag_size - 1)))
		ic->log2_tag_size = __ffs(ic->tag_size);
	else
		ic->log2_tag_size = -1;

	ic->autocommit_jiffies = msecs_to_jiffies(sync_msec);
	ic->autocommit_msec = sync_msec;
	timer_setup(&ic->autocommit_timer, autocommit_fn, 0);

	ic->io = dm_io_client_create();
	if (IS_ERR(ic->io)) {
		r = PTR_ERR(ic->io);
		ic->io = NULL;
		ti->error = "Cannot allocate dm io";
		goto bad;
	}

	r = mempool_init_slab_pool(&ic->journal_io_mempool, JOURNAL_IO_MEMPOOL, journal_io_cache);
	if (r) {
		ti->error = "Cannot allocate mempool";
		goto bad;
	}

	ic->metadata_wq = alloc_workqueue("dm-integrity-metadata",
					  WQ_MEM_RECLAIM, METADATA_WORKQUEUE_MAX_ACTIVE);
	if (!ic->metadata_wq) {
		ti->error = "Cannot allocate workqueue";
		r = -ENOMEM;
		goto bad;
	}

	/*
	 * If this workqueue were percpu, it would cause bio reordering
	 * and reduced performance.
	 */
	ic->wait_wq = alloc_workqueue("dm-integrity-wait", WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
	if (!ic->wait_wq) {
		ti->error = "Cannot allocate workqueue";
		r = -ENOMEM;
		goto bad;
	}

	ic->commit_wq = alloc_workqueue("dm-integrity-commit", WQ_MEM_RECLAIM, 1);
	if (!ic->commit_wq) {
		ti->error = "Cannot allocate workqueue";
		r = -ENOMEM;
		goto bad;
	}
	INIT_WORK(&ic->commit_work, integrity_commit);

	if (ic->mode == 'J') {
		ic->writer_wq = alloc_workqueue("dm-integrity-writer", WQ_MEM_RECLAIM, 1);
		if (!ic->writer_wq) {
			ti->error = "Cannot allocate workqueue";
			r = -ENOMEM;
			goto bad;
		}
		INIT_WORK(&ic->writer_work, integrity_writer);
	}

	ic->sb = alloc_pages_exact(SB_SECTORS << SECTOR_SHIFT, GFP_KERNEL);
	if (!ic->sb) {
		r = -ENOMEM;
		ti->error = "Cannot allocate superblock area";
		goto bad;
	}

	r = sync_rw_sb(ic, REQ_OP_READ, 0);
	if (r) {
		ti->error = "Error reading superblock";
		goto bad;
	}
	should_write_sb = false;
	if (memcmp(ic->sb->magic, SB_MAGIC, 8)) {
		if (ic->mode != 'R') {
			if (memchr_inv(ic->sb, 0, SB_SECTORS << SECTOR_SHIFT)) {
				r = -EINVAL;
				ti->error = "The device is not initialized";
				goto bad;
			}
		}

		r = initialize_superblock(ic, journal_sectors, interleave_sectors);
		if (r) {
			ti->error = "Could not initialize superblock";
			goto bad;
		}
		if (ic->mode != 'R')
			should_write_sb = true;
	}

	if (!ic->sb->version || ic->sb->version > SB_VERSION_2) {
		r = -EINVAL;
		ti->error = "Unknown version";
		goto bad;
	}
	if (le16_to_cpu(ic->sb->integrity_tag_size) != ic->tag_size) {
		r = -EINVAL;
		ti->error = "Tag size doesn't match the information in superblock";
		goto bad;
	}
	if (ic->sb->log2_sectors_per_block != __ffs(ic->sectors_per_block)) {
		r = -EINVAL;
		ti->error = "Block size doesn't match the information in superblock";
		goto bad;
	}
	if (!le32_to_cpu(ic->sb->journal_sections)) {
		r = -EINVAL;
		ti->error = "Corrupted superblock, journal_sections is 0";
		goto bad;
	}
	/* make sure that ti->max_io_len doesn't overflow */
	if (!ic->meta_dev) {
		if (ic->sb->log2_interleave_sectors < MIN_LOG2_INTERLEAVE_SECTORS ||
		    ic->sb->log2_interleave_sectors > MAX_LOG2_INTERLEAVE_SECTORS) {
			r = -EINVAL;
			ti->error = "Invalid interleave_sectors in the superblock";
			goto bad;
		}
	} else {
		if (ic->sb->log2_interleave_sectors) {
			r = -EINVAL;
			ti->error = "Invalid interleave_sectors in the superblock";
			goto bad;
		}
	}
	ic->provided_data_sectors = le64_to_cpu(ic->sb->provided_data_sectors);
	if (ic->provided_data_sectors != le64_to_cpu(ic->sb->provided_data_sectors)) {
		/* test for overflow */
		r = -EINVAL;
		ti->error = "The superblock has 64-bit device size, but the kernel was compiled with 32-bit sectors";
		goto bad;
	}
	if (!!(ic->sb->flags & cpu_to_le32(SB_FLAG_HAVE_JOURNAL_MAC)) != !!ic->journal_mac_alg.alg_string) {
		r = -EINVAL;
		ti->error = "Journal mac mismatch";
		goto bad;
	}

try_smaller_buffer:
	r = calculate_device_limits(ic);
	if (r) {
		if (ic->meta_dev) {
			if (ic->log2_buffer_sectors > 3) {
				ic->log2_buffer_sectors--;
				goto try_smaller_buffer;
			}
		}
		ti->error = "The device is too small";
		goto bad;
	}
	if (!ic->meta_dev)
		ic->log2_buffer_sectors = min(ic->log2_buffer_sectors, (__u8)__ffs(ic->metadata_run));

	if (ti->len > ic->provided_data_sectors) {
		r = -EINVAL;
		ti->error = "Not enough provided sectors for requested mapping size";
		goto bad;
	}

	threshold = (__u64)ic->journal_entries * (100 - journal_watermark);
	threshold += 50;
	do_div(threshold, 100);
	ic->free_sectors_threshold = threshold;

	DEBUG_print("initialized:\n");
	DEBUG_print("	integrity_tag_size %u\n", le16_to_cpu(ic->sb->integrity_tag_size));
	DEBUG_print("	journal_entry_size %u\n", ic->journal_entry_size);
	DEBUG_print("	journal_entries_per_sector %u\n", ic->journal_entries_per_sector);
	DEBUG_print("	journal_section_entries %u\n", ic->journal_section_entries);
	DEBUG_print("	journal_section_sectors %u\n", ic->journal_section_sectors);
	DEBUG_print("	journal_sections %u\n", (unsigned)le32_to_cpu(ic->sb->journal_sections));
	DEBUG_print("	journal_entries %u\n", ic->journal_entries);
	DEBUG_print("	log2_interleave_sectors %d\n", ic->sb->log2_interleave_sectors);
	DEBUG_print("	data_device_sectors 0x%llx\n", (unsigned long long)ic->data_device_sectors);
	DEBUG_print("	initial_sectors 0x%x\n", ic->initial_sectors);
	DEBUG_print("	metadata_run 0x%x\n", ic->metadata_run);
	DEBUG_print("	log2_metadata_run %d\n", ic->log2_metadata_run);
	DEBUG_print("	provided_data_sectors 0x%llx (%llu)\n", (unsigned long long)ic->provided_data_sectors,
		    (unsigned long long)ic->provided_data_sectors);
	DEBUG_print("	log2_buffer_sectors %u\n", ic->log2_buffer_sectors);

	if (recalculate && !(ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))) {
		ic->sb->flags |= cpu_to_le32(SB_FLAG_RECALCULATING);
		ic->sb->recalc_sector = cpu_to_le64(0);
	}

	if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING)) {
		if (!ic->internal_hash) {
			r = -EINVAL;
			ti->error = "Recalculate is only valid with internal hash";
			goto bad;
		}
		ic->recalc_wq = alloc_workqueue("dm-integrity-recalc", WQ_MEM_RECLAIM, 1);
		if (!ic->recalc_wq) {
			ti->error = "Cannot allocate workqueue";
			r = -ENOMEM;
			goto bad;
		}
		INIT_WORK(&ic->recalc_work, integrity_recalc);
		ic->recalc_buffer = vmalloc(RECALC_SECTORS << SECTOR_SHIFT);
		if (!ic->recalc_buffer) {
			ti->error = "Cannot allocate buffer for recalculating";
			r = -ENOMEM;
			goto bad;
		}
		ic->recalc_tags = kvmalloc_array(RECALC_SECTORS >> ic->sb->log2_sectors_per_block,
						 ic->tag_size, GFP_KERNEL);
		if (!ic->recalc_tags) {
			ti->error = "Cannot allocate tags for recalculating";
			r = -ENOMEM;
			goto bad;
		}
	}

	ic->bufio = dm_bufio_client_create(ic->meta_dev ? ic->meta_dev->bdev : ic->dev->bdev,
					   1U << (SECTOR_SHIFT + ic->log2_buffer_sectors), 1, 0, NULL, NULL);
	if (IS_ERR(ic->bufio)) {
		r = PTR_ERR(ic->bufio);
		ti->error = "Cannot initialize dm-bufio";
		ic->bufio = NULL;
		goto bad;
	}
	dm_bufio_set_sector_offset(ic->bufio, ic->start + ic->initial_sectors);

	if (ic->mode != 'R') {
		r = create_journal(ic, &ti->error);
		if (r)
			goto bad;
	}

	if (should_write_sb) {
		init_journal(ic, 0, ic->journal_sections, 0);
		r = dm_integrity_failed(ic);
		if (unlikely(r)) {
			ti->error = "Error initializing journal";
			goto bad;
		}
		r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
		if (r) {
			ti->error = "Error initializing superblock";
			goto bad;
		}
		ic->just_formatted = true;
	}

	if (!ic->meta_dev) {
		r = dm_set_target_max_io_len(ti, 1U << ic->sb->log2_interleave_sectors);
		if (r)
			goto bad;
	}

	if (!ic->internal_hash)
		dm_integrity_set(ti, ic);

	ti->num_flush_bios = 1;
	ti->flush_supported = true;

	return 0;

bad:
	dm_integrity_dtr(ti);
	return r;
}
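
/*
 * Destructor: also used as the error path of the constructor, so every
 * resource is checked for existence before it is released.
 */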
static void dm_integrity_dtr(struct dm_target *ti)
{
	struct dm_integrity_c *ic = ti->private;

	BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
	BUG_ON(!list_empty(&ic->wait_list));

	if (ic->metadata_wq)
		destroy_workqueue(ic->metadata_wq);
	if (ic->wait_wq)
		destroy_workqueue(ic->wait_wq);
	if (ic->commit_wq)
		destroy_workqueue(ic->commit_wq);
	if (ic->writer_wq)
		destroy_workqueue(ic->writer_wq);
	if (ic->recalc_wq)
		destroy_workqueue(ic->recalc_wq);
	vfree(ic->recalc_buffer);
	kvfree(ic->recalc_tags);
	if (ic->bufio)
		dm_bufio_client_destroy(ic->bufio);
	mempool_exit(&ic->journal_io_mempool);
	if (ic->io)
		dm_io_client_destroy(ic->io);
	if (ic->dev)
		dm_put_device(ti, ic->dev);
	if (ic->meta_dev)
		dm_put_device(ti, ic->meta_dev);
	dm_integrity_free_page_list(ic->journal);
	dm_integrity_free_page_list(ic->journal_io);
	dm_integrity_free_page_list(ic->journal_xor);
	if (ic->journal_scatterlist)
		dm_integrity_free_journal_scatterlist(ic, ic->journal_scatterlist);
	if (ic->journal_io_scatterlist)
		dm_integrity_free_journal_scatterlist(ic, ic->journal_io_scatterlist);
	if (ic->sk_requests) {
		unsigned i;

		for (i = 0; i < ic->journal_sections; i++) {
			struct skcipher_request *req = ic->sk_requests[i];
			if (req) {
				kzfree(req->iv);
				skcipher_request_free(req);
			}
		}
		kvfree(ic->sk_requests);
	}
	kvfree(ic->journal_tree);
	if (ic->sb)
		free_pages_exact(ic->sb, SB_SECTORS << SECTOR_SHIFT);

	if (ic->internal_hash)
		crypto_free_shash(ic->internal_hash);
	free_alg(&ic->internal_hash_alg);

	if (ic->journal_crypt)
		crypto_free_skcipher(ic->journal_crypt);
	free_alg(&ic->journal_crypt_alg);

	if (ic->journal_mac)
		crypto_free_shash(ic->journal_mac);
	free_alg(&ic->journal_mac_alg);

	kfree(ic);
}
static struct target_type integrity_target = {
	.name = "integrity",
	.version = {1, 2, 0},
	.module = THIS_MODULE,
	.features = DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY,
	.ctr = dm_integrity_ctr,
	.dtr = dm_integrity_dtr,
	.map = dm_integrity_map,
	.postsuspend = dm_integrity_postsuspend,
	.resume = dm_integrity_resume,
	.status = dm_integrity_status,
	.iterate_devices = dm_integrity_iterate_devices,
	.io_hints = dm_integrity_io_hints,
};

static int __init dm_integrity_init(void)
{
	int r;

	journal_io_cache = kmem_cache_create("integrity_journal_io",
					     sizeof(struct journal_io), 0, 0, NULL);
	if (!journal_io_cache) {
		DMERR("can't allocate journal io cache");
		return -ENOMEM;
	}

	r = dm_register_target(&integrity_target);

	if (r < 0)
		DMERR("register failed %d", r);

	return r;
}

static void __exit dm_integrity_exit(void)
{
	dm_unregister_target(&integrity_target);
	kmem_cache_destroy(journal_io_cache);
}

module_init(dm_integrity_init);
module_exit(dm_integrity_exit);

MODULE_AUTHOR("Milan Broz");
MODULE_AUTHOR("Mikulas Patocka");
MODULE_DESCRIPTION(DM_NAME " target for integrity tags extension");
MODULE_LICENSE("GPL");