1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Copyright (C) 2018 Red Hat. All rights reserved.
4 *
5 * This file is released under the GPL.
6 */
8 #include <linux/device-mapper.h>
9 #include <linux/module.h>
10 #include <linux/init.h>
11 #include <linux/vmalloc.h>
12 #include <linux/kthread.h>
13 #include <linux/dm-io.h>
14 #include <linux/dm-kcopyd.h>
15 #include <linux/dax.h>
16 #include <linux/pfn_t.h>
17 #include <linux/libnvdimm.h>
19 #define DM_MSG_PREFIX "writecache"
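/*
 * An illustrative sketch of the constructor syntax, derived from
 * writecache_ctr() below (device names and watermark values here are
 * hypothetical, not normative documentation):
 *
 *   writecache <p|s> <origin dev> <cache dev> <block size> <#opt args> <args...>
 *
 *   dmsetup create wc --table "0 `blockdev --getsz /dev/sdb` writecache s \
 *       /dev/sdb /dev/sdc 4096 4 high_watermark 90 low_watermark 80"
 */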
21 #define HIGH_WATERMARK 50
22 #define LOW_WATERMARK 45
23 #define MAX_WRITEBACK_JOBS 0
24 #define ENDIO_LATENCY 16
25 #define WRITEBACK_LATENCY 64
26 #define AUTOCOMMIT_BLOCKS_SSD 65536
27 #define AUTOCOMMIT_BLOCKS_PMEM 64
28 #define AUTOCOMMIT_MSEC 1000
29 #define MAX_AGE_DIV 16
30 #define MAX_AGE_UNSPECIFIED -1UL
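/*
 * Illustrative arithmetic: with "max_age 10000" (10 s) the max_age_timer
 * re-arms every 10000/16 = 625 ms, and writeback targets entries older
 * than 10000 - 10000/16 = 9375 ms (see writecache_max_age_timer() and
 * writecache_writeback() below).
 */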
32 #define BITMAP_GRANULARITY 65536
33 #if BITMAP_GRANULARITY < PAGE_SIZE
34 #undef BITMAP_GRANULARITY
35 #define BITMAP_GRANULARITY PAGE_SIZE
36 #endif
38 #if IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API) && IS_ENABLED(CONFIG_DAX_DRIVER)
39 #define DM_WRITECACHE_HAS_PMEM
40 #endif
42 #ifdef DM_WRITECACHE_HAS_PMEM
43 #define pmem_assign(dest, src) \
44 do { \
45 typeof(dest) uniq = (src); \
46 memcpy_flushcache(&(dest), &uniq, sizeof(dest)); \
47 } while (0)
48 #else
49 #define pmem_assign(dest, src) ((dest) = (src))
50 #endif
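/*
 * Usage sketch: all metadata stores go through pmem_assign() so the write
 * is flushed from the CPU cache on pmem builds, e.g.
 *
 *	pmem_assign(sb(wc)->seq_count, cpu_to_le64(wc->seq_count));
 *
 * On non-pmem builds it degrades to a plain assignment.
 */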
52 #if defined(__HAVE_ARCH_MEMCPY_MCSAFE) && defined(DM_WRITECACHE_HAS_PMEM)
53 #define DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
54 #endif
56 #define MEMORY_SUPERBLOCK_MAGIC 0x23489321
57 #define MEMORY_SUPERBLOCK_VERSION 1
59 struct wc_memory_entry {
60 __le64 original_sector;
61 __le64 seq_count;
62 };
64 struct wc_memory_superblock {
65 union {
66 struct {
67 __le32 magic;
68 __le32 version;
69 __le32 block_size;
70 __le32 pad;
71 __le64 n_blocks;
72 __le64 seq_count;
73 };
74 __le64 padding[32];
75 };
76 struct wc_memory_entry entries[0];
77 };
79 struct wc_entry {
80 struct rb_node rb_node;
81 struct list_head lru;
82 unsigned short wc_list_contiguous;
83 bool write_in_progress
84 #if BITS_PER_LONG == 64
85 :1
86 #endif
87 ;
88 uint64_t index
89 #if BITS_PER_LONG == 64
90 :47
91 #endif
92 ;
93 unsigned long age;
94 #ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
95 uint64_t original_sector;
96 uint64_t seq_count;
97 #endif
98 };
100 #ifdef DM_WRITECACHE_HAS_PMEM
101 #define WC_MODE_PMEM(wc) ((wc)->pmem_mode)
102 #define WC_MODE_FUA(wc) ((wc)->writeback_fua)
103 #else
104 #define WC_MODE_PMEM(wc) false
105 #define WC_MODE_FUA(wc) false
106 #endif
107 #define WC_MODE_SORT_FREELIST(wc) (!WC_MODE_PMEM(wc))
109 struct dm_writecache {
111 struct list_head lru;
113 struct list_head freelist;
115 struct rb_root freetree;
116 struct wc_entry *current_free;
121 size_t freelist_size;
122 size_t writeback_size;
123 size_t freelist_high_watermark;
124 size_t freelist_low_watermark;
125 unsigned long max_age;
127 unsigned uncommitted_blocks;
128 unsigned autocommit_blocks;
129 unsigned max_writeback_jobs;
133 unsigned long autocommit_jiffies;
134 struct timer_list autocommit_timer;
135 struct wait_queue_head freelist_wait;
137 struct timer_list max_age_timer;
139 atomic_t bio_in_progress[2];
140 struct wait_queue_head bio_in_progress_wait[2];
142 struct dm_target *ti;
144 struct dm_dev *ssd_dev;
145 sector_t start_sector;
147 uint64_t memory_map_size;
148 size_t metadata_sectors;
152 struct wc_entry *entries;
154 unsigned char block_size_bits;
157 bool writeback_fua:1;
159 bool overwrote_committed:1;
160 bool memory_vmapped:1;
162 bool high_wm_percent_set:1;
163 bool low_wm_percent_set:1;
164 bool max_writeback_jobs_set:1;
165 bool autocommit_blocks_set:1;
166 bool autocommit_time_set:1;
167 bool writeback_fua_set:1;
168 bool flush_on_suspend:1;
171 unsigned writeback_all;
172 struct workqueue_struct *writeback_wq;
173 struct work_struct writeback_work;
174 struct work_struct flush_work;
176 struct dm_io_client *dm_io;
178 raw_spinlock_t endio_list_lock;
179 struct list_head endio_list;
180 struct task_struct *endio_thread;
182 struct task_struct *flush_thread;
183 struct bio_list flush_list;
185 struct dm_kcopyd_client *dm_kcopyd;
186 unsigned long *dirty_bitmap;
187 unsigned dirty_bitmap_size;
189 struct bio_set bio_set;
193 #define WB_LIST_INLINE 16
195 struct writeback_struct {
196 struct list_head endio_entry;
197 struct dm_writecache *wc;
198 struct wc_entry **wc_list;
199 unsigned wc_list_n;
200 struct wc_entry *wc_list_inline[WB_LIST_INLINE];
201 struct bio bio;
202 };
204 struct copy_struct {
205 struct list_head endio_entry;
206 struct dm_writecache *wc;
207 struct wc_entry *e;
208 unsigned n_entries;
209 int error;
210 };
212 DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(dm_writecache_throttle,
213 "A percentage of time allocated for data copying");
215 static void wc_lock(struct dm_writecache *wc)
217 mutex_lock(&wc->lock);
220 static void wc_unlock(struct dm_writecache *wc)
222 mutex_unlock(&wc->lock);
225 #ifdef DM_WRITECACHE_HAS_PMEM
226 static int persistent_memory_claim(struct dm_writecache *wc)
236 wc->memory_vmapped = false;
238 s = wc->memory_map_size;
244 if (p != s >> PAGE_SHIFT) {
249 offset = get_start_sect(wc->ssd_dev->bdev);
250 if (offset & (PAGE_SIZE / 512 - 1)) {
254 offset >>= PAGE_SHIFT - 9;
256 id = dax_read_lock();
258 da = dax_direct_access(wc->ssd_dev->dax_dev, offset, p, &wc->memory_map, &pfn);
260 wc->memory_map = NULL;
264 if (!pfn_t_has_page(pfn)) {
265 wc->memory_map = NULL;
271 wc->memory_map = NULL;
272 pages = kvmalloc_array(p, sizeof(struct page *), GFP_KERNEL);
280 daa = dax_direct_access(wc->ssd_dev->dax_dev, offset + i, p - i,
283 r = daa ? daa : -EINVAL;
286 if (!pfn_t_has_page(pfn)) {
290 while (daa-- && i < p) {
291 pages[i++] = pfn_t_to_page(pfn);
297 wc->memory_map = vmap(pages, p, VM_MAP, PAGE_KERNEL);
298 if (!wc->memory_map) {
303 wc->memory_vmapped = true;
308 wc->memory_map += (size_t)wc->start_sector << SECTOR_SHIFT;
309 wc->memory_map_size -= (size_t)wc->start_sector << SECTOR_SHIFT;
320 static int persistent_memory_claim(struct dm_writecache *wc)
326 static void persistent_memory_release(struct dm_writecache *wc)
328 if (wc->memory_vmapped)
329 vunmap(wc->memory_map - ((size_t)wc->start_sector << SECTOR_SHIFT));
332 static struct page *persistent_memory_page(void *addr)
334 if (is_vmalloc_addr(addr))
335 return vmalloc_to_page(addr);
337 return virt_to_page(addr);
340 static unsigned persistent_memory_page_offset(void *addr)
342 return (unsigned long)addr & (PAGE_SIZE - 1);
345 static void persistent_memory_flush_cache(void *ptr, size_t size)
347 if (is_vmalloc_addr(ptr))
348 flush_kernel_vmap_range(ptr, size);
351 static void persistent_memory_invalidate_cache(void *ptr, size_t size)
353 if (is_vmalloc_addr(ptr))
354 invalidate_kernel_vmap_range(ptr, size);
357 static struct wc_memory_superblock *sb(struct dm_writecache *wc)
359 return wc->memory_map;
362 static struct wc_memory_entry *memory_entry(struct dm_writecache *wc, struct wc_entry *e)
364 return &sb(wc)->entries[e->index];
367 static void *memory_data(struct dm_writecache *wc, struct wc_entry *e)
369 return (char *)wc->block_start + (e->index << wc->block_size_bits);
372 static sector_t cache_sector(struct dm_writecache *wc, struct wc_entry *e)
374 return wc->start_sector + wc->metadata_sectors +
375 ((sector_t)e->index << (wc->block_size_bits - SECTOR_SHIFT));
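/*
 * Layout sketch with illustrative numbers: for a 4096-byte block size
 * (block_size_bits == 12), entry i's data sits at block_start + (i << 12)
 * in persistent memory, or at sector
 * start_sector + metadata_sectors + (i << 3) of the cache device
 * (4096 >> SECTOR_SHIFT == 8 sectors per block).
 */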
378 static uint64_t read_original_sector(struct dm_writecache *wc, struct wc_entry *e)
380 #ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
381 return e->original_sector;
382 #else
383 return le64_to_cpu(memory_entry(wc, e)->original_sector);
384 #endif
387 static uint64_t read_seq_count(struct dm_writecache *wc, struct wc_entry *e)
389 #ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
390 return e->seq_count;
391 #else
392 return le64_to_cpu(memory_entry(wc, e)->seq_count);
393 #endif
396 static void clear_seq_count(struct dm_writecache *wc, struct wc_entry *e)
398 #ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
399 e->seq_count = -1;
400 #endif
401 pmem_assign(memory_entry(wc, e)->seq_count, cpu_to_le64(-1));
404 static void write_original_sector_seq_count(struct dm_writecache *wc, struct wc_entry *e,
405 uint64_t original_sector, uint64_t seq_count)
407 struct wc_memory_entry me;
408 #ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
409 e->original_sector = original_sector;
410 e->seq_count = seq_count;
411 #endif
412 me.original_sector = cpu_to_le64(original_sector);
413 me.seq_count = cpu_to_le64(seq_count);
414 pmem_assign(*memory_entry(wc, e), me);
417 #define writecache_error(wc, err, msg, arg...) \
418 do { \
419 if (!cmpxchg(&(wc)->error, 0, err)) \
420 DMERR(msg, ##arg); \
421 wake_up(&(wc)->freelist_wait); \
422 } while (0)
424 #define writecache_has_error(wc) (unlikely(READ_ONCE((wc)->error)))
426 static void writecache_flush_all_metadata(struct dm_writecache *wc)
428 if (!WC_MODE_PMEM(wc))
429 memset(wc->dirty_bitmap, -1, wc->dirty_bitmap_size);
432 static void writecache_flush_region(struct dm_writecache *wc, void *ptr, size_t size)
434 if (!WC_MODE_PMEM(wc))
435 __set_bit(((char *)ptr - (char *)wc->memory_map) / BITMAP_GRANULARITY,
439 static void writecache_disk_flush(struct dm_writecache *wc, struct dm_dev *dev);
441 struct io_notify {
442 struct dm_writecache *wc;
443 struct completion c;
444 atomic_t count;
445 };
447 static void writecache_notify_io(unsigned long error, void *context)
449 struct io_notify *endio = context;
451 if (unlikely(error != 0))
452 writecache_error(endio->wc, -EIO, "error writing metadata");
453 BUG_ON(atomic_read(&endio->count) <= 0);
454 if (atomic_dec_and_test(&endio->count))
458 static void writecache_wait_for_ios(struct dm_writecache *wc, int direction)
460 wait_event(wc->bio_in_progress_wait[direction],
461 !atomic_read(&wc->bio_in_progress[direction]));
464 static void ssd_commit_flushed(struct dm_writecache *wc, bool wait_for_ios)
466 struct dm_io_region region;
467 struct dm_io_request req;
468 struct io_notify endio = {
469 wc,
470 COMPLETION_INITIALIZER_ONSTACK(endio.c),
471 ATOMIC_INIT(1),
472 };
473 unsigned bitmap_bits = wc->dirty_bitmap_size * 8;
478 i = find_next_bit(wc->dirty_bitmap, bitmap_bits, i);
479 if (unlikely(i == bitmap_bits))
481 j = find_next_zero_bit(wc->dirty_bitmap, bitmap_bits, i);
483 region.bdev = wc->ssd_dev->bdev;
484 region.sector = (sector_t)i * (BITMAP_GRANULARITY >> SECTOR_SHIFT);
485 region.count = (sector_t)(j - i) * (BITMAP_GRANULARITY >> SECTOR_SHIFT);
487 if (unlikely(region.sector >= wc->metadata_sectors))
489 if (unlikely(region.sector + region.count > wc->metadata_sectors))
490 region.count = wc->metadata_sectors - region.sector;
492 region.sector += wc->start_sector;
493 atomic_inc(&endio.count);
494 req.bi_op = REQ_OP_WRITE;
495 req.bi_op_flags = REQ_SYNC;
496 req.mem.type = DM_IO_VMA;
497 req.mem.ptr.vma = (char *)wc->memory_map + (size_t)i * BITMAP_GRANULARITY;
498 req.client = wc->dm_io;
499 req.notify.fn = writecache_notify_io;
500 req.notify.context = &endio;
502 /* writing via async dm-io (implied by notify.fn above) won't return an error */
503 (void) dm_io(&req, 1, &region, NULL);
507 writecache_notify_io(0, &endio);
508 wait_for_completion_io(&endio.c);
511 writecache_wait_for_ios(wc, WRITE);
513 writecache_disk_flush(wc, wc->ssd_dev);
515 memset(wc->dirty_bitmap, 0, wc->dirty_bitmap_size);
518 static void ssd_commit_superblock(struct dm_writecache *wc)
521 struct dm_io_region region;
522 struct dm_io_request req;
524 region.bdev = wc->ssd_dev->bdev;
525 region.sector = 0;
526 region.count = PAGE_SIZE;
528 if (unlikely(region.sector + region.count > wc->metadata_sectors))
529 region.count = wc->metadata_sectors - region.sector;
531 region.sector += wc->start_sector;
533 req.bi_op = REQ_OP_WRITE;
534 req.bi_op_flags = REQ_SYNC | REQ_FUA;
535 req.mem.type = DM_IO_VMA;
536 req.mem.ptr.vma = (char *)wc->memory_map;
537 req.client = wc->dm_io;
538 req.notify.fn = NULL;
539 req.notify.context = NULL;
541 r = dm_io(&req, 1, &region, NULL);
543 writecache_error(wc, r, "error writing superblock");
546 static void writecache_commit_flushed(struct dm_writecache *wc, bool wait_for_ios)
548 if (WC_MODE_PMEM(wc))
551 ssd_commit_flushed(wc, wait_for_ios);
554 static void writecache_disk_flush(struct dm_writecache *wc, struct dm_dev *dev)
557 struct dm_io_region region;
558 struct dm_io_request req;
560 region.bdev = dev->bdev;
561 region.sector = 0;
562 region.count = 0;
563 req.bi_op = REQ_OP_WRITE;
564 req.bi_op_flags = REQ_PREFLUSH;
565 req.mem.type = DM_IO_KMEM;
566 req.mem.ptr.addr = NULL;
567 req.client = wc->dm_io;
568 req.notify.fn = NULL;
570 r = dm_io(&req, 1, &region, NULL);
572 writecache_error(wc, r, "error flushing metadata: %d", r);
575 #define WFE_RETURN_FOLLOWING 1
576 #define WFE_LOWEST_SEQ 2
578 static struct wc_entry *writecache_find_entry(struct dm_writecache *wc,
579 uint64_t block, int flags)
582 struct rb_node *node = wc->tree.rb_node;
588 e = container_of(node, struct wc_entry, rb_node);
589 if (read_original_sector(wc, e) == block)
592 node = (read_original_sector(wc, e) >= block ?
593 e->rb_node.rb_left : e->rb_node.rb_right);
594 if (unlikely(!node)) {
595 if (!(flags & WFE_RETURN_FOLLOWING))
597 if (read_original_sector(wc, e) >= block) {
600 node = rb_next(&e->rb_node);
603 e = container_of(node, struct wc_entry, rb_node);
611 if (flags & WFE_LOWEST_SEQ)
612 node = rb_prev(&e->rb_node);
614 node = rb_next(&e->rb_node);
617 e2 = container_of(node, struct wc_entry, rb_node);
618 if (read_original_sector(wc, e2) != block)
624 static void writecache_insert_entry(struct dm_writecache *wc, struct wc_entry *ins)
627 struct rb_node **node = &wc->tree.rb_node, *parent = NULL;
630 e = container_of(*node, struct wc_entry, rb_node);
631 parent = &e->rb_node;
632 if (read_original_sector(wc, e) > read_original_sector(wc, ins))
633 node = &parent->rb_left;
635 node = &parent->rb_right;
637 rb_link_node(&ins->rb_node, parent, node);
638 rb_insert_color(&ins->rb_node, &wc->tree);
639 list_add(&ins->lru, &wc->lru);
643 static void writecache_unlink(struct dm_writecache *wc, struct wc_entry *e)
646 rb_erase(&e->rb_node, &wc->tree);
649 static void writecache_add_to_freelist(struct dm_writecache *wc, struct wc_entry *e)
651 if (WC_MODE_SORT_FREELIST(wc)) {
652 struct rb_node **node = &wc->freetree.rb_node, *parent = NULL;
653 if (unlikely(!*node))
654 wc->current_free = e;
657 if (&e->rb_node < *node)
658 node = &parent->rb_left;
660 node = &parent->rb_right;
662 rb_link_node(&e->rb_node, parent, node);
663 rb_insert_color(&e->rb_node, &wc->freetree);
665 list_add_tail(&e->lru, &wc->freelist);
670 static inline void writecache_verify_watermark(struct dm_writecache *wc)
672 if (unlikely(wc->freelist_size + wc->writeback_size <= wc->freelist_high_watermark))
673 queue_work(wc->writeback_wq, &wc->writeback_work);
676 static void writecache_max_age_timer(struct timer_list *t)
678 struct dm_writecache *wc = from_timer(wc, t, max_age_timer);
680 if (!dm_suspended(wc->ti) && !writecache_has_error(wc)) {
681 queue_work(wc->writeback_wq, &wc->writeback_work);
682 mod_timer(&wc->max_age_timer, jiffies + wc->max_age / MAX_AGE_DIV);
686 static struct wc_entry *writecache_pop_from_freelist(struct dm_writecache *wc, sector_t expected_sector)
690 if (WC_MODE_SORT_FREELIST(wc)) {
691 struct rb_node *next;
692 if (unlikely(!wc->current_free))
694 e = wc->current_free;
695 if (expected_sector != (sector_t)-1 && unlikely(cache_sector(wc, e) != expected_sector))
697 next = rb_next(&e->rb_node);
698 rb_erase(&e->rb_node, &wc->freetree);
700 next = rb_first(&wc->freetree);
701 wc->current_free = next ? container_of(next, struct wc_entry, rb_node) : NULL;
703 if (unlikely(list_empty(&wc->freelist)))
705 e = container_of(wc->freelist.next, struct wc_entry, lru);
706 if (expected_sector != (sector_t)-1 && unlikely(cache_sector(wc, e) != expected_sector))
712 writecache_verify_watermark(wc);
717 static void writecache_free_entry(struct dm_writecache *wc, struct wc_entry *e)
719 writecache_unlink(wc, e);
720 writecache_add_to_freelist(wc, e);
721 clear_seq_count(wc, e);
722 writecache_flush_region(wc, memory_entry(wc, e), sizeof(struct wc_memory_entry));
723 if (unlikely(waitqueue_active(&wc->freelist_wait)))
724 wake_up(&wc->freelist_wait);
727 static void writecache_wait_on_freelist(struct dm_writecache *wc)
731 prepare_to_wait(&wc->freelist_wait, &wait, TASK_UNINTERRUPTIBLE);
734 finish_wait(&wc->freelist_wait, &wait);
738 static void writecache_poison_lists(struct dm_writecache *wc)
741 * Catch incorrect access to these values while the device is suspended.
743 memset(&wc->tree, -1, sizeof wc->tree);
744 wc->lru.next = LIST_POISON1;
745 wc->lru.prev = LIST_POISON2;
746 wc->freelist.next = LIST_POISON1;
747 wc->freelist.prev = LIST_POISON2;
750 static void writecache_flush_entry(struct dm_writecache *wc, struct wc_entry *e)
752 writecache_flush_region(wc, memory_entry(wc, e), sizeof(struct wc_memory_entry));
753 if (WC_MODE_PMEM(wc))
754 writecache_flush_region(wc, memory_data(wc, e), wc->block_size);
757 static bool writecache_entry_is_committed(struct dm_writecache *wc, struct wc_entry *e)
759 return read_seq_count(wc, e) < wc->seq_count;
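/*
 * Commit semantics sketch: an entry is committed once its seq_count is
 * strictly below the superblock's. E.g. with wc->seq_count == 6, entries
 * stamped 5 or lower are committed; entries stamped 6 remain volatile
 * until writecache_flush() persists them and bumps the counter to 7.
 */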
762 static void writecache_flush(struct dm_writecache *wc)
764 struct wc_entry *e, *e2;
765 bool need_flush_after_free;
767 wc->uncommitted_blocks = 0;
768 del_timer(&wc->autocommit_timer);
770 if (list_empty(&wc->lru))
773 e = container_of(wc->lru.next, struct wc_entry, lru);
774 if (writecache_entry_is_committed(wc, e)) {
775 if (wc->overwrote_committed) {
776 writecache_wait_for_ios(wc, WRITE);
777 writecache_disk_flush(wc, wc->ssd_dev);
778 wc->overwrote_committed = false;
783 writecache_flush_entry(wc, e);
784 if (unlikely(e->lru.next == &wc->lru))
786 e2 = container_of(e->lru.next, struct wc_entry, lru);
787 if (writecache_entry_is_committed(wc, e2))
792 writecache_commit_flushed(wc, true);
795 pmem_assign(sb(wc)->seq_count, cpu_to_le64(wc->seq_count));
796 if (WC_MODE_PMEM(wc))
797 writecache_commit_flushed(wc, false);
799 ssd_commit_superblock(wc);
801 wc->overwrote_committed = false;
803 need_flush_after_free = false;
805 /* Free another committed entry with lower seq-count */
806 struct rb_node *rb_node = rb_prev(&e->rb_node);
809 e2 = container_of(rb_node, struct wc_entry, rb_node);
810 if (read_original_sector(wc, e2) == read_original_sector(wc, e) &&
811 likely(!e2->write_in_progress)) {
812 writecache_free_entry(wc, e2);
813 need_flush_after_free = true;
816 if (unlikely(e->lru.prev == &wc->lru))
818 e = container_of(e->lru.prev, struct wc_entry, lru);
822 if (need_flush_after_free)
823 writecache_commit_flushed(wc, false);
826 static void writecache_flush_work(struct work_struct *work)
828 struct dm_writecache *wc = container_of(work, struct dm_writecache, flush_work);
831 writecache_flush(wc);
835 static void writecache_autocommit_timer(struct timer_list *t)
837 struct dm_writecache *wc = from_timer(wc, t, autocommit_timer);
838 if (!writecache_has_error(wc))
839 queue_work(wc->writeback_wq, &wc->flush_work);
842 static void writecache_schedule_autocommit(struct dm_writecache *wc)
844 if (!timer_pending(&wc->autocommit_timer))
845 mod_timer(&wc->autocommit_timer, jiffies + wc->autocommit_jiffies);
848 static void writecache_discard(struct dm_writecache *wc, sector_t start, sector_t end)
851 bool discarded_something = false;
853 e = writecache_find_entry(wc, start, WFE_RETURN_FOLLOWING | WFE_LOWEST_SEQ);
857 while (read_original_sector(wc, e) < end) {
858 struct rb_node *node = rb_next(&e->rb_node);
860 if (likely(!e->write_in_progress)) {
861 if (!discarded_something) {
862 if (!WC_MODE_PMEM(wc)) {
863 writecache_wait_for_ios(wc, READ);
864 writecache_wait_for_ios(wc, WRITE);
866 discarded_something = true;
868 if (!writecache_entry_is_committed(wc, e))
869 wc->uncommitted_blocks--;
870 writecache_free_entry(wc, e);
876 e = container_of(node, struct wc_entry, rb_node);
879 if (discarded_something)
880 writecache_commit_flushed(wc, false);
883 static bool writecache_wait_for_writeback(struct dm_writecache *wc)
885 if (wc->writeback_size) {
886 writecache_wait_on_freelist(wc);
892 static void writecache_suspend(struct dm_target *ti)
894 struct dm_writecache *wc = ti->private;
895 bool flush_on_suspend;
897 del_timer_sync(&wc->autocommit_timer);
898 del_timer_sync(&wc->max_age_timer);
901 writecache_flush(wc);
902 flush_on_suspend = wc->flush_on_suspend;
903 if (flush_on_suspend) {
904 wc->flush_on_suspend = false;
906 queue_work(wc->writeback_wq, &wc->writeback_work);
910 drain_workqueue(wc->writeback_wq);
913 if (flush_on_suspend)
915 while (writecache_wait_for_writeback(wc));
917 if (WC_MODE_PMEM(wc))
918 persistent_memory_flush_cache(wc->memory_map, wc->memory_map_size);
920 writecache_poison_lists(wc);
925 static int writecache_alloc_entries(struct dm_writecache *wc)
931 wc->entries = vmalloc(array_size(sizeof(struct wc_entry), wc->n_blocks));
934 for (b = 0; b < wc->n_blocks; b++) {
935 struct wc_entry *e = &wc->entries[b];
937 e->write_in_progress = false;
944 static int writecache_read_metadata(struct dm_writecache *wc, sector_t n_sectors)
946 struct dm_io_region region;
947 struct dm_io_request req;
949 region.bdev = wc->ssd_dev->bdev;
950 region.sector = wc->start_sector;
951 region.count = n_sectors;
952 req.bi_op = REQ_OP_READ;
953 req.bi_op_flags = REQ_SYNC;
954 req.mem.type = DM_IO_VMA;
955 req.mem.ptr.vma = (char *)wc->memory_map;
956 req.client = wc->dm_io;
957 req.notify.fn = NULL;
959 return dm_io(&req, 1, &region, NULL);
962 static void writecache_resume(struct dm_target *ti)
964 struct dm_writecache *wc = ti->private;
966 bool need_flush = false;
972 if (WC_MODE_PMEM(wc)) {
973 persistent_memory_invalidate_cache(wc->memory_map, wc->memory_map_size);
975 r = writecache_read_metadata(wc, wc->metadata_sectors);
977 size_t sb_entries_offset;
978 writecache_error(wc, r, "unable to read metadata: %d", r);
979 sb_entries_offset = offsetof(struct wc_memory_superblock, entries);
980 memset((char *)wc->memory_map + sb_entries_offset, -1,
981 (wc->metadata_sectors << SECTOR_SHIFT) - sb_entries_offset);
986 INIT_LIST_HEAD(&wc->lru);
987 if (WC_MODE_SORT_FREELIST(wc)) {
988 wc->freetree = RB_ROOT;
989 wc->current_free = NULL;
991 INIT_LIST_HEAD(&wc->freelist);
993 wc->freelist_size = 0;
995 r = memcpy_mcsafe(&sb_seq_count, &sb(wc)->seq_count, sizeof(uint64_t));
997 writecache_error(wc, r, "hardware memory error when reading superblock: %d", r);
998 sb_seq_count = cpu_to_le64(0);
1000 wc->seq_count = le64_to_cpu(sb_seq_count);
1002 #ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
1003 for (b = 0; b < wc->n_blocks; b++) {
1004 struct wc_entry *e = &wc->entries[b];
1005 struct wc_memory_entry wme;
1006 if (writecache_has_error(wc)) {
1007 e->original_sector = -1;
1011 r = memcpy_mcsafe(&wme, memory_entry(wc, e), sizeof(struct wc_memory_entry));
1013 writecache_error(wc, r, "hardware memory error when reading metadata entry %lu: %d",
1014 (unsigned long)b, r);
1015 e->original_sector = -1;
1018 e->original_sector = le64_to_cpu(wme.original_sector);
1019 e->seq_count = le64_to_cpu(wme.seq_count);
1024 for (b = 0; b < wc->n_blocks; b++) {
1025 struct wc_entry *e = &wc->entries[b];
1026 if (!writecache_entry_is_committed(wc, e)) {
1027 if (read_seq_count(wc, e) != -1) {
1029 clear_seq_count(wc, e);
1032 writecache_add_to_freelist(wc, e);
1034 struct wc_entry *old;
1036 old = writecache_find_entry(wc, read_original_sector(wc, e), 0);
1038 writecache_insert_entry(wc, e);
1040 if (read_seq_count(wc, old) == read_seq_count(wc, e)) {
1041 writecache_error(wc, -EINVAL,
1042 "two identical entries, position %llu, sector %llu, sequence %llu",
1043 (unsigned long long)b, (unsigned long long)read_original_sector(wc, e),
1044 (unsigned long long)read_seq_count(wc, e));
1046 if (read_seq_count(wc, old) > read_seq_count(wc, e)) {
1049 writecache_free_entry(wc, old);
1050 writecache_insert_entry(wc, e);
1059 writecache_flush_all_metadata(wc);
1060 writecache_commit_flushed(wc, false);
1063 writecache_verify_watermark(wc);
1065 if (wc->max_age != MAX_AGE_UNSPECIFIED)
1066 mod_timer(&wc->max_age_timer, jiffies + wc->max_age / MAX_AGE_DIV);
1071 static int process_flush_mesg(unsigned argc, char **argv, struct dm_writecache *wc)
1077 if (dm_suspended(wc->ti)) {
1081 if (writecache_has_error(wc)) {
1086 writecache_flush(wc);
1087 wc->writeback_all++;
1088 queue_work(wc->writeback_wq, &wc->writeback_work);
1091 flush_workqueue(wc->writeback_wq);
1094 wc->writeback_all--;
1095 if (writecache_has_error(wc)) {
1104 static int process_flush_on_suspend_mesg(unsigned argc, char **argv, struct dm_writecache *wc)
1110 wc->flush_on_suspend = true;
1116 static void activate_cleaner(struct dm_writecache *wc)
1118 wc->flush_on_suspend = true;
1120 wc->freelist_high_watermark = wc->n_blocks;
1121 wc->freelist_low_watermark = wc->n_blocks;
1124 static int process_cleaner_mesg(unsigned argc, char **argv, struct dm_writecache *wc)
1130 activate_cleaner(wc);
1131 if (!dm_suspended(wc->ti))
1132 writecache_verify_watermark(wc);
1138 static int writecache_message(struct dm_target *ti, unsigned argc, char **argv,
1139 char *result, unsigned maxlen)
1142 struct dm_writecache *wc = ti->private;
1144 if (!strcasecmp(argv[0], "flush"))
1145 r = process_flush_mesg(argc, argv, wc);
1146 else if (!strcasecmp(argv[0], "flush_on_suspend"))
1147 r = process_flush_on_suspend_mesg(argc, argv, wc);
1148 else if (!strcasecmp(argv[0], "cleaner"))
1149 r = process_cleaner_mesg(argc, argv, wc);
1151 DMERR("unrecognised message received: %s", argv[0]);
1156 static void memcpy_flushcache_optimized(void *dest, void *source, size_t size)
1157 {
1158 /*
1159 * clflushopt performs better with block size 1024, 2048, 4096
1160 * non-temporal stores perform better with block size 512
1161 *
1162 * block size 512 1024 2048 4096
1163 * movnti 496 MB/s 642 MB/s 725 MB/s 744 MB/s
1164 * clflushopt 373 MB/s 688 MB/s 1.1 GB/s 1.2 GB/s
1165 *
1166 * We see that movnti performs better for 512-byte blocks, and
1167 * clflushopt performs better for 1024-byte and larger blocks. So, we
1168 * prefer clflushopt for sizes >= 768.
1169 *
1170 * NOTE: this happens to be the case now (with dm-writecache's single
1171 * threaded model) but re-evaluate this once memcpy_flushcache() is
1172 * enabled to use movdir64b which might invalidate this performance
1173 * advantage seen with cache-allocating-writes plus flushing.
1174 */
1175 #ifdef CONFIG_X86
1176 if (static_cpu_has(X86_FEATURE_CLFLUSHOPT) &&
1177 likely(boot_cpu_data.x86_clflush_size == 64) &&
1178 likely(size >= 768)) {
1179 do {
1180 memcpy((void *)dest, (void *)source, 64);
1181 clflushopt((void *)dest);
1182 dest += 64;
1183 source += 64;
1184 size -= 64;
1185 } while (size >= 64);
1186 return;
1187 }
1188 #endif
1189 memcpy_flushcache(dest, source, size);
1192 static void bio_copy_block(struct dm_writecache *wc, struct bio *bio, void *data)
1195 unsigned long flags;
1197 int rw = bio_data_dir(bio);
1198 unsigned remaining_size = wc->block_size;
1201 struct bio_vec bv = bio_iter_iovec(bio, bio->bi_iter);
1202 buf = bvec_kmap_irq(&bv, &flags);
1204 if (unlikely(size > remaining_size))
1205 size = remaining_size;
1209 r = memcpy_mcsafe(buf, data, size);
1210 flush_dcache_page(bio_page(bio));
1212 writecache_error(wc, r, "hardware memory error when reading data: %d", r);
1213 bio->bi_status = BLK_STS_IOERR;
1216 flush_dcache_page(bio_page(bio));
1217 memcpy_flushcache_optimized(data, buf, size);
1220 bvec_kunmap_irq(buf, &flags);
1222 data = (char *)data + size;
1223 remaining_size -= size;
1224 bio_advance(bio, size);
1225 } while (unlikely(remaining_size));
1228 static int writecache_flush_thread(void *data)
1230 struct dm_writecache *wc = data;
1236 bio = bio_list_pop(&wc->flush_list);
1238 set_current_state(TASK_INTERRUPTIBLE);
1241 if (unlikely(kthread_should_stop())) {
1242 set_current_state(TASK_RUNNING);
1250 if (bio_op(bio) == REQ_OP_DISCARD) {
1251 writecache_discard(wc, bio->bi_iter.bi_sector,
1252 bio_end_sector(bio));
1254 bio_set_dev(bio, wc->dev->bdev);
1255 submit_bio_noacct(bio);
1257 writecache_flush(wc);
1259 if (writecache_has_error(wc))
1260 bio->bi_status = BLK_STS_IOERR;
1268 static void writecache_offload_bio(struct dm_writecache *wc, struct bio *bio)
1270 if (bio_list_empty(&wc->flush_list))
1271 wake_up_process(wc->flush_thread);
1272 bio_list_add(&wc->flush_list, bio);
1275 static int writecache_map(struct dm_target *ti, struct bio *bio)
1278 struct dm_writecache *wc = ti->private;
1280 bio->bi_private = NULL;
1284 if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
1285 if (writecache_has_error(wc))
1287 if (WC_MODE_PMEM(wc)) {
1288 writecache_flush(wc);
1289 if (writecache_has_error(wc))
1293 writecache_offload_bio(wc, bio);
1298 bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
1300 if (unlikely((((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) &
1301 (wc->block_size / 512 - 1)) != 0)) {
1302 DMERR("I/O is not aligned, sector %llu, size %u, block size %u",
1303 (unsigned long long)bio->bi_iter.bi_sector,
1304 bio->bi_iter.bi_size, wc->block_size);
1308 if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
1309 if (writecache_has_error(wc))
1311 if (WC_MODE_PMEM(wc)) {
1312 writecache_discard(wc, bio->bi_iter.bi_sector, bio_end_sector(bio));
1313 goto unlock_remap_origin;
1315 writecache_offload_bio(wc, bio);
1320 if (bio_data_dir(bio) == READ) {
1322 e = writecache_find_entry(wc, bio->bi_iter.bi_sector, WFE_RETURN_FOLLOWING);
1323 if (e && read_original_sector(wc, e) == bio->bi_iter.bi_sector) {
1324 if (WC_MODE_PMEM(wc)) {
1325 bio_copy_block(wc, bio, memory_data(wc, e));
1326 if (bio->bi_iter.bi_size)
1327 goto read_next_block;
1330 dm_accept_partial_bio(bio, wc->block_size >> SECTOR_SHIFT);
1331 bio_set_dev(bio, wc->ssd_dev->bdev);
1332 bio->bi_iter.bi_sector = cache_sector(wc, e);
1333 if (!writecache_entry_is_committed(wc, e))
1334 writecache_wait_for_ios(wc, WRITE);
1339 sector_t next_boundary =
1340 read_original_sector(wc, e) - bio->bi_iter.bi_sector;
1341 if (next_boundary < bio->bi_iter.bi_size >> SECTOR_SHIFT) {
1342 dm_accept_partial_bio(bio, next_boundary);
1345 goto unlock_remap_origin;
1349 bool found_entry = false;
1350 if (writecache_has_error(wc))
1352 e = writecache_find_entry(wc, bio->bi_iter.bi_sector, 0);
1354 if (!writecache_entry_is_committed(wc, e))
1356 if (!WC_MODE_PMEM(wc) && !e->write_in_progress) {
1357 wc->overwrote_committed = true;
1362 if (unlikely(wc->cleaner))
1365 e = writecache_pop_from_freelist(wc, (sector_t)-1);
1369 e = writecache_find_entry(wc, bio->bi_iter.bi_sector, WFE_RETURN_FOLLOWING);
1371 sector_t next_boundary = read_original_sector(wc, e) - bio->bi_iter.bi_sector;
1372 BUG_ON(!next_boundary);
1373 if (next_boundary < bio->bi_iter.bi_size >> SECTOR_SHIFT) {
1374 dm_accept_partial_bio(bio, next_boundary);
1377 goto unlock_remap_origin;
1379 writecache_wait_on_freelist(wc);
1382 write_original_sector_seq_count(wc, e, bio->bi_iter.bi_sector, wc->seq_count);
1383 writecache_insert_entry(wc, e);
1384 wc->uncommitted_blocks++;
1386 if (WC_MODE_PMEM(wc)) {
1387 bio_copy_block(wc, bio, memory_data(wc, e));
1389 unsigned bio_size = wc->block_size;
1390 sector_t start_cache_sec = cache_sector(wc, e);
1391 sector_t current_cache_sec = start_cache_sec + (bio_size >> SECTOR_SHIFT);
1393 while (bio_size < bio->bi_iter.bi_size) {
1394 struct wc_entry *f = writecache_pop_from_freelist(wc, current_cache_sec);
1397 write_original_sector_seq_count(wc, f, bio->bi_iter.bi_sector +
1398 (bio_size >> SECTOR_SHIFT), wc->seq_count);
1399 writecache_insert_entry(wc, f);
1400 wc->uncommitted_blocks++;
1401 bio_size += wc->block_size;
1402 current_cache_sec += wc->block_size >> SECTOR_SHIFT;
1405 bio_set_dev(bio, wc->ssd_dev->bdev);
1406 bio->bi_iter.bi_sector = start_cache_sec;
1407 dm_accept_partial_bio(bio, bio_size >> SECTOR_SHIFT);
1409 if (unlikely(wc->uncommitted_blocks >= wc->autocommit_blocks)) {
1410 wc->uncommitted_blocks = 0;
1411 queue_work(wc->writeback_wq, &wc->flush_work);
1413 writecache_schedule_autocommit(wc);
1417 } while (bio->bi_iter.bi_size);
1419 if (unlikely(bio->bi_opf & REQ_FUA ||
1420 wc->uncommitted_blocks >= wc->autocommit_blocks))
1421 writecache_flush(wc);
1423 writecache_schedule_autocommit(wc);
1427 unlock_remap_origin:
1428 bio_set_dev(bio, wc->dev->bdev);
1430 return DM_MAPIO_REMAPPED;
1433 /* make sure that writecache_end_io decrements bio_in_progress: */
1434 bio->bi_private = (void *)1;
1435 atomic_inc(&wc->bio_in_progress[bio_data_dir(bio)]);
1437 return DM_MAPIO_REMAPPED;
1442 return DM_MAPIO_SUBMITTED;
1446 return DM_MAPIO_SUBMITTED;
1451 return DM_MAPIO_SUBMITTED;
1454 static int writecache_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *status)
1456 struct dm_writecache *wc = ti->private;
1458 if (bio->bi_private != NULL) {
1459 int dir = bio_data_dir(bio);
1460 if (atomic_dec_and_test(&wc->bio_in_progress[dir]))
1461 if (unlikely(waitqueue_active(&wc->bio_in_progress_wait[dir])))
1462 wake_up(&wc->bio_in_progress_wait[dir]);
1467 static int writecache_iterate_devices(struct dm_target *ti,
1468 iterate_devices_callout_fn fn, void *data)
1470 struct dm_writecache *wc = ti->private;
1472 return fn(ti, wc->dev, 0, ti->len, data);
1475 static void writecache_io_hints(struct dm_target *ti, struct queue_limits *limits)
1477 struct dm_writecache *wc = ti->private;
1479 if (limits->logical_block_size < wc->block_size)
1480 limits->logical_block_size = wc->block_size;
1482 if (limits->physical_block_size < wc->block_size)
1483 limits->physical_block_size = wc->block_size;
1485 if (limits->io_min < wc->block_size)
1486 limits->io_min = wc->block_size;
1490 static void writecache_writeback_endio(struct bio *bio)
1492 struct writeback_struct *wb = container_of(bio, struct writeback_struct, bio);
1493 struct dm_writecache *wc = wb->wc;
1494 unsigned long flags;
1496 raw_spin_lock_irqsave(&wc->endio_list_lock, flags);
1497 if (unlikely(list_empty(&wc->endio_list)))
1498 wake_up_process(wc->endio_thread);
1499 list_add_tail(&wb->endio_entry, &wc->endio_list);
1500 raw_spin_unlock_irqrestore(&wc->endio_list_lock, flags);
1503 static void writecache_copy_endio(int read_err, unsigned long write_err, void *ptr)
1505 struct copy_struct *c = ptr;
1506 struct dm_writecache *wc = c->wc;
1508 c->error = likely(!(read_err | write_err)) ? 0 : -EIO;
1510 raw_spin_lock_irq(&wc->endio_list_lock);
1511 if (unlikely(list_empty(&wc->endio_list)))
1512 wake_up_process(wc->endio_thread);
1513 list_add_tail(&c->endio_entry, &wc->endio_list);
1514 raw_spin_unlock_irq(&wc->endio_list_lock);
1517 static void __writecache_endio_pmem(struct dm_writecache *wc, struct list_head *list)
1520 struct writeback_struct *wb;
1522 unsigned long n_walked = 0;
1525 wb = list_entry(list->next, struct writeback_struct, endio_entry);
1526 list_del(&wb->endio_entry);
1528 if (unlikely(wb->bio.bi_status != BLK_STS_OK))
1529 writecache_error(wc, blk_status_to_errno(wb->bio.bi_status),
1530 "write error %d", wb->bio.bi_status);
1534 BUG_ON(!e->write_in_progress);
1535 e->write_in_progress = false;
1536 INIT_LIST_HEAD(&e->lru);
1537 if (!writecache_has_error(wc))
1538 writecache_free_entry(wc, e);
1539 BUG_ON(!wc->writeback_size);
1540 wc->writeback_size--;
1542 if (unlikely(n_walked >= ENDIO_LATENCY)) {
1543 writecache_commit_flushed(wc, false);
1548 } while (++i < wb->wc_list_n);
1550 if (wb->wc_list != wb->wc_list_inline)
1553 } while (!list_empty(list));
1556 static void __writecache_endio_ssd(struct dm_writecache *wc, struct list_head *list)
1558 struct copy_struct *c;
1562 c = list_entry(list->next, struct copy_struct, endio_entry);
1563 list_del(&c->endio_entry);
1565 if (unlikely(c->error))
1566 writecache_error(wc, c->error, "copy error");
1570 BUG_ON(!e->write_in_progress);
1571 e->write_in_progress = false;
1572 INIT_LIST_HEAD(&e->lru);
1573 if (!writecache_has_error(wc))
1574 writecache_free_entry(wc, e);
1576 BUG_ON(!wc->writeback_size);
1577 wc->writeback_size--;
1579 } while (--c->n_entries);
1580 mempool_free(c, &wc->copy_pool);
1581 } while (!list_empty(list));
1584 static int writecache_endio_thread(void *data)
1586 struct dm_writecache *wc = data;
1589 struct list_head list;
1591 raw_spin_lock_irq(&wc->endio_list_lock);
1592 if (!list_empty(&wc->endio_list))
1594 set_current_state(TASK_INTERRUPTIBLE);
1595 raw_spin_unlock_irq(&wc->endio_list_lock);
1597 if (unlikely(kthread_should_stop())) {
1598 set_current_state(TASK_RUNNING);
1607 list = wc->endio_list;
1608 list.next->prev = list.prev->next = &list;
1609 INIT_LIST_HEAD(&wc->endio_list);
1610 raw_spin_unlock_irq(&wc->endio_list_lock);
1612 if (!WC_MODE_FUA(wc))
1613 writecache_disk_flush(wc, wc->dev);
1617 if (WC_MODE_PMEM(wc)) {
1618 __writecache_endio_pmem(wc, &list);
1620 __writecache_endio_ssd(wc, &list);
1621 writecache_wait_for_ios(wc, READ);
1624 writecache_commit_flushed(wc, false);
1632 static bool wc_add_block(struct writeback_struct *wb, struct wc_entry *e, gfp_t gfp)
1634 struct dm_writecache *wc = wb->wc;
1635 unsigned block_size = wc->block_size;
1636 void *address = memory_data(wc, e);
1638 persistent_memory_flush_cache(address, block_size);
1639 return bio_add_page(&wb->bio, persistent_memory_page(address),
1640 block_size, persistent_memory_page_offset(address)) != 0;
1643 struct writeback_list {
1644 struct list_head list;
1645 size_t size;
1646 };
1648 static void __writeback_throttle(struct dm_writecache *wc, struct writeback_list *wbl)
1650 if (unlikely(wc->max_writeback_jobs)) {
1651 if (READ_ONCE(wc->writeback_size) - wbl->size >= wc->max_writeback_jobs) {
1653 while (wc->writeback_size - wbl->size >= wc->max_writeback_jobs)
1654 writecache_wait_on_freelist(wc);
1661 static void __writecache_writeback_pmem(struct dm_writecache *wc, struct writeback_list *wbl)
1663 struct wc_entry *e, *f;
1665 struct writeback_struct *wb;
1670 e = container_of(wbl->list.prev, struct wc_entry, lru);
1673 max_pages = e->wc_list_contiguous;
1675 bio = bio_alloc_bioset(GFP_NOIO, max_pages, &wc->bio_set);
1676 wb = container_of(bio, struct writeback_struct, bio);
1678 bio->bi_end_io = writecache_writeback_endio;
1679 bio_set_dev(bio, wc->dev->bdev);
1680 bio->bi_iter.bi_sector = read_original_sector(wc, e);
1681 if (max_pages <= WB_LIST_INLINE ||
1682 unlikely(!(wb->wc_list = kmalloc_array(max_pages, sizeof(struct wc_entry *),
1683 GFP_NOIO | __GFP_NORETRY |
1684 __GFP_NOMEMALLOC | __GFP_NOWARN)))) {
1685 wb->wc_list = wb->wc_list_inline;
1686 max_pages = WB_LIST_INLINE;
1689 BUG_ON(!wc_add_block(wb, e, GFP_NOIO));
1694 while (wbl->size && wb->wc_list_n < max_pages) {
1695 f = container_of(wbl->list.prev, struct wc_entry, lru);
1696 if (read_original_sector(wc, f) !=
1697 read_original_sector(wc, e) + (wc->block_size >> SECTOR_SHIFT))
1699 if (!wc_add_block(wb, f, GFP_NOWAIT | __GFP_NOWARN))
1703 wb->wc_list[wb->wc_list_n++] = f;
1706 bio_set_op_attrs(bio, REQ_OP_WRITE, WC_MODE_FUA(wc) * REQ_FUA);
1707 if (writecache_has_error(wc)) {
1708 bio->bi_status = BLK_STS_IOERR;
1714 __writeback_throttle(wc, wbl);
1718 static void __writecache_writeback_ssd(struct dm_writecache *wc, struct writeback_list *wbl)
1720 struct wc_entry *e, *f;
1721 struct dm_io_region from, to;
1722 struct copy_struct *c;
1728 e = container_of(wbl->list.prev, struct wc_entry, lru);
1731 n_sectors = e->wc_list_contiguous << (wc->block_size_bits - SECTOR_SHIFT);
1733 from.bdev = wc->ssd_dev->bdev;
1734 from.sector = cache_sector(wc, e);
1735 from.count = n_sectors;
1736 to.bdev = wc->dev->bdev;
1737 to.sector = read_original_sector(wc, e);
1738 to.count = n_sectors;
1740 c = mempool_alloc(&wc->copy_pool, GFP_NOIO);
1743 c->n_entries = e->wc_list_contiguous;
1745 while ((n_sectors -= wc->block_size >> SECTOR_SHIFT)) {
1747 f = container_of(wbl->list.prev, struct wc_entry, lru);
1753 dm_kcopyd_copy(wc->dm_kcopyd, &from, 1, &to, 0, writecache_copy_endio, c);
1755 __writeback_throttle(wc, wbl);
1759 static void writecache_writeback(struct work_struct *work)
1761 struct dm_writecache *wc = container_of(work, struct dm_writecache, writeback_work);
1762 struct blk_plug plug;
1763 struct wc_entry *f, *g, *e = NULL;
1764 struct rb_node *node, *next_node;
1765 struct list_head skipped;
1766 struct writeback_list wbl;
1767 unsigned long n_walked;
1771 if (writecache_has_error(wc)) {
1776 if (unlikely(wc->writeback_all)) {
1777 if (writecache_wait_for_writeback(wc))
1781 if (wc->overwrote_committed) {
1782 writecache_wait_for_ios(wc, WRITE);
1786 INIT_LIST_HEAD(&skipped);
1787 INIT_LIST_HEAD(&wbl.list);
1789 while (!list_empty(&wc->lru) &&
1790 (wc->writeback_all ||
1791 wc->freelist_size + wc->writeback_size <= wc->freelist_low_watermark ||
1792 (jiffies - container_of(wc->lru.prev, struct wc_entry, lru)->age >=
1793 wc->max_age - wc->max_age / MAX_AGE_DIV))) {
1796 if (unlikely(n_walked > WRITEBACK_LATENCY) &&
1797 likely(!wc->writeback_all) && likely(!dm_suspended(wc->ti))) {
1798 queue_work(wc->writeback_wq, &wc->writeback_work);
1802 if (unlikely(wc->writeback_all)) {
1804 writecache_flush(wc);
1805 e = container_of(rb_first(&wc->tree), struct wc_entry, rb_node);
1809 e = container_of(wc->lru.prev, struct wc_entry, lru);
1810 BUG_ON(e->write_in_progress);
1811 if (unlikely(!writecache_entry_is_committed(wc, e))) {
1812 writecache_flush(wc);
1814 node = rb_prev(&e->rb_node);
1816 f = container_of(node, struct wc_entry, rb_node);
1817 if (unlikely(read_original_sector(wc, f) ==
1818 read_original_sector(wc, e))) {
1819 BUG_ON(!f->write_in_progress);
1821 list_add(&e->lru, &skipped);
1826 wc->writeback_size++;
1828 list_add(&e->lru, &wbl.list);
1830 e->write_in_progress = true;
1831 e->wc_list_contiguous = 1;
1836 next_node = rb_next(&f->rb_node);
1837 if (unlikely(!next_node))
1839 g = container_of(next_node, struct wc_entry, rb_node);
1840 if (unlikely(read_original_sector(wc, g) ==
1841 read_original_sector(wc, f))) {
1845 if (read_original_sector(wc, g) !=
1846 read_original_sector(wc, f) + (wc->block_size >> SECTOR_SHIFT))
1848 if (unlikely(g->write_in_progress))
1850 if (unlikely(!writecache_entry_is_committed(wc, g)))
1853 if (!WC_MODE_PMEM(wc)) {
1859 //if (unlikely(n_walked > WRITEBACK_LATENCY) && likely(!wc->writeback_all))
1862 wc->writeback_size++;
1864 list_add(&g->lru, &wbl.list);
1866 g->write_in_progress = true;
1867 g->wc_list_contiguous = BIO_MAX_PAGES;
1869 e->wc_list_contiguous++;
1870 if (unlikely(e->wc_list_contiguous == BIO_MAX_PAGES)) {
1871 if (unlikely(wc->writeback_all)) {
1872 next_node = rb_next(&f->rb_node);
1873 if (likely(next_node))
1874 g = container_of(next_node, struct wc_entry, rb_node);
1882 if (!list_empty(&skipped)) {
1883 list_splice_tail(&skipped, &wc->lru);
1885 * If we didn't make any progress, we must wait until some
1886 * writeback finishes to avoid burning CPU in a loop
1888 if (unlikely(!wbl.size))
1889 writecache_wait_for_writeback(wc);
1894 blk_start_plug(&plug);
1896 if (WC_MODE_PMEM(wc))
1897 __writecache_writeback_pmem(wc, &wbl);
1899 __writecache_writeback_ssd(wc, &wbl);
1901 blk_finish_plug(&plug);
1903 if (unlikely(wc->writeback_all)) {
1905 while (writecache_wait_for_writeback(wc));
1910 static int calculate_memory_size(uint64_t device_size, unsigned block_size,
1911 size_t *n_blocks_p, size_t *n_metadata_blocks_p)
1913 uint64_t n_blocks, offset;
1916 n_blocks = device_size;
1917 do_div(n_blocks, block_size + sizeof(struct wc_memory_entry));
1922 /* Verify the following entries[n_blocks] won't overflow */
1923 if (n_blocks >= ((size_t)-sizeof(struct wc_memory_superblock) /
1924 sizeof(struct wc_memory_entry)))
1926 offset = offsetof(struct wc_memory_superblock, entries[n_blocks]);
1927 offset = (offset + block_size - 1) & ~(uint64_t)(block_size - 1);
1928 if (offset + n_blocks * block_size <= device_size)
1933 /* check if the bit field overflows */
1935 if (e.index != n_blocks)
1939 *n_blocks_p = n_blocks;
1940 if (n_metadata_blocks_p)
1941 *n_metadata_blocks_p = offset >> __ffs(block_size);
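/*
 * Worked example (illustrative, assuming a 1 GiB cache device and a
 * 4096-byte block size): the first estimate is
 *	n_blocks = 1073741824 / (4096 + 16) = 261123
 * and offsetof(struct wc_memory_superblock, entries[261123]) rounded up
 * to a 4096-byte boundary gives 1021 metadata blocks (4182016 bytes);
 *	4182016 + 261123 * 4096 = 1073741824 <= 1073741824
 * so the first estimate already fits and the loop exits.
 */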
1945 static int init_memory(struct dm_writecache *wc)
1950 r = calculate_memory_size(wc->memory_map_size, wc->block_size, &wc->n_blocks, NULL);
1954 r = writecache_alloc_entries(wc);
1958 for (b = 0; b < ARRAY_SIZE(sb(wc)->padding); b++)
1959 pmem_assign(sb(wc)->padding[b], cpu_to_le64(0));
1960 pmem_assign(sb(wc)->version, cpu_to_le32(MEMORY_SUPERBLOCK_VERSION));
1961 pmem_assign(sb(wc)->block_size, cpu_to_le32(wc->block_size));
1962 pmem_assign(sb(wc)->n_blocks, cpu_to_le64(wc->n_blocks));
1963 pmem_assign(sb(wc)->seq_count, cpu_to_le64(0));
1965 for (b = 0; b < wc->n_blocks; b++) {
1966 write_original_sector_seq_count(wc, &wc->entries[b], -1, -1);
1970 writecache_flush_all_metadata(wc);
1971 writecache_commit_flushed(wc, false);
1972 pmem_assign(sb(wc)->magic, cpu_to_le32(MEMORY_SUPERBLOCK_MAGIC));
1973 writecache_flush_region(wc, &sb(wc)->magic, sizeof sb(wc)->magic);
1974 writecache_commit_flushed(wc, false);
1979 static void writecache_dtr(struct dm_target *ti)
1981 struct dm_writecache *wc = ti->private;
1986 if (wc->endio_thread)
1987 kthread_stop(wc->endio_thread);
1989 if (wc->flush_thread)
1990 kthread_stop(wc->flush_thread);
1992 bioset_exit(&wc->bio_set);
1994 mempool_exit(&wc->copy_pool);
1996 if (wc->writeback_wq)
1997 destroy_workqueue(wc->writeback_wq);
2000 dm_put_device(ti, wc->dev);
2003 dm_put_device(ti, wc->ssd_dev);
2008 if (wc->memory_map) {
2009 if (WC_MODE_PMEM(wc))
2010 persistent_memory_release(wc);
2012 vfree(wc->memory_map);
2016 dm_kcopyd_client_destroy(wc->dm_kcopyd);
2019 dm_io_client_destroy(wc->dm_io);
2021 if (wc->dirty_bitmap)
2022 vfree(wc->dirty_bitmap);
2027 static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
2029 struct dm_writecache *wc;
2030 struct dm_arg_set as;
2032 unsigned opt_params;
2033 size_t offset, data_size;
2036 int high_wm_percent = HIGH_WATERMARK;
2037 int low_wm_percent = LOW_WATERMARK;
2039 struct wc_memory_superblock s;
2041 static struct dm_arg _args[] = {
2042 {0, 10, "Invalid number of feature args"},
2048 wc = kzalloc(sizeof(struct dm_writecache), GFP_KERNEL);
2050 ti->error = "Cannot allocate writecache structure";
2057 mutex_init(&wc->lock);
2058 wc->max_age = MAX_AGE_UNSPECIFIED;
2059 writecache_poison_lists(wc);
2060 init_waitqueue_head(&wc->freelist_wait);
2061 timer_setup(&wc->autocommit_timer, writecache_autocommit_timer, 0);
2062 timer_setup(&wc->max_age_timer, writecache_max_age_timer, 0);
2064 for (i = 0; i < 2; i++) {
2065 atomic_set(&wc->bio_in_progress[i], 0);
2066 init_waitqueue_head(&wc->bio_in_progress_wait[i]);
2069 wc->dm_io = dm_io_client_create();
2070 if (IS_ERR(wc->dm_io)) {
2071 r = PTR_ERR(wc->dm_io);
2072 ti->error = "Unable to allocate dm-io client";
2077 wc->writeback_wq = alloc_workqueue("writecache-writeback", WQ_MEM_RECLAIM, 1);
2078 if (!wc->writeback_wq) {
2080 ti->error = "Could not allocate writeback workqueue";
2083 INIT_WORK(&wc->writeback_work, writecache_writeback);
2084 INIT_WORK(&wc->flush_work, writecache_flush_work);
2086 raw_spin_lock_init(&wc->endio_list_lock);
2087 INIT_LIST_HEAD(&wc->endio_list);
2088 wc->endio_thread = kthread_create(writecache_endio_thread, wc, "writecache_endio");
2089 if (IS_ERR(wc->endio_thread)) {
2090 r = PTR_ERR(wc->endio_thread);
2091 wc->endio_thread = NULL;
2092 ti->error = "Couldn't spawn endio thread";
2095 wake_up_process(wc->endio_thread);
2098 * Parse the mode (pmem or ssd)
2100 string = dm_shift_arg(&as);
2104 if (!strcasecmp(string, "s")) {
2105 wc->pmem_mode = false;
2106 } else if (!strcasecmp(string, "p")) {
2107 #ifdef DM_WRITECACHE_HAS_PMEM
2108 wc->pmem_mode = true;
2109 wc->writeback_fua = true;
2112 * If the architecture doesn't support persistent memory or
2113 * the kernel doesn't support any DAX drivers, this driver can
2114 * only be used in SSD-only mode.
2117 ti->error = "Persistent memory or DAX not supported on this system";
2124 if (WC_MODE_PMEM(wc)) {
2125 r = bioset_init(&wc->bio_set, BIO_POOL_SIZE,
2126 offsetof(struct writeback_struct, bio),
2129 ti->error = "Could not allocate bio set";
2133 r = mempool_init_kmalloc_pool(&wc->copy_pool, 1, sizeof(struct copy_struct));
2135 ti->error = "Could not allocate mempool";
2141 * Parse the origin data device
2143 string = dm_shift_arg(&as);
2146 r = dm_get_device(ti, string, dm_table_get_mode(ti->table), &wc->dev);
2148 ti->error = "Origin data device lookup failed";
2153 * Parse cache data device (be it pmem or ssd)
2155 string = dm_shift_arg(&as);
2159 r = dm_get_device(ti, string, dm_table_get_mode(ti->table), &wc->ssd_dev);
2161 ti->error = "Cache data device lookup failed";
2164 wc->memory_map_size = i_size_read(wc->ssd_dev->bdev->bd_inode);
2167 * Parse the cache block size
2169 string = dm_shift_arg(&as);
2172 if (sscanf(string, "%u%c", &wc->block_size, &dummy) != 1 ||
2173 wc->block_size < 512 || wc->block_size > PAGE_SIZE ||
2174 (wc->block_size & (wc->block_size - 1))) {
2176 ti->error = "Invalid block size";
2179 if (wc->block_size < bdev_logical_block_size(wc->dev->bdev) ||
2180 wc->block_size < bdev_logical_block_size(wc->ssd_dev->bdev)) {
2182 ti->error = "Block size is smaller than device logical block size";
2185 wc->block_size_bits = __ffs(wc->block_size);
2187 wc->max_writeback_jobs = MAX_WRITEBACK_JOBS;
2188 wc->autocommit_blocks = !WC_MODE_PMEM(wc) ? AUTOCOMMIT_BLOCKS_SSD : AUTOCOMMIT_BLOCKS_PMEM;
2189 wc->autocommit_jiffies = msecs_to_jiffies(AUTOCOMMIT_MSEC);
2192 * Parse optional arguments
2194 r = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
2198 while (opt_params) {
2199 string = dm_shift_arg(&as), opt_params--;
2200 if (!strcasecmp(string, "start_sector") && opt_params >= 1) {
2201 unsigned long long start_sector;
2202 string = dm_shift_arg(&as), opt_params--;
2203 if (sscanf(string, "%llu%c", &start_sector, &dummy) != 1)
2204 goto invalid_optional;
2205 wc->start_sector = start_sector;
2206 if (wc->start_sector != start_sector ||
2207 wc->start_sector >= wc->memory_map_size >> SECTOR_SHIFT)
2208 goto invalid_optional;
2209 } else if (!strcasecmp(string, "high_watermark") && opt_params >= 1) {
2210 string = dm_shift_arg(&as), opt_params--;
2211 if (sscanf(string, "%d%c", &high_wm_percent, &dummy) != 1)
2212 goto invalid_optional;
2213 if (high_wm_percent < 0 || high_wm_percent > 100)
2214 goto invalid_optional;
2215 wc->high_wm_percent_set = true;
2216 } else if (!strcasecmp(string, "low_watermark") && opt_params >= 1) {
2217 string = dm_shift_arg(&as), opt_params--;
2218 if (sscanf(string, "%d%c", &low_wm_percent, &dummy) != 1)
2219 goto invalid_optional;
2220 if (low_wm_percent < 0 || low_wm_percent > 100)
2221 goto invalid_optional;
2222 wc->low_wm_percent_set = true;
2223 } else if (!strcasecmp(string, "writeback_jobs") && opt_params >= 1) {
2224 string = dm_shift_arg(&as), opt_params--;
2225 if (sscanf(string, "%u%c", &wc->max_writeback_jobs, &dummy) != 1)
2226 goto invalid_optional;
2227 wc->max_writeback_jobs_set = true;
2228 } else if (!strcasecmp(string, "autocommit_blocks") && opt_params >= 1) {
2229 string = dm_shift_arg(&as), opt_params--;
2230 if (sscanf(string, "%u%c", &wc->autocommit_blocks, &dummy) != 1)
2231 goto invalid_optional;
2232 wc->autocommit_blocks_set = true;
2233 } else if (!strcasecmp(string, "autocommit_time") && opt_params >= 1) {
2234 unsigned autocommit_msecs;
2235 string = dm_shift_arg(&as), opt_params--;
2236 if (sscanf(string, "%u%c", &autocommit_msecs, &dummy) != 1)
2237 goto invalid_optional;
2238 if (autocommit_msecs > 3600000)
2239 goto invalid_optional;
2240 wc->autocommit_jiffies = msecs_to_jiffies(autocommit_msecs);
2241 wc->autocommit_time_set = true;
2242 } else if (!strcasecmp(string, "max_age") && opt_params >= 1) {
2243 unsigned max_age_msecs;
2244 string = dm_shift_arg(&as), opt_params--;
2245 if (sscanf(string, "%u%c", &max_age_msecs, &dummy) != 1)
2246 goto invalid_optional;
2247 if (max_age_msecs > 86400000)
2248 goto invalid_optional;
2249 wc->max_age = msecs_to_jiffies(max_age_msecs);
2250 } else if (!strcasecmp(string, "cleaner")) {
2252 } else if (!strcasecmp(string, "fua")) {
2253 if (WC_MODE_PMEM(wc)) {
2254 wc->writeback_fua = true;
2255 wc->writeback_fua_set = true;
2256 } else goto invalid_optional;
2257 } else if (!strcasecmp(string, "nofua")) {
2258 if (WC_MODE_PMEM(wc)) {
2259 wc->writeback_fua = false;
2260 wc->writeback_fua_set = true;
2261 } else goto invalid_optional;
2265 ti->error = "Invalid optional argument";
2270 if (high_wm_percent < low_wm_percent) {
2272 ti->error = "High watermark must be greater than or equal to low watermark";
2276 if (WC_MODE_PMEM(wc)) {
2277 if (!dax_synchronous(wc->ssd_dev->dax_dev)) {
2279 ti->error = "Asynchronous persistent memory not supported as pmem cache";
2283 r = persistent_memory_claim(wc);
2285 ti->error = "Unable to map persistent memory for cache";
2289 size_t n_blocks, n_metadata_blocks;
2290 uint64_t n_bitmap_bits;
2292 wc->memory_map_size -= (uint64_t)wc->start_sector << SECTOR_SHIFT;
2294 bio_list_init(&wc->flush_list);
2295 wc->flush_thread = kthread_create(writecache_flush_thread, wc, "dm_writecache_flush");
2296 if (IS_ERR(wc->flush_thread)) {
2297 r = PTR_ERR(wc->flush_thread);
2298 wc->flush_thread = NULL;
2299 ti->error = "Couldn't spawn flush thread";
2302 wake_up_process(wc->flush_thread);
2304 r = calculate_memory_size(wc->memory_map_size, wc->block_size,
2305 &n_blocks, &n_metadata_blocks);
2307 ti->error = "Invalid device size";
2311 n_bitmap_bits = (((uint64_t)n_metadata_blocks << wc->block_size_bits) +
2312 BITMAP_GRANULARITY - 1) / BITMAP_GRANULARITY;
2313 /* this is a limitation of the test_bit functions */
2314 if (n_bitmap_bits > 1U << 31) {
2316 ti->error = "Invalid device size";
2320 wc->memory_map = vmalloc(n_metadata_blocks << wc->block_size_bits);
2321 if (!wc->memory_map) {
2323 ti->error = "Unable to allocate memory for metadata";
2327 wc->dm_kcopyd = dm_kcopyd_client_create(&dm_kcopyd_throttle);
2328 if (IS_ERR(wc->dm_kcopyd)) {
2329 r = PTR_ERR(wc->dm_kcopyd);
2330 ti->error = "Unable to allocate dm-kcopyd client";
2331 wc->dm_kcopyd = NULL;
2335 wc->metadata_sectors = n_metadata_blocks << (wc->block_size_bits - SECTOR_SHIFT);
2336 wc->dirty_bitmap_size = (n_bitmap_bits + BITS_PER_LONG - 1) /
2337 BITS_PER_LONG * sizeof(unsigned long);
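/*
 * Sizing sketch, continuing the illustrative 1 GiB example from
 * calculate_memory_size(): 1021 metadata blocks of 4096 bytes need
 * 4182016 / 65536 = 64 dirty bits, which rounds up to a single
 * unsigned long (8 bytes) on 64-bit.
 */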
2338 wc->dirty_bitmap = vzalloc(wc->dirty_bitmap_size);
2339 if (!wc->dirty_bitmap) {
2341 ti->error = "Unable to allocate dirty bitmap";
2345 r = writecache_read_metadata(wc, wc->block_size >> SECTOR_SHIFT);
2347 ti->error = "Unable to read first block of metadata";
2352 r = memcpy_mcsafe(&s, sb(wc), sizeof(struct wc_memory_superblock));
2354 ti->error = "Hardware memory error when reading superblock";
2357 if (!le32_to_cpu(s.magic) && !le32_to_cpu(s.version)) {
2358 r = init_memory(wc);
2360 ti->error = "Unable to initialize device";
2363 r = memcpy_mcsafe(&s, sb(wc), sizeof(struct wc_memory_superblock));
2365 ti->error = "Hardware memory error when reading superblock";
2370 if (le32_to_cpu(s.magic) != MEMORY_SUPERBLOCK_MAGIC) {
2371 ti->error = "Invalid magic in the superblock";
2376 if (le32_to_cpu(s.version) != MEMORY_SUPERBLOCK_VERSION) {
2377 ti->error = "Invalid version in the superblock";
2382 if (le32_to_cpu(s.block_size) != wc->block_size) {
2383 ti->error = "Block size does not match superblock";
2388 wc->n_blocks = le64_to_cpu(s.n_blocks);
2390 offset = wc->n_blocks * sizeof(struct wc_memory_entry);
2391 if (offset / sizeof(struct wc_memory_entry) != le64_to_cpu(sb(wc)->n_blocks)) {
2393 ti->error = "Overflow in size calculation";
2397 offset += sizeof(struct wc_memory_superblock);
2398 if (offset < sizeof(struct wc_memory_superblock))
2400 offset = (offset + wc->block_size - 1) & ~(size_t)(wc->block_size - 1);
2401 data_size = wc->n_blocks * (size_t)wc->block_size;
2402 if (!offset || (data_size / wc->block_size != wc->n_blocks) ||
2403 (offset + data_size < offset))
2405 if (offset + data_size > wc->memory_map_size) {
2406 ti->error = "Memory area is too small";
2411 wc->metadata_sectors = offset >> SECTOR_SHIFT;
2412 wc->block_start = (char *)sb(wc) + offset;
2414 x = (uint64_t)wc->n_blocks * (100 - high_wm_percent);
2415 x += 50;
2416 do_div(x, 100);
2417 wc->freelist_high_watermark = x;
2418 x = (uint64_t)wc->n_blocks * (100 - low_wm_percent);
2419 x += 50;
2420 do_div(x, 100);
2421 wc->freelist_low_watermark = x;
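/*
 * Illustrative arithmetic: with the defaults (high_watermark 50,
 * low_watermark 45), freelist_high_watermark is ~50% of n_blocks and
 * freelist_low_watermark ~55%; writeback is queued once free plus
 * in-flight blocks drop to the high mark, and continues until the low
 * mark (i.e. more free blocks) is reached again.
 */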
2424 activate_cleaner(wc);
2426 r = writecache_alloc_entries(wc);
2428 ti->error = "Cannot allocate memory";
2432 ti->num_flush_bios = 1;
2433 ti->flush_supported = true;
2434 ti->num_discard_bios = 1;
2436 if (WC_MODE_PMEM(wc))
2437 persistent_memory_flush_cache(wc->memory_map, wc->memory_map_size);
2443 ti->error = "Bad arguments";
2449 static void writecache_status(struct dm_target *ti, status_type_t type,
2450 unsigned status_flags, char *result, unsigned maxlen)
2452 struct dm_writecache *wc = ti->private;
2453 unsigned extra_args;
2458 case STATUSTYPE_INFO:
2459 DMEMIT("%ld %llu %llu %llu", writecache_has_error(wc),
2460 (unsigned long long)wc->n_blocks, (unsigned long long)wc->freelist_size,
2461 (unsigned long long)wc->writeback_size);
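/*
 * e.g. a status line of "0 261123 130562 0" (hypothetical values) means:
 * no error, 261123 cache blocks, 130562 free, none under writeback.
 */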
2463 case STATUSTYPE_TABLE:
2464 DMEMIT("%c %s %s %u ", WC_MODE_PMEM(wc) ? 'p' : 's',
2465 wc->dev->name, wc->ssd_dev->name, wc->block_size);
2467 if (wc->start_sector)
2469 if (wc->high_wm_percent_set && !wc->cleaner)
2471 if (wc->low_wm_percent_set && !wc->cleaner)
2473 if (wc->max_writeback_jobs_set)
2475 if (wc->autocommit_blocks_set)
2477 if (wc->autocommit_time_set)
2481 if (wc->writeback_fua_set)
2484 DMEMIT("%u", extra_args);
2485 if (wc->start_sector)
2486 DMEMIT(" start_sector %llu", (unsigned long long)wc->start_sector);
2487 if (wc->high_wm_percent_set && !wc->cleaner) {
2488 x = (uint64_t)wc->freelist_high_watermark * 100;
2489 x += wc->n_blocks / 2;
2490 do_div(x, (size_t)wc->n_blocks);
2491 DMEMIT(" high_watermark %u", 100 - (unsigned)x);
2493 if (wc->low_wm_percent_set && !wc->cleaner) {
2494 x = (uint64_t)wc->freelist_low_watermark * 100;
2495 x += wc->n_blocks / 2;
2496 do_div(x, (size_t)wc->n_blocks);
2497 DMEMIT(" low_watermark %u", 100 - (unsigned)x);
2499 if (wc->max_writeback_jobs_set)
2500 DMEMIT(" writeback_jobs %u", wc->max_writeback_jobs);
2501 if (wc->autocommit_blocks_set)
2502 DMEMIT(" autocommit_blocks %u", wc->autocommit_blocks);
2503 if (wc->autocommit_time_set)
2504 DMEMIT(" autocommit_time %u", jiffies_to_msecs(wc->autocommit_jiffies));
2505 if (wc->max_age != MAX_AGE_UNSPECIFIED)
2506 DMEMIT(" max_age %u", jiffies_to_msecs(wc->max_age));
2509 if (wc->writeback_fua_set)
2510 DMEMIT(" %sfua", wc->writeback_fua ? "" : "no");
2515 static struct target_type writecache_target = {
2516 .name = "writecache",
2517 .version = {1, 3, 0},
2518 .module = THIS_MODULE,
2519 .ctr = writecache_ctr,
2520 .dtr = writecache_dtr,
2521 .status = writecache_status,
2522 .postsuspend = writecache_suspend,
2523 .resume = writecache_resume,
2524 .message = writecache_message,
2525 .map = writecache_map,
2526 .end_io = writecache_end_io,
2527 .iterate_devices = writecache_iterate_devices,
2528 .io_hints = writecache_io_hints,
2531 static int __init dm_writecache_init(void)
2535 r = dm_register_target(&writecache_target);
2537 DMERR("register failed %d", r);
2544 static void __exit dm_writecache_exit(void)
2546 dm_unregister_target(&writecache_target);
2549 module_init(dm_writecache_init);
2550 module_exit(dm_writecache_exit);
2552 MODULE_DESCRIPTION(DM_NAME " writecache target");
2553 MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
2554 MODULE_LICENSE("GPL");