dm writecache: optimize superblock write
drivers/md/dm-writecache.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Red Hat. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/device-mapper.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/kthread.h>
#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
#include <linux/dax.h>
#include <linux/pfn_t.h>
#include <linux/libnvdimm.h>

#define DM_MSG_PREFIX "writecache"

#define HIGH_WATERMARK                  50
#define LOW_WATERMARK                   45
#define MAX_WRITEBACK_JOBS              0
#define ENDIO_LATENCY                   16
#define WRITEBACK_LATENCY               64
#define AUTOCOMMIT_BLOCKS_SSD           65536
#define AUTOCOMMIT_BLOCKS_PMEM          64
#define AUTOCOMMIT_MSEC                 1000
#define MAX_AGE_DIV                     16
#define MAX_AGE_UNSPECIFIED             -1UL

#define BITMAP_GRANULARITY      65536
#if BITMAP_GRANULARITY < PAGE_SIZE
#undef BITMAP_GRANULARITY
#define BITMAP_GRANULARITY      PAGE_SIZE
#endif

#if IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API) && IS_ENABLED(CONFIG_DAX_DRIVER)
#define DM_WRITECACHE_HAS_PMEM
#endif

#ifdef DM_WRITECACHE_HAS_PMEM
#define pmem_assign(dest, src)                                  \
do {                                                            \
        typeof(dest) uniq = (src);                              \
        memcpy_flushcache(&(dest), &uniq, sizeof(dest));        \
} while (0)
#else
#define pmem_assign(dest, src)  ((dest) = (src))
#endif

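/*
 * A note on pmem_assign(): the temporary forces "src" to be fully
 * evaluated before the copy, and memcpy_flushcache() pushes the new
 * value past the CPU cache so that it is durable on persistent memory
 * once the next commit barrier (wmb) completes.  A minimal sketch of
 * an equivalent open-coded store of a __le64 field:
 *
 *      __le64 v = cpu_to_le64(x);
 *      memcpy_flushcache(&sb(wc)->seq_count, &v, sizeof(v));
 *
 * Without DM_WRITECACHE_HAS_PMEM the macro degrades to a plain
 * assignment, because durability is then provided by block-layer
 * writes instead of cache flushes.
 */
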
#if defined(__HAVE_ARCH_MEMCPY_MCSAFE) && defined(DM_WRITECACHE_HAS_PMEM)
#define DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
#endif

#define MEMORY_SUPERBLOCK_MAGIC         0x23489321
#define MEMORY_SUPERBLOCK_VERSION       1

struct wc_memory_entry {
        __le64 original_sector;
        __le64 seq_count;
};

struct wc_memory_superblock {
        union {
                struct {
                        __le32 magic;
                        __le32 version;
                        __le32 block_size;
                        __le32 pad;
                        __le64 n_blocks;
                        __le64 seq_count;
                };
                __le64 padding[8];
        };
        struct wc_memory_entry entries[];
};

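/*
 * On-media metadata layout: one struct wc_memory_superblock at offset 0
 * (padded to 64 bytes by the union), immediately followed by one
 * wc_memory_entry per cache block.  The cached data blocks themselves
 * come after the metadata area; see cache_sector() below for the exact
 * arithmetic.
 */
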
struct wc_entry {
        struct rb_node rb_node;
        struct list_head lru;
        unsigned short wc_list_contiguous;
        bool write_in_progress
#if BITS_PER_LONG == 64
                :1
#endif
        ;
        unsigned long index
#if BITS_PER_LONG == 64
                :47
#endif
        ;
        unsigned long age;
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
        uint64_t original_sector;
        uint64_t seq_count;
#endif
};

#ifdef DM_WRITECACHE_HAS_PMEM
#define WC_MODE_PMEM(wc)                        ((wc)->pmem_mode)
#define WC_MODE_FUA(wc)                         ((wc)->writeback_fua)
#else
#define WC_MODE_PMEM(wc)                        false
#define WC_MODE_FUA(wc)                         false
#endif
#define WC_MODE_SORT_FREELIST(wc)               (!WC_MODE_PMEM(wc))

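/*
 * The freelist is kept sorted (as an rb-tree keyed by entry address,
 * which matches cache-sector order because all entries live in one
 * array) only in SSD mode: writecache_map() can then pop consecutive
 * cache sectors for a multi-block write and submit them as a single
 * bio.  In PMEM mode allocation order does not matter, so a plain
 * list is cheaper.
 */
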
struct dm_writecache {
        struct mutex lock;
        struct list_head lru;
        union {
                struct list_head freelist;
                struct {
                        struct rb_root freetree;
                        struct wc_entry *current_free;
                };
        };
        struct rb_root tree;

        size_t freelist_size;
        size_t writeback_size;
        size_t freelist_high_watermark;
        size_t freelist_low_watermark;
        unsigned long max_age;

        unsigned uncommitted_blocks;
        unsigned autocommit_blocks;
        unsigned max_writeback_jobs;

        int error;

        unsigned long autocommit_jiffies;
        struct timer_list autocommit_timer;
        struct wait_queue_head freelist_wait;

        struct timer_list max_age_timer;

        atomic_t bio_in_progress[2];
        struct wait_queue_head bio_in_progress_wait[2];

        struct dm_target *ti;
        struct dm_dev *dev;
        struct dm_dev *ssd_dev;
        sector_t start_sector;
        void *memory_map;
        uint64_t memory_map_size;
        size_t metadata_sectors;
        size_t n_blocks;
        uint64_t seq_count;
        void *block_start;
        struct wc_entry *entries;
        unsigned block_size;
        unsigned char block_size_bits;

        bool pmem_mode:1;
        bool writeback_fua:1;

        bool overwrote_committed:1;
        bool memory_vmapped:1;

        bool high_wm_percent_set:1;
        bool low_wm_percent_set:1;
        bool max_writeback_jobs_set:1;
        bool autocommit_blocks_set:1;
        bool autocommit_time_set:1;
        bool writeback_fua_set:1;
        bool flush_on_suspend:1;
        bool cleaner:1;

        unsigned writeback_all;
        struct workqueue_struct *writeback_wq;
        struct work_struct writeback_work;
        struct work_struct flush_work;

        struct dm_io_client *dm_io;

        raw_spinlock_t endio_list_lock;
        struct list_head endio_list;
        struct task_struct *endio_thread;

        struct task_struct *flush_thread;
        struct bio_list flush_list;

        struct dm_kcopyd_client *dm_kcopyd;
        unsigned long *dirty_bitmap;
        unsigned dirty_bitmap_size;

        struct bio_set bio_set;
        mempool_t copy_pool;
};

#define WB_LIST_INLINE          16

struct writeback_struct {
        struct list_head endio_entry;
        struct dm_writecache *wc;
        struct wc_entry **wc_list;
        unsigned wc_list_n;
        struct wc_entry *wc_list_inline[WB_LIST_INLINE];
        struct bio bio;
};

struct copy_struct {
        struct list_head endio_entry;
        struct dm_writecache *wc;
        struct wc_entry *e;
        unsigned n_entries;
        int error;
};

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(dm_writecache_throttle,
                                            "A percentage of time allocated for data copying");

static void wc_lock(struct dm_writecache *wc)
{
        mutex_lock(&wc->lock);
}

static void wc_unlock(struct dm_writecache *wc)
{
        mutex_unlock(&wc->lock);
}

#ifdef DM_WRITECACHE_HAS_PMEM
static int persistent_memory_claim(struct dm_writecache *wc)
{
        int r;
        loff_t s;
        long p, da;
        pfn_t pfn;
        int id;
        struct page **pages;

        wc->memory_vmapped = false;

        if (!wc->ssd_dev->dax_dev) {
                r = -EOPNOTSUPP;
                goto err1;
        }
        s = wc->memory_map_size;
        p = s >> PAGE_SHIFT;
        if (!p) {
                r = -EINVAL;
                goto err1;
        }
        if (p != s >> PAGE_SHIFT) {
                r = -EOVERFLOW;
                goto err1;
        }

        id = dax_read_lock();

        da = dax_direct_access(wc->ssd_dev->dax_dev, 0, p, &wc->memory_map, &pfn);
        if (da < 0) {
                wc->memory_map = NULL;
                r = da;
                goto err2;
        }
        if (!pfn_t_has_page(pfn)) {
                wc->memory_map = NULL;
                r = -EOPNOTSUPP;
                goto err2;
        }
        if (da != p) {
                long i;
                wc->memory_map = NULL;
                pages = kvmalloc_array(p, sizeof(struct page *), GFP_KERNEL);
                if (!pages) {
                        r = -ENOMEM;
                        goto err2;
                }
                i = 0;
                do {
                        long daa;
                        daa = dax_direct_access(wc->ssd_dev->dax_dev, i, p - i,
                                                NULL, &pfn);
                        if (daa <= 0) {
                                r = daa ? daa : -EINVAL;
                                goto err3;
                        }
                        if (!pfn_t_has_page(pfn)) {
                                r = -EOPNOTSUPP;
                                goto err3;
                        }
                        while (daa-- && i < p) {
                                pages[i++] = pfn_t_to_page(pfn);
                                pfn.val++;
                        }
                } while (i < p);
                wc->memory_map = vmap(pages, p, VM_MAP, PAGE_KERNEL);
                if (!wc->memory_map) {
                        r = -ENOMEM;
                        goto err3;
                }
                kvfree(pages);
                wc->memory_vmapped = true;
        }

        dax_read_unlock(id);

        wc->memory_map += (size_t)wc->start_sector << SECTOR_SHIFT;
        wc->memory_map_size -= (size_t)wc->start_sector << SECTOR_SHIFT;

        return 0;
err3:
        kvfree(pages);
err2:
        dax_read_unlock(id);
err1:
        return r;
}
#else
static int persistent_memory_claim(struct dm_writecache *wc)
{
        BUG();
}
#endif

static void persistent_memory_release(struct dm_writecache *wc)
{
        if (wc->memory_vmapped)
                vunmap(wc->memory_map - ((size_t)wc->start_sector << SECTOR_SHIFT));
}

static struct page *persistent_memory_page(void *addr)
{
        if (is_vmalloc_addr(addr))
                return vmalloc_to_page(addr);
        else
                return virt_to_page(addr);
}

static unsigned persistent_memory_page_offset(void *addr)
{
        return (unsigned long)addr & (PAGE_SIZE - 1);
}

static void persistent_memory_flush_cache(void *ptr, size_t size)
{
        if (is_vmalloc_addr(ptr))
                flush_kernel_vmap_range(ptr, size);
}

static void persistent_memory_invalidate_cache(void *ptr, size_t size)
{
        if (is_vmalloc_addr(ptr))
                invalidate_kernel_vmap_range(ptr, size);
}

static struct wc_memory_superblock *sb(struct dm_writecache *wc)
{
        return wc->memory_map;
}

static struct wc_memory_entry *memory_entry(struct dm_writecache *wc, struct wc_entry *e)
{
        return &sb(wc)->entries[e->index];
}

static void *memory_data(struct dm_writecache *wc, struct wc_entry *e)
{
        return (char *)wc->block_start + (e->index << wc->block_size_bits);
}

static sector_t cache_sector(struct dm_writecache *wc, struct wc_entry *e)
{
        return wc->start_sector + wc->metadata_sectors +
                ((sector_t)e->index << (wc->block_size_bits - SECTOR_SHIFT));
}

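/*
 * Address arithmetic for an entry with index i: the metadata occupies
 * the first wc->metadata_sectors of the cache device and data block i
 * sits at start_sector + metadata_sectors + i * (block_size >> 9).
 * For example, with a 4096-byte block size, index 3 maps to an offset
 * of 24 sectors past the metadata area.
 */
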
static uint64_t read_original_sector(struct dm_writecache *wc, struct wc_entry *e)
{
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
        return e->original_sector;
#else
        return le64_to_cpu(memory_entry(wc, e)->original_sector);
#endif
}

static uint64_t read_seq_count(struct dm_writecache *wc, struct wc_entry *e)
{
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
        return e->seq_count;
#else
        return le64_to_cpu(memory_entry(wc, e)->seq_count);
#endif
}

static void clear_seq_count(struct dm_writecache *wc, struct wc_entry *e)
{
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
        e->seq_count = -1;
#endif
        pmem_assign(memory_entry(wc, e)->seq_count, cpu_to_le64(-1));
}

static void write_original_sector_seq_count(struct dm_writecache *wc, struct wc_entry *e,
                                            uint64_t original_sector, uint64_t seq_count)
{
        struct wc_memory_entry me;
#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
        e->original_sector = original_sector;
        e->seq_count = seq_count;
#endif
        me.original_sector = cpu_to_le64(original_sector);
        me.seq_count = cpu_to_le64(seq_count);
        pmem_assign(*memory_entry(wc, e), me);
}

#define writecache_error(wc, err, msg, arg...)                          \
do {                                                                    \
        if (!cmpxchg(&(wc)->error, 0, err))                             \
                DMERR(msg, ##arg);                                      \
        wake_up(&(wc)->freelist_wait);                                  \
} while (0)

#define writecache_has_error(wc)        (unlikely(READ_ONCE((wc)->error)))

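/*
 * Error handling model: the first error is latched into wc->error with
 * cmpxchg() (so only the first failure is logged) and every subsequent
 * map/flush path checks writecache_has_error().  Waiters on the
 * freelist are woken so they observe the error instead of sleeping
 * forever.
 */
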
static void writecache_flush_all_metadata(struct dm_writecache *wc)
{
        if (!WC_MODE_PMEM(wc))
                memset(wc->dirty_bitmap, -1, wc->dirty_bitmap_size);
}

static void writecache_flush_region(struct dm_writecache *wc, void *ptr, size_t size)
{
        if (!WC_MODE_PMEM(wc))
                __set_bit(((char *)ptr - (char *)wc->memory_map) / BITMAP_GRANULARITY,
                          wc->dirty_bitmap);
}

static void writecache_disk_flush(struct dm_writecache *wc, struct dm_dev *dev);

struct io_notify {
        struct dm_writecache *wc;
        struct completion c;
        atomic_t count;
};

static void writecache_notify_io(unsigned long error, void *context)
{
        struct io_notify *endio = context;

        if (unlikely(error != 0))
                writecache_error(endio->wc, -EIO, "error writing metadata");
        BUG_ON(atomic_read(&endio->count) <= 0);
        if (atomic_dec_and_test(&endio->count))
                complete(&endio->c);
}

static void writecache_wait_for_ios(struct dm_writecache *wc, int direction)
{
        wait_event(wc->bio_in_progress_wait[direction],
                   !atomic_read(&wc->bio_in_progress[direction]));
}

static void ssd_commit_flushed(struct dm_writecache *wc, bool wait_for_ios)
{
        struct dm_io_region region;
        struct dm_io_request req;
        struct io_notify endio = {
                wc,
                COMPLETION_INITIALIZER_ONSTACK(endio.c),
                ATOMIC_INIT(1),
        };
        unsigned bitmap_bits = wc->dirty_bitmap_size * 8;
        unsigned i = 0;

        while (1) {
                unsigned j;
                i = find_next_bit(wc->dirty_bitmap, bitmap_bits, i);
                if (unlikely(i == bitmap_bits))
                        break;
                j = find_next_zero_bit(wc->dirty_bitmap, bitmap_bits, i);

                region.bdev = wc->ssd_dev->bdev;
                region.sector = (sector_t)i * (BITMAP_GRANULARITY >> SECTOR_SHIFT);
                region.count = (sector_t)(j - i) * (BITMAP_GRANULARITY >> SECTOR_SHIFT);

                if (unlikely(region.sector >= wc->metadata_sectors))
                        break;
                if (unlikely(region.sector + region.count > wc->metadata_sectors))
                        region.count = wc->metadata_sectors - region.sector;

                region.sector += wc->start_sector;
                atomic_inc(&endio.count);
                req.bi_op = REQ_OP_WRITE;
                req.bi_op_flags = REQ_SYNC;
                req.mem.type = DM_IO_VMA;
                req.mem.ptr.vma = (char *)wc->memory_map + (size_t)i * BITMAP_GRANULARITY;
                req.client = wc->dm_io;
                req.notify.fn = writecache_notify_io;
                req.notify.context = &endio;

                /* writing via async dm-io (implied by notify.fn above) won't return an error */
                (void) dm_io(&req, 1, &region, NULL);
                i = j;
        }

        writecache_notify_io(0, &endio);
        wait_for_completion_io(&endio.c);

        if (wait_for_ios)
                writecache_wait_for_ios(wc, WRITE);

        writecache_disk_flush(wc, wc->ssd_dev);

        memset(wc->dirty_bitmap, 0, wc->dirty_bitmap_size);
}

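/*
 * ssd_commit_flushed() above walks the dirty bitmap and writes each run
 * of dirty BITMAP_GRANULARITY-sized chunks of the in-core metadata to
 * the SSD with one async dm-io request, then waits for all of them and
 * issues a disk cache flush.  The extra reference in endio.count
 * guarantees the completion cannot fire before the submission loop
 * finishes.  ssd_commit_superblock() below is the cheaper path used
 * when only the superblock needs rewriting.
 */
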
static void ssd_commit_superblock(struct dm_writecache *wc)
{
        int r;
        struct dm_io_region region;
        struct dm_io_request req;

        region.bdev = wc->ssd_dev->bdev;
        region.sector = 0;
        /* region.count is in sectors, not bytes */
        region.count = max(4096U, wc->block_size) >> SECTOR_SHIFT;

        if (unlikely(region.sector + region.count > wc->metadata_sectors))
                region.count = wc->metadata_sectors - region.sector;

        region.sector += wc->start_sector;

        req.bi_op = REQ_OP_WRITE;
        req.bi_op_flags = REQ_SYNC | REQ_FUA;
        req.mem.type = DM_IO_VMA;
        req.mem.ptr.vma = (char *)wc->memory_map;
        req.client = wc->dm_io;
        req.notify.fn = NULL;
        req.notify.context = NULL;

        r = dm_io(&req, 1, &region, NULL);
        if (unlikely(r))
                writecache_error(wc, r, "error writing superblock");
}

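/*
 * This is the "optimize superblock write" path: when committing in SSD
 * mode, only the sequence counter in the superblock changes, so
 * rewriting just the first block with REQ_FUA is cheaper than pushing
 * the whole dirty bitmap through ssd_commit_flushed() again.  Note the
 * ordering in writecache_flush(): ssd_commit_flushed() ends with a
 * disk cache flush, so all metadata the new superblock refers to is
 * already durable when the FUA write lands.
 */
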
static void writecache_commit_flushed(struct dm_writecache *wc, bool wait_for_ios)
{
        if (WC_MODE_PMEM(wc))
                wmb();
        else
                ssd_commit_flushed(wc, wait_for_ios);
}

static void writecache_disk_flush(struct dm_writecache *wc, struct dm_dev *dev)
{
        int r;
        struct dm_io_region region;
        struct dm_io_request req;

        region.bdev = dev->bdev;
        region.sector = 0;
        region.count = 0;
        req.bi_op = REQ_OP_WRITE;
        req.bi_op_flags = REQ_PREFLUSH;
        req.mem.type = DM_IO_KMEM;
        req.mem.ptr.addr = NULL;
        req.client = wc->dm_io;
        req.notify.fn = NULL;

        r = dm_io(&req, 1, &region, NULL);
        if (unlikely(r))
                writecache_error(wc, r, "error flushing metadata: %d", r);
}

#define WFE_RETURN_FOLLOWING    1
#define WFE_LOWEST_SEQ          2

static struct wc_entry *writecache_find_entry(struct dm_writecache *wc,
                                              uint64_t block, int flags)
{
        struct wc_entry *e;
        struct rb_node *node = wc->tree.rb_node;

        if (unlikely(!node))
                return NULL;

        while (1) {
                e = container_of(node, struct wc_entry, rb_node);
                if (read_original_sector(wc, e) == block)
                        break;

                node = (read_original_sector(wc, e) >= block ?
                        e->rb_node.rb_left : e->rb_node.rb_right);
                if (unlikely(!node)) {
                        if (!(flags & WFE_RETURN_FOLLOWING))
                                return NULL;
                        if (read_original_sector(wc, e) >= block) {
                                return e;
                        } else {
                                node = rb_next(&e->rb_node);
                                if (unlikely(!node))
                                        return NULL;
                                e = container_of(node, struct wc_entry, rb_node);
                                return e;
                        }
                }
        }

        while (1) {
                struct wc_entry *e2;
                if (flags & WFE_LOWEST_SEQ)
                        node = rb_prev(&e->rb_node);
                else
                        node = rb_next(&e->rb_node);
                if (unlikely(!node))
                        return e;
                e2 = container_of(node, struct wc_entry, rb_node);
                if (read_original_sector(wc, e2) != block)
                        return e;
                e = e2;
        }
}

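/*
 * writecache_find_entry() flag semantics: with WFE_RETURN_FOLLOWING a
 * miss returns the entry with the lowest original sector above "block"
 * (used by reads and discards that scan forward); with WFE_LOWEST_SEQ
 * a hit walks to the oldest of several entries caching the same
 * sector, otherwise to the newest.
 */
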
static void writecache_insert_entry(struct dm_writecache *wc, struct wc_entry *ins)
{
        struct wc_entry *e;
        struct rb_node **node = &wc->tree.rb_node, *parent = NULL;

        while (*node) {
                e = container_of(*node, struct wc_entry, rb_node);
                parent = &e->rb_node;
                if (read_original_sector(wc, e) > read_original_sector(wc, ins))
                        node = &parent->rb_left;
                else
                        node = &parent->rb_right;
        }
        rb_link_node(&ins->rb_node, parent, node);
        rb_insert_color(&ins->rb_node, &wc->tree);
        list_add(&ins->lru, &wc->lru);
        ins->age = jiffies;
}

static void writecache_unlink(struct dm_writecache *wc, struct wc_entry *e)
{
        list_del(&e->lru);
        rb_erase(&e->rb_node, &wc->tree);
}

static void writecache_add_to_freelist(struct dm_writecache *wc, struct wc_entry *e)
{
        if (WC_MODE_SORT_FREELIST(wc)) {
                struct rb_node **node = &wc->freetree.rb_node, *parent = NULL;
                if (unlikely(!*node))
                        wc->current_free = e;
                while (*node) {
                        parent = *node;
                        if (&e->rb_node < *node)
                                node = &parent->rb_left;
                        else
                                node = &parent->rb_right;
                }
                rb_link_node(&e->rb_node, parent, node);
                rb_insert_color(&e->rb_node, &wc->freetree);
        } else {
                list_add_tail(&e->lru, &wc->freelist);
        }
        wc->freelist_size++;
}

static inline void writecache_verify_watermark(struct dm_writecache *wc)
{
        if (unlikely(wc->freelist_size + wc->writeback_size <= wc->freelist_high_watermark))
                queue_work(wc->writeback_wq, &wc->writeback_work);
}

static void writecache_max_age_timer(struct timer_list *t)
{
        struct dm_writecache *wc = from_timer(wc, t, max_age_timer);

        if (!dm_suspended(wc->ti) && !writecache_has_error(wc)) {
                queue_work(wc->writeback_wq, &wc->writeback_work);
                mod_timer(&wc->max_age_timer, jiffies + wc->max_age / MAX_AGE_DIV);
        }
}

static struct wc_entry *writecache_pop_from_freelist(struct dm_writecache *wc, sector_t expected_sector)
{
        struct wc_entry *e;

        if (WC_MODE_SORT_FREELIST(wc)) {
                struct rb_node *next;
                if (unlikely(!wc->current_free))
                        return NULL;
                e = wc->current_free;
                if (expected_sector != (sector_t)-1 && unlikely(cache_sector(wc, e) != expected_sector))
                        return NULL;
                next = rb_next(&e->rb_node);
                rb_erase(&e->rb_node, &wc->freetree);
                if (unlikely(!next))
                        next = rb_first(&wc->freetree);
                wc->current_free = next ? container_of(next, struct wc_entry, rb_node) : NULL;
        } else {
                if (unlikely(list_empty(&wc->freelist)))
                        return NULL;
                e = container_of(wc->freelist.next, struct wc_entry, lru);
                if (expected_sector != (sector_t)-1 && unlikely(cache_sector(wc, e) != expected_sector))
                        return NULL;
                list_del(&e->lru);
        }
        wc->freelist_size--;

        writecache_verify_watermark(wc);

        return e;
}

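/*
 * The expected_sector argument lets the SSD write path ask for the
 * free entry that directly follows the one it just allocated; if the
 * head of the (sorted) freelist is not that entry, NULL is returned
 * and the caller stops growing the bio.  Passing (sector_t)-1 accepts
 * any free entry.
 */
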
static void writecache_free_entry(struct dm_writecache *wc, struct wc_entry *e)
{
        writecache_unlink(wc, e);
        writecache_add_to_freelist(wc, e);
        clear_seq_count(wc, e);
        writecache_flush_region(wc, memory_entry(wc, e), sizeof(struct wc_memory_entry));
        if (unlikely(waitqueue_active(&wc->freelist_wait)))
                wake_up(&wc->freelist_wait);
}

static void writecache_wait_on_freelist(struct dm_writecache *wc)
{
        DEFINE_WAIT(wait);

        prepare_to_wait(&wc->freelist_wait, &wait, TASK_UNINTERRUPTIBLE);
        wc_unlock(wc);
        io_schedule();
        finish_wait(&wc->freelist_wait, &wait);
        wc_lock(wc);
}

static void writecache_poison_lists(struct dm_writecache *wc)
{
        /*
         * Catch incorrect access to these values while the device is suspended.
         */
        memset(&wc->tree, -1, sizeof wc->tree);
        wc->lru.next = LIST_POISON1;
        wc->lru.prev = LIST_POISON2;
        wc->freelist.next = LIST_POISON1;
        wc->freelist.prev = LIST_POISON2;
}

static void writecache_flush_entry(struct dm_writecache *wc, struct wc_entry *e)
{
        writecache_flush_region(wc, memory_entry(wc, e), sizeof(struct wc_memory_entry));
        if (WC_MODE_PMEM(wc))
                writecache_flush_region(wc, memory_data(wc, e), wc->block_size);
}

static bool writecache_entry_is_committed(struct dm_writecache *wc, struct wc_entry *e)
{
        return read_seq_count(wc, e) < wc->seq_count;
}

static void writecache_flush(struct dm_writecache *wc)
{
        struct wc_entry *e, *e2;
        bool need_flush_after_free;

        wc->uncommitted_blocks = 0;
        del_timer(&wc->autocommit_timer);

        if (list_empty(&wc->lru))
                return;

        e = container_of(wc->lru.next, struct wc_entry, lru);
        if (writecache_entry_is_committed(wc, e)) {
                if (wc->overwrote_committed) {
                        writecache_wait_for_ios(wc, WRITE);
                        writecache_disk_flush(wc, wc->ssd_dev);
                        wc->overwrote_committed = false;
                }
                return;
        }
        while (1) {
                writecache_flush_entry(wc, e);
                if (unlikely(e->lru.next == &wc->lru))
                        break;
                e2 = container_of(e->lru.next, struct wc_entry, lru);
                if (writecache_entry_is_committed(wc, e2))
                        break;
                e = e2;
                cond_resched();
        }
        writecache_commit_flushed(wc, true);

        wc->seq_count++;
        pmem_assign(sb(wc)->seq_count, cpu_to_le64(wc->seq_count));
        if (WC_MODE_PMEM(wc))
                writecache_commit_flushed(wc, false);
        else
                ssd_commit_superblock(wc);

        wc->overwrote_committed = false;

        need_flush_after_free = false;
        while (1) {
                /* Free another committed entry with lower seq-count */
                struct rb_node *rb_node = rb_prev(&e->rb_node);

                if (rb_node) {
                        e2 = container_of(rb_node, struct wc_entry, rb_node);
                        if (read_original_sector(wc, e2) == read_original_sector(wc, e) &&
                            likely(!e2->write_in_progress)) {
                                writecache_free_entry(wc, e2);
                                need_flush_after_free = true;
                        }
                }
                if (unlikely(e->lru.prev == &wc->lru))
                        break;
                e = container_of(e->lru.prev, struct wc_entry, lru);
                cond_resched();
        }

        if (need_flush_after_free)
                writecache_commit_flushed(wc, false);
}

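/*
 * Commit protocol used by writecache_flush(): (1) write the metadata
 * of all uncommitted entries and commit them (wmb on pmem, bitmap
 * writeback plus disk flush on SSD), (2) bump seq_count and persist it
 * in the superblock, (3) free superseded older copies of the same
 * sectors.  An entry counts as committed when its seq_count is below
 * the superblock's, so step (2) is what makes the new writes durable
 * and visible after a crash.
 */
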
static void writecache_flush_work(struct work_struct *work)
{
        struct dm_writecache *wc = container_of(work, struct dm_writecache, flush_work);

        wc_lock(wc);
        writecache_flush(wc);
        wc_unlock(wc);
}

static void writecache_autocommit_timer(struct timer_list *t)
{
        struct dm_writecache *wc = from_timer(wc, t, autocommit_timer);
        if (!writecache_has_error(wc))
                queue_work(wc->writeback_wq, &wc->flush_work);
}

static void writecache_schedule_autocommit(struct dm_writecache *wc)
{
        if (!timer_pending(&wc->autocommit_timer))
                mod_timer(&wc->autocommit_timer, jiffies + wc->autocommit_jiffies);
}

static void writecache_discard(struct dm_writecache *wc, sector_t start, sector_t end)
{
        struct wc_entry *e;
        bool discarded_something = false;

        e = writecache_find_entry(wc, start, WFE_RETURN_FOLLOWING | WFE_LOWEST_SEQ);
        if (unlikely(!e))
                return;

        while (read_original_sector(wc, e) < end) {
                struct rb_node *node = rb_next(&e->rb_node);

                if (likely(!e->write_in_progress)) {
                        if (!discarded_something) {
                                writecache_wait_for_ios(wc, READ);
                                writecache_wait_for_ios(wc, WRITE);
                                discarded_something = true;
                        }
                        writecache_free_entry(wc, e);
                }

                if (unlikely(!node))
                        break;

                e = container_of(node, struct wc_entry, rb_node);
        }

        if (discarded_something)
                writecache_commit_flushed(wc, false);
}

static bool writecache_wait_for_writeback(struct dm_writecache *wc)
{
        if (wc->writeback_size) {
                writecache_wait_on_freelist(wc);
                return true;
        }
        return false;
}

static void writecache_suspend(struct dm_target *ti)
{
        struct dm_writecache *wc = ti->private;
        bool flush_on_suspend;

        del_timer_sync(&wc->autocommit_timer);
        del_timer_sync(&wc->max_age_timer);

        wc_lock(wc);
        writecache_flush(wc);
        flush_on_suspend = wc->flush_on_suspend;
        if (flush_on_suspend) {
                wc->flush_on_suspend = false;
                wc->writeback_all++;
                queue_work(wc->writeback_wq, &wc->writeback_work);
        }
        wc_unlock(wc);

        drain_workqueue(wc->writeback_wq);

        wc_lock(wc);
        if (flush_on_suspend)
                wc->writeback_all--;
        while (writecache_wait_for_writeback(wc));

        if (WC_MODE_PMEM(wc))
                persistent_memory_flush_cache(wc->memory_map, wc->memory_map_size);

        writecache_poison_lists(wc);

        wc_unlock(wc);
}

static int writecache_alloc_entries(struct dm_writecache *wc)
{
        size_t b;

        if (wc->entries)
                return 0;
        wc->entries = vmalloc(array_size(sizeof(struct wc_entry), wc->n_blocks));
        if (!wc->entries)
                return -ENOMEM;
        for (b = 0; b < wc->n_blocks; b++) {
                struct wc_entry *e = &wc->entries[b];
                e->index = b;
                e->write_in_progress = false;
        }

        return 0;
}

static void writecache_resume(struct dm_target *ti)
{
        struct dm_writecache *wc = ti->private;
        size_t b;
        bool need_flush = false;
        __le64 sb_seq_count;
        int r;

        wc_lock(wc);

        if (WC_MODE_PMEM(wc))
                persistent_memory_invalidate_cache(wc->memory_map, wc->memory_map_size);

        wc->tree = RB_ROOT;
        INIT_LIST_HEAD(&wc->lru);
        if (WC_MODE_SORT_FREELIST(wc)) {
                wc->freetree = RB_ROOT;
                wc->current_free = NULL;
        } else {
                INIT_LIST_HEAD(&wc->freelist);
        }
        wc->freelist_size = 0;

        r = memcpy_mcsafe(&sb_seq_count, &sb(wc)->seq_count, sizeof(uint64_t));
        if (r) {
                writecache_error(wc, r, "hardware memory error when reading superblock: %d", r);
                sb_seq_count = cpu_to_le64(0);
        }
        wc->seq_count = le64_to_cpu(sb_seq_count);

#ifdef DM_WRITECACHE_HANDLE_HARDWARE_ERRORS
        for (b = 0; b < wc->n_blocks; b++) {
                struct wc_entry *e = &wc->entries[b];
                struct wc_memory_entry wme;
                if (writecache_has_error(wc)) {
                        e->original_sector = -1;
                        e->seq_count = -1;
                        continue;
                }
                r = memcpy_mcsafe(&wme, memory_entry(wc, e), sizeof(struct wc_memory_entry));
                if (r) {
                        writecache_error(wc, r, "hardware memory error when reading metadata entry %lu: %d",
                                         (unsigned long)b, r);
                        e->original_sector = -1;
                        e->seq_count = -1;
                } else {
                        e->original_sector = le64_to_cpu(wme.original_sector);
                        e->seq_count = le64_to_cpu(wme.seq_count);
                }
        }
#endif
        for (b = 0; b < wc->n_blocks; b++) {
                struct wc_entry *e = &wc->entries[b];
                if (!writecache_entry_is_committed(wc, e)) {
                        if (read_seq_count(wc, e) != -1) {
erase_this:
                                clear_seq_count(wc, e);
                                need_flush = true;
                        }
                        writecache_add_to_freelist(wc, e);
                } else {
                        struct wc_entry *old;

                        old = writecache_find_entry(wc, read_original_sector(wc, e), 0);
                        if (!old) {
                                writecache_insert_entry(wc, e);
                        } else {
                                if (read_seq_count(wc, old) == read_seq_count(wc, e)) {
                                        writecache_error(wc, -EINVAL,
                                                 "two identical entries, position %llu, sector %llu, sequence %llu",
                                                 (unsigned long long)b, (unsigned long long)read_original_sector(wc, e),
                                                 (unsigned long long)read_seq_count(wc, e));
                                }
                                if (read_seq_count(wc, old) > read_seq_count(wc, e)) {
                                        goto erase_this;
                                } else {
                                        writecache_free_entry(wc, old);
                                        writecache_insert_entry(wc, e);
                                        need_flush = true;
                                }
                        }
                }
                cond_resched();
        }

        if (need_flush) {
                writecache_flush_all_metadata(wc);
                writecache_commit_flushed(wc, false);
        }

        writecache_verify_watermark(wc);

        if (wc->max_age != MAX_AGE_UNSPECIFIED)
                mod_timer(&wc->max_age_timer, jiffies + wc->max_age / MAX_AGE_DIV);

        wc_unlock(wc);
}

static int process_flush_mesg(unsigned argc, char **argv, struct dm_writecache *wc)
{
        if (argc != 1)
                return -EINVAL;

        wc_lock(wc);
        if (dm_suspended(wc->ti)) {
                wc_unlock(wc);
                return -EBUSY;
        }
        if (writecache_has_error(wc)) {
                wc_unlock(wc);
                return -EIO;
        }

        writecache_flush(wc);
        wc->writeback_all++;
        queue_work(wc->writeback_wq, &wc->writeback_work);
        wc_unlock(wc);

        flush_workqueue(wc->writeback_wq);

        wc_lock(wc);
        wc->writeback_all--;
        if (writecache_has_error(wc)) {
                wc_unlock(wc);
                return -EIO;
        }
        wc_unlock(wc);

        return 0;
}

static int process_flush_on_suspend_mesg(unsigned argc, char **argv, struct dm_writecache *wc)
{
        if (argc != 1)
                return -EINVAL;

        wc_lock(wc);
        wc->flush_on_suspend = true;
        wc_unlock(wc);

        return 0;
}

static void activate_cleaner(struct dm_writecache *wc)
{
        wc->flush_on_suspend = true;
        wc->cleaner = true;
        wc->freelist_high_watermark = wc->n_blocks;
        wc->freelist_low_watermark = wc->n_blocks;
}

static int process_cleaner_mesg(unsigned argc, char **argv, struct dm_writecache *wc)
{
        if (argc != 1)
                return -EINVAL;

        wc_lock(wc);
        activate_cleaner(wc);
        if (!dm_suspended(wc->ti))
                writecache_verify_watermark(wc);
        wc_unlock(wc);

        return 0;
}

static int writecache_message(struct dm_target *ti, unsigned argc, char **argv,
                              char *result, unsigned maxlen)
{
        int r = -EINVAL;
        struct dm_writecache *wc = ti->private;

        if (!strcasecmp(argv[0], "flush"))
                r = process_flush_mesg(argc, argv, wc);
        else if (!strcasecmp(argv[0], "flush_on_suspend"))
                r = process_flush_on_suspend_mesg(argc, argv, wc);
        else if (!strcasecmp(argv[0], "cleaner"))
                r = process_cleaner_mesg(argc, argv, wc);
        else
                DMERR("unrecognised message received: %s", argv[0]);

        return r;
}

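/*
 * These messages are sent with "dmsetup message"; for example, to
 * flush the cache and write everything back:
 *
 *      dmsetup message <device> 0 flush
 *
 * "flush_on_suspend" arms a flush for the next suspend, and "cleaner"
 * switches the target into write-everything-back mode.
 */
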
static void bio_copy_block(struct dm_writecache *wc, struct bio *bio, void *data)
{
        void *buf;
        unsigned long flags;
        unsigned size;
        int rw = bio_data_dir(bio);
        unsigned remaining_size = wc->block_size;

        do {
                struct bio_vec bv = bio_iter_iovec(bio, bio->bi_iter);
                buf = bvec_kmap_irq(&bv, &flags);
                size = bv.bv_len;
                if (unlikely(size > remaining_size))
                        size = remaining_size;

                if (rw == READ) {
                        int r;
                        r = memcpy_mcsafe(buf, data, size);
                        flush_dcache_page(bio_page(bio));
                        if (unlikely(r)) {
                                writecache_error(wc, r, "hardware memory error when reading data: %d", r);
                                bio->bi_status = BLK_STS_IOERR;
                        }
                } else {
                        flush_dcache_page(bio_page(bio));
                        memcpy_flushcache(data, buf, size);
                }

                bvec_kunmap_irq(buf, &flags);

                data = (char *)data + size;
                remaining_size -= size;
                bio_advance(bio, size);
        } while (unlikely(remaining_size));
}

static int writecache_flush_thread(void *data)
{
        struct dm_writecache *wc = data;

        while (1) {
                struct bio *bio;

                wc_lock(wc);
                bio = bio_list_pop(&wc->flush_list);
                if (!bio) {
                        set_current_state(TASK_INTERRUPTIBLE);
                        wc_unlock(wc);

                        if (unlikely(kthread_should_stop())) {
                                set_current_state(TASK_RUNNING);
                                break;
                        }

                        schedule();
                        continue;
                }

                if (bio_op(bio) == REQ_OP_DISCARD) {
                        writecache_discard(wc, bio->bi_iter.bi_sector,
                                           bio_end_sector(bio));
                        wc_unlock(wc);
                        bio_set_dev(bio, wc->dev->bdev);
                        generic_make_request(bio);
                } else {
                        writecache_flush(wc);
                        wc_unlock(wc);
                        if (writecache_has_error(wc))
                                bio->bi_status = BLK_STS_IOERR;
                        bio_endio(bio);
                }
        }

        return 0;
}

static void writecache_offload_bio(struct dm_writecache *wc, struct bio *bio)
{
        if (bio_list_empty(&wc->flush_list))
                wake_up_process(wc->flush_thread);
        bio_list_add(&wc->flush_list, bio);
}

static int writecache_map(struct dm_target *ti, struct bio *bio)
{
        struct wc_entry *e;
        struct dm_writecache *wc = ti->private;

        bio->bi_private = NULL;

        wc_lock(wc);

        if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
                if (writecache_has_error(wc))
                        goto unlock_error;
                if (WC_MODE_PMEM(wc)) {
                        writecache_flush(wc);
                        if (writecache_has_error(wc))
                                goto unlock_error;
                        goto unlock_submit;
                } else {
                        writecache_offload_bio(wc, bio);
                        goto unlock_return;
                }
        }

        bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);

        if (unlikely((((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) &
                                (wc->block_size / 512 - 1)) != 0)) {
                DMERR("I/O is not aligned, sector %llu, size %u, block size %u",
                      (unsigned long long)bio->bi_iter.bi_sector,
                      bio->bi_iter.bi_size, wc->block_size);
                goto unlock_error;
        }

        if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) {
                if (writecache_has_error(wc))
                        goto unlock_error;
                if (WC_MODE_PMEM(wc)) {
                        writecache_discard(wc, bio->bi_iter.bi_sector, bio_end_sector(bio));
                        goto unlock_remap_origin;
                } else {
                        writecache_offload_bio(wc, bio);
                        goto unlock_return;
                }
        }

        if (bio_data_dir(bio) == READ) {
read_next_block:
                e = writecache_find_entry(wc, bio->bi_iter.bi_sector, WFE_RETURN_FOLLOWING);
                if (e && read_original_sector(wc, e) == bio->bi_iter.bi_sector) {
                        if (WC_MODE_PMEM(wc)) {
                                bio_copy_block(wc, bio, memory_data(wc, e));
                                if (bio->bi_iter.bi_size)
                                        goto read_next_block;
                                goto unlock_submit;
                        } else {
                                dm_accept_partial_bio(bio, wc->block_size >> SECTOR_SHIFT);
                                bio_set_dev(bio, wc->ssd_dev->bdev);
                                bio->bi_iter.bi_sector = cache_sector(wc, e);
                                if (!writecache_entry_is_committed(wc, e))
                                        writecache_wait_for_ios(wc, WRITE);
                                goto unlock_remap;
                        }
                } else {
                        if (e) {
                                sector_t next_boundary =
                                        read_original_sector(wc, e) - bio->bi_iter.bi_sector;
                                if (next_boundary < bio->bi_iter.bi_size >> SECTOR_SHIFT) {
                                        dm_accept_partial_bio(bio, next_boundary);
                                }
                        }
                        goto unlock_remap_origin;
                }
        } else {
                do {
                        bool found_entry = false;
                        if (writecache_has_error(wc))
                                goto unlock_error;
                        e = writecache_find_entry(wc, bio->bi_iter.bi_sector, 0);
                        if (e) {
                                if (!writecache_entry_is_committed(wc, e))
                                        goto bio_copy;
                                if (!WC_MODE_PMEM(wc) && !e->write_in_progress) {
                                        wc->overwrote_committed = true;
                                        goto bio_copy;
                                }
                                found_entry = true;
                        } else {
                                if (unlikely(wc->cleaner))
                                        goto direct_write;
                        }
                        e = writecache_pop_from_freelist(wc, (sector_t)-1);
                        if (unlikely(!e)) {
                                if (!found_entry) {
direct_write:
                                        e = writecache_find_entry(wc, bio->bi_iter.bi_sector, WFE_RETURN_FOLLOWING);
                                        if (e) {
                                                sector_t next_boundary = read_original_sector(wc, e) - bio->bi_iter.bi_sector;
                                                BUG_ON(!next_boundary);
                                                if (next_boundary < bio->bi_iter.bi_size >> SECTOR_SHIFT) {
                                                        dm_accept_partial_bio(bio, next_boundary);
                                                }
                                        }
                                        goto unlock_remap_origin;
                                }
                                writecache_wait_on_freelist(wc);
                                continue;
                        }
                        write_original_sector_seq_count(wc, e, bio->bi_iter.bi_sector, wc->seq_count);
                        writecache_insert_entry(wc, e);
                        wc->uncommitted_blocks++;
bio_copy:
                        if (WC_MODE_PMEM(wc)) {
                                bio_copy_block(wc, bio, memory_data(wc, e));
                        } else {
                                unsigned bio_size = wc->block_size;
                                sector_t start_cache_sec = cache_sector(wc, e);
                                sector_t current_cache_sec = start_cache_sec + (bio_size >> SECTOR_SHIFT);

                                while (bio_size < bio->bi_iter.bi_size) {
                                        struct wc_entry *f = writecache_pop_from_freelist(wc, current_cache_sec);
                                        if (!f)
                                                break;
                                        write_original_sector_seq_count(wc, f, bio->bi_iter.bi_sector +
                                                                        (bio_size >> SECTOR_SHIFT), wc->seq_count);
                                        writecache_insert_entry(wc, f);
                                        wc->uncommitted_blocks++;
                                        bio_size += wc->block_size;
                                        current_cache_sec += wc->block_size >> SECTOR_SHIFT;
                                }

                                bio_set_dev(bio, wc->ssd_dev->bdev);
                                bio->bi_iter.bi_sector = start_cache_sec;
                                dm_accept_partial_bio(bio, bio_size >> SECTOR_SHIFT);

                                if (unlikely(wc->uncommitted_blocks >= wc->autocommit_blocks)) {
                                        wc->uncommitted_blocks = 0;
                                        queue_work(wc->writeback_wq, &wc->flush_work);
                                } else {
                                        writecache_schedule_autocommit(wc);
                                }
                                goto unlock_remap;
                        }
                } while (bio->bi_iter.bi_size);

                if (unlikely(bio->bi_opf & REQ_FUA ||
                             wc->uncommitted_blocks >= wc->autocommit_blocks))
                        writecache_flush(wc);
                else
                        writecache_schedule_autocommit(wc);
                goto unlock_submit;
        }

unlock_remap_origin:
        bio_set_dev(bio, wc->dev->bdev);
        wc_unlock(wc);
        return DM_MAPIO_REMAPPED;

unlock_remap:
        /* make sure that writecache_end_io decrements bio_in_progress: */
        bio->bi_private = (void *)1;
        atomic_inc(&wc->bio_in_progress[bio_data_dir(bio)]);
        wc_unlock(wc);
        return DM_MAPIO_REMAPPED;

unlock_submit:
        wc_unlock(wc);
        bio_endio(bio);
        return DM_MAPIO_SUBMITTED;

unlock_return:
        wc_unlock(wc);
        return DM_MAPIO_SUBMITTED;

unlock_error:
        wc_unlock(wc);
        bio_io_error(bio);
        return DM_MAPIO_SUBMITTED;
}

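/*
 * writecache_map() exits through one of five labels: remap to the
 * origin or the cache device (DM_MAPIO_REMAPPED), complete the bio in
 * place or hand it to the flush thread (DM_MAPIO_SUBMITTED), or fail
 * it after a previous error.  Bios remapped to the cache device set
 * bi_private so writecache_end_io() knows to drop bio_in_progress.
 */
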
1378 static int writecache_end_io(struct dm_target *ti, struct bio *bio, blk_status_t *status)
1379 {
1380         struct dm_writecache *wc = ti->private;
1381
1382         if (bio->bi_private != NULL) {
1383                 int dir = bio_data_dir(bio);
1384                 if (atomic_dec_and_test(&wc->bio_in_progress[dir]))
1385                         if (unlikely(waitqueue_active(&wc->bio_in_progress_wait[dir])))
1386                                 wake_up(&wc->bio_in_progress_wait[dir]);
1387         }
1388         return 0;
1389 }
1390
1391 static int writecache_iterate_devices(struct dm_target *ti,
1392                                       iterate_devices_callout_fn fn, void *data)
1393 {
1394         struct dm_writecache *wc = ti->private;
1395
1396         return fn(ti, wc->dev, 0, ti->len, data);
1397 }
1398
1399 static void writecache_io_hints(struct dm_target *ti, struct queue_limits *limits)
1400 {
1401         struct dm_writecache *wc = ti->private;
1402
1403         if (limits->logical_block_size < wc->block_size)
1404                 limits->logical_block_size = wc->block_size;
1405
1406         if (limits->physical_block_size < wc->block_size)
1407                 limits->physical_block_size = wc->block_size;
1408
1409         if (limits->io_min < wc->block_size)
1410                 limits->io_min = wc->block_size;
1411 }
1412
1413
1414 static void writecache_writeback_endio(struct bio *bio)
1415 {
1416         struct writeback_struct *wb = container_of(bio, struct writeback_struct, bio);
1417         struct dm_writecache *wc = wb->wc;
1418         unsigned long flags;
1419
1420         raw_spin_lock_irqsave(&wc->endio_list_lock, flags);
1421         if (unlikely(list_empty(&wc->endio_list)))
1422                 wake_up_process(wc->endio_thread);
1423         list_add_tail(&wb->endio_entry, &wc->endio_list);
1424         raw_spin_unlock_irqrestore(&wc->endio_list_lock, flags);
1425 }
1426
1427 static void writecache_copy_endio(int read_err, unsigned long write_err, void *ptr)
1428 {
1429         struct copy_struct *c = ptr;
1430         struct dm_writecache *wc = c->wc;
1431
1432         c->error = likely(!(read_err | write_err)) ? 0 : -EIO;
1433
1434         raw_spin_lock_irq(&wc->endio_list_lock);
1435         if (unlikely(list_empty(&wc->endio_list)))
1436                 wake_up_process(wc->endio_thread);
1437         list_add_tail(&c->endio_entry, &wc->endio_list);
1438         raw_spin_unlock_irq(&wc->endio_list_lock);
1439 }
1440
1441 static void __writecache_endio_pmem(struct dm_writecache *wc, struct list_head *list)
1442 {
1443         unsigned i;
1444         struct writeback_struct *wb;
1445         struct wc_entry *e;
1446         unsigned long n_walked = 0;
1447
1448         do {
1449                 wb = list_entry(list->next, struct writeback_struct, endio_entry);
1450                 list_del(&wb->endio_entry);
1451
1452                 if (unlikely(wb->bio.bi_status != BLK_STS_OK))
1453                         writecache_error(wc, blk_status_to_errno(wb->bio.bi_status),
1454                                         "write error %d", wb->bio.bi_status);
1455                 i = 0;
1456                 do {
1457                         e = wb->wc_list[i];
1458                         BUG_ON(!e->write_in_progress);
1459                         e->write_in_progress = false;
1460                         INIT_LIST_HEAD(&e->lru);
1461                         if (!writecache_has_error(wc))
1462                                 writecache_free_entry(wc, e);
1463                         BUG_ON(!wc->writeback_size);
1464                         wc->writeback_size--;
1465                         n_walked++;
1466                         if (unlikely(n_walked >= ENDIO_LATENCY)) {
1467                                 writecache_commit_flushed(wc, false);
1468                                 wc_unlock(wc);
1469                                 wc_lock(wc);
1470                                 n_walked = 0;
1471                         }
1472                 } while (++i < wb->wc_list_n);
1473
1474                 if (wb->wc_list != wb->wc_list_inline)
1475                         kfree(wb->wc_list);
1476                 bio_put(&wb->bio);
1477         } while (!list_empty(list));
1478 }
1479
1480 static void __writecache_endio_ssd(struct dm_writecache *wc, struct list_head *list)
1481 {
1482         struct copy_struct *c;
1483         struct wc_entry *e;
1484
1485         do {
1486                 c = list_entry(list->next, struct copy_struct, endio_entry);
1487                 list_del(&c->endio_entry);
1488
1489                 if (unlikely(c->error))
1490                         writecache_error(wc, c->error, "copy error");
1491
1492                 e = c->e;
1493                 do {
1494                         BUG_ON(!e->write_in_progress);
1495                         e->write_in_progress = false;
1496                         INIT_LIST_HEAD(&e->lru);
1497                         if (!writecache_has_error(wc))
1498                                 writecache_free_entry(wc, e);
1499
1500                         BUG_ON(!wc->writeback_size);
1501                         wc->writeback_size--;
1502                         e++;
1503                 } while (--c->n_entries);
1504                 mempool_free(c, &wc->copy_pool);
1505         } while (!list_empty(list));
1506 }
1507
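/*
 * The endio thread: waits for completed writebacks, flushes the origin
 * device (unless FUA writes were used), returns the affected entries to
 * the freelist and commits the updated metadata.
 */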
1508 static int writecache_endio_thread(void *data)
1509 {
1510         struct dm_writecache *wc = data;
1511
1512         while (1) {
1513                 struct list_head list;
1514
1515                 raw_spin_lock_irq(&wc->endio_list_lock);
1516                 if (!list_empty(&wc->endio_list))
1517                         goto pop_from_list;
1518                 set_current_state(TASK_INTERRUPTIBLE);
1519                 raw_spin_unlock_irq(&wc->endio_list_lock);
1520
1521                 if (unlikely(kthread_should_stop())) {
1522                         set_current_state(TASK_RUNNING);
1523                         break;
1524                 }
1525
1526                 schedule();
1527
1528                 continue;
1529
1530 pop_from_list:
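                /*
                 * Grab the whole endio list: copy the list head to the stack,
                 * re-point the first and last elements at the copy and then
                 * reinitialize wc->endio_list.
                 */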
1531                 list = wc->endio_list;
1532                 list.next->prev = list.prev->next = &list;
1533                 INIT_LIST_HEAD(&wc->endio_list);
1534                 raw_spin_unlock_irq(&wc->endio_list_lock);
1535
1536                 if (!WC_MODE_FUA(wc))
1537                         writecache_disk_flush(wc, wc->dev);
1538
1539                 wc_lock(wc);
1540
1541                 if (WC_MODE_PMEM(wc)) {
1542                         __writecache_endio_pmem(wc, &list);
1543                 } else {
1544                         __writecache_endio_ssd(wc, &list);
1545                         writecache_wait_for_ios(wc, READ);
1546                 }
1547
1548                 writecache_commit_flushed(wc, false);
1549
1550                 wc_unlock(wc);
1551         }
1552
1553         return 0;
1554 }
1555
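/*
 * Flush one cache block from persistent memory and add its backing page
 * to the writeback bio.  Returns false if the page could not be added
 * because the bio is already full.
 */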
1556 static bool wc_add_block(struct writeback_struct *wb, struct wc_entry *e, gfp_t gfp)
1557 {
1558         struct dm_writecache *wc = wb->wc;
1559         unsigned block_size = wc->block_size;
1560         void *address = memory_data(wc, e);
1561
1562         persistent_memory_flush_cache(address, block_size);
1563         return bio_add_page(&wb->bio, persistent_memory_page(address),
1564                             block_size, persistent_memory_page_offset(address)) != 0;
1565 }
1566
1567 struct writeback_list {
1568         struct list_head list;
1569         size_t size;
1570 };
1571
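/*
 * If a writeback_jobs limit is configured, wait until the number of
 * writeback jobs already in flight (excluding the batch being built in
 * wbl) drops below that limit.
 */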
1572 static void __writeback_throttle(struct dm_writecache *wc, struct writeback_list *wbl)
1573 {
1574         if (unlikely(wc->max_writeback_jobs)) {
1575                 if (READ_ONCE(wc->writeback_size) - wbl->size >= wc->max_writeback_jobs) {
1576                         wc_lock(wc);
1577                         while (wc->writeback_size - wbl->size >= wc->max_writeback_jobs)
1578                                 writecache_wait_on_freelist(wc);
1579                         wc_unlock(wc);
1580                 }
1581         }
1582         cond_resched();
1583 }
1584
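/*
 * Submit pmem writebacks: pop entries from the tail of the list and build
 * one bio per run of consecutive original sectors, falling back to the
 * small inline entry array if kmalloc_array() fails.
 */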
1585 static void __writecache_writeback_pmem(struct dm_writecache *wc, struct writeback_list *wbl)
1586 {
1587         struct wc_entry *e, *f;
1588         struct bio *bio;
1589         struct writeback_struct *wb;
1590         unsigned max_pages;
1591
1592         while (wbl->size) {
1593                 wbl->size--;
1594                 e = container_of(wbl->list.prev, struct wc_entry, lru);
1595                 list_del(&e->lru);
1596
1597                 max_pages = e->wc_list_contiguous;
1598
1599                 bio = bio_alloc_bioset(GFP_NOIO, max_pages, &wc->bio_set);
1600                 wb = container_of(bio, struct writeback_struct, bio);
1601                 wb->wc = wc;
1602                 bio->bi_end_io = writecache_writeback_endio;
1603                 bio_set_dev(bio, wc->dev->bdev);
1604                 bio->bi_iter.bi_sector = read_original_sector(wc, e);
1605                 if (max_pages <= WB_LIST_INLINE ||
1606                     unlikely(!(wb->wc_list = kmalloc_array(max_pages, sizeof(struct wc_entry *),
1607                                                            GFP_NOIO | __GFP_NORETRY |
1608                                                            __GFP_NOMEMALLOC | __GFP_NOWARN)))) {
1609                         wb->wc_list = wb->wc_list_inline;
1610                         max_pages = WB_LIST_INLINE;
1611                 }
1612
1613                 BUG_ON(!wc_add_block(wb, e, GFP_NOIO));
1614
1615                 wb->wc_list[0] = e;
1616                 wb->wc_list_n = 1;
1617
1618                 while (wbl->size && wb->wc_list_n < max_pages) {
1619                         f = container_of(wbl->list.prev, struct wc_entry, lru);
1620                         if (read_original_sector(wc, f) !=
1621                             read_original_sector(wc, e) + (wc->block_size >> SECTOR_SHIFT))
1622                                 break;
1623                         if (!wc_add_block(wb, f, GFP_NOWAIT | __GFP_NOWARN))
1624                                 break;
1625                         wbl->size--;
1626                         list_del(&f->lru);
1627                         wb->wc_list[wb->wc_list_n++] = f;
1628                         e = f;
1629                 }
1630                 bio_set_op_attrs(bio, REQ_OP_WRITE, WC_MODE_FUA(wc) * REQ_FUA);
1631                 if (writecache_has_error(wc)) {
1632                         bio->bi_status = BLK_STS_IOERR;
1633                         bio_endio(bio);
1634                 } else {
1635                         submit_bio(bio);
1636                 }
1637
1638                 __writeback_throttle(wc, wbl);
1639         }
1640 }
1641
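/*
 * Submit SSD writebacks: each run of contiguous cache blocks becomes a
 * single dm-kcopyd copy from the cache device to the origin device.
 */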
1642 static void __writecache_writeback_ssd(struct dm_writecache *wc, struct writeback_list *wbl)
1643 {
1644         struct wc_entry *e, *f;
1645         struct dm_io_region from, to;
1646         struct copy_struct *c;
1647
1648         while (wbl->size) {
1649                 unsigned n_sectors;
1650
1651                 wbl->size--;
1652                 e = container_of(wbl->list.prev, struct wc_entry, lru);
1653                 list_del(&e->lru);
1654
1655                 n_sectors = e->wc_list_contiguous << (wc->block_size_bits - SECTOR_SHIFT);
1656
1657                 from.bdev = wc->ssd_dev->bdev;
1658                 from.sector = cache_sector(wc, e);
1659                 from.count = n_sectors;
1660                 to.bdev = wc->dev->bdev;
1661                 to.sector = read_original_sector(wc, e);
1662                 to.count = n_sectors;
1663
1664                 c = mempool_alloc(&wc->copy_pool, GFP_NOIO);
1665                 c->wc = wc;
1666                 c->e = e;
1667                 c->n_entries = e->wc_list_contiguous;
1668
1669                 while ((n_sectors -= wc->block_size >> SECTOR_SHIFT)) {
1670                         wbl->size--;
1671                         f = container_of(wbl->list.prev, struct wc_entry, lru);
1672                         BUG_ON(f != e + 1);
1673                         list_del(&f->lru);
1674                         e = f;
1675                 }
1676
1677                 dm_kcopyd_copy(wc->dm_kcopyd, &from, 1, &to, 0, writecache_copy_endio, c);
1678
1679                 __writeback_throttle(wc, wbl);
1680         }
1681 }
1682
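/*
 * The writeback work: scan the LRU list (or, when wc->writeback_all is
 * set, the whole tree in sector order), collect committed entries into
 * runs of consecutive original sectors and hand them to the pmem or SSD
 * writeback routine.  An entry whose older duplicate is still being
 * written back is moved to a "skipped" list and retried later.
 */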
1683 static void writecache_writeback(struct work_struct *work)
1684 {
1685         struct dm_writecache *wc = container_of(work, struct dm_writecache, writeback_work);
1686         struct blk_plug plug;
1687         struct wc_entry *f, *uninitialized_var(g), *e = NULL;
1688         struct rb_node *node, *next_node;
1689         struct list_head skipped;
1690         struct writeback_list wbl;
1691         unsigned long n_walked;
1692
1693         wc_lock(wc);
1694 restart:
1695         if (writecache_has_error(wc)) {
1696                 wc_unlock(wc);
1697                 return;
1698         }
1699
1700         if (unlikely(wc->writeback_all)) {
1701                 if (writecache_wait_for_writeback(wc))
1702                         goto restart;
1703         }
1704
1705         if (wc->overwrote_committed) {
1706                 writecache_wait_for_ios(wc, WRITE);
1707         }
1708
1709         n_walked = 0;
1710         INIT_LIST_HEAD(&skipped);
1711         INIT_LIST_HEAD(&wbl.list);
1712         wbl.size = 0;
1713         while (!list_empty(&wc->lru) &&
1714                (wc->writeback_all ||
1715                 wc->freelist_size + wc->writeback_size <= wc->freelist_low_watermark ||
1716                 (jiffies - container_of(wc->lru.prev, struct wc_entry, lru)->age >=
1717                  wc->max_age - wc->max_age / MAX_AGE_DIV))) {
1718
1719                 n_walked++;
1720                 if (unlikely(n_walked > WRITEBACK_LATENCY) &&
1721                     likely(!wc->writeback_all) && likely(!dm_suspended(wc->ti))) {
1722                         queue_work(wc->writeback_wq, &wc->writeback_work);
1723                         break;
1724                 }
1725
1726                 if (unlikely(wc->writeback_all)) {
1727                         if (unlikely(!e)) {
1728                                 writecache_flush(wc);
1729                                 e = container_of(rb_first(&wc->tree), struct wc_entry, rb_node);
1730                         } else
1731                                 e = g;
1732                 } else
1733                         e = container_of(wc->lru.prev, struct wc_entry, lru);
1734                 BUG_ON(e->write_in_progress);
1735                 if (unlikely(!writecache_entry_is_committed(wc, e))) {
1736                         writecache_flush(wc);
1737                 }
1738                 node = rb_prev(&e->rb_node);
1739                 if (node) {
1740                         f = container_of(node, struct wc_entry, rb_node);
1741                         if (unlikely(read_original_sector(wc, f) ==
1742                                      read_original_sector(wc, e))) {
1743                                 BUG_ON(!f->write_in_progress);
1744                                 list_del(&e->lru);
1745                                 list_add(&e->lru, &skipped);
1746                                 cond_resched();
1747                                 continue;
1748                         }
1749                 }
1750                 wc->writeback_size++;
1751                 list_del(&e->lru);
1752                 list_add(&e->lru, &wbl.list);
1753                 wbl.size++;
1754                 e->write_in_progress = true;
1755                 e->wc_list_contiguous = 1;
1756
1757                 f = e;
1758
1759                 while (1) {
1760                         next_node = rb_next(&f->rb_node);
1761                         if (unlikely(!next_node))
1762                                 break;
1763                         g = container_of(next_node, struct wc_entry, rb_node);
1764                         if (unlikely(read_original_sector(wc, g) ==
1765                             read_original_sector(wc, f))) {
1766                                 f = g;
1767                                 continue;
1768                         }
1769                         if (read_original_sector(wc, g) !=
1770                             read_original_sector(wc, f) + (wc->block_size >> SECTOR_SHIFT))
1771                                 break;
1772                         if (unlikely(g->write_in_progress))
1773                                 break;
1774                         if (unlikely(!writecache_entry_is_committed(wc, g)))
1775                                 break;
1776
1777                         if (!WC_MODE_PMEM(wc)) {
1778                                 if (g != f + 1)
1779                                         break;
1780                         }
1781
1782                         n_walked++;
1783                         //if (unlikely(n_walked > WRITEBACK_LATENCY) && likely(!wc->writeback_all))
1784                         //      break;
1785
1786                         wc->writeback_size++;
1787                         list_del(&g->lru);
1788                         list_add(&g->lru, &wbl.list);
1789                         wbl.size++;
1790                         g->write_in_progress = true;
1791                         g->wc_list_contiguous = BIO_MAX_PAGES;
1792                         f = g;
1793                         e->wc_list_contiguous++;
1794                         if (unlikely(e->wc_list_contiguous == BIO_MAX_PAGES)) {
1795                                 if (unlikely(wc->writeback_all)) {
1796                                         next_node = rb_next(&f->rb_node);
1797                                         if (likely(next_node))
1798                                                 g = container_of(next_node, struct wc_entry, rb_node);
1799                                 }
1800                                 break;
1801                         }
1802                 }
1803                 cond_resched();
1804         }
1805
1806         if (!list_empty(&skipped)) {
1807                 list_splice_tail(&skipped, &wc->lru);
1808                 /*
1809                  * If we didn't make any progress, we must wait until some
1810                  * writeback finishes to avoid burning CPU in a loop
1811                  */
1812                 if (unlikely(!wbl.size))
1813                         writecache_wait_for_writeback(wc);
1814         }
1815
1816         wc_unlock(wc);
1817
1818         blk_start_plug(&plug);
1819
1820         if (WC_MODE_PMEM(wc))
1821                 __writecache_writeback_pmem(wc, &wbl);
1822         else
1823                 __writecache_writeback_ssd(wc, &wbl);
1824
1825         blk_finish_plug(&plug);
1826
1827         if (unlikely(wc->writeback_all)) {
1828                 wc_lock(wc);
1829                 while (writecache_wait_for_writeback(wc));
1830                 wc_unlock(wc);
1831         }
1832 }
1833
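/*
 * Split the cache device into a metadata area (the superblock plus one
 * wc_memory_entry per cache block) and a data area, with the data area
 * aligned to the block size.  For example, a 1 GiB device with 4 KiB
 * blocks yields n_blocks = 261123 and 1021 metadata blocks (the 64-byte
 * superblock plus 261123 16-byte entries, rounded up to 4 KiB).
 */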
1834 static int calculate_memory_size(uint64_t device_size, unsigned block_size,
1835                                  size_t *n_blocks_p, size_t *n_metadata_blocks_p)
1836 {
1837         uint64_t n_blocks, offset;
1838         struct wc_entry e;
1839
1840         n_blocks = device_size;
1841         do_div(n_blocks, block_size + sizeof(struct wc_memory_entry));
1842
1843         while (1) {
1844                 if (!n_blocks)
1845                         return -ENOSPC;
1846                 /* Verify that the following offsetof(entries[n_blocks]) won't overflow */
1847                 if (n_blocks >= ((size_t)-sizeof(struct wc_memory_superblock) /
1848                                  sizeof(struct wc_memory_entry)))
1849                         return -EFBIG;
1850                 offset = offsetof(struct wc_memory_superblock, entries[n_blocks]);
1851                 offset = (offset + block_size - 1) & ~(uint64_t)(block_size - 1);
1852                 if (offset + n_blocks * block_size <= device_size)
1853                         break;
1854                 n_blocks--;
1855         }
1856
1857         /* check if the bit field overflows */
1858         e.index = n_blocks;
1859         if (e.index != n_blocks)
1860                 return -EFBIG;
1861
1862         if (n_blocks_p)
1863                 *n_blocks_p = n_blocks;
1864         if (n_metadata_blocks_p)
1865                 *n_metadata_blocks_p = offset >> __ffs(block_size);
1866         return 0;
1867 }
1868
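/*
 * Format a pristine cache: initialize the superblock fields, mark every
 * entry as free (original_sector and seq_count of -1) and commit all of
 * that before writing the magic, so that a crash during initialization
 * leaves an unrecognizable superblock rather than a half-valid one.
 */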
1869 static int init_memory(struct dm_writecache *wc)
1870 {
1871         size_t b;
1872         int r;
1873
1874         r = calculate_memory_size(wc->memory_map_size, wc->block_size, &wc->n_blocks, NULL);
1875         if (r)
1876                 return r;
1877
1878         r = writecache_alloc_entries(wc);
1879         if (r)
1880                 return r;
1881
1882         for (b = 0; b < ARRAY_SIZE(sb(wc)->padding); b++)
1883                 pmem_assign(sb(wc)->padding[b], cpu_to_le64(0));
1884         pmem_assign(sb(wc)->version, cpu_to_le32(MEMORY_SUPERBLOCK_VERSION));
1885         pmem_assign(sb(wc)->block_size, cpu_to_le32(wc->block_size));
1886         pmem_assign(sb(wc)->n_blocks, cpu_to_le64(wc->n_blocks));
1887         pmem_assign(sb(wc)->seq_count, cpu_to_le64(0));
1888
1889         for (b = 0; b < wc->n_blocks; b++)
1890                 write_original_sector_seq_count(wc, &wc->entries[b], -1, -1);
1891
1892         writecache_flush_all_metadata(wc);
1893         writecache_commit_flushed(wc, false);
1894         pmem_assign(sb(wc)->magic, cpu_to_le32(MEMORY_SUPERBLOCK_MAGIC));
1895         writecache_flush_region(wc, &sb(wc)->magic, sizeof sb(wc)->magic);
1896         writecache_commit_flushed(wc, false);
1897
1898         return 0;
1899 }
1900
1901 static void writecache_dtr(struct dm_target *ti)
1902 {
1903         struct dm_writecache *wc = ti->private;
1904
1905         if (!wc)
1906                 return;
1907
1908         if (wc->endio_thread)
1909                 kthread_stop(wc->endio_thread);
1910
1911         if (wc->flush_thread)
1912                 kthread_stop(wc->flush_thread);
1913
1914         bioset_exit(&wc->bio_set);
1915
1916         mempool_exit(&wc->copy_pool);
1917
1918         if (wc->writeback_wq)
1919                 destroy_workqueue(wc->writeback_wq);
1920
1921         if (wc->dev)
1922                 dm_put_device(ti, wc->dev);
1923
1924         if (wc->ssd_dev)
1925                 dm_put_device(ti, wc->ssd_dev);
1926
1927         if (wc->entries)
1928                 vfree(wc->entries);
1929
1930         if (wc->memory_map) {
1931                 if (WC_MODE_PMEM(wc))
1932                         persistent_memory_release(wc);
1933                 else
1934                         vfree(wc->memory_map);
1935         }
1936
1937         if (wc->dm_kcopyd)
1938                 dm_kcopyd_client_destroy(wc->dm_kcopyd);
1939
1940         if (wc->dm_io)
1941                 dm_io_client_destroy(wc->dm_io);
1942
1943         if (wc->dirty_bitmap)
1944                 vfree(wc->dirty_bitmap);
1945
1946         kfree(wc);
1947 }
1948
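/*
 * Construct a writecache target.  The table line is
 *	writecache <p|s> <origin dev> <cache dev> <block size>
 *		<#optional args> <optional args>...
 * An illustrative invocation (device names and sizes are examples only):
 *	dmsetup create wc --table "0 209715200 writecache s
 *		/dev/vg/origin /dev/vg/cache 4096 4
 *		high_watermark 60 low_watermark 50"
 */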
1949 static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
1950 {
1951         struct dm_writecache *wc;
1952         struct dm_arg_set as;
1953         const char *string;
1954         unsigned opt_params;
1955         size_t offset, data_size;
1956         int i, r;
1957         char dummy;
1958         int high_wm_percent = HIGH_WATERMARK;
1959         int low_wm_percent = LOW_WATERMARK;
1960         uint64_t x;
1961         struct wc_memory_superblock s;
1962
1963         static struct dm_arg _args[] = {
1964                 {0, 10, "Invalid number of feature args"},
1965         };
1966
1967         as.argc = argc;
1968         as.argv = argv;
1969
1970         wc = kzalloc(sizeof(struct dm_writecache), GFP_KERNEL);
1971         if (!wc) {
1972                 ti->error = "Cannot allocate writecache structure";
1973                 r = -ENOMEM;
1974                 goto bad;
1975         }
1976         ti->private = wc;
1977         wc->ti = ti;
1978
1979         mutex_init(&wc->lock);
1980         wc->max_age = MAX_AGE_UNSPECIFIED;
1981         writecache_poison_lists(wc);
1982         init_waitqueue_head(&wc->freelist_wait);
1983         timer_setup(&wc->autocommit_timer, writecache_autocommit_timer, 0);
1984         timer_setup(&wc->max_age_timer, writecache_max_age_timer, 0);
1985
1986         for (i = 0; i < 2; i++) {
1987                 atomic_set(&wc->bio_in_progress[i], 0);
1988                 init_waitqueue_head(&wc->bio_in_progress_wait[i]);
1989         }
1990
1991         wc->dm_io = dm_io_client_create();
1992         if (IS_ERR(wc->dm_io)) {
1993                 r = PTR_ERR(wc->dm_io);
1994                 ti->error = "Unable to allocate dm-io client";
1995                 wc->dm_io = NULL;
1996                 goto bad;
1997         }
1998
1999         wc->writeback_wq = alloc_workqueue("writecache-writeback", WQ_MEM_RECLAIM, 1);
2000         if (!wc->writeback_wq) {
2001                 r = -ENOMEM;
2002                 ti->error = "Could not allocate writeback workqueue";
2003                 goto bad;
2004         }
2005         INIT_WORK(&wc->writeback_work, writecache_writeback);
2006         INIT_WORK(&wc->flush_work, writecache_flush_work);
2007
2008         raw_spin_lock_init(&wc->endio_list_lock);
2009         INIT_LIST_HEAD(&wc->endio_list);
2010         wc->endio_thread = kthread_create(writecache_endio_thread, wc, "writecache_endio");
2011         if (IS_ERR(wc->endio_thread)) {
2012                 r = PTR_ERR(wc->endio_thread);
2013                 wc->endio_thread = NULL;
2014                 ti->error = "Couldn't spawn endio thread";
2015                 goto bad;
2016         }
2017         wake_up_process(wc->endio_thread);
2018
2019         /*
2020          * Parse the mode (pmem or ssd)
2021          */
2022         string = dm_shift_arg(&as);
2023         if (!string)
2024                 goto bad_arguments;
2025
2026         if (!strcasecmp(string, "s")) {
2027                 wc->pmem_mode = false;
2028         } else if (!strcasecmp(string, "p")) {
2029 #ifdef DM_WRITECACHE_HAS_PMEM
2030                 wc->pmem_mode = true;
2031                 wc->writeback_fua = true;
2032 #else
2033                 /*
2034                  * If the architecture doesn't support persistent memory or
2035                  * the kernel doesn't support any DAX drivers, this driver can
2036                  * only be used in SSD-only mode.
2037                  */
2038                 r = -EOPNOTSUPP;
2039                 ti->error = "Persistent memory or DAX not supported on this system";
2040                 goto bad;
2041 #endif
2042         } else {
2043                 goto bad_arguments;
2044         }
2045
2046         if (WC_MODE_PMEM(wc)) {
2047                 r = bioset_init(&wc->bio_set, BIO_POOL_SIZE,
2048                                 offsetof(struct writeback_struct, bio),
2049                                 BIOSET_NEED_BVECS);
2050                 if (r) {
2051                         ti->error = "Could not allocate bio set";
2052                         goto bad;
2053                 }
2054         } else {
2055                 r = mempool_init_kmalloc_pool(&wc->copy_pool, 1, sizeof(struct copy_struct));
2056                 if (r) {
2057                         ti->error = "Could not allocate mempool";
2058                         goto bad;
2059                 }
2060         }
2061
2062         /*
2063          * Parse the origin data device
2064          */
2065         string = dm_shift_arg(&as);
2066         if (!string)
2067                 goto bad_arguments;
2068         r = dm_get_device(ti, string, dm_table_get_mode(ti->table), &wc->dev);
2069         if (r) {
2070                 ti->error = "Origin data device lookup failed";
2071                 goto bad;
2072         }
2073
2074         /*
2075          * Parse cache data device (be it pmem or ssd)
2076          */
2077         string = dm_shift_arg(&as);
2078         if (!string)
2079                 goto bad_arguments;
2080
2081         r = dm_get_device(ti, string, dm_table_get_mode(ti->table), &wc->ssd_dev);
2082         if (r) {
2083                 ti->error = "Cache data device lookup failed";
2084                 goto bad;
2085         }
2086         wc->memory_map_size = i_size_read(wc->ssd_dev->bdev->bd_inode);
2087
2088         /*
2089          * Parse the cache block size
2090          */
2091         string = dm_shift_arg(&as);
2092         if (!string)
2093                 goto bad_arguments;
2094         if (sscanf(string, "%u%c", &wc->block_size, &dummy) != 1 ||
2095             wc->block_size < 512 || wc->block_size > PAGE_SIZE ||
2096             (wc->block_size & (wc->block_size - 1))) {
2097                 r = -EINVAL;
2098                 ti->error = "Invalid block size";
2099                 goto bad;
2100         }
2101         wc->block_size_bits = __ffs(wc->block_size);
2102
2103         wc->max_writeback_jobs = MAX_WRITEBACK_JOBS;
2104         wc->autocommit_blocks = !WC_MODE_PMEM(wc) ? AUTOCOMMIT_BLOCKS_SSD : AUTOCOMMIT_BLOCKS_PMEM;
2105         wc->autocommit_jiffies = msecs_to_jiffies(AUTOCOMMIT_MSEC);
2106
2107         /*
2108          * Parse optional arguments
2109          */
2110         r = dm_read_arg_group(_args, &as, &opt_params, &ti->error);
2111         if (r)
2112                 goto bad;
2113
2114         while (opt_params) {
2115                 string = dm_shift_arg(&as), opt_params--;
2116                 if (!strcasecmp(string, "start_sector") && opt_params >= 1) {
2117                         unsigned long long start_sector;
2118                         string = dm_shift_arg(&as), opt_params--;
2119                         if (sscanf(string, "%llu%c", &start_sector, &dummy) != 1)
2120                                 goto invalid_optional;
2121                         wc->start_sector = start_sector;
2122                         if (wc->start_sector != start_sector ||
2123                             wc->start_sector >= wc->memory_map_size >> SECTOR_SHIFT)
2124                                 goto invalid_optional;
2125                 } else if (!strcasecmp(string, "high_watermark") && opt_params >= 1) {
2126                         string = dm_shift_arg(&as), opt_params--;
2127                         if (sscanf(string, "%d%c", &high_wm_percent, &dummy) != 1)
2128                                 goto invalid_optional;
2129                         if (high_wm_percent < 0 || high_wm_percent > 100)
2130                                 goto invalid_optional;
2131                         wc->high_wm_percent_set = true;
2132                 } else if (!strcasecmp(string, "low_watermark") && opt_params >= 1) {
2133                         string = dm_shift_arg(&as), opt_params--;
2134                         if (sscanf(string, "%d%c", &low_wm_percent, &dummy) != 1)
2135                                 goto invalid_optional;
2136                         if (low_wm_percent < 0 || low_wm_percent > 100)
2137                                 goto invalid_optional;
2138                         wc->low_wm_percent_set = true;
2139                 } else if (!strcasecmp(string, "writeback_jobs") && opt_params >= 1) {
2140                         string = dm_shift_arg(&as), opt_params--;
2141                         if (sscanf(string, "%u%c", &wc->max_writeback_jobs, &dummy) != 1)
2142                                 goto invalid_optional;
2143                         wc->max_writeback_jobs_set = true;
2144                 } else if (!strcasecmp(string, "autocommit_blocks") && opt_params >= 1) {
2145                         string = dm_shift_arg(&as), opt_params--;
2146                         if (sscanf(string, "%u%c", &wc->autocommit_blocks, &dummy) != 1)
2147                                 goto invalid_optional;
2148                         wc->autocommit_blocks_set = true;
2149                 } else if (!strcasecmp(string, "autocommit_time") && opt_params >= 1) {
2150                         unsigned autocommit_msecs;
2151                         string = dm_shift_arg(&as), opt_params--;
2152                         if (sscanf(string, "%u%c", &autocommit_msecs, &dummy) != 1)
2153                                 goto invalid_optional;
2154                         if (autocommit_msecs > 3600000)
2155                                 goto invalid_optional;
2156                         wc->autocommit_jiffies = msecs_to_jiffies(autocommit_msecs);
2157                         wc->autocommit_time_set = true;
2158                 } else if (!strcasecmp(string, "max_age") && opt_params >= 1) {
2159                         unsigned max_age_msecs;
2160                         string = dm_shift_arg(&as), opt_params--;
2161                         if (sscanf(string, "%u%c", &max_age_msecs, &dummy) != 1)
2162                                 goto invalid_optional;
2163                         if (max_age_msecs > 86400000)
2164                                 goto invalid_optional;
2165                         wc->max_age = msecs_to_jiffies(max_age_msecs);
2166                 } else if (!strcasecmp(string, "cleaner")) {
2167                         wc->cleaner = true;
2168                 } else if (!strcasecmp(string, "fua")) {
2169                         if (WC_MODE_PMEM(wc)) {
2170                                 wc->writeback_fua = true;
2171                                 wc->writeback_fua_set = true;
2172                         } else goto invalid_optional;
2173                 } else if (!strcasecmp(string, "nofua")) {
2174                         if (WC_MODE_PMEM(wc)) {
2175                                 wc->writeback_fua = false;
2176                                 wc->writeback_fua_set = true;
2177                         } else goto invalid_optional;
2178                 } else {
2179 invalid_optional:
2180                         r = -EINVAL;
2181                         ti->error = "Invalid optional argument";
2182                         goto bad;
2183                 }
2184         }
2185
2186         if (high_wm_percent < low_wm_percent) {
2187                 r = -EINVAL;
2188                 ti->error = "High watermark must be greater than or equal to low watermark";
2189                 goto bad;
2190         }
2191
2192         if (WC_MODE_PMEM(wc)) {
2193                 r = persistent_memory_claim(wc);
2194                 if (r) {
2195                         ti->error = "Unable to map persistent memory for cache";
2196                         goto bad;
2197                 }
2198         } else {
2199                 struct dm_io_region region;
2200                 struct dm_io_request req;
2201                 size_t n_blocks, n_metadata_blocks;
2202                 uint64_t n_bitmap_bits;
2203
2204                 wc->memory_map_size -= (uint64_t)wc->start_sector << SECTOR_SHIFT;
2205
2206                 bio_list_init(&wc->flush_list);
2207                 wc->flush_thread = kthread_create(writecache_flush_thread, wc, "dm_writecache_flush");
2208                 if (IS_ERR(wc->flush_thread)) {
2209                         r = PTR_ERR(wc->flush_thread);
2210                         wc->flush_thread = NULL;
2211                         ti->error = "Couldn't spawn flush thread";
2212                         goto bad;
2213                 }
2214                 wake_up_process(wc->flush_thread);
2215
2216                 r = calculate_memory_size(wc->memory_map_size, wc->block_size,
2217                                           &n_blocks, &n_metadata_blocks);
2218                 if (r) {
2219                         ti->error = "Invalid device size";
2220                         goto bad;
2221                 }
2222
2223                 n_bitmap_bits = (((uint64_t)n_metadata_blocks << wc->block_size_bits) +
2224                                  BITMAP_GRANULARITY - 1) / BITMAP_GRANULARITY;
2225                 /* this is a limitation of the test_bit functions */
2226                 if (n_bitmap_bits > 1U << 31) {
2227                         r = -EFBIG;
2228                         ti->error = "Invalid device size";
2229                         goto bad;
2230                 }
2231
2232                 wc->memory_map = vmalloc(n_metadata_blocks << wc->block_size_bits);
2233                 if (!wc->memory_map) {
2234                         r = -ENOMEM;
2235                         ti->error = "Unable to allocate memory for metadata";
2236                         goto bad;
2237                 }
2238
2239                 wc->dm_kcopyd = dm_kcopyd_client_create(&dm_kcopyd_throttle);
2240                 if (IS_ERR(wc->dm_kcopyd)) {
2241                         r = PTR_ERR(wc->dm_kcopyd);
2242                         ti->error = "Unable to allocate dm-kcopyd client";
2243                         wc->dm_kcopyd = NULL;
2244                         goto bad;
2245                 }
2246
2247                 wc->metadata_sectors = n_metadata_blocks << (wc->block_size_bits - SECTOR_SHIFT);
2248                 wc->dirty_bitmap_size = (n_bitmap_bits + BITS_PER_LONG - 1) /
2249                         BITS_PER_LONG * sizeof(unsigned long);
2250                 wc->dirty_bitmap = vzalloc(wc->dirty_bitmap_size);
2251                 if (!wc->dirty_bitmap) {
2252                         r = -ENOMEM;
2253                         ti->error = "Unable to allocate dirty bitmap";
2254                         goto bad;
2255                 }
2256
2257                 region.bdev = wc->ssd_dev->bdev;
2258                 region.sector = wc->start_sector;
2259                 region.count = wc->metadata_sectors;
2260                 req.bi_op = REQ_OP_READ;
2261                 req.bi_op_flags = REQ_SYNC;
2262                 req.mem.type = DM_IO_VMA;
2263                 req.mem.ptr.vma = (char *)wc->memory_map;
2264                 req.client = wc->dm_io;
2265                 req.notify.fn = NULL;
2266
2267                 r = dm_io(&req, 1, &region, NULL);
2268                 if (r) {
2269                         ti->error = "Unable to read metadata";
2270                         goto bad;
2271                 }
2272         }
2273
2274         r = memcpy_mcsafe(&s, sb(wc), sizeof(struct wc_memory_superblock));
2275         if (r) {
2276                 ti->error = "Hardware memory error when reading superblock";
2277                 goto bad;
2278         }
2279         if (!le32_to_cpu(s.magic) && !le32_to_cpu(s.version)) {
2280                 r = init_memory(wc);
2281                 if (r) {
2282                         ti->error = "Unable to initialize device";
2283                         goto bad;
2284                 }
2285                 r = memcpy_mcsafe(&s, sb(wc), sizeof(struct wc_memory_superblock));
2286                 if (r) {
2287                         ti->error = "Hardware memory error when reading superblock";
2288                         goto bad;
2289                 }
2290         }
2291
2292         if (le32_to_cpu(s.magic) != MEMORY_SUPERBLOCK_MAGIC) {
2293                 ti->error = "Invalid magic in the superblock";
2294                 r = -EINVAL;
2295                 goto bad;
2296         }
2297
2298         if (le32_to_cpu(s.version) != MEMORY_SUPERBLOCK_VERSION) {
2299                 ti->error = "Invalid version in the superblock";
2300                 r = -EINVAL;
2301                 goto bad;
2302         }
2303
2304         if (le32_to_cpu(s.block_size) != wc->block_size) {
2305                 ti->error = "Block size does not match superblock";
2306                 r = -EINVAL;
2307                 goto bad;
2308         }
2309
2310         wc->n_blocks = le64_to_cpu(s.n_blocks);
2311
2312         offset = wc->n_blocks * sizeof(struct wc_memory_entry);
2313         if (offset / sizeof(struct wc_memory_entry) != le64_to_cpu(sb(wc)->n_blocks)) {
2314 overflow:
2315                 ti->error = "Overflow in size calculation";
2316                 r = -EINVAL;
2317                 goto bad;
2318         }
2319         offset += sizeof(struct wc_memory_superblock);
2320         if (offset < sizeof(struct wc_memory_superblock))
2321                 goto overflow;
2322         offset = (offset + wc->block_size - 1) & ~(size_t)(wc->block_size - 1);
2323         data_size = wc->n_blocks * (size_t)wc->block_size;
2324         if (!offset || (data_size / wc->block_size != wc->n_blocks) ||
2325             (offset + data_size < offset))
2326                 goto overflow;
2327         if (offset + data_size > wc->memory_map_size) {
2328                 ti->error = "Memory area is too small";
2329                 r = -EINVAL;
2330                 goto bad;
2331         }
2332
2333         wc->metadata_sectors = offset >> SECTOR_SHIFT;
2334         wc->block_start = (char *)sb(wc) + offset;
2335
2336         x = (uint64_t)wc->n_blocks * (100 - high_wm_percent);
2337         x += 50;
2338         do_div(x, 100);
2339         wc->freelist_high_watermark = x;
2340         x = (uint64_t)wc->n_blocks * (100 - low_wm_percent);
2341         x += 50;
2342         do_div(x, 100);
2343         wc->freelist_low_watermark = x;
2344
2345         if (wc->cleaner)
2346                 activate_cleaner(wc);
2347
2348         r = writecache_alloc_entries(wc);
2349         if (r) {
2350                 ti->error = "Cannot allocate memory";
2351                 goto bad;
2352         }
2353
2354         ti->num_flush_bios = 1;
2355         ti->flush_supported = true;
2356         ti->num_discard_bios = 1;
2357
2358         if (WC_MODE_PMEM(wc))
2359                 persistent_memory_flush_cache(wc->memory_map, wc->memory_map_size);
2360
2361         return 0;
2362
2363 bad_arguments:
2364         r = -EINVAL;
2365         ti->error = "Bad arguments";
2366 bad:
2367         writecache_dtr(ti);
2368         return r;
2369 }
2370
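/*
 * Report "<error> <n_blocks> <free blocks> <writeback blocks>" for
 * STATUSTYPE_INFO, or reconstruct the constructor table line (with the
 * watermarks converted from freelist counts back to percentages) for
 * STATUSTYPE_TABLE.
 */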
2371 static void writecache_status(struct dm_target *ti, status_type_t type,
2372                               unsigned status_flags, char *result, unsigned maxlen)
2373 {
2374         struct dm_writecache *wc = ti->private;
2375         unsigned extra_args;
2376         unsigned sz = 0;
2377         uint64_t x;
2378
2379         switch (type) {
2380         case STATUSTYPE_INFO:
2381                 DMEMIT("%ld %llu %llu %llu", writecache_has_error(wc),
2382                        (unsigned long long)wc->n_blocks, (unsigned long long)wc->freelist_size,
2383                        (unsigned long long)wc->writeback_size);
2384                 break;
2385         case STATUSTYPE_TABLE:
2386                 DMEMIT("%c %s %s %u ", WC_MODE_PMEM(wc) ? 'p' : 's',
2387                                 wc->dev->name, wc->ssd_dev->name, wc->block_size);
2388                 extra_args = 0;
2389                 if (wc->start_sector)
2390                         extra_args += 2;
2391                 if (wc->high_wm_percent_set && !wc->cleaner)
2392                         extra_args += 2;
2393                 if (wc->low_wm_percent_set && !wc->cleaner)
2394                         extra_args += 2;
2395                 if (wc->max_writeback_jobs_set)
2396                         extra_args += 2;
2397                 if (wc->autocommit_blocks_set)
2398                         extra_args += 2;
2399                 if (wc->autocommit_time_set)
2400                         extra_args += 2;
2401                 if (wc->cleaner)
2402                         extra_args++;
2403                 if (wc->writeback_fua_set)
2404                         extra_args++;
2405
2406                 DMEMIT("%u", extra_args);
2407                 if (wc->start_sector)
2408                         DMEMIT(" start_sector %llu", (unsigned long long)wc->start_sector);
2409                 if (wc->high_wm_percent_set && !wc->cleaner) {
2410                         x = (uint64_t)wc->freelist_high_watermark * 100;
2411                         x += wc->n_blocks / 2;
2412                         do_div(x, (size_t)wc->n_blocks);
2413                         DMEMIT(" high_watermark %u", 100 - (unsigned)x);
2414                 }
2415                 if (wc->low_wm_percent_set && !wc->cleaner) {
2416                         x = (uint64_t)wc->freelist_low_watermark * 100;
2417                         x += wc->n_blocks / 2;
2418                         do_div(x, (size_t)wc->n_blocks);
2419                         DMEMIT(" low_watermark %u", 100 - (unsigned)x);
2420                 }
2421                 if (wc->max_writeback_jobs_set)
2422                         DMEMIT(" writeback_jobs %u", wc->max_writeback_jobs);
2423                 if (wc->autocommit_blocks_set)
2424                         DMEMIT(" autocommit_blocks %u", wc->autocommit_blocks);
2425                 if (wc->autocommit_time_set)
2426                         DMEMIT(" autocommit_time %u", jiffies_to_msecs(wc->autocommit_jiffies));
2427                 if (wc->max_age != MAX_AGE_UNSPECIFIED)
2428                         DMEMIT(" max_age %u", jiffies_to_msecs(wc->max_age));
2429                 if (wc->cleaner)
2430                         DMEMIT(" cleaner");
2431                 if (wc->writeback_fua_set)
2432                         DMEMIT(" %sfua", wc->writeback_fua ? "" : "no");
2433                 break;
2434         }
2435 }
2436
2437 static struct target_type writecache_target = {
2438         .name                   = "writecache",
2439         .version                = {1, 3, 0},
2440         .module                 = THIS_MODULE,
2441         .ctr                    = writecache_ctr,
2442         .dtr                    = writecache_dtr,
2443         .status                 = writecache_status,
2444         .postsuspend            = writecache_suspend,
2445         .resume                 = writecache_resume,
2446         .message                = writecache_message,
2447         .map                    = writecache_map,
2448         .end_io                 = writecache_end_io,
2449         .iterate_devices        = writecache_iterate_devices,
2450         .io_hints               = writecache_io_hints,
2451 };
2452
2453 static int __init dm_writecache_init(void)
2454 {
2455         int r;
2456
2457         r = dm_register_target(&writecache_target);
2458         if (r < 0) {
2459                 DMERR("register failed %d", r);
2460                 return r;
2461         }
2462
2463         return 0;
2464 }
2465
2466 static void __exit dm_writecache_exit(void)
2467 {
2468         dm_unregister_target(&writecache_target);
2469 }
2470
2471 module_init(dm_writecache_init);
2472 module_exit(dm_writecache_exit);
2473
2474 MODULE_DESCRIPTION(DM_NAME " writecache target");
2475 MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
2476 MODULE_LICENSE("GPL");