1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2009-2011 Red Hat, Inc.
5 * Author: Mikulas Patocka <mpatocka@redhat.com>
7 * This file is released under the GPL.
10 #include <linux/dm-bufio.h>
12 #include <linux/device-mapper.h>
13 #include <linux/dm-io.h>
14 #include <linux/slab.h>
15 #include <linux/sched/mm.h>
16 #include <linux/jiffies.h>
17 #include <linux/vmalloc.h>
18 #include <linux/shrinker.h>
19 #include <linux/module.h>
20 #include <linux/rbtree.h>
21 #include <linux/stacktrace.h>
22 #include <linux/jump_label.h>
26 #define DM_MSG_PREFIX "bufio"
29 * Memory management policy:
30 * Limit the number of buffers to DM_BUFIO_MEMORY_PERCENT of main memory
31 * or DM_BUFIO_VMALLOC_PERCENT of vmalloc memory (whichever is lower).
32 * Always allocate at least DM_BUFIO_MIN_BUFFERS buffers.
33 * Start background writeback when the number of dirty buffers exceeds
34 * DM_BUFIO_WRITEBACK_RATIO times the number of clean buffers.
36 #define DM_BUFIO_MIN_BUFFERS 8
38 #define DM_BUFIO_MEMORY_PERCENT 2
39 #define DM_BUFIO_VMALLOC_PERCENT 25
40 #define DM_BUFIO_WRITEBACK_RATIO 3
41 #define DM_BUFIO_LOW_WATERMARK_RATIO 16
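
/*
 * Illustrative sketch (not part of the driver): how the memory management
 * policy above could translate into a byte limit, assuming the hypothetical
 * inputs main_memory_bytes and vmalloc_bytes are already known.
 */
static inline unsigned long example_default_cache_size(unsigned long main_memory_bytes,
							unsigned long vmalloc_bytes)
{
	unsigned long mem_limit = main_memory_bytes / 100 * DM_BUFIO_MEMORY_PERCENT;
	unsigned long vm_limit = vmalloc_bytes / 100 * DM_BUFIO_VMALLOC_PERCENT;

	/* whichever limit is lower wins, per the policy comment above */
	return min(mem_limit, vm_limit);
}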
44 * Check buffer ages in this interval (seconds)
46 #define DM_BUFIO_WORK_TIMER_SECS 30
49 * Free buffers when they are older than this (seconds)
51 #define DM_BUFIO_DEFAULT_AGE_SECS 300
54 * The number of bytes of cached data to keep around.
56 #define DM_BUFIO_DEFAULT_RETAIN_BYTES (256 * 1024)
59 * Align buffer writes to this boundary.
60 * Tests show that SSDs have the highest IOPS when using 4k writes.
62 #define DM_BUFIO_WRITE_ALIGN 4096
65 * dm_buffer->list_mode
71 /*--------------------------------------------------------------*/
74 * Rather than use an LRU list, we use a clock algorithm where entries
75 * are held in a circular list. When an entry is 'hit' a reference bit
76 * is set. The least recently used entry is approximated by running a
77 * cursor around the list selecting unreferenced entries. Referenced
78 * entries have their reference bit cleared as the cursor passes them.
81 struct list_head list;
87 struct list_head list;
88 struct lru_entry *stop;
93 struct list_head *cursor;
96 struct list_head iterators;
101 static void lru_init(struct lru *lru)
105 INIT_LIST_HEAD(&lru->iterators);
108 static void lru_destroy(struct lru *lru)
110 WARN_ON_ONCE(lru->cursor);
111 WARN_ON_ONCE(!list_empty(&lru->iterators));
115 * Insert a new entry into the lru.
117 static void lru_insert(struct lru *lru, struct lru_entry *le)
120 * Don't be tempted to set to 1; that makes the lru aspect drop out.
123 atomic_set(&le->referenced, 0);
126 list_add_tail(&le->list, lru->cursor);
128 INIT_LIST_HEAD(&le->list);
129 lru->cursor = &le->list;
137 * Convert a list_head pointer to an lru_entry pointer.
139 static inline struct lru_entry *to_le(struct list_head *l)
141 return container_of(l, struct lru_entry, list);
145 * Initialize an lru_iter and add it to the list of cursors in the lru.
147 static void lru_iter_begin(struct lru *lru, struct lru_iter *it)
150 it->stop = lru->cursor ? to_le(lru->cursor->prev) : NULL;
151 it->e = lru->cursor ? to_le(lru->cursor) : NULL;
152 list_add(&it->list, &lru->iterators);
156 * Remove an lru_iter from the list of cursors in the lru.
158 static inline void lru_iter_end(struct lru_iter *it)
163 /* Predicate function type to be used with lru_iter_next */
164 typedef bool (*iter_predicate)(struct lru_entry *le, void *context);
167 * Advance the cursor to the next entry that passes the
168 * predicate, and return that entry. Returns NULL if the
169 * iteration is complete.
171 static struct lru_entry *lru_iter_next(struct lru_iter *it,
172 iter_predicate pred, void *context)
179 /* advance the cursor */
180 if (it->e == it->stop)
183 it->e = to_le(it->e->list.next);
185 if (pred(e, context))
193 * Invalidate a specific lru_entry and update all cursors in
194 * the lru accordingly.
196 static void lru_iter_invalidate(struct lru *lru, struct lru_entry *e)
200 list_for_each_entry(it, &lru->iterators, list) {
201 /* Move it->e forwards if necessary. */
203 it->e = to_le(it->e->list.next);
208 /* Move it->stop backwards if necessary. */
210 it->stop = to_le(it->stop->list.prev);
220 * Remove a specific entry from the lru.
222 static void lru_remove(struct lru *lru, struct lru_entry *le)
224 lru_iter_invalidate(lru, le);
225 if (lru->count == 1) {
228 if (lru->cursor == &le->list)
229 lru->cursor = lru->cursor->next;
236 * Mark as referenced.
238 static inline void lru_reference(struct lru_entry *le)
240 atomic_set(&le->referenced, 1);
246 * Remove the (approximate) least recently used entry that passes the predicate.
247 * Returns NULL on failure.
252 ER_STOP, /* stop looking for something to evict */
255 typedef enum evict_result (*le_predicate)(struct lru_entry *le, void *context);
257 static struct lru_entry *lru_evict(struct lru *lru, le_predicate pred, void *context, bool no_sleep)
259 unsigned long tested = 0;
260 struct list_head *h = lru->cursor;
261 struct lru_entry *le;
266 * In the worst case we have to loop around twice. Once to clear
267 * the reference flags, and then again to discover the predicate
268 * fails for all entries.
270 while (tested < lru->count) {
271 le = container_of(h, struct lru_entry, list);
273 if (atomic_read(&le->referenced)) {
274 atomic_set(&le->referenced, 0);
277 switch (pred(le, context)) {
280 * Adjust the cursor, so we start the next
283 lru->cursor = le->list.next;
291 lru->cursor = le->list.next;
305 /*--------------------------------------------------------------*/
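
/*
 * Illustrative sketch (not part of the driver): typical use of the clock
 * 'lru' above.  A victim is chosen with lru_evict(), which clears and skips
 * recently referenced entries; 'example_always_evict' is a hypothetical
 * predicate used only for this example.
 */
static enum evict_result example_always_evict(struct lru_entry *le, void *context)
{
	return ER_EVICT;
}

static struct lru_entry *example_pick_victim(struct lru *lru)
{
	/* returns NULL if there is nothing suitable to evict */
	return lru_evict(lru, example_always_evict, NULL, false);
}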
315 * Describes how the block was allocated:
316 * kmem_cache_alloc(), __get_free_pages() or vmalloc().
317 * See the comment at alloc_buffer_data.
321 DATA_MODE_GET_FREE_PAGES = 1,
322 DATA_MODE_VMALLOC = 2,
327 /* protected by the locks in dm_buffer_cache */
330 /* immutable, so don't need protecting */
333 unsigned char data_mode; /* DATA_MODE_* */
336 * These two fields are used in isolation, so do not need
337 * a surrounding lock.
340 unsigned long last_accessed;
343 * Everything else is protected by the mutex in dm_bufio_client.
347 struct lru_entry lru;
348 unsigned char list_mode; /* LIST_* */
349 blk_status_t read_error;
350 blk_status_t write_error;
351 unsigned int dirty_start;
352 unsigned int dirty_end;
353 unsigned int write_start;
354 unsigned int write_end;
355 struct list_head write_list;
356 struct dm_bufio_client *c;
357 void (*end_io)(struct dm_buffer *b, blk_status_t bs);
358 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
360 unsigned int stack_len;
361 unsigned long stack_entries[MAX_STACK];
365 /*--------------------------------------------------------------*/
368 * The buffer cache manages buffers, particularly:
369 * - inc/dec of holder count
370 * - setting the last_accessed field
371 * - maintaining clean/dirty state along with the lru
372 * - selecting buffers that match predicates
374 * It does *not* handle:
375 * - allocation/freeing of buffers.
377 * - Eviction or cache sizing.
379 * cache_get() and cache_put() are threadsafe; you do not need to
380 * protect these calls with a surrounding mutex. All the other
381 * methods are not threadsafe; they do use locking primitives, but
382 * only enough to ensure get/put are threadsafe.
387 struct rw_semaphore lock;
391 } ____cacheline_aligned_in_smp;
393 struct dm_buffer_cache {
394 struct lru lru[LIST_SIZE];
396 * We spread entries across multiple trees to reduce contention on the locks.
399 unsigned int num_locks;
401 struct buffer_tree trees[];
404 static DEFINE_STATIC_KEY_FALSE(no_sleep_enabled);
406 static inline unsigned int cache_index(sector_t block, unsigned int num_locks)
408 return dm_hash_locks_index(block, num_locks);
411 static inline void cache_read_lock(struct dm_buffer_cache *bc, sector_t block)
413 if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
414 read_lock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
416 down_read(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
419 static inline void cache_read_unlock(struct dm_buffer_cache *bc, sector_t block)
421 if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
422 read_unlock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
424 up_read(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
427 static inline void cache_write_lock(struct dm_buffer_cache *bc, sector_t block)
429 if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
430 write_lock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
432 down_write(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
435 static inline void cache_write_unlock(struct dm_buffer_cache *bc, sector_t block)
437 if (static_branch_unlikely(&no_sleep_enabled) && bc->no_sleep)
438 write_unlock_bh(&bc->trees[cache_index(block, bc->num_locks)].u.spinlock);
440 up_write(&bc->trees[cache_index(block, bc->num_locks)].u.lock);
444 * Sometimes we want to repeatedly get and drop locks as part of an iteration.
445 * This struct helps avoid redundantly dropping and re-taking the same lock.
447 struct lock_history {
448 struct dm_buffer_cache *cache;
450 unsigned int previous;
451 unsigned int no_previous;
454 static void lh_init(struct lock_history *lh, struct dm_buffer_cache *cache, bool write)
458 lh->no_previous = cache->num_locks;
459 lh->previous = lh->no_previous;
462 static void __lh_lock(struct lock_history *lh, unsigned int index)
465 if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
466 write_lock_bh(&lh->cache->trees[index].u.spinlock);
468 down_write(&lh->cache->trees[index].u.lock);
470 if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
471 read_lock_bh(&lh->cache->trees[index].u.spinlock);
473 down_read(&lh->cache->trees[index].u.lock);
477 static void __lh_unlock(struct lock_history *lh, unsigned int index)
480 if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
481 write_unlock_bh(&lh->cache->trees[index].u.spinlock);
483 up_write(&lh->cache->trees[index].u.lock);
485 if (static_branch_unlikely(&no_sleep_enabled) && lh->cache->no_sleep)
486 read_unlock_bh(&lh->cache->trees[index].u.spinlock);
488 up_read(&lh->cache->trees[index].u.lock);
493 * Make sure you call this since it will unlock the final lock.
495 static void lh_exit(struct lock_history *lh)
497 if (lh->previous != lh->no_previous) {
498 __lh_unlock(lh, lh->previous);
499 lh->previous = lh->no_previous;
504 * Named 'next' because there is no corresponding
505 * 'up/unlock' call since it's done automatically.
507 static void lh_next(struct lock_history *lh, sector_t b)
509 unsigned int index = cache_index(b, lh->no_previous); /* no_previous is num_locks */
511 if (lh->previous != lh->no_previous) {
512 if (lh->previous != index) {
513 __lh_unlock(lh, lh->previous);
514 __lh_lock(lh, index);
515 lh->previous = index;
518 __lh_lock(lh, index);
519 lh->previous = index;
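
/*
 * Illustrative sketch (not part of the driver): how a caller typically
 * drives the lock_history helper while visiting buffers in block order.
 * 'visit' is a hypothetical per-buffer callback used only for this example.
 */
static void example_visit_locked(struct dm_buffer_cache *bc,
				 struct dm_buffer **buffers, unsigned int count,
				 void (*visit)(struct dm_buffer *))
{
	struct lock_history lh;
	unsigned int i;

	lh_init(&lh, bc, false);
	for (i = 0; i < count; i++) {
		/* only relocks when the block hashes to a different tree */
		lh_next(&lh, buffers[i]->block);
		visit(buffers[i]);
	}
	lh_exit(&lh);	/* drops whichever lock is still held */
}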
523 static inline struct dm_buffer *le_to_buffer(struct lru_entry *le)
525 return container_of(le, struct dm_buffer, lru);
528 static struct dm_buffer *list_to_buffer(struct list_head *l)
530 struct lru_entry *le = list_entry(l, struct lru_entry, list);
535 return le_to_buffer(le);
538 static void cache_init(struct dm_buffer_cache *bc, unsigned int num_locks, bool no_sleep)
542 bc->num_locks = num_locks;
543 bc->no_sleep = no_sleep;
545 for (i = 0; i < bc->num_locks; i++) {
547 rwlock_init(&bc->trees[i].u.spinlock);
549 init_rwsem(&bc->trees[i].u.lock);
550 bc->trees[i].root = RB_ROOT;
553 lru_init(&bc->lru[LIST_CLEAN]);
554 lru_init(&bc->lru[LIST_DIRTY]);
557 static void cache_destroy(struct dm_buffer_cache *bc)
561 for (i = 0; i < bc->num_locks; i++)
562 WARN_ON_ONCE(!RB_EMPTY_ROOT(&bc->trees[i].root));
564 lru_destroy(&bc->lru[LIST_CLEAN]);
565 lru_destroy(&bc->lru[LIST_DIRTY]);
571 * not threadsafe, or racy depending on how you look at it
573 static inline unsigned long cache_count(struct dm_buffer_cache *bc, int list_mode)
575 return bc->lru[list_mode].count;
578 static inline unsigned long cache_total(struct dm_buffer_cache *bc)
580 return cache_count(bc, LIST_CLEAN) + cache_count(bc, LIST_DIRTY);
586 * Gets a specific buffer, indexed by block.
587 * If the buffer is found then its holder count will be incremented and
588 * lru_reference will be called.
592 static struct dm_buffer *__cache_get(const struct rb_root *root, sector_t block)
594 struct rb_node *n = root->rb_node;
598 b = container_of(n, struct dm_buffer, node);
600 if (b->block == block)
603 n = block < b->block ? n->rb_left : n->rb_right;
609 static void __cache_inc_buffer(struct dm_buffer *b)
611 atomic_inc(&b->hold_count);
612 WRITE_ONCE(b->last_accessed, jiffies);
615 static struct dm_buffer *cache_get(struct dm_buffer_cache *bc, sector_t block)
619 cache_read_lock(bc, block);
620 b = __cache_get(&bc->trees[cache_index(block, bc->num_locks)].root, block);
622 lru_reference(&b->lru);
623 __cache_inc_buffer(b);
625 cache_read_unlock(bc, block);
633 * Returns true if the hold count hits zero.
636 static bool cache_put(struct dm_buffer_cache *bc, struct dm_buffer *b)
640 cache_read_lock(bc, b->block);
641 BUG_ON(!atomic_read(&b->hold_count));
642 r = atomic_dec_and_test(&b->hold_count);
643 cache_read_unlock(bc, b->block);
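
/*
 * Illustrative sketch (not part of the driver): the get/put pairing the
 * comment above cache_get() describes.  Every successful cache_get() must
 * be balanced by a cache_put() to drop the holder count again.
 */
static bool example_block_is_cached(struct dm_buffer_cache *bc, sector_t block)
{
	struct dm_buffer *b = cache_get(bc, block);

	if (!b)
		return false;

	cache_put(bc, b);	/* drop the reference taken by cache_get() */
	return true;
}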
650 typedef enum evict_result (*b_predicate)(struct dm_buffer *, void *);
653 * Evicts a buffer based on a predicate. The oldest buffer that
654 * matches the predicate will be selected. In addition to matching the
655 * predicate, the selected buffer will have a hold_count of zero.
657 struct evict_wrapper {
658 struct lock_history *lh;
664 * Wraps the buffer predicate, turning it into an lru predicate, and adds
665 * an extra test for hold_count.
667 static enum evict_result __evict_pred(struct lru_entry *le, void *context)
669 struct evict_wrapper *w = context;
670 struct dm_buffer *b = le_to_buffer(le);
672 lh_next(w->lh, b->block);
674 if (atomic_read(&b->hold_count))
675 return ER_DONT_EVICT;
677 return w->pred(b, w->context);
680 static struct dm_buffer *__cache_evict(struct dm_buffer_cache *bc, int list_mode,
681 b_predicate pred, void *context,
682 struct lock_history *lh)
684 struct evict_wrapper w = {.lh = lh, .pred = pred, .context = context};
685 struct lru_entry *le;
688 le = lru_evict(&bc->lru[list_mode], __evict_pred, &w, bc->no_sleep);
692 b = le_to_buffer(le);
693 /* __evict_pred will have locked the appropriate tree. */
694 rb_erase(&b->node, &bc->trees[cache_index(b->block, bc->num_locks)].root);
699 static struct dm_buffer *cache_evict(struct dm_buffer_cache *bc, int list_mode,
700 b_predicate pred, void *context)
703 struct lock_history lh;
705 lh_init(&lh, bc, true);
706 b = __cache_evict(bc, list_mode, pred, context, &lh);
715 * Mark a buffer as clean or dirty. Not threadsafe.
717 static void cache_mark(struct dm_buffer_cache *bc, struct dm_buffer *b, int list_mode)
719 cache_write_lock(bc, b->block);
720 if (list_mode != b->list_mode) {
721 lru_remove(&bc->lru[b->list_mode], &b->lru);
722 b->list_mode = list_mode;
723 lru_insert(&bc->lru[b->list_mode], &b->lru);
725 cache_write_unlock(bc, b->block);
731 * Runs through the lru associated with 'old_mode'; entries that match the
732 * predicate are moved to 'new_mode'. Not threadsafe.
734 static void __cache_mark_many(struct dm_buffer_cache *bc, int old_mode, int new_mode,
735 b_predicate pred, void *context, struct lock_history *lh)
737 struct lru_entry *le;
739 struct evict_wrapper w = {.lh = lh, .pred = pred, .context = context};
742 le = lru_evict(&bc->lru[old_mode], __evict_pred, &w, bc->no_sleep);
746 b = le_to_buffer(le);
747 b->list_mode = new_mode;
748 lru_insert(&bc->lru[b->list_mode], &b->lru);
752 static void cache_mark_many(struct dm_buffer_cache *bc, int old_mode, int new_mode,
753 b_predicate pred, void *context)
755 struct lock_history lh;
757 lh_init(&lh, bc, true);
758 __cache_mark_many(bc, old_mode, new_mode, pred, context, &lh);
765 * Iterates through all clean or dirty entries calling a function for each
766 * entry. The callback may terminate the iteration early. Not threadsafe.
770 * Iterator functions should return one of these actions to indicate
771 * how the iteration should proceed.
778 typedef enum it_action (*iter_fn)(struct dm_buffer *b, void *context);
780 static void __cache_iterate(struct dm_buffer_cache *bc, int list_mode,
781 iter_fn fn, void *context, struct lock_history *lh)
783 struct lru *lru = &bc->lru[list_mode];
784 struct lru_entry *le, *first;
789 first = le = to_le(lru->cursor);
791 struct dm_buffer *b = le_to_buffer(le);
793 lh_next(lh, b->block);
795 switch (fn(b, context)) {
804 le = to_le(le->list.next);
805 } while (le != first);
808 static void cache_iterate(struct dm_buffer_cache *bc, int list_mode,
809 iter_fn fn, void *context)
811 struct lock_history lh;
813 lh_init(&lh, bc, false);
814 __cache_iterate(bc, list_mode, fn, context, &lh);
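
/*
 * Illustrative sketch (not part of the driver): an iter_fn callback for
 * cache_iterate(), assuming the IT_NEXT / IT_COMPLETE actions of
 * enum it_action above.  It counts buffers and stops early at a
 * hypothetical limit.
 */
struct example_count {
	unsigned long seen;
	unsigned long limit;
};

static enum it_action example_count_fn(struct dm_buffer *b, void *context)
{
	struct example_count *ec = context;

	if (++ec->seen >= ec->limit)
		return IT_COMPLETE;	/* terminate the iteration early */

	return IT_NEXT;
}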
821 * Passes ownership of the buffer to the cache. Returns false if the
822 * buffer was already present (in which case ownership does not pass).
823 * e.g. due to a race with another thread.
825 * Holder count should be 1 on insertion.
829 static bool __cache_insert(struct rb_root *root, struct dm_buffer *b)
831 struct rb_node **new = &root->rb_node, *parent = NULL;
832 struct dm_buffer *found;
835 found = container_of(*new, struct dm_buffer, node);
837 if (found->block == b->block)
841 new = b->block < found->block ?
842 &found->node.rb_left : &found->node.rb_right;
845 rb_link_node(&b->node, parent, new);
846 rb_insert_color(&b->node, root);
851 static bool cache_insert(struct dm_buffer_cache *bc, struct dm_buffer *b)
855 if (WARN_ON_ONCE(b->list_mode >= LIST_SIZE))
858 cache_write_lock(bc, b->block);
859 BUG_ON(atomic_read(&b->hold_count) != 1);
860 r = __cache_insert(&bc->trees[cache_index(b->block, bc->num_locks)].root, b);
862 lru_insert(&bc->lru[b->list_mode], &b->lru);
863 cache_write_unlock(bc, b->block);
871 * Removes the buffer from the cache; ownership of the buffer passes back to the caller.
872 * Fails unless the hold_count is exactly one (i.e. the caller holds the only reference).
876 static bool cache_remove(struct dm_buffer_cache *bc, struct dm_buffer *b)
880 cache_write_lock(bc, b->block);
882 if (atomic_read(&b->hold_count) != 1) {
886 rb_erase(&b->node, &bc->trees[cache_index(b->block, bc->num_locks)].root);
887 lru_remove(&bc->lru[b->list_mode], &b->lru);
890 cache_write_unlock(bc, b->block);
897 typedef void (*b_release)(struct dm_buffer *);
899 static struct dm_buffer *__find_next(struct rb_root *root, sector_t block)
901 struct rb_node *n = root->rb_node;
903 struct dm_buffer *best = NULL;
906 b = container_of(n, struct dm_buffer, node);
908 if (b->block == block)
911 if (block <= b->block) {
922 static void __remove_range(struct dm_buffer_cache *bc,
923 struct rb_root *root,
924 sector_t begin, sector_t end,
925 b_predicate pred, b_release release)
932 b = __find_next(root, begin);
933 if (!b || (b->block >= end))
936 begin = b->block + 1;
938 if (atomic_read(&b->hold_count))
941 if (pred(b, NULL) == ER_EVICT) {
942 rb_erase(&b->node, root);
943 lru_remove(&bc->lru[b->list_mode], &b->lru);
949 static void cache_remove_range(struct dm_buffer_cache *bc,
950 sector_t begin, sector_t end,
951 b_predicate pred, b_release release)
955 BUG_ON(bc->no_sleep);
956 for (i = 0; i < bc->num_locks; i++) {
957 down_write(&bc->trees[i].u.lock);
958 __remove_range(bc, &bc->trees[i].root, begin, end, pred, release);
959 up_write(&bc->trees[i].u.lock);
963 /*----------------------------------------------------------------*/
966 * Linking of buffers:
967 * All buffers are linked to buffer_cache with their node field.
969 * Clean buffers that are not being written (B_WRITING not set)
970 * are linked to lru[LIST_CLEAN] with their lru_list field.
972 * Dirty and clean buffers that are being written are linked to
973 * lru[LIST_DIRTY] with their lru_list field. When the write
974 * finishes, the buffer cannot be relinked immediately (because we
975 * are in an interrupt context and relinking requires process
976 * context), so some clean-not-writing buffers can be held on
977 * dirty_lru too. They are later added to lru in the process context.
980 struct dm_bufio_client {
981 struct block_device *bdev;
982 unsigned int block_size;
983 s8 sectors_per_block_bits;
989 int async_write_error;
991 void (*alloc_callback)(struct dm_buffer *buf);
992 void (*write_callback)(struct dm_buffer *buf);
993 struct kmem_cache *slab_buffer;
994 struct kmem_cache *slab_cache;
995 struct dm_io_client *dm_io;
997 struct list_head reserved_buffers;
998 unsigned int need_reserved_buffers;
1000 unsigned int minimum_buffers;
1004 struct shrinker *shrinker;
1005 struct work_struct shrink_work;
1006 atomic_long_t need_shrink;
1008 wait_queue_head_t free_buffer_wait;
1010 struct list_head client_list;
1013 * Used by global_cleanup to sort the clients list.
1015 unsigned long oldest_buffer;
1017 struct dm_buffer_cache cache; /* must be last member */
1020 /*----------------------------------------------------------------*/
1022 #define dm_bufio_in_request() (!!current->bio_list)
1024 static void dm_bufio_lock(struct dm_bufio_client *c)
1026 if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
1027 spin_lock_bh(&c->spinlock);
1029 mutex_lock_nested(&c->lock, dm_bufio_in_request());
1032 static void dm_bufio_unlock(struct dm_bufio_client *c)
1034 if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
1035 spin_unlock_bh(&c->spinlock);
1037 mutex_unlock(&c->lock);
1040 /*----------------------------------------------------------------*/
1043 * Default cache size: available memory divided by the ratio.
1045 static unsigned long dm_bufio_default_cache_size;
1048 * Total cache size set by the user.
1050 static unsigned long dm_bufio_cache_size;
1053 * A copy of dm_bufio_cache_size because dm_bufio_cache_size can change
1054 * at any time. If it disagrees with dm_bufio_cache_size, the user has changed the cache size.
1056 static unsigned long dm_bufio_cache_size_latch;
1058 static DEFINE_SPINLOCK(global_spinlock);
1061 * Buffers are freed after this timeout
1063 static unsigned int dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
1064 static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;
1066 static unsigned long dm_bufio_peak_allocated;
1067 static unsigned long dm_bufio_allocated_kmem_cache;
1068 static unsigned long dm_bufio_allocated_get_free_pages;
1069 static unsigned long dm_bufio_allocated_vmalloc;
1070 static unsigned long dm_bufio_current_allocated;
1072 /*----------------------------------------------------------------*/
1075 * The current number of clients.
1077 static int dm_bufio_client_count;
1080 * The list of all clients.
1082 static LIST_HEAD(dm_bufio_all_clients);
1085 * This mutex protects dm_bufio_cache_size_latch and dm_bufio_client_count
1087 static DEFINE_MUTEX(dm_bufio_clients_lock);
1089 static struct workqueue_struct *dm_bufio_wq;
1090 static struct delayed_work dm_bufio_cleanup_old_work;
1091 static struct work_struct dm_bufio_replacement_work;
1094 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
1095 static void buffer_record_stack(struct dm_buffer *b)
1097 b->stack_len = stack_trace_save(b->stack_entries, MAX_STACK, 2);
1101 /*----------------------------------------------------------------*/
1103 static void adjust_total_allocated(struct dm_buffer *b, bool unlink)
1105 unsigned char data_mode;
1108 static unsigned long * const class_ptr[DATA_MODE_LIMIT] = {
1109 &dm_bufio_allocated_kmem_cache,
1110 &dm_bufio_allocated_get_free_pages,
1111 &dm_bufio_allocated_vmalloc,
1114 data_mode = b->data_mode;
1115 diff = (long)b->c->block_size;
1119 spin_lock(&global_spinlock);
1121 *class_ptr[data_mode] += diff;
1123 dm_bufio_current_allocated += diff;
1125 if (dm_bufio_current_allocated > dm_bufio_peak_allocated)
1126 dm_bufio_peak_allocated = dm_bufio_current_allocated;
1129 if (dm_bufio_current_allocated > dm_bufio_cache_size)
1130 queue_work(dm_bufio_wq, &dm_bufio_replacement_work);
1133 spin_unlock(&global_spinlock);
1137 * Change the number of clients and recalculate per-client limit.
1139 static void __cache_size_refresh(void)
1141 if (WARN_ON(!mutex_is_locked(&dm_bufio_clients_lock)))
1143 if (WARN_ON(dm_bufio_client_count < 0))
1146 dm_bufio_cache_size_latch = READ_ONCE(dm_bufio_cache_size);
1149 * Use default if set to 0 and report the actual cache size used.
1151 if (!dm_bufio_cache_size_latch) {
1152 (void)cmpxchg(&dm_bufio_cache_size, 0,
1153 dm_bufio_default_cache_size);
1154 dm_bufio_cache_size_latch = dm_bufio_default_cache_size;
1159 * Allocating buffer data.
1161 * Small buffers are allocated with kmem_cache, to use space optimally.
1163 * For large buffers, we choose between get_free_pages and vmalloc.
1164 * Each has advantages and disadvantages.
1166 * __get_free_pages can randomly fail if the memory is fragmented.
1167 * __vmalloc won't randomly fail, but vmalloc space is limited (it may be
1168 * as low as 128M) so using it for caching is not appropriate.
1170 * If the allocation may fail we use __get_free_pages. Memory fragmentation
1171 * won't have a fatal effect here, but it just causes flushes of some other
1172 * buffers and more I/O will be performed. Don't use __get_free_pages if it
1173 * always fails (i.e. order > MAX_PAGE_ORDER).
1175 * If the allocation shouldn't fail we use __vmalloc. This is only for the
1176 * initial reserve allocation, so there's no risk of wasting all vmalloc space.
1179 static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
1180 unsigned char *data_mode)
1182 if (unlikely(c->slab_cache != NULL)) {
1183 *data_mode = DATA_MODE_SLAB;
1184 return kmem_cache_alloc(c->slab_cache, gfp_mask);
1187 if (c->block_size <= KMALLOC_MAX_SIZE &&
1188 gfp_mask & __GFP_NORETRY) {
1189 *data_mode = DATA_MODE_GET_FREE_PAGES;
1190 return (void *)__get_free_pages(gfp_mask,
1191 c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT));
1194 *data_mode = DATA_MODE_VMALLOC;
1196 return __vmalloc(c->block_size, gfp_mask);
1200 * Free buffer's data.
1202 static void free_buffer_data(struct dm_bufio_client *c,
1203 void *data, unsigned char data_mode)
1205 switch (data_mode) {
1206 case DATA_MODE_SLAB:
1207 kmem_cache_free(c->slab_cache, data);
1210 case DATA_MODE_GET_FREE_PAGES:
1211 free_pages((unsigned long)data,
1212 c->sectors_per_block_bits - (PAGE_SHIFT - SECTOR_SHIFT));
1215 case DATA_MODE_VMALLOC:
1220 DMCRIT("dm_bufio_free_buffer_data: bad data mode: %d",
1227 * Allocate buffer and its data.
1229 static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask)
1231 struct dm_buffer *b = kmem_cache_alloc(c->slab_buffer, gfp_mask);
1238 b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode);
1240 kmem_cache_free(c->slab_buffer, b);
1243 adjust_total_allocated(b, false);
1245 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
1252 * Free buffer and its data.
1254 static void free_buffer(struct dm_buffer *b)
1256 struct dm_bufio_client *c = b->c;
1258 adjust_total_allocated(b, true);
1259 free_buffer_data(c, b->data, b->data_mode);
1260 kmem_cache_free(c->slab_buffer, b);
1264 *--------------------------------------------------------------------------
1265 * Submit I/O on the buffer.
1267 * Bio interface is faster but it has some problems:
1268 * the vector list is limited (increasing this limit increases
1269 * memory-consumption per buffer, so it is not viable);
1271 * the memory must be direct-mapped, not vmalloced;
1273 * If the buffer is small enough (up to DM_BUFIO_INLINE_VECS pages) and
1274 * it is not vmalloced, try using the bio interface.
1276 * If the buffer is big, if it is vmalloced or if the underlying device
1277 * rejects the bio because it is too large, use dm-io layer to do the I/O.
1278 * The dm-io layer splits the I/O into multiple requests, avoiding the above
1280 *--------------------------------------------------------------------------
1284 * dm-io completion routine. It just calls b->end_io, pretending
1285 * that the request was handled directly through the bio interface.
1287 static void dmio_complete(unsigned long error, void *context)
1289 struct dm_buffer *b = context;
1291 b->end_io(b, unlikely(error != 0) ? BLK_STS_IOERR : 0);
1294 static void use_dmio(struct dm_buffer *b, enum req_op op, sector_t sector,
1295 unsigned int n_sectors, unsigned int offset,
1296 unsigned short ioprio)
1299 struct dm_io_request io_req = {
1301 .notify.fn = dmio_complete,
1302 .notify.context = b,
1303 .client = b->c->dm_io,
1305 struct dm_io_region region = {
1311 if (b->data_mode != DATA_MODE_VMALLOC) {
1312 io_req.mem.type = DM_IO_KMEM;
1313 io_req.mem.ptr.addr = (char *)b->data + offset;
1315 io_req.mem.type = DM_IO_VMA;
1316 io_req.mem.ptr.vma = (char *)b->data + offset;
1319 r = dm_io(&io_req, 1, &region, NULL, ioprio);
1321 b->end_io(b, errno_to_blk_status(r));
1324 static void bio_complete(struct bio *bio)
1326 struct dm_buffer *b = bio->bi_private;
1327 blk_status_t status = bio->bi_status;
1331 b->end_io(b, status);
1334 static void use_bio(struct dm_buffer *b, enum req_op op, sector_t sector,
1335 unsigned int n_sectors, unsigned int offset,
1336 unsigned short ioprio)
1342 bio = bio_kmalloc(1, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOWARN);
1344 use_dmio(b, op, sector, n_sectors, offset, ioprio);
1347 bio_init(bio, b->c->bdev, bio->bi_inline_vecs, 1, op);
1348 bio->bi_iter.bi_sector = sector;
1349 bio->bi_end_io = bio_complete;
1350 bio->bi_private = b;
1351 bio->bi_ioprio = ioprio;
1353 ptr = (char *)b->data + offset;
1354 len = n_sectors << SECTOR_SHIFT;
1356 __bio_add_page(bio, virt_to_page(ptr), len, offset_in_page(ptr));
1361 static inline sector_t block_to_sector(struct dm_bufio_client *c, sector_t block)
1365 if (likely(c->sectors_per_block_bits >= 0))
1366 sector = block << c->sectors_per_block_bits;
1368 sector = block * (c->block_size >> SECTOR_SHIFT);
1374 static void submit_io(struct dm_buffer *b, enum req_op op, unsigned short ioprio,
1375 void (*end_io)(struct dm_buffer *, blk_status_t))
1377 unsigned int n_sectors;
1379 unsigned int offset, end;
1383 sector = block_to_sector(b->c, b->block);
1385 if (op != REQ_OP_WRITE) {
1386 n_sectors = b->c->block_size >> SECTOR_SHIFT;
1389 if (b->c->write_callback)
1390 b->c->write_callback(b);
1391 offset = b->write_start;
1393 offset &= -DM_BUFIO_WRITE_ALIGN;
1394 end += DM_BUFIO_WRITE_ALIGN - 1;
1395 end &= -DM_BUFIO_WRITE_ALIGN;
1396 if (unlikely(end > b->c->block_size))
1397 end = b->c->block_size;
1399 sector += offset >> SECTOR_SHIFT;
1400 n_sectors = (end - offset) >> SECTOR_SHIFT;
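		/*
		 * Worked example (illustrative): with DM_BUFIO_WRITE_ALIGN = 4096,
		 * dirty_start = 100 and dirty_end = 5000, offset rounds down to 0
		 * and end rounds up to 8192 (then capped at block_size), so the
		 * write covers sectors 0..15 of the block.
		 */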
1403 if (b->data_mode != DATA_MODE_VMALLOC)
1404 use_bio(b, op, sector, n_sectors, offset, ioprio);
1406 use_dmio(b, op, sector, n_sectors, offset, ioprio);
1410 *--------------------------------------------------------------
1411 * Writing dirty buffers
1412 *--------------------------------------------------------------
1416 * The endio routine for write.
1418 * Set the error, clear the B_WRITING bit and wake anyone who was waiting on the buffer.
1421 static void write_endio(struct dm_buffer *b, blk_status_t status)
1423 b->write_error = status;
1424 if (unlikely(status)) {
1425 struct dm_bufio_client *c = b->c;
1427 (void)cmpxchg(&c->async_write_error, 0,
1428 blk_status_to_errno(status));
1431 BUG_ON(!test_bit(B_WRITING, &b->state));
1433 smp_mb__before_atomic();
1434 clear_bit(B_WRITING, &b->state);
1435 smp_mb__after_atomic();
1437 wake_up_bit(&b->state, B_WRITING);
1441 * Initiate a write on a dirty buffer, but don't wait for it.
1443 * - If the buffer is not dirty, exit.
1444 * - If there is a previous write going on, wait for it to finish (we can't
1445 * have two writes on the same buffer simultaneously).
1446 * - Submit our write and don't wait on it. We set B_WRITING indicating
1447 * that there is a write in progress.
1449 static void __write_dirty_buffer(struct dm_buffer *b,
1450 struct list_head *write_list)
1452 if (!test_bit(B_DIRTY, &b->state))
1455 clear_bit(B_DIRTY, &b->state);
1456 wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
1458 b->write_start = b->dirty_start;
1459 b->write_end = b->dirty_end;
1462 submit_io(b, REQ_OP_WRITE, IOPRIO_DEFAULT, write_endio);
1464 list_add_tail(&b->write_list, write_list);
1467 static void __flush_write_list(struct list_head *write_list)
1469 struct blk_plug plug;
1471 blk_start_plug(&plug);
1472 while (!list_empty(write_list)) {
1473 struct dm_buffer *b =
1474 list_entry(write_list->next, struct dm_buffer, write_list);
1475 list_del(&b->write_list);
1476 submit_io(b, REQ_OP_WRITE, IOPRIO_DEFAULT, write_endio);
1479 blk_finish_plug(&plug);
1483 * Wait until any activity on the buffer finishes. Possibly write the
1484 * buffer if it is dirty. When this function finishes, there is no I/O
1485 * running on the buffer and the buffer is not dirty.
1487 static void __make_buffer_clean(struct dm_buffer *b)
1489 BUG_ON(atomic_read(&b->hold_count));
1491 /* smp_load_acquire() pairs with read_endio()'s smp_mb__before_atomic() */
1492 if (!smp_load_acquire(&b->state)) /* fast case */
1495 wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
1496 __write_dirty_buffer(b, NULL);
1497 wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
1500 static enum evict_result is_clean(struct dm_buffer *b, void *context)
1502 struct dm_bufio_client *c = context;
1504 /* These should never happen */
1505 if (WARN_ON_ONCE(test_bit(B_WRITING, &b->state)))
1506 return ER_DONT_EVICT;
1507 if (WARN_ON_ONCE(test_bit(B_DIRTY, &b->state)))
1508 return ER_DONT_EVICT;
1509 if (WARN_ON_ONCE(b->list_mode != LIST_CLEAN))
1510 return ER_DONT_EVICT;
1512 if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep &&
1513 unlikely(test_bit(B_READING, &b->state)))
1514 return ER_DONT_EVICT;
1519 static enum evict_result is_dirty(struct dm_buffer *b, void *context)
1521 /* These should never happen */
1522 if (WARN_ON_ONCE(test_bit(B_READING, &b->state)))
1523 return ER_DONT_EVICT;
1524 if (WARN_ON_ONCE(b->list_mode != LIST_DIRTY))
1525 return ER_DONT_EVICT;
1531 * Find some buffer that is not held by anybody, clean it, unlink it and return it.
1534 static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
1536 struct dm_buffer *b;
1538 b = cache_evict(&c->cache, LIST_CLEAN, is_clean, c);
1540 /* this also waits for pending reads */
1541 __make_buffer_clean(b);
1545 if (static_branch_unlikely(&no_sleep_enabled) && c->no_sleep)
1548 b = cache_evict(&c->cache, LIST_DIRTY, is_dirty, NULL);
1550 __make_buffer_clean(b);
1558 * Wait until some other threads free some buffer or release hold count on some buffer.
1561 * This function is entered with c->lock held, drops it and regains it before exiting.
1564 static void __wait_for_free_buffer(struct dm_bufio_client *c)
1566 DECLARE_WAITQUEUE(wait, current);
1568 add_wait_queue(&c->free_buffer_wait, &wait);
1569 set_current_state(TASK_UNINTERRUPTIBLE);
1573 * It's possible to miss a wake up event since we don't always
1574 * hold c->lock when wake_up is called. So we have a timeout here, just in case.
1577 io_schedule_timeout(5 * HZ);
1579 remove_wait_queue(&c->free_buffer_wait, &wait);
1592 * Allocate a new buffer. If the allocation is not possible, wait until
1593 * some other thread frees a buffer.
1595 * May drop the lock and regain it.
1597 static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf)
1599 struct dm_buffer *b;
1600 bool tried_noio_alloc = false;
1603 * dm-bufio is resistant to allocation failures (it just keeps
1604 * one buffer reserved in cases all the allocations fail).
1605 * So set flags to not try too hard:
1606 * GFP_NOWAIT: don't wait; if we need to sleep we'll release our
1607 * mutex and wait ourselves.
1608 * __GFP_NORETRY: don't retry and rather return failure
1609 * __GFP_NOMEMALLOC: don't use emergency reserves
1610 * __GFP_NOWARN: don't print a warning in case of failure
1612 * For debugging, if we set the cache size to 1, no new buffers will be allocated.
1616 if (dm_bufio_cache_size_latch != 1) {
1617 b = alloc_buffer(c, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
1622 if (nf == NF_PREFETCH)
1625 if (dm_bufio_cache_size_latch != 1 && !tried_noio_alloc) {
1627 b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
1631 tried_noio_alloc = true;
1634 if (!list_empty(&c->reserved_buffers)) {
1635 b = list_to_buffer(c->reserved_buffers.next);
1636 list_del(&b->lru.list);
1637 c->need_reserved_buffers++;
1642 b = __get_unclaimed_buffer(c);
1646 __wait_for_free_buffer(c);
1650 static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c, enum new_flag nf)
1652 struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf);
1657 if (c->alloc_callback)
1658 c->alloc_callback(b);
1664 * Free a buffer and wake other threads waiting for free buffers.
1666 static void __free_buffer_wake(struct dm_buffer *b)
1668 struct dm_bufio_client *c = b->c;
1671 if (!c->need_reserved_buffers)
1674 list_add(&b->lru.list, &c->reserved_buffers);
1675 c->need_reserved_buffers--;
1679 * We hold the bufio lock here, so no one can add entries to the
1680 * wait queue anyway.
1682 if (unlikely(waitqueue_active(&c->free_buffer_wait)))
1683 wake_up(&c->free_buffer_wait);
1686 static enum evict_result cleaned(struct dm_buffer *b, void *context)
1688 if (WARN_ON_ONCE(test_bit(B_READING, &b->state)))
1689 return ER_DONT_EVICT; /* should never happen */
1691 if (test_bit(B_DIRTY, &b->state) || test_bit(B_WRITING, &b->state))
1692 return ER_DONT_EVICT;
1697 static void __move_clean_buffers(struct dm_bufio_client *c)
1699 cache_mark_many(&c->cache, LIST_DIRTY, LIST_CLEAN, cleaned, NULL);
1702 struct write_context {
1704 struct list_head *write_list;
1707 static enum it_action write_one(struct dm_buffer *b, void *context)
1709 struct write_context *wc = context;
1711 if (wc->no_wait && test_bit(B_WRITING, &b->state))
1714 __write_dirty_buffer(b, wc->write_list);
1718 static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait,
1719 struct list_head *write_list)
1721 struct write_context wc = {.no_wait = no_wait, .write_list = write_list};
1723 __move_clean_buffers(c);
1724 cache_iterate(&c->cache, LIST_DIRTY, write_one, &wc);
1728 * Check if we're over the dirty-buffer watermark.
1729 * If the number of dirty buffers exceeds DM_BUFIO_WRITEBACK_RATIO times the
1730 * number of clean buffers, start writing them back asynchronously.
1732 static void __check_watermark(struct dm_bufio_client *c,
1733 struct list_head *write_list)
1735 if (cache_count(&c->cache, LIST_DIRTY) >
1736 cache_count(&c->cache, LIST_CLEAN) * DM_BUFIO_WRITEBACK_RATIO)
1737 __write_dirty_buffers_async(c, 1, write_list);
1741 *--------------------------------------------------------------
1743 *--------------------------------------------------------------
1746 static void cache_put_and_wake(struct dm_bufio_client *c, struct dm_buffer *b)
1749 * Relying on waitqueue_active() is racy, but we sleep
1750 * with a timeout anyway.
1752 if (cache_put(&c->cache, b) &&
1753 unlikely(waitqueue_active(&c->free_buffer_wait)))
1754 wake_up(&c->free_buffer_wait);
1758 * This assumes you have already checked the cache to see if the buffer
1759 * is already present (it will recheck after dropping the lock for allocation).
1761 static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
1762 enum new_flag nf, int *need_submit,
1763 struct list_head *write_list)
1765 struct dm_buffer *b, *new_b = NULL;
1769 /* This can't be called with NF_GET */
1770 if (WARN_ON_ONCE(nf == NF_GET))
1773 new_b = __alloc_buffer_wait(c, nf);
1778 * We've had a period where the mutex was unlocked, so we need to
1779 * recheck the buffer tree.
1781 b = cache_get(&c->cache, block);
1783 __free_buffer_wake(new_b);
1787 __check_watermark(c, write_list);
1790 atomic_set(&b->hold_count, 1);
1791 WRITE_ONCE(b->last_accessed, jiffies);
1795 b->list_mode = LIST_CLEAN;
1800 b->state = 1 << B_READING;
1805 * We mustn't insert into the cache until the B_READING state
1806 * is set. Otherwise another thread could get it and use
1807 * it before it had been read.
1809 cache_insert(&c->cache, b);
1814 if (nf == NF_PREFETCH) {
1815 cache_put_and_wake(c, b);
1820 * Note: it is essential that we don't wait for the buffer to be
1821 * read if dm_bufio_get function is used. Both dm_bufio_get and
1822 * dm_bufio_prefetch can be used in the driver request routine.
1823 * If the user called both dm_bufio_prefetch and dm_bufio_get on
1824 * the same buffer, it would deadlock if we waited.
1826 if (nf == NF_GET && unlikely(test_bit_acquire(B_READING, &b->state))) {
1827 cache_put_and_wake(c, b);
1835 * The endio routine for reading: set the error, clear the bit and wake up
1836 * anyone waiting on the buffer.
1838 static void read_endio(struct dm_buffer *b, blk_status_t status)
1840 b->read_error = status;
1842 BUG_ON(!test_bit(B_READING, &b->state));
1844 smp_mb__before_atomic();
1845 clear_bit(B_READING, &b->state);
1846 smp_mb__after_atomic();
1848 wake_up_bit(&b->state, B_READING);
1852 * A common routine for dm_bufio_new and dm_bufio_read. Operation of these
1853 * functions is similar except that dm_bufio_new doesn't read the
1854 * buffer from the disk (assuming that the caller overwrites all the data
1855 * and uses dm_bufio_mark_buffer_dirty to write new data back).
1857 static void *new_read(struct dm_bufio_client *c, sector_t block,
1858 enum new_flag nf, struct dm_buffer **bp,
1859 unsigned short ioprio)
1861 int need_submit = 0;
1862 struct dm_buffer *b;
1864 LIST_HEAD(write_list);
1869 * Fast path, hopefully the block is already in the cache. No need
1870 * to get the client lock for this.
1872 b = cache_get(&c->cache, block);
1874 if (nf == NF_PREFETCH) {
1875 cache_put_and_wake(c, b);
1880 * Note: it is essential that we don't wait for the buffer to be
1881 * read if dm_bufio_get function is used. Both dm_bufio_get and
1882 * dm_bufio_prefetch can be used in the driver request routine.
1883 * If the user called both dm_bufio_prefetch and dm_bufio_get on
1884 * the same buffer, it would deadlock if we waited.
1886 if (nf == NF_GET && unlikely(test_bit_acquire(B_READING, &b->state))) {
1887 cache_put_and_wake(c, b);
1897 b = __bufio_new(c, block, nf, &need_submit, &write_list);
1901 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
1902 if (b && (atomic_read(&b->hold_count) == 1))
1903 buffer_record_stack(b);
1906 __flush_write_list(&write_list);
1912 submit_io(b, REQ_OP_READ, ioprio, read_endio);
1914 if (nf != NF_GET) /* we already tested this condition above */
1915 wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
1917 if (b->read_error) {
1918 int error = blk_status_to_errno(b->read_error);
1920 dm_bufio_release(b);
1922 return ERR_PTR(error);
1930 void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
1931 struct dm_buffer **bp)
1933 return new_read(c, block, NF_GET, bp, IOPRIO_DEFAULT);
1935 EXPORT_SYMBOL_GPL(dm_bufio_get);
1937 static void *__dm_bufio_read(struct dm_bufio_client *c, sector_t block,
1938 struct dm_buffer **bp, unsigned short ioprio)
1940 if (WARN_ON_ONCE(dm_bufio_in_request()))
1941 return ERR_PTR(-EINVAL);
1943 return new_read(c, block, NF_READ, bp, ioprio);
1946 void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
1947 struct dm_buffer **bp)
1949 return __dm_bufio_read(c, block, bp, IOPRIO_DEFAULT);
1951 EXPORT_SYMBOL_GPL(dm_bufio_read);
1953 void *dm_bufio_read_with_ioprio(struct dm_bufio_client *c, sector_t block,
1954 struct dm_buffer **bp, unsigned short ioprio)
1956 return __dm_bufio_read(c, block, bp, ioprio);
1958 EXPORT_SYMBOL_GPL(dm_bufio_read_with_ioprio);
1960 void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
1961 struct dm_buffer **bp)
1963 if (WARN_ON_ONCE(dm_bufio_in_request()))
1964 return ERR_PTR(-EINVAL);
1966 return new_read(c, block, NF_FRESH, bp, IOPRIO_DEFAULT);
1968 EXPORT_SYMBOL_GPL(dm_bufio_new);
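
/*
 * Illustrative sketch (not part of the driver): the usage pattern the
 * comment above new_read() describes.  dm_bufio_new() skips the read
 * because the caller overwrites the whole block and marks it dirty.
 * 'fill' is a hypothetical helper used only for this example.
 */
static int example_overwrite_block(struct dm_bufio_client *c, sector_t block,
				   void (*fill)(void *data, unsigned int len))
{
	struct dm_buffer *b;
	void *data = dm_bufio_new(c, block, &b);

	if (IS_ERR(data))
		return PTR_ERR(data);

	fill(data, dm_bufio_get_block_size(c));
	dm_bufio_mark_buffer_dirty(b);
	dm_bufio_release(b);

	return dm_bufio_write_dirty_buffers(c);
}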
1970 static void __dm_bufio_prefetch(struct dm_bufio_client *c,
1971 sector_t block, unsigned int n_blocks,
1972 unsigned short ioprio)
1974 struct blk_plug plug;
1976 LIST_HEAD(write_list);
1978 if (WARN_ON_ONCE(dm_bufio_in_request()))
1979 return; /* should never happen */
1981 blk_start_plug(&plug);
1983 for (; n_blocks--; block++) {
1985 struct dm_buffer *b;
1987 b = cache_get(&c->cache, block);
1989 /* already in cache */
1990 cache_put_and_wake(c, b);
1995 b = __bufio_new(c, block, NF_PREFETCH, &need_submit,
1997 if (unlikely(!list_empty(&write_list))) {
1999 blk_finish_plug(&plug);
2000 __flush_write_list(&write_list);
2001 blk_start_plug(&plug);
2004 if (unlikely(b != NULL)) {
2008 submit_io(b, REQ_OP_READ, ioprio, read_endio);
2009 dm_bufio_release(b);
2021 blk_finish_plug(&plug);
2024 void dm_bufio_prefetch(struct dm_bufio_client *c, sector_t block, unsigned int n_blocks)
2026 return __dm_bufio_prefetch(c, block, n_blocks, IOPRIO_DEFAULT);
2028 EXPORT_SYMBOL_GPL(dm_bufio_prefetch);
2030 void dm_bufio_prefetch_with_ioprio(struct dm_bufio_client *c, sector_t block,
2031 unsigned int n_blocks, unsigned short ioprio)
2033 return __dm_bufio_prefetch(c, block, n_blocks, ioprio);
2035 EXPORT_SYMBOL_GPL(dm_bufio_prefetch_with_ioprio);
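
/*
 * Illustrative sketch (not part of the driver): the prefetch + get pattern
 * the deadlock comments above allow.  dm_bufio_get() never waits for the
 * read to complete, so it is safe to call from a request routine after a
 * prefetch; it simply returns NULL if the block is not resident yet.
 */
static void *example_try_cached(struct dm_bufio_client *c, sector_t block,
				struct dm_buffer **bp)
{
	dm_bufio_prefetch(c, block, 1);		/* start the read, don't wait */
	return dm_bufio_get(c, block, bp);	/* NULL if not (yet) in cache */
}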
2037 void dm_bufio_release(struct dm_buffer *b)
2039 struct dm_bufio_client *c = b->c;
2042 * If there were errors on the buffer, and the buffer is not
2043 * to be written, free the buffer. There is no point in caching invalid data.
2046 if ((b->read_error || b->write_error) &&
2047 !test_bit_acquire(B_READING, &b->state) &&
2048 !test_bit(B_WRITING, &b->state) &&
2049 !test_bit(B_DIRTY, &b->state)) {
2052 /* cache remove can fail if there are other holders */
2053 if (cache_remove(&c->cache, b)) {
2054 __free_buffer_wake(b);
2062 cache_put_and_wake(c, b);
2064 EXPORT_SYMBOL_GPL(dm_bufio_release);
2066 void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b,
2067 unsigned int start, unsigned int end)
2069 struct dm_bufio_client *c = b->c;
2071 BUG_ON(start >= end);
2072 BUG_ON(end > b->c->block_size);
2076 BUG_ON(test_bit(B_READING, &b->state));
2078 if (!test_and_set_bit(B_DIRTY, &b->state)) {
2079 b->dirty_start = start;
2081 cache_mark(&c->cache, b, LIST_DIRTY);
2083 if (start < b->dirty_start)
2084 b->dirty_start = start;
2085 if (end > b->dirty_end)
2091 EXPORT_SYMBOL_GPL(dm_bufio_mark_partial_buffer_dirty);
2093 void dm_bufio_mark_buffer_dirty(struct dm_buffer *b)
2095 dm_bufio_mark_partial_buffer_dirty(b, 0, b->c->block_size);
2097 EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty);
2099 void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c)
2101 LIST_HEAD(write_list);
2103 if (WARN_ON_ONCE(dm_bufio_in_request()))
2104 return; /* should never happen */
2107 __write_dirty_buffers_async(c, 0, &write_list);
2109 __flush_write_list(&write_list);
2111 EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);
2114 * For performance, it is essential that the buffers are written asynchronously
2115 * and simultaneously (so that the block layer can merge the writes) and then waited upon.
2118 * Finally, we flush hardware disk cache.
2120 static bool is_writing(struct lru_entry *e, void *context)
2122 struct dm_buffer *b = le_to_buffer(e);
2124 return test_bit(B_WRITING, &b->state);
2127 int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
2130 unsigned long nr_buffers;
2131 struct lru_entry *e;
2134 LIST_HEAD(write_list);
2137 __write_dirty_buffers_async(c, 0, &write_list);
2139 __flush_write_list(&write_list);
2142 nr_buffers = cache_count(&c->cache, LIST_DIRTY);
2143 lru_iter_begin(&c->cache.lru[LIST_DIRTY], &it);
2144 while ((e = lru_iter_next(&it, is_writing, c))) {
2145 struct dm_buffer *b = le_to_buffer(e);
2146 __cache_inc_buffer(b);
2148 BUG_ON(test_bit(B_READING, &b->state));
2153 wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
2156 wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
2159 if (!test_bit(B_DIRTY, &b->state) && !test_bit(B_WRITING, &b->state))
2160 cache_mark(&c->cache, b, LIST_CLEAN);
2162 cache_put_and_wake(c, b);
2168 wake_up(&c->free_buffer_wait);
2171 a = xchg(&c->async_write_error, 0);
2172 f = dm_bufio_issue_flush(c);
2178 EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);
2181 * Use dm-io to send an empty barrier to flush the device.
2183 int dm_bufio_issue_flush(struct dm_bufio_client *c)
2185 struct dm_io_request io_req = {
2186 .bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC,
2187 .mem.type = DM_IO_KMEM,
2188 .mem.ptr.addr = NULL,
2191 struct dm_io_region io_reg = {
2197 if (WARN_ON_ONCE(dm_bufio_in_request()))
2200 return dm_io(&io_req, 1, &io_reg, NULL, IOPRIO_DEFAULT);
2202 EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);
2205 * Use dm-io to send a discard request to the device.
2207 int dm_bufio_issue_discard(struct dm_bufio_client *c, sector_t block, sector_t count)
2209 struct dm_io_request io_req = {
2210 .bi_opf = REQ_OP_DISCARD | REQ_SYNC,
2211 .mem.type = DM_IO_KMEM,
2212 .mem.ptr.addr = NULL,
2215 struct dm_io_region io_reg = {
2217 .sector = block_to_sector(c, block),
2218 .count = block_to_sector(c, count),
2221 if (WARN_ON_ONCE(dm_bufio_in_request()))
2222 return -EINVAL; /* discards are optional */
2224 return dm_io(&io_req, 1, &io_reg, NULL, IOPRIO_DEFAULT);
2226 EXPORT_SYMBOL_GPL(dm_bufio_issue_discard);
2228 static bool forget_buffer(struct dm_bufio_client *c, sector_t block)
2230 struct dm_buffer *b;
2232 b = cache_get(&c->cache, block);
2234 if (likely(!smp_load_acquire(&b->state))) {
2235 if (cache_remove(&c->cache, b))
2236 __free_buffer_wake(b);
2238 cache_put_and_wake(c, b);
2240 cache_put_and_wake(c, b);
2244 return b ? true : false;
2248 * Free the given buffer.
2250 * This is just a hint; if the buffer is in use or dirty, this function just skips it.
2253 void dm_bufio_forget(struct dm_bufio_client *c, sector_t block)
2256 forget_buffer(c, block);
2259 EXPORT_SYMBOL_GPL(dm_bufio_forget);
2261 static enum evict_result idle(struct dm_buffer *b, void *context)
2263 return b->state ? ER_DONT_EVICT : ER_EVICT;
2266 void dm_bufio_forget_buffers(struct dm_bufio_client *c, sector_t block, sector_t n_blocks)
2269 cache_remove_range(&c->cache, block, block + n_blocks, idle, __free_buffer_wake);
2272 EXPORT_SYMBOL_GPL(dm_bufio_forget_buffers);
2274 void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned int n)
2276 c->minimum_buffers = n;
2278 EXPORT_SYMBOL_GPL(dm_bufio_set_minimum_buffers);
2280 unsigned int dm_bufio_get_block_size(struct dm_bufio_client *c)
2282 return c->block_size;
2284 EXPORT_SYMBOL_GPL(dm_bufio_get_block_size);
2286 sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
2288 sector_t s = bdev_nr_sectors(c->bdev);
2294 if (likely(c->sectors_per_block_bits >= 0))
2295 s >>= c->sectors_per_block_bits;
2297 sector_div(s, c->block_size >> SECTOR_SHIFT);
2300 EXPORT_SYMBOL_GPL(dm_bufio_get_device_size);
2302 struct dm_io_client *dm_bufio_get_dm_io_client(struct dm_bufio_client *c)
2306 EXPORT_SYMBOL_GPL(dm_bufio_get_dm_io_client);
2308 sector_t dm_bufio_get_block_number(struct dm_buffer *b)
2312 EXPORT_SYMBOL_GPL(dm_bufio_get_block_number);
2314 void *dm_bufio_get_block_data(struct dm_buffer *b)
2318 EXPORT_SYMBOL_GPL(dm_bufio_get_block_data);
2320 void *dm_bufio_get_aux_data(struct dm_buffer *b)
2324 EXPORT_SYMBOL_GPL(dm_bufio_get_aux_data);
2326 struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b)
2330 EXPORT_SYMBOL_GPL(dm_bufio_get_client);
2332 static enum it_action warn_leak(struct dm_buffer *b, void *context)
2334 bool *warned = context;
2336 WARN_ON(!(*warned));
2338 DMERR("leaked buffer %llx, hold count %u, list %d",
2339 (unsigned long long)b->block, atomic_read(&b->hold_count), b->list_mode);
2340 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
2341 stack_trace_print(b->stack_entries, b->stack_len, 1);
2342 /* mark unclaimed to avoid WARN_ON at end of drop_buffers() */
2343 atomic_set(&b->hold_count, 0);
2348 static void drop_buffers(struct dm_bufio_client *c)
2351 struct dm_buffer *b;
2353 if (WARN_ON(dm_bufio_in_request()))
2354 return; /* should never happen */
2357 * An optimization so that the buffers are not written one-by-one.
2359 dm_bufio_write_dirty_buffers_async(c);
2363 while ((b = __get_unclaimed_buffer(c)))
2364 __free_buffer_wake(b);
2366 for (i = 0; i < LIST_SIZE; i++) {
2367 bool warned = false;
2369 cache_iterate(&c->cache, i, warn_leak, &warned);
2372 #ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
2373 while ((b = __get_unclaimed_buffer(c)))
2374 __free_buffer_wake(b);
2377 for (i = 0; i < LIST_SIZE; i++)
2378 WARN_ON(cache_count(&c->cache, i));
2383 static unsigned long get_retain_buffers(struct dm_bufio_client *c)
2385 unsigned long retain_bytes = READ_ONCE(dm_bufio_retain_bytes);
2387 if (likely(c->sectors_per_block_bits >= 0))
2388 retain_bytes >>= c->sectors_per_block_bits + SECTOR_SHIFT;
2390 retain_bytes /= c->block_size;
2392 return retain_bytes;
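
/*
 * Worked example for get_retain_buffers() above (illustrative): with the
 * default DM_BUFIO_DEFAULT_RETAIN_BYTES of 256 KiB and a 4096-byte block
 * size, sectors_per_block_bits is 3, so 262144 >> (3 + 9) = 64 buffers are
 * retained.
 */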
2395 static void __scan(struct dm_bufio_client *c)
2398 struct dm_buffer *b;
2399 unsigned long freed = 0;
2400 unsigned long retain_target = get_retain_buffers(c);
2401 unsigned long count = cache_total(&c->cache);
2403 for (l = 0; l < LIST_SIZE; l++) {
2405 if (count - freed <= retain_target)
2406 atomic_long_set(&c->need_shrink, 0);
2407 if (!atomic_long_read(&c->need_shrink))
2410 b = cache_evict(&c->cache, l,
2411 l == LIST_CLEAN ? is_clean : is_dirty, c);
2415 __make_buffer_clean(b);
2416 __free_buffer_wake(b);
2418 atomic_long_dec(&c->need_shrink);
2425 static void shrink_work(struct work_struct *w)
2427 struct dm_bufio_client *c = container_of(w, struct dm_bufio_client, shrink_work);
2434 static unsigned long dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
2436 struct dm_bufio_client *c;
2438 c = shrink->private_data;
2439 atomic_long_add(sc->nr_to_scan, &c->need_shrink);
2440 queue_work(dm_bufio_wq, &c->shrink_work);
2442 return sc->nr_to_scan;
2445 static unsigned long dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
2447 struct dm_bufio_client *c = shrink->private_data;
2448 unsigned long count = cache_total(&c->cache);
2449 unsigned long retain_target = get_retain_buffers(c);
2450 unsigned long queued_for_cleanup = atomic_long_read(&c->need_shrink);
2452 if (unlikely(count < retain_target))
2455 count -= retain_target;
2457 if (unlikely(count < queued_for_cleanup))
2460 count -= queued_for_cleanup;
2466 * Create the buffering interface
2468 struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned int block_size,
2469 unsigned int reserved_buffers, unsigned int aux_size,
2470 void (*alloc_callback)(struct dm_buffer *),
2471 void (*write_callback)(struct dm_buffer *),
2475 unsigned int num_locks;
2476 struct dm_bufio_client *c;
2479 if (!block_size || block_size & ((1 << SECTOR_SHIFT) - 1)) {
2480 DMERR("%s: block size not specified or is not a multiple of 512b", __func__);
2485 num_locks = dm_num_hash_locks();
2486 c = kzalloc(sizeof(*c) + (num_locks * sizeof(struct buffer_tree)), GFP_KERNEL);
2491 cache_init(&c->cache, num_locks, (flags & DM_BUFIO_CLIENT_NO_SLEEP) != 0);
2494 c->block_size = block_size;
2495 if (is_power_of_2(block_size))
2496 c->sectors_per_block_bits = __ffs(block_size) - SECTOR_SHIFT;
2498 c->sectors_per_block_bits = -1;
2500 c->alloc_callback = alloc_callback;
2501 c->write_callback = write_callback;
2503 if (flags & DM_BUFIO_CLIENT_NO_SLEEP) {
2505 static_branch_inc(&no_sleep_enabled);
2508 mutex_init(&c->lock);
2509 spin_lock_init(&c->spinlock);
2510 INIT_LIST_HEAD(&c->reserved_buffers);
2511 c->need_reserved_buffers = reserved_buffers;
2513 dm_bufio_set_minimum_buffers(c, DM_BUFIO_MIN_BUFFERS);
2515 init_waitqueue_head(&c->free_buffer_wait);
2516 c->async_write_error = 0;
2518 c->dm_io = dm_io_client_create();
2519 if (IS_ERR(c->dm_io)) {
2520 r = PTR_ERR(c->dm_io);
2524 if (block_size <= KMALLOC_MAX_SIZE &&
2525 (block_size < PAGE_SIZE || !is_power_of_2(block_size))) {
2526 unsigned int align = min(1U << __ffs(block_size), (unsigned int)PAGE_SIZE);
2528 snprintf(slab_name, sizeof(slab_name), "dm_bufio_cache-%u", block_size);
2529 c->slab_cache = kmem_cache_create(slab_name, block_size, align,
2530 SLAB_RECLAIM_ACCOUNT, NULL);
2531 if (!c->slab_cache) {
2537 snprintf(slab_name, sizeof(slab_name), "dm_bufio_buffer-%u", aux_size);
2539 snprintf(slab_name, sizeof(slab_name), "dm_bufio_buffer");
2540 c->slab_buffer = kmem_cache_create(slab_name, sizeof(struct dm_buffer) + aux_size,
2541 0, SLAB_RECLAIM_ACCOUNT, NULL);
2542 if (!c->slab_buffer) {
2547 while (c->need_reserved_buffers) {
2548 struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL);
2554 __free_buffer_wake(b);
2557 INIT_WORK(&c->shrink_work, shrink_work);
2558 atomic_long_set(&c->need_shrink, 0);
2560 c->shrinker = shrinker_alloc(0, "dm-bufio:(%u:%u)",
2561 MAJOR(bdev->bd_dev), MINOR(bdev->bd_dev));
2567 c->shrinker->count_objects = dm_bufio_shrink_count;
2568 c->shrinker->scan_objects = dm_bufio_shrink_scan;
2569 c->shrinker->seeks = 1;
2570 c->shrinker->batch = 0;
2571 c->shrinker->private_data = c;
2573 shrinker_register(c->shrinker);
2575 mutex_lock(&dm_bufio_clients_lock);
2576 dm_bufio_client_count++;
2577 list_add(&c->client_list, &dm_bufio_all_clients);
2578 __cache_size_refresh();
2579 mutex_unlock(&dm_bufio_clients_lock);
2581 return c;
2583 bad:
2584 while (!list_empty(&c->reserved_buffers)) {
2585 struct dm_buffer *b = list_to_buffer(c->reserved_buffers.next);
2587 list_del(&b->lru.list);
2588 free_buffer(b);
2589 }
2590 kmem_cache_destroy(c->slab_cache);
2591 kmem_cache_destroy(c->slab_buffer);
2592 dm_io_client_destroy(c->dm_io);
2593 bad_dm_io:
2594 mutex_destroy(&c->lock);
2595 if (c->no_sleep)
2596 static_branch_dec(&no_sleep_enabled);
2597 kfree(c);
2598 bad_client:
2599 return ERR_PTR(r);
2600 }
2601 EXPORT_SYMBOL_GPL(dm_bufio_client_create);
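/*
 * Example (hypothetical caller, for illustration only; the parameters and the
 * surrounding error handling are placeholders, not part of this file):
 *
 *	struct dm_bufio_client *c;
 *
 *	c = dm_bufio_client_create(bdev, 4096, 1, 0, NULL, NULL, 0);
 *	if (IS_ERR(c))
 *		return PTR_ERR(c);
 *	...
 *	dm_bufio_client_destroy(c);
 */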
2603 /*
2604 * Free the buffering interface.
2605 * It is required that there are no references on any buffers.
2606 */
2607 void dm_bufio_client_destroy(struct dm_bufio_client *c)
2608 {
2609 unsigned int i;
2611 drop_buffers(c);
2613 shrinker_free(c->shrinker);
2614 flush_work(&c->shrink_work);
2616 mutex_lock(&dm_bufio_clients_lock);
2618 list_del(&c->client_list);
2619 dm_bufio_client_count--;
2620 __cache_size_refresh();
2622 mutex_unlock(&dm_bufio_clients_lock);
2624 WARN_ON(c->need_reserved_buffers);
2626 while (!list_empty(&c->reserved_buffers)) {
2627 struct dm_buffer *b = list_to_buffer(c->reserved_buffers.next);
2629 list_del(&b->lru.list);
2630 free_buffer(b);
2631 }
2633 for (i = 0; i < LIST_SIZE; i++)
2634 if (cache_count(&c->cache, i))
2635 DMERR("leaked buffer count %d: %lu", i, cache_count(&c->cache, i));
2637 for (i = 0; i < LIST_SIZE; i++)
2638 WARN_ON(cache_count(&c->cache, i));
2640 cache_destroy(&c->cache);
2641 kmem_cache_destroy(c->slab_cache);
2642 kmem_cache_destroy(c->slab_buffer);
2643 dm_io_client_destroy(c->dm_io);
2644 mutex_destroy(&c->lock);
2645 if (c->no_sleep)
2646 static_branch_dec(&no_sleep_enabled);
2647 kfree(c);
2648 }
2649 EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);
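/*
 * Drop all cached buffers and wait for any pending shrink work, leaving the
 * client empty but still usable.
 */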
2651 void dm_bufio_client_reset(struct dm_bufio_client *c)
2652 {
2653 drop_buffers(c);
2654 flush_work(&c->shrink_work);
2655 }
2656 EXPORT_SYMBOL_GPL(dm_bufio_client_reset);
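/*
 * Set a sector offset that is added to the position of every buffer when it
 * is read from or written to the underlying device.
 */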
2658 void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start)
2659 {
2660 c->start = start;
2661 }
2662 EXPORT_SYMBOL_GPL(dm_bufio_set_sector_offset);
2664 /*--------------------------------------------------------------*/
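/*
 * Convert the dm_bufio_max_age module parameter from seconds to jiffies,
 * clamping it so that the multiplication by HZ cannot overflow.
 */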
2666 static unsigned int get_max_age_hz(void)
2667 {
2668 unsigned int max_age = READ_ONCE(dm_bufio_max_age);
2670 if (max_age > UINT_MAX / HZ)
2671 max_age = UINT_MAX / HZ;
2673 return max_age * HZ;
2674 }
2676 static bool older_than(struct dm_buffer *b, unsigned long age_hz)
2677 {
2678 return time_after_eq(jiffies, READ_ONCE(b->last_accessed) + age_hz);
2679 }
2681 struct evict_params {
2682 gfp_t gfp;
2683 unsigned long age_hz;
2685 /*
2686 * This gets updated with the largest last_accessed (i.e. most
2687 * recently used) of the evicted buffers. It will not be reinitialised
2688 * by __evict_many(), so you can use it across multiple invocations.
2689 */
2690 unsigned long last_accessed;
2691 };
2693 /*
2694 * We may not be able to evict this buffer if I/O is pending or the client
2695 * is still using it.
2696 *
2697 * And if GFP_NOFS is used, we must not do any I/O because we hold
2698 * dm_bufio_clients_lock and we would risk deadlock if the I/O gets
2699 * rerouted to a different bufio client.
2700 */
2701 static enum evict_result select_for_evict(struct dm_buffer *b, void *context)
2702 {
2703 struct evict_params *params = context;
2705 if (!(params->gfp & __GFP_FS) ||
2706 (static_branch_unlikely(&no_sleep_enabled) && b->c->no_sleep)) {
2707 if (test_bit_acquire(B_READING, &b->state) ||
2708 test_bit(B_WRITING, &b->state) ||
2709 test_bit(B_DIRTY, &b->state))
2710 return ER_DONT_EVICT;
2711 }
2713 return older_than(b, params->age_hz) ? ER_EVICT : ER_STOP;
2714 }
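/*
 * Evict up to max_count buffers from the given list, stopping as soon as the
 * selection predicate asks to stop. Returns the number of buffers evicted;
 * params->last_accessed is updated along the way (see struct evict_params).
 */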
2716 static unsigned long __evict_many(struct dm_bufio_client *c,
2717 struct evict_params *params,
2718 int list_mode, unsigned long max_count)
2719 {
2720 unsigned long count;
2721 unsigned long last_accessed;
2722 struct dm_buffer *b;
2724 for (count = 0; count < max_count; count++) {
2725 b = cache_evict(&c->cache, list_mode, select_for_evict, params);
2726 if (!b)
2727 break;
2729 last_accessed = READ_ONCE(b->last_accessed);
2730 if (time_after_eq(params->last_accessed, last_accessed))
2731 params->last_accessed = last_accessed;
2733 __make_buffer_clean(b);
2734 __free_buffer_wake(b);
2736 cond_resched();
2737 }
2739 return count;
2740 }
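/*
 * Periodic per-client eviction: flush queued writes if the writeback
 * watermark has been exceeded, then evict clean buffers older than age_hz,
 * while keeping at least the configured number of retained buffers.
 */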
2742 static void evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz)
2743 {
2744 struct evict_params params = {.gfp = 0, .age_hz = age_hz, .last_accessed = 0};
2745 unsigned long retain = get_retain_buffers(c);
2746 unsigned long count;
2747 LIST_HEAD(write_list);
2749 dm_bufio_lock(c);
2751 __check_watermark(c, &write_list);
2752 if (unlikely(!list_empty(&write_list))) {
2753 dm_bufio_unlock(c);
2754 __flush_write_list(&write_list);
2755 dm_bufio_lock(c);
2756 }
2758 count = cache_total(&c->cache);
2759 if (count > retain)
2760 __evict_many(c, &params, LIST_CLEAN, count - retain);
2762 dm_bufio_unlock(c);
2763 }
2765 static void cleanup_old_buffers(void)
2766 {
2767 unsigned long max_age_hz = get_max_age_hz();
2768 struct dm_bufio_client *c;
2770 mutex_lock(&dm_bufio_clients_lock);
2772 __cache_size_refresh();
2774 list_for_each_entry(c, &dm_bufio_all_clients, client_list)
2775 evict_old_buffers(c, max_age_hz);
2777 mutex_unlock(&dm_bufio_clients_lock);
2778 }
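/*
 * Delayed-work handler: age out old buffers in every client and re-arm the
 * timer so that the scan runs again in DM_BUFIO_WORK_TIMER_SECS seconds.
 */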
2780 static void work_fn(struct work_struct *w)
2781 {
2782 cleanup_old_buffers();
2784 queue_delayed_work(dm_bufio_wq, &dm_bufio_cleanup_old_work,
2785 DM_BUFIO_WORK_TIMER_SECS * HZ);
2786 }
2788 /*--------------------------------------------------------------*/
2790 /*
2791 * Global cleanup tries to evict the oldest buffers from across _all_
2792 * the clients. It does this by repeatedly evicting a few buffers from
2793 * the client that holds the oldest buffer. It's approximate, but hopefully
2794 * good enough.
2795 */
2796 static struct dm_bufio_client *__pop_client(void)
2797 {
2798 struct list_head *h;
2800 if (list_empty(&dm_bufio_all_clients))
2801 return NULL;
2803 h = dm_bufio_all_clients.next;
2804 list_del(h);
2805 return container_of(h, struct dm_bufio_client, client_list);
2806 }
2808 /*
2809 * Inserts the client in the global client list based on its
2810 * 'oldest_buffer' field.
2811 */
2812 static void __insert_client(struct dm_bufio_client *new_client)
2813 {
2814 struct dm_bufio_client *c;
2815 struct list_head *h = dm_bufio_all_clients.next;
2817 while (h != &dm_bufio_all_clients) {
2818 c = container_of(h, struct dm_bufio_client, client_list);
2819 if (time_after_eq(c->oldest_buffer, new_client->oldest_buffer))
2820 break;
2821 h = h->next;
2822 }
2824 list_add_tail(&new_client->client_list, h);
2825 }
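/*
 * Take the client with the oldest buffer off the global list, evict up to
 * nr_buffers clean buffers from it, update its oldest_buffer stamp and put
 * it back in sorted order. Returns the number of buffers evicted.
 */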
2827 static unsigned long __evict_a_few(unsigned long nr_buffers)
2828 {
2829 unsigned long count;
2830 struct dm_bufio_client *c;
2831 struct evict_params params = {
2832 .gfp = GFP_KERNEL,
2833 .age_hz = 0,
2834 /* set to jiffies in case there are no buffers in this client */
2835 .last_accessed = jiffies
2836 };
2838 c = __pop_client();
2839 if (!c)
2840 return 0;
2842 dm_bufio_lock(c);
2843 count = __evict_many(c, &params, LIST_CLEAN, nr_buffers);
2844 dm_bufio_unlock(c);
2846 if (count)
2847 c->oldest_buffer = params.last_accessed;
2848 __insert_client(c);
2850 return count;
2851 }
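/*
 * Walk all clients and queue background writeback for any that have exceeded
 * their writeback watermark, then issue the collected writes with no locks
 * held.
 */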
2853 static void check_watermarks(void)
2854 {
2855 LIST_HEAD(write_list);
2856 struct dm_bufio_client *c;
2858 mutex_lock(&dm_bufio_clients_lock);
2859 list_for_each_entry(c, &dm_bufio_all_clients, client_list) {
2860 dm_bufio_lock(c);
2861 __check_watermark(c, &write_list);
2862 dm_bufio_unlock(c);
2863 }
2864 mutex_unlock(&dm_bufio_clients_lock);
2866 __flush_write_list(&write_list);
2867 }
2869 static void evict_old(void)
2870 {
2871 unsigned long threshold = dm_bufio_cache_size -
2872 dm_bufio_cache_size / DM_BUFIO_LOW_WATERMARK_RATIO;
2874 mutex_lock(&dm_bufio_clients_lock);
2875 while (dm_bufio_current_allocated > threshold) {
2876 if (!__evict_a_few(64))
2877 break;
2878 cond_resched();
2879 }
2880 mutex_unlock(&dm_bufio_clients_lock);
2881 }
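/*
 * Work item for dm_bufio_replacement_work: write back dirty buffers over the
 * watermark, then evict clean buffers until the total allocation drops below
 * the low watermark.
 */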
2883 static void do_global_cleanup(struct work_struct *w)
2884 {
2885 check_watermarks();
2886 evict_old();
2887 }
2889 /*
2890 *--------------------------------------------------------------
2891 * Module setup
2892 *--------------------------------------------------------------
2893 */
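/*
 * Illustrative sizing (not from this file): with 16 GiB of low memory,
 * DM_BUFIO_MEMORY_PERCENT = 2 yields a default cache limit of roughly
 * 328 MiB; the limit is also capped to DM_BUFIO_VMALLOC_PERCENT (25%) of the
 * vmalloc arena, which mainly matters on 32-bit kernels.
 */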
2895 /*
2896 * This is called only once for the whole dm_bufio module.
2897 * It initializes the memory limit.
2898 */
2899 static int __init dm_bufio_init(void)
2900 {
2901 __u64 mem;
2903 dm_bufio_allocated_kmem_cache = 0;
2904 dm_bufio_allocated_get_free_pages = 0;
2905 dm_bufio_allocated_vmalloc = 0;
2906 dm_bufio_current_allocated = 0;
2908 mem = (__u64)mult_frac(totalram_pages() - totalhigh_pages(),
2909 DM_BUFIO_MEMORY_PERCENT, 100) << PAGE_SHIFT;
2911 if (mem > ULONG_MAX)
2912 mem = ULONG_MAX;
2914 #ifdef CONFIG_MMU
2915 if (mem > mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100))
2916 mem = mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100);
2917 #endif
2919 dm_bufio_default_cache_size = mem;
2921 mutex_lock(&dm_bufio_clients_lock);
2922 __cache_size_refresh();
2923 mutex_unlock(&dm_bufio_clients_lock);
2925 dm_bufio_wq = alloc_workqueue("dm_bufio_cache", WQ_MEM_RECLAIM, 0);
2926 if (!dm_bufio_wq)
2927 return -ENOMEM;
2929 INIT_DELAYED_WORK(&dm_bufio_cleanup_old_work, work_fn);
2930 INIT_WORK(&dm_bufio_replacement_work, do_global_cleanup);
2931 queue_delayed_work(dm_bufio_wq, &dm_bufio_cleanup_old_work,
2932 DM_BUFIO_WORK_TIMER_SECS * HZ);
2934 return 0;
2935 }
2937 /*
2938 * This is called once when unloading the dm_bufio module.
2939 */
2940 static void __exit dm_bufio_exit(void)
2941 {
2942 int bug = 0;
2944 cancel_delayed_work_sync(&dm_bufio_cleanup_old_work);
2945 destroy_workqueue(dm_bufio_wq);
2947 if (dm_bufio_client_count) {
2948 DMCRIT("%s: dm_bufio_client_count leaked: %d",
2949 __func__, dm_bufio_client_count);
2950 bug = 1;
2951 }
2953 if (dm_bufio_current_allocated) {
2954 DMCRIT("%s: dm_bufio_current_allocated leaked: %lu",
2955 __func__, dm_bufio_current_allocated);
2956 bug = 1;
2957 }
2959 if (dm_bufio_allocated_get_free_pages) {
2960 DMCRIT("%s: dm_bufio_allocated_get_free_pages leaked: %lu",
2961 __func__, dm_bufio_allocated_get_free_pages);
2962 bug = 1;
2963 }
2965 if (dm_bufio_allocated_vmalloc) {
2966 DMCRIT("%s: dm_bufio_vmalloc leaked: %lu",
2967 __func__, dm_bufio_allocated_vmalloc);
2968 bug = 1;
2969 }
2971 WARN_ON(bug); /* leaks are not worth crashing the system */
2972 }
2974 module_init(dm_bufio_init)
2975 module_exit(dm_bufio_exit)
2977 module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, 0644);
2978 MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");
2980 module_param_named(max_age_seconds, dm_bufio_max_age, uint, 0644);
2981 MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");
2983 module_param_named(retain_bytes, dm_bufio_retain_bytes, ulong, 0644);
2984 MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory");
2986 module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, 0644);
2987 MODULE_PARM_DESC(peak_allocated_bytes, "Tracks the maximum allocated memory");
2989 module_param_named(allocated_kmem_cache_bytes, dm_bufio_allocated_kmem_cache, ulong, 0444);
2990 MODULE_PARM_DESC(allocated_kmem_cache_bytes, "Memory allocated with kmem_cache_alloc");
2992 module_param_named(allocated_get_free_pages_bytes, dm_bufio_allocated_get_free_pages, ulong, 0444);
2993 MODULE_PARM_DESC(allocated_get_free_pages_bytes, "Memory allocated with get_free_pages");
2995 module_param_named(allocated_vmalloc_bytes, dm_bufio_allocated_vmalloc, ulong, 0444);
2996 MODULE_PARM_DESC(allocated_vmalloc_bytes, "Memory allocated with vmalloc");
2998 module_param_named(current_allocated_bytes, dm_bufio_current_allocated, ulong, 0444);
2999 MODULE_PARM_DESC(current_allocated_bytes, "Memory currently used by the cache");
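/*
 * Example (illustrative, not part of this file): max_cache_size_bytes is
 * writable at runtime, e.g.
 *
 *	echo 67108864 > /sys/module/dm_bufio/parameters/max_cache_size_bytes
 */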
3001 MODULE_AUTHOR("Mikulas Patocka <dm-devel@lists.linux.dev>");
3002 MODULE_DESCRIPTION(DM_NAME " buffered I/O library");
3003 MODULE_LICENSE("GPL");