/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHE_WRITEBACK_H
#define _BCACHE_WRITEBACK_H
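
/*
 * Writeback cutoffs, in percent of cache in use.  Above the sync cutoff
 * new writes bypass writeback entirely; the plain cutoff is the point
 * below which ordinary writes are still cached for writeback (see
 * should_writeback()).  The _MAX values bound what may be configured via
 * the cutoff_writeback[_sync] module parameters.
 */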
#define CUTOFF_WRITEBACK	40
#define CUTOFF_WRITEBACK_SYNC	70

#define CUTOFF_WRITEBACK_MAX		70
#define CUTOFF_WRITEBACK_SYNC_MAX	90
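
/*
 * Per-pass limits for the writeback thread: at most MAX_WRITEBACKS_IN_PASS
 * dirty keys or MAX_WRITESIZE_IN_PASS sectors (512 bytes each) are written
 * back before the rate-limiting delay is recalculated.
 */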
#define MAX_WRITEBACKS_IN_PASS	5
#define MAX_WRITESIZE_IN_PASS	5000	/* *512b */
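
/*
 * Default and upper bound, in seconds, for writeback_rate_update_seconds:
 * how often the writeback rate controller recomputes the target rate.
 */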
#define WRITEBACK_RATE_UPDATE_SECS_MAX		60
#define WRITEBACK_RATE_UPDATE_SECS_DEFAULT	5
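
/*
 * If the cache is more than this percent full while writeback is running,
 * request a garbage collection once writeback has drained the dirty data
 * (only honoured when gc_after_writeback is enabled).
 */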
#define BCH_AUTO_GC_DIRTY_THRESHOLD	50
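
/*
 * Fragmentation thresholds, in percent of cache in use.  As utilisation
 * passes LOW/MID/HIGH, the writeback rate is boosted progressively harder
 * to reclaim fragmented buckets; see __update_writeback_rate() in
 * writeback.c.
 */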
#define BCH_WRITEBACK_FRAGMENT_THRESHOLD_LOW 50
#define BCH_WRITEBACK_FRAGMENT_THRESHOLD_MID 57
#define BCH_WRITEBACK_FRAGMENT_THRESHOLD_HIGH 64
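
/*
 * Maximum number of kernel threads bch_sectors_dirty_init() may spawn to
 * walk the btree and count dirty sectors in parallel.
 */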
#define BCH_DIRTY_INIT_THRD_MAX	12

/*
 * A shift of 14 (i.e. shares in 16384ths) is chosen so that each backing
 * device's share is a reasonable fraction of the total, and the arithmetic
 * does not overflow until individual backing devices reach a petabyte.
 */
#define WRITEBACK_SHARE_SHIFT	14
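
/*
 * State shared between bch_sectors_dirty_init() and the worker threads it
 * spawns: one dirty_init_thrd_info per thread, all pointing back at a
 * common bch_dirty_init_state.
 */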
struct bch_dirty_init_state;
struct dirty_init_thrd_info {
	struct bch_dirty_init_state	*state;
	struct task_struct		*thread;
};

struct bch_dirty_init_state {
	struct cache_set		*c;
	struct bcache_device		*d;
	int				total_threads;
	int				key_idx;
	spinlock_t			idx_lock;
	atomic_t			started;
	atomic_t			enough;
	wait_queue_head_t		wait;
	struct dirty_init_thrd_info	infos[BCH_DIRTY_INIT_THRD_MAX];
};
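
/*
 * Sum of the per-stripe dirty-sector counters for a whole bcache device.
 */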
static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
{
	uint64_t i, ret = 0;

	for (i = 0; i < d->nr_stripes; i++)
		ret += atomic_read(d->stripe_sectors_dirty + i);

	return ret;
}
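
/*
 * Convert a sector offset on the backing device into a stripe index;
 * returns -EINVAL if the index would fall outside [0, nr_stripes).
 */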
static inline int offset_to_stripe(struct bcache_device *d,
					uint64_t offset)
{
	do_div(offset, d->stripe_size);

	/* d->nr_stripes is in range [1, INT_MAX] */
	if (unlikely(offset >= d->nr_stripes)) {
		pr_err("Invalid stripe %llu (>= nr_stripes %d).\n",
			offset, d->nr_stripes);
		return -EINVAL;
	}

	/*
	 * Here offset is definitely smaller than INT_MAX, so
	 * returning it as an int will never overflow.
	 */
	return offset;
}
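
/*
 * True if any stripe touched by the range [offset, offset + nr_sectors)
 * still has dirty sectors.
 */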
static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc,
					   uint64_t offset,
					   unsigned int nr_sectors)
{
	int stripe = offset_to_stripe(&dc->disk, offset);

	if (stripe < 0)
		return false;

	while (1) {
		if (atomic_read(dc->disk.stripe_sectors_dirty + stripe))
			return true;

		if (nr_sectors <= dc->disk.stripe_size)
			return false;

		nr_sectors -= dc->disk.stripe_size;
		stripe++;
	}
}
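
/*
 * Writeback cutoffs actually in effect; set from the module parameters
 * (clamped to the _MAX values above) when the module loads.
 */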
extern unsigned int bch_cutoff_writeback;
extern unsigned int bch_cutoff_writeback_sync;
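
/*
 * Decide whether a write should be cached for writeback: never when the
 * device is detaching, the cache mode is not writeback, or the cache is
 * past the sync cutoff; always for writes that land on already-dirty
 * stripes when partial stripes are expensive; otherwise only for
 * sync/metadata bios or while utilisation is still below the writeback
 * cutoff.
 */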
static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
				    unsigned int cache_mode, bool would_skip)
{
	unsigned int in_use = dc->disk.c->gc_stats.in_use;

	if (cache_mode != CACHE_MODE_WRITEBACK ||
	    test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
	    in_use > bch_cutoff_writeback_sync)
		return false;

	if (bio_op(bio) == REQ_OP_DISCARD)
		return false;

	if (dc->partial_stripes_expensive &&
	    bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector,
				    bio_sectors(bio)))
		return true;

	if (would_skip)
		return false;

	return (op_is_sync(bio->bi_opf) ||
		bio->bi_opf & (REQ_META|REQ_PRIO) ||
		in_use <= bch_cutoff_writeback);
}
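
/* Wake the writeback thread, if one is running for this cached device. */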
static inline void bch_writeback_queue(struct cached_dev *dc)
{
	if (!IS_ERR_OR_NULL(dc->writeback_thread))
		wake_up_process(dc->writeback_thread);
}
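
/*
 * Record that the device now holds dirty data: on the 0 -> 1 transition
 * of has_dirty, persist BDEV_STATE_DIRTY in the backing superblock and
 * wake the writeback thread.
 */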
static inline void bch_writeback_add(struct cached_dev *dc)
{
	if (!atomic_read(&dc->has_dirty) &&
	    !atomic_xchg(&dc->has_dirty, 1)) {
		if (BDEV_STATE(&dc->sb) != BDEV_STATE_DIRTY) {
			SET_BDEV_STATE(&dc->sb, BDEV_STATE_DIRTY);
			/* XXX: should do this synchronously */
			bch_write_bdev_super(dc, NULL);
		}

		bch_writeback_queue(dc);
	}
}
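
/* Implemented in writeback.c. */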
void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned int inode,
				  uint64_t offset, int nr_sectors);

void bch_sectors_dirty_init(struct bcache_device *d);
void bch_cached_dev_writeback_init(struct cached_dev *dc);
int bch_cached_dev_writeback_start(struct cached_dev *dc);

#endif /* _BCACHE_WRITEBACK_H */