// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2005-2007 Red Hat GmbH
 *
 * A target that delays reads and/or writes and can send
 * them to different devices.
 *
 * This file is released under the GPL.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/kthread.h>

#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "delay"

struct delay_class {
	struct dm_dev *dev;
	sector_t start;
	unsigned int delay;
	unsigned int ops;
};

struct delay_c {
	struct timer_list delay_timer;
	struct mutex timer_lock;
	struct workqueue_struct *kdelayd_wq;
	struct work_struct flush_expired_bios;
	struct list_head delayed_bios;
	struct task_struct *worker;
	atomic_t may_delay;
	struct delay_class read;
	struct delay_class write;
	struct delay_class flush;
	int argc;
};

struct dm_delay_info {
	struct delay_c *context;
	struct delay_class *class;
	struct list_head list;
	unsigned long expires;
};

static DEFINE_MUTEX(delayed_bios_lock);

static void handle_delayed_timer(struct timer_list *t)
{
	struct delay_c *dc = from_timer(dc, t, delay_timer);

	queue_work(dc->kdelayd_wq, &dc->flush_expired_bios);
}

static void queue_timeout(struct delay_c *dc, unsigned long expires)
{
	mutex_lock(&dc->timer_lock);

	if (!timer_pending(&dc->delay_timer) || expires < dc->delay_timer.expires)
		mod_timer(&dc->delay_timer, expires);

	mutex_unlock(&dc->timer_lock);
}

static inline bool delay_is_fast(struct delay_c *dc)
{
	return !!dc->worker;
}

static void flush_delayed_bios_fast(struct delay_c *dc, bool flush_all)
{
	struct dm_delay_info *delayed, *next;

	mutex_lock(&delayed_bios_lock);
	list_for_each_entry_safe(delayed, next, &dc->delayed_bios, list) {
		if (flush_all || time_after_eq(jiffies, delayed->expires)) {
			struct bio *bio = dm_bio_from_per_bio_data(delayed,
						sizeof(struct dm_delay_info));
			list_del(&delayed->list);
			dm_submit_bio_remap(bio, NULL);
			delayed->class->ops--;
		}
	}
	mutex_unlock(&delayed_bios_lock);
}

static int flush_worker_fn(void *data)
{
	struct delay_c *dc = data;

	while (!kthread_should_stop()) {
		flush_delayed_bios_fast(dc, false);
		if (unlikely(list_empty(&dc->delayed_bios))) {
			set_current_state(TASK_INTERRUPTIBLE);
			schedule();
		} else
			cond_resched();
	}

	return 0;
}

static void flush_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		dm_submit_bio_remap(bio, NULL);
		bio = n;
	}
}

static struct bio *flush_delayed_bios(struct delay_c *dc, bool flush_all)
{
	struct dm_delay_info *delayed, *next;
	unsigned long next_expires = 0;
	unsigned long start_timer = 0;
	struct bio_list flush_bios = { };

	mutex_lock(&delayed_bios_lock);
	list_for_each_entry_safe(delayed, next, &dc->delayed_bios, list) {
		if (flush_all || time_after_eq(jiffies, delayed->expires)) {
			struct bio *bio = dm_bio_from_per_bio_data(delayed,
						sizeof(struct dm_delay_info));
			list_del(&delayed->list);
			bio_list_add(&flush_bios, bio);
			delayed->class->ops--;
			continue;
		}

		if (!start_timer) {
			start_timer = 1;
			next_expires = delayed->expires;
		} else
			next_expires = min(next_expires, delayed->expires);
	}
	mutex_unlock(&delayed_bios_lock);

	if (start_timer)
		queue_timeout(dc, next_expires);

	return bio_list_get(&flush_bios);
}

static void flush_expired_bios(struct work_struct *work)
{
	struct delay_c *dc;

	dc = container_of(work, struct delay_c, flush_expired_bios);
	if (delay_is_fast(dc))
		flush_delayed_bios_fast(dc, false);
	else
		flush_bios(flush_delayed_bios(dc, false));
}

static void delay_dtr(struct dm_target *ti)
{
	struct delay_c *dc = ti->private;

	if (dc->kdelayd_wq)
		destroy_workqueue(dc->kdelayd_wq);

	if (dc->read.dev)
		dm_put_device(ti, dc->read.dev);
	if (dc->write.dev)
		dm_put_device(ti, dc->write.dev);
	if (dc->flush.dev)
		dm_put_device(ti, dc->flush.dev);
	if (dc->worker)
		kthread_stop(dc->worker);

	if (!delay_is_fast(dc))
		mutex_destroy(&dc->timer_lock);

	kfree(dc);
}

static int delay_class_ctr(struct dm_target *ti, struct delay_class *c, char **argv)
{
	int ret;
	unsigned long long tmpll;
	char dummy;

	if (sscanf(argv[1], "%llu%c", &tmpll, &dummy) != 1 || tmpll != (sector_t)tmpll) {
		ti->error = "Invalid device sector";
		return -EINVAL;
	}
	c->start = tmpll;

	if (sscanf(argv[2], "%u%c", &c->delay, &dummy) != 1) {
		ti->error = "Invalid delay";
		return -EINVAL;
	}

	ret = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &c->dev);
	if (ret) {
		ti->error = "Device lookup failed";
		return ret;
	}

	return 0;
}

/*
 * Mapping parameters:
 *    <device> <offset> <delay> [<write_device> <write_offset> <write_delay>
 *                               [<flush_device> <flush_offset> <flush_delay>]]
 *
 * With separate write parameters, the first set is only used for reads.
 * Offsets are specified in sectors.
 * Delays are specified in milliseconds.
 */
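
/*
 * Illustration only (not part of the original file): hypothetical dmsetup
 * table lines matching the parameters above; device names, sizes and delay
 * values are made up.
 *
 *   # Delay every bio to /dev/sdb1 by 500 ms (3-argument form):
 *   #   0 <device_sectors> delay /dev/sdb1 0 500
 *
 *   # Delay reads from /dev/sdb1 by 50 ms; send writes and flushes to
 *   # /dev/sdc1 delayed by 400 ms (6-argument form):
 *   #   0 <device_sectors> delay /dev/sdb1 0 50 /dev/sdc1 0 400
 */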

static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct delay_c *dc;
	int ret;
	unsigned int max_delay;

	if (argc != 3 && argc != 6 && argc != 9) {
		ti->error = "Requires exactly 3, 6 or 9 arguments";
		return -EINVAL;
	}

	dc = kzalloc(sizeof(*dc), GFP_KERNEL);
	if (!dc) {
		ti->error = "Cannot allocate context";
		return -ENOMEM;
	}
	ti->private = dc;

	INIT_LIST_HEAD(&dc->delayed_bios);
	atomic_set(&dc->may_delay, 1);
	dc->argc = argc;

	ret = delay_class_ctr(ti, &dc->read, argv);
	if (ret)
		goto bad;
	max_delay = dc->read.delay;

	if (argc == 3) {
		ret = delay_class_ctr(ti, &dc->write, argv);
		if (ret)
			goto bad;
		ret = delay_class_ctr(ti, &dc->flush, argv);
		if (ret)
			goto bad;
		max_delay = max(max_delay, dc->write.delay);
		max_delay = max(max_delay, dc->flush.delay);
		goto out;
	}

	ret = delay_class_ctr(ti, &dc->write, argv + 3);
	if (ret)
		goto bad;
	if (argc == 6) {
		ret = delay_class_ctr(ti, &dc->flush, argv + 3);
		if (ret)
			goto bad;
		max_delay = max(max_delay, dc->flush.delay);
		goto out;
	}

	ret = delay_class_ctr(ti, &dc->flush, argv + 6);
	if (ret)
		goto bad;
	max_delay = max(max_delay, dc->flush.delay);

out:
	if (max_delay < 50) {
		/*
		 * In case of small requested delays, use kthread instead of
		 * timers and workqueue to achieve better latency.
		 */
		dc->worker = kthread_create(&flush_worker_fn, dc,
					    "dm-delay-flush-worker");
		if (IS_ERR(dc->worker)) {
			ret = PTR_ERR(dc->worker);
			dc->worker = NULL; /* don't let delay_dtr() stop an ERR_PTR */
			goto bad;
		}
	} else {
		timer_setup(&dc->delay_timer, handle_delayed_timer, 0);
		INIT_WORK(&dc->flush_expired_bios, flush_expired_bios);
		mutex_init(&dc->timer_lock);
		dc->kdelayd_wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0);
		if (!dc->kdelayd_wq) {
			ret = -EINVAL;
			DMERR("Couldn't start kdelayd");
			goto bad;
		}
	}

	ti->num_flush_bios = 1;
	ti->num_discard_bios = 1;
	ti->accounts_remapped_io = true;
	ti->per_io_data_size = sizeof(struct dm_delay_info);
	return 0;

bad:
	delay_dtr(ti);
	return ret;
}

static int delay_bio(struct delay_c *dc, struct delay_class *c, struct bio *bio)
{
	struct dm_delay_info *delayed;
	unsigned long expires = 0;

	if (!c->delay || !atomic_read(&dc->may_delay))
		return DM_MAPIO_REMAPPED;

	delayed = dm_per_bio_data(bio, sizeof(struct dm_delay_info));

	delayed->context = dc;
	delayed->expires = expires = jiffies + msecs_to_jiffies(c->delay);

	mutex_lock(&delayed_bios_lock);
	c->ops++;
	list_add_tail(&delayed->list, &dc->delayed_bios);
	mutex_unlock(&delayed_bios_lock);

	if (delay_is_fast(dc))
		wake_up_process(dc->worker);
	else
		queue_timeout(dc, expires);

	return DM_MAPIO_SUBMITTED;
}

static void delay_presuspend(struct dm_target *ti)
{
	struct delay_c *dc = ti->private;

	atomic_set(&dc->may_delay, 0);

	if (delay_is_fast(dc))
		flush_delayed_bios_fast(dc, true);
	else {
		del_timer_sync(&dc->delay_timer);
		flush_bios(flush_delayed_bios(dc, true));
	}
}

static void delay_resume(struct dm_target *ti)
{
	struct delay_c *dc = ti->private;

	atomic_set(&dc->may_delay, 1);
}

static int delay_map(struct dm_target *ti, struct bio *bio)
{
	struct delay_c *dc = ti->private;
	struct delay_class *c;
	struct dm_delay_info *delayed = dm_per_bio_data(bio, sizeof(struct dm_delay_info));

	if (bio_data_dir(bio) == WRITE) {
		if (unlikely(bio->bi_opf & REQ_PREFLUSH))
			c = &dc->flush;
		else
			c = &dc->write;
	} else {
		c = &dc->read;
	}
	delayed->class = c;
	bio_set_dev(bio, c->dev->bdev);
	bio->bi_iter.bi_sector = c->start + dm_target_offset(ti, bio->bi_iter.bi_sector);

	return delay_bio(dc, c, bio);
}

#define DMEMIT_DELAY_CLASS(c) \
	DMEMIT("%s %llu %u", (c)->dev->name, (unsigned long long)(c)->start, (c)->delay)
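
/*
 * Sketch of the emitted lines (example values are made up):
 * STATUSTYPE_INFO reports the number of bios currently being delayed per
 * class, e.g. "2 0 1"; STATUSTYPE_TABLE re-emits the constructor
 * parameters per class, e.g. "8:17 0 500" for a single-class table.
 */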

static void delay_status(struct dm_target *ti, status_type_t type,
			 unsigned int status_flags, char *result, unsigned int maxlen)
{
	struct delay_c *dc = ti->private;
	int sz = 0;

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%u %u %u", dc->read.ops, dc->write.ops, dc->flush.ops);
		break;

	case STATUSTYPE_TABLE:
		DMEMIT_DELAY_CLASS(&dc->read);
		if (dc->argc >= 6) {
			DMEMIT(" ");
			DMEMIT_DELAY_CLASS(&dc->write);
		}
		if (dc->argc >= 9) {
			DMEMIT(" ");
			DMEMIT_DELAY_CLASS(&dc->flush);
		}
		break;

	case STATUSTYPE_IMA:
		*result = '\0';
		break;
	}
}

static int delay_iterate_devices(struct dm_target *ti,
				 iterate_devices_callout_fn fn, void *data)
{
	struct delay_c *dc = ti->private;
	int ret = 0;

	ret = fn(ti, dc->read.dev, dc->read.start, ti->len, data);
	if (ret)
		goto out;
	ret = fn(ti, dc->write.dev, dc->write.start, ti->len, data);
	if (ret)
		goto out;
	ret = fn(ti, dc->flush.dev, dc->flush.start, ti->len, data);
	if (ret)
		goto out;

out:
	return ret;
}

static struct target_type delay_target = {
	.name		 = "delay",
	.version	 = {1, 4, 0},
	.features	 = DM_TARGET_PASSES_INTEGRITY,
	.module		 = THIS_MODULE,
	.ctr		 = delay_ctr,
	.dtr		 = delay_dtr,
	.map		 = delay_map,
	.presuspend	 = delay_presuspend,
	.resume		 = delay_resume,
	.status		 = delay_status,
	.iterate_devices = delay_iterate_devices,
};
module_dm(delay);

MODULE_DESCRIPTION(DM_NAME " delay target");
MODULE_AUTHOR("Heinz Mauelshagen <mauelshagen@redhat.com>");
MODULE_LICENSE("GPL");