* Copyright (C) 2003 Sistina Software Limited.
* Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
* This file is released under the GPL.
#include <linux/device-mapper.h>
#include "dm-bio-record.h"
#include "dm-path-selector.h"
#include "dm-uevent.h"
#include <linux/blkdev.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <scsi/scsi_dh.h>
#include <linux/atomic.h>
#include <linux/blk-mq.h>
#define DM_MSG_PREFIX "multipath"
#define DM_PG_INIT_DELAY_MSECS 2000
#define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1)
#define QUEUE_IF_NO_PATH_TIMEOUT_DEFAULT 0
static unsigned long queue_if_no_path_timeout_secs = QUEUE_IF_NO_PATH_TIMEOUT_DEFAULT;
struct list_head list;
struct priority_group *pg; /* Owning PG */
unsigned fail_count; /* Cumulative failure count */
struct delayed_work activate_path;
bool is_active:1; /* Path status */
#define path_to_pgpath(__pgp) container_of((__pgp), struct pgpath, path)
* Paths are grouped into Priority Groups and numbered from 1 upwards.
* Each has a path selector which controls which path gets used.
struct priority_group {
struct list_head list;
struct multipath *m; /* Owning multipath instance */
struct path_selector ps;
unsigned pg_num; /* Reference number */
unsigned nr_pgpaths; /* Number of paths in PG */
struct list_head pgpaths;
bool bypassed:1; /* Temporarily bypass this PG? */
/* Multipath context */
unsigned long flags; /* Multipath state flags */
enum dm_queue_mode queue_mode;
struct pgpath *current_pgpath;
struct priority_group *current_pg;
struct priority_group *next_pg; /* Switch to this PG if set */
atomic_t nr_valid_paths; /* Total number of usable paths */
unsigned nr_priority_groups;
struct list_head priority_groups;
const char *hw_handler_name;
char *hw_handler_params;
wait_queue_head_t pg_init_wait; /* Wait for pg_init completion */
unsigned pg_init_retries; /* Number of times to retry pg_init */
unsigned pg_init_delay_msecs; /* Number of msecs before pg_init retry */
atomic_t pg_init_in_progress; /* Only one pg_init allowed at once */
atomic_t pg_init_count; /* Number of times pg_init called */
struct mutex work_mutex;
struct work_struct trigger_event;
struct work_struct process_queued_bios;
struct bio_list queued_bios;
struct timer_list nopath_timer; /* Timeout for queue_if_no_path */
* Context information attached to each io we process.
struct pgpath *pgpath;
typedef int (*action_fn) (struct pgpath *pgpath);
static struct workqueue_struct *kmultipathd, *kmpath_handlerd;
static void trigger_event(struct work_struct *work);
static void activate_or_offline_path(struct pgpath *pgpath);
static void activate_path_work(struct work_struct *work);
static void process_queued_bios(struct work_struct *work);
static void queue_if_no_path_timeout_work(struct timer_list *t);
/*-----------------------------------------------
* Multipath state flags.
*-----------------------------------------------*/
#define MPATHF_QUEUE_IO 0 /* Must we queue all I/O? */
#define MPATHF_QUEUE_IF_NO_PATH 1 /* Queue I/O if last path fails? */
#define MPATHF_SAVED_QUEUE_IF_NO_PATH 2 /* Saved state during suspension */
#define MPATHF_RETAIN_ATTACHED_HW_HANDLER 3 /* If there's already a hw_handler present, don't change it. */
#define MPATHF_PG_INIT_DISABLED 4 /* pg_init is not currently allowed */
#define MPATHF_PG_INIT_REQUIRED 5 /* pg_init needs calling? */
#define MPATHF_PG_INIT_DELAY_RETRY 6 /* Delay pg_init retry? */
/*-----------------------------------------------
* Allocation routines
*-----------------------------------------------*/
static struct pgpath *alloc_pgpath(void)
struct pgpath *pgpath = kzalloc(sizeof(*pgpath), GFP_KERNEL);
pgpath->is_active = true;
static void free_pgpath(struct pgpath *pgpath)
static struct priority_group *alloc_priority_group(void)
struct priority_group *pg;
pg = kzalloc(sizeof(*pg), GFP_KERNEL);
INIT_LIST_HEAD(&pg->pgpaths);
static void free_pgpaths(struct list_head *pgpaths, struct dm_target *ti)
struct pgpath *pgpath, *tmp;
list_for_each_entry_safe(pgpath, tmp, pgpaths, list) {
list_del(&pgpath->list);
dm_put_device(ti, pgpath->path.dev);
static void free_priority_group(struct priority_group *pg,
struct dm_target *ti)
struct path_selector *ps = &pg->ps;
ps->type->destroy(ps);
dm_put_path_selector(ps->type);
free_pgpaths(&pg->pgpaths, ti);
static struct multipath *alloc_multipath(struct dm_target *ti)
m = kzalloc(sizeof(*m), GFP_KERNEL);
INIT_LIST_HEAD(&m->priority_groups);
spin_lock_init(&m->lock);
atomic_set(&m->nr_valid_paths, 0);
INIT_WORK(&m->trigger_event, trigger_event);
mutex_init(&m->work_mutex);
m->queue_mode = DM_TYPE_NONE;
timer_setup(&m->nopath_timer, queue_if_no_path_timeout_work, 0);
static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m)
if (m->queue_mode == DM_TYPE_NONE) {
m->queue_mode = DM_TYPE_REQUEST_BASED;
} else if (m->queue_mode == DM_TYPE_BIO_BASED) {
INIT_WORK(&m->process_queued_bios, process_queued_bios);
* bio-based doesn't support any direct scsi_dh management;
* it just discovers if a scsi_dh is attached.
set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
dm_table_set_type(ti->table, m->queue_mode);
* Init fields that are only used when a scsi_dh is attached
* - must do this unconditionally (really doesn't hurt non-SCSI uses)
set_bit(MPATHF_QUEUE_IO, &m->flags);
atomic_set(&m->pg_init_in_progress, 0);
atomic_set(&m->pg_init_count, 0);
m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT;
init_waitqueue_head(&m->pg_init_wait);
static void free_multipath(struct multipath *m)
struct priority_group *pg, *tmp;
list_for_each_entry_safe(pg, tmp, &m->priority_groups, list) {
free_priority_group(pg, m->ti);
kfree(m->hw_handler_name);
kfree(m->hw_handler_params);
mutex_destroy(&m->work_mutex);
static struct dm_mpath_io *get_mpio(union map_info *info)
static size_t multipath_per_bio_data_size(void)
return sizeof(struct dm_mpath_io) + sizeof(struct dm_bio_details);
static struct dm_mpath_io *get_mpio_from_bio(struct bio *bio)
return dm_per_bio_data(bio, multipath_per_bio_data_size());
static struct dm_bio_details *get_bio_details_from_mpio(struct dm_mpath_io *mpio)
/* dm_bio_details is immediately after the dm_mpath_io in bio's per-bio-data */
void *bio_details = mpio + 1;
static void multipath_init_per_bio_data(struct bio *bio, struct dm_mpath_io **mpio_p)
struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
struct dm_bio_details *bio_details = get_bio_details_from_mpio(mpio);
mpio->nr_bytes = bio->bi_iter.bi_size;
dm_bio_record(bio_details, bio);
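/*
 * Sketch of the per-bio-data layout assumed by the helpers above; the
 * dm_bio_details sits directly behind the dm_mpath_io, as
 * multipath_per_bio_data_size() and get_bio_details_from_mpio() show:
 *
 *   dm_per_bio_data(bio, ...) --> [ struct dm_mpath_io ][ struct dm_bio_details ]
 *                                 ^ get_mpio_from_bio()  ^ mpio + 1
 */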
/*-----------------------------------------------
*-----------------------------------------------*/
static int __pg_init_all_paths(struct multipath *m)
struct pgpath *pgpath;
unsigned long pg_init_delay = 0;
lockdep_assert_held(&m->lock);
if (atomic_read(&m->pg_init_in_progress) || test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
atomic_inc(&m->pg_init_count);
clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
/* Check here to reset pg_init_required */
if (test_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags))
pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ?
m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS);
list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) {
/* Skip failed paths */
if (!pgpath->is_active)
if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path,
atomic_inc(&m->pg_init_in_progress);
return atomic_read(&m->pg_init_in_progress);
static int pg_init_all_paths(struct multipath *m)
spin_lock_irqsave(&m->lock, flags);
ret = __pg_init_all_paths(m);
spin_unlock_irqrestore(&m->lock, flags);
static void __switch_pg(struct multipath *m, struct priority_group *pg)
lockdep_assert_held(&m->lock);
/* Must we initialise the PG first, and queue I/O till it's ready? */
if (m->hw_handler_name) {
set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
set_bit(MPATHF_QUEUE_IO, &m->flags);
clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
clear_bit(MPATHF_QUEUE_IO, &m->flags);
atomic_set(&m->pg_init_count, 0);
static struct pgpath *choose_path_in_pg(struct multipath *m,
struct priority_group *pg,
struct dm_path *path;
struct pgpath *pgpath;
path = pg->ps.type->select_path(&pg->ps, nr_bytes);
return ERR_PTR(-ENXIO);
pgpath = path_to_pgpath(path);
if (unlikely(READ_ONCE(m->current_pg) != pg)) {
/* Only update current_pgpath if pg changed */
spin_lock_irqsave(&m->lock, flags);
m->current_pgpath = pgpath;
spin_unlock_irqrestore(&m->lock, flags);
static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes)
struct priority_group *pg;
struct pgpath *pgpath;
unsigned bypassed = 1;
if (!atomic_read(&m->nr_valid_paths)) {
spin_lock_irqsave(&m->lock, flags);
clear_bit(MPATHF_QUEUE_IO, &m->flags);
spin_unlock_irqrestore(&m->lock, flags);
/* Were we instructed to switch PG? */
if (READ_ONCE(m->next_pg)) {
spin_lock_irqsave(&m->lock, flags);
spin_unlock_irqrestore(&m->lock, flags);
goto check_current_pg;
spin_unlock_irqrestore(&m->lock, flags);
pgpath = choose_path_in_pg(m, pg, nr_bytes);
if (!IS_ERR_OR_NULL(pgpath))
/* Don't change PG until it has no remaining paths */
pg = READ_ONCE(m->current_pg);
pgpath = choose_path_in_pg(m, pg, nr_bytes);
if (!IS_ERR_OR_NULL(pgpath))
* Loop through priority groups until we find a valid path.
* First time we skip PGs marked 'bypassed'.
* Second time we only try the ones we skipped, but set
* pg_init_delay_retry so we do not hammer controllers.
list_for_each_entry(pg, &m->priority_groups, list) {
if (pg->bypassed == !!bypassed)
pgpath = choose_path_in_pg(m, pg, nr_bytes);
if (!IS_ERR_OR_NULL(pgpath)) {
spin_lock_irqsave(&m->lock, flags);
set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
spin_unlock_irqrestore(&m->lock, flags);
} while (bypassed--);
spin_lock_irqsave(&m->lock, flags);
m->current_pgpath = NULL;
m->current_pg = NULL;
spin_unlock_irqrestore(&m->lock, flags);
* dm_report_EIO() is a macro instead of a function to make pr_debug_ratelimited()
* report the function name and line number of the function from which
* it has been invoked.
#define dm_report_EIO(m) \
struct mapped_device *md = dm_table_get_md((m)->ti->table); \
DMDEBUG_LIMIT("%s: returning EIO; QIFNP = %d; SQIFNP = %d; DNFS = %d", \
dm_device_name(md), \
test_bit(MPATHF_QUEUE_IF_NO_PATH, &(m)->flags), \
test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &(m)->flags), \
dm_noflush_suspending((m)->ti)); \
* Check whether bios must be queued in the device-mapper core rather
* than here in the target.
static bool __must_push_back(struct multipath *m)
return dm_noflush_suspending(m->ti);
static bool must_push_back_rq(struct multipath *m)
return test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) || __must_push_back(m);
* Map cloned requests (request-based multipath)
static int multipath_clone_and_map(struct dm_target *ti, struct request *rq,
union map_info *map_context,
struct request **__clone)
struct multipath *m = ti->private;
size_t nr_bytes = blk_rq_bytes(rq);
struct pgpath *pgpath;
struct block_device *bdev;
struct dm_mpath_io *mpio = get_mpio(map_context);
struct request_queue *q;
struct request *clone;
/* Do we need to select a new pgpath? */
pgpath = READ_ONCE(m->current_pgpath);
if (!pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags))
pgpath = choose_pgpath(m, nr_bytes);
if (must_push_back_rq(m))
return DM_MAPIO_DELAY_REQUEUE;
dm_report_EIO(m); /* Failed */
return DM_MAPIO_KILL;
} else if (test_bit(MPATHF_QUEUE_IO, &m->flags) ||
test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
pg_init_all_paths(m);
return DM_MAPIO_DELAY_REQUEUE;
mpio->pgpath = pgpath;
mpio->nr_bytes = nr_bytes;
bdev = pgpath->path.dev->bdev;
q = bdev_get_queue(bdev);
clone = blk_get_request(q, rq->cmd_flags | REQ_NOMERGE,
/* EBUSY, ENODEV or EWOULDBLOCK: requeue */
if (blk_queue_dying(q)) {
atomic_inc(&m->pg_init_in_progress);
activate_or_offline_path(pgpath);
return DM_MAPIO_DELAY_REQUEUE;
* blk-mq's SCHED_RESTART can cover this requeue, so we
* needn't deal with it by DELAY_REQUEUE. More importantly,
* we have to return DM_MAPIO_REQUEUE so that blk-mq can
* get the queue busy feedback (via BLK_STS_RESOURCE),
* otherwise I/O merging can suffer.
return DM_MAPIO_REQUEUE;
clone->bio = clone->biotail = NULL;
clone->rq_disk = bdev->bd_disk;
clone->cmd_flags |= REQ_FAILFAST_TRANSPORT;
if (pgpath->pg->ps.type->start_io)
pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
return DM_MAPIO_REMAPPED;
static void multipath_release_clone(struct request *clone,
union map_info *map_context)
if (unlikely(map_context)) {
* non-NULL map_context means caller is still map
* method; must undo multipath_clone_and_map()
struct dm_mpath_io *mpio = get_mpio(map_context);
struct pgpath *pgpath = mpio->pgpath;
if (pgpath && pgpath->pg->ps.type->end_io)
pgpath->pg->ps.type->end_io(&pgpath->pg->ps,
clone->io_start_time_ns);
blk_put_request(clone);
* Map cloned bios (bio-based multipath)
static struct pgpath *__map_bio(struct multipath *m, struct bio *bio)
struct pgpath *pgpath;
/* Do we need to select a new pgpath? */
pgpath = READ_ONCE(m->current_pgpath);
if (!pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags))
pgpath = choose_pgpath(m, bio->bi_iter.bi_size);
/* MPATHF_QUEUE_IO might have been cleared by choose_pgpath. */
queue_io = test_bit(MPATHF_QUEUE_IO, &m->flags);
if ((pgpath && queue_io) ||
(!pgpath && test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))) {
/* Queue for the daemon to resubmit */
spin_lock_irqsave(&m->lock, flags);
bio_list_add(&m->queued_bios, bio);
spin_unlock_irqrestore(&m->lock, flags);
/* PG_INIT_REQUIRED cannot be set without QUEUE_IO */
if (queue_io || test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
pg_init_all_paths(m);
queue_work(kmultipathd, &m->process_queued_bios);
return ERR_PTR(-EAGAIN);
static int __multipath_map_bio(struct multipath *m, struct bio *bio,
struct dm_mpath_io *mpio)
struct pgpath *pgpath = __map_bio(m, bio);
return DM_MAPIO_SUBMITTED;
if (__must_push_back(m))
return DM_MAPIO_REQUEUE;
return DM_MAPIO_KILL;
mpio->pgpath = pgpath;
bio_set_dev(bio, pgpath->path.dev->bdev);
bio->bi_opf |= REQ_FAILFAST_TRANSPORT;
if (pgpath->pg->ps.type->start_io)
pgpath->pg->ps.type->start_io(&pgpath->pg->ps,
return DM_MAPIO_REMAPPED;
static int multipath_map_bio(struct dm_target *ti, struct bio *bio)
struct multipath *m = ti->private;
struct dm_mpath_io *mpio = NULL;
multipath_init_per_bio_data(bio, &mpio);
return __multipath_map_bio(m, bio, mpio);
static void process_queued_io_list(struct multipath *m)
if (m->queue_mode == DM_TYPE_REQUEST_BASED)
dm_mq_kick_requeue_list(dm_table_get_md(m->ti->table));
else if (m->queue_mode == DM_TYPE_BIO_BASED)
queue_work(kmultipathd, &m->process_queued_bios);
static void process_queued_bios(struct work_struct *work)
struct bio_list bios;
struct blk_plug plug;
struct multipath *m =
container_of(work, struct multipath, process_queued_bios);
bio_list_init(&bios);
spin_lock_irqsave(&m->lock, flags);
if (bio_list_empty(&m->queued_bios)) {
spin_unlock_irqrestore(&m->lock, flags);
bio_list_merge(&bios, &m->queued_bios);
bio_list_init(&m->queued_bios);
spin_unlock_irqrestore(&m->lock, flags);
blk_start_plug(&plug);
while ((bio = bio_list_pop(&bios))) {
struct dm_mpath_io *mpio = get_mpio_from_bio(bio);
dm_bio_restore(get_bio_details_from_mpio(mpio), bio);
r = __multipath_map_bio(m, bio, mpio);
bio->bi_status = BLK_STS_IOERR;
case DM_MAPIO_REQUEUE:
bio->bi_status = BLK_STS_DM_REQUEUE;
case DM_MAPIO_REMAPPED:
generic_make_request(bio);
case DM_MAPIO_SUBMITTED:
WARN_ONCE(true, "__multipath_map_bio() returned %d\n", r);
blk_finish_plug(&plug);
* If we run out of usable paths, should we queue I/O or error it?
static int queue_if_no_path(struct multipath *m, bool queue_if_no_path,
bool save_old_value, const char *caller)
bool queue_if_no_path_bit, saved_queue_if_no_path_bit;
const char *dm_dev_name = dm_device_name(dm_table_get_md(m->ti->table));
DMDEBUG("%s: %s caller=%s queue_if_no_path=%d save_old_value=%d",
dm_dev_name, __func__, caller, queue_if_no_path, save_old_value);
spin_lock_irqsave(&m->lock, flags);
queue_if_no_path_bit = test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
saved_queue_if_no_path_bit = test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
if (save_old_value) {
if (unlikely(!queue_if_no_path_bit && saved_queue_if_no_path_bit)) {
DMERR("%s: QIFNP disabled but saved as enabled, saving again loses state, not saving!",
assign_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags, queue_if_no_path_bit);
} else if (!queue_if_no_path && saved_queue_if_no_path_bit) {
/* due to "fail_if_no_path" message, need to honor it. */
clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
assign_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags, queue_if_no_path);
DMDEBUG("%s: after %s changes; QIFNP = %d; SQIFNP = %d; DNFS = %d",
dm_dev_name, __func__,
test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags),
test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags),
dm_noflush_suspending(m->ti));
spin_unlock_irqrestore(&m->lock, flags);
if (!queue_if_no_path) {
dm_table_run_md_queue_async(m->ti->table);
process_queued_io_list(m);
* If the queue_if_no_path timeout fires, turn off queue_if_no_path and
* process any queued I/O.
static void queue_if_no_path_timeout_work(struct timer_list *t)
struct multipath *m = from_timer(m, t, nopath_timer);
struct mapped_device *md = dm_table_get_md(m->ti->table);
DMWARN("queue_if_no_path timeout on %s, failing queued IO", dm_device_name(md));
queue_if_no_path(m, false, false, __func__);
* Enable the queue_if_no_path timeout if necessary.
* Called with m->lock held.
static void enable_nopath_timeout(struct multipath *m)
unsigned long queue_if_no_path_timeout =
READ_ONCE(queue_if_no_path_timeout_secs) * HZ;
lockdep_assert_held(&m->lock);
if (queue_if_no_path_timeout > 0 &&
atomic_read(&m->nr_valid_paths) == 0 &&
test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
mod_timer(&m->nopath_timer,
jiffies + queue_if_no_path_timeout);
static void disable_nopath_timeout(struct multipath *m)
del_timer_sync(&m->nopath_timer);
* An event is triggered whenever a path is taken out of use.
* Includes path failure and PG bypass.
static void trigger_event(struct work_struct *work)
struct multipath *m =
container_of(work, struct multipath, trigger_event);
dm_table_event(m->ti->table);
/*-----------------------------------------------------------------
* Constructor/argument parsing:
* <#multipath feature args> [<arg>]*
* <#hw_handler args> [hw_handler [<arg>]*]
* <initial priority group>
* [<selector> <#selector args> [<arg>]*
* <#paths> <#per-path selector args>
* [<path> [<arg>]* ]+ ]+
*---------------------------------------------------------------*/
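/*
 * Purely illustrative example of the constructor string described above
 * (device numbers, sector count and selector are hypothetical, not taken
 * from this file): a dmsetup table line for two priority groups using the
 * round-robin selector with one per-path argument might look like
 *
 *   0 10240000 multipath 1 queue_if_no_path 0 2 1 \
 *       round-robin 0 2 1 8:16 1000 8:32 1000 \
 *       round-robin 0 2 1 8:48 1000 8:64 1000
 *
 * i.e. one feature arg (queue_if_no_path), no hw_handler args, two groups
 * with group 1 initially selected, and two paths per group.
 */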
static int parse_path_selector(struct dm_arg_set *as, struct priority_group *pg,
struct dm_target *ti)
struct path_selector_type *pst;
static const struct dm_arg _args[] = {
{0, 1024, "invalid number of path selector args"},
pst = dm_get_path_selector(dm_shift_arg(as));
ti->error = "unknown path selector type";
r = dm_read_arg_group(_args, as, &ps_argc, &ti->error);
dm_put_path_selector(pst);
r = pst->create(&pg->ps, ps_argc, as->argv);
dm_put_path_selector(pst);
ti->error = "path selector constructor failed";
dm_consume_args(as, ps_argc);
static int setup_scsi_dh(struct block_device *bdev, struct multipath *m,
const char **attached_handler_name, char **error)
struct request_queue *q = bdev_get_queue(bdev);
if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags)) {
if (*attached_handler_name) {
* Clear any hw_handler_params associated with a
* handler that isn't already attached.
if (m->hw_handler_name && strcmp(*attached_handler_name, m->hw_handler_name)) {
kfree(m->hw_handler_params);
m->hw_handler_params = NULL;
* Reset hw_handler_name to match the attached handler
* NB. This modifies the table line to show the actual
* handler instead of the original table passed in.
kfree(m->hw_handler_name);
m->hw_handler_name = *attached_handler_name;
*attached_handler_name = NULL;
if (m->hw_handler_name) {
r = scsi_dh_attach(q, m->hw_handler_name);
char b[BDEVNAME_SIZE];
printk(KERN_INFO "dm-mpath: retaining handler on device %s\n",
*error = "error attaching hardware handler";
if (m->hw_handler_params) {
r = scsi_dh_set_params(q, m->hw_handler_params);
*error = "unable to set hardware handler parameters";
static struct pgpath *parse_path(struct dm_arg_set *as, struct path_selector *ps,
struct dm_target *ti)
struct multipath *m = ti->private;
struct request_queue *q;
const char *attached_handler_name = NULL;
/* we need at least a path arg */
ti->error = "no device given";
return ERR_PTR(-EINVAL);
return ERR_PTR(-ENOMEM);
r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table),
ti->error = "error getting device";
q = bdev_get_queue(p->path.dev->bdev);
attached_handler_name = scsi_dh_attached_handler_name(q, GFP_KERNEL);
if (attached_handler_name || m->hw_handler_name) {
INIT_DELAYED_WORK(&p->activate_path, activate_path_work);
r = setup_scsi_dh(p->path.dev->bdev, m, &attached_handler_name, &ti->error);
kfree(attached_handler_name);
dm_put_device(ti, p->path.dev);
r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
dm_put_device(ti, p->path.dev);
static struct priority_group *parse_priority_group(struct dm_arg_set *as,
static const struct dm_arg _args[] = {
{1, 1024, "invalid number of paths"},
{0, 1024, "invalid number of selector args"}
unsigned i, nr_selector_args, nr_args;
struct priority_group *pg;
struct dm_target *ti = m->ti;
ti->error = "not enough priority group arguments";
return ERR_PTR(-EINVAL);
pg = alloc_priority_group();
ti->error = "couldn't allocate priority group";
return ERR_PTR(-ENOMEM);
r = parse_path_selector(as, pg, ti);
r = dm_read_arg(_args, as, &pg->nr_pgpaths, &ti->error);
r = dm_read_arg(_args + 1, as, &nr_selector_args, &ti->error);
nr_args = 1 + nr_selector_args;
for (i = 0; i < pg->nr_pgpaths; i++) {
struct pgpath *pgpath;
struct dm_arg_set path_args;
if (as->argc < nr_args) {
ti->error = "not enough path parameters";
path_args.argc = nr_args;
path_args.argv = as->argv;
pgpath = parse_path(&path_args, &pg->ps, ti);
if (IS_ERR(pgpath)) {
r = PTR_ERR(pgpath);
list_add_tail(&pgpath->list, &pg->pgpaths);
dm_consume_args(as, nr_args);
free_priority_group(pg, ti);
static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m)
struct dm_target *ti = m->ti;
static const struct dm_arg _args[] = {
{0, 1024, "invalid number of hardware handler args"},
if (dm_read_arg_group(_args, as, &hw_argc, &ti->error))
if (m->queue_mode == DM_TYPE_BIO_BASED) {
dm_consume_args(as, hw_argc);
DMERR("bio-based multipath doesn't allow hardware handler args");
m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL);
if (!m->hw_handler_name)
for (i = 0; i <= hw_argc - 2; i++)
len += strlen(as->argv[i]) + 1;
p = m->hw_handler_params = kzalloc(len, GFP_KERNEL);
ti->error = "memory allocation failed";
j = sprintf(p, "%d", hw_argc - 1);
for (i = 0, p+=j+1; i <= hw_argc - 2; i++, p+=j+1)
j = sprintf(p, "%s", as->argv[i]);
dm_consume_args(as, hw_argc - 1);
kfree(m->hw_handler_name);
m->hw_handler_name = NULL;
static int parse_features(struct dm_arg_set *as, struct multipath *m)
struct dm_target *ti = m->ti;
const char *arg_name;
static const struct dm_arg _args[] = {
{0, 8, "invalid number of feature args"},
{1, 50, "pg_init_retries must be between 1 and 50"},
{0, 60000, "pg_init_delay_msecs must be between 0 and 60000"},
r = dm_read_arg_group(_args, as, &argc, &ti->error);
arg_name = dm_shift_arg(as);
if (!strcasecmp(arg_name, "queue_if_no_path")) {
r = queue_if_no_path(m, true, false, __func__);
if (!strcasecmp(arg_name, "retain_attached_hw_handler")) {
set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags);
if (!strcasecmp(arg_name, "pg_init_retries") &&
r = dm_read_arg(_args + 1, as, &m->pg_init_retries, &ti->error);
if (!strcasecmp(arg_name, "pg_init_delay_msecs") &&
r = dm_read_arg(_args + 2, as, &m->pg_init_delay_msecs, &ti->error);
if (!strcasecmp(arg_name, "queue_mode") &&
const char *queue_mode_name = dm_shift_arg(as);
if (!strcasecmp(queue_mode_name, "bio"))
m->queue_mode = DM_TYPE_BIO_BASED;
else if (!strcasecmp(queue_mode_name, "rq") ||
!strcasecmp(queue_mode_name, "mq"))
m->queue_mode = DM_TYPE_REQUEST_BASED;
ti->error = "Unknown 'queue_mode' requested";
ti->error = "Unrecognised multipath feature request";
} while (argc && !r);
static int multipath_ctr(struct dm_target *ti, unsigned argc, char **argv)
/* target arguments */
static const struct dm_arg _args[] = {
{0, 1024, "invalid number of priority groups"},
{0, 1024, "invalid initial priority group number"},
struct multipath *m;
struct dm_arg_set as;
unsigned pg_count = 0;
unsigned next_pg_num;
unsigned long flags;
m = alloc_multipath(ti);
ti->error = "can't allocate multipath";
r = parse_features(&as, m);
r = alloc_multipath_stage2(ti, m);
r = parse_hw_handler(&as, m);
r = dm_read_arg(_args, &as, &m->nr_priority_groups, &ti->error);
r = dm_read_arg(_args + 1, &as, &next_pg_num, &ti->error);
if ((!m->nr_priority_groups && next_pg_num) ||
(m->nr_priority_groups && !next_pg_num)) {
ti->error = "invalid initial priority group";
/* parse the priority groups */
struct priority_group *pg;
unsigned nr_valid_paths = atomic_read(&m->nr_valid_paths);
pg = parse_priority_group(&as, m);
nr_valid_paths += pg->nr_pgpaths;
atomic_set(&m->nr_valid_paths, nr_valid_paths);
list_add_tail(&pg->list, &m->priority_groups);
pg->pg_num = pg_count;
if (pg_count != m->nr_priority_groups) {
ti->error = "priority group count mismatch";
spin_lock_irqsave(&m->lock, flags);
enable_nopath_timeout(m);
spin_unlock_irqrestore(&m->lock, flags);
ti->num_flush_bios = 1;
ti->num_discard_bios = 1;
ti->num_write_same_bios = 1;
ti->num_write_zeroes_bios = 1;
if (m->queue_mode == DM_TYPE_BIO_BASED)
ti->per_io_data_size = multipath_per_bio_data_size();
ti->per_io_data_size = sizeof(struct dm_mpath_io);
static void multipath_wait_for_pg_init_completion(struct multipath *m)
prepare_to_wait(&m->pg_init_wait, &wait, TASK_UNINTERRUPTIBLE);
if (!atomic_read(&m->pg_init_in_progress))
finish_wait(&m->pg_init_wait, &wait);
static void flush_multipath_work(struct multipath *m)
if (m->hw_handler_name) {
set_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
smp_mb__after_atomic();
if (atomic_read(&m->pg_init_in_progress))
flush_workqueue(kmpath_handlerd);
multipath_wait_for_pg_init_completion(m);
clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags);
smp_mb__after_atomic();
if (m->queue_mode == DM_TYPE_BIO_BASED)
flush_work(&m->process_queued_bios);
flush_work(&m->trigger_event);
static void multipath_dtr(struct dm_target *ti)
struct multipath *m = ti->private;
disable_nopath_timeout(m);
flush_multipath_work(m);
* Take a path out of use.
static int fail_path(struct pgpath *pgpath)
unsigned long flags;
struct multipath *m = pgpath->pg->m;
spin_lock_irqsave(&m->lock, flags);
if (!pgpath->is_active)
DMWARN("%s: Failing path %s.",
dm_device_name(dm_table_get_md(m->ti->table)),
pgpath->path.dev->name);
pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path);
pgpath->is_active = false;
pgpath->fail_count++;
atomic_dec(&m->nr_valid_paths);
if (pgpath == m->current_pgpath)
m->current_pgpath = NULL;
dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti,
pgpath->path.dev->name, atomic_read(&m->nr_valid_paths));
schedule_work(&m->trigger_event);
enable_nopath_timeout(m);
spin_unlock_irqrestore(&m->lock, flags);
* Reinstate a previously-failed path
static int reinstate_path(struct pgpath *pgpath)
int r = 0, run_queue = 0;
unsigned long flags;
struct multipath *m = pgpath->pg->m;
unsigned nr_valid_paths;
spin_lock_irqsave(&m->lock, flags);
if (pgpath->is_active)
DMWARN("%s: Reinstating path %s.",
dm_device_name(dm_table_get_md(m->ti->table)),
pgpath->path.dev->name);
r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path);
pgpath->is_active = true;
nr_valid_paths = atomic_inc_return(&m->nr_valid_paths);
if (nr_valid_paths == 1) {
m->current_pgpath = NULL;
} else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) {
if (queue_work(kmpath_handlerd, &pgpath->activate_path.work))
atomic_inc(&m->pg_init_in_progress);
dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti,
pgpath->path.dev->name, nr_valid_paths);
schedule_work(&m->trigger_event);
spin_unlock_irqrestore(&m->lock, flags);
dm_table_run_md_queue_async(m->ti->table);
process_queued_io_list(m);
if (pgpath->is_active)
disable_nopath_timeout(m);
* Fail or reinstate all paths that match the provided struct dm_dev.
static int action_dev(struct multipath *m, struct dm_dev *dev,
struct pgpath *pgpath;
struct priority_group *pg;
list_for_each_entry(pg, &m->priority_groups, list) {
list_for_each_entry(pgpath, &pg->pgpaths, list) {
if (pgpath->path.dev == dev)
* Temporarily try to avoid having to use the specified PG
static void bypass_pg(struct multipath *m, struct priority_group *pg,
unsigned long flags;
spin_lock_irqsave(&m->lock, flags);
pg->bypassed = bypassed;
m->current_pgpath = NULL;
m->current_pg = NULL;
spin_unlock_irqrestore(&m->lock, flags);
schedule_work(&m->trigger_event);
* Switch to using the specified PG from the next I/O that gets mapped
static int switch_pg_num(struct multipath *m, const char *pgstr)
struct priority_group *pg;
unsigned long flags;
if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
!m->nr_priority_groups || (pgnum > m->nr_priority_groups)) {
DMWARN("invalid PG number supplied to switch_pg_num");
spin_lock_irqsave(&m->lock, flags);
list_for_each_entry(pg, &m->priority_groups, list) {
pg->bypassed = false;
m->current_pgpath = NULL;
m->current_pg = NULL;
spin_unlock_irqrestore(&m->lock, flags);
schedule_work(&m->trigger_event);
* Set/clear bypassed status of a PG.
* PGs are numbered upwards from 1 in the order they were declared.
static int bypass_pg_num(struct multipath *m, const char *pgstr, bool bypassed)
struct priority_group *pg;
if (!pgstr || (sscanf(pgstr, "%u%c", &pgnum, &dummy) != 1) || !pgnum ||
!m->nr_priority_groups || (pgnum > m->nr_priority_groups)) {
DMWARN("invalid PG number supplied to bypass_pg");
list_for_each_entry(pg, &m->priority_groups, list) {
bypass_pg(m, pg, bypassed);
* Should we retry pg_init immediately?
static bool pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath)
unsigned long flags;
bool limit_reached = false;
spin_lock_irqsave(&m->lock, flags);
if (atomic_read(&m->pg_init_count) <= m->pg_init_retries &&
!test_bit(MPATHF_PG_INIT_DISABLED, &m->flags))
set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags);
limit_reached = true;
spin_unlock_irqrestore(&m->lock, flags);
return limit_reached;
static void pg_init_done(void *data, int errors)
struct pgpath *pgpath = data;
struct priority_group *pg = pgpath->pg;
struct multipath *m = pg->m;
unsigned long flags;
bool delay_retry = false;
/* device or driver problems */
if (!m->hw_handler_name) {
DMERR("Could not failover the device: Handler scsi_dh_%s "
"Error %d.", m->hw_handler_name, errors);
* Fail path for now, so we do not ping pong
case SCSI_DH_DEV_TEMP_BUSY:
* Probably doing something like FW upgrade on the
* controller so try the other pg.
bypass_pg(m, pg, true);
/* Wait before retrying. */
case SCSI_DH_IMM_RETRY:
case SCSI_DH_RES_TEMP_UNAVAIL:
if (pg_init_limit_reached(m, pgpath))
case SCSI_DH_DEV_OFFLINED:
* We probably do not want to fail the path for a device
* error, but this is what the old dm did. In future
* patches we can do more advanced handling.
spin_lock_irqsave(&m->lock, flags);
if (pgpath == m->current_pgpath) {
DMERR("Could not failover device. Error %d.", errors);
m->current_pgpath = NULL;
m->current_pg = NULL;
} else if (!test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
pg->bypassed = false;
if (atomic_dec_return(&m->pg_init_in_progress) > 0)
/* Activations of other paths are still ongoing */
if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) {
set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
clear_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags);
if (__pg_init_all_paths(m))
clear_bit(MPATHF_QUEUE_IO, &m->flags);
process_queued_io_list(m);
* Wake up any thread waiting to suspend.
wake_up(&m->pg_init_wait);
spin_unlock_irqrestore(&m->lock, flags);
static void activate_or_offline_path(struct pgpath *pgpath)
struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
if (pgpath->is_active && !blk_queue_dying(q))
scsi_dh_activate(q, pg_init_done, pgpath);
pg_init_done(pgpath, SCSI_DH_DEV_OFFLINED);
static void activate_path_work(struct work_struct *work)
struct pgpath *pgpath =
container_of(work, struct pgpath, activate_path.work);
activate_or_offline_path(pgpath);
static int multipath_end_io(struct dm_target *ti, struct request *clone,
blk_status_t error, union map_info *map_context)
struct dm_mpath_io *mpio = get_mpio(map_context);
struct pgpath *pgpath = mpio->pgpath;
int r = DM_ENDIO_DONE;
* We don't queue any clone request inside the multipath target
* during end I/O handling, since those clone requests don't have
* bio clones. If we queue them inside the multipath target,
* we need to make bio clones, that requires memory allocation.
* (See drivers/md/dm-rq.c:end_clone_bio() about why the clone requests
* don't have bio clones.)
* Instead of queueing the clone request here, we queue the original
* request into dm core, which will remake a clone request and
* clone bios for it and resubmit it later.
if (error && blk_path_error(error)) {
struct multipath *m = ti->private;
if (error == BLK_STS_RESOURCE)
r = DM_ENDIO_DELAY_REQUEUE;
r = DM_ENDIO_REQUEUE;
if (!atomic_read(&m->nr_valid_paths)) {
unsigned long flags;
spin_lock_irqsave(&m->lock, flags);
if (!must_push_back_rq(m)) {
if (error == BLK_STS_IOERR)
/* complete with the original error */
spin_unlock_irqrestore(&m->lock, flags);
struct path_selector *ps = &pgpath->pg->ps;
if (ps->type->end_io)
ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes,
clone->io_start_time_ns);
static int multipath_end_io_bio(struct dm_target *ti, struct bio *clone,
blk_status_t *error)
struct multipath *m = ti->private;
struct dm_mpath_io *mpio = get_mpio_from_bio(clone);
struct pgpath *pgpath = mpio->pgpath;
unsigned long flags;
int r = DM_ENDIO_DONE;
if (!*error || !blk_path_error(*error))
if (!atomic_read(&m->nr_valid_paths)) {
spin_lock_irqsave(&m->lock, flags);
if (!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
if (__must_push_back(m)) {
r = DM_ENDIO_REQUEUE;
*error = BLK_STS_IOERR;
spin_unlock_irqrestore(&m->lock, flags);
spin_unlock_irqrestore(&m->lock, flags);
spin_lock_irqsave(&m->lock, flags);
bio_list_add(&m->queued_bios, clone);
if (!test_bit(MPATHF_QUEUE_IO, &m->flags))
queue_work(kmultipathd, &m->process_queued_bios);
spin_unlock_irqrestore(&m->lock, flags);
r = DM_ENDIO_INCOMPLETE;
struct path_selector *ps = &pgpath->pg->ps;
if (ps->type->end_io)
ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes,
dm_start_time_ns_from_clone(clone));
* Suspend with flush can't complete until all the I/O is processed
* so if the last path fails we must error any remaining I/O.
* - Note that if the freeze_bdev fails while suspending, the
* queue_if_no_path state is lost - userspace should reset it.
* Otherwise, during noflush suspend, queue_if_no_path will not change.
static void multipath_presuspend(struct dm_target *ti)
struct multipath *m = ti->private;
/* FIXME: bio-based shouldn't need to always disable queue_if_no_path */
if (m->queue_mode == DM_TYPE_BIO_BASED || !dm_noflush_suspending(m->ti))
queue_if_no_path(m, false, true, __func__);
static void multipath_postsuspend(struct dm_target *ti)
struct multipath *m = ti->private;
mutex_lock(&m->work_mutex);
flush_multipath_work(m);
mutex_unlock(&m->work_mutex);
* Restore the queue_if_no_path setting.
static void multipath_resume(struct dm_target *ti)
struct multipath *m = ti->private;
unsigned long flags;
spin_lock_irqsave(&m->lock, flags);
if (test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags)) {
set_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
DMDEBUG("%s: %s finished; QIFNP = %d; SQIFNP = %d",
dm_device_name(dm_table_get_md(m->ti->table)), __func__,
test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags),
test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags));
spin_unlock_irqrestore(&m->lock, flags);
* Info output has the following format:
* num_multipath_feature_args [multipath_feature_args]*
* num_handler_status_args [handler_status_args]*
* num_groups init_group_number
* [A|D|E num_ps_status_args [ps_status_args]*
* num_paths num_selector_args
* [path_dev A|F fail_count [selector_args]* ]+ ]+
* Table output has the following format (identical to the constructor string):
* num_feature_args [features_args]*
* num_handler_args hw_handler [hw_handler_args]*
* num_groups init_group_number
* [priority selector-name num_ps_args [ps_args]*
* num_paths num_selector_args [path_dev [selector_args]* ]+ ]+
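*
* Illustrative INFO line for a hypothetical map with one group of two
* active paths, assuming a path selector that emits no status args of its
* own (values such as the 8:16/8:32 device numbers are made up):
*
*   2 0 0  0  1 1  A 0  2 0  8:16 A 0  8:32 A 0
*
* i.e. 2 feature status args (queue_io = 0, pg_init_count = 0), 0 handler
* status args, 1 group with group 1 selected, group state 'A' with 0
* ps_status_args, 2 paths with 0 selector args each, and both path
* devices active ('A') with a fail_count of 0.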
static void multipath_status(struct dm_target *ti, status_type_t type,
unsigned status_flags, char *result, unsigned maxlen)
unsigned long flags;
struct multipath *m = ti->private;
struct priority_group *pg;
spin_lock_irqsave(&m->lock, flags);
if (type == STATUSTYPE_INFO)
DMEMIT("2 %u %u ", test_bit(MPATHF_QUEUE_IO, &m->flags),
atomic_read(&m->pg_init_count));
DMEMIT("%u ", test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) +
(m->pg_init_retries > 0) * 2 +
(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2 +
test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags) +
(m->queue_mode != DM_TYPE_REQUEST_BASED) * 2);
if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
DMEMIT("queue_if_no_path ");
if (m->pg_init_retries)
DMEMIT("pg_init_retries %u ", m->pg_init_retries);
if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT)
DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs);
if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags))
DMEMIT("retain_attached_hw_handler ");
if (m->queue_mode != DM_TYPE_REQUEST_BASED) {
switch(m->queue_mode) {
case DM_TYPE_BIO_BASED:
DMEMIT("queue_mode bio ");
if (!m->hw_handler_name || type == STATUSTYPE_INFO)
DMEMIT("1 %s ", m->hw_handler_name);
DMEMIT("%u ", m->nr_priority_groups);
pg_num = m->next_pg->pg_num;
else if (m->current_pg)
pg_num = m->current_pg->pg_num;
pg_num = (m->nr_priority_groups ? 1 : 0);
DMEMIT("%u ", pg_num);
case STATUSTYPE_INFO:
list_for_each_entry(pg, &m->priority_groups, list) {
state = 'D'; /* Disabled */
else if (pg == m->current_pg)
state = 'A'; /* Currently Active */
state = 'E'; /* Enabled */
DMEMIT("%c ", state);
if (pg->ps.type->status)
sz += pg->ps.type->status(&pg->ps, NULL, type,
DMEMIT("%u %u ", pg->nr_pgpaths,
pg->ps.type->info_args);
list_for_each_entry(p, &pg->pgpaths, list) {
DMEMIT("%s %s %u ", p->path.dev->name,
p->is_active ? "A" : "F",
if (pg->ps.type->status)
sz += pg->ps.type->status(&pg->ps,
&p->path, type, result + sz,
case STATUSTYPE_TABLE:
list_for_each_entry(pg, &m->priority_groups, list) {
DMEMIT("%s ", pg->ps.type->name);
if (pg->ps.type->status)
sz += pg->ps.type->status(&pg->ps, NULL, type,
DMEMIT("%u %u ", pg->nr_pgpaths,
pg->ps.type->table_args);
list_for_each_entry(p, &pg->pgpaths, list) {
DMEMIT("%s ", p->path.dev->name);
if (pg->ps.type->status)
sz += pg->ps.type->status(&pg->ps,
&p->path, type, result + sz,
spin_unlock_irqrestore(&m->lock, flags);
static int multipath_message(struct dm_target *ti, unsigned argc, char **argv,
char *result, unsigned maxlen)
struct multipath *m = ti->private;
unsigned long flags;
mutex_lock(&m->work_mutex);
if (dm_suspended(ti)) {
if (!strcasecmp(argv[0], "queue_if_no_path")) {
r = queue_if_no_path(m, true, false, __func__);
spin_lock_irqsave(&m->lock, flags);
enable_nopath_timeout(m);
spin_unlock_irqrestore(&m->lock, flags);
} else if (!strcasecmp(argv[0], "fail_if_no_path")) {
r = queue_if_no_path(m, false, false, __func__);
disable_nopath_timeout(m);
DMWARN("Invalid multipath message arguments. Expected 2 arguments, got %d.", argc);
if (!strcasecmp(argv[0], "disable_group")) {
r = bypass_pg_num(m, argv[1], true);
} else if (!strcasecmp(argv[0], "enable_group")) {
r = bypass_pg_num(m, argv[1], false);
} else if (!strcasecmp(argv[0], "switch_group")) {
r = switch_pg_num(m, argv[1]);
} else if (!strcasecmp(argv[0], "reinstate_path"))
action = reinstate_path;
else if (!strcasecmp(argv[0], "fail_path"))
DMWARN("Unrecognised multipath message received: %s", argv[0]);
r = dm_get_device(ti, argv[1], dm_table_get_mode(ti->table), &dev);
DMWARN("message: error getting device %s",
r = action_dev(m, dev, action);
dm_put_device(ti, dev);
mutex_unlock(&m->work_mutex);
static int multipath_prepare_ioctl(struct dm_target *ti,
struct block_device **bdev)
struct multipath *m = ti->private;
struct pgpath *current_pgpath;
unsigned long flags;
current_pgpath = READ_ONCE(m->current_pgpath);
if (!current_pgpath || !test_bit(MPATHF_QUEUE_IO, &m->flags))
current_pgpath = choose_pgpath(m, 0);
if (current_pgpath) {
if (!test_bit(MPATHF_QUEUE_IO, &m->flags)) {
*bdev = current_pgpath->path.dev->bdev;
/* pg_init has not started or completed */
/* No path is available */
spin_lock_irqsave(&m->lock, flags);
if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags))
spin_unlock_irqrestore(&m->lock, flags);
if (r == -ENOTCONN) {
if (!READ_ONCE(m->current_pg)) {
/* Path status changed, redo selection */
(void) choose_pgpath(m, 0);
spin_lock_irqsave(&m->lock, flags);
if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags))
(void) __pg_init_all_paths(m);
spin_unlock_irqrestore(&m->lock, flags);
dm_table_run_md_queue_async(m->ti->table);
process_queued_io_list(m);
* Only pass ioctls through if the device sizes match exactly.
if (!r && ti->len != i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT)
static int multipath_iterate_devices(struct dm_target *ti,
iterate_devices_callout_fn fn, void *data)
struct multipath *m = ti->private;
struct priority_group *pg;
list_for_each_entry(pg, &m->priority_groups, list) {
list_for_each_entry(p, &pg->pgpaths, list) {
ret = fn(ti, p->path.dev, ti->begin, ti->len, data);
static int pgpath_busy(struct pgpath *pgpath)
struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev);
return blk_lld_busy(q);
* We return "busy" only when we can map I/Os but the underlying devices
* are busy (so even if we mapped I/Os now, those I/Os would just wait on
* the underlying queue).
* In other words, if we want to kill I/Os or queue them inside us
* due to map unavailability, we don't return "busy". Otherwise,
* dm core won't give us the I/Os and we can't do what we want.
static int multipath_busy(struct dm_target *ti)
bool busy = false, has_active = false;
struct multipath *m = ti->private;
struct priority_group *pg, *next_pg;
struct pgpath *pgpath;
/* pg_init in progress */
if (atomic_read(&m->pg_init_in_progress))
/* no paths available, for blk-mq: rely on IO mapping to delay requeue */
if (!atomic_read(&m->nr_valid_paths)) {
unsigned long flags;
spin_lock_irqsave(&m->lock, flags);
if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) {
spin_unlock_irqrestore(&m->lock, flags);
return (m->queue_mode != DM_TYPE_REQUEST_BASED);
spin_unlock_irqrestore(&m->lock, flags);
/* Guess which priority_group will be used at next mapping time */
pg = READ_ONCE(m->current_pg);
next_pg = READ_ONCE(m->next_pg);
if (unlikely(!READ_ONCE(m->current_pgpath) && next_pg))
* We don't know which pg will be used at next mapping time.
* We don't call choose_pgpath() here to avoid triggering
* pg_init just by busy checking.
* So we don't know whether the underlying devices we will be using
* at next mapping time are busy or not. Just try mapping.
* If there is one non-busy active path at least, the path selector
* will be able to select it. So we consider such a pg as not busy.
list_for_each_entry(pgpath, &pg->pgpaths, list) {
if (pgpath->is_active) {
if (!pgpath_busy(pgpath)) {
* No active path in this pg, so this pg won't be used and
* the current_pg will be changed at next mapping time.
* We need to try mapping to determine it.
/*-----------------------------------------------------------------
*---------------------------------------------------------------*/
static struct target_type multipath_target = {
.name = "multipath",
.version = {1, 14, 0},
.features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE |
DM_TARGET_PASSES_INTEGRITY,
.module = THIS_MODULE,
.ctr = multipath_ctr,
.dtr = multipath_dtr,
.clone_and_map_rq = multipath_clone_and_map,
.release_clone_rq = multipath_release_clone,
.rq_end_io = multipath_end_io,
.map = multipath_map_bio,
.end_io = multipath_end_io_bio,
.presuspend = multipath_presuspend,
.postsuspend = multipath_postsuspend,
.resume = multipath_resume,
.status = multipath_status,
.message = multipath_message,
.prepare_ioctl = multipath_prepare_ioctl,
.iterate_devices = multipath_iterate_devices,
.busy = multipath_busy,
static int __init dm_multipath_init(void)
kmultipathd = alloc_workqueue("kmpathd", WQ_MEM_RECLAIM, 0);
DMERR("failed to create workqueue kmpathd");
goto bad_alloc_kmultipathd;
* A separate workqueue is used to handle the device handlers,
* to avoid overloading the existing workqueue. Overloading the
* existing workqueue would also create a bottleneck in the
* path of storage hardware device activation.
kmpath_handlerd = alloc_ordered_workqueue("kmpath_handlerd",
if (!kmpath_handlerd) {
DMERR("failed to create workqueue kmpath_handlerd");
goto bad_alloc_kmpath_handlerd;
r = dm_register_target(&multipath_target);
DMERR("request-based register failed %d", r);
goto bad_register_target;
bad_register_target:
destroy_workqueue(kmpath_handlerd);
bad_alloc_kmpath_handlerd:
destroy_workqueue(kmultipathd);
bad_alloc_kmultipathd:
static void __exit dm_multipath_exit(void)
destroy_workqueue(kmpath_handlerd);
destroy_workqueue(kmultipathd);
dm_unregister_target(&multipath_target);
module_init(dm_multipath_init);
module_exit(dm_multipath_exit);
module_param_named(queue_if_no_path_timeout_secs,
queue_if_no_path_timeout_secs, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(queue_if_no_path_timeout_secs, "No available paths queue IO timeout in seconds");
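/*
 * Illustrative usage (assuming dm-multipath is built as a module): the
 * timeout can be set at load time or, since the parameter is writable
 * (S_IRUGO | S_IWUSR), adjusted at runtime, e.g.
 *
 *   modprobe dm-multipath queue_if_no_path_timeout_secs=120
 *   echo 120 > /sys/module/dm_multipath/parameters/queue_if_no_path_timeout_secs
 *
 * where 120 is an arbitrary example value; 0 (the default) disables the
 * timeout.
 */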
MODULE_DESCRIPTION(DM_NAME " multipath target");
MODULE_AUTHOR("Sistina Software <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");