/*
 * Copyright (C) 2010-2011 Neil Brown
 * Copyright (C) 2010-2016 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/slab.h>
#include <linux/module.h>

#include <linux/device-mapper.h>

#define DM_MSG_PREFIX "raid"
#define MAX_RAID_DEVICES 253 /* md-raid kernel limit */

static bool devices_handle_discard_safely = false;

/*
 * The following flags are used by dm-raid.c to set up the array state.
 * They must be cleared before md_run is called.
 */
#define FirstUse 10 /* rdev flag */
/*
 * Two DM devices, one to hold metadata and one to hold the
 * actual data/parity. The reason for this is to not confuse
 * ti->len and give more flexibility in altering size and
 * characteristics.
 *
 * While it is possible for this device to be associated
 * with a different physical device than the data_dev, it
 * is intended for it to be the same.
 *    |--------- Physical Device ---------|
 *    |- meta_dev -|------ data_dev ------|
 */
struct raid_dev {
        struct dm_dev *meta_dev;
        struct dm_dev *data_dev;
        struct md_rdev rdev;
};

/*
 * Flags for rs->ctr_flags field.
 */
#define CTR_FLAG_SYNC              0x1   /* 1 */ /* Not with raid0! */
#define CTR_FLAG_NOSYNC            0x2   /* 1 */ /* Not with raid0! */
#define CTR_FLAG_REBUILD           0x4   /* 2 */ /* Not with raid0! */
#define CTR_FLAG_DAEMON_SLEEP      0x8   /* 2 */ /* Not with raid0! */
#define CTR_FLAG_MIN_RECOVERY_RATE 0x10  /* 2 */ /* Not with raid0! */
#define CTR_FLAG_MAX_RECOVERY_RATE 0x20  /* 2 */ /* Not with raid0! */
#define CTR_FLAG_MAX_WRITE_BEHIND  0x40  /* 2 */ /* Only with raid1! */
#define CTR_FLAG_WRITE_MOSTLY      0x80  /* 2 */ /* Only with raid1! */
#define CTR_FLAG_STRIPE_CACHE      0x100 /* 2 */ /* Only with raid4/5/6! */
#define CTR_FLAG_REGION_SIZE       0x200 /* 2 */ /* Not with raid0! */
#define CTR_FLAG_RAID10_COPIES     0x400 /* 2 */ /* Only with raid10 */
#define CTR_FLAG_RAID10_FORMAT     0x800 /* 2 */ /* Only with raid10 */
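/*
 * NB: the bracketed 1/2 markers above appear to denote how many words of
 * the table line each option occupies: 1 for bare options such as
 * [no]sync, 2 for <key value> pairs (cf. CTR_FLAG_OPTIONS_NO_ARGS and
 * CTR_FLAG_OPTIONS_ONE_ARG below).
 */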
/*
 * Definitions of various constructor flags to
 * be used in checks of valid / invalid flags
 */

/* Define all 'sync' flags */
#define CTR_FLAGS_ANY_SYNC (CTR_FLAG_SYNC | CTR_FLAG_NOSYNC)

/* Define flags for options without argument (e.g. 'nosync') */
#define CTR_FLAG_OPTIONS_NO_ARGS CTR_FLAGS_ANY_SYNC
/* Define flags for options with one argument (e.g. 'delta_disks +2') */
#define CTR_FLAG_OPTIONS_ONE_ARG (CTR_FLAG_REBUILD | \
                                  CTR_FLAG_WRITE_MOSTLY | \
                                  CTR_FLAG_DAEMON_SLEEP | \
                                  CTR_FLAG_MIN_RECOVERY_RATE | \
                                  CTR_FLAG_MAX_RECOVERY_RATE | \
                                  CTR_FLAG_MAX_WRITE_BEHIND | \
                                  CTR_FLAG_STRIPE_CACHE | \
                                  CTR_FLAG_REGION_SIZE | \
                                  CTR_FLAG_RAID10_COPIES | \
                                  CTR_FLAG_RAID10_FORMAT)

/* All ctr optional arguments */
#define ALL_CTR_FLAGS (CTR_FLAG_OPTIONS_NO_ARGS | \
                       CTR_FLAG_OPTIONS_ONE_ARG)
/* Invalid options definitions per raid level... */

/* "raid0" does not accept any options */
#define RAID0_INVALID_FLAGS ALL_CTR_FLAGS

/* "raid1" does not accept stripe cache or any raid10 options */
#define RAID1_INVALID_FLAGS (CTR_FLAG_STRIPE_CACHE | \
                             CTR_FLAG_RAID10_COPIES | \
                             CTR_FLAG_RAID10_FORMAT)

/* "raid10" does not accept any raid1 or stripe cache options */
#define RAID10_INVALID_FLAGS (CTR_FLAG_WRITE_MOSTLY | \
                              CTR_FLAG_MAX_WRITE_BEHIND | \
                              CTR_FLAG_STRIPE_CACHE)

/*
 * "raid4/5/6" do not accept any raid1 or raid10 specific options
 *
 * "raid6" does not accept "nosync", because it is not guaranteed
 * that both parity and q-syndrome are being written properly with
 * any writes
 */
#define RAID45_INVALID_FLAGS (CTR_FLAG_WRITE_MOSTLY | \
                              CTR_FLAG_MAX_WRITE_BEHIND | \
                              CTR_FLAG_RAID10_FORMAT | \
                              CTR_FLAG_RAID10_COPIES)
#define RAID6_INVALID_FLAGS (CTR_FLAG_NOSYNC | RAID45_INVALID_FLAGS)
/* ...invalid options definitions per raid level */
struct raid_set {
        struct dm_target *ti;

        uint32_t bitmap_loaded;
        uint32_t ctr_flags;

        struct mddev md;
        struct raid_type *raid_type;
        struct dm_target_callbacks callbacks;

        struct raid_dev dev[0];
};
/* Supported raid types and properties. */
static struct raid_type {
        const char *name;               /* RAID algorithm. */
        const char *descr;              /* Descriptor text for logging. */
        const unsigned parity_devs;     /* # of parity devices. */
        const unsigned minimal_devs;    /* minimal # of devices in set. */
        const unsigned level;           /* RAID level. */
        const unsigned algorithm;       /* RAID algorithm. */
} raid_types[] = {
        {"raid0",    "RAID0 (striping)",              0, 2, 0,  0 /* NONE */},
        {"raid1",    "RAID1 (mirroring)",             0, 2, 1,  0 /* NONE */},
        {"raid10",   "RAID10 (striped mirrors)",      0, 2, 10, UINT_MAX /* Varies */},
        {"raid4",    "RAID4 (dedicated parity disk)", 1, 2, 5,  ALGORITHM_PARITY_0},
        {"raid5_la", "RAID5 (left asymmetric)",       1, 2, 5,  ALGORITHM_LEFT_ASYMMETRIC},
        {"raid5_ra", "RAID5 (right asymmetric)",      1, 2, 5,  ALGORITHM_RIGHT_ASYMMETRIC},
        {"raid5_ls", "RAID5 (left symmetric)",        1, 2, 5,  ALGORITHM_LEFT_SYMMETRIC},
        {"raid5_rs", "RAID5 (right symmetric)",       1, 2, 5,  ALGORITHM_RIGHT_SYMMETRIC},
        {"raid6_zr", "RAID6 (zero restart)",          2, 4, 6,  ALGORITHM_ROTATING_ZERO_RESTART},
        {"raid6_nr", "RAID6 (N restart)",             2, 4, 6,  ALGORITHM_ROTATING_N_RESTART},
        {"raid6_nc", "RAID6 (N continue)",            2, 4, 6,  ALGORITHM_ROTATING_N_CONTINUE}
};
/* True, if @v is in inclusive range [@min, @max] */
static bool _in_range(long v, long min, long max)
{
        return v >= min && v <= max;
}

/* ctr flag bit manipulation... */
/* Set single @flag in @flags */
static void _set_flag(uint32_t flag, uint32_t *flags)
{
        WARN_ON_ONCE(hweight32(flag) != 1);
        *flags |= flag;
}

/* Test single @flag in @flags */
static bool _test_flag(uint32_t flag, uint32_t flags)
{
        WARN_ON_ONCE(hweight32(flag) != 1);
        return (flag & flags) ? true : false;
}

/* Return true if single @flag is set in @*flags, else set it and return false */
static bool _test_and_set_flag(uint32_t flag, uint32_t *flags)
{
        if (_test_flag(flag, *flags))
                return true;

        _set_flag(flag, flags);
        return false;
}
/* ...ctr and runtime flag bit manipulation */
/* All table line arguments are defined here */
static struct arg_name_flag {
        const uint32_t flag;
        const char *name;
} _arg_name_flags[] = {
        { CTR_FLAG_SYNC, "sync"},
        { CTR_FLAG_NOSYNC, "nosync"},
        { CTR_FLAG_REBUILD, "rebuild"},
        { CTR_FLAG_DAEMON_SLEEP, "daemon_sleep"},
        { CTR_FLAG_MIN_RECOVERY_RATE, "min_recovery_rate"},
        { CTR_FLAG_MAX_RECOVERY_RATE, "max_recovery_rate"},
        { CTR_FLAG_MAX_WRITE_BEHIND, "max_write_behind"},
        { CTR_FLAG_WRITE_MOSTLY, "writemostly"},
        { CTR_FLAG_STRIPE_CACHE, "stripe_cache"},
        { CTR_FLAG_REGION_SIZE, "region_size"},
        { CTR_FLAG_RAID10_COPIES, "raid10_copies"},
        { CTR_FLAG_RAID10_FORMAT, "raid10_format"},
};
/* Return argument name string for given @flag */
static const char *_argname_by_flag(const uint32_t flag)
{
        if (hweight32(flag) == 1) {
                struct arg_name_flag *anf = _arg_name_flags + ARRAY_SIZE(_arg_name_flags);

                while (anf-- > _arg_name_flags)
                        if (_test_flag(flag, anf->flag))
                                return anf->name;
        } else
                DMERR("%s called with more than one flag!", __func__);

        return NULL;
}
/*
 * bool helpers to test for various raid levels of a raid type
 */

/* Return true, if raid type in @rt is raid0 */
static bool rt_is_raid0(struct raid_type *rt)
{
        return !rt->level;
}

/* Return true, if raid type in @rt is raid1 */
static bool rt_is_raid1(struct raid_type *rt)
{
        return rt->level == 1;
}

/* Return true, if raid type in @rt is raid10 */
static bool rt_is_raid10(struct raid_type *rt)
{
        return rt->level == 10;
}

/* Return true, if raid type in @rt is raid4/5 */
static bool rt_is_raid45(struct raid_type *rt)
{
        return _in_range(rt->level, 4, 5);
}

/* Return true, if raid type in @rt is raid6 */
static bool rt_is_raid6(struct raid_type *rt)
{
        return rt->level == 6;
}
/* END: raid level bools */

/*
 * Convenience functions to set ti->error to @errmsg and
 * return @r in order to shorten code in a lot of places
 */
static int ti_error_ret(struct dm_target *ti, const char *errmsg, int r)
{
        ti->error = (char *) errmsg;
        return r;
}

static int ti_error_einval(struct dm_target *ti, const char *errmsg)
{
        return ti_error_ret(ti, errmsg, -EINVAL);
}
/* END: convenience functions to set ti->error to @errmsg... */
/* Return invalid ctr flags for the raid level of @rs */
static uint32_t _invalid_flags(struct raid_set *rs)
{
        if (rt_is_raid0(rs->raid_type))
                return RAID0_INVALID_FLAGS;
        else if (rt_is_raid1(rs->raid_type))
                return RAID1_INVALID_FLAGS;
        else if (rt_is_raid10(rs->raid_type))
                return RAID10_INVALID_FLAGS;
        else if (rt_is_raid45(rs->raid_type))
                return RAID45_INVALID_FLAGS;
        else if (rt_is_raid6(rs->raid_type))
                return RAID6_INVALID_FLAGS;

        return ~0; /* Treat any unknown raid level as all-invalid */
}
/*
 * Check for any invalid flags set on @rs defined by bitset @invalid_flags
 *
 * Has to be called after parsing of the ctr flags!
 */
static int rs_check_for_invalid_flags(struct raid_set *rs)
{
        unsigned int ctr_flags = rs->ctr_flags, flag = 0;
        const uint32_t invalid_flags = _invalid_flags(rs);

        while ((ctr_flags &= ~flag)) {
                flag = 1 << __ffs(ctr_flags);

                if (_test_flag(flag, rs->ctr_flags) &&
                    _test_flag(flag, invalid_flags))
                        return ti_error_einval(rs->ti, "Invalid flag combination");
        }

        return 0;
}
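/*
 * Example (illustrative): for a raid0 set with ctr_flags == 0x401
 * (sync + raid10_copies), the loop above visits bit 0 first, finds it
 * in RAID0_INVALID_FLAGS (== ALL_CTR_FLAGS) and fails with -EINVAL
 * before bit 10 is even examined.
 */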
static char *raid10_md_layout_to_format(int layout)
{
        /*
         * Bits 16 and 17 stand for "offset" and "use_far_sets"
         * Refer to MD's raid10.c for details
         */
        if ((layout & 0x10000) && (layout & 0x20000))
                return "offset";

        if ((layout & 0xFF) > 1)
                return "near";

        return "far";
}

static unsigned raid10_md_layout_to_copies(int layout)
{
        if ((layout & 0xFF) > 1)
                return layout & 0xFF;

        return (layout >> 8) & 0xFF;
}
static int raid10_format_to_md_layout(char *format, unsigned copies)
{
        unsigned n = 1, f = 1;

        if (!strcasecmp("near", format))
                n = copies;
        else
                f = copies;

        if (!strcasecmp("offset", format))
                return 0x30000 | (f << 8) | n;

        if (!strcasecmp("far", format))
                return 0x20000 | (f << 8) | n;

        return (f << 8) | n;
}
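/*
 * Worked example (illustrative): with 2 copies,
 *   "near"   -> 0x102   (f=1, n=2)
 *   "far"    -> 0x20201 (f=2, n=1)
 *   "offset" -> 0x30201 (f=2, n=1)
 * and raid10_md_layout_to_copies() recovers 2 from each of these layouts.
 */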
static struct raid_type *get_raid_type(const char *name)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(raid_types); i++)
                if (!strcmp(raid_types[i].name, name))
                        return &raid_types[i];

        return NULL;
}
static struct raid_set *context_alloc(struct dm_target *ti, struct raid_type *raid_type, unsigned raid_devs)
{
        unsigned i;
        struct raid_set *rs;

        if (raid_devs <= raid_type->parity_devs)
                return ERR_PTR(ti_error_einval(ti, "Insufficient number of devices"));

        rs = kzalloc(sizeof(*rs) + raid_devs * sizeof(rs->dev[0]), GFP_KERNEL);
        if (!rs)
                return ERR_PTR(ti_error_ret(ti, "Cannot allocate raid context", -ENOMEM));

        mddev_init(&rs->md);

        rs->ti = ti;
        rs->raid_type = raid_type;
        rs->md.raid_disks = raid_devs;
        rs->md.level = raid_type->level;
        rs->md.new_level = rs->md.level;
        rs->md.layout = raid_type->algorithm;
        rs->md.new_layout = rs->md.layout;
        rs->md.delta_disks = 0;
        rs->md.recovery_cp = 0;

        for (i = 0; i < raid_devs; i++)
                md_rdev_init(&rs->dev[i].rdev);

        /*
         * Remaining items to be initialized by further RAID params:
         *  rs->md.persistent
         *  rs->md.external
         *  rs->md.chunk_sectors
         *  rs->md.new_chunk_sectors
         *  rs->md.dev_sectors
         */

        return rs;
}
static void context_free(struct raid_set *rs)
{
        int i;

        for (i = 0; i < rs->md.raid_disks; i++) {
                if (rs->dev[i].meta_dev)
                        dm_put_device(rs->ti, rs->dev[i].meta_dev);
                md_rdev_clear(&rs->dev[i].rdev);
                if (rs->dev[i].data_dev)
                        dm_put_device(rs->ti, rs->dev[i].data_dev);
        }

        kfree(rs);
}
/*
 * For every device we have two words
 *  <meta_dev>: meta device name or '-' if missing
 *  <data_dev>: data device name or '-' if missing
 *
 * The following are permitted:
 *    - -
 *    - <data_dev>
 *    <meta_dev> <data_dev>
 *
 * The following is not allowed:
 *    <meta_dev> -
 *
 * This code parses those words. If there is a failure,
 * the caller must use context_free to unwind the operations.
 */
static int parse_dev_params(struct raid_set *rs, struct dm_arg_set *as)
{
        int i;
        int rebuild = 0;
        int metadata_available = 0;
        int r = 0;
        const char *arg;

        /* Put off the number of raid devices argument to get to dev pairs */
        arg = dm_shift_arg(as);
        if (!arg)
                return -EINVAL;

        for (i = 0; i < rs->md.raid_disks; i++) {
                rs->dev[i].rdev.raid_disk = i;

                rs->dev[i].meta_dev = NULL;
                rs->dev[i].data_dev = NULL;

                /*
                 * There are no offsets, since there is a separate device
                 * for data and metadata.
                 */
                rs->dev[i].rdev.data_offset = 0;
                rs->dev[i].rdev.mddev = &rs->md;

                arg = dm_shift_arg(as);
                if (!arg)
                        return -EINVAL;

                if (strcmp(arg, "-")) {
                        r = dm_get_device(rs->ti, arg,
                                          dm_table_get_mode(rs->ti->table),
                                          &rs->dev[i].meta_dev);
                        if (r)
                                return ti_error_ret(rs->ti, "RAID metadata device lookup failure", r);

                        rs->dev[i].rdev.sb_page = alloc_page(GFP_KERNEL);
                        if (!rs->dev[i].rdev.sb_page)
                                return ti_error_ret(rs->ti, "Failed to allocate superblock page", -ENOMEM);
                }
                arg = dm_shift_arg(as);
                if (!arg)
                        return -EINVAL;

                if (!strcmp(arg, "-")) {
                        if (!test_bit(In_sync, &rs->dev[i].rdev.flags) &&
                            (!rs->dev[i].rdev.recovery_offset))
                                return ti_error_einval(rs->ti, "Drive designated for rebuild not specified");

                        if (rs->dev[i].meta_dev)
                                return ti_error_einval(rs->ti, "No data device supplied with metadata device");

                        continue;
                }

                r = dm_get_device(rs->ti, arg,
                                  dm_table_get_mode(rs->ti->table),
                                  &rs->dev[i].data_dev);
                if (r)
                        return ti_error_ret(rs->ti, "RAID device lookup failure", r);

                if (rs->dev[i].meta_dev) {
                        metadata_available = 1;
                        rs->dev[i].rdev.meta_bdev = rs->dev[i].meta_dev->bdev;
                }
                rs->dev[i].rdev.bdev = rs->dev[i].data_dev->bdev;
                list_add(&rs->dev[i].rdev.same_set, &rs->md.disks);
                if (!test_bit(In_sync, &rs->dev[i].rdev.flags))
                        rebuild++;
        }
        if (metadata_available) {
                rs->md.external = 0;
                rs->md.persistent = 1;
                rs->md.major_version = 2;
        } else if (rebuild && !rs->md.recovery_cp) {
                /*
                 * Without metadata, we will not be able to tell if the array
                 * is in-sync or not - we must assume it is not. Therefore,
                 * it is impossible to rebuild a drive.
                 *
                 * Even if there is metadata, the on-disk information may
                 * indicate that the array is not in-sync and it will then
                 * fail at that time.
                 *
                 * User could specify 'nosync' option if desperate.
                 */
                DMERR("Unable to rebuild drive while array is not in-sync");
                return ti_error_einval(rs->ti, "Unable to rebuild drive while array is not in-sync");
        }

        return 0;
}
/*
 * validate_region_size
 * @rs
 * @region_size: region size in sectors. If 0, pick a size (4MiB default).
 *
 * Set rs->md.bitmap_info.chunksize (which really refers to 'region size').
 * Ensure that (ti->len/region_size < 2^21) - required by MD bitmap.
 *
 * Returns: 0 on success, -EINVAL on failure.
 */
static int validate_region_size(struct raid_set *rs, unsigned long region_size)
{
        unsigned long min_region_size = rs->ti->len / (1 << 21);

        if (!region_size) {
                /*
                 * Choose a reasonable default. All figures in sectors.
                 */
                if (min_region_size > (1 << 13)) {
                        /* If not a power of 2, make it the next power of 2 */
                        region_size = roundup_pow_of_two(min_region_size);
                        DMINFO("Choosing default region size of %lu sectors",
                               region_size);
                } else {
                        DMINFO("Choosing default region size of 4MiB");
                        region_size = 1 << 13; /* sectors */
                }
        } else {
                /*
                 * Validate user-supplied value.
                 */
                if (region_size > rs->ti->len)
                        return ti_error_einval(rs->ti, "Supplied region size is too large");

                if (region_size < min_region_size) {
                        DMERR("Supplied region_size (%lu sectors) below minimum (%lu)",
                              region_size, min_region_size);
                        return ti_error_einval(rs->ti, "Supplied region size is too small");
                }

                if (!is_power_of_2(region_size))
                        return ti_error_einval(rs->ti, "Region size is not a power of 2");

                if (region_size < rs->md.chunk_sectors)
                        return ti_error_einval(rs->ti, "Region size is smaller than the chunk size");
        }

        /*
         * Convert sectors to bytes.
         */
        rs->md.bitmap_info.chunksize = (region_size << 9);

        return 0;
}
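/*
 * Example (illustrative): a 1TiB target is 2^31 sectors, so
 * min_region_size = 2^31 / 2^21 = 1024 sectors and the 4MiB (2^13
 * sectors) default applies; a 16TiB target yields a minimum of 2^14
 * sectors, so the default becomes roundup_pow_of_two(2^14) = 16384
 * sectors (8MiB) instead.
 */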
/*
 * validate_raid_redundancy
 * @rs
 *
 * Determine if there are enough devices in the array that haven't
 * failed (or are being rebuilt) to form a usable array.
 *
 * Returns: 0 on success, -EINVAL on failure.
 */
static int validate_raid_redundancy(struct raid_set *rs)
{
        unsigned i, rebuild_cnt = 0;
        unsigned rebuilds_per_group = 0, copies, d;
        unsigned group_size, last_group_start;

        for (i = 0; i < rs->md.raid_disks; i++)
                if (!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
                    !rs->dev[i].rdev.sb_page)
                        rebuild_cnt++;
        switch (rs->raid_type->level) {
        case 1:
                if (rebuild_cnt >= rs->md.raid_disks)
                        return -EINVAL;
                break;
        case 4:
        case 5:
        case 6:
                if (rebuild_cnt > rs->raid_type->parity_devs)
                        return -EINVAL;
                break;
        case 10:
                copies = raid10_md_layout_to_copies(rs->md.layout);
                if (rebuild_cnt < copies)
                        break;

                /*
                 * It is possible to have a higher rebuild count for RAID10,
                 * as long as the failed devices occur in different mirror
                 * groups (i.e. different stripes).
                 *
                 * When checking "near" format, make sure no adjacent devices
                 * have failed beyond what can be handled. In addition to the
                 * simple case where the number of devices is a multiple of the
                 * number of copies, we must also handle cases where the number
                 * of devices is not a multiple of the number of copies.
                 * E.g. dev1 dev2 dev3 dev4 dev5
                 *          A    A    B    B    C
                 *          C    D    D    E    E
                 */
                if (!strcmp("near", raid10_md_layout_to_format(rs->md.layout))) {
                        for (i = 0; i < rs->md.raid_disks * copies; i++) {
                                if (!(i % copies))
                                        rebuilds_per_group = 0;
                                d = i % rs->md.raid_disks;
                                if ((!rs->dev[d].rdev.sb_page ||
                                     !test_bit(In_sync, &rs->dev[d].rdev.flags)) &&
                                    (++rebuilds_per_group >= copies))
                                        return -EINVAL;
                        }
                        break;
                }
                /*
                 * When checking "far" and "offset" formats, we need to ensure
                 * that the device that holds its copy is not also dead or
                 * being rebuilt. (Note that "far" and "offset" formats only
                 * support two copies right now. These formats also only ever
                 * use the 'use_far_sets' variant.)
                 *
                 * This check is somewhat complicated by the need to account
                 * for arrays that are not a multiple of (far) copies. This
                 * results in the need to treat the last (potentially larger)
                 * group on the end as special.
                 */
                group_size = (rs->md.raid_disks / copies);
                last_group_start = (rs->md.raid_disks / group_size) - 1;
                last_group_start *= group_size;
                for (i = 0; i < rs->md.raid_disks; i++) {
                        if (!(i % copies) && !(i > last_group_start))
                                rebuilds_per_group = 0;
                        if ((!rs->dev[i].rdev.sb_page ||
                             !test_bit(In_sync, &rs->dev[i].rdev.flags)) &&
                            (++rebuilds_per_group >= copies))
                                return -EINVAL;
                }
                break;
        default:
                if (rebuild_cnt)
                        return -EINVAL;
        }

        return 0;
}
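/*
 * Example (illustrative): with 5 devices and 2 (far) copies,
 * group_size = 5 / 2 = 2 and last_group_start = (5 / 2 - 1) * 2 = 2,
 * so the loop treats {0,1} and the larger trailing group {2,3,4} as
 * the mirror groups, resetting rebuilds_per_group at each boundary.
 */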
/*
 * Possible arguments are...
 *	<chunk_size> [optional_args]
 *
 * Argument definitions
 *    <chunk_size>			The number of sectors per disk that
 *					will form the "stripe"
 *    [[no]sync]			Force or prevent recovery of the
 *					entire array
 *    [rebuild <idx>]			Rebuild the drive indicated by the index
 *    [daemon_sleep <ms>]		Time between bitmap daemon work to
 *					clear bits
 *    [min_recovery_rate <kB/sec/disk>]	Throttle RAID initialization
 *    [max_recovery_rate <kB/sec/disk>]	Throttle RAID initialization
 *    [write_mostly <idx>]		Indicate a write mostly drive via index
 *    [max_write_behind <sectors>]	See '--write-behind=' (man mdadm)
 *    [stripe_cache <sectors>]		Stripe cache size for higher RAIDs
 *    [region_size <sectors>]		Defines granularity of bitmap
 *
 * RAID10-only options:
 *    [raid10_copies <# copies>]	Number of copies. (Default: 2)
 *    [raid10_format <near|far|offset>]	Layout algorithm. (Default: near)
 */
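/*
 * Example (illustrative): <#raid_params> counts every following word,
 * so "raid5_ls 5 128 region_size 1024 max_recovery_rate 2048 ..."
 * passes num_raid_params = 5: the positional chunk_size (128 sectors)
 * plus two <key value> pairs.
 */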
static int parse_raid_params(struct raid_set *rs, struct dm_arg_set *as,
                             unsigned num_raid_params)
{
        char *raid10_format = "near";
        unsigned raid10_copies = 2;
        unsigned i;
        unsigned value, region_size = 0;
        sector_t sectors_per_dev = rs->ti->len;
        sector_t max_io_len;
        const char *arg, *key;
        struct raid_dev *rd;

        arg = dm_shift_arg(as);
        num_raid_params--; /* Account for chunk_size argument */

        if (kstrtouint(arg, 10, &value) < 0)
                return ti_error_einval(rs->ti, "Bad numerical argument given for chunk_size");

        /*
         * First, parse the in-order required arguments
         * "chunk_size" is the only argument of this type.
         */
        if (rs->raid_type->level == 1) {
                if (value)
                        DMERR("Ignoring chunk size parameter for RAID 1");
                value = 0;
        } else if (!is_power_of_2(value))
                return ti_error_einval(rs->ti, "Chunk size must be a power of 2");
        else if (value < 8)
                return ti_error_einval(rs->ti, "Chunk size value is too small");

        rs->md.new_chunk_sectors = rs->md.chunk_sectors = value;
        /*
         * We set each individual device as In_sync with a completed
         * 'recovery_offset'. If there has been a device failure or
         * replacement then one of the following cases applies:
         *
         * 1) User specifies 'rebuild'.
         *    - Device is reset when param is read.
         * 2) A new device is supplied.
         *    - No matching superblock found, resets device.
         * 3) Device failure was transient and returns on reload.
         *    - Failure noticed, resets device for bitmap replay.
         * 4) Device hadn't completed recovery after previous failure.
         *    - Superblock is read and overrides recovery_offset.
         *
         * What is found in the superblocks of the devices is always
         * authoritative, unless 'rebuild' or '[no]sync' was specified.
         */
        for (i = 0; i < rs->md.raid_disks; i++) {
                set_bit(In_sync, &rs->dev[i].rdev.flags);
                rs->dev[i].rdev.recovery_offset = MaxSector;
        }

        /*
         * Second, parse the unordered optional arguments
         */
        for (i = 0; i < num_raid_params; i++) {
                arg = dm_shift_arg(as);
                if (!arg)
                        return ti_error_einval(rs->ti, "Not enough raid parameters given");
                if (!strcasecmp(arg, "nosync")) {
                        rs->md.recovery_cp = MaxSector;
                        _set_flag(CTR_FLAG_NOSYNC, &rs->ctr_flags);
                        continue;
                }
                if (!strcasecmp(arg, "sync")) {
                        rs->md.recovery_cp = 0;
                        _set_flag(CTR_FLAG_SYNC, &rs->ctr_flags);
                        continue;
                }

                key = arg;
                arg = dm_shift_arg(as);
                i++; /* Account for the argument pairs */
                if (!arg)
                        return ti_error_einval(rs->ti, "Wrong number of raid parameters given");
                /*
                 * Parameters that take a string value are checked here.
                 */
                if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_RAID10_FORMAT))) {
                        if (_test_and_set_flag(CTR_FLAG_RAID10_FORMAT, &rs->ctr_flags))
                                return ti_error_einval(rs->ti, "Only one raid10_format argument pair allowed");
                        if (rs->raid_type->level != 10)
                                return ti_error_einval(rs->ti, "'raid10_format' is an invalid parameter for this RAID type");
                        if (strcmp("near", arg) &&
                            strcmp("far", arg) &&
                            strcmp("offset", arg))
                                return ti_error_einval(rs->ti, "Invalid 'raid10_format' value given");

                        raid10_format = (char *) arg;
                        continue;
                }
                if (kstrtouint(arg, 10, &value) < 0)
                        return ti_error_einval(rs->ti, "Bad numerical argument given in raid params");

                if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_REBUILD))) {
                        /*
                         * "rebuild" is being passed in by userspace to provide
                         * indexes of replaced devices and to set up additional
                         * devices on raid level takeover.
                         */
                        if (!_in_range(value, 0, rs->md.raid_disks - 1))
                                return ti_error_einval(rs->ti, "Invalid rebuild index given");

                        rd = rs->dev + value;
                        clear_bit(In_sync, &rd->rdev.flags);
                        clear_bit(Faulty, &rd->rdev.flags);
                        rd->rdev.recovery_offset = 0;
                        _set_flag(CTR_FLAG_REBUILD, &rs->ctr_flags);
                } else if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_WRITE_MOSTLY))) {
                        if (rs->raid_type->level != 1)
                                return ti_error_einval(rs->ti, "write_mostly option is only valid for RAID1");

                        if (!_in_range(value, 0, rs->md.raid_disks - 1))
                                return ti_error_einval(rs->ti, "Invalid write_mostly index given");

                        set_bit(WriteMostly, &rs->dev[value].rdev.flags);
                        _set_flag(CTR_FLAG_WRITE_MOSTLY, &rs->ctr_flags);
                } else if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_MAX_WRITE_BEHIND))) {
                        if (rs->raid_type->level != 1)
                                return ti_error_einval(rs->ti, "max_write_behind option is only valid for RAID1");

                        if (_test_and_set_flag(CTR_FLAG_MAX_WRITE_BEHIND, &rs->ctr_flags))
                                return ti_error_einval(rs->ti, "Only one max_write_behind argument pair allowed");

                        /*
                         * In device-mapper, we specify things in sectors, but
                         * MD records this value in kB
                         */
                        value /= 2;
                        if (value > COUNTER_MAX)
                                return ti_error_einval(rs->ti, "Max write-behind limit out of range");

                        rs->md.bitmap_info.max_write_behind = value;
                } else if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_DAEMON_SLEEP))) {
                        if (_test_and_set_flag(CTR_FLAG_DAEMON_SLEEP, &rs->ctr_flags))
                                return ti_error_einval(rs->ti, "Only one daemon_sleep argument pair allowed");
                        if (!value || (value > MAX_SCHEDULE_TIMEOUT))
                                return ti_error_einval(rs->ti, "daemon sleep period out of range");
                        rs->md.bitmap_info.daemon_sleep = value;
                } else if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_STRIPE_CACHE))) {
                        if (_test_and_set_flag(CTR_FLAG_STRIPE_CACHE, &rs->ctr_flags))
                                return ti_error_einval(rs->ti, "Only one stripe_cache argument pair allowed");

                        /*
                         * In device-mapper, we specify things in sectors, but
                         * MD records this value in kB
                         */
                        value /= 2;

                        if (!_in_range(rs->raid_type->level, 4, 6))
                                return ti_error_einval(rs->ti, "Inappropriate argument: stripe_cache");
                        if (raid5_set_cache_size(&rs->md, (int)value))
                                return ti_error_einval(rs->ti, "Bad stripe_cache size");
                } else if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_MIN_RECOVERY_RATE))) {
                        if (_test_and_set_flag(CTR_FLAG_MIN_RECOVERY_RATE, &rs->ctr_flags))
                                return ti_error_einval(rs->ti, "Only one min_recovery_rate argument pair allowed");
                        if (value > INT_MAX)
                                return ti_error_einval(rs->ti, "min_recovery_rate out of range");
                        rs->md.sync_speed_min = (int)value;
                } else if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_MAX_RECOVERY_RATE))) {
                        if (_test_and_set_flag(CTR_FLAG_MAX_RECOVERY_RATE, &rs->ctr_flags))
                                return ti_error_einval(rs->ti, "Only one max_recovery_rate argument pair allowed");
                        if (value > INT_MAX)
                                return ti_error_einval(rs->ti, "max_recovery_rate out of range");
                        rs->md.sync_speed_max = (int)value;
                } else if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_REGION_SIZE))) {
                        if (_test_and_set_flag(CTR_FLAG_REGION_SIZE, &rs->ctr_flags))
                                return ti_error_einval(rs->ti, "Only one region_size argument pair allowed");
                        region_size = value;
                } else if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_RAID10_COPIES))) {
                        if (_test_and_set_flag(CTR_FLAG_RAID10_COPIES, &rs->ctr_flags))
                                return ti_error_einval(rs->ti, "Only one raid10_copies argument pair allowed");

                        if (!_in_range(value, 2, rs->md.raid_disks))
                                return ti_error_einval(rs->ti, "Bad value for 'raid10_copies'");

                        raid10_copies = value;
                } else {
                        DMERR("Unable to parse RAID parameter: %s", key);
                        return ti_error_einval(rs->ti, "Unable to parse RAID parameters");
                }
        }

        if (validate_region_size(rs, region_size))
                return -EINVAL;

        if (rs->md.chunk_sectors)
                max_io_len = rs->md.chunk_sectors;
        else
                max_io_len = region_size;

        if (dm_set_target_max_io_len(rs->ti, max_io_len))
                return -EINVAL;

        if (rs->raid_type->level == 10) {
                if (raid10_copies > rs->md.raid_disks)
                        return ti_error_einval(rs->ti, "Not enough devices to satisfy specification");

                /*
                 * If the format is not "near", we only support
                 * two copies at the moment.
                 */
                if (strcmp("near", raid10_format) && (raid10_copies > 2))
                        return ti_error_einval(rs->ti, "Too many copies for given RAID10 format.");

                /* (Len * #mirrors) / #devices */
                sectors_per_dev = rs->ti->len * raid10_copies;
                sector_div(sectors_per_dev, rs->md.raid_disks);

                rs->md.layout = raid10_format_to_md_layout(raid10_format,
                                                           raid10_copies);
                rs->md.new_layout = rs->md.layout;
        } else if ((!rs->raid_type->level || rs->raid_type->level > 1) &&
                   sector_div(sectors_per_dev,
                              (rs->md.raid_disks - rs->raid_type->parity_devs)))
                return ti_error_einval(rs->ti, "Target length not divisible by number of data devices");

        rs->md.dev_sectors = sectors_per_dev;

        /* Assume there are no metadata devices until the drives are parsed */
        rs->md.persistent = 0;
        rs->md.external = 1;

        /* Check, if any invalid ctr arguments have been passed in for the raid level */
        return rs_check_for_invalid_flags(rs);
}
static void do_table_event(struct work_struct *ws)
{
        struct raid_set *rs = container_of(ws, struct raid_set, md.event_work);

        dm_table_event(rs->ti->table);
}

static int raid_is_congested(struct dm_target_callbacks *cb, int bits)
{
        struct raid_set *rs = container_of(cb, struct raid_set, callbacks);

        return mddev_congested(&rs->md, bits);
}
/*
 * This structure is never routinely used by userspace, unlike md superblocks.
 * Devices with this superblock should only ever be accessed via device-mapper.
 */
#define DM_RAID_MAGIC 0x64526D44
struct dm_raid_superblock {
        __le32 magic;           /* "DmRd" */
        __le32 features;        /* Used to indicate possible future changes */

        __le32 num_devices;     /* Number of devices in this array. (Max 64) */
        __le32 array_position;  /* The position of this drive in the array */

        __le64 events;          /* Incremented by md when superblock updated */
        __le64 failed_devices;  /* Bit field of devices to indicate failures */

        /*
         * This offset tracks the progress of the repair or replacement of
         * an individual drive.
         */
        __le64 disk_recovery_offset;

        /*
         * This offset tracks the progress of the initial array
         * synchronisation/parity calculation.
         */
        __le64 array_resync_offset;

        /*
         * RAID characteristics
         */
        __le32 level;
        __le32 layout;
        __le32 stripe_sectors;

        /* Remainder of a logical block is zero-filled when writing (see super_sync()). */
} __packed;
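/*
 * On-disk layout note (inferred from super_load()/super_validate()):
 * the superblock occupies the first logical block (512-4096 bytes) of
 * the metadata device, and the write-intent bitmap is placed at a
 * fixed 4096-byte offset behind it for RAID levels != 0.
 */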
static int read_disk_sb(struct md_rdev *rdev, int size)
{
        BUG_ON(!rdev->sb_page);

        if (rdev->sb_loaded)
                return 0;

        if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, 0, 1)) {
                DMERR("Failed to read superblock of device at position %d",
                      rdev->raid_disk);
                md_error(rdev->mddev, rdev);
                return -EINVAL;
        }

        rdev->sb_loaded = 1;

        return 0;
}
static void super_sync(struct mddev *mddev, struct md_rdev *rdev)
{
        int i;
        uint64_t failed_devices;
        struct dm_raid_superblock *sb;
        struct raid_set *rs = container_of(mddev, struct raid_set, md);

        sb = page_address(rdev->sb_page);
        failed_devices = le64_to_cpu(sb->failed_devices);

        for (i = 0; i < mddev->raid_disks; i++)
                if (!rs->dev[i].data_dev ||
                    test_bit(Faulty, &(rs->dev[i].rdev.flags)))
                        failed_devices |= (1ULL << i);

        memset(sb + 1, 0, rdev->sb_size - sizeof(*sb));

        sb->magic = cpu_to_le32(DM_RAID_MAGIC);
        sb->features = cpu_to_le32(0); /* No features yet */

        sb->num_devices = cpu_to_le32(mddev->raid_disks);
        sb->array_position = cpu_to_le32(rdev->raid_disk);

        sb->events = cpu_to_le64(mddev->events);
        sb->failed_devices = cpu_to_le64(failed_devices);

        sb->disk_recovery_offset = cpu_to_le64(rdev->recovery_offset);
        sb->array_resync_offset = cpu_to_le64(mddev->recovery_cp);

        sb->level = cpu_to_le32(mddev->level);
        sb->layout = cpu_to_le32(mddev->layout);
        sb->stripe_sectors = cpu_to_le32(mddev->chunk_sectors);
}
/*
 * super_load
 *
 * This function creates a superblock if one is not found on the device
 * and will decide which superblock to use if there's a choice.
 *
 * Return: 1 if use rdev, 0 if use refdev, -Exxx otherwise
 */
static int super_load(struct md_rdev *rdev, struct md_rdev *refdev)
{
        int r;
        struct dm_raid_superblock *sb;
        struct dm_raid_superblock *refsb;
        uint64_t events_sb, events_refsb;

        rdev->sb_start = 0;
        rdev->sb_size = bdev_logical_block_size(rdev->meta_bdev);
        if (rdev->sb_size < sizeof(*sb) || rdev->sb_size > PAGE_SIZE) {
                DMERR("superblock size of a logical block is no longer valid");
                return -EINVAL;
        }

        r = read_disk_sb(rdev, rdev->sb_size);
        if (r)
                return r;

        sb = page_address(rdev->sb_page);

        /*
         * Two cases that we want to write new superblocks and rebuild:
         * 1) New device (no matching magic number)
         * 2) Device specified for rebuild (!In_sync w/ offset == 0)
         */
        if ((sb->magic != cpu_to_le32(DM_RAID_MAGIC)) ||
            (!test_bit(In_sync, &rdev->flags) && !rdev->recovery_offset)) {
                super_sync(rdev->mddev, rdev);

                set_bit(FirstUse, &rdev->flags);

                /* Force writing of superblocks to disk */
                set_bit(MD_CHANGE_DEVS, &rdev->mddev->flags);

                /* Any superblock is better than none, choose that if given */
                return refdev ? 0 : 1;
        }

        if (!refdev)
                return 1;

        events_sb = le64_to_cpu(sb->events);

        refsb = page_address(refdev->sb_page);
        events_refsb = le64_to_cpu(refsb->events);

        return (events_sb > events_refsb) ? 1 : 0;
}
static int super_init_validation(struct mddev *mddev, struct md_rdev *rdev)
{
        int role;
        struct raid_set *rs = container_of(mddev, struct raid_set, md);
        uint64_t events_sb;
        uint64_t failed_devices;
        struct dm_raid_superblock *sb;
        uint32_t new_devs = 0;
        uint32_t rebuilds = 0;
        struct md_rdev *r;
        struct dm_raid_superblock *sb2;

        sb = page_address(rdev->sb_page);
        events_sb = le64_to_cpu(sb->events);
        failed_devices = le64_to_cpu(sb->failed_devices);

        /*
         * Initialise to 1 if this is a new superblock.
         */
        mddev->events = events_sb ? : 1;
        /*
         * Reshaping is not currently allowed
         */
        if (le32_to_cpu(sb->level) != mddev->level) {
                DMERR("Reshaping arrays not yet supported. (RAID level change)");
                return -EINVAL;
        }
        if (le32_to_cpu(sb->layout) != mddev->layout) {
                DMERR("Reshaping arrays not yet supported. (RAID layout change)");
                DMERR("  0x%X vs 0x%X", le32_to_cpu(sb->layout), mddev->layout);
                DMERR("  Old layout: %s w/ %d copies",
                      raid10_md_layout_to_format(le32_to_cpu(sb->layout)),
                      raid10_md_layout_to_copies(le32_to_cpu(sb->layout)));
                DMERR("  New layout: %s w/ %d copies",
                      raid10_md_layout_to_format(mddev->layout),
                      raid10_md_layout_to_copies(mddev->layout));
                return -EINVAL;
        }
        if (le32_to_cpu(sb->stripe_sectors) != mddev->chunk_sectors) {
                DMERR("Reshaping arrays not yet supported. (stripe sectors change)");
                return -EINVAL;
        }

        /* We can only change the number of devices in RAID1 right now */
        if ((rs->raid_type->level != 1) &&
            (le32_to_cpu(sb->num_devices) != mddev->raid_disks)) {
                DMERR("Reshaping arrays not yet supported. (device count change)");
                return -EINVAL;
        }

        if (!(rs->ctr_flags & (CTR_FLAG_SYNC | CTR_FLAG_NOSYNC)))
                mddev->recovery_cp = le64_to_cpu(sb->array_resync_offset);
        /*
         * During load, we set FirstUse if a new superblock was written.
         * There are two reasons we might not have a superblock:
         * 1) The array is brand new - in which case, all of the
         *    devices must have their In_sync bit set. Also,
         *    recovery_cp must be 0, unless forced.
         * 2) This is a new device being added to an old array
         *    and the new device needs to be rebuilt - in which
         *    case the In_sync bit will /not/ be set and
         *    recovery_cp must be MaxSector.
         */
        rdev_for_each(r, mddev) {
                if (!test_bit(In_sync, &r->flags)) {
                        DMINFO("Device %d specified for rebuild: Clearing superblock",
                               r->raid_disk);
                        rebuilds++;
                } else if (test_bit(FirstUse, &r->flags))
                        new_devs++;
        }

        if (!rebuilds) {
                if (new_devs == mddev->raid_disks) {
                        DMINFO("Superblocks created for new array");
                        set_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
                } else if (new_devs) {
                        DMERR("New device injected into existing array without 'rebuild' parameter specified");
                        return -EINVAL;
                }
        } else if (new_devs) {
                DMERR("'rebuild' devices cannot be injected into an array with other first-time devices");
                return -EINVAL;
        } else if (mddev->recovery_cp != MaxSector) {
                DMERR("'rebuild' specified while array is not in-sync");
                return -EINVAL;
        }
        /*
         * Now we set the Faulty bit for those devices that are
         * recorded in the superblock as failed.
         */
        rdev_for_each(r, mddev) {
                if (!r->sb_page)
                        continue;
                sb2 = page_address(r->sb_page);
                sb2->failed_devices = 0;

                /*
                 * Check for any device re-ordering.
                 */
                if (!test_bit(FirstUse, &r->flags) && (r->raid_disk >= 0)) {
                        role = le32_to_cpu(sb2->array_position);
                        if (role != r->raid_disk) {
                                if (rs->raid_type->level != 1)
                                        return ti_error_einval(rs->ti, "Cannot change device positions in RAID array");

                                DMINFO("RAID1 device #%d now at position #%d",
                                       role, r->raid_disk);
                        }

                        /*
                         * Partial recovery is performed on
                         * returning failed devices.
                         */
                        if (failed_devices & (1ULL << role))
                                set_bit(Faulty, &r->flags);
                }
        }

        return 0;
}
static int super_validate(struct raid_set *rs, struct md_rdev *rdev)
{
        struct mddev *mddev = &rs->md;
        struct dm_raid_superblock *sb = page_address(rdev->sb_page);

        /*
         * If mddev->events is not set, we know we have not yet initialized
         * the array.
         */
        if (!mddev->events && super_init_validation(mddev, rdev))
                return -EINVAL;

        if (le32_to_cpu(sb->features)) {
                rs->ti->error = "Unable to assemble array: No feature flags supported yet";
                return -EINVAL;
        }

        /* Enable bitmap creation for RAID levels != 0 */
        mddev->bitmap_info.offset = (rs->raid_type->level) ? to_sector(4096) : 0;
        rdev->mddev->bitmap_info.default_offset = mddev->bitmap_info.offset;

        if (!test_bit(FirstUse, &rdev->flags)) {
                rdev->recovery_offset = le64_to_cpu(sb->disk_recovery_offset);
                if (rdev->recovery_offset != MaxSector)
                        clear_bit(In_sync, &rdev->flags);
        }

        /*
         * If a device comes back, set it as not In_sync and no longer faulty.
         */
        if (test_bit(Faulty, &rdev->flags)) {
                clear_bit(Faulty, &rdev->flags);
                clear_bit(In_sync, &rdev->flags);
                rdev->saved_raid_disk = rdev->raid_disk;
                rdev->recovery_offset = 0;
        }

        clear_bit(FirstUse, &rdev->flags);

        return 0;
}
/*
 * Analyse superblocks and select the freshest.
 */
static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
{
        int r;
        struct raid_dev *dev;
        struct md_rdev *rdev, *tmp, *freshest;
        struct mddev *mddev = &rs->md;

        freshest = NULL;
        rdev_for_each_safe(rdev, tmp, mddev) {
                /*
                 * Skipping super_load due to CTR_FLAG_SYNC will cause
                 * the array to undergo initialization again as
                 * though it were new. This is the intended effect
                 * of the "sync" directive.
                 *
                 * When reshaping capability is added, we must ensure
                 * that the "sync" directive is disallowed during the
                 * reshape.
                 */
                rdev->sectors = to_sector(i_size_read(rdev->bdev->bd_inode));

                if (rs->ctr_flags & CTR_FLAG_SYNC)
                        continue;

                if (!rdev->meta_bdev)
                        continue;

                r = super_load(rdev, freshest);

                switch (r) {
                case 1:
                        freshest = rdev;
                        break;
                case 0:
                        break;
                default:
                        dev = container_of(rdev, struct raid_dev, rdev);
                        if (dev->meta_dev)
                                dm_put_device(ti, dev->meta_dev);

                        dev->meta_dev = NULL;
                        rdev->meta_bdev = NULL;

                        if (rdev->sb_page)
                                put_page(rdev->sb_page);

                        rdev->sb_page = NULL;

                        rdev->sb_loaded = 0;

                        /*
                         * We might be able to salvage the data device
                         * even though the meta device has failed. For
                         * now, we behave as though '- -' had been
                         * set for this device in the table.
                         */
                        if (dev->data_dev)
                                dm_put_device(ti, dev->data_dev);

                        dev->data_dev = NULL;
                        rdev->bdev = NULL;

                        list_del(&rdev->same_set);
                }
        }

        if (!freshest)
                return 0;

        if (validate_raid_redundancy(rs))
                return ti_error_einval(rs->ti, "Insufficient redundancy to activate array");

        /*
         * Validation of the freshest device provides the source of
         * validation for the remaining devices.
         */
        if (super_validate(rs, freshest))
                return ti_error_einval(rs->ti, "Unable to assemble array: Invalid superblocks");

        rdev_for_each(rdev, mddev)
                if ((rdev != freshest) && super_validate(rs, rdev))
                        return -EINVAL;

        return 0;
}
/*
 * Enable/disable discard support on RAID set depending on
 * RAID level and discard properties of underlying RAID members.
 */
static void configure_discard_support(struct dm_target *ti, struct raid_set *rs)
{
        int i;
        bool raid456;

        /* Assume discards not supported until after checks below. */
        ti->discards_supported = false;

        /* RAID level 4,5,6 require discard_zeroes_data for data integrity! */
        raid456 = (rs->md.level == 4 || rs->md.level == 5 || rs->md.level == 6);

        for (i = 0; i < rs->md.raid_disks; i++) {
                struct request_queue *q;

                if (!rs->dev[i].rdev.bdev)
                        return;

                q = bdev_get_queue(rs->dev[i].rdev.bdev);
                if (!q || !blk_queue_discard(q))
                        return;

                if (raid456) {
                        if (!q->limits.discard_zeroes_data)
                                return;
                        if (!devices_handle_discard_safely) {
                                DMERR("raid456 discard support disabled due to discard_zeroes_data uncertainty.");
                                DMERR("Set dm-raid.devices_handle_discard_safely=Y to override.");
                                return;
                        }
                }
        }

        /* All RAID members properly support discards */
        ti->discards_supported = true;

        /*
         * RAID1 and RAID10 personalities require bio splitting,
         * RAID0/4/5/6 don't and process large discard bios properly.
         */
        ti->split_discard_bios = !!(rs->md.level == 1 || rs->md.level == 10);
        ti->num_discard_bios = 1;
}
/*
 * Construct a RAID0/1/10/4/5/6 mapping:
 * Args:
 *	<raid_type> <#raid_params> <raid_params>{0,} \
 *	<#raid_devs> [<meta_dev1> <dev1>]{1,}
 *
 * <raid_params> varies by <raid_type>. See 'parse_raid_params' for
 * details on possible <raid_params>.
 *
 * Userspace is free to initialize the metadata devices, hence the superblocks,
 * to enforce recreation of the array based on the passed in table parameters.
 */
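/*
 * Example (illustrative, hypothetical devices):
 *   dmsetup create r0 --table "0 41943040 raid raid5_ls \
 *	3 128 region_size 1024 \
 *	3 - /dev/sda1 - /dev/sdb1 - /dev/sdc1"
 * i.e. 3 raid params (chunk_size plus one <key value> pair) and 3
 * metadata/data device pairs, here without metadata devices ('-').
 */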
static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
        int r;
        struct raid_type *rt;
        unsigned num_raid_params, num_raid_devs;
        struct raid_set *rs = NULL;
        const char *arg;
        struct dm_arg_set as = { argc, argv }, as_nrd;
        struct dm_arg _args[] = {
                { 0, as.argc, "Cannot understand number of raid parameters" },
                { 1, 254, "Cannot understand number of raid devices parameters" }
        };

        /* Must have <raid_type> (rs is not allocated yet, so use ti directly) */
        arg = dm_shift_arg(&as);
        if (!arg)
                return ti_error_einval(ti, "No arguments");

        rt = get_raid_type(arg);
        if (!rt)
                return ti_error_einval(ti, "Unrecognised raid_type");

        /* Must have <#raid_params> */
        if (dm_read_arg_group(_args, &as, &num_raid_params, &ti->error))
                return -EINVAL;

        /* number of raid device tuples <meta_dev data_dev> */
        as_nrd = as;
        dm_consume_args(&as_nrd, num_raid_params);
        _args[1].max = (as_nrd.argc - 1) / 2;
        if (dm_read_arg(_args + 1, &as_nrd, &num_raid_devs, &ti->error))
                return -EINVAL;

        if (!_in_range(num_raid_devs, 1, MAX_RAID_DEVICES))
                return ti_error_einval(ti, "Invalid number of supplied raid devices");
        rs = context_alloc(ti, rt, num_raid_devs);
        if (IS_ERR(rs))
                return PTR_ERR(rs);

        r = parse_raid_params(rs, &as, num_raid_params);
        if (r)
                goto bad;

        r = parse_dev_params(rs, &as);
        if (r)
                goto bad;

        rs->md.sync_super = super_sync;
        r = analyse_superblocks(ti, rs);
        if (r)
                goto bad;

        INIT_WORK(&rs->md.event_work, do_table_event);
        ti->private = rs;
        ti->num_flush_bios = 1;

        /*
         * Disable/enable discard support on RAID set.
         */
        configure_discard_support(ti, rs);

        /* Has to be held on running the array */
        mddev_lock_nointr(&rs->md);
        r = md_run(&rs->md);
        rs->md.in_sync = 0; /* Assume already marked dirty */
        mddev_unlock(&rs->md);

        if (r) {
                ti->error = "Failed to run raid array";
                goto bad;
        }

        if (ti->len != rs->md.array_sectors) {
                r = ti_error_einval(ti, "Array size does not match requested target length");
                goto size_mismatch;
        }

        rs->callbacks.congested_fn = raid_is_congested;
        dm_table_add_target_callbacks(ti->table, &rs->callbacks);

        mddev_suspend(&rs->md);

        return 0;

size_mismatch:
        md_stop(&rs->md);
bad:
        context_free(rs);

        return r;
}
static void raid_dtr(struct dm_target *ti)
{
        struct raid_set *rs = ti->private;

        list_del_init(&rs->callbacks.list);
        md_stop(&rs->md);
        context_free(rs);
}

static int raid_map(struct dm_target *ti, struct bio *bio)
{
        struct raid_set *rs = ti->private;
        struct mddev *mddev = &rs->md;

        mddev->pers->make_request(mddev, bio);

        return DM_MAPIO_SUBMITTED;
}
static const char *decipher_sync_action(struct mddev *mddev)
{
        if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
                return "frozen";

        if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
            (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) {
                if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
                        return "reshape";

                if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
                        if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
                                return "resync";
                        else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
                                return "check";
                        return "repair";
                }

                if (test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
                        return "recover";
        }

        return "idle";
}
static void raid_status(struct dm_target *ti, status_type_t type,
                        unsigned status_flags, char *result, unsigned maxlen)
{
        struct raid_set *rs = ti->private;
        unsigned raid_param_cnt = 1; /* at least 1 for chunksize */
        unsigned sz = 0;
        int i, array_in_sync = 0;
        sector_t sync;

        switch (type) {
        case STATUSTYPE_INFO:
                DMEMIT("%s %d ", rs->raid_type->name, rs->md.raid_disks);

                if (rs->raid_type->level) {
                        if (test_bit(MD_RECOVERY_RUNNING, &rs->md.recovery))
                                sync = rs->md.curr_resync_completed;
                        else
                                sync = rs->md.recovery_cp;

                        if (sync >= rs->md.resync_max_sectors) {
                                /* Sync complete. */
                                array_in_sync = 1;
                                sync = rs->md.resync_max_sectors;
                        } else if (test_bit(MD_RECOVERY_REQUESTED, &rs->md.recovery)) {
                                /*
                                 * If "check" or "repair" is occurring, the array has
                                 * undergone an initial sync and the health characters
                                 * should not be 'a' anymore.
                                 */
                                array_in_sync = 1;
                        } else {
                                /*
                                 * The array may be doing an initial sync, or it may
                                 * be rebuilding individual components. If all the
                                 * devices are In_sync, then it is the array that is
                                 * being initialized.
                                 */
                                for (i = 0; i < rs->md.raid_disks; i++)
                                        if (!test_bit(In_sync, &rs->dev[i].rdev.flags))
                                                array_in_sync = 1;
                        }
                } else {
                        /* RAID0 */
                        array_in_sync = 1;
                        sync = rs->md.resync_max_sectors;
                }
                /*
                 * Status characters:
                 *  'D' = Dead/Failed device
                 *  'a' = Alive but not in-sync
                 *  'A' = Alive and in-sync
                 */
                for (i = 0; i < rs->md.raid_disks; i++) {
                        if (test_bit(Faulty, &rs->dev[i].rdev.flags))
                                DMEMIT("D");
                        else if (!array_in_sync ||
                                 !test_bit(In_sync, &rs->dev[i].rdev.flags))
                                DMEMIT("a");
                        else
                                DMEMIT("A");
                }

                /*
                 * In-sync ratio:
                 * The in-sync ratio shows the progress of:
                 * - Initializing the array
                 * - Rebuilding a subset of devices of the array
                 * The user can distinguish between the two by referring
                 * to the status characters.
                 */
                DMEMIT(" %llu/%llu",
                       (unsigned long long) sync,
                       (unsigned long long) rs->md.resync_max_sectors);

                /*
                 * Sync action:
                 * See Documentation/device-mapper/dm-raid.txt for
                 * information on each of these states.
                 */
                DMEMIT(" %s", decipher_sync_action(&rs->md));

                /*
                 * resync_mismatches/mismatch_cnt
                 * This field shows the number of discrepancies found when
                 * performing a "check" of the array.
                 */
                DMEMIT(" %llu",
                       (strcmp(rs->md.last_sync_action, "check")) ? 0 :
                       (unsigned long long)
                       atomic64_read(&rs->md.resync_mismatches));
                break;
        case STATUSTYPE_TABLE:
                /* The string you would use to construct this array */
                for (i = 0; i < rs->md.raid_disks; i++) {
                        if ((rs->ctr_flags & CTR_FLAG_REBUILD) &&
                            rs->dev[i].data_dev &&
                            !test_bit(In_sync, &rs->dev[i].rdev.flags))
                                raid_param_cnt += 2; /* for rebuilds */
                        if (rs->dev[i].data_dev &&
                            test_bit(WriteMostly, &rs->dev[i].rdev.flags))
                                raid_param_cnt += 2;
                }

                raid_param_cnt += (hweight32(rs->ctr_flags & ~CTR_FLAG_REBUILD) * 2);
                if (rs->ctr_flags & (CTR_FLAG_SYNC | CTR_FLAG_NOSYNC))
                        raid_param_cnt--;

                DMEMIT("%s %u %u", rs->raid_type->name,
                       raid_param_cnt, rs->md.chunk_sectors);
                if ((rs->ctr_flags & CTR_FLAG_SYNC) &&
                    (rs->md.recovery_cp == MaxSector))
                        DMEMIT(" sync");
                if (rs->ctr_flags & CTR_FLAG_NOSYNC)
                        DMEMIT(" nosync");

                for (i = 0; i < rs->md.raid_disks; i++)
                        if ((rs->ctr_flags & CTR_FLAG_REBUILD) &&
                            rs->dev[i].data_dev &&
                            !test_bit(In_sync, &rs->dev[i].rdev.flags))
                                DMEMIT(" rebuild %u", i);

                if (rs->ctr_flags & CTR_FLAG_DAEMON_SLEEP)
                        DMEMIT(" daemon_sleep %lu",
                               rs->md.bitmap_info.daemon_sleep);

                if (rs->ctr_flags & CTR_FLAG_MIN_RECOVERY_RATE)
                        DMEMIT(" min_recovery_rate %d", rs->md.sync_speed_min);

                if (rs->ctr_flags & CTR_FLAG_MAX_RECOVERY_RATE)
                        DMEMIT(" max_recovery_rate %d", rs->md.sync_speed_max);

                for (i = 0; i < rs->md.raid_disks; i++)
                        if (rs->dev[i].data_dev &&
                            test_bit(WriteMostly, &rs->dev[i].rdev.flags))
                                DMEMIT(" write_mostly %u", i);
                if (rs->ctr_flags & CTR_FLAG_MAX_WRITE_BEHIND)
                        DMEMIT(" max_write_behind %lu",
                               rs->md.bitmap_info.max_write_behind);

                if (rs->ctr_flags & CTR_FLAG_STRIPE_CACHE) {
                        struct r5conf *conf = rs->md.private;

                        /* convert from kiB to sectors */
                        DMEMIT(" stripe_cache %d",
                               conf ? conf->max_nr_stripes * 2 : 0);
                }

                if (rs->ctr_flags & CTR_FLAG_REGION_SIZE)
                        DMEMIT(" region_size %lu",
                               rs->md.bitmap_info.chunksize >> 9);

                if (rs->ctr_flags & CTR_FLAG_RAID10_COPIES)
                        DMEMIT(" raid10_copies %u",
                               raid10_md_layout_to_copies(rs->md.layout));

                if (rs->ctr_flags & CTR_FLAG_RAID10_FORMAT)
                        DMEMIT(" raid10_format %s",
                               raid10_md_layout_to_format(rs->md.layout));

                DMEMIT(" %d", rs->md.raid_disks);
                for (i = 0; i < rs->md.raid_disks; i++) {
                        if (rs->dev[i].meta_dev)
                                DMEMIT(" %s", rs->dev[i].meta_dev->name);
                        else
                                DMEMIT(" -");

                        if (rs->dev[i].data_dev)
                                DMEMIT(" %s", rs->dev[i].data_dev->name);
                        else
                                DMEMIT(" -");
                }
        }
}
static int raid_message(struct dm_target *ti, unsigned argc, char **argv)
{
        struct raid_set *rs = ti->private;
        struct mddev *mddev = &rs->md;

        if (!strcasecmp(argv[0], "reshape")) {
                DMERR("Reshape not supported.");
                return -EINVAL;
        }

        if (!mddev->pers || !mddev->pers->sync_request)
                return -EINVAL;

        if (!strcasecmp(argv[0], "frozen"))
                set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
        else
                clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);

        if (!strcasecmp(argv[0], "idle") || !strcasecmp(argv[0], "frozen")) {
                if (mddev->sync_thread) {
                        set_bit(MD_RECOVERY_INTR, &mddev->recovery);
                        md_reap_sync_thread(mddev);
                }
        } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
                   test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
                return -EBUSY;
        else if (!strcasecmp(argv[0], "resync"))
                set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
        else if (!strcasecmp(argv[0], "recover")) {
                set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
                set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
        } else {
                if (!strcasecmp(argv[0], "check"))
                        set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
                else if (strcasecmp(argv[0], "repair"))
                        return -EINVAL;
                set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
                set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
        }
        if (mddev->ro == 2) {
                /*
                 * A write to sync_action is enough to justify
                 * canceling read-auto mode
                 */
                mddev->ro = 0;
                if (!mddev->suspended)
                        md_wakeup_thread(mddev->sync_thread);
        }
        set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
        if (!mddev->suspended)
                md_wakeup_thread(mddev->thread);

        return 0;
}
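/*
 * Example (illustrative): the message interface above mirrors md's
 * sync_action, e.g.
 *   dmsetup message r0 0 check
 *   dmsetup message r0 0 idle
 * starts and then stops a scrubbing pass.
 */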
static int raid_iterate_devices(struct dm_target *ti,
                                iterate_devices_callout_fn fn, void *data)
{
        struct raid_set *rs = ti->private;
        unsigned i;
        int r = 0;

        for (i = 0; !r && i < rs->md.raid_disks; i++)
                if (rs->dev[i].data_dev)
                        r = fn(ti,
                               rs->dev[i].data_dev,
                               0, /* No offset on data devs */
                               rs->md.dev_sectors,
                               data);

        return r;
}

static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
        struct raid_set *rs = ti->private;
        unsigned chunk_size = rs->md.chunk_sectors << 9;
        struct r5conf *conf = rs->md.private;

        blk_limits_io_min(limits, chunk_size);
        blk_limits_io_opt(limits, chunk_size * (conf->raid_disks - conf->max_degraded));
}
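/*
 * Example (illustrative): a raid5 set of 4 devices (max_degraded = 1)
 * with chunk_sectors = 128 advertises io_min = 64KiB and
 * io_opt = 64KiB * (4 - 1) = 192KiB, i.e. one full stripe.
 */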
static void raid_presuspend(struct dm_target *ti)
{
        struct raid_set *rs = ti->private;

        md_stop_writes(&rs->md);
}

static void raid_postsuspend(struct dm_target *ti)
{
        struct raid_set *rs = ti->private;

        mddev_suspend(&rs->md);
}
static void attempt_restore_of_faulty_devices(struct raid_set *rs)
{
        int i;
        uint64_t failed_devices, cleared_failed_devices = 0;
        unsigned long flags;
        struct dm_raid_superblock *sb;
        struct md_rdev *r;

        for (i = 0; i < rs->md.raid_disks; i++) {
                r = &rs->dev[i].rdev;
                if (test_bit(Faulty, &r->flags) && r->sb_page &&
                    sync_page_io(r, 0, r->sb_size, r->sb_page, REQ_OP_READ, 0,
                                 1)) {
                        DMINFO("Faulty %s device #%d has readable super block."
                               "  Attempting to revive it.",
                               rs->raid_type->name, i);

                        /*
                         * Faulty bit may be set, but sometimes the array can
                         * be suspended before the personalities can respond
                         * by removing the device from the array (i.e. calling
                         * 'hot_remove_disk'). If they haven't yet removed
                         * the failed device, its 'raid_disk' number will be
                         * '>= 0' - meaning we must call this function
                         * ourselves.
                         */
                        if ((r->raid_disk >= 0) &&
                            (r->mddev->pers->hot_remove_disk(r->mddev, r) != 0))
                                /* Failed to revive this device, try next */
                                continue;

                        r->raid_disk = i;
                        r->saved_raid_disk = i;
                        flags = r->flags;
                        clear_bit(Faulty, &r->flags);
                        clear_bit(WriteErrorSeen, &r->flags);
                        clear_bit(In_sync, &r->flags);
                        if (r->mddev->pers->hot_add_disk(r->mddev, r)) {
                                r->raid_disk = -1;
                                r->saved_raid_disk = -1;
                                r->flags = flags;
                        } else {
                                r->recovery_offset = 0;
                                cleared_failed_devices |= 1ULL << i;
                        }
                }
        }
        if (cleared_failed_devices) {
                rdev_for_each(r, &rs->md) {
                        sb = page_address(r->sb_page);
                        failed_devices = le64_to_cpu(sb->failed_devices);
                        failed_devices &= ~cleared_failed_devices;
                        sb->failed_devices = cpu_to_le64(failed_devices);
                }
        }
}
static void raid_resume(struct dm_target *ti)
{
        struct raid_set *rs = ti->private;

        if (rs->raid_type->level) {
                set_bit(MD_CHANGE_DEVS, &rs->md.flags);

                if (!rs->bitmap_loaded) {
                        bitmap_load(&rs->md);
                        rs->bitmap_loaded = 1;
                } else {
                        /*
                         * A secondary resume while the device is active.
                         * Take this opportunity to check whether any failed
                         * devices are reachable again.
                         */
                        attempt_restore_of_faulty_devices(rs);
                }

                clear_bit(MD_RECOVERY_FROZEN, &rs->md.recovery);
        }

        mddev_resume(&rs->md);
}
static struct target_type raid_target = {
        .name = "raid",
        .version = {1, 8, 1},
        .module = THIS_MODULE,
        .ctr = raid_ctr,
        .dtr = raid_dtr,
        .map = raid_map,
        .status = raid_status,
        .message = raid_message,
        .iterate_devices = raid_iterate_devices,
        .io_hints = raid_io_hints,
        .presuspend = raid_presuspend,
        .postsuspend = raid_postsuspend,
        .resume = raid_resume,
};
static int __init dm_raid_init(void)
{
        DMINFO("Loading target version %u.%u.%u",
               raid_target.version[0],
               raid_target.version[1],
               raid_target.version[2]);

        return dm_register_target(&raid_target);
}

static void __exit dm_raid_exit(void)
{
        dm_unregister_target(&raid_target);
}

module_init(dm_raid_init);
module_exit(dm_raid_exit);
module_param(devices_handle_discard_safely, bool, 0644);
MODULE_PARM_DESC(devices_handle_discard_safely,
                 "Set to Y if all devices in each array reliably return zeroes on reads from discarded regions");

MODULE_DESCRIPTION(DM_NAME " raid0/1/10/4/5/6 target");
MODULE_ALIAS("dm-raid1");
MODULE_ALIAS("dm-raid10");
MODULE_ALIAS("dm-raid4");
MODULE_ALIAS("dm-raid5");
MODULE_ALIAS("dm-raid6");
MODULE_AUTHOR("Neil Brown <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");