dm raid: check constructor arguments for invalid raid level/argument combinations
drivers/md/dm-raid.c
1 /*
2  * Copyright (C) 2010-2011 Neil Brown
3  * Copyright (C) 2010-2016 Red Hat, Inc. All rights reserved.
4  *
5  * This file is released under the GPL.
6  */
7
8 #include <linux/slab.h>
9 #include <linux/module.h>
10
11 #include "md.h"
12 #include "raid1.h"
13 #include "raid5.h"
14 #include "raid10.h"
15 #include "bitmap.h"
16
17 #include <linux/device-mapper.h>
18
19 #define DM_MSG_PREFIX "raid"
20 #define MAX_RAID_DEVICES        253 /* md-raid kernel limit */
21
22 static bool devices_handle_discard_safely = false;
23
24 /*
25  * The following flags are used by dm-raid.c to set up the array state.
26  * They must be cleared before md_run is called.
27  */
28 #define FirstUse 10             /* rdev flag */
29
30 struct raid_dev {
31         /*
32          * Two DM devices, one to hold metadata and one to hold the
33          * actual data/parity.  The reason for this is to not confuse
34          * ti->len and give more flexibility in altering size and
35          * characteristics.
36          *
37          * While it is possible for this device to be associated
38          * with a different physical device than the data_dev, it
39          * is intended for it to be the same.
40          *    |--------- Physical Device ---------|
41          *    |- meta_dev -|------ data_dev ------|
42          */
43         struct dm_dev *meta_dev;
44         struct dm_dev *data_dev;
45         struct md_rdev rdev;
46 };
47
48 /*
49  * Flags for rs->ctr_flags field.
50  *
51  * 1 = option takes no argument value (e.g. 'nosync')
52  * 2 = option takes an argument value (e.g. 'rebuild 1')
53  */
54 #define CTR_FLAG_SYNC              0x1   /* 1 */ /* Not with raid0! */
55 #define CTR_FLAG_NOSYNC            0x2   /* 1 */ /* Not with raid0! */
56 #define CTR_FLAG_REBUILD           0x4   /* 2 */ /* Not with raid0! */
57 #define CTR_FLAG_DAEMON_SLEEP      0x8   /* 2 */ /* Not with raid0! */
58 #define CTR_FLAG_MIN_RECOVERY_RATE 0x10  /* 2 */ /* Not with raid0! */
59 #define CTR_FLAG_MAX_RECOVERY_RATE 0x20  /* 2 */ /* Not with raid0! */
60 #define CTR_FLAG_MAX_WRITE_BEHIND  0x40  /* 2 */ /* Only with raid1! */
61 #define CTR_FLAG_WRITE_MOSTLY      0x80  /* 2 */ /* Only with raid1! */
62 #define CTR_FLAG_STRIPE_CACHE      0x100 /* 2 */ /* Only with raid4/5/6! */
63 #define CTR_FLAG_REGION_SIZE       0x200 /* 2 */ /* Not with raid0! */
64 #define CTR_FLAG_RAID10_COPIES     0x400 /* 2 */ /* Only with raid10 */
65 #define CTR_FLAG_RAID10_FORMAT     0x800 /* 2 */ /* Only with raid10 */
66
67 /*
68  * Definitions of various constructor flags to
69  * be used in checks of valid / invalid flags
70  * per raid level.
71  */
72 /* Define all sync related flags ('sync'/'nosync') */
73 #define CTR_FLAGS_ANY_SYNC              (CTR_FLAG_SYNC | CTR_FLAG_NOSYNC)
74
75 /* Define flags for options without argument (e.g. 'nosync') */
76 #define CTR_FLAG_OPTIONS_NO_ARGS        CTR_FLAGS_ANY_SYNC
77
78 /* Define flags for options with one argument (e.g. 'delta_disks +2') */
79 #define CTR_FLAG_OPTIONS_ONE_ARG (CTR_FLAG_REBUILD | \
80                                   CTR_FLAG_WRITE_MOSTLY | \
81                                   CTR_FLAG_DAEMON_SLEEP | \
82                                   CTR_FLAG_MIN_RECOVERY_RATE | \
83                                   CTR_FLAG_MAX_RECOVERY_RATE | \
84                                   CTR_FLAG_MAX_WRITE_BEHIND | \
85                                   CTR_FLAG_STRIPE_CACHE | \
86                                   CTR_FLAG_REGION_SIZE | \
87                                   CTR_FLAG_RAID10_COPIES | \
88                                   CTR_FLAG_RAID10_FORMAT)
89
90 /* All ctr optional arguments */
91 #define ALL_CTR_FLAGS           (CTR_FLAG_OPTIONS_NO_ARGS | \
92                                  CTR_FLAG_OPTIONS_ONE_ARG)
93
94 /* Invalid options definitions per raid level... */
95
96 /* "raid0" does not accept any options */
97 #define RAID0_INVALID_FLAGS ALL_CTR_FLAGS
98
99 /* "raid1" does not accept stripe cache or any raid10 options */
100 #define RAID1_INVALID_FLAGS     (CTR_FLAG_STRIPE_CACHE | \
101                                  CTR_FLAG_RAID10_COPIES | \
102                                  CTR_FLAG_RAID10_FORMAT)
103
104 /* "raid10" does not accept any raid1 or stripe cache options */
105 #define RAID10_INVALID_FLAGS    (CTR_FLAG_WRITE_MOSTLY | \
106                                  CTR_FLAG_MAX_WRITE_BEHIND | \
107                                  CTR_FLAG_STRIPE_CACHE)
108 /*
109  * "raid4/5/6" do not accept any raid1 or raid10 specific options
110  *
111  * "raid6" does not accept "nosync", because it is not guaranteed
112  * that both parity and q-syndrome are being written properly with
113  * any writes
114  */
115 #define RAID45_INVALID_FLAGS    (CTR_FLAG_WRITE_MOSTLY | \
116                                  CTR_FLAG_MAX_WRITE_BEHIND | \
117                                  CTR_FLAG_RAID10_FORMAT | \
118                                  CTR_FLAG_RAID10_COPIES)
119 #define RAID6_INVALID_FLAGS     (CTR_FLAG_NOSYNC | RAID45_INVALID_FLAGS)
120 /* ...invalid options definitions per raid level */
121
122 struct raid_set {
123         struct dm_target *ti;
124
125         uint32_t bitmap_loaded;
126         uint32_t ctr_flags;
127
128         struct mddev md;
129         struct raid_type *raid_type;
130         struct dm_target_callbacks callbacks;
131
132         struct raid_dev dev[0];
133 };
134
135 /* Supported raid types and properties. */
136 static struct raid_type {
137         const char *name;               /* RAID algorithm. */
138         const char *descr;              /* Descriptor text for logging. */
139         const unsigned parity_devs;     /* # of parity devices. */
140         const unsigned minimal_devs;    /* minimal # of devices in set. */
141         const unsigned level;           /* RAID level. */
142         const unsigned algorithm;       /* RAID algorithm. */
143 } raid_types[] = {
144         {"raid0",    "RAID0 (striping)",                0, 2, 0, 0 /* NONE */},
145         {"raid1",    "RAID1 (mirroring)",               0, 2, 1, 0 /* NONE */},
146         {"raid10",   "RAID10 (striped mirrors)",        0, 2, 10, UINT_MAX /* Varies */},
147         {"raid4",    "RAID4 (dedicated parity disk)",   1, 2, 5, ALGORITHM_PARITY_0},
148         {"raid5_la", "RAID5 (left asymmetric)",         1, 2, 5, ALGORITHM_LEFT_ASYMMETRIC},
149         {"raid5_ra", "RAID5 (right asymmetric)",        1, 2, 5, ALGORITHM_RIGHT_ASYMMETRIC},
150         {"raid5_ls", "RAID5 (left symmetric)",          1, 2, 5, ALGORITHM_LEFT_SYMMETRIC},
151         {"raid5_rs", "RAID5 (right symmetric)",         1, 2, 5, ALGORITHM_RIGHT_SYMMETRIC},
152         {"raid6_zr", "RAID6 (zero restart)",            2, 4, 6, ALGORITHM_ROTATING_ZERO_RESTART},
153         {"raid6_nr", "RAID6 (N restart)",               2, 4, 6, ALGORITHM_ROTATING_N_RESTART},
154         {"raid6_nc", "RAID6 (N continue)",              2, 4, 6, ALGORITHM_ROTATING_N_CONTINUE}
155 };
156
157 /* True, if @v is in inclusive range [@min, @max] */
158 static bool _in_range(long v, long min, long max)
159 {
160         return v >= min && v <= max;
161 }
162
163 /* ctr flag bit manipulation... */
164 /* Set single @flag in @flags */
165 static void _set_flag(uint32_t flag, uint32_t *flags)
166 {
167         WARN_ON_ONCE(hweight32(flag) != 1);
168         *flags |= flag;
169 }
170
171 /* Test single @flag in @flags */
172 static bool _test_flag(uint32_t flag, uint32_t flags)
173 {
174         WARN_ON_ONCE(hweight32(flag) != 1);
175         return (flag & flags) ? true : false;
176 }
177
178 /* Return true if single @flag is set in @*flags, else set it and return false */
179 static bool _test_and_set_flag(uint32_t flag, uint32_t *flags)
180 {
181         if (_test_flag(flag, *flags))
182                 return true;
183
184         _set_flag(flag, flags);
185         return false;
186 }
187 /* ...ctr and runtime flag bit manipulation */
188
189 /* All table line arguments are defined here */
190 static struct arg_name_flag {
191         const uint32_t flag;
192         const char *name;
193 } _arg_name_flags[] = {
194         { CTR_FLAG_SYNC, "sync"},
195         { CTR_FLAG_NOSYNC, "nosync"},
196         { CTR_FLAG_REBUILD, "rebuild"},
197         { CTR_FLAG_DAEMON_SLEEP, "daemon_sleep"},
198         { CTR_FLAG_MIN_RECOVERY_RATE, "min_recovery_rate"},
199         { CTR_FLAG_MAX_RECOVERY_RATE, "max_recovery_rate"},
200         { CTR_FLAG_MAX_WRITE_BEHIND, "max_write_behind"},
201         { CTR_FLAG_WRITE_MOSTLY, "writemostly"},
202         { CTR_FLAG_STRIPE_CACHE, "stripe_cache"},
203         { CTR_FLAG_REGION_SIZE, "region_size"},
204         { CTR_FLAG_RAID10_COPIES, "raid10_copies"},
205         { CTR_FLAG_RAID10_FORMAT, "raid10_format"},
206 };
207
208 /* Return argument name string for given @flag */
209 static const char *_argname_by_flag(const uint32_t flag)
210 {
211         if (hweight32(flag) == 1) {
212                 struct arg_name_flag *anf = _arg_name_flags + ARRAY_SIZE(_arg_name_flags);
213
214                 while (anf-- > _arg_name_flags)
215                         if (_test_flag(flag, anf->flag))
216                                 return anf->name;
217
218         } else
219                 DMERR("%s called with more than one flag!", __func__);
220
221         return NULL;
222 }
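/*
 * Illustrative usage of the helper above (names taken from this file):
 * _argname_by_flag(CTR_FLAG_REBUILD) returns "rebuild" and
 * _argname_by_flag(CTR_FLAG_RAID10_COPIES) returns "raid10_copies";
 * a value with more than one bit set logs an error and returns NULL.
 */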
223
224 /*
225  * bool helpers to test for various raid levels of a raid type
226  */
227
228 /* Return true, if raid type in @rt is raid0 */
229 static bool rt_is_raid0(struct raid_type *rt)
230 {
231         return !rt->level;
232 }
233
234 /* Return true, if raid type in @rt is raid1 */
235 static bool rt_is_raid1(struct raid_type *rt)
236 {
237         return rt->level == 1;
238 }
239
240 /* Return true, if raid type in @rt is raid10 */
241 static bool rt_is_raid10(struct raid_type *rt)
242 {
243         return rt->level == 10;
244 }
245
246 /* Return true, if raid type in @rt is raid4/5 */
247 static bool rt_is_raid45(struct raid_type *rt)
248 {
249         return _in_range(rt->level, 4, 5);
250 }
251
252 /* Return true, if raid type in @rt is raid6 */
253 static bool rt_is_raid6(struct raid_type *rt)
254 {
255         return rt->level == 6;
256 }
257 /* END: raid level bools */
258
259 /*
260  * Convenience functions to set ti->error to @errmsg and
261  * return @r in order to shorten code in a lot of places
262  */
263 static int ti_error_ret(struct dm_target *ti, const char *errmsg, int r)
264 {
265         ti->error = (char *) errmsg;
266         return r;
267 }
268
269 static int ti_error_einval(struct dm_target *ti, const char *errmsg)
270 {
271         return ti_error_ret(ti, errmsg, -EINVAL);
272 }
273 /* END: convenience functions to set ti->error to @errmsg... */
274
275 /* Return invalid ctr flags for the raid level of @rs */
276 static uint32_t _invalid_flags(struct raid_set *rs)
277 {
278         if (rt_is_raid0(rs->raid_type))
279                 return RAID0_INVALID_FLAGS;
280         else if (rt_is_raid1(rs->raid_type))
281                 return RAID1_INVALID_FLAGS;
282         else if (rt_is_raid10(rs->raid_type))
283                 return RAID10_INVALID_FLAGS;
284         else if (rt_is_raid45(rs->raid_type))
285                 return RAID45_INVALID_FLAGS;
286         else if (rt_is_raid6(rs->raid_type))
287                 return RAID6_INVALID_FLAGS;
288
289         return ~0;
290 }
291
292 /*
293  * Check for any invalid flags set on @rs defined by bitset @invalid_flags
294  *
295  * Has to be called after parsing of the ctr flags!
296  */
297 static int rs_check_for_invalid_flags(struct raid_set *rs)
298 {
299         unsigned int ctr_flags = rs->ctr_flags, flag = 0;
300         const uint32_t invalid_flags = _invalid_flags(rs);
301
302         while ((ctr_flags &= ~flag)) {
303                 flag = 1 << __ffs(ctr_flags);
304
305                 if (_test_flag(flag, rs->ctr_flags) &&
306                     _test_flag(flag, invalid_flags))
307                         return ti_error_einval(rs->ti, "Invalid flag combined");
308         }
309
310         return 0;
311 }
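/*
 * Illustrative effect of this check (hypothetical table fragments): a
 * "raid0" table passing any optional argument (e.g. "sync"), a "raid1"
 * table passing "raid10_copies 2", or a "raid6" table passing "nosync"
 * each sets a bit contained in the respective *_INVALID_FLAGS mask, so
 * the constructor fails with -EINVAL ("Invalid flag combined").
 */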
312
313 static char *raid10_md_layout_to_format(int layout)
314 {
315         /*
316          * Bit 16 and 17 stand for "offset" and "use_far_sets"
317          * Refer to MD's raid10.c for details
318          */
319         if ((layout & 0x10000) && (layout & 0x20000))
320                 return "offset";
321
322         if ((layout & 0xFF) > 1)
323                 return "near";
324
325         return "far";
326 }
327
328 static unsigned raid10_md_layout_to_copies(int layout)
329 {
330         if ((layout & 0xFF) > 1)
331                 return layout & 0xFF;
332         return (layout >> 8) & 0xFF;
333 }
334
335 static int raid10_format_to_md_layout(char *format, unsigned copies)
336 {
337         unsigned n = 1, f = 1;
338
339         if (!strcasecmp("near", format))
340                 n = copies;
341         else
342                 f = copies;
343
344         if (!strcasecmp("offset", format))
345                 return 0x30000 | (f << 8) | n;
346
347         if (!strcasecmp("far", format))
348                 return 0x20000 | (f << 8) | n;
349
350         return (f << 8) | n;
351 }
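/*
 * Worked examples of the raid10 layout encoding above (copy counts in the
 * low two bytes; "far" sets bit 17, "offset" sets bits 16 and 17):
 *   raid10_format_to_md_layout("near", 2)   -> 0x102
 *   raid10_format_to_md_layout("far", 2)    -> 0x20201
 *   raid10_format_to_md_layout("offset", 2) -> 0x30201
 * Decoding these again gives raid10_md_layout_to_copies() == 2 and
 * raid10_md_layout_to_format() == "near"/"far"/"offset" respectively.
 */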
352
353 static struct raid_type *get_raid_type(const char *name)
354 {
355         int i;
356
357         for (i = 0; i < ARRAY_SIZE(raid_types); i++)
358                 if (!strcmp(raid_types[i].name, name))
359                         return &raid_types[i];
360
361         return NULL;
362 }
363
364 static struct raid_set *context_alloc(struct dm_target *ti, struct raid_type *raid_type, unsigned raid_devs)
365 {
366         unsigned i;
367         struct raid_set *rs;
368
369         if (raid_devs <= raid_type->parity_devs)
370                 return ERR_PTR(ti_error_einval(ti, "Insufficient number of devices"));
371
372         rs = kzalloc(sizeof(*rs) + raid_devs * sizeof(rs->dev[0]), GFP_KERNEL);
373         if (!rs)
374                 return ERR_PTR(ti_error_ret(ti, "Cannot allocate raid context", -ENOMEM));
375
376         mddev_init(&rs->md);
377
378         rs->ti = ti;
379         rs->raid_type = raid_type;
380         rs->md.raid_disks = raid_devs;
381         rs->md.level = raid_type->level;
382         rs->md.new_level = rs->md.level;
383         rs->md.layout = raid_type->algorithm;
384         rs->md.new_layout = rs->md.layout;
385         rs->md.delta_disks = 0;
386         rs->md.recovery_cp = 0;
387
388         for (i = 0; i < raid_devs; i++)
389                 md_rdev_init(&rs->dev[i].rdev);
390
391         /*
392          * Remaining items to be initialized by further RAID params:
393          *  rs->md.persistent
394          *  rs->md.external
395          *  rs->md.chunk_sectors
396          *  rs->md.new_chunk_sectors
397          *  rs->md.dev_sectors
398          */
399
400         return rs;
401 }
402
403 static void context_free(struct raid_set *rs)
404 {
405         int i;
406
407         for (i = 0; i < rs->md.raid_disks; i++) {
408                 if (rs->dev[i].meta_dev)
409                         dm_put_device(rs->ti, rs->dev[i].meta_dev);
410                 md_rdev_clear(&rs->dev[i].rdev);
411                 if (rs->dev[i].data_dev)
412                         dm_put_device(rs->ti, rs->dev[i].data_dev);
413         }
414
415         kfree(rs);
416 }
417
418 /*
419  * For every device we have two words
420  *  <meta_dev>: meta device name or '-' if missing
421  *  <data_dev>: data device name or '-' if missing
422  *
423  * The following are permitted:
424  *    - -
425  *    - <data_dev>
426  *    <meta_dev> <data_dev>
427  *
428  * The following is not allowed:
429  *    <meta_dev> -
430  *
431  * This code parses those words.  If there is a failure,
432  * the caller must use context_free to unwind the operations.
433  */
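/*
 * Illustrative device pairs (hypothetical device names):
 *   "- /dev/sdb1"            data device only, no metadata device
 *   "/dev/sdb1 /dev/sdb2"    separate metadata and data devices
 *   "- -"                    device slot left empty (e.g. awaiting rebuild)
 *   "/dev/sdb1 -"            rejected: metadata device without data device
 */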
434 static int parse_dev_params(struct raid_set *rs, struct dm_arg_set *as)
435 {
436         int i;
437         int rebuild = 0;
438         int metadata_available = 0;
439         int r = 0;
440         const char *arg;
441
442         /* Shift off the number of raid devices argument to get to dev pairs */
443         arg = dm_shift_arg(as);
444         if (!arg)
445                 return -EINVAL;
446
447         for (i = 0; i < rs->md.raid_disks; i++) {
448                 rs->dev[i].rdev.raid_disk = i;
449
450                 rs->dev[i].meta_dev = NULL;
451                 rs->dev[i].data_dev = NULL;
452
453                 /*
454                  * There are no offsets, since there is a separate device
455                  * for data and metadata.
456                  */
457                 rs->dev[i].rdev.data_offset = 0;
458                 rs->dev[i].rdev.mddev = &rs->md;
459
460                 arg = dm_shift_arg(as);
461                 if (!arg)
462                         return -EINVAL;
463
464                 if (strcmp(arg, "-")) {
465                         r = dm_get_device(rs->ti, arg,
466                                             dm_table_get_mode(rs->ti->table),
467                                             &rs->dev[i].meta_dev);
468                         if (r)
469                                 return ti_error_ret(rs->ti, "RAID metadata device lookup failure", r);
470
471                         rs->dev[i].rdev.sb_page = alloc_page(GFP_KERNEL);
472                         if (!rs->dev[i].rdev.sb_page)
473                                 return ti_error_ret(rs->ti, "Failed to allocate superblock page", -ENOMEM);
474                 }
475
476                 arg = dm_shift_arg(as);
477                 if (!arg)
478                         return -EINVAL;
479
480                 if (!strcmp(arg, "-")) {
481                         if (!test_bit(In_sync, &rs->dev[i].rdev.flags) &&
482                             (!rs->dev[i].rdev.recovery_offset))
483                                 return ti_error_einval(rs->ti, "Drive designated for rebuild not specified");
484
485                         if (rs->dev[i].meta_dev)
486                                 return ti_error_einval(rs->ti, "No data device supplied with metadata device");
487
488                         continue;
489                 }
490
491                 r = dm_get_device(rs->ti, arg,
492                                     dm_table_get_mode(rs->ti->table),
493                                     &rs->dev[i].data_dev);
494                 if (r)
495                         return ti_error_ret(rs->ti, "RAID device lookup failure", r);
496
497                 if (rs->dev[i].meta_dev) {
498                         metadata_available = 1;
499                         rs->dev[i].rdev.meta_bdev = rs->dev[i].meta_dev->bdev;
500                 }
501                 rs->dev[i].rdev.bdev = rs->dev[i].data_dev->bdev;
502                 list_add(&rs->dev[i].rdev.same_set, &rs->md.disks);
503                 if (!test_bit(In_sync, &rs->dev[i].rdev.flags))
504                         rebuild++;
505         }
506
507         if (metadata_available) {
508                 rs->md.external = 0;
509                 rs->md.persistent = 1;
510                 rs->md.major_version = 2;
511         } else if (rebuild && !rs->md.recovery_cp) {
512                 /*
513                  * Without metadata, we will not be able to tell if the array
514                  * is in-sync or not - we must assume it is not.  Therefore,
515                  * it is impossible to rebuild a drive.
516                  *
517                  * Even if there is metadata, the on-disk information may
518                  * indicate that the array is not in-sync and it will then
519                  * fail at that time.
520                  *
521                  * User could specify 'nosync' option if desperate.
522                  */
523                 DMERR("Unable to rebuild drive while array is not in-sync");
524                 return ti_error_einval(rs->ti, "Unable to rebuild drive while array is not in-sync");
525         }
526
527         return 0;
528 }
529
530 /*
531  * validate_region_size
532  * @rs
533  * @region_size:  region size in sectors.  If 0, pick a size (4MiB default).
534  *
535  * Set rs->md.bitmap_info.chunksize (which really refers to 'region size').
536  * Ensure that (ti->len/region_size < 2^21) - required by MD bitmap.
537  *
538  * Returns: 0 on success, -EINVAL on failure.
539  */
540 static int validate_region_size(struct raid_set *rs, unsigned long region_size)
541 {
542         unsigned long min_region_size = rs->ti->len / (1 << 21);
543
544         if (!region_size) {
545                 /*
546                  * Choose a reasonable default.  All figures in sectors.
547                  */
548                 if (min_region_size > (1 << 13)) {
549                         /* If not a power of 2, make it the next power of 2 */
550                         region_size = roundup_pow_of_two(min_region_size);
551                         DMINFO("Choosing default region size of %lu sectors",
552                                region_size);
553                 } else {
554                         DMINFO("Choosing default region size of 4MiB");
555                         region_size = 1 << 13; /* sectors */
556                 }
557         } else {
558                 /*
559                  * Validate user-supplied value.
560                  */
561                 if (region_size > rs->ti->len)
562                         return ti_error_einval(rs->ti, "Supplied region size is too large");
563
564                 if (region_size < min_region_size) {
565                         DMERR("Supplied region_size (%lu sectors) below minimum (%lu)",
566                               region_size, min_region_size);
567                         return ti_error_einval(rs->ti, "Supplied region size is too small");
568                 }
569
570                 if (!is_power_of_2(region_size))
571                         return ti_error_einval(rs->ti, "Region size is not a power of 2");
572
573                 if (region_size < rs->md.chunk_sectors)
574                         return ti_error_einval(rs->ti, "Region size is smaller than the chunk size");
575         }
576
577         /*
578          * Convert sectors to bytes.
579          */
580         rs->md.bitmap_info.chunksize = (region_size << 9);
581
582         return 0;
583 }
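/*
 * Worked example of the sizing logic above (hypothetical target sizes):
 * for a 1 TiB target (2^31 sectors), min_region_size = 2^31 / 2^21 = 1024
 * sectors, which is below 2^13, so the 4MiB (8192 sector) default is used;
 * for a 64 TiB target (2^37 sectors), min_region_size = 65536 sectors and
 * the default becomes roundup_pow_of_two(65536) = 65536 sectors (32 MiB).
 * Either way, bitmap_info.chunksize ends up as region_size << 9 bytes.
 */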
584
585 /*
586  * validate_raid_redundancy
587  * @rs
588  *
589  * Determine if there are enough devices in the array that haven't
590  * failed (or are being rebuilt) to form a usable array.
591  *
592  * Returns: 0 on success, -EINVAL on failure.
593  */
594 static int validate_raid_redundancy(struct raid_set *rs)
595 {
596         unsigned i, rebuild_cnt = 0;
597         unsigned rebuilds_per_group = 0, copies, d;
598         unsigned group_size, last_group_start;
599
600         for (i = 0; i < rs->md.raid_disks; i++)
601                 if (!test_bit(In_sync, &rs->dev[i].rdev.flags) ||
602                     !rs->dev[i].rdev.sb_page)
603                         rebuild_cnt++;
604
605         switch (rs->raid_type->level) {
606         case 1:
607                 if (rebuild_cnt >= rs->md.raid_disks)
608                         goto too_many;
609                 break;
610         case 4:
611         case 5:
612         case 6:
613                 if (rebuild_cnt > rs->raid_type->parity_devs)
614                         goto too_many;
615                 break;
616         case 10:
617                 copies = raid10_md_layout_to_copies(rs->md.layout);
618                 if (rebuild_cnt < copies)
619                         break;
620
621                 /*
622                  * It is possible to have a higher rebuild count for RAID10,
623                  * as long as the failed devices occur in different mirror
624                  * groups (i.e. different stripes).
625                  *
626                  * When checking "near" format, make sure no adjacent devices
627                  * have failed beyond what can be handled.  In addition to the
628                  * simple case where the number of devices is a multiple of the
629                  * number of copies, we must also handle cases where the number
630                  * of devices is not a multiple of the number of copies.
631                  * E.g.    dev1 dev2 dev3 dev4 dev5
632                  *          A    A    B    B    C
633                  *          C    D    D    E    E
634                  */
635                 if (!strcmp("near", raid10_md_layout_to_format(rs->md.layout))) {
636                         for (i = 0; i < rs->md.raid_disks * copies; i++) {
637                                 if (!(i % copies))
638                                         rebuilds_per_group = 0;
639                                 d = i % rs->md.raid_disks;
640                                 if ((!rs->dev[d].rdev.sb_page ||
641                                      !test_bit(In_sync, &rs->dev[d].rdev.flags)) &&
642                                     (++rebuilds_per_group >= copies))
643                                         goto too_many;
644                         }
645                         break;
646                 }
647
648                 /*
649                  * When checking "far" and "offset" formats, we need to ensure
650                  * that the device that holds its copy is not also dead or
651                  * being rebuilt.  (Note that "far" and "offset" formats only
652                  * support two copies right now.  These formats also only ever
653                  * use the 'use_far_sets' variant.)
654                  *
655                  * This check is somewhat complicated by the need to account
656                  * for arrays that are not a multiple of (far) copies.  This
657                  * results in the need to treat the last (potentially larger)
658                  * set differently.
659                  */
660                 group_size = (rs->md.raid_disks / copies);
661                 last_group_start = (rs->md.raid_disks / group_size) - 1;
662                 last_group_start *= group_size;
663                 for (i = 0; i < rs->md.raid_disks; i++) {
664                         if (!(i % copies) && !(i > last_group_start))
665                                 rebuilds_per_group = 0;
666                         if ((!rs->dev[i].rdev.sb_page ||
667                              !test_bit(In_sync, &rs->dev[i].rdev.flags)) &&
668                             (++rebuilds_per_group >= copies))
669                                         goto too_many;
670                 }
671                 break;
672         default:
673                 if (rebuild_cnt)
674                         return -EINVAL;
675         }
676
677         return 0;
678
679 too_many:
680         return -EINVAL;
681 }
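/*
 * Worked example for the "far"/"offset" group handling above (hypothetical
 * 5-device raid10 with 2 copies): group_size = 5 / 2 = 2 and
 * last_group_start = (5 / 2 - 1) * 2 = 2, so the mirror groups considered
 * are {0,1} and the larger trailing group {2,3,4}; two rebuilding devices
 * are tolerated only if they fall into different groups.
 */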
682
683 /*
684  * Possible arguments are...
685  *      <chunk_size> [optional_args]
686  *
687  * Argument definitions
688  *    <chunk_size>                      The number of sectors per disk that
689  *                                      will form the "stripe"
690  *    [[no]sync]                        Force or prevent recovery of the
691  *                                      entire array
692  *    [rebuild <idx>]                   Rebuild the drive indicated by the index
693  *    [daemon_sleep <ms>]               Time between bitmap daemon work to
694  *                                      clear bits
695  *    [min_recovery_rate <kB/sec/disk>] Throttle RAID initialization
696  *    [max_recovery_rate <kB/sec/disk>] Throttle RAID initialization
697  *    [write_mostly <idx>]              Indicate a write mostly drive via index
698  *    [max_write_behind <sectors>]      See '--write-behind=' (man mdadm)
699  *    [stripe_cache <sectors>]          Stripe cache size for higher RAIDs
700  *    [region_size <sectors>]           Defines granularity of bitmap
701  *
702  * RAID10-only options:
703  *    [raid10_copies <# copies>]        Number of copies.  (Default: 2)
704  *    [raid10_format <near|far|offset>] Layout algorithm.  (Default: near)
705  */
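/*
 * Example (hypothetical) parameter strings as they would appear after
 * <#raid_params> in a table line:
 *   "1 128"                                      just a 128 sector chunk size
 *   "5 128 raid10_copies 2 raid10_format near"   raid10 with 2 near copies
 *   "5 64 rebuild 1 region_size 8192"            rebuild dev 1, 4MiB regions
 * The count covers chunk_size plus every following key and, where the
 * option takes one, its value.
 */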
706 static int parse_raid_params(struct raid_set *rs, struct dm_arg_set *as,
707                              unsigned num_raid_params)
708 {
709         char *raid10_format = "near";
710         unsigned raid10_copies = 2;
711         unsigned i;
712         unsigned value, region_size = 0;
713         sector_t sectors_per_dev = rs->ti->len;
714         sector_t max_io_len;
715         const char *arg, *key;
716         struct raid_dev *rd;
717
718         arg = dm_shift_arg(as);
719         num_raid_params--; /* Account for chunk_size argument */
720
721         if (kstrtouint(arg, 10, &value) < 0)
722                 return ti_error_einval(rs->ti, "Bad numerical argument given for chunk_size");
723
724         /*
725          * First, parse the in-order required arguments
726          * "chunk_size" is the only argument of this type.
727          */
728         if (rs->raid_type->level == 1) {
729                 if (value)
730                         DMERR("Ignoring chunk size parameter for RAID 1");
731                 value = 0;
732         } else if (!is_power_of_2(value))
733                 return ti_error_einval(rs->ti, "Chunk size must be a power of 2");
734         else if (value < 8)
735                 return ti_error_einval(rs->ti, "Chunk size value is too small");
736
737         rs->md.new_chunk_sectors = rs->md.chunk_sectors = value;
738
739         /*
740          * We set each individual device as In_sync with a completed
741          * 'recovery_offset'.  If there has been a device failure or
742          * replacement then one of the following cases applies:
743          *
744          *   1) User specifies 'rebuild'.
745          *      - Device is reset when param is read.
746          *   2) A new device is supplied.
747          *      - No matching superblock found, resets device.
748          *   3) Device failure was transient and returns on reload.
749          *      - Failure noticed, resets device for bitmap replay.
750          *   4) Device hadn't completed recovery after previous failure.
751          *      - Superblock is read and overrides recovery_offset.
752          *
753          * What is found in the superblocks of the devices is always
754          * authoritative, unless 'rebuild' or '[no]sync' was specified.
755          */
756         for (i = 0; i < rs->md.raid_disks; i++) {
757                 set_bit(In_sync, &rs->dev[i].rdev.flags);
758                 rs->dev[i].rdev.recovery_offset = MaxSector;
759         }
760
761         /*
762          * Second, parse the unordered optional arguments
763          */
764         for (i = 0; i < num_raid_params; i++) {
765                 arg = dm_shift_arg(as);
766                 if (!arg)
767                         return ti_error_einval(rs->ti, "Not enough raid parameters given");
768
769                 if (!strcasecmp(arg, "nosync")) {
770                         rs->md.recovery_cp = MaxSector;
771                         _set_flag(CTR_FLAG_NOSYNC, &rs->ctr_flags);
772                         continue;
773                 }
774                 if (!strcasecmp(arg, "sync")) {
775                         rs->md.recovery_cp = 0;
776                         _set_flag(CTR_FLAG_SYNC, &rs->ctr_flags);
777                         continue;
778                 }
779
780                 key = arg;
781                 arg = dm_shift_arg(as);
782                 i++; /* Account for the argument pairs */
783                 if (!arg)
784                         return ti_error_einval(rs->ti, "Wrong number of raid parameters given");
785
786                 /*
787                  * Parameters that take a string value are checked here.
788                  */
789
790                 if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_RAID10_FORMAT))) {
791                         if (_test_and_set_flag(CTR_FLAG_RAID10_FORMAT, &rs->ctr_flags))
792                                 return ti_error_einval(rs->ti, "Only one raid10_format argument pair allowed");
793                         if (rs->raid_type->level != 10)
794                                 return ti_error_einval(rs->ti, "'raid10_format' is an invalid parameter for this RAID type");
795                         if (strcmp("near", arg) &&
796                             strcmp("far", arg) &&
797                             strcmp("offset", arg))
798                                 return ti_error_einval(rs->ti, "Invalid 'raid10_format' value given");
799
800                         raid10_format = (char *) arg;
801                         continue;
802                 }
803
804                 if (kstrtouint(arg, 10, &value) < 0)
805                         return ti_error_einval(rs->ti, "Bad numerical argument given in raid params");
806
807                 if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_REBUILD))) {
808                         /*
809                          * "rebuild" is being passed in by userspace to provide
810                          * indexes of replaced devices and to set up additional
811                          * devices on raid level takeover.
812                          */
813                         if (!_in_range(value, 0, rs->md.raid_disks - 1))
814                                 return ti_error_einval(rs->ti, "Invalid rebuild index given");
815
816                         rd = rs->dev + value;
817                         clear_bit(In_sync, &rd->rdev.flags);
818                         clear_bit(Faulty, &rd->rdev.flags);
819                         rd->rdev.recovery_offset = 0;
820                         _set_flag(CTR_FLAG_REBUILD, &rs->ctr_flags);
821                 } else if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_WRITE_MOSTLY))) {
822                         if (rs->raid_type->level != 1)
823                                 return ti_error_einval(rs->ti, "write_mostly option is only valid for RAID1");
824
825                         if (!_in_range(value, 0, rs->md.raid_disks - 1))
826                                 return ti_error_einval(rs->ti, "Invalid write_mostly index given");
827
828                         set_bit(WriteMostly, &rs->dev[value].rdev.flags);
829                         _set_flag(CTR_FLAG_WRITE_MOSTLY, &rs->ctr_flags);
830                 } else if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_MAX_WRITE_BEHIND))) {
831                         if (rs->raid_type->level != 1)
832                                 return ti_error_einval(rs->ti, "max_write_behind option is only valid for RAID1");
833
834                         if (_test_and_set_flag(CTR_FLAG_MAX_WRITE_BEHIND, &rs->ctr_flags))
835                                 return ti_error_einval(rs->ti, "Only one max_write_behind argument pair allowed");
836
837                         /*
838                          * In device-mapper, we specify things in sectors, but
839                          * MD records this value in kB
840                          */
841                         value /= 2;
842                         if (value > COUNTER_MAX)
843                                 return ti_error_einval(rs->ti, "Max write-behind limit out of range");
844
845                         rs->md.bitmap_info.max_write_behind = value;
846                 } else if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_DAEMON_SLEEP))) {
847                         if (_test_and_set_flag(CTR_FLAG_DAEMON_SLEEP, &rs->ctr_flags))
848                                 return ti_error_einval(rs->ti, "Only one daemon_sleep argument pair allowed");
849                         if (!value || (value > MAX_SCHEDULE_TIMEOUT))
850                                 return ti_error_einval(rs->ti, "daemon sleep period out of range");
851                         rs->md.bitmap_info.daemon_sleep = value;
852                 } else if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_STRIPE_CACHE))) {
853                         if (_test_and_set_flag(CTR_FLAG_STRIPE_CACHE, &rs->ctr_flags))
854                                 return ti_error_einval(rs->ti, "Only one stripe_cache argument pair allowed");
855                         /*
856                          * In device-mapper, we specify things in sectors, but
857                          * MD records this value in kB
858                          */
859                         value /= 2;
860
861                         if (!_in_range(rs->raid_type->level, 4, 6))
862                                 return ti_error_einval(rs->ti, "Inappropriate argument: stripe_cache");
863                         if (raid5_set_cache_size(&rs->md, (int)value))
864                                 return ti_error_einval(rs->ti, "Bad stripe_cache size");
865
866                 } else if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_MIN_RECOVERY_RATE))) {
867                         if (_test_and_set_flag(CTR_FLAG_MIN_RECOVERY_RATE, &rs->ctr_flags))
868                                 return ti_error_einval(rs->ti, "Only one min_recovery_rate argument pair allowed");
869                         if (value > INT_MAX)
870                                 return ti_error_einval(rs->ti, "min_recovery_rate out of range");
871                         rs->md.sync_speed_min = (int)value;
872                 } else if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_MAX_RECOVERY_RATE))) {
873                         if (_test_and_set_flag(CTR_FLAG_MAX_RECOVERY_RATE, &rs->ctr_flags))
874                                 return ti_error_einval(rs->ti, "Only one max_recovery_rate argument pair allowed");
875                         if (value > INT_MAX)
876                                 return ti_error_einval(rs->ti, "max_recovery_rate out of range");
877                         rs->md.sync_speed_max = (int)value;
878                 } else if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_REGION_SIZE))) {
879                         if (_test_and_set_flag(CTR_FLAG_REGION_SIZE, &rs->ctr_flags))
880                                 return ti_error_einval(rs->ti, "Only one region_size argument pair allowed");
881
882                         region_size = value;
883                 } else if (!strcasecmp(key, _argname_by_flag(CTR_FLAG_RAID10_COPIES))) {
884                         if (_test_and_set_flag(CTR_FLAG_RAID10_COPIES, &rs->ctr_flags))
885                                 return ti_error_einval(rs->ti, "Only one raid10_copies argument pair allowed");
886
887                         if (!_in_range(value, 2, rs->md.raid_disks))
888                                 return ti_error_einval(rs->ti, "Bad value for 'raid10_copies'");
889
890                         raid10_copies = value;
891                 } else {
892                         DMERR("Unable to parse RAID parameter: %s", key);
893                         return ti_error_einval(rs->ti, "Unable to parse RAID parameters");
894                 }
895         }
896
897         if (validate_region_size(rs, region_size))
898                 return -EINVAL;
899
900         if (rs->md.chunk_sectors)
901                 max_io_len = rs->md.chunk_sectors;
902         else
903                 max_io_len = region_size;
904
905         if (dm_set_target_max_io_len(rs->ti, max_io_len))
906                 return -EINVAL;
907
908         if (rs->raid_type->level == 10) {
909                 if (raid10_copies > rs->md.raid_disks)
910                         return ti_error_einval(rs->ti, "Not enough devices to satisfy specification");
911
912                 /*
913                  * If the format is not "near", we only support
914                  * two copies at the moment.
915                  */
916                 if (strcmp("near", raid10_format) && (raid10_copies > 2))
917                         return ti_error_einval(rs->ti, "Too many copies for given RAID10 format.");
918
919                 /* (Len * #mirrors) / #devices */
920                 sectors_per_dev = rs->ti->len * raid10_copies;
921                 sector_div(sectors_per_dev, rs->md.raid_disks);
922
923                 rs->md.layout = raid10_format_to_md_layout(raid10_format,
924                                                            raid10_copies);
925                 rs->md.new_layout = rs->md.layout;
926         } else if ((!rs->raid_type->level || rs->raid_type->level > 1) &&
927                    sector_div(sectors_per_dev,
928                               (rs->md.raid_disks - rs->raid_type->parity_devs)))
929                 return ti_error_einval(rs->ti, "Target length not divisible by number of data devices");
930
931         rs->md.dev_sectors = sectors_per_dev;
932
933         /* Assume there are no metadata devices until the drives are parsed */
934         rs->md.persistent = 0;
935         rs->md.external = 1;
936
937         /* Check, if any invalid ctr arguments have been passed in for the raid level */
938         return rs_check_for_invalid_flags(rs);
939 }
940
941 static void do_table_event(struct work_struct *ws)
942 {
943         struct raid_set *rs = container_of(ws, struct raid_set, md.event_work);
944
945         dm_table_event(rs->ti->table);
946 }
947
948 static int raid_is_congested(struct dm_target_callbacks *cb, int bits)
949 {
950         struct raid_set *rs = container_of(cb, struct raid_set, callbacks);
951
952         return mddev_congested(&rs->md, bits);
953 }
954
955 /*
956  * This structure is never routinely used by userspace, unlike md superblocks.
957  * Devices with this superblock should only ever be accessed via device-mapper.
958  */
959 #define DM_RAID_MAGIC 0x64526D44
960 struct dm_raid_superblock {
961         __le32 magic;           /* "DmRd" */
962         __le32 features;        /* Used to indicate possible future changes */
963
964         __le32 num_devices;     /* Number of devices in this array. (Max 64) */
965         __le32 array_position;  /* The position of this drive in the array */
966
967         __le64 events;          /* Incremented by md when superblock updated */
968         __le64 failed_devices;  /* Bit field of devices to indicate failures */
969
970         /*
971          * This offset tracks the progress of the repair or replacement of
972          * an individual drive.
973          */
974         __le64 disk_recovery_offset;
975
976         /*
977          * This offset tracks the progress of the initial array
978          * synchronisation/parity calculation.
979          */
980         __le64 array_resync_offset;
981
982         /*
983          * RAID characteristics
984          */
985         __le32 level;
986         __le32 layout;
987         __le32 stripe_sectors;
988
989         /* Remainder of a logical block is zero-filled when writing (see super_sync()). */
990 } __packed;
991
992 static int read_disk_sb(struct md_rdev *rdev, int size)
993 {
994         BUG_ON(!rdev->sb_page);
995
996         if (rdev->sb_loaded)
997                 return 0;
998
999         if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, 0, 1)) {
1000                 DMERR("Failed to read superblock of device at position %d",
1001                       rdev->raid_disk);
1002                 md_error(rdev->mddev, rdev);
1003                 return -EINVAL;
1004         }
1005
1006         rdev->sb_loaded = 1;
1007
1008         return 0;
1009 }
1010
1011 static void super_sync(struct mddev *mddev, struct md_rdev *rdev)
1012 {
1013         int i;
1014         uint64_t failed_devices;
1015         struct dm_raid_superblock *sb;
1016         struct raid_set *rs = container_of(mddev, struct raid_set, md);
1017
1018         sb = page_address(rdev->sb_page);
1019         failed_devices = le64_to_cpu(sb->failed_devices);
1020
1021         for (i = 0; i < mddev->raid_disks; i++)
1022                 if (!rs->dev[i].data_dev ||
1023                     test_bit(Faulty, &(rs->dev[i].rdev.flags)))
1024                         failed_devices |= (1ULL << i);
1025
1026         memset(sb + 1, 0, rdev->sb_size - sizeof(*sb));
1027
1028         sb->magic = cpu_to_le32(DM_RAID_MAGIC);
1029         sb->features = cpu_to_le32(0);  /* No features yet */
1030
1031         sb->num_devices = cpu_to_le32(mddev->raid_disks);
1032         sb->array_position = cpu_to_le32(rdev->raid_disk);
1033
1034         sb->events = cpu_to_le64(mddev->events);
1035         sb->failed_devices = cpu_to_le64(failed_devices);
1036
1037         sb->disk_recovery_offset = cpu_to_le64(rdev->recovery_offset);
1038         sb->array_resync_offset = cpu_to_le64(mddev->recovery_cp);
1039
1040         sb->level = cpu_to_le32(mddev->level);
1041         sb->layout = cpu_to_le32(mddev->layout);
1042         sb->stripe_sectors = cpu_to_le32(mddev->chunk_sectors);
1043 }
1044
1045 /*
1046  * super_load
1047  *
1048  * This function creates a superblock if one is not found on the device
1049  * and will decide which superblock to use if there's a choice.
1050  *
1051  * Return: 1 if use rdev, 0 if use refdev, -Exxx otherwise
1052  */
1053 static int super_load(struct md_rdev *rdev, struct md_rdev *refdev)
1054 {
1055         int r;
1056         struct dm_raid_superblock *sb;
1057         struct dm_raid_superblock *refsb;
1058         uint64_t events_sb, events_refsb;
1059
1060         rdev->sb_start = 0;
1061         rdev->sb_size = bdev_logical_block_size(rdev->meta_bdev);
1062         if (rdev->sb_size < sizeof(*sb) || rdev->sb_size > PAGE_SIZE) {
1063                 DMERR("superblock size of a logical block is no longer valid");
1064                 return -EINVAL;
1065         }
1066
1067         r = read_disk_sb(rdev, rdev->sb_size);
1068         if (r)
1069                 return r;
1070
1071         sb = page_address(rdev->sb_page);
1072
1073         /*
1074          * Two cases that we want to write new superblocks and rebuild:
1075          * 1) New device (no matching magic number)
1076          * 2) Device specified for rebuild (!In_sync w/ offset == 0)
1077          */
1078         if ((sb->magic != cpu_to_le32(DM_RAID_MAGIC)) ||
1079             (!test_bit(In_sync, &rdev->flags) && !rdev->recovery_offset)) {
1080                 super_sync(rdev->mddev, rdev);
1081
1082                 set_bit(FirstUse, &rdev->flags);
1083
1084                 /* Force writing of superblocks to disk */
1085                 set_bit(MD_CHANGE_DEVS, &rdev->mddev->flags);
1086
1087                 /* Any superblock is better than none, choose that if given */
1088                 return refdev ? 0 : 1;
1089         }
1090
1091         if (!refdev)
1092                 return 1;
1093
1094         events_sb = le64_to_cpu(sb->events);
1095
1096         refsb = page_address(refdev->sb_page);
1097         events_refsb = le64_to_cpu(refsb->events);
1098
1099         return (events_sb > events_refsb) ? 1 : 0;
1100 }
1101
1102 static int super_init_validation(struct mddev *mddev, struct md_rdev *rdev)
1103 {
1104         int role;
1105         struct raid_set *rs = container_of(mddev, struct raid_set, md);
1106         uint64_t events_sb;
1107         uint64_t failed_devices;
1108         struct dm_raid_superblock *sb;
1109         uint32_t new_devs = 0;
1110         uint32_t rebuilds = 0;
1111         struct md_rdev *r;
1112         struct dm_raid_superblock *sb2;
1113
1114         sb = page_address(rdev->sb_page);
1115         events_sb = le64_to_cpu(sb->events);
1116         failed_devices = le64_to_cpu(sb->failed_devices);
1117
1118         /*
1119          * Initialise to 1 if this is a new superblock.
1120          */
1121         mddev->events = events_sb ? : 1;
1122
1123         /*
1124          * Reshaping is not currently allowed
1125          */
1126         if (le32_to_cpu(sb->level) != mddev->level) {
1127                 DMERR("Reshaping arrays not yet supported. (RAID level change)");
1128                 return -EINVAL;
1129         }
1130         if (le32_to_cpu(sb->layout) != mddev->layout) {
1131                 DMERR("Reshaping arrays not yet supported. (RAID layout change)");
1132                 DMERR("  0x%X vs 0x%X", le32_to_cpu(sb->layout), mddev->layout);
1133                 DMERR("  Old layout: %s w/ %d copies",
1134                       raid10_md_layout_to_format(le32_to_cpu(sb->layout)),
1135                       raid10_md_layout_to_copies(le32_to_cpu(sb->layout)));
1136                 DMERR("  New layout: %s w/ %d copies",
1137                       raid10_md_layout_to_format(mddev->layout),
1138                       raid10_md_layout_to_copies(mddev->layout));
1139                 return -EINVAL;
1140         }
1141         if (le32_to_cpu(sb->stripe_sectors) != mddev->chunk_sectors) {
1142                 DMERR("Reshaping arrays not yet supported. (stripe sectors change)");
1143                 return -EINVAL;
1144         }
1145
1146         /* We can only change the number of devices in RAID1 right now */
1147         if ((rs->raid_type->level != 1) &&
1148             (le32_to_cpu(sb->num_devices) != mddev->raid_disks)) {
1149                 DMERR("Reshaping arrays not yet supported. (device count change)");
1150                 return -EINVAL;
1151         }
1152
1153         if (!(rs->ctr_flags & (CTR_FLAG_SYNC | CTR_FLAG_NOSYNC)))
1154                 mddev->recovery_cp = le64_to_cpu(sb->array_resync_offset);
1155
1156         /*
1157          * During load, we set FirstUse if a new superblock was written.
1158          * There are two reasons we might not have a superblock:
1159          * 1) The array is brand new - in which case, all of the
1160          *    devices must have their In_sync bit set.  Also,
1161          *    recovery_cp must be 0, unless forced.
1162          * 2) This is a new device being added to an old array
1163          *    and the new device needs to be rebuilt - in which
1164          *    case the In_sync bit will /not/ be set and
1165          *    recovery_cp must be MaxSector.
1166          */
1167         rdev_for_each(r, mddev) {
1168                 if (!test_bit(In_sync, &r->flags)) {
1169                         DMINFO("Device %d specified for rebuild: "
1170                                "Clearing superblock", r->raid_disk);
1171                         rebuilds++;
1172                 } else if (test_bit(FirstUse, &r->flags))
1173                         new_devs++;
1174         }
1175
1176         if (!rebuilds) {
1177                 if (new_devs == mddev->raid_disks) {
1178                         DMINFO("Superblocks created for new array");
1179                         set_bit(MD_ARRAY_FIRST_USE, &mddev->flags);
1180                 } else if (new_devs) {
1181                         DMERR("New device injected "
1182                               "into existing array without 'rebuild' "
1183                               "parameter specified");
1184                         return -EINVAL;
1185                 }
1186         } else if (new_devs) {
1187                 DMERR("'rebuild' devices cannot be "
1188                       "injected into an array with other first-time devices");
1189                 return -EINVAL;
1190         } else if (mddev->recovery_cp != MaxSector) {
1191                 DMERR("'rebuild' specified while array is not in-sync");
1192                 return -EINVAL;
1193         }
1194
1195         /*
1196          * Now we set the Faulty bit for those devices that are
1197          * recorded in the superblock as failed.
1198          */
1199         rdev_for_each(r, mddev) {
1200                 if (!r->sb_page)
1201                         continue;
1202                 sb2 = page_address(r->sb_page);
1203                 sb2->failed_devices = 0;
1204
1205                 /*
1206                  * Check for any device re-ordering.
1207                  */
1208                 if (!test_bit(FirstUse, &r->flags) && (r->raid_disk >= 0)) {
1209                         role = le32_to_cpu(sb2->array_position);
1210                         if (role != r->raid_disk) {
1211                                 if (rs->raid_type->level != 1)
1212                                         return ti_error_einval(rs->ti, "Cannot change device "
1213                                                                        "positions in RAID array");
1214                                 DMINFO("RAID1 device #%d now at position #%d",
1215                                        role, r->raid_disk);
1216                         }
1217
1218                         /*
1219                          * Partial recovery is performed on
1220                          * returning failed devices.
1221                          */
1222                         if (failed_devices & (1ULL << role))
1223                                 set_bit(Faulty, &r->flags);
1224                 }
1225         }
1226
1227         return 0;
1228 }
1229
1230 static int super_validate(struct raid_set *rs, struct md_rdev *rdev)
1231 {
1232         struct mddev *mddev = &rs->md;
1233         struct dm_raid_superblock *sb = page_address(rdev->sb_page);
1234
1235         /*
1236          * If mddev->events is not set, we know we have not yet initialized
1237          * the array.
1238          */
1239         if (!mddev->events && super_init_validation(mddev, rdev))
1240                 return -EINVAL;
1241
1242         if (le32_to_cpu(sb->features)) {
1243                 rs->ti->error = "Unable to assemble array: No feature flags supported yet";
1244                 return -EINVAL;
1245         }
1246
1247         /* Enable bitmap creation for RAID levels != 0 */
1248         mddev->bitmap_info.offset = (rs->raid_type->level) ? to_sector(4096) : 0;
1249         rdev->mddev->bitmap_info.default_offset = mddev->bitmap_info.offset;
1250
1251         if (!test_bit(FirstUse, &rdev->flags)) {
1252                 rdev->recovery_offset = le64_to_cpu(sb->disk_recovery_offset);
1253                 if (rdev->recovery_offset != MaxSector)
1254                         clear_bit(In_sync, &rdev->flags);
1255         }
1256
1257         /*
1258          * If a device comes back, set it as not In_sync and no longer faulty.
1259          */
1260         if (test_bit(Faulty, &rdev->flags)) {
1261                 clear_bit(Faulty, &rdev->flags);
1262                 clear_bit(In_sync, &rdev->flags);
1263                 rdev->saved_raid_disk = rdev->raid_disk;
1264                 rdev->recovery_offset = 0;
1265         }
1266
1267         clear_bit(FirstUse, &rdev->flags);
1268
1269         return 0;
1270 }
1271
1272 /*
1273  * Analyse superblocks and select the freshest.
1274  */
1275 static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
1276 {
1277         int r;
1278         struct raid_dev *dev;
1279         struct md_rdev *rdev, *tmp, *freshest;
1280         struct mddev *mddev = &rs->md;
1281
1282         freshest = NULL;
1283         rdev_for_each_safe(rdev, tmp, mddev) {
1284                 /*
1285                  * Skipping super_load due to CTR_FLAG_SYNC will cause
1286                  * the array to undergo initialization again as
1287                  * though it were new.  This is the intended effect
1288                  * of the "sync" directive.
1289                  *
1290                  * When reshaping capability is added, we must ensure
1291                  * that the "sync" directive is disallowed during the
1292                  * reshape.
1293                  */
1294                 rdev->sectors = to_sector(i_size_read(rdev->bdev->bd_inode));
1295
1296                 if (rs->ctr_flags & CTR_FLAG_SYNC)
1297                         continue;
1298
1299                 if (!rdev->meta_bdev)
1300                         continue;
1301
1302                 r = super_load(rdev, freshest);
1303
1304                 switch (r) {
1305                 case 1:
1306                         freshest = rdev;
1307                         break;
1308                 case 0:
1309                         break;
1310                 default:
1311                         dev = container_of(rdev, struct raid_dev, rdev);
1312                         if (dev->meta_dev)
1313                                 dm_put_device(ti, dev->meta_dev);
1314
1315                         dev->meta_dev = NULL;
1316                         rdev->meta_bdev = NULL;
1317
1318                         if (rdev->sb_page)
1319                                 put_page(rdev->sb_page);
1320
1321                         rdev->sb_page = NULL;
1322
1323                         rdev->sb_loaded = 0;
1324
1325                         /*
1326                          * We might be able to salvage the data device
1327                          * even though the meta device has failed.  For
1328                          * now, we behave as though '- -' had been
1329                          * set for this device in the table.
1330                          */
1331                         if (dev->data_dev)
1332                                 dm_put_device(ti, dev->data_dev);
1333
1334                         dev->data_dev = NULL;
1335                         rdev->bdev = NULL;
1336
1337                         list_del(&rdev->same_set);
1338                 }
1339         }
1340
1341         if (!freshest)
1342                 return 0;
1343
1344         if (validate_raid_redundancy(rs))
1345                 return ti_error_einval(rs->ti, "Insufficient redundancy to activate array");
1346
1347         /*
1348          * Validation of the freshest device provides the source of
1349          * validation for the remaining devices.
1350          */
1351         if (super_validate(rs, freshest))
1352                 return ti_error_einval(rs->ti, "Unable to assemble array: Invalid superblocks");
1353
1354         rdev_for_each(rdev, mddev)
1355                 if ((rdev != freshest) && super_validate(rs, rdev))
1356                         return -EINVAL;
1357
1358         return 0;
1359 }
1360
1361 /*
1362  * Enable/disable discard support on RAID set depending on
1363  * RAID level and discard properties of underlying RAID members.
1364  */
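     /*
      * For example (illustrative), the raid456 safeguard below can be
      * overridden at load time with "modprobe dm-raid devices_handle_discard_safely=Y"
      * or at runtime via /sys/module/dm_raid/parameters/devices_handle_discard_safely.
      */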
1365 static void configure_discard_support(struct dm_target *ti, struct raid_set *rs)
1366 {
1367         int i;
1368         bool raid456;
1369
1370         /* Assume discards not supported until after checks below. */
1371         ti->discards_supported = false;
1372
1373         /* RAID level 4,5,6 require discard_zeroes_data for data integrity! */
1374         raid456 = (rs->md.level == 4 || rs->md.level == 5 || rs->md.level == 6);
1375
1376         for (i = 0; i < rs->md.raid_disks; i++) {
1377                 struct request_queue *q;
1378
1379                 if (!rs->dev[i].rdev.bdev)
1380                         continue;
1381
1382                 q = bdev_get_queue(rs->dev[i].rdev.bdev);
1383                 if (!q || !blk_queue_discard(q))
1384                         return;
1385
1386                 if (raid456) {
1387                         if (!q->limits.discard_zeroes_data)
1388                                 return;
1389                         if (!devices_handle_discard_safely) {
1390                                 DMERR("raid456 discard support disabled due to discard_zeroes_data uncertainty.");
1391                                 DMERR("Set dm-raid.devices_handle_discard_safely=Y to override.");
1392                                 return;
1393                         }
1394                 }
1395         }
1396
1397         /* All RAID members properly support discards */
1398         ti->discards_supported = true;
1399
1400         /*
1401          * RAID1 and RAID10 personalities require bio splitting,
1402          * RAID0/4/5/6 don't and process large discard bios properly.
1403          */
1404         ti->split_discard_bios = !!(rs->md.level == 1 || rs->md.level == 10);
1405         ti->num_discard_bios = 1;
1406 }
1407
1408 /*
1409  * Construct a RAID0/1/10/4/5/6 mapping:
1410  * Args:
1411  *      <raid_type> <#raid_params> <raid_params>{0,}    \
1412  *      <#raid_devs> [<meta_dev1> <dev1>]{1,}
1413  *
1414  * <raid_params> varies by <raid_type>.  See 'parse_raid_params' for
1415  * details on possible <raid_params>.
1416  *
1417  * Userspace may initialize the metadata devices (and hence the superblocks) to
1418  * force recreation of the array based on the table parameters passed in.
1419  *
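      * Example (illustrative, modelled on Documentation/device-mapper/dm-raid.txt):
      *     0 1960893648 raid raid4 1 2048 5 - 8:17 - 8:33 - 8:49 - 8:65 - 8:81
      * i.e. a 5-device raid4 set with a 2048-sector chunk size and no separate
      * metadata devices ('-' placeholders).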
1420  */
1421 static int raid_ctr(struct dm_target *ti, unsigned argc, char **argv)
1422 {
1423         int r;
1424         struct raid_type *rt;
1425         unsigned num_raid_params, num_raid_devs;
1426         struct raid_set *rs = NULL;
1427         const char *arg;
1428         struct dm_arg_set as = { argc, argv }, as_nrd;
1429         struct dm_arg _args[] = {
1430                 { 0, as.argc, "Cannot understand number of raid parameters" },
1431                 { 1, 254, "Cannot understand number of raid devices parameters" }
1432         };
1433
1434         /* Must have <raid_type> */
1435         arg = dm_shift_arg(&as);
1436         if (!arg)
1437                 return ti_error_einval(ti, "No arguments");
1438
1439         rt = get_raid_type(arg);
1440         if (!rt)
1441                 return ti_error_einval(ti, "Unrecognised raid_type");
1442
1443         /* Must have <#raid_params> */
1444         if (dm_read_arg_group(_args, &as, &num_raid_params, &ti->error))
1445                 return -EINVAL;
1446
1447         /* number of raid device tuples <meta_dev data_dev> */
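             /*
              * Whatever follows the raid params is "<#raid_devs>" plus
              * <meta_dev data_dev> pairs, so at most (remaining argc - 1) / 2
              * device tuples can be present.
              */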
1448         as_nrd = as;
1449         dm_consume_args(&as_nrd, num_raid_params);
1450         _args[1].max = (as_nrd.argc - 1) / 2;
1451         if (dm_read_arg(_args + 1, &as_nrd, &num_raid_devs, &ti->error))
1452                 return -EINVAL;
1453
1454         if (!_in_range(num_raid_devs, 1, MAX_RAID_DEVICES))
1455                 return ti_error_einval(ti, "Invalid number of supplied raid devices");
1456
1457         rs = context_alloc(ti, rt, num_raid_devs);
1458         if (IS_ERR(rs))
1459                 return PTR_ERR(rs);
1460
1461         r = parse_raid_params(rs, &as, num_raid_params);
1462         if (r)
1463                 goto bad;
1464
1465         r = parse_dev_params(rs, &as);
1466         if (r)
1467                 goto bad;
1468
1469         rs->md.sync_super = super_sync;
1470         r = analyse_superblocks(ti, rs);
1471         if (r)
1472                 goto bad;
1473
1474         INIT_WORK(&rs->md.event_work, do_table_event);
1475         ti->private = rs;
1476         ti->num_flush_bios = 1;
1477
1478         /*
1479          * Disable/enable discard support on RAID set.
1480          */
1481         configure_discard_support(ti, rs);
1482
1483         /* Has to be held on running the array */
1484         mddev_lock_nointr(&rs->md);
1485         r = md_run(&rs->md);
1486         rs->md.in_sync = 0; /* Assume already marked dirty */
1487         mddev_unlock(&rs->md);
1488
1489         if (r) {
1490                 ti->error = "Failed to run raid array";
1491                 goto bad;
1492         }
1493
1494         if (ti->len != rs->md.array_sectors) {
1495                 r = ti_error_einval(ti, "Array size does not match requested target length");
1496                 goto size_mismatch;
1497         }
1498         rs->callbacks.congested_fn = raid_is_congested;
1499         dm_table_add_target_callbacks(ti->table, &rs->callbacks);
1500
1501         mddev_suspend(&rs->md);
1502         return 0;
1503
1504 size_mismatch:
1505         md_stop(&rs->md);
1506 bad:
1507         context_free(rs);
1508
1509         return r;
1510 }
1511
1512 static void raid_dtr(struct dm_target *ti)
1513 {
1514         struct raid_set *rs = ti->private;
1515
1516         list_del_init(&rs->callbacks.list);
1517         md_stop(&rs->md);
1518         context_free(rs);
1519 }
1520
1521 static int raid_map(struct dm_target *ti, struct bio *bio)
1522 {
1523         struct raid_set *rs = ti->private;
1524         struct mddev *mddev = &rs->md;
1525
1526         mddev->pers->make_request(mddev, bio);
1527
1528         return DM_MAPIO_SUBMITTED;
1529 }
1530
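     /*
      * Map the md recovery state bits to the sync-action string reported in
      * status output (mirroring md's sysfs "sync_action" names).
      */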
1531 static const char *decipher_sync_action(struct mddev *mddev)
1532 {
1533         if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
1534                 return "frozen";
1535
1536         if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
1537             (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) {
1538                 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
1539                         return "reshape";
1540
1541                 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
1542                         if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
1543                                 return "resync";
1544                         else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
1545                                 return "check";
1546                         return "repair";
1547                 }
1548
1549                 if (test_bit(MD_RECOVERY_RECOVER, &mddev->recovery))
1550                         return "recover";
1551         }
1552
1553         return "idle";
1554 }
1555
1556 static void raid_status(struct dm_target *ti, status_type_t type,
1557                         unsigned status_flags, char *result, unsigned maxlen)
1558 {
1559         struct raid_set *rs = ti->private;
1560         unsigned raid_param_cnt = 1; /* at least 1 for chunksize */
1561         unsigned sz = 0;
1562         int i, array_in_sync = 0;
1563         sector_t sync;
1564
1565         switch (type) {
1566         case STATUSTYPE_INFO:
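                     /*
                      * Illustrative INFO output for a healthy two-leg raid1:
                      *   raid1 2 AA 125829120/125829120 idle 0
                      * i.e. <raid_type> <#devs> <health_chars>
                      *      <resync'd>/<total> <sync_action> <mismatch_cnt>
                      */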
1567                 DMEMIT("%s %d ", rs->raid_type->name, rs->md.raid_disks);
1568
1569                 if (rs->raid_type->level) {
1570                         if (test_bit(MD_RECOVERY_RUNNING, &rs->md.recovery))
1571                                 sync = rs->md.curr_resync_completed;
1572                         else
1573                                 sync = rs->md.recovery_cp;
1574
1575                         if (sync >= rs->md.resync_max_sectors) {
1576                                 /*
1577                                  * Sync complete.
1578                                  */
1579                                 array_in_sync = 1;
1580                                 sync = rs->md.resync_max_sectors;
1581                         } else if (test_bit(MD_RECOVERY_REQUESTED, &rs->md.recovery)) {
1582                                 /*
1583                                  * If "check" or "repair" is occurring, the array has
1584                                  * undergone an initial sync and the health characters
1585                                  * should not be 'a' anymore.
1586                                  */
1587                                 array_in_sync = 1;
1588                         } else {
1589                                 /*
1590                                  * The array may be doing an initial sync, or it may
1591                                  * be rebuilding individual components.  If all the
1592                                  * devices are In_sync, then it is the array that is
1593                                  * being initialized.
1594                                  */
1595                                 for (i = 0; i < rs->md.raid_disks; i++)
1596                                         if (!test_bit(In_sync, &rs->dev[i].rdev.flags))
1597                                                 array_in_sync = 1;
1598                         }
1599                 } else {
1600                         /* RAID0 */
1601                         array_in_sync = 1;
1602                         sync = rs->md.resync_max_sectors;
1603                 }
1604
1605                 /*
1606                  * Status characters:
1607                  *  'D' = Dead/Failed device
1608                  *  'a' = Alive but not in-sync
1609                  *  'A' = Alive and in-sync
1610                  */
1611                 for (i = 0; i < rs->md.raid_disks; i++) {
1612                         if (test_bit(Faulty, &rs->dev[i].rdev.flags))
1613                                 DMEMIT("D");
1614                         else if (!array_in_sync ||
1615                                  !test_bit(In_sync, &rs->dev[i].rdev.flags))
1616                                 DMEMIT("a");
1617                         else
1618                                 DMEMIT("A");
1619                 }
1620
1621                 /*
1622                  * In-sync ratio:
1623                  *  The in-sync ratio shows the progress of:
1624                  *   - Initializing the array
1625                  *   - Rebuilding a subset of devices of the array
1626                  *  The user can distinguish between the two by referring
1627                  *  to the status characters.
1628                  */
1629                 DMEMIT(" %llu/%llu",
1630                        (unsigned long long) sync,
1631                        (unsigned long long) rs->md.resync_max_sectors);
1632
1633                 /*
1634                  * Sync action:
1635                  *   See Documentation/device-mapper/dm-raid.txt for
1636                  *   information on each of these states.
1637                  */
1638                 DMEMIT(" %s", decipher_sync_action(&rs->md));
1639
1640                 /*
1641                  * resync_mismatches/mismatch_cnt
1642                  *   This field shows the number of discrepancies found when
1643                  *   performing a "check" of the array.
1644                  */
1645                 DMEMIT(" %llu",
1646                        (strcmp(rs->md.last_sync_action, "check")) ? 0 :
1647                        (unsigned long long)
1648                        atomic64_read(&rs->md.resync_mismatches));
1649                 break;
1650         case STATUSTYPE_TABLE:
1651                 /* The string you would use to construct this array */
1652                 for (i = 0; i < rs->md.raid_disks; i++) {
1653                         if ((rs->ctr_flags & CTR_FLAG_REBUILD) &&
1654                             rs->dev[i].data_dev &&
1655                             !test_bit(In_sync, &rs->dev[i].rdev.flags))
1656                                 raid_param_cnt += 2; /* for rebuilds */
1657                         if (rs->dev[i].data_dev &&
1658                             test_bit(WriteMostly, &rs->dev[i].rdev.flags))
1659                                 raid_param_cnt += 2;
1660                 }
1661
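                     /*
                      * Every other ctr flag is assumed to contribute an
                      * "<option> <value>" pair (two args); 'sync'/'nosync'
                      * take no value, hence the correction below.
                      */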
1662                 raid_param_cnt += (hweight32(rs->ctr_flags & ~CTR_FLAG_REBUILD) * 2);
1663                 if (rs->ctr_flags & (CTR_FLAG_SYNC | CTR_FLAG_NOSYNC))
1664                         raid_param_cnt--;
1665
1666                 DMEMIT("%s %u %u", rs->raid_type->name,
1667                        raid_param_cnt, rs->md.chunk_sectors);
1668
1669                 if ((rs->ctr_flags & CTR_FLAG_SYNC) &&
1670                     (rs->md.recovery_cp == MaxSector))
1671                         DMEMIT(" sync");
1672                 if (rs->ctr_flags & CTR_FLAG_NOSYNC)
1673                         DMEMIT(" nosync");
1674
1675                 for (i = 0; i < rs->md.raid_disks; i++)
1676                         if ((rs->ctr_flags & CTR_FLAG_REBUILD) &&
1677                             rs->dev[i].data_dev &&
1678                             !test_bit(In_sync, &rs->dev[i].rdev.flags))
1679                                 DMEMIT(" rebuild %u", i);
1680
1681                 if (rs->ctr_flags & CTR_FLAG_DAEMON_SLEEP)
1682                         DMEMIT(" daemon_sleep %lu",
1683                                rs->md.bitmap_info.daemon_sleep);
1684
1685                 if (rs->ctr_flags & CTR_FLAG_MIN_RECOVERY_RATE)
1686                         DMEMIT(" min_recovery_rate %d", rs->md.sync_speed_min);
1687
1688                 if (rs->ctr_flags & CTR_FLAG_MAX_RECOVERY_RATE)
1689                         DMEMIT(" max_recovery_rate %d", rs->md.sync_speed_max);
1690
1691                 for (i = 0; i < rs->md.raid_disks; i++)
1692                         if (rs->dev[i].data_dev &&
1693                             test_bit(WriteMostly, &rs->dev[i].rdev.flags))
1694                                 DMEMIT(" write_mostly %u", i);
1695
1696                 if (rs->ctr_flags & CTR_FLAG_MAX_WRITE_BEHIND)
1697                         DMEMIT(" max_write_behind %lu",
1698                                rs->md.bitmap_info.max_write_behind);
1699
1700                 if (rs->ctr_flags & CTR_FLAG_STRIPE_CACHE) {
1701                         struct r5conf *conf = rs->md.private;
1702
1703                         /* convert from kiB to sectors */
1704                         DMEMIT(" stripe_cache %d",
1705                                conf ? conf->max_nr_stripes * 2 : 0);
1706                 }
1707
1708                 if (rs->ctr_flags & CTR_FLAG_REGION_SIZE)
1709                         DMEMIT(" region_size %lu",
1710                                rs->md.bitmap_info.chunksize >> 9);
1711
1712                 if (rs->ctr_flags & CTR_FLAG_RAID10_COPIES)
1713                         DMEMIT(" raid10_copies %u",
1714                                raid10_md_layout_to_copies(rs->md.layout));
1715
1716                 if (rs->ctr_flags & CTR_FLAG_RAID10_FORMAT)
1717                         DMEMIT(" raid10_format %s",
1718                                raid10_md_layout_to_format(rs->md.layout));
1719
1720                 DMEMIT(" %d", rs->md.raid_disks);
1721                 for (i = 0; i < rs->md.raid_disks; i++) {
1722                         if (rs->dev[i].meta_dev)
1723                                 DMEMIT(" %s", rs->dev[i].meta_dev->name);
1724                         else
1725                                 DMEMIT(" -");
1726
1727                         if (rs->dev[i].data_dev)
1728                                 DMEMIT(" %s", rs->dev[i].data_dev->name);
1729                         else
1730                                 DMEMIT(" -");
1731                 }
1732         }
1733 }
1734
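     /*
      * Handle "dmsetup message <mapped_device> 0 <action>" where <action> is
      * one of frozen, idle, resync, recover, check or repair, e.g.
      * (illustrative) "dmsetup message my_raid 0 check"; "reshape" is
      * explicitly rejected.
      */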
1735 static int raid_message(struct dm_target *ti, unsigned argc, char **argv)
1736 {
1737         struct raid_set *rs = ti->private;
1738         struct mddev *mddev = &rs->md;
1739
1740         if (!strcasecmp(argv[0], "reshape")) {
1741                 DMERR("Reshape not supported.");
1742                 return -EINVAL;
1743         }
1744
1745         if (!mddev->pers || !mddev->pers->sync_request)
1746                 return -EINVAL;
1747
1748         if (!strcasecmp(argv[0], "frozen"))
1749                 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
1750         else
1751                 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
1752
1753         if (!strcasecmp(argv[0], "idle") || !strcasecmp(argv[0], "frozen")) {
1754                 if (mddev->sync_thread) {
1755                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1756                         md_reap_sync_thread(mddev);
1757                 }
1758         } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
1759                    test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
1760                 return -EBUSY;
1761         else if (!strcasecmp(argv[0], "resync"))
1762                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
1763         else if (!strcasecmp(argv[0], "recover")) {
1764                 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
1765                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
1766         } else {
1767                 if (!strcasecmp(argv[0], "check"))
1768                         set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
1769                 else if (!!strcasecmp(argv[0], "repair"))
1770                         return -EINVAL;
1771                 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
1772                 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
1773         }
1774         if (mddev->ro == 2) {
1775                 /* A write to sync_action is enough to justify
1776                  * canceling read-auto mode
1777                  */
1778                 mddev->ro = 0;
1779                 if (!mddev->suspended)
1780                         md_wakeup_thread(mddev->sync_thread);
1781         }
1782         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
1783         if (!mddev->suspended)
1784                 md_wakeup_thread(mddev->thread);
1785
1786         return 0;
1787 }
1788
1789 static int raid_iterate_devices(struct dm_target *ti,
1790                                 iterate_devices_callout_fn fn, void *data)
1791 {
1792         struct raid_set *rs = ti->private;
1793         unsigned i;
1794         int r = 0;
1795
1796         for (i = 0; !r && i < rs->md.raid_disks; i++)
1797                 if (rs->dev[i].data_dev)
1798                         r = fn(ti,
1799                                  rs->dev[i].data_dev,
1800                                  0, /* No offset on data devs */
1801                                  rs->md.dev_sectors,
1802                                  data);
1803
1804         return r;
1805 }
1806
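     /*
      * Report I/O hints derived from the chunk size: minimum I/O is one chunk
      * and optimal I/O is taken to be one full stripe, i.e. chunk size times
      * the number of data disks (raid_disks - max_degraded).
      */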
1807 static void raid_io_hints(struct dm_target *ti, struct queue_limits *limits)
1808 {
1809         struct raid_set *rs = ti->private;
1810         unsigned chunk_size = rs->md.chunk_sectors << 9;
1811         struct r5conf *conf = rs->md.private;
1812
1813         blk_limits_io_min(limits, chunk_size);
1814         blk_limits_io_opt(limits, chunk_size * (conf->raid_disks - conf->max_degraded));
1815 }
1816
1817 static void raid_presuspend(struct dm_target *ti)
1818 {
1819         struct raid_set *rs = ti->private;
1820
1821         md_stop_writes(&rs->md);
1822 }
1823
1824 static void raid_postsuspend(struct dm_target *ti)
1825 {
1826         struct raid_set *rs = ti->private;
1827
1828         mddev_suspend(&rs->md);
1829 }
1830
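     /*
      * Scan the set for devices marked Faulty whose superblock is still
      * readable and try to re-add them to the array; any device revived this
      * way is also cleared from the failed_devices bitmask kept in each
      * superblock.
      */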
1831 static void attempt_restore_of_faulty_devices(struct raid_set *rs)
1832 {
1833         int i;
1834         uint64_t failed_devices, cleared_failed_devices = 0;
1835         unsigned long flags;
1836         struct dm_raid_superblock *sb;
1837         struct md_rdev *r;
1838
1839         for (i = 0; i < rs->md.raid_disks; i++) {
1840                 r = &rs->dev[i].rdev;
1841                 if (test_bit(Faulty, &r->flags) && r->sb_page &&
1842                     sync_page_io(r, 0, r->sb_size, r->sb_page, REQ_OP_READ, 0,
1843                                  1)) {
1844                         DMINFO("Faulty %s device #%d has readable super block."
1845                                "  Attempting to revive it.",
1846                                rs->raid_type->name, i);
1847
1848                         /*
1849                          * Faulty bit may be set, but sometimes the array can
1850                          * be suspended before the personalities can respond
1851                          * by removing the device from the array (i.e. calling
1852                          * 'hot_remove_disk').  If they haven't yet removed
1853                          * the failed device, its 'raid_disk' number will be
1854                          * '>= 0' - meaning we must call this function
1855                          * ourselves.
1856                          */
1857                         if ((r->raid_disk >= 0) &&
1858                             (r->mddev->pers->hot_remove_disk(r->mddev, r) != 0))
1859                                 /* Failed to revive this device, try next */
1860                                 continue;
1861
1862                         r->raid_disk = i;
1863                         r->saved_raid_disk = i;
1864                         flags = r->flags;
1865                         clear_bit(Faulty, &r->flags);
1866                         clear_bit(WriteErrorSeen, &r->flags);
1867                         clear_bit(In_sync, &r->flags);
1868                         if (r->mddev->pers->hot_add_disk(r->mddev, r)) {
1869                                 r->raid_disk = -1;
1870                                 r->saved_raid_disk = -1;
1871                                 r->flags = flags;
1872                         } else {
1873                                 r->recovery_offset = 0;
1874                                 cleared_failed_devices |= 1ULL << i;
1875                         }
1876                 }
1877         }
1878         if (cleared_failed_devices) {
1879                 rdev_for_each(r, &rs->md) {
1880                         sb = page_address(r->sb_page);
1881                         failed_devices = le64_to_cpu(sb->failed_devices);
1882                         failed_devices &= ~cleared_failed_devices;
1883                         sb->failed_devices = cpu_to_le64(failed_devices);
1884                 }
1885         }
1886 }
1887
1888 static void raid_resume(struct dm_target *ti)
1889 {
1890         struct raid_set *rs = ti->private;
1891
1892         if (rs->raid_type->level) {
1893                 set_bit(MD_CHANGE_DEVS, &rs->md.flags);
1894
1895                 if (!rs->bitmap_loaded) {
1896                         bitmap_load(&rs->md);
1897                         rs->bitmap_loaded = 1;
1898                 } else {
1899                         /*
1900                          * A secondary resume while the device is active.
1901                          * Take this opportunity to check whether any failed
1902                          * devices are reachable again.
1903                          */
1904                         attempt_restore_of_faulty_devices(rs);
1905                 }
1906
1907                 clear_bit(MD_RECOVERY_FROZEN, &rs->md.recovery);
1908         }
1909
1910         mddev_resume(&rs->md);
1911 }
1912
1913 static struct target_type raid_target = {
1914         .name = "raid",
1915         .version = {1, 8, 1},
1916         .module = THIS_MODULE,
1917         .ctr = raid_ctr,
1918         .dtr = raid_dtr,
1919         .map = raid_map,
1920         .status = raid_status,
1921         .message = raid_message,
1922         .iterate_devices = raid_iterate_devices,
1923         .io_hints = raid_io_hints,
1924         .presuspend = raid_presuspend,
1925         .postsuspend = raid_postsuspend,
1926         .resume = raid_resume,
1927 };
1928
1929 static int __init dm_raid_init(void)
1930 {
1931         DMINFO("Loading target version %u.%u.%u",
1932                raid_target.version[0],
1933                raid_target.version[1],
1934                raid_target.version[2]);
1935         return dm_register_target(&raid_target);
1936 }
1937
1938 static void __exit dm_raid_exit(void)
1939 {
1940         dm_unregister_target(&raid_target);
1941 }
1942
1943 module_init(dm_raid_init);
1944 module_exit(dm_raid_exit);
1945
1946 module_param(devices_handle_discard_safely, bool, 0644);
1947 MODULE_PARM_DESC(devices_handle_discard_safely,
1948                  "Set to Y if all devices in each array reliably return zeroes on reads from discarded regions");
1949
1950 MODULE_DESCRIPTION(DM_NAME " raid0/1/10/4/5/6 target");
1951 MODULE_ALIAS("dm-raid1");
1952 MODULE_ALIAS("dm-raid10");
1953 MODULE_ALIAS("dm-raid4");
1954 MODULE_ALIAS("dm-raid5");
1955 MODULE_ALIAS("dm-raid6");
1956 MODULE_AUTHOR("Neil Brown <dm-devel@redhat.com>");
1957 MODULE_LICENSE("GPL");