// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * raid1.c : Multiple Devices driver for Linux
 *
 * Copyright (C) 1999, 2000, 2001 Ingo Molnar, Red Hat
 *
 * Copyright (C) 1996, 1997, 1998 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *
 * RAID-1 management functions.
 *
 * Better read-balancing code written by Mika Kuoppala <miku@iki.fi>, 2000
 *
 * Fixes to reconstruction by Jakob Østergaard <jakob@ostenfeld.dk>
 * Various fixes by Neil Brown <neilb@cse.unsw.edu.au>
 *
 * Changes by Peter T. Breuer <ptb@it.uc3m.es> 31/1/2003 to support
 * bitmapped intelligence in resync:
 *
 *      - bitmap marked during normal i/o
 *      - bitmap used to skip nondirty blocks during sync
 *
 * Additions to bitmap code, (C) 2003-2004 Paul Clements, SteelEye Technology:
 * - persistent bitmap code
 */

#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/ratelimit.h>
#include <linux/interval_tree_generic.h>

#include <trace/events/block.h>

#include "md.h"
#include "raid1.h"
#include "md-bitmap.h"

#define UNSUPPORTED_MDDEV_FLAGS         \
        ((1L << MD_HAS_JOURNAL) |       \
         (1L << MD_JOURNAL_CLEAN) |     \
         (1L << MD_HAS_PPL) |           \
         (1L << MD_HAS_MULTIPLE_PPLS))

static void allow_barrier(struct r1conf *conf, sector_t sector_nr);
static void lower_barrier(struct r1conf *conf, sector_t sector_nr);

#define raid1_log(md, fmt, args...)                             \
        do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid1 " fmt, ##args); } while (0)

#include "raid1-10.c"

#define START(node) ((node)->start)
#define LAST(node) ((node)->last)
INTERVAL_TREE_DEFINE(struct serial_info, node, sector_t, _subtree_last,
                     START, LAST, static inline, raid1_rb);

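/*
 * INTERVAL_TREE_DEFINE() above generates the static helpers used by the
 * serialization code below: raid1_rb_insert(), raid1_rb_remove(),
 * raid1_rb_iter_first() and raid1_rb_iter_next(), which manage
 * 'struct serial_info' nodes keyed by the [start, last] sector range of
 * an in-flight write.
 */
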
static int check_and_add_serial(struct md_rdev *rdev, struct r1bio *r1_bio,
                                struct serial_info *si, int idx)
{
        unsigned long flags;
        int ret = 0;
        sector_t lo = r1_bio->sector;
        sector_t hi = lo + r1_bio->sectors;
        struct serial_in_rdev *serial = &rdev->serial[idx];

        spin_lock_irqsave(&serial->serial_lock, flags);
        /* collision happened */
        if (raid1_rb_iter_first(&serial->serial_rb, lo, hi))
                ret = -EBUSY;
        else {
                si->start = lo;
                si->last = hi;
                raid1_rb_insert(si, &serial->serial_rb);
        }
        spin_unlock_irqrestore(&serial->serial_lock, flags);

        return ret;
}

static void wait_for_serialization(struct md_rdev *rdev, struct r1bio *r1_bio)
{
        struct mddev *mddev = rdev->mddev;
        struct serial_info *si;
        int idx = sector_to_idx(r1_bio->sector);
        struct serial_in_rdev *serial = &rdev->serial[idx];

        if (WARN_ON(!mddev->serial_info_pool))
                return;
        si = mempool_alloc(mddev->serial_info_pool, GFP_NOIO);
        wait_event(serial->serial_io_wait,
                   check_and_add_serial(rdev, r1_bio, si, idx) == 0);
}

static void remove_serial(struct md_rdev *rdev, sector_t lo, sector_t hi)
{
        struct serial_info *si;
        unsigned long flags;
        int found = 0;
        struct mddev *mddev = rdev->mddev;
        int idx = sector_to_idx(lo);
        struct serial_in_rdev *serial = &rdev->serial[idx];

        spin_lock_irqsave(&serial->serial_lock, flags);
        for (si = raid1_rb_iter_first(&serial->serial_rb, lo, hi);
             si; si = raid1_rb_iter_next(si, lo, hi)) {
                if (si->start == lo && si->last == hi) {
                        raid1_rb_remove(si, &serial->serial_rb);
                        mempool_free(si, mddev->serial_info_pool);
                        found = 1;
                        break;
                }
        }
        if (!found)
                WARN(1, "The write IO is not recorded for serialization\n");
        spin_unlock_irqrestore(&serial->serial_lock, flags);
        wake_up(&serial->serial_io_wait);
}
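
/*
 * Illustrative pairing (not itself part of the driver): a serialized
 * write takes the range lock before submission and drops it from its
 * completion path, roughly:
 *
 *	wait_for_serialization(rdev, r1_bio);	(blocks while ranges overlap)
 *	...submit the write bio...
 *	remove_serial(rdev, lo, hi);		(wakes overlapping waiters)
 *
 * In this file the release happens in raid1_end_write_request() once
 * the write completes, when serialization is enabled for the rdev.
 */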

/*
 * for resync bio, r1bio pointer can be retrieved from the per-bio
 * 'struct resync_pages'.
 */
static inline struct r1bio *get_resync_r1bio(struct bio *bio)
{
        return get_resync_pages(bio)->raid_bio;
}

static void *r1bio_pool_alloc(gfp_t gfp_flags, void *data)
{
        struct pool_info *pi = data;
        int size = offsetof(struct r1bio, bios[pi->raid_disks]);

        /* allocate a r1bio with room for raid_disks entries in the bios array */
        return kzalloc(size, gfp_flags);
}

#define RESYNC_DEPTH 32
#define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9)
#define RESYNC_WINDOW (RESYNC_BLOCK_SIZE * RESYNC_DEPTH)
#define RESYNC_WINDOW_SECTORS (RESYNC_WINDOW >> 9)
#define CLUSTER_RESYNC_WINDOW (16 * RESYNC_WINDOW)
#define CLUSTER_RESYNC_WINDOW_SECTORS (CLUSTER_RESYNC_WINDOW >> 9)

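/*
 * Worked example for the macros above, assuming RESYNC_BLOCK_SIZE is
 * 64KiB (its value in raid1-10.c): RESYNC_SECTORS = 128,
 * RESYNC_WINDOW = 32 * 64KiB = 2MiB, RESYNC_WINDOW_SECTORS = 4096, and
 * the clustered window is 16 times larger: 32MiB, i.e. 65536 sectors.
 */
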
static void *r1buf_pool_alloc(gfp_t gfp_flags, void *data)
{
        struct pool_info *pi = data;
        struct r1bio *r1_bio;
        struct bio *bio;
        int need_pages;
        int j;
        struct resync_pages *rps;

        r1_bio = r1bio_pool_alloc(gfp_flags, pi);
        if (!r1_bio)
                return NULL;

        rps = kmalloc_array(pi->raid_disks, sizeof(struct resync_pages),
                            gfp_flags);
        if (!rps)
                goto out_free_r1bio;

        /*
         * Allocate bios : 1 for reading, n-1 for writing
         */
        for (j = pi->raid_disks ; j-- ; ) {
                bio = bio_kmalloc(gfp_flags, RESYNC_PAGES);
                if (!bio)
                        goto out_free_bio;
                r1_bio->bios[j] = bio;
        }
        /*
         * Allocate RESYNC_PAGES data pages and attach them to
         * the first bio.
         * If this is a user-requested check/repair, allocate
         * RESYNC_PAGES for each bio.
         */
        if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery))
                need_pages = pi->raid_disks;
        else
                need_pages = 1;
        for (j = 0; j < pi->raid_disks; j++) {
                struct resync_pages *rp = &rps[j];

                bio = r1_bio->bios[j];

                if (j < need_pages) {
                        if (resync_alloc_pages(rp, gfp_flags))
                                goto out_free_pages;
                } else {
                        memcpy(rp, &rps[0], sizeof(*rp));
                        resync_get_all_pages(rp);
                }

                rp->raid_bio = r1_bio;
                bio->bi_private = rp;
        }

        r1_bio->master_bio = NULL;

        return r1_bio;

out_free_pages:
        while (--j >= 0)
                resync_free_pages(&rps[j]);

out_free_bio:
        while (++j < pi->raid_disks)
                bio_put(r1_bio->bios[j]);
        kfree(rps);

out_free_r1bio:
        rbio_pool_free(r1_bio, data);
        return NULL;
}

static void r1buf_pool_free(void *__r1_bio, void *data)
{
        struct pool_info *pi = data;
        int i;
        struct r1bio *r1bio = __r1_bio;
        struct resync_pages *rp = NULL;

        for (i = pi->raid_disks; i--; ) {
                rp = get_resync_pages(r1bio->bios[i]);
                resync_free_pages(rp);
                bio_put(r1bio->bios[i]);
        }

        /* resync pages array stored in the 1st bio's .bi_private */
        kfree(rp);

        rbio_pool_free(r1bio, data);
}
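
/*
 * Note: r1buf_pool_alloc()/r1buf_pool_free() above serve as the element
 * constructor/destructor of conf->r1buf_pool, the mempool backing
 * resync/recovery r1bios (initialized elsewhere in this file); put_buf()
 * below returns buffers to that pool.
 */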

static void put_all_bios(struct r1conf *conf, struct r1bio *r1_bio)
{
        int i;

        for (i = 0; i < conf->raid_disks * 2; i++) {
                struct bio **bio = r1_bio->bios + i;
                if (!BIO_SPECIAL(*bio))
                        bio_put(*bio);
                *bio = NULL;
        }
}

static void free_r1bio(struct r1bio *r1_bio)
{
        struct r1conf *conf = r1_bio->mddev->private;

        put_all_bios(conf, r1_bio);
        mempool_free(r1_bio, &conf->r1bio_pool);
}

static void put_buf(struct r1bio *r1_bio)
{
        struct r1conf *conf = r1_bio->mddev->private;
        sector_t sect = r1_bio->sector;
        int i;

        for (i = 0; i < conf->raid_disks * 2; i++) {
                struct bio *bio = r1_bio->bios[i];
                if (bio->bi_end_io)
                        rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev);
        }

        mempool_free(r1_bio, &conf->r1buf_pool);

        lower_barrier(conf, sect);
}

static void reschedule_retry(struct r1bio *r1_bio)
{
        unsigned long flags;
        struct mddev *mddev = r1_bio->mddev;
        struct r1conf *conf = mddev->private;
        int idx;

        idx = sector_to_idx(r1_bio->sector);
        spin_lock_irqsave(&conf->device_lock, flags);
        list_add(&r1_bio->retry_list, &conf->retry_list);
        atomic_inc(&conf->nr_queued[idx]);
        spin_unlock_irqrestore(&conf->device_lock, flags);

        wake_up(&conf->wait_barrier);
        md_wakeup_thread(mddev->thread);
}

/*
 * raid_end_bio_io() is called when we have finished servicing a mirrored
 * operation and are ready to return a success/failure code to the buffer
 * cache layer.
 */
static void call_bio_endio(struct r1bio *r1_bio)
{
        struct bio *bio = r1_bio->master_bio;

        if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
                bio->bi_status = BLK_STS_IOERR;

        bio_endio(bio);
}

static void raid_end_bio_io(struct r1bio *r1_bio)
{
        struct bio *bio = r1_bio->master_bio;
        struct r1conf *conf = r1_bio->mddev->private;

        /* if nobody has done the final endio yet, do it now */
        if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
                pr_debug("raid1: sync end %s on sectors %llu-%llu\n",
                         (bio_data_dir(bio) == WRITE) ? "write" : "read",
                         (unsigned long long) bio->bi_iter.bi_sector,
                         (unsigned long long) bio_end_sector(bio) - 1);

                call_bio_endio(r1_bio);
        }
        /*
         * Wake up any possible resync thread that waits for the device
         * to go idle.  All I/Os, even write-behind writes, are done.
         */
        allow_barrier(conf, r1_bio->sector);

        free_r1bio(r1_bio);
}

/*
 * Update disk head position estimator based on IRQ completion info.
 */
static inline void update_head_pos(int disk, struct r1bio *r1_bio)
{
        struct r1conf *conf = r1_bio->mddev->private;

        conf->mirrors[disk].head_position =
                r1_bio->sector + (r1_bio->sectors);
}

/*
 * Find the disk number which triggered given bio
 */
static int find_bio_disk(struct r1bio *r1_bio, struct bio *bio)
{
        int mirror;
        struct r1conf *conf = r1_bio->mddev->private;
        int raid_disks = conf->raid_disks;

        for (mirror = 0; mirror < raid_disks * 2; mirror++)
                if (r1_bio->bios[mirror] == bio)
                        break;

        BUG_ON(mirror == raid_disks * 2);
        update_head_pos(mirror, r1_bio);

        return mirror;
}

static void raid1_end_read_request(struct bio *bio)
{
        int uptodate = !bio->bi_status;
        struct r1bio *r1_bio = bio->bi_private;
        struct r1conf *conf = r1_bio->mddev->private;
        struct md_rdev *rdev = conf->mirrors[r1_bio->read_disk].rdev;

        /*
         * this branch is our 'one mirror IO has finished' event handler:
         */
        update_head_pos(r1_bio->read_disk, r1_bio);

        if (uptodate)
                set_bit(R1BIO_Uptodate, &r1_bio->state);
        else if (test_bit(FailFast, &rdev->flags) &&
                 test_bit(R1BIO_FailFast, &r1_bio->state))
                /* This was a fail-fast read so we definitely
                 * want to retry */
                ;
        else {
                /* If all other devices have failed, we want to return
                 * the error upwards rather than fail the last device.
                 * Here we redefine "uptodate" to mean "Don't want to retry"
                 */
                unsigned long flags;
                spin_lock_irqsave(&conf->device_lock, flags);
                if (r1_bio->mddev->degraded == conf->raid_disks ||
                    (r1_bio->mddev->degraded == conf->raid_disks-1 &&
                     test_bit(In_sync, &rdev->flags)))
                        uptodate = 1;
                spin_unlock_irqrestore(&conf->device_lock, flags);
        }

        if (uptodate) {
                raid_end_bio_io(r1_bio);
                rdev_dec_pending(rdev, conf->mddev);
        } else {
                /*
                 * oops, read error:
                 */
                char b[BDEVNAME_SIZE];
                pr_err_ratelimited("md/raid1:%s: %s: rescheduling sector %llu\n",
                                   mdname(conf->mddev),
                                   bdevname(rdev->bdev, b),
                                   (unsigned long long)r1_bio->sector);
                set_bit(R1BIO_ReadError, &r1_bio->state);
                reschedule_retry(r1_bio);
                /* don't drop the reference on read_disk yet */
        }
}

static void close_write(struct r1bio *r1_bio)
{
        /* it really is the end of this request */
        if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
                bio_free_pages(r1_bio->behind_master_bio);
                bio_put(r1_bio->behind_master_bio);
                r1_bio->behind_master_bio = NULL;
        }
        /* clear the bitmap if all writes complete successfully */
        md_bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector,
                           r1_bio->sectors,
                           !test_bit(R1BIO_Degraded, &r1_bio->state),
                           test_bit(R1BIO_BehindIO, &r1_bio->state));
        md_write_end(r1_bio->mddev);
}

static void r1_bio_write_done(struct r1bio *r1_bio)
{
        if (!atomic_dec_and_test(&r1_bio->remaining))
                return;

        if (test_bit(R1BIO_WriteError, &r1_bio->state))
                reschedule_retry(r1_bio);
        else {
                close_write(r1_bio);
                if (test_bit(R1BIO_MadeGood, &r1_bio->state))
                        reschedule_retry(r1_bio);
                else
                        raid_end_bio_io(r1_bio);
        }
}

static void raid1_end_write_request(struct bio *bio)
{
        struct r1bio *r1_bio = bio->bi_private;
        int behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
        struct r1conf *conf = r1_bio->mddev->private;
        struct bio *to_put = NULL;
        int mirror = find_bio_disk(r1_bio, bio);
        struct md_rdev *rdev = conf->mirrors[mirror].rdev;
        bool discard_error;
        sector_t lo = r1_bio->sector;
        sector_t hi = r1_bio->sector + r1_bio->sectors;

        discard_error = bio->bi_status && bio_op(bio) == REQ_OP_DISCARD;

        /*
         * 'one mirror IO has finished' event handler:
         */
        if (bio->bi_status && !discard_error) {
                set_bit(WriteErrorSeen, &rdev->flags);
                if (!test_and_set_bit(WantReplacement, &rdev->flags))
                        set_bit(MD_RECOVERY_NEEDED, &
                                conf->mddev->recovery);

                if (test_bit(FailFast, &rdev->flags) &&
                    (bio->bi_opf & MD_FAILFAST) &&
                    /* We never try FailFast to WriteMostly devices */
                    !test_bit(WriteMostly, &rdev->flags)) {
                        md_error(r1_bio->mddev, rdev);
                }

                /*
                 * When the device is faulty, it is not necessary to
                 * handle the write error.
                 * For failfast, this is the only remaining device,
                 * so we need to retry the write without FailFast.
                 */
                if (!test_bit(Faulty, &rdev->flags))
                        set_bit(R1BIO_WriteError, &r1_bio->state);
                else {
                        /* Finished with this branch */
                        r1_bio->bios[mirror] = NULL;
                        to_put = bio;
                }
        } else {
                /*
                 * Set R1BIO_Uptodate in our master bio, so that we
                 * will return a good error code to the higher
                 * levels even if IO on some other mirrored buffer
                 * fails.
                 *
                 * The 'master' represents the composite IO operation
                 * to user-side. So if something waits for IO, then it
                 * will wait for the 'master' bio.
                 */
                sector_t first_bad;
                int bad_sectors;

                r1_bio->bios[mirror] = NULL;
                to_put = bio;
                /*
                 * Do not set R1BIO_Uptodate if the current device is
                 * rebuilding or Faulty. This is because we cannot use
                 * such device for properly reading the data back (we could
                 * potentially use it, if the current write would have fallen
                 * before rdev->recovery_offset, but for simplicity we don't
                 * check this here).
                 */
                if (test_bit(In_sync, &rdev->flags) &&
                    !test_bit(Faulty, &rdev->flags))
                        set_bit(R1BIO_Uptodate, &r1_bio->state);

                /* Maybe we can clear some bad blocks. */
                if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
                                &first_bad, &bad_sectors) && !discard_error) {
                        r1_bio->bios[mirror] = IO_MADE_GOOD;
                        set_bit(R1BIO_MadeGood, &r1_bio->state);
                }
        }

        if (behind) {
                if (test_bit(CollisionCheck, &rdev->flags))
                        remove_serial(rdev, lo, hi);
                if (test_bit(WriteMostly, &rdev->flags))
                        atomic_dec(&r1_bio->behind_remaining);

                /*
                 * In behind mode, we ACK the master bio once the I/O
                 * has safely reached all non-writemostly
                 * disks. Setting the Returned bit ensures that this
                 * gets done only once -- we don't ever want to return
                 * -EIO here, instead we'll wait.
                 */
                if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) &&
                    test_bit(R1BIO_Uptodate, &r1_bio->state)) {
                        /* Maybe we can return now */
                        if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
                                struct bio *mbio = r1_bio->master_bio;
                                pr_debug("raid1: behind end write sectors"
                                         " %llu-%llu\n",
                                         (unsigned long long) mbio->bi_iter.bi_sector,
                                         (unsigned long long) bio_end_sector(mbio) - 1);
                                call_bio_endio(r1_bio);
                        }
                }
        } else if (rdev->mddev->serialize_policy)
                remove_serial(rdev, lo, hi);
        if (r1_bio->bios[mirror] == NULL)
                rdev_dec_pending(rdev, conf->mddev);

        /*
         * Let's see if all mirrored write operations have finished
         * already.
         */
        r1_bio_write_done(r1_bio);

        if (to_put)
                bio_put(to_put);
}

static sector_t align_to_barrier_unit_end(sector_t start_sector,
                                          sector_t sectors)
{
        sector_t len;

        WARN_ON(sectors == 0);
        /*
         * len is the number of sectors from start_sector to end of the
         * barrier unit which start_sector belongs to.
         */
        len = round_up(start_sector + 1, BARRIER_UNIT_SECTOR_SIZE) -
              start_sector;

        if (len > sectors)
                len = sectors;

        return len;
}
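
/*
 * Example (illustrative): with BARRIER_UNIT_SECTOR_SIZE of 1 << 17
 * sectors (64MiB, per raid1.h), a request starting at sector 130000 for
 * 4096 sectors is clipped to round_up(130001, 131072) - 130000 = 1072
 * sectors, so it never crosses into the next barrier unit.
 */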

/*
 * This routine returns the disk from which the requested read should
 * be done. There is a per-array 'next expected sequential IO' sector
 * number - if this matches on the next IO then we use the last disk.
 * There is also a per-disk 'last known head position' sector that is
 * maintained from IRQ contexts, both the normal and the resync IO
 * completion handlers update this position correctly. If there is no
 * perfect sequential match then we pick the disk whose head is closest.
 *
 * If there are 2 mirrors in the same 2 devices, performance degrades
 * because position is mirror, not device based.
 *
 * The rdev for the device selected will have nr_pending incremented.
 */
static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sectors)
{
        const sector_t this_sector = r1_bio->sector;
        int sectors;
        int best_good_sectors;
        int best_disk, best_dist_disk, best_pending_disk;
        int has_nonrot_disk;
        int disk;
        sector_t best_dist;
        unsigned int min_pending;
        struct md_rdev *rdev;
        int choose_first;
        int choose_next_idle;

        rcu_read_lock();
        /*
         * Check if we can balance. We can balance on the whole
         * device if no resync is going on, or below the resync window.
         * We take the first readable disk when above the resync window.
         */
 retry:
        sectors = r1_bio->sectors;
        best_disk = -1;
        best_dist_disk = -1;
        best_dist = MaxSector;
        best_pending_disk = -1;
        min_pending = UINT_MAX;
        best_good_sectors = 0;
        has_nonrot_disk = 0;
        choose_next_idle = 0;
        clear_bit(R1BIO_FailFast, &r1_bio->state);

        if ((conf->mddev->recovery_cp < this_sector + sectors) ||
            (mddev_is_clustered(conf->mddev) &&
            md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector,
                    this_sector + sectors)))
                choose_first = 1;
        else
                choose_first = 0;

        for (disk = 0 ; disk < conf->raid_disks * 2 ; disk++) {
                sector_t dist;
                sector_t first_bad;
                int bad_sectors;
                unsigned int pending;
                bool nonrot;

                rdev = rcu_dereference(conf->mirrors[disk].rdev);
                if (r1_bio->bios[disk] == IO_BLOCKED
                    || rdev == NULL
                    || test_bit(Faulty, &rdev->flags))
                        continue;
                if (!test_bit(In_sync, &rdev->flags) &&
                    rdev->recovery_offset < this_sector + sectors)
                        continue;
                if (test_bit(WriteMostly, &rdev->flags)) {
                        /* Don't balance among write-mostly, just
                         * use the first as a last resort */
                        if (best_dist_disk < 0) {
                                if (is_badblock(rdev, this_sector, sectors,
                                                &first_bad, &bad_sectors)) {
                                        if (first_bad <= this_sector)
                                                /* Cannot use this */
                                                continue;
                                        best_good_sectors = first_bad - this_sector;
                                } else
                                        best_good_sectors = sectors;
                                best_dist_disk = disk;
                                best_pending_disk = disk;
                        }
                        continue;
                }
                /* This is a reasonable device to use.  It might
                 * even be best.
                 */
                if (is_badblock(rdev, this_sector, sectors,
                                &first_bad, &bad_sectors)) {
                        if (best_dist < MaxSector)
                                /* already have a better device */
                                continue;
                        if (first_bad <= this_sector) {
                                /* cannot read here. If this is the 'primary'
                                 * device, then we must not read beyond
                                 * bad_sectors from another device..
                                 */
                                bad_sectors -= (this_sector - first_bad);
                                if (choose_first && sectors > bad_sectors)
                                        sectors = bad_sectors;
                                if (best_good_sectors > sectors)
                                        best_good_sectors = sectors;

                        } else {
                                sector_t good_sectors = first_bad - this_sector;
                                if (good_sectors > best_good_sectors) {
                                        best_good_sectors = good_sectors;
                                        best_disk = disk;
                                }
                                if (choose_first)
                                        break;
                        }
                        continue;
                } else {
                        if ((sectors > best_good_sectors) && (best_disk >= 0))
                                best_disk = -1;
                        best_good_sectors = sectors;
                }

                if (best_disk >= 0)
                        /* At least two disks to choose from so failfast is OK */
                        set_bit(R1BIO_FailFast, &r1_bio->state);

                nonrot = blk_queue_nonrot(bdev_get_queue(rdev->bdev));
                has_nonrot_disk |= nonrot;
                pending = atomic_read(&rdev->nr_pending);
                dist = abs(this_sector - conf->mirrors[disk].head_position);
                if (choose_first) {
                        best_disk = disk;
                        break;
                }
                /* Don't change to another disk for sequential reads */
                if (conf->mirrors[disk].next_seq_sect == this_sector
                    || dist == 0) {
                        int opt_iosize = bdev_io_opt(rdev->bdev) >> 9;
                        struct raid1_info *mirror = &conf->mirrors[disk];

                        best_disk = disk;
                        /*
                         * If the buffered sequential IO size exceeds the
                         * optimal iosize, check for an idle disk and, if
                         * there is one, choose it. read_balance could
                         * already have chosen an idle disk before noticing
                         * this is sequential IO on the current disk. That
                         * doesn't matter: this disk will go idle and be
                         * picked again once the first disk's IO size
                         * exceeds the optimal iosize. This way the first
                         * disk gets at least the optimal iosize. The second
                         * disk's iosize might be small, but that is not a
                         * big deal since by the time the second disk starts
                         * IO, the first disk is likely still busy.
                         */
                        if (nonrot && opt_iosize > 0 &&
                            mirror->seq_start != MaxSector &&
                            mirror->next_seq_sect > opt_iosize &&
                            mirror->next_seq_sect - opt_iosize >=
                            mirror->seq_start) {
                                choose_next_idle = 1;
                                continue;
                        }
                        break;
                }

                if (choose_next_idle)
                        continue;

                if (min_pending > pending) {
                        min_pending = pending;
                        best_pending_disk = disk;
                }

                if (dist < best_dist) {
                        best_dist = dist;
                        best_dist_disk = disk;
                }
        }

        /*
         * If all disks are rotational, choose the closest disk. If any
         * disk is non-rotational, choose the disk with the fewest pending
         * requests even if that disk is rotational, which may or may not
         * be optimal for arrays with mixed rotational/non-rotational
         * disks, depending on the workload.
         */
        if (best_disk == -1) {
                if (has_nonrot_disk || min_pending == 0)
                        best_disk = best_pending_disk;
                else
                        best_disk = best_dist_disk;
        }

        if (best_disk >= 0) {
                rdev = rcu_dereference(conf->mirrors[best_disk].rdev);
                if (!rdev)
                        goto retry;
                atomic_inc(&rdev->nr_pending);
                sectors = best_good_sectors;

                if (conf->mirrors[best_disk].next_seq_sect != this_sector)
                        conf->mirrors[best_disk].seq_start = this_sector;

                conf->mirrors[best_disk].next_seq_sect = this_sector + sectors;
        }
        rcu_read_unlock();
        *max_sectors = sectors;

        return best_disk;
}
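
/*
 * Typical caller pattern (see raid1_read_request() below):
 *
 *	rdisk = read_balance(conf, r1_bio, &max_sectors);
 *	if (rdisk < 0)
 *		...no readable mirror, fail the master bio...
 *
 * On success the chosen rdev has nr_pending elevated; the completion
 * path drops it with rdev_dec_pending().
 */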

static void flush_bio_list(struct r1conf *conf, struct bio *bio)
{
        /* flush any pending bitmap writes to disk before proceeding w/ I/O */
        md_bitmap_unplug(conf->mddev->bitmap);
        wake_up(&conf->wait_barrier);

        while (bio) { /* submit pending writes */
                struct bio *next = bio->bi_next;
                struct md_rdev *rdev = (void *)bio->bi_disk;
                bio->bi_next = NULL;
                bio_set_dev(bio, rdev->bdev);
                if (test_bit(Faulty, &rdev->flags)) {
                        bio_io_error(bio);
                } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
                                    !blk_queue_discard(bio->bi_disk->queue)))
                        /* Just ignore it */
                        bio_endio(bio);
                else
                        submit_bio_noacct(bio);
                bio = next;
                cond_resched();
        }
}

static void flush_pending_writes(struct r1conf *conf)
{
        /* Any writes that have been queued but are awaiting
         * bitmap updates get flushed here.
         */
        spin_lock_irq(&conf->device_lock);

        if (conf->pending_bio_list.head) {
                struct blk_plug plug;
                struct bio *bio;

                bio = bio_list_get(&conf->pending_bio_list);
                conf->pending_count = 0;
                spin_unlock_irq(&conf->device_lock);

                /*
                 * As this is called in a wait_event() loop (see freeze_array),
                 * current->state might be TASK_UNINTERRUPTIBLE which will
                 * cause a warning when we prepare to wait again.  As it is
                 * rare that this path is taken, it is perfectly safe to force
                 * us to go around the wait_event() loop again, so the warning
                 * is a false-positive.  Silence the warning by resetting
                 * thread state.
                 */
                __set_current_state(TASK_RUNNING);
                blk_start_plug(&plug);
                flush_bio_list(conf, bio);
                blk_finish_plug(&plug);
        } else
                spin_unlock_irq(&conf->device_lock);
}

/* Barriers....
 * Sometimes we need to suspend IO while we do something else,
 * either some resync/recovery, or reconfigure the array.
 * To do this we raise a 'barrier'.
 * The 'barrier' is a counter that can be raised multiple times
 * to count how many activities are happening which preclude
 * normal IO.
 * We can only raise the barrier if there is no pending IO.
 * i.e. if nr_pending == 0.
 * We choose only to raise the barrier if no-one is waiting for the
 * barrier to go down.  This means that as soon as an IO request
 * is ready, no other operations which require a barrier will start
 * until the IO request has had a chance.
 *
 * So: regular IO calls 'wait_barrier'.  When that returns there
 *    is no background IO happening.  It must arrange to call
 *    allow_barrier when it has finished its IO.
 * background IO calls must call raise_barrier.  Once that returns
 *    there is no normal IO happening.  It must arrange to call
 *    lower_barrier when the particular background IO completes.
 *
 * If resync/recovery is interrupted, returns -EINTR;
 * Otherwise, returns 0.
 */
static int raise_barrier(struct r1conf *conf, sector_t sector_nr)
{
        int idx = sector_to_idx(sector_nr);

        spin_lock_irq(&conf->resync_lock);

        /* Wait until no block IO is waiting */
        wait_event_lock_irq(conf->wait_barrier,
                            !atomic_read(&conf->nr_waiting[idx]),
                            conf->resync_lock);

        /* block any new IO from starting */
        atomic_inc(&conf->barrier[idx]);
        /*
         * In raise_barrier() we firstly increase conf->barrier[idx] then
         * check conf->nr_pending[idx]. In _wait_barrier() we firstly
         * increase conf->nr_pending[idx] then check conf->barrier[idx].
         * A memory barrier here to make sure conf->nr_pending[idx] won't
         * be fetched before conf->barrier[idx] is increased. Otherwise
         * there will be a race between raise_barrier() and _wait_barrier().
         */
        smp_mb__after_atomic();

        /* For these conditions we must wait:
         * A: while the array is in a frozen state
         * B: while conf->nr_pending[idx] is not 0, meaning regular I/O
         *    exists in the corresponding I/O barrier bucket.
         * C: while conf->barrier[idx] >= RESYNC_DEPTH, meaning we have
         *    reached the max resync count allowed on the current I/O
         *    barrier bucket.
         */
        wait_event_lock_irq(conf->wait_barrier,
                            (!conf->array_frozen &&
                             !atomic_read(&conf->nr_pending[idx]) &&
                             atomic_read(&conf->barrier[idx]) < RESYNC_DEPTH) ||
                                test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery),
                            conf->resync_lock);

        if (test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) {
                atomic_dec(&conf->barrier[idx]);
                spin_unlock_irq(&conf->resync_lock);
                wake_up(&conf->wait_barrier);
                return -EINTR;
        }

        atomic_inc(&conf->nr_sync_pending);
        spin_unlock_irq(&conf->resync_lock);

        return 0;
}

static void lower_barrier(struct r1conf *conf, sector_t sector_nr)
{
        int idx = sector_to_idx(sector_nr);

        BUG_ON(atomic_read(&conf->barrier[idx]) <= 0);

        atomic_dec(&conf->barrier[idx]);
        atomic_dec(&conf->nr_sync_pending);
        wake_up(&conf->wait_barrier);
}
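
/*
 * Sketch of the background-I/O side (illustrative): each resync window
 * brackets its I/O with the barrier pair, roughly:
 *
 *	if (raise_barrier(conf, sector_nr))
 *		return;				(interrupted, -EINTR)
 *	...issue sync/recovery I/O for this barrier unit...
 *	lower_barrier(conf, sector_nr);
 *
 * In this driver the lowering is done from put_buf() above when the
 * resync buffer is released.
 */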

static void _wait_barrier(struct r1conf *conf, int idx)
{
        /*
         * We need to increase conf->nr_pending[idx] very early here,
         * then raise_barrier() can be blocked when it waits for
         * conf->nr_pending[idx] to be 0. Then we can avoid holding
         * conf->resync_lock when there is no barrier raised in the same
         * barrier unit bucket. Also if the array is frozen, I/O
         * should be blocked until the array is unfrozen.
         */
        atomic_inc(&conf->nr_pending[idx]);
        /*
         * In _wait_barrier() we firstly increase conf->nr_pending[idx], then
         * check conf->barrier[idx]. In raise_barrier() we firstly increase
         * conf->barrier[idx], then check conf->nr_pending[idx]. A memory
         * barrier is necessary here to make sure conf->barrier[idx] won't be
         * fetched before conf->nr_pending[idx] is increased. Otherwise there
         * will be a race between _wait_barrier() and raise_barrier().
         */
        smp_mb__after_atomic();

        /*
         * Don't worry about checking two atomic_t variables at the same
         * time here. If while we check conf->barrier[idx] the array is
         * frozen (conf->array_frozen is 1) and conf->barrier[idx] is
         * 0, it is safe to return and let the I/O continue. Because the
         * array is frozen, all I/O returned here will eventually complete
         * or be queued, no race will happen. See the code comment in
         * freeze_array().
         */
        if (!READ_ONCE(conf->array_frozen) &&
            !atomic_read(&conf->barrier[idx]))
                return;

        /*
         * After holding conf->resync_lock, conf->nr_pending[idx]
         * should be decreased before waiting for the barrier to drop.
         * Otherwise, we may encounter a race condition because
         * raise_barrier() might be waiting for conf->nr_pending[idx]
         * to be 0 at the same time.
         */
        spin_lock_irq(&conf->resync_lock);
        atomic_inc(&conf->nr_waiting[idx]);
        atomic_dec(&conf->nr_pending[idx]);
        /*
         * In case freeze_array() is waiting for
         * get_unqueued_pending() == extra
         */
        wake_up(&conf->wait_barrier);
        /* Wait for the barrier in the same barrier unit bucket to drop. */
        wait_event_lock_irq(conf->wait_barrier,
                            !conf->array_frozen &&
                             !atomic_read(&conf->barrier[idx]),
                            conf->resync_lock);
        atomic_inc(&conf->nr_pending[idx]);
        atomic_dec(&conf->nr_waiting[idx]);
        spin_unlock_irq(&conf->resync_lock);
}

static void wait_read_barrier(struct r1conf *conf, sector_t sector_nr)
{
        int idx = sector_to_idx(sector_nr);

        /*
         * Very similar to _wait_barrier(). The difference is, for read
         * I/O we don't need to wait for sync I/O, but if the whole array
         * is frozen, the read I/O still has to wait until the array is
         * unfrozen. Since there is no ordering requirement with
         * conf->barrier[idx] here, a memory barrier is unnecessary as well.
         */
        atomic_inc(&conf->nr_pending[idx]);

        if (!READ_ONCE(conf->array_frozen))
                return;

        spin_lock_irq(&conf->resync_lock);
        atomic_inc(&conf->nr_waiting[idx]);
        atomic_dec(&conf->nr_pending[idx]);
        /*
         * In case freeze_array() is waiting for
         * get_unqueued_pending() == extra
         */
        wake_up(&conf->wait_barrier);
        /* Wait for array to be unfrozen */
        wait_event_lock_irq(conf->wait_barrier,
                            !conf->array_frozen,
                            conf->resync_lock);
        atomic_inc(&conf->nr_pending[idx]);
        atomic_dec(&conf->nr_waiting[idx]);
        spin_unlock_irq(&conf->resync_lock);
}

static void wait_barrier(struct r1conf *conf, sector_t sector_nr)
{
        int idx = sector_to_idx(sector_nr);

        _wait_barrier(conf, idx);
}

static void _allow_barrier(struct r1conf *conf, int idx)
{
        atomic_dec(&conf->nr_pending[idx]);
        wake_up(&conf->wait_barrier);
}

static void allow_barrier(struct r1conf *conf, sector_t sector_nr)
{
        int idx = sector_to_idx(sector_nr);

        _allow_barrier(conf, idx);
}
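
/*
 * Regular I/O uses the complementary pair: wait_barrier() before a
 * request is issued (see raid1_write_request() below) and
 * allow_barrier() once the r1bio is finished (see raid_end_bio_io()
 * above).
 */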

/* conf->resync_lock should be held */
static int get_unqueued_pending(struct r1conf *conf)
{
        int idx, ret;

        ret = atomic_read(&conf->nr_sync_pending);
        for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++)
                ret += atomic_read(&conf->nr_pending[idx]) -
                        atomic_read(&conf->nr_queued[idx]);

        return ret;
}

static void freeze_array(struct r1conf *conf, int extra)
{
        /* Stop sync I/O and normal I/O and wait for everything to
         * go quiet.
         * This is called in two situations:
         * 1) management command handlers (reshape, remove disk, quiesce).
         * 2) one normal I/O request failed.
         *
         * After array_frozen is set to 1, new sync IO will be blocked at
         * raise_barrier(), and new normal I/O will be blocked at
         * _wait_barrier() or wait_read_barrier(). The in-flight I/Os will
         * either complete or be queued. When everything goes quiet, there
         * are only queued I/Os left.
         *
         * Every in-flight I/O contributes to conf->nr_pending[idx], where
         * idx is the barrier bucket index which this I/O request hits.
         * When all sync and normal I/O are queued, the sum of all
         * conf->nr_pending[] will match the sum of all conf->nr_queued[].
         * But normal I/O failure is an exception: in handle_read_error(),
         * we may call freeze_array() before trying to fix the read error.
         * In this case, the failed read I/O is not queued, so
         * get_unqueued_pending() == 1.
         *
         * Therefore before this function returns, we need to wait until
         * get_unqueued_pending(conf) gets equal to extra. For the normal
         * I/O context, extra is 1; in all other situations extra is 0.
         */
        spin_lock_irq(&conf->resync_lock);
        conf->array_frozen = 1;
        raid1_log(conf->mddev, "wait freeze");
        wait_event_lock_irq_cmd(
                conf->wait_barrier,
                get_unqueued_pending(conf) == extra,
                conf->resync_lock,
                flush_pending_writes(conf));
        spin_unlock_irq(&conf->resync_lock);
}

static void unfreeze_array(struct r1conf *conf)
{
        /* reverse the effect of the freeze */
        spin_lock_irq(&conf->resync_lock);
        conf->array_frozen = 0;
        spin_unlock_irq(&conf->resync_lock);
        wake_up(&conf->wait_barrier);
}
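
/*
 * Usage sketch (illustrative): error handling quiesces the array around
 * the repair, roughly:
 *
 *	freeze_array(conf, 1);	(the failed read itself is the one 'extra')
 *	...fix the read error / record bad blocks...
 *	unfreeze_array(conf);
 *
 * as described for handle_read_error() in the comment above, while
 * management paths freeze with extra == 0.
 */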
1098
1099 static void alloc_behind_master_bio(struct r1bio *r1_bio,
1100                                            struct bio *bio)
1101 {
1102         int size = bio->bi_iter.bi_size;
1103         unsigned vcnt = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
1104         int i = 0;
1105         struct bio *behind_bio = NULL;
1106
1107         behind_bio = bio_alloc_mddev(GFP_NOIO, vcnt, r1_bio->mddev);
1108         if (!behind_bio)
1109                 return;
1110
1111         /* discard op, we don't support writezero/writesame yet */
1112         if (!bio_has_data(bio)) {
1113                 behind_bio->bi_iter.bi_size = size;
1114                 goto skip_copy;
1115         }
1116
1117         behind_bio->bi_write_hint = bio->bi_write_hint;
1118
1119         while (i < vcnt && size) {
1120                 struct page *page;
1121                 int len = min_t(int, PAGE_SIZE, size);
1122
1123                 page = alloc_page(GFP_NOIO);
1124                 if (unlikely(!page))
1125                         goto free_pages;
1126
1127                 bio_add_page(behind_bio, page, len, 0);
1128
1129                 size -= len;
1130                 i++;
1131         }
1132
1133         bio_copy_data(behind_bio, bio);
1134 skip_copy:
1135         r1_bio->behind_master_bio = behind_bio;
1136         set_bit(R1BIO_BehindIO, &r1_bio->state);
1137
1138         return;
1139
1140 free_pages:
1141         pr_debug("%dB behind alloc failed, doing sync I/O\n",
1142                  bio->bi_iter.bi_size);
1143         bio_free_pages(behind_bio);
1144         bio_put(behind_bio);
1145 }
1146
1147 struct raid1_plug_cb {
1148         struct blk_plug_cb      cb;
1149         struct bio_list         pending;
1150         int                     pending_cnt;
1151 };
1152
1153 static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
1154 {
1155         struct raid1_plug_cb *plug = container_of(cb, struct raid1_plug_cb,
1156                                                   cb);
1157         struct mddev *mddev = plug->cb.data;
1158         struct r1conf *conf = mddev->private;
1159         struct bio *bio;
1160
1161         if (from_schedule || current->bio_list) {
1162                 spin_lock_irq(&conf->device_lock);
1163                 bio_list_merge(&conf->pending_bio_list, &plug->pending);
1164                 conf->pending_count += plug->pending_cnt;
1165                 spin_unlock_irq(&conf->device_lock);
1166                 wake_up(&conf->wait_barrier);
1167                 md_wakeup_thread(mddev->thread);
1168                 kfree(plug);
1169                 return;
1170         }
1171
1172         /* we aren't scheduling, so we can do the write-out directly. */
1173         bio = bio_list_get(&plug->pending);
1174         flush_bio_list(conf, bio);
1175         kfree(plug);
1176 }
1177
1178 static void init_r1bio(struct r1bio *r1_bio, struct mddev *mddev, struct bio *bio)
1179 {
1180         r1_bio->master_bio = bio;
1181         r1_bio->sectors = bio_sectors(bio);
1182         r1_bio->state = 0;
1183         r1_bio->mddev = mddev;
1184         r1_bio->sector = bio->bi_iter.bi_sector;
1185 }
1186
1187 static inline struct r1bio *
1188 alloc_r1bio(struct mddev *mddev, struct bio *bio)
1189 {
1190         struct r1conf *conf = mddev->private;
1191         struct r1bio *r1_bio;
1192
1193         r1_bio = mempool_alloc(&conf->r1bio_pool, GFP_NOIO);
1194         /* Ensure no bio records IO_BLOCKED */
1195         memset(r1_bio->bios, 0, conf->raid_disks * sizeof(r1_bio->bios[0]));
1196         init_r1bio(r1_bio, mddev, bio);
1197         return r1_bio;
1198 }
1199
1200 static void raid1_read_request(struct mddev *mddev, struct bio *bio,
1201                                int max_read_sectors, struct r1bio *r1_bio)
1202 {
1203         struct r1conf *conf = mddev->private;
1204         struct raid1_info *mirror;
1205         struct bio *read_bio;
1206         struct bitmap *bitmap = mddev->bitmap;
1207         const int op = bio_op(bio);
1208         const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
1209         int max_sectors;
1210         int rdisk;
1211         bool print_msg = !!r1_bio;
1212         char b[BDEVNAME_SIZE];
1213
1214         /*
1215          * If r1_bio is set, we are blocking the raid1d thread
1216          * so there is a tiny risk of deadlock.  So ask for
1217          * emergency memory if needed.
1218          */
1219         gfp_t gfp = r1_bio ? (GFP_NOIO | __GFP_HIGH) : GFP_NOIO;
1220
1221         if (print_msg) {
1222                 /* Need to get the block device name carefully */
1223                 struct md_rdev *rdev;
1224                 rcu_read_lock();
1225                 rdev = rcu_dereference(conf->mirrors[r1_bio->read_disk].rdev);
1226                 if (rdev)
1227                         bdevname(rdev->bdev, b);
1228                 else
1229                         strcpy(b, "???");
1230                 rcu_read_unlock();
1231         }
1232
1233         /*
1234          * Still need barrier for READ in case that whole
1235          * array is frozen.
1236          */
1237         wait_read_barrier(conf, bio->bi_iter.bi_sector);
1238
1239         if (!r1_bio)
1240                 r1_bio = alloc_r1bio(mddev, bio);
1241         else
1242                 init_r1bio(r1_bio, mddev, bio);
1243         r1_bio->sectors = max_read_sectors;
1244
1245         /*
1246          * make_request() can abort the operation when read-ahead is being
1247          * used and no empty request is available.
1248          */
1249         rdisk = read_balance(conf, r1_bio, &max_sectors);
1250
1251         if (rdisk < 0) {
1252                 /* couldn't find anywhere to read from */
1253                 if (print_msg) {
1254                         pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n",
1255                                             mdname(mddev),
1256                                             b,
1257                                             (unsigned long long)r1_bio->sector);
1258                 }
1259                 raid_end_bio_io(r1_bio);
1260                 return;
1261         }
1262         mirror = conf->mirrors + rdisk;
1263
1264         if (print_msg)
1265                 pr_info_ratelimited("md/raid1:%s: redirecting sector %llu to other mirror: %s\n",
1266                                     mdname(mddev),
1267                                     (unsigned long long)r1_bio->sector,
1268                                     bdevname(mirror->rdev->bdev, b));
1269
1270         if (test_bit(WriteMostly, &mirror->rdev->flags) &&
1271             bitmap) {
1272                 /*
1273                  * Reading from a write-mostly device must take care not to
1274                  * over-take any writes that are 'behind'
1275                  */
1276                 raid1_log(mddev, "wait behind writes");
1277                 wait_event(bitmap->behind_wait,
1278                            atomic_read(&bitmap->behind_writes) == 0);
1279         }
1280
1281         if (max_sectors < bio_sectors(bio)) {
1282                 struct bio *split = bio_split(bio, max_sectors,
1283                                               gfp, &conf->bio_split);
1284                 bio_chain(split, bio);
1285                 submit_bio_noacct(bio);
1286                 bio = split;
1287                 r1_bio->master_bio = bio;
1288                 r1_bio->sectors = max_sectors;
1289         }
1290
1291         r1_bio->read_disk = rdisk;
1292
1293         read_bio = bio_clone_fast(bio, gfp, &mddev->bio_set);
1294
1295         r1_bio->bios[rdisk] = read_bio;
1296
1297         read_bio->bi_iter.bi_sector = r1_bio->sector +
1298                 mirror->rdev->data_offset;
1299         bio_set_dev(read_bio, mirror->rdev->bdev);
1300         read_bio->bi_end_io = raid1_end_read_request;
1301         bio_set_op_attrs(read_bio, op, do_sync);
1302         if (test_bit(FailFast, &mirror->rdev->flags) &&
1303             test_bit(R1BIO_FailFast, &r1_bio->state))
1304                 read_bio->bi_opf |= MD_FAILFAST;
1305         read_bio->bi_private = r1_bio;
1306
1307         if (mddev->gendisk)
1308                 trace_block_bio_remap(read_bio->bi_disk->queue, read_bio,
1309                                 disk_devt(mddev->gendisk), r1_bio->sector);
1310
1311         submit_bio_noacct(read_bio);
1312 }
1313
1314 static void raid1_write_request(struct mddev *mddev, struct bio *bio,
1315                                 int max_write_sectors)
1316 {
1317         struct r1conf *conf = mddev->private;
1318         struct r1bio *r1_bio;
1319         int i, disks;
1320         struct bitmap *bitmap = mddev->bitmap;
1321         unsigned long flags;
1322         struct md_rdev *blocked_rdev;
1323         struct blk_plug_cb *cb;
1324         struct raid1_plug_cb *plug = NULL;
1325         int first_clone;
1326         int max_sectors;
1327
1328         if (mddev_is_clustered(mddev) &&
1329              md_cluster_ops->area_resyncing(mddev, WRITE,
1330                      bio->bi_iter.bi_sector, bio_end_sector(bio))) {
1331
1332                 DEFINE_WAIT(w);
1333                 for (;;) {
1334                         prepare_to_wait(&conf->wait_barrier,
1335                                         &w, TASK_IDLE);
1336                         if (!md_cluster_ops->area_resyncing(mddev, WRITE,
1337                                                         bio->bi_iter.bi_sector,
1338                                                         bio_end_sector(bio)))
1339                                 break;
1340                         schedule();
1341                 }
1342                 finish_wait(&conf->wait_barrier, &w);
1343         }
1344
1345         /*
1346          * Register the new request and wait if the reconstruction
1347          * thread has put up a bar for new requests.
1348          * Continue immediately if no resync is active currently.
1349          */
1350         wait_barrier(conf, bio->bi_iter.bi_sector);
1351
1352         r1_bio = alloc_r1bio(mddev, bio);
1353         r1_bio->sectors = max_write_sectors;
1354
1355         if (conf->pending_count >= max_queued_requests) {
1356                 md_wakeup_thread(mddev->thread);
1357                 raid1_log(mddev, "wait queued");
1358                 wait_event(conf->wait_barrier,
1359                            conf->pending_count < max_queued_requests);
1360         }
1361         /* first select target devices under rcu_lock and
1362          * inc refcount on their rdev.  Record them by setting
1363          * bios[x] to bio.
1364          * If there are known/acknowledged bad blocks on any device on
1365          * which we have seen a write error, we want to avoid writing those
1366          * blocks.
1367          * This potentially requires several writes to write around
1368          * the bad blocks.  Each set of writes gets its own r1bio
1369          * with a set of bios attached; see the worked example below.
1370          */
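        /*
         * Worked example (hypothetical numbers): with r1_bio->sectors == 64,
         * if one mirror has an acknowledged bad block 16 sectors into the
         * range, max_sectors drops to 16; this r1bio then covers only those
         * 16 sectors on every device, and the split remainder is re-queued
         * via submit_bio_noacct() to get its own r1bio.
         */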
1371
1372         disks = conf->raid_disks * 2;
1373  retry_write:
1374         blocked_rdev = NULL;
1375         rcu_read_lock();
1376         max_sectors = r1_bio->sectors;
1377         for (i = 0;  i < disks; i++) {
1378                 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1379                 if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) {
1380                         atomic_inc(&rdev->nr_pending);
1381                         blocked_rdev = rdev;
1382                         break;
1383                 }
1384                 r1_bio->bios[i] = NULL;
1385                 if (!rdev || test_bit(Faulty, &rdev->flags)) {
1386                         if (i < conf->raid_disks)
1387                                 set_bit(R1BIO_Degraded, &r1_bio->state);
1388                         continue;
1389                 }
1390
1391                 atomic_inc(&rdev->nr_pending);
1392                 if (test_bit(WriteErrorSeen, &rdev->flags)) {
1393                         sector_t first_bad;
1394                         int bad_sectors;
1395                         int is_bad;
1396
1397                         is_bad = is_badblock(rdev, r1_bio->sector, max_sectors,
1398                                              &first_bad, &bad_sectors);
1399                         if (is_bad < 0) {
1400                                 /* mustn't write here until the bad block is
1401                                  * acknowledged */
1402                                 set_bit(BlockedBadBlocks, &rdev->flags);
1403                                 blocked_rdev = rdev;
1404                                 break;
1405                         }
1406                         if (is_bad && first_bad <= r1_bio->sector) {
1407                                 /* Cannot write here at all */
1408                                 bad_sectors -= (r1_bio->sector - first_bad);
1409                                 if (bad_sectors < max_sectors)
1410                                         /* mustn't write more than bad_sectors
1411                                          * to other devices yet
1412                                          */
1413                                         max_sectors = bad_sectors;
1414                                 rdev_dec_pending(rdev, mddev);
1415                                 /* We don't set R1BIO_Degraded as that
1416                                  * only applies if the disk is missing
1417                                  * and might be re-added, in which case
1418                                  * we want to know to recover this
1419                                  * chunk.
1420                                  * In this case the device is here,
1421                                  * and the fact that this chunk is not
1422                                  * in-sync is recorded in the bad
1423                                  * block log.
1424                                  */
1425                                 continue;
1426                         }
1427                         if (is_bad) {
1428                                 int good_sectors = first_bad - r1_bio->sector;
1429                                 if (good_sectors < max_sectors)
1430                                         max_sectors = good_sectors;
1431                         }
1432                 }
1433                 r1_bio->bios[i] = bio;
1434         }
1435         rcu_read_unlock();
1436
1437         if (unlikely(blocked_rdev)) {
1438                 /* Wait for this device to become unblocked */
1439                 int j;
1440
1441                 for (j = 0; j < i; j++)
1442                         if (r1_bio->bios[j])
1443                                 rdev_dec_pending(conf->mirrors[j].rdev, mddev);
1444                 r1_bio->state = 0;
1445                 allow_barrier(conf, bio->bi_iter.bi_sector);
1446                 raid1_log(mddev, "wait rdev %d blocked", blocked_rdev->raid_disk);
1447                 md_wait_for_blocked_rdev(blocked_rdev, mddev);
1448                 wait_barrier(conf, bio->bi_iter.bi_sector);
1449                 goto retry_write;
1450         }
1451
1452         if (max_sectors < bio_sectors(bio)) {
1453                 struct bio *split = bio_split(bio, max_sectors,
1454                                               GFP_NOIO, &conf->bio_split);
1455                 bio_chain(split, bio);
1456                 submit_bio_noacct(bio);
1457                 bio = split;
1458                 r1_bio->master_bio = bio;
1459                 r1_bio->sectors = max_sectors;
1460         }
1461
1462         atomic_set(&r1_bio->remaining, 1);
1463         atomic_set(&r1_bio->behind_remaining, 0);
1464
1465         first_clone = 1;
1466
1467         for (i = 0; i < disks; i++) {
1468                 struct bio *mbio = NULL;
1469                 struct md_rdev *rdev = conf->mirrors[i].rdev;
1470                 if (!r1_bio->bios[i])
1471                         continue;
1472
1473                 if (first_clone) {
1474                         /* Should we do behind (write-behind) I/O?
1475                          * Not if there are already too many, if we cannot
1476                          * allocate memory, or if a reader on a WriteMostly
1477                          * device is waiting for behind writes to flush. */
1478                         if (bitmap &&
1479                             (atomic_read(&bitmap->behind_writes)
1480                              < mddev->bitmap_info.max_write_behind) &&
1481                             !waitqueue_active(&bitmap->behind_wait)) {
1482                                 alloc_behind_master_bio(r1_bio, bio);
1483                         }
1484
1485                         md_bitmap_startwrite(bitmap, r1_bio->sector, r1_bio->sectors,
1486                                              test_bit(R1BIO_BehindIO, &r1_bio->state));
1487                         first_clone = 0;
1488                 }
1489
1490                 if (r1_bio->behind_master_bio)
1491                         mbio = bio_clone_fast(r1_bio->behind_master_bio,
1492                                               GFP_NOIO, &mddev->bio_set);
1493                 else
1494                         mbio = bio_clone_fast(bio, GFP_NOIO, &mddev->bio_set);
1495
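                /*
                 * Write-behind bios must be serialized against overlapping
                 * writes when the rdev requests collision checking; with
                 * mddev->serialize_policy set, every write is serialized.
                 */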
1496                 if (r1_bio->behind_master_bio) {
1497                         if (test_bit(CollisionCheck, &rdev->flags))
1498                                 wait_for_serialization(rdev, r1_bio);
1499                         if (test_bit(WriteMostly, &rdev->flags))
1500                                 atomic_inc(&r1_bio->behind_remaining);
1501                 } else if (mddev->serialize_policy)
1502                         wait_for_serialization(rdev, r1_bio);
1503
1504                 r1_bio->bios[i] = mbio;
1505
1506                 mbio->bi_iter.bi_sector = (r1_bio->sector +
1507                                    conf->mirrors[i].rdev->data_offset);
1508                 bio_set_dev(mbio, conf->mirrors[i].rdev->bdev);
1509                 mbio->bi_end_io = raid1_end_write_request;
1510                 mbio->bi_opf = bio_op(bio) | (bio->bi_opf & (REQ_SYNC | REQ_FUA));
1511                 if (test_bit(FailFast, &conf->mirrors[i].rdev->flags) &&
1512                     !test_bit(WriteMostly, &conf->mirrors[i].rdev->flags) &&
1513                     conf->raid_disks - mddev->degraded > 1)
1514                         mbio->bi_opf |= MD_FAILFAST;
1515                 mbio->bi_private = r1_bio;
1516
1517                 atomic_inc(&r1_bio->remaining);
1518
1519                 if (mddev->gendisk)
1520                         trace_block_bio_remap(mbio->bi_disk->queue,
1521                                               mbio, disk_devt(mddev->gendisk),
1522                                               r1_bio->sector);
1523                 /* flush_pending_writes() needs access to the rdev, so stash it in bi_disk until submission */
1524                 mbio->bi_disk = (void *)conf->mirrors[i].rdev;
1525
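                /*
                 * Batch this write on the caller's blk_plug when one is
                 * active; otherwise queue it on pending_bio_list and wake
                 * raid1d to flush it.
                 */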
1526                 cb = blk_check_plugged(raid1_unplug, mddev, sizeof(*plug));
1527                 if (cb)
1528                         plug = container_of(cb, struct raid1_plug_cb, cb);
1529                 else
1530                         plug = NULL;
1531                 if (plug) {
1532                         bio_list_add(&plug->pending, mbio);
1533                         plug->pending_cnt++;
1534                 } else {
1535                         spin_lock_irqsave(&conf->device_lock, flags);
1536                         bio_list_add(&conf->pending_bio_list, mbio);
1537                         conf->pending_count++;
1538                         spin_unlock_irqrestore(&conf->device_lock, flags);
1539                         md_wakeup_thread(mddev->thread);
1540                 }
1541         }
1542
1543         r1_bio_write_done(r1_bio);
1544
1545         /* In case raid1d snuck in to freeze_array */
1546         wake_up(&conf->wait_barrier);
1547 }
1548
1549 static bool raid1_make_request(struct mddev *mddev, struct bio *bio)
1550 {
1551         sector_t sectors;
1552
1553         if (unlikely(bio->bi_opf & REQ_PREFLUSH)
1554             && md_flush_request(mddev, bio))
1555                 return true;
1556
1557         /*
1558          * There is a limit to the maximum size, but
1559          * the read/write handler might find a lower limit
1560          * due to bad blocks.  To avoid multiple splits,
1561          * we pass the maximum number of sectors down
1562          * and let the lower level perform the split.
1563          */
1564         sectors = align_to_barrier_unit_end(
1565                 bio->bi_iter.bi_sector, bio_sectors(bio));
1566
1567         if (bio_data_dir(bio) == READ)
1568                 raid1_read_request(mddev, bio, sectors, NULL);
1569         else {
1570                 if (!md_write_start(mddev, bio))
1571                         return false;
1572                 raid1_write_request(mddev, bio, sectors);
1573         }
1574         return true;
1575 }
1576
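/*
 * Report per-mirror state for /proc/mdstat, e.g. " [2/1] [U_]" for a
 * two-disk mirror running degraded with its second member failed.
 */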
1577 static void raid1_status(struct seq_file *seq, struct mddev *mddev)
1578 {
1579         struct r1conf *conf = mddev->private;
1580         int i;
1581
1582         seq_printf(seq, " [%d/%d] [", conf->raid_disks,
1583                    conf->raid_disks - mddev->degraded);
1584         rcu_read_lock();
1585         for (i = 0; i < conf->raid_disks; i++) {
1586                 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1587                 seq_printf(seq, "%s",
1588                            rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_");
1589         }
1590         rcu_read_unlock();
1591         seq_printf(seq, "]");
1592 }
1593
1594 static void raid1_error(struct mddev *mddev, struct md_rdev *rdev)
1595 {
1596         char b[BDEVNAME_SIZE];
1597         struct r1conf *conf = mddev->private;
1598         unsigned long flags;
1599
1600         /*
1601          * If it is not operational, then we have already marked it as dead;
1602          * else if it is the last working disk and "fail_last_dev" is false,
1603          * ignore the error and let the next level up know;
1604          * else mark the drive as failed.
1605          */
1606         spin_lock_irqsave(&conf->device_lock, flags);
1607         if (test_bit(In_sync, &rdev->flags) && !mddev->fail_last_dev
1608             && (conf->raid_disks - mddev->degraded) == 1) {
1609                 /*
1610                  * Don't fail the drive, act as though we were just a
1611                  * normal single drive.
1612                  * However don't try a recovery from this drive as
1613                  * it is very likely to fail.
1614                  */
1615                 conf->recovery_disabled = mddev->recovery_disabled;
1616                 spin_unlock_irqrestore(&conf->device_lock, flags);
1617                 return;
1618         }
1619         set_bit(Blocked, &rdev->flags);
1620         if (test_and_clear_bit(In_sync, &rdev->flags))
1621                 mddev->degraded++;
1622         set_bit(Faulty, &rdev->flags);
1623         spin_unlock_irqrestore(&conf->device_lock, flags);
1624         /*
1625          * if recovery is running, make sure it aborts.
1626          */
1627         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1628         set_mask_bits(&mddev->sb_flags, 0,
1629                       BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_PENDING));
1630         pr_crit("md/raid1:%s: Disk failure on %s, disabling device.\n"
1631                 "md/raid1:%s: Operation continuing on %d devices.\n",
1632                 mdname(mddev), bdevname(rdev->bdev, b),
1633                 mdname(mddev), conf->raid_disks - mddev->degraded);
1634 }
1635
1636 static void print_conf(struct r1conf *conf)
1637 {
1638         int i;
1639
1640         pr_debug("RAID1 conf printout:\n");
1641         if (!conf) {
1642                 pr_debug("(!conf)\n");
1643                 return;
1644         }
1645         pr_debug(" --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded,
1646                  conf->raid_disks);
1647
1648         rcu_read_lock();
1649         for (i = 0; i < conf->raid_disks; i++) {
1650                 char b[BDEVNAME_SIZE];
1651                 struct md_rdev *rdev = rcu_dereference(conf->mirrors[i].rdev);
1652                 if (rdev)
1653                         pr_debug(" disk %d, wo:%d, o:%d, dev:%s\n",
1654                                  i, !test_bit(In_sync, &rdev->flags),
1655                                  !test_bit(Faulty, &rdev->flags),
1656                                  bdevname(rdev->bdev,b));
1657         }
1658         rcu_read_unlock();
1659 }
1660
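/*
 * Let all pending sync requests drain by passing through the barrier on
 * every bucket, then release the resync buffer pool.
 */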
1661 static void close_sync(struct r1conf *conf)
1662 {
1663         int idx;
1664
1665         for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++) {
1666                 _wait_barrier(conf, idx);
1667                 _allow_barrier(conf, idx);
1668         }
1669
1670         mempool_exit(&conf->r1buf_pool);
1671 }
1672
1673 static int raid1_spare_active(struct mddev *mddev)
1674 {
1675         int i;
1676         struct r1conf *conf = mddev->private;
1677         int count = 0;
1678         unsigned long flags;
1679
1680         /*
1681          * Find all failed disks within the RAID1 configuration
1682          * and mark them readable.
1683          * Called under mddev lock, so rcu protection not needed.
1684          * device_lock used to avoid races with raid1_end_read_request
1685          * which expects 'In_sync' flags and ->degraded to be consistent.
1686          */
1687         spin_lock_irqsave(&conf->device_lock, flags);
1688         for (i = 0; i < conf->raid_disks; i++) {
1689                 struct md_rdev *rdev = conf->mirrors[i].rdev;
1690                 struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
1691                 if (repl
1692                     && !test_bit(Candidate, &repl->flags)
1693                     && repl->recovery_offset == MaxSector
1694                     && !test_bit(Faulty, &repl->flags)
1695                     && !test_and_set_bit(In_sync, &repl->flags)) {
1696                         /* replacement has just become active */
1697                         if (!rdev ||
1698                             !test_and_clear_bit(In_sync, &rdev->flags))
1699                                 count++;
1700                         if (rdev) {
1701                                 /* The replaced device is not technically
1702                                  * faulty, but we need to be sure it gets
1703                                  * removed and is never re-added.
1704                                  */
1705                                 set_bit(Faulty, &rdev->flags);
1706                                 sysfs_notify_dirent_safe(
1707                                         rdev->sysfs_state);
1708                         }
1709                 }
1710                 if (rdev
1711                     && rdev->recovery_offset == MaxSector
1712                     && !test_bit(Faulty, &rdev->flags)
1713                     && !test_and_set_bit(In_sync, &rdev->flags)) {
1714                         count++;
1715                         sysfs_notify_dirent_safe(rdev->sysfs_state);
1716                 }
1717         }
1718         mddev->degraded -= count;
1719         spin_unlock_irqrestore(&conf->device_lock, flags);
1720
1721         print_conf(conf);
1722         return count;
1723 }
1724
1725 static int raid1_add_disk(struct mddev *mddev, struct md_rdev *rdev)
1726 {
1727         struct r1conf *conf = mddev->private;
1728         int err = -EEXIST;
1729         int mirror = 0;
1730         struct raid1_info *p;
1731         int first = 0;
1732         int last = conf->raid_disks - 1;
1733
1734         if (mddev->recovery_disabled == conf->recovery_disabled)
1735                 return -EBUSY;
1736
1737         if (md_integrity_add_rdev(rdev, mddev))
1738                 return -ENXIO;
1739
1740         if (rdev->raid_disk >= 0)
1741                 first = last = rdev->raid_disk;
1742
1743         /*
1744          * find the disk ... but prefer rdev->saved_raid_disk
1745          * if possible.
1746          */
1747         if (rdev->saved_raid_disk >= 0 &&
1748             rdev->saved_raid_disk >= first &&
1749             rdev->saved_raid_disk < conf->raid_disks &&
1750             conf->mirrors[rdev->saved_raid_disk].rdev == NULL)
1751                 first = last = rdev->saved_raid_disk;
1752
1753         for (mirror = first; mirror <= last; mirror++) {
1754                 p = conf->mirrors + mirror;
1755                 if (!p->rdev) {
1756                         if (mddev->gendisk)
1757                                 disk_stack_limits(mddev->gendisk, rdev->bdev,
1758                                                   rdev->data_offset << 9);
1759
1760                         p->head_position = 0;
1761                         rdev->raid_disk = mirror;
1762                         err = 0;
1763                         /* As all devices are equivalent, we don't need a full recovery
1764                          * if this drive was recently a member of the array.
1765                          */
1766                         if (rdev->saved_raid_disk < 0)
1767                                 conf->fullsync = 1;
1768                         rcu_assign_pointer(p->rdev, rdev);
1769                         break;
1770                 }
1771                 if (test_bit(WantReplacement, &p->rdev->flags) &&
1772                     p[conf->raid_disks].rdev == NULL) {
1773                         /* Add this device as a replacement */
1774                         clear_bit(In_sync, &rdev->flags);
1775                         set_bit(Replacement, &rdev->flags);
1776                         rdev->raid_disk = mirror;
1777                         err = 0;
1778                         conf->fullsync = 1;
1779                         rcu_assign_pointer(p[conf->raid_disks].rdev, rdev);
1780                         break;
1781                 }
1782         }
1783         if (mddev->queue && blk_queue_discard(bdev_get_queue(rdev->bdev)))
1784                 blk_queue_flag_set(QUEUE_FLAG_DISCARD, mddev->queue);
1785         print_conf(conf);
1786         return err;
1787 }
1788
1789 static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev)
1790 {
1791         struct r1conf *conf = mddev->private;
1792         int err = 0;
1793         int number = rdev->raid_disk;
1794         struct raid1_info *p = conf->mirrors + number;
1795
1796         if (rdev != p->rdev)
1797                 p = conf->mirrors + conf->raid_disks + number;
1798
1799         print_conf(conf);
1800         if (rdev == p->rdev) {
1801                 if (test_bit(In_sync, &rdev->flags) ||
1802                     atomic_read(&rdev->nr_pending)) {
1803                         err = -EBUSY;
1804                         goto abort;
1805                 }
1806                 /* Only remove non-faulty devices if recovery
1807                  * is not possible.
1808                  */
1809                 if (!test_bit(Faulty, &rdev->flags) &&
1810                     mddev->recovery_disabled != conf->recovery_disabled &&
1811                     mddev->degraded < conf->raid_disks) {
1812                         err = -EBUSY;
1813                         goto abort;
1814                 }
1815                 p->rdev = NULL;
1816                 if (!test_bit(RemoveSynchronized, &rdev->flags)) {
1817                         synchronize_rcu();
1818                         if (atomic_read(&rdev->nr_pending)) {
1819                                 /* lost the race, try later */
1820                                 err = -EBUSY;
1821                                 p->rdev = rdev;
1822                                 goto abort;
1823                         }
1824                 }
1825                 if (conf->mirrors[conf->raid_disks + number].rdev) {
1826                         /* We just removed a device that is being replaced.
1827                          * Move down the replacement.  We drain all IO before
1828                          * doing this to avoid confusion.
1829                          */
1830                         struct md_rdev *repl =
1831                                 conf->mirrors[conf->raid_disks + number].rdev;
1832                         freeze_array(conf, 0);
1833                         if (atomic_read(&repl->nr_pending)) {
1834                                 /* Some queued IO on the retry_list still
1835                                  * holds a reference to repl, so we must not
1836                                  * clear the replacement pointer yet: that
1837                                  * could cause an rdev NULL dereference in
1838                                  * sync_request_write/handle_write_finished.
1839                                  */
1840                                 err = -EBUSY;
1841                                 unfreeze_array(conf);
1842                                 goto abort;
1843                         }
1844                         clear_bit(Replacement, &repl->flags);
1845                         p->rdev = repl;
1846                         conf->mirrors[conf->raid_disks + number].rdev = NULL;
1847                         unfreeze_array(conf);
1848                 }
1849
1850                 clear_bit(WantReplacement, &rdev->flags);
1851                 err = md_integrity_register(mddev);
1852         }
1853 abort:
1854
1855         print_conf(conf);
1856         return err;
1857 }
1858
1859 static void end_sync_read(struct bio *bio)
1860 {
1861         struct r1bio *r1_bio = get_resync_r1bio(bio);
1862
1863         update_head_pos(r1_bio->read_disk, r1_bio);
1864
1865         /*
1866          * we have read a block, now it needs to be re-written,
1867          * or re-read if the read failed.
1868          * We don't do much here, just schedule handling by raid1d
1869          */
1870         if (!bio->bi_status)
1871                 set_bit(R1BIO_Uptodate, &r1_bio->state);
1872
1873         if (atomic_dec_and_test(&r1_bio->remaining))
1874                 reschedule_retry(r1_bio);
1875 }
1876
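/*
 * A sync write failed; re-mark the whole range as needing sync so the
 * bitmap bits are not cleared and the blocks are retried later.
 */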
1877 static void abort_sync_write(struct mddev *mddev, struct r1bio *r1_bio)
1878 {
1879         sector_t sync_blocks = 0;
1880         sector_t s = r1_bio->sector;
1881         long sectors_to_go = r1_bio->sectors;
1882
1883         /* make sure these bits don't get cleared. */
1884         do {
1885                 md_bitmap_end_sync(mddev->bitmap, s, &sync_blocks, 1);
1886                 s += sync_blocks;
1887                 sectors_to_go -= sync_blocks;
1888         } while (sectors_to_go > 0);
1889 }
1890
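/*
 * Drop one reference on a sync-write r1bio. The last holder either
 * reschedules it for bad-block bookkeeping or frees the buffer and
 * accounts the completed sectors via md_done_sync().
 */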
1891 static void put_sync_write_buf(struct r1bio *r1_bio, int uptodate)
1892 {
1893         if (atomic_dec_and_test(&r1_bio->remaining)) {
1894                 struct mddev *mddev = r1_bio->mddev;
1895                 int s = r1_bio->sectors;
1896
1897                 if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
1898                     test_bit(R1BIO_WriteError, &r1_bio->state))
1899                         reschedule_retry(r1_bio);
1900                 else {
1901                         put_buf(r1_bio);
1902                         md_done_sync(mddev, s, uptodate);
1903                 }
1904         }
1905 }
1906
1907 static void end_sync_write(struct bio *bio)
1908 {
1909         int uptodate = !bio->bi_status;
1910         struct r1bio *r1_bio = get_resync_r1bio(bio);
1911         struct mddev *mddev = r1_bio->mddev;
1912         struct r1conf *conf = mddev->private;
1913         sector_t first_bad;
1914         int bad_sectors;
1915         struct md_rdev *rdev = conf->mirrors[find_bio_disk(r1_bio, bio)].rdev;
1916
1917         if (!uptodate) {
1918                 abort_sync_write(mddev, r1_bio);
1919                 set_bit(WriteErrorSeen, &rdev->flags);
1920                 if (!test_and_set_bit(WantReplacement, &rdev->flags))
1921                         set_bit(MD_RECOVERY_NEEDED,
1922                                 &mddev->recovery);
1923                 set_bit(R1BIO_WriteError, &r1_bio->state);
1924         } else if (is_badblock(rdev, r1_bio->sector, r1_bio->sectors,
1925                                &first_bad, &bad_sectors) &&
1926                    !is_badblock(conf->mirrors[r1_bio->read_disk].rdev,
1927                                 r1_bio->sector,
1928                                 r1_bio->sectors,
1929                                 &first_bad, &bad_sectors)
1930                 )
1931                 set_bit(R1BIO_MadeGood, &r1_bio->state);
1932
1933         put_sync_write_buf(r1_bio, uptodate);
1934 }
1935
1936 static int r1_sync_page_io(struct md_rdev *rdev, sector_t sector,
1937                             int sectors, struct page *page, int rw)
1938 {
1939         if (sync_page_io(rdev, sector, sectors << 9, page, rw, 0, false))
1940                 /* success */
1941                 return 1;
1942         if (rw == WRITE) {
1943                 set_bit(WriteErrorSeen, &rdev->flags);
1944                 if (!test_and_set_bit(WantReplacement, &rdev->flags))
1945                         set_bit(MD_RECOVERY_NEEDED,
1946                                 &rdev->mddev->recovery);
1948         }
1949         /* need to record an error - either for the block or the device */
1950         if (!rdev_set_badblocks(rdev, sector, sectors, 0))
1951                 md_error(rdev->mddev, rdev);
1952         return 0;
1953 }
1954
1955 static int fix_sync_read_error(struct r1bio *r1_bio)
1956 {
1957         /* Try some synchronous reads of other devices to get
1958          * good data, much like with normal read errors.  Only
1959          * read into the pages we already have so we don't
1960          * need to re-issue the read request.
1961          * We don't need to freeze the array, because being in an
1962          * active sync request, there is no normal IO, and
1963          * no overlapping syncs.
1964          * We don't need to check is_badblock() again as we
1965          * made sure that anything with a bad block in range
1966          * will have bi_end_io clear.
1967          */
1968         struct mddev *mddev = r1_bio->mddev;
1969         struct r1conf *conf = mddev->private;
1970         struct bio *bio = r1_bio->bios[r1_bio->read_disk];
1971         struct page **pages = get_resync_pages(bio)->pages;
1972         sector_t sect = r1_bio->sector;
1973         int sectors = r1_bio->sectors;
1974         int idx = 0;
1975         struct md_rdev *rdev;
1976
1977         rdev = conf->mirrors[r1_bio->read_disk].rdev;
1978         if (test_bit(FailFast, &rdev->flags)) {
1979                 /* Don't try recovering from here - just fail it
1980                  * ... unless it is the last working device of course */
1981                 md_error(mddev, rdev);
1982                 if (test_bit(Faulty, &rdev->flags))
1983                         /* Don't try to read from here, but make sure
1984                          * put_buf does its thing
1985                          */
1986                         bio->bi_end_io = end_sync_write;
1987         }
1988
1989         while (sectors) {
1990                 int s = sectors;
1991                 int d = r1_bio->read_disk;
1992                 int success = 0;
1993                 int start;
1994
1995                 if (s > (PAGE_SIZE>>9))
1996                         s = PAGE_SIZE >> 9;
1997                 do {
1998                         if (r1_bio->bios[d]->bi_end_io == end_sync_read) {
1999                                 /* No rcu protection needed here; devices
2000                                  * can only be removed when no resync is
2001                                  * active, and resync is currently active.
2002                                  */
2003                                 rdev = conf->mirrors[d].rdev;
2004                                 if (sync_page_io(rdev, sect, s<<9,
2005                                                  pages[idx],
2006                                                  REQ_OP_READ, 0, false)) {
2007                                         success = 1;
2008                                         break;
2009                                 }
2010                         }
2011                         d++;
2012                         if (d == conf->raid_disks * 2)
2013                                 d = 0;
2014                 } while (!success && d != r1_bio->read_disk);
2015
2016                 if (!success) {
2017                         char b[BDEVNAME_SIZE];
2018                         int abort = 0;
2019                         /* Cannot read from anywhere, this block is lost.
2020                          * Record a bad block on each device.  If that doesn't
2021                          * work just disable and interrupt the recovery.
2022                          * Don't fail devices as that won't really help.
2023                          */
2024                         pr_crit_ratelimited("md/raid1:%s: %s: unrecoverable I/O read error for block %llu\n",
2025                                             mdname(mddev), bio_devname(bio, b),
2026                                             (unsigned long long)r1_bio->sector);
2027                         for (d = 0; d < conf->raid_disks * 2; d++) {
2028                                 rdev = conf->mirrors[d].rdev;
2029                                 if (!rdev || test_bit(Faulty, &rdev->flags))
2030                                         continue;
2031                                 if (!rdev_set_badblocks(rdev, sect, s, 0))
2032                                         abort = 1;
2033                         }
2034                         if (abort) {
2035                                 conf->recovery_disabled =
2036                                         mddev->recovery_disabled;
2037                                 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2038                                 md_done_sync(mddev, r1_bio->sectors, 0);
2039                                 put_buf(r1_bio);
2040                                 return 0;
2041                         }
2042                         /* Try next page */
2043                         sectors -= s;
2044                         sect += s;
2045                         idx++;
2046                         continue;
2047                 }
2048
2049                 start = d;
2050                 /* write it back and re-read */
2051                 while (d != r1_bio->read_disk) {
2052                         if (d == 0)
2053                                 d = conf->raid_disks * 2;
2054                         d--;
2055                         if (r1_bio->bios[d]->bi_end_io != end_sync_read)
2056                                 continue;
2057                         rdev = conf->mirrors[d].rdev;
2058                         if (r1_sync_page_io(rdev, sect, s,
2059                                             pages[idx],
2060                                             WRITE) == 0) {
2061                                 r1_bio->bios[d]->bi_end_io = NULL;
2062                                 rdev_dec_pending(rdev, mddev);
2063                         }
2064                 }
2065                 d = start;
2066                 while (d != r1_bio->read_disk) {
2067                         if (d == 0)
2068                                 d = conf->raid_disks * 2;
2069                         d--;
2070                         if (r1_bio->bios[d]->bi_end_io != end_sync_read)
2071                                 continue;
2072                         rdev = conf->mirrors[d].rdev;
2073                         if (r1_sync_page_io(rdev, sect, s,
2074                                             pages[idx],
2075                                             READ) != 0)
2076                                 atomic_add(s, &rdev->corrected_errors);
2077                 }
2078                 sectors -= s;
2079                 sect += s;
2080                 idx++;
2081         }
2082         set_bit(R1BIO_Uptodate, &r1_bio->state);
2083         bio->bi_status = 0;
2084         return 1;
2085 }
2086
2087 static void process_checks(struct r1bio *r1_bio)
2088 {
2089         /* We have read all readable devices.  If we haven't
2090          * got the block, then there is no hope left.
2091          * If we have, then we want to do a comparison
2092          * and skip the write if everything is the same.
2093          * If any blocks failed to read, then we need to
2094          * attempt an over-write
2095          */
2096         struct mddev *mddev = r1_bio->mddev;
2097         struct r1conf *conf = mddev->private;
2098         int primary;
2099         int i;
2100         int vcnt;
2101
2102         /* Fix variable parts of all bios */
2103         vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9);
2104         for (i = 0; i < conf->raid_disks * 2; i++) {
2105                 blk_status_t status;
2106                 struct bio *b = r1_bio->bios[i];
2107                 struct resync_pages *rp = get_resync_pages(b);
2108                 if (b->bi_end_io != end_sync_read)
2109                         continue;
2110                 /* fixup the bio for reuse, but preserve the error status */
2111                 status = b->bi_status;
2112                 bio_reset(b);
2113                 b->bi_status = status;
2114                 b->bi_iter.bi_sector = r1_bio->sector +
2115                         conf->mirrors[i].rdev->data_offset;
2116                 bio_set_dev(b, conf->mirrors[i].rdev->bdev);
2117                 b->bi_end_io = end_sync_read;
2118                 rp->raid_bio = r1_bio;
2119                 b->bi_private = rp;
2120
2121                 /* initialize bvec table again */
2122                 md_bio_reset_resync_pages(b, rp, r1_bio->sectors << 9);
2123         }
2124         for (primary = 0; primary < conf->raid_disks * 2; primary++)
2125                 if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
2126                     !r1_bio->bios[primary]->bi_status) {
2127                         r1_bio->bios[primary]->bi_end_io = NULL;
2128                         rdev_dec_pending(conf->mirrors[primary].rdev, mddev);
2129                         break;
2130                 }
2131         r1_bio->read_disk = primary;
2132         for (i = 0; i < conf->raid_disks * 2; i++) {
2133                 int j = 0;
2134                 struct bio *pbio = r1_bio->bios[primary];
2135                 struct bio *sbio = r1_bio->bios[i];
2136                 blk_status_t status = sbio->bi_status;
2137                 struct page **ppages = get_resync_pages(pbio)->pages;
2138                 struct page **spages = get_resync_pages(sbio)->pages;
2139                 struct bio_vec *bi;
2140                 int page_len[RESYNC_PAGES] = { 0 };
2141                 struct bvec_iter_all iter_all;
2142
2143                 if (sbio->bi_end_io != end_sync_read)
2144                         continue;
2145                 /* Now we can 'fixup' the error value */
2146                 sbio->bi_status = 0;
2147
2148                 bio_for_each_segment_all(bi, sbio, iter_all)
2149                         page_len[j++] = bi->bv_len;
2150
2151                 if (!status) {
2152                         for (j = vcnt; j-- ; ) {
2153                                 if (memcmp(page_address(ppages[j]),
2154                                            page_address(spages[j]),
2155                                            page_len[j]))
2156                                         break;
2157                         }
2158                 } else
2159                         j = 0;
2160                 if (j >= 0)
2161                         atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
2162                 if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
2163                               && !status)) {
2164                         /* No need to write to this device. */
2165                         sbio->bi_end_io = NULL;
2166                         rdev_dec_pending(conf->mirrors[i].rdev, mddev);
2167                         continue;
2168                 }
2169
2170                 bio_copy_data(sbio, pbio);
2171         }
2172 }
2173
2174 static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
2175 {
2176         struct r1conf *conf = mddev->private;
2177         int i;
2178         int disks = conf->raid_disks * 2;
2179         struct bio *wbio;
2180
2181         if (!test_bit(R1BIO_Uptodate, &r1_bio->state))
2182                 /* ouch - failed to read all of that. */
2183                 if (!fix_sync_read_error(r1_bio))
2184                         return;
2185
2186         if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
2187                 process_checks(r1_bio);
2188
2189         /*
2190          * schedule writes
2191          */
2192         atomic_set(&r1_bio->remaining, 1);
2193         for (i = 0; i < disks ; i++) {
2194                 wbio = r1_bio->bios[i];
2195                 if (wbio->bi_end_io == NULL ||
2196                     (wbio->bi_end_io == end_sync_read &&
2197                      (i == r1_bio->read_disk ||
2198                       !test_bit(MD_RECOVERY_SYNC, &mddev->recovery))))
2199                         continue;
2200                 if (test_bit(Faulty, &conf->mirrors[i].rdev->flags)) {
2201                         abort_sync_write(mddev, r1_bio);
2202                         continue;
2203                 }
2204
2205                 bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
2206                 if (test_bit(FailFast, &conf->mirrors[i].rdev->flags))
2207                         wbio->bi_opf |= MD_FAILFAST;
2208
2209                 wbio->bi_end_io = end_sync_write;
2210                 atomic_inc(&r1_bio->remaining);
2211                 md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));
2212
2213                 submit_bio_noacct(wbio);
2214         }
2215
2216         put_sync_write_buf(r1_bio, 1);
2217 }
2218
2219 /*
2220  * This is a kernel thread which:
2221  *
2222  *      1.      Retries failed read operations on working mirrors.
2223  *      2.      Updates the raid superblock when problems are encountered.
2224  *      3.      Performs writes following reads for array synchronising.
2225  */
2226
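/*
 * Repair a read error seen during normal I/O, one page-sized chunk at a
 * time: read the range from another working mirror into conf->tmppage,
 * write it back over the bad copy, then re-read to verify. The caller
 * has frozen the array, so no new I/O can race with us.
 */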
2227 static void fix_read_error(struct r1conf *conf, int read_disk,
2228                            sector_t sect, int sectors)
2229 {
2230         struct mddev *mddev = conf->mddev;
2231         while (sectors) {
2232                 int s = sectors;
2233                 int d = read_disk;
2234                 int success = 0;
2235                 int start;
2236                 struct md_rdev *rdev;
2237
2238                 if (s > (PAGE_SIZE>>9))
2239                         s = PAGE_SIZE >> 9;
2240
2241                 do {
2242                         sector_t first_bad;
2243                         int bad_sectors;
2244
2245                         rcu_read_lock();
2246                         rdev = rcu_dereference(conf->mirrors[d].rdev);
2247                         if (rdev &&
2248                             (test_bit(In_sync, &rdev->flags) ||
2249                              (!test_bit(Faulty, &rdev->flags) &&
2250                               rdev->recovery_offset >= sect + s)) &&
2251                             is_badblock(rdev, sect, s,
2252                                         &first_bad, &bad_sectors) == 0) {
2253                                 atomic_inc(&rdev->nr_pending);
2254                                 rcu_read_unlock();
2255                                 if (sync_page_io(rdev, sect, s<<9,
2256                                          conf->tmppage, REQ_OP_READ, 0, false))
2257                                         success = 1;
2258                                 rdev_dec_pending(rdev, mddev);
2259                                 if (success)
2260                                         break;
2261                         } else
2262                                 rcu_read_unlock();
2263                         d++;
2264                         if (d == conf->raid_disks * 2)
2265                                 d = 0;
2266                 } while (!success && d != read_disk);
2267
2268                 if (!success) {
2269                         /* Cannot read from anywhere - mark it bad */
2270                         struct md_rdev *rdev = conf->mirrors[read_disk].rdev;
2271                         if (!rdev_set_badblocks(rdev, sect, s, 0))
2272                                 md_error(mddev, rdev);
2273                         break;
2274                 }
2275                 /* write it back and re-read */
2276                 start = d;
2277                 while (d != read_disk) {
2278                         if (d == 0)
2279                                 d = conf->raid_disks * 2;
2280                         d--;
2281                         rcu_read_lock();
2282                         rdev = rcu_dereference(conf->mirrors[d].rdev);
2283                         if (rdev &&
2284                             !test_bit(Faulty, &rdev->flags)) {
2285                                 atomic_inc(&rdev->nr_pending);
2286                                 rcu_read_unlock();
2287                                 r1_sync_page_io(rdev, sect, s,
2288                                                 conf->tmppage, WRITE);
2289                                 rdev_dec_pending(rdev, mddev);
2290                         } else
2291                                 rcu_read_unlock();
2292                 }
2293                 d = start;
2294                 while (d != read_disk) {
2295                         char b[BDEVNAME_SIZE];
2296                         if (d == 0)
2297                                 d = conf->raid_disks * 2;
2298                         d--;
2299                         rcu_read_lock();
2300                         rdev = rcu_dereference(conf->mirrors[d].rdev);
2301                         if (rdev &&
2302                             !test_bit(Faulty, &rdev->flags)) {
2303                                 atomic_inc(&rdev->nr_pending);
2304                                 rcu_read_unlock();
2305                                 if (r1_sync_page_io(rdev, sect, s,
2306                                                     conf->tmppage, READ)) {
2307                                         atomic_add(s, &rdev->corrected_errors);
2308                                         pr_info("md/raid1:%s: read error corrected (%d sectors at %llu on %s)\n",
2309                                                 mdname(mddev), s,
2310                                                 (unsigned long long)(sect +
2311                                                                      rdev->data_offset),
2312                                                 bdevname(rdev->bdev, b));
2313                                 }
2314                                 rdev_dec_pending(rdev, mddev);
2315                         } else
2316                                 rcu_read_unlock();
2317                 }
2318                 sectors -= s;
2319                 sect += s;
2320         }
2321 }
2322
2323 static int narrow_write_error(struct r1bio *r1_bio, int i)
2324 {
2325         struct mddev *mddev = r1_bio->mddev;
2326         struct r1conf *conf = mddev->private;
2327         struct md_rdev *rdev = conf->mirrors[i].rdev;
2328
2329         /* bio has the data to be written to device 'i' where
2330          * we just recently had a write error.
2331          * We repeatedly clone the bio and trim down to one block,
2332          * then try the write.  Where the write fails we record
2333          * a bad block.
2334          * It is conceivable that the bio doesn't exactly align with
2335          * blocks.  We must handle this somehow.
2336          *
2337          * We currently own a reference on the rdev.
2338          */
2339
2340         int block_sectors;
2341         sector_t sector;
2342         int sectors;
2343         int sect_to_write = r1_bio->sectors;
2344         int ok = 1;
2345
2346         if (rdev->badblocks.shift < 0)
2347                 return 0;
2348
2349         block_sectors = roundup(1 << rdev->badblocks.shift,
2350                                 bdev_logical_block_size(rdev->bdev) >> 9);
2351         sector = r1_bio->sector;
2352         sectors = ((sector + block_sectors)
2353                    & ~(sector_t)(block_sectors - 1))
2354                 - sector;
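        /*
         * e.g. (illustrative numbers, assuming 512-byte logical blocks):
         * badblocks.shift == 3 gives 8-sector blocks; for sector == 21,
         * sectors = ((21 + 8) & ~7) - 21 = 3, so the first write ends
         * exactly on the next block boundary.
         */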
2355
2356         while (sect_to_write) {
2357                 struct bio *wbio;
2358                 if (sectors > sect_to_write)
2359                         sectors = sect_to_write;
2360                 /* Write at 'sector' for 'sectors' */
2361
2362                 if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
2363                         wbio = bio_clone_fast(r1_bio->behind_master_bio,
2364                                               GFP_NOIO,
2365                                               &mddev->bio_set);
2366                 } else {
2367                         wbio = bio_clone_fast(r1_bio->master_bio, GFP_NOIO,
2368                                               &mddev->bio_set);
2369                 }
2370
2371                 bio_set_op_attrs(wbio, REQ_OP_WRITE, 0);
2372                 wbio->bi_iter.bi_sector = r1_bio->sector;
2373                 wbio->bi_iter.bi_size = r1_bio->sectors << 9;
2374
2375                 bio_trim(wbio, sector - r1_bio->sector, sectors);
2376                 wbio->bi_iter.bi_sector += rdev->data_offset;
2377                 bio_set_dev(wbio, rdev->bdev);
2378
2379                 if (submit_bio_wait(wbio) < 0)
2380                         /* failure! */
2381                         ok = rdev_set_badblocks(rdev, sector,
2382                                                 sectors, 0)
2383                                 && ok;
2384
2385                 bio_put(wbio);
2386                 sect_to_write -= sectors;
2387                 sector += sectors;
2388                 sectors = block_sectors;
2389         }
2390         return ok;
2391 }
2392
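/*
 * Post-process a finished sync write: clear bad blocks that were
 * successfully rewritten and record fresh ones where a write failed.
 */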
2393 static void handle_sync_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
2394 {
2395         int m;
2396         int s = r1_bio->sectors;
2397         for (m = 0; m < conf->raid_disks * 2 ; m++) {
2398                 struct md_rdev *rdev = conf->mirrors[m].rdev;
2399                 struct bio *bio = r1_bio->bios[m];
2400                 if (bio->bi_end_io == NULL)
2401                         continue;
2402                 if (!bio->bi_status &&
2403                     test_bit(R1BIO_MadeGood, &r1_bio->state)) {
2404                         rdev_clear_badblocks(rdev, r1_bio->sector, s, 0);
2405                 }
2406                 if (bio->bi_status &&
2407                     test_bit(R1BIO_WriteError, &r1_bio->state)) {
2408                         if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0))
2409                                 md_error(conf->mddev, rdev);
2410                 }
2411         }
2412         put_buf(r1_bio);
2413         md_done_sync(conf->mddev, s, 1);
2414 }
2415
2416 static void handle_write_finished(struct r1conf *conf, struct r1bio *r1_bio)
2417 {
2418         int m, idx;
2419         bool fail = false;
2420
2421         for (m = 0; m < conf->raid_disks * 2 ; m++)
2422                 if (r1_bio->bios[m] == IO_MADE_GOOD) {
2423                         struct md_rdev *rdev = conf->mirrors[m].rdev;
2424                         rdev_clear_badblocks(rdev,
2425                                              r1_bio->sector,
2426                                              r1_bio->sectors, 0);
2427                         rdev_dec_pending(rdev, conf->mddev);
2428                 } else if (r1_bio->bios[m] != NULL) {
2429                         /* This drive got a write error.  We need to
2430                          * narrow down and record precise write
2431                          * errors.
2432                          */
2433                         fail = true;
2434                         if (!narrow_write_error(r1_bio, m)) {
2435                                 md_error(conf->mddev,
2436                                          conf->mirrors[m].rdev);
2437                                 /* an I/O failed, we can't clear the bitmap */
2438                                 set_bit(R1BIO_Degraded, &r1_bio->state);
2439                         }
2440                         rdev_dec_pending(conf->mirrors[m].rdev,
2441                                          conf->mddev);
2442                 }
2443         if (fail) {
2444                 spin_lock_irq(&conf->device_lock);
2445                 list_add(&r1_bio->retry_list, &conf->bio_end_io_list);
2446                 idx = sector_to_idx(r1_bio->sector);
2447                 atomic_inc(&conf->nr_queued[idx]);
2448                 spin_unlock_irq(&conf->device_lock);
2449                 /*
2450                  * In case freeze_array() is waiting for condition
2451                  * get_unqueued_pending() == extra to be true.
2452                  */
2453                 wake_up(&conf->wait_barrier);
2454                 md_wakeup_thread(conf->mddev->thread);
2455         } else {
2456                 if (test_bit(R1BIO_WriteError, &r1_bio->state))
2457                         close_write(r1_bio);
2458                 raid_end_bio_io(r1_bio);
2459         }
2460 }
2461
2462 static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
2463 {
2464         struct mddev *mddev = conf->mddev;
2465         struct bio *bio;
2466         struct md_rdev *rdev;
2467
2468         clear_bit(R1BIO_ReadError, &r1_bio->state);
2469         /* We got a read error. Maybe the drive is bad, or maybe just
2470          * this block, in which case we can fix it.
2471          * We freeze all other IO, and try reading the block from
2472          * other devices.  When we find one, we re-write the block
2473          * and re-read to check that this fixed the read error.
2474          * This is all done synchronously while the array is
2475          * frozen.
2476          */
2477
2478         bio = r1_bio->bios[r1_bio->read_disk];
2479         bio_put(bio);
2480         r1_bio->bios[r1_bio->read_disk] = NULL;
2481
2482         rdev = conf->mirrors[r1_bio->read_disk].rdev;
2483         if (mddev->ro == 0
2484             && !test_bit(FailFast, &rdev->flags)) {
2485                 freeze_array(conf, 1);
2486                 fix_read_error(conf, r1_bio->read_disk,
2487                                r1_bio->sector, r1_bio->sectors);
2488                 unfreeze_array(conf);
2489         } else if (mddev->ro == 0 && test_bit(FailFast, &rdev->flags)) {
2490                 md_error(mddev, rdev);
2491         } else {
2492                 r1_bio->bios[r1_bio->read_disk] = IO_BLOCKED;
2493         }
2494
2495         rdev_dec_pending(rdev, conf->mddev);
2496         allow_barrier(conf, r1_bio->sector);
2497         bio = r1_bio->master_bio;
2498
2499         /* Reuse the old r1_bio so that the IO_BLOCKED settings are preserved */
2500         r1_bio->state = 0;
2501         raid1_read_request(mddev, bio, r1_bio->sectors, r1_bio);
2502 }
2503
2504 static void raid1d(struct md_thread *thread)
2505 {
2506         struct mddev *mddev = thread->mddev;
2507         struct r1bio *r1_bio;
2508         unsigned long flags;
2509         struct r1conf *conf = mddev->private;
2510         struct list_head *head = &conf->retry_list;
2511         struct blk_plug plug;
2512         int idx;
2513
2514         md_check_recovery(mddev);
2515
2516         if (!list_empty_careful(&conf->bio_end_io_list) &&
2517             !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) {
2518                 LIST_HEAD(tmp);
2519                 spin_lock_irqsave(&conf->device_lock, flags);
2520                 if (!test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags))
2521                         list_splice_init(&conf->bio_end_io_list, &tmp);
2522                 spin_unlock_irqrestore(&conf->device_lock, flags);
2523                 while (!list_empty(&tmp)) {
2524                         r1_bio = list_first_entry(&tmp, struct r1bio,
2525                                                   retry_list);
2526                         list_del(&r1_bio->retry_list);
2527                         idx = sector_to_idx(r1_bio->sector);
2528                         atomic_dec(&conf->nr_queued[idx]);
2529                         if (mddev->degraded)
2530                                 set_bit(R1BIO_Degraded, &r1_bio->state);
2531                         if (test_bit(R1BIO_WriteError, &r1_bio->state))
2532                                 close_write(r1_bio);
2533                         raid_end_bio_io(r1_bio);
2534                 }
2535         }
2536
2537         blk_start_plug(&plug);
2538         for (;;) {
2539
2540                 flush_pending_writes(conf);
2541
2542                 spin_lock_irqsave(&conf->device_lock, flags);
2543                 if (list_empty(head)) {
2544                         spin_unlock_irqrestore(&conf->device_lock, flags);
2545                         break;
2546                 }
2547                 r1_bio = list_entry(head->prev, struct r1bio, retry_list);
2548                 list_del(head->prev);
2549                 idx = sector_to_idx(r1_bio->sector);
2550                 atomic_dec(&conf->nr_queued[idx]);
2551                 spin_unlock_irqrestore(&conf->device_lock, flags);
2552
2553                 mddev = r1_bio->mddev;
2554                 conf = mddev->private;
2555                 if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
2556                         if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
2557                             test_bit(R1BIO_WriteError, &r1_bio->state))
2558                                 handle_sync_write_finished(conf, r1_bio);
2559                         else
2560                                 sync_request_write(mddev, r1_bio);
2561                 } else if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
2562                            test_bit(R1BIO_WriteError, &r1_bio->state))
2563                         handle_write_finished(conf, r1_bio);
2564                 else if (test_bit(R1BIO_ReadError, &r1_bio->state))
2565                         handle_read_error(conf, r1_bio);
2566                 else
2567                         WARN_ON_ONCE(1);
2568
2569                 cond_resched();
2570                 if (mddev->sb_flags & ~(1<<MD_SB_CHANGE_PENDING))
2571                         md_check_recovery(mddev);
2572         }
2573         blk_finish_plug(&plug);
2574 }
2575
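/* Size the resync buffer pool to cover one RESYNC_WINDOW of in-flight I/O. */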
2576 static int init_resync(struct r1conf *conf)
2577 {
2578         int buffs;
2579
2580         buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
2581         BUG_ON(mempool_initialized(&conf->r1buf_pool));
2582
2583         return mempool_init(&conf->r1buf_pool, buffs, r1buf_pool_alloc,
2584                             r1buf_pool_free, conf->poolinfo);
2585 }
2586
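/*
 * Take a pre-built r1bio from the resync pool and reset each of its bios
 * for reuse, preserving the resync_pages pointer that bio_reset() clears.
 */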
2587 static struct r1bio *raid1_alloc_init_r1buf(struct r1conf *conf)
2588 {
2589         struct r1bio *r1bio = mempool_alloc(&conf->r1buf_pool, GFP_NOIO);
2590         struct resync_pages *rps;
2591         struct bio *bio;
2592         int i;
2593
2594         for (i = conf->poolinfo->raid_disks; i--; ) {
2595                 bio = r1bio->bios[i];
2596                 rps = bio->bi_private;
2597                 bio_reset(bio);
2598                 bio->bi_private = rps;
2599         }
2600         r1bio->master_bio = NULL;
2601         return r1bio;
2602 }
2603
2604 /*
2605  * perform a "sync" on one "block"
2606  *
2607  * We need to make sure that no normal I/O request - particularly write
2608  * requests - conflict with active sync requests.
2609  *
2610  * This is achieved by tracking pending requests and a 'barrier' concept
2611  * that can be installed to exclude normal IO requests.
2612  */
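
/*
 * Condensed model of that barrier scheme (a sketch only: the real code
 * splits state into BARRIER_BUCKETS_NR buckets, uses atomics on the fast
 * path and also supports freezing the array; the *_sketch names below are
 * illustrative, not part of the driver):
 */
#if 0
static spinlock_t lock;
static wait_queue_head_t wq;
static int nr_pending;          /* in-flight normal I/O */
static int barrier;             /* set while resync owns the region */

static void wait_barrier_sketch(void)          /* before normal I/O */
{
        spin_lock_irq(&lock);
        wait_event_lock_irq(wq, !barrier, lock);
        nr_pending++;
        spin_unlock_irq(&lock);
}

static void allow_barrier_sketch(void)         /* normal I/O completed */
{
        spin_lock_irq(&lock);
        if (!--nr_pending)
                wake_up(&wq);
        spin_unlock_irq(&lock);
}

static void raise_barrier_sketch(void)         /* before a resync window */
{
        spin_lock_irq(&lock);
        barrier = 1;
        wait_event_lock_irq(wq, !nr_pending, lock); /* drain normal I/O */
        spin_unlock_irq(&lock);
}

static void lower_barrier_sketch(void)         /* resync window finished */
{
        spin_lock_irq(&lock);
        barrier = 0;
        wake_up(&wq);
        spin_unlock_irq(&lock);
}
#endif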
2613
2614 static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
2615                                    int *skipped)
2616 {
2617         struct r1conf *conf = mddev->private;
2618         struct r1bio *r1_bio;
2619         struct bio *bio;
2620         sector_t max_sector, nr_sectors;
2621         int disk = -1;
2622         int i;
2623         int wonly = -1;
2624         int write_targets = 0, read_targets = 0;
2625         sector_t sync_blocks;
2626         int still_degraded = 0;
2627         int good_sectors = RESYNC_SECTORS;
2628         int min_bad = 0; /* number of sectors that are bad in all devices */
2629         int idx = sector_to_idx(sector_nr);
2630         int page_idx = 0;
2631
2632         if (!mempool_initialized(&conf->r1buf_pool))
2633                 if (init_resync(conf))
2634                         return 0;
2635
2636         max_sector = mddev->dev_sectors;
2637         if (sector_nr >= max_sector) {
2638                 /* If we aborted, we need to abort the
2639                  * sync on the 'current' bitmap chunk (there will
2640                  * only be one in raid1 resync).
2641                  * We can find the current address in mddev->curr_resync.
2642                  */
2643                 if (mddev->curr_resync < max_sector) /* aborted */
2644                         md_bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
2645                                            &sync_blocks, 1);
2646                 else /* completed sync */
2647                         conf->fullsync = 0;
2648
2649                 md_bitmap_close_sync(mddev->bitmap);
2650                 close_sync(conf);
2651
2652                 if (mddev_is_clustered(mddev)) {
2653                         conf->cluster_sync_low = 0;
2654                         conf->cluster_sync_high = 0;
2655                 }
2656                 return 0;
2657         }
2658
2659         if (mddev->bitmap == NULL &&
2660             mddev->recovery_cp == MaxSector &&
2661             !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
2662             conf->fullsync == 0) {
2663                 *skipped = 1;
2664                 return max_sector - sector_nr;
2665         }
2666         /* Before building a request, check if we can skip these blocks.
2667          * This call to md_bitmap_start_sync doesn't actually record anything.
2668          */
2669         if (!md_bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
2670             !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
2671                 /* We can skip this block, and probably several more */
2672                 *skipped = 1;
2673                 return sync_blocks;
2674         }
2675
2676         /*
2677          * If there is non-resync activity waiting for a turn, then let it
2678          * through before starting on this new sync request.
2679          */
2680         if (atomic_read(&conf->nr_waiting[idx]))
2681                 schedule_timeout_uninterruptible(1);
2682
2683         /* we are incrementing sector_nr below. To be safe, we check against
2684          * sector_nr + two times RESYNC_SECTORS
2685          */
2686
2687         md_bitmap_cond_end_sync(mddev->bitmap, sector_nr,
2688                 mddev_is_clustered(mddev) && (sector_nr + 2 * RESYNC_SECTORS > conf->cluster_sync_high));
2689
2691         if (raise_barrier(conf, sector_nr))
2692                 return 0;
2693
2694         r1_bio = raid1_alloc_init_r1buf(conf);
2695
2696         rcu_read_lock();
2697         /*
2698          * If we get a correctably read error during resync or recovery,
2699          * we might want to read from a different device.  So we
2700          * flag all drives that could conceivably be read from for READ,
2701          * and any others (which will be non-In_sync devices) for WRITE.
2702          * If a read fails, we try reading from something else for which READ
2703          * is OK.
2704          */
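
/*
 * Summarized as a table (editorial note; this matches the per-device loop
 * below):
 *
 *      rdev state                              bio set up as
 *      ----------                              -------------
 *      missing or Faulty                       nothing (slot stays degraded)
 *      not In_sync                             WRITE, end_sync_write
 *      In_sync, no bad block at sector_nr      READ, end_sync_read
 *      In_sync, bad block, doing resync/repair WRITE, end_sync_write
 */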
2705
2706         r1_bio->mddev = mddev;
2707         r1_bio->sector = sector_nr;
2708         r1_bio->state = 0;
2709         set_bit(R1BIO_IsSync, &r1_bio->state);
2710         /* make sure good_sectors won't go across barrier unit boundary */
2711         good_sectors = align_to_barrier_unit_end(sector_nr, good_sectors);
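
        /*
         * A sketch of that clamp, assuming the power-of-two barrier unit
         * this file uses (BARRIER_UNIT_SECTOR_SIZE sectors):
         *
         *      room = BARRIER_UNIT_SECTOR_SIZE -
         *             (sector_nr & (BARRIER_UNIT_SECTOR_SIZE - 1));
         *      good_sectors = min(good_sectors, room);
         *
         * e.g. with a 131072-sector unit, sector_nr 131000 and
         * good_sectors 256 clamp to 72, so this request stops exactly at
         * the unit boundary.
         */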
2712
2713         for (i = 0; i < conf->raid_disks * 2; i++) {
2714                 struct md_rdev *rdev;
2715                 bio = r1_bio->bios[i];
2716
2717                 rdev = rcu_dereference(conf->mirrors[i].rdev);
2718                 if (rdev == NULL ||
2719                     test_bit(Faulty, &rdev->flags)) {
2720                         if (i < conf->raid_disks)
2721                                 still_degraded = 1;
2722                 } else if (!test_bit(In_sync, &rdev->flags)) {
2723                         bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
2724                         bio->bi_end_io = end_sync_write;
2725                         write_targets++;
2726                 } else {
2727                         /* may need to read from here */
2728                         sector_t first_bad = MaxSector;
2729                         int bad_sectors;
2730
2731                         if (is_badblock(rdev, sector_nr, good_sectors,
2732                                         &first_bad, &bad_sectors)) {
2733                                 if (first_bad > sector_nr)
2734                                         good_sectors = first_bad - sector_nr;
2735                                 else {
2736                                         bad_sectors -= (sector_nr - first_bad);
2737                                         if (min_bad == 0 ||
2738                                             min_bad > bad_sectors)
2739                                                 min_bad = bad_sectors;
2740                                 }
2741                         }
2742                         if (sector_nr < first_bad) {
2743                                 if (test_bit(WriteMostly, &rdev->flags)) {
2744                                         if (wonly < 0)
2745                                                 wonly = i;
2746                                 } else {
2747                                         if (disk < 0)
2748                                                 disk = i;
2749                                 }
2750                                 bio_set_op_attrs(bio, REQ_OP_READ, 0);
2751                                 bio->bi_end_io = end_sync_read;
2752                                 read_targets++;
2753                         } else if (!test_bit(WriteErrorSeen, &rdev->flags) &&
2754                                 test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
2755                                 !test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) {
2756                                 /*
2757                                  * The device is suitable for reading (InSync),
2758                                  * but has bad block(s) here. Let's try to correct them,
2759                                  * if we are doing resync or repair. Otherwise, leave
2760                                  * this device alone for this sync request.
2761                                  */
2762                                 bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
2763                                 bio->bi_end_io = end_sync_write;
2764                                 write_targets++;
2765                         }
2766                 }
2767                 if (rdev && bio->bi_end_io) {
2768                         atomic_inc(&rdev->nr_pending);
2769                         bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
2770                         bio_set_dev(bio, rdev->bdev);
2771                         if (test_bit(FailFast, &rdev->flags))
2772                                 bio->bi_opf |= MD_FAILFAST;
2773                 }
2774         }
2775         rcu_read_unlock();
2776         if (disk < 0)
2777                 disk = wonly;
2778         r1_bio->read_disk = disk;
2779
2780         if (read_targets == 0 && min_bad > 0) {
2781                 /* These sectors are bad on all InSync devices, so we
2782                  * need to mark them bad on all write targets
2783                  */
2784                 int ok = 1;
2785                 for (i = 0 ; i < conf->raid_disks * 2 ; i++)
2786                         if (r1_bio->bios[i]->bi_end_io == end_sync_write) {
2787                                 struct md_rdev *rdev = conf->mirrors[i].rdev;
2788                                 ok = rdev_set_badblocks(rdev, sector_nr,
2789                                                         min_bad, 0) && ok;
2791                         }
2792                 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags);
2793                 *skipped = 1;
2794                 put_buf(r1_bio);
2795
2796                 if (!ok) {
2797                         /* Cannot record the badblocks, so need to
2798                          * abort the resync.
2799                          * If there are multiple read targets, could just
2800                          * fail the really bad ones ???
2801                          */
2802                         conf->recovery_disabled = mddev->recovery_disabled;
2803                         set_bit(MD_RECOVERY_INTR, &mddev->recovery);
2804                         return 0;
2805                 } else
2806                         return min_bad;
2807
2808         }
2809         if (min_bad > 0 && min_bad < good_sectors) {
2810                 /* only resync enough to reach the next bad->good
2811                  * transition */
2812                 good_sectors = min_bad;
2813         }
2814
2815         if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0)
2816                 /* extra read targets are also write targets */
2817                 write_targets += read_targets - 1;
2818
2819         if (write_targets == 0 || read_targets == 0) {
2820                 /* There is nowhere to write, so all non-sync
2821                  * drives must be failed - so we are finished
2822                  */
2823                 sector_t rv;
2824                 if (min_bad > 0)
2825                         max_sector = sector_nr + min_bad;
2826                 rv = max_sector - sector_nr;
2827                 *skipped = 1;
2828                 put_buf(r1_bio);
2829                 return rv;
2830         }
2831
2832         if (max_sector > mddev->resync_max)
2833                 max_sector = mddev->resync_max; /* Don't do IO beyond here */
2834         if (max_sector > sector_nr + good_sectors)
2835                 max_sector = sector_nr + good_sectors;
2836         nr_sectors = 0;
2837         sync_blocks = 0;
2838         do {
2839                 struct page *page;
2840                 int len = PAGE_SIZE;
2841                 if (sector_nr + (len>>9) > max_sector)
2842                         len = (max_sector - sector_nr) << 9;
2843                 if (len == 0)
2844                         break;
2845                 if (sync_blocks == 0) {
2846                         if (!md_bitmap_start_sync(mddev->bitmap, sector_nr,
2847                                                   &sync_blocks, still_degraded) &&
2848                             !conf->fullsync &&
2849                             !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
2850                                 break;
2851                         if ((len >> 9) > sync_blocks)
2852                                 len = sync_blocks<<9;
2853                 }
2854
2855                 for (i = 0 ; i < conf->raid_disks * 2; i++) {
2856                         struct resync_pages *rp;
2857
2858                         bio = r1_bio->bios[i];
2859                         rp = get_resync_pages(bio);
2860                         if (bio->bi_end_io) {
2861                                 page = resync_fetch_page(rp, page_idx);
2862
2863                                 /*
2864                                  * won't fail because the vec table is big
2865                                  * enough to hold all these pages
2866                                  */
2867                                 bio_add_page(bio, page, len, 0);
2868                         }
2869                 }
2870                 nr_sectors += len>>9;
2871                 sector_nr += len>>9;
2872                 sync_blocks -= (len>>9);
2873         } while (++page_idx < RESYNC_PAGES);
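
        /*
         * Each pass therefore carries at most RESYNC_PAGES pages per bio;
         * with the 64KiB RESYNC_BLOCK_SIZE and 4KiB pages that is 16
         * pages, i.e. 128 sectors per sync request (values assumed from
         * the shared raid1-10.c definitions).
         */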
2874
2875         r1_bio->sectors = nr_sectors;
2876
2877         if (mddev_is_clustered(mddev) &&
2878                         conf->cluster_sync_high < sector_nr + nr_sectors) {
2879                 conf->cluster_sync_low = mddev->curr_resync_completed;
2880                 conf->cluster_sync_high = conf->cluster_sync_low + CLUSTER_RESYNC_WINDOW_SECTORS;
2881                 /* Send resync message */
2882                 md_cluster_ops->resync_info_update(mddev,
2883                                 conf->cluster_sync_low,
2884                                 conf->cluster_sync_high);
2885         }
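
        /*
         * The window only moves forward: its low edge is what this node
         * has completed, its high edge is low + CLUSTER_RESYNC_WINDOW_SECTORS.
         * Other cluster nodes use the message above to keep their own I/O
         * away from the region still being resynced here.
         */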
2886
2887         /* For a user-requested sync, we read all readable devices and do a
2888          * compare
2889          */
2890         if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
2891                 atomic_set(&r1_bio->remaining, read_targets);
2892                 for (i = 0; i < conf->raid_disks * 2 && read_targets; i++) {
2893                         bio = r1_bio->bios[i];
2894                         if (bio->bi_end_io == end_sync_read) {
2895                                 read_targets--;
2896                                 md_sync_acct_bio(bio, nr_sectors);
2897                                 if (read_targets == 1)
2898                                         bio->bi_opf &= ~MD_FAILFAST;
2899                                 submit_bio_noacct(bio);
2900                         }
2901                 }
2902         } else {
2903                 atomic_set(&r1_bio->remaining, 1);
2904                 bio = r1_bio->bios[r1_bio->read_disk];
2905                 md_sync_acct_bio(bio, nr_sectors);
2906                 if (read_targets == 1)
2907                         bio->bi_opf &= ~MD_FAILFAST;
2908                 submit_bio_noacct(bio);
2909         }
2910         return nr_sectors;
2911 }
2912
2913 static sector_t raid1_size(struct mddev *mddev, sector_t sectors, int raid_disks)
2914 {
2915         if (sectors)
2916                 return sectors;
2917
2918         return mddev->dev_sectors;
2919 }
2920
2921 static struct r1conf *setup_conf(struct mddev *mddev)
2922 {
2923         struct r1conf *conf;
2924         int i;
2925         struct raid1_info *disk;
2926         struct md_rdev *rdev;
2927         int err = -ENOMEM;
2928
2929         conf = kzalloc(sizeof(struct r1conf), GFP_KERNEL);
2930         if (!conf)
2931                 goto abort;
2932
2933         conf->nr_pending = kcalloc(BARRIER_BUCKETS_NR,
2934                                    sizeof(atomic_t), GFP_KERNEL);
2935         if (!conf->nr_pending)
2936                 goto abort;
2937
2938         conf->nr_waiting = kcalloc(BARRIER_BUCKETS_NR,
2939                                    sizeof(atomic_t), GFP_KERNEL);
2940         if (!conf->nr_waiting)
2941                 goto abort;
2942
2943         conf->nr_queued = kcalloc(BARRIER_BUCKETS_NR,
2944                                   sizeof(atomic_t), GFP_KERNEL);
2945         if (!conf->nr_queued)
2946                 goto abort;
2947
2948         conf->barrier = kcalloc(BARRIER_BUCKETS_NR,
2949                                 sizeof(atomic_t), GFP_KERNEL);
2950         if (!conf->barrier)
2951                 goto abort;
2952
2953         conf->mirrors = kzalloc(array3_size(sizeof(struct raid1_info),
2954                                             mddev->raid_disks, 2),
2955                                 GFP_KERNEL);
2956         if (!conf->mirrors)
2957                 goto abort;
2958
2959         conf->tmppage = alloc_page(GFP_KERNEL);
2960         if (!conf->tmppage)
2961                 goto abort;
2962
2963         conf->poolinfo = kzalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
2964         if (!conf->poolinfo)
2965                 goto abort;
2966         conf->poolinfo->raid_disks = mddev->raid_disks * 2;
2967         err = mempool_init(&conf->r1bio_pool, NR_RAID_BIOS, r1bio_pool_alloc,
2968                            rbio_pool_free, conf->poolinfo);
2969         if (err)
2970                 goto abort;
2971
2972         err = bioset_init(&conf->bio_split, BIO_POOL_SIZE, 0, 0);
2973         if (err)
2974                 goto abort;
2975
2976         conf->poolinfo->mddev = mddev;
2977
2978         err = -EINVAL;
2979         spin_lock_init(&conf->device_lock);
2980         rdev_for_each(rdev, mddev) {
2981                 int disk_idx = rdev->raid_disk;
2982                 if (disk_idx >= mddev->raid_disks
2983                     || disk_idx < 0)
2984                         continue;
2985                 if (test_bit(Replacement, &rdev->flags))
2986                         disk = conf->mirrors + mddev->raid_disks + disk_idx;
2987                 else
2988                         disk = conf->mirrors + disk_idx;
2989
2990                 if (disk->rdev)
2991                         goto abort;
2992                 disk->rdev = rdev;
2993                 disk->head_position = 0;
2994                 disk->seq_start = MaxSector;
2995         }
2996         conf->raid_disks = mddev->raid_disks;
2997         conf->mddev = mddev;
2998         INIT_LIST_HEAD(&conf->retry_list);
2999         INIT_LIST_HEAD(&conf->bio_end_io_list);
3000
3001         spin_lock_init(&conf->resync_lock);
3002         init_waitqueue_head(&conf->wait_barrier);
3003
3004         bio_list_init(&conf->pending_bio_list);
3005         conf->pending_count = 0;
3006         conf->recovery_disabled = mddev->recovery_disabled - 1;
3007
3008         err = -EIO;
3009         for (i = 0; i < conf->raid_disks * 2; i++) {
3010
3011                 disk = conf->mirrors + i;
3012
3013                 if (i < conf->raid_disks &&
3014                     disk[conf->raid_disks].rdev) {
3015                         /* This slot has a replacement. */
3016                         if (!disk->rdev) {
3017                                 /* No original, just make the replacement
3018                                  * a recovering spare
3019                                  */
3020                                 disk->rdev =
3021                                         disk[conf->raid_disks].rdev;
3022                                 disk[conf->raid_disks].rdev = NULL;
3023                         } else if (!test_bit(In_sync, &disk->rdev->flags))
3024                                 /* Original is not in_sync - bad */
3025                                 goto abort;
3026                 }
3027
3028                 if (!disk->rdev ||
3029                     !test_bit(In_sync, &disk->rdev->flags)) {
3030                         disk->head_position = 0;
3031                         if (disk->rdev &&
3032                             (disk->rdev->saved_raid_disk < 0))
3033                                 conf->fullsync = 1;
3034                 }
3035         }
3036
3037         err = -ENOMEM;
3038         conf->thread = md_register_thread(raid1d, mddev, "raid1");
3039         if (!conf->thread)
3040                 goto abort;
3041
3042         return conf;
3043
3044  abort:
3045         if (conf) {
3046                 mempool_exit(&conf->r1bio_pool);
3047                 kfree(conf->mirrors);
3048                 safe_put_page(conf->tmppage);
3049                 kfree(conf->poolinfo);
3050                 kfree(conf->nr_pending);
3051                 kfree(conf->nr_waiting);
3052                 kfree(conf->nr_queued);
3053                 kfree(conf->barrier);
3054                 bioset_exit(&conf->bio_split);
3055                 kfree(conf);
3056         }
3057         return ERR_PTR(err);
3058 }
3059
3060 static void raid1_free(struct mddev *mddev, void *priv);
3061 static int raid1_run(struct mddev *mddev)
3062 {
3063         struct r1conf *conf;
3064         int i;
3065         struct md_rdev *rdev;
3066         int ret;
3067         bool discard_supported = false;
3068
3069         if (mddev->level != 1) {
3070                 pr_warn("md/raid1:%s: raid level not set to mirroring (%d)\n",
3071                         mdname(mddev), mddev->level);
3072                 return -EIO;
3073         }
3074         if (mddev->reshape_position != MaxSector) {
3075                 pr_warn("md/raid1:%s: reshape_position set but not supported\n",
3076                         mdname(mddev));
3077                 return -EIO;
3078         }
3079         if (mddev_init_writes_pending(mddev) < 0)
3080                 return -ENOMEM;
3081         /*
3082          * copy the already verified devices into our private RAID1
3083          * bookkeeping area. [whatever we allocate in raid1_run()
3084          * should be freed in raid1_free()]
3085          */
3086         if (mddev->private == NULL)
3087                 conf = setup_conf(mddev);
3088         else
3089                 conf = mddev->private;
3090
3091         if (IS_ERR(conf))
3092                 return PTR_ERR(conf);
3093
3094         if (mddev->queue) {
3095                 blk_queue_max_write_same_sectors(mddev->queue, 0);
3096                 blk_queue_max_write_zeroes_sectors(mddev->queue, 0);
3097         }
3098
3099         rdev_for_each(rdev, mddev) {
3100                 if (!mddev->gendisk)
3101                         continue;
3102                 disk_stack_limits(mddev->gendisk, rdev->bdev,
3103                                   rdev->data_offset << 9);
3104                 if (blk_queue_discard(bdev_get_queue(rdev->bdev)))
3105                         discard_supported = true;
3106         }
3107
3108         mddev->degraded = 0;
3109         for (i = 0; i < conf->raid_disks; i++)
3110                 if (conf->mirrors[i].rdev == NULL ||
3111                     !test_bit(In_sync, &conf->mirrors[i].rdev->flags) ||
3112                     test_bit(Faulty, &conf->mirrors[i].rdev->flags))
3113                         mddev->degraded++;
3114         /*
3115          * RAID1 needs at least one active disk
3116          */
3117         if (conf->raid_disks - mddev->degraded < 1) {
3118                 ret = -EINVAL;
3119                 goto abort;
3120         }
3121
3122         if (conf->raid_disks - mddev->degraded == 1)
3123                 mddev->recovery_cp = MaxSector;
3124
3125         if (mddev->recovery_cp != MaxSector)
3126                 pr_info("md/raid1:%s: not clean -- starting background reconstruction\n",
3127                         mdname(mddev));
3128         pr_info("md/raid1:%s: active with %d out of %d mirrors\n",
3129                 mdname(mddev), mddev->raid_disks - mddev->degraded,
3130                 mddev->raid_disks);
3131
3132         /*
3133          * Ok, everything is just fine now
3134          */
3135         mddev->thread = conf->thread;
3136         conf->thread = NULL;
3137         mddev->private = conf;
3138         set_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
3139
3140         md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
3141
3142         if (mddev->queue) {
3143                 if (discard_supported)
3144                         blk_queue_flag_set(QUEUE_FLAG_DISCARD,
3145                                                 mddev->queue);
3146                 else
3147                         blk_queue_flag_clear(QUEUE_FLAG_DISCARD,
3148                                                   mddev->queue);
3149         }
3150
3151         ret = md_integrity_register(mddev);
3152         if (ret) {
3153                 md_unregister_thread(&mddev->thread);
3154                 goto abort;
3155         }
3156         return 0;
3157
3158 abort:
3159         raid1_free(mddev, conf);
3160         return ret;
3161 }
3162
3163 static void raid1_free(struct mddev *mddev, void *priv)
3164 {
3165         struct r1conf *conf = priv;
3166
3167         mempool_exit(&conf->r1bio_pool);
3168         kfree(conf->mirrors);
3169         safe_put_page(conf->tmppage);
3170         kfree(conf->poolinfo);
3171         kfree(conf->nr_pending);
3172         kfree(conf->nr_waiting);
3173         kfree(conf->nr_queued);
3174         kfree(conf->barrier);
3175         bioset_exit(&conf->bio_split);
3176         kfree(conf);
3177 }
3178
3179 static int raid1_resize(struct mddev *mddev, sector_t sectors)
3180 {
3181         /* no resync is happening, and there is enough space
3182          * on all devices, so we can resize.
3183          * We need to make sure resync covers any new space.
3184          * If the array is shrinking we should possibly wait until
3185          * any io in the removed space completes, but it hardly seems
3186          * worth it.
3187          */
3188         sector_t newsize = raid1_size(mddev, sectors, 0);
3189         if (mddev->external_size &&
3190             mddev->array_sectors > newsize)
3191                 return -EINVAL;
3192         if (mddev->bitmap) {
3193                 int ret = md_bitmap_resize(mddev->bitmap, newsize, 0, 0);
3194                 if (ret)
3195                         return ret;
3196         }
3197         md_set_array_sectors(mddev, newsize);
3198         if (sectors > mddev->dev_sectors &&
3199             mddev->recovery_cp > mddev->dev_sectors) {
3200                 mddev->recovery_cp = mddev->dev_sectors;
3201                 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3202         }
3203         mddev->dev_sectors = sectors;
3204         mddev->resync_max_sectors = sectors;
3205         return 0;
3206 }
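
/*
 * Userspace typically reaches raid1_resize() through mdadm, e.g.
 * (illustrative):
 *
 *      mdadm --grow /dev/md0 --size=max
 *
 * If the array grew, recovery_cp is pulled back above so the newly
 * exposed sectors get resynced.
 */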
3207
3208 static int raid1_reshape(struct mddev *mddev)
3209 {
3210         /* We need to:
3211          * 1/ resize the r1bio_pool
3212          * 2/ resize conf->mirrors
3213          *
3214          * We allocate a new r1bio_pool if we can.
3215          * Then raise a device barrier and wait until all IO stops.
3216          * Then resize conf->mirrors and swap in the new r1bio pool.
3217          *
3218          * At the same time, we "pack" the devices so that all the missing
3219          * devices have the higher raid_disk numbers.
3220          */
3221         mempool_t newpool, oldpool;
3222         struct pool_info *newpoolinfo;
3223         struct raid1_info *newmirrors;
3224         struct r1conf *conf = mddev->private;
3225         int cnt, raid_disks;
3226         unsigned long flags;
3227         int d, d2;
3228         int ret;
3229
3230         memset(&newpool, 0, sizeof(newpool));
3231         memset(&oldpool, 0, sizeof(oldpool));
3232
3233         /* Cannot change chunk_size, layout, or level */
3234         if (mddev->chunk_sectors != mddev->new_chunk_sectors ||
3235             mddev->layout != mddev->new_layout ||
3236             mddev->level != mddev->new_level) {
3237                 mddev->new_chunk_sectors = mddev->chunk_sectors;
3238                 mddev->new_layout = mddev->layout;
3239                 mddev->new_level = mddev->level;
3240                 return -EINVAL;
3241         }
3242
3243         if (!mddev_is_clustered(mddev))
3244                 md_allow_write(mddev);
3245
3246         raid_disks = mddev->raid_disks + mddev->delta_disks;
3247
3248         if (raid_disks < conf->raid_disks) {
3249                 cnt = 0;
3250                 for (d = 0; d < conf->raid_disks; d++)
3251                         if (conf->mirrors[d].rdev)
3252                                 cnt++;
3253                 if (cnt > raid_disks)
3254                         return -EBUSY;
3255         }
3256
3257         newpoolinfo = kmalloc(sizeof(*newpoolinfo), GFP_KERNEL);
3258         if (!newpoolinfo)
3259                 return -ENOMEM;
3260         newpoolinfo->mddev = mddev;
3261         newpoolinfo->raid_disks = raid_disks * 2;
3262
3263         ret = mempool_init(&newpool, NR_RAID_BIOS, r1bio_pool_alloc,
3264                            rbio_pool_free, newpoolinfo);
3265         if (ret) {
3266                 kfree(newpoolinfo);
3267                 return ret;
3268         }
3269         newmirrors = kzalloc(array3_size(sizeof(struct raid1_info),
3270                                          raid_disks, 2),
3271                              GFP_KERNEL);
3272         if (!newmirrors) {
3273                 kfree(newpoolinfo);
3274                 mempool_exit(&newpool);
3275                 return -ENOMEM;
3276         }
3277
3278         freeze_array(conf, 0);
3279
3280         /* ok, everything is stopped */
3281         oldpool = conf->r1bio_pool;
3282         conf->r1bio_pool = newpool;
3283
3284         for (d = d2 = 0; d < conf->raid_disks; d++) {
3285                 struct md_rdev *rdev = conf->mirrors[d].rdev;
3286                 if (rdev && rdev->raid_disk != d2) {
3287                         sysfs_unlink_rdev(mddev, rdev);
3288                         rdev->raid_disk = d2;
3289                         sysfs_unlink_rdev(mddev, rdev);
3290                         if (sysfs_link_rdev(mddev, rdev))
3291                                 pr_warn("md/raid1:%s: cannot register rd%d\n",
3292                                         mdname(mddev), rdev->raid_disk);
3293                 }
3294                 if (rdev)
3295                         newmirrors[d2++].rdev = rdev;
3296         }
3297         kfree(conf->mirrors);
3298         conf->mirrors = newmirrors;
3299         kfree(conf->poolinfo);
3300         conf->poolinfo = newpoolinfo;
3301
3302         spin_lock_irqsave(&conf->device_lock, flags);
3303         mddev->degraded += (raid_disks - conf->raid_disks);
3304         spin_unlock_irqrestore(&conf->device_lock, flags);
3305         conf->raid_disks = mddev->raid_disks = raid_disks;
3306         mddev->delta_disks = 0;
3307
3308         unfreeze_array(conf);
3309
3310         set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
3311         set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
3312         md_wakeup_thread(mddev->thread);
3313
3314         mempool_exit(&oldpool);
3315         return 0;
3316 }
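
/*
 * The usual trigger is a device-count change from mdadm, e.g.
 * (illustrative):
 *
 *      mdadm --grow /dev/md0 --raid-devices=3
 *
 * which sets mddev->delta_disks before ->check_reshape is called.
 */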
3317
3318 static void raid1_quiesce(struct mddev *mddev, int quiesce)
3319 {
3320         struct r1conf *conf = mddev->private;
3321
3322         if (quiesce)
3323                 freeze_array(conf, 0);
3324         else
3325                 unfreeze_array(conf);
3326 }
3327
3328 static void *raid1_takeover(struct mddev *mddev)
3329 {
3330         /* raid1 can take over:
3331          *  raid5 with 2 devices, any layout or chunk size
3332          */
3333         if (mddev->level == 5 && mddev->raid_disks == 2) {
3334                 struct r1conf *conf;
3335                 mddev->new_level = 1;
3336                 mddev->new_layout = 0;
3337                 mddev->new_chunk_sectors = 0;
3338                 conf = setup_conf(mddev);
3339                 if (!IS_ERR(conf)) {
3340                         /* Array must appear to be quiesced */
3341                         conf->array_frozen = 1;
3342                         mddev_clear_unsupported_flags(mddev,
3343                                 UNSUPPORTED_MDDEV_FLAGS);
3344                 }
3345                 return conf;
3346         }
3347         return ERR_PTR(-EINVAL);
3348 }
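
/*
 * Example of exercising this takeover path (illustrative): converting a
 * two-drive raid5 in place with
 *
 *      mdadm --grow /dev/md0 --level=1
 *
 * after which md core calls ->takeover on the raid1 personality.
 */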
3349
3350 static struct md_personality raid1_personality =
3351 {
3352         .name           = "raid1",
3353         .level          = 1,
3354         .owner          = THIS_MODULE,
3355         .make_request   = raid1_make_request,
3356         .run            = raid1_run,
3357         .free           = raid1_free,
3358         .status         = raid1_status,
3359         .error_handler  = raid1_error,
3360         .hot_add_disk   = raid1_add_disk,
3361         .hot_remove_disk = raid1_remove_disk,
3362         .spare_active   = raid1_spare_active,
3363         .sync_request   = raid1_sync_request,
3364         .resize         = raid1_resize,
3365         .size           = raid1_size,
3366         .check_reshape  = raid1_reshape,
3367         .quiesce        = raid1_quiesce,
3368         .takeover       = raid1_takeover,
3369 };
3370
3371 static int __init raid_init(void)
3372 {
3373         return register_md_personality(&raid1_personality);
3374 }
3375
3376 static void raid_exit(void)
3377 {
3378         unregister_md_personality(&raid1_personality);
3379 }
3380
3381 module_init(raid_init);
3382 module_exit(raid_exit);
3383 MODULE_LICENSE("GPL");
3384 MODULE_DESCRIPTION("RAID1 (mirroring) personality for MD");
3385 MODULE_ALIAS("md-personality-3"); /* RAID1 */
3386 MODULE_ALIAS("md-raid1");
3387 MODULE_ALIAS("md-level-1");
3388
3389 module_param(max_queued_requests, int, S_IRUGO|S_IWUSR);
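
/*
 * max_queued_requests comes from the shared raid1-10.c code included
 * above; with the permissions given here it should be adjustable at
 * runtime, e.g. via /sys/module/raid1/parameters/max_queued_requests
 * (path assumed from the standard module_param sysfs layout).
 */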