1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (C) 2012 Fusion-io All rights reserved.
4 * Copyright (C) 2012 Intel Corp. All rights reserved.
7 #include <linux/sched.h>
9 #include <linux/slab.h>
10 #include <linux/blkdev.h>
11 #include <linux/raid/pq.h>
12 #include <linux/hash.h>
13 #include <linux/list_sort.h>
14 #include <linux/raid/xor.h>
20 #include "async-thread.h"
22 /* set when additional merges to this rbio are not allowed */
23 #define RBIO_RMW_LOCKED_BIT 1
26 * set when this rbio is sitting in the hash, but it is just a cache
27 * of past RMW
29 #define RBIO_CACHE_BIT 2
32 * set when it is safe to trust the stripe_pages for caching
34 #define RBIO_CACHE_READY_BIT 3
36 #define RBIO_CACHE_SIZE 1024
40 BTRFS_RBIO_READ_REBUILD,
41 BTRFS_RBIO_PARITY_SCRUB,
42 BTRFS_RBIO_REBUILD_MISSING,
45 struct btrfs_raid_bio {
46 struct btrfs_fs_info *fs_info;
47 struct btrfs_bio *bbio;
49 /* while we're doing rmw on a stripe
50 * we put it into a hash table so we can
51 * lock the stripe and merge more rbios
54 struct list_head hash_list;
57 * LRU list for the stripe cache
59 struct list_head stripe_cache;
62 * for scheduling work in the helper threads
64 struct btrfs_work work;
67 * bio list and bio_list_lock are used
68 * to add more bios into the stripe
69 * in hopes of avoiding the full rmw
71 struct bio_list bio_list;
72 spinlock_t bio_list_lock;
74 /* also protected by the bio_list_lock, the
75 * plug list is used by the plugging code
76 * to collect partial bios while plugged. The
77 * stripe locking code also uses it to hand off
78 * the stripe lock to the next pending IO
80 struct list_head plug_list;
83 * flags that tell us if it is safe to
84 * merge with this bio
88 /* size of each individual stripe on disk */
91 /* number of data stripes (no p/q) */
98 * set if we're doing a parity rebuild
99 * for a read from higher up, which is handled
100 * differently from a parity rebuild as part of
101 * rmw
103 enum btrfs_rbio_ops operation;
105 /* first bad stripe */
108 /* second bad stripe (for raid6 use) */
113 * number of pages needed to represent the full
114 * stripe
119 * size of all the bios in the bio_list. This
120 * helps us decide if the rbio maps to a full
121 * stripe or not
129 atomic_t stripes_pending;
133 * these are two arrays of pointers. We allocate the
134 * rbio big enough to hold them both and setup their
135 * locations when the rbio is allocated
138 /* pointers to pages that we allocated for
139 * reading/writing stripes directly from the disk (including P/Q)
141 struct page **stripe_pages;
144 * pointers to the pages in the bio_list. Stored
145 * here for faster lookup
147 struct page **bio_pages;
150 * bitmap to record which horizontal stripe has data
152 unsigned long *dbitmap;
154 /* allocated with real_stripes-many pointers for finish_*() calls */
155 void **finish_pointers;
157 /* allocated with stripe_npages-many bits for finish_*() calls */
158 unsigned long *finish_pbitmap;
161 static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
162 static noinline void finish_rmw(struct btrfs_raid_bio *rbio);
163 static void rmw_work(struct btrfs_work *work);
164 static void read_rebuild_work(struct btrfs_work *work);
165 static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
166 static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed);
167 static void __free_raid_bio(struct btrfs_raid_bio *rbio);
168 static void index_rbio_pages(struct btrfs_raid_bio *rbio);
169 static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);
171 static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
173 static void async_scrub_parity(struct btrfs_raid_bio *rbio);
175 static void start_async_work(struct btrfs_raid_bio *rbio, btrfs_func_t work_func)
177 btrfs_init_work(&rbio->work, btrfs_rmw_helper, work_func, NULL, NULL);
178 btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
182 * the stripe hash table is used for locking, and to collect
183 * bios in hopes of making a full stripe
185 int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info)
187 struct btrfs_stripe_hash_table *table;
188 struct btrfs_stripe_hash_table *x;
189 struct btrfs_stripe_hash *cur;
190 struct btrfs_stripe_hash *h;
191 int num_entries = 1 << BTRFS_STRIPE_HASH_TABLE_BITS;
195 if (info->stripe_hash_table)
196 return 0;
199 * The table is large, starting with order 4 and can go as high as
200 * order 7 in case lock debugging is turned on.
202 * Try harder to allocate and fall back to vmalloc to lower the chance
203 * of a failing mount.
205 table_size = sizeof(*table) + sizeof(*h) * num_entries;
206 table = kvzalloc(table_size, GFP_KERNEL);
210 spin_lock_init(&table->cache_lock);
211 INIT_LIST_HEAD(&table->stripe_cache);
215 for (i = 0; i < num_entries; i++) {
217 INIT_LIST_HEAD(&cur->hash_list);
218 spin_lock_init(&cur->lock);
221 x = cmpxchg(&info->stripe_hash_table, NULL, table);
228 * caching an rbio means to copy anything from the
229 * bio_pages array into the stripe_pages array. We
230 * use the page uptodate bit in the stripe cache array
231 * to indicate if it has valid data
233 * once the caching is done, we set the cache ready
234 * bit.
236 static void cache_rbio_pages(struct btrfs_raid_bio *rbio)
243 ret = alloc_rbio_pages(rbio);
247 for (i = 0; i < rbio->nr_pages; i++) {
248 if (!rbio->bio_pages[i])
249 continue;
251 s = kmap(rbio->bio_pages[i]);
252 d = kmap(rbio->stripe_pages[i]);
254 copy_page(d, s);
256 kunmap(rbio->bio_pages[i]);
257 kunmap(rbio->stripe_pages[i]);
258 SetPageUptodate(rbio->stripe_pages[i]);
260 set_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
264 * we hash on the first logical address of the stripe
266 static int rbio_bucket(struct btrfs_raid_bio *rbio)
268 u64 num = rbio->bbio->raid_map[0];
271 * we shift down quite a bit. We're using byte
272 * addressing, and most of the lower bits are zeros.
273 * This tends to upset hash_64, and it consistently
274 * returns just one or two different values.
276 * shifting off the lower bits fixes things.
278 return hash_64(num >> 16, BTRFS_STRIPE_HASH_TABLE_BITS);
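/*
 * Editorial worked example of the bucketing above (the 64K-aligned stripe
 * address is an assumption for illustration): a stripe starting at logical
 * 0x12340000 has its low 16 bits all zero, so we hash
 * 0x12340000 >> 16 == 0x1234 and get a well distributed bucket in
 * [0, 1 << BTRFS_STRIPE_HASH_TABLE_BITS).
 */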
282 * stealing an rbio means taking all the uptodate pages from the stripe
283 * array in the source rbio and putting them into the destination rbio
285 static void steal_rbio(struct btrfs_raid_bio *src, struct btrfs_raid_bio *dest)
291 if (!test_bit(RBIO_CACHE_READY_BIT, &src->flags))
292 return;
294 for (i = 0; i < dest->nr_pages; i++) {
295 s = src->stripe_pages[i];
296 if (!s || !PageUptodate(s)) {
300 d = dest->stripe_pages[i];
304 dest->stripe_pages[i] = s;
305 src->stripe_pages[i] = NULL;
310 * merging means we take the bio_list from the victim and
311 * splice it into the destination. The victim should
312 * be discarded afterwards.
314 * must be called with dest->bio_list_lock held
316 static void merge_rbio(struct btrfs_raid_bio *dest,
317 struct btrfs_raid_bio *victim)
319 bio_list_merge(&dest->bio_list, &victim->bio_list);
320 dest->bio_list_bytes += victim->bio_list_bytes;
321 dest->generic_bio_cnt += victim->generic_bio_cnt;
322 bio_list_init(&victim->bio_list);
326 * used to prune items that are in the cache. The caller
327 * must hold the hash table lock.
329 static void __remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
331 int bucket = rbio_bucket(rbio);
332 struct btrfs_stripe_hash_table *table;
333 struct btrfs_stripe_hash *h;
337 * check the bit again under the hash table lock.
339 if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
340 return;
342 table = rbio->fs_info->stripe_hash_table;
343 h = table->table + bucket;
345 /* hold the lock for the bucket because we may be
346 * removing it from the hash table
351 * hold the lock for the bio list because we need
352 * to make sure the bio list is empty
354 spin_lock(&rbio->bio_list_lock);
356 if (test_and_clear_bit(RBIO_CACHE_BIT, &rbio->flags)) {
357 list_del_init(&rbio->stripe_cache);
358 table->cache_size -= 1;
361 /* if the bio list isn't empty, this rbio is
362 * still involved in an IO. We take it out
363 * of the cache list, and drop the ref that
364 * was held for the list.
366 * If the bio_list was empty, we also remove
367 * the rbio from the hash_table, and drop
368 * the corresponding ref
370 if (bio_list_empty(&rbio->bio_list)) {
371 if (!list_empty(&rbio->hash_list)) {
372 list_del_init(&rbio->hash_list);
373 refcount_dec(&rbio->refs);
374 BUG_ON(!list_empty(&rbio->plug_list));
379 spin_unlock(&rbio->bio_list_lock);
380 spin_unlock(&h->lock);
383 __free_raid_bio(rbio);
387 * prune a given rbio from the cache
389 static void remove_rbio_from_cache(struct btrfs_raid_bio *rbio)
391 struct btrfs_stripe_hash_table *table;
394 if (!test_bit(RBIO_CACHE_BIT, &rbio->flags))
395 return;
397 table = rbio->fs_info->stripe_hash_table;
399 spin_lock_irqsave(&table->cache_lock, flags);
400 __remove_rbio_from_cache(rbio);
401 spin_unlock_irqrestore(&table->cache_lock, flags);
405 * remove everything in the cache
407 static void btrfs_clear_rbio_cache(struct btrfs_fs_info *info)
409 struct btrfs_stripe_hash_table *table;
411 struct btrfs_raid_bio *rbio;
413 table = info->stripe_hash_table;
415 spin_lock_irqsave(&table->cache_lock, flags);
416 while (!list_empty(&table->stripe_cache)) {
417 rbio = list_entry(table->stripe_cache.next,
418 struct btrfs_raid_bio,
420 __remove_rbio_from_cache(rbio);
422 spin_unlock_irqrestore(&table->cache_lock, flags);
426 * remove all cached entries and free the hash table
429 void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info)
431 if (!info->stripe_hash_table)
432 return;
433 btrfs_clear_rbio_cache(info);
434 kvfree(info->stripe_hash_table);
435 info->stripe_hash_table = NULL;
439 * insert an rbio into the stripe cache. It
440 * must have already been prepared by calling
441 * cache_rbio_pages
443 * If this rbio was already cached, it gets
444 * moved to the front of the lru.
446 * If the size of the rbio cache is too big, we
447 * prune an item.
449 static void cache_rbio(struct btrfs_raid_bio *rbio)
451 struct btrfs_stripe_hash_table *table;
454 if (!test_bit(RBIO_CACHE_READY_BIT, &rbio->flags))
455 return;
457 table = rbio->fs_info->stripe_hash_table;
459 spin_lock_irqsave(&table->cache_lock, flags);
460 spin_lock(&rbio->bio_list_lock);
462 /* bump our ref if we were not in the list before */
463 if (!test_and_set_bit(RBIO_CACHE_BIT, &rbio->flags))
464 refcount_inc(&rbio->refs);
466 if (!list_empty(&rbio->stripe_cache)) {
467 list_move(&rbio->stripe_cache, &table->stripe_cache);
469 list_add(&rbio->stripe_cache, &table->stripe_cache);
470 table->cache_size += 1;
473 spin_unlock(&rbio->bio_list_lock);
475 if (table->cache_size > RBIO_CACHE_SIZE) {
476 struct btrfs_raid_bio *found;
478 found = list_entry(table->stripe_cache.prev,
479 struct btrfs_raid_bio,
483 __remove_rbio_from_cache(found);
486 spin_unlock_irqrestore(&table->cache_lock, flags);
490 * helper function to run the xor_blocks api. It is only
491 * able to do MAX_XOR_BLOCKS at a time, so we need to
492 * loop through
494 static void run_xor(void **pages, int src_cnt, ssize_t len)
498 void *dest = pages[src_cnt];
501 xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);
502 xor_blocks(xor_src_cnt, len, dest, pages + src_off);
504 src_cnt -= xor_src_cnt;
505 src_off += xor_src_cnt;
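/*
 * Editorial example of the chunking above (the src_cnt value is assumed):
 * xor_blocks() takes at most MAX_XOR_BLOCKS (4) sources per call, so
 * run_xor(pages, 10, len) issues calls with 4, 4 and 2 sources, each
 * XOR-accumulating into pages[10] (dest). Because dest accumulates in
 * place, callers such as finish_rmw() seed it with copy_page() first.
 */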
510 * returns true if the bio list inside this rbio
511 * covers an entire stripe (no rmw required).
512 * Must be called with the bio list lock held, or
513 * at a time when you know it is impossible to add
514 * new bios into the list
516 static int __rbio_is_full(struct btrfs_raid_bio *rbio)
518 unsigned long size = rbio->bio_list_bytes;
521 if (size != rbio->nr_data * rbio->stripe_len)
524 BUG_ON(size > rbio->nr_data * rbio->stripe_len);
528 static int rbio_is_full(struct btrfs_raid_bio *rbio)
533 spin_lock_irqsave(&rbio->bio_list_lock, flags);
534 ret = __rbio_is_full(rbio);
535 spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
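/*
 * Editorial worked example of the full-stripe test (geometry assumed):
 * a RAID5 rbio over 3 devices has nr_data == 2, so with a 64K stripe_len
 * the rbio is full once bio_list_bytes == 2 * 64K == 128K; anything
 * smaller has to go through the read/modify/write path.
 */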
540 * returns 1 if it is safe to merge two rbios together.
541 * The merging is safe if the two rbios correspond to
542 * the same stripe and if they are both going in the same
543 * direction (read vs write), and if neither one is
544 * locked for final IO
546 * The caller is responsible for locking such that
547 * rmw_locked is safe to test
549 static int rbio_can_merge(struct btrfs_raid_bio *last,
550 struct btrfs_raid_bio *cur)
552 if (test_bit(RBIO_RMW_LOCKED_BIT, &last->flags) ||
553 test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags))
557 * we can't merge with cached rbios, since the
558 * idea is that when we merge the destination
559 * rbio is going to run our IO for us. We can
560 * steal from cached rbios though, other functions
561 * handle that.
563 if (test_bit(RBIO_CACHE_BIT, &last->flags) ||
564 test_bit(RBIO_CACHE_BIT, &cur->flags))
567 if (last->bbio->raid_map[0] !=
568 cur->bbio->raid_map[0])
571 /* we can't merge with different operations */
572 if (last->operation != cur->operation)
573 return 0;
575 * Parity scrub must read the full stripe from the drive, then check
576 * and repair the parity and write the new results.
578 * We're not allowed to add any new bios to the
579 * bio list here, anyone else that wants to
580 * change this stripe needs to do their own rmw.
582 if (last->operation == BTRFS_RBIO_PARITY_SCRUB)
583 return 0;
585 if (last->operation == BTRFS_RBIO_REBUILD_MISSING)
586 return 0;
588 if (last->operation == BTRFS_RBIO_READ_REBUILD) {
589 int fa = last->faila;
590 int fb = last->failb;
591 int cur_fa = cur->faila;
592 int cur_fb = cur->failb;
594 if (last->faila >= last->failb) {
599 if (cur->faila >= cur->failb) {
604 if (fa != cur_fa || fb != cur_fb)
605 return 0;
607 return 1;
610 static int rbio_stripe_page_index(struct btrfs_raid_bio *rbio, int stripe,
613 return stripe * rbio->stripe_npages + index;
617 * these are just the pages from the rbio array, not from anything
618 * the FS sent down to us
620 static struct page *rbio_stripe_page(struct btrfs_raid_bio *rbio, int stripe,
623 return rbio->stripe_pages[rbio_stripe_page_index(rbio, stripe, index)];
627 * helper to index into the pstripe
629 static struct page *rbio_pstripe_page(struct btrfs_raid_bio *rbio, int index)
631 return rbio_stripe_page(rbio, rbio->nr_data, index);
635 * helper to index into the qstripe, returns null
636 * if there is no qstripe
638 static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index)
640 if (rbio->nr_data + 1 == rbio->real_stripes)
641 return NULL;
642 return rbio_stripe_page(rbio, rbio->nr_data + 1, index);
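/*
 * Editorial sketch of the flat stripe_pages layout the helpers above
 * index into (stripe-major, stripe_npages pages per stripe):
 *
 *	[ data 0 | data 1 | ... | data nr_data-1 | P | Q (raid6 only) ]
 *
 * so the P stripe is stripe number nr_data and Q, when present, is
 * nr_data + 1.
 */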
646 * The first stripe in the table for a logical address
647 * has the lock. rbios are added in one of three ways:
649 * 1) Nobody has the stripe locked yet. The rbio is given
650 * the lock and 0 is returned. The caller must start the IO
653 * 2) Someone has the stripe locked, but we're able to merge
654 * with the lock owner. The rbio is freed and the IO will
655 * start automatically along with the existing rbio. 1 is returned.
657 * 3) Someone has the stripe locked, but we're not able to merge.
658 * The rbio is added to the lock owner's plug list, or merged into
659 * an rbio already on the plug list. When the lock owner unlocks,
660 * the next rbio on the list is run and the IO is started automatically.
663 * If we return 0, the caller still owns the rbio and must continue with
664 * IO submission. If we return 1, the caller must assume the rbio has
665 * already been freed.
667 static noinline int lock_stripe_add(struct btrfs_raid_bio *rbio)
669 int bucket = rbio_bucket(rbio);
670 struct btrfs_stripe_hash *h = rbio->fs_info->stripe_hash_table->table + bucket;
671 struct btrfs_raid_bio *cur;
672 struct btrfs_raid_bio *pending;
674 struct btrfs_raid_bio *freeit = NULL;
675 struct btrfs_raid_bio *cache_drop = NULL;
678 spin_lock_irqsave(&h->lock, flags);
679 list_for_each_entry(cur, &h->hash_list, hash_list) {
680 if (cur->bbio->raid_map[0] == rbio->bbio->raid_map[0]) {
681 spin_lock(&cur->bio_list_lock);
683 /* can we steal this cached rbio's pages? */
684 if (bio_list_empty(&cur->bio_list) &&
685 list_empty(&cur->plug_list) &&
686 test_bit(RBIO_CACHE_BIT, &cur->flags) &&
687 !test_bit(RBIO_RMW_LOCKED_BIT, &cur->flags)) {
688 list_del_init(&cur->hash_list);
689 refcount_dec(&cur->refs);
691 steal_rbio(cur, rbio);
693 spin_unlock(&cur->bio_list_lock);
698 /* can we merge into the lock owner? */
699 if (rbio_can_merge(cur, rbio)) {
700 merge_rbio(cur, rbio);
701 spin_unlock(&cur->bio_list_lock);
709 * we couldn't merge with the running
710 * rbio, see if we can merge with the
711 * pending ones. We don't have to
712 * check for rmw_locked because there
713 * is no way they are inside finish_rmw
716 list_for_each_entry(pending, &cur->plug_list,
718 if (rbio_can_merge(pending, rbio)) {
719 merge_rbio(pending, rbio);
720 spin_unlock(&cur->bio_list_lock);
727 /* no merging, put us on the tail of the plug list,
728 * our rbio will be started when the currently
729 * running rbio unlocks
731 list_add_tail(&rbio->plug_list, &cur->plug_list);
732 spin_unlock(&cur->bio_list_lock);
738 refcount_inc(&rbio->refs);
739 list_add(&rbio->hash_list, &h->hash_list);
741 spin_unlock_irqrestore(&h->lock, flags);
743 remove_rbio_from_cache(cache_drop);
745 __free_raid_bio(freeit);
750 * called as rmw or parity rebuild is completed. If the plug list has more
751 * rbios waiting for this stripe, the next one on the list will be started
753 static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
756 struct btrfs_stripe_hash *h;
760 bucket = rbio_bucket(rbio);
761 h = rbio->fs_info->stripe_hash_table->table + bucket;
763 if (list_empty(&rbio->plug_list))
766 spin_lock_irqsave(&h->lock, flags);
767 spin_lock(&rbio->bio_list_lock);
769 if (!list_empty(&rbio->hash_list)) {
771 * if we're still cached and there is no other IO
772 * to perform, just leave this rbio here for others
773 * to steal from later
775 if (list_empty(&rbio->plug_list) &&
776 test_bit(RBIO_CACHE_BIT, &rbio->flags)) {
778 clear_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
779 BUG_ON(!bio_list_empty(&rbio->bio_list));
783 list_del_init(&rbio->hash_list);
784 refcount_dec(&rbio->refs);
787 * we use the plug list to hold all the rbios
788 * waiting for the chance to lock this stripe.
789 * hand the lock over to one of them.
791 if (!list_empty(&rbio->plug_list)) {
792 struct btrfs_raid_bio *next;
793 struct list_head *head = rbio->plug_list.next;
795 next = list_entry(head, struct btrfs_raid_bio,
798 list_del_init(&rbio->plug_list);
800 list_add(&next->hash_list, &h->hash_list);
801 refcount_inc(&next->refs);
802 spin_unlock(&rbio->bio_list_lock);
803 spin_unlock_irqrestore(&h->lock, flags);
805 if (next->operation == BTRFS_RBIO_READ_REBUILD)
806 start_async_work(next, read_rebuild_work);
807 else if (next->operation == BTRFS_RBIO_REBUILD_MISSING) {
808 steal_rbio(rbio, next);
809 start_async_work(next, read_rebuild_work);
810 } else if (next->operation == BTRFS_RBIO_WRITE) {
811 steal_rbio(rbio, next);
812 start_async_work(next, rmw_work);
813 } else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) {
814 steal_rbio(rbio, next);
815 async_scrub_parity(next);
822 spin_unlock(&rbio->bio_list_lock);
823 spin_unlock_irqrestore(&h->lock, flags);
827 remove_rbio_from_cache(rbio);
830 static void __free_raid_bio(struct btrfs_raid_bio *rbio)
834 if (!refcount_dec_and_test(&rbio->refs))
837 WARN_ON(!list_empty(&rbio->stripe_cache));
838 WARN_ON(!list_empty(&rbio->hash_list));
839 WARN_ON(!bio_list_empty(&rbio->bio_list));
841 for (i = 0; i < rbio->nr_pages; i++) {
842 if (rbio->stripe_pages[i]) {
843 __free_page(rbio->stripe_pages[i]);
844 rbio->stripe_pages[i] = NULL;
848 btrfs_put_bbio(rbio->bbio);
852 static void rbio_endio_bio_list(struct bio *cur, blk_status_t err)
859 cur->bi_status = err;
866 * this frees the rbio and runs through all the bios in the
867 * bio_list and calls end_io on them
869 static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, blk_status_t err)
871 struct bio *cur = bio_list_get(&rbio->bio_list);
874 if (rbio->generic_bio_cnt)
875 btrfs_bio_counter_sub(rbio->fs_info, rbio->generic_bio_cnt);
878 * At this moment, rbio->bio_list is empty, however since rbio does not
879 * always have RBIO_RMW_LOCKED_BIT set and rbio is still linked on the
880 * hash list, rbio may be merged with others so that rbio->bio_list
881 * becomes non-empty.
882 * Once unlock_stripe() is done, rbio->bio_list will not be updated any
883 * more and we can call bio_endio() on all queued bios.
886 extra = bio_list_get(&rbio->bio_list);
887 __free_raid_bio(rbio);
889 rbio_endio_bio_list(cur, err);
891 rbio_endio_bio_list(extra, err);
895 * end io function used by finish_rmw. When we finally
896 * get here, we've written a full stripe
898 static void raid_write_end_io(struct bio *bio)
900 struct btrfs_raid_bio *rbio = bio->bi_private;
901 blk_status_t err = bio->bi_status;
905 fail_bio_stripe(rbio, bio);
909 if (!atomic_dec_and_test(&rbio->stripes_pending))
910 return;
912 err = BLK_STS_OK;
914 /* OK, we have written all the stripes we need to. */
915 max_errors = (rbio->operation == BTRFS_RBIO_PARITY_SCRUB) ?
916 0 : rbio->bbio->max_errors;
917 if (atomic_read(&rbio->error) > max_errors)
918 err = BLK_STS_IOERR;
920 rbio_orig_end_io(rbio, err);
924 * the read/modify/write code wants to use the original bio for
925 * any pages it included, and then use the rbio for everything
926 * else. This function decides if a given index (stripe number)
927 * and page number in that stripe fall inside the original bio
930 * if you set bio_list_only, you'll get a NULL back for any ranges
931 * that are outside the bio_list
933 * This doesn't take any refs on anything, you get a bare page pointer
934 * and the caller must bump refs as required.
936 * You must call index_rbio_pages once before you can trust
937 * the answers from this function.
939 static struct page *page_in_rbio(struct btrfs_raid_bio *rbio,
940 int index, int pagenr, int bio_list_only)
943 struct page *p = NULL;
945 chunk_page = index * (rbio->stripe_len >> PAGE_SHIFT) + pagenr;
947 spin_lock_irq(&rbio->bio_list_lock);
948 p = rbio->bio_pages[chunk_page];
949 spin_unlock_irq(&rbio->bio_list_lock);
951 if (p || bio_list_only)
954 return rbio->stripe_pages[chunk_page];
958 * number of pages we need for the entire stripe across all the
961 static unsigned long rbio_nr_pages(unsigned long stripe_len, int nr_stripes)
963 return DIV_ROUND_UP(stripe_len, PAGE_SIZE) * nr_stripes;
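/*
 * Editorial worked example (geometry assumed): with stripe_len == 64K,
 * 4K pages and 4 stripes (2 data + P + Q),
 * rbio_nr_pages() == DIV_ROUND_UP(64K, 4K) * 4 == 16 * 4 == 64.
 */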
967 * allocation and initial setup for the btrfs_raid_bio. Note that
968 * this does not allocate any pages for rbio->pages.
970 static struct btrfs_raid_bio *alloc_rbio(struct btrfs_fs_info *fs_info,
971 struct btrfs_bio *bbio,
974 struct btrfs_raid_bio *rbio;
976 int real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
977 int num_pages = rbio_nr_pages(stripe_len, real_stripes);
978 int stripe_npages = DIV_ROUND_UP(stripe_len, PAGE_SIZE);
981 rbio = kzalloc(sizeof(*rbio) +
982 sizeof(*rbio->stripe_pages) * num_pages +
983 sizeof(*rbio->bio_pages) * num_pages +
984 sizeof(*rbio->finish_pointers) * real_stripes +
985 sizeof(*rbio->dbitmap) * BITS_TO_LONGS(stripe_npages) +
986 sizeof(*rbio->finish_pbitmap) *
987 BITS_TO_LONGS(stripe_npages),
990 return ERR_PTR(-ENOMEM);
992 bio_list_init(&rbio->bio_list);
993 INIT_LIST_HEAD(&rbio->plug_list);
994 spin_lock_init(&rbio->bio_list_lock);
995 INIT_LIST_HEAD(&rbio->stripe_cache);
996 INIT_LIST_HEAD(&rbio->hash_list);
998 rbio->fs_info = fs_info;
999 rbio->stripe_len = stripe_len;
1000 rbio->nr_pages = num_pages;
1001 rbio->real_stripes = real_stripes;
1002 rbio->stripe_npages = stripe_npages;
1005 refcount_set(&rbio->refs, 1);
1006 atomic_set(&rbio->error, 0);
1007 atomic_set(&rbio->stripes_pending, 0);
1010 * the stripe_pages, bio_pages, etc arrays point to the extra
1011 * memory we allocated past the end of the rbio
1013 p = rbio + 1;
1014 #define CONSUME_ALLOC(ptr, count) do { \
1015 ptr = p; \
1016 p = (unsigned char *)p + sizeof(*(ptr)) * (count); \
1017 } while (0)
1018 CONSUME_ALLOC(rbio->stripe_pages, num_pages);
1019 CONSUME_ALLOC(rbio->bio_pages, num_pages);
1020 CONSUME_ALLOC(rbio->finish_pointers, real_stripes);
1021 CONSUME_ALLOC(rbio->dbitmap, BITS_TO_LONGS(stripe_npages));
1022 CONSUME_ALLOC(rbio->finish_pbitmap, BITS_TO_LONGS(stripe_npages));
1023 #undef CONSUME_ALLOC
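/*
 * Editorial sketch of the single allocation consumed above: the cursor p
 * starts just past the struct, and CONSUME_ALLOC() hands out slices of
 * the same kzalloc'd block in order:
 *
 *	struct btrfs_raid_bio | stripe_pages[] | bio_pages[] |
 *	finish_pointers[] | dbitmap[] | finish_pbitmap[]
 */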
1025 if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID5)
1026 nr_data = real_stripes - 1;
1027 else if (bbio->map_type & BTRFS_BLOCK_GROUP_RAID6)
1028 nr_data = real_stripes - 2;
1032 rbio->nr_data = nr_data;
1036 /* allocate pages for all the stripes in the bio, including parity */
1037 static int alloc_rbio_pages(struct btrfs_raid_bio *rbio)
1042 for (i = 0; i < rbio->nr_pages; i++) {
1043 if (rbio->stripe_pages[i])
1044 continue;
1045 page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
1046 if (!page)
1047 return -ENOMEM;
1048 rbio->stripe_pages[i] = page;
1053 /* only allocate pages for p/q stripes */
1054 static int alloc_rbio_parity_pages(struct btrfs_raid_bio *rbio)
1059 i = rbio_stripe_page_index(rbio, rbio->nr_data, 0);
1061 for (; i < rbio->nr_pages; i++) {
1062 if (rbio->stripe_pages[i])
1063 continue;
1064 page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
1065 if (!page)
1066 return -ENOMEM;
1067 rbio->stripe_pages[i] = page;
1073 * add a single page from a specific stripe into our list of bios for IO
1074 * this will try to merge into existing bios if possible, and returns
1075 * zero if all went well.
1077 static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
1078 struct bio_list *bio_list,
1081 unsigned long page_index,
1082 unsigned long bio_max_len)
1084 struct bio *last = bio_list->tail;
1088 struct btrfs_bio_stripe *stripe;
1091 stripe = &rbio->bbio->stripes[stripe_nr];
1092 disk_start = stripe->physical + (page_index << PAGE_SHIFT);
1094 /* if the device is missing, just fail this stripe */
1095 if (!stripe->dev->bdev)
1096 return fail_rbio_index(rbio, stripe_nr);
1098 /* see if we can add this page onto our existing bio */
1099 if (last) {
1100 last_end = (u64)last->bi_iter.bi_sector << 9;
1101 last_end += last->bi_iter.bi_size;
1104 * we can't merge these if they are from different
1105 * devices or if they are not contiguous
1107 if (last_end == disk_start && stripe->dev->bdev &&
1109 last->bi_disk == stripe->dev->bdev->bd_disk &&
1110 last->bi_partno == stripe->dev->bdev->bd_partno) {
1111 ret = bio_add_page(last, page, PAGE_SIZE, 0);
1112 if (ret == PAGE_SIZE)
1113 return 0;
1117 /* put a new bio on the list */
1118 bio = btrfs_io_bio_alloc(bio_max_len >> PAGE_SHIFT ?: 1);
1119 bio->bi_iter.bi_size = 0;
1120 bio_set_dev(bio, stripe->dev->bdev);
1121 bio->bi_iter.bi_sector = disk_start >> 9;
1123 bio_add_page(bio, page, PAGE_SIZE, 0);
1124 bio_list_add(bio_list, bio);
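/*
 * Editorial example of the merge test above (numbers assumed): a bio
 * starting at sector 0 with bi_iter.bi_size == 1M has last_end == 1M,
 * so a page whose disk_start is 1M on the same disk and partition gets
 * appended via bio_add_page() instead of costing a fresh bio.
 */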
1129 * while we're doing the read/modify/write cycle, we could
1130 * have errors in reading pages off the disk. This checks
1131 * for errors and if we're not able to read the page it'll
1132 * trigger parity reconstruction. The rmw will be finished
1133 * after we've reconstructed the failed stripes
1135 static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio)
1137 if (rbio->faila >= 0 || rbio->failb >= 0) {
1138 BUG_ON(rbio->faila == rbio->real_stripes - 1);
1139 __raid56_parity_recover(rbio);
1140 } else {
1141 finish_rmw(rbio);
1142 }
1146 * helper function to walk our bio list and populate the bio_pages array with
1147 * the result. This seems expensive, but it is faster than constantly
1148 * searching through the bio list as we setup the IO in finish_rmw or stripe
1151 * This must be called before you trust the answers from page_in_rbio
1153 static void index_rbio_pages(struct btrfs_raid_bio *rbio)
1157 unsigned long stripe_offset;
1158 unsigned long page_index;
1160 spin_lock_irq(&rbio->bio_list_lock);
1161 bio_list_for_each(bio, &rbio->bio_list) {
1162 struct bio_vec bvec;
1163 struct bvec_iter iter;
1166 start = (u64)bio->bi_iter.bi_sector << 9;
1167 stripe_offset = start - rbio->bbio->raid_map[0];
1168 page_index = stripe_offset >> PAGE_SHIFT;
1170 if (bio_flagged(bio, BIO_CLONED))
1171 bio->bi_iter = btrfs_io_bio(bio)->iter;
1173 bio_for_each_segment(bvec, bio, iter) {
1174 rbio->bio_pages[page_index + i] = bvec.bv_page;
1178 spin_unlock_irq(&rbio->bio_list_lock);
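/*
 * Editorial worked example of the indexing above (addresses assumed):
 * a bio starting at logical raid_map[0] + 0x8000 has stripe_offset ==
 * 0x8000, so with 4K pages its segments land at bio_pages[8],
 * bio_pages[9], and so on.
 */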
1182 * this is called from one of two situations. We either
1183 * have a full stripe from the higher layers, or we've read all
1184 * the missing bits off disk.
1186 * This will calculate the parity and then send down any
1189 static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
1191 struct btrfs_bio *bbio = rbio->bbio;
1192 void **pointers = rbio->finish_pointers;
1193 int nr_data = rbio->nr_data;
1198 struct bio_list bio_list;
1202 bio_list_init(&bio_list);
1204 if (rbio->real_stripes - rbio->nr_data == 1) {
1205 p_stripe = rbio->real_stripes - 1;
1206 } else if (rbio->real_stripes - rbio->nr_data == 2) {
1207 p_stripe = rbio->real_stripes - 2;
1208 q_stripe = rbio->real_stripes - 1;
1213 /* at this point we either have a full stripe,
1214 * or we've read the full stripe from the drive.
1215 * recalculate the parity and write the new results.
1217 * We're not allowed to add any new bios to the
1218 * bio list here, anyone else that wants to
1219 * change this stripe needs to do their own rmw.
1221 spin_lock_irq(&rbio->bio_list_lock);
1222 set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
1223 spin_unlock_irq(&rbio->bio_list_lock);
1225 atomic_set(&rbio->error, 0);
1228 * now that we've set rmw_locked, run through the
1229 * bio list one last time and map the page pointers
1231 * We don't cache full rbios because we're assuming
1232 * the higher layers are unlikely to use this area of
1233 * the disk again soon. If they do use it again,
1234 * hopefully they will send another full bio.
1236 index_rbio_pages(rbio);
1237 if (!rbio_is_full(rbio))
1238 cache_rbio_pages(rbio);
1239 else
1240 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
1242 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1244 /* first collect one page from each data stripe */
1245 for (stripe = 0; stripe < nr_data; stripe++) {
1246 p = page_in_rbio(rbio, stripe, pagenr, 0);
1247 pointers[stripe] = kmap(p);
1250 /* then add the parity stripe */
1251 p = rbio_pstripe_page(rbio, pagenr);
1253 pointers[stripe++] = kmap(p);
1255 if (q_stripe != -1) {
1258 * raid6, add the qstripe and call the
1259 * library function to fill in our p/q
1261 p = rbio_qstripe_page(rbio, pagenr);
1263 pointers[stripe++] = kmap(p);
1265 raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
1269 copy_page(pointers[nr_data], pointers[0]);
1270 run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
1274 for (stripe = 0; stripe < rbio->real_stripes; stripe++)
1275 kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
1279 * time to start writing. Make bios for everything from the
1280 * higher layers (the bio_list in our rbio) and our p/q. Ignore
1283 for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1284 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1286 if (stripe < rbio->nr_data) {
1287 page = page_in_rbio(rbio, stripe, pagenr, 1);
1288 if (!page)
1289 continue;
1290 } else {
1291 page = rbio_stripe_page(rbio, stripe, pagenr);
1294 ret = rbio_add_io_page(rbio, &bio_list,
1295 page, stripe, pagenr, rbio->stripe_len);
1301 if (likely(!bbio->num_tgtdevs))
1304 for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1305 if (!bbio->tgtdev_map[stripe])
1308 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1310 if (stripe < rbio->nr_data) {
1311 page = page_in_rbio(rbio, stripe, pagenr, 1);
1312 if (!page)
1313 continue;
1314 } else {
1315 page = rbio_stripe_page(rbio, stripe, pagenr);
1318 ret = rbio_add_io_page(rbio, &bio_list, page,
1319 rbio->bbio->tgtdev_map[stripe],
1320 pagenr, rbio->stripe_len);
1327 atomic_set(&rbio->stripes_pending, bio_list_size(&bio_list));
1328 BUG_ON(atomic_read(&rbio->stripes_pending) == 0);
1331 bio = bio_list_pop(&bio_list);
1335 bio->bi_private = rbio;
1336 bio->bi_end_io = raid_write_end_io;
1337 bio->bi_opf = REQ_OP_WRITE;
1344 rbio_orig_end_io(rbio, BLK_STS_IOERR);
1346 while ((bio = bio_list_pop(&bio_list)))
1351 * helper to find the stripe number for a given bio. Used to figure out which
1352 * stripe has failed. This expects the bio to correspond to a physical disk,
1353 * so it looks up based on physical sector numbers.
1355 static int find_bio_stripe(struct btrfs_raid_bio *rbio,
1358 u64 physical = bio->bi_iter.bi_sector;
1361 struct btrfs_bio_stripe *stripe;
1365 for (i = 0; i < rbio->bbio->num_stripes; i++) {
1366 stripe = &rbio->bbio->stripes[i];
1367 stripe_start = stripe->physical;
1368 if (physical >= stripe_start &&
1369 physical < stripe_start + rbio->stripe_len &&
1370 stripe->dev->bdev &&
1371 bio->bi_disk == stripe->dev->bdev->bd_disk &&
1372 bio->bi_partno == stripe->dev->bdev->bd_partno) {
1380 * helper to find the stripe number for a given
1381 * bio (before mapping). Used to figure out which stripe has
1382 * failed. This looks up based on logical block numbers.
1384 static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
1387 u64 logical = bio->bi_iter.bi_sector;
1393 for (i = 0; i < rbio->nr_data; i++) {
1394 stripe_start = rbio->bbio->raid_map[i];
1395 if (logical >= stripe_start &&
1396 logical < stripe_start + rbio->stripe_len) {
1404 * returns -EIO if we had too many failures
1406 static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed)
1408 unsigned long flags;
1411 spin_lock_irqsave(&rbio->bio_list_lock, flags);
1413 /* we already know this stripe is bad, move on */
1414 if (rbio->faila == failed || rbio->failb == failed)
1415 goto out;
1417 if (rbio->faila == -1) {
1418 /* first failure on this rbio */
1419 rbio->faila = failed;
1420 atomic_inc(&rbio->error);
1421 } else if (rbio->failb == -1) {
1422 /* second failure on this rbio */
1423 rbio->failb = failed;
1424 atomic_inc(&rbio->error);
1428 out:
1429 spin_unlock_irqrestore(&rbio->bio_list_lock, flags);
1435 * helper to fail a stripe based on a physical disk
1438 static int fail_bio_stripe(struct btrfs_raid_bio *rbio,
1441 int failed = find_bio_stripe(rbio, bio);
1446 return fail_rbio_index(rbio, failed);
1450 * this sets each page in the bio uptodate. It should only be used on private
1451 * rbio pages, nothing that comes in from the higher layers
1453 static void set_bio_pages_uptodate(struct bio *bio)
1455 struct bio_vec *bvec;
1458 ASSERT(!bio_flagged(bio, BIO_CLONED));
1460 bio_for_each_segment_all(bvec, bio, i)
1461 SetPageUptodate(bvec->bv_page);
1465 * end io for the read phase of the rmw cycle. All the bios here are physical
1466 * stripe bios we've read from the disk so we can recalculate the parity of the
1469 * This will usually kick off finish_rmw once all the bios are read in, but it
1470 * may trigger parity reconstruction if we had any errors along the way
1472 static void raid_rmw_end_io(struct bio *bio)
1474 struct btrfs_raid_bio *rbio = bio->bi_private;
1477 fail_bio_stripe(rbio, bio);
1479 set_bio_pages_uptodate(bio);
1483 if (!atomic_dec_and_test(&rbio->stripes_pending))
1484 return;
1486 if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
1487 goto cleanup;
1490 * this will normally call finish_rmw to start our write
1491 * but if there are any failed stripes we'll reconstruct
1494 validate_rbio_for_rmw(rbio);
1499 rbio_orig_end_io(rbio, BLK_STS_IOERR);
1503 * the stripe must be locked by the caller. It will
1504 * unlock after all the writes are done
1506 static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
1508 int bios_to_read = 0;
1509 struct bio_list bio_list;
1515 bio_list_init(&bio_list);
1517 ret = alloc_rbio_pages(rbio);
1521 index_rbio_pages(rbio);
1523 atomic_set(&rbio->error, 0);
1525 * build a list of bios to read all the missing parts of this
1526 * stripe
1528 for (stripe = 0; stripe < rbio->nr_data; stripe++) {
1529 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1532 * we want to find all the pages missing from
1533 * the rbio and read them from the disk. If
1534 * page_in_rbio finds a page in the bio list
1535 * we don't need to read it off the stripe.
1537 page = page_in_rbio(rbio, stripe, pagenr, 1);
1538 if (page)
1539 continue;
1541 page = rbio_stripe_page(rbio, stripe, pagenr);
1543 * the bio cache may have handed us an uptodate
1544 * page. If so, be happy and use it
1546 if (PageUptodate(page))
1547 continue;
1549 ret = rbio_add_io_page(rbio, &bio_list, page,
1550 stripe, pagenr, rbio->stripe_len);
1556 bios_to_read = bio_list_size(&bio_list);
1557 if (!bios_to_read) {
1559 * this can happen if others have merged with
1560 * us, it means there is nothing left to read.
1561 * But if there are missing devices it may not be
1562 * safe to do the full stripe write yet.
1568 * the bbio may be freed once we submit the last bio. Make sure
1569 * not to touch it after that
1571 atomic_set(&rbio->stripes_pending, bios_to_read);
1573 bio = bio_list_pop(&bio_list);
1577 bio->bi_private = rbio;
1578 bio->bi_end_io = raid_rmw_end_io;
1579 bio->bi_opf = REQ_OP_READ;
1581 btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
1585 /* the actual write will happen once the reads are done */
1589 rbio_orig_end_io(rbio, BLK_STS_IOERR);
1591 while ((bio = bio_list_pop(&bio_list)))
1597 validate_rbio_for_rmw(rbio);
1602 * if the upper layers pass in a full stripe, we thank them by only allocating
1603 * enough pages to hold the parity, and sending it all down quickly.
1605 static int full_stripe_write(struct btrfs_raid_bio *rbio)
1609 ret = alloc_rbio_parity_pages(rbio);
1611 __free_raid_bio(rbio);
1615 ret = lock_stripe_add(rbio);
1622 * partial stripe writes get handed over to async helpers.
1623 * We're really hoping to merge a few more writes into this
1624 * rbio before calculating new parity
1626 static int partial_stripe_write(struct btrfs_raid_bio *rbio)
1630 ret = lock_stripe_add(rbio);
1632 start_async_work(rbio, rmw_work);
1637 * sometimes while we were reading from the drive to
1638 * recalculate parity, enough new bios come in to create
1639 * a full stripe. So we do a check here to see if we can
1640 * go directly to finish_rmw
1642 static int __raid56_parity_write(struct btrfs_raid_bio *rbio)
1644 /* head off into rmw land if we don't have a full stripe */
1645 if (!rbio_is_full(rbio))
1646 return partial_stripe_write(rbio);
1647 return full_stripe_write(rbio);
1651 * We use plugging call backs to collect full stripes.
1652 * Any time we get a partial stripe write while plugged
1653 * we collect it into a list. When the unplug comes down,
1654 * we sort the list by logical block number and merge
1655 * everything we can into the same rbios
1657 struct btrfs_plug_cb {
1658 struct blk_plug_cb cb;
1659 struct btrfs_fs_info *info;
1660 struct list_head rbio_list;
1661 struct btrfs_work work;
1665 * rbios on the plug list are sorted for easier merging.
1667 static int plug_cmp(void *priv, struct list_head *a, struct list_head *b)
1669 struct btrfs_raid_bio *ra = container_of(a, struct btrfs_raid_bio,
1671 struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
1673 u64 a_sector = ra->bio_list.head->bi_iter.bi_sector;
1674 u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;
1676 if (a_sector < b_sector)
1678 if (a_sector > b_sector)
1683 static void run_plug(struct btrfs_plug_cb *plug)
1685 struct btrfs_raid_bio *cur;
1686 struct btrfs_raid_bio *last = NULL;
1689 * sort our plug list then try to merge
1690 * everything we can in hopes of creating full
1693 list_sort(NULL, &plug->rbio_list, plug_cmp);
1694 while (!list_empty(&plug->rbio_list)) {
1695 cur = list_entry(plug->rbio_list.next,
1696 struct btrfs_raid_bio, plug_list);
1697 list_del_init(&cur->plug_list);
1699 if (rbio_is_full(cur)) {
1700 /* we have a full stripe, send it down */
1701 full_stripe_write(cur);
1705 if (rbio_can_merge(last, cur)) {
1706 merge_rbio(last, cur);
1707 __free_raid_bio(cur);
1711 __raid56_parity_write(last);
1716 __raid56_parity_write(last);
1722 * if the unplug comes from schedule, we have to push the
1723 * work off to a helper thread
1725 static void unplug_work(struct btrfs_work *work)
1727 struct btrfs_plug_cb *plug;
1728 plug = container_of(work, struct btrfs_plug_cb, work);
1732 static void btrfs_raid_unplug(struct blk_plug_cb *cb, bool from_schedule)
1734 struct btrfs_plug_cb *plug;
1735 plug = container_of(cb, struct btrfs_plug_cb, cb);
1737 if (from_schedule) {
1738 btrfs_init_work(&plug->work, btrfs_rmw_helper,
1739 unplug_work, NULL, NULL);
1740 btrfs_queue_work(plug->info->rmw_workers,
1748 * our main entry point for writes from the rest of the FS.
1750 int raid56_parity_write(struct btrfs_fs_info *fs_info, struct bio *bio,
1751 struct btrfs_bio *bbio, u64 stripe_len)
1753 struct btrfs_raid_bio *rbio;
1754 struct btrfs_plug_cb *plug = NULL;
1755 struct blk_plug_cb *cb;
1758 rbio = alloc_rbio(fs_info, bbio, stripe_len);
1760 btrfs_put_bbio(bbio);
1761 return PTR_ERR(rbio);
1763 bio_list_add(&rbio->bio_list, bio);
1764 rbio->bio_list_bytes = bio->bi_iter.bi_size;
1765 rbio->operation = BTRFS_RBIO_WRITE;
1767 btrfs_bio_counter_inc_noblocked(fs_info);
1768 rbio->generic_bio_cnt = 1;
1771 * don't plug on full rbios, just get them out the door
1772 * as quickly as we can
1774 if (rbio_is_full(rbio)) {
1775 ret = full_stripe_write(rbio);
1777 btrfs_bio_counter_dec(fs_info);
1781 cb = blk_check_plugged(btrfs_raid_unplug, fs_info, sizeof(*plug));
1783 plug = container_of(cb, struct btrfs_plug_cb, cb);
1785 plug->info = fs_info;
1786 INIT_LIST_HEAD(&plug->rbio_list);
1788 list_add_tail(&rbio->plug_list, &plug->rbio_list);
1791 ret = __raid56_parity_write(rbio);
1793 btrfs_bio_counter_dec(fs_info);
1799 * all parity reconstruction happens here. We've read in everything
1800 * we can find from the drives and this does the heavy lifting of
1801 * sorting the good from the bad.
1803 static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
1807 int faila = -1, failb = -1;
1812 pointers = kcalloc(rbio->real_stripes, sizeof(void *), GFP_NOFS);
1814 err = BLK_STS_RESOURCE;
1818 faila = rbio->faila;
1819 failb = rbio->failb;
1821 if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1822 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
1823 spin_lock_irq(&rbio->bio_list_lock);
1824 set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
1825 spin_unlock_irq(&rbio->bio_list_lock);
1828 index_rbio_pages(rbio);
1830 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
1832 * Now we just use bitmap to mark the horizontal stripes in
1833 * which we have data when doing parity scrub.
1835 if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB &&
1836 !test_bit(pagenr, rbio->dbitmap))
1839 /* setup our array of pointers with pages
1842 for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1844 * if we're rebuilding a read, we have to use
1845 * pages from the bio list
1847 if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1848 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
1849 (stripe == faila || stripe == failb)) {
1850 page = page_in_rbio(rbio, stripe, pagenr, 0);
1852 page = rbio_stripe_page(rbio, stripe, pagenr);
1854 pointers[stripe] = kmap(page);
1857 /* all raid6 handling here */
1858 if (rbio->bbio->map_type & BTRFS_BLOCK_GROUP_RAID6) {
1860 * single failure, rebuild from parity raid5
1864 if (faila == rbio->nr_data) {
1866 * Just the P stripe has failed, without
1867 * a bad data or Q stripe.
1868 * TODO, we should redo the xor here.
1870 err = BLK_STS_IOERR;
1874 * a single failure in raid6 is rebuilt
1875 * in the pstripe code below
1880 /* make sure our ps and qs are in order */
1881 if (faila > failb) {
1887 /* if the q stripe is failed, do a pstripe reconstruction
1889 * If both the q stripe and the P stripe are failed, we're
1890 * here due to a crc mismatch and we can't give them the
1891 * data they want
1893 if (rbio->bbio->raid_map[failb] == RAID6_Q_STRIPE) {
1894 if (rbio->bbio->raid_map[faila] ==
1896 err = BLK_STS_IOERR;
1900 * otherwise we have one bad data stripe and
1901 * a good P stripe. raid5!
1906 if (rbio->bbio->raid_map[failb] == RAID5_P_STRIPE) {
1907 raid6_datap_recov(rbio->real_stripes,
1908 PAGE_SIZE, faila, pointers);
1910 raid6_2data_recov(rbio->real_stripes,
1911 PAGE_SIZE, faila, failb,
1917 /* rebuild from P stripe here (raid5 or raid6) */
1918 BUG_ON(failb != -1);
1920 /* Copy parity block into failed block to start with */
1921 copy_page(pointers[faila], pointers[rbio->nr_data]);
1923 /* rearrange the pointer array */
1924 p = pointers[faila];
1925 for (stripe = faila; stripe < rbio->nr_data - 1; stripe++)
1926 pointers[stripe] = pointers[stripe + 1];
1927 pointers[rbio->nr_data - 1] = p;
1929 /* xor in the rest */
1930 run_xor(pointers, rbio->nr_data - 1, PAGE_SIZE);
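/*
 * Editorial worked example (stripe numbers assumed): with nr_data == 3
 * and faila == 1, the failed buffer was seeded with P above, and the
 * rotation leaves pointers == { D0, D2, P-seeded buffer }, so run_xor()
 * computes D1 = P ^ D0 ^ D2 in place.
 */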
1932 /* if we're doing this rebuild as part of an rmw, go through
1933 * and set all of our private rbio pages in the
1934 * failed stripes as uptodate. This way finish_rmw will
1935 * know they can be trusted. If this was a read reconstruction,
1936 * other endio functions will fiddle the uptodate bits
1938 if (rbio->operation == BTRFS_RBIO_WRITE) {
1939 for (i = 0; i < rbio->stripe_npages; i++) {
1941 page = rbio_stripe_page(rbio, faila, i);
1942 SetPageUptodate(page);
1945 page = rbio_stripe_page(rbio, failb, i);
1946 SetPageUptodate(page);
1950 for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1952 * if we're rebuilding a read, we have to use
1953 * pages from the bio list
1955 if ((rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1956 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) &&
1957 (stripe == faila || stripe == failb)) {
1958 page = page_in_rbio(rbio, stripe, pagenr, 0);
1960 page = rbio_stripe_page(rbio, stripe, pagenr);
1972 * Similar to READ_REBUILD, REBUILD_MISSING at this point also has a
1973 * valid rbio which is consistent with on-disk content, thus such a
1974 * valid rbio can be cached to avoid further disk reads.
1976 if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
1977 rbio->operation == BTRFS_RBIO_REBUILD_MISSING) {
1979 * - In case of two failures, where rbio->failb != -1:
1981 * Do not cache this rbio since the above read reconstruction
1982 * (raid6_datap_recov() or raid6_2data_recov()) may have
1983 * changed some content of stripes which are not identical to
1984 * on-disk content any more, otherwise, a later write/recover
1985 * may steal stripe_pages from this rbio and end up with
1986 * corruptions or rebuild failures.
1988 * - In case of single failure, where rbio->failb == -1:
1990 * Cache this rbio iff the above read reconstruction is
1991 * executed without problems.
1993 if (err == BLK_STS_OK && rbio->failb < 0)
1994 cache_rbio_pages(rbio);
1995 else
1996 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
1998 rbio_orig_end_io(rbio, err);
1999 } else if (err == BLK_STS_OK) {
2003 if (rbio->operation == BTRFS_RBIO_WRITE)
2005 else if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB)
2006 finish_parity_scrub(rbio, 0);
2010 rbio_orig_end_io(rbio, err);
2015 * This is called only for stripes we've read from disk to
2016 * reconstruct the parity.
2018 static void raid_recover_end_io(struct bio *bio)
2020 struct btrfs_raid_bio *rbio = bio->bi_private;
2023 * we only read stripe pages off the disk, set them
2024 * up to date if there were no errors
2027 fail_bio_stripe(rbio, bio);
2029 set_bio_pages_uptodate(bio);
2032 if (!atomic_dec_and_test(&rbio->stripes_pending))
2033 return;
2035 if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
2036 rbio_orig_end_io(rbio, BLK_STS_IOERR);
2037 else
2038 __raid_recover_end_io(rbio);
2042 * reads everything we need off the disk to reconstruct
2043 * the parity. endio handlers trigger final reconstruction
2044 * when the IO is done.
2046 * This is used both for reads from the higher layers and for
2047 * parity construction required to finish a rmw cycle.
2049 static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
2051 int bios_to_read = 0;
2052 struct bio_list bio_list;
2058 bio_list_init(&bio_list);
2060 ret = alloc_rbio_pages(rbio);
2064 atomic_set(&rbio->error, 0);
2067 * read everything that hasn't failed. Thanks to the
2068 * stripe cache, it is possible that some or all of these
2069 * pages are going to be uptodate.
2071 for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
2072 if (rbio->faila == stripe || rbio->failb == stripe) {
2073 atomic_inc(&rbio->error);
2077 for (pagenr = 0; pagenr < rbio->stripe_npages; pagenr++) {
2081 * the rmw code may have already read this
2084 p = rbio_stripe_page(rbio, stripe, pagenr);
2085 if (PageUptodate(p))
2086 continue;
2088 ret = rbio_add_io_page(rbio, &bio_list,
2089 rbio_stripe_page(rbio, stripe, pagenr),
2090 stripe, pagenr, rbio->stripe_len);
2096 bios_to_read = bio_list_size(&bio_list);
2097 if (!bios_to_read) {
2099 * we might have no bios to read just because the pages
2100 * were up to date, or we might have no bios to read because
2101 * the devices were gone.
2103 if (atomic_read(&rbio->error) <= rbio->bbio->max_errors) {
2104 __raid_recover_end_io(rbio);
2112 * the bbio may be freed once we submit the last bio. Make sure
2113 * not to touch it after that
2115 atomic_set(&rbio->stripes_pending, bios_to_read);
2117 bio = bio_list_pop(&bio_list);
2121 bio->bi_private = rbio;
2122 bio->bi_end_io = raid_recover_end_io;
2123 bio->bi_opf = REQ_OP_READ;
2125 btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
2133 if (rbio->operation == BTRFS_RBIO_READ_REBUILD ||
2134 rbio->operation == BTRFS_RBIO_REBUILD_MISSING)
2135 rbio_orig_end_io(rbio, BLK_STS_IOERR);
2137 while ((bio = bio_list_pop(&bio_list)))
2144 * the main entry point for reads from the higher layers. This
2145 * is really only called when the normal read path had a failure,
2146 * so we assume the bio they send down corresponds to a failed part
2147 * of the filesystem
2149 int raid56_parity_recover(struct btrfs_fs_info *fs_info, struct bio *bio,
2150 struct btrfs_bio *bbio, u64 stripe_len,
2151 int mirror_num, int generic_io)
2153 struct btrfs_raid_bio *rbio;
2157 ASSERT(bbio->mirror_num == mirror_num);
2158 btrfs_io_bio(bio)->mirror_num = mirror_num;
2161 rbio = alloc_rbio(fs_info, bbio, stripe_len);
2164 btrfs_put_bbio(bbio);
2165 return PTR_ERR(rbio);
2168 rbio->operation = BTRFS_RBIO_READ_REBUILD;
2169 bio_list_add(&rbio->bio_list, bio);
2170 rbio->bio_list_bytes = bio->bi_iter.bi_size;
2172 rbio->faila = find_logical_bio_stripe(rbio, bio);
2173 if (rbio->faila == -1) {
2175 "%s could not find the bad stripe in raid56 so that we cannot recover any more (bio has logical %llu len %llu, bbio has map_type %llu)",
2176 __func__, (u64)bio->bi_iter.bi_sector << 9,
2177 (u64)bio->bi_iter.bi_size, bbio->map_type);
2179 btrfs_put_bbio(bbio);
2185 btrfs_bio_counter_inc_noblocked(fs_info);
2186 rbio->generic_bio_cnt = 1;
2188 btrfs_get_bbio(bbio);
2193 * for 'mirror_num == 2', reconstruct from all other stripes.
2194 * for 'mirror_num > 2', select a stripe to fail on every retry.
2196 if (mirror_num > 2) {
2198 * 'mirror == 3' is to fail the p stripe and
2199 * reconstruct from the q stripe. 'mirror > 3' is to
2200 * fail a data stripe and reconstruct from p+q stripe.
2202 rbio->failb = rbio->real_stripes - (mirror_num - 1);
2203 ASSERT(rbio->failb > 0);
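/*
 * Editorial worked example (geometry assumed): on a 4-device RAID6
 * (2 data + P + Q, real_stripes == 4), mirror_num == 3 gives
 * failb == 4 - 2 == 2, the P stripe, so reconstruction uses Q instead.
 */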
2204 if (rbio->failb <= rbio->faila)
2205 rbio->failb--;
2208 ret = lock_stripe_add(rbio);
2211 * __raid56_parity_recover will end the bio with
2212 * any errors it hits. We don't want to return
2213 * its error value up the stack because our caller
2214 * will end up calling bio_endio with any nonzero
2215 * return
2218 __raid56_parity_recover(rbio);
2220 * our rbio has been added to the list of
2221 * rbios that will be handled after the
2222 * current lock owner is done
2228 static void rmw_work(struct btrfs_work *work)
2230 struct btrfs_raid_bio *rbio;
2232 rbio = container_of(work, struct btrfs_raid_bio, work);
2233 raid56_rmw_stripe(rbio);
2236 static void read_rebuild_work(struct btrfs_work *work)
2238 struct btrfs_raid_bio *rbio;
2240 rbio = container_of(work, struct btrfs_raid_bio, work);
2241 __raid56_parity_recover(rbio);
2245 * The following code is used to scrub/replace the parity stripe
2247 * Caller must have already increased bio_counter for getting @bbio.
2249 * Note: We need to make sure all the pages added to the scrub/replace
2250 * raid bio are correct and will not be changed during the scrub/replace;
2251 * that is, those pages hold only metadata or file data with checksums.
2254 struct btrfs_raid_bio *
2255 raid56_parity_alloc_scrub_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
2256 struct btrfs_bio *bbio, u64 stripe_len,
2257 struct btrfs_device *scrub_dev,
2258 unsigned long *dbitmap, int stripe_nsectors)
2260 struct btrfs_raid_bio *rbio;
2263 rbio = alloc_rbio(fs_info, bbio, stripe_len);
2266 bio_list_add(&rbio->bio_list, bio);
2268 * This is a special bio which is used to hold the completion handler
2269 * and make the scrub rbio similar to the other types
2271 ASSERT(!bio->bi_iter.bi_size);
2272 rbio->operation = BTRFS_RBIO_PARITY_SCRUB;
2275 * After mapping bbio with BTRFS_MAP_WRITE, parities have been sorted
2276 * to the end position, so this search can start from the first parity
2279 for (i = rbio->nr_data; i < rbio->real_stripes; i++) {
2280 if (bbio->stripes[i].dev == scrub_dev) {
2285 ASSERT(i < rbio->real_stripes);
2287 /* Now we just support the sectorsize equals to page size */
2288 ASSERT(fs_info->sectorsize == PAGE_SIZE);
2289 ASSERT(rbio->stripe_npages == stripe_nsectors);
2290 bitmap_copy(rbio->dbitmap, dbitmap, stripe_nsectors);
2293 * We have already increased bio_counter when getting bbio, record it
2294 * so we can free it at rbio_orig_end_io().
2296 rbio->generic_bio_cnt = 1;
2301 /* Used for both parity scrub and missing. */
2302 void raid56_add_scrub_pages(struct btrfs_raid_bio *rbio, struct page *page,
2308 ASSERT(logical >= rbio->bbio->raid_map[0]);
2309 ASSERT(logical + PAGE_SIZE <= rbio->bbio->raid_map[0] +
2310 rbio->stripe_len * rbio->nr_data);
2311 stripe_offset = (int)(logical - rbio->bbio->raid_map[0]);
2312 index = stripe_offset >> PAGE_SHIFT;
2313 rbio->bio_pages[index] = page;
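/*
 * Editorial worked example of the index math above (addresses assumed):
 * if raid_map[0] == 1M and the caller adds the page at logical 1M + 12K,
 * stripe_offset == 12K and the page lands at bio_pages[3]
 * (12K >> PAGE_SHIFT with 4K pages).
 */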
2317 * We only scrub the parity for the horizontal stripes where we have
2318 * correct data, so we needn't allocate pages for all the stripes.
2320 static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
2327 for_each_set_bit(bit, rbio->dbitmap, rbio->stripe_npages) {
2328 for (i = 0; i < rbio->real_stripes; i++) {
2329 index = i * rbio->stripe_npages + bit;
2330 if (rbio->stripe_pages[index])
2333 page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2336 rbio->stripe_pages[index] = page;
2342 static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
2345 struct btrfs_bio *bbio = rbio->bbio;
2346 void **pointers = rbio->finish_pointers;
2347 unsigned long *pbitmap = rbio->finish_pbitmap;
2348 int nr_data = rbio->nr_data;
2353 struct page *p_page = NULL;
2354 struct page *q_page = NULL;
2355 struct bio_list bio_list;
2360 bio_list_init(&bio_list);
2362 if (rbio->real_stripes - rbio->nr_data == 1) {
2363 p_stripe = rbio->real_stripes - 1;
2364 } else if (rbio->real_stripes - rbio->nr_data == 2) {
2365 p_stripe = rbio->real_stripes - 2;
2366 q_stripe = rbio->real_stripes - 1;
2371 if (bbio->num_tgtdevs && bbio->tgtdev_map[rbio->scrubp]) {
2373 bitmap_copy(pbitmap, rbio->dbitmap, rbio->stripe_npages);
2377 * The higher layers (the scrubber) are unlikely to use
2378 * this area of the disk again soon, so don't cache it.
2381 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
2386 p_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2389 SetPageUptodate(p_page);
2391 if (q_stripe != -1) {
2392 q_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2394 __free_page(p_page);
2397 SetPageUptodate(q_page);
2400 atomic_set(&rbio->error, 0);
2402 for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
2405 /* first collect one page from each data stripe */
2406 for (stripe = 0; stripe < nr_data; stripe++) {
2407 p = page_in_rbio(rbio, stripe, pagenr, 0);
2408 pointers[stripe] = kmap(p);
2411 /* then add the parity stripe */
2412 pointers[stripe++] = kmap(p_page);
2414 if (q_stripe != -1) {
2417 * raid6, add the qstripe and call the
2418 * library function to fill in our p/q
2420 pointers[stripe++] = kmap(q_page);
2422 raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
2426 copy_page(pointers[nr_data], pointers[0]);
2427 run_xor(pointers + 1, nr_data - 1, PAGE_SIZE);
2430 /* Check scrubbing parity and repair it */
2431 p = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2432 parity = kmap(p);
2433 if (memcmp(parity, pointers[rbio->scrubp], PAGE_SIZE))
2434 copy_page(parity, pointers[rbio->scrubp]);
2435 else
2436 /* Parity is right, needn't writeback */
2437 bitmap_clear(rbio->dbitmap, pagenr, 1);
2438 kunmap(p);
2440 for (stripe = 0; stripe < rbio->real_stripes; stripe++)
2441 kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
2444 __free_page(p_page);
2446 __free_page(q_page);
2450 * time to start writing. Make bios for everything from the
2451 * higher layers (the bio_list in our rbio) and our p/q. Ignore
2454 for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
2457 page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2458 ret = rbio_add_io_page(rbio, &bio_list,
2459 page, rbio->scrubp, pagenr, rbio->stripe_len);
2467 for_each_set_bit(pagenr, pbitmap, rbio->stripe_npages) {
2470 page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2471 ret = rbio_add_io_page(rbio, &bio_list, page,
2472 bbio->tgtdev_map[rbio->scrubp],
2473 pagenr, rbio->stripe_len);
2479 nr_data = bio_list_size(&bio_list);
2481 /* Every parity is right */
2482 rbio_orig_end_io(rbio, BLK_STS_OK);
2486 atomic_set(&rbio->stripes_pending, nr_data);
2489 bio = bio_list_pop(&bio_list);
2493 bio->bi_private = rbio;
2494 bio->bi_end_io = raid_write_end_io;
2495 bio->bi_opf = REQ_OP_WRITE;
2502 rbio_orig_end_io(rbio, BLK_STS_IOERR);
2504 while ((bio = bio_list_pop(&bio_list)))
2508 static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
2510 if (stripe >= 0 && stripe < rbio->nr_data)
2516 * While we're doing the parity check and repair, we could have errors
2517 * in reading pages off the disk. This checks for errors and if we're
2518 * not able to read the page it'll trigger parity reconstruction. The
2519 * parity scrub will be finished after we've reconstructed the failed
2522 static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
2524 if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
2527 if (rbio->faila >= 0 || rbio->failb >= 0) {
2528 int dfail = 0, failp = -1;
2530 if (is_data_stripe(rbio, rbio->faila))
2532 else if (is_parity_stripe(rbio->faila))
2533 failp = rbio->faila;
2535 if (is_data_stripe(rbio, rbio->failb))
2537 else if (is_parity_stripe(rbio->failb))
2538 failp = rbio->failb;
2541 * Because we cannot use the parity being scrubbed to repair
2542 * the data, our repair capability is reduced.
2543 * (In the case of RAID5, we cannot repair anything.)
2545 if (dfail > rbio->bbio->max_errors - 1)
2549 * If all the data is good and only the parity is bad,
2550 * just repair the parity.
2553 finish_parity_scrub(rbio, 0);
2558 * Here we have one corrupted data stripe and one corrupted
2559 * parity on RAID6. If the corrupted parity is the one being
2560 * scrubbed, we are lucky and can use the other parity to
2561 * repair the data; otherwise we cannot repair the data stripe.
2563 if (failp != rbio->scrubp)
2566 __raid_recover_end_io(rbio);
2568 finish_parity_scrub(rbio, 1);
2573 rbio_orig_end_io(rbio, BLK_STS_IOERR);
2577 * end io for the read phase of the parity scrub cycle. All the bios here are
2578 * physical stripe bios we've read from the disk so we can recalculate the
2579 * parity of the stripe.
2581 * This will usually kick off finish_parity_scrub once all the bios are read
2582 * in, but it may trigger parity reconstruction if we had any errors along the way
2584 static void raid56_parity_scrub_end_io(struct bio *bio)
2586 struct btrfs_raid_bio *rbio = bio->bi_private;
2589 fail_bio_stripe(rbio, bio);
2591 set_bio_pages_uptodate(bio);
2595 if (!atomic_dec_and_test(&rbio->stripes_pending))
2596 return;
2599 * this will normally call finish_parity_scrub to start our write
2600 * but if there are any failed stripes we'll reconstruct
2603 validate_rbio_for_parity_scrub(rbio);
2606 static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
2608 int bios_to_read = 0;
2609 struct bio_list bio_list;
2615 bio_list_init(&bio_list);
2617 ret = alloc_rbio_essential_pages(rbio);
2621 atomic_set(&rbio->error, 0);
2623 * build a list of bios to read all the missing parts of this
2624 * stripe
2626 for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
2627 for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
2630 * we want to find all the pages missing from
2631 * the rbio and read them from the disk. If
2632 * page_in_rbio finds a page in the bio list
2633 * we don't need to read it off the stripe.
2635 page = page_in_rbio(rbio, stripe, pagenr, 1);
2636 if (page)
2637 continue;
2639 page = rbio_stripe_page(rbio, stripe, pagenr);
2641 * the bio cache may have handed us an uptodate
2642 * page. If so, be happy and use it
2644 if (PageUptodate(page))
2645 continue;
2647 ret = rbio_add_io_page(rbio, &bio_list, page,
2648 stripe, pagenr, rbio->stripe_len);
2654 bios_to_read = bio_list_size(&bio_list);
2655 if (!bios_to_read) {
2657 * this can happen if others have merged with
2658 * us, it means there is nothing left to read.
2659 * But if there are missing devices it may not be
2660 * safe to do the full stripe write yet.
2666 * the bbio may be freed once we submit the last bio. Make sure
2667 * not to touch it after that
2669 atomic_set(&rbio->stripes_pending, bios_to_read);
2671 bio = bio_list_pop(&bio_list);
2675 bio->bi_private = rbio;
2676 bio->bi_end_io = raid56_parity_scrub_end_io;
2677 bio->bi_opf = REQ_OP_READ;
2679 btrfs_bio_wq_end_io(rbio->fs_info, bio, BTRFS_WQ_ENDIO_RAID56);
2683 /* the actual write will happen once the reads are done */
2687 rbio_orig_end_io(rbio, BLK_STS_IOERR);
2689 while ((bio = bio_list_pop(&bio_list)))
2695 validate_rbio_for_parity_scrub(rbio);
2698 static void scrub_parity_work(struct btrfs_work *work)
2700 struct btrfs_raid_bio *rbio;
2702 rbio = container_of(work, struct btrfs_raid_bio, work);
2703 raid56_parity_scrub_stripe(rbio);
2706 static void async_scrub_parity(struct btrfs_raid_bio *rbio)
2708 btrfs_init_work(&rbio->work, btrfs_rmw_helper,
2709 scrub_parity_work, NULL, NULL);
2711 btrfs_queue_work(rbio->fs_info->rmw_workers, &rbio->work);
2714 void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
2716 if (!lock_stripe_add(rbio))
2717 async_scrub_parity(rbio);
2720 /* The following code is used for dev replace of a missing RAID 5/6 device. */
2722 struct btrfs_raid_bio *
2723 raid56_alloc_missing_rbio(struct btrfs_fs_info *fs_info, struct bio *bio,
2724 struct btrfs_bio *bbio, u64 length)
2726 struct btrfs_raid_bio *rbio;
2728 rbio = alloc_rbio(fs_info, bbio, length);
2732 rbio->operation = BTRFS_RBIO_REBUILD_MISSING;
2733 bio_list_add(&rbio->bio_list, bio);
2735 * This is a special bio which is used to hold the completion handler
2736 * and make the scrub rbio similar to the other types
2738 ASSERT(!bio->bi_iter.bi_size);
2740 rbio->faila = find_logical_bio_stripe(rbio, bio);
2741 if (rbio->faila == -1) {
2748 * When we get bbio, we have already increased bio_counter, record it
2749 * so we can free it at rbio_orig_end_io()
2751 rbio->generic_bio_cnt = 1;
2756 void raid56_submit_missing_rbio(struct btrfs_raid_bio *rbio)
2758 if (!lock_stripe_add(rbio))
2759 start_async_work(rbio, read_rebuild_work);