Btrfs: Make raid_map array be inlined in btrfs_bio structure
[linux-2.6-microblaze.git] / fs / btrfs / scrub.c
1 /*
2  * Copyright (C) 2011, 2012 STRATO.  All rights reserved.
3  *
4  * This program is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU General Public
6  * License v2 as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope that it will be useful,
9  * but WITHOUT ANY WARRANTY; without even the implied warranty of
10  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
11  * General Public License for more details.
12  *
13  * You should have received a copy of the GNU General Public
14  * License along with this program; if not, write to the
15  * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16  * Boston, MA 021110-1307, USA.
17  */
18
19 #include <linux/blkdev.h>
20 #include <linux/ratelimit.h>
21 #include "ctree.h"
22 #include "volumes.h"
23 #include "disk-io.h"
24 #include "ordered-data.h"
25 #include "transaction.h"
26 #include "backref.h"
27 #include "extent_io.h"
28 #include "dev-replace.h"
29 #include "check-integrity.h"
30 #include "rcu-string.h"
31 #include "raid56.h"
32
33 /*
34  * This is only the first step towards a full-featured scrub. It reads all
35  * extents and super blocks and verifies the checksums. In case a bad checksum
36  * is found or the extent cannot be read, good data will be written back if
37  * any can be found.
38  *
39  * Future enhancements:
40  *  - In case an unrepairable extent is encountered, track which files are
41  *    affected and report them
42  *  - track and record media errors, throw out bad devices
43  *  - add a mode to also read unallocated space
44  */
45
46 struct scrub_block;
47 struct scrub_ctx;
48
49 /*
50  * the following three values only influence performance.
51  * The last one configures the number of parallel and outstanding I/O
52  * operations. The first two values configure an upper limit for the number
53  * of (dynamically allocated) pages that are added to a bio.
54  */
55 #define SCRUB_PAGES_PER_RD_BIO  32      /* 128k per bio */
56 #define SCRUB_PAGES_PER_WR_BIO  32      /* 128k per bio */
57 #define SCRUB_BIOS_PER_SCTX     64      /* 8MB per device in flight */
58
59 /*
60  * the following value times PAGE_SIZE needs to be large enough to match the
61  * largest node/leaf/sector size that shall be supported.
62  * Values larger than BTRFS_STRIPE_LEN are not supported.
63  */
64 #define SCRUB_MAX_PAGES_PER_BLOCK       16      /* 64k per node/leaf/sector */
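/*
 * Sizing arithmetic behind the comments above, assuming 4K pages
 * (PAGE_SIZE == 4096):
 *   SCRUB_PAGES_PER_RD_BIO * PAGE_SIZE    = 32 * 4K   = 128K per read bio
 *   SCRUB_PAGES_PER_WR_BIO * PAGE_SIZE    = 32 * 4K   = 128K per write bio
 *   SCRUB_BIOS_PER_SCTX * 128K            = 64 * 128K = 8M in flight per device
 *   SCRUB_MAX_PAGES_PER_BLOCK * PAGE_SIZE = 16 * 4K   = 64K max node/leaf/sector
 */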
65
66 struct scrub_recover {
67         atomic_t                refs;
68         struct btrfs_bio        *bbio;
69         u64                     map_length;
70 };
71
72 struct scrub_page {
73         struct scrub_block      *sblock;
74         struct page             *page;
75         struct btrfs_device     *dev;
76         struct list_head        list;
77         u64                     flags;  /* extent flags */
78         u64                     generation;
79         u64                     logical;
80         u64                     physical;
81         u64                     physical_for_dev_replace;
82         atomic_t                ref_count;
83         struct {
84                 unsigned int    mirror_num:8;
85                 unsigned int    have_csum:1;
86                 unsigned int    io_error:1;
87         };
88         u8                      csum[BTRFS_CSUM_SIZE];
89
90         struct scrub_recover    *recover;
91 };
92
93 struct scrub_bio {
94         int                     index;
95         struct scrub_ctx        *sctx;
96         struct btrfs_device     *dev;
97         struct bio              *bio;
98         int                     err;
99         u64                     logical;
100         u64                     physical;
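        /*
         * pagev is sized to the larger of the read and write limits below,
         * so that the same scrub_bio structure can back either a read bio
         * or a dev-replace write bio.
         */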
101 #if SCRUB_PAGES_PER_WR_BIO >= SCRUB_PAGES_PER_RD_BIO
102         struct scrub_page       *pagev[SCRUB_PAGES_PER_WR_BIO];
103 #else
104         struct scrub_page       *pagev[SCRUB_PAGES_PER_RD_BIO];
105 #endif
106         int                     page_count;
107         int                     next_free;
108         struct btrfs_work       work;
109 };
110
111 struct scrub_block {
112         struct scrub_page       *pagev[SCRUB_MAX_PAGES_PER_BLOCK];
113         int                     page_count;
114         atomic_t                outstanding_pages;
115         atomic_t                ref_count; /* free mem on transition to zero */
116         struct scrub_ctx        *sctx;
117         struct scrub_parity     *sparity;
118         struct {
119                 unsigned int    header_error:1;
120                 unsigned int    checksum_error:1;
121                 unsigned int    no_io_error_seen:1;
122                 unsigned int    generation_error:1; /* also sets header_error */
123
124                 /* The following is for the data used to check parity */
125                 /* It is for the data with checksum */
126                 unsigned int    data_corrected:1;
127         };
128 };
129
130 /* Used for the chunks with a parity stripe, such as RAID5/6 */
131 struct scrub_parity {
132         struct scrub_ctx        *sctx;
133
134         struct btrfs_device     *scrub_dev;
135
136         u64                     logic_start;
137
138         u64                     logic_end;
139
140         int                     nsectors;
141
142         int                     stripe_len;
143
144         atomic_t                ref_count;
145
146         struct list_head        spages;
147
148         /* Work of parity check and repair */
149         struct btrfs_work       work;
150
151         /* Mark the parity blocks which have data */
152         unsigned long           *dbitmap;
153
154         /*
155          * Mark the parity blocks which have data, but for which errors
156          * occurred when reading or checking that data
157          */
158         unsigned long           *ebitmap;
159
160         unsigned long           bitmap[0];
161 };
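/*
 * Note: dbitmap and ebitmap above presumably point into the single trailing
 * bitmap[] allocation (one region each); the allocation and split are done
 * by the scrub_parity setup code elsewhere in this file.
 */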
162
163 struct scrub_wr_ctx {
164         struct scrub_bio *wr_curr_bio;
165         struct btrfs_device *tgtdev;
166         int pages_per_wr_bio; /* <= SCRUB_PAGES_PER_WR_BIO */
167         atomic_t flush_all_writes;
168         struct mutex wr_lock;
169 };
170
171 struct scrub_ctx {
172         struct scrub_bio        *bios[SCRUB_BIOS_PER_SCTX];
173         struct btrfs_root       *dev_root;
174         int                     first_free;
175         int                     curr;
176         atomic_t                bios_in_flight;
177         atomic_t                workers_pending;
178         spinlock_t              list_lock;
179         wait_queue_head_t       list_wait;
180         u16                     csum_size;
181         struct list_head        csum_list;
182         atomic_t                cancel_req;
183         int                     readonly;
184         int                     pages_per_rd_bio;
185         u32                     sectorsize;
186         u32                     nodesize;
187
188         int                     is_dev_replace;
189         struct scrub_wr_ctx     wr_ctx;
190
191         /*
192          * statistics
193          */
194         struct btrfs_scrub_progress stat;
195         spinlock_t              stat_lock;
196 };
197
198 struct scrub_fixup_nodatasum {
199         struct scrub_ctx        *sctx;
200         struct btrfs_device     *dev;
201         u64                     logical;
202         struct btrfs_root       *root;
203         struct btrfs_work       work;
204         int                     mirror_num;
205 };
206
207 struct scrub_nocow_inode {
208         u64                     inum;
209         u64                     offset;
210         u64                     root;
211         struct list_head        list;
212 };
213
214 struct scrub_copy_nocow_ctx {
215         struct scrub_ctx        *sctx;
216         u64                     logical;
217         u64                     len;
218         int                     mirror_num;
219         u64                     physical_for_dev_replace;
220         struct list_head        inodes;
221         struct btrfs_work       work;
222 };
223
224 struct scrub_warning {
225         struct btrfs_path       *path;
226         u64                     extent_item_size;
227         const char              *errstr;
228         sector_t                sector;
229         u64                     logical;
230         struct btrfs_device     *dev;
231 };
232
233 static void scrub_pending_bio_inc(struct scrub_ctx *sctx);
234 static void scrub_pending_bio_dec(struct scrub_ctx *sctx);
235 static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx);
236 static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx);
237 static int scrub_handle_errored_block(struct scrub_block *sblock_to_check);
238 static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
239                                      struct btrfs_fs_info *fs_info,
240                                      struct scrub_block *original_sblock,
241                                      u64 length, u64 logical,
242                                      struct scrub_block *sblocks_for_recheck);
243 static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
244                                 struct scrub_block *sblock, int is_metadata,
245                                 int have_csum, u8 *csum, u64 generation,
246                                 u16 csum_size, int retry_failed_mirror);
247 static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
248                                          struct scrub_block *sblock,
249                                          int is_metadata, int have_csum,
250                                          const u8 *csum, u64 generation,
251                                          u16 csum_size);
252 static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
253                                              struct scrub_block *sblock_good,
254                                              int force_write);
255 static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
256                                             struct scrub_block *sblock_good,
257                                             int page_num, int force_write);
258 static void scrub_write_block_to_dev_replace(struct scrub_block *sblock);
259 static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
260                                            int page_num);
261 static int scrub_checksum_data(struct scrub_block *sblock);
262 static int scrub_checksum_tree_block(struct scrub_block *sblock);
263 static int scrub_checksum_super(struct scrub_block *sblock);
264 static void scrub_block_get(struct scrub_block *sblock);
265 static void scrub_block_put(struct scrub_block *sblock);
266 static void scrub_page_get(struct scrub_page *spage);
267 static void scrub_page_put(struct scrub_page *spage);
268 static void scrub_parity_get(struct scrub_parity *sparity);
269 static void scrub_parity_put(struct scrub_parity *sparity);
270 static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
271                                     struct scrub_page *spage);
272 static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
273                        u64 physical, struct btrfs_device *dev, u64 flags,
274                        u64 gen, int mirror_num, u8 *csum, int force,
275                        u64 physical_for_dev_replace);
276 static void scrub_bio_end_io(struct bio *bio, int err);
277 static void scrub_bio_end_io_worker(struct btrfs_work *work);
278 static void scrub_block_complete(struct scrub_block *sblock);
279 static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
280                                u64 extent_logical, u64 extent_len,
281                                u64 *extent_physical,
282                                struct btrfs_device **extent_dev,
283                                int *extent_mirror_num);
284 static int scrub_setup_wr_ctx(struct scrub_ctx *sctx,
285                               struct scrub_wr_ctx *wr_ctx,
286                               struct btrfs_fs_info *fs_info,
287                               struct btrfs_device *dev,
288                               int is_dev_replace);
289 static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx);
290 static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
291                                     struct scrub_page *spage);
292 static void scrub_wr_submit(struct scrub_ctx *sctx);
293 static void scrub_wr_bio_end_io(struct bio *bio, int err);
294 static void scrub_wr_bio_end_io_worker(struct btrfs_work *work);
295 static int write_page_nocow(struct scrub_ctx *sctx,
296                             u64 physical_for_dev_replace, struct page *page);
297 static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
298                                       struct scrub_copy_nocow_ctx *ctx);
299 static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
300                             int mirror_num, u64 physical_for_dev_replace);
301 static void copy_nocow_pages_worker(struct btrfs_work *work);
302 static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
303 static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
304
305
306 static void scrub_pending_bio_inc(struct scrub_ctx *sctx)
307 {
308         atomic_inc(&sctx->bios_in_flight);
309 }
310
311 static void scrub_pending_bio_dec(struct scrub_ctx *sctx)
312 {
313         atomic_dec(&sctx->bios_in_flight);
314         wake_up(&sctx->list_wait);
315 }
316
317 static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
318 {
319         while (atomic_read(&fs_info->scrub_pause_req)) {
320                 mutex_unlock(&fs_info->scrub_lock);
321                 wait_event(fs_info->scrub_pause_wait,
322                    atomic_read(&fs_info->scrub_pause_req) == 0);
323                 mutex_lock(&fs_info->scrub_lock);
324         }
325 }
326
327 static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info)
328 {
329         atomic_inc(&fs_info->scrubs_paused);
330         wake_up(&fs_info->scrub_pause_wait);
331
332         mutex_lock(&fs_info->scrub_lock);
333         __scrub_blocked_if_needed(fs_info);
334         atomic_dec(&fs_info->scrubs_paused);
335         mutex_unlock(&fs_info->scrub_lock);
336
337         wake_up(&fs_info->scrub_pause_wait);
338 }
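/*
 * Note on the handshake above: scrubs_paused is incremented and
 * scrub_pause_wait is woken *before* scrub_lock is taken, so a pending pause
 * request can make progress and regard this scrub as paused while
 * __scrub_blocked_if_needed() waits for scrub_pause_req to drop to zero.
 */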
339
340 /*
341  * used for workers that require transaction commits (i.e., for the
342  * NOCOW case)
343  */
344 static void scrub_pending_trans_workers_inc(struct scrub_ctx *sctx)
345 {
346         struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
347
348         /*
349          * increment scrubs_running to prevent cancel requests from
350          * completing as long as a worker is running. we must also
351          * increment scrubs_paused to prevent deadlocking on pause
352          * requests used for transaction commits (as the worker uses a
353          * transaction context). it is safe to regard the worker
354          * as paused for all practical matters. effectively, we only
355          * avoid cancellation requests from completing.
356          */
357         mutex_lock(&fs_info->scrub_lock);
358         atomic_inc(&fs_info->scrubs_running);
359         atomic_inc(&fs_info->scrubs_paused);
360         mutex_unlock(&fs_info->scrub_lock);
361
362         /*
363          * checking the @scrubs_running == @scrubs_paused condition
364          * inside wait_event() is not an atomic operation,
365          * which means we may inc/dec @scrubs_running/paused
366          * at any time. Wake up @scrub_pause_wait as often as we
367          * can so that the transaction commit is blocked as briefly as possible.
368          */
369         wake_up(&fs_info->scrub_pause_wait);
370
371         atomic_inc(&sctx->workers_pending);
372 }
373
374 /* used for workers that require transaction commits */
375 static void scrub_pending_trans_workers_dec(struct scrub_ctx *sctx)
376 {
377         struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
378
379         /*
380          * see scrub_pending_trans_workers_inc() why we're pretending
381          * to be paused in the scrub counters
382          */
383         mutex_lock(&fs_info->scrub_lock);
384         atomic_dec(&fs_info->scrubs_running);
385         atomic_dec(&fs_info->scrubs_paused);
386         mutex_unlock(&fs_info->scrub_lock);
387         atomic_dec(&sctx->workers_pending);
388         wake_up(&fs_info->scrub_pause_wait);
389         wake_up(&sctx->list_wait);
390 }
391
392 static void scrub_free_csums(struct scrub_ctx *sctx)
393 {
394         while (!list_empty(&sctx->csum_list)) {
395                 struct btrfs_ordered_sum *sum;
396                 sum = list_first_entry(&sctx->csum_list,
397                                        struct btrfs_ordered_sum, list);
398                 list_del(&sum->list);
399                 kfree(sum);
400         }
401 }
402
403 static noinline_for_stack void scrub_free_ctx(struct scrub_ctx *sctx)
404 {
405         int i;
406
407         if (!sctx)
408                 return;
409
410         scrub_free_wr_ctx(&sctx->wr_ctx);
411
412         /* this can happen when scrub is cancelled */
413         if (sctx->curr != -1) {
414                 struct scrub_bio *sbio = sctx->bios[sctx->curr];
415
416                 for (i = 0; i < sbio->page_count; i++) {
417                         WARN_ON(!sbio->pagev[i]->page);
418                         scrub_block_put(sbio->pagev[i]->sblock);
419                 }
420                 bio_put(sbio->bio);
421         }
422
423         for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
424                 struct scrub_bio *sbio = sctx->bios[i];
425
426                 if (!sbio)
427                         break;
428                 kfree(sbio);
429         }
430
431         scrub_free_csums(sctx);
432         kfree(sctx);
433 }
434
435 static noinline_for_stack
436 struct scrub_ctx *scrub_setup_ctx(struct btrfs_device *dev, int is_dev_replace)
437 {
438         struct scrub_ctx *sctx;
439         int             i;
440         struct btrfs_fs_info *fs_info = dev->dev_root->fs_info;
441         int pages_per_rd_bio;
442         int ret;
443
444         /*
445          * the setting of pages_per_rd_bio is correct for scrub but might
446          * be wrong for the dev_replace code where we might read from
447          * different devices in the initial huge bios. However, that
448          * code is able to correctly handle the case when adding a page
449          * to a bio fails.
450          */
451         if (dev->bdev)
452                 pages_per_rd_bio = min_t(int, SCRUB_PAGES_PER_RD_BIO,
453                                          bio_get_nr_vecs(dev->bdev));
454         else
455                 pages_per_rd_bio = SCRUB_PAGES_PER_RD_BIO;
456         sctx = kzalloc(sizeof(*sctx), GFP_NOFS);
457         if (!sctx)
458                 goto nomem;
459         sctx->is_dev_replace = is_dev_replace;
460         sctx->pages_per_rd_bio = pages_per_rd_bio;
461         sctx->curr = -1;
462         sctx->dev_root = dev->dev_root;
463         for (i = 0; i < SCRUB_BIOS_PER_SCTX; ++i) {
464                 struct scrub_bio *sbio;
465
466                 sbio = kzalloc(sizeof(*sbio), GFP_NOFS);
467                 if (!sbio)
468                         goto nomem;
469                 sctx->bios[i] = sbio;
470
471                 sbio->index = i;
472                 sbio->sctx = sctx;
473                 sbio->page_count = 0;
474                 btrfs_init_work(&sbio->work, btrfs_scrub_helper,
475                                 scrub_bio_end_io_worker, NULL, NULL);
476
477                 if (i != SCRUB_BIOS_PER_SCTX - 1)
478                         sctx->bios[i]->next_free = i + 1;
479                 else
480                         sctx->bios[i]->next_free = -1;
481         }
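        /*
         * The loop above chains the scrub_bios into a simple free list via
         * their next_free indices; first_free below points at the head of
         * that list.
         */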
482         sctx->first_free = 0;
483         sctx->nodesize = dev->dev_root->nodesize;
484         sctx->sectorsize = dev->dev_root->sectorsize;
485         atomic_set(&sctx->bios_in_flight, 0);
486         atomic_set(&sctx->workers_pending, 0);
487         atomic_set(&sctx->cancel_req, 0);
488         sctx->csum_size = btrfs_super_csum_size(fs_info->super_copy);
489         INIT_LIST_HEAD(&sctx->csum_list);
490
491         spin_lock_init(&sctx->list_lock);
492         spin_lock_init(&sctx->stat_lock);
493         init_waitqueue_head(&sctx->list_wait);
494
495         ret = scrub_setup_wr_ctx(sctx, &sctx->wr_ctx, fs_info,
496                                  fs_info->dev_replace.tgtdev, is_dev_replace);
497         if (ret) {
498                 scrub_free_ctx(sctx);
499                 return ERR_PTR(ret);
500         }
501         return sctx;
502
503 nomem:
504         scrub_free_ctx(sctx);
505         return ERR_PTR(-ENOMEM);
506 }
507
508 static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root,
509                                      void *warn_ctx)
510 {
511         u64 isize;
512         u32 nlink;
513         int ret;
514         int i;
515         struct extent_buffer *eb;
516         struct btrfs_inode_item *inode_item;
517         struct scrub_warning *swarn = warn_ctx;
518         struct btrfs_fs_info *fs_info = swarn->dev->dev_root->fs_info;
519         struct inode_fs_paths *ipath = NULL;
520         struct btrfs_root *local_root;
521         struct btrfs_key root_key;
522         struct btrfs_key key;
523
524         root_key.objectid = root;
525         root_key.type = BTRFS_ROOT_ITEM_KEY;
526         root_key.offset = (u64)-1;
527         local_root = btrfs_read_fs_root_no_name(fs_info, &root_key);
528         if (IS_ERR(local_root)) {
529                 ret = PTR_ERR(local_root);
530                 goto err;
531         }
532
533         /*
534          * this makes the path point to (inum INODE_ITEM ioff)
535          */
536         key.objectid = inum;
537         key.type = BTRFS_INODE_ITEM_KEY;
538         key.offset = 0;
539
540         ret = btrfs_search_slot(NULL, local_root, &key, swarn->path, 0, 0);
541         if (ret) {
542                 btrfs_release_path(swarn->path);
543                 goto err;
544         }
545
546         eb = swarn->path->nodes[0];
547         inode_item = btrfs_item_ptr(eb, swarn->path->slots[0],
548                                         struct btrfs_inode_item);
549         isize = btrfs_inode_size(eb, inode_item);
550         nlink = btrfs_inode_nlink(eb, inode_item);
551         btrfs_release_path(swarn->path);
552
553         ipath = init_ipath(4096, local_root, swarn->path);
554         if (IS_ERR(ipath)) {
555                 ret = PTR_ERR(ipath);
556                 ipath = NULL;
557                 goto err;
558         }
559         ret = paths_from_inode(inum, ipath);
560
561         if (ret < 0)
562                 goto err;
563
564         /*
565          * we deliberately ignore the fact that ipath might have been too
566          * small to hold all of the paths here
567          */
568         for (i = 0; i < ipath->fspath->elem_cnt; ++i)
569                 printk_in_rcu(KERN_WARNING "BTRFS: %s at logical %llu on dev "
570                         "%s, sector %llu, root %llu, inode %llu, offset %llu, "
571                         "length %llu, links %u (path: %s)\n", swarn->errstr,
572                         swarn->logical, rcu_str_deref(swarn->dev->name),
573                         (unsigned long long)swarn->sector, root, inum, offset,
574                         min(isize - offset, (u64)PAGE_SIZE), nlink,
575                         (char *)(unsigned long)ipath->fspath->val[i]);
576
577         free_ipath(ipath);
578         return 0;
579
580 err:
581         printk_in_rcu(KERN_WARNING "BTRFS: %s at logical %llu on dev "
582                 "%s, sector %llu, root %llu, inode %llu, offset %llu: path "
583                 "resolving failed with ret=%d\n", swarn->errstr,
584                 swarn->logical, rcu_str_deref(swarn->dev->name),
585                 (unsigned long long)swarn->sector, root, inum, offset, ret);
586
587         free_ipath(ipath);
588         return 0;
589 }
590
591 static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
592 {
593         struct btrfs_device *dev;
594         struct btrfs_fs_info *fs_info;
595         struct btrfs_path *path;
596         struct btrfs_key found_key;
597         struct extent_buffer *eb;
598         struct btrfs_extent_item *ei;
599         struct scrub_warning swarn;
600         unsigned long ptr = 0;
601         u64 extent_item_pos;
602         u64 flags = 0;
603         u64 ref_root;
604         u32 item_size;
605         u8 ref_level;
606         int ret;
607
608         WARN_ON(sblock->page_count < 1);
609         dev = sblock->pagev[0]->dev;
610         fs_info = sblock->sctx->dev_root->fs_info;
611
612         path = btrfs_alloc_path();
613         if (!path)
614                 return;
615
616         swarn.sector = (sblock->pagev[0]->physical) >> 9;
617         swarn.logical = sblock->pagev[0]->logical;
618         swarn.errstr = errstr;
619         swarn.dev = NULL;
620
621         ret = extent_from_logical(fs_info, swarn.logical, path, &found_key,
622                                   &flags);
623         if (ret < 0)
624                 goto out;
625
626         extent_item_pos = swarn.logical - found_key.objectid;
627         swarn.extent_item_size = found_key.offset;
628
629         eb = path->nodes[0];
630         ei = btrfs_item_ptr(eb, path->slots[0], struct btrfs_extent_item);
631         item_size = btrfs_item_size_nr(eb, path->slots[0]);
632
633         if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
634                 do {
635                         ret = tree_backref_for_extent(&ptr, eb, &found_key, ei,
636                                                       item_size, &ref_root,
637                                                       &ref_level);
638                         printk_in_rcu(KERN_WARNING
639                                 "BTRFS: %s at logical %llu on dev %s, "
640                                 "sector %llu: metadata %s (level %d) in tree "
641                                 "%llu\n", errstr, swarn.logical,
642                                 rcu_str_deref(dev->name),
643                                 (unsigned long long)swarn.sector,
644                                 ref_level ? "node" : "leaf",
645                                 ret < 0 ? -1 : ref_level,
646                                 ret < 0 ? -1 : ref_root);
647                 } while (ret != 1);
648                 btrfs_release_path(path);
649         } else {
650                 btrfs_release_path(path);
651                 swarn.path = path;
652                 swarn.dev = dev;
653                 iterate_extent_inodes(fs_info, found_key.objectid,
654                                         extent_item_pos, 1,
655                                         scrub_print_warning_inode, &swarn);
656         }
657
658 out:
659         btrfs_free_path(path);
660 }
661
662 static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx)
663 {
664         struct page *page = NULL;
665         unsigned long index;
666         struct scrub_fixup_nodatasum *fixup = fixup_ctx;
667         int ret;
668         int corrected = 0;
669         struct btrfs_key key;
670         struct inode *inode = NULL;
671         struct btrfs_fs_info *fs_info;
672         u64 end = offset + PAGE_SIZE - 1;
673         struct btrfs_root *local_root;
674         int srcu_index;
675
676         key.objectid = root;
677         key.type = BTRFS_ROOT_ITEM_KEY;
678         key.offset = (u64)-1;
679
680         fs_info = fixup->root->fs_info;
681         srcu_index = srcu_read_lock(&fs_info->subvol_srcu);
682
683         local_root = btrfs_read_fs_root_no_name(fs_info, &key);
684         if (IS_ERR(local_root)) {
685                 srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
686                 return PTR_ERR(local_root);
687         }
688
689         key.type = BTRFS_INODE_ITEM_KEY;
690         key.objectid = inum;
691         key.offset = 0;
692         inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
693         srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
694         if (IS_ERR(inode))
695                 return PTR_ERR(inode);
696
697         index = offset >> PAGE_CACHE_SHIFT;
698
699         page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
700         if (!page) {
701                 ret = -ENOMEM;
702                 goto out;
703         }
704
705         if (PageUptodate(page)) {
706                 if (PageDirty(page)) {
707                         /*
708                          * we need to write the data to the defect sector. the
709                          * data that was in that sector is not in memory,
710                          * because the page was modified. we must not write the
711                          * modified page to that sector.
712                          *
713                          * TODO: what could be done here: wait for the delalloc
714                          *       runner to write out that page (might involve
715                          *       COW) and see whether the sector is still
716                          *       referenced afterwards.
717                          *
718                          * For the time being, we'll treat this error as
719                          * uncorrectable, although there is a chance that a
720                          * later scrub will find the bad sector again and that
721                          * there will be no dirty page in memory by then.
722                          */
723                         ret = -EIO;
724                         goto out;
725                 }
726                 ret = repair_io_failure(inode, offset, PAGE_SIZE,
727                                         fixup->logical, page,
728                                         offset - page_offset(page),
729                                         fixup->mirror_num);
730                 unlock_page(page);
731                 corrected = !ret;
732         } else {
733                 /*
734                  * we need to get good data first. the general readpage path
735                  * will call repair_io_failure for us, we just have to make
736                  * sure we read the bad mirror.
737                  */
738                 ret = set_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
739                                         EXTENT_DAMAGED, GFP_NOFS);
740                 if (ret) {
741                         /* set_extent_bits should return a proper error */
742                         WARN_ON(ret > 0);
743                         if (ret > 0)
744                                 ret = -EFAULT;
745                         goto out;
746                 }
747
748                 ret = extent_read_full_page(&BTRFS_I(inode)->io_tree, page,
749                                                 btrfs_get_extent,
750                                                 fixup->mirror_num);
751                 wait_on_page_locked(page);
752
753                 corrected = !test_range_bit(&BTRFS_I(inode)->io_tree, offset,
754                                                 end, EXTENT_DAMAGED, 0, NULL);
755                 if (!corrected)
756                         clear_extent_bits(&BTRFS_I(inode)->io_tree, offset, end,
757                                                 EXTENT_DAMAGED, GFP_NOFS);
758         }
759
760 out:
761         if (page)
762                 put_page(page);
763
764         iput(inode);
765
766         if (ret < 0)
767                 return ret;
768
769         if (ret == 0 && corrected) {
770                 /*
771                  * we only need to call readpage for one of the inodes belonging
772                  * to this extent. so make iterate_extent_inodes stop
773                  */
774                 return 1;
775         }
776
777         return -EIO;
778 }
779
780 static void scrub_fixup_nodatasum(struct btrfs_work *work)
781 {
782         int ret;
783         struct scrub_fixup_nodatasum *fixup;
784         struct scrub_ctx *sctx;
785         struct btrfs_trans_handle *trans = NULL;
786         struct btrfs_path *path;
787         int uncorrectable = 0;
788
789         fixup = container_of(work, struct scrub_fixup_nodatasum, work);
790         sctx = fixup->sctx;
791
792         path = btrfs_alloc_path();
793         if (!path) {
794                 spin_lock(&sctx->stat_lock);
795                 ++sctx->stat.malloc_errors;
796                 spin_unlock(&sctx->stat_lock);
797                 uncorrectable = 1;
798                 goto out;
799         }
800
801         trans = btrfs_join_transaction(fixup->root);
802         if (IS_ERR(trans)) {
803                 uncorrectable = 1;
804                 goto out;
805         }
806
807         /*
808          * the idea is to trigger a regular read through the standard path. we
809          * read a page from the (failed) logical address by specifying the
810          * corresponding copynum of the failed sector. thus, that readpage is
811          * expected to fail.
812          * that is the point where on-the-fly error correction will kick in
813          * (once it's finished) and rewrite the failed sector if a good copy
814          * can be found.
815          */
816         ret = iterate_inodes_from_logical(fixup->logical, fixup->root->fs_info,
817                                                 path, scrub_fixup_readpage,
818                                                 fixup);
819         if (ret < 0) {
820                 uncorrectable = 1;
821                 goto out;
822         }
823         WARN_ON(ret != 1);
824
825         spin_lock(&sctx->stat_lock);
826         ++sctx->stat.corrected_errors;
827         spin_unlock(&sctx->stat_lock);
828
829 out:
830         if (trans && !IS_ERR(trans))
831                 btrfs_end_transaction(trans, fixup->root);
832         if (uncorrectable) {
833                 spin_lock(&sctx->stat_lock);
834                 ++sctx->stat.uncorrectable_errors;
835                 spin_unlock(&sctx->stat_lock);
836                 btrfs_dev_replace_stats_inc(
837                         &sctx->dev_root->fs_info->dev_replace.
838                         num_uncorrectable_read_errors);
839                 printk_ratelimited_in_rcu(KERN_ERR "BTRFS: "
840                     "unable to fixup (nodatasum) error at logical %llu on dev %s\n",
841                         fixup->logical, rcu_str_deref(fixup->dev->name));
842         }
843
844         btrfs_free_path(path);
845         kfree(fixup);
846
847         scrub_pending_trans_workers_dec(sctx);
848 }
849
850 static inline void scrub_get_recover(struct scrub_recover *recover)
851 {
852         atomic_inc(&recover->refs);
853 }
854
855 static inline void scrub_put_recover(struct scrub_recover *recover)
856 {
857         if (atomic_dec_and_test(&recover->refs)) {
858                 kfree(recover->bbio);
859                 kfree(recover);
860         }
861 }
862
863 /*
864  * scrub_handle_errored_block gets called when either verification of the
865  * pages failed or the bio failed to read, e.g. with EIO. In the latter
866  * case, this function handles all pages in the bio, even though only one
867  * may be bad.
868  * The goal of this function is to repair the errored block by using the
869  * contents of one of the mirrors.
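 *
 * Rough outline of the code below: super block errors are only counted;
 * otherwise every mirror is re-read page by page, the failed mirror is
 * rechecked first, then a completely good mirror is used to rewrite the bad
 * block (or, for dev-replace, to feed the target device), and as a last
 * resort individual good pages from different mirrors are combined.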
870  */
871 static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
872 {
873         struct scrub_ctx *sctx = sblock_to_check->sctx;
874         struct btrfs_device *dev;
875         struct btrfs_fs_info *fs_info;
876         u64 length;
877         u64 logical;
878         u64 generation;
879         unsigned int failed_mirror_index;
880         unsigned int is_metadata;
881         unsigned int have_csum;
882         u8 *csum;
883         struct scrub_block *sblocks_for_recheck; /* holds one for each mirror */
884         struct scrub_block *sblock_bad;
885         int ret;
886         int mirror_index;
887         int page_num;
888         int success;
889         static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
890                                       DEFAULT_RATELIMIT_BURST);
891
892         BUG_ON(sblock_to_check->page_count < 1);
893         fs_info = sctx->dev_root->fs_info;
894         if (sblock_to_check->pagev[0]->flags & BTRFS_EXTENT_FLAG_SUPER) {
895                 /*
896                  * if we find an error in a super block, we just report it;
897                  * super blocks will get rewritten with the next transaction
898                  * commit anyway
899                  */
900                 spin_lock(&sctx->stat_lock);
901                 ++sctx->stat.super_errors;
902                 spin_unlock(&sctx->stat_lock);
903                 return 0;
904         }
905         length = sblock_to_check->page_count * PAGE_SIZE;
906         logical = sblock_to_check->pagev[0]->logical;
907         generation = sblock_to_check->pagev[0]->generation;
908         BUG_ON(sblock_to_check->pagev[0]->mirror_num < 1);
909         failed_mirror_index = sblock_to_check->pagev[0]->mirror_num - 1;
910         is_metadata = !(sblock_to_check->pagev[0]->flags &
911                         BTRFS_EXTENT_FLAG_DATA);
912         have_csum = sblock_to_check->pagev[0]->have_csum;
913         csum = sblock_to_check->pagev[0]->csum;
914         dev = sblock_to_check->pagev[0]->dev;
915
916         if (sctx->is_dev_replace && !is_metadata && !have_csum) {
917                 sblocks_for_recheck = NULL;
918                 goto nodatasum_case;
919         }
920
921         /*
922          * read all mirrors one after the other. This includes
923          * re-reading the extent or metadata block that failed (which
924          * was the reason this fixup code was called) one more time,
925          * page by page this time in order to know which pages
926          * caused I/O errors and which ones are good (for all mirrors).
927          * It is the goal to handle the situation when more than one
928          * mirror contains I/O errors, but the errors do not
929          * overlap, i.e. the data can be repaired by selecting the
930          * pages from those mirrors without I/O error on the
931          * particular pages. One example (with blocks >= 2 * PAGE_SIZE)
932          * would be that mirror #1 has an I/O error on the first page,
933          * the second page is good, and mirror #2 has an I/O error on
934          * the second page, but the first page is good.
935          * Then the first page of the first mirror can be repaired by
936          * taking the first page of the second mirror, and the
937          * second page of the second mirror can be repaired by
938          * copying the contents of the 2nd page of the 1st mirror.
939          * One more note: if the pages of one mirror contain I/O
940          * errors, the checksum cannot be verified. In order to get
941          * the best data for repairing, the first attempt is to find
942          * a mirror without I/O errors and with a validated checksum.
943          * Only if this is not possible, the pages are picked from
944          * mirrors with I/O errors without considering the checksum.
945          * If the latter is the case, at the end, the checksum of the
946          * repaired area is verified in order to correctly maintain
947          * the statistics.
948          */
949
950         sblocks_for_recheck = kzalloc(BTRFS_MAX_MIRRORS *
951                                      sizeof(*sblocks_for_recheck),
952                                      GFP_NOFS);
953         if (!sblocks_for_recheck) {
954                 spin_lock(&sctx->stat_lock);
955                 sctx->stat.malloc_errors++;
956                 sctx->stat.read_errors++;
957                 sctx->stat.uncorrectable_errors++;
958                 spin_unlock(&sctx->stat_lock);
959                 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
960                 goto out;
961         }
962
963         /* setup the context, map the logical blocks and alloc the pages */
964         ret = scrub_setup_recheck_block(sctx, fs_info, sblock_to_check, length,
965                                         logical, sblocks_for_recheck);
966         if (ret) {
967                 spin_lock(&sctx->stat_lock);
968                 sctx->stat.read_errors++;
969                 sctx->stat.uncorrectable_errors++;
970                 spin_unlock(&sctx->stat_lock);
971                 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
972                 goto out;
973         }
974         BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
975         sblock_bad = sblocks_for_recheck + failed_mirror_index;
976
977         /* build and submit the bios for the failed mirror, check checksums */
978         scrub_recheck_block(fs_info, sblock_bad, is_metadata, have_csum,
979                             csum, generation, sctx->csum_size, 1);
980
981         if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
982             sblock_bad->no_io_error_seen) {
983                 /*
984                  * the error disappeared after reading page by page, or
985                  * the area was part of a huge bio and other parts of the
986                  * bio caused I/O errors, or the block layer merged several
987                  * read requests into one and the error is caused by a
988                  * different bio (usually one of the two latter cases is
989                  * the cause)
990                  */
991                 spin_lock(&sctx->stat_lock);
992                 sctx->stat.unverified_errors++;
993                 sblock_to_check->data_corrected = 1;
994                 spin_unlock(&sctx->stat_lock);
995
996                 if (sctx->is_dev_replace)
997                         scrub_write_block_to_dev_replace(sblock_bad);
998                 goto out;
999         }
1000
1001         if (!sblock_bad->no_io_error_seen) {
1002                 spin_lock(&sctx->stat_lock);
1003                 sctx->stat.read_errors++;
1004                 spin_unlock(&sctx->stat_lock);
1005                 if (__ratelimit(&_rs))
1006                         scrub_print_warning("i/o error", sblock_to_check);
1007                 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_READ_ERRS);
1008         } else if (sblock_bad->checksum_error) {
1009                 spin_lock(&sctx->stat_lock);
1010                 sctx->stat.csum_errors++;
1011                 spin_unlock(&sctx->stat_lock);
1012                 if (__ratelimit(&_rs))
1013                         scrub_print_warning("checksum error", sblock_to_check);
1014                 btrfs_dev_stat_inc_and_print(dev,
1015                                              BTRFS_DEV_STAT_CORRUPTION_ERRS);
1016         } else if (sblock_bad->header_error) {
1017                 spin_lock(&sctx->stat_lock);
1018                 sctx->stat.verify_errors++;
1019                 spin_unlock(&sctx->stat_lock);
1020                 if (__ratelimit(&_rs))
1021                         scrub_print_warning("checksum/header error",
1022                                             sblock_to_check);
1023                 if (sblock_bad->generation_error)
1024                         btrfs_dev_stat_inc_and_print(dev,
1025                                 BTRFS_DEV_STAT_GENERATION_ERRS);
1026                 else
1027                         btrfs_dev_stat_inc_and_print(dev,
1028                                 BTRFS_DEV_STAT_CORRUPTION_ERRS);
1029         }
1030
1031         if (sctx->readonly) {
1032                 ASSERT(!sctx->is_dev_replace);
1033                 goto out;
1034         }
1035
1036         if (!is_metadata && !have_csum) {
1037                 struct scrub_fixup_nodatasum *fixup_nodatasum;
1038
1039 nodatasum_case:
1040                 WARN_ON(sctx->is_dev_replace);
1041
1042                 /*
1043                  * !is_metadata and !have_csum, this means that the data
1044                  * might not be COW'ed and might be modified
1045                  * concurrently. The general strategy of working on the
1046                  * commit root does not help in the case when COW is not
1047                  * used.
1048                  */
1049                 fixup_nodatasum = kzalloc(sizeof(*fixup_nodatasum), GFP_NOFS);
1050                 if (!fixup_nodatasum)
1051                         goto did_not_correct_error;
1052                 fixup_nodatasum->sctx = sctx;
1053                 fixup_nodatasum->dev = dev;
1054                 fixup_nodatasum->logical = logical;
1055                 fixup_nodatasum->root = fs_info->extent_root;
1056                 fixup_nodatasum->mirror_num = failed_mirror_index + 1;
1057                 scrub_pending_trans_workers_inc(sctx);
1058                 btrfs_init_work(&fixup_nodatasum->work, btrfs_scrub_helper,
1059                                 scrub_fixup_nodatasum, NULL, NULL);
1060                 btrfs_queue_work(fs_info->scrub_workers,
1061                                  &fixup_nodatasum->work);
1062                 goto out;
1063         }
1064
1065         /*
1066          * now build and submit the bios for the other mirrors, check
1067          * checksums.
1068          * First try to pick the mirror which is completely without I/O
1069          * errors and also does not have a checksum error.
1070          * If one is found, and if a checksum is present, the full block
1071          * that is known to contain an error is rewritten. Afterwards
1072          * the block is known to be corrected.
1073          * If a mirror is found which is completely correct, and no
1074          * checksum is present, only those pages are rewritten that had
1075          * an I/O error in the block to be repaired, since it cannot be
1076          * determined which copy of the other pages is better (and it
1077          * could happen otherwise that a correct page would be
1078          * overwritten by a bad one).
1079          */
1080         for (mirror_index = 0;
1081              mirror_index < BTRFS_MAX_MIRRORS &&
1082              sblocks_for_recheck[mirror_index].page_count > 0;
1083              mirror_index++) {
1084                 struct scrub_block *sblock_other;
1085
1086                 if (mirror_index == failed_mirror_index)
1087                         continue;
1088                 sblock_other = sblocks_for_recheck + mirror_index;
1089
1090                 /* build and submit the bios, check checksums */
1091                 scrub_recheck_block(fs_info, sblock_other, is_metadata,
1092                                     have_csum, csum, generation,
1093                                     sctx->csum_size, 0);
1094
1095                 if (!sblock_other->header_error &&
1096                     !sblock_other->checksum_error &&
1097                     sblock_other->no_io_error_seen) {
1098                         if (sctx->is_dev_replace) {
1099                                 scrub_write_block_to_dev_replace(sblock_other);
1100                         } else {
1101                                 int force_write = is_metadata || have_csum;
1102
1103                                 ret = scrub_repair_block_from_good_copy(
1104                                                 sblock_bad, sblock_other,
1105                                                 force_write);
1106                         }
1107                         if (0 == ret)
1108                                 goto corrected_error;
1109                 }
1110         }
1111
1112         /*
1113          * for dev_replace, pick good pages and write to the target device.
1114          */
1115         if (sctx->is_dev_replace) {
1116                 success = 1;
1117                 for (page_num = 0; page_num < sblock_bad->page_count;
1118                      page_num++) {
1119                         int sub_success;
1120
1121                         sub_success = 0;
1122                         for (mirror_index = 0;
1123                              mirror_index < BTRFS_MAX_MIRRORS &&
1124                              sblocks_for_recheck[mirror_index].page_count > 0;
1125                              mirror_index++) {
1126                                 struct scrub_block *sblock_other =
1127                                         sblocks_for_recheck + mirror_index;
1128                                 struct scrub_page *page_other =
1129                                         sblock_other->pagev[page_num];
1130
1131                                 if (!page_other->io_error) {
1132                                         ret = scrub_write_page_to_dev_replace(
1133                                                         sblock_other, page_num);
1134                                         if (ret == 0) {
1135                                                 /* succeeded for this page */
1136                                                 sub_success = 1;
1137                                                 break;
1138                                         } else {
1139                                                 btrfs_dev_replace_stats_inc(
1140                                                         &sctx->dev_root->
1141                                                         fs_info->dev_replace.
1142                                                         num_write_errors);
1143                                         }
1144                                 }
1145                         }
1146
1147                         if (!sub_success) {
1148                                 /*
1149                                  * did not find a mirror to fetch the page
1150                                  * from. scrub_write_page_to_dev_replace()
1151                                  * handles this case (page->io_error) by
1152                                  * filling the block with zeros before
1153                                  * submitting the write request
1154                                  */
1155                                 success = 0;
1156                                 ret = scrub_write_page_to_dev_replace(
1157                                                 sblock_bad, page_num);
1158                                 if (ret)
1159                                         btrfs_dev_replace_stats_inc(
1160                                                 &sctx->dev_root->fs_info->
1161                                                 dev_replace.num_write_errors);
1162                         }
1163                 }
1164
1165                 goto out;
1166         }
1167
1168         /*
1169          * for regular scrub, repair those pages that are errored.
1170          * In case of I/O errors in the area that is supposed to be
1171          * repaired, continue by picking good copies of those pages.
1172          * Select the good pages from mirrors to rewrite bad pages from
1173          * the area to fix. Afterwards verify the checksum of the block
1174          * that is supposed to be repaired. This verification step is
1175          * only done for statistics counting and for the final scrub
1176          * report on whether errors remain.
1177          * A perfect algorithm could make use of the checksum and try
1178          * all possible combinations of pages from the different mirrors
1179          * until the checksum verification succeeds. For example, when
1180          * the 2nd page of mirror #1 faces I/O errors, and the 2nd page
1181          * of mirror #2 is readable but the final checksum test fails,
1182          * then the 2nd page of mirror #3 could be tried to see whether
1183          * the final checksum now succeeds. But this would be a rare
1184          * exception and is therefore not implemented. At least it
1185          * avoids overwriting the good copy.
1186          * A more useful improvement would be to pick the sectors
1187          * without I/O errors based on sector size (512 bytes on legacy
1188          * disks) instead of on PAGE_SIZE. Then maybe 512 bytes of one
1189          * mirror could be repaired by taking 512 bytes of a different
1190          * mirror, even if other 512 byte sectors in the same PAGE_SIZE
1191          * area are unreadable.
1192          */
1193
1194         /* can only fix I/O errors from here on */
1195         if (sblock_bad->no_io_error_seen)
1196                 goto did_not_correct_error;
1197
1198         success = 1;
1199         for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
1200                 struct scrub_page *page_bad = sblock_bad->pagev[page_num];
1201
1202                 if (!page_bad->io_error)
1203                         continue;
1204
1205                 for (mirror_index = 0;
1206                      mirror_index < BTRFS_MAX_MIRRORS &&
1207                      sblocks_for_recheck[mirror_index].page_count > 0;
1208                      mirror_index++) {
1209                         struct scrub_block *sblock_other = sblocks_for_recheck +
1210                                                            mirror_index;
1211                         struct scrub_page *page_other = sblock_other->pagev[
1212                                                         page_num];
1213
1214                         if (!page_other->io_error) {
1215                                 ret = scrub_repair_page_from_good_copy(
1216                                         sblock_bad, sblock_other, page_num, 0);
1217                                 if (0 == ret) {
1218                                         page_bad->io_error = 0;
1219                                         break; /* succeeded for this page */
1220                                 }
1221                         }
1222                 }
1223
1224                 if (page_bad->io_error) {
1225                         /* did not find a mirror to copy the page from */
1226                         success = 0;
1227                 }
1228         }
1229
1230         if (success) {
1231                 if (is_metadata || have_csum) {
1232                         /*
1233                          * need to verify the checksum now that all
1234                          * sectors on disk are repaired (the write
1235                          * request for data to be repaired is on its way).
1236                          * Just be lazy and use scrub_recheck_block()
1237                          * which re-reads the data before the checksum
1238                          * is verified, but most likely the data comes out
1239                          * of the page cache.
1240                          */
1241                         scrub_recheck_block(fs_info, sblock_bad,
1242                                             is_metadata, have_csum, csum,
1243                                             generation, sctx->csum_size, 1);
1244                         if (!sblock_bad->header_error &&
1245                             !sblock_bad->checksum_error &&
1246                             sblock_bad->no_io_error_seen)
1247                                 goto corrected_error;
1248                         else
1249                                 goto did_not_correct_error;
1250                 } else {
1251 corrected_error:
1252                         spin_lock(&sctx->stat_lock);
1253                         sctx->stat.corrected_errors++;
1254                         sblock_to_check->data_corrected = 1;
1255                         spin_unlock(&sctx->stat_lock);
1256                         printk_ratelimited_in_rcu(KERN_ERR
1257                                 "BTRFS: fixed up error at logical %llu on dev %s\n",
1258                                 logical, rcu_str_deref(dev->name));
1259                 }
1260         } else {
1261 did_not_correct_error:
1262                 spin_lock(&sctx->stat_lock);
1263                 sctx->stat.uncorrectable_errors++;
1264                 spin_unlock(&sctx->stat_lock);
1265                 printk_ratelimited_in_rcu(KERN_ERR
1266                         "BTRFS: unable to fixup (regular) error at logical %llu on dev %s\n",
1267                         logical, rcu_str_deref(dev->name));
1268         }
1269
1270 out:
1271         if (sblocks_for_recheck) {
1272                 for (mirror_index = 0; mirror_index < BTRFS_MAX_MIRRORS;
1273                      mirror_index++) {
1274                         struct scrub_block *sblock = sblocks_for_recheck +
1275                                                      mirror_index;
1276                         struct scrub_recover *recover;
1277                         int page_index;
1278
1279                         for (page_index = 0; page_index < sblock->page_count;
1280                              page_index++) {
1281                                 sblock->pagev[page_index]->sblock = NULL;
1282                                 recover = sblock->pagev[page_index]->recover;
1283                                 if (recover) {
1284                                         scrub_put_recover(recover);
1285                                         sblock->pagev[page_index]->recover =
1286                                                                         NULL;
1287                                 }
1288                                 scrub_page_put(sblock->pagev[page_index]);
1289                         }
1290                 }
1291                 kfree(sblocks_for_recheck);
1292         }
1293
1294         return 0;
1295 }
1296
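/*
 * scrub_nr_raid_mirrors() reports how many different ways a block can be
 * fetched during the recheck loop: if the bbio carries a raid_map and its
 * last real stripe is the Q stripe, the profile is RAID6 and there are 3
 * alternatives (the plain read plus two reconstruction paths); a RAID5
 * raid_map gives 2; for all other profiles every stripe is a full copy,
 * so num_stripes is used directly.
 */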
1297 static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio)
1298 {
1299         if (bbio->raid_map) {
1300                 int real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
1301
1302                 if (bbio->raid_map[real_stripes - 1] == RAID6_Q_STRIPE)
1303                         return 3;
1304                 else
1305                         return 2;
1306         } else {
1307                 return (int)bbio->num_stripes;
1308         }
1309 }
1310
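/*
 * For RAID5/6 the raid_map[] entries hold the logical start address of
 * each data stripe (parity slots are marked with RAID5_P_STRIPE /
 * RAID6_Q_STRIPE), so the helper below scans for the data stripe whose
 * [raid_map[i], raid_map[i] + mapped_length) range contains the logical
 * address.  A worked example with hypothetical numbers: with
 * raid_map = { 0, 65536, RAID5_P_STRIPE } and mapped_length = 65536,
 * logical = 69632 falls into the second data stripe, so *stripe_index
 * becomes 1 and *stripe_offset becomes 69632 - 65536 = 4096.  For all
 * other profiles the mirror number already selects the stripe and the
 * offset is 0.
 */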
1311 static inline void scrub_stripe_index_and_offset(u64 logical, u64 *raid_map,
1312                                                  u64 mapped_length,
1313                                                  int nstripes, int mirror,
1314                                                  int *stripe_index,
1315                                                  u64 *stripe_offset)
1316 {
1317         int i;
1318
1319         if (raid_map) {
1320                 /* RAID5/6 */
1321                 for (i = 0; i < nstripes; i++) {
1322                         if (raid_map[i] == RAID6_Q_STRIPE ||
1323                             raid_map[i] == RAID5_P_STRIPE)
1324                                 continue;
1325
1326                         if (logical >= raid_map[i] &&
1327                             logical < raid_map[i] + mapped_length)
1328                                 break;
1329                 }
1330
1331                 *stripe_index = i;
1332                 *stripe_offset = logical - raid_map[i];
1333         } else {
1334                 /* The other RAID type */
1335                 *stripe_index = mirror;
1336                 *stripe_offset = 0;
1337         }
1338 }
1339
1340 static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
1341                                      struct btrfs_fs_info *fs_info,
1342                                      struct scrub_block *original_sblock,
1343                                      u64 length, u64 logical,
1344                                      struct scrub_block *sblocks_for_recheck)
1345 {
1346         struct scrub_recover *recover;
1347         struct btrfs_bio *bbio;
1348         u64 sublen;
1349         u64 mapped_length;
1350         u64 stripe_offset;
1351         int stripe_index;
1352         int page_index;
1353         int mirror_index;
1354         int nmirrors;
1355         int ret;
1356
1357         /*
1358          * note: the two members ref_count and outstanding_pages
1359          * are not used (and not set) in the blocks that are used for
1360          * the recheck procedure
1361          */
1362
1363         page_index = 0;
1364         while (length > 0) {
1365                 sublen = min_t(u64, length, PAGE_SIZE);
1366                 mapped_length = sublen;
1367                 bbio = NULL;
1368
1369                 /*
1370                  * with a length of PAGE_SIZE, each returned stripe
1371                  * represents one mirror
1372                  */
1373                 ret = btrfs_map_sblock(fs_info, REQ_GET_READ_MIRRORS, logical,
1374                                        &mapped_length, &bbio, 0, 1);
1375                 if (ret || !bbio || mapped_length < sublen) {
1376                         kfree(bbio);
1377                         return -EIO;
1378                 }
1379
1380                 recover = kzalloc(sizeof(struct scrub_recover), GFP_NOFS);
1381                 if (!recover) {
1382                         kfree(bbio);
1383                         return -ENOMEM;
1384                 }
1385
1386                 atomic_set(&recover->refs, 1);
1387                 recover->bbio = bbio;
1388                 recover->map_length = mapped_length;
1389
1390                 BUG_ON(page_index >= SCRUB_PAGES_PER_RD_BIO);
1391
1392                 nmirrors = scrub_nr_raid_mirrors(bbio);
1393                 for (mirror_index = 0; mirror_index < nmirrors;
1394                      mirror_index++) {
1395                         struct scrub_block *sblock;
1396                         struct scrub_page *page;
1397
1398                         if (mirror_index >= BTRFS_MAX_MIRRORS)
1399                                 continue;
1400
1401                         sblock = sblocks_for_recheck + mirror_index;
1402                         sblock->sctx = sctx;
1403                         page = kzalloc(sizeof(*page), GFP_NOFS);
1404                         if (!page) {
1405 leave_nomem:
1406                                 spin_lock(&sctx->stat_lock);
1407                                 sctx->stat.malloc_errors++;
1408                                 spin_unlock(&sctx->stat_lock);
1409                                 scrub_put_recover(recover);
1410                                 return -ENOMEM;
1411                         }
1412                         scrub_page_get(page);
1413                         sblock->pagev[page_index] = page;
1414                         page->logical = logical;
1415
1416                         scrub_stripe_index_and_offset(logical, bbio->raid_map,
1417                                                       mapped_length,
1418                                                       bbio->num_stripes -
1419                                                       bbio->num_tgtdevs,
1420                                                       mirror_index,
1421                                                       &stripe_index,
1422                                                       &stripe_offset);
1423                         page->physical = bbio->stripes[stripe_index].physical +
1424                                          stripe_offset;
1425                         page->dev = bbio->stripes[stripe_index].dev;
1426
1427                         BUG_ON(page_index >= original_sblock->page_count);
1428                         page->physical_for_dev_replace =
1429                                 original_sblock->pagev[page_index]->
1430                                 physical_for_dev_replace;
1431                         /* for missing devices, dev->bdev is NULL */
1432                         page->mirror_num = mirror_index + 1;
1433                         sblock->page_count++;
1434                         page->page = alloc_page(GFP_NOFS);
1435                         if (!page->page)
1436                                 goto leave_nomem;
1437
1438                         scrub_get_recover(recover);
1439                         page->recover = recover;
1440                 }
1441                 scrub_put_recover(recover);
1442                 length -= sublen;
1443                 logical += sublen;
1444                 page_index++;
1445         }
1446
1447         return 0;
1448 }
1449
1450 struct scrub_bio_ret {
1451         struct completion event;
1452         int error;
1453 };
1454
1455 static void scrub_bio_wait_endio(struct bio *bio, int error)
1456 {
1457         struct scrub_bio_ret *ret = bio->bi_private;
1458
1459         ret->error = error;
1460         complete(&ret->event);
1461 }
1462
1463 static inline int scrub_is_page_on_raid56(struct scrub_page *page)
1464 {
1465         return page->recover && page->recover->bbio->raid_map;
1466 }
1467
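/*
 * Read a single page through the RAID56 recovery path and wait for the
 * result: the bio's end_io is pointed at scrub_bio_wait_endio(), which
 * records the error code and signals the completion, so the caller gets
 * a synchronous read out of the otherwise asynchronous recovery code.
 */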
1468 static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
1469                                         struct bio *bio,
1470                                         struct scrub_page *page)
1471 {
1472         struct scrub_bio_ret done;
1473         int ret;
1474
1475         init_completion(&done.event);
1476         done.error = 0;
1477         bio->bi_iter.bi_sector = page->logical >> 9;
1478         bio->bi_private = &done;
1479         bio->bi_end_io = scrub_bio_wait_endio;
1480
1481         ret = raid56_parity_recover(fs_info->fs_root, bio, page->recover->bbio,
1482                                     page->recover->map_length,
1483                                     page->mirror_num, 0);
1484         if (ret)
1485                 return ret;
1486
1487         wait_for_completion(&done.event);
1488         if (done.error)
1489                 return -EIO;
1490
1491         return 0;
1492 }
1493
1494 /*
1495  * This function checks the on-disk data for checksum errors, header
1496  * errors and read I/O errors. If any I/O error happens, the exact pages
1497  * that failed are marked as bad. The goal is to enable scrub to take
1498  * the good pages from all the mirrors, so that the pages that failed in
1499  * the mirror just handled can be repaired.
1500  */
1501 static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
1502                                 struct scrub_block *sblock, int is_metadata,
1503                                 int have_csum, u8 *csum, u64 generation,
1504                                 u16 csum_size, int retry_failed_mirror)
1505 {
1506         int page_num;
1507
1508         sblock->no_io_error_seen = 1;
1509         sblock->header_error = 0;
1510         sblock->checksum_error = 0;
1511
1512         for (page_num = 0; page_num < sblock->page_count; page_num++) {
1513                 struct bio *bio;
1514                 struct scrub_page *page = sblock->pagev[page_num];
1515
1516                 if (page->dev->bdev == NULL) {
1517                         page->io_error = 1;
1518                         sblock->no_io_error_seen = 0;
1519                         continue;
1520                 }
1521
1522                 WARN_ON(!page->page);
1523                 bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
1524                 if (!bio) {
1525                         page->io_error = 1;
1526                         sblock->no_io_error_seen = 0;
1527                         continue;
1528                 }
1529                 bio->bi_bdev = page->dev->bdev;
1530
1531                 bio_add_page(bio, page->page, PAGE_SIZE, 0);
1532                 if (!retry_failed_mirror && scrub_is_page_on_raid56(page)) {
1533                         if (scrub_submit_raid56_bio_wait(fs_info, bio, page))
1534                                 sblock->no_io_error_seen = 0;
1535                 } else {
1536                         bio->bi_iter.bi_sector = page->physical >> 9;
1537
1538                         if (btrfsic_submit_bio_wait(READ, bio))
1539                                 sblock->no_io_error_seen = 0;
1540                 }
1541
1542                 bio_put(bio);
1543         }
1544
1545         if (sblock->no_io_error_seen)
1546                 scrub_recheck_block_checksum(fs_info, sblock, is_metadata,
1547                                              have_csum, csum, generation,
1548                                              csum_size);
1549
1550         return;
1551 }
1552
1553 static inline int scrub_check_fsid(u8 fsid[],
1554                                    struct scrub_page *spage)
1555 {
1556         struct btrfs_fs_devices *fs_devices = spage->dev->fs_devices;
1557         int ret;
1558
1559         ret = memcmp(fsid, fs_devices->fsid, BTRFS_UUID_SIZE);
1560         return !ret;
1561 }
1562
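/*
 * Recompute the checksum of a block that may span several pages: each
 * page is kmapped in turn and fed to btrfs_csum_data().  For metadata
 * the on-disk checksum lives in the first BTRFS_CSUM_SIZE bytes of the
 * header, so those bytes are skipped on the first page, the expected
 * value is taken from the header itself, and the header fields (bytenr,
 * fsid, chunk tree uuid, generation) are validated first.  For data the
 * checksum passed in by the caller is compared, and nothing is done
 * when no checksum is available.
 */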
1563 static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
1564                                          struct scrub_block *sblock,
1565                                          int is_metadata, int have_csum,
1566                                          const u8 *csum, u64 generation,
1567                                          u16 csum_size)
1568 {
1569         int page_num;
1570         u8 calculated_csum[BTRFS_CSUM_SIZE];
1571         u32 crc = ~(u32)0;
1572         void *mapped_buffer;
1573
1574         WARN_ON(!sblock->pagev[0]->page);
1575         if (is_metadata) {
1576                 struct btrfs_header *h;
1577
1578                 mapped_buffer = kmap_atomic(sblock->pagev[0]->page);
1579                 h = (struct btrfs_header *)mapped_buffer;
1580
1581                 if (sblock->pagev[0]->logical != btrfs_stack_header_bytenr(h) ||
1582                     !scrub_check_fsid(h->fsid, sblock->pagev[0]) ||
1583                     memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
1584                            BTRFS_UUID_SIZE)) {
1585                         sblock->header_error = 1;
1586                 } else if (generation != btrfs_stack_header_generation(h)) {
1587                         sblock->header_error = 1;
1588                         sblock->generation_error = 1;
1589                 }
1590                 csum = h->csum;
1591         } else {
1592                 if (!have_csum)
1593                         return;
1594
1595                 mapped_buffer = kmap_atomic(sblock->pagev[0]->page);
1596         }
1597
1598         for (page_num = 0;;) {
1599                 if (page_num == 0 && is_metadata)
1600                         crc = btrfs_csum_data(
1601                                 ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE,
1602                                 crc, PAGE_SIZE - BTRFS_CSUM_SIZE);
1603                 else
1604                         crc = btrfs_csum_data(mapped_buffer, crc, PAGE_SIZE);
1605
1606                 kunmap_atomic(mapped_buffer);
1607                 page_num++;
1608                 if (page_num >= sblock->page_count)
1609                         break;
1610                 WARN_ON(!sblock->pagev[page_num]->page);
1611
1612                 mapped_buffer = kmap_atomic(sblock->pagev[page_num]->page);
1613         }
1614
1615         btrfs_csum_final(crc, calculated_csum);
1616         if (memcmp(calculated_csum, csum, csum_size))
1617                 sblock->checksum_error = 1;
1618 }
1619
1620 static int scrub_repair_block_from_good_copy(struct scrub_block *sblock_bad,
1621                                              struct scrub_block *sblock_good,
1622                                              int force_write)
1623 {
1624         int page_num;
1625         int ret = 0;
1626
1627         for (page_num = 0; page_num < sblock_bad->page_count; page_num++) {
1628                 int ret_sub;
1629
1630                 ret_sub = scrub_repair_page_from_good_copy(sblock_bad,
1631                                                            sblock_good,
1632                                                            page_num,
1633                                                            force_write);
1634                 if (ret_sub)
1635                         ret = ret_sub;
1636         }
1637
1638         return ret;
1639 }
1640
1641 static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
1642                                             struct scrub_block *sblock_good,
1643                                             int page_num, int force_write)
1644 {
1645         struct scrub_page *page_bad = sblock_bad->pagev[page_num];
1646         struct scrub_page *page_good = sblock_good->pagev[page_num];
1647
1648         BUG_ON(page_bad->page == NULL);
1649         BUG_ON(page_good->page == NULL);
1650         if (force_write || sblock_bad->header_error ||
1651             sblock_bad->checksum_error || page_bad->io_error) {
1652                 struct bio *bio;
1653                 int ret;
1654
1655                 if (!page_bad->dev->bdev) {
1656                         printk_ratelimited(KERN_WARNING "BTRFS: "
1657                                 "scrub_repair_page_from_good_copy(bdev == NULL) "
1658                                 "is unexpected!\n");
1659                         return -EIO;
1660                 }
1661
1662                 bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
1663                 if (!bio)
1664                         return -EIO;
1665                 bio->bi_bdev = page_bad->dev->bdev;
1666                 bio->bi_iter.bi_sector = page_bad->physical >> 9;
1667
1668                 ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
1669                 if (PAGE_SIZE != ret) {
1670                         bio_put(bio);
1671                         return -EIO;
1672                 }
1673
1674                 if (btrfsic_submit_bio_wait(WRITE, bio)) {
1675                         btrfs_dev_stat_inc_and_print(page_bad->dev,
1676                                 BTRFS_DEV_STAT_WRITE_ERRS);
1677                         btrfs_dev_replace_stats_inc(
1678                                 &sblock_bad->sctx->dev_root->fs_info->
1679                                 dev_replace.num_write_errors);
1680                         bio_put(bio);
1681                         return -EIO;
1682                 }
1683                 bio_put(bio);
1684         }
1685
1686         return 0;
1687 }
1688
1689 static void scrub_write_block_to_dev_replace(struct scrub_block *sblock)
1690 {
1691         int page_num;
1692
1693         /*
1694          * This block is only used to check the parity on the source device,
1695          * so its data need not be written to the destination device.
1696          */
1697         if (sblock->sparity)
1698                 return;
1699
1700         for (page_num = 0; page_num < sblock->page_count; page_num++) {
1701                 int ret;
1702
1703                 ret = scrub_write_page_to_dev_replace(sblock, page_num);
1704                 if (ret)
1705                         btrfs_dev_replace_stats_inc(
1706                                 &sblock->sctx->dev_root->fs_info->dev_replace.
1707                                 num_write_errors);
1708         }
1709 }
1710
1711 static int scrub_write_page_to_dev_replace(struct scrub_block *sblock,
1712                                            int page_num)
1713 {
1714         struct scrub_page *spage = sblock->pagev[page_num];
1715
1716         BUG_ON(spage->page == NULL);
1717         if (spage->io_error) {
1718                 void *mapped_buffer = kmap_atomic(spage->page);
1719
1720                 memset(mapped_buffer, 0, PAGE_CACHE_SIZE);
1721                 flush_dcache_page(spage->page);
1722                 kunmap_atomic(mapped_buffer);
1723         }
1724         return scrub_add_page_to_wr_bio(sblock->sctx, spage);
1725 }
1726
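/*
 * Queue one page for writing to the dev-replace target.  Pages are
 * appended to wr_curr_bio only while both the physical address on the
 * target and the logical address stay contiguous with what is already
 * in the bio; any discontinuity forces the current bio to be submitted
 * and the page is retried on a fresh bio.  A bio that reaches
 * pages_per_wr_bio pages is submitted right after the page is added.
 */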
1727 static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
1728                                     struct scrub_page *spage)
1729 {
1730         struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx;
1731         struct scrub_bio *sbio;
1732         int ret;
1733
1734         mutex_lock(&wr_ctx->wr_lock);
1735 again:
1736         if (!wr_ctx->wr_curr_bio) {
1737                 wr_ctx->wr_curr_bio = kzalloc(sizeof(*wr_ctx->wr_curr_bio),
1738                                               GFP_NOFS);
1739                 if (!wr_ctx->wr_curr_bio) {
1740                         mutex_unlock(&wr_ctx->wr_lock);
1741                         return -ENOMEM;
1742                 }
1743                 wr_ctx->wr_curr_bio->sctx = sctx;
1744                 wr_ctx->wr_curr_bio->page_count = 0;
1745         }
1746         sbio = wr_ctx->wr_curr_bio;
1747         if (sbio->page_count == 0) {
1748                 struct bio *bio;
1749
1750                 sbio->physical = spage->physical_for_dev_replace;
1751                 sbio->logical = spage->logical;
1752                 sbio->dev = wr_ctx->tgtdev;
1753                 bio = sbio->bio;
1754                 if (!bio) {
1755                         bio = btrfs_io_bio_alloc(GFP_NOFS, wr_ctx->pages_per_wr_bio);
1756                         if (!bio) {
1757                                 mutex_unlock(&wr_ctx->wr_lock);
1758                                 return -ENOMEM;
1759                         }
1760                         sbio->bio = bio;
1761                 }
1762
1763                 bio->bi_private = sbio;
1764                 bio->bi_end_io = scrub_wr_bio_end_io;
1765                 bio->bi_bdev = sbio->dev->bdev;
1766                 bio->bi_iter.bi_sector = sbio->physical >> 9;
1767                 sbio->err = 0;
1768         } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
1769                    spage->physical_for_dev_replace ||
1770                    sbio->logical + sbio->page_count * PAGE_SIZE !=
1771                    spage->logical) {
1772                 scrub_wr_submit(sctx);
1773                 goto again;
1774         }
1775
1776         ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
1777         if (ret != PAGE_SIZE) {
1778                 if (sbio->page_count < 1) {
1779                         bio_put(sbio->bio);
1780                         sbio->bio = NULL;
1781                         mutex_unlock(&wr_ctx->wr_lock);
1782                         return -EIO;
1783                 }
1784                 scrub_wr_submit(sctx);
1785                 goto again;
1786         }
1787
1788         sbio->pagev[sbio->page_count] = spage;
1789         scrub_page_get(spage);
1790         sbio->page_count++;
1791         if (sbio->page_count == wr_ctx->pages_per_wr_bio)
1792                 scrub_wr_submit(sctx);
1793         mutex_unlock(&wr_ctx->wr_lock);
1794
1795         return 0;
1796 }
1797
1798 static void scrub_wr_submit(struct scrub_ctx *sctx)
1799 {
1800         struct scrub_wr_ctx *wr_ctx = &sctx->wr_ctx;
1801         struct scrub_bio *sbio;
1802
1803         if (!wr_ctx->wr_curr_bio)
1804                 return;
1805
1806         sbio = wr_ctx->wr_curr_bio;
1807         wr_ctx->wr_curr_bio = NULL;
1808         WARN_ON(!sbio->bio->bi_bdev);
1809         scrub_pending_bio_inc(sctx);
1810         /* Process all writes in a single worker thread, so that the block
1811          * layer can order the requests before sending them to the driver;
1812          * this doubled the write performance on spinning disks when it was
1813          * measured with Linux 3.5. */
1814         btrfsic_submit_bio(WRITE, sbio->bio);
1815 }
1816
1817 static void scrub_wr_bio_end_io(struct bio *bio, int err)
1818 {
1819         struct scrub_bio *sbio = bio->bi_private;
1820         struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info;
1821
1822         sbio->err = err;
1823         sbio->bio = bio;
1824
1825         btrfs_init_work(&sbio->work, btrfs_scrubwrc_helper,
1826                          scrub_wr_bio_end_io_worker, NULL, NULL);
1827         btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work);
1828 }
1829
1830 static void scrub_wr_bio_end_io_worker(struct btrfs_work *work)
1831 {
1832         struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
1833         struct scrub_ctx *sctx = sbio->sctx;
1834         int i;
1835
1836         WARN_ON(sbio->page_count > SCRUB_PAGES_PER_WR_BIO);
1837         if (sbio->err) {
1838                 struct btrfs_dev_replace *dev_replace =
1839                         &sbio->sctx->dev_root->fs_info->dev_replace;
1840
1841                 for (i = 0; i < sbio->page_count; i++) {
1842                         struct scrub_page *spage = sbio->pagev[i];
1843
1844                         spage->io_error = 1;
1845                         btrfs_dev_replace_stats_inc(&dev_replace->
1846                                                     num_write_errors);
1847                 }
1848         }
1849
1850         for (i = 0; i < sbio->page_count; i++)
1851                 scrub_page_put(sbio->pagev[i]);
1852
1853         bio_put(sbio->bio);
1854         kfree(sbio);
1855         scrub_pending_bio_dec(sctx);
1856 }
1857
1858 static int scrub_checksum(struct scrub_block *sblock)
1859 {
1860         u64 flags;
1861         int ret;
1862
1863         WARN_ON(sblock->page_count < 1);
1864         flags = sblock->pagev[0]->flags;
1865         ret = 0;
1866         if (flags & BTRFS_EXTENT_FLAG_DATA)
1867                 ret = scrub_checksum_data(sblock);
1868         else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)
1869                 ret = scrub_checksum_tree_block(sblock);
1870         else if (flags & BTRFS_EXTENT_FLAG_SUPER)
1871                 (void)scrub_checksum_super(sblock);
1872         else
1873                 WARN_ON(1);
1874         if (ret)
1875                 scrub_handle_errored_block(sblock);
1876
1877         return ret;
1878 }
1879
1880 static int scrub_checksum_data(struct scrub_block *sblock)
1881 {
1882         struct scrub_ctx *sctx = sblock->sctx;
1883         u8 csum[BTRFS_CSUM_SIZE];
1884         u8 *on_disk_csum;
1885         struct page *page;
1886         void *buffer;
1887         u32 crc = ~(u32)0;
1888         int fail = 0;
1889         u64 len;
1890         int index;
1891
1892         BUG_ON(sblock->page_count < 1);
1893         if (!sblock->pagev[0]->have_csum)
1894                 return 0;
1895
1896         on_disk_csum = sblock->pagev[0]->csum;
1897         page = sblock->pagev[0]->page;
1898         buffer = kmap_atomic(page);
1899
1900         len = sctx->sectorsize;
1901         index = 0;
1902         for (;;) {
1903                 u64 l = min_t(u64, len, PAGE_SIZE);
1904
1905                 crc = btrfs_csum_data(buffer, crc, l);
1906                 kunmap_atomic(buffer);
1907                 len -= l;
1908                 if (len == 0)
1909                         break;
1910                 index++;
1911                 BUG_ON(index >= sblock->page_count);
1912                 BUG_ON(!sblock->pagev[index]->page);
1913                 page = sblock->pagev[index]->page;
1914                 buffer = kmap_atomic(page);
1915         }
1916
1917         btrfs_csum_final(crc, csum);
1918         if (memcmp(csum, on_disk_csum, sctx->csum_size))
1919                 fail = 1;
1920
1921         return fail;
1922 }
1923
1924 static int scrub_checksum_tree_block(struct scrub_block *sblock)
1925 {
1926         struct scrub_ctx *sctx = sblock->sctx;
1927         struct btrfs_header *h;
1928         struct btrfs_root *root = sctx->dev_root;
1929         struct btrfs_fs_info *fs_info = root->fs_info;
1930         u8 calculated_csum[BTRFS_CSUM_SIZE];
1931         u8 on_disk_csum[BTRFS_CSUM_SIZE];
1932         struct page *page;
1933         void *mapped_buffer;
1934         u64 mapped_size;
1935         void *p;
1936         u32 crc = ~(u32)0;
1937         int fail = 0;
1938         int crc_fail = 0;
1939         u64 len;
1940         int index;
1941
1942         BUG_ON(sblock->page_count < 1);
1943         page = sblock->pagev[0]->page;
1944         mapped_buffer = kmap_atomic(page);
1945         h = (struct btrfs_header *)mapped_buffer;
1946         memcpy(on_disk_csum, h->csum, sctx->csum_size);
1947
1948         /*
1949          * we don't use the getter functions here, as we
1950          * a) don't have an extent buffer and
1951          * b) the page is already kmapped
1952          */
1953
1954         if (sblock->pagev[0]->logical != btrfs_stack_header_bytenr(h))
1955                 ++fail;
1956
1957         if (sblock->pagev[0]->generation != btrfs_stack_header_generation(h))
1958                 ++fail;
1959
1960         if (!scrub_check_fsid(h->fsid, sblock->pagev[0]))
1961                 ++fail;
1962
1963         if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
1964                    BTRFS_UUID_SIZE))
1965                 ++fail;
1966
1967         len = sctx->nodesize - BTRFS_CSUM_SIZE;
1968         mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
1969         p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
1970         index = 0;
1971         for (;;) {
1972                 u64 l = min_t(u64, len, mapped_size);
1973
1974                 crc = btrfs_csum_data(p, crc, l);
1975                 kunmap_atomic(mapped_buffer);
1976                 len -= l;
1977                 if (len == 0)
1978                         break;
1979                 index++;
1980                 BUG_ON(index >= sblock->page_count);
1981                 BUG_ON(!sblock->pagev[index]->page);
1982                 page = sblock->pagev[index]->page;
1983                 mapped_buffer = kmap_atomic(page);
1984                 mapped_size = PAGE_SIZE;
1985                 p = mapped_buffer;
1986         }
1987
1988         btrfs_csum_final(crc, calculated_csum);
1989         if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
1990                 ++crc_fail;
1991
1992         return fail || crc_fail;
1993 }
1994
1995 static int scrub_checksum_super(struct scrub_block *sblock)
1996 {
1997         struct btrfs_super_block *s;
1998         struct scrub_ctx *sctx = sblock->sctx;
1999         u8 calculated_csum[BTRFS_CSUM_SIZE];
2000         u8 on_disk_csum[BTRFS_CSUM_SIZE];
2001         struct page *page;
2002         void *mapped_buffer;
2003         u64 mapped_size;
2004         void *p;
2005         u32 crc = ~(u32)0;
2006         int fail_gen = 0;
2007         int fail_cor = 0;
2008         u64 len;
2009         int index;
2010
2011         BUG_ON(sblock->page_count < 1);
2012         page = sblock->pagev[0]->page;
2013         mapped_buffer = kmap_atomic(page);
2014         s = (struct btrfs_super_block *)mapped_buffer;
2015         memcpy(on_disk_csum, s->csum, sctx->csum_size);
2016
2017         if (sblock->pagev[0]->logical != btrfs_super_bytenr(s))
2018                 ++fail_cor;
2019
2020         if (sblock->pagev[0]->generation != btrfs_super_generation(s))
2021                 ++fail_gen;
2022
2023         if (!scrub_check_fsid(s->fsid, sblock->pagev[0]))
2024                 ++fail_cor;
2025
2026         len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE;
2027         mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
2028         p = ((u8 *)mapped_buffer) + BTRFS_CSUM_SIZE;
2029         index = 0;
2030         for (;;) {
2031                 u64 l = min_t(u64, len, mapped_size);
2032
2033                 crc = btrfs_csum_data(p, crc, l);
2034                 kunmap_atomic(mapped_buffer);
2035                 len -= l;
2036                 if (len == 0)
2037                         break;
2038                 index++;
2039                 BUG_ON(index >= sblock->page_count);
2040                 BUG_ON(!sblock->pagev[index]->page);
2041                 page = sblock->pagev[index]->page;
2042                 mapped_buffer = kmap_atomic(page);
2043                 mapped_size = PAGE_SIZE;
2044                 p = mapped_buffer;
2045         }
2046
2047         btrfs_csum_final(crc, calculated_csum);
2048         if (memcmp(calculated_csum, on_disk_csum, sctx->csum_size))
2049                 ++fail_cor;
2050
2051         if (fail_cor + fail_gen) {
2052                 /*
2053                  * If we find an error in a super block, we just report it;
2054                  * the super blocks will be rewritten with the next
2055                  * transaction commit anyway.
2056                  */
2057                 spin_lock(&sctx->stat_lock);
2058                 ++sctx->stat.super_errors;
2059                 spin_unlock(&sctx->stat_lock);
2060                 if (fail_cor)
2061                         btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
2062                                 BTRFS_DEV_STAT_CORRUPTION_ERRS);
2063                 else
2064                         btrfs_dev_stat_inc_and_print(sblock->pagev[0]->dev,
2065                                 BTRFS_DEV_STAT_GENERATION_ERRS);
2066         }
2067
2068         return fail_cor + fail_gen;
2069 }
2070
2071 static void scrub_block_get(struct scrub_block *sblock)
2072 {
2073         atomic_inc(&sblock->ref_count);
2074 }
2075
2076 static void scrub_block_put(struct scrub_block *sblock)
2077 {
2078         if (atomic_dec_and_test(&sblock->ref_count)) {
2079                 int i;
2080
2081                 if (sblock->sparity)
2082                         scrub_parity_put(sblock->sparity);
2083
2084                 for (i = 0; i < sblock->page_count; i++)
2085                         scrub_page_put(sblock->pagev[i]);
2086                 kfree(sblock);
2087         }
2088 }
2089
2090 static void scrub_page_get(struct scrub_page *spage)
2091 {
2092         atomic_inc(&spage->ref_count);
2093 }
2094
2095 static void scrub_page_put(struct scrub_page *spage)
2096 {
2097         if (atomic_dec_and_test(&spage->ref_count)) {
2098                 if (spage->page)
2099                         __free_page(spage->page);
2100                 kfree(spage);
2101         }
2102 }
2103
2104 static void scrub_submit(struct scrub_ctx *sctx)
2105 {
2106         struct scrub_bio *sbio;
2107
2108         if (sctx->curr == -1)
2109                 return;
2110
2111         sbio = sctx->bios[sctx->curr];
2112         sctx->curr = -1;
2113         scrub_pending_bio_inc(sctx);
2114
2115         if (!sbio->bio->bi_bdev) {
2116                 /*
2117                  * This case should not happen. If btrfs_map_block() were
2118                  * wrong, it could happen for dev-replace operations on
2119                  * missing devices when no mirrors are available, but in
2120                  * that case the mount should already have failed.
2121                  * It is still handled correctly here (just _very_ slowly).
2122                  */
2123                 printk_ratelimited(KERN_WARNING
2124                         "BTRFS: scrub_submit(bio bdev == NULL) is unexpected!\n");
2125                 bio_endio(sbio->bio, -EIO);
2126         } else {
2127                 btrfsic_submit_bio(READ, sbio->bio);
2128         }
2129 }
2130
2131 static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
2132                                     struct scrub_page *spage)
2133 {
2134         struct scrub_block *sblock = spage->sblock;
2135         struct scrub_bio *sbio;
2136         int ret;
2137
2138 again:
2139         /*
2140          * grab a fresh bio or wait for one to become available
2141          */
2142         while (sctx->curr == -1) {
2143                 spin_lock(&sctx->list_lock);
2144                 sctx->curr = sctx->first_free;
2145                 if (sctx->curr != -1) {
2146                         sctx->first_free = sctx->bios[sctx->curr]->next_free;
2147                         sctx->bios[sctx->curr]->next_free = -1;
2148                         sctx->bios[sctx->curr]->page_count = 0;
2149                         spin_unlock(&sctx->list_lock);
2150                 } else {
2151                         spin_unlock(&sctx->list_lock);
2152                         wait_event(sctx->list_wait, sctx->first_free != -1);
2153                 }
2154         }
2155         sbio = sctx->bios[sctx->curr];
2156         if (sbio->page_count == 0) {
2157                 struct bio *bio;
2158
2159                 sbio->physical = spage->physical;
2160                 sbio->logical = spage->logical;
2161                 sbio->dev = spage->dev;
2162                 bio = sbio->bio;
2163                 if (!bio) {
2164                         bio = btrfs_io_bio_alloc(GFP_NOFS, sctx->pages_per_rd_bio);
2165                         if (!bio)
2166                                 return -ENOMEM;
2167                         sbio->bio = bio;
2168                 }
2169
2170                 bio->bi_private = sbio;
2171                 bio->bi_end_io = scrub_bio_end_io;
2172                 bio->bi_bdev = sbio->dev->bdev;
2173                 bio->bi_iter.bi_sector = sbio->physical >> 9;
2174                 sbio->err = 0;
2175         } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
2176                    spage->physical ||
2177                    sbio->logical + sbio->page_count * PAGE_SIZE !=
2178                    spage->logical ||
2179                    sbio->dev != spage->dev) {
2180                 scrub_submit(sctx);
2181                 goto again;
2182         }
2183
2184         sbio->pagev[sbio->page_count] = spage;
2185         ret = bio_add_page(sbio->bio, spage->page, PAGE_SIZE, 0);
2186         if (ret != PAGE_SIZE) {
2187                 if (sbio->page_count < 1) {
2188                         bio_put(sbio->bio);
2189                         sbio->bio = NULL;
2190                         return -EIO;
2191                 }
2192                 scrub_submit(sctx);
2193                 goto again;
2194         }
2195
2196         scrub_block_get(sblock); /* one for the page added to the bio */
2197         atomic_inc(&sblock->outstanding_pages);
2198         sbio->page_count++;
2199         if (sbio->page_count == sctx->pages_per_rd_bio)
2200                 scrub_submit(sctx);
2201
2202         return 0;
2203 }
2204
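/*
 * Split the range [logical, logical + len) into PAGE_SIZE pieces, wrap
 * them in a scrub_block and queue every page into a read bio.  The block
 * holds one reference for this function plus one per page in flight, so
 * the final scrub_block_put() here or in the bio completion frees it.
 */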
2205 static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
2206                        u64 physical, struct btrfs_device *dev, u64 flags,
2207                        u64 gen, int mirror_num, u8 *csum, int force,
2208                        u64 physical_for_dev_replace)
2209 {
2210         struct scrub_block *sblock;
2211         int index;
2212
2213         sblock = kzalloc(sizeof(*sblock), GFP_NOFS);
2214         if (!sblock) {
2215                 spin_lock(&sctx->stat_lock);
2216                 sctx->stat.malloc_errors++;
2217                 spin_unlock(&sctx->stat_lock);
2218                 return -ENOMEM;
2219         }
2220
2221         /* one ref inside this function, plus one for each page added to
2222          * a bio later on */
2223         atomic_set(&sblock->ref_count, 1);
2224         sblock->sctx = sctx;
2225         sblock->no_io_error_seen = 1;
2226
2227         for (index = 0; len > 0; index++) {
2228                 struct scrub_page *spage;
2229                 u64 l = min_t(u64, len, PAGE_SIZE);
2230
2231                 spage = kzalloc(sizeof(*spage), GFP_NOFS);
2232                 if (!spage) {
2233 leave_nomem:
2234                         spin_lock(&sctx->stat_lock);
2235                         sctx->stat.malloc_errors++;
2236                         spin_unlock(&sctx->stat_lock);
2237                         scrub_block_put(sblock);
2238                         return -ENOMEM;
2239                 }
2240                 BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
2241                 scrub_page_get(spage);
2242                 sblock->pagev[index] = spage;
2243                 spage->sblock = sblock;
2244                 spage->dev = dev;
2245                 spage->flags = flags;
2246                 spage->generation = gen;
2247                 spage->logical = logical;
2248                 spage->physical = physical;
2249                 spage->physical_for_dev_replace = physical_for_dev_replace;
2250                 spage->mirror_num = mirror_num;
2251                 if (csum) {
2252                         spage->have_csum = 1;
2253                         memcpy(spage->csum, csum, sctx->csum_size);
2254                 } else {
2255                         spage->have_csum = 0;
2256                 }
2257                 sblock->page_count++;
2258                 spage->page = alloc_page(GFP_NOFS);
2259                 if (!spage->page)
2260                         goto leave_nomem;
2261                 len -= l;
2262                 logical += l;
2263                 physical += l;
2264                 physical_for_dev_replace += l;
2265         }
2266
2267         WARN_ON(sblock->page_count == 0);
2268         for (index = 0; index < sblock->page_count; index++) {
2269                 struct scrub_page *spage = sblock->pagev[index];
2270                 int ret;
2271
2272                 ret = scrub_add_page_to_rd_bio(sctx, spage);
2273                 if (ret) {
2274                         scrub_block_put(sblock);
2275                         return ret;
2276                 }
2277         }
2278
2279         if (force)
2280                 scrub_submit(sctx);
2281
2282         /* last one frees, either here or in bio completion for last page */
2283         scrub_block_put(sblock);
2284         return 0;
2285 }
2286
2287 static void scrub_bio_end_io(struct bio *bio, int err)
2288 {
2289         struct scrub_bio *sbio = bio->bi_private;
2290         struct btrfs_fs_info *fs_info = sbio->dev->dev_root->fs_info;
2291
2292         sbio->err = err;
2293         sbio->bio = bio;
2294
2295         btrfs_queue_work(fs_info->scrub_workers, &sbio->work);
2296 }
2297
2298 static void scrub_bio_end_io_worker(struct btrfs_work *work)
2299 {
2300         struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
2301         struct scrub_ctx *sctx = sbio->sctx;
2302         int i;
2303
2304         BUG_ON(sbio->page_count > SCRUB_PAGES_PER_RD_BIO);
2305         if (sbio->err) {
2306                 for (i = 0; i < sbio->page_count; i++) {
2307                         struct scrub_page *spage = sbio->pagev[i];
2308
2309                         spage->io_error = 1;
2310                         spage->sblock->no_io_error_seen = 0;
2311                 }
2312         }
2313
2314         /* now complete the scrub_block items that have all pages completed */
2315         for (i = 0; i < sbio->page_count; i++) {
2316                 struct scrub_page *spage = sbio->pagev[i];
2317                 struct scrub_block *sblock = spage->sblock;
2318
2319                 if (atomic_dec_and_test(&sblock->outstanding_pages))
2320                         scrub_block_complete(sblock);
2321                 scrub_block_put(sblock);
2322         }
2323
2324         bio_put(sbio->bio);
2325         sbio->bio = NULL;
2326         spin_lock(&sctx->list_lock);
2327         sbio->next_free = sctx->first_free;
2328         sctx->first_free = sbio->index;
2329         spin_unlock(&sctx->list_lock);
2330
2331         if (sctx->is_dev_replace &&
2332             atomic_read(&sctx->wr_ctx.flush_all_writes)) {
2333                 mutex_lock(&sctx->wr_ctx.wr_lock);
2334                 scrub_wr_submit(sctx);
2335                 mutex_unlock(&sctx->wr_ctx.wr_lock);
2336         }
2337
2338         scrub_pending_bio_dec(sctx);
2339 }
2340
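/*
 * The parity bitmaps below track one bit per sector of a single stripe,
 * so marking a range may wrap around the end of the bitmap.  A worked
 * example with hypothetical numbers: stripe_len = 64K, sectorsize = 4K,
 * so nsectors = 16; a range starting 56K into the stripe with len = 16K
 * gives offset = 14 and 4 sectors to mark, 14 + 4 > 16, so bits 14-15
 * are set first and the remaining 2 bits wrap around to bits 0-1.
 */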
2341 static inline void __scrub_mark_bitmap(struct scrub_parity *sparity,
2342                                        unsigned long *bitmap,
2343                                        u64 start, u64 len)
2344 {
2345         int offset;
2346         int nsectors;
2347         int sectorsize = sparity->sctx->dev_root->sectorsize;
2348
2349         if (len >= sparity->stripe_len) {
2350                 bitmap_set(bitmap, 0, sparity->nsectors);
2351                 return;
2352         }
2353
2354         start -= sparity->logic_start;
2355         offset = (int)do_div(start, sparity->stripe_len);
2356         offset /= sectorsize;
2357         nsectors = (int)len / sectorsize;
2358
2359         if (offset + nsectors <= sparity->nsectors) {
2360                 bitmap_set(bitmap, offset, nsectors);
2361                 return;
2362         }
2363
2364         bitmap_set(bitmap, offset, sparity->nsectors - offset);
2365         bitmap_set(bitmap, 0, nsectors - (sparity->nsectors - offset));
2366 }
2367
2368 static inline void scrub_parity_mark_sectors_error(struct scrub_parity *sparity,
2369                                                    u64 start, u64 len)
2370 {
2371         __scrub_mark_bitmap(sparity, sparity->ebitmap, start, len);
2372 }
2373
2374 static inline void scrub_parity_mark_sectors_data(struct scrub_parity *sparity,
2375                                                   u64 start, u64 len)
2376 {
2377         __scrub_mark_bitmap(sparity, sparity->dbitmap, start, len);
2378 }
2379
2380 static void scrub_block_complete(struct scrub_block *sblock)
2381 {
2382         int corrupted = 0;
2383
2384         if (!sblock->no_io_error_seen) {
2385                 corrupted = 1;
2386                 scrub_handle_errored_block(sblock);
2387         } else {
2388                 /*
2389                  * In the dev-replace case: if the block has a checksum
2390                  * error, it is written out via the repair mechanism;
2391                  * otherwise it is written to the target device here.
2392                  */
2393                 corrupted = scrub_checksum(sblock);
2394                 if (!corrupted && sblock->sctx->is_dev_replace)
2395                         scrub_write_block_to_dev_replace(sblock);
2396         }
2397
2398         if (sblock->sparity && corrupted && !sblock->data_corrected) {
2399                 u64 start = sblock->pagev[0]->logical;
2400                 u64 end = sblock->pagev[sblock->page_count - 1]->logical +
2401                           PAGE_SIZE;
2402
2403                 scrub_parity_mark_sectors_error(sblock->sparity,
2404                                                 start, end - start);
2405         }
2406 }
2407
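/*
 * Look up the data checksum for logical in sctx->csum_list.  The list is
 * ordered by bytenr; if the first entry starts beyond logical there is
 * no checksum and 0 is returned, while entries that end at or before
 * logical are stale and dropped (counted as csum_discards).  If the
 * first remaining entry covers logical, the per-sector checksum at index
 * (logical - bytenr) / sectorsize is copied to csum, the entry is freed
 * once its last sector has been consumed, and 1 is returned.
 */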
2408 static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u64 len,
2409                            u8 *csum)
2410 {
2411         struct btrfs_ordered_sum *sum = NULL;
2412         unsigned long index;
2413         unsigned long num_sectors;
2414
2415         while (!list_empty(&sctx->csum_list)) {
2416                 sum = list_first_entry(&sctx->csum_list,
2417                                        struct btrfs_ordered_sum, list);
2418                 if (sum->bytenr > logical)
2419                         return 0;
2420                 if (sum->bytenr + sum->len > logical)
2421                         break;
2422
2423                 ++sctx->stat.csum_discards;
2424                 list_del(&sum->list);
2425                 kfree(sum);
2426                 sum = NULL;
2427         }
2428         if (!sum)
2429                 return 0;
2430
2431         index = ((u32)(logical - sum->bytenr)) / sctx->sectorsize;
2432         num_sectors = sum->len / sctx->sectorsize;
2433         memcpy(csum, sum->sums + index, sctx->csum_size);
2434         if (index == num_sectors - 1) {
2435                 list_del(&sum->list);
2436                 kfree(sum);
2437         }
2438         return 1;
2439 }
2440
2441 /* scrub extent tries to collect up to 64 kB for each bio */
2442 static int scrub_extent(struct scrub_ctx *sctx, u64 logical, u64 len,
2443                         u64 physical, struct btrfs_device *dev, u64 flags,
2444                         u64 gen, int mirror_num, u64 physical_for_dev_replace)
2445 {
2446         int ret;
2447         u8 csum[BTRFS_CSUM_SIZE];
2448         u32 blocksize;
2449
2450         if (flags & BTRFS_EXTENT_FLAG_DATA) {
2451                 blocksize = sctx->sectorsize;
2452                 spin_lock(&sctx->stat_lock);
2453                 sctx->stat.data_extents_scrubbed++;
2454                 sctx->stat.data_bytes_scrubbed += len;
2455                 spin_unlock(&sctx->stat_lock);
2456         } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
2457                 blocksize = sctx->nodesize;
2458                 spin_lock(&sctx->stat_lock);
2459                 sctx->stat.tree_extents_scrubbed++;
2460                 sctx->stat.tree_bytes_scrubbed += len;
2461                 spin_unlock(&sctx->stat_lock);
2462         } else {
2463                 blocksize = sctx->sectorsize;
2464                 WARN_ON(1);
2465         }
2466
2467         while (len) {
2468                 u64 l = min_t(u64, len, blocksize);
2469                 int have_csum = 0;
2470
2471                 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2472                         /* push csums to sbio */
2473                         have_csum = scrub_find_csum(sctx, logical, l, csum);
2474                         if (have_csum == 0)
2475                                 ++sctx->stat.no_csum;
2476                         if (sctx->is_dev_replace && !have_csum) {
2477                                 ret = copy_nocow_pages(sctx, logical, l,
2478                                                        mirror_num,
2479                                                       physical_for_dev_replace);
2480                                 goto behind_scrub_pages;
2481                         }
2482                 }
2483                 ret = scrub_pages(sctx, logical, l, physical, dev, flags, gen,
2484                                   mirror_num, have_csum ? csum : NULL, 0,
2485                                   physical_for_dev_replace);
2486 behind_scrub_pages:
2487                 if (ret)
2488                         return ret;
2489                 len -= l;
2490                 logical += l;
2491                 physical += l;
2492                 physical_for_dev_replace += l;
2493         }
2494         return 0;
2495 }
2496
2497 static int scrub_pages_for_parity(struct scrub_parity *sparity,
2498                                   u64 logical, u64 len,
2499                                   u64 physical, struct btrfs_device *dev,
2500                                   u64 flags, u64 gen, int mirror_num, u8 *csum)
2501 {
2502         struct scrub_ctx *sctx = sparity->sctx;
2503         struct scrub_block *sblock;
2504         int index;
2505
2506         sblock = kzalloc(sizeof(*sblock), GFP_NOFS);
2507         if (!sblock) {
2508                 spin_lock(&sctx->stat_lock);
2509                 sctx->stat.malloc_errors++;
2510                 spin_unlock(&sctx->stat_lock);
2511                 return -ENOMEM;
2512         }
2513
2514         /* one ref inside this function, plus one for each page added to
2515          * a bio later on */
2516         atomic_set(&sblock->ref_count, 1);
2517         sblock->sctx = sctx;
2518         sblock->no_io_error_seen = 1;
2519         sblock->sparity = sparity;
2520         scrub_parity_get(sparity);
2521
2522         for (index = 0; len > 0; index++) {
2523                 struct scrub_page *spage;
2524                 u64 l = min_t(u64, len, PAGE_SIZE);
2525
2526                 spage = kzalloc(sizeof(*spage), GFP_NOFS);
2527                 if (!spage) {
2528 leave_nomem:
2529                         spin_lock(&sctx->stat_lock);
2530                         sctx->stat.malloc_errors++;
2531                         spin_unlock(&sctx->stat_lock);
2532                         scrub_block_put(sblock);
2533                         return -ENOMEM;
2534                 }
2535                 BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
2536                 /* For scrub block */
2537                 scrub_page_get(spage);
2538                 sblock->pagev[index] = spage;
2539                 /* For scrub parity */
2540                 scrub_page_get(spage);
2541                 list_add_tail(&spage->list, &sparity->spages);
2542                 spage->sblock = sblock;
2543                 spage->dev = dev;
2544                 spage->flags = flags;
2545                 spage->generation = gen;
2546                 spage->logical = logical;
2547                 spage->physical = physical;
2548                 spage->mirror_num = mirror_num;
2549                 if (csum) {
2550                         spage->have_csum = 1;
2551                         memcpy(spage->csum, csum, sctx->csum_size);
2552                 } else {
2553                         spage->have_csum = 0;
2554                 }
2555                 sblock->page_count++;
2556                 spage->page = alloc_page(GFP_NOFS);
2557                 if (!spage->page)
2558                         goto leave_nomem;
2559                 len -= l;
2560                 logical += l;
2561                 physical += l;
2562         }
2563
2564         WARN_ON(sblock->page_count == 0);
2565         for (index = 0; index < sblock->page_count; index++) {
2566                 struct scrub_page *spage = sblock->pagev[index];
2567                 int ret;
2568
2569                 ret = scrub_add_page_to_rd_bio(sctx, spage);
2570                 if (ret) {
2571                         scrub_block_put(sblock);
2572                         return ret;
2573                 }
2574         }
2575
2576         /* last one frees, either here or in bio completion for last page */
2577         scrub_block_put(sblock);
2578         return 0;
2579 }
2580
2581 static int scrub_extent_for_parity(struct scrub_parity *sparity,
2582                                    u64 logical, u64 len,
2583                                    u64 physical, struct btrfs_device *dev,
2584                                    u64 flags, u64 gen, int mirror_num)
2585 {
2586         struct scrub_ctx *sctx = sparity->sctx;
2587         int ret;
2588         u8 csum[BTRFS_CSUM_SIZE];
2589         u32 blocksize;
2590
2591         if (flags & BTRFS_EXTENT_FLAG_DATA) {
2592                 blocksize = sctx->sectorsize;
2593         } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
2594                 blocksize = sctx->nodesize;
2595         } else {
2596                 blocksize = sctx->sectorsize;
2597                 WARN_ON(1);
2598         }
2599
2600         while (len) {
2601                 u64 l = min_t(u64, len, blocksize);
2602                 int have_csum = 0;
2603
2604                 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2605                         /* push csums to sbio */
2606                         have_csum = scrub_find_csum(sctx, logical, l, csum);
2607                         if (have_csum == 0)
2608                                 goto skip;
2609                 }
2610                 ret = scrub_pages_for_parity(sparity, logical, l, physical, dev,
2611                                              flags, gen, mirror_num,
2612                                              have_csum ? csum : NULL);
2613                 if (ret)
2614                         return ret;
2615 skip:
2616                 len -= l;
2617                 logical += l;
2618                 physical += l;
2619         }
2620         return 0;
2621 }
2622
2623 /*
2624  * Given a physical address, this calculates its logical offset.
2625  * If this is a parity stripe, it returns the logical offset of the
2626  * leftmost data stripe.
2627  *
2628  * Returns 0 if it is a data stripe, 1 if it is a parity stripe.
2629  */
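/*
 * A worked example with a hypothetical layout: RAID5 over 3 devices
 * (2 data stripes plus parity per full stripe), stripe_len = 64K.
 * For num = 0 and a physical offset of 0, last_offset = 0 and the very
 * first iteration maps data stripe 0 of full stripe 0 onto device 0,
 * so the function returns 0 with *offset = 0.  For num = 2 and the
 * same physical offset, the rotation places the two data stripes of
 * full stripe 0 on devices 0 and 1, no iteration matches device 2,
 * and the function returns 1 (parity) with *offset = 2 * 64K.
 */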
2630 static int get_raid56_logic_offset(u64 physical, int num,
2631                                    struct map_lookup *map, u64 *offset,
2632                                    u64 *stripe_start)
2633 {
2634         int i;
2635         int j = 0;
2636         u64 stripe_nr;
2637         u64 last_offset;
2638         int stripe_index;
2639         int rot;
2640
2641         last_offset = (physical - map->stripes[num].physical) *
2642                       nr_data_stripes(map);
2643         if (stripe_start)
2644                 *stripe_start = last_offset;
2645
2646         *offset = last_offset;
2647         for (i = 0; i < nr_data_stripes(map); i++) {
2648                 *offset = last_offset + i * map->stripe_len;
2649
2650                 stripe_nr = *offset;
2651                 do_div(stripe_nr, map->stripe_len);
2652                 do_div(stripe_nr, nr_data_stripes(map));
2653
2654                 /* Work out the disk rotation on this stripe-set */
2655                 rot = do_div(stripe_nr, map->num_stripes);
2656                 /* calculate which stripe this data is located on */
2657                 rot += i;
2658                 stripe_index = rot % map->num_stripes;
2659                 if (stripe_index == num)
2660                         return 0;
2661                 if (stripe_index < num)
2662                         j++;
2663         }
2664         *offset = last_offset + j * map->stripe_len;
2665         return 1;
2666 }
2667
2668 static void scrub_free_parity(struct scrub_parity *sparity)
2669 {
2670         struct scrub_ctx *sctx = sparity->sctx;
2671         struct scrub_page *curr, *next;
2672         int nbits;
2673
2674         nbits = bitmap_weight(sparity->ebitmap, sparity->nsectors);
2675         if (nbits) {
2676                 spin_lock(&sctx->stat_lock);
2677                 sctx->stat.read_errors += nbits;
2678                 sctx->stat.uncorrectable_errors += nbits;
2679                 spin_unlock(&sctx->stat_lock);
2680         }
2681
2682         list_for_each_entry_safe(curr, next, &sparity->spages, list) {
2683                 list_del_init(&curr->list);
2684                 scrub_page_put(curr);
2685         }
2686
2687         kfree(sparity);
2688 }
2689
2690 static void scrub_parity_bio_endio(struct bio *bio, int error)
2691 {
2692         struct scrub_parity *sparity = (struct scrub_parity *)bio->bi_private;
2693         struct scrub_ctx *sctx = sparity->sctx;
2694
2695         if (error)
2696                 bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
2697                           sparity->nsectors);
2698
2699         scrub_free_parity(sparity);
2700         scrub_pending_bio_dec(sctx);
2701         bio_put(bio);
2702 }
2703
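/*
 * Called once the last reference on the scrub_parity is dropped: map the
 * full stripe, build a scrub rbio over the sectors still marked in
 * dbitmap (minus those already recorded in ebitmap), attach the data
 * pages read during the scrub and hand everything to the RAID56 layer,
 * which verifies and, if necessary, rewrites the parity.  If setup fails
 * before submission, all dbitmap sectors are accounted as errors through
 * ebitmap.
 */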
2704 static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
2705 {
2706         struct scrub_ctx *sctx = sparity->sctx;
2707         struct bio *bio;
2708         struct btrfs_raid_bio *rbio;
2709         struct scrub_page *spage;
2710         struct btrfs_bio *bbio = NULL;
2711         u64 length;
2712         int ret;
2713
2714         if (!bitmap_andnot(sparity->dbitmap, sparity->dbitmap, sparity->ebitmap,
2715                            sparity->nsectors))
2716                 goto out;
2717
2718         length = sparity->logic_end - sparity->logic_start + 1;
2719         ret = btrfs_map_sblock(sctx->dev_root->fs_info, WRITE,
2720                                sparity->logic_start,
2721                                &length, &bbio, 0, 1);
2722         if (ret || !bbio || !bbio->raid_map)
2723                 goto bbio_out;
2724
2725         bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
2726         if (!bio)
2727                 goto bbio_out;
2728
2729         bio->bi_iter.bi_sector = sparity->logic_start >> 9;
2730         bio->bi_private = sparity;
2731         bio->bi_end_io = scrub_parity_bio_endio;
2732
2733         rbio = raid56_parity_alloc_scrub_rbio(sctx->dev_root, bio, bbio,
2734                                               length, sparity->scrub_dev,
2735                                               sparity->dbitmap,
2736                                               sparity->nsectors);
2737         if (!rbio)
2738                 goto rbio_out;
2739
2740         list_for_each_entry(spage, &sparity->spages, list)
2741                 raid56_parity_add_scrub_pages(rbio, spage->page,
2742                                               spage->logical);
2743
2744         scrub_pending_bio_inc(sctx);
2745         raid56_parity_submit_scrub_rbio(rbio);
2746         return;
2747
2748 rbio_out:
2749         bio_put(bio);
2750 bbio_out:
2751         kfree(bbio);
2752         bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
2753                   sparity->nsectors);
2754         spin_lock(&sctx->stat_lock);
2755         sctx->stat.malloc_errors++;
2756         spin_unlock(&sctx->stat_lock);
2757 out:
2758         scrub_free_parity(sparity);
2759 }
2760
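/*
 * Length in bytes of one sector bitmap for a full stripe.  For example,
 * with a 64K stripe_len and 4K sectors there are 16 sectors, which on a
 * 64-bit machine is DIV_ROUND_UP(16, 64) = 1 long, i.e. 8 bytes per
 * bitmap.  scrub_raid56_parity() allocates two such bitmaps (dbitmap and
 * ebitmap) back to back in the trailing bitmap array of struct
 * scrub_parity.
 */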
2761 static inline int scrub_calc_parity_bitmap_len(int nsectors)
2762 {
2763         return DIV_ROUND_UP(nsectors, BITS_PER_LONG) * (BITS_PER_LONG / 8);
2764 }
2765
2766 static void scrub_parity_get(struct scrub_parity *sparity)
2767 {
2768         atomic_inc(&sparity->ref_count);
2769 }
2770
2771 static void scrub_parity_put(struct scrub_parity *sparity)
2772 {
2773         if (!atomic_dec_and_test(&sparity->ref_count))
2774                 return;
2775
2776         scrub_parity_check_and_repair(sparity);
2777 }
2778
2779 static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
2780                                                   struct map_lookup *map,
2781                                                   struct btrfs_device *sdev,
2782                                                   struct btrfs_path *path,
2783                                                   u64 logic_start,
2784                                                   u64 logic_end)
2785 {
2786         struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
2787         struct btrfs_root *root = fs_info->extent_root;
2788         struct btrfs_root *csum_root = fs_info->csum_root;
2789         struct btrfs_extent_item *extent;
2790         u64 flags;
2791         int ret;
2792         int slot;
2793         struct extent_buffer *l;
2794         struct btrfs_key key;
2795         u64 generation;
2796         u64 extent_logical;
2797         u64 extent_physical;
2798         u64 extent_len;
2799         struct btrfs_device *extent_dev;
2800         struct scrub_parity *sparity;
2801         int nsectors;
2802         int bitmap_len;
2803         int extent_mirror_num;
2804         int stop_loop = 0;
2805
2806         nsectors = map->stripe_len / root->sectorsize;
2807         bitmap_len = scrub_calc_parity_bitmap_len(nsectors);
2808         sparity = kzalloc(sizeof(struct scrub_parity) + 2 * bitmap_len,
2809                           GFP_NOFS);
2810         if (!sparity) {
2811                 spin_lock(&sctx->stat_lock);
2812                 sctx->stat.malloc_errors++;
2813                 spin_unlock(&sctx->stat_lock);
2814                 return -ENOMEM;
2815         }
2816
2817         sparity->stripe_len = map->stripe_len;
2818         sparity->nsectors = nsectors;
2819         sparity->sctx = sctx;
2820         sparity->scrub_dev = sdev;
2821         sparity->logic_start = logic_start;
2822         sparity->logic_end = logic_end;
2823         atomic_set(&sparity->ref_count, 1);
2824         INIT_LIST_HEAD(&sparity->spages);
2825         sparity->dbitmap = sparity->bitmap;
2826         sparity->ebitmap = (void *)sparity->bitmap + bitmap_len;
2827
2828         ret = 0;
2829         while (logic_start < logic_end) {
2830                 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
2831                         key.type = BTRFS_METADATA_ITEM_KEY;
2832                 else
2833                         key.type = BTRFS_EXTENT_ITEM_KEY;
2834                 key.objectid = logic_start;
2835                 key.offset = (u64)-1;
2836
2837                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2838                 if (ret < 0)
2839                         goto out;
2840
2841                 if (ret > 0) {
2842                         ret = btrfs_previous_extent_item(root, path, 0);
2843                         if (ret < 0)
2844                                 goto out;
2845                         if (ret > 0) {
2846                                 btrfs_release_path(path);
2847                                 ret = btrfs_search_slot(NULL, root, &key,
2848                                                         path, 0, 0);
2849                                 if (ret < 0)
2850                                         goto out;
2851                         }
2852                 }
2853
2854                 stop_loop = 0;
2855                 while (1) {
2856                         u64 bytes;
2857
2858                         l = path->nodes[0];
2859                         slot = path->slots[0];
2860                         if (slot >= btrfs_header_nritems(l)) {
2861                                 ret = btrfs_next_leaf(root, path);
2862                                 if (ret == 0)
2863                                         continue;
2864                                 if (ret < 0)
2865                                         goto out;
2866
2867                                 stop_loop = 1;
2868                                 break;
2869                         }
2870                         btrfs_item_key_to_cpu(l, &key, slot);
2871
2872                         if (key.type == BTRFS_METADATA_ITEM_KEY)
2873                                 bytes = root->nodesize;
2874                         else
2875                                 bytes = key.offset;
2876
2877                         if (key.objectid + bytes <= logic_start)
2878                                 goto next;
2879
2880                         if (key.type != BTRFS_EXTENT_ITEM_KEY &&
2881                             key.type != BTRFS_METADATA_ITEM_KEY)
2882                                 goto next;
2883
2884                         if (key.objectid > logic_end) {
2885                                 stop_loop = 1;
2886                                 break;
2887                         }
2888
2889                         while (key.objectid >= logic_start + map->stripe_len)
2890                                 logic_start += map->stripe_len;
2891
2892                         extent = btrfs_item_ptr(l, slot,
2893                                                 struct btrfs_extent_item);
2894                         flags = btrfs_extent_flags(l, extent);
2895                         generation = btrfs_extent_generation(l, extent);
2896
2897                         if (key.objectid < logic_start &&
2898                             (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) {
2899                                 btrfs_err(fs_info,
2900                                           "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
2901                                            key.objectid, logic_start);
2902                                 goto next;
2903                         }
2904 again:
2905                         extent_logical = key.objectid;
2906                         extent_len = bytes;
2907
2908                         if (extent_logical < logic_start) {
2909                                 extent_len -= logic_start - extent_logical;
2910                                 extent_logical = logic_start;
2911                         }
2912
2913                         if (extent_logical + extent_len >
2914                             logic_start + map->stripe_len)
2915                                 extent_len = logic_start + map->stripe_len -
2916                                              extent_logical;
2917
2918                         scrub_parity_mark_sectors_data(sparity, extent_logical,
2919                                                        extent_len);
2920
2921                         scrub_remap_extent(fs_info, extent_logical,
2922                                            extent_len, &extent_physical,
2923                                            &extent_dev,
2924                                            &extent_mirror_num);
2925
2926                         ret = btrfs_lookup_csums_range(csum_root,
2927                                                 extent_logical,
2928                                                 extent_logical + extent_len - 1,
2929                                                 &sctx->csum_list, 1);
2930                         if (ret)
2931                                 goto out;
2932
2933                         ret = scrub_extent_for_parity(sparity, extent_logical,
2934                                                       extent_len,
2935                                                       extent_physical,
2936                                                       extent_dev, flags,
2937                                                       generation,
2938                                                       extent_mirror_num);
2939                         if (ret)
2940                                 goto out;
2941
2942                         scrub_free_csums(sctx);
2943                         if (extent_logical + extent_len <
2944                             key.objectid + bytes) {
2945                                 logic_start += map->stripe_len;
2946
2947                                 if (logic_start >= logic_end) {
2948                                         stop_loop = 1;
2949                                         break;
2950                                 }
2951
2952                                 if (logic_start < key.objectid + bytes) {
2953                                         cond_resched();
2954                                         goto again;
2955                                 }
2956                         }
2957 next:
2958                         path->slots[0]++;
2959                 }
2960
2961                 btrfs_release_path(path);
2962
2963                 if (stop_loop)
2964                         break;
2965
2966                 logic_start += map->stripe_len;
2967         }
2968 out:
2969         if (ret < 0)
2970                 scrub_parity_mark_sectors_error(sparity, logic_start,
2971                                                 logic_end - logic_start + 1);
2972         scrub_parity_put(sparity);
2973         scrub_submit(sctx);
2974         mutex_lock(&sctx->wr_ctx.wr_lock);
2975         scrub_wr_submit(sctx);
2976         mutex_unlock(&sctx->wr_ctx.wr_lock);
2977
2978         btrfs_release_path(path);
2979         return ret < 0 ? ret : 0;
2980 }
2981
2982 static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
2983                                            struct map_lookup *map,
2984                                            struct btrfs_device *scrub_dev,
2985                                            int num, u64 base, u64 length,
2986                                            int is_dev_replace)
2987 {
2988         struct btrfs_path *path, *ppath;
2989         struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
2990         struct btrfs_root *root = fs_info->extent_root;
2991         struct btrfs_root *csum_root = fs_info->csum_root;
2992         struct btrfs_extent_item *extent;
2993         struct blk_plug plug;
2994         u64 flags;
2995         int ret;
2996         int slot;
2997         u64 nstripes;
2998         struct extent_buffer *l;
2999         struct btrfs_key key;
3000         u64 physical;
3001         u64 logical;
3002         u64 logic_end;
3003         u64 physical_end;
3004         u64 generation;
3005         int mirror_num;
3006         struct reada_control *reada1;
3007         struct reada_control *reada2;
3008         struct btrfs_key key_start;
3009         struct btrfs_key key_end;
3010         u64 increment = map->stripe_len;
3011         u64 offset;
3012         u64 extent_logical;
3013         u64 extent_physical;
3014         u64 extent_len;
3015         u64 stripe_logical;
3016         u64 stripe_end;
3017         struct btrfs_device *extent_dev;
3018         int extent_mirror_num;
3019         int stop_loop = 0;
3020
3021         nstripes = length;
3022         physical = map->stripes[num].physical;
3023         offset = 0;
3024         do_div(nstripes, map->stripe_len);
3025         if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
3026                 offset = map->stripe_len * num;
3027                 increment = map->stripe_len * map->num_stripes;
3028                 mirror_num = 1;
3029         } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
3030                 int factor = map->num_stripes / map->sub_stripes;
3031                 offset = map->stripe_len * (num / map->sub_stripes);
3032                 increment = map->stripe_len * factor;
3033                 mirror_num = num % map->sub_stripes + 1;
3034         } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) {
3035                 increment = map->stripe_len;
3036                 mirror_num = num % map->num_stripes + 1;
3037         } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
3038                 increment = map->stripe_len;
3039                 mirror_num = num % map->num_stripes + 1;
3040         } else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
3041                                 BTRFS_BLOCK_GROUP_RAID6)) {
3042                 get_raid56_logic_offset(physical, num, map, &offset, NULL);
3043                 increment = map->stripe_len * nr_data_stripes(map);
3044                 mirror_num = 1;
3045         } else {
3046                 increment = map->stripe_len;
3047                 mirror_num = 1;
3048         }
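        /*
         * For instance, on a 4-device RAID10 chunk (sub_stripes == 2) the
         * factor is 2, so the device at num == 3 starts at
         * offset == stripe_len * 1, advances by increment == 2 * stripe_len
         * per iteration and is scrubbed as mirror_num == 2.  For RAID5/6
         * one increment covers a full stripe worth of data,
         * stripe_len * nr_data_stripes(map).
         */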
3049
3050         path = btrfs_alloc_path();
3051         if (!path)
3052                 return -ENOMEM;
3053
3054         ppath = btrfs_alloc_path();
3055         if (!ppath) {
3056                 btrfs_free_path(path);
3057                 return -ENOMEM;
3058         }
3059
3060         /*
3061          * Work on the commit root. The related disk blocks are static as
3062          * long as COW is applied, which means it is safe to rewrite
3063          * them to repair disk errors without any race conditions.
3064          */
3065         path->search_commit_root = 1;
3066         path->skip_locking = 1;
3067
3068         /*
3069          * Trigger readahead for the extent tree and the csum tree and wait
3070          * for completion. During readahead, the scrub is officially paused
3071          * so that it does not hold off transaction commits.
3072          */
3073         logical = base + offset;
3074         physical_end = physical + nstripes * map->stripe_len;
3075         if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
3076                          BTRFS_BLOCK_GROUP_RAID6)) {
3077                 get_raid56_logic_offset(physical_end, num,
3078                                         map, &logic_end, NULL);
3079                 logic_end += base;
3080         } else {
3081                 logic_end = logical + increment * nstripes;
3082         }
3083         wait_event(sctx->list_wait,
3084                    atomic_read(&sctx->bios_in_flight) == 0);
3085         scrub_blocked_if_needed(fs_info);
3086
3087         /* FIXME it might be better to start readahead at commit root */
3088         key_start.objectid = logical;
3089         key_start.type = BTRFS_EXTENT_ITEM_KEY;
3090         key_start.offset = (u64)0;
3091         key_end.objectid = logic_end;
3092         key_end.type = BTRFS_METADATA_ITEM_KEY;
3093         key_end.offset = (u64)-1;
3094         reada1 = btrfs_reada_add(root, &key_start, &key_end);
3095
3096         key_start.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
3097         key_start.type = BTRFS_EXTENT_CSUM_KEY;
3098         key_start.offset = logical;
3099         key_end.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
3100         key_end.type = BTRFS_EXTENT_CSUM_KEY;
3101         key_end.offset = logic_end;
3102         reada2 = btrfs_reada_add(csum_root, &key_start, &key_end);
3103
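        /*
         * Readahead is only an optimization: if btrfs_reada_add() failed,
         * there is nothing to wait for and the scrub simply proceeds
         * without it.
         */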
3104         if (!IS_ERR(reada1))
3105                 btrfs_reada_wait(reada1);
3106         if (!IS_ERR(reada2))
3107                 btrfs_reada_wait(reada2);
3108
3109
3110         /*
3111          * Collect all data csums for the stripe to avoid seeking during
3112          * the scrub. This might currently (crc32) end up being about 1MB.
3113          */
3114         blk_start_plug(&plug);
3115
3116         /*
3117          * now find all extents for each stripe and scrub them
3118          */
3119         ret = 0;
3120         while (physical < physical_end) {
3121                 /* for RAID5/6, we skip the parity stripe */
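                /*
                 * If get_raid56_logic_offset() reports that this device
                 * holds parity here, the full stripe containing it is
                 * handed to scrub_raid56_parity() and the regular data
                 * scrub moves on to the next stripe.
                 */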
3122                 if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
3123                                 BTRFS_BLOCK_GROUP_RAID6)) {
3124                         ret = get_raid56_logic_offset(physical, num,
3125                                         map, &logical, &stripe_logical);
3126                         logical += base;
3127                         if (ret) {
3128                                 stripe_logical += base;
3129                                 stripe_end = stripe_logical + increment - 1;
3130                                 ret = scrub_raid56_parity(sctx, map, scrub_dev,
3131                                                 ppath, stripe_logical,
3132                                                 stripe_end);
3133                                 if (ret)
3134                                         goto out;
3135                                 goto skip;
3136                         }
3137                 }
3138                 /*
3139                  * canceled?
3140                  */
3141                 if (atomic_read(&fs_info->scrub_cancel_req) ||
3142                     atomic_read(&sctx->cancel_req)) {
3143                         ret = -ECANCELED;
3144                         goto out;
3145                 }
3146                 /*
3147                  * check to see if we have to pause
3148                  */
3149                 if (atomic_read(&fs_info->scrub_pause_req)) {
3150                         /* push queued extents */
3151                         atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
3152                         scrub_submit(sctx);
3153                         mutex_lock(&sctx->wr_ctx.wr_lock);
3154                         scrub_wr_submit(sctx);
3155                         mutex_unlock(&sctx->wr_ctx.wr_lock);
3156                         wait_event(sctx->list_wait,
3157                                    atomic_read(&sctx->bios_in_flight) == 0);
3158                         atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
3159                         scrub_blocked_if_needed(fs_info);
3160                 }
3161
3162                 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
3163                         key.type = BTRFS_METADATA_ITEM_KEY;
3164                 else
3165                         key.type = BTRFS_EXTENT_ITEM_KEY;
3166                 key.objectid = logical;
3167                 key.offset = (u64)-1;
3168
3169                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3170                 if (ret < 0)
3171                         goto out;
3172
3173                 if (ret > 0) {
3174                         ret = btrfs_previous_extent_item(root, path, 0);
3175                         if (ret < 0)
3176                                 goto out;
3177                         if (ret > 0) {
3178                                 /* there's no smaller item, so stick with the
3179                                  * larger one */
3180                                 btrfs_release_path(path);
3181                                 ret = btrfs_search_slot(NULL, root, &key,
3182                                                         path, 0, 0);
3183                                 if (ret < 0)
3184                                         goto out;
3185                         }
3186                 }
3187
3188                 stop_loop = 0;
3189                 while (1) {
3190                         u64 bytes;
3191
3192                         l = path->nodes[0];
3193                         slot = path->slots[0];
3194                         if (slot >= btrfs_header_nritems(l)) {
3195                                 ret = btrfs_next_leaf(root, path);
3196                                 if (ret == 0)
3197                                         continue;
3198                                 if (ret < 0)
3199                                         goto out;
3200
3201                                 stop_loop = 1;
3202                                 break;
3203                         }
3204                         btrfs_item_key_to_cpu(l, &key, slot);
3205
3206                         if (key.type == BTRFS_METADATA_ITEM_KEY)
3207                                 bytes = root->nodesize;
3208                         else
3209                                 bytes = key.offset;
3210
3211                         if (key.objectid + bytes <= logical)
3212                                 goto next;
3213
3214                         if (key.type != BTRFS_EXTENT_ITEM_KEY &&
3215                             key.type != BTRFS_METADATA_ITEM_KEY)
3216                                 goto next;
3217
3218                         if (key.objectid >= logical + map->stripe_len) {
3219                                 /* out of this device extent */
3220                                 if (key.objectid >= logic_end)
3221                                         stop_loop = 1;
3222                                 break;
3223                         }
3224
3225                         extent = btrfs_item_ptr(l, slot,
3226                                                 struct btrfs_extent_item);
3227                         flags = btrfs_extent_flags(l, extent);
3228                         generation = btrfs_extent_generation(l, extent);
3229
3230                         if (key.objectid < logical &&
3231                             (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) {
3232                                 btrfs_err(fs_info,
3233                                            "scrub: tree block %llu spanning "
3234                                            "stripes, ignored. logical=%llu",
3235                                        key.objectid, logical);
3236                                 goto next;
3237                         }
3238
3239 again:
3240                         extent_logical = key.objectid;
3241                         extent_len = bytes;
3242
3243                         /*
3244                          * trim extent to this stripe
3245                          */
3246                         if (extent_logical < logical) {
3247                                 extent_len -= logical - extent_logical;
3248                                 extent_logical = logical;
3249                         }
3250                         if (extent_logical + extent_len >
3251                             logical + map->stripe_len) {
3252                                 extent_len = logical + map->stripe_len -
3253                                              extent_logical;
3254                         }
3255
3256                         extent_physical = extent_logical - logical + physical;
3257                         extent_dev = scrub_dev;
3258                         extent_mirror_num = mirror_num;
3259                         if (is_dev_replace)
3260                                 scrub_remap_extent(fs_info, extent_logical,
3261                                                    extent_len, &extent_physical,
3262                                                    &extent_dev,
3263                                                    &extent_mirror_num);
3264
3265                         ret = btrfs_lookup_csums_range(csum_root, logical,
3266                                                 logical + map->stripe_len - 1,
3267                                                 &sctx->csum_list, 1);
3268                         if (ret)
3269                                 goto out;
3270
3271                         ret = scrub_extent(sctx, extent_logical, extent_len,
3272                                            extent_physical, extent_dev, flags,
3273                                            generation, extent_mirror_num,
3274                                            extent_logical - logical + physical);
3275                         if (ret)
3276                                 goto out;
3277
3278                         scrub_free_csums(sctx);
3279                         if (extent_logical + extent_len <
3280                             key.objectid + bytes) {
3281                                 if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
3282                                         BTRFS_BLOCK_GROUP_RAID6)) {
3283                                         /*
3284                                          * loop until we find next data stripe
3285                                          * or we have finished all stripes.
3286                                          */
3287 loop:
3288                                         physical += map->stripe_len;
3289                                         ret = get_raid56_logic_offset(physical,
3290                                                         num, map, &logical,
3291                                                         &stripe_logical);
3292                                         logical += base;
3293
3294                                         if (ret && physical < physical_end) {
3295                                                 stripe_logical += base;
3296                                                 stripe_end = stripe_logical +
3297                                                                 increment - 1;
3298                                                 ret = scrub_raid56_parity(sctx,
3299                                                         map, scrub_dev, ppath,
3300                                                         stripe_logical,
3301                                                         stripe_end);
3302                                                 if (ret)
3303                                                         goto out;
3304                                                 goto loop;
3305                                         }
3306                                 } else {
3307                                         physical += map->stripe_len;
3308                                         logical += increment;
3309                                 }
3310                                 if (logical < key.objectid + bytes) {
3311                                         cond_resched();
3312                                         goto again;
3313                                 }
3314
3315                                 if (physical >= physical_end) {
3316                                         stop_loop = 1;
3317                                         break;
3318                                 }
3319                         }
3320 next:
3321                         path->slots[0]++;
3322                 }
3323                 btrfs_release_path(path);
3324 skip:
3325                 logical += increment;
3326                 physical += map->stripe_len;
3327                 spin_lock(&sctx->stat_lock);
3328                 if (stop_loop)
3329                         sctx->stat.last_physical = map->stripes[num].physical +
3330                                                    length;
3331                 else
3332                         sctx->stat.last_physical = physical;
3333                 spin_unlock(&sctx->stat_lock);
3334                 if (stop_loop)
3335                         break;
3336         }
3337 out:
3338         /* push queued extents */
3339         scrub_submit(sctx);
3340         mutex_lock(&sctx->wr_ctx.wr_lock);
3341         scrub_wr_submit(sctx);
3342         mutex_unlock(&sctx->wr_ctx.wr_lock);
3343
3344         blk_finish_plug(&plug);
3345         btrfs_free_path(path);
3346         btrfs_free_path(ppath);
3347         return ret < 0 ? ret : 0;
3348 }
3349
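/*
 * Scrub the part of the chunk at @chunk_offset that lives on @scrub_dev:
 * look up the chunk mapping and run scrub_stripe() for every stripe of
 * this device that starts at @dev_offset.
 */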
3350 static noinline_for_stack int scrub_chunk(struct scrub_ctx *sctx,
3351                                           struct btrfs_device *scrub_dev,
3352                                           u64 chunk_tree, u64 chunk_objectid,
3353                                           u64 chunk_offset, u64 length,
3354                                           u64 dev_offset, int is_dev_replace)
3355 {
3356         struct btrfs_mapping_tree *map_tree =
3357                 &sctx->dev_root->fs_info->mapping_tree;
3358         struct map_lookup *map;
3359         struct extent_map *em;
3360         int i;
3361         int ret = 0;
3362
3363         read_lock(&map_tree->map_tree.lock);
3364         em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1);
3365         read_unlock(&map_tree->map_tree.lock);
3366
3367         if (!em)
3368                 return -EINVAL;
3369
3370         map = (struct map_lookup *)em->bdev;
3371         if (em->start != chunk_offset)
3372                 goto out;
3373
3374         if (em->len < length)
3375                 goto out;
3376
3377         for (i = 0; i < map->num_stripes; ++i) {
3378                 if (map->stripes[i].dev->bdev == scrub_dev->bdev &&
3379                     map->stripes[i].physical == dev_offset) {
3380                         ret = scrub_stripe(sctx, map, scrub_dev, i,
3381                                            chunk_offset, length,
3382                                            is_dev_replace);
3383                         if (ret)
3384                                 goto out;
3385                 }
3386         }
3387 out:
3388         free_extent_map(em);
3389
3390         return ret;
3391 }
3392
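/*
 * Walk the dev extents of @scrub_dev between @start and @end in the
 * device tree and scrub the corresponding chunks one after another.  All
 * pending read and write bios are flushed and waited for between chunks,
 * so the dev-replace cursor only advances over fully scrubbed ranges.
 */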
3393 static noinline_for_stack
3394 int scrub_enumerate_chunks(struct scrub_ctx *sctx,
3395                            struct btrfs_device *scrub_dev, u64 start, u64 end,
3396                            int is_dev_replace)
3397 {
3398         struct btrfs_dev_extent *dev_extent = NULL;
3399         struct btrfs_path *path;
3400         struct btrfs_root *root = sctx->dev_root;
3401         struct btrfs_fs_info *fs_info = root->fs_info;
3402         u64 length;
3403         u64 chunk_tree;
3404         u64 chunk_objectid;
3405         u64 chunk_offset;
3406         int ret;
3407         int slot;
3408         struct extent_buffer *l;
3409         struct btrfs_key key;
3410         struct btrfs_key found_key;
3411         struct btrfs_block_group_cache *cache;
3412         struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
3413
3414         path = btrfs_alloc_path();
3415         if (!path)
3416                 return -ENOMEM;
3417
3418         path->reada = 2;
3419         path->search_commit_root = 1;
3420         path->skip_locking = 1;
3421
3422         key.objectid = scrub_dev->devid;
3423         key.offset = 0ull;
3424         key.type = BTRFS_DEV_EXTENT_KEY;
3425
3426         while (1) {
3427                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
3428                 if (ret < 0)
3429                         break;
3430                 if (ret > 0) {
3431                         if (path->slots[0] >=
3432                             btrfs_header_nritems(path->nodes[0])) {
3433                                 ret = btrfs_next_leaf(root, path);
3434                                 if (ret)
3435                                         break;
3436                         }
3437                 }
3438
3439                 l = path->nodes[0];
3440                 slot = path->slots[0];
3441
3442                 btrfs_item_key_to_cpu(l, &found_key, slot);
3443
3444                 if (found_key.objectid != scrub_dev->devid)
3445                         break;
3446
3447                 if (found_key.type != BTRFS_DEV_EXTENT_KEY)
3448                         break;
3449
3450                 if (found_key.offset >= end)
3451                         break;
3452
3453                 if (found_key.offset < key.offset)
3454                         break;
3455
3456                 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
3457                 length = btrfs_dev_extent_length(l, dev_extent);
3458
3459                 if (found_key.offset + length <= start)
3460                         goto skip;
3461
3462                 chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
3463                 chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
3464                 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
3465
3466                 /*
3467                  * get a reference on the corresponding block group to prevent
3468                  * the chunk from going away while we scrub it
3469                  */
3470                 cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3471
3472                 /* some chunks are removed but not committed to disk yet;
3473                  * continue scrubbing */
3474                 if (!cache)
3475                         goto skip;
3476
3477                 dev_replace->cursor_right = found_key.offset + length;
3478                 dev_replace->cursor_left = found_key.offset;
3479                 dev_replace->item_needs_writeback = 1;
3480                 ret = scrub_chunk(sctx, scrub_dev, chunk_tree, chunk_objectid,
3481                                   chunk_offset, length, found_key.offset,
3482                                   is_dev_replace);
3483
3484                 /*
3485                  * flush, submit all pending read and write bios, afterwards
3486                  * wait for them.
3487                  * Note that in the dev replace case, a read request causes
3488                  * write requests that are submitted in the read completion
3489                  * worker. Therefore in the current situation, it is required
3490                  * that all write requests are flushed, so that all read and
3491                  * write requests are really completed when bios_in_flight
3492                  * changes to 0.
3493                  */
3494                 atomic_set(&sctx->wr_ctx.flush_all_writes, 1);
3495                 scrub_submit(sctx);
3496                 mutex_lock(&sctx->wr_ctx.wr_lock);
3497                 scrub_wr_submit(sctx);
3498                 mutex_unlock(&sctx->wr_ctx.wr_lock);
3499
3500                 wait_event(sctx->list_wait,
3501                            atomic_read(&sctx->bios_in_flight) == 0);
3502                 atomic_inc(&fs_info->scrubs_paused);
3503                 wake_up(&fs_info->scrub_pause_wait);
3504
3505                 /*
3506                  * Must be called before we decrease @scrub_paused.
3507                  * Make sure we don't block transaction commit while
3508                  * we are waiting for pending workers to finish.
3509                  */
3510                 wait_event(sctx->list_wait,
3511                            atomic_read(&sctx->workers_pending) == 0);
3512                 atomic_set(&sctx->wr_ctx.flush_all_writes, 0);
3513
3514                 mutex_lock(&fs_info->scrub_lock);
3515                 __scrub_blocked_if_needed(fs_info);
3516                 atomic_dec(&fs_info->scrubs_paused);
3517                 mutex_unlock(&fs_info->scrub_lock);
3518                 wake_up(&fs_info->scrub_pause_wait);
3519
3520                 btrfs_put_block_group(cache);
3521                 if (ret)
3522                         break;
3523                 if (is_dev_replace &&
3524                     atomic64_read(&dev_replace->num_write_errors) > 0) {
3525                         ret = -EIO;
3526                         break;
3527                 }
3528                 if (sctx->stat.malloc_errors > 0) {
3529                         ret = -ENOMEM;
3530                         break;
3531                 }
3532
3533                 dev_replace->cursor_left = dev_replace->cursor_right;
3534                 dev_replace->item_needs_writeback = 1;
3535 skip:
3536                 key.offset = found_key.offset + length;
3537                 btrfs_release_path(path);
3538         }
3539
3540         btrfs_free_path(path);
3541
3542         /*
3543          * ret can still be 1 from btrfs_search_slot() or btrfs_next_leaf();
3544          * that is not an error.
3545          */
3546         return ret < 0 ? ret : 0;
3547 }
3548
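/*
 * Scrub all super block copies that fit on @scrub_dev.  The btrfs super
 * block mirrors sit at fixed offsets (64K, 64M, 256G); copies beyond the
 * committed device size are skipped.
 */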
3549 static noinline_for_stack int scrub_supers(struct scrub_ctx *sctx,
3550                                            struct btrfs_device *scrub_dev)
3551 {
3552         int     i;
3553         u64     bytenr;
3554         u64     gen;
3555         int     ret;
3556         struct btrfs_root *root = sctx->dev_root;
3557
3558         if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
3559                 return -EIO;
3560
3561         /* Seed devices of a new filesystem have their own generation. */
3562         if (scrub_dev->fs_devices != root->fs_info->fs_devices)
3563                 gen = scrub_dev->generation;
3564         else
3565                 gen = root->fs_info->last_trans_committed;
3566
3567         for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
3568                 bytenr = btrfs_sb_offset(i);
3569                 if (bytenr + BTRFS_SUPER_INFO_SIZE >
3570                     scrub_dev->commit_total_bytes)
3571                         break;
3572
3573                 ret = scrub_pages(sctx, bytenr, BTRFS_SUPER_INFO_SIZE, bytenr,
3574                                   scrub_dev, BTRFS_EXTENT_FLAG_SUPER, gen, i,
3575                                   NULL, 1, bytenr);
3576                 if (ret)
3577                         return ret;
3578         }
3579         wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
3580
3581         return 0;
3582 }
3583
3584 /*
3585  * Get a reference count on fs_info->scrub_workers; start the workers if necessary.
3586  */
3587 static noinline_for_stack int scrub_workers_get(struct btrfs_fs_info *fs_info,
3588                                                 int is_dev_replace)
3589 {
3590         int ret = 0;
3591         int flags = WQ_FREEZABLE | WQ_UNBOUND;
3592         int max_active = fs_info->thread_pool_size;
3593
3594         if (fs_info->scrub_workers_refcnt == 0) {
3595                 if (is_dev_replace)
3596                         fs_info->scrub_workers =
3597                                 btrfs_alloc_workqueue("btrfs-scrub", flags,
3598                                                       1, 4);
3599                 else
3600                         fs_info->scrub_workers =
3601                                 btrfs_alloc_workqueue("btrfs-scrub", flags,
3602                                                       max_active, 4);
3603                 if (!fs_info->scrub_workers) {
3604                         ret = -ENOMEM;
3605                         goto out;
3606                 }
3607                 fs_info->scrub_wr_completion_workers =
3608                         btrfs_alloc_workqueue("btrfs-scrubwrc", flags,
3609                                               max_active, 2);
3610                 if (!fs_info->scrub_wr_completion_workers) {
3611                         ret = -ENOMEM;
3612                         goto out;
3613                 }
3614                 fs_info->scrub_nocow_workers =
3615                         btrfs_alloc_workqueue("btrfs-scrubnc", flags, 1, 0);
3616                 if (!fs_info->scrub_nocow_workers) {
3617                         ret = -ENOMEM;
3618                         goto out;
3619                 }
3620         }
3621         ++fs_info->scrub_workers_refcnt;
3622 out:
3623         return ret;
3624 }
3625
3626 static noinline_for_stack void scrub_workers_put(struct btrfs_fs_info *fs_info)
3627 {
3628         if (--fs_info->scrub_workers_refcnt == 0) {
3629                 btrfs_destroy_workqueue(fs_info->scrub_workers);
3630                 btrfs_destroy_workqueue(fs_info->scrub_wr_completion_workers);
3631                 btrfs_destroy_workqueue(fs_info->scrub_nocow_workers);
3632         }
3633         WARN_ON(fs_info->scrub_workers_refcnt < 0);
3634 }
3635
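/*
 * Entry point for both a regular scrub and the scrub part of dev replace.
 * After checking the size assumptions below, the device is looked up
 * under device_list_mutex, a scrub context is set up under scrub_lock,
 * the super blocks are scrubbed (regular scrub only) and the dev extents
 * overlapping [start, end) are enumerated and scrubbed chunk by chunk.
 */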
3636 int btrfs_scrub_dev(struct btrfs_fs_info *fs_info, u64 devid, u64 start,
3637                     u64 end, struct btrfs_scrub_progress *progress,
3638                     int readonly, int is_dev_replace)
3639 {
3640         struct scrub_ctx *sctx;
3641         int ret;
3642         struct btrfs_device *dev;
3643         struct rcu_string *name;
3644
3645         if (btrfs_fs_closing(fs_info))
3646                 return -EINVAL;
3647
3648         if (fs_info->chunk_root->nodesize > BTRFS_STRIPE_LEN) {
3649                 /*
3650                  * The way scrub is implemented, it cannot calculate the
3651                  * checksum in this case. Do not handle this situation at
3652                  * all because it should never happen.
3653                  */
3654                 btrfs_err(fs_info,
3655                            "scrub: size assumption nodesize <= BTRFS_STRIPE_LEN (%d <= %d) fails",
3656                        fs_info->chunk_root->nodesize, BTRFS_STRIPE_LEN);
3657                 return -EINVAL;
3658         }
3659
3660         if (fs_info->chunk_root->sectorsize != PAGE_SIZE) {
3661                 /* not supported for data w/o checksums */
3662                 btrfs_err(fs_info,
3663                            "scrub: size assumption sectorsize != PAGE_SIZE "
3664                            "(%d != %lu) fails",
3665                        fs_info->chunk_root->sectorsize, PAGE_SIZE);
3666                 return -EINVAL;
3667         }
3668
3669         if (fs_info->chunk_root->nodesize >
3670             PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK ||
3671             fs_info->chunk_root->sectorsize >
3672             PAGE_SIZE * SCRUB_MAX_PAGES_PER_BLOCK) {
3673                 /*
3674                  * This would exhaust the array bounds of the pagev member
3675                  * in struct scrub_block.
3676                  */
3677                 btrfs_err(fs_info, "scrub: size assumption nodesize and sectorsize "
3678                            "<= SCRUB_MAX_PAGES_PER_BLOCK (%d <= %d && %d <= %d) fails",
3679                        fs_info->chunk_root->nodesize,
3680                        SCRUB_MAX_PAGES_PER_BLOCK,
3681                        fs_info->chunk_root->sectorsize,
3682                        SCRUB_MAX_PAGES_PER_BLOCK);
3683                 return -EINVAL;
3684         }
3685
3686
3687         mutex_lock(&fs_info->fs_devices->device_list_mutex);
3688         dev = btrfs_find_device(fs_info, devid, NULL, NULL);
3689         if (!dev || (dev->missing && !is_dev_replace)) {
3690                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3691                 return -ENODEV;
3692         }
3693
3694         if (!is_dev_replace && !readonly && !dev->writeable) {
3695                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3696                 rcu_read_lock();
3697                 name = rcu_dereference(dev->name);
3698                 btrfs_err(fs_info, "scrub: device %s is not writable",
3699                           name->str);
3700                 rcu_read_unlock();
3701                 return -EROFS;
3702         }
3703
3704         mutex_lock(&fs_info->scrub_lock);
3705         if (!dev->in_fs_metadata || dev->is_tgtdev_for_dev_replace) {
3706                 mutex_unlock(&fs_info->scrub_lock);
3707                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3708                 return -EIO;
3709         }
3710
3711         btrfs_dev_replace_lock(&fs_info->dev_replace);
3712         if (dev->scrub_device ||
3713             (!is_dev_replace &&
3714              btrfs_dev_replace_is_ongoing(&fs_info->dev_replace))) {
3715                 btrfs_dev_replace_unlock(&fs_info->dev_replace);
3716                 mutex_unlock(&fs_info->scrub_lock);
3717                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3718                 return -EINPROGRESS;
3719         }
3720         btrfs_dev_replace_unlock(&fs_info->dev_replace);
3721
3722         ret = scrub_workers_get(fs_info, is_dev_replace);
3723         if (ret) {
3724                 mutex_unlock(&fs_info->scrub_lock);
3725                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3726                 return ret;
3727         }
3728
3729         sctx = scrub_setup_ctx(dev, is_dev_replace);
3730         if (IS_ERR(sctx)) {
3731                 mutex_unlock(&fs_info->scrub_lock);
3732                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3733                 scrub_workers_put(fs_info);
3734                 return PTR_ERR(sctx);
3735         }
3736         sctx->readonly = readonly;
3737         dev->scrub_device = sctx;
3738         mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3739
3740         /*
3741          * By checking @scrub_pause_req here, we can avoid a
3742          * race between transaction commit and scrubbing.
3743          */
3744         __scrub_blocked_if_needed(fs_info);
3745         atomic_inc(&fs_info->scrubs_running);
3746         mutex_unlock(&fs_info->scrub_lock);
3747
3748         if (!is_dev_replace) {
3749                 /*
3750                  * by holding device list mutex, we can
3751                  * kick off writing super in log tree sync.
3752                  */
3753                 mutex_lock(&fs_info->fs_devices->device_list_mutex);
3754                 ret = scrub_supers(sctx, dev);
3755                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
3756         }
3757
3758         if (!ret)
3759                 ret = scrub_enumerate_chunks(sctx, dev, start, end,
3760                                              is_dev_replace);
3761
3762         wait_event(sctx->list_wait, atomic_read(&sctx->bios_in_flight) == 0);
3763         atomic_dec(&fs_info->scrubs_running);
3764         wake_up(&fs_info->scrub_pause_wait);
3765
3766         wait_event(sctx->list_wait, atomic_read(&sctx->workers_pending) == 0);
3767
3768         if (progress)
3769                 memcpy(progress, &sctx->stat, sizeof(*progress));
3770
3771         mutex_lock(&fs_info->scrub_lock);
3772         dev->scrub_device = NULL;
3773         scrub_workers_put(fs_info);
3774         mutex_unlock(&fs_info->scrub_lock);
3775
3776         scrub_free_ctx(sctx);
3777
3778         return ret;
3779 }
3780
3781 void btrfs_scrub_pause(struct btrfs_root *root)
3782 {
3783         struct btrfs_fs_info *fs_info = root->fs_info;
3784
3785         mutex_lock(&fs_info->scrub_lock);
3786         atomic_inc(&fs_info->scrub_pause_req);
3787         while (atomic_read(&fs_info->scrubs_paused) !=
3788                atomic_read(&fs_info->scrubs_running)) {
3789                 mutex_unlock(&fs_info->scrub_lock);
3790                 wait_event(fs_info->scrub_pause_wait,
3791                            atomic_read(&fs_info->scrubs_paused) ==
3792                            atomic_read(&fs_info->scrubs_running));
3793                 mutex_lock(&fs_info->scrub_lock);
3794         }
3795         mutex_unlock(&fs_info->scrub_lock);
3796 }
3797
3798 void btrfs_scrub_continue(struct btrfs_root *root)
3799 {
3800         struct btrfs_fs_info *fs_info = root->fs_info;
3801
3802         atomic_dec(&fs_info->scrub_pause_req);
3803         wake_up(&fs_info->scrub_pause_wait);
3804 }
3805
3806 int btrfs_scrub_cancel(struct btrfs_fs_info *fs_info)
3807 {
3808         mutex_lock(&fs_info->scrub_lock);
3809         if (!atomic_read(&fs_info->scrubs_running)) {
3810                 mutex_unlock(&fs_info->scrub_lock);
3811                 return -ENOTCONN;
3812         }
3813
3814         atomic_inc(&fs_info->scrub_cancel_req);
3815         while (atomic_read(&fs_info->scrubs_running)) {
3816                 mutex_unlock(&fs_info->scrub_lock);
3817                 wait_event(fs_info->scrub_pause_wait,
3818                            atomic_read(&fs_info->scrubs_running) == 0);
3819                 mutex_lock(&fs_info->scrub_lock);
3820         }
3821         atomic_dec(&fs_info->scrub_cancel_req);
3822         mutex_unlock(&fs_info->scrub_lock);
3823
3824         return 0;
3825 }
3826
3827 int btrfs_scrub_cancel_dev(struct btrfs_fs_info *fs_info,
3828                            struct btrfs_device *dev)
3829 {
3830         struct scrub_ctx *sctx;
3831
3832         mutex_lock(&fs_info->scrub_lock);
3833         sctx = dev->scrub_device;
3834         if (!sctx) {
3835                 mutex_unlock(&fs_info->scrub_lock);
3836                 return -ENOTCONN;
3837         }
3838         atomic_inc(&sctx->cancel_req);
3839         while (dev->scrub_device) {
3840                 mutex_unlock(&fs_info->scrub_lock);
3841                 wait_event(fs_info->scrub_pause_wait,
3842                            dev->scrub_device == NULL);
3843                 mutex_lock(&fs_info->scrub_lock);
3844         }
3845         mutex_unlock(&fs_info->scrub_lock);
3846
3847         return 0;
3848 }
3849
3850 int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
3851                          struct btrfs_scrub_progress *progress)
3852 {
3853         struct btrfs_device *dev;
3854         struct scrub_ctx *sctx = NULL;
3855
3856         mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
3857         dev = btrfs_find_device(root->fs_info, devid, NULL, NULL);
3858         if (dev)
3859                 sctx = dev->scrub_device;
3860         if (sctx)
3861                 memcpy(progress, &sctx->stat, sizeof(*progress));
3862         mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
3863
3864         return dev ? (sctx ? 0 : -ENOTCONN) : -ENODEV;
3865 }
3866
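/*
 * Map @extent_logical to a single copy: return the physical address,
 * device and mirror number of the first stripe that btrfs_map_block()
 * hands back, so dev replace and the RAID56 parity scrub read from one
 * specific mirror.  If the mapping fails or is too short, the caller's
 * values are left untouched.
 */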
3867 static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
3868                                u64 extent_logical, u64 extent_len,
3869                                u64 *extent_physical,
3870                                struct btrfs_device **extent_dev,
3871                                int *extent_mirror_num)
3872 {
3873         u64 mapped_length;
3874         struct btrfs_bio *bbio = NULL;
3875         int ret;
3876
3877         mapped_length = extent_len;
3878         ret = btrfs_map_block(fs_info, READ, extent_logical,
3879                               &mapped_length, &bbio, 0);
3880         if (ret || !bbio || mapped_length < extent_len ||
3881             !bbio->stripes[0].dev->bdev) {
3882                 kfree(bbio);
3883                 return;
3884         }
3885
3886         *extent_physical = bbio->stripes[0].physical;
3887         *extent_mirror_num = bbio->mirror_num;
3888         *extent_dev = bbio->stripes[0].dev;
3889         kfree(bbio);
3890 }
3891
3892 static int scrub_setup_wr_ctx(struct scrub_ctx *sctx,
3893                               struct scrub_wr_ctx *wr_ctx,
3894                               struct btrfs_fs_info *fs_info,
3895                               struct btrfs_device *dev,
3896                               int is_dev_replace)
3897 {
3898         WARN_ON(wr_ctx->wr_curr_bio != NULL);
3899
3900         mutex_init(&wr_ctx->wr_lock);
3901         wr_ctx->wr_curr_bio = NULL;
3902         if (!is_dev_replace)
3903                 return 0;
3904
3905         WARN_ON(!dev->bdev);
3906         wr_ctx->pages_per_wr_bio = min_t(int, SCRUB_PAGES_PER_WR_BIO,
3907                                          bio_get_nr_vecs(dev->bdev));
3908         wr_ctx->tgtdev = dev;
3909         atomic_set(&wr_ctx->flush_all_writes, 0);
3910         return 0;
3911 }
3912
3913 static void scrub_free_wr_ctx(struct scrub_wr_ctx *wr_ctx)
3914 {
3915         mutex_lock(&wr_ctx->wr_lock);
3916         kfree(wr_ctx->wr_curr_bio);
3917         wr_ctx->wr_curr_bio = NULL;
3918         mutex_unlock(&wr_ctx->wr_lock);
3919 }
3920
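/*
 * During dev replace, data extents without checksums (nodatacow) may be
 * overwritten in place at any time, so instead of blindly copying the
 * raw blocks they are copied through the page cache in a worker, and
 * check_extent_to_block() is used to notice concurrent modification.
 */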
3921 static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
3922                             int mirror_num, u64 physical_for_dev_replace)
3923 {
3924         struct scrub_copy_nocow_ctx *nocow_ctx;
3925         struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
3926
3927         nocow_ctx = kzalloc(sizeof(*nocow_ctx), GFP_NOFS);
3928         if (!nocow_ctx) {
3929                 spin_lock(&sctx->stat_lock);
3930                 sctx->stat.malloc_errors++;
3931                 spin_unlock(&sctx->stat_lock);
3932                 return -ENOMEM;
3933         }
3934
3935         scrub_pending_trans_workers_inc(sctx);
3936
3937         nocow_ctx->sctx = sctx;
3938         nocow_ctx->logical = logical;
3939         nocow_ctx->len = len;
3940         nocow_ctx->mirror_num = mirror_num;
3941         nocow_ctx->physical_for_dev_replace = physical_for_dev_replace;
3942         btrfs_init_work(&nocow_ctx->work, btrfs_scrubnc_helper,
3943                         copy_nocow_pages_worker, NULL, NULL);
3944         INIT_LIST_HEAD(&nocow_ctx->inodes);
3945         btrfs_queue_work(fs_info->scrub_nocow_workers,
3946                          &nocow_ctx->work);
3947
3948         return 0;
3949 }
3950
3951 static int record_inode_for_nocow(u64 inum, u64 offset, u64 root, void *ctx)
3952 {
3953         struct scrub_copy_nocow_ctx *nocow_ctx = ctx;
3954         struct scrub_nocow_inode *nocow_inode;
3955
3956         nocow_inode = kzalloc(sizeof(*nocow_inode), GFP_NOFS);
3957         if (!nocow_inode)
3958                 return -ENOMEM;
3959         nocow_inode->inum = inum;
3960         nocow_inode->offset = offset;
3961         nocow_inode->root = root;
3962         list_add_tail(&nocow_inode->list, &nocow_ctx->inodes);
3963         return 0;
3964 }
3965
3966 #define COPY_COMPLETE 1
3967
3968 static void copy_nocow_pages_worker(struct btrfs_work *work)
3969 {
3970         struct scrub_copy_nocow_ctx *nocow_ctx =
3971                 container_of(work, struct scrub_copy_nocow_ctx, work);
3972         struct scrub_ctx *sctx = nocow_ctx->sctx;
3973         u64 logical = nocow_ctx->logical;
3974         u64 len = nocow_ctx->len;
3975         int mirror_num = nocow_ctx->mirror_num;
3976         u64 physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
3977         int ret;
3978         struct btrfs_trans_handle *trans = NULL;
3979         struct btrfs_fs_info *fs_info;
3980         struct btrfs_path *path;
3981         struct btrfs_root *root;
3982         int not_written = 0;
3983
3984         fs_info = sctx->dev_root->fs_info;
3985         root = fs_info->extent_root;
3986
3987         path = btrfs_alloc_path();
3988         if (!path) {
3989                 spin_lock(&sctx->stat_lock);
3990                 sctx->stat.malloc_errors++;
3991                 spin_unlock(&sctx->stat_lock);
3992                 not_written = 1;
3993                 goto out;
3994         }
3995
3996         trans = btrfs_join_transaction(root);
3997         if (IS_ERR(trans)) {
3998                 not_written = 1;
3999                 goto out;
4000         }
4001
4002         ret = iterate_inodes_from_logical(logical, fs_info, path,
4003                                           record_inode_for_nocow, nocow_ctx);
4004         if (ret != 0 && ret != -ENOENT) {
4005                 btrfs_warn(fs_info,
4006                            "iterate_inodes_from_logical() failed: log %llu, phys %llu, len %llu, mir %u, ret %d",
4007                            logical, physical_for_dev_replace, len, mirror_num,
4008                            ret);
4009                 not_written = 1;
4010                 goto out;
4011         }
4012
4013         btrfs_end_transaction(trans, root);
4014         trans = NULL;
4015         while (!list_empty(&nocow_ctx->inodes)) {
4016                 struct scrub_nocow_inode *entry;
4017                 entry = list_first_entry(&nocow_ctx->inodes,
4018                                          struct scrub_nocow_inode,
4019                                          list);
4020                 list_del_init(&entry->list);
4021                 ret = copy_nocow_pages_for_inode(entry->inum, entry->offset,
4022                                                  entry->root, nocow_ctx);
4023                 kfree(entry);
4024                 if (ret == COPY_COMPLETE) {
4025                         ret = 0;
4026                         break;
4027                 } else if (ret) {
4028                         break;
4029                 }
4030         }
4031 out:
4032         while (!list_empty(&nocow_ctx->inodes)) {
4033                 struct scrub_nocow_inode *entry;
4034                 entry = list_first_entry(&nocow_ctx->inodes,
4035                                          struct scrub_nocow_inode,
4036                                          list);
4037                 list_del_init(&entry->list);
4038                 kfree(entry);
4039         }
4040         if (trans && !IS_ERR(trans))
4041                 btrfs_end_transaction(trans, root);
4042         if (not_written)
4043                 btrfs_dev_replace_stats_inc(&fs_info->dev_replace.
4044                                             num_uncorrectable_read_errors);
4045
4046         btrfs_free_path(path);
4047         kfree(nocow_ctx);
4048
4049         scrub_pending_trans_workers_dec(sctx);
4050 }
4051
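/*
 * With the extent range locked, verify that the file range still maps to
 * the logical extent being scrubbed. Returns 1 (skip this inode) if an
 * ordered extent is pending or the mapping no longer covers the range,
 * 0 if the range is still valid, or a negative error.
 */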
4052 static int check_extent_to_block(struct inode *inode, u64 start, u64 len,
4053                                  u64 logical)
4054 {
4055         struct extent_state *cached_state = NULL;
4056         struct btrfs_ordered_extent *ordered;
4057         struct extent_io_tree *io_tree;
4058         struct extent_map *em;
4059         u64 lockstart = start, lockend = start + len - 1;
4060         int ret = 0;
4061
4062         io_tree = &BTRFS_I(inode)->io_tree;
4063
4064         lock_extent_bits(io_tree, lockstart, lockend, 0, &cached_state);
4065         ordered = btrfs_lookup_ordered_range(inode, lockstart, len);
4066         if (ordered) {
4067                 btrfs_put_ordered_extent(ordered);
4068                 ret = 1;
4069                 goto out_unlock;
4070         }
4071
4072         em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
4073         if (IS_ERR(em)) {
4074                 ret = PTR_ERR(em);
4075                 goto out_unlock;
4076         }
4077
4078         /*
4079          * This extent no longer covers the logical extent we are
4080          * looking for, so move on to the next inode.
4081          */
4082         if (em->block_start > logical ||
4083             em->block_start + em->block_len < logical + len) {
4084                 free_extent_map(em);
4085                 ret = 1;
4086                 goto out_unlock;
4087         }
4088         free_extent_map(em);
4089
4090 out_unlock:
4091         unlock_extent_cached(io_tree, lockstart, lockend, &cached_state,
4092                              GFP_NOFS);
4093         return ret;
4094 }
4095
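/*
 * Copy the pages of one inode that references the nocow extent: read each
 * page of the range through the inode's page cache and write it to the
 * corresponding physical offset on the dev-replace target device.
 * Returns COPY_COMPLETE when the whole range has been handled.
 */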
4096 static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
4097                                       struct scrub_copy_nocow_ctx *nocow_ctx)
4098 {
4099         struct btrfs_fs_info *fs_info = nocow_ctx->sctx->dev_root->fs_info;
4100         struct btrfs_key key;
4101         struct inode *inode;
4102         struct page *page;
4103         struct btrfs_root *local_root;
4104         struct extent_io_tree *io_tree;
4105         u64 physical_for_dev_replace;
4106         u64 nocow_ctx_logical;
4107         u64 len = nocow_ctx->len;
4108         unsigned long index;
4109         int srcu_index;
4110         int ret = 0;
4111         int err = 0;
4112
4113         key.objectid = root;
4114         key.type = BTRFS_ROOT_ITEM_KEY;
4115         key.offset = (u64)-1;
4116
4117         srcu_index = srcu_read_lock(&fs_info->subvol_srcu);
4118
4119         local_root = btrfs_read_fs_root_no_name(fs_info, &key);
4120         if (IS_ERR(local_root)) {
4121                 srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
4122                 return PTR_ERR(local_root);
4123         }
4124
4125         key.type = BTRFS_INODE_ITEM_KEY;
4126         key.objectid = inum;
4127         key.offset = 0;
4128         inode = btrfs_iget(fs_info->sb, &key, local_root, NULL);
4129         srcu_read_unlock(&fs_info->subvol_srcu, srcu_index);
4130         if (IS_ERR(inode))
4131                 return PTR_ERR(inode);
4132
4133         /* Avoid races with truncate/dio/punch hole */
4134         mutex_lock(&inode->i_mutex);
4135         inode_dio_wait(inode);
4136
4137         physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
4138         io_tree = &BTRFS_I(inode)->io_tree;
4139         nocow_ctx_logical = nocow_ctx->logical;
4140
4141         ret = check_extent_to_block(inode, offset, len, nocow_ctx_logical);
4142         if (ret) {
4143                 ret = ret > 0 ? 0 : ret;
4144                 goto out;
4145         }
4146
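        /*
         * Copy the range one page at a time: read the page via the page
         * cache, re-check that it still maps to the extent, then write it
         * to the replace target.
         */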
4147         while (len >= PAGE_CACHE_SIZE) {
4148                 index = offset >> PAGE_CACHE_SHIFT;
4149 again:
4150                 page = find_or_create_page(inode->i_mapping, index, GFP_NOFS);
4151                 if (!page) {
4152                         btrfs_err(fs_info, "find_or_create_page() failed");
4153                         ret = -ENOMEM;
4154                         goto out;
4155                 }
4156
4157                 if (PageUptodate(page)) {
4158                         if (PageDirty(page))
4159                                 goto next_page;
4160                 } else {
4161                         ClearPageError(page);
4162                         err = extent_read_full_page(io_tree, page,
4163                                                            btrfs_get_extent,
4164                                                            nocow_ctx->mirror_num);
4165                         if (err) {
4166                                 ret = err;
4167                                 goto next_page;
4168                         }
4169
4170                         lock_page(page);
4171                         /*
4172                          * If the page has been removed from the page cache,
4173                          * the data on it is meaningless: it may be stale, and
4174                          * the new data may have been written into a new page
4175                          * in the page cache.
4176                          */
4177                         if (page->mapping != inode->i_mapping) {
4178                                 unlock_page(page);
4179                                 page_cache_release(page);
4180                                 goto again;
4181                         }
4182                         if (!PageUptodate(page)) {
4183                                 ret = -EIO;
4184                                 goto next_page;
4185                         }
4186                 }
4187
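                /*
                 * The read above may have blocked, so re-verify that the
                 * file range still maps to the nocow extent before copying
                 * the page to the target device.
                 */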
4188                 ret = check_extent_to_block(inode, offset, len,
4189                                             nocow_ctx_logical);
4190                 if (ret) {
4191                         ret = ret > 0 ? 0 : ret;
4192                         goto next_page;
4193                 }
4194
4195                 err = write_page_nocow(nocow_ctx->sctx,
4196                                        physical_for_dev_replace, page);
4197                 if (err)
4198                         ret = err;
4199 next_page:
4200                 unlock_page(page);
4201                 page_cache_release(page);
4202
4203                 if (ret)
4204                         break;
4205
4206                 offset += PAGE_CACHE_SIZE;
4207                 physical_for_dev_replace += PAGE_CACHE_SIZE;
4208                 nocow_ctx_logical += PAGE_CACHE_SIZE;
4209                 len -= PAGE_CACHE_SIZE;
4210         }
4211         ret = COPY_COMPLETE;
4212 out:
4213         mutex_unlock(&inode->i_mutex);
4214         iput(inode);
4215         return ret;
4216 }
4217
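/*
 * Synchronously write a single page to the given physical offset on the
 * dev-replace target device, bypassing the normal COW write path.
 */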
4218 static int write_page_nocow(struct scrub_ctx *sctx,
4219                             u64 physical_for_dev_replace, struct page *page)
4220 {
4221         struct bio *bio;
4222         struct btrfs_device *dev;
4223         int ret;
4224
4225         dev = sctx->wr_ctx.tgtdev;
4226         if (!dev)
4227                 return -EIO;
4228         if (!dev->bdev) {
4229                 printk_ratelimited(KERN_WARNING
4230                         "BTRFS: scrub write_page_nocow(bdev == NULL) is unexpected!\n");
4231                 return -EIO;
4232         }
4233         bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
4234         if (!bio) {
4235                 spin_lock(&sctx->stat_lock);
4236                 sctx->stat.malloc_errors++;
4237                 spin_unlock(&sctx->stat_lock);
4238                 return -ENOMEM;
4239         }
4240         bio->bi_iter.bi_size = 0;
4241         bio->bi_iter.bi_sector = physical_for_dev_replace >> 9;
4242         bio->bi_bdev = dev->bdev;
4243         ret = bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
4244         if (ret != PAGE_CACHE_SIZE) {
4245 leave_with_eio:
4246                 bio_put(bio);
4247                 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
4248                 return -EIO;
4249         }
4250
4251         if (btrfsic_submit_bio_wait(WRITE_SYNC, bio))
4252                 goto leave_with_eio;
4253
4254         bio_put(bio);
4255         return 0;
4256 }