fs/btrfs/volumes.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2007 Oracle.  All rights reserved.
4  */
5
6 #include <linux/sched.h>
7 #include <linux/sched/mm.h>
8 #include <linux/bio.h>
9 #include <linux/slab.h>
10 #include <linux/blkdev.h>
11 #include <linux/ratelimit.h>
12 #include <linux/kthread.h>
13 #include <linux/raid/pq.h>
14 #include <linux/semaphore.h>
15 #include <linux/uuid.h>
16 #include <linux/list_sort.h>
17 #include "misc.h"
18 #include "ctree.h"
19 #include "extent_map.h"
20 #include "disk-io.h"
21 #include "transaction.h"
22 #include "print-tree.h"
23 #include "volumes.h"
24 #include "raid56.h"
25 #include "async-thread.h"
26 #include "check-integrity.h"
27 #include "rcu-string.h"
28 #include "dev-replace.h"
29 #include "sysfs.h"
30 #include "tree-checker.h"
31 #include "space-info.h"
32 #include "block-group.h"
33 #include "discard.h"
34 #include "zoned.h"
35
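/*
 * Properties of each block group profile, indexed by enum btrfs_raid_types
 * (see btrfs_bg_flags_to_raid_index() below).
 */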
36 const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
37         [BTRFS_RAID_RAID10] = {
38                 .sub_stripes    = 2,
39                 .dev_stripes    = 1,
40                 .devs_max       = 0,    /* 0 == as many as possible */
41                 .devs_min       = 2,
42                 .tolerated_failures = 1,
43                 .devs_increment = 2,
44                 .ncopies        = 2,
45                 .nparity        = 0,
46                 .raid_name      = "raid10",
47                 .bg_flag        = BTRFS_BLOCK_GROUP_RAID10,
48                 .mindev_error   = BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET,
49         },
50         [BTRFS_RAID_RAID1] = {
51                 .sub_stripes    = 1,
52                 .dev_stripes    = 1,
53                 .devs_max       = 2,
54                 .devs_min       = 2,
55                 .tolerated_failures = 1,
56                 .devs_increment = 2,
57                 .ncopies        = 2,
58                 .nparity        = 0,
59                 .raid_name      = "raid1",
60                 .bg_flag        = BTRFS_BLOCK_GROUP_RAID1,
61                 .mindev_error   = BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
62         },
63         [BTRFS_RAID_RAID1C3] = {
64                 .sub_stripes    = 1,
65                 .dev_stripes    = 1,
66                 .devs_max       = 3,
67                 .devs_min       = 3,
68                 .tolerated_failures = 2,
69                 .devs_increment = 3,
70                 .ncopies        = 3,
71                 .nparity        = 0,
72                 .raid_name      = "raid1c3",
73                 .bg_flag        = BTRFS_BLOCK_GROUP_RAID1C3,
74                 .mindev_error   = BTRFS_ERROR_DEV_RAID1C3_MIN_NOT_MET,
75         },
76         [BTRFS_RAID_RAID1C4] = {
77                 .sub_stripes    = 1,
78                 .dev_stripes    = 1,
79                 .devs_max       = 4,
80                 .devs_min       = 4,
81                 .tolerated_failures = 3,
82                 .devs_increment = 4,
83                 .ncopies        = 4,
84                 .nparity        = 0,
85                 .raid_name      = "raid1c4",
86                 .bg_flag        = BTRFS_BLOCK_GROUP_RAID1C4,
87                 .mindev_error   = BTRFS_ERROR_DEV_RAID1C4_MIN_NOT_MET,
88         },
89         [BTRFS_RAID_DUP] = {
90                 .sub_stripes    = 1,
91                 .dev_stripes    = 2,
92                 .devs_max       = 1,
93                 .devs_min       = 1,
94                 .tolerated_failures = 0,
95                 .devs_increment = 1,
96                 .ncopies        = 2,
97                 .nparity        = 0,
98                 .raid_name      = "dup",
99                 .bg_flag        = BTRFS_BLOCK_GROUP_DUP,
100                 .mindev_error   = 0,
101         },
102         [BTRFS_RAID_RAID0] = {
103                 .sub_stripes    = 1,
104                 .dev_stripes    = 1,
105                 .devs_max       = 0,
106                 .devs_min       = 1,
107                 .tolerated_failures = 0,
108                 .devs_increment = 1,
109                 .ncopies        = 1,
110                 .nparity        = 0,
111                 .raid_name      = "raid0",
112                 .bg_flag        = BTRFS_BLOCK_GROUP_RAID0,
113                 .mindev_error   = 0,
114         },
115         [BTRFS_RAID_SINGLE] = {
116                 .sub_stripes    = 1,
117                 .dev_stripes    = 1,
118                 .devs_max       = 1,
119                 .devs_min       = 1,
120                 .tolerated_failures = 0,
121                 .devs_increment = 1,
122                 .ncopies        = 1,
123                 .nparity        = 0,
124                 .raid_name      = "single",
125                 .bg_flag        = 0,
126                 .mindev_error   = 0,
127         },
128         [BTRFS_RAID_RAID5] = {
129                 .sub_stripes    = 1,
130                 .dev_stripes    = 1,
131                 .devs_max       = 0,
132                 .devs_min       = 2,
133                 .tolerated_failures = 1,
134                 .devs_increment = 1,
135                 .ncopies        = 1,
136                 .nparity        = 1,
137                 .raid_name      = "raid5",
138                 .bg_flag        = BTRFS_BLOCK_GROUP_RAID5,
139                 .mindev_error   = BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET,
140         },
141         [BTRFS_RAID_RAID6] = {
142                 .sub_stripes    = 1,
143                 .dev_stripes    = 1,
144                 .devs_max       = 0,
145                 .devs_min       = 3,
146                 .tolerated_failures = 2,
147                 .devs_increment = 1,
148                 .ncopies        = 1,
149                 .nparity        = 2,
150                 .raid_name      = "raid6",
151                 .bg_flag        = BTRFS_BLOCK_GROUP_RAID6,
152                 .mindev_error   = BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET,
153         },
154 };
155
156 /*
157  * Convert block group flags (BTRFS_BLOCK_GROUP_*) to btrfs_raid_types, which
158  * can be used as an index to access btrfs_raid_array[].
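 * For example, flags containing BTRFS_BLOCK_GROUP_RAID10 map to
 * BTRFS_RAID_RAID10, while plain block groups with no profile bit set map to
 * BTRFS_RAID_SINGLE.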
159  */
160 enum btrfs_raid_types __attribute_const__ btrfs_bg_flags_to_raid_index(u64 flags)
161 {
162         if (flags & BTRFS_BLOCK_GROUP_RAID10)
163                 return BTRFS_RAID_RAID10;
164         else if (flags & BTRFS_BLOCK_GROUP_RAID1)
165                 return BTRFS_RAID_RAID1;
166         else if (flags & BTRFS_BLOCK_GROUP_RAID1C3)
167                 return BTRFS_RAID_RAID1C3;
168         else if (flags & BTRFS_BLOCK_GROUP_RAID1C4)
169                 return BTRFS_RAID_RAID1C4;
170         else if (flags & BTRFS_BLOCK_GROUP_DUP)
171                 return BTRFS_RAID_DUP;
172         else if (flags & BTRFS_BLOCK_GROUP_RAID0)
173                 return BTRFS_RAID_RAID0;
174         else if (flags & BTRFS_BLOCK_GROUP_RAID5)
175                 return BTRFS_RAID_RAID5;
176         else if (flags & BTRFS_BLOCK_GROUP_RAID6)
177                 return BTRFS_RAID_RAID6;
178
179         return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
180 }
181
182 const char *btrfs_bg_type_to_raid_name(u64 flags)
183 {
184         const int index = btrfs_bg_flags_to_raid_index(flags);
185
186         if (index >= BTRFS_NR_RAID_TYPES)
187                 return NULL;
188
189         return btrfs_raid_array[index].raid_name;
190 }
191
192 /*
193  * Fill @buf with a textual description of @bg_flags, using no more than
194  * @size_buf bytes including the terminating null byte.
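 * For example, BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_RAID1 is rendered
 * as "data|raid1".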
195  */
196 void btrfs_describe_block_groups(u64 bg_flags, char *buf, u32 size_buf)
197 {
198         int i;
199         int ret;
200         char *bp = buf;
201         u64 flags = bg_flags;
202         u32 size_bp = size_buf;
203
204         if (!flags) {
205                 strcpy(bp, "NONE");
206                 return;
207         }
208
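/*
 * Append "<desc>|" to the buffer when @flag is set in @flags and clear the
 * flag, bailing out to out_overflow when the remaining space is too small.
 */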
209 #define DESCRIBE_FLAG(flag, desc)                                               \
210         do {                                                            \
211                 if (flags & (flag)) {                                   \
212                         ret = snprintf(bp, size_bp, "%s|", (desc));     \
213                         if (ret < 0 || ret >= size_bp)                  \
214                                 goto out_overflow;                      \
215                         size_bp -= ret;                                 \
216                         bp += ret;                                      \
217                         flags &= ~(flag);                               \
218                 }                                                       \
219         } while (0)
220
221         DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_DATA, "data");
222         DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_SYSTEM, "system");
223         DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_METADATA, "metadata");
224
225         DESCRIBE_FLAG(BTRFS_AVAIL_ALLOC_BIT_SINGLE, "single");
226         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
227                 DESCRIBE_FLAG(btrfs_raid_array[i].bg_flag,
228                               btrfs_raid_array[i].raid_name);
229 #undef DESCRIBE_FLAG
230
231         if (flags) {
232                 ret = snprintf(bp, size_bp, "0x%llx|", flags);
233                 size_bp -= ret;
234         }
235
236         if (size_bp < size_buf)
237                 buf[size_buf - size_bp - 1] = '\0'; /* remove last | */
238
239         /*
240          * The text is trimmed, it's up to the caller to provide a
241          * sufficiently large buffer.
242          */
243 out_overflow:;
244 }
245
246 static int init_first_rw_device(struct btrfs_trans_handle *trans);
247 static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info);
248 static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
249 static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
250 static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
251                              enum btrfs_map_op op,
252                              u64 logical, u64 *length,
253                              struct btrfs_bio **bbio_ret,
254                              int mirror_num, int need_raid_map);
255
256 /*
257  * Device locking
258  * ==============
259  *
260  * There are several mutexes that protect manipulation of devices and low-level
261  * structures like chunks but not block groups, extents or files
262  *
263  * uuid_mutex (global lock)
264  * ------------------------
265  * protects the fs_uuids list that tracks all per-fs fs_devices, resulting from
266  * the SCAN_DEV ioctl registration or from mount either implicitly (the first
267  * device) or requested by the device= mount option
268  *
269  * the mutex can be very coarse and can cover long-running operations
270  *
271  * protects: updates to fs_devices counters like missing devices, rw devices,
272  * seeding, structure cloning, opening/closing devices at mount/umount time
273  *
274  * global::fs_devs - add, remove, updates to the global list
275  *
276  * does not protect: manipulation of the fs_devices::devices list in general
277  * but in mount context it could be used to exclude list modifications by
278  * e.g. the scan ioctl
279  *
280  * btrfs_device::name - renames (write side), read is RCU
281  *
282  * fs_devices::device_list_mutex (per-fs, with RCU)
283  * ------------------------------------------------
284  * protects updates to fs_devices::devices, ie. adding and deleting
285  *
286  * simple list traversal with read-only actions can be done with RCU protection
287  *
288  * may be used to exclude some operations from running concurrently without any
289  * modifications to the list (see write_all_supers)
290  *
291  * Is not required at mount and close times, because our device list is
292  * protected by the uuid_mutex at that point.
293  *
294  * balance_mutex
295  * -------------
296  * protects balance structures (status, state) and context accessed from
297  * several places (internally, ioctl)
298  *
299  * chunk_mutex
300  * -----------
301  * protects chunks, adding or removing during allocation, trim or when a new
302  * device is added/removed. Additionally it also protects post_commit_list of
303  * individual devices, since they can be added to the transaction's
304  * post_commit_list only with chunk_mutex held.
305  *
306  * cleaner_mutex
307  * -------------
308  * a big lock that is held by the cleaner thread and prevents running subvolume
309  * cleaning together with relocation or delayed iputs
310  *
311  *
312  * Lock nesting
313  * ============
314  *
315  * uuid_mutex
316  *   device_list_mutex
317  *     chunk_mutex
318  *   balance_mutex
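 *
 * When more than one of these is needed they are acquired in the top-down
 * order shown above, e.g. uuid_mutex before device_list_mutex before
 * chunk_mutex.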
319  *
320  *
321  * Exclusive operations
322  * ====================
323  *
324  * Maintains the exclusivity of the following operations that apply to the
325  * whole filesystem and cannot run in parallel.
326  *
327  * - Balance (*)
328  * - Device add
329  * - Device remove
330  * - Device replace (*)
331  * - Resize
332  *
333  * The device operations (as above) can be in one of the following states:
334  *
335  * - Running state
336  * - Paused state
337  * - Completed state
338  *
339  * Only device operations marked with (*) can go into the Paused state for the
340  * following reasons:
341  *
342  * - ioctl (only Balance can be Paused through ioctl)
343  * - filesystem remounted as read-only
344  * - filesystem unmounted and mounted as read-only
345  * - system power-cycle and filesystem mounted as read-only
346  * - filesystem or device errors leading to forced read-only
347  *
348  * The status of exclusive operation is set and cleared atomically.
349  * During the course of Paused state, fs_info::exclusive_operation remains set.
350  * A device operation in Paused or Running state can be canceled or resumed
351  * either by ioctl (Balance only) or when remounted as read-write.
352  * The exclusive status is cleared when the device operation is canceled or
353  * completed.
354  */
355
356 DEFINE_MUTEX(uuid_mutex);
357 static LIST_HEAD(fs_uuids);
358 struct list_head * __attribute_const__ btrfs_get_fs_uuids(void)
359 {
360         return &fs_uuids;
361 }
362
363 /*
364  * alloc_fs_devices - allocate struct btrfs_fs_devices
365  * @fsid:               if not NULL, copy the UUID to fs_devices::fsid
366  * @metadata_fsid:      if not NULL, copy the UUID to fs_devices::metadata_fsid
367  *
368  * Return a pointer to a new struct btrfs_fs_devices on success, or ERR_PTR().
369  * The returned struct is not linked onto any lists and can be destroyed with
370  * kfree() right away.
371  */
372 static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid,
373                                                  const u8 *metadata_fsid)
374 {
375         struct btrfs_fs_devices *fs_devs;
376
377         fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL);
378         if (!fs_devs)
379                 return ERR_PTR(-ENOMEM);
380
381         mutex_init(&fs_devs->device_list_mutex);
382
383         INIT_LIST_HEAD(&fs_devs->devices);
384         INIT_LIST_HEAD(&fs_devs->alloc_list);
385         INIT_LIST_HEAD(&fs_devs->fs_list);
386         INIT_LIST_HEAD(&fs_devs->seed_list);
387         if (fsid)
388                 memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);
389
390         if (metadata_fsid)
391                 memcpy(fs_devs->metadata_uuid, metadata_fsid, BTRFS_FSID_SIZE);
392         else if (fsid)
393                 memcpy(fs_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE);
394
395         return fs_devs;
396 }
397
398 void btrfs_free_device(struct btrfs_device *device)
399 {
400         WARN_ON(!list_empty(&device->post_commit_list));
401         rcu_string_free(device->name);
402         extent_io_tree_release(&device->alloc_state);
403         bio_put(device->flush_bio);
404         btrfs_destroy_dev_zone_info(device);
405         kfree(device);
406 }
407
408 static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
409 {
410         struct btrfs_device *device;
411         WARN_ON(fs_devices->opened);
412         while (!list_empty(&fs_devices->devices)) {
413                 device = list_entry(fs_devices->devices.next,
414                                     struct btrfs_device, dev_list);
415                 list_del(&device->dev_list);
416                 btrfs_free_device(device);
417         }
418         kfree(fs_devices);
419 }
420
421 void __exit btrfs_cleanup_fs_uuids(void)
422 {
423         struct btrfs_fs_devices *fs_devices;
424
425         while (!list_empty(&fs_uuids)) {
426                 fs_devices = list_entry(fs_uuids.next,
427                                         struct btrfs_fs_devices, fs_list);
428                 list_del(&fs_devices->fs_list);
429                 free_fs_devices(fs_devices);
430         }
431 }
432
433 static noinline struct btrfs_fs_devices *find_fsid(
434                 const u8 *fsid, const u8 *metadata_fsid)
435 {
436         struct btrfs_fs_devices *fs_devices;
437
438         ASSERT(fsid);
439
440         /* Handle non-split brain cases */
441         list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
442                 if (metadata_fsid) {
443                         if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0
444                             && memcmp(metadata_fsid, fs_devices->metadata_uuid,
445                                       BTRFS_FSID_SIZE) == 0)
446                                 return fs_devices;
447                 } else {
448                         if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
449                                 return fs_devices;
450                 }
451         }
452         return NULL;
453 }
454
455 static struct btrfs_fs_devices *find_fsid_with_metadata_uuid(
456                                 struct btrfs_super_block *disk_super)
457 {
458
459         struct btrfs_fs_devices *fs_devices;
460
461         /*
462          * Handle a scanned device that has completed its fsid change but
463          * belongs to a fs_devices that was created by first scanning
464          * a device which didn't have its fsid/metadata_uuid changed
465          * at all and had the CHANGING_FSID_V2 flag set.
466          */
467         list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
468                 if (fs_devices->fsid_change &&
469                     memcmp(disk_super->metadata_uuid, fs_devices->fsid,
470                            BTRFS_FSID_SIZE) == 0 &&
471                     memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
472                            BTRFS_FSID_SIZE) == 0) {
473                         return fs_devices;
474                 }
475         }
476         /*
477          * Handle a scanned device that has completed its fsid change but
478          * belongs to a fs_devices that was created by a device that
479          * has an outdated pair of fsid/metadata_uuid and the
480          * CHANGING_FSID_V2 flag set.
481          */
482         list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
483                 if (fs_devices->fsid_change &&
484                     memcmp(fs_devices->metadata_uuid,
485                            fs_devices->fsid, BTRFS_FSID_SIZE) != 0 &&
486                     memcmp(disk_super->metadata_uuid, fs_devices->metadata_uuid,
487                            BTRFS_FSID_SIZE) == 0) {
488                         return fs_devices;
489                 }
490         }
491
492         return find_fsid(disk_super->fsid, disk_super->metadata_uuid);
493 }
494
495
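/*
 * Open the block device at @device_path, optionally flush its dirty pages,
 * set the block size used for btrfs super blocks and read the primary super
 * block.  On failure *bdev is set to NULL and an error is returned.
 */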
496 static int
497 btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
498                       int flush, struct block_device **bdev,
499                       struct btrfs_super_block **disk_super)
500 {
501         int ret;
502
503         *bdev = blkdev_get_by_path(device_path, flags, holder);
504
505         if (IS_ERR(*bdev)) {
506                 ret = PTR_ERR(*bdev);
507                 goto error;
508         }
509
510         if (flush)
511                 filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
512         ret = set_blocksize(*bdev, BTRFS_BDEV_BLOCKSIZE);
513         if (ret) {
514                 blkdev_put(*bdev, flags);
515                 goto error;
516         }
517         invalidate_bdev(*bdev);
518         *disk_super = btrfs_read_dev_super(*bdev);
519         if (IS_ERR(*disk_super)) {
520                 ret = PTR_ERR(*disk_super);
521                 blkdev_put(*bdev, flags);
522                 goto error;
523         }
524
525         return 0;
526
527 error:
528         *bdev = NULL;
529         return ret;
530 }
531
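/* Check under RCU whether the device's currently recorded name matches @path. */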
532 static bool device_path_matched(const char *path, struct btrfs_device *device)
533 {
534         int found;
535
536         rcu_read_lock();
537         found = strcmp(rcu_str_deref(device->name), path);
538         rcu_read_unlock();
539
540         return found == 0;
541 }
542
543 /*
544  *  Search for and remove all stale devices (devices which are not mounted).
545  *  When both inputs are NULL, it will search for and release all stale devices.
546  *  path:        Optional. When provided, it will only release unmounted
547  *               devices matching this path.
548  *  skip_device: Optional. Will skip this device when searching for the stale
549  *               devices.
550  *  Return:      0 for success or if @path is NULL.
551  *               -EBUSY if @path is a mounted device.
552  *               -ENOENT if @path does not match any device in the list.
553  */
554 static int btrfs_free_stale_devices(const char *path,
555                                      struct btrfs_device *skip_device)
556 {
557         struct btrfs_fs_devices *fs_devices, *tmp_fs_devices;
558         struct btrfs_device *device, *tmp_device;
559         int ret = 0;
560
561         lockdep_assert_held(&uuid_mutex);
562
563         if (path)
564                 ret = -ENOENT;
565
566         list_for_each_entry_safe(fs_devices, tmp_fs_devices, &fs_uuids, fs_list) {
567
568                 mutex_lock(&fs_devices->device_list_mutex);
569                 list_for_each_entry_safe(device, tmp_device,
570                                          &fs_devices->devices, dev_list) {
571                         if (skip_device && skip_device == device)
572                                 continue;
573                         if (path && !device->name)
574                                 continue;
575                         if (path && !device_path_matched(path, device))
576                                 continue;
577                         if (fs_devices->opened) {
578                                 /* for an already deleted device return 0 */
579                                 if (path && ret != 0)
580                                         ret = -EBUSY;
581                                 break;
582                         }
583
584                         /* delete the stale device */
585                         fs_devices->num_devices--;
586                         list_del(&device->dev_list);
587                         btrfs_free_device(device);
588
589                         ret = 0;
590                 }
591                 mutex_unlock(&fs_devices->device_list_mutex);
592
593                 if (fs_devices->num_devices == 0) {
594                         btrfs_sysfs_remove_fsid(fs_devices);
595                         list_del(&fs_devices->fs_list);
596                         free_fs_devices(fs_devices);
597                 }
598         }
599
600         return ret;
601 }
602
603 /*
604  * This is only used on mount, and we are protected from competing things
605  * messing with our fs_devices by the uuid_mutex, thus we do not need the
606  * fs_devices->device_list_mutex here.
607  */
608 static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
609                         struct btrfs_device *device, fmode_t flags,
610                         void *holder)
611 {
612         struct request_queue *q;
613         struct block_device *bdev;
614         struct btrfs_super_block *disk_super;
615         u64 devid;
616         int ret;
617
618         if (device->bdev)
619                 return -EINVAL;
620         if (!device->name)
621                 return -EINVAL;
622
623         ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
624                                     &bdev, &disk_super);
625         if (ret)
626                 return ret;
627
628         devid = btrfs_stack_device_id(&disk_super->dev_item);
629         if (devid != device->devid)
630                 goto error_free_page;
631
632         if (memcmp(device->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE))
633                 goto error_free_page;
634
635         device->generation = btrfs_super_generation(disk_super);
636
637         if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
638                 if (btrfs_super_incompat_flags(disk_super) &
639                     BTRFS_FEATURE_INCOMPAT_METADATA_UUID) {
640                         pr_err(
641                 "BTRFS: Invalid seeding and uuid-changed device detected\n");
642                         goto error_free_page;
643                 }
644
645                 clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
646                 fs_devices->seeding = true;
647         } else {
648                 if (bdev_read_only(bdev))
649                         clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
650                 else
651                         set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
652         }
653
654         q = bdev_get_queue(bdev);
655         if (!blk_queue_nonrot(q))
656                 fs_devices->rotating = true;
657
658         device->bdev = bdev;
659         clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
660         device->mode = flags;
661
662         fs_devices->open_devices++;
663         if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
664             device->devid != BTRFS_DEV_REPLACE_DEVID) {
665                 fs_devices->rw_devices++;
666                 list_add_tail(&device->dev_alloc_list, &fs_devices->alloc_list);
667         }
668         btrfs_release_disk_super(disk_super);
669
670         return 0;
671
672 error_free_page:
673         btrfs_release_disk_super(disk_super);
674         blkdev_put(bdev, flags);
675
676         return -EINVAL;
677 }
678
679 /*
680  * Handle a scanned device that has its CHANGING_FSID_V2 flag set while the
681  * fs_devices was created by a disk that has already completed its fsid change.
682  * Such a disk can belong to an fs which has had its FSID changed or to one
683  * which hasn't. Handle both cases here.
684  */
685 static struct btrfs_fs_devices *find_fsid_inprogress(
686                                         struct btrfs_super_block *disk_super)
687 {
688         struct btrfs_fs_devices *fs_devices;
689
690         list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
691                 if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
692                            BTRFS_FSID_SIZE) != 0 &&
693                     memcmp(fs_devices->metadata_uuid, disk_super->fsid,
694                            BTRFS_FSID_SIZE) == 0 && !fs_devices->fsid_change) {
695                         return fs_devices;
696                 }
697         }
698
699         return find_fsid(disk_super->fsid, NULL);
700 }
701
702
703 static struct btrfs_fs_devices *find_fsid_changed(
704                                         struct btrfs_super_block *disk_super)
705 {
706         struct btrfs_fs_devices *fs_devices;
707
708         /*
709          * Handle the case where the scanned device is part of an fs that had
710          * multiple successful changes of FSID but the current device didn't
711          * observe it, meaning our fsid will be different from theirs. We need
712          * to handle two subcases:
713          *  1 - The fs still continues to have different METADATA/FSID uuids.
714          *  2 - The fs is switched back to its original FSID (METADATA/FSID
715          *  are equal).
716          */
717         list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
718                 /* Changed UUIDs */
719                 if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
720                            BTRFS_FSID_SIZE) != 0 &&
721                     memcmp(fs_devices->metadata_uuid, disk_super->metadata_uuid,
722                            BTRFS_FSID_SIZE) == 0 &&
723                     memcmp(fs_devices->fsid, disk_super->fsid,
724                            BTRFS_FSID_SIZE) != 0)
725                         return fs_devices;
726
727                 /* Unchanged UUIDs */
728                 if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
729                            BTRFS_FSID_SIZE) == 0 &&
730                     memcmp(fs_devices->fsid, disk_super->metadata_uuid,
731                            BTRFS_FSID_SIZE) == 0)
732                         return fs_devices;
733         }
734
735         return NULL;
736 }
737
738 static struct btrfs_fs_devices *find_fsid_reverted_metadata(
739                                 struct btrfs_super_block *disk_super)
740 {
741         struct btrfs_fs_devices *fs_devices;
742
743         /*
744          * Handle the case where the scanned device is part of an fs whose last
745          * time fs_devices was first created by another constituent device
746          * which didn't fully observe the operation. This results in a
747          * which didn't fully observe the operation. This results in an
748          * btrfs_fs_devices created with metadata/fsid different AND
749          * btrfs_fs_devices::fsid_change set AND the metadata_uuid of the
750          * fs_devices equal to the FSID of the disk.
751          */
752         list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
753                 if (memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
754                            BTRFS_FSID_SIZE) != 0 &&
755                     memcmp(fs_devices->metadata_uuid, disk_super->fsid,
756                            BTRFS_FSID_SIZE) == 0 &&
757                     fs_devices->fsid_change)
758                         return fs_devices;
759         }
760
761         return NULL;
762 }
763 /*
764  * Add a new device to the list of registered devices
765  *
766  * Returns:
767  * the device pointer which was just added or updated on success,
768  * an error pointer on failure
769  */
770 static noinline struct btrfs_device *device_list_add(const char *path,
771                            struct btrfs_super_block *disk_super,
772                            bool *new_device_added)
773 {
774         struct btrfs_device *device;
775         struct btrfs_fs_devices *fs_devices = NULL;
776         struct rcu_string *name;
777         u64 found_transid = btrfs_super_generation(disk_super);
778         u64 devid = btrfs_stack_device_id(&disk_super->dev_item);
779         bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) &
780                 BTRFS_FEATURE_INCOMPAT_METADATA_UUID);
781         bool fsid_change_in_progress = (btrfs_super_flags(disk_super) &
782                                         BTRFS_SUPER_FLAG_CHANGING_FSID_V2);
783
784         if (fsid_change_in_progress) {
785                 if (!has_metadata_uuid)
786                         fs_devices = find_fsid_inprogress(disk_super);
787                 else
788                         fs_devices = find_fsid_changed(disk_super);
789         } else if (has_metadata_uuid) {
790                 fs_devices = find_fsid_with_metadata_uuid(disk_super);
791         } else {
792                 fs_devices = find_fsid_reverted_metadata(disk_super);
793                 if (!fs_devices)
794                         fs_devices = find_fsid(disk_super->fsid, NULL);
795         }
796
797
798         if (!fs_devices) {
799                 if (has_metadata_uuid)
800                         fs_devices = alloc_fs_devices(disk_super->fsid,
801                                                       disk_super->metadata_uuid);
802                 else
803                         fs_devices = alloc_fs_devices(disk_super->fsid, NULL);
804
805                 if (IS_ERR(fs_devices))
806                         return ERR_CAST(fs_devices);
807
808                 fs_devices->fsid_change = fsid_change_in_progress;
809
810                 mutex_lock(&fs_devices->device_list_mutex);
811                 list_add(&fs_devices->fs_list, &fs_uuids);
812
813                 device = NULL;
814         } else {
815                 mutex_lock(&fs_devices->device_list_mutex);
816                 device = btrfs_find_device(fs_devices, devid,
817                                 disk_super->dev_item.uuid, NULL);
818
819                 /*
820                  * If this disk has been pulled into an fs_devices created by
821                  * a device which had the CHANGING_FSID_V2 flag then replace the
822                  * metadata_uuid/fsid values of the fs_devices.
823                  */
824                 if (fs_devices->fsid_change &&
825                     found_transid > fs_devices->latest_generation) {
826                         memcpy(fs_devices->fsid, disk_super->fsid,
827                                         BTRFS_FSID_SIZE);
828
829                         if (has_metadata_uuid)
830                                 memcpy(fs_devices->metadata_uuid,
831                                        disk_super->metadata_uuid,
832                                        BTRFS_FSID_SIZE);
833                         else
834                                 memcpy(fs_devices->metadata_uuid,
835                                        disk_super->fsid, BTRFS_FSID_SIZE);
836
837                         fs_devices->fsid_change = false;
838                 }
839         }
840
841         if (!device) {
842                 if (fs_devices->opened) {
843                         mutex_unlock(&fs_devices->device_list_mutex);
844                         return ERR_PTR(-EBUSY);
845                 }
846
847                 device = btrfs_alloc_device(NULL, &devid,
848                                             disk_super->dev_item.uuid);
849                 if (IS_ERR(device)) {
850                         mutex_unlock(&fs_devices->device_list_mutex);
851                         /* we can safely leave the fs_devices entry around */
852                         return device;
853                 }
854
855                 name = rcu_string_strdup(path, GFP_NOFS);
856                 if (!name) {
857                         btrfs_free_device(device);
858                         mutex_unlock(&fs_devices->device_list_mutex);
859                         return ERR_PTR(-ENOMEM);
860                 }
861                 rcu_assign_pointer(device->name, name);
862
863                 list_add_rcu(&device->dev_list, &fs_devices->devices);
864                 fs_devices->num_devices++;
865
866                 device->fs_devices = fs_devices;
867                 *new_device_added = true;
868
869                 if (disk_super->label[0])
870                         pr_info(
871         "BTRFS: device label %s devid %llu transid %llu %s scanned by %s (%d)\n",
872                                 disk_super->label, devid, found_transid, path,
873                                 current->comm, task_pid_nr(current));
874                 else
875                         pr_info(
876         "BTRFS: device fsid %pU devid %llu transid %llu %s scanned by %s (%d)\n",
877                                 disk_super->fsid, devid, found_transid, path,
878                                 current->comm, task_pid_nr(current));
879
880         } else if (!device->name || strcmp(device->name->str, path)) {
881                 /*
882                  * This is the case when the FS is already mounted.
883                  * 1. If you are here and the device->name is NULL, that
884                  *    means this device was missing at the time of FS mount.
885                  * 2. If you are here and the device->name is different
886                  *    from 'path', that means either
887                  *      a. The same device disappeared and reappeared with a
888                  *         different name, or
889                  *      b. The missing disk which was replaced has
890                  *         reappeared now.
891                  *
892                  * We must allow 1 and 2a above, but 2b would be spurious
893                  * and unintentional.
894                  *
895                  * Further, in case of 1 and 2a above, the disk at 'path'
896                  * would have missed some transactions while it was away, and
897                  * in case of 2a the stale bdev has to be updated as well.
898                  * 2b must not be allowed at any time.
899                  */
900
901                 /*
902                  * For now, we do allow updates to btrfs_fs_device through the
903                  * btrfs dev scan CLI after the FS has been mounted.  We're still
904                  * tracking a problem where systems fail mount by subvolume id
905                  * when we reject replacement on a mounted FS.
906                  */
907                 if (!fs_devices->opened && found_transid < device->generation) {
908                         /*
909                          * That is, if the FS is _not_ mounted and if you
910                          * are here, that means there is more than one
911                          * disk with the same uuid and devid. We keep the
912                          * one with the larger generation number or the
913                          * last-in if the generations are equal.
914                          */
915                         mutex_unlock(&fs_devices->device_list_mutex);
916                         return ERR_PTR(-EEXIST);
917                 }
918
919                 /*
920                  * We are going to replace the device path for a given devid,
921                  * make sure it's the same device if the device is mounted
922                  */
923                 if (device->bdev) {
924                         int error;
925                         dev_t path_dev;
926
927                         error = lookup_bdev(path, &path_dev);
928                         if (error) {
929                                 mutex_unlock(&fs_devices->device_list_mutex);
930                                 return ERR_PTR(error);
931                         }
932
933                         if (device->bdev->bd_dev != path_dev) {
934                                 mutex_unlock(&fs_devices->device_list_mutex);
935                                 /*
936                                  * device->fs_info may not be reliable here, so
937                                  * pass in a NULL instead. This avoids a
938                                  * possible use-after-free when the fs_info and
939                                  * fs_info->sb are already torn down.
940                                  */
941                                 btrfs_warn_in_rcu(NULL,
942         "duplicate device %s devid %llu generation %llu scanned by %s (%d)",
943                                                   path, devid, found_transid,
944                                                   current->comm,
945                                                   task_pid_nr(current));
946                                 return ERR_PTR(-EEXIST);
947                         }
948                         btrfs_info_in_rcu(device->fs_info,
949         "devid %llu device path %s changed to %s scanned by %s (%d)",
950                                           devid, rcu_str_deref(device->name),
951                                           path, current->comm,
952                                           task_pid_nr(current));
953                 }
954
955                 name = rcu_string_strdup(path, GFP_NOFS);
956                 if (!name) {
957                         mutex_unlock(&fs_devices->device_list_mutex);
958                         return ERR_PTR(-ENOMEM);
959                 }
960                 rcu_string_free(device->name);
961                 rcu_assign_pointer(device->name, name);
962                 if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
963                         fs_devices->missing_devices--;
964                         clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
965                 }
966         }
967
968         /*
969          * Unmount does not free the btrfs_device struct but would zero
970          * generation along with most of the other members. So just update
971          * it back. We need it to pick the disk with the largest generation
972          * (as above).
973          */
974         if (!fs_devices->opened) {
975                 device->generation = found_transid;
976                 fs_devices->latest_generation = max_t(u64, found_transid,
977                                                 fs_devices->latest_generation);
978         }
979
980         fs_devices->total_devices = btrfs_super_num_devices(disk_super);
981
982         mutex_unlock(&fs_devices->device_list_mutex);
983         return device;
984 }
985
986 static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
987 {
988         struct btrfs_fs_devices *fs_devices;
989         struct btrfs_device *device;
990         struct btrfs_device *orig_dev;
991         int ret = 0;
992
993         lockdep_assert_held(&uuid_mutex);
994
995         fs_devices = alloc_fs_devices(orig->fsid, NULL);
996         if (IS_ERR(fs_devices))
997                 return fs_devices;
998
999         fs_devices->total_devices = orig->total_devices;
1000
1001         list_for_each_entry(orig_dev, &orig->devices, dev_list) {
1002                 struct rcu_string *name;
1003
1004                 device = btrfs_alloc_device(NULL, &orig_dev->devid,
1005                                             orig_dev->uuid);
1006                 if (IS_ERR(device)) {
1007                         ret = PTR_ERR(device);
1008                         goto error;
1009                 }
1010
1011                 /*
1012                  * This is ok to do without rcu read locked because we hold the
1013                  * uuid mutex so nothing we touch in here is going to disappear.
1014                  */
1015                 if (orig_dev->name) {
1016                         name = rcu_string_strdup(orig_dev->name->str,
1017                                         GFP_KERNEL);
1018                         if (!name) {
1019                                 btrfs_free_device(device);
1020                                 ret = -ENOMEM;
1021                                 goto error;
1022                         }
1023                         rcu_assign_pointer(device->name, name);
1024                 }
1025
1026                 list_add(&device->dev_list, &fs_devices->devices);
1027                 device->fs_devices = fs_devices;
1028                 fs_devices->num_devices++;
1029         }
1030         return fs_devices;
1031 error:
1032         free_fs_devices(fs_devices);
1033         return ERR_PTR(ret);
1034 }
1035
1036 static void __btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices,
1037                                       struct btrfs_device **latest_dev)
1038 {
1039         struct btrfs_device *device, *next;
1040
1041         /* This is the initialized path, it is safe to release the devices. */
1042         list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
1043                 if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state)) {
1044                         if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
1045                                       &device->dev_state) &&
1046                             !test_bit(BTRFS_DEV_STATE_MISSING,
1047                                       &device->dev_state) &&
1048                             (!*latest_dev ||
1049                              device->generation > (*latest_dev)->generation)) {
1050                                 *latest_dev = device;
1051                         }
1052                         continue;
1053                 }
1054
1055                 /*
1056                  * We have already validated the presence of BTRFS_DEV_REPLACE_DEVID
1057                  * in btrfs_init_dev_replace(), so just continue.
1058                  */
1059                 if (device->devid == BTRFS_DEV_REPLACE_DEVID)
1060                         continue;
1061
1062                 if (device->bdev) {
1063                         blkdev_put(device->bdev, device->mode);
1064                         device->bdev = NULL;
1065                         fs_devices->open_devices--;
1066                 }
1067                 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
1068                         list_del_init(&device->dev_alloc_list);
1069                         clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
1070                         fs_devices->rw_devices--;
1071                 }
1072                 list_del_init(&device->dev_list);
1073                 fs_devices->num_devices--;
1074                 btrfs_free_device(device);
1075         }
1076
1077 }
1078
1079 /*
1080  * After we have read the system tree and know the devids belonging to this
1081  * filesystem, remove any device which does not belong there.
1082  */
1083 void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices)
1084 {
1085         struct btrfs_device *latest_dev = NULL;
1086         struct btrfs_fs_devices *seed_dev;
1087
1088         mutex_lock(&uuid_mutex);
1089         __btrfs_free_extra_devids(fs_devices, &latest_dev);
1090
1091         list_for_each_entry(seed_dev, &fs_devices->seed_list, seed_list)
1092                 __btrfs_free_extra_devids(seed_dev, &latest_dev);
1093
1094         fs_devices->latest_bdev = latest_dev->bdev;
1095
1096         mutex_unlock(&uuid_mutex);
1097 }
1098
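/*
 * If the device was opened writeable, sync and invalidate its page cache
 * before dropping our reference on the underlying block device.
 */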
1099 static void btrfs_close_bdev(struct btrfs_device *device)
1100 {
1101         if (!device->bdev)
1102                 return;
1103
1104         if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
1105                 sync_blockdev(device->bdev);
1106                 invalidate_bdev(device->bdev);
1107         }
1108
1109         blkdev_put(device->bdev, device->mode);
1110 }
1111
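/*
 * Tear down the runtime state of one device on close: take it off the
 * allocation list, close its bdev, update the fs_devices counters and reset
 * the per-device state so the structure can be reused by a later mount.
 */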
1112 static void btrfs_close_one_device(struct btrfs_device *device)
1113 {
1114         struct btrfs_fs_devices *fs_devices = device->fs_devices;
1115
1116         if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
1117             device->devid != BTRFS_DEV_REPLACE_DEVID) {
1118                 list_del_init(&device->dev_alloc_list);
1119                 fs_devices->rw_devices--;
1120         }
1121
1122         if (device->devid == BTRFS_DEV_REPLACE_DEVID)
1123                 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
1124
1125         if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
1126                 fs_devices->missing_devices--;
1127
1128         btrfs_close_bdev(device);
1129         if (device->bdev) {
1130                 fs_devices->open_devices--;
1131                 device->bdev = NULL;
1132         }
1133         clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
1134         btrfs_destroy_dev_zone_info(device);
1135
1136         device->fs_info = NULL;
1137         atomic_set(&device->dev_stats_ccnt, 0);
1138         extent_io_tree_release(&device->alloc_state);
1139
1140         /* Verify the device is back in a pristine state  */
1141         ASSERT(!test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state));
1142         ASSERT(!test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
1143         ASSERT(list_empty(&device->dev_alloc_list));
1144         ASSERT(list_empty(&device->post_commit_list));
1145         ASSERT(atomic_read(&device->reada_in_flight) == 0);
1146 }
1147
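/*
 * Drop one open reference on @fs_devices and, when the last reference goes
 * away, close every member device and reset the open state.
 */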
1148 static void close_fs_devices(struct btrfs_fs_devices *fs_devices)
1149 {
1150         struct btrfs_device *device, *tmp;
1151
1152         lockdep_assert_held(&uuid_mutex);
1153
1154         if (--fs_devices->opened > 0)
1155                 return;
1156
1157         list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list)
1158                 btrfs_close_one_device(device);
1159
1160         WARN_ON(fs_devices->open_devices);
1161         WARN_ON(fs_devices->rw_devices);
1162         fs_devices->opened = 0;
1163         fs_devices->seeding = false;
1164         fs_devices->fs_info = NULL;
1165 }
1166
1167 void btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
1168 {
1169         LIST_HEAD(list);
1170         struct btrfs_fs_devices *tmp;
1171
1172         mutex_lock(&uuid_mutex);
1173         close_fs_devices(fs_devices);
1174         if (!fs_devices->opened)
1175                 list_splice_init(&fs_devices->seed_list, &list);
1176
1177         list_for_each_entry_safe(fs_devices, tmp, &list, seed_list) {
1178                 close_fs_devices(fs_devices);
1179                 list_del(&fs_devices->seed_list);
1180                 free_fs_devices(fs_devices);
1181         }
1182         mutex_unlock(&uuid_mutex);
1183 }
1184
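/*
 * Open all member devices of @fs_devices; a device that fails to open with
 * -ENODATA is dropped from the list.  Returns -EINVAL if no device could be
 * opened, otherwise the device with the highest generation becomes
 * latest_bdev.
 */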
1185 static int open_fs_devices(struct btrfs_fs_devices *fs_devices,
1186                                 fmode_t flags, void *holder)
1187 {
1188         struct btrfs_device *device;
1189         struct btrfs_device *latest_dev = NULL;
1190         struct btrfs_device *tmp_device;
1191
1192         flags |= FMODE_EXCL;
1193
1194         list_for_each_entry_safe(device, tmp_device, &fs_devices->devices,
1195                                  dev_list) {
1196                 int ret;
1197
1198                 ret = btrfs_open_one_device(fs_devices, device, flags, holder);
1199                 if (ret == 0 &&
1200                     (!latest_dev || device->generation > latest_dev->generation)) {
1201                         latest_dev = device;
1202                 } else if (ret == -ENODATA) {
1203                         fs_devices->num_devices--;
1204                         list_del(&device->dev_list);
1205                         btrfs_free_device(device);
1206                 }
1207         }
1208         if (fs_devices->open_devices == 0)
1209                 return -EINVAL;
1210
1211         fs_devices->opened = 1;
1212         fs_devices->latest_bdev = latest_dev->bdev;
1213         fs_devices->total_rw_bytes = 0;
1214         fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_REGULAR;
1215         fs_devices->read_policy = BTRFS_READ_POLICY_PID;
1216
1217         return 0;
1218 }
1219
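/* list_sort() comparator: order devices by ascending devid. */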
1220 static int devid_cmp(void *priv, const struct list_head *a,
1221                      const struct list_head *b)
1222 {
1223         const struct btrfs_device *dev1, *dev2;
1224
1225         dev1 = list_entry(a, struct btrfs_device, dev_list);
1226         dev2 = list_entry(b, struct btrfs_device, dev_list);
1227
1228         if (dev1->devid < dev2->devid)
1229                 return -1;
1230         else if (dev1->devid > dev2->devid)
1231                 return 1;
1232         return 0;
1233 }
1234
1235 int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
1236                        fmode_t flags, void *holder)
1237 {
1238         int ret;
1239
1240         lockdep_assert_held(&uuid_mutex);
1241         /*
1242          * The device_list_mutex cannot be taken here in case opening the
1243          * underlying device takes further locks like open_mutex.
1244          *
1245          * We also don't need the lock here as this is called during mount and
1246          * exclusion is provided by uuid_mutex
1247          */
1248
1249         if (fs_devices->opened) {
1250                 fs_devices->opened++;
1251                 ret = 0;
1252         } else {
1253                 list_sort(NULL, &fs_devices->devices, devid_cmp);
1254                 ret = open_fs_devices(fs_devices, flags, holder);
1255         }
1256
1257         return ret;
1258 }
1259
1260 void btrfs_release_disk_super(struct btrfs_super_block *super)
1261 {
1262         struct page *page = virt_to_page(super);
1263
1264         put_page(page);
1265 }
1266
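/*
 * Read the super block at @bytenr from @bdev through the page cache and do
 * basic sanity checks (stored bytenr and magic).  The caller must release the
 * result with btrfs_release_disk_super().
 */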
1267 static struct btrfs_super_block *btrfs_read_disk_super(struct block_device *bdev,
1268                                                        u64 bytenr, u64 bytenr_orig)
1269 {
1270         struct btrfs_super_block *disk_super;
1271         struct page *page;
1272         void *p;
1273         pgoff_t index;
1274
1275         /* make sure our super fits in the device */
1276         if (bytenr + PAGE_SIZE >= i_size_read(bdev->bd_inode))
1277                 return ERR_PTR(-EINVAL);
1278
1279         /* make sure our super fits in the page */
1280         if (sizeof(*disk_super) > PAGE_SIZE)
1281                 return ERR_PTR(-EINVAL);
1282
1283         /* make sure our super doesn't straddle pages on disk */
1284         index = bytenr >> PAGE_SHIFT;
1285         if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_SHIFT != index)
1286                 return ERR_PTR(-EINVAL);
1287
1288         /* pull in the page with our super */
1289         page = read_cache_page_gfp(bdev->bd_inode->i_mapping, index, GFP_KERNEL);
1290
1291         if (IS_ERR(page))
1292                 return ERR_CAST(page);
1293
1294         p = page_address(page);
1295
1296         /* align our pointer to the offset of the super block */
1297         disk_super = p + offset_in_page(bytenr);
1298
1299         if (btrfs_super_bytenr(disk_super) != bytenr_orig ||
1300             btrfs_super_magic(disk_super) != BTRFS_MAGIC) {
1301                 btrfs_release_disk_super(p);
1302                 return ERR_PTR(-EINVAL);
1303         }
1304
1305         if (disk_super->label[0] && disk_super->label[BTRFS_LABEL_SIZE - 1])
1306                 disk_super->label[BTRFS_LABEL_SIZE - 1] = 0;
1307
1308         return disk_super;
1309 }
1310
1311 int btrfs_forget_devices(const char *path)
1312 {
1313         int ret;
1314
1315         mutex_lock(&uuid_mutex);
1316         ret = btrfs_free_stale_devices(strlen(path) ? path : NULL, NULL);
1317         mutex_unlock(&uuid_mutex);
1318
1319         return ret;
1320 }
1321
1322 /*
1323  * Look for a btrfs signature on a device. This may be called outside of the
1324  * mount path and we are not allowed to call set_blocksize during the scan. The
1325  * superblock is read via the pagecache.
1326  */
1327 struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags,
1328                                            void *holder)
1329 {
1330         struct btrfs_super_block *disk_super;
1331         bool new_device_added = false;
1332         struct btrfs_device *device = NULL;
1333         struct block_device *bdev;
1334         u64 bytenr, bytenr_orig;
1335         int ret;
1336
1337         lockdep_assert_held(&uuid_mutex);
1338
1339         /*
1340          * we would like to check all the supers, but that would make
1341          * a btrfs mount succeed after a mkfs from a different FS.
1342          * So, we need to add a special mount option to scan for
1343          * later supers, using BTRFS_SUPER_MIRROR_MAX instead
1344          */
1345         flags |= FMODE_EXCL;
1346
1347         bdev = blkdev_get_by_path(path, flags, holder);
1348         if (IS_ERR(bdev))
1349                 return ERR_CAST(bdev);
1350
1351         bytenr_orig = btrfs_sb_offset(0);
1352         ret = btrfs_sb_log_location_bdev(bdev, 0, READ, &bytenr);
1353         if (ret)
1354                 return ERR_PTR(ret);
1355
1356         disk_super = btrfs_read_disk_super(bdev, bytenr, bytenr_orig);
1357         if (IS_ERR(disk_super)) {
1358                 device = ERR_CAST(disk_super);
1359                 goto error_bdev_put;
1360         }
1361
1362         device = device_list_add(path, disk_super, &new_device_added);
1363         if (!IS_ERR(device)) {
1364                 if (new_device_added)
1365                         btrfs_free_stale_devices(path, device);
1366         }
1367
1368         btrfs_release_disk_super(disk_super);
1369
1370 error_bdev_put:
1371         blkdev_put(bdev, flags);
1372
1373         return device;
1374 }
1375
1376 /*
1377  * Try to find a chunk that intersects the [start, start + len] range and, when
1378  * one is found, record the end of it in *start.
1379  */
1380 static bool contains_pending_extent(struct btrfs_device *device, u64 *start,
1381                                     u64 len)
1382 {
1383         u64 physical_start, physical_end;
1384
1385         lockdep_assert_held(&device->fs_info->chunk_mutex);
1386
1387         if (!find_first_extent_bit(&device->alloc_state, *start,
1388                                    &physical_start, &physical_end,
1389                                    CHUNK_ALLOCATED, NULL)) {
1390
1391                 if (in_range(physical_start, *start, len) ||
1392                     in_range(*start, physical_start,
1393                              physical_end - physical_start)) {
1394                         *start = physical_end + 1;
1395                         return true;
1396                 }
1397         }
1398         return false;
1399 }
1400
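/*
 * Return the first offset at which a new dev extent may be placed on @device,
 * depending on the fs_devices' chunk allocation policy.
 */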
1401 static u64 dev_extent_search_start(struct btrfs_device *device, u64 start)
1402 {
1403         switch (device->fs_devices->chunk_alloc_policy) {
1404         case BTRFS_CHUNK_ALLOC_REGULAR:
1405                 /*
1406                  * We don't want to overwrite the superblock on the drive nor
1407                  * any area used by the boot loader (grub for example), so we
1408                  * make sure to start at an offset of at least 1MB.
1409                  */
1410                 return max_t(u64, start, SZ_1M);
1411         case BTRFS_CHUNK_ALLOC_ZONED:
1412                 /*
1413                  * We don't care about the starting region like the regular
1414                  * allocator, because we use/reserve the first two zones for
1415                  * superblock logging anyway.
1416                  */
1417                 return ALIGN(start, device->zone_info->zone_size);
1418         default:
1419                 BUG();
1420         }
1421 }
1422
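/*
 * Zoned variant of the hole check: shrink or advance the hole so that it only
 * covers zones which are empty and allocatable for @num_bytes. Returns true
 * if the hole was modified.
 */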
1423 static bool dev_extent_hole_check_zoned(struct btrfs_device *device,
1424                                         u64 *hole_start, u64 *hole_size,
1425                                         u64 num_bytes)
1426 {
1427         u64 zone_size = device->zone_info->zone_size;
1428         u64 pos;
1429         int ret;
1430         bool changed = false;
1431
1432         ASSERT(IS_ALIGNED(*hole_start, zone_size));
1433
1434         while (*hole_size > 0) {
1435                 pos = btrfs_find_allocatable_zones(device, *hole_start,
1436                                                    *hole_start + *hole_size,
1437                                                    num_bytes);
1438                 if (pos != *hole_start) {
1439                         *hole_size = *hole_start + *hole_size - pos;
1440                         *hole_start = pos;
1441                         changed = true;
1442                         if (*hole_size < num_bytes)
1443                                 break;
1444                 }
1445
1446                 ret = btrfs_ensure_empty_zones(device, pos, num_bytes);
1447
1448                 /* Range is ensured to be empty */
1449                 if (!ret)
1450                         return changed;
1451
1452                 /* Given hole range was invalid (outside of device) */
1453                 if (ret == -ERANGE) {
1454                         *hole_start += *hole_size;
1455                         *hole_size = 0;
1456                         return true;
1457                 }
1458
1459                 *hole_start += zone_size;
1460                 *hole_size -= zone_size;
1461                 changed = true;
1462         }
1463
1464         return changed;
1465 }
1466
1467 /**
1468  * dev_extent_hole_check - check if specified hole is suitable for allocation
1469  * @device:     the device which has the hole
1470  * @hole_start: starting position of the hole
1471  * @hole_size:  the size of the hole
1472  * @num_bytes:  the size of the free space that we need
1473  *
1474  * This function may modify @hole_start and @hole_size to reflect the suitable
1475  * position for allocation. Returns true if the hole position is updated, false otherwise.
1476  */
1477 static bool dev_extent_hole_check(struct btrfs_device *device, u64 *hole_start,
1478                                   u64 *hole_size, u64 num_bytes)
1479 {
1480         bool changed = false;
1481         u64 hole_end = *hole_start + *hole_size;
1482
1483         for (;;) {
1484                 /*
1485                  * Check before we set max_hole_start, otherwise we could end up
1486                  * sending back this offset anyway.
1487                  */
1488                 if (contains_pending_extent(device, hole_start, *hole_size)) {
1489                         if (hole_end >= *hole_start)
1490                                 *hole_size = hole_end - *hole_start;
1491                         else
1492                                 *hole_size = 0;
1493                         changed = true;
1494                 }
1495
1496                 switch (device->fs_devices->chunk_alloc_policy) {
1497                 case BTRFS_CHUNK_ALLOC_REGULAR:
1498                         /* No extra check */
1499                         break;
1500                 case BTRFS_CHUNK_ALLOC_ZONED:
1501                         if (dev_extent_hole_check_zoned(device, hole_start,
1502                                                         hole_size, num_bytes)) {
1503                                 changed = true;
1504                                 /*
1505                                  * The changed hole can contain pending extent.
1506                                  * Loop again to check that.
1507                                  */
1508                                 continue;
1509                         }
1510                         break;
1511                 default:
1512                         BUG();
1513                 }
1514
1515                 break;
1516         }
1517
1518         return changed;
1519 }
1520
1521 /*
1522  * find_free_dev_extent_start - find free space in the specified device
1523  * @device:       the device which we search the free space in
1524  * @num_bytes:    the size of the free space that we need
1525  * @search_start: the position from which to begin the search
1526  * @start:        store the start of the free space.
1527  * @len:          the size of the free space. that we find, or the size
1528  * @len:          the size of the free space that we find, or the size
1529  *                of the max free space if we don't find suitable free space
1530  * This uses a pretty simple search; the expectation is that it is
1531  * called very infrequently and that a given device has a small number
1532  * of extents.
1533  *
1534  * @start is used to store the start of the free space if we find one. But if
1535  * we don't find suitable free space, it will be used to store the start
1536  * position of the max free space.
1537  *
1538  * @len is used to store the size of the free space that we find.
1539  * But if we don't find suitable free space, it is used to store the size of
1540  * the max free space.
1541  *
1542  * NOTE: This function will search the *commit* root of the device tree, and
1543  * does an extra check to ensure dev extents are not double allocated.
1544  * This makes the function safe to allocate dev extents but may not report
1545  * correct usable device space, as a device extent freed in the current
1546  * transaction is not reported as available.
1547  */
1548 static int find_free_dev_extent_start(struct btrfs_device *device,
1549                                 u64 num_bytes, u64 search_start, u64 *start,
1550                                 u64 *len)
1551 {
1552         struct btrfs_fs_info *fs_info = device->fs_info;
1553         struct btrfs_root *root = fs_info->dev_root;
1554         struct btrfs_key key;
1555         struct btrfs_dev_extent *dev_extent;
1556         struct btrfs_path *path;
1557         u64 hole_size;
1558         u64 max_hole_start;
1559         u64 max_hole_size;
1560         u64 extent_end;
1561         u64 search_end = device->total_bytes;
1562         int ret;
1563         int slot;
1564         struct extent_buffer *l;
1565
1566         search_start = dev_extent_search_start(device, search_start);
1567
1568         WARN_ON(device->zone_info &&
1569                 !IS_ALIGNED(num_bytes, device->zone_info->zone_size));
1570
1571         path = btrfs_alloc_path();
1572         if (!path)
1573                 return -ENOMEM;
1574
1575         max_hole_start = search_start;
1576         max_hole_size = 0;
1577
1578 again:
1579         if (search_start >= search_end ||
1580                 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
1581                 ret = -ENOSPC;
1582                 goto out;
1583         }
1584
1585         path->reada = READA_FORWARD;
1586         path->search_commit_root = 1;
1587         path->skip_locking = 1;
1588
1589         key.objectid = device->devid;
1590         key.offset = search_start;
1591         key.type = BTRFS_DEV_EXTENT_KEY;
1592
1593         ret = btrfs_search_backwards(root, &key, path);
1594         if (ret < 0)
1595                 goto out;
1596
1597         while (1) {
1598                 l = path->nodes[0];
1599                 slot = path->slots[0];
1600                 if (slot >= btrfs_header_nritems(l)) {
1601                         ret = btrfs_next_leaf(root, path);
1602                         if (ret == 0)
1603                                 continue;
1604                         if (ret < 0)
1605                                 goto out;
1606
1607                         break;
1608                 }
1609                 btrfs_item_key_to_cpu(l, &key, slot);
1610
1611                 if (key.objectid < device->devid)
1612                         goto next;
1613
1614                 if (key.objectid > device->devid)
1615                         break;
1616
1617                 if (key.type != BTRFS_DEV_EXTENT_KEY)
1618                         goto next;
1619
1620                 if (key.offset > search_start) {
1621                         hole_size = key.offset - search_start;
1622                         dev_extent_hole_check(device, &search_start, &hole_size,
1623                                               num_bytes);
1624
1625                         if (hole_size > max_hole_size) {
1626                                 max_hole_start = search_start;
1627                                 max_hole_size = hole_size;
1628                         }
1629
1630                         /*
1631                          * If this free space is greater than what we need,
1632                          * it must be the max free space that we have found
1633                          * until now, so max_hole_start must point to the start
1634                          * of this free space and the length of this free space
1635                          * is stored in max_hole_size. Thus, we return
1636                          * max_hole_start and max_hole_size and go back to the
1637                          * caller.
1638                          */
1639                         if (hole_size >= num_bytes) {
1640                                 ret = 0;
1641                                 goto out;
1642                         }
1643                 }
1644
1645                 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
1646                 extent_end = key.offset + btrfs_dev_extent_length(l,
1647                                                                   dev_extent);
1648                 if (extent_end > search_start)
1649                         search_start = extent_end;
1650 next:
1651                 path->slots[0]++;
1652                 cond_resched();
1653         }
1654
1655         /*
1656          * At this point, search_start should be the end of
1657          * allocated dev extents, and when shrinking the device,
1658          * search_end may be smaller than search_start.
1659          */
1660         if (search_end > search_start) {
1661                 hole_size = search_end - search_start;
1662                 if (dev_extent_hole_check(device, &search_start, &hole_size,
1663                                           num_bytes)) {
1664                         btrfs_release_path(path);
1665                         goto again;
1666                 }
1667
1668                 if (hole_size > max_hole_size) {
1669                         max_hole_start = search_start;
1670                         max_hole_size = hole_size;
1671                 }
1672         }
1673
1674         /* See above. */
1675         if (max_hole_size < num_bytes)
1676                 ret = -ENOSPC;
1677         else
1678                 ret = 0;
1679
1680 out:
1681         btrfs_free_path(path);
1682         *start = max_hole_start;
1683         if (len)
1684                 *len = max_hole_size;
1685         return ret;
1686 }
1687
1688 int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
1689                          u64 *start, u64 *len)
1690 {
1691         /* FIXME use last free of some kind */
1692         return find_free_dev_extent_start(device, num_bytes, 0, start, len);
1693 }
1694
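/*
 * Delete the dev extent item of @device starting at @start and return the
 * extent's length in @dev_extent_len.
 */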
1695 static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
1696                           struct btrfs_device *device,
1697                           u64 start, u64 *dev_extent_len)
1698 {
1699         struct btrfs_fs_info *fs_info = device->fs_info;
1700         struct btrfs_root *root = fs_info->dev_root;
1701         int ret;
1702         struct btrfs_path *path;
1703         struct btrfs_key key;
1704         struct btrfs_key found_key;
1705         struct extent_buffer *leaf = NULL;
1706         struct btrfs_dev_extent *extent = NULL;
1707
1708         path = btrfs_alloc_path();
1709         if (!path)
1710                 return -ENOMEM;
1711
1712         key.objectid = device->devid;
1713         key.offset = start;
1714         key.type = BTRFS_DEV_EXTENT_KEY;
1715 again:
1716         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1717         if (ret > 0) {
1718                 ret = btrfs_previous_item(root, path, key.objectid,
1719                                           BTRFS_DEV_EXTENT_KEY);
1720                 if (ret)
1721                         goto out;
1722                 leaf = path->nodes[0];
1723                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1724                 extent = btrfs_item_ptr(leaf, path->slots[0],
1725                                         struct btrfs_dev_extent);
1726                 BUG_ON(found_key.offset > start || found_key.offset +
1727                        btrfs_dev_extent_length(leaf, extent) < start);
1728                 key = found_key;
1729                 btrfs_release_path(path);
1730                 goto again;
1731         } else if (ret == 0) {
1732                 leaf = path->nodes[0];
1733                 extent = btrfs_item_ptr(leaf, path->slots[0],
1734                                         struct btrfs_dev_extent);
1735         } else {
1736                 goto out;
1737         }
1738
1739         *dev_extent_len = btrfs_dev_extent_length(leaf, extent);
1740
1741         ret = btrfs_del_item(trans, root, path);
1742         if (ret == 0)
1743                 set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
1744 out:
1745         btrfs_free_path(path);
1746         return ret;
1747 }
1748
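/*
 * Return the logical address right after the last mapped chunk, i.e. the
 * start offset for the next chunk to be allocated.
 */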
1749 static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
1750 {
1751         struct extent_map_tree *em_tree;
1752         struct extent_map *em;
1753         struct rb_node *n;
1754         u64 ret = 0;
1755
1756         em_tree = &fs_info->mapping_tree;
1757         read_lock(&em_tree->lock);
1758         n = rb_last(&em_tree->map.rb_root);
1759         if (n) {
1760                 em = rb_entry(n, struct extent_map, rb_node);
1761                 ret = em->start + em->len;
1762         }
1763         read_unlock(&em_tree->lock);
1764
1765         return ret;
1766 }
1767
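/*
 * Find the next available devid by looking up the last dev item in the chunk
 * tree.
 */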
1768 static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
1769                                     u64 *devid_ret)
1770 {
1771         int ret;
1772         struct btrfs_key key;
1773         struct btrfs_key found_key;
1774         struct btrfs_path *path;
1775
1776         path = btrfs_alloc_path();
1777         if (!path)
1778                 return -ENOMEM;
1779
1780         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1781         key.type = BTRFS_DEV_ITEM_KEY;
1782         key.offset = (u64)-1;
1783
1784         ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
1785         if (ret < 0)
1786                 goto error;
1787
1788         if (ret == 0) {
1789                 /* Corruption */
1790                 btrfs_err(fs_info, "corrupted chunk tree devid -1 matched");
1791                 ret = -EUCLEAN;
1792                 goto error;
1793         }
1794
1795         ret = btrfs_previous_item(fs_info->chunk_root, path,
1796                                   BTRFS_DEV_ITEMS_OBJECTID,
1797                                   BTRFS_DEV_ITEM_KEY);
1798         if (ret) {
1799                 *devid_ret = 1;
1800         } else {
1801                 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1802                                       path->slots[0]);
1803                 *devid_ret = found_key.offset + 1;
1804         }
1805         ret = 0;
1806 error:
1807         btrfs_free_path(path);
1808         return ret;
1809 }
1810
1811 /*
1812  * The device information is stored in the chunk root.
1813  * The btrfs_device struct should be fully filled in.
1814  */
1815 static int btrfs_add_dev_item(struct btrfs_trans_handle *trans,
1816                             struct btrfs_device *device)
1817 {
1818         int ret;
1819         struct btrfs_path *path;
1820         struct btrfs_dev_item *dev_item;
1821         struct extent_buffer *leaf;
1822         struct btrfs_key key;
1823         unsigned long ptr;
1824
1825         path = btrfs_alloc_path();
1826         if (!path)
1827                 return -ENOMEM;
1828
1829         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1830         key.type = BTRFS_DEV_ITEM_KEY;
1831         key.offset = device->devid;
1832
1833         ret = btrfs_insert_empty_item(trans, trans->fs_info->chunk_root, path,
1834                                       &key, sizeof(*dev_item));
1835         if (ret)
1836                 goto out;
1837
1838         leaf = path->nodes[0];
1839         dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
1840
1841         btrfs_set_device_id(leaf, dev_item, device->devid);
1842         btrfs_set_device_generation(leaf, dev_item, 0);
1843         btrfs_set_device_type(leaf, dev_item, device->type);
1844         btrfs_set_device_io_align(leaf, dev_item, device->io_align);
1845         btrfs_set_device_io_width(leaf, dev_item, device->io_width);
1846         btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
1847         btrfs_set_device_total_bytes(leaf, dev_item,
1848                                      btrfs_device_get_disk_total_bytes(device));
1849         btrfs_set_device_bytes_used(leaf, dev_item,
1850                                     btrfs_device_get_bytes_used(device));
1851         btrfs_set_device_group(leaf, dev_item, 0);
1852         btrfs_set_device_seek_speed(leaf, dev_item, 0);
1853         btrfs_set_device_bandwidth(leaf, dev_item, 0);
1854         btrfs_set_device_start_offset(leaf, dev_item, 0);
1855
1856         ptr = btrfs_device_uuid(dev_item);
1857         write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
1858         ptr = btrfs_device_fsid(dev_item);
1859         write_extent_buffer(leaf, trans->fs_info->fs_devices->metadata_uuid,
1860                             ptr, BTRFS_FSID_SIZE);
1861         btrfs_mark_buffer_dirty(leaf);
1862
1863         ret = 0;
1864 out:
1865         btrfs_free_path(path);
1866         return ret;
1867 }
1868
1869 /*
1870  * Function to update ctime/mtime for a given device path.
1871  * Mainly used for ctime/mtime based probes like libblkid.
1872  */
1873 static void update_dev_time(struct block_device *bdev)
1874 {
1875         struct inode *inode = bdev->bd_inode;
1876         struct timespec64 now;
1877
1878         /* Shouldn't happen but just in case. */
1879         if (!inode)
1880                 return;
1881
1882         now = current_time(inode);
1883         generic_update_time(inode, &now, S_MTIME | S_CTIME);
1884 }
1885
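/*
 * Delete the dev item of @device from the chunk tree and commit the
 * transaction.
 */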
1886 static int btrfs_rm_dev_item(struct btrfs_device *device)
1887 {
1888         struct btrfs_root *root = device->fs_info->chunk_root;
1889         int ret;
1890         struct btrfs_path *path;
1891         struct btrfs_key key;
1892         struct btrfs_trans_handle *trans;
1893
1894         path = btrfs_alloc_path();
1895         if (!path)
1896                 return -ENOMEM;
1897
1898         trans = btrfs_start_transaction(root, 0);
1899         if (IS_ERR(trans)) {
1900                 btrfs_free_path(path);
1901                 return PTR_ERR(trans);
1902         }
1903         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1904         key.type = BTRFS_DEV_ITEM_KEY;
1905         key.offset = device->devid;
1906
1907         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1908         if (ret) {
1909                 if (ret > 0)
1910                         ret = -ENOENT;
1911                 btrfs_abort_transaction(trans, ret);
1912                 btrfs_end_transaction(trans);
1913                 goto out;
1914         }
1915
1916         ret = btrfs_del_item(trans, root, path);
1917         if (ret) {
1918                 btrfs_abort_transaction(trans, ret);
1919                 btrfs_end_transaction(trans);
1920         }
1921
1922 out:
1923         btrfs_free_path(path);
1924         if (!ret)
1925                 ret = btrfs_commit_transaction(trans);
1926         return ret;
1927 }
1928
1929 /*
1930  * Verify that @num_devices satisfies the RAID profile constraints in the whole
1931  * filesystem. It's up to the caller to adjust that number regarding e.g. device
1932  * replace.
1933  */
1934 static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info,
1935                 u64 num_devices)
1936 {
1937         u64 all_avail;
1938         unsigned seq;
1939         int i;
1940
1941         do {
1942                 seq = read_seqbegin(&fs_info->profiles_lock);
1943
1944                 all_avail = fs_info->avail_data_alloc_bits |
1945                             fs_info->avail_system_alloc_bits |
1946                             fs_info->avail_metadata_alloc_bits;
1947         } while (read_seqretry(&fs_info->profiles_lock, seq));
1948
1949         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
1950                 if (!(all_avail & btrfs_raid_array[i].bg_flag))
1951                         continue;
1952
1953                 if (num_devices < btrfs_raid_array[i].devs_min)
1954                         return btrfs_raid_array[i].mindev_error;
1955         }
1956
1957         return 0;
1958 }
1959
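/*
 * Return any device in @fs_devs other than @device that is not missing and
 * has a bdev, or NULL if there is none.
 */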
1960 static struct btrfs_device * btrfs_find_next_active_device(
1961                 struct btrfs_fs_devices *fs_devs, struct btrfs_device *device)
1962 {
1963         struct btrfs_device *next_device;
1964
1965         list_for_each_entry(next_device, &fs_devs->devices, dev_list) {
1966                 if (next_device != device &&
1967                     !test_bit(BTRFS_DEV_STATE_MISSING, &next_device->dev_state)
1968                     && next_device->bdev)
1969                         return next_device;
1970         }
1971
1972         return NULL;
1973 }
1974
1975 /*
1976  * Helper function to check if the given device is part of s_bdev / latest_bdev
1977  * and replace it with the provided or the next active device. In the context
1978  * where this function is called, there should always be another device (or
1979  * this_dev) which is active.
1980  */
1981 void __cold btrfs_assign_next_active_device(struct btrfs_device *device,
1982                                             struct btrfs_device *next_device)
1983 {
1984         struct btrfs_fs_info *fs_info = device->fs_info;
1985
1986         if (!next_device)
1987                 next_device = btrfs_find_next_active_device(fs_info->fs_devices,
1988                                                             device);
1989         ASSERT(next_device);
1990
1991         if (fs_info->sb->s_bdev &&
1992                         (fs_info->sb->s_bdev == device->bdev))
1993                 fs_info->sb->s_bdev = next_device->bdev;
1994
1995         if (fs_info->fs_devices->latest_bdev == device->bdev)
1996                 fs_info->fs_devices->latest_bdev = next_device->bdev;
1997 }
1998
1999 /*
2000  * Return btrfs_fs_devices::num_devices excluding the device that's being
2001  * currently replaced.
2002  */
2003 static u64 btrfs_num_devices(struct btrfs_fs_info *fs_info)
2004 {
2005         u64 num_devices = fs_info->fs_devices->num_devices;
2006
2007         down_read(&fs_info->dev_replace.rwsem);
2008         if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
2009                 ASSERT(num_devices > 1);
2010                 num_devices--;
2011         }
2012         up_read(&fs_info->dev_replace.rwsem);
2013
2014         return num_devices;
2015 }
2016
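/*
 * Wipe the btrfs magic from all superblock copies on @bdev (or reset the
 * superblock log zones on zoned devices) so the device is no longer
 * recognized as btrfs, then notify udev and libblkid.
 */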
2017 void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info,
2018                                struct block_device *bdev,
2019                                const char *device_path)
2020 {
2021         struct btrfs_super_block *disk_super;
2022         int copy_num;
2023
2024         if (!bdev)
2025                 return;
2026
2027         for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX; copy_num++) {
2028                 struct page *page;
2029                 int ret;
2030
2031                 disk_super = btrfs_read_dev_one_super(bdev, copy_num);
2032                 if (IS_ERR(disk_super))
2033                         continue;
2034
2035                 if (bdev_is_zoned(bdev)) {
2036                         btrfs_reset_sb_log_zones(bdev, copy_num);
2037                         continue;
2038                 }
2039
2040                 memset(&disk_super->magic, 0, sizeof(disk_super->magic));
2041
2042                 page = virt_to_page(disk_super);
2043                 set_page_dirty(page);
2044                 lock_page(page);
2045                 /* write_one_page() unlocks the page */
2046                 ret = write_one_page(page);
2047                 if (ret)
2048                         btrfs_warn(fs_info,
2049                                 "error clearing superblock number %d (%d)",
2050                                 copy_num, ret);
2051                 btrfs_release_disk_super(disk_super);
2052
2053         }
2054
2055         /* Notify udev that device has changed */
2056         btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
2057
2058         /* Update ctime/mtime for device path for libblkid */
2059         update_dev_time(bdev);
2060 }
2061
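/*
 * Remove a device from a mounted filesystem: shrink it to zero, delete its
 * dev item, drop it from the device lists and scratch its superblocks.
 * The caller does the final blkdev_put() on the returned *bdev.
 */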
2062 int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path,
2063                     u64 devid, struct block_device **bdev, fmode_t *mode)
2064 {
2065         struct btrfs_device *device;
2066         struct btrfs_fs_devices *cur_devices;
2067         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2068         u64 num_devices;
2069         int ret = 0;
2070
2071         mutex_lock(&uuid_mutex);
2072
2073         num_devices = btrfs_num_devices(fs_info);
2074
2075         ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1);
2076         if (ret)
2077                 goto out;
2078
2079         device = btrfs_find_device_by_devspec(fs_info, devid, device_path);
2080
2081         if (IS_ERR(device)) {
2082                 if (PTR_ERR(device) == -ENOENT &&
2083                     device_path && strcmp(device_path, "missing") == 0)
2084                         ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
2085                 else
2086                         ret = PTR_ERR(device);
2087                 goto out;
2088         }
2089
2090         if (btrfs_pinned_by_swapfile(fs_info, device)) {
2091                 btrfs_warn_in_rcu(fs_info,
2092                   "cannot remove device %s (devid %llu) due to active swapfile",
2093                                   rcu_str_deref(device->name), device->devid);
2094                 ret = -ETXTBSY;
2095                 goto out;
2096         }
2097
2098         if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
2099                 ret = BTRFS_ERROR_DEV_TGT_REPLACE;
2100                 goto out;
2101         }
2102
2103         if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
2104             fs_info->fs_devices->rw_devices == 1) {
2105                 ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
2106                 goto out;
2107         }
2108
2109         if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
2110                 mutex_lock(&fs_info->chunk_mutex);
2111                 list_del_init(&device->dev_alloc_list);
2112                 device->fs_devices->rw_devices--;
2113                 mutex_unlock(&fs_info->chunk_mutex);
2114         }
2115
2116         mutex_unlock(&uuid_mutex);
2117         ret = btrfs_shrink_device(device, 0);
2118         if (!ret)
2119                 btrfs_reada_remove_dev(device);
2120         mutex_lock(&uuid_mutex);
2121         if (ret)
2122                 goto error_undo;
2123
2124         /*
2125          * TODO: the superblock still includes this device in its num_devices
2126          * counter although write_all_supers() is not locked out. This
2127          * could give a filesystem state which requires a degraded mount.
2128          */
2129         ret = btrfs_rm_dev_item(device);
2130         if (ret)
2131                 goto error_undo;
2132
2133         clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
2134         btrfs_scrub_cancel_dev(device);
2135
2136         /*
2137          * the device list mutex makes sure that we don't change
2138          * the device list while someone else is writing out all
2139          * the device supers. Whoever is writing all supers, should
2140          * lock the device list mutex before getting the number of
2141          * devices in the super block (super_copy). Conversely,
2142          * whoever updates the number of devices in the super block
2143          * (super_copy) should hold the device list mutex.
2144          */
2145
2146         /*
2147          * In normal cases cur_devices == fs_devices. But when deleting
2148          * a seed device, cur_devices should point to the seed's own
2149          * fs_devices, listed under fs_devices->seed_list.
2150          */
2151         cur_devices = device->fs_devices;
2152         mutex_lock(&fs_devices->device_list_mutex);
2153         list_del_rcu(&device->dev_list);
2154
2155         cur_devices->num_devices--;
2156         cur_devices->total_devices--;
2157         /* Update total_devices of the parent fs_devices if it's seed */
2158         if (cur_devices != fs_devices)
2159                 fs_devices->total_devices--;
2160
2161         if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
2162                 cur_devices->missing_devices--;
2163
2164         btrfs_assign_next_active_device(device, NULL);
2165
2166         if (device->bdev) {
2167                 cur_devices->open_devices--;
2168                 /* remove sysfs entry */
2169                 btrfs_sysfs_remove_device(device);
2170         }
2171
2172         num_devices = btrfs_super_num_devices(fs_info->super_copy) - 1;
2173         btrfs_set_super_num_devices(fs_info->super_copy, num_devices);
2174         mutex_unlock(&fs_devices->device_list_mutex);
2175
2176         /*
2177          * At this point, the device is zero sized and detached from the
2178          * devices list.  All that's left is to zero out the old supers and
2179          * free the device.
2180          *
2181          * We cannot call btrfs_close_bdev() here because we're holding the sb
2182          * write lock, and blkdev_put() will pull in the ->open_mutex on the
2183          * block device and its dependencies.  Instead just flush the device
2184          * and let the caller do the final blkdev_put.
2185          */
2186         if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
2187                 btrfs_scratch_superblocks(fs_info, device->bdev,
2188                                           device->name->str);
2189                 if (device->bdev) {
2190                         sync_blockdev(device->bdev);
2191                         invalidate_bdev(device->bdev);
2192                 }
2193         }
2194
2195         *bdev = device->bdev;
2196         *mode = device->mode;
2197         synchronize_rcu();
2198         btrfs_free_device(device);
2199
2200         if (cur_devices->open_devices == 0) {
2201                 list_del_init(&cur_devices->seed_list);
2202                 close_fs_devices(cur_devices);
2203                 free_fs_devices(cur_devices);
2204         }
2205
2206 out:
2207         mutex_unlock(&uuid_mutex);
2208         return ret;
2209
2210 error_undo:
2211         btrfs_reada_undo_remove_dev(device);
2212         if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
2213                 mutex_lock(&fs_info->chunk_mutex);
2214                 list_add(&device->dev_alloc_list,
2215                          &fs_devices->alloc_list);
2216                 device->fs_devices->rw_devices++;
2217                 mutex_unlock(&fs_info->chunk_mutex);
2218         }
2219         goto out;
2220 }
2221
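/*
 * Unlink the replace source device from its fs_devices and update the device
 * counters. Caller must hold the device_list_mutex.
 */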
2222 void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev)
2223 {
2224         struct btrfs_fs_devices *fs_devices;
2225
2226         lockdep_assert_held(&srcdev->fs_info->fs_devices->device_list_mutex);
2227
2228         /*
2229          * In case of a fs with no seed, srcdev->fs_devices will point
2230          * to the fs_devices of fs_info. However when the dev being replaced
2231          * is a seed dev it will point to the seed's local fs_devices. In
2232          * short, srcdev will have its correct fs_devices in both cases.
2233          */
2234         fs_devices = srcdev->fs_devices;
2235
2236         list_del_rcu(&srcdev->dev_list);
2237         list_del(&srcdev->dev_alloc_list);
2238         fs_devices->num_devices--;
2239         if (test_bit(BTRFS_DEV_STATE_MISSING, &srcdev->dev_state))
2240                 fs_devices->missing_devices--;
2241
2242         if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state))
2243                 fs_devices->rw_devices--;
2244
2245         if (srcdev->bdev)
2246                 fs_devices->open_devices--;
2247 }
2248
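/*
 * Close and free the replace source device. If it was the last device of a
 * seed filesystem, release that seed's fs_devices as well.
 */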
2249 void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device *srcdev)
2250 {
2251         struct btrfs_fs_devices *fs_devices = srcdev->fs_devices;
2252
2253         mutex_lock(&uuid_mutex);
2254
2255         btrfs_close_bdev(srcdev);
2256         synchronize_rcu();
2257         btrfs_free_device(srcdev);
2258
2259         /* If there are no devs left we'd rather delete the fs_devices */
2260         if (!fs_devices->num_devices) {
2261                 /*
2262                  * On a mounted FS, num_devices can't be zero unless it's a
2263                  * seed. In case of a seed device being replaced, the replace
2264                  * target is added to the sprout FS, so there will be no more
2265                  * devices left under the seed FS.
2266                  */
2267                 ASSERT(fs_devices->seeding);
2268
2269                 list_del_init(&fs_devices->seed_list);
2270                 close_fs_devices(fs_devices);
2271                 free_fs_devices(fs_devices);
2272         }
2273         mutex_unlock(&uuid_mutex);
2274 }
2275
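/*
 * Remove the replace target device from the device lists, scratch its
 * superblocks and free it.
 */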
2276 void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev)
2277 {
2278         struct btrfs_fs_devices *fs_devices = tgtdev->fs_info->fs_devices;
2279
2280         mutex_lock(&fs_devices->device_list_mutex);
2281
2282         btrfs_sysfs_remove_device(tgtdev);
2283
2284         if (tgtdev->bdev)
2285                 fs_devices->open_devices--;
2286
2287         fs_devices->num_devices--;
2288
2289         btrfs_assign_next_active_device(tgtdev, NULL);
2290
2291         list_del_rcu(&tgtdev->dev_list);
2292
2293         mutex_unlock(&fs_devices->device_list_mutex);
2294
2295         /*
2296          * The update_dev_time() within btrfs_scratch_superblocks()
2297          * may lead to a call to btrfs_show_devname() which will try
2298          * to hold device_list_mutex. And here this device
2299          * is already out of the device list, so we don't have to hold
2300          * the device_list_mutex lock.
2301          */
2302         btrfs_scratch_superblocks(tgtdev->fs_info, tgtdev->bdev,
2303                                   tgtdev->name->str);
2304
2305         btrfs_close_bdev(tgtdev);
2306         synchronize_rcu();
2307         btrfs_free_device(tgtdev);
2308 }
2309
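/*
 * Look up a btrfs_device by reading the superblock at @device_path and
 * matching its devid and uuid against the devices of the mounted filesystem.
 */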
2310 static struct btrfs_device *btrfs_find_device_by_path(
2311                 struct btrfs_fs_info *fs_info, const char *device_path)
2312 {
2313         int ret = 0;
2314         struct btrfs_super_block *disk_super;
2315         u64 devid;
2316         u8 *dev_uuid;
2317         struct block_device *bdev;
2318         struct btrfs_device *device;
2319
2320         ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ,
2321                                     fs_info->bdev_holder, 0, &bdev, &disk_super);
2322         if (ret)
2323                 return ERR_PTR(ret);
2324
2325         devid = btrfs_stack_device_id(&disk_super->dev_item);
2326         dev_uuid = disk_super->dev_item.uuid;
2327         if (btrfs_fs_incompat(fs_info, METADATA_UUID))
2328                 device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
2329                                            disk_super->metadata_uuid);
2330         else
2331                 device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
2332                                            disk_super->fsid);
2333
2334         btrfs_release_disk_super(disk_super);
2335         if (!device)
2336                 device = ERR_PTR(-ENOENT);
2337         blkdev_put(bdev, FMODE_READ);
2338         return device;
2339 }
2340
2341 /*
2342  * Lookup a device given by device id, or the path if the id is 0.
2343  */
2344 struct btrfs_device *btrfs_find_device_by_devspec(
2345                 struct btrfs_fs_info *fs_info, u64 devid,
2346                 const char *device_path)
2347 {
2348         struct btrfs_device *device;
2349
2350         if (devid) {
2351                 device = btrfs_find_device(fs_info->fs_devices, devid, NULL,
2352                                            NULL);
2353                 if (!device)
2354                         return ERR_PTR(-ENOENT);
2355                 return device;
2356         }
2357
2358         if (!device_path || !device_path[0])
2359                 return ERR_PTR(-EINVAL);
2360
2361         if (strcmp(device_path, "missing") == 0) {
2362                 /* Find first missing device */
2363                 list_for_each_entry(device, &fs_info->fs_devices->devices,
2364                                     dev_list) {
2365                         if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
2366                                      &device->dev_state) && !device->bdev)
2367                                 return device;
2368                 }
2369                 return ERR_PTR(-ENOENT);
2370         }
2371
2372         return btrfs_find_device_by_path(fs_info, device_path);
2373 }
2374
2375 /*
2376  * Does all the dirty work required for changing the filesystem's UUID.
2377  */
2378 static int btrfs_prepare_sprout(struct btrfs_fs_info *fs_info)
2379 {
2380         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2381         struct btrfs_fs_devices *old_devices;
2382         struct btrfs_fs_devices *seed_devices;
2383         struct btrfs_super_block *disk_super = fs_info->super_copy;
2384         struct btrfs_device *device;
2385         u64 super_flags;
2386
2387         lockdep_assert_held(&uuid_mutex);
2388         if (!fs_devices->seeding)
2389                 return -EINVAL;
2390
2391         /*
2392          * Private copy of the seed devices, anchored at
2393          * fs_info->fs_devices->seed_list
2394          */
2395         seed_devices = alloc_fs_devices(NULL, NULL);
2396         if (IS_ERR(seed_devices))
2397                 return PTR_ERR(seed_devices);
2398
2399         /*
2400          * It's necessary to retain a copy of the original seed fs_devices in
2401          * fs_uuids so that filesystems which have been seeded can successfully
2402          * reference the seed device from open_seed_devices. This also supports
2403          * multiple seed filesystems.
2404          */
2405         old_devices = clone_fs_devices(fs_devices);
2406         if (IS_ERR(old_devices)) {
2407                 kfree(seed_devices);
2408                 return PTR_ERR(old_devices);
2409         }
2410
2411         list_add(&old_devices->fs_list, &fs_uuids);
2412
2413         memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
2414         seed_devices->opened = 1;
2415         INIT_LIST_HEAD(&seed_devices->devices);
2416         INIT_LIST_HEAD(&seed_devices->alloc_list);
2417         mutex_init(&seed_devices->device_list_mutex);
2418
2419         mutex_lock(&fs_devices->device_list_mutex);
2420         list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
2421                               synchronize_rcu);
2422         list_for_each_entry(device, &seed_devices->devices, dev_list)
2423                 device->fs_devices = seed_devices;
2424
2425         fs_devices->seeding = false;
2426         fs_devices->num_devices = 0;
2427         fs_devices->open_devices = 0;
2428         fs_devices->missing_devices = 0;
2429         fs_devices->rotating = false;
2430         list_add(&seed_devices->seed_list, &fs_devices->seed_list);
2431
2432         generate_random_uuid(fs_devices->fsid);
2433         memcpy(fs_devices->metadata_uuid, fs_devices->fsid, BTRFS_FSID_SIZE);
2434         memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
2435         mutex_unlock(&fs_devices->device_list_mutex);
2436
2437         super_flags = btrfs_super_flags(disk_super) &
2438                       ~BTRFS_SUPER_FLAG_SEEDING;
2439         btrfs_set_super_flags(disk_super, super_flags);
2440
2441         return 0;
2442 }
2443
2444 /*
2445  * Store the expected generation for seed devices in device items.
2446  */
2447 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans)
2448 {
2449         struct btrfs_fs_info *fs_info = trans->fs_info;
2450         struct btrfs_root *root = fs_info->chunk_root;
2451         struct btrfs_path *path;
2452         struct extent_buffer *leaf;
2453         struct btrfs_dev_item *dev_item;
2454         struct btrfs_device *device;
2455         struct btrfs_key key;
2456         u8 fs_uuid[BTRFS_FSID_SIZE];
2457         u8 dev_uuid[BTRFS_UUID_SIZE];
2458         u64 devid;
2459         int ret;
2460
2461         path = btrfs_alloc_path();
2462         if (!path)
2463                 return -ENOMEM;
2464
2465         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2466         key.offset = 0;
2467         key.type = BTRFS_DEV_ITEM_KEY;
2468
2469         while (1) {
2470                 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2471                 if (ret < 0)
2472                         goto error;
2473
2474                 leaf = path->nodes[0];
2475 next_slot:
2476                 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
2477                         ret = btrfs_next_leaf(root, path);
2478                         if (ret > 0)
2479                                 break;
2480                         if (ret < 0)
2481                                 goto error;
2482                         leaf = path->nodes[0];
2483                         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2484                         btrfs_release_path(path);
2485                         continue;
2486                 }
2487
2488                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2489                 if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
2490                     key.type != BTRFS_DEV_ITEM_KEY)
2491                         break;
2492
2493                 dev_item = btrfs_item_ptr(leaf, path->slots[0],
2494                                           struct btrfs_dev_item);
2495                 devid = btrfs_device_id(leaf, dev_item);
2496                 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
2497                                    BTRFS_UUID_SIZE);
2498                 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
2499                                    BTRFS_FSID_SIZE);
2500                 device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
2501                                            fs_uuid);
2502                 BUG_ON(!device); /* Logic error */
2503
2504                 if (device->fs_devices->seeding) {
2505                         btrfs_set_device_generation(leaf, dev_item,
2506                                                     device->generation);
2507                         btrfs_mark_buffer_dirty(leaf);
2508                 }
2509
2510                 path->slots[0]++;
2511                 goto next_slot;
2512         }
2513         ret = 0;
2514 error:
2515         btrfs_free_path(path);
2516         return ret;
2517 }
2518
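/*
 * Add a new device to a mounted filesystem (the device add ioctl). On a seed
 * filesystem this also sprouts a new writable filesystem on top of the seed.
 */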
2519 int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path)
2520 {
2521         struct btrfs_root *root = fs_info->dev_root;
2522         struct request_queue *q;
2523         struct btrfs_trans_handle *trans;
2524         struct btrfs_device *device;
2525         struct block_device *bdev;
2526         struct super_block *sb = fs_info->sb;
2527         struct rcu_string *name;
2528         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2529         u64 orig_super_total_bytes;
2530         u64 orig_super_num_devices;
2531         int seeding_dev = 0;
2532         int ret = 0;
2533         bool locked = false;
2534
2535         if (sb_rdonly(sb) && !fs_devices->seeding)
2536                 return -EROFS;
2537
2538         bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
2539                                   fs_info->bdev_holder);
2540         if (IS_ERR(bdev))
2541                 return PTR_ERR(bdev);
2542
2543         if (!btrfs_check_device_zone_type(fs_info, bdev)) {
2544                 ret = -EINVAL;
2545                 goto error;
2546         }
2547
2548         if (fs_devices->seeding) {
2549                 seeding_dev = 1;
2550                 down_write(&sb->s_umount);
2551                 mutex_lock(&uuid_mutex);
2552                 locked = true;
2553         }
2554
2555         sync_blockdev(bdev);
2556
2557         rcu_read_lock();
2558         list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) {
2559                 if (device->bdev == bdev) {
2560                         ret = -EEXIST;
2561                         rcu_read_unlock();
2562                         goto error;
2563                 }
2564         }
2565         rcu_read_unlock();
2566
2567         device = btrfs_alloc_device(fs_info, NULL, NULL);
2568         if (IS_ERR(device)) {
2569                 /* we can safely leave the fs_devices entry around */
2570                 ret = PTR_ERR(device);
2571                 goto error;
2572         }
2573
2574         name = rcu_string_strdup(device_path, GFP_KERNEL);
2575         if (!name) {
2576                 ret = -ENOMEM;
2577                 goto error_free_device;
2578         }
2579         rcu_assign_pointer(device->name, name);
2580
2581         device->fs_info = fs_info;
2582         device->bdev = bdev;
2583
2584         ret = btrfs_get_dev_zone_info(device);
2585         if (ret)
2586                 goto error_free_device;
2587
2588         trans = btrfs_start_transaction(root, 0);
2589         if (IS_ERR(trans)) {
2590                 ret = PTR_ERR(trans);
2591                 goto error_free_zone;
2592         }
2593
2594         q = bdev_get_queue(bdev);
2595         set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
2596         device->generation = trans->transid;
2597         device->io_width = fs_info->sectorsize;
2598         device->io_align = fs_info->sectorsize;
2599         device->sector_size = fs_info->sectorsize;
2600         device->total_bytes = round_down(i_size_read(bdev->bd_inode),
2601                                          fs_info->sectorsize);
2602         device->disk_total_bytes = device->total_bytes;
2603         device->commit_total_bytes = device->total_bytes;
2604         set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
2605         clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
2606         device->mode = FMODE_EXCL;
2607         device->dev_stats_valid = 1;
2608         set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE);
2609
2610         if (seeding_dev) {
2611                 btrfs_clear_sb_rdonly(sb);
2612                 ret = btrfs_prepare_sprout(fs_info);
2613                 if (ret) {
2614                         btrfs_abort_transaction(trans, ret);
2615                         goto error_trans;
2616                 }
2617         }
2618
2619         device->fs_devices = fs_devices;
2620
2621         mutex_lock(&fs_devices->device_list_mutex);
2622         mutex_lock(&fs_info->chunk_mutex);
2623         list_add_rcu(&device->dev_list, &fs_devices->devices);
2624         list_add(&device->dev_alloc_list, &fs_devices->alloc_list);
2625         fs_devices->num_devices++;
2626         fs_devices->open_devices++;
2627         fs_devices->rw_devices++;
2628         fs_devices->total_devices++;
2629         fs_devices->total_rw_bytes += device->total_bytes;
2630
2631         atomic64_add(device->total_bytes, &fs_info->free_chunk_space);
2632
2633         if (!blk_queue_nonrot(q))
2634                 fs_devices->rotating = true;
2635
2636         orig_super_total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
2637         btrfs_set_super_total_bytes(fs_info->super_copy,
2638                 round_down(orig_super_total_bytes + device->total_bytes,
2639                            fs_info->sectorsize));
2640
2641         orig_super_num_devices = btrfs_super_num_devices(fs_info->super_copy);
2642         btrfs_set_super_num_devices(fs_info->super_copy,
2643                                     orig_super_num_devices + 1);
2644
2645         /*
2646          * We've got more storage, clear any full flags on the space
2647          * infos.
2648          */
2649         btrfs_clear_space_info_full(fs_info);
2650
2651         mutex_unlock(&fs_info->chunk_mutex);
2652
2653         /* Add sysfs device entry */
2654         btrfs_sysfs_add_device(device);
2655
2656         mutex_unlock(&fs_devices->device_list_mutex);
2657
2658         if (seeding_dev) {
2659                 mutex_lock(&fs_info->chunk_mutex);
2660                 ret = init_first_rw_device(trans);
2661                 mutex_unlock(&fs_info->chunk_mutex);
2662                 if (ret) {
2663                         btrfs_abort_transaction(trans, ret);
2664                         goto error_sysfs;
2665                 }
2666         }
2667
2668         ret = btrfs_add_dev_item(trans, device);
2669         if (ret) {
2670                 btrfs_abort_transaction(trans, ret);
2671                 goto error_sysfs;
2672         }
2673
2674         if (seeding_dev) {
2675                 ret = btrfs_finish_sprout(trans);
2676                 if (ret) {
2677                         btrfs_abort_transaction(trans, ret);
2678                         goto error_sysfs;
2679                 }
2680
2681                 /*
2682                  * fs_devices now represents the newly sprouted filesystem and
2683                  * its fsid has been changed by btrfs_prepare_sprout
2684                  */
2685                 btrfs_sysfs_update_sprout_fsid(fs_devices);
2686         }
2687
2688         ret = btrfs_commit_transaction(trans);
2689
2690         if (seeding_dev) {
2691                 mutex_unlock(&uuid_mutex);
2692                 up_write(&sb->s_umount);
2693                 locked = false;
2694
2695                 if (ret) /* transaction commit */
2696                         return ret;
2697
2698                 ret = btrfs_relocate_sys_chunks(fs_info);
2699                 if (ret < 0)
2700                         btrfs_handle_fs_error(fs_info, ret,
2701                                     "Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command.");
2702                 trans = btrfs_attach_transaction(root);
2703                 if (IS_ERR(trans)) {
2704                         if (PTR_ERR(trans) == -ENOENT)
2705                                 return 0;
2706                         ret = PTR_ERR(trans);
2707                         trans = NULL;
2708                         goto error_sysfs;
2709                 }
2710                 ret = btrfs_commit_transaction(trans);
2711         }
2712
2713         /*
2714          * Now that we have written a new super block to this device, check all
2715          * other fs_devices lists to see if device_path alienates any other
2716          * scanned device.
2717          * We can ignore the return value as it typically returns -EINVAL and
2718          * only succeeds if the device was an alien.
2719          */
2720         btrfs_forget_devices(device_path);
2721
2722         /* Update ctime/mtime for blkid or udev */
2723         update_dev_time(bdev);
2724
2725         return ret;
2726
2727 error_sysfs:
2728         btrfs_sysfs_remove_device(device);
2729         mutex_lock(&fs_info->fs_devices->device_list_mutex);
2730         mutex_lock(&fs_info->chunk_mutex);
2731         list_del_rcu(&device->dev_list);
2732         list_del(&device->dev_alloc_list);
2733         fs_info->fs_devices->num_devices--;
2734         fs_info->fs_devices->open_devices--;
2735         fs_info->fs_devices->rw_devices--;
2736         fs_info->fs_devices->total_devices--;
2737         fs_info->fs_devices->total_rw_bytes -= device->total_bytes;
2738         atomic64_sub(device->total_bytes, &fs_info->free_chunk_space);
2739         btrfs_set_super_total_bytes(fs_info->super_copy,
2740                                     orig_super_total_bytes);
2741         btrfs_set_super_num_devices(fs_info->super_copy,
2742                                     orig_super_num_devices);
2743         mutex_unlock(&fs_info->chunk_mutex);
2744         mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2745 error_trans:
2746         if (seeding_dev)
2747                 btrfs_set_sb_rdonly(sb);
2748         if (trans)
2749                 btrfs_end_transaction(trans);
2750 error_free_zone:
2751         btrfs_destroy_dev_zone_info(device);
2752 error_free_device:
2753         btrfs_free_device(device);
2754 error:
2755         blkdev_put(bdev, FMODE_EXCL);
2756         if (locked) {
2757                 mutex_unlock(&uuid_mutex);
2758                 up_write(&sb->s_umount);
2759         }
2760         return ret;
2761 }
2762
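/*
 * Write the in-memory size and geometry fields of @device back into its dev
 * item in the chunk tree.
 */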
2763 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
2764                                         struct btrfs_device *device)
2765 {
2766         int ret;
2767         struct btrfs_path *path;
2768         struct btrfs_root *root = device->fs_info->chunk_root;
2769         struct btrfs_dev_item *dev_item;
2770         struct extent_buffer *leaf;
2771         struct btrfs_key key;
2772
2773         path = btrfs_alloc_path();
2774         if (!path)
2775                 return -ENOMEM;
2776
2777         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2778         key.type = BTRFS_DEV_ITEM_KEY;
2779         key.offset = device->devid;
2780
2781         ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2782         if (ret < 0)
2783                 goto out;
2784
2785         if (ret > 0) {
2786                 ret = -ENOENT;
2787                 goto out;
2788         }
2789
2790         leaf = path->nodes[0];
2791         dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
2792
2793         btrfs_set_device_id(leaf, dev_item, device->devid);
2794         btrfs_set_device_type(leaf, dev_item, device->type);
2795         btrfs_set_device_io_align(leaf, dev_item, device->io_align);
2796         btrfs_set_device_io_width(leaf, dev_item, device->io_width);
2797         btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
2798         btrfs_set_device_total_bytes(leaf, dev_item,
2799                                      btrfs_device_get_disk_total_bytes(device));
2800         btrfs_set_device_bytes_used(leaf, dev_item,
2801                                     btrfs_device_get_bytes_used(device));
2802         btrfs_mark_buffer_dirty(leaf);
2803
2804 out:
2805         btrfs_free_path(path);
2806         return ret;
2807 }
2808
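/*
 * Grow @device to @new_size (rounded down to the sector size): update the
 * superblock total_bytes and the in-memory device sizes, queue the device
 * on the post-commit update list and rewrite its dev item.
 */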
2809 int btrfs_grow_device(struct btrfs_trans_handle *trans,
2810                       struct btrfs_device *device, u64 new_size)
2811 {
2812         struct btrfs_fs_info *fs_info = device->fs_info;
2813         struct btrfs_super_block *super_copy = fs_info->super_copy;
2814         u64 old_total;
2815         u64 diff;
2816
2817         if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
2818                 return -EACCES;
2819
2820         new_size = round_down(new_size, fs_info->sectorsize);
2821
2822         mutex_lock(&fs_info->chunk_mutex);
2823         old_total = btrfs_super_total_bytes(super_copy);
2824         diff = round_down(new_size - device->total_bytes, fs_info->sectorsize);
2825
2826         if (new_size <= device->total_bytes ||
2827             test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
2828                 mutex_unlock(&fs_info->chunk_mutex);
2829                 return -EINVAL;
2830         }
2831
2832         btrfs_set_super_total_bytes(super_copy,
2833                         round_down(old_total + diff, fs_info->sectorsize));
2834         device->fs_devices->total_rw_bytes += diff;
2835
2836         btrfs_device_set_total_bytes(device, new_size);
2837         btrfs_device_set_disk_total_bytes(device, new_size);
2838         btrfs_clear_space_info_full(device->fs_info);
2839         if (list_empty(&device->post_commit_list))
2840                 list_add_tail(&device->post_commit_list,
2841                               &trans->transaction->dev_update_list);
2842         mutex_unlock(&fs_info->chunk_mutex);
2843
2844         return btrfs_update_device(trans, device);
2845 }
2846
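/* Delete the chunk item at @chunk_offset from the chunk tree. */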
2847 static int btrfs_free_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
2848 {
2849         struct btrfs_fs_info *fs_info = trans->fs_info;
2850         struct btrfs_root *root = fs_info->chunk_root;
2851         int ret;
2852         struct btrfs_path *path;
2853         struct btrfs_key key;
2854
2855         path = btrfs_alloc_path();
2856         if (!path)
2857                 return -ENOMEM;
2858
2859         key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2860         key.offset = chunk_offset;
2861         key.type = BTRFS_CHUNK_ITEM_KEY;
2862
2863         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2864         if (ret < 0)
2865                 goto out;
2866         else if (ret > 0) { /* Logic error or corruption */
2867                 btrfs_handle_fs_error(fs_info, -ENOENT,
2868                                       "Failed lookup while freeing chunk.");
2869                 ret = -ENOENT;
2870                 goto out;
2871         }
2872
2873         ret = btrfs_del_item(trans, root, path);
2874         if (ret < 0)
2875                 btrfs_handle_fs_error(fs_info, ret,
2876                                       "Failed to delete chunk item.");
2877 out:
2878         btrfs_free_path(path);
2879         return ret;
2880 }
2881
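/*
 * Remove the entry for @chunk_offset from the superblock's sys_chunk_array.
 * The array is a packed sequence of (struct btrfs_disk_key, struct
 * btrfs_chunk + stripes) pairs, so walk it entry by entry and memmove the
 * tail over the matching one.
 */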
2882 static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
2883 {
2884         struct btrfs_super_block *super_copy = fs_info->super_copy;
2885         struct btrfs_disk_key *disk_key;
2886         struct btrfs_chunk *chunk;
2887         u8 *ptr;
2888         int ret = 0;
2889         u32 num_stripes;
2890         u32 array_size;
2891         u32 len = 0;
2892         u32 cur;
2893         struct btrfs_key key;
2894
2895         lockdep_assert_held(&fs_info->chunk_mutex);
2896         array_size = btrfs_super_sys_array_size(super_copy);
2897
2898         ptr = super_copy->sys_chunk_array;
2899         cur = 0;
2900
2901         while (cur < array_size) {
2902                 disk_key = (struct btrfs_disk_key *)ptr;
2903                 btrfs_disk_key_to_cpu(&key, disk_key);
2904
2905                 len = sizeof(*disk_key);
2906
2907                 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
2908                         chunk = (struct btrfs_chunk *)(ptr + len);
2909                         num_stripes = btrfs_stack_chunk_num_stripes(chunk);
2910                         len += btrfs_chunk_item_size(num_stripes);
2911                 } else {
2912                         ret = -EIO;
2913                         break;
2914                 }
2915                 if (key.objectid == BTRFS_FIRST_CHUNK_TREE_OBJECTID &&
2916                     key.offset == chunk_offset) {
2917                         memmove(ptr, ptr + len, array_size - (cur + len));
2918                         array_size -= len;
2919                         btrfs_set_super_sys_array_size(super_copy, array_size);
2920                 } else {
2921                         ptr += len;
2922                         cur += len;
2923                 }
2924         }
2925         return ret;
2926 }
2927
2928 /*
2929  * btrfs_get_chunk_map() - Find the mapping containing the given logical extent.
2930  * @logical: Logical block offset in bytes.
2931  * @length: Length of extent in bytes.
2932  *
2933  * Return: Chunk mapping or ERR_PTR.
2934  */
2935 struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
2936                                        u64 logical, u64 length)
2937 {
2938         struct extent_map_tree *em_tree;
2939         struct extent_map *em;
2940
2941         em_tree = &fs_info->mapping_tree;
2942         read_lock(&em_tree->lock);
2943         em = lookup_extent_mapping(em_tree, logical, length);
2944         read_unlock(&em_tree->lock);
2945
2946         if (!em) {
2947                 btrfs_crit(fs_info, "unable to find logical %llu length %llu",
2948                            logical, length);
2949                 return ERR_PTR(-EINVAL);
2950         }
2951
2952         if (em->start > logical || em->start + em->len < logical) {
2953                 btrfs_crit(fs_info,
2954                            "found a bad mapping, wanted %llu-%llu, found %llu-%llu",
2955                            logical, length, em->start, em->start + em->len);
2956                 free_extent_map(em);
2957                 return ERR_PTR(-EINVAL);
2958         }
2959
2960         /* callers are responsible for dropping em's ref. */
2961         return em;
2962 }
2963
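/*
 * Update the dev item of every device backing the chunk's stripes and then
 * delete the chunk item itself from the chunk tree.
 */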
2964 static int remove_chunk_item(struct btrfs_trans_handle *trans,
2965                              struct map_lookup *map, u64 chunk_offset)
2966 {
2967         int i;
2968
2969         /*
2970          * Removing chunk items and updating the device items in the chunks btree
2971          * requires holding the chunk_mutex.
2972          * See the comment at btrfs_chunk_alloc() for the details.
2973          */
2974         lockdep_assert_held(&trans->fs_info->chunk_mutex);
2975
2976         for (i = 0; i < map->num_stripes; i++) {
2977                 int ret;
2978
2979                 ret = btrfs_update_device(trans, map->stripes[i].dev);
2980                 if (ret)
2981                         return ret;
2982         }
2983
2984         return btrfs_free_chunk(trans, chunk_offset);
2985 }
2986
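/*
 * Remove the chunk at @chunk_offset: free its device extents, delete its
 * chunk item (and the sys_chunk_array entry for SYSTEM chunks) and finally
 * remove the associated block group.
 */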
2987 int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
2988 {
2989         struct btrfs_fs_info *fs_info = trans->fs_info;
2990         struct extent_map *em;
2991         struct map_lookup *map;
2992         u64 dev_extent_len = 0;
2993         int i, ret = 0;
2994         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2995
2996         em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
2997         if (IS_ERR(em)) {
2998                 /*
2999                  * This is a logic error, but we don't want to just rely on the
3000                  * user having built with ASSERT enabled, so if ASSERT doesn't
3001                  * do anything we still error out.
3002                  */
3003                 ASSERT(0);
3004                 return PTR_ERR(em);
3005         }
3006         map = em->map_lookup;
3007
3008         /*
3009          * First delete the device extent items from the devices btree.
3010          * We take the device_list_mutex to avoid racing with the finishing phase
3011          * of a device replace operation. See the comment below before acquiring
3012          * fs_info->chunk_mutex. Note that here we do not acquire the chunk_mutex
3013          * because that can result in a deadlock when deleting the device extent
3014          * items from the devices btree - COWing an extent buffer from the btree
3015          * may result in allocating a new metadata chunk, which would attempt to
3016          * lock fs_info->chunk_mutex again.
3017          */
3018         mutex_lock(&fs_devices->device_list_mutex);
3019         for (i = 0; i < map->num_stripes; i++) {
3020                 struct btrfs_device *device = map->stripes[i].dev;
3021                 ret = btrfs_free_dev_extent(trans, device,
3022                                             map->stripes[i].physical,
3023                                             &dev_extent_len);
3024                 if (ret) {
3025                         mutex_unlock(&fs_devices->device_list_mutex);
3026                         btrfs_abort_transaction(trans, ret);
3027                         goto out;
3028                 }
3029
3030                 if (device->bytes_used > 0) {
3031                         mutex_lock(&fs_info->chunk_mutex);
3032                         btrfs_device_set_bytes_used(device,
3033                                         device->bytes_used - dev_extent_len);
3034                         atomic64_add(dev_extent_len, &fs_info->free_chunk_space);
3035                         btrfs_clear_space_info_full(fs_info);
3036                         mutex_unlock(&fs_info->chunk_mutex);
3037                 }
3038         }
3039         mutex_unlock(&fs_devices->device_list_mutex);
3040
3041         /*
3042          * We acquire fs_info->chunk_mutex for 2 reasons:
3043          *
3044          * 1) Just like with the first phase of the chunk allocation, we must
3045          *    reserve system space, do all chunk btree updates and deletions, and
3046          *    update the system chunk array in the superblock while holding this
3047          *    mutex. This is for similar reasons as explained on the comment at
3048          *    the top of btrfs_chunk_alloc();
3049          *
3050          * 2) Prevent races with the final phase of a device replace operation
3051          *    that replaces the device object associated with the map's stripes,
3052          *    because the device object's id can change at any time during that
3053          *    final phase of the device replace operation
3054          *    (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the
3055          *    replaced device and then see it with an ID of
3056          *    BTRFS_DEV_REPLACE_DEVID, which would cause a failure when updating
3057          *    the device item, which does not exist in the chunk btree.
3058          *    The finishing phase of device replace acquires both the
3059          *    device_list_mutex and the chunk_mutex, in that order, so we are
3060          *    safe by just acquiring the chunk_mutex.
3061          */
3062         trans->removing_chunk = true;
3063         mutex_lock(&fs_info->chunk_mutex);
3064
3065         check_system_chunk(trans, map->type);
3066
3067         ret = remove_chunk_item(trans, map, chunk_offset);
3068         /*
3069          * Normally we should not get -ENOSPC since we reserved space before
3070          * through the call to check_system_chunk().
3071          *
3072          * Despite our system space_info having enough free space, we may not
3073          * be able to allocate extents from its block groups, because all have
3074          * an incompatible profile, which will force us to allocate a new system
3075          * block group with the right profile, or right after we called
3076          * check_system_chunk() above, a scrub turned the only system block group
3077          * with enough free space into RO mode.
3078          * This is explained with more detail at do_chunk_alloc().
3079          *
3080          * So if we get -ENOSPC, allocate a new system chunk and retry once.
3081          */
3082         if (ret == -ENOSPC) {
3083                 const u64 sys_flags = btrfs_system_alloc_profile(fs_info);
3084                 struct btrfs_block_group *sys_bg;
3085
3086                 sys_bg = btrfs_alloc_chunk(trans, sys_flags);
3087                 if (IS_ERR(sys_bg)) {
3088                         ret = PTR_ERR(sys_bg);
3089                         btrfs_abort_transaction(trans, ret);
3090                         goto out;
3091                 }
3092
3093                 ret = btrfs_chunk_alloc_add_chunk_item(trans, sys_bg);
3094                 if (ret) {
3095                         btrfs_abort_transaction(trans, ret);
3096                         goto out;
3097                 }
3098
3099                 ret = remove_chunk_item(trans, map, chunk_offset);
3100                 if (ret) {
3101                         btrfs_abort_transaction(trans, ret);
3102                         goto out;
3103                 }
3104         } else if (ret) {
3105                 btrfs_abort_transaction(trans, ret);
3106                 goto out;
3107         }
3108
3109         trace_btrfs_chunk_free(fs_info, map, chunk_offset, em->len);
3110
3111         if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
3112                 ret = btrfs_del_sys_chunk(fs_info, chunk_offset);
3113                 if (ret) {
3114                         btrfs_abort_transaction(trans, ret);
3115                         goto out;
3116                 }
3117         }
3118
3119         mutex_unlock(&fs_info->chunk_mutex);
3120         trans->removing_chunk = false;
3121
3122         /*
3123          * We are done with chunk btree updates and deletions, so release the
3124          * system space we previously reserved (with check_system_chunk()).
3125          */
3126         btrfs_trans_release_chunk_metadata(trans);
3127
3128         ret = btrfs_remove_block_group(trans, chunk_offset, em);
3129         if (ret) {
3130                 btrfs_abort_transaction(trans, ret);
3131                 goto out;
3132         }
3133
3134 out:
3135         if (trans->removing_chunk) {
3136                 mutex_unlock(&fs_info->chunk_mutex);
3137                 trans->removing_chunk = false;
3138         }
3139         /* once for us */
3140         free_extent_map(em);
3141         return ret;
3142 }
3143
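/*
 * Relocate all extents of the chunk at @chunk_offset to other chunks and
 * then delete the now empty chunk.
 */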
3144 int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
3145 {
3146         struct btrfs_root *root = fs_info->chunk_root;
3147         struct btrfs_trans_handle *trans;
3148         struct btrfs_block_group *block_group;
3149         u64 length;
3150         int ret;
3151
3152         /*
3153          * Prevent races with automatic removal of unused block groups.
3154          * After we relocate and before we remove the chunk with offset
3155          * chunk_offset, automatic removal of the block group can kick in,
3156          * resulting in a failure when calling btrfs_remove_chunk() below.
3157          *
3158          * Make sure to acquire this mutex before doing a tree search (dev
3159          * or chunk trees) to find chunks. Otherwise the cleaner kthread might
3160          * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after
3161          * we release the path used to search the chunk/dev tree and before
3162          * the current task acquires this mutex and calls us.
3163          */
3164         lockdep_assert_held(&fs_info->reclaim_bgs_lock);
3165
3166         /* step one, relocate all the extents inside this chunk */
3167         btrfs_scrub_pause(fs_info);
3168         ret = btrfs_relocate_block_group(fs_info, chunk_offset);
3169         btrfs_scrub_continue(fs_info);
3170         if (ret)
3171                 return ret;
3172
3173         block_group = btrfs_lookup_block_group(fs_info, chunk_offset);
3174         if (!block_group)
3175                 return -ENOENT;
3176         btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group);
3177         length = block_group->length;
3178         btrfs_put_block_group(block_group);
3179
3180         /*
3181          * On a zoned file system, discard the whole block group; this will
3182          * trigger a REQ_OP_ZONE_RESET operation on the device zone. If
3183          * resetting the zone fails, don't treat it as a fatal problem from the
3184          * filesystem's point of view.
3185          */
3186         if (btrfs_is_zoned(fs_info)) {
3187                 ret = btrfs_discard_extent(fs_info, chunk_offset, length, NULL);
3188                 if (ret)
3189                         btrfs_info(fs_info,
3190                                 "failed to reset zone %llu after relocation",
3191                                 chunk_offset);
3192         }
3193
3194         trans = btrfs_start_trans_remove_block_group(root->fs_info,
3195                                                      chunk_offset);
3196         if (IS_ERR(trans)) {
3197                 ret = PTR_ERR(trans);
3198                 btrfs_handle_fs_error(root->fs_info, ret, NULL);
3199                 return ret;
3200         }
3201
3202         /*
3203          * step two, delete the device extents and the
3204          * chunk tree entries
3205          */
3206         ret = btrfs_remove_chunk(trans, chunk_offset);
3207         btrfs_end_transaction(trans);
3208         return ret;
3209 }
3210
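/*
 * Relocate every SYSTEM chunk, walking the chunk tree from the highest
 * offset downwards.  Chunks that failed with -ENOSPC are retried once
 * after a full pass.
 */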
3211 static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info)
3212 {
3213         struct btrfs_root *chunk_root = fs_info->chunk_root;
3214         struct btrfs_path *path;
3215         struct extent_buffer *leaf;
3216         struct btrfs_chunk *chunk;
3217         struct btrfs_key key;
3218         struct btrfs_key found_key;
3219         u64 chunk_type;
3220         bool retried = false;
3221         int failed = 0;
3222         int ret;
3223
3224         path = btrfs_alloc_path();
3225         if (!path)
3226                 return -ENOMEM;
3227
3228 again:
3229         key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3230         key.offset = (u64)-1;
3231         key.type = BTRFS_CHUNK_ITEM_KEY;
3232
3233         while (1) {
3234                 mutex_lock(&fs_info->reclaim_bgs_lock);
3235                 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
3236                 if (ret < 0) {
3237                         mutex_unlock(&fs_info->reclaim_bgs_lock);
3238                         goto error;
3239                 }
3240                 BUG_ON(ret == 0); /* Corruption */
3241
3242                 ret = btrfs_previous_item(chunk_root, path, key.objectid,
3243                                           key.type);
3244                 if (ret)
3245                         mutex_unlock(&fs_info->reclaim_bgs_lock);
3246                 if (ret < 0)
3247                         goto error;
3248                 if (ret > 0)
3249                         break;
3250
3251                 leaf = path->nodes[0];
3252                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3253
3254                 chunk = btrfs_item_ptr(leaf, path->slots[0],
3255                                        struct btrfs_chunk);
3256                 chunk_type = btrfs_chunk_type(leaf, chunk);
3257                 btrfs_release_path(path);
3258
3259                 if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
3260                         ret = btrfs_relocate_chunk(fs_info, found_key.offset);
3261                         if (ret == -ENOSPC)
3262                                 failed++;
3263                         else
3264                                 BUG_ON(ret);
3265                 }
3266                 mutex_unlock(&fs_info->reclaim_bgs_lock);
3267
3268                 if (found_key.offset == 0)
3269                         break;
3270                 key.offset = found_key.offset - 1;
3271         }
3272         ret = 0;
3273         if (failed && !retried) {
3274                 failed = 0;
3275                 retried = true;
3276                 goto again;
3277         } else if (WARN_ON(failed && retried)) {
3278                 ret = -ENOSPC;
3279         }
3280 error:
3281         btrfs_free_path(path);
3282         return ret;
3283 }
3284
3285 /*
3286  * Return 1 : allocated a data chunk successfully,
3287  * Return <0: error during allocation of a data chunk,
3288  * Return 0 : no need to allocate a data chunk.
3289  */
3290 static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info,
3291                                       u64 chunk_offset)
3292 {
3293         struct btrfs_block_group *cache;
3294         u64 bytes_used;
3295         u64 chunk_type;
3296
3297         cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3298         ASSERT(cache);
3299         chunk_type = cache->flags;
3300         btrfs_put_block_group(cache);
3301
3302         if (!(chunk_type & BTRFS_BLOCK_GROUP_DATA))
3303                 return 0;
3304
3305         spin_lock(&fs_info->data_sinfo->lock);
3306         bytes_used = fs_info->data_sinfo->bytes_used;
3307         spin_unlock(&fs_info->data_sinfo->lock);
3308
3309         if (!bytes_used) {
3310                 struct btrfs_trans_handle *trans;
3311                 int ret;
3312
3313                 trans = btrfs_join_transaction(fs_info->tree_root);
3314                 if (IS_ERR(trans))
3315                         return PTR_ERR(trans);
3316
3317                 ret = btrfs_force_chunk_alloc(trans, BTRFS_BLOCK_GROUP_DATA);
3318                 btrfs_end_transaction(trans);
3319                 if (ret < 0)
3320                         return ret;
3321                 return 1;
3322         }
3323
3324         return 0;
3325 }
3326
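/*
 * Persist the balance control as the balance item in the tree root, so that
 * an interrupted balance can be resumed later.
 */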
3327 static int insert_balance_item(struct btrfs_fs_info *fs_info,
3328                                struct btrfs_balance_control *bctl)
3329 {
3330         struct btrfs_root *root = fs_info->tree_root;
3331         struct btrfs_trans_handle *trans;
3332         struct btrfs_balance_item *item;
3333         struct btrfs_disk_balance_args disk_bargs;
3334         struct btrfs_path *path;
3335         struct extent_buffer *leaf;
3336         struct btrfs_key key;
3337         int ret, err;
3338
3339         path = btrfs_alloc_path();
3340         if (!path)
3341                 return -ENOMEM;
3342
3343         trans = btrfs_start_transaction(root, 0);
3344         if (IS_ERR(trans)) {
3345                 btrfs_free_path(path);
3346                 return PTR_ERR(trans);
3347         }
3348
3349         key.objectid = BTRFS_BALANCE_OBJECTID;
3350         key.type = BTRFS_TEMPORARY_ITEM_KEY;
3351         key.offset = 0;
3352
3353         ret = btrfs_insert_empty_item(trans, root, path, &key,
3354                                       sizeof(*item));
3355         if (ret)
3356                 goto out;
3357
3358         leaf = path->nodes[0];
3359         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
3360
3361         memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item));
3362
3363         btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
3364         btrfs_set_balance_data(leaf, item, &disk_bargs);
3365         btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
3366         btrfs_set_balance_meta(leaf, item, &disk_bargs);
3367         btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
3368         btrfs_set_balance_sys(leaf, item, &disk_bargs);
3369
3370         btrfs_set_balance_flags(leaf, item, bctl->flags);
3371
3372         btrfs_mark_buffer_dirty(leaf);
3373 out:
3374         btrfs_free_path(path);
3375         err = btrfs_commit_transaction(trans);
3376         if (err && !ret)
3377                 ret = err;
3378         return ret;
3379 }
3380
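/* Delete the on-disk balance item written by insert_balance_item(). */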
3381 static int del_balance_item(struct btrfs_fs_info *fs_info)
3382 {
3383         struct btrfs_root *root = fs_info->tree_root;
3384         struct btrfs_trans_handle *trans;
3385         struct btrfs_path *path;
3386         struct btrfs_key key;
3387         int ret, err;
3388
3389         path = btrfs_alloc_path();
3390         if (!path)
3391                 return -ENOMEM;
3392
3393         trans = btrfs_start_transaction_fallback_global_rsv(root, 0);
3394         if (IS_ERR(trans)) {
3395                 btrfs_free_path(path);
3396                 return PTR_ERR(trans);
3397         }
3398
3399         key.objectid = BTRFS_BALANCE_OBJECTID;
3400         key.type = BTRFS_TEMPORARY_ITEM_KEY;
3401         key.offset = 0;
3402
3403         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
3404         if (ret < 0)
3405                 goto out;
3406         if (ret > 0) {
3407                 ret = -ENOENT;
3408                 goto out;
3409         }
3410
3411         ret = btrfs_del_item(trans, root, path);
3412 out:
3413         btrfs_free_path(path);
3414         err = btrfs_commit_transaction(trans);
3415         if (err && !ret)
3416                 ret = err;
3417         return ret;
3418 }
3419
3420 /*
3421  * This is a heuristic used to reduce the number of chunks balanced on
3422  * resume after balance was interrupted.
3423  */
3424 static void update_balance_args(struct btrfs_balance_control *bctl)
3425 {
3426         /*
3427          * Turn on soft mode for chunk types that were being converted.
3428          */
3429         if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
3430                 bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
3431         if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
3432                 bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
3433         if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
3434                 bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;
3435
3436         /*
3437          * Turn on the usage filter if it is not already used.  The idea is
3438          * that chunks that we have already balanced should be
3439          * reasonably full.  Don't do it for chunks that are being
3440          * converted - that will keep us from relocating unconverted
3441          * (albeit full) chunks.
3442          */
3443         if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3444             !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3445             !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3446                 bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
3447                 bctl->data.usage = 90;
3448         }
3449         if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3450             !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3451             !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3452                 bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
3453                 bctl->sys.usage = 90;
3454         }
3455         if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3456             !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3457             !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3458                 bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
3459                 bctl->meta.usage = 90;
3460         }
3461 }
3462
3463 /*
3464  * Clear the balance status in fs_info and delete the balance item from disk.
3465  */
3466 static void reset_balance_state(struct btrfs_fs_info *fs_info)
3467 {
3468         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3469         int ret;
3470
3471         BUG_ON(!fs_info->balance_ctl);
3472
3473         spin_lock(&fs_info->balance_lock);
3474         fs_info->balance_ctl = NULL;
3475         spin_unlock(&fs_info->balance_lock);
3476
3477         kfree(bctl);
3478         ret = del_balance_item(fs_info);
3479         if (ret)
3480                 btrfs_handle_fs_error(fs_info, ret, NULL);
3481 }
3482
3483 /*
3484  * Balance filters.  Return 1 if chunk should be filtered out
3485  * (should not be balanced).
3486  */
3487 static int chunk_profiles_filter(u64 chunk_type,
3488                                  struct btrfs_balance_args *bargs)
3489 {
3490         chunk_type = chunk_to_extended(chunk_type) &
3491                                 BTRFS_EXTENDED_PROFILE_MASK;
3492
3493         if (bargs->profiles & chunk_type)
3494                 return 0;
3495
3496         return 1;
3497 }
3498
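/*
 * Filter out the chunk unless its used bytes fall within
 * [usage_min%, usage_max%) of the block group length.
 */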
3499 static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
3500                               struct btrfs_balance_args *bargs)
3501 {
3502         struct btrfs_block_group *cache;
3503         u64 chunk_used;
3504         u64 user_thresh_min;
3505         u64 user_thresh_max;
3506         int ret = 1;
3507
3508         cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3509         chunk_used = cache->used;
3510
3511         if (bargs->usage_min == 0)
3512                 user_thresh_min = 0;
3513         else
3514                 user_thresh_min = div_factor_fine(cache->length,
3515                                                   bargs->usage_min);
3516
3517         if (bargs->usage_max == 0)
3518                 user_thresh_max = 1;
3519         else if (bargs->usage_max > 100)
3520                 user_thresh_max = cache->length;
3521         else
3522                 user_thresh_max = div_factor_fine(cache->length,
3523                                                   bargs->usage_max);
3524
3525         if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max)
3526                 ret = 0;
3527
3528         btrfs_put_block_group(cache);
3529         return ret;
3530 }
3531
3532 static int chunk_usage_filter(struct btrfs_fs_info *fs_info,
3533                 u64 chunk_offset, struct btrfs_balance_args *bargs)
3534 {
3535         struct btrfs_block_group *cache;
3536         u64 chunk_used, user_thresh;
3537         int ret = 1;
3538
3539         cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3540         chunk_used = cache->used;
3541
3542         if (bargs->usage_min == 0)
3543                 user_thresh = 1;
3544         else if (bargs->usage > 100)
3545                 user_thresh = cache->length;
3546         else
3547                 user_thresh = div_factor_fine(cache->length, bargs->usage);
3548
3549         if (chunk_used < user_thresh)
3550                 ret = 0;
3551
3552         btrfs_put_block_group(cache);
3553         return ret;
3554 }
3555
3556 static int chunk_devid_filter(struct extent_buffer *leaf,
3557                               struct btrfs_chunk *chunk,
3558                               struct btrfs_balance_args *bargs)
3559 {
3560         struct btrfs_stripe *stripe;
3561         int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3562         int i;
3563
3564         for (i = 0; i < num_stripes; i++) {
3565                 stripe = btrfs_stripe_nr(chunk, i);
3566                 if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
3567                         return 0;
3568         }
3569
3570         return 1;
3571 }
3572
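/*
 * Number of stripes that carry data for the given profile:
 * (num_stripes - nparity) / ncopies.  For example, a 4 stripe RAID10 chunk
 * has (4 - 0) / 2 = 2 data stripes.
 */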
3573 static u64 calc_data_stripes(u64 type, int num_stripes)
3574 {
3575         const int index = btrfs_bg_flags_to_raid_index(type);
3576         const int ncopies = btrfs_raid_array[index].ncopies;
3577         const int nparity = btrfs_raid_array[index].nparity;
3578
3579         return (num_stripes - nparity) / ncopies;
3580 }
3581
3582 /* [pstart, pend) */
3583 static int chunk_drange_filter(struct extent_buffer *leaf,
3584                                struct btrfs_chunk *chunk,
3585                                struct btrfs_balance_args *bargs)
3586 {
3587         struct btrfs_stripe *stripe;
3588         int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3589         u64 stripe_offset;
3590         u64 stripe_length;
3591         u64 type;
3592         int factor;
3593         int i;
3594
3595         if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
3596                 return 0;
3597
3598         type = btrfs_chunk_type(leaf, chunk);
3599         factor = calc_data_stripes(type, num_stripes);
3600
3601         for (i = 0; i < num_stripes; i++) {
3602                 stripe = btrfs_stripe_nr(chunk, i);
3603                 if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
3604                         continue;
3605
3606                 stripe_offset = btrfs_stripe_offset(leaf, stripe);
3607                 stripe_length = btrfs_chunk_length(leaf, chunk);
3608                 stripe_length = div_u64(stripe_length, factor);
3609
3610                 if (stripe_offset < bargs->pend &&
3611                     stripe_offset + stripe_length > bargs->pstart)
3612                         return 0;
3613         }
3614
3615         return 1;
3616 }
3617
3618 /* [vstart, vend) */
3619 static int chunk_vrange_filter(struct extent_buffer *leaf,
3620                                struct btrfs_chunk *chunk,
3621                                u64 chunk_offset,
3622                                struct btrfs_balance_args *bargs)
3623 {
3624         if (chunk_offset < bargs->vend &&
3625             chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
3626                 /* at least part of the chunk is inside this vrange */
3627                 return 0;
3628
3629         return 1;
3630 }
3631
3632 static int chunk_stripes_range_filter(struct extent_buffer *leaf,
3633                                struct btrfs_chunk *chunk,
3634                                struct btrfs_balance_args *bargs)
3635 {
3636         int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3637
3638         if (bargs->stripes_min <= num_stripes
3639                         && num_stripes <= bargs->stripes_max)
3640                 return 0;
3641
3642         return 1;
3643 }
3644
3645 static int chunk_soft_convert_filter(u64 chunk_type,
3646                                      struct btrfs_balance_args *bargs)
3647 {
3648         if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
3649                 return 0;
3650
3651         chunk_type = chunk_to_extended(chunk_type) &
3652                                 BTRFS_EXTENDED_PROFILE_MASK;
3653
3654         if (bargs->target == chunk_type)
3655                 return 1;
3656
3657         return 0;
3658 }
3659
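/*
 * Apply all configured balance filters to the chunk.  Return 1 if the chunk
 * should be relocated, 0 if some filter rejects it.
 */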
3660 static int should_balance_chunk(struct extent_buffer *leaf,
3661                                 struct btrfs_chunk *chunk, u64 chunk_offset)
3662 {
3663         struct btrfs_fs_info *fs_info = leaf->fs_info;
3664         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3665         struct btrfs_balance_args *bargs = NULL;
3666         u64 chunk_type = btrfs_chunk_type(leaf, chunk);
3667
3668         /* type filter */
3669         if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
3670               (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
3671                 return 0;
3672         }
3673
3674         if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
3675                 bargs = &bctl->data;
3676         else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
3677                 bargs = &bctl->sys;
3678         else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
3679                 bargs = &bctl->meta;
3680
3681         /* profiles filter */
3682         if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
3683             chunk_profiles_filter(chunk_type, bargs)) {
3684                 return 0;
3685         }
3686
3687         /* usage filter */
3688         if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
3689             chunk_usage_filter(fs_info, chunk_offset, bargs)) {
3690                 return 0;
3691         } else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3692             chunk_usage_range_filter(fs_info, chunk_offset, bargs)) {
3693                 return 0;
3694         }
3695
3696         /* devid filter */
3697         if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
3698             chunk_devid_filter(leaf, chunk, bargs)) {
3699                 return 0;
3700         }
3701
3702         /* drange filter, makes sense only with devid filter */
3703         if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
3704             chunk_drange_filter(leaf, chunk, bargs)) {
3705                 return 0;
3706         }
3707
3708         /* vrange filter */
3709         if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
3710             chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
3711                 return 0;
3712         }
3713
3714         /* stripes filter */
3715         if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) &&
3716             chunk_stripes_range_filter(leaf, chunk, bargs)) {
3717                 return 0;
3718         }
3719
3720         /* soft profile changing mode */
3721         if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
3722             chunk_soft_convert_filter(chunk_type, bargs)) {
3723                 return 0;
3724         }
3725
3726         /*
3727          * limited by count, must be the last filter
3728          */
3729         if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) {
3730                 if (bargs->limit == 0)
3731                         return 0;
3732                 else
3733                         bargs->limit--;
3734         } else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) {
3735                 /*
3736                  * Same logic as the 'limit' filter; the minimum cannot be
3737                  * determined here because we do not have the global information
3738                  * about the count of all chunks that satisfy the filters.
3739                  */
3740                 if (bargs->limit_max == 0)
3741                         return 0;
3742                 else
3743                         bargs->limit_max--;
3744         }
3745
3746         return 1;
3747 }
3748
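/*
 * Core balance loop.  The chunk tree is walked twice from the highest offset
 * downwards: a counting pass that only gathers statistics, and a second pass
 * that relocates every chunk passing the filters.
 */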
3749 static int __btrfs_balance(struct btrfs_fs_info *fs_info)
3750 {
3751         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3752         struct btrfs_root *chunk_root = fs_info->chunk_root;
3753         u64 chunk_type;
3754         struct btrfs_chunk *chunk;
3755         struct btrfs_path *path = NULL;
3756         struct btrfs_key key;
3757         struct btrfs_key found_key;
3758         struct extent_buffer *leaf;
3759         int slot;
3760         int ret;
3761         int enospc_errors = 0;
3762         bool counting = true;
3763         /* The single value limit and min/max limits use the same bytes in the balance args, so save them before the counting pass */
3764         u64 limit_data = bctl->data.limit;
3765         u64 limit_meta = bctl->meta.limit;
3766         u64 limit_sys = bctl->sys.limit;
3767         u32 count_data = 0;
3768         u32 count_meta = 0;
3769         u32 count_sys = 0;
3770         int chunk_reserved = 0;
3771
3772         path = btrfs_alloc_path();
3773         if (!path) {
3774                 ret = -ENOMEM;
3775                 goto error;
3776         }
3777
3778         /* zero out stat counters */
3779         spin_lock(&fs_info->balance_lock);
3780         memset(&bctl->stat, 0, sizeof(bctl->stat));
3781         spin_unlock(&fs_info->balance_lock);
3782 again:
3783         if (!counting) {
3784                 /*
3785                  * The single value limit and min/max limits use the same bytes
3786                  * in the balance args, so restore the values saved above.
3787                  */
3788                 bctl->data.limit = limit_data;
3789                 bctl->meta.limit = limit_meta;
3790                 bctl->sys.limit = limit_sys;
3791         }
3792         key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3793         key.offset = (u64)-1;
3794         key.type = BTRFS_CHUNK_ITEM_KEY;
3795
3796         while (1) {
3797                 if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
3798                     atomic_read(&fs_info->balance_cancel_req)) {
3799                         ret = -ECANCELED;
3800                         goto error;
3801                 }
3802
3803                 mutex_lock(&fs_info->reclaim_bgs_lock);
3804                 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
3805                 if (ret < 0) {
3806                         mutex_unlock(&fs_info->reclaim_bgs_lock);
3807                         goto error;
3808                 }
3809
3810                 /*
3811                  * this shouldn't happen, it means the last relocate
3812                  * failed
3813                  */
3814                 if (ret == 0)
3815                         BUG(); /* FIXME break ? */
3816
3817                 ret = btrfs_previous_item(chunk_root, path, 0,
3818                                           BTRFS_CHUNK_ITEM_KEY);
3819                 if (ret) {
3820                         mutex_unlock(&fs_info->reclaim_bgs_lock);
3821                         ret = 0;
3822                         break;
3823                 }
3824
3825                 leaf = path->nodes[0];
3826                 slot = path->slots[0];
3827                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3828
3829                 if (found_key.objectid != key.objectid) {
3830                         mutex_unlock(&fs_info->reclaim_bgs_lock);
3831                         break;
3832                 }
3833
3834                 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
3835                 chunk_type = btrfs_chunk_type(leaf, chunk);
3836
3837                 if (!counting) {
3838                         spin_lock(&fs_info->balance_lock);
3839                         bctl->stat.considered++;
3840                         spin_unlock(&fs_info->balance_lock);
3841                 }
3842
3843                 ret = should_balance_chunk(leaf, chunk, found_key.offset);
3844
3845                 btrfs_release_path(path);
3846                 if (!ret) {
3847                         mutex_unlock(&fs_info->reclaim_bgs_lock);
3848                         goto loop;
3849                 }
3850
3851                 if (counting) {
3852                         mutex_unlock(&fs_info->reclaim_bgs_lock);
3853                         spin_lock(&fs_info->balance_lock);
3854                         bctl->stat.expected++;
3855                         spin_unlock(&fs_info->balance_lock);
3856
3857                         if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
3858                                 count_data++;
3859                         else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
3860                                 count_sys++;
3861                         else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
3862                                 count_meta++;
3863
3864                         goto loop;
3865                 }
3866
3867                 /*
3868                  * Apply the limit_min filter; no need to check whether the LIMITS
3869                  * filter is used, since limit_min is 0 by default.
3870                  */
3871                 if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) &&
3872                                         count_data < bctl->data.limit_min)
3873                                 || ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) &&
3874                                         count_meta < bctl->meta.limit_min)
3875                                 || ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) &&
3876                                         count_sys < bctl->sys.limit_min)) {
3877                         mutex_unlock(&fs_info->reclaim_bgs_lock);
3878                         goto loop;
3879                 }
3880
3881                 if (!chunk_reserved) {
3882                         /*
3883                          * We may be relocating the only data chunk we have,
3884                          * which could potentially end up losing the data raid
3885                          * profile, so let's allocate an empty one in
3886                          * advance.
3887                          */
3888                         ret = btrfs_may_alloc_data_chunk(fs_info,
3889                                                          found_key.offset);
3890                         if (ret < 0) {
3891                                 mutex_unlock(&fs_info->reclaim_bgs_lock);
3892                                 goto error;
3893                         } else if (ret == 1) {
3894                                 chunk_reserved = 1;
3895                         }
3896                 }
3897
3898                 ret = btrfs_relocate_chunk(fs_info, found_key.offset);
3899                 mutex_unlock(&fs_info->reclaim_bgs_lock);
3900                 if (ret == -ENOSPC) {
3901                         enospc_errors++;
3902                 } else if (ret == -ETXTBSY) {
3903                         btrfs_info(fs_info,
3904            "skipping relocation of block group %llu due to active swapfile",
3905                                    found_key.offset);
3906                         ret = 0;
3907                 } else if (ret) {
3908                         goto error;
3909                 } else {
3910                         spin_lock(&fs_info->balance_lock);
3911                         bctl->stat.completed++;
3912                         spin_unlock(&fs_info->balance_lock);
3913                 }
3914 loop:
3915                 if (found_key.offset == 0)
3916                         break;
3917                 key.offset = found_key.offset - 1;
3918         }
3919
3920         if (counting) {
3921                 btrfs_release_path(path);
3922                 counting = false;
3923                 goto again;
3924         }
3925 error:
3926         btrfs_free_path(path);
3927         if (enospc_errors) {
3928                 btrfs_info(fs_info, "%d enospc errors during balance",
3929                            enospc_errors);
3930                 if (!ret)
3931                         ret = -ENOSPC;
3932         }
3933
3934         return ret;
3935 }
3936
3937 /**
3938  * alloc_profile_is_valid - see if a given profile is valid and reduced
3939  * @flags: profile to validate
3940  * @extended: if true @flags is treated as an extended profile
3941  */
3942 static int alloc_profile_is_valid(u64 flags, int extended)
3943 {
3944         u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
3945                                BTRFS_BLOCK_GROUP_PROFILE_MASK);
3946
3947         flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
3948
3949         /* 1) check that all other bits are zeroed */
3950         if (flags & ~mask)
3951                 return 0;
3952
3953         /* 2) see if profile is reduced */
3954         if (flags == 0)
3955                 return !extended; /* "0" is valid for usual profiles */
3956
3957         return has_single_bit_set(flags);
3958 }
3959
3960 static inline int balance_need_close(struct btrfs_fs_info *fs_info)
3961 {
3962         /* cancel requested || normal exit path */
3963         return atomic_read(&fs_info->balance_cancel_req) ||
3964                 (atomic_read(&fs_info->balance_pause_req) == 0 &&
3965                  atomic_read(&fs_info->balance_cancel_req) == 0);
3966 }
3967
3968 /*
3969  * Validate target profile against allowed profiles and return true if it's OK.
3970  * Otherwise print the error message and return false.
3971  */
3972 static inline int validate_convert_profile(struct btrfs_fs_info *fs_info,
3973                 const struct btrfs_balance_args *bargs,
3974                 u64 allowed, const char *type)
3975 {
3976         if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
3977                 return true;
3978
3979         if (fs_info->sectorsize < PAGE_SIZE &&
3980                 bargs->target & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3981                 btrfs_err(fs_info,
3982                 "RAID56 is not yet supported for sectorsize %u with page size %lu",
3983                           fs_info->sectorsize, PAGE_SIZE);
3984                 return false;
3985         }
3986         /* Profile is valid and does not have bits outside of the allowed set */
3987         if (alloc_profile_is_valid(bargs->target, 1) &&
3988             (bargs->target & ~allowed) == 0)
3989                 return true;
3990
3991         btrfs_err(fs_info, "balance: invalid convert %s profile %s",
3992                         type, btrfs_bg_type_to_raid_name(bargs->target));
3993         return false;
3994 }
3995
3996 /*
3997  * Fill @buf with textual description of balance filter flags @bargs, up to
3998  * @size_buf including the terminating null. The output may be trimmed if it
3999  * does not fit into the provided buffer.
4000  */
4001 static void describe_balance_args(struct btrfs_balance_args *bargs, char *buf,
4002                                  u32 size_buf)
4003 {
4004         int ret;
4005         u32 size_bp = size_buf;
4006         char *bp = buf;
4007         u64 flags = bargs->flags;
4008         char tmp_buf[128] = {'\0'};
4009
4010         if (!flags)
4011                 return;
4012
4013 #define CHECK_APPEND_NOARG(a)                                           \
4014         do {                                                            \
4015                 ret = snprintf(bp, size_bp, (a));                       \
4016                 if (ret < 0 || ret >= size_bp)                          \
4017                         goto out_overflow;                              \
4018                 size_bp -= ret;                                         \
4019                 bp += ret;                                              \
4020         } while (0)
4021
4022 #define CHECK_APPEND_1ARG(a, v1)                                        \
4023         do {                                                            \
4024                 ret = snprintf(bp, size_bp, (a), (v1));                 \
4025                 if (ret < 0 || ret >= size_bp)                          \
4026                         goto out_overflow;                              \
4027                 size_bp -= ret;                                         \
4028                 bp += ret;                                              \
4029         } while (0)
4030
4031 #define CHECK_APPEND_2ARG(a, v1, v2)                                    \
4032         do {                                                            \
4033                 ret = snprintf(bp, size_bp, (a), (v1), (v2));           \
4034                 if (ret < 0 || ret >= size_bp)                          \
4035                         goto out_overflow;                              \
4036                 size_bp -= ret;                                         \
4037                 bp += ret;                                              \
4038         } while (0)
4039
4040         if (flags & BTRFS_BALANCE_ARGS_CONVERT)
4041                 CHECK_APPEND_1ARG("convert=%s,",
4042                                   btrfs_bg_type_to_raid_name(bargs->target));
4043
4044         if (flags & BTRFS_BALANCE_ARGS_SOFT)
4045                 CHECK_APPEND_NOARG("soft,");
4046
4047         if (flags & BTRFS_BALANCE_ARGS_PROFILES) {
4048                 btrfs_describe_block_groups(bargs->profiles, tmp_buf,
4049                                             sizeof(tmp_buf));
4050                 CHECK_APPEND_1ARG("profiles=%s,", tmp_buf);
4051         }
4052
4053         if (flags & BTRFS_BALANCE_ARGS_USAGE)
4054                 CHECK_APPEND_1ARG("usage=%llu,", bargs->usage);
4055
4056         if (flags & BTRFS_BALANCE_ARGS_USAGE_RANGE)
4057                 CHECK_APPEND_2ARG("usage=%u..%u,",
4058                                   bargs->usage_min, bargs->usage_max);
4059
4060         if (flags & BTRFS_BALANCE_ARGS_DEVID)
4061                 CHECK_APPEND_1ARG("devid=%llu,", bargs->devid);
4062
4063         if (flags & BTRFS_BALANCE_ARGS_DRANGE)
4064                 CHECK_APPEND_2ARG("drange=%llu..%llu,",
4065                                   bargs->pstart, bargs->pend);
4066
4067         if (flags & BTRFS_BALANCE_ARGS_VRANGE)
4068                 CHECK_APPEND_2ARG("vrange=%llu..%llu,",
4069                                   bargs->vstart, bargs->vend);
4070
4071         if (flags & BTRFS_BALANCE_ARGS_LIMIT)
4072                 CHECK_APPEND_1ARG("limit=%llu,", bargs->limit);
4073
4074         if (flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)
4075                 CHECK_APPEND_2ARG("limit=%u..%u,",
4076                                 bargs->limit_min, bargs->limit_max);
4077
4078         if (flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE)
4079                 CHECK_APPEND_2ARG("stripes=%u..%u,",
4080                                   bargs->stripes_min, bargs->stripes_max);
4081
4082 #undef CHECK_APPEND_2ARG
4083 #undef CHECK_APPEND_1ARG
4084 #undef CHECK_APPEND_NOARG
4085
4086 out_overflow:
4087
4088         if (size_bp < size_buf)
4089                 buf[size_buf - size_bp - 1] = '\0'; /* remove last , */
4090         else
4091                 buf[0] = '\0';
4092 }
4093
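/*
 * Log a one-line description of the balance operation, roughly matching the
 * command line options that would request it.
 */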
4094 static void describe_balance_start_or_resume(struct btrfs_fs_info *fs_info)
4095 {
4096         u32 size_buf = 1024;
4097         char tmp_buf[192] = {'\0'};
4098         char *buf;
4099         char *bp;
4100         u32 size_bp = size_buf;
4101         int ret;
4102         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
4103
4104         buf = kzalloc(size_buf, GFP_KERNEL);
4105         if (!buf)
4106                 return;
4107
4108         bp = buf;
4109
4110 #define CHECK_APPEND_1ARG(a, v1)                                        \
4111         do {                                                            \
4112                 ret = snprintf(bp, size_bp, (a), (v1));                 \
4113                 if (ret < 0 || ret >= size_bp)                          \
4114                         goto out_overflow;                              \
4115                 size_bp -= ret;                                         \
4116                 bp += ret;                                              \
4117         } while (0)
4118
4119         if (bctl->flags & BTRFS_BALANCE_FORCE)
4120                 CHECK_APPEND_1ARG("%s", "-f ");
4121
4122         if (bctl->flags & BTRFS_BALANCE_DATA) {
4123                 describe_balance_args(&bctl->data, tmp_buf, sizeof(tmp_buf));
4124                 CHECK_APPEND_1ARG("-d%s ", tmp_buf);
4125         }
4126
4127         if (bctl->flags & BTRFS_BALANCE_METADATA) {
4128                 describe_balance_args(&bctl->meta, tmp_buf, sizeof(tmp_buf));
4129                 CHECK_APPEND_1ARG("-m%s ", tmp_buf);
4130         }
4131
4132         if (bctl->flags & BTRFS_BALANCE_SYSTEM) {
4133                 describe_balance_args(&bctl->sys, tmp_buf, sizeof(tmp_buf));
4134                 CHECK_APPEND_1ARG("-s%s ", tmp_buf);
4135         }
4136
4137 #undef CHECK_APPEND_1ARG
4138
4139 out_overflow:
4140
4141         if (size_bp < size_buf)
4142                 buf[size_buf - size_bp - 1] = '\0'; /* remove last " " */
4143         btrfs_info(fs_info, "balance: %s %s",
4144                    (bctl->flags & BTRFS_BALANCE_RESUME) ?
4145                    "resume" : "start", buf);
4146
4147         kfree(buf);
4148 }
4149
4150 /*
4151  * Should be called with the balance mutex held.
4152  */
4153 int btrfs_balance(struct btrfs_fs_info *fs_info,
4154                   struct btrfs_balance_control *bctl,
4155                   struct btrfs_ioctl_balance_args *bargs)
4156 {
4157         u64 meta_target, data_target;
4158         u64 allowed;
4159         int mixed = 0;
4160         int ret;
4161         u64 num_devices;
4162         unsigned seq;
4163         bool reducing_redundancy;
4164         int i;
4165
4166         if (btrfs_fs_closing(fs_info) ||
4167             atomic_read(&fs_info->balance_pause_req) ||
4168             btrfs_should_cancel_balance(fs_info)) {
4169                 ret = -EINVAL;
4170                 goto out;
4171         }
4172
4173         allowed = btrfs_super_incompat_flags(fs_info->super_copy);
4174         if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
4175                 mixed = 1;
4176
4177         /*
4178          * In case of mixed groups both data and meta should be picked,
4179          * and identical options should be given for both of them.
4180          */
4181         allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
4182         if (mixed && (bctl->flags & allowed)) {
4183                 if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
4184                     !(bctl->flags & BTRFS_BALANCE_METADATA) ||
4185                     memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
4186                         btrfs_err(fs_info,
4187           "balance: mixed groups data and metadata options must be the same");
4188                         ret = -EINVAL;
4189                         goto out;
4190                 }
4191         }
4192
4193         /*
4194          * rw_devices will not change at the moment because device add, delete
4195          * and replace are exclusive operations.
4196          */
4197         num_devices = fs_info->fs_devices->rw_devices;
4198
4199         /*
4200          * SINGLE profile on-disk has no profile bit, but in-memory we have a
4201          * special bit for it, to make it easier to distinguish.  Thus we need
4202          * to set it manually, or balance would refuse the profile.
4203          */
4204         allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
4205         for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++)
4206                 if (num_devices >= btrfs_raid_array[i].devs_min)
4207                         allowed |= btrfs_raid_array[i].bg_flag;
4208
4209         if (!validate_convert_profile(fs_info, &bctl->data, allowed, "data") ||
4210             !validate_convert_profile(fs_info, &bctl->meta, allowed, "metadata") ||
4211             !validate_convert_profile(fs_info, &bctl->sys,  allowed, "system")) {
4212                 ret = -EINVAL;
4213                 goto out;
4214         }
4215
4216         /*
4217          * Allow reducing metadata or system integrity only if force is set,
4218          * for profiles with redundancy (copies, parity).
4219          */
4220         allowed = 0;
4221         for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) {
4222                 if (btrfs_raid_array[i].ncopies >= 2 ||
4223                     btrfs_raid_array[i].tolerated_failures >= 1)
4224                         allowed |= btrfs_raid_array[i].bg_flag;
4225         }
4226         do {
4227                 seq = read_seqbegin(&fs_info->profiles_lock);
4228
4229                 if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
4230                      (fs_info->avail_system_alloc_bits & allowed) &&
4231                      !(bctl->sys.target & allowed)) ||
4232                     ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
4233                      (fs_info->avail_metadata_alloc_bits & allowed) &&
4234                      !(bctl->meta.target & allowed)))
4235                         reducing_redundancy = true;
4236                 else
4237                         reducing_redundancy = false;
4238
4239                 /* if we're not converting, the target field is uninitialized */
4240                 meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
4241                         bctl->meta.target : fs_info->avail_metadata_alloc_bits;
4242                 data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
4243                         bctl->data.target : fs_info->avail_data_alloc_bits;
4244         } while (read_seqretry(&fs_info->profiles_lock, seq));
4245
4246         if (reducing_redundancy) {
4247                 if (bctl->flags & BTRFS_BALANCE_FORCE) {
4248                         btrfs_info(fs_info,
4249                            "balance: force reducing metadata redundancy");
4250                 } else {
4251                         btrfs_err(fs_info,
4252         "balance: reduces metadata redundancy, use --force if you want this");
4253                         ret = -EINVAL;
4254                         goto out;
4255                 }
4256         }
4257
4258         if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target) <
4259                 btrfs_get_num_tolerated_disk_barrier_failures(data_target)) {
4260                 btrfs_warn(fs_info,
4261         "balance: metadata profile %s has lower redundancy than data profile %s",
4262                                 btrfs_bg_type_to_raid_name(meta_target),
4263                                 btrfs_bg_type_to_raid_name(data_target));
4264         }
4265
4266         ret = insert_balance_item(fs_info, bctl);
4267         if (ret && ret != -EEXIST)
4268                 goto out;
4269
4270         if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
4271                 BUG_ON(ret == -EEXIST);
4272                 BUG_ON(fs_info->balance_ctl);
4273                 spin_lock(&fs_info->balance_lock);
4274                 fs_info->balance_ctl = bctl;
4275                 spin_unlock(&fs_info->balance_lock);
4276         } else {
4277                 BUG_ON(ret != -EEXIST);
4278                 spin_lock(&fs_info->balance_lock);
4279                 update_balance_args(bctl);
4280                 spin_unlock(&fs_info->balance_lock);
4281         }
4282
4283         ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4284         set_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);
4285         describe_balance_start_or_resume(fs_info);
4286         mutex_unlock(&fs_info->balance_mutex);
4287
4288         ret = __btrfs_balance(fs_info);
4289
4290         mutex_lock(&fs_info->balance_mutex);
4291         if (ret == -ECANCELED && atomic_read(&fs_info->balance_pause_req))
4292                 btrfs_info(fs_info, "balance: paused");
4293         /*
4294          * Balance can be canceled by:
4295          *
4296          * - Regular cancel request
4297          *   Then ret == -ECANCELED and balance_cancel_req > 0
4298          *
4299          * - Fatal signal to "btrfs" process
4300          *   Either the signal is caught by wait_reserve_ticket() and the
4301          *   callers get -EINTR, or it is caught by
4302          *   btrfs_should_cancel_balance() and they get -ECANCELED.
4303          *   Either way, in this case balance_cancel_req = 0, and
4304          *   ret == -EINTR or ret == -ECANCELED.
4305          *
4306          * So here we only check the return value to catch canceled balance.
4307          */
4308         else if (ret == -ECANCELED || ret == -EINTR)
4309                 btrfs_info(fs_info, "balance: canceled");
4310         else
4311                 btrfs_info(fs_info, "balance: ended with status: %d", ret);
4312
4313         clear_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);
4314
4315         if (bargs) {
4316                 memset(bargs, 0, sizeof(*bargs));
4317                 btrfs_update_ioctl_balance_args(fs_info, bargs);
4318         }
4319
4320         if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
4321             balance_need_close(fs_info)) {
4322                 reset_balance_state(fs_info);
4323                 btrfs_exclop_finish(fs_info);
4324         }
4325
4326         wake_up(&fs_info->balance_wait_q);
4327
4328         return ret;
4329 out:
4330         if (bctl->flags & BTRFS_BALANCE_RESUME)
4331                 reset_balance_state(fs_info);
4332         else
4333                 kfree(bctl);
4334         btrfs_exclop_finish(fs_info);
4335
4336         return ret;
4337 }
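
/*
 * A minimal sketch of the calling contract (hypothetical caller; the real
 * callers are balance_kthread() below and the balance ioctl, which perform
 * more argument validation, and the -EBUSY value is only illustrative):
 *
 *	if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE))
 *		return -EBUSY;	(another exclusive op is running)
 *	mutex_lock(&fs_info->balance_mutex);
 *	ret = btrfs_balance(fs_info, bctl, bargs);
 *	mutex_unlock(&fs_info->balance_mutex);
 *
 * Ownership of @bctl passes to btrfs_balance(): it is either stored in
 * fs_info->balance_ctl or freed in the error paths, so the caller must not
 * free it afterwards.
 */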
4338
4339 static int balance_kthread(void *data)
4340 {
4341         struct btrfs_fs_info *fs_info = data;
4342         int ret = 0;
4343
4344         mutex_lock(&fs_info->balance_mutex);
4345         if (fs_info->balance_ctl)
4346                 ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL);
4347         mutex_unlock(&fs_info->balance_mutex);
4348
4349         return ret;
4350 }
4351
4352 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
4353 {
4354         struct task_struct *tsk;
4355
4356         mutex_lock(&fs_info->balance_mutex);
4357         if (!fs_info->balance_ctl) {
4358                 mutex_unlock(&fs_info->balance_mutex);
4359                 return 0;
4360         }
4361         mutex_unlock(&fs_info->balance_mutex);
4362
4363         if (btrfs_test_opt(fs_info, SKIP_BALANCE)) {
4364                 btrfs_info(fs_info, "balance: resume skipped");
4365                 return 0;
4366         }
4367
4368         /*
4369          * A ro->rw remount sequence should continue with the paused balance
4370          * regardless of who paused it (the system or, as of now, the user),
4371          * so set the resume flag.
4372          */
4373         spin_lock(&fs_info->balance_lock);
4374         fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME;
4375         spin_unlock(&fs_info->balance_lock);
4376
4377         tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
4378         return PTR_ERR_OR_ZERO(tsk);
4379 }
4380
4381 int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
4382 {
4383         struct btrfs_balance_control *bctl;
4384         struct btrfs_balance_item *item;
4385         struct btrfs_disk_balance_args disk_bargs;
4386         struct btrfs_path *path;
4387         struct extent_buffer *leaf;
4388         struct btrfs_key key;
4389         int ret;
4390
4391         path = btrfs_alloc_path();
4392         if (!path)
4393                 return -ENOMEM;
4394
4395         key.objectid = BTRFS_BALANCE_OBJECTID;
4396         key.type = BTRFS_TEMPORARY_ITEM_KEY;
4397         key.offset = 0;
4398
4399         ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
4400         if (ret < 0)
4401                 goto out;
4402         if (ret > 0) { /* ret = -ENOENT; */
4403                 ret = 0;
4404                 goto out;
4405         }
4406
4407         bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
4408         if (!bctl) {
4409                 ret = -ENOMEM;
4410                 goto out;
4411         }
4412
4413         leaf = path->nodes[0];
4414         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
4415
4416         bctl->flags = btrfs_balance_flags(leaf, item);
4417         bctl->flags |= BTRFS_BALANCE_RESUME;
4418
4419         btrfs_balance_data(leaf, item, &disk_bargs);
4420         btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
4421         btrfs_balance_meta(leaf, item, &disk_bargs);
4422         btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
4423         btrfs_balance_sys(leaf, item, &disk_bargs);
4424         btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
4425
4426         /*
4427          * This should never happen, as the paused balance state is recovered
4428          * during mount without any chance of other exclusive ops colliding.
4429          *
4430          * This gives the exclusive op status to balance and keeps in paused
4431          * state until user intervention (cancel or umount). If the ownership
4432          * cannot be assigned, show a message but do not fail. The balance
4433          * is in a paused state and must have fs_info::balance_ctl properly
4434          * set up.
4435          */
4436         if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE))
4437                 btrfs_warn(fs_info,
4438         "balance: cannot set exclusive op status, resume manually");
4439
4440         btrfs_release_path(path);
4441
4442         mutex_lock(&fs_info->balance_mutex);
4443         BUG_ON(fs_info->balance_ctl);
4444         spin_lock(&fs_info->balance_lock);
4445         fs_info->balance_ctl = bctl;
4446         spin_unlock(&fs_info->balance_lock);
4447         mutex_unlock(&fs_info->balance_mutex);
4448 out:
4449         btrfs_free_path(path);
4450         return ret;
4451 }
4452
4453 int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
4454 {
4455         int ret = 0;
4456
4457         mutex_lock(&fs_info->balance_mutex);
4458         if (!fs_info->balance_ctl) {
4459                 mutex_unlock(&fs_info->balance_mutex);
4460                 return -ENOTCONN;
4461         }
4462
4463         if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
4464                 atomic_inc(&fs_info->balance_pause_req);
4465                 mutex_unlock(&fs_info->balance_mutex);
4466
4467                 wait_event(fs_info->balance_wait_q,
4468                            !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4469
4470                 mutex_lock(&fs_info->balance_mutex);
4471                 /* we are good with balance_ctl ripped off from under us */
4472                 BUG_ON(test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4473                 atomic_dec(&fs_info->balance_pause_req);
4474         } else {
4475                 ret = -ENOTCONN;
4476         }
4477
4478         mutex_unlock(&fs_info->balance_mutex);
4479         return ret;
4480 }
4481
4482 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
4483 {
4484         mutex_lock(&fs_info->balance_mutex);
4485         if (!fs_info->balance_ctl) {
4486                 mutex_unlock(&fs_info->balance_mutex);
4487                 return -ENOTCONN;
4488         }
4489
4490         /*
4491          * A paused balance with the item stored on disk can be resumed at
4492          * mount time if the mount is read-write. Otherwise it's still paused
4493          * and we must not allow cancelling as it deletes the item.
4494          */
4495         if (sb_rdonly(fs_info->sb)) {
4496                 mutex_unlock(&fs_info->balance_mutex);
4497                 return -EROFS;
4498         }
4499
4500         atomic_inc(&fs_info->balance_cancel_req);
4501         /*
4502          * If balance is running, just wait and return; the balance item is
4503          * deleted in btrfs_balance() in that case.
4504          */
4505         if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
4506                 mutex_unlock(&fs_info->balance_mutex);
4507                 wait_event(fs_info->balance_wait_q,
4508                            !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4509                 mutex_lock(&fs_info->balance_mutex);
4510         } else {
4511                 mutex_unlock(&fs_info->balance_mutex);
4512                 /*
4513                  * Lock released to allow other waiters to continue, we'll
4514                  * reexamine the status again.
4515                  */
4516                 mutex_lock(&fs_info->balance_mutex);
4517
4518                 if (fs_info->balance_ctl) {
4519                         reset_balance_state(fs_info);
4520                         btrfs_exclop_finish(fs_info);
4521                         btrfs_info(fs_info, "balance: canceled");
4522                 }
4523         }
4524
4525         BUG_ON(fs_info->balance_ctl ||
4526                 test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4527         atomic_dec(&fs_info->balance_cancel_req);
4528         mutex_unlock(&fs_info->balance_mutex);
4529         return 0;
4530 }
4531
4532 int btrfs_uuid_scan_kthread(void *data)
4533 {
4534         struct btrfs_fs_info *fs_info = data;
4535         struct btrfs_root *root = fs_info->tree_root;
4536         struct btrfs_key key;
4537         struct btrfs_path *path = NULL;
4538         int ret = 0;
4539         struct extent_buffer *eb;
4540         int slot;
4541         struct btrfs_root_item root_item;
4542         u32 item_size;
4543         struct btrfs_trans_handle *trans = NULL;
4544         bool closing = false;
4545
4546         path = btrfs_alloc_path();
4547         if (!path) {
4548                 ret = -ENOMEM;
4549                 goto out;
4550         }
4551
4552         key.objectid = 0;
4553         key.type = BTRFS_ROOT_ITEM_KEY;
4554         key.offset = 0;
4555
4556         while (1) {
4557                 if (btrfs_fs_closing(fs_info)) {
4558                         closing = true;
4559                         break;
4560                 }
4561                 ret = btrfs_search_forward(root, &key, path,
4562                                 BTRFS_OLDEST_GENERATION);
4563                 if (ret) {
4564                         if (ret > 0)
4565                                 ret = 0;
4566                         break;
4567                 }
4568
4569                 if (key.type != BTRFS_ROOT_ITEM_KEY ||
4570                     (key.objectid < BTRFS_FIRST_FREE_OBJECTID &&
4571                      key.objectid != BTRFS_FS_TREE_OBJECTID) ||
4572                     key.objectid > BTRFS_LAST_FREE_OBJECTID)
4573                         goto skip;
4574
4575                 eb = path->nodes[0];
4576                 slot = path->slots[0];
4577                 item_size = btrfs_item_size_nr(eb, slot);
4578                 if (item_size < sizeof(root_item))
4579                         goto skip;
4580
4581                 read_extent_buffer(eb, &root_item,
4582                                    btrfs_item_ptr_offset(eb, slot),
4583                                    (int)sizeof(root_item));
4584                 if (btrfs_root_refs(&root_item) == 0)
4585                         goto skip;
4586
4587                 if (!btrfs_is_empty_uuid(root_item.uuid) ||
4588                     !btrfs_is_empty_uuid(root_item.received_uuid)) {
4589                         if (trans)
4590                                 goto update_tree;
4591
4592                         btrfs_release_path(path);
4593                         /*
4594                          * 1 - subvol uuid item
4595                          * 1 - received_subvol uuid item
4596                          */
4597                         trans = btrfs_start_transaction(fs_info->uuid_root, 2);
4598                         if (IS_ERR(trans)) {
4599                                 ret = PTR_ERR(trans);
4600                                 break;
4601                         }
4602                         continue;
4603                 } else {
4604                         goto skip;
4605                 }
4606 update_tree:
4607                 btrfs_release_path(path);
4608                 if (!btrfs_is_empty_uuid(root_item.uuid)) {
4609                         ret = btrfs_uuid_tree_add(trans, root_item.uuid,
4610                                                   BTRFS_UUID_KEY_SUBVOL,
4611                                                   key.objectid);
4612                         if (ret < 0) {
4613                                 btrfs_warn(fs_info, "uuid_tree_add failed %d",
4614                                         ret);
4615                                 break;
4616                         }
4617                 }
4618
4619                 if (!btrfs_is_empty_uuid(root_item.received_uuid)) {
4620                         ret = btrfs_uuid_tree_add(trans,
4621                                                   root_item.received_uuid,
4622                                                  BTRFS_UUID_KEY_RECEIVED_SUBVOL,
4623                                                   key.objectid);
4624                         if (ret < 0) {
4625                                 btrfs_warn(fs_info, "uuid_tree_add failed %d",
4626                                         ret);
4627                                 break;
4628                         }
4629                 }
4630
4631 skip:
4632                 btrfs_release_path(path);
4633                 if (trans) {
4634                         ret = btrfs_end_transaction(trans);
4635                         trans = NULL;
4636                         if (ret)
4637                                 break;
4638                 }
4639
4640                 if (key.offset < (u64)-1) {
4641                         key.offset++;
4642                 } else if (key.type < BTRFS_ROOT_ITEM_KEY) {
4643                         key.offset = 0;
4644                         key.type = BTRFS_ROOT_ITEM_KEY;
4645                 } else if (key.objectid < (u64)-1) {
4646                         key.offset = 0;
4647                         key.type = BTRFS_ROOT_ITEM_KEY;
4648                         key.objectid++;
4649                 } else {
4650                         break;
4651                 }
4652                 cond_resched();
4653         }
4654
4655 out:
4656         btrfs_free_path(path);
4657         if (trans && !IS_ERR(trans))
4658                 btrfs_end_transaction(trans);
4659         if (ret)
4660                 btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret);
4661         else if (!closing)
4662                 set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
4663         up(&fs_info->uuid_tree_rescan_sem);
4664         return 0;
4665 }
4666
4667 int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
4668 {
4669         struct btrfs_trans_handle *trans;
4670         struct btrfs_root *tree_root = fs_info->tree_root;
4671         struct btrfs_root *uuid_root;
4672         struct task_struct *task;
4673         int ret;
4674
4675         /*
4676          * 1 - root node
4677          * 1 - root item
4678          */
4679         trans = btrfs_start_transaction(tree_root, 2);
4680         if (IS_ERR(trans))
4681                 return PTR_ERR(trans);
4682
4683         uuid_root = btrfs_create_tree(trans, BTRFS_UUID_TREE_OBJECTID);
4684         if (IS_ERR(uuid_root)) {
4685                 ret = PTR_ERR(uuid_root);
4686                 btrfs_abort_transaction(trans, ret);
4687                 btrfs_end_transaction(trans);
4688                 return ret;
4689         }
4690
4691         fs_info->uuid_root = uuid_root;
4692
4693         ret = btrfs_commit_transaction(trans);
4694         if (ret)
4695                 return ret;
4696
4697         down(&fs_info->uuid_tree_rescan_sem);
4698         task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid");
4699         if (IS_ERR(task)) {
4700                 /* fs_info->update_uuid_tree_gen remains 0 in all error cases */
4701                 btrfs_warn(fs_info, "failed to start uuid_scan task");
4702                 up(&fs_info->uuid_tree_rescan_sem);
4703                 return PTR_ERR(task);
4704         }
4705
4706         return 0;
4707 }
4708
4709 /*
4710  * Shrinking a device means finding all of the device extents past
4711  * the new size, and then following the back refs to the chunks.
4712  * The chunk relocation code actually frees the device extents.
4713  */
4714 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
4715 {
4716         struct btrfs_fs_info *fs_info = device->fs_info;
4717         struct btrfs_root *root = fs_info->dev_root;
4718         struct btrfs_trans_handle *trans;
4719         struct btrfs_dev_extent *dev_extent = NULL;
4720         struct btrfs_path *path;
4721         u64 length;
4722         u64 chunk_offset;
4723         int ret;
4724         int slot;
4725         int failed = 0;
4726         bool retried = false;
4727         struct extent_buffer *l;
4728         struct btrfs_key key;
4729         struct btrfs_super_block *super_copy = fs_info->super_copy;
4730         u64 old_total = btrfs_super_total_bytes(super_copy);
4731         u64 old_size = btrfs_device_get_total_bytes(device);
4732         u64 diff;
4733         u64 start;
4734
4735         new_size = round_down(new_size, fs_info->sectorsize);
4736         start = new_size;
4737         diff = round_down(old_size - new_size, fs_info->sectorsize);
4738
4739         if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
4740                 return -EINVAL;
4741
4742         path = btrfs_alloc_path();
4743         if (!path)
4744                 return -ENOMEM;
4745
4746         path->reada = READA_BACK;
4747
4748         trans = btrfs_start_transaction(root, 0);
4749         if (IS_ERR(trans)) {
4750                 btrfs_free_path(path);
4751                 return PTR_ERR(trans);
4752         }
4753
4754         mutex_lock(&fs_info->chunk_mutex);
4755
4756         btrfs_device_set_total_bytes(device, new_size);
4757         if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
4758                 device->fs_devices->total_rw_bytes -= diff;
4759                 atomic64_sub(diff, &fs_info->free_chunk_space);
4760         }
4761
4762         /*
4763          * Once the device's size has been set to the new size, ensure all
4764          * in-memory chunks are synced to disk so that the loop below sees them
4765          * and relocates them accordingly.
4766          */
4767         if (contains_pending_extent(device, &start, diff)) {
4768                 mutex_unlock(&fs_info->chunk_mutex);
4769                 ret = btrfs_commit_transaction(trans);
4770                 if (ret)
4771                         goto done;
4772         } else {
4773                 mutex_unlock(&fs_info->chunk_mutex);
4774                 btrfs_end_transaction(trans);
4775         }
4776
4777 again:
4778         key.objectid = device->devid;
4779         key.offset = (u64)-1;
4780         key.type = BTRFS_DEV_EXTENT_KEY;
4781
4782         do {
4783                 mutex_lock(&fs_info->reclaim_bgs_lock);
4784                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4785                 if (ret < 0) {
4786                         mutex_unlock(&fs_info->reclaim_bgs_lock);
4787                         goto done;
4788                 }
4789
4790                 ret = btrfs_previous_item(root, path, 0, key.type);
4791                 if (ret) {
4792                         mutex_unlock(&fs_info->reclaim_bgs_lock);
4793                         if (ret < 0)
4794                                 goto done;
4795                         ret = 0;
4796                         btrfs_release_path(path);
4797                         break;
4798                 }
4799
4800                 l = path->nodes[0];
4801                 slot = path->slots[0];
4802                 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
4803
4804                 if (key.objectid != device->devid) {
4805                         mutex_unlock(&fs_info->reclaim_bgs_lock);
4806                         btrfs_release_path(path);
4807                         break;
4808                 }
4809
4810                 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
4811                 length = btrfs_dev_extent_length(l, dev_extent);
4812
4813                 if (key.offset + length <= new_size) {
4814                         mutex_unlock(&fs_info->reclaim_bgs_lock);
4815                         btrfs_release_path(path);
4816                         break;
4817                 }
4818
4819                 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
4820                 btrfs_release_path(path);
4821
4822                 /*
4823                  * We may be relocating the only data chunk we have,
4824                  * which could potentially end up losing the data's
4825                  * raid profile, so let's allocate an empty one in
4826                  * advance.
4827                  */
4828                 ret = btrfs_may_alloc_data_chunk(fs_info, chunk_offset);
4829                 if (ret < 0) {
4830                         mutex_unlock(&fs_info->reclaim_bgs_lock);
4831                         goto done;
4832                 }
4833
4834                 ret = btrfs_relocate_chunk(fs_info, chunk_offset);
4835                 mutex_unlock(&fs_info->reclaim_bgs_lock);
4836                 if (ret == -ENOSPC) {
4837                         failed++;
4838                 } else if (ret) {
4839                         if (ret == -ETXTBSY) {
4840                                 btrfs_warn(fs_info,
4841                    "could not shrink block group %llu due to active swapfile",
4842                                            chunk_offset);
4843                         }
4844                         goto done;
4845                 }
4846         } while (key.offset-- > 0);
4847
4848         if (failed && !retried) {
4849                 failed = 0;
4850                 retried = true;
4851                 goto again;
4852         } else if (failed && retried) {
4853                 ret = -ENOSPC;
4854                 goto done;
4855         }
4856
4857         /* Shrinking succeeded, else we would be at "done". */
4858         trans = btrfs_start_transaction(root, 0);
4859         if (IS_ERR(trans)) {
4860                 ret = PTR_ERR(trans);
4861                 goto done;
4862         }
4863
4864         mutex_lock(&fs_info->chunk_mutex);
4865         /* Clear all state bits beyond the shrunk device size */
4866         clear_extent_bits(&device->alloc_state, new_size, (u64)-1,
4867                           CHUNK_STATE_MASK);
4868
4869         btrfs_device_set_disk_total_bytes(device, new_size);
4870         if (list_empty(&device->post_commit_list))
4871                 list_add_tail(&device->post_commit_list,
4872                               &trans->transaction->dev_update_list);
4873
4874         WARN_ON(diff > old_total);
4875         btrfs_set_super_total_bytes(super_copy,
4876                         round_down(old_total - diff, fs_info->sectorsize));
4877         mutex_unlock(&fs_info->chunk_mutex);
4878
4879         /* Now btrfs_update_device() will change the on-disk size. */
4880         ret = btrfs_update_device(trans, device);
4881         if (ret < 0) {
4882                 btrfs_abort_transaction(trans, ret);
4883                 btrfs_end_transaction(trans);
4884         } else {
4885                 ret = btrfs_commit_transaction(trans);
4886         }
4887 done:
4888         btrfs_free_path(path);
4889         if (ret) {
4890                 mutex_lock(&fs_info->chunk_mutex);
4891                 btrfs_device_set_total_bytes(device, old_size);
4892                 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
4893                         device->fs_devices->total_rw_bytes += diff;
4894                 atomic64_add(diff, &fs_info->free_chunk_space);
4895                 mutex_unlock(&fs_info->chunk_mutex);
4896         }
4897         return ret;
4898 }
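
/*
 * Worked example of the shrink loop above (sizes assumed for illustration):
 * shrinking a device from 100GiB to 60GiB gives diff = 40GiB. The dev extent
 * items are walked backwards from key.offset = (u64)-1, and every chunk whose
 * dev extent reaches past the new 60GiB boundary is relocated with
 * btrfs_relocate_chunk(). -ENOSPC failures are only counted and the whole
 * pass is retried once; if the retry fails too, the shrink returns -ENOSPC
 * and the in-memory sizes are rolled back in the "done" error path.
 */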
4899
4900 static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info,
4901                            struct btrfs_key *key,
4902                            struct btrfs_chunk *chunk, int item_size)
4903 {
4904         struct btrfs_super_block *super_copy = fs_info->super_copy;
4905         struct btrfs_disk_key disk_key;
4906         u32 array_size;
4907         u8 *ptr;
4908
4909         lockdep_assert_held(&fs_info->chunk_mutex);
4910
4911         array_size = btrfs_super_sys_array_size(super_copy);
4912         if (array_size + item_size + sizeof(disk_key)
4913                         > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
4914                 return -EFBIG;
4915
4916         ptr = super_copy->sys_chunk_array + array_size;
4917         btrfs_cpu_key_to_disk(&disk_key, key);
4918         memcpy(ptr, &disk_key, sizeof(disk_key));
4919         ptr += sizeof(disk_key);
4920         memcpy(ptr, chunk, item_size);
4921         item_size += sizeof(disk_key);
4922         btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
4923
4924         return 0;
4925 }
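
/*
 * The resulting layout of super_copy->sys_chunk_array is a packed sequence of
 * (struct btrfs_disk_key, chunk item including its stripes) pairs:
 *
 *	[disk_key 0][chunk item 0][disk_key 1][chunk item 1] ...
 *
 * with btrfs_super_sys_array_size() tracking the number of bytes in use.
 */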
4926
4927 /*
4928  * sort the devices in descending order by max_avail, total_avail
4929  */
4930 static int btrfs_cmp_device_info(const void *a, const void *b)
4931 {
4932         const struct btrfs_device_info *di_a = a;
4933         const struct btrfs_device_info *di_b = b;
4934
4935         if (di_a->max_avail > di_b->max_avail)
4936                 return -1;
4937         if (di_a->max_avail < di_b->max_avail)
4938                 return 1;
4939         if (di_a->total_avail > di_b->total_avail)
4940                 return -1;
4941         if (di_a->total_avail < di_b->total_avail)
4942                 return 1;
4943         return 0;
4944 }
4945
4946 static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
4947 {
4948         if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK))
4949                 return;
4950
4951         btrfs_set_fs_incompat(info, RAID56);
4952 }
4953
4954 static void check_raid1c34_incompat_flag(struct btrfs_fs_info *info, u64 type)
4955 {
4956         if (!(type & (BTRFS_BLOCK_GROUP_RAID1C3 | BTRFS_BLOCK_GROUP_RAID1C4)))
4957                 return;
4958
4959         btrfs_set_fs_incompat(info, RAID1C34);
4960 }
4961
4962 /*
4963  * Structure used internally by btrfs_alloc_chunk(). Wraps the needed
4964  * parameters.
4965  */
4966 struct alloc_chunk_ctl {
4967         u64 start;
4968         u64 type;
4969         /* Total number of stripes to allocate */
4970         int num_stripes;
4971         /* sub_stripes info for map */
4972         int sub_stripes;
4973         /* Stripes per device */
4974         int dev_stripes;
4975         /* Maximum number of devices to use */
4976         int devs_max;
4977         /* Minimum number of devices to use */
4978         int devs_min;
4979         /* ndevs has to be a multiple of this */
4980         int devs_increment;
4981         /* Number of copies */
4982         int ncopies;
4983         /* Number of stripes worth of bytes to store parity information */
4984         int nparity;
4985         u64 max_stripe_size;
4986         u64 max_chunk_size;
4987         u64 dev_extent_min;
4988         u64 stripe_size;
4989         u64 chunk_size;
4990         int ndevs;
4991 };
4992
4993 static void init_alloc_chunk_ctl_policy_regular(
4994                                 struct btrfs_fs_devices *fs_devices,
4995                                 struct alloc_chunk_ctl *ctl)
4996 {
4997         u64 type = ctl->type;
4998
4999         if (type & BTRFS_BLOCK_GROUP_DATA) {
5000                 ctl->max_stripe_size = SZ_1G;
5001                 ctl->max_chunk_size = BTRFS_MAX_DATA_CHUNK_SIZE;
5002         } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
5003                 /* For larger filesystems, use larger metadata chunks */
5004                 if (fs_devices->total_rw_bytes > 50ULL * SZ_1G)
5005                         ctl->max_stripe_size = SZ_1G;
5006                 else
5007                         ctl->max_stripe_size = SZ_256M;
5008                 ctl->max_chunk_size = ctl->max_stripe_size;
5009         } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
5010                 ctl->max_stripe_size = SZ_32M;
5011                 ctl->max_chunk_size = 2 * ctl->max_stripe_size;
5012                 ctl->devs_max = min_t(int, ctl->devs_max,
5013                                       BTRFS_MAX_DEVS_SYS_CHUNK);
5014         } else {
5015                 BUG();
5016         }
5017
5018         /* We don't want a chunk larger than 10% of writable space */
5019         ctl->max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
5020                                   ctl->max_chunk_size);
5021         ctl->dev_extent_min = BTRFS_STRIPE_LEN * ctl->dev_stripes;
5022 }
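
/*
 * Worked example for the 10% cap above (an 80GiB filesystem is assumed purely
 * for illustration): div_factor(total_rw_bytes, 1) is 8GiB, so a data chunk
 * is capped at min(8GiB, BTRFS_MAX_DATA_CHUNK_SIZE), a metadata chunk at
 * min(8GiB, 1GiB) = 1GiB (total_rw_bytes is above 50GiB, so the 1GiB stripe
 * size applies) and a system chunk at min(8GiB, 64MiB) = 64MiB.
 */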
5023
5024 static void init_alloc_chunk_ctl_policy_zoned(
5025                                       struct btrfs_fs_devices *fs_devices,
5026                                       struct alloc_chunk_ctl *ctl)
5027 {
5028         u64 zone_size = fs_devices->fs_info->zone_size;
5029         u64 limit;
5030         int min_num_stripes = ctl->devs_min * ctl->dev_stripes;
5031         int min_data_stripes = (min_num_stripes - ctl->nparity) / ctl->ncopies;
5032         u64 min_chunk_size = min_data_stripes * zone_size;
5033         u64 type = ctl->type;
5034
5035         ctl->max_stripe_size = zone_size;
5036         if (type & BTRFS_BLOCK_GROUP_DATA) {
5037                 ctl->max_chunk_size = round_down(BTRFS_MAX_DATA_CHUNK_SIZE,
5038                                                  zone_size);
5039         } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
5040                 ctl->max_chunk_size = ctl->max_stripe_size;
5041         } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
5042                 ctl->max_chunk_size = 2 * ctl->max_stripe_size;
5043                 ctl->devs_max = min_t(int, ctl->devs_max,
5044                                       BTRFS_MAX_DEVS_SYS_CHUNK);
5045         } else {
5046                 BUG();
5047         }
5048
5049         /* We don't want a chunk larger than 10% of writable space */
5050         limit = max(round_down(div_factor(fs_devices->total_rw_bytes, 1),
5051                                zone_size),
5052                     min_chunk_size);
5053         ctl->max_chunk_size = min(limit, ctl->max_chunk_size);
5054         ctl->dev_extent_min = zone_size * ctl->dev_stripes;
5055 }
5056
5057 static void init_alloc_chunk_ctl(struct btrfs_fs_devices *fs_devices,
5058                                  struct alloc_chunk_ctl *ctl)
5059 {
5060         int index = btrfs_bg_flags_to_raid_index(ctl->type);
5061
5062         ctl->sub_stripes = btrfs_raid_array[index].sub_stripes;
5063         ctl->dev_stripes = btrfs_raid_array[index].dev_stripes;
5064         ctl->devs_max = btrfs_raid_array[index].devs_max;
5065         if (!ctl->devs_max)
5066                 ctl->devs_max = BTRFS_MAX_DEVS(fs_devices->fs_info);
5067         ctl->devs_min = btrfs_raid_array[index].devs_min;
5068         ctl->devs_increment = btrfs_raid_array[index].devs_increment;
5069         ctl->ncopies = btrfs_raid_array[index].ncopies;
5070         ctl->nparity = btrfs_raid_array[index].nparity;
5071         ctl->ndevs = 0;
5072
5073         switch (fs_devices->chunk_alloc_policy) {
5074         case BTRFS_CHUNK_ALLOC_REGULAR:
5075                 init_alloc_chunk_ctl_policy_regular(fs_devices, ctl);
5076                 break;
5077         case BTRFS_CHUNK_ALLOC_ZONED:
5078                 init_alloc_chunk_ctl_policy_zoned(fs_devices, ctl);
5079                 break;
5080         default:
5081                 BUG();
5082         }
5083 }
5084
5085 static int gather_device_info(struct btrfs_fs_devices *fs_devices,
5086                               struct alloc_chunk_ctl *ctl,
5087                               struct btrfs_device_info *devices_info)
5088 {
5089         struct btrfs_fs_info *info = fs_devices->fs_info;
5090         struct btrfs_device *device;
5091         u64 total_avail;
5092         u64 dev_extent_want = ctl->max_stripe_size * ctl->dev_stripes;
5093         int ret;
5094         int ndevs = 0;
5095         u64 max_avail;
5096         u64 dev_offset;
5097
5098         /*
5099          * in the first pass through the devices list, we gather information
5100          * about the available holes on each device.
5101          */
5102         list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
5103                 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
5104                         WARN(1, KERN_ERR
5105                                "BTRFS: read-only device in alloc_list\n");
5106                         continue;
5107                 }
5108
5109                 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
5110                                         &device->dev_state) ||
5111                     test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
5112                         continue;
5113
5114                 if (device->total_bytes > device->bytes_used)
5115                         total_avail = device->total_bytes - device->bytes_used;
5116                 else
5117                         total_avail = 0;
5118
5119                 /* If there is no space on this device, skip it. */
5120                 if (total_avail < ctl->dev_extent_min)
5121                         continue;
5122
5123                 ret = find_free_dev_extent(device, dev_extent_want, &dev_offset,
5124                                            &max_avail);
5125                 if (ret && ret != -ENOSPC)
5126                         return ret;
5127
5128                 if (ret == 0)
5129                         max_avail = dev_extent_want;
5130
5131                 if (max_avail < ctl->dev_extent_min) {
5132                         if (btrfs_test_opt(info, ENOSPC_DEBUG))
5133                                 btrfs_debug(info,
5134                         "%s: devid %llu has no free space, have=%llu want=%llu",
5135                                             __func__, device->devid, max_avail,
5136                                             ctl->dev_extent_min);
5137                         continue;
5138                 }
5139
5140                 if (ndevs == fs_devices->rw_devices) {
5141                         WARN(1, "%s: found more than %llu devices\n",
5142                              __func__, fs_devices->rw_devices);
5143                         break;
5144                 }
5145                 devices_info[ndevs].dev_offset = dev_offset;
5146                 devices_info[ndevs].max_avail = max_avail;
5147                 devices_info[ndevs].total_avail = total_avail;
5148                 devices_info[ndevs].dev = device;
5149                 ++ndevs;
5150         }
5151         ctl->ndevs = ndevs;
5152
5153         /*
5154          * now sort the devices by hole size / available space
5155          */
5156         sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
5157              btrfs_cmp_device_info, NULL);
5158
5159         return 0;
5160 }
5161
5162 static int decide_stripe_size_regular(struct alloc_chunk_ctl *ctl,
5163                                       struct btrfs_device_info *devices_info)
5164 {
5165         /* Number of stripes that count for block group size */
5166         int data_stripes;
5167
5168         /*
5169          * The primary goal is to maximize the number of stripes, so use as
5170          * many devices as possible, even if the stripes are not maximum sized.
5171          *
5172          * The DUP profile stores more than one stripe per device, and
5173          * max_avail is the total size, so we have to adjust.
5174          */
5175         ctl->stripe_size = div_u64(devices_info[ctl->ndevs - 1].max_avail,
5176                                    ctl->dev_stripes);
5177         ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
5178
5179         /* This will have to be fixed for RAID1 and RAID10 over more drives */
5180         data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;
5181
5182         /*
5183          * Use the number of data stripes to figure out how big this chunk is
5184          * really going to be in terms of logical address space, and compare
5185          * that answer with the max chunk size. If it's higher, we try to
5186          * reduce stripe_size.
5187          */
5188         if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) {
5189                 /*
5190                  * Reduce stripe_size, round it up to a 16MB boundary again and
5191                  * then use it, unless it ends up being even bigger than the
5192                  * previous value we had already.
5193                  */
5194                 ctl->stripe_size = min(round_up(div_u64(ctl->max_chunk_size,
5195                                                         data_stripes), SZ_16M),
5196                                        ctl->stripe_size);
5197         }
5198
5199         /* Align to BTRFS_STRIPE_LEN */
5200         ctl->stripe_size = round_down(ctl->stripe_size, BTRFS_STRIPE_LEN);
5201         ctl->chunk_size = ctl->stripe_size * data_stripes;
5202
5203         return 0;
5204 }
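
/*
 * Worked example (device sizes assumed for illustration): a data RAID0 chunk
 * over 3 writable devices, each with at least 1GiB of free space. With
 * max_stripe_size = 1GiB and dev_stripes = 1, stripe_size starts at 1GiB,
 * num_stripes = 3 and data_stripes = 3. As long as the 3GiB logical size is
 * below max_chunk_size no reduction happens, stripe_size is already a
 * multiple of BTRFS_STRIPE_LEN, and the chunk covers 3GiB of logical address
 * space backed by a 1GiB dev extent on each device.
 */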
5205
5206 static int decide_stripe_size_zoned(struct alloc_chunk_ctl *ctl,
5207                                     struct btrfs_device_info *devices_info)
5208 {
5209         u64 zone_size = devices_info[0].dev->zone_info->zone_size;
5210         /* Number of stripes that count for block group size */
5211         int data_stripes;
5212
5213         /*
5214          * It should hold because:
5215          *    dev_extent_min == dev_extent_want == zone_size * dev_stripes
5216          */
5217         ASSERT(devices_info[ctl->ndevs - 1].max_avail == ctl->dev_extent_min);
5218
5219         ctl->stripe_size = zone_size;
5220         ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
5221         data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;
5222
5223         /* stripe_size is fixed in zoned filesystems. Reduce ndevs instead. */
5224         if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) {
5225                 ctl->ndevs = div_u64(div_u64(ctl->max_chunk_size * ctl->ncopies,
5226                                              ctl->stripe_size) + ctl->nparity,
5227                                      ctl->dev_stripes);
5228                 ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
5229                 data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;
5230                 ASSERT(ctl->stripe_size * data_stripes <= ctl->max_chunk_size);
5231         }
5232
5233         ctl->chunk_size = ctl->stripe_size * data_stripes;
5234
5235         return 0;
5236 }
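
/*
 * Worked zoned example (a 256MiB zone size is assumed for illustration): a
 * RAID1 metadata chunk on 2 devices gets stripe_size fixed at 256MiB,
 * num_stripes = 2 and data_stripes = (2 - 0) / 2 = 1, i.e. one 256MiB chunk
 * backed by one zone per device. If data_stripes * zone_size exceeded
 * max_chunk_size, ndevs would be reduced instead, because the stripe size
 * cannot drop below the zone size.
 */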
5237
5238 static int decide_stripe_size(struct btrfs_fs_devices *fs_devices,
5239                               struct alloc_chunk_ctl *ctl,
5240                               struct btrfs_device_info *devices_info)
5241 {
5242         struct btrfs_fs_info *info = fs_devices->fs_info;
5243
5244         /*
5245          * Round down to the number of usable stripes; devs_increment can be
5246          * any number, so we can't use round_down(), which requires a power of
5247          * 2, while rounddown() is safe.
5248          */
5249         ctl->ndevs = rounddown(ctl->ndevs, ctl->devs_increment);
5250
5251         if (ctl->ndevs < ctl->devs_min) {
5252                 if (btrfs_test_opt(info, ENOSPC_DEBUG)) {
5253                         btrfs_debug(info,
5254         "%s: not enough devices with free space: have=%d minimum required=%d",
5255                                     __func__, ctl->ndevs, ctl->devs_min);
5256                 }
5257                 return -ENOSPC;
5258         }
5259
5260         ctl->ndevs = min(ctl->ndevs, ctl->devs_max);
5261
5262         switch (fs_devices->chunk_alloc_policy) {
5263         case BTRFS_CHUNK_ALLOC_REGULAR:
5264                 return decide_stripe_size_regular(ctl, devices_info);
5265         case BTRFS_CHUNK_ALLOC_ZONED:
5266                 return decide_stripe_size_zoned(ctl, devices_info);
5267         default:
5268                 BUG();
5269         }
5270 }
5271
5272 static struct btrfs_block_group *create_chunk(struct btrfs_trans_handle *trans,
5273                         struct alloc_chunk_ctl *ctl,
5274                         struct btrfs_device_info *devices_info)
5275 {
5276         struct btrfs_fs_info *info = trans->fs_info;
5277         struct map_lookup *map = NULL;
5278         struct extent_map_tree *em_tree;
5279         struct btrfs_block_group *block_group;
5280         struct extent_map *em;
5281         u64 start = ctl->start;
5282         u64 type = ctl->type;
5283         int ret;
5284         int i;
5285         int j;
5286
5287         map = kmalloc(map_lookup_size(ctl->num_stripes), GFP_NOFS);
5288         if (!map)
5289                 return ERR_PTR(-ENOMEM);
5290         map->num_stripes = ctl->num_stripes;
5291
5292         for (i = 0; i < ctl->ndevs; ++i) {
5293                 for (j = 0; j < ctl->dev_stripes; ++j) {
5294                         int s = i * ctl->dev_stripes + j;
5295                         map->stripes[s].dev = devices_info[i].dev;
5296                         map->stripes[s].physical = devices_info[i].dev_offset +
5297                                                    j * ctl->stripe_size;
5298                 }
5299         }
5300         map->stripe_len = BTRFS_STRIPE_LEN;
5301         map->io_align = BTRFS_STRIPE_LEN;
5302         map->io_width = BTRFS_STRIPE_LEN;
5303         map->type = type;
5304         map->sub_stripes = ctl->sub_stripes;
5305
5306         trace_btrfs_chunk_alloc(info, map, start, ctl->chunk_size);
5307
5308         em = alloc_extent_map();
5309         if (!em) {
5310                 kfree(map);
5311                 return ERR_PTR(-ENOMEM);
5312         }
5313         set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
5314         em->map_lookup = map;
5315         em->start = start;
5316         em->len = ctl->chunk_size;
5317         em->block_start = 0;
5318         em->block_len = em->len;
5319         em->orig_block_len = ctl->stripe_size;
5320
5321         em_tree = &info->mapping_tree;
5322         write_lock(&em_tree->lock);
5323         ret = add_extent_mapping(em_tree, em, 0);
5324         if (ret) {
5325                 write_unlock(&em_tree->lock);
5326                 free_extent_map(em);
5327                 return ERR_PTR(ret);
5328         }
5329         write_unlock(&em_tree->lock);
5330
5331         block_group = btrfs_make_block_group(trans, 0, type, start, ctl->chunk_size);
5332         if (IS_ERR(block_group))
5333                 goto error_del_extent;
5334
5335         for (i = 0; i < map->num_stripes; i++) {
5336                 struct btrfs_device *dev = map->stripes[i].dev;
5337
5338                 btrfs_device_set_bytes_used(dev,
5339                                             dev->bytes_used + ctl->stripe_size);
5340                 if (list_empty(&dev->post_commit_list))
5341                         list_add_tail(&dev->post_commit_list,
5342                                       &trans->transaction->dev_update_list);
5343         }
5344
5345         atomic64_sub(ctl->stripe_size * map->num_stripes,
5346                      &info->free_chunk_space);
5347
5348         free_extent_map(em);
5349         check_raid56_incompat_flag(info, type);
5350         check_raid1c34_incompat_flag(info, type);
5351
5352         return block_group;
5353
5354 error_del_extent:
5355         write_lock(&em_tree->lock);
5356         remove_extent_mapping(em_tree, em);
5357         write_unlock(&em_tree->lock);
5358
5359         /* One for our allocation */
5360         free_extent_map(em);
5361         /* One for the tree reference */
5362         free_extent_map(em);
5363
5364         return block_group;
5365 }
5366
5367 struct btrfs_block_group *btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
5368                                             u64 type)
5369 {
5370         struct btrfs_fs_info *info = trans->fs_info;
5371         struct btrfs_fs_devices *fs_devices = info->fs_devices;
5372         struct btrfs_device_info *devices_info = NULL;
5373         struct alloc_chunk_ctl ctl;
5374         struct btrfs_block_group *block_group;
5375         int ret;
5376
5377         lockdep_assert_held(&info->chunk_mutex);
5378
5379         if (!alloc_profile_is_valid(type, 0)) {
5380                 ASSERT(0);
5381                 return ERR_PTR(-EINVAL);
5382         }
5383
5384         if (list_empty(&fs_devices->alloc_list)) {
5385                 if (btrfs_test_opt(info, ENOSPC_DEBUG))
5386                         btrfs_debug(info, "%s: no writable device", __func__);
5387                 return ERR_PTR(-ENOSPC);
5388         }
5389
5390         if (!(type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
5391                 btrfs_err(info, "invalid chunk type 0x%llx requested", type);
5392                 ASSERT(0);
5393                 return ERR_PTR(-EINVAL);
5394         }
5395
5396         ctl.start = find_next_chunk(info);
5397         ctl.type = type;
5398         init_alloc_chunk_ctl(fs_devices, &ctl);
5399
5400         devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info),
5401                                GFP_NOFS);
5402         if (!devices_info)
5403                 return ERR_PTR(-ENOMEM);
5404
5405         ret = gather_device_info(fs_devices, &ctl, devices_info);
5406         if (ret < 0) {
5407                 block_group = ERR_PTR(ret);
5408                 goto out;
5409         }
5410
5411         ret = decide_stripe_size(fs_devices, &ctl, devices_info);
5412         if (ret < 0) {
5413                 block_group = ERR_PTR(ret);
5414                 goto out;
5415         }
5416
5417         block_group = create_chunk(trans, &ctl, devices_info);
5418
5419 out:
5420         kfree(devices_info);
5421         return block_group;
5422 }
5423
5424 /*
5425  * This function, btrfs_chunk_alloc_add_chunk_item(), typically belongs to
5426  * phase 1 of chunk allocation. It belongs to phase 2 only when allocating system
5427  * chunks.
5428  *
5429  * See the comment at btrfs_chunk_alloc() for details about the chunk allocation
5430  * phases.
5431  */
5432 int btrfs_chunk_alloc_add_chunk_item(struct btrfs_trans_handle *trans,
5433                                      struct btrfs_block_group *bg)
5434 {
5435         struct btrfs_fs_info *fs_info = trans->fs_info;
5436         struct btrfs_root *extent_root = fs_info->extent_root;
5437         struct btrfs_root *chunk_root = fs_info->chunk_root;
5438         struct btrfs_key key;
5439         struct btrfs_chunk *chunk;
5440         struct btrfs_stripe *stripe;
5441         struct extent_map *em;
5442         struct map_lookup *map;
5443         size_t item_size;
5444         int i;
5445         int ret;
5446
5447         /*
5448          * We take the chunk_mutex for 2 reasons:
5449          *
5450          * 1) Updates and insertions in the chunk btree must be done while holding
5451          *    the chunk_mutex, as well as updating the system chunk array in the
5452          *    superblock. See the comment on top of btrfs_chunk_alloc() for the
5453          *    details;
5454          *
5455          * 2) To prevent races with the final phase of a device replace operation
5456          *    that replaces the device object associated with the map's stripes,
5457          *    because the device object's id can change at any time during that
5458          *    final phase of the device replace operation
5459          *    (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the
5460          *    replaced device and then see it with an ID of BTRFS_DEV_REPLACE_DEVID,
5461          *    which would cause a failure when updating the device item, which does
5462          *    not exist, or persisting a stripe of the chunk item with such an ID.
5463          *    Here we can't use the device_list_mutex because our caller already
5464          *    has locked the chunk_mutex, and the final phase of device replace
5465          *    acquires both mutexes - first the device_list_mutex and then the
5466          *    chunk_mutex. Using any of those two mutexes protects us from a
5467          *    concurrent device replace.
5468          */
5469         lockdep_assert_held(&fs_info->chunk_mutex);
5470
5471         em = btrfs_get_chunk_map(fs_info, bg->start, bg->length);
5472         if (IS_ERR(em)) {
5473                 ret = PTR_ERR(em);
5474                 btrfs_abort_transaction(trans, ret);
5475                 return ret;
5476         }
5477
5478         map = em->map_lookup;
5479         item_size = btrfs_chunk_item_size(map->num_stripes);
5480
5481         chunk = kzalloc(item_size, GFP_NOFS);
5482         if (!chunk) {
5483                 ret = -ENOMEM;
5484                 btrfs_abort_transaction(trans, ret);
5485                 goto out;
5486         }
5487
5488         for (i = 0; i < map->num_stripes; i++) {
5489                 struct btrfs_device *device = map->stripes[i].dev;
5490
5491                 ret = btrfs_update_device(trans, device);
5492                 if (ret)
5493                         goto out;
5494         }
5495
5496         stripe = &chunk->stripe;
5497         for (i = 0; i < map->num_stripes; i++) {
5498                 struct btrfs_device *device = map->stripes[i].dev;
5499                 const u64 dev_offset = map->stripes[i].physical;
5500
5501                 btrfs_set_stack_stripe_devid(stripe, device->devid);
5502                 btrfs_set_stack_stripe_offset(stripe, dev_offset);
5503                 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
5504                 stripe++;
5505         }
5506
5507         btrfs_set_stack_chunk_length(chunk, bg->length);
5508         btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
5509         btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
5510         btrfs_set_stack_chunk_type(chunk, map->type);
5511         btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
5512         btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
5513         btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
5514         btrfs_set_stack_chunk_sector_size(chunk, fs_info->sectorsize);
5515         btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
5516
5517         key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
5518         key.type = BTRFS_CHUNK_ITEM_KEY;
5519         key.offset = bg->start;
5520
5521         ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
5522         if (ret)
5523                 goto out;
5524
5525         bg->chunk_item_inserted = 1;
5526
5527         if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
5528                 ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size);
5529                 if (ret)
5530                         goto out;
5531         }
5532
5533 out:
5534         kfree(chunk);
5535         free_extent_map(em);
5536         return ret;
5537 }
5538
5539 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans)
5540 {
5541         struct btrfs_fs_info *fs_info = trans->fs_info;
5542         u64 alloc_profile;
5543         struct btrfs_block_group *meta_bg;
5544         struct btrfs_block_group *sys_bg;
5545
5546         /*
5547          * When adding a new device for sprouting, the seed device is read-only
5548          * so we must first allocate a metadata and a system chunk. But before
5549          * adding the block group items to the extent, device and chunk btrees,
5550          * we must first:
5551          *
5552          * 1) Create both chunks without doing any changes to the btrees, as
5553          *    otherwise we would get -ENOSPC since the block groups from the
5554          *    seed device are read-only;
5555          *
5556          * 2) Add the device item for the new sprout device - finishing the setup
5557          *    of a new block group requires updating the device item in the chunk
5558          *    btree, so it must exist when we attempt to do it. The previous step
5559          *    ensures this does not fail with -ENOSPC.
5560          *
5561          * After that we can add the block group items to their btrees:
5562          * update existing device item in the chunk btree, add a new block group
5563          * item to the extent btree, add a new chunk item to the chunk btree and
5564          * finally add the new device extent items to the devices btree.
5565          */
5566
5567         alloc_profile = btrfs_metadata_alloc_profile(fs_info);
5568         meta_bg = btrfs_alloc_chunk(trans, alloc_profile);
5569         if (IS_ERR(meta_bg))
5570                 return PTR_ERR(meta_bg);
5571
5572         alloc_profile = btrfs_system_alloc_profile(fs_info);
5573         sys_bg = btrfs_alloc_chunk(trans, alloc_profile);
5574         if (IS_ERR(sys_bg))
5575                 return PTR_ERR(sys_bg);
5576
5577         return 0;
5578 }
5579
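/*
 * Return how many device failures the RAID profile of @map can tolerate,
 * taken from the btrfs_raid_array table.
 */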
5580 static inline int btrfs_chunk_max_errors(struct map_lookup *map)
5581 {
5582         const int index = btrfs_bg_flags_to_raid_index(map->type);
5583
5584         return btrfs_raid_array[index].tolerated_failures;
5585 }
5586
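/*
 * Check whether the chunk at @chunk_offset has to be treated as read-only:
 * return 1 if any stripe device is not writeable, if more devices are
 * missing than the profile tolerates, or if the chunk map cannot be looked
 * up, and 0 otherwise.
 */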
5587 int btrfs_chunk_readonly(struct btrfs_fs_info *fs_info, u64 chunk_offset)
5588 {
5589         struct extent_map *em;
5590         struct map_lookup *map;
5591         int readonly = 0;
5592         int miss_ndevs = 0;
5593         int i;
5594
5595         em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
5596         if (IS_ERR(em))
5597                 return 1;
5598
5599         map = em->map_lookup;
5600         for (i = 0; i < map->num_stripes; i++) {
5601                 if (test_bit(BTRFS_DEV_STATE_MISSING,
5602                                         &map->stripes[i].dev->dev_state)) {
5603                         miss_ndevs++;
5604                         continue;
5605                 }
5606                 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE,
5607                                         &map->stripes[i].dev->dev_state)) {
5608                         readonly = 1;
5609                         goto end;
5610                 }
5611         }
5612
5613         /*
5614          * If the number of missing devices is larger than max errors,
5615          * we can not write the data into that chunk successfully, so
5616          * we cannot write the data into that chunk successfully, so
5617          */
5618         if (miss_ndevs > btrfs_chunk_max_errors(map))
5619                 readonly = 1;
5620 end:
5621         free_extent_map(em);
5622         return readonly;
5623 }
5624
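/*
 * Empty the chunk mapping tree: remove every extent map and drop both the
 * lookup reference and the tree's own reference on it.
 */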
5625 void btrfs_mapping_tree_free(struct extent_map_tree *tree)
5626 {
5627         struct extent_map *em;
5628
5629         while (1) {
5630                 write_lock(&tree->lock);
5631                 em = lookup_extent_mapping(tree, 0, (u64)-1);
5632                 if (em)
5633                         remove_extent_mapping(tree, em);
5634                 write_unlock(&tree->lock);
5635                 if (!em)
5636                         break;
5637                 /* once for us */
5638                 free_extent_map(em);
5639                 /* once for the tree */
5640                 free_extent_map(em);
5641         }
5642 }
5643
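/*
 * Return how many ways the data at @logical/@len can be read, based on the
 * RAID profile of the containing chunk (mirrors, or reconstruction attempts
 * for RAID5/6). One extra copy is reported while a device replace with a
 * valid target device is running.
 */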
5644 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
5645 {
5646         struct extent_map *em;
5647         struct map_lookup *map;
5648         int ret;
5649
5650         em = btrfs_get_chunk_map(fs_info, logical, len);
5651         if (IS_ERR(em))
5652                 /*
5653                  * We could return errors for these cases, but that could get
5654                  * ugly and we'd probably do the same thing anyway, which is
5655                  * just to exit without doing anything else. So return 1 so
5656                  * the callers don't try to use other copies.
5657                  */
5658                 return 1;
5659
5660         map = em->map_lookup;
5661         if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1_MASK))
5662                 ret = map->num_stripes;
5663         else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5664                 ret = map->sub_stripes;
5665         else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
5666                 ret = 2;
5667         else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
5668                 /*
5669                  * There could be two corrupted data stripes, so we need
5670                  * to retry in a loop in order to rebuild the correct data.
5671                  *
5672                  * Fail a stripe at a time on every retry except the
5673                  * stripe under reconstruction.
5674                  */
5675                 ret = map->num_stripes;
5676         else
5677                 ret = 1;
5678         free_extent_map(em);
5679
5680         down_read(&fs_info->dev_replace.rwsem);
5681         if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace) &&
5682             fs_info->dev_replace.tgtdev)
5683                 ret++;
5684         up_read(&fs_info->dev_replace.rwsem);
5685
5686         return ret;
5687 }
5688
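/*
 * Return the full stripe length for the chunk containing @logical: the
 * sector size for non-RAID56 profiles, or stripe_len times the number of
 * data stripes for RAID5/6.
 */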
5689 unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
5690                                     u64 logical)
5691 {
5692         struct extent_map *em;
5693         struct map_lookup *map;
5694         unsigned long len = fs_info->sectorsize;
5695
5696         em = btrfs_get_chunk_map(fs_info, logical, len);
5697
5698         if (!WARN_ON(IS_ERR(em))) {
5699                 map = em->map_lookup;
5700                 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
5701                         len = map->stripe_len * nr_data_stripes(map);
5702                 free_extent_map(em);
5703         }
5704         return len;
5705 }
5706
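/* Return 1 if the chunk containing @logical uses a RAID5/6 (parity) profile. */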
5707 int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
5708 {
5709         struct extent_map *em;
5710         struct map_lookup *map;
5711         int ret = 0;
5712
5713         em = btrfs_get_chunk_map(fs_info, logical, len);
5714
5715         if (!WARN_ON(IS_ERR(em))) {
5716                 map = em->map_lookup;
5717                 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
5718                         ret = 1;
5719                 free_extent_map(em);
5720         }
5721         return ret;
5722 }
5723
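/*
 * Pick a mirror stripe to read from for a RAID1* or RAID10 chunk. The
 * preferred mirror is chosen according to the configured read policy; a
 * stripe whose device is missing, or which is the source of a running
 * device replace, is only used if no better mirror is available.
 */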
5724 static int find_live_mirror(struct btrfs_fs_info *fs_info,
5725                             struct map_lookup *map, int first,
5726                             int dev_replace_is_ongoing)
5727 {
5728         int i;
5729         int num_stripes;
5730         int preferred_mirror;
5731         int tolerance;
5732         struct btrfs_device *srcdev;
5733
5734         ASSERT((map->type &
5735                  (BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10)));
5736
5737         if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5738                 num_stripes = map->sub_stripes;
5739         else
5740                 num_stripes = map->num_stripes;
5741
5742         switch (fs_info->fs_devices->read_policy) {
5743         default:
5744                 /* Shouldn't happen, just warn and use pid instead of failing */
5745                 btrfs_warn_rl(fs_info,
5746                               "unknown read_policy type %u, reset to pid",
5747                               fs_info->fs_devices->read_policy);
5748                 fs_info->fs_devices->read_policy = BTRFS_READ_POLICY_PID;
5749                 fallthrough;
5750         case BTRFS_READ_POLICY_PID:
5751                 preferred_mirror = first + (current->pid % num_stripes);
5752                 break;
5753         }
5754
5755         if (dev_replace_is_ongoing &&
5756             fs_info->dev_replace.cont_reading_from_srcdev_mode ==
5757              BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
5758                 srcdev = fs_info->dev_replace.srcdev;
5759         else
5760                 srcdev = NULL;
5761
5762         /*
5763          * Try to avoid the drive that is the source drive for a
5764          * dev-replace procedure; only choose it if no other non-missing
5765          * mirror is available.
5766          */
5767         for (tolerance = 0; tolerance < 2; tolerance++) {
5768                 if (map->stripes[preferred_mirror].dev->bdev &&
5769                     (tolerance || map->stripes[preferred_mirror].dev != srcdev))
5770                         return preferred_mirror;
5771                 for (i = first; i < first + num_stripes; i++) {
5772                         if (map->stripes[i].dev->bdev &&
5773                             (tolerance || map->stripes[i].dev != srcdev))
5774                                 return i;
5775                 }
5776         }
5777
5778         /* We couldn't find one that doesn't fail.  Just return something
5779          * and the I/O error handling code will clean up eventually.
5780          */
5781         return preferred_mirror;
5782 }
5783
5784 /* Bubble-sort the stripe set to put the parity/syndrome stripes last */
5785 static void sort_parity_stripes(struct btrfs_bio *bbio, int num_stripes)
5786 {
5787         int i;
5788         int again = 1;
5789
5790         while (again) {
5791                 again = 0;
5792                 for (i = 0; i < num_stripes - 1; i++) {
5793                         /* Swap if parity is on a smaller index */
5794                         if (bbio->raid_map[i] > bbio->raid_map[i + 1]) {
5795                                 swap(bbio->stripes[i], bbio->stripes[i + 1]);
5796                                 swap(bbio->raid_map[i], bbio->raid_map[i + 1]);
5797                                 again = 1;
5798                         }
5799                 }
5800         }
5801 }
5802
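/*
 * Allocate a btrfs_bio together with its trailing arrays: the stripes and
 * the raid_map sized for @total_stripes, and the target device index map
 * sized for @real_stripes. The allocation uses __GFP_NOFAIL, so it never
 * returns NULL.
 */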
5803 static struct btrfs_bio *alloc_btrfs_bio(int total_stripes, int real_stripes)
5804 {
5805         struct btrfs_bio *bbio = kzalloc(
5806                  /* the size of the btrfs_bio */
5807                 sizeof(struct btrfs_bio) +
5808                 /* plus the variable array for the stripes */
5809                 sizeof(struct btrfs_bio_stripe) * (total_stripes) +
5810                 /* plus the variable array for the tgt dev */
5811                 sizeof(int) * (real_stripes) +
5812                 /*
5813                  * plus the raid_map, which includes both the tgt dev
5814                  * and the stripes
5815                  */
5816                 sizeof(u64) * (total_stripes),
5817                 GFP_NOFS|__GFP_NOFAIL);
5818
5819         atomic_set(&bbio->error, 0);
5820         refcount_set(&bbio->refs, 1);
5821
5822         bbio->tgtdev_map = (int *)(bbio->stripes + total_stripes);
5823         bbio->raid_map = (u64 *)(bbio->tgtdev_map + real_stripes);
5824
5825         return bbio;
5826 }
5827
5828 void btrfs_get_bbio(struct btrfs_bio *bbio)
5829 {
5830         WARN_ON(!refcount_read(&bbio->refs));
5831         refcount_inc(&bbio->refs);
5832 }
5833
5834 void btrfs_put_bbio(struct btrfs_bio *bbio)
5835 {
5836         if (!bbio)
5837                 return;
5838         if (refcount_dec_and_test(&bbio->refs))
5839                 kfree(bbio);
5840 }
5841
5842 /*
5843  * Can REQ_OP_DISCARD be sent with other REQ like REQ_OP_WRITE?
5844  *
5845  * Note that a discard is never sent to the target device of a device replace.
5846  */
5847 static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info,
5848                                          u64 logical, u64 *length_ret,
5849                                          struct btrfs_bio **bbio_ret)
5850 {
5851         struct extent_map *em;
5852         struct map_lookup *map;
5853         struct btrfs_bio *bbio;
5854         u64 length = *length_ret;
5855         u64 offset;
5856         u64 stripe_nr;
5857         u64 stripe_nr_end;
5858         u64 stripe_end_offset;
5859         u64 stripe_cnt;
5860         u64 stripe_len;
5861         u64 stripe_offset;
5862         u64 num_stripes;
5863         u32 stripe_index;
5864         u32 factor = 0;
5865         u32 sub_stripes = 0;
5866         u64 stripes_per_dev = 0;
5867         u32 remaining_stripes = 0;
5868         u32 last_stripe = 0;
5869         int ret = 0;
5870         int i;
5871
5872         /* Discard always returns a bbio */
5873         ASSERT(bbio_ret);
5874
5875         em = btrfs_get_chunk_map(fs_info, logical, length);
5876         if (IS_ERR(em))
5877                 return PTR_ERR(em);
5878
5879         map = em->map_lookup;
5880         /* we don't discard raid56 yet */
5881         if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5882                 ret = -EOPNOTSUPP;
5883                 goto out;
5884         }
5885
5886         offset = logical - em->start;
5887         length = min_t(u64, em->start + em->len - logical, length);
5888         *length_ret = length;
5889
5890         stripe_len = map->stripe_len;
5891         /*
5892          * stripe_nr counts the total number of stripes we have to stride
5893          * to get to this block
5894          */
5895         stripe_nr = div64_u64(offset, stripe_len);
5896
5897         /* stripe_offset is the offset of this block in its stripe */
5898         stripe_offset = offset - stripe_nr * stripe_len;
5899
5900         stripe_nr_end = round_up(offset + length, map->stripe_len);
5901         stripe_nr_end = div64_u64(stripe_nr_end, map->stripe_len);
5902         stripe_cnt = stripe_nr_end - stripe_nr;
5903         stripe_end_offset = stripe_nr_end * map->stripe_len -
5904                             (offset + length);
5905         /*
5906          * after this, stripe_nr is the number of stripes on this
5907          * device we have to walk to find the data, and stripe_index is
5908          * the number of our device in the stripe array
5909          */
5910         num_stripes = 1;
5911         stripe_index = 0;
5912         if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
5913                          BTRFS_BLOCK_GROUP_RAID10)) {
5914                 if (map->type & BTRFS_BLOCK_GROUP_RAID0)
5915                         sub_stripes = 1;
5916                 else
5917                         sub_stripes = map->sub_stripes;
5918
5919                 factor = map->num_stripes / sub_stripes;
5920                 num_stripes = min_t(u64, map->num_stripes,
5921                                     sub_stripes * stripe_cnt);
5922                 stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
5923                 stripe_index *= sub_stripes;
5924                 stripes_per_dev = div_u64_rem(stripe_cnt, factor,
5925                                               &remaining_stripes);
5926                 div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
5927                 last_stripe *= sub_stripes;
5928         } else if (map->type & (BTRFS_BLOCK_GROUP_RAID1_MASK |
5929                                 BTRFS_BLOCK_GROUP_DUP)) {
5930                 num_stripes = map->num_stripes;
5931         } else {
5932                 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
5933                                         &stripe_index);
5934         }
5935
5936         bbio = alloc_btrfs_bio(num_stripes, 0);
5937         if (!bbio) {
5938                 ret = -ENOMEM;
5939                 goto out;
5940         }
5941
5942         for (i = 0; i < num_stripes; i++) {
5943                 bbio->stripes[i].physical =
5944                         map->stripes[stripe_index].physical +
5945                         stripe_offset + stripe_nr * map->stripe_len;
5946                 bbio->stripes[i].dev = map->stripes[stripe_index].dev;
5947
5948                 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
5949                                  BTRFS_BLOCK_GROUP_RAID10)) {
5950                         bbio->stripes[i].length = stripes_per_dev *
5951                                 map->stripe_len;
5952
5953                         if (i / sub_stripes < remaining_stripes)
5954                                 bbio->stripes[i].length +=
5955                                         map->stripe_len;
5956
5957                         /*
5958                          * Special for the first stripe and
5959                          * the last stripe:
5960                          *
5961                          * |-------|...|-------|
5962                          *     |----------|
5963                          *    off     end_off
5964                          */
5965                         if (i < sub_stripes)
5966                                 bbio->stripes[i].length -=
5967                                         stripe_offset;
5968
5969                         if (stripe_index >= last_stripe &&
5970                             stripe_index <= (last_stripe +
5971                                              sub_stripes - 1))
5972                                 bbio->stripes[i].length -=
5973                                         stripe_end_offset;
5974
5975                         if (i == sub_stripes - 1)
5976                                 stripe_offset = 0;
5977                 } else {
5978                         bbio->stripes[i].length = length;
5979                 }
5980
5981                 stripe_index++;
5982                 if (stripe_index == map->num_stripes) {
5983                         stripe_index = 0;
5984                         stripe_nr++;
5985                 }
5986         }
5987
5988         *bbio_ret = bbio;
5989         bbio->map_type = map->type;
5990         bbio->num_stripes = num_stripes;
5991 out:
5992         free_extent_map(em);
5993         return ret;
5994 }
5995
5996 /*
5997  * In dev-replace case, for repair case (that's the only case where the mirror
5998  * is selected explicitly when calling btrfs_map_block), blocks left of the
5999  * left cursor can also be read from the target drive.
6000  *
6001  * For BTRFS_MAP_GET_READ_MIRRORS, the target drive is added as the last one
6002  * to the array of stripes.
6003  * For READ, it also needs to be supported using the same mirror number.
6004  *
6005  * If the requested block is not left of the left cursor, EIO is returned. This
6006  * can happen because btrfs_num_copies() returns one more in the dev-replace
6007  * case.
6008  */
6009 static int get_extra_mirror_from_replace(struct btrfs_fs_info *fs_info,
6010                                          u64 logical, u64 length,
6011                                          u64 srcdev_devid, int *mirror_num,
6012                                          u64 *physical)
6013 {
6014         struct btrfs_bio *bbio = NULL;
6015         int num_stripes;
6016         int index_srcdev = 0;
6017         int found = 0;
6018         u64 physical_of_found = 0;
6019         int i;
6020         int ret = 0;
6021
6022         ret = __btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
6023                                 logical, &length, &bbio, 0, 0);
6024         if (ret) {
6025                 ASSERT(bbio == NULL);
6026                 return ret;
6027         }
6028
6029         num_stripes = bbio->num_stripes;
6030         if (*mirror_num > num_stripes) {
6031                 /*
6032                  * BTRFS_MAP_GET_READ_MIRRORS does not contain this mirror,
6033                  * which means that the requested area is not left of the
6034                  * left cursor.
6035                  */
6036                 btrfs_put_bbio(bbio);
6037                 return -EIO;
6038         }
6039
6040         /*
6041          * Process the rest of the function using the mirror_num of the source
6042          * drive, therefore look it up first.  At the end, patch the device
6043          * pointer to that of the target drive.
6044          */
6045         for (i = 0; i < num_stripes; i++) {
6046                 if (bbio->stripes[i].dev->devid != srcdev_devid)
6047                         continue;
6048
6049                 /*
6050                  * In case of DUP, in order to keep it simple, only add the
6051                  * mirror with the lowest physical address
6052                  */
6053                 if (found &&
6054                     physical_of_found <= bbio->stripes[i].physical)
6055                         continue;
6056
6057                 index_srcdev = i;
6058                 found = 1;
6059                 physical_of_found = bbio->stripes[i].physical;
6060         }
6061
6062         btrfs_put_bbio(bbio);
6063
6064         ASSERT(found);
6065         if (!found)
6066                 return -EIO;
6067
6068         *mirror_num = index_srcdev + 1;
6069         *physical = physical_of_found;
6070         return ret;
6071 }
6072
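/*
 * On zoned filesystems, check whether the block group containing @logical
 * is still marked "to_copy" by the dev-replace process; always false on
 * non-zoned filesystems.
 */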
6073 static bool is_block_group_to_copy(struct btrfs_fs_info *fs_info, u64 logical)
6074 {
6075         struct btrfs_block_group *cache;
6076         bool ret;
6077
6078         /* Non-zoned filesystems do not use the "to_copy" flag */
6079         if (!btrfs_is_zoned(fs_info))
6080                 return false;
6081
6082         cache = btrfs_lookup_block_group(fs_info, logical);
6083
6084         spin_lock(&cache->lock);
6085         ret = cache->to_copy;
6086         spin_unlock(&cache->lock);
6087
6088         btrfs_put_block_group(cache);
6089         return ret;
6090 }
6091
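/*
 * Adjust the stripes of @bbio for a running device replace: for writes,
 * duplicate every stripe that targets the source device so the data also
 * goes to the target device (skipped for zoned block groups that the
 * dev-replace process will copy anyway); for BTRFS_MAP_GET_READ_MIRRORS,
 * add the target device as an extra mirror. The stripe and max error
 * counts are updated accordingly.
 */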
6092 static void handle_ops_on_dev_replace(enum btrfs_map_op op,
6093                                       struct btrfs_bio **bbio_ret,
6094                                       struct btrfs_dev_replace *dev_replace,
6095                                       u64 logical,
6096                                       int *num_stripes_ret, int *max_errors_ret)
6097 {
6098         struct btrfs_bio *bbio = *bbio_ret;
6099         u64 srcdev_devid = dev_replace->srcdev->devid;
6100         int tgtdev_indexes = 0;
6101         int num_stripes = *num_stripes_ret;
6102         int max_errors = *max_errors_ret;
6103         int i;
6104
6105         if (op == BTRFS_MAP_WRITE) {
6106                 int index_where_to_add;
6107
6108                 /*
6109                  * A block group which has "to_copy" set will eventually be
6110                  * copied by the dev-replace process. We can avoid cloning IO here.
6111                  */
6112                 if (is_block_group_to_copy(dev_replace->srcdev->fs_info, logical))
6113                         return;
6114
6115                 /*
6116                  * duplicate the write operations while the dev replace
6117                  * procedure is running. Since the copying of the old disk to
6118                  * the new disk takes place at run time while the filesystem is
6119                  * mounted writable, the regular write operations to the old
6120                  * disk have to be duplicated to go to the new disk as well.
6121                  *
6122                  * Note that device->missing is handled by the caller, and that
6123                  * the write to the old disk is already set up in the stripes
6124                  * array.
6125                  */
6126                 index_where_to_add = num_stripes;
6127                 for (i = 0; i < num_stripes; i++) {
6128                         if (bbio->stripes[i].dev->devid == srcdev_devid) {
6129                                 /* write to new disk, too */
6130                                 struct btrfs_bio_stripe *new =
6131                                         bbio->stripes + index_where_to_add;
6132                                 struct btrfs_bio_stripe *old =
6133                                         bbio->stripes + i;
6134
6135                                 new->physical = old->physical;
6136                                 new->length = old->length;
6137                                 new->dev = dev_replace->tgtdev;
6138                                 bbio->tgtdev_map[i] = index_where_to_add;
6139                                 index_where_to_add++;
6140                                 max_errors++;
6141                                 tgtdev_indexes++;
6142                         }
6143                 }
6144                 num_stripes = index_where_to_add;
6145         } else if (op == BTRFS_MAP_GET_READ_MIRRORS) {
6146                 int index_srcdev = 0;
6147                 int found = 0;
6148                 u64 physical_of_found = 0;
6149
6150                 /*
6151                  * During the dev-replace procedure, the target drive can also
6152                  * be used to read data in case it is needed to repair a corrupt
6153                  * block elsewhere. This is possible if the requested area is
6154                  * left of the left cursor. In this area, the target drive is a
6155                  * full copy of the source drive.
6156                  */
6157                 for (i = 0; i < num_stripes; i++) {
6158                         if (bbio->stripes[i].dev->devid == srcdev_devid) {
6159                                 /*
6160                                  * In case of DUP, in order to keep it simple,
6161                                  * only add the mirror with the lowest physical
6162                                  * address
6163                                  */
6164                                 if (found &&
6165                                     physical_of_found <=
6166                                      bbio->stripes[i].physical)
6167                                         continue;
6168                                 index_srcdev = i;
6169                                 found = 1;
6170                                 physical_of_found = bbio->stripes[i].physical;
6171                         }
6172                 }
6173                 if (found) {
6174                         struct btrfs_bio_stripe *tgtdev_stripe =
6175                                 bbio->stripes + num_stripes;
6176
6177                         tgtdev_stripe->physical = physical_of_found;
6178                         tgtdev_stripe->length =
6179                                 bbio->stripes[index_srcdev].length;
6180                         tgtdev_stripe->dev = dev_replace->tgtdev;
6181                         bbio->tgtdev_map[index_srcdev] = num_stripes;
6182
6183                         tgtdev_indexes++;
6184                         num_stripes++;
6185                 }
6186         }
6187
6188         *num_stripes_ret = num_stripes;
6189         *max_errors_ret = max_errors;
6190         bbio->num_tgtdevs = tgtdev_indexes;
6191         *bbio_ret = bbio;
6192 }
6193
6194 static bool need_full_stripe(enum btrfs_map_op op)
6195 {
6196         return (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS);
6197 }
6198
6199 /*
6200  * Calculate the geometry of a particular (address, len) tuple. This
6201  * information is used to calculate how big a particular bio can get before it
6202  * straddles a stripe.
6203  *
6204  * @fs_info: the filesystem
6205  * @em:      mapping containing the logical extent
6206  * @op:      type of operation - write or read
6207  * @logical: address that we want to figure out the geometry of
6208  * @io_geom: pointer used to return values
6209  *
6210  * Returns < 0 in case a chunk for the given logical address cannot be found
6211  * (usually shouldn't happen unless @logical is corrupted), 0 otherwise.
6212  */
6213 int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, struct extent_map *em,
6214                           enum btrfs_map_op op, u64 logical,
6215                           struct btrfs_io_geometry *io_geom)
6216 {
6217         struct map_lookup *map;
6218         u64 len;
6219         u64 offset;
6220         u64 stripe_offset;
6221         u64 stripe_nr;
6222         u64 stripe_len;
6223         u64 raid56_full_stripe_start = (u64)-1;
6224         int data_stripes;
6225
6226         ASSERT(op != BTRFS_MAP_DISCARD);
6227
6228         map = em->map_lookup;
6229         /* Offset of this logical address in the chunk */
6230         offset = logical - em->start;
6231         /* Len of a stripe in a chunk */
6232         stripe_len = map->stripe_len;
6233         /* Stripe where this block falls in */
6234         stripe_nr = div64_u64(offset, stripe_len);
6235         /* Offset of stripe in the chunk */
6236         stripe_offset = stripe_nr * stripe_len;
6237         if (offset < stripe_offset) {
6238                 btrfs_crit(fs_info,
6239 "stripe math has gone wrong, stripe_offset=%llu offset=%llu start=%llu logical=%llu stripe_len=%llu",
6240                         stripe_offset, offset, em->start, logical, stripe_len);
6241                 return -EINVAL;
6242         }
6243
6244         /* stripe_offset is the offset of this block in its stripe */
6245         stripe_offset = offset - stripe_offset;
6246         data_stripes = nr_data_stripes(map);
6247
6248         if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
6249                 u64 max_len = stripe_len - stripe_offset;
6250
6251                 /*
6252                  * In case of raid56, we need to know the stripe aligned start
6253                  */
6254                 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
6255                         unsigned long full_stripe_len = stripe_len * data_stripes;
6256                         raid56_full_stripe_start = offset;
6257
6258                         /*
6259                          * Allow a write of a full stripe, but make sure we
6260                          * don't allow straddling of stripes
6261                          */
6262                         raid56_full_stripe_start = div64_u64(raid56_full_stripe_start,
6263                                         full_stripe_len);
6264                         raid56_full_stripe_start *= full_stripe_len;
6265
6266                         /*
6267                          * For writes to RAID[56], allow a full stripeset across
6268                          * all disks. For other RAID types and for RAID[56]
6269                          * reads, just allow a single stripe (on a single disk).
6270                          */
6271                         if (op == BTRFS_MAP_WRITE) {
6272                                 max_len = stripe_len * data_stripes -
6273                                           (offset - raid56_full_stripe_start);
6274                         }
6275                 }
6276                 len = min_t(u64, em->len - offset, max_len);
6277         } else {
6278                 len = em->len - offset;
6279         }
6280
6281         io_geom->len = len;
6282         io_geom->offset = offset;
6283         io_geom->stripe_len = stripe_len;
6284         io_geom->stripe_nr = stripe_nr;
6285         io_geom->stripe_offset = stripe_offset;
6286         io_geom->raid56_stripe_offset = raid56_full_stripe_start;
6287
6288         return 0;
6289 }
6290
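/*
 * Map a logical address to the physical stripes that have to be read or
 * written, depending on @op and the RAID profile of the containing chunk.
 * The result is returned as a btrfs_bio in @bbio_ret, and @length is
 * trimmed to what a single mapping can cover.
 */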
6291 static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
6292                              enum btrfs_map_op op,
6293                              u64 logical, u64 *length,
6294                              struct btrfs_bio **bbio_ret,
6295                              int mirror_num, int need_raid_map)
6296 {
6297         struct extent_map *em;
6298         struct map_lookup *map;
6299         u64 stripe_offset;
6300         u64 stripe_nr;
6301         u64 stripe_len;
6302         u32 stripe_index;
6303         int data_stripes;
6304         int i;
6305         int ret = 0;
6306         int num_stripes;
6307         int max_errors = 0;
6308         int tgtdev_indexes = 0;
6309         struct btrfs_bio *bbio = NULL;
6310         struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
6311         int dev_replace_is_ongoing = 0;
6312         int num_alloc_stripes;
6313         int patch_the_first_stripe_for_dev_replace = 0;
6314         u64 physical_to_patch_in_first_stripe = 0;
6315         u64 raid56_full_stripe_start = (u64)-1;
6316         struct btrfs_io_geometry geom;
6317
6318         ASSERT(bbio_ret);
6319         ASSERT(op != BTRFS_MAP_DISCARD);
6320
6321         em = btrfs_get_chunk_map(fs_info, logical, *length);
6322         ASSERT(!IS_ERR(em));
6323
6324         ret = btrfs_get_io_geometry(fs_info, em, op, logical, &geom);
6325         if (ret < 0)
6326                 return ret;
6327
6328         map = em->map_lookup;
6329
6330         *length = geom.len;
6331         stripe_len = geom.stripe_len;
6332         stripe_nr = geom.stripe_nr;
6333         stripe_offset = geom.stripe_offset;
6334         raid56_full_stripe_start = geom.raid56_stripe_offset;
6335         data_stripes = nr_data_stripes(map);
6336
6337         down_read(&dev_replace->rwsem);
6338         dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
6339         /*
6340          * Hold the semaphore for read during the whole operation; the write
6341          * lock is requested at commit time but must wait.
6342          */
6343         if (!dev_replace_is_ongoing)
6344                 up_read(&dev_replace->rwsem);
6345
6346         if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
6347             !need_full_stripe(op) && dev_replace->tgtdev != NULL) {
6348                 ret = get_extra_mirror_from_replace(fs_info, logical, *length,
6349                                                     dev_replace->srcdev->devid,
6350                                                     &mirror_num,
6351                                             &physical_to_patch_in_first_stripe);
6352                 if (ret)
6353                         goto out;
6354                 else
6355                         patch_the_first_stripe_for_dev_replace = 1;
6356         } else if (mirror_num > map->num_stripes) {
6357                 mirror_num = 0;
6358         }
6359
6360         num_stripes = 1;
6361         stripe_index = 0;
6362         if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
6363                 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
6364                                 &stripe_index);
6365                 if (!need_full_stripe(op))
6366                         mirror_num = 1;
6367         } else if (map->type & BTRFS_BLOCK_GROUP_RAID1_MASK) {
6368                 if (need_full_stripe(op))
6369                         num_stripes = map->num_stripes;
6370                 else if (mirror_num)
6371                         stripe_index = mirror_num - 1;
6372                 else {
6373                         stripe_index = find_live_mirror(fs_info, map, 0,
6374                                             dev_replace_is_ongoing);
6375                         mirror_num = stripe_index + 1;
6376                 }
6377
6378         } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
6379                 if (need_full_stripe(op)) {
6380                         num_stripes = map->num_stripes;
6381                 } else if (mirror_num) {
6382                         stripe_index = mirror_num - 1;
6383                 } else {
6384                         mirror_num = 1;
6385                 }
6386
6387         } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
6388                 u32 factor = map->num_stripes / map->sub_stripes;
6389
6390                 stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
6391                 stripe_index *= map->sub_stripes;
6392
6393                 if (need_full_stripe(op))
6394                         num_stripes = map->sub_stripes;
6395                 else if (mirror_num)
6396                         stripe_index += mirror_num - 1;
6397                 else {
6398                         int old_stripe_index = stripe_index;
6399                         stripe_index = find_live_mirror(fs_info, map,
6400                                               stripe_index,
6401                                               dev_replace_is_ongoing);
6402                         mirror_num = stripe_index - old_stripe_index + 1;
6403                 }
6404
6405         } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
6406                 if (need_raid_map && (need_full_stripe(op) || mirror_num > 1)) {
6407                         /* push stripe_nr back to the start of the full stripe */
6408                         stripe_nr = div64_u64(raid56_full_stripe_start,
6409                                         stripe_len * data_stripes);
6410
6411                         /* RAID[56] write or recovery. Return all stripes */
6412                         num_stripes = map->num_stripes;
6413                         max_errors = nr_parity_stripes(map);
6414
6415                         *length = map->stripe_len;
6416                         stripe_index = 0;
6417                         stripe_offset = 0;
6418                 } else {
6419                         /*
6420                          * Mirror #0 or #1 means the original data block.
6421                          * Mirror #2 is RAID5 parity block.
6422                          * Mirror #3 is RAID6 Q block.
6423                          */
6424                         stripe_nr = div_u64_rem(stripe_nr,
6425                                         data_stripes, &stripe_index);
6426                         if (mirror_num > 1)
6427                                 stripe_index = data_stripes + mirror_num - 2;
6428
6429                         /* We distribute the parity blocks across stripes */
6430                         div_u64_rem(stripe_nr + stripe_index, map->num_stripes,
6431                                         &stripe_index);
6432                         if (!need_full_stripe(op) && mirror_num <= 1)
6433                                 mirror_num = 1;
6434                 }
6435         } else {
6436                 /*
6437                  * after this, stripe_nr is the number of stripes on this
6438                  * device we have to walk to find the data, and stripe_index is
6439                  * the number of our device in the stripe array
6440                  */
6441                 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
6442                                 &stripe_index);
6443                 mirror_num = stripe_index + 1;
6444         }
6445         if (stripe_index >= map->num_stripes) {
6446                 btrfs_crit(fs_info,
6447                            "stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u",
6448                            stripe_index, map->num_stripes);
6449                 ret = -EINVAL;
6450                 goto out;
6451         }
6452
6453         num_alloc_stripes = num_stripes;
6454         if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL) {
6455                 if (op == BTRFS_MAP_WRITE)
6456                         num_alloc_stripes <<= 1;
6457                 if (op == BTRFS_MAP_GET_READ_MIRRORS)
6458                         num_alloc_stripes++;
6459                 tgtdev_indexes = num_stripes;
6460         }
6461
6462         bbio = alloc_btrfs_bio(num_alloc_stripes, tgtdev_indexes);
6463         if (!bbio) {
6464                 ret = -ENOMEM;
6465                 goto out;
6466         }
6467
6468         for (i = 0; i < num_stripes; i++) {
6469                 bbio->stripes[i].physical = map->stripes[stripe_index].physical +
6470                         stripe_offset + stripe_nr * map->stripe_len;
6471                 bbio->stripes[i].dev = map->stripes[stripe_index].dev;
6472                 stripe_index++;
6473         }
6474
6475         /* build raid_map */
6476         if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK && need_raid_map &&
6477             (need_full_stripe(op) || mirror_num > 1)) {
6478                 u64 tmp;
6479                 unsigned rot;
6480
6481                 /* Work out the disk rotation on this stripe-set */
6482                 div_u64_rem(stripe_nr, num_stripes, &rot);
6483
6484                 /* Fill in the logical address of each stripe */
6485                 tmp = stripe_nr * data_stripes;
6486                 for (i = 0; i < data_stripes; i++)
6487                         bbio->raid_map[(i+rot) % num_stripes] =
6488                                 em->start + (tmp + i) * map->stripe_len;
6489
6490                 bbio->raid_map[(i+rot) % map->num_stripes] = RAID5_P_STRIPE;
6491                 if (map->type & BTRFS_BLOCK_GROUP_RAID6)
6492                         bbio->raid_map[(i+rot+1) % num_stripes] =
6493                                 RAID6_Q_STRIPE;
6494
6495                 sort_parity_stripes(bbio, num_stripes);
6496         }
6497
6498         if (need_full_stripe(op))
6499                 max_errors = btrfs_chunk_max_errors(map);
6500
6501         if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL &&
6502             need_full_stripe(op)) {
6503                 handle_ops_on_dev_replace(op, &bbio, dev_replace, logical,
6504                                           &num_stripes, &max_errors);
6505         }
6506
6507         *bbio_ret = bbio;
6508         bbio->map_type = map->type;
6509         bbio->num_stripes = num_stripes;
6510         bbio->max_errors = max_errors;
6511         bbio->mirror_num = mirror_num;
6512
6513         /*
6514          * this is the case that REQ_READ && dev_replace_is_ongoing &&
6515          * mirror_num == num_stripes + 1 && dev_replace target drive is
6516          * available as a mirror
6517          */
6518         if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) {
6519                 WARN_ON(num_stripes > 1);
6520                 bbio->stripes[0].dev = dev_replace->tgtdev;
6521                 bbio->stripes[0].physical = physical_to_patch_in_first_stripe;
6522                 bbio->mirror_num = map->num_stripes + 1;
6523         }
6524 out:
6525         if (dev_replace_is_ongoing) {
6526                 lockdep_assert_held(&dev_replace->rwsem);
6527                 /* Unlock and let waiting writers proceed */
6528                 up_read(&dev_replace->rwsem);
6529         }
6530         free_extent_map(em);
6531         return ret;
6532 }
6533
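/*
 * Map a logical range for @op: discards get their own multi-stripe mapping,
 * everything else goes through __btrfs_map_block without building a raid
 * map.
 */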
6534 int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
6535                       u64 logical, u64 *length,
6536                       struct btrfs_bio **bbio_ret, int mirror_num)
6537 {
6538         if (op == BTRFS_MAP_DISCARD)
6539                 return __btrfs_map_block_for_discard(fs_info, logical,
6540                                                      length, bbio_ret);
6541
6542         return __btrfs_map_block(fs_info, op, logical, length, bbio_ret,
6543                                  mirror_num, 0);
6544 }
6545
6546 /* For Scrub/replace */
6547 int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
6548                      u64 logical, u64 *length,
6549                      struct btrfs_bio **bbio_ret)
6550 {
6551         return __btrfs_map_block(fs_info, op, logical, length, bbio_ret, 0, 1);
6552 }
6553
6554 static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio)
6555 {
6556         bio->bi_private = bbio->private;
6557         bio->bi_end_io = bbio->end_io;
6558         bio_endio(bio);
6559
6560         btrfs_put_bbio(bbio);
6561 }
6562
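/*
 * Completion handler for the cloned per-stripe bios: record device errors
 * and, once all stripes have completed, finish the original bio with an
 * error only if more stripes failed than the bbio can tolerate.
 */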
6563 static void btrfs_end_bio(struct bio *bio)
6564 {
6565         struct btrfs_bio *bbio = bio->bi_private;
6566         int is_orig_bio = 0;
6567
6568         if (bio->bi_status) {
6569                 atomic_inc(&bbio->error);
6570                 if (bio->bi_status == BLK_STS_IOERR ||
6571                     bio->bi_status == BLK_STS_TARGET) {
6572                         struct btrfs_device *dev = btrfs_io_bio(bio)->device;
6573
6574                         ASSERT(dev->bdev);
6575                         if (btrfs_op(bio) == BTRFS_MAP_WRITE)
6576                                 btrfs_dev_stat_inc_and_print(dev,
6577                                                 BTRFS_DEV_STAT_WRITE_ERRS);
6578                         else if (!(bio->bi_opf & REQ_RAHEAD))
6579                                 btrfs_dev_stat_inc_and_print(dev,
6580                                                 BTRFS_DEV_STAT_READ_ERRS);
6581                         if (bio->bi_opf & REQ_PREFLUSH)
6582                                 btrfs_dev_stat_inc_and_print(dev,
6583                                                 BTRFS_DEV_STAT_FLUSH_ERRS);
6584                 }
6585         }
6586
6587         if (bio == bbio->orig_bio)
6588                 is_orig_bio = 1;
6589
6590         btrfs_bio_counter_dec(bbio->fs_info);
6591
6592         if (atomic_dec_and_test(&bbio->stripes_pending)) {
6593                 if (!is_orig_bio) {
6594                         bio_put(bio);
6595                         bio = bbio->orig_bio;
6596                 }
6597
6598                 btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
6599                 /* only send an error to the higher layers if it is
6600                  * beyond the tolerance of the btrfs bio
6601                  */
6602                 if (atomic_read(&bbio->error) > bbio->max_errors) {
6603                         bio->bi_status = BLK_STS_IOERR;
6604                 } else {
6605                         /*
6606                          * this bio is actually up to date, we didn't
6607                          * go over the max number of errors
6608                          */
6609                         bio->bi_status = BLK_STS_OK;
6610                 }
6611
6612                 btrfs_end_bbio(bbio, bio);
6613         } else if (!is_orig_bio) {
6614                 bio_put(bio);
6615         }
6616 }
6617
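/*
 * Submit one stripe of a bbio to @dev at @physical, rewriting the sector to
 * the start of the zone for zone append writes on sequential zones and
 * falling back to a regular write otherwise.
 */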
6618 static void submit_stripe_bio(struct btrfs_bio *bbio, struct bio *bio,
6619                               u64 physical, struct btrfs_device *dev)
6620 {
6621         struct btrfs_fs_info *fs_info = bbio->fs_info;
6622
6623         bio->bi_private = bbio;
6624         btrfs_io_bio(bio)->device = dev;
6625         bio->bi_end_io = btrfs_end_bio;
6626         bio->bi_iter.bi_sector = physical >> 9;
6627         /*
6628          * For zone append writing, bi_sector must point to the beginning of
6629          * the zone.
6630          */
6631         if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
6632                 if (btrfs_dev_is_sequential(dev, physical)) {
6633                         u64 zone_start = round_down(physical, fs_info->zone_size);
6634
6635                         bio->bi_iter.bi_sector = zone_start >> SECTOR_SHIFT;
6636                 } else {
6637                         bio->bi_opf &= ~REQ_OP_ZONE_APPEND;
6638                         bio->bi_opf |= REQ_OP_WRITE;
6639                 }
6640         }
6641         btrfs_debug_in_rcu(fs_info,
6642         "btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u",
6643                 bio_op(bio), bio->bi_opf, bio->bi_iter.bi_sector,
6644                 (unsigned long)dev->bdev->bd_dev, rcu_str_deref(dev->name),
6645                 dev->devid, bio->bi_iter.bi_size);
6646         bio_set_dev(bio, dev->bdev);
6647
6648         btrfs_bio_counter_inc_noblocked(fs_info);
6649
6650         btrfsic_submit_bio(bio);
6651 }
6652
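/*
 * Account a stripe that could not be submitted at all and, if it was the
 * last outstanding stripe, complete the original bio.
 */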
6653 static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
6654 {
6655         atomic_inc(&bbio->error);
6656         if (atomic_dec_and_test(&bbio->stripes_pending)) {
6657                 /* Should be the original bio. */
6658                 WARN_ON(bio != bbio->orig_bio);
6659
6660                 btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
6661                 bio->bi_iter.bi_sector = logical >> 9;
6662                 if (atomic_read(&bbio->error) > bbio->max_errors)
6663                         bio->bi_status = BLK_STS_IOERR;
6664                 else
6665                         bio->bi_status = BLK_STS_OK;
6666                 btrfs_end_bbio(bbio, bio);
6667         }
6668 }
6669
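/*
 * Entry point for submitting a bio to the mapped physical devices: map the
 * logical range, hand RAID5/6 writes and recovery reads to the raid56 code,
 * and otherwise clone and submit one bio per stripe.
 */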
6670 blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
6671                            int mirror_num)
6672 {
6673         struct btrfs_device *dev;
6674         struct bio *first_bio = bio;
6675         u64 logical = bio->bi_iter.bi_sector << 9;
6676         u64 length = 0;
6677         u64 map_length;
6678         int ret;
6679         int dev_nr;
6680         int total_devs;
6681         struct btrfs_bio *bbio = NULL;
6682
6683         length = bio->bi_iter.bi_size;
6684         map_length = length;
6685
6686         btrfs_bio_counter_inc_blocked(fs_info);
6687         ret = __btrfs_map_block(fs_info, btrfs_op(bio), logical,
6688                                 &map_length, &bbio, mirror_num, 1);
6689         if (ret) {
6690                 btrfs_bio_counter_dec(fs_info);
6691                 return errno_to_blk_status(ret);
6692         }
6693
6694         total_devs = bbio->num_stripes;
6695         bbio->orig_bio = first_bio;
6696         bbio->private = first_bio->bi_private;
6697         bbio->end_io = first_bio->bi_end_io;
6698         bbio->fs_info = fs_info;
6699         atomic_set(&bbio->stripes_pending, bbio->num_stripes);
6700
6701         if ((bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
6702             ((btrfs_op(bio) == BTRFS_MAP_WRITE) || (mirror_num > 1))) {
6703                 /* In this case, map_length has been set to the length of
6704                  * a single stripe, not the whole write. */
6705                 if (btrfs_op(bio) == BTRFS_MAP_WRITE) {
6706                         ret = raid56_parity_write(fs_info, bio, bbio,
6707                                                   map_length);
6708                 } else {
6709                         ret = raid56_parity_recover(fs_info, bio, bbio,
6710                                                     map_length, mirror_num, 1);
6711                 }
6712
6713                 btrfs_bio_counter_dec(fs_info);
6714                 return errno_to_blk_status(ret);
6715         }
6716
6717         if (map_length < length) {
6718                 btrfs_crit(fs_info,
6719                            "mapping failed logical %llu bio len %llu len %llu",
6720                            logical, length, map_length);
6721                 BUG();
6722         }
6723
6724         for (dev_nr = 0; dev_nr < total_devs; dev_nr++) {
6725                 dev = bbio->stripes[dev_nr].dev;
6726                 if (!dev || !dev->bdev || test_bit(BTRFS_DEV_STATE_MISSING,
6727                                                    &dev->dev_state) ||
6728                     (btrfs_op(first_bio) == BTRFS_MAP_WRITE &&
6729                     !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))) {
6730                         bbio_error(bbio, first_bio, logical);
6731                         continue;
6732                 }
6733
6734                 if (dev_nr < total_devs - 1)
6735                         bio = btrfs_bio_clone(first_bio);
6736                 else
6737                         bio = first_bio;
6738
6739                 submit_stripe_bio(bbio, bio, bbio->stripes[dev_nr].physical, dev);
6740         }
6741         btrfs_bio_counter_dec(fs_info);
6742         return BLK_STS_OK;
6743 }
6744
6745 /*
6746  * Find a device specified by @devid or @uuid in the list of @fs_devices, or
6747  * return NULL.
6748  *
6749  * If devid and uuid are both specified, the match must be exact, otherwise
6750  * only devid is used.
6751  */
6752 struct btrfs_device *btrfs_find_device(struct btrfs_fs_devices *fs_devices,
6753                                        u64 devid, u8 *uuid, u8 *fsid)
6754 {
6755         struct btrfs_device *device;
6756         struct btrfs_fs_devices *seed_devs;
6757
6758         if (!fsid || !memcmp(fs_devices->metadata_uuid, fsid, BTRFS_FSID_SIZE)) {
6759                 list_for_each_entry(device, &fs_devices->devices, dev_list) {
6760                         if (device->devid == devid &&
6761                             (!uuid || memcmp(device->uuid, uuid,
6762                                              BTRFS_UUID_SIZE) == 0))
6763                                 return device;
6764                 }
6765         }
6766
6767         list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
6768                 if (!fsid ||
6769                     !memcmp(seed_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE)) {
6770                         list_for_each_entry(device, &seed_devs->devices,
6771                                             dev_list) {
6772                                 if (device->devid == devid &&
6773                                     (!uuid || memcmp(device->uuid, uuid,
6774                                                      BTRFS_UUID_SIZE) == 0))
6775                                         return device;
6776                         }
6777                 }
6778         }
6779
6780         return NULL;
6781 }
6782
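/*
 * Allocate a btrfs_device for a devid found in the metadata but not present
 * at mount time, add it to @fs_devices and mark it as missing.
 */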
6783 static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices,
6784                                             u64 devid, u8 *dev_uuid)
6785 {
6786         struct btrfs_device *device;
6787         unsigned int nofs_flag;
6788
6789         /*
6790          * We call this under the chunk_mutex, so we want to use NOFS for this
6791          * allocation, however we don't want to change btrfs_alloc_device() to
6792          * always do NOFS because we use it in a lot of other GFP_KERNEL safe
6793          * places.
6794          */
6795         nofs_flag = memalloc_nofs_save();
6796         device = btrfs_alloc_device(NULL, &devid, dev_uuid);
6797         memalloc_nofs_restore(nofs_flag);
6798         if (IS_ERR(device))
6799                 return device;
6800
6801         list_add(&device->dev_list, &fs_devices->devices);
6802         device->fs_devices = fs_devices;
6803         fs_devices->num_devices++;
6804
6805         set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
6806         fs_devices->missing_devices++;
6807
6808         return device;
6809 }
6810
6811 /**
6812  * btrfs_alloc_device - allocate struct btrfs_device
6813  * @fs_info:    used only for generating a new devid, can be NULL if
6814  *              devid is provided (i.e. @devid != NULL).
6815  * @devid:      a pointer to devid for this device.  If NULL a new devid
6816  *              is generated.
6817  * @uuid:       a pointer to UUID for this device.  If NULL a new UUID
6818  *              is generated.
6819  *
6820  * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR()
6821  * on error.  Returned struct is not linked onto any lists and must be
6822  * destroyed with btrfs_free_device.
6823  */
6824 struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
6825                                         const u64 *devid,
6826                                         const u8 *uuid)
6827 {
6828         struct btrfs_device *dev;
6829         u64 tmp;
6830
6831         if (WARN_ON(!devid && !fs_info))
6832                 return ERR_PTR(-EINVAL);
6833
6834         dev = kzalloc(sizeof(*dev), GFP_KERNEL);
6835         if (!dev)
6836                 return ERR_PTR(-ENOMEM);
6837
6838         /*
6839          * Preallocate a bio that's always going to be used for flushing device
6840          * barriers and matches the device lifespan
6841          */
6842         dev->flush_bio = bio_kmalloc(GFP_KERNEL, 0);
6843         if (!dev->flush_bio) {
6844                 kfree(dev);
6845                 return ERR_PTR(-ENOMEM);
6846         }
6847
6848         INIT_LIST_HEAD(&dev->dev_list);
6849         INIT_LIST_HEAD(&dev->dev_alloc_list);
6850         INIT_LIST_HEAD(&dev->post_commit_list);
6851
6852         atomic_set(&dev->reada_in_flight, 0);
6853         atomic_set(&dev->dev_stats_ccnt, 0);
6854         btrfs_device_data_ordered_init(dev);
6855         INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
6856         INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
6857         extent_io_tree_init(fs_info, &dev->alloc_state,
6858                             IO_TREE_DEVICE_ALLOC_STATE, NULL);
6859
6860         if (devid)
6861                 tmp = *devid;
6862         else {
6863                 int ret;
6864
6865                 ret = find_next_devid(fs_info, &tmp);
6866                 if (ret) {
6867                         btrfs_free_device(dev);
6868                         return ERR_PTR(ret);
6869                 }
6870         }
6871         dev->devid = tmp;
6872
6873         if (uuid)
6874                 memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE);
6875         else
6876                 generate_random_uuid(dev->uuid);
6877
6878         return dev;
6879 }
6880
6881 static void btrfs_report_missing_device(struct btrfs_fs_info *fs_info,
6882                                         u64 devid, u8 *uuid, bool error)
6883 {
6884         if (error)
6885                 btrfs_err_rl(fs_info, "devid %llu uuid %pU is missing",
6886                               devid, uuid);
6887         else
6888                 btrfs_warn_rl(fs_info, "devid %llu uuid %pU is missing",
6889                               devid, uuid);
6890 }
6891
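/*
 * Return the length of a single stripe for the given chunk: the chunk length
 * divided by the number of data stripes (i.e. excluding any parity stripes).
 */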
6892 static u64 calc_stripe_length(u64 type, u64 chunk_len, int num_stripes)
6893 {
6894         const int data_stripes = calc_data_stripes(type, num_stripes);
6895
6896         return div_u64(chunk_len, data_stripes);
6897 }
6898
6899 #if BITS_PER_LONG == 32
6900 /*
6901  * Due to page cache limit, metadata beyond BTRFS_32BIT_MAX_FILE_SIZE
6902  * can't be accessed on 32bit systems.
6903  *
6904  * This function does a mount time check to reject the fs if it already has
6905  * a metadata chunk beyond that limit.
6906  */
6907 static int check_32bit_meta_chunk(struct btrfs_fs_info *fs_info,
6908                                   u64 logical, u64 length, u64 type)
6909 {
6910         if (!(type & BTRFS_BLOCK_GROUP_METADATA))
6911                 return 0;
6912
6913         if (logical + length < MAX_LFS_FILESIZE)
6914                 return 0;
6915
6916         btrfs_err_32bit_limit(fs_info);
6917         return -EOVERFLOW;
6918 }
6919
6920 /*
6921  * This is to give early warning for any metadata chunk reaching
6922  * BTRFS_32BIT_EARLY_WARN_THRESHOLD.
6923  * Although we can still access the metadata now, that will no longer be
6924  * possible once the limit is reached.
6925  */
6926 static void warn_32bit_meta_chunk(struct btrfs_fs_info *fs_info,
6927                                   u64 logical, u64 length, u64 type)
6928 {
6929         if (!(type & BTRFS_BLOCK_GROUP_METADATA))
6930                 return;
6931
6932         if (logical + length < BTRFS_32BIT_EARLY_WARN_THRESHOLD)
6933                 return;
6934
6935         btrfs_warn_32bit_limit(fs_info);
6936 }
6937 #endif
6938
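/*
 * Read one chunk item from @leaf and insert the corresponding chunk mapping
 * (an extent_map with an attached map_lookup) into fs_info->mapping_tree.
 * Devices referenced by the chunk stripes must already be present; under the
 * DEGRADED mount option missing devices are added as placeholder devices.
 */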
6939 static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf,
6940                           struct btrfs_chunk *chunk)
6941 {
6942         struct btrfs_fs_info *fs_info = leaf->fs_info;
6943         struct extent_map_tree *map_tree = &fs_info->mapping_tree;
6944         struct map_lookup *map;
6945         struct extent_map *em;
6946         u64 logical;
6947         u64 length;
6948         u64 devid;
6949         u64 type;
6950         u8 uuid[BTRFS_UUID_SIZE];
6951         int num_stripes;
6952         int ret;
6953         int i;
6954
6955         logical = key->offset;
6956         length = btrfs_chunk_length(leaf, chunk);
6957         type = btrfs_chunk_type(leaf, chunk);
6958         num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
6959
6960 #if BITS_PER_LONG == 32
6961         ret = check_32bit_meta_chunk(fs_info, logical, length, type);
6962         if (ret < 0)
6963                 return ret;
6964         warn_32bit_meta_chunk(fs_info, logical, length, type);
6965 #endif
6966
6967         /*
6968          * We only need to verify the chunk item when reading the sys chunk array;
6969          * a chunk item in a tree block is already verified by the tree-checker.
6970          */
6971         if (leaf->start == BTRFS_SUPER_INFO_OFFSET) {
6972                 ret = btrfs_check_chunk_valid(leaf, chunk, logical);
6973                 if (ret)
6974                         return ret;
6975         }
6976
6977         read_lock(&map_tree->lock);
6978         em = lookup_extent_mapping(map_tree, logical, 1);
6979         read_unlock(&map_tree->lock);
6980
6981         /* already mapped? */
6982         if (em && em->start <= logical && em->start + em->len > logical) {
6983                 free_extent_map(em);
6984                 return 0;
6985         } else if (em) {
6986                 free_extent_map(em);
6987         }
6988
6989         em = alloc_extent_map();
6990         if (!em)
6991                 return -ENOMEM;
6992         map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
6993         if (!map) {
6994                 free_extent_map(em);
6995                 return -ENOMEM;
6996         }
6997
6998         set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
6999         em->map_lookup = map;
7000         em->start = logical;
7001         em->len = length;
7002         em->orig_start = 0;
7003         em->block_start = 0;
7004         em->block_len = em->len;
7005
7006         map->num_stripes = num_stripes;
7007         map->io_width = btrfs_chunk_io_width(leaf, chunk);
7008         map->io_align = btrfs_chunk_io_align(leaf, chunk);
7009         map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
7010         map->type = type;
7011         map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
7012         map->verified_stripes = 0;
7013         em->orig_block_len = calc_stripe_length(type, em->len,
7014                                                 map->num_stripes);
7015         for (i = 0; i < num_stripes; i++) {
7016                 map->stripes[i].physical =
7017                         btrfs_stripe_offset_nr(leaf, chunk, i);
7018                 devid = btrfs_stripe_devid_nr(leaf, chunk, i);
7019                 read_extent_buffer(leaf, uuid, (unsigned long)
7020                                    btrfs_stripe_dev_uuid_nr(chunk, i),
7021                                    BTRFS_UUID_SIZE);
7022                 map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices,
7023                                                         devid, uuid, NULL);
7024                 if (!map->stripes[i].dev &&
7025                     !btrfs_test_opt(fs_info, DEGRADED)) {
7026                         free_extent_map(em);
7027                         btrfs_report_missing_device(fs_info, devid, uuid, true);
7028                         return -ENOENT;
7029                 }
7030                 if (!map->stripes[i].dev) {
7031                         map->stripes[i].dev =
7032                                 add_missing_dev(fs_info->fs_devices, devid,
7033                                                 uuid);
7034                         if (IS_ERR(map->stripes[i].dev)) {
7035                                 free_extent_map(em);
7036                                 btrfs_err(fs_info,
7037                                         "failed to init missing dev %llu: %ld",
7038                                         devid, PTR_ERR(map->stripes[i].dev));
7039                                 return PTR_ERR(map->stripes[i].dev);
7040                         }
7041                         btrfs_report_missing_device(fs_info, devid, uuid, false);
7042                 }
7043                 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
7044                                 &(map->stripes[i].dev->dev_state));
7045
7046         }
7047
7048         write_lock(&map_tree->lock);
7049         ret = add_extent_mapping(map_tree, em, 0);
7050         write_unlock(&map_tree->lock);
7051         if (ret < 0) {
7052                 btrfs_err(fs_info,
7053                           "failed to add chunk map, start=%llu len=%llu: %d",
7054                           em->start, em->len, ret);
7055         }
7056         free_extent_map(em);
7057
7058         return ret;
7059 }
7060
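/* Copy the fields of an on-disk dev item from @leaf into the in-memory device. */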
7061 static void fill_device_from_item(struct extent_buffer *leaf,
7062                                  struct btrfs_dev_item *dev_item,
7063                                  struct btrfs_device *device)
7064 {
7065         unsigned long ptr;
7066
7067         device->devid = btrfs_device_id(leaf, dev_item);
7068         device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
7069         device->total_bytes = device->disk_total_bytes;
7070         device->commit_total_bytes = device->disk_total_bytes;
7071         device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
7072         device->commit_bytes_used = device->bytes_used;
7073         device->type = btrfs_device_type(leaf, dev_item);
7074         device->io_align = btrfs_device_io_align(leaf, dev_item);
7075         device->io_width = btrfs_device_io_width(leaf, dev_item);
7076         device->sector_size = btrfs_device_sector_size(leaf, dev_item);
7077         WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
7078         clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
7079
7080         ptr = btrfs_device_uuid(dev_item);
7081         read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
7082 }
7083
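/*
 * Find the fs_devices for the seed filesystem identified by @fsid.  On first
 * use the matching fs_devices is cloned, opened and anchored on
 * fs_info->fs_devices->seed_list; if it cannot be found at all, an empty
 * fs_devices is created instead, but only under the DEGRADED mount option.
 */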
7084 static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info,
7085                                                   u8 *fsid)
7086 {
7087         struct btrfs_fs_devices *fs_devices;
7088         int ret;
7089
7090         lockdep_assert_held(&uuid_mutex);
7091         ASSERT(fsid);
7092
7093         /* This will match only for multi-device seed fs */
7094         list_for_each_entry(fs_devices, &fs_info->fs_devices->seed_list, seed_list)
7095                 if (!memcmp(fs_devices->fsid, fsid, BTRFS_FSID_SIZE))
7096                         return fs_devices;
7097
7098
7099         fs_devices = find_fsid(fsid, NULL);
7100         if (!fs_devices) {
7101                 if (!btrfs_test_opt(fs_info, DEGRADED))
7102                         return ERR_PTR(-ENOENT);
7103
7104                 fs_devices = alloc_fs_devices(fsid, NULL);
7105                 if (IS_ERR(fs_devices))
7106                         return fs_devices;
7107
7108                 fs_devices->seeding = true;
7109                 fs_devices->opened = 1;
7110                 return fs_devices;
7111         }
7112
7113         /*
7114          * Upon first call for a seed fs fsid, just create a private copy of the
7115          * respective fs_devices and anchor it at fs_info->fs_devices->seed_list
7116          */
7117         fs_devices = clone_fs_devices(fs_devices);
7118         if (IS_ERR(fs_devices))
7119                 return fs_devices;
7120
7121         ret = open_fs_devices(fs_devices, FMODE_READ, fs_info->bdev_holder);
7122         if (ret) {
7123                 free_fs_devices(fs_devices);
7124                 return ERR_PTR(ret);
7125         }
7126
7127         if (!fs_devices->seeding) {
7128                 close_fs_devices(fs_devices);
7129                 free_fs_devices(fs_devices);
7130                 return ERR_PTR(-EINVAL);
7131         }
7132
7133         list_add(&fs_devices->seed_list, &fs_info->fs_devices->seed_list);
7134
7135         return fs_devices;
7136 }
7137
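/*
 * Read one dev item from @leaf and update (or, in degraded mode, create) the
 * matching in-memory btrfs_device.  Dev items whose fsid differs from the
 * mounted filesystem belong to a seed device and are routed through
 * open_seed_devices() first.
 */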
7138 static int read_one_dev(struct extent_buffer *leaf,
7139                         struct btrfs_dev_item *dev_item)
7140 {
7141         struct btrfs_fs_info *fs_info = leaf->fs_info;
7142         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7143         struct btrfs_device *device;
7144         u64 devid;
7145         int ret;
7146         u8 fs_uuid[BTRFS_FSID_SIZE];
7147         u8 dev_uuid[BTRFS_UUID_SIZE];
7148
7149         devid = btrfs_device_id(leaf, dev_item);
7150         read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
7151                            BTRFS_UUID_SIZE);
7152         read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
7153                            BTRFS_FSID_SIZE);
7154
7155         if (memcmp(fs_uuid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE)) {
7156                 fs_devices = open_seed_devices(fs_info, fs_uuid);
7157                 if (IS_ERR(fs_devices))
7158                         return PTR_ERR(fs_devices);
7159         }
7160
7161         device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
7162                                    fs_uuid);
7163         if (!device) {
7164                 if (!btrfs_test_opt(fs_info, DEGRADED)) {
7165                         btrfs_report_missing_device(fs_info, devid,
7166                                                         dev_uuid, true);
7167                         return -ENOENT;
7168                 }
7169
7170                 device = add_missing_dev(fs_devices, devid, dev_uuid);
7171                 if (IS_ERR(device)) {
7172                         btrfs_err(fs_info,
7173                                 "failed to add missing dev %llu: %ld",
7174                                 devid, PTR_ERR(device));
7175                         return PTR_ERR(device);
7176                 }
7177                 btrfs_report_missing_device(fs_info, devid, dev_uuid, false);
7178         } else {
7179                 if (!device->bdev) {
7180                         if (!btrfs_test_opt(fs_info, DEGRADED)) {
7181                                 btrfs_report_missing_device(fs_info,
7182                                                 devid, dev_uuid, true);
7183                                 return -ENOENT;
7184                         }
7185                         btrfs_report_missing_device(fs_info, devid,
7186                                                         dev_uuid, false);
7187                 }
7188
7189                 if (!device->bdev &&
7190                     !test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
7191                         /*
7192                          * This happens when a device that was properly set up
7193                          * in the device info lists suddenly goes bad.
7194                          * device->bdev is NULL, so we have to set the
7195                          * BTRFS_DEV_STATE_MISSING bit here.
7196                          */
7197                         device->fs_devices->missing_devices++;
7198                         set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
7199                 }
7200
7201                 /* Move the device to its own fs_devices */
7202                 if (device->fs_devices != fs_devices) {
7203                         ASSERT(test_bit(BTRFS_DEV_STATE_MISSING,
7204                                                         &device->dev_state));
7205
7206                         list_move(&device->dev_list, &fs_devices->devices);
7207                         device->fs_devices->num_devices--;
7208                         fs_devices->num_devices++;
7209
7210                         device->fs_devices->missing_devices--;
7211                         fs_devices->missing_devices++;
7212
7213                         device->fs_devices = fs_devices;
7214                 }
7215         }
7216
7217         if (device->fs_devices != fs_info->fs_devices) {
7218                 BUG_ON(test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state));
7219                 if (device->generation !=
7220                     btrfs_device_generation(leaf, dev_item))
7221                         return -EINVAL;
7222         }
7223
7224         fill_device_from_item(leaf, dev_item, device);
7225         if (device->bdev) {
7226                 u64 max_total_bytes = i_size_read(device->bdev->bd_inode);
7227
7228                 if (device->total_bytes > max_total_bytes) {
7229                         btrfs_err(fs_info,
7230                         "device total_bytes should be at most %llu but found %llu",
7231                                   max_total_bytes, device->total_bytes);
7232                         return -EINVAL;
7233                 }
7234         }
7235         set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
7236         if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
7237            !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
7238                 device->fs_devices->total_rw_bytes += device->total_bytes;
7239                 atomic64_add(device->total_bytes - device->bytes_used,
7240                                 &fs_info->free_chunk_space);
7241         }
7242         ret = 0;
7243         return ret;
7244 }
7245
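/*
 * Parse the chunk items embedded in the superblock's sys_chunk_array so that
 * the system chunks (which hold the chunk tree itself) can be mapped before
 * the chunk tree is read.
 */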
7246 int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
7247 {
7248         struct btrfs_root *root = fs_info->tree_root;
7249         struct btrfs_super_block *super_copy = fs_info->super_copy;
7250         struct extent_buffer *sb;
7251         struct btrfs_disk_key *disk_key;
7252         struct btrfs_chunk *chunk;
7253         u8 *array_ptr;
7254         unsigned long sb_array_offset;
7255         int ret = 0;
7256         u32 num_stripes;
7257         u32 array_size;
7258         u32 len = 0;
7259         u32 cur_offset;
7260         u64 type;
7261         struct btrfs_key key;
7262
7263         ASSERT(BTRFS_SUPER_INFO_SIZE <= fs_info->nodesize);
7264         /*
7265          * This will create an extent buffer of nodesize, while the superblock size
7266          * is fixed to BTRFS_SUPER_INFO_SIZE. If nodesize > sb size, this will
7267          * overallocate, but we can keep it as-is since only the first page is used.
7268          */
7269         sb = btrfs_find_create_tree_block(fs_info, BTRFS_SUPER_INFO_OFFSET,
7270                                           root->root_key.objectid, 0);
7271         if (IS_ERR(sb))
7272                 return PTR_ERR(sb);
7273         set_extent_buffer_uptodate(sb);
7274         /*
7275          * The sb extent buffer is artificial and just used to read the system array.
7276          * The set_extent_buffer_uptodate() call does not properly mark all its
7277          * pages up-to-date when the page is larger: the extent does not cover the
7278          * whole page and consequently check_page_uptodate does not find all
7279          * the page's extents up-to-date (the hole beyond sb), so
7280          * write_extent_buffer then triggers a WARN_ON.
7281          *
7282          * Regular short extents go through the mark_extent_buffer_dirty/writeback
7283          * cycle, but sb spans only this function. Add an explicit SetPageUptodate
7284          * call to silence the warning, e.g. on PowerPC 64.
7285          */
7286         if (PAGE_SIZE > BTRFS_SUPER_INFO_SIZE)
7287                 SetPageUptodate(sb->pages[0]);
7288
7289         write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
7290         array_size = btrfs_super_sys_array_size(super_copy);
7291
7292         array_ptr = super_copy->sys_chunk_array;
7293         sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array);
7294         cur_offset = 0;
7295
7296         while (cur_offset < array_size) {
7297                 disk_key = (struct btrfs_disk_key *)array_ptr;
7298                 len = sizeof(*disk_key);
7299                 if (cur_offset + len > array_size)
7300                         goto out_short_read;
7301
7302                 btrfs_disk_key_to_cpu(&key, disk_key);
7303
7304                 array_ptr += len;
7305                 sb_array_offset += len;
7306                 cur_offset += len;
7307
7308                 if (key.type != BTRFS_CHUNK_ITEM_KEY) {
7309                         btrfs_err(fs_info,
7310                             "unexpected item type %u in sys_array at offset %u",
7311                                   (u32)key.type, cur_offset);
7312                         ret = -EIO;
7313                         break;
7314                 }
7315
7316                 chunk = (struct btrfs_chunk *)sb_array_offset;
7317                 /*
7318                  * At least one btrfs_chunk with one stripe must be present,
7319                  * the exact stripe count check comes afterwards
7320                  */
7321                 len = btrfs_chunk_item_size(1);
7322                 if (cur_offset + len > array_size)
7323                         goto out_short_read;
7324
7325                 num_stripes = btrfs_chunk_num_stripes(sb, chunk);
7326                 if (!num_stripes) {
7327                         btrfs_err(fs_info,
7328                         "invalid number of stripes %u in sys_array at offset %u",
7329                                   num_stripes, cur_offset);
7330                         ret = -EIO;
7331                         break;
7332                 }
7333
7334                 type = btrfs_chunk_type(sb, chunk);
7335                 if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
7336                         btrfs_err(fs_info,
7337                         "invalid chunk type %llu in sys_array at offset %u",
7338                                   type, cur_offset);
7339                         ret = -EIO;
7340                         break;
7341                 }
7342
7343                 len = btrfs_chunk_item_size(num_stripes);
7344                 if (cur_offset + len > array_size)
7345                         goto out_short_read;
7346
7347                 ret = read_one_chunk(&key, sb, chunk);
7348                 if (ret)
7349                         break;
7350
7351                 array_ptr += len;
7352                 sb_array_offset += len;
7353                 cur_offset += len;
7354         }
7355         clear_extent_buffer_uptodate(sb);
7356         free_extent_buffer_stale(sb);
7357         return ret;
7358
7359 out_short_read:
7360         btrfs_err(fs_info, "sys_array too short to read %u bytes at offset %u",
7361                         len, cur_offset);
7362         clear_extent_buffer_uptodate(sb);
7363         free_extent_buffer_stale(sb);
7364         return -EIO;
7365 }
7366
7367 /*
7368  * Check if all chunks in the fs are OK for read-write degraded mount
7369  *
7370  * If the @failing_dev is specified, it's accounted as missing.
7371  *
7372  * Return true if all chunks meet the minimal RW mount requirements.
7373  * Return false if any chunk doesn't meet the minimal RW mount requirements.
7374  */
7375 bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info,
7376                                         struct btrfs_device *failing_dev)
7377 {
7378         struct extent_map_tree *map_tree = &fs_info->mapping_tree;
7379         struct extent_map *em;
7380         u64 next_start = 0;
7381         bool ret = true;
7382
7383         read_lock(&map_tree->lock);
7384         em = lookup_extent_mapping(map_tree, 0, (u64)-1);
7385         read_unlock(&map_tree->lock);
7386         /* No chunk at all? Return false anyway */
7387         if (!em) {
7388                 ret = false;
7389                 goto out;
7390         }
7391         while (em) {
7392                 struct map_lookup *map;
7393                 int missing = 0;
7394                 int max_tolerated;
7395                 int i;
7396
7397                 map = em->map_lookup;
7398                 max_tolerated =
7399                         btrfs_get_num_tolerated_disk_barrier_failures(
7400                                         map->type);
7401                 for (i = 0; i < map->num_stripes; i++) {
7402                         struct btrfs_device *dev = map->stripes[i].dev;
7403
7404                         if (!dev || !dev->bdev ||
7405                             test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) ||
7406                             dev->last_flush_error)
7407                                 missing++;
7408                         else if (failing_dev && failing_dev == dev)
7409                                 missing++;
7410                 }
7411                 if (missing > max_tolerated) {
7412                         if (!failing_dev)
7413                                 btrfs_warn(fs_info,
7414         "chunk %llu missing %d devices, max tolerance is %d for writable mount",
7415                                    em->start, missing, max_tolerated);
7416                         free_extent_map(em);
7417                         ret = false;
7418                         goto out;
7419                 }
7420                 next_start = extent_map_end(em);
7421                 free_extent_map(em);
7422
7423                 read_lock(&map_tree->lock);
7424                 em = lookup_extent_mapping(map_tree, next_start,
7425                                            (u64)(-1) - next_start);
7426                 read_unlock(&map_tree->lock);
7427         }
7428 out:
7429         return ret;
7430 }
7431
7432 static void readahead_tree_node_children(struct extent_buffer *node)
7433 {
7434         int i;
7435         const int nr_items = btrfs_header_nritems(node);
7436
7437         for (i = 0; i < nr_items; i++)
7438                 btrfs_readahead_node_child(node, i);
7439 }
7440
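/*
 * Read all device items and chunk items from the chunk tree at mount time and
 * build the in-memory device list and chunk mappings, then cross check them
 * against the values recorded in the super block.
 */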
7441 int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
7442 {
7443         struct btrfs_root *root = fs_info->chunk_root;
7444         struct btrfs_path *path;
7445         struct extent_buffer *leaf;
7446         struct btrfs_key key;
7447         struct btrfs_key found_key;
7448         int ret;
7449         int slot;
7450         u64 total_dev = 0;
7451         u64 last_ra_node = 0;
7452
7453         path = btrfs_alloc_path();
7454         if (!path)
7455                 return -ENOMEM;
7456
7457         /*
7458          * The uuid_mutex is needed only if we are mounting a sprout FS, as
7459          * open_seed_devices() may then be called to handle the seed devices.
7460          */
7461         mutex_lock(&uuid_mutex);
7462
7463         /*
7464          * It is possible for mount and umount to race in such a way that
7465          * we execute this code path, but open_fs_devices failed to clear
7466          * total_rw_bytes. We certainly want it cleared before reading the
7467          * device items, so clear it here.
7468          */
7469         fs_info->fs_devices->total_rw_bytes = 0;
7470
7471         /*
7472          * Read all device items, and then all the chunk items. All
7473          * device items are found before any chunk item (their object id
7474          * is smaller than the lowest possible object id for a chunk
7475          * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
7476          */
7477         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
7478         key.offset = 0;
7479         key.type = 0;
7480         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
7481         if (ret < 0)
7482                 goto error;
7483         while (1) {
7484                 struct extent_buffer *node;
7485
7486                 leaf = path->nodes[0];
7487                 slot = path->slots[0];
7488                 if (slot >= btrfs_header_nritems(leaf)) {
7489                         ret = btrfs_next_leaf(root, path);
7490                         if (ret == 0)
7491                                 continue;
7492                         if (ret < 0)
7493                                 goto error;
7494                         break;
7495                 }
7496                 /*
7497                  * The nodes on level 1 are not locked, but we don't need to lock
7498                  * them during mount time as nothing else can access the tree.
7499                  */
7500                 node = path->nodes[1];
7501                 if (node) {
7502                         if (last_ra_node != node->start) {
7503                                 readahead_tree_node_children(node);
7504                                 last_ra_node = node->start;
7505                         }
7506                 }
7507                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
7508                 if (found_key.type == BTRFS_DEV_ITEM_KEY) {
7509                         struct btrfs_dev_item *dev_item;
7510                         dev_item = btrfs_item_ptr(leaf, slot,
7511                                                   struct btrfs_dev_item);
7512                         ret = read_one_dev(leaf, dev_item);
7513                         if (ret)
7514                                 goto error;
7515                         total_dev++;
7516                 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
7517                         struct btrfs_chunk *chunk;
7518
7519                         /*
7520                          * We are only called at mount time, so there is no need to
7521                          * take fs_info->chunk_mutex. Also, to avoid lockdep warnings,
7522                          * we always lock fs_info->chunk_mutex first before acquiring
7523                          * any locks on the chunk tree. This is a requirement for
7524                          * chunk allocation, see the comment on top of
7525                          * btrfs_chunk_alloc() for details.
7526                          */
7527                         ASSERT(!test_bit(BTRFS_FS_OPEN, &fs_info->flags));
7528                         chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
7529                         ret = read_one_chunk(&found_key, leaf, chunk);
7530                         if (ret)
7531                                 goto error;
7532                 }
7533                 path->slots[0]++;
7534         }
7535
7536         /*
7537          * After loading the chunk tree we have all device information, so
7538          * do another round of validation checks.
7539          */
7540         if (total_dev != fs_info->fs_devices->total_devices) {
7541                 btrfs_err(fs_info,
7542            "super_num_devices %llu mismatch with num_devices %llu found here",
7543                           btrfs_super_num_devices(fs_info->super_copy),
7544                           total_dev);
7545                 ret = -EINVAL;
7546                 goto error;
7547         }
7548         if (btrfs_super_total_bytes(fs_info->super_copy) <
7549             fs_info->fs_devices->total_rw_bytes) {
7550                 btrfs_err(fs_info,
7551         "super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu",
7552                           btrfs_super_total_bytes(fs_info->super_copy),
7553                           fs_info->fs_devices->total_rw_bytes);
7554                 ret = -EINVAL;
7555                 goto error;
7556         }
7557         ret = 0;
7558 error:
7559         mutex_unlock(&uuid_mutex);
7560
7561         btrfs_free_path(path);
7562         return ret;
7563 }
7564
7565 void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
7566 {
7567         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
7568         struct btrfs_device *device;
7569
7570         fs_devices->fs_info = fs_info;
7571
7572         mutex_lock(&fs_devices->device_list_mutex);
7573         list_for_each_entry(device, &fs_devices->devices, dev_list)
7574                 device->fs_info = fs_info;
7575
7576         list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
7577                 list_for_each_entry(device, &seed_devs->devices, dev_list)
7578                         device->fs_info = fs_info;
7579
7580                 seed_devs->fs_info = fs_info;
7581         }
7582         mutex_unlock(&fs_devices->device_list_mutex);
7583 }
7584
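/*
 * Helpers to read and write one u64 counter in the values[] array of an
 * on-disk btrfs_dev_stats_item.
 */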
7585 static u64 btrfs_dev_stats_value(const struct extent_buffer *eb,
7586                                  const struct btrfs_dev_stats_item *ptr,
7587                                  int index)
7588 {
7589         u64 val;
7590
7591         read_extent_buffer(eb, &val,
7592                            offsetof(struct btrfs_dev_stats_item, values) +
7593                             ((unsigned long)ptr) + (index * sizeof(u64)),
7594                            sizeof(val));
7595         return val;
7596 }
7597
7598 static void btrfs_set_dev_stats_value(struct extent_buffer *eb,
7599                                       struct btrfs_dev_stats_item *ptr,
7600                                       int index, u64 val)
7601 {
7602         write_extent_buffer(eb, &val,
7603                             offsetof(struct btrfs_dev_stats_item, values) +
7604                              ((unsigned long)ptr) + (index * sizeof(u64)),
7605                             sizeof(val));
7606 }
7607
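/*
 * Load the persistent device statistics item for @device from the device
 * tree.  If no item exists (e.g. on a freshly created filesystem) all
 * counters are initialized to zero.
 */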
7608 static int btrfs_device_init_dev_stats(struct btrfs_device *device,
7609                                        struct btrfs_path *path)
7610 {
7611         struct btrfs_dev_stats_item *ptr;
7612         struct extent_buffer *eb;
7613         struct btrfs_key key;
7614         int item_size;
7615         int i, ret, slot;
7616
7617         if (!device->fs_info->dev_root)
7618                 return 0;
7619
7620         key.objectid = BTRFS_DEV_STATS_OBJECTID;
7621         key.type = BTRFS_PERSISTENT_ITEM_KEY;
7622         key.offset = device->devid;
7623         ret = btrfs_search_slot(NULL, device->fs_info->dev_root, &key, path, 0, 0);
7624         if (ret) {
7625                 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7626                         btrfs_dev_stat_set(device, i, 0);
7627                 device->dev_stats_valid = 1;
7628                 btrfs_release_path(path);
7629                 return ret < 0 ? ret : 0;
7630         }
7631         slot = path->slots[0];
7632         eb = path->nodes[0];
7633         item_size = btrfs_item_size_nr(eb, slot);
7634
7635         ptr = btrfs_item_ptr(eb, slot, struct btrfs_dev_stats_item);
7636
7637         for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
7638                 if (item_size >= (1 + i) * sizeof(__le64))
7639                         btrfs_dev_stat_set(device, i,
7640                                            btrfs_dev_stats_value(eb, ptr, i));
7641                 else
7642                         btrfs_dev_stat_set(device, i, 0);
7643         }
7644
7645         device->dev_stats_valid = 1;
7646         btrfs_dev_stat_print_on_load(device);
7647         btrfs_release_path(path);
7648
7649         return 0;
7650 }
7651
7652 int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
7653 {
7654         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
7655         struct btrfs_device *device;
7656         struct btrfs_path *path = NULL;
7657         int ret = 0;
7658
7659         path = btrfs_alloc_path();
7660         if (!path)
7661                 return -ENOMEM;
7662
7663         mutex_lock(&fs_devices->device_list_mutex);
7664         list_for_each_entry(device, &fs_devices->devices, dev_list) {
7665                 ret = btrfs_device_init_dev_stats(device, path);
7666                 if (ret)
7667                         goto out;
7668         }
7669         list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
7670                 list_for_each_entry(device, &seed_devs->devices, dev_list) {
7671                         ret = btrfs_device_init_dev_stats(device, path);
7672                         if (ret)
7673                                 goto out;
7674                 }
7675         }
7676 out:
7677         mutex_unlock(&fs_devices->device_list_mutex);
7678
7679         btrfs_free_path(path);
7680         return ret;
7681 }
7682
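/*
 * Write the in-memory device statistics of @device into its persistent
 * dev_stats item, replacing an existing item that is too small and creating
 * the item if it does not exist yet.
 */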
7683 static int update_dev_stat_item(struct btrfs_trans_handle *trans,
7684                                 struct btrfs_device *device)
7685 {
7686         struct btrfs_fs_info *fs_info = trans->fs_info;
7687         struct btrfs_root *dev_root = fs_info->dev_root;
7688         struct btrfs_path *path;
7689         struct btrfs_key key;
7690         struct extent_buffer *eb;
7691         struct btrfs_dev_stats_item *ptr;
7692         int ret;
7693         int i;
7694
7695         key.objectid = BTRFS_DEV_STATS_OBJECTID;
7696         key.type = BTRFS_PERSISTENT_ITEM_KEY;
7697         key.offset = device->devid;
7698
7699         path = btrfs_alloc_path();
7700         if (!path)
7701                 return -ENOMEM;
7702         ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
7703         if (ret < 0) {
7704                 btrfs_warn_in_rcu(fs_info,
7705                         "error %d while searching for dev_stats item for device %s",
7706                               ret, rcu_str_deref(device->name));
7707                 goto out;
7708         }
7709
7710         if (ret == 0 &&
7711             btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
7712                 /* need to delete old one and insert a new one */
7713                 ret = btrfs_del_item(trans, dev_root, path);
7714                 if (ret != 0) {
7715                         btrfs_warn_in_rcu(fs_info,
7716                                 "delete too small dev_stats item for device %s failed %d",
7717                                       rcu_str_deref(device->name), ret);
7718                         goto out;
7719                 }
7720                 ret = 1;
7721         }
7722
7723         if (ret == 1) {
7724                 /* need to insert a new item */
7725                 btrfs_release_path(path);
7726                 ret = btrfs_insert_empty_item(trans, dev_root, path,
7727                                               &key, sizeof(*ptr));
7728                 if (ret < 0) {
7729                         btrfs_warn_in_rcu(fs_info,
7730                                 "insert dev_stats item for device %s failed %d",
7731                                 rcu_str_deref(device->name), ret);
7732                         goto out;
7733                 }
7734         }
7735
7736         eb = path->nodes[0];
7737         ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
7738         for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7739                 btrfs_set_dev_stats_value(eb, ptr, i,
7740                                           btrfs_dev_stat_read(device, i));
7741         btrfs_mark_buffer_dirty(eb);
7742
7743 out:
7744         btrfs_free_path(path);
7745         return ret;
7746 }
7747
7748 /*
7749  * Called from commit_transaction. Writes all changed device stats to disk.
7750  */
7751 int btrfs_run_dev_stats(struct btrfs_trans_handle *trans)
7752 {
7753         struct btrfs_fs_info *fs_info = trans->fs_info;
7754         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7755         struct btrfs_device *device;
7756         int stats_cnt;
7757         int ret = 0;
7758
7759         mutex_lock(&fs_devices->device_list_mutex);
7760         list_for_each_entry(device, &fs_devices->devices, dev_list) {
7761                 stats_cnt = atomic_read(&device->dev_stats_ccnt);
7762                 if (!device->dev_stats_valid || stats_cnt == 0)
7763                         continue;
7764
7765
7766                 /*
7767                  * There is a LOAD-LOAD control dependency between the value of
7768                  * dev_stats_ccnt and updating the on-disk values which requires
7769                  * reading the in-memory counters. Such control dependencies
7770                  * require explicit read memory barriers.
7771                  *
7772                  * This memory barrier pairs with smp_mb__before_atomic in
7773                  * btrfs_dev_stat_inc/btrfs_dev_stat_set and with the full
7774                  * barrier implied by atomic_xchg in
7775                  * btrfs_dev_stats_read_and_reset().
7776                  */
7777                 smp_rmb();
7778
7779                 ret = update_dev_stat_item(trans, device);
7780                 if (!ret)
7781                         atomic_sub(stats_cnt, &device->dev_stats_ccnt);
7782         }
7783         mutex_unlock(&fs_devices->device_list_mutex);
7784
7785         return ret;
7786 }
7787
7788 void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
7789 {
7790         btrfs_dev_stat_inc(dev, index);
7791         btrfs_dev_stat_print_on_error(dev);
7792 }
7793
7794 static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
7795 {
7796         if (!dev->dev_stats_valid)
7797                 return;
7798         btrfs_err_rl_in_rcu(dev->fs_info,
7799                 "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
7800                            rcu_str_deref(dev->name),
7801                            btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
7802                            btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
7803                            btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
7804                            btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
7805                            btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
7806 }
7807
7808 static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
7809 {
7810         int i;
7811
7812         for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7813                 if (btrfs_dev_stat_read(dev, i) != 0)
7814                         break;
7815         if (i == BTRFS_DEV_STAT_VALUES_MAX)
7816                 return; /* all values == 0, suppress message */
7817
7818         btrfs_info_in_rcu(dev->fs_info,
7819                 "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
7820                rcu_str_deref(dev->name),
7821                btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
7822                btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
7823                btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
7824                btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
7825                btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
7826 }
7827
7828 int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
7829                         struct btrfs_ioctl_get_dev_stats *stats)
7830 {
7831         struct btrfs_device *dev;
7832         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7833         int i;
7834
7835         mutex_lock(&fs_devices->device_list_mutex);
7836         dev = btrfs_find_device(fs_info->fs_devices, stats->devid, NULL, NULL);
7837         mutex_unlock(&fs_devices->device_list_mutex);
7838
7839         if (!dev) {
7840                 btrfs_warn(fs_info, "get dev_stats failed, device not found");
7841                 return -ENODEV;
7842         } else if (!dev->dev_stats_valid) {
7843                 btrfs_warn(fs_info, "get dev_stats failed, not yet valid");
7844                 return -ENODEV;
7845         } else if (stats->flags & BTRFS_DEV_STATS_RESET) {
7846                 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
7847                         if (stats->nr_items > i)
7848                                 stats->values[i] =
7849                                         btrfs_dev_stat_read_and_reset(dev, i);
7850                         else
7851                                 btrfs_dev_stat_set(dev, i, 0);
7852                 }
7853                 btrfs_info(fs_info, "device stats zeroed by %s (%d)",
7854                            current->comm, task_pid_nr(current));
7855         } else {
7856                 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7857                         if (stats->nr_items > i)
7858                                 stats->values[i] = btrfs_dev_stat_read(dev, i);
7859         }
7860         if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
7861                 stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
7862         return 0;
7863 }
7864
7865 /*
7866  * Update the size and bytes used for each device where it changed.  This is
7867  * delayed since we would otherwise get errors while writing out the
7868  * superblocks.
7869  *
7870  * Must be invoked during transaction commit.
7871  */
7872 void btrfs_commit_device_sizes(struct btrfs_transaction *trans)
7873 {
7874         struct btrfs_device *curr, *next;
7875
7876         ASSERT(trans->state == TRANS_STATE_COMMIT_DOING);
7877
7878         if (list_empty(&trans->dev_update_list))
7879                 return;
7880
7881         /*
7882          * We don't need the device_list_mutex here.  This list is owned by the
7883          * transaction and the transaction must complete before the device is
7884          * released.
7885          */
7886         mutex_lock(&trans->fs_info->chunk_mutex);
7887         list_for_each_entry_safe(curr, next, &trans->dev_update_list,
7888                                  post_commit_list) {
7889                 list_del_init(&curr->post_commit_list);
7890                 curr->commit_total_bytes = curr->disk_total_bytes;
7891                 curr->commit_bytes_used = curr->bytes_used;
7892         }
7893         mutex_unlock(&trans->fs_info->chunk_mutex);
7894 }
7895
7896 /*
7897  * Multiplicity factor for simple profiles: DUP, RAID1-like and RAID10.
7898  */
7899 int btrfs_bg_type_to_factor(u64 flags)
7900 {
7901         const int index = btrfs_bg_flags_to_raid_index(flags);
7902
7903         return btrfs_raid_array[index].ncopies;
7904 }
7905
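/*
 * Verify a single dev extent against the chunk mapping it points to: the
 * chunk must exist, its stripe length must match, one of its stripes must
 * reference this devid/physical offset, and the extent must stay within the
 * device (and, for zoned devices, be zone aligned).
 */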
7908 static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
7909                                  u64 chunk_offset, u64 devid,
7910                                  u64 physical_offset, u64 physical_len)
7911 {
7912         struct extent_map_tree *em_tree = &fs_info->mapping_tree;
7913         struct extent_map *em;
7914         struct map_lookup *map;
7915         struct btrfs_device *dev;
7916         u64 stripe_len;
7917         bool found = false;
7918         int ret = 0;
7919         int i;
7920
7921         read_lock(&em_tree->lock);
7922         em = lookup_extent_mapping(em_tree, chunk_offset, 1);
7923         read_unlock(&em_tree->lock);
7924
7925         if (!em) {
7926                 btrfs_err(fs_info,
7927 "dev extent physical offset %llu on devid %llu doesn't have corresponding chunk",
7928                           physical_offset, devid);
7929                 ret = -EUCLEAN;
7930                 goto out;
7931         }
7932
7933         map = em->map_lookup;
7934         stripe_len = calc_stripe_length(map->type, em->len, map->num_stripes);
7935         if (physical_len != stripe_len) {
7936                 btrfs_err(fs_info,
7937 "dev extent physical offset %llu on devid %llu length doesn't match chunk %llu, have %llu expect %llu",
7938                           physical_offset, devid, em->start, physical_len,
7939                           stripe_len);
7940                 ret = -EUCLEAN;
7941                 goto out;
7942         }
7943
7944         for (i = 0; i < map->num_stripes; i++) {
7945                 if (map->stripes[i].dev->devid == devid &&
7946                     map->stripes[i].physical == physical_offset) {
7947                         found = true;
7948                         if (map->verified_stripes >= map->num_stripes) {
7949                                 btrfs_err(fs_info,
7950                                 "too many dev extents for chunk %llu found",
7951                                           em->start);
7952                                 ret = -EUCLEAN;
7953                                 goto out;
7954                         }
7955                         map->verified_stripes++;
7956                         break;
7957                 }
7958         }
7959         if (!found) {
7960                 btrfs_err(fs_info,
7961         "dev extent physical offset %llu devid %llu has no corresponding chunk",
7962                         physical_offset, devid);
7963                 ret = -EUCLEAN;
7964         }
7965
7966         /* Make sure no dev extent is beyond device boundary */
7967         dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL);
7968         if (!dev) {
7969                 btrfs_err(fs_info, "failed to find devid %llu", devid);
7970                 ret = -EUCLEAN;
7971                 goto out;
7972         }
7973
7974         if (physical_offset + physical_len > dev->disk_total_bytes) {
7975                 btrfs_err(fs_info,
7976 "dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu",
7977                           devid, physical_offset, physical_len,
7978                           dev->disk_total_bytes);
7979                 ret = -EUCLEAN;
7980                 goto out;
7981         }
7982
7983         if (dev->zone_info) {
7984                 u64 zone_size = dev->zone_info->zone_size;
7985
7986                 if (!IS_ALIGNED(physical_offset, zone_size) ||
7987                     !IS_ALIGNED(physical_len, zone_size)) {
7988                         btrfs_err(fs_info,
7989 "zoned: dev extent devid %llu physical offset %llu len %llu is not aligned to device zone",
7990                                   devid, physical_offset, physical_len);
7991                         ret = -EUCLEAN;
7992                         goto out;
7993                 }
7994         }
7995
7996 out:
7997         free_extent_map(em);
7998         return ret;
7999 }
8000
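/*
 * Verify that every chunk has exactly as many verified stripes as it has
 * stripes, i.e. each stripe was backed by a dev extent found in the device
 * tree.
 */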
8001 static int verify_chunk_dev_extent_mapping(struct btrfs_fs_info *fs_info)
8002 {
8003         struct extent_map_tree *em_tree = &fs_info->mapping_tree;
8004         struct extent_map *em;
8005         struct rb_node *node;
8006         int ret = 0;
8007
8008         read_lock(&em_tree->lock);
8009         for (node = rb_first_cached(&em_tree->map); node; node = rb_next(node)) {
8010                 em = rb_entry(node, struct extent_map, rb_node);
8011                 if (em->map_lookup->num_stripes !=
8012                     em->map_lookup->verified_stripes) {
8013                         btrfs_err(fs_info,
8014                         "chunk %llu has missing dev extent, have %d expect %d",
8015                                   em->start, em->map_lookup->verified_stripes,
8016                                   em->map_lookup->num_stripes);
8017                         ret = -EUCLEAN;
8018                         goto out;
8019                 }
8020         }
8021 out:
8022         read_unlock(&em_tree->lock);
8023         return ret;
8024 }
8025
8026 /*
8027  * Ensure that all dev extents are mapped to the correct chunk, otherwise
8028  * later chunk allocation/freeing would cause unexpected behavior.
8029  *
8030  * NOTE: This will iterate through the whole device tree, which should be
8031  * about the same size as the chunk tree.  This slightly increases mount time.
8032  */
8033 int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
8034 {
8035         struct btrfs_path *path;
8036         struct btrfs_root *root = fs_info->dev_root;
8037         struct btrfs_key key;
8038         u64 prev_devid = 0;
8039         u64 prev_dev_ext_end = 0;
8040         int ret = 0;
8041
8042         /*
8043          * We don't have a dev_root because we mounted with ignorebadroots and
8044          * failed to load the root, so we want to skip the verification in this
8045          * case for sure.
8046          *
8047          * However if the dev root is fine, but the tree itself is corrupted
8048          * we'd still fail to mount.  This verification is only to make sure
8049          * writes can happen safely, so instead just bypass this check
8050          * completely in the case of IGNOREBADROOTS.
8051          */
8052         if (btrfs_test_opt(fs_info, IGNOREBADROOTS))
8053                 return 0;
8054
8055         key.objectid = 1;
8056         key.type = BTRFS_DEV_EXTENT_KEY;
8057         key.offset = 0;
8058
8059         path = btrfs_alloc_path();
8060         if (!path)
8061                 return -ENOMEM;
8062
8063         path->reada = READA_FORWARD;
8064         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
8065         if (ret < 0)
8066                 goto out;
8067
8068         if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
8069                 ret = btrfs_next_leaf(root, path);
8070                 if (ret < 0)
8071                         goto out;
8072                 /* No dev extents at all? Not good */
8073                 if (ret > 0) {
8074                         ret = -EUCLEAN;
8075                         goto out;
8076                 }
8077         }
8078         while (1) {
8079                 struct extent_buffer *leaf = path->nodes[0];
8080                 struct btrfs_dev_extent *dext;
8081                 int slot = path->slots[0];
8082                 u64 chunk_offset;
8083                 u64 physical_offset;
8084                 u64 physical_len;
8085                 u64 devid;
8086
8087                 btrfs_item_key_to_cpu(leaf, &key, slot);
8088                 if (key.type != BTRFS_DEV_EXTENT_KEY)
8089                         break;
8090                 devid = key.objectid;
8091                 physical_offset = key.offset;
8092
8093                 dext = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent);
8094                 chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dext);
8095                 physical_len = btrfs_dev_extent_length(leaf, dext);
8096
8097                 /* Check if this dev extent overlaps with the previous one */
8098                 if (devid == prev_devid && physical_offset < prev_dev_ext_end) {
8099                         btrfs_err(fs_info,
8100 "dev extent devid %llu physical offset %llu overlap with previous dev extent end %llu",
8101                                   devid, physical_offset, prev_dev_ext_end);
8102                         ret = -EUCLEAN;
8103                         goto out;
8104                 }
8105
8106                 ret = verify_one_dev_extent(fs_info, chunk_offset, devid,
8107                                             physical_offset, physical_len);
8108                 if (ret < 0)
8109                         goto out;
8110                 prev_devid = devid;
8111                 prev_dev_ext_end = physical_offset + physical_len;
8112
8113                 ret = btrfs_next_item(root, path);
8114                 if (ret < 0)
8115                         goto out;
8116                 if (ret > 0) {
8117                         ret = 0;
8118                         break;
8119                 }
8120         }
8121
8122         /* Ensure all chunks have corresponding dev extents */
8123         ret = verify_chunk_dev_extent_mapping(fs_info);
8124 out:
8125         btrfs_free_path(path);
8126         return ret;
8127 }
8128
8129 /*
8130  * Check whether the given block group or device is pinned by any inode being
8131  * used as a swapfile.
8132  */
8133 bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr)
8134 {
8135         struct btrfs_swapfile_pin *sp;
8136         struct rb_node *node;
8137
8138         spin_lock(&fs_info->swapfile_pins_lock);
8139         node = fs_info->swapfile_pins.rb_node;
8140         while (node) {
8141                 sp = rb_entry(node, struct btrfs_swapfile_pin, node);
8142                 if (ptr < sp->ptr)
8143                         node = node->rb_left;
8144                 else if (ptr > sp->ptr)
8145                         node = node->rb_right;
8146                 else
8147                         break;
8148         }
8149         spin_unlock(&fs_info->swapfile_pins_lock);
8150         return node != NULL;
8151 }
8152
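/*
 * Worker for btrfs_repair_one_zone(): relocate the block group that saw an
 * IO failure on a zoned filesystem, taking the balance exclusive op so the
 * relocation does not race with other balance-like operations.
 */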
8153 static int relocating_repair_kthread(void *data)
8154 {
8155         struct btrfs_block_group *cache = (struct btrfs_block_group *)data;
8156         struct btrfs_fs_info *fs_info = cache->fs_info;
8157         u64 target;
8158         int ret = 0;
8159
8160         target = cache->start;
8161         btrfs_put_block_group(cache);
8162
8163         if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) {
8164                 btrfs_info(fs_info,
8165                            "zoned: skip relocating block group %llu to repair: EBUSY",
8166                            target);
8167                 return -EBUSY;
8168         }
8169
8170         mutex_lock(&fs_info->reclaim_bgs_lock);
8171
8172         /* Ensure block group still exists */
8173         cache = btrfs_lookup_block_group(fs_info, target);
8174         if (!cache)
8175                 goto out;
8176
8177         if (!cache->relocating_repair)
8178                 goto out;
8179
8180         ret = btrfs_may_alloc_data_chunk(fs_info, target);
8181         if (ret < 0)
8182                 goto out;
8183
8184         btrfs_info(fs_info,
8185                    "zoned: relocating block group %llu to repair IO failure",
8186                    target);
8187         ret = btrfs_relocate_chunk(fs_info, target);
8188
8189 out:
8190         if (cache)
8191                 btrfs_put_block_group(cache);
8192         mutex_unlock(&fs_info->reclaim_bgs_lock);
8193         btrfs_exclop_finish(fs_info);
8194
8195         return ret;
8196 }
8197
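/*
 * Kick off an asynchronous relocation of the block group containing @logical
 * to repair an IO failure.  No-op when mounted degraded, when the block group
 * does not exist or when a repair is already in flight.
 */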
8198 int btrfs_repair_one_zone(struct btrfs_fs_info *fs_info, u64 logical)
8199 {
8200         struct btrfs_block_group *cache;
8201
8202         /* Do not attempt to repair in degraded state */
8203         if (btrfs_test_opt(fs_info, DEGRADED))
8204                 return 0;
8205
8206         cache = btrfs_lookup_block_group(fs_info, logical);
8207         if (!cache)
8208                 return 0;
8209
8210         spin_lock(&cache->lock);
8211         if (cache->relocating_repair) {
8212                 spin_unlock(&cache->lock);
8213                 btrfs_put_block_group(cache);
8214                 return 0;
8215         }
8216         cache->relocating_repair = 1;
8217         spin_unlock(&cache->lock);
8218
8219         kthread_run(relocating_repair_kthread, cache,
8220                     "btrfs-relocating-repair");
8221
8222         return 0;
8223 }