1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2007 Oracle.  All rights reserved.
4  */
5
6 #include <linux/sched.h>
7 #include <linux/sched/mm.h>
8 #include <linux/bio.h>
9 #include <linux/slab.h>
10 #include <linux/blkdev.h>
11 #include <linux/ratelimit.h>
12 #include <linux/kthread.h>
13 #include <linux/raid/pq.h>
14 #include <linux/semaphore.h>
15 #include <linux/uuid.h>
16 #include <linux/list_sort.h>
17 #include "misc.h"
18 #include "ctree.h"
19 #include "extent_map.h"
20 #include "disk-io.h"
21 #include "transaction.h"
22 #include "print-tree.h"
23 #include "volumes.h"
24 #include "raid56.h"
25 #include "async-thread.h"
26 #include "check-integrity.h"
27 #include "rcu-string.h"
28 #include "dev-replace.h"
29 #include "sysfs.h"
30 #include "tree-checker.h"
31 #include "space-info.h"
32 #include "block-group.h"
33 #include "discard.h"
34 #include "zoned.h"
35
36 const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
37         [BTRFS_RAID_RAID10] = {
38                 .sub_stripes    = 2,
39                 .dev_stripes    = 1,
40                 .devs_max       = 0,    /* 0 == as many as possible */
41                 .devs_min       = 2,
42                 .tolerated_failures = 1,
43                 .devs_increment = 2,
44                 .ncopies        = 2,
45                 .nparity        = 0,
46                 .raid_name      = "raid10",
47                 .bg_flag        = BTRFS_BLOCK_GROUP_RAID10,
48                 .mindev_error   = BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET,
49         },
50         [BTRFS_RAID_RAID1] = {
51                 .sub_stripes    = 1,
52                 .dev_stripes    = 1,
53                 .devs_max       = 2,
54                 .devs_min       = 2,
55                 .tolerated_failures = 1,
56                 .devs_increment = 2,
57                 .ncopies        = 2,
58                 .nparity        = 0,
59                 .raid_name      = "raid1",
60                 .bg_flag        = BTRFS_BLOCK_GROUP_RAID1,
61                 .mindev_error   = BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
62         },
63         [BTRFS_RAID_RAID1C3] = {
64                 .sub_stripes    = 1,
65                 .dev_stripes    = 1,
66                 .devs_max       = 3,
67                 .devs_min       = 3,
68                 .tolerated_failures = 2,
69                 .devs_increment = 3,
70                 .ncopies        = 3,
71                 .nparity        = 0,
72                 .raid_name      = "raid1c3",
73                 .bg_flag        = BTRFS_BLOCK_GROUP_RAID1C3,
74                 .mindev_error   = BTRFS_ERROR_DEV_RAID1C3_MIN_NOT_MET,
75         },
76         [BTRFS_RAID_RAID1C4] = {
77                 .sub_stripes    = 1,
78                 .dev_stripes    = 1,
79                 .devs_max       = 4,
80                 .devs_min       = 4,
81                 .tolerated_failures = 3,
82                 .devs_increment = 4,
83                 .ncopies        = 4,
84                 .nparity        = 0,
85                 .raid_name      = "raid1c4",
86                 .bg_flag        = BTRFS_BLOCK_GROUP_RAID1C4,
87                 .mindev_error   = BTRFS_ERROR_DEV_RAID1C4_MIN_NOT_MET,
88         },
89         [BTRFS_RAID_DUP] = {
90                 .sub_stripes    = 1,
91                 .dev_stripes    = 2,
92                 .devs_max       = 1,
93                 .devs_min       = 1,
94                 .tolerated_failures = 0,
95                 .devs_increment = 1,
96                 .ncopies        = 2,
97                 .nparity        = 0,
98                 .raid_name      = "dup",
99                 .bg_flag        = BTRFS_BLOCK_GROUP_DUP,
100                 .mindev_error   = 0,
101         },
102         [BTRFS_RAID_RAID0] = {
103                 .sub_stripes    = 1,
104                 .dev_stripes    = 1,
105                 .devs_max       = 0,
106                 .devs_min       = 1,
107                 .tolerated_failures = 0,
108                 .devs_increment = 1,
109                 .ncopies        = 1,
110                 .nparity        = 0,
111                 .raid_name      = "raid0",
112                 .bg_flag        = BTRFS_BLOCK_GROUP_RAID0,
113                 .mindev_error   = 0,
114         },
115         [BTRFS_RAID_SINGLE] = {
116                 .sub_stripes    = 1,
117                 .dev_stripes    = 1,
118                 .devs_max       = 1,
119                 .devs_min       = 1,
120                 .tolerated_failures = 0,
121                 .devs_increment = 1,
122                 .ncopies        = 1,
123                 .nparity        = 0,
124                 .raid_name      = "single",
125                 .bg_flag        = 0,
126                 .mindev_error   = 0,
127         },
128         [BTRFS_RAID_RAID5] = {
129                 .sub_stripes    = 1,
130                 .dev_stripes    = 1,
131                 .devs_max       = 0,
132                 .devs_min       = 2,
133                 .tolerated_failures = 1,
134                 .devs_increment = 1,
135                 .ncopies        = 1,
136                 .nparity        = 1,
137                 .raid_name      = "raid5",
138                 .bg_flag        = BTRFS_BLOCK_GROUP_RAID5,
139                 .mindev_error   = BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET,
140         },
141         [BTRFS_RAID_RAID6] = {
142                 .sub_stripes    = 1,
143                 .dev_stripes    = 1,
144                 .devs_max       = 0,
145                 .devs_min       = 3,
146                 .tolerated_failures = 2,
147                 .devs_increment = 1,
148                 .ncopies        = 1,
149                 .nparity        = 2,
150                 .raid_name      = "raid6",
151                 .bg_flag        = BTRFS_BLOCK_GROUP_RAID6,
152                 .mindev_error   = BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET,
153         },
154 };
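/*
 * An illustrative reading of the table above: the RAID1 entry keeps two
 * copies of every block (ncopies == 2), needs at least two devices
 * (devs_min == 2) and survives the loss of one device
 * (tolerated_failures == 1), while DUP also keeps two copies but places
 * both stripes on a single device (dev_stripes == 2, devs_min == 1) and
 * therefore tolerates no device failure.
 */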
155
156 /*
157  * Convert block group flags (BTRFS_BLOCK_GROUP_*) to btrfs_raid_types, which
158  * can be used as an index to access btrfs_raid_array[].
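 * For example, a flags value containing BTRFS_BLOCK_GROUP_RAID10 maps to
 * BTRFS_RAID_RAID10, and a value with none of the profile bits set maps to
 * BTRFS_RAID_SINGLE.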
159  */
160 enum btrfs_raid_types __attribute_const__ btrfs_bg_flags_to_raid_index(u64 flags)
161 {
162         if (flags & BTRFS_BLOCK_GROUP_RAID10)
163                 return BTRFS_RAID_RAID10;
164         else if (flags & BTRFS_BLOCK_GROUP_RAID1)
165                 return BTRFS_RAID_RAID1;
166         else if (flags & BTRFS_BLOCK_GROUP_RAID1C3)
167                 return BTRFS_RAID_RAID1C3;
168         else if (flags & BTRFS_BLOCK_GROUP_RAID1C4)
169                 return BTRFS_RAID_RAID1C4;
170         else if (flags & BTRFS_BLOCK_GROUP_DUP)
171                 return BTRFS_RAID_DUP;
172         else if (flags & BTRFS_BLOCK_GROUP_RAID0)
173                 return BTRFS_RAID_RAID0;
174         else if (flags & BTRFS_BLOCK_GROUP_RAID5)
175                 return BTRFS_RAID_RAID5;
176         else if (flags & BTRFS_BLOCK_GROUP_RAID6)
177                 return BTRFS_RAID_RAID6;
178
179         return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
180 }
181
182 const char *btrfs_bg_type_to_raid_name(u64 flags)
183 {
184         const int index = btrfs_bg_flags_to_raid_index(flags);
185
186         if (index >= BTRFS_NR_RAID_TYPES)
187                 return NULL;
188
189         return btrfs_raid_array[index].raid_name;
190 }
191
192 /*
193  * Fill @buf with a textual description of @bg_flags, no more than @size_buf
194  * bytes including the terminating null byte.
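 *
 * For example, a bg_flags of BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_RAID1
 * is rendered as "data|raid1", and a zero bg_flags is rendered as "NONE".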
195  */
196 void btrfs_describe_block_groups(u64 bg_flags, char *buf, u32 size_buf)
197 {
198         int i;
199         int ret;
200         char *bp = buf;
201         u64 flags = bg_flags;
202         u32 size_bp = size_buf;
203
204         if (!flags) {
205                 strcpy(bp, "NONE");
206                 return;
207         }
208
209 #define DESCRIBE_FLAG(flag, desc)                                               \
210         do {                                                            \
211                 if (flags & (flag)) {                                   \
212                         ret = snprintf(bp, size_bp, "%s|", (desc));     \
213                         if (ret < 0 || ret >= size_bp)                  \
214                                 goto out_overflow;                      \
215                         size_bp -= ret;                                 \
216                         bp += ret;                                      \
217                         flags &= ~(flag);                               \
218                 }                                                       \
219         } while (0)
220
221         DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_DATA, "data");
222         DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_SYSTEM, "system");
223         DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_METADATA, "metadata");
224
225         DESCRIBE_FLAG(BTRFS_AVAIL_ALLOC_BIT_SINGLE, "single");
226         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
227                 DESCRIBE_FLAG(btrfs_raid_array[i].bg_flag,
228                               btrfs_raid_array[i].raid_name);
229 #undef DESCRIBE_FLAG
230
231         if (flags) {
232                 ret = snprintf(bp, size_bp, "0x%llx|", flags);
233                 size_bp -= ret;
234         }
235
236         if (size_bp < size_buf)
237                 buf[size_buf - size_bp - 1] = '\0'; /* remove last | */
238
239         /*
240          * The text is trimmed; it's up to the caller to provide a sufficiently
241          * large buffer.
242          */
243 out_overflow:;
244 }
245
246 static int init_first_rw_device(struct btrfs_trans_handle *trans);
247 static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info);
248 static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
249 static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
250 static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
251                              enum btrfs_map_op op,
252                              u64 logical, u64 *length,
253                              struct btrfs_bio **bbio_ret,
254                              int mirror_num, int need_raid_map);
255
256 /*
257  * Device locking
258  * ==============
259  *
260  * There are several mutexes that protect manipulation of devices and low-level
261  * structures like chunks but not block groups, extents or files
262  *
263  * uuid_mutex (global lock)
264  * ------------------------
265  * protects the fs_uuids list that tracks all per-fs fs_devices, resulting from
266  * the SCAN_DEV ioctl registration or from mount either implicitly (the first
267  * device) or requested by the device= mount option
268  *
269  * the mutex can be very coarse and can cover long-running operations
270  *
271  * protects: updates to fs_devices counters like missing devices, rw devices,
272  * seeding, structure cloning, opening/closing devices at mount/umount time
273  *
274  * global::fs_devs - add, remove, updates to the global list
275  *
276  * does not protect: manipulation of the fs_devices::devices list in general
277  * but in mount context it could be used to exclude list modifications by eg.
278  * scan ioctl
279  *
280  * btrfs_device::name - renames (write side), read is RCU
281  *
282  * fs_devices::device_list_mutex (per-fs, with RCU)
283  * ------------------------------------------------
284  * protects updates to fs_devices::devices, ie. adding and deleting
285  *
286  * simple list traversal with read-only actions can be done with RCU protection
287  *
288  * may be used to exclude some operations from running concurrently without any
289  * modifications to the list (see write_all_supers)
290  *
291  * Is not required at mount and close times, because our device list is
292  * protected by the uuid_mutex at that point.
293  *
294  * balance_mutex
295  * -------------
296  * protects balance structures (status, state) and context accessed from
297  * several places (internally, ioctl)
298  *
299  * chunk_mutex
300  * -----------
301  * protects chunks, adding or removing during allocation, trim or when a new
302  * device is added/removed. Additionally it also protects post_commit_list of
303  * individual devices, since they can be added to the transaction's
304  * post_commit_list only with chunk_mutex held.
305  *
306  * cleaner_mutex
307  * -------------
308  * a big lock that is held by the cleaner thread and prevents running subvolume
309  * cleaning together with relocation or delayed iputs
310  *
311  *
312  * Lock nesting
313  * ============
314  *
315  * uuid_mutex
316  *   device_list_mutex
317  *     chunk_mutex
318  *   balance_mutex
319  *
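 *
 * (Read top-down: for example, a path that needs both takes uuid_mutex
 * before device_list_mutex, and device_list_mutex before chunk_mutex.)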
320  *
321  * Exclusive operations
322  * ====================
323  *
324  * Maintains the exclusivity of the following operations that apply to the
325  * whole filesystem and cannot run in parallel.
326  *
327  * - Balance (*)
328  * - Device add
329  * - Device remove
330  * - Device replace (*)
331  * - Resize
332  *
333  * The device operations (as above) can be in one of the following states:
334  *
335  * - Running state
336  * - Paused state
337  * - Completed state
338  *
339  * Only device operations marked with (*) can go into the Paused state for the
340  * following reasons:
341  *
342  * - ioctl (only Balance can be Paused through ioctl)
343  * - filesystem remounted as read-only
344  * - filesystem unmounted and mounted as read-only
345  * - system power-cycle and filesystem mounted as read-only
346  * - filesystem or device errors leading to forced read-only
347  *
348  * The status of exclusive operation is set and cleared atomically.
349  * During the course of Paused state, fs_info::exclusive_operation remains set.
350  * A device operation in Paused or Running state can be canceled or resumed
351  * either by ioctl (Balance only) or when remounted as read-write.
352  * The exclusive status is cleared when the device operation is canceled or
353  * completed.
354  */
355
356 DEFINE_MUTEX(uuid_mutex);
357 static LIST_HEAD(fs_uuids);
358 struct list_head * __attribute_const__ btrfs_get_fs_uuids(void)
359 {
360         return &fs_uuids;
361 }
362
363 /*
364  * alloc_fs_devices - allocate struct btrfs_fs_devices
365  * @fsid:               if not NULL, copy the UUID to fs_devices::fsid
366  * @metadata_fsid:      if not NULL, copy the UUID to fs_devices::metadata_fsid
367  *
368  * Return a pointer to a new struct btrfs_fs_devices on success, or ERR_PTR().
369  * The returned struct is not linked onto any lists and can be destroyed with
370  * kfree() right away.
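 *
 * (For reference: device_list_add() below uses this helper to create a new
 * fs_devices entry when a previously unseen fsid is scanned.)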
371  */
372 static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid,
373                                                  const u8 *metadata_fsid)
374 {
375         struct btrfs_fs_devices *fs_devs;
376
377         fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL);
378         if (!fs_devs)
379                 return ERR_PTR(-ENOMEM);
380
381         mutex_init(&fs_devs->device_list_mutex);
382
383         INIT_LIST_HEAD(&fs_devs->devices);
384         INIT_LIST_HEAD(&fs_devs->alloc_list);
385         INIT_LIST_HEAD(&fs_devs->fs_list);
386         INIT_LIST_HEAD(&fs_devs->seed_list);
387         if (fsid)
388                 memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);
389
390         if (metadata_fsid)
391                 memcpy(fs_devs->metadata_uuid, metadata_fsid, BTRFS_FSID_SIZE);
392         else if (fsid)
393                 memcpy(fs_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE);
394
395         return fs_devs;
396 }
397
398 void btrfs_free_device(struct btrfs_device *device)
399 {
400         WARN_ON(!list_empty(&device->post_commit_list));
401         rcu_string_free(device->name);
402         extent_io_tree_release(&device->alloc_state);
403         bio_put(device->flush_bio);
404         btrfs_destroy_dev_zone_info(device);
405         kfree(device);
406 }
407
408 static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
409 {
410         struct btrfs_device *device;
411         WARN_ON(fs_devices->opened);
412         while (!list_empty(&fs_devices->devices)) {
413                 device = list_entry(fs_devices->devices.next,
414                                     struct btrfs_device, dev_list);
415                 list_del(&device->dev_list);
416                 btrfs_free_device(device);
417         }
418         kfree(fs_devices);
419 }
420
421 void __exit btrfs_cleanup_fs_uuids(void)
422 {
423         struct btrfs_fs_devices *fs_devices;
424
425         while (!list_empty(&fs_uuids)) {
426                 fs_devices = list_entry(fs_uuids.next,
427                                         struct btrfs_fs_devices, fs_list);
428                 list_del(&fs_devices->fs_list);
429                 free_fs_devices(fs_devices);
430         }
431 }
432
433 static noinline struct btrfs_fs_devices *find_fsid(
434                 const u8 *fsid, const u8 *metadata_fsid)
435 {
436         struct btrfs_fs_devices *fs_devices;
437
438         ASSERT(fsid);
439
440         /* Handle non-split brain cases */
441         list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
442                 if (metadata_fsid) {
443                         if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0
444                             && memcmp(metadata_fsid, fs_devices->metadata_uuid,
445                                       BTRFS_FSID_SIZE) == 0)
446                                 return fs_devices;
447                 } else {
448                         if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
449                                 return fs_devices;
450                 }
451         }
452         return NULL;
453 }
454
455 static struct btrfs_fs_devices *find_fsid_with_metadata_uuid(
456                                 struct btrfs_super_block *disk_super)
457 {
458
459         struct btrfs_fs_devices *fs_devices;
460
461         /*
462          * Handle the case of a scanned device which has completed its fsid
463          * change but belongs to a fs_devices that was created by first scanning
464          * a device which didn't have its fsid/metadata_uuid changed at all and
465          * had the CHANGING_FSID_V2 flag set.
466          */
467         list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
468                 if (fs_devices->fsid_change &&
469                     memcmp(disk_super->metadata_uuid, fs_devices->fsid,
470                            BTRFS_FSID_SIZE) == 0 &&
471                     memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
472                            BTRFS_FSID_SIZE) == 0) {
473                         return fs_devices;
474                 }
475         }
476         /*
477          * Handle the case of a scanned device which has completed its fsid
478          * change but belongs to a fs_devices that was created by a device that
479          * has an outdated pair of fsid/metadata_uuid and has the
480          * CHANGING_FSID_V2 flag set.
481          */
482         list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
483                 if (fs_devices->fsid_change &&
484                     memcmp(fs_devices->metadata_uuid,
485                            fs_devices->fsid, BTRFS_FSID_SIZE) != 0 &&
486                     memcmp(disk_super->metadata_uuid, fs_devices->metadata_uuid,
487                            BTRFS_FSID_SIZE) == 0) {
488                         return fs_devices;
489                 }
490         }
491
492         return find_fsid(disk_super->fsid, disk_super->metadata_uuid);
493 }
494
495
496 static int
497 btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
498                       int flush, struct block_device **bdev,
499                       struct btrfs_super_block **disk_super)
500 {
501         int ret;
502
503         *bdev = blkdev_get_by_path(device_path, flags, holder);
504
505         if (IS_ERR(*bdev)) {
506                 ret = PTR_ERR(*bdev);
507                 goto error;
508         }
509
510         if (flush)
511                 filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
512         ret = set_blocksize(*bdev, BTRFS_BDEV_BLOCKSIZE);
513         if (ret) {
514                 blkdev_put(*bdev, flags);
515                 goto error;
516         }
517         invalidate_bdev(*bdev);
518         *disk_super = btrfs_read_dev_super(*bdev);
519         if (IS_ERR(*disk_super)) {
520                 ret = PTR_ERR(*disk_super);
521                 blkdev_put(*bdev, flags);
522                 goto error;
523         }
524
525         return 0;
526
527 error:
528         *bdev = NULL;
529         return ret;
530 }
531
532 static bool device_path_matched(const char *path, struct btrfs_device *device)
533 {
534         int found;
535
536         rcu_read_lock();
537         found = strcmp(rcu_str_deref(device->name), path);
538         rcu_read_unlock();
539
540         return found == 0;
541 }
542
543 /*
544  *  Search and remove all stale devices (devices which are not mounted).
545  *  When both inputs are NULL, it will search and release all stale devices.
546  *  path:       Optional. When provided, it will release all unmounted devices
547  *              matching this path only.
548  *  skip_device: Optional. Will skip this device when searching for the stale
549  *              devices.
550  *  Return:     0 for success or if @path is NULL.
551  *              -EBUSY if @path is a mounted device.
552  *              -ENOENT if @path does not match any device in the list.
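 *
 *  (For reference: this is called from btrfs_forget_devices() with a
 *  user-supplied path, and from btrfs_scan_one_device() after a new device
 *  is registered, to drop stale duplicates of it.)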
553  */
554 static int btrfs_free_stale_devices(const char *path,
555                                      struct btrfs_device *skip_device)
556 {
557         struct btrfs_fs_devices *fs_devices, *tmp_fs_devices;
558         struct btrfs_device *device, *tmp_device;
559         int ret = 0;
560
561         if (path)
562                 ret = -ENOENT;
563
564         list_for_each_entry_safe(fs_devices, tmp_fs_devices, &fs_uuids, fs_list) {
565
566                 mutex_lock(&fs_devices->device_list_mutex);
567                 list_for_each_entry_safe(device, tmp_device,
568                                          &fs_devices->devices, dev_list) {
569                         if (skip_device && skip_device == device)
570                                 continue;
571                         if (path && !device->name)
572                                 continue;
573                         if (path && !device_path_matched(path, device))
574                                 continue;
575                         if (fs_devices->opened) {
576                                 /* for an already deleted device return 0 */
577                                 if (path && ret != 0)
578                                         ret = -EBUSY;
579                                 break;
580                         }
581
582                         /* delete the stale device */
583                         fs_devices->num_devices--;
584                         list_del(&device->dev_list);
585                         btrfs_free_device(device);
586
587                         ret = 0;
588                 }
589                 mutex_unlock(&fs_devices->device_list_mutex);
590
591                 if (fs_devices->num_devices == 0) {
592                         btrfs_sysfs_remove_fsid(fs_devices);
593                         list_del(&fs_devices->fs_list);
594                         free_fs_devices(fs_devices);
595                 }
596         }
597
598         return ret;
599 }
600
601 /*
602  * This is only used on mount, and we are protected from competing things
603  * messing with our fs_devices by the uuid_mutex, thus we do not need the
604  * fs_devices->device_list_mutex here.
605  */
606 static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
607                         struct btrfs_device *device, fmode_t flags,
608                         void *holder)
609 {
610         struct request_queue *q;
611         struct block_device *bdev;
612         struct btrfs_super_block *disk_super;
613         u64 devid;
614         int ret;
615
616         if (device->bdev)
617                 return -EINVAL;
618         if (!device->name)
619                 return -EINVAL;
620
621         ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
622                                     &bdev, &disk_super);
623         if (ret)
624                 return ret;
625
626         devid = btrfs_stack_device_id(&disk_super->dev_item);
627         if (devid != device->devid)
628                 goto error_free_page;
629
630         if (memcmp(device->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE))
631                 goto error_free_page;
632
633         device->generation = btrfs_super_generation(disk_super);
634
635         if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
636                 if (btrfs_super_incompat_flags(disk_super) &
637                     BTRFS_FEATURE_INCOMPAT_METADATA_UUID) {
638                         pr_err(
639                 "BTRFS: Invalid seeding and uuid-changed device detected\n");
640                         goto error_free_page;
641                 }
642
643                 clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
644                 fs_devices->seeding = true;
645         } else {
646                 if (bdev_read_only(bdev))
647                         clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
648                 else
649                         set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
650         }
651
652         q = bdev_get_queue(bdev);
653         if (!blk_queue_nonrot(q))
654                 fs_devices->rotating = true;
655
656         device->bdev = bdev;
657         clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
658         device->mode = flags;
659
660         fs_devices->open_devices++;
661         if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
662             device->devid != BTRFS_DEV_REPLACE_DEVID) {
663                 fs_devices->rw_devices++;
664                 list_add_tail(&device->dev_alloc_list, &fs_devices->alloc_list);
665         }
666         btrfs_release_disk_super(disk_super);
667
668         return 0;
669
670 error_free_page:
671         btrfs_release_disk_super(disk_super);
672         blkdev_put(bdev, flags);
673
674         return -EINVAL;
675 }
676
677 /*
678  * Handle scanned device having its CHANGING_FSID_V2 flag set and the fs_devices
679  * being created with a disk that has already completed its fsid change. Such
680  * a disk can belong to an fs whose FSID has changed or to one that hasn't.
681  * Handle both cases here.
682  */
683 static struct btrfs_fs_devices *find_fsid_inprogress(
684                                         struct btrfs_super_block *disk_super)
685 {
686         struct btrfs_fs_devices *fs_devices;
687
688         list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
689                 if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
690                            BTRFS_FSID_SIZE) != 0 &&
691                     memcmp(fs_devices->metadata_uuid, disk_super->fsid,
692                            BTRFS_FSID_SIZE) == 0 && !fs_devices->fsid_change) {
693                         return fs_devices;
694                 }
695         }
696
697         return find_fsid(disk_super->fsid, NULL);
698 }
699
700
701 static struct btrfs_fs_devices *find_fsid_changed(
702                                         struct btrfs_super_block *disk_super)
703 {
704         struct btrfs_fs_devices *fs_devices;
705
706         /*
707          * Handles the case where the scanned device is part of an fs that had
708          * multiple successful changes of FSID but the currently scanned device
709          * didn't observe them, meaning its fsid will be different than theirs.
710          * We need to handle two subcases:
711          *  1 - The fs still continues to have different METADATA/FSID uuids.
712          *  2 - The fs is switched back to its original FSID (METADATA/FSID
713          *  are equal).
714          */
715         list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
716                 /* Changed UUIDs */
717                 if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
718                            BTRFS_FSID_SIZE) != 0 &&
719                     memcmp(fs_devices->metadata_uuid, disk_super->metadata_uuid,
720                            BTRFS_FSID_SIZE) == 0 &&
721                     memcmp(fs_devices->fsid, disk_super->fsid,
722                            BTRFS_FSID_SIZE) != 0)
723                         return fs_devices;
724
725                 /* Unchanged UUIDs */
726                 if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
727                            BTRFS_FSID_SIZE) == 0 &&
728                     memcmp(fs_devices->fsid, disk_super->metadata_uuid,
729                            BTRFS_FSID_SIZE) == 0)
730                         return fs_devices;
731         }
732
733         return NULL;
734 }
735
736 static struct btrfs_fs_devices *find_fsid_reverted_metadata(
737                                 struct btrfs_super_block *disk_super)
738 {
739         struct btrfs_fs_devices *fs_devices;
740
741         /*
742          * Handle the case where the scanned device is part of an fs whose last
743  * metadata UUID change reverted it to the original FSID. At the same
744  * time the fs_devices was first created by another constituent device
745  * which didn't fully observe the operation. This results in a
746  * btrfs_fs_devices created with metadata/fsid different AND
747          * btrfs_fs_devices::fsid_change set AND the metadata_uuid of the
748          * fs_devices equal to the FSID of the disk.
749          */
750         list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
751                 if (memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
752                            BTRFS_FSID_SIZE) != 0 &&
753                     memcmp(fs_devices->metadata_uuid, disk_super->fsid,
754                            BTRFS_FSID_SIZE) == 0 &&
755                     fs_devices->fsid_change)
756                         return fs_devices;
757         }
758
759         return NULL;
760 }
761 /*
762  * Add new device to list of registered devices
763  *
764  * Returns:
765  * device pointer which was just added or updated when successful
766  * error pointer when failed
767  */
768 static noinline struct btrfs_device *device_list_add(const char *path,
769                            struct btrfs_super_block *disk_super,
770                            bool *new_device_added)
771 {
772         struct btrfs_device *device;
773         struct btrfs_fs_devices *fs_devices = NULL;
774         struct rcu_string *name;
775         u64 found_transid = btrfs_super_generation(disk_super);
776         u64 devid = btrfs_stack_device_id(&disk_super->dev_item);
777         bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) &
778                 BTRFS_FEATURE_INCOMPAT_METADATA_UUID);
779         bool fsid_change_in_progress = (btrfs_super_flags(disk_super) &
780                                         BTRFS_SUPER_FLAG_CHANGING_FSID_V2);
781
782         if (fsid_change_in_progress) {
783                 if (!has_metadata_uuid)
784                         fs_devices = find_fsid_inprogress(disk_super);
785                 else
786                         fs_devices = find_fsid_changed(disk_super);
787         } else if (has_metadata_uuid) {
788                 fs_devices = find_fsid_with_metadata_uuid(disk_super);
789         } else {
790                 fs_devices = find_fsid_reverted_metadata(disk_super);
791                 if (!fs_devices)
792                         fs_devices = find_fsid(disk_super->fsid, NULL);
793         }
794
795
796         if (!fs_devices) {
797                 if (has_metadata_uuid)
798                         fs_devices = alloc_fs_devices(disk_super->fsid,
799                                                       disk_super->metadata_uuid);
800                 else
801                         fs_devices = alloc_fs_devices(disk_super->fsid, NULL);
802
803                 if (IS_ERR(fs_devices))
804                         return ERR_CAST(fs_devices);
805
806                 fs_devices->fsid_change = fsid_change_in_progress;
807
808                 mutex_lock(&fs_devices->device_list_mutex);
809                 list_add(&fs_devices->fs_list, &fs_uuids);
810
811                 device = NULL;
812         } else {
813                 mutex_lock(&fs_devices->device_list_mutex);
814                 device = btrfs_find_device(fs_devices, devid,
815                                 disk_super->dev_item.uuid, NULL);
816
817                 /*
818                  * If this disk has been pulled into an fs_devices created by
819                  * a device which had the CHANGING_FSID_V2 flag then replace the
820                  * metadata_uuid/fsid values of the fs_devices.
821                  */
822                 if (fs_devices->fsid_change &&
823                     found_transid > fs_devices->latest_generation) {
824                         memcpy(fs_devices->fsid, disk_super->fsid,
825                                         BTRFS_FSID_SIZE);
826
827                         if (has_metadata_uuid)
828                                 memcpy(fs_devices->metadata_uuid,
829                                        disk_super->metadata_uuid,
830                                        BTRFS_FSID_SIZE);
831                         else
832                                 memcpy(fs_devices->metadata_uuid,
833                                        disk_super->fsid, BTRFS_FSID_SIZE);
834
835                         fs_devices->fsid_change = false;
836                 }
837         }
838
839         if (!device) {
840                 if (fs_devices->opened) {
841                         mutex_unlock(&fs_devices->device_list_mutex);
842                         return ERR_PTR(-EBUSY);
843                 }
844
845                 device = btrfs_alloc_device(NULL, &devid,
846                                             disk_super->dev_item.uuid);
847                 if (IS_ERR(device)) {
848                         mutex_unlock(&fs_devices->device_list_mutex);
849                         /* we can safely leave the fs_devices entry around */
850                         return device;
851                 }
852
853                 name = rcu_string_strdup(path, GFP_NOFS);
854                 if (!name) {
855                         btrfs_free_device(device);
856                         mutex_unlock(&fs_devices->device_list_mutex);
857                         return ERR_PTR(-ENOMEM);
858                 }
859                 rcu_assign_pointer(device->name, name);
860
861                 list_add_rcu(&device->dev_list, &fs_devices->devices);
862                 fs_devices->num_devices++;
863
864                 device->fs_devices = fs_devices;
865                 *new_device_added = true;
866
867                 if (disk_super->label[0])
868                         pr_info(
869         "BTRFS: device label %s devid %llu transid %llu %s scanned by %s (%d)\n",
870                                 disk_super->label, devid, found_transid, path,
871                                 current->comm, task_pid_nr(current));
872                 else
873                         pr_info(
874         "BTRFS: device fsid %pU devid %llu transid %llu %s scanned by %s (%d)\n",
875                                 disk_super->fsid, devid, found_transid, path,
876                                 current->comm, task_pid_nr(current));
877
878         } else if (!device->name || strcmp(device->name->str, path)) {
879                 /*
880                  * When the FS is already mounted:
881                  * 1. If you are here and if the device->name is NULL that
882                  *    means this device was missing at time of FS mount.
883                  * 2. If you are here and if the device->name is different
884                  *    from 'path' that means either
885                  *      a. The same device disappeared and reappeared with
886                  *         different name. or
887                  *      b. The missing-disk-which-was-replaced, has
888                  *         reappeared now.
889                  *
890                  * We must allow 1 and 2a above, but 2b would be spurious
891                  * and unintentional.
892                  *
893                  * Further in case of 1 and 2a above, the disk at 'path'
894                  * would have missed some transaction when it was away and
895                  * in case of 2a the stale bdev has to be updated as well.
896                  * 2b must not be allowed at any time.
897                  */
898
899                 /*
900                  * For now, we do allow updating btrfs_fs_device through the
901                  * btrfs dev scan cli after the FS has been mounted.  We're still
902                  * tracking a problem where systems fail mount by subvolume id
903                  * when we reject replacement on a mounted FS.
904                  */
905                 if (!fs_devices->opened && found_transid < device->generation) {
906                         /*
907                          * That is, if the FS is _not_ mounted and you
908                          * are here, that means there is more than one
909                          * disk with the same uuid and devid. We keep the
910                          * one with the larger generation number or the
911                          * last-in if the generations are equal.
912                          */
913                         mutex_unlock(&fs_devices->device_list_mutex);
914                         return ERR_PTR(-EEXIST);
915                 }
916
917                 /*
918                  * We are going to replace the device path for a given devid,
919                  * so make sure it's the same device if the device is mounted.
920                  */
921                 if (device->bdev) {
922                         int error;
923                         dev_t path_dev;
924
925                         error = lookup_bdev(path, &path_dev);
926                         if (error) {
927                                 mutex_unlock(&fs_devices->device_list_mutex);
928                                 return ERR_PTR(error);
929                         }
930
931                         if (device->bdev->bd_dev != path_dev) {
932                                 mutex_unlock(&fs_devices->device_list_mutex);
933                                 /*
934                                  * device->fs_info may not be reliable here, so
935                                  * pass in a NULL instead. This avoids a
936                                  * possible use-after-free when the fs_info and
937                                  * fs_info->sb are already torn down.
938                                  */
939                                 btrfs_warn_in_rcu(NULL,
940         "duplicate device %s devid %llu generation %llu scanned by %s (%d)",
941                                                   path, devid, found_transid,
942                                                   current->comm,
943                                                   task_pid_nr(current));
944                                 return ERR_PTR(-EEXIST);
945                         }
946                         btrfs_info_in_rcu(device->fs_info,
947         "devid %llu device path %s changed to %s scanned by %s (%d)",
948                                           devid, rcu_str_deref(device->name),
949                                           path, current->comm,
950                                           task_pid_nr(current));
951                 }
952
953                 name = rcu_string_strdup(path, GFP_NOFS);
954                 if (!name) {
955                         mutex_unlock(&fs_devices->device_list_mutex);
956                         return ERR_PTR(-ENOMEM);
957                 }
958                 rcu_string_free(device->name);
959                 rcu_assign_pointer(device->name, name);
960                 if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
961                         fs_devices->missing_devices--;
962                         clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
963                 }
964         }
965
966         /*
967          * Unmount does not free the btrfs_device struct but zeroes the
968          * generation along with most of the other members. So just update
969          * it back. We need it to pick the disk with the largest generation
970          * (as above).
971          */
972         if (!fs_devices->opened) {
973                 device->generation = found_transid;
974                 fs_devices->latest_generation = max_t(u64, found_transid,
975                                                 fs_devices->latest_generation);
976         }
977
978         fs_devices->total_devices = btrfs_super_num_devices(disk_super);
979
980         mutex_unlock(&fs_devices->device_list_mutex);
981         return device;
982 }
983
984 static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
985 {
986         struct btrfs_fs_devices *fs_devices;
987         struct btrfs_device *device;
988         struct btrfs_device *orig_dev;
989         int ret = 0;
990
991         fs_devices = alloc_fs_devices(orig->fsid, NULL);
992         if (IS_ERR(fs_devices))
993                 return fs_devices;
994
995         mutex_lock(&orig->device_list_mutex);
996         fs_devices->total_devices = orig->total_devices;
997
998         list_for_each_entry(orig_dev, &orig->devices, dev_list) {
999                 struct rcu_string *name;
1000
1001                 device = btrfs_alloc_device(NULL, &orig_dev->devid,
1002                                             orig_dev->uuid);
1003                 if (IS_ERR(device)) {
1004                         ret = PTR_ERR(device);
1005                         goto error;
1006                 }
1007
1008                 /*
1009                  * This is ok to do without the RCU read lock because we hold
1010                  * the uuid_mutex, so nothing we touch in here is going to disappear.
1011                  */
1012                 if (orig_dev->name) {
1013                         name = rcu_string_strdup(orig_dev->name->str,
1014                                         GFP_KERNEL);
1015                         if (!name) {
1016                                 btrfs_free_device(device);
1017                                 ret = -ENOMEM;
1018                                 goto error;
1019                         }
1020                         rcu_assign_pointer(device->name, name);
1021                 }
1022
1023                 list_add(&device->dev_list, &fs_devices->devices);
1024                 device->fs_devices = fs_devices;
1025                 fs_devices->num_devices++;
1026         }
1027         mutex_unlock(&orig->device_list_mutex);
1028         return fs_devices;
1029 error:
1030         mutex_unlock(&orig->device_list_mutex);
1031         free_fs_devices(fs_devices);
1032         return ERR_PTR(ret);
1033 }
1034
1035 static void __btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices,
1036                                       struct btrfs_device **latest_dev)
1037 {
1038         struct btrfs_device *device, *next;
1039
1040         /* This is the initialized path, it is safe to release the devices. */
1041         list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
1042                 if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state)) {
1043                         if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
1044                                       &device->dev_state) &&
1045                             !test_bit(BTRFS_DEV_STATE_MISSING,
1046                                       &device->dev_state) &&
1047                             (!*latest_dev ||
1048                              device->generation > (*latest_dev)->generation)) {
1049                                 *latest_dev = device;
1050                         }
1051                         continue;
1052                 }
1053
1054                 /*
1055                  * We have already validated the presence of BTRFS_DEV_REPLACE_DEVID
1056                  * in btrfs_init_dev_replace(), so just continue.
1057                  */
1058                 if (device->devid == BTRFS_DEV_REPLACE_DEVID)
1059                         continue;
1060
1061                 if (device->bdev) {
1062                         blkdev_put(device->bdev, device->mode);
1063                         device->bdev = NULL;
1064                         fs_devices->open_devices--;
1065                 }
1066                 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
1067                         list_del_init(&device->dev_alloc_list);
1068                         clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
1069                         fs_devices->rw_devices--;
1070                 }
1071                 list_del_init(&device->dev_list);
1072                 fs_devices->num_devices--;
1073                 btrfs_free_device(device);
1074         }
1075
1076 }
1077
1078 /*
1079  * After we have read the system tree and know the devids belonging to this
1080  * filesystem, remove any devices which do not belong there.
1081  */
1082 void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices)
1083 {
1084         struct btrfs_device *latest_dev = NULL;
1085         struct btrfs_fs_devices *seed_dev;
1086
1087         mutex_lock(&uuid_mutex);
1088         __btrfs_free_extra_devids(fs_devices, &latest_dev);
1089
1090         list_for_each_entry(seed_dev, &fs_devices->seed_list, seed_list)
1091                 __btrfs_free_extra_devids(seed_dev, &latest_dev);
1092
1093         fs_devices->latest_bdev = latest_dev->bdev;
1094
1095         mutex_unlock(&uuid_mutex);
1096 }
1097
1098 static void btrfs_close_bdev(struct btrfs_device *device)
1099 {
1100         if (!device->bdev)
1101                 return;
1102
1103         if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
1104                 sync_blockdev(device->bdev);
1105                 invalidate_bdev(device->bdev);
1106         }
1107
1108         blkdev_put(device->bdev, device->mode);
1109 }
1110
1111 static void btrfs_close_one_device(struct btrfs_device *device)
1112 {
1113         struct btrfs_fs_devices *fs_devices = device->fs_devices;
1114
1115         if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
1116             device->devid != BTRFS_DEV_REPLACE_DEVID) {
1117                 list_del_init(&device->dev_alloc_list);
1118                 fs_devices->rw_devices--;
1119         }
1120
1121         if (device->devid == BTRFS_DEV_REPLACE_DEVID)
1122                 clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
1123
1124         if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
1125                 fs_devices->missing_devices--;
1126
1127         btrfs_close_bdev(device);
1128         if (device->bdev) {
1129                 fs_devices->open_devices--;
1130                 device->bdev = NULL;
1131         }
1132         clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
1133         btrfs_destroy_dev_zone_info(device);
1134
1135         device->fs_info = NULL;
1136         atomic_set(&device->dev_stats_ccnt, 0);
1137         extent_io_tree_release(&device->alloc_state);
1138
1139         /* Verify the device is back in a pristine state  */
1140         ASSERT(!test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state));
1141         ASSERT(!test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
1142         ASSERT(list_empty(&device->dev_alloc_list));
1143         ASSERT(list_empty(&device->post_commit_list));
1144         ASSERT(atomic_read(&device->reada_in_flight) == 0);
1145 }
1146
1147 static void close_fs_devices(struct btrfs_fs_devices *fs_devices)
1148 {
1149         struct btrfs_device *device, *tmp;
1150
1151         lockdep_assert_held(&uuid_mutex);
1152
1153         if (--fs_devices->opened > 0)
1154                 return;
1155
1156         list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list)
1157                 btrfs_close_one_device(device);
1158
1159         WARN_ON(fs_devices->open_devices);
1160         WARN_ON(fs_devices->rw_devices);
1161         fs_devices->opened = 0;
1162         fs_devices->seeding = false;
1163         fs_devices->fs_info = NULL;
1164 }
1165
1166 void btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
1167 {
1168         LIST_HEAD(list);
1169         struct btrfs_fs_devices *tmp;
1170
1171         mutex_lock(&uuid_mutex);
1172         close_fs_devices(fs_devices);
1173         if (!fs_devices->opened)
1174                 list_splice_init(&fs_devices->seed_list, &list);
1175
1176         list_for_each_entry_safe(fs_devices, tmp, &list, seed_list) {
1177                 close_fs_devices(fs_devices);
1178                 list_del(&fs_devices->seed_list);
1179                 free_fs_devices(fs_devices);
1180         }
1181         mutex_unlock(&uuid_mutex);
1182 }
1183
1184 static int open_fs_devices(struct btrfs_fs_devices *fs_devices,
1185                                 fmode_t flags, void *holder)
1186 {
1187         struct btrfs_device *device;
1188         struct btrfs_device *latest_dev = NULL;
1189         struct btrfs_device *tmp_device;
1190
1191         flags |= FMODE_EXCL;
1192
1193         list_for_each_entry_safe(device, tmp_device, &fs_devices->devices,
1194                                  dev_list) {
1195                 int ret;
1196
1197                 ret = btrfs_open_one_device(fs_devices, device, flags, holder);
1198                 if (ret == 0 &&
1199                     (!latest_dev || device->generation > latest_dev->generation)) {
1200                         latest_dev = device;
1201                 } else if (ret == -ENODATA) {
1202                         fs_devices->num_devices--;
1203                         list_del(&device->dev_list);
1204                         btrfs_free_device(device);
1205                 }
1206         }
1207         if (fs_devices->open_devices == 0)
1208                 return -EINVAL;
1209
1210         fs_devices->opened = 1;
1211         fs_devices->latest_bdev = latest_dev->bdev;
1212         fs_devices->total_rw_bytes = 0;
1213         fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_REGULAR;
1214         fs_devices->read_policy = BTRFS_READ_POLICY_PID;
1215
1216         return 0;
1217 }
1218
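/*
 * list_sort() comparator used by btrfs_open_devices() below to order
 * fs_devices->devices by ascending devid before the devices are opened.
 */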
1219 static int devid_cmp(void *priv, const struct list_head *a,
1220                      const struct list_head *b)
1221 {
1222         const struct btrfs_device *dev1, *dev2;
1223
1224         dev1 = list_entry(a, struct btrfs_device, dev_list);
1225         dev2 = list_entry(b, struct btrfs_device, dev_list);
1226
1227         if (dev1->devid < dev2->devid)
1228                 return -1;
1229         else if (dev1->devid > dev2->devid)
1230                 return 1;
1231         return 0;
1232 }
1233
1234 int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
1235                        fmode_t flags, void *holder)
1236 {
1237         int ret;
1238
1239         lockdep_assert_held(&uuid_mutex);
1240         /*
1241          * The device_list_mutex cannot be taken here in case opening the
1242          * underlying device takes further locks like open_mutex.
1243          *
1244          * We also don't need the lock here as this is called during mount and
1245          * exclusion is provided by uuid_mutex
1246          */
1247
1248         if (fs_devices->opened) {
1249                 fs_devices->opened++;
1250                 ret = 0;
1251         } else {
1252                 list_sort(NULL, &fs_devices->devices, devid_cmp);
1253                 ret = open_fs_devices(fs_devices, flags, holder);
1254         }
1255
1256         return ret;
1257 }
1258
1259 void btrfs_release_disk_super(struct btrfs_super_block *super)
1260 {
1261         struct page *page = virt_to_page(super);
1262
1263         put_page(page);
1264 }
1265
1266 static struct btrfs_super_block *btrfs_read_disk_super(struct block_device *bdev,
1267                                                        u64 bytenr, u64 bytenr_orig)
1268 {
1269         struct btrfs_super_block *disk_super;
1270         struct page *page;
1271         void *p;
1272         pgoff_t index;
1273
1274         /* make sure our super fits in the device */
1275         if (bytenr + PAGE_SIZE >= i_size_read(bdev->bd_inode))
1276                 return ERR_PTR(-EINVAL);
1277
1278         /* make sure our super fits in the page */
1279         if (sizeof(*disk_super) > PAGE_SIZE)
1280                 return ERR_PTR(-EINVAL);
1281
1282         /* make sure our super doesn't straddle pages on disk */
1283         index = bytenr >> PAGE_SHIFT;
1284         if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_SHIFT != index)
1285                 return ERR_PTR(-EINVAL);
1286
1287         /* pull in the page with our super */
1288         page = read_cache_page_gfp(bdev->bd_inode->i_mapping, index, GFP_KERNEL);
1289
1290         if (IS_ERR(page))
1291                 return ERR_CAST(page);
1292
1293         p = page_address(page);
1294
1295         /* align our pointer to the offset of the super block */
1296         disk_super = p + offset_in_page(bytenr);
1297
1298         if (btrfs_super_bytenr(disk_super) != bytenr_orig ||
1299             btrfs_super_magic(disk_super) != BTRFS_MAGIC) {
1300                 btrfs_release_disk_super(p);
1301                 return ERR_PTR(-EINVAL);
1302         }
1303
1304         if (disk_super->label[0] && disk_super->label[BTRFS_LABEL_SIZE - 1])
1305                 disk_super->label[BTRFS_LABEL_SIZE - 1] = 0;
1306
1307         return disk_super;
1308 }
1309
1310 int btrfs_forget_devices(const char *path)
1311 {
1312         int ret;
1313
1314         mutex_lock(&uuid_mutex);
1315         ret = btrfs_free_stale_devices(strlen(path) ? path : NULL, NULL);
1316         mutex_unlock(&uuid_mutex);
1317
1318         return ret;
1319 }
1320
1321 /*
1322  * Look for a btrfs signature on a device. This may be called outside of the
1323  * mount path and we are not allowed to call set_blocksize during the scan.
1324  * The superblock is read via the pagecache.
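 *
 * (On success the device is registered via device_list_add() into the
 * in-memory fs_devices tracked on the global fs_uuids list.)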
1325  */
1326 struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags,
1327                                            void *holder)
1328 {
1329         struct btrfs_super_block *disk_super;
1330         bool new_device_added = false;
1331         struct btrfs_device *device = NULL;
1332         struct block_device *bdev;
1333         u64 bytenr, bytenr_orig;
1334         int ret;
1335
1336         lockdep_assert_held(&uuid_mutex);
1337
1338         /*
1339          * We would like to check all the supers, but that would make
1340          * a btrfs mount succeed after a mkfs from a different FS.
1341          * So, we need to add a special mount option to scan for
1342          * later supers, using BTRFS_SUPER_MIRROR_MAX instead.
1343          */
1344         flags |= FMODE_EXCL;
1345
1346         bdev = blkdev_get_by_path(path, flags, holder);
1347         if (IS_ERR(bdev))
1348                 return ERR_CAST(bdev);
1349
1350         bytenr_orig = btrfs_sb_offset(0);
1351         ret = btrfs_sb_log_location_bdev(bdev, 0, READ, &bytenr);
1352         if (ret)
1353         if (ret) {
1354                 device = ERR_PTR(ret);
                     goto error_bdev_put;
             }
1355         disk_super = btrfs_read_disk_super(bdev, bytenr, bytenr_orig);
1356         if (IS_ERR(disk_super)) {
1357                 device = ERR_CAST(disk_super);
1358                 goto error_bdev_put;
1359         }
1360
1361         device = device_list_add(path, disk_super, &new_device_added);
1362         if (!IS_ERR(device)) {
1363                 if (new_device_added)
1364                         btrfs_free_stale_devices(path, device);
1365         }
1366
1367         btrfs_release_disk_super(disk_super);
1368
1369 error_bdev_put:
1370         blkdev_put(bdev, flags);
1371
1372         return device;
1373 }
1374
1375 /*
1376  * Try to find a chunk that intersects the [start, start + len] range and, when
1377  * one such chunk is found, record the end of it in *start.
1378  */
1379 static bool contains_pending_extent(struct btrfs_device *device, u64 *start,
1380                                     u64 len)
1381 {
1382         u64 physical_start, physical_end;
1383
1384         lockdep_assert_held(&device->fs_info->chunk_mutex);
1385
1386         if (!find_first_extent_bit(&device->alloc_state, *start,
1387                                    &physical_start, &physical_end,
1388                                    CHUNK_ALLOCATED, NULL)) {
1389
1390                 if (in_range(physical_start, *start, len) ||
1391                     in_range(*start, physical_start,
1392                              physical_end - physical_start)) {
1393                         *start = physical_end + 1;
1394                         return true;
1395                 }
1396         }
1397         return false;
1398 }
1399
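/*
 * Return the first offset at or after @start where a new dev extent may be
 * placed, depending on the chunk allocation policy: regular allocation never
 * starts below 1MiB (to protect the primary superblock and the boot loader
 * area), while zoned allocation aligns @start up to the device's zone size
 * (e.g. a 1MiB @start rounds up to 256MiB on a device with 256MiB zones).
 */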
1400 static u64 dev_extent_search_start(struct btrfs_device *device, u64 start)
1401 {
1402         switch (device->fs_devices->chunk_alloc_policy) {
1403         case BTRFS_CHUNK_ALLOC_REGULAR:
1404                 /*
1405                  * We don't want to overwrite the superblock on the drive nor
1406                  * any area used by the boot loader (grub for example), so we
1407                  * make sure to start at an offset of at least 1MB.
1408                  */
1409                 return max_t(u64, start, SZ_1M);
1410         case BTRFS_CHUNK_ALLOC_ZONED:
1411                 /*
1412                  * We don't care about the starting region like regular
1413                  * allocator, because we anyway use/reserve the first two zones
1414                  * for superblock logging.
1415                  */
1416                 return ALIGN(start, device->zone_info->zone_size);
1417         default:
1418                 BUG();
1419         }
1420 }
1421
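/*
 * Adjust the hole [*hole_start, *hole_start + *hole_size) on a zoned device so
 * that it begins at a position where @num_bytes worth of zones can be
 * allocated and made empty, shrinking the hole as needed.
 *
 * Returns true if the hole was modified, false otherwise.
 */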
1422 static bool dev_extent_hole_check_zoned(struct btrfs_device *device,
1423                                         u64 *hole_start, u64 *hole_size,
1424                                         u64 num_bytes)
1425 {
1426         u64 zone_size = device->zone_info->zone_size;
1427         u64 pos;
1428         int ret;
1429         bool changed = false;
1430
1431         ASSERT(IS_ALIGNED(*hole_start, zone_size));
1432
1433         while (*hole_size > 0) {
1434                 pos = btrfs_find_allocatable_zones(device, *hole_start,
1435                                                    *hole_start + *hole_size,
1436                                                    num_bytes);
1437                 if (pos != *hole_start) {
1438                         *hole_size = *hole_start + *hole_size - pos;
1439                         *hole_start = pos;
1440                         changed = true;
1441                         if (*hole_size < num_bytes)
1442                                 break;
1443                 }
1444
1445                 ret = btrfs_ensure_empty_zones(device, pos, num_bytes);
1446
1447                 /* Range is ensured to be empty */
1448                 if (!ret)
1449                         return changed;
1450
1451                 /* Given hole range was invalid (outside of device) */
1452                 if (ret == -ERANGE) {
1453                         *hole_start += *hole_size;
1454                         *hole_size = 0;
1455                         return true;
1456                 }
1457
1458                 *hole_start += zone_size;
1459                 *hole_size -= zone_size;
1460                 changed = true;
1461         }
1462
1463         return changed;
1464 }
1465
1466 /**
1467  * dev_extent_hole_check - check if specified hole is suitable for allocation
1468  * @device:     the device which has the hole
1469  * @hole_start: starting position of the hole
1470  * @hole_size:  the size of the hole
1471  * @num_bytes:  the size of the free space that we need
1472  *
1473  * This function may modify @hole_start and @hole_size to reflect the suitable
1474  * position for allocation. Returns true if the hole position was updated,
      * false otherwise.
1475  */
1476 static bool dev_extent_hole_check(struct btrfs_device *device, u64 *hole_start,
1477                                   u64 *hole_size, u64 num_bytes)
1478 {
1479         bool changed = false;
1480         u64 hole_end = *hole_start + *hole_size;
1481
1482         for (;;) {
1483                 /*
1484                  * Check before we set max_hole_start, otherwise we could end up
1485                  * sending back this offset anyway.
1486                  */
1487                 if (contains_pending_extent(device, hole_start, *hole_size)) {
1488                         if (hole_end >= *hole_start)
1489                                 *hole_size = hole_end - *hole_start;
1490                         else
1491                                 *hole_size = 0;
1492                         changed = true;
1493                 }
1494
1495                 switch (device->fs_devices->chunk_alloc_policy) {
1496                 case BTRFS_CHUNK_ALLOC_REGULAR:
1497                         /* No extra check */
1498                         break;
1499                 case BTRFS_CHUNK_ALLOC_ZONED:
1500                         if (dev_extent_hole_check_zoned(device, hole_start,
1501                                                         hole_size, num_bytes)) {
1502                                 changed = true;
1503                                 /*
1504                                  * The changed hole can contain pending extent.
1505                                  * Loop again to check that.
1506                                  */
1507                                 continue;
1508                         }
1509                         break;
1510                 default:
1511                         BUG();
1512                 }
1513
1514                 break;
1515         }
1516
1517         return changed;
1518 }
1519
1520 /*
1521  * find_free_dev_extent_start - find free space in the specified device
1522  * @device:       the device in which we search for free space
1523  * @num_bytes:    the size of the free space that we need
1524  * @search_start: the position from which to begin the search
1525  * @start:        store the start of the free space
1526  * @len:          the size of the free space that we find, or the size
1527  *                of the max free space if we don't find suitable free space
1528  *
1529  * This uses a pretty simple search, the expectation is that it is
1530  * called very infrequently and that a given device has a small number
1531  * of extents.
1532  *
1533  * @start is used to store the start of the free space if we find one. But if
1534  * we don't find suitable free space, it will be used to store the start
1535  * position of the max free space.
1536  *
1537  * @len is used to store the size of the free space that we find.
1538  * But if we don't find suitable free space, it is used to store the size of
1539  * the max free space.
1540  *
1541  * NOTE: This function will search the *commit* root of the device tree, and
1542  * does an extra check to ensure dev extents are not double allocated.
1543  * This makes the function safe to allocate dev extents but it may not report
1544  * correct usable device space, as device extents freed in the current
1545  * transaction are not reported as available.
1546  */
1547 static int find_free_dev_extent_start(struct btrfs_device *device,
1548                                 u64 num_bytes, u64 search_start, u64 *start,
1549                                 u64 *len)
1550 {
1551         struct btrfs_fs_info *fs_info = device->fs_info;
1552         struct btrfs_root *root = fs_info->dev_root;
1553         struct btrfs_key key;
1554         struct btrfs_dev_extent *dev_extent;
1555         struct btrfs_path *path;
1556         u64 hole_size;
1557         u64 max_hole_start;
1558         u64 max_hole_size;
1559         u64 extent_end;
1560         u64 search_end = device->total_bytes;
1561         int ret;
1562         int slot;
1563         struct extent_buffer *l;
1564
1565         search_start = dev_extent_search_start(device, search_start);
1566
1567         WARN_ON(device->zone_info &&
1568                 !IS_ALIGNED(num_bytes, device->zone_info->zone_size));
1569
1570         path = btrfs_alloc_path();
1571         if (!path)
1572                 return -ENOMEM;
1573
1574         max_hole_start = search_start;
1575         max_hole_size = 0;
1576
1577 again:
1578         if (search_start >= search_end ||
1579                 test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
1580                 ret = -ENOSPC;
1581                 goto out;
1582         }
1583
1584         path->reada = READA_FORWARD;
1585         path->search_commit_root = 1;
1586         path->skip_locking = 1;
1587
1588         key.objectid = device->devid;
1589         key.offset = search_start;
1590         key.type = BTRFS_DEV_EXTENT_KEY;
1591
1592         ret = btrfs_search_backwards(root, &key, path);
1593         if (ret < 0)
1594                 goto out;
1595
1596         while (1) {
1597                 l = path->nodes[0];
1598                 slot = path->slots[0];
1599                 if (slot >= btrfs_header_nritems(l)) {
1600                         ret = btrfs_next_leaf(root, path);
1601                         if (ret == 0)
1602                                 continue;
1603                         if (ret < 0)
1604                                 goto out;
1605
1606                         break;
1607                 }
1608                 btrfs_item_key_to_cpu(l, &key, slot);
1609
1610                 if (key.objectid < device->devid)
1611                         goto next;
1612
1613                 if (key.objectid > device->devid)
1614                         break;
1615
1616                 if (key.type != BTRFS_DEV_EXTENT_KEY)
1617                         goto next;
1618
1619                 if (key.offset > search_start) {
1620                         hole_size = key.offset - search_start;
1621                         dev_extent_hole_check(device, &search_start, &hole_size,
1622                                               num_bytes);
1623
1624                         if (hole_size > max_hole_size) {
1625                                 max_hole_start = search_start;
1626                                 max_hole_size = hole_size;
1627                         }
1628
1629                         /*
1630                          * If this free space is greater than what we need,
1631                          * it must be the max free space that we have found
1632                          * until now, so max_hole_start must point to the start
1633                          * of this free space and the length of this free space
1634                          * is stored in max_hole_size. Thus, we return
1635                          * max_hole_start and max_hole_size and go back to the
1636                          * caller.
1637                          */
1638                         if (hole_size >= num_bytes) {
1639                                 ret = 0;
1640                                 goto out;
1641                         }
1642                 }
1643
1644                 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
1645                 extent_end = key.offset + btrfs_dev_extent_length(l,
1646                                                                   dev_extent);
1647                 if (extent_end > search_start)
1648                         search_start = extent_end;
1649 next:
1650                 path->slots[0]++;
1651                 cond_resched();
1652         }
1653
1654         /*
1655          * At this point, search_start should be the end of
1656          * allocated dev extents, and when shrinking the device,
1657          * search_end may be smaller than search_start.
1658          */
1659         if (search_end > search_start) {
1660                 hole_size = search_end - search_start;
1661                 if (dev_extent_hole_check(device, &search_start, &hole_size,
1662                                           num_bytes)) {
1663                         btrfs_release_path(path);
1664                         goto again;
1665                 }
1666
1667                 if (hole_size > max_hole_size) {
1668                         max_hole_start = search_start;
1669                         max_hole_size = hole_size;
1670                 }
1671         }
1672
1673         /* See above. */
1674         if (max_hole_size < num_bytes)
1675                 ret = -ENOSPC;
1676         else
1677                 ret = 0;
1678
1679 out:
1680         btrfs_free_path(path);
1681         *start = max_hole_start;
1682         if (len)
1683                 *len = max_hole_size;
1684         return ret;
1685 }
1686
1687 int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
1688                          u64 *start, u64 *len)
1689 {
1690         /* FIXME use last free of some kind */
1691         return find_free_dev_extent_start(device, num_bytes, 0, start, len);
1692 }
1693
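/*
 * Find the dev extent on @device that covers @start, return its length in
 * @dev_extent_len and delete the corresponding item from the device tree.
 */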
1694 static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
1695                           struct btrfs_device *device,
1696                           u64 start, u64 *dev_extent_len)
1697 {
1698         struct btrfs_fs_info *fs_info = device->fs_info;
1699         struct btrfs_root *root = fs_info->dev_root;
1700         int ret;
1701         struct btrfs_path *path;
1702         struct btrfs_key key;
1703         struct btrfs_key found_key;
1704         struct extent_buffer *leaf = NULL;
1705         struct btrfs_dev_extent *extent = NULL;
1706
1707         path = btrfs_alloc_path();
1708         if (!path)
1709                 return -ENOMEM;
1710
1711         key.objectid = device->devid;
1712         key.offset = start;
1713         key.type = BTRFS_DEV_EXTENT_KEY;
1714 again:
1715         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1716         if (ret > 0) {
1717                 ret = btrfs_previous_item(root, path, key.objectid,
1718                                           BTRFS_DEV_EXTENT_KEY);
1719                 if (ret)
1720                         goto out;
1721                 leaf = path->nodes[0];
1722                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1723                 extent = btrfs_item_ptr(leaf, path->slots[0],
1724                                         struct btrfs_dev_extent);
1725                 BUG_ON(found_key.offset > start || found_key.offset +
1726                        btrfs_dev_extent_length(leaf, extent) < start);
1727                 key = found_key;
1728                 btrfs_release_path(path);
1729                 goto again;
1730         } else if (ret == 0) {
1731                 leaf = path->nodes[0];
1732                 extent = btrfs_item_ptr(leaf, path->slots[0],
1733                                         struct btrfs_dev_extent);
1734         } else {
1735                 goto out;
1736         }
1737
1738         *dev_extent_len = btrfs_dev_extent_length(leaf, extent);
1739
1740         ret = btrfs_del_item(trans, root, path);
1741         if (ret == 0)
1742                 set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
1743 out:
1744         btrfs_free_path(path);
1745         return ret;
1746 }
1747
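/*
 * Return the logical address right past the end of the last (highest) chunk
 * in the mapping tree, i.e. the offset where the next chunk can start.
 */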
1748 static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
1749 {
1750         struct extent_map_tree *em_tree;
1751         struct extent_map *em;
1752         struct rb_node *n;
1753         u64 ret = 0;
1754
1755         em_tree = &fs_info->mapping_tree;
1756         read_lock(&em_tree->lock);
1757         n = rb_last(&em_tree->map.rb_root);
1758         if (n) {
1759                 em = rb_entry(n, struct extent_map, rb_node);
1760                 ret = em->start + em->len;
1761         }
1762         read_unlock(&em_tree->lock);
1763
1764         return ret;
1765 }
1766
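/*
 * Find the highest devid currently present in the chunk tree and store the
 * next free devid in @devid_ret (1 if no device items exist yet).
 */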
1767 static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
1768                                     u64 *devid_ret)
1769 {
1770         int ret;
1771         struct btrfs_key key;
1772         struct btrfs_key found_key;
1773         struct btrfs_path *path;
1774
1775         path = btrfs_alloc_path();
1776         if (!path)
1777                 return -ENOMEM;
1778
1779         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1780         key.type = BTRFS_DEV_ITEM_KEY;
1781         key.offset = (u64)-1;
1782
1783         ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
1784         if (ret < 0)
1785                 goto error;
1786
1787         if (ret == 0) {
1788                 /* Corruption */
1789                 btrfs_err(fs_info, "corrupted chunk tree devid -1 matched");
1790                 ret = -EUCLEAN;
1791                 goto error;
1792         }
1793
1794         ret = btrfs_previous_item(fs_info->chunk_root, path,
1795                                   BTRFS_DEV_ITEMS_OBJECTID,
1796                                   BTRFS_DEV_ITEM_KEY);
1797         if (ret) {
1798                 *devid_ret = 1;
1799         } else {
1800                 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1801                                       path->slots[0]);
1802                 *devid_ret = found_key.offset + 1;
1803         }
1804         ret = 0;
1805 error:
1806         btrfs_free_path(path);
1807         return ret;
1808 }
1809
1810 /*
1811  * The device information is stored in the chunk root.
1812  * The btrfs_device struct should be fully filled in.
1813  */
1814 static int btrfs_add_dev_item(struct btrfs_trans_handle *trans,
1815                             struct btrfs_device *device)
1816 {
1817         int ret;
1818         struct btrfs_path *path;
1819         struct btrfs_dev_item *dev_item;
1820         struct extent_buffer *leaf;
1821         struct btrfs_key key;
1822         unsigned long ptr;
1823
1824         path = btrfs_alloc_path();
1825         if (!path)
1826                 return -ENOMEM;
1827
1828         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1829         key.type = BTRFS_DEV_ITEM_KEY;
1830         key.offset = device->devid;
1831
1832         ret = btrfs_insert_empty_item(trans, trans->fs_info->chunk_root, path,
1833                                       &key, sizeof(*dev_item));
1834         if (ret)
1835                 goto out;
1836
1837         leaf = path->nodes[0];
1838         dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
1839
1840         btrfs_set_device_id(leaf, dev_item, device->devid);
1841         btrfs_set_device_generation(leaf, dev_item, 0);
1842         btrfs_set_device_type(leaf, dev_item, device->type);
1843         btrfs_set_device_io_align(leaf, dev_item, device->io_align);
1844         btrfs_set_device_io_width(leaf, dev_item, device->io_width);
1845         btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
1846         btrfs_set_device_total_bytes(leaf, dev_item,
1847                                      btrfs_device_get_disk_total_bytes(device));
1848         btrfs_set_device_bytes_used(leaf, dev_item,
1849                                     btrfs_device_get_bytes_used(device));
1850         btrfs_set_device_group(leaf, dev_item, 0);
1851         btrfs_set_device_seek_speed(leaf, dev_item, 0);
1852         btrfs_set_device_bandwidth(leaf, dev_item, 0);
1853         btrfs_set_device_start_offset(leaf, dev_item, 0);
1854
1855         ptr = btrfs_device_uuid(dev_item);
1856         write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
1857         ptr = btrfs_device_fsid(dev_item);
1858         write_extent_buffer(leaf, trans->fs_info->fs_devices->metadata_uuid,
1859                             ptr, BTRFS_FSID_SIZE);
1860         btrfs_mark_buffer_dirty(leaf);
1861
1862         ret = 0;
1863 out:
1864         btrfs_free_path(path);
1865         return ret;
1866 }
1867
1868 /*
1869  * Function to update ctime/mtime for a given device path.
1870  * Mainly used for ctime/mtime based probes like libblkid.
1871  */
1872 static void update_dev_time(const char *path_name)
1873 {
1874         struct file *filp;
1875
1876         filp = filp_open(path_name, O_RDWR, 0);
1877         if (IS_ERR(filp))
1878                 return;
1879         file_update_time(filp);
1880         filp_close(filp, NULL);
1881 }
1882
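/*
 * Delete the device item of @device from the chunk tree in its own
 * transaction, committing it on success.
 */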
1883 static int btrfs_rm_dev_item(struct btrfs_device *device)
1884 {
1885         struct btrfs_root *root = device->fs_info->chunk_root;
1886         int ret;
1887         struct btrfs_path *path;
1888         struct btrfs_key key;
1889         struct btrfs_trans_handle *trans;
1890
1891         path = btrfs_alloc_path();
1892         if (!path)
1893                 return -ENOMEM;
1894
1895         trans = btrfs_start_transaction(root, 0);
1896         if (IS_ERR(trans)) {
1897                 btrfs_free_path(path);
1898                 return PTR_ERR(trans);
1899         }
1900         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1901         key.type = BTRFS_DEV_ITEM_KEY;
1902         key.offset = device->devid;
1903
1904         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1905         if (ret) {
1906                 if (ret > 0)
1907                         ret = -ENOENT;
1908                 btrfs_abort_transaction(trans, ret);
1909                 btrfs_end_transaction(trans);
1910                 goto out;
1911         }
1912
1913         ret = btrfs_del_item(trans, root, path);
1914         if (ret) {
1915                 btrfs_abort_transaction(trans, ret);
1916                 btrfs_end_transaction(trans);
1917         }
1918
1919 out:
1920         btrfs_free_path(path);
1921         if (!ret)
1922                 ret = btrfs_commit_transaction(trans);
1923         return ret;
1924 }
1925
1926 /*
1927  * Verify that @num_devices satisfies the RAID profile constraints in the whole
1928  * filesystem. It's up to the caller to adjust that number regarding eg. device
1929  * filesystem. It's up to the caller to adjust that number for e.g. an ongoing
1930  * device replace.
1931 static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info,
1932                 u64 num_devices)
1933 {
1934         u64 all_avail;
1935         unsigned seq;
1936         int i;
1937
1938         do {
1939                 seq = read_seqbegin(&fs_info->profiles_lock);
1940
1941                 all_avail = fs_info->avail_data_alloc_bits |
1942                             fs_info->avail_system_alloc_bits |
1943                             fs_info->avail_metadata_alloc_bits;
1944         } while (read_seqretry(&fs_info->profiles_lock, seq));
1945
1946         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
1947                 if (!(all_avail & btrfs_raid_array[i].bg_flag))
1948                         continue;
1949
1950                 if (num_devices < btrfs_raid_array[i].devs_min)
1951                         return btrfs_raid_array[i].mindev_error;
1952         }
1953
1954         return 0;
1955 }
1956
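/*
 * Return any device in @fs_devs other than @device that is not missing and
 * has an open bdev, or NULL if there is no such device.
 */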
1957 static struct btrfs_device * btrfs_find_next_active_device(
1958                 struct btrfs_fs_devices *fs_devs, struct btrfs_device *device)
1959 {
1960         struct btrfs_device *next_device;
1961
1962         list_for_each_entry(next_device, &fs_devs->devices, dev_list) {
1963                 if (next_device != device &&
1964                     !test_bit(BTRFS_DEV_STATE_MISSING, &next_device->dev_state)
1965                     && next_device->bdev)
1966                         return next_device;
1967         }
1968
1969         return NULL;
1970 }
1971
1972 /*
1973  * Helper function to check if the given device is part of s_bdev / latest_bdev
1974  * and replace it with the provided or the next active device. In the context
1975  * where this function is called, there should always be another active device
1976  * (or next_device) available.
1977  */
1978 void __cold btrfs_assign_next_active_device(struct btrfs_device *device,
1979                                             struct btrfs_device *next_device)
1980 {
1981         struct btrfs_fs_info *fs_info = device->fs_info;
1982
1983         if (!next_device)
1984                 next_device = btrfs_find_next_active_device(fs_info->fs_devices,
1985                                                             device);
1986         ASSERT(next_device);
1987
1988         if (fs_info->sb->s_bdev &&
1989                         (fs_info->sb->s_bdev == device->bdev))
1990                 fs_info->sb->s_bdev = next_device->bdev;
1991
1992         if (fs_info->fs_devices->latest_bdev == device->bdev)
1993                 fs_info->fs_devices->latest_bdev = next_device->bdev;
1994 }
1995
1996 /*
1997  * Return btrfs_fs_devices::num_devices excluding the device that's being
1998  * currently replaced.
1999  */
2000 static u64 btrfs_num_devices(struct btrfs_fs_info *fs_info)
2001 {
2002         u64 num_devices = fs_info->fs_devices->num_devices;
2003
2004         down_read(&fs_info->dev_replace.rwsem);
2005         if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
2006                 ASSERT(num_devices > 1);
2007                 num_devices--;
2008         }
2009         up_read(&fs_info->dev_replace.rwsem);
2010
2011         return num_devices;
2012 }
2013
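/*
 * Wipe the magic from all superblock copies on @bdev (or reset the superblock
 * log zones on zoned devices) so the device is no longer recognized as btrfs,
 * then notify udev and update the path's timestamps for libblkid.
 */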
2014 void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info,
2015                                struct block_device *bdev,
2016                                const char *device_path)
2017 {
2018         struct btrfs_super_block *disk_super;
2019         int copy_num;
2020
2021         if (!bdev)
2022                 return;
2023
2024         for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX; copy_num++) {
2025                 struct page *page;
2026                 int ret;
2027
2028                 disk_super = btrfs_read_dev_one_super(bdev, copy_num);
2029                 if (IS_ERR(disk_super))
2030                         continue;
2031
2032                 if (bdev_is_zoned(bdev)) {
2033                         btrfs_reset_sb_log_zones(bdev, copy_num);
2034                         continue;
2035                 }
2036
2037                 memset(&disk_super->magic, 0, sizeof(disk_super->magic));
2038
2039                 page = virt_to_page(disk_super);
2040                 set_page_dirty(page);
2041                 lock_page(page);
2042                 /* write_one_page() unlocks the page */
2043                 ret = write_one_page(page);
2044                 if (ret)
2045                         btrfs_warn(fs_info,
2046                                 "error clearing superblock number %d (%d)",
2047                                 copy_num, ret);
2048                 btrfs_release_disk_super(disk_super);
2049
2050         }
2051
2052         /* Notify udev that device has changed */
2053         btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
2054
2055         /* Update ctime/mtime for device path for libblkid */
2056         update_dev_time(device_path);
2057 }
2058
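/*
 * Remove a device, identified by @devid or @device_path, from a mounted
 * filesystem: relocate its data by shrinking it to zero, delete its device
 * item, drop it from the device lists and wipe its superblocks.
 */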
2059 int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path,
2060                     u64 devid)
2061 {
2062         struct btrfs_device *device;
2063         struct btrfs_fs_devices *cur_devices;
2064         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2065         u64 num_devices;
2066         int ret = 0;
2067
2068         mutex_lock(&uuid_mutex);
2069
2070         num_devices = btrfs_num_devices(fs_info);
2071
2072         ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1);
2073         if (ret)
2074                 goto out;
2075
2076         device = btrfs_find_device_by_devspec(fs_info, devid, device_path);
2077
2078         if (IS_ERR(device)) {
2079                 if (PTR_ERR(device) == -ENOENT &&
2080                     device_path && strcmp(device_path, "missing") == 0)
2081                         ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
2082                 else
2083                         ret = PTR_ERR(device);
2084                 goto out;
2085         }
2086
2087         if (btrfs_pinned_by_swapfile(fs_info, device)) {
2088                 btrfs_warn_in_rcu(fs_info,
2089                   "cannot remove device %s (devid %llu) due to active swapfile",
2090                                   rcu_str_deref(device->name), device->devid);
2091                 ret = -ETXTBSY;
2092                 goto out;
2093         }
2094
2095         if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
2096                 ret = BTRFS_ERROR_DEV_TGT_REPLACE;
2097                 goto out;
2098         }
2099
2100         if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
2101             fs_info->fs_devices->rw_devices == 1) {
2102                 ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
2103                 goto out;
2104         }
2105
2106         if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
2107                 mutex_lock(&fs_info->chunk_mutex);
2108                 list_del_init(&device->dev_alloc_list);
2109                 device->fs_devices->rw_devices--;
2110                 mutex_unlock(&fs_info->chunk_mutex);
2111         }
2112
2113         mutex_unlock(&uuid_mutex);
2114         ret = btrfs_shrink_device(device, 0);
2115         if (!ret)
2116                 btrfs_reada_remove_dev(device);
2117         mutex_lock(&uuid_mutex);
2118         if (ret)
2119                 goto error_undo;
2120
2121         /*
2122          * TODO: the superblock still includes this device in its num_devices
2123          * counter although write_all_supers() is not locked out. This
2124          * could give a filesystem state which requires a degraded mount.
2125          */
2126         ret = btrfs_rm_dev_item(device);
2127         if (ret)
2128                 goto error_undo;
2129
2130         clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
2131         btrfs_scrub_cancel_dev(device);
2132
2133         /*
2134          * the device list mutex makes sure that we don't change
2135          * the device list while someone else is writing out all
2136          * the device supers. Whoever is writing all supers, should
2137          * lock the device list mutex before getting the number of
2138          * devices in the super block (super_copy). Conversely,
2139          * whoever updates the number of devices in the super block
2140          * (super_copy) should hold the device list mutex.
2141          */
2142
2143         /*
2144          * In normal cases cur_devices == fs_devices. But when deleting a
2145          * seed device, cur_devices points to the seed's own fs_devices,
2146          * listed under fs_devices->seed_list.
2147          */
2148         cur_devices = device->fs_devices;
2149         mutex_lock(&fs_devices->device_list_mutex);
2150         list_del_rcu(&device->dev_list);
2151
2152         cur_devices->num_devices--;
2153         cur_devices->total_devices--;
2154         /* Update total_devices of the parent fs_devices if it's seed */
2155         if (cur_devices != fs_devices)
2156                 fs_devices->total_devices--;
2157
2158         if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
2159                 cur_devices->missing_devices--;
2160
2161         btrfs_assign_next_active_device(device, NULL);
2162
2163         if (device->bdev) {
2164                 cur_devices->open_devices--;
2165                 /* remove sysfs entry */
2166                 btrfs_sysfs_remove_device(device);
2167         }
2168
2169         num_devices = btrfs_super_num_devices(fs_info->super_copy) - 1;
2170         btrfs_set_super_num_devices(fs_info->super_copy, num_devices);
2171         mutex_unlock(&fs_devices->device_list_mutex);
2172
2173         /*
2174          * at this point, the device is zero sized and detached from
2175          * the devices list.  All that's left is to zero out the old
2176          * supers and free the device.
2177          */
2178         if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
2179                 btrfs_scratch_superblocks(fs_info, device->bdev,
2180                                           device->name->str);
2181
2182         btrfs_close_bdev(device);
2183         synchronize_rcu();
2184         btrfs_free_device(device);
2185
2186         if (cur_devices->open_devices == 0) {
2187                 list_del_init(&cur_devices->seed_list);
2188                 close_fs_devices(cur_devices);
2189                 free_fs_devices(cur_devices);
2190         }
2191
2192 out:
2193         mutex_unlock(&uuid_mutex);
2194         return ret;
2195
2196 error_undo:
2197         btrfs_reada_undo_remove_dev(device);
2198         if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
2199                 mutex_lock(&fs_info->chunk_mutex);
2200                 list_add(&device->dev_alloc_list,
2201                          &fs_devices->alloc_list);
2202                 device->fs_devices->rw_devices++;
2203                 mutex_unlock(&fs_info->chunk_mutex);
2204         }
2205         goto out;
2206 }
2207
2208 void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev)
2209 {
2210         struct btrfs_fs_devices *fs_devices;
2211
2212         lockdep_assert_held(&srcdev->fs_info->fs_devices->device_list_mutex);
2213
2214         /*
2215          * In case of a filesystem with no seed, srcdev->fs_devices will point
2216          * to the fs_devices of fs_info. However, when the device being replaced
2217          * is a seed device, it will point to the seed's local fs_devices. In
2218          * short, srcdev will have its correct fs_devices in both cases.
2219          */
2220         fs_devices = srcdev->fs_devices;
2221
2222         list_del_rcu(&srcdev->dev_list);
2223         list_del(&srcdev->dev_alloc_list);
2224         fs_devices->num_devices--;
2225         if (test_bit(BTRFS_DEV_STATE_MISSING, &srcdev->dev_state))
2226                 fs_devices->missing_devices--;
2227
2228         if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state))
2229                 fs_devices->rw_devices--;
2230
2231         if (srcdev->bdev)
2232                 fs_devices->open_devices--;
2233 }
2234
2235 void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device *srcdev)
2236 {
2237         struct btrfs_fs_devices *fs_devices = srcdev->fs_devices;
2238
2239         mutex_lock(&uuid_mutex);
2240
2241         btrfs_close_bdev(srcdev);
2242         synchronize_rcu();
2243         btrfs_free_device(srcdev);
2244
2245         /* If there are no devices left we'd rather delete the fs_devices */
2246         if (!fs_devices->num_devices) {
2247                 /*
2248                  * On a mounted FS, num_devices can't be zero unless it's a
2249                  * seed. In case of a seed device being replaced, the replace
2250                  * target is added to the sprout FS, so there will be no more
2251                  * devices left under the seed FS.
2252                  */
2253                 ASSERT(fs_devices->seeding);
2254
2255                 list_del_init(&fs_devices->seed_list);
2256                 close_fs_devices(fs_devices);
2257                 free_fs_devices(fs_devices);
2258         }
2259         mutex_unlock(&uuid_mutex);
2260 }
2261
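/*
 * Tear down the device replace target @tgtdev: remove its sysfs entry, drop
 * it from the device list, reassign s_bdev/latest_bdev if needed, scratch its
 * superblocks and free it.
 */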
2262 void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev)
2263 {
2264         struct btrfs_fs_devices *fs_devices = tgtdev->fs_info->fs_devices;
2265
2266         mutex_lock(&fs_devices->device_list_mutex);
2267
2268         btrfs_sysfs_remove_device(tgtdev);
2269
2270         if (tgtdev->bdev)
2271                 fs_devices->open_devices--;
2272
2273         fs_devices->num_devices--;
2274
2275         btrfs_assign_next_active_device(tgtdev, NULL);
2276
2277         list_del_rcu(&tgtdev->dev_list);
2278
2279         mutex_unlock(&fs_devices->device_list_mutex);
2280
2281         /*
2282          * The update_dev_time() within btrfs_scratch_superblocks()
2283          * may lead to a call to btrfs_show_devname() which will try
2284          * to hold device_list_mutex. And here this device
2285          * is already out of the device list, so we don't have to hold
2286          * the device_list_mutex lock.
2287          */
2288         btrfs_scratch_superblocks(tgtdev->fs_info, tgtdev->bdev,
2289                                   tgtdev->name->str);
2290
2291         btrfs_close_bdev(tgtdev);
2292         synchronize_rcu();
2293         btrfs_free_device(tgtdev);
2294 }
2295
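/*
 * Open the device at @device_path read-only, read its superblock and look up
 * the matching btrfs_device by devid and uuid in the current fs_devices.
 */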
2296 static struct btrfs_device *btrfs_find_device_by_path(
2297                 struct btrfs_fs_info *fs_info, const char *device_path)
2298 {
2299         int ret = 0;
2300         struct btrfs_super_block *disk_super;
2301         u64 devid;
2302         u8 *dev_uuid;
2303         struct block_device *bdev;
2304         struct btrfs_device *device;
2305
2306         ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ,
2307                                     fs_info->bdev_holder, 0, &bdev, &disk_super);
2308         if (ret)
2309                 return ERR_PTR(ret);
2310
2311         devid = btrfs_stack_device_id(&disk_super->dev_item);
2312         dev_uuid = disk_super->dev_item.uuid;
2313         if (btrfs_fs_incompat(fs_info, METADATA_UUID))
2314                 device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
2315                                            disk_super->metadata_uuid);
2316         else
2317                 device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
2318                                            disk_super->fsid);
2319
2320         btrfs_release_disk_super(disk_super);
2321         if (!device)
2322                 device = ERR_PTR(-ENOENT);
2323         blkdev_put(bdev, FMODE_READ);
2324         return device;
2325 }
2326
2327 /*
2328  * Lookup a device given by device id, or the path if the id is 0.
2329  */
2330 struct btrfs_device *btrfs_find_device_by_devspec(
2331                 struct btrfs_fs_info *fs_info, u64 devid,
2332                 const char *device_path)
2333 {
2334         struct btrfs_device *device;
2335
2336         if (devid) {
2337                 device = btrfs_find_device(fs_info->fs_devices, devid, NULL,
2338                                            NULL);
2339                 if (!device)
2340                         return ERR_PTR(-ENOENT);
2341                 return device;
2342         }
2343
2344         if (!device_path || !device_path[0])
2345                 return ERR_PTR(-EINVAL);
2346
2347         if (strcmp(device_path, "missing") == 0) {
2348                 /* Find first missing device */
2349                 list_for_each_entry(device, &fs_info->fs_devices->devices,
2350                                     dev_list) {
2351                         if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
2352                                      &device->dev_state) && !device->bdev)
2353                                 return device;
2354                 }
2355                 return ERR_PTR(-ENOENT);
2356         }
2357
2358         return btrfs_find_device_by_path(fs_info, device_path);
2359 }
2360
2361 /*
2362  * Does all the dirty work required for changing the filesystem's UUID.
2363  */
2364 static int btrfs_prepare_sprout(struct btrfs_fs_info *fs_info)
2365 {
2366         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2367         struct btrfs_fs_devices *old_devices;
2368         struct btrfs_fs_devices *seed_devices;
2369         struct btrfs_super_block *disk_super = fs_info->super_copy;
2370         struct btrfs_device *device;
2371         u64 super_flags;
2372
2373         lockdep_assert_held(&uuid_mutex);
2374         if (!fs_devices->seeding)
2375                 return -EINVAL;
2376
2377         /*
2378          * Private copy of the seed devices, anchored at
2379          * fs_info->fs_devices->seed_list
2380          */
2381         seed_devices = alloc_fs_devices(NULL, NULL);
2382         if (IS_ERR(seed_devices))
2383                 return PTR_ERR(seed_devices);
2384
2385         /*
2386          * It's necessary to retain a copy of the original seed fs_devices in
2387          * fs_uuids so that filesystems which have been seeded can successfully
2388  * reference the seed device from open_seed_devices. This also supports
2389  * multiple seed filesystems.
2390          */
2391         old_devices = clone_fs_devices(fs_devices);
2392         if (IS_ERR(old_devices)) {
2393                 kfree(seed_devices);
2394                 return PTR_ERR(old_devices);
2395         }
2396
2397         list_add(&old_devices->fs_list, &fs_uuids);
2398
2399         memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
2400         seed_devices->opened = 1;
2401         INIT_LIST_HEAD(&seed_devices->devices);
2402         INIT_LIST_HEAD(&seed_devices->alloc_list);
2403         mutex_init(&seed_devices->device_list_mutex);
2404
2405         mutex_lock(&fs_devices->device_list_mutex);
2406         list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
2407                               synchronize_rcu);
2408         list_for_each_entry(device, &seed_devices->devices, dev_list)
2409                 device->fs_devices = seed_devices;
2410
2411         fs_devices->seeding = false;
2412         fs_devices->num_devices = 0;
2413         fs_devices->open_devices = 0;
2414         fs_devices->missing_devices = 0;
2415         fs_devices->rotating = false;
2416         list_add(&seed_devices->seed_list, &fs_devices->seed_list);
2417
2418         generate_random_uuid(fs_devices->fsid);
2419         memcpy(fs_devices->metadata_uuid, fs_devices->fsid, BTRFS_FSID_SIZE);
2420         memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
2421         mutex_unlock(&fs_devices->device_list_mutex);
2422
2423         super_flags = btrfs_super_flags(disk_super) &
2424                       ~BTRFS_SUPER_FLAG_SEEDING;
2425         btrfs_set_super_flags(disk_super, super_flags);
2426
2427         return 0;
2428 }
2429
2430 /*
2431  * Store the expected generation for seed devices in device items.
2432  */
2433 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans)
2434 {
2435         struct btrfs_fs_info *fs_info = trans->fs_info;
2436         struct btrfs_root *root = fs_info->chunk_root;
2437         struct btrfs_path *path;
2438         struct extent_buffer *leaf;
2439         struct btrfs_dev_item *dev_item;
2440         struct btrfs_device *device;
2441         struct btrfs_key key;
2442         u8 fs_uuid[BTRFS_FSID_SIZE];
2443         u8 dev_uuid[BTRFS_UUID_SIZE];
2444         u64 devid;
2445         int ret;
2446
2447         path = btrfs_alloc_path();
2448         if (!path)
2449                 return -ENOMEM;
2450
2451         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2452         key.offset = 0;
2453         key.type = BTRFS_DEV_ITEM_KEY;
2454
2455         while (1) {
2456                 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2457                 if (ret < 0)
2458                         goto error;
2459
2460                 leaf = path->nodes[0];
2461 next_slot:
2462                 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
2463                         ret = btrfs_next_leaf(root, path);
2464                         if (ret > 0)
2465                                 break;
2466                         if (ret < 0)
2467                                 goto error;
2468                         leaf = path->nodes[0];
2469                         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2470                         btrfs_release_path(path);
2471                         continue;
2472                 }
2473
2474                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2475                 if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
2476                     key.type != BTRFS_DEV_ITEM_KEY)
2477                         break;
2478
2479                 dev_item = btrfs_item_ptr(leaf, path->slots[0],
2480                                           struct btrfs_dev_item);
2481                 devid = btrfs_device_id(leaf, dev_item);
2482                 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
2483                                    BTRFS_UUID_SIZE);
2484                 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
2485                                    BTRFS_FSID_SIZE);
2486                 device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
2487                                            fs_uuid);
2488                 BUG_ON(!device); /* Logic error */
2489
2490                 if (device->fs_devices->seeding) {
2491                         btrfs_set_device_generation(leaf, dev_item,
2492                                                     device->generation);
2493                         btrfs_mark_buffer_dirty(leaf);
2494                 }
2495
2496                 path->slots[0]++;
2497                 goto next_slot;
2498         }
2499         ret = 0;
2500 error:
2501         btrfs_free_path(path);
2502         return ret;
2503 }
2504
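/*
 * Add the block device at @device_path to the mounted filesystem: allocate
 * and initialize a new btrfs_device, insert its device item, and if the
 * filesystem was a seed, sprout a new writable filesystem on top of it.
 */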
2505 int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path)
2506 {
2507         struct btrfs_root *root = fs_info->dev_root;
2508         struct request_queue *q;
2509         struct btrfs_trans_handle *trans;
2510         struct btrfs_device *device;
2511         struct block_device *bdev;
2512         struct super_block *sb = fs_info->sb;
2513         struct rcu_string *name;
2514         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2515         u64 orig_super_total_bytes;
2516         u64 orig_super_num_devices;
2517         int seeding_dev = 0;
2518         int ret = 0;
2519         bool locked = false;
2520
2521         if (sb_rdonly(sb) && !fs_devices->seeding)
2522                 return -EROFS;
2523
2524         bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
2525                                   fs_info->bdev_holder);
2526         if (IS_ERR(bdev))
2527                 return PTR_ERR(bdev);
2528
2529         if (!btrfs_check_device_zone_type(fs_info, bdev)) {
2530                 ret = -EINVAL;
2531                 goto error;
2532         }
2533
2534         if (fs_devices->seeding) {
2535                 seeding_dev = 1;
2536                 down_write(&sb->s_umount);
2537                 mutex_lock(&uuid_mutex);
2538                 locked = true;
2539         }
2540
2541         sync_blockdev(bdev);
2542
2543         rcu_read_lock();
2544         list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) {
2545                 if (device->bdev == bdev) {
2546                         ret = -EEXIST;
2547                         rcu_read_unlock();
2548                         goto error;
2549                 }
2550         }
2551         rcu_read_unlock();
2552
2553         device = btrfs_alloc_device(fs_info, NULL, NULL);
2554         if (IS_ERR(device)) {
2555                 /* we can safely leave the fs_devices entry around */
2556                 ret = PTR_ERR(device);
2557                 goto error;
2558         }
2559
2560         name = rcu_string_strdup(device_path, GFP_KERNEL);
2561         if (!name) {
2562                 ret = -ENOMEM;
2563                 goto error_free_device;
2564         }
2565         rcu_assign_pointer(device->name, name);
2566
2567         device->fs_info = fs_info;
2568         device->bdev = bdev;
2569
2570         ret = btrfs_get_dev_zone_info(device);
2571         if (ret)
2572                 goto error_free_device;
2573
2574         trans = btrfs_start_transaction(root, 0);
2575         if (IS_ERR(trans)) {
2576                 ret = PTR_ERR(trans);
2577                 goto error_free_zone;
2578         }
2579
2580         q = bdev_get_queue(bdev);
2581         set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
2582         device->generation = trans->transid;
2583         device->io_width = fs_info->sectorsize;
2584         device->io_align = fs_info->sectorsize;
2585         device->sector_size = fs_info->sectorsize;
2586         device->total_bytes = round_down(i_size_read(bdev->bd_inode),
2587                                          fs_info->sectorsize);
2588         device->disk_total_bytes = device->total_bytes;
2589         device->commit_total_bytes = device->total_bytes;
2590         set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
2591         clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
2592         device->mode = FMODE_EXCL;
2593         device->dev_stats_valid = 1;
2594         set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE);
2595
2596         if (seeding_dev) {
2597                 btrfs_clear_sb_rdonly(sb);
2598                 ret = btrfs_prepare_sprout(fs_info);
2599                 if (ret) {
2600                         btrfs_abort_transaction(trans, ret);
2601                         goto error_trans;
2602                 }
2603         }
2604
2605         device->fs_devices = fs_devices;
2606
2607         mutex_lock(&fs_devices->device_list_mutex);
2608         mutex_lock(&fs_info->chunk_mutex);
2609         list_add_rcu(&device->dev_list, &fs_devices->devices);
2610         list_add(&device->dev_alloc_list, &fs_devices->alloc_list);
2611         fs_devices->num_devices++;
2612         fs_devices->open_devices++;
2613         fs_devices->rw_devices++;
2614         fs_devices->total_devices++;
2615         fs_devices->total_rw_bytes += device->total_bytes;
2616
2617         atomic64_add(device->total_bytes, &fs_info->free_chunk_space);
2618
2619         if (!blk_queue_nonrot(q))
2620                 fs_devices->rotating = true;
2621
2622         orig_super_total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
2623         btrfs_set_super_total_bytes(fs_info->super_copy,
2624                 round_down(orig_super_total_bytes + device->total_bytes,
2625                            fs_info->sectorsize));
2626
2627         orig_super_num_devices = btrfs_super_num_devices(fs_info->super_copy);
2628         btrfs_set_super_num_devices(fs_info->super_copy,
2629                                     orig_super_num_devices + 1);
2630
2631         /*
2632          * we've got more storage, clear any full flags on the space
2633          * infos
2634          */
2635         btrfs_clear_space_info_full(fs_info);
2636
2637         mutex_unlock(&fs_info->chunk_mutex);
2638
2639         /* Add sysfs device entry */
2640         btrfs_sysfs_add_device(device);
2641
2642         mutex_unlock(&fs_devices->device_list_mutex);
2643
2644         if (seeding_dev) {
2645                 mutex_lock(&fs_info->chunk_mutex);
2646                 ret = init_first_rw_device(trans);
2647                 mutex_unlock(&fs_info->chunk_mutex);
2648                 if (ret) {
2649                         btrfs_abort_transaction(trans, ret);
2650                         goto error_sysfs;
2651                 }
2652         }
2653
2654         ret = btrfs_add_dev_item(trans, device);
2655         if (ret) {
2656                 btrfs_abort_transaction(trans, ret);
2657                 goto error_sysfs;
2658         }
2659
2660         if (seeding_dev) {
2661                 ret = btrfs_finish_sprout(trans);
2662                 if (ret) {
2663                         btrfs_abort_transaction(trans, ret);
2664                         goto error_sysfs;
2665                 }
2666
2667                 /*
2668                  * fs_devices now represents the newly sprouted filesystem and
2669                  * its fsid has been changed by btrfs_prepare_sprout
2670                  */
2671                 btrfs_sysfs_update_sprout_fsid(fs_devices);
2672         }
2673
2674         ret = btrfs_commit_transaction(trans);
2675
2676         if (seeding_dev) {
2677                 mutex_unlock(&uuid_mutex);
2678                 up_write(&sb->s_umount);
2679                 locked = false;
2680
2681                 if (ret) /* transaction commit */
2682                         return ret;
2683
2684                 ret = btrfs_relocate_sys_chunks(fs_info);
2685                 if (ret < 0)
2686                         btrfs_handle_fs_error(fs_info, ret,
2687                                     "Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command.");
2688                 trans = btrfs_attach_transaction(root);
2689                 if (IS_ERR(trans)) {
2690                         if (PTR_ERR(trans) == -ENOENT)
2691                                 return 0;
2692                         ret = PTR_ERR(trans);
2693                         trans = NULL;
2694                         goto error_sysfs;
2695                 }
2696                 ret = btrfs_commit_transaction(trans);
2697         }
2698
2699         /*
2700          * Now that we have written a new super block to this device, check all
2701          * other fs_devices lists to see whether device_path alienates any other
2702          * scanned device.
2703          * We can ignore the return value as it typically returns -EINVAL and
2704          * only succeeds if the device was an alien.
2705          */
2706         btrfs_forget_devices(device_path);
2707
2708         /* Update ctime/mtime for blkid or udev */
2709         update_dev_time(device_path);
2710
2711         return ret;
2712
2713 error_sysfs:
2714         btrfs_sysfs_remove_device(device);
2715         mutex_lock(&fs_info->fs_devices->device_list_mutex);
2716         mutex_lock(&fs_info->chunk_mutex);
2717         list_del_rcu(&device->dev_list);
2718         list_del(&device->dev_alloc_list);
2719         fs_info->fs_devices->num_devices--;
2720         fs_info->fs_devices->open_devices--;
2721         fs_info->fs_devices->rw_devices--;
2722         fs_info->fs_devices->total_devices--;
2723         fs_info->fs_devices->total_rw_bytes -= device->total_bytes;
2724         atomic64_sub(device->total_bytes, &fs_info->free_chunk_space);
2725         btrfs_set_super_total_bytes(fs_info->super_copy,
2726                                     orig_super_total_bytes);
2727         btrfs_set_super_num_devices(fs_info->super_copy,
2728                                     orig_super_num_devices);
2729         mutex_unlock(&fs_info->chunk_mutex);
2730         mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2731 error_trans:
2732         if (seeding_dev)
2733                 btrfs_set_sb_rdonly(sb);
2734         if (trans)
2735                 btrfs_end_transaction(trans);
2736 error_free_zone:
2737         btrfs_destroy_dev_zone_info(device);
2738 error_free_device:
2739         btrfs_free_device(device);
2740 error:
2741         blkdev_put(bdev, FMODE_EXCL);
2742         if (locked) {
2743                 mutex_unlock(&uuid_mutex);
2744                 up_write(&sb->s_umount);
2745         }
2746         return ret;
2747 }
2748
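/*
 * Write the in-memory state of @device (type, I/O characteristics and sizes)
 * back into its device item in the chunk tree.
 */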
2749 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
2750                                         struct btrfs_device *device)
2751 {
2752         int ret;
2753         struct btrfs_path *path;
2754         struct btrfs_root *root = device->fs_info->chunk_root;
2755         struct btrfs_dev_item *dev_item;
2756         struct extent_buffer *leaf;
2757         struct btrfs_key key;
2758
2759         path = btrfs_alloc_path();
2760         if (!path)
2761                 return -ENOMEM;
2762
2763         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2764         key.type = BTRFS_DEV_ITEM_KEY;
2765         key.offset = device->devid;
2766
2767         ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2768         if (ret < 0)
2769                 goto out;
2770
2771         if (ret > 0) {
2772                 ret = -ENOENT;
2773                 goto out;
2774         }
2775
2776         leaf = path->nodes[0];
2777         dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
2778
2779         btrfs_set_device_id(leaf, dev_item, device->devid);
2780         btrfs_set_device_type(leaf, dev_item, device->type);
2781         btrfs_set_device_io_align(leaf, dev_item, device->io_align);
2782         btrfs_set_device_io_width(leaf, dev_item, device->io_width);
2783         btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
2784         btrfs_set_device_total_bytes(leaf, dev_item,
2785                                      btrfs_device_get_disk_total_bytes(device));
2786         btrfs_set_device_bytes_used(leaf, dev_item,
2787                                     btrfs_device_get_bytes_used(device));
2788         btrfs_mark_buffer_dirty(leaf);
2789
2790 out:
2791         btrfs_free_path(path);
2792         return ret;
2793 }
2794
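/*
 * Grow @device to @new_size (rounded down to the sector size). The new size
 * must be larger than the current one. Updates the superblock total bytes and
 * queues the device for a size update at transaction commit.
 */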
2795 int btrfs_grow_device(struct btrfs_trans_handle *trans,
2796                       struct btrfs_device *device, u64 new_size)
2797 {
2798         struct btrfs_fs_info *fs_info = device->fs_info;
2799         struct btrfs_super_block *super_copy = fs_info->super_copy;
2800         u64 old_total;
2801         u64 diff;
2802
2803         if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
2804                 return -EACCES;
2805
2806         new_size = round_down(new_size, fs_info->sectorsize);
2807
2808         mutex_lock(&fs_info->chunk_mutex);
2809         old_total = btrfs_super_total_bytes(super_copy);
2810         diff = round_down(new_size - device->total_bytes, fs_info->sectorsize);
2811
2812         if (new_size <= device->total_bytes ||
2813             test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
2814                 mutex_unlock(&fs_info->chunk_mutex);
2815                 return -EINVAL;
2816         }
2817
2818         btrfs_set_super_total_bytes(super_copy,
2819                         round_down(old_total + diff, fs_info->sectorsize));
2820         device->fs_devices->total_rw_bytes += diff;
2821
2822         btrfs_device_set_total_bytes(device, new_size);
2823         btrfs_device_set_disk_total_bytes(device, new_size);
2824         btrfs_clear_space_info_full(device->fs_info);
2825         if (list_empty(&device->post_commit_list))
2826                 list_add_tail(&device->post_commit_list,
2827                               &trans->transaction->dev_update_list);
2828         mutex_unlock(&fs_info->chunk_mutex);
2829
2830         return btrfs_update_device(trans, device);
2831 }
2832
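     /* Delete the chunk item at @chunk_offset from the chunk tree. */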
2833 static int btrfs_free_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
2834 {
2835         struct btrfs_fs_info *fs_info = trans->fs_info;
2836         struct btrfs_root *root = fs_info->chunk_root;
2837         int ret;
2838         struct btrfs_path *path;
2839         struct btrfs_key key;
2840
2841         path = btrfs_alloc_path();
2842         if (!path)
2843                 return -ENOMEM;
2844
2845         key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2846         key.offset = chunk_offset;
2847         key.type = BTRFS_CHUNK_ITEM_KEY;
2848
2849         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2850         if (ret < 0)
2851                 goto out;
2852         else if (ret > 0) { /* Logic error or corruption */
2853                 btrfs_handle_fs_error(fs_info, -ENOENT,
2854                                       "Failed lookup while freeing chunk.");
2855                 ret = -ENOENT;
2856                 goto out;
2857         }
2858
2859         ret = btrfs_del_item(trans, root, path);
2860         if (ret < 0)
2861                 btrfs_handle_fs_error(fs_info, ret,
2862                                       "Failed to delete chunk item.");
2863 out:
2864         btrfs_free_path(path);
2865         return ret;
2866 }
2867
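     /*
      * Remove the chunk at @chunk_offset from the superblock's sys_chunk_array,
      * which stores (disk key, chunk item) pairs packed back to back.  Caller
      * must hold fs_info->chunk_mutex.
      */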
2868 static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
2869 {
2870         struct btrfs_super_block *super_copy = fs_info->super_copy;
2871         struct btrfs_disk_key *disk_key;
2872         struct btrfs_chunk *chunk;
2873         u8 *ptr;
2874         int ret = 0;
2875         u32 num_stripes;
2876         u32 array_size;
2877         u32 len = 0;
2878         u32 cur;
2879         struct btrfs_key key;
2880
2881         lockdep_assert_held(&fs_info->chunk_mutex);
2882         array_size = btrfs_super_sys_array_size(super_copy);
2883
2884         ptr = super_copy->sys_chunk_array;
2885         cur = 0;
2886
2887         while (cur < array_size) {
2888                 disk_key = (struct btrfs_disk_key *)ptr;
2889                 btrfs_disk_key_to_cpu(&key, disk_key);
2890
2891                 len = sizeof(*disk_key);
2892
2893                 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
2894                         chunk = (struct btrfs_chunk *)(ptr + len);
2895                         num_stripes = btrfs_stack_chunk_num_stripes(chunk);
2896                         len += btrfs_chunk_item_size(num_stripes);
2897                 } else {
2898                         ret = -EIO;
2899                         break;
2900                 }
2901                 if (key.objectid == BTRFS_FIRST_CHUNK_TREE_OBJECTID &&
2902                     key.offset == chunk_offset) {
2903                         memmove(ptr, ptr + len, array_size - (cur + len));
2904                         array_size -= len;
2905                         btrfs_set_super_sys_array_size(super_copy, array_size);
2906                 } else {
2907                         ptr += len;
2908                         cur += len;
2909                 }
2910         }
2911         return ret;
2912 }
2913
2914 /*
2915  * btrfs_get_chunk_map() - Find the mapping containing the given logical extent.
2916  * @logical: Logical block offset in bytes.
2917  * @length: Length of extent in bytes.
2918  *
2919  * Return: Chunk mapping or ERR_PTR.
2920  */
2921 struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
2922                                        u64 logical, u64 length)
2923 {
2924         struct extent_map_tree *em_tree;
2925         struct extent_map *em;
2926
2927         em_tree = &fs_info->mapping_tree;
2928         read_lock(&em_tree->lock);
2929         em = lookup_extent_mapping(em_tree, logical, length);
2930         read_unlock(&em_tree->lock);
2931
2932         if (!em) {
2933                 btrfs_crit(fs_info, "unable to find logical %llu length %llu",
2934                            logical, length);
2935                 return ERR_PTR(-EINVAL);
2936         }
2937
2938         if (em->start > logical || em->start + em->len < logical) {
2939                 btrfs_crit(fs_info,
2940                            "found a bad mapping, wanted %llu-%llu, found %llu-%llu",
2941                            logical, length, em->start, em->start + em->len);
2942                 free_extent_map(em);
2943                 return ERR_PTR(-EINVAL);
2944         }
2945
2946         /* callers are responsible for dropping em's ref. */
2947         return em;
2948 }
2949
2950 static int remove_chunk_item(struct btrfs_trans_handle *trans,
2951                              struct map_lookup *map, u64 chunk_offset)
2952 {
2953         int i;
2954
2955         /*
2956          * Removing chunk items and updating the device items in the chunks btree
2957          * requires holding the chunk_mutex.
2958          * See the comment at btrfs_chunk_alloc() for the details.
2959          */
2960         lockdep_assert_held(&trans->fs_info->chunk_mutex);
2961
2962         for (i = 0; i < map->num_stripes; i++) {
2963                 int ret;
2964
2965                 ret = btrfs_update_device(trans, map->stripes[i].dev);
2966                 if (ret)
2967                         return ret;
2968         }
2969
2970         return btrfs_free_chunk(trans, chunk_offset);
2971 }
2972
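     /*
      * Remove the chunk at @chunk_offset: free its device extents, delete the
      * chunk item (and the sys_chunk_array entry for SYSTEM chunks) and remove
      * the corresponding block group.
      */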
2973 int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
2974 {
2975         struct btrfs_fs_info *fs_info = trans->fs_info;
2976         struct extent_map *em;
2977         struct map_lookup *map;
2978         u64 dev_extent_len = 0;
2979         int i, ret = 0;
2980         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2981
2982         em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
2983         if (IS_ERR(em)) {
2984                 /*
2985                  * This is a logic error, but we don't want to just rely on the
2986                  * user having built with ASSERT enabled, so if ASSERT doesn't
2987                  * do anything we still error out.
2988                  */
2989                 ASSERT(0);
2990                 return PTR_ERR(em);
2991         }
2992         map = em->map_lookup;
2993
2994         /*
2995          * First delete the device extent items from the devices btree.
2996          * We take the device_list_mutex to avoid racing with the finishing phase
2997          * of a device replace operation. See the comment below before acquiring
2998          * fs_info->chunk_mutex. Note that here we do not acquire the chunk_mutex
2999          * because that can result in a deadlock when deleting the device extent
3000          * items from the devices btree - COWing an extent buffer from the btree
3001          * may result in allocating a new metadata chunk, which would attempt to
3002          * lock fs_info->chunk_mutex again.
3003          */
3004         mutex_lock(&fs_devices->device_list_mutex);
3005         for (i = 0; i < map->num_stripes; i++) {
3006                 struct btrfs_device *device = map->stripes[i].dev;
3007                 ret = btrfs_free_dev_extent(trans, device,
3008                                             map->stripes[i].physical,
3009                                             &dev_extent_len);
3010                 if (ret) {
3011                         mutex_unlock(&fs_devices->device_list_mutex);
3012                         btrfs_abort_transaction(trans, ret);
3013                         goto out;
3014                 }
3015
3016                 if (device->bytes_used > 0) {
3017                         mutex_lock(&fs_info->chunk_mutex);
3018                         btrfs_device_set_bytes_used(device,
3019                                         device->bytes_used - dev_extent_len);
3020                         atomic64_add(dev_extent_len, &fs_info->free_chunk_space);
3021                         btrfs_clear_space_info_full(fs_info);
3022                         mutex_unlock(&fs_info->chunk_mutex);
3023                 }
3024         }
3025         mutex_unlock(&fs_devices->device_list_mutex);
3026
3027         /*
3028          * We acquire fs_info->chunk_mutex for 2 reasons:
3029          *
3030          * 1) Just like with the first phase of the chunk allocation, we must
3031          *    reserve system space, do all chunk btree updates and deletions, and
3032          *    update the system chunk array in the superblock while holding this
3033          *    mutex. This is for similar reasons as explained on the comment at
3034          *    the top of btrfs_chunk_alloc();
3035          *
3036          * 2) Prevent races with the final phase of a device replace operation
3037          *    that replaces the device object associated with the map's stripes,
3038          *    because the device object's id can change at any time during that
3039          *    final phase of the device replace operation
3040          *    (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the
3041          *    replaced device and then see it with an ID of
3042          *    BTRFS_DEV_REPLACE_DEVID, which would cause a failure when updating
3043          *    the device item, which does not exist in the chunk btree.
3044          *    The finishing phase of device replace acquires both the
3045          *    device_list_mutex and the chunk_mutex, in that order, so we are
3046          *    safe by just acquiring the chunk_mutex.
3047          */
3048         trans->removing_chunk = true;
3049         mutex_lock(&fs_info->chunk_mutex);
3050
3051         check_system_chunk(trans, map->type);
3052
3053         ret = remove_chunk_item(trans, map, chunk_offset);
3054         /*
3055          * Normally we should not get -ENOSPC since we reserved space before
3056          * through the call to check_system_chunk().
3057          *
3058          * Despite our system space_info having enough free space, we may not
3059          * be able to allocate extents from its block groups, because they all
3060          * have an incompatible profile, which forces us to allocate a new system
3061          * block group with the right profile. Or, right after we called
3062          * check_system_chunk() above, a scrub turned the only system block group
3063          * with enough free space into RO mode.
3064          * This is explained in more detail at do_chunk_alloc().
3065          *
3066          * So if we get -ENOSPC, allocate a new system chunk and retry once.
3067          */
3068         if (ret == -ENOSPC) {
3069                 const u64 sys_flags = btrfs_system_alloc_profile(fs_info);
3070                 struct btrfs_block_group *sys_bg;
3071
3072                 sys_bg = btrfs_alloc_chunk(trans, sys_flags);
3073                 if (IS_ERR(sys_bg)) {
3074                         ret = PTR_ERR(sys_bg);
3075                         btrfs_abort_transaction(trans, ret);
3076                         goto out;
3077                 }
3078
3079                 ret = btrfs_chunk_alloc_add_chunk_item(trans, sys_bg);
3080                 if (ret) {
3081                         btrfs_abort_transaction(trans, ret);
3082                         goto out;
3083                 }
3084
3085                 ret = remove_chunk_item(trans, map, chunk_offset);
3086                 if (ret) {
3087                         btrfs_abort_transaction(trans, ret);
3088                         goto out;
3089                 }
3090         } else if (ret) {
3091                 btrfs_abort_transaction(trans, ret);
3092                 goto out;
3093         }
3094
3095         trace_btrfs_chunk_free(fs_info, map, chunk_offset, em->len);
3096
3097         if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
3098                 ret = btrfs_del_sys_chunk(fs_info, chunk_offset);
3099                 if (ret) {
3100                         btrfs_abort_transaction(trans, ret);
3101                         goto out;
3102                 }
3103         }
3104
3105         mutex_unlock(&fs_info->chunk_mutex);
3106         trans->removing_chunk = false;
3107
3108         /*
3109          * We are done with chunk btree updates and deletions, so release the
3110          * system space we previously reserved (with check_system_chunk()).
3111          */
3112         btrfs_trans_release_chunk_metadata(trans);
3113
3114         ret = btrfs_remove_block_group(trans, chunk_offset, em);
3115         if (ret) {
3116                 btrfs_abort_transaction(trans, ret);
3117                 goto out;
3118         }
3119
3120 out:
3121         if (trans->removing_chunk) {
3122                 mutex_unlock(&fs_info->chunk_mutex);
3123                 trans->removing_chunk = false;
3124         }
3125         /* once for us */
3126         free_extent_map(em);
3127         return ret;
3128 }
3129
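     /*
      * Relocate all extents of the chunk at @chunk_offset and then remove the
      * chunk itself.  Caller must hold fs_info->reclaim_bgs_lock, see below.
      */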
3130 int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
3131 {
3132         struct btrfs_root *root = fs_info->chunk_root;
3133         struct btrfs_trans_handle *trans;
3134         struct btrfs_block_group *block_group;
3135         u64 length;
3136         int ret;
3137
3138         /*
3139          * Prevent races with automatic removal of unused block groups.
3140          * After we relocate and before we remove the chunk with offset
3141          * chunk_offset, automatic removal of the block group can kick in,
3142          * resulting in a failure when calling btrfs_remove_chunk() below.
3143          *
3144          * Make sure to acquire this mutex before doing a tree search (dev
3145          * or chunk trees) to find chunks. Otherwise the cleaner kthread might
3146          * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after
3147          * we release the path used to search the chunk/dev tree and before
3148          * the current task acquires this mutex and calls us.
3149          */
3150         lockdep_assert_held(&fs_info->reclaim_bgs_lock);
3151
3152         /* step one, relocate all the extents inside this chunk */
3153         btrfs_scrub_pause(fs_info);
3154         ret = btrfs_relocate_block_group(fs_info, chunk_offset);
3155         btrfs_scrub_continue(fs_info);
3156         if (ret)
3157                 return ret;
3158
3159         block_group = btrfs_lookup_block_group(fs_info, chunk_offset);
3160         if (!block_group)
3161                 return -ENOENT;
3162         btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group);
3163         length = block_group->length;
3164         btrfs_put_block_group(block_group);
3165
3166         /*
3167          * On a zoned file system, discard the whole block group; this will
3168          * trigger a REQ_OP_ZONE_RESET operation on the device zone. If
3169          * resetting the zone fails, don't treat it as a fatal problem from the
3170          * filesystem's point of view.
3171          */
3172         if (btrfs_is_zoned(fs_info)) {
3173                 ret = btrfs_discard_extent(fs_info, chunk_offset, length, NULL);
3174                 if (ret)
3175                         btrfs_info(fs_info,
3176                                 "failed to reset zone %llu after relocation",
3177                                 chunk_offset);
3178         }
3179
3180         trans = btrfs_start_trans_remove_block_group(root->fs_info,
3181                                                      chunk_offset);
3182         if (IS_ERR(trans)) {
3183                 ret = PTR_ERR(trans);
3184                 btrfs_handle_fs_error(root->fs_info, ret, NULL);
3185                 return ret;
3186         }
3187
3188         /*
3189          * step two, delete the device extents and the
3190          * chunk tree entries
3191          */
3192         ret = btrfs_remove_chunk(trans, chunk_offset);
3193         btrfs_end_transaction(trans);
3194         return ret;
3195 }
3196
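     /*
      * Walk the chunk tree backwards and relocate every SYSTEM chunk, retrying
      * the ones that failed with -ENOSPC once more at the end.
      */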
3197 static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info)
3198 {
3199         struct btrfs_root *chunk_root = fs_info->chunk_root;
3200         struct btrfs_path *path;
3201         struct extent_buffer *leaf;
3202         struct btrfs_chunk *chunk;
3203         struct btrfs_key key;
3204         struct btrfs_key found_key;
3205         u64 chunk_type;
3206         bool retried = false;
3207         int failed = 0;
3208         int ret;
3209
3210         path = btrfs_alloc_path();
3211         if (!path)
3212                 return -ENOMEM;
3213
3214 again:
3215         key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3216         key.offset = (u64)-1;
3217         key.type = BTRFS_CHUNK_ITEM_KEY;
3218
3219         while (1) {
3220                 mutex_lock(&fs_info->reclaim_bgs_lock);
3221                 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
3222                 if (ret < 0) {
3223                         mutex_unlock(&fs_info->reclaim_bgs_lock);
3224                         goto error;
3225                 }
3226                 BUG_ON(ret == 0); /* Corruption */
3227
3228                 ret = btrfs_previous_item(chunk_root, path, key.objectid,
3229                                           key.type);
3230                 if (ret)
3231                         mutex_unlock(&fs_info->reclaim_bgs_lock);
3232                 if (ret < 0)
3233                         goto error;
3234                 if (ret > 0)
3235                         break;
3236
3237                 leaf = path->nodes[0];
3238                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3239
3240                 chunk = btrfs_item_ptr(leaf, path->slots[0],
3241                                        struct btrfs_chunk);
3242                 chunk_type = btrfs_chunk_type(leaf, chunk);
3243                 btrfs_release_path(path);
3244
3245                 if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
3246                         ret = btrfs_relocate_chunk(fs_info, found_key.offset);
3247                         if (ret == -ENOSPC)
3248                                 failed++;
3249                         else
3250                                 BUG_ON(ret);
3251                 }
3252                 mutex_unlock(&fs_info->reclaim_bgs_lock);
3253
3254                 if (found_key.offset == 0)
3255                         break;
3256                 key.offset = found_key.offset - 1;
3257         }
3258         ret = 0;
3259         if (failed && !retried) {
3260                 failed = 0;
3261                 retried = true;
3262                 goto again;
3263         } else if (WARN_ON(failed && retried)) {
3264                 ret = -ENOSPC;
3265         }
3266 error:
3267         btrfs_free_path(path);
3268         return ret;
3269 }
3270
3271 /*
3272  * Return 1 : a data chunk was allocated successfully,
3273  * return <0: an error occurred while allocating a data chunk,
3274  * return 0 : there is no need to allocate a data chunk.
3275  */
3276 static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info,
3277                                       u64 chunk_offset)
3278 {
3279         struct btrfs_block_group *cache;
3280         u64 bytes_used;
3281         u64 chunk_type;
3282
3283         cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3284         ASSERT(cache);
3285         chunk_type = cache->flags;
3286         btrfs_put_block_group(cache);
3287
3288         if (!(chunk_type & BTRFS_BLOCK_GROUP_DATA))
3289                 return 0;
3290
3291         spin_lock(&fs_info->data_sinfo->lock);
3292         bytes_used = fs_info->data_sinfo->bytes_used;
3293         spin_unlock(&fs_info->data_sinfo->lock);
3294
3295         if (!bytes_used) {
3296                 struct btrfs_trans_handle *trans;
3297                 int ret;
3298
3299                 trans = btrfs_join_transaction(fs_info->tree_root);
3300                 if (IS_ERR(trans))
3301                         return PTR_ERR(trans);
3302
3303                 ret = btrfs_force_chunk_alloc(trans, BTRFS_BLOCK_GROUP_DATA);
3304                 btrfs_end_transaction(trans);
3305                 if (ret < 0)
3306                         return ret;
3307                 return 1;
3308         }
3309
3310         return 0;
3311 }
3312
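     /*
      * Store the balance control item (BTRFS_BALANCE_OBJECTID) in the tree root
      * so that an interrupted balance can be resumed later.
      */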
3313 static int insert_balance_item(struct btrfs_fs_info *fs_info,
3314                                struct btrfs_balance_control *bctl)
3315 {
3316         struct btrfs_root *root = fs_info->tree_root;
3317         struct btrfs_trans_handle *trans;
3318         struct btrfs_balance_item *item;
3319         struct btrfs_disk_balance_args disk_bargs;
3320         struct btrfs_path *path;
3321         struct extent_buffer *leaf;
3322         struct btrfs_key key;
3323         int ret, err;
3324
3325         path = btrfs_alloc_path();
3326         if (!path)
3327                 return -ENOMEM;
3328
3329         trans = btrfs_start_transaction(root, 0);
3330         if (IS_ERR(trans)) {
3331                 btrfs_free_path(path);
3332                 return PTR_ERR(trans);
3333         }
3334
3335         key.objectid = BTRFS_BALANCE_OBJECTID;
3336         key.type = BTRFS_TEMPORARY_ITEM_KEY;
3337         key.offset = 0;
3338
3339         ret = btrfs_insert_empty_item(trans, root, path, &key,
3340                                       sizeof(*item));
3341         if (ret)
3342                 goto out;
3343
3344         leaf = path->nodes[0];
3345         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
3346
3347         memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item));
3348
3349         btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
3350         btrfs_set_balance_data(leaf, item, &disk_bargs);
3351         btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
3352         btrfs_set_balance_meta(leaf, item, &disk_bargs);
3353         btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
3354         btrfs_set_balance_sys(leaf, item, &disk_bargs);
3355
3356         btrfs_set_balance_flags(leaf, item, bctl->flags);
3357
3358         btrfs_mark_buffer_dirty(leaf);
3359 out:
3360         btrfs_free_path(path);
3361         err = btrfs_commit_transaction(trans);
3362         if (err && !ret)
3363                 ret = err;
3364         return ret;
3365 }
3366
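     /* Delete the on-disk balance item and commit the transaction. */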
3367 static int del_balance_item(struct btrfs_fs_info *fs_info)
3368 {
3369         struct btrfs_root *root = fs_info->tree_root;
3370         struct btrfs_trans_handle *trans;
3371         struct btrfs_path *path;
3372         struct btrfs_key key;
3373         int ret, err;
3374
3375         path = btrfs_alloc_path();
3376         if (!path)
3377                 return -ENOMEM;
3378
3379         trans = btrfs_start_transaction_fallback_global_rsv(root, 0);
3380         if (IS_ERR(trans)) {
3381                 btrfs_free_path(path);
3382                 return PTR_ERR(trans);
3383         }
3384
3385         key.objectid = BTRFS_BALANCE_OBJECTID;
3386         key.type = BTRFS_TEMPORARY_ITEM_KEY;
3387         key.offset = 0;
3388
3389         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
3390         if (ret < 0)
3391                 goto out;
3392         if (ret > 0) {
3393                 ret = -ENOENT;
3394                 goto out;
3395         }
3396
3397         ret = btrfs_del_item(trans, root, path);
3398 out:
3399         btrfs_free_path(path);
3400         err = btrfs_commit_transaction(trans);
3401         if (err && !ret)
3402                 ret = err;
3403         return ret;
3404 }
3405
3406 /*
3407  * This is a heuristic used to reduce the number of chunks balanced on
3408  * resume after balance was interrupted.
3409  */
3410 static void update_balance_args(struct btrfs_balance_control *bctl)
3411 {
3412         /*
3413          * Turn on soft mode for chunk types that were being converted.
3414          */
3415         if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
3416                 bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
3417         if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
3418                 bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
3419         if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
3420                 bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;
3421
3422         /*
3423          * Turn on the usage filter if it is not already in use.  The idea is
3424          * that chunks that we have already balanced should be
3425          * reasonably full.  Don't do it for chunks that are being
3426          * converted - that will keep us from relocating unconverted
3427          * (albeit full) chunks.
3428          */
3429         if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3430             !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3431             !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3432                 bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
3433                 bctl->data.usage = 90;
3434         }
3435         if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3436             !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3437             !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3438                 bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
3439                 bctl->sys.usage = 90;
3440         }
3441         if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3442             !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3443             !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3444                 bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
3445                 bctl->meta.usage = 90;
3446         }
3447 }
3448
3449 /*
3450  * Clear the balance status in fs_info and delete the balance item from disk.
3451  */
3452 static void reset_balance_state(struct btrfs_fs_info *fs_info)
3453 {
3454         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3455         int ret;
3456
3457         BUG_ON(!fs_info->balance_ctl);
3458
3459         spin_lock(&fs_info->balance_lock);
3460         fs_info->balance_ctl = NULL;
3461         spin_unlock(&fs_info->balance_lock);
3462
3463         kfree(bctl);
3464         ret = del_balance_item(fs_info);
3465         if (ret)
3466                 btrfs_handle_fs_error(fs_info, ret, NULL);
3467 }
3468
3469 /*
3470  * Balance filters.  Return 1 if chunk should be filtered out
3471  * (should not be balanced).
3472  */
3473 static int chunk_profiles_filter(u64 chunk_type,
3474                                  struct btrfs_balance_args *bargs)
3475 {
3476         chunk_type = chunk_to_extended(chunk_type) &
3477                                 BTRFS_EXTENDED_PROFILE_MASK;
3478
3479         if (bargs->profiles & chunk_type)
3480                 return 0;
3481
3482         return 1;
3483 }
3484
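     /*
      * Usage range filter: keep the chunk for balancing only if its used bytes
      * fall within [usage_min%, usage_max%) of the block group length.
      */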
3485 static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
3486                               struct btrfs_balance_args *bargs)
3487 {
3488         struct btrfs_block_group *cache;
3489         u64 chunk_used;
3490         u64 user_thresh_min;
3491         u64 user_thresh_max;
3492         int ret = 1;
3493
3494         cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3495         chunk_used = cache->used;
3496
3497         if (bargs->usage_min == 0)
3498                 user_thresh_min = 0;
3499         else
3500                 user_thresh_min = div_factor_fine(cache->length,
3501                                                   bargs->usage_min);
3502
3503         if (bargs->usage_max == 0)
3504                 user_thresh_max = 1;
3505         else if (bargs->usage_max > 100)
3506                 user_thresh_max = cache->length;
3507         else
3508                 user_thresh_max = div_factor_fine(cache->length,
3509                                                   bargs->usage_max);
3510
3511         if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max)
3512                 ret = 0;
3513
3514         btrfs_put_block_group(cache);
3515         return ret;
3516 }
3517
3518 static int chunk_usage_filter(struct btrfs_fs_info *fs_info,
3519                 u64 chunk_offset, struct btrfs_balance_args *bargs)
3520 {
3521         struct btrfs_block_group *cache;
3522         u64 chunk_used, user_thresh;
3523         int ret = 1;
3524
3525         cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3526         chunk_used = cache->used;
3527
3528         if (bargs->usage_min == 0)
3529                 user_thresh = 1;
3530         else if (bargs->usage > 100)
3531                 user_thresh = cache->length;
3532         else
3533                 user_thresh = div_factor_fine(cache->length, bargs->usage);
3534
3535         if (chunk_used < user_thresh)
3536                 ret = 0;
3537
3538         btrfs_put_block_group(cache);
3539         return ret;
3540 }
3541
3542 static int chunk_devid_filter(struct extent_buffer *leaf,
3543                               struct btrfs_chunk *chunk,
3544                               struct btrfs_balance_args *bargs)
3545 {
3546         struct btrfs_stripe *stripe;
3547         int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3548         int i;
3549
3550         for (i = 0; i < num_stripes; i++) {
3551                 stripe = btrfs_stripe_nr(chunk, i);
3552                 if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
3553                         return 0;
3554         }
3555
3556         return 1;
3557 }
3558
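     /*
      * Number of stripes carrying distinct data in a chunk of profile @type:
      * parity stripes are excluded and mirrored copies are counted only once.
      */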
3559 static u64 calc_data_stripes(u64 type, int num_stripes)
3560 {
3561         const int index = btrfs_bg_flags_to_raid_index(type);
3562         const int ncopies = btrfs_raid_array[index].ncopies;
3563         const int nparity = btrfs_raid_array[index].nparity;
3564
3565         return (num_stripes - nparity) / ncopies;
3566 }
3567
3568 /* [pstart, pend) */
3569 static int chunk_drange_filter(struct extent_buffer *leaf,
3570                                struct btrfs_chunk *chunk,
3571                                struct btrfs_balance_args *bargs)
3572 {
3573         struct btrfs_stripe *stripe;
3574         int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3575         u64 stripe_offset;
3576         u64 stripe_length;
3577         u64 type;
3578         int factor;
3579         int i;
3580
3581         if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
3582                 return 0;
3583
3584         type = btrfs_chunk_type(leaf, chunk);
3585         factor = calc_data_stripes(type, num_stripes);
3586
3587         for (i = 0; i < num_stripes; i++) {
3588                 stripe = btrfs_stripe_nr(chunk, i);
3589                 if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
3590                         continue;
3591
3592                 stripe_offset = btrfs_stripe_offset(leaf, stripe);
3593                 stripe_length = btrfs_chunk_length(leaf, chunk);
3594                 stripe_length = div_u64(stripe_length, factor);
3595
3596                 if (stripe_offset < bargs->pend &&
3597                     stripe_offset + stripe_length > bargs->pstart)
3598                         return 0;
3599         }
3600
3601         return 1;
3602 }
3603
3604 /* [vstart, vend) */
3605 static int chunk_vrange_filter(struct extent_buffer *leaf,
3606                                struct btrfs_chunk *chunk,
3607                                u64 chunk_offset,
3608                                struct btrfs_balance_args *bargs)
3609 {
3610         if (chunk_offset < bargs->vend &&
3611             chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
3612                 /* at least part of the chunk is inside this vrange */
3613                 return 0;
3614
3615         return 1;
3616 }
3617
3618 static int chunk_stripes_range_filter(struct extent_buffer *leaf,
3619                                struct btrfs_chunk *chunk,
3620                                struct btrfs_balance_args *bargs)
3621 {
3622         int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3623
3624         if (bargs->stripes_min <= num_stripes
3625                         && num_stripes <= bargs->stripes_max)
3626                 return 0;
3627
3628         return 1;
3629 }
3630
3631 static int chunk_soft_convert_filter(u64 chunk_type,
3632                                      struct btrfs_balance_args *bargs)
3633 {
3634         if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
3635                 return 0;
3636
3637         chunk_type = chunk_to_extended(chunk_type) &
3638                                 BTRFS_EXTENDED_PROFILE_MASK;
3639
3640         if (bargs->target == chunk_type)
3641                 return 1;
3642
3643         return 0;
3644 }
3645
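     /*
      * Run all configured balance filters against one chunk.  Returns 1 if the
      * chunk should be relocated and 0 if it is filtered out.
      */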
3646 static int should_balance_chunk(struct extent_buffer *leaf,
3647                                 struct btrfs_chunk *chunk, u64 chunk_offset)
3648 {
3649         struct btrfs_fs_info *fs_info = leaf->fs_info;
3650         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3651         struct btrfs_balance_args *bargs = NULL;
3652         u64 chunk_type = btrfs_chunk_type(leaf, chunk);
3653
3654         /* type filter */
3655         if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
3656               (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
3657                 return 0;
3658         }
3659
3660         if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
3661                 bargs = &bctl->data;
3662         else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
3663                 bargs = &bctl->sys;
3664         else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
3665                 bargs = &bctl->meta;
3666
3667         /* profiles filter */
3668         if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
3669             chunk_profiles_filter(chunk_type, bargs)) {
3670                 return 0;
3671         }
3672
3673         /* usage filter */
3674         if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
3675             chunk_usage_filter(fs_info, chunk_offset, bargs)) {
3676                 return 0;
3677         } else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3678             chunk_usage_range_filter(fs_info, chunk_offset, bargs)) {
3679                 return 0;
3680         }
3681
3682         /* devid filter */
3683         if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
3684             chunk_devid_filter(leaf, chunk, bargs)) {
3685                 return 0;
3686         }
3687
3688         /* drange filter, makes sense only with devid filter */
3689         if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
3690             chunk_drange_filter(leaf, chunk, bargs)) {
3691                 return 0;
3692         }
3693
3694         /* vrange filter */
3695         if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
3696             chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
3697                 return 0;
3698         }
3699
3700         /* stripes filter */
3701         if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) &&
3702             chunk_stripes_range_filter(leaf, chunk, bargs)) {
3703                 return 0;
3704         }
3705
3706         /* soft profile changing mode */
3707         if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
3708             chunk_soft_convert_filter(chunk_type, bargs)) {
3709                 return 0;
3710         }
3711
3712         /*
3713          * Limited by count; this must be the last filter.
3714          */
3715         if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) {
3716                 if (bargs->limit == 0)
3717                         return 0;
3718                 else
3719                         bargs->limit--;
3720         } else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) {
3721                 /*
3722                  * Same logic as the 'limit' filter; the minimum cannot be
3723                  * determined here because we do not have the global information
3724                  * about the count of all chunks that satisfy the filters.
3725                  */
3726                 if (bargs->limit_max == 0)
3727                         return 0;
3728                 else
3729                         bargs->limit_max--;
3730         }
3731
3732         return 1;
3733 }
3734
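     /*
      * Main balance loop.  The first pass over the chunk tree only counts the
      * chunks matching the filters (needed for the limit_min accounting), the
      * second pass actually relocates them, walking from the highest chunk
      * offset downwards.
      */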
3735 static int __btrfs_balance(struct btrfs_fs_info *fs_info)
3736 {
3737         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3738         struct btrfs_root *chunk_root = fs_info->chunk_root;
3739         u64 chunk_type;
3740         struct btrfs_chunk *chunk;
3741         struct btrfs_path *path = NULL;
3742         struct btrfs_key key;
3743         struct btrfs_key found_key;
3744         struct extent_buffer *leaf;
3745         int slot;
3746         int ret;
3747         int enospc_errors = 0;
3748         bool counting = true;
3749         /* The single value limit and the min/max limits share the same bytes in the args, so save the originals here. */
3750         u64 limit_data = bctl->data.limit;
3751         u64 limit_meta = bctl->meta.limit;
3752         u64 limit_sys = bctl->sys.limit;
3753         u32 count_data = 0;
3754         u32 count_meta = 0;
3755         u32 count_sys = 0;
3756         int chunk_reserved = 0;
3757
3758         path = btrfs_alloc_path();
3759         if (!path) {
3760                 ret = -ENOMEM;
3761                 goto error;
3762         }
3763
3764         /* zero out stat counters */
3765         spin_lock(&fs_info->balance_lock);
3766         memset(&bctl->stat, 0, sizeof(bctl->stat));
3767         spin_unlock(&fs_info->balance_lock);
3768 again:
3769         if (!counting) {
3770                 /*
3771                  * The single value limit and min/max limits share the same bytes
3772                  * in the balance args, so restore the limits saved above for this pass.
3773                  */
3774                 bctl->data.limit = limit_data;
3775                 bctl->meta.limit = limit_meta;
3776                 bctl->sys.limit = limit_sys;
3777         }
3778         key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3779         key.offset = (u64)-1;
3780         key.type = BTRFS_CHUNK_ITEM_KEY;
3781
3782         while (1) {
3783                 if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
3784                     atomic_read(&fs_info->balance_cancel_req)) {
3785                         ret = -ECANCELED;
3786                         goto error;
3787                 }
3788
3789                 mutex_lock(&fs_info->reclaim_bgs_lock);
3790                 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
3791                 if (ret < 0) {
3792                         mutex_unlock(&fs_info->reclaim_bgs_lock);
3793                         goto error;
3794                 }
3795
3796                 /*
3797                  * This shouldn't happen; it means the last relocate
3798                  * failed.
3799                  */
3800                 if (ret == 0)
3801                         BUG(); /* FIXME break ? */
3802
3803                 ret = btrfs_previous_item(chunk_root, path, 0,
3804                                           BTRFS_CHUNK_ITEM_KEY);
3805                 if (ret) {
3806                         mutex_unlock(&fs_info->reclaim_bgs_lock);
3807                         ret = 0;
3808                         break;
3809                 }
3810
3811                 leaf = path->nodes[0];
3812                 slot = path->slots[0];
3813                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3814
3815                 if (found_key.objectid != key.objectid) {
3816                         mutex_unlock(&fs_info->reclaim_bgs_lock);
3817                         break;
3818                 }
3819
3820                 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
3821                 chunk_type = btrfs_chunk_type(leaf, chunk);
3822
3823                 if (!counting) {
3824                         spin_lock(&fs_info->balance_lock);
3825                         bctl->stat.considered++;
3826                         spin_unlock(&fs_info->balance_lock);
3827                 }
3828
3829                 ret = should_balance_chunk(leaf, chunk, found_key.offset);
3830
3831                 btrfs_release_path(path);
3832                 if (!ret) {
3833                         mutex_unlock(&fs_info->reclaim_bgs_lock);
3834                         goto loop;
3835                 }
3836
3837                 if (counting) {
3838                         mutex_unlock(&fs_info->reclaim_bgs_lock);
3839                         spin_lock(&fs_info->balance_lock);
3840                         bctl->stat.expected++;
3841                         spin_unlock(&fs_info->balance_lock);
3842
3843                         if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
3844                                 count_data++;
3845                         else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
3846                                 count_sys++;
3847                         else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
3848                                 count_meta++;
3849
3850                         goto loop;
3851                 }
3852
3853                 /*
3854                  * Apply the limit_min filter; there is no need to check whether the
3855                  * LIMITS filter is used, as limit_min is 0 by default.
3856                  */
3857                 if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) &&
3858                                         count_data < bctl->data.limit_min)
3859                                 || ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) &&
3860                                         count_meta < bctl->meta.limit_min)
3861                                 || ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) &&
3862                                         count_sys < bctl->sys.limit_min)) {
3863                         mutex_unlock(&fs_info->reclaim_bgs_lock);
3864                         goto loop;
3865                 }
3866
3867                 if (!chunk_reserved) {
3868                         /*
3869                          * We may be relocating the only data chunk we have,
3870                          * which could potentially end up losing the data's
3871                          * raid profile, so let's allocate an empty one in
3872                          * advance.
3873                          */
3874                         ret = btrfs_may_alloc_data_chunk(fs_info,
3875                                                          found_key.offset);
3876                         if (ret < 0) {
3877                                 mutex_unlock(&fs_info->reclaim_bgs_lock);
3878                                 goto error;
3879                         } else if (ret == 1) {
3880                                 chunk_reserved = 1;
3881                         }
3882                 }
3883
3884                 ret = btrfs_relocate_chunk(fs_info, found_key.offset);
3885                 mutex_unlock(&fs_info->reclaim_bgs_lock);
3886                 if (ret == -ENOSPC) {
3887                         enospc_errors++;
3888                 } else if (ret == -ETXTBSY) {
3889                         btrfs_info(fs_info,
3890            "skipping relocation of block group %llu due to active swapfile",
3891                                    found_key.offset);
3892                         ret = 0;
3893                 } else if (ret) {
3894                         goto error;
3895                 } else {
3896                         spin_lock(&fs_info->balance_lock);
3897                         bctl->stat.completed++;
3898                         spin_unlock(&fs_info->balance_lock);
3899                 }
3900 loop:
3901                 if (found_key.offset == 0)
3902                         break;
3903                 key.offset = found_key.offset - 1;
3904         }
3905
3906         if (counting) {
3907                 btrfs_release_path(path);
3908                 counting = false;
3909                 goto again;
3910         }
3911 error:
3912         btrfs_free_path(path);
3913         if (enospc_errors) {
3914                 btrfs_info(fs_info, "%d enospc errors during balance",
3915                            enospc_errors);
3916                 if (!ret)
3917                         ret = -ENOSPC;
3918         }
3919
3920         return ret;
3921 }
3922
3923 /**
3924  * alloc_profile_is_valid - see if a given profile is valid and reduced
3925  * @flags: profile to validate
3926  * @extended: if true @flags is treated as an extended profile
3927  */
3928 static int alloc_profile_is_valid(u64 flags, int extended)
3929 {
3930         u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
3931                                BTRFS_BLOCK_GROUP_PROFILE_MASK);
3932
3933         flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
3934
3935         /* 1) check that all other bits are zeroed */
3936         if (flags & ~mask)
3937                 return 0;
3938
3939         /* 2) see if profile is reduced */
3940         if (flags == 0)
3941                 return !extended; /* "0" is valid for usual profiles */
3942
3943         return has_single_bit_set(flags);
3944 }
3945
3946 static inline int balance_need_close(struct btrfs_fs_info *fs_info)
3947 {
3948         /* cancel requested || normal exit path */
3949         return atomic_read(&fs_info->balance_cancel_req) ||
3950                 (atomic_read(&fs_info->balance_pause_req) == 0 &&
3951                  atomic_read(&fs_info->balance_cancel_req) == 0);
3952 }
3953
3954 /*
3955  * Validate target profile against allowed profiles and return true if it's OK.
3956  * Otherwise print the error message and return false.
3957  */
3958 static inline int validate_convert_profile(struct btrfs_fs_info *fs_info,
3959                 const struct btrfs_balance_args *bargs,
3960                 u64 allowed, const char *type)
3961 {
3962         if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
3963                 return true;
3964
3965         if (fs_info->sectorsize < PAGE_SIZE &&
3966                 bargs->target & BTRFS_BLOCK_GROUP_RAID56_MASK) {
3967                 btrfs_err(fs_info,
3968                 "RAID56 is not yet supported for sectorsize %u with page size %lu",
3969                           fs_info->sectorsize, PAGE_SIZE);
3970                 return false;
3971         }
3972         /* Profile is valid and does not have bits outside of the allowed set */
3973         if (alloc_profile_is_valid(bargs->target, 1) &&
3974             (bargs->target & ~allowed) == 0)
3975                 return true;
3976
3977         btrfs_err(fs_info, "balance: invalid convert %s profile %s",
3978                         type, btrfs_bg_type_to_raid_name(bargs->target));
3979         return false;
3980 }
3981
3982 /*
3983  * Fill @buf with textual description of balance filter flags @bargs, up to
3984  * @size_buf including the terminating null. The output may be trimmed if it
3985  * does not fit into the provided buffer.
3986  */
3987 static void describe_balance_args(struct btrfs_balance_args *bargs, char *buf,
3988                                  u32 size_buf)
3989 {
3990         int ret;
3991         u32 size_bp = size_buf;
3992         char *bp = buf;
3993         u64 flags = bargs->flags;
3994         char tmp_buf[128] = {'\0'};
3995
3996         if (!flags)
3997                 return;
3998
3999 #define CHECK_APPEND_NOARG(a)                                           \
4000         do {                                                            \
4001                 ret = snprintf(bp, size_bp, (a));                       \
4002                 if (ret < 0 || ret >= size_bp)                          \
4003                         goto out_overflow;                              \
4004                 size_bp -= ret;                                         \
4005                 bp += ret;                                              \
4006         } while (0)
4007
4008 #define CHECK_APPEND_1ARG(a, v1)                                        \
4009         do {                                                            \
4010                 ret = snprintf(bp, size_bp, (a), (v1));                 \
4011                 if (ret < 0 || ret >= size_bp)                          \
4012                         goto out_overflow;                              \
4013                 size_bp -= ret;                                         \
4014                 bp += ret;                                              \
4015         } while (0)
4016
4017 #define CHECK_APPEND_2ARG(a, v1, v2)                                    \
4018         do {                                                            \
4019                 ret = snprintf(bp, size_bp, (a), (v1), (v2));           \
4020                 if (ret < 0 || ret >= size_bp)                          \
4021                         goto out_overflow;                              \
4022                 size_bp -= ret;                                         \
4023                 bp += ret;                                              \
4024         } while (0)
4025
4026         if (flags & BTRFS_BALANCE_ARGS_CONVERT)
4027                 CHECK_APPEND_1ARG("convert=%s,",
4028                                   btrfs_bg_type_to_raid_name(bargs->target));
4029
4030         if (flags & BTRFS_BALANCE_ARGS_SOFT)
4031                 CHECK_APPEND_NOARG("soft,");
4032
4033         if (flags & BTRFS_BALANCE_ARGS_PROFILES) {
4034                 btrfs_describe_block_groups(bargs->profiles, tmp_buf,
4035                                             sizeof(tmp_buf));
4036                 CHECK_APPEND_1ARG("profiles=%s,", tmp_buf);
4037         }
4038
4039         if (flags & BTRFS_BALANCE_ARGS_USAGE)
4040                 CHECK_APPEND_1ARG("usage=%llu,", bargs->usage);
4041
4042         if (flags & BTRFS_BALANCE_ARGS_USAGE_RANGE)
4043                 CHECK_APPEND_2ARG("usage=%u..%u,",
4044                                   bargs->usage_min, bargs->usage_max);
4045
4046         if (flags & BTRFS_BALANCE_ARGS_DEVID)
4047                 CHECK_APPEND_1ARG("devid=%llu,", bargs->devid);
4048
4049         if (flags & BTRFS_BALANCE_ARGS_DRANGE)
4050                 CHECK_APPEND_2ARG("drange=%llu..%llu,",
4051                                   bargs->pstart, bargs->pend);
4052
4053         if (flags & BTRFS_BALANCE_ARGS_VRANGE)
4054                 CHECK_APPEND_2ARG("vrange=%llu..%llu,",
4055                                   bargs->vstart, bargs->vend);
4056
4057         if (flags & BTRFS_BALANCE_ARGS_LIMIT)
4058                 CHECK_APPEND_1ARG("limit=%llu,", bargs->limit);
4059
4060         if (flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)
4061                 CHECK_APPEND_2ARG("limit=%u..%u,",
4062                                 bargs->limit_min, bargs->limit_max);
4063
4064         if (flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE)
4065                 CHECK_APPEND_2ARG("stripes=%u..%u,",
4066                                   bargs->stripes_min, bargs->stripes_max);
4067
4068 #undef CHECK_APPEND_2ARG
4069 #undef CHECK_APPEND_1ARG
4070 #undef CHECK_APPEND_NOARG
4071
4072 out_overflow:
4073
4074         if (size_bp < size_buf)
4075                 buf[size_buf - size_bp - 1] = '\0'; /* remove last , */
4076         else
4077                 buf[0] = '\0';
4078 }
4079
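     /*
      * Log the "balance: start/resume" message with the filters rendered in
      * command-line style (-f, -d..., -m..., -s...).
      */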
4080 static void describe_balance_start_or_resume(struct btrfs_fs_info *fs_info)
4081 {
4082         u32 size_buf = 1024;
4083         char tmp_buf[192] = {'\0'};
4084         char *buf;
4085         char *bp;
4086         u32 size_bp = size_buf;
4087         int ret;
4088         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
4089
4090         buf = kzalloc(size_buf, GFP_KERNEL);
4091         if (!buf)
4092                 return;
4093
4094         bp = buf;
4095
4096 #define CHECK_APPEND_1ARG(a, v1)                                        \
4097         do {                                                            \
4098                 ret = snprintf(bp, size_bp, (a), (v1));                 \
4099                 if (ret < 0 || ret >= size_bp)                          \
4100                         goto out_overflow;                              \
4101                 size_bp -= ret;                                         \
4102                 bp += ret;                                              \
4103         } while (0)
4104
4105         if (bctl->flags & BTRFS_BALANCE_FORCE)
4106                 CHECK_APPEND_1ARG("%s", "-f ");
4107
4108         if (bctl->flags & BTRFS_BALANCE_DATA) {
4109                 describe_balance_args(&bctl->data, tmp_buf, sizeof(tmp_buf));
4110                 CHECK_APPEND_1ARG("-d%s ", tmp_buf);
4111         }
4112
4113         if (bctl->flags & BTRFS_BALANCE_METADATA) {
4114                 describe_balance_args(&bctl->meta, tmp_buf, sizeof(tmp_buf));
4115                 CHECK_APPEND_1ARG("-m%s ", tmp_buf);
4116         }
4117
4118         if (bctl->flags & BTRFS_BALANCE_SYSTEM) {
4119                 describe_balance_args(&bctl->sys, tmp_buf, sizeof(tmp_buf));
4120                 CHECK_APPEND_1ARG("-s%s ", tmp_buf);
4121         }
4122
4123 #undef CHECK_APPEND_1ARG
4124
4125 out_overflow:
4126
4127         if (size_bp < size_buf)
4128                 buf[size_buf - size_bp - 1] = '\0'; /* remove last " " */
4129         btrfs_info(fs_info, "balance: %s %s",
4130                    (bctl->flags & BTRFS_BALANCE_RESUME) ?
4131                    "resume" : "start", buf);
4132
4133         kfree(buf);
4134 }
4135
4136 /*
4137  * Should be called with the balance mutex held.
4138  */
4139 int btrfs_balance(struct btrfs_fs_info *fs_info,
4140                   struct btrfs_balance_control *bctl,
4141                   struct btrfs_ioctl_balance_args *bargs)
4142 {
4143         u64 meta_target, data_target;
4144         u64 allowed;
4145         int mixed = 0;
4146         int ret;
4147         u64 num_devices;
4148         unsigned seq;
4149         bool reducing_redundancy;
4150         int i;
4151
4152         if (btrfs_fs_closing(fs_info) ||
4153             atomic_read(&fs_info->balance_pause_req) ||
4154             btrfs_should_cancel_balance(fs_info)) {
4155                 ret = -EINVAL;
4156                 goto out;
4157         }
4158
4159         allowed = btrfs_super_incompat_flags(fs_info->super_copy);
4160         if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
4161                 mixed = 1;
4162
4163         /*
4164          * In case of mixed groups both data and meta should be picked,
4165          * and identical options should be given for both of them.
4166          */
4167         allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
4168         if (mixed && (bctl->flags & allowed)) {
4169                 if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
4170                     !(bctl->flags & BTRFS_BALANCE_METADATA) ||
4171                     memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
4172                         btrfs_err(fs_info,
4173           "balance: mixed groups data and metadata options must be the same");
4174                         ret = -EINVAL;
4175                         goto out;
4176                 }
4177         }
4178
4179         /*
4180          * rw_devices will not change at the moment, as device add/delete/replace
4181          * operations are exclusive with balance.
4182          */
4183         num_devices = fs_info->fs_devices->rw_devices;
4184
4185         /*
4186          * SINGLE profile on-disk has no profile bit, but in-memory we have a
4187          * special bit for it, to make it easier to distinguish.  Thus we need
4188          * to set it manually, or balance would refuse the profile.
4189          */
4190         allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
4191         for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++)
4192                 if (num_devices >= btrfs_raid_array[i].devs_min)
4193                         allowed |= btrfs_raid_array[i].bg_flag;
4194
4195         if (!validate_convert_profile(fs_info, &bctl->data, allowed, "data") ||
4196             !validate_convert_profile(fs_info, &bctl->meta, allowed, "metadata") ||
4197             !validate_convert_profile(fs_info, &bctl->sys,  allowed, "system")) {
4198                 ret = -EINVAL;
4199                 goto out;
4200         }
4201
4202         /*
4203          * Allow reducing metadata or system integrity only if the force
4204          * flag is set for profiles with redundancy (copies, parity).
4205          */
4206         allowed = 0;
4207         for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) {
4208                 if (btrfs_raid_array[i].ncopies >= 2 ||
4209                     btrfs_raid_array[i].tolerated_failures >= 1)
4210                         allowed |= btrfs_raid_array[i].bg_flag;
4211         }
4212         do {
4213                 seq = read_seqbegin(&fs_info->profiles_lock);
4214
4215                 if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
4216                      (fs_info->avail_system_alloc_bits & allowed) &&
4217                      !(bctl->sys.target & allowed)) ||
4218                     ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
4219                      (fs_info->avail_metadata_alloc_bits & allowed) &&
4220                      !(bctl->meta.target & allowed)))
4221                         reducing_redundancy = true;
4222                 else
4223                         reducing_redundancy = false;
4224
4225                 /* if we're not converting, the target field is uninitialized */
4226                 meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
4227                         bctl->meta.target : fs_info->avail_metadata_alloc_bits;
4228                 data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
4229                         bctl->data.target : fs_info->avail_data_alloc_bits;
4230         } while (read_seqretry(&fs_info->profiles_lock, seq));
4231
4232         if (reducing_redundancy) {
4233                 if (bctl->flags & BTRFS_BALANCE_FORCE) {
4234                         btrfs_info(fs_info,
4235                            "balance: force reducing metadata redundancy");
4236                 } else {
4237                         btrfs_err(fs_info,
4238         "balance: reduces metadata redundancy, use --force if you want this");
4239                         ret = -EINVAL;
4240                         goto out;
4241                 }
4242         }
4243
4244         if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target) <
4245                 btrfs_get_num_tolerated_disk_barrier_failures(data_target)) {
4246                 btrfs_warn(fs_info,
4247         "balance: metadata profile %s has lower redundancy than data profile %s",
4248                                 btrfs_bg_type_to_raid_name(meta_target),
4249                                 btrfs_bg_type_to_raid_name(data_target));
4250         }
4251
4252         ret = insert_balance_item(fs_info, bctl);
4253         if (ret && ret != -EEXIST)
4254                 goto out;
4255
4256         if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
4257                 BUG_ON(ret == -EEXIST);
4258                 BUG_ON(fs_info->balance_ctl);
4259                 spin_lock(&fs_info->balance_lock);
4260                 fs_info->balance_ctl = bctl;
4261                 spin_unlock(&fs_info->balance_lock);
4262         } else {
4263                 BUG_ON(ret != -EEXIST);
4264                 spin_lock(&fs_info->balance_lock);
4265                 update_balance_args(bctl);
4266                 spin_unlock(&fs_info->balance_lock);
4267         }
4268
4269         ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4270         set_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);
4271         describe_balance_start_or_resume(fs_info);
4272         mutex_unlock(&fs_info->balance_mutex);
4273
4274         ret = __btrfs_balance(fs_info);
4275
4276         mutex_lock(&fs_info->balance_mutex);
4277         if (ret == -ECANCELED && atomic_read(&fs_info->balance_pause_req))
4278                 btrfs_info(fs_info, "balance: paused");
4279         /*
4280          * Balance can be canceled by:
4281          *
4282          * - Regular cancel request
4283          *   Then ret == -ECANCELED and balance_cancel_req > 0
4284          *
4285          * - Fatal signal to "btrfs" process
4286          *   Either the signal is caught by wait_reserve_ticket() and the
4287          *   callers get -EINTR, or it is caught by btrfs_should_cancel_balance()
4288          *   and they get -ECANCELED.
4289          *   Either way, in this case balance_cancel_req = 0, and
4290          *   ret == -EINTR or ret == -ECANCELED.
4291          *
4292          * So here we only check the return value to catch canceled balance.
4293          */
4294         else if (ret == -ECANCELED || ret == -EINTR)
4295                 btrfs_info(fs_info, "balance: canceled");
4296         else
4297                 btrfs_info(fs_info, "balance: ended with status: %d", ret);
4298
4299         clear_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);
4300
4301         if (bargs) {
4302                 memset(bargs, 0, sizeof(*bargs));
4303                 btrfs_update_ioctl_balance_args(fs_info, bargs);
4304         }
4305
4306         if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
4307             balance_need_close(fs_info)) {
4308                 reset_balance_state(fs_info);
4309                 btrfs_exclop_finish(fs_info);
4310         }
4311
4312         wake_up(&fs_info->balance_wait_q);
4313
4314         return ret;
4315 out:
4316         if (bctl->flags & BTRFS_BALANCE_RESUME)
4317                 reset_balance_state(fs_info);
4318         else
4319                 kfree(bctl);
4320         btrfs_exclop_finish(fs_info);
4321
4322         return ret;
4323 }
4324
4325 static int balance_kthread(void *data)
4326 {
4327         struct btrfs_fs_info *fs_info = data;
4328         int ret = 0;
4329
4330         mutex_lock(&fs_info->balance_mutex);
4331         if (fs_info->balance_ctl)
4332                 ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL);
4333         mutex_unlock(&fs_info->balance_mutex);
4334
4335         return ret;
4336 }
4337
4338 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
4339 {
4340         struct task_struct *tsk;
4341
4342         mutex_lock(&fs_info->balance_mutex);
4343         if (!fs_info->balance_ctl) {
4344                 mutex_unlock(&fs_info->balance_mutex);
4345                 return 0;
4346         }
4347         mutex_unlock(&fs_info->balance_mutex);
4348
4349         if (btrfs_test_opt(fs_info, SKIP_BALANCE)) {
4350                 btrfs_info(fs_info, "balance: resume skipped");
4351                 return 0;
4352         }
4353
4354         /*
4355          * A ro->rw remount sequence should continue with the paused balance
4356          * regardless of who paused it (the system or the user, as of now),
4357          * so set the resume flag.
4358          */
4359         spin_lock(&fs_info->balance_lock);
4360         fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME;
4361         spin_unlock(&fs_info->balance_lock);
4362
4363         tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
4364         return PTR_ERR_OR_ZERO(tsk);
4365 }
4366
4367 int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
4368 {
4369         struct btrfs_balance_control *bctl;
4370         struct btrfs_balance_item *item;
4371         struct btrfs_disk_balance_args disk_bargs;
4372         struct btrfs_path *path;
4373         struct extent_buffer *leaf;
4374         struct btrfs_key key;
4375         int ret;
4376
4377         path = btrfs_alloc_path();
4378         if (!path)
4379                 return -ENOMEM;
4380
4381         key.objectid = BTRFS_BALANCE_OBJECTID;
4382         key.type = BTRFS_TEMPORARY_ITEM_KEY;
4383         key.offset = 0;
4384
4385         ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
4386         if (ret < 0)
4387                 goto out;
4388         if (ret > 0) { /* ret = -ENOENT; */
4389                 ret = 0;
4390                 goto out;
4391         }
4392
4393         bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
4394         if (!bctl) {
4395                 ret = -ENOMEM;
4396                 goto out;
4397         }
4398
4399         leaf = path->nodes[0];
4400         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
4401
4402         bctl->flags = btrfs_balance_flags(leaf, item);
4403         bctl->flags |= BTRFS_BALANCE_RESUME;
4404
4405         btrfs_balance_data(leaf, item, &disk_bargs);
4406         btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
4407         btrfs_balance_meta(leaf, item, &disk_bargs);
4408         btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
4409         btrfs_balance_sys(leaf, item, &disk_bargs);
4410         btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
4411
4412         /*
4413          * This should never happen, as the paused balance state is recovered
4414          * during mount without any chance of other exclusive ops to collide.
4415          *
4416          * This gives the exclusive op status to balance and keeps in paused
4417          * state until user intervention (cancel or umount). If the ownership
4418          * cannot be assigned, show a message but do not fail. The balance
4419          * is in a paused state and must have fs_info::balance_ctl properly
4420          * set up.
4421          */
4422         if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE))
4423                 btrfs_warn(fs_info,
4424         "balance: cannot set exclusive op status, resume manually");
4425
4426         btrfs_release_path(path);
4427
4428         mutex_lock(&fs_info->balance_mutex);
4429         BUG_ON(fs_info->balance_ctl);
4430         spin_lock(&fs_info->balance_lock);
4431         fs_info->balance_ctl = bctl;
4432         spin_unlock(&fs_info->balance_lock);
4433         mutex_unlock(&fs_info->balance_mutex);
4434 out:
4435         btrfs_free_path(path);
4436         return ret;
4437 }
4438
4439 int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
4440 {
4441         int ret = 0;
4442
4443         mutex_lock(&fs_info->balance_mutex);
4444         if (!fs_info->balance_ctl) {
4445                 mutex_unlock(&fs_info->balance_mutex);
4446                 return -ENOTCONN;
4447         }
4448
4449         if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
4450                 atomic_inc(&fs_info->balance_pause_req);
4451                 mutex_unlock(&fs_info->balance_mutex);
4452
4453                 wait_event(fs_info->balance_wait_q,
4454                            !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4455
4456                 mutex_lock(&fs_info->balance_mutex);
4457                 /* we are good with balance_ctl ripped off from under us */
4458                 BUG_ON(test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4459                 atomic_dec(&fs_info->balance_pause_req);
4460         } else {
4461                 ret = -ENOTCONN;
4462         }
4463
4464         mutex_unlock(&fs_info->balance_mutex);
4465         return ret;
4466 }
4467
4468 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
4469 {
4470         mutex_lock(&fs_info->balance_mutex);
4471         if (!fs_info->balance_ctl) {
4472                 mutex_unlock(&fs_info->balance_mutex);
4473                 return -ENOTCONN;
4474         }
4475
4476         /*
4477          * A paused balance with the item stored on disk can be resumed at
4478          * mount time if the mount is read-write. Otherwise it's still paused
4479          * and we must not allow cancelling as it deletes the item.
4480          */
4481         if (sb_rdonly(fs_info->sb)) {
4482                 mutex_unlock(&fs_info->balance_mutex);
4483                 return -EROFS;
4484         }
4485
4486         atomic_inc(&fs_info->balance_cancel_req);
4487         /*
4488          * If the balance is currently running, just wait and return; the
4489          * balance item is deleted in btrfs_balance() in that case.
4490          */
4491         if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
4492                 mutex_unlock(&fs_info->balance_mutex);
4493                 wait_event(fs_info->balance_wait_q,
4494                            !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4495                 mutex_lock(&fs_info->balance_mutex);
4496         } else {
4497                 mutex_unlock(&fs_info->balance_mutex);
4498                 /*
4499                  * The lock was released to allow other waiters to continue;
4500                  * reexamine the status afterwards.
4501                  */
4502                 mutex_lock(&fs_info->balance_mutex);
4503
4504                 if (fs_info->balance_ctl) {
4505                         reset_balance_state(fs_info);
4506                         btrfs_exclop_finish(fs_info);
4507                         btrfs_info(fs_info, "balance: canceled");
4508                 }
4509         }
4510
4511         BUG_ON(fs_info->balance_ctl ||
4512                 test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4513         atomic_dec(&fs_info->balance_cancel_req);
4514         mutex_unlock(&fs_info->balance_mutex);
4515         return 0;
4516 }
4517
4518 int btrfs_uuid_scan_kthread(void *data)
4519 {
4520         struct btrfs_fs_info *fs_info = data;
4521         struct btrfs_root *root = fs_info->tree_root;
4522         struct btrfs_key key;
4523         struct btrfs_path *path = NULL;
4524         int ret = 0;
4525         struct extent_buffer *eb;
4526         int slot;
4527         struct btrfs_root_item root_item;
4528         u32 item_size;
4529         struct btrfs_trans_handle *trans = NULL;
4530         bool closing = false;
4531
4532         path = btrfs_alloc_path();
4533         if (!path) {
4534                 ret = -ENOMEM;
4535                 goto out;
4536         }
4537
4538         key.objectid = 0;
4539         key.type = BTRFS_ROOT_ITEM_KEY;
4540         key.offset = 0;
4541
4542         while (1) {
4543                 if (btrfs_fs_closing(fs_info)) {
4544                         closing = true;
4545                         break;
4546                 }
4547                 ret = btrfs_search_forward(root, &key, path,
4548                                 BTRFS_OLDEST_GENERATION);
4549                 if (ret) {
4550                         if (ret > 0)
4551                                 ret = 0;
4552                         break;
4553                 }
4554
4555                 if (key.type != BTRFS_ROOT_ITEM_KEY ||
4556                     (key.objectid < BTRFS_FIRST_FREE_OBJECTID &&
4557                      key.objectid != BTRFS_FS_TREE_OBJECTID) ||
4558                     key.objectid > BTRFS_LAST_FREE_OBJECTID)
4559                         goto skip;
4560
4561                 eb = path->nodes[0];
4562                 slot = path->slots[0];
4563                 item_size = btrfs_item_size_nr(eb, slot);
4564                 if (item_size < sizeof(root_item))
4565                         goto skip;
4566
4567                 read_extent_buffer(eb, &root_item,
4568                                    btrfs_item_ptr_offset(eb, slot),
4569                                    (int)sizeof(root_item));
4570                 if (btrfs_root_refs(&root_item) == 0)
4571                         goto skip;
4572
4573                 if (!btrfs_is_empty_uuid(root_item.uuid) ||
4574                     !btrfs_is_empty_uuid(root_item.received_uuid)) {
4575                         if (trans)
4576                                 goto update_tree;
4577
4578                         btrfs_release_path(path);
4579                         /*
4580                          * 1 - subvol uuid item
4581                          * 1 - received_subvol uuid item
4582                          */
4583                         trans = btrfs_start_transaction(fs_info->uuid_root, 2);
4584                         if (IS_ERR(trans)) {
4585                                 ret = PTR_ERR(trans);
4586                                 break;
4587                         }
4588                         continue;
4589                 } else {
4590                         goto skip;
4591                 }
4592 update_tree:
4593                 btrfs_release_path(path);
4594                 if (!btrfs_is_empty_uuid(root_item.uuid)) {
4595                         ret = btrfs_uuid_tree_add(trans, root_item.uuid,
4596                                                   BTRFS_UUID_KEY_SUBVOL,
4597                                                   key.objectid);
4598                         if (ret < 0) {
4599                                 btrfs_warn(fs_info, "uuid_tree_add failed %d",
4600                                         ret);
4601                                 break;
4602                         }
4603                 }
4604
4605                 if (!btrfs_is_empty_uuid(root_item.received_uuid)) {
4606                         ret = btrfs_uuid_tree_add(trans,
4607                                                   root_item.received_uuid,
4608                                                  BTRFS_UUID_KEY_RECEIVED_SUBVOL,
4609                                                   key.objectid);
4610                         if (ret < 0) {
4611                                 btrfs_warn(fs_info, "uuid_tree_add failed %d",
4612                                         ret);
4613                                 break;
4614                         }
4615                 }
4616
4617 skip:
4618                 btrfs_release_path(path);
4619                 if (trans) {
4620                         ret = btrfs_end_transaction(trans);
4621                         trans = NULL;
4622                         if (ret)
4623                                 break;
4624                 }
4625
4626                 if (key.offset < (u64)-1) {
4627                         key.offset++;
4628                 } else if (key.type < BTRFS_ROOT_ITEM_KEY) {
4629                         key.offset = 0;
4630                         key.type = BTRFS_ROOT_ITEM_KEY;
4631                 } else if (key.objectid < (u64)-1) {
4632                         key.offset = 0;
4633                         key.type = BTRFS_ROOT_ITEM_KEY;
4634                         key.objectid++;
4635                 } else {
4636                         break;
4637                 }
4638                 cond_resched();
4639         }
4640
4641 out:
4642         btrfs_free_path(path);
4643         if (trans && !IS_ERR(trans))
4644                 btrfs_end_transaction(trans);
4645         if (ret)
4646                 btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret);
4647         else if (!closing)
4648                 set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
4649         up(&fs_info->uuid_tree_rescan_sem);
4650         return 0;
4651 }
4652
4653 int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
4654 {
4655         struct btrfs_trans_handle *trans;
4656         struct btrfs_root *tree_root = fs_info->tree_root;
4657         struct btrfs_root *uuid_root;
4658         struct task_struct *task;
4659         int ret;
4660
4661         /*
4662          * 1 - root node
4663          * 1 - root item
4664          */
4665         trans = btrfs_start_transaction(tree_root, 2);
4666         if (IS_ERR(trans))
4667                 return PTR_ERR(trans);
4668
4669         uuid_root = btrfs_create_tree(trans, BTRFS_UUID_TREE_OBJECTID);
4670         if (IS_ERR(uuid_root)) {
4671                 ret = PTR_ERR(uuid_root);
4672                 btrfs_abort_transaction(trans, ret);
4673                 btrfs_end_transaction(trans);
4674                 return ret;
4675         }
4676
4677         fs_info->uuid_root = uuid_root;
4678
4679         ret = btrfs_commit_transaction(trans);
4680         if (ret)
4681                 return ret;
4682
4683         down(&fs_info->uuid_tree_rescan_sem);
4684         task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid");
4685         if (IS_ERR(task)) {
4686                 /* fs_info->update_uuid_tree_gen remains 0 in all error cases */
4687                 btrfs_warn(fs_info, "failed to start uuid_scan task");
4688                 up(&fs_info->uuid_tree_rescan_sem);
4689                 return PTR_ERR(task);
4690         }
4691
4692         return 0;
4693 }
4694
4695 /*
4696  * Shrinking a device means finding all of the device extents past
4697  * the new size, and then following the back refs to the chunks.
4698  * The chunk relocation code actually frees the device extent.
4699  */
4700 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
4701 {
4702         struct btrfs_fs_info *fs_info = device->fs_info;
4703         struct btrfs_root *root = fs_info->dev_root;
4704         struct btrfs_trans_handle *trans;
4705         struct btrfs_dev_extent *dev_extent = NULL;
4706         struct btrfs_path *path;
4707         u64 length;
4708         u64 chunk_offset;
4709         int ret;
4710         int slot;
4711         int failed = 0;
4712         bool retried = false;
4713         struct extent_buffer *l;
4714         struct btrfs_key key;
4715         struct btrfs_super_block *super_copy = fs_info->super_copy;
4716         u64 old_total = btrfs_super_total_bytes(super_copy);
4717         u64 old_size = btrfs_device_get_total_bytes(device);
4718         u64 diff;
4719         u64 start;
4720
4721         new_size = round_down(new_size, fs_info->sectorsize);
4722         start = new_size;
4723         diff = round_down(old_size - new_size, fs_info->sectorsize);
4724
4725         if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
4726                 return -EINVAL;
4727
4728         path = btrfs_alloc_path();
4729         if (!path)
4730                 return -ENOMEM;
4731
4732         path->reada = READA_BACK;
4733
4734         trans = btrfs_start_transaction(root, 0);
4735         if (IS_ERR(trans)) {
4736                 btrfs_free_path(path);
4737                 return PTR_ERR(trans);
4738         }
4739
4740         mutex_lock(&fs_info->chunk_mutex);
4741
4742         btrfs_device_set_total_bytes(device, new_size);
4743         if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
4744                 device->fs_devices->total_rw_bytes -= diff;
4745                 atomic64_sub(diff, &fs_info->free_chunk_space);
4746         }
4747
4748         /*
4749          * Once the device's size has been set to the new size, ensure all
4750          * in-memory chunks are synced to disk so that the loop below sees them
4751          * and relocates them accordingly.
4752          */
4753         if (contains_pending_extent(device, &start, diff)) {
4754                 mutex_unlock(&fs_info->chunk_mutex);
4755                 ret = btrfs_commit_transaction(trans);
4756                 if (ret)
4757                         goto done;
4758         } else {
4759                 mutex_unlock(&fs_info->chunk_mutex);
4760                 btrfs_end_transaction(trans);
4761         }
4762
4763 again:
4764         key.objectid = device->devid;
4765         key.offset = (u64)-1;
4766         key.type = BTRFS_DEV_EXTENT_KEY;
4767
4768         do {
4769                 mutex_lock(&fs_info->reclaim_bgs_lock);
4770                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4771                 if (ret < 0) {
4772                         mutex_unlock(&fs_info->reclaim_bgs_lock);
4773                         goto done;
4774                 }
4775
4776                 ret = btrfs_previous_item(root, path, 0, key.type);
4777                 if (ret) {
4778                         mutex_unlock(&fs_info->reclaim_bgs_lock);
4779                         if (ret < 0)
4780                                 goto done;
4781                         ret = 0;
4782                         btrfs_release_path(path);
4783                         break;
4784                 }
4785
4786                 l = path->nodes[0];
4787                 slot = path->slots[0];
4788                 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
4789
4790                 if (key.objectid != device->devid) {
4791                         mutex_unlock(&fs_info->reclaim_bgs_lock);
4792                         btrfs_release_path(path);
4793                         break;
4794                 }
4795
4796                 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
4797                 length = btrfs_dev_extent_length(l, dev_extent);
4798
4799                 if (key.offset + length <= new_size) {
4800                         mutex_unlock(&fs_info->reclaim_bgs_lock);
4801                         btrfs_release_path(path);
4802                         break;
4803                 }
4804
4805                 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
4806                 btrfs_release_path(path);
4807
4808                 /*
4809                  * We may be relocating the only data chunk we have,
4810                  * which could potentially end up losing the data raid
4811                  * profile, so let's allocate an empty data chunk in
4812                  * advance.
4813                  */
4814                 ret = btrfs_may_alloc_data_chunk(fs_info, chunk_offset);
4815                 if (ret < 0) {
4816                         mutex_unlock(&fs_info->reclaim_bgs_lock);
4817                         goto done;
4818                 }
4819
4820                 ret = btrfs_relocate_chunk(fs_info, chunk_offset);
4821                 mutex_unlock(&fs_info->reclaim_bgs_lock);
4822                 if (ret == -ENOSPC) {
4823                         failed++;
4824                 } else if (ret) {
4825                         if (ret == -ETXTBSY) {
4826                                 btrfs_warn(fs_info,
4827                    "could not shrink block group %llu due to active swapfile",
4828                                            chunk_offset);
4829                         }
4830                         goto done;
4831                 }
4832         } while (key.offset-- > 0);
4833
4834         if (failed && !retried) {
4835                 failed = 0;
4836                 retried = true;
4837                 goto again;
4838         } else if (failed && retried) {
4839                 ret = -ENOSPC;
4840                 goto done;
4841         }
4842
4843         /* Shrinking succeeded, else we would be at "done". */
4844         trans = btrfs_start_transaction(root, 0);
4845         if (IS_ERR(trans)) {
4846                 ret = PTR_ERR(trans);
4847                 goto done;
4848         }
4849
4850         mutex_lock(&fs_info->chunk_mutex);
4851         /* Clear all state bits beyond the shrunk device size */
4852         clear_extent_bits(&device->alloc_state, new_size, (u64)-1,
4853                           CHUNK_STATE_MASK);
4854
4855         btrfs_device_set_disk_total_bytes(device, new_size);
4856         if (list_empty(&device->post_commit_list))
4857                 list_add_tail(&device->post_commit_list,
4858                               &trans->transaction->dev_update_list);
4859
4860         WARN_ON(diff > old_total);
4861         btrfs_set_super_total_bytes(super_copy,
4862                         round_down(old_total - diff, fs_info->sectorsize));
4863         mutex_unlock(&fs_info->chunk_mutex);
4864
4865         /* Now btrfs_update_device() will change the on-disk size. */
4866         ret = btrfs_update_device(trans, device);
4867         if (ret < 0) {
4868                 btrfs_abort_transaction(trans, ret);
4869                 btrfs_end_transaction(trans);
4870         } else {
4871                 ret = btrfs_commit_transaction(trans);
4872         }
4873 done:
4874         btrfs_free_path(path);
4875         if (ret) {
4876                 mutex_lock(&fs_info->chunk_mutex);
4877                 btrfs_device_set_total_bytes(device, old_size);
4878                 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
4879                         device->fs_devices->total_rw_bytes += diff;
4880                 atomic64_add(diff, &fs_info->free_chunk_space);
4881                 mutex_unlock(&fs_info->chunk_mutex);
4882         }
4883         return ret;
4884 }
4885
4886 static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info,
4887                            struct btrfs_key *key,
4888                            struct btrfs_chunk *chunk, int item_size)
4889 {
4890         struct btrfs_super_block *super_copy = fs_info->super_copy;
4891         struct btrfs_disk_key disk_key;
4892         u32 array_size;
4893         u8 *ptr;
4894
4895         lockdep_assert_held(&fs_info->chunk_mutex);
4896
4897         array_size = btrfs_super_sys_array_size(super_copy);
4898         if (array_size + item_size + sizeof(disk_key)
4899                         > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE)
4900                 return -EFBIG;
4901
4902         ptr = super_copy->sys_chunk_array + array_size;
4903         btrfs_cpu_key_to_disk(&disk_key, key);
4904         memcpy(ptr, &disk_key, sizeof(disk_key));
4905         ptr += sizeof(disk_key);
4906         memcpy(ptr, chunk, item_size);
4907         item_size += sizeof(disk_key);
4908         btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
4909
4910         return 0;
4911 }
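/*
 * Sketch of the resulting layout (illustrative): super_copy->sys_chunk_array
 * is a packed sequence of (disk key, chunk item) pairs, each chunk item
 * carrying its own stripes:
 *
 *   [btrfs_disk_key][btrfs_chunk + stripes][btrfs_disk_key][btrfs_chunk + stripes]...
 *
 * A reader advances by sizeof(struct btrfs_disk_key) plus
 * btrfs_chunk_item_size(num_stripes) per entry and stops at
 * btrfs_super_sys_array_size() bytes.
 */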
4912
4913 /*
4914  * Sort the devices in descending order by max_avail, then by total_avail.
4915  */
4916 static int btrfs_cmp_device_info(const void *a, const void *b)
4917 {
4918         const struct btrfs_device_info *di_a = a;
4919         const struct btrfs_device_info *di_b = b;
4920
4921         if (di_a->max_avail > di_b->max_avail)
4922                 return -1;
4923         if (di_a->max_avail < di_b->max_avail)
4924                 return 1;
4925         if (di_a->total_avail > di_b->total_avail)
4926                 return -1;
4927         if (di_a->total_avail < di_b->total_avail)
4928                 return 1;
4929         return 0;
4930 }
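/*
 * Example ordering (hypothetical numbers): devices with (max_avail,
 * total_avail) of (10G, 50G), (10G, 80G) and (4G, 90G) sort as
 * (10G, 80G), (10G, 50G), (4G, 90G): ties on max_avail fall back to
 * total_avail, both descending.  gather_device_info() below relies on this
 * so that devices_info[ndevs - 1] holds the smallest usable hole.
 */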
4931
4932 static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
4933 {
4934         if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK))
4935                 return;
4936
4937         btrfs_set_fs_incompat(info, RAID56);
4938 }
4939
4940 static void check_raid1c34_incompat_flag(struct btrfs_fs_info *info, u64 type)
4941 {
4942         if (!(type & (BTRFS_BLOCK_GROUP_RAID1C3 | BTRFS_BLOCK_GROUP_RAID1C4)))
4943                 return;
4944
4945         btrfs_set_fs_incompat(info, RAID1C34);
4946 }
4947
4948 /*
4949  * Structure used internally by btrfs_alloc_chunk(). Wraps the needed
4950  * parameters.
4951  */
4952 struct alloc_chunk_ctl {
4953         u64 start;
4954         u64 type;
4955         /* Total number of stripes to allocate */
4956         int num_stripes;
4957         /* sub_stripes info for map */
4958         int sub_stripes;
4959         /* Stripes per device */
4960         int dev_stripes;
4961         /* Maximum number of devices to use */
4962         int devs_max;
4963         /* Minimum number of devices to use */
4964         int devs_min;
4965         /* ndevs has to be a multiple of this */
4966         int devs_increment;
4967         /* Number of copies */
4968         int ncopies;
4969         /* Number of stripes worth of bytes to store parity information */
4970         int nparity;
4971         u64 max_stripe_size;
4972         u64 max_chunk_size;
4973         u64 dev_extent_min;
4974         u64 stripe_size;
4975         u64 chunk_size;
4976         int ndevs;
4977 };
4978
4979 static void init_alloc_chunk_ctl_policy_regular(
4980                                 struct btrfs_fs_devices *fs_devices,
4981                                 struct alloc_chunk_ctl *ctl)
4982 {
4983         u64 type = ctl->type;
4984
4985         if (type & BTRFS_BLOCK_GROUP_DATA) {
4986                 ctl->max_stripe_size = SZ_1G;
4987                 ctl->max_chunk_size = BTRFS_MAX_DATA_CHUNK_SIZE;
4988         } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
4989                 /* For larger filesystems, use larger metadata chunks */
4990                 if (fs_devices->total_rw_bytes > 50ULL * SZ_1G)
4991                         ctl->max_stripe_size = SZ_1G;
4992                 else
4993                         ctl->max_stripe_size = SZ_256M;
4994                 ctl->max_chunk_size = ctl->max_stripe_size;
4995         } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
4996                 ctl->max_stripe_size = SZ_32M;
4997                 ctl->max_chunk_size = 2 * ctl->max_stripe_size;
4998                 ctl->devs_max = min_t(int, ctl->devs_max,
4999                                       BTRFS_MAX_DEVS_SYS_CHUNK);
5000         } else {
5001                 BUG();
5002         }
5003
5004         /* We don't want a chunk larger than 10% of writable space */
5005         ctl->max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
5006                                   ctl->max_chunk_size);
5007         ctl->dev_extent_min = BTRFS_STRIPE_LEN * ctl->dev_stripes;
5008 }
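/*
 * Worked example (assumed sizes): on a filesystem with 100GiB of writable
 * space, a data chunk gets max_stripe_size = 1GiB and max_chunk_size =
 * min(BTRFS_MAX_DATA_CHUNK_SIZE, 10% of 100GiB) = 10GiB; metadata on the
 * same filesystem (more than 50GiB rw) is limited to 1GiB stripes and
 * chunks; and system chunks are capped at 2 * 32MiB = 64MiB spread over at
 * most BTRFS_MAX_DEVS_SYS_CHUNK devices.
 */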
5009
5010 static void init_alloc_chunk_ctl_policy_zoned(
5011                                       struct btrfs_fs_devices *fs_devices,
5012                                       struct alloc_chunk_ctl *ctl)
5013 {
5014         u64 zone_size = fs_devices->fs_info->zone_size;
5015         u64 limit;
5016         int min_num_stripes = ctl->devs_min * ctl->dev_stripes;
5017         int min_data_stripes = (min_num_stripes - ctl->nparity) / ctl->ncopies;
5018         u64 min_chunk_size = min_data_stripes * zone_size;
5019         u64 type = ctl->type;
5020
5021         ctl->max_stripe_size = zone_size;
5022         if (type & BTRFS_BLOCK_GROUP_DATA) {
5023                 ctl->max_chunk_size = round_down(BTRFS_MAX_DATA_CHUNK_SIZE,
5024                                                  zone_size);
5025         } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
5026                 ctl->max_chunk_size = ctl->max_stripe_size;
5027         } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
5028                 ctl->max_chunk_size = 2 * ctl->max_stripe_size;
5029                 ctl->devs_max = min_t(int, ctl->devs_max,
5030                                       BTRFS_MAX_DEVS_SYS_CHUNK);
5031         } else {
5032                 BUG();
5033         }
5034
5035         /* We don't want a chunk larger than 10% of writable space */
5036         limit = max(round_down(div_factor(fs_devices->total_rw_bytes, 1),
5037                                zone_size),
5038                     min_chunk_size);
5039         ctl->max_chunk_size = min(limit, ctl->max_chunk_size);
5040         ctl->dev_extent_min = zone_size * ctl->dev_stripes;
5041 }
5042
5043 static void init_alloc_chunk_ctl(struct btrfs_fs_devices *fs_devices,
5044                                  struct alloc_chunk_ctl *ctl)
5045 {
5046         int index = btrfs_bg_flags_to_raid_index(ctl->type);
5047
5048         ctl->sub_stripes = btrfs_raid_array[index].sub_stripes;
5049         ctl->dev_stripes = btrfs_raid_array[index].dev_stripes;
5050         ctl->devs_max = btrfs_raid_array[index].devs_max;
5051         if (!ctl->devs_max)
5052                 ctl->devs_max = BTRFS_MAX_DEVS(fs_devices->fs_info);
5053         ctl->devs_min = btrfs_raid_array[index].devs_min;
5054         ctl->devs_increment = btrfs_raid_array[index].devs_increment;
5055         ctl->ncopies = btrfs_raid_array[index].ncopies;
5056         ctl->nparity = btrfs_raid_array[index].nparity;
5057         ctl->ndevs = 0;
5058
5059         switch (fs_devices->chunk_alloc_policy) {
5060         case BTRFS_CHUNK_ALLOC_REGULAR:
5061                 init_alloc_chunk_ctl_policy_regular(fs_devices, ctl);
5062                 break;
5063         case BTRFS_CHUNK_ALLOC_ZONED:
5064                 init_alloc_chunk_ctl_policy_zoned(fs_devices, ctl);
5065                 break;
5066         default:
5067                 BUG();
5068         }
5069 }
5070
5071 static int gather_device_info(struct btrfs_fs_devices *fs_devices,
5072                               struct alloc_chunk_ctl *ctl,
5073                               struct btrfs_device_info *devices_info)
5074 {
5075         struct btrfs_fs_info *info = fs_devices->fs_info;
5076         struct btrfs_device *device;
5077         u64 total_avail;
5078         u64 dev_extent_want = ctl->max_stripe_size * ctl->dev_stripes;
5079         int ret;
5080         int ndevs = 0;
5081         u64 max_avail;
5082         u64 dev_offset;
5083
5084         /*
5085          * in the first pass through the devices list, we gather information
5086          * about the available holes on each device.
5087          */
5088         list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
5089                 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
5090                         WARN(1, KERN_ERR
5091                                "BTRFS: read-only device in alloc_list\n");
5092                         continue;
5093                 }
5094
5095                 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
5096                                         &device->dev_state) ||
5097                     test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
5098                         continue;
5099
5100                 if (device->total_bytes > device->bytes_used)
5101                         total_avail = device->total_bytes - device->bytes_used;
5102                 else
5103                         total_avail = 0;
5104
5105                 /* If there is no space on this device, skip it. */
5106                 if (total_avail < ctl->dev_extent_min)
5107                         continue;
5108
5109                 ret = find_free_dev_extent(device, dev_extent_want, &dev_offset,
5110                                            &max_avail);
5111                 if (ret && ret != -ENOSPC)
5112                         return ret;
5113
5114                 if (ret == 0)
5115                         max_avail = dev_extent_want;
5116
5117                 if (max_avail < ctl->dev_extent_min) {
5118                         if (btrfs_test_opt(info, ENOSPC_DEBUG))
5119                                 btrfs_debug(info,
5120                         "%s: devid %llu has no free space, have=%llu want=%llu",
5121                                             __func__, device->devid, max_avail,
5122                                             ctl->dev_extent_min);
5123                         continue;
5124                 }
5125
5126                 if (ndevs == fs_devices->rw_devices) {
5127                         WARN(1, "%s: found more than %llu devices\n",
5128                              __func__, fs_devices->rw_devices);
5129                         break;
5130                 }
5131                 devices_info[ndevs].dev_offset = dev_offset;
5132                 devices_info[ndevs].max_avail = max_avail;
5133                 devices_info[ndevs].total_avail = total_avail;
5134                 devices_info[ndevs].dev = device;
5135                 ++ndevs;
5136         }
5137         ctl->ndevs = ndevs;
5138
5139         /*
5140          * now sort the devices by hole size / available space
5141          */
5142         sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
5143              btrfs_cmp_device_info, NULL);
5144
5145         return 0;
5146 }
5147
5148 static int decide_stripe_size_regular(struct alloc_chunk_ctl *ctl,
5149                                       struct btrfs_device_info *devices_info)
5150 {
5151         /* Number of stripes that count for block group size */
5152         int data_stripes;
5153
5154         /*
5155          * The primary goal is to maximize the number of stripes, so use as
5156          * many devices as possible, even if the stripes are not maximum sized.
5157          *
5158          * The DUP profile stores more than one stripe per device; max_avail
5159          * is the total size, so we have to adjust.
5160          */
5161         ctl->stripe_size = div_u64(devices_info[ctl->ndevs - 1].max_avail,
5162                                    ctl->dev_stripes);
5163         ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
5164
5165         /* This will have to be fixed for RAID1 and RAID10 over more drives */
5166         data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;
5167
5168         /*
5169          * Use the number of data stripes to figure out how big this chunk is
5170          * really going to be in terms of logical address space, and compare
5171          * that answer with the max chunk size. If it's higher, we try to
5172          * reduce stripe_size.
5173          */
5174         if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) {
5175                 /*
5176                  * Reduce stripe_size, round it up to a 16MB boundary again and
5177                  * then use it, unless it ends up being even bigger than the
5178                  * previous value we had already.
5179                  */
5180                 ctl->stripe_size = min(round_up(div_u64(ctl->max_chunk_size,
5181                                                         data_stripes), SZ_16M),
5182                                        ctl->stripe_size);
5183         }
5184
5185         /* Align to BTRFS_STRIPE_LEN */
5186         ctl->stripe_size = round_down(ctl->stripe_size, BTRFS_STRIPE_LEN);
5187         ctl->chunk_size = ctl->stripe_size * data_stripes;
5188
5189         return 0;
5190 }
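/*
 * Worked example (assumed sizes): a RAID1 data chunk over ndevs = 2 devices
 * whose smallest hole is 500GiB, with dev_stripes = 1, ncopies = 2 and
 * nparity = 0, starts with stripe_size = 500GiB and num_stripes = 2, so
 * data_stripes = (2 - 0) / 2 = 1.  As 500GiB * 1 exceeds a 10GiB data chunk
 * limit, stripe_size is reduced to round_up(10GiB / 1, 16MiB) = 10GiB, then
 * rounded down to a BTRFS_STRIPE_LEN multiple, giving chunk_size = 10GiB of
 * logical space backed by 2 * 10GiB of device extents.
 */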
5191
5192 static int decide_stripe_size_zoned(struct alloc_chunk_ctl *ctl,
5193                                     struct btrfs_device_info *devices_info)
5194 {
5195         u64 zone_size = devices_info[0].dev->zone_info->zone_size;
5196         /* Number of stripes that count for block group size */
5197         int data_stripes;
5198
5199         /*
5200          * It should hold because:
5201          *    dev_extent_min == dev_extent_want == zone_size * dev_stripes
5202          */
5203         ASSERT(devices_info[ctl->ndevs - 1].max_avail == ctl->dev_extent_min);
5204
5205         ctl->stripe_size = zone_size;
5206         ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
5207         data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;
5208
5209         /* stripe_size is fixed in a zoned filesystem. Reduce ndevs instead. */
5210         if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) {
5211                 ctl->ndevs = div_u64(div_u64(ctl->max_chunk_size * ctl->ncopies,
5212                                              ctl->stripe_size) + ctl->nparity,
5213                                      ctl->dev_stripes);
5214                 ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
5215                 data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;
5216                 ASSERT(ctl->stripe_size * data_stripes <= ctl->max_chunk_size);
5217         }
5218
5219         ctl->chunk_size = ctl->stripe_size * data_stripes;
5220
5221         return 0;
5222 }
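/*
 * Worked example (assumed sizes): RAID0 data on a zoned filesystem with
 * 256MiB zones and ndevs = 8 (dev_stripes = 1, ncopies = 1, nparity = 0)
 * gives stripe_size = 256MiB, num_stripes = 8, data_stripes = 8 and thus a
 * 2GiB chunk.  If max_chunk_size were 1GiB, the branch above would shrink
 * ndevs to div_u64(div_u64(1GiB * 1, 256MiB) + 0, 1) = 4, yielding a 1GiB
 * chunk while keeping the zone-sized stripes fixed.
 */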
5223
5224 static int decide_stripe_size(struct btrfs_fs_devices *fs_devices,
5225                               struct alloc_chunk_ctl *ctl,
5226                               struct btrfs_device_info *devices_info)
5227 {
5228         struct btrfs_fs_info *info = fs_devices->fs_info;
5229
5230         /*
5231          * Round ndevs down to a multiple of devs_increment; devs_increment
5232          * can be any number, so we can't use round_down(), which requires a
5233          * power of 2, while rounddown() is safe.
5234          */
5235         ctl->ndevs = rounddown(ctl->ndevs, ctl->devs_increment);
5236
5237         if (ctl->ndevs < ctl->devs_min) {
5238                 if (btrfs_test_opt(info, ENOSPC_DEBUG)) {
5239                         btrfs_debug(info,
5240         "%s: not enough devices with free space: have=%d minimum required=%d",
5241                                     __func__, ctl->ndevs, ctl->devs_min);
5242                 }
5243                 return -ENOSPC;
5244         }
5245
5246         ctl->ndevs = min(ctl->ndevs, ctl->devs_max);
5247
5248         switch (fs_devices->chunk_alloc_policy) {
5249         case BTRFS_CHUNK_ALLOC_REGULAR:
5250                 return decide_stripe_size_regular(ctl, devices_info);
5251         case BTRFS_CHUNK_ALLOC_ZONED:
5252                 return decide_stripe_size_zoned(ctl, devices_info);
5253         default:
5254                 BUG();
5255         }
5256 }
5257
5258 static struct btrfs_block_group *create_chunk(struct btrfs_trans_handle *trans,
5259                         struct alloc_chunk_ctl *ctl,
5260                         struct btrfs_device_info *devices_info)
5261 {
5262         struct btrfs_fs_info *info = trans->fs_info;
5263         struct map_lookup *map = NULL;
5264         struct extent_map_tree *em_tree;
5265         struct btrfs_block_group *block_group;
5266         struct extent_map *em;
5267         u64 start = ctl->start;
5268         u64 type = ctl->type;
5269         int ret;
5270         int i;
5271         int j;
5272
5273         map = kmalloc(map_lookup_size(ctl->num_stripes), GFP_NOFS);
5274         if (!map)
5275                 return ERR_PTR(-ENOMEM);
5276         map->num_stripes = ctl->num_stripes;
5277
5278         for (i = 0; i < ctl->ndevs; ++i) {
5279                 for (j = 0; j < ctl->dev_stripes; ++j) {
5280                         int s = i * ctl->dev_stripes + j;
5281                         map->stripes[s].dev = devices_info[i].dev;
5282                         map->stripes[s].physical = devices_info[i].dev_offset +
5283                                                    j * ctl->stripe_size;
5284                 }
5285         }
5286         map->stripe_len = BTRFS_STRIPE_LEN;
5287         map->io_align = BTRFS_STRIPE_LEN;
5288         map->io_width = BTRFS_STRIPE_LEN;
5289         map->type = type;
5290         map->sub_stripes = ctl->sub_stripes;
5291
5292         trace_btrfs_chunk_alloc(info, map, start, ctl->chunk_size);
5293
5294         em = alloc_extent_map();
5295         if (!em) {
5296                 kfree(map);
5297                 return ERR_PTR(-ENOMEM);
5298         }
5299         set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
5300         em->map_lookup = map;
5301         em->start = start;
5302         em->len = ctl->chunk_size;
5303         em->block_start = 0;
5304         em->block_len = em->len;
5305         em->orig_block_len = ctl->stripe_size;
5306
5307         em_tree = &info->mapping_tree;
5308         write_lock(&em_tree->lock);
5309         ret = add_extent_mapping(em_tree, em, 0);
5310         if (ret) {
5311                 write_unlock(&em_tree->lock);
5312                 free_extent_map(em);
5313                 return ERR_PTR(ret);
5314         }
5315         write_unlock(&em_tree->lock);
5316
5317         block_group = btrfs_make_block_group(trans, 0, type, start, ctl->chunk_size);
5318         if (IS_ERR(block_group))
5319                 goto error_del_extent;
5320
5321         for (i = 0; i < map->num_stripes; i++) {
5322                 struct btrfs_device *dev = map->stripes[i].dev;
5323
5324                 btrfs_device_set_bytes_used(dev,
5325                                             dev->bytes_used + ctl->stripe_size);
5326                 if (list_empty(&dev->post_commit_list))
5327                         list_add_tail(&dev->post_commit_list,
5328                                       &trans->transaction->dev_update_list);
5329         }
5330
5331         atomic64_sub(ctl->stripe_size * map->num_stripes,
5332                      &info->free_chunk_space);
5333
5334         free_extent_map(em);
5335         check_raid56_incompat_flag(info, type);
5336         check_raid1c34_incompat_flag(info, type);
5337
5338         return block_group;
5339
5340 error_del_extent:
5341         write_lock(&em_tree->lock);
5342         remove_extent_mapping(em_tree, em);
5343         write_unlock(&em_tree->lock);
5344
5345         /* One for our allocation */
5346         free_extent_map(em);
5347         /* One for the tree reference */
5348         free_extent_map(em);
5349
5350         return block_group;
5351 }
5352
5353 struct btrfs_block_group *btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
5354                                             u64 type)
5355 {
5356         struct btrfs_fs_info *info = trans->fs_info;
5357         struct btrfs_fs_devices *fs_devices = info->fs_devices;
5358         struct btrfs_device_info *devices_info = NULL;
5359         struct alloc_chunk_ctl ctl;
5360         struct btrfs_block_group *block_group;
5361         int ret;
5362
5363         lockdep_assert_held(&info->chunk_mutex);
5364
5365         if (!alloc_profile_is_valid(type, 0)) {
5366                 ASSERT(0);
5367                 return ERR_PTR(-EINVAL);
5368         }
5369
5370         if (list_empty(&fs_devices->alloc_list)) {
5371                 if (btrfs_test_opt(info, ENOSPC_DEBUG))
5372                         btrfs_debug(info, "%s: no writable device", __func__);
5373                 return ERR_PTR(-ENOSPC);
5374         }
5375
5376         if (!(type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
5377                 btrfs_err(info, "invalid chunk type 0x%llx requested", type);
5378                 ASSERT(0);
5379                 return ERR_PTR(-EINVAL);
5380         }
5381
5382         ctl.start = find_next_chunk(info);
5383         ctl.type = type;
5384         init_alloc_chunk_ctl(fs_devices, &ctl);
5385
5386         devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info),
5387                                GFP_NOFS);
5388         if (!devices_info)
5389                 return ERR_PTR(-ENOMEM);
5390
5391         ret = gather_device_info(fs_devices, &ctl, devices_info);
5392         if (ret < 0) {
5393                 block_group = ERR_PTR(ret);
5394                 goto out;
5395         }
5396
5397         ret = decide_stripe_size(fs_devices, &ctl, devices_info);
5398         if (ret < 0) {
5399                 block_group = ERR_PTR(ret);
5400                 goto out;
5401         }
5402
5403         block_group = create_chunk(trans, &ctl, devices_info);
5404
5405 out:
5406         kfree(devices_info);
5407         return block_group;
5408 }
5409
5410 /*
5411  * This function, btrfs_chunk_alloc_add_chunk_item(), typically belongs to
5412  * phase 1 of chunk allocation. It belongs to phase 2 only when allocating
5413  * system chunks.
5414  *
5415  * See the comment at btrfs_chunk_alloc() for details about the chunk allocation
5416  * phases.
5417  */
5418 int btrfs_chunk_alloc_add_chunk_item(struct btrfs_trans_handle *trans,
5419                                      struct btrfs_block_group *bg)
5420 {
5421         struct btrfs_fs_info *fs_info = trans->fs_info;
5422         struct btrfs_root *extent_root = fs_info->extent_root;
5423         struct btrfs_root *chunk_root = fs_info->chunk_root;
5424         struct btrfs_key key;
5425         struct btrfs_chunk *chunk;
5426         struct btrfs_stripe *stripe;
5427         struct extent_map *em;
5428         struct map_lookup *map;
5429         size_t item_size;
5430         int i;
5431         int ret;
5432
5433         /*
5434          * We take the chunk_mutex for 2 reasons:
5435          *
5436          * 1) Updates and insertions in the chunk btree must be done while holding
5437          *    the chunk_mutex, as well as updating the system chunk array in the
5438          *    superblock. See the comment on top of btrfs_chunk_alloc() for the
5439          *    details;
5440          *
5441          * 2) To prevent races with the final phase of a device replace operation
5442          *    that replaces the device object associated with the map's stripes,
5443          *    because the device object's id can change at any time during that
5444          *    final phase of the device replace operation
5445          *    (dev-replace.c:btrfs_dev_replace_finishing()), so we could grab the
5446          *    replaced device and then see it with an ID of BTRFS_DEV_REPLACE_DEVID,
5447          *    which would cause a failure when updating the device item, which does
5448          *    not exist, or when persisting a stripe of the chunk item with such an ID.
5449          *    Here we can't use the device_list_mutex because our caller already
5450          *    has locked the chunk_mutex, and the final phase of device replace
5451          *    acquires both mutexes - first the device_list_mutex and then the
5452          *    chunk_mutex. Using any of those two mutexes protects us from a
5453          *    concurrent device replace.
5454          */
5455         lockdep_assert_held(&fs_info->chunk_mutex);
5456
5457         em = btrfs_get_chunk_map(fs_info, bg->start, bg->length);
5458         if (IS_ERR(em)) {
5459                 ret = PTR_ERR(em);
5460                 btrfs_abort_transaction(trans, ret);
5461                 return ret;
5462         }
5463
5464         map = em->map_lookup;
5465         item_size = btrfs_chunk_item_size(map->num_stripes);
5466
5467         chunk = kzalloc(item_size, GFP_NOFS);
5468         if (!chunk) {
5469                 ret = -ENOMEM;
5470                 btrfs_abort_transaction(trans, ret);
5471                 goto out;
5472         }
5473
5474         for (i = 0; i < map->num_stripes; i++) {
5475                 struct btrfs_device *device = map->stripes[i].dev;
5476
5477                 ret = btrfs_update_device(trans, device);
5478                 if (ret)
5479                         goto out;
5480         }
5481
5482         stripe = &chunk->stripe;
5483         for (i = 0; i < map->num_stripes; i++) {
5484                 struct btrfs_device *device = map->stripes[i].dev;
5485                 const u64 dev_offset = map->stripes[i].physical;
5486
5487                 btrfs_set_stack_stripe_devid(stripe, device->devid);
5488                 btrfs_set_stack_stripe_offset(stripe, dev_offset);
5489                 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
5490                 stripe++;
5491         }
5492
5493         btrfs_set_stack_chunk_length(chunk, bg->length);
5494         btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
5495         btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
5496         btrfs_set_stack_chunk_type(chunk, map->type);
5497         btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
5498         btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
5499         btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
5500         btrfs_set_stack_chunk_sector_size(chunk, fs_info->sectorsize);
5501         btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
5502
5503         key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
5504         key.type = BTRFS_CHUNK_ITEM_KEY;
5505         key.offset = bg->start;
5506
5507         ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
5508         if (ret)
5509                 goto out;
5510
5511         bg->chunk_item_inserted = 1;
5512
5513         if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
5514                 ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size);
5515                 if (ret)
5516                         goto out;
5517         }
5518
5519 out:
5520         kfree(chunk);
5521         free_extent_map(em);
5522         return ret;
5523 }
5524
5525 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans)
5526 {
5527         struct btrfs_fs_info *fs_info = trans->fs_info;
5528         u64 alloc_profile;
5529         struct btrfs_block_group *meta_bg;
5530         struct btrfs_block_group *sys_bg;
5531
5532         /*
5533          * When adding a new device for sprouting, the seed device is read-only
5534          * so we must first allocate a metadata and a system chunk. But before
5535          * adding the block group items to the extent, device and chunk btrees,
5536          * we must:
5537          *
5538          * 1) Create both chunks without doing any changes to the btrees, as
5539          *    otherwise we would get -ENOSPC since the block groups from the
5540          *    seed device are read-only;
5541          *
5542          * 2) Add the device item for the new sprout device - finishing the setup
5543          *    of a new block group requires updating the device item in the chunk
5544          *    btree, so it must exist when we attempt to do it. The previous step
5545          *    ensures this does not fail with -ENOSPC.
5546          *
5547          * After that we can add the block group items to their btrees:
5548          * update existing device item in the chunk btree, add a new block group
5549          * item to the extent btree, add a new chunk item to the chunk btree and
5550          * finally add the new device extent items to the devices btree.
5551          */
5552
5553         alloc_profile = btrfs_metadata_alloc_profile(fs_info);
5554         meta_bg = btrfs_alloc_chunk(trans, alloc_profile);
5555         if (IS_ERR(meta_bg))
5556                 return PTR_ERR(meta_bg);
5557
5558         alloc_profile = btrfs_system_alloc_profile(fs_info);
5559         sys_bg = btrfs_alloc_chunk(trans, alloc_profile);
5560         if (IS_ERR(sys_bg))
5561                 return PTR_ERR(sys_bg);
5562
5563         return 0;
5564 }
5565
5566 static inline int btrfs_chunk_max_errors(struct map_lookup *map)
5567 {
5568         const int index = btrfs_bg_flags_to_raid_index(map->type);
5569
5570         return btrfs_raid_array[index].tolerated_failures;
5571 }
5572
5573 int btrfs_chunk_readonly(struct btrfs_fs_info *fs_info, u64 chunk_offset)
5574 {
5575         struct extent_map *em;
5576         struct map_lookup *map;
5577         int readonly = 0;
5578         int miss_ndevs = 0;
5579         int i;
5580
5581         em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
5582         if (IS_ERR(em))
5583                 return 1;
5584
5585         map = em->map_lookup;
5586         for (i = 0; i < map->num_stripes; i++) {
5587                 if (test_bit(BTRFS_DEV_STATE_MISSING,
5588                                         &map->stripes[i].dev->dev_state)) {
5589                         miss_ndevs++;
5590                         continue;
5591                 }
5592                 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE,
5593                                         &map->stripes[i].dev->dev_state)) {
5594                         readonly = 1;
5595                         goto end;
5596                 }
5597         }
5598
5599         /*
5600          * If the number of missing devices is larger than max errors,
5601          * we cannot write the data into that chunk successfully, so
5602          * set it readonly.
5603          */
5604         if (miss_ndevs > btrfs_chunk_max_errors(map))
5605                 readonly = 1;
5606 end:
5607         free_extent_map(em);
5608         return readonly;
5609 }
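/*
 * Rough worked example of the readonly decision above (tolerated_failures
 * values come from btrfs_raid_array, the chunk layout is arbitrary):
 *
 *   RAID1 chunk, 2 stripes, tolerated_failures = 1:
 *     0 missing, both writeable -> miss_ndevs = 0 <= 1 -> writeable
 *     1 missing                 -> miss_ndevs = 1 <= 1 -> writeable (degraded)
 *     2 missing                 -> miss_ndevs = 2 >  1 -> readonly
 *
 * A device that is present but not writeable makes the chunk readonly right
 * away, independent of the tolerated failure count.
 */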
5610
5611 void btrfs_mapping_tree_free(struct extent_map_tree *tree)
5612 {
5613         struct extent_map *em;
5614
5615         while (1) {
5616                 write_lock(&tree->lock);
5617                 em = lookup_extent_mapping(tree, 0, (u64)-1);
5618                 if (em)
5619                         remove_extent_mapping(tree, em);
5620                 write_unlock(&tree->lock);
5621                 if (!em)
5622                         break;
5623                 /* once for us */
5624                 free_extent_map(em);
5625                 /* once for the tree */
5626                 free_extent_map(em);
5627         }
5628 }
5629
5630 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
5631 {
5632         struct extent_map *em;
5633         struct map_lookup *map;
5634         int ret;
5635
5636         em = btrfs_get_chunk_map(fs_info, logical, len);
5637         if (IS_ERR(em))
5638                 /*
5639                  * We could return errors for these cases, but that could get
5640                  * ugly, and we'd probably just end up doing the same thing: exit
5641                  * without doing anything else. So return 1 so the callers don't
5642                  * try to use other copies.
5643                  */
5644                 return 1;
5645
5646         map = em->map_lookup;
5647         if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1_MASK))
5648                 ret = map->num_stripes;
5649         else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5650                 ret = map->sub_stripes;
5651         else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
5652                 ret = 2;
5653         else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
5654                 /*
5655                  * There could be two corrupted data stripes, so we need to
5656                  * retry in a loop in order to rebuild the correct data.
5657                  *
5658                  * Fail a stripe at a time on every retry except the
5659                  * stripe under reconstruction.
5660                  */
5661                 ret = map->num_stripes;
5662         else
5663                 ret = 1;
5664         free_extent_map(em);
5665
5666         down_read(&fs_info->dev_replace.rwsem);
5667         if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace) &&
5668             fs_info->dev_replace.tgtdev)
5669                 ret++;
5670         up_read(&fs_info->dev_replace.rwsem);
5671
5672         return ret;
5673 }
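/*
 * Illustration of the copy counts computed above (profiles and stripe counts
 * are only examples):
 *
 *   single                  -> 1
 *   DUP / RAID1 (2 stripes) -> num_stripes = 2
 *   RAID1C3 (3 stripes)     -> num_stripes = 3
 *   RAID10                  -> sub_stripes = 2
 *   RAID5                   -> 2 (the data block or a rebuild from parity)
 *   RAID6 (4 stripes)       -> num_stripes = 4, so reconstruction can be
 *                              retried with a different stripe assumed bad
 *
 * With a device replace running and a target device present, one extra copy
 * is reported so the target can serve as an additional mirror.
 */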
5674
5675 unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
5676                                     u64 logical)
5677 {
5678         struct extent_map *em;
5679         struct map_lookup *map;
5680         unsigned long len = fs_info->sectorsize;
5681
5682         em = btrfs_get_chunk_map(fs_info, logical, len);
5683
5684         if (!WARN_ON(IS_ERR(em))) {
5685                 map = em->map_lookup;
5686                 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
5687                         len = map->stripe_len * nr_data_stripes(map);
5688                 free_extent_map(em);
5689         }
5690         return len;
5691 }
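/*
 * Worked example for the full stripe length above (stripe_len and device
 * counts are arbitrary): with a 64K stripe_len, a RAID5 chunk on 3 devices
 * has nr_data_stripes = 2, so the full stripe spans 128K of logical space,
 * while a RAID6 chunk on 5 devices has nr_data_stripes = 3, i.e. 192K.
 * For non-RAID56 profiles the function returns fs_info->sectorsize.
 */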
5692
5693 int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
5694 {
5695         struct extent_map *em;
5696         struct map_lookup *map;
5697         int ret = 0;
5698
5699         em = btrfs_get_chunk_map(fs_info, logical, len);
5700
5701         if (!WARN_ON(IS_ERR(em))) {
5702                 map = em->map_lookup;
5703                 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
5704                         ret = 1;
5705                 free_extent_map(em);
5706         }
5707         return ret;
5708 }
5709
5710 static int find_live_mirror(struct btrfs_fs_info *fs_info,
5711                             struct map_lookup *map, int first,
5712                             int dev_replace_is_ongoing)
5713 {
5714         int i;
5715         int num_stripes;
5716         int preferred_mirror;
5717         int tolerance;
5718         struct btrfs_device *srcdev;
5719
5720         ASSERT((map->type &
5721                  (BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10)));
5722
5723         if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5724                 num_stripes = map->sub_stripes;
5725         else
5726                 num_stripes = map->num_stripes;
5727
5728         switch (fs_info->fs_devices->read_policy) {
5729         default:
5730                 /* Shouldn't happen, just warn and use pid instead of failing */
5731                 btrfs_warn_rl(fs_info,
5732                               "unknown read_policy type %u, reset to pid",
5733                               fs_info->fs_devices->read_policy);
5734                 fs_info->fs_devices->read_policy = BTRFS_READ_POLICY_PID;
5735                 fallthrough;
5736         case BTRFS_READ_POLICY_PID:
5737                 preferred_mirror = first + (current->pid % num_stripes);
5738                 break;
5739         }
5740
5741         if (dev_replace_is_ongoing &&
5742             fs_info->dev_replace.cont_reading_from_srcdev_mode ==
5743              BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
5744                 srcdev = fs_info->dev_replace.srcdev;
5745         else
5746                 srcdev = NULL;
5747
5748         /*
5749          * Try to avoid the drive that is the source drive for a
5750          * dev-replace procedure; only choose it if no other non-missing
5751          * mirror is available.
5752          */
5753         for (tolerance = 0; tolerance < 2; tolerance++) {
5754                 if (map->stripes[preferred_mirror].dev->bdev &&
5755                     (tolerance || map->stripes[preferred_mirror].dev != srcdev))
5756                         return preferred_mirror;
5757                 for (i = first; i < first + num_stripes; i++) {
5758                         if (map->stripes[i].dev->bdev &&
5759                             (tolerance || map->stripes[i].dev != srcdev))
5760                                 return i;
5761                 }
5762         }
5763
5764         /* we couldn't find one that doesn't fail.  Just return something
5765          * and the io error handling code will clean up eventually
5766          */
5767         return preferred_mirror;
5768 }
5769
5770 /* Bubble-sort the stripe set to put the parity/syndrome stripes last */
5771 static void sort_parity_stripes(struct btrfs_bio *bbio, int num_stripes)
5772 {
5773         int i;
5774         int again = 1;
5775
5776         while (again) {
5777                 again = 0;
5778                 for (i = 0; i < num_stripes - 1; i++) {
5779                         /* Swap if parity is on a smaller index */
5780                         if (bbio->raid_map[i] > bbio->raid_map[i + 1]) {
5781                                 swap(bbio->stripes[i], bbio->stripes[i + 1]);
5782                                 swap(bbio->raid_map[i], bbio->raid_map[i + 1]);
5783                                 again = 1;
5784                         }
5785                 }
5786         }
5787 }
5788
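/*
 * Rough sketch of the single allocation done by alloc_btrfs_bio() below.
 * The stripes array lives at the tail of struct btrfs_bio, and the two other
 * arrays are carved out of the same allocation via pointer arithmetic:
 *
 *   [ struct btrfs_bio + stripes[total_stripes] ]
 *   [ tgtdev_map[real_stripes]  (int)           ]
 *   [ raid_map[total_stripes]   (u64)           ]
 */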
5789 static struct btrfs_bio *alloc_btrfs_bio(int total_stripes, int real_stripes)
5790 {
5791         struct btrfs_bio *bbio = kzalloc(
5792                  /* the size of the btrfs_bio */
5793                 sizeof(struct btrfs_bio) +
5794                 /* plus the variable array for the stripes */
5795                 sizeof(struct btrfs_bio_stripe) * (total_stripes) +
5796                 /* plus the variable array for the tgt dev */
5797                 sizeof(int) * (real_stripes) +
5798                 /*
5799                  * plus the raid_map, which includes both the tgt dev
5800                  * and the stripes
5801                  */
5802                 sizeof(u64) * (total_stripes),
5803                 GFP_NOFS|__GFP_NOFAIL);
5804
5805         atomic_set(&bbio->error, 0);
5806         refcount_set(&bbio->refs, 1);
5807
5808         bbio->tgtdev_map = (int *)(bbio->stripes + total_stripes);
5809         bbio->raid_map = (u64 *)(bbio->tgtdev_map + real_stripes);
5810
5811         return bbio;
5812 }
5813
5814 void btrfs_get_bbio(struct btrfs_bio *bbio)
5815 {
5816         WARN_ON(!refcount_read(&bbio->refs));
5817         refcount_inc(&bbio->refs);
5818 }
5819
5820 void btrfs_put_bbio(struct btrfs_bio *bbio)
5821 {
5822         if (!bbio)
5823                 return;
5824         if (refcount_dec_and_test(&bbio->refs))
5825                 kfree(bbio);
5826 }
5827
5828 /* can REQ_OP_DISCARD be sent with other REQ like REQ_OP_WRITE? */
5829 /*
5830  * Note that discard won't be sent to the target device of a device
5831  * replace.
5832  */
5833 static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info,
5834                                          u64 logical, u64 *length_ret,
5835                                          struct btrfs_bio **bbio_ret)
5836 {
5837         struct extent_map *em;
5838         struct map_lookup *map;
5839         struct btrfs_bio *bbio;
5840         u64 length = *length_ret;
5841         u64 offset;
5842         u64 stripe_nr;
5843         u64 stripe_nr_end;
5844         u64 stripe_end_offset;
5845         u64 stripe_cnt;
5846         u64 stripe_len;
5847         u64 stripe_offset;
5848         u64 num_stripes;
5849         u32 stripe_index;
5850         u32 factor = 0;
5851         u32 sub_stripes = 0;
5852         u64 stripes_per_dev = 0;
5853         u32 remaining_stripes = 0;
5854         u32 last_stripe = 0;
5855         int ret = 0;
5856         int i;
5857
5858         /* discard always returns a bbio */
5859         ASSERT(bbio_ret);
5860
5861         em = btrfs_get_chunk_map(fs_info, logical, length);
5862         if (IS_ERR(em))
5863                 return PTR_ERR(em);
5864
5865         map = em->map_lookup;
5866         /* we don't discard raid56 yet */
5867         if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5868                 ret = -EOPNOTSUPP;
5869                 goto out;
5870         }
5871
5872         offset = logical - em->start;
5873         length = min_t(u64, em->start + em->len - logical, length);
5874         *length_ret = length;
5875
5876         stripe_len = map->stripe_len;
5877         /*
5878          * stripe_nr counts the total number of stripes we have to stride
5879          * to get to this block
5880          */
5881         stripe_nr = div64_u64(offset, stripe_len);
5882
5883         /* stripe_offset is the offset of this block in its stripe */
5884         stripe_offset = offset - stripe_nr * stripe_len;
5885
5886         stripe_nr_end = round_up(offset + length, map->stripe_len);
5887         stripe_nr_end = div64_u64(stripe_nr_end, map->stripe_len);
5888         stripe_cnt = stripe_nr_end - stripe_nr;
5889         stripe_end_offset = stripe_nr_end * map->stripe_len -
5890                             (offset + length);
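        /*
         * Worked example for the values above (stripe_len and the discard
         * range are arbitrary): stripe_len = 64K, offset = 96K, length = 128K:
         *
         *   stripe_nr         = 96K / 64K                 = 1
         *   stripe_offset     = 96K - 1 * 64K             = 32K
         *   stripe_nr_end     = round_up(224K, 64K) / 64K = 4
         *   stripe_cnt        = 4 - 1                     = 3
         *   stripe_end_offset = 4 * 64K - 224K            = 32K
         */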
5891         /*
5892          * after this, stripe_nr is the number of stripes on this
5893          * device we have to walk to find the data, and stripe_index is
5894          * the number of our device in the stripe array
5895          */
5896         num_stripes = 1;
5897         stripe_index = 0;
5898         if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
5899                          BTRFS_BLOCK_GROUP_RAID10)) {
5900                 if (map->type & BTRFS_BLOCK_GROUP_RAID0)
5901                         sub_stripes = 1;
5902                 else
5903                         sub_stripes = map->sub_stripes;
5904
5905                 factor = map->num_stripes / sub_stripes;
5906                 num_stripes = min_t(u64, map->num_stripes,
5907                                     sub_stripes * stripe_cnt);
5908                 stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
5909                 stripe_index *= sub_stripes;
5910                 stripes_per_dev = div_u64_rem(stripe_cnt, factor,
5911                                               &remaining_stripes);
5912                 div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
5913                 last_stripe *= sub_stripes;
5914         } else if (map->type & (BTRFS_BLOCK_GROUP_RAID1_MASK |
5915                                 BTRFS_BLOCK_GROUP_DUP)) {
5916                 num_stripes = map->num_stripes;
5917         } else {
5918                 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
5919                                         &stripe_index);
5920         }
5921
5922         bbio = alloc_btrfs_bio(num_stripes, 0);
5923         if (!bbio) {
5924                 ret = -ENOMEM;
5925                 goto out;
5926         }
5927
5928         for (i = 0; i < num_stripes; i++) {
5929                 bbio->stripes[i].physical =
5930                         map->stripes[stripe_index].physical +
5931                         stripe_offset + stripe_nr * map->stripe_len;
5932                 bbio->stripes[i].dev = map->stripes[stripe_index].dev;
5933
5934                 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
5935                                  BTRFS_BLOCK_GROUP_RAID10)) {
5936                         bbio->stripes[i].length = stripes_per_dev *
5937                                 map->stripe_len;
5938
5939                         if (i / sub_stripes < remaining_stripes)
5940                                 bbio->stripes[i].length +=
5941                                         map->stripe_len;
5942
5943                         /*
5944                          * Special for the first stripe and
5945                          * the last stripe:
5946                          *
5947                          * |-------|...|-------|
5948                          *     |----------|
5949                          *    off     end_off
5950                          */
5951                         if (i < sub_stripes)
5952                                 bbio->stripes[i].length -=
5953                                         stripe_offset;
5954
5955                         if (stripe_index >= last_stripe &&
5956                             stripe_index <= (last_stripe +
5957                                              sub_stripes - 1))
5958                                 bbio->stripes[i].length -=
5959                                         stripe_end_offset;
5960
5961                         if (i == sub_stripes - 1)
5962                                 stripe_offset = 0;
5963                 } else {
5964                         bbio->stripes[i].length = length;
5965                 }
5966
5967                 stripe_index++;
5968                 if (stripe_index == map->num_stripes) {
5969                         stripe_index = 0;
5970                         stripe_nr++;
5971                 }
5972         }
5973
5974         *bbio_ret = bbio;
5975         bbio->map_type = map->type;
5976         bbio->num_stripes = num_stripes;
5977 out:
5978         free_extent_map(em);
5979         return ret;
5980 }
5981
5982 /*
5983  * In the dev-replace case, for the repair case (that's the only case where
5984  * the mirror is selected explicitly when calling btrfs_map_block), blocks
5985  * left of the left cursor can also be read from the target drive.
5986  *
5987  * For REQ_GET_READ_MIRRORS, the target drive is added as the last one to the
5988  * array of stripes.
5989  * For READ, it also needs to be supported using the same mirror number.
5990  *
5991  * If the requested block is not left of the left cursor, EIO is returned. This
5992  * can happen because btrfs_num_copies() returns one more in the dev-replace
5993  * case.
5994  */
5995 static int get_extra_mirror_from_replace(struct btrfs_fs_info *fs_info,
5996                                          u64 logical, u64 length,
5997                                          u64 srcdev_devid, int *mirror_num,
5998                                          u64 *physical)
5999 {
6000         struct btrfs_bio *bbio = NULL;
6001         int num_stripes;
6002         int index_srcdev = 0;
6003         int found = 0;
6004         u64 physical_of_found = 0;
6005         int i;
6006         int ret = 0;
6007
6008         ret = __btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
6009                                 logical, &length, &bbio, 0, 0);
6010         if (ret) {
6011                 ASSERT(bbio == NULL);
6012                 return ret;
6013         }
6014
6015         num_stripes = bbio->num_stripes;
6016         if (*mirror_num > num_stripes) {
6017                 /*
6018                  * BTRFS_MAP_GET_READ_MIRRORS does not contain this mirror,
6019                  * which means that the requested area is not left of the left
6020                  * cursor.
6021                  */
6022                 btrfs_put_bbio(bbio);
6023                 return -EIO;
6024         }
6025
6026         /*
6027          * Process the rest of the function using the mirror_num of the source
6028          * drive, so look it up first.  At the end, patch the device
6029          * pointer to that of the target drive.
6030          */
6031         for (i = 0; i < num_stripes; i++) {
6032                 if (bbio->stripes[i].dev->devid != srcdev_devid)
6033                         continue;
6034
6035                 /*
6036                  * In case of DUP, in order to keep it simple, only add the
6037                  * mirror with the lowest physical address
6038                  */
6039                 if (found &&
6040                     physical_of_found <= bbio->stripes[i].physical)
6041                         continue;
6042
6043                 index_srcdev = i;
6044                 found = 1;
6045                 physical_of_found = bbio->stripes[i].physical;
6046         }
6047
6048         btrfs_put_bbio(bbio);
6049
6050         ASSERT(found);
6051         if (!found)
6052                 return -EIO;
6053
6054         *mirror_num = index_srcdev + 1;
6055         *physical = physical_of_found;
6056         return ret;
6057 }
6058
6059 static bool is_block_group_to_copy(struct btrfs_fs_info *fs_info, u64 logical)
6060 {
6061         struct btrfs_block_group *cache;
6062         bool ret;
6063
6064         /* A non-zoned filesystem does not use the "to_copy" flag */
6065         if (!btrfs_is_zoned(fs_info))
6066                 return false;
6067
6068         cache = btrfs_lookup_block_group(fs_info, logical);
6069
6070         spin_lock(&cache->lock);
6071         ret = cache->to_copy;
6072         spin_unlock(&cache->lock);
6073
6074         btrfs_put_block_group(cache);
6075         return ret;
6076 }
6077
6078 static void handle_ops_on_dev_replace(enum btrfs_map_op op,
6079                                       struct btrfs_bio **bbio_ret,
6080                                       struct btrfs_dev_replace *dev_replace,
6081                                       u64 logical,
6082                                       int *num_stripes_ret, int *max_errors_ret)
6083 {
6084         struct btrfs_bio *bbio = *bbio_ret;
6085         u64 srcdev_devid = dev_replace->srcdev->devid;
6086         int tgtdev_indexes = 0;
6087         int num_stripes = *num_stripes_ret;
6088         int max_errors = *max_errors_ret;
6089         int i;
6090
6091         if (op == BTRFS_MAP_WRITE) {
6092                 int index_where_to_add;
6093
6094                 /*
6095                  * A block group which has "to_copy" set will eventually be
6096                  * copied by the dev-replace process. We can avoid cloning IO here.
6097                  */
6098                 if (is_block_group_to_copy(dev_replace->srcdev->fs_info, logical))
6099                         return;
6100
6101                 /*
6102                  * duplicate the write operations while the dev replace
6103                  * procedure is running. Since the copying of the old disk to
6104                  * the new disk takes place at run time while the filesystem is
6105                  * mounted writable, the regular write operations to the old
6106                  * disk have to be duplicated to go to the new disk as well.
6107                  *
6108                  * Note that device->missing is handled by the caller, and that
6109                  * the write to the old disk is already set up in the stripes
6110                  * array.
6111                  */
6112                 index_where_to_add = num_stripes;
6113                 for (i = 0; i < num_stripes; i++) {
6114                         if (bbio->stripes[i].dev->devid == srcdev_devid) {
6115                                 /* write to new disk, too */
6116                                 struct btrfs_bio_stripe *new =
6117                                         bbio->stripes + index_where_to_add;
6118                                 struct btrfs_bio_stripe *old =
6119                                         bbio->stripes + i;
6120
6121                                 new->physical = old->physical;
6122                                 new->length = old->length;
6123                                 new->dev = dev_replace->tgtdev;
6124                                 bbio->tgtdev_map[i] = index_where_to_add;
6125                                 index_where_to_add++;
6126                                 max_errors++;
6127                                 tgtdev_indexes++;
6128                         }
6129                 }
6130                 num_stripes = index_where_to_add;
6131         } else if (op == BTRFS_MAP_GET_READ_MIRRORS) {
6132                 int index_srcdev = 0;
6133                 int found = 0;
6134                 u64 physical_of_found = 0;
6135
6136                 /*
6137                  * During the dev-replace procedure, the target drive can also
6138                  * be used to read data in case it is needed to repair a corrupt
6139                  * block elsewhere. This is possible if the requested area is
6140                  * left of the left cursor. In this area, the target drive is a
6141                  * full copy of the source drive.
6142                  */
6143                 for (i = 0; i < num_stripes; i++) {
6144                         if (bbio->stripes[i].dev->devid == srcdev_devid) {
6145                                 /*
6146                                  * In case of DUP, in order to keep it simple,
6147                                  * only add the mirror with the lowest physical
6148                                  * address
6149                                  */
6150                                 if (found &&
6151                                     physical_of_found <=
6152                                      bbio->stripes[i].physical)
6153                                         continue;
6154                                 index_srcdev = i;
6155                                 found = 1;
6156                                 physical_of_found = bbio->stripes[i].physical;
6157                         }
6158                 }
6159                 if (found) {
6160                         struct btrfs_bio_stripe *tgtdev_stripe =
6161                                 bbio->stripes + num_stripes;
6162
6163                         tgtdev_stripe->physical = physical_of_found;
6164                         tgtdev_stripe->length =
6165                                 bbio->stripes[index_srcdev].length;
6166                         tgtdev_stripe->dev = dev_replace->tgtdev;
6167                         bbio->tgtdev_map[index_srcdev] = num_stripes;
6168
6169                         tgtdev_indexes++;
6170                         num_stripes++;
6171                 }
6172         }
6173
6174         *num_stripes_ret = num_stripes;
6175         *max_errors_ret = max_errors;
6176         bbio->num_tgtdevs = tgtdev_indexes;
6177         *bbio_ret = bbio;
6178 }
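/*
 * Small illustration of the write duplication above (the layout is an
 * arbitrary example): for a RAID1 write with stripes[0] on the source device
 * and stripes[1] elsewhere, a third stripe is appended that points at the
 * replace target with the same physical offset and length as stripes[0],
 * tgtdev_map[0] is set to 2, num_stripes becomes 3 and max_errors grows by
 * one so that losing the extra write to the target is tolerated.
 */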
6179
6180 static bool need_full_stripe(enum btrfs_map_op op)
6181 {
6182         return (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS);
6183 }
6184
6185 /*
6186  * Calculate the geometry of a particular (address, len) tuple. This
6187  * information is used to calculate how big a particular bio can get before it
6188  * straddles a stripe.
6189  *
6190  * @fs_info: the filesystem
6191  * @em:      mapping containing the logical extent
6192  * @op:      type of operation - write or read
6193  * @logical: address that we want to figure out the geometry of
6194  * @io_geom: pointer used to return values
6195  *
6196  * Returns < 0 in case a chunk for the given logical address cannot be found
6197  * (usually shouldn't happen unless @logical is corrupted), 0 otherwise.
6198  */
6199 int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, struct extent_map *em,
6200                           enum btrfs_map_op op, u64 logical,
6201                           struct btrfs_io_geometry *io_geom)
6202 {
6203         struct map_lookup *map;
6204         u64 len;
6205         u64 offset;
6206         u64 stripe_offset;
6207         u64 stripe_nr;
6208         u64 stripe_len;
6209         u64 raid56_full_stripe_start = (u64)-1;
6210         int data_stripes;
6211
6212         ASSERT(op != BTRFS_MAP_DISCARD);
6213
6214         map = em->map_lookup;
6215         /* Offset of this logical address in the chunk */
6216         offset = logical - em->start;
6217         /* Len of a stripe in a chunk */
6218         stripe_len = map->stripe_len;
6219         /* Stripe where this block falls in */
6220         stripe_nr = div64_u64(offset, stripe_len);
6221         /* Offset of stripe in the chunk */
6222         stripe_offset = stripe_nr * stripe_len;
6223         if (offset < stripe_offset) {
6224                 btrfs_crit(fs_info,
6225 "stripe math has gone wrong, stripe_offset=%llu offset=%llu start=%llu logical=%llu stripe_len=%llu",
6226                         stripe_offset, offset, em->start, logical, stripe_len);
6227                 return -EINVAL;
6228         }
6229
6230         /* stripe_offset is the offset of this block in its stripe */
6231         stripe_offset = offset - stripe_offset;
6232         data_stripes = nr_data_stripes(map);
6233
6234         if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
6235                 u64 max_len = stripe_len - stripe_offset;
6236
6237                 /*
6238                  * In case of raid56, we need to know the stripe aligned start
6239                  */
6240                 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
6241                         unsigned long full_stripe_len = stripe_len * data_stripes;
6242                         raid56_full_stripe_start = offset;
6243
6244                         /*
6245                          * Allow a write of a full stripe, but make sure we
6246                          * don't allow straddling of stripes
6247                          */
6248                         raid56_full_stripe_start = div64_u64(raid56_full_stripe_start,
6249                                         full_stripe_len);
6250                         raid56_full_stripe_start *= full_stripe_len;
6251
6252                         /*
6253                          * For writes to RAID[56], allow a full stripeset across
6254                          * all disks. For other RAID types and for RAID[56]
6255                          * reads, just allow a single stripe (on a single disk).
6256                          */
6257                         if (op == BTRFS_MAP_WRITE) {
6258                                 max_len = stripe_len * data_stripes -
6259                                           (offset - raid56_full_stripe_start);
6260                         }
6261                 }
6262                 len = min_t(u64, em->len - offset, max_len);
6263         } else {
6264                 len = em->len - offset;
6265         }
6266
6267         io_geom->len = len;
6268         io_geom->offset = offset;
6269         io_geom->stripe_len = stripe_len;
6270         io_geom->stripe_nr = stripe_nr;
6271         io_geom->stripe_offset = stripe_offset;
6272         io_geom->raid56_stripe_offset = raid56_full_stripe_start;
6273
6274         return 0;
6275 }
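/*
 * Worked example for the geometry above (all numbers are arbitrary): RAID5
 * over 3 devices, stripe_len = 64K, data_stripes = 2, and a logical address
 * 136K into the chunk:
 *
 *   stripe_nr     = 136K / 64K     = 2
 *   stripe_offset = 136K - 2 * 64K = 8K
 *
 * For a read, len is capped at stripe_len - stripe_offset = 56K so the bio
 * stays within one stripe.  For a write, the full stripe starts at
 * round_down(136K, 128K) = 128K, so up to 128K - (136K - 128K) = 120K may be
 * written, i.e. until the end of the current full stripe.
 */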
6276
6277 static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
6278                              enum btrfs_map_op op,
6279                              u64 logical, u64 *length,
6280                              struct btrfs_bio **bbio_ret,
6281                              int mirror_num, int need_raid_map)
6282 {
6283         struct extent_map *em;
6284         struct map_lookup *map;
6285         u64 stripe_offset;
6286         u64 stripe_nr;
6287         u64 stripe_len;
6288         u32 stripe_index;
6289         int data_stripes;
6290         int i;
6291         int ret = 0;
6292         int num_stripes;
6293         int max_errors = 0;
6294         int tgtdev_indexes = 0;
6295         struct btrfs_bio *bbio = NULL;
6296         struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
6297         int dev_replace_is_ongoing = 0;
6298         int num_alloc_stripes;
6299         int patch_the_first_stripe_for_dev_replace = 0;
6300         u64 physical_to_patch_in_first_stripe = 0;
6301         u64 raid56_full_stripe_start = (u64)-1;
6302         struct btrfs_io_geometry geom;
6303
6304         ASSERT(bbio_ret);
6305         ASSERT(op != BTRFS_MAP_DISCARD);
6306
6307         em = btrfs_get_chunk_map(fs_info, logical, *length);
6308         ASSERT(!IS_ERR(em));
6309
6310         ret = btrfs_get_io_geometry(fs_info, em, op, logical, &geom);
6311         if (ret < 0)
6312                 return ret;
6313
6314         map = em->map_lookup;
6315
6316         *length = geom.len;
6317         stripe_len = geom.stripe_len;
6318         stripe_nr = geom.stripe_nr;
6319         stripe_offset = geom.stripe_offset;
6320         raid56_full_stripe_start = geom.raid56_stripe_offset;
6321         data_stripes = nr_data_stripes(map);
6322
6323         down_read(&dev_replace->rwsem);
6324         dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
6325         /*
6326          * Hold the semaphore for read during the whole operation; the write
6327          * lock is requested at commit time but must wait.
6328          */
6329         if (!dev_replace_is_ongoing)
6330                 up_read(&dev_replace->rwsem);
6331
6332         if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
6333             !need_full_stripe(op) && dev_replace->tgtdev != NULL) {
6334                 ret = get_extra_mirror_from_replace(fs_info, logical, *length,
6335                                                     dev_replace->srcdev->devid,
6336                                                     &mirror_num,
6337                                             &physical_to_patch_in_first_stripe);
6338                 if (ret)
6339                         goto out;
6340                 else
6341                         patch_the_first_stripe_for_dev_replace = 1;
6342         } else if (mirror_num > map->num_stripes) {
6343                 mirror_num = 0;
6344         }
6345
6346         num_stripes = 1;
6347         stripe_index = 0;
6348         if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
6349                 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
6350                                 &stripe_index);
6351                 if (!need_full_stripe(op))
6352                         mirror_num = 1;
6353         } else if (map->type & BTRFS_BLOCK_GROUP_RAID1_MASK) {
6354                 if (need_full_stripe(op))
6355                         num_stripes = map->num_stripes;
6356                 else if (mirror_num)
6357                         stripe_index = mirror_num - 1;
6358                 else {
6359                         stripe_index = find_live_mirror(fs_info, map, 0,
6360                                             dev_replace_is_ongoing);
6361                         mirror_num = stripe_index + 1;
6362                 }
6363
6364         } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
6365                 if (need_full_stripe(op)) {
6366                         num_stripes = map->num_stripes;
6367                 } else if (mirror_num) {
6368                         stripe_index = mirror_num - 1;
6369                 } else {
6370                         mirror_num = 1;
6371                 }
6372
6373         } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
6374                 u32 factor = map->num_stripes / map->sub_stripes;
6375
6376                 stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
6377                 stripe_index *= map->sub_stripes;
6378
6379                 if (need_full_stripe(op))
6380                         num_stripes = map->sub_stripes;
6381                 else if (mirror_num)
6382                         stripe_index += mirror_num - 1;
6383                 else {
6384                         int old_stripe_index = stripe_index;
6385                         stripe_index = find_live_mirror(fs_info, map,
6386                                               stripe_index,
6387                                               dev_replace_is_ongoing);
6388                         mirror_num = stripe_index - old_stripe_index + 1;
6389                 }
6390
6391         } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
6392                 if (need_raid_map && (need_full_stripe(op) || mirror_num > 1)) {
6393                         /* push stripe_nr back to the start of the full stripe */
6394                         stripe_nr = div64_u64(raid56_full_stripe_start,
6395                                         stripe_len * data_stripes);
6396
6397                         /* RAID[56] write or recovery. Return all stripes */
6398                         num_stripes = map->num_stripes;
6399                         max_errors = nr_parity_stripes(map);
6400
6401                         *length = map->stripe_len;
6402                         stripe_index = 0;
6403                         stripe_offset = 0;
6404                 } else {
6405                         /*
6406                          * Mirror #0 or #1 means the original data block.
6407                          * Mirror #2 is RAID5 parity block.
6408                          * Mirror #3 is RAID6 Q block.
6409                          */
6410                         stripe_nr = div_u64_rem(stripe_nr,
6411                                         data_stripes, &stripe_index);
6412                         if (mirror_num > 1)
6413                                 stripe_index = data_stripes + mirror_num - 2;
6414
6415                         /* We distribute the parity blocks across stripes */
6416                         div_u64_rem(stripe_nr + stripe_index, map->num_stripes,
6417                                         &stripe_index);
6418                         if (!need_full_stripe(op) && mirror_num <= 1)
6419                                 mirror_num = 1;
6420                 }
6421         } else {
6422                 /*
6423                  * after this, stripe_nr is the number of stripes on this
6424                  * device we have to walk to find the data, and stripe_index is
6425                  * the number of our device in the stripe array
6426                  */
6427                 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
6428                                 &stripe_index);
6429                 mirror_num = stripe_index + 1;
6430         }
6431         if (stripe_index >= map->num_stripes) {
6432                 btrfs_crit(fs_info,
6433                            "stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u",
6434                            stripe_index, map->num_stripes);
6435                 ret = -EINVAL;
6436                 goto out;
6437         }
6438
6439         num_alloc_stripes = num_stripes;
6440         if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL) {
6441                 if (op == BTRFS_MAP_WRITE)
6442                         num_alloc_stripes <<= 1;
6443                 if (op == BTRFS_MAP_GET_READ_MIRRORS)
6444                         num_alloc_stripes++;
6445                 tgtdev_indexes = num_stripes;
6446         }
6447
6448         bbio = alloc_btrfs_bio(num_alloc_stripes, tgtdev_indexes);
6449         if (!bbio) {
6450                 ret = -ENOMEM;
6451                 goto out;
6452         }
6453
6454         for (i = 0; i < num_stripes; i++) {
6455                 bbio->stripes[i].physical = map->stripes[stripe_index].physical +
6456                         stripe_offset + stripe_nr * map->stripe_len;
6457                 bbio->stripes[i].dev = map->stripes[stripe_index].dev;
6458                 stripe_index++;
6459         }
6460
6461         /* build raid_map */
6462         if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK && need_raid_map &&
6463             (need_full_stripe(op) || mirror_num > 1)) {
6464                 u64 tmp;
6465                 unsigned rot;
6466
6467                 /* Work out the disk rotation on this stripe-set */
6468                 div_u64_rem(stripe_nr, num_stripes, &rot);
6469
6470                 /* Fill in the logical address of each stripe */
6471                 tmp = stripe_nr * data_stripes;
6472                 for (i = 0; i < data_stripes; i++)
6473                         bbio->raid_map[(i+rot) % num_stripes] =
6474                                 em->start + (tmp + i) * map->stripe_len;
6475
6476                 bbio->raid_map[(i+rot) % map->num_stripes] = RAID5_P_STRIPE;
6477                 if (map->type & BTRFS_BLOCK_GROUP_RAID6)
6478                         bbio->raid_map[(i+rot+1) % num_stripes] =
6479                                 RAID6_Q_STRIPE;
6480
6481                 sort_parity_stripes(bbio, num_stripes);
6482         }
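        /*
         * Example of the rotation above (RAID5 over 3 devices, full stripe
         * number chosen arbitrarily): with data_stripes = 2 and stripe_nr = 1,
         * rot = 1, so before sorting:
         *
         *   raid_map[1] = em->start + 2 * stripe_len   (data)
         *   raid_map[2] = em->start + 3 * stripe_len   (data)
         *   raid_map[0] = RAID5_P_STRIPE               (parity)
         *
         * sort_parity_stripes() then bubbles the parity entry (the largest
         * value) to the end, keeping stripes[] and raid_map[] in sync.
         */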
6483
6484         if (need_full_stripe(op))
6485                 max_errors = btrfs_chunk_max_errors(map);
6486
6487         if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL &&
6488             need_full_stripe(op)) {
6489                 handle_ops_on_dev_replace(op, &bbio, dev_replace, logical,
6490                                           &num_stripes, &max_errors);
6491         }
6492
6493         *bbio_ret = bbio;
6494         bbio->map_type = map->type;
6495         bbio->num_stripes = num_stripes;
6496         bbio->max_errors = max_errors;
6497         bbio->mirror_num = mirror_num;
6498
6499         /*
6500          * This is the case where REQ_READ && dev_replace_is_ongoing &&
6501          * mirror_num == num_stripes + 1 && the dev_replace target drive is
6502          * available as a mirror.
6503          */
6504         if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) {
6505                 WARN_ON(num_stripes > 1);
6506                 bbio->stripes[0].dev = dev_replace->tgtdev;
6507                 bbio->stripes[0].physical = physical_to_patch_in_first_stripe;
6508                 bbio->mirror_num = map->num_stripes + 1;
6509         }
6510 out:
6511         if (dev_replace_is_ongoing) {
6512                 lockdep_assert_held(&dev_replace->rwsem);
6513                 /* Unlock and let waiting writers proceed */
6514                 up_read(&dev_replace->rwsem);
6515         }
6516         free_extent_map(em);
6517         return ret;
6518 }
6519
6520 int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
6521                       u64 logical, u64 *length,
6522                       struct btrfs_bio **bbio_ret, int mirror_num)
6523 {
6524         if (op == BTRFS_MAP_DISCARD)
6525                 return __btrfs_map_block_for_discard(fs_info, logical,
6526                                                      length, bbio_ret);
6527
6528         return __btrfs_map_block(fs_info, op, logical, length, bbio_ret,
6529                                  mirror_num, 0);
6530 }
6531
6532 /* For Scrub/replace */
6533 int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
6534                      u64 logical, u64 *length,
6535                      struct btrfs_bio **bbio_ret)
6536 {
6537         return __btrfs_map_block(fs_info, op, logical, length, bbio_ret, 0, 1);
6538 }
6539
6540 static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio)
6541 {
6542         bio->bi_private = bbio->private;
6543         bio->bi_end_io = bbio->end_io;
6544         bio_endio(bio);
6545
6546         btrfs_put_bbio(bbio);
6547 }
6548
6549 static void btrfs_end_bio(struct bio *bio)
6550 {
6551         struct btrfs_bio *bbio = bio->bi_private;
6552         int is_orig_bio = 0;
6553
6554         if (bio->bi_status) {
6555                 atomic_inc(&bbio->error);
6556                 if (bio->bi_status == BLK_STS_IOERR ||
6557                     bio->bi_status == BLK_STS_TARGET) {
6558                         struct btrfs_device *dev = btrfs_io_bio(bio)->device;
6559
6560                         ASSERT(dev->bdev);
6561                         if (btrfs_op(bio) == BTRFS_MAP_WRITE)
6562                                 btrfs_dev_stat_inc_and_print(dev,
6563                                                 BTRFS_DEV_STAT_WRITE_ERRS);
6564                         else if (!(bio->bi_opf & REQ_RAHEAD))
6565                                 btrfs_dev_stat_inc_and_print(dev,
6566                                                 BTRFS_DEV_STAT_READ_ERRS);
6567                         if (bio->bi_opf & REQ_PREFLUSH)
6568                                 btrfs_dev_stat_inc_and_print(dev,
6569                                                 BTRFS_DEV_STAT_FLUSH_ERRS);
6570                 }
6571         }
6572
6573         if (bio == bbio->orig_bio)
6574                 is_orig_bio = 1;
6575
6576         btrfs_bio_counter_dec(bbio->fs_info);
6577
6578         if (atomic_dec_and_test(&bbio->stripes_pending)) {
6579                 if (!is_orig_bio) {
6580                         bio_put(bio);
6581                         bio = bbio->orig_bio;
6582                 }
6583
6584                 btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
6585                 /* only send an error to the higher layers if it is
6586                  * beyond the tolerance of the btrfs bio
6587                  */
6588                 if (atomic_read(&bbio->error) > bbio->max_errors) {
6589                         bio->bi_status = BLK_STS_IOERR;
6590                 } else {
6591                         /*
6592                          * this bio is actually up to date, we didn't
6593                          * go over the max number of errors
6594                          */
6595                         bio->bi_status = BLK_STS_OK;
6596                 }
6597
6598                 btrfs_end_bbio(bbio, bio);
6599         } else if (!is_orig_bio) {
6600                 bio_put(bio);
6601         }
6602 }
6603
6604 static void submit_stripe_bio(struct btrfs_bio *bbio, struct bio *bio,
6605                               u64 physical, struct btrfs_device *dev)
6606 {
6607         struct btrfs_fs_info *fs_info = bbio->fs_info;
6608
6609         bio->bi_private = bbio;
6610         btrfs_io_bio(bio)->device = dev;
6611         bio->bi_end_io = btrfs_end_bio;
6612         bio->bi_iter.bi_sector = physical >> 9;
6613         /*
6614          * For zone append writing, bi_sector must point to the beginning of
6615          * the zone.
6616          */
6617         if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
6618                 if (btrfs_dev_is_sequential(dev, physical)) {
6619                         u64 zone_start = round_down(physical, fs_info->zone_size);
6620
6621                         bio->bi_iter.bi_sector = zone_start >> SECTOR_SHIFT;
6622                 } else {
6623                         bio->bi_opf &= ~REQ_OP_ZONE_APPEND;
6624                         bio->bi_opf |= REQ_OP_WRITE;
6625                 }
6626         }
6627         btrfs_debug_in_rcu(fs_info,
6628         "btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u",
6629                 bio_op(bio), bio->bi_opf, bio->bi_iter.bi_sector,
6630                 (unsigned long)dev->bdev->bd_dev, rcu_str_deref(dev->name),
6631                 dev->devid, bio->bi_iter.bi_size);
6632         bio_set_dev(bio, dev->bdev);
6633
6634         btrfs_bio_counter_inc_noblocked(fs_info);
6635
6636         btrfsic_submit_bio(bio);
6637 }
6638
6639 static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
6640 {
6641         atomic_inc(&bbio->error);
6642         if (atomic_dec_and_test(&bbio->stripes_pending)) {
6643                 /* Should be the original bio. */
6644                 WARN_ON(bio != bbio->orig_bio);
6645
6646                 btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
6647                 bio->bi_iter.bi_sector = logical >> 9;
6648                 if (atomic_read(&bbio->error) > bbio->max_errors)
6649                         bio->bi_status = BLK_STS_IOERR;
6650                 else
6651                         bio->bi_status = BLK_STS_OK;
6652                 btrfs_end_bbio(bbio, bio);
6653         }
6654 }
6655
6656 blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
6657                            int mirror_num)
6658 {
6659         struct btrfs_device *dev;
6660         struct bio *first_bio = bio;
6661         u64 logical = bio->bi_iter.bi_sector << 9;
6662         u64 length = 0;
6663         u64 map_length;
6664         int ret;
6665         int dev_nr;
6666         int total_devs;
6667         struct btrfs_bio *bbio = NULL;
6668
6669         length = bio->bi_iter.bi_size;
6670         map_length = length;
6671
6672         btrfs_bio_counter_inc_blocked(fs_info);
6673         ret = __btrfs_map_block(fs_info, btrfs_op(bio), logical,
6674                                 &map_length, &bbio, mirror_num, 1);
6675         if (ret) {
6676                 btrfs_bio_counter_dec(fs_info);
6677                 return errno_to_blk_status(ret);
6678         }
6679
6680         total_devs = bbio->num_stripes;
6681         bbio->orig_bio = first_bio;
6682         bbio->private = first_bio->bi_private;
6683         bbio->end_io = first_bio->bi_end_io;
6684         bbio->fs_info = fs_info;
6685         atomic_set(&bbio->stripes_pending, bbio->num_stripes);
6686
6687         if ((bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
6688             ((btrfs_op(bio) == BTRFS_MAP_WRITE) || (mirror_num > 1))) {
6689                 /* In this case, map_length has been set to the length of
6690                    a single stripe; not the whole write */
6691                 if (btrfs_op(bio) == BTRFS_MAP_WRITE) {
6692                         ret = raid56_parity_write(fs_info, bio, bbio,
6693                                                   map_length);
6694                 } else {
6695                         ret = raid56_parity_recover(fs_info, bio, bbio,
6696                                                     map_length, mirror_num, 1);
6697                 }
6698
6699                 btrfs_bio_counter_dec(fs_info);
6700                 return errno_to_blk_status(ret);
6701         }
6702
6703         if (map_length < length) {
6704                 btrfs_crit(fs_info,
6705                            "mapping failed logical %llu bio len %llu len %llu",
6706                            logical, length, map_length);
6707                 BUG();
6708         }
6709
6710         for (dev_nr = 0; dev_nr < total_devs; dev_nr++) {
6711                 dev = bbio->stripes[dev_nr].dev;
6712                 if (!dev || !dev->bdev || test_bit(BTRFS_DEV_STATE_MISSING,
6713                                                    &dev->dev_state) ||
6714                     (btrfs_op(first_bio) == BTRFS_MAP_WRITE &&
6715                     !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))) {
6716                         bbio_error(bbio, first_bio, logical);
6717                         continue;
6718                 }
6719
6720                 if (dev_nr < total_devs - 1)
6721                         bio = btrfs_bio_clone(first_bio);
6722                 else
6723                         bio = first_bio;
6724
6725                 submit_stripe_bio(bbio, bio, bbio->stripes[dev_nr].physical, dev);
6726         }
6727         btrfs_bio_counter_dec(fs_info);
6728         return BLK_STS_OK;
6729 }
6730
6731 /*
6732  * Find a device specified by @devid or @uuid in the list of @fs_devices, or
6733  * return NULL.
6734  *
6735  * If devid and uuid are both specified, the match must be exact, otherwise
6736  * only devid is used.
6737  */
6738 struct btrfs_device *btrfs_find_device(struct btrfs_fs_devices *fs_devices,
6739                                        u64 devid, u8 *uuid, u8 *fsid)
6740 {
6741         struct btrfs_device *device;
6742         struct btrfs_fs_devices *seed_devs;
6743
6744         if (!fsid || !memcmp(fs_devices->metadata_uuid, fsid, BTRFS_FSID_SIZE)) {
6745                 list_for_each_entry(device, &fs_devices->devices, dev_list) {
6746                         if (device->devid == devid &&
6747                             (!uuid || memcmp(device->uuid, uuid,
6748                                              BTRFS_UUID_SIZE) == 0))
6749                                 return device;
6750                 }
6751         }
6752
6753         list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
6754                 if (!fsid ||
6755                     !memcmp(seed_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE)) {
6756                         list_for_each_entry(device, &seed_devs->devices,
6757                                             dev_list) {
6758                                 if (device->devid == devid &&
6759                                     (!uuid || memcmp(device->uuid, uuid,
6760                                                      BTRFS_UUID_SIZE) == 0))
6761                                         return device;
6762                         }
6763                 }
6764         }
6765
6766         return NULL;
6767 }
6768
6769 static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices,
6770                                             u64 devid, u8 *dev_uuid)
6771 {
6772         struct btrfs_device *device;
6773         unsigned int nofs_flag;
6774
6775         /*
6776          * We call this under the chunk_mutex, so we want to use NOFS for this
6777          * allocation. However, we don't want to change btrfs_alloc_device() to
6778          * always do NOFS because we use it in a lot of other GFP_KERNEL safe
6779          * places.
6780          */
6781         nofs_flag = memalloc_nofs_save();
6782         device = btrfs_alloc_device(NULL, &devid, dev_uuid);
6783         memalloc_nofs_restore(nofs_flag);
6784         if (IS_ERR(device))
6785                 return device;
6786
6787         list_add(&device->dev_list, &fs_devices->devices);
6788         device->fs_devices = fs_devices;
6789         fs_devices->num_devices++;
6790
6791         set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
6792         fs_devices->missing_devices++;
6793
6794         return device;
6795 }
6796
6797 /**
6798  * btrfs_alloc_device - allocate struct btrfs_device
6799  * @fs_info:    used only for generating a new devid, can be NULL if
6800  *              devid is provided (i.e. @devid != NULL).
6801  * @devid:      a pointer to devid for this device.  If NULL a new devid
6802  *              is generated.
6803  * @uuid:       a pointer to UUID for this device.  If NULL a new UUID
6804  *              is generated.
6805  *
6806  * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR()
6807  * on error.  Returned struct is not linked onto any lists and must be
6808  * destroyed with btrfs_free_device.
6809  */
6810 struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
6811                                         const u64 *devid,
6812                                         const u8 *uuid)
6813 {
6814         struct btrfs_device *dev;
6815         u64 tmp;
6816
6817         if (WARN_ON(!devid && !fs_info))
6818                 return ERR_PTR(-EINVAL);
6819
6820         dev = kzalloc(sizeof(*dev), GFP_KERNEL);
6821         if (!dev)
6822                 return ERR_PTR(-ENOMEM);
6823
6824         /*
6825          * Preallocate a bio that's always going to be used for flushing device
6826          * barriers and matches the device lifespan
6827          */
6828         dev->flush_bio = bio_kmalloc(GFP_KERNEL, 0);
6829         if (!dev->flush_bio) {
6830                 kfree(dev);
6831                 return ERR_PTR(-ENOMEM);
6832         }
6833
6834         INIT_LIST_HEAD(&dev->dev_list);
6835         INIT_LIST_HEAD(&dev->dev_alloc_list);
6836         INIT_LIST_HEAD(&dev->post_commit_list);
6837
6838         atomic_set(&dev->reada_in_flight, 0);
6839         atomic_set(&dev->dev_stats_ccnt, 0);
6840         btrfs_device_data_ordered_init(dev);
6841         INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
6842         INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
6843         extent_io_tree_init(fs_info, &dev->alloc_state,
6844                             IO_TREE_DEVICE_ALLOC_STATE, NULL);
6845
6846         if (devid)
6847                 tmp = *devid;
6848         else {
6849                 int ret;
6850
6851                 ret = find_next_devid(fs_info, &tmp);
6852                 if (ret) {
6853                         btrfs_free_device(dev);
6854                         return ERR_PTR(ret);
6855                 }
6856         }
6857         dev->devid = tmp;
6858
6859         if (uuid)
6860                 memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE);
6861         else
6862                 generate_random_uuid(dev->uuid);
6863
6864         return dev;
6865 }
6866
6867 static void btrfs_report_missing_device(struct btrfs_fs_info *fs_info,
6868                                         u64 devid, u8 *uuid, bool error)
6869 {
6870         if (error)
6871                 btrfs_err_rl(fs_info, "devid %llu uuid %pU is missing",
6872                               devid, uuid);
6873         else
6874                 btrfs_warn_rl(fs_info, "devid %llu uuid %pU is missing",
6875                               devid, uuid);
6876 }
6877
6878 static u64 calc_stripe_length(u64 type, u64 chunk_len, int num_stripes)
6879 {
6880         const int data_stripes = calc_data_stripes(type, num_stripes);
6881
6882         return div_u64(chunk_len, data_stripes);
6883 }
6884
6885 #if BITS_PER_LONG == 32
6886 /*
6887  * Due to the page cache limit, metadata beyond BTRFS_32BIT_MAX_FILE_SIZE
6888  * can't be accessed on 32bit systems.
6889  *
6890  * This function does a mount time check to reject the fs if it already has
6891  * a metadata chunk beyond that limit.
6892  */
6893 static int check_32bit_meta_chunk(struct btrfs_fs_info *fs_info,
6894                                   u64 logical, u64 length, u64 type)
6895 {
6896         if (!(type & BTRFS_BLOCK_GROUP_METADATA))
6897                 return 0;
6898
6899         if (logical + length < MAX_LFS_FILESIZE)
6900                 return 0;
6901
6902         btrfs_err_32bit_limit(fs_info);
6903         return -EOVERFLOW;
6904 }
6905
6906 /*
6907  * This is to give early warning for any metadata chunk reaching
6908  * BTRFS_32BIT_EARLY_WARN_THRESHOLD.
6909  * Although we can still access the metadata now, it will no longer be
6910  * possible once the limit is reached.
6911  */
6912 static void warn_32bit_meta_chunk(struct btrfs_fs_info *fs_info,
6913                                   u64 logical, u64 length, u64 type)
6914 {
6915         if (!(type & BTRFS_BLOCK_GROUP_METADATA))
6916                 return;
6917
6918         if (logical + length < BTRFS_32BIT_EARLY_WARN_THRESHOLD)
6919                 return;
6920
6921         btrfs_warn_32bit_limit(fs_info);
6922 }
6923 #endif
6924
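/*
 * Read one chunk item (from the chunk tree or the sys chunk array) and insert
 * the corresponding mapping into fs_info->mapping_tree.  Stripes pointing to
 * devices that are not present are either rejected with -ENOENT or, when
 * mounted with -o degraded, filled in with placeholder "missing" devices.
 */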
6925 static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf,
6926                           struct btrfs_chunk *chunk)
6927 {
6928         struct btrfs_fs_info *fs_info = leaf->fs_info;
6929         struct extent_map_tree *map_tree = &fs_info->mapping_tree;
6930         struct map_lookup *map;
6931         struct extent_map *em;
6932         u64 logical;
6933         u64 length;
6934         u64 devid;
6935         u64 type;
6936         u8 uuid[BTRFS_UUID_SIZE];
6937         int num_stripes;
6938         int ret;
6939         int i;
6940
6941         logical = key->offset;
6942         length = btrfs_chunk_length(leaf, chunk);
6943         type = btrfs_chunk_type(leaf, chunk);
6944         num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
6945
6946 #if BITS_PER_LONG == 32
6947         ret = check_32bit_meta_chunk(fs_info, logical, length, type);
6948         if (ret < 0)
6949                 return ret;
6950         warn_32bit_meta_chunk(fs_info, logical, length, type);
6951 #endif
6952
6953         /*
6954          * We only need to verify the chunk item if reading from the sys chunk
6955          * array, as a chunk item in a tree block is already verified by tree-checker.
6956          */
6957         if (leaf->start == BTRFS_SUPER_INFO_OFFSET) {
6958                 ret = btrfs_check_chunk_valid(leaf, chunk, logical);
6959                 if (ret)
6960                         return ret;
6961         }
6962
6963         read_lock(&map_tree->lock);
6964         em = lookup_extent_mapping(map_tree, logical, 1);
6965         read_unlock(&map_tree->lock);
6966
6967         /* already mapped? */
6968         if (em && em->start <= logical && em->start + em->len > logical) {
6969                 free_extent_map(em);
6970                 return 0;
6971         } else if (em) {
6972                 free_extent_map(em);
6973         }
6974
6975         em = alloc_extent_map();
6976         if (!em)
6977                 return -ENOMEM;
6978         map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
6979         if (!map) {
6980                 free_extent_map(em);
6981                 return -ENOMEM;
6982         }
6983
6984         set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
6985         em->map_lookup = map;
6986         em->start = logical;
6987         em->len = length;
6988         em->orig_start = 0;
6989         em->block_start = 0;
6990         em->block_len = em->len;
6991
6992         map->num_stripes = num_stripes;
6993         map->io_width = btrfs_chunk_io_width(leaf, chunk);
6994         map->io_align = btrfs_chunk_io_align(leaf, chunk);
6995         map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
6996         map->type = type;
6997         map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
6998         map->verified_stripes = 0;
6999         em->orig_block_len = calc_stripe_length(type, em->len,
7000                                                 map->num_stripes);
7001         for (i = 0; i < num_stripes; i++) {
7002                 map->stripes[i].physical =
7003                         btrfs_stripe_offset_nr(leaf, chunk, i);
7004                 devid = btrfs_stripe_devid_nr(leaf, chunk, i);
7005                 read_extent_buffer(leaf, uuid, (unsigned long)
7006                                    btrfs_stripe_dev_uuid_nr(chunk, i),
7007                                    BTRFS_UUID_SIZE);
7008                 map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices,
7009                                                         devid, uuid, NULL);
7010                 if (!map->stripes[i].dev &&
7011                     !btrfs_test_opt(fs_info, DEGRADED)) {
7012                         free_extent_map(em);
7013                         btrfs_report_missing_device(fs_info, devid, uuid, true);
7014                         return -ENOENT;
7015                 }
7016                 if (!map->stripes[i].dev) {
7017                         map->stripes[i].dev =
7018                                 add_missing_dev(fs_info->fs_devices, devid,
7019                                                 uuid);
7020                         if (IS_ERR(map->stripes[i].dev)) {
7021                                 free_extent_map(em);
7022                                 btrfs_err(fs_info,
7023                                         "failed to init missing dev %llu: %ld",
7024                                         devid, PTR_ERR(map->stripes[i].dev));
7025                                 return PTR_ERR(map->stripes[i].dev);
7026                         }
7027                         btrfs_report_missing_device(fs_info, devid, uuid, false);
7028                 }
7029                 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
7030                                 &(map->stripes[i].dev->dev_state));
7031
7032         }
7033
7034         write_lock(&map_tree->lock);
7035         ret = add_extent_mapping(map_tree, em, 0);
7036         write_unlock(&map_tree->lock);
7037         if (ret < 0) {
7038                 btrfs_err(fs_info,
7039                           "failed to add chunk map, start=%llu len=%llu: %d",
7040                           em->start, em->len, ret);
7041         }
7042         free_extent_map(em);
7043
7044         return ret;
7045 }
7046
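/*
 * Copy the persistent fields of an on-disk dev item into the in-memory
 * btrfs_device, and initialize the commit_* copies used at transaction
 * commit time.
 */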
7047 static void fill_device_from_item(struct extent_buffer *leaf,
7048                                  struct btrfs_dev_item *dev_item,
7049                                  struct btrfs_device *device)
7050 {
7051         unsigned long ptr;
7052
7053         device->devid = btrfs_device_id(leaf, dev_item);
7054         device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
7055         device->total_bytes = device->disk_total_bytes;
7056         device->commit_total_bytes = device->disk_total_bytes;
7057         device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
7058         device->commit_bytes_used = device->bytes_used;
7059         device->type = btrfs_device_type(leaf, dev_item);
7060         device->io_align = btrfs_device_io_align(leaf, dev_item);
7061         device->io_width = btrfs_device_io_width(leaf, dev_item);
7062         device->sector_size = btrfs_device_sector_size(leaf, dev_item);
7063         WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
7064         clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
7065
7066         ptr = btrfs_device_uuid(dev_item);
7067         read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
7068 }
7069
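/*
 * Find (or, on first use, clone and open) the fs_devices of a seed filesystem
 * referenced by @fsid and anchor it at fs_info->fs_devices->seed_list.  With
 * -o degraded a missing seed device set is tolerated and an empty fs_devices
 * is created instead.
 */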
7070 static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info,
7071                                                   u8 *fsid)
7072 {
7073         struct btrfs_fs_devices *fs_devices;
7074         int ret;
7075
7076         lockdep_assert_held(&uuid_mutex);
7077         ASSERT(fsid);
7078
7079         /* This will match only for multi-device seed fs */
7080         list_for_each_entry(fs_devices, &fs_info->fs_devices->seed_list, seed_list)
7081                 if (!memcmp(fs_devices->fsid, fsid, BTRFS_FSID_SIZE))
7082                         return fs_devices;
7083
7085         fs_devices = find_fsid(fsid, NULL);
7086         if (!fs_devices) {
7087                 if (!btrfs_test_opt(fs_info, DEGRADED))
7088                         return ERR_PTR(-ENOENT);
7089
7090                 fs_devices = alloc_fs_devices(fsid, NULL);
7091                 if (IS_ERR(fs_devices))
7092                         return fs_devices;
7093
7094                 fs_devices->seeding = true;
7095                 fs_devices->opened = 1;
7096                 return fs_devices;
7097         }
7098
7099         /*
7100          * Upon first call for a seed fs fsid, just create a private copy of the
7101          * respective fs_devices and anchor it at fs_info->fs_devices->seed_list
7102          */
7103         fs_devices = clone_fs_devices(fs_devices);
7104         if (IS_ERR(fs_devices))
7105                 return fs_devices;
7106
7107         ret = open_fs_devices(fs_devices, FMODE_READ, fs_info->bdev_holder);
7108         if (ret) {
7109                 free_fs_devices(fs_devices);
7110                 return ERR_PTR(ret);
7111         }
7112
7113         if (!fs_devices->seeding) {
7114                 close_fs_devices(fs_devices);
7115                 free_fs_devices(fs_devices);
7116                 return ERR_PTR(-EINVAL);
7117         }
7118
7119         list_add(&fs_devices->seed_list, &fs_info->fs_devices->seed_list);
7120
7121         return fs_devices;
7122 }
7123
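/*
 * Read one dev item and update the matching in-memory btrfs_device.  Devices
 * belonging to a seed filesystem are looked up (and opened) via
 * open_seed_devices(); devices that cannot be found are only tolerated when
 * mounted with -o degraded, in which case a "missing" device is added.
 */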
7124 static int read_one_dev(struct extent_buffer *leaf,
7125                         struct btrfs_dev_item *dev_item)
7126 {
7127         struct btrfs_fs_info *fs_info = leaf->fs_info;
7128         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7129         struct btrfs_device *device;
7130         u64 devid;
7131         int ret;
7132         u8 fs_uuid[BTRFS_FSID_SIZE];
7133         u8 dev_uuid[BTRFS_UUID_SIZE];
7134
7135         devid = btrfs_device_id(leaf, dev_item);
7136         read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
7137                            BTRFS_UUID_SIZE);
7138         read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
7139                            BTRFS_FSID_SIZE);
7140
7141         if (memcmp(fs_uuid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE)) {
7142                 fs_devices = open_seed_devices(fs_info, fs_uuid);
7143                 if (IS_ERR(fs_devices))
7144                         return PTR_ERR(fs_devices);
7145         }
7146
7147         device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
7148                                    fs_uuid);
7149         if (!device) {
7150                 if (!btrfs_test_opt(fs_info, DEGRADED)) {
7151                         btrfs_report_missing_device(fs_info, devid,
7152                                                         dev_uuid, true);
7153                         return -ENOENT;
7154                 }
7155
7156                 device = add_missing_dev(fs_devices, devid, dev_uuid);
7157                 if (IS_ERR(device)) {
7158                         btrfs_err(fs_info,
7159                                 "failed to add missing dev %llu: %ld",
7160                                 devid, PTR_ERR(device));
7161                         return PTR_ERR(device);
7162                 }
7163                 btrfs_report_missing_device(fs_info, devid, dev_uuid, false);
7164         } else {
7165                 if (!device->bdev) {
7166                         if (!btrfs_test_opt(fs_info, DEGRADED)) {
7167                                 btrfs_report_missing_device(fs_info,
7168                                                 devid, dev_uuid, true);
7169                                 return -ENOENT;
7170                         }
7171                         btrfs_report_missing_device(fs_info, devid,
7172                                                         dev_uuid, false);
7173                 }
7174
7175                 if (!device->bdev &&
7176                     !test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
7177                         /*
7178                          * This happens when a device that was properly set up
7179                          * in the device info lists suddenly goes bad.
7180                          * device->bdev is NULL, so we have to mark the device
7181                          * as missing here.
7182                          */
7183                         device->fs_devices->missing_devices++;
7184                         set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
7185                 }
7186
7187                 /* Move the device to its own fs_devices */
7188                 if (device->fs_devices != fs_devices) {
7189                         ASSERT(test_bit(BTRFS_DEV_STATE_MISSING,
7190                                                         &device->dev_state));
7191
7192                         list_move(&device->dev_list, &fs_devices->devices);
7193                         device->fs_devices->num_devices--;
7194                         fs_devices->num_devices++;
7195
7196                         device->fs_devices->missing_devices--;
7197                         fs_devices->missing_devices++;
7198
7199                         device->fs_devices = fs_devices;
7200                 }
7201         }
7202
7203         if (device->fs_devices != fs_info->fs_devices) {
7204                 BUG_ON(test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state));
7205                 if (device->generation !=
7206                     btrfs_device_generation(leaf, dev_item))
7207                         return -EINVAL;
7208         }
7209
7210         fill_device_from_item(leaf, dev_item, device);
7211         if (device->bdev) {
7212                 u64 max_total_bytes = i_size_read(device->bdev->bd_inode);
7213
7214                 if (device->total_bytes > max_total_bytes) {
7215                         btrfs_err(fs_info,
7216                         "device total_bytes should be at most %llu but found %llu",
7217                                   max_total_bytes, device->total_bytes);
7218                         return -EINVAL;
7219                 }
7220         }
7221         set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
7222         if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
7223            !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
7224                 device->fs_devices->total_rw_bytes += device->total_bytes;
7225                 atomic64_add(device->total_bytes - device->bytes_used,
7226                                 &fs_info->free_chunk_space);
7227         }
7228         ret = 0;
7229         return ret;
7230 }
7231
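/*
 * Read the system chunk array from the superblock copy and create the chunk
 * mappings for the SYSTEM block groups, which are needed before the chunk
 * tree itself can be read.
 */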
7232 int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
7233 {
7234         struct btrfs_root *root = fs_info->tree_root;
7235         struct btrfs_super_block *super_copy = fs_info->super_copy;
7236         struct extent_buffer *sb;
7237         struct btrfs_disk_key *disk_key;
7238         struct btrfs_chunk *chunk;
7239         u8 *array_ptr;
7240         unsigned long sb_array_offset;
7241         int ret = 0;
7242         u32 num_stripes;
7243         u32 array_size;
7244         u32 len = 0;
7245         u32 cur_offset;
7246         u64 type;
7247         struct btrfs_key key;
7248
7249         ASSERT(BTRFS_SUPER_INFO_SIZE <= fs_info->nodesize);
7250         /*
7251          * This will create an extent buffer of nodesize; the superblock size is
7252          * fixed to BTRFS_SUPER_INFO_SIZE. If nodesize > sb size, this will
7253          * overallocate, but we can keep it as-is since only the first page is used.
7254          */
7255         sb = btrfs_find_create_tree_block(fs_info, BTRFS_SUPER_INFO_OFFSET,
7256                                           root->root_key.objectid, 0);
7257         if (IS_ERR(sb))
7258                 return PTR_ERR(sb);
7259         set_extent_buffer_uptodate(sb);
7260         /*
7261          * The sb extent buffer is artificial and just used to read the system array.
7262          * The set_extent_buffer_uptodate() call does not properly mark all its
7263          * pages up-to-date when the page is larger: extent does not cover the
7264          * whole page and consequently check_page_uptodate does not find all
7265          * the page's extents up-to-date (the hole beyond sb),
7266          * write_extent_buffer then triggers a WARN_ON.
7267          *
7268          * Regular short extents go through mark_extent_buffer_dirty/writeback cycle,
7269          * but sb spans only this function. Add an explicit SetPageUptodate call
7270          * to silence the warning e.g. on PowerPC 64.
7271          */
7272         if (PAGE_SIZE > BTRFS_SUPER_INFO_SIZE)
7273                 SetPageUptodate(sb->pages[0]);
7274
7275         write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
7276         array_size = btrfs_super_sys_array_size(super_copy);
7277
7278         array_ptr = super_copy->sys_chunk_array;
7279         sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array);
7280         cur_offset = 0;
7281
7282         while (cur_offset < array_size) {
7283                 disk_key = (struct btrfs_disk_key *)array_ptr;
7284                 len = sizeof(*disk_key);
7285                 if (cur_offset + len > array_size)
7286                         goto out_short_read;
7287
7288                 btrfs_disk_key_to_cpu(&key, disk_key);
7289
7290                 array_ptr += len;
7291                 sb_array_offset += len;
7292                 cur_offset += len;
7293
7294                 if (key.type != BTRFS_CHUNK_ITEM_KEY) {
7295                         btrfs_err(fs_info,
7296                             "unexpected item type %u in sys_array at offset %u",
7297                                   (u32)key.type, cur_offset);
7298                         ret = -EIO;
7299                         break;
7300                 }
7301
7302                 chunk = (struct btrfs_chunk *)sb_array_offset;
7303                 /*
7304                  * At least one btrfs_chunk with one stripe must be present,
7305                  * the exact stripe count check comes afterwards.
7306                  */
7307                 len = btrfs_chunk_item_size(1);
7308                 if (cur_offset + len > array_size)
7309                         goto out_short_read;
7310
7311                 num_stripes = btrfs_chunk_num_stripes(sb, chunk);
7312                 if (!num_stripes) {
7313                         btrfs_err(fs_info,
7314                         "invalid number of stripes %u in sys_array at offset %u",
7315                                   num_stripes, cur_offset);
7316                         ret = -EIO;
7317                         break;
7318                 }
7319
7320                 type = btrfs_chunk_type(sb, chunk);
7321                 if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
7322                         btrfs_err(fs_info,
7323                         "invalid chunk type %llu in sys_array at offset %u",
7324                                   type, cur_offset);
7325                         ret = -EIO;
7326                         break;
7327                 }
7328
7329                 len = btrfs_chunk_item_size(num_stripes);
7330                 if (cur_offset + len > array_size)
7331                         goto out_short_read;
7332
7333                 ret = read_one_chunk(&key, sb, chunk);
7334                 if (ret)
7335                         break;
7336
7337                 array_ptr += len;
7338                 sb_array_offset += len;
7339                 cur_offset += len;
7340         }
7341         clear_extent_buffer_uptodate(sb);
7342         free_extent_buffer_stale(sb);
7343         return ret;
7344
7345 out_short_read:
7346         btrfs_err(fs_info, "sys_array too short to read %u bytes at offset %u",
7347                         len, cur_offset);
7348         clear_extent_buffer_uptodate(sb);
7349         free_extent_buffer_stale(sb);
7350         return -EIO;
7351 }
7352
7353 /*
7354  * Check if all chunks in the fs are OK for read-write degraded mount
7355  *
7356  * If the @failing_dev is specified, it's accounted as missing.
7357  *
7358  * Return true if all chunks meet the minimal RW mount requirements.
7359  * Return false if any chunk doesn't meet the minimal RW mount requirements.
7360  */
7361 bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info,
7362                                         struct btrfs_device *failing_dev)
7363 {
7364         struct extent_map_tree *map_tree = &fs_info->mapping_tree;
7365         struct extent_map *em;
7366         u64 next_start = 0;
7367         bool ret = true;
7368
7369         read_lock(&map_tree->lock);
7370         em = lookup_extent_mapping(map_tree, 0, (u64)-1);
7371         read_unlock(&map_tree->lock);
7372         /* No chunk at all? Return false anyway */
7373         if (!em) {
7374                 ret = false;
7375                 goto out;
7376         }
7377         while (em) {
7378                 struct map_lookup *map;
7379                 int missing = 0;
7380                 int max_tolerated;
7381                 int i;
7382
7383                 map = em->map_lookup;
7384                 max_tolerated =
7385                         btrfs_get_num_tolerated_disk_barrier_failures(
7386                                         map->type);
7387                 for (i = 0; i < map->num_stripes; i++) {
7388                         struct btrfs_device *dev = map->stripes[i].dev;
7389
7390                         if (!dev || !dev->bdev ||
7391                             test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) ||
7392                             dev->last_flush_error)
7393                                 missing++;
7394                         else if (failing_dev && failing_dev == dev)
7395                                 missing++;
7396                 }
7397                 if (missing > max_tolerated) {
7398                         if (!failing_dev)
7399                                 btrfs_warn(fs_info,
7400         "chunk %llu missing %d devices, max tolerance is %d for writable mount",
7401                                    em->start, missing, max_tolerated);
7402                         free_extent_map(em);
7403                         ret = false;
7404                         goto out;
7405                 }
7406                 next_start = extent_map_end(em);
7407                 free_extent_map(em);
7408
7409                 read_lock(&map_tree->lock);
7410                 em = lookup_extent_mapping(map_tree, next_start,
7411                                            (u64)(-1) - next_start);
7412                 read_unlock(&map_tree->lock);
7413         }
7414 out:
7415         return ret;
7416 }
7417
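/* Start readahead for all child nodes/leaves of the given tree node. */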
7418 static void readahead_tree_node_children(struct extent_buffer *node)
7419 {
7420         int i;
7421         const int nr_items = btrfs_header_nritems(node);
7422
7423         for (i = 0; i < nr_items; i++)
7424                 btrfs_readahead_node_child(node, i);
7425 }
7426
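/*
 * Read all device items and chunk items from the chunk tree at mount time,
 * build the in-memory device list and chunk mappings, then cross-check the
 * totals against the superblock.
 */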
7427 int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
7428 {
7429         struct btrfs_root *root = fs_info->chunk_root;
7430         struct btrfs_path *path;
7431         struct extent_buffer *leaf;
7432         struct btrfs_key key;
7433         struct btrfs_key found_key;
7434         int ret;
7435         int slot;
7436         u64 total_dev = 0;
7437         u64 last_ra_node = 0;
7438
7439         path = btrfs_alloc_path();
7440         if (!path)
7441                 return -ENOMEM;
7442
7443         /*
7444          * The uuid_mutex is needed only when we are mounting a sprout FS.
7446          */
7447         mutex_lock(&uuid_mutex);
7448
7449         /*
7450          * It is possible for mount and umount to race in such a way that
7451          * we execute this code path, but open_fs_devices failed to clear
7452          * total_rw_bytes. We certainly want it cleared before reading the
7453          * device items, so clear it here.
7454          */
7455         fs_info->fs_devices->total_rw_bytes = 0;
7456
7457         /*
7458          * Read all device items, and then all the chunk items. All
7459          * device items are found before any chunk item (their object id
7460          * is smaller than the lowest possible object id for a chunk
7461          * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
7462          */
7463         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
7464         key.offset = 0;
7465         key.type = 0;
7466         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
7467         if (ret < 0)
7468                 goto error;
7469         while (1) {
7470                 struct extent_buffer *node;
7471
7472                 leaf = path->nodes[0];
7473                 slot = path->slots[0];
7474                 if (slot >= btrfs_header_nritems(leaf)) {
7475                         ret = btrfs_next_leaf(root, path);
7476                         if (ret == 0)
7477                                 continue;
7478                         if (ret < 0)
7479                                 goto error;
7480                         break;
7481                 }
7482                 /*
7483                  * The nodes on level 1 are not locked but we don't need to lock
7484                  * them during mount time as nothing else can access the tree.
7485                  */
7486                 node = path->nodes[1];
7487                 if (node) {
7488                         if (last_ra_node != node->start) {
7489                                 readahead_tree_node_children(node);
7490                                 last_ra_node = node->start;
7491                         }
7492                 }
7493                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
7494                 if (found_key.type == BTRFS_DEV_ITEM_KEY) {
7495                         struct btrfs_dev_item *dev_item;
7496                         dev_item = btrfs_item_ptr(leaf, slot,
7497                                                   struct btrfs_dev_item);
7498                         ret = read_one_dev(leaf, dev_item);
7499                         if (ret)
7500                                 goto error;
7501                         total_dev++;
7502                 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
7503                         struct btrfs_chunk *chunk;
7504
7505                         /*
7506                          * We are only called at mount time, so no need to take
7507                          * fs_info->chunk_mutex. Plus, to avoid lockdep warnings,
7508                          * we always lock first fs_info->chunk_mutex before
7509                          * acquiring any locks on the chunk tree. This is a
7510                          * requirement for chunk allocation, see the comment on
7511                          * top of btrfs_chunk_alloc() for details.
7512                          */
7513                         ASSERT(!test_bit(BTRFS_FS_OPEN, &fs_info->flags));
7514                         chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
7515                         ret = read_one_chunk(&found_key, leaf, chunk);
7516                         if (ret)
7517                                 goto error;
7518                 }
7519                 path->slots[0]++;
7520         }
7521
7522         /*
7523          * After loading chunk tree, we've got all device information,
7524          * do another round of validation checks.
7525          */
7526         if (total_dev != fs_info->fs_devices->total_devices) {
7527                 btrfs_err(fs_info,
7528            "super_num_devices %llu mismatch with num_devices %llu found here",
7529                           btrfs_super_num_devices(fs_info->super_copy),
7530                           total_dev);
7531                 ret = -EINVAL;
7532                 goto error;
7533         }
7534         if (btrfs_super_total_bytes(fs_info->super_copy) <
7535             fs_info->fs_devices->total_rw_bytes) {
7536                 btrfs_err(fs_info,
7537         "super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu",
7538                           btrfs_super_total_bytes(fs_info->super_copy),
7539                           fs_info->fs_devices->total_rw_bytes);
7540                 ret = -EINVAL;
7541                 goto error;
7542         }
7543         ret = 0;
7544 error:
7545         mutex_unlock(&uuid_mutex);
7546
7547         btrfs_free_path(path);
7548         return ret;
7549 }
7550
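/*
 * Late mount-time initialization: point every known device (including the
 * devices of seed filesystems) at the now fully set up fs_info.
 */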
7551 void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
7552 {
7553         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
7554         struct btrfs_device *device;
7555
7556         fs_devices->fs_info = fs_info;
7557
7558         mutex_lock(&fs_devices->device_list_mutex);
7559         list_for_each_entry(device, &fs_devices->devices, dev_list)
7560                 device->fs_info = fs_info;
7561
7562         list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
7563                 list_for_each_entry(device, &seed_devs->devices, dev_list)
7564                         device->fs_info = fs_info;
7565
7566                 seed_devs->fs_info = fs_info;
7567         }
7568         mutex_unlock(&fs_devices->device_list_mutex);
7569 }
7570
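/*
 * Helpers to read and write one counter of a btrfs_dev_stats_item directly
 * in the extent buffer, indexed into its values[] array.
 */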
7571 static u64 btrfs_dev_stats_value(const struct extent_buffer *eb,
7572                                  const struct btrfs_dev_stats_item *ptr,
7573                                  int index)
7574 {
7575         u64 val;
7576
7577         read_extent_buffer(eb, &val,
7578                            offsetof(struct btrfs_dev_stats_item, values) +
7579                             ((unsigned long)ptr) + (index * sizeof(u64)),
7580                            sizeof(val));
7581         return val;
7582 }
7583
7584 static void btrfs_set_dev_stats_value(struct extent_buffer *eb,
7585                                       struct btrfs_dev_stats_item *ptr,
7586                                       int index, u64 val)
7587 {
7588         write_extent_buffer(eb, &val,
7589                             offsetof(struct btrfs_dev_stats_item, values) +
7590                              ((unsigned long)ptr) + (index * sizeof(u64)),
7591                             sizeof(val));
7592 }
7593
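/*
 * Load the persistent device statistics item for @device from the device
 * tree, or reset the in-memory counters to zero if no item exists yet.
 */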
7594 static int btrfs_device_init_dev_stats(struct btrfs_device *device,
7595                                        struct btrfs_path *path)
7596 {
7597         struct btrfs_dev_stats_item *ptr;
7598         struct extent_buffer *eb;
7599         struct btrfs_key key;
7600         int item_size;
7601         int i, ret, slot;
7602
7603         if (!device->fs_info->dev_root)
7604                 return 0;
7605
7606         key.objectid = BTRFS_DEV_STATS_OBJECTID;
7607         key.type = BTRFS_PERSISTENT_ITEM_KEY;
7608         key.offset = device->devid;
7609         ret = btrfs_search_slot(NULL, device->fs_info->dev_root, &key, path, 0, 0);
7610         if (ret) {
7611                 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7612                         btrfs_dev_stat_set(device, i, 0);
7613                 device->dev_stats_valid = 1;
7614                 btrfs_release_path(path);
7615                 return ret < 0 ? ret : 0;
7616         }
7617         slot = path->slots[0];
7618         eb = path->nodes[0];
7619         item_size = btrfs_item_size_nr(eb, slot);
7620
7621         ptr = btrfs_item_ptr(eb, slot, struct btrfs_dev_stats_item);
7622
7623         for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
7624                 if (item_size >= (1 + i) * sizeof(__le64))
7625                         btrfs_dev_stat_set(device, i,
7626                                            btrfs_dev_stats_value(eb, ptr, i));
7627                 else
7628                         btrfs_dev_stat_set(device, i, 0);
7629         }
7630
7631         device->dev_stats_valid = 1;
7632         btrfs_dev_stat_print_on_load(device);
7633         btrfs_release_path(path);
7634
7635         return 0;
7636 }
7637
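/*
 * Mount-time loading of persistent device statistics for all devices,
 * including the devices of any seed filesystems.
 */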
7638 int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
7639 {
7640         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
7641         struct btrfs_device *device;
7642         struct btrfs_path *path = NULL;
7643         int ret = 0;
7644
7645         path = btrfs_alloc_path();
7646         if (!path)
7647                 return -ENOMEM;
7648
7649         mutex_lock(&fs_devices->device_list_mutex);
7650         list_for_each_entry(device, &fs_devices->devices, dev_list) {
7651                 ret = btrfs_device_init_dev_stats(device, path);
7652                 if (ret)
7653                         goto out;
7654         }
7655         list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
7656                 list_for_each_entry(device, &seed_devs->devices, dev_list) {
7657                         ret = btrfs_device_init_dev_stats(device, path);
7658                         if (ret)
7659                                 goto out;
7660                 }
7661         }
7662 out:
7663         mutex_unlock(&fs_devices->device_list_mutex);
7664
7665         btrfs_free_path(path);
7666         return ret;
7667 }
7668
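/*
 * Write the in-memory device statistics of @device to the corresponding item
 * in the device tree, replacing an existing item that is too small and
 * creating one if none exists.
 */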
7669 static int update_dev_stat_item(struct btrfs_trans_handle *trans,
7670                                 struct btrfs_device *device)
7671 {
7672         struct btrfs_fs_info *fs_info = trans->fs_info;
7673         struct btrfs_root *dev_root = fs_info->dev_root;
7674         struct btrfs_path *path;
7675         struct btrfs_key key;
7676         struct extent_buffer *eb;
7677         struct btrfs_dev_stats_item *ptr;
7678         int ret;
7679         int i;
7680
7681         key.objectid = BTRFS_DEV_STATS_OBJECTID;
7682         key.type = BTRFS_PERSISTENT_ITEM_KEY;
7683         key.offset = device->devid;
7684
7685         path = btrfs_alloc_path();
7686         if (!path)
7687                 return -ENOMEM;
7688         ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
7689         if (ret < 0) {
7690                 btrfs_warn_in_rcu(fs_info,
7691                         "error %d while searching for dev_stats item for device %s",
7692                               ret, rcu_str_deref(device->name));
7693                 goto out;
7694         }
7695
7696         if (ret == 0 &&
7697             btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
7698                 /* need to delete old one and insert a new one */
7699                 ret = btrfs_del_item(trans, dev_root, path);
7700                 if (ret != 0) {
7701                         btrfs_warn_in_rcu(fs_info,
7702                                 "delete too small dev_stats item for device %s failed %d",
7703                                       rcu_str_deref(device->name), ret);
7704                         goto out;
7705                 }
7706                 ret = 1;
7707         }
7708
7709         if (ret == 1) {
7710                 /* need to insert a new item */
7711                 btrfs_release_path(path);
7712                 ret = btrfs_insert_empty_item(trans, dev_root, path,
7713                                               &key, sizeof(*ptr));
7714                 if (ret < 0) {
7715                         btrfs_warn_in_rcu(fs_info,
7716                                 "insert dev_stats item for device %s failed %d",
7717                                 rcu_str_deref(device->name), ret);
7718                         goto out;
7719                 }
7720         }
7721
7722         eb = path->nodes[0];
7723         ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
7724         for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7725                 btrfs_set_dev_stats_value(eb, ptr, i,
7726                                           btrfs_dev_stat_read(device, i));
7727         btrfs_mark_buffer_dirty(eb);
7728
7729 out:
7730         btrfs_free_path(path);
7731         return ret;
7732 }
7733
7734 /*
7735  * Called from commit_transaction.  Writes all changed device stats to disk.
7736  */
7737 int btrfs_run_dev_stats(struct btrfs_trans_handle *trans)
7738 {
7739         struct btrfs_fs_info *fs_info = trans->fs_info;
7740         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7741         struct btrfs_device *device;
7742         int stats_cnt;
7743         int ret = 0;
7744
7745         mutex_lock(&fs_devices->device_list_mutex);
7746         list_for_each_entry(device, &fs_devices->devices, dev_list) {
7747                 stats_cnt = atomic_read(&device->dev_stats_ccnt);
7748                 if (!device->dev_stats_valid || stats_cnt == 0)
7749                         continue;
7750
7752                 /*
7753                  * There is a LOAD-LOAD control dependency between the value of
7754                  * dev_stats_ccnt and updating the on-disk values which requires
7755                  * reading the in-memory counters. Such control dependencies
7756                  * require explicit read memory barriers.
7757                  *
7758                  * This memory barrier pairs with smp_mb__before_atomic in
7759                  * btrfs_dev_stat_inc/btrfs_dev_stat_set and with the full
7760                  * barrier implied by atomic_xchg in
7761                  * btrfs_dev_stats_read_and_reset
7762                  */
7763                 smp_rmb();
7764
7765                 ret = update_dev_stat_item(trans, device);
7766                 if (!ret)
7767                         atomic_sub(stats_cnt, &device->dev_stats_ccnt);
7768         }
7769         mutex_unlock(&fs_devices->device_list_mutex);
7770
7771         return ret;
7772 }
7773
7774 void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
7775 {
7776         btrfs_dev_stat_inc(dev, index);
7777         btrfs_dev_stat_print_on_error(dev);
7778 }
7779
7780 static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
7781 {
7782         if (!dev->dev_stats_valid)
7783                 return;
7784         btrfs_err_rl_in_rcu(dev->fs_info,
7785                 "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
7786                            rcu_str_deref(dev->name),
7787                            btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
7788                            btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
7789                            btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
7790                            btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
7791                            btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
7792 }
7793
7794 static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
7795 {
7796         int i;
7797
7798         for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7799                 if (btrfs_dev_stat_read(dev, i) != 0)
7800                         break;
7801         if (i == BTRFS_DEV_STAT_VALUES_MAX)
7802                 return; /* all values == 0, suppress message */
7803
7804         btrfs_info_in_rcu(dev->fs_info,
7805                 "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
7806                rcu_str_deref(dev->name),
7807                btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
7808                btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
7809                btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
7810                btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
7811                btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
7812 }
7813
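/*
 * Copy the statistics of the device identified by @stats->devid into @stats;
 * when BTRFS_DEV_STATS_RESET is set in @stats->flags the counters are read
 * and reset atomically.
 */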
7814 int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
7815                         struct btrfs_ioctl_get_dev_stats *stats)
7816 {
7817         struct btrfs_device *dev;
7818         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7819         int i;
7820
7821         mutex_lock(&fs_devices->device_list_mutex);
7822         dev = btrfs_find_device(fs_info->fs_devices, stats->devid, NULL, NULL);
7823         mutex_unlock(&fs_devices->device_list_mutex);
7824
7825         if (!dev) {
7826                 btrfs_warn(fs_info, "get dev_stats failed, device not found");
7827                 return -ENODEV;
7828         } else if (!dev->dev_stats_valid) {
7829                 btrfs_warn(fs_info, "get dev_stats failed, not yet valid");
7830                 return -ENODEV;
7831         } else if (stats->flags & BTRFS_DEV_STATS_RESET) {
7832                 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
7833                         if (stats->nr_items > i)
7834                                 stats->values[i] =
7835                                         btrfs_dev_stat_read_and_reset(dev, i);
7836                         else
7837                                 btrfs_dev_stat_set(dev, i, 0);
7838                 }
7839                 btrfs_info(fs_info, "device stats zeroed by %s (%d)",
7840                            current->comm, task_pid_nr(current));
7841         } else {
7842                 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7843                         if (stats->nr_items > i)
7844                                 stats->values[i] = btrfs_dev_stat_read(dev, i);
7845         }
7846         if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
7847                 stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
7848         return 0;
7849 }
7850
7851 /*
7852  * Update the size and bytes used for each device where it changed.  This is
7853  * delayed since we would otherwise get errors while writing out the
7854  * superblocks.
7855  *
7856  * Must be invoked during transaction commit.
7857  */
7858 void btrfs_commit_device_sizes(struct btrfs_transaction *trans)
7859 {
7860         struct btrfs_device *curr, *next;
7861
7862         ASSERT(trans->state == TRANS_STATE_COMMIT_DOING);
7863
7864         if (list_empty(&trans->dev_update_list))
7865                 return;
7866
7867         /*
7868          * We don't need the device_list_mutex here.  This list is owned by the
7869          * transaction and the transaction must complete before the device is
7870          * released.
7871          */
7872         mutex_lock(&trans->fs_info->chunk_mutex);
7873         list_for_each_entry_safe(curr, next, &trans->dev_update_list,
7874                                  post_commit_list) {
7875                 list_del_init(&curr->post_commit_list);
7876                 curr->commit_total_bytes = curr->disk_total_bytes;
7877                 curr->commit_bytes_used = curr->bytes_used;
7878         }
7879         mutex_unlock(&trans->fs_info->chunk_mutex);
7880 }
7881
7882 /*
7883  * Multiplicity factor for simple profiles: DUP, RAID1-like and RAID10.
7884  */
7885 int btrfs_bg_type_to_factor(u64 flags)
7886 {
7887         const int index = btrfs_bg_flags_to_raid_index(flags);
7888
7889         return btrfs_raid_array[index].ncopies;
7890 }
7893
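/*
 * Verify a single dev extent against the chunk mapping it claims to belong
 * to: the chunk must exist, the extent length must match the computed stripe
 * length, one of the chunk's stripes must point back at this devid/physical
 * offset, and the extent must fit within the device (and be zone aligned on
 * zoned devices).
 */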
7894 static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
7895                                  u64 chunk_offset, u64 devid,
7896                                  u64 physical_offset, u64 physical_len)
7897 {
7898         struct extent_map_tree *em_tree = &fs_info->mapping_tree;
7899         struct extent_map *em;
7900         struct map_lookup *map;
7901         struct btrfs_device *dev;
7902         u64 stripe_len;
7903         bool found = false;
7904         int ret = 0;
7905         int i;
7906
7907         read_lock(&em_tree->lock);
7908         em = lookup_extent_mapping(em_tree, chunk_offset, 1);
7909         read_unlock(&em_tree->lock);
7910
7911         if (!em) {
7912                 btrfs_err(fs_info,
7913 "dev extent physical offset %llu on devid %llu doesn't have corresponding chunk",
7914                           physical_offset, devid);
7915                 ret = -EUCLEAN;
7916                 goto out;
7917         }
7918
7919         map = em->map_lookup;
7920         stripe_len = calc_stripe_length(map->type, em->len, map->num_stripes);
7921         if (physical_len != stripe_len) {
7922                 btrfs_err(fs_info,
7923 "dev extent physical offset %llu on devid %llu length doesn't match chunk %llu, have %llu expect %llu",
7924                           physical_offset, devid, em->start, physical_len,
7925                           stripe_len);
7926                 ret = -EUCLEAN;
7927                 goto out;
7928         }
7929
7930         for (i = 0; i < map->num_stripes; i++) {
7931                 if (map->stripes[i].dev->devid == devid &&
7932                     map->stripes[i].physical == physical_offset) {
7933                         found = true;
7934                         if (map->verified_stripes >= map->num_stripes) {
7935                                 btrfs_err(fs_info,
7936                                 "too many dev extents for chunk %llu found",
7937                                           em->start);
7938                                 ret = -EUCLEAN;
7939                                 goto out;
7940                         }
7941                         map->verified_stripes++;
7942                         break;
7943                 }
7944         }
7945         if (!found) {
7946                 btrfs_err(fs_info,
7947         "dev extent physical offset %llu devid %llu has no corresponding chunk",
7948                         physical_offset, devid);
7949                 ret = -EUCLEAN;
7950         }
7951
7952         /* Make sure no dev extent is beyond device boundary */
7953         dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL);
7954         if (!dev) {
7955                 btrfs_err(fs_info, "failed to find devid %llu", devid);
7956                 ret = -EUCLEAN;
7957                 goto out;
7958         }
7959
7960         if (physical_offset + physical_len > dev->disk_total_bytes) {
7961                 btrfs_err(fs_info,
7962 "dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu",
7963                           devid, physical_offset, physical_len,
7964                           dev->disk_total_bytes);
7965                 ret = -EUCLEAN;
7966                 goto out;
7967         }
7968
7969         if (dev->zone_info) {
7970                 u64 zone_size = dev->zone_info->zone_size;
7971
7972                 if (!IS_ALIGNED(physical_offset, zone_size) ||
7973                     !IS_ALIGNED(physical_len, zone_size)) {
7974                         btrfs_err(fs_info,
7975 "zoned: dev extent devid %llu physical offset %llu len %llu is not aligned to device zone",
7976                                   devid, physical_offset, physical_len);
7977                         ret = -EUCLEAN;
7978                         goto out;
7979                 }
7980         }
7981
7982 out:
7983         free_extent_map(em);
7984         return ret;
7985 }
7986
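/*
 * After all dev extents have been verified, check that every chunk mapping
 * saw exactly as many dev extents as it has stripes.
 */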
7987 static int verify_chunk_dev_extent_mapping(struct btrfs_fs_info *fs_info)
7988 {
7989         struct extent_map_tree *em_tree = &fs_info->mapping_tree;
7990         struct extent_map *em;
7991         struct rb_node *node;
7992         int ret = 0;
7993
7994         read_lock(&em_tree->lock);
7995         for (node = rb_first_cached(&em_tree->map); node; node = rb_next(node)) {
7996                 em = rb_entry(node, struct extent_map, rb_node);
7997                 if (em->map_lookup->num_stripes !=
7998                     em->map_lookup->verified_stripes) {
7999                         btrfs_err(fs_info,
8000                         "chunk %llu has missing dev extent, have %d expect %d",
8001                                   em->start, em->map_lookup->verified_stripes,
8002                                   em->map_lookup->num_stripes);
8003                         ret = -EUCLEAN;
8004                         goto out;
8005                 }
8006         }
8007 out:
8008         read_unlock(&em_tree->lock);
8009         return ret;
8010 }
8011
8012 /*
8013  * Ensure that all dev extents are mapped to the correct chunk, otherwise
8014  * later chunk allocation/free would cause unexpected behavior.
8015  *
8016  * NOTE: This will iterate through the whole device tree, which should be of
8017  * the same size level as the chunk tree.  This slightly increases mount time.
8018  */
8019 int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
8020 {
8021         struct btrfs_path *path;
8022         struct btrfs_root *root = fs_info->dev_root;
8023         struct btrfs_key key;
8024         u64 prev_devid = 0;
8025         u64 prev_dev_ext_end = 0;
8026         int ret = 0;
8027
8028         /*
8029          * We don't have a dev_root because we mounted with ignorebadroots and
8030          * failed to load the root, so we want to skip the verification in this
8031          * case for sure.
8032          *
8033          * However if the dev root is fine, but the tree itself is corrupted
8034          * we'd still fail to mount.  This verification is only to make sure
8035          * writes can happen safely, so instead just bypass this check
8036          * completely in the case of IGNOREBADROOTS.
8037          */
8038         if (btrfs_test_opt(fs_info, IGNOREBADROOTS))
8039                 return 0;
8040
8041         key.objectid = 1;
8042         key.type = BTRFS_DEV_EXTENT_KEY;
8043         key.offset = 0;
8044
8045         path = btrfs_alloc_path();
8046         if (!path)
8047                 return -ENOMEM;
8048
8049         path->reada = READA_FORWARD;
8050         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
8051         if (ret < 0)
8052                 goto out;
8053
8054         if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
8055                 ret = btrfs_next_leaf(root, path);
8056                 if (ret < 0)
8057                         goto out;
8058                 /* No dev extents at all? Not good */
8059                 if (ret > 0) {
8060                         ret = -EUCLEAN;
8061                         goto out;
8062                 }
8063         }
8064         while (1) {
8065                 struct extent_buffer *leaf = path->nodes[0];
8066                 struct btrfs_dev_extent *dext;
8067                 int slot = path->slots[0];
8068                 u64 chunk_offset;
8069                 u64 physical_offset;
8070                 u64 physical_len;
8071                 u64 devid;
8072
8073                 btrfs_item_key_to_cpu(leaf, &key, slot);
8074                 if (key.type != BTRFS_DEV_EXTENT_KEY)
8075                         break;
8076                 devid = key.objectid;
8077                 physical_offset = key.offset;
8078
8079                 dext = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent);
8080                 chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dext);
8081                 physical_len = btrfs_dev_extent_length(leaf, dext);
8082
8083                 /* Check if this dev extent overlaps with the previous one */
8084                 if (devid == prev_devid && physical_offset < prev_dev_ext_end) {
8085                         btrfs_err(fs_info,
8086 "dev extent devid %llu physical offset %llu overlap with previous dev extent end %llu",
8087                                   devid, physical_offset, prev_dev_ext_end);
8088                         ret = -EUCLEAN;
8089                         goto out;
8090                 }
8091
8092                 ret = verify_one_dev_extent(fs_info, chunk_offset, devid,
8093                                             physical_offset, physical_len);
8094                 if (ret < 0)
8095                         goto out;
8096                 prev_devid = devid;
8097                 prev_dev_ext_end = physical_offset + physical_len;
8098
8099                 ret = btrfs_next_item(root, path);
8100                 if (ret < 0)
8101                         goto out;
8102                 if (ret > 0) {
8103                         ret = 0;
8104                         break;
8105                 }
8106         }
8107
8108         /* Ensure all chunks have corresponding dev extents */
8109         ret = verify_chunk_dev_extent_mapping(fs_info);
8110 out:
8111         btrfs_free_path(path);
8112         return ret;
8113 }
8114
8115 /*
8116  * Check whether the given block group or device is pinned by any inode being
8117  * used as a swapfile.
8118  */
8119 bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr)
8120 {
8121         struct btrfs_swapfile_pin *sp;
8122         struct rb_node *node;
8123
8124         spin_lock(&fs_info->swapfile_pins_lock);
8125         node = fs_info->swapfile_pins.rb_node;
8126         while (node) {
8127                 sp = rb_entry(node, struct btrfs_swapfile_pin, node);
8128                 if (ptr < sp->ptr)
8129                         node = node->rb_left;
8130                 else if (ptr > sp->ptr)
8131                         node = node->rb_right;
8132                 else
8133                         break;
8134         }
8135         spin_unlock(&fs_info->swapfile_pins_lock);
8136         return node != NULL;
8137 }
8138
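/*
 * Worker for btrfs_repair_one_zone(): relocate the block group that covers
 * the failed logical address so its data gets rewritten elsewhere, taking the
 * balance exclusive op to avoid racing with other relocations.
 */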
8139 static int relocating_repair_kthread(void *data)
8140 {
8141         struct btrfs_block_group *cache = (struct btrfs_block_group *)data;
8142         struct btrfs_fs_info *fs_info = cache->fs_info;
8143         u64 target;
8144         int ret = 0;
8145
8146         target = cache->start;
8147         btrfs_put_block_group(cache);
8148
8149         if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) {
8150                 btrfs_info(fs_info,
8151                            "zoned: skip relocating block group %llu to repair: EBUSY",
8152                            target);
8153                 return -EBUSY;
8154         }
8155
8156         mutex_lock(&fs_info->reclaim_bgs_lock);
8157
8158         /* Ensure block group still exists */
8159         cache = btrfs_lookup_block_group(fs_info, target);
8160         if (!cache)
8161                 goto out;
8162
8163         if (!cache->relocating_repair)
8164                 goto out;
8165
8166         ret = btrfs_may_alloc_data_chunk(fs_info, target);
8167         if (ret < 0)
8168                 goto out;
8169
8170         btrfs_info(fs_info,
8171                    "zoned: relocating block group %llu to repair IO failure",
8172                    target);
8173         ret = btrfs_relocate_chunk(fs_info, target);
8174
8175 out:
8176         if (cache)
8177                 btrfs_put_block_group(cache);
8178         mutex_unlock(&fs_info->reclaim_bgs_lock);
8179         btrfs_exclop_finish(fs_info);
8180
8181         return ret;
8182 }
8183
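/*
 * React to an IO failure at @logical by kicking off a kthread that relocates
 * the containing block group (used on zoned filesystems, where rewriting the
 * data in place is not possible).  No repair is attempted on a degraded mount.
 */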
8184 int btrfs_repair_one_zone(struct btrfs_fs_info *fs_info, u64 logical)
8185 {
8186         struct btrfs_block_group *cache;
8187
8188         /* Do not attempt to repair in degraded state */
8189         if (btrfs_test_opt(fs_info, DEGRADED))
8190                 return 0;
8191
8192         cache = btrfs_lookup_block_group(fs_info, logical);
8193         if (!cache)
8194                 return 0;
8195
8196         spin_lock(&cache->lock);
8197         if (cache->relocating_repair) {
8198                 spin_unlock(&cache->lock);
8199                 btrfs_put_block_group(cache);
8200                 return 0;
8201         }
8202         cache->relocating_repair = 1;
8203         spin_unlock(&cache->lock);
8204
8205         kthread_run(relocating_repair_kthread, cache,
8206                     "btrfs-relocating-repair");
8207
8208         return 0;
8209 }