// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/ratelimit.h>
#include <linux/kthread.h>
#include <linux/raid/pq.h>
#include <linux/semaphore.h>
#include <linux/uuid.h>
#include <linux/list_sort.h>
#include "misc.h"
#include "ctree.h"
#include "extent_map.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "async-thread.h"
#include "check-integrity.h"
#include "rcu-string.h"
#include "dev-replace.h"
#include "sysfs.h"
#include "tree-checker.h"
#include "space-info.h"
#include "block-group.h"
#include "discard.h"

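/*
 * Attributes of the supported RAID profiles, indexed by the btrfs_raid_types
 * enum: device count constraints, copy/parity counts and the corresponding
 * block group flag of each profile.
 */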
const struct btrfs_raid_attr btrfs_raid_array[BTRFS_NR_RAID_TYPES] = {
        [BTRFS_RAID_RAID10] = {
                .sub_stripes    = 2,
                .dev_stripes    = 1,
                .devs_max       = 0,    /* 0 == as many as possible */
                .devs_min       = 4,
                .tolerated_failures = 1,
                .devs_increment = 2,
                .ncopies        = 2,
                .nparity        = 0,
                .raid_name      = "raid10",
                .bg_flag        = BTRFS_BLOCK_GROUP_RAID10,
                .mindev_error   = BTRFS_ERROR_DEV_RAID10_MIN_NOT_MET,
        },
        [BTRFS_RAID_RAID1] = {
                .sub_stripes    = 1,
                .dev_stripes    = 1,
                .devs_max       = 2,
                .devs_min       = 2,
                .tolerated_failures = 1,
                .devs_increment = 2,
                .ncopies        = 2,
                .nparity        = 0,
                .raid_name      = "raid1",
                .bg_flag        = BTRFS_BLOCK_GROUP_RAID1,
                .mindev_error   = BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET,
        },
        [BTRFS_RAID_RAID1C3] = {
                .sub_stripes    = 1,
                .dev_stripes    = 1,
                .devs_max       = 3,
                .devs_min       = 3,
                .tolerated_failures = 2,
                .devs_increment = 3,
                .ncopies        = 3,
                .nparity        = 0,
                .raid_name      = "raid1c3",
                .bg_flag        = BTRFS_BLOCK_GROUP_RAID1C3,
                .mindev_error   = BTRFS_ERROR_DEV_RAID1C3_MIN_NOT_MET,
        },
        [BTRFS_RAID_RAID1C4] = {
                .sub_stripes    = 1,
                .dev_stripes    = 1,
                .devs_max       = 4,
                .devs_min       = 4,
                .tolerated_failures = 3,
                .devs_increment = 4,
                .ncopies        = 4,
                .nparity        = 0,
                .raid_name      = "raid1c4",
                .bg_flag        = BTRFS_BLOCK_GROUP_RAID1C4,
                .mindev_error   = BTRFS_ERROR_DEV_RAID1C4_MIN_NOT_MET,
        },
        [BTRFS_RAID_DUP] = {
                .sub_stripes    = 1,
                .dev_stripes    = 2,
                .devs_max       = 1,
                .devs_min       = 1,
                .tolerated_failures = 0,
                .devs_increment = 1,
                .ncopies        = 2,
                .nparity        = 0,
                .raid_name      = "dup",
                .bg_flag        = BTRFS_BLOCK_GROUP_DUP,
                .mindev_error   = 0,
        },
        [BTRFS_RAID_RAID0] = {
                .sub_stripes    = 1,
                .dev_stripes    = 1,
                .devs_max       = 0,
                .devs_min       = 2,
                .tolerated_failures = 0,
                .devs_increment = 1,
                .ncopies        = 1,
                .nparity        = 0,
                .raid_name      = "raid0",
                .bg_flag        = BTRFS_BLOCK_GROUP_RAID0,
                .mindev_error   = 0,
        },
        [BTRFS_RAID_SINGLE] = {
                .sub_stripes    = 1,
                .dev_stripes    = 1,
                .devs_max       = 1,
                .devs_min       = 1,
                .tolerated_failures = 0,
                .devs_increment = 1,
                .ncopies        = 1,
                .nparity        = 0,
                .raid_name      = "single",
                .bg_flag        = 0,
                .mindev_error   = 0,
        },
        [BTRFS_RAID_RAID5] = {
                .sub_stripes    = 1,
                .dev_stripes    = 1,
                .devs_max       = 0,
                .devs_min       = 2,
                .tolerated_failures = 1,
                .devs_increment = 1,
                .ncopies        = 1,
                .nparity        = 1,
                .raid_name      = "raid5",
                .bg_flag        = BTRFS_BLOCK_GROUP_RAID5,
                .mindev_error   = BTRFS_ERROR_DEV_RAID5_MIN_NOT_MET,
        },
        [BTRFS_RAID_RAID6] = {
                .sub_stripes    = 1,
                .dev_stripes    = 1,
                .devs_max       = 0,
                .devs_min       = 3,
                .tolerated_failures = 2,
                .devs_increment = 1,
                .ncopies        = 1,
                .nparity        = 2,
                .raid_name      = "raid6",
                .bg_flag        = BTRFS_BLOCK_GROUP_RAID6,
                .mindev_error   = BTRFS_ERROR_DEV_RAID6_MIN_NOT_MET,
        },
};

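/*
 * Return the textual name of the RAID profile encoded in the block group
 * @flags, or NULL if the flags do not map to a known profile.
 */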
const char *btrfs_bg_type_to_raid_name(u64 flags)
{
        const int index = btrfs_bg_flags_to_raid_index(flags);

        if (index >= BTRFS_NR_RAID_TYPES)
                return NULL;

        return btrfs_raid_array[index].raid_name;
}

/*
 * Fill @buf with a textual description of @bg_flags, no more than @size_buf
 * bytes including the terminating null byte.
 */
void btrfs_describe_block_groups(u64 bg_flags, char *buf, u32 size_buf)
{
        int i;
        int ret;
        char *bp = buf;
        u64 flags = bg_flags;
        u32 size_bp = size_buf;

        if (!flags) {
                strcpy(bp, "NONE");
                return;
        }

#define DESCRIBE_FLAG(flag, desc)                                       \
        do {                                                            \
                if (flags & (flag)) {                                   \
                        ret = snprintf(bp, size_bp, "%s|", (desc));     \
                        if (ret < 0 || ret >= size_bp)                  \
                                goto out_overflow;                      \
                        size_bp -= ret;                                 \
                        bp += ret;                                      \
                        flags &= ~(flag);                               \
                }                                                       \
        } while (0)

        DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_DATA, "data");
        DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_SYSTEM, "system");
        DESCRIBE_FLAG(BTRFS_BLOCK_GROUP_METADATA, "metadata");

        DESCRIBE_FLAG(BTRFS_AVAIL_ALLOC_BIT_SINGLE, "single");
        for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
                DESCRIBE_FLAG(btrfs_raid_array[i].bg_flag,
                              btrfs_raid_array[i].raid_name);
#undef DESCRIBE_FLAG

        if (flags) {
                ret = snprintf(bp, size_bp, "0x%llx|", flags);
                size_bp -= ret;
        }

        if (size_bp < size_buf)
                buf[size_buf - size_bp - 1] = '\0'; /* remove last | */

        /*
         * The text is trimmed, it's up to the caller to provide a sufficiently
         * large buffer.
         */
out_overflow:;
}

static int init_first_rw_device(struct btrfs_trans_handle *trans);
static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info);
static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev);
static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
                             enum btrfs_map_op op,
                             u64 logical, u64 *length,
                             struct btrfs_bio **bbio_ret,
                             int mirror_num, int need_raid_map);

/*
 * Device locking
 * ==============
 *
 * There are several mutexes that protect manipulation of devices and low-level
 * structures like chunks but not block groups, extents or files
 *
 * uuid_mutex (global lock)
 * ------------------------
 * protects the fs_uuids list that tracks all per-fs fs_devices, resulting from
 * the SCAN_DEV ioctl registration or from mount either implicitly (the first
 * device) or requested by the device= mount option
 *
 * the mutex can be very coarse and can cover long-running operations
 *
 * protects: updates to fs_devices counters like missing devices, rw devices,
 * seeding, structure cloning, opening/closing devices at mount/umount time
 *
 * global::fs_devs - add, remove, updates to the global list
 *
 * does not protect: manipulation of the fs_devices::devices list in general
 * but in mount context it could be used to exclude list modifications by e.g.
 * the scan ioctl
 *
 * btrfs_device::name - renames (write side), read is RCU
 *
 * fs_devices::device_list_mutex (per-fs, with RCU)
 * ------------------------------------------------
 * protects updates to fs_devices::devices, i.e. adding and deleting
 *
 * simple list traversal with read-only actions can be done with RCU protection
 *
 * may be used to exclude some operations from running concurrently without any
 * modifications to the list (see write_all_supers)
 *
 * Is not required at mount and close times, because our device list is
 * protected by the uuid_mutex at that point.
 *
 * balance_mutex
 * -------------
 * protects balance structures (status, state) and context accessed from
 * several places (internally, ioctl)
 *
 * chunk_mutex
 * -----------
 * protects chunks, adding or removing during allocation, trim or when a new
 * device is added/removed. Additionally it also protects post_commit_list of
 * individual devices, since they can be added to the transaction's
 * post_commit_list only with chunk_mutex held.
 *
 * cleaner_mutex
 * -------------
 * a big lock that is held by the cleaner thread and prevents running subvolume
 * cleaning together with relocation or delayed iputs
 *
 *
 * Lock nesting
 * ============
 *
 * uuid_mutex
 *   device_list_mutex
 *     chunk_mutex
 *   balance_mutex
 *
 *
 * Exclusive operations
 * ====================
 *
 * Maintains the exclusivity of the following operations that apply to the
 * whole filesystem and cannot run in parallel.
 *
 * - Balance (*)
 * - Device add
 * - Device remove
 * - Device replace (*)
 * - Resize
 *
 * The device operations (as above) can be in one of the following states:
 *
 * - Running state
 * - Paused state
 * - Completed state
 *
 * Only device operations marked with (*) can go into the Paused state for the
 * following reasons:
 *
 * - ioctl (only Balance can be Paused through ioctl)
 * - filesystem remounted as read-only
 * - filesystem unmounted and mounted as read-only
 * - system power-cycle and filesystem mounted as read-only
 * - filesystem or device errors leading to forced read-only
 *
 * The status of the exclusive operation is set and cleared atomically.
 * During the course of the Paused state, fs_info::exclusive_operation
 * remains set.
 * A device operation in Paused or Running state can be canceled or resumed
 * either by ioctl (Balance only) or when remounted as read-write.
 * The exclusive status is cleared when the device operation is canceled or
 * completed.
 */

DEFINE_MUTEX(uuid_mutex);
static LIST_HEAD(fs_uuids);
struct list_head * __attribute_const__ btrfs_get_fs_uuids(void)
{
        return &fs_uuids;
}

/*
 * alloc_fs_devices - allocate struct btrfs_fs_devices
 * @fsid:               if not NULL, copy the UUID to fs_devices::fsid
 * @metadata_fsid:      if not NULL, copy the UUID to fs_devices::metadata_fsid
 *
 * Return a pointer to a new struct btrfs_fs_devices on success, or ERR_PTR().
 * The returned struct is not linked onto any lists and can be destroyed with
 * kfree() right away.
 */
static struct btrfs_fs_devices *alloc_fs_devices(const u8 *fsid,
                                                 const u8 *metadata_fsid)
{
        struct btrfs_fs_devices *fs_devs;

        fs_devs = kzalloc(sizeof(*fs_devs), GFP_KERNEL);
        if (!fs_devs)
                return ERR_PTR(-ENOMEM);

        mutex_init(&fs_devs->device_list_mutex);

        INIT_LIST_HEAD(&fs_devs->devices);
        INIT_LIST_HEAD(&fs_devs->alloc_list);
        INIT_LIST_HEAD(&fs_devs->fs_list);
        INIT_LIST_HEAD(&fs_devs->seed_list);
        if (fsid)
                memcpy(fs_devs->fsid, fsid, BTRFS_FSID_SIZE);

        if (metadata_fsid)
                memcpy(fs_devs->metadata_uuid, metadata_fsid, BTRFS_FSID_SIZE);
        else if (fsid)
                memcpy(fs_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE);

        return fs_devs;
}

void btrfs_free_device(struct btrfs_device *device)
{
        WARN_ON(!list_empty(&device->post_commit_list));
        rcu_string_free(device->name);
        extent_io_tree_release(&device->alloc_state);
        bio_put(device->flush_bio);
        kfree(device);
}

static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
{
        struct btrfs_device *device;

        WARN_ON(fs_devices->opened);
        while (!list_empty(&fs_devices->devices)) {
                device = list_entry(fs_devices->devices.next,
                                    struct btrfs_device, dev_list);
                list_del(&device->dev_list);
                btrfs_free_device(device);
        }
        kfree(fs_devices);
}

void __exit btrfs_cleanup_fs_uuids(void)
{
        struct btrfs_fs_devices *fs_devices;

        while (!list_empty(&fs_uuids)) {
                fs_devices = list_entry(fs_uuids.next,
                                        struct btrfs_fs_devices, fs_list);
                list_del(&fs_devices->fs_list);
                free_fs_devices(fs_devices);
        }
}

/*
 * Returns a pointer to a new btrfs_device on success; ERR_PTR() on error.
 * Returned struct is not linked onto any lists and must be destroyed using
 * btrfs_free_device.
 */
static struct btrfs_device *__alloc_device(struct btrfs_fs_info *fs_info)
{
        struct btrfs_device *dev;

        dev = kzalloc(sizeof(*dev), GFP_KERNEL);
        if (!dev)
                return ERR_PTR(-ENOMEM);

        /*
         * Preallocate a bio that's always going to be used for flushing device
         * barriers and matches the device lifespan
         */
        dev->flush_bio = bio_alloc_bioset(GFP_KERNEL, 0, NULL);
        if (!dev->flush_bio) {
                kfree(dev);
                return ERR_PTR(-ENOMEM);
        }

        INIT_LIST_HEAD(&dev->dev_list);
        INIT_LIST_HEAD(&dev->dev_alloc_list);
        INIT_LIST_HEAD(&dev->post_commit_list);

        atomic_set(&dev->reada_in_flight, 0);
        atomic_set(&dev->dev_stats_ccnt, 0);
        btrfs_device_data_ordered_init(dev, fs_info);
        INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
        INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
        extent_io_tree_init(fs_info, &dev->alloc_state,
                            IO_TREE_DEVICE_ALLOC_STATE, NULL);

        return dev;
}

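/* Find the fs_devices matching @fsid (and @metadata_fsid when it is given). */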
static noinline struct btrfs_fs_devices *find_fsid(
                const u8 *fsid, const u8 *metadata_fsid)
{
        struct btrfs_fs_devices *fs_devices;

        ASSERT(fsid);

        /* Handle non-split brain cases */
        list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
                if (metadata_fsid) {
                        if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0
                            && memcmp(metadata_fsid, fs_devices->metadata_uuid,
                                      BTRFS_FSID_SIZE) == 0)
                                return fs_devices;
                } else {
                        if (memcmp(fsid, fs_devices->fsid, BTRFS_FSID_SIZE) == 0)
                                return fs_devices;
                }
        }
        return NULL;
}

static struct btrfs_fs_devices *find_fsid_with_metadata_uuid(
                                struct btrfs_super_block *disk_super)
{
        struct btrfs_fs_devices *fs_devices;

        /*
         * Handle scanned device having completed its fsid change but
         * belonging to a fs_devices that was created by first scanning
         * a device which didn't have its fsid/metadata_uuid changed
         * at all and the CHANGING_FSID_V2 flag set.
         */
        list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
                if (fs_devices->fsid_change &&
                    memcmp(disk_super->metadata_uuid, fs_devices->fsid,
                           BTRFS_FSID_SIZE) == 0 &&
                    memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
                           BTRFS_FSID_SIZE) == 0) {
                        return fs_devices;
                }
        }
        /*
         * Handle scanned device having completed its fsid change but
         * belonging to a fs_devices that was created by a device that
         * has an outdated pair of fsid/metadata_uuid and
         * CHANGING_FSID_V2 flag set.
         */
        list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
                if (fs_devices->fsid_change &&
                    memcmp(fs_devices->metadata_uuid,
                           fs_devices->fsid, BTRFS_FSID_SIZE) != 0 &&
                    memcmp(disk_super->metadata_uuid, fs_devices->metadata_uuid,
                           BTRFS_FSID_SIZE) == 0) {
                        return fs_devices;
                }
        }

        return find_fsid(disk_super->fsid, disk_super->metadata_uuid);
}

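/*
 * Open the block device at @device_path, optionally flush its page cache, set
 * the btrfs block size and read the primary super block. On success the caller
 * is responsible for releasing @disk_super and putting @bdev.
 */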
static int
btrfs_get_bdev_and_sb(const char *device_path, fmode_t flags, void *holder,
                      int flush, struct block_device **bdev,
                      struct btrfs_super_block **disk_super)
{
        int ret;

        *bdev = blkdev_get_by_path(device_path, flags, holder);

        if (IS_ERR(*bdev)) {
                ret = PTR_ERR(*bdev);
                goto error;
        }

        if (flush)
                filemap_write_and_wait((*bdev)->bd_inode->i_mapping);
        ret = set_blocksize(*bdev, BTRFS_BDEV_BLOCKSIZE);
        if (ret) {
                blkdev_put(*bdev, flags);
                goto error;
        }
        invalidate_bdev(*bdev);
        *disk_super = btrfs_read_dev_super(*bdev);
        if (IS_ERR(*disk_super)) {
                ret = PTR_ERR(*disk_super);
                blkdev_put(*bdev, flags);
                goto error;
        }

        return 0;

error:
        *bdev = NULL;
        return ret;
}

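/* Check whether @path matches the registered name of @device (RCU safe). */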
static bool device_path_matched(const char *path, struct btrfs_device *device)
{
        int found;

        rcu_read_lock();
        found = strcmp(rcu_str_deref(device->name), path);
        rcu_read_unlock();

        return found == 0;
}

/*
 *  Search and remove all stale devices (devices which are not mounted).
 *  When both inputs are NULL, it will search and release all stale devices.
 *  path:        Optional. When provided, it will release all unmounted devices
 *               matching this path only.
 *  skip_device: Optional. Will skip this device when searching for the stale
 *               devices.
 *  Return:      0 for success or if @path is NULL.
 *               -EBUSY if @path is a mounted device.
 *               -ENOENT if @path does not match any device in the list.
 */
static int btrfs_free_stale_devices(const char *path,
                                     struct btrfs_device *skip_device)
{
        struct btrfs_fs_devices *fs_devices, *tmp_fs_devices;
        struct btrfs_device *device, *tmp_device;
        int ret = 0;

        if (path)
                ret = -ENOENT;

        list_for_each_entry_safe(fs_devices, tmp_fs_devices, &fs_uuids, fs_list) {
                mutex_lock(&fs_devices->device_list_mutex);
                list_for_each_entry_safe(device, tmp_device,
                                         &fs_devices->devices, dev_list) {
                        if (skip_device && skip_device == device)
                                continue;
                        if (path && !device->name)
                                continue;
                        if (path && !device_path_matched(path, device))
                                continue;
                        if (fs_devices->opened) {
                                /* for an already deleted device return 0 */
                                if (path && ret != 0)
                                        ret = -EBUSY;
                                break;
                        }

                        /* delete the stale device */
                        fs_devices->num_devices--;
                        list_del(&device->dev_list);
                        btrfs_free_device(device);

                        ret = 0;
                }
                mutex_unlock(&fs_devices->device_list_mutex);

                if (fs_devices->num_devices == 0) {
                        btrfs_sysfs_remove_fsid(fs_devices);
                        list_del(&fs_devices->fs_list);
                        free_fs_devices(fs_devices);
                }
        }

        return ret;
}

/*
 * This is only used on mount, and we are protected from competing things
 * messing with our fs_devices by the uuid_mutex, thus we do not need the
 * fs_devices->device_list_mutex here.
 */
static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
                        struct btrfs_device *device, fmode_t flags,
                        void *holder)
{
        struct request_queue *q;
        struct block_device *bdev;
        struct btrfs_super_block *disk_super;
        u64 devid;
        int ret;

        if (device->bdev)
                return -EINVAL;
        if (!device->name)
                return -EINVAL;

        ret = btrfs_get_bdev_and_sb(device->name->str, flags, holder, 1,
                                    &bdev, &disk_super);
        if (ret)
                return ret;

        devid = btrfs_stack_device_id(&disk_super->dev_item);
        if (devid != device->devid)
                goto error_free_page;

        if (memcmp(device->uuid, disk_super->dev_item.uuid, BTRFS_UUID_SIZE))
                goto error_free_page;

        device->generation = btrfs_super_generation(disk_super);

        if (btrfs_super_flags(disk_super) & BTRFS_SUPER_FLAG_SEEDING) {
                if (btrfs_super_incompat_flags(disk_super) &
                    BTRFS_FEATURE_INCOMPAT_METADATA_UUID) {
                        pr_err(
                "BTRFS: Invalid seeding and uuid-changed device detected\n");
                        goto error_free_page;
                }

                clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
                fs_devices->seeding = true;
        } else {
                if (bdev_read_only(bdev))
                        clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
                else
                        set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
        }

        q = bdev_get_queue(bdev);
        if (!blk_queue_nonrot(q))
                fs_devices->rotating = true;

        device->bdev = bdev;
        clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
        device->mode = flags;

        fs_devices->open_devices++;
        if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
            device->devid != BTRFS_DEV_REPLACE_DEVID) {
                fs_devices->rw_devices++;
                list_add_tail(&device->dev_alloc_list, &fs_devices->alloc_list);
        }
        btrfs_release_disk_super(disk_super);

        return 0;

error_free_page:
        btrfs_release_disk_super(disk_super);
        blkdev_put(bdev, flags);

        return -EINVAL;
}

/*
 * Handle a scanned device having its CHANGING_FSID_V2 flag set and the
 * fs_devices being created with a disk that has already completed its fsid
 * change. Such a disk can belong to an fs which has its FSID changed or to one
 * which doesn't. Handle both cases here.
 */
static struct btrfs_fs_devices *find_fsid_inprogress(
                                        struct btrfs_super_block *disk_super)
{
        struct btrfs_fs_devices *fs_devices;

        list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
                if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
                           BTRFS_FSID_SIZE) != 0 &&
                    memcmp(fs_devices->metadata_uuid, disk_super->fsid,
                           BTRFS_FSID_SIZE) == 0 && !fs_devices->fsid_change) {
                        return fs_devices;
                }
        }

        return find_fsid(disk_super->fsid, NULL);
}

static struct btrfs_fs_devices *find_fsid_changed(
                                        struct btrfs_super_block *disk_super)
{
        struct btrfs_fs_devices *fs_devices;

        /*
         * Handles the case where the scanned device is part of an fs that had
         * multiple successful changes of FSID but the currently scanned device
         * didn't observe it. Meaning our fsid will be different from theirs.
         * We need to handle two subcases:
         *  1 - The fs still continues to have different METADATA/FSID uuids.
         *  2 - The fs is switched back to its original FSID (METADATA/FSID
         *  are equal).
         */
        list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
                /* Changed UUIDs */
                if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
                           BTRFS_FSID_SIZE) != 0 &&
                    memcmp(fs_devices->metadata_uuid, disk_super->metadata_uuid,
                           BTRFS_FSID_SIZE) == 0 &&
                    memcmp(fs_devices->fsid, disk_super->fsid,
                           BTRFS_FSID_SIZE) != 0)
                        return fs_devices;

                /* Unchanged UUIDs */
                if (memcmp(fs_devices->metadata_uuid, fs_devices->fsid,
                           BTRFS_FSID_SIZE) == 0 &&
                    memcmp(fs_devices->fsid, disk_super->metadata_uuid,
                           BTRFS_FSID_SIZE) == 0)
                        return fs_devices;
        }

        return NULL;
}

static struct btrfs_fs_devices *find_fsid_reverted_metadata(
                                struct btrfs_super_block *disk_super)
{
        struct btrfs_fs_devices *fs_devices;

        /*
         * Handle the case where the scanned device is part of an fs whose last
         * metadata UUID change reverted it to the original FSID. At the same
         * time fs_devices was first created by another constituent device
         * which didn't fully observe the operation. This results in a
         * btrfs_fs_devices created with metadata/fsid different AND
         * btrfs_fs_devices::fsid_change set AND the metadata_uuid of the
         * fs_devices equal to the FSID of the disk.
         */
        list_for_each_entry(fs_devices, &fs_uuids, fs_list) {
                if (memcmp(fs_devices->fsid, fs_devices->metadata_uuid,
                           BTRFS_FSID_SIZE) != 0 &&
                    memcmp(fs_devices->metadata_uuid, disk_super->fsid,
                           BTRFS_FSID_SIZE) == 0 &&
                    fs_devices->fsid_change)
                        return fs_devices;
        }

        return NULL;
}

/*
 * Add a new device to the list of registered devices
 *
 * Returns:
 * device pointer which was just added or updated when successful
 * error pointer when failed
 */
static noinline struct btrfs_device *device_list_add(const char *path,
                           struct btrfs_super_block *disk_super,
                           bool *new_device_added)
{
        struct btrfs_device *device;
        struct btrfs_fs_devices *fs_devices = NULL;
        struct rcu_string *name;
        u64 found_transid = btrfs_super_generation(disk_super);
        u64 devid = btrfs_stack_device_id(&disk_super->dev_item);
        bool has_metadata_uuid = (btrfs_super_incompat_flags(disk_super) &
                BTRFS_FEATURE_INCOMPAT_METADATA_UUID);
        bool fsid_change_in_progress = (btrfs_super_flags(disk_super) &
                                        BTRFS_SUPER_FLAG_CHANGING_FSID_V2);

        if (fsid_change_in_progress) {
                if (!has_metadata_uuid)
                        fs_devices = find_fsid_inprogress(disk_super);
                else
                        fs_devices = find_fsid_changed(disk_super);
        } else if (has_metadata_uuid) {
                fs_devices = find_fsid_with_metadata_uuid(disk_super);
        } else {
                fs_devices = find_fsid_reverted_metadata(disk_super);
                if (!fs_devices)
                        fs_devices = find_fsid(disk_super->fsid, NULL);
        }

        if (!fs_devices) {
                if (has_metadata_uuid)
                        fs_devices = alloc_fs_devices(disk_super->fsid,
                                                      disk_super->metadata_uuid);
                else
                        fs_devices = alloc_fs_devices(disk_super->fsid, NULL);

                if (IS_ERR(fs_devices))
                        return ERR_CAST(fs_devices);

                fs_devices->fsid_change = fsid_change_in_progress;

                mutex_lock(&fs_devices->device_list_mutex);
                list_add(&fs_devices->fs_list, &fs_uuids);

                device = NULL;
        } else {
                mutex_lock(&fs_devices->device_list_mutex);
                device = btrfs_find_device(fs_devices, devid,
                                disk_super->dev_item.uuid, NULL, false);

                /*
                 * If this disk has been pulled into an fs devices created by
                 * a device which had the CHANGING_FSID_V2 flag then replace the
                 * metadata_uuid/fsid values of the fs_devices.
                 */
                if (fs_devices->fsid_change &&
                    found_transid > fs_devices->latest_generation) {
                        memcpy(fs_devices->fsid, disk_super->fsid,
                                        BTRFS_FSID_SIZE);

                        if (has_metadata_uuid)
                                memcpy(fs_devices->metadata_uuid,
                                       disk_super->metadata_uuid,
                                       BTRFS_FSID_SIZE);
                        else
                                memcpy(fs_devices->metadata_uuid,
                                       disk_super->fsid, BTRFS_FSID_SIZE);

                        fs_devices->fsid_change = false;
                }
        }

        if (!device) {
                if (fs_devices->opened) {
                        mutex_unlock(&fs_devices->device_list_mutex);
                        return ERR_PTR(-EBUSY);
                }

                device = btrfs_alloc_device(NULL, &devid,
                                            disk_super->dev_item.uuid);
                if (IS_ERR(device)) {
                        mutex_unlock(&fs_devices->device_list_mutex);
                        /* we can safely leave the fs_devices entry around */
                        return device;
                }

                name = rcu_string_strdup(path, GFP_NOFS);
                if (!name) {
                        btrfs_free_device(device);
                        mutex_unlock(&fs_devices->device_list_mutex);
                        return ERR_PTR(-ENOMEM);
                }
                rcu_assign_pointer(device->name, name);

                list_add_rcu(&device->dev_list, &fs_devices->devices);
                fs_devices->num_devices++;

                device->fs_devices = fs_devices;
                *new_device_added = true;

                if (disk_super->label[0])
                        pr_info(
        "BTRFS: device label %s devid %llu transid %llu %s scanned by %s (%d)\n",
                                disk_super->label, devid, found_transid, path,
                                current->comm, task_pid_nr(current));
                else
                        pr_info(
        "BTRFS: device fsid %pU devid %llu transid %llu %s scanned by %s (%d)\n",
                                disk_super->fsid, devid, found_transid, path,
                                current->comm, task_pid_nr(current));

        } else if (!device->name || strcmp(device->name->str, path)) {
                /*
                 * When FS is already mounted.
                 * 1. If you are here and if the device->name is NULL that
                 *    means this device was missing at time of FS mount.
                 * 2. If you are here and if the device->name is different
                 *    from 'path' that means either
                 *      a. The same device disappeared and reappeared with
                 *         different name. or
                 *      b. The missing-disk-which-was-replaced, has
                 *         reappeared now.
                 *
                 * We must allow 1 and 2a above. But 2b would be spurious
                 * and unintentional.
                 *
                 * Further in case of 1 and 2a above, the disk at 'path'
                 * would have missed some transaction when it was away and
                 * in case of 2a the stale bdev has to be updated as well.
                 * 2b must not be allowed at any time.
                 */

                /*
                 * For now, we do allow update to btrfs_fs_device through the
                 * btrfs dev scan cli after FS has been mounted.  We're still
                 * tracking a problem where systems fail mount by subvolume id
                 * when we reject replacement on a mounted FS.
                 */
                if (!fs_devices->opened && found_transid < device->generation) {
                        /*
                         * That is if the FS is _not_ mounted and if you
                         * are here, that means there is more than one
                         * disk with the same uuid and devid. We keep the one
                         * with the larger generation number or the last-in if
                         * the generations are equal.
                         */
                        mutex_unlock(&fs_devices->device_list_mutex);
                        return ERR_PTR(-EEXIST);
                }

                /*
                 * We are going to replace the device path for a given devid,
                 * make sure it's the same device if the device is mounted
                 */
                if (device->bdev) {
                        int error;
                        dev_t path_dev;

                        error = lookup_bdev(path, &path_dev);
                        if (error) {
                                mutex_unlock(&fs_devices->device_list_mutex);
                                return ERR_PTR(error);
                        }

                        if (device->bdev->bd_dev != path_dev) {
                                mutex_unlock(&fs_devices->device_list_mutex);
                                btrfs_warn_in_rcu(device->fs_info,
        "duplicate device %s devid %llu generation %llu scanned by %s (%d)",
                                                  path, devid, found_transid,
                                                  current->comm,
                                                  task_pid_nr(current));
                                return ERR_PTR(-EEXIST);
                        }
                        btrfs_info_in_rcu(device->fs_info,
        "devid %llu device path %s changed to %s scanned by %s (%d)",
                                          devid, rcu_str_deref(device->name),
                                          path, current->comm,
                                          task_pid_nr(current));
                }

                name = rcu_string_strdup(path, GFP_NOFS);
                if (!name) {
                        mutex_unlock(&fs_devices->device_list_mutex);
                        return ERR_PTR(-ENOMEM);
                }
                rcu_string_free(device->name);
                rcu_assign_pointer(device->name, name);
                if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
                        fs_devices->missing_devices--;
                        clear_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
                }
        }

        /*
         * Unmount does not free the btrfs_device struct but would zero
         * generation along with most of the other members. So just update
         * it back. We need it to pick the disk with the largest generation
         * (as above).
         */
        if (!fs_devices->opened) {
                device->generation = found_transid;
                fs_devices->latest_generation = max_t(u64, found_transid,
                                                fs_devices->latest_generation);
        }

        fs_devices->total_devices = btrfs_super_num_devices(disk_super);

        mutex_unlock(&fs_devices->device_list_mutex);
        return device;
}

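/*
 * Allocate a new btrfs_fs_devices structure and populate it with clones of
 * all devices of @orig. The cloned devices are not opened.
 */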
static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
{
        struct btrfs_fs_devices *fs_devices;
        struct btrfs_device *device;
        struct btrfs_device *orig_dev;
        int ret = 0;

        fs_devices = alloc_fs_devices(orig->fsid, NULL);
        if (IS_ERR(fs_devices))
                return fs_devices;

        mutex_lock(&orig->device_list_mutex);
        fs_devices->total_devices = orig->total_devices;

        list_for_each_entry(orig_dev, &orig->devices, dev_list) {
                struct rcu_string *name;

                device = btrfs_alloc_device(NULL, &orig_dev->devid,
                                            orig_dev->uuid);
                if (IS_ERR(device)) {
                        ret = PTR_ERR(device);
                        goto error;
                }

                /*
                 * This is ok to do without the RCU read lock held because we
                 * hold the uuid_mutex, so nothing we touch in here is going
                 * to disappear.
                 */
                if (orig_dev->name) {
                        name = rcu_string_strdup(orig_dev->name->str,
                                        GFP_KERNEL);
                        if (!name) {
                                btrfs_free_device(device);
                                ret = -ENOMEM;
                                goto error;
                        }
                        rcu_assign_pointer(device->name, name);
                }

                list_add(&device->dev_list, &fs_devices->devices);
                device->fs_devices = fs_devices;
                fs_devices->num_devices++;
        }
        mutex_unlock(&orig->device_list_mutex);
        return fs_devices;
error:
        mutex_unlock(&orig->device_list_mutex);
        free_fs_devices(fs_devices);
        return ERR_PTR(ret);
}

static void __btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices,
                                      int step, struct btrfs_device **latest_dev)
{
        struct btrfs_device *device, *next;

        /* This is the initialized path; it is safe to release the devices. */
        list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) {
                if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state)) {
                        if (!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
                                      &device->dev_state) &&
                            !test_bit(BTRFS_DEV_STATE_MISSING,
                                      &device->dev_state) &&
                            (!*latest_dev ||
                             device->generation > (*latest_dev)->generation)) {
                                *latest_dev = device;
                        }
                        continue;
                }

                /*
                 * We have already validated the presence of
                 * BTRFS_DEV_REPLACE_DEVID in btrfs_init_dev_replace(), so
                 * just continue.
                 */
                if (device->devid == BTRFS_DEV_REPLACE_DEVID)
                        continue;

                if (device->bdev) {
                        blkdev_put(device->bdev, device->mode);
                        device->bdev = NULL;
                        fs_devices->open_devices--;
                }
                if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
                        list_del_init(&device->dev_alloc_list);
                        clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
                }
                list_del_init(&device->dev_list);
                fs_devices->num_devices--;
                btrfs_free_device(device);
        }
}

/*
 * After we have read the system tree and know devids belonging to this
 * filesystem, remove any device which does not belong there.
 */
void btrfs_free_extra_devids(struct btrfs_fs_devices *fs_devices, int step)
{
        struct btrfs_device *latest_dev = NULL;
        struct btrfs_fs_devices *seed_dev;

        mutex_lock(&uuid_mutex);
        __btrfs_free_extra_devids(fs_devices, step, &latest_dev);

        list_for_each_entry(seed_dev, &fs_devices->seed_list, seed_list)
                __btrfs_free_extra_devids(seed_dev, step, &latest_dev);

        fs_devices->latest_bdev = latest_dev->bdev;

        mutex_unlock(&uuid_mutex);
}

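/*
 * Sync and invalidate the page cache of a writeable device and release the
 * underlying block device.
 */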
static void btrfs_close_bdev(struct btrfs_device *device)
{
        if (!device->bdev)
                return;

        if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
                sync_blockdev(device->bdev);
                invalidate_bdev(device->bdev);
        }

        blkdev_put(device->bdev, device->mode);
}

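/*
 * Close a device's block device and reset the in-memory device state so the
 * structure can be reused, adjusting the counters of its fs_devices.
 */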
static void btrfs_close_one_device(struct btrfs_device *device)
{
        struct btrfs_fs_devices *fs_devices = device->fs_devices;

        if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
            device->devid != BTRFS_DEV_REPLACE_DEVID) {
                list_del_init(&device->dev_alloc_list);
                fs_devices->rw_devices--;
        }

        if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
                fs_devices->missing_devices--;

        btrfs_close_bdev(device);
        if (device->bdev) {
                fs_devices->open_devices--;
                device->bdev = NULL;
        }
        clear_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);

        device->fs_info = NULL;
        atomic_set(&device->dev_stats_ccnt, 0);
        extent_io_tree_release(&device->alloc_state);

        /* Verify the device is back in a pristine state */
        ASSERT(!test_bit(BTRFS_DEV_STATE_FLUSH_SENT, &device->dev_state));
        ASSERT(!test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
        ASSERT(list_empty(&device->dev_alloc_list));
        ASSERT(list_empty(&device->post_commit_list));
        ASSERT(atomic_read(&device->reada_in_flight) == 0);
}

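/*
 * Drop one open reference of @fs_devices and close all of its devices when
 * the last reference is dropped.
 */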
static void close_fs_devices(struct btrfs_fs_devices *fs_devices)
{
        struct btrfs_device *device, *tmp;

        lockdep_assert_held(&uuid_mutex);

        if (--fs_devices->opened > 0)
                return;

        list_for_each_entry_safe(device, tmp, &fs_devices->devices, dev_list)
                btrfs_close_one_device(device);

        WARN_ON(fs_devices->open_devices);
        WARN_ON(fs_devices->rw_devices);
        fs_devices->opened = 0;
        fs_devices->seeding = false;
        fs_devices->fs_info = NULL;
}

void btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
{
        LIST_HEAD(list);
        struct btrfs_fs_devices *tmp;

        mutex_lock(&uuid_mutex);
        close_fs_devices(fs_devices);
        if (!fs_devices->opened)
                list_splice_init(&fs_devices->seed_list, &list);

        list_for_each_entry_safe(fs_devices, tmp, &list, seed_list) {
                close_fs_devices(fs_devices);
                list_del(&fs_devices->seed_list);
                free_fs_devices(fs_devices);
        }
        mutex_unlock(&uuid_mutex);
}

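/*
 * Open all devices of @fs_devices and track the one with the largest
 * generation as the latest. Devices that fail to open with -ENODATA are
 * dropped from the list; fails with -EINVAL if no device could be opened.
 */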
static int open_fs_devices(struct btrfs_fs_devices *fs_devices,
                                fmode_t flags, void *holder)
{
        struct btrfs_device *device;
        struct btrfs_device *latest_dev = NULL;
        struct btrfs_device *tmp_device;

        flags |= FMODE_EXCL;

        list_for_each_entry_safe(device, tmp_device, &fs_devices->devices,
                                 dev_list) {
                int ret;

                ret = btrfs_open_one_device(fs_devices, device, flags, holder);
                if (ret == 0 &&
                    (!latest_dev || device->generation > latest_dev->generation)) {
                        latest_dev = device;
                } else if (ret == -ENODATA) {
                        fs_devices->num_devices--;
                        list_del(&device->dev_list);
                        btrfs_free_device(device);
                }
        }
        if (fs_devices->open_devices == 0)
                return -EINVAL;

        fs_devices->opened = 1;
        fs_devices->latest_bdev = latest_dev->bdev;
        fs_devices->total_rw_bytes = 0;
        fs_devices->chunk_alloc_policy = BTRFS_CHUNK_ALLOC_REGULAR;

        return 0;
}

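/* list_sort() comparator that orders devices by ascending devid. */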
static int devid_cmp(void *priv, struct list_head *a, struct list_head *b)
{
        struct btrfs_device *dev1, *dev2;

        dev1 = list_entry(a, struct btrfs_device, dev_list);
        dev2 = list_entry(b, struct btrfs_device, dev_list);

        if (dev1->devid < dev2->devid)
                return -1;
        else if (dev1->devid > dev2->devid)
                return 1;
        return 0;
}

int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
                       fmode_t flags, void *holder)
{
        int ret;

        lockdep_assert_held(&uuid_mutex);
        /*
         * The device_list_mutex cannot be taken here in case opening the
         * underlying device takes further locks like bd_mutex.
         *
         * We also don't need the lock here as this is called during mount and
         * exclusion is provided by uuid_mutex
         */

        if (fs_devices->opened) {
                fs_devices->opened++;
                ret = 0;
        } else {
                list_sort(NULL, &fs_devices->devices, devid_cmp);
                ret = open_fs_devices(fs_devices, flags, holder);
        }

        return ret;
}

void btrfs_release_disk_super(struct btrfs_super_block *super)
{
        struct page *page = virt_to_page(super);

        put_page(page);
}

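/*
 * Read the super block at @bytenr through the page cache of @bdev and perform
 * basic sanity checks (fit within the device and a page, bytenr, magic). The
 * result must be released with btrfs_release_disk_super().
 */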
static struct btrfs_super_block *btrfs_read_disk_super(struct block_device *bdev,
                                                       u64 bytenr)
{
        struct btrfs_super_block *disk_super;
        struct page *page;
        void *p;
        pgoff_t index;

        /* make sure our super fits in the device */
        if (bytenr + PAGE_SIZE >= i_size_read(bdev->bd_inode))
                return ERR_PTR(-EINVAL);

        /* make sure our super fits in the page */
        if (sizeof(*disk_super) > PAGE_SIZE)
                return ERR_PTR(-EINVAL);

        /* make sure our super doesn't straddle pages on disk */
        index = bytenr >> PAGE_SHIFT;
        if ((bytenr + sizeof(*disk_super) - 1) >> PAGE_SHIFT != index)
                return ERR_PTR(-EINVAL);

        /* pull in the page with our super */
        page = read_cache_page_gfp(bdev->bd_inode->i_mapping, index, GFP_KERNEL);

        if (IS_ERR(page))
                return ERR_CAST(page);

        p = page_address(page);

        /* align our pointer to the offset of the super block */
        disk_super = p + offset_in_page(bytenr);

        if (btrfs_super_bytenr(disk_super) != bytenr ||
            btrfs_super_magic(disk_super) != BTRFS_MAGIC) {
                btrfs_release_disk_super(p);
                return ERR_PTR(-EINVAL);
        }

        if (disk_super->label[0] && disk_super->label[BTRFS_LABEL_SIZE - 1])
                disk_super->label[BTRFS_LABEL_SIZE - 1] = 0;

        return disk_super;
}

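/*
 * Unregister stale (unmounted) devices: all of them when @path is empty,
 * otherwise only the device matching @path.
 */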
int btrfs_forget_devices(const char *path)
{
        int ret;

        mutex_lock(&uuid_mutex);
        ret = btrfs_free_stale_devices(strlen(path) ? path : NULL, NULL);
        mutex_unlock(&uuid_mutex);

        return ret;
}

/*
 * Look for a btrfs signature on a device. This may be called outside of the
 * mount path and we are not allowed to call set_blocksize during the scan. The
 * superblock is read via the pagecache.
 */
struct btrfs_device *btrfs_scan_one_device(const char *path, fmode_t flags,
                                           void *holder)
{
        struct btrfs_super_block *disk_super;
        bool new_device_added = false;
        struct btrfs_device *device = NULL;
        struct block_device *bdev;
        u64 bytenr;

        lockdep_assert_held(&uuid_mutex);

        /*
         * we would like to check all the supers, but that would make
         * a btrfs mount succeed after a mkfs from a different FS.
         * So, we need to add a special mount option to scan for
         * later supers, using BTRFS_SUPER_MIRROR_MAX instead
         */
        bytenr = btrfs_sb_offset(0);
        flags |= FMODE_EXCL;

        bdev = blkdev_get_by_path(path, flags, holder);
        if (IS_ERR(bdev))
                return ERR_CAST(bdev);

        disk_super = btrfs_read_disk_super(bdev, bytenr);
        if (IS_ERR(disk_super)) {
                device = ERR_CAST(disk_super);
                goto error_bdev_put;
        }

        device = device_list_add(path, disk_super, &new_device_added);
        if (!IS_ERR(device)) {
                if (new_device_added)
                        btrfs_free_stale_devices(path, device);
        }

        btrfs_release_disk_super(disk_super);

error_bdev_put:
        blkdev_put(bdev, flags);

        return device;
}

/*
 * Try to find a chunk that intersects the [start, start + len] range and when
 * one such is found, record the end of it in *start
 */
static bool contains_pending_extent(struct btrfs_device *device, u64 *start,
                                    u64 len)
{
        u64 physical_start, physical_end;

        lockdep_assert_held(&device->fs_info->chunk_mutex);

        if (!find_first_extent_bit(&device->alloc_state, *start,
                                   &physical_start, &physical_end,
                                   CHUNK_ALLOCATED, NULL)) {

                if (in_range(physical_start, *start, len) ||
                    in_range(*start, physical_start,
                             physical_end - physical_start)) {
                        *start = physical_end + 1;
                        return true;
                }
        }
        return false;
}

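/*
 * Clamp the search start for a new device extent according to the chunk
 * allocation policy, currently so that the first 1MiB of the device is never
 * allocated.
 */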
static u64 dev_extent_search_start(struct btrfs_device *device, u64 start)
{
        switch (device->fs_devices->chunk_alloc_policy) {
        case BTRFS_CHUNK_ALLOC_REGULAR:
                /*
                 * We don't want to overwrite the superblock on the drive nor
                 * any area used by the boot loader (grub for example), so we
                 * make sure to start at an offset of at least 1MB.
                 */
                return max_t(u64, start, SZ_1M);
        default:
                BUG();
        }
}

1407 /**
1408  * dev_extent_hole_check - check if specified hole is suitable for allocation
1409  * @device:     the device which has the hole
1410  * @hole_start: starting position of the hole
1411  * @hole_size:  the size of the hole
1412  * @num_bytes:  the size of the free space that we need
1413  *
1414  * This function may modify @hole_start and @hole_size to reflect the
1415  * suitable position for allocation. Returns true if updated, false otherwise.
1416  */
1417 static bool dev_extent_hole_check(struct btrfs_device *device, u64 *hole_start,
1418                                   u64 *hole_size, u64 num_bytes)
1419 {
1420         bool changed = false;
1421         u64 hole_end = *hole_start + *hole_size;
1422
1423         /*
1424          * Check before we set max_hole_start, otherwise we could end up
1425          * sending back this offset anyway.
1426          */
1427         if (contains_pending_extent(device, hole_start, *hole_size)) {
1428                 if (hole_end >= *hole_start)
1429                         *hole_size = hole_end - *hole_start;
1430                 else
1431                         *hole_size = 0;
1432                 changed = true;
1433         }
1434
1435         switch (device->fs_devices->chunk_alloc_policy) {
1436         case BTRFS_CHUNK_ALLOC_REGULAR:
1437                 /* No extra check */
1438                 break;
1439         default:
1440                 BUG();
1441         }
1442
1443         return changed;
1444 }
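/*
 * Example (illustrative numbers only): for a hole at [8M, 16M) that
 * overlaps a pending chunk ending at 12M - 1, contains_pending_extent()
 * pushes *hole_start to 12M and the hole shrinks to *hole_size = 4M; if
 * the pending chunk covered the hole entirely, *hole_size would
 * collapse to 0.
 */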
1445
1446 /*
1447  * find_free_dev_extent_start - find free space in the specified device
1448  * @device:       the device which we search the free space in
1449  * @num_bytes:    the size of the free space that we need
1450  * @search_start: the position from which to begin the search
1451  * @start:        store the start of the free space that we find
1452  * @len:          the size of the free space that we find, or the size
1453  *                of the max free space if we don't find suitable free space
1454  *
1455  * This uses a pretty simple search, the expectation is that it is
1456  * called very infrequently and that a given device has a small number
1457  * of extents.
1458  *
1459  * If we don't find suitable free space, @start is used to store the start
1460  * position of the max free space and @len is used to store its size.
1461  *
1462  * NOTE: This function will search the *commit* root of the device tree, and
1463  * does an extra check to ensure dev extents are not double allocated.
1464  * This makes the function safe for allocating dev extents but it may not
1465  * report correct usable device space, as a device extent freed in the
1466  * current transaction is not reported as available.
1467  */
1473 static int find_free_dev_extent_start(struct btrfs_device *device,
1474                                 u64 num_bytes, u64 search_start, u64 *start,
1475                                 u64 *len)
1476 {
1477         struct btrfs_fs_info *fs_info = device->fs_info;
1478         struct btrfs_root *root = fs_info->dev_root;
1479         struct btrfs_key key;
1480         struct btrfs_dev_extent *dev_extent;
1481         struct btrfs_path *path;
1482         u64 hole_size;
1483         u64 max_hole_start;
1484         u64 max_hole_size;
1485         u64 extent_end;
1486         u64 search_end = device->total_bytes;
1487         int ret;
1488         int slot;
1489         struct extent_buffer *l;
1490
1491         search_start = dev_extent_search_start(device, search_start);
1492
1493         path = btrfs_alloc_path();
1494         if (!path)
1495                 return -ENOMEM;
1496
1497         max_hole_start = search_start;
1498         max_hole_size = 0;
1499
1500 again:
1501         if (search_start >= search_end ||
1502             test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
1503                 ret = -ENOSPC;
1504                 goto out;
1505         }
1506
1507         path->reada = READA_FORWARD;
1508         path->search_commit_root = 1;
1509         path->skip_locking = 1;
1510
1511         key.objectid = device->devid;
1512         key.offset = search_start;
1513         key.type = BTRFS_DEV_EXTENT_KEY;
1514
1515         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
1516         if (ret < 0)
1517                 goto out;
1518         if (ret > 0) {
1519                 ret = btrfs_previous_item(root, path, key.objectid, key.type);
1520                 if (ret < 0)
1521                         goto out;
1522         }
1523
1524         while (1) {
1525                 l = path->nodes[0];
1526                 slot = path->slots[0];
1527                 if (slot >= btrfs_header_nritems(l)) {
1528                         ret = btrfs_next_leaf(root, path);
1529                         if (ret == 0)
1530                                 continue;
1531                         if (ret < 0)
1532                                 goto out;
1533
1534                         break;
1535                 }
1536                 btrfs_item_key_to_cpu(l, &key, slot);
1537
1538                 if (key.objectid < device->devid)
1539                         goto next;
1540
1541                 if (key.objectid > device->devid)
1542                         break;
1543
1544                 if (key.type != BTRFS_DEV_EXTENT_KEY)
1545                         goto next;
1546
1547                 if (key.offset > search_start) {
1548                         hole_size = key.offset - search_start;
1549                         dev_extent_hole_check(device, &search_start, &hole_size,
1550                                               num_bytes);
1551
1552                         if (hole_size > max_hole_size) {
1553                                 max_hole_start = search_start;
1554                                 max_hole_size = hole_size;
1555                         }
1556
1557                         /*
1558                          * If this free space is greater than what we need,
1559                          * it must be the max free space that we have found
1560                          * until now, so max_hole_start must point to the start
1561                          * of this free space and the length of this free space
1562                          * is stored in max_hole_size. Thus, we return
1563                          * max_hole_start and max_hole_size and go back to the
1564                          * caller.
1565                          */
1566                         if (hole_size >= num_bytes) {
1567                                 ret = 0;
1568                                 goto out;
1569                         }
1570                 }
1571
1572                 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
1573                 extent_end = key.offset + btrfs_dev_extent_length(l,
1574                                                                   dev_extent);
1575                 if (extent_end > search_start)
1576                         search_start = extent_end;
1577 next:
1578                 path->slots[0]++;
1579                 cond_resched();
1580         }
1581
1582         /*
1583          * At this point, search_start should be the end of
1584          * allocated dev extents, and when shrinking the device,
1585          * search_end may be smaller than search_start.
1586          */
1587         if (search_end > search_start) {
1588                 hole_size = search_end - search_start;
1589                 if (dev_extent_hole_check(device, &search_start, &hole_size,
1590                                           num_bytes)) {
1591                         btrfs_release_path(path);
1592                         goto again;
1593                 }
1594
1595                 if (hole_size > max_hole_size) {
1596                         max_hole_start = search_start;
1597                         max_hole_size = hole_size;
1598                 }
1599         }
1600
1601         /* See above. */
1602         if (max_hole_size < num_bytes)
1603                 ret = -ENOSPC;
1604         else
1605                 ret = 0;
1606
1607 out:
1608         btrfs_free_path(path);
1609         *start = max_hole_start;
1610         if (len)
1611                 *len = max_hole_size;
1612         return ret;
1613 }
1614
1615 int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
1616                          u64 *start, u64 *len)
1617 {
1618         /* FIXME use last free of some kind */
1619         return find_free_dev_extent_start(device, num_bytes, 0, start, len);
1620 }
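/*
 * Minimal usage sketch (illustrative only, not an actual call site; the
 * chunk allocator does something similar with its stripe size). The
 * caller is expected to hold fs_info->chunk_mutex, as asserted in
 * contains_pending_extent() above:
 *
 *	u64 dev_offset, max_avail;
 *	int ret;
 *
 *	ret = find_free_dev_extent(device, SZ_1G, &dev_offset, &max_avail);
 *
 * On ret == 0, [dev_offset, dev_offset + SZ_1G) is free on the device;
 * on ret == -ENOSPC, max_avail holds the size of the largest hole found
 * and dev_offset its start.
 */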
1621
1622 static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans,
1623                           struct btrfs_device *device,
1624                           u64 start, u64 *dev_extent_len)
1625 {
1626         struct btrfs_fs_info *fs_info = device->fs_info;
1627         struct btrfs_root *root = fs_info->dev_root;
1628         int ret;
1629         struct btrfs_path *path;
1630         struct btrfs_key key;
1631         struct btrfs_key found_key;
1632         struct extent_buffer *leaf = NULL;
1633         struct btrfs_dev_extent *extent = NULL;
1634
1635         path = btrfs_alloc_path();
1636         if (!path)
1637                 return -ENOMEM;
1638
1639         key.objectid = device->devid;
1640         key.offset = start;
1641         key.type = BTRFS_DEV_EXTENT_KEY;
1642 again:
1643         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1644         if (ret > 0) {
1645                 ret = btrfs_previous_item(root, path, key.objectid,
1646                                           BTRFS_DEV_EXTENT_KEY);
1647                 if (ret)
1648                         goto out;
1649                 leaf = path->nodes[0];
1650                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1651                 extent = btrfs_item_ptr(leaf, path->slots[0],
1652                                         struct btrfs_dev_extent);
1653                 BUG_ON(found_key.offset > start || found_key.offset +
1654                        btrfs_dev_extent_length(leaf, extent) < start);
1655                 key = found_key;
1656                 btrfs_release_path(path);
1657                 goto again;
1658         } else if (ret == 0) {
1659                 leaf = path->nodes[0];
1660                 extent = btrfs_item_ptr(leaf, path->slots[0],
1661                                         struct btrfs_dev_extent);
1662         } else {
1663                 btrfs_handle_fs_error(fs_info, ret, "Slot search failed");
1664                 goto out;
1665         }
1666
1667         *dev_extent_len = btrfs_dev_extent_length(leaf, extent);
1668
1669         ret = btrfs_del_item(trans, root, path);
1670         if (ret) {
1671                 btrfs_handle_fs_error(fs_info, ret,
1672                                       "Failed to remove dev extent item");
1673         } else {
1674                 set_bit(BTRFS_TRANS_HAVE_FREE_BGS, &trans->transaction->flags);
1675         }
1676 out:
1677         btrfs_free_path(path);
1678         return ret;
1679 }
1680
1681 static int btrfs_alloc_dev_extent(struct btrfs_trans_handle *trans,
1682                                   struct btrfs_device *device,
1683                                   u64 chunk_offset, u64 start, u64 num_bytes)
1684 {
1685         int ret;
1686         struct btrfs_path *path;
1687         struct btrfs_fs_info *fs_info = device->fs_info;
1688         struct btrfs_root *root = fs_info->dev_root;
1689         struct btrfs_dev_extent *extent;
1690         struct extent_buffer *leaf;
1691         struct btrfs_key key;
1692
1693         WARN_ON(!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state));
1694         WARN_ON(test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state));
1695         path = btrfs_alloc_path();
1696         if (!path)
1697                 return -ENOMEM;
1698
1699         key.objectid = device->devid;
1700         key.offset = start;
1701         key.type = BTRFS_DEV_EXTENT_KEY;
1702         ret = btrfs_insert_empty_item(trans, root, path, &key,
1703                                       sizeof(*extent));
1704         if (ret)
1705                 goto out;
1706
1707         leaf = path->nodes[0];
1708         extent = btrfs_item_ptr(leaf, path->slots[0],
1709                                 struct btrfs_dev_extent);
1710         btrfs_set_dev_extent_chunk_tree(leaf, extent,
1711                                         BTRFS_CHUNK_TREE_OBJECTID);
1712         btrfs_set_dev_extent_chunk_objectid(leaf, extent,
1713                                             BTRFS_FIRST_CHUNK_TREE_OBJECTID);
1714         btrfs_set_dev_extent_chunk_offset(leaf, extent, chunk_offset);
1715
1716         btrfs_set_dev_extent_length(leaf, extent, num_bytes);
1717         btrfs_mark_buffer_dirty(leaf);
1718 out:
1719         btrfs_free_path(path);
1720         return ret;
1721 }
1722
1723 static u64 find_next_chunk(struct btrfs_fs_info *fs_info)
1724 {
1725         struct extent_map_tree *em_tree;
1726         struct extent_map *em;
1727         struct rb_node *n;
1728         u64 ret = 0;
1729
1730         em_tree = &fs_info->mapping_tree;
1731         read_lock(&em_tree->lock);
1732         n = rb_last(&em_tree->map.rb_root);
1733         if (n) {
1734                 em = rb_entry(n, struct extent_map, rb_node);
1735                 ret = em->start + em->len;
1736         }
1737         read_unlock(&em_tree->lock);
1738
1739         return ret;
1740 }
1741
1742 static noinline int find_next_devid(struct btrfs_fs_info *fs_info,
1743                                     u64 *devid_ret)
1744 {
1745         int ret;
1746         struct btrfs_key key;
1747         struct btrfs_key found_key;
1748         struct btrfs_path *path;
1749
1750         path = btrfs_alloc_path();
1751         if (!path)
1752                 return -ENOMEM;
1753
1754         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1755         key.type = BTRFS_DEV_ITEM_KEY;
1756         key.offset = (u64)-1;
1757
1758         ret = btrfs_search_slot(NULL, fs_info->chunk_root, &key, path, 0, 0);
1759         if (ret < 0)
1760                 goto error;
1761
1762         if (ret == 0) {
1763                 /* Corruption */
1764                 btrfs_err(fs_info, "corrupted chunk tree devid -1 matched");
1765                 ret = -EUCLEAN;
1766                 goto error;
1767         }
1768
1769         ret = btrfs_previous_item(fs_info->chunk_root, path,
1770                                   BTRFS_DEV_ITEMS_OBJECTID,
1771                                   BTRFS_DEV_ITEM_KEY);
1772         if (ret) {
1773                 *devid_ret = 1;
1774         } else {
1775                 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
1776                                       path->slots[0]);
1777                 *devid_ret = found_key.offset + 1;
1778         }
1779         ret = 0;
1780 error:
1781         btrfs_free_path(path);
1782         return ret;
1783 }
1784
1785 /*
1786  * The device information is stored in the chunk root.
1787  * The btrfs_device struct should be fully filled in.
1788  */
1789 static int btrfs_add_dev_item(struct btrfs_trans_handle *trans,
1790                             struct btrfs_device *device)
1791 {
1792         int ret;
1793         struct btrfs_path *path;
1794         struct btrfs_dev_item *dev_item;
1795         struct extent_buffer *leaf;
1796         struct btrfs_key key;
1797         unsigned long ptr;
1798
1799         path = btrfs_alloc_path();
1800         if (!path)
1801                 return -ENOMEM;
1802
1803         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1804         key.type = BTRFS_DEV_ITEM_KEY;
1805         key.offset = device->devid;
1806
1807         ret = btrfs_insert_empty_item(trans, trans->fs_info->chunk_root, path,
1808                                       &key, sizeof(*dev_item));
1809         if (ret)
1810                 goto out;
1811
1812         leaf = path->nodes[0];
1813         dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
1814
1815         btrfs_set_device_id(leaf, dev_item, device->devid);
1816         btrfs_set_device_generation(leaf, dev_item, 0);
1817         btrfs_set_device_type(leaf, dev_item, device->type);
1818         btrfs_set_device_io_align(leaf, dev_item, device->io_align);
1819         btrfs_set_device_io_width(leaf, dev_item, device->io_width);
1820         btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
1821         btrfs_set_device_total_bytes(leaf, dev_item,
1822                                      btrfs_device_get_disk_total_bytes(device));
1823         btrfs_set_device_bytes_used(leaf, dev_item,
1824                                     btrfs_device_get_bytes_used(device));
1825         btrfs_set_device_group(leaf, dev_item, 0);
1826         btrfs_set_device_seek_speed(leaf, dev_item, 0);
1827         btrfs_set_device_bandwidth(leaf, dev_item, 0);
1828         btrfs_set_device_start_offset(leaf, dev_item, 0);
1829
1830         ptr = btrfs_device_uuid(dev_item);
1831         write_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
1832         ptr = btrfs_device_fsid(dev_item);
1833         write_extent_buffer(leaf, trans->fs_info->fs_devices->metadata_uuid,
1834                             ptr, BTRFS_FSID_SIZE);
1835         btrfs_mark_buffer_dirty(leaf);
1836
1837         ret = 0;
1838 out:
1839         btrfs_free_path(path);
1840         return ret;
1841 }
1842
1843 /*
1844  * Function to update ctime/mtime for a given device path.
1845  * Mainly used for ctime/mtime based probing, e.g. by libblkid.
1846  */
1847 static void update_dev_time(const char *path_name)
1848 {
1849         struct file *filp;
1850
1851         filp = filp_open(path_name, O_RDWR, 0);
1852         if (IS_ERR(filp))
1853                 return;
1854         file_update_time(filp);
1855         filp_close(filp, NULL);
1856 }
1857
1858 static int btrfs_rm_dev_item(struct btrfs_device *device)
1859 {
1860         struct btrfs_root *root = device->fs_info->chunk_root;
1861         int ret;
1862         struct btrfs_path *path;
1863         struct btrfs_key key;
1864         struct btrfs_trans_handle *trans;
1865
1866         path = btrfs_alloc_path();
1867         if (!path)
1868                 return -ENOMEM;
1869
1870         trans = btrfs_start_transaction(root, 0);
1871         if (IS_ERR(trans)) {
1872                 btrfs_free_path(path);
1873                 return PTR_ERR(trans);
1874         }
1875         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
1876         key.type = BTRFS_DEV_ITEM_KEY;
1877         key.offset = device->devid;
1878
1879         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
1880         if (ret) {
1881                 if (ret > 0)
1882                         ret = -ENOENT;
1883                 btrfs_abort_transaction(trans, ret);
1884                 btrfs_end_transaction(trans);
1885                 goto out;
1886         }
1887
1888         ret = btrfs_del_item(trans, root, path);
1889         if (ret) {
1890                 btrfs_abort_transaction(trans, ret);
1891                 btrfs_end_transaction(trans);
1892         }
1893
1894 out:
1895         btrfs_free_path(path);
1896         if (!ret)
1897                 ret = btrfs_commit_transaction(trans);
1898         return ret;
1899 }
1900
1901 /*
1902  * Verify that @num_devices satisfies the RAID profile constraints in the whole
1903  * filesystem. It's up to the caller to adjust that number to account for
1904  * special cases, e.g. an ongoing device replace.
1905  */
1906 static int btrfs_check_raid_min_devices(struct btrfs_fs_info *fs_info,
1907                 u64 num_devices)
1908 {
1909         u64 all_avail;
1910         unsigned seq;
1911         int i;
1912
1913         do {
1914                 seq = read_seqbegin(&fs_info->profiles_lock);
1915
1916                 all_avail = fs_info->avail_data_alloc_bits |
1917                             fs_info->avail_system_alloc_bits |
1918                             fs_info->avail_metadata_alloc_bits;
1919         } while (read_seqretry(&fs_info->profiles_lock, seq));
1920
1921         for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
1922                 if (!(all_avail & btrfs_raid_array[i].bg_flag))
1923                         continue;
1924
1925                 if (num_devices < btrfs_raid_array[i].devs_min) {
1926                         int ret = btrfs_raid_array[i].mindev_error;
1927
1928                         if (ret)
1929                                 return ret;
1930                 }
1931         }
1932
1933         return 0;
1934 }
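/*
 * Example (illustrative): with RAID1 in use somewhere on the filesystem,
 * btrfs_raid_array[BTRFS_RAID_RAID1].devs_min is 2, so a call with
 * num_devices == 1 (say, while removing one of two devices) returns
 * BTRFS_ERROR_DEV_RAID1_MIN_NOT_MET and the operation is refused.
 */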
1935
1936 static struct btrfs_device *btrfs_find_next_active_device(
1937                 struct btrfs_fs_devices *fs_devs, struct btrfs_device *device)
1938 {
1939         struct btrfs_device *next_device;
1940
1941         list_for_each_entry(next_device, &fs_devs->devices, dev_list) {
1942                 if (next_device != device &&
1943                     !test_bit(BTRFS_DEV_STATE_MISSING, &next_device->dev_state)
1944                     && next_device->bdev)
1945                         return next_device;
1946         }
1947
1948         return NULL;
1949 }
1950
1951 /*
1952  * Helper function to check if the given device is part of s_bdev / latest_bdev
1953  * and replace it with the provided or the next active device. In the context
1954  * where this function is called, there should always be another device (or
1955  * @next_device itself) which is active.
1956  */
1957 void __cold btrfs_assign_next_active_device(struct btrfs_device *device,
1958                                             struct btrfs_device *next_device)
1959 {
1960         struct btrfs_fs_info *fs_info = device->fs_info;
1961
1962         if (!next_device)
1963                 next_device = btrfs_find_next_active_device(fs_info->fs_devices,
1964                                                             device);
1965         ASSERT(next_device);
1966
1967         if (fs_info->sb->s_bdev &&
1968             fs_info->sb->s_bdev == device->bdev)
1969                 fs_info->sb->s_bdev = next_device->bdev;
1970
1971         if (fs_info->fs_devices->latest_bdev == device->bdev)
1972                 fs_info->fs_devices->latest_bdev = next_device->bdev;
1973 }
1974
1975 /*
1976  * Return btrfs_fs_devices::num_devices excluding the device that's being
1977  * currently replaced.
1978  */
1979 static u64 btrfs_num_devices(struct btrfs_fs_info *fs_info)
1980 {
1981         u64 num_devices = fs_info->fs_devices->num_devices;
1982
1983         down_read(&fs_info->dev_replace.rwsem);
1984         if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace)) {
1985                 ASSERT(num_devices > 1);
1986                 num_devices--;
1987         }
1988         up_read(&fs_info->dev_replace.rwsem);
1989
1990         return num_devices;
1991 }
1992
1993 void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info,
1994                                struct block_device *bdev,
1995                                const char *device_path)
1996 {
1997         struct btrfs_super_block *disk_super;
1998         int copy_num;
1999
2000         if (!bdev)
2001                 return;
2002
2003         for (copy_num = 0; copy_num < BTRFS_SUPER_MIRROR_MAX; copy_num++) {
2004                 struct page *page;
2005                 int ret;
2006
2007                 disk_super = btrfs_read_dev_one_super(bdev, copy_num);
2008                 if (IS_ERR(disk_super))
2009                         continue;
2010
2011                 memset(&disk_super->magic, 0, sizeof(disk_super->magic));
2012
2013                 page = virt_to_page(disk_super);
2014                 set_page_dirty(page);
2015                 lock_page(page);
2016                 /* write_one_page() unlocks the page */
2017                 ret = write_one_page(page);
2018                 if (ret)
2019                         btrfs_warn(fs_info,
2020                                 "error clearing superblock number %d (%d)",
2021                                 copy_num, ret);
2022                 btrfs_release_disk_super(disk_super);
2024         }
2025
2026         /* Notify udev that device has changed */
2027         btrfs_kobject_uevent(bdev, KOBJ_CHANGE);
2028
2029         /* Update ctime/mtime for device path for libblkid */
2030         update_dev_time(device_path);
2031 }
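/*
 * Note: only the magic is wiped above, which is enough to make every
 * super block copy unrecognizable to later scans (mount, blkid); the
 * rest of each copy is left intact on disk.
 */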
2032
2033 int btrfs_rm_device(struct btrfs_fs_info *fs_info, const char *device_path,
2034                     u64 devid)
2035 {
2036         struct btrfs_device *device;
2037         struct btrfs_fs_devices *cur_devices;
2038         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2039         u64 num_devices;
2040         int ret = 0;
2041
2042         mutex_lock(&uuid_mutex);
2043
2044         num_devices = btrfs_num_devices(fs_info);
2045
2046         ret = btrfs_check_raid_min_devices(fs_info, num_devices - 1);
2047         if (ret)
2048                 goto out;
2049
2050         device = btrfs_find_device_by_devspec(fs_info, devid, device_path);
2051
2052         if (IS_ERR(device)) {
2053                 if (PTR_ERR(device) == -ENOENT &&
2054                     strcmp(device_path, "missing") == 0)
2055                         ret = BTRFS_ERROR_DEV_MISSING_NOT_FOUND;
2056                 else
2057                         ret = PTR_ERR(device);
2058                 goto out;
2059         }
2060
2061         if (btrfs_pinned_by_swapfile(fs_info, device)) {
2062                 btrfs_warn_in_rcu(fs_info,
2063                   "cannot remove device %s (devid %llu) due to active swapfile",
2064                                   rcu_str_deref(device->name), device->devid);
2065                 ret = -ETXTBSY;
2066                 goto out;
2067         }
2068
2069         if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
2070                 ret = BTRFS_ERROR_DEV_TGT_REPLACE;
2071                 goto out;
2072         }
2073
2074         if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
2075             fs_info->fs_devices->rw_devices == 1) {
2076                 ret = BTRFS_ERROR_DEV_ONLY_WRITABLE;
2077                 goto out;
2078         }
2079
2080         if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
2081                 mutex_lock(&fs_info->chunk_mutex);
2082                 list_del_init(&device->dev_alloc_list);
2083                 device->fs_devices->rw_devices--;
2084                 mutex_unlock(&fs_info->chunk_mutex);
2085         }
2086
2087         mutex_unlock(&uuid_mutex);
2088         ret = btrfs_shrink_device(device, 0);
2089         if (!ret)
2090                 btrfs_reada_remove_dev(device);
2091         mutex_lock(&uuid_mutex);
2092         if (ret)
2093                 goto error_undo;
2094
2095         /*
2096          * TODO: the superblock still includes this device in its num_devices
2097          * counter although write_all_supers() is not locked out. This
2098          * could give a filesystem state which requires a degraded mount.
2099          */
2100         ret = btrfs_rm_dev_item(device);
2101         if (ret)
2102                 goto error_undo;
2103
2104         clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
2105         btrfs_scrub_cancel_dev(device);
2106
2107         /*
2108          * the device list mutex makes sure that we don't change
2109          * the device list while someone else is writing out all
2110          * the device supers. Whoever is writing all supers should
2111          * lock the device list mutex before getting the number of
2112          * devices in the super block (super_copy). Conversely,
2113          * whoever updates the number of devices in the super block
2114          * (super_copy) should hold the device list mutex.
2115          */
2116
2117         /*
2118          * In normal cases cur_devices == fs_devices. But when deleting a
2119          * seed device, cur_devices points to the seed's own fs_devices,
2120          * listed under fs_devices->seed_list.
2121          */
2122         cur_devices = device->fs_devices;
2123         mutex_lock(&fs_devices->device_list_mutex);
2124         list_del_rcu(&device->dev_list);
2125
2126         cur_devices->num_devices--;
2127         cur_devices->total_devices--;
2128         /* Update total_devices of the parent fs_devices if it's seed */
2129         if (cur_devices != fs_devices)
2130                 fs_devices->total_devices--;
2131
2132         if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
2133                 cur_devices->missing_devices--;
2134
2135         btrfs_assign_next_active_device(device, NULL);
2136
2137         if (device->bdev) {
2138                 cur_devices->open_devices--;
2139                 /* remove sysfs entry */
2140                 btrfs_sysfs_remove_device(device);
2141         }
2142
2143         num_devices = btrfs_super_num_devices(fs_info->super_copy) - 1;
2144         btrfs_set_super_num_devices(fs_info->super_copy, num_devices);
2145         mutex_unlock(&fs_devices->device_list_mutex);
2146
2147         /*
2148          * at this point, the device is zero sized and detached from
2149          * the devices list.  All that's left is to zero out the old
2150          * supers and free the device.
2151          */
2152         if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
2153                 btrfs_scratch_superblocks(fs_info, device->bdev,
2154                                           device->name->str);
2155
2156         btrfs_close_bdev(device);
2157         synchronize_rcu();
2158         btrfs_free_device(device);
2159
2160         if (cur_devices->open_devices == 0) {
2161                 list_del_init(&cur_devices->seed_list);
2162                 close_fs_devices(cur_devices);
2163                 free_fs_devices(cur_devices);
2164         }
2165
2166 out:
2167         mutex_unlock(&uuid_mutex);
2168         return ret;
2169
2170 error_undo:
2171         btrfs_reada_undo_remove_dev(device);
2172         if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
2173                 mutex_lock(&fs_info->chunk_mutex);
2174                 list_add(&device->dev_alloc_list,
2175                          &fs_devices->alloc_list);
2176                 device->fs_devices->rw_devices++;
2177                 mutex_unlock(&fs_info->chunk_mutex);
2178         }
2179         goto out;
2180 }
2181
2182 void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_device *srcdev)
2183 {
2184         struct btrfs_fs_devices *fs_devices;
2185
2186         lockdep_assert_held(&srcdev->fs_info->fs_devices->device_list_mutex);
2187
2188         /*
2189          * In the case of a fs with no seed, srcdev->fs_devices will point
2190          * to the fs_devices of fs_info. However when the dev being replaced
2191          * is a seed dev it will point to the seed's local fs_devices. In
2192          * short, srcdev will have its correct fs_devices in both cases.
2193          */
2194         fs_devices = srcdev->fs_devices;
2195
2196         list_del_rcu(&srcdev->dev_list);
2197         list_del(&srcdev->dev_alloc_list);
2198         fs_devices->num_devices--;
2199         if (test_bit(BTRFS_DEV_STATE_MISSING, &srcdev->dev_state))
2200                 fs_devices->missing_devices--;
2201
2202         if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state))
2203                 fs_devices->rw_devices--;
2204
2205         if (srcdev->bdev)
2206                 fs_devices->open_devices--;
2207 }
2208
2209 void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device *srcdev)
2210 {
2211         struct btrfs_fs_devices *fs_devices = srcdev->fs_devices;
2212
2213         mutex_lock(&uuid_mutex);
2214
2215         btrfs_close_bdev(srcdev);
2216         synchronize_rcu();
2217         btrfs_free_device(srcdev);
2218
2219         /* If no devices are left we'd rather delete the fs_devices. */
2220         if (!fs_devices->num_devices) {
2221                 /*
2222                  * On a mounted FS, num_devices can't be zero unless it's a
2223                  * seed. In case of a seed device being replaced, the replace
2224                  * target is added to the sprout FS, so there will be no
2225                  * devices left under the seed FS.
2226                  */
2227                 ASSERT(fs_devices->seeding);
2228
2229                 list_del_init(&fs_devices->seed_list);
2230                 close_fs_devices(fs_devices);
2231                 free_fs_devices(fs_devices);
2232         }
2233         mutex_unlock(&uuid_mutex);
2234 }
2235
2236 void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev)
2237 {
2238         struct btrfs_fs_devices *fs_devices = tgtdev->fs_info->fs_devices;
2239
2240         mutex_lock(&fs_devices->device_list_mutex);
2241
2242         btrfs_sysfs_remove_device(tgtdev);
2243
2244         if (tgtdev->bdev)
2245                 fs_devices->open_devices--;
2246
2247         fs_devices->num_devices--;
2248
2249         btrfs_assign_next_active_device(tgtdev, NULL);
2250
2251         list_del_rcu(&tgtdev->dev_list);
2252
2253         mutex_unlock(&fs_devices->device_list_mutex);
2254
2255         /*
2256          * The update_dev_time() within btrfs_scratch_superblocks()
2257          * may lead to a call to btrfs_show_devname() which will try
2258          * to hold device_list_mutex. Here the device is already out
2259          * of the device list, so we don't have to hold the
2260          * device_list_mutex lock.
2261          */
2262         btrfs_scratch_superblocks(tgtdev->fs_info, tgtdev->bdev,
2263                                   tgtdev->name->str);
2264
2265         btrfs_close_bdev(tgtdev);
2266         synchronize_rcu();
2267         btrfs_free_device(tgtdev);
2268 }
2269
2270 static struct btrfs_device *btrfs_find_device_by_path(
2271                 struct btrfs_fs_info *fs_info, const char *device_path)
2272 {
2273         int ret = 0;
2274         struct btrfs_super_block *disk_super;
2275         u64 devid;
2276         u8 *dev_uuid;
2277         struct block_device *bdev;
2278         struct btrfs_device *device;
2279
2280         ret = btrfs_get_bdev_and_sb(device_path, FMODE_READ,
2281                                     fs_info->bdev_holder, 0, &bdev, &disk_super);
2282         if (ret)
2283                 return ERR_PTR(ret);
2284
2285         devid = btrfs_stack_device_id(&disk_super->dev_item);
2286         dev_uuid = disk_super->dev_item.uuid;
2287         if (btrfs_fs_incompat(fs_info, METADATA_UUID))
2288                 device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
2289                                            disk_super->metadata_uuid, true);
2290         else
2291                 device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
2292                                            disk_super->fsid, true);
2293
2294         btrfs_release_disk_super(disk_super);
2295         if (!device)
2296                 device = ERR_PTR(-ENOENT);
2297         blkdev_put(bdev, FMODE_READ);
2298         return device;
2299 }
2300
2301 /*
2302  * Lookup a device given by device id, or the path if the id is 0.
2303  */
2304 struct btrfs_device *btrfs_find_device_by_devspec(
2305                 struct btrfs_fs_info *fs_info, u64 devid,
2306                 const char *device_path)
2307 {
2308         struct btrfs_device *device;
2309
2310         if (devid) {
2311                 device = btrfs_find_device(fs_info->fs_devices, devid, NULL,
2312                                            NULL, true);
2313                 if (!device)
2314                         return ERR_PTR(-ENOENT);
2315                 return device;
2316         }
2317
2318         if (!device_path || !device_path[0])
2319                 return ERR_PTR(-EINVAL);
2320
2321         if (strcmp(device_path, "missing") == 0) {
2322                 /* Find first missing device */
2323                 list_for_each_entry(device, &fs_info->fs_devices->devices,
2324                                     dev_list) {
2325                         if (test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
2326                                      &device->dev_state) && !device->bdev)
2327                                 return device;
2328                 }
2329                 return ERR_PTR(-ENOENT);
2330         }
2331
2332         return btrfs_find_device_by_path(fs_info, device_path);
2333 }
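/*
 * Examples (illustrative): devid == 3 looks the device up by id and the
 * path is ignored; devid == 0 with device_path == "missing" returns the
 * first device that is present in the metadata but has no bdev; devid
 * == 0 with a regular path reads the super block at that path to
 * identify the device.
 */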
2334
2335 /*
2336  * Does all the dirty work required for changing the filesystem's UUID.
2337  */
2338 static int btrfs_prepare_sprout(struct btrfs_fs_info *fs_info)
2339 {
2340         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2341         struct btrfs_fs_devices *old_devices;
2342         struct btrfs_fs_devices *seed_devices;
2343         struct btrfs_super_block *disk_super = fs_info->super_copy;
2344         struct btrfs_device *device;
2345         u64 super_flags;
2346
2347         lockdep_assert_held(&uuid_mutex);
2348         if (!fs_devices->seeding)
2349                 return -EINVAL;
2350
2351         /*
2352          * Private copy of the seed devices, anchored at
2353          * fs_info->fs_devices->seed_list
2354          */
2355         seed_devices = alloc_fs_devices(NULL, NULL);
2356         if (IS_ERR(seed_devices))
2357                 return PTR_ERR(seed_devices);
2358
2359         /*
2360          * It's necessary to retain a copy of the original seed fs_devices in
2361          * fs_uuids so that filesystems which have been seeded can successfully
2362          * reference the seed device from open_seed_devices. This also supports
2363          * multiple fs seed.
2364  * multiple seed filesystems.
2365         old_devices = clone_fs_devices(fs_devices);
2366         if (IS_ERR(old_devices)) {
2367                 kfree(seed_devices);
2368                 return PTR_ERR(old_devices);
2369         }
2370
2371         list_add(&old_devices->fs_list, &fs_uuids);
2372
2373         memcpy(seed_devices, fs_devices, sizeof(*seed_devices));
2374         seed_devices->opened = 1;
2375         INIT_LIST_HEAD(&seed_devices->devices);
2376         INIT_LIST_HEAD(&seed_devices->alloc_list);
2377         mutex_init(&seed_devices->device_list_mutex);
2378
2379         mutex_lock(&fs_devices->device_list_mutex);
2380         list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices,
2381                               synchronize_rcu);
2382         list_for_each_entry(device, &seed_devices->devices, dev_list)
2383                 device->fs_devices = seed_devices;
2384
2385         fs_devices->seeding = false;
2386         fs_devices->num_devices = 0;
2387         fs_devices->open_devices = 0;
2388         fs_devices->missing_devices = 0;
2389         fs_devices->rotating = false;
2390         list_add(&seed_devices->seed_list, &fs_devices->seed_list);
2391
2392         generate_random_uuid(fs_devices->fsid);
2393         memcpy(fs_devices->metadata_uuid, fs_devices->fsid, BTRFS_FSID_SIZE);
2394         memcpy(disk_super->fsid, fs_devices->fsid, BTRFS_FSID_SIZE);
2395         mutex_unlock(&fs_devices->device_list_mutex);
2396
2397         super_flags = btrfs_super_flags(disk_super) &
2398                       ~BTRFS_SUPER_FLAG_SEEDING;
2399         btrfs_set_super_flags(disk_super, super_flags);
2400
2401         return 0;
2402 }
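/*
 * Rough sketch of the list surgery above (illustrative):
 *
 *	before:	fs_devices->devices   = [dev A, dev B]  (seeding, fsid S)
 *	after:	fs_devices->devices   = []              (sprout, new random fsid)
 *		fs_devices->seed_list -> seed_devices
 *		seed_devices->devices = [dev A, dev B]  (fsid S preserved)
 *
 * A clone of the original fs_devices also stays on fs_uuids so that the
 * seed can still be found by scans, and the device being added joins
 * fs_devices->devices afterwards in btrfs_init_new_device().
 */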
2403
2404 /*
2405  * Store the expected generation for seed devices in device items.
2406  */
2407 static int btrfs_finish_sprout(struct btrfs_trans_handle *trans)
2408 {
2409         struct btrfs_fs_info *fs_info = trans->fs_info;
2410         struct btrfs_root *root = fs_info->chunk_root;
2411         struct btrfs_path *path;
2412         struct extent_buffer *leaf;
2413         struct btrfs_dev_item *dev_item;
2414         struct btrfs_device *device;
2415         struct btrfs_key key;
2416         u8 fs_uuid[BTRFS_FSID_SIZE];
2417         u8 dev_uuid[BTRFS_UUID_SIZE];
2418         u64 devid;
2419         int ret;
2420
2421         path = btrfs_alloc_path();
2422         if (!path)
2423                 return -ENOMEM;
2424
2425         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2426         key.offset = 0;
2427         key.type = BTRFS_DEV_ITEM_KEY;
2428
2429         while (1) {
2430                 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2431                 if (ret < 0)
2432                         goto error;
2433
2434                 leaf = path->nodes[0];
2435 next_slot:
2436                 if (path->slots[0] >= btrfs_header_nritems(leaf)) {
2437                         ret = btrfs_next_leaf(root, path);
2438                         if (ret > 0)
2439                                 break;
2440                         if (ret < 0)
2441                                 goto error;
2442                         leaf = path->nodes[0];
2443                         btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2444                         btrfs_release_path(path);
2445                         continue;
2446                 }
2447
2448                 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2449                 if (key.objectid != BTRFS_DEV_ITEMS_OBJECTID ||
2450                     key.type != BTRFS_DEV_ITEM_KEY)
2451                         break;
2452
2453                 dev_item = btrfs_item_ptr(leaf, path->slots[0],
2454                                           struct btrfs_dev_item);
2455                 devid = btrfs_device_id(leaf, dev_item);
2456                 read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
2457                                    BTRFS_UUID_SIZE);
2458                 read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
2459                                    BTRFS_FSID_SIZE);
2460                 device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
2461                                            fs_uuid, true);
2462                 BUG_ON(!device); /* Logic error */
2463
2464                 if (device->fs_devices->seeding) {
2465                         btrfs_set_device_generation(leaf, dev_item,
2466                                                     device->generation);
2467                         btrfs_mark_buffer_dirty(leaf);
2468                 }
2469
2470                 path->slots[0]++;
2471                 goto next_slot;
2472         }
2473         ret = 0;
2474 error:
2475         btrfs_free_path(path);
2476         return ret;
2477 }
2478
2479 int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path)
2480 {
2481         struct btrfs_root *root = fs_info->dev_root;
2482         struct request_queue *q;
2483         struct btrfs_trans_handle *trans;
2484         struct btrfs_device *device;
2485         struct block_device *bdev;
2486         struct super_block *sb = fs_info->sb;
2487         struct rcu_string *name;
2488         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2489         u64 orig_super_total_bytes;
2490         u64 orig_super_num_devices;
2491         int seeding_dev = 0;
2492         int ret = 0;
2493         bool locked = false;
2494
2495         if (sb_rdonly(sb) && !fs_devices->seeding)
2496                 return -EROFS;
2497
2498         bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
2499                                   fs_info->bdev_holder);
2500         if (IS_ERR(bdev))
2501                 return PTR_ERR(bdev);
2502
2503         if (fs_devices->seeding) {
2504                 seeding_dev = 1;
2505                 down_write(&sb->s_umount);
2506                 mutex_lock(&uuid_mutex);
2507                 locked = true;
2508         }
2509
2510         sync_blockdev(bdev);
2511
2512         rcu_read_lock();
2513         list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) {
2514                 if (device->bdev == bdev) {
2515                         ret = -EEXIST;
2516                         rcu_read_unlock();
2517                         goto error;
2518                 }
2519         }
2520         rcu_read_unlock();
2521
2522         device = btrfs_alloc_device(fs_info, NULL, NULL);
2523         if (IS_ERR(device)) {
2524                 /* we can safely leave the fs_devices entry around */
2525                 ret = PTR_ERR(device);
2526                 goto error;
2527         }
2528
2529         name = rcu_string_strdup(device_path, GFP_KERNEL);
2530         if (!name) {
2531                 ret = -ENOMEM;
2532                 goto error_free_device;
2533         }
2534         rcu_assign_pointer(device->name, name);
2535
2536         trans = btrfs_start_transaction(root, 0);
2537         if (IS_ERR(trans)) {
2538                 ret = PTR_ERR(trans);
2539                 goto error_free_device;
2540         }
2541
2542         q = bdev_get_queue(bdev);
2543         set_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state);
2544         device->generation = trans->transid;
2545         device->io_width = fs_info->sectorsize;
2546         device->io_align = fs_info->sectorsize;
2547         device->sector_size = fs_info->sectorsize;
2548         device->total_bytes = round_down(i_size_read(bdev->bd_inode),
2549                                          fs_info->sectorsize);
2550         device->disk_total_bytes = device->total_bytes;
2551         device->commit_total_bytes = device->total_bytes;
2552         device->fs_info = fs_info;
2553         device->bdev = bdev;
2554         set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
2555         clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
2556         device->mode = FMODE_EXCL;
2557         device->dev_stats_valid = 1;
2558         set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE);
2559
2560         if (seeding_dev) {
2561                 sb->s_flags &= ~SB_RDONLY;
2562                 ret = btrfs_prepare_sprout(fs_info);
2563                 if (ret) {
2564                         btrfs_abort_transaction(trans, ret);
2565                         goto error_trans;
2566                 }
2567         }
2568
2569         device->fs_devices = fs_devices;
2570
2571         mutex_lock(&fs_devices->device_list_mutex);
2572         mutex_lock(&fs_info->chunk_mutex);
2573         list_add_rcu(&device->dev_list, &fs_devices->devices);
2574         list_add(&device->dev_alloc_list, &fs_devices->alloc_list);
2575         fs_devices->num_devices++;
2576         fs_devices->open_devices++;
2577         fs_devices->rw_devices++;
2578         fs_devices->total_devices++;
2579         fs_devices->total_rw_bytes += device->total_bytes;
2580
2581         atomic64_add(device->total_bytes, &fs_info->free_chunk_space);
2582
2583         if (!blk_queue_nonrot(q))
2584                 fs_devices->rotating = true;
2585
2586         orig_super_total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
2587         btrfs_set_super_total_bytes(fs_info->super_copy,
2588                 round_down(orig_super_total_bytes + device->total_bytes,
2589                            fs_info->sectorsize));
2590
2591         orig_super_num_devices = btrfs_super_num_devices(fs_info->super_copy);
2592         btrfs_set_super_num_devices(fs_info->super_copy,
2593                                     orig_super_num_devices + 1);
2594
2595         /*
2596          * We've got more storage, so clear any full flags on the
2597          * space infos.
2598          */
2599         btrfs_clear_space_info_full(fs_info);
2600
2601         mutex_unlock(&fs_info->chunk_mutex);
2602
2603         /* Add sysfs device entry */
2604         btrfs_sysfs_add_device(device);
2605
2606         mutex_unlock(&fs_devices->device_list_mutex);
2607
2608         if (seeding_dev) {
2609                 mutex_lock(&fs_info->chunk_mutex);
2610                 ret = init_first_rw_device(trans);
2611                 mutex_unlock(&fs_info->chunk_mutex);
2612                 if (ret) {
2613                         btrfs_abort_transaction(trans, ret);
2614                         goto error_sysfs;
2615                 }
2616         }
2617
2618         ret = btrfs_add_dev_item(trans, device);
2619         if (ret) {
2620                 btrfs_abort_transaction(trans, ret);
2621                 goto error_sysfs;
2622         }
2623
2624         if (seeding_dev) {
2625                 ret = btrfs_finish_sprout(trans);
2626                 if (ret) {
2627                         btrfs_abort_transaction(trans, ret);
2628                         goto error_sysfs;
2629                 }
2630
2631                 /*
2632                  * fs_devices now represents the newly sprouted filesystem and
2633                  * its fsid has been changed by btrfs_prepare_sprout
2634                  */
2635                 btrfs_sysfs_update_sprout_fsid(fs_devices);
2636         }
2637
2638         ret = btrfs_commit_transaction(trans);
2639
2640         if (seeding_dev) {
2641                 mutex_unlock(&uuid_mutex);
2642                 up_write(&sb->s_umount);
2643                 locked = false;
2644
2645                 if (ret) /* transaction commit */
2646                         return ret;
2647
2648                 ret = btrfs_relocate_sys_chunks(fs_info);
2649                 if (ret < 0)
2650                         btrfs_handle_fs_error(fs_info, ret,
2651                                     "Failed to relocate sys chunks after device initialization. This can be fixed using the \"btrfs balance\" command.");
2652                 trans = btrfs_attach_transaction(root);
2653                 if (IS_ERR(trans)) {
2654                         if (PTR_ERR(trans) == -ENOENT)
2655                                 return 0;
2656                         ret = PTR_ERR(trans);
2657                         trans = NULL;
2658                         goto error_sysfs;
2659                 }
2660                 ret = btrfs_commit_transaction(trans);
2661         }
2662
2663         /*
2664          * Now that we have written a new super block to this device, check
2665          * all the other fs_devices lists to see whether device_path has
2666          * alienated any other scanned device.
2667          * We can ignore the return value as it typically returns -EINVAL and
2668          * only succeeds if the device was an alien.
2669          */
2670         btrfs_forget_devices(device_path);
2671
2672         /* Update ctime/mtime for blkid or udev */
2673         update_dev_time(device_path);
2674
2675         return ret;
2676
2677 error_sysfs:
2678         btrfs_sysfs_remove_device(device);
2679         mutex_lock(&fs_info->fs_devices->device_list_mutex);
2680         mutex_lock(&fs_info->chunk_mutex);
2681         list_del_rcu(&device->dev_list);
2682         list_del(&device->dev_alloc_list);
2683         fs_info->fs_devices->num_devices--;
2684         fs_info->fs_devices->open_devices--;
2685         fs_info->fs_devices->rw_devices--;
2686         fs_info->fs_devices->total_devices--;
2687         fs_info->fs_devices->total_rw_bytes -= device->total_bytes;
2688         atomic64_sub(device->total_bytes, &fs_info->free_chunk_space);
2689         btrfs_set_super_total_bytes(fs_info->super_copy,
2690                                     orig_super_total_bytes);
2691         btrfs_set_super_num_devices(fs_info->super_copy,
2692                                     orig_super_num_devices);
2693         mutex_unlock(&fs_info->chunk_mutex);
2694         mutex_unlock(&fs_info->fs_devices->device_list_mutex);
2695 error_trans:
2696         if (seeding_dev)
2697                 sb->s_flags |= SB_RDONLY;
2698         if (trans)
2699                 btrfs_end_transaction(trans);
2700 error_free_device:
2701         btrfs_free_device(device);
2702 error:
2703         blkdev_put(bdev, FMODE_EXCL);
2704         if (locked) {
2705                 mutex_unlock(&uuid_mutex);
2706                 up_write(&sb->s_umount);
2707         }
2708         return ret;
2709 }
2710
2711 static noinline int btrfs_update_device(struct btrfs_trans_handle *trans,
2712                                         struct btrfs_device *device)
2713 {
2714         int ret;
2715         struct btrfs_path *path;
2716         struct btrfs_root *root = device->fs_info->chunk_root;
2717         struct btrfs_dev_item *dev_item;
2718         struct extent_buffer *leaf;
2719         struct btrfs_key key;
2720
2721         path = btrfs_alloc_path();
2722         if (!path)
2723                 return -ENOMEM;
2724
2725         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
2726         key.type = BTRFS_DEV_ITEM_KEY;
2727         key.offset = device->devid;
2728
2729         ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
2730         if (ret < 0)
2731                 goto out;
2732
2733         if (ret > 0) {
2734                 ret = -ENOENT;
2735                 goto out;
2736         }
2737
2738         leaf = path->nodes[0];
2739         dev_item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_item);
2740
2741         btrfs_set_device_id(leaf, dev_item, device->devid);
2742         btrfs_set_device_type(leaf, dev_item, device->type);
2743         btrfs_set_device_io_align(leaf, dev_item, device->io_align);
2744         btrfs_set_device_io_width(leaf, dev_item, device->io_width);
2745         btrfs_set_device_sector_size(leaf, dev_item, device->sector_size);
2746         btrfs_set_device_total_bytes(leaf, dev_item,
2747                                      btrfs_device_get_disk_total_bytes(device));
2748         btrfs_set_device_bytes_used(leaf, dev_item,
2749                                     btrfs_device_get_bytes_used(device));
2750         btrfs_mark_buffer_dirty(leaf);
2751
2752 out:
2753         btrfs_free_path(path);
2754         return ret;
2755 }
2756
2757 int btrfs_grow_device(struct btrfs_trans_handle *trans,
2758                       struct btrfs_device *device, u64 new_size)
2759 {
2760         struct btrfs_fs_info *fs_info = device->fs_info;
2761         struct btrfs_super_block *super_copy = fs_info->super_copy;
2762         u64 old_total;
2763         u64 diff;
2764
2765         if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
2766                 return -EACCES;
2767
2768         new_size = round_down(new_size, fs_info->sectorsize);
2769
2770         mutex_lock(&fs_info->chunk_mutex);
2771         old_total = btrfs_super_total_bytes(super_copy);
2772         diff = round_down(new_size - device->total_bytes, fs_info->sectorsize);
2773
2774         if (new_size <= device->total_bytes ||
2775             test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
2776                 mutex_unlock(&fs_info->chunk_mutex);
2777                 return -EINVAL;
2778         }
2779
2780         btrfs_set_super_total_bytes(super_copy,
2781                         round_down(old_total + diff, fs_info->sectorsize));
2782         device->fs_devices->total_rw_bytes += diff;
2783
2784         btrfs_device_set_total_bytes(device, new_size);
2785         btrfs_device_set_disk_total_bytes(device, new_size);
2786         btrfs_clear_space_info_full(device->fs_info);
2787         if (list_empty(&device->post_commit_list))
2788                 list_add_tail(&device->post_commit_list,
2789                               &trans->transaction->dev_update_list);
2790         mutex_unlock(&fs_info->chunk_mutex);
2791
2792         return btrfs_update_device(trans, device);
2793 }
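/*
 * Usage sketch (illustrative; the resize ioctl path looks roughly like
 * this):
 *
 *	trans = btrfs_start_transaction(root, 0);
 *	ret = btrfs_grow_device(trans, device, new_size);
 *	if (ret)
 *		btrfs_abort_transaction(trans, ret);
 *	else
 *		ret = btrfs_commit_transaction(trans);
 *
 * new_size is rounded down to the sector size and must be strictly
 * greater than the current device->total_bytes, or -EINVAL is returned.
 */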
2794
2795 static int btrfs_free_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
2796 {
2797         struct btrfs_fs_info *fs_info = trans->fs_info;
2798         struct btrfs_root *root = fs_info->chunk_root;
2799         int ret;
2800         struct btrfs_path *path;
2801         struct btrfs_key key;
2802
2803         path = btrfs_alloc_path();
2804         if (!path)
2805                 return -ENOMEM;
2806
2807         key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
2808         key.offset = chunk_offset;
2809         key.type = BTRFS_CHUNK_ITEM_KEY;
2810
2811         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
2812         if (ret < 0)
2813                 goto out;
2814         else if (ret > 0) { /* Logic error or corruption */
2815                 btrfs_handle_fs_error(fs_info, -ENOENT,
2816                                       "Failed lookup while freeing chunk.");
2817                 ret = -ENOENT;
2818                 goto out;
2819         }
2820
2821         ret = btrfs_del_item(trans, root, path);
2822         if (ret < 0)
2823                 btrfs_handle_fs_error(fs_info, ret,
2824                                       "Failed to delete chunk item.");
2825 out:
2826         btrfs_free_path(path);
2827         return ret;
2828 }
2829
2830 static int btrfs_del_sys_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
2831 {
2832         struct btrfs_super_block *super_copy = fs_info->super_copy;
2833         struct btrfs_disk_key *disk_key;
2834         struct btrfs_chunk *chunk;
2835         u8 *ptr;
2836         int ret = 0;
2837         u32 num_stripes;
2838         u32 array_size;
2839         u32 len = 0;
2840         u32 cur;
2841         struct btrfs_key key;
2842
2843         mutex_lock(&fs_info->chunk_mutex);
2844         array_size = btrfs_super_sys_array_size(super_copy);
2845
2846         ptr = super_copy->sys_chunk_array;
2847         cur = 0;
2848
2849         while (cur < array_size) {
2850                 disk_key = (struct btrfs_disk_key *)ptr;
2851                 btrfs_disk_key_to_cpu(&key, disk_key);
2852
2853                 len = sizeof(*disk_key);
2854
2855                 if (key.type == BTRFS_CHUNK_ITEM_KEY) {
2856                         chunk = (struct btrfs_chunk *)(ptr + len);
2857                         num_stripes = btrfs_stack_chunk_num_stripes(chunk);
2858                         len += btrfs_chunk_item_size(num_stripes);
2859                 } else {
2860                         ret = -EIO;
2861                         break;
2862                 }
2863                 if (key.objectid == BTRFS_FIRST_CHUNK_TREE_OBJECTID &&
2864                     key.offset == chunk_offset) {
2865                         memmove(ptr, ptr + len, array_size - (cur + len));
2866                         array_size -= len;
2867                         btrfs_set_super_sys_array_size(super_copy, array_size);
2868                 } else {
2869                         ptr += len;
2870                         cur += len;
2871                 }
2872         }
2873         mutex_unlock(&fs_info->chunk_mutex);
2874         return ret;
2875 }
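
/*
 * For reference, the sys_chunk_array walked above is a packed sequence of
 * (disk key, chunk item) records with no padding between them:
 *
 *	[btrfs_disk_key][btrfs_chunk + N stripes][btrfs_disk_key][...]
 *
 * hence the cursor advances by sizeof(*disk_key) plus
 * btrfs_chunk_item_size(num_stripes) per record, and deleting a record is
 * a single memmove() of the tail over the hole.
 */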
2876
2877 /**
2878  * btrfs_get_chunk_map() - Find the mapping containing the given logical extent.
2879  * @logical: Logical block offset in bytes.
2880  * @length: Length of extent in bytes.
2881  *
2882  * Return: Chunk mapping or ERR_PTR.
2883  */
2884 struct extent_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
2885                                        u64 logical, u64 length)
2886 {
2887         struct extent_map_tree *em_tree;
2888         struct extent_map *em;
2889
2890         em_tree = &fs_info->mapping_tree;
2891         read_lock(&em_tree->lock);
2892         em = lookup_extent_mapping(em_tree, logical, length);
2893         read_unlock(&em_tree->lock);
2894
2895         if (!em) {
2896                 btrfs_crit(fs_info, "unable to find logical %llu length %llu",
2897                            logical, length);
2898                 return ERR_PTR(-EINVAL);
2899         }
2900
2901         if (em->start > logical || em->start + em->len < logical) {
2902                 btrfs_crit(fs_info,
2903                            "found a bad mapping, wanted %llu-%llu, found %llu-%llu",
2904                            logical, length, em->start, em->start + em->len);
2905                 free_extent_map(em);
2906                 return ERR_PTR(-EINVAL);
2907         }
2908
2909         /* callers are responsible for dropping em's ref. */
2910         return em;
2911 }
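
/*
 * Typical usage sketch, mirroring the callers in this file: look the
 * mapping up, use it, then drop the reference the lookup took:
 *
 *	em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
 *	if (IS_ERR(em))
 *		return PTR_ERR(em);
 *	map = em->map_lookup;
 *	... use map->num_stripes and map->stripes[i] ...
 *	free_extent_map(em);
 *
 * A length of 1 is sufficient when only the chunk containing a single
 * logical byte is wanted, as btrfs_remove_chunk() below does.
 */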
2912
2913 int btrfs_remove_chunk(struct btrfs_trans_handle *trans, u64 chunk_offset)
2914 {
2915         struct btrfs_fs_info *fs_info = trans->fs_info;
2916         struct extent_map *em;
2917         struct map_lookup *map;
2918         u64 dev_extent_len = 0;
2919         int i, ret = 0;
2920         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
2921
2922         em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
2923         if (IS_ERR(em)) {
2924                 /*
2925                  * This is a logic error, but we don't want to just rely on the
2926                  * user having built with ASSERT enabled, so if ASSERT doesn't
2927                  * do anything we still error out.
2928                  */
2929                 ASSERT(0);
2930                 return PTR_ERR(em);
2931         }
2932         map = em->map_lookup;
2933         mutex_lock(&fs_info->chunk_mutex);
2934         check_system_chunk(trans, map->type);
2935         mutex_unlock(&fs_info->chunk_mutex);
2936
2937         /*
2938          * Take the device list mutex to prevent races with the final phase of
2939          * a device replace operation that replaces the device object associated
2940          * with map stripes (dev-replace.c:btrfs_dev_replace_finishing()).
2941          */
2942         mutex_lock(&fs_devices->device_list_mutex);
2943         for (i = 0; i < map->num_stripes; i++) {
2944                 struct btrfs_device *device = map->stripes[i].dev;
2945                 ret = btrfs_free_dev_extent(trans, device,
2946                                             map->stripes[i].physical,
2947                                             &dev_extent_len);
2948                 if (ret) {
2949                         mutex_unlock(&fs_devices->device_list_mutex);
2950                         btrfs_abort_transaction(trans, ret);
2951                         goto out;
2952                 }
2953
2954                 if (device->bytes_used > 0) {
2955                         mutex_lock(&fs_info->chunk_mutex);
2956                         btrfs_device_set_bytes_used(device,
2957                                         device->bytes_used - dev_extent_len);
2958                         atomic64_add(dev_extent_len, &fs_info->free_chunk_space);
2959                         btrfs_clear_space_info_full(fs_info);
2960                         mutex_unlock(&fs_info->chunk_mutex);
2961                 }
2962
2963                 ret = btrfs_update_device(trans, device);
2964                 if (ret) {
2965                         mutex_unlock(&fs_devices->device_list_mutex);
2966                         btrfs_abort_transaction(trans, ret);
2967                         goto out;
2968                 }
2969         }
2970         mutex_unlock(&fs_devices->device_list_mutex);
2971
2972         ret = btrfs_free_chunk(trans, chunk_offset);
2973         if (ret) {
2974                 btrfs_abort_transaction(trans, ret);
2975                 goto out;
2976         }
2977
2978         trace_btrfs_chunk_free(fs_info, map, chunk_offset, em->len);
2979
2980         if (map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
2981                 ret = btrfs_del_sys_chunk(fs_info, chunk_offset);
2982                 if (ret) {
2983                         btrfs_abort_transaction(trans, ret);
2984                         goto out;
2985                 }
2986         }
2987
2988         ret = btrfs_remove_block_group(trans, chunk_offset, em);
2989         if (ret) {
2990                 btrfs_abort_transaction(trans, ret);
2991                 goto out;
2992         }
2993
2994 out:
2995         /* once for us */
2996         free_extent_map(em);
2997         return ret;
2998 }
2999
3000 static int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset)
3001 {
3002         struct btrfs_root *root = fs_info->chunk_root;
3003         struct btrfs_trans_handle *trans;
3004         struct btrfs_block_group *block_group;
3005         int ret;
3006
3007         /*
3008          * Prevent races with automatic removal of unused block groups.
3009          * After we relocate and before we remove the chunk with offset
3010          * chunk_offset, automatic removal of the block group can kick in,
3011          * resulting in a failure when calling btrfs_remove_chunk() below.
3012          *
3013          * Make sure to acquire this mutex before doing a tree search (dev
3014          * or chunk trees) to find chunks. Otherwise the cleaner kthread might
3015          * call btrfs_remove_chunk() (through btrfs_delete_unused_bgs()) after
3016          * we release the path used to search the chunk/dev tree and before
3017          * the current task acquires this mutex and calls us.
3018          */
3019         lockdep_assert_held(&fs_info->delete_unused_bgs_mutex);
3020
3021         /* step one, relocate all the extents inside this chunk */
3022         btrfs_scrub_pause(fs_info);
3023         ret = btrfs_relocate_block_group(fs_info, chunk_offset);
3024         btrfs_scrub_continue(fs_info);
3025         if (ret)
3026                 return ret;
3027
3028         block_group = btrfs_lookup_block_group(fs_info, chunk_offset);
3029         if (!block_group)
3030                 return -ENOENT;
3031         btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group);
3032         btrfs_put_block_group(block_group);
3033
3034         trans = btrfs_start_trans_remove_block_group(root->fs_info,
3035                                                      chunk_offset);
3036         if (IS_ERR(trans)) {
3037                 ret = PTR_ERR(trans);
3038                 btrfs_handle_fs_error(root->fs_info, ret, NULL);
3039                 return ret;
3040         }
3041
3042         /*
3043          * step two, delete the device extents and the
3044          * chunk tree entries
3045          */
3046         ret = btrfs_remove_chunk(trans, chunk_offset);
3047         btrfs_end_transaction(trans);
3048         return ret;
3049 }
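
/*
 * Note the two-phase shape above: the extents are relocated without a
 * transaction held across the whole operation, and a transaction is only
 * opened afterwards for the pure metadata work in btrfs_remove_chunk().
 */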
3050
3051 static int btrfs_relocate_sys_chunks(struct btrfs_fs_info *fs_info)
3052 {
3053         struct btrfs_root *chunk_root = fs_info->chunk_root;
3054         struct btrfs_path *path;
3055         struct extent_buffer *leaf;
3056         struct btrfs_chunk *chunk;
3057         struct btrfs_key key;
3058         struct btrfs_key found_key;
3059         u64 chunk_type;
3060         bool retried = false;
3061         int failed = 0;
3062         int ret;
3063
3064         path = btrfs_alloc_path();
3065         if (!path)
3066                 return -ENOMEM;
3067
3068 again:
3069         key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3070         key.offset = (u64)-1;
3071         key.type = BTRFS_CHUNK_ITEM_KEY;
3072
3073         while (1) {
3074                 mutex_lock(&fs_info->delete_unused_bgs_mutex);
3075                 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
3076                 if (ret < 0) {
3077                         mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3078                         goto error;
3079                 }
3080                 BUG_ON(ret == 0); /* Corruption */
3081
3082                 ret = btrfs_previous_item(chunk_root, path, key.objectid,
3083                                           key.type);
3084                 if (ret)
3085                         mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3086                 if (ret < 0)
3087                         goto error;
3088                 if (ret > 0)
3089                         break;
3090
3091                 leaf = path->nodes[0];
3092                 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
3093
3094                 chunk = btrfs_item_ptr(leaf, path->slots[0],
3095                                        struct btrfs_chunk);
3096                 chunk_type = btrfs_chunk_type(leaf, chunk);
3097                 btrfs_release_path(path);
3098
3099                 if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) {
3100                         ret = btrfs_relocate_chunk(fs_info, found_key.offset);
3101                         if (ret == -ENOSPC)
3102                                 failed++;
3103                         else
3104                                 BUG_ON(ret);
3105                 }
3106                 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3107
3108                 if (found_key.offset == 0)
3109                         break;
3110                 key.offset = found_key.offset - 1;
3111         }
3112         ret = 0;
3113         if (failed && !retried) {
3114                 failed = 0;
3115                 retried = true;
3116                 goto again;
3117         } else if (WARN_ON(failed && retried)) {
3118                 ret = -ENOSPC;
3119         }
3120 error:
3121         btrfs_free_path(path);
3122         return ret;
3123 }
3124
3125 /*
3126  * return 1 : allocated a data chunk successfully,
3127  * return <0: error while allocating a data chunk,
3128  * return 0 : no need to allocate a data chunk.
3129  */
3130 static int btrfs_may_alloc_data_chunk(struct btrfs_fs_info *fs_info,
3131                                       u64 chunk_offset)
3132 {
3133         struct btrfs_block_group *cache;
3134         u64 bytes_used;
3135         u64 chunk_type;
3136
3137         cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3138         ASSERT(cache);
3139         chunk_type = cache->flags;
3140         btrfs_put_block_group(cache);
3141
3142         if (!(chunk_type & BTRFS_BLOCK_GROUP_DATA))
3143                 return 0;
3144
3145         spin_lock(&fs_info->data_sinfo->lock);
3146         bytes_used = fs_info->data_sinfo->bytes_used;
3147         spin_unlock(&fs_info->data_sinfo->lock);
3148
3149         if (!bytes_used) {
3150                 struct btrfs_trans_handle *trans;
3151                 int ret;
3152
3153                 trans = btrfs_join_transaction(fs_info->tree_root);
3154                 if (IS_ERR(trans))
3155                         return PTR_ERR(trans);
3156
3157                 ret = btrfs_force_chunk_alloc(trans, BTRFS_BLOCK_GROUP_DATA);
3158                 btrfs_end_transaction(trans);
3159                 if (ret < 0)
3160                         return ret;
3161                 return 1;
3162         }
3163
3164         return 0;
3165 }
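
/*
 * Example: if balance is about to relocate the only data chunk of an
 * otherwise empty filesystem, data_sinfo->bytes_used is 0 and the helper
 * above force-allocates one fresh data chunk (returning 1), preserving the
 * data raid profile before the old chunk goes away.
 */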
3166
3167 static int insert_balance_item(struct btrfs_fs_info *fs_info,
3168                                struct btrfs_balance_control *bctl)
3169 {
3170         struct btrfs_root *root = fs_info->tree_root;
3171         struct btrfs_trans_handle *trans;
3172         struct btrfs_balance_item *item;
3173         struct btrfs_disk_balance_args disk_bargs;
3174         struct btrfs_path *path;
3175         struct extent_buffer *leaf;
3176         struct btrfs_key key;
3177         int ret, err;
3178
3179         path = btrfs_alloc_path();
3180         if (!path)
3181                 return -ENOMEM;
3182
3183         trans = btrfs_start_transaction(root, 0);
3184         if (IS_ERR(trans)) {
3185                 btrfs_free_path(path);
3186                 return PTR_ERR(trans);
3187         }
3188
3189         key.objectid = BTRFS_BALANCE_OBJECTID;
3190         key.type = BTRFS_TEMPORARY_ITEM_KEY;
3191         key.offset = 0;
3192
3193         ret = btrfs_insert_empty_item(trans, root, path, &key,
3194                                       sizeof(*item));
3195         if (ret)
3196                 goto out;
3197
3198         leaf = path->nodes[0];
3199         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
3200
3201         memzero_extent_buffer(leaf, (unsigned long)item, sizeof(*item));
3202
3203         btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->data);
3204         btrfs_set_balance_data(leaf, item, &disk_bargs);
3205         btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->meta);
3206         btrfs_set_balance_meta(leaf, item, &disk_bargs);
3207         btrfs_cpu_balance_args_to_disk(&disk_bargs, &bctl->sys);
3208         btrfs_set_balance_sys(leaf, item, &disk_bargs);
3209
3210         btrfs_set_balance_flags(leaf, item, bctl->flags);
3211
3212         btrfs_mark_buffer_dirty(leaf);
3213 out:
3214         btrfs_free_path(path);
3215         err = btrfs_commit_transaction(trans);
3216         if (err && !ret)
3217                 ret = err;
3218         return ret;
3219 }
3220
3221 static int del_balance_item(struct btrfs_fs_info *fs_info)
3222 {
3223         struct btrfs_root *root = fs_info->tree_root;
3224         struct btrfs_trans_handle *trans;
3225         struct btrfs_path *path;
3226         struct btrfs_key key;
3227         int ret, err;
3228
3229         path = btrfs_alloc_path();
3230         if (!path)
3231                 return -ENOMEM;
3232
3233         trans = btrfs_start_transaction_fallback_global_rsv(root, 0);
3234         if (IS_ERR(trans)) {
3235                 btrfs_free_path(path);
3236                 return PTR_ERR(trans);
3237         }
3238
3239         key.objectid = BTRFS_BALANCE_OBJECTID;
3240         key.type = BTRFS_TEMPORARY_ITEM_KEY;
3241         key.offset = 0;
3242
3243         ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
3244         if (ret < 0)
3245                 goto out;
3246         if (ret > 0) {
3247                 ret = -ENOENT;
3248                 goto out;
3249         }
3250
3251         ret = btrfs_del_item(trans, root, path);
3252 out:
3253         btrfs_free_path(path);
3254         err = btrfs_commit_transaction(trans);
3255         if (err && !ret)
3256                 ret = err;
3257         return ret;
3258 }
3259
3260 /*
3261  * This is a heuristic used to reduce the number of chunks balanced on
3262  * resume after balance was interrupted.
3263  */
3264 static void update_balance_args(struct btrfs_balance_control *bctl)
3265 {
3266         /*
3267          * Turn on soft mode for chunk types that were being converted.
3268          */
3269         if (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)
3270                 bctl->data.flags |= BTRFS_BALANCE_ARGS_SOFT;
3271         if (bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)
3272                 bctl->sys.flags |= BTRFS_BALANCE_ARGS_SOFT;
3273         if (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)
3274                 bctl->meta.flags |= BTRFS_BALANCE_ARGS_SOFT;
3275
3276         /*
3277          * Turn on the usage filter if it is not already in use.  The idea is
3278          * that chunks that we have already balanced should be
3279          * reasonably full.  Don't do it for chunks that are being
3280          * converted - that will keep us from relocating unconverted
3281          * (albeit full) chunks.
3282          */
3283         if (!(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3284             !(bctl->data.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3285             !(bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3286                 bctl->data.flags |= BTRFS_BALANCE_ARGS_USAGE;
3287                 bctl->data.usage = 90;
3288         }
3289         if (!(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3290             !(bctl->sys.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3291             !(bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3292                 bctl->sys.flags |= BTRFS_BALANCE_ARGS_USAGE;
3293                 bctl->sys.usage = 90;
3294         }
3295         if (!(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE) &&
3296             !(bctl->meta.flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3297             !(bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT)) {
3298                 bctl->meta.flags |= BTRFS_BALANCE_ARGS_USAGE;
3299                 bctl->meta.usage = 90;
3300         }
3301 }
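
/*
 * Example (hedged): a balance interrupted while running with
 * -dconvert=raid1 resumes as if -dconvert=raid1,soft had been given, so
 * chunks already converted to raid1 are skipped; a plain -d balance
 * resumes as -dusage=90, skipping chunks that were already compacted.
 */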
3302
3303 /*
3304  * Clear the balance status in fs_info and delete the balance item from disk.
3305  */
3306 static void reset_balance_state(struct btrfs_fs_info *fs_info)
3307 {
3308         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3309         int ret;
3310
3311         BUG_ON(!fs_info->balance_ctl);
3312
3313         spin_lock(&fs_info->balance_lock);
3314         fs_info->balance_ctl = NULL;
3315         spin_unlock(&fs_info->balance_lock);
3316
3317         kfree(bctl);
3318         ret = del_balance_item(fs_info);
3319         if (ret)
3320                 btrfs_handle_fs_error(fs_info, ret, NULL);
3321 }
3322
3323 /*
3324  * Balance filters.  Return 1 if chunk should be filtered out
3325  * (should not be balanced).
3326  */
3327 static int chunk_profiles_filter(u64 chunk_type,
3328                                  struct btrfs_balance_args *bargs)
3329 {
3330         chunk_type = chunk_to_extended(chunk_type) &
3331                                 BTRFS_EXTENDED_PROFILE_MASK;
3332
3333         if (bargs->profiles & chunk_type)
3334                 return 0;
3335
3336         return 1;
3337 }
3338
3339 static int chunk_usage_range_filter(struct btrfs_fs_info *fs_info, u64 chunk_offset,
3340                               struct btrfs_balance_args *bargs)
3341 {
3342         struct btrfs_block_group *cache;
3343         u64 chunk_used;
3344         u64 user_thresh_min;
3345         u64 user_thresh_max;
3346         int ret = 1;
3347
3348         cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3349         chunk_used = cache->used;
3350
3351         if (bargs->usage_min == 0)
3352                 user_thresh_min = 0;
3353         else
3354                 user_thresh_min = div_factor_fine(cache->length,
3355                                                   bargs->usage_min);
3356
3357         if (bargs->usage_max == 0)
3358                 user_thresh_max = 1;
3359         else if (bargs->usage_max > 100)
3360                 user_thresh_max = cache->length;
3361         else
3362                 user_thresh_max = div_factor_fine(cache->length,
3363                                                   bargs->usage_max);
3364
3365         if (user_thresh_min <= chunk_used && chunk_used < user_thresh_max)
3366                 ret = 0;
3367
3368         btrfs_put_block_group(cache);
3369         return ret;
3370 }
3371
3372 static int chunk_usage_filter(struct btrfs_fs_info *fs_info,
3373                 u64 chunk_offset, struct btrfs_balance_args *bargs)
3374 {
3375         struct btrfs_block_group *cache;
3376         u64 chunk_used, user_thresh;
3377         int ret = 1;
3378
3379         cache = btrfs_lookup_block_group(fs_info, chunk_offset);
3380         chunk_used = cache->used;
3381
3382         if (bargs->usage_min == 0)
3383                 user_thresh = 1;
3384         else if (bargs->usage > 100)
3385                 user_thresh = cache->length;
3386         else
3387                 user_thresh = div_factor_fine(cache->length, bargs->usage);
3388
3389         if (chunk_used < user_thresh)
3390                 ret = 0;
3391
3392         btrfs_put_block_group(cache);
3393         return ret;
3394 }
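
/*
 * Worked example for the usage filter: with usage=50 on a 1GiB chunk,
 * div_factor_fine() yields a 512MiB threshold, so only chunks less than
 * half full are balanced.  A usage value of 0 sets the threshold to a
 * single byte, matching only completely empty chunks.
 */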
3395
3396 static int chunk_devid_filter(struct extent_buffer *leaf,
3397                               struct btrfs_chunk *chunk,
3398                               struct btrfs_balance_args *bargs)
3399 {
3400         struct btrfs_stripe *stripe;
3401         int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3402         int i;
3403
3404         for (i = 0; i < num_stripes; i++) {
3405                 stripe = btrfs_stripe_nr(chunk, i);
3406                 if (btrfs_stripe_devid(leaf, stripe) == bargs->devid)
3407                         return 0;
3408         }
3409
3410         return 1;
3411 }
3412
3413 static u64 calc_data_stripes(u64 type, int num_stripes)
3414 {
3415         const int index = btrfs_bg_flags_to_raid_index(type);
3416         const int ncopies = btrfs_raid_array[index].ncopies;
3417         const int nparity = btrfs_raid_array[index].nparity;
3418
3419         if (nparity)
3420                 return num_stripes - nparity;
3421         else
3422                 return num_stripes / ncopies;
3423 }
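
/*
 * Examples: RAID5 with 4 stripes has nparity == 1, giving 3 data stripes;
 * RAID6 with 6 stripes gives 4; RAID1 (ncopies == 2) with 2 stripes gives
 * 1; RAID0 keeps all of its stripes as data.
 */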
3424
3425 /* [pstart, pend) */
3426 static int chunk_drange_filter(struct extent_buffer *leaf,
3427                                struct btrfs_chunk *chunk,
3428                                struct btrfs_balance_args *bargs)
3429 {
3430         struct btrfs_stripe *stripe;
3431         int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3432         u64 stripe_offset;
3433         u64 stripe_length;
3434         u64 type;
3435         int factor;
3436         int i;
3437
3438         if (!(bargs->flags & BTRFS_BALANCE_ARGS_DEVID))
3439                 return 0;
3440
3441         type = btrfs_chunk_type(leaf, chunk);
3442         factor = calc_data_stripes(type, num_stripes);
3443
3444         for (i = 0; i < num_stripes; i++) {
3445                 stripe = btrfs_stripe_nr(chunk, i);
3446                 if (btrfs_stripe_devid(leaf, stripe) != bargs->devid)
3447                         continue;
3448
3449                 stripe_offset = btrfs_stripe_offset(leaf, stripe);
3450                 stripe_length = btrfs_chunk_length(leaf, chunk);
3451                 stripe_length = div_u64(stripe_length, factor);
3452
3453                 if (stripe_offset < bargs->pend &&
3454                     stripe_offset + stripe_length > bargs->pstart)
3455                         return 0;
3456         }
3457
3458         return 1;
3459 }
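
/*
 * Drange example: a RAID0 chunk of length 2GiB over two devices has
 * factor == 2, so each stripe covers 1GiB of physical space.  A stripe at
 * physical offset 5GiB on the requested devid therefore intersects a
 * pstart..pend range of 4GiB..6GiB and the chunk is kept for balancing.
 */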
3460
3461 /* [vstart, vend) */
3462 static int chunk_vrange_filter(struct extent_buffer *leaf,
3463                                struct btrfs_chunk *chunk,
3464                                u64 chunk_offset,
3465                                struct btrfs_balance_args *bargs)
3466 {
3467         if (chunk_offset < bargs->vend &&
3468             chunk_offset + btrfs_chunk_length(leaf, chunk) > bargs->vstart)
3469                 /* at least part of the chunk is inside this vrange */
3470                 return 0;
3471
3472         return 1;
3473 }
3474
3475 static int chunk_stripes_range_filter(struct extent_buffer *leaf,
3476                                struct btrfs_chunk *chunk,
3477                                struct btrfs_balance_args *bargs)
3478 {
3479         int num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
3480
3481         if (bargs->stripes_min <= num_stripes
3482                         && num_stripes <= bargs->stripes_max)
3483                 return 0;
3484
3485         return 1;
3486 }
3487
3488 static int chunk_soft_convert_filter(u64 chunk_type,
3489                                      struct btrfs_balance_args *bargs)
3490 {
3491         if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
3492                 return 0;
3493
3494         chunk_type = chunk_to_extended(chunk_type) &
3495                                 BTRFS_EXTENDED_PROFILE_MASK;
3496
3497         if (bargs->target == chunk_type)
3498                 return 1;
3499
3500         return 0;
3501 }
3502
3503 static int should_balance_chunk(struct extent_buffer *leaf,
3504                                 struct btrfs_chunk *chunk, u64 chunk_offset)
3505 {
3506         struct btrfs_fs_info *fs_info = leaf->fs_info;
3507         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3508         struct btrfs_balance_args *bargs = NULL;
3509         u64 chunk_type = btrfs_chunk_type(leaf, chunk);
3510
3511         /* type filter */
3512         if (!((chunk_type & BTRFS_BLOCK_GROUP_TYPE_MASK) &
3513               (bctl->flags & BTRFS_BALANCE_TYPE_MASK))) {
3514                 return 0;
3515         }
3516
3517         if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
3518                 bargs = &bctl->data;
3519         else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
3520                 bargs = &bctl->sys;
3521         else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
3522                 bargs = &bctl->meta;
3523
3524         /* profiles filter */
3525         if ((bargs->flags & BTRFS_BALANCE_ARGS_PROFILES) &&
3526             chunk_profiles_filter(chunk_type, bargs)) {
3527                 return 0;
3528         }
3529
3530         /* usage filter */
3531         if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE) &&
3532             chunk_usage_filter(fs_info, chunk_offset, bargs)) {
3533                 return 0;
3534         } else if ((bargs->flags & BTRFS_BALANCE_ARGS_USAGE_RANGE) &&
3535             chunk_usage_range_filter(fs_info, chunk_offset, bargs)) {
3536                 return 0;
3537         }
3538
3539         /* devid filter */
3540         if ((bargs->flags & BTRFS_BALANCE_ARGS_DEVID) &&
3541             chunk_devid_filter(leaf, chunk, bargs)) {
3542                 return 0;
3543         }
3544
3545         /* drange filter, makes sense only with devid filter */
3546         if ((bargs->flags & BTRFS_BALANCE_ARGS_DRANGE) &&
3547             chunk_drange_filter(leaf, chunk, bargs)) {
3548                 return 0;
3549         }
3550
3551         /* vrange filter */
3552         if ((bargs->flags & BTRFS_BALANCE_ARGS_VRANGE) &&
3553             chunk_vrange_filter(leaf, chunk, chunk_offset, bargs)) {
3554                 return 0;
3555         }
3556
3557         /* stripes filter */
3558         if ((bargs->flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE) &&
3559             chunk_stripes_range_filter(leaf, chunk, bargs)) {
3560                 return 0;
3561         }
3562
3563         /* soft profile changing mode */
3564         if ((bargs->flags & BTRFS_BALANCE_ARGS_SOFT) &&
3565             chunk_soft_convert_filter(chunk_type, bargs)) {
3566                 return 0;
3567         }
3568
3569         /*
3570          * limited by count, must be the last filter
3571          */
3572         if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT)) {
3573                 if (bargs->limit == 0)
3574                         return 0;
3575                 else
3576                         bargs->limit--;
3577         } else if ((bargs->flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)) {
3578                 /*
3579                  * Same logic as the 'limit' filter; the minimum cannot be
3580                  * determined here because we do not have the global information
3581                  * about the count of all chunks that satisfy the filters.
3582                  */
3583                 if (bargs->limit_max == 0)
3584                         return 0;
3585                 else
3586                         bargs->limit_max--;
3587         }
3588
3589         return 1;
3590 }
3591
3592 static int __btrfs_balance(struct btrfs_fs_info *fs_info)
3593 {
3594         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3595         struct btrfs_root *chunk_root = fs_info->chunk_root;
3596         u64 chunk_type;
3597         struct btrfs_chunk *chunk;
3598         struct btrfs_path *path = NULL;
3599         struct btrfs_key key;
3600         struct btrfs_key found_key;
3601         struct extent_buffer *leaf;
3602         int slot;
3603         int ret;
3604         int enospc_errors = 0;
3605         bool counting = true;
3606         /* The single value limit and min/max limits use the same bytes in the union */
3607         u64 limit_data = bctl->data.limit;
3608         u64 limit_meta = bctl->meta.limit;
3609         u64 limit_sys = bctl->sys.limit;
3610         u32 count_data = 0;
3611         u32 count_meta = 0;
3612         u32 count_sys = 0;
3613         int chunk_reserved = 0;
3614
3615         path = btrfs_alloc_path();
3616         if (!path) {
3617                 ret = -ENOMEM;
3618                 goto error;
3619         }
3620
3621         /* zero out stat counters */
3622         spin_lock(&fs_info->balance_lock);
3623         memset(&bctl->stat, 0, sizeof(bctl->stat));
3624         spin_unlock(&fs_info->balance_lock);
3625 again:
3626         if (!counting) {
3627                 /*
3628                  * The single value limit and min/max limits use the same bytes
3629                  * in the union, so restore the single values saved above.
3630                  */
3631                 bctl->data.limit = limit_data;
3632                 bctl->meta.limit = limit_meta;
3633                 bctl->sys.limit = limit_sys;
3634         }
3635         key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
3636         key.offset = (u64)-1;
3637         key.type = BTRFS_CHUNK_ITEM_KEY;
3638
3639         while (1) {
3640                 if ((!counting && atomic_read(&fs_info->balance_pause_req)) ||
3641                     atomic_read(&fs_info->balance_cancel_req)) {
3642                         ret = -ECANCELED;
3643                         goto error;
3644                 }
3645
3646                 mutex_lock(&fs_info->delete_unused_bgs_mutex);
3647                 ret = btrfs_search_slot(NULL, chunk_root, &key, path, 0, 0);
3648                 if (ret < 0) {
3649                         mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3650                         goto error;
3651                 }
3652
3653                 /*
3654                  * This shouldn't happen; it means the last relocate
3655                  * failed.
3656                  */
3657                 if (ret == 0)
3658                         BUG(); /* FIXME break ? */
3659
3660                 ret = btrfs_previous_item(chunk_root, path, 0,
3661                                           BTRFS_CHUNK_ITEM_KEY);
3662                 if (ret) {
3663                         mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3664                         ret = 0;
3665                         break;
3666                 }
3667
3668                 leaf = path->nodes[0];
3669                 slot = path->slots[0];
3670                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
3671
3672                 if (found_key.objectid != key.objectid) {
3673                         mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3674                         break;
3675                 }
3676
3677                 chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
3678                 chunk_type = btrfs_chunk_type(leaf, chunk);
3679
3680                 if (!counting) {
3681                         spin_lock(&fs_info->balance_lock);
3682                         bctl->stat.considered++;
3683                         spin_unlock(&fs_info->balance_lock);
3684                 }
3685
3686                 ret = should_balance_chunk(leaf, chunk, found_key.offset);
3687
3688                 btrfs_release_path(path);
3689                 if (!ret) {
3690                         mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3691                         goto loop;
3692                 }
3693
3694                 if (counting) {
3695                         mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3696                         spin_lock(&fs_info->balance_lock);
3697                         bctl->stat.expected++;
3698                         spin_unlock(&fs_info->balance_lock);
3699
3700                         if (chunk_type & BTRFS_BLOCK_GROUP_DATA)
3701                                 count_data++;
3702                         else if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM)
3703                                 count_sys++;
3704                         else if (chunk_type & BTRFS_BLOCK_GROUP_METADATA)
3705                                 count_meta++;
3706
3707                         goto loop;
3708                 }
3709
3710                 /*
3711                  * Apply limit_min filter, no need to check if the LIMITS
3712                  * filter is used, limit_min is 0 by default
3713                  */
3714                 if (((chunk_type & BTRFS_BLOCK_GROUP_DATA) &&
3715                                         count_data < bctl->data.limit_min)
3716                                 || ((chunk_type & BTRFS_BLOCK_GROUP_METADATA) &&
3717                                         count_meta < bctl->meta.limit_min)
3718                                 || ((chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) &&
3719                                         count_sys < bctl->sys.limit_min)) {
3720                         mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3721                         goto loop;
3722                 }
3723
3724                 if (!chunk_reserved) {
3725                         /*
3726                          * We may be relocating the only data chunk we have,
3727                          * which could potentially end up with losing data's
3728                          * raid profile, so lets allocate an empty one in
3729                          * advance.
3730                          */
3731                         ret = btrfs_may_alloc_data_chunk(fs_info,
3732                                                          found_key.offset);
3733                         if (ret < 0) {
3734                                 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3735                                 goto error;
3736                         } else if (ret == 1) {
3737                                 chunk_reserved = 1;
3738                         }
3739                 }
3740
3741                 ret = btrfs_relocate_chunk(fs_info, found_key.offset);
3742                 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
3743                 if (ret == -ENOSPC) {
3744                         enospc_errors++;
3745                 } else if (ret == -ETXTBSY) {
3746                         btrfs_info(fs_info,
3747            "skipping relocation of block group %llu due to active swapfile",
3748                                    found_key.offset);
3749                         ret = 0;
3750                 } else if (ret) {
3751                         goto error;
3752                 } else {
3753                         spin_lock(&fs_info->balance_lock);
3754                         bctl->stat.completed++;
3755                         spin_unlock(&fs_info->balance_lock);
3756                 }
3757 loop:
3758                 if (found_key.offset == 0)
3759                         break;
3760                 key.offset = found_key.offset - 1;
3761         }
3762
3763         if (counting) {
3764                 btrfs_release_path(path);
3765                 counting = false;
3766                 goto again;
3767         }
3768 error:
3769         btrfs_free_path(path);
3770         if (enospc_errors) {
3771                 btrfs_info(fs_info, "%d enospc errors during balance",
3772                            enospc_errors);
3773                 if (!ret)
3774                         ret = -ENOSPC;
3775         }
3776
3777         return ret;
3778 }
3779
3780 /**
3781  * alloc_profile_is_valid - see if a given profile is valid and reduced
3782  * @flags: profile to validate
3783  * @extended: if true @flags is treated as an extended profile
3784  */
3785 static int alloc_profile_is_valid(u64 flags, int extended)
3786 {
3787         u64 mask = (extended ? BTRFS_EXTENDED_PROFILE_MASK :
3788                                BTRFS_BLOCK_GROUP_PROFILE_MASK);
3789
3790         flags &= ~BTRFS_BLOCK_GROUP_TYPE_MASK;
3791
3792         /* 1) check that all other bits are zeroed */
3793         if (flags & ~mask)
3794                 return 0;
3795
3796         /* 2) see if profile is reduced */
3797         if (flags == 0)
3798                 return !extended; /* "0" is valid for usual profiles */
3799
3800         return has_single_bit_set(flags);
3801 }
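
/*
 * Examples: BTRFS_BLOCK_GROUP_RAID1 alone is valid, a combined
 * RAID1|RAID10 value is not reduced and is rejected, and 0 is valid only
 * for non-extended profiles, where it stands for SINGLE.
 */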
3802
3803 static inline int balance_need_close(struct btrfs_fs_info *fs_info)
3804 {
3805         /* cancel requested || normal exit path */
3806         return atomic_read(&fs_info->balance_cancel_req) ||
3807                 (atomic_read(&fs_info->balance_pause_req) == 0 &&
3808                  atomic_read(&fs_info->balance_cancel_req) == 0);
3809 }
3810
3811 /*
3812  * Validate target profile against allowed profiles and return true if it's OK.
3813  * Otherwise print the error message and return false.
3814  */
3815 static inline int validate_convert_profile(struct btrfs_fs_info *fs_info,
3816                 const struct btrfs_balance_args *bargs,
3817                 u64 allowed, const char *type)
3818 {
3819         if (!(bargs->flags & BTRFS_BALANCE_ARGS_CONVERT))
3820                 return true;
3821
3822         /* Profile is valid and does not have bits outside of the allowed set */
3823         if (alloc_profile_is_valid(bargs->target, 1) &&
3824             (bargs->target & ~allowed) == 0)
3825                 return true;
3826
3827         btrfs_err(fs_info, "balance: invalid convert %s profile %s",
3828                         type, btrfs_bg_type_to_raid_name(bargs->target));
3829         return false;
3830 }
3831
3832 /*
3833  * Fill @buf with a textual description of the balance filter flags @bargs,
3834  * up to @size_buf bytes including the terminating null. The output may be
3835  * trimmed if it does not fit into the provided buffer.
3836  */
3837 static void describe_balance_args(struct btrfs_balance_args *bargs, char *buf,
3838                                  u32 size_buf)
3839 {
3840         int ret;
3841         u32 size_bp = size_buf;
3842         char *bp = buf;
3843         u64 flags = bargs->flags;
3844         char tmp_buf[128] = {'\0'};
3845
3846         if (!flags)
3847                 return;
3848
3849 #define CHECK_APPEND_NOARG(a)                                           \
3850         do {                                                            \
3851                 ret = snprintf(bp, size_bp, (a));                       \
3852                 if (ret < 0 || ret >= size_bp)                          \
3853                         goto out_overflow;                              \
3854                 size_bp -= ret;                                         \
3855                 bp += ret;                                              \
3856         } while (0)
3857
3858 #define CHECK_APPEND_1ARG(a, v1)                                        \
3859         do {                                                            \
3860                 ret = snprintf(bp, size_bp, (a), (v1));                 \
3861                 if (ret < 0 || ret >= size_bp)                          \
3862                         goto out_overflow;                              \
3863                 size_bp -= ret;                                         \
3864                 bp += ret;                                              \
3865         } while (0)
3866
3867 #define CHECK_APPEND_2ARG(a, v1, v2)                                    \
3868         do {                                                            \
3869                 ret = snprintf(bp, size_bp, (a), (v1), (v2));           \
3870                 if (ret < 0 || ret >= size_bp)                          \
3871                         goto out_overflow;                              \
3872                 size_bp -= ret;                                         \
3873                 bp += ret;                                              \
3874         } while (0)
3875
3876         if (flags & BTRFS_BALANCE_ARGS_CONVERT)
3877                 CHECK_APPEND_1ARG("convert=%s,",
3878                                   btrfs_bg_type_to_raid_name(bargs->target));
3879
3880         if (flags & BTRFS_BALANCE_ARGS_SOFT)
3881                 CHECK_APPEND_NOARG("soft,");
3882
3883         if (flags & BTRFS_BALANCE_ARGS_PROFILES) {
3884                 btrfs_describe_block_groups(bargs->profiles, tmp_buf,
3885                                             sizeof(tmp_buf));
3886                 CHECK_APPEND_1ARG("profiles=%s,", tmp_buf);
3887         }
3888
3889         if (flags & BTRFS_BALANCE_ARGS_USAGE)
3890                 CHECK_APPEND_1ARG("usage=%llu,", bargs->usage);
3891
3892         if (flags & BTRFS_BALANCE_ARGS_USAGE_RANGE)
3893                 CHECK_APPEND_2ARG("usage=%u..%u,",
3894                                   bargs->usage_min, bargs->usage_max);
3895
3896         if (flags & BTRFS_BALANCE_ARGS_DEVID)
3897                 CHECK_APPEND_1ARG("devid=%llu,", bargs->devid);
3898
3899         if (flags & BTRFS_BALANCE_ARGS_DRANGE)
3900                 CHECK_APPEND_2ARG("drange=%llu..%llu,",
3901                                   bargs->pstart, bargs->pend);
3902
3903         if (flags & BTRFS_BALANCE_ARGS_VRANGE)
3904                 CHECK_APPEND_2ARG("vrange=%llu..%llu,",
3905                                   bargs->vstart, bargs->vend);
3906
3907         if (flags & BTRFS_BALANCE_ARGS_LIMIT)
3908                 CHECK_APPEND_1ARG("limit=%llu,", bargs->limit);
3909
3910         if (flags & BTRFS_BALANCE_ARGS_LIMIT_RANGE)
3911                 CHECK_APPEND_2ARG("limit=%u..%u,",
3912                                 bargs->limit_min, bargs->limit_max);
3913
3914         if (flags & BTRFS_BALANCE_ARGS_STRIPES_RANGE)
3915                 CHECK_APPEND_2ARG("stripes=%u..%u,",
3916                                   bargs->stripes_min, bargs->stripes_max);
3917
3918 #undef CHECK_APPEND_2ARG
3919 #undef CHECK_APPEND_1ARG
3920 #undef CHECK_APPEND_NOARG
3921
3922 out_overflow:
3923
3924         if (size_bp < size_buf)
3925                 buf[size_buf - size_bp - 1] = '\0'; /* remove last , */
3926         else
3927                 buf[0] = '\0';
3928 }
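
/*
 * Example output (hedged): for -dconvert=raid1,soft,usage=90 the buffer
 * ends up as "convert=raid1,soft,usage=90", the trailing comma having been
 * stripped by the out_overflow fixup above.
 */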
3929
3930 static void describe_balance_start_or_resume(struct btrfs_fs_info *fs_info)
3931 {
3932         u32 size_buf = 1024;
3933         char tmp_buf[192] = {'\0'};
3934         char *buf;
3935         char *bp;
3936         u32 size_bp = size_buf;
3937         int ret;
3938         struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3939
3940         buf = kzalloc(size_buf, GFP_KERNEL);
3941         if (!buf)
3942                 return;
3943
3944         bp = buf;
3945
3946 #define CHECK_APPEND_1ARG(a, v1)                                        \
3947         do {                                                            \
3948                 ret = snprintf(bp, size_bp, (a), (v1));                 \
3949                 if (ret < 0 || ret >= size_bp)                          \
3950                         goto out_overflow;                              \
3951                 size_bp -= ret;                                         \
3952                 bp += ret;                                              \
3953         } while (0)
3954
3955         if (bctl->flags & BTRFS_BALANCE_FORCE)
3956                 CHECK_APPEND_1ARG("%s", "-f ");
3957
3958         if (bctl->flags & BTRFS_BALANCE_DATA) {
3959                 describe_balance_args(&bctl->data, tmp_buf, sizeof(tmp_buf));
3960                 CHECK_APPEND_1ARG("-d%s ", tmp_buf);
3961         }
3962
3963         if (bctl->flags & BTRFS_BALANCE_METADATA) {
3964                 describe_balance_args(&bctl->meta, tmp_buf, sizeof(tmp_buf));
3965                 CHECK_APPEND_1ARG("-m%s ", tmp_buf);
3966         }
3967
3968         if (bctl->flags & BTRFS_BALANCE_SYSTEM) {
3969                 describe_balance_args(&bctl->sys, tmp_buf, sizeof(tmp_buf));
3970                 CHECK_APPEND_1ARG("-s%s ", tmp_buf);
3971         }
3972
3973 #undef CHECK_APPEND_1ARG
3974
3975 out_overflow:
3976
3977         if (size_bp < size_buf)
3978                 buf[size_buf - size_bp - 1] = '\0'; /* remove last " " */
3979         btrfs_info(fs_info, "balance: %s %s",
3980                    (bctl->flags & BTRFS_BALANCE_RESUME) ?
3981                    "resume" : "start", buf);
3982
3983         kfree(buf);
3984 }
3985
3986 /*
3987  * Should be called with the balance mutex held.
3988  */
3989 int btrfs_balance(struct btrfs_fs_info *fs_info,
3990                   struct btrfs_balance_control *bctl,
3991                   struct btrfs_ioctl_balance_args *bargs)
3992 {
3993         u64 meta_target, data_target;
3994         u64 allowed;
3995         int mixed = 0;
3996         int ret;
3997         u64 num_devices;
3998         unsigned seq;
3999         bool reducing_redundancy;
4000         int i;
4001
4002         if (btrfs_fs_closing(fs_info) ||
4003             atomic_read(&fs_info->balance_pause_req) ||
4004             btrfs_should_cancel_balance(fs_info)) {
4005                 ret = -EINVAL;
4006                 goto out;
4007         }
4008
4009         allowed = btrfs_super_incompat_flags(fs_info->super_copy);
4010         if (allowed & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
4011                 mixed = 1;
4012
4013         /*
4014          * In case of mixed groups both data and meta should be picked,
4015          * and identical options should be given for both of them.
4016          */
4017         allowed = BTRFS_BALANCE_DATA | BTRFS_BALANCE_METADATA;
4018         if (mixed && (bctl->flags & allowed)) {
4019                 if (!(bctl->flags & BTRFS_BALANCE_DATA) ||
4020                     !(bctl->flags & BTRFS_BALANCE_METADATA) ||
4021                     memcmp(&bctl->data, &bctl->meta, sizeof(bctl->data))) {
4022                         btrfs_err(fs_info,
4023           "balance: mixed groups data and metadata options must be the same");
4024                         ret = -EINVAL;
4025                         goto out;
4026                 }
4027         }
4028
4029         /*
4030          * rw_devices will not change at the moment, device add/delete/replace
4031          * are exclusive
4032          */
4033         num_devices = fs_info->fs_devices->rw_devices;
4034
4035         /*
4036          * SINGLE profile on-disk has no profile bit, but in-memory we have a
4037          * special bit for it, to make it easier to distinguish.  Thus we need
4038          * to set it manually, or balance would refuse the profile.
4039          */
4040         allowed = BTRFS_AVAIL_ALLOC_BIT_SINGLE;
4041         for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++)
4042                 if (num_devices >= btrfs_raid_array[i].devs_min)
4043                         allowed |= btrfs_raid_array[i].bg_flag;
4044
4045         if (!validate_convert_profile(fs_info, &bctl->data, allowed, "data") ||
4046             !validate_convert_profile(fs_info, &bctl->meta, allowed, "metadata") ||
4047             !validate_convert_profile(fs_info, &bctl->sys,  allowed, "system")) {
4048                 ret = -EINVAL;
4049                 goto out;
4050         }
4051
4052         /*
4053          * Allow to reduce metadata or system integrity only if force set for
4054          * profiles with redundancy (copies, parity)
4055          */
4056         allowed = 0;
4057         for (i = 0; i < ARRAY_SIZE(btrfs_raid_array); i++) {
4058                 if (btrfs_raid_array[i].ncopies >= 2 ||
4059                     btrfs_raid_array[i].tolerated_failures >= 1)
4060                         allowed |= btrfs_raid_array[i].bg_flag;
4061         }
4062         do {
4063                 seq = read_seqbegin(&fs_info->profiles_lock);
4064
4065                 if (((bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
4066                      (fs_info->avail_system_alloc_bits & allowed) &&
4067                      !(bctl->sys.target & allowed)) ||
4068                     ((bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) &&
4069                      (fs_info->avail_metadata_alloc_bits & allowed) &&
4070                      !(bctl->meta.target & allowed)))
4071                         reducing_redundancy = true;
4072                 else
4073                         reducing_redundancy = false;
4074
4075                 /* if we're not converting, the target field is uninitialized */
4076                 meta_target = (bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
4077                         bctl->meta.target : fs_info->avail_metadata_alloc_bits;
4078                 data_target = (bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) ?
4079                         bctl->data.target : fs_info->avail_data_alloc_bits;
4080         } while (read_seqretry(&fs_info->profiles_lock, seq));
4081
4082         if (reducing_redundancy) {
4083                 if (bctl->flags & BTRFS_BALANCE_FORCE) {
4084                         btrfs_info(fs_info,
4085                            "balance: force reducing metadata redundancy");
4086                 } else {
4087                         btrfs_err(fs_info,
4088         "balance: reduces metadata redundancy, use --force if you want this");
4089                         ret = -EINVAL;
4090                         goto out;
4091                 }
4092         }
4093
4094         if (btrfs_get_num_tolerated_disk_barrier_failures(meta_target) <
4095                 btrfs_get_num_tolerated_disk_barrier_failures(data_target)) {
4096                 btrfs_warn(fs_info,
4097         "balance: metadata profile %s has lower redundancy than data profile %s",
4098                                 btrfs_bg_type_to_raid_name(meta_target),
4099                                 btrfs_bg_type_to_raid_name(data_target));
4100         }
4101
4102         if (fs_info->send_in_progress) {
4103                 btrfs_warn_rl(fs_info,
4104 "cannot run balance while send operations are in progress (%d in progress)",
4105                               fs_info->send_in_progress);
4106                 ret = -EAGAIN;
4107                 goto out;
4108         }
4109
4110         ret = insert_balance_item(fs_info, bctl);
4111         if (ret && ret != -EEXIST)
4112                 goto out;
4113
4114         if (!(bctl->flags & BTRFS_BALANCE_RESUME)) {
4115                 BUG_ON(ret == -EEXIST);
4116                 BUG_ON(fs_info->balance_ctl);
4117                 spin_lock(&fs_info->balance_lock);
4118                 fs_info->balance_ctl = bctl;
4119                 spin_unlock(&fs_info->balance_lock);
4120         } else {
4121                 BUG_ON(ret != -EEXIST);
4122                 spin_lock(&fs_info->balance_lock);
4123                 update_balance_args(bctl);
4124                 spin_unlock(&fs_info->balance_lock);
4125         }
4126
4127         ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4128         set_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);
4129         describe_balance_start_or_resume(fs_info);
4130         mutex_unlock(&fs_info->balance_mutex);
4131
4132         ret = __btrfs_balance(fs_info);
4133
4134         mutex_lock(&fs_info->balance_mutex);
4135         if (ret == -ECANCELED && atomic_read(&fs_info->balance_pause_req))
4136                 btrfs_info(fs_info, "balance: paused");
4137         /*
4138          * Balance can be canceled by:
4139          *
4140          * - Regular cancel request
4141          *   Then ret == -ECANCELED and balance_cancel_req > 0
4142          *
4143          * - Fatal signal to "btrfs" process
4144          *   Either the signal caught by wait_reserve_ticket() and callers
4145          *   got -EINTR, or caught by btrfs_should_cancel_balance() and
4146          *   got -ECANCELED.
4147          *   Either way, in this case balance_cancel_req = 0, and
4148          *   ret == -EINTR or ret == -ECANCELED.
4149          *
4150          * So here we only check the return value to catch canceled balance.
4151          */
4152         else if (ret == -ECANCELED || ret == -EINTR)
4153                 btrfs_info(fs_info, "balance: canceled");
4154         else
4155                 btrfs_info(fs_info, "balance: ended with status: %d", ret);
4156
4157         clear_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags);
4158
4159         if (bargs) {
4160                 memset(bargs, 0, sizeof(*bargs));
4161                 btrfs_update_ioctl_balance_args(fs_info, bargs);
4162         }
4163
4164         if ((ret && ret != -ECANCELED && ret != -ENOSPC) ||
4165             balance_need_close(fs_info)) {
4166                 reset_balance_state(fs_info);
4167                 btrfs_exclop_finish(fs_info);
4168         }
4169
4170         wake_up(&fs_info->balance_wait_q);
4171
4172         return ret;
4173 out:
4174         if (bctl->flags & BTRFS_BALANCE_RESUME)
4175                 reset_balance_state(fs_info);
4176         else
4177                 kfree(bctl);
4178         btrfs_exclop_finish(fs_info);
4179
4180         return ret;
4181 }
4182
4183 static int balance_kthread(void *data)
4184 {
4185         struct btrfs_fs_info *fs_info = data;
4186         int ret = 0;
4187
4188         mutex_lock(&fs_info->balance_mutex);
4189         if (fs_info->balance_ctl)
4190                 ret = btrfs_balance(fs_info, fs_info->balance_ctl, NULL);
4191         mutex_unlock(&fs_info->balance_mutex);
4192
4193         return ret;
4194 }
4195
4196 int btrfs_resume_balance_async(struct btrfs_fs_info *fs_info)
4197 {
4198         struct task_struct *tsk;
4199
4200         mutex_lock(&fs_info->balance_mutex);
4201         if (!fs_info->balance_ctl) {
4202                 mutex_unlock(&fs_info->balance_mutex);
4203                 return 0;
4204         }
4205         mutex_unlock(&fs_info->balance_mutex);
4206
4207         if (btrfs_test_opt(fs_info, SKIP_BALANCE)) {
4208                 btrfs_info(fs_info, "balance: resume skipped");
4209                 return 0;
4210         }
4211
4212         /*
4213          * A ro->rw remount sequence should continue with the paused balance
4214          * regardless of who paused it (the system or the user), so set
4215          * the resume flag.
4216          */
4217         spin_lock(&fs_info->balance_lock);
4218         fs_info->balance_ctl->flags |= BTRFS_BALANCE_RESUME;
4219         spin_unlock(&fs_info->balance_lock);
4220
4221         tsk = kthread_run(balance_kthread, fs_info, "btrfs-balance");
4222         return PTR_ERR_OR_ZERO(tsk);
4223 }
4224
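     /*
      * Read the balance item from the tree root, if present, and rebuild the
      * in-memory balance_ctl so an interrupted balance can be resumed.
      * Called during mount.
      */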
4225 int btrfs_recover_balance(struct btrfs_fs_info *fs_info)
4226 {
4227         struct btrfs_balance_control *bctl;
4228         struct btrfs_balance_item *item;
4229         struct btrfs_disk_balance_args disk_bargs;
4230         struct btrfs_path *path;
4231         struct extent_buffer *leaf;
4232         struct btrfs_key key;
4233         int ret;
4234
4235         path = btrfs_alloc_path();
4236         if (!path)
4237                 return -ENOMEM;
4238
4239         key.objectid = BTRFS_BALANCE_OBJECTID;
4240         key.type = BTRFS_TEMPORARY_ITEM_KEY;
4241         key.offset = 0;
4242
4243         ret = btrfs_search_slot(NULL, fs_info->tree_root, &key, path, 0, 0);
4244         if (ret < 0)
4245                 goto out;
4246         if (ret > 0) { /* ret = -ENOENT; */
4247                 ret = 0;
4248                 goto out;
4249         }
4250
4251         bctl = kzalloc(sizeof(*bctl), GFP_NOFS);
4252         if (!bctl) {
4253                 ret = -ENOMEM;
4254                 goto out;
4255         }
4256
4257         leaf = path->nodes[0];
4258         item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_balance_item);
4259
4260         bctl->flags = btrfs_balance_flags(leaf, item);
4261         bctl->flags |= BTRFS_BALANCE_RESUME;
4262
4263         btrfs_balance_data(leaf, item, &disk_bargs);
4264         btrfs_disk_balance_args_to_cpu(&bctl->data, &disk_bargs);
4265         btrfs_balance_meta(leaf, item, &disk_bargs);
4266         btrfs_disk_balance_args_to_cpu(&bctl->meta, &disk_bargs);
4267         btrfs_balance_sys(leaf, item, &disk_bargs);
4268         btrfs_disk_balance_args_to_cpu(&bctl->sys, &disk_bargs);
4269
4270         /*
4271          * This should never happen, as the paused balance state is recovered
4272          * during mount, without any chance for other exclusive ops to collide.
4273          *
4274          * This gives the exclusive op status to balance and keeps it in paused
4275          * state until user intervention (cancel or umount). If the ownership
4276          * cannot be assigned, show a message but do not fail. The balance
4277          * is in a paused state and must have fs_info::balance_ctl properly
4278          * set up.
4279          */
4280         if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE))
4281                 btrfs_warn(fs_info,
4282         "balance: cannot set exclusive op status, resume manually");
4283
4284         mutex_lock(&fs_info->balance_mutex);
4285         BUG_ON(fs_info->balance_ctl);
4286         spin_lock(&fs_info->balance_lock);
4287         fs_info->balance_ctl = bctl;
4288         spin_unlock(&fs_info->balance_lock);
4289         mutex_unlock(&fs_info->balance_mutex);
4290 out:
4291         btrfs_free_path(path);
4292         return ret;
4293 }
4294
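     /*
      * Request a running balance to pause and wait until it clears
      * BTRFS_FS_BALANCE_RUNNING.  Returns -ENOTCONN if no balance is
      * running.
      */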
4295 int btrfs_pause_balance(struct btrfs_fs_info *fs_info)
4296 {
4297         int ret = 0;
4298
4299         mutex_lock(&fs_info->balance_mutex);
4300         if (!fs_info->balance_ctl) {
4301                 mutex_unlock(&fs_info->balance_mutex);
4302                 return -ENOTCONN;
4303         }
4304
4305         if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
4306                 atomic_inc(&fs_info->balance_pause_req);
4307                 mutex_unlock(&fs_info->balance_mutex);
4308
4309                 wait_event(fs_info->balance_wait_q,
4310                            !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4311
4312                 mutex_lock(&fs_info->balance_mutex);
4313                 /* we are good with balance_ctl ripped off from under us */
4314                 BUG_ON(test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4315                 atomic_dec(&fs_info->balance_pause_req);
4316         } else {
4317                 ret = -ENOTCONN;
4318         }
4319
4320         mutex_unlock(&fs_info->balance_mutex);
4321         return ret;
4322 }
4323
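     /*
      * Cancel a running or paused balance.  The on-disk balance item is
      * deleted either by the running balance itself or, for a paused
      * balance, by reset_balance_state() below.
      */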
4324 int btrfs_cancel_balance(struct btrfs_fs_info *fs_info)
4325 {
4326         mutex_lock(&fs_info->balance_mutex);
4327         if (!fs_info->balance_ctl) {
4328                 mutex_unlock(&fs_info->balance_mutex);
4329                 return -ENOTCONN;
4330         }
4331
4332         /*
4333          * A paused balance with the item stored on disk can be resumed at
4334          * mount time if the mount is read-write. Otherwise it's still paused
4335          * and we must not allow cancelling as it deletes the item.
4336          */
4337         if (sb_rdonly(fs_info->sb)) {
4338                 mutex_unlock(&fs_info->balance_mutex);
4339                 return -EROFS;
4340         }
4341
4342         atomic_inc(&fs_info->balance_cancel_req);
4343         /*
4344          * If balance is running, just wait for it to finish and return;
4345          * the balance item is deleted in btrfs_balance() in that case.
4346          */
4347         if (test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)) {
4348                 mutex_unlock(&fs_info->balance_mutex);
4349                 wait_event(fs_info->balance_wait_q,
4350                            !test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4351                 mutex_lock(&fs_info->balance_mutex);
4352         } else {
4353                 mutex_unlock(&fs_info->balance_mutex);
4354                 /*
4355                  * Lock released to allow other waiters to continue, we'll
4356                  * reexamine the status again.
4357                  */
4358                 mutex_lock(&fs_info->balance_mutex);
4359
4360                 if (fs_info->balance_ctl) {
4361                         reset_balance_state(fs_info);
4362                         btrfs_exclop_finish(fs_info);
4363                         btrfs_info(fs_info, "balance: canceled");
4364                 }
4365         }
4366
4367         BUG_ON(fs_info->balance_ctl ||
4368                 test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags));
4369         atomic_dec(&fs_info->balance_cancel_req);
4370         mutex_unlock(&fs_info->balance_mutex);
4371         return 0;
4372 }
4373
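     /*
      * Walk all root items and add missing subvolume UUID entries to the
      * uuid tree.  Started as a kthread, e.g. from btrfs_create_uuid_tree()
      * below.
      */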
4374 int btrfs_uuid_scan_kthread(void *data)
4375 {
4376         struct btrfs_fs_info *fs_info = data;
4377         struct btrfs_root *root = fs_info->tree_root;
4378         struct btrfs_key key;
4379         struct btrfs_path *path = NULL;
4380         int ret = 0;
4381         struct extent_buffer *eb;
4382         int slot;
4383         struct btrfs_root_item root_item;
4384         u32 item_size;
4385         struct btrfs_trans_handle *trans = NULL;
4386         bool closing = false;
4387
4388         path = btrfs_alloc_path();
4389         if (!path) {
4390                 ret = -ENOMEM;
4391                 goto out;
4392         }
4393
4394         key.objectid = 0;
4395         key.type = BTRFS_ROOT_ITEM_KEY;
4396         key.offset = 0;
4397
4398         while (1) {
4399                 if (btrfs_fs_closing(fs_info)) {
4400                         closing = true;
4401                         break;
4402                 }
4403                 ret = btrfs_search_forward(root, &key, path,
4404                                 BTRFS_OLDEST_GENERATION);
4405                 if (ret) {
4406                         if (ret > 0)
4407                                 ret = 0;
4408                         break;
4409                 }
4410
4411                 if (key.type != BTRFS_ROOT_ITEM_KEY ||
4412                     (key.objectid < BTRFS_FIRST_FREE_OBJECTID &&
4413                      key.objectid != BTRFS_FS_TREE_OBJECTID) ||
4414                     key.objectid > BTRFS_LAST_FREE_OBJECTID)
4415                         goto skip;
4416
4417                 eb = path->nodes[0];
4418                 slot = path->slots[0];
4419                 item_size = btrfs_item_size_nr(eb, slot);
4420                 if (item_size < sizeof(root_item))
4421                         goto skip;
4422
4423                 read_extent_buffer(eb, &root_item,
4424                                    btrfs_item_ptr_offset(eb, slot),
4425                                    (int)sizeof(root_item));
4426                 if (btrfs_root_refs(&root_item) == 0)
4427                         goto skip;
4428
4429                 if (!btrfs_is_empty_uuid(root_item.uuid) ||
4430                     !btrfs_is_empty_uuid(root_item.received_uuid)) {
4431                         if (trans)
4432                                 goto update_tree;
4433
4434                         btrfs_release_path(path);
4435                         /*
4436                          * 1 - subvol uuid item
4437                          * 1 - received_subvol uuid item
4438                          */
4439                         trans = btrfs_start_transaction(fs_info->uuid_root, 2);
4440                         if (IS_ERR(trans)) {
4441                                 ret = PTR_ERR(trans);
4442                                 break;
4443                         }
4444                         continue;
4445                 } else {
4446                         goto skip;
4447                 }
4448 update_tree:
4449                 btrfs_release_path(path);
4450                 if (!btrfs_is_empty_uuid(root_item.uuid)) {
4451                         ret = btrfs_uuid_tree_add(trans, root_item.uuid,
4452                                                   BTRFS_UUID_KEY_SUBVOL,
4453                                                   key.objectid);
4454                         if (ret < 0) {
4455                                 btrfs_warn(fs_info, "uuid_tree_add failed %d",
4456                                         ret);
4457                                 break;
4458                         }
4459                 }
4460
4461                 if (!btrfs_is_empty_uuid(root_item.received_uuid)) {
4462                         ret = btrfs_uuid_tree_add(trans,
4463                                                   root_item.received_uuid,
4464                                                  BTRFS_UUID_KEY_RECEIVED_SUBVOL,
4465                                                   key.objectid);
4466                         if (ret < 0) {
4467                                 btrfs_warn(fs_info, "uuid_tree_add failed %d",
4468                                         ret);
4469                                 break;
4470                         }
4471                 }
4472
4473 skip:
4474                 btrfs_release_path(path);
4475                 if (trans) {
4476                         ret = btrfs_end_transaction(trans);
4477                         trans = NULL;
4478                         if (ret)
4479                                 break;
4480                 }
4481
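                     /*
                      * Advance the search cursor: bump key.offset first, then
                      * key.type, then key.objectid, matching the key order of
                      * the tree.
                      */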
4482                 if (key.offset < (u64)-1) {
4483                         key.offset++;
4484                 } else if (key.type < BTRFS_ROOT_ITEM_KEY) {
4485                         key.offset = 0;
4486                         key.type = BTRFS_ROOT_ITEM_KEY;
4487                 } else if (key.objectid < (u64)-1) {
4488                         key.offset = 0;
4489                         key.type = BTRFS_ROOT_ITEM_KEY;
4490                         key.objectid++;
4491                 } else {
4492                         break;
4493                 }
4494                 cond_resched();
4495         }
4496
4497 out:
4498         btrfs_free_path(path);
4499         if (trans && !IS_ERR(trans))
4500                 btrfs_end_transaction(trans);
4501         if (ret)
4502                 btrfs_warn(fs_info, "btrfs_uuid_scan_kthread failed %d", ret);
4503         else if (!closing)
4504                 set_bit(BTRFS_FS_UPDATE_UUID_TREE_GEN, &fs_info->flags);
4505         up(&fs_info->uuid_tree_rescan_sem);
4506         return 0;
4507 }
4508
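     /*
      * Create the uuid tree and start btrfs_uuid_scan_kthread() to populate
      * it from the existing subvolumes.
      */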
4509 int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info)
4510 {
4511         struct btrfs_trans_handle *trans;
4512         struct btrfs_root *tree_root = fs_info->tree_root;
4513         struct btrfs_root *uuid_root;
4514         struct task_struct *task;
4515         int ret;
4516
4517         /*
4518          * 1 - root node
4519          * 1 - root item
4520          */
4521         trans = btrfs_start_transaction(tree_root, 2);
4522         if (IS_ERR(trans))
4523                 return PTR_ERR(trans);
4524
4525         uuid_root = btrfs_create_tree(trans, BTRFS_UUID_TREE_OBJECTID);
4526         if (IS_ERR(uuid_root)) {
4527                 ret = PTR_ERR(uuid_root);
4528                 btrfs_abort_transaction(trans, ret);
4529                 btrfs_end_transaction(trans);
4530                 return ret;
4531         }
4532
4533         fs_info->uuid_root = uuid_root;
4534
4535         ret = btrfs_commit_transaction(trans);
4536         if (ret)
4537                 return ret;
4538
4539         down(&fs_info->uuid_tree_rescan_sem);
4540         task = kthread_run(btrfs_uuid_scan_kthread, fs_info, "btrfs-uuid");
4541         if (IS_ERR(task)) {
4542                 /* BTRFS_FS_UPDATE_UUID_TREE_GEN remains unset in all error cases */
4543                 btrfs_warn(fs_info, "failed to start uuid_scan task");
4544                 up(&fs_info->uuid_tree_rescan_sem);
4545                 return PTR_ERR(task);
4546         }
4547
4548         return 0;
4549 }
4550
4551 /*
4552  * Shrinking a device means finding all of the device extents past
4553  * the new size, and then following the back refs to the chunks.
4554  * The chunk relocation code actually frees the device extents.
4555  */
4556 int btrfs_shrink_device(struct btrfs_device *device, u64 new_size)
4557 {
4558         struct btrfs_fs_info *fs_info = device->fs_info;
4559         struct btrfs_root *root = fs_info->dev_root;
4560         struct btrfs_trans_handle *trans;
4561         struct btrfs_dev_extent *dev_extent = NULL;
4562         struct btrfs_path *path;
4563         u64 length;
4564         u64 chunk_offset;
4565         int ret;
4566         int slot;
4567         int failed = 0;
4568         bool retried = false;
4569         struct extent_buffer *l;
4570         struct btrfs_key key;
4571         struct btrfs_super_block *super_copy = fs_info->super_copy;
4572         u64 old_total = btrfs_super_total_bytes(super_copy);
4573         u64 old_size = btrfs_device_get_total_bytes(device);
4574         u64 diff;
4575         u64 start;
4576
4577         new_size = round_down(new_size, fs_info->sectorsize);
4578         start = new_size;
4579         diff = round_down(old_size - new_size, fs_info->sectorsize);
4580
4581         if (test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
4582                 return -EINVAL;
4583
4584         path = btrfs_alloc_path();
4585         if (!path)
4586                 return -ENOMEM;
4587
4588         path->reada = READA_BACK;
4589
4590         trans = btrfs_start_transaction(root, 0);
4591         if (IS_ERR(trans)) {
4592                 btrfs_free_path(path);
4593                 return PTR_ERR(trans);
4594         }
4595
4596         mutex_lock(&fs_info->chunk_mutex);
4597
4598         btrfs_device_set_total_bytes(device, new_size);
4599         if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
4600                 device->fs_devices->total_rw_bytes -= diff;
4601                 atomic64_sub(diff, &fs_info->free_chunk_space);
4602         }
4603
4604         /*
4605          * Once the device's size has been set to the new size, ensure all
4606          * in-memory chunks are synced to disk so that the loop below sees them
4607          * and relocates them accordingly.
4608          */
4609         if (contains_pending_extent(device, &start, diff)) {
4610                 mutex_unlock(&fs_info->chunk_mutex);
4611                 ret = btrfs_commit_transaction(trans);
4612                 if (ret)
4613                         goto done;
4614         } else {
4615                 mutex_unlock(&fs_info->chunk_mutex);
4616                 btrfs_end_transaction(trans);
4617         }
4618
4619 again:
4620         key.objectid = device->devid;
4621         key.offset = (u64)-1;
4622         key.type = BTRFS_DEV_EXTENT_KEY;
4623
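             /*
              * Walk the device extents backwards from the end of the device
              * and relocate every chunk that reaches past the new size.  The
              * search starts at key.offset = (u64)-1, and the loop condition
              * decrements the last found offset to continue below it.
              */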
4624         do {
4625                 mutex_lock(&fs_info->delete_unused_bgs_mutex);
4626                 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4627                 if (ret < 0) {
4628                         mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4629                         goto done;
4630                 }
4631
4632                 ret = btrfs_previous_item(root, path, 0, key.type);
4633                 if (ret)
4634                         mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4635                 if (ret < 0)
4636                         goto done;
4637                 if (ret) {
4638                         ret = 0;
4639                         btrfs_release_path(path);
4640                         break;
4641                 }
4642
4643                 l = path->nodes[0];
4644                 slot = path->slots[0];
4645                 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
4646
4647                 if (key.objectid != device->devid) {
4648                         mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4649                         btrfs_release_path(path);
4650                         break;
4651                 }
4652
4653                 dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
4654                 length = btrfs_dev_extent_length(l, dev_extent);
4655
4656                 if (key.offset + length <= new_size) {
4657                         mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4658                         btrfs_release_path(path);
4659                         break;
4660                 }
4661
4662                 chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent);
4663                 btrfs_release_path(path);
4664
4665                 /*
4666                  * We may be relocating the only data chunk we have,
4667                  * which could potentially end up losing the data's
4668                  * raid profile, so let's allocate an empty one in
4669                  * advance.
4670                  */
4671                 ret = btrfs_may_alloc_data_chunk(fs_info, chunk_offset);
4672                 if (ret < 0) {
4673                         mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4674                         goto done;
4675                 }
4676
4677                 ret = btrfs_relocate_chunk(fs_info, chunk_offset);
4678                 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
4679                 if (ret == -ENOSPC) {
4680                         failed++;
4681                 } else if (ret) {
4682                         if (ret == -ETXTBSY) {
4683                                 btrfs_warn(fs_info,
4684                    "could not shrink block group %llu due to active swapfile",
4685                                            chunk_offset);
4686                         }
4687                         goto done;
4688                 }
4689         } while (key.offset-- > 0);
4690
4691         if (failed && !retried) {
4692                 failed = 0;
4693                 retried = true;
4694                 goto again;
4695         } else if (failed && retried) {
4696                 ret = -ENOSPC;
4697                 goto done;
4698         }
4699
4700         /* Shrinking succeeded, else we would be at "done". */
4701         trans = btrfs_start_transaction(root, 0);
4702         if (IS_ERR(trans)) {
4703                 ret = PTR_ERR(trans);
4704                 goto done;
4705         }
4706
4707         mutex_lock(&fs_info->chunk_mutex);
4708         /* Clear all state bits beyond the shrunk device size */
4709         clear_extent_bits(&device->alloc_state, new_size, (u64)-1,
4710                           CHUNK_STATE_MASK);
4711
4712         btrfs_device_set_disk_total_bytes(device, new_size);
4713         if (list_empty(&device->post_commit_list))
4714                 list_add_tail(&device->post_commit_list,
4715                               &trans->transaction->dev_update_list);
4716
4717         WARN_ON(diff > old_total);
4718         btrfs_set_super_total_bytes(super_copy,
4719                         round_down(old_total - diff, fs_info->sectorsize));
4720         mutex_unlock(&fs_info->chunk_mutex);
4721
4722         /* Now btrfs_update_device() will change the on-disk size. */
4723         ret = btrfs_update_device(trans, device);
4724         if (ret < 0) {
4725                 btrfs_abort_transaction(trans, ret);
4726                 btrfs_end_transaction(trans);
4727         } else {
4728                 ret = btrfs_commit_transaction(trans);
4729         }
4730 done:
4731         btrfs_free_path(path);
4732         if (ret) {
4733                 mutex_lock(&fs_info->chunk_mutex);
4734                 btrfs_device_set_total_bytes(device, old_size);
4735                 if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
4736                         device->fs_devices->total_rw_bytes += diff;
4737                 atomic64_add(diff, &fs_info->free_chunk_space);
4738                 mutex_unlock(&fs_info->chunk_mutex);
4739         }
4740         return ret;
4741 }
4742
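     /*
      * Append a copy of the chunk item to the superblock's sys_chunk_array
      * so system chunks can be found at mount time, before the chunk tree
      * itself is readable.  The array is a packed sequence of
      * (struct btrfs_disk_key, chunk item) pairs.
      */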
4743 static int btrfs_add_system_chunk(struct btrfs_fs_info *fs_info,
4744                            struct btrfs_key *key,
4745                            struct btrfs_chunk *chunk, int item_size)
4746 {
4747         struct btrfs_super_block *super_copy = fs_info->super_copy;
4748         struct btrfs_disk_key disk_key;
4749         u32 array_size;
4750         u8 *ptr;
4751
4752         mutex_lock(&fs_info->chunk_mutex);
4753         array_size = btrfs_super_sys_array_size(super_copy);
4754         if (array_size + item_size + sizeof(disk_key)
4755                         > BTRFS_SYSTEM_CHUNK_ARRAY_SIZE) {
4756                 mutex_unlock(&fs_info->chunk_mutex);
4757                 return -EFBIG;
4758         }
4759
4760         ptr = super_copy->sys_chunk_array + array_size;
4761         btrfs_cpu_key_to_disk(&disk_key, key);
4762         memcpy(ptr, &disk_key, sizeof(disk_key));
4763         ptr += sizeof(disk_key);
4764         memcpy(ptr, chunk, item_size);
4765         item_size += sizeof(disk_key);
4766         btrfs_set_super_sys_array_size(super_copy, array_size + item_size);
4767         mutex_unlock(&fs_info->chunk_mutex);
4768
4769         return 0;
4770 }
4771
4772 /*
4773  * Sort the devices in descending order by max_avail, then by total_avail.
4774  */
4775 static int btrfs_cmp_device_info(const void *a, const void *b)
4776 {
4777         const struct btrfs_device_info *di_a = a;
4778         const struct btrfs_device_info *di_b = b;
4779
4780         if (di_a->max_avail > di_b->max_avail)
4781                 return -1;
4782         if (di_a->max_avail < di_b->max_avail)
4783                 return 1;
4784         if (di_a->total_avail > di_b->total_avail)
4785                 return -1;
4786         if (di_a->total_avail < di_b->total_avail)
4787                 return 1;
4788         return 0;
4789 }
4790
4791 static void check_raid56_incompat_flag(struct btrfs_fs_info *info, u64 type)
4792 {
4793         if (!(type & BTRFS_BLOCK_GROUP_RAID56_MASK))
4794                 return;
4795
4796         btrfs_set_fs_incompat(info, RAID56);
4797 }
4798
4799 static void check_raid1c34_incompat_flag(struct btrfs_fs_info *info, u64 type)
4800 {
4801         if (!(type & (BTRFS_BLOCK_GROUP_RAID1C3 | BTRFS_BLOCK_GROUP_RAID1C4)))
4802                 return;
4803
4804         btrfs_set_fs_incompat(info, RAID1C34);
4805 }
4806
4807 /*
4808  * Structure used internally by the btrfs_alloc_chunk() function.
4809  * Wraps needed parameters.
4810  */
4811 struct alloc_chunk_ctl {
4812         u64 start;
4813         u64 type;
4814         /* Total number of stripes to allocate */
4815         int num_stripes;
4816         /* sub_stripes info for map */
4817         int sub_stripes;
4818         /* Stripes per device */
4819         int dev_stripes;
4820         /* Maximum number of devices to use */
4821         int devs_max;
4822         /* Minimum number of devices to use */
4823         int devs_min;
4824         /* ndevs has to be a multiple of this */
4825         int devs_increment;
4826         /* Number of copies */
4827         int ncopies;
4828         /* Number of stripes worth of bytes to store parity information */
4829         int nparity;
4830         u64 max_stripe_size;
4831         u64 max_chunk_size;
4832         u64 dev_extent_min;
4833         u64 stripe_size;
4834         u64 chunk_size;
4835         int ndevs;
4836 };
4837
4838 static void init_alloc_chunk_ctl_policy_regular(
4839                                 struct btrfs_fs_devices *fs_devices,
4840                                 struct alloc_chunk_ctl *ctl)
4841 {
4842         u64 type = ctl->type;
4843
4844         if (type & BTRFS_BLOCK_GROUP_DATA) {
4845                 ctl->max_stripe_size = SZ_1G;
4846                 ctl->max_chunk_size = BTRFS_MAX_DATA_CHUNK_SIZE;
4847         } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
4848                 /* For larger filesystems, use larger metadata chunks */
4849                 if (fs_devices->total_rw_bytes > 50ULL * SZ_1G)
4850                         ctl->max_stripe_size = SZ_1G;
4851                 else
4852                         ctl->max_stripe_size = SZ_256M;
4853                 ctl->max_chunk_size = ctl->max_stripe_size;
4854         } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
4855                 ctl->max_stripe_size = SZ_32M;
4856                 ctl->max_chunk_size = 2 * ctl->max_stripe_size;
4857                 ctl->devs_max = min_t(int, ctl->devs_max,
4858                                       BTRFS_MAX_DEVS_SYS_CHUNK);
4859         } else {
4860                 BUG();
4861         }
4862
4863         /* We don't want a chunk larger than 10% of writable space */
4864         ctl->max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1),
4865                                   ctl->max_chunk_size);
4866         ctl->dev_extent_min = BTRFS_STRIPE_LEN * ctl->dev_stripes;
4867 }
4868
4869 static void init_alloc_chunk_ctl(struct btrfs_fs_devices *fs_devices,
4870                                  struct alloc_chunk_ctl *ctl)
4871 {
4872         int index = btrfs_bg_flags_to_raid_index(ctl->type);
4873
4874         ctl->sub_stripes = btrfs_raid_array[index].sub_stripes;
4875         ctl->dev_stripes = btrfs_raid_array[index].dev_stripes;
4876         ctl->devs_max = btrfs_raid_array[index].devs_max;
4877         if (!ctl->devs_max)
4878                 ctl->devs_max = BTRFS_MAX_DEVS(fs_devices->fs_info);
4879         ctl->devs_min = btrfs_raid_array[index].devs_min;
4880         ctl->devs_increment = btrfs_raid_array[index].devs_increment;
4881         ctl->ncopies = btrfs_raid_array[index].ncopies;
4882         ctl->nparity = btrfs_raid_array[index].nparity;
4883         ctl->ndevs = 0;
4884
4885         switch (fs_devices->chunk_alloc_policy) {
4886         case BTRFS_CHUNK_ALLOC_REGULAR:
4887                 init_alloc_chunk_ctl_policy_regular(fs_devices, ctl);
4888                 break;
4889         default:
4890                 BUG();
4891         }
4892 }
4893
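     /*
      * Collect free space information (largest hole and total unallocated
      * bytes) for each writeable device in the alloc_list, then sort the
      * array so the device with the largest hole comes first.
      */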
4894 static int gather_device_info(struct btrfs_fs_devices *fs_devices,
4895                               struct alloc_chunk_ctl *ctl,
4896                               struct btrfs_device_info *devices_info)
4897 {
4898         struct btrfs_fs_info *info = fs_devices->fs_info;
4899         struct btrfs_device *device;
4900         u64 total_avail;
4901         u64 dev_extent_want = ctl->max_stripe_size * ctl->dev_stripes;
4902         int ret;
4903         int ndevs = 0;
4904         u64 max_avail;
4905         u64 dev_offset;
4906
4907         /*
4908          * in the first pass through the devices list, we gather information
4909          * about the available holes on each device.
4910          */
4911         list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
4912                 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state)) {
4913                         WARN(1, KERN_ERR
4914                                "BTRFS: read-only device in alloc_list\n");
4915                         continue;
4916                 }
4917
4918                 if (!test_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
4919                                         &device->dev_state) ||
4920                     test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state))
4921                         continue;
4922
4923                 if (device->total_bytes > device->bytes_used)
4924                         total_avail = device->total_bytes - device->bytes_used;
4925                 else
4926                         total_avail = 0;
4927
4928                 /* If there is no space on this device, skip it. */
4929                 if (total_avail < ctl->dev_extent_min)
4930                         continue;
4931
4932                 ret = find_free_dev_extent(device, dev_extent_want, &dev_offset,
4933                                            &max_avail);
4934                 if (ret && ret != -ENOSPC)
4935                         return ret;
4936
4937                 if (ret == 0)
4938                         max_avail = dev_extent_want;
4939
4940                 if (max_avail < ctl->dev_extent_min) {
4941                         if (btrfs_test_opt(info, ENOSPC_DEBUG))
4942                                 btrfs_debug(info,
4943                         "%s: devid %llu has no free space, have=%llu want=%llu",
4944                                             __func__, device->devid, max_avail,
4945                                             ctl->dev_extent_min);
4946                         continue;
4947                 }
4948
4949                 if (ndevs == fs_devices->rw_devices) {
4950                         WARN(1, "%s: found more than %llu devices\n",
4951                              __func__, fs_devices->rw_devices);
4952                         break;
4953                 }
4954                 devices_info[ndevs].dev_offset = dev_offset;
4955                 devices_info[ndevs].max_avail = max_avail;
4956                 devices_info[ndevs].total_avail = total_avail;
4957                 devices_info[ndevs].dev = device;
4958                 ++ndevs;
4959         }
4960         ctl->ndevs = ndevs;
4961
4962         /*
4963          * now sort the devices by hole size / available space
4964          */
4965         sort(devices_info, ndevs, sizeof(struct btrfs_device_info),
4966              btrfs_cmp_device_info, NULL);
4967
4968         return 0;
4969 }
4970
4971 static int decide_stripe_size_regular(struct alloc_chunk_ctl *ctl,
4972                                       struct btrfs_device_info *devices_info)
4973 {
4974         /* Number of stripes that count for block group size */
4975         int data_stripes;
4976
4977         /*
4978          * The primary goal is to maximize the number of stripes, so use as
4979          * many devices as possible, even if the stripes are not maximum sized.
4980          *
4981          * The DUP profile stores more than one stripe per device, the
4982          * max_avail is the total size so we have to adjust.
4983          */
4984         ctl->stripe_size = div_u64(devices_info[ctl->ndevs - 1].max_avail,
4985                                    ctl->dev_stripes);
4986         ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
4987
4988         /* This will have to be fixed for RAID1 and RAID10 over more drives */
4989         data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;
4990
4991         /*
4992          * Use the number of data stripes to figure out how big this chunk is
4993          * really going to be in terms of logical address space, and compare
4994          * that answer with the max chunk size. If it's higher, we try to
4995          * reduce stripe_size.
4996          */
4997         if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) {
4998                 /*
4999                  * Reduce stripe_size, round it up to a 16MB boundary again and
5000                  * then use it, unless it ends up being even bigger than the
5001                  * previous value we had already.
5002                  */
5003                 ctl->stripe_size = min(round_up(div_u64(ctl->max_chunk_size,
5004                                                         data_stripes), SZ_16M),
5005                                        ctl->stripe_size);
5006         }
5007
5008         /* Align to BTRFS_STRIPE_LEN */
5009         ctl->stripe_size = round_down(ctl->stripe_size, BTRFS_STRIPE_LEN);
5010         ctl->chunk_size = ctl->stripe_size * data_stripes;
5011
5012         return 0;
5013 }
5014
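     /*
      * Reduce ndevs to a usable count (a multiple of devs_increment, capped
      * at devs_max) and compute stripe_size and chunk_size for the chunk
      * allocation policy in use.  Returns -ENOSPC when fewer than devs_min
      * devices have enough space.
      */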
5015 static int decide_stripe_size(struct btrfs_fs_devices *fs_devices,
5016                               struct alloc_chunk_ctl *ctl,
5017                               struct btrfs_device_info *devices_info)
5018 {
5019         struct btrfs_fs_info *info = fs_devices->fs_info;
5020
5021         /*
5022          * Round down to the number of usable stripes.  devs_increment can
5023          * be any number, so we can't use round_down(), which requires a
5024          * power of 2, while rounddown() is safe.
5025          */
5026         ctl->ndevs = rounddown(ctl->ndevs, ctl->devs_increment);
5027
5028         if (ctl->ndevs < ctl->devs_min) {
5029                 if (btrfs_test_opt(info, ENOSPC_DEBUG)) {
5030                         btrfs_debug(info,
5031         "%s: not enough devices with free space: have=%d minimum required=%d",
5032                                     __func__, ctl->ndevs, ctl->devs_min);
5033                 }
5034                 return -ENOSPC;
5035         }
5036
5037         ctl->ndevs = min(ctl->ndevs, ctl->devs_max);
5038
5039         switch (fs_devices->chunk_alloc_policy) {
5040         case BTRFS_CHUNK_ALLOC_REGULAR:
5041                 return decide_stripe_size_regular(ctl, devices_info);
5042         default:
5043                 BUG();
5044         }
5045 }
5046
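     /*
      * Build the in-memory chunk mapping (map_lookup embedded in an
      * extent_map), add it to the mapping tree and create the corresponding
      * block group.
      */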
5047 static int create_chunk(struct btrfs_trans_handle *trans,
5048                         struct alloc_chunk_ctl *ctl,
5049                         struct btrfs_device_info *devices_info)
5050 {
5051         struct btrfs_fs_info *info = trans->fs_info;
5052         struct map_lookup *map = NULL;
5053         struct extent_map_tree *em_tree;
5054         struct extent_map *em;
5055         u64 start = ctl->start;
5056         u64 type = ctl->type;
5057         int ret;
5058         int i;
5059         int j;
5060
5061         map = kmalloc(map_lookup_size(ctl->num_stripes), GFP_NOFS);
5062         if (!map)
5063                 return -ENOMEM;
5064         map->num_stripes = ctl->num_stripes;
5065
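             /*
              * Lay out the stripes: each of the ndevs devices holds
              * dev_stripes consecutive stripes, placed stripe_size apart
              * within the device extent.
              */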
5066         for (i = 0; i < ctl->ndevs; ++i) {
5067                 for (j = 0; j < ctl->dev_stripes; ++j) {
5068                         int s = i * ctl->dev_stripes + j;
5069                         map->stripes[s].dev = devices_info[i].dev;
5070                         map->stripes[s].physical = devices_info[i].dev_offset +
5071                                                    j * ctl->stripe_size;
5072                 }
5073         }
5074         map->stripe_len = BTRFS_STRIPE_LEN;
5075         map->io_align = BTRFS_STRIPE_LEN;
5076         map->io_width = BTRFS_STRIPE_LEN;
5077         map->type = type;
5078         map->sub_stripes = ctl->sub_stripes;
5079
5080         trace_btrfs_chunk_alloc(info, map, start, ctl->chunk_size);
5081
5082         em = alloc_extent_map();
5083         if (!em) {
5084                 kfree(map);
5085                 return -ENOMEM;
5086         }
5087         set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
5088         em->map_lookup = map;
5089         em->start = start;
5090         em->len = ctl->chunk_size;
5091         em->block_start = 0;
5092         em->block_len = em->len;
5093         em->orig_block_len = ctl->stripe_size;
5094
5095         em_tree = &info->mapping_tree;
5096         write_lock(&em_tree->lock);
5097         ret = add_extent_mapping(em_tree, em, 0);
5098         if (ret) {
5099                 write_unlock(&em_tree->lock);
5100                 free_extent_map(em);
5101                 return ret;
5102         }
5103         write_unlock(&em_tree->lock);
5104
5105         ret = btrfs_make_block_group(trans, 0, type, start, ctl->chunk_size);
5106         if (ret)
5107                 goto error_del_extent;
5108
5109         for (i = 0; i < map->num_stripes; i++) {
5110                 struct btrfs_device *dev = map->stripes[i].dev;
5111
5112                 btrfs_device_set_bytes_used(dev,
5113                                             dev->bytes_used + ctl->stripe_size);
5114                 if (list_empty(&dev->post_commit_list))
5115                         list_add_tail(&dev->post_commit_list,
5116                                       &trans->transaction->dev_update_list);
5117         }
5118
5119         atomic64_sub(ctl->stripe_size * map->num_stripes,
5120                      &info->free_chunk_space);
5121
5122         free_extent_map(em);
5123         check_raid56_incompat_flag(info, type);
5124         check_raid1c34_incompat_flag(info, type);
5125
5126         return 0;
5127
5128 error_del_extent:
5129         write_lock(&em_tree->lock);
5130         remove_extent_mapping(em_tree, em);
5131         write_unlock(&em_tree->lock);
5132
5133         /* One for our allocation */
5134         free_extent_map(em);
5135         /* One for the tree reference */
5136         free_extent_map(em);
5137
5138         return ret;
5139 }
5140
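     /*
      * Allocate a new chunk of the given type.  This creates the in-memory
      * representation and the block group; the chunk item itself is inserted
      * into the chunk tree later, by btrfs_finish_chunk_alloc().
      */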
5141 int btrfs_alloc_chunk(struct btrfs_trans_handle *trans, u64 type)
5142 {
5143         struct btrfs_fs_info *info = trans->fs_info;
5144         struct btrfs_fs_devices *fs_devices = info->fs_devices;
5145         struct btrfs_device_info *devices_info = NULL;
5146         struct alloc_chunk_ctl ctl;
5147         int ret;
5148
5149         lockdep_assert_held(&info->chunk_mutex);
5150
5151         if (!alloc_profile_is_valid(type, 0)) {
5152                 ASSERT(0);
5153                 return -EINVAL;
5154         }
5155
5156         if (list_empty(&fs_devices->alloc_list)) {
5157                 if (btrfs_test_opt(info, ENOSPC_DEBUG))
5158                         btrfs_debug(info, "%s: no writable device", __func__);
5159                 return -ENOSPC;
5160         }
5161
5162         if (!(type & BTRFS_BLOCK_GROUP_TYPE_MASK)) {
5163                 btrfs_err(info, "invalid chunk type 0x%llx requested", type);
5164                 ASSERT(0);
5165                 return -EINVAL;
5166         }
5167
5168         ctl.start = find_next_chunk(info);
5169         ctl.type = type;
5170         init_alloc_chunk_ctl(fs_devices, &ctl);
5171
5172         devices_info = kcalloc(fs_devices->rw_devices, sizeof(*devices_info),
5173                                GFP_NOFS);
5174         if (!devices_info)
5175                 return -ENOMEM;
5176
5177         ret = gather_device_info(fs_devices, &ctl, devices_info);
5178         if (ret < 0)
5179                 goto out;
5180
5181         ret = decide_stripe_size(fs_devices, &ctl, devices_info);
5182         if (ret < 0)
5183                 goto out;
5184
5185         ret = create_chunk(trans, &ctl, devices_info);
5186
5187 out:
5188         kfree(devices_info);
5189         return ret;
5190 }
5191
5192 /*
5193  * Chunk allocation falls into two parts. The first part does work
5194  * that makes the new allocated chunk usable, but does not do any operation
5195  * that modifies the chunk tree. The second part does the work that
5196  * requires modifying the chunk tree. This division is important for the
5197  * bootstrap process of adding storage to a seed btrfs.
5198  */
5199 int btrfs_finish_chunk_alloc(struct btrfs_trans_handle *trans,
5200                              u64 chunk_offset, u64 chunk_size)
5201 {
5202         struct btrfs_fs_info *fs_info = trans->fs_info;
5203         struct btrfs_root *extent_root = fs_info->extent_root;
5204         struct btrfs_root *chunk_root = fs_info->chunk_root;
5205         struct btrfs_key key;
5206         struct btrfs_device *device;
5207         struct btrfs_chunk *chunk;
5208         struct btrfs_stripe *stripe;
5209         struct extent_map *em;
5210         struct map_lookup *map;
5211         size_t item_size;
5212         u64 dev_offset;
5213         u64 stripe_size;
5214         int i = 0;
5215         int ret = 0;
5216
5217         em = btrfs_get_chunk_map(fs_info, chunk_offset, chunk_size);
5218         if (IS_ERR(em))
5219                 return PTR_ERR(em);
5220
5221         map = em->map_lookup;
5222         item_size = btrfs_chunk_item_size(map->num_stripes);
5223         stripe_size = em->orig_block_len;
5224
5225         chunk = kzalloc(item_size, GFP_NOFS);
5226         if (!chunk) {
5227                 ret = -ENOMEM;
5228                 goto out;
5229         }
5230
5231         /*
5232          * Take the device list mutex to prevent races with the final phase of
5233          * a device replace operation that replaces the device object associated
5234          * with the map's stripes, because the device object's id can change
5235          * at any time during that final phase of the device replace operation
5236          * (dev-replace.c:btrfs_dev_replace_finishing()).
5237          */
5238         mutex_lock(&fs_info->fs_devices->device_list_mutex);
5239         for (i = 0; i < map->num_stripes; i++) {
5240                 device = map->stripes[i].dev;
5241                 dev_offset = map->stripes[i].physical;
5242
5243                 ret = btrfs_update_device(trans, device);
5244                 if (ret)
5245                         break;
5246                 ret = btrfs_alloc_dev_extent(trans, device, chunk_offset,
5247                                              dev_offset, stripe_size);
5248                 if (ret)
5249                         break;
5250         }
5251         if (ret) {
5252                 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
5253                 goto out;
5254         }
5255
5256         stripe = &chunk->stripe;
5257         for (i = 0; i < map->num_stripes; i++) {
5258                 device = map->stripes[i].dev;
5259                 dev_offset = map->stripes[i].physical;
5260
5261                 btrfs_set_stack_stripe_devid(stripe, device->devid);
5262                 btrfs_set_stack_stripe_offset(stripe, dev_offset);
5263                 memcpy(stripe->dev_uuid, device->uuid, BTRFS_UUID_SIZE);
5264                 stripe++;
5265         }
5266         mutex_unlock(&fs_info->fs_devices->device_list_mutex);
5267
5268         btrfs_set_stack_chunk_length(chunk, chunk_size);
5269         btrfs_set_stack_chunk_owner(chunk, extent_root->root_key.objectid);
5270         btrfs_set_stack_chunk_stripe_len(chunk, map->stripe_len);
5271         btrfs_set_stack_chunk_type(chunk, map->type);
5272         btrfs_set_stack_chunk_num_stripes(chunk, map->num_stripes);
5273         btrfs_set_stack_chunk_io_align(chunk, map->stripe_len);
5274         btrfs_set_stack_chunk_io_width(chunk, map->stripe_len);
5275         btrfs_set_stack_chunk_sector_size(chunk, fs_info->sectorsize);
5276         btrfs_set_stack_chunk_sub_stripes(chunk, map->sub_stripes);
5277
5278         key.objectid = BTRFS_FIRST_CHUNK_TREE_OBJECTID;
5279         key.type = BTRFS_CHUNK_ITEM_KEY;
5280         key.offset = chunk_offset;
5281
5282         ret = btrfs_insert_item(trans, chunk_root, &key, chunk, item_size);
5283         if (ret == 0 && map->type & BTRFS_BLOCK_GROUP_SYSTEM) {
5284                 /*
5285                  * TODO: Cleanup of inserted chunk root in case of
5286                  * failure.
5287                  */
5288                 ret = btrfs_add_system_chunk(fs_info, &key, chunk, item_size);
5289         }
5290
5291 out:
5292         kfree(chunk);
5293         free_extent_map(em);
5294         return ret;
5295 }
5296
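     /*
      * Allocate the initial metadata and system chunks on a newly writable
      * set of devices, e.g. when sprouting from a seed filesystem.
      */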
5297 static noinline int init_first_rw_device(struct btrfs_trans_handle *trans)
5298 {
5299         struct btrfs_fs_info *fs_info = trans->fs_info;
5300         u64 alloc_profile;
5301         int ret;
5302
5303         alloc_profile = btrfs_metadata_alloc_profile(fs_info);
5304         ret = btrfs_alloc_chunk(trans, alloc_profile);
5305         if (ret)
5306                 return ret;
5307
5308         alloc_profile = btrfs_system_alloc_profile(fs_info);
5309         ret = btrfs_alloc_chunk(trans, alloc_profile);
5310         return ret;
5311 }
5312
5313 static inline int btrfs_chunk_max_errors(struct map_lookup *map)
5314 {
5315         const int index = btrfs_bg_flags_to_raid_index(map->type);
5316
5317         return btrfs_raid_array[index].tolerated_failures;
5318 }
5319
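     /*
      * Return 1 if the chunk at chunk_offset cannot be written: either a
      * stripe lives on a non-writeable device, or more devices are missing
      * than the RAID profile tolerates.
      */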
5320 int btrfs_chunk_readonly(struct btrfs_fs_info *fs_info, u64 chunk_offset)
5321 {
5322         struct extent_map *em;
5323         struct map_lookup *map;
5324         int readonly = 0;
5325         int miss_ndevs = 0;
5326         int i;
5327
5328         em = btrfs_get_chunk_map(fs_info, chunk_offset, 1);
5329         if (IS_ERR(em))
5330                 return 1;
5331
5332         map = em->map_lookup;
5333         for (i = 0; i < map->num_stripes; i++) {
5334                 if (test_bit(BTRFS_DEV_STATE_MISSING,
5335                                         &map->stripes[i].dev->dev_state)) {
5336                         miss_ndevs++;
5337                         continue;
5338                 }
5339                 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE,
5340                                         &map->stripes[i].dev->dev_state)) {
5341                         readonly = 1;
5342                         goto end;
5343                 }
5344         }
5345
5346         /*
5347          * If the number of missing devices is larger than max errors,
5348          * we cannot write the data into that chunk successfully, so
5349          * set it readonly.
5350          */
5351         if (miss_ndevs > btrfs_chunk_max_errors(map))
5352                 readonly = 1;
5353 end:
5354         free_extent_map(em);
5355         return readonly;
5356 }
5357
5358 void btrfs_mapping_tree_free(struct extent_map_tree *tree)
5359 {
5360         struct extent_map *em;
5361
5362         while (1) {
5363                 write_lock(&tree->lock);
5364                 em = lookup_extent_mapping(tree, 0, (u64)-1);
5365                 if (em)
5366                         remove_extent_mapping(tree, em);
5367                 write_unlock(&tree->lock);
5368                 if (!em)
5369                         break;
5370                 /* once for us */
5371                 free_extent_map(em);
5372                 /* once for the tree */
5373                 free_extent_map(em);
5374         }
5375 }
5376
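     /*
      * Return the number of ways a read of the given range can be served,
      * used by callers to iterate over mirrors after a failed read.  One
      * more is added while a device replace target is being populated.
      */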
5377 int btrfs_num_copies(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
5378 {
5379         struct extent_map *em;
5380         struct map_lookup *map;
5381         int ret;
5382
5383         em = btrfs_get_chunk_map(fs_info, logical, len);
5384         if (IS_ERR(em))
5385                 /*
5386                  * We could return errors for these cases, but that could get
5387                  * ugly and we'd probably do the same thing which is just not do
5388                  * anything else and exit, so return 1 so the callers don't try
5389                  * to use other copies.
5390                  */
5391                 return 1;
5392
5393         map = em->map_lookup;
5394         if (map->type & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1_MASK))
5395                 ret = map->num_stripes;
5396         else if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5397                 ret = map->sub_stripes;
5398         else if (map->type & BTRFS_BLOCK_GROUP_RAID5)
5399                 ret = 2;
5400         else if (map->type & BTRFS_BLOCK_GROUP_RAID6)
5401                 /*
5402                  * There could be two corrupted data stripes, so we need
5403                  * to retry in a loop to rebuild the correct data.
5404                  *
5405                  * Fail one stripe at a time on every retry, except for
5406                  * the stripe under reconstruction.
5407                  */
5408                 ret = map->num_stripes;
5409         else
5410                 ret = 1;
5411         free_extent_map(em);
5412
5413         down_read(&fs_info->dev_replace.rwsem);
5414         if (btrfs_dev_replace_is_ongoing(&fs_info->dev_replace) &&
5415             fs_info->dev_replace.tgtdev)
5416                 ret++;
5417         up_read(&fs_info->dev_replace.rwsem);
5418
5419         return ret;
5420 }
5421
5422 unsigned long btrfs_full_stripe_len(struct btrfs_fs_info *fs_info,
5423                                     u64 logical)
5424 {
5425         struct extent_map *em;
5426         struct map_lookup *map;
5427         unsigned long len = fs_info->sectorsize;
5428
5429         em = btrfs_get_chunk_map(fs_info, logical, len);
5430
5431         if (!WARN_ON(IS_ERR(em))) {
5432                 map = em->map_lookup;
5433                 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
5434                         len = map->stripe_len * nr_data_stripes(map);
5435                 free_extent_map(em);
5436         }
5437         return len;
5438 }
5439
5440 int btrfs_is_parity_mirror(struct btrfs_fs_info *fs_info, u64 logical, u64 len)
5441 {
5442         struct extent_map *em;
5443         struct map_lookup *map;
5444         int ret = 0;
5445
5446         em = btrfs_get_chunk_map(fs_info, logical, len);
5447
5448         if (!WARN_ON(IS_ERR(em))) {
5449                 map = em->map_lookup;
5450                 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK)
5451                         ret = 1;
5452                 free_extent_map(em);
5453         }
5454         return ret;
5455 }
5456
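     /*
      * Pick the mirror stripe to read from: start at a pseudo-randomly
      * preferred stripe (derived from the caller's pid) and prefer devices
      * that are present and not the source of a running device replace.
      */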
5457 static int find_live_mirror(struct btrfs_fs_info *fs_info,
5458                             struct map_lookup *map, int first,
5459                             int dev_replace_is_ongoing)
5460 {
5461         int i;
5462         int num_stripes;
5463         int preferred_mirror;
5464         int tolerance;
5465         struct btrfs_device *srcdev;
5466
5467         ASSERT((map->type &
5468                  (BTRFS_BLOCK_GROUP_RAID1_MASK | BTRFS_BLOCK_GROUP_RAID10)));
5469
5470         if (map->type & BTRFS_BLOCK_GROUP_RAID10)
5471                 num_stripes = map->sub_stripes;
5472         else
5473                 num_stripes = map->num_stripes;
5474
5475         preferred_mirror = first + current->pid % num_stripes;
5476
5477         if (dev_replace_is_ongoing &&
5478             fs_info->dev_replace.cont_reading_from_srcdev_mode ==
5479              BTRFS_DEV_REPLACE_ITEM_CONT_READING_FROM_SRCDEV_MODE_AVOID)
5480                 srcdev = fs_info->dev_replace.srcdev;
5481         else
5482                 srcdev = NULL;
5483
5484         /*
5485          * Try to avoid the drive that is the source drive for a
5486          * dev-replace procedure; only choose it if no other non-missing
5487          * mirror is available.
5488          */
5489         for (tolerance = 0; tolerance < 2; tolerance++) {
5490                 if (map->stripes[preferred_mirror].dev->bdev &&
5491                     (tolerance || map->stripes[preferred_mirror].dev != srcdev))
5492                         return preferred_mirror;
5493                 for (i = first; i < first + num_stripes; i++) {
5494                         if (map->stripes[i].dev->bdev &&
5495                             (tolerance || map->stripes[i].dev != srcdev))
5496                                 return i;
5497                 }
5498         }
5499
5500         /* We couldn't find one that doesn't fail.  Just return something
5501          * and the I/O error handling code will clean up eventually.
5502          */
5503         return preferred_mirror;
5504 }
5505
5506 /* Bubble-sort the stripe set to put the parity/syndrome stripes last */
5507 static void sort_parity_stripes(struct btrfs_bio *bbio, int num_stripes)
5508 {
5509         int i;
5510         int again = 1;
5511
5512         while (again) {
5513                 again = 0;
5514                 for (i = 0; i < num_stripes - 1; i++) {
5515                         /* Swap if parity is on a smaller index */
5516                         if (bbio->raid_map[i] > bbio->raid_map[i + 1]) {
5517                                 swap(bbio->stripes[i], bbio->stripes[i + 1]);
5518                                 swap(bbio->raid_map[i], bbio->raid_map[i + 1]);
5519                                 again = 1;
5520                         }
5521                 }
5522         }
5523 }
5524
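     /*
      * A btrfs_bio and its variable-length tails are made in a single
      * allocation:
      *
      *   [struct btrfs_bio][stripes x total_stripes]
      *   [tgtdev_map x real_stripes][raid_map x total_stripes]
      *
      * tgtdev_map and raid_map are then pointed into that buffer.
      */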
5525 static struct btrfs_bio *alloc_btrfs_bio(int total_stripes, int real_stripes)
5526 {
5527         struct btrfs_bio *bbio = kzalloc(
5528                  /* the size of the btrfs_bio */
5529                 sizeof(struct btrfs_bio) +
5530                 /* plus the variable array for the stripes */
5531                 sizeof(struct btrfs_bio_stripe) * (total_stripes) +
5532                 /* plus the variable array for the tgt dev */
5533                 sizeof(int) * (real_stripes) +
5534                 /*
5535                  * plus the raid_map, which includes both the tgt dev
5536                  * and the stripes
5537                  */
5538                 sizeof(u64) * (total_stripes),
5539                 GFP_NOFS | __GFP_NOFAIL);
5540
5541         atomic_set(&bbio->error, 0);
5542         refcount_set(&bbio->refs, 1);
5543
5544         bbio->tgtdev_map = (int *)(bbio->stripes + total_stripes);
5545         bbio->raid_map = (u64 *)(bbio->tgtdev_map + real_stripes);
5546
5547         return bbio;
5548 }
5549
5550 void btrfs_get_bbio(struct btrfs_bio *bbio)
5551 {
5552         WARN_ON(!refcount_read(&bbio->refs));
5553         refcount_inc(&bbio->refs);
5554 }
5555
5556 void btrfs_put_bbio(struct btrfs_bio *bbio)
5557 {
5558         if (!bbio)
5559                 return;
5560         if (refcount_dec_and_test(&bbio->refs))
5561                 kfree(bbio);
5562 }
5563
5564 /*
5565  * Can REQ_OP_DISCARD be sent with other REQ like REQ_OP_WRITE?
5566  * Note that a discard won't be sent to the target device of a
5567  * device replace operation.
5568  */
5569 static int __btrfs_map_block_for_discard(struct btrfs_fs_info *fs_info,
5570                                          u64 logical, u64 *length_ret,
5571                                          struct btrfs_bio **bbio_ret)
5572 {
5573         struct extent_map *em;
5574         struct map_lookup *map;
5575         struct btrfs_bio *bbio;
5576         u64 length = *length_ret;
5577         u64 offset;
5578         u64 stripe_nr;
5579         u64 stripe_nr_end;
5580         u64 stripe_end_offset;
5581         u64 stripe_cnt;
5582         u64 stripe_len;
5583         u64 stripe_offset;
5584         u64 num_stripes;
5585         u32 stripe_index;
5586         u32 factor = 0;
5587         u32 sub_stripes = 0;
5588         u64 stripes_per_dev = 0;
5589         u32 remaining_stripes = 0;
5590         u32 last_stripe = 0;
5591         int ret = 0;
5592         int i;
5593
5594         /* Discard always returns a bbio */
5595         ASSERT(bbio_ret);
5596
5597         em = btrfs_get_chunk_map(fs_info, logical, length);
5598         if (IS_ERR(em))
5599                 return PTR_ERR(em);
5600
5601         map = em->map_lookup;
5602         /* we don't discard raid56 yet */
5603         if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5604                 ret = -EOPNOTSUPP;
5605                 goto out;
5606         }
5607
5608         offset = logical - em->start;
5609         length = min_t(u64, em->start + em->len - logical, length);
5610         *length_ret = length;
5611
5612         stripe_len = map->stripe_len;
5613         /*
5614          * stripe_nr counts the total number of stripes we have to stride
5615          * to get to this block
5616          */
5617         stripe_nr = div64_u64(offset, stripe_len);
5618
5619         /* stripe_offset is the offset of this block in its stripe */
5620         stripe_offset = offset - stripe_nr * stripe_len;
5621
5622         stripe_nr_end = round_up(offset + length, map->stripe_len);
5623         stripe_nr_end = div64_u64(stripe_nr_end, map->stripe_len);
5624         stripe_cnt = stripe_nr_end - stripe_nr;
5625         stripe_end_offset = stripe_nr_end * map->stripe_len -
5626                             (offset + length);
5627         /*
5628          * after this, stripe_nr is the number of stripes on this
5629          * device we have to walk to find the data, and stripe_index is
5630          * the number of our device in the stripe array
5631          */
5632         num_stripes = 1;
5633         stripe_index = 0;
5634         if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
5635                          BTRFS_BLOCK_GROUP_RAID10)) {
5636                 if (map->type & BTRFS_BLOCK_GROUP_RAID0)
5637                         sub_stripes = 1;
5638                 else
5639                         sub_stripes = map->sub_stripes;
5640
5641                 factor = map->num_stripes / sub_stripes;
5642                 num_stripes = min_t(u64, map->num_stripes,
5643                                     sub_stripes * stripe_cnt);
5644                 stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
5645                 stripe_index *= sub_stripes;
5646                 stripes_per_dev = div_u64_rem(stripe_cnt, factor,
5647                                               &remaining_stripes);
5648                 div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
5649                 last_stripe *= sub_stripes;
5650         } else if (map->type & (BTRFS_BLOCK_GROUP_RAID1_MASK |
5651                                 BTRFS_BLOCK_GROUP_DUP)) {
5652                 num_stripes = map->num_stripes;
5653         } else {
5654                 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
5655                                         &stripe_index);
5656         }
5657
5658         bbio = alloc_btrfs_bio(num_stripes, 0);
5659         if (!bbio) {
5660                 ret = -ENOMEM;
5661                 goto out;
5662         }
5663
5664         for (i = 0; i < num_stripes; i++) {
5665                 bbio->stripes[i].physical =
5666                         map->stripes[stripe_index].physical +
5667                         stripe_offset + stripe_nr * map->stripe_len;
5668                 bbio->stripes[i].dev = map->stripes[stripe_index].dev;
5669
5670                 if (map->type & (BTRFS_BLOCK_GROUP_RAID0 |
5671                                  BTRFS_BLOCK_GROUP_RAID10)) {
5672                         bbio->stripes[i].length = stripes_per_dev *
5673                                 map->stripe_len;
5674
5675                         if (i / sub_stripes < remaining_stripes)
5676                                 bbio->stripes[i].length +=
5677                                         map->stripe_len;
5678
5679                         /*
5680                          * Special for the first stripe and
5681                          * the last stripe:
5682                          *
5683                          * |-------|...|-------|
5684                          *     |----------|
5685                          *    off     end_off
5686                          */
5687                         if (i < sub_stripes)
5688                                 bbio->stripes[i].length -=
5689                                         stripe_offset;
5690
5691                         if (stripe_index >= last_stripe &&
5692                             stripe_index <= (last_stripe +
5693                                              sub_stripes - 1))
5694                                 bbio->stripes[i].length -=
5695                                         stripe_end_offset;
5696
5697                         if (i == sub_stripes - 1)
5698                                 stripe_offset = 0;
5699                 } else {
5700                         bbio->stripes[i].length = length;
5701                 }
5702
5703                 stripe_index++;
5704                 if (stripe_index == map->num_stripes) {
5705                         stripe_index = 0;
5706                         stripe_nr++;
5707                 }
5708         }
5709
5710         *bbio_ret = bbio;
5711         bbio->map_type = map->type;
5712         bbio->num_stripes = num_stripes;
5713 out:
5714         free_extent_map(em);
5715         return ret;
5716 }
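
/*
 * Worked example (hypothetical numbers) for the stripe math in
 * __btrfs_map_block_for_discard() above: a RAID0 chunk with
 * num_stripes = 2 and stripe_len = 64K, discarding 192K at chunk
 * offset 96K:
 *
 *   stripe_nr         = 96K / 64K                       = 1
 *   stripe_offset     = 96K - 1 * 64K                   = 32K
 *   stripe_nr_end     = round_up(96K + 192K, 64K) / 64K = 5
 *   stripe_cnt        = 5 - 1                           = 4
 *   stripe_end_offset = 5 * 64K - (96K + 192K)          = 32K
 *
 * The loop then emits one stripe per device, relative to each device's
 * map->stripes[n].physical:
 *
 *   dev 1: offset 32K, length 2 * 64K - 32K = 96K  (head trimmed)
 *   dev 0: offset 64K, length 2 * 64K - 32K = 96K  (tail trimmed)
 *
 * which together cover exactly the requested 192K.
 */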
5717
5718 /*
5719  * In the dev-replace case, for the repair case (the only case where the mirror
5720  * is selected explicitly when calling btrfs_map_block), blocks left of the
5721  * left cursor can also be read from the target drive.
5722  *
5723  * For REQ_GET_READ_MIRRORS, the target drive is added as the last one to the
5724  * array of stripes.
5725  * For READ, it also needs to be supported using the same mirror number.
5726  *
5727  * If the requested block is not left of the left cursor, EIO is returned. This
5728  * can happen because btrfs_num_copies() returns one more in the dev-replace
5729  * case.
5730  */
5731 static int get_extra_mirror_from_replace(struct btrfs_fs_info *fs_info,
5732                                          u64 logical, u64 length,
5733                                          u64 srcdev_devid, int *mirror_num,
5734                                          u64 *physical)
5735 {
5736         struct btrfs_bio *bbio = NULL;
5737         int num_stripes;
5738         int index_srcdev = 0;
5739         int found = 0;
5740         u64 physical_of_found = 0;
5741         int i;
5742         int ret = 0;
5743
5744         ret = __btrfs_map_block(fs_info, BTRFS_MAP_GET_READ_MIRRORS,
5745                                 logical, &length, &bbio, 0, 0);
5746         if (ret) {
5747                 ASSERT(bbio == NULL);
5748                 return ret;
5749         }
5750
5751         num_stripes = bbio->num_stripes;
5752         if (*mirror_num > num_stripes) {
5753                 /*
5754                  * BTRFS_MAP_GET_READ_MIRRORS does not contain this mirror,
5755                  * which means that the requested area is not left of the
5756                  * left cursor.
5757                  */
5758                 btrfs_put_bbio(bbio);
5759                 return -EIO;
5760         }
5761
5762         /*
5763          * process the rest of the function using the mirror_num of the source
5764          * drive. Therefore look it up first.  At the end, patch the device
5765          * pointer to the one of the target drive.
5766          */
5767         for (i = 0; i < num_stripes; i++) {
5768                 if (bbio->stripes[i].dev->devid != srcdev_devid)
5769                         continue;
5770
5771                 /*
5772                  * In case of DUP, in order to keep it simple, only add the
5773                  * mirror with the lowest physical address
5774                  */
5775                 if (found &&
5776                     physical_of_found <= bbio->stripes[i].physical)
5777                         continue;
5778
5779                 index_srcdev = i;
5780                 found = 1;
5781                 physical_of_found = bbio->stripes[i].physical;
5782         }
5783
5784         btrfs_put_bbio(bbio);
5785
5786         ASSERT(found);
5787         if (!found)
5788                 return -EIO;
5789
5790         *mirror_num = index_srcdev + 1;
5791         *physical = physical_of_found;
5792         return ret;
5793 }
5794
5795 static void handle_ops_on_dev_replace(enum btrfs_map_op op,
5796                                       struct btrfs_bio **bbio_ret,
5797                                       struct btrfs_dev_replace *dev_replace,
5798                                       int *num_stripes_ret, int *max_errors_ret)
5799 {
5800         struct btrfs_bio *bbio = *bbio_ret;
5801         u64 srcdev_devid = dev_replace->srcdev->devid;
5802         int tgtdev_indexes = 0;
5803         int num_stripes = *num_stripes_ret;
5804         int max_errors = *max_errors_ret;
5805         int i;
5806
5807         if (op == BTRFS_MAP_WRITE) {
5808                 int index_where_to_add;
5809
5810                 /*
5811                  * duplicate the write operations while the dev replace
5812                  * procedure is running. Since the copying of the old disk to
5813                  * the new disk takes place at run time while the filesystem is
5814                  * mounted writable, the regular write operations to the old
5815                  * disk have to be duplicated to go to the new disk as well.
5816                  *
5817                  * Note that device->missing is handled by the caller, and that
5818                  * the write to the old disk is already set up in the stripes
5819                  * array.
5820                  */
5821                 index_where_to_add = num_stripes;
5822                 for (i = 0; i < num_stripes; i++) {
5823                         if (bbio->stripes[i].dev->devid == srcdev_devid) {
5824                                 /* write to new disk, too */
5825                                 struct btrfs_bio_stripe *new =
5826                                         bbio->stripes + index_where_to_add;
5827                                 struct btrfs_bio_stripe *old =
5828                                         bbio->stripes + i;
5829
5830                                 new->physical = old->physical;
5831                                 new->length = old->length;
5832                                 new->dev = dev_replace->tgtdev;
5833                                 bbio->tgtdev_map[i] = index_where_to_add;
5834                                 index_where_to_add++;
5835                                 max_errors++;
5836                                 tgtdev_indexes++;
5837                         }
5838                 }
5839                 num_stripes = index_where_to_add;
5840         } else if (op == BTRFS_MAP_GET_READ_MIRRORS) {
5841                 int index_srcdev = 0;
5842                 int found = 0;
5843                 u64 physical_of_found = 0;
5844
5845                 /*
5846                  * During the dev-replace procedure, the target drive can also
5847                  * be used to read data in case it is needed to repair a corrupt
5848                  * block elsewhere. This is possible if the requested area is
5849                  * left of the left cursor. In this area, the target drive is a
5850                  * full copy of the source drive.
5851                  */
5852                 for (i = 0; i < num_stripes; i++) {
5853                         if (bbio->stripes[i].dev->devid == srcdev_devid) {
5854                                 /*
5855                                  * In case of DUP, in order to keep it simple,
5856                                  * only add the mirror with the lowest physical
5857                                  * address
5858                                  */
5859                                 if (found &&
5860                                     physical_of_found <=
5861                                      bbio->stripes[i].physical)
5862                                         continue;
5863                                 index_srcdev = i;
5864                                 found = 1;
5865                                 physical_of_found = bbio->stripes[i].physical;
5866                         }
5867                 }
5868                 if (found) {
5869                         struct btrfs_bio_stripe *tgtdev_stripe =
5870                                 bbio->stripes + num_stripes;
5871
5872                         tgtdev_stripe->physical = physical_of_found;
5873                         tgtdev_stripe->length =
5874                                 bbio->stripes[index_srcdev].length;
5875                         tgtdev_stripe->dev = dev_replace->tgtdev;
5876                         bbio->tgtdev_map[index_srcdev] = num_stripes;
5877
5878                         tgtdev_indexes++;
5879                         num_stripes++;
5880                 }
5881         }
5882
5883         *num_stripes_ret = num_stripes;
5884         *max_errors_ret = max_errors;
5885         bbio->num_tgtdevs = tgtdev_indexes;
5886         *bbio_ret = bbio;
5887 }
5888
5889 static bool need_full_stripe(enum btrfs_map_op op)
5890 {
5891         return (op == BTRFS_MAP_WRITE || op == BTRFS_MAP_GET_READ_MIRRORS);
5892 }
5893
5894 /*
5895  * btrfs_get_io_geometry - calculates the geometry of a particular (address, len)
5896  *                     tuple. This information is used to calculate how big a
5897  *                     particular bio can get before it straddles a stripe.
5898  *
5899  * @fs_info - the filesystem
5900  * @logical - address that we want to figure out the geometry of
5901  * @len     - the length of IO we are going to perform, starting at @logical
5902  * @op      - type of operation - write or read
5903  * @io_geom - pointer used to return values
5904  *
5905  * Returns < 0 in case a chunk for the given logical address cannot be found,
5906  * usually shouldn't happen unless @logical is corrupted, 0 otherwise.
5907  */
5908 int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
5909                         u64 logical, u64 len, struct btrfs_io_geometry *io_geom)
5910 {
5911         struct extent_map *em;
5912         struct map_lookup *map;
5913         u64 offset;
5914         u64 stripe_offset;
5915         u64 stripe_nr;
5916         u64 stripe_len;
5917         u64 raid56_full_stripe_start = (u64)-1;
5918         int data_stripes;
5919         int ret = 0;
5920
5921         ASSERT(op != BTRFS_MAP_DISCARD);
5922
5923         em = btrfs_get_chunk_map(fs_info, logical, len);
5924         if (IS_ERR(em))
5925                 return PTR_ERR(em);
5926
5927         map = em->map_lookup;
5928         /* Offset of this logical address in the chunk */
5929         offset = logical - em->start;
5930         /* Len of a stripe in a chunk */
5931         stripe_len = map->stripe_len;
5932         /* Stripe where this block falls in */
5933         stripe_nr = div64_u64(offset, stripe_len);
5934         /* Offset of stripe in the chunk */
5935         stripe_offset = stripe_nr * stripe_len;
5936         if (offset < stripe_offset) {
5937                 btrfs_crit(fs_info,
5938 "stripe math has gone wrong, stripe_offset=%llu offset=%llu start=%llu logical=%llu stripe_len=%llu",
5939                         stripe_offset, offset, em->start, logical, stripe_len);
5940                 ret = -EINVAL;
5941                 goto out;
5942         }
5943
5944         /* stripe_offset is the offset of this block in its stripe */
5945         stripe_offset = offset - stripe_offset;
5946         data_stripes = nr_data_stripes(map);
5947
5948         if (map->type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
5949                 u64 max_len = stripe_len - stripe_offset;
5950
5951                 /*
5952                  * In case of raid56, we need to know the stripe aligned start
5953                  */
5954                 if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
5955                         unsigned long full_stripe_len = stripe_len * data_stripes;
5956                         raid56_full_stripe_start = offset;
5957
5958                         /*
5959                          * Allow a write of a full stripe, but make sure we
5960                          * don't allow straddling of stripes
5961                          */
5962                         raid56_full_stripe_start = div64_u64(raid56_full_stripe_start,
5963                                         full_stripe_len);
5964                         raid56_full_stripe_start *= full_stripe_len;
5965
5966                         /*
5967                          * For writes to RAID[56], allow a full stripeset across
5968                          * all disks. For other RAID types and for RAID[56]
5969                          * reads, just allow a single stripe (on a single disk).
5970                          */
5971                         if (op == BTRFS_MAP_WRITE) {
5972                                 max_len = stripe_len * data_stripes -
5973                                           (offset - raid56_full_stripe_start);
5974                         }
5975                 }
5976                 len = min_t(u64, em->len - offset, max_len);
5977         } else {
5978                 len = em->len - offset;
5979         }
5980
5981         io_geom->len = len;
5982         io_geom->offset = offset;
5983         io_geom->stripe_len = stripe_len;
5984         io_geom->stripe_nr = stripe_nr;
5985         io_geom->stripe_offset = stripe_offset;
5986         io_geom->raid56_stripe_offset = raid56_full_stripe_start;
5987
5988 out:
5989         /* once for us */
5990         free_extent_map(em);
5991         return ret;
5992 }
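
/*
 * Worked example (hypothetical numbers) for btrfs_get_io_geometry()
 * above, with stripe_len = 64K and an IO at chunk offset 150K:
 *
 *   stripe_nr     = 150K / 64K     = 2
 *   stripe_offset = 150K - 2 * 64K = 22K
 *
 * For a plain read, len is capped at stripe_len - stripe_offset = 42K so
 * the bio never straddles a stripe boundary.  On a RAID5 chunk with
 * data_stripes = 2 (full stripe = 128K), a write may instead run to the
 * end of the current full stripe:
 *
 *   raid56_full_stripe_start = rounddown(150K, 128K) = 128K
 *   max_len = 64K * 2 - (150K - 128K)                = 106K
 */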
5993
5994 static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
5995                              enum btrfs_map_op op,
5996                              u64 logical, u64 *length,
5997                              struct btrfs_bio **bbio_ret,
5998                              int mirror_num, int need_raid_map)
5999 {
6000         struct extent_map *em;
6001         struct map_lookup *map;
6002         u64 stripe_offset;
6003         u64 stripe_nr;
6004         u64 stripe_len;
6005         u32 stripe_index;
6006         int data_stripes;
6007         int i;
6008         int ret = 0;
6009         int num_stripes;
6010         int max_errors = 0;
6011         int tgtdev_indexes = 0;
6012         struct btrfs_bio *bbio = NULL;
6013         struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
6014         int dev_replace_is_ongoing = 0;
6015         int num_alloc_stripes;
6016         int patch_the_first_stripe_for_dev_replace = 0;
6017         u64 physical_to_patch_in_first_stripe = 0;
6018         u64 raid56_full_stripe_start = (u64)-1;
6019         struct btrfs_io_geometry geom;
6020
6021         ASSERT(bbio_ret);
6022         ASSERT(op != BTRFS_MAP_DISCARD);
6023
6024         ret = btrfs_get_io_geometry(fs_info, op, logical, *length, &geom);
6025         if (ret < 0)
6026                 return ret;
6027
6028         em = btrfs_get_chunk_map(fs_info, logical, *length);
6029         ASSERT(!IS_ERR(em));
6030         map = em->map_lookup;
6031
6032         *length = geom.len;
6033         stripe_len = geom.stripe_len;
6034         stripe_nr = geom.stripe_nr;
6035         stripe_offset = geom.stripe_offset;
6036         raid56_full_stripe_start = geom.raid56_stripe_offset;
6037         data_stripes = nr_data_stripes(map);
6038
6039         down_read(&dev_replace->rwsem);
6040         dev_replace_is_ongoing = btrfs_dev_replace_is_ongoing(dev_replace);
6041         /*
6042          * Hold the semaphore for read during the whole operation; write is
6043          * requested at commit time but must wait.
6044          */
6045         if (!dev_replace_is_ongoing)
6046                 up_read(&dev_replace->rwsem);
6047
6048         if (dev_replace_is_ongoing && mirror_num == map->num_stripes + 1 &&
6049             !need_full_stripe(op) && dev_replace->tgtdev != NULL) {
6050                 ret = get_extra_mirror_from_replace(fs_info, logical, *length,
6051                                                     dev_replace->srcdev->devid,
6052                                                     &mirror_num,
6053                                             &physical_to_patch_in_first_stripe);
6054                 if (ret)
6055                         goto out;
6056                 else
6057                         patch_the_first_stripe_for_dev_replace = 1;
6058         } else if (mirror_num > map->num_stripes) {
6059                 mirror_num = 0;
6060         }
6061
6062         num_stripes = 1;
6063         stripe_index = 0;
6064         if (map->type & BTRFS_BLOCK_GROUP_RAID0) {
6065                 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
6066                                 &stripe_index);
6067                 if (!need_full_stripe(op))
6068                         mirror_num = 1;
6069         } else if (map->type & BTRFS_BLOCK_GROUP_RAID1_MASK) {
6070                 if (need_full_stripe(op))
6071                         num_stripes = map->num_stripes;
6072                 else if (mirror_num)
6073                         stripe_index = mirror_num - 1;
6074                 else {
6075                         stripe_index = find_live_mirror(fs_info, map, 0,
6076                                             dev_replace_is_ongoing);
6077                         mirror_num = stripe_index + 1;
6078                 }
6079
6080         } else if (map->type & BTRFS_BLOCK_GROUP_DUP) {
6081                 if (need_full_stripe(op)) {
6082                         num_stripes = map->num_stripes;
6083                 } else if (mirror_num) {
6084                         stripe_index = mirror_num - 1;
6085                 } else {
6086                         mirror_num = 1;
6087                 }
6088
6089         } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) {
6090                 u32 factor = map->num_stripes / map->sub_stripes;
6091
6092                 stripe_nr = div_u64_rem(stripe_nr, factor, &stripe_index);
6093                 stripe_index *= map->sub_stripes;
6094
6095                 if (need_full_stripe(op))
6096                         num_stripes = map->sub_stripes;
6097                 else if (mirror_num)
6098                         stripe_index += mirror_num - 1;
6099                 else {
6100                         int old_stripe_index = stripe_index;
6101                         stripe_index = find_live_mirror(fs_info, map,
6102                                               stripe_index,
6103                                               dev_replace_is_ongoing);
6104                         mirror_num = stripe_index - old_stripe_index + 1;
6105                 }
6106
6107         } else if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK) {
6108                 if (need_raid_map && (need_full_stripe(op) || mirror_num > 1)) {
6109                         /* push stripe_nr back to the start of the full stripe */
6110                         stripe_nr = div64_u64(raid56_full_stripe_start,
6111                                         stripe_len * data_stripes);
6112
6113                         /* RAID[56] write or recovery. Return all stripes */
6114                         num_stripes = map->num_stripes;
6115                         max_errors = nr_parity_stripes(map);
6116
6117                         *length = map->stripe_len;
6118                         stripe_index = 0;
6119                         stripe_offset = 0;
6120                 } else {
6121                         /*
6122                          * Mirror #0 or #1 means the original data block.
6123                          * Mirror #2 is RAID5 parity block.
6124                          * Mirror #3 is RAID6 Q block.
6125                          */
6126                         stripe_nr = div_u64_rem(stripe_nr,
6127                                         data_stripes, &stripe_index);
6128                         if (mirror_num > 1)
6129                                 stripe_index = data_stripes + mirror_num - 2;
6130
6131                         /* We distribute the parity blocks across stripes */
6132                         div_u64_rem(stripe_nr + stripe_index, map->num_stripes,
6133                                         &stripe_index);
6134                         if (!need_full_stripe(op) && mirror_num <= 1)
6135                                 mirror_num = 1;
6136                 }
6137         } else {
6138                 /*
6139                  * after this, stripe_nr is the number of stripes on this
6140                  * device we have to walk to find the data, and stripe_index is
6141                  * the number of our device in the stripe array
6142                  */
6143                 stripe_nr = div_u64_rem(stripe_nr, map->num_stripes,
6144                                 &stripe_index);
6145                 mirror_num = stripe_index + 1;
6146         }
6147         if (stripe_index >= map->num_stripes) {
6148                 btrfs_crit(fs_info,
6149                            "stripe index math went horribly wrong, got stripe_index=%u, num_stripes=%u",
6150                            stripe_index, map->num_stripes);
6151                 ret = -EINVAL;
6152                 goto out;
6153         }
6154
6155         num_alloc_stripes = num_stripes;
6156         if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL) {
6157                 if (op == BTRFS_MAP_WRITE)
6158                         num_alloc_stripes <<= 1;
6159                 if (op == BTRFS_MAP_GET_READ_MIRRORS)
6160                         num_alloc_stripes++;
6161                 tgtdev_indexes = num_stripes;
6162         }
6163
6164         bbio = alloc_btrfs_bio(num_alloc_stripes, tgtdev_indexes);
6165         if (!bbio) {
6166                 ret = -ENOMEM;
6167                 goto out;
6168         }
6169
6170         for (i = 0; i < num_stripes; i++) {
6171                 bbio->stripes[i].physical = map->stripes[stripe_index].physical +
6172                         stripe_offset + stripe_nr * map->stripe_len;
6173                 bbio->stripes[i].dev = map->stripes[stripe_index].dev;
6174                 stripe_index++;
6175         }
6176
6177         /* build raid_map */
6178         if (map->type & BTRFS_BLOCK_GROUP_RAID56_MASK && need_raid_map &&
6179             (need_full_stripe(op) || mirror_num > 1)) {
6180                 u64 tmp;
6181                 unsigned rot;
6182
6183                 /* Work out the disk rotation on this stripe-set */
6184                 div_u64_rem(stripe_nr, num_stripes, &rot);
6185
6186                 /* Fill in the logical address of each stripe */
6187                 tmp = stripe_nr * data_stripes;
6188                 for (i = 0; i < data_stripes; i++)
6189                         bbio->raid_map[(i+rot) % num_stripes] =
6190                                 em->start + (tmp + i) * map->stripe_len;
6191
6192                 bbio->raid_map[(i+rot) % num_stripes] = RAID5_P_STRIPE;
6193                 if (map->type & BTRFS_BLOCK_GROUP_RAID6)
6194                         bbio->raid_map[(i+rot+1) % num_stripes] =
6195                                 RAID6_Q_STRIPE;
6196
6197                 sort_parity_stripes(bbio, num_stripes);
6198         }
6199
6200         if (need_full_stripe(op))
6201                 max_errors = btrfs_chunk_max_errors(map);
6202
6203         if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL &&
6204             need_full_stripe(op)) {
6205                 handle_ops_on_dev_replace(op, &bbio, dev_replace, &num_stripes,
6206                                           &max_errors);
6207         }
6208
6209         *bbio_ret = bbio;
6210         bbio->map_type = map->type;
6211         bbio->num_stripes = num_stripes;
6212         bbio->max_errors = max_errors;
6213         bbio->mirror_num = mirror_num;
6214
6215         /*
6216          * This is the case where a read is requested, dev_replace is
6217          * ongoing, mirror_num == num_stripes + 1 and the dev_replace
6218          * target drive is available as a mirror.
6219          */
6220         if (patch_the_first_stripe_for_dev_replace && num_stripes > 0) {
6221                 WARN_ON(num_stripes > 1);
6222                 bbio->stripes[0].dev = dev_replace->tgtdev;
6223                 bbio->stripes[0].physical = physical_to_patch_in_first_stripe;
6224                 bbio->mirror_num = map->num_stripes + 1;
6225         }
6226 out:
6227         if (dev_replace_is_ongoing) {
6228                 lockdep_assert_held(&dev_replace->rwsem);
6229                 /* Unlock and let waiting writers proceed */
6230                 up_read(&dev_replace->rwsem);
6231         }
6232         free_extent_map(em);
6233         return ret;
6234 }
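
/*
 * Sketch (hypothetical numbers) of the raid_map built in
 * __btrfs_map_block() above, for a 3-device RAID5 chunk (data_stripes = 2,
 * stripe_len = 64K) and a full-stripe write starting at chunk offset 256K:
 *
 *   stripe_nr = 256K / (64K * 2) = 2,  rot = 2 % 3 = 2
 *   raid_map[2] = em->start + 4 * 64K   (data stripe 0)
 *   raid_map[0] = em->start + 5 * 64K   (data stripe 1)
 *   raid_map[1] = RAID5_P_STRIPE        (parity)
 *
 * Parity thus lands on a different device for each full stripe, and
 * sort_parity_stripes() reorders the physical stripes to match.
 */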
6235
6236 int btrfs_map_block(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
6237                       u64 logical, u64 *length,
6238                       struct btrfs_bio **bbio_ret, int mirror_num)
6239 {
6240         if (op == BTRFS_MAP_DISCARD)
6241                 return __btrfs_map_block_for_discard(fs_info, logical,
6242                                                      length, bbio_ret);
6243
6244         return __btrfs_map_block(fs_info, op, logical, length, bbio_ret,
6245                                  mirror_num, 0);
6246 }
6247
6248 /* For Scrub/replace */
6249 int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
6250                      u64 logical, u64 *length,
6251                      struct btrfs_bio **bbio_ret)
6252 {
6253         return __btrfs_map_block(fs_info, op, logical, length, bbio_ret, 0, 1);
6254 }
6255
6256 static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio)
6257 {
6258         bio->bi_private = bbio->private;
6259         bio->bi_end_io = bbio->end_io;
6260         bio_endio(bio);
6261
6262         btrfs_put_bbio(bbio);
6263 }
6264
6265 static void btrfs_end_bio(struct bio *bio)
6266 {
6267         struct btrfs_bio *bbio = bio->bi_private;
6268         int is_orig_bio = 0;
6269
6270         if (bio->bi_status) {
6271                 atomic_inc(&bbio->error);
6272                 if (bio->bi_status == BLK_STS_IOERR ||
6273                     bio->bi_status == BLK_STS_TARGET) {
6274                         struct btrfs_device *dev = btrfs_io_bio(bio)->device;
6275
6276                         ASSERT(dev->bdev);
6277                         if (bio_op(bio) == REQ_OP_WRITE)
6278                                 btrfs_dev_stat_inc_and_print(dev,
6279                                                 BTRFS_DEV_STAT_WRITE_ERRS);
6280                         else if (!(bio->bi_opf & REQ_RAHEAD))
6281                                 btrfs_dev_stat_inc_and_print(dev,
6282                                                 BTRFS_DEV_STAT_READ_ERRS);
6283                         if (bio->bi_opf & REQ_PREFLUSH)
6284                                 btrfs_dev_stat_inc_and_print(dev,
6285                                                 BTRFS_DEV_STAT_FLUSH_ERRS);
6286                 }
6287         }
6288
6289         if (bio == bbio->orig_bio)
6290                 is_orig_bio = 1;
6291
6292         btrfs_bio_counter_dec(bbio->fs_info);
6293
6294         if (atomic_dec_and_test(&bbio->stripes_pending)) {
6295                 if (!is_orig_bio) {
6296                         bio_put(bio);
6297                         bio = bbio->orig_bio;
6298                 }
6299
6300                 btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
6301                 /* only send an error to the higher layers if it is
6302                  * beyond the tolerance of the btrfs bio
6303                  */
6304                 if (atomic_read(&bbio->error) > bbio->max_errors) {
6305                         bio->bi_status = BLK_STS_IOERR;
6306                 } else {
6307                         /*
6308                          * this bio is actually up to date; we didn't
6309                          * go over the max number of errors
6310                          */
6311                         bio->bi_status = BLK_STS_OK;
6312                 }
6313
6314                 btrfs_end_bbio(bbio, bio);
6315         } else if (!is_orig_bio) {
6316                 bio_put(bio);
6317         }
6318 }
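
/*
 * Example of the tolerance check in btrfs_end_bio() above: a RAID1 write
 * has num_stripes = 2 and max_errors = 1, so a single failed copy still
 * completes the original bio with BLK_STS_OK; only a second failure
 * (error count 2 > max_errors 1) propagates BLK_STS_IOERR.
 */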
6319
6320 static void submit_stripe_bio(struct btrfs_bio *bbio, struct bio *bio,
6321                               u64 physical, struct btrfs_device *dev)
6322 {
6323         struct btrfs_fs_info *fs_info = bbio->fs_info;
6324
6325         bio->bi_private = bbio;
6326         btrfs_io_bio(bio)->device = dev;
6327         bio->bi_end_io = btrfs_end_bio;
6328         bio->bi_iter.bi_sector = physical >> 9;
6329         btrfs_debug_in_rcu(fs_info,
6330         "btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u",
6331                 bio_op(bio), bio->bi_opf, (u64)bio->bi_iter.bi_sector,
6332                 (unsigned long)dev->bdev->bd_dev, rcu_str_deref(dev->name),
6333                 dev->devid, bio->bi_iter.bi_size);
6334         bio_set_dev(bio, dev->bdev);
6335
6336         btrfs_bio_counter_inc_noblocked(fs_info);
6337
6338         btrfsic_submit_bio(bio);
6339 }
6340
6341 static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
6342 {
6343         atomic_inc(&bbio->error);
6344         if (atomic_dec_and_test(&bbio->stripes_pending)) {
6345                 /* Should be the original bio. */
6346                 WARN_ON(bio != bbio->orig_bio);
6347
6348                 btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
6349                 bio->bi_iter.bi_sector = logical >> 9;
6350                 if (atomic_read(&bbio->error) > bbio->max_errors)
6351                         bio->bi_status = BLK_STS_IOERR;
6352                 else
6353                         bio->bi_status = BLK_STS_OK;
6354                 btrfs_end_bbio(bbio, bio);
6355         }
6356 }
6357
6358 blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
6359                            int mirror_num)
6360 {
6361         struct btrfs_device *dev;
6362         struct bio *first_bio = bio;
6363         u64 logical = (u64)bio->bi_iter.bi_sector << 9;
6364         u64 length = 0;
6365         u64 map_length;
6366         int ret;
6367         int dev_nr;
6368         int total_devs;
6369         struct btrfs_bio *bbio = NULL;
6370
6371         length = bio->bi_iter.bi_size;
6372         map_length = length;
6373
6374         btrfs_bio_counter_inc_blocked(fs_info);
6375         ret = __btrfs_map_block(fs_info, btrfs_op(bio), logical,
6376                                 &map_length, &bbio, mirror_num, 1);
6377         if (ret) {
6378                 btrfs_bio_counter_dec(fs_info);
6379                 return errno_to_blk_status(ret);
6380         }
6381
6382         total_devs = bbio->num_stripes;
6383         bbio->orig_bio = first_bio;
6384         bbio->private = first_bio->bi_private;
6385         bbio->end_io = first_bio->bi_end_io;
6386         bbio->fs_info = fs_info;
6387         atomic_set(&bbio->stripes_pending, bbio->num_stripes);
6388
6389         if ((bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
6390             ((bio_op(bio) == REQ_OP_WRITE) || (mirror_num > 1))) {
6391                 /* In this case, map_length has been set to the length
6392                  * of a single stripe, not the whole write. */
6393                 if (bio_op(bio) == REQ_OP_WRITE) {
6394                         ret = raid56_parity_write(fs_info, bio, bbio,
6395                                                   map_length);
6396                 } else {
6397                         ret = raid56_parity_recover(fs_info, bio, bbio,
6398                                                     map_length, mirror_num, 1);
6399                 }
6400
6401                 btrfs_bio_counter_dec(fs_info);
6402                 return errno_to_blk_status(ret);
6403         }
6404
6405         if (map_length < length) {
6406                 btrfs_crit(fs_info,
6407                            "mapping failed logical %llu bio len %llu len %llu",
6408                            logical, length, map_length);
6409                 BUG();
6410         }
6411
6412         for (dev_nr = 0; dev_nr < total_devs; dev_nr++) {
6413                 dev = bbio->stripes[dev_nr].dev;
6414                 if (!dev || !dev->bdev || test_bit(BTRFS_DEV_STATE_MISSING,
6415                                                    &dev->dev_state) ||
6416                     (bio_op(first_bio) == REQ_OP_WRITE &&
6417                     !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))) {
6418                         bbio_error(bbio, first_bio, logical);
6419                         continue;
6420                 }
6421
6422                 if (dev_nr < total_devs - 1)
6423                         bio = btrfs_bio_clone(first_bio);
6424                 else
6425                         bio = first_bio;
6426
6427                 submit_stripe_bio(bbio, bio, bbio->stripes[dev_nr].physical, dev);
6428         }
6429         btrfs_bio_counter_dec(fs_info);
6430         return BLK_STS_OK;
6431 }
6432
6433 /*
6434  * Find a device specified by @devid or @uuid in the list of @fs_devices, or
6435  * return NULL.
6436  *
6437  * If devid and uuid are both specified, the match must be exact, otherwise
6438  * only devid is used.
6439  *
6440  * Note: @seed is currently unused; the seed devices are always traversed.
6441  */
6442 struct btrfs_device *btrfs_find_device(struct btrfs_fs_devices *fs_devices,
6443                                        u64 devid, u8 *uuid, u8 *fsid,
6444                                        bool seed)
6445 {
6446         struct btrfs_device *device;
6447         struct btrfs_fs_devices *seed_devs;
6448
6449         if (!fsid || !memcmp(fs_devices->metadata_uuid, fsid, BTRFS_FSID_SIZE)) {
6450                 list_for_each_entry(device, &fs_devices->devices, dev_list) {
6451                         if (device->devid == devid &&
6452                             (!uuid || memcmp(device->uuid, uuid,
6453                                              BTRFS_UUID_SIZE) == 0))
6454                                 return device;
6455                 }
6456         }
6457
6458         list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
6459                 if (!fsid ||
6460                     !memcmp(seed_devs->metadata_uuid, fsid, BTRFS_FSID_SIZE)) {
6461                         list_for_each_entry(device, &seed_devs->devices,
6462                                             dev_list) {
6463                                 if (device->devid == devid &&
6464                                     (!uuid || memcmp(device->uuid, uuid,
6465                                                      BTRFS_UUID_SIZE) == 0))
6466                                         return device;
6467                         }
6468                 }
6469         }
6470
6471         return NULL;
6472 }
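
/*
 * Hypothetical usage sketch for btrfs_find_device(); the devid and uuid
 * below are made up and the snippet is not part of the build:
 */
#if 0
	u8 uuid[BTRFS_UUID_SIZE] = { 0 };	/* made-up uuid */
	struct btrfs_device *dev;

	/* match devid 3 with this uuid in the current fs and its seeds */
	dev = btrfs_find_device(fs_info->fs_devices, 3, uuid, NULL, true);
	if (!dev)
		return -ENOENT;
#endif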
6473
6474 static struct btrfs_device *add_missing_dev(struct btrfs_fs_devices *fs_devices,
6475                                             u64 devid, u8 *dev_uuid)
6476 {
6477         struct btrfs_device *device;
6478         unsigned int nofs_flag;
6479
6480         /*
6481          * We call this under the chunk_mutex, so we want to use NOFS for this
6482          * allocation, however we don't want to change btrfs_alloc_device() to
6483          * always do NOFS because we use it in a lot of other GFP_KERNEL safe
6484          * places.
6485          */
6486         nofs_flag = memalloc_nofs_save();
6487         device = btrfs_alloc_device(NULL, &devid, dev_uuid);
6488         memalloc_nofs_restore(nofs_flag);
6489         if (IS_ERR(device))
6490                 return device;
6491
6492         list_add(&device->dev_list, &fs_devices->devices);
6493         device->fs_devices = fs_devices;
6494         fs_devices->num_devices++;
6495
6496         set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
6497         fs_devices->missing_devices++;
6498
6499         return device;
6500 }
6501
6502 /**
6503  * btrfs_alloc_device - allocate struct btrfs_device
6504  * @fs_info:    used only for generating a new devid, can be NULL if
6505  *              devid is provided (i.e. @devid != NULL).
6506  * @devid:      a pointer to devid for this device.  If NULL a new devid
6507  *              is generated.
6508  * @uuid:       a pointer to UUID for this device.  If NULL a new UUID
6509  *              is generated.
6510  *
6511  * Return: a pointer to a new &struct btrfs_device on success; ERR_PTR()
6512  * on error.  Returned struct is not linked onto any lists and must be
6513  * destroyed with btrfs_free_device.
6514  */
6515 struct btrfs_device *btrfs_alloc_device(struct btrfs_fs_info *fs_info,
6516                                         const u64 *devid,
6517                                         const u8 *uuid)
6518 {
6519         struct btrfs_device *dev;
6520         u64 tmp;
6521
6522         if (WARN_ON(!devid && !fs_info))
6523                 return ERR_PTR(-EINVAL);
6524
6525         dev = __alloc_device(fs_info);
6526         if (IS_ERR(dev))
6527                 return dev;
6528
6529         if (devid)
6530                 tmp = *devid;
6531         else {
6532                 int ret;
6533
6534                 ret = find_next_devid(fs_info, &tmp);
6535                 if (ret) {
6536                         btrfs_free_device(dev);
6537                         return ERR_PTR(ret);
6538                 }
6539         }
6540         dev->devid = tmp;
6541
6542         if (uuid)
6543                 memcpy(dev->uuid, uuid, BTRFS_UUID_SIZE);
6544         else
6545                 generate_random_uuid(dev->uuid);
6546
6547         return dev;
6548 }
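
/*
 * Hypothetical usage sketch for btrfs_alloc_device(); the devid is made
 * up and the snippet is not part of the build:
 */
#if 0
	struct btrfs_device *dev;
	u64 devid = 42;

	/* explicit devid, freshly generated uuid; fs_info may be NULL */
	dev = btrfs_alloc_device(NULL, &devid, NULL);
	if (IS_ERR(dev))
		return PTR_ERR(dev);

	/* not linked onto any list yet, so plain btrfs_free_device() */
	btrfs_free_device(dev);
#endif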
6549
6550 static void btrfs_report_missing_device(struct btrfs_fs_info *fs_info,
6551                                         u64 devid, u8 *uuid, bool error)
6552 {
6553         if (error)
6554                 btrfs_err_rl(fs_info, "devid %llu uuid %pU is missing",
6555                               devid, uuid);
6556         else
6557                 btrfs_warn_rl(fs_info, "devid %llu uuid %pU is missing",
6558                               devid, uuid);
6559 }
6560
6561 static u64 calc_stripe_length(u64 type, u64 chunk_len, int num_stripes)
6562 {
6563         int index = btrfs_bg_flags_to_raid_index(type);
6564         int ncopies = btrfs_raid_array[index].ncopies;
6565         const int nparity = btrfs_raid_array[index].nparity;
6566         int data_stripes;
6567
6568         if (nparity)
6569                 data_stripes = num_stripes - nparity;
6570         else
6571                 data_stripes = num_stripes / ncopies;
6572
6573         return div_u64(chunk_len, data_stripes);
6574 }
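
/*
 * Examples for calc_stripe_length() above, which yields the per-device
 * extent size of a chunk: a 2G RAID1 chunk (num_stripes = 2, ncopies = 2)
 * has data_stripes = 1 and thus occupies 2G on each device, while a 3G
 * RAID5 chunk on 4 devices (nparity = 1) has data_stripes = 3 and
 * occupies 1G per device.
 */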
6575
6576 static int read_one_chunk(struct btrfs_key *key, struct extent_buffer *leaf,
6577                           struct btrfs_chunk *chunk)
6578 {
6579         struct btrfs_fs_info *fs_info = leaf->fs_info;
6580         struct extent_map_tree *map_tree = &fs_info->mapping_tree;
6581         struct map_lookup *map;
6582         struct extent_map *em;
6583         u64 logical;
6584         u64 length;
6585         u64 devid;
6586         u8 uuid[BTRFS_UUID_SIZE];
6587         int num_stripes;
6588         int ret;
6589         int i;
6590
6591         logical = key->offset;
6592         length = btrfs_chunk_length(leaf, chunk);
6593         num_stripes = btrfs_chunk_num_stripes(leaf, chunk);
6594
6595         /*
6596          * Only need to verify chunk item if we're reading from sys chunk array,
6597          * as chunk item in tree block is already verified by tree-checker.
6598          */
6599         if (leaf->start == BTRFS_SUPER_INFO_OFFSET) {
6600                 ret = btrfs_check_chunk_valid(leaf, chunk, logical);
6601                 if (ret)
6602                         return ret;
6603         }
6604
6605         read_lock(&map_tree->lock);
6606         em = lookup_extent_mapping(map_tree, logical, 1);
6607         read_unlock(&map_tree->lock);
6608
6609         /* already mapped? */
6610         if (em && em->start <= logical && em->start + em->len > logical) {
6611                 free_extent_map(em);
6612                 return 0;
6613         } else if (em) {
6614                 free_extent_map(em);
6615         }
6616
6617         em = alloc_extent_map();
6618         if (!em)
6619                 return -ENOMEM;
6620         map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS);
6621         if (!map) {
6622                 free_extent_map(em);
6623                 return -ENOMEM;
6624         }
6625
6626         set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
6627         em->map_lookup = map;
6628         em->start = logical;
6629         em->len = length;
6630         em->orig_start = 0;
6631         em->block_start = 0;
6632         em->block_len = em->len;
6633
6634         map->num_stripes = num_stripes;
6635         map->io_width = btrfs_chunk_io_width(leaf, chunk);
6636         map->io_align = btrfs_chunk_io_align(leaf, chunk);
6637         map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
6638         map->type = btrfs_chunk_type(leaf, chunk);
6639         map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
6640         map->verified_stripes = 0;
6641         em->orig_block_len = calc_stripe_length(map->type, em->len,
6642                                                 map->num_stripes);
6643         for (i = 0; i < num_stripes; i++) {
6644                 map->stripes[i].physical =
6645                         btrfs_stripe_offset_nr(leaf, chunk, i);
6646                 devid = btrfs_stripe_devid_nr(leaf, chunk, i);
6647                 read_extent_buffer(leaf, uuid, (unsigned long)
6648                                    btrfs_stripe_dev_uuid_nr(chunk, i),
6649                                    BTRFS_UUID_SIZE);
6650                 map->stripes[i].dev = btrfs_find_device(fs_info->fs_devices,
6651                                                         devid, uuid, NULL, true);
6652                 if (!map->stripes[i].dev &&
6653                     !btrfs_test_opt(fs_info, DEGRADED)) {
6654                         free_extent_map(em);
6655                         btrfs_report_missing_device(fs_info, devid, uuid, true);
6656                         return -ENOENT;
6657                 }
6658                 if (!map->stripes[i].dev) {
6659                         map->stripes[i].dev =
6660                                 add_missing_dev(fs_info->fs_devices, devid,
6661                                                 uuid);
6662                         if (IS_ERR(map->stripes[i].dev)) {
6663                                 free_extent_map(em);
6664                                 btrfs_err(fs_info,
6665                                         "failed to init missing dev %llu: %ld",
6666                                         devid, PTR_ERR(map->stripes[i].dev));
6667                                 return PTR_ERR(map->stripes[i].dev);
6668                         }
6669                         btrfs_report_missing_device(fs_info, devid, uuid, false);
6670                 }
6671                 set_bit(BTRFS_DEV_STATE_IN_FS_METADATA,
6672                                 &(map->stripes[i].dev->dev_state));
6673
6674         }
6675
6676         write_lock(&map_tree->lock);
6677         ret = add_extent_mapping(map_tree, em, 0);
6678         write_unlock(&map_tree->lock);
6679         if (ret < 0) {
6680                 btrfs_err(fs_info,
6681                           "failed to add chunk map, start=%llu len=%llu: %d",
6682                           em->start, em->len, ret);
6683         }
6684         free_extent_map(em);
6685
6686         return ret;
6687 }
6688
6689 static void fill_device_from_item(struct extent_buffer *leaf,
6690                                  struct btrfs_dev_item *dev_item,
6691                                  struct btrfs_device *device)
6692 {
6693         unsigned long ptr;
6694
6695         device->devid = btrfs_device_id(leaf, dev_item);
6696         device->disk_total_bytes = btrfs_device_total_bytes(leaf, dev_item);
6697         device->total_bytes = device->disk_total_bytes;
6698         device->commit_total_bytes = device->disk_total_bytes;
6699         device->bytes_used = btrfs_device_bytes_used(leaf, dev_item);
6700         device->commit_bytes_used = device->bytes_used;
6701         device->type = btrfs_device_type(leaf, dev_item);
6702         device->io_align = btrfs_device_io_align(leaf, dev_item);
6703         device->io_width = btrfs_device_io_width(leaf, dev_item);
6704         device->sector_size = btrfs_device_sector_size(leaf, dev_item);
6705         WARN_ON(device->devid == BTRFS_DEV_REPLACE_DEVID);
6706         clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
6707
6708         ptr = btrfs_device_uuid(dev_item);
6709         read_extent_buffer(leaf, device->uuid, ptr, BTRFS_UUID_SIZE);
6710 }
6711
6712 static struct btrfs_fs_devices *open_seed_devices(struct btrfs_fs_info *fs_info,
6713                                                   u8 *fsid)
6714 {
6715         struct btrfs_fs_devices *fs_devices;
6716         int ret;
6717
6718         lockdep_assert_held(&uuid_mutex);
6719         ASSERT(fsid);
6720
6721         /* This will match only for multi-device seed fs */
6722         list_for_each_entry(fs_devices, &fs_info->fs_devices->seed_list, seed_list)
6723                 if (!memcmp(fs_devices->fsid, fsid, BTRFS_FSID_SIZE))
6724                         return fs_devices;
6725
6726
6727         fs_devices = find_fsid(fsid, NULL);
6728         if (!fs_devices) {
6729                 if (!btrfs_test_opt(fs_info, DEGRADED))
6730                         return ERR_PTR(-ENOENT);
6731
6732                 fs_devices = alloc_fs_devices(fsid, NULL);
6733                 if (IS_ERR(fs_devices))
6734                         return fs_devices;
6735
6736                 fs_devices->seeding = true;
6737                 fs_devices->opened = 1;
6738                 return fs_devices;
6739         }
6740
6741         /*
6742          * Upon first call for a seed fs fsid, just create a private copy of the
6743          * respective fs_devices and anchor it at fs_info->fs_devices->seed_list
6744          */
6745         fs_devices = clone_fs_devices(fs_devices);
6746         if (IS_ERR(fs_devices))
6747                 return fs_devices;
6748
6749         ret = open_fs_devices(fs_devices, FMODE_READ, fs_info->bdev_holder);
6750         if (ret) {
6751                 free_fs_devices(fs_devices);
6752                 return ERR_PTR(ret);
6753         }
6754
6755         if (!fs_devices->seeding) {
6756                 close_fs_devices(fs_devices);
6757                 free_fs_devices(fs_devices);
6758                 return ERR_PTR(-EINVAL);
6759         }
6760
6761         list_add(&fs_devices->seed_list, &fs_info->fs_devices->seed_list);
6762
6763         return fs_devices;
6764 }
6765
6766 static int read_one_dev(struct extent_buffer *leaf,
6767                         struct btrfs_dev_item *dev_item)
6768 {
6769         struct btrfs_fs_info *fs_info = leaf->fs_info;
6770         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
6771         struct btrfs_device *device;
6772         u64 devid;
6773         int ret;
6774         u8 fs_uuid[BTRFS_FSID_SIZE];
6775         u8 dev_uuid[BTRFS_UUID_SIZE];
6776
6777         devid = btrfs_device_id(leaf, dev_item);
6778         read_extent_buffer(leaf, dev_uuid, btrfs_device_uuid(dev_item),
6779                            BTRFS_UUID_SIZE);
6780         read_extent_buffer(leaf, fs_uuid, btrfs_device_fsid(dev_item),
6781                            BTRFS_FSID_SIZE);
6782
6783         if (memcmp(fs_uuid, fs_devices->metadata_uuid, BTRFS_FSID_SIZE)) {
6784                 fs_devices = open_seed_devices(fs_info, fs_uuid);
6785                 if (IS_ERR(fs_devices))
6786                         return PTR_ERR(fs_devices);
6787         }
6788
6789         device = btrfs_find_device(fs_info->fs_devices, devid, dev_uuid,
6790                                    fs_uuid, true);
6791         if (!device) {
6792                 if (!btrfs_test_opt(fs_info, DEGRADED)) {
6793                         btrfs_report_missing_device(fs_info, devid,
6794                                                         dev_uuid, true);
6795                         return -ENOENT;
6796                 }
6797
6798                 device = add_missing_dev(fs_devices, devid, dev_uuid);
6799                 if (IS_ERR(device)) {
6800                         btrfs_err(fs_info,
6801                                 "failed to add missing dev %llu: %ld",
6802                                 devid, PTR_ERR(device));
6803                         return PTR_ERR(device);
6804                 }
6805                 btrfs_report_missing_device(fs_info, devid, dev_uuid, false);
6806         } else {
6807                 if (!device->bdev) {
6808                         if (!btrfs_test_opt(fs_info, DEGRADED)) {
6809                                 btrfs_report_missing_device(fs_info,
6810                                                 devid, dev_uuid, true);
6811                                 return -ENOENT;
6812                         }
6813                         btrfs_report_missing_device(fs_info, devid,
6814                                                         dev_uuid, false);
6815                 }
6816
6817                 if (!device->bdev &&
6818                     !test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state)) {
6819                         /*
6820                          * This happens when a device that was properly set
6821                          * up in the device info lists suddenly goes bad.
6822                          * device->bdev is NULL, and so we have to set the
6823                          * BTRFS_DEV_STATE_MISSING bit here.
6824                          */
6825                         device->fs_devices->missing_devices++;
6826                         set_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state);
6827                 }
6828
6829                 /* Move the device to its own fs_devices */
6830                 if (device->fs_devices != fs_devices) {
6831                         ASSERT(test_bit(BTRFS_DEV_STATE_MISSING,
6832                                                         &device->dev_state));
6833
6834                         list_move(&device->dev_list, &fs_devices->devices);
6835                         device->fs_devices->num_devices--;
6836                         fs_devices->num_devices++;
6837
6838                         device->fs_devices->missing_devices--;
6839                         fs_devices->missing_devices++;
6840
6841                         device->fs_devices = fs_devices;
6842                 }
6843         }
6844
6845         if (device->fs_devices != fs_info->fs_devices) {
6846                 BUG_ON(test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state));
6847                 if (device->generation !=
6848                     btrfs_device_generation(leaf, dev_item))
6849                         return -EINVAL;
6850         }
6851
6852         fill_device_from_item(leaf, dev_item, device);
6853         set_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
6854         if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
6855            !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state)) {
6856                 device->fs_devices->total_rw_bytes += device->total_bytes;
6857                 atomic64_add(device->total_bytes - device->bytes_used,
6858                                 &fs_info->free_chunk_space);
6859         }
6860         ret = 0;
6861         return ret;
6862 }
6863
6864 int btrfs_read_sys_array(struct btrfs_fs_info *fs_info)
6865 {
6866         struct btrfs_root *root = fs_info->tree_root;
6867         struct btrfs_super_block *super_copy = fs_info->super_copy;
6868         struct extent_buffer *sb;
6869         struct btrfs_disk_key *disk_key;
6870         struct btrfs_chunk *chunk;
6871         u8 *array_ptr;
6872         unsigned long sb_array_offset;
6873         int ret = 0;
6874         u32 num_stripes;
6875         u32 array_size;
6876         u32 len = 0;
6877         u32 cur_offset;
6878         u64 type;
6879         struct btrfs_key key;
6880
6881         ASSERT(BTRFS_SUPER_INFO_SIZE <= fs_info->nodesize);
6882         /*
6883          * This will create an extent buffer of nodesize; superblock size is
6884          * fixed to BTRFS_SUPER_INFO_SIZE. If nodesize > sb size, this will
6885          * overallocate, but we can keep it as-is as only the first page is used.
6886          */
6887         sb = btrfs_find_create_tree_block(fs_info, BTRFS_SUPER_INFO_OFFSET);
6888         if (IS_ERR(sb))
6889                 return PTR_ERR(sb);
6890         set_extent_buffer_uptodate(sb);
6891         btrfs_set_buffer_lockdep_class(root->root_key.objectid, sb, 0);
6892         /*
6893          * The sb extent buffer is artificial and just used to read the system array.
6894  * set_extent_buffer_uptodate() call does not properly mark all its
6895          * pages up-to-date when the page is larger: extent does not cover the
6896          * whole page and consequently check_page_uptodate does not find all
6897          * the page's extents up-to-date (the hole beyond sb),
6898          * write_extent_buffer then triggers a WARN_ON.
6899          *
6900  * Regular short extents go through the mark_extent_buffer_dirty/writeback cycle,
6901          * but sb spans only this function. Add an explicit SetPageUptodate call
6902          * to silence the warning eg. on PowerPC 64.
6903          */
6904         if (PAGE_SIZE > BTRFS_SUPER_INFO_SIZE)
6905                 SetPageUptodate(sb->pages[0]);
6906
6907         write_extent_buffer(sb, super_copy, 0, BTRFS_SUPER_INFO_SIZE);
6908         array_size = btrfs_super_sys_array_size(super_copy);
6909
6910         array_ptr = super_copy->sys_chunk_array;
6911         sb_array_offset = offsetof(struct btrfs_super_block, sys_chunk_array);
6912         cur_offset = 0;
6913
6914         while (cur_offset < array_size) {
6915                 disk_key = (struct btrfs_disk_key *)array_ptr;
6916                 len = sizeof(*disk_key);
6917                 if (cur_offset + len > array_size)
6918                         goto out_short_read;
6919
6920                 btrfs_disk_key_to_cpu(&key, disk_key);
6921
6922                 array_ptr += len;
6923                 sb_array_offset += len;
6924                 cur_offset += len;
6925
6926                 if (key.type != BTRFS_CHUNK_ITEM_KEY) {
6927                         btrfs_err(fs_info,
6928                             "unexpected item type %u in sys_array at offset %u",
6929                                   (u32)key.type, cur_offset);
6930                         ret = -EIO;
6931                         break;
6932                 }
6933
6934                 chunk = (struct btrfs_chunk *)sb_array_offset;
6935                 /*
6936                  * At least one btrfs_chunk with one stripe must be present,
6937                  * exact stripe count check comes afterwards
6938                  */
6939                 len = btrfs_chunk_item_size(1);
6940                 if (cur_offset + len > array_size)
6941                         goto out_short_read;
6942
6943                 num_stripes = btrfs_chunk_num_stripes(sb, chunk);
6944                 if (!num_stripes) {
6945                         btrfs_err(fs_info,
6946                         "invalid number of stripes %u in sys_array at offset %u",
6947                                   num_stripes, cur_offset);
6948                         ret = -EIO;
6949                         break;
6950                 }
6951
6952                 type = btrfs_chunk_type(sb, chunk);
6953                 if ((type & BTRFS_BLOCK_GROUP_SYSTEM) == 0) {
6954                         btrfs_err(fs_info,
6955                         "invalid chunk type %llu in sys_array at offset %u",
6956                                   type, cur_offset);
6957                         ret = -EIO;
6958                         break;
6959                 }
6960
6961                 len = btrfs_chunk_item_size(num_stripes);
6962                 if (cur_offset + len > array_size)
6963                         goto out_short_read;
6964
6965                 ret = read_one_chunk(&key, sb, chunk);
6966                 if (ret)
6967                         break;
6968
6969                 array_ptr += len;
6970                 sb_array_offset += len;
6971                 cur_offset += len;
6972         }
6973         clear_extent_buffer_uptodate(sb);
6974         free_extent_buffer_stale(sb);
6975         return ret;
6976
6977 out_short_read:
6978         btrfs_err(fs_info, "sys_array too short to read %u bytes at offset %u",
6979                         len, cur_offset);
6980         clear_extent_buffer_uptodate(sb);
6981         free_extent_buffer_stale(sb);
6982         return -EIO;
6983 }
6984
6985 /*
6986  * Check if all chunks in the fs are OK for read-write degraded mount
6987  *
6988  * If the @failing_dev is specified, it's accounted as missing.
6989  *
6990  * Return true if all chunks meet the minimal RW mount requirements.
6991  * Return false if any chunk doesn't meet the minimal RW mount requirements.
6992  */
6993 bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info,
6994                                         struct btrfs_device *failing_dev)
6995 {
6996         struct extent_map_tree *map_tree = &fs_info->mapping_tree;
6997         struct extent_map *em;
6998         u64 next_start = 0;
6999         bool ret = true;
7000
7001         read_lock(&map_tree->lock);
7002         em = lookup_extent_mapping(map_tree, 0, (u64)-1);
7003         read_unlock(&map_tree->lock);
7004         /* No chunk at all? Return false anyway */
7005         if (!em) {
7006                 ret = false;
7007                 goto out;
7008         }
7009         while (em) {
7010                 struct map_lookup *map;
7011                 int missing = 0;
7012                 int max_tolerated;
7013                 int i;
7014
7015                 map = em->map_lookup;
7016                 max_tolerated =
7017                         btrfs_get_num_tolerated_disk_barrier_failures(
7018                                         map->type);
7019                 for (i = 0; i < map->num_stripes; i++) {
7020                         struct btrfs_device *dev = map->stripes[i].dev;
7021
7022                         if (!dev || !dev->bdev ||
7023                             test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state) ||
7024                             dev->last_flush_error)
7025                                 missing++;
7026                         else if (failing_dev && failing_dev == dev)
7027                                 missing++;
7028                 }
7029                 if (missing > max_tolerated) {
7030                         if (!failing_dev)
7031                                 btrfs_warn(fs_info,
7032         "chunk %llu missing %d devices, max tolerance is %d for writable mount",
7033                                    em->start, missing, max_tolerated);
7034                         free_extent_map(em);
7035                         ret = false;
7036                         goto out;
7037                 }
7038                 next_start = extent_map_end(em);
7039                 free_extent_map(em);
7040
7041                 read_lock(&map_tree->lock);
7042                 em = lookup_extent_mapping(map_tree, next_start,
7043                                            (u64)(-1) - next_start);
7044                 read_unlock(&map_tree->lock);
7045         }
7046 out:
7047         return ret;
7048 }
7049
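/* Trigger readahead for every child block referenced by an internal node. */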
7050 static void readahead_tree_node_children(struct extent_buffer *node)
7051 {
7052         int i;
7053         const int nr_items = btrfs_header_nritems(node);
7054
7055         for (i = 0; i < nr_items; i++) {
7056                 u64 start;
7057
7058                 start = btrfs_node_blockptr(node, i);
7059                 readahead_tree_block(node->fs_info, start);
7060         }
7061 }
7062
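/*
 * Read all device items and chunk items from the chunk tree, building the
 * in-memory device list and chunk mappings.  Called at mount time.
 */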
7063 int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
7064 {
7065         struct btrfs_root *root = fs_info->chunk_root;
7066         struct btrfs_path *path;
7067         struct extent_buffer *leaf;
7068         struct btrfs_key key;
7069         struct btrfs_key found_key;
7070         int ret;
7071         int slot;
7072         u64 total_dev = 0;
7073         u64 last_ra_node = 0;
7074
7075         path = btrfs_alloc_path();
7076         if (!path)
7077                 return -ENOMEM;
7078
7079         /*
7080          * uuid_mutex is needed only if we are mounting a sprout FS,
7081          * otherwise it is not required.
7082          */
7083         mutex_lock(&uuid_mutex);
7084
7085         /*
7086          * It is possible for mount and umount to race in such a way that
7087          * we execute this code path, but open_fs_devices failed to clear
7088          * total_rw_bytes. We certainly want it cleared before reading the
7089          * device items, so clear it here.
7090          */
7091         fs_info->fs_devices->total_rw_bytes = 0;
7092
7093         /*
7094          * Read all device items, and then all the chunk items. All
7095          * device items are found before any chunk item (their object id
7096          * is smaller than the lowest possible object id for a chunk
7097          * item - BTRFS_FIRST_CHUNK_TREE_OBJECTID).
7098          */
7099         key.objectid = BTRFS_DEV_ITEMS_OBJECTID;
7100         key.offset = 0;
7101         key.type = 0;
7102         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
7103         if (ret < 0)
7104                 goto error;
7105         while (1) {
7106                 struct extent_buffer *node;
7107
7108                 leaf = path->nodes[0];
7109                 slot = path->slots[0];
7110                 if (slot >= btrfs_header_nritems(leaf)) {
7111                         ret = btrfs_next_leaf(root, path);
7112                         if (ret == 0)
7113                                 continue;
7114                         if (ret < 0)
7115                                 goto error;
7116                         break;
7117                 }
7118                 /*
7119          * The nodes on level 1 are not locked, but we don't need locking
7120          * during mount time as nothing else can access the tree.
7121                  */
7122                 node = path->nodes[1];
7123                 if (node) {
7124                         if (last_ra_node != node->start) {
7125                                 readahead_tree_node_children(node);
7126                                 last_ra_node = node->start;
7127                         }
7128                 }
7129                 btrfs_item_key_to_cpu(leaf, &found_key, slot);
7130                 if (found_key.type == BTRFS_DEV_ITEM_KEY) {
7131                         struct btrfs_dev_item *dev_item;
7132                         dev_item = btrfs_item_ptr(leaf, slot,
7133                                                   struct btrfs_dev_item);
7134                         ret = read_one_dev(leaf, dev_item);
7135                         if (ret)
7136                                 goto error;
7137                         total_dev++;
7138                 } else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
7139                         struct btrfs_chunk *chunk;
7140                         chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
7141                         mutex_lock(&fs_info->chunk_mutex);
7142                         ret = read_one_chunk(&found_key, leaf, chunk);
7143                         mutex_unlock(&fs_info->chunk_mutex);
7144                         if (ret)
7145                                 goto error;
7146                 }
7147                 path->slots[0]++;
7148         }
7149
7150         /*
7151          * After loading the chunk tree, we've got all device information,
7152          * so do another round of validation checks.
7153          */
7154         if (total_dev != fs_info->fs_devices->total_devices) {
7155                 btrfs_err(fs_info,
7156            "super_num_devices %llu mismatch with num_devices %llu found here",
7157                           btrfs_super_num_devices(fs_info->super_copy),
7158                           total_dev);
7159                 ret = -EINVAL;
7160                 goto error;
7161         }
7162         if (btrfs_super_total_bytes(fs_info->super_copy) <
7163             fs_info->fs_devices->total_rw_bytes) {
7164                 btrfs_err(fs_info,
7165         "super_total_bytes %llu mismatch with fs_devices total_rw_bytes %llu",
7166                           btrfs_super_total_bytes(fs_info->super_copy),
7167                           fs_info->fs_devices->total_rw_bytes);
7168                 ret = -EINVAL;
7169                 goto error;
7170         }
7171         ret = 0;
7172 error:
7173         mutex_unlock(&uuid_mutex);
7174
7175         btrfs_free_path(path);
7176         return ret;
7177 }
7178
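/* Associate all known devices, including seed devices, with @fs_info. */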
7179 void btrfs_init_devices_late(struct btrfs_fs_info *fs_info)
7180 {
7181         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
7182         struct btrfs_device *device;
7183
7184         fs_devices->fs_info = fs_info;
7185
7186         mutex_lock(&fs_devices->device_list_mutex);
7187         list_for_each_entry(device, &fs_devices->devices, dev_list)
7188                 device->fs_info = fs_info;
7189
7190         list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
7191                 list_for_each_entry(device, &seed_devs->devices, dev_list)
7192                         device->fs_info = fs_info;
7193
7194                 seed_devs->fs_info = fs_info;
7195         }
7196         mutex_unlock(&fs_devices->device_list_mutex);
7197 }
7198
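/* Read the index-th u64 counter of an on-disk dev_stats item. */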
7199 static u64 btrfs_dev_stats_value(const struct extent_buffer *eb,
7200                                  const struct btrfs_dev_stats_item *ptr,
7201                                  int index)
7202 {
7203         u64 val;
7204
7205         read_extent_buffer(eb, &val,
7206                            offsetof(struct btrfs_dev_stats_item, values) +
7207                             ((unsigned long)ptr) + (index * sizeof(u64)),
7208                            sizeof(val));
7209         return val;
7210 }
7211
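/* Write the index-th u64 counter of an on-disk dev_stats item. */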
7212 static void btrfs_set_dev_stats_value(struct extent_buffer *eb,
7213                                       struct btrfs_dev_stats_item *ptr,
7214                                       int index, u64 val)
7215 {
7216         write_extent_buffer(eb, &val,
7217                             offsetof(struct btrfs_dev_stats_item, values) +
7218                              ((unsigned long)ptr) + (index * sizeof(u64)),
7219                             sizeof(val));
7220 }
7221
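/*
 * Load the persistent statistics of one device from its dev_stats item in the
 * device tree.  If no item exists yet, the counters are zeroed and the stats
 * are still marked valid.
 */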
7222 static int btrfs_device_init_dev_stats(struct btrfs_device *device,
7223                                        struct btrfs_path *path)
7224 {
7225         struct btrfs_dev_stats_item *ptr;
7226         struct extent_buffer *eb;
7227         struct btrfs_key key;
7228         int item_size;
7229         int i, ret, slot;
7230
7231         key.objectid = BTRFS_DEV_STATS_OBJECTID;
7232         key.type = BTRFS_PERSISTENT_ITEM_KEY;
7233         key.offset = device->devid;
7234         ret = btrfs_search_slot(NULL, device->fs_info->dev_root, &key, path, 0, 0);
7235         if (ret) {
7236                 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7237                         btrfs_dev_stat_set(device, i, 0);
7238                 device->dev_stats_valid = 1;
7239                 btrfs_release_path(path);
7240                 return ret < 0 ? ret : 0;
7241         }
7242         slot = path->slots[0];
7243         eb = path->nodes[0];
7244         item_size = btrfs_item_size_nr(eb, slot);
7245
7246         ptr = btrfs_item_ptr(eb, slot, struct btrfs_dev_stats_item);
7247
7248         for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
7249                 if (item_size >= (1 + i) * sizeof(__le64))
7250                         btrfs_dev_stat_set(device, i,
7251                                            btrfs_dev_stats_value(eb, ptr, i));
7252                 else
7253                         btrfs_dev_stat_set(device, i, 0);
7254         }
7255
7256         device->dev_stats_valid = 1;
7257         btrfs_dev_stat_print_on_load(device);
7258         btrfs_release_path(path);
7259
7260         return 0;
7261 }
7262
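/* Load persistent device statistics for all devices, including seed devices. */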
7263 int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
7264 {
7265         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices, *seed_devs;
7266         struct btrfs_device *device;
7267         struct btrfs_path *path = NULL;
7268         int ret = 0;
7269
7270         path = btrfs_alloc_path();
7271         if (!path)
7272                 return -ENOMEM;
7273
7274         mutex_lock(&fs_devices->device_list_mutex);
7275         list_for_each_entry(device, &fs_devices->devices, dev_list) {
7276                 ret = btrfs_device_init_dev_stats(device, path);
7277                 if (ret)
7278                         goto out;
7279         }
7280         list_for_each_entry(seed_devs, &fs_devices->seed_list, seed_list) {
7281                 list_for_each_entry(device, &seed_devs->devices, dev_list) {
7282                         ret = btrfs_device_init_dev_stats(device, path);
7283                         if (ret)
7284                                 goto out;
7285                 }
7286         }
7287 out:
7288         mutex_unlock(&fs_devices->device_list_mutex);
7289
7290         btrfs_free_path(path);
7291         return ret;
7292 }
7293
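/*
 * Write the in-memory statistics of @device back to its dev_stats item,
 * deleting and re-creating the item if the existing one is too small.
 */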
7294 static int update_dev_stat_item(struct btrfs_trans_handle *trans,
7295                                 struct btrfs_device *device)
7296 {
7297         struct btrfs_fs_info *fs_info = trans->fs_info;
7298         struct btrfs_root *dev_root = fs_info->dev_root;
7299         struct btrfs_path *path;
7300         struct btrfs_key key;
7301         struct extent_buffer *eb;
7302         struct btrfs_dev_stats_item *ptr;
7303         int ret;
7304         int i;
7305
7306         key.objectid = BTRFS_DEV_STATS_OBJECTID;
7307         key.type = BTRFS_PERSISTENT_ITEM_KEY;
7308         key.offset = device->devid;
7309
7310         path = btrfs_alloc_path();
7311         if (!path)
7312                 return -ENOMEM;
7313         ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
7314         if (ret < 0) {
7315                 btrfs_warn_in_rcu(fs_info,
7316                         "error %d while searching for dev_stats item for device %s",
7317                               ret, rcu_str_deref(device->name));
7318                 goto out;
7319         }
7320
7321         if (ret == 0 &&
7322             btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
7323                 /* need to delete old one and insert a new one */
7324                 ret = btrfs_del_item(trans, dev_root, path);
7325                 if (ret != 0) {
7326                         btrfs_warn_in_rcu(fs_info,
7327                                 "delete too small dev_stats item for device %s failed %d",
7328                                       rcu_str_deref(device->name), ret);
7329                         goto out;
7330                 }
7331                 ret = 1;
7332         }
7333
7334         if (ret == 1) {
7335                 /* need to insert a new item */
7336                 btrfs_release_path(path);
7337                 ret = btrfs_insert_empty_item(trans, dev_root, path,
7338                                               &key, sizeof(*ptr));
7339                 if (ret < 0) {
7340                         btrfs_warn_in_rcu(fs_info,
7341                                 "insert dev_stats item for device %s failed %d",
7342                                 rcu_str_deref(device->name), ret);
7343                         goto out;
7344                 }
7345         }
7346
7347         eb = path->nodes[0];
7348         ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
7349         for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7350                 btrfs_set_dev_stats_value(eb, ptr, i,
7351                                           btrfs_dev_stat_read(device, i));
7352         btrfs_mark_buffer_dirty(eb);
7353
7354 out:
7355         btrfs_free_path(path);
7356         return ret;
7357 }
7358
7359 /*
7360  * Called from commit_transaction. Writes all changed device stats to disk.
7361  */
7362 int btrfs_run_dev_stats(struct btrfs_trans_handle *trans)
7363 {
7364         struct btrfs_fs_info *fs_info = trans->fs_info;
7365         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7366         struct btrfs_device *device;
7367         int stats_cnt;
7368         int ret = 0;
7369
7370         mutex_lock(&fs_devices->device_list_mutex);
7371         list_for_each_entry(device, &fs_devices->devices, dev_list) {
7372                 stats_cnt = atomic_read(&device->dev_stats_ccnt);
7373                 if (!device->dev_stats_valid || stats_cnt == 0)
7374                         continue;
7375
7377                 /*
7378                  * There is a LOAD-LOAD control dependency between the value of
7379                  * dev_stats_ccnt and updating the on-disk values which requires
7380                  * reading the in-memory counters. Such control dependencies
7381                  * require explicit read memory barriers.
7382                  *
7383                  * This memory barrier pairs with smp_mb__before_atomic in
7384                  * btrfs_dev_stat_inc/btrfs_dev_stat_set and with the full
7385                  * barrier implied by atomic_xchg in
7386                  * btrfs_dev_stat_read_and_reset.
7387                  */
7388                 smp_rmb();
7389
7390                 ret = update_dev_stat_item(trans, device);
7391                 if (!ret)
7392                         atomic_sub(stats_cnt, &device->dev_stats_ccnt);
7393         }
7394         mutex_unlock(&fs_devices->device_list_mutex);
7395
7396         return ret;
7397 }
7398
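/* Increment one error counter of @dev and log the updated counters. */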
7399 void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
7400 {
7401         btrfs_dev_stat_inc(dev, index);
7402         btrfs_dev_stat_print_on_error(dev);
7403 }
7404
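/* Rate-limited dump of all error counters, skipped until the stats are valid. */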
7405 static void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
7406 {
7407         if (!dev->dev_stats_valid)
7408                 return;
7409         btrfs_err_rl_in_rcu(dev->fs_info,
7410                 "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
7411                            rcu_str_deref(dev->name),
7412                            btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
7413                            btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
7414                            btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
7415                            btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
7416                            btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
7417 }
7418
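/* Log the error counters when the stats are loaded, unless all are zero. */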
7419 static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
7420 {
7421         int i;
7422
7423         for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7424                 if (btrfs_dev_stat_read(dev, i) != 0)
7425                         break;
7426         if (i == BTRFS_DEV_STAT_VALUES_MAX)
7427                 return; /* all values == 0, suppress message */
7428
7429         btrfs_info_in_rcu(dev->fs_info,
7430                 "bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u",
7431                rcu_str_deref(dev->name),
7432                btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
7433                btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
7434                btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
7435                btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
7436                btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
7437 }
7438
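/*
 * Copy the device statistics into the ioctl buffer @stats, optionally
 * resetting the counters when BTRFS_DEV_STATS_RESET is set in @stats->flags.
 */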
7439 int btrfs_get_dev_stats(struct btrfs_fs_info *fs_info,
7440                         struct btrfs_ioctl_get_dev_stats *stats)
7441 {
7442         struct btrfs_device *dev;
7443         struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
7444         int i;
7445
7446         mutex_lock(&fs_devices->device_list_mutex);
7447         dev = btrfs_find_device(fs_info->fs_devices, stats->devid, NULL, NULL,
7448                                 true);
7449         mutex_unlock(&fs_devices->device_list_mutex);
7450
7451         if (!dev) {
7452                 btrfs_warn(fs_info, "get dev_stats failed, device not found");
7453                 return -ENODEV;
7454         } else if (!dev->dev_stats_valid) {
7455                 btrfs_warn(fs_info, "get dev_stats failed, not yet valid");
7456                 return -ENODEV;
7457         } else if (stats->flags & BTRFS_DEV_STATS_RESET) {
7458                 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
7459                         if (stats->nr_items > i)
7460                                 stats->values[i] =
7461                                         btrfs_dev_stat_read_and_reset(dev, i);
7462                         else
7463                                 btrfs_dev_stat_set(dev, i, 0);
7464                 }
7465                 btrfs_info(fs_info, "device stats zeroed by %s (%d)",
7466                            current->comm, task_pid_nr(current));
7467         } else {
7468                 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
7469                         if (stats->nr_items > i)
7470                                 stats->values[i] = btrfs_dev_stat_read(dev, i);
7471         }
7472         if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
7473                 stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
7474         return 0;
7475 }
7476
7477 /*
7478  * Update the size and bytes used for each device where it changed.  This is
7479  * delayed since we would otherwise get errors while writing out the
7480  * superblocks.
7481  *
7482  * Must be invoked during transaction commit.
7483  */
7484 void btrfs_commit_device_sizes(struct btrfs_transaction *trans)
7485 {
7486         struct btrfs_device *curr, *next;
7487
7488         ASSERT(trans->state == TRANS_STATE_COMMIT_DOING);
7489
7490         if (list_empty(&trans->dev_update_list))
7491                 return;
7492
7493         /*
7494          * We don't need the device_list_mutex here.  This list is owned by the
7495          * transaction and the transaction must complete before the device is
7496          * released.
7497          */
7498         mutex_lock(&trans->fs_info->chunk_mutex);
7499         list_for_each_entry_safe(curr, next, &trans->dev_update_list,
7500                                  post_commit_list) {
7501                 list_del_init(&curr->post_commit_list);
7502                 curr->commit_total_bytes = curr->disk_total_bytes;
7503                 curr->commit_bytes_used = curr->bytes_used;
7504         }
7505         mutex_unlock(&trans->fs_info->chunk_mutex);
7506 }
7507
7508 /*
7509  * Multiplicity factor for simple profiles: DUP, RAID1-like and RAID10.
7510  */
7511 int btrfs_bg_type_to_factor(u64 flags)
7512 {
7513         const int index = btrfs_bg_flags_to_raid_index(flags);
7514
7515         return btrfs_raid_array[index].ncopies;
7516 }
7517
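/*
 * Verify that a single dev extent matches a stripe of its owning chunk and
 * does not extend beyond the device boundary.
 */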
7520 static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
7521                                  u64 chunk_offset, u64 devid,
7522                                  u64 physical_offset, u64 physical_len)
7523 {
7524         struct extent_map_tree *em_tree = &fs_info->mapping_tree;
7525         struct extent_map *em;
7526         struct map_lookup *map;
7527         struct btrfs_device *dev;
7528         u64 stripe_len;
7529         bool found = false;
7530         int ret = 0;
7531         int i;
7532
7533         read_lock(&em_tree->lock);
7534         em = lookup_extent_mapping(em_tree, chunk_offset, 1);
7535         read_unlock(&em_tree->lock);
7536
7537         if (!em) {
7538                 btrfs_err(fs_info,
7539 "dev extent physical offset %llu on devid %llu doesn't have corresponding chunk",
7540                           physical_offset, devid);
7541                 ret = -EUCLEAN;
7542                 goto out;
7543         }
7544
7545         map = em->map_lookup;
7546         stripe_len = calc_stripe_length(map->type, em->len, map->num_stripes);
7547         if (physical_len != stripe_len) {
7548                 btrfs_err(fs_info,
7549 "dev extent physical offset %llu on devid %llu length doesn't match chunk %llu, have %llu expect %llu",
7550                           physical_offset, devid, em->start, physical_len,
7551                           stripe_len);
7552                 ret = -EUCLEAN;
7553                 goto out;
7554         }
7555
7556         for (i = 0; i < map->num_stripes; i++) {
7557                 if (map->stripes[i].dev->devid == devid &&
7558                     map->stripes[i].physical == physical_offset) {
7559                         found = true;
7560                         if (map->verified_stripes >= map->num_stripes) {
7561                                 btrfs_err(fs_info,
7562                                 "too many dev extents for chunk %llu found",
7563                                           em->start);
7564                                 ret = -EUCLEAN;
7565                                 goto out;
7566                         }
7567                         map->verified_stripes++;
7568                         break;
7569                 }
7570         }
7571         if (!found) {
7572                 btrfs_err(fs_info,
7573         "dev extent physical offset %llu devid %llu has no corresponding chunk",
7574                         physical_offset, devid);
7575                 ret = -EUCLEAN;
7576         }
7577
7578         /* Make sure no dev extent is beyond device boundary */
7579         dev = btrfs_find_device(fs_info->fs_devices, devid, NULL, NULL, true);
7580         if (!dev) {
7581                 btrfs_err(fs_info, "failed to find devid %llu", devid);
7582                 ret = -EUCLEAN;
7583                 goto out;
7584         }
7585
7586         /* It's possible this device is a dummy for a seed device */
7587         if (dev->disk_total_bytes == 0) {
7588                 struct btrfs_fs_devices *devs;
7589
7590                 devs = list_first_entry(&fs_info->fs_devices->seed_list,
7591                                         struct btrfs_fs_devices, seed_list);
7592                 dev = btrfs_find_device(devs, devid, NULL, NULL, false);
7593                 if (!dev) {
7594                         btrfs_err(fs_info, "failed to find seed devid %llu",
7595                                   devid);
7596                         ret = -EUCLEAN;
7597                         goto out;
7598                 }
7599         }
7600
7601         if (physical_offset + physical_len > dev->disk_total_bytes) {
7602                 btrfs_err(fs_info,
7603 "dev extent devid %llu physical offset %llu len %llu is beyond device boundary %llu",
7604                           devid, physical_offset, physical_len,
7605                           dev->disk_total_bytes);
7606                 ret = -EUCLEAN;
7607                 goto out;
7608         }
7609 out:
7610         free_extent_map(em);
7611         return ret;
7612 }
7613
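/*
 * Verify that every stripe of every chunk mapping has been matched by a dev
 * extent (see verify_one_dev_extent()).
 */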
7614 static int verify_chunk_dev_extent_mapping(struct btrfs_fs_info *fs_info)
7615 {
7616         struct extent_map_tree *em_tree = &fs_info->mapping_tree;
7617         struct extent_map *em;
7618         struct rb_node *node;
7619         int ret = 0;
7620
7621         read_lock(&em_tree->lock);
7622         for (node = rb_first_cached(&em_tree->map); node; node = rb_next(node)) {
7623                 em = rb_entry(node, struct extent_map, rb_node);
7624                 if (em->map_lookup->num_stripes !=
7625                     em->map_lookup->verified_stripes) {
7626                         btrfs_err(fs_info,
7627                         "chunk %llu has missing dev extent, have %d expect %d",
7628                                   em->start, em->map_lookup->verified_stripes,
7629                                   em->map_lookup->num_stripes);
7630                         ret = -EUCLEAN;
7631                         goto out;
7632                 }
7633         }
7634 out:
7635         read_unlock(&em_tree->lock);
7636         return ret;
7637 }
7638
7639 /*
7640  * Ensure that all dev extents are mapped to the correct chunk, otherwise
7641  * later chunk allocation/free would cause unexpected behavior.
7642  *
7643  * NOTE: This will iterate through the whole device tree, which should be of
7644  * roughly the same size as the chunk tree.  This slightly increases mount time.
7645  */
7646 int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
7647 {
7648         struct btrfs_path *path;
7649         struct btrfs_root *root = fs_info->dev_root;
7650         struct btrfs_key key;
7651         u64 prev_devid = 0;
7652         u64 prev_dev_ext_end = 0;
7653         int ret = 0;
7654
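        /* Dev extent keys use the devid as objectid, so start from devid 1 */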
7655         key.objectid = 1;
7656         key.type = BTRFS_DEV_EXTENT_KEY;
7657         key.offset = 0;
7658
7659         path = btrfs_alloc_path();
7660         if (!path)
7661                 return -ENOMEM;
7662
7663         path->reada = READA_FORWARD;
7664         ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
7665         if (ret < 0)
7666                 goto out;
7667
7668         if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
7669                 ret = btrfs_next_item(root, path);
7670                 if (ret < 0)
7671                         goto out;
7672                 /* No dev extents at all? Not good */
7673                 if (ret > 0) {
7674                         ret = -EUCLEAN;
7675                         goto out;
7676                 }
7677         }
7678         while (1) {
7679                 struct extent_buffer *leaf = path->nodes[0];
7680                 struct btrfs_dev_extent *dext;
7681                 int slot = path->slots[0];
7682                 u64 chunk_offset;
7683                 u64 physical_offset;
7684                 u64 physical_len;
7685                 u64 devid;
7686
7687                 btrfs_item_key_to_cpu(leaf, &key, slot);
7688                 if (key.type != BTRFS_DEV_EXTENT_KEY)
7689                         break;
7690                 devid = key.objectid;
7691                 physical_offset = key.offset;
7692
7693                 dext = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent);
7694                 chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dext);
7695                 physical_len = btrfs_dev_extent_length(leaf, dext);
7696
7697                 /* Check if this dev extent overlaps with the previous one */
7698                 if (devid == prev_devid && physical_offset < prev_dev_ext_end) {
7699                         btrfs_err(fs_info,
7700 "dev extent devid %llu physical offset %llu overlap with previous dev extent end %llu",
7701                                   devid, physical_offset, prev_dev_ext_end);
7702                         ret = -EUCLEAN;
7703                         goto out;
7704                 }
7705
7706                 ret = verify_one_dev_extent(fs_info, chunk_offset, devid,
7707                                             physical_offset, physical_len);
7708                 if (ret < 0)
7709                         goto out;
7710                 prev_devid = devid;
7711                 prev_dev_ext_end = physical_offset + physical_len;
7712
7713                 ret = btrfs_next_item(root, path);
7714                 if (ret < 0)
7715                         goto out;
7716                 if (ret > 0) {
7717                         ret = 0;
7718                         break;
7719                 }
7720         }
7721
7722         /* Ensure all chunks have corresponding dev extents */
7723         ret = verify_chunk_dev_extent_mapping(fs_info);
7724 out:
7725         btrfs_free_path(path);
7726         return ret;
7727 }
7728
7729 /*
7730  * Check whether the given block group or device is pinned by any inode being
7731  * used as a swapfile.
7732  */
7733 bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr)
7734 {
7735         struct btrfs_swapfile_pin *sp;
7736         struct rb_node *node;
7737
7738         spin_lock(&fs_info->swapfile_pins_lock);
7739         node = fs_info->swapfile_pins.rb_node;
7740         while (node) {
7741                 sp = rb_entry(node, struct btrfs_swapfile_pin, node);
7742                 if (ptr < sp->ptr)
7743                         node = node->rb_left;
7744                 else if (ptr > sp->ptr)
7745                         node = node->rb_right;
7746                 else
7747                         break;
7748         }
7749         spin_unlock(&fs_info->swapfile_pins_lock);
7750         return node != NULL;
7751 }