Merge tag 'for-5.12/block-2021-02-17' of git://git.kernel.dk/linux-block
[linux-2.6-microblaze.git] / fs/btrfs/volumes.c
index 584ba09..bc3b33e 100644
@@ -433,7 +433,7 @@ static struct btrfs_device *__alloc_device(struct btrfs_fs_info *fs_info)
 
        atomic_set(&dev->reada_in_flight, 0);
        atomic_set(&dev->dev_stats_ccnt, 0);
-       btrfs_device_data_ordered_init(dev, fs_info);
+       btrfs_device_data_ordered_init(dev);
        INIT_RADIX_TREE(&dev->reada_zones, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
        INIT_RADIX_TREE(&dev->reada_extents, GFP_NOFS & ~__GFP_DIRECT_RECLAIM);
        extent_io_tree_init(fs_info, &dev->alloc_state,
@@ -669,10 +669,6 @@ static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
        clear_bit(BTRFS_DEV_STATE_IN_FS_METADATA, &device->dev_state);
        device->mode = flags;
 
-       ret = btrfs_get_dev_zone_info(device);
-       if (ret != 0)
-               goto error_free_page;
-
        fs_devices->open_devices++;
        if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state) &&
            device->devid != BTRFS_DEV_REPLACE_DEVID) {
@@ -1418,11 +1414,62 @@ static u64 dev_extent_search_start(struct btrfs_device *device, u64 start)
                 * make sure to start at an offset of at least 1MB.
                 */
                return max_t(u64, start, SZ_1M);
+       case BTRFS_CHUNK_ALLOC_ZONED:
+               /*
+                * Unlike the regular allocator we don't care about the start
+                * of the region here, because the first two zones are anyway
+                * used/reserved for superblock logging.
+                */
+               return ALIGN(start, device->zone_info->zone_size);
        default:
                BUG();
        }
 }
 
+static bool dev_extent_hole_check_zoned(struct btrfs_device *device,
+                                       u64 *hole_start, u64 *hole_size,
+                                       u64 num_bytes)
+{
+       u64 zone_size = device->zone_info->zone_size;
+       u64 pos;
+       int ret;
+       bool changed = false;
+
+       ASSERT(IS_ALIGNED(*hole_start, zone_size));
+
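+       /*
+        * Walk the hole zone by zone: skip forward to the first position
+        * that btrfs_find_allocatable_zones() reports as usable, then check
+        * with btrfs_ensure_empty_zones() that the zones covering @num_bytes
+        * are empty. Zones that cannot be used shrink the hole from the
+        * front.
+        */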
+       while (*hole_size > 0) {
+               pos = btrfs_find_allocatable_zones(device, *hole_start,
+                                                  *hole_start + *hole_size,
+                                                  num_bytes);
+               if (pos != *hole_start) {
+                       *hole_size = *hole_start + *hole_size - pos;
+                       *hole_start = pos;
+                       changed = true;
+                       if (*hole_size < num_bytes)
+                               break;
+               }
+
+               ret = btrfs_ensure_empty_zones(device, pos, num_bytes);
+
+               /* Range is ensured to be empty */
+               if (!ret)
+                       return changed;
+
+               /* Given hole range was invalid (outside of device) */
+               if (ret == -ERANGE) {
+                       *hole_start += *hole_size;
+                       *hole_size = 0;
+                       return true;
+               }
+
+               *hole_start += zone_size;
+               *hole_size -= zone_size;
+               changed = true;
+       }
+
+       return changed;
+}
+
 /**
  * dev_extent_hole_check - check if specified hole is suitable for allocation
  * @device:    the device which we have the hole
@@ -1430,7 +1477,7 @@ static u64 dev_extent_search_start(struct btrfs_device *device, u64 start)
  * @hole_size: the size of the hole
  * @num_bytes: the size of the free space that we need
  *
- * This function may modify @hole_start and @hole_end to reflect the suitable
+ * This function may modify @hole_start and @hole_size to reflect the suitable
  * position for allocation. Returns 1 if hole position is updated, 0 otherwise.
  */
 static bool dev_extent_hole_check(struct btrfs_device *device, u64 *hole_start,
@@ -1439,24 +1486,39 @@ static bool dev_extent_hole_check(struct btrfs_device *device, u64 *hole_start,
        bool changed = false;
        u64 hole_end = *hole_start + *hole_size;
 
-       /*
-        * Check before we set max_hole_start, otherwise we could end up
-        * sending back this offset anyway.
-        */
-       if (contains_pending_extent(device, hole_start, *hole_size)) {
-               if (hole_end >= *hole_start)
-                       *hole_size = hole_end - *hole_start;
-               else
-                       *hole_size = 0;
-               changed = true;
-       }
+       for (;;) {
+               /*
+                * Check before we set max_hole_start, otherwise we could end up
+                * sending back this offset anyway.
+                */
+               if (contains_pending_extent(device, hole_start, *hole_size)) {
+                       if (hole_end >= *hole_start)
+                               *hole_size = hole_end - *hole_start;
+                       else
+                               *hole_size = 0;
+                       changed = true;
+               }
+
+               switch (device->fs_devices->chunk_alloc_policy) {
+               case BTRFS_CHUNK_ALLOC_REGULAR:
+                       /* No extra check */
+                       break;
+               case BTRFS_CHUNK_ALLOC_ZONED:
+                       if (dev_extent_hole_check_zoned(device, hole_start,
+                                                       hole_size, num_bytes)) {
+                               changed = true;
+                               /*
+                                * The changed hole can contain a pending
+                                * extent. Loop again to check that.
+                                */
+                               continue;
+                       }
+                       break;
+               default:
+                       BUG();
+               }
 
-       switch (device->fs_devices->chunk_alloc_policy) {
-       case BTRFS_CHUNK_ALLOC_REGULAR:
-               /* No extra check */
                break;
-       default:
-               BUG();
        }
 
        return changed;
@@ -1509,6 +1571,9 @@ static int find_free_dev_extent_start(struct btrfs_device *device,
 
        search_start = dev_extent_search_start(device, search_start);
 
+       WARN_ON(device->zone_info &&
+               !IS_ALIGNED(num_bytes, device->zone_info->zone_size));
+
        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
@@ -4668,11 +4733,10 @@ again:
                }
 
                ret = btrfs_previous_item(root, path, 0, key.type);
-               if (ret)
-                       mutex_unlock(&fs_info->delete_unused_bgs_mutex);
-               if (ret < 0)
-                       goto done;
                if (ret) {
+                       mutex_unlock(&fs_info->delete_unused_bgs_mutex);
+                       if (ret < 0)
+                               goto done;
                        ret = 0;
                        btrfs_release_path(path);
                        break;
@@ -4904,6 +4968,37 @@ static void init_alloc_chunk_ctl_policy_regular(
        ctl->dev_extent_min = BTRFS_STRIPE_LEN * ctl->dev_stripes;
 }
 
+static void init_alloc_chunk_ctl_policy_zoned(
+                                     struct btrfs_fs_devices *fs_devices,
+                                     struct alloc_chunk_ctl *ctl)
+{
+       u64 zone_size = fs_devices->fs_info->zone_size;
+       u64 limit;
+       int min_num_stripes = ctl->devs_min * ctl->dev_stripes;
+       int min_data_stripes = (min_num_stripes - ctl->nparity) / ctl->ncopies;
+       u64 min_chunk_size = min_data_stripes * zone_size;
+       u64 type = ctl->type;
+
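+       /*
+        * On a zoned filesystem a device extent spans whole zones, so the
+        * stripe size is fixed to the zone size and the chunk size limits
+        * below are all rounded to zone boundaries.
+        */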
+       ctl->max_stripe_size = zone_size;
+       if (type & BTRFS_BLOCK_GROUP_DATA) {
+               ctl->max_chunk_size = round_down(BTRFS_MAX_DATA_CHUNK_SIZE,
+                                                zone_size);
+       } else if (type & BTRFS_BLOCK_GROUP_METADATA) {
+               ctl->max_chunk_size = ctl->max_stripe_size;
+       } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) {
+               ctl->max_chunk_size = 2 * ctl->max_stripe_size;
+               ctl->devs_max = min_t(int, ctl->devs_max,
+                                     BTRFS_MAX_DEVS_SYS_CHUNK);
+       }
+
+       /* We don't want a chunk larger than 10% of writable space */
+       limit = max(round_down(div_factor(fs_devices->total_rw_bytes, 1),
+                              zone_size),
+                   min_chunk_size);
+       ctl->max_chunk_size = min(limit, ctl->max_chunk_size);
+       ctl->dev_extent_min = zone_size * ctl->dev_stripes;
+}
+
 static void init_alloc_chunk_ctl(struct btrfs_fs_devices *fs_devices,
                                 struct alloc_chunk_ctl *ctl)
 {
@@ -4924,6 +5019,9 @@ static void init_alloc_chunk_ctl(struct btrfs_fs_devices *fs_devices,
        case BTRFS_CHUNK_ALLOC_REGULAR:
                init_alloc_chunk_ctl_policy_regular(fs_devices, ctl);
                break;
+       case BTRFS_CHUNK_ALLOC_ZONED:
+               init_alloc_chunk_ctl_policy_zoned(fs_devices, ctl);
+               break;
        default:
                BUG();
        }
@@ -5050,6 +5148,38 @@ static int decide_stripe_size_regular(struct alloc_chunk_ctl *ctl,
        return 0;
 }
 
+static int decide_stripe_size_zoned(struct alloc_chunk_ctl *ctl,
+                                   struct btrfs_device_info *devices_info)
+{
+       u64 zone_size = devices_info[0].dev->zone_info->zone_size;
+       /* Number of stripes that count for block group size */
+       int data_stripes;
+
+       /*
+        * It should hold because:
+        *    dev_extent_min == dev_extent_want == zone_size * dev_stripes
+        */
+       ASSERT(devices_info[ctl->ndevs - 1].max_avail == ctl->dev_extent_min);
+
+       ctl->stripe_size = zone_size;
+       ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
+       data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;
+
+       /* stripe_size is fixed in a zoned filesystem. Reduce ndevs instead. */
+       if (ctl->stripe_size * data_stripes > ctl->max_chunk_size) {
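+               /*
+                * Recompute ndevs by inverting
+                * data_stripes = (ndevs * dev_stripes - nparity) / ncopies
+                * so that stripe_size * data_stripes stays within
+                * max_chunk_size.
+                */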
+               ctl->ndevs = div_u64(div_u64(ctl->max_chunk_size * ctl->ncopies,
+                                            ctl->stripe_size) + ctl->nparity,
+                                    ctl->dev_stripes);
+               ctl->num_stripes = ctl->ndevs * ctl->dev_stripes;
+               data_stripes = (ctl->num_stripes - ctl->nparity) / ctl->ncopies;
+               ASSERT(ctl->stripe_size * data_stripes <= ctl->max_chunk_size);
+       }
+
+       ctl->chunk_size = ctl->stripe_size * data_stripes;
+
+       return 0;
+}
+
 static int decide_stripe_size(struct btrfs_fs_devices *fs_devices,
                              struct alloc_chunk_ctl *ctl,
                              struct btrfs_device_info *devices_info)
@@ -5077,6 +5207,8 @@ static int decide_stripe_size(struct btrfs_fs_devices *fs_devices,
        switch (fs_devices->chunk_alloc_policy) {
        case BTRFS_CHUNK_ALLOC_REGULAR:
                return decide_stripe_size_regular(ctl, devices_info);
+       case BTRFS_CHUNK_ALLOC_ZONED:
+               return decide_stripe_size_zoned(ctl, devices_info);
        default:
                BUG();
        }
@@ -5841,9 +5973,29 @@ static int get_extra_mirror_from_replace(struct btrfs_fs_info *fs_info,
        return ret;
 }
 
+static bool is_block_group_to_copy(struct btrfs_fs_info *fs_info, u64 logical)
+{
+       struct btrfs_block_group *cache;
+       bool ret;
+
+       /* A non-zoned filesystem does not use the "to_copy" flag */
+       if (!btrfs_is_zoned(fs_info))
+               return false;
+
+       cache = btrfs_lookup_block_group(fs_info, logical);
+       /* The block group must exist for a mapped logical address */
+       if (!cache)
+               return false;
+
+       spin_lock(&cache->lock);
+       ret = cache->to_copy;
+       spin_unlock(&cache->lock);
+
+       btrfs_put_block_group(cache);
+       return ret;
+}
+
 static void handle_ops_on_dev_replace(enum btrfs_map_op op,
                                      struct btrfs_bio **bbio_ret,
                                      struct btrfs_dev_replace *dev_replace,
+                                     u64 logical,
                                      int *num_stripes_ret, int *max_errors_ret)
 {
        struct btrfs_bio *bbio = *bbio_ret;
@@ -5856,6 +6008,13 @@ static void handle_ops_on_dev_replace(enum btrfs_map_op op,
        if (op == BTRFS_MAP_WRITE) {
                int index_where_to_add;
 
+               /*
+                * A block group which has "to_copy" set will eventually be
+                * copied by the dev-replace process. We can avoid cloning the
+                * IO here.
+                */
+               if (is_block_group_to_copy(dev_replace->srcdev->fs_info, logical))
+                       return;
+
                /*
                 * duplicate the write operations while the dev replace
                 * procedure is running. Since the copying of the old disk to
@@ -5941,23 +6100,24 @@ static bool need_full_stripe(enum btrfs_map_op op)
 }
 
 /*
- * btrfs_get_io_geometry - calculates the geomery of a particular (address, len)
- *                    tuple. This information is used to calculate how big a
- *                    particular bio can get before it straddles a stripe.
+ * Calculate the geometry of a particular (address, len) tuple. This
+ * information is used to calculate how big a particular bio can get before it
+ * straddles a stripe.
  *
- * @fs_info - the filesystem
- * @logical - address that we want to figure out the geometry of
- * @len            - the length of IO we are going to perform, starting at @logical
- * @op      - type of operation - write or read
- * @io_geom - pointer used to return values
+ * @fs_info: the filesystem
+ * @em:      mapping containing the logical extent
+ * @op:      type of operation - write or read
+ * @logical: address that we want to figure out the geometry of
+ * @len:     the length of IO we are going to perform, starting at @logical
+ * @io_geom: pointer used to return values
  *
  * Returns < 0 in case a chunk for the given logical address cannot be found,
  * usually shouldn't happen unless @logical is corrupted, 0 otherwise.
  */
-int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
-                       u64 logical, u64 len, struct btrfs_io_geometry *io_geom)
+int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, struct extent_map *em,
+                         enum btrfs_map_op op, u64 logical, u64 len,
+                         struct btrfs_io_geometry *io_geom)
 {
-       struct extent_map *em;
        struct map_lookup *map;
        u64 offset;
        u64 stripe_offset;
@@ -5965,14 +6125,9 @@ int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
        u64 stripe_len;
        u64 raid56_full_stripe_start = (u64)-1;
        int data_stripes;
-       int ret = 0;
 
        ASSERT(op != BTRFS_MAP_DISCARD);
 
-       em = btrfs_get_chunk_map(fs_info, logical, len);
-       if (IS_ERR(em))
-               return PTR_ERR(em);
-
        map = em->map_lookup;
        /* Offset of this logical address in the chunk */
        offset = logical - em->start;
@@ -5986,8 +6141,7 @@ int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
                btrfs_crit(fs_info,
 "stripe math has gone wrong, stripe_offset=%llu offset=%llu start=%llu logical=%llu stripe_len=%llu",
                        stripe_offset, offset, em->start, logical, stripe_len);
-               ret = -EINVAL;
-               goto out;
+               return -EINVAL;
        }
 
        /* stripe_offset is the offset of this block in its stripe */
@@ -6034,10 +6188,7 @@ int btrfs_get_io_geometry(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
        io_geom->stripe_offset = stripe_offset;
        io_geom->raid56_stripe_offset = raid56_full_stripe_start;
 
-out:
-       /* once for us */
-       free_extent_map(em);
-       return ret;
+       return 0;
 }
 
 static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
@@ -6070,12 +6221,13 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
        ASSERT(bbio_ret);
        ASSERT(op != BTRFS_MAP_DISCARD);
 
-       ret = btrfs_get_io_geometry(fs_info, op, logical, *length, &geom);
+       em = btrfs_get_chunk_map(fs_info, logical, *length);
+       ASSERT(!IS_ERR(em));
+
+       ret = btrfs_get_io_geometry(fs_info, em, op, logical, *length, &geom);
        if (ret < 0)
                return ret;
 
-       em = btrfs_get_chunk_map(fs_info, logical, *length);
-       ASSERT(!IS_ERR(em));
        map = em->map_lookup;
 
        *length = geom.len;
@@ -6251,8 +6403,8 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
 
        if (dev_replace_is_ongoing && dev_replace->tgtdev != NULL &&
            need_full_stripe(op)) {
-               handle_ops_on_dev_replace(op, &bbio, dev_replace, &num_stripes,
-                                         &max_errors);
+               handle_ops_on_dev_replace(op, &bbio, dev_replace, logical,
+                                         &num_stripes, &max_errors);
        }
 
        *bbio_ret = bbio;
@@ -6323,7 +6475,7 @@ static void btrfs_end_bio(struct bio *bio)
                        struct btrfs_device *dev = btrfs_io_bio(bio)->device;
 
                        ASSERT(dev->bdev);
-                       if (bio_op(bio) == REQ_OP_WRITE)
+                       if (btrfs_op(bio) == BTRFS_MAP_WRITE)
                                btrfs_dev_stat_inc_and_print(dev,
                                                BTRFS_DEV_STAT_WRITE_ERRS);
                        else if (!(bio->bi_opf & REQ_RAHEAD))
@@ -6375,6 +6527,20 @@ static void submit_stripe_bio(struct btrfs_bio *bbio, struct bio *bio,
        btrfs_io_bio(bio)->device = dev;
        bio->bi_end_io = btrfs_end_bio;
        bio->bi_iter.bi_sector = physical >> 9;
+       /*
+        * For zone append writes, bi_sector must point to the beginning of
+        * the zone.
+        */
+       if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
+               if (btrfs_dev_is_sequential(dev, physical)) {
+                       u64 zone_start = round_down(physical, fs_info->zone_size);
+
+                       bio->bi_iter.bi_sector = zone_start >> SECTOR_SHIFT;
+               } else {
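+                       /*
+                        * A conventional zone accepts regular writes at any
+                        * offset, so fall back from zone append to a plain
+                        * write at @physical.
+                        */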
+                       bio->bi_opf &= ~REQ_OP_ZONE_APPEND;
+                       bio->bi_opf |= REQ_OP_WRITE;
+               }
+       }
        btrfs_debug_in_rcu(fs_info,
        "btrfs_map_bio: rw %d 0x%x, sector=%llu, dev=%lu (%s id %llu), size=%u",
                bio_op(bio), bio->bi_opf, bio->bi_iter.bi_sector,
@@ -6436,10 +6602,10 @@ blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
        atomic_set(&bbio->stripes_pending, bbio->num_stripes);
 
        if ((bbio->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
-           ((bio_op(bio) == REQ_OP_WRITE) || (mirror_num > 1))) {
+           ((btrfs_op(bio) == BTRFS_MAP_WRITE) || (mirror_num > 1))) {
                /* In this case, map_length has been set to the length of
                   a single stripe; not the whole write */
-               if (bio_op(bio) == REQ_OP_WRITE) {
+               if (btrfs_op(bio) == BTRFS_MAP_WRITE) {
                        ret = raid56_parity_write(fs_info, bio, bbio,
                                                  map_length);
                } else {
@@ -6462,7 +6628,7 @@ blk_status_t btrfs_map_bio(struct btrfs_fs_info *fs_info, struct bio *bio,
                dev = bbio->stripes[dev_nr].dev;
                if (!dev || !dev->bdev || test_bit(BTRFS_DEV_STATE_MISSING,
                                                   &dev->dev_state) ||
-                   (bio_op(first_bio) == REQ_OP_WRITE &&
+                   (btrfs_op(first_bio) == BTRFS_MAP_WRITE &&
                    !test_bit(BTRFS_DEV_STATE_WRITEABLE, &dev->dev_state))) {
                        bbio_error(bbio, first_bio, logical);
                        continue;
@@ -7644,6 +7810,20 @@ static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
                ret = -EUCLEAN;
                goto out;
        }
+
+       if (dev->zone_info) {
+               u64 zone_size = dev->zone_info->zone_size;
+
+               if (!IS_ALIGNED(physical_offset, zone_size) ||
+                   !IS_ALIGNED(physical_len, zone_size)) {
+                       btrfs_err(fs_info,
+"zoned: dev extent devid %llu physical offset %llu len %llu is not aligned to device zone",
+                                 devid, physical_offset, physical_len);
+                       ret = -EUCLEAN;
+                       goto out;
+               }
+       }
+
 out:
        free_extent_map(em);
        return ret;
@@ -7800,3 +7980,75 @@ bool btrfs_pinned_by_swapfile(struct btrfs_fs_info *fs_info, void *ptr)
        spin_unlock(&fs_info->swapfile_pins_lock);
        return node != NULL;
 }
+
+static int relocating_repair_kthread(void *data)
+{
+       struct btrfs_block_group *cache = (struct btrfs_block_group *)data;
+       struct btrfs_fs_info *fs_info = cache->fs_info;
+       u64 target;
+       int ret = 0;
+
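+       /*
+        * Remember only the start offset and drop the reference taken in
+        * btrfs_repair_one_zone(); the block group is looked up again after
+        * the exclusive BTRFS_EXCLOP_BALANCE operation has been acquired.
+        */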
+       target = cache->start;
+       btrfs_put_block_group(cache);
+
+       if (!btrfs_exclop_start(fs_info, BTRFS_EXCLOP_BALANCE)) {
+               btrfs_info(fs_info,
+                          "zoned: skip relocating block group %llu to repair: EBUSY",
+                          target);
+               return -EBUSY;
+       }
+
+       mutex_lock(&fs_info->delete_unused_bgs_mutex);
+
+       /* Ensure block group still exists */
+       cache = btrfs_lookup_block_group(fs_info, target);
+       if (!cache)
+               goto out;
+
+       if (!cache->relocating_repair)
+               goto out;
+
+       ret = btrfs_may_alloc_data_chunk(fs_info, target);
+       if (ret < 0)
+               goto out;
+
+       btrfs_info(fs_info,
+                  "zoned: relocating block group %llu to repair IO failure",
+                  target);
+       ret = btrfs_relocate_chunk(fs_info, target);
+
+out:
+       if (cache)
+               btrfs_put_block_group(cache);
+       mutex_unlock(&fs_info->delete_unused_bgs_mutex);
+       btrfs_exclop_finish(fs_info);
+
+       return ret;
+}
+
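+/*
+ * Relocate the block group at @logical to repair an I/O failure on a zoned
+ * filesystem. Sequential zones cannot be overwritten in place, so the repair
+ * is done by moving the whole block group in a background kthread.
+ */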
+int btrfs_repair_one_zone(struct btrfs_fs_info *fs_info, u64 logical)
+{
+       struct btrfs_block_group *cache;
+
+       /* Do not attempt to repair in degraded state */
+       if (btrfs_test_opt(fs_info, DEGRADED))
+               return 0;
+
+       cache = btrfs_lookup_block_group(fs_info, logical);
+       if (!cache)
+               return 0;
+
+       spin_lock(&cache->lock);
+       if (cache->relocating_repair) {
+               spin_unlock(&cache->lock);
+               btrfs_put_block_group(cache);
+               return 0;
+       }
+       cache->relocating_repair = 1;
+       spin_unlock(&cache->lock);
+
+       kthread_run(relocating_repair_kthread, cache,
+                   "btrfs-relocating-repair");
+
+       return 0;
+}