dm: fix missing imposition of queue_limits from dm_wq_work() thread
[linux-2.6-microblaze.git] drivers/md/dm.c
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index fb0255d..80266b9 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -422,21 +422,6 @@ static void do_deferred_remove(struct work_struct *w)
        dm_deferred_remove();
 }
 
-sector_t dm_get_size(struct mapped_device *md)
-{
-       return get_capacity(md->disk);
-}
-
-struct request_queue *dm_get_md_queue(struct mapped_device *md)
-{
-       return md->queue;
-}
-
-struct dm_stats *dm_get_stats(struct mapped_device *md)
-{
-       return &md->stats;
-}
-
 static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 {
        struct mapped_device *md = bdev->bd_disk->private_data;
@@ -591,7 +576,44 @@ out:
        return r;
 }
 
-static void start_io_acct(struct dm_io *io);
+u64 dm_start_time_ns_from_clone(struct bio *bio)
+{
+       struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
+       struct dm_io *io = tio->io;
+
+       return jiffies_to_nsecs(io->start_time);
+}
+EXPORT_SYMBOL_GPL(dm_start_time_ns_from_clone);
+
+static void start_io_acct(struct dm_io *io)
+{
+       struct mapped_device *md = io->md;
+       struct bio *bio = io->orig_bio;
+
+       io->start_time = bio_start_io_acct(bio);
+       if (unlikely(dm_stats_used(&md->stats)))
+               dm_stats_account_io(&md->stats, bio_data_dir(bio),
+                                   bio->bi_iter.bi_sector, bio_sectors(bio),
+                                   false, 0, &io->stats_aux);
+}
+
+static void end_io_acct(struct dm_io *io)
+{
+       struct mapped_device *md = io->md;
+       struct bio *bio = io->orig_bio;
+       unsigned long duration = jiffies - io->start_time;
+
+       bio_end_io_acct(bio, io->start_time);
+
+       if (unlikely(dm_stats_used(&md->stats)))
+               dm_stats_account_io(&md->stats, bio_data_dir(bio),
+                                   bio->bi_iter.bi_sector, bio_sectors(bio),
+                                   true, duration, &io->stats_aux);
+
+       /* nudge anyone waiting on suspend queue */
+       if (unlikely(wq_has_sleeper(&md->wait)))
+               wake_up(&md->wait);
+}
 
 static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio)
 {
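
dm_start_time_ns_from_clone() above is exported so that a target can recover the jiffies-based start time that start_io_acct() recorded for the original bio. A minimal sketch of how a hypothetical target's ->end_io hook might use it to accumulate per-bio service time; my_end_io and my_total_ns are illustrative names, not from dm.c:

/* Sketch only: dm_start_time_ns_from_clone() returns jiffies_to_nsecs() of
 * the recorded start time, so compare against jiffies converted the same way. */
static atomic64_t my_total_ns;          /* illustrative latency counter */

static int my_end_io(struct dm_target *ti, struct bio *clone, blk_status_t *error)
{
        u64 elapsed = jiffies_to_nsecs(jiffies) - dm_start_time_ns_from_clone(clone);

        atomic64_add(elapsed, &my_total_ns);
        return DM_ENDIO_DONE;
}
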
@@ -657,45 +679,6 @@ static void free_tio(struct dm_target_io *tio)
        bio_put(&tio->clone);
 }
 
-u64 dm_start_time_ns_from_clone(struct bio *bio)
-{
-       struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
-       struct dm_io *io = tio->io;
-
-       return jiffies_to_nsecs(io->start_time);
-}
-EXPORT_SYMBOL_GPL(dm_start_time_ns_from_clone);
-
-static void start_io_acct(struct dm_io *io)
-{
-       struct mapped_device *md = io->md;
-       struct bio *bio = io->orig_bio;
-
-       io->start_time = bio_start_io_acct(bio);
-       if (unlikely(dm_stats_used(&md->stats)))
-               dm_stats_account_io(&md->stats, bio_data_dir(bio),
-                                   bio->bi_iter.bi_sector, bio_sectors(bio),
-                                   false, 0, &io->stats_aux);
-}
-
-static void end_io_acct(struct dm_io *io)
-{
-       struct mapped_device *md = io->md;
-       struct bio *bio = io->orig_bio;
-       unsigned long duration = jiffies - io->start_time;
-
-       bio_end_io_acct(bio, io->start_time);
-
-       if (unlikely(dm_stats_used(&md->stats)))
-               dm_stats_account_io(&md->stats, bio_data_dir(bio),
-                                   bio->bi_iter.bi_sector, bio_sectors(bio),
-                                   true, duration, &io->stats_aux);
-
-       /* nudge anyone waiting on suspend queue */
-       if (unlikely(wq_has_sleeper(&md->wait)))
-               wake_up(&md->wait);
-}
-
 /*
  * Add the bio to the list of deferred io.
  */
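
The comment above introduces queue_io(), which is unchanged by this diff. For orientation, its shape is roughly the following (reproduced approximately, not part of the patch): bios that arrive while I/O is blocked are parked on md->deferred under md->deferred_lock, and md->wq is kicked, which is what later hands them to dm_wq_work() near the bottom of this page.

/* Approximate shape of queue_io() (unchanged by this patch). */
static void queue_io(struct mapped_device *md, struct bio *bio)
{
        unsigned long flags;

        spin_lock_irqsave(&md->deferred_lock, flags);
        bio_list_add(&md->deferred, bio);
        spin_unlock_irqrestore(&md->deferred_lock, flags);
        queue_work(md->wq, &md->work);
}
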
@@ -1041,32 +1024,28 @@ static void clone_endio(struct bio *bio)
  * Return maximum size of I/O possible at the supplied sector up to the current
  * target boundary.
  */
-static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti)
+static inline sector_t max_io_len_target_boundary(struct dm_target *ti,
+                                                 sector_t target_offset)
 {
-       sector_t target_offset = dm_target_offset(ti, sector);
-
        return ti->len - target_offset;
 }
 
-static sector_t max_io_len(sector_t sector, struct dm_target *ti)
+static sector_t max_io_len(struct dm_target *ti, sector_t sector)
 {
-       sector_t len = max_io_len_target_boundary(sector, ti);
-       sector_t offset, max_len;
+       sector_t target_offset = dm_target_offset(ti, sector);
+       sector_t len = max_io_len_target_boundary(ti, target_offset);
+       sector_t max_len;
 
        /*
         * Does the target need to split even further?
+        * - q->limits.chunk_sectors reflects ti->max_io_len so
+        *   blk_max_size_offset() provides required splitting.
+        * - blk_max_size_offset() also respects q->limits.max_sectors
         */
-       if (ti->max_io_len) {
-               offset = dm_target_offset(ti, sector);
-               if (unlikely(ti->max_io_len & (ti->max_io_len - 1)))
-                       max_len = sector_div(offset, ti->max_io_len);
-               else
-                       max_len = offset & (ti->max_io_len - 1);
-               max_len = ti->max_io_len - max_len;
-
-               if (len > max_len)
-                       len = max_len;
-       }
+       max_len = blk_max_size_offset(ti->table->md->queue,
+                                     target_offset);
+       if (len > max_len)
+               len = max_len;
 
        return len;
 }
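
The rewritten max_io_len() leans on q->limits.chunk_sectors mirroring ti->max_io_len, so blk_max_size_offset() does the boundary math that the removed open-coded block used to do. A standalone model of that arithmetic, assuming a power-of-two chunk size as DM requires for ti->max_io_len here; this is a userspace illustration, not the kernel helper itself:

#include <stdint.h>

/* Userspace model of the boundary arithmetic: how many sectors can be issued
 * starting at 'offset' before crossing the next chunk boundary, capped by
 * 'max_sectors'.  Assumes 'chunk_sectors' is a power of two. */
static uint64_t max_size_at_offset(uint64_t offset, uint32_t chunk_sectors,
                                   uint32_t max_sectors)
{
        uint64_t to_boundary = chunk_sectors - (offset & (chunk_sectors - 1));

        return to_boundary < max_sectors ? to_boundary : max_sectors;
}
/* e.g. chunk_sectors = 8, max_sectors = 128, offset = 13 -> 3 sectors remain
 * before the boundary, so the I/O is clamped to 3 sectors. */
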
@@ -1119,7 +1098,7 @@ static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
                goto out;
        if (!ti->type->direct_access)
                goto out;
-       len = max_io_len(sector, ti) / PAGE_SECTORS;
+       len = max_io_len(ti, sector) / PAGE_SECTORS;
        if (len < 1)
                goto out;
        nr_pages = min(len, nr_pages);
@@ -1136,15 +1115,16 @@ static bool dm_dax_supported(struct dax_device *dax_dev, struct block_device *bd
 {
        struct mapped_device *md = dax_get_private(dax_dev);
        struct dm_table *map;
+       bool ret = false;
        int srcu_idx;
-       bool ret;
 
        map = dm_get_live_table(md, &srcu_idx);
        if (!map)
-               return false;
+               goto out;
 
        ret = dm_table_supports_dax(map, device_supports_dax, &blocksize);
 
+out:
        dm_put_live_table(md, srcu_idx);
 
        return ret;
@@ -1429,6 +1409,17 @@ static int __send_empty_flush(struct clone_info *ci)
 {
        unsigned target_nr = 0;
        struct dm_target *ti;
+       struct bio flush_bio;
+
+       /*
+        * Use an on-stack bio for this, it's safe since we don't
+        * need to reference it after submit. It's just used as
+        * the basis for the clone(s).
+        */
+       bio_init(&flush_bio, NULL, 0);
+       flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
+       ci->bio = &flush_bio;
+       ci->sector_count = 0;
 
        /*
         * Empty flush uses a statically initialized bio, as the base for
@@ -1442,6 +1433,8 @@ static int __send_empty_flush(struct clone_info *ci)
        BUG_ON(bio_has_data(ci->bio));
        while ((ti = dm_table_get_target(ci->map, target_nr++)))
                __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);
+
+       bio_uninit(ci->bio);
        return 0;
 }
 
@@ -1464,28 +1457,6 @@ static int __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
        return 0;
 }
 
-typedef unsigned (*get_num_bios_fn)(struct dm_target *ti);
-
-static unsigned get_num_discard_bios(struct dm_target *ti)
-{
-       return ti->num_discard_bios;
-}
-
-static unsigned get_num_secure_erase_bios(struct dm_target *ti)
-{
-       return ti->num_secure_erase_bios;
-}
-
-static unsigned get_num_write_same_bios(struct dm_target *ti)
-{
-       return ti->num_write_same_bios;
-}
-
-static unsigned get_num_write_zeroes_bios(struct dm_target *ti)
-{
-       return ti->num_write_zeroes_bios;
-}
-
 static int __send_changing_extent_only(struct clone_info *ci, struct dm_target *ti,
                                       unsigned num_bios)
 {
@@ -1500,7 +1471,8 @@ static int __send_changing_extent_only(struct clone_info *ci, struct dm_target *
        if (!num_bios)
                return -EOPNOTSUPP;
 
-       len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti));
+       len = min_t(sector_t, ci->sector_count,
+                   max_io_len_target_boundary(ti, dm_target_offset(ti, ci->sector)));
 
        __send_duplicate_bios(ci, ti, num_bios, &len);
 
@@ -1510,26 +1482,6 @@ static int __send_changing_extent_only(struct clone_info *ci, struct dm_target *
        return 0;
 }
 
-static int __send_discard(struct clone_info *ci, struct dm_target *ti)
-{
-       return __send_changing_extent_only(ci, ti, get_num_discard_bios(ti));
-}
-
-static int __send_secure_erase(struct clone_info *ci, struct dm_target *ti)
-{
-       return __send_changing_extent_only(ci, ti, get_num_secure_erase_bios(ti));
-}
-
-static int __send_write_same(struct clone_info *ci, struct dm_target *ti)
-{
-       return __send_changing_extent_only(ci, ti, get_num_write_same_bios(ti));
-}
-
-static int __send_write_zeroes(struct clone_info *ci, struct dm_target *ti)
-{
-       return __send_changing_extent_only(ci, ti, get_num_write_zeroes_bios(ti));
-}
-
 static bool is_abnormal_io(struct bio *bio)
 {
        bool r = false;
@@ -1550,18 +1502,26 @@ static bool __process_abnormal_io(struct clone_info *ci, struct dm_target *ti,
                                  int *result)
 {
        struct bio *bio = ci->bio;
+       unsigned num_bios = 0;
 
-       if (bio_op(bio) == REQ_OP_DISCARD)
-               *result = __send_discard(ci, ti);
-       else if (bio_op(bio) == REQ_OP_SECURE_ERASE)
-               *result = __send_secure_erase(ci, ti);
-       else if (bio_op(bio) == REQ_OP_WRITE_SAME)
-               *result = __send_write_same(ci, ti);
-       else if (bio_op(bio) == REQ_OP_WRITE_ZEROES)
-               *result = __send_write_zeroes(ci, ti);
-       else
+       switch (bio_op(bio)) {
+       case REQ_OP_DISCARD:
+               num_bios = ti->num_discard_bios;
+               break;
+       case REQ_OP_SECURE_ERASE:
+               num_bios = ti->num_secure_erase_bios;
+               break;
+       case REQ_OP_WRITE_SAME:
+               num_bios = ti->num_write_same_bios;
+               break;
+       case REQ_OP_WRITE_ZEROES:
+               num_bios = ti->num_write_zeroes_bios;
+               break;
+       default:
                return false;
+       }
 
+       *result = __send_changing_extent_only(ci, ti, num_bios);
        return true;
 }
 
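
The switch simply looks up the per-op clone count that the target advertised at construction time; a count of zero makes __send_changing_extent_only() return -EOPNOTSUPP for that op. A hypothetical constructor opting in might look like this (my_ctr and the values are illustrative, not from this patch):

/* Illustrative target constructor: advertising one clone per flush, discard
 * and write-zeroes bio.  Ops left at zero (secure erase and write same here)
 * make __process_abnormal_io() fail them with -EOPNOTSUPP. */
static int my_ctr(struct dm_target *ti, unsigned argc, char **argv)
{
        ti->num_flush_bios = 1;
        ti->num_discard_bios = 1;
        ti->num_write_zeroes_bios = 1;
        return 0;
}
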
@@ -1581,7 +1541,7 @@ static int __split_and_process_non_flush(struct clone_info *ci)
        if (__process_abnormal_io(ci, ti, &r))
                return r;
 
-       len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count);
+       len = min_t(sector_t, max_io_len(ti, ci->sector), ci->sector_count);
 
        r = __clone_and_map_data_bio(ci, ti, ci->sector, &len);
        if (r < 0)
@@ -1617,19 +1577,7 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
        init_clone_info(&ci, md, map, bio);
 
        if (bio->bi_opf & REQ_PREFLUSH) {
-               struct bio flush_bio;
-
-               /*
-                * Use an on-stack bio for this, it's safe since we don't
-                * need to reference it after submit. It's just used as
-                * the basis for the clone(s).
-                */
-               bio_init(&flush_bio, NULL, 0);
-               flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
-               ci.bio = &flush_bio;
-               ci.sector_count = 0;
                error = __send_empty_flush(&ci);
-               bio_uninit(ci.bio);
                /* dec_pending submits any data associated with flush */
        } else if (op_is_zone_mgmt(bio_op(bio))) {
                ci.bio = bio;
@@ -1683,7 +1631,7 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
  * fact that targets that use it do _not_ have a need to split bios.
  */
 static blk_qc_t __process_bio(struct mapped_device *md, struct dm_table *map,
-                             struct bio *bio, struct dm_target *ti)
+                             struct bio *bio)
 {
        struct clone_info ci;
        blk_qc_t ret = BLK_QC_T_NONE;
@@ -1692,22 +1640,16 @@ static blk_qc_t __process_bio(struct mapped_device *md, struct dm_table *map,
        init_clone_info(&ci, md, map, bio);
 
        if (bio->bi_opf & REQ_PREFLUSH) {
-               struct bio flush_bio;
-
-               /*
-                * Use an on-stack bio for this, it's safe since we don't
-                * need to reference it after submit. It's just used as
-                * the basis for the clone(s).
-                */
-               bio_init(&flush_bio, NULL, 0);
-               flush_bio.bi_opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
-               ci.bio = &flush_bio;
-               ci.sector_count = 0;
                error = __send_empty_flush(&ci);
-               bio_uninit(ci.bio);
                /* dec_pending submits any data associated with flush */
        } else {
                struct dm_target_io *tio;
+               struct dm_target *ti = md->immutable_target;
+
+               if (WARN_ON_ONCE(!ti)) {
+                       error = -EIO;
+                       goto out;
+               }
 
                ci.bio = bio;
                ci.sector_count = bio_sectors(bio);
@@ -1723,58 +1665,26 @@ out:
        return ret;
 }
 
-static void dm_queue_split(struct mapped_device *md, struct dm_target *ti, struct bio **bio)
-{
-       unsigned len, sector_count;
-
-       sector_count = bio_sectors(*bio);
-       len = min_t(sector_t, max_io_len((*bio)->bi_iter.bi_sector, ti), sector_count);
-
-       if (sector_count > len) {
-               struct bio *split = bio_split(*bio, len, GFP_NOIO, &md->queue->bio_split);
-
-               bio_chain(split, *bio);
-               trace_block_split(md->queue, split, (*bio)->bi_iter.bi_sector);
-               submit_bio_noacct(*bio);
-               *bio = split;
-       }
-}
-
 static blk_qc_t dm_process_bio(struct mapped_device *md,
                               struct dm_table *map, struct bio *bio)
 {
        blk_qc_t ret = BLK_QC_T_NONE;
-       struct dm_target *ti = md->immutable_target;
 
        if (unlikely(!map)) {
                bio_io_error(bio);
                return ret;
        }
 
-       if (!ti) {
-               ti = dm_table_find_target(map, bio->bi_iter.bi_sector);
-               if (unlikely(!ti)) {
-                       bio_io_error(bio);
-                       return ret;
-               }
-       }
-
        /*
-        * If in ->queue_bio we need to use blk_queue_split(), otherwise
-        * queue_limits for abnormal requests (e.g. discard, writesame, etc)
-        * won't be imposed.
+        * Use blk_queue_split() for abnormal IO (e.g. discard, writesame, etc)
+        * otherwise associated queue_limits won't be imposed.
         */
-       if (current->bio_list) {
-               if (is_abnormal_io(bio))
-                       blk_queue_split(&bio);
-               else
-                       dm_queue_split(md, ti, &bio);
-       }
+       if (is_abnormal_io(bio))
+               blk_queue_split(&bio);
 
        if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED)
-               return __process_bio(md, map, bio, ti);
-       else
-               return __split_and_process_bio(md, map, bio);
+               return __process_bio(md, map, bio);
+       return __split_and_process_bio(md, map, bio);
 }
 
 static blk_qc_t dm_submit_bio(struct bio *bio)
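
dm_process_bio() now relies on blk_queue_split() for every "abnormal" bio regardless of calling context. is_abnormal_io(), whose opening lines appear as context further up, classifies roughly the following ops (reconstructed for reference, not part of this diff):

/* Rough shape of is_abnormal_io(): the ops whose queue_limits (granularity,
 * max bytes, ...) must be imposed by splitting before they reach a target. */
static bool is_abnormal_io(struct bio *bio)
{
        bool r = false;

        switch (bio_op(bio)) {
        case REQ_OP_DISCARD:
        case REQ_OP_SECURE_ERASE:
        case REQ_OP_WRITE_SAME:
        case REQ_OP_WRITE_ZEROES:
                r = true;
                break;
        }

        return r;
}
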
@@ -1802,7 +1712,9 @@ static blk_qc_t dm_submit_bio(struct bio *bio)
        if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
                dm_put_live_table(md, srcu_idx);
 
-               if (!(bio->bi_opf & REQ_RAHEAD))
+               if (bio->bi_opf & REQ_NOWAIT)
+                       bio_wouldblock_error(bio);
+               else if (!(bio->bi_opf & REQ_RAHEAD))
                        queue_io(md, bio);
                else
                        bio_io_error(bio);
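
With the REQ_NOWAIT branch, a nowait bio that hits a suspended device is failed immediately with BLK_STS_AGAIN via bio_wouldblock_error() instead of being queued. From userspace this typically shows up as EAGAIN on an RWF_NOWAIT direct read or write; a rough sketch, assuming fd is an O_DIRECT descriptor on the DM device and buf is suitably aligned:

#define _GNU_SOURCE
#include <sys/uio.h>
#include <errno.h>
#include <stdio.h>

/* Sketch: an RWF_NOWAIT read against a suspended dm device is expected to
 * fail with EAGAIN rather than block until the device is resumed. */
static ssize_t try_nowait_read(int fd, void *buf, size_t len)
{
        struct iovec iov = { .iov_base = buf, .iov_len = len };
        ssize_t ret = preadv2(fd, &iov, 1, 0, RWF_NOWAIT);

        if (ret < 0 && errno == EAGAIN)
                fprintf(stderr, "dm device not ready, try again later\n");
        return ret;
}
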
@@ -2097,18 +2009,6 @@ static void event_callback(void *context)
        dm_issue_global_event();
 }
 
-/*
- * Protected by md->suspend_lock obtained by dm_swap_table().
- */
-static void __set_size(struct mapped_device *md, sector_t size)
-{
-       lockdep_assert_held(&md->suspend_lock);
-
-       set_capacity(md->disk, size);
-
-       i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
-}
-
 /*
  * Returns old map, which caller must destroy.
  */
@@ -2131,7 +2031,8 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
        if (size != dm_get_size(md))
                memset(&md->geometry, 0, sizeof(md->geometry));
 
-       __set_size(md, size);
+       set_capacity(md->disk, size);
+       bd_set_nr_sectors(md->bdev, size);
 
        dm_table_event_callback(t, event_callback, md);
 
@@ -2149,8 +2050,7 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
                /*
                 * Leverage the fact that request-based DM targets and
                 * NVMe bio based targets are immutable singletons
-                * - used to optimize both dm_request_fn and dm_mq_queue_rq;
-                *   and __process_bio.
+                * - used to optimize both __process_bio and dm_mq_queue_rq
                 */
                md->immutable_target = dm_table_get_immutable_target(t);
        }
@@ -2477,29 +2377,19 @@ static int dm_wait_for_completion(struct mapped_device *md, long task_state)
  */
 static void dm_wq_work(struct work_struct *work)
 {
-       struct mapped_device *md = container_of(work, struct mapped_device,
-                                               work);
-       struct bio *c;
-       int srcu_idx;
-       struct dm_table *map;
-
-       map = dm_get_live_table(md, &srcu_idx);
+       struct mapped_device *md = container_of(work, struct mapped_device, work);
+       struct bio *bio;
 
        while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
                spin_lock_irq(&md->deferred_lock);
-               c = bio_list_pop(&md->deferred);
+               bio = bio_list_pop(&md->deferred);
                spin_unlock_irq(&md->deferred_lock);
 
-               if (!c)
+               if (!bio)
                        break;
 
-               if (dm_request_based(md))
-                       (void) submit_bio_noacct(c);
-               else
-                       (void) dm_process_bio(md, map, c);
+               submit_bio_noacct(bio);
        }
-
-       dm_put_live_table(md, srcu_idx);
 }
 
 static void dm_queue_flush(struct mapped_device *md)
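
This hunk is what the subject line refers to: dm_wq_work() now resubmits deferred bios with submit_bio_noacct(), so they re-enter dm_submit_bio() and get blk_queue_split() applied like any freshly issued bio, rather than being fed straight into dm_process_bio() from the worker thread. The removed guard, annotated, shows why the old path could skip the split:

/* The old dm_process_bio() only split bios when current->bio_list was set,
 * i.e. when called from inside ->submit_bio: */
if (current->bio_list) {                /* NULL in the dm_wq_work() worker thread */
        if (is_abnormal_io(bio))
                blk_queue_split(&bio);  /* imposes discard/write-same queue_limits */
        else
                dm_queue_split(md, ti, &bio);
}
/* Deferred abnormal bios therefore bypassed blk_queue_split() and their
 * queue_limits (discard granularity, max bytes, ...) were never imposed;
 * resubmitting through submit_bio_noacct() closes that gap. */
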
@@ -3010,19 +2900,19 @@ int dm_test_deferred_remove_flag(struct mapped_device *md)
 
 int dm_suspended(struct dm_target *ti)
 {
-       return dm_suspended_md(dm_table_get_md(ti->table));
+       return dm_suspended_md(ti->table->md);
 }
 EXPORT_SYMBOL_GPL(dm_suspended);
 
 int dm_post_suspending(struct dm_target *ti)
 {
-       return dm_post_suspending_md(dm_table_get_md(ti->table));
+       return dm_post_suspending_md(ti->table->md);
 }
 EXPORT_SYMBOL_GPL(dm_post_suspending);
 
 int dm_noflush_suspending(struct dm_target *ti)
 {
-       return __noflush_suspending(dm_table_get_md(ti->table));
+       return __noflush_suspending(ti->table->md);
 }
 EXPORT_SYMBOL_GPL(dm_noflush_suspending);