dm: remove special-casing of bio-based immutable singleton target on NVMe
author	Mike Snitzer <snitzer@redhat.com>
Wed, 7 Oct 2020 19:15:08 +0000 (15:15 -0400)
committer	Mike Snitzer <snitzer@redhat.com>
Wed, 7 Oct 2020 22:08:41 +0000 (18:08 -0400)
Since commit 5a6c35f9af416 ("block: remove direct_make_request"), there
is no benefit to DM special-casing NVMe. Remove all code used to
establish DM_TYPE_NVME_BIO_BASED.
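
To make the effect concrete, here is the dm_submit_bio() dispatch this
patch collapses, condensed from the drivers/md/dm.c hunk below (a
before/after sketch, not code added by this patch):

	/* before: an immutable singleton target on NVMe took a no-split fast path */
	if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED)
		ret = __process_bio(md, map, bio);
	else
		ret = __split_and_process_bio(md, map, bio);

	/* after: every bio-based table takes the one remaining path */
	ret = __split_and_process_bio(md, map, bio);

The special case existed so that clones could be handed directly to the
underlying NVMe request queue; with direct_make_request() gone there is
no direct submission path to hand them to, so __process_bio() had no
remaining advantage over __split_and_process_bio().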

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
drivers/md/dm-table.c
drivers/md/dm.c
include/linux/device-mapper.h

diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 3ad22ad..ce543b7 100644
@@ -804,8 +804,7 @@ EXPORT_SYMBOL(dm_consume_args);
 static bool __table_type_bio_based(enum dm_queue_mode table_type)
 {
        return (table_type == DM_TYPE_BIO_BASED ||
-               table_type == DM_TYPE_DAX_BIO_BASED ||
-               table_type == DM_TYPE_NVME_BIO_BASED);
+               table_type == DM_TYPE_DAX_BIO_BASED);
 }
 
 static bool __table_type_request_based(enum dm_queue_mode table_type)
@@ -861,8 +860,6 @@ bool dm_table_supports_dax(struct dm_table *t,
        return true;
 }
 
-static bool dm_table_does_not_support_partial_completion(struct dm_table *t);
-
 static int device_is_rq_stackable(struct dm_target *ti, struct dm_dev *dev,
                                  sector_t start, sector_t len, void *data)
 {
@@ -892,7 +889,6 @@ static int dm_table_determine_type(struct dm_table *t)
                        goto verify_bio_based;
                }
                BUG_ON(t->type == DM_TYPE_DAX_BIO_BASED);
-               BUG_ON(t->type == DM_TYPE_NVME_BIO_BASED);
                goto verify_rq_based;
        }
 
@@ -931,15 +927,6 @@ verify_bio_based:
                if (dm_table_supports_dax(t, device_supports_dax, &page_size) ||
                    (list_empty(devices) && live_md_type == DM_TYPE_DAX_BIO_BASED)) {
                        t->type = DM_TYPE_DAX_BIO_BASED;
-               } else {
-                       /* Check if upgrading to NVMe bio-based is valid or required */
-                       tgt = dm_table_get_immutable_target(t);
-                       if (tgt && !tgt->max_io_len && dm_table_does_not_support_partial_completion(t)) {
-                               t->type = DM_TYPE_NVME_BIO_BASED;
-                               goto verify_rq_based; /* must be stacked directly on NVMe (blk-mq) */
-                       } else if (list_empty(devices) && live_md_type == DM_TYPE_NVME_BIO_BASED) {
-                               t->type = DM_TYPE_NVME_BIO_BASED;
-                       }
                }
                return 0;
        }
@@ -956,8 +943,7 @@ verify_rq_based:
         * (e.g. request completion process for partial completion.)
         */
        if (t->num_targets > 1) {
-               DMERR("%s DM doesn't support multiple targets",
-                     t->type == DM_TYPE_NVME_BIO_BASED ? "nvme bio-based" : "request-based");
+               DMERR("request-based DM doesn't support multiple targets");
                return -EINVAL;
        }
 
@@ -1651,20 +1637,6 @@ static bool dm_table_all_devices_attribute(struct dm_table *t,
        return true;
 }
 
-static int device_no_partial_completion(struct dm_target *ti, struct dm_dev *dev,
-                                       sector_t start, sector_t len, void *data)
-{
-       char b[BDEVNAME_SIZE];
-
-       /* For now, NVMe devices are the only devices of this class */
-       return (strncmp(bdevname(dev->bdev, b), "nvme", 4) == 0);
-}
-
-static bool dm_table_does_not_support_partial_completion(struct dm_table *t)
-{
-       return dm_table_all_devices_attribute(t, device_no_partial_completion);
-}
-
 static int device_not_write_same_capable(struct dm_target *ti, struct dm_dev *dev,
                                         sector_t start, sector_t len, void *data)
 {
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 32ac196..af1bab3 100644
@@ -975,7 +975,7 @@ static void clone_endio(struct bio *bio)
        dm_endio_fn endio = tio->ti->type->end_io;
        struct bio *orig_bio = io->orig_bio;
 
-       if (unlikely(error == BLK_STS_TARGET) && md->type != DM_TYPE_NVME_BIO_BASED) {
+       if (unlikely(error == BLK_STS_TARGET)) {
                if (bio_op(bio) == REQ_OP_DISCARD &&
                    !bio->bi_disk->queue->limits.max_discard_sectors)
                        disable_discard(md);
@@ -1626,45 +1626,6 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
        return ret;
 }
 
-/*
- * Optimized variant of __split_and_process_bio that leverages the
- * fact that targets that use it do _not_ have a need to split bios.
- */
-static blk_qc_t __process_bio(struct mapped_device *md, struct dm_table *map,
-                             struct bio *bio)
-{
-       struct clone_info ci;
-       blk_qc_t ret = BLK_QC_T_NONE;
-       int error = 0;
-
-       init_clone_info(&ci, md, map, bio);
-
-       if (bio->bi_opf & REQ_PREFLUSH) {
-               error = __send_empty_flush(&ci);
-               /* dec_pending submits any data associated with flush */
-       } else {
-               struct dm_target_io *tio;
-               struct dm_target *ti = md->immutable_target;
-
-               if (WARN_ON_ONCE(!ti)) {
-                       error = -EIO;
-                       goto out;
-               }
-
-               ci.bio = bio;
-               ci.sector_count = bio_sectors(bio);
-               if (__process_abnormal_io(&ci, ti, &error))
-                       goto out;
-
-               tio = alloc_tio(&ci, ti, 0, GFP_NOIO);
-               ret = __clone_and_map_simple_bio(&ci, tio, NULL);
-       }
-out:
-       /* drop the extra reference count */
-       dec_pending(ci.io, errno_to_blk_status(error));
-       return ret;
-}
-
 static blk_qc_t dm_submit_bio(struct bio *bio)
 {
        struct mapped_device *md = bio->bi_disk->private_data;
@@ -1710,10 +1671,7 @@ static blk_qc_t dm_submit_bio(struct bio *bio)
        if (is_abnormal_io(bio))
                blk_queue_split(&bio);
 
-       if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED)
-               ret = __process_bio(md, map, bio);
-       else
-               ret = __split_and_process_bio(md, map, bio);
+       ret = __split_and_process_bio(md, map, bio);
 out:
        dm_put_live_table(md, srcu_idx);
        return ret;
@@ -2038,11 +1996,10 @@ static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
        if (request_based)
                dm_stop_queue(q);
 
-       if (request_based || md->type == DM_TYPE_NVME_BIO_BASED) {
+       if (request_based) {
                /*
-                * Leverage the fact that request-based DM targets and
-                * NVMe bio based targets are immutable singletons
-                * - used to optimize both __process_bio and dm_mq_queue_rq
+                * Leverage the fact that request-based DM targets are
+                * immutable singletons - used to optimize dm_mq_queue_rq.
                 */
                md->immutable_target = dm_table_get_immutable_target(t);
        }
@@ -2164,7 +2121,6 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
                break;
        case DM_TYPE_BIO_BASED:
        case DM_TYPE_DAX_BIO_BASED:
-       case DM_TYPE_NVME_BIO_BASED:
                break;
        case DM_TYPE_NONE:
                WARN_ON_ONCE(true);
@@ -2922,7 +2878,6 @@ struct dm_md_mempools *dm_alloc_md_mempools(struct mapped_device *md, enum dm_qu
        switch (type) {
        case DM_TYPE_BIO_BASED:
        case DM_TYPE_DAX_BIO_BASED:
-       case DM_TYPE_NVME_BIO_BASED:
                pool_size = max(dm_get_reserved_bio_based_ios(), min_pool_size);
                front_pad = roundup(per_io_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
                io_front_pad = roundup(front_pad,  __alignof__(struct dm_io)) + offsetof(struct dm_io, tio);
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index d6f8d4b..61a66fb 100644
@@ -29,7 +29,6 @@ enum dm_queue_mode {
        DM_TYPE_BIO_BASED        = 1,
        DM_TYPE_REQUEST_BASED    = 2,
        DM_TYPE_DAX_BIO_BASED    = 3,
-       DM_TYPE_NVME_BIO_BASED   = 4,
 };
 
 typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;