dasd: move queue setup to common code
author    Christoph Hellwig <hch@lst.de>
          Wed, 28 Feb 2024 13:37:41 +0000 (05:37 -0800)
committer Jens Axboe <axboe@kernel.dk>
          Wed, 6 Mar 2024 15:27:00 +0000 (08:27 -0700)
Most of the code in setup_blk_queue is shared between all disciplines.
Move it to common code and leave a method to query the maximum number
of transferable blocks, and a flag to indicate discard support.
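
For illustration only (not part of this patch; the "foo" names and
DASD_FOO_MAX_BLOCKS constant are made up), a discipline now merely
reports its transfer limit and, where applicable, sets the discard
flag, while the queue limits themselves are applied once in
dasd_state_basic_to_ready():

    static unsigned int dasd_foo_max_sectors(struct dasd_block *block)
    {
            /* largest transfer for this discipline, in 512-byte sectors */
            return DASD_FOO_MAX_BLOCKS << block->s2b_shift;
    }

    static struct dasd_discipline dasd_foo_discipline = {
            .owner          = THIS_MODULE,
            .has_discard    = true,         /* only if the device supports discard */
            .max_sectors    = dasd_foo_max_sectors,
            /* ... remaining discipline methods unchanged ... */
    };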

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Stefan Haberland <sth@linux.ibm.com>
Link: https://lore.kernel.org/r/20240228133742.806274-3-hch@lst.de
Signed-off-by: Jens Axboe <axboe@kernel.dk>
drivers/s390/block/dasd.c
drivers/s390/block/dasd_diag.c
drivers/s390/block/dasd_eckd.c
drivers/s390/block/dasd_fba.c
drivers/s390/block/dasd_int.h

diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index e754e4f..bdeab44 100644
@@ -308,6 +308,7 @@ static int dasd_state_basic_to_known(struct dasd_device *device)
 static int dasd_state_basic_to_ready(struct dasd_device *device)
 {
        struct dasd_block *block = device->block;
+       struct request_queue *q;
        int rc = 0;
 
        /* make disk known with correct capacity */
@@ -327,8 +328,32 @@ static int dasd_state_basic_to_ready(struct dasd_device *device)
                goto out;
        }
 
-       if (device->discipline->setup_blk_queue)
-               device->discipline->setup_blk_queue(block);
+       q = block->gdp->queue;
+       blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
+       q->limits.max_dev_sectors = device->discipline->max_sectors(block);
+       blk_queue_max_hw_sectors(q, q->limits.max_dev_sectors);
+       blk_queue_logical_block_size(q, block->bp_block);
+       blk_queue_max_segments(q, USHRT_MAX);
+
+       /* With page sized segments each segment can be translated into one idaw/tidaw */
+       blk_queue_max_segment_size(q, PAGE_SIZE);
+       blk_queue_segment_boundary(q, PAGE_SIZE - 1);
+       blk_queue_dma_alignment(q, PAGE_SIZE - 1);
+
+       if (device->discipline->has_discard) {
+               unsigned int max_bytes, max_discard_sectors;
+
+               q->limits.discard_granularity = block->bp_block;
+
+               /* Calculate max_discard_sectors and make it PAGE aligned */
+               max_bytes = USHRT_MAX * block->bp_block;
+               max_bytes = ALIGN_DOWN(max_bytes, PAGE_SIZE);
+               max_discard_sectors = max_bytes / block->bp_block;
+
+               blk_queue_max_discard_sectors(q, max_discard_sectors);
+               blk_queue_max_write_zeroes_sectors(q, max_discard_sectors);
+       }
+
        set_capacity(block->gdp, block->blocks << block->s2b_shift);
        device->state = DASD_STATE_READY;
 
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 041088c..ea4b1d0 100644
@@ -617,25 +617,9 @@ dasd_diag_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
                    "dump sense not available for DIAG data");
 }
 
-/*
- * Initialize block layer request queue.
- */
-static void dasd_diag_setup_blk_queue(struct dasd_block *block)
+static unsigned int dasd_diag_max_sectors(struct dasd_block *block)
 {
-       unsigned int logical_block_size = block->bp_block;
-       struct request_queue *q = block->gdp->queue;
-       int max;
-
-       max = DIAG_MAX_BLOCKS << block->s2b_shift;
-       blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
-       q->limits.max_dev_sectors = max;
-       blk_queue_logical_block_size(q, logical_block_size);
-       blk_queue_max_hw_sectors(q, max);
-       blk_queue_max_segments(q, USHRT_MAX);
-       /* With page sized segments each segment can be translated into one idaw/tidaw */
-       blk_queue_max_segment_size(q, PAGE_SIZE);
-       blk_queue_segment_boundary(q, PAGE_SIZE - 1);
-       blk_queue_dma_alignment(q, PAGE_SIZE - 1);
+       return DIAG_MAX_BLOCKS << block->s2b_shift;
 }
 
 static int dasd_diag_pe_handler(struct dasd_device *device,
@@ -648,10 +632,10 @@ static struct dasd_discipline dasd_diag_discipline = {
        .owner = THIS_MODULE,
        .name = "DIAG",
        .ebcname = "DIAG",
+       .max_sectors = dasd_diag_max_sectors,
        .check_device = dasd_diag_check_device,
        .pe_handler = dasd_diag_pe_handler,
        .fill_geometry = dasd_diag_fill_geometry,
-       .setup_blk_queue = dasd_diag_setup_blk_queue,
        .start_IO = dasd_start_diag,
        .term_IO = dasd_diag_term_IO,
        .handle_terminated_request = dasd_diag_handle_terminated_request,
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 8aade17..373c1a8 100644
@@ -6826,17 +6826,9 @@ static void dasd_eckd_handle_hpf_error(struct dasd_device *device,
        dasd_schedule_requeue(device);
 }
 
-/*
- * Initialize block layer request queue.
- */
-static void dasd_eckd_setup_blk_queue(struct dasd_block *block)
+static unsigned int dasd_eckd_max_sectors(struct dasd_block *block)
 {
-       unsigned int logical_block_size = block->bp_block;
-       struct request_queue *q = block->gdp->queue;
-       struct dasd_device *device = block->base;
-       int max;
-
-       if (device->features & DASD_FEATURE_USERAW) {
+       if (block->base->features & DASD_FEATURE_USERAW) {
                /*
                 * the max_blocks value for raw_track access is 256
                 * it is higher than the native ECKD value because we
@@ -6844,19 +6836,10 @@ static void dasd_eckd_setup_blk_queue(struct dasd_block *block)
                 * so the max_hw_sectors are
                 * 2048 x 512B = 1024kB = 16 tracks
                 */
-               max = DASD_ECKD_MAX_BLOCKS_RAW << block->s2b_shift;
-       } else {
-               max = DASD_ECKD_MAX_BLOCKS << block->s2b_shift;
+               return DASD_ECKD_MAX_BLOCKS_RAW << block->s2b_shift;
        }
-       blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
-       q->limits.max_dev_sectors = max;
-       blk_queue_logical_block_size(q, logical_block_size);
-       blk_queue_max_hw_sectors(q, max);
-       blk_queue_max_segments(q, USHRT_MAX);
-       /* With page sized segments each segment can be translated into one idaw/tidaw */
-       blk_queue_max_segment_size(q, PAGE_SIZE);
-       blk_queue_segment_boundary(q, PAGE_SIZE - 1);
-       blk_queue_dma_alignment(q, PAGE_SIZE - 1);
+
+       return DASD_ECKD_MAX_BLOCKS << block->s2b_shift;
 }
 
 static struct ccw_driver dasd_eckd_driver = {
@@ -6888,7 +6871,7 @@ static struct dasd_discipline dasd_eckd_discipline = {
        .basic_to_ready = dasd_eckd_basic_to_ready,
        .online_to_ready = dasd_eckd_online_to_ready,
        .basic_to_known = dasd_eckd_basic_to_known,
-       .setup_blk_queue = dasd_eckd_setup_blk_queue,
+       .max_sectors = dasd_eckd_max_sectors,
        .fill_geometry = dasd_eckd_fill_geometry,
        .start_IO = dasd_start_IO,
        .term_IO = dasd_term_IO,
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index 045e548..bcbb2f8 100644
@@ -748,35 +748,9 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
        free_page((unsigned long) page);
 }
 
-/*
- * Initialize block layer request queue.
- */
-static void dasd_fba_setup_blk_queue(struct dasd_block *block)
+static unsigned int dasd_fba_max_sectors(struct dasd_block *block)
 {
-       unsigned int logical_block_size = block->bp_block;
-       struct request_queue *q = block->gdp->queue;
-       unsigned int max_bytes, max_discard_sectors;
-       int max;
-
-       max = DASD_FBA_MAX_BLOCKS << block->s2b_shift;
-       blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
-       q->limits.max_dev_sectors = max;
-       blk_queue_logical_block_size(q, logical_block_size);
-       blk_queue_max_hw_sectors(q, max);
-       blk_queue_max_segments(q, USHRT_MAX);
-       /* With page sized segments each segment can be translated into one idaw/tidaw */
-       blk_queue_max_segment_size(q, PAGE_SIZE);
-       blk_queue_segment_boundary(q, PAGE_SIZE - 1);
-
-       q->limits.discard_granularity = logical_block_size;
-
-       /* Calculate max_discard_sectors and make it PAGE aligned */
-       max_bytes = USHRT_MAX * logical_block_size;
-       max_bytes = ALIGN_DOWN(max_bytes, PAGE_SIZE);
-       max_discard_sectors = max_bytes / logical_block_size;
-
-       blk_queue_max_discard_sectors(q, max_discard_sectors);
-       blk_queue_max_write_zeroes_sectors(q, max_discard_sectors);
+       return DASD_FBA_MAX_BLOCKS << block->s2b_shift;
 }
 
 static int dasd_fba_pe_handler(struct dasd_device *device,
@@ -789,10 +763,11 @@ static struct dasd_discipline dasd_fba_discipline = {
        .owner = THIS_MODULE,
        .name = "FBA ",
        .ebcname = "FBA ",
+       .has_discard = true,
        .check_device = dasd_fba_check_characteristics,
        .do_analysis = dasd_fba_do_analysis,
        .pe_handler = dasd_fba_pe_handler,
-       .setup_blk_queue = dasd_fba_setup_blk_queue,
+       .max_sectors = dasd_fba_max_sectors,
        .fill_geometry = dasd_fba_fill_geometry,
        .start_IO = dasd_start_IO,
        .term_IO = dasd_term_IO,
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index a6c5f1f..b56d683 100644
@@ -293,6 +293,7 @@ struct dasd_discipline {
        struct module *owner;
        char ebcname[8];        /* a name used for tagging and printks */
        char name[8];           /* a name used for tagging and printks */
+       bool has_discard;
 
        struct list_head list;  /* used for list of disciplines */
 
@@ -331,10 +332,7 @@ struct dasd_discipline {
        int (*online_to_ready) (struct dasd_device *);
        int (*basic_to_known)(struct dasd_device *);
 
-       /*
-        * Initialize block layer request queue.
-        */
-       void (*setup_blk_queue)(struct dasd_block *);
+       unsigned int (*max_sectors)(struct dasd_block *);
        /* (struct dasd_device *);
         * Device operation functions. build_cp creates a ccw chain for
         * a block device request, start_io starts the request and